Merge tag android-5.1.0_r1 into AOSP_5.1_MERGE

Change-Id: Icab7f8c0a6e9a08449f9d06ac4404cb4f98a59a4
diff --git a/.DEPS.git b/.DEPS.git
index 94fc2d8..8f9da45 100644
--- a/.DEPS.git
+++ b/.DEPS.git
@@ -14,12 +14,21 @@
     'v8/build/gyp':
         Var('git_url') + '/external/gyp.git@a3e2a5caf24a1e0a45401e09ad131210bf16b852',
     'v8/buildtools':
-        Var('git_url') + '/chromium/buildtools.git@5d89977ce55240995d1596fe420b818468f5ec37',
+        Var('git_url') + '/chromium/buildtools.git@fb782d4369d5ae04f17a2fceef7de5a63e50f07b',
+    'v8/testing/gmock':
+        Var('git_url') + '/external/googlemock.git@896ba0e03f520fb9b6ed582bde2bd00847e3c3f2',
+    'v8/testing/gtest':
+        Var('git_url') + '/external/googletest.git@4650552ff637bb44ecf7784060091cbed3252211',
     'v8/third_party/icu':
-        Var('git_url') + '/chromium/deps/icu46.git@7a1ec88f69e25b3efcf76196d07f7815255db025',
+        Var('git_url') + '/chromium/deps/icu52.git@26d8859357ac0bfb86b939bf21c087b8eae22494',
 }
 
 deps_os = {
+    'android':
+    {
+        'v8/third_party/android_tools':
+            Var('git_url') + '/android_tools.git@31869996507de16812bb53a3d0aaa15cd6194c16',
+    },
     'win':
     {
         'v8/third_party/cygwin':
@@ -30,17 +39,71 @@
 }
 
 include_rules = [
-    
+    '+include',
+    '+unicode',
+    '+third_party/fdlibm'
 ]
 
 skip_child_includes = [
-    
+    'build',
+    'third_party'
 ]
 
 hooks = [
     {
     'action':
          [
+    'download_from_google_storage',
+    '--no_resume',
+    '--platform=win32',
+    '--no_auth',
+    '--bucket',
+    'chromium-clang-format',
+    '-s',
+    'v8/buildtools/win/clang-format.exe.sha1'
+],
+    'pattern':
+         '.',
+    'name':
+         'clang_format_win'
+},
+    {
+    'action':
+         [
+    'download_from_google_storage',
+    '--no_resume',
+    '--platform=darwin',
+    '--no_auth',
+    '--bucket',
+    'chromium-clang-format',
+    '-s',
+    'v8/buildtools/mac/clang-format.sha1'
+],
+    'pattern':
+         '.',
+    'name':
+         'clang_format_mac'
+},
+    {
+    'action':
+         [
+    'download_from_google_storage',
+    '--no_resume',
+    '--platform=linux*',
+    '--no_auth',
+    '--bucket',
+    'chromium-clang-format',
+    '-s',
+    'v8/buildtools/linux64/clang-format.sha1'
+],
+    'pattern':
+         '.',
+    'name':
+         'clang_format_linux'
+},
+    {
+    'action':
+         [
     'python',
     'v8/build/gyp_v8'
 ],
diff --git a/.gitignore b/.gitignore
index 2c8a2be..9d4325b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -23,6 +23,7 @@
 .cpplint-cache
 .cproject
 .d8_history
+.gclient_entries
 .project
 .pydevproject
 .settings
@@ -60,7 +61,10 @@
 /test/test262/data
 /test/test262/data.old
 /test/test262/tc39-test262-*
-/third_party
+/testing/gmock
+/testing/gtest
+/third_party/icu
+/third_party/llvm
 /tools/jsfunfuzz
 /tools/jsfunfuzz.zip
 /tools/oom_dump/oom_dump
@@ -76,3 +80,4 @@
 GSYMS
 GPATH
 gtags.files
+turbo*.dot
diff --git a/AUTHORS b/AUTHORS
index 7ac0815..f18761e 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -56,6 +56,7 @@
 Peter Varga <pvarga@inf.u-szeged.hu>
 Rafal Krypa <rafal@krypa.net>
 Rajeev R Krithivasan <rkrithiv@codeaurora.org>
+Refael Ackermann <refack@gmail.com>
 Rene Rebe <rene@exactcode.de>
 Robert Mustacchi <rm@fingolfin.org>
 Rodolph Perfetta <rodolph.perfetta@arm.com>
diff --git a/BUILD.gn b/BUILD.gn
index 75a6b29..315c203 100644
--- a/BUILD.gn
+++ b/BUILD.gn
@@ -14,8 +14,8 @@
 v8_interpreted_regexp = false
 v8_object_print = false
 v8_postmortem_support = false
-v8_use_default_platform = true
 v8_use_snapshot = true
+v8_use_external_startup_data = false
 v8_enable_extra_checks = is_debug
 v8_target_arch = cpu_arch
 v8_random_seed = "314159265"
@@ -25,7 +25,7 @@
 # Configurations
 #
 config("internal_config") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   include_dirs = [ "." ]
 
@@ -37,6 +37,12 @@
   }
 }
 
+config("internal_config_base") {
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
+
+  include_dirs = [ "." ]
+}
+
 # This config should only be applied to code using V8 and not any V8 code
 # itself.
 config("external_config") {
@@ -50,7 +56,7 @@
 }
 
 config("features") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   defines = []
 
@@ -89,11 +95,6 @@
       "V8_I18N_SUPPORT",
     ]
   }
-  if (v8_use_default_platform == true) {
-    defines += [
-      "V8_USE_DEFAULT_PLATFORM",
-    ]
-  }
   if (v8_compress_startup_data == "bz2") {
     defines += [
       "COMPRESS_STARTUP_DATA_BZ2",
@@ -109,10 +110,15 @@
       "ENABLE_HANDLE_ZAPPING",
     ]
   }
+  if (v8_use_external_startup_data == true) {
+    defines += [
+      "V8_USE_EXTERNAL_STARTUP_DATA",
+    ]
+  }
 }
 
 config("toolchain") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   defines = []
   cflags = []
@@ -160,20 +166,8 @@
 # Actions
 #
 
-action("generate_trig_table") {
-  visibility = ":*"  # Only targets in this file can depend on this.
-
-  script = "tools/generate-trig-table.py"
-
-  outputs = [
-    "$target_gen_dir/trig-table.cc"
-  ]
-
-  args = rebase_path(outputs, root_build_dir)
-}
-
 action("js2c") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   script = "tools/js2c.py"
 
@@ -184,23 +178,30 @@
   sources = [
     "src/runtime.js",
     "src/v8natives.js",
+    "src/symbol.js",
     "src/array.js",
     "src/string.js",
     "src/uri.js",
+    "third_party/fdlibm/fdlibm.js",
     "src/math.js",
-    "src/messages.js",
     "src/apinatives.js",
-    "src/debug-debugger.js",
-    "src/mirror-debugger.js",
-    "src/liveedit-debugger.js",
     "src/date.js",
-    "src/json.js",
     "src/regexp.js",
     "src/arraybuffer.js",
     "src/typedarray.js",
-    "src/weak_collection.js",
-    "src/promise.js",
+    "src/generator.js",
     "src/object-observe.js",
+    "src/collection.js",
+    "src/weak-collection.js",
+    "src/collection-iterator.js",
+    "src/promise.js",
+    "src/messages.js",
+    "src/json.js",
+    "src/array-iterator.js",
+    "src/string-iterator.js",
+    "src/debug-debugger.js",
+    "src/mirror-debugger.js",
+    "src/liveedit-debugger.js",
     "src/macros.py",
   ]
 
@@ -212,14 +213,23 @@
     sources += [ "src/i18n.js" ]
   }
 
-  args =
-    rebase_path(outputs, root_build_dir) +
-    [ "CORE", v8_compress_startup_data ] +
-    rebase_path(sources, root_build_dir)
+  args = [
+    rebase_path("$target_gen_dir/libraries.cc", root_build_dir),
+    "CORE",
+    v8_compress_startup_data
+  ] + rebase_path(sources, root_build_dir)
+
+  if (v8_use_external_startup_data) {
+    outputs += [ "$target_gen_dir/libraries.bin" ]
+    args += [
+      "--startup_blob",
+      rebase_path("$target_gen_dir/libraries.bin", root_build_dir)
+    ]
+  }
 }
 
 action("js2c_experimental") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   script = "tools/js2c.py"
 
@@ -229,29 +239,58 @@
 
   sources = [
     "src/macros.py",
-    "src/symbol.js",
     "src/proxy.js",
-    "src/collection.js",
-    "src/collection-iterator.js",
     "src/generator.js",
-    "src/array-iterator.js",
     "src/harmony-string.js",
     "src/harmony-array.js",
-    "src/harmony-math.js",
+    "src/harmony-classes.js",
   ]
 
   outputs = [
     "$target_gen_dir/experimental-libraries.cc"
   ]
 
-  args =
-    rebase_path(outputs, root_build_dir) +
-    [ "EXPERIMENTAL", v8_compress_startup_data ] +
-    rebase_path(sources, root_build_dir)
+  args = [
+    rebase_path("$target_gen_dir/experimental-libraries.cc", root_build_dir),
+    "EXPERIMENTAL",
+    v8_compress_startup_data
+  ] + rebase_path(sources, root_build_dir)
+
+  if (v8_use_external_startup_data) {
+    outputs += [ "$target_gen_dir/libraries_experimental.bin" ]
+    args += [
+      "--startup_blob",
+      rebase_path("$target_gen_dir/libraries_experimental.bin", root_build_dir)
+    ]
+  }
+}
+
+if (v8_use_external_startup_data) {
+  action("natives_blob") {
+    visibility = [ ":*" ]  # Only targets in this file can depend on this.
+
+    deps = [
+      ":js2c",
+      ":js2c_experimental"
+    ]
+
+    sources = [
+      "$target_gen_dir/libraries.bin",
+      "$target_gen_dir/libraries_experimental.bin"
+    ]
+
+    outputs = [
+      "$root_gen_dir/natives_blob.bin"
+    ]
+
+    script = "tools/concatenate-files.py"
+
+    args = rebase_path(sources + outputs, root_build_dir)
+  }
 }
 
 action("postmortem-metadata") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   script = "tools/gen-postmortem-metadata.py"
 
@@ -270,7 +309,7 @@
 }
 
 action("run_mksnapshot") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   deps = [ ":mksnapshot($host_toolchain)" ]
 
@@ -285,14 +324,21 @@
                                       "root_out_dir") + "/mksnapshot",
                        root_build_dir),
     "--log-snapshot-positions",
-    "--logfile", rebase_path("$target_gen_dir/snapshot.log", root_build_dir)
+    "--logfile", rebase_path("$target_gen_dir/snapshot.log", root_build_dir),
+    rebase_path("$target_gen_dir/snapshot.cc", root_build_dir)
   ]
 
   if (v8_random_seed != "0") {
     args += [ "--random-seed", v8_random_seed ]
   }
 
-  args += rebase_path(outputs, root_build_dir)
+  if (v8_use_external_startup_data) {
+    outputs += [ "$root_gen_dir/snapshot_blob.bin" ]
+    args += [
+      "--startup_blob",
+      rebase_path("$root_gen_dir/snapshot_blob.bin", root_build_dir)
+    ]
+  }
 }
 
 
@@ -301,20 +347,19 @@
 #
 
 source_set("v8_nosnapshot") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   deps = [
     ":js2c",
     ":js2c_experimental",
-    ":generate_trig_table",
     ":v8_base",
   ]
 
   sources = [
     "$target_gen_dir/libraries.cc",
     "$target_gen_dir/experimental-libraries.cc",
-    "$target_gen_dir/trig-table.cc",
     "src/snapshot-empty.cc",
+    "src/snapshot-common.cc",
   ]
 
   configs -= [ "//build/config/compiler:chromium_code" ]
@@ -323,12 +368,11 @@
 }
 
 source_set("v8_snapshot") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   deps = [
     ":js2c",
     ":js2c_experimental",
-    ":generate_trig_table",
     ":run_mksnapshot",
     ":v8_base",
   ]
@@ -336,8 +380,8 @@
   sources = [
     "$target_gen_dir/libraries.cc",
     "$target_gen_dir/experimental-libraries.cc",
-    "$target_gen_dir/trig-table.cc",
     "$target_gen_dir/snapshot.cc",
+    "src/snapshot-common.cc",
   ]
 
   configs -= [ "//build/config/compiler:chromium_code" ]
@@ -345,8 +389,31 @@
   configs += [ ":internal_config", ":features", ":toolchain" ]
 }
 
+if (v8_use_external_startup_data) {
+  source_set("v8_external_snapshot") {
+    visibility = [ ":*" ]  # Only targets in this file can depend on this.
+
+    deps = [
+      ":js2c",
+      ":js2c_experimental",
+      ":run_mksnapshot",
+      ":v8_base",
+      ":natives_blob",
+    ]
+
+    sources = [
+      "src/natives-external.cc",
+      "src/snapshot-external.cc",
+    ]
+
+    configs -= [ "//build/config/compiler:chromium_code" ]
+    configs += [ "//build/config/compiler:no_chromium_code" ]
+    configs += [ ":internal_config", ":features", ":toolchain" ]
+  }
+}
+
 source_set("v8_base") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   sources = [
     "src/accessors.cc",
@@ -365,8 +432,14 @@
     "src/assembler.h",
     "src/assert-scope.h",
     "src/assert-scope.cc",
+    "src/ast-value-factory.cc",
+    "src/ast-value-factory.h",
     "src/ast.cc",
     "src/ast.h",
+    "src/background-parsing-task.cc",
+    "src/background-parsing-task.h",
+    "src/bailout-reason.cc",
+    "src/bailout-reason.h",
     "src/bignum-dtoa.cc",
     "src/bignum-dtoa.h",
     "src/bignum.cc",
@@ -384,6 +457,8 @@
     "src/checks.h",
     "src/circular-queue-inl.h",
     "src/circular-queue.h",
+    "src/code-factory.cc",
+    "src/code-factory.h",
     "src/code-stubs.cc",
     "src/code-stubs.h",
     "src/code-stubs-hydrogen.cc",
@@ -392,6 +467,107 @@
     "src/codegen.h",
     "src/compilation-cache.cc",
     "src/compilation-cache.h",
+    "src/compiler/access-builder.cc",
+    "src/compiler/access-builder.h",
+    "src/compiler/ast-graph-builder.cc",
+    "src/compiler/ast-graph-builder.h",
+    "src/compiler/change-lowering.cc",
+    "src/compiler/change-lowering.h",
+    "src/compiler/code-generator-impl.h",
+    "src/compiler/code-generator.cc",
+    "src/compiler/code-generator.h",
+    "src/compiler/common-node-cache.h",
+    "src/compiler/common-operator.cc",
+    "src/compiler/common-operator.h",
+    "src/compiler/control-builders.cc",
+    "src/compiler/control-builders.h",
+    "src/compiler/frame.h",
+    "src/compiler/gap-resolver.cc",
+    "src/compiler/gap-resolver.h",
+    "src/compiler/generic-algorithm-inl.h",
+    "src/compiler/generic-algorithm.h",
+    "src/compiler/generic-graph.h",
+    "src/compiler/generic-node-inl.h",
+    "src/compiler/generic-node.h",
+    "src/compiler/graph-builder.cc",
+    "src/compiler/graph-builder.h",
+    "src/compiler/graph-inl.h",
+    "src/compiler/graph-reducer.cc",
+    "src/compiler/graph-reducer.h",
+    "src/compiler/graph-replay.cc",
+    "src/compiler/graph-replay.h",
+    "src/compiler/graph-visualizer.cc",
+    "src/compiler/graph-visualizer.h",
+    "src/compiler/graph.cc",
+    "src/compiler/graph.h",
+    "src/compiler/instruction-codes.h",
+    "src/compiler/instruction-selector-impl.h",
+    "src/compiler/instruction-selector.cc",
+    "src/compiler/instruction-selector.h",
+    "src/compiler/instruction.cc",
+    "src/compiler/instruction.h",
+    "src/compiler/js-builtin-reducer.cc",
+    "src/compiler/js-builtin-reducer.h",
+    "src/compiler/js-context-specialization.cc",
+    "src/compiler/js-context-specialization.h",
+    "src/compiler/js-generic-lowering.cc",
+    "src/compiler/js-generic-lowering.h",
+    "src/compiler/js-graph.cc",
+    "src/compiler/js-graph.h",
+    "src/compiler/js-inlining.cc",
+    "src/compiler/js-inlining.h",
+    "src/compiler/js-operator.h",
+    "src/compiler/js-typed-lowering.cc",
+    "src/compiler/js-typed-lowering.h",
+    "src/compiler/linkage-impl.h",
+    "src/compiler/linkage.cc",
+    "src/compiler/linkage.h",
+    "src/compiler/machine-operator-reducer.cc",
+    "src/compiler/machine-operator-reducer.h",
+    "src/compiler/machine-operator.cc",
+    "src/compiler/machine-operator.h",
+    "src/compiler/machine-type.cc",
+    "src/compiler/machine-type.h",
+    "src/compiler/node-aux-data-inl.h",
+    "src/compiler/node-aux-data.h",
+    "src/compiler/node-cache.cc",
+    "src/compiler/node-cache.h",
+    "src/compiler/node-matchers.h",
+    "src/compiler/node-properties-inl.h",
+    "src/compiler/node-properties.h",
+    "src/compiler/node.cc",
+    "src/compiler/node.h",
+    "src/compiler/opcodes.h",
+    "src/compiler/operator-properties-inl.h",
+    "src/compiler/operator-properties.h",
+    "src/compiler/operator.cc",
+    "src/compiler/operator.h",
+    "src/compiler/phi-reducer.h",
+    "src/compiler/pipeline.cc",
+    "src/compiler/pipeline.h",
+    "src/compiler/raw-machine-assembler.cc",
+    "src/compiler/raw-machine-assembler.h",
+    "src/compiler/register-allocator.cc",
+    "src/compiler/register-allocator.h",
+    "src/compiler/representation-change.h",
+    "src/compiler/schedule.cc",
+    "src/compiler/schedule.h",
+    "src/compiler/scheduler.cc",
+    "src/compiler/scheduler.h",
+    "src/compiler/simplified-lowering.cc",
+    "src/compiler/simplified-lowering.h",
+    "src/compiler/simplified-operator-reducer.cc",
+    "src/compiler/simplified-operator-reducer.h",
+    "src/compiler/simplified-operator.cc",
+    "src/compiler/simplified-operator.h",
+    "src/compiler/source-position.cc",
+    "src/compiler/source-position.h",
+    "src/compiler/typer.cc",
+    "src/compiler/typer.h",
+    "src/compiler/value-numbering-reducer.cc",
+    "src/compiler/value-numbering-reducer.h",
+    "src/compiler/verifier.cc",
+    "src/compiler/verifier.h",
     "src/compiler.cc",
     "src/compiler.h",
     "src/contexts.cc",
@@ -404,8 +580,6 @@
     "src/cpu-profiler-inl.h",
     "src/cpu-profiler.cc",
     "src/cpu-profiler.h",
-    "src/cpu.cc",
-    "src/cpu.h",
     "src/data-flow.cc",
     "src/data-flow.h",
     "src/date.cc",
@@ -447,7 +621,6 @@
     "src/fast-dtoa.cc",
     "src/fast-dtoa.h",
     "src/feedback-slots.h",
-    "src/field-index.cc",
     "src/field-index.h",
     "src/field-index-inl.h",
     "src/fixed-dtoa.cc",
@@ -471,14 +644,34 @@
     "src/handles.cc",
     "src/handles.h",
     "src/hashmap.h",
-    "src/heap-inl.h",
     "src/heap-profiler.cc",
     "src/heap-profiler.h",
     "src/heap-snapshot-generator-inl.h",
     "src/heap-snapshot-generator.cc",
     "src/heap-snapshot-generator.h",
-    "src/heap.cc",
-    "src/heap.h",
+    "src/heap/gc-idle-time-handler.cc",
+    "src/heap/gc-idle-time-handler.h",
+    "src/heap/gc-tracer.cc",
+    "src/heap/gc-tracer.h",
+    "src/heap/heap-inl.h",
+    "src/heap/heap.cc",
+    "src/heap/heap.h",
+    "src/heap/incremental-marking.cc",
+    "src/heap/incremental-marking.h",
+    "src/heap/mark-compact-inl.h",
+    "src/heap/mark-compact.cc",
+    "src/heap/mark-compact.h",
+    "src/heap/objects-visiting-inl.h",
+    "src/heap/objects-visiting.cc",
+    "src/heap/objects-visiting.h",
+    "src/heap/spaces-inl.h",
+    "src/heap/spaces.cc",
+    "src/heap/spaces.h",
+    "src/heap/store-buffer-inl.h",
+    "src/heap/store-buffer.cc",
+    "src/heap/store-buffer.h",
+    "src/heap/sweeper-thread.h",
+    "src/heap/sweeper-thread.cc",
     "src/hydrogen-alias-analysis.h",
     "src/hydrogen-bce.cc",
     "src/hydrogen-bce.h",
@@ -535,13 +728,25 @@
     "src/i18n.h",
     "src/icu_util.cc",
     "src/icu_util.h",
-    "src/ic-inl.h",
-    "src/ic.cc",
-    "src/ic.h",
-    "src/incremental-marking.cc",
-    "src/incremental-marking.h",
+    "src/ic/access-compiler.cc",
+    "src/ic/access-compiler.h",
+    "src/ic/call-optimization.cc",
+    "src/ic/call-optimization.h",
+    "src/ic/handler-compiler.cc",
+    "src/ic/handler-compiler.h",
+    "src/ic/ic-inl.h",
+    "src/ic/ic-state.cc",
+    "src/ic/ic-state.h",
+    "src/ic/ic.cc",
+    "src/ic/ic.h",
+    "src/ic/ic-compiler.cc",
+    "src/ic/ic-compiler.h",
+    "src/ic/stub-cache.cc",
+    "src/ic/stub-cache.h",
     "src/interface.cc",
     "src/interface.h",
+    "src/interface-descriptors.cc",
+    "src/interface-descriptors.h",
     "src/interpreter-irregexp.cc",
     "src/interpreter-irregexp.h",
     "src/isolate.cc",
@@ -551,13 +756,6 @@
     "src/jsregexp-inl.h",
     "src/jsregexp.cc",
     "src/jsregexp.h",
-    # TODO(jochen): move libplatform/ files to their own target.
-    "src/libplatform/default-platform.cc",
-    "src/libplatform/default-platform.h",
-    "src/libplatform/task-queue.cc",
-    "src/libplatform/task-queue.h",
-    "src/libplatform/worker-thread.cc",
-    "src/libplatform/worker-thread.h",
     "src/list-inl.h",
     "src/list.h",
     "src/lithium-allocator-inl.h",
@@ -574,11 +772,10 @@
     "src/log-utils.h",
     "src/log.cc",
     "src/log.h",
+    "src/lookup-inl.h",
     "src/lookup.cc",
     "src/lookup.h",
     "src/macro-assembler.h",
-    "src/mark-compact.cc",
-    "src/mark-compact.h",
     "src/messages.cc",
     "src/messages.h",
     "src/msan.h",
@@ -586,24 +783,16 @@
     "src/objects-debug.cc",
     "src/objects-inl.h",
     "src/objects-printer.cc",
-    "src/objects-visiting.cc",
-    "src/objects-visiting.h",
     "src/objects.cc",
     "src/objects.h",
-    "src/optimizing-compiler-thread.h",
     "src/optimizing-compiler-thread.cc",
+    "src/optimizing-compiler-thread.h",
+    "src/ostreams.cc",
+    "src/ostreams.h",
     "src/parser.cc",
     "src/parser.h",
-    "src/platform/elapsed-timer.h",
-    "src/platform/time.cc",
-    "src/platform/time.h",
-    "src/platform.h",
-    "src/platform/condition-variable.cc",
-    "src/platform/condition-variable.h",
-    "src/platform/mutex.cc",
-    "src/platform/mutex.h",
-    "src/platform/semaphore.cc",
-    "src/platform/semaphore.h",
+    "src/perf-jit.cc",
+    "src/perf-jit.h",
     "src/preparse-data-format.h",
     "src/preparse-data.cc",
     "src/preparse-data.h",
@@ -617,6 +806,7 @@
     "src/property-details.h",
     "src/property.cc",
     "src/property.h",
+    "src/prototype.h",
     "src/regexp-macro-assembler-irregexp-inl.h",
     "src/regexp-macro-assembler-irregexp.cc",
     "src/regexp-macro-assembler-irregexp.h",
@@ -648,29 +838,23 @@
     "src/serialize.h",
     "src/small-pointer-list.h",
     "src/smart-pointers.h",
-    "src/snapshot-common.cc",
+    "src/snapshot-source-sink.cc",
+    "src/snapshot-source-sink.h",
     "src/snapshot.h",
-    "src/spaces-inl.h",
-    "src/spaces.cc",
-    "src/spaces.h",
-    "src/store-buffer-inl.h",
-    "src/store-buffer.cc",
-    "src/store-buffer.h",
     "src/string-search.cc",
     "src/string-search.h",
     "src/string-stream.cc",
     "src/string-stream.h",
     "src/strtod.cc",
     "src/strtod.h",
-    "src/stub-cache.cc",
-    "src/stub-cache.h",
-    "src/sweeper-thread.h",
-    "src/sweeper-thread.cc",
     "src/token.cc",
     "src/token.h",
     "src/transitions-inl.h",
     "src/transitions.cc",
     "src/transitions.h",
+    "src/type-feedback-vector-inl.h",
+    "src/type-feedback-vector.cc",
+    "src/type-feedback-vector.h",
     "src/type-info.cc",
     "src/type-info.h",
     "src/types-inl.h",
@@ -688,11 +872,8 @@
     "src/utils-inl.h",
     "src/utils.cc",
     "src/utils.h",
-    "src/utils/random-number-generator.cc",
-    "src/utils/random-number-generator.h",
     "src/v8.cc",
     "src/v8.h",
-    "src/v8checks.h",
     "src/v8memory.h",
     "src/v8threads.cc",
     "src/v8threads.h",
@@ -705,6 +886,8 @@
     "src/zone-inl.h",
     "src/zone.cc",
     "src/zone.h",
+    "third_party/fdlibm/fdlibm.cc",
+    "third_party/fdlibm/fdlibm.h",
   ]
 
   if (v8_target_arch == "x86") {
@@ -724,7 +907,7 @@
       "src/ia32/frames-ia32.cc",
       "src/ia32/frames-ia32.h",
       "src/ia32/full-codegen-ia32.cc",
-      "src/ia32/ic-ia32.cc",
+      "src/ia32/interface-descriptors-ia32.cc",
       "src/ia32/lithium-codegen-ia32.cc",
       "src/ia32/lithium-codegen-ia32.h",
       "src/ia32/lithium-gap-resolver-ia32.cc",
@@ -735,7 +918,13 @@
       "src/ia32/macro-assembler-ia32.h",
       "src/ia32/regexp-macro-assembler-ia32.cc",
       "src/ia32/regexp-macro-assembler-ia32.h",
-      "src/ia32/stub-cache-ia32.cc",
+      "src/compiler/ia32/code-generator-ia32.cc",
+      "src/compiler/ia32/instruction-codes-ia32.h",
+      "src/compiler/ia32/instruction-selector-ia32.cc",
+      "src/compiler/ia32/linkage-ia32.cc",
+      "src/ic/ia32/ic-ia32.cc",
+      "src/ic/ia32/ic-compiler-ia32.cc",
+      "src/ic/ia32/stub-cache-ia32.cc",
     ]
   } else if (v8_target_arch == "x64") {
     sources += [
@@ -754,7 +943,7 @@
       "src/x64/frames-x64.cc",
       "src/x64/frames-x64.h",
       "src/x64/full-codegen-x64.cc",
-      "src/x64/ic-x64.cc",
+      "src/x64/interface-descriptors-x64.cc",
       "src/x64/lithium-codegen-x64.cc",
       "src/x64/lithium-codegen-x64.h",
       "src/x64/lithium-gap-resolver-x64.cc",
@@ -765,7 +954,15 @@
       "src/x64/macro-assembler-x64.h",
       "src/x64/regexp-macro-assembler-x64.cc",
       "src/x64/regexp-macro-assembler-x64.h",
-      "src/x64/stub-cache-x64.cc",
+      "src/compiler/x64/code-generator-x64.cc",
+      "src/compiler/x64/instruction-codes-x64.h",
+      "src/compiler/x64/instruction-selector-x64.cc",
+      "src/compiler/x64/linkage-x64.cc",
+      "src/ic/x64/access-compiler-x64.cc",
+      "src/ic/x64/handler-compiler-x64.cc",
+      "src/ic/x64/ic-x64.cc",
+      "src/ic/x64/ic-compiler-x64.cc",
+      "src/ic/x64/stub-cache-x64.cc",
     ]
   } else if (v8_target_arch == "arm") {
     sources += [
@@ -786,7 +983,8 @@
       "src/arm/frames-arm.cc",
       "src/arm/frames-arm.h",
       "src/arm/full-codegen-arm.cc",
-      "src/arm/ic-arm.cc",
+      "src/arm/interface-descriptors-arm.cc",
+      "src/arm/interface-descriptors-arm.h",
       "src/arm/lithium-arm.cc",
       "src/arm/lithium-arm.h",
       "src/arm/lithium-codegen-arm.cc",
@@ -798,7 +996,15 @@
       "src/arm/regexp-macro-assembler-arm.cc",
       "src/arm/regexp-macro-assembler-arm.h",
       "src/arm/simulator-arm.cc",
-      "src/arm/stub-cache-arm.cc",
+      "src/compiler/arm/code-generator-arm.cc",
+      "src/compiler/arm/instruction-codes-arm.h",
+      "src/compiler/arm/instruction-selector-arm.cc",
+      "src/compiler/arm/linkage-arm.cc",
+      "src/ic/arm/access-compiler-arm.cc",
+      "src/ic/arm/handler-compiler-arm.cc",
+      "src/ic/arm/ic-arm.cc",
+      "src/ic/arm/ic-compiler-arm.cc",
+      "src/ic/arm/stub-cache-arm.cc",
     ]
   } else if (v8_target_arch == "arm64") {
     sources += [
@@ -822,11 +1028,12 @@
       "src/arm64/frames-arm64.cc",
       "src/arm64/frames-arm64.h",
       "src/arm64/full-codegen-arm64.cc",
-      "src/arm64/ic-arm64.cc",
       "src/arm64/instructions-arm64.cc",
       "src/arm64/instructions-arm64.h",
       "src/arm64/instrument-arm64.cc",
       "src/arm64/instrument-arm64.h",
+      "src/arm64/interface-descriptors-arm64.cc",
+      "src/arm64/interface-descriptors-arm64.h",
       "src/arm64/lithium-arm64.cc",
       "src/arm64/lithium-arm64.h",
       "src/arm64/lithium-codegen-arm64.cc",
@@ -840,9 +1047,17 @@
       "src/arm64/regexp-macro-assembler-arm64.h",
       "src/arm64/simulator-arm64.cc",
       "src/arm64/simulator-arm64.h",
-      "src/arm64/stub-cache-arm64.cc",
       "src/arm64/utils-arm64.cc",
       "src/arm64/utils-arm64.h",
+      "src/compiler/arm64/code-generator-arm64.cc",
+      "src/compiler/arm64/instruction-codes-arm64.h",
+      "src/compiler/arm64/instruction-selector-arm64.cc",
+      "src/compiler/arm64/linkage-arm64.cc",
+      "src/ic/arm64/access-compiler-arm64.cc",
+      "src/ic/arm64/handler-compiler-arm64.cc",
+      "src/ic/arm64/ic-arm64.cc",
+      "src/ic/arm64/ic-compiler-arm64.cc",
+      "src/ic/arm64/stub-cache-arm64.cc",
     ]
   } else if (v8_target_arch == "mipsel") {
     sources += [
@@ -863,7 +1078,7 @@
       "src/mips/frames-mips.cc",
       "src/mips/frames-mips.h",
       "src/mips/full-codegen-mips.cc",
-      "src/mips/ic-mips.cc",
+      "src/mips/interface-descriptors-mips.cc",
       "src/mips/lithium-codegen-mips.cc",
       "src/mips/lithium-codegen-mips.h",
       "src/mips/lithium-gap-resolver-mips.cc",
@@ -875,7 +1090,48 @@
       "src/mips/regexp-macro-assembler-mips.cc",
       "src/mips/regexp-macro-assembler-mips.h",
       "src/mips/simulator-mips.cc",
-      "src/mips/stub-cache-mips.cc",
+      "src/ic/mips/access-compiler-mips.cc",
+      "src/ic/mips/handler-compiler-mips.cc",
+      "src/ic/mips/ic-mips.cc",
+      "src/ic/mips/ic-compiler-mips.cc",
+      "src/ic/mips/stub-cache-mips.cc",
+    ]
+  } else if (v8_target_arch == "mips64el") {
+    sources += [
+      "src/mips64/assembler-mips64.cc",
+      "src/mips64/assembler-mips64.h",
+      "src/mips64/assembler-mips64-inl.h",
+      "src/mips64/builtins-mips64.cc",
+      "src/mips64/codegen-mips64.cc",
+      "src/mips64/codegen-mips64.h",
+      "src/mips64/code-stubs-mips64.cc",
+      "src/mips64/code-stubs-mips64.h",
+      "src/mips64/constants-mips64.cc",
+      "src/mips64/constants-mips64.h",
+      "src/mips64/cpu-mips64.cc",
+      "src/mips64/debug-mips64.cc",
+      "src/mips64/deoptimizer-mips64.cc",
+      "src/mips64/disasm-mips64.cc",
+      "src/mips64/frames-mips64.cc",
+      "src/mips64/frames-mips64.h",
+      "src/mips64/full-codegen-mips64.cc",
+      "src/mips64/interface-descriptors-mips64.cc",
+      "src/mips64/lithium-codegen-mips64.cc",
+      "src/mips64/lithium-codegen-mips64.h",
+      "src/mips64/lithium-gap-resolver-mips64.cc",
+      "src/mips64/lithium-gap-resolver-mips64.h",
+      "src/mips64/lithium-mips64.cc",
+      "src/mips64/lithium-mips64.h",
+      "src/mips64/macro-assembler-mips64.cc",
+      "src/mips64/macro-assembler-mips64.h",
+      "src/mips64/regexp-macro-assembler-mips64.cc",
+      "src/mips64/regexp-macro-assembler-mips64.h",
+      "src/mips64/simulator-mips64.cc",
+      "src/ic/mips64/access-compiler-mips64.cc",
+      "src/ic/mips64/handler-compiler-mips64.cc",
+      "src/ic/mips64/ic-mips64.cc",
+      "src/ic/mips64/ic-compiler-mips64.cc",
+      "src/ic/mips64/stub-cache-mips64.cc",
     ]
   }
 
@@ -883,58 +1139,24 @@
   configs += [ "//build/config/compiler:no_chromium_code" ]
   configs += [ ":internal_config", ":features", ":toolchain" ]
 
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:optimize" ]
+    configs += [ "//build/config/compiler:optimize_max" ]
+  }
+
   defines = []
   deps = [ ":v8_libbase" ]
 
-  if (is_posix) {
-    sources += [
-      "src/platform-posix.cc"
-    ]
+  if (is_win) {
+    # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
+    cflags = [ "/wd4267" ]
   }
-
   if (is_linux) {
-    sources += [
-      "src/platform-linux.cc"
-    ]
-
-    libs = [ "rt" ]
     if (v8_compress_startup_data == "bz2") {
       libs += [ "bz2" ]
     }
-  } else if (is_android) {
-    defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
-    sources += [ "src/platform-posix.cc" ]
-
-    if (build_os == "mac") {
-      if (current_toolchain == host_toolchain) {
-        sources += [ "src/platform-macos.cc" ]
-      } else {
-        sources += [ "src/platform-linux.cc" ]
-      }
-    } else {
-      sources += [ "src/platform-linux.cc" ]
-      if (current_toolchain == host_toolchain) {
-        defines += [ "V8_LIBRT_NOT_AVAILABLE" ]
-      }
-    }
-  } else if (is_mac) {
-    sources += [ "src/platform-macos.cc" ]
-  } else if (is_win) {
-    # TODO(jochen): Add support for cygwin.
-    sources += [
-      "src/platform-win32.cc",
-      "src/win32-math.cc",
-      "src/win32-math.h",
-    ]
-
-    defines += [ "_CRT_RAND_S" ]  # for rand_s()
-
-    libs = [ "winmm.lib", "ws2_32.lib" ]
   }
 
-  # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
-
-
   if (v8_enable_i18n_support) {
     deps += [ "//third_party/icu" ]
     if (is_win) {
@@ -956,7 +1178,7 @@
 }
 
 source_set("v8_libbase") {
-  visibility = ":*"  # Only targets in this file can depend on this.
+  visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
   sources = [
     "src/base/atomicops.h",
@@ -969,21 +1191,120 @@
     "src/base/atomicops_internals_x86_gcc.cc",
     "src/base/atomicops_internals_x86_gcc.h",
     "src/base/atomicops_internals_x86_msvc.h",
+    "src/base/bits.cc",
+    "src/base/bits.h",
     "src/base/build_config.h",
+    "src/base/cpu.cc",
+    "src/base/cpu.h",
+    "src/base/division-by-constant.cc",
+    "src/base/division-by-constant.h",
+    "src/base/flags.h",
     "src/base/lazy-instance.h",
+    "src/base/logging.cc",
+    "src/base/logging.h",
     "src/base/macros.h",
     "src/base/once.cc",
     "src/base/once.h",
+    "src/base/platform/elapsed-timer.h",
+    "src/base/platform/time.cc",
+    "src/base/platform/time.h",
+    "src/base/platform/condition-variable.cc",
+    "src/base/platform/condition-variable.h",
+    "src/base/platform/mutex.cc",
+    "src/base/platform/mutex.h",
+    "src/base/platform/platform.h",
+    "src/base/platform/semaphore.cc",
+    "src/base/platform/semaphore.h",
     "src/base/safe_conversions.h",
     "src/base/safe_conversions_impl.h",
     "src/base/safe_math.h",
     "src/base/safe_math_impl.h",
-    "src/base/win32-headers.h",
+    "src/base/sys-info.cc",
+    "src/base/sys-info.h",
+    "src/base/utils/random-number-generator.cc",
+    "src/base/utils/random-number-generator.h",
   ]
 
   configs -= [ "//build/config/compiler:chromium_code" ]
   configs += [ "//build/config/compiler:no_chromium_code" ]
-  configs += [ ":internal_config", ":features", ":toolchain" ]
+  configs += [ ":internal_config_base", ":features", ":toolchain" ]
+
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:optimize" ]
+    configs += [ "//build/config/compiler:optimize_max" ]
+  }
+
+  defines = []
+
+  if (is_posix) {
+    sources += [
+      "src/base/platform/platform-posix.cc"
+    ]
+  }
+
+  if (is_linux) {
+    sources += [
+      "src/base/platform/platform-linux.cc"
+    ]
+
+    libs = [ "rt" ]
+  } else if (is_android) {
+    defines += [ "CAN_USE_VFP_INSTRUCTIONS" ]
+
+    if (build_os == "mac") {
+      if (current_toolchain == host_toolchain) {
+        sources += [ "src/base/platform/platform-macos.cc" ]
+      } else {
+        sources += [ "src/base/platform/platform-linux.cc" ]
+      }
+    } else {
+      sources += [ "src/base/platform/platform-linux.cc" ]
+      if (current_toolchain == host_toolchain) {
+        defines += [ "V8_LIBRT_NOT_AVAILABLE" ]
+      }
+    }
+  } else if (is_mac) {
+    sources += [ "src/base/platform/platform-macos.cc" ]
+  } else if (is_win) {
+    # TODO(jochen): Add support for cygwin.
+    sources += [
+      "src/base/platform/platform-win32.cc",
+      "src/base/win32-headers.h",
+      "src/base/win32-math.cc",
+      "src/base/win32-math.h",
+    ]
+
+    defines += [ "_CRT_RAND_S" ]  # for rand_s()
+
+    libs = [ "winmm.lib", "ws2_32.lib" ]
+  }
+
+  # TODO(jochen): Add support for qnx, freebsd, openbsd, netbsd, and solaris.
+}
+
+source_set("v8_libplatform") {
+  sources = [
+    "include/libplatform/libplatform.h",
+    "src/libplatform/default-platform.cc",
+    "src/libplatform/default-platform.h",
+    "src/libplatform/task-queue.cc",
+    "src/libplatform/task-queue.h",
+    "src/libplatform/worker-thread.cc",
+    "src/libplatform/worker-thread.h",
+  ]
+
+  configs -= [ "//build/config/compiler:chromium_code" ]
+  configs += [ "//build/config/compiler:no_chromium_code" ]
+  configs += [ ":internal_config_base", ":features", ":toolchain" ]
+
+  if (!is_debug) {
+    configs -= [ "//build/config/compiler:optimize" ]
+    configs += [ "//build/config/compiler:optimize_max" ]
+  }
+
+  deps = [
+    ":v8_libbase",
+  ]
 }
 
 ###############################################################################
@@ -992,7 +1313,7 @@
 
 if (current_toolchain == host_toolchain) {
   executable("mksnapshot") {
-    visibility = ":*"  # Only targets in this file can depend on this.
+    visibility = [ ":*" ]  # Only targets in this file can depend on this.
 
     sources = [
       "src/mksnapshot.cc",
@@ -1004,6 +1325,7 @@
 
     deps = [
       ":v8_base",
+      ":v8_libplatform",
       ":v8_nosnapshot",
     ]
 
@@ -1024,7 +1346,12 @@
     "src/v8dll-main.cc",
   ]
 
-  if (v8_use_snapshot) {
+  if (v8_use_external_startup_data) {
+    deps = [
+      ":v8_base",
+      ":v8_external_snapshot",
+    ]
+  } else if (v8_use_snapshot) {
     deps = [
       ":v8_base",
       ":v8_snapshot",
@@ -1042,6 +1369,7 @@
 
   direct_dependent_configs = [ ":external_config" ]
 
+  libs = []
   if (is_android && current_toolchain != host_toolchain) {
     libs += [ "log" ]
   }
@@ -1050,7 +1378,12 @@
 } else {
 
 group("v8") {
-  if (v8_use_snapshot) {
+  if (v8_use_external_startup_data) {
+    deps = [
+      ":v8_base",
+      ":v8_external_snapshot",
+    ]
+  } else if (v8_use_snapshot) {
     deps = [
       ":v8_base",
       ":v8_snapshot",
diff --git a/ChangeLog b/ChangeLog
index ac285e3..d787965 100644
--- a/ChangeLog
+++ b/ChangeLog
@@ -1,3 +1,672 @@
+2014-09-25: Version 3.29.88
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-24: Version 3.29.87
+
+        Preserve message when rethrowing exception (issue 3583).
+
+        Fix escaped index JSON parsing (Chromium issue 416449).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-23: Version 3.29.84
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-23: Version 3.29.83
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-23: Version 3.29.82
+
+        Fix escaped index JSON parsing (Chromium issue 416449).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-17: Version 3.29.70
+
+        Enable ES6 generators (issue 2355).
+
+        Fixed int vs. uintptr_t confusion (plus some cleanup on the way) (issue
+        3556).
+
+        Move configuration of ResourceConstraints to Isolate construction.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-16: Version 3.29.66
+
+        Currently, a new isolate is created in an uninitialized state, and
+        several API methods will automatically initialize it. During this
+        uninitialized state, code event handlers and function entry handlers can
+        be attached to the isolate.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-15: Version 3.29.64
+
+        ES6: String(symbol) should work like symbol.toString (issue 3554).
+
+        Arrow functions: Cleanup handling of the prototype property (issue
+        2700).
+
+        Remove V8_HOST_CAN_READ_UNALIGNED and its uses (Chromium issue 412967).
+
+        Fix Smi vs. HeapObject confusion in HConstants (Chromium issue 412215).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-12: Version 3.29.59
+
+        Do not use wide reads in CopyCharsUnsigned (Chromium issue 412967).
+
+        Fix inaccurate type condition in Hydrogen (Chromium issue 412210).
+
+        Fix crash in ScriptDebugServer::wrapCallFrames (Chromium issue 411196).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-11: Version 3.29.57
+
+        ES6: Add support for method shorthand in object literals (issue 3516).
+
+        Unbreak FreeBSD build (hopefully) (issue 3548).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-09: Version 3.29.53
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-08: Version 3.29.50
+
+        Allocate a new empty number dictionary when resetting elements (Chromium
+        issue 410332).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-05: Version 3.29.43
+
+        Enforce correct number comparisons when inlining Array.indexOf (Chromium
+        issue 407946).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-04: Version 3.29.41
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-03: Version 3.29.40
+
+        Use correct receiver for DOM accessors on the prototype chain (issue
+        3538).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-02: Version 3.29.38
+
+        Do not clear weak monomorphic IC after context disposal (Chromium issue
+        404020).
+
+        Turn on job-based sweeping (issue 3104).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-09-01: Version 3.29.35
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-29: Version 3.29.29
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-28: Version 3.29.27
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-28: Version 3.29.25
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-28: Version 3.29.24
+
+        Tweaks to generate XP-compatible .exes (Chromium issue 407517).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-28: Version 3.29.23
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-27: Version 3.29.20
+
+        Handle empty allocation list in CodeRange properly (issue 3540, Chromium
+        issue 407566).
+
+        Fixed inlining of constant values (issue 3529).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-25: Version 3.29.17
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-24: Version 3.29.16
+
+        Fix issue with numeric property names (issue 3507).
+
+        Add back the duplicate property checker (issue 3498).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-22: Version 3.29.14
+
+        Don't inline Array.shift() if receiver map is not extensible (Chromium
+        issue 405517).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-21: Version 3.29.11
+
+        Refactor ParseObjectLiteral.
+
+        Support symbol-named properties in API (issue 3394).
+
+        Suppress test262 test that tests duplicate properties.
+
+        ES6: Duplicate properties are no longer an error (issue 3498).
+
+        Expose function CheckDebugBreak in the debugger api.
+
+        Remove RegExp.$input (issue 3486).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-21: Version 3.29.10
+
+        ES6: Make sure we do not store -0 as the key in Map/Set (issue 3515).
+
+        Remove removed flags from tests.
+
+        Expose well-known Symbols to C++ API (Chromium issue 341423).
+
+        Implement ES6 Array.of() (issue 3427).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-20: Version 3.29.9
+
+        Correctly handle holes when concat()ing double arrays (Chromium issue
+        403409).
+
+        [turbofan] Refactor the InstructionSelector tests (issue 3489).
+
+        ES6: Make Map/Set constructors support iterable values (issue 3508).
+
+        WeakMap/WeakSet: Add test for non object keys (issue 3399).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-12: Version 3.28.71
+
+        ToNumber(Symbol) should throw TypeError (issue 3499).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-11: Version 3.28.69
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-09: Version 3.28.65
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-08: Version 3.28.64
+
+        ES6: Implement WeakMap and WeakSet constructor logic (issue 3399).
+
+        Enable ES6 unscopables (issue 3401).
+
+        Turn on harmony_unscopables for es_staging (issue 3401).
+
+        Remove proxies from --harmony switch for M38 because of problems.
+
+        Reland "Add initial support for compiler unit tests using GTest/GMock."
+        (issue 3489).
+
+        Enable ES6 iteration by default (issue 2214).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-07: Version 3.28.62
+
+        Only escape U+0022 in argument values of `String.prototype` HTML methods
+        (issue 2217).
+
+        Update webkit test for expected own properties.
+
+        Implement unscopables (issue 3401).
+
+        Add `CheckObjectCoercible` for the `String.prototype` HTML methods
+        (issue 2218).
+
+        Add initial support for compiler unit tests using GTest/GMock (issue
+        3489).
+
+        Trigger exception debug events on Promise reject (Chromium issue
+        393913).
+
+        Refactor unit tests for the base library to use GTest (issue 3489).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-06: Version 3.28.60
+
+        Enable ES6 Map and Set by default (issue 1622).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-06: Version 3.28.59
+
+        Removed GetConstructor from the API. Instead either get the
+        "constructor" property stored in the prototype, or keep a side-table.
+
+        Enable ES6 Symbols by default (issue 2158).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-05: Version 3.28.57
+
+        Add dependencies on gtest and gmock.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-04: Version 3.28.54
+
+        Performance and stability improvements on all platforms.
+
+
+2014-08-01: Version 3.28.53
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-31: Version 3.28.52
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-31: Version 3.28.51
+
+        Drop deprecated memory related notification API (Chromium issue 397026).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-31: Version 3.28.50
+
+        Use emergency memory in case of out-of-memory during evacuation
+        (Chromium issue 395314).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-30: Version 3.28.48
+
+        Fix Object.freeze with field type tracking. Keep the descriptor properly
+        intact while updating the field type (issue 3458).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-29: Version 3.28.45
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-28: Version 3.28.43
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-25: Version 3.28.38
+
+        Fix issue with setters and their holders in accessors.cc (Chromium issue
+        3462).
+
+        Introduce more debug events for promises (issue 3093).
+
+        Move gc notifications from V8 to Isolate and make idle hint mandatory
+        (Chromium issue 397026).
+
+        The accessors should get the value from the holder and not from this
+        (issue 3461).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-24: Version 3.28.35
+
+        Rebaseline/update the intl tests with ICU 52 (issue 3454).
+
+        Expose the content of Sets and WeakSets through SetMirror (issue 3093).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-23: Version 3.28.32
+
+        Update ICU to 52 (matching Chromium) (issue 3452).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-22: Version 3.28.31
+
+        Remove harmony-typeof.
+
+        Implement String.prototype.codePointAt and String.fromCodePoint (issue
+        2840).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-21: Version 3.28.30
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-21: Version 3.28.29
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-18: Version 3.28.28
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-17: Version 3.28.26
+
+        Ship ES6 Math functions (issue 2938).
+
+        Make ToPrimitive throw on symbol wrappers (issue 3442).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-16: Version 3.28.25
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-16: Version 3.28.24
+
+        Removed some copy-n-paste from StackFrame::Foo API entries (issue 3436).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-15: Version 3.28.23
+
+        Fix error message about read-only symbol properties (issue 3441).
+
+        Include symbol properties in Object.{create,defineProperties} (issue
+        3440).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-14: Version 3.28.22
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-11: Version 3.28.21
+
+        Make `let` usable as an identifier in ES6 sloppy mode (issue 2198).
+
+        Support ES6 Map and Set in heap profiler (issue 3368).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-10: Version 3.28.20
+
+        Remove deprecated counter/histogram methods.
+
+        Fixed printing of external references (Chromium issue 392068).
+
+        Fix several issues with ES6 redeclaration checks (issue 3426).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-09: Version 3.28.19
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-09: Version 3.28.18
+
+        Reland "Postpone termination exceptions in debug scope." (issue 3408).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-08: Version 3.28.17
+
+        MIPS: Fix computed properties on object literals with a double as
+        property name (Chromium issue 390732).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-08: Version 3.28.16
+
+        Fix computed properties on object literals with a double as property
+        name (Chromium issue 390732).
+
+        Avoid brittle use of .bind in Promise.all (issue 3420).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-07: Version 3.28.15
+
+        Remove a bunch of Isolate::UncheckedCurrent calls.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-07: Version 3.28.14
+
+        Use the HeapObjectIterator to scan-on-scavenge map pages (Chromium issue
+        390732).
+
+        Introduce debug events for Microtask queue (Chromium issue 272416).
+
+        Split out libplatform into a separate library.
+
+        Add clang-format to presubmit checks.
+
+        Stack traces exposed to JavaScript should omit extensions (issue 311).
+
+        Remove deprecated v8::Context::HasOutOfMemoryException.
+
+        Postpone termination exceptions in debug scope (issue 3408).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-04: Version 3.28.13
+
+        Rollback to r22134.
+
+
+2014-07-04: Version 3.28.12
+
+        Use the HeapObjectIterator to scan-on-scavenge map pages (Chromium issue
+        390732).
+
+        Introduce debug events for Microtask queue (Chromium issue 272416).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-03: Version 3.28.11
+
+        Split out libplatform into a separate library.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-03: Version 3.28.10
+
+        Add clang-format to presubmit checks.
+
+        Stack traces exposed to JavaScript should omit extensions (issue 311).
+
+        Remove deprecated v8::Context::HasOutOfMemoryException.
+
+        Postpone termination exceptions in debug scope (issue 3408).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-02: Version 3.28.9
+
+        Make freeze & friends ignore private properties (issue 3419).
+
+        Introduce a builddeps make target (issue 3418).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-07-01: Version 3.28.8
+
+        Remove static initializer from isolate.
+
+        ES6: Add missing Set.prototype.keys function (issue 3411).
+
+        Introduce debug events for promises (issue 3093).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-06-30: Version 3.28.7
+
+        Performance and stability improvements on all platforms.
+
+
+2014-06-30: Version 3.28.6
+
+        Unbreak "os" stuff in shared d8 builds (issue 3407).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-06-26: Version 3.28.4
+
+        Compile optimized code with active debugger but no break points
+        (Chromium issue 386492).
+
+        Optimize Map/Set.prototype.forEach.
+
+        Collect garbage with kReduceMemoryFootprintMask in IdleNotification
+        (Chromium issue 350720).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-06-26: Version 3.28.3
+
+        Grow heap slower if GC freed many global handles (Chromium issue
+        263503).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-06-25: Version 3.28.2
+
+        Remove bogus assertions in HCompareObjectEqAndBranch (Chromium issue
+        387636).
+
+        Do not eagerly update allow_osr_at_loop_nesting_level (Chromium issue
+        387599).
+
+        Set host_arch to ia32 on machines with a 32bit userland but a 64bit
+        kernel (Chromium issue 368384).
+
+        Map/Set: Implement constructor parameter handling (issue 3398).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-06-24: Version 3.28.1
+
+        Support LiveEdit on Arm64 (Chromium issue 368580).
+
+        Run JS micro tasks in the appropriate context (Chromium issue 385349).
+
+        Add a use counter API.
+
+        Set host_arch to ia32 on machines with a 32bit userland but a 64bit
+        kernel.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-06-23: Version 3.28.0
+
+        MIPS: Support LiveEdit (Chromium issue 368580).
+
+        Array.concat: properly go to dictionary mode when required (Chromium
+        issue 387031).
+
+        Support LiveEdit on ARM (Chromium issue 368580).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-06-18: Version 3.27.34
 
         Reduce number of writes to DependentCode array when inserting dependent
diff --git a/DEPS b/DEPS
index df419a7..d4139c6 100644
--- a/DEPS
+++ b/DEPS
@@ -3,9 +3,11 @@
 # all paths in here must match this assumption.
 
 vars = {
+  "chromium_git": "https://chromium.googlesource.com",
+
   "chromium_trunk": "https://src.chromium.org/svn/trunk",
 
-  "buildtools_revision": "5d89977ce55240995d1596fe420b818468f5ec37",
+  "buildtools_revision": "fb782d4369d5ae04f17a2fceef7de5a63e50f07b",
 }
 
 deps = {
@@ -14,14 +16,25 @@
     "http://gyp.googlecode.com/svn/trunk@1831",
 
   "v8/third_party/icu":
-    Var("chromium_trunk") + "/deps/third_party/icu46@258359",
+    Var("chromium_trunk") + "/deps/third_party/icu52@277999",
 
   "v8/buildtools":
     "https://chromium.googlesource.com/chromium/buildtools.git@" +
     Var("buildtools_revision"),
+
+  "v8/testing/gtest":
+    "http://googletest.googlecode.com/svn/trunk@692",
+
+  "v8/testing/gmock":
+    "http://googlemock.googlecode.com/svn/trunk@485",
 }
 
 deps_os = {
+  "android": {
+    "v8/third_party/android_tools":
+      Var("chromium_git") + "/android_tools.git" + "@" +
+          "31869996507de16812bb53a3d0aaa15cd6194c16",
+  },
   "win": {
     "v8/third_party/cygwin":
       Var("chromium_trunk") + "/deps/third_party/cygwin@66844",
@@ -35,6 +48,7 @@
   # Everybody can use some things.
   "+include",
   "+unicode",
+  "+third_party/fdlibm",
 ]
 
 # checkdeps.py shouldn't check for includes in these directories:
@@ -44,6 +58,40 @@
 ]
 
 hooks = [
+  # Pull clang-format binaries using checked-in hashes.
+  {
+    "name": "clang_format_win",
+    "pattern": ".",
+    "action": [ "download_from_google_storage",
+                "--no_resume",
+                "--platform=win32",
+                "--no_auth",
+                "--bucket", "chromium-clang-format",
+                "-s", "v8/buildtools/win/clang-format.exe.sha1",
+    ],
+  },
+  {
+    "name": "clang_format_mac",
+    "pattern": ".",
+    "action": [ "download_from_google_storage",
+                "--no_resume",
+                "--platform=darwin",
+                "--no_auth",
+                "--bucket", "chromium-clang-format",
+                "-s", "v8/buildtools/mac/clang-format.sha1",
+    ],
+  },
+  {
+    "name": "clang_format_linux",
+    "pattern": ".",
+    "action": [ "download_from_google_storage",
+                "--no_resume",
+                "--platform=linux*",
+                "--no_auth",
+                "--bucket", "chromium-clang-format",
+                "-s", "v8/buildtools/linux64/clang-format.sha1",
+    ],
+  },
   {
     # A change to a .gyp, .gypi, or to GYP itself should run the generator.
     "pattern": ".",
diff --git a/Makefile b/Makefile
index f49be61..2fbe1ba 100644
--- a/Makefile
+++ b/Makefile
@@ -70,6 +70,10 @@
 else
   GYPFLAGS += -Dv8_enable_backtrace=1
 endif
+# verifypredictable=on
+ifeq ($(verifypredictable), on)
+  GYPFLAGS += -Dv8_enable_verify_predictable=1
+endif
 # snapshot=off
 ifeq ($(snapshot), off)
   GYPFLAGS += -Dv8_use_snapshot='false'
@@ -187,19 +191,19 @@
   GYPFLAGS += -Darm_thumb=1
 endif
 endif
-# armtest=on
+# arm_test_noprobe=on
 # With this flag set, by default v8 will only use features implied
 # by the compiler (no probe). This is done by modifying the default
 # values of enable_armv7, enable_vfp3, enable_32dregs and enable_neon.
 # Modifying these flags when launching v8 will enable the probing for
 # the specified values.
-# When using the simulator, this flag is implied.
-ifeq ($(armtest), on)
-  GYPFLAGS += -Darm_test=on
+ifeq ($(arm_test_noprobe), on)
+  GYPFLAGS += -Darm_test_noprobe=on
 endif
 
 # ----------------- available targets: --------------------
-# - "dependencies": pulls in external dependencies (currently: GYP)
+# - "builddeps": pulls in external dependencies for building
+# - "dependencies": pulls in all external dependencies
 # - "grokdump": rebuilds heap constants lists used by grokdump
 # - any arch listed in ARCHES (see below)
 # - any mode listed in MODES
@@ -217,7 +221,7 @@
 
 # Architectures and modes to be compiled. Consider these to be internal
 # variables, don't override them (use the targets instead).
-ARCHES = ia32 x64 arm arm64 mips mipsel x87
+ARCHES = ia32 x64 x32 arm arm64 mips mipsel mips64el x87
 DEFAULT_ARCHES = ia32 x64 arm
 MODES = release debug optdebug
 DEFAULT_MODES = release debug
@@ -226,8 +230,8 @@
 
 # List of files that trigger Makefile regeneration:
 GYPFILES = build/all.gyp build/features.gypi build/standalone.gypi \
-           build/toolchain.gypi samples/samples.gyp src/d8.gyp \
-           test/cctest/cctest.gyp tools/gyp/v8.gyp
+           build/toolchain.gypi samples/samples.gyp src/compiler/compiler.gyp \
+           src/d8.gyp test/cctest/cctest.gyp tools/gyp/v8.gyp
 
 # If vtunejit=on, the v8vtune.gyp will be appended.
 ifeq ($(vtunejit), on)
@@ -247,7 +251,7 @@
 # File where previously used GYPFLAGS are stored.
 ENVFILE = $(OUTDIR)/environment
 
-.PHONY: all check clean dependencies $(ENVFILE).new native \
+.PHONY: all check clean builddeps dependencies $(ENVFILE).new native \
         qc quickcheck $(QUICKCHECKS) \
         $(addsuffix .quickcheck,$(MODES)) $(addsuffix .quickcheck,$(ARCHES)) \
         $(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
@@ -395,18 +399,22 @@
 # GYP file generation targets.
 OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(BUILDS))
 $(OUT_MAKEFILES): $(GYPFILES) $(ENVFILE)
-	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
-	PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
+	$(eval CXX_TARGET_ARCH:=$(shell $(CXX) -v 2>&1 | grep ^Target: | \
+	        cut -f 2 -d " " | cut -f 1 -d "-" ))
+	$(eval CXX_TARGET_ARCH:=$(subst aarch64,arm64,$(CXX_TARGET_ARCH)))
+	$(eval V8_TARGET_ARCH:=$(subst .,,$(suffix $(basename $@))))
+	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
 	GYP_GENERATORS=make \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. \
-	              -Dv8_target_arch=$(subst .,,$(suffix $(basename $@))) \
+	              -Dv8_target_arch=$(V8_TARGET_ARCH) \
+	              $(if $(findstring $(CXX_TARGET_ARCH),$(V8_TARGET_ARCH)), \
+	              -Dtarget_arch=$(V8_TARGET_ARCH),) \
 	              $(if $(findstring optdebug,$@),-Dv8_optimized_debug=2,) \
 	              -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS)
 
 $(OUTDIR)/Makefile.native: $(GYPFILES) $(ENVFILE)
-	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
-	PYTHONPATH="$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
+	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH):$(shell pwd)/build/gyp/pylib:$(PYTHONPATH)" \
 	GYP_GENERATORS=make \
 	build/gyp/gyp --generator-output="$(OUTDIR)" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -S.native $(GYPFLAGS)
@@ -460,16 +468,26 @@
 gtags.clean:
 	rm -f gtags.files GPATH GRTAGS GSYMS GTAGS
 
-# Dependencies.
+# Dependencies. "builddeps" are dependencies required solely for building,
+# "dependencies" includes also dependencies required for development.
 # Remember to keep these in sync with the DEPS file.
-dependencies:
+builddeps:
 	svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
 	    --revision 1831
-	svn checkout --force \
-	    https://src.chromium.org/chrome/trunk/deps/third_party/icu46 \
-	    third_party/icu --revision 258359
-	( test -d buildtools || \
-	  git clone https://chromium.googlesource.com/chromium/buildtools.git; \
-	  cd buildtools; \
-	  git fetch origin; \
-	  git checkout 5d89977ce55240995d1596fe420b818468f5ec37 )
+	if svn info third_party/icu 2>&1 | grep -q icu46 ; then \
+	  svn switch --force \
+	      https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \
+	      third_party/icu --revision 277999 ; \
+	else \
+	  svn checkout --force \
+	      https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \
+	      third_party/icu --revision 277999 ; \
+	fi
+	svn checkout --force http://googletest.googlecode.com/svn/trunk \
+	    testing/gtest --revision 692
+	svn checkout --force http://googlemock.googlecode.com/svn/trunk \
+	    testing/gmock --revision 485
+
+dependencies: builddeps
+	# The spec is a copy of the hooks in v8's DEPS file.
+	gclient sync -r fb782d4369d5ae04f17a2fceef7de5a63e50f07b --spec="solutions = [{u'managed': False, u'name': u'buildtools', u'url': u'https://chromium.googlesource.com/chromium/buildtools.git', u'custom_deps': {}, u'custom_hooks': [{u'name': u'clang_format_win',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=win32',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/win/clang-format.exe.sha1']},{u'name': u'clang_format_mac',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=darwin',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/mac/clang-format.sha1']},{u'name': u'clang_format_linux',u'pattern': u'.',u'action': [u'download_from_google_storage',u'--no_resume',u'--platform=linux*',u'--no_auth',u'--bucket',u'chromium-clang-format',u'-s',u'buildtools/linux64/clang-format.sha1']}],u'deps_file': u'.DEPS.git', u'safesync_url': u''}]"
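
The rewritten $(OUT_MAKEFILES) recipe asks the C++ compiler for its target triple ("$(CXX) -v" prints a "Target:" line), normalizes aarch64 to arm64, and only passes -Dtarget_arch when the compiler's architecture matches the requested V8 arch. A rough Python equivalent of that shell pipeline, for illustration only (the authoritative logic is the make recipe above):

    import subprocess

    def cxx_target_arch(cxx='g++'):
        # "g++ -v" emits e.g. "Target: aarch64-linux-gnu" on stderr.
        proc = subprocess.Popen([cxx, '-v'],
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE)
        _, err = proc.communicate()
        for line in err.splitlines():
            if line.startswith('Target:'):
                arch = line.split()[1].split('-')[0]
                return 'arm64' if arch == 'aarch64' else arch
        return None
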
diff --git a/Makefile.android b/Makefile.android
index d3ed1f4..8e200f1 100644
--- a/Makefile.android
+++ b/Makefile.android
@@ -51,33 +51,33 @@
   DEFINES += arm_neon=0 arm_version=7
   TOOLCHAIN_ARCH = arm-linux-androideabi
   TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-  TOOLCHAIN_VER = 4.6
+  TOOLCHAIN_VER = 4.8
 else
   ifeq ($(ARCH), android_arm64)
-    DEFINES  = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=20
+    DEFINES  = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=L
     TOOLCHAIN_ARCH = aarch64-linux-android
     TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-    TOOLCHAIN_VER = 4.8
+    TOOLCHAIN_VER = 4.9
   else
     ifeq ($(ARCH), android_mipsel)
       DEFINES  = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
       DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
       TOOLCHAIN_ARCH = mipsel-linux-android
       TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-      TOOLCHAIN_VER = 4.6
+      TOOLCHAIN_VER = 4.8
 
     else
       ifeq ($(ARCH), android_ia32)
         DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
         TOOLCHAIN_ARCH = x86
         TOOLCHAIN_PREFIX = i686-linux-android
-        TOOLCHAIN_VER = 4.6
+        TOOLCHAIN_VER = 4.8
       else
         ifeq ($(ARCH), android_x87)
           DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
           TOOLCHAIN_ARCH = x86
           TOOLCHAIN_PREFIX = i686-linux-android
-          TOOLCHAIN_VER = 4.6
+          TOOLCHAIN_VER = 4.8
 	else
           $(error Target architecture "${ARCH}" is not supported)
         endif
@@ -98,6 +98,7 @@
 
 # For mksnapshot host generation.
 DEFINES += host_os=${HOST_OS}
+DEFINES += OS=android
 
 .SECONDEXPANSION:
 $(ANDROID_BUILDS): $(OUTDIR)/Makefile.$$@
@@ -119,7 +120,7 @@
 	GYP_DEFINES="${DEFINES}" \
 	CC="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-gcc" \
 	CXX="${ANDROID_TOOLCHAIN}/bin/${TOOLCHAIN_PREFIX}-g++" \
-	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
+	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
 	build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. -Ibuild/android.gypi \
 	              -S$(suffix $(basename $@))$(suffix $@) ${GYPFLAGS}
diff --git a/Makefile.nacl b/Makefile.nacl
index 1d34a3b..34bd960 100644
--- a/Makefile.nacl
+++ b/Makefile.nacl
@@ -97,7 +97,7 @@
 	GYP_DEFINES="${GYPENV}" \
 	CC=${NACL_CC} \
 	CXX=${NACL_CXX} \
-	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(PYTHONPATH)" \
+	PYTHONPATH="$(shell pwd)/tools/generate_shim_headers:$(shell pwd)/build:$(PYTHONPATH)" \
 	build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
 	              -Ibuild/standalone.gypi --depth=. \
 	              -S$(suffix $(basename $@))$(suffix $@) $(GYPFLAGS) \
diff --git a/OWNERS b/OWNERS
index 2fbb3ef..f67b3ec 100644
--- a/OWNERS
+++ b/OWNERS
@@ -18,4 +18,5 @@
 ulan@chromium.org
 vegorov@chromium.org
 verwaest@chromium.org
+vogelheim@chromium.org
 yangguo@chromium.org
diff --git a/PRESUBMIT.py b/PRESUBMIT.py
index 70f576a..3a9895d 100644
--- a/PRESUBMIT.py
+++ b/PRESUBMIT.py
@@ -34,6 +34,32 @@
 import sys
 
 
+_EXCLUDED_PATHS = (
+    r"^test[\\\/].*",
+    r"^testing[\\\/].*",
+    r"^third_party[\\\/].*",
+    r"^tools[\\\/].*",
+)
+
+
+# Regular expression that matches code only used for test binaries
+# (best effort).
+_TEST_CODE_EXCLUDED_PATHS = (
+    r'.+-unittest\.cc',
+    # Has a method VisitForTest().
+    r'src[\\\/]compiler[\\\/]ast-graph-builder\.cc',
+    # Test extension.
+    r'src[\\\/]extensions[\\\/]gc-extension\.cc',
+)
+
+
+_TEST_ONLY_WARNING = (
+    'You might be calling functions intended only for testing from\n'
+    'production code.  It is OK to ignore this warning if you know what\n'
+    'you are doing, as the heuristics used to detect the situation are\n'
+    'not perfect.  The commit queue will not block on this warning.')
+
+
 def _V8PresubmitChecks(input_api, output_api):
   """Runs the V8 presubmit checks."""
   import sys
@@ -41,6 +67,8 @@
         input_api.PresubmitLocalPath(), 'tools'))
   from presubmit import CppLintProcessor
   from presubmit import SourceProcessor
+  from presubmit import CheckRuntimeVsNativesNameClashes
+  from presubmit import CheckExternalReferenceRegistration
 
   results = []
   if not CppLintProcessor().Run(input_api.PresubmitLocalPath()):
@@ -49,6 +77,12 @@
     results.append(output_api.PresubmitError(
         "Copyright header, trailing whitespaces and two empty lines " \
         "between declarations check failed"))
+  if not CheckRuntimeVsNativesNameClashes(input_api.PresubmitLocalPath()):
+    results.append(output_api.PresubmitError(
+        "Runtime/natives name clash check failed"))
+  if not CheckExternalReferenceRegistration(input_api.PresubmitLocalPath()):
+    results.append(output_api.PresubmitError(
+        "External references registration check failed"))
   return results
 
 
@@ -105,13 +139,60 @@
   return results
 
 
+def _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api):
+  """Attempts to prevent use of functions intended only for testing in
+  non-testing code. For now this is just a best-effort implementation
+  that ignores header files and may have some false positives. A
+  better implementation would probably need a proper C++ parser.
+  """
+  # We only scan .cc files, as declarations of for-testing functions in
+  # header files are hard to distinguish from calls to such functions without a
+  # proper C++ parser.
+  file_inclusion_pattern = r'.+\.cc'
+
+  base_function_pattern = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
+  inclusion_pattern = input_api.re.compile(r'(%s)\s*\(' % base_function_pattern)
+  comment_pattern = input_api.re.compile(r'//.*(%s)' % base_function_pattern)
+  exclusion_pattern = input_api.re.compile(
+    r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (
+      base_function_pattern, base_function_pattern))
+
+  def FilterFile(affected_file):
+    black_list = (_EXCLUDED_PATHS +
+                  _TEST_CODE_EXCLUDED_PATHS +
+                  input_api.DEFAULT_BLACK_LIST)
+    return input_api.FilterSourceFile(
+      affected_file,
+      white_list=(file_inclusion_pattern, ),
+      black_list=black_list)
+
+  problems = []
+  for f in input_api.AffectedSourceFiles(FilterFile):
+    local_path = f.LocalPath()
+    for line_number, line in f.ChangedContents():
+      if (inclusion_pattern.search(line) and
+          not comment_pattern.search(line) and
+          not exclusion_pattern.search(line)):
+        problems.append(
+          '%s:%d\n    %s' % (local_path, line_number, line.strip()))
+
+  if problems:
+    return [output_api.PresubmitPromptOrNotify(_TEST_ONLY_WARNING, problems)]
+  else:
+    return []
+
+
 def _CommonChecks(input_api, output_api):
   """Checks common to both upload and commit."""
   results = []
   results.extend(input_api.canned_checks.CheckOwners(
       input_api, output_api, source_file_filter=None))
+  results.extend(input_api.canned_checks.CheckPatchFormatted(
+      input_api, output_api))
   results.extend(_V8PresubmitChecks(input_api, output_api))
   results.extend(_CheckUnwantedDependencies(input_api, output_api))
+  results.extend(
+      _CheckNoProductionCodeUsingTestOnlyFunctions(input_api, output_api))
   return results
 
 
@@ -170,5 +251,6 @@
       'v8_linux_layout_dbg': set(['defaulttests']),
       'v8_mac_rel': set(['defaulttests']),
       'v8_win_rel': set(['defaulttests']),
+      'v8_win64_compile_rel': set(['defaulttests']),
     },
   }
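
The heart of _CheckNoProductionCodeUsingTestOnlyFunctions is the interplay of the three regular expressions above: flag a line that appears to call a for-testing function, unless the match sits in a // comment, is part of a qualified ::Name, or looks like a definition. A self-contained demo of the same patterns on hypothetical sample lines:

    import re

    base = r'[ :]test::[^\s]+|ForTest(ing)?|for_test(ing)?'
    inclusion = re.compile(r'(%s)\s*\(' % base)
    comment = re.compile(r'//.*(%s)' % base)
    exclusion = re.compile(
        r'::[A-Za-z0-9_]+(%s)|(%s)[^;]+\{' % (base, base))

    def flags_line(line):
        # Flag probable calls; skip comments, qualified names and definitions.
        return bool(inclusion.search(line) and
                    not comment.search(line) and
                    not exclusion.search(line))

    print flags_line('heap->CollectGarbageForTesting(space);')  # True
    print flags_line('// mentions ForTesting( in a comment')    # False
    print flags_line('void Heap::GarbageForTesting(int x) {')   # False
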
diff --git a/benchmarks/v8.json b/benchmarks/v8.json
index f4210d9..03ea962 100644
--- a/benchmarks/v8.json
+++ b/benchmarks/v8.json
@@ -3,7 +3,7 @@
   "main": "run.js",
   "run_count": 2,
   "results_regexp": "^%s: (.+)$",
-  "benchmarks": [
+  "tests": [
     {"name": "Richards"},
     {"name": "DeltaBlue"},
     {"name": "Crypto"},
diff --git a/build/all.gyp b/build/all.gyp
index 3860379..1e420fa 100644
--- a/build/all.gyp
+++ b/build/all.gyp
@@ -9,7 +9,11 @@
       'type': 'none',
       'dependencies': [
         '../samples/samples.gyp:*',
+        '../src/base/base.gyp:base-unittests',
+        '../src/compiler/compiler.gyp:compiler-unittests',
         '../src/d8.gyp:d8',
+        '../src/heap/heap.gyp:heap-unittests',
+        '../src/libplatform/libplatform.gyp:libplatform-unittests',
         '../test/cctest/cctest.gyp:*',
       ],
       'conditions': [
diff --git a/build/android.gypi b/build/android.gypi
index 5c6fb32..f984ea3 100644
--- a/build/android.gypi
+++ b/build/android.gypi
@@ -35,9 +35,6 @@
     'variables': {
       'android_ndk_root%': '<!(/bin/echo -n $ANDROID_NDK_ROOT)',
       'android_toolchain%': '<!(/bin/echo -n $ANDROID_TOOLCHAIN)',
-      # This is set when building the Android WebView inside the Android build
-      # system, using the 'android' gyp backend.
-      'android_webview_build%': 0,
     },
     'conditions': [
       ['android_ndk_root==""', {
@@ -64,9 +61,6 @@
     # link the NDK one?
     'use_system_stlport%': '<(android_webview_build)',
     'android_stlport_library': 'stlport_static',
-    # Copy it out one scope.
-    'android_webview_build%': '<(android_webview_build)',
-    'OS': 'android',
   },  # variables
   'target_defaults': {
     'defines': [
@@ -81,14 +75,18 @@
       },  # Release
     },  # configurations
     'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter',
-                '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions', ],
+                '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
+                # Note: Using -std=c++0x will define __STRICT_ANSI__, which in
+                # turn leaves out some standard-library template support for
+                # 'long long'. What we want is -std=c++11, but that is not
+                # supported by GCC 4.6 or Xcode 4.2.
+                '-std=gnu++0x' ],
     'target_conditions': [
       ['_toolset=="target"', {
         'cflags!': [
           '-pthread',  # Not supported by Android toolchain.
         ],
         'cflags': [
-          '-U__linux__',  # Don't allow toolchain to claim -D__linux__
           '-ffunction-sections',
           '-funwind-tables',
           '-fstack-protector',
@@ -266,15 +264,8 @@
       }],  # _toolset=="target"
       # Settings for building host targets using the system toolchain.
       ['_toolset=="host"', {
-        'conditions': [
-          ['target_arch=="x64"', {
-            'cflags': [ '-m64', '-pthread' ],
-            'ldflags': [ '-m64', '-pthread' ],
-          }, {
-            'cflags': [ '-m32', '-pthread' ],
-            'ldflags': [ '-m32', '-pthread' ],
-          }],
-        ],
+        'cflags': [ '-pthread' ],
+        'ldflags': [ '-pthread' ],
         'ldflags!': [
           '-Wl,-z,noexecstack',
           '-Wl,--gc-sections',
diff --git a/build/detect_v8_host_arch.py b/build/detect_v8_host_arch.py
new file mode 100644
index 0000000..3460a9a
--- /dev/null
+++ b/build/detect_v8_host_arch.py
@@ -0,0 +1,69 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Outputs host CPU architecture in format recognized by gyp."""
+
+import platform
+import re
+import sys
+
+
+def main():
+  print DoMain([])
+  return 0
+
+def DoMain(_):
+  """Hook to be called from gyp without starting a separate python
+  interpreter."""
+  host_arch = platform.machine()
+
+  # Convert machine type to format recognized by gyp.
+  if re.match(r'i.86', host_arch) or host_arch == 'i86pc':
+    host_arch = 'ia32'
+  elif host_arch in ['x86_64', 'amd64']:
+    host_arch = 'x64'
+  elif host_arch.startswith('arm'):
+    host_arch = 'arm'
+  elif host_arch == 'aarch64':
+    host_arch = 'arm64'
+  elif host_arch == 'mips64':
+    host_arch = 'mips64el'
+  elif host_arch.startswith('mips'):
+    host_arch = 'mipsel'
+
+  # platform.machine is based on the running kernel. It's possible to use a
+  # 64-bit kernel with a 32-bit userland, e.g. to give the linker slightly
+  # more memory. Distinguish the userland bitness by querying the python
+  # binary.
+  if host_arch == 'x64' and platform.architecture()[0] == '32bit':
+    host_arch = 'ia32'
+
+  return host_arch
+
+if __name__ == '__main__':
+  sys.exit(main())
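
standalone.gypi consumes this module through '<!pymod_do_main(detect_v8_host_arch)' (see the hunk further below), which imports it and calls DoMain() inside the gyp process instead of shelling out to a uname|sed pipeline as before; the PYTHONPATH additions in the Makefiles make build/ importable. A quick, illustrative way to check the detection by hand from the build/ directory:

    import platform
    import detect_v8_host_arch

    # DoMain ignores its argument list and maps platform.machine() onto a gyp
    # arch name such as 'ia32', 'x64', 'arm', 'arm64', 'mipsel' or 'mips64el'.
    print platform.machine(), '->', detect_v8_host_arch.DoMain([])
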
diff --git a/build/features.gypi b/build/features.gypi
index d542d05..8201ea9 100644
--- a/build/features.gypi
+++ b/build/features.gypi
@@ -41,6 +41,8 @@
 
     'v8_use_snapshot%': 'true',
 
+    'v8_enable_verify_predictable%': 0,
+
     # With post mortem support enabled, metadata is embedded into libv8 that
     # describes various parameters of the VM for use by debuggers. See
     # tools/gen-postmortem-metadata.py for details.
@@ -57,8 +59,9 @@
     # Enable compiler warnings when using V8_DEPRECATED apis.
     'v8_deprecation_warnings%': 0,
 
-    # Use the v8 provided v8::Platform implementation.
-    'v8_use_default_platform%': 1,
+    # Use external files for startup data blobs:
+    # the JS builtins sources and the start snapshot.
+    'v8_use_external_startup_data%': 0,
   },
   'target_defaults': {
     'conditions': [
@@ -74,6 +77,9 @@
       ['v8_enable_verify_heap==1', {
         'defines': ['VERIFY_HEAP',],
       }],
+      ['v8_enable_verify_predictable==1', {
+        'defines': ['VERIFY_PREDICTABLE',],
+      }],
       ['v8_interpreted_regexp==1', {
         'defines': ['V8_INTERPRETED_REGEXP',],
       }],
@@ -83,13 +89,11 @@
       ['v8_enable_i18n_support==1', {
         'defines': ['V8_I18N_SUPPORT',],
       }],
-      ['v8_use_default_platform==1', {
-        'defines': ['V8_USE_DEFAULT_PLATFORM',],
-      }],
       ['v8_compress_startup_data=="bz2"', {
-        'defines': [
-          'COMPRESS_STARTUP_DATA_BZ2',
-        ],
+        'defines': ['COMPRESS_STARTUP_DATA_BZ2',],
+      }],
+      ['v8_use_external_startup_data==1', {
+        'defines': ['V8_USE_EXTERNAL_STARTUP_DATA',],
       }],
     ],  # conditions
     'configurations': {
diff --git a/build/get_landmines.py b/build/get_landmines.py
new file mode 100755
index 0000000..66a86cb
--- /dev/null
+++ b/build/get_landmines.py
@@ -0,0 +1,27 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This file emits the list of reasons why a particular build needs to be clobbered
+(or a list of 'landmines').
+"""
+
+import sys
+
+
+def main():
+  """
+  ALL LANDMINES ARE EMITTED FROM HERE.
+  """
+  print 'Need to clobber after ICU52 roll.'
+  print 'Landmines test.'
+  print 'Activating MSVS 2013.'
+  print 'Revert activation of MSVS 2013.'
+  print 'Activating MSVS 2013 again.'
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
diff --git a/build/gyp_v8 b/build/gyp_v8
index bc733df..14467ec 100755
--- a/build/gyp_v8
+++ b/build/gyp_v8
@@ -34,6 +34,7 @@
 import os
 import platform
 import shlex
+import subprocess
 import sys
 
 script_dir = os.path.dirname(os.path.realpath(__file__))
@@ -107,6 +108,14 @@
 
 def run_gyp(args):
   rc = gyp.main(args)
+
+  # Check for landmines (reasons to clobber the build). This must be run here,
+  # rather than a separate runhooks step so that any environment modifications
+  # from above are picked up.
+  print 'Running build/landmines.py...'
+  subprocess.check_call(
+      [sys.executable, os.path.join(script_dir, 'landmines.py')])
+
   if rc != 0:
     print 'Error running GYP'
     sys.exit(rc)
diff --git a/build/landmine_utils.py b/build/landmine_utils.py
new file mode 100644
index 0000000..e8b7c98
--- /dev/null
+++ b/build/landmine_utils.py
@@ -0,0 +1,114 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+
+import functools
+import logging
+import os
+import shlex
+import sys
+
+
+def memoize(default=None):
+  """This decorator caches the return value of a parameterless pure function"""
+  def memoizer(func):
+    val = []
+    @functools.wraps(func)
+    def inner():
+      if not val:
+        ret = func()
+        val.append(ret if ret is not None else default)
+        if logging.getLogger().isEnabledFor(logging.INFO):
+          print '%s -> %r' % (func.__name__, val[0])
+      return val[0]
+    return inner
+  return memoizer
+
+
+@memoize()
+def IsWindows():
+  return sys.platform in ['win32', 'cygwin']
+
+
+@memoize()
+def IsLinux():
+  return sys.platform.startswith(('linux', 'freebsd'))
+
+
+@memoize()
+def IsMac():
+  return sys.platform == 'darwin'
+
+
+@memoize()
+def gyp_defines():
+  """Parses and returns GYP_DEFINES env var as a dictionary."""
+  return dict(arg.split('=', 1)
+      for arg in shlex.split(os.environ.get('GYP_DEFINES', '')))
+
+@memoize()
+def gyp_msvs_version():
+  return os.environ.get('GYP_MSVS_VERSION', '')
+
+@memoize()
+def distributor():
+  """
+  Returns a string which is the distributed build engine in use (if any).
+  Possible values: 'goma', 'ib', ''
+  """
+  if 'goma' in gyp_defines():
+    return 'goma'
+  elif IsWindows():
+    if 'CHROME_HEADLESS' in os.environ:
+      return 'ib' # use (win and !goma and headless) as an approximation of ib
+
+
+@memoize()
+def platform():
+  """
+  Returns a string representing the platform this build is targeted for.
+  Possible values: 'win', 'mac', 'linux', 'ios', 'android'
+  """
+  if 'OS' in gyp_defines():
+    if 'android' in gyp_defines()['OS']:
+      return 'android'
+    else:
+      return gyp_defines()['OS']
+  elif IsWindows():
+    return 'win'
+  elif IsLinux():
+    return 'linux'
+  else:
+    return 'mac'
+
+
+@memoize()
+def builder():
+  """
+  Returns a string representing the build engine (not compiler) to use.
+  Possible values: 'make', 'ninja', 'xcode', 'msvs', 'scons'
+  """
+  if 'GYP_GENERATORS' in os.environ:
+    # for simplicity, only support the first explicit generator
+    generator = os.environ['GYP_GENERATORS'].split(',')[0]
+    if generator.endswith('-android'):
+      return generator.split('-')[0]
+    elif generator.endswith('-ninja'):
+      return 'ninja'
+    else:
+      return generator
+  else:
+    if platform() == 'android':
+      # Good enough for now? Do any android bots use make?
+      return 'make'
+    elif platform() == 'ios':
+      return 'xcode'
+    elif IsWindows():
+      return 'msvs'
+    elif IsLinux():
+      return 'make'
+    elif IsMac():
+      return 'xcode'
+    else:
+      assert False, 'Don\'t know what builder we\'re using!'
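
memoize caches a nullary function's first result (or the supplied default when the function returns None) in a closed-over list, so the environment probes above run at most once per process. A small usage sketch, runnable from the build/ directory; the probe function and its call counter are hypothetical, just to demonstrate the caching:

    from landmine_utils import memoize

    calls = [0]

    @memoize(default='unknown')
    def probe():
        calls[0] += 1
        return None  # pretend detection found nothing

    print probe(), probe()  # unknown unknown
    print calls[0]          # 1 -- the body ran only once
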
diff --git a/build/landmines.py b/build/landmines.py
new file mode 100755
index 0000000..bd1fb28
--- /dev/null
+++ b/build/landmines.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+This script runs every build as a hook. If it detects that the build should
+be clobbered, it will touch the file <build_dir>/.landmines_triggered. The
+various build scripts will then check for the presence of this file and clobber
+accordingly. The script will also emit the reasons for the clobber to stdout.
+
+A landmine is tripped when a builder checks out a different revision, and the
+diff between the new landmines and the old ones is non-null. At this point, the
+build is clobbered.
+"""
+
+import difflib
+import logging
+import optparse
+import os
+import sys
+import subprocess
+import time
+
+import landmine_utils
+
+
+SRC_DIR = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
+
+
+def get_target_build_dir(build_tool, target):
+  """
+  Returns output directory absolute path dependent on build and targets.
+  Examples:
+    r'c:\b\build\slave\win\build\src\out\Release'
+    '/mnt/data/b/build/slave/linux/build/src/out/Debug'
+    '/b/build/slave/ios_rel_device/build/src/xcodebuild/Release-iphoneos'
+
+  Keep this function in sync with tools/build/scripts/slave/compile.py
+  """
+  ret = None
+  if build_tool == 'xcode':
+    ret = os.path.join(SRC_DIR, 'xcodebuild', target)
+  elif build_tool in ['make', 'ninja', 'ninja-ios']:  # TODO: Remove ninja-ios.
+    ret = os.path.join(SRC_DIR, 'out', target)
+  elif build_tool in ['msvs', 'vs', 'ib']:
+    ret = os.path.join(SRC_DIR, 'build', target)
+  else:
+    raise NotImplementedError('Unexpected GYP_GENERATORS (%s)' % build_tool)
+  return os.path.abspath(ret)
+
+
+def set_up_landmines(target, new_landmines):
+  """Does the work of setting, planting, and triggering landmines."""
+  out_dir = get_target_build_dir(landmine_utils.builder(), target)
+
+  landmines_path = os.path.join(out_dir, '.landmines')
+  if not os.path.exists(out_dir):
+    return
+
+  if not os.path.exists(landmines_path):
+    print "Landmines tracker didn't exists."
+
+  # FIXME(machenbach): Clobber deletes the .landmines tracker. Difficult
+  # to know if we are right after a clobber or if it is first-time landmines
+  # deployment. Also, a landmine-triggered clobber right after a clobber is
+  # not possible. Different clobber methods for msvs, xcode and make all
+  # have different blacklists of files that are not deleted.
+  if os.path.exists(landmines_path):
+    triggered = os.path.join(out_dir, '.landmines_triggered')
+    with open(landmines_path, 'r') as f:
+      old_landmines = f.readlines()
+    if old_landmines != new_landmines:
+      old_date = time.ctime(os.stat(landmines_path).st_ctime)
+      diff = difflib.unified_diff(old_landmines, new_landmines,
+          fromfile='old_landmines', tofile='new_landmines',
+          fromfiledate=old_date, tofiledate=time.ctime(), n=0)
+
+      with open(triggered, 'w') as f:
+        f.writelines(diff)
+      print "Setting landmine: %s" % triggered
+    elif os.path.exists(triggered):
+      # Remove falsely triggered landmines.
+      os.remove(triggered)
+      print "Removing landmine: %s" % triggered
+  with open(landmines_path, 'w') as f:
+    f.writelines(new_landmines)
+
+
+def process_options():
+  """Returns a list of landmine emitting scripts."""
+  parser = optparse.OptionParser()
+  parser.add_option(
+      '-s', '--landmine-scripts', action='append',
+      default=[os.path.join(SRC_DIR, 'build', 'get_landmines.py')],
+      help='Path to the script which emits landmines to stdout. The target '
+           'is passed to this script via option -t. Note that an extra '
+           'script can be specified via an env var EXTRA_LANDMINES_SCRIPT.')
+  parser.add_option('-v', '--verbose', action='store_true',
+      default=('LANDMINES_VERBOSE' in os.environ),
+      help=('Emit some extra debugging information (default off). This option '
+          'is also enabled by the presence of a LANDMINES_VERBOSE environment '
+          'variable.'))
+
+  options, args = parser.parse_args()
+
+  if args:
+    parser.error('Unknown arguments %s' % args)
+
+  logging.basicConfig(
+      level=logging.DEBUG if options.verbose else logging.ERROR)
+
+  extra_script = os.environ.get('EXTRA_LANDMINES_SCRIPT')
+  if extra_script:
+    return options.landmine_scripts + [extra_script]
+  else:
+    return options.landmine_scripts
+
+
+def main():
+  landmine_scripts = process_options()
+
+  if landmine_utils.builder() in ('dump_dependency_json', 'eclipse'):
+    return 0
+
+  landmines = []
+  for s in landmine_scripts:
+    proc = subprocess.Popen([sys.executable, s], stdout=subprocess.PIPE)
+    output, _ = proc.communicate()
+    landmines.extend([('%s\n' % l.strip()) for l in output.splitlines()])
+
+  for target in ('Debug', 'Release'):
+    set_up_landmines(target, landmines)
+
+  return 0
+
+
+if __name__ == '__main__':
+  sys.exit(main())
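
The clobber decision above boils down to "does the stored landmine list differ from the freshly emitted one"; the unified diff of the two becomes the contents of .landmines_triggered. A stripped-down sketch of that comparison with hypothetical landmine lists (file handling omitted; see set_up_landmines for the real flow):

    import difflib

    old_landmines = ['Need to clobber after ICU52 roll.\n']
    new_landmines = ['Need to clobber after ICU52 roll.\n',
                     'Activating MSVS 2013.\n']

    if old_landmines != new_landmines:
        # This diff is what build scripts find in .landmines_triggered.
        for line in difflib.unified_diff(old_landmines, new_landmines,
                                         fromfile='old_landmines',
                                         tofile='new_landmines', n=0):
            print line,
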
diff --git a/build/standalone.gypi b/build/standalone.gypi
index 7670e5b..b09122b 100644
--- a/build/standalone.gypi
+++ b/build/standalone.gypi
@@ -33,8 +33,8 @@
   'includes': ['toolchain.gypi'],
   'variables': {
     'component%': 'static_library',
-    'clang%': 0,
     'asan%': 0,
+    'tsan%': 0,
     'visibility%': 'hidden',
     'v8_enable_backtrace%': 0,
     'v8_enable_i18n_support%': 1,
@@ -51,13 +51,7 @@
               # Anything else gets passed through, which probably won't work
               # very well; such hosts should pass an explicit target_arch
               # to gyp.
-              'host_arch%':
-                '<!(uname -m | sed -e "s/i.86/ia32/;\
-                                       s/x86_64/x64/;\
-                                       s/amd64/x64/;\
-                                       s/arm.*/arm/;\
-                                       s/aarch64/arm64/;\
-                                       s/mips.*/mipsel/")',
+              'host_arch%': '<!pymod_do_main(detect_v8_host_arch)',
             }, {
               # OS!="linux" and OS!="freebsd" and OS!="openbsd" and
               # OS!="netbsd" and OS!="mac"
@@ -104,6 +98,7 @@
       ['(v8_target_arch=="arm" and host_arch!="arm") or \
         (v8_target_arch=="arm64" and host_arch!="arm64") or \
         (v8_target_arch=="mipsel" and host_arch!="mipsel") or \
+        (v8_target_arch=="mips64el" and host_arch!="mips64el") or \
         (v8_target_arch=="x64" and host_arch!="x64") or \
         (OS=="android" or OS=="qnx")', {
         'want_separate_host_toolset': 1,
@@ -121,6 +116,11 @@
       }, {
         'v8_enable_gdbjit%': 0,
       }],
+      ['OS=="mac"', {
+        'clang%': 1,
+      }, {
+        'clang%': 0,
+      }],
     ],
     # Default ARM variable settings.
     'arm_version%': 'default',
@@ -191,17 +191,45 @@
         ],
       },
     }],
+    ['tsan==1', {
+      'target_defaults': {
+        'cflags+': [
+          '-fno-omit-frame-pointer',
+          '-gline-tables-only',
+          '-fsanitize=thread',
+          '-fPIC',
+          '-Wno-c++11-extensions',
+        ],
+        'cflags!': [
+          '-fomit-frame-pointer',
+        ],
+        'ldflags': [
+          '-fsanitize=thread',
+          '-pie',
+        ],
+        'defines': [
+          'THREAD_SANITIZER',
+        ],
+      },
+    }],
     ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
        or OS=="netbsd"', {
       'target_defaults': {
-        'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
-                    '-pthread', '-fno-exceptions', '-pedantic' ],
-        'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti' ],
+        'cflags': [
+          '-Wall',
+          '<(werror)',
+          '-W',
+          '-Wno-unused-parameter',
+          '-Wno-long-long',
+          '-pthread',
+          '-fno-exceptions',
+          '-pedantic',
+          # Don't warn about the "struct foo f = {0};" initialization pattern.
+          '-Wno-missing-field-initializers',
+        ],
+        'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
         'ldflags': [ '-pthread', ],
         'conditions': [
-          [ 'OS=="linux"', {
-            'cflags': [ '-ansi' ],
-          }],
           [ 'visibility=="hidden" and v8_enable_backtrace==0', {
             'cflags': [ '-fvisibility=hidden' ],
           }],
@@ -215,9 +243,16 @@
     #  or OS=="netbsd"'
     ['OS=="qnx"', {
       'target_defaults': {
-        'cflags': [ '-Wall', '<(werror)', '-W', '-Wno-unused-parameter',
-                    '-fno-exceptions' ],
-        'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti' ],
+        'cflags': [
+          '-Wall',
+          '<(werror)',
+          '-W',
+          '-Wno-unused-parameter',
+          '-fno-exceptions',
+          # Don't warn about the "struct foo f = {0};" initialization pattern.
+          '-Wno-missing-field-initializers',
+        ],
+        'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++0x' ],
         'conditions': [
           [ 'visibility=="hidden"', {
             'cflags': [ '-fvisibility=hidden' ],
@@ -244,6 +279,7 @@
         'defines': [
           '_CRT_SECURE_NO_DEPRECATE',
           '_CRT_NONSTDC_NO_DEPRECATE',
+          '_USING_V110_SDK71_',
         ],
         'conditions': [
           ['component=="static_library"', {
@@ -279,6 +315,7 @@
             'AdditionalOptions': ['/ignore:4221'],
           },
           'VCLinkerTool': {
+            'MinimumRequiredVersion': '5.01',  # XP.
             'AdditionalDependencies': [
               'ws2_32.lib',
             ],
@@ -315,7 +352,7 @@
       'target_defaults': {
         'xcode_settings': {
           'ALWAYS_SEARCH_USER_PATHS': 'NO',
-          'GCC_C_LANGUAGE_STANDARD': 'ansi',        # -ansi
+          'GCC_C_LANGUAGE_STANDARD': 'c99',         # -std=c99
           'GCC_CW_ASM_SYNTAX': 'NO',                # No -fasm-blocks
           'GCC_DYNAMIC_NO_PIC': 'NO',               # No -mdynamic-no-pic
                                                     # (Equivalent to -fPIC)
@@ -340,6 +377,8 @@
             '-Wendif-labels',
             '-W',
             '-Wno-unused-parameter',
+            # Don't warn about the "struct foo f = {0};" initialization pattern.
+            '-Wno-missing-field-initializers',
           ],
         },
         'conditions': [
@@ -351,7 +390,7 @@
           ['clang==1', {
             'xcode_settings': {
               'GCC_VERSION': 'com.apple.compilers.llvm.clang.1_0',
-              'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++11',  # -std=gnu++11
+              'CLANG_CXX_LANGUAGE_STANDARD': 'gnu++0x',  # -std=gnu++0x
             },
           }],
         ],
diff --git a/build/toolchain.gypi b/build/toolchain.gypi
index d91bcb7..7f3b9e5 100644
--- a/build/toolchain.gypi
+++ b/build/toolchain.gypi
@@ -31,7 +31,7 @@
   'variables': {
     'msvs_use_common_release': 0,
     'gcc_version%': 'unknown',
-    'CXX%': '${CXX:-$(which g++)}',  # Used to assemble a shell command.
+    'clang%': 0,
     'v8_target_arch%': '<(target_arch)',
     # Native Client builds currently use the V8 ARM JIT and
     # arm/simulator-arm.cc to defer the significant effort required
@@ -47,7 +47,7 @@
     # these registers in the snapshot and use CPU feature probing when running
     # on the target.
     'v8_can_use_vfp32dregs%': 'false',
-    'arm_test%': 'off',
+    'arm_test_noprobe%': 'off',
 
     # Similar to vfp but on MIPS.
     'v8_can_use_fpu_instructions%': 'true',
@@ -56,7 +56,15 @@
     'v8_use_mips_abi_hardfloat%': 'true',
 
     # Default arch variant for MIPS.
-    'mips_arch_variant%': 'mips32r2',
+    'mips_arch_variant%': 'r2',
+
+    # Possible values: fp32, fp64, fpxx.
+    # fp32 - 32 32-bit FPU registers are available, doubles are placed in
+    #        register pairs.
+    # fp64 - 32 64-bit FPU registers are available.
+    # fpxx - compatibility mode; chooses fp32 or fp64 depending on runtime
+    #        detection.
+    'mips_fpu_mode%': 'fp32',
 
     'v8_enable_backtrace%': 0,
 
@@ -82,20 +90,80 @@
 
     # Allow to suppress the array bounds warning (default is no suppression).
     'wno_array_bounds%': '',
+
+    # Link-Time Optimizations
+    'use_lto%': 0,
+
+    'variables': {
+      # This is set when building the Android WebView inside the Android build
+      # system, using the 'android' gyp backend.
+      'android_webview_build%': 0,
+    },
+    # Copy it out one scope.
+    'android_webview_build%': '<(android_webview_build)',
   },
+  'conditions': [
+    ['host_arch=="ia32" or host_arch=="x64" or clang==1', {
+      'variables': {
+        'host_cxx_is_biarch%': 1,
+       },
+     }, {
+      'variables': {
+        'host_cxx_is_biarch%': 0,
+      },
+    }],
+    ['target_arch=="ia32" or target_arch=="x64" or target_arch=="x87" or \
+      clang==1', {
+      'variables': {
+        'target_cxx_is_biarch%': 1,
+       },
+     }, {
+      'variables': {
+        'target_cxx_is_biarch%': 0,
+      },
+    }],
+  ],
   'target_defaults': {
     'conditions': [
       ['v8_target_arch=="arm"', {
         'defines': [
           'V8_TARGET_ARCH_ARM',
         ],
+        'conditions': [
+          [ 'arm_version==7 or arm_version=="default"', {
+            'defines': [
+              'CAN_USE_ARMV7_INSTRUCTIONS',
+            ],
+          }],
+          [ 'arm_fpu=="vfpv3-d16" or arm_fpu=="default"', {
+            'defines': [
+              'CAN_USE_VFP3_INSTRUCTIONS',
+            ],
+          }],
+          [ 'arm_fpu=="vfpv3"', {
+            'defines': [
+              'CAN_USE_VFP3_INSTRUCTIONS',
+              'CAN_USE_VFP32DREGS',
+            ],
+          }],
+          [ 'arm_fpu=="neon"', {
+            'defines': [
+              'CAN_USE_VFP3_INSTRUCTIONS',
+              'CAN_USE_VFP32DREGS',
+              'CAN_USE_NEON',
+            ],
+          }],
+          [ 'arm_test_noprobe=="on"', {
+            'defines': [
+              'ARM_TEST_NO_FEATURE_PROBE',
+            ],
+          }],
+        ],
         'target_conditions': [
           ['_toolset=="host"', {
-            'variables': {
-              'armcompiler': '<!($(echo ${CXX_host:-$(which g++)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")',
-            },
             'conditions': [
-              ['armcompiler=="yes"', {
+              ['v8_target_arch==host_arch and android_webview_build==0', {
+                # Host built with an Arm CXX compiler.
                 'conditions': [
                   [ 'arm_version==7', {
                     'cflags': ['-march=armv7-a',],
@@ -116,45 +184,11 @@
                   [ 'arm_thumb==0', {
                     'cflags': ['-marm',],
                   }],
-                  [ 'arm_test=="on"', {
-                    'defines': [
-                      'ARM_TEST',
-                    ],
-                  }],
                 ],
               }, {
-                # armcompiler=="no"
+                # 'v8_target_arch!=host_arch'
+                # Host not built with an Arm CXX compiler (simulator build).
                 'conditions': [
-                  [ 'arm_version==7 or arm_version=="default"', {
-                    'defines': [
-                      'CAN_USE_ARMV7_INSTRUCTIONS=1',
-                    ],
-                    'conditions': [
-                      [ 'arm_fpu=="default"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                        ],
-                      }],
-                      [ 'arm_fpu=="vfpv3-d16"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                        ],
-                      }],
-                      [ 'arm_fpu=="vfpv3"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                          'CAN_USE_VFP32DREGS',
-                        ],
-                      }],
-                      [ 'arm_fpu=="neon"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                          'CAN_USE_VFP32DREGS',
-                          'CAN_USE_NEON',
-                        ],
-                      }],
-                    ],
-                  }],
                   [ 'arm_float_abi=="hard"', {
                     'defines': [
                       'USE_EABI_HARDFLOAT=1',
@@ -166,18 +200,13 @@
                     ],
                   }],
                 ],
-                'defines': [
-                  'ARM_TEST',
-                ],
               }],
             ],
           }],  # _toolset=="host"
           ['_toolset=="target"', {
-            'variables': {
-              'armcompiler': '<!($(echo ${CXX_target:-<(CXX)}) -v 2>&1 | grep -q "^Target: arm" && echo "yes" || echo "no")',
-            },
             'conditions': [
-              ['armcompiler=="yes"', {
+              ['v8_target_arch==target_arch and android_webview_build==0', {
+                # Target built with an Arm CXX compiler.
                 'conditions': [
                   [ 'arm_version==7', {
                     'cflags': ['-march=armv7-a',],
@@ -198,67 +227,11 @@
                   [ 'arm_thumb==0', {
                     'cflags': ['-marm',],
                   }],
-                  [ 'arm_test=="on"', {
-                    'defines': [
-                      'ARM_TEST',
-                    ],
-                    'conditions': [
-                      [ 'arm_fpu=="vfpv3-d16"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                        ],
-                      }],
-                      [ 'arm_fpu=="vfpv3"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                          'CAN_USE_VFP32DREGS',
-                        ],
-                      }],
-                      [ 'arm_fpu=="neon"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                          'CAN_USE_VFP32DREGS',
-                          'CAN_USE_NEON',
-                        ],
-                      }],
-                    ],
-                  }],
                 ],
               }, {
-                # armcompiler=="no"
+                # 'v8_target_arch!=target_arch'
+                # Target not built with an Arm CXX compiler (simulator build).
                 'conditions': [
-                  [ 'arm_version==7 or arm_version=="default"', {
-                    'defines': [
-                      'CAN_USE_ARMV7_INSTRUCTIONS=1',
-                    ],
-                    'conditions': [
-                      [ 'arm_fpu=="default"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                          'CAN_USE_VFP32DREGS',
-                          'CAN_USE_NEON',
-                        ],
-                      }],
-                      [ 'arm_fpu=="vfpv3-d16"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                        ],
-                      }],
-                      [ 'arm_fpu=="vfpv3"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                          'CAN_USE_VFP32DREGS',
-                        ],
-                      }],
-                      [ 'arm_fpu=="neon"', {
-                        'defines': [
-                          'CAN_USE_VFP3_INSTRUCTIONS',
-                          'CAN_USE_VFP32DREGS',
-                          'CAN_USE_NEON',
-                        ],
-                      }],
-                    ],
-                  }],
                   [ 'arm_float_abi=="hard"', {
                     'defines': [
                       'USE_EABI_HARDFLOAT=1',
@@ -270,8 +243,14 @@
                     ],
                   }],
                 ],
-                'defines': [
-                  'ARM_TEST',
+              }],
+              # Disable LTO for v8
+              # v8 is optimized for speed, which takes precedence over
+              # size optimization in LTO.
+              ['use_lto==1', {
+                'cflags!': [
+                  '-flto',
+                  '-ffat-lto-objects',
                 ],
               }],
             ],
@@ -298,11 +277,9 @@
         'defines': [
           'V8_TARGET_ARCH_MIPS',
         ],
-        'variables': {
-          'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
-        },
         'conditions': [
-          ['mipscompiler=="yes"', {
+          ['v8_target_arch==target_arch and android_webview_build==0', {
+            # Target built with a Mips CXX compiler.
             'target_conditions': [
               ['_toolset=="target"', {
                 'cflags': ['-EB'],
@@ -315,10 +292,33 @@
                     'cflags': ['-msoft-float'],
                     'ldflags': ['-msoft-float'],
                   }],
-                  ['mips_arch_variant=="mips32r2"', {
+                  ['mips_fpu_mode=="fp64"', {
+                    'cflags': ['-mfp64'],
+                  }],
+                  ['mips_fpu_mode=="fpxx"', {
+                    'cflags': ['-mfpxx'],
+                  }],
+                  ['mips_fpu_mode=="fp32"', {
+                    'cflags': ['-mfp32'],
+                  }],
+                  ['mips_arch_variant=="r6"', {
+                    'cflags!': ['-mfp32'],
+                    'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+                    'ldflags': [
+                      '-mips32r6',
+                      '-Wl,--dynamic-linker=$(LDSO_PATH)',
+                      '-Wl,--rpath=$(LD_R_PATH)',
+                    ],
+                  }],
+                  ['mips_arch_variant=="r2"', {
                     'cflags': ['-mips32r2', '-Wa,-mips32r2'],
                   }],
-                  ['mips_arch_variant=="mips32r1"', {
+                  ['mips_arch_variant=="r1"', {
+                    'cflags!': ['-mfp64'],
+                    'cflags': ['-mips32', '-Wa,-mips32'],
+                  }],
+                  ['mips_arch_variant=="rx"', {
+                    'cflags!': ['-mfp64'],
                     'cflags': ['-mips32', '-Wa,-mips32'],
                   }],
                 ],
@@ -340,8 +340,34 @@
               '__mips_soft_float=1'
             ],
           }],
-          ['mips_arch_variant=="mips32r2"', {
+          ['mips_arch_variant=="rx"', {
+            'defines': [
+              '_MIPS_ARCH_MIPS32RX',
+              'FPU_MODE_FPXX',
+            ],
+          }],
+          ['mips_arch_variant=="r6"', {
+            'defines': [
+              '_MIPS_ARCH_MIPS32R6',
+              'FPU_MODE_FP64',
+            ],
+          }],
+          ['mips_arch_variant=="r2"', {
             'defines': ['_MIPS_ARCH_MIPS32R2',],
+            'conditions': [
+              ['mips_fpu_mode=="fp64"', {
+                'defines': ['FPU_MODE_FP64',],
+              }],
+              ['mips_fpu_mode=="fpxx"', {
+                'defines': ['FPU_MODE_FPXX',],
+              }],
+              ['mips_fpu_mode=="fp32"', {
+                'defines': ['FPU_MODE_FP32',],
+              }],
+            ],
+          }],
+          ['mips_arch_variant=="r1"', {
+            'defines': ['FPU_MODE_FP32',],
           }],
         ],
       }],  # v8_target_arch=="mips"
@@ -349,11 +375,9 @@
         'defines': [
           'V8_TARGET_ARCH_MIPS',
         ],
-        'variables': {
-          'mipscompiler': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: mips" && echo "yes" || echo "no")',
-        },
         'conditions': [
-          ['mipscompiler=="yes"', {
+          ['v8_target_arch==target_arch and android_webview_build==0', {
+            # Target built with a Mips CXX compiler.
             'target_conditions': [
               ['_toolset=="target"', {
                 'cflags': ['-EL'],
@@ -366,13 +390,37 @@
                     'cflags': ['-msoft-float'],
                     'ldflags': ['-msoft-float'],
                   }],
-                  ['mips_arch_variant=="mips32r2"', {
+                  ['mips_fpu_mode=="fp64"', {
+                    'cflags': ['-mfp64'],
+                  }],
+                  ['mips_fpu_mode=="fpxx"', {
+                    'cflags': ['-mfpxx'],
+                  }],
+                  ['mips_fpu_mode=="fp32"', {
+                    'cflags': ['-mfp32'],
+                  }],
+                  ['mips_arch_variant=="r6"', {
+                    'cflags!': ['-mfp32'],
+                    'cflags': ['-mips32r6', '-Wa,-mips32r6'],
+                    'ldflags': [
+                      '-mips32r6',
+                      '-Wl,--dynamic-linker=$(LDSO_PATH)',
+                      '-Wl,--rpath=$(LD_R_PATH)',
+                    ],
+                  }],
+                  ['mips_arch_variant=="r2"', {
                     'cflags': ['-mips32r2', '-Wa,-mips32r2'],
                   }],
-                  ['mips_arch_variant=="mips32r1"', {
+                  ['mips_arch_variant=="r1"', {
+                    'cflags!': ['-mfp64'],
                     'cflags': ['-mips32', '-Wa,-mips32'],
-                 }],
+                  }],
+                  ['mips_arch_variant=="rx"', {
+                    'cflags!': ['-mfp64'],
+                    'cflags': ['-mips32', '-Wa,-mips32'],
+                  }],
                   ['mips_arch_variant=="loongson"', {
+                    'cflags!': ['-mfp64'],
                     'cflags': ['-mips3', '-Wa,-mips3'],
                   }],
                 ],
@@ -394,14 +442,105 @@
               '__mips_soft_float=1'
             ],
           }],
-          ['mips_arch_variant=="mips32r2"', {
+          ['mips_arch_variant=="rx"', {
+            'defines': [
+              '_MIPS_ARCH_MIPS32RX',
+              'FPU_MODE_FPXX',
+            ],
+          }],
+          ['mips_arch_variant=="r6"', {
+            'defines': [
+              '_MIPS_ARCH_MIPS32R6',
+               'FPU_MODE_FP64',
+            ],
+          }],
+          ['mips_arch_variant=="r2"', {
             'defines': ['_MIPS_ARCH_MIPS32R2',],
+            'conditions': [
+              ['mips_fpu_mode=="fp64"', {
+                'defines': ['FPU_MODE_FP64',],
+              }],
+              ['mips_fpu_mode=="fpxx"', {
+                'defines': ['FPU_MODE_FPXX',],
+              }],
+              ['mips_fpu_mode=="fp32"', {
+                'defines': ['FPU_MODE_FP32',],
+              }],
+            ],
+          }],
+          ['mips_arch_variant=="r1"', {
+            'defines': ['FPU_MODE_FP32',],
           }],
           ['mips_arch_variant=="loongson"', {
-            'defines': ['_MIPS_ARCH_LOONGSON',],
+            'defines': [
+              '_MIPS_ARCH_LOONGSON',
+              'FPU_MODE_FP32',
+            ],
           }],
         ],
       }],  # v8_target_arch=="mipsel"
+      ['v8_target_arch=="mips64el"', {
+        'defines': [
+          'V8_TARGET_ARCH_MIPS64',
+        ],
+        'conditions': [
+          ['v8_target_arch==target_arch and android_webview_build==0', {
+            # Target built with a Mips CXX compiler.
+            'target_conditions': [
+              ['_toolset=="target"', {
+                'cflags': ['-EL'],
+                'ldflags': ['-EL'],
+                'conditions': [
+                  [ 'v8_use_mips_abi_hardfloat=="true"', {
+                    'cflags': ['-mhard-float'],
+                    'ldflags': ['-mhard-float'],
+                  }, {
+                    'cflags': ['-msoft-float'],
+                    'ldflags': ['-msoft-float'],
+                  }],
+                  ['mips_arch_variant=="r6"', {
+                    'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
+                    'ldflags': [
+                      '-mips64r6', '-mabi=64',
+                      '-Wl,--dynamic-linker=$(LDSO_PATH)',
+                      '-Wl,--rpath=$(LD_R_PATH)',
+                    ],
+                  }],
+                  ['mips_arch_variant=="r2"', {
+                    'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
+                    'ldflags': [
+                      '-mips64r2', '-mabi=64',
+                      '-Wl,--dynamic-linker=$(LDSO_PATH)',
+                      '-Wl,--rpath=$(LD_R_PATH)',
+                    ],
+                  }],
+                ],
+              }],
+            ],
+          }],
+          [ 'v8_can_use_fpu_instructions=="true"', {
+            'defines': [
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }],
+          [ 'v8_use_mips_abi_hardfloat=="true"', {
+            'defines': [
+              '__mips_hard_float=1',
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }, {
+            'defines': [
+              '__mips_soft_float=1'
+            ],
+          }],
+          ['mips_arch_variant=="r6"', {
+            'defines': ['_MIPS_ARCH_MIPS64R6',],
+          }],
+          ['mips_arch_variant=="r2"', {
+            'defines': ['_MIPS_ARCH_MIPS64R2',],
+          }],
+        ],
+      }],  # v8_target_arch=="mips64el"
       ['v8_target_arch=="x64"', {
         'defines': [
           'V8_TARGET_ARCH_X64',
@@ -416,16 +555,42 @@
         },
         'msvs_configuration_platform': 'x64',
       }],  # v8_target_arch=="x64"
+      ['v8_target_arch=="x32"', {
+        'defines': [
+          # x32 port shares the source code with x64 port.
+          'V8_TARGET_ARCH_X64',
+          'V8_TARGET_ARCH_32_BIT',
+        ],
+        'cflags': [
+          '-mx32',
+          # Inhibit warning if long long type is used.
+          '-Wno-long-long',
+        ],
+        'ldflags': [
+          '-mx32',
+        ],
+      }],  # v8_target_arch=="x32"
       ['OS=="win"', {
         'defines': [
           'WIN32',
         ],
+        # 4351: VS 2005 and later are warning us that they've fixed a bug
+        #       present in VS 2003 and earlier.
+        'msvs_disabled_warnings': [4351],
         'msvs_configuration_attributes': {
           'OutputDirectory': '<(DEPTH)\\build\\$(ConfigurationName)',
           'IntermediateDirectory': '$(OutDir)\\obj\\$(ProjectName)',
           'CharacterSet': '1',
         },
       }],
+      ['OS=="win" and v8_target_arch=="ia32"', {
+        'msvs_settings': {
+          'VCCLCompilerTool': {
+            # Ensure no surprising artifacts from 80-bit double math with x86.
+            'AdditionalOptions': ['/arch:SSE2'],
+          },
+        },
+      }],
       ['OS=="win" and v8_enable_prof==1', {
         'msvs_settings': {
           'VCLinkerTool': {
@@ -433,6 +598,69 @@
           },
         },
       }],
+      ['(OS=="linux" or OS=="freebsd"  or OS=="openbsd" or OS=="solaris" \
+         or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
+        (v8_target_arch=="arm" or v8_target_arch=="ia32" or \
+         v8_target_arch=="x87" or v8_target_arch=="mips" or \
+         v8_target_arch=="mipsel")', {
+        'target_conditions': [
+          ['_toolset=="host"', {
+            'conditions': [
+              ['host_cxx_is_biarch==1', {
+                'cflags': [ '-m32' ],
+                'ldflags': [ '-m32' ]
+              }],
+            ],
+            'xcode_settings': {
+              'ARCHS': [ 'i386' ],
+            },
+          }],
+          ['_toolset=="target"', {
+            'conditions': [
+              ['target_cxx_is_biarch==1 and nacl_target_arch!="nacl_x64"', {
+                'cflags': [ '-m32' ],
+                'ldflags': [ '-m32' ],
+              }],
+              # Enable feedback-directed optimisation when building for Android.
+              [ 'android_webview_build == 1', {
+                'aosp_build_settings': {
+                  'LOCAL_FDO_SUPPORT': 'true',
+                },
+              }],
+            ],
+            'xcode_settings': {
+              'ARCHS': [ 'i386' ],
+            },
+          }],
+        ],
+      }],
+      ['(OS=="linux" or OS=="android") and \
+        (v8_target_arch=="x64" or v8_target_arch=="arm64")', {
+        'target_conditions': [
+          ['_toolset=="host"', {
+            'conditions': [
+              ['host_cxx_is_biarch==1', {
+                'cflags': [ '-m64' ],
+                'ldflags': [ '-m64' ]
+              }],
+             ],
+           }],
+           ['_toolset=="target"', {
+             'conditions': [
+               ['target_cxx_is_biarch==1', {
+                 'cflags': [ '-m64' ],
+                 'ldflags': [ '-m64' ],
+               }],
+               # Enable feedback-directed optimisation when building for Android.
+               [ 'android_webview_build == 1', {
+                 'aosp_build_settings': {
+                   'LOCAL_FDO_SUPPORT': 'true',
+                 },
+               }],
+             ]
+           }],
+         ],
+      }],
       ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
          or OS=="netbsd" or OS=="qnx"', {
         'conditions': [
@@ -444,72 +672,6 @@
       ['OS=="solaris"', {
         'defines': [ '__C99FEATURES__=1' ],  # isinf() etc.
       }],
-      ['(OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
-         or OS=="netbsd" or OS=="mac" or OS=="android" or OS=="qnx") and \
-        (v8_target_arch=="arm" or v8_target_arch=="ia32" or v8_target_arch=="x87" or\
-         v8_target_arch=="mips" or v8_target_arch=="mipsel")', {
-        # Check whether the host compiler and target compiler support the
-        # '-m32' option and set it if so.
-        'target_conditions': [
-          ['_toolset=="host"', {
-            'variables': {
-              'm32flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
-            },
-            'cflags': [ '<(m32flag)' ],
-            'ldflags': [ '<(m32flag)' ],
-            'xcode_settings': {
-              'ARCHS': [ 'i386' ],
-            },
-          }],
-          ['_toolset=="target"', {
-            'variables': {
-              'm32flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m32 -E - > /dev/null 2>&1 < /dev/null) && echo "-m32" || true)',
-              'clang%': 0,
-            },
-            'conditions': [
-              ['((OS!="android" and OS!="qnx") or clang==1) and \
-                nacl_target_arch!="nacl_x64"', {
-                'cflags': [ '<(m32flag)' ],
-                'ldflags': [ '<(m32flag)' ],
-              }],
-              ['OS=="android"', {
-                'android_enable_fdo': 1,
-              }],
-            ],
-            'xcode_settings': {
-              'ARCHS': [ 'i386' ],
-            },
-          }],
-        ],
-      }],
-      ['(OS=="linux" or OS=="android") and \
-        (v8_target_arch=="x64" or v8_target_arch=="arm64")', {
-        # Check whether the host compiler and target compiler support the
-        # '-m64' option and set it if so.
-        'target_conditions': [
-          ['_toolset=="host"', {
-            'variables': {
-              'm64flag': '<!(($(echo ${CXX_host:-$(which g++)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
-            },
-            'cflags': [ '<(m64flag)' ],
-            'ldflags': [ '<(m64flag)' ],
-          }],
-          ['_toolset=="target"', {
-            'variables': {
-              'm64flag': '<!(($(echo ${CXX_target:-<(CXX)}) -m64 -E - > /dev/null 2>&1 < /dev/null) && echo "-m64" || true)',
-            },
-            'conditions': [
-              ['((OS!="android" and OS!="qnx") or clang==1)', {
-                'cflags': [ '<(m64flag)' ],
-                'ldflags': [ '<(m64flag)' ],
-              }],
-              ['OS=="android"', {
-                'android_enable_fdo': 1,
-              }],
-            ],
-          }]
-        ],
-      }],
       ['OS=="freebsd" or OS=="openbsd"', {
         'cflags': [ '-I/usr/local/include' ],
       }],
diff --git a/codereview.settings b/codereview.settings
index 3f642f1..b7f853c 100644
--- a/codereview.settings
+++ b/codereview.settings
@@ -5,3 +5,4 @@
 TRY_ON_UPLOAD: False
 TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
 TRYSERVER_ROOT: v8
+PROJECT: v8
diff --git a/include/libplatform/libplatform.h b/include/libplatform/libplatform.h
new file mode 100644
index 0000000..2125e97
--- /dev/null
+++ b/include/libplatform/libplatform.h
@@ -0,0 +1,38 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LIBPLATFORM_LIBPLATFORM_H_
+#define V8_LIBPLATFORM_LIBPLATFORM_H_
+
+#include "include/v8-platform.h"
+
+namespace v8 {
+namespace platform {
+
+/**
+ * Returns a new instance of the default v8::Platform implementation.
+ *
+ * The caller will take ownership of the returned pointer. |thread_pool_size|
+ * is the number of worker threads to allocate for background jobs. If a value
+ * of zero is passed, a suitable default based on the current number of
+ * processors online will be chosen.
+ */
+v8::Platform* CreateDefaultPlatform(int thread_pool_size = 0);
+
+
+/**
+ * Pumps the message loop for the given isolate.
+ *
+ * The caller has to make sure that this is called from the right thread.
+ * Returns true if a task was executed, and false otherwise. This call does
+ * not block if no task is pending. The |platform| has to be created using
+ * |CreateDefaultPlatform|.
+ */
+bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate);
+
+
+}  // namespace platform
+}  // namespace v8
+
+#endif  // V8_LIBPLATFORM_LIBPLATFORM_H_
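A minimal embedder bootstrap sketch for the two helpers declared above. It assumes the V8::InitializePlatform/ShutdownPlatform entry points present at this revision; isolate entry, context creation, and error handling are elided.

#include "include/libplatform/libplatform.h"
#include "include/v8.h"

int main() {
  // The embedder owns the returned platform and must keep it alive for as
  // long as V8 runs.
  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
  v8::V8::InitializePlatform(platform);
  v8::V8::Initialize();

  v8::Isolate* isolate = v8::Isolate::New();
  // ... enter the isolate, create a context, run scripts ...

  // Drain pending foreground tasks; returns false once the queue is empty.
  while (v8::platform::PumpMessageLoop(platform, isolate)) {
  }

  isolate->Dispose();
  v8::V8::Dispose();
  v8::V8::ShutdownPlatform();
  delete platform;
  return 0;
}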
diff --git a/include/v8-debug.h b/include/v8-debug.h
index 75f9c76..6abf4e0 100644
--- a/include/v8-debug.h
+++ b/include/v8-debug.h
@@ -19,8 +19,10 @@
   NewFunction = 3,
   BeforeCompile = 4,
   AfterCompile  = 5,
-  ScriptCollected = 6,
-  BreakForCommand = 7
+  CompileError = 6,
+  PromiseEvent = 7,
+  AsyncTaskEvent = 8,
+  BreakForCommand = 9
 };
 
 
@@ -165,6 +167,9 @@
   // happened yet.
   static void CancelDebugBreak(Isolate* isolate);
 
+  // Check if a debugger break is scheduled in the given isolate.
+  static bool CheckDebugBreak(Isolate* isolate);
+
   // Break execution of JavaScript in the given isolate (this method
   // can be invoked from a non-VM thread) for further client command
   // execution on a VM thread. Client data is then passed in
diff --git a/include/v8-platform.h b/include/v8-platform.h
index 5667211..1f1679f 100644
--- a/include/v8-platform.h
+++ b/include/v8-platform.h
@@ -5,10 +5,10 @@
 #ifndef V8_V8_PLATFORM_H_
 #define V8_V8_PLATFORM_H_
 
-#include "v8.h"
-
 namespace v8 {
 
+class Isolate;
+
 /**
  * A Task represents a unit of work.
  */
@@ -37,6 +37,8 @@
     kLongRunningTask
   };
 
+  virtual ~Platform() {}
+
   /**
    * Schedules a task to be invoked on a background thread. |expected_runtime|
    * indicates that the task will run a long time. The Platform implementation
@@ -53,9 +55,6 @@
    * scheduling. The definition of "foreground" is opaque to V8.
    */
   virtual void CallOnForegroundThread(Isolate* isolate, Task* task) = 0;
-
- protected:
-  virtual ~Platform() {}
 };
 
 }  // namespace v8
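With the destructor made public, embedders can now delete a Platform through the base pointer. A sketch of the smallest possible implementation, assuming the two scheduling methods in this header are the only pure virtuals at this revision; it runs every task inline instead of on a real thread pool.

#include "include/v8-platform.h"

class InlinePlatform : public v8::Platform {
 public:
  virtual ~InlinePlatform() {}
  virtual void CallOnBackgroundThread(v8::Task* task,
                                      ExpectedRuntime expected_runtime) {
    task->Run();  // No thread pool in this sketch: run synchronously.
    delete task;
  }
  virtual void CallOnForegroundThread(v8::Isolate* isolate, v8::Task* task) {
    task->Run();  // Executes immediately on the calling thread.
    delete task;
  }
};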
diff --git a/include/v8.h b/include/v8.h
index 4e85561..ec1941e 100644
--- a/include/v8.h
+++ b/include/v8.h
@@ -77,6 +77,7 @@
 class Int32;
 class Integer;
 class Isolate;
+class Name;
 class Number;
 class NumberObject;
 class Object;
@@ -129,6 +130,7 @@
 class HeapObject;
 class Isolate;
 class Object;
+struct StreamedSource;
 template<typename T> class CustomArguments;
 class PropertyCallbackArguments;
 class FunctionCallbackArguments;
@@ -895,6 +897,13 @@
 };
 
 
+// Convenience wrapper.
+template <class T>
+inline Maybe<T> maybe(T t) {
+  return Maybe<T>(t);
+}
+
+
 // --- Special objects ---
 
 
@@ -916,20 +925,24 @@
       Handle<Value> resource_name,
       Handle<Integer> resource_line_offset = Handle<Integer>(),
       Handle<Integer> resource_column_offset = Handle<Integer>(),
-      Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>())
+      Handle<Boolean> resource_is_shared_cross_origin = Handle<Boolean>(),
+      Handle<Integer> script_id = Handle<Integer>())
       : resource_name_(resource_name),
         resource_line_offset_(resource_line_offset),
         resource_column_offset_(resource_column_offset),
-        resource_is_shared_cross_origin_(resource_is_shared_cross_origin) { }
+        resource_is_shared_cross_origin_(resource_is_shared_cross_origin),
+        script_id_(script_id) { }
   V8_INLINE Handle<Value> ResourceName() const;
   V8_INLINE Handle<Integer> ResourceLineOffset() const;
   V8_INLINE Handle<Integer> ResourceColumnOffset() const;
   V8_INLINE Handle<Boolean> ResourceIsSharedCrossOrigin() const;
+  V8_INLINE Handle<Integer> ScriptID() const;
  private:
   Handle<Value> resource_name_;
   Handle<Integer> resource_line_offset_;
   Handle<Integer> resource_column_offset_;
   Handle<Boolean> resource_is_shared_cross_origin_;
+  Handle<Integer> script_id_;
 };
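A sketch of constructing a ScriptOrigin that carries the new script_id field; the resource name "app.js" and the id 42 are illustrative values.

v8::ScriptOrigin MakeOrigin(v8::Isolate* isolate) {
  return v8::ScriptOrigin(
      v8::String::NewFromUtf8(isolate, "app.js"),  // resource name
      v8::Integer::New(isolate, 0),                // line offset
      v8::Integer::New(isolate, 0),                // column offset
      v8::True(isolate),                           // shared cross-origin
      v8::Integer::New(isolate, 42));              // script_id (new field)
}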
 
 
@@ -947,6 +960,15 @@
   Handle<Value> GetScriptName();
 
   /**
+   * Data read from magic sourceURL comments.
+   */
+  Handle<Value> GetSourceURL();
+  /**
+   * Data read from magic sourceMappingURL comments.
+   */
+  Handle<Value> GetSourceMappingURL();
+
+  /**
    * Returns zero based line number of the code_pos location in the script.
    * -1 will be returned if no information available.
    */
@@ -1061,19 +1083,98 @@
     Handle<Integer> resource_column_offset;
     Handle<Boolean> resource_is_shared_cross_origin;
 
-    // Cached data from previous compilation (if any), or generated during
-    // compilation (if the generate_cached_data flag is passed to
-    // ScriptCompiler).
+    // Cached data from previous compilation (if a kConsume*Cache flag is
+    // set), or holds newly generated cache data (if a kProduce*Cache flag
+    // is set) when calling a compile method.
     CachedData* cached_data;
   };
 
+  /**
+   * For streaming incomplete script data to V8. The embedder should implement a
+   * subclass of this class.
+   */
+  class ExternalSourceStream {
+   public:
+    virtual ~ExternalSourceStream() {}
+
+    /**
+     * V8 calls this to request the next chunk of data from the embedder. This
+     * function will be called on a background thread, so it's OK to block and
+     * wait for the data, if the embedder doesn't have data yet. Returns the
+     * length of the data returned. When the data ends, GetMoreData should
+     * return 0. Caller takes ownership of the data.
+     *
+     * When streaming UTF-8 data, V8 handles multi-byte characters split between
+     * two data chunks, but doesn't handle multi-byte characters split between
+     * more than two data chunks. The embedder can avoid this problem by always
+     * returning at least 2 bytes of data.
+     *
+     * If the embedder wants to cancel the streaming, they should make the next
+     * GetMoreData call return 0. V8 will interpret it as end of data (and most
+     * probably, parsing will fail). The streaming task will return as soon as
+     * V8 has parsed the data it received so far.
+     */
+    virtual size_t GetMoreData(const uint8_t** src) = 0;
+  };
+
+
+  /**
+   * Source code which can be streamed into V8 in pieces. It will be parsed
+   * while streaming. It can be compiled after the streaming is complete.
+   * StreamedSource must be kept alive while the streaming task is run (see
+   * ScriptStreamingTask below).
+   */
+  class V8_EXPORT StreamedSource {
+   public:
+    enum Encoding { ONE_BYTE, TWO_BYTE, UTF8 };
+
+    StreamedSource(ExternalSourceStream* source_stream, Encoding encoding);
+    ~StreamedSource();
+
+    // Ownership of the CachedData or its buffers is *not* transferred to the
+    // caller. The CachedData object is alive as long as the StreamedSource
+    // object is alive.
+    const CachedData* GetCachedData() const;
+
+    internal::StreamedSource* impl() const { return impl_; }
+
+   private:
+    // Prevent copying. Not implemented.
+    StreamedSource(const StreamedSource&);
+    StreamedSource& operator=(const StreamedSource&);
+
+    internal::StreamedSource* impl_;
+  };
+
+  /**
+   * A streaming task which the embedder must run on a background thread to
+   * stream scripts into V8. Returned by ScriptCompiler::StartStreamingScript.
+   */
+  class ScriptStreamingTask {
+   public:
+    virtual ~ScriptStreamingTask() {}
+    virtual void Run() = 0;
+  };
+
   enum CompileOptions {
-    kNoCompileOptions,
-    kProduceDataToCache = 1 << 0
+    kNoCompileOptions = 0,
+    kProduceParserCache,
+    kConsumeParserCache,
+    kProduceCodeCache,
+    kConsumeCodeCache,
+
+    // Support the previous API for a transition period.
+    kProduceDataToCache
   };
 
   /**
    * Compiles the specified script (context-independent).
+   * Cached data as part of the source object can be optionally produced to be
+   * consumed later to speed up compilation of identical source scripts.
+   *
+   * Note that when producing cached data, the source's cached_data field
+   * must be NULL. When consuming cached data, the cached data must have been
+   * produced by the same version of V8.
    *
    * \param source Script source code.
    * \return Compiled script object (context independent; for running it must be
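A sketch of the produce/consume cycle these options enable, assuming the CachedData(data, length, BufferOwned) constructor of this revision; persisting the bytes between runs is left to the embedder.

#include <stdint.h>
#include <string.h>
#include "include/v8.h"

// First run: compile and copy the generated code cache out of the source.
v8::ScriptCompiler::CachedData* CompileAndCache(v8::Isolate* isolate,
                                                v8::Local<v8::String> code) {
  v8::ScriptCompiler::Source source(code);
  v8::ScriptCompiler::Compile(isolate, &source,
                              v8::ScriptCompiler::kProduceCodeCache);
  const v8::ScriptCompiler::CachedData* data = source.GetCachedData();
  if (data == NULL) return NULL;
  uint8_t* copy = new uint8_t[data->length];  // the Source owns |data|
  memcpy(copy, data->data, data->length);
  return new v8::ScriptCompiler::CachedData(
      copy, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
}

// Later run: hand the saved cache back; it must come from the same V8 version.
v8::Local<v8::Script> CompileFromCache(v8::Isolate* isolate,
                                       v8::Local<v8::String> code,
                                       v8::ScriptCompiler::CachedData* cache) {
  v8::ScriptCompiler::Source source(code, cache);  // Source takes ownership
  return v8::ScriptCompiler::Compile(isolate, &source,
                                     v8::ScriptCompiler::kConsumeCodeCache);
}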
@@ -1097,6 +1198,32 @@
   static Local<Script> Compile(
       Isolate* isolate, Source* source,
       CompileOptions options = kNoCompileOptions);
+
+  /**
+   * Returns a task which streams script data into V8, or NULL if the script
+   * cannot be streamed. The user is responsible for running the task on a
+   * background thread and deleting it. When run, the task starts parsing the
+   * script, and it will request data from the StreamedSource as needed. When
+   * ScriptStreamingTask::Run exits, all data has been streamed and the script
+   * can be compiled (see Compile below).
+   *
+   * This API allows the embedder to start streaming with as little data as
+   * possible; the remaining data (for example, the ScriptOrigin) is passed
+   * to Compile.
+   */
+  static ScriptStreamingTask* StartStreamingScript(
+      Isolate* isolate, StreamedSource* source,
+      CompileOptions options = kNoCompileOptions);
+
+  /**
+   * Compiles a streamed script (bound to current context).
+   *
+   * This can only be called after the streaming has finished
+   * (ScriptStreamingTask has been run). V8 doesn't construct the source string
+   * during streaming, so the embedder needs to pass the full source here.
+   */
+  static Local<Script> Compile(Isolate* isolate, StreamedSource* source,
+                               Handle<String> full_source_string,
+                               const ScriptOrigin& origin);
 };
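A sketch wiring the streaming pieces above together. It assumes StreamedSource takes ownership of the stream at this revision; RunOnWorkerThread is a hypothetical embedder helper that calls task->Run() off the main thread and deletes the task.

#include <string.h>
#include "include/v8.h"

// Feeds one fixed buffer in a single chunk; a real embedder would block in
// GetMoreData() until more network data arrives.
class OneShotStream : public v8::ScriptCompiler::ExternalSourceStream {
 public:
  OneShotStream(const char* data, size_t length)
      : data_(data), length_(length) {}
  virtual size_t GetMoreData(const uint8_t** src) {
    if (length_ == 0) return 0;             // Signals end of data.
    uint8_t* chunk = new uint8_t[length_];  // V8 takes ownership of *src.
    memcpy(chunk, data_, length_);
    *src = chunk;
    size_t n = length_;
    length_ = 0;
    return n;
  }
 private:
  const char* data_;
  size_t length_;
};

// On the main thread:
//   v8::ScriptCompiler::StreamedSource source(
//       new OneShotStream(js, strlen(js)),
//       v8::ScriptCompiler::StreamedSource::UTF8);
//   v8::ScriptCompiler::ScriptStreamingTask* task =
//       v8::ScriptCompiler::StartStreamingScript(isolate, &source);
//   RunOnWorkerThread(task);  // hypothetical: task->Run(); delete task;
//   ... once the task has finished ...
//   v8::Local<v8::Script> script = v8::ScriptCompiler::Compile(
//       isolate, &source, full_source_string, origin);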
 
 
@@ -1109,6 +1236,12 @@
   Local<String> GetSourceLine() const;
 
   /**
+   * Returns the origin for the script from where the function causing the
+   * error originates.
+   */
+  ScriptOrigin GetScriptOrigin() const;
+
+  /**
    * Returns the resource name for the script from where the function causing
    * the error originates.
    */
@@ -1329,6 +1462,12 @@
   bool IsFalse() const;
 
   /**
+   * Returns true if this value is a symbol or a string.
+   * This is an experimental feature.
+   */
+  bool IsName() const;
+
+  /**
    * Returns true if this value is an instance of the String type.
    * See ECMA-262 8.4.
    */
@@ -1386,6 +1525,11 @@
   bool IsDate() const;
 
   /**
+   * Returns true if this value is an Arguments object.
+   */
+  bool IsArgumentsObject() const;
+
+  /**
    * Returns true if this value is a Boolean object.
    */
   bool IsBooleanObject() const;
@@ -1423,6 +1567,30 @@
   bool IsPromise() const;
 
   /**
+   * Returns true if this value is a Map.
+   * This is an experimental feature.
+   */
+  bool IsMap() const;
+
+  /**
+   * Returns true if this value is a Set.
+   * This is an experimental feature.
+   */
+  bool IsSet() const;
+
+  /**
+   * Returns true if this value is a WeakMap.
+   * This is an experimental feature.
+   */
+  bool IsWeakMap() const;
+
+  /**
+   * Returns true if this value is a WeakSet.
+   * This is an experimental feature.
+   */
+  bool IsWeakSet() const;
+
+  /**
    * Returns true if this value is an ArrayBuffer.
    * This is an experimental feature.
    */
@@ -1556,14 +1724,25 @@
 
 
 /**
+ * A superclass for symbols and strings.
+ */
+class V8_EXPORT Name : public Primitive {
+ public:
+  V8_INLINE static Name* Cast(v8::Value* obj);
+ private:
+  static void CheckCast(v8::Value* obj);
+};
+
+
+/**
  * A JavaScript string value (ECMA-262, 4.3.17).
  */
-class V8_EXPORT String : public Primitive {
+class V8_EXPORT String : public Name {
  public:
   enum Encoding {
     UNKNOWN_ENCODING = 0x1,
     TWO_BYTE_ENCODING = 0x0,
-    ASCII_ENCODING = 0x4,
+    ASCII_ENCODING = 0x4,  // TODO(yangguo): deprecate this.
     ONE_BYTE_ENCODING = 0x4
   };
   /**
@@ -1619,7 +1798,8 @@
     NO_OPTIONS = 0,
     HINT_MANY_WRITES_EXPECTED = 1,
     NO_NULL_TERMINATION = 2,
-    PRESERVE_ASCII_NULL = 4,
+    PRESERVE_ASCII_NULL = 4,  // TODO(yangguo): deprecate this.
+    PRESERVE_ONE_BYTE_NULL = 4,
     // Used by WriteUtf8 to replace orphan surrogate code units with the
     // unicode replacement character. Needs to be set to guarantee valid UTF-8
     // output.
@@ -1653,9 +1833,12 @@
   bool IsExternal() const;
 
   /**
-   * Returns true if the string is both external and ASCII
+   * Returns true if the string is both external and one-byte.
    */
-  bool IsExternalAscii() const;
+  bool IsExternalOneByte() const;
+
+  // TODO(yangguo): deprecate this.
+  bool IsExternalAscii() const { return IsExternalOneByte(); }
 
   class V8_EXPORT ExternalStringResourceBase {  // NOLINT
    public:
@@ -1710,33 +1893,32 @@
   };
 
   /**
-   * An ExternalAsciiStringResource is a wrapper around an ASCII
+   * An ExternalOneByteStringResource is a wrapper around a one-byte
    * string buffer that resides outside V8's heap. Implement an
-   * ExternalAsciiStringResource to manage the life cycle of the
+   * ExternalOneByteStringResource to manage the life cycle of the
    * underlying buffer.  Note that the string data must be immutable
-   * and that the data must be strict (7-bit) ASCII, not Latin-1 or
-   * UTF-8, which would require special treatment internally in the
-   * engine and, in the case of UTF-8, do not allow efficient indexing.
-   * Use String::New or convert to 16 bit data for non-ASCII.
+   * and that the data must be Latin-1 and not UTF-8, which would require
+   * special treatment internally in the engine and does not allow efficient
+   * indexing.  Use String::New or convert to 16 bit data for non-Latin-1.
    */
 
-  class V8_EXPORT ExternalAsciiStringResource
+  class V8_EXPORT ExternalOneByteStringResource
       : public ExternalStringResourceBase {
    public:
     /**
      * Override the destructor to manage the life cycle of the underlying
      * buffer.
      */
-    virtual ~ExternalAsciiStringResource() {}
+    virtual ~ExternalOneByteStringResource() {}
     /** The string data from the underlying buffer.*/
     virtual const char* data() const = 0;
-    /** The number of ASCII characters in the string.*/
+    /** The number of Latin-1 characters in the string.*/
     virtual size_t length() const = 0;
    protected:
-    ExternalAsciiStringResource() {}
+    ExternalOneByteStringResource() {}
   };
 
-  typedef ExternalAsciiStringResource ExternalOneByteStringResource;
+  typedef ExternalOneByteStringResource ExternalAsciiStringResource;
 
   /**
    * If the string is an external string, return the ExternalStringResourceBase
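A sketch of the renamed resource interface, wrapping a static Latin-1 buffer whose lifetime exceeds the string's.

#include <string.h>
#include "include/v8.h"

class StaticOneByteResource
    : public v8::String::ExternalOneByteStringResource {
 public:
  explicit StaticOneByteResource(const char* data)
      : data_(data), length_(strlen(data)) {}
  virtual const char* data() const { return data_; }
  virtual size_t length() const { return length_; }
 private:
  const char* data_;
  size_t length_;
};

// v8::Local<v8::String> s = v8::String::NewExternal(
//     isolate, new StaticOneByteResource("hello"));  // V8 disposes resource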
@@ -1753,10 +1935,15 @@
   V8_INLINE ExternalStringResource* GetExternalStringResource() const;
 
   /**
-   * Get the ExternalAsciiStringResource for an external ASCII string.
-   * Returns NULL if IsExternalAscii() doesn't return true.
+   * Get the ExternalOneByteStringResource for an external one-byte string.
+   * Returns NULL if IsExternalOneByte() doesn't return true.
    */
-  const ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
+  const ExternalOneByteStringResource* GetExternalOneByteStringResource() const;
+
+  // TODO(yangguo): deprecate this.
+  const ExternalAsciiStringResource* GetExternalAsciiStringResource() const {
+    return GetExternalOneByteStringResource();
+  }
 
   V8_INLINE static String* Cast(v8::Value* obj);
 
@@ -1813,7 +2000,7 @@
   bool MakeExternal(ExternalStringResource* resource);
 
   /**
-   * Creates a new external string using the ASCII data defined in the given
+   * Creates a new external string using the one-byte data defined in the given
    * resource. When the external string is no longer live on V8's heap the
    * resource will be disposed by calling its Dispose method. The caller of
    * this function should not otherwise delete or modify the resource. Neither
@@ -1821,7 +2008,7 @@
    * destructor of the external string resource.
    */
   static Local<String> NewExternal(Isolate* isolate,
-                                   ExternalAsciiStringResource* resource);
+                                   ExternalOneByteStringResource* resource);
 
   /**
    * Associate an external string resource with this string by transforming it
@@ -1832,7 +2019,7 @@
    * The string is not modified if the operation fails. See NewExternal for
    * information on the lifetime of the resource.
    */
-  bool MakeExternal(ExternalAsciiStringResource* resource);
+  bool MakeExternal(ExternalOneByteStringResource* resource);
 
   /**
    * Returns true if this string can be made external.
@@ -1897,7 +2084,7 @@
  *
  * This is an experimental feature. Use at your own risk.
  */
-class V8_EXPORT Symbol : public Primitive {
+class V8_EXPORT Symbol : public Name {
  public:
   // Returns the print name string of the symbol, or undefined if none.
   Local<Value> Name() const;
@@ -1917,7 +2104,12 @@
   // registry that is not accessible by (and cannot clash with) JavaScript code.
   static Local<Symbol> ForApi(Isolate *isolate, Local<String> name);
 
+  // Well-known symbols
+  static Local<Symbol> GetIterator(Isolate* isolate);
+  static Local<Symbol> GetUnscopables(Isolate* isolate);
+
   V8_INLINE static Symbol* Cast(v8::Value* obj);
+
  private:
   Symbol();
   static void CheckCast(v8::Value* obj);
@@ -2041,12 +2233,19 @@
 typedef void (*AccessorGetterCallback)(
     Local<String> property,
     const PropertyCallbackInfo<Value>& info);
+typedef void (*AccessorNameGetterCallback)(
+    Local<Name> property,
+    const PropertyCallbackInfo<Value>& info);
 
 
 typedef void (*AccessorSetterCallback)(
     Local<String> property,
     Local<Value> value,
     const PropertyCallbackInfo<void>& info);
+typedef void (*AccessorNameSetterCallback)(
+    Local<Name> property,
+    Local<Value> value,
+    const PropertyCallbackInfo<void>& info);
 
 
 /**
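A sketch of the new Name-based callbacks, which fire for both string and symbol property names; the constant 7 and the usage line are illustrative.

#include "include/v8.h"

void NameGetter(v8::Local<v8::Name> property,
                const v8::PropertyCallbackInfo<v8::Value>& info) {
  info.GetReturnValue().Set(v8::Integer::New(info.GetIsolate(), 7));
}

void NameSetter(v8::Local<v8::Name> property, v8::Local<v8::Value> value,
                const v8::PropertyCallbackInfo<void>& info) {
  // Writes are ignored in this sketch.
}

// Works with a symbol key as well as a string key:
//   obj->SetAccessor(v8::Symbol::GetIterator(isolate), NameGetter, NameSetter);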
@@ -2071,9 +2270,7 @@
  */
 class V8_EXPORT Object : public Value {
  public:
-  bool Set(Handle<Value> key,
-           Handle<Value> value,
-           PropertyAttribute attribs = None);
+  bool Set(Handle<Value> key, Handle<Value> value);
 
   bool Set(uint32_t index, Handle<Value> value);
 
@@ -2100,6 +2297,11 @@
    */
   PropertyAttribute GetPropertyAttributes(Handle<Value> key);
 
+  /**
+   * Returns Object.getOwnPropertyDescriptor as per ES5 section 15.2.3.3.
+   */
+  Local<Value> GetOwnPropertyDescriptor(Local<String> key);
+
   bool Has(Handle<Value> key);
 
   bool Delete(Handle<Value> key);
@@ -2118,14 +2320,20 @@
                    Handle<Value> data = Handle<Value>(),
                    AccessControl settings = DEFAULT,
                    PropertyAttribute attribute = None);
+  bool SetAccessor(Handle<Name> name,
+                   AccessorNameGetterCallback getter,
+                   AccessorNameSetterCallback setter = 0,
+                   Handle<Value> data = Handle<Value>(),
+                   AccessControl settings = DEFAULT,
+                   PropertyAttribute attribute = None);
 
   // This function is not yet stable and should not be used at this time.
-  bool SetDeclaredAccessor(Local<String> name,
+  bool SetDeclaredAccessor(Local<Name> name,
                            Local<DeclaredAccessorDescriptor> descriptor,
                            PropertyAttribute attribute = None,
                            AccessControl settings = DEFAULT);
 
-  void SetAccessorProperty(Local<String> name,
+  void SetAccessorProperty(Local<Name> name,
                            Local<Function> getter,
                            Handle<Function> setter = Handle<Function>(),
                            PropertyAttribute attribute = None,
@@ -2185,12 +2393,6 @@
   Local<String> ObjectProtoToString();
 
   /**
-   * Returns the function invoked as a constructor for this object.
-   * May be the null value.
-   */
-  Local<Value> GetConstructor();
-
-  /**
    * Returns the name of the function invoked as a constructor for this object.
    */
   Local<String> GetConstructorName();
@@ -3133,12 +3335,12 @@
 class V8_EXPORT Template : public Data {
  public:
   /** Adds a property to each instance created by this template.*/
-  void Set(Handle<String> name, Handle<Data> value,
+  void Set(Handle<Name> name, Handle<Data> value,
            PropertyAttribute attributes = None);
   V8_INLINE void Set(Isolate* isolate, const char* name, Handle<Data> value);
 
   void SetAccessorProperty(
-     Local<String> name,
+     Local<Name> name,
      Local<FunctionTemplate> getter = Local<FunctionTemplate>(),
      Local<FunctionTemplate> setter = Local<FunctionTemplate>(),
      PropertyAttribute attribute = None,
@@ -3180,9 +3382,18 @@
                              Local<AccessorSignature> signature =
                                  Local<AccessorSignature>(),
                              AccessControl settings = DEFAULT);
+  void SetNativeDataProperty(Local<Name> name,
+                             AccessorNameGetterCallback getter,
+                             AccessorNameSetterCallback setter = 0,
+                             // TODO(dcarney): gcc can't handle Local below
+                             Handle<Value> data = Handle<Value>(),
+                             PropertyAttribute attribute = None,
+                             Local<AccessorSignature> signature =
+                                 Local<AccessorSignature>(),
+                             AccessControl settings = DEFAULT);
 
   // This function is not yet stable and should not be used at this time.
-  bool SetDeclaredAccessor(Local<String> name,
+  bool SetDeclaredAccessor(Local<Name> name,
                            Local<DeclaredAccessorDescriptor> descriptor,
                            PropertyAttribute attribute = None,
                            Local<AccessorSignature> signature =
@@ -3549,12 +3760,20 @@
                    PropertyAttribute attribute = None,
                    Handle<AccessorSignature> signature =
                        Handle<AccessorSignature>());
+  void SetAccessor(Handle<Name> name,
+                   AccessorNameGetterCallback getter,
+                   AccessorNameSetterCallback setter = 0,
+                   Handle<Value> data = Handle<Value>(),
+                   AccessControl settings = DEFAULT,
+                   PropertyAttribute attribute = None,
+                   Handle<AccessorSignature> signature =
+                       Handle<AccessorSignature>());
 
   /**
    * Sets a named property handler on the object template.
    *
-   * Whenever a named property is accessed on objects created from
-   * this object template, the provided callback is invoked instead of
+   * Whenever a property whose name is a string is accessed on objects created
+   * from this object template, the provided callback is invoked instead of
    * accessing the property directly on the JavaScript object.
    *
    * \param getter The callback to invoke when getting a property.
@@ -3757,11 +3976,11 @@
 
 // --- Extensions ---
 
-class V8_EXPORT ExternalAsciiStringResourceImpl
-    : public String::ExternalAsciiStringResource {
+class V8_EXPORT ExternalOneByteStringResourceImpl
+    : public String::ExternalOneByteStringResource {
  public:
-  ExternalAsciiStringResourceImpl() : data_(0), length_(0) {}
-  ExternalAsciiStringResourceImpl(const char* data, size_t length)
+  ExternalOneByteStringResourceImpl() : data_(0), length_(0) {}
+  ExternalOneByteStringResourceImpl(const char* data, size_t length)
       : data_(data), length_(length) {}
   const char* data() const { return data_; }
   size_t length() const { return length_; }
@@ -3791,7 +4010,7 @@
 
   const char* name() const { return name_; }
   size_t source_length() const { return source_length_; }
-  const String::ExternalAsciiStringResource* source() const {
+  const String::ExternalOneByteStringResource* source() const {
     return &source_; }
   int dependency_count() { return dep_count_; }
   const char** dependencies() { return deps_; }
@@ -3801,7 +4020,7 @@
  private:
   const char* name_;
   size_t source_length_;  // expected to initialize before source_
-  ExternalAsciiStringResourceImpl source_;
+  ExternalOneByteStringResourceImpl source_;
   int dep_count_;
   const char** deps_;
   bool auto_enable_;
@@ -3880,13 +4099,6 @@
 };
 
 
-/**
- * Sets the given ResourceConstraints on the given Isolate.
- */
-bool V8_EXPORT SetResourceConstraints(Isolate* isolate,
-                                      ResourceConstraints* constraints);
-
-
 // --- Exceptions ---
 
 
@@ -4024,18 +4236,150 @@
 
 class RetainedObjectInfo;
 
+
 /**
- * Isolate represents an isolated instance of the V8 engine.  V8
- * isolates have completely separate states.  Objects from one isolate
- * must not be used in other isolates.  When V8 is initialized a
- * default isolate is implicitly created and entered.  The embedder
- * can create additional isolates and use them in parallel in multiple
- * threads.  An isolate can be entered by at most one thread at any
- * given time.  The Locker/Unlocker API must be used to synchronize.
+ * FunctionEntryHook is the type of the profile entry hook called at entry to
+ * any generated function when function-level profiling is enabled.
+ *
+ * \param function the address of the function that's being entered.
+ * \param return_addr_location points to a location on stack where the machine
+ *    return address resides. This can be used to identify the caller of
+ *    \p function, and/or modified to divert execution when \p function exits.
+ *
+ * \note the entry hook must not cause garbage collection.
+ */
+typedef void (*FunctionEntryHook)(uintptr_t function,
+                                  uintptr_t return_addr_location);
+
+/**
+ * A JIT code event is issued each time code is added, moved or removed.
+ *
+ * \note removal events are not currently issued.
+ */
+struct JitCodeEvent {
+  enum EventType {
+    CODE_ADDED,
+    CODE_MOVED,
+    CODE_REMOVED,
+    CODE_ADD_LINE_POS_INFO,
+    CODE_START_LINE_INFO_RECORDING,
+    CODE_END_LINE_INFO_RECORDING
+  };
+  // Definition of the code position type. The "POSITION" type means the place
+  // in the source code which are of interest when making stack traces to
+  // pin-point the source location of a stack frame as close as possible.
+  // The "STATEMENT_POSITION" means the place at the beginning of each
+  // statement, and is used to indicate possible break locations.
+  enum PositionType { POSITION, STATEMENT_POSITION };
+
+  // Type of event.
+  EventType type;
+  // Start of the instructions.
+  void* code_start;
+  // Size of the instructions.
+  size_t code_len;
+  // Script info for CODE_ADDED event.
+  Handle<UnboundScript> script;
+  // User-defined data for *_LINE_INFO_* event. It's used to hold the source
+  // code line information which is returned from the
+  // CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
+  // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
+  void* user_data;
+
+  struct name_t {
+    // Name of the object associated with the code, note that the string is not
+    // zero-terminated.
+    const char* str;
+    // Number of chars in str.
+    size_t len;
+  };
+
+  struct line_info_t {
+    // PC offset
+    size_t offset;
+    // Code position
+    size_t pos;
+    // The position type.
+    PositionType position_type;
+  };
+
+  union {
+    // Only valid for CODE_ADDED.
+    struct name_t name;
+
+    // Only valid for CODE_ADD_LINE_POS_INFO
+    struct line_info_t line_info;
+
+    // New location of instructions. Only valid for CODE_MOVED.
+    void* new_code_start;
+  };
+};
+
+/**
+ * Option flags passed to the SetJitCodeEventHandler function.
+ */
+enum JitCodeEventOptions {
+  kJitCodeEventDefault = 0,
+  // Generate callbacks for already existent code.
+  kJitCodeEventEnumExisting = 1
+};
+
+
+/**
+ * Callback function passed to SetJitCodeEventHandler.
+ *
+ * \param event code add, move or removal event.
+ */
+typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
+
+
+/**
+ * Isolate represents an isolated instance of the V8 engine.  V8 isolates have
+ * completely separate states.  Objects from one isolate must not be used in
+ * other isolates.  The embedder can create multiple isolates and use them in
+ * parallel in multiple threads.  An isolate can be entered by at most one
+ * thread at any given time.  The Locker/Unlocker API must be used to
+ * synchronize.
  */
 class V8_EXPORT Isolate {
  public:
   /**
+   * Initial configuration parameters for a new Isolate.
+   */
+  struct CreateParams {
+    CreateParams()
+        : entry_hook(NULL),
+          code_event_handler(NULL),
+          enable_serializer(false) {}
+
+    /**
+     * The optional entry_hook allows the host application to provide the
+     * address of a function that's invoked on entry to every V8-generated
+     * function.  Note that entry_hook is invoked at the very start of each
+     * generated function. Furthermore, if an entry_hook is given, V8 will
+     * always run without a context snapshot.
+     */
+    FunctionEntryHook entry_hook;
+
+    /**
+     * Allows the host application to provide the address of a function that is
+     * notified each time code is added, moved or removed.
+     */
+    JitCodeEventHandler code_event_handler;
+
+    /**
+     * ResourceConstraints to use for the new Isolate.
+     */
+    ResourceConstraints constraints;
+
+    /**
+     * This flag currently renders the Isolate unusable.
+     */
+    bool enable_serializer;
+  };
+
+
+  /**
    * Stack-allocated class which sets the isolate for all operations
    * executed within a local scope.
    */
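A sketch of a handler for the relocated JitCodeEvent struct; note that name.str is not NUL-terminated, so its length must be passed explicitly.

#include <stdio.h>
#include "include/v8.h"

void LogJitCode(const v8::JitCodeEvent* event) {
  if (event->type != v8::JitCodeEvent::CODE_ADDED) return;
  printf("code @%p (%zu bytes): %.*s\n", event->code_start, event->code_len,
         static_cast<int>(event->name.len), event->name.str);
}

The handler can be installed per isolate via Isolate::SetJitCodeEventHandler, or through CreateParams::code_event_handler to also see code created during isolate setup.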
@@ -4123,13 +4467,29 @@
   };
 
   /**
+   * Features reported via the SetUseCounterCallback callback. Do not change
+   * assigned numbers of existing items; add new features to the end of this
+   * list.
+   */
+  enum UseCounterFeature {
+    kUseAsm = 0,
+    kUseCounterFeatureCount  // This enum value must be last.
+  };
+
+  typedef void (*UseCounterCallback)(Isolate* isolate,
+                                     UseCounterFeature feature);
+
+
+  /**
    * Creates a new isolate.  Does not change the currently entered
    * isolate.
    *
    * When an isolate is no longer used its resources should be freed
    * by calling Dispose().  Using the delete operator is not allowed.
+   *
+   * V8::Initialize() must have run prior to this.
    */
-  static Isolate* New();
+  static Isolate* New(const CreateParams& params = CreateParams());
 
   /**
    * Returns the entered isolate for the current thread or NULL in
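A sketch of the new creation path, reusing LogJitCode from the sketch above; the set_max_old_space_size setter on ResourceConstraints is an assumption based on this era's API.

#include <stdio.h>
#include "include/v8.h"

void CountUse(v8::Isolate* isolate, v8::Isolate::UseCounterFeature feature) {
  if (feature == v8::Isolate::kUseAsm) printf("a script used asm.js\n");
}

v8::Isolate* MakeIsolate() {
  v8::Isolate::CreateParams params;
  params.code_event_handler = LogJitCode;          // see the sketch above
  params.constraints.set_max_old_space_size(256);  // MB; assumed setter name
  v8::Isolate* isolate = v8::Isolate::New(params);
  isolate->SetUseCounterCallback(CountUse);
  return isolate;
}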
@@ -4392,6 +4752,11 @@
   bool WillAutorunMicrotasks() const;
 
   /**
+   * Sets a callback for counting the number of times a feature of V8 is used.
+   */
+  void SetUseCounterCallback(UseCounterCallback callback);
+
+  /**
    * Enables the host application to provide a mechanism for recording
    * statistics counters.
    */
@@ -4406,6 +4771,70 @@
   void SetCreateHistogramFunction(CreateHistogramCallback);
   void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
 
+  /**
+   * Optional notification that the embedder is idle.
+   * V8 uses the notification to reduce memory footprint.
+   * This call can be used repeatedly if the embedder remains idle.
+   * Returns true if the embedder should stop calling IdleNotification
+   * until real work has been done.  This indicates that V8 has done
+   * as much cleanup as it will be able to do.
+   *
+   * The idle_time_in_ms argument specifies the time V8 has to reduce
+   * the memory footprint. There is no guarantee that the actual work will be
+   * done within the time limit.
+   */
+  bool IdleNotification(int idle_time_in_ms);
+
+  /**
+   * Optional notification that the system is running low on memory.
+   * V8 uses these notifications to attempt to free memory.
+   */
+  void LowMemoryNotification();
+
+  /**
+   * Optional notification that a context has been disposed. V8 uses
+   * these notifications to guide the GC heuristic. Returns the number
+   * of context disposals - including this one - since the last time
+   * V8 had a chance to clean up.
+   */
+  int ContextDisposedNotification();
+
+  /**
+   * Allows the host application to provide the address of a function that is
+   * notified each time code is added, moved or removed.
+   *
+   * \param options options for the JIT code event handler.
+   * \param event_handler the JIT code event handler, which will be invoked
+   *     each time code is added, moved or removed.
+   * \note \p event_handler won't get notified of existent code.
+   * \note since code removal notifications are not currently issued, the
+   *     \p event_handler may get notifications of code that overlaps earlier
+   *     code notifications. This happens when code areas are reused, and the
+   *     earlier overlapping code areas should therefore be discarded.
+   * \note the events passed to \p event_handler and the strings they point to
+   *     are not guaranteed to live past each call. The \p event_handler must
+   *     copy strings and other parameters it needs to keep around.
+   * \note the set of events declared in JitCodeEvent::EventType is expected to
+   *     grow over time, and the JitCodeEvent structure is expected to accrue
+   *     new members. The \p event_handler function must ignore event codes
+   *     it does not recognize to maintain future compatibility.
+   * \note Use Isolate::CreateParams to get events for code executed during
+   *     Isolate setup.
+   */
+  void SetJitCodeEventHandler(JitCodeEventOptions options,
+                              JitCodeEventHandler event_handler);
+
+  /**
+   * Modifies the stack limit for this Isolate.
+   *
+   * \param stack_limit An address beyond which the VM's stack may not grow.
+   *
+   * \note  If you are using threads then you should hold the V8::Locker lock
+   *     while setting the stack limit and you must set a non-default stack
+   *     limit separately for each thread.
+   */
+  void SetStackLimit(uintptr_t stack_limit);
+
  private:
   template<class K, class V, class Traits> friend class PersistentValueMap;
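Two small sketches for the per-isolate notifications and the new stack limit, assuming a downward-growing native stack.

#include <stdint.h>
#include "include/v8.h"

// Give V8 up to 10 ms of cleanup work whenever the embedder is idle.
void OnEmbedderIdle(v8::Isolate* isolate) {
  if (isolate->IdleNotification(10)) {
    // V8 is done cleaning up; stop calling until real work has happened.
  }
}

// Leave V8 roughly 512 KB of the current thread's stack.
void LimitStack(v8::Isolate* isolate) {
  uintptr_t here = reinterpret_cast<uintptr_t>(&here);
  isolate->SetStackLimit(here - 512 * 1024);
}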
 
@@ -4485,106 +4914,6 @@
 
 
 /**
- * FunctionEntryHook is the type of the profile entry hook called at entry to
- * any generated function when function-level profiling is enabled.
- *
- * \param function the address of the function that's being entered.
- * \param return_addr_location points to a location on stack where the machine
- *    return address resides. This can be used to identify the caller of
- *    \p function, and/or modified to divert execution when \p function exits.
- *
- * \note the entry hook must not cause garbage collection.
- */
-typedef void (*FunctionEntryHook)(uintptr_t function,
-                                  uintptr_t return_addr_location);
-
-
-/**
- * A JIT code event is issued each time code is added, moved or removed.
- *
- * \note removal events are not currently issued.
- */
-struct JitCodeEvent {
-  enum EventType {
-    CODE_ADDED,
-    CODE_MOVED,
-    CODE_REMOVED,
-    CODE_ADD_LINE_POS_INFO,
-    CODE_START_LINE_INFO_RECORDING,
-    CODE_END_LINE_INFO_RECORDING
-  };
-  // Definition of the code position type. The "POSITION" type means the place
-  // in the source code which are of interest when making stack traces to
-  // pin-point the source location of a stack frame as close as possible.
-  // The "STATEMENT_POSITION" means the place at the beginning of each
-  // statement, and is used to indicate possible break locations.
-  enum PositionType {
-    POSITION,
-    STATEMENT_POSITION
-  };
-
-  // Type of event.
-  EventType type;
-  // Start of the instructions.
-  void* code_start;
-  // Size of the instructions.
-  size_t code_len;
-  // Script info for CODE_ADDED event.
-  Handle<Script> script;
-  // User-defined data for *_LINE_INFO_* event. It's used to hold the source
-  // code line information which is returned from the
-  // CODE_START_LINE_INFO_RECORDING event. And it's passed to subsequent
-  // CODE_ADD_LINE_POS_INFO and CODE_END_LINE_INFO_RECORDING events.
-  void* user_data;
-
-  struct name_t {
-    // Name of the object associated with the code, note that the string is not
-    // zero-terminated.
-    const char* str;
-    // Number of chars in str.
-    size_t len;
-  };
-
-  struct line_info_t {
-    // PC offset
-    size_t offset;
-    // Code postion
-    size_t pos;
-    // The position type.
-    PositionType position_type;
-  };
-
-  union {
-    // Only valid for CODE_ADDED.
-    struct name_t name;
-
-    // Only valid for CODE_ADD_LINE_POS_INFO
-    struct line_info_t line_info;
-
-    // New location of instructions. Only valid for CODE_MOVED.
-    void* new_code_start;
-  };
-};
-
-/**
- * Option flags passed to the SetJitCodeEventHandler function.
- */
-enum JitCodeEventOptions {
-  kJitCodeEventDefault = 0,
-  // Generate callbacks for already existent code.
-  kJitCodeEventEnumExisting = 1
-};
-
-
-/**
- * Callback function passed to SetJitCodeEventHandler.
- *
- * \param event code add, move or removal event.
- */
-typedef void (*JitCodeEventHandler)(const JitCodeEvent* event);
-
-
-/**
  * Interface for iterating through all external resources in the heap.
  */
 class V8_EXPORT ExternalResourceVisitor {  // NOLINT
@@ -4659,6 +4988,24 @@
   static void SetDecompressedStartupData(StartupData* decompressed_data);
 
   /**
+   * Hand startup data to V8, in case the embedder has chosen to build
+   * V8 with external startup data.
+   *
+   * Note:
+   * - By default the startup data is linked into the V8 library, in which
+   *   case this function is not meaningful.
+   * - If this needs to be called, it needs to be called before V8
+   *   tries to make use of its built-ins.
+   * - To avoid unnecessary copies of data, V8 will point directly into the
+   *   given data blob, so pretty please keep it around until V8 exits.
+   * - Compression of the startup blob might be useful, but needs to be
+   *   handled entirely on the embedder's side.
+   * - The call will abort if the data is invalid.
+   */
+  static void SetNativesDataBlob(StartupData* startup_blob);
+  static void SetSnapshotDataBlob(StartupData* startup_blob);
+
+  /**
    * Adds a message listener.
    *
    * The same message listener can be added more than once and in that
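A sketch of feeding external startup data to V8 per the notes above. The data/raw_size field names follow this revision's v8::StartupData, and the *_blob.bin file names are illustrative; both are assumptions of this sketch.

#include <stdio.h>
#include <stdlib.h>
#include "include/v8.h"

// Loads a build-time blob; the buffer must outlive V8 (see the notes above).
v8::StartupData ReadBlob(const char* path) {
  v8::StartupData blob = v8::StartupData();
  FILE* f = fopen(path, "rb");
  if (f == NULL) return blob;
  fseek(f, 0, SEEK_END);
  long size = ftell(f);
  rewind(f);
  char* data = static_cast<char*>(malloc(size));
  if (fread(data, 1, size, f) == static_cast<size_t>(size)) {
    blob.data = data;  // V8 points directly into this; keep it alive.
    blob.raw_size = static_cast<int>(size);
  }
  fclose(f);
  return blob;
}

// Before V8 needs its built-ins:
//   v8::StartupData natives = ReadBlob("natives_blob.bin");
//   v8::StartupData snapshot = ReadBlob("snapshot_blob.bin");
//   v8::V8::SetNativesDataBlob(&natives);
//   v8::V8::SetSnapshotDataBlob(&snapshot);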
@@ -4699,28 +5046,6 @@
   /** Get the version string. */
   static const char* GetVersion();
 
-  /**
-   * Enables the host application to provide a mechanism for recording
-   * statistics counters.
-   *
-   * Deprecated, use Isolate::SetCounterFunction instead.
-   */
-  static void SetCounterFunction(CounterLookupCallback);
-
-  /**
-   * Enables the host application to provide a mechanism for recording
-   * histograms. The CreateHistogram function returns a
-   * histogram which will later be passed to the AddHistogramSample
-   * function.
-   *
-   * Deprecated, use Isolate::SetCreateHistogramFunction instead.
-   * Isolate::SetAddHistogramSampleFunction instead.
-   */
-  static void SetCreateHistogramFunction(CreateHistogramCallback);
-
-  /** Deprecated, use Isolate::SetAddHistogramSampleFunction instead. */
-  static void SetAddHistogramSampleFunction(AddHistogramSampleCallback);
-
   /** Callback function for reporting failed access checks.*/
   static void SetFailedAccessCheckCallbackFunction(FailedAccessCheckCallback);
 
@@ -4776,9 +5101,8 @@
   static void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
 
   /**
-   * Initializes from snapshot if possible. Otherwise, attempts to
-   * initialize from scratch.  This function is called implicitly if
-   * you use the API without calling it first.
+   * Initializes V8. This function needs to be called before the first Isolate
+   * is created. It always returns true.
    */
   static bool Initialize();
 
@@ -4796,45 +5120,6 @@
       ReturnAddressLocationResolver return_address_resolver);
 
   /**
-   * Allows the host application to provide the address of a function that's
-   * invoked on entry to every V8-generated function.
-   * Note that \p entry_hook is invoked at the very start of each
-   * generated function.
-   *
-   * \param isolate the isolate to operate on.
-   * \param entry_hook a function that will be invoked on entry to every
-   *   V8-generated function.
-   * \returns true on success on supported platforms, false on failure.
-   * \note Setting an entry hook can only be done very early in an isolates
-   *   lifetime, and once set, the entry hook cannot be revoked.
-   */
-  static bool SetFunctionEntryHook(Isolate* isolate,
-                                   FunctionEntryHook entry_hook);
-
-  /**
-   * Allows the host application to provide the address of a function that is
-   * notified each time code is added, moved or removed.
-   *
-   * \param options options for the JIT code event handler.
-   * \param event_handler the JIT code event handler, which will be invoked
-   *     each time code is added, moved or removed.
-   * \note \p event_handler won't get notified of existent code.
-   * \note since code removal notifications are not currently issued, the
-   *     \p event_handler may get notifications of code that overlaps earlier
-   *     code notifications. This happens when code areas are reused, and the
-   *     earlier overlapping code areas should therefore be discarded.
-   * \note the events passed to \p event_handler and the strings they point to
-   *     are not guaranteed to live past each call. The \p event_handler must
-   *     copy strings and other parameters it needs to keep around.
-   * \note the set of events declared in JitCodeEvent::EventType is expected to
-   *     grow over time, and the JitCodeEvent structure is expected to accrue
-   *     new members. The \p event_handler function must ignore event codes
-   *     it does not recognize to maintain future compatibility.
-   */
-  static void SetJitCodeEventHandler(JitCodeEventOptions options,
-                                     JitCodeEventHandler event_handler);
-
-  /**
    * Forcefully terminate the current thread of JavaScript execution
    * in the given isolate.
    *
@@ -4910,34 +5195,6 @@
       Isolate* isolate, PersistentHandleVisitor* visitor);
 
   /**
-   * Optional notification that the embedder is idle.
-   * V8 uses the notification to reduce memory footprint.
-   * This call can be used repeatedly if the embedder remains idle.
-   * Returns true if the embedder should stop calling IdleNotification
-   * until real work has been done.  This indicates that V8 has done
-   * as much cleanup as it will be able to do.
-   *
-   * The hint argument specifies the amount of work to be done in the function
-   * on scale from 1 to 1000. There is no guarantee that the actual work will
-   * match the hint.
-   */
-  static bool IdleNotification(int hint = 1000);
-
-  /**
-   * Optional notification that the system is running low on memory.
-   * V8 uses these notifications to attempt to free memory.
-   */
-  static void LowMemoryNotification();
-
-  /**
-   * Optional notification that a context has been disposed. V8 uses
-   * these notifications to guide the GC heuristic. Returns the number
-   * of context disposals - including this one - since the last time
-   * V8 had a chance to clean up.
-   */
-  static int ContextDisposedNotification();
-
-  /**
    * Initialize the ICU library bundled with V8. The embedder should only
    * invoke this method when using the bundled ICU. Returns true on success.
    *
@@ -5064,7 +5321,8 @@
 
   /**
    * Clears any exceptions that may have been caught by this try/catch block.
-   * After this method has been called, HasCaught() will return false.
+   * After this method has been called, HasCaught() will return false. Cancels
+   * the scheduled exception if it was caught and ReThrow() has not been
+   * called beforehand.
    *
    * It is not necessary to clear a try/catch block before using it again; if
    * another exception is thrown the previously caught exception will just be
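A sketch of the documented Reset() semantics: once reset, a caught exception is cancelled rather than rescheduled when the TryCatch goes out of scope.

#include "include/v8.h"

void RunAndSwallow(v8::Local<v8::Script> script) {
  v8::TryCatch try_catch;
  script->Run();
  if (try_catch.HasCaught()) {
    try_catch.Reset();  // Drops the exception; HasCaught() is now false.
  }
}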
@@ -5107,6 +5365,8 @@
   }
 
  private:
+  void ResetInternal();
+
   // Make it hard to create heap-allocated TryCatch blocks.
   TryCatch(const TryCatch&);
   void operator=(const TryCatch&);
@@ -5228,14 +5488,6 @@
    */
   void Exit();
 
-  /**
-   * Returns true if the context has experienced an out of memory situation.
-   * Since V8 always treats OOM as fatal error, this can no longer return true.
-   * Therefore this is now deprecated.
-   * */
-  V8_DEPRECATED("This can no longer happen. OOM is a fatal error.",
-                bool HasOutOfMemoryException()) { return false; }
-
   /** Returns an isolate associated with a current context. */
   v8::Isolate* GetIsolate();
 
@@ -5472,16 +5724,17 @@
 template<int kSmiShiftSize>
 V8_INLINE internal::Object* IntToSmi(int value) {
   int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
-  intptr_t tagged_value =
-      (static_cast<intptr_t>(value) << smi_shift_bits) | kSmiTag;
+  uintptr_t tagged_value =
+      (static_cast<uintptr_t>(value) << smi_shift_bits) | kSmiTag;
   return reinterpret_cast<internal::Object*>(tagged_value);
 }
 
 // Smi constants for 32-bit systems.
 template <> struct SmiTagging<4> {
-  static const int kSmiShiftSize = 0;
-  static const int kSmiValueSize = 31;
-  V8_INLINE static int SmiToInt(internal::Object* value) {
+  enum { kSmiShiftSize = 0, kSmiValueSize = 31 };
+  static int SmiShiftSize() { return kSmiShiftSize; }
+  static int SmiValueSize() { return kSmiValueSize; }
+  V8_INLINE static int SmiToInt(const internal::Object* value) {
     int shift_bits = kSmiTagSize + kSmiShiftSize;
     // Throw away top 32 bits and shift down (requires >> to be sign extending).
     return static_cast<int>(reinterpret_cast<intptr_t>(value)) >> shift_bits;
@@ -5507,9 +5760,10 @@
 
 // Smi constants for 64-bit systems.
 template <> struct SmiTagging<8> {
-  static const int kSmiShiftSize = 31;
-  static const int kSmiValueSize = 32;
-  V8_INLINE static int SmiToInt(internal::Object* value) {
+  enum { kSmiShiftSize = 31, kSmiValueSize = 32 };
+  static int SmiShiftSize() { return kSmiShiftSize; }
+  static int SmiValueSize() { return kSmiValueSize; }
+  V8_INLINE static int SmiToInt(const internal::Object* value) {
     int shift_bits = kSmiTagSize + kSmiShiftSize;
     // Shift down and throw away top 32 bits.
     return static_cast<int>(reinterpret_cast<intptr_t>(value) >> shift_bits);
@@ -5539,7 +5793,8 @@
   // These values match non-compiler-dependent values defined within
   // the implementation of v8.
   static const int kHeapObjectMapOffset = 0;
-  static const int kMapInstanceTypeOffset = 1 * kApiPointerSize + kApiIntSize;
+  static const int kMapInstanceTypeAndBitFieldOffset =
+      1 * kApiPointerSize + kApiIntSize;
   static const int kStringResourceOffset = 3 * kApiPointerSize;
 
   static const int kOddballKindOffset = 3 * kApiPointerSize;
@@ -5547,11 +5802,11 @@
   static const int kJSObjectHeaderSize = 3 * kApiPointerSize;
   static const int kFixedArrayHeaderSize = 2 * kApiPointerSize;
   static const int kContextHeaderSize = 2 * kApiPointerSize;
-  static const int kContextEmbedderDataIndex = 76;
+  static const int kContextEmbedderDataIndex = 95;
   static const int kFullStringRepresentationMask = 0x07;
   static const int kStringEncodingMask = 0x4;
   static const int kExternalTwoByteRepresentationTag = 0x02;
-  static const int kExternalAsciiRepresentationTag = 0x06;
+  static const int kExternalOneByteRepresentationTag = 0x06;
 
   static const int kIsolateEmbedderDataOffset = 0 * kApiPointerSize;
   static const int kAmountOfExternalAllocatedMemoryOffset =
@@ -5565,7 +5820,7 @@
   static const int kNullValueRootIndex = 7;
   static const int kTrueValueRootIndex = 8;
   static const int kFalseValueRootIndex = 9;
-  static const int kEmptyStringRootIndex = 163;
+  static const int kEmptyStringRootIndex = 164;
 
   // The external allocation limit should be below 256 MB on all architectures
   // to avoid that resource-constrained embedders run low on memory.
@@ -5580,10 +5835,10 @@
   static const int kNodeIsIndependentShift = 4;
   static const int kNodeIsPartiallyDependentShift = 5;
 
-  static const int kJSObjectType = 0xbb;
+  static const int kJSObjectType = 0xbc;
   static const int kFirstNonstringType = 0x80;
   static const int kOddballType = 0x83;
-  static const int kForeignType = 0x87;
+  static const int kForeignType = 0x88;
 
   static const int kUndefinedOddballKind = 5;
   static const int kNullOddballKind = 3;
@@ -5597,12 +5852,12 @@
 #endif
   }
 
-  V8_INLINE static bool HasHeapObjectTag(internal::Object* value) {
+  V8_INLINE static bool HasHeapObjectTag(const internal::Object* value) {
     return ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
             kHeapObjectTag);
   }
 
-  V8_INLINE static int SmiValue(internal::Object* value) {
+  V8_INLINE static int SmiValue(const internal::Object* value) {
     return PlatformSmiTagging::SmiToInt(value);
   }
 
@@ -5614,13 +5869,15 @@
     return PlatformSmiTagging::IsValidSmi(value);
   }
 
-  V8_INLINE static int GetInstanceType(internal::Object* obj) {
+  V8_INLINE static int GetInstanceType(const internal::Object* obj) {
     typedef internal::Object O;
     O* map = ReadField<O*>(obj, kHeapObjectMapOffset);
-    return ReadField<uint8_t>(map, kMapInstanceTypeOffset);
+    // Map::InstanceType is defined so that it will always be loaded into
+    // the LS 8 bits of one 16-bit word, regardless of endianness.
+    return ReadField<uint16_t>(map, kMapInstanceTypeAndBitFieldOffset) & 0xff;
   }
 
-  V8_INLINE static int GetOddballKind(internal::Object* obj) {
+  V8_INLINE static int GetOddballKind(const internal::Object* obj) {
     typedef internal::Object O;
     return SmiValue(ReadField<O*>(obj, kOddballKindOffset));
   }
@@ -5638,7 +5895,7 @@
   V8_INLINE static void UpdateNodeFlag(internal::Object** obj,
                                        bool value, int shift) {
       uint8_t* addr = reinterpret_cast<uint8_t*>(obj) + kNodeFlagsOffset;
-      uint8_t mask = static_cast<uint8_t>(1 << shift);
+      uint8_t mask = static_cast<uint8_t>(1U << shift);
       *addr = static_cast<uint8_t>((*addr & ~mask) | (value << shift));
   }
 
@@ -5653,18 +5910,19 @@
     *addr = static_cast<uint8_t>((*addr & ~kNodeStateMask) | value);
   }
 
-  V8_INLINE static void SetEmbedderData(v8::Isolate *isolate,
+  V8_INLINE static void SetEmbedderData(v8::Isolate* isolate,
                                         uint32_t slot,
-                                        void *data) {
+                                        void* data) {
     uint8_t *addr = reinterpret_cast<uint8_t *>(isolate) +
                     kIsolateEmbedderDataOffset + slot * kApiPointerSize;
     *reinterpret_cast<void**>(addr) = data;
   }
 
-  V8_INLINE static void* GetEmbedderData(v8::Isolate* isolate, uint32_t slot) {
-    uint8_t* addr = reinterpret_cast<uint8_t*>(isolate) +
+  V8_INLINE static void* GetEmbedderData(const v8::Isolate* isolate,
+                                         uint32_t slot) {
+    const uint8_t* addr = reinterpret_cast<const uint8_t*>(isolate) +
         kIsolateEmbedderDataOffset + slot * kApiPointerSize;
-    return *reinterpret_cast<void**>(addr);
+    return *reinterpret_cast<void* const*>(addr);
   }
 
   V8_INLINE static internal::Object** GetRoot(v8::Isolate* isolate,
@@ -5674,16 +5932,17 @@
   }
 
   template <typename T>
-  V8_INLINE static T ReadField(internal::Object* ptr, int offset) {
-    uint8_t* addr = reinterpret_cast<uint8_t*>(ptr) + offset - kHeapObjectTag;
-    return *reinterpret_cast<T*>(addr);
+  V8_INLINE static T ReadField(const internal::Object* ptr, int offset) {
+    const uint8_t* addr =
+        reinterpret_cast<const uint8_t*>(ptr) + offset - kHeapObjectTag;
+    return *reinterpret_cast<const T*>(addr);
   }
 
   template <typename T>
-  V8_INLINE static T ReadEmbedderData(v8::Context* context, int index) {
+  V8_INLINE static T ReadEmbedderData(const v8::Context* context, int index) {
     typedef internal::Object O;
     typedef internal::Internals I;
-    O* ctx = *reinterpret_cast<O**>(context);
+    O* ctx = *reinterpret_cast<O* const*>(context);
     int embedder_data_offset = I::kContextHeaderSize +
         (internal::kApiPointerSize * I::kContextEmbedderDataIndex);
     O* embedder_data = I::ReadField<O*>(ctx, embedder_data_offset);
@@ -6093,11 +6352,17 @@
   return resource_column_offset_;
 }
 
+
 Handle<Boolean> ScriptOrigin::ResourceIsSharedCrossOrigin() const {
   return resource_is_shared_cross_origin_;
 }
 
 
+Handle<Integer> ScriptOrigin::ScriptID() const {
+  return script_id_;
+}
+
+
 ScriptCompiler::Source::Source(Local<String> string, const ScriptOrigin& origin,
                                CachedData* data)
     : source_string(string),
@@ -6189,7 +6454,7 @@
 String::ExternalStringResource* String::GetExternalStringResource() const {
   typedef internal::Object O;
   typedef internal::Internals I;
-  O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
+  O* obj = *reinterpret_cast<O* const*>(this);
   String::ExternalStringResource* result;
   if (I::IsExternalTwoByteString(I::GetInstanceType(obj))) {
     void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
@@ -6208,11 +6473,11 @@
     String::Encoding* encoding_out) const {
   typedef internal::Object O;
   typedef internal::Internals I;
-  O* obj = *reinterpret_cast<O**>(const_cast<String*>(this));
+  O* obj = *reinterpret_cast<O* const*>(this);
   int type = I::GetInstanceType(obj) & I::kFullStringRepresentationMask;
   *encoding_out = static_cast<Encoding>(type & I::kStringEncodingMask);
   ExternalStringResourceBase* resource = NULL;
-  if (type == I::kExternalAsciiRepresentationTag ||
+  if (type == I::kExternalOneByteRepresentationTag ||
       type == I::kExternalTwoByteRepresentationTag) {
     void* value = I::ReadField<void*>(obj, I::kStringResourceOffset);
     resource = static_cast<ExternalStringResourceBase*>(value);
@@ -6235,7 +6500,7 @@
 bool Value::QuickIsUndefined() const {
   typedef internal::Object O;
   typedef internal::Internals I;
-  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+  O* obj = *reinterpret_cast<O* const*>(this);
   if (!I::HasHeapObjectTag(obj)) return false;
   if (I::GetInstanceType(obj) != I::kOddballType) return false;
   return (I::GetOddballKind(obj) == I::kUndefinedOddballKind);
@@ -6253,7 +6518,7 @@
 bool Value::QuickIsNull() const {
   typedef internal::Object O;
   typedef internal::Internals I;
-  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+  O* obj = *reinterpret_cast<O* const*>(this);
   if (!I::HasHeapObjectTag(obj)) return false;
   if (I::GetInstanceType(obj) != I::kOddballType) return false;
   return (I::GetOddballKind(obj) == I::kNullOddballKind);
@@ -6271,7 +6536,7 @@
 bool Value::QuickIsString() const {
   typedef internal::Object O;
   typedef internal::Internals I;
-  O* obj = *reinterpret_cast<O**>(const_cast<Value*>(this));
+  O* obj = *reinterpret_cast<O* const*>(this);
   if (!I::HasHeapObjectTag(obj)) return false;
   return (I::GetInstanceType(obj) < I::kFirstNonstringType);
 }
@@ -6282,6 +6547,14 @@
 }
 
 
+Name* Name::Cast(v8::Value* value) {
+#ifdef V8_ENABLE_CHECKS
+  CheckCast(value);
+#endif
+  return static_cast<Name*>(value);
+}
+
+
 Symbol* Symbol::Cast(v8::Value* value) {
 #ifdef V8_ENABLE_CHECKS
   CheckCast(value);
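
For orientation, here is a minimal standalone sketch of the Smi tagging arithmetic that SmiToInt above relies on, assuming V8's 64-bit layout (kSmiTagSize == 1, kSmiShiftSize == 31); the TagSmi/UntagSmi helper names are illustrative and not part of this patch:

#include <cassert>
#include <cstdint>

// 64-bit Smi layout: the 32-bit payload occupies the upper half of the
// word; the low bit is the tag (0 for Smi, 1 for heap objects).
static const int kSmiTagSize = 1;
static const int kSmiShiftSize = 31;

static intptr_t TagSmi(int value) {
  // Place the payload above the shift and tag bits; the tag bit stays 0.
  // (Shift on the unsigned type to keep the sketch free of UB.)
  return static_cast<intptr_t>(static_cast<uintptr_t>(value)
                               << (kSmiTagSize + kSmiShiftSize));
}

static int UntagSmi(intptr_t tagged) {
  // Arithmetic shift down, discarding the top 32 bits (sign-preserving),
  // exactly as SmiToInt does in the hunk above.
  return static_cast<int>(tagged >> (kSmiTagSize + kSmiShiftSize));
}

int main() {
  assert(UntagSmi(TagSmi(-42)) == -42);
  return 0;
}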
diff --git a/include/v8config.h b/include/v8config.h
index 452ffc7..87de994 100644
--- a/include/v8config.h
+++ b/include/v8config.h
@@ -175,7 +175,12 @@
 //  V8_HAS_ATTRIBUTE_VISIBILITY         - __attribute__((visibility)) supported
 //  V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT - __attribute__((warn_unused_result))
 //                                        supported
+//  V8_HAS_BUILTIN_CLZ                  - __builtin_clz() supported
+//  V8_HAS_BUILTIN_CTZ                  - __builtin_ctz() supported
 //  V8_HAS_BUILTIN_EXPECT               - __builtin_expect() supported
+//  V8_HAS_BUILTIN_POPCOUNT             - __builtin_popcount() supported
+//  V8_HAS_BUILTIN_SADD_OVERFLOW        - __builtin_sadd_overflow() supported
+//  V8_HAS_BUILTIN_SSUB_OVERFLOW        - __builtin_ssub_overflow() supported
 //  V8_HAS_DECLSPEC_ALIGN               - __declspec(align(n)) supported
 //  V8_HAS_DECLSPEC_DEPRECATED          - __declspec(deprecated) supported
 //  V8_HAS_DECLSPEC_NOINLINE            - __declspec(noinline) supported
@@ -206,7 +211,12 @@
 # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
     (__has_attribute(warn_unused_result))
 
+# define V8_HAS_BUILTIN_CLZ (__has_builtin(__builtin_clz))
+# define V8_HAS_BUILTIN_CTZ (__has_builtin(__builtin_ctz))
 # define V8_HAS_BUILTIN_EXPECT (__has_builtin(__builtin_expect))
+# define V8_HAS_BUILTIN_POPCOUNT (__has_builtin(__builtin_popcount))
+# define V8_HAS_BUILTIN_SADD_OVERFLOW (__has_builtin(__builtin_sadd_overflow))
+# define V8_HAS_BUILTIN_SSUB_OVERFLOW (__has_builtin(__builtin_ssub_overflow))
 
 # define V8_HAS_CXX11_ALIGNAS (__has_feature(cxx_alignas))
 # define V8_HAS_CXX11_STATIC_ASSERT (__has_feature(cxx_static_assert))
@@ -238,7 +248,10 @@
 # define V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT \
     (!V8_CC_INTEL && V8_GNUC_PREREQ(4, 1, 0))
 
+# define V8_HAS_BUILTIN_CLZ (V8_GNUC_PREREQ(3, 4, 0))
+# define V8_HAS_BUILTIN_CTZ (V8_GNUC_PREREQ(3, 4, 0))
 # define V8_HAS_BUILTIN_EXPECT (V8_GNUC_PREREQ(2, 96, 0))
+# define V8_HAS_BUILTIN_POPCOUNT (V8_GNUC_PREREQ(3, 4, 0))
 
 // g++ requires -std=c++0x or -std=gnu++0x to support C++11 functionality
 // without warnings (functionality used by the macros below).  These modes
@@ -321,24 +334,6 @@
 #endif
 
 
-// A macro to mark variables or types as unused, avoiding compiler warnings.
-#if V8_HAS_ATTRIBUTE_UNUSED
-# define V8_UNUSED __attribute__((unused))
-#else
-# define V8_UNUSED
-#endif
-
-
-// Annotate a function indicating the caller must examine the return value.
-// Use like:
-//   int foo() V8_WARN_UNUSED_RESULT;
-#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
-# define V8_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
-#else
-# define V8_WARN_UNUSED_RESULT /* NOT SUPPORTED */
-#endif
-
-
 // A macro to provide the compiler with branch prediction information.
 #if V8_HAS_BUILTIN_EXPECT
 # define V8_UNLIKELY(condition) (__builtin_expect(!!(condition), 0))
@@ -369,33 +364,6 @@
 #endif
 
 
-// Annotate a virtual method indicating it must be overriding a virtual
-// method in the parent class.
-// Use like:
-//   virtual void bar() V8_OVERRIDE;
-#if V8_HAS_CXX11_OVERRIDE
-# define V8_OVERRIDE override
-#else
-# define V8_OVERRIDE /* NOT SUPPORTED */
-#endif
-
-
-// Annotate a virtual method indicating that subclasses must not override it,
-// or annotate a class to indicate that it cannot be subclassed.
-// Use like:
-//   class B V8_FINAL : public A {};
-//   virtual void bar() V8_FINAL;
-#if V8_HAS_CXX11_FINAL
-# define V8_FINAL final
-#elif V8_HAS___FINAL
-# define V8_FINAL __final
-#elif V8_HAS_SEALED
-# define V8_FINAL sealed
-#else
-# define V8_FINAL /* NOT SUPPORTED */
-#endif
-
-
 // This macro allows to specify memory alignment for structs, classes, etc.
 // Use like:
 //   class V8_ALIGNED(16) MyClass { ... };
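
The V8_HAS_BUILTIN_* feature macros added above are consumed in the usual detect-or-fall-back pattern; a hedged sketch follows (the CountLeadingZeros32 helper is illustrative, not part of this patch):

#include <stdint.h>

#ifndef V8_HAS_BUILTIN_CLZ
#define V8_HAS_BUILTIN_CLZ 0  // Assume no builtin if detection did not run.
#endif

// Count leading zero bits of a non-zero 32-bit value.
static inline int CountLeadingZeros32(uint32_t value) {
#if V8_HAS_BUILTIN_CLZ
  return __builtin_clz(value);
#else
  // Portable fallback: probe from the most significant bit downwards.
  int count = 0;
  for (uint32_t mask = 1u << 31; mask != 0 && (value & mask) == 0;
       mask >>= 1) {
    ++count;
  }
  return count;
#endif
}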
diff --git a/samples/lineprocessor.cc b/samples/lineprocessor.cc
index edb0ba0..69bfab4 100644
--- a/samples/lineprocessor.cc
+++ b/samples/lineprocessor.cc
@@ -25,14 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <v8.h>
+#include <include/v8.h>
 
-#include <v8-debug.h>
+#include <include/libplatform/libplatform.h>
+#include <include/v8-debug.h>
 
 #include <fcntl.h>
-#include <string.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 
 /**
  * This sample program should demonstrate certain aspects of debugging
@@ -254,8 +255,13 @@
 
 int main(int argc, char* argv[]) {
   v8::V8::InitializeICU();
+  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+  v8::V8::InitializePlatform(platform);
+  v8::V8::Initialize();
   int result = RunMain(argc, argv);
   v8::V8::Dispose();
+  v8::V8::ShutdownPlatform();
+  delete platform;
   return result;
 }
 
@@ -300,7 +306,7 @@
     printf("%s\n", exception_string);
   } else {
     // Print (filename):(line number): (message).
-    v8::String::Utf8Value filename(message->GetScriptResourceName());
+    v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
     const char* filename_string = ToCString(filename);
     int linenum = message->GetLineNumber();
     printf("%s:%i: %s\n", filename_string, linenum, exception_string);
diff --git a/samples/process.cc b/samples/process.cc
index b279a8d..e5c9b7a 100644
--- a/samples/process.cc
+++ b/samples/process.cc
@@ -25,10 +25,12 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <v8.h>
+#include <include/v8.h>
 
-#include <string>
+#include <include/libplatform/libplatform.h>
+
 #include <map>
+#include <string>
 
 #ifdef COMPRESS_STARTUP_DATA_BZ2
 #error Using compressed startup data is not supported for this sample
@@ -644,6 +646,9 @@
 
 int main(int argc, char* argv[]) {
   v8::V8::InitializeICU();
+  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+  v8::V8::InitializePlatform(platform);
+  v8::V8::Initialize();
   map<string, string> options;
   string file;
   ParseOptions(argc, argv, &options, &file);
diff --git a/samples/samples.gyp b/samples/samples.gyp
index dfc7410..0c4c705 100644
--- a/samples/samples.gyp
+++ b/samples/samples.gyp
@@ -35,9 +35,10 @@
     'type': 'executable',
     'dependencies': [
       '../tools/gyp/v8.gyp:v8',
+      '../tools/gyp/v8.gyp:v8_libplatform',
     ],
     'include_dirs': [
-      '../include',
+      '..',
     ],
     'conditions': [
       ['v8_enable_i18n_support==1', {
diff --git a/samples/shell.cc b/samples/shell.cc
index aebe49d..b66e8f7 100644
--- a/samples/shell.cc
+++ b/samples/shell.cc
@@ -25,12 +25,15 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <v8.h>
+#include <include/v8.h>
+
+#include <include/libplatform/libplatform.h>
+
 #include <assert.h>
 #include <fcntl.h>
-#include <string.h>
 #include <stdio.h>
 #include <stdlib.h>
+#include <string.h>
 
 #ifdef COMPRESS_STARTUP_DATA_BZ2
 #error Using compressed startup data is not supported for this sample
@@ -78,6 +81,9 @@
 
 int main(int argc, char* argv[]) {
   v8::V8::InitializeICU();
+  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+  v8::V8::InitializePlatform(platform);
+  v8::V8::Initialize();
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
   ShellArrayBufferAllocator array_buffer_allocator;
   v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
@@ -97,6 +103,8 @@
     if (run_shell) RunShell(context);
   }
   v8::V8::Dispose();
+  v8::V8::ShutdownPlatform();
+  delete platform;
   return result;
 }
 
@@ -358,7 +366,7 @@
     fprintf(stderr, "%s\n", exception_string);
   } else {
     // Print (filename):(line number): (message).
-    v8::String::Utf8Value filename(message->GetScriptResourceName());
+    v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
     const char* filename_string = ToCString(filename);
     int linenum = message->GetLineNumber();
     fprintf(stderr, "%s:%i: %s\n", filename_string, linenum, exception_string);
diff --git a/src/DEPS b/src/DEPS
index 4196627..260f5b2 100644
--- a/src/DEPS
+++ b/src/DEPS
@@ -1,6 +1,14 @@
 include_rules = [
   "+src",
-
-  # TODO(jochen): Enable this.
-  #"-src/libplatform",
+  "-src/compiler",
+  "+src/compiler/pipeline.h",
+  "-src/libplatform",
+  "-include/libplatform",
+  "+testing",
 ]
+
+specific_include_rules = {
+  "(mksnapshot|d8)\.cc": [
+    "+include/libplatform/libplatform.h",
+  ],
+}
diff --git a/src/accessors.cc b/src/accessors.cc
index 54bd241..011372c 100644
--- a/src/accessors.cc
+++ b/src/accessors.cc
@@ -3,8 +3,9 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
-#include "src/accessors.h"
 
+#include "src/accessors.h"
+#include "src/api.h"
 #include "src/compiler.h"
 #include "src/contexts.h"
 #include "src/deoptimizer.h"
@@ -14,27 +15,17 @@
 #include "src/isolate.h"
 #include "src/list-inl.h"
 #include "src/property-details.h"
-#include "src/api.h"
+#include "src/prototype.h"
 
 namespace v8 {
 namespace internal {
 
 
-// We have a slight impedance mismatch between the external API and the way we
-// use callbacks internally: Externally, callbacks can only be used with
-// v8::Object, but internally we even have callbacks on entities which are
-// higher in the hierarchy, so we can only return i::Object here, not
-// i::JSObject.
-Handle<Object> GetThisFrom(const v8::PropertyCallbackInfo<v8::Value>& info) {
-  return Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
-}
-
-
 Handle<AccessorInfo> Accessors::MakeAccessor(
     Isolate* isolate,
-    Handle<String> name,
-    AccessorGetterCallback getter,
-    AccessorSetterCallback setter,
+    Handle<Name> name,
+    AccessorNameGetterCallback getter,
+    AccessorNameSetterCallback setter,
     PropertyAttributes attributes) {
   Factory* factory = isolate->factory();
   Handle<ExecutableAccessorInfo> info = factory->NewExecutableAccessorInfo();
@@ -67,18 +58,20 @@
 
 template <class C>
 static C* FindInstanceOf(Isolate* isolate, Object* obj) {
-  for (Object* cur = obj; !cur->IsNull(); cur = cur->GetPrototype(isolate)) {
-    if (Is<C>(cur)) return C::cast(cur);
+  for (PrototypeIterator iter(isolate, obj,
+                              PrototypeIterator::START_AT_RECEIVER);
+       !iter.IsAtEnd(); iter.Advance()) {
+    if (Is<C>(iter.GetCurrent())) return C::cast(iter.GetCurrent());
   }
   return NULL;
 }
 
 
-static V8_INLINE bool CheckForName(Handle<String> name,
+static V8_INLINE bool CheckForName(Handle<Name> name,
                                    Handle<String> property_name,
                                    int offset,
                                    int* object_offset) {
-  if (String::Equals(name, property_name)) {
+  if (Name::Equals(name, property_name)) {
     *object_offset = offset;
     return true;
   }
@@ -90,7 +83,7 @@
 // If true, *object_offset contains offset of object field.
 template <class T>
 bool Accessors::IsJSObjectFieldAccessor(typename T::TypeHandle type,
-                                        Handle<String> name,
+                                        Handle<Name> name,
                                         int* object_offset) {
   Isolate* isolate = name->GetIsolate();
 
@@ -133,16 +126,75 @@
 
 template
 bool Accessors::IsJSObjectFieldAccessor<Type>(Type* type,
-                                              Handle<String> name,
+                                              Handle<Name> name,
                                               int* object_offset);
 
 
 template
 bool Accessors::IsJSObjectFieldAccessor<HeapType>(Handle<HeapType> type,
-                                                  Handle<String> name,
+                                                  Handle<Name> name,
                                                   int* object_offset);
 
 
+bool SetPropertyOnInstanceIfInherited(
+    Isolate* isolate, const v8::PropertyCallbackInfo<void>& info,
+    v8::Local<v8::Name> name, Handle<Object> value) {
+  Handle<Object> holder = Utils::OpenHandle(*info.Holder());
+  Handle<Object> receiver = Utils::OpenHandle(*info.This());
+  if (*holder == *receiver) return false;
+  if (receiver->IsJSObject()) {
+    Handle<JSObject> object = Handle<JSObject>::cast(receiver);
+    // This behaves sloppily since we lost the actual strict mode.
+    // TODO(verwaest): Fix by making ExecutableAccessorInfo behave like data
+    // properties.
+    if (!object->map()->is_extensible()) return true;
+    JSObject::SetOwnPropertyIgnoreAttributes(object, Utils::OpenHandle(*name),
+                                             value, NONE).Check();
+  }
+  return true;
+}
+
+
+//
+// Accessors::ArgumentsIterator
+//
+
+
+void Accessors::ArgumentsIteratorGetter(
+    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  DisallowHeapAllocation no_allocation;
+  HandleScope scope(isolate);
+  Object* result = isolate->native_context()->array_values_iterator();
+  info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
+}
+
+
+void Accessors::ArgumentsIteratorSetter(
+    v8::Local<v8::Name> name, v8::Local<v8::Value> val,
+    const v8::PropertyCallbackInfo<void>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  HandleScope scope(isolate);
+  Handle<JSObject> object = Utils::OpenHandle(*info.This());
+  Handle<Object> value = Utils::OpenHandle(*val);
+
+  if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) return;
+
+  LookupIterator it(object, Utils::OpenHandle(*name));
+  CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+  DCHECK(it.HolderIsReceiverOrHiddenPrototype());
+  Object::SetDataProperty(&it, value);
+}
+
+
+Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
+    Isolate* isolate, PropertyAttributes attributes) {
+  Handle<Name> name(isolate->native_context()->iterator_symbol(), isolate);
+  return MakeAccessor(isolate, name, &ArgumentsIteratorGetter,
+                      &ArgumentsIteratorSetter, attributes);
+}
+
+
 //
 // Accessors::ArrayLength
 //
@@ -153,10 +205,9 @@
                                         Handle<Object> value) {
   if (value->IsNumber() || !value->IsJSValue()) return value;
   Handle<JSValue> wrapper = Handle<JSValue>::cast(value);
-  ASSERT(wrapper->GetIsolate()->context()->native_context()->number_function()->
+  DCHECK(wrapper->GetIsolate()->native_context()->number_function()->
       has_initial_map());
-  if (wrapper->map() ==
-      isolate->context()->native_context()->number_function()->initial_map()) {
+  if (wrapper->map() == isolate->number_function()->initial_map()) {
     return handle(wrapper->value(), isolate);
   }
 
@@ -165,41 +216,26 @@
 
 
 void Accessors::ArrayLengthGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* object = *GetThisFrom(info);
-  // Traverse the prototype chain until we reach an array.
-  JSArray* holder = FindInstanceOf<JSArray>(isolate, object);
-  Object* result;
-  if (holder != NULL) {
-    result = holder->length();
-  } else {
-    result = Smi::FromInt(0);
-  }
+  JSArray* holder = JSArray::cast(*Utils::OpenHandle(*info.Holder()));
+  Object* result = holder->length();
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
 }
 
 
 void Accessors::ArrayLengthSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<void>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<JSObject> object = Handle<JSObject>::cast(
-      Utils::OpenHandle(*info.This()));
+  Handle<JSObject> object = Utils::OpenHandle(*info.This());
   Handle<Object> value = Utils::OpenHandle(*val);
-  // This means one of the object's prototypes is a JSArray and the
-  // object does not have a 'length' property.  Calling SetProperty
-  // causes an infinite loop.
-  if (!object->IsJSArray()) {
-    MaybeHandle<Object> maybe_result =
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            object, isolate->factory()->length_string(), value, NONE);
-    maybe_result.Check();
+  if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
     return;
   }
 
@@ -226,9 +262,15 @@
     return;
   }
 
-  isolate->ScheduleThrow(
-      *isolate->factory()->NewRangeError("invalid_array_length",
-                                         HandleVector<Object>(NULL, 0)));
+  Handle<Object> exception;
+  maybe = isolate->factory()->NewRangeError("invalid_array_length",
+                                            HandleVector<Object>(NULL, 0));
+  if (!maybe.ToHandle(&exception)) {
+    isolate->OptionalRescheduleException(false);
+    return;
+  }
+
+  isolate->ScheduleThrow(*exception);
 }
 
 
@@ -248,27 +290,30 @@
 //
 
 void Accessors::StringLengthGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
   HandleScope scope(isolate);
-  Object* value = *GetThisFrom(info);
-  Object* result;
-  if (value->IsJSValue()) value = JSValue::cast(value)->value();
-  if (value->IsString()) {
-    result = Smi::FromInt(String::cast(value)->length());
-  } else {
-    // If object is not a string we return 0 to be compatible with WebKit.
-    // Note: Firefox returns the length of ToString(object).
-    result = Smi::FromInt(0);
+
+  // We have a slight impedance mismatch between the external API and the way we
+  // use callbacks internally: Externally, callbacks can only be used with
+  // v8::Object, but internally we have callbacks on entities which are higher
+  // in the hierarchy, in this case for String values.
+
+  Object* value = *Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
+  if (!value->IsString()) {
+    // Not a string value. That means that we either got a String wrapper or
+    // a Value with a String wrapper in its prototype chain.
+    value = JSValue::cast(*Utils::OpenHandle(*info.Holder()))->value();
   }
+  Object* result = Smi::FromInt(String::cast(value)->length());
   info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(result, isolate)));
 }
 
 
 void Accessors::StringLengthSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -291,7 +336,7 @@
 
 
 void Accessors::ScriptColumnOffsetGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
@@ -303,7 +348,7 @@
 
 
 void Accessors::ScriptColumnOffsetSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -313,7 +358,7 @@
 Handle<AccessorInfo> Accessors::ScriptColumnOffsetInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("column_offset")));
+      STATIC_CHAR_VECTOR("column_offset")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptColumnOffsetGetter,
@@ -328,7 +373,7 @@
 
 
 void Accessors::ScriptIdGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
@@ -340,7 +385,7 @@
 
 
 void Accessors::ScriptIdSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -349,8 +394,8 @@
 
 Handle<AccessorInfo> Accessors::ScriptIdInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("id")));
+  Handle<String> name(
+      isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("id")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptIdGetter,
@@ -365,7 +410,7 @@
 
 
 void Accessors::ScriptNameGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
@@ -377,7 +422,7 @@
 
 
 void Accessors::ScriptNameSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -400,7 +445,7 @@
 
 
 void Accessors::ScriptSourceGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
@@ -412,7 +457,7 @@
 
 
 void Accessors::ScriptSourceSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -435,7 +480,7 @@
 
 
 void Accessors::ScriptLineOffsetGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
@@ -447,7 +492,7 @@
 
 
 void Accessors::ScriptLineOffsetSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -457,7 +502,7 @@
 Handle<AccessorInfo> Accessors::ScriptLineOffsetInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("line_offset")));
+      STATIC_CHAR_VECTOR("line_offset")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptLineOffsetGetter,
@@ -472,7 +517,7 @@
 
 
 void Accessors::ScriptTypeGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
@@ -484,7 +529,7 @@
 
 
 void Accessors::ScriptTypeSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -493,8 +538,8 @@
 
 Handle<AccessorInfo> Accessors::ScriptTypeInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("type")));
+  Handle<String> name(
+      isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("type")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptTypeGetter,
@@ -509,7 +554,7 @@
 
 
 void Accessors::ScriptCompilationTypeGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
@@ -522,7 +567,7 @@
 
 
 void Accessors::ScriptCompilationTypeSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -532,7 +577,7 @@
 Handle<AccessorInfo> Accessors::ScriptCompilationTypeInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("compilation_type")));
+      STATIC_CHAR_VECTOR("compilation_type")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptCompilationTypeGetter,
@@ -547,7 +592,7 @@
 
 
 void Accessors::ScriptLineEndsGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
@@ -555,10 +600,10 @@
   Handle<Script> script(
       Script::cast(Handle<JSValue>::cast(object)->value()), isolate);
   Script::InitLineEnds(script);
-  ASSERT(script->line_ends()->IsFixedArray());
+  DCHECK(script->line_ends()->IsFixedArray());
   Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
   // We do not want anyone to modify this array from JS.
-  ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
+  DCHECK(*line_ends == isolate->heap()->empty_fixed_array() ||
          line_ends->map() == isolate->heap()->fixed_cow_array_map());
   Handle<JSArray> js_array =
       isolate->factory()->NewJSArrayWithElements(line_ends);
@@ -567,7 +612,7 @@
 
 
 void Accessors::ScriptLineEndsSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -577,7 +622,7 @@
 Handle<AccessorInfo> Accessors::ScriptLineEndsInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("line_ends")));
+      STATIC_CHAR_VECTOR("line_ends")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptLineEndsGetter,
@@ -587,12 +632,83 @@
 
 
 //
+// Accessors::ScriptSourceUrl
+//
+
+
+void Accessors::ScriptSourceUrlGetter(
+    v8::Local<v8::Name> name,
+    const v8::PropertyCallbackInfo<v8::Value>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  DisallowHeapAllocation no_allocation;
+  HandleScope scope(isolate);
+  Object* object = *Utils::OpenHandle(*info.This());
+  Object* url = Script::cast(JSValue::cast(object)->value())->source_url();
+  info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
+}
+
+
+void Accessors::ScriptSourceUrlSetter(
+    v8::Local<v8::Name> name,
+    v8::Local<v8::Value> value,
+    const v8::PropertyCallbackInfo<void>& info) {
+  UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptSourceUrlInfo(
+      Isolate* isolate, PropertyAttributes attributes) {
+  return MakeAccessor(isolate,
+                      isolate->factory()->source_url_string(),
+                      &ScriptSourceUrlGetter,
+                      &ScriptSourceUrlSetter,
+                      attributes);
+}
+
+
+//
+// Accessors::ScriptSourceMappingUrl
+//
+
+
+void Accessors::ScriptSourceMappingUrlGetter(
+    v8::Local<v8::Name> name,
+    const v8::PropertyCallbackInfo<v8::Value>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  DisallowHeapAllocation no_allocation;
+  HandleScope scope(isolate);
+  Object* object = *Utils::OpenHandle(*info.This());
+  Object* url =
+      Script::cast(JSValue::cast(object)->value())->source_mapping_url();
+  info.GetReturnValue().Set(Utils::ToLocal(Handle<Object>(url, isolate)));
+}
+
+
+void Accessors::ScriptSourceMappingUrlSetter(
+    v8::Local<v8::Name> name,
+    v8::Local<v8::Value> value,
+    const v8::PropertyCallbackInfo<void>& info) {
+  UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::ScriptSourceMappingUrlInfo(
+      Isolate* isolate, PropertyAttributes attributes) {
+  return MakeAccessor(isolate,
+                      isolate->factory()->source_mapping_url_string(),
+                      &ScriptSourceMappingUrlGetter,
+                      &ScriptSourceMappingUrlSetter,
+                      attributes);
+}
+
+
+//
 // Accessors::ScriptGetContextData
 //
 
 
 void Accessors::ScriptContextDataGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   DisallowHeapAllocation no_allocation;
@@ -604,7 +720,7 @@
 
 
 void Accessors::ScriptContextDataSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -614,7 +730,7 @@
 Handle<AccessorInfo> Accessors::ScriptContextDataInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("context_data")));
+      STATIC_CHAR_VECTOR("context_data")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptContextDataGetter,
@@ -629,7 +745,7 @@
 
 
 void Accessors::ScriptEvalFromScriptGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
@@ -651,7 +767,7 @@
 
 
 void Accessors::ScriptEvalFromScriptSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -661,7 +777,7 @@
 Handle<AccessorInfo> Accessors::ScriptEvalFromScriptInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("eval_from_script")));
+      STATIC_CHAR_VECTOR("eval_from_script")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptEvalFromScriptGetter,
@@ -676,7 +792,7 @@
 
 
 void Accessors::ScriptEvalFromScriptPositionGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
@@ -697,7 +813,7 @@
 
 
 void Accessors::ScriptEvalFromScriptPositionSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -707,7 +823,7 @@
 Handle<AccessorInfo> Accessors::ScriptEvalFromScriptPositionInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("eval_from_script_position")));
+      STATIC_CHAR_VECTOR("eval_from_script_position")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptEvalFromScriptPositionGetter,
@@ -722,7 +838,7 @@
 
 
 void Accessors::ScriptEvalFromFunctionNameGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
@@ -743,7 +859,7 @@
 
 
 void Accessors::ScriptEvalFromFunctionNameSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -753,7 +869,7 @@
 Handle<AccessorInfo> Accessors::ScriptEvalFromFunctionNameInfo(
       Isolate* isolate, PropertyAttributes attributes) {
   Handle<String> name(isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("eval_from_function_name")));
+      STATIC_CHAR_VECTOR("eval_from_function_name")));
   return MakeAccessor(isolate,
                       name,
                       &ScriptEvalFromFunctionNameGetter,
@@ -767,21 +883,7 @@
 //
 
 static Handle<Object> GetFunctionPrototype(Isolate* isolate,
-                                           Handle<Object> receiver) {
-  Handle<JSFunction> function;
-  {
-    DisallowHeapAllocation no_allocation;
-    JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, *receiver);
-    if (function_raw == NULL) return isolate->factory()->undefined_value();
-    while (!function_raw->should_have_prototype()) {
-      function_raw = FindInstanceOf<JSFunction>(isolate,
-                                                function_raw->GetPrototype());
-      // There has to be one because we hit the getter.
-      ASSERT(function_raw != NULL);
-    }
-    function = Handle<JSFunction>(function_raw, isolate);
-  }
-
+                                           Handle<JSFunction> function) {
   if (!function->has_prototype()) {
     Handle<Object> proto = isolate->factory()->NewFunctionPrototype(function);
     JSFunction::SetPrototype(function, proto);
@@ -791,26 +893,10 @@
 
 
 static Handle<Object> SetFunctionPrototype(Isolate* isolate,
-                                           Handle<JSObject> receiver,
+                                           Handle<JSFunction> function,
                                            Handle<Object> value) {
-  Handle<JSFunction> function;
-  {
-    DisallowHeapAllocation no_allocation;
-    JSFunction* function_raw = FindInstanceOf<JSFunction>(isolate, *receiver);
-    if (function_raw == NULL) return isolate->factory()->undefined_value();
-    function = Handle<JSFunction>(function_raw, isolate);
-  }
-
-  if (!function->should_have_prototype()) {
-    // Since we hit this accessor, object will have no prototype property.
-    MaybeHandle<Object> maybe_result =
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            receiver, isolate->factory()->prototype_string(), value, NONE);
-    return maybe_result.ToHandleChecked();
-  }
-
   Handle<Object> old_value;
-  bool is_observed = *function == *receiver && function->map()->is_observed();
+  bool is_observed = function->map()->is_observed();
   if (is_observed) {
     if (function->has_prototype())
       old_value = handle(function->prototype(), isolate);
@@ -819,7 +905,7 @@
   }
 
   JSFunction::SetPrototype(function, value);
-  ASSERT(function->prototype() == *value);
+  DCHECK(function->prototype() == *value);
 
   if (is_observed && !old_value->SameValue(*value)) {
     JSObject::EnqueueChangeRecord(
@@ -837,33 +923,36 @@
 
 Handle<Object> Accessors::FunctionSetPrototype(Handle<JSFunction> function,
                                                Handle<Object> prototype) {
-  ASSERT(function->should_have_prototype());
+  DCHECK(function->should_have_prototype());
   Isolate* isolate = function->GetIsolate();
   return SetFunctionPrototype(isolate, function, prototype);
 }
 
 
 void Accessors::FunctionPrototypeGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = GetThisFrom(info);
-  Handle<Object> result = GetFunctionPrototype(isolate, object);
+  Handle<JSFunction> function =
+      Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
+  Handle<Object> result = GetFunctionPrototype(isolate, function);
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
 
 void Accessors::FunctionPrototypeSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<void>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<JSObject> object =
-      Handle<JSObject>::cast(Utils::OpenHandle(*info.This()));
   Handle<Object> value = Utils::OpenHandle(*val);
-
+  if (SetPropertyOnInstanceIfInherited(isolate, info, name, value)) {
+    return;
+  }
+  Handle<JSFunction> object =
+      Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
   SetFunctionPrototype(isolate, object, value);
 }
 
@@ -884,33 +973,24 @@
 
 
 void Accessors::FunctionLengthGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = GetThisFrom(info);
-  MaybeHandle<JSFunction> maybe_function;
-
-  {
-    DisallowHeapAllocation no_allocation;
-    JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
-    if (function != NULL) maybe_function = Handle<JSFunction>(function);
-  }
+  Handle<JSFunction> function =
+      Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
 
   int length = 0;
-  Handle<JSFunction> function;
-  if (maybe_function.ToHandle(&function)) {
-    if (function->shared()->is_compiled()) {
+  if (function->shared()->is_compiled()) {
+    length = function->shared()->length();
+  } else {
+    // If the function isn't compiled yet, its length has not been computed
+    // correctly. Compile it now and return the right length.
+    if (Compiler::EnsureCompiled(function, KEEP_EXCEPTION)) {
       length = function->shared()->length();
-    } else {
-      // If the function isn't compiled yet, the length is not computed
-      // correctly yet. Compile it now and return the right length.
-      if (Compiler::EnsureCompiled(function, KEEP_EXCEPTION)) {
-        length = function->shared()->length();
-      }
-      if (isolate->has_pending_exception()) {
-        isolate->OptionalRescheduleException(false);
-      }
+    }
+    if (isolate->has_pending_exception()) {
+      isolate->OptionalRescheduleException(false);
     }
   }
   Handle<Object> result(Smi::FromInt(length), isolate);
@@ -919,10 +999,11 @@
 
 
 void Accessors::FunctionLengthSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<void>& info) {
-  // Do nothing.
+  // Function length is non-writable and non-configurable.
+  UNREACHABLE();
 }
 
 
@@ -942,35 +1023,23 @@
 
 
 void Accessors::FunctionNameGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = GetThisFrom(info);
-  MaybeHandle<JSFunction> maybe_function;
-
-  {
-    DisallowHeapAllocation no_allocation;
-    JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
-    if (function != NULL) maybe_function = Handle<JSFunction>(function);
-  }
-
-  Handle<JSFunction> function;
-  Handle<Object> result;
-  if (maybe_function.ToHandle(&function)) {
-    result = Handle<Object>(function->shared()->name(), isolate);
-  } else {
-    result = isolate->factory()->undefined_value();
-  }
+  Handle<JSFunction> function =
+      Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
+  Handle<Object> result(function->shared()->name(), isolate);
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
 
 void Accessors::FunctionNameSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<void>& info) {
-  // Do nothing.
+  // Function name is non-writable and non-configurable.
+  UNREACHABLE();
 }
 
 
@@ -1072,7 +1141,7 @@
     Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
 
     // Copy the parameters to the arguments object.
-    ASSERT(array->length() == length);
+    DCHECK(array->length() == length);
     for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
     arguments->set_elements(*array);
 
@@ -1091,35 +1160,23 @@
 
 
 void Accessors::FunctionArgumentsGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = GetThisFrom(info);
-  MaybeHandle<JSFunction> maybe_function;
-
-  {
-    DisallowHeapAllocation no_allocation;
-    JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
-    if (function != NULL) maybe_function = Handle<JSFunction>(function);
-  }
-
-  Handle<JSFunction> function;
-  Handle<Object> result;
-  if (maybe_function.ToHandle(&function)) {
-    result = GetFunctionArguments(isolate, function);
-  } else {
-    result = isolate->factory()->undefined_value();
-  }
+  Handle<JSFunction> function =
+      Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
+  Handle<Object> result = GetFunctionArguments(isolate, function);
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
 
 void Accessors::FunctionArgumentsSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<void>& info) {
-  // Do nothing.
+  // The function's arguments property is non-writable and non-configurable.
+  UNREACHABLE();
 }
 
 
@@ -1185,7 +1242,7 @@
     if (frame_iterator_.done()) return;
     JavaScriptFrame* frame = frame_iterator_.frame();
     frame->GetFunctions(&functions_);
-    ASSERT(functions_.length() > 0);
+    DCHECK(functions_.length() > 0);
     frame_iterator_.Advance();
     index_ = functions_.length() - 1;
   }
@@ -1246,40 +1303,31 @@
 
 
 void Accessors::FunctionCallerGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
-  Handle<Object> object = GetThisFrom(info);
-  MaybeHandle<JSFunction> maybe_function;
-  {
-    DisallowHeapAllocation no_allocation;
-    JSFunction* function = FindInstanceOf<JSFunction>(isolate, *object);
-    if (function != NULL) maybe_function = Handle<JSFunction>(function);
-  }
-  Handle<JSFunction> function;
+  Handle<JSFunction> function =
+      Handle<JSFunction>::cast(Utils::OpenHandle(*info.Holder()));
   Handle<Object> result;
-  if (maybe_function.ToHandle(&function)) {
-    MaybeHandle<JSFunction> maybe_caller;
-    maybe_caller = FindCaller(isolate, function);
-    Handle<JSFunction> caller;
-    if (maybe_caller.ToHandle(&caller)) {
-      result = caller;
-    } else {
-      result = isolate->factory()->null_value();
-    }
+  MaybeHandle<JSFunction> maybe_caller;
+  maybe_caller = FindCaller(isolate, function);
+  Handle<JSFunction> caller;
+  if (maybe_caller.ToHandle(&caller)) {
+    result = caller;
   } else {
-    result = isolate->factory()->undefined_value();
+    result = isolate->factory()->null_value();
   }
   info.GetReturnValue().Set(Utils::ToLocal(result));
 }
 
 
 void Accessors::FunctionCallerSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> val,
     const v8::PropertyCallbackInfo<void>& info) {
-  // Do nothing.
+  // Function caller is non-writable and non-configurable.
+  UNREACHABLE();
 }
 
 
@@ -1302,15 +1350,22 @@
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
   Context* context = Context::cast(instance->context());
-  ASSERT(context->IsModuleContext());
+  DCHECK(context->IsModuleContext());
   int slot = info.Data()->Int32Value();
   Object* value = context->get(slot);
   Isolate* isolate = instance->GetIsolate();
   if (value->IsTheHole()) {
     Handle<String> name = v8::Utils::OpenHandle(*property);
-    isolate->ScheduleThrow(
-        *isolate->factory()->NewReferenceError("not_defined",
-                                               HandleVector(&name, 1)));
+
+    Handle<Object> exception;
+    MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
+        "not_defined", HandleVector(&name, 1));
+    if (!maybe.ToHandle(&exception)) {
+      isolate->OptionalRescheduleException(false);
+      return;
+    }
+
+    isolate->ScheduleThrow(*exception);
     return;
   }
   info.GetReturnValue().Set(v8::Utils::ToLocal(Handle<Object>(value, isolate)));
@@ -1323,15 +1378,21 @@
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   JSModule* instance = JSModule::cast(*v8::Utils::OpenHandle(*info.Holder()));
   Context* context = Context::cast(instance->context());
-  ASSERT(context->IsModuleContext());
+  DCHECK(context->IsModuleContext());
   int slot = info.Data()->Int32Value();
   Object* old_value = context->get(slot);
+  Isolate* isolate = context->GetIsolate();
   if (old_value->IsTheHole()) {
     Handle<String> name = v8::Utils::OpenHandle(*property);
-    Isolate* isolate = instance->GetIsolate();
-    isolate->ScheduleThrow(
-        *isolate->factory()->NewReferenceError("not_defined",
-                                               HandleVector(&name, 1)));
+    Handle<Object> exception;
+    MaybeHandle<Object> maybe = isolate->factory()->NewReferenceError(
+        "not_defined", HandleVector(&name, 1));
+    if (!maybe.ToHandle(&exception)) {
+      isolate->OptionalRescheduleException(false);
+      return;
+    }
+
+    isolate->ScheduleThrow(*exception);
     return;
   }
   context->set(slot, *v8::Utils::OpenHandle(*value));
diff --git a/src/accessors.h b/src/accessors.h
index 41993ea..8fc1f84 100644
--- a/src/accessors.h
+++ b/src/accessors.h
@@ -13,25 +13,28 @@
 
 // The list of accessor descriptors. This is a second-order macro
 // taking a macro to be applied to all accessor descriptor names.
-#define ACCESSOR_INFO_LIST(V)       \
-  V(ArrayLength)                    \
-  V(FunctionArguments)              \
-  V(FunctionCaller)                 \
-  V(FunctionName)                   \
-  V(FunctionLength)                 \
-  V(FunctionPrototype)              \
-  V(ScriptColumnOffset)             \
-  V(ScriptCompilationType)          \
-  V(ScriptContextData)              \
-  V(ScriptEvalFromScript)           \
-  V(ScriptEvalFromScriptPosition)   \
-  V(ScriptEvalFromFunctionName)     \
-  V(ScriptId)                       \
-  V(ScriptLineEnds)                 \
-  V(ScriptLineOffset)               \
-  V(ScriptName)                     \
-  V(ScriptSource)                   \
-  V(ScriptType)                     \
+#define ACCESSOR_INFO_LIST(V)     \
+  V(ArgumentsIterator)            \
+  V(ArrayLength)                  \
+  V(FunctionArguments)            \
+  V(FunctionCaller)               \
+  V(FunctionName)                 \
+  V(FunctionLength)               \
+  V(FunctionPrototype)            \
+  V(ScriptColumnOffset)           \
+  V(ScriptCompilationType)        \
+  V(ScriptContextData)            \
+  V(ScriptEvalFromScript)         \
+  V(ScriptEvalFromScriptPosition) \
+  V(ScriptEvalFromFunctionName)   \
+  V(ScriptId)                     \
+  V(ScriptLineEnds)               \
+  V(ScriptLineOffset)             \
+  V(ScriptName)                   \
+  V(ScriptSource)                 \
+  V(ScriptType)                   \
+  V(ScriptSourceUrl)              \
+  V(ScriptSourceMappingUrl)       \
   V(StringLength)
 
 // Accessors contains all predefined proxy accessors.
@@ -41,10 +44,10 @@
   // Accessor descriptors.
 #define ACCESSOR_INFO_DECLARATION(name)                   \
   static void name##Getter(                               \
-      v8::Local<v8::String> name,                         \
+      v8::Local<v8::Name> name,                           \
       const v8::PropertyCallbackInfo<v8::Value>& info);   \
   static void name##Setter(                               \
-      v8::Local<v8::String> name,                         \
+      v8::Local<v8::Name> name,                           \
       v8::Local<v8::Value> value,                         \
       const v8::PropertyCallbackInfo<void>& info);   \
   static Handle<AccessorInfo> name##Info(                 \
@@ -76,14 +79,14 @@
   // If true, *object_offset contains offset of object field.
   template <class T>
   static bool IsJSObjectFieldAccessor(typename T::TypeHandle type,
-                                      Handle<String> name,
+                                      Handle<Name> name,
                                       int* object_offset);
 
   static Handle<AccessorInfo> MakeAccessor(
       Isolate* isolate,
-      Handle<String> name,
-      AccessorGetterCallback getter,
-      AccessorSetterCallback setter,
+      Handle<Name> name,
+      AccessorNameGetterCallback getter,
+      AccessorNameSetterCallback setter,
       PropertyAttributes attributes);
 
   static Handle<ExecutableAccessorInfo> CloneAccessor(
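
The Handle<String> to Handle<Name> migration above also changes the callback shape MakeAccessor accepts; a hedged sketch of a conforming getter (the body is illustrative only):

#include <include/v8.h>

// AccessorNameGetterCallback: the name parameter is now a v8::Name, so the
// same accessor machinery can serve Symbol-named properties such as the
// iterator_symbol used by ArgumentsIteratorInfo above.
void ExampleGetter(v8::Local<v8::Name> name,
                   const v8::PropertyCallbackInfo<v8::Value>& info) {
  v8::Isolate* isolate = info.GetIsolate();
  // Illustrative body: answer every read with a constant.
  info.GetReturnValue().Set(v8::Integer::New(isolate, 42));
}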
diff --git a/src/allocation-site-scopes.cc b/src/allocation-site-scopes.cc
index 805ad7b..5b513f6 100644
--- a/src/allocation-site-scopes.cc
+++ b/src/allocation-site-scopes.cc
@@ -20,7 +20,7 @@
              static_cast<void*>(*scope_site));
     }
   } else {
-    ASSERT(!current().is_null());
+    DCHECK(!current().is_null());
     scope_site = isolate()->factory()->NewAllocationSite();
     if (FLAG_trace_creation_allocation_sites) {
       PrintF("Creating nested site (top, current, new) (%p, %p, %p)\n",
@@ -31,7 +31,7 @@
     current()->set_nested_site(*scope_site);
     update_current_site(*scope_site);
   }
-  ASSERT(!scope_site.is_null());
+  DCHECK(!scope_site.is_null());
   return scope_site;
 }
 
diff --git a/src/allocation-site-scopes.h b/src/allocation-site-scopes.h
index 7adf028..836da43 100644
--- a/src/allocation-site-scopes.h
+++ b/src/allocation-site-scopes.h
@@ -75,7 +75,7 @@
       // Advance current site
       Object* nested_site = current()->nested_site();
       // Something is wrong if we advance to the end of the list here.
-      ASSERT(nested_site->IsAllocationSite());
+      DCHECK(nested_site->IsAllocationSite());
       update_current_site(AllocationSite::cast(nested_site));
     }
     return Handle<AllocationSite>(*current(), isolate());
@@ -85,7 +85,7 @@
                         Handle<JSObject> object) {
     // This assert ensures that we are pointing at the right sub-object in a
     // recursive walk of a nested literal.
-    ASSERT(object.is_null() || *object == scope_site->transition_info());
+    DCHECK(object.is_null() || *object == scope_site->transition_info());
   }
 
   bool ShouldCreateMemento(Handle<JSObject> object);
diff --git a/src/allocation-tracker.cc b/src/allocation-tracker.cc
index f6dc5ab..7534ffb 100644
--- a/src/allocation-tracker.cc
+++ b/src/allocation-tracker.cc
@@ -5,9 +5,8 @@
 #include "src/v8.h"
 
 #include "src/allocation-tracker.h"
-
-#include "src/heap-snapshot-generator.h"
 #include "src/frames-inl.h"
+#include "src/heap-snapshot-generator.h"
 
 namespace v8 {
 namespace internal {
@@ -55,15 +54,15 @@
 
 
 void AllocationTraceNode::Print(int indent, AllocationTracker* tracker) {
-  OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
+  base::OS::Print("%10u %10u %*c", total_size_, allocation_count_, indent, ' ');
   if (tracker != NULL) {
     AllocationTracker::FunctionInfo* info =
         tracker->function_info_list()[function_info_index_];
-    OS::Print("%s #%u", info->name, id_);
+    base::OS::Print("%s #%u", info->name, id_);
   } else {
-    OS::Print("%u #%u", function_info_index_, id_);
+    base::OS::Print("%u #%u", function_info_index_, id_);
   }
-  OS::Print("\n");
+  base::OS::Print("\n");
   indent += 2;
   for (int i = 0; i < children_.length(); i++) {
     children_[i]->Print(indent, tracker);
@@ -94,8 +93,8 @@
 
 
 void AllocationTraceTree::Print(AllocationTracker* tracker) {
-  OS::Print("[AllocationTraceTree:]\n");
-  OS::Print("Total size | Allocation count | Function id | id\n");
+  base::OS::Print("[AllocationTraceTree:]\n");
+  base::OS::Print("Total size | Allocation count | Function id | id\n");
   root()->Print(0, tracker);
 }
 
@@ -229,8 +228,8 @@
   // Mark the new block as FreeSpace to make sure the heap is iterable
   // while we are capturing the stack trace.
   FreeListNode::FromAddress(addr)->set_size(heap, size);
-  ASSERT_EQ(HeapObject::FromAddress(addr)->Size(), size);
-  ASSERT(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
+  DCHECK_EQ(HeapObject::FromAddress(addr)->Size(), size);
+  DCHECK(FreeListNode::IsFreeListNode(HeapObject::FromAddress(addr)));
 
   Isolate* isolate = heap->isolate();
   int length = 0;
diff --git a/src/allocation.cc b/src/allocation.cc
index 98c9be2..cae1c10 100644
--- a/src/allocation.cc
+++ b/src/allocation.cc
@@ -5,8 +5,9 @@
 #include "src/allocation.h"
 
 #include <stdlib.h>  // For free, malloc.
-#include "src/checks.h"
-#include "src/platform.h"
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
 #include "src/utils.h"
 
 #if V8_LIBC_BIONIC
@@ -83,7 +84,8 @@
 
 
 void* AlignedAlloc(size_t size, size_t alignment) {
-  ASSERT(IsPowerOf2(alignment) && alignment >= V8_ALIGNOF(void*));  // NOLINT
+  DCHECK_LE(V8_ALIGNOF(void*), alignment);
+  DCHECK(base::bits::IsPowerOfTwo32(alignment));
   void* ptr;
 #if V8_OS_WIN
   ptr = _aligned_malloc(size, alignment);
diff --git a/src/api.cc b/src/api.cc
index bf54d0b..e11d140 100644
--- a/src/api.cc
+++ b/src/api.cc
@@ -13,6 +13,10 @@
 #include "include/v8-profiler.h"
 #include "include/v8-testing.h"
 #include "src/assert-scope.h"
+#include "src/background-parsing-task.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/utils/random-number-generator.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/compiler.h"
@@ -28,22 +32,18 @@
 #include "src/icu_util.h"
 #include "src/json-parser.h"
 #include "src/messages.h"
-#ifdef COMPRESS_STARTUP_DATA_BZ2
 #include "src/natives.h"
-#endif
 #include "src/parser.h"
-#include "src/platform.h"
-#include "src/platform/time.h"
 #include "src/profile-generator-inl.h"
-#include "src/property-details.h"
 #include "src/property.h"
+#include "src/property-details.h"
+#include "src/prototype.h"
 #include "src/runtime.h"
 #include "src/runtime-profiler.h"
 #include "src/scanner-character-streams.h"
 #include "src/simulator.h"
 #include "src/snapshot.h"
 #include "src/unicode-inl.h"
-#include "src/utils/random-number-generator.h"
 #include "src/v8threads.h"
 #include "src/version.h"
 #include "src/vm-state-inl.h"
@@ -52,7 +52,7 @@
 #define LOG_API(isolate, expr) LOG(isolate, ApiEntryCall(expr))
 
 #define ENTER_V8(isolate)                                          \
-  ASSERT((isolate)->IsInitialized());                              \
+  DCHECK((isolate)->IsInitialized());                              \
   i::VMState<i::OTHER> __state__((isolate))
 
 namespace v8 {
@@ -66,7 +66,7 @@
 
 #define EXCEPTION_PREAMBLE(isolate)                                         \
   (isolate)->handle_scope_implementer()->IncrementCallDepth();              \
-  ASSERT(!(isolate)->external_caught_exception());                          \
+  DCHECK(!(isolate)->external_caught_exception());                          \
   bool has_pending_exception = false
 
 
@@ -176,9 +176,9 @@
   i::Isolate* isolate = i::Isolate::Current();
   FatalErrorCallback callback = isolate->exception_behavior();
   if (callback == NULL) {
-    i::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n",
-                      location, message);
-    i::OS::Abort();
+    base::OS::PrintError("\n#\n# Fatal error in %s\n# %s\n#\n\n", location,
+                         message);
+    base::OS::Abort();
   } else {
     callback(location, message);
   }
@@ -202,29 +202,6 @@
 }
 
 
-// --- S t a t i c s ---
-
-
-static bool InitializeHelper(i::Isolate* isolate) {
-  // If the isolate has a function entry hook, it needs to re-build all its
-  // code stubs with entry hooks embedded, so let's deserialize a snapshot.
-  if (isolate == NULL || isolate->function_entry_hook() == NULL) {
-    if (i::Snapshot::Initialize())
-      return true;
-  }
-  return i::V8::Initialize(NULL);
-}
-
-
-static inline bool EnsureInitializedForIsolate(i::Isolate* isolate,
-                                               const char* location) {
-  return (isolate != NULL && isolate->IsInitialized()) ||
-      Utils::ApiCheck(InitializeHelper(isolate),
-                      location,
-                      "Error initializing V8");
-}
-
-
 StartupDataDecompressor::StartupDataDecompressor()
     : raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) {
   for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
@@ -256,7 +233,7 @@
                                   compressed_data[i].compressed_size);
       if (result != 0) return result;
     } else {
-      ASSERT_EQ(0, compressed_data[i].raw_size);
+      DCHECK_EQ(0, compressed_data[i].raw_size);
     }
     compressed_data[i].data = decompressed;
   }
@@ -326,24 +303,24 @@
 
 void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
 #ifdef COMPRESS_STARTUP_DATA_BZ2
-  ASSERT_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
+  DCHECK_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
   i::Snapshot::set_raw_data(
       reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data));
 
-  ASSERT_EQ(i::Snapshot::context_raw_size(),
+  DCHECK_EQ(i::Snapshot::context_raw_size(),
             decompressed_data[kSnapshotContext].raw_size);
   i::Snapshot::set_context_raw_data(
       reinterpret_cast<const i::byte*>(
           decompressed_data[kSnapshotContext].data));
 
-  ASSERT_EQ(i::Natives::GetRawScriptsSize(),
+  DCHECK_EQ(i::Natives::GetRawScriptsSize(),
             decompressed_data[kLibraries].raw_size);
   i::Vector<const char> libraries_source(
       decompressed_data[kLibraries].data,
       decompressed_data[kLibraries].raw_size);
   i::Natives::SetRawScriptsSource(libraries_source);
 
-  ASSERT_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
+  DCHECK_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
             decompressed_data[kExperimentalLibraries].raw_size);
   i::Vector<const char> exp_libraries_source(
       decompressed_data[kExperimentalLibraries].data,
@@ -353,15 +330,33 @@
 }
 
 
+void V8::SetNativesDataBlob(StartupData* natives_blob) {
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+  i::SetNativesFromFile(natives_blob);
+#else
+  CHECK(false);
+#endif
+}
+
+
+void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+  i::SetSnapshotFromFile(snapshot_blob);
+#else
+  CHECK(false);
+#endif
+}
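+
+// A minimal embedder sketch (illustrative; LoadFile() is a hypothetical
+// helper that reads a file into a v8::StartupData). In builds with
+// V8_USE_EXTERNAL_STARTUP_DATA, both setters must run before
+// V8::Initialize(); in other builds they hit the CHECK(false) above.
+//
+//   v8::StartupData natives = LoadFile("natives_blob.bin");
+//   v8::StartupData snapshot = LoadFile("snapshot_blob.bin");
+//   v8::V8::SetNativesDataBlob(&natives);
+//   v8::V8::SetSnapshotDataBlob(&snapshot);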
+
+
 void V8::SetFatalErrorHandler(FatalErrorCallback that) {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+  i::Isolate* isolate = i::Isolate::Current();
   isolate->set_exception_behavior(that);
 }
 
 
 void V8::SetAllowCodeGenerationFromStringsCallback(
     AllowCodeGenerationFromStringsCallback callback) {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+  i::Isolate* isolate = i::Isolate::Current();
   isolate->set_allow_code_gen_callback(callback);
 }
 
@@ -396,6 +391,7 @@
     delete re;
     re = next;
   }
+  first_extension_ = NULL;
 }
 
 
@@ -475,30 +471,23 @@
 }
 
 
-bool SetResourceConstraints(Isolate* v8_isolate,
-                            ResourceConstraints* constraints) {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
-  int semi_space_size = constraints->max_semi_space_size();
-  int old_space_size = constraints->max_old_space_size();
-  int max_executable_size = constraints->max_executable_size();
-  size_t code_range_size = constraints->code_range_size();
+void SetResourceConstraints(i::Isolate* isolate,
+                            const ResourceConstraints& constraints) {
+  int semi_space_size = constraints.max_semi_space_size();
+  int old_space_size = constraints.max_old_space_size();
+  int max_executable_size = constraints.max_executable_size();
+  size_t code_range_size = constraints.code_range_size();
   if (semi_space_size != 0 || old_space_size != 0 ||
       max_executable_size != 0 || code_range_size != 0) {
-    // After initialization it's too late to change Heap constraints.
-    ASSERT(!isolate->IsInitialized());
-    bool result = isolate->heap()->ConfigureHeap(semi_space_size,
-                                                 old_space_size,
-                                                 max_executable_size,
-                                                 code_range_size);
-    if (!result) return false;
+    isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
+                                   max_executable_size, code_range_size);
   }
-  if (constraints->stack_limit() != NULL) {
-    uintptr_t limit = reinterpret_cast<uintptr_t>(constraints->stack_limit());
+  if (constraints.stack_limit() != NULL) {
+    uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
     isolate->stack_guard()->SetStackLimit(limit);
   }
 
-  isolate->set_max_available_threads(constraints->max_available_threads());
-  return true;
+  isolate->set_max_available_threads(constraints.max_available_threads());
 }
 
 
@@ -594,7 +583,7 @@
 
 i::Object** HandleScope::CreateHandle(i::HeapObject* heap_object,
                                       i::Object* value) {
-  ASSERT(heap_object->IsHeapObject());
+  DCHECK(heap_object->IsHeapObject());
   return i::HandleScope::CreateHandle(heap_object->GetIsolate(), value);
 }
 
@@ -697,7 +686,7 @@
   if (data.is_null()) return;
   i::Handle<i::Object> val = Utils::OpenHandle(*value);
   data->set(index, *val);
-  ASSERT_EQ(*Utils::OpenHandle(*value),
+  DCHECK_EQ(*Utils::OpenHandle(*value),
             *Utils::OpenHandle(*GetEmbedderData(index)));
 }
 
@@ -714,7 +703,7 @@
   const char* location = "v8::Context::SetAlignedPointerInEmbedderData()";
   i::Handle<i::FixedArray> data = EmbedderDataFor(this, index, true, location);
   data->set(index, EncodeAlignedAsSmi(value, location));
-  ASSERT_EQ(value, GetAlignedPointerFromEmbedderData(index));
+  DCHECK_EQ(value, GetAlignedPointerFromEmbedderData(index));
 }
 
 
@@ -727,7 +716,6 @@
 // NeanderObject constructor.  When you add one to the site calling the
 // constructor you should check that you ensured the VM was not dead first.
 NeanderObject::NeanderObject(v8::internal::Isolate* isolate, int size) {
-  EnsureInitializedForIsolate(isolate, "v8::Nowhere");
   ENTER_V8(isolate);
   value_ = isolate->factory()->NewNeanderObject();
   i::Handle<i::FixedArray> elements = isolate->factory()->NewFixedArray(size);
@@ -751,8 +739,8 @@
 
 
 i::Object* NeanderArray::get(int offset) {
-  ASSERT(0 <= offset);
-  ASSERT(offset < length());
+  DCHECK(0 <= offset);
+  DCHECK(offset < length());
   return obj_.get(offset + 1);
 }
 
@@ -811,7 +799,7 @@
 }
 
 
-void Template::Set(v8::Handle<String> name,
+void Template::Set(v8::Handle<Name> name,
                    v8::Handle<Data> value,
                    v8::PropertyAttribute attribute) {
   i::Isolate* isolate = i::Isolate::Current();
@@ -828,15 +816,17 @@
 
 
 void Template::SetAccessorProperty(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<FunctionTemplate> getter,
     v8::Local<FunctionTemplate> setter,
     v8::PropertyAttribute attribute,
     v8::AccessControl access_control) {
+  // TODO(verwaest): Remove |access_control|.
+  DCHECK_EQ(v8::DEFAULT, access_control);
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ENTER_V8(isolate);
-  ASSERT(!name.IsEmpty());
-  ASSERT(!getter.IsEmpty() || !setter.IsEmpty());
+  DCHECK(!name.IsEmpty());
+  DCHECK(!getter.IsEmpty() || !setter.IsEmpty());
   i::HandleScope scope(isolate);
   const int kSize = 5;
   v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
@@ -844,8 +834,7 @@
     name,
     getter,
     setter,
-    v8::Integer::New(v8_isolate, attribute),
-    v8::Integer::New(v8_isolate, access_control)};
+    v8::Integer::New(v8_isolate, attribute)};
   TemplateSet(isolate, this, kSize, data);
 }
 
@@ -919,7 +908,6 @@
     v8::Handle<Signature> signature,
     int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::FunctionTemplate::New()");
   LOG_API(i_isolate, "FunctionTemplate::New");
   ENTER_V8(i_isolate);
   return FunctionTemplateNew(
@@ -931,7 +919,6 @@
                                 Handle<FunctionTemplate> receiver, int argc,
                                 Handle<FunctionTemplate> argv[]) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::Signature::New()");
   LOG_API(i_isolate, "Signature::New");
   ENTER_V8(i_isolate);
   i::Handle<i::Struct> struct_obj =
@@ -1082,7 +1069,6 @@
 
 Local<TypeSwitch> TypeSwitch::New(int argc, Handle<FunctionTemplate> types[]) {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::TypeSwitch::New()");
   LOG_API(isolate, "TypeSwitch::New");
   ENTER_V8(isolate);
   i::Handle<i::FixedArray> vector = isolate->factory()->NewFixedArray(argc);
@@ -1138,7 +1124,7 @@
 
 static i::Handle<i::AccessorInfo> SetAccessorInfoProperties(
     i::Handle<i::AccessorInfo> obj,
-    v8::Handle<String> name,
+    v8::Handle<Name> name,
     v8::AccessControl settings,
     v8::PropertyAttribute attributes,
     v8::Handle<AccessorSignature> signature) {
@@ -1155,7 +1141,7 @@
 
 template<typename Getter, typename Setter>
 static i::Handle<i::AccessorInfo> MakeAccessorInfo(
-    v8::Handle<String> name,
+    v8::Handle<Name> name,
     Getter getter,
     Setter setter,
     v8::Handle<Value> data,
@@ -1176,7 +1162,7 @@
 
 
 static i::Handle<i::AccessorInfo> MakeAccessorInfo(
-    v8::Handle<String> name,
+    v8::Handle<Name> name,
     v8::Handle<v8::DeclaredAccessorDescriptor> descriptor,
     void* setter_ignored,
     void* data_ignored,
@@ -1263,7 +1249,6 @@
 Local<ObjectTemplate> ObjectTemplate::New(
     i::Isolate* isolate,
     v8::Handle<FunctionTemplate> constructor) {
-  EnsureInitializedForIsolate(isolate, "v8::ObjectTemplate::New()");
   LOG_API(isolate, "ObjectTemplate::New");
   ENTER_V8(isolate);
   i::Handle<i::Struct> struct_obj =
@@ -1327,10 +1312,10 @@
 }
 
 
-template<typename Setter, typename Getter, typename Data, typename Template>
+template<typename Getter, typename Setter, typename Data, typename Template>
 static bool TemplateSetAccessor(
     Template* template_obj,
-    v8::Local<String> name,
+    v8::Local<Name> name,
     Getter getter,
     Setter setter,
     Data data,
@@ -1350,7 +1335,7 @@
 
 
 bool Template::SetDeclaredAccessor(
-    Local<String> name,
+    Local<Name> name,
     Local<DeclaredAccessorDescriptor> descriptor,
     PropertyAttribute attribute,
     Local<AccessorSignature> signature,
@@ -1373,6 +1358,18 @@
 }
 
 
+void Template::SetNativeDataProperty(v8::Local<Name> name,
+                                     AccessorNameGetterCallback getter,
+                                     AccessorNameSetterCallback setter,
+                                     v8::Handle<Value> data,
+                                     PropertyAttribute attribute,
+                                     v8::Local<AccessorSignature> signature,
+                                     AccessControl settings) {
+  TemplateSetAccessor(
+      this, name, getter, setter, data, settings, attribute, signature);
+}
+
+
 void ObjectTemplate::SetAccessor(v8::Handle<String> name,
                                  AccessorGetterCallback getter,
                                  AccessorSetterCallback setter,
@@ -1385,6 +1382,18 @@
 }
 
 
+void ObjectTemplate::SetAccessor(v8::Handle<Name> name,
+                                 AccessorNameGetterCallback getter,
+                                 AccessorNameSetterCallback setter,
+                                 v8::Handle<Value> data,
+                                 AccessControl settings,
+                                 PropertyAttribute attribute,
+                                 v8::Handle<AccessorSignature> signature) {
+  TemplateSetAccessor(
+      this, name, getter, setter, data, settings, attribute, signature);
+}
+
+
 void ObjectTemplate::SetNamedPropertyHandler(
     NamedPropertyGetterCallback getter,
     NamedPropertySetterCallback setter,
@@ -1557,6 +1566,20 @@
 }
 
 
+ScriptCompiler::StreamedSource::StreamedSource(ExternalSourceStream* stream,
+                                               Encoding encoding)
+    : impl_(new i::StreamedSource(stream, encoding)) {}
+
+
+ScriptCompiler::StreamedSource::~StreamedSource() { delete impl_; }
+
+
+const ScriptCompiler::CachedData*
+ScriptCompiler::StreamedSource::GetCachedData() const {
+  return impl_->cached_data.get();
+}
+
+
 Local<Script> UnboundScript::BindToCurrentContext() {
   i::Handle<i::HeapObject> obj =
       i::Handle<i::HeapObject>::cast(Utils::OpenHandle(this));
@@ -1616,6 +1639,38 @@
 }
 
 
+Handle<Value> UnboundScript::GetSourceURL() {
+  i::Handle<i::SharedFunctionInfo> obj =
+      i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
+  i::Isolate* isolate = obj->GetIsolate();
+  ON_BAILOUT(isolate, "v8::UnboundScript::GetSourceURL()",
+             return Handle<String>());
+  LOG_API(isolate, "UnboundScript::GetSourceURL");
+  if (obj->script()->IsScript()) {
+    i::Object* url = i::Script::cast(obj->script())->source_url();
+    return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
+  } else {
+    return Handle<String>();
+  }
+}
+
+
+Handle<Value> UnboundScript::GetSourceMappingURL() {
+  i::Handle<i::SharedFunctionInfo> obj =
+      i::Handle<i::SharedFunctionInfo>::cast(Utils::OpenHandle(this));
+  i::Isolate* isolate = obj->GetIsolate();
+  ON_BAILOUT(isolate, "v8::UnboundScript::GetSourceMappingURL()",
+             return Handle<String>());
+  LOG_API(isolate, "UnboundScript::GetSourceMappingURL");
+  if (obj->script()->IsScript()) {
+    i::Object* url = i::Script::cast(obj->script())->source_mapping_url();
+    return Utils::ToLocal(i::Handle<i::Object>(url, isolate));
+  } else {
+    return Handle<String>();
+  }
+}
+
+
 Local<Value> Script::Run() {
   i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
   // If execution is terminating, Compile(..)->Run() requires this
@@ -1625,13 +1680,11 @@
   ON_BAILOUT(isolate, "v8::Script::Run()", return Local<Value>());
   LOG_API(isolate, "Script::Run");
   ENTER_V8(isolate);
-  i::Logger::TimerEventScope timer_scope(
-      isolate, i::Logger::TimerEventScope::v8_execute);
+  i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(obj);
   EXCEPTION_PREAMBLE(isolate);
-  i::Handle<i::Object> receiver(
-      isolate->context()->global_proxy(), isolate);
+  i::Handle<i::Object> receiver(isolate->global_proxy(), isolate);
   i::Handle<i::Object> result;
   has_pending_exception = !i::Execution::Call(
       isolate, fun, receiver, 0, NULL).ToHandle(&result);
@@ -1651,43 +1704,25 @@
     Isolate* v8_isolate,
     Source* source,
     CompileOptions options) {
-  i::ScriptData* script_data_impl = NULL;
-  i::CachedDataMode cached_data_mode = i::NO_CACHED_DATA;
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
   ON_BAILOUT(isolate, "v8::ScriptCompiler::CompileUnbound()",
              return Local<UnboundScript>());
-  if (options & kProduceDataToCache) {
-    cached_data_mode = i::PRODUCE_CACHED_DATA;
-    ASSERT(source->cached_data == NULL);
-    if (source->cached_data) {
-      // Asked to produce cached data even though there is some already -> not
-      // good. Fail the compilation.
-      EXCEPTION_PREAMBLE(isolate);
-      i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(
-          "invalid_cached_data", isolate->factory()->NewJSArray(0));
-      isolate->Throw(*result);
-      isolate->ReportPendingMessages();
-      has_pending_exception = true;
-      EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
-    }
-  } else if (source->cached_data) {
-    cached_data_mode = i::CONSUME_CACHED_DATA;
-    // ScriptData takes care of aligning, in case the data is not aligned
-    // correctly.
-    script_data_impl = i::ScriptData::New(
-        reinterpret_cast<const char*>(source->cached_data->data),
-        source->cached_data->length);
-    // If the cached data is not valid, fail the compilation.
-    if (script_data_impl == NULL || !script_data_impl->SanityCheck()) {
-      EXCEPTION_PREAMBLE(isolate);
-      i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(
-          "invalid_cached_data", isolate->factory()->NewJSArray(0));
-      isolate->Throw(*result);
-      isolate->ReportPendingMessages();
-      delete script_data_impl;
-      has_pending_exception = true;
-      EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
-    }
+
+  // Support the old API for a transition period:
+  // - kProduceDataToCache -> kProduceParserCache
+  // - kNoCompileOptions + cached_data != NULL -> kConsumeParserCache
+  if (options == kProduceDataToCache) {
+    options = kProduceParserCache;
+  } else if (options == kNoCompileOptions && source->cached_data) {
+    options = kConsumeParserCache;
+  }
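+  // For example (illustrative): CompileUnbound(isolate, source,
+  // kProduceDataToCache) now takes the kProduceParserCache path, and a later
+  // call that attaches the produced cached_data to its Source and passes
+  // kNoCompileOptions is treated as kConsumeParserCache.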
+
+  i::ScriptData* script_data = NULL;
+  if (options == kConsumeParserCache || options == kConsumeCodeCache) {
+    DCHECK(source->cached_data);
+    // ScriptData takes care of pointer-aligning the data.
+    script_data = new i::ScriptData(source->cached_data->data,
+                                    source->cached_data->length);
   }
 
   i::Handle<i::String> str = Utils::OpenHandle(*(source->source_string));
@@ -1715,36 +1750,30 @@
           source->resource_is_shared_cross_origin == v8::True(v8_isolate);
     }
     EXCEPTION_PREAMBLE(isolate);
-    i::Handle<i::SharedFunctionInfo> result =
-        i::Compiler::CompileScript(str,
-                                   name_obj,
-                                   line_offset,
-                                   column_offset,
-                                   is_shared_cross_origin,
-                                   isolate->global_context(),
-                                   NULL,
-                                   &script_data_impl,
-                                   cached_data_mode,
-                                   i::NOT_NATIVES_CODE);
+    i::Handle<i::SharedFunctionInfo> result = i::Compiler::CompileScript(
+        str, name_obj, line_offset, column_offset, is_shared_cross_origin,
+        isolate->global_context(), NULL, &script_data, options,
+        i::NOT_NATIVES_CODE);
     has_pending_exception = result.is_null();
-    if (has_pending_exception && cached_data_mode == i::CONSUME_CACHED_DATA) {
+    if (has_pending_exception && script_data != NULL) {
       // This case won't happen during normal operation; we have compiled
       // successfully and produced cached data, but the second compilation
       // of the same source code fails.
-      delete script_data_impl;
-      script_data_impl = NULL;
+      delete script_data;
+      script_data = NULL;
     }
     EXCEPTION_BAILOUT_CHECK(isolate, Local<UnboundScript>());
     raw_result = *result;
-    if ((options & kProduceDataToCache) && script_data_impl != NULL) {
-      // script_data_impl now contains the data that was generated. source will
+
+    if ((options == kProduceParserCache || options == kProduceCodeCache) &&
+        script_data != NULL) {
+      // script_data now contains the data that was generated. source will
       // take ownership.
       source->cached_data = new CachedData(
-          reinterpret_cast<const uint8_t*>(script_data_impl->Data()),
-          script_data_impl->Length(), CachedData::BufferOwned);
-      script_data_impl->owns_store_ = false;
+          script_data->data(), script_data->length(), CachedData::BufferOwned);
+      script_data->ReleaseDataOwnership();
     }
-    delete script_data_impl;
+    delete script_data;
   }
   i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
   return ToApiHandle<UnboundScript>(result);
@@ -1765,6 +1794,89 @@
 }
 
 
+ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
+    Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  if (!isolate->global_context().is_null() &&
+      !isolate->global_context()->IsNativeContext()) {
+    // The context chain is non-trivial, and constructing the corresponding
+    // non-trivial Scope chain outside the V8 heap is not implemented. Don't
+    // stream the script. This will only occur if Harmony scoping is enabled and
+    // a previous script has introduced "let" or "const" variables. TODO(marja):
+    // Implement externalizing ScopeInfos and constructing non-trivial Scope
+    // chains independent of the V8 heap so that we can also stream in this
+    // case.
+    return NULL;
+  }
+  return new i::BackgroundParsingTask(source->impl(), options,
+                                      i::FLAG_stack_size, isolate);
+}
+
+
+Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
+                                      StreamedSource* v8_source,
+                                      Handle<String> full_source_string,
+                                      const ScriptOrigin& origin) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
+  i::StreamedSource* source = v8_source->impl();
+  ON_BAILOUT(isolate, "v8::ScriptCompiler::Compile()", return Local<Script>());
+  LOG_API(isolate, "ScriptCompiler::Compile()");
+  ENTER_V8(isolate);
+  i::SharedFunctionInfo* raw_result = NULL;
+
+  {
+    i::HandleScope scope(isolate);
+    i::Handle<i::String> str = Utils::OpenHandle(*(full_source_string));
+    i::Handle<i::Script> script = isolate->factory()->NewScript(str);
+    if (!origin.ResourceName().IsEmpty()) {
+      script->set_name(*Utils::OpenHandle(*(origin.ResourceName())));
+    }
+    if (!origin.ResourceLineOffset().IsEmpty()) {
+      script->set_line_offset(i::Smi::FromInt(
+          static_cast<int>(origin.ResourceLineOffset()->Value())));
+    }
+    if (!origin.ResourceColumnOffset().IsEmpty()) {
+      script->set_column_offset(i::Smi::FromInt(
+          static_cast<int>(origin.ResourceColumnOffset()->Value())));
+    }
+    if (!origin.ResourceIsSharedCrossOrigin().IsEmpty()) {
+      script->set_is_shared_cross_origin(origin.ResourceIsSharedCrossOrigin() ==
+                                         v8::True(v8_isolate));
+    }
+    source->info->set_script(script);
+    source->info->SetContext(isolate->global_context());
+
+    EXCEPTION_PREAMBLE(isolate);
+
+    // Do the parsing tasks which need to be done on the main thread. This will
+    // also handle parse errors.
+    source->parser->Internalize();
+
+    i::Handle<i::SharedFunctionInfo> result =
+        i::Handle<i::SharedFunctionInfo>::null();
+    if (source->info->function() != NULL) {
+      // Parsing has succeeded.
+      result =
+          i::Compiler::CompileStreamedScript(source->info.get(), str->length());
+    }
+    has_pending_exception = result.is_null();
+    if (has_pending_exception) isolate->ReportPendingMessages();
+    EXCEPTION_BAILOUT_CHECK(isolate, Local<Script>());
+
+    raw_result = *result;
+    // The Handle<Script> will go out of scope soon; make sure CompilationInfo
+    // doesn't point to it.
+    source->info->set_script(i::Handle<i::Script>());
+  }  // HandleScope goes out of scope.
+  i::Handle<i::SharedFunctionInfo> result(raw_result, isolate);
+  Local<UnboundScript> generic = ToApiHandle<UnboundScript>(result);
+  if (generic.IsEmpty()) {
+    return Local<Script>();
+  }
+  return generic->BindToCurrentContext();
+}
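+
+// Typical embedder flow for the streaming API above (a sketch; MyStream is a
+// hypothetical ExternalSourceStream subclass, and Run() would normally be
+// invoked on a background thread rather than inline):
+//
+//   ScriptCompiler::StreamedSource streamed(
+//       new MyStream(), ScriptCompiler::StreamedSource::UTF8);
+//   ScriptCompiler::ScriptStreamingTask* task =
+//       ScriptCompiler::StartStreamingScript(isolate, &streamed);
+//   if (task != NULL) {  // NULL when streaming is not possible, see above.
+//     task->Run();
+//     delete task;
+//     Local<Script> script =
+//         ScriptCompiler::Compile(isolate, &streamed, full_source, origin);
+//   }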
+
+
 Local<Script> Script::Compile(v8::Handle<String> source,
                               v8::ScriptOrigin* origin) {
   i::Handle<i::String> str = Utils::OpenHandle(*source);
@@ -1799,26 +1911,17 @@
       capture_message_(true),
       rethrow_(false),
       has_terminated_(false) {
-  Reset();
-  js_stack_comparable_address_ = this;
-#ifdef V8_USE_ADDRESS_SANITIZER
-  void* asan_fake_stack_handle = __asan_get_current_fake_stack();
-  if (asan_fake_stack_handle != NULL) {
-    js_stack_comparable_address_ = __asan_addr_is_in_fake_stack(
-        asan_fake_stack_handle, js_stack_comparable_address_, NULL, NULL);
-    CHECK(js_stack_comparable_address_ != NULL);
-  }
-#endif
+  ResetInternal();
   // Special handling for simulators which have a separate JS stack.
-  js_stack_comparable_address_ = reinterpret_cast<void*>(
-      v8::internal::SimulatorStack::RegisterCTryCatch(
-          reinterpret_cast<uintptr_t>(js_stack_comparable_address_)));
+  js_stack_comparable_address_ =
+      reinterpret_cast<void*>(v8::internal::SimulatorStack::RegisterCTryCatch(
+          v8::internal::GetCurrentStackPosition()));
   isolate_->RegisterTryCatchHandler(this);
 }
 
 
 v8::TryCatch::~TryCatch() {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   if (rethrow_) {
     v8::Isolate* isolate = reinterpret_cast<Isolate*>(isolate_);
     v8::HandleScope scope(isolate);
@@ -1834,8 +1937,14 @@
     isolate_->UnregisterTryCatchHandler(this);
     v8::internal::SimulatorStack::UnregisterCTryCatch();
     reinterpret_cast<Isolate*>(isolate_)->ThrowException(exc);
-    ASSERT(!isolate_->thread_local_top()->rethrowing_message_);
+    DCHECK(!isolate_->thread_local_top()->rethrowing_message_);
   } else {
+    if (HasCaught() && isolate_->has_scheduled_exception()) {
+      // If an exception was caught but is still scheduled because no API call
+      // promoted it, then it is canceled to prevent it from being propagated.
+      // Note that this will not cancel termination exceptions.
+      isolate_->CancelScheduledExceptionFromTryCatch(this);
+    }
     isolate_->UnregisterTryCatchHandler(this);
     v8::internal::SimulatorStack::UnregisterCTryCatch();
   }
@@ -1865,7 +1974,7 @@
 
 
 v8::Local<Value> v8::TryCatch::Exception() const {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     // Check for out of memory exception.
     i::Object* exception = reinterpret_cast<i::Object*>(exception_);
@@ -1877,14 +1986,18 @@
 
 
 v8::Local<Value> v8::TryCatch::StackTrace() const {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   if (HasCaught()) {
     i::Object* raw_obj = reinterpret_cast<i::Object*>(exception_);
     if (!raw_obj->IsJSObject()) return v8::Local<Value>();
     i::HandleScope scope(isolate_);
     i::Handle<i::JSObject> obj(i::JSObject::cast(raw_obj), isolate_);
     i::Handle<i::String> name = isolate_->factory()->stack_string();
-    if (!i::JSReceiver::HasProperty(obj, name)) return v8::Local<Value>();
+    EXCEPTION_PREAMBLE(isolate_);
+    Maybe<bool> maybe = i::JSReceiver::HasProperty(obj, name);
+    has_pending_exception = !maybe.has_value;
+    EXCEPTION_BAILOUT_CHECK(isolate_, v8::Local<Value>());
+    if (!maybe.value) return v8::Local<Value>();
     i::Handle<i::Object> value;
     if (!i::Object::GetProperty(obj, name).ToHandle(&value)) {
       return v8::Local<Value>();
@@ -1897,9 +2010,9 @@
 
 
 v8::Local<v8::Message> v8::TryCatch::Message() const {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
   i::Object* message = reinterpret_cast<i::Object*>(message_obj_);
-  ASSERT(message->IsJSMessageObject() || message->IsTheHole());
+  DCHECK(message->IsJSMessageObject() || message->IsTheHole());
   if (HasCaught() && !message->IsTheHole()) {
     return v8::Utils::MessageToLocal(i::Handle<i::Object>(message, isolate_));
   } else {
@@ -1909,7 +2022,18 @@
 
 
 void v8::TryCatch::Reset() {
-  ASSERT(isolate_ == i::Isolate::Current());
+  DCHECK(isolate_ == i::Isolate::Current());
+  if (!rethrow_ && HasCaught() && isolate_->has_scheduled_exception()) {
+    // If an exception was caught but is still scheduled because no API call
+    // promoted it, then it is canceled to prevent it from being propagated.
+    // Note that this will not cancel termination exceptions.
+    isolate_->CancelScheduledExceptionFromTryCatch(this);
+  }
+  ResetInternal();
+}
+
+
+void v8::TryCatch::ResetInternal() {
   i::Object* the_hole = isolate_->heap()->the_hole_value();
   exception_ = the_hole;
   message_obj_ = the_hole;
@@ -1944,19 +2068,30 @@
 }
 
 
-v8::Handle<Value> Message::GetScriptResourceName() const {
+ScriptOrigin Message::GetScriptOrigin() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
   i::Handle<i::JSMessageObject> message =
       i::Handle<i::JSMessageObject>::cast(Utils::OpenHandle(this));
-  // Return this.script.name.
-  i::Handle<i::JSValue> script =
-      i::Handle<i::JSValue>::cast(i::Handle<i::Object>(message->script(),
-                                                       isolate));
-  i::Handle<i::Object> resource_name(i::Script::cast(script->value())->name(),
-                                     isolate);
-  return scope.Escape(Utils::ToLocal(resource_name));
+  i::Handle<i::Object> script_wrapper =
+      i::Handle<i::Object>(message->script(), isolate);
+  i::Handle<i::JSValue> script_value =
+      i::Handle<i::JSValue>::cast(script_wrapper);
+  i::Handle<i::Script> script(i::Script::cast(script_value->value()));
+  i::Handle<i::Object> scriptName(i::Script::GetNameOrSourceURL(script));
+  v8::Isolate* v8_isolate =
+      reinterpret_cast<v8::Isolate*>(script->GetIsolate());
+  v8::ScriptOrigin origin(
+      Utils::ToLocal(scriptName),
+      v8::Integer::New(v8_isolate, script->line_offset()->value()),
+      v8::Integer::New(v8_isolate, script->column_offset()->value()),
+      Handle<Boolean>(),
+      v8::Integer::New(v8_isolate, script->id()->value()));
+  return origin;
+}
+
+
+v8::Handle<Value> Message::GetScriptResourceName() const {
+  return GetScriptOrigin().ResourceName();
 }
 
 
@@ -1994,7 +2129,7 @@
   i::Handle<i::Object> argv[] = { data };
   return CallV8HeapFunction(name,
                             i::Isolate::Current()->js_builtins_object(),
-                            ARRAY_SIZE(argv),
+                            arraysize(argv),
                             argv);
 }
 
@@ -2036,6 +2171,7 @@
 
 int Message::GetStartColumn() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Message::GetStartColumn()", return kNoColumnInfo);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -2050,6 +2186,7 @@
 
 int Message::GetEndColumn() const {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Message::GetEndColumn()", return kNoColumnInfo);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> data_obj = Utils::OpenHandle(this);
@@ -2149,109 +2286,77 @@
 
 // --- S t a c k F r a m e ---
 
-int StackFrame::GetLineNumber() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+static int getIntProperty(const StackFrame* f, const char* propertyName,
+                          int defaultValue) {
+  i::Isolate* isolate = Utils::OpenHandle(f)->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> line = i::Object::GetProperty(
-      isolate, self, "lineNumber").ToHandleChecked();
-  if (!line->IsSmi()) {
-    return Message::kNoLineNumberInfo;
-  }
-  return i::Smi::cast(*line)->value();
+  i::Handle<i::JSObject> self = Utils::OpenHandle(f);
+  i::Handle<i::Object> obj =
+      i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+  return obj->IsSmi() ? i::Smi::cast(*obj)->value() : defaultValue;
+}
+
+
+int StackFrame::GetLineNumber() const {
+  return getIntProperty(this, "lineNumber", Message::kNoLineNumberInfo);
 }
 
 
 int StackFrame::GetColumn() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  i::HandleScope scope(isolate);
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> column = i::Object::GetProperty(
-      isolate, self, "column").ToHandleChecked();
-  if (!column->IsSmi()) {
-    return Message::kNoColumnInfo;
-  }
-  return i::Smi::cast(*column)->value();
+  return getIntProperty(this, "column", Message::kNoColumnInfo);
 }
 
 
 int StackFrame::GetScriptId() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  return getIntProperty(this, "scriptId", Message::kNoScriptIdInfo);
+}
+
+
+static Local<String> getStringProperty(const StackFrame* f,
+                                       const char* propertyName) {
+  i::Isolate* isolate = Utils::OpenHandle(f)->GetIsolate();
   ENTER_V8(isolate);
-  i::HandleScope scope(isolate);
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> scriptId = i::Object::GetProperty(
-      isolate, self, "scriptId").ToHandleChecked();
-  if (!scriptId->IsSmi()) {
-    return Message::kNoScriptIdInfo;
-  }
-  return i::Smi::cast(*scriptId)->value();
+  EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
+  i::Handle<i::JSObject> self = Utils::OpenHandle(f);
+  i::Handle<i::Object> obj =
+      i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+  return obj->IsString()
+             ? scope.Escape(Local<String>::Cast(Utils::ToLocal(obj)))
+             : Local<String>();
 }
 
 
 Local<String> StackFrame::GetScriptName() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> name = i::Object::GetProperty(
-      isolate, self, "scriptName").ToHandleChecked();
-  if (!name->IsString()) {
-    return Local<String>();
-  }
-  return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
+  return getStringProperty(this, "scriptName");
 }
 
 
 Local<String> StackFrame::GetScriptNameOrSourceURL() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> name = i::Object::GetProperty(
-      isolate, self, "scriptNameOrSourceURL").ToHandleChecked();
-  if (!name->IsString()) {
-    return Local<String>();
-  }
-  return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
+  return getStringProperty(this, "scriptNameOrSourceURL");
 }
 
 
 Local<String> StackFrame::GetFunctionName() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> name = i::Object::GetProperty(
-      isolate, self, "functionName").ToHandleChecked();
-  if (!name->IsString()) {
-    return Local<String>();
-  }
-  return scope.Escape(Local<String>::Cast(Utils::ToLocal(name)));
+  return getStringProperty(this, "functionName");
 }
 
 
-bool StackFrame::IsEval() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+static bool getBoolProperty(const StackFrame* f, const char* propertyName) {
+  i::Isolate* isolate = Utils::OpenHandle(f)->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> is_eval = i::Object::GetProperty(
-      isolate, self, "isEval").ToHandleChecked();
-  return is_eval->IsTrue();
+  i::Handle<i::JSObject> self = Utils::OpenHandle(f);
+  i::Handle<i::Object> obj =
+      i::Object::GetProperty(isolate, self, propertyName).ToHandleChecked();
+  return obj->IsTrue();
 }
 
+bool StackFrame::IsEval() const { return getBoolProperty(this, "isEval"); }
+
 
 bool StackFrame::IsConstructor() const {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ENTER_V8(isolate);
-  i::HandleScope scope(isolate);
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> is_constructor = i::Object::GetProperty(
-      isolate, self, "isConstructor").ToHandleChecked();
-  return is_constructor->IsTrue();
+  return getBoolProperty(this, "isConstructor");
 }
 
 
@@ -2260,7 +2365,6 @@
 Local<Value> JSON::Parse(Local<String> json_string) {
   i::Handle<i::String> string = Utils::OpenHandle(*json_string);
   i::Isolate* isolate = string->GetIsolate();
-  EnsureInitializedForIsolate(isolate, "v8::JSON::Parse");
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::String> source = i::String::Flatten(string);
@@ -2280,14 +2384,14 @@
 
 bool Value::FullIsUndefined() const {
   bool result = Utils::OpenHandle(this)->IsUndefined();
-  ASSERT_EQ(result, QuickIsUndefined());
+  DCHECK_EQ(result, QuickIsUndefined());
   return result;
 }
 
 
 bool Value::FullIsNull() const {
   bool result = Utils::OpenHandle(this)->IsNull();
-  ASSERT_EQ(result, QuickIsNull());
+  DCHECK_EQ(result, QuickIsNull());
   return result;
 }
 
@@ -2307,9 +2411,14 @@
 }
 
 
+bool Value::IsName() const {
+  return Utils::OpenHandle(this)->IsName();
+}
+
+
 bool Value::FullIsString() const {
   bool result = Utils::OpenHandle(this)->IsString();
-  ASSERT_EQ(result, QuickIsString());
+  DCHECK_EQ(result, QuickIsString());
   return result;
 }
 
@@ -2366,6 +2475,28 @@
 }
 
 
+#define VALUE_IS_SPECIFIC_TYPE(Type, Class)                            \
+  bool Value::Is##Type() const {                                       \
+    i::Handle<i::Object> obj = Utils::OpenHandle(this);                \
+    if (!obj->IsHeapObject()) return false;                            \
+    i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();     \
+    return obj->HasSpecificClassOf(isolate->heap()->Class##_string()); \
+  }
+
+VALUE_IS_SPECIFIC_TYPE(ArgumentsObject, Arguments)
+VALUE_IS_SPECIFIC_TYPE(BooleanObject, Boolean)
+VALUE_IS_SPECIFIC_TYPE(NumberObject, Number)
+VALUE_IS_SPECIFIC_TYPE(StringObject, String)
+VALUE_IS_SPECIFIC_TYPE(SymbolObject, Symbol)
+VALUE_IS_SPECIFIC_TYPE(Date, Date)
+VALUE_IS_SPECIFIC_TYPE(Map, Map)
+VALUE_IS_SPECIFIC_TYPE(Set, Set)
+VALUE_IS_SPECIFIC_TYPE(WeakMap, WeakMap)
+VALUE_IS_SPECIFIC_TYPE(WeakSet, WeakSet)
+
+#undef VALUE_IS_SPECIFIC_TYPE
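+
+// Each use above expands to the same shape as the hand-written predicates
+// this change deletes below; e.g. VALUE_IS_SPECIFIC_TYPE(Date, Date) yields:
+//
+//   bool Value::IsDate() const {
+//     i::Handle<i::Object> obj = Utils::OpenHandle(this);
+//     if (!obj->IsHeapObject()) return false;
+//     i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
+//     return obj->HasSpecificClassOf(isolate->heap()->Date_string());
+//   }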
+
+
 bool Value::IsBoolean() const {
   return Utils::OpenHandle(this)->IsBoolean();
 }
@@ -2400,38 +2531,6 @@
 }
 
 
-bool Value::IsDate() const {
-  i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  if (!obj->IsHeapObject()) return false;
-  i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
-  return obj->HasSpecificClassOf(isolate->heap()->Date_string());
-}
-
-
-bool Value::IsStringObject() const {
-  i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  if (!obj->IsHeapObject()) return false;
-  i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
-  return obj->HasSpecificClassOf(isolate->heap()->String_string());
-}
-
-
-bool Value::IsSymbolObject() const {
-  i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  if (!obj->IsHeapObject()) return false;
-  i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
-  return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
-}
-
-
-bool Value::IsNumberObject() const {
-  i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  if (!obj->IsHeapObject()) return false;
-  i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
-  return obj->HasSpecificClassOf(isolate->heap()->Number_string());
-}
-
-
 static bool CheckConstructor(i::Isolate* isolate,
                              i::Handle<i::JSObject> obj,
                              const char* class_name) {
@@ -2463,14 +2562,6 @@
 }
 
 
-bool Value::IsBooleanObject() const {
-  i::Handle<i::Object> obj = Utils::OpenHandle(this);
-  if (!obj->IsHeapObject()) return false;
-  i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
-  return obj->HasSpecificClassOf(isolate->heap()->Boolean_string());
-}
-
-
 bool Value::IsRegExp() const {
   i::Handle<i::Object> obj = Utils::OpenHandle(this);
   return obj->IsJSRegExp();
@@ -2615,6 +2706,14 @@
 }
 
 
+void v8::Name::CheckCast(v8::Value* that) {
+  i::Handle<i::Object> obj = Utils::OpenHandle(that);
+  Utils::ApiCheck(obj->IsName(),
+                  "v8::Name::Cast()",
+                  "Could not convert to name");
+}
+
+
 void v8::String::CheckCast(v8::Value* that) {
   i::Handle<i::Object> obj = Utils::OpenHandle(that);
   Utils::ApiCheck(obj->IsString(),
@@ -2797,7 +2896,7 @@
     EXCEPTION_PREAMBLE(isolate);
     has_pending_exception = !i::Execution::ToNumber(
         isolate, obj).ToHandle(&num);
-    EXCEPTION_BAILOUT_CHECK(isolate, i::OS::nan_value());
+    EXCEPTION_BAILOUT_CHECK(isolate, base::OS::nan_value());
   }
   return num->Number();
 }
@@ -2931,7 +3030,7 @@
   EXCEPTION_PREAMBLE(isolate);
   i::Handle<i::Object> result;
   has_pending_exception = !CallV8HeapFunction(
-      "EQUALS", obj, ARRAY_SIZE(args), args).ToHandle(&result);
+      "EQUALS", obj, arraysize(args), args).ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return *result == i::Smi::FromInt(i::EQUAL);
 }
@@ -3004,8 +3103,7 @@
 }
 
 
-bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
-                     v8::PropertyAttribute attribs) {
+bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::Set()", return false);
   ENTER_V8(isolate);
@@ -3014,13 +3112,9 @@
   i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
   EXCEPTION_PREAMBLE(isolate);
-  has_pending_exception = i::Runtime::SetObjectProperty(
-      isolate,
-      self,
-      key_obj,
-      value_obj,
-      static_cast<PropertyAttributes>(attribs),
-      i::SLOPPY).is_null();
+  has_pending_exception =
+      i::Runtime::SetObjectProperty(isolate, self, key_obj, value_obj,
+                                    i::SLOPPY).is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return true;
 }
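+
+// Note (a sketch based on the surrounding API): Set() no longer takes a
+// PropertyAttribute; callers that need attributes can use ForceSet(), which
+// now routes through i::Runtime::DefineObjectProperty (see below), e.g.:
+//
+//   obj->ForceSet(key, value, v8::ReadOnly);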
@@ -3052,7 +3146,7 @@
   i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
   i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
   EXCEPTION_PREAMBLE(isolate);
-  has_pending_exception = i::Runtime::ForceSetObjectProperty(
+  has_pending_exception = i::Runtime::DefineObjectProperty(
       self,
       key_obj,
       value_obj,
@@ -3142,10 +3236,33 @@
     EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
   }
   i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
-  PropertyAttributes result =
+  EXCEPTION_PREAMBLE(isolate);
+  Maybe<PropertyAttributes> result =
       i::JSReceiver::GetPropertyAttributes(self, key_name);
-  if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
-  return static_cast<PropertyAttribute>(result);
+  has_pending_exception = !result.has_value;
+  EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
+  if (result.value == ABSENT) return static_cast<PropertyAttribute>(NONE);
+  return static_cast<PropertyAttribute>(result.value);
+}
+
+
+Local<Value> v8::Object::GetOwnPropertyDescriptor(Local<String> key) {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Object::GetOwnPropertyDescriptor()",
+             return Local<Value>());
+  ENTER_V8(isolate);
+  i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
+  i::Handle<i::Name> key_name = Utils::OpenHandle(*key);
+  i::Handle<i::Object> args[] = { obj, key_name };
+  EXCEPTION_PREAMBLE(isolate);
+  i::Handle<i::Object> result;
+  has_pending_exception = !CallV8HeapFunction(
+      "ObjectGetOwnPropertyDescriptor",
+      isolate->factory()->undefined_value(),
+      arraysize(args),
+      args).ToHandle(&result);
+  EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+  return Utils::ToLocal(result);
 }
 
 
@@ -3154,8 +3271,8 @@
   ON_BAILOUT(isolate, "v8::Object::GetPrototype()", return Local<v8::Value>());
   ENTER_V8(isolate);
   i::Handle<i::Object> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> result(self->GetPrototype(isolate), isolate);
-  return Utils::ToLocal(result);
+  i::PrototypeIterator iter(isolate, self);
+  return Utils::ToLocal(i::PrototypeIterator::GetCurrent(iter));
 }
 
 
@@ -3169,8 +3286,8 @@
   // to propagate outside.
   TryCatch try_catch;
   EXCEPTION_PREAMBLE(isolate);
-  i::MaybeHandle<i::Object> result = i::JSObject::SetPrototype(
-      self, value_obj);
+  i::MaybeHandle<i::Object> result =
+      i::JSObject::SetPrototype(self, value_obj, false);
   has_pending_exception = result.is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return true;
@@ -3184,14 +3301,17 @@
              "v8::Object::FindInstanceInPrototypeChain()",
              return Local<v8::Object>());
   ENTER_V8(isolate);
-  i::JSObject* object = *Utils::OpenHandle(this);
+  i::PrototypeIterator iter(isolate, *Utils::OpenHandle(this),
+                            i::PrototypeIterator::START_AT_RECEIVER);
   i::FunctionTemplateInfo* tmpl_info = *Utils::OpenHandle(*tmpl);
-  while (!tmpl_info->IsTemplateFor(object)) {
-    i::Object* prototype = object->GetPrototype();
-    if (!prototype->IsJSObject()) return Local<Object>();
-    object = i::JSObject::cast(prototype);
+  while (!tmpl_info->IsTemplateFor(iter.GetCurrent())) {
+    iter.Advance();
+    if (iter.IsAtEnd()) {
+      return Local<Object>();
+    }
   }
-  return Utils::ToLocal(i::Handle<i::JSObject>(object));
+  return Utils::ToLocal(
+      i::handle(i::JSObject::cast(iter.GetCurrent()), isolate));
 }
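+
+// General form of the PrototypeIterator pattern used above (a sketch):
+//
+//   for (i::PrototypeIterator iter(isolate, *obj,
+//                                  i::PrototypeIterator::START_AT_RECEIVER);
+//        !iter.IsAtEnd(); iter.Advance()) {
+//     i::Object* current = iter.GetCurrent();
+//     // ... inspect current ...
+//   }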
 
 
@@ -3258,7 +3378,8 @@
     return v8::String::NewFromUtf8(isolate, "[object ]");
   } else {
     i::Handle<i::String> class_name = i::Handle<i::String>::cast(name);
-    if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Arguments"))) {
+    if (i::String::Equals(class_name,
+                          i_isolate->factory()->Arguments_string())) {
       return v8::String::NewFromUtf8(isolate, "[object Object]");
     } else {
       const char* prefix = "[object ";
@@ -3293,17 +3414,6 @@
 }
 
 
-Local<Value> v8::Object::GetConstructor() {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
-  ON_BAILOUT(isolate, "v8::Object::GetConstructor()",
-             return Local<v8::Function>());
-  ENTER_V8(isolate);
-  i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  i::Handle<i::Object> constructor(self->GetConstructor(), isolate);
-  return Utils::ToLocal(constructor);
-}
-
-
 Local<String> v8::Object::GetConstructorName() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::GetConstructorName()",
@@ -3379,15 +3489,19 @@
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasProperty()", return false);
   i::Handle<i::JSObject> self = Utils::OpenHandle(this);
-  return i::JSReceiver::HasElement(self, index);
+  EXCEPTION_PREAMBLE(isolate);
+  Maybe<bool> maybe = i::JSReceiver::HasElement(self, index);
+  has_pending_exception = !maybe.has_value;
+  EXCEPTION_BAILOUT_CHECK(isolate, false);
+  return maybe.value;
 }
 
 
-template<typename Setter, typename Getter, typename Data>
+template<typename Getter, typename Setter, typename Data>
 static inline bool ObjectSetAccessor(Object* obj,
-                                     Handle<String> name,
-                                     Setter getter,
-                                     Getter setter,
+                                     Handle<Name> name,
+                                     Getter getter,
+                                     Setter setter,
                                      Data data,
                                      AccessControl settings,
                                      PropertyAttribute attributes) {
@@ -3406,7 +3520,7 @@
       i::JSObject::SetAccessor(Utils::OpenHandle(obj), info),
       false);
   if (result->IsUndefined()) return false;
-  if (fast) i::JSObject::TransformToFastProperties(Utils::OpenHandle(obj), 0);
+  if (fast) i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0);
   return true;
 }
 
@@ -3422,7 +3536,18 @@
 }
 
 
-bool Object::SetDeclaredAccessor(Local<String> name,
+bool Object::SetAccessor(Handle<Name> name,
+                         AccessorNameGetterCallback getter,
+                         AccessorNameSetterCallback setter,
+                         v8::Handle<Value> data,
+                         AccessControl settings,
+                         PropertyAttribute attributes) {
+  return ObjectSetAccessor(
+      this, name, getter, setter, data, settings, attributes);
+}
+
+
+bool Object::SetDeclaredAccessor(Local<Name> name,
                                  Local<DeclaredAccessorDescriptor> descriptor,
                                  PropertyAttribute attributes,
                                  AccessControl settings) {
@@ -3432,11 +3557,13 @@
 }
 
 
-void Object::SetAccessorProperty(Local<String> name,
+void Object::SetAccessorProperty(Local<Name> name,
                                  Local<Function> getter,
                                  Handle<Function> setter,
                                  PropertyAttribute attribute,
                                  AccessControl settings) {
+  // TODO(verwaest): Remove |settings|.
+  DCHECK_EQ(v8::DEFAULT, settings);
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::SetAccessorProperty()", return);
   ENTER_V8(isolate);
@@ -3448,8 +3575,7 @@
                               v8::Utils::OpenHandle(*name),
                               getter_i,
                               setter_i,
-                              static_cast<PropertyAttributes>(attribute),
-                              settings);
+                              static_cast<PropertyAttributes>(attribute));
 }
 
 
@@ -3457,8 +3583,12 @@
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasOwnProperty()",
              return false);
-  return i::JSReceiver::HasOwnProperty(
-      Utils::OpenHandle(this), Utils::OpenHandle(*key));
+  EXCEPTION_PREAMBLE(isolate);
+  Maybe<bool> maybe = i::JSReceiver::HasOwnProperty(Utils::OpenHandle(this),
+                                                    Utils::OpenHandle(*key));
+  has_pending_exception = !maybe.has_value;
+  EXCEPTION_BAILOUT_CHECK(isolate, false);
+  return maybe.value;
 }
 
 
@@ -3466,8 +3596,12 @@
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
              return false);
-  return i::JSObject::HasRealNamedProperty(Utils::OpenHandle(this),
-                                           Utils::OpenHandle(*key));
+  EXCEPTION_PREAMBLE(isolate);
+  Maybe<bool> maybe = i::JSObject::HasRealNamedProperty(
+      Utils::OpenHandle(this), Utils::OpenHandle(*key));
+  has_pending_exception = !maybe.has_value;
+  EXCEPTION_BAILOUT_CHECK(isolate, false);
+  return maybe.value;
 }
 
 
@@ -3475,7 +3609,12 @@
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
              return false);
-  return i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index);
+  EXCEPTION_PREAMBLE(isolate);
+  Maybe<bool> maybe =
+      i::JSObject::HasRealElementProperty(Utils::OpenHandle(this), index);
+  has_pending_exception = !maybe.has_value;
+  EXCEPTION_BAILOUT_CHECK(isolate, false);
+  return maybe.value;
 }
 
 
@@ -3485,8 +3624,12 @@
              "v8::Object::HasRealNamedCallbackProperty()",
              return false);
   ENTER_V8(isolate);
-  return i::JSObject::HasRealNamedCallbackProperty(Utils::OpenHandle(this),
-                                                   Utils::OpenHandle(*key));
+  EXCEPTION_PREAMBLE(isolate);
+  Maybe<bool> maybe = i::JSObject::HasRealNamedCallbackProperty(
+      Utils::OpenHandle(this), Utils::OpenHandle(*key));
+  has_pending_exception = !maybe.has_value;
+  EXCEPTION_BAILOUT_CHECK(isolate, false);
+  return maybe.value;
 }
 
 
@@ -3506,26 +3649,15 @@
 }
 
 
-static Local<Value> GetPropertyByLookup(i::Isolate* isolate,
-                                        i::Handle<i::JSObject> receiver,
-                                        i::Handle<i::String> name,
-                                        i::LookupResult* lookup) {
-  if (!lookup->IsProperty()) {
-    // No real property was found.
-    return Local<Value>();
-  }
-
-  // If the property being looked up is a callback, it can throw
-  // an exception.
-  EXCEPTION_PREAMBLE(isolate);
-  i::LookupIterator it(
-      receiver, name, i::Handle<i::JSReceiver>(lookup->holder(), isolate),
-      i::LookupIterator::SKIP_INTERCEPTOR);
+static Local<Value> GetPropertyByLookup(i::LookupIterator* it) {
+  // If the property being looked up is a callback, it can throw an exception.
+  EXCEPTION_PREAMBLE(it->isolate());
   i::Handle<i::Object> result;
-  has_pending_exception = !i::Object::GetProperty(&it).ToHandle(&result);
-  EXCEPTION_BAILOUT_CHECK(isolate, Local<Value>());
+  has_pending_exception = !i::Object::GetProperty(it).ToHandle(&result);
+  EXCEPTION_BAILOUT_CHECK(it->isolate(), Local<Value>());
 
-  return Utils::ToLocal(result);
+  if (it->IsFound()) return Utils::ToLocal(result);
+  return Local<Value>();
 }
 
 
@@ -3538,9 +3670,12 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::LookupResult lookup(isolate);
-  self_obj->LookupRealNamedPropertyInPrototypes(key_obj, &lookup);
-  return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
+  i::PrototypeIterator iter(isolate, self_obj);
+  if (iter.IsAtEnd()) return Local<Value>();
+  i::Handle<i::Object> proto = i::PrototypeIterator::GetCurrent(iter);
+  i::LookupIterator it(self_obj, key_obj, i::Handle<i::JSReceiver>::cast(proto),
+                       i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+  return GetPropertyByLookup(&it);
 }
 
 
@@ -3551,9 +3686,9 @@
   ENTER_V8(isolate);
   i::Handle<i::JSObject> self_obj = Utils::OpenHandle(this);
   i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
-  i::LookupResult lookup(isolate);
-  self_obj->LookupRealNamedProperty(key_obj, &lookup);
-  return GetPropertyByLookup(isolate, self_obj, key_obj, &lookup);
+  i::LookupIterator it(self_obj, key_obj,
+                       i::LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+  return GetPropertyByLookup(&it);
 }
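
Both real-named-property lookups are rebuilt on LookupIterator with PROTOTYPE_CHAIN_SKIP_INTERCEPTOR, replacing the old LookupResult plumbing. From the embedder's side the contract should be unchanged; a hedged sketch with hypothetical handles:

    // An empty handle still means "no real property found" (or a pending
    // exception thrown by a callback during the lookup).
    v8::Local<v8::Value> own = obj->GetRealNamedProperty(key);
    v8::Local<v8::Value> inherited =
        obj->GetRealNamedPropertyInPrototypeChain(key);
    if (own.IsEmpty()) { /* not found on the object itself */ }
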
 
 
@@ -3573,7 +3708,7 @@
 
   i::Handle<i::Map> new_map = i::Map::Copy(i::Handle<i::Map>(obj->map()));
   new_map->set_is_access_check_needed(true);
-  obj->set_map(*new_map);
+  i::JSObject::MigrateToMap(obj, new_map);
 }
 
 
@@ -3846,8 +3981,7 @@
              return Local<v8::Value>());
   LOG_API(isolate, "Object::CallAsFunction");
   ENTER_V8(isolate);
-  i::Logger::TimerEventScope timer_scope(
-      isolate, i::Logger::TimerEventScope::v8_execute);
+  i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
@@ -3881,8 +4015,7 @@
              return Local<v8::Object>());
   LOG_API(isolate, "Object::CallAsConstructor");
   ENTER_V8(isolate);
-  i::Logger::TimerEventScope timer_scope(
-      isolate, i::Logger::TimerEventScope::v8_execute);
+  i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSObject> obj = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3909,7 +4042,7 @@
     has_pending_exception = !i::Execution::Call(
         isolate, fun, obj, argc, args).ToHandle(&returned);
     EXCEPTION_BAILOUT_CHECK_DO_CALLBACK(isolate, Local<v8::Object>());
-    ASSERT(!delegate->IsUndefined());
+    DCHECK(!delegate->IsUndefined());
     return Utils::ToLocal(scope.CloseAndEscape(returned));
   }
   return Local<v8::Object>();
@@ -3941,8 +4074,7 @@
              return Local<v8::Object>());
   LOG_API(isolate, "Function::NewInstance");
   ENTER_V8(isolate);
-  i::Logger::TimerEventScope timer_scope(
-      isolate, i::Logger::TimerEventScope::v8_execute);
+  i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   EscapableHandleScope scope(reinterpret_cast<Isolate*>(isolate));
   i::Handle<i::JSFunction> function = Utils::OpenHandle(this);
   STATIC_ASSERT(sizeof(v8::Handle<v8::Value>) == sizeof(i::Object**));
@@ -3962,8 +4094,7 @@
   ON_BAILOUT(isolate, "v8::Function::Call()", return Local<v8::Value>());
   LOG_API(isolate, "Function::Call");
   ENTER_V8(isolate);
-  i::Logger::TimerEventScope timer_scope(
-      isolate, i::Logger::TimerEventScope::v8_execute);
+  i::TimerEventScope<i::TimerEventExecute> timer_scope(isolate);
   i::HandleScope scope(isolate);
   i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
   i::Handle<i::Object> recv_obj = Utils::OpenHandle(*recv);
@@ -4010,16 +4141,15 @@
   i::Handle<i::JSFunction> func = Utils::OpenHandle(this);
   i::Handle<i::String> property_name =
       isolate->factory()->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("displayName"));
-  i::LookupResult lookup(isolate);
-  func->LookupRealNamedProperty(property_name, &lookup);
-  if (lookup.IsFound()) {
-    i::Object* value = lookup.GetLazyValue();
-    if (value && value->IsString()) {
-      i::String* name = i::String::cast(value);
-      if (name->length() > 0) return Utils::ToLocal(i::Handle<i::String>(name));
-    }
+          STATIC_CHAR_VECTOR("displayName"));
+
+  i::Handle<i::Object> value =
+      i::JSObject::GetDataProperty(func, property_name);
+  if (value->IsString()) {
+    i::Handle<i::String> name = i::Handle<i::String>::cast(value);
+    if (name->length() > 0) return Utils::ToLocal(name);
   }
+
   return ToApiHandle<Primitive>(isolate->factory()->undefined_value());
 }
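
The displayName lookup (presumably Function::GetDisplayName, whose body this hunk rewrites) now reads the property via JSObject::GetDataProperty instead of a LookupResult walk; empty or non-string names still yield undefined. A small sketch of the observable behavior, with hypothetical handles:

    // In script:  fn.displayName = "pretty name";
    v8::Local<v8::Value> name = fn->GetDisplayName();
    if (!name->IsUndefined()) {
      v8::String::Utf8Value utf8(name);  // "pretty name"
    }
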
 
@@ -4245,13 +4375,11 @@
 
   class Visitor {
    public:
-    inline explicit Visitor()
-        : utf8_length_(0),
-          state_(kInitialState) {}
+    Visitor() : utf8_length_(0), state_(kInitialState) {}
 
     void VisitOneByteString(const uint8_t* chars, int length) {
       int utf8_length = 0;
-      // Add in length 1 for each non-ASCII character.
+      // Add in length 1 for each non-ASCII character: Latin-1 code units
+      // above 0x7F take two bytes in UTF-8.
       for (int i = 0; i < length; i++) {
         utf8_length += *chars++ >> 7;
       }
@@ -4300,7 +4428,7 @@
                                    uint8_t leaf_state) {
     bool edge_surrogate = StartsWithSurrogate(leaf_state);
     if (!(*state & kLeftmostEdgeIsCalculated)) {
-      ASSERT(!(*state & kLeftmostEdgeIsSurrogate));
+      DCHECK(!(*state & kLeftmostEdgeIsSurrogate));
       *state |= kLeftmostEdgeIsCalculated
           | (edge_surrogate ? kLeftmostEdgeIsSurrogate : 0);
     } else if (EndsWithSurrogate(*state) && edge_surrogate) {
@@ -4318,7 +4446,7 @@
                                     uint8_t leaf_state) {
     bool edge_surrogate = EndsWithSurrogate(leaf_state);
     if (!(*state & kRightmostEdgeIsCalculated)) {
-      ASSERT(!(*state & kRightmostEdgeIsSurrogate));
+      DCHECK(!(*state & kRightmostEdgeIsSurrogate));
       *state |= (kRightmostEdgeIsCalculated
                  | (edge_surrogate ? kRightmostEdgeIsSurrogate : 0));
     } else if (edge_surrogate && StartsWithSurrogate(*state)) {
@@ -4334,7 +4462,7 @@
   static inline void MergeTerminal(int* length,
                                    uint8_t state,
                                    uint8_t* state_out) {
-    ASSERT((state & kLeftmostEdgeIsCalculated) &&
+    DCHECK((state & kLeftmostEdgeIsCalculated) &&
            (state & kRightmostEdgeIsCalculated));
     if (EndsWithSurrogate(state) && StartsWithSurrogate(state)) {
       *length -= unibrow::Utf8::kBytesSavedByCombiningSurrogates;
@@ -4446,7 +4574,7 @@
                                char* const buffer,
                                bool replace_invalid_utf8) {
     using namespace unibrow;
-    ASSERT(remaining > 0);
+    DCHECK(remaining > 0);
     // We can't use a local buffer here because Encode needs to modify
     // previous characters in the stream.  We know, however, that
     // exactly one character will be advanced.
@@ -4455,7 +4583,7 @@
                                  character,
                                  last_character,
                                  replace_invalid_utf8);
-      ASSERT(written == 1);
+      DCHECK(written == 1);
       return written;
     }
     // Use a scratch buffer to check the required characters.
@@ -4487,7 +4615,7 @@
   template<typename Char>
   void Visit(const Char* chars, const int length) {
     using namespace unibrow;
-    ASSERT(!early_termination_);
+    DCHECK(!early_termination_);
     if (length == 0) return;
     // Copy state to stack.
     char* buffer = buffer_;
@@ -4516,7 +4644,7 @@
         for (; i < fast_length; i++) {
           buffer +=
               Utf8::EncodeOneByte(buffer, static_cast<uint8_t>(*chars++));
-          ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
+          DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
         }
       } else {
         for (; i < fast_length; i++) {
@@ -4526,7 +4654,7 @@
                                  last_character,
                                  replace_invalid_utf8_);
           last_character = character;
-          ASSERT(capacity_ == -1 || (buffer - start_) <= capacity_);
+          DCHECK(capacity_ == -1 || (buffer - start_) <= capacity_);
         }
       }
       // Array is fully written. Exit.
@@ -4538,10 +4666,10 @@
         return;
       }
     }
-    ASSERT(!skip_capacity_check_);
+    DCHECK(!skip_capacity_check_);
     // Slow loop. Must check capacity on each iteration.
     int remaining_capacity = capacity_ - static_cast<int>(buffer - start_);
-    ASSERT(remaining_capacity >= 0);
+    DCHECK(remaining_capacity >= 0);
     for (; i < length && remaining_capacity > 0; i++) {
       uint16_t character = *chars++;
       // remaining_capacity is <= 3 bytes at this point, so we do not write out
@@ -4653,7 +4781,7 @@
     // First check that the buffer is large enough.
     int utf8_bytes = v8::Utf8Length(*str, str->GetIsolate());
     if (utf8_bytes <= capacity) {
-      // ASCII fast path.
+      // One-byte fast path.
       if (utf8_bytes == string_length) {
         WriteOneByte(reinterpret_cast<uint8_t*>(buffer), 0, capacity, options);
         if (nchars_ref != NULL) *nchars_ref = string_length;
@@ -4688,7 +4816,7 @@
   i::Isolate* isolate = Utils::OpenHandle(string)->GetIsolate();
   LOG_API(isolate, "String::Write");
   ENTER_V8(isolate);
-  ASSERT(start >= 0 && length >= -1);
+  DCHECK(start >= 0 && length >= -1);
   i::Handle<i::String> str = Utils::OpenHandle(string);
   isolate->string_tracker()->RecordWrite(str);
   if (options & String::HINT_MANY_WRITES_EXPECTED) {
@@ -4727,14 +4855,13 @@
 
 bool v8::String::IsExternal() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  EnsureInitializedForIsolate(str->GetIsolate(), "v8::String::IsExternal()");
   return i::StringShape(*str).IsExternalTwoByte();
 }
 
 
-bool v8::String::IsExternalAscii() const {
+bool v8::String::IsExternalOneByte() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  return i::StringShape(*str).IsExternalAscii();
+  return i::StringShape(*str).IsExternalOneByte();
 }
 
 
@@ -4757,11 +4884,11 @@
   i::Handle<i::String> str = Utils::OpenHandle(this);
   const v8::String::ExternalStringResourceBase* expected;
   Encoding expectedEncoding;
-  if (i::StringShape(*str).IsExternalAscii()) {
+  if (i::StringShape(*str).IsExternalOneByte()) {
     const void* resource =
-        i::Handle<i::ExternalAsciiString>::cast(str)->resource();
+        i::Handle<i::ExternalOneByteString>::cast(str)->resource();
     expected = reinterpret_cast<const ExternalStringResourceBase*>(resource);
-    expectedEncoding = ASCII_ENCODING;
+    expectedEncoding = ONE_BYTE_ENCODING;
   } else if (i::StringShape(*str).IsExternalTwoByte()) {
     const void* resource =
         i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
@@ -4769,20 +4896,20 @@
     expectedEncoding = TWO_BYTE_ENCODING;
   } else {
     expected = NULL;
-    expectedEncoding = str->IsOneByteRepresentation() ? ASCII_ENCODING
-        : TWO_BYTE_ENCODING;
+    expectedEncoding =
+        str->IsOneByteRepresentation() ? ONE_BYTE_ENCODING : TWO_BYTE_ENCODING;
   }
   CHECK_EQ(expected, value);
   CHECK_EQ(expectedEncoding, encoding);
 }
 
-const v8::String::ExternalAsciiStringResource*
-v8::String::GetExternalAsciiStringResource() const {
+const v8::String::ExternalOneByteStringResource*
+v8::String::GetExternalOneByteStringResource() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
-  if (i::StringShape(*str).IsExternalAscii()) {
+  if (i::StringShape(*str).IsExternalOneByte()) {
     const void* resource =
-        i::Handle<i::ExternalAsciiString>::cast(str)->resource();
-    return reinterpret_cast<const ExternalAsciiStringResource*>(resource);
+        i::Handle<i::ExternalOneByteString>::cast(str)->resource();
+    return reinterpret_cast<const ExternalOneByteStringResource*>(resource);
   } else {
     return NULL;
   }
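
The ExternalAscii* resource API is renamed to ExternalOneByte*, matching the Latin-1 (not ASCII) encoding V8 actually stores; note also that the MakeExternal guards further down now reject any already-external string, not just two-byte ones. A sketch of a matching resource, assuming a caller-owned buffer that outlives the string:

    class MyOneByteResource
        : public v8::String::ExternalOneByteStringResource {
     public:
      MyOneByteResource(const char* data, size_t length)
          : data_(data), length_(length) {}
      virtual const char* data() const { return data_; }
      virtual size_t length() const { return length_; }
     private:
      const char* data_;  // Latin-1 bytes, owned by the embedder
      size_t length_;
    };
    // v8::String::NewExternal(isolate, new MyOneByteResource(buf, n));
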
@@ -4873,7 +5000,7 @@
   if (!InternalFieldOK(obj, index, location)) return;
   i::Handle<i::Object> val = Utils::OpenHandle(*value);
   obj->SetInternalField(index, *val);
-  ASSERT_EQ(value, GetInternalField(index));
+  DCHECK_EQ(value, GetInternalField(index));
 }
 
 
@@ -4890,7 +5017,7 @@
   const char* location = "v8::Object::SetAlignedPointerInInternalField()";
   if (!InternalFieldOK(obj, index, location)) return;
   obj->SetInternalField(index, EncodeAlignedAsSmi(value, location));
-  ASSERT_EQ(value, GetAlignedPointerFromInternalField(index));
+  DCHECK_EQ(value, GetAlignedPointerFromInternalField(index));
 }
 
 
@@ -4906,34 +5033,23 @@
 
 
 void v8::V8::InitializePlatform(Platform* platform) {
-#ifdef V8_USE_DEFAULT_PLATFORM
-  FATAL("Can't override v8::Platform when using default implementation");
-#else
   i::V8::InitializePlatform(platform);
-#endif
 }
 
 
 void v8::V8::ShutdownPlatform() {
-#ifdef V8_USE_DEFAULT_PLATFORM
-  FATAL("Can't override v8::Platform when using default implementation");
-#else
   i::V8::ShutdownPlatform();
-#endif
 }
 
 
 bool v8::V8::Initialize() {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
-  if (isolate != NULL && isolate->IsInitialized()) {
-    return true;
-  }
-  return InitializeHelper(isolate);
+  i::V8::Initialize();
+  return true;
 }
 
 
 void v8::V8::SetEntropySource(EntropySource entropy_source) {
-  i::RandomNumberGenerator::SetEntropySource(entropy_source);
+  base::RandomNumberGenerator::SetEntropySource(entropy_source);
 }
 
 
@@ -4942,38 +5058,6 @@
   i::V8::SetReturnAddressLocationResolver(return_address_resolver);
 }
 
-
-bool v8::V8::SetFunctionEntryHook(Isolate* ext_isolate,
-                                  FunctionEntryHook entry_hook) {
-  ASSERT(ext_isolate != NULL);
-  ASSERT(entry_hook != NULL);
-
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(ext_isolate);
-
-  // The entry hook can only be set before the Isolate is initialized, as
-  // otherwise the Isolate's code stubs generated at initialization won't
-  // contain entry hooks.
-  if (isolate->IsInitialized())
-    return false;
-
-  // Setting an entry hook is a one-way operation, once set, it cannot be
-  // changed or unset.
-  if (isolate->function_entry_hook() != NULL)
-    return false;
-
-  isolate->set_function_entry_hook(entry_hook);
-  return true;
-}
-
-
-void v8::V8::SetJitCodeEventHandler(
-    JitCodeEventOptions options, JitCodeEventHandler event_handler) {
-  i::Isolate* isolate = i::Isolate::Current();
-  // Ensure that logging is initialized for our isolate.
-  isolate->InitializeLoggingAndCounters();
-  isolate->logger()->SetCodeEventHandler(options, event_handler);
-}
-
 void v8::V8::SetArrayBufferAllocator(
     ArrayBuffer::Allocator* allocator) {
   if (!Utils::ApiCheck(i::V8::ArrayBufferAllocator() == NULL,
@@ -5032,7 +5116,7 @@
 void v8::V8::VisitHandlesForPartialDependence(
     Isolate* exported_isolate, PersistentHandleVisitor* visitor) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(exported_isolate);
-  ASSERT(isolate == i::Isolate::Current());
+  DCHECK(isolate == i::Isolate::Current());
   i::DisallowHeapAllocation no_allocation;
 
   VisitorAdapter visitor_adapter(visitor);
@@ -5041,30 +5125,6 @@
 }
 
 
-bool v8::V8::IdleNotification(int hint) {
-  // Returning true tells the caller that it need not
-  // continue to call IdleNotification.
-  i::Isolate* isolate = i::Isolate::Current();
-  if (isolate == NULL || !isolate->IsInitialized()) return true;
-  if (!i::FLAG_use_idle_notification) return true;
-  return isolate->heap()->IdleNotification(hint);
-}
-
-
-void v8::V8::LowMemoryNotification() {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (isolate == NULL || !isolate->IsInitialized()) return;
-  isolate->heap()->CollectAllAvailableGarbage("low memory notification");
-}
-
-
-int v8::V8::ContextDisposedNotification() {
-  i::Isolate* isolate = i::Isolate::Current();
-  if (!isolate->IsInitialized()) return 0;
-  return isolate->heap()->NotifyContextDisposed();
-}
-
-
 bool v8::V8::InitializeICU(const char* icu_data_file) {
   return i::InitializeICU(icu_data_file);
 }
@@ -5079,7 +5139,7 @@
     i::Isolate* isolate,
     v8::ExtensionConfiguration* extensions,
     v8::Handle<ObjectTemplate> global_template,
-    v8::Handle<Value> global_object) {
+    v8::Handle<Value> maybe_global_proxy) {
   i::Handle<i::Context> env;
 
   // Enter V8 via an ENTER_V8 scope.
@@ -5117,16 +5177,19 @@
       }
     }
 
+    i::Handle<i::Object> proxy = Utils::OpenHandle(*maybe_global_proxy, true);
+    i::MaybeHandle<i::JSGlobalProxy> maybe_proxy;
+    if (!proxy.is_null()) {
+      maybe_proxy = i::Handle<i::JSGlobalProxy>::cast(proxy);
+    }
     // Create the environment.
     env = isolate->bootstrapper()->CreateEnvironment(
-        Utils::OpenHandle(*global_object, true),
-        proxy_template,
-        extensions);
+        maybe_proxy, proxy_template, extensions);
 
     // Restore the access check info on the global template.
     if (!global_template.IsEmpty()) {
-      ASSERT(!global_constructor.is_null());
-      ASSERT(!proxy_constructor.is_null());
+      DCHECK(!global_constructor.is_null());
+      DCHECK(!proxy_constructor.is_null());
       global_constructor->set_access_check_info(
           proxy_constructor->access_check_info());
       global_constructor->set_needs_access_check(
@@ -5144,7 +5207,6 @@
     v8::Handle<ObjectTemplate> global_template,
     v8::Handle<Value> global_object) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
-  EnsureInitializedForIsolate(isolate, "v8::Context::New()");
   LOG_API(isolate, "Context::New");
   ON_BAILOUT(isolate, "v8::Context::New()", return Local<Context>());
   i::HandleScope scope(isolate);
@@ -5275,7 +5337,6 @@
 Local<External> v8::External::New(Isolate* isolate, void* value) {
   STATIC_ASSERT(sizeof(value) == sizeof(i::Address));
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::External::New()");
   LOG_API(i_isolate, "External::New");
   ENTER_V8(i_isolate);
   i::Handle<i::JSObject> external = i_isolate->factory()->NewExternal(value);
@@ -5350,7 +5411,6 @@
                                String::NewStringType type,
                                int length) {
   i::Isolate* isolate = reinterpret_cast<internal::Isolate*>(v8_isolate);
-  EnsureInitializedForIsolate(isolate, location);
   LOG_API(isolate, env);
   if (length == 0 && type != String::kUndetectableString) {
     return String::Empty(v8_isolate);
@@ -5413,7 +5473,6 @@
 Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
   i::Handle<i::String> left_string = Utils::OpenHandle(*left);
   i::Isolate* isolate = left_string->GetIsolate();
-  EnsureInitializedForIsolate(isolate, "v8::String::New()");
   LOG_API(isolate, "String::New(char)");
   ENTER_V8(isolate);
   i::Handle<i::String> right_string = Utils::OpenHandle(*right);
@@ -5433,12 +5492,12 @@
 }
 
 
-static i::Handle<i::String> NewExternalAsciiStringHandle(
-    i::Isolate* isolate,
-    v8::String::ExternalAsciiStringResource* resource) {
+static i::Handle<i::String> NewExternalOneByteStringHandle(
+    i::Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
   // We do not expect this to fail. Change this if it does.
-  return isolate->factory()->NewExternalStringFromAscii(
-      resource).ToHandleChecked();
+  return isolate->factory()
+      ->NewExternalStringFromOneByte(resource)
+      .ToHandleChecked();
 }
 
 
@@ -5446,7 +5505,6 @@
     Isolate* isolate,
     v8::String::ExternalStringResource* resource) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
   LOG_API(i_isolate, "String::NewExternal");
   ENTER_V8(i_isolate);
   CHECK(resource && resource->data());
@@ -5459,7 +5517,7 @@
 bool v8::String::MakeExternal(v8::String::ExternalStringResource* resource) {
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
-  if (i::StringShape(*obj).IsExternalTwoByte()) {
+  if (i::StringShape(*obj).IsExternal()) {
     return false;  // Already an external string.
   }
   ENTER_V8(isolate);
@@ -5472,8 +5530,10 @@
   CHECK(resource && resource->data());
 
   bool result = obj->MakeExternal(resource);
+  // Assert that if CanMakeExternal(), then externalizing actually succeeds.
+  DCHECK(!CanMakeExternal() || result);
   if (result) {
-    ASSERT(obj->IsExternalString());
+    DCHECK(obj->IsExternalString());
     isolate->heap()->external_string_table()->AddString(*obj);
   }
   return result;
@@ -5481,25 +5541,23 @@
 
 
 Local<String> v8::String::NewExternal(
-    Isolate* isolate,
-    v8::String::ExternalAsciiStringResource* resource) {
+    Isolate* isolate, v8::String::ExternalOneByteStringResource* resource) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::String::NewExternal()");
   LOG_API(i_isolate, "String::NewExternal");
   ENTER_V8(i_isolate);
   CHECK(resource && resource->data());
   i::Handle<i::String> result =
-      NewExternalAsciiStringHandle(i_isolate, resource);
+      NewExternalOneByteStringHandle(i_isolate, resource);
   i_isolate->heap()->external_string_table()->AddString(*result);
   return Utils::ToLocal(result);
 }
 
 
 bool v8::String::MakeExternal(
-    v8::String::ExternalAsciiStringResource* resource) {
+    v8::String::ExternalOneByteStringResource* resource) {
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
-  if (i::StringShape(*obj).IsExternalTwoByte()) {
+  if (i::StringShape(*obj).IsExternal()) {
     return false;  // Already an external string.
   }
   ENTER_V8(isolate);
@@ -5512,8 +5570,10 @@
   CHECK(resource && resource->data());
 
   bool result = obj->MakeExternal(resource);
+  // Assert that if CanMakeExternal(), then externalizing actually succeeds.
+  DCHECK(!CanMakeExternal() || result);
   if (result) {
-    ASSERT(obj->IsExternalString());
+    DCHECK(obj->IsExternalString());
     isolate->heap()->external_string_table()->AddString(*obj);
   }
   return result;
@@ -5525,11 +5585,6 @@
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
 
-  // TODO(yangguo): Externalizing sliced/cons strings allocates.
-  // This rule can be removed when all code that can
-  // trigger an access check is handlified and therefore GC safe.
-  if (isolate->heap()->old_pointer_space()->Contains(*obj)) return false;
-
   if (isolate->string_tracker()->IsFreshUnusedString(obj)) return false;
   int size = obj->Size();  // Byte size of the original string.
   if (size < i::ExternalString::kShortSize) return false;
@@ -5540,7 +5595,6 @@
 
 Local<v8::Object> v8::Object::New(Isolate* isolate) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::Object::New()");
   LOG_API(i_isolate, "Object::New");
   ENTER_V8(i_isolate);
   i::Handle<i::JSObject> obj =
@@ -5551,7 +5605,6 @@
 
 Local<v8::Value> v8::NumberObject::New(Isolate* isolate, double value) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::NumberObject::New()");
   LOG_API(i_isolate, "NumberObject::New");
   ENTER_V8(i_isolate);
   i::Handle<i::Object> number = i_isolate->factory()->NewNumber(value);
@@ -5572,7 +5625,6 @@
 
 Local<v8::Value> v8::BooleanObject::New(bool value) {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::BooleanObject::New()");
   LOG_API(isolate, "BooleanObject::New");
   ENTER_V8(isolate);
   i::Handle<i::Object> boolean(value
@@ -5597,7 +5649,6 @@
 Local<v8::Value> v8::StringObject::New(Handle<String> value) {
   i::Handle<i::String> string = Utils::OpenHandle(*value);
   i::Isolate* isolate = string->GetIsolate();
-  EnsureInitializedForIsolate(isolate, "v8::StringObject::New()");
   LOG_API(isolate, "StringObject::New");
   ENTER_V8(isolate);
   i::Handle<i::Object> obj =
@@ -5618,7 +5669,6 @@
 
 Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::SymbolObject::New()");
   LOG_API(i_isolate, "SymbolObject::New");
   ENTER_V8(i_isolate);
   i::Handle<i::Object> obj = i::Object::ToObject(
@@ -5639,11 +5689,10 @@
 
 Local<v8::Value> v8::Date::New(Isolate* isolate, double time) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::Date::New()");
   LOG_API(i_isolate, "Date::New");
   if (std::isnan(time)) {
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
-    time = i::OS::nan_value();
+    time = base::OS::nan_value();
   }
   ENTER_V8(i_isolate);
   EXCEPTION_PREAMBLE(i_isolate);
@@ -5681,7 +5730,7 @@
   i::Handle<i::FixedArray> date_cache_version =
       i::Handle<i::FixedArray>::cast(i_isolate->eternal_handles()->GetSingleton(
           i::EternalHandles::DATE_CACHE_VERSION));
-  ASSERT_EQ(1, date_cache_version->length());
+  DCHECK_EQ(1, date_cache_version->length());
   CHECK(date_cache_version->get(0)->IsSmi());
   date_cache_version->set(
       0,
@@ -5696,7 +5745,7 @@
   if ((flags & RegExp::kGlobal) != 0) flags_buf[num_flags++] = 'g';
   if ((flags & RegExp::kMultiline) != 0) flags_buf[num_flags++] = 'm';
   if ((flags & RegExp::kIgnoreCase) != 0) flags_buf[num_flags++] = 'i';
-  ASSERT(num_flags <= static_cast<int>(ARRAY_SIZE(flags_buf)));
+  DCHECK(num_flags <= static_cast<int>(arraysize(flags_buf)));
   return isolate->factory()->InternalizeOneByteString(
       i::Vector<const uint8_t>(flags_buf, num_flags));
 }
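
ARRAY_SIZE gives way to arraysize, the Chromium-style type-safe array-length macro. Its usual definition, shown for reference under the assumption that src/base/macros.h provides the standard form (it is not part of this hunk):

    // Fails to compile when handed a pointer instead of an actual array.
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))
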
@@ -5705,7 +5754,6 @@
 Local<v8::RegExp> v8::RegExp::New(Handle<String> pattern,
                                   Flags flags) {
   i::Isolate* isolate = Utils::OpenHandle(*pattern)->GetIsolate();
-  EnsureInitializedForIsolate(isolate, "v8::RegExp::New()");
   LOG_API(isolate, "RegExp::New");
   ENTER_V8(isolate);
   EXCEPTION_PREAMBLE(isolate);
@@ -5742,7 +5790,6 @@
 
 Local<v8::Array> v8::Array::New(Isolate* isolate, int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::Array::New()");
   LOG_API(i_isolate, "Array::New");
   ENTER_V8(i_isolate);
   int real_length = length > 0 ? length : 0;
@@ -5800,10 +5847,9 @@
   i::Handle<i::Object> b;
   has_pending_exception = !i::Execution::Call(
       isolate,
-      handle(
-          isolate->context()->global_object()->native_context()->is_promise()),
+      isolate->is_promise(),
       isolate->factory()->undefined_value(),
-      ARRAY_SIZE(argv), argv,
+      arraysize(argv), argv,
       false).ToHandle(&b);
   EXCEPTION_BAILOUT_CHECK(isolate, false);
   return b->BooleanValue();
@@ -5818,8 +5864,7 @@
   i::Handle<i::Object> result;
   has_pending_exception = !i::Execution::Call(
       isolate,
-      handle(isolate->context()->global_object()->native_context()->
-             promise_create()),
+      isolate->promise_create(),
       isolate->factory()->undefined_value(),
       0, NULL,
       false).ToHandle(&result);
@@ -5843,10 +5888,9 @@
   i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
   has_pending_exception = i::Execution::Call(
       isolate,
-      handle(isolate->context()->global_object()->native_context()->
-             promise_resolve()),
+      isolate->promise_resolve(),
       isolate->factory()->undefined_value(),
-      ARRAY_SIZE(argv), argv,
+      arraysize(argv), argv,
       false).is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
 }
@@ -5861,10 +5905,9 @@
   i::Handle<i::Object> argv[] = { promise, Utils::OpenHandle(*value) };
   has_pending_exception = i::Execution::Call(
       isolate,
-      handle(isolate->context()->global_object()->native_context()->
-             promise_reject()),
+      isolate->promise_reject(),
       isolate->factory()->undefined_value(),
-      ARRAY_SIZE(argv), argv,
+      arraysize(argv), argv,
       false).is_null();
   EXCEPTION_BAILOUT_CHECK(isolate, /* void */ ;);
 }
@@ -5880,10 +5923,9 @@
   i::Handle<i::Object> result;
   has_pending_exception = !i::Execution::Call(
       isolate,
-      handle(isolate->context()->global_object()->native_context()->
-             promise_chain()),
+      isolate->promise_chain(),
       promise,
-      ARRAY_SIZE(argv), argv,
+      arraysize(argv), argv,
       false).ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
   return Local<Promise>::Cast(Utils::ToLocal(result));
@@ -5900,10 +5942,9 @@
   i::Handle<i::Object> result;
   has_pending_exception = !i::Execution::Call(
       isolate,
-      handle(isolate->context()->global_object()->native_context()->
-             promise_catch()),
+      isolate->promise_catch(),
       promise,
-      ARRAY_SIZE(argv), argv,
+      arraysize(argv), argv,
       false).ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
   return Local<Promise>::Cast(Utils::ToLocal(result));
@@ -5920,10 +5961,9 @@
   i::Handle<i::Object> result;
   has_pending_exception = !i::Execution::Call(
       isolate,
-      handle(isolate->context()->global_object()->native_context()->
-             promise_then()),
+      isolate->promise_then(),
       promise,
-      ARRAY_SIZE(argv), argv,
+      arraysize(argv), argv,
       false).ToHandle(&result);
   EXCEPTION_BAILOUT_CHECK(isolate, Local<Promise>());
   return Local<Promise>::Cast(Utils::ToLocal(result));
@@ -5969,7 +6009,6 @@
 
 Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, size_t byte_length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::ArrayBuffer::New(size_t)");
   LOG_API(i_isolate, "v8::ArrayBuffer::New(size_t)");
   ENTER_V8(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
@@ -5982,7 +6021,6 @@
 Local<ArrayBuffer> v8::ArrayBuffer::New(Isolate* isolate, void* data,
                                         size_t byte_length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
   LOG_API(i_isolate, "v8::ArrayBuffer::New(void*, size_t)");
   ENTER_V8(i_isolate);
   i::Handle<i::JSArrayBuffer> obj =
@@ -5997,10 +6035,10 @@
   i::Handle<i::JSArrayBuffer> buffer;
   if (obj->IsJSDataView()) {
     i::Handle<i::JSDataView> data_view(i::JSDataView::cast(*obj));
-    ASSERT(data_view->buffer()->IsJSArrayBuffer());
+    DCHECK(data_view->buffer()->IsJSArrayBuffer());
     buffer = i::handle(i::JSArrayBuffer::cast(data_view->buffer()));
   } else {
-    ASSERT(obj->IsJSTypedArray());
+    DCHECK(obj->IsJSTypedArray());
     buffer = i::JSTypedArray::cast(*obj)->GetBuffer();
   }
   return Utils::ToLocal(buffer);
@@ -6031,7 +6069,7 @@
     i::Handle<i::JSArrayBuffer> buffer,
     size_t byte_offset,
     size_t byte_length) {
-  ASSERT(byte_offset + byte_length <=
+  DCHECK(byte_offset + byte_length <=
          static_cast<size_t>(buffer->byte_length()->Number()));
 
   obj->set_buffer(*buffer);
@@ -6058,7 +6096,7 @@
       isolate->factory()->NewJSTypedArray(array_type);
   i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
 
-  ASSERT(byte_offset % sizeof(ElementType) == 0);
+  DCHECK(byte_offset % sizeof(ElementType) == 0);
 
   CHECK(length <= (std::numeric_limits<size_t>::max() / sizeof(ElementType)));
   CHECK(length <= static_cast<size_t>(i::Smi::kMaxValue));
@@ -6085,8 +6123,6 @@
   Local<Type##Array> Type##Array::New(Handle<ArrayBuffer> array_buffer,      \
                                     size_t byte_offset, size_t length) {     \
     i::Isolate* isolate = Utils::OpenHandle(*array_buffer)->GetIsolate();    \
-    EnsureInitializedForIsolate(isolate,                                     \
-        "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)");     \
     LOG_API(isolate,                                                         \
         "v8::" #Type "Array::New(Handle<ArrayBuffer>, size_t, size_t)");     \
     ENTER_V8(isolate);                                                       \
@@ -6110,8 +6146,6 @@
                               size_t byte_offset, size_t byte_length) {
   i::Handle<i::JSArrayBuffer> buffer = Utils::OpenHandle(*array_buffer);
   i::Isolate* isolate = buffer->GetIsolate();
-  EnsureInitializedForIsolate(
-      isolate, "v8::DataView::New(void*, size_t, size_t)");
   LOG_API(isolate, "v8::DataView::New(void*, size_t, size_t)");
   ENTER_V8(isolate);
   i::Handle<i::JSDataView> obj = isolate->factory()->NewJSDataView();
@@ -6123,7 +6157,6 @@
 
 Local<Symbol> v8::Symbol::New(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
   LOG_API(i_isolate, "Symbol::New()");
   ENTER_V8(i_isolate);
   i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
@@ -6132,51 +6165,62 @@
 }
 
 
-Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::Handle<i::String> i_name = Utils::OpenHandle(*name);
-  i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
-  i::Handle<i::String> part = i_isolate->factory()->for_string();
+static i::Handle<i::Symbol> SymbolFor(i::Isolate* isolate,
+                                      i::Handle<i::String> name,
+                                      i::Handle<i::String> part) {
+  i::Handle<i::JSObject> registry = isolate->GetSymbolRegistry();
   i::Handle<i::JSObject> symbols =
       i::Handle<i::JSObject>::cast(
           i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
   i::Handle<i::Object> symbol =
-      i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked();
+      i::Object::GetPropertyOrElement(symbols, name).ToHandleChecked();
   if (!symbol->IsSymbol()) {
-    ASSERT(symbol->IsUndefined());
-    symbol = i_isolate->factory()->NewSymbol();
-    i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
-    i::JSObject::SetProperty(
-        symbols, i_name, symbol, NONE, i::STRICT).Assert();
+    DCHECK(symbol->IsUndefined());
+    symbol = isolate->factory()->NewSymbol();
+    i::Handle<i::Symbol>::cast(symbol)->set_name(*name);
+    i::JSObject::SetProperty(symbols, name, symbol, i::STRICT).Assert();
   }
-  return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+  return i::Handle<i::Symbol>::cast(symbol);
+}
+
+
+Local<Symbol> v8::Symbol::For(Isolate* isolate, Local<String> name) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::Handle<i::String> i_name = Utils::OpenHandle(*name);
+  i::Handle<i::String> part = i_isolate->factory()->for_string();
+  return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
 }
 
 
 Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::Handle<i::String> i_name = Utils::OpenHandle(*name);
-  i::Handle<i::JSObject> registry = i_isolate->GetSymbolRegistry();
   i::Handle<i::String> part = i_isolate->factory()->for_api_string();
-  i::Handle<i::JSObject> symbols =
-      i::Handle<i::JSObject>::cast(
-          i::Object::GetPropertyOrElement(registry, part).ToHandleChecked());
-  i::Handle<i::Object> symbol =
-      i::Object::GetPropertyOrElement(symbols, i_name).ToHandleChecked();
-  if (!symbol->IsSymbol()) {
-    ASSERT(symbol->IsUndefined());
-    symbol = i_isolate->factory()->NewSymbol();
-    i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
-    i::JSObject::SetProperty(
-        symbols, i_name, symbol, NONE, i::STRICT).Assert();
-  }
-  return Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
+  return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
+}
+
+
+static Local<Symbol> GetWellKnownSymbol(Isolate* isolate, const char* name) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  i::Handle<i::String> i_name =
+      Utils::OpenHandle(*String::NewFromUtf8(isolate, name));
+  i::Handle<i::String> part = i_isolate->factory()->for_intern_string();
+  return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
+}
+
+
+Local<Symbol> v8::Symbol::GetIterator(Isolate* isolate) {
+  return GetWellKnownSymbol(isolate, "Symbol.iterator");
+}
+
+
+Local<Symbol> v8::Symbol::GetUnscopables(Isolate* isolate) {
+  return GetWellKnownSymbol(isolate, "Symbol.unscopables");
 }
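
GetWellKnownSymbol funnels through the same per-isolate symbol registry as Symbol::For, under the separate "for_intern" partition, so Symbol::GetIterator always hands back the one engine-wide Symbol.iterator. Hypothetical embedder use:

    // Make a C++-backed object iterable; |iterator_factory_fn| is a
    // hypothetical v8::Local<v8::Function> returning an iterator object.
    v8::Local<v8::Symbol> iter = v8::Symbol::GetIterator(isolate);
    obj->Set(iter, iterator_factory_fn);
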
 
 
 Local<Private> v8::Private::New(Isolate* isolate, Local<String> name) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  EnsureInitializedForIsolate(i_isolate, "v8::Private::New()");
   LOG_API(i_isolate, "Private::New()");
   ENTER_V8(i_isolate);
   i::Handle<i::Symbol> symbol = i_isolate->factory()->NewPrivateSymbol();
@@ -6197,11 +6241,10 @@
   i::Handle<i::Object> symbol =
       i::Object::GetPropertyOrElement(privates, i_name).ToHandleChecked();
   if (!symbol->IsSymbol()) {
-    ASSERT(symbol->IsUndefined());
+    DCHECK(symbol->IsUndefined());
     symbol = i_isolate->factory()->NewPrivateSymbol();
     i::Handle<i::Symbol>::cast(symbol)->set_name(*i_name);
-    i::JSObject::SetProperty(
-        privates, i_name, symbol, NONE, i::STRICT).Assert();
+    i::JSObject::SetProperty(privates, i_name, symbol, i::STRICT).Assert();
   }
   Local<Symbol> result = Utils::ToLocal(i::Handle<i::Symbol>::cast(symbol));
   return v8::Handle<Private>(reinterpret_cast<Private*>(*result));
@@ -6210,10 +6253,10 @@
 
 Local<Number> v8::Number::New(Isolate* isolate, double value) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ASSERT(internal_isolate->IsInitialized());
+  DCHECK(internal_isolate->IsInitialized());
   if (std::isnan(value)) {
     // Introduce only canonical NaN value into the VM, to avoid signaling NaNs.
-    value = i::OS::nan_value();
+    value = base::OS::nan_value();
   }
   ENTER_V8(internal_isolate);
   i::Handle<i::Object> result = internal_isolate->factory()->NewNumber(value);
@@ -6223,7 +6266,7 @@
 
 Local<Integer> v8::Integer::New(Isolate* isolate, int32_t value) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ASSERT(internal_isolate->IsInitialized());
+  DCHECK(internal_isolate->IsInitialized());
   if (i::Smi::IsValid(value)) {
     return Utils::IntegerToLocal(i::Handle<i::Object>(i::Smi::FromInt(value),
                                                       internal_isolate));
@@ -6236,7 +6279,7 @@
 
 Local<Integer> v8::Integer::NewFromUnsigned(Isolate* isolate, uint32_t value) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  ASSERT(internal_isolate->IsInitialized());
+  DCHECK(internal_isolate->IsInitialized());
   bool fits_into_int32_t = (value & (1 << 31)) == 0;
   if (fits_into_int32_t) {
     return Integer::New(isolate, static_cast<int32_t>(value));
@@ -6249,7 +6292,6 @@
 
 bool V8::AddMessageListener(MessageCallback that, Handle<Value> data) {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::V8::AddMessageListener()");
   ON_BAILOUT(isolate, "v8::V8::AddMessageListener()", return false);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
@@ -6265,7 +6307,6 @@
 
 void V8::RemoveMessageListeners(MessageCallback that) {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::V8::RemoveMessageListener()");
   ON_BAILOUT(isolate, "v8::V8::RemoveMessageListeners()", return);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
@@ -6293,32 +6334,6 @@
 }
 
 
-void V8::SetCounterFunction(CounterLookupCallback callback) {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
-  // TODO(svenpanne) The Isolate should really be a parameter.
-  if (isolate == NULL) return;
-  isolate->stats_table()->SetCounterFunction(callback);
-}
-
-
-void V8::SetCreateHistogramFunction(CreateHistogramCallback callback) {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
-  // TODO(svenpanne) The Isolate should really be a parameter.
-  if (isolate == NULL) return;
-  isolate->stats_table()->SetCreateHistogramFunction(callback);
-  isolate->InitializeLoggingAndCounters();
-  isolate->counters()->ResetHistograms();
-}
-
-
-void V8::SetAddHistogramSampleFunction(AddHistogramSampleCallback callback) {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
-  // TODO(svenpanne) The Isolate should really be a parameter.
-  if (isolate == NULL) return;
-  isolate->stats_table()->
-      SetAddHistogramSampleFunction(callback);
-}
-
 void V8::SetFailedAccessCheckCallbackFunction(
     FailedAccessCheckCallback callback) {
   i::Isolate* isolate = i::Isolate::Current();
@@ -6356,7 +6371,7 @@
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   i::Context* context = isolate->context();
   if (context == NULL) return Local<Context>();
-  i::Context* native_context = context->global_object()->native_context();
+  i::Context* native_context = context->native_context();
   if (native_context == NULL) return Local<Context>();
   return Utils::ToLocal(i::Handle<i::Context>(native_context));
 }
@@ -6537,7 +6552,7 @@
         i::NEW_SPACE, "Isolate::RequestGarbageCollection",
         kGCCallbackFlagForced);
   } else {
-    ASSERT_EQ(kFullGarbageCollection, type);
+    DCHECK_EQ(kFullGarbageCollection, type);
     reinterpret_cast<i::Isolate*>(this)->heap()->CollectAllGarbage(
         i::Heap::kAbortIncrementalMarkingMask,
         "Isolate::RequestGarbageCollection", kGCCallbackFlagForced);
@@ -6546,14 +6561,34 @@
 
 
 Isolate* Isolate::GetCurrent() {
-  i::Isolate* isolate = i::Isolate::UncheckedCurrent();
+  i::Isolate* isolate = i::Isolate::Current();
   return reinterpret_cast<Isolate*>(isolate);
 }
 
 
-Isolate* Isolate::New() {
+Isolate* Isolate::New(const Isolate::CreateParams& params) {
   i::Isolate* isolate = new i::Isolate();
-  return reinterpret_cast<Isolate*>(isolate);
+  Isolate* v8_isolate = reinterpret_cast<Isolate*>(isolate);
+  if (params.entry_hook) {
+    isolate->set_function_entry_hook(params.entry_hook);
+  }
+  if (params.code_event_handler) {
+    isolate->InitializeLoggingAndCounters();
+    isolate->logger()->SetCodeEventHandler(kJitCodeEventDefault,
+                                           params.code_event_handler);
+  }
+  SetResourceConstraints(isolate, params.constraints);
+  if (params.enable_serializer) {
+    isolate->enable_serializer();
+  }
+  // TODO(jochen): Once we get rid of Isolate::Current(), we can remove this.
+  Isolate::Scope isolate_scope(v8_isolate);
+  if (params.entry_hook || !i::Snapshot::Initialize(isolate)) {
+    // If the isolate has a function entry hook, it needs to re-build all its
+    // code stubs with entry hooks embedded, so don't deserialize a snapshot.
+    isolate->Init(NULL);
+  }
+  return v8_isolate;
 }
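
Isolate::New now takes CreateParams, folding in what used to be the separate V8::SetFunctionEntryHook and V8::SetJitCodeEventHandler calls (both deleted earlier in this patch) plus resource constraints and the serializer flag. A minimal sketch, assuming a hypothetical JIT event handler:

    v8::Isolate::CreateParams params;
    params.code_event_handler = MyJitHandler;  // hypothetical callback
    v8::Isolate* isolate = v8::Isolate::New(params);
    {
      v8::Isolate::Scope scope(isolate);
      // ... use the isolate ...
    }
    isolate->Dispose();
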
 
 
@@ -6589,7 +6624,7 @@
     internal_ = reinterpret_cast<void*>(
         new i::DisallowJavascriptExecution(i_isolate));
   } else {
-    ASSERT_EQ(THROW_ON_FAILURE, on_failure);
+    DCHECK_EQ(THROW_ON_FAILURE, on_failure);
     internal_ = reinterpret_cast<void*>(
         new i::ThrowOnJavascriptExecution(i_isolate));
   }
@@ -6707,6 +6742,11 @@
 }
 
 
+void Isolate::SetUseCounterCallback(UseCounterCallback callback) {
+  reinterpret_cast<i::Isolate*>(this)->SetUseCounterCallback(callback);
+}
+
+
 void Isolate::SetCounterFunction(CounterLookupCallback callback) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   isolate->stats_table()->SetCounterFunction(callback);
@@ -6731,6 +6771,47 @@
 }
 
 
+bool v8::Isolate::IdleNotification(int idle_time_in_ms) {
+  // Returning true tells the caller that it need not
+  // continue to call IdleNotification.
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  if (!i::FLAG_use_idle_notification) return true;
+  return isolate->heap()->IdleNotification(idle_time_in_ms);
+}
+
+
+void v8::Isolate::LowMemoryNotification() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  {
+    i::HistogramTimerScope idle_notification_scope(
+        isolate->counters()->gc_low_memory_notification());
+    isolate->heap()->CollectAllAvailableGarbage("low memory notification");
+  }
+}
+
+
+int v8::Isolate::ContextDisposedNotification() {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  return isolate->heap()->NotifyContextDisposed();
+}
+
+
+void v8::Isolate::SetJitCodeEventHandler(JitCodeEventOptions options,
+                                         JitCodeEventHandler event_handler) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  // Ensure that logging is initialized for our isolate.
+  isolate->InitializeLoggingAndCounters();
+  isolate->logger()->SetCodeEventHandler(options, event_handler);
+}
+
+
+void v8::Isolate::SetStackLimit(uintptr_t stack_limit) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  CHECK(stack_limit);
+  isolate->stack_guard()->SetStackLimit(stack_limit);
+}
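
IdleNotification, LowMemoryNotification, ContextDisposedNotification, SetJitCodeEventHandler and the new SetStackLimit all live on the Isolate now, replacing the static v8::V8 versions removed above. A sketch of the per-isolate calls, assuming a hypothetical embedder event loop:

    // true from IdleNotification means "no more idle work pending".
    while (EmbedderIsIdle()) {                // hypothetical predicate
      if (isolate->IdleNotification(100)) break;
    }
    if (UnderMemoryPressure())                // hypothetical predicate
      isolate->LowMemoryNotification();
    isolate->ContextDisposedNotification();   // after dropping a context
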
+
+
 String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj)
     : str_(NULL), length_(0) {
   i::Isolate* isolate = i::Isolate::Current();
@@ -6772,97 +6853,43 @@
 }
 
 
-Local<Value> Exception::RangeError(v8::Handle<v8::String> raw_message) {
-  i::Isolate* isolate = i::Isolate::Current();
-  LOG_API(isolate, "RangeError");
-  ON_BAILOUT(isolate, "v8::Exception::RangeError()", return Local<Value>());
-  ENTER_V8(isolate);
-  i::Object* error;
-  {
-    i::HandleScope scope(isolate);
-    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
-    i::Handle<i::Object> result = isolate->factory()->NewRangeError(message);
-    error = *result;
+#define DEFINE_ERROR(NAME)                                                    \
+  Local<Value> Exception::NAME(v8::Handle<v8::String> raw_message) {          \
+    i::Isolate* isolate = i::Isolate::Current();                              \
+    LOG_API(isolate, #NAME);                                                  \
+    ON_BAILOUT(isolate, "v8::Exception::" #NAME "()", return Local<Value>()); \
+    ENTER_V8(isolate);                                                        \
+    i::Object* error;                                                         \
+    {                                                                         \
+      i::HandleScope scope(isolate);                                          \
+      i::Handle<i::String> message = Utils::OpenHandle(*raw_message);         \
+      i::Handle<i::Object> result;                                            \
+      EXCEPTION_PREAMBLE(isolate);                                            \
+      i::MaybeHandle<i::Object> maybe_result =                                \
+          isolate->factory()->New##NAME(message);                             \
+      has_pending_exception = !maybe_result.ToHandle(&result);                \
+      /* TODO(yangguo): crbug/403509. Return empty handle instead. */         \
+      EXCEPTION_BAILOUT_CHECK(                                                \
+          isolate, v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate)));   \
+      error = *result;                                                        \
+    }                                                                         \
+    i::Handle<i::Object> result(error, isolate);                              \
+    return Utils::ToLocal(result);                                            \
   }
-  i::Handle<i::Object> result(error, isolate);
-  return Utils::ToLocal(result);
-}
 
+DEFINE_ERROR(RangeError)
+DEFINE_ERROR(ReferenceError)
+DEFINE_ERROR(SyntaxError)
+DEFINE_ERROR(TypeError)
+DEFINE_ERROR(Error)
 
-Local<Value> Exception::ReferenceError(v8::Handle<v8::String> raw_message) {
-  i::Isolate* isolate = i::Isolate::Current();
-  LOG_API(isolate, "ReferenceError");
-  ON_BAILOUT(isolate, "v8::Exception::ReferenceError()", return Local<Value>());
-  ENTER_V8(isolate);
-  i::Object* error;
-  {
-    i::HandleScope scope(isolate);
-    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
-    i::Handle<i::Object> result =
-        isolate->factory()->NewReferenceError(message);
-    error = *result;
-  }
-  i::Handle<i::Object> result(error, isolate);
-  return Utils::ToLocal(result);
-}
-
-
-Local<Value> Exception::SyntaxError(v8::Handle<v8::String> raw_message) {
-  i::Isolate* isolate = i::Isolate::Current();
-  LOG_API(isolate, "SyntaxError");
-  ON_BAILOUT(isolate, "v8::Exception::SyntaxError()", return Local<Value>());
-  ENTER_V8(isolate);
-  i::Object* error;
-  {
-    i::HandleScope scope(isolate);
-    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
-    i::Handle<i::Object> result = isolate->factory()->NewSyntaxError(message);
-    error = *result;
-  }
-  i::Handle<i::Object> result(error, isolate);
-  return Utils::ToLocal(result);
-}
-
-
-Local<Value> Exception::TypeError(v8::Handle<v8::String> raw_message) {
-  i::Isolate* isolate = i::Isolate::Current();
-  LOG_API(isolate, "TypeError");
-  ON_BAILOUT(isolate, "v8::Exception::TypeError()", return Local<Value>());
-  ENTER_V8(isolate);
-  i::Object* error;
-  {
-    i::HandleScope scope(isolate);
-    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
-    i::Handle<i::Object> result = isolate->factory()->NewTypeError(message);
-    error = *result;
-  }
-  i::Handle<i::Object> result(error, isolate);
-  return Utils::ToLocal(result);
-}
-
-
-Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
-  i::Isolate* isolate = i::Isolate::Current();
-  LOG_API(isolate, "Error");
-  ON_BAILOUT(isolate, "v8::Exception::Error()", return Local<Value>());
-  ENTER_V8(isolate);
-  i::Object* error;
-  {
-    i::HandleScope scope(isolate);
-    i::Handle<i::String> message = Utils::OpenHandle(*raw_message);
-    i::Handle<i::Object> result = isolate->factory()->NewError(message);
-    error = *result;
-  }
-  i::Handle<i::Object> result(error, isolate);
-  return Utils::ToLocal(result);
-}
+#undef DEFINE_ERROR
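
The five hand-rolled Exception::* constructors collapse into one DEFINE_ERROR expansion, and each now runs under EXCEPTION_PREAMBLE so a failing factory call surfaces as undefined (per the TODO, eventually an empty handle) instead of crashing. Embedder-visible usage is unchanged, e.g.:

    isolate->ThrowException(v8::Exception::TypeError(
        v8::String::NewFromUtf8(isolate, "expected a number")));
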
 
 
 // --- D e b u g   S u p p o r t ---
 
 bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::Debug::SetDebugEventListener()");
   ON_BAILOUT(isolate, "v8::Debug::SetDebugEventListener()", return false);
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
@@ -6887,6 +6914,12 @@
 }
 
 
+bool Debug::CheckDebugBreak(Isolate* isolate) {
+  i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  return internal_isolate->stack_guard()->CheckDebugBreak();
+}
+
+
 void Debug::DebugBreakForCommand(Isolate* isolate, ClientData* data) {
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
   internal_isolate->debug()->EnqueueDebugCommand(data);
@@ -6895,7 +6928,6 @@
 
 void Debug::SetMessageHandler(v8::Debug::MessageHandler handler) {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::Debug::SetMessageHandler");
   ENTER_V8(isolate);
   isolate->debug()->SetMessageHandler(handler);
 }
@@ -6947,7 +6979,7 @@
     i::Handle<i::JSObject> debug(
         isolate_debug->debug_context()->global_object());
     i::Handle<i::String> name = isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("MakeMirror"));
+        STATIC_CHAR_VECTOR("MakeMirror"));
     i::Handle<i::Object> fun_obj =
         i::Object::GetProperty(debug, name).ToHandleChecked();
     i::Handle<i::JSFunction> fun = i::Handle<i::JSFunction>::cast(fun_obj);
@@ -6969,7 +7001,6 @@
 
 Local<Context> Debug::GetDebugContext() {
   i::Isolate* isolate = i::Isolate::Current();
-  EnsureInitializedForIsolate(isolate, "v8::Debug::GetDebugContext()");
   ENTER_V8(isolate);
   return Utils::ToLocal(i::Isolate::Current()->debug()->GetDebugContext());
 }
@@ -7061,7 +7092,7 @@
 void CpuProfile::Delete() {
   i::Isolate* isolate = i::Isolate::Current();
   i::CpuProfiler* profiler = isolate->cpu_profiler();
-  ASSERT(profiler != NULL);
+  DCHECK(profiler != NULL);
   profiler->DeleteProfile(reinterpret_cast<i::CpuProfile*>(this));
 }
 
@@ -7088,19 +7119,20 @@
 
 int64_t CpuProfile::GetSampleTimestamp(int index) const {
   const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
-  return (profile->sample_timestamp(index) - i::TimeTicks()).InMicroseconds();
+  return (profile->sample_timestamp(index) - base::TimeTicks())
+      .InMicroseconds();
 }
 
 
 int64_t CpuProfile::GetStartTime() const {
   const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
-  return (profile->start_time() - i::TimeTicks()).InMicroseconds();
+  return (profile->start_time() - base::TimeTicks()).InMicroseconds();
 }
 
 
 int64_t CpuProfile::GetEndTime() const {
   const i::CpuProfile* profile = reinterpret_cast<const i::CpuProfile*>(this);
-  return (profile->end_time() - i::TimeTicks()).InMicroseconds();
+  return (profile->end_time() - base::TimeTicks()).InMicroseconds();
 }
 
 
@@ -7110,9 +7142,9 @@
 
 
 void CpuProfiler::SetSamplingInterval(int us) {
-  ASSERT(us >= 0);
+  DCHECK(us >= 0);
   return reinterpret_cast<i::CpuProfiler*>(this)->set_sampling_interval(
-      i::TimeDelta::FromMicroseconds(us));
+      base::TimeDelta::FromMicroseconds(us));
 }
 
 
@@ -7142,7 +7174,7 @@
 void CpuProfiler::SetIdle(bool is_idle) {
   i::Isolate* isolate = reinterpret_cast<i::CpuProfiler*>(this)->isolate();
   i::StateTag state = isolate->current_vm_state();
-  ASSERT(state == i::EXTERNAL || state == i::IDLE);
+  DCHECK(state == i::EXTERNAL || state == i::IDLE);
   if (isolate->js_entry_sp() != NULL) return;
   if (is_idle) {
     isolate->set_current_vm_state(i::IDLE);
@@ -7503,7 +7535,7 @@
         (last_handle_before_deferred_block_ <= &block[kHandleBlockSize]) &&
         (last_handle_before_deferred_block_ >= block)) {
       v->VisitPointers(block, last_handle_before_deferred_block_);
-      ASSERT(!found_block_before_deferred);
+      DCHECK(!found_block_before_deferred);
 #ifdef DEBUG
       found_block_before_deferred = true;
 #endif
@@ -7512,7 +7544,7 @@
     }
   }
 
-  ASSERT(last_handle_before_deferred_block_ == NULL ||
+  DCHECK(last_handle_before_deferred_block_ == NULL ||
          found_block_before_deferred);
 
   // Iterate over live handles in the last block (if any).
@@ -7521,7 +7553,7 @@
   }
 
   List<Context*>* context_lists[2] = { &saved_contexts_, &entered_contexts_};
-  for (unsigned i = 0; i < ARRAY_SIZE(context_lists); i++) {
+  for (unsigned i = 0; i < arraysize(context_lists); i++) {
     if (context_lists[i]->is_empty()) continue;
     Object** start = reinterpret_cast<Object**>(&context_lists[i]->first());
     v->VisitPointers(start, start + context_lists[i]->length());
@@ -7552,7 +7584,7 @@
     Object** block_start = blocks_.last();
     Object** block_limit = &block_start[kHandleBlockSize];
     // We should not need to check for SealHandleScope here. Assert this.
-    ASSERT(prev_limit == block_limit ||
+    DCHECK(prev_limit == block_limit ||
            !(block_start <= prev_limit && prev_limit <= block_limit));
     if (prev_limit == block_limit) break;
     deferred->blocks_.Add(blocks_.last());
@@ -7563,17 +7595,17 @@
   // HandleScope stack since BeginDeferredScope was called, but in
   // reverse order.
 
-  ASSERT(prev_limit == NULL || !blocks_.is_empty());
+  DCHECK(prev_limit == NULL || !blocks_.is_empty());
 
-  ASSERT(!blocks_.is_empty() && prev_limit != NULL);
-  ASSERT(last_handle_before_deferred_block_ != NULL);
+  DCHECK(!blocks_.is_empty() && prev_limit != NULL);
+  DCHECK(last_handle_before_deferred_block_ != NULL);
   last_handle_before_deferred_block_ = NULL;
   return deferred;
 }
 
 
 void HandleScopeImplementer::BeginDeferredScope() {
-  ASSERT(last_handle_before_deferred_block_ == NULL);
+  DCHECK(last_handle_before_deferred_block_ == NULL);
   last_handle_before_deferred_block_ = isolate()->handle_scope_data()->next;
 }
 
@@ -7591,9 +7623,9 @@
 
 
 void DeferredHandles::Iterate(ObjectVisitor* v) {
-  ASSERT(!blocks_.is_empty());
+  DCHECK(!blocks_.is_empty());
 
-  ASSERT((first_block_limit_ >= blocks_.first()) &&
+  DCHECK((first_block_limit_ >= blocks_.first()) &&
          (first_block_limit_ <= &(blocks_.first())[kHandleBlockSize]));
 
   v->VisitPointers(blocks_.first(), first_block_limit_);
@@ -7605,9 +7637,9 @@
 
 
 void InvokeAccessorGetterCallback(
-    v8::Local<v8::String> property,
+    v8::Local<v8::Name> property,
     const v8::PropertyCallbackInfo<v8::Value>& info,
-    v8::AccessorGetterCallback getter) {
+    v8::AccessorNameGetterCallback getter) {
   // Leaving JavaScript.
   Isolate* isolate = reinterpret_cast<Isolate*>(info.GetIsolate());
   Address getter_address = reinterpret_cast<Address>(reinterpret_cast<intptr_t>(
diff --git a/src/api.h b/src/api.h
index 7a688ca..9aed5dd 100644
--- a/src/api.h
+++ b/src/api.h
@@ -81,13 +81,13 @@
 
 
 v8::internal::Object* NeanderObject::get(int offset) {
-  ASSERT(value()->HasFastObjectElements());
+  DCHECK(value()->HasFastObjectElements());
   return v8::internal::FixedArray::cast(value()->elements())->get(offset);
 }
 
 
 void NeanderObject::set(int offset, v8::internal::Object* value) {
-  ASSERT(value_->HasFastObjectElements());
+  DCHECK(value_->HasFastObjectElements());
   v8::internal::FixedArray::cast(value_->elements())->set(offset, value);
 }
 
@@ -158,6 +158,7 @@
   V(Float32Array, JSTypedArray)                \
   V(Float64Array, JSTypedArray)                \
   V(DataView, JSDataView)                      \
+  V(Name, Name)                                \
   V(String, String)                            \
   V(Symbol, Symbol)                            \
   V(Script, JSFunction)                        \
@@ -189,6 +190,8 @@
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<Function> ToLocal(
       v8::internal::Handle<v8::internal::JSFunction> obj);
+  static inline Local<Name> ToLocal(
+      v8::internal::Handle<v8::internal::Name> obj);
   static inline Local<String> ToLocal(
       v8::internal::Handle<v8::internal::String> obj);
   static inline Local<Symbol> ToLocal(
@@ -264,7 +267,7 @@
 
   template<class From, class To>
   static inline Local<To> Convert(v8::internal::Handle<From> obj) {
-    ASSERT(obj.is_null() || !obj->IsTheHole());
+    DCHECK(obj.is_null() || !obj->IsTheHole());
     return Local<To>(reinterpret_cast<To*>(obj.location()));
   }
 
@@ -325,7 +328,7 @@
 #define MAKE_TO_LOCAL_TYPED_ARRAY(Type, typeName, TYPE, ctype, size)        \
   Local<v8::Type##Array> Utils::ToLocal##Type##Array(                       \
       v8::internal::Handle<v8::internal::JSTypedArray> obj) {               \
-    ASSERT(obj->type() == kExternal##Type##Array);                          \
+    DCHECK(obj->type() == kExternal##Type##Array);                          \
     return Convert<v8::internal::JSTypedArray, v8::Type##Array>(obj);       \
   }
 
@@ -333,6 +336,7 @@
 MAKE_TO_LOCAL(ToLocal, Context, Context)
 MAKE_TO_LOCAL(ToLocal, Object, Value)
 MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
+MAKE_TO_LOCAL(ToLocal, Name, Name)
 MAKE_TO_LOCAL(ToLocal, String, String)
 MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
 MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
@@ -370,8 +374,7 @@
     const v8::From* that, bool allow_empty_handle) {                        \
     EXTRA_CHECK(allow_empty_handle || that != NULL);                        \
     EXTRA_CHECK(that == NULL ||                                             \
-        (*reinterpret_cast<v8::internal::Object**>(                         \
-            const_cast<v8::From*>(that)))->Is##To());                       \
+        (*reinterpret_cast<v8::internal::Object* const*>(that))->Is##To()); \
     return v8::internal::Handle<v8::internal::To>(                          \
         reinterpret_cast<v8::internal::To**>(const_cast<v8::From*>(that))); \
   }
@@ -535,7 +538,7 @@
   Isolate* isolate() const { return isolate_; }
 
   void ReturnBlock(Object** block) {
-    ASSERT(block != NULL);
+    DCHECK(block != NULL);
     if (spare_ != NULL) DeleteArray(spare_);
     spare_ = block;
   }
@@ -551,9 +554,9 @@
   }
 
   void Free() {
-    ASSERT(blocks_.length() == 0);
-    ASSERT(entered_contexts_.length() == 0);
-    ASSERT(saved_contexts_.length() == 0);
+    DCHECK(blocks_.length() == 0);
+    DCHECK(entered_contexts_.length() == 0);
+    DCHECK(saved_contexts_.length() == 0);
     blocks_.Free();
     entered_contexts_.Free();
     saved_contexts_.Free();
@@ -561,7 +564,7 @@
       DeleteArray(spare_);
       spare_ = NULL;
     }
-    ASSERT(call_depth_ == 0);
+    DCHECK(call_depth_ == 0);
   }
 
   void BeginDeferredScope();
@@ -664,7 +667,7 @@
     }
     spare_ = block_start;
   }
-  ASSERT((blocks_.is_empty() && prev_limit == NULL) ||
+  DCHECK((blocks_.is_empty() && prev_limit == NULL) ||
          (!blocks_.is_empty() && prev_limit != NULL));
 }
 
@@ -672,9 +675,9 @@
 // Interceptor functions called from generated inline caches to notify
 // CPU profiler that external callbacks are invoked.
 void InvokeAccessorGetterCallback(
-    v8::Local<v8::String> property,
+    v8::Local<v8::Name> property,
     const v8::PropertyCallbackInfo<v8::Value>& info,
-    v8::AccessorGetterCallback getter);
+    v8::AccessorNameGetterCallback getter);
 
 void InvokeFunctionCallback(const v8::FunctionCallbackInfo<v8::Value>& info,
                             v8::FunctionCallback callback);
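
The Local<String> to Local<Name> widening here (and in the api.cc hunk above) is what lets a single getter serve string- and symbol-keyed properties. A hedged sketch of the embedder side, assuming the Local<Name>-taking SetAccessor overload that accompanies this signature change (names are illustrative):

    #include "include/v8.h"

    // A getter with the widened AccessorNameGetterCallback signature.
    static void MeaningGetter(v8::Local<v8::Name> property,
                              const v8::PropertyCallbackInfo<v8::Value>& info) {
      // A Name is either a String or a Symbol; branch when the kind matters.
      if (property->IsSymbol()) {
        info.GetReturnValue().Set(v8::Undefined(info.GetIsolate()));
        return;
      }
      info.GetReturnValue().Set(42);
    }

    void InstallAccessor(v8::Isolate* isolate,
                         v8::Local<v8::ObjectTemplate> templ) {
      // Local<String> converts implicitly to Local<Name>, selecting the
      // AccessorNameGetterCallback overload.
      templ->SetAccessor(v8::String::NewFromUtf8(isolate, "meaning"),
                         MeaningGetter);
    }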
diff --git a/src/apinatives.js b/src/apinatives.js
index d4835af..3e38d10 100644
--- a/src/apinatives.js
+++ b/src/apinatives.js
@@ -30,10 +30,16 @@
       var Constructor = %GetTemplateField(data, kApiConstructorOffset);
       // Note: Do not directly use a function template as a condition; our
       // internal ToBoolean doesn't handle that!
-      var result = typeof Constructor === 'undefined' ?
-          {} : new (Instantiate(Constructor))();
-      ConfigureTemplateInstance(result, data);
-      result = %ToFastProperties(result);
+      var result;
+      if (typeof Constructor === 'undefined') {
+        result = {};
+        ConfigureTemplateInstance(result, data);
+      } else {
+        // ConfigureTemplateInstance is implicitly called before calling the API
+        // constructor in HandleApiCall.
+        result = new (Instantiate(Constructor))();
+        result = %ToFastProperties(result);
+      }
       return result;
     default:
       throw 'Unknown API tag <' + tag + '>';
@@ -62,11 +68,11 @@
         // internal ToBoolean doesn't handle that!
         if (typeof parent !== 'undefined') {
           var parent_fun = Instantiate(parent);
-          %SetPrototype(prototype, parent_fun.prototype);
+          %InternalSetPrototype(prototype, parent_fun.prototype);
         }
       }
       var fun = %CreateApiFunction(data, prototype);
-      if (name) %FunctionSetName(fun, name);
+      if (IS_STRING(name)) %FunctionSetName(fun, name);
       var doNotCache = flags & (1 << kDoNotCacheBit);
       if (!doNotCache) cache[serialNumber] = fun;
       ConfigureTemplateInstance(fun, data);
@@ -93,15 +99,15 @@
         var prop_data = properties[i + 2];
         var attributes = properties[i + 3];
         var value = Instantiate(prop_data, name);
-        %SetProperty(obj, name, value, attributes);
-      } else if (length == 5) {
+        %AddPropertyForTemplate(obj, name, value, attributes);
+      } else if (length == 4 || length == 5) {
+        // TODO(verwaest): The 5th value used to be access_control. Remove once
+        // the bindings are updated.
         var name = properties[i + 1];
         var getter = properties[i + 2];
         var setter = properties[i + 3];
         var attribute = properties[i + 4];
-        var access_control = properties[i + 5];
-        %SetAccessorProperty(
-            obj, name, getter, setter, attribute, access_control);
+        %DefineApiAccessorProperty(obj, name, getter, setter, attribute);
       } else {
         throw "Bad properties array";
       }
diff --git a/src/arguments.cc b/src/arguments.cc
index f4550ae..d31c479 100644
--- a/src/arguments.cc
+++ b/src/arguments.cc
@@ -3,8 +3,8 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
-#include "src/arguments.h"
 
+#include "src/arguments.h"
 #include "src/vm-state-inl.h"
 
 namespace v8 {
diff --git a/src/arguments.h b/src/arguments.h
index 320b6ad..9fb2da3 100644
--- a/src/arguments.h
+++ b/src/arguments.h
@@ -6,6 +6,7 @@
 #define V8_ARGUMENTS_H_
 
 #include "src/allocation.h"
+#include "src/isolate.h"
 
 namespace v8 {
 namespace internal {
@@ -31,7 +32,7 @@
       : length_(length), arguments_(arguments) { }
 
   Object*& operator[] (int index) {
-    ASSERT(0 <= index && index < length_);
+    DCHECK(0 <= index && index < length_);
     return *(reinterpret_cast<Object**>(reinterpret_cast<intptr_t>(arguments_) -
                                         index * kPointerSize));
   }
@@ -67,13 +68,13 @@
 // They are used to generate the Call() functions below
 // These aren't included in the list as they have duplicate signatures
 // F(NamedPropertyEnumeratorCallback, ...)
-// F(NamedPropertyGetterCallback, ...)
 
 #define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
   F(IndexedPropertyEnumeratorCallback, v8::Array) \
 
 #define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
-  F(AccessorGetterCallback, v8::Value, v8::Local<v8::String>) \
+  F(NamedPropertyGetterCallback, v8::Value, v8::Local<v8::String>) \
+  F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>) \
   F(NamedPropertyQueryCallback, \
     v8::Integer, \
     v8::Local<v8::String>) \
@@ -101,9 +102,9 @@
     v8::Local<v8::Value>) \
 
 #define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
-  F(AccessorSetterCallback, \
+  F(AccessorNameSetterCallback, \
     void, \
-    v8::Local<v8::String>, \
+    v8::Local<v8::Name>, \
     v8::Local<v8::Value>) \
 
 
@@ -175,8 +176,8 @@
     values[T::kReturnValueDefaultValueIndex] =
         isolate->heap()->the_hole_value();
     values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
-    ASSERT(values[T::kHolderIndex]->IsHeapObject());
-    ASSERT(values[T::kIsolateIndex]->IsSmi());
+    DCHECK(values[T::kHolderIndex]->IsHeapObject());
+    DCHECK(values[T::kIsolateIndex]->IsSmi());
   }
 
   /*
@@ -247,9 +248,9 @@
     values[T::kReturnValueDefaultValueIndex] =
         isolate->heap()->the_hole_value();
     values[T::kReturnValueIndex] = isolate->heap()->the_hole_value();
-    ASSERT(values[T::kCalleeIndex]->IsJSFunction());
-    ASSERT(values[T::kHolderIndex]->IsHeapObject());
-    ASSERT(values[T::kIsolateIndex]->IsSmi());
+    DCHECK(values[T::kCalleeIndex]->IsJSFunction());
+    DCHECK(values[T::kHolderIndex]->IsHeapObject());
+    DCHECK(values[T::kIsolateIndex]->IsSmi());
   }
 
   /*
diff --git a/src/arm/assembler-arm-inl.h b/src/arm/assembler-arm-inl.h
index d09e700..8b5c4b8 100644
--- a/src/arm/assembler-arm-inl.h
+++ b/src/arm/assembler-arm-inl.h
@@ -39,7 +39,7 @@
 
 #include "src/arm/assembler-arm.h"
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/debug.h"
 
 
@@ -71,8 +71,8 @@
 
 
 int DwVfpRegister::ToAllocationIndex(DwVfpRegister reg) {
-  ASSERT(!reg.is(kDoubleRegZero));
-  ASSERT(!reg.is(kScratchDoubleReg));
+  DCHECK(!reg.is(kDoubleRegZero));
+  DCHECK(!reg.is(kScratchDoubleReg));
   if (reg.code() > kDoubleRegZero.code()) {
     return reg.code() - kNumReservedRegisters;
   }
@@ -81,8 +81,8 @@
 
 
 DwVfpRegister DwVfpRegister::FromAllocationIndex(int index) {
-  ASSERT(index >= 0 && index < NumAllocatableRegisters());
-  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+  DCHECK(index >= 0 && index < NumAllocatableRegisters());
+  DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
          kNumReservedRegisters - 1);
   if (index >= kDoubleRegZero.code()) {
     return from_code(index + kNumReservedRegisters);
@@ -103,13 +103,13 @@
 
 
 Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
                               || rmode_ == EXTERNAL_REFERENCE);
   if (FLAG_enable_ool_constant_pool ||
@@ -118,22 +118,15 @@
     // serializer and expects the address to reside within the code object.
     return reinterpret_cast<Address>(pc_);
   } else {
-    ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
-    return Assembler::target_pointer_address_at(pc_);
+    DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
+    return constant_pool_entry_address();
   }
 }
 
 
 Address RelocInfo::constant_pool_entry_address() {
-  ASSERT(IsInConstantPool());
-  if (FLAG_enable_ool_constant_pool) {
-    ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_)));
-    return Assembler::target_constant_pool_address_at(pc_,
-                                                      host_->constant_pool());
-  } else {
-    ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_)));
-    return Assembler::target_pointer_address_at(pc_);
-  }
+  DCHECK(IsInConstantPool());
+  return Assembler::constant_pool_entry_address(pc_, host_->constant_pool());
 }
 
 
@@ -145,7 +138,7 @@
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL && IsCodeTarget(rmode_)) {
@@ -157,13 +150,13 @@
 
 
 Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
 }
 
 
 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Handle<Object>(reinterpret_cast<Object**>(
       Assembler::target_address_at(pc_, host_)));
 }
@@ -172,8 +165,7 @@
 void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  ASSERT(!target->IsConsString());
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, host_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
@@ -187,13 +179,13 @@
 
 
 Address RelocInfo::target_reference() {
-  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  DCHECK(rmode_ == EXTERNAL_REFERENCE);
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   return target_address();
 }
 
@@ -201,21 +193,21 @@
 void RelocInfo::set_target_runtime_entry(Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target)
     set_target_address(target, write_barrier_mode, icache_flush_mode);
 }
 
 
 Handle<Cell> RelocInfo::target_cell_handle() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = Memory::Address_at(pc_);
   return Handle<Cell>(reinterpret_cast<Cell**>(address));
 }
 
 
 Cell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   return Cell::FromValueAddress(Memory::Address_at(pc_));
 }
 
@@ -223,7 +215,7 @@
 void RelocInfo::set_target_cell(Cell* cell,
                                 WriteBarrierMode write_barrier_mode,
                                 ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
@@ -245,7 +237,7 @@
 
 
 Code* RelocInfo::code_age_stub() {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
       Memory::Address_at(pc_ +
                          (kNoCodeAgeSequenceLength - Assembler::kInstrSize)));
@@ -254,7 +246,7 @@
 
 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   Memory::Address_at(pc_ +
                      (kNoCodeAgeSequenceLength - Assembler::kInstrSize)) =
       stub->instruction_start();
@@ -264,14 +256,14 @@
 Address RelocInfo::call_address() {
   // The two-instruction offset assumes a patched debug break slot or return
   // sequence.
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return Memory::Address_at(pc_ + 2 * Assembler::kInstrSize);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Memory::Address_at(pc_ + 2 * Assembler::kInstrSize) = target;
   if (host() != NULL) {
@@ -293,14 +285,14 @@
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
 }
 
 
 void RelocInfo::WipeOut() {
-  ASSERT(IsEmbeddedObject(rmode_) ||
+  DCHECK(IsEmbeddedObject(rmode_) ||
          IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) ||
          IsExternalReference(rmode_));
@@ -314,8 +306,8 @@
   // A patched return sequence is:
   //  ldr ip, [pc, #0]
   //  blx ip
-  return ((current_instr & kLdrPCMask) == kLdrPCPattern)
-          && ((next_instr & kBlxRegMask) == kBlxRegPattern);
+  return Assembler::IsLdrPcImmediateOffset(current_instr) &&
+         Assembler::IsBlxReg(next_instr);
 }
 
 
@@ -428,52 +420,38 @@
 }
 
 
-Address Assembler::target_pointer_address_at(Address pc) {
-  Instr instr = Memory::int32_at(pc);
-  return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
-}
-
-
-Address Assembler::target_constant_pool_address_at(
-    Address pc, ConstantPoolArray* constant_pool) {
-  ASSERT(constant_pool != NULL);
-  ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
-  Instr instr = Memory::int32_at(pc);
-  return reinterpret_cast<Address>(constant_pool) +
-      GetLdrRegisterImmediateOffset(instr);
-}
-
-
-Address Assembler::target_address_at(Address pc,
-                                     ConstantPoolArray* constant_pool) {
-  if (IsMovW(Memory::int32_at(pc))) {
-    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
-    Instruction* instr = Instruction::At(pc);
-    Instruction* next_instr = Instruction::At(pc + kInstrSize);
-    return reinterpret_cast<Address>(
-        (next_instr->ImmedMovwMovtValue() << 16) |
-        instr->ImmedMovwMovtValue());
-  } else if (FLAG_enable_ool_constant_pool) {
-    ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
-    return Memory::Address_at(
-        target_constant_pool_address_at(pc, constant_pool));
-  } else {
-    ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
-    return Memory::Address_at(target_pointer_address_at(pc));
-  }
-}
-
-
 Address Assembler::target_address_from_return_address(Address pc) {
   // Returns the address of the call target from the return address that will
   // be returned to after a call.
-  // Call sequence on V7 or later is :
+  // Call sequence on V7 or later is:
   //  movw  ip, #... @ call address low 16
   //  movt  ip, #... @ call address high 16
   //  blx   ip
   //                      @ return address
-  // Or pre-V7 or cases that need frequent patching:
-  //  ldr   ip, [pc, #...] @ call address
+  // For V6 when the constant pool is unavailable, it is:
+  //  mov  ip, #...     @ call address low 8
+  //  orr  ip, ip, #... @ call address 2nd 8
+  //  orr  ip, ip, #... @ call address 3rd 8
+  //  orr  ip, ip, #... @ call address high 8
+  //  blx   ip
+  //                      @ return address
+  // In cases that need frequent patching, the address is in the
+  // constant pool.  It could be a small constant pool load:
+  //  ldr   ip, [pc / pp, #...] @ call address
+  //  blx   ip
+  //                      @ return address
+  // Or an extended constant pool load (ARMv7):
+  //  movw  ip, #...
+  //  movt  ip, #...
+  //  ldr   ip, [pc, ip]  @ call address
+  //  blx   ip
+  //                      @ return address
+  // Or an extended constant pool load (ARMv6):
+  //  mov  ip, #...
+  //  orr  ip, ip, #...
+  //  orr  ip, ip, #...
+  //  orr  ip, ip, #...
+  //  ldr   ip, [pc, ip]  @ call address
   //  blx   ip
   //                      @ return address
   Address candidate = pc - 2 * Assembler::kInstrSize;
@@ -481,22 +459,61 @@
   if (IsLdrPcImmediateOffset(candidate_instr) |
       IsLdrPpImmediateOffset(candidate_instr)) {
     return candidate;
+  } else {
+    if (IsLdrPpRegOffset(candidate_instr)) {
+      candidate -= Assembler::kInstrSize;
+    }
+    if (CpuFeatures::IsSupported(ARMv7)) {
+      candidate -= 1 * Assembler::kInstrSize;
+      DCHECK(IsMovW(Memory::int32_at(candidate)) &&
+             IsMovT(Memory::int32_at(candidate + Assembler::kInstrSize)));
+    } else {
+      candidate -= 3 * Assembler::kInstrSize;
+      DCHECK(
+          IsMovImmed(Memory::int32_at(candidate)) &&
+          IsOrrImmed(Memory::int32_at(candidate + Assembler::kInstrSize)) &&
+          IsOrrImmed(Memory::int32_at(candidate + 2 * Assembler::kInstrSize)) &&
+          IsOrrImmed(Memory::int32_at(candidate + 3 * Assembler::kInstrSize)));
+    }
+    return candidate;
   }
-  candidate = pc - 3 * Assembler::kInstrSize;
-  ASSERT(IsMovW(Memory::int32_at(candidate)) &&
-         IsMovT(Memory::int32_at(candidate + kInstrSize)));
-  return candidate;
+}
+
+
+Address Assembler::break_address_from_return_address(Address pc) {
+  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
 }
 
 
 Address Assembler::return_address_from_call_start(Address pc) {
   if (IsLdrPcImmediateOffset(Memory::int32_at(pc)) |
       IsLdrPpImmediateOffset(Memory::int32_at(pc))) {
+    // Load from constant pool, small section.
     return pc + kInstrSize * 2;
   } else {
-    ASSERT(IsMovW(Memory::int32_at(pc)));
-    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
-    return pc + kInstrSize * 3;
+    if (CpuFeatures::IsSupported(ARMv7)) {
+      DCHECK(IsMovW(Memory::int32_at(pc)));
+      DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
+      if (IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize))) {
+        // Load from constant pool, extended section.
+        return pc + kInstrSize * 4;
+      } else {
+        // A movw / movt load immediate.
+        return pc + kInstrSize * 3;
+      }
+    } else {
+      DCHECK(IsMovImmed(Memory::int32_at(pc)));
+      DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)));
+      DCHECK(IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)));
+      DCHECK(IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+      if (IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize))) {
+        // Load from constant pool, extended section.
+        return pc + kInstrSize * 6;
+      } else {
+        // A mov / orr load immediate.
+        return pc + kInstrSize * 5;
+      }
+    }
   }
 }
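
Collapsing the branches above into a table: the five recognized call sequences differ only in length, counted in instructions from the start of the sequence to the return address. An illustrative sketch, not part of the patch:

    // Lengths implied by return_address_from_call_start above.
    enum CallSequence {
      kSmallPoolLoad,   // ldr ip, [pc/pp, #...]; blx ip
      kMovwMovt,        // movw; movt; blx ip                    (ARMv7)
      kExtendedPoolV7,  // movw; movt; ldr ip, [pp, ip]; blx ip  (ARMv7)
      kMovOrr,          // mov; orr; orr; orr; blx ip            (pre-ARMv7)
      kExtendedPoolV6   // mov; orr x3; ldr ip, [pp, ip]; blx ip (pre-ARMv7)
    };

    int SequenceLengthInInstructions(CallSequence seq) {
      switch (seq) {
        case kSmallPoolLoad:  return 2;
        case kMovwMovt:       return 3;
        case kExtendedPoolV7: return 4;
        case kMovOrr:         return 5;
        case kExtendedPoolV6: return 6;
      }
      return 0;  // Unreachable.
    }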
 
@@ -511,15 +528,89 @@
 }
 
 
-static Instr EncodeMovwImmediate(uint32_t immediate) {
-  ASSERT(immediate < 0x10000);
-  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+bool Assembler::is_constant_pool_load(Address pc) {
+  if (CpuFeatures::IsSupported(ARMv7)) {
+    return !Assembler::IsMovW(Memory::int32_at(pc)) ||
+           (FLAG_enable_ool_constant_pool &&
+            Assembler::IsLdrPpRegOffset(
+                Memory::int32_at(pc + 2 * Assembler::kInstrSize)));
+  } else {
+    return !Assembler::IsMovImmed(Memory::int32_at(pc)) ||
+           (FLAG_enable_ool_constant_pool &&
+            Assembler::IsLdrPpRegOffset(
+                Memory::int32_at(pc + 4 * Assembler::kInstrSize)));
+  }
 }
 
 
-static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate) {
-  instruction &= ~EncodeMovwImmediate(0xffff);
-  return instruction | EncodeMovwImmediate(immediate);
+Address Assembler::constant_pool_entry_address(
+    Address pc, ConstantPoolArray* constant_pool) {
+  if (FLAG_enable_ool_constant_pool) {
+    DCHECK(constant_pool != NULL);
+    int cp_offset;
+    if (!CpuFeatures::IsSupported(ARMv7) && IsMovImmed(Memory::int32_at(pc))) {
+      DCHECK(IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+             IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+             IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)) &&
+             IsLdrPpRegOffset(Memory::int32_at(pc + 4 * kInstrSize)));
+      // This is an extended constant pool lookup (ARMv6).
+      Instr mov_instr = instr_at(pc);
+      Instr orr_instr_1 = instr_at(pc + kInstrSize);
+      Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
+      Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
+      cp_offset = DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
+                  DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3);
+    } else if (IsMovW(Memory::int32_at(pc))) {
+      DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)) &&
+             IsLdrPpRegOffset(Memory::int32_at(pc + 2 * kInstrSize)));
+      // This is an extended constant pool lookup (ARMv7).
+      Instruction* movw_instr = Instruction::At(pc);
+      Instruction* movt_instr = Instruction::At(pc + kInstrSize);
+      cp_offset = (movt_instr->ImmedMovwMovtValue() << 16) |
+                  movw_instr->ImmedMovwMovtValue();
+    } else {
+      // This is a small constant pool lookup.
+      DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc)));
+      cp_offset = GetLdrRegisterImmediateOffset(Memory::int32_at(pc));
+    }
+    return reinterpret_cast<Address>(constant_pool) + cp_offset;
+  } else {
+    DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc)));
+    Instr instr = Memory::int32_at(pc);
+    return pc + GetLdrRegisterImmediateOffset(instr) + kPcLoadDelta;
+  }
+}
+
+
+Address Assembler::target_address_at(Address pc,
+                                     ConstantPoolArray* constant_pool) {
+  if (is_constant_pool_load(pc)) {
+    // This is a constant pool lookup. Return the value in the constant pool.
+    return Memory::Address_at(constant_pool_entry_address(pc, constant_pool));
+  } else if (CpuFeatures::IsSupported(ARMv7)) {
+    // This is a movw / movt immediate load. Return the immediate.
+    DCHECK(IsMovW(Memory::int32_at(pc)) &&
+           IsMovT(Memory::int32_at(pc + kInstrSize)));
+    Instruction* movw_instr = Instruction::At(pc);
+    Instruction* movt_instr = Instruction::At(pc + kInstrSize);
+    return reinterpret_cast<Address>(
+        (movt_instr->ImmedMovwMovtValue() << 16) |
+         movw_instr->ImmedMovwMovtValue());
+  } else {
+    // This is a mov / orr immediate load. Return the immediate.
+    DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+           IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+           IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+           IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+    Instr mov_instr = instr_at(pc);
+    Instr orr_instr_1 = instr_at(pc + kInstrSize);
+    Instr orr_instr_2 = instr_at(pc + 2 * kInstrSize);
+    Instr orr_instr_3 = instr_at(pc + 3 * kInstrSize);
+    Address ret = reinterpret_cast<Address>(
+        DecodeShiftImm(mov_instr) | DecodeShiftImm(orr_instr_1) |
+        DecodeShiftImm(orr_instr_2) | DecodeShiftImm(orr_instr_3));
+    return ret;
+  }
 }
 
 
@@ -527,32 +618,51 @@
                                       ConstantPoolArray* constant_pool,
                                       Address target,
                                       ICacheFlushMode icache_flush_mode) {
-  if (IsMovW(Memory::int32_at(pc))) {
-    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+  if (is_constant_pool_load(pc)) {
+    // This is a constant pool lookup. Update the entry in the constant pool.
+    Memory::Address_at(constant_pool_entry_address(pc, constant_pool)) = target;
+    // Intuitively, we would think it is necessary to always flush the
+    // instruction cache after patching a target address in the code as follows:
+    //   CpuFeatures::FlushICache(pc, sizeof(target));
+    // However, on ARM, no instruction is actually patched in the case
+    // of embedded constants of the form:
+    // ldr   ip, [pp, #...]
+    // since the instruction accessing this address in the constant pool remains
+    // unchanged.
+  } else if (CpuFeatures::IsSupported(ARMv7)) {
+    // This is a movw / movt immediate load. Patch the immediate embedded in
+    // the instructions.
+    DCHECK(IsMovW(Memory::int32_at(pc)));
+    DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
     uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
     uint32_t immediate = reinterpret_cast<uint32_t>(target);
     instr_ptr[0] = PatchMovwImmediate(instr_ptr[0], immediate & 0xFFFF);
     instr_ptr[1] = PatchMovwImmediate(instr_ptr[1], immediate >> 16);
-    ASSERT(IsMovW(Memory::int32_at(pc)));
-    ASSERT(IsMovT(Memory::int32_at(pc + kInstrSize)));
+    DCHECK(IsMovW(Memory::int32_at(pc)));
+    DCHECK(IsMovT(Memory::int32_at(pc + kInstrSize)));
     if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-      CPU::FlushICache(pc, 2 * kInstrSize);
+      CpuFeatures::FlushICache(pc, 2 * kInstrSize);
     }
-  } else if (FLAG_enable_ool_constant_pool) {
-    ASSERT(IsLdrPpImmediateOffset(Memory::int32_at(pc)));
-    Memory::Address_at(
-      target_constant_pool_address_at(pc, constant_pool)) = target;
   } else {
-    ASSERT(IsLdrPcImmediateOffset(Memory::int32_at(pc)));
-    Memory::Address_at(target_pointer_address_at(pc)) = target;
-    // Intuitively, we would think it is necessary to always flush the
-    // instruction cache after patching a target address in the code as follows:
-    //   CPU::FlushICache(pc, sizeof(target));
-    // However, on ARM, no instruction is actually patched in the case
-    // of embedded constants of the form:
-    // ldr   ip, [pc, #...]
-    // since the instruction accessing this address in the constant pool remains
-    // unchanged.
+    // This is a mov / orr immediate load. Patch the immediate embedded in
+    // the instructions.
+    DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+           IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+           IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+           IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+    uint32_t* instr_ptr = reinterpret_cast<uint32_t*>(pc);
+    uint32_t immediate = reinterpret_cast<uint32_t>(target);
+    instr_ptr[0] = PatchShiftImm(instr_ptr[0], immediate & kImm8Mask);
+    instr_ptr[1] = PatchShiftImm(instr_ptr[1], immediate & (kImm8Mask << 8));
+    instr_ptr[2] = PatchShiftImm(instr_ptr[2], immediate & (kImm8Mask << 16));
+    instr_ptr[3] = PatchShiftImm(instr_ptr[3], immediate & (kImm8Mask << 24));
+    DCHECK(IsMovImmed(Memory::int32_at(pc)) &&
+           IsOrrImmed(Memory::int32_at(pc + kInstrSize)) &&
+           IsOrrImmed(Memory::int32_at(pc + 2 * kInstrSize)) &&
+           IsOrrImmed(Memory::int32_at(pc + 3 * kInstrSize)));
+    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+      CpuFeatures::FlushICache(pc, 4 * kInstrSize);
+    }
   }
 }
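
A worked sketch of the pre-ARMv7 patch path above: each of the four instructions carries one byte of the 32-bit target (assuming kImm8Mask == 0xff), every slice is an encodable rotated 8-bit ARM immediate, and ORing the slices reconstructs the word. Unlike the constant pool case, the instruction bytes themselves change here, hence the 4 * kInstrSize ICache flush:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t target = 0x12345678;
      uint32_t slices[4] = {
        target & 0xffu,          // mov  ip, #0x78
        target & (0xffu << 8),   // orr  ip, ip, #0x5600
        target & (0xffu << 16),  // orr  ip, ip, #0x340000
        target & (0xffu << 24)   // orr  ip, ip, #0x12000000
      };
      uint32_t rebuilt = slices[0] | slices[1] | slices[2] | slices[3];
      std::printf("%08x\n", rebuilt);  // Prints 12345678.
      return 0;
    }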
 
diff --git a/src/arm/assembler-arm.cc b/src/arm/assembler-arm.cc
index 68738a7..96f28f9 100644
--- a/src/arm/assembler-arm.cc
+++ b/src/arm/assembler-arm.cc
@@ -39,6 +39,8 @@
 #if V8_TARGET_ARCH_ARM
 
 #include "src/arm/assembler-arm-inl.h"
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
 #include "src/macro-assembler.h"
 #include "src/serialize.h"
 
@@ -93,7 +95,7 @@
 
 #else  // __arm__
   // Probe for additional features at runtime.
-  CPU cpu;
+  base::CPU cpu;
   if (FLAG_enable_vfp3 && cpu.has_vfp3()) {
     // This implementation also sets the VFP flags if runtime
     // detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
@@ -109,76 +111,74 @@
     if (FLAG_enable_armv7) supported_ |= 1u << ARMv7;
     if (FLAG_enable_unaligned_accesses) supported_ |= 1u << UNALIGNED_ACCESSES;
     // Use movw/movt for QUALCOMM ARMv7 cores.
-    if (FLAG_enable_movw_movt && cpu.implementer() == CPU::QUALCOMM) {
+    if (FLAG_enable_movw_movt && cpu.implementer() == base::CPU::QUALCOMM) {
       supported_ |= 1u << MOVW_MOVT_IMMEDIATE_LOADS;
     }
   }
 
   // ARM Cortex-A9 and Cortex-A5 have 32 byte cachelines.
-  if (cpu.implementer() == CPU::ARM && (cpu.part() == CPU::ARM_CORTEX_A5 ||
-                                        cpu.part() == CPU::ARM_CORTEX_A9)) {
+  if (cpu.implementer() == base::CPU::ARM &&
+      (cpu.part() == base::CPU::ARM_CORTEX_A5 ||
+       cpu.part() == base::CPU::ARM_CORTEX_A9)) {
     cache_line_size_ = 32;
   }
 
   if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
 #endif
 
-  ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
+  DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
 }
 
 
 void CpuFeatures::PrintTarget() {
   const char* arm_arch = NULL;
-  const char* arm_test = "";
+  const char* arm_target_type = "";
+  const char* arm_no_probe = "";
   const char* arm_fpu = "";
   const char* arm_thumb = "";
   const char* arm_float_abi = NULL;
 
+#if !defined __arm__
+  arm_target_type = " simulator";
+#endif
+
+#if defined ARM_TEST_NO_FEATURE_PROBE
+  arm_no_probe = " noprobe";
+#endif
+
 #if defined CAN_USE_ARMV7_INSTRUCTIONS
   arm_arch = "arm v7";
 #else
   arm_arch = "arm v6";
 #endif
 
-#ifdef __arm__
-
-# ifdef ARM_TEST
-  arm_test = " test";
-# endif
-# if defined __ARM_NEON__
+#if defined CAN_USE_NEON
   arm_fpu = " neon";
-# elif defined CAN_USE_VFP3_INSTRUCTIONS
-  arm_fpu = " vfp3";
-# else
-  arm_fpu = " vfp2";
-# endif
-# if (defined __thumb__) || (defined __thumb2__)
-  arm_thumb = " thumb";
-# endif
-  arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";
-
-#else  // __arm__
-
-  arm_test = " simulator";
-# if defined CAN_USE_VFP3_INSTRUCTIONS
+#elif defined CAN_USE_VFP3_INSTRUCTIONS
 #  if defined CAN_USE_VFP32DREGS
   arm_fpu = " vfp3";
 #  else
   arm_fpu = " vfp3-d16";
 #  endif
-# else
+#else
   arm_fpu = " vfp2";
-# endif
-# if USE_EABI_HARDFLOAT == 1
+#endif
+
+#ifdef __arm__
+  arm_float_abi = base::OS::ArmUsingHardFloat() ? "hard" : "softfp";
+#elif USE_EABI_HARDFLOAT
   arm_float_abi = "hard";
-# else
+#else
   arm_float_abi = "softfp";
-# endif
+#endif
 
-#endif  // __arm__
+#if defined __arm__ && ((defined __thumb__) || (defined __thumb2__))
+  arm_thumb = " thumb";
+#endif
 
-  printf("target%s %s%s%s %s\n",
-         arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
+  printf("target%s%s %s%s%s %s\n",
+         arm_target_type, arm_no_probe, arm_arch, arm_fpu, arm_thumb,
+         arm_float_abi);
 }
 
 
@@ -194,7 +194,7 @@
     CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
     CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
 #ifdef __arm__
-  bool eabi_hardfloat = OS::ArmUsingHardFloat();
+  bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
 #elif USE_EABI_HARDFLOAT
   bool eabi_hardfloat = true;
 #else
@@ -208,8 +208,8 @@
 // Implementation of DwVfpRegister
 
 const char* DwVfpRegister::AllocationIndexToString(int index) {
-  ASSERT(index >= 0 && index < NumAllocatableRegisters());
-  ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
+  DCHECK(index >= 0 && index < NumAllocatableRegisters());
+  DCHECK(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
          kNumReservedRegisters - 1);
   if (index >= kDoubleRegZero.code()) index += kNumReservedRegisters;
   return VFPRegisters::Name(index, true);
@@ -232,11 +232,7 @@
 
 
 bool RelocInfo::IsInConstantPool() {
-  if (FLAG_enable_ool_constant_pool) {
-    return Assembler::IsLdrPpImmediateOffset(Memory::int32_at(pc_));
-  } else {
-    return Assembler::IsLdrPcImmediateOffset(Memory::int32_at(pc_));
-  }
+  return Assembler::is_constant_pool_load(pc_);
 }
 
 
@@ -249,7 +245,7 @@
   }
 
   // Indicate that code has changed.
-  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
 }
 
 
@@ -271,7 +267,7 @@
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
   if (obj->IsHeapObject()) {
-    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
     imm32_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
   } else {
@@ -283,7 +279,7 @@
 
 
 Operand::Operand(Register rm, ShiftOp shift_op, int shift_imm) {
-  ASSERT(is_uint5(shift_imm));
+  DCHECK(is_uint5(shift_imm));
 
   rm_ = rm;
   rs_ = no_reg;
@@ -296,7 +292,7 @@
     shift_op = LSL;
   } else if (shift_op == RRX) {
     // encoded as ROR with shift_imm == 0
-    ASSERT(shift_imm == 0);
+    DCHECK(shift_imm == 0);
     shift_op_ = ROR;
     shift_imm_ = 0;
   }
@@ -304,7 +300,7 @@
 
 
 Operand::Operand(Register rm, ShiftOp shift_op, Register rs) {
-  ASSERT(shift_op != RRX);
+  DCHECK(shift_op != RRX);
   rm_ = rm;
   rs_ = no_reg;
   shift_op_ = shift_op;
@@ -331,7 +327,7 @@
 
 MemOperand::MemOperand(Register rn, Register rm,
                        ShiftOp shift_op, int shift_imm, AddrMode am) {
-  ASSERT(is_uint5(shift_imm));
+  DCHECK(is_uint5(shift_imm));
   rn_ = rn;
   rm_ = rm;
   shift_op_ = shift_op;
@@ -341,7 +337,7 @@
 
 
 NeonMemOperand::NeonMemOperand(Register rn, AddrMode am, int align) {
-  ASSERT((am == Offset) || (am == PostIndex));
+  DCHECK((am == Offset) || (am == PostIndex));
   rn_ = rn;
   rm_ = (am == Offset) ? pc : sp;
   SetAlignment(align);
@@ -403,10 +399,6 @@
 // -----------------------------------------------------------------------------
 // Specific instructions, constants, and masks.
 
-// add(sp, sp, 4) instruction (aka Pop())
-const Instr kPopInstruction =
-    al | PostIndex | 4 | LeaveCC | I | kRegister_sp_Code * B16 |
-        kRegister_sp_Code * B12;
 // str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
 // register r is not encoded.
 const Instr kPushRegPattern =
@@ -415,14 +407,15 @@
 // register r is not encoded.
 const Instr kPopRegPattern =
     al | B26 | L | 4 | PostIndex | kRegister_sp_Code * B16;
-// mov lr, pc
-const Instr kMovLrPc = al | MOV | kRegister_pc_Code | kRegister_lr_Code * B12;
 // ldr rd, [pc, #offset]
-const Instr kLdrPCMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPCPattern = 5 * B24 | L | kRegister_pc_Code * B16;
+const Instr kLdrPCImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPCImmedPattern = 5 * B24 | L | kRegister_pc_Code * B16;
 // ldr rd, [pp, #offset]
-const Instr kLdrPpMask = 15 * B24 | 7 * B20 | 15 * B16;
-const Instr kLdrPpPattern = 5 * B24 | L | kRegister_r8_Code * B16;
+const Instr kLdrPpImmedMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpImmedPattern = 5 * B24 | L | kRegister_r8_Code * B16;
+// ldr rd, [pp, rn]
+const Instr kLdrPpRegMask = 15 * B24 | 7 * B20 | 15 * B16;
+const Instr kLdrPpRegPattern = 7 * B24 | L | kRegister_r8_Code * B16;
 // vldr dd, [pc, #offset]
 const Instr kVldrDPCMask = 15 * B24 | 3 * B20 | 15 * B16 | 15 * B8;
 const Instr kVldrDPCPattern = 13 * B24 | L | kRegister_pc_Code * B16 | 11 * B8;
@@ -440,9 +433,13 @@
 const Instr kMovMvnFlip = B22;
 const Instr kMovLeaveCCMask = 0xdff * B16;
 const Instr kMovLeaveCCPattern = 0x1a0 * B16;
-const Instr kMovwMask = 0xff * B20;
 const Instr kMovwPattern = 0x30 * B20;
+const Instr kMovtPattern = 0x34 * B20;
 const Instr kMovwLeaveCCFlip = 0x5 * B21;
+const Instr kMovImmedMask = 0x7f * B21;
+const Instr kMovImmedPattern = 0x1d * B21;
+const Instr kOrrImmedMask = 0x7f * B21;
+const Instr kOrrImmedPattern = 0x1c * B21;
 const Instr kCmpCmnMask = 0xdd * B20 | 0xf * B12;
 const Instr kCmpCmnPattern = 0x15 * B20;
 const Instr kCmpCmnFlip = B21;
@@ -459,8 +456,6 @@
 const Instr kStrRegFpNegOffsetPattern =
     al | B26 | NegOffset | kRegister_fp_Code * B16;
 const Instr kLdrStrInstrTypeMask = 0xffff0000;
-const Instr kLdrStrInstrArgumentMask = 0x0000ffff;
-const Instr kLdrStrOffsetMask = 0x00000fff;
 
 
 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
@@ -478,13 +473,12 @@
   first_const_pool_64_use_ = -1;
   last_bound_pos_ = 0;
   constant_pool_available_ = !FLAG_enable_ool_constant_pool;
-  constant_pool_full_ = false;
   ClearRecordedAstId();
 }
 
 
 Assembler::~Assembler() {
-  ASSERT(const_pool_blocked_nesting_ == 0);
+  DCHECK(const_pool_blocked_nesting_ == 0);
 }
 
 
@@ -492,8 +486,8 @@
   if (!FLAG_enable_ool_constant_pool) {
     // Emit constant pool if necessary.
     CheckConstPool(true, false);
-    ASSERT(num_pending_32_bit_reloc_info_ == 0);
-    ASSERT(num_pending_64_bit_reloc_info_ == 0);
+    DCHECK(num_pending_32_bit_reloc_info_ == 0);
+    DCHECK(num_pending_64_bit_reloc_info_ == 0);
   }
   // Set up code descriptor.
   desc->buffer = buffer_;
@@ -505,7 +499,7 @@
 
 
 void Assembler::Align(int m) {
-  ASSERT(m >= 4 && IsPowerOf2(m));
+  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
   while ((pc_offset() & (m - 1)) != 0) {
     nop();
   }
@@ -529,7 +523,7 @@
 
 
 int Assembler::GetBranchOffset(Instr instr) {
-  ASSERT(IsBranch(instr));
+  DCHECK(IsBranch(instr));
   // Take the jump offset in the lower 24 bits, sign extend it and multiply it
   // with 4 to get the offset in bytes.
   return ((instr & kImm24Mask) << 8) >> 6;
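
The shift pair in GetBranchOffset does two jobs at once: shifting the 24-bit field up by 8 parks its sign bit in bit 31, and the arithmetic shift down by 6 sign-extends while leaving a net scale of 4 (2^(8-6)). A standalone sketch; like the original it relies on two's-complement wraparound and an arithmetic right shift of a signed value:

    #include <cstdint>
    #include <cstdio>

    int32_t BranchOffsetBytes(int32_t instr) {
      const int32_t kImm24Mask = (1 << 24) - 1;
      return ((instr & kImm24Mask) << 8) >> 6;
    }

    int main() {
      std::printf("%d\n", BranchOffsetBytes(0x00ffffff));  // -4: imm24 == -1
      std::printf("%d\n", BranchOffsetBytes(0x00000001));  // 4
      return 0;
    }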
@@ -547,7 +541,7 @@
 
 
 int Assembler::GetLdrRegisterImmediateOffset(Instr instr) {
-  ASSERT(IsLdrRegisterImmediate(instr));
+  DCHECK(IsLdrRegisterImmediate(instr));
   bool positive = (instr & B23) == B23;
   int offset = instr & kOff12Mask;  // Zero extended offset.
   return positive ? offset : -offset;
@@ -555,7 +549,7 @@
 
 
 int Assembler::GetVldrDRegisterImmediateOffset(Instr instr) {
-  ASSERT(IsVldrDRegisterImmediate(instr));
+  DCHECK(IsVldrDRegisterImmediate(instr));
   bool positive = (instr & B23) == B23;
   int offset = instr & kOff8Mask;  // Zero extended offset.
   offset <<= 2;
@@ -564,10 +558,10 @@
 
 
 Instr Assembler::SetLdrRegisterImmediateOffset(Instr instr, int offset) {
-  ASSERT(IsLdrRegisterImmediate(instr));
+  DCHECK(IsLdrRegisterImmediate(instr));
   bool positive = offset >= 0;
   if (!positive) offset = -offset;
-  ASSERT(is_uint12(offset));
+  DCHECK(is_uint12(offset));
   // Set bit indicating whether the offset should be added.
   instr = (instr & ~B23) | (positive ? B23 : 0);
   // Set the actual offset.
@@ -576,11 +570,11 @@
 
 
 Instr Assembler::SetVldrDRegisterImmediateOffset(Instr instr, int offset) {
-  ASSERT(IsVldrDRegisterImmediate(instr));
-  ASSERT((offset & ~3) == offset);  // Must be 64-bit aligned.
+  DCHECK(IsVldrDRegisterImmediate(instr));
+  DCHECK((offset & ~3) == offset);  // Must be 64-bit aligned.
   bool positive = offset >= 0;
   if (!positive) offset = -offset;
-  ASSERT(is_uint10(offset));
+  DCHECK(is_uint10(offset));
   // Set bit indicating whether the offset should be added.
   instr = (instr & ~B23) | (positive ? B23 : 0);
   // Set the actual offset. Its bottom 2 bits are zero.
@@ -594,10 +588,10 @@
 
 
 Instr Assembler::SetStrRegisterImmediateOffset(Instr instr, int offset) {
-  ASSERT(IsStrRegisterImmediate(instr));
+  DCHECK(IsStrRegisterImmediate(instr));
   bool positive = offset >= 0;
   if (!positive) offset = -offset;
-  ASSERT(is_uint12(offset));
+  DCHECK(is_uint12(offset));
   // Set bit indicating whether the offset should be added.
   instr = (instr & ~B23) | (positive ? B23 : 0);
   // Set the actual offset.
@@ -611,9 +605,9 @@
 
 
 Instr Assembler::SetAddRegisterImmediateOffset(Instr instr, int offset) {
-  ASSERT(IsAddRegisterImmediate(instr));
-  ASSERT(offset >= 0);
-  ASSERT(is_uint12(offset));
+  DCHECK(IsAddRegisterImmediate(instr));
+  DCHECK(offset >= 0);
+  DCHECK(is_uint12(offset));
   // Set the offset.
   return (instr & ~kOff12Mask) | offset;
 }
@@ -640,6 +634,24 @@
 }
 
 
+Instr Assembler::GetConsantPoolLoadPattern() {
+  if (FLAG_enable_ool_constant_pool) {
+    return kLdrPpImmedPattern;
+  } else {
+    return kLdrPCImmedPattern;
+  }
+}
+
+
+Instr Assembler::GetConsantPoolLoadMask() {
+  if (FLAG_enable_ool_constant_pool) {
+    return kLdrPpImmedMask;
+  } else {
+    return kLdrPCImmedMask;
+  }
+}
+
+
 bool Assembler::IsPush(Instr instr) {
   return ((instr & ~kRdMask) == kPushRegPattern);
 }
@@ -673,17 +685,27 @@
 bool Assembler::IsLdrPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // ldr<cond> <Rd>, [pc +/- offset_12].
-  return (instr & kLdrPCMask) == kLdrPCPattern;
+  return (instr & kLdrPCImmedMask) == kLdrPCImmedPattern;
 }
 
 
 bool Assembler::IsLdrPpImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // ldr<cond> <Rd>, [pp +/- offset_12].
-  return (instr & kLdrPpMask) == kLdrPpPattern;
+  return (instr & kLdrPpImmedMask) == kLdrPpImmedPattern;
 }
 
 
+bool Assembler::IsLdrPpRegOffset(Instr instr) {
+  // Check the instruction is indeed a
+  // ldr<cond> <Rd>, [pp, +/- <Rm>].
+  return (instr & kLdrPpRegMask) == kLdrPpRegPattern;
+}
+
+
+Instr Assembler::GetLdrPpRegOffsetPattern() { return kLdrPpRegPattern; }
+
+
 bool Assembler::IsVldrDPcImmediateOffset(Instr instr) {
   // Check the instruction is indeed a
   // vldr<cond> <Dd>, [pc +/- offset_10].
@@ -698,6 +720,20 @@
 }
 
 
+bool Assembler::IsBlxReg(Instr instr) {
+  // Check the instruction is indeed a
+  // blxcc <Rm>
+  return (instr & kBlxRegMask) == kBlxRegPattern;
+}
+
+
+bool Assembler::IsBlxIp(Instr instr) {
+  // Check the instruction is indeed a
+  // blx ip
+  return instr == kBlxIp;
+}
+
+
 bool Assembler::IsTstImmediate(Instr instr) {
   return (instr & (B27 | B26 | I | kOpCodeMask | S | kRdMask)) ==
       (I | TST | S);
@@ -717,13 +753,13 @@
 
 
 Register Assembler::GetCmpImmediateRegister(Instr instr) {
-  ASSERT(IsCmpImmediate(instr));
+  DCHECK(IsCmpImmediate(instr));
   return GetRn(instr);
 }
 
 
 int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
-  ASSERT(IsCmpImmediate(instr));
+  DCHECK(IsCmpImmediate(instr));
   return instr & kOff12Mask;
 }
 
@@ -752,7 +788,7 @@
     // Emitted link to a label, not part of a branch.
     return instr;
   }
-  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
+  DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
   int imm26 = ((instr & kImm24Mask) << 8) >> 6;
   if ((Instruction::ConditionField(instr) == kSpecialCondition) &&
       ((instr & B24) != 0)) {
@@ -766,7 +802,7 @@
 void Assembler::target_at_put(int pos, int target_pos) {
   Instr instr = instr_at(pos);
   if (is_uint24(instr)) {
-    ASSERT(target_pos == pos || target_pos >= 0);
+    DCHECK(target_pos == pos || target_pos >= 0);
     // Emitted link to a label, not part of a branch.
     // Load the position of the label relative to the generated code object
     // pointer in a register.
@@ -783,9 +819,9 @@
     // We extract the destination register from the emitted nop instruction.
     Register dst = Register::from_code(
         Instruction::RmValue(instr_at(pos + kInstrSize)));
-    ASSERT(IsNop(instr_at(pos + kInstrSize), dst.code()));
+    DCHECK(IsNop(instr_at(pos + kInstrSize), dst.code()));
     uint32_t target24 = target_pos + (Code::kHeaderSize - kHeapObjectTag);
-    ASSERT(is_uint24(target24));
+    DCHECK(is_uint24(target24));
     if (is_uint8(target24)) {
       // If the target fits in a byte then only patch with a mov
       // instruction.
@@ -834,17 +870,17 @@
     return;
   }
   int imm26 = target_pos - (pos + kPcLoadDelta);
-  ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
+  DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx imm24
   if (Instruction::ConditionField(instr) == kSpecialCondition) {
     // blx uses bit 24 to encode bit 2 of imm26
-    ASSERT((imm26 & 1) == 0);
+    DCHECK((imm26 & 1) == 0);
     instr = (instr & ~(B24 | kImm24Mask)) | ((imm26 & 2) >> 1)*B24;
   } else {
-    ASSERT((imm26 & 3) == 0);
+    DCHECK((imm26 & 3) == 0);
     instr &= ~kImm24Mask;
   }
   int imm24 = imm26 >> 2;
-  ASSERT(is_int24(imm24));
+  DCHECK(is_int24(imm24));
   instr_at_put(pos, instr | (imm24 & kImm24Mask));
 }
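
In the blx case above, the 26-bit offset is only halfword-aligned: bit 0 must be zero, bit 1 travels in bit 24 (the H bit of the special-condition encoding), and bits 2..25 fill the imm24 field. A standalone sketch of that packing:

    #include <cstdint>

    uint32_t PackBlxOffset(uint32_t instr, int32_t imm26) {
      const uint32_t kImm24Mask = (1u << 24) - 1;
      const uint32_t B24 = 1u << 24;
      // Bit 1 of imm26 moves to the H bit; bit 0 is required to be clear.
      instr = (instr & ~(B24 | kImm24Mask)) |
              (((static_cast<uint32_t>(imm26) & 2) >> 1) * B24);
      int32_t imm24 = imm26 >> 2;  // Arithmetic shift preserves the sign.
      return instr | (static_cast<uint32_t>(imm24) & kImm24Mask);
    }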
 
@@ -863,7 +899,7 @@
       if ((instr & ~kImm24Mask) == 0) {
         PrintF("value\n");
       } else {
-        ASSERT((instr & 7*B25) == 5*B25);  // b, bl, or blx
+        DCHECK((instr & 7*B25) == 5*B25);  // b, bl, or blx
         Condition cond = Instruction::ConditionField(instr);
         const char* b;
         const char* c;
@@ -908,7 +944,7 @@
 
 
 void Assembler::bind_to(Label* L, int pos) {
-  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
   while (L->is_linked()) {
     int fixup_pos = L->pos();
     next(L);  // call next before overwriting link with target at fixup_pos
@@ -924,20 +960,20 @@
 
 
 void Assembler::bind(Label* L) {
-  ASSERT(!L->is_bound());  // label can only be bound once
+  DCHECK(!L->is_bound());  // label can only be bound once
   bind_to(L, pc_offset());
 }
 
 
 void Assembler::next(Label* L) {
-  ASSERT(L->is_linked());
+  DCHECK(L->is_linked());
   int link = target_at(L->pos());
   if (link == L->pos()) {
     // Branch target points to the same instruction. This is the end of the link
     // chain.
     L->Unuse();
   } else {
-    ASSERT(link >= 0);
+    DCHECK(link >= 0);
     L->link_to(link);
   }
 }
@@ -971,7 +1007,7 @@
         if (CpuFeatures::IsSupported(ARMv7)) {
           if (imm32 < 0x10000) {
             *instr ^= kMovwLeaveCCFlip;
-            *instr |= EncodeMovwImmediate(imm32);
+            *instr |= Assembler::EncodeMovwImmediate(imm32);
             *rotate_imm = *immed_8 = 0;  // Not used for movw.
             return true;
           }
@@ -1020,10 +1056,7 @@
 
 static bool use_mov_immediate_load(const Operand& x,
                                    const Assembler* assembler) {
-  if (assembler != NULL && !assembler->can_use_constant_pool()) {
-    // If there is no constant pool available, we must use an mov immediate.
-    // TODO(rmcilroy): enable ARMv6 support.
-    ASSERT(CpuFeatures::IsSupported(ARMv7));
+  if (assembler != NULL && !assembler->is_constant_pool_available()) {
     return true;
   } else if (CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS) &&
              (assembler == NULL || !assembler->predictable_code_size())) {
@@ -1039,28 +1072,38 @@
 }
 
 
-bool Operand::is_single_instruction(const Assembler* assembler,
-                                    Instr instr) const {
-  if (rm_.is_valid()) return true;
+int Operand::instructions_required(const Assembler* assembler,
+                                   Instr instr) const {
+  if (rm_.is_valid()) return 1;
   uint32_t dummy1, dummy2;
   if (must_output_reloc_info(assembler) ||
       !fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
     // The immediate operand cannot be encoded as a shifter operand, or use of
-    // constant pool is required. For a mov instruction not setting the
-    // condition code additional instruction conventions can be used.
-    if ((instr & ~kCondMask) == 13*B21) {  // mov, S not set
-      return !use_mov_immediate_load(*this, assembler);
+    // constant pool is required.  First account for the instructions required
+    // for the constant pool or immediate load.
+    int instructions;
+    if (use_mov_immediate_load(*this, assembler)) {
+      // A movw / movt or mov / orr immediate load.
+      instructions = CpuFeatures::IsSupported(ARMv7) ? 2 : 4;
+    } else if (assembler != NULL && assembler->use_extended_constant_pool()) {
+      // An extended constant pool load.
+      instructions = CpuFeatures::IsSupported(ARMv7) ? 3 : 5;
     } else {
-      // If this is not a mov or mvn instruction there will always an additional
-      // instructions - either mov or ldr. The mov might actually be two
-      // instructions mov or movw followed by movt so including the actual
-      // instruction two or three instructions will be generated.
-      return false;
+      // A small constant pool load.
+      instructions = 1;
     }
+
+    if ((instr & ~kCondMask) != 13 * B21) {  // mov, S not set
+      // For a mov or mvn instruction which doesn't set the condition
+      // code, the constant pool or immediate load is enough, otherwise we need
+      // to account for the actual instruction being requested.
+      instructions += 1;
+    }
+    return instructions;
   } else {
     // No use of constant pool and the immediate operand can be encoded as a
     // shifter operand.
-    return true;
+    return 1;
   }
 }
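
Condensed, the count above is one of {1, 2, 3, 4, 5} for the load itself, plus one when the requesting instruction is not a plain condition-code-preserving mov. A sketch with the decisions lifted into flags (hypothetical helper, not in the patch):

    int InstructionsRequired(bool encodable_shifter_operand,
                             bool mov_immediate_load, bool extended_pool,
                             bool armv7, bool plain_mov) {
      if (encodable_shifter_operand) return 1;
      int n = mov_immediate_load ? (armv7 ? 2 : 4)   // movw/movt or mov/orr
            : extended_pool      ? (armv7 ? 3 : 5)   // imm load plus ldr
                                 : 1;                // small pool ldr
      return plain_mov ? n : n + 1;  // Plus the requesting instruction.
    }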
 
@@ -1069,29 +1112,52 @@
                                       const Operand& x,
                                       Condition cond) {
   RelocInfo rinfo(pc_, x.rmode_, x.imm32_, NULL);
+  uint32_t imm32 = static_cast<uint32_t>(x.imm32_);
   if (x.must_output_reloc_info(this)) {
     RecordRelocInfo(rinfo);
   }
 
   if (use_mov_immediate_load(x, this)) {
     Register target = rd.code() == pc.code() ? ip : rd;
-    // TODO(rmcilroy): add ARMv6 support for immediate loads.
-    ASSERT(CpuFeatures::IsSupported(ARMv7));
-    if (!FLAG_enable_ool_constant_pool &&
-        x.must_output_reloc_info(this)) {
-      // Make sure the movw/movt doesn't get separated.
-      BlockConstPoolFor(2);
+    if (CpuFeatures::IsSupported(ARMv7)) {
+      if (!FLAG_enable_ool_constant_pool && x.must_output_reloc_info(this)) {
+        // Make sure the movw/movt doesn't get separated.
+        BlockConstPoolFor(2);
+      }
+      movw(target, imm32 & 0xffff, cond);
+      movt(target, imm32 >> 16, cond);
+    } else {
+      DCHECK(FLAG_enable_ool_constant_pool);
+      mov(target, Operand(imm32 & kImm8Mask), LeaveCC, cond);
+      orr(target, target, Operand(imm32 & (kImm8Mask << 8)), LeaveCC, cond);
+      orr(target, target, Operand(imm32 & (kImm8Mask << 16)), LeaveCC, cond);
+      orr(target, target, Operand(imm32 & (kImm8Mask << 24)), LeaveCC, cond);
     }
-    emit(cond | 0x30*B20 | target.code()*B12 |
-         EncodeMovwImmediate(x.imm32_ & 0xffff));
-    movt(target, static_cast<uint32_t>(x.imm32_) >> 16, cond);
     if (target.code() != rd.code()) {
       mov(rd, target, LeaveCC, cond);
     }
   } else {
-    ASSERT(can_use_constant_pool());
-    ConstantPoolAddEntry(rinfo);
-    ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+    DCHECK(is_constant_pool_available());
+    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+    if (section == ConstantPoolArray::EXTENDED_SECTION) {
+      DCHECK(FLAG_enable_ool_constant_pool);
+      Register target = rd.code() == pc.code() ? ip : rd;
+      // Emit instructions to load constant pool offset.
+      if (CpuFeatures::IsSupported(ARMv7)) {
+        movw(target, 0, cond);
+        movt(target, 0, cond);
+      } else {
+        mov(target, Operand(0), LeaveCC, cond);
+        orr(target, target, Operand(0), LeaveCC, cond);
+        orr(target, target, Operand(0), LeaveCC, cond);
+        orr(target, target, Operand(0), LeaveCC, cond);
+      }
+      // Load from constant pool at offset.
+      ldr(rd, MemOperand(pp, target), cond);
+    } else {
+      DCHECK(section == ConstantPoolArray::SMALL_SECTION);
+      ldr(rd, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0), cond);
+    }
   }
 }
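
The pre-ARMv7 fallback assembles the word one byte lane at a time; each orr operand is an 8-bit value at an even byte position, so it is always encodable as a rotated immediate. A standalone sketch of the masking, assuming kImm8Mask is 0xff as the name suggests:

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kImm8Mask = 0xff;   // assumption: low-byte mask.
      uint32_t imm32 = 0x12345678;
      // mov #0x78 ; orr #0x5600 ; orr #0x340000 ; orr #0x12000000
      uint32_t v = imm32 & kImm8Mask;
      v |= imm32 & (kImm8Mask << 8);
      v |= imm32 & (kImm8Mask << 16);
      v |= imm32 & (kImm8Mask << 24);
      std::printf("%08x\n", v);          // 12345678 - all four lanes set.
      return 0;
    }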
 
@@ -1101,7 +1167,7 @@
                          Register rd,
                          const Operand& x) {
   CheckBuffer();
-  ASSERT((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
+  DCHECK((instr & ~(kCondMask | kOpCodeMask | S)) == 0);
   if (!x.rm_.is_valid()) {
     // Immediate.
     uint32_t rotate_imm;
@@ -1128,7 +1194,7 @@
     instr |= x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   } else {
     // Register shift.
-    ASSERT(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
+    DCHECK(!rn.is(pc) && !rd.is(pc) && !x.rm_.is(pc) && !x.rs_.is(pc));
     instr |= x.rs_.code()*B8 | x.shift_op_ | B4 | x.rm_.code();
   }
   emit(instr | rn.code()*B16 | rd.code()*B12);
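
The immediate branch above relies on fits_shifter: an addrmod1 immediate is valid only if it is an 8-bit value rotated right by an even amount. A hedged standalone checker (not V8's implementation):

    #include <cstdint>

    // True if imm can be encoded as immed_8 ROR (2 * rotate), rotate in 0..15.
    bool FitsRotatedImmediate(uint32_t imm) {
      for (int rot = 0; rot < 32; rot += 2) {
        // Rotate left by rot to undo a rotate-right of the same amount.
        uint32_t v = rot == 0 ? imm : ((imm << rot) | (imm >> (32 - rot)));
        if (v <= 0xffu) return true;
      }
      return false;
    }
    // FitsRotatedImmediate(0x3fc) holds (0xff ror 30); 0x12345678 does not,
    // which is what forces the ip/constant-pool path above.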
@@ -1140,7 +1206,7 @@
 
 
 void Assembler::addrmod2(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(kCondMask | B | L)) == B26);
+  DCHECK((instr & ~(kCondMask | B | L)) == B26);
   int am = x.am_;
   if (!x.rm_.is_valid()) {
     // Immediate offset.
@@ -1152,28 +1218,28 @@
     if (!is_uint12(offset_12)) {
       // Immediate offset cannot be encoded, load it first to register ip
       // rn (and rd in a load) should never be ip, or will be trashed.
-      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
       addrmod2(instr, rd, MemOperand(x.rn_, ip, x.am_));
       return;
     }
-    ASSERT(offset_12 >= 0);  // no masking needed
+    DCHECK(offset_12 >= 0);  // no masking needed
     instr |= offset_12;
   } else {
     // Register offset (shift_imm_ and shift_op_ are 0) or scaled
     // register offset the constructors make sure than both shift_imm_
     // and shift_op_ are initialized.
-    ASSERT(!x.rm_.is(pc));
+    DCHECK(!x.rm_.is(pc));
     instr |= B25 | x.shift_imm_*B7 | x.shift_op_ | x.rm_.code();
   }
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
   emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
 }
 
 
 void Assembler::addrmod3(Instr instr, Register rd, const MemOperand& x) {
-  ASSERT((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
-  ASSERT(x.rn_.is_valid());
+  DCHECK((instr & ~(kCondMask | L | S6 | H)) == (B4 | B7));
+  DCHECK(x.rn_.is_valid());
   int am = x.am_;
   if (!x.rm_.is_valid()) {
     // Immediate offset.
@@ -1185,60 +1251,60 @@
     if (!is_uint8(offset_8)) {
       // Immediate offset cannot be encoded, load it first to register ip
       // rn (and rd in a load) should never be ip, or will be trashed.
-      ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+      DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
       mov(ip, Operand(x.offset_), LeaveCC, Instruction::ConditionField(instr));
       addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
       return;
     }
-    ASSERT(offset_8 >= 0);  // no masking needed
+    DCHECK(offset_8 >= 0);  // no masking needed
     instr |= B | (offset_8 >> 4)*B8 | (offset_8 & 0xf);
   } else if (x.shift_imm_ != 0) {
     // Scaled register offset not supported, load index first
     // rn (and rd in a load) should never be ip, or will be trashed.
-    ASSERT(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
+    DCHECK(!x.rn_.is(ip) && ((instr & L) == L || !rd.is(ip)));
     mov(ip, Operand(x.rm_, x.shift_op_, x.shift_imm_), LeaveCC,
         Instruction::ConditionField(instr));
     addrmod3(instr, rd, MemOperand(x.rn_, ip, x.am_));
     return;
   } else {
     // Register offset.
-    ASSERT((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
+    DCHECK((am & (P|W)) == P || !x.rm_.is(pc));  // no pc index with writeback
     instr |= x.rm_.code();
   }
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
   emit(instr | am | x.rn_.code()*B16 | rd.code()*B12);
 }
 
 
 void Assembler::addrmod4(Instr instr, Register rn, RegList rl) {
-  ASSERT((instr & ~(kCondMask | P | U | W | L)) == B27);
-  ASSERT(rl != 0);
-  ASSERT(!rn.is(pc));
+  DCHECK((instr & ~(kCondMask | P | U | W | L)) == B27);
+  DCHECK(rl != 0);
+  DCHECK(!rn.is(pc));
   emit(instr | rn.code()*B16 | rl);
 }
 
 
 void Assembler::addrmod5(Instr instr, CRegister crd, const MemOperand& x) {
   // Unindexed addressing is not encoded by this function.
-  ASSERT_EQ((B27 | B26),
+  DCHECK_EQ((B27 | B26),
             (instr & ~(kCondMask | kCoprocessorMask | P | U | N | W | L)));
-  ASSERT(x.rn_.is_valid() && !x.rm_.is_valid());
+  DCHECK(x.rn_.is_valid() && !x.rm_.is_valid());
   int am = x.am_;
   int offset_8 = x.offset_;
-  ASSERT((offset_8 & 3) == 0);  // offset must be an aligned word offset
+  DCHECK((offset_8 & 3) == 0);  // offset must be an aligned word offset
   offset_8 >>= 2;
   if (offset_8 < 0) {
     offset_8 = -offset_8;
     am ^= U;
   }
-  ASSERT(is_uint8(offset_8));  // unsigned word offset must fit in a byte
-  ASSERT((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
+  DCHECK(is_uint8(offset_8));  // unsigned word offset must fit in a byte
+  DCHECK((am & (P|W)) == P || !x.rn_.is(pc));  // no pc base with writeback
 
   // Post-indexed addressing requires W == 1; different than in addrmod2/3.
   if ((am & P) == 0)
     am |= W;
 
-  ASSERT(offset_8 >= 0);  // no masking needed
+  DCHECK(offset_8 >= 0);  // no masking needed
   emit(instr | am | x.rn_.code()*B16 | crd.code()*B12 | offset_8);
 }
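
addrmod5 (coprocessor loads/stores) scales the byte offset to words and requires the result to fit in 8 bits. A quick standalone range check matching the DCHECKs above:

    #include <cstdlib>

    // addrmod5 byte offsets: multiples of 4 in the range -1020..1020.
    bool FitsAddrMode5(int offset) {
      return offset % 4 == 0 && std::abs(offset) / 4 <= 255;
    }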
 
@@ -1267,9 +1333,9 @@
 
 // Branch instructions.
 void Assembler::b(int branch_offset, Condition cond) {
-  ASSERT((branch_offset & 3) == 0);
+  DCHECK((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
+  DCHECK(is_int24(imm24));
   emit(cond | B27 | B25 | (imm24 & kImm24Mask));
 
   if (cond == al) {
@@ -1281,33 +1347,33 @@
 
 void Assembler::bl(int branch_offset, Condition cond) {
   positions_recorder()->WriteRecordedPositions();
-  ASSERT((branch_offset & 3) == 0);
+  DCHECK((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
+  DCHECK(is_int24(imm24));
   emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
 }
 
 
 void Assembler::blx(int branch_offset) {  // v5 and above
   positions_recorder()->WriteRecordedPositions();
-  ASSERT((branch_offset & 1) == 0);
+  DCHECK((branch_offset & 1) == 0);
   int h = ((branch_offset & 2) >> 1)*B24;
   int imm24 = branch_offset >> 2;
-  ASSERT(is_int24(imm24));
+  DCHECK(is_int24(imm24));
   emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
 }
 
 
 void Assembler::blx(Register target, Condition cond) {  // v5 and above
   positions_recorder()->WriteRecordedPositions();
-  ASSERT(!target.is(pc));
+  DCHECK(!target.is(pc));
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BLX | target.code());
 }
 
 
 void Assembler::bx(Register target, Condition cond) {  // v5 and above, plus v4t
   positions_recorder()->WriteRecordedPositions();
-  ASSERT(!target.is(pc));  // use of pc is actually allowed, but discouraged
+  DCHECK(!target.is(pc));  // use of pc is actually allowed, but discouraged
   emit(cond | B24 | B21 | 15*B16 | 15*B12 | 15*B8 | BX | target.code());
 }
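
b/bl store the word-aligned byte offset as a signed 24-bit word count, giving roughly +/-32MB of reach; blx additionally folds bit 1 of the offset into the H bit for halfword-aligned Thumb targets. A standalone range check mirroring the DCHECKs:

    #include <cstdint>

    bool BranchOffsetInRange(int32_t branch_offset) {
      if (branch_offset & 3) return false;  // must be word aligned for b/bl.
      int32_t imm24 = branch_offset >> 2;
      return imm24 >= -(1 << 23) && imm24 < (1 << 23);  // signed 24 bits.
    }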
 
@@ -1379,7 +1445,7 @@
 
 void Assembler::cmp_raw_immediate(
     Register src, int raw_immediate, Condition cond) {
-  ASSERT(is_uint12(raw_immediate));
+  DCHECK(is_uint12(raw_immediate));
   emit(cond | I | CMP | S | src.code() << 16 | raw_immediate);
 }
 
@@ -1402,7 +1468,7 @@
   // Don't allow nop instructions in the form mov rn, rn to be generated using
   // the mov instruction. They must be generated using nop(int/NopMarkerTypes)
   // or MarkCode(int/NopMarkerTypes) pseudo instructions.
-  ASSERT(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
+  DCHECK(!(src.is_reg() && src.rm().is(dst) && s == LeaveCC && cond == al));
   addrmod1(cond | MOV | s, r0, dst, src);
 }
 
@@ -1435,7 +1501,7 @@
     //
     // When the label gets bound: target_at extracts the link and target_at_put
     // patches the instructions.
-    ASSERT(is_uint24(link));
+    DCHECK(is_uint24(link));
     BlockConstPoolScope block_const_pool(this);
     emit(link);
     nop(dst.code());
@@ -1447,15 +1513,13 @@
 
 
 void Assembler::movw(Register reg, uint32_t immediate, Condition cond) {
-  ASSERT(immediate < 0x10000);
-  // May use movw if supported, but on unsupported platforms will try to use
-  // equivalent rotated immed_8 value and other tricks before falling back to a
-  // constant pool load.
-  mov(reg, Operand(immediate), LeaveCC, cond);
+  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  emit(cond | 0x30*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
 }
 
 
 void Assembler::movt(Register reg, uint32_t immediate, Condition cond) {
+  DCHECK(CpuFeatures::IsSupported(ARMv7));
   emit(cond | 0x34*B20 | reg.code()*B12 | EncodeMovwImmediate(immediate));
 }
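
movw/movt are now raw ARMv7 encodings rather than pseudo-instructions; together they materialize any 32-bit constant in two instructions. A standalone sketch of the two emitted words, with the al condition assumed:

    #include <cstdint>
    #include <cstdio>

    // imm16 is split into the imm4:imm12 fields of the encoding.
    uint32_t EncodeMovwImm(uint32_t imm16) {
      return ((imm16 & 0xf000) << 4) | (imm16 & 0xfff);
    }

    int main() {
      uint32_t rd = 0;  // r0
      uint32_t movw = 0xE3000000u | (rd << 12) | EncodeMovwImm(0xBEEF);
      uint32_t movt = 0xE3400000u | (rd << 12) | EncodeMovwImm(0xDEAD);
      // movw r0, #0xBEEF ; movt r0, #0xDEAD  -> r0 = 0xDEADBEEF
      std::printf("%08x %08x\n", movw, movt);  // e30b0eef e34d0ead
      return 0;
    }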
 
@@ -1474,7 +1538,7 @@
 // Multiply instructions.
 void Assembler::mla(Register dst, Register src1, Register src2, Register srcA,
                     SBit s, Condition cond) {
-  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
   emit(cond | A | s | dst.code()*B16 | srcA.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
@@ -1482,8 +1546,8 @@
 
 void Assembler::mls(Register dst, Register src1, Register src2, Register srcA,
                     Condition cond) {
-  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
-  ASSERT(IsEnabled(MLS));
+  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc) && !srcA.is(pc));
+  DCHECK(IsEnabled(MLS));
   emit(cond | B22 | B21 | dst.code()*B16 | srcA.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
@@ -1491,16 +1555,25 @@
 
 void Assembler::sdiv(Register dst, Register src1, Register src2,
                      Condition cond) {
-  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(IsEnabled(SUDIV));
+  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  DCHECK(IsEnabled(SUDIV));
   emit(cond | B26 | B25| B24 | B20 | dst.code()*B16 | 0xf * B12 |
        src2.code()*B8 | B4 | src1.code());
 }
 
 
+void Assembler::udiv(Register dst, Register src1, Register src2,
+                     Condition cond) {
+  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  DCHECK(IsEnabled(SUDIV));
+  emit(cond | B26 | B25 | B24 | B21 | B20 | dst.code() * B16 | 0xf * B12 |
+       src2.code() * B8 | B4 | src1.code());
+}
+
+
 void Assembler::mul(Register dst, Register src1, Register src2,
                     SBit s, Condition cond) {
-  ASSERT(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
+  DCHECK(!dst.is(pc) && !src1.is(pc) && !src2.is(pc));
   // dst goes in bits 16-19 for this instruction!
   emit(cond | s | dst.code()*B16 | src2.code()*B8 | B7 | B4 | src1.code());
 }
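
The new udiv reuses sdiv's template; only bit 21 differs, selecting the unsigned variant. Values worked out from the emit expressions above, assuming al condition and r0/r1/r2:

    #include <cstdint>

    // sdiv r0, r1, r2 encodes as 0xE710F211; udiv is the same word with
    // B21 set.
    const uint32_t kSdivR0R1R2 = 0xE710F211u;
    const uint32_t kUdivR0R1R2 = kSdivR0R1R2 | (1u << 21);  // 0xE730F211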
@@ -1512,8 +1585,8 @@
                       Register src2,
                       SBit s,
                       Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
+  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  DCHECK(!dstL.is(dstH));
   emit(cond | B23 | B22 | A | s | dstH.code()*B16 | dstL.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
@@ -1525,8 +1598,8 @@
                       Register src2,
                       SBit s,
                       Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
+  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  DCHECK(!dstL.is(dstH));
   emit(cond | B23 | B22 | s | dstH.code()*B16 | dstL.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
@@ -1538,8 +1611,8 @@
                       Register src2,
                       SBit s,
                       Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
+  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  DCHECK(!dstL.is(dstH));
   emit(cond | B23 | A | s | dstH.code()*B16 | dstL.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
@@ -1551,8 +1624,8 @@
                       Register src2,
                       SBit s,
                       Condition cond) {
-  ASSERT(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
-  ASSERT(!dstL.is(dstH));
+  DCHECK(!dstL.is(pc) && !dstH.is(pc) && !src1.is(pc) && !src2.is(pc));
+  DCHECK(!dstL.is(dstH));
   emit(cond | B23 | s | dstH.code()*B16 | dstL.code()*B12 |
        src2.code()*B8 | B7 | B4 | src1.code());
 }
@@ -1561,7 +1634,7 @@
 // Miscellaneous arithmetic instructions.
 void Assembler::clz(Register dst, Register src, Condition cond) {
   // v5 and above.
-  ASSERT(!dst.is(pc) && !src.is(pc));
+  DCHECK(!dst.is(pc) && !src.is(pc));
   emit(cond | B24 | B22 | B21 | 15*B16 | dst.code()*B12 |
        15*B8 | CLZ | src.code());
 }
@@ -1575,11 +1648,11 @@
                      const Operand& src,
                      Condition cond) {
   // v6 and above.
-  ASSERT(CpuFeatures::IsSupported(ARMv7));
-  ASSERT(!dst.is(pc) && !src.rm_.is(pc));
-  ASSERT((satpos >= 0) && (satpos <= 31));
-  ASSERT((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
-  ASSERT(src.rs_.is(no_reg));
+  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(!dst.is(pc) && !src.rm_.is(pc));
+  DCHECK((satpos >= 0) && (satpos <= 31));
+  DCHECK((src.shift_op_ == ASR) || (src.shift_op_ == LSL));
+  DCHECK(src.rs_.is(no_reg));
 
   int sh = 0;
   if (src.shift_op_ == ASR) {
@@ -1603,10 +1676,10 @@
                      int width,
                      Condition cond) {
   // v7 and above.
-  ASSERT(CpuFeatures::IsSupported(ARMv7));
-  ASSERT(!dst.is(pc) && !src.is(pc));
-  ASSERT((lsb >= 0) && (lsb <= 31));
-  ASSERT((width >= 1) && (width <= (32 - lsb)));
+  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(!dst.is(pc) && !src.is(pc));
+  DCHECK((lsb >= 0) && (lsb <= 31));
+  DCHECK((width >= 1) && (width <= (32 - lsb)));
   emit(cond | 0xf*B23 | B22 | B21 | (width - 1)*B16 | dst.code()*B12 |
        lsb*B7 | B6 | B4 | src.code());
 }
@@ -1623,10 +1696,10 @@
                      int width,
                      Condition cond) {
   // v7 and above.
-  ASSERT(CpuFeatures::IsSupported(ARMv7));
-  ASSERT(!dst.is(pc) && !src.is(pc));
-  ASSERT((lsb >= 0) && (lsb <= 31));
-  ASSERT((width >= 1) && (width <= (32 - lsb)));
+  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(!dst.is(pc) && !src.is(pc));
+  DCHECK((lsb >= 0) && (lsb <= 31));
+  DCHECK((width >= 1) && (width <= (32 - lsb)));
   emit(cond | 0xf*B23 | B21 | (width - 1)*B16 | dst.code()*B12 |
        lsb*B7 | B6 | B4 | src.code());
 }
@@ -1638,10 +1711,10 @@
 //   bfc dst, #lsb, #width
 void Assembler::bfc(Register dst, int lsb, int width, Condition cond) {
   // v7 and above.
-  ASSERT(CpuFeatures::IsSupported(ARMv7));
-  ASSERT(!dst.is(pc));
-  ASSERT((lsb >= 0) && (lsb <= 31));
-  ASSERT((width >= 1) && (width <= (32 - lsb)));
+  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(!dst.is(pc));
+  DCHECK((lsb >= 0) && (lsb <= 31));
+  DCHECK((width >= 1) && (width <= (32 - lsb)));
   int msb = lsb + width - 1;
   emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 | 0xf);
 }
@@ -1657,10 +1730,10 @@
                     int width,
                     Condition cond) {
   // v7 and above.
-  ASSERT(CpuFeatures::IsSupported(ARMv7));
-  ASSERT(!dst.is(pc) && !src.is(pc));
-  ASSERT((lsb >= 0) && (lsb <= 31));
-  ASSERT((width >= 1) && (width <= (32 - lsb)));
+  DCHECK(CpuFeatures::IsSupported(ARMv7));
+  DCHECK(!dst.is(pc) && !src.is(pc));
+  DCHECK((lsb >= 0) && (lsb <= 31));
+  DCHECK((width >= 1) && (width <= (32 - lsb)));
   int msb = lsb + width - 1;
   emit(cond | 0x1f*B22 | msb*B16 | dst.code()*B12 | lsb*B7 | B4 |
        src.code());
@@ -1674,13 +1747,13 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.125.
   // cond(31-28) | 01101000(27-20) | Rn(19-16) |
   // Rd(15-12) | imm5(11-7) | 0(6) | 01(5-4) | Rm(3-0)
-  ASSERT(!dst.is(pc));
-  ASSERT(!src1.is(pc));
-  ASSERT(!src2.rm().is(pc));
-  ASSERT(!src2.rm().is(no_reg));
-  ASSERT(src2.rs().is(no_reg));
-  ASSERT((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
-  ASSERT(src2.shift_op() == LSL);
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.rm().is(pc));
+  DCHECK(!src2.rm().is(no_reg));
+  DCHECK(src2.rs().is(no_reg));
+  DCHECK((src2.shift_imm_ >= 0) && (src2.shift_imm_ <= 31));
+  DCHECK(src2.shift_op() == LSL);
   emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
        src2.shift_imm_*B7 | B4 | src2.rm().code());
 }
@@ -1693,13 +1766,13 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.125.
   // cond(31-28) | 01101000(27-20) | Rn(19-16) |
   // Rd(15-12) | imm5(11-7) | 1(6) | 01(5-4) | Rm(3-0)
-  ASSERT(!dst.is(pc));
-  ASSERT(!src1.is(pc));
-  ASSERT(!src2.rm().is(pc));
-  ASSERT(!src2.rm().is(no_reg));
-  ASSERT(src2.rs().is(no_reg));
-  ASSERT((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
-  ASSERT(src2.shift_op() == ASR);
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.rm().is(pc));
+  DCHECK(!src2.rm().is(no_reg));
+  DCHECK(src2.rs().is(no_reg));
+  DCHECK((src2.shift_imm_ >= 1) && (src2.shift_imm_ <= 32));
+  DCHECK(src2.shift_op() == ASR);
   int asr = (src2.shift_imm_ == 32) ? 0 : src2.shift_imm_;
   emit(cond | 0x68*B20 | src1.code()*B16 | dst.code()*B12 |
        asr*B7 | B6 | B4 | src2.rm().code());
@@ -1712,16 +1785,16 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.274.
   // cond(31-28) | 01101110(27-20) | 1111(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
-  ASSERT(!dst.is(pc));
-  ASSERT(!src.rm().is(pc));
-  ASSERT(!src.rm().is(no_reg));
-  ASSERT(src.rs().is(no_reg));
-  ASSERT((src.shift_imm_ == 0) ||
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.rm().is(pc));
+  DCHECK(!src.rm().is(no_reg));
+  DCHECK(src.rs().is(no_reg));
+  DCHECK((src.shift_imm_ == 0) ||
          (src.shift_imm_ == 8) ||
          (src.shift_imm_ == 16) ||
          (src.shift_imm_ == 24));
   // Operand maps ROR #0 to LSL #0.
-  ASSERT((src.shift_op() == ROR) ||
+  DCHECK((src.shift_op() == ROR) ||
          ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
   emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
        ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
@@ -1735,17 +1808,17 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.271.
   // cond(31-28) | 01101110(27-20) | Rn(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
-  ASSERT(!dst.is(pc));
-  ASSERT(!src1.is(pc));
-  ASSERT(!src2.rm().is(pc));
-  ASSERT(!src2.rm().is(no_reg));
-  ASSERT(src2.rs().is(no_reg));
-  ASSERT((src2.shift_imm_ == 0) ||
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.rm().is(pc));
+  DCHECK(!src2.rm().is(no_reg));
+  DCHECK(src2.rs().is(no_reg));
+  DCHECK((src2.shift_imm_ == 0) ||
          (src2.shift_imm_ == 8) ||
          (src2.shift_imm_ == 16) ||
          (src2.shift_imm_ == 24));
   // Operand maps ROR #0 to LSL #0.
-  ASSERT((src2.shift_op() == ROR) ||
+  DCHECK((src2.shift_op() == ROR) ||
          ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
   emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
        ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
@@ -1758,16 +1831,16 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.275.
   // cond(31-28) | 01101100(27-20) | 1111(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
-  ASSERT(!dst.is(pc));
-  ASSERT(!src.rm().is(pc));
-  ASSERT(!src.rm().is(no_reg));
-  ASSERT(src.rs().is(no_reg));
-  ASSERT((src.shift_imm_ == 0) ||
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.rm().is(pc));
+  DCHECK(!src.rm().is(no_reg));
+  DCHECK(src.rs().is(no_reg));
+  DCHECK((src.shift_imm_ == 0) ||
          (src.shift_imm_ == 8) ||
          (src.shift_imm_ == 16) ||
          (src.shift_imm_ == 24));
   // Operand maps ROR #0 to LSL #0.
-  ASSERT((src.shift_op() == ROR) ||
+  DCHECK((src.shift_op() == ROR) ||
          ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
   emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
        ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
@@ -1776,14 +1849,14 @@
 
 // Status register access instructions.
 void Assembler::mrs(Register dst, SRegister s, Condition cond) {
-  ASSERT(!dst.is(pc));
+  DCHECK(!dst.is(pc));
   emit(cond | B24 | s | 15*B16 | dst.code()*B12);
 }
 
 
 void Assembler::msr(SRegisterFieldMask fields, const Operand& src,
                     Condition cond) {
-  ASSERT(fields >= B16 && fields < B20);  // at least one field set
+  DCHECK(fields >= B16 && fields < B20);  // at least one field set
   Instr instr;
   if (!src.rm_.is_valid()) {
     // Immediate.
@@ -1798,7 +1871,7 @@
     }
     instr = I | rotate_imm*B8 | immed_8;
   } else {
-    ASSERT(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
+    DCHECK(!src.rs_.is_valid() && src.shift_imm_ == 0);  // only rm allowed
     instr = src.rm_.code();
   }
   emit(cond | instr | B24 | B21 | fields | 15*B12);
@@ -1851,22 +1924,22 @@
 
 void Assembler::ldrd(Register dst1, Register dst2,
                      const MemOperand& src, Condition cond) {
-  ASSERT(IsEnabled(ARMv7));
-  ASSERT(src.rm().is(no_reg));
-  ASSERT(!dst1.is(lr));  // r14.
-  ASSERT_EQ(0, dst1.code() % 2);
-  ASSERT_EQ(dst1.code() + 1, dst2.code());
+  DCHECK(IsEnabled(ARMv7));
+  DCHECK(src.rm().is(no_reg));
+  DCHECK(!dst1.is(lr));  // r14.
+  DCHECK_EQ(0, dst1.code() % 2);
+  DCHECK_EQ(dst1.code() + 1, dst2.code());
   addrmod3(cond | B7 | B6 | B4, dst1, src);
 }
 
 
 void Assembler::strd(Register src1, Register src2,
                      const MemOperand& dst, Condition cond) {
-  ASSERT(dst.rm().is(no_reg));
-  ASSERT(!src1.is(lr));  // r14.
-  ASSERT_EQ(0, src1.code() % 2);
-  ASSERT_EQ(src1.code() + 1, src2.code());
-  ASSERT(IsEnabled(ARMv7));
+  DCHECK(dst.rm().is(no_reg));
+  DCHECK(!src1.is(lr));  // r14.
+  DCHECK_EQ(0, src1.code() % 2);
+  DCHECK_EQ(src1.code() + 1, src2.code());
+  DCHECK(IsEnabled(ARMv7));
   addrmod3(cond | B7 | B6 | B5 | B4, src1, dst);
 }
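
ldrd/strd move an even/odd register pair; the DCHECKs encode the architectural constraint that the pair is {r2k, r2k+1} with lr excluded. A standalone restatement:

    // Valid ldrd/strd pairs: first register even, second consecutive,
    // and lr (r14) excluded, since its partner would be pc.
    bool ValidDoublewordPair(int first_code, int second_code) {
      return (first_code & 1) == 0 && second_code == first_code + 1 &&
             first_code != 14;
    }
    // ValidDoublewordPair(4, 5) holds; (5, 6) and (14, 15) do not.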
 
@@ -1876,15 +1949,15 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.128.
   // 1111(31-28) | 0111(27-24) | U(23) | R(22) | 01(21-20) | Rn(19-16) |
   // 1111(15-12) | imm5(11-07) | type(6-5) | 0(4)| Rm(3-0) |
-  ASSERT(address.rm().is(no_reg));
-  ASSERT(address.am() == Offset);
+  DCHECK(address.rm().is(no_reg));
+  DCHECK(address.am() == Offset);
   int U = B23;
   int offset = address.offset();
   if (offset < 0) {
     offset = -offset;
     U = 0;
   }
-  ASSERT(offset < 4096);
+  DCHECK(offset < 4096);
   emit(kSpecialCondition | B26 | B24 | U | B22 | B20 | address.rn().code()*B16 |
        0xf*B12 | offset);
 }
@@ -1896,7 +1969,7 @@
                     RegList dst,
                     Condition cond) {
   // ABI stack constraint: ldmxx base, {..sp..}  base != sp  is not restartable.
-  ASSERT(base.is(sp) || (dst & sp.bit()) == 0);
+  DCHECK(base.is(sp) || (dst & sp.bit()) == 0);
 
   addrmod4(cond | B27 | am | L, base, dst);
 
@@ -1925,7 +1998,7 @@
 // enabling/disabling and a counter feature. See simulator-arm.h .
 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 #ifndef __arm__
-  ASSERT(code >= kDefaultStopCode);
+  DCHECK(code >= kDefaultStopCode);
   {
     // The Simulator will handle the stop instruction and get the message
     // address. It expects to find the address just after the svc instruction.
@@ -1951,13 +2024,13 @@
 
 
 void Assembler::bkpt(uint32_t imm16) {  // v5 and above
-  ASSERT(is_uint16(imm16));
+  DCHECK(is_uint16(imm16));
   emit(al | B24 | B21 | (imm16 >> 4)*B8 | BKPT | (imm16 & 0xf));
 }
 
 
 void Assembler::svc(uint32_t imm24, Condition cond) {
-  ASSERT(is_uint24(imm24));
+  DCHECK(is_uint24(imm24));
   emit(cond | 15*B24 | imm24);
 }
 
@@ -1970,7 +2043,7 @@
                     CRegister crm,
                     int opcode_2,
                     Condition cond) {
-  ASSERT(is_uint4(opcode_1) && is_uint3(opcode_2));
+  DCHECK(is_uint4(opcode_1) && is_uint3(opcode_2));
   emit(cond | B27 | B26 | B25 | (opcode_1 & 15)*B20 | crn.code()*B16 |
        crd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | crm.code());
 }
@@ -1993,7 +2066,7 @@
                     CRegister crm,
                     int opcode_2,
                     Condition cond) {
-  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
   emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | crn.code()*B16 |
        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
 }
@@ -2016,7 +2089,7 @@
                     CRegister crm,
                     int opcode_2,
                     Condition cond) {
-  ASSERT(is_uint3(opcode_1) && is_uint3(opcode_2));
+  DCHECK(is_uint3(opcode_1) && is_uint3(opcode_2));
   emit(cond | B27 | B26 | B25 | (opcode_1 & 7)*B21 | L | crn.code()*B16 |
        rd.code()*B12 | coproc*B8 | (opcode_2 & 7)*B5 | B4 | crm.code());
 }
@@ -2048,7 +2121,7 @@
                     LFlag l,
                     Condition cond) {
   // Unindexed addressing.
-  ASSERT(is_uint8(option));
+  DCHECK(is_uint8(option));
   emit(cond | B27 | B26 | U | l | L | rn.code()*B16 | crd.code()*B12 |
        coproc*B8 | (option & 255));
 }
@@ -2089,14 +2162,14 @@
   int vd, d;
   dst.split_code(&vd, &d);
 
-  ASSERT(offset >= 0);
+  DCHECK(offset >= 0);
   if ((offset % 4) == 0 && (offset / 4) < 256) {
     emit(cond | 0xD*B24 | u*B23 | d*B22 | B20 | base.code()*B16 | vd*B12 |
          0xB*B8 | ((offset / 4) & 255));
   } else {
     // Larger offsets must be handled by computing the correct address
     // in the ip register.
-    ASSERT(!base.is(ip));
+    DCHECK(!base.is(ip));
     if (u == 1) {
       add(ip, base, Operand(offset));
     } else {
@@ -2110,9 +2183,14 @@
 void Assembler::vldr(const DwVfpRegister dst,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
-  ASSERT(operand.am_ == Offset);
-  vldr(dst, operand.rn(), operand.offset(), cond);
+  DCHECK(operand.am_ == Offset);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vldr(dst, ip, 0, cond);
+  } else {
+    vldr(dst, operand.rn(), operand.offset(), cond);
+  }
 }
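
vldr previously rejected register-offset operands outright; it now lowers them through ip. A usage sketch (the MemOperand shape is assumed from the code above):

    // masm.vldr(d0, MemOperand(r1, r2, LSL, 3)) now expands to roughly:
    //   add  ip, r1, r2, lsl #3   ; materialize the effective address
    //   vldr d0, [ip, #0]
    // ip is clobbered, so the base and index must not be ip themselves.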
 
 
@@ -2131,7 +2209,7 @@
   }
   int sd, d;
   dst.split_code(&sd, &d);
-  ASSERT(offset >= 0);
+  DCHECK(offset >= 0);
 
   if ((offset % 4) == 0 && (offset / 4) < 256) {
     emit(cond | u*B23 | d*B22 | 0xD1*B20 | base.code()*B16 | sd*B12 |
@@ -2139,7 +2217,7 @@
   } else {
     // Larger offsets must be handled by computing the correct address
     // in the ip register.
-    ASSERT(!base.is(ip));
+    DCHECK(!base.is(ip));
     if (u == 1) {
       add(ip, base, Operand(offset));
     } else {
@@ -2153,9 +2231,14 @@
 void Assembler::vldr(const SwVfpRegister dst,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
-  ASSERT(operand.am_ == Offset);
-  vldr(dst, operand.rn(), operand.offset(), cond);
+  DCHECK(operand.am_ == Offset);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vldr(dst, ip, 0, cond);
+  } else {
+    vldr(dst, operand.rn(), operand.offset(), cond);
+  }
 }
 
 
@@ -2172,7 +2255,7 @@
     offset = -offset;
     u = 0;
   }
-  ASSERT(offset >= 0);
+  DCHECK(offset >= 0);
   int vd, d;
   src.split_code(&vd, &d);
 
@@ -2182,7 +2265,7 @@
   } else {
     // Larger offsets must be handled by computing the correct address
     // in the ip register.
-    ASSERT(!base.is(ip));
+    DCHECK(!base.is(ip));
     if (u == 1) {
       add(ip, base, Operand(offset));
     } else {
@@ -2196,9 +2279,14 @@
 void Assembler::vstr(const DwVfpRegister src,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
-  ASSERT(operand.am_ == Offset);
-  vstr(src, operand.rn(), operand.offset(), cond);
+  DCHECK(operand.am_ == Offset);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vstr(src, ip, 0, cond);
+  } else {
+    vstr(src, operand.rn(), operand.offset(), cond);
+  }
 }
 
 
@@ -2217,14 +2305,14 @@
   }
   int sd, d;
   src.split_code(&sd, &d);
-  ASSERT(offset >= 0);
+  DCHECK(offset >= 0);
   if ((offset % 4) == 0 && (offset / 4) < 256) {
     emit(cond | u*B23 | d*B22 | 0xD0*B20 | base.code()*B16 | sd*B12 |
          0xA*B8 | ((offset / 4) & 255));
   } else {
     // Larger offsets must be handled by computing the correct address
     // in the ip register.
-    ASSERT(!base.is(ip));
+    DCHECK(!base.is(ip));
     if (u == 1) {
       add(ip, base, Operand(offset));
     } else {
@@ -2238,9 +2326,14 @@
 void Assembler::vstr(const SwVfpRegister src,
                      const MemOperand& operand,
                      const Condition cond) {
-  ASSERT(!operand.rm().is_valid());
-  ASSERT(operand.am_ == Offset);
-  vstr(src, operand.rn(), operand.offset(), cond);
+  DCHECK(operand.am_ == Offset);
+  if (operand.rm().is_valid()) {
+    add(ip, operand.rn(),
+        Operand(operand.rm(), operand.shift_op_, operand.shift_imm_));
+    vstr(src, ip, 0, cond);
+  } else {
+    vstr(src, operand.rn(), operand.offset(), cond);
+  }
 }
 
 
@@ -2252,14 +2345,14 @@
   // Instruction details available in ARM DDI 0406C.b, A8-922.
   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count * 2)
-  ASSERT_LE(first.code(), last.code());
-  ASSERT(am == ia || am == ia_w || am == db_w);
-  ASSERT(!base.is(pc));
+  DCHECK_LE(first.code(), last.code());
+  DCHECK(am == ia || am == ia_w || am == db_w);
+  DCHECK(!base.is(pc));
 
   int sd, d;
   first.split_code(&sd, &d);
   int count = last.code() - first.code() + 1;
-  ASSERT(count <= 16);
+  DCHECK(count <= 16);
   emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
        0xB*B8 | count*2);
 }
@@ -2273,14 +2366,14 @@
   // Instruction details available in ARM DDI 0406C.b, A8-1080.
   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count * 2)
-  ASSERT_LE(first.code(), last.code());
-  ASSERT(am == ia || am == ia_w || am == db_w);
-  ASSERT(!base.is(pc));
+  DCHECK_LE(first.code(), last.code());
+  DCHECK(am == ia || am == ia_w || am == db_w);
+  DCHECK(!base.is(pc));
 
   int sd, d;
   first.split_code(&sd, &d);
   int count = last.code() - first.code() + 1;
-  ASSERT(count <= 16);
+  DCHECK(count <= 16);
   emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
        0xB*B8 | count*2);
 }
@@ -2293,9 +2386,9 @@
   // Instruction details available in ARM DDI 0406A, A8-626.
   // cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
   // first(15-12) | 1010(11-8) | (count/2)
-  ASSERT_LE(first.code(), last.code());
-  ASSERT(am == ia || am == ia_w || am == db_w);
-  ASSERT(!base.is(pc));
+  DCHECK_LE(first.code(), last.code());
+  DCHECK(am == ia || am == ia_w || am == db_w);
+  DCHECK(!base.is(pc));
 
   int sd, d;
   first.split_code(&sd, &d);
@@ -2313,9 +2406,9 @@
   // Instruction details available in ARM DDI 0406A, A8-784.
   // cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
   // first(15-12) | 1011(11-8) | (count/2)
-  ASSERT_LE(first.code(), last.code());
-  ASSERT(am == ia || am == ia_w || am == db_w);
-  ASSERT(!base.is(pc));
+  DCHECK_LE(first.code(), last.code());
+  DCHECK(am == ia || am == ia_w || am == db_w);
+  DCHECK(!base.is(pc));
 
   int sd, d;
   first.split_code(&sd, &d);
@@ -2337,7 +2430,7 @@
 // Only works for little endian floating point formats.
 // We don't support VFP on the mixed endian floating point platform.
 static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
-  ASSERT(CpuFeatures::IsSupported(VFP3));
+  DCHECK(CpuFeatures::IsSupported(VFP3));
 
   // VMOV can accept an immediate of the form:
   //
@@ -2399,7 +2492,7 @@
     int vd, d;
     dst.split_code(&vd, &d);
     emit(al | 0x1D*B23 | d*B22 | 0x3*B20 | vd*B12 | 0x5*B9 | B8 | enc);
-  } else if (FLAG_enable_vldr_imm && can_use_constant_pool()) {
+  } else if (FLAG_enable_vldr_imm && is_constant_pool_available()) {
     // TODO(jfb) Temporarily turned off until we have constant blinding or
     //           some equivalent mitigation: an attacker can otherwise control
     //           generated data which also happens to be executable, a Very Bad
@@ -2416,8 +2509,18 @@
     //           that's tricky because vldr has a limited reach. Furthermore
     //           it breaks load locality.
     RelocInfo rinfo(pc_, imm);
-    ConstantPoolAddEntry(rinfo);
-    vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+    ConstantPoolArray::LayoutSection section = ConstantPoolAddEntry(rinfo);
+    if (section == ConstantPoolArray::EXTENDED_SECTION) {
+      DCHECK(FLAG_enable_ool_constant_pool);
+      // Emit instructions to load constant pool offset.
+      movw(ip, 0);
+      movt(ip, 0);
+      // Load from constant pool at offset.
+      vldr(dst, MemOperand(pp, ip));
+    } else {
+      DCHECK(section == ConstantPoolArray::SMALL_SECTION);
+      vldr(dst, MemOperand(FLAG_enable_ool_constant_pool ? pp : pc, 0));
+    }
   } else {
     // Synthesise the double from ARM immediates.
     uint32_t lo, hi;
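
Three strategies now cover double immediates: the VMOV immediate form, a (possibly extended) constant pool vldr with movw/movt placeholders for the pool offset filled in once the pool is laid out, and per-word synthesis. The immediate form, per the comment earlier in this function, is +/- m * 2^(-n) with 16 <= m <= 31 and 0 <= n <= 7; a hedged standalone test:

    #include <cmath>

    // True if d matches the VMOV.F64 immediate form described above.
    bool FitsVmovImmediateForm(double d) {
      for (int n = 0; n <= 7; ++n) {
        double m = std::fabs(d) * std::ldexp(1.0, n);  // |d| * 2^n
        if (m == std::floor(m) && m >= 16.0 && m <= 31.0) return true;
      }
      return false;
    }
    // 1.0 (16 * 2^-4) and 0.5 (16 * 2^-5) fit; 1.1 does not, so it falls
    // through to the vldr or synthesis paths.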
@@ -2491,7 +2594,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-940.
   // cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
   // Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(index.index == 0 || index.index == 1);
+  DCHECK(index.index == 0 || index.index == 1);
   int vd, d;
   dst.split_code(&vd, &d);
   emit(cond | 0xE*B24 | index.index*B21 | vd*B16 | src.code()*B12 | 0xB*B8 |
@@ -2507,7 +2610,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.342.
   // cond(31-28) | 1110(27-24) | U=0(23) | opc1=0index(22-21) | 1(20) |
   // Vn(19-16) | Rt(15-12) | 1011(11-8) | N(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(index.index == 0 || index.index == 1);
+  DCHECK(index.index == 0 || index.index == 1);
   int vn, n;
   src.split_code(&vn, &n);
   emit(cond | 0xE*B24 | index.index*B21 | B20 | vn*B16 | dst.code()*B12 |
@@ -2523,7 +2626,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-948.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(!src1.is(pc) && !src2.is(pc));
+  DCHECK(!src1.is(pc) && !src2.is(pc));
   int vm, m;
   dst.split_code(&vm, &m);
   emit(cond | 0xC*B24 | B22 | src2.code()*B16 |
@@ -2539,7 +2642,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-948.
   // cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
   // Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
-  ASSERT(!dst1.is(pc) && !dst2.is(pc));
+  DCHECK(!dst1.is(pc) && !dst2.is(pc));
   int vm, m;
   src.split_code(&vm, &m);
   emit(cond | 0xC*B24 | B22 | B20 | dst2.code()*B16 |
@@ -2554,7 +2657,7 @@
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(!src.is(pc));
+  DCHECK(!src.is(pc));
   int sn, n;
   dst.split_code(&sn, &n);
   emit(cond | 0xE*B24 | sn*B16 | src.code()*B12 | 0xA*B8 | n*B7 | B4);
@@ -2568,7 +2671,7 @@
   // Instruction details available in ARM DDI 0406A, A8-642.
   // cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
   // Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
-  ASSERT(!dst.is(pc));
+  DCHECK(!dst.is(pc));
   int sn, n;
   src.split_code(&sn, &n);
   emit(cond | 0xE*B24 | B20 | sn*B16 | dst.code()*B12 | 0xA*B8 | n*B7 | B4);
@@ -2629,7 +2732,7 @@
                          int reg_code,
                          int* vm,
                          int* m) {
-  ASSERT((reg_code >= 0) && (reg_code <= 31));
+  DCHECK((reg_code >= 0) && (reg_code <= 31));
   if (IsIntegerVFPType(reg_type) || !IsDoubleVFPType(reg_type)) {
     // 32 bit type.
     *m  = reg_code & 0x1;
@@ -2649,7 +2752,7 @@
                         const int src_code,
                         VFPConversionMode mode,
                         const Condition cond) {
-  ASSERT(src_type != dst_type);
+  DCHECK(src_type != dst_type);
   int D, Vd, M, Vm;
   SplitRegCode(src_type, src_code, &Vm, &M);
   SplitRegCode(dst_type, dst_code, &Vd, &D);
@@ -2659,7 +2762,7 @@
     // Instruction details available in ARM DDI 0406B, A8.6.295.
     // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 1(19) | opc2(18-16) |
     // Vd(15-12) | 101(11-9) | sz(8) | op(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
-    ASSERT(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
+    DCHECK(!IsIntegerVFPType(dst_type) || !IsIntegerVFPType(src_type));
 
     int sz, opc2, op;
 
@@ -2668,7 +2771,7 @@
       sz = IsDoubleVFPType(src_type) ? 0x1 : 0x0;
       op = mode;
     } else {
-      ASSERT(IsIntegerVFPType(src_type));
+      DCHECK(IsIntegerVFPType(src_type));
       opc2 = 0x0;
       sz = IsDoubleVFPType(dst_type) ? 0x1 : 0x0;
       op = IsSignedVFPType(src_type) ? 0x1 : 0x0;
@@ -2750,8 +2853,8 @@
   // Instruction details available in ARM DDI 0406C.b, A8-874.
   // cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 1010(19-16) | Vd(15-12) |
   // 101(11-9) | sf=1(8) | sx=1(7) | 1(6) | i(5) | 0(4) | imm4(3-0)
-  ASSERT(fraction_bits > 0 && fraction_bits <= 32);
-  ASSERT(CpuFeatures::IsSupported(VFP3));
+  DCHECK(fraction_bits > 0 && fraction_bits <= 32);
+  DCHECK(CpuFeatures::IsSupported(VFP3));
   int vd, d;
   dst.split_code(&vd, &d);
   int imm5 = 32 - fraction_bits;
@@ -2932,7 +3035,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8-864.
   // cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
   // Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
-  ASSERT(src2 == 0.0);
+  DCHECK(src2 == 0.0);
   int vd, d;
   src1.split_code(&vd, &d);
   emit(cond | 0x1D*B23 | d*B22 | 0x3*B20 | 0x5*B16 | vd*B12 | 0x5*B9 | B8 | B6);
@@ -2980,7 +3083,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.320.
   // 1111(31-28) | 01000(27-23) | D(22) | 10(21-20) | Rn(19-16) |
   // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
-  ASSERT(CpuFeatures::IsSupported(NEON));
+  DCHECK(CpuFeatures::IsSupported(NEON));
   int vd, d;
   dst.base().split_code(&vd, &d);
   emit(0xFU*B28 | 4*B24 | d*B22 | 2*B20 | src.rn().code()*B16 | vd*B12 |
@@ -2994,7 +3097,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.404.
   // 1111(31-28) | 01000(27-23) | D(22) | 00(21-20) | Rn(19-16) |
   // Vd(15-12) | type(11-8) | size(7-6) | align(5-4) | Rm(3-0)
-  ASSERT(CpuFeatures::IsSupported(NEON));
+  DCHECK(CpuFeatures::IsSupported(NEON));
   int vd, d;
   src.base().split_code(&vd, &d);
   emit(0xFU*B28 | 4*B24 | d*B22 | dst.rn().code()*B16 | vd*B12 | src.type()*B8 |
@@ -3006,7 +3109,7 @@
   // Instruction details available in ARM DDI 0406C.b, A8.8.346.
   // 1111(31-28) | 001(27-25) | U(24) | 1(23) | D(22) | imm3(21-19) |
   // 000(18-16) | Vd(15-12) | 101000(11-6) | M(5) | 1(4) | Vm(3-0)
-  ASSERT(CpuFeatures::IsSupported(NEON));
+  DCHECK(CpuFeatures::IsSupported(NEON));
   int vd, d;
   dst.split_code(&vd, &d);
   int vm, m;
@@ -3023,7 +3126,7 @@
   // MOV Rx, Rx as NOP and it performs better even in newer CPUs.
   // We therefore use MOV Rx, Rx, even on newer CPUs, and use Rx to encode
   // a type.
-  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
+  DCHECK(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   emit(al | 13*B21 | type*B12 | type);
 }
 
@@ -3032,7 +3135,7 @@
   instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
              ((kNumRegisters-1)*B12) |            // mask out register
              EncodeMovwImmediate(0xFFFF));        // mask out immediate value
-  return instr == 0x34*B20;
+  return instr == kMovtPattern;
 }
 
 
@@ -3040,17 +3143,63 @@
   instr &= ~(((kNumberOfConditions - 1) << 28) |  // Mask off conditions
              ((kNumRegisters-1)*B12) |            // mask out destination
              EncodeMovwImmediate(0xFFFF));        // mask out immediate value
-  return instr == 0x30*B20;
+  return instr == kMovwPattern;
+}
+
+
+Instr Assembler::GetMovTPattern() { return kMovtPattern; }
+
+
+Instr Assembler::GetMovWPattern() { return kMovwPattern; }
+
+
+Instr Assembler::EncodeMovwImmediate(uint32_t immediate) {
+  DCHECK(immediate < 0x10000);
+  return ((immediate & 0xf000) << 4) | (immediate & 0xfff);
+}
+
+
+Instr Assembler::PatchMovwImmediate(Instr instruction, uint32_t immediate) {
+  instruction &= ~EncodeMovwImmediate(0xffff);
+  return instruction | EncodeMovwImmediate(immediate);
+}
+
+
+int Assembler::DecodeShiftImm(Instr instr) {
+  int rotate = Instruction::RotateValue(instr) * 2;
+  int immed8 = Instruction::Immed8Value(instr);
+  return (immed8 >> rotate) | (immed8 << (32 - rotate));
+}
+
+
+Instr Assembler::PatchShiftImm(Instr instr, int immed) {
+  uint32_t rotate_imm = 0;
+  uint32_t immed_8 = 0;
+  bool immed_fits = fits_shifter(immed, &rotate_imm, &immed_8, NULL);
+  DCHECK(immed_fits);
+  USE(immed_fits);
+  return (instr & ~kOff12Mask) | (rotate_imm << 8) | immed_8;
 }
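
The patching helpers round-trip addrmod1 immediates. A standalone sketch of the decode direction; note that the in-tree expression computes immed8 << (32 - rotate), which is only well-defined for a non-zero rotate, so the sketch guards that case explicitly:

    #include <cstdint>

    // An addrmod1 immediate is immed8 rotated right by 2 * rotate_field.
    // rotate == 0 is handled explicitly to avoid a shift by 32.
    uint32_t DecodeRotatedImmediate(uint32_t rotate_field, uint32_t immed8) {
      uint32_t rotate = rotate_field * 2;
      if (rotate == 0) return immed8;
      return (immed8 >> rotate) | (immed8 << (32 - rotate));
    }
    // DecodeRotatedImmediate(15, 0xff) == 0x3fc, i.e. 0xff ror 30.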
 
 
 bool Assembler::IsNop(Instr instr, int type) {
-  ASSERT(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
+  DCHECK(0 <= type && type <= 14);  // mov pc, pc isn't a nop.
   // Check for mov rx, rx where x = type.
   return instr == (al | 13*B21 | type*B12 | type);
 }
 
 
+bool Assembler::IsMovImmed(Instr instr) {
+  return (instr & kMovImmedMask) == kMovImmedPattern;
+}
+
+
+bool Assembler::IsOrrImmed(Instr instr) {
+  return (instr & kOrrImmedMask) == kOrrImmedPattern;
+}
+
+
+// static
 bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
   uint32_t dummy1;
   uint32_t dummy2;
@@ -3098,9 +3247,7 @@
 
   // Compute new buffer size.
   CodeDesc desc;  // the new buffer
-  if (buffer_size_ < 4*KB) {
-    desc.buffer_size = 4*KB;
-  } else if (buffer_size_ < 1*MB) {
+  if (buffer_size_ < 1 * MB) {
     desc.buffer_size = 2*buffer_size_;
   } else {
     desc.buffer_size = buffer_size_ + 1*MB;
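
With the 4KB seeding branch gone, growth is a pure function of the current size: double below 1MB, then grow linearly. A one-line sketch:

    // e.g. 256KB -> 512KB -> 1MB -> 2MB -> 3MB -> ...
    int NextBufferSize(int size) {
      const int MB = 1024 * 1024;
      return size < MB ? 2 * size : size + MB;
    }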
@@ -3135,7 +3282,7 @@
   // Relocate pending relocation entries.
   for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
     RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
-    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+    DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION);
     if (rinfo.rmode() != RelocInfo::JS_RETURN) {
       rinfo.set_pc(rinfo.pc() + pc_delta);
@@ -3143,7 +3290,7 @@
   }
   for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
     RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
-    ASSERT(rinfo.rmode() == RelocInfo::NONE64);
+    DCHECK(rinfo.rmode() == RelocInfo::NONE64);
     rinfo.set_pc(rinfo.pc() + pc_delta);
   }
   constant_pool_builder_.Relocate(pc_delta);
@@ -3154,8 +3301,8 @@
   // No relocation info should be pending while using db. db is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using db.
-  ASSERT(num_pending_32_bit_reloc_info_ == 0);
-  ASSERT(num_pending_64_bit_reloc_info_ == 0);
+  DCHECK(num_pending_32_bit_reloc_info_ == 0);
+  DCHECK(num_pending_64_bit_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
@@ -3166,8 +3313,8 @@
   // No relocation info should be pending while using dd. dd is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
-  ASSERT(num_pending_32_bit_reloc_info_ == 0);
-  ASSERT(num_pending_64_bit_reloc_info_ == 0);
+  DCHECK(num_pending_32_bit_reloc_info_ == 0);
+  DCHECK(num_pending_64_bit_reloc_info_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
@@ -3195,7 +3342,7 @@
         !serializer_enabled() && !emit_debug_code()) {
       return;
     }
-    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
     if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
       RelocInfo reloc_info_with_ast_id(rinfo.pc(),
                                        rinfo.rmode(),
@@ -3210,18 +3357,19 @@
 }
 
 
-void Assembler::ConstantPoolAddEntry(const RelocInfo& rinfo) {
+ConstantPoolArray::LayoutSection Assembler::ConstantPoolAddEntry(
+    const RelocInfo& rinfo) {
   if (FLAG_enable_ool_constant_pool) {
-    constant_pool_builder_.AddEntry(this, rinfo);
+    return constant_pool_builder_.AddEntry(this, rinfo);
   } else {
     if (rinfo.rmode() == RelocInfo::NONE64) {
-      ASSERT(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
+      DCHECK(num_pending_64_bit_reloc_info_ < kMaxNumPending64RelocInfo);
       if (num_pending_64_bit_reloc_info_ == 0) {
         first_const_pool_64_use_ = pc_offset();
       }
       pending_64_bit_reloc_info_[num_pending_64_bit_reloc_info_++] = rinfo;
     } else {
-      ASSERT(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
+      DCHECK(num_pending_32_bit_reloc_info_ < kMaxNumPending32RelocInfo);
       if (num_pending_32_bit_reloc_info_ == 0) {
         first_const_pool_32_use_ = pc_offset();
       }
@@ -3230,6 +3378,7 @@
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
     BlockConstPoolFor(1);
+    return ConstantPoolArray::SMALL_SECTION;
   }
 }
 
@@ -3237,8 +3386,8 @@
 void Assembler::BlockConstPoolFor(int instructions) {
   if (FLAG_enable_ool_constant_pool) {
     // Should be a no-op if using an out-of-line constant pool.
-    ASSERT(num_pending_32_bit_reloc_info_ == 0);
-    ASSERT(num_pending_64_bit_reloc_info_ == 0);
+    DCHECK(num_pending_32_bit_reloc_info_ == 0);
+    DCHECK(num_pending_64_bit_reloc_info_ == 0);
     return;
   }
 
@@ -3247,10 +3396,10 @@
     // Max pool start (if we need a jump and an alignment).
 #ifdef DEBUG
     int start = pc_limit + kInstrSize + 2 * kPointerSize;
-    ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+    DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
            (start - first_const_pool_32_use_ +
             num_pending_64_bit_reloc_info_ * kDoubleSize < kMaxDistToIntPool));
-    ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+    DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
            (start - first_const_pool_64_use_ < kMaxDistToFPPool));
 #endif
     no_const_pool_before_ = pc_limit;
@@ -3265,8 +3414,8 @@
 void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   if (FLAG_enable_ool_constant_pool) {
     // Should be a no-op if using an out-of-line constant pool.
-    ASSERT(num_pending_32_bit_reloc_info_ == 0);
-    ASSERT(num_pending_64_bit_reloc_info_ == 0);
+    DCHECK(num_pending_32_bit_reloc_info_ == 0);
+    DCHECK(num_pending_64_bit_reloc_info_ == 0);
     return;
   }
 
@@ -3275,7 +3424,7 @@
   // BlockConstPoolScope.
   if (is_const_pool_blocked()) {
     // Something is wrong if emission is forced and blocked at the same time.
-    ASSERT(!force_emit);
+    DCHECK(!force_emit);
     return;
   }
 
@@ -3314,7 +3463,7 @@
   //  * the instruction doesn't require a jump after itself to jump over the
   //    constant pool, and we're getting close to running out of range.
   if (!force_emit) {
-    ASSERT((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
+    DCHECK((first_const_pool_32_use_ >= 0) || (first_const_pool_64_use_ >= 0));
     bool need_emit = false;
     if (has_fp_values) {
       int dist64 = pc_offset() +
@@ -3364,15 +3513,15 @@
     for (int i = 0; i < num_pending_64_bit_reloc_info_; i++) {
       RelocInfo& rinfo = pending_64_bit_reloc_info_[i];
 
-      ASSERT(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.
+      DCHECK(!((uintptr_t)pc_ & 0x7));  // Check 64-bit alignment.
 
       Instr instr = instr_at(rinfo.pc());
       // Instruction to patch must be 'vldr rd, [pc, #offset]' with offset == 0.
-      ASSERT((IsVldrDPcImmediateOffset(instr) &&
+      DCHECK((IsVldrDPcImmediateOffset(instr) &&
               GetVldrDRegisterImmediateOffset(instr) == 0));
 
       int delta = pc_ - rinfo.pc() - kPcLoadDelta;
-      ASSERT(is_uint10(delta));
+      DCHECK(is_uint10(delta));
 
       bool found = false;
       uint64_t value = rinfo.raw_data64();
@@ -3380,9 +3529,9 @@
         RelocInfo& rinfo2 = pending_64_bit_reloc_info_[j];
         if (value == rinfo2.raw_data64()) {
           found = true;
-          ASSERT(rinfo2.rmode() == RelocInfo::NONE64);
+          DCHECK(rinfo2.rmode() == RelocInfo::NONE64);
           Instr instr2 = instr_at(rinfo2.pc());
-          ASSERT(IsVldrDPcImmediateOffset(instr2));
+          DCHECK(IsVldrDPcImmediateOffset(instr2));
           delta = GetVldrDRegisterImmediateOffset(instr2);
           delta += rinfo2.pc() - rinfo.pc();
           break;
@@ -3401,7 +3550,7 @@
     // Emit 32-bit constant pool entries.
     for (int i = 0; i < num_pending_32_bit_reloc_info_; i++) {
       RelocInfo& rinfo = pending_32_bit_reloc_info_[i];
-      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+      DCHECK(rinfo.rmode() != RelocInfo::COMMENT &&
              rinfo.rmode() != RelocInfo::POSITION &&
              rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
              rinfo.rmode() != RelocInfo::CONST_POOL &&
@@ -3410,12 +3559,12 @@
       Instr instr = instr_at(rinfo.pc());
 
       // 64-bit loads shouldn't get here.
-      ASSERT(!IsVldrDPcImmediateOffset(instr));
+      DCHECK(!IsVldrDPcImmediateOffset(instr));
 
       if (IsLdrPcImmediateOffset(instr) &&
           GetLdrRegisterImmediateOffset(instr) == 0) {
         int delta = pc_ - rinfo.pc() - kPcLoadDelta;
-        ASSERT(is_uint12(delta));
+        DCHECK(is_uint12(delta));
         // 0 is the smallest delta:
         //   ldr rd, [pc, #0]
         //   constant pool marker
@@ -3445,7 +3594,7 @@
           emit(rinfo.data());
         }
       } else {
-        ASSERT(IsMovW(instr));
+        DCHECK(IsMovW(instr));
       }
     }
 
@@ -3481,12 +3630,7 @@
 
 
 ConstantPoolBuilder::ConstantPoolBuilder()
-    : entries_(),
-      merged_indexes_(),
-      count_of_64bit_(0),
-      count_of_code_ptr_(0),
-      count_of_heap_ptr_(0),
-      count_of_32bit_(0) { }
+    : entries_(), current_section_(ConstantPoolArray::SMALL_SECTION) {}
 
 
 bool ConstantPoolBuilder::IsEmpty() {
@@ -3494,87 +3638,70 @@
 }
 
 
-bool ConstantPoolBuilder::Is64BitEntry(RelocInfo::Mode rmode) {
-  return rmode == RelocInfo::NONE64;
+ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
+    RelocInfo::Mode rmode) {
+  if (rmode == RelocInfo::NONE64) {
+    return ConstantPoolArray::INT64;
+  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
+    return ConstantPoolArray::INT32;
+  } else if (RelocInfo::IsCodeTarget(rmode)) {
+    return ConstantPoolArray::CODE_PTR;
+  } else {
+    DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
+    return ConstantPoolArray::HEAP_PTR;
+  }
 }
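
The four boolean predicates collapse into one total mapping from relocation mode to pool entry type. A standalone restatement; the enum mirrors ConstantPoolArray, and the flags stand in for RelocInfo's predicates:

    enum PoolEntryType { INT64, INT32, CODE_PTR, HEAP_PTR };

    PoolEntryType PoolTypeFor(bool is_none64, bool is_gc_mode,
                              bool is_code_target) {
      if (is_none64) return INT64;       // raw 64-bit data
      if (!is_gc_mode) return INT32;     // raw 32-bit data
      return is_code_target ? CODE_PTR   // code pointers, visited by GC
                            : HEAP_PTR;  // other heap pointers
    }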
 
 
-bool ConstantPoolBuilder::Is32BitEntry(RelocInfo::Mode rmode) {
-  return !RelocInfo::IsGCRelocMode(rmode) && rmode != RelocInfo::NONE64;
-}
-
-
-bool ConstantPoolBuilder::IsCodePtrEntry(RelocInfo::Mode rmode) {
-  return RelocInfo::IsCodeTarget(rmode);
-}
-
-
-bool ConstantPoolBuilder::IsHeapPtrEntry(RelocInfo::Mode rmode) {
-  return RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode);
-}
-
-
-void ConstantPoolBuilder::AddEntry(Assembler* assm,
-                                   const RelocInfo& rinfo) {
+ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
+    Assembler* assm, const RelocInfo& rinfo) {
   RelocInfo::Mode rmode = rinfo.rmode();
-  ASSERT(rmode != RelocInfo::COMMENT &&
+  DCHECK(rmode != RelocInfo::COMMENT &&
          rmode != RelocInfo::POSITION &&
          rmode != RelocInfo::STATEMENT_POSITION &&
          rmode != RelocInfo::CONST_POOL);
 
-
   // Try to merge entries which won't be patched.
   int merged_index = -1;
+  ConstantPoolArray::LayoutSection entry_section = current_section_;
   if (RelocInfo::IsNone(rmode) ||
       (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
     size_t i;
-    std::vector<RelocInfo>::const_iterator it;
+    std::vector<ConstantPoolEntry>::const_iterator it;
     for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
-      if (RelocInfo::IsEqual(rinfo, *it)) {
+      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
+        // Merge with found entry.
         merged_index = i;
+        entry_section = entries_[i].section_;
         break;
       }
     }
   }
-
-  entries_.push_back(rinfo);
-  merged_indexes_.push_back(merged_index);
+  DCHECK(entry_section <= current_section_);
+  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
 
   if (merged_index == -1) {
     // Not merged, so update the appropriate count.
-    if (Is64BitEntry(rmode)) {
-      count_of_64bit_++;
-    } else if (Is32BitEntry(rmode)) {
-      count_of_32bit_++;
-    } else if (IsCodePtrEntry(rmode)) {
-      count_of_code_ptr_++;
-    } else {
-      ASSERT(IsHeapPtrEntry(rmode));
-      count_of_heap_ptr_++;
-    }
+    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
   }
 
-  // Check if we still have room for another entry given Arm's ldr and vldr
-  // immediate offset range.
-  // TODO(rmcilroy): Avoid creating a new object here when we support
-  //                 extended constant pools.
-  ConstantPoolArray::NumberOfEntries total(count_of_64bit_,
-                                           count_of_code_ptr_,
-                                           count_of_heap_ptr_,
-                                           count_of_32bit_);
-  ConstantPoolArray::NumberOfEntries int64_counts(count_of_64bit_, 0, 0, 0);
-  if (!(is_uint12(ConstantPoolArray::SizeFor(total)) &&
-        is_uint10(ConstantPoolArray::SizeFor(int64_counts)))) {
-    assm->set_constant_pool_full();
+  // Check if we still have room for another entry in the small section
+  // given Arm's ldr and vldr immediate offset range.
+  if (current_section_ == ConstantPoolArray::SMALL_SECTION &&
+      !(is_uint12(ConstantPoolArray::SizeFor(*small_entries())) &&
+        is_uint10(ConstantPoolArray::MaxInt64Offset(
+            small_entries()->count_of(ConstantPoolArray::INT64))))) {
+    current_section_ = ConstantPoolArray::EXTENDED_SECTION;
   }
+  return entry_section;
 }
 
 
 void ConstantPoolBuilder::Relocate(int pc_delta) {
-  for (std::vector<RelocInfo>::iterator rinfo = entries_.begin();
-       rinfo != entries_.end(); rinfo++) {
-    ASSERT(rinfo->rmode() != RelocInfo::JS_RETURN);
-    rinfo->set_pc(rinfo->pc() + pc_delta);
+  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+       entry != entries_.end(); entry++) {
+    DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
+    entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
   }
 }
 
@@ -3582,89 +3709,133 @@
 Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
   if (IsEmpty()) {
     return isolate->factory()->empty_constant_pool_array();
+  } else if (extended_entries()->is_empty()) {
+    return isolate->factory()->NewConstantPoolArray(*small_entries());
   } else {
-    ConstantPoolArray::NumberOfEntries small(count_of_64bit_,
-                                             count_of_code_ptr_,
-                                             count_of_heap_ptr_,
-                                             count_of_32bit_);
-    return isolate->factory()->NewConstantPoolArray(small);
+    DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
+    return isolate->factory()->NewExtendedConstantPoolArray(
+        *small_entries(), *extended_entries());
   }
 }
 
 
 void ConstantPoolBuilder::Populate(Assembler* assm,
                                    ConstantPoolArray* constant_pool) {
-  ASSERT(count_of_64bit_ == constant_pool->number_of_entries(
-             ConstantPoolArray::INT64, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_code_ptr_ == constant_pool->number_of_entries(
-             ConstantPoolArray::CODE_PTR, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_heap_ptr_ == constant_pool->number_of_entries(
-             ConstantPoolArray::HEAP_PTR, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(count_of_32bit_ == constant_pool->number_of_entries(
-             ConstantPoolArray::INT32, ConstantPoolArray::SMALL_SECTION));
-  ASSERT(entries_.size() == merged_indexes_.size());
+  DCHECK_EQ(extended_entries()->is_empty(),
+            !constant_pool->is_extended_layout());
+  DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
+      constant_pool, ConstantPoolArray::SMALL_SECTION)));
+  if (constant_pool->is_extended_layout()) {
+    DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
+        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
+  }
 
-  int index_64bit = 0;
-  int index_code_ptr = count_of_64bit_;
-  int index_heap_ptr = count_of_64bit_ + count_of_code_ptr_;
-  int index_32bit = count_of_64bit_ + count_of_code_ptr_ + count_of_heap_ptr_;
-
-  size_t i;
-  std::vector<RelocInfo>::const_iterator rinfo;
-  for (rinfo = entries_.begin(), i = 0; rinfo != entries_.end(); rinfo++, i++) {
-    RelocInfo::Mode rmode = rinfo->rmode();
-
-    // Update constant pool if necessary and get the entry's offset.
-    int offset;
-    if (merged_indexes_[i] == -1) {
-      if (Is64BitEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_64bit) - kHeapObjectTag;
-        constant_pool->set(index_64bit++, rinfo->data64());
-      } else if (Is32BitEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_32bit) - kHeapObjectTag;
-        constant_pool->set(index_32bit++, static_cast<int32_t>(rinfo->data()));
-      } else if (IsCodePtrEntry(rmode)) {
-        offset = constant_pool->OffsetOfElementAt(index_code_ptr) -
-            kHeapObjectTag;
-        constant_pool->set(index_code_ptr++,
-                           reinterpret_cast<Address>(rinfo->data()));
-      } else {
-        ASSERT(IsHeapPtrEntry(rmode));
-        offset = constant_pool->OffsetOfElementAt(index_heap_ptr) -
-            kHeapObjectTag;
-        constant_pool->set(index_heap_ptr++,
-                           reinterpret_cast<Object *>(rinfo->data()));
+  // Set up initial offsets.
+  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
+             [ConstantPoolArray::NUMBER_OF_TYPES];
+  for (int section = 0; section <= constant_pool->final_section(); section++) {
+    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
+                            ? small_entries()->total_count()
+                            : 0;
+    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
+      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
+      if (number_of_entries_[section].count_of(type) != 0) {
+        offsets[section][type] = constant_pool->OffsetOfElementAt(
+            number_of_entries_[section].base_of(type) + section_start);
       }
-      merged_indexes_[i] = offset;  // Stash offset for merged entries.
-    } else {
-      size_t merged_index = static_cast<size_t>(merged_indexes_[i]);
-      ASSERT(merged_index < merged_indexes_.size() && merged_index < i);
-      offset = merged_indexes_[merged_index];
-    }
-
-    // Patch vldr/ldr instruction with correct offset.
-    Instr instr = assm->instr_at(rinfo->pc());
-    if (Is64BitEntry(rmode)) {
-      // Instruction to patch must be 'vldr rd, [pp, #0]'.
-      ASSERT((Assembler::IsVldrDPpImmediateOffset(instr) &&
-              Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
-      ASSERT(is_uint10(offset));
-      assm->instr_at_put(rinfo->pc(),
-          Assembler::SetVldrDRegisterImmediateOffset(instr, offset));
-    } else {
-      // Instruction to patch must be 'ldr rd, [pp, #0]'.
-      ASSERT((Assembler::IsLdrPpImmediateOffset(instr) &&
-              Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
-      ASSERT(is_uint12(offset));
-      assm->instr_at_put(rinfo->pc(),
-          Assembler::SetLdrRegisterImmediateOffset(instr, offset));
     }
   }
 
-  ASSERT((index_64bit == count_of_64bit_) &&
-         (index_code_ptr == (index_64bit + count_of_code_ptr_)) &&
-         (index_heap_ptr == (index_code_ptr + count_of_heap_ptr_)) &&
-         (index_32bit == (index_heap_ptr + count_of_32bit_)));
+  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+       entry != entries_.end(); entry++) {
+    RelocInfo rinfo = entry->rinfo_;
+    RelocInfo::Mode rmode = entry->rinfo_.rmode();
+    ConstantPoolArray::Type type = GetConstantPoolType(rmode);
+
+    // Update constant pool if necessary and get the entry's offset.
+    int offset;
+    if (entry->merged_index_ == -1) {
+      offset = offsets[entry->section_][type];
+      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
+      if (type == ConstantPoolArray::INT64) {
+        constant_pool->set_at_offset(offset, rinfo.data64());
+      } else if (type == ConstantPoolArray::INT32) {
+        constant_pool->set_at_offset(offset,
+                                     static_cast<int32_t>(rinfo.data()));
+      } else if (type == ConstantPoolArray::CODE_PTR) {
+        constant_pool->set_at_offset(offset,
+                                     reinterpret_cast<Address>(rinfo.data()));
+      } else {
+        DCHECK(type == ConstantPoolArray::HEAP_PTR);
+        constant_pool->set_at_offset(offset,
+                                     reinterpret_cast<Object*>(rinfo.data()));
+      }
+      offset -= kHeapObjectTag;
+      entry->merged_index_ = offset;  // Stash offset for merged entries.
+    } else {
+      DCHECK(entry->merged_index_ < (entry - entries_.begin()));
+      offset = entries_[entry->merged_index_].merged_index_;
+    }
+
+    // Patch vldr/ldr instruction with correct offset.
+    Instr instr = assm->instr_at(rinfo.pc());
+    if (entry->section_ == ConstantPoolArray::EXTENDED_SECTION) {
+      if (CpuFeatures::IsSupported(ARMv7)) {
+        // Instructions to patch must be 'movw rd, #0' and 'movt rd, #0'.
+        Instr next_instr = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+        DCHECK((Assembler::IsMovW(instr) &&
+                Instruction::ImmedMovwMovtValue(instr) == 0));
+        DCHECK((Assembler::IsMovT(next_instr) &&
+                Instruction::ImmedMovwMovtValue(next_instr) == 0));
+        assm->instr_at_put(
+            rinfo.pc(), Assembler::PatchMovwImmediate(instr, offset & 0xffff));
+        assm->instr_at_put(
+            rinfo.pc() + Assembler::kInstrSize,
+            Assembler::PatchMovwImmediate(next_instr, offset >> 16));
+      } else {
+        // Instructions to patch must be 'mov rd, #0' and 'orr rd, rd, #0'.
+        Instr instr_2 = assm->instr_at(rinfo.pc() + Assembler::kInstrSize);
+        Instr instr_3 = assm->instr_at(rinfo.pc() + 2 * Assembler::kInstrSize);
+        Instr instr_4 = assm->instr_at(rinfo.pc() + 3 * Assembler::kInstrSize);
+        DCHECK((Assembler::IsMovImmed(instr) &&
+                Instruction::Immed8Value(instr) == 0));
+        DCHECK((Assembler::IsOrrImmed(instr_2) &&
+                Instruction::Immed8Value(instr_2) == 0) &&
+               Assembler::GetRn(instr_2).is(Assembler::GetRd(instr_2)));
+        DCHECK((Assembler::IsOrrImmed(instr_3) &&
+                Instruction::Immed8Value(instr_3) == 0) &&
+               Assembler::GetRn(instr_3).is(Assembler::GetRd(instr_3)));
+        DCHECK((Assembler::IsOrrImmed(instr_4) &&
+                Instruction::Immed8Value(instr_4) == 0) &&
+               Assembler::GetRn(instr_4).is(Assembler::GetRd(instr_4)));
+        assm->instr_at_put(
+            rinfo.pc(), Assembler::PatchShiftImm(instr, (offset & kImm8Mask)));
+        assm->instr_at_put(
+            rinfo.pc() + Assembler::kInstrSize,
+            Assembler::PatchShiftImm(instr_2, (offset & (kImm8Mask << 8))));
+        assm->instr_at_put(
+            rinfo.pc() + 2 * Assembler::kInstrSize,
+            Assembler::PatchShiftImm(instr_3, (offset & (kImm8Mask << 16))));
+        assm->instr_at_put(
+            rinfo.pc() + 3 * Assembler::kInstrSize,
+            Assembler::PatchShiftImm(instr_4, (offset & (kImm8Mask << 24))));
+      }
+    } else if (type == ConstantPoolArray::INT64) {
+      // Instruction to patch must be 'vldr rd, [pp, #0]'.
+      DCHECK((Assembler::IsVldrDPpImmediateOffset(instr) &&
+              Assembler::GetVldrDRegisterImmediateOffset(instr) == 0));
+      DCHECK(is_uint10(offset));
+      assm->instr_at_put(rinfo.pc(), Assembler::SetVldrDRegisterImmediateOffset(
+                                         instr, offset));
+    } else {
+      // Instruction to patch must be 'ldr rd, [pp, #0]'.
+      DCHECK((Assembler::IsLdrPpImmediateOffset(instr) &&
+              Assembler::GetLdrRegisterImmediateOffset(instr) == 0));
+      DCHECK(is_uint12(offset));
+      assm->instr_at_put(
+          rinfo.pc(), Assembler::SetLdrRegisterImmediateOffset(instr, offset));
+    }
+  }
 }
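
The extended-section patching in Populate writes a full 32-bit offset either
as a movw/movt pair (ARMv7) or as one mov plus three orr instructions, each
carrying an 8-bit chunk. A self-contained sketch of just the immediate
splitting; the instruction encodings themselves are handled by the real
PatchMovwImmediate/PatchShiftImm helpers:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t offset = 0x12344;  // an example extended-section byte offset

      // ARMv7 path: movw takes the low half, movt the high half.
      uint32_t movw_imm = offset & 0xffff;
      uint32_t movt_imm = offset >> 16;

      // Pre-ARMv7 path: one mov and three orr instructions, each carrying
      // an 8-bit chunk (kImm8Mask == 0xff), matching the PatchShiftImm
      // calls above.
      uint32_t kImm8Mask = 0xff;
      uint32_t chunks[4] = {
          offset & kImm8Mask,          // mov  rd, #byte0
          offset & (kImm8Mask << 8),   // orr  rd, rd, #byte1
          offset & (kImm8Mask << 16),  // orr  rd, rd, #byte2
          offset & (kImm8Mask << 24),  // orr  rd, rd, #byte3
      };

      printf("movw #%u, movt #%u\n", (unsigned)movw_imm, (unsigned)movt_imm);
      printf("reassembled: %#x\n",
             (unsigned)(chunks[0] | chunks[1] | chunks[2] | chunks[3]));
      return 0;
    }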
 
 
diff --git a/src/arm/assembler-arm.h b/src/arm/assembler-arm.h
index 812f58f..108d5cb 100644
--- a/src/arm/assembler-arm.h
+++ b/src/arm/assembler-arm.h
@@ -43,8 +43,8 @@
 #include <stdio.h>
 #include <vector>
 
-#include "src/assembler.h"
 #include "src/arm/constants-arm.h"
+#include "src/assembler.h"
 #include "src/serialize.h"
 
 namespace v8 {
@@ -100,17 +100,17 @@
   inline static int NumAllocatableRegisters();
 
   static int ToAllocationIndex(Register reg) {
-    ASSERT(reg.code() < kMaxNumAllocatableRegisters);
+    DCHECK(reg.code() < kMaxNumAllocatableRegisters);
     return reg.code();
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     return from_code(index);
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "r0",
       "r1",
@@ -136,17 +136,17 @@
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
 
   void set_code(int code) {
     code_ = code;
-    ASSERT(is_valid());
+    DCHECK(is_valid());
   }
 
   // Unfortunately we can't make this private in a struct.
@@ -182,15 +182,15 @@
   bool is_valid() const { return 0 <= code_ && code_ < 32; }
   bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
   void split_code(int* vm, int* m) const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     *m = code_ & 0x1;
     *vm = code_ >> 1;
   }
@@ -232,15 +232,15 @@
   }
   bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
   void split_code(int* vm, int* m) const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     *m = (code_ & 0x10) >> 4;
     *vm = code_ & 0x0F;
   }
@@ -271,21 +271,21 @@
   bool is(DwVfpRegister reg) const { return code_ == reg.code_; }
   bool is(LowDwVfpRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   SwVfpRegister low() const {
     SwVfpRegister reg;
     reg.code_ = code_ * 2;
 
-    ASSERT(reg.is_valid());
+    DCHECK(reg.is_valid());
     return reg;
   }
   SwVfpRegister high() const {
     SwVfpRegister reg;
     reg.code_ = (code_ * 2) + 1;
 
-    ASSERT(reg.is_valid());
+    DCHECK(reg.is_valid());
     return reg;
   }
 
@@ -307,11 +307,11 @@
   }
   bool is(QwNeonRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   void split_code(int* vm, int* m) const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     int encoded_code = code_ << 1;
     *m = (encoded_code & 0x10) >> 4;
     *vm = encoded_code & 0x0F;
@@ -425,11 +425,11 @@
   bool is_valid() const { return 0 <= code_ && code_ < 16; }
   bool is(CRegister creg) const { return code_ == creg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
 
@@ -518,17 +518,22 @@
   // Return true if this is a register operand.
   INLINE(bool is_reg() const);
 
-  // Return true if this operand fits in one instruction so that no
-  // 2-instruction solution with a load into the ip register is necessary. If
+  // Return the number of actual instructions required to implement the given
+  // instruction for this particular operand. This can be a single instruction,
+  // if no load into the ip register is necessary, or anything between 2 and 4
+  // instructions when we need to load from the constant pool (depending upon
+  // whether the constant pool entry is in the small or extended section). If
   // the instruction this operand is used for is a MOV or MVN instruction the
   // actual instruction to use is required for this calculation. For other
   // instructions instr is ignored.
-  bool is_single_instruction(const Assembler* assembler,
-                             Instr instr = 0) const;
+  //
+  // The value returned is only valid as long as no entries are added to the
+  // constant pool between this call and the actual instruction being emitted.
+  int instructions_required(const Assembler* assembler, Instr instr = 0) const;
   bool must_output_reloc_info(const Assembler* assembler) const;
 
   inline int32_t immediate() const {
-    ASSERT(!rm_.is_valid());
+    DCHECK(!rm_.is_valid());
     return imm32_;
   }
 
@@ -576,12 +581,12 @@
   }
 
   void set_offset(int32_t offset) {
-      ASSERT(rm_.is(no_reg));
+      DCHECK(rm_.is(no_reg));
       offset_ = offset;
   }
 
   uint32_t offset() const {
-      ASSERT(rm_.is(no_reg));
+      DCHECK(rm_.is(no_reg));
       return offset_;
   }
 
@@ -644,60 +649,49 @@
 // Class used to build a constant pool.
 class ConstantPoolBuilder BASE_EMBEDDED {
  public:
-  explicit ConstantPoolBuilder();
-  void AddEntry(Assembler* assm, const RelocInfo& rinfo);
+  ConstantPoolBuilder();
+  ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
+                                            const RelocInfo& rinfo);
   void Relocate(int pc_delta);
   bool IsEmpty();
   Handle<ConstantPoolArray> New(Isolate* isolate);
   void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
 
-  inline int count_of_64bit() const { return count_of_64bit_; }
-  inline int count_of_code_ptr() const { return count_of_code_ptr_; }
-  inline int count_of_heap_ptr() const { return count_of_heap_ptr_; }
-  inline int count_of_32bit() const { return count_of_32bit_; }
+  inline ConstantPoolArray::LayoutSection current_section() const {
+    return current_section_;
+  }
+
+  inline ConstantPoolArray::NumberOfEntries* number_of_entries(
+      ConstantPoolArray::LayoutSection section) {
+    return &number_of_entries_[section];
+  }
+
+  inline ConstantPoolArray::NumberOfEntries* small_entries() {
+    return number_of_entries(ConstantPoolArray::SMALL_SECTION);
+  }
+
+  inline ConstantPoolArray::NumberOfEntries* extended_entries() {
+    return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
+  }
 
  private:
-  bool Is64BitEntry(RelocInfo::Mode rmode);
-  bool Is32BitEntry(RelocInfo::Mode rmode);
-  bool IsCodePtrEntry(RelocInfo::Mode rmode);
-  bool IsHeapPtrEntry(RelocInfo::Mode rmode);
+  struct ConstantPoolEntry {
+    ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
+                      int merged_index)
+        : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
 
-  // TODO(rmcilroy): This should ideally be a ZoneList, however that would mean
-  // RelocInfo would need to subclass ZoneObject which it currently doesn't.
-  std::vector<RelocInfo> entries_;
-  std::vector<int> merged_indexes_;
-  int count_of_64bit_;
-  int count_of_code_ptr_;
-  int count_of_heap_ptr_;
-  int count_of_32bit_;
+    RelocInfo rinfo_;
+    ConstantPoolArray::LayoutSection section_;
+    int merged_index_;
+  };
+
+  ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
+
+  std::vector<ConstantPoolEntry> entries_;
+  ConstantPoolArray::LayoutSection current_section_;
+  ConstantPoolArray::NumberOfEntries number_of_entries_[2];
 };
 
-
-extern const Instr kMovLrPc;
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-extern const Instr kLdrPpMask;
-extern const Instr kLdrPpPattern;
-extern const Instr kBlxRegMask;
-extern const Instr kBlxRegPattern;
-extern const Instr kBlxIp;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
 struct VmovIndex {
   unsigned char index;
 };
@@ -749,13 +743,13 @@
   // Manages the jump elimination optimization if the second parameter is true.
   int branch_offset(Label* L, bool jump_elimination_allowed);
 
-  // Return the address in the constant pool of the code target address used by
-  // the branch/call instruction at pc, or the object in a mov.
-  INLINE(static Address target_pointer_address_at(Address pc));
+  // Returns true if the given pc address is the start of a constant pool load
+  // instruction sequence.
+  INLINE(static bool is_constant_pool_load(Address pc));
 
   // Return the address in the constant pool of the code target address used by
   // the branch/call instruction at pc, or the object in a mov.
-  INLINE(static Address target_constant_pool_address_at(
+  INLINE(static Address constant_pool_entry_address(
     Address pc, ConstantPoolArray* constant_pool));
 
   // Read/Modify the code target address in the branch/call instruction at pc.
@@ -787,6 +781,9 @@
   // in the instruction stream that the call will return from.
   INLINE(static Address return_address_from_call_start(Address pc));
 
+  // Return the code target address of the patched debug break slot.
+  INLINE(static Address break_address_from_return_address(Address pc));
+
   // This sets the branch destination (which is in the constant pool on ARM).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
@@ -918,10 +915,8 @@
   void mov_label_offset(Register dst, Label* label);
 
   // ARMv7 instructions for loading a 32 bit immediate in two instructions.
-  // This may actually emit a different mov instruction, but on an ARMv7 it
-  // is guaranteed to only emit one instruction.
+  // The constant for movw and movt should be in the range 0-0xffff.
   void movw(Register reg, uint32_t immediate, Condition cond = al);
-  // The constant for movt should be in the range 0-0xffff.
   void movt(Register reg, uint32_t immediate, Condition cond = al);
 
   void bic(Register dst, Register src1, const Operand& src2,
@@ -930,6 +925,35 @@
   void mvn(Register dst, const Operand& src,
            SBit s = LeaveCC, Condition cond = al);
 
+  // Shift instructions
+
+  void asr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, ASR, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, ASR, src2.immediate()), s, cond);
+    }
+  }
+
+  void lsl(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, LSL, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, LSL, src2.immediate()), s, cond);
+    }
+  }
+
+  void lsr(Register dst, Register src1, const Operand& src2, SBit s = LeaveCC,
+           Condition cond = al) {
+    if (src2.is_reg()) {
+      mov(dst, Operand(src1, LSR, src2.rm()), s, cond);
+    } else {
+      mov(dst, Operand(src1, LSR, src2.immediate()), s, cond);
+    }
+  }
+
   // Multiply instructions
 
   void mla(Register dst, Register src1, Register src2, Register srcA,
@@ -941,6 +965,8 @@
   void sdiv(Register dst, Register src1, Register src2,
             Condition cond = al);
 
+  void udiv(Register dst, Register src1, Register src2, Condition cond = al);
+
   void mul(Register dst, Register src1, Register src2,
            SBit s = LeaveCC, Condition cond = al);
 
@@ -1298,7 +1324,7 @@
   }
 
   // Check whether an immediate fits an addressing mode 1 instruction.
-  bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
+  static bool ImmediateFitsAddrMode1Instruction(int32_t imm32);
 
   // Check whether an immediate fits an addressing mode 2 instruction.
   bool ImmediateFitsAddrMode2Instruction(int32_t imm32);
@@ -1330,12 +1356,12 @@
   // Record the AST id of the CallIC being compiled, so that it can be placed
   // in the relocation information.
   void SetRecordedAstId(TypeFeedbackId ast_id) {
-    ASSERT(recorded_ast_id_.IsNone());
+    DCHECK(recorded_ast_id_.IsNone());
     recorded_ast_id_ = ast_id;
   }
 
   TypeFeedbackId RecordedAstId() {
-    ASSERT(!recorded_ast_id_.IsNone());
+    DCHECK(!recorded_ast_id_.IsNone());
     return recorded_ast_id_;
   }
 
@@ -1390,6 +1416,10 @@
   static int GetBranchOffset(Instr instr);
   static bool IsLdrRegisterImmediate(Instr instr);
   static bool IsVldrDRegisterImmediate(Instr instr);
+  static Instr GetConsantPoolLoadPattern();
+  static Instr GetConsantPoolLoadMask();
+  static bool IsLdrPpRegOffset(Instr instr);
+  static Instr GetLdrPpRegOffsetPattern();
   static bool IsLdrPpImmediateOffset(Instr instr);
   static bool IsVldrDPpImmediateOffset(Instr instr);
   static int GetLdrRegisterImmediateOffset(Instr instr);
@@ -1411,14 +1441,24 @@
   static bool IsLdrRegFpNegOffset(Instr instr);
   static bool IsLdrPcImmediateOffset(Instr instr);
   static bool IsVldrDPcImmediateOffset(Instr instr);
+  static bool IsBlxReg(Instr instr);
+  static bool IsBlxIp(Instr instr);
   static bool IsTstImmediate(Instr instr);
   static bool IsCmpRegister(Instr instr);
   static bool IsCmpImmediate(Instr instr);
   static Register GetCmpImmediateRegister(Instr instr);
   static int GetCmpImmediateRawImmediate(Instr instr);
   static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+  static bool IsMovImmed(Instr instr);
+  static bool IsOrrImmed(Instr instr);
   static bool IsMovT(Instr instr);
+  static Instr GetMovTPattern();
   static bool IsMovW(Instr instr);
+  static Instr GetMovWPattern();
+  static Instr EncodeMovwImmediate(uint32_t immediate);
+  static Instr PatchMovwImmediate(Instr instruction, uint32_t immediate);
+  static int DecodeShiftImm(Instr instr);
+  static Instr PatchShiftImm(Instr instr, int immed);
 
   // Constants in pools are accessed via PC-relative addressing, which can
   // reach +/-4KB for integer PC-relative loads and +/-1KB for floating-point
@@ -1443,13 +1483,13 @@
   // Generate the constant pool for the generated code.
   void PopulateConstantPool(ConstantPoolArray* constant_pool);
 
-  bool can_use_constant_pool() const {
-    return is_constant_pool_available() && !constant_pool_full_;
+  bool is_constant_pool_available() const { return constant_pool_available_; }
+
+  bool use_extended_constant_pool() const {
+    return constant_pool_builder_.current_section() ==
+           ConstantPoolArray::EXTENDED_SECTION;
   }
 
-  void set_constant_pool_full() {
-    constant_pool_full_ = true;
-  }
 
  protected:
   // Relocation for a type-recording IC has the AST id added to it.  This
@@ -1484,10 +1524,10 @@
       // Max pool start (if we need a jump and an alignment).
       int start = pc_offset() + kInstrSize + 2 * kPointerSize;
       // Check the constant pool hasn't been blocked for too long.
-      ASSERT((num_pending_32_bit_reloc_info_ == 0) ||
+      DCHECK((num_pending_32_bit_reloc_info_ == 0) ||
              (start + num_pending_64_bit_reloc_info_ * kDoubleSize <
               (first_const_pool_32_use_ + kMaxDistToIntPool)));
-      ASSERT((num_pending_64_bit_reloc_info_ == 0) ||
+      DCHECK((num_pending_64_bit_reloc_info_ == 0) ||
              (start < (first_const_pool_64_use_ + kMaxDistToFPPool)));
 #endif
       // Two cases:
@@ -1504,10 +1544,6 @@
            (pc_offset() < no_const_pool_before_);
   }
 
-  bool is_constant_pool_available() const {
-    return constant_pool_available_;
-  }
-
   void set_constant_pool_available(bool available) {
     constant_pool_available_ = available;
   }
@@ -1577,9 +1613,6 @@
   // Indicates whether the constant pool can be accessed, which is only possible
   // if the pp register points to the current code object's constant pool.
   bool constant_pool_available_;
-  // Indicates whether the constant pool is too full to accept new entries due
-  // to the ldr instruction's limitted immediate offset range.
-  bool constant_pool_full_;
 
   // Code emission
   inline void CheckBuffer();
@@ -1611,7 +1644,7 @@
   // Record reloc info for current pc_
   void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
   void RecordRelocInfo(const RelocInfo& rinfo);
-  void ConstantPoolAddEntry(const RelocInfo& rinfo);
+  ConstantPoolArray::LayoutSection ConstantPoolAddEntry(const RelocInfo& rinfo);
 
   friend class RelocInfo;
   friend class CodePatcher;
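
One detail worth calling out in the header changes: the new asr/lsl/lsr
helpers are pure aliases, since ARM has no standalone shift instructions;
each expands to a mov whose second operand routes src1 through the barrel
shifter. Modeled on plain integers (and assuming the compiler's >> on signed
values is an arithmetic shift, as on all mainstream ARM ABIs):

    #include <cassert>
    #include <cstdint>

    // What 'mov(dst, Operand(src1, <shift>, #imm))' computes, register-free.
    uint32_t lsl(uint32_t src1, unsigned imm) { return src1 << imm; }
    uint32_t lsr(uint32_t src1, unsigned imm) { return src1 >> imm; }
    int32_t asr(int32_t src1, unsigned imm) { return src1 >> imm; }

    int main() {
      assert(lsl(0x00ffu, 8) == 0xff00u);  // lsl r0, r1, #8
      assert(lsr(0xff00u, 8) == 0x00ffu);  // lsr r0, r1, #8
      assert(asr(-256, 8) == -1);          // asr r0, r1, #8
      return 0;
    }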
diff --git a/src/arm/builtins-arm.cc b/src/arm/builtins-arm.cc
index 3689848..9d1a72a 100644
--- a/src/arm/builtins-arm.cc
+++ b/src/arm/builtins-arm.cc
@@ -11,7 +11,6 @@
 #include "src/deoptimizer.h"
 #include "src/full-codegen.h"
 #include "src/runtime.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -40,7 +39,7 @@
     num_extra_args = 1;
     __ push(r1);
   } else {
-    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
   // JumpToExternalReference expects r0 to contain the number of arguments
@@ -303,7 +302,7 @@
   __ cmp(sp, Operand(ip));
   __ b(hs, &ok);
 
-  CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
   GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
@@ -323,7 +322,7 @@
   // -----------------------------------
 
   // Should never create mementos for api functions.
-  ASSERT(!is_api_function || !create_memento);
+  DCHECK(!is_api_function || !create_memento);
 
   Isolate* isolate = masm->isolate();
 
@@ -385,7 +384,7 @@
         __ push(r1);
 
         __ Push(r2, r1);  // r1 = constructor
-        __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
 
         __ pop(r2);
         __ pop(r1);
@@ -411,11 +410,11 @@
       // r4: JSObject (not tagged)
       __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
       __ mov(r5, r4);
-      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
       __ str(r2, MemOperand(r5, kPointerSize, PostIndex));
-      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
       __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
-      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+      DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
       __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
 
       // Fill all the in-object properties with the appropriate filler.
@@ -424,7 +423,7 @@
       // r3: object size (in words, including memento if create_memento)
       // r4: JSObject (not tagged)
       // r5: First in-object property of JSObject (not tagged)
-      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
       __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
 
       if (!is_api_function) {
@@ -463,11 +462,11 @@
         // Fill in memento fields.
         // r5: points to the allocated but uninitialized memento.
         __ LoadRoot(r6, Heap::kAllocationMementoMapRootIndex);
-        ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+        DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
         __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
         // Load the AllocationSite
         __ ldr(r6, MemOperand(sp, 2 * kPointerSize));
-        ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+        DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
         __ str(r6, MemOperand(r5, kPointerSize, PostIndex));
       } else {
         __ add(r0, r4, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
@@ -522,9 +521,9 @@
       // r5: FixedArray (not tagged)
       __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
       __ mov(r2, r5);
-      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
       __ str(r6, MemOperand(r2, kPointerSize, PostIndex));
-      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+      DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
       __ SmiTag(r0, r3);
       __ str(r0, MemOperand(r2, kPointerSize, PostIndex));
 
@@ -535,7 +534,7 @@
       // r4: JSObject
       // r5: FixedArray (not tagged)
       __ add(r6, r2, Operand(r3, LSL, kPointerSizeLog2));  // End of object.
-      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
       { Label loop, entry;
         __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
         __ b(&entry);
@@ -578,9 +577,9 @@
 
     __ push(r1);  // argument for Runtime_NewObject
     if (create_memento) {
-      __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
     } else {
-      __ CallRuntime(Runtime::kHiddenNewObject, 1);
+      __ CallRuntime(Runtime::kNewObject, 1);
     }
     __ mov(r4, r0);
 
@@ -808,8 +807,8 @@
 }
 
 
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
   GenerateTailCallToReturnedCode(masm);
 }
 
@@ -823,7 +822,7 @@
   // Whether to compile in a background thread.
   __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
 
-  __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
   // Restore receiver.
   __ pop(r1);
 }
@@ -918,7 +917,7 @@
     // registers.
     __ stm(db_w, sp, kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ ldm(ia_w, sp, kJSCallerSaved | kCalleeSaved);
   }
 
@@ -944,7 +943,7 @@
     // Pass the function and deoptimization type to the runtime system.
     __ mov(r0, Operand(Smi::FromInt(static_cast<int>(type))));
     __ push(r0);
-    __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
   }
 
   // Get the full codegen state from the stack and untag it -> r6.
@@ -1035,7 +1034,7 @@
   __ b(hs, &ok);
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+    __ CallRuntime(Runtime::kStackGuard, 0);
   }
   __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
           RelocInfo::CODE_TARGET);
@@ -1071,7 +1070,7 @@
   // r1: function
   Label shift_arguments;
   __ mov(r4, Operand::Zero());  // indicate regular JS_FUNCTION
-  { Label convert_to_object, use_global_receiver, patch_receiver;
+  { Label convert_to_object, use_global_proxy, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
 
@@ -1096,10 +1095,10 @@
 
     __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
     __ cmp(r2, r3);
-    __ b(eq, &use_global_receiver);
+    __ b(eq, &use_global_proxy);
     __ LoadRoot(r3, Heap::kNullValueRootIndex);
     __ cmp(r2, r3);
-    __ b(eq, &use_global_receiver);
+    __ b(eq, &use_global_proxy);
 
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
     __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
@@ -1128,9 +1127,9 @@
     __ mov(r4, Operand::Zero());
     __ jmp(&patch_receiver);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
   __ ldr(r2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
-  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+  __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
 
     __ bind(&patch_receiver);
     __ add(r3, sp, Operand(r0, LSL, kPointerSizeLog2));
@@ -1284,7 +1283,7 @@
 
     // Compute the receiver.
     // Do not transform the receiver for strict mode functions.
-    Label call_to_object, use_global_receiver;
+    Label call_to_object, use_global_proxy;
     __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
     __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                              kSmiTagSize)));
@@ -1298,10 +1297,10 @@
     __ JumpIfSmi(r0, &call_to_object);
     __ LoadRoot(r1, Heap::kNullValueRootIndex);
     __ cmp(r0, r1);
-    __ b(eq, &use_global_receiver);
+    __ b(eq, &use_global_proxy);
     __ LoadRoot(r1, Heap::kUndefinedValueRootIndex);
     __ cmp(r0, r1);
-    __ b(eq, &use_global_receiver);
+    __ b(eq, &use_global_proxy);
 
     // Check if the receiver is already a JavaScript object.
     // r0: receiver
@@ -1316,9 +1315,9 @@
     __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
     __ b(&push_receiver);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ ldr(r0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
-    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
+    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kGlobalProxyOffset));
 
     // Push the receiver.
     // r0: receiver
@@ -1422,13 +1421,7 @@
   __ ldr(r1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
                               kPointerSize)));
 
-  if (FLAG_enable_ool_constant_pool) {
-    __ add(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
-    __ ldm(ia_w, sp, pp.bit() | fp.bit() | lr.bit());
-  } else {
-    __ mov(sp, fp);;
-    __ ldm(ia_w, sp, fp.bit() | lr.bit());
-  }
+  __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR);
   __ add(sp, sp, Operand::PointerOffsetFromSmiKey(r1));
   __ add(sp, sp, Operand(kPointerSize));  // adjust for receiver
 }
diff --git a/src/arm/code-stubs-arm.cc b/src/arm/code-stubs-arm.cc
index 56a34e7..25270d1 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/arm/code-stubs-arm.cc
@@ -6,452 +6,86 @@
 
 #if V8_TARGET_ARCH_ARM
 
+#include "src/base/bits.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
 #include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime.h"
 
 namespace v8 {
 namespace internal {
 
 
-void FastNewClosureStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r2 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r3, r2, r1 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(
-          Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r3, r2, r1, r0 };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r2, r3 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r2, r1, r0 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
-}
-
-
-void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void StringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r0, r2 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r2, r1, r0 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r0, r1 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  Address entry =
-      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(CompareNilIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
-  // register state
-  // r0 -- number of arguments
-  // r1 -- function
-  // r2 -- allocation site with elements kind
-  static Register registers_variable_args[] = { r1, r2, r0 };
-  static Register registers_no_args[] = { r1, r2 };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = r0;
-    descriptor->register_param_count_ = 3;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
-  // register state
-  // r0 -- number of arguments
-  // r1 -- constructor function
-  static Register registers_variable_args[] = { r1, r0 };
-  static Register registers_no_args[] = { r1 };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kInternalArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 1;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = r0;
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(r0, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
-void ToBooleanStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ToBooleanIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
-}
-
-
-void StoreGlobalStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r2, r0 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r0, r3, r1, r2 };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r2, r1, r0 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { r1, r0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
-  static PlatformCallInterfaceDescriptor default_descriptor =
-      PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
-  static PlatformCallInterfaceDescriptor noInlineDescriptor =
-      PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
-    static Register registers[] = { r1,  // JSFunction
-                                    cp,  // context
-                                    r0,  // actual number of arguments
-                                    r2,  // expected number of arguments
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // JSFunction
-        Representation::Tagged(),     // context
-        Representation::Integer32(),  // actual number of arguments
-        Representation::Integer32(),  // expected number of arguments
-    };
-    descriptor->register_param_count_ = 4;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &default_descriptor;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::KeyedCall);
-    static Register registers[] = { cp,  // context
-                                    r2,  // key
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // key
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::NamedCall);
-    static Register registers[] = { cp,  // context
-                                    r2,  // name
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // name
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::CallHandler);
-    static Register registers[] = { cp,  // context
-                                    r0,  // receiver
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),  // context
-        Representation::Tagged(),  // receiver
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &default_descriptor;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ApiFunctionCall);
-    static Register registers[] = { r0,  // callee
-                                    r4,  // call_data
-                                    r2,  // holder
-                                    r1,  // api_function_address
-                                    cp,  // context
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),    // callee
-        Representation::Tagged(),    // call_data
-        Representation::Tagged(),    // holder
-        Representation::External(),  // api_function_address
-        Representation::Tagged(),    // context
-    };
-    descriptor->register_param_count_ = 5;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &default_descriptor;
-  }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
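
The block above replaces per-stub register tables with calls into a shared
Initialize on CodeStubDescriptor. A made-up miniature of that pattern (none
of these types or fields are V8's) just to show why each call site shrinks
to one or two lines:

    #include <cstddef>

    using Address = const void*;
    enum StubFunctionMode { JS_FUNCTION_STUB_MODE };
    enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };

    struct CodeStubDescriptor {
      // One Initialize call replaces the field-by-field setup the deleted
      // code performed (register_param_count_, register_params_, ...).
      void Initialize(Address deopt_handler, int hint_stack_parameter_count,
                      StubFunctionMode mode,
                      HandlerArgumentsMode args = DONT_PASS_ARGUMENTS) {
        deopt_handler_ = deopt_handler;
        hint_stack_parameter_count_ = hint_stack_parameter_count;
        mode_ = mode;
        args_ = args;
      }

      Address deopt_handler_ = nullptr;
      int hint_stack_parameter_count_ = 0;
      StubFunctionMode mode_ = JS_FUNCTION_STUB_MODE;
      HandlerArgumentsMode args_ = DONT_PASS_ARGUMENTS;
    };

    int main() {
      CodeStubDescriptor d;
      d.Initialize(nullptr, /*constant_stack_parameter_count=*/1,
                   JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
      return 0;
    }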
 
 
@@ -472,129 +106,34 @@
                                            Register rhs);
 
 
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
   isolate()->counters()->code_stubs()->Increment();
 
-  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
-  int param_count = descriptor->register_param_count_;
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetEnvironmentParameterCount();
   {
     // Call the runtime system in a fresh internal frame.
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-    ASSERT(descriptor->register_param_count_ == 0 ||
-           r0.is(descriptor->register_params_[param_count - 1]));
+    DCHECK(param_count == 0 ||
+           r0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
     // Push arguments
     for (int i = 0; i < param_count; ++i) {
-      __ push(descriptor->register_params_[i]);
+      __ push(descriptor.GetEnvironmentParameterRegister(i));
     }
-    ExternalReference miss = descriptor->miss_handler();
-    __ CallExternalReference(miss, descriptor->register_param_count_);
+    __ CallExternalReference(miss, param_count);
   }
 
   __ Ret();
 }
 
 
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
-// scratch register.  Destroys the source register.  No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
-  ConvertToDoubleStub(Isolate* isolate,
-                      Register result_reg_1,
-                      Register result_reg_2,
-                      Register source_reg,
-                      Register scratch_reg)
-      : PlatformCodeStub(isolate),
-        result1_(result_reg_1),
-        result2_(result_reg_2),
-        source_(source_reg),
-        zeros_(scratch_reg) { }
-
- private:
-  Register result1_;
-  Register result2_;
-  Register source_;
-  Register zeros_;
-
-  // Minor key encoding in 16 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 14> {};
-
-  Major MajorKey() { return ConvertToDouble; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return  result1_.code() +
-           (result2_.code() << 4) +
-           (source_.code() << 8) +
-           (zeros_.code() << 12);
-  }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-  Register exponent = result1_;
-  Register mantissa = result2_;
-
-  Label not_special;
-  __ SmiUntag(source_);
-  // Move sign bit from source to destination.  This works because the sign bit
-  // in the exponent word of the double has the same position and polarity as
-  // the 2's complement sign bit in a Smi.
-  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
-  __ and_(exponent, source_, Operand(HeapNumber::kSignMask), SetCC);
-  // Subtract from 0 if source was negative.
-  __ rsb(source_, source_, Operand::Zero(), LeaveCC, ne);
-
-  // We have -1, 0 or 1, which we treat specially. Register source_ contains
-  // absolute value: it is either equal to 1 (special case of -1 and 1),
-  // greater than 1 (not a special case) or less than 1 (special case of 0).
-  __ cmp(source_, Operand(1));
-  __ b(gt, &not_special);
-
-  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
-  const uint32_t exponent_word_for_1 =
-      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
-  __ orr(exponent, exponent, Operand(exponent_word_for_1), LeaveCC, eq);
-  // 1, 0 and -1 all have 0 for the second word.
-  __ mov(mantissa, Operand::Zero());
-  __ Ret();
-
-  __ bind(&not_special);
-  __ clz(zeros_, source_);
-  // Compute exponent and or it into the exponent register.
-  // We use mantissa as a scratch register here.  Use a fudge factor to
-  // divide the constant 31 + HeapNumber::kExponentBias, 0x41d, into two parts
-  // that fit in the ARM's constant field.
-  int fudge = 0x400;
-  __ rsb(mantissa, zeros_, Operand(31 + HeapNumber::kExponentBias - fudge));
-  __ add(mantissa, mantissa, Operand(fudge));
-  __ orr(exponent,
-         exponent,
-         Operand(mantissa, LSL, HeapNumber::kExponentShift));
-  // Shift up the source chopping the top bit off.
-  __ add(zeros_, zeros_, Operand(1));
-  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
-  __ mov(source_, Operand(source_, LSL, zeros_));
-  // Compute lower part of fraction (last 12 bits).
-  __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
-  // And the top (top 20 bits).
-  __ orr(exponent,
-         exponent,
-         Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
-  __ Ret();
-}
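
The stub deleted above hand-packs a 32-bit integer into the two words of an
IEEE-754 double. A minimal C++ sketch of the same packing for the general case
(|v| > 1), assuming GCC/Clang's __builtin_clz in place of the ARM CLZ
instruction; the helper name is illustrative, not part of the tree:

  #include <stdint.h>
  // Split |v| into sign, biased exponent and mantissa, mirroring the stub:
  // 1 sign bit, 11 exponent bits (bias 1023), 20 + 32 mantissa bits.
  void PackIntAsDouble(int32_t v, uint32_t* exponent_word,
                       uint32_t* mantissa_word) {
    uint32_t sign = (uint32_t)v & 0x80000000u;  // same position as Smi sign
    uint32_t mag = sign ? 0u - (uint32_t)v : (uint32_t)v;
    int zeros = __builtin_clz(mag);             // what the CLZ above computes
    uint32_t biased = (uint32_t)(31 - zeros) + 1023u;  // kExponentBias
    mag <<= zeros + 1;                          // shift out the implicit 1
    *exponent_word = sign | (biased << 20) | (mag >> 12);  // top 20 bits
    *mantissa_word = mag << 20;                 // low 32 mantissa bits
  }

For v == 5 this yields 0x40140000 / 0x00000000, the upper and lower words of
the double 5.0.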
-
-
 void DoubleToIStub::Generate(MacroAssembler* masm) {
   Label out_of_range, only_low, negate, done;
   Register input_reg = source();
   Register result_reg = destination();
-  ASSERT(is_truncating());
+  DCHECK(is_truncating());
 
   int double_offset = offset();
   // Account for saved regs if input is sp.
@@ -711,29 +250,29 @@
   // We test for the special value that has a different exponent.  This test
   // has the neat side effect of setting the flags according to the sign.
   STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
-  __ cmp(the_int_, Operand(0x80000000u));
+  __ cmp(the_int(), Operand(0x80000000u));
   __ b(eq, &max_negative_int);
   // Set up the correct exponent in scratch().  All non-Smi int32s have the
   // same exponent.
   // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
   uint32_t non_smi_exponent =
       (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-  __ mov(scratch_, Operand(non_smi_exponent));
+  __ mov(scratch(), Operand(non_smi_exponent));
   // Set the sign bit in scratch() if the value was negative.
-  __ orr(scratch_, scratch_, Operand(HeapNumber::kSignMask), LeaveCC, cs);
+  __ orr(scratch(), scratch(), Operand(HeapNumber::kSignMask), LeaveCC, cs);
   // Subtract from 0 if the value was negative.
-  __ rsb(the_int_, the_int_, Operand::Zero(), LeaveCC, cs);
+  __ rsb(the_int(), the_int(), Operand::Zero(), LeaveCC, cs);
   // We should be masking the implicit first digit of the mantissa away here,
   // but it just ends up combining harmlessly with the last digit of the
   // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
   // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
-  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
   const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-  __ orr(scratch_, scratch_, Operand(the_int_, LSR, shift_distance));
-  __ str(scratch_, FieldMemOperand(the_heap_number_,
-                                   HeapNumber::kExponentOffset));
-  __ mov(scratch_, Operand(the_int_, LSL, 32 - shift_distance));
-  __ str(scratch_, FieldMemOperand(the_heap_number_,
-                                   HeapNumber::kMantissaOffset));
+  __ orr(scratch(), scratch(), Operand(the_int(), LSR, shift_distance));
+  __ str(scratch(),
+         FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+  __ mov(scratch(), Operand(the_int(), LSL, 32 - shift_distance));
+  __ str(scratch(),
+         FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
   __ Ret();
 
   __ bind(&max_negative_int);
@@ -743,9 +282,9 @@
   // significant 1 bit is not stored.
   non_smi_exponent += 1 << HeapNumber::kExponentShift;
   __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
-  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
+  __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
   __ mov(ip, Operand::Zero());
-  __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+  __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
   __ Ret();
 }
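
For context on non_smi_exponent above: Smis hold 31-bit payloads, so any int32
that needs a heap number here has magnitude in [2^30, 2^31), i.e. 1.xxx * 2^30,
and a single biased exponent of 30 covers every case. A one-line sketch of the
constant, with the bias and shift values assumed from HeapNumber's layout:

  uint32_t NonSmiExponentWord() {
    const uint32_t kExponentBias = 1023;  // IEEE-754 double bias
    const uint32_t kExponentShift = 20;   // exponent field starts at bit 20
    return (kExponentBias + 30) << kExponentShift;  // == 0x41D00000
  }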
 
@@ -857,7 +396,7 @@
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict) {
-  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
          (lhs.is(r1) && rhs.is(r0)));
 
   Label rhs_is_smi;
@@ -919,7 +458,7 @@
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register lhs,
                                            Register rhs) {
-    ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+    DCHECK((lhs.is(r0) && rhs.is(r1)) ||
            (lhs.is(r1) && rhs.is(r0)));
 
     // If either operand is a JS object or an oddball value, then they are
@@ -965,7 +504,7 @@
                                        Label* both_loaded_as_doubles,
                                        Label* not_heap_numbers,
                                        Label* slow) {
-  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
          (lhs.is(r1) && rhs.is(r0)));
 
   __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
@@ -988,7 +527,7 @@
                                                      Register rhs,
                                                      Label* possible_strings,
                                                      Label* not_both_strings) {
-  ASSERT((lhs.is(r0) && rhs.is(r1)) ||
+  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
          (lhs.is(r1) && rhs.is(r0)));
 
   // r2 is object type of rhs.
@@ -1026,15 +565,14 @@
 }
 
 
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
-                                         Register input,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                          Register scratch,
-                                         CompareIC::State expected,
+                                         CompareICState::State expected,
                                          Label* fail) {
   Label ok;
-  if (expected == CompareIC::SMI) {
+  if (expected == CompareICState::SMI) {
     __ JumpIfNotSmi(input, fail);
-  } else if (expected == CompareIC::NUMBER) {
+  } else if (expected == CompareICState::NUMBER) {
     __ JumpIfSmi(input, &ok);
     __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                 DONT_DO_SMI_CHECK);
@@ -1048,14 +586,14 @@
 // On entry r1 and r2 are the values to be compared.
 // On exit r0 is 0, positive or negative to indicate the result of
 // the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   Register lhs = r1;
   Register rhs = r0;
   Condition cc = GetCondition();
 
   Label miss;
-  ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
-  ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
+  CompareICStub_CheckInputType(masm, lhs, r2, left(), &miss);
+  CompareICStub_CheckInputType(masm, rhs, r3, right(), &miss);
 
   Label slow;  // Call builtin.
   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
@@ -1078,7 +616,7 @@
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
-  ASSERT_EQ(0, Smi::FromInt(0));
+  DCHECK_EQ(0, Smi::FromInt(0));
   __ and_(r2, lhs, Operand(rhs));
   __ JumpIfNotSmi(r2, &not_smis);
   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
@@ -1151,29 +689,19 @@
         masm, lhs, rhs, &flat_string_check, &slow);
   }
 
-  // Check for both being sequential ASCII strings, and inline if that is the
-  // case.
+  // Check for both being sequential one-byte strings,
+  // and inline if that is the case.
   __ bind(&flat_string_check);
 
-  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
+  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r2, r3, &slow);
 
   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
                       r3);
   if (cc == eq) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
-                                                     lhs,
-                                                     rhs,
-                                                     r2,
-                                                     r3,
-                                                     r4);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r2, r3, r4);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
-                                                       lhs,
-                                                       rhs,
-                                                       r2,
-                                                       r3,
-                                                       r4,
-                                                       r5);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r2, r3, r4,
+                                                    r5);
   }
   // Never falls through to here.
 
@@ -1190,7 +718,7 @@
     if (cc == lt || cc == le) {
       ncr = GREATER;
     } else {
-      ASSERT(cc == gt || cc == ge);  // remaining cases
+      DCHECK(cc == gt || cc == ge);  // remaining cases
       ncr = LESS;
     }
     __ mov(r0, Operand(Smi::FromInt(ncr)));
@@ -1214,7 +742,7 @@
 
   const Register scratch = r1;
 
-  if (save_doubles_ == kSaveFPRegs) {
+  if (save_doubles()) {
     __ SaveFPRegs(sp, scratch);
   }
   const int argument_count = 1;
@@ -1226,7 +754,7 @@
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()),
       argument_count);
-  if (save_doubles_ == kSaveFPRegs) {
+  if (save_doubles()) {
     __ RestoreFPRegs(sp, scratch);
   }
   __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
@@ -1235,7 +763,8 @@
 
 void MathPowStub::Generate(MacroAssembler* masm) {
   const Register base = r1;
-  const Register exponent = r2;
+  const Register exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(exponent.is(r2));
   const Register heapnumbermap = r5;
   const Register heapnumber = r0;
   const DwVfpRegister double_base = d0;
@@ -1247,7 +776,7 @@
   const Register scratch2 = r4;
 
   Label call_runtime, done, int_exponent;
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     Label base_is_smi, unpack_exponent;
     // The exponent and base are supplied as arguments on the stack.
     // This can only happen if the stub is called from non-optimized code.
@@ -1277,7 +806,7 @@
     __ b(ne, &call_runtime);
     __ vldr(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
+  } else if (exponent_type() == TAGGED) {
     // Base is already in double_base.
     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
 
@@ -1285,7 +814,7 @@
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
   }
 
-  if (exponent_type_ != INTEGER) {
+  if (exponent_type() != INTEGER) {
     Label int_exponent_convert;
     // Detect integer exponents stored as double.
     __ vcvt_u32_f64(single_scratch, double_exponent);
@@ -1295,7 +824,7 @@
     __ VFPCompareAndSetFlags(double_scratch, double_exponent);
     __ b(eq, &int_exponent_convert);
 
-    if (exponent_type_ == ON_STACK) {
+    if (exponent_type() == ON_STACK) {
       // Detect square root case.  Crankshaft detects constant +/-0.5 at
       // compile time and uses DoMathPowHalf instead.  We then skip this check
       // for non-constant cases of +/-0.5 as these hardly occur.
@@ -1360,7 +889,7 @@
   __ bind(&int_exponent);
 
   // Get two copies of exponent in the registers scratch and exponent.
-  if (exponent_type_ == INTEGER) {
+  if (exponent_type() == INTEGER) {
     __ mov(scratch, exponent);
   } else {
     // Exponent has previously been stored into scratch as untagged integer.
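
The integer-exponent loop elided by this hunk raises the base by binary
exponentiation and inverts at the end for negative exponents. A scalar sketch
under that assumption (not the stub's literal code):

  double PowInt(double base, int exponent) {
    unsigned e = exponent < 0 ? 0u - (unsigned)exponent : (unsigned)exponent;
    double result = 1.0;
    double b = base;
    while (e != 0) {
      if (e & 1) result *= b;  // fold in this bit's power of the base
      b *= b;                  // square for the next bit
      e >>= 1;
    }
    return exponent < 0 ? 1.0 / result : result;
  }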
@@ -1396,10 +925,10 @@
 
   // Returning or bailing out.
   Counters* counters = isolate()->counters();
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
+    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
 
     // The stub is called from non-optimized code, which expects the result
     // as heap number in exponent.
@@ -1408,7 +937,7 @@
         heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
     __ vstr(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    ASSERT(heapnumber.is(r0));
+    DCHECK(heapnumber.is(r0));
     __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ Ret(2);
   } else {
@@ -1449,20 +978,10 @@
 
 
 void CodeStub::GenerateFPStubs(Isolate* isolate) {
+  // Generate if not already in cache.
   SaveFPRegsMode mode = kSaveFPRegs;
-  CEntryStub save_doubles(isolate, 1, mode);
-  StoreBufferOverflowStub stub(isolate, mode);
-  // These stubs might already be in the snapshot, detect that and don't
-  // regenerate, which would lead to code stub initialization state being messed
-  // up.
-  Code* save_doubles_code;
-  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
-    save_doubles_code = *save_doubles.GetCode();
-  }
-  Code* store_buffer_overflow_code;
-  if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
-      store_buffer_overflow_code = *stub.GetCode();
-  }
+  CEntryStub(isolate, 1, mode).GetCode();
+  StoreBufferOverflowStub(isolate, mode).GetCode();
   isolate->set_fp_stubs_generated(true);
 }
 
@@ -1491,7 +1010,7 @@
 
   // Enter the exit frame that transitions from JavaScript to C++.
   FrameScope scope(masm, StackFrame::MANUAL);
-  __ EnterExitFrame(save_doubles_);
+  __ EnterExitFrame(save_doubles());
 
   // Store a copy of argc in callee-saved registers for later.
   __ mov(r4, Operand(r0));
@@ -1508,7 +1027,7 @@
   if (FLAG_debug_code) {
     if (frame_alignment > kPointerSize) {
       Label alignment_as_expected;
-      ASSERT(IsPowerOf2(frame_alignment));
+      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
       __ tst(sp, Operand(frame_alignment_mask));
       __ b(eq, &alignment_as_expected);
       // Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -1575,7 +1094,7 @@
   // sp: stack pointer
   // fp: frame pointer
   // Callee-saved register r4 still holds argc.
-  __ LeaveExitFrame(save_doubles_, r4, true);
+  __ LeaveExitFrame(save_doubles(), r4, true);
   __ mov(pc, lr);
 
   // Handling of exception.
@@ -1603,7 +1122,7 @@
 }
 
 
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
   // r0: code entry
   // r1: function
   // r2: receiver
@@ -1642,7 +1161,7 @@
   // r2: receiver
   // r3: argc
   // r4: argv
-  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  int marker = type();
   if (FLAG_enable_ool_constant_pool) {
     __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array()));
   }
@@ -1723,7 +1242,7 @@
   // r2: receiver
   // r3: argc
   // r4: argv
-  if (is_construct) {
+  if (type() == StackFrame::ENTRY_CONSTRUCT) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                       isolate());
     __ mov(ip, Operand(construct_entry));
@@ -1780,24 +1299,19 @@
 // * function: r1 or at sp.
 //
 // An inlined call site may have been generated before calling this stub.
-// In this case the offset to the inline site to patch is passed in r5.
+// In this case the offsets to the inline sites to patch are passed in r5 and
+// r6.
 // (See LCodeGen::DoInstanceOfKnownGlobal)
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
-  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-  // ReturnTrueFalse is only implemented for inlined call sites.
-  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
 
   // Fixed register usage throughout the stub:
   const Register object = r0;  // Object (lhs).
   Register map = r3;  // Map of the object.
   const Register function = r1;  // Function (rhs).
   const Register prototype = r4;  // Prototype of the function.
-  const Register inline_site = r9;
   const Register scratch = r2;
 
-  const int32_t kDeltaToLoadBoolResult = 4 * kPointerSize;
-
   Label slow, loop, is_instance, is_not_instance, not_js_object;
 
   if (!HasArgsInRegisters()) {
@@ -1811,7 +1325,7 @@
 
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     Label miss;
     __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ b(ne, &miss);
@@ -1836,17 +1350,17 @@
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    ASSERT(HasArgsInRegisters());
+    DCHECK(HasArgsInRegisters());
     // Patch the (relocated) inlined map check.
 
-    // The offset was stored in r5
+    // The map_load_offset was stored in r5
     //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
-    const Register offset = r5;
-    __ sub(inline_site, lr, offset);
+    const Register map_load_offset = r5;
+    __ sub(r9, lr, map_load_offset);
     // Get the map location in r5 and patch it.
-    __ GetRelocatedValueLocation(inline_site, offset);
-    __ ldr(offset, MemOperand(offset));
-    __ str(map, FieldMemOperand(offset, Cell::kValueOffset));
+    __ GetRelocatedValueLocation(r9, map_load_offset, scratch);
+    __ ldr(map_load_offset, MemOperand(map_load_offset));
+    __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset));
   }
 
   // Register mapping: r3 is object map and r4 is function prototype.
@@ -1867,17 +1381,24 @@
   __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
   __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
+  Factory* factory = isolate()->factory();
 
   __ bind(&is_instance);
   if (!HasCallSiteInlineCheck()) {
     __ mov(r0, Operand(Smi::FromInt(0)));
     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ Move(r0, factory->true_value());
+    }
   } else {
     // Patch the call site to return true.
     __ LoadRoot(r0, Heap::kTrueValueRootIndex);
-    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // The bool_load_offset was stored in r6
+    //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+    const Register bool_load_offset = r6;
+    __ sub(r9, lr, bool_load_offset);
     // Get the boolean result location in scratch and patch it.
-    __ GetRelocatedValueLocation(inline_site, scratch);
+    __ GetRelocatedValueLocation(r9, scratch, scratch2);
     __ str(r0, MemOperand(scratch));
 
     if (!ReturnTrueFalseObject()) {
@@ -1890,12 +1411,19 @@
   if (!HasCallSiteInlineCheck()) {
     __ mov(r0, Operand(Smi::FromInt(1)));
     __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ Move(r0, factory->false_value());
+    }
   } else {
     // Patch the call site to return false.
     __ LoadRoot(r0, Heap::kFalseValueRootIndex);
-    __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // The bool_load_offset was stored in r6
+    //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+    const Register bool_load_offset = r6;
+    __ sub(r9, lr, bool_load_offset);
     // Get the boolean result location in scratch and patch it.
-    __ GetRelocatedValueLocation(inline_site, scratch);
+    __ GetRelocatedValueLocation(r9, scratch, scratch2);
     __ str(r0, MemOperand(scratch));
 
     if (!ReturnTrueFalseObject()) {
@@ -1915,19 +1443,31 @@
   // Null is not instance of anything.
   __ cmp(scratch, Operand(isolate()->factory()->null_value()));
   __ b(ne, &object_not_null);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);
 
   __ bind(&object_not_null);
   // Smi values are not instances of anything.
   __ JumpIfNotSmi(object, &object_not_null_or_smi);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);
 
   __ bind(&object_not_null_or_smi);
   // String values are not instances of anything.
   __ IsObjectJSStringType(object, scratch, &slow);
-  __ mov(r0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ Move(r0, factory->false_value());
+  } else {
+    __ mov(r0, Operand(Smi::FromInt(1)));
+  }
   __ Ret(HasArgsInRegisters() ? 0 : 2);
 
   // Slow-case.  Tail call builtin.
@@ -1953,45 +1493,23 @@
 
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
-  Register receiver;
-  if (kind() == Code::KEYED_LOAD_IC) {
-    // ----------- S t a t e -------------
-    //  -- lr    : return address
-    //  -- r0    : key
-    //  -- r1    : receiver
-    // -----------------------------------
-    __ cmp(r0, Operand(isolate()->factory()->prototype_string()));
-    __ b(ne, &miss);
-    receiver = r1;
-  } else {
-    ASSERT(kind() == Code::LOAD_IC);
-    // ----------- S t a t e -------------
-    //  -- r2    : name
-    //  -- lr    : return address
-    //  -- r0    : receiver
-    //  -- sp[0] : receiver
-    // -----------------------------------
-    receiver = r0;
-  }
+  Register receiver = LoadDescriptor::ReceiverRegister();
 
-  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, r4, &miss);
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
+                                                          r4, &miss);
   __ bind(&miss);
-  StubCompiler::TailCallBuiltin(
-      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
 }
 
 
-Register InstanceofStub::left() { return r0; }
-
-
-Register InstanceofStub::right() { return r1; }
-
-
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The displacement is the offset of the last parameter (if any)
   // relative to the frame pointer.
   const int kDisplacement =
       StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  DCHECK(r1.is(ArgumentsAccessReadDescriptor::index()));
+  DCHECK(r0.is(ArgumentsAccessReadDescriptor::parameter_count()));
 
   // Check that the key is a smi.
   Label slow;
@@ -2058,7 +1576,7 @@
   __ str(r3, MemOperand(sp, 1 * kPointerSize));
 
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
 
 
@@ -2122,12 +1640,12 @@
   __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT);
 
   // r0 = address of new object(s) (tagged)
-  // r2 = argument count (tagged)
+  // r2 = argument count (smi-tagged)
   // Get the arguments boilerplate from the current native context into r4.
   const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
   const int kAliasedOffset =
-      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+      Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
 
   __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
@@ -2137,22 +1655,23 @@
 
   // r0 = address of new object (tagged)
   // r1 = mapped parameter count (tagged)
-  // r2 = argument count (tagged)
-  // r4 = address of boilerplate object (tagged)
-  // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ ldr(r3, FieldMemOperand(r4, i));
-    __ str(r3, FieldMemOperand(r0, i));
-  }
+  // r2 = argument count (smi-tagged)
+  // r4 = address of arguments map (tagged)
+  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
+  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+  __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
 
   // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ ldr(r3, MemOperand(sp, 2 * kPointerSize));
+  __ AssertNotSmi(r3);
   const int kCalleeOffset = JSObject::kHeaderSize +
       Heap::kArgumentsCalleeIndex * kPointerSize;
   __ str(r3, FieldMemOperand(r0, kCalleeOffset));
 
   // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(r2);
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   const int kLengthOffset = JSObject::kHeaderSize +
       Heap::kArgumentsLengthIndex * kPointerSize;
@@ -2262,7 +1781,33 @@
   // r2 = argument count (tagged)
   __ bind(&runtime);
   __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+  // Return address is in lr.
+  Label slow;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+
+  // Check that the key is an array index, that is Uint32.
+  __ NonNegativeSmiTst(key);
+  __ b(ne, &slow);
+
+  // Everything is fine, call runtime.
+  __ Push(receiver, key);  // Receiver, key.
+
+  // Perform tail call to the entry.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+                        masm->isolate()),
+      2, 1);
+
+  __ bind(&slow);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
 }
 
 
@@ -2306,15 +1851,18 @@
   // Get the arguments boilerplate from the current native context.
   __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
-  __ ldr(r4, MemOperand(r4, Context::SlotOffset(
-      Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
+  __ ldr(r4, MemOperand(
+                 r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
 
-  // Copy the JS object part.
-  __ CopyFields(r0, r4, d0, JSObject::kHeaderSize / kPointerSize);
+  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset));
+  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex);
+  __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset));
+  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset));
 
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ ldr(r1, MemOperand(sp, 0 * kPointerSize));
+  __ AssertSmi(r1);
   __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize +
       Heap::kArgumentsLengthIndex * kPointerSize));
 
@@ -2356,7 +1904,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
 }
 
 
@@ -2365,7 +1913,7 @@
   // time or if regexp entry in generated code is turned off runtime switch or
   // at compilation.
 #ifdef V8_INTERPRETED_REGEXP
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
 
   // Stack frame on entry.
@@ -2519,7 +2067,8 @@
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ and_(r0, r0, Operand(kStringEncodingMask));
   __ mov(r3, Operand(r0, ASR, 2), SetCC);
-  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne);
+  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset),
+         ne);
   __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq);
 
   // (E) Carry on.  String handling is done.
@@ -2530,7 +2079,7 @@
   __ JumpIfSmi(r6, &runtime);
 
   // r1: previous index
-  // r3: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // r3: encoding of subject string (1 if one_byte, 0 if two_byte);
   // r6: code
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -2573,7 +2122,7 @@
   __ str(r0, MemOperand(sp, 1 * kPointerSize));
 
   // For arguments 4 and 3 get string length, calculate start of string data and
-  // calculate the shift of the index (0 for ASCII and 1 for two byte).
+  // calculate the shift of the index (0 for one-byte and 1 for two-byte).
   __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ eor(r3, r3, Operand(1));
   // Load the length from the original subject string from the previous stack
@@ -2740,7 +2289,7 @@
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 
   // Deferred code for string handling.
   // (6) Not a long external string?  If yes, go to (8).
@@ -2793,9 +2342,9 @@
   // r3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   // Load the cache state into r4.
@@ -2989,7 +2538,7 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
 }
 
 
@@ -3070,7 +2619,7 @@
   // r1 - function
   // r3 - slot id
   Label miss;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   EmitLoadTypeFeedbackVector(masm, r2);
@@ -3093,7 +2642,7 @@
   __ TailCallStub(&stub);
 
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+  GenerateMiss(masm);
 
   // The slow case, we need this no matter what to complete a call after a miss.
   CallFunctionNoFeedback(masm,
@@ -3112,7 +2661,7 @@
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   EmitLoadTypeFeedbackVector(masm, r2);
@@ -3124,7 +2673,7 @@
   __ b(ne, &extra_checks_or_miss);
 
   __ bind(&have_js_function);
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     EmitContinueIfStrictOrNative(masm, &cont);
     // Compute the receiver in sloppy mode.
     __ ldr(r3, MemOperand(sp, argc * kPointerSize));
@@ -3141,7 +2690,7 @@
   __ bind(&slow);
   EmitSlowCase(masm, argc, &non_function);
 
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     __ bind(&wrap);
     EmitWrapCase(masm, argc, &cont);
   }
@@ -3168,7 +2717,7 @@
 
   // We are here because tracing is on or we are going monomorphic.
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Miss);
+  GenerateMiss(masm);
 
   // the slow case
   __ bind(&slow_start);
@@ -3183,9 +2732,9 @@
 }
 
 
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
   // Get the receiver of the function from the stack; 1 ~ return address.
-  __ ldr(r4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+  __ ldr(r4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
 
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
@@ -3194,6 +2743,9 @@
     __ Push(r4, r1, r2, r3);
 
     // Call the entry.
+    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+                                               : IC::kCallIC_Customization_Miss;
+
     ExternalReference miss = ExternalReference(IC_Utility(id),
                                                masm->isolate());
     __ CallExternalReference(miss, 4);
@@ -3206,11 +2758,6 @@
 
 // StringCharCodeAtGenerator
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  Label flat_string;
-  Label ascii_string;
-  Label got_char_code;
-  Label sliced_string;
-
   // If the receiver is a smi trigger the non-string case.
   __ JumpIfSmi(object_, receiver_not_string_);
 
@@ -3262,9 +2809,9 @@
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
-    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
     // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
@@ -3286,7 +2833,7 @@
   call_helper.BeforeCall(masm);
   __ SmiTag(index_);
   __ Push(object_, index_);
-  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
   __ Move(result_, r0);
   call_helper.AfterCall(masm);
   __ jmp(&exit_);
@@ -3302,14 +2849,14 @@
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
   __ tst(code_,
          Operand(kSmiTagMask |
                  ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
   __ b(ne, &slow_case_);
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged ASCII char code.
+  // At this point code register contains smi tagged one-byte char code.
   __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_));
   __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
@@ -3335,10 +2882,7 @@
 }
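
The tst in the fast path above uses the same single-mask idea: it checks "is a
smi" and "char code <= String::kMaxOneByteCharCode (0xFF)" at once. A 32-bit
sketch, assuming a one-bit smi tag:

  bool IsOneByteCharCodeSmi(uint32_t value) {
    const uint32_t kSmiTagMask = 1u;
    const uint32_t kMaxOneByteCharCode = 0xFF;
    // A smi-tagged code c is c << 1; any bit above (0xFF << 1) means c > 0xFF.
    return (value & (kSmiTagMask | (~kMaxOneByteCharCode << 1))) == 0;
  }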
 
 
-enum CopyCharactersFlags {
-  COPY_ASCII = 1,
-  DEST_ALWAYS_ALIGNED = 2
-};
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
 
 
 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
@@ -3377,48 +2921,6 @@
 }
 
 
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
-                                    Register hash,
-                                    Register character) {
-  // hash = character + (character << 10);
-  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
-  // Untag smi seed and add the character.
-  __ add(hash, character, Operand(hash, LSR, kSmiTagSize));
-  // hash += hash << 10;
-  __ add(hash, hash, Operand(hash, LSL, 10));
-  // hash ^= hash >> 6;
-  __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
-                                            Register hash,
-                                            Register character) {
-  // hash += character;
-  __ add(hash, hash, Operand(character));
-  // hash += hash << 10;
-  __ add(hash, hash, Operand(hash, LSL, 10));
-  // hash ^= hash >> 6;
-  __ eor(hash, hash, Operand(hash, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
-                                       Register hash) {
-  // hash += hash << 3;
-  __ add(hash, hash, Operand(hash, LSL, 3));
-  // hash ^= hash >> 11;
-  __ eor(hash, hash, Operand(hash, LSR, 11));
-  // hash += hash << 15;
-  __ add(hash, hash, Operand(hash, LSL, 15));
-
-  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC);
-
-  // if (hash == 0) hash = 27;
-  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq);
-}
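
The three helpers removed above are the classic one-at-a-time string hash,
split into init / add-character / finalize steps. A self-contained scalar
sketch of the same sequence (the seed is the untagged hash seed root; the mask
width is an assumption, kZeroHash == 27 per the comment above):

  #include <stdint.h>
  uint32_t OneByteStringHash(const uint8_t* chars, int length, uint32_t seed) {
    uint32_t hash = seed;
    for (int i = 0; i < length; i++) {
      hash += chars[i];        // GenerateHashAddCharacter
      hash += hash << 10;
      hash ^= hash >> 6;
    }
    hash += hash << 3;         // GenerateHashGetHash
    hash ^= hash >> 11;
    hash += hash << 15;
    hash &= (1u << 30) - 1;    // String::kHashBitMask (width assumed)
    return hash == 0 ? 27 : hash;  // StringHasher::kZeroHash
  }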
-
-
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -3541,7 +3043,7 @@
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ tst(r1, Operand(kStringEncodingMask));
     __ b(eq, &two_byte_slice);
-    __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime);
+    __ AllocateOneByteSlicedString(r0, r2, r6, r4, &runtime);
     __ jmp(&set_slice_header);
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime);
@@ -3584,8 +3086,8 @@
   __ tst(r1, Operand(kStringEncodingMask));
   __ b(eq, &two_byte_sequential);
 
-  // Allocate and copy the resulting ASCII string.
-  __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime);
+  // Allocate and copy the resulting one-byte string.
+  __ AllocateOneByteString(r0, r2, r4, r6, r1, &runtime);
 
   // Locate first character of substring to copy.
   __ add(r5, r5, r3);
@@ -3627,7 +3129,7 @@
 
   // Just jump to runtime to create the substring.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 
   __ bind(&single_char);
   // r0: original string
@@ -3644,12 +3146,9 @@
 }
 
 
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                                      Register left,
-                                                      Register right,
-                                                      Register scratch1,
-                                                      Register scratch2,
-                                                      Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3) {
   Register length = scratch1;
 
   // Compare lengths.
@@ -3673,9 +3172,8 @@
 
   // Compare characters.
   __ bind(&compare_chars);
-  GenerateAsciiCharsCompareLoop(masm,
-                                left, right, length, scratch2, scratch3,
-                                &strings_not_equal);
+  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
+                                  &strings_not_equal);
 
   // Characters are equal.
   __ mov(r0, Operand(Smi::FromInt(EQUAL)));
@@ -3683,13 +3181,9 @@
 }
 
 
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register left,
-                                                        Register right,
-                                                        Register scratch1,
-                                                        Register scratch2,
-                                                        Register scratch3,
-                                                        Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3, Register scratch4) {
   Label result_not_equal, compare_lengths;
   // Find minimum length and length difference.
   __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset));
@@ -3703,13 +3197,12 @@
   __ b(eq, &compare_lengths);
 
   // Compare loop.
-  GenerateAsciiCharsCompareLoop(masm,
-                                left, right, min_length, scratch2, scratch4,
-                                &result_not_equal);
+  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                  scratch4, &result_not_equal);
 
   // Compare lengths - strings up to min-length are equal.
   __ bind(&compare_lengths);
-  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   // Use length_delta as result if it's zero.
   __ mov(r0, Operand(length_delta), SetCC);
   __ bind(&result_not_equal);
@@ -3721,14 +3214,9 @@
 }
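
The shape of the routine above, in scalar form: compare up to min(length)
characters, and when all of those match let the length difference decide, so
equal-length, equal-content strings return EQUAL (0):

  int CompareFlatOneByte(const uint8_t* left, int left_len,
                         const uint8_t* right, int right_len) {
    int min_len = left_len < right_len ? left_len : right_len;
    for (int i = 0; i < min_len; i++) {
      if (left[i] != right[i]) return left[i] < right[i] ? -1 : 1;
    }
    int delta = left_len - right_len;  // length_delta above
    return delta == 0 ? 0 : (delta < 0 ? -1 : 1);
  }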
 
 
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
-    MacroAssembler* masm,
-    Register left,
-    Register right,
-    Register length,
-    Register scratch1,
-    Register scratch2,
-    Label* chars_not_equal) {
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch1, Register scratch2, Label* chars_not_equal) {
   // Change index to run from -length to -1 by adding length to string
   // start. This means that loop ends when index reaches zero, which
   // doesn't need an additional compare.
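
The same indexing trick in portable form: bias both pointers one past the end
and count the index up from -length, so the loop's only exit test is
"index == 0":

  bool OneByteCharsEqual(const uint8_t* left, const uint8_t* right,
                         int length) {
    left += length;   // one past the end
    right += length;
    for (int index = -length; index != 0; index++) {
      if (left[index] != right[index]) return false;  // chars_not_equal
    }
    return true;
  }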
@@ -3774,18 +3262,18 @@
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ASCII strings.
-  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime);
+  // Check that both objects are sequential one-byte strings.
+  __ JumpIfNotBothSequentialOneByteStrings(r1, r0, r2, r3, &runtime);
 
-  // Compare flat ASCII strings natively. Remove arguments from stack first.
+  // Compare flat one-byte strings natively. Remove arguments from stack first.
   __ IncrementCounter(counters->string_compare_native(), 1, r2, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));
-  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5);
+  StringHelper::GenerateCompareFlatOneByteStrings(masm, r1, r0, r2, r3, r4, r5);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
@@ -3815,13 +3303,13 @@
 
   // Tail call into the stub that handles binary operations with allocation
   // sites.
-  BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
   __ TailCallStub(&stub);
 }
 
 
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::SMI);
   Label miss;
   __ orr(r2, r1, r0);
   __ JumpIfNotSmi(r2, &miss);
@@ -3841,17 +3329,17 @@
 }
 
 
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
 
   Label generic_stub;
   Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
 
-  if (left_ == CompareIC::SMI) {
+  if (left() == CompareICState::SMI) {
     __ JumpIfNotSmi(r1, &miss);
   }
-  if (right_ == CompareIC::SMI) {
+  if (right() == CompareICState::SMI) {
     __ JumpIfNotSmi(r0, &miss);
   }
 
@@ -3893,12 +3381,12 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
-                     CompareIC::GENERIC);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
   __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
     __ b(ne, &miss);
     __ JumpIfSmi(r1, &unordered);
@@ -3908,7 +3396,7 @@
   }
 
   __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ CompareRoot(r1, Heap::kUndefinedValueRootIndex);
     __ b(eq, &unordered);
   }
@@ -3918,8 +3406,8 @@
 }
 
 
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
   Label miss;
 
   // Registers containing left and right operands respectively.
@@ -3945,7 +3433,7 @@
   __ cmp(left, right);
   // Make sure r0 is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(r0));
+  DCHECK(right.is(r0));
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
@@ -3956,9 +3444,9 @@
 }
 
 
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::UNIQUE_NAME);
-  ASSERT(GetCondition() == eq);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  DCHECK(GetCondition() == eq);
   Label miss;
 
   // Registers containing left and right operands respectively.
@@ -3977,14 +3465,14 @@
   __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss);
-  __ JumpIfNotUniqueName(tmp2, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
 
   // Unique names are compared by identity.
   __ cmp(left, right);
   // Make sure r0 is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(r0));
+  DCHECK(right.is(r0));
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq);
@@ -3995,11 +3483,11 @@
 }
 
 
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
+  bool equality = Token::IsEqualityOp(op());
 
   // Registers containing left and right operands respectively.
   Register left = r1;
@@ -4036,28 +3524,28 @@
   // because we already know they are not identical. We know they are both
   // strings.
   if (equality) {
-    ASSERT(GetCondition() == eq);
+    DCHECK(GetCondition() == eq);
     STATIC_ASSERT(kInternalizedTag == 0);
     __ orr(tmp3, tmp1, Operand(tmp2));
     __ tst(tmp3, Operand(kIsNotInternalizedMask));
     // Make sure r0 is non-zero. At this point input operands are
     // guaranteed to be non-zero.
-    ASSERT(right.is(r0));
+    DCHECK(right.is(r0));
     __ Ret(eq);
   }
 
-  // Check that both strings are sequential ASCII.
+  // Check that both strings are sequential one-byte.
   Label runtime;
-  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
-      tmp1, tmp2, tmp3, tmp4, &runtime);
+  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+                                                    &runtime);
 
-  // Compare flat ASCII strings. Returns when done.
+  // Compare flat one-byte strings. Returns when done.
   if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2, tmp3);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
+                                                  tmp3);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, left, right, tmp1, tmp2, tmp3, tmp4);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+                                                    tmp2, tmp3, tmp4);
   }
 
   // Handle more complex cases in runtime.
@@ -4066,7 +3554,7 @@
   if (equality) {
     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
   } else {
-    __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
   }
 
   __ bind(&miss);
@@ -4074,8 +3562,8 @@
 }
 
 
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::OBJECT);
   Label miss;
   __ and_(r2, r1, Operand(r0));
   __ JumpIfSmi(r2, &miss);
@@ -4085,7 +3573,7 @@
   __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE);
   __ b(ne, &miss);
 
-  ASSERT(GetCondition() == eq);
+  DCHECK(GetCondition() == eq);
   __ sub(r0, r0, Operand(r1));
   __ Ret();
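
The and_/JumpIfSmi pair above leans on the tag layout: with kSmiTag == 0,
bit 0 is 0 for smis and 1 for heap objects, so the AND of two tagged words has
bit 0 set only when both operands are heap objects. Equivalently:

  bool BothHeapObjects(uint32_t lhs, uint32_t rhs) {
    return ((lhs & rhs) & 1u) != 0;  // kSmiTagMask == 1, kSmiTag == 0
  }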
 
@@ -4094,7 +3582,7 @@
 }
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
   Label miss;
   __ and_(r2, r1, Operand(r0));
   __ JumpIfSmi(r2, &miss);
@@ -4113,8 +3601,7 @@
 }
 
 
-
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   {
     // Call the runtime system in a fresh internal frame.
     ExternalReference miss =
@@ -4123,7 +3610,7 @@
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     __ Push(r1, r0);
     __ Push(lr, r1, r0);
-    __ mov(ip, Operand(Smi::FromInt(op_)));
+    __ mov(ip, Operand(Smi::FromInt(op())));
     __ push(ip);
     __ CallExternalReference(miss, 3);
     // Compute the entry point of the rewritten stub.
@@ -4164,7 +3651,7 @@
                                                       Register properties,
                                                       Handle<Name> name,
                                                       Register scratch0) {
-  ASSERT(name->IsUniqueName());
+  DCHECK(name->IsUniqueName());
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
@@ -4181,17 +3668,17 @@
         Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
 
     Register entity_name = scratch0;
     // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
+    DCHECK_EQ(kSmiTagSize, 1);
     Register tmp = properties;
     __ add(tmp, properties, Operand(index, LSL, 1));
     __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
 
-    ASSERT(!tmp.is(entity_name));
+    DCHECK(!tmp.is(entity_name));
     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
     __ cmp(entity_name, tmp);
     __ b(eq, done);
@@ -4211,7 +3698,7 @@
     __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ ldrb(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueName(entity_name, miss);
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
     __ bind(&good);
 
     // Restore the properties.
@@ -4247,10 +3734,10 @@
                                                       Register name,
                                                       Register scratch1,
                                                       Register scratch2) {
-  ASSERT(!elements.is(scratch1));
-  ASSERT(!elements.is(scratch2));
-  ASSERT(!name.is(scratch1));
-  ASSERT(!name.is(scratch2));
+  DCHECK(!elements.is(scratch1));
+  DCHECK(!elements.is(scratch2));
+  DCHECK(!name.is(scratch1));
+  DCHECK(!name.is(scratch2));
 
   __ AssertName(name);
 
@@ -4269,7 +3756,7 @@
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
       // shifted in the following and instruction.
-      ASSERT(NameDictionary::GetProbeOffset(i) <
+      DCHECK(NameDictionary::GetProbeOffset(i) <
              1 << (32 - Name::kHashFieldOffset));
       __ add(scratch2, scratch2, Operand(
           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@@ -4277,7 +3764,7 @@
     __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
 
     // Scale the index by multiplying by the element size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     // scratch2 = scratch2 * 3.
     __ add(scratch2, scratch2, Operand(scratch2, LSL, 1));
 
@@ -4295,7 +3782,7 @@
 
   __ stm(db_w, sp, spill_mask);
   if (name.is(r0)) {
-    ASSERT(!elements.is(r1));
+    DCHECK(!elements.is(r1));
     __ Move(r1, name);
     __ Move(r0, elements);
   } else {
@@ -4351,7 +3838,7 @@
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
       // shifted in the following and instruction.
-      ASSERT(NameDictionary::GetProbeOffset(i) <
+      DCHECK(NameDictionary::GetProbeOffset(i) <
              1 << (32 - Name::kHashFieldOffset));
       __ add(index, hash, Operand(
           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@@ -4361,10 +3848,10 @@
     __ and_(index, mask, Operand(index, LSR, Name::kHashShift));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ add(index, index, Operand(index, LSL, 1));  // index *= 3.
 
-    ASSERT_EQ(kSmiTagSize, 1);
+    DCHECK_EQ(kSmiTagSize, 1);
     __ add(index, dictionary, Operand(index, LSL, 2));
     __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset));
 
@@ -4376,12 +3863,12 @@
     __ cmp(entry_key, Operand(key));
     __ b(eq, &in_dictionary);
 
-    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
       // Check if the entry name is not a unique name.
       __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ ldrb(entry_key,
               FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
     }
   }
 
@@ -4389,7 +3876,7 @@
   // If we are doing negative lookup then probing failure should be
   // treated as a lookup success. For positive lookup probing failure
   // should be treated as lookup failure.
-  if (mode_ == POSITIVE_LOOKUP) {
+  if (mode() == POSITIVE_LOOKUP) {
     __ mov(result, Operand::Zero());
     __ Ret();
   }
@@ -4435,11 +3922,8 @@
     __ b(&skip_to_incremental_compacting);
   }
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   }
   __ Ret();
@@ -4452,8 +3936,8 @@
 
   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
   // Will be checked in IncrementalMarking::ActivateGeneratedStub.
-  ASSERT(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
-  ASSERT(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
+  DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12));
+  DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12));
   PatchBranchIntoNop(masm, 0);
   PatchBranchIntoNop(masm, Assembler::kInstrSize);
 }
@@ -4462,7 +3946,7 @@
 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   regs_.Save(masm);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
     Label dont_need_remembered_set;
 
     __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -4482,10 +3966,7 @@
         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
     InformIncrementalMarker(masm);
     regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
 
     __ bind(&dont_need_remembered_set);
@@ -4500,13 +3981,13 @@
 
 
 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   int argument_count = 3;
   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   Register address =
       r0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
-  ASSERT(!address.is(regs_.object()));
-  ASSERT(!address.is(r0));
+  DCHECK(!address.is(regs_.object()));
+  DCHECK(!address.is(r0));
   __ Move(address, regs_.address());
   __ Move(r0, regs_.object());
   __ Move(r1, address);
@@ -4516,7 +3997,7 @@
   __ CallCFunction(
       ExternalReference::incremental_marking_record_write_function(isolate()),
       argument_count);
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
 }
 
 
@@ -4544,10 +4025,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ Ret();
@@ -4588,10 +4066,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ Ret();
@@ -4674,7 +4149,7 @@
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   __ ldr(r1, MemOperand(fp, parameter_count_offset));
-  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+  if (function_mode() == JS_FUNCTION_STUB_MODE) {
     __ add(r1, r1, Operand(1));
   }
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
@@ -4684,6 +4159,20 @@
 }
 
 
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorLoadStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorKeyedLoadStub stub(isolate());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -4712,7 +4201,7 @@
   // We also save lr, so the count here is one higher than the mask indicates.
   const int32_t kNumSavedRegs = 7;
 
-  ASSERT((kCallerSaved & kSavedRegs) == kCallerSaved);
+  DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved);
 
   // Save all caller-save registers as this may be called from anywhere.
   __ stm(db_w, sp, kSavedRegs | lr.bit());
@@ -4728,7 +4217,7 @@
   int frame_alignment = masm->ActivationFrameAlignment();
   if (frame_alignment > kPointerSize) {
     __ mov(r5, sp);
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     __ and_(sp, sp, Operand(-frame_alignment));
   }
 
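The stack-alignment sequence above relies on a standard power-of-two trick: for an alignment A that is a power of two, -A in two's complement has the low log2(A) bits clear, so ANDing with it rounds an address down to an A-byte boundary. A minimal sketch (illustrative, not V8 API):

    // Round sp down to an `align`-byte boundary; `align` must be a power of two.
    uint32_t AlignDown(uint32_t sp, uint32_t align) {
      return sp & ~(align - 1);  // identical to sp & -align
    }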
@@ -4792,12 +4281,12 @@
   // sp[0] - last argument
   Label normal_sequence;
   if (mode == DONT_OVERRIDE) {
-    ASSERT(FAST_SMI_ELEMENTS == 0);
-    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-    ASSERT(FAST_ELEMENTS == 2);
-    ASSERT(FAST_HOLEY_ELEMENTS == 3);
-    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
-    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+    DCHECK(FAST_SMI_ELEMENTS == 0);
+    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+    DCHECK(FAST_ELEMENTS == 2);
+    DCHECK(FAST_HOLEY_ELEMENTS == 3);
+    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
     // Is the low bit set? If so, we are holey and that is good.
     __ tst(r3, Operand(1));
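The DCHECKs above pin down the ElementsKind numbering that the single-bit test depends on: packed kinds are even, and each holey variant is the next odd value. A sketch of that layout (values mirror the assertions; IsHoley is illustrative):

    enum ElementsKind {
      FAST_SMI_ELEMENTS = 0,    FAST_HOLEY_SMI_ELEMENTS = 1,
      FAST_ELEMENTS = 2,        FAST_HOLEY_ELEMENTS = 3,
      FAST_DOUBLE_ELEMENTS = 4, FAST_HOLEY_DOUBLE_ELEMENTS = 5
    };
    // "tst r3, #1" in the generated code is exactly this predicate.
    inline bool IsHoley(ElementsKind kind) { return (kind & 1) != 0; }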
@@ -4904,7 +4393,7 @@
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
-  if (argument_count_ == ANY) {
+  if (argument_count() == ANY) {
     Label not_zero_case, not_one_case;
     __ tst(r0, r0);
     __ b(ne, &not_zero_case);
@@ -4917,11 +4406,11 @@
 
     __ bind(&not_one_case);
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
-  } else if (argument_count_ == NONE) {
+  } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count_ == ONE) {
+  } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count_ == MORE_THAN_ONE) {
+  } else if (argument_count() == MORE_THAN_ONE) {
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   } else {
     UNREACHABLE();
@@ -4931,7 +4420,7 @@
 
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- r0 : argc (only if argument_count_ == ANY)
+  //  -- r0 : argc (only if argument_count() == ANY)
   //  -- r1 : constructor
   //  -- r2 : AllocationSite or undefined
   //  -- sp[0] : return address
@@ -5065,9 +4554,9 @@
   Register api_function_address = r1;
   Register context = cp;
 
-  int argc = ArgumentBits::decode(bit_field_);
-  bool is_store = IsStoreBits::decode(bit_field_);
-  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+  int argc = this->argc();
+  bool is_store = this->is_store();
+  bool call_data_undefined = this->call_data_undefined();
 
   typedef FunctionCallbackArguments FCA;
 
@@ -5116,7 +4605,7 @@
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
-  ASSERT(!api_function_address.is(r0) && !scratch.is(r0));
+  DCHECK(!api_function_address.is(r0) && !scratch.is(r0));
   // r0 = FunctionCallbackInfo&
   // Arguments is after the return address.
   __ add(r0, sp, Operand(1 * kPointerSize));
@@ -5164,7 +4653,8 @@
   //  -- r2                     : api_function_address
   // -----------------------------------
 
-  Register api_function_address = r2;
+  Register api_function_address = ApiGetterDescriptor::function_address();
+  DCHECK(api_function_address.is(r2));
 
   __ mov(r0, sp);  // r0 = Handle<Name>
   __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = PCA
diff --git a/src/arm/code-stubs-arm.h b/src/arm/code-stubs-arm.h
index 5dde337..727bb1b 100644
--- a/src/arm/code-stubs-arm.h
+++ b/src/arm/code-stubs-arm.h
@@ -5,8 +5,6 @@
 #ifndef V8_ARM_CODE_STUBS_ARM_H_
 #define V8_ARM_CODE_STUBS_ARM_H_
 
-#include "src/ic-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -14,24 +12,6 @@
 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
 
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
-  StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
-      : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
-
-  void Generate(MacroAssembler* masm);
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
 class StringHelper : public AllStatic {
  public:
   // Generate code for copying a large number of characters. This function
@@ -45,71 +25,24 @@
                                      Register scratch,
                                      String::Encoding encoding);
 
+  // Compares two flat one-byte strings and returns result in r0.
+  static void GenerateCompareFlatOneByteStrings(
+      MacroAssembler* masm, Register left, Register right, Register scratch1,
+      Register scratch2, Register scratch3, Register scratch4);
 
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character);
-
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character);
-
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
-  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
-  Major MajorKey() { return SubString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
-  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
-  // Compares two flat ASCII strings and returns result in r0.
-  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                              Register left,
-                                              Register right,
+  // Compares two flat one-byte strings for equality and returns result in r0.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
                                               Register scratch1,
                                               Register scratch2,
-                                              Register scratch3,
-                                              Register scratch4);
-
-  // Compares two flat ASCII strings for equality and returns result
-  // in r0.
-  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Register scratch3);
+                                              Register scratch3);
 
  private:
-  virtual Major MajorKey() { return StringCompare; }
-  virtual int MinorKey() { return 0; }
-  virtual void Generate(MacroAssembler* masm);
+  static void GenerateOneByteCharsCompareLoop(
+      MacroAssembler* masm, Register left, Register right, Register length,
+      Register scratch1, Register scratch2, Label* chars_not_equal);
 
-  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register length,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* chars_not_equal);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
@@ -118,36 +51,36 @@
 // so you don't have to set up the frame.
 class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
  public:
-  WriteInt32ToHeapNumberStub(Isolate* isolate,
-                             Register the_int,
-                             Register the_heap_number,
-                             Register scratch)
-      : PlatformCodeStub(isolate),
-        the_int_(the_int),
-        the_heap_number_(the_heap_number),
-        scratch_(scratch) { }
+  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
+                             Register the_heap_number, Register scratch)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = IntRegisterBits::encode(the_int.code()) |
+                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
+                 ScratchRegisterBits::encode(scratch.code());
+  }
 
   static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
 
  private:
-  Register the_int_;
-  Register the_heap_number_;
-  Register scratch_;
+  Register the_int() const {
+    return Register::from_code(IntRegisterBits::decode(minor_key_));
+  }
+
+  Register the_heap_number() const {
+    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
+  }
+
+  Register scratch() const {
+    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
+  }
 
   // Minor key encoding in 16 bits.
   class IntRegisterBits: public BitField<int, 0, 4> {};
   class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
   class ScratchRegisterBits: public BitField<int, 8, 4> {};
 
-  Major MajorKey() { return WriteInt32ToHeapNumber; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return IntRegisterBits::encode(the_int_.code())
-           | HeapNumberRegisterBits::encode(the_heap_number_.code())
-           | ScratchRegisterBits::encode(scratch_.code());
-  }
-
-  void Generate(MacroAssembler* masm);
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
 };
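The stub no longer keeps Register fields; everything is packed into minor_key_ through the three 4-bit BitField slots declared above. A minimal sketch of the BitField encode/decode pattern (simplified from V8's template; field positions follow the declarations above):

    template <class T, int kShift, int kSize>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key >> kShift) & ((1u << kSize) - 1));
      }
    };
    // e.g. packing registers r2, r5, r6 into one 16-bit minor key:
    //   key = IntRegisterBits::encode(2) |
    //         HeapNumberRegisterBits::encode(5) |
    //         ScratchRegisterBits::encode(6);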
 
 
@@ -160,16 +93,19 @@
                   RememberedSetAction remembered_set_action,
                   SaveFPRegsMode fp_mode)
       : PlatformCodeStub(isolate),
-        object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
         regs_(object,   // An input reg.
               address,  // An input reg.
               value) {  // One scratch reg.
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
   }
 
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
   enum Mode {
     STORE_BUFFER_ONLY,
     INCREMENTAL,
@@ -180,12 +116,12 @@
 
   static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
     masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
-    ASSERT(Assembler::IsTstImmediate(masm->instr_at(pos)));
+    DCHECK(Assembler::IsTstImmediate(masm->instr_at(pos)));
   }
 
   static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
     masm->instr_at_put(pos, (masm->instr_at(pos) & ~(B24 | B20)) | B27);
-    ASSERT(Assembler::IsBranch(masm->instr_at(pos)));
+    DCHECK(Assembler::IsBranch(masm->instr_at(pos)));
   }
 
   static Mode GetMode(Code* stub) {
@@ -197,13 +133,13 @@
       return INCREMENTAL;
     }
 
-    ASSERT(Assembler::IsTstImmediate(first_instruction));
+    DCHECK(Assembler::IsTstImmediate(first_instruction));
 
     if (Assembler::IsBranch(second_instruction)) {
       return INCREMENTAL_COMPACTION;
     }
 
-    ASSERT(Assembler::IsTstImmediate(second_instruction));
+    DCHECK(Assembler::IsTstImmediate(second_instruction));
 
     return STORE_BUFFER_ONLY;
   }
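GetMode never consults stored state; the mode is recovered purely from the first two generated instructions, which Patch below rewrites in place. The decision table, restated:

    // first instruction     second instruction     mode
    // branch                (anything)             INCREMENTAL
    // tst (nop'd branch)    branch                 INCREMENTAL_COMPACTION
    // tst (nop'd branch)    tst (nop'd branch)     STORE_BUFFER_ONLY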
@@ -214,24 +150,27 @@
                         stub->instruction_size());
     switch (mode) {
       case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
+        DCHECK(GetMode(stub) == INCREMENTAL ||
                GetMode(stub) == INCREMENTAL_COMPACTION);
         PatchBranchIntoNop(&masm, 0);
         PatchBranchIntoNop(&masm, Assembler::kInstrSize);
         break;
       case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         PatchNopIntoBranch(&masm, 0);
         break;
       case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         PatchNopIntoBranch(&masm, Assembler::kInstrSize);
         break;
     }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 2 * Assembler::kInstrSize);
+    DCHECK(GetMode(stub) == mode);
+    CpuFeatures::FlushICache(stub->instruction_start(),
+                             2 * Assembler::kInstrSize);
   }
 
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
  private:
   // This is a helper class for freeing up 3 scratch registers.  The input is
   // two registers that must be preserved and one scratch register provided by
@@ -244,12 +183,12 @@
         : object_(object),
           address_(address),
           scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      DCHECK(!AreAliased(scratch0, object, address, no_reg));
       scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
     }
 
     void Save(MacroAssembler* masm) {
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
       // We don't have to save scratch0_ because it was given to us as
       // a scratch register.
       masm->push(scratch1_);
@@ -296,7 +235,9 @@
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  void Generate(MacroAssembler* masm);
+  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+  virtual void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -304,33 +245,40 @@
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_) |
-        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
-  }
-
   void Activate(Code* code) {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
   class ObjectBits: public BitField<int, 0, 4> {};
   class ValueBits: public BitField<int, 4, 4> {};
   class AddressBits: public BitField<int, 8, 4> {};
   class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
   class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
 
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
   Label slow_;
   RegisterAllocation regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
 };
 
 
@@ -342,14 +290,13 @@
 class DirectCEntryStub: public PlatformCodeStub {
  public:
   explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-  void Generate(MacroAssembler* masm);
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
-  Major MajorKey() { return DirectCEntry; }
-  int MinorKey() { return 0; }
-
   bool NeedsImmovableCode() { return true; }
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
 };
 
 
@@ -358,9 +305,9 @@
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
   NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
-      : PlatformCodeStub(isolate), mode_(mode) { }
-
-  void Generate(MacroAssembler* masm);
+      : PlatformCodeStub(isolate) {
+    minor_key_ = LookupModeBits::encode(mode);
+  }
 
   static void GenerateNegativeLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -392,30 +339,14 @@
       NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return NameDictionaryLookup; }
-
-  int MinorKey() {
-    return LookupModeBits::encode(mode_);
-  }
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
 
   class LookupModeBits: public BitField<LookupMode, 0, 1> {};
 
-  LookupMode mode_;
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
 };
 
-
-struct PlatformCallInterfaceDescriptor {
-  explicit PlatformCallInterfaceDescriptor(
-      TargetAddressStorageMode storage_mode)
-      : storage_mode_(storage_mode) { }
-
-  TargetAddressStorageMode storage_mode() { return storage_mode_; }
-
- private:
-  TargetAddressStorageMode storage_mode_;
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_ARM_CODE_STUBS_ARM_H_
diff --git a/src/arm/codegen-arm.cc b/src/arm/codegen-arm.cc
index 7835a6b..d050399 100644
--- a/src/arm/codegen-arm.cc
+++ b/src/arm/codegen-arm.cc
@@ -6,9 +6,9 @@
 
 #if V8_TARGET_ARCH_ARM
 
+#include "src/arm/simulator-arm.h"
 #include "src/codegen.h"
 #include "src/macro-assembler.h"
-#include "src/arm/simulator-arm.h"
 
 namespace v8 {
 namespace internal {
@@ -29,7 +29,8 @@
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &std::exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::exp;
   ExternalReference::InitializeMathExpData();
 
@@ -64,10 +65,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
 
 #if !defined(USE_SIMULATOR)
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
@@ -84,7 +85,8 @@
 #else
   if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return stub;
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -223,10 +225,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<MemCopyUint8Function>(buffer);
 #endif
 }
@@ -240,7 +242,8 @@
 #else
   if (!CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) return stub;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return stub;
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -311,8 +314,8 @@
   CodeDesc desc;
   masm.GetCode(&desc);
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
 
   return FUNCTION_CAST<MemCopyUint16Uint8Function>(buffer);
 #endif
@@ -324,7 +327,8 @@
   return &std::sqrt;
 #else
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::sqrt;
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -336,10 +340,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 #endif
 }
@@ -352,14 +356,14 @@
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
+  DCHECK(!masm->has_frame());
   masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
+  DCHECK(masm->has_frame());
   masm->set_has_frame(false);
 }
 
@@ -370,26 +374,28 @@
 #define __ ACCESS_MASM(masm)
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : key
-  //  -- r2    : receiver
-  //  -- lr    : return address
-  //  -- r3    : target map, scratch for subsequent call
-  //  -- r4    : scratch (elements)
-  // -----------------------------------
+  Register scratch_elements = r4;
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     scratch_elements));
+
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(r2, r4, allocation_memento_found);
+    DCHECK(allocation_memento_found != NULL);
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch_elements, allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
+  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      r3,
+                      target_map,
                       r9,
                       kLRHasNotBeenSaved,
                       kDontSaveFPRegs,
@@ -399,87 +405,103 @@
 
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : key
-  //  -- r2    : receiver
-  //  -- lr    : return address
-  //  -- r3    : target map, scratch for subsequent call
-  //  -- r4    : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register lr contains the return address.
   Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  Register elements = r4;
+  Register length = r5;
+  Register array = r6;
+  Register array_end = array;
+
+  // target_map parameter can be clobbered.
+  Register scratch1 = target_map;
+  Register scratch2 = r9;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     elements, length, array, scratch2));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
   __ b(eq, &only_change_map);
 
   __ push(lr);
-  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  // r5: number of elements (smi-tagged)
+  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedDoubleArray.
   // Use lr as a temporary register.
-  __ mov(lr, Operand(r5, LSL, 2));
+  __ mov(lr, Operand(length, LSL, 2));
   __ add(lr, lr, Operand(FixedDoubleArray::kHeaderSize));
-  __ Allocate(lr, r6, r4, r9, &gc_required, DOUBLE_ALIGNMENT);
-  // r6: destination FixedDoubleArray, not tagged as heap object.
-  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
+  __ Allocate(lr, array, elements, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+  // array: destination FixedDoubleArray, not tagged as heap object.
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   // r4: source FixedArray.
 
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex);
-  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
+  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
   // Update receiver's map.
-  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+  __ str(scratch2, MemOperand(array, HeapObject::kMapOffset));
 
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
+  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      r3,
-                      r9,
+                      target_map,
+                      scratch2,
                       kLRHasBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ add(r3, r6, Operand(kHeapObjectTag));
-  __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ RecordWriteField(r2,
+  __ add(scratch1, array, Operand(kHeapObjectTag));
+  __ str(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      r3,
-                      r9,
+                      scratch1,
+                      scratch2,
                       kLRHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 
   // Prepare for conversion loop.
-  __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(r9, r6, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(r6, r9, Operand(r5, LSL, 2));
-  __ mov(r4, Operand(kHoleNanLower32));
-  __ mov(r5, Operand(kHoleNanUpper32));
-  // r3: begin of source FixedArray element fields, not tagged
-  // r4: kHoleNanLower32
-  // r5: kHoleNanUpper32
-  // r6: end of destination FixedDoubleArray, not tagged
-  // r9: begin of FixedDoubleArray element fields, not tagged
+  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(array_end, scratch2, Operand(length, LSL, 2));
+
+  // Repurpose registers no longer in use.
+  Register hole_lower = elements;
+  Register hole_upper = length;
+
+  __ mov(hole_lower, Operand(kHoleNanLower32));
+  __ mov(hole_upper, Operand(kHoleNanUpper32));
+  // scratch1: begin of source FixedArray element fields, not tagged
+  // hole_lower: kHoleNanLower32
+  // hole_upper: kHoleNanUpper32
+  // array_end: end of destination FixedDoubleArray, not tagged
+  // scratch2: begin of FixedDoubleArray element fields, not tagged
 
   __ b(&entry);
 
   __ bind(&only_change_map);
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
+  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      r3,
-                      r9,
+                      target_map,
+                      scratch2,
                       kLRHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
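Two details in the smi-to-double transition above are worth spelling out. First, the allocation size "length, LSL, 2": length is still smi-tagged (2 * n), so shifting left by 2 yields 8 * n, one double per element. Second, holes are stored as a reserved NaN bit pattern, which is why the loops store one with Strd(hole_lower, hole_upper) and later recognize one by comparing only the upper word against kHoleNanUpper32. A hedged sketch (constant names as used above; the actual bit values are V8-internal):

    // A smi-tagged length n is encoded as 2 * n, so:
    //   (2 * n) << 2 == 8 * n == n * sizeof(double)
    uint32_t payload_bytes = smi_length << 2;

    // The hole is a NaN that ordinary arithmetic never produces; its
    // 64 bits are (kHoleNanUpper32 << 32) | kHoleNanLower32.
    uint64_t hole_bits =
        (static_cast<uint64_t>(kHoleNanUpper32) << 32) | kHoleNanLower32;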
@@ -493,15 +515,15 @@
 
   // Convert and copy elements.
   __ bind(&loop);
-  __ ldr(lr, MemOperand(r3, 4, PostIndex));
+  __ ldr(lr, MemOperand(scratch1, 4, PostIndex));
   // lr: current element
   __ UntagAndJumpIfNotSmi(lr, lr, &convert_hole);
 
   // Normal smi, convert to double and store.
   __ vmov(s0, lr);
   __ vcvt_f64_s32(d0, s0);
-  __ vstr(d0, r9, 0);
-  __ add(r9, r9, Operand(8));
+  __ vstr(d0, scratch2, 0);
+  __ add(scratch2, scratch2, Operand(8));
   __ b(&entry);
 
   // Hole found, store the-hole NaN.
@@ -513,10 +535,10 @@
     __ CompareRoot(lr, Heap::kTheHoleValueRootIndex);
     __ Assert(eq, kObjectFoundInSmiOnlyArray);
   }
-  __ Strd(r4, r5, MemOperand(r9, 8, PostIndex));
+  __ Strd(hole_lower, hole_upper, MemOperand(scratch2, 8, PostIndex));
 
   __ bind(&entry);
-  __ cmp(r9, r6);
+  __ cmp(scratch2, array_end);
   __ b(lt, &loop);
 
   __ pop(lr);
@@ -525,80 +547,104 @@
 
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : key
-  //  -- r2    : receiver
-  //  -- lr    : return address
-  //  -- r3    : target map, scratch for subsequent call
-  //  -- r4    : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register lr contains the return address.
   Label entry, loop, convert_hole, gc_required, only_change_map;
+  Register elements = r4;
+  Register array = r6;
+  Register length = r5;
+  Register scratch = r9;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     elements, array, length, scratch));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(r2, r4, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ CompareRoot(r4, Heap::kEmptyFixedArrayRootIndex);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
   __ b(eq, &only_change_map);
 
   __ push(lr);
-  __ Push(r3, r2, r1, r0);
-  __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset));
-  // r4: source FixedDoubleArray
-  // r5: number of elements (smi-tagged)
+  __ Push(target_map, receiver, key, value);
+  __ ldr(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedDoubleArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedArray.
-  __ mov(r0, Operand(FixedDoubleArray::kHeaderSize));
-  __ add(r0, r0, Operand(r5, LSL, 1));
-  __ Allocate(r0, r6, r3, r9, &gc_required, NO_ALLOCATION_FLAGS);
-  // r6: destination FixedArray, not tagged as heap object
+  // Re-use value and target_map registers, as they have been saved on the
+  // stack.
+  Register array_size = value;
+  Register allocate_scratch = target_map;
+  __ mov(array_size, Operand(FixedDoubleArray::kHeaderSize));
+  __ add(array_size, array_size, Operand(length, LSL, 1));
+  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+              NO_ALLOCATION_FLAGS);
+  // array: destination FixedArray, not tagged as heap object
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
-  __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset));
-  __ str(r9, MemOperand(r6, HeapObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+  __ str(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ str(scratch, MemOperand(array, HeapObject::kMapOffset));
 
   // Prepare for conversion loop.
-  __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
-  __ add(r3, r6, Operand(FixedArray::kHeaderSize));
-  __ add(r6, r6, Operand(kHeapObjectTag));
-  __ add(r5, r3, Operand(r5, LSL, 1));
-  __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex);
-  // Using offsetted addresses in r4 to fully take advantage of post-indexing.
-  // r3: begin of destination FixedArray element fields, not tagged
-  // r4: begin of source FixedDoubleArray element fields, not tagged, +4
-  // r5: end of destination FixedArray, not tagged
-  // r6: destination FixedArray
-  // r9: heap number map
+  Register src_elements = elements;
+  Register dst_elements = target_map;
+  Register dst_end = length;
+  Register heap_number_map = scratch;
+  __ add(src_elements, elements,
+         Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ add(dst_elements, array, Operand(FixedArray::kHeaderSize));
+  __ add(array, array, Operand(kHeapObjectTag));
+  __ add(dst_end, dst_elements, Operand(length, LSL, 1));
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  // Using offset addresses in src_elements to fully take advantage of
+  // post-indexing.
+  // dst_elements: begin of destination FixedArray element fields, not tagged
+  // src_elements: begin of source FixedDoubleArray element fields,
+  //               not tagged, +4
+  // dst_end: end of destination FixedArray, not tagged
+  // array: destination FixedArray
+  // heap_number_map: heap number map
   __ b(&entry);
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
-  __ Pop(r3, r2, r1, r0);
+  __ Pop(target_map, receiver, key, value);
   __ pop(lr);
   __ b(fail);
 
   __ bind(&loop);
-  __ ldr(r1, MemOperand(r4, 8, PostIndex));
-  // r1: current element's upper 32 bit
-  // r4: address of next element's upper 32 bit
-  __ cmp(r1, Operand(kHoleNanUpper32));
+  Register upper_bits = key;
+  __ ldr(upper_bits, MemOperand(src_elements, 8, PostIndex));
+  // upper_bits: current element's upper 32 bit
+  // src_elements: address of next element's upper 32 bit
+  __ cmp(upper_bits, Operand(kHoleNanUpper32));
   __ b(eq, &convert_hole);
 
   // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required);
-  // r2: new heap number
-  __ ldr(r0, MemOperand(r4, 12, NegOffset));
-  __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset));
-  __ mov(r0, r3);
-  __ str(r2, MemOperand(r3, 4, PostIndex));
-  __ RecordWrite(r6,
-                 r0,
-                 r2,
+  Register heap_number = receiver;
+  Register scratch2 = value;
+  __ AllocateHeapNumber(heap_number, scratch2, lr, heap_number_map,
+                        &gc_required);
+  // heap_number: new heap number
+  __ ldr(scratch2, MemOperand(src_elements, 12, NegOffset));
+  __ Strd(scratch2, upper_bits,
+          FieldMemOperand(heap_number, HeapNumber::kValueOffset));
+  __ mov(scratch2, dst_elements);
+  __ str(heap_number, MemOperand(dst_elements, 4, PostIndex));
+  __ RecordWrite(array,
+                 scratch2,
+                 heap_number,
                  kLRHasBeenSaved,
                  kDontSaveFPRegs,
                  EMIT_REMEMBERED_SET,
@@ -607,20 +653,20 @@
 
   // Replace the-hole NaN with the-hole pointer.
   __ bind(&convert_hole);
-  __ LoadRoot(r0, Heap::kTheHoleValueRootIndex);
-  __ str(r0, MemOperand(r3, 4, PostIndex));
+  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  __ str(scratch2, MemOperand(dst_elements, 4, PostIndex));
 
   __ bind(&entry);
-  __ cmp(r3, r5);
+  __ cmp(dst_elements, dst_end);
   __ b(lt, &loop);
 
-  __ Pop(r3, r2, r1, r0);
+  __ Pop(target_map, receiver, key, value);
   // Replace receiver's backing store with newly created and filled FixedArray.
-  __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset));
-  __ RecordWriteField(r2,
+  __ str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      r6,
-                      r9,
+                      array,
+                      scratch,
                       kLRHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
@@ -629,11 +675,11 @@
 
   __ bind(&only_change_map);
   // Update receiver's map.
-  __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset));
-  __ RecordWriteField(r2,
+  __ str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      r3,
-                      r9,
+                      target_map,
+                      scratch,
                       kLRHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
@@ -713,16 +759,16 @@
   __ b(ne, call_runtime);
   __ ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
 
-  Label ascii, done;
+  Label one_byte, done;
   __ bind(&check_encoding);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ tst(result, Operand(kStringEncodingMask));
-  __ b(ne, &ascii);
+  __ b(ne, &one_byte);
   // Two-byte string.
   __ ldrh(result, MemOperand(string, index, LSL, 1));
   __ jmp(&done);
-  __ bind(&ascii);
-  // Ascii string.
+  __ bind(&one_byte);
+  // One-byte string.
   __ ldrb(result, MemOperand(string, index));
   __ bind(&done);
 }
@@ -741,16 +787,17 @@
                                    Register temp1,
                                    Register temp2,
                                    Register temp3) {
-  ASSERT(!input.is(result));
-  ASSERT(!input.is(double_scratch1));
-  ASSERT(!input.is(double_scratch2));
-  ASSERT(!result.is(double_scratch1));
-  ASSERT(!result.is(double_scratch2));
-  ASSERT(!double_scratch1.is(double_scratch2));
-  ASSERT(!temp1.is(temp2));
-  ASSERT(!temp1.is(temp3));
-  ASSERT(!temp2.is(temp3));
-  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!input.is(result));
+  DCHECK(!input.is(double_scratch1));
+  DCHECK(!input.is(double_scratch2));
+  DCHECK(!result.is(double_scratch1));
+  DCHECK(!result.is(double_scratch2));
+  DCHECK(!double_scratch1.is(double_scratch2));
+  DCHECK(!temp1.is(temp2));
+  DCHECK(!temp1.is(temp3));
+  DCHECK(!temp2.is(temp3));
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!masm->serializer_enabled());  // External references not serializable.
 
   Label zero, infinity, done;
 
@@ -781,7 +828,7 @@
   __ vmul(result, result, double_scratch2);
   __ vsub(result, result, double_scratch1);
   // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
-  ASSERT(*reinterpret_cast<double*>
+  DCHECK(*reinterpret_cast<double*>
          (ExternalReference::math_exp_constants(8).address()) == 1);
   __ vmov(double_scratch2, 1);
   __ vadd(result, result, double_scratch2);
@@ -822,7 +869,7 @@
 #endif
 
 CodeAgingHelper::CodeAgingHelper() {
-  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
   // Since patcher is a large object, allocate it dynamically when needed,
   // to avoid overloading the stack in stress conditions.
   // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
@@ -848,7 +895,7 @@
 
 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
   bool result = isolate->code_aging_helper()->IsYoung(sequence);
-  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
+  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
   return result;
 }
 
@@ -874,7 +921,7 @@
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
-    CPU::FlushICache(sequence, young_length);
+    CpuFeatures::FlushICache(sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
diff --git a/src/arm/codegen-arm.h b/src/arm/codegen-arm.h
index 9ec0958..4c7c768 100644
--- a/src/arm/codegen-arm.h
+++ b/src/arm/codegen-arm.h
@@ -6,7 +6,7 @@
 #define V8_ARM_CODEGEN_ARM_H_
 
 #include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/arm/constants-arm.cc b/src/arm/constants-arm.cc
index d00a09f..3f3c5ed 100644
--- a/src/arm/constants-arm.cc
+++ b/src/arm/constants-arm.cc
@@ -81,7 +81,7 @@
 
 
 const char* VFPRegisters::Name(int reg, bool is_double) {
-  ASSERT((0 <= reg) && (reg < kNumVFPRegisters));
+  DCHECK((0 <= reg) && (reg < kNumVFPRegisters));
   return names_[reg + (is_double ? kNumVFPSingleRegisters : 0)];
 }
 
diff --git a/src/arm/constants-arm.h b/src/arm/constants-arm.h
index 0847ea1..375ef89 100644
--- a/src/arm/constants-arm.h
+++ b/src/arm/constants-arm.h
@@ -19,11 +19,11 @@
 const int kConstantPoolMarker = 0xe7f000f0;
 const int kConstantPoolLengthMaxMask = 0xffff;
 inline int EncodeConstantPoolLength(int length) {
-  ASSERT((length & kConstantPoolLengthMaxMask) == length);
+  DCHECK((length & kConstantPoolLengthMaxMask) == length);
   return ((length & 0xfff0) << 4) | (length & 0xf);
 }
 inline int DecodeConstantPoolLength(int instr) {
-  ASSERT((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
+  DCHECK((instr & kConstantPoolMarkerMask) == kConstantPoolMarker);
   return ((instr >> 4) & 0xfff0) | (instr & 0xf);
 }
 
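EncodeConstantPoolLength splits the 16-bit length so that bits [3:0] stay put and bits [15:4] move up by four, leaving bits [7:4] of the instruction word free for the instruction encoding; DecodeConstantPoolLength reverses it. A worked example:

    int length = 0x1234;
    int enc = ((length & 0xfff0) << 4) | (length & 0xf);  // 0x12304
    int dec = ((enc >> 4) & 0xfff0) | (enc & 0xf);        // 0x1234 again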
@@ -84,7 +84,7 @@
 
 
 inline Condition NegateCondition(Condition cond) {
-  ASSERT(cond != al);
+  DCHECK(cond != al);
   return static_cast<Condition>(cond ^ ne);
 }
 
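NegateCondition works because ARM condition codes come in complementary pairs that differ only in bit 0 (eq/ne, cs/cc, ge/lt, ...), and ne encodes as 1. XOR with ne therefore flips any condition to its complement; al (the "always" condition, 0b1110) has no usable complement, hence the DCHECK. For example:

    // eq = 0b0000, ne = 0b0001; ge = 0b1010, lt = 0b1011
    // NegateCondition(eq) == (0b0000 ^ 1) == ne
    // NegateCondition(lt) == (0b1011 ^ 1) == ge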
@@ -406,64 +406,6 @@
 
 
 // -----------------------------------------------------------------------------
-// Specific instructions, constants, and masks.
-// These constants are declared in assembler-arm.cc, as they use named registers
-// and other constants.
-
-
-// add(sp, sp, 4) instruction (aka Pop())
-extern const Instr kPopInstruction;
-
-// str(r, MemOperand(sp, 4, NegPreIndex), al) instruction (aka push(r))
-// register r is not encoded.
-extern const Instr kPushRegPattern;
-
-// ldr(r, MemOperand(sp, 4, PostIndex), al) instruction (aka pop(r))
-// register r is not encoded.
-extern const Instr kPopRegPattern;
-
-// mov lr, pc
-extern const Instr kMovLrPc;
-// ldr rd, [pc, #offset]
-extern const Instr kLdrPCMask;
-extern const Instr kLdrPCPattern;
-// vldr dd, [pc, #offset]
-extern const Instr kVldrDPCMask;
-extern const Instr kVldrDPCPattern;
-// blxcc rm
-extern const Instr kBlxRegMask;
-
-extern const Instr kBlxRegPattern;
-
-extern const Instr kMovMvnMask;
-extern const Instr kMovMvnPattern;
-extern const Instr kMovMvnFlip;
-extern const Instr kMovLeaveCCMask;
-extern const Instr kMovLeaveCCPattern;
-extern const Instr kMovwMask;
-extern const Instr kMovwPattern;
-extern const Instr kMovwLeaveCCFlip;
-extern const Instr kCmpCmnMask;
-extern const Instr kCmpCmnPattern;
-extern const Instr kCmpCmnFlip;
-extern const Instr kAddSubFlip;
-extern const Instr kAndBicFlip;
-
-// A mask for the Rd register for push, pop, ldr, str instructions.
-extern const Instr kLdrRegFpOffsetPattern;
-
-extern const Instr kStrRegFpOffsetPattern;
-
-extern const Instr kLdrRegFpNegOffsetPattern;
-
-extern const Instr kStrRegFpNegOffsetPattern;
-
-extern const Instr kLdrStrInstrTypeMask;
-extern const Instr kLdrStrInstrArgumentMask;
-extern const Instr kLdrStrOffsetMask;
-
-
-// -----------------------------------------------------------------------------
 // Instruction abstraction.
 
 // The class Instruction enables access to individual fields defined in the ARM
@@ -622,10 +564,13 @@
   inline int ShiftAmountValue() const { return Bits(11, 7); }
     // with immediate
   inline int RotateValue() const { return Bits(11, 8); }
+  DECLARE_STATIC_ACCESSOR(RotateValue);
   inline int Immed8Value() const { return Bits(7, 0); }
+  DECLARE_STATIC_ACCESSOR(Immed8Value);
   inline int Immed4Value() const { return Bits(19, 16); }
   inline int ImmedMovwMovtValue() const {
       return Immed4Value() << 12 | Offset12Value(); }
+  DECLARE_STATIC_ACCESSOR(ImmedMovwMovtValue);
 
   // Fields used in Load/Store instructions
   inline int PUValue() const { return Bits(24, 23); }
diff --git a/src/arm/cpu-arm.cc b/src/arm/cpu-arm.cc
index 4ff82a7..9c7104e 100644
--- a/src/arm/cpu-arm.cc
+++ b/src/arm/cpu-arm.cc
@@ -16,18 +16,16 @@
 
 #if V8_TARGET_ARCH_ARM
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/macro-assembler.h"
 #include "src/simulator.h"  // for cache flushing.
 
 namespace v8 {
 namespace internal {
 
-void CPU::FlushICache(void* start, size_t size) {
-  // Nothing to do flushing no instructions.
-  if (size == 0) {
-    return;
-  }
+
+void CpuFeatures::FlushICache(void* start, size_t size) {
+  if (size == 0) return;
 
 #if defined(USE_SIMULATOR)
   // Not generating ARM instructions for C-code. This means that we are
@@ -36,47 +34,31 @@
   // None of this code ends up in the snapshot so there are no issues
   // around whether or not to generate the code when building snapshots.
   Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+
 #elif V8_OS_QNX
   msync(start, size, MS_SYNC | MS_INVALIDATE_ICACHE);
-#else
-  // Ideally, we would call
-  //   syscall(__ARM_NR_cacheflush, start,
-  //           reinterpret_cast<intptr_t>(start) + size, 0);
-  // however, syscall(int, ...) is not supported on all platforms, especially
-  // not when using EABI, so we call the __ARM_NR_cacheflush syscall directly.
 
-  register uint32_t beg asm("a1") = reinterpret_cast<uint32_t>(start);
-  register uint32_t end asm("a2") =
-      reinterpret_cast<uint32_t>(start) + size;
-  register uint32_t flg asm("a3") = 0;
-  #if defined (__arm__) && !defined(__thumb__)
-    // __arm__ may be defined in thumb mode.
-    register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
-    asm volatile(
-        "svc 0x0"
-        : "=r" (beg)
-        : "0" (beg), "r" (end), "r" (flg), "r" (scno));
-  #else
-    // r7 is reserved by the EABI in thumb mode.
-    asm volatile(
-    "@   Enter ARM Mode  \n\t"
-        "adr r3, 1f      \n\t"
-        "bx  r3          \n\t"
-        ".ALIGN 4        \n\t"
-        ".ARM            \n"
-    "1:  push {r7}       \n\t"
-        "mov r7, %4      \n\t"
-        "svc 0x0         \n\t"
-        "pop {r7}        \n\t"
-    "@   Enter THUMB Mode\n\t"
-        "adr r3, 2f+1    \n\t"
-        "bx  r3          \n\t"
-        ".THUMB          \n"
-    "2:                  \n\t"
-        : "=r" (beg)
-        : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
-        : "r3");
-  #endif
+#else
+  register uint32_t beg asm("r0") = reinterpret_cast<uint32_t>(start);
+  register uint32_t end asm("r1") = beg + size;
+  register uint32_t flg asm("r2") = 0;
+
+  asm volatile(
+    // This assembly works for both ARM and Thumb targets.
+
+    // Preserve r7; it is callee-saved, and GCC uses it as a frame pointer for
+    // Thumb targets.
+    "  push {r7}\n"
+                                  // r0 = beg
+                                  // r1 = end
+                                  // r2 = flags (0)
+    "  ldr r7, =%c[scno]\n"       // r7 = syscall number
+    "  svc 0\n"
+
+    "  pop {r7}\n"
+    :
+    : "r" (beg), "r" (end), "r" (flg), [scno] "i" (__ARM_NR_cacheflush)
+    : "memory");
 #endif
 }
 
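The removed comment in the old code states the constraint the new inline assembly works around: syscall(int, ...) is not supported on all platforms, especially under EABI, so the __ARM_NR_cacheflush syscall is issued directly, with r7 (GCC's Thumb frame pointer, which doubles as the EABI syscall-number register) saved and restored by hand. Where syscall(2) is available, the equivalent call is simply (a sketch, per the old comment; Linux/EABI headers assumed):

    #include <cstdint>
    #include <unistd.h>      // syscall
    #include <asm/unistd.h>  // __ARM_NR_cacheflush
    static void FlushICacheViaSyscall(void* start, size_t size) {
      syscall(__ARM_NR_cacheflush, start,
              reinterpret_cast<intptr_t>(start) + size, 0);
    }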
diff --git a/src/arm/debug-arm.cc b/src/arm/debug-arm.cc
index e5460f5..6d7d6b8 100644
--- a/src/arm/debug-arm.cc
+++ b/src/arm/debug-arm.cc
@@ -27,7 +27,7 @@
   //   ldr ip, [pc, #0]
   //   blx ip
   //   <debug break return code entry point address>
-  //   bktp 0
+  //   bkpt 0
   CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
   patcher.masm()->ldr(v8::internal::ip, MemOperand(v8::internal::pc, 0));
   patcher.masm()->blx(v8::internal::ip);
@@ -47,20 +47,20 @@
 // A debug break in the frame exit code is identified by the JS frame exit code
 // having been patched with a call instruction.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
   return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   // Check whether the debug break slot instructions have been patched.
   return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   // Patch the code changing the debug break slot code from
   //   mov r2, r2
   //   mov r2, r2
@@ -78,7 +78,7 @@
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   rinfo()->PatchCode(original_rinfo()->pc(),
                      Assembler::kDebugBreakSlotInstructions);
 }
@@ -93,12 +93,20 @@
   {
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
 
+    // Load padding words on stack.
+    __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+      __ push(ip);
+    }
+    __ mov(ip, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+    __ push(ip);
+
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non object values
     // are stored as a smi causing it to be untouched by GC.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
+    DCHECK((object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((object_regs & non_object_regs) == 0);
     if ((object_regs | non_object_regs) != 0) {
       for (int i = 0; i < kNumJSCallerSaved; i++) {
         int r = JSCallerSavedCode(i);
@@ -139,6 +147,9 @@
       }
     }
 
+    // Don't bother removing the padding words pushed on the stack,
+    // as the frame is going to be restored right away.
+
     // Leave the internal frame.
   }
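
The padding pushed in the prologue above leaves the internal frame in roughly
this shape; a sketch of the layout (slot labels illustrative, not from this
patch):

    // Stack after the prologue, growing downward:
    //   <internal frame marker, context, ...>  (FrameAndConstantPoolScope)
    //   Smi(kFramePaddingValue)   \
    //   ...                        |  kFramePaddingInitialSize slots
    //   Smi(kFramePaddingValue)   /
    //   Smi(kFramePaddingInitialSize)          <- slot count for LiveEdit
    //   <saved object / non-object registers>

The reserved words give LiveEdit's frame dropper room to rewrite the frame in
place, which is why leaving them behind on exit is harmless.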
 
@@ -165,48 +176,35 @@
 
 void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC load (from ic-arm.cc).
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  //  -- r0    : receiver
-  //  -- [sp]  : receiver
-  // -----------------------------------
-  // Registers r0 and r2 contain objects that need to be pushed on the
-  // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, r0.bit() | r2.bit(), 0);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
 }
 
 
 void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC store (from ic-arm.cc).
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : receiver
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-  // Registers r0, r1, and r2 contain objects that need to be pushed on the
-  // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(
+      masm, receiver.bit() | name.bit() | value.bit(), 0);
 }
 
 
 void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit(), 0);
+  // Calling convention for keyed IC load (from ic-arm.cc).
+  GenerateLoadICDebugBreak(masm);
 }
 
 
 void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
-  //  -- lr     : return address
-  Generate_DebugBreakCallHelper(masm, r0.bit() | r1.bit() | r2.bit(), 0);
+  // Calling convention for IC keyed store call (from ic-arm.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(
+      masm, receiver.bit() | name.bit() | value.bit(), 0);
 }
 
 
@@ -269,7 +267,7 @@
   for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
     __ nop(MacroAssembler::DEBUG_BREAK_NOP);
   }
-  ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+  DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
             masm->InstructionsGeneratedSince(&check_codesize));
 }
 
@@ -282,16 +280,42 @@
 
 
 void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
+  __ Ret();
 }
 
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnArm);
+  ExternalReference restarter_frame_function_slot =
+      ExternalReference::debug_restarter_frame_function_pointer_address(
+          masm->isolate());
+  __ mov(ip, Operand(restarter_frame_function_slot));
+  __ mov(r1, Operand::Zero());
+  __ str(r1, MemOperand(ip, 0));
+
+  // Load the function pointer off of our current stack frame.
+  __ ldr(r1, MemOperand(fp,
+         StandardFrameConstants::kConstantPoolOffset - kPointerSize));
+
+  // Pop return address, frame and constant pool pointer (if
+  // FLAG_enable_ool_constant_pool).
+  __ LeaveFrame(StackFrame::INTERNAL);
+
+  { ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+    // Load context from the function.
+    __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
+
+    // Get function code.
+    __ ldr(ip, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
+    __ ldr(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+    __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+    // Re-run JSFunction, r1 is function, cp is context.
+    __ Jump(ip);
+  }
 }
 
 
-const bool LiveEdit::kFrameDropperSupported = false;
+const bool LiveEdit::kFrameDropperSupported = true;
 
 #undef __
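
Stated as pseudo-C++, the frame dropper above performs the following sequence
(accessors abbreviated, not a literal transcription):

    // *restarter_frame_function_slot = NULL;  // clear the restart marker
    // JSFunction* fn = <function slot of the frame being dropped>;  // -> r1
    // LeaveFrame(StackFrame::INTERNAL);       // pop lr/fp (and pp if OOL)
    // cp = fn->context();
    // Code* code = fn->shared()->code();
    // jump(code->entry());                    // re-run fn from the top

Once LeaveFrame has popped the constant pool pointer, pool-relative loads are
no longer valid, hence the ConstantPoolUnavailableScope around the reloads.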
 
diff --git a/src/arm/deoptimizer-arm.cc b/src/arm/deoptimizer-arm.cc
index 1288196..0455a3b 100644
--- a/src/arm/deoptimizer-arm.cc
+++ b/src/arm/deoptimizer-arm.cc
@@ -12,7 +12,7 @@
 namespace v8 {
 namespace internal {
 
-const int Deoptimizer::table_entry_size_ = 12;
+const int Deoptimizer::table_entry_size_ = 8;
 
 
 int Deoptimizer::patch_size() {
@@ -49,9 +49,6 @@
 
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
@@ -68,13 +65,13 @@
                                                        deopt_entry,
                                                        RelocInfo::NONE32);
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
-    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
-    ASSERT(call_size_in_bytes <= patch_size());
+    DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
+    DCHECK(call_size_in_bytes <= patch_size());
     CodePatcher patcher(call_address, call_size_in_words);
     patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
-    ASSERT(prev_call_address == NULL ||
+    DCHECK(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
-    ASSERT(call_address + patch_size() <= code->instruction_end());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
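
The DCHECKs above pin down the patch-site invariant; spelled out (names as in
the loop):

    // prev_call_address == NULL ||
    //     call_address >= prev_call_address + patch_size()
    // call_address + patch_size() <= code->instruction_end()
    // i.e. patched calls never overlap and never run past the code object.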
@@ -104,8 +101,8 @@
 
 
 void Deoptimizer::SetPlatformCompiledStubRegisters(
-    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
-  ApiFunction function(descriptor->deoptimization_handler_);
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+  ApiFunction function(descriptor->deoptimization_handler());
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
   int params = descriptor->GetHandlerParameterCount();
@@ -145,8 +142,8 @@
       kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
 
   // Save all allocatable VFP registers before messing with them.
-  ASSERT(kDoubleRegZero.code() == 14);
-  ASSERT(kScratchDoubleReg.code() == 15);
+  DCHECK(kDoubleRegZero.code() == 14);
+  DCHECK(kScratchDoubleReg.code() == 15);
 
   // Check CPU flags for number of registers, setting the Z condition flag.
   __ CheckFor32DRegs(ip);
@@ -197,7 +194,7 @@
   __ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
 
   // Copy core registers into FrameDescription::registers_[kNumRegisters].
-  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
   for (int i = 0; i < kNumberOfRegisters; i++) {
     int offset = (i * kPointerSize) + FrameDescription::registers_offset();
     __ ldr(r2, MemOperand(sp, i * kPointerSize));
@@ -328,11 +325,11 @@
     int start = masm()->pc_offset();
     USE(start);
     __ mov(ip, Operand(i));
-    __ push(ip);
     __ b(&done);
-    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+    DCHECK(masm()->pc_offset() - start == table_entry_size_);
   }
   __ bind(&done);
+  __ push(ip);
 }
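
With the push hoisted out of the loop, every table entry is exactly two
instructions, which is what the new table_entry_size_ of 8 bytes
(2 * Assembler::kInstrSize) accounts for; sketched:

    // entry i (8 bytes each):
    //   mov ip, #i       ; entry id
    //   b   done
    // ...
    // done:
    //   push {ip}        ; emitted once, shared by all entries

Under the old layout each entry carried its own push, hence the previous
12-byte entry size.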
 
 
@@ -347,7 +344,7 @@
 
 
 void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
-  ASSERT(FLAG_enable_ool_constant_pool);
+  DCHECK(FLAG_enable_ool_constant_pool);
   SetFrameSlot(offset, value);
 }
 
diff --git a/src/arm/disasm-arm.cc b/src/arm/disasm-arm.cc
index 48f77b4..85977b1 100644
--- a/src/arm/disasm-arm.cc
+++ b/src/arm/disasm-arm.cc
@@ -24,8 +24,8 @@
 
 
 #include <assert.h>
-#include <stdio.h>
 #include <stdarg.h>
+#include <stdio.h>
 #include <string.h>
 
 #include "src/v8.h"
@@ -33,9 +33,9 @@
 #if V8_TARGET_ARCH_ARM
 
 #include "src/arm/constants-arm.h"
+#include "src/base/platform/platform.h"
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
 
 
 namespace v8 {
@@ -299,7 +299,7 @@
 // Handle all register based formatting in this function to reduce the
 // complexity of FormatOption.
 int Decoder::FormatRegister(Instruction* instr, const char* format) {
-  ASSERT(format[0] == 'r');
+  DCHECK(format[0] == 'r');
   if (format[1] == 'n') {  // 'rn: Rn register
     int reg = instr->RnValue();
     PrintRegister(reg);
@@ -322,7 +322,7 @@
     return 2;
   } else if (format[1] == 'l') {
     // 'rlist: register list for load and store multiple instructions
-    ASSERT(STRING_STARTS_WITH(format, "rlist"));
+    DCHECK(STRING_STARTS_WITH(format, "rlist"));
     int rlist = instr->RlistValue();
     int reg = 0;
     Print("{");
@@ -348,7 +348,7 @@
 // Handle all VFP register based formatting in this function to reduce the
 // complexity of FormatOption.
 int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
-  ASSERT((format[0] == 'S') || (format[0] == 'D'));
+  DCHECK((format[0] == 'S') || (format[0] == 'D'));
 
   VFPRegPrecision precision =
       format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
@@ -462,7 +462,7 @@
       return 1;
     }
     case 'c': {  // 'cond: conditional execution
-      ASSERT(STRING_STARTS_WITH(format, "cond"));
+      DCHECK(STRING_STARTS_WITH(format, "cond"));
       PrintCondition(instr);
       return 4;
     }
@@ -478,9 +478,9 @@
         // BFC/BFI:
         // Bits 20-16 represent most-significant bit. Convert to width.
         width -= lsbit;
-        ASSERT(width > 0);
+        DCHECK(width > 0);
       }
-      ASSERT((width + lsbit) <= 32);
+      DCHECK((width + lsbit) <= 32);
       out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                   "#%d, #%d", lsbit, width);
       return 1;
@@ -498,9 +498,9 @@
       int width = (format[3] - '0') * 10 + (format[4] - '0');
       int lsb   = (format[6] - '0') * 10 + (format[7] - '0');
 
-      ASSERT((width >= 1) && (width <= 32));
-      ASSERT((lsb >= 0) && (lsb <= 31));
-      ASSERT((width + lsb) <= 32);
+      DCHECK((width >= 1) && (width <= 32));
+      DCHECK((lsb >= 0) && (lsb <= 31));
+      DCHECK((width + lsb) <= 32);
 
       out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                   "%d",
@@ -520,7 +520,7 @@
         return 2;
       }
       if (format[1] == 'e') {  // 'memop: load/store instructions.
-        ASSERT(STRING_STARTS_WITH(format, "memop"));
+        DCHECK(STRING_STARTS_WITH(format, "memop"));
         if (instr->HasL()) {
           Print("ldr");
         } else {
@@ -538,7 +538,7 @@
         return 5;
       }
       // 'msg: for simulator break instructions
-      ASSERT(STRING_STARTS_WITH(format, "msg"));
+      DCHECK(STRING_STARTS_WITH(format, "msg"));
       byte* str =
           reinterpret_cast<byte*>(instr->InstructionBits() & 0x0fffffff);
       out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
@@ -548,13 +548,13 @@
     case 'o': {
       if ((format[3] == '1') && (format[4] == '2')) {
         // 'off12: 12-bit offset for load and store instructions
-        ASSERT(STRING_STARTS_WITH(format, "off12"));
+        DCHECK(STRING_STARTS_WITH(format, "off12"));
         out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                     "%d", instr->Offset12Value());
         return 5;
       } else if (format[3] == '0') {
         // 'off0to3and8to19 16-bit immediate encoded in bits 19-8 and 3-0.
-        ASSERT(STRING_STARTS_WITH(format, "off0to3and8to19"));
+        DCHECK(STRING_STARTS_WITH(format, "off0to3and8to19"));
         out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                     "%d",
                                     (instr->Bits(19, 8) << 4) +
@@ -562,13 +562,13 @@
         return 15;
       }
       // 'off8: 8-bit offset for extra load and store instructions
-      ASSERT(STRING_STARTS_WITH(format, "off8"));
+      DCHECK(STRING_STARTS_WITH(format, "off8"));
       int offs8 = (instr->ImmedHValue() << 4) | instr->ImmedLValue();
       out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", offs8);
       return 4;
     }
     case 'p': {  // 'pu: P and U bits for load and store instructions
-      ASSERT(STRING_STARTS_WITH(format, "pu"));
+      DCHECK(STRING_STARTS_WITH(format, "pu"));
       PrintPU(instr);
       return 2;
     }
@@ -578,29 +578,29 @@
     case 's': {
       if (format[1] == 'h') {  // 'shift_op or 'shift_rm or 'shift_sat.
         if (format[6] == 'o') {  // 'shift_op
-          ASSERT(STRING_STARTS_WITH(format, "shift_op"));
+          DCHECK(STRING_STARTS_WITH(format, "shift_op"));
           if (instr->TypeValue() == 0) {
             PrintShiftRm(instr);
           } else {
-            ASSERT(instr->TypeValue() == 1);
+            DCHECK(instr->TypeValue() == 1);
             PrintShiftImm(instr);
           }
           return 8;
         } else if (format[6] == 's') {  // 'shift_sat.
-          ASSERT(STRING_STARTS_WITH(format, "shift_sat"));
+          DCHECK(STRING_STARTS_WITH(format, "shift_sat"));
           PrintShiftSat(instr);
           return 9;
         } else {  // 'shift_rm
-          ASSERT(STRING_STARTS_WITH(format, "shift_rm"));
+          DCHECK(STRING_STARTS_WITH(format, "shift_rm"));
           PrintShiftRm(instr);
           return 8;
         }
       } else if (format[1] == 'v') {  // 'svc
-        ASSERT(STRING_STARTS_WITH(format, "svc"));
+        DCHECK(STRING_STARTS_WITH(format, "svc"));
         PrintSoftwareInterrupt(instr->SvcValue());
         return 3;
       } else if (format[1] == 'i') {  // 'sign: signed extra loads and stores
-        ASSERT(STRING_STARTS_WITH(format, "sign"));
+        DCHECK(STRING_STARTS_WITH(format, "sign"));
         if (instr->HasSign()) {
           Print("s");
         }
@@ -613,7 +613,7 @@
       return 1;
     }
     case 't': {  // 'target: target of branch instructions
-      ASSERT(STRING_STARTS_WITH(format, "target"));
+      DCHECK(STRING_STARTS_WITH(format, "target"));
       int off = (instr->SImmed24Value() << 2) + 8;
       out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
                                   "%+d -> %s",
@@ -1097,13 +1097,16 @@
     }
     case db_x: {
       if (FLAG_enable_sudiv) {
-        if (!instr->HasW()) {
-          if (instr->Bits(5, 4) == 0x1) {
-            if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+        if (instr->Bits(5, 4) == 0x1) {
+          if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+            if (instr->Bit(21) == 0x1) {
+              // UDIV (in V8 notation matching ARM ISA format) rn = rm/rs
+              Format(instr, "udiv'cond'b 'rn, 'rm, 'rs");
+            } else {
               // SDIV (in V8 notation matching ARM ISA format) rn = rm/rs
               Format(instr, "sdiv'cond'b 'rn, 'rm, 'rs");
-              break;
             }
+            break;
           }
         }
       }
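
The new branch separates the two ARMv7 divide encodings purely on bit 21; a
self-contained sketch of the same checks (simplified - the real decoder has
already dispatched on the cond and type fields):

    #include <cstdint>

    // Returns "udiv", "sdiv", or NULL for other db_x-shaped instructions.
    static const char* DivMnemonic(uint32_t instr) {
      bool div_shape = ((instr >> 4) & 0x3) == 0x1 &&   // Bits(5, 4) == 0b01
                       ((instr >> 22) & 0x1) == 0x0 &&  // Bit(22) clear
                       ((instr >> 20) & 0x1) == 0x1;    // Bit(20) set
      if (!div_shape) return NULL;
      return ((instr >> 21) & 0x1) ? "udiv" : "sdiv";   // Bit(21) picks sign
    }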
diff --git a/src/arm/frames-arm.cc b/src/arm/frames-arm.cc
index 6051e02..fde4a17 100644
--- a/src/arm/frames-arm.cc
+++ b/src/arm/frames-arm.cc
@@ -7,10 +7,11 @@
 #if V8_TARGET_ARCH_ARM
 
 #include "src/assembler.h"
-#include "src/arm/assembler-arm.h"
-#include "src/arm/assembler-arm-inl.h"
 #include "src/frames.h"
 #include "src/macro-assembler.h"
+
+#include "src/arm/assembler-arm-inl.h"
+#include "src/arm/assembler-arm.h"
 #include "src/arm/macro-assembler-arm.h"
 
 namespace v8 {
@@ -20,7 +21,7 @@
 Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
 Register JavaScriptFrame::context_register() { return cp; }
 Register JavaScriptFrame::constant_pool_pointer_register() {
-  ASSERT(FLAG_enable_ool_constant_pool);
+  DCHECK(FLAG_enable_ool_constant_pool);
   return pp;
 }
 
@@ -28,13 +29,13 @@
 Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
 Register StubFailureTrampolineFrame::context_register() { return cp; }
 Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
-  ASSERT(FLAG_enable_ool_constant_pool);
+  DCHECK(FLAG_enable_ool_constant_pool);
   return pp;
 }
 
 
 Object*& ExitFrame::constant_pool_slot() const {
-  ASSERT(FLAG_enable_ool_constant_pool);
+  DCHECK(FLAG_enable_ool_constant_pool);
   const int offset = ExitFrameConstants::kConstantPoolOffset;
   return Memory::Object_at(fp() + offset);
 }
diff --git a/src/arm/full-codegen-arm.cc b/src/arm/full-codegen-arm.cc
index 8b079ed..eb60c3f 100644
--- a/src/arm/full-codegen-arm.cc
+++ b/src/arm/full-codegen-arm.cc
@@ -6,15 +6,16 @@
 
 #if V8_TARGET_ARCH_ARM
 
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
 #include "src/debug.h"
 #include "src/full-codegen.h"
+#include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 #include "src/parser.h"
 #include "src/scopes.h"
-#include "src/stub-cache.h"
 
 #include "src/arm/code-stubs-arm.h"
 #include "src/arm/macro-assembler-arm.h"
@@ -40,13 +41,13 @@
   }
 
   ~JumpPatchSite() {
-    ASSERT(patch_site_.is_bound() == info_emitted_);
+    DCHECK(patch_site_.is_bound() == info_emitted_);
   }
 
   // When initially emitting this ensure that a jump is always generated to skip
   // the inlined smi code.
   void EmitJumpIfNotSmi(Register reg, Label* target) {
-    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
     Assembler::BlockConstPoolScope block_const_pool(masm_);
     __ bind(&patch_site_);
     __ cmp(reg, Operand(reg));
@@ -56,7 +57,7 @@
   // When initially emitting this ensure that a jump is never generated to skip
   // the inlined smi code.
   void EmitJumpIfSmi(Register reg, Label* target) {
-    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
     Assembler::BlockConstPoolScope block_const_pool(masm_);
     __ bind(&patch_site_);
     __ cmp(reg, Operand(reg));
@@ -133,7 +134,7 @@
     __ b(ne, &ok);
 
     __ ldr(r2, GlobalObjectOperand());
-    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+    __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
 
     __ str(r2, MemOperand(sp, receiver_offset));
 
@@ -152,7 +153,7 @@
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
-    ASSERT(!info->function()->is_generator() || locals_count == 0);
+    DCHECK(!info->function()->is_generator() || locals_count == 0);
     if (locals_count > 0) {
       if (locals_count >= 128) {
         Label ok;
@@ -197,7 +198,7 @@
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ push(r1);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewGlobalContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -205,7 +206,7 @@
       need_write_barrier = false;
     } else {
       __ push(r1);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register = false;
     // Context is returned in r0.  It replaces the context passed to us.
@@ -292,9 +293,9 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         VariableDeclaration* function = scope()->function();
-        ASSERT(function->proxy()->var()->mode() == CONST ||
+        DCHECK(function->proxy()->var()->mode() == CONST ||
                function->proxy()->var()->mode() == CONST_LEGACY);
-        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
         VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
@@ -314,9 +315,9 @@
     }
 
     { Comment cmnt(masm_, "[ Body");
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
       VisitStatements(function()->body());
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
     }
   }
 
@@ -346,13 +347,34 @@
 }
 
 
+#ifdef CAN_USE_ARMV7_INSTRUCTIONS
+static const int kProfileCounterResetSequenceLength = 5 * Assembler::kInstrSize;
+#else
+static const int kProfileCounterResetSequenceLength = 7 * Assembler::kInstrSize;
+#endif
+
+
 void FullCodeGenerator::EmitProfilingCounterReset() {
+  Assembler::BlockConstPoolScope block_const_pool(masm_);
+  PredictableCodeSizeScope predictable_code_size_scope(
+      masm_, kProfileCounterResetSequenceLength);
+  Label start;
+  __ bind(&start);
   int reset_value = FLAG_interrupt_budget;
   if (info_->is_debug()) {
     // Detect debug break requests as soon as possible.
     reset_value = FLAG_interrupt_budget >> 4;
   }
   __ mov(r2, Operand(profiling_counter_));
+  // The mov instruction above can take from 1 to 3 instructions (on ARMv7) or
+  // from 1 to 5 instructions (on ARMv6), depending on whether the address ends
+  // up in an extended constant pool - insert nops to compensate.
+  int expected_instr_count =
+      (kProfileCounterResetSequenceLength / Assembler::kInstrSize) - 2;
+  DCHECK(masm_->InstructionsGeneratedSince(&start) <= expected_instr_count);
+  while (masm_->InstructionsGeneratedSince(&start) != expected_instr_count) {
+    __ nop();
+  }
   __ mov(r3, Operand(Smi::FromInt(reset_value)));
   __ str(r3, FieldMemOperand(r2, Cell::kValueOffset));
 }
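
Worked through for the ARMv7 case (kInstrSize is 4 bytes on ARM), the fixed
sequence adds up as follows:

    // kProfileCounterResetSequenceLength = 5 * kInstrSize = 20 bytes
    // expected_instr_count = 5 - 2 = 3  // budget for the counter-address mov
    //   mov r2, <profiling_counter_>    // 1-3 instructions, nop-padded to 3
    //   mov r3, #reset_value            // 1
    //   str r3, [r2, #value offset]     // 1  -> always 5 instructions

Padding to a constant size lets later code that patches or measures this
sequence rely on a fixed byte count, whichever form the constant load took.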
@@ -365,7 +387,7 @@
   Assembler::BlockConstPoolScope block_const_pool(masm_);
   Label ok;
 
-  ASSERT(back_edge_target->is_bound());
+  DCHECK(back_edge_target->is_bound());
   int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
   int weight = Min(kMaxBackEdgeWeight,
                    Max(1, distance / kCodeSizeMultiplier));
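
As a worked example, taking kCodeSizeMultiplier = 149 (an assumed value; the
constant is defined outside this patch), a back edge 1200 bytes from its
target gets:

    // weight = Min(kMaxBackEdgeWeight, Max(1, 1200 / 149))
    //        = Min(kMaxBackEdgeWeight, 8)

so bigger loop bodies drain the profiling counter faster and reach the
interrupt/optimization check sooner.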
@@ -434,15 +456,17 @@
       PredictableCodeSizeScope predictable(masm_, -1);
       __ RecordJSReturn();
       int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT);
-      __ add(sp, sp, Operand(sp_delta));
-      __ Jump(lr);
-      info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+      { ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+        __ add(sp, sp, Operand(sp_delta));
+        __ Jump(lr);
+        info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+      }
     }
 
 #ifdef DEBUG
     // Check that the size of the code used for returning is large enough
     // for the debugger's requirements.
-    ASSERT(Assembler::kJSReturnSequenceInstructions <=
+    DCHECK(Assembler::kJSReturnSequenceInstructions <=
            masm_->InstructionsGeneratedSince(&check_exit_codesize));
 #endif
   }
@@ -450,25 +474,25 @@
 
 
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
   __ push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
   codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
@@ -533,7 +557,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ b(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -560,7 +584,7 @@
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
 }
 
@@ -568,7 +592,7 @@
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
 }
@@ -576,7 +600,7 @@
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ str(reg, MemOperand(sp, 0));
 }
@@ -584,7 +608,7 @@
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
@@ -595,7 +619,7 @@
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT(materialize_true == materialize_false);
+  DCHECK(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -629,8 +653,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_true == true_label_);
-  ASSERT(materialize_false == false_label_);
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
 }
 
 
@@ -693,7 +717,7 @@
 
 
 MemOperand FullCodeGenerator::StackOperand(Variable* var) {
-  ASSERT(var->IsStackAllocated());
+  DCHECK(var->IsStackAllocated());
   // Offset is negative because higher indexes are at lower addresses.
   int offset = -var->index() * kPointerSize;
   // Adjust by a (parameter or local) base offset.
@@ -707,7 +731,7 @@
 
 
 MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   if (var->IsContextSlot()) {
     int context_chain_length = scope()->ContextChainLength(var->scope());
     __ LoadContext(scratch, context_chain_length);
@@ -729,10 +753,10 @@
                                Register src,
                                Register scratch0,
                                Register scratch1) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
-  ASSERT(!scratch0.is(src));
-  ASSERT(!scratch0.is(scratch1));
-  ASSERT(!scratch1.is(src));
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!scratch0.is(src));
+  DCHECK(!scratch0.is(scratch1));
+  DCHECK(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ str(src, location);
 
@@ -772,7 +796,7 @@
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current function
   // context.
-  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
   if (generate_debug_code_) {
     // Check that we're not inside a with or catch context.
     __ ldr(r1, FieldMemOperand(cp, HeapObject::kMapOffset));
@@ -826,7 +850,7 @@
       Comment cmnt(masm_, "[ VariableDeclaration");
       __ mov(r2, Operand(variable->name()));
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(IsDeclaredVariableMode(mode));
+      DCHECK(IsDeclaredVariableMode(mode));
       PropertyAttributes attr =
           IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
       __ mov(r1, Operand(Smi::FromInt(attr)));
@@ -841,7 +865,7 @@
         __ mov(r0, Operand(Smi::FromInt(0)));  // Indicates no initial value.
         __ Push(cp, r2, r1, r0);
       }
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -856,7 +880,7 @@
     case Variable::UNALLOCATED: {
       globals_->Add(variable->name(), zone());
       Handle<SharedFunctionInfo> function =
-          Compiler::BuildFunctionInfo(declaration->fun(), script());
+          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
       globals_->Add(function, zone());
@@ -897,7 +921,7 @@
       __ Push(cp, r2, r1);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -906,8 +930,8 @@
 
 void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   Variable* variable = declaration->proxy()->var();
-  ASSERT(variable->location() == Variable::CONTEXT);
-  ASSERT(variable->interface()->IsFrozen());
+  DCHECK(variable->location() == Variable::CONTEXT);
+  DCHECK(variable->interface()->IsFrozen());
 
   Comment cmnt(masm_, "[ ModuleDeclaration");
   EmitDebugCheckDeclarationContext(variable);
@@ -969,7 +993,7 @@
   __ mov(r1, Operand(pairs));
   __ mov(r0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
   __ Push(cp, r1, r0);
-  __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
@@ -977,7 +1001,7 @@
 void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
   // Call the runtime to declare the modules.
   __ Push(descriptions);
-  __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
   // Return value is ignored.
 }
 
@@ -1031,7 +1055,8 @@
 
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
-    Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1168,7 +1193,7 @@
   __ bind(&fixed_array);
 
   __ Move(r1, FeedbackVector());
-  __ mov(r2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+  __ mov(r2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ str(r2, FieldMemOperand(r1, FixedArray::OffsetOfElementAt(slot)));
 
   __ mov(r1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
@@ -1262,15 +1287,6 @@
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
-  // var iterable = subject
-  VisitForAccumulatorValue(stmt->assign_iterable());
-
-  // As with for-in, skip the loop if the iterator is null or undefined.
-  __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ b(eq, loop_statement.break_label());
-  __ CompareRoot(r0, Heap::kNullValueRootIndex);
-  __ b(eq, loop_statement.break_label());
-
   // var iterator = iterable[Symbol.iterator]();
   VisitForEffect(stmt->assign_iterator());
 
@@ -1319,9 +1335,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(),
-                            info->strict_mode(),
-                            info->is_generator());
+    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
     __ mov(r2, Operand(info));
     __ CallStub(&stub);
   } else {
@@ -1329,7 +1343,7 @@
     __ LoadRoot(r1, pretenure ? Heap::kTrueValueRootIndex
                               : Heap::kFalseValueRootIndex);
     __ Push(cp, r0, r1);
-    __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+    __ CallRuntime(Runtime::kNewClosure, 3);
   }
   context()->Plug(r0);
 }
@@ -1341,7 +1355,26 @@
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cnmt(masm_, "[ SuperReference ");
+
+  __ ldr(LoadDescriptor::ReceiverRegister(),
+         MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  __ cmp(r0, Operand(isolate()->factory()->undefined_value()));
+  Label done;
+  __ b(ne, &done);
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
   Register current = cp;
@@ -1389,8 +1422,13 @@
     __ bind(&fast);
   }
 
-  __ ldr(r0, GlobalObjectOperand());
-  __ mov(r2, Operand(var->name()));
+  __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+  __ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+  }
+
   ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
       ? NOT_CONTEXTUAL
       : CONTEXTUAL;
@@ -1400,7 +1438,7 @@
 
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
                                                                 Label* slow) {
-  ASSERT(var->IsContextSlot());
+  DCHECK(var->IsContextSlot());
   Register context = cp;
   Register next = r3;
   Register temp = r4;
@@ -1430,7 +1468,7 @@
 }
 
 
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
                                                   TypeofState typeof_state,
                                                   Label* slow,
                                                   Label* done) {
@@ -1439,8 +1477,9 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
+  Variable* var = proxy->var();
   if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
     __ jmp(done);
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
@@ -1454,7 +1493,7 @@
         __ b(ne, done);
         __ mov(r0, Operand(var->name()));
         __ push(r0);
-        __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
       }
     }
     __ jmp(done);
@@ -1472,10 +1511,12 @@
   switch (var->location()) {
     case Variable::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
-      // Use inline caching. Variable name is passed in r2 and the global
-      // object (receiver) in r0.
-      __ ldr(r0, GlobalObjectOperand());
-      __ mov(r2, Operand(var->name()));
+      __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+      __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+      }
       CallLoadIC(CONTEXTUAL);
       context()->Plug(r0);
       break;
@@ -1492,7 +1533,7 @@
         // always looked up dynamically, i.e. in that case
         //     var->location() == LOOKUP.
         // always holds.
-        ASSERT(var->scope() != NULL);
+        DCHECK(var->scope() != NULL);
 
         // Check if the binding really needs an initialization check. The check
         // can be skipped in the following situation: we have a LET or CONST
@@ -1515,8 +1556,8 @@
           skip_init_check = false;
         } else {
           // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+          DCHECK(proxy->position() != RelocInfo::kNoPosition);
           skip_init_check = var->mode() != CONST_LEGACY &&
               var->initializer_position() < proxy->position();
         }
@@ -1532,11 +1573,11 @@
             __ b(ne, &done);
             __ mov(r0, Operand(var->name()));
             __ push(r0);
-            __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
             __ bind(&done);
           } else {
             // Uninitialized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST_LEGACY);
+            DCHECK(var->mode() == CONST_LEGACY);
             __ LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
           }
           context()->Plug(r0);
@@ -1552,11 +1593,11 @@
       Label done, slow;
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
       __ bind(&slow);
       __ mov(r1, Operand(var->name()));
       __ Push(cp, r1);  // Context and name.
-      __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
       __ bind(&done);
       context()->Plug(r0);
     }
@@ -1589,7 +1630,7 @@
   __ mov(r2, Operand(expr->pattern()));
   __ mov(r1, Operand(expr->flags()));
   __ Push(r4, r3, r2, r1);
-  __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ mov(r5, r0);
 
   __ bind(&materialized);
@@ -1601,7 +1642,7 @@
   __ bind(&runtime_allocate);
   __ mov(r0, Operand(Smi::FromInt(size)));
   __ Push(r5, r0);
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ pop(r5);
 
   __ bind(&allocated);
@@ -1645,7 +1686,7 @@
       masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
       properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ Push(r3, r2, r1, r0);
-    __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
     FastCloneShallowObjectStub stub(isolate(), properties_count);
     __ CallStub(&stub);
@@ -1675,14 +1716,15 @@
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ mov(r2, Operand(key->value()));
-            __ ldr(r1, MemOperand(sp));
+            DCHECK(StoreDescriptor::ValueRegister().is(r0));
+            __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+            __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1696,7 +1738,7 @@
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
-          __ mov(r0, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
+          __ mov(r0, Operand(Smi::FromInt(SLOPPY)));  // PropertyAttributes
           __ push(r0);
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
@@ -1736,11 +1778,11 @@
     EmitAccessor(it->second->setter);
     __ mov(r0, Operand(Smi::FromInt(NONE)));
     __ push(r0);
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
 
   if (expr->has_function()) {
-    ASSERT(result_saved);
+    DCHECK(result_saved);
     __ ldr(r0, MemOperand(sp));
     __ push(r0);
     __ CallRuntime(Runtime::kToFastProperties, 1);
@@ -1765,7 +1807,7 @@
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
   Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
+  DCHECK_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
   bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
@@ -1786,7 +1828,7 @@
   if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
     __ mov(r0, Operand(Smi::FromInt(flags)));
     __ Push(r3, r2, r1, r0);
-    __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
@@ -1837,7 +1879,7 @@
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  ASSERT(expr->target()->IsValidReferenceExpression());
+  DCHECK(expr->target()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ Assignment");
 
@@ -1859,9 +1901,9 @@
       break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
-        // We need the receiver both on the stack and in the accumulator.
-        VisitForAccumulatorValue(property->obj());
-        __ push(result_register());
+        // We need the receiver both on the stack and in the receiver register.
+        VisitForStackValue(property->obj());
+        __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1869,9 +1911,10 @@
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
-        __ ldr(r1, MemOperand(sp, 0));
-        __ push(r0);
+        VisitForStackValue(property->key());
+        __ ldr(LoadDescriptor::ReceiverRegister(),
+               MemOperand(sp, 1 * kPointerSize));
+        __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1952,12 +1995,12 @@
   VisitForStackValue(expr->expression());
 
   switch (expr->yield_kind()) {
-    case Yield::SUSPEND:
+    case Yield::kSuspend:
       // Pop value from top-of-stack slot; box result into result register.
       EmitCreateIteratorResult(false);
       __ push(result_register());
       // Fall through.
-    case Yield::INITIAL: {
+    case Yield::kInitial: {
       Label suspend, continuation, post_runtime, resume;
 
       __ jmp(&suspend);
@@ -1967,7 +2010,7 @@
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
-      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
       __ mov(r1, Operand(Smi::FromInt(continuation.pos())));
       __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
       __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
@@ -1978,7 +2021,7 @@
       __ cmp(sp, r1);
       __ b(eq, &post_runtime);
       __ push(r0);  // generator object
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
       __ pop(result_register());
@@ -1989,7 +2032,7 @@
       break;
     }
 
-    case Yield::FINAL: {
+    case Yield::kFinal: {
       VisitForAccumulatorValue(expr->generator_object());
       __ mov(r1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
       __ str(r1, FieldMemOperand(result_register(),
@@ -2001,7 +2044,7 @@
       break;
     }
 
-    case Yield::DELEGATING: {
+    case Yield::kDelegating: {
       VisitForStackValue(expr->generator_object());
 
       // Initial stack layout is as follows:
@@ -2010,6 +2053,9 @@
 
       Label l_catch, l_try, l_suspend, l_continuation, l_resume;
       Label l_next, l_call, l_loop;
+      Register load_receiver = LoadDescriptor::ReceiverRegister();
+      Register load_name = LoadDescriptor::NameRegister();
+
       // Initial send value is undefined.
       __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
       __ b(&l_next);
@@ -2017,9 +2063,9 @@
       // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
       __ bind(&l_catch);
       handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
-      __ LoadRoot(r2, Heap::kthrow_stringRootIndex);  // "throw"
-      __ ldr(r3, MemOperand(sp, 1 * kPointerSize));   // iter
-      __ Push(r2, r3, r0);                            // "throw", iter, except
+      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
+      __ ldr(r3, MemOperand(sp, 1 * kPointerSize));          // iter
+      __ Push(load_name, r3, r0);                       // "throw", iter, except
       __ jmp(&l_call);
 
       // try { received = %yield result }
@@ -2037,14 +2083,14 @@
       const int generator_object_depth = kPointerSize + handler_size;
       __ ldr(r0, MemOperand(sp, generator_object_depth));
       __ push(r0);                                       // g
-      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
       __ mov(r1, Operand(Smi::FromInt(l_continuation.pos())));
       __ str(r1, FieldMemOperand(r0, JSGeneratorObject::kContinuationOffset));
       __ str(cp, FieldMemOperand(r0, JSGeneratorObject::kContextOffset));
       __ mov(r1, cp);
       __ RecordWriteField(r0, JSGeneratorObject::kContextOffset, r1, r2,
                           kLRHasBeenSaved, kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ pop(r0);                                      // result
       EmitReturnSequence();
@@ -2053,15 +2099,20 @@
 
       // receiver = iter; f = 'next'; arg = received;
       __ bind(&l_next);
-      __ LoadRoot(r2, Heap::knext_stringRootIndex);    // "next"
-      __ ldr(r3, MemOperand(sp, 1 * kPointerSize));    // iter
-      __ Push(r2, r3, r0);                             // "next", iter, received
+
+      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
+      __ ldr(r3, MemOperand(sp, 1 * kPointerSize));         // iter
+      __ Push(load_name, r3, r0);                      // "next", iter, received
 
       // result = receiver[f](arg);
       __ bind(&l_call);
-      __ ldr(r1, MemOperand(sp, kPointerSize));
-      __ ldr(r0, MemOperand(sp, 2 * kPointerSize));
-      Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+      __ ldr(load_receiver, MemOperand(sp, kPointerSize));
+      __ ldr(load_name, MemOperand(sp, 2 * kPointerSize));
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
+      }
+      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
       CallIC(ic, TypeFeedbackId::None());
       __ mov(r1, r0);
       __ str(r1, MemOperand(sp, 2 * kPointerSize));
@@ -2073,19 +2124,29 @@
 
       // if (!result.done) goto l_try;
       __ bind(&l_loop);
-      __ push(r0);                                       // save result
-      __ LoadRoot(r2, Heap::kdone_stringRootIndex);      // "done"
-      CallLoadIC(NOT_CONTEXTUAL);                        // result.done in r0
+      __ Move(load_receiver, r0);
+
+      __ push(load_receiver);                               // save result
+      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                           // r0=result.done
       Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
       CallIC(bool_ic);
       __ cmp(r0, Operand(0));
       __ b(eq, &l_try);
 
       // result.value
-      __ pop(r0);                                        // result
-      __ LoadRoot(r2, Heap::kvalue_stringRootIndex);     // "value"
-      CallLoadIC(NOT_CONTEXTUAL);                        // result.value in r0
-      context()->DropAndPlug(2, r0);                     // drop iter and g
+      __ pop(load_receiver);                                 // result
+      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                            // r0=result.value
+      context()->DropAndPlug(2, r0);                         // drop iter and g
       break;
     }
   }
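
The l_next/l_call/l_loop code above implements the delegating-yield protocol;
roughly, with method dispatch and suspension machinery elided:

    // received = undefined; f = "next";
    // for (;;) {
    //   result = iter[f](received);    // KeyedLoadIC + CallFunctionStub
    //   if (!result.done)              // LoadIC "done" + ToBooleanStub
    //     received = yield result;     // suspend at l_suspend, re-enter at
    //   else                           //   l_resume with the sent value
    //     break;                       // LoadIC "value": result.value is the
    // }                                //   value of the whole yield*
    // A throw into the generator flips f to "throw" via the l_catch handler.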
@@ -2096,7 +2157,7 @@
     Expression *value,
     JSGeneratorObject::ResumeMode resume_mode) {
   // The value stays in r0, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
   // is read to throw the value when the resumed generator is already closed.
   // r1 will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
@@ -2187,10 +2248,10 @@
   __ push(r2);
   __ b(&push_operand_holes);
   __ bind(&call_resume);
-  ASSERT(!result_register().is(r1));
+  DCHECK(!result_register().is(r1));
   __ Push(r1, result_register());
   __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
   // Not reached: the runtime call returns elsewhere.
   __ stop("not-reached");
 
@@ -2205,14 +2266,14 @@
   } else {
     // Throw the provided value.
     __ push(r0);
-    __ CallRuntime(Runtime::kHiddenThrow, 1);
+    __ CallRuntime(Runtime::kThrow, 1);
   }
   __ jmp(&done);
 
   // Throw error if we attempt to operate on a running generator.
   __ bind(&wrong_state);
   __ push(r1);
-  __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
 
   __ bind(&done);
   context()->Plug(result_register());
@@ -2230,7 +2291,7 @@
 
   __ bind(&gc_required);
   __ Push(Smi::FromInt(map->instance_size()));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ ldr(context_register(),
          MemOperand(fp, StandardFrameConstants::kContextOffset));
 
@@ -2239,7 +2300,7 @@
   __ pop(r2);
   __ mov(r3, Operand(isolate()->factory()->ToBoolean(done)));
   __ mov(r4, Operand(isolate()->factory()->empty_fixed_array()));
-  ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+  DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
   __ str(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ str(r4, FieldMemOperand(r0, JSObject::kPropertiesOffset));
   __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset));
@@ -2258,17 +2319,43 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  __ mov(r2, Operand(key->value()));
-  // Call load IC. It has arguments receiver and property name r0 and r2.
-  CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+
+  __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(r0);
+  VisitForStackValue(super_ref->this_var());
+  __ Push(key->value());
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
 }
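
A named super.key load therefore becomes a three-argument runtime call built
from the pushes above (schematic only; the runtime function's own parameter
names are not shown in this patch):

    // EmitLoadHomeObject(super_ref);  // r0 = method's [[HomeObject]],
    //                                 //   or throws kThrowNonMethodError
    // push(r0); push(this); push(key);
    // r0 = %LoadFromSuper(home_object, this, key);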
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
-  // Call keyed load IC. It has arguments key and receiver in r0 and r1.
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, prop->PropertyFeedbackId());
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallIC(ic);
+  } else {
+    CallIC(ic, prop->PropertyFeedbackId());
+  }
 }
 
 
@@ -2294,8 +2381,8 @@
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  BinaryOpICStub stub(isolate(), op, mode);
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
 
@@ -2370,16 +2457,16 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(r1);
-  BinaryOpICStub stub(isolate(), op, mode);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   context()->Plug(r0);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  ASSERT(expr->IsValidReferenceExpression());
+  DCHECK(expr->IsValidReferenceExpression());
 
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot.
@@ -2402,9 +2489,10 @@
     case NAMED_PROPERTY: {
       __ push(r0);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
-      __ mov(r1, r0);
-      __ pop(r0);  // Restore value.
-      __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
+      __ Move(StoreDescriptor::ReceiverRegister(), r0);
+      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      __ mov(StoreDescriptor::NameRegister(),
+             Operand(prop->key()->AsLiteral()->value()));
       CallStoreIC();
       break;
     }
@@ -2412,11 +2500,11 @@
       __ push(r0);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
-      __ mov(r1, r0);
-      __ Pop(r0, r2);  // r0 = restored value.
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ Move(StoreDescriptor::NameRegister(), r0);
+      __ Pop(StoreDescriptor::ValueRegister(),
+             StoreDescriptor::ReceiverRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic);
       break;
     }
@@ -2438,33 +2526,23 @@
 }
 
 
-void FullCodeGenerator::EmitCallStoreContextSlot(
-    Handle<String> name, StrictMode strict_mode) {
-  __ push(r0);  // Value.
-  __ mov(r1, Operand(name));
-  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
-  __ Push(cp, r1, r0);  // Context, name, strict mode.
-  __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
-}
-
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(r2, Operand(var->name()));
-    __ ldr(r1, GlobalObjectOperand());
+    __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+    __ ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
     CallStoreIC();
 
   } else if (op == Token::INIT_CONST_LEGACY) {
     // Const initializers need a write barrier.
-    ASSERT(!var->IsParameter());  // No const parameters.
+    DCHECK(!var->IsParameter());  // No const parameters.
     if (var->IsLookupSlot()) {
       __ push(r0);
       __ mov(r0, Operand(var->name()));
       __ Push(cp, r0);  // Context and name.
-      __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
     } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       Label skip;
       MemOperand location = VarOperand(var, r1);
       __ ldr(r2, location);
@@ -2476,30 +2554,32 @@
 
   } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
-    if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
-    } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-      Label assign;
-      MemOperand location = VarOperand(var, r1);
-      __ ldr(r3, location);
-      __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
-      __ b(ne, &assign);
-      __ mov(r3, Operand(var->name()));
-      __ push(r3);
-      __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
-      // Perform the assignment.
-      __ bind(&assign);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-    }
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, r1);
+    __ ldr(r3, location);
+    __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+    __ b(ne, &assign);
+    __ mov(r3, Operand(var->name()));
+    __ push(r3);
+    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    // Perform the assignment.
+    __ bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
 
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
     if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
+      // Assignment to var.
+      __ push(r0);  // Value.
+      __ mov(r1, Operand(var->name()));
+      __ mov(r0, Operand(Smi::FromInt(strict_mode())));
+      __ Push(cp, r1, r0);  // Context, name, strict mode.
+      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
     } else {
-      ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
       MemOperand location = VarOperand(var, r1);
       if (generate_debug_code_ && op == Token::INIT_LET) {
         // Check for an uninitialized let binding.
@@ -2517,14 +2597,14 @@
 void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
   Property* prop = expr->target()->AsProperty();
-  ASSERT(prop != NULL);
-  ASSERT(prop->key()->IsLiteral());
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
-  __ pop(r1);
-
+  __ mov(StoreDescriptor::NameRegister(),
+         Operand(prop->key()->AsLiteral()->value()));
+  __ pop(StoreDescriptor::ReceiverRegister());
   CallStoreIC(expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2537,11 +2617,10 @@
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ Pop(r2, r1);  // r1 = key.
+  __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(r0));
 
-  Handle<Code> ic = strict_mode() == SLOPPY
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
   CallIC(ic, expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2554,14 +2633,20 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForAccumulatorValue(expr->obj());
-    EmitNamedPropertyLoad(expr);
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), r0);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
     PrepareForBailoutForId(expr->LoadId(), TOS_REG);
     context()->Plug(r0);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
-    __ pop(r1);
+    __ Move(LoadDescriptor::NameRegister(), r0);
+    __ pop(LoadDescriptor::ReceiverRegister());
     EmitKeyedPropertyLoad(expr);
     context()->Plug(r0);
   }
@@ -2582,12 +2667,11 @@
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
 
-  CallIC::CallType call_type = callee->IsVariableProxy()
-      ? CallIC::FUNCTION
-      : CallIC::METHOD;
+  CallICState::CallType call_type =
+      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
 
   // Get the target function.
-  if (call_type == CallIC::FUNCTION) {
+  if (call_type == CallICState::FUNCTION) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
       PrepareForBailout(callee, NO_REGISTERS);
@@ -2597,8 +2681,9 @@
     __ Push(isolate()->factory()->undefined_value());
   } else {
     // Load the function from the receiver.
-    ASSERT(callee->IsProperty());
-    __ ldr(r0, MemOperand(sp, 0));
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
@@ -2611,6 +2696,45 @@
 }
 
 
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  const Register scratch = r1;
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(r0);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(r0);
+  __ ldr(scratch, MemOperand(sp, kPointerSize));
+  __ Push(scratch);
+  __ Push(r0);
+  __ Push(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ str(r0, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
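
The push/ldr/str shuffle above is easier to follow as plain stack operations. A self-contained model, with strings standing in for tagged values:

    #include <string>
    #include <vector>
    using Value = std::string;
    // Mirrors the emitted sequence; comments name the instructions above.
    void SuperCallStackModel(std::vector<Value>& stack, Value home_object,
                             Value receiver, Value key, Value target) {
      stack.push_back(home_object);       // Push(r0) after EmitLoadHomeObject
      stack.push_back(receiver);          // Push(r0) after this_var
      Value dup = stack[stack.size() - 2];
      stack.push_back(dup);               // ldr scratch,[sp,#4]; Push(scratch)
      stack.push_back(receiver);          // Push(r0)
      stack.push_back(key);               // Push(key->value())
      stack.resize(stack.size() - 3);     // kLoadFromSuper pops its 3 args...
      stack[stack.size() - 2] = target;   // ...str r0,[sp,#4] installs the
    }                                     // target: [target, receiver] on top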
+
+
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2620,8 +2744,9 @@
   Expression* callee = expr->expression();
 
   // Load the function from the receiver.
-  ASSERT(callee->IsProperty());
-  __ ldr(r1, MemOperand(sp, 0));
+  DCHECK(callee->IsProperty());
+  __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+  __ Move(LoadDescriptor::NameRegister(), r0);
   EmitKeyedPropertyLoad(callee->AsProperty());
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
@@ -2630,11 +2755,11 @@
   __ push(ip);
   __ str(r0, MemOperand(sp, kPointerSize));
 
-  EmitCall(expr, CallIC::METHOD);
+  EmitCall(expr, CallICState::METHOD);
 }
 
 
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2662,13 +2787,16 @@
 
 
 void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
-  // r4: copy of the first argument or undefined if it doesn't exist.
+  // r5: copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
-    __ ldr(r4, MemOperand(sp, arg_count * kPointerSize));
+    __ ldr(r5, MemOperand(sp, arg_count * kPointerSize));
   } else {
-    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
   }
 
+  // r4: the enclosing function.
+  __ ldr(r4, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
   // r3: the receiver of the enclosing function.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ ldr(r3, MemOperand(fp, receiver_offset * kPointerSize));
@@ -2680,8 +2808,9 @@
   __ mov(r1, Operand(Smi::FromInt(scope()->start_position())));
 
   // Do the runtime call.
+  __ Push(r5);
   __ Push(r4, r3, r2, r1);
-  __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
 }
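
The runtime function now receives six values rather than five; the enclosing function in r4 is the new one. Summarized as a struct in push order (field meanings inferred from the loads above; not a real V8 type):

    struct ResolveEvalArgsSketch {
      void* callee;              // already on the stack below these pushes
      void* first_arg;           // r5: first argument, or undefined
      void* enclosing_function;  // r4: from the frame's function slot (new)
      void* receiver;            // r3: the enclosing function's receiver
      int strict_mode;           // r2, smi-tagged
      int start_position;        // r1, smi-tagged
    };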
 
 
@@ -2746,16 +2875,16 @@
     { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
     }
 
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in r0)
     // and the object holding it (returned in r1).
-    ASSERT(!context_register().is(r2));
+    DCHECK(!context_register().is(r2));
     __ mov(r2, Operand(proxy->name()));
     __ Push(context_register(), r2);
-    __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
     __ Push(r0, r1);  // Function, receiver.
 
     // If fast case code has been generated, emit code to push the
@@ -2779,16 +2908,23 @@
     EmitCall(expr);
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(property->obj());
-    }
-    if (property->key()->IsPropertyName()) {
-      EmitCallWithLoadIC(expr);
+    bool is_named_call = property->key()->IsPropertyName();
+    // super.x() is handled in EmitSuperCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithLoadIC(expr, property->key());
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
     }
   } else {
-    ASSERT(call_type == Call::OTHER_CALL);
+    DCHECK(call_type == Call::OTHER_CALL);
     // Call to an arbitrary expression not handled specially above.
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(callee);
@@ -2801,7 +2937,7 @@
 
 #ifdef DEBUG
   // RecordJSReturnSite should have been called.
-  ASSERT(expr->return_is_recorded_);
+  DCHECK(expr->return_is_recorded_);
 #endif
 }
 
@@ -2835,7 +2971,7 @@
   // Record call targets in unoptimized code.
   if (FLAG_pretenuring_call_new) {
     EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
-    ASSERT(expr->AllocationSiteFeedbackSlot() ==
+    DCHECK(expr->AllocationSiteFeedbackSlot() ==
            expr->CallNewFeedbackSlot() + 1);
   }
 
@@ -2851,7 +2987,7 @@
 
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2872,7 +3008,7 @@
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2893,7 +3029,7 @@
 
 void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2926,7 +3062,7 @@
 
 void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2948,7 +3084,7 @@
 
 void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2973,7 +3109,7 @@
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3061,7 +3197,7 @@
 
 void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3083,7 +3219,7 @@
 
 void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3109,7 +3245,7 @@
 
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3131,7 +3267,7 @@
 
 void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3153,7 +3289,7 @@
 
 
 void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -3182,7 +3318,7 @@
 
 void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
   VisitForStackValue(args->at(0));
@@ -3206,7 +3342,7 @@
 
 void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in r1 and the formal
   // parameter count in r0.
@@ -3220,7 +3356,7 @@
 
 
 void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   // Get the number of formal parameters.
   __ mov(r0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -3240,7 +3376,7 @@
 
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
   VisitForAccumulatorValue(args->at(0));
@@ -3280,7 +3416,7 @@
 
   // Functions have class 'Function'.
   __ bind(&function);
-  __ LoadRoot(r0, Heap::kfunction_class_stringRootIndex);
+  __ LoadRoot(r0, Heap::kFunction_stringRootIndex);
   __ jmp(&done);
 
   // Objects with a non-function constructor have class 'Object'.
@@ -3303,7 +3439,7 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3316,7 +3452,7 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 4);
+  DCHECK(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3328,7 +3464,7 @@
 
 void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
   Label done;
@@ -3345,8 +3481,8 @@
 
 void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  DCHECK(args->length() == 2);
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -3384,7 +3520,7 @@
   }
 
   __ bind(&not_date_object);
-  __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
   __ bind(&done);
   context()->Plug(r0);
 }
@@ -3392,15 +3528,15 @@
 
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = r0;
   Register index = r1;
   Register value = r2;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ Pop(index, value);
 
   if (FLAG_debug_code) {
@@ -3425,15 +3561,15 @@
 
 void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = r0;
   Register index = r1;
   Register value = r2;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ Pop(index, value);
 
   if (FLAG_debug_code) {
@@ -3461,7 +3597,7 @@
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   MathPowStub stub(isolate(), MathPowStub::ON_STACK);
@@ -3472,7 +3608,7 @@
 
 void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ pop(r1);  // r0 = value. r1 = object.
@@ -3500,7 +3636,7 @@
 
 void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(args->length(), 1);
+  DCHECK_EQ(args->length(), 1);
   // Load the argument into r0 and call the stub.
   VisitForAccumulatorValue(args->at(0));
 
@@ -3512,7 +3648,7 @@
 
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
   Label done;
@@ -3530,7 +3666,7 @@
 
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3575,7 +3711,7 @@
 
 void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3622,7 +3758,7 @@
 
 void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3635,7 +3771,7 @@
 
 void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3647,7 +3783,7 @@
 
 void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() >= 2);
+  DCHECK(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
   for (int i = 0; i < arg_count + 1; i++) {
@@ -3680,7 +3816,7 @@
 void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(2));
@@ -3693,8 +3829,8 @@
 
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  DCHECK_EQ(2, args->length());
+  DCHECK_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
@@ -3733,7 +3869,7 @@
   __ bind(&not_found);
   // Call runtime to perform the lookup.
   __ Push(cache, key);
-  __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ bind(&done);
   context()->Plug(r0);
@@ -3762,7 +3898,7 @@
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
   __ AssertString(r0);
@@ -3774,12 +3910,12 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator, non_trivial_array,
       not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
 
@@ -3821,7 +3957,7 @@
   __ ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
   array = no_reg;  // End of array's live range.
 
-  // Check that all array elements are sequential ASCII strings, and
+  // Check that all array elements are sequential one-byte strings, and
   // accumulate the sum of their lengths, as a smi-encoded value.
   __ mov(string_length, Operand::Zero());
   __ add(element,
@@ -3837,14 +3973,14 @@
   //   elements_end: Array end.
   if (generate_debug_code_) {
     __ cmp(array_length, Operand::Zero());
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
   }
   __ bind(&loop);
   __ ldr(string, MemOperand(element, kPointerSize, PostIndex));
   __ JumpIfSmi(string, &bailout);
   __ ldr(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
   __ ldr(scratch, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
   __ add(string_length, string_length, Operand(scratch), SetCC);
   __ b(vs, &bailout);
@@ -3865,11 +4001,11 @@
   //   string_length: Sum of string lengths (smi).
   //   elements: FixedArray of strings.
 
-  // Check that the separator is a flat ASCII string.
+  // Check that the separator is a flat one-byte string.
   __ JumpIfSmi(separator, &bailout);
   __ ldr(scratch, FieldMemOperand(separator, HeapObject::kMapOffset));
   __ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch, scratch, &bailout);
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch, scratch, &bailout);
 
   // Add (separator length times array_length) - separator length to the
   // string_length to get the length of the result string. array_length is not
@@ -3898,12 +4034,10 @@
   //   separator: Separator string
   //   string_length: Length of result string (not smi)
   //   array_length: Length of the array.
-  __ AllocateAsciiString(result,
-                         string_length,
-                         scratch,
-                         string,  // used as scratch
-                         elements_end,  // used as scratch
-                         &bailout);
+  __ AllocateOneByteString(result, string_length, scratch,
+                           string,        // used as scratch
+                           elements_end,  // used as scratch
+                           &bailout);
   // Prepare for looping. Set up elements_end to end of the array. Set
   // result_pos to the position of the result where to write the first
   // character.
@@ -3937,12 +4071,12 @@
   __ CopyBytes(string, result_pos, string_length, scratch);
   __ cmp(element, elements_end);
   __ b(lt, &empty_separator_loop);  // End while (element < elements_end).
-  ASSERT(result.is(r0));
+  DCHECK(result.is(r0));
   __ b(&done);
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ASCII character value.
+  // Replace separator with its one-byte character value.
   __ ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator
@@ -3953,7 +4087,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ASCII char (in lower byte).
+  //   separator: Single separator one-byte char (in lower byte).
 
   // Copy the separator character to the result.
   __ strb(separator, MemOperand(result_pos, 1, PostIndex));
@@ -3969,7 +4103,7 @@
   __ CopyBytes(string, result_pos, string_length, scratch);
   __ cmp(element, elements_end);
   __ b(lt, &one_char_separator_loop);  // End while (element < elements_end).
-  ASSERT(result.is(r0));
+  DCHECK(result.is(r0));
   __ b(&done);
 
   // Long separator case (separator is more than one character). Entry is at the
@@ -3999,7 +4133,7 @@
   __ CopyBytes(string, result_pos, string_length, scratch);
   __ cmp(element, elements_end);
   __ b(lt, &long_separator_loop);  // End while (element < elements_end).
-  ASSERT(result.is(r0));
+  DCHECK(result.is(r0));
   __ b(&done);
 
   __ bind(&bailout);
@@ -4009,6 +4143,17 @@
 }
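
At the C++ level, the fast path above amounts to: verify every element is a flat one-byte string, compute the final length up front, allocate once, then copy bytes, bailing out to the generic runtime on any surprise. A runnable sketch of that shape:

    #include <cstddef>
    #include <string>
    #include <vector>
    // Assumes the caller has already ruled out non-string elements, which is
    // what the bailout checks in the assembly are for.
    std::string FastOneByteJoinSketch(const std::vector<std::string>& elems,
                                      const std::string& sep) {
      if (elems.empty()) return std::string();
      std::size_t total = sep.size() * (elems.size() - 1);
      for (const std::string& s : elems) total += s.size();
      std::string result;
      result.reserve(total);  // one allocation, like AllocateOneByteString
      for (std::size_t i = 0; i < elems.size(); ++i) {
        if (i != 0) result += sep;  // empty / one-char / long separator loops
        result += elems[i];
      }
      return result;
    }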
 
 
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
+  __ mov(ip, Operand(debug_is_active));
+  __ ldrb(r0, MemOperand(ip));
+  __ SmiTag(r0);
+  context()->Plug(r0);
+}
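
The new intrinsic just reads a one-byte flag at a fixed external address and returns it smi-tagged. Its effect in plain C++ (the shift models 32-bit smi tagging):

    #include <cstdint>
    int32_t DebugIsActiveAsSmi(const uint8_t* debug_is_active_address) {
      int32_t value = *debug_is_active_address;  // ldrb r0, [ip]
      return value << 1;                         // SmiTag(r0), 32-bit targets
    }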
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   if (expr->function() != NULL &&
       expr->function()->intrinsic_type == Runtime::INLINE) {
@@ -4023,13 +4168,20 @@
 
   if (expr->is_jsruntime()) {
     // Push the builtins object as the receiver.
-    __ ldr(r0, GlobalObjectOperand());
-    __ ldr(r0, FieldMemOperand(r0, GlobalObject::kBuiltinsOffset));
-    __ push(r0);
+    Register receiver = LoadDescriptor::ReceiverRegister();
+    __ ldr(receiver, GlobalObjectOperand());
+    __ ldr(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
+    __ push(receiver);
 
     // Load the function from the receiver.
-    __ mov(r2, Operand(expr->name()));
-    CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
+      CallLoadIC(NOT_CONTEXTUAL);
+    } else {
+      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    }
 
     // Push the target function under the receiver.
     __ ldr(ip, MemOperand(sp, 0));
@@ -4083,7 +4235,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode() == SLOPPY || var->is_this());
+        DCHECK(strict_mode() == SLOPPY || var->is_this());
         if (var->IsUnallocated()) {
           __ ldr(r2, GlobalObjectOperand());
           __ mov(r1, Operand(var->name()));
@@ -4098,10 +4250,10 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          ASSERT(!context_register().is(r2));
+          DCHECK(!context_register().is(r2));
           __ mov(r2, Operand(var->name()));
           __ Push(context_register(), r2);
-          __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
           context()->Plug(r0);
         }
       } else {
@@ -4139,7 +4291,7 @@
         // for control and plugging the control flow into the context,
         // because we need to prepare a pair of extra administrative AST ids
         // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
         Label materialize_true, materialize_false, done;
         VisitForControl(expr->expression(),
                         &materialize_false,
@@ -4176,7 +4328,7 @@
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  ASSERT(expr->expression()->IsValidReferenceExpression());
+  DCHECK(expr->expression()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -4195,7 +4347,7 @@
 
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
-    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
     EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
@@ -4205,15 +4357,16 @@
       __ push(ip);
     }
     if (assign_type == NAMED_PROPERTY) {
-      // Put the object both on the stack and in the accumulator.
-      VisitForAccumulatorValue(prop->obj());
-      __ push(r0);
+      // Put the object both on the stack and in the register.
+      VisitForStackValue(prop->obj());
+      __ ldr(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
-      __ ldr(r1, MemOperand(sp, 0));
-      __ push(r0);
+      VisitForStackValue(prop->key());
+      __ ldr(LoadDescriptor::ReceiverRegister(),
+             MemOperand(sp, 1 * kPointerSize));
+      __ ldr(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4293,8 +4446,9 @@
   // Record position before stub call.
   SetSourcePosition(expr->position());
 
-  BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
-  CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+  CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4321,8 +4475,9 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(r2, Operand(prop->key()->AsLiteral()->value()));
-      __ pop(r1);
+      __ mov(StoreDescriptor::NameRegister(),
+             Operand(prop->key()->AsLiteral()->value()));
+      __ pop(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4335,10 +4490,10 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Pop(r2, r1);  // r1 = key. r2 = receiver.
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ Pop(StoreDescriptor::ReceiverRegister(),
+             StoreDescriptor::NameRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic, expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4355,13 +4510,17 @@
 
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
-  ASSERT(!context()->IsEffect());
-  ASSERT(!context()->IsTest());
+  DCHECK(!context()->IsEffect());
+  DCHECK(!context()->IsTest());
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "[ Global variable");
-    __ ldr(r0, GlobalObjectOperand());
-    __ mov(r2, Operand(proxy->name()));
+    __ ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    __ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+    }
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     CallLoadIC(NOT_CONTEXTUAL);
@@ -4373,12 +4532,12 @@
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
     __ mov(r0, Operand(proxy->name()));
     __ Push(cp, r0);
-    __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
     PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
@@ -4429,10 +4588,6 @@
     __ b(eq, if_true);
     __ CompareRoot(r0, Heap::kFalseValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(check, factory->null_string())) {
-    __ CompareRoot(r0, Heap::kNullValueRootIndex);
-    Split(eq, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
     __ CompareRoot(r0, Heap::kUndefinedValueRootIndex);
     __ b(eq, if_true);
@@ -4452,10 +4607,8 @@
     Split(eq, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->object_string())) {
     __ JumpIfSmi(r0, if_false);
-    if (!FLAG_harmony_typeof) {
-      __ CompareRoot(r0, Heap::kNullValueRootIndex);
-      __ b(eq, if_true);
-    }
+    __ CompareRoot(r0, Heap::kNullValueRootIndex);
+    __ b(eq, if_true);
     // Check for JS objects => true.
     __ CompareObjectType(r0, r0, r1, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
     __ b(lt, if_false);
@@ -4530,7 +4683,7 @@
 
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
-      Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4591,7 +4744,7 @@
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ str(value, MemOperand(fp, frame_offset));
 }
 
@@ -4616,7 +4769,7 @@
     // code.  Fetch it from the context.
     __ ldr(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
   } else {
-    ASSERT(declaration_scope->is_function_scope());
+    DCHECK(declaration_scope->is_function_scope());
     __ ldr(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   }
   __ push(ip);
@@ -4627,7 +4780,7 @@
 // Non-local control flow support.
 
 void FullCodeGenerator::EnterFinallyBlock() {
-  ASSERT(!result_register().is(r1));
+  DCHECK(!result_register().is(r1));
   // Store result register while executing finally block.
   __ push(result_register());
   // Cook return address in link register to stack (smi encoded Code* delta)
@@ -4661,7 +4814,7 @@
 
 
 void FullCodeGenerator::ExitFinallyBlock() {
-  ASSERT(!result_register().is(r1));
+  DCHECK(!result_register().is(r1));
   // Restore pending message from stack.
   __ pop(r1);
   ExternalReference pending_message_script =
@@ -4727,12 +4880,41 @@
 static Address GetInterruptImmediateLoadAddress(Address pc) {
   Address load_address = pc - 2 * Assembler::kInstrSize;
   if (!FLAG_enable_ool_constant_pool) {
-    ASSERT(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
-  } else if (Assembler::IsMovT(Memory::int32_at(load_address))) {
+    DCHECK(Assembler::IsLdrPcImmediateOffset(Memory::int32_at(load_address)));
+  } else if (Assembler::IsLdrPpRegOffset(Memory::int32_at(load_address))) {
+    // This is an extended constant pool lookup.
+    if (CpuFeatures::IsSupported(ARMv7)) {
+      load_address -= 2 * Assembler::kInstrSize;
+      DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
+      DCHECK(Assembler::IsMovT(
+          Memory::int32_at(load_address + Assembler::kInstrSize)));
+    } else {
+      load_address -= 4 * Assembler::kInstrSize;
+      DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
+      DCHECK(Assembler::IsOrrImmed(
+          Memory::int32_at(load_address + Assembler::kInstrSize)));
+      DCHECK(Assembler::IsOrrImmed(
+          Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
+      DCHECK(Assembler::IsOrrImmed(
+          Memory::int32_at(load_address + 3 * Assembler::kInstrSize)));
+    }
+  } else if (CpuFeatures::IsSupported(ARMv7) &&
+             Assembler::IsMovT(Memory::int32_at(load_address))) {
+    // This is a movw / movt immediate load.
     load_address -= Assembler::kInstrSize;
-    ASSERT(Assembler::IsMovW(Memory::int32_at(load_address)));
+    DCHECK(Assembler::IsMovW(Memory::int32_at(load_address)));
+  } else if (!CpuFeatures::IsSupported(ARMv7) &&
+             Assembler::IsOrrImmed(Memory::int32_at(load_address))) {
+    // This is a mov / orr immediate load.
+    load_address -= 3 * Assembler::kInstrSize;
+    DCHECK(Assembler::IsMovImmed(Memory::int32_at(load_address)));
+    DCHECK(Assembler::IsOrrImmed(
+        Memory::int32_at(load_address + Assembler::kInstrSize)));
+    DCHECK(Assembler::IsOrrImmed(
+        Memory::int32_at(load_address + 2 * Assembler::kInstrSize)));
   } else {
-    ASSERT(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
+    // This is a small constant pool lookup.
+    DCHECK(Assembler::IsLdrPpImmediateOffset(Memory::int32_at(load_address)));
   }
   return load_address;
 }
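
The decoder above walks backwards over three materialization patterns because a 32-bit address never fits in one ARM instruction: ARMv7 splits it across movw/movt (16 bits each), pre-ARMv7 across a mov and three orrs (one byte each). A runnable illustration of the two splits (encodings simplified; the real instructions carry these chunks in their immediate fields):

    #include <cstdint>
    #include <cstdio>
    int main() {
      uint32_t address = 0xCAFEBABEu;
      // ARMv7: two instructions, 16 bits of immediate each.
      uint32_t v7 = (address & 0x0000FFFFu)   // movw ip, #lo16
                  | (address & 0xFFFF0000u);  // movt ip, #hi16
      // Pre-ARMv7: four instructions, one byte at a time.
      uint32_t v6 = (address & 0x000000FFu);  // mov ip, #b0
      v6 |= (address & 0x0000FF00u);          // orr ip, ip, #b1
      v6 |= (address & 0x00FF0000u);          // orr ip, ip, #b2
      v6 |= (address & 0xFF000000u);          // orr ip, ip, #b3
      std::printf("%08x %08x\n", v7, v6);     // both print cafebabe
      return 0;
    }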
@@ -4742,24 +4924,34 @@
                             Address pc,
                             BackEdgeState target_state,
                             Code* replacement_code) {
-  static const int kInstrSize = Assembler::kInstrSize;
   Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
-  Address branch_address = pc_immediate_load_address - kInstrSize;
+  Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
   CodePatcher patcher(branch_address, 1);
   switch (target_state) {
     case INTERRUPT:
     {
       //  <decrement profiling counter>
       //   bpl ok
-      //   ; load interrupt stub address into ip - either of:
-      //   ldr ip, [pc/pp, <constant pool offset>]  |   movw ip, <immed low>
-      //                                            |   movt ip, <immed high>
+      //   ; load interrupt stub address into ip - either of (for ARMv7):
+      //   ; <small cp load>      |  <extended cp load> |  <immediate load>
+      //   ldr ip, [pc/pp, #imm]  |   movw ip, #imm     |   movw ip, #imm
+      //                          |   movt ip, #imm     |   movt ip, #imm
+      //                          |   ldr  ip, [pp, ip]
+      //   ; or (for ARMv6):
+      //   ; <small cp load>      |  <extended cp load> |  <immediate load>
+      //   ldr ip, [pc/pp, #imm]  |   mov ip, #imm      |   mov ip, #imm
+      //                          |   orr ip, ip, #imm  |   orr ip, ip, #imm
+      //                          |   orr ip, ip, #imm  |   orr ip, ip, #imm
+      //                          |   orr ip, ip, #imm  |   orr ip, ip, #imm
       //   blx ip
+      //  <reset profiling counter>
       //  ok-label
 
-      // Calculate branch offet to the ok-label - this is the difference between
-      // the branch address and |pc| (which points at <blx ip>) plus one instr.
-      int branch_offset = pc + kInstrSize - branch_address;
+      // Calculate the branch offset to the ok-label - this is the difference
+      // between the branch address and |pc| (which points at <blx ip>) plus
+      // the length of the profiling counter reset sequence.
+      int branch_offset = pc - Instruction::kPCReadOffset - branch_address +
+                          kProfileCounterResetSequenceLength;
       patcher.masm()->b(branch_offset, pl);
       break;
     }
@@ -4767,10 +4959,19 @@
     case OSR_AFTER_STACK_CHECK:
       //  <decrement profiling counter>
       //   mov r0, r0 (NOP)
-      //   ; load on-stack replacement address into ip - either of:
-      //   ldr ip, [pc/pp, <constant pool offset>]  |   movw ip, <immed low>
-      //                                            |   movt ip, <immed high>
+      //   ; load on-stack replacement address into ip - either of (for ARMv7):
+      //   ; <small cp load>      |  <extended cp load> |  <immediate load>
+      //   ldr ip, [pc/pp, #imm]  |   movw ip, #imm     |   movw ip, #imm
+      //                          |   movt ip, #imm     |   movt ip, #imm
+      //                          |   ldr  ip, [pp, ip]
+      //   ; or (for ARMv6):
+      //   ; <small cp load>      |  <extended cp load> |  <immediate load>
+      //   ldr ip, [pc/pp, #imm]  |   mov ip, #imm      |   mov ip, #imm
+      //                          |   orr ip, ip, #imm  |   orr ip, ip, #imm
+      //                          |   orr ip, ip, #imm  |   orr ip, ip, #imm
+      //                          |   orr ip, ip, #imm  |   orr ip, ip, #imm
       //   blx ip
+      //  <reset profiling counter>
       //  ok-label
       patcher.masm()->nop();
       break;
@@ -4789,28 +4990,27 @@
     Isolate* isolate,
     Code* unoptimized_code,
     Address pc) {
-  static const int kInstrSize = Assembler::kInstrSize;
-  ASSERT(Memory::int32_at(pc - kInstrSize) == kBlxIp);
+  DCHECK(Assembler::IsBlxIp(Memory::int32_at(pc - Assembler::kInstrSize)));
 
   Address pc_immediate_load_address = GetInterruptImmediateLoadAddress(pc);
-  Address branch_address = pc_immediate_load_address - kInstrSize;
+  Address branch_address = pc_immediate_load_address - Assembler::kInstrSize;
   Address interrupt_address = Assembler::target_address_at(
       pc_immediate_load_address, unoptimized_code);
 
   if (Assembler::IsBranch(Assembler::instr_at(branch_address))) {
-    ASSERT(interrupt_address ==
+    DCHECK(interrupt_address ==
            isolate->builtins()->InterruptCheck()->entry());
     return INTERRUPT;
   }
 
-  ASSERT(Assembler::IsNop(Assembler::instr_at(branch_address)));
+  DCHECK(Assembler::IsNop(Assembler::instr_at(branch_address)));
 
   if (interrupt_address ==
       isolate->builtins()->OnStackReplacement()->entry()) {
     return ON_STACK_REPLACEMENT;
   }
 
-  ASSERT(interrupt_address ==
+  DCHECK(interrupt_address ==
          isolate->builtins()->OsrAfterStackCheck()->entry());
   return OSR_AFTER_STACK_CHECK;
 }
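
The state detection reduces to a small decision tree: an intact branch means the back edge is unpatched, otherwise the patched-in call target distinguishes the two OSR variants. A compilable sketch of that logic:

    #include <cstdint>
    enum BackEdgeStateSketch { INTERRUPT, ON_STACK_REPLACEMENT,
                               OSR_AFTER_STACK_CHECK };
    // is_branch: the instruction before the load is still "bpl ok".
    // target: the address the load sequence materializes into ip.
    BackEdgeStateSketch GetBackEdgeStateSketch(bool is_branch, uintptr_t target,
                                               uintptr_t osr_entry) {
      if (is_branch) return INTERRUPT;  // unpatched: still skips the call
      if (target == osr_entry) return ON_STACK_REPLACEMENT;
      return OSR_AFTER_STACK_CHECK;     // only remaining case (DCHECKed above)
    }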
diff --git a/src/arm/ic-arm.cc b/src/arm/ic-arm.cc
deleted file mode 100644
index 1028f8f..0000000
--- a/src/arm/ic-arm.cc
+++ /dev/null
@@ -1,1321 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/arm/assembler-arm.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/disasm.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
-                                            Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ b(eq, global_object);
-  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
-  __ b(eq, global_object);
-  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
-  __ b(eq, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register elements,
-                                                Register t0,
-                                                Register t1,
-                                                Label* miss) {
-  // Register usage:
-  //   receiver: holds the receiver on entry and is unchanged.
-  //   elements: holds the property dictionary on fall through.
-  // Scratch registers:
-  //   t0: used to holds the receiver map.
-  //   t0: used to hold the receiver map.
-  //   t1: used to hold the receiver instance type, receiver bit mask and
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss);
-
-  // Check that the receiver is a valid JS object.
-  __ CompareObjectType(receiver, t0, t1, FIRST_SPEC_OBJECT_TYPE);
-  __ b(lt, miss);
-
-  // If this assert fails, we have to check upper bound too.
-  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
-  GenerateGlobalInstanceTypeCheck(masm, t1, miss);
-
-  // Check that the global object does not require access checks.
-  __ ldrb(t1, FieldMemOperand(t0, Map::kBitFieldOffset));
-  __ tst(t1, Operand((1 << Map::kIsAccessCheckNeeded) |
-                     (1 << Map::kHasNamedInterceptor)));
-  __ b(ne, miss);
-
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ ldr(t1, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-  __ cmp(t1, ip);
-  __ b(ne, miss);
-}
-
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as elements or name clobbering
-//           one of these in the case of not jumping to the miss label.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss,
-                                   Register elements,
-                                   Register name,
-                                   Register result,
-                                   Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch1,
-                                                   scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset = NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ b(ne, miss);
-
-  // Get the value at the masked, scaled index and return.
-  __ ldr(result,
-         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
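
For reference while reading the removals: both dictionary helpers sit on top of NameDictionaryLookupStub's probing, which is open addressing over a power-of-two table. A simplified, runnable sketch of such a probe (linear here for brevity; V8's NameDictionary actually probes quadratically):

    #include <cstdint>
    #include <string>
    #include <vector>
    struct EntrySketch { std::string key; int value; };
    // Returns the entry index holding 'key', or -1 on a miss.
    // Precondition: table.size() is a power of two.
    int ProbeSketch(const std::vector<EntrySketch>& table,
                    const std::string& key, uint32_t hash) {
      uint32_t mask = static_cast<uint32_t>(table.size()) - 1;
      for (uint32_t i = 0; i < table.size(); ++i) {
        uint32_t index = (hash + i) & mask;
        if (table[index].key == key) return static_cast<int>(index);
        if (table[index].key.empty()) return -1;  // empty slot: not present
      }
      return -1;
    }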
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
-                                    Label* miss,
-                                    Register elements,
-                                    Register name,
-                                    Register value,
-                                    Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch1,
-                                                   scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index
-  const int kElementsStartOffset = NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
-  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
-  __ b(ne, miss);
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ str(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mov(scratch1, value);
-  __ RecordWrite(
-      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register map,
-                                           Register scratch,
-                                           int interceptor_bit,
-                                           Label* slow) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ tst(scratch,
-         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ b(ne, slow);
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing into string
-  // objects work as intended.
-  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
-  __ b(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
-                                  Register receiver,
-                                  Register key,
-                                  Register elements,
-                                  Register scratch1,
-                                  Register scratch2,
-                                  Register result,
-                                  Label* not_fast_array,
-                                  Label* out_of_range) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // elements - holds the elements of the receiver on exit.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  // scratch1 - used to hold elements map and elements length.
-  //            Holds the elements map if not_fast_array branch is taken.
-  //
-  // scratch2 - used to hold the loaded value.
-
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  if (not_fast_array != NULL) {
-    // Check that the object is in fast mode and writable.
-    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
-    __ cmp(scratch1, ip);
-    __ b(ne, not_fast_array);
-  } else {
-    __ AssertFastElements(elements);
-  }
-  // Check that the key (index) is within bounds.
-  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(scratch1));
-  __ b(hs, out_of_range);
-  // Fast case: Do the load.
-  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(scratch2, ip);
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ b(eq, out_of_range);
-  __ mov(result, scratch2);
-}
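
The register contract above boils down to a bounds check plus a hole check. A plain C++ model of the fast path (smi scaling and the hole sentinel are abstracted away, so this is a sketch, not the real representation):

#include <cstddef>
#include <cstdint>
#include <vector>

// Sentinel standing in for the_hole; the real value is a heap root.
constexpr uintptr_t kTheHoleSketch = ~uintptr_t{0};

// Returns false when the caller must bail out (out of range, or the
// slot holds the hole and the prototype chain must be consulted).
bool FastArrayLoad(const std::vector<uintptr_t>& elements, size_t key,
                   uintptr_t* result) {
  if (key >= elements.size()) return false;   // cmp key, length; b hs
  uintptr_t value = elements[key];            // ldr via smi-scaled address
  if (value == kTheHoleSketch) return false;  // b eq, out_of_range
  *result = value;
  return true;
}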
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
-                                 Register key,
-                                 Register map,
-                                 Register hash,
-                                 Label* index_string,
-                                 Label* not_unique) {
-  // The key is not a smi.
-  Label unique;
-  // Is it a name?
-  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
-  __ b(hi, not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ b(eq, &unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
-  __ b(eq, index_string);
-
-  // Is the string internalized? We know it's a string, so a single
-  // bit test is enough.
-  // map: key map
-  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ tst(hash, Operand(kIsNotInternalizedMask));
-  __ b(ne, not_unique);
-
-  __ bind(&unique);
-}
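
Two of the checks above are plain bit tests on the hash field and the instance type. A sketch, with the mask values assumed for illustration:

#include <cstdint>

constexpr uint32_t kContainsCachedArrayIndexMaskSketch = 1u << 30;  // assumed
constexpr uint32_t kIsNotInternalizedMaskSketch = 1u << 6;          // assumed

// "tst hash, kContainsCachedArrayIndexMask; b eq, index_string":
// a cleared bit means the numeric index is cached in the hash field.
bool HasCachedArrayIndex(uint32_t hash_field) {
  return (hash_field & kContainsCachedArrayIndexMaskSketch) == 0;
}

// With kInternalizedTag == 0, internalized strings are exactly those
// whose "not internalized" bit is clear, so a single tst suffices.
bool IsInternalized(uint32_t instance_type) {
  return (instance_type & kIsNotInternalizedMaskSketch) == 0;
}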
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  //  -- r0    : receiver
-  // -----------------------------------
-
-  // Probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, r0, r2, r3, r4, r5, r6);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  //  -- r0    : receiver
-  // -----------------------------------
-  Label miss, slow;
-
-  GenerateNameDictionaryReceiverCheck(masm, r0, r1, r3, r4, &miss);
-
-  // r1: elements
-  GenerateDictionaryLoad(masm, &slow, r1, r2, r0, r3, r4);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-
-  // Cache miss: Jump to runtime.
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  //  -- r0    : receiver
-  // -----------------------------------
-  Isolate* isolate = masm->isolate();
-
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
-
-  __ mov(r3, r0);
-  __ Push(r3, r2);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r2    : name
-  //  -- lr    : return address
-  //  -- r0    : receiver
-  // -----------------------------------
-
-  __ mov(r3, r0);
-  __ Push(r3, r2);
-
-  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
-                                                Register object,
-                                                Register key,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                Label* unmapped_case,
-                                                Label* slow_case) {
-  Heap* heap = masm->isolate()->heap();
-
-  // Check that the receiver is a JSObject. Because of the map check
-  // later, we do not need to check for interceptors or whether it
-  // requires access checks.
-  __ JumpIfSmi(object, slow_case);
-  // Check that the object is some kind of JSObject.
-  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
-  __ b(lt, slow_case);
-
-  // Check that the key is a positive smi.
-  __ tst(key, Operand(0x80000001));
-  __ b(ne, slow_case);
-
-  // Load the elements into scratch1 and check its map.
-  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
-  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
-  // Check if element is in the range of mapped arguments. If not, jump
-  // to the unmapped lookup with the parameter map in scratch1.
-  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
-  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
-  __ cmp(key, Operand(scratch2));
-  __ b(cs, unmapped_case);
-
-  // Load element index and check whether it is the hole.
-  const int kOffset =
-      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
-  __ mov(scratch3, Operand(kPointerSize >> 1));
-  __ mul(scratch3, key, scratch3);
-  __ add(scratch3, scratch3, Operand(kOffset));
-
-  __ ldr(scratch2, MemOperand(scratch1, scratch3));
-  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  __ cmp(scratch2, scratch3);
-  __ b(eq, unmapped_case);
-
-  // Load value from context and return it. We can reuse scratch1 because
-  // we do not jump to the unmapped lookup (which requires the parameter
-  // map in scratch1).
-  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
-  __ mov(scratch3, Operand(kPointerSize >> 1));
-  __ mul(scratch3, scratch2, scratch3);
-  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
-  return MemOperand(scratch1, scratch3);
-}
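
What the two lookups implement is the sloppy-mode arguments aliasing scheme: the parameter map stores the context at entry 0 and the backing store at entry 1 (hence the two-slot kOffset), followed by one context slot index per still-aliased parameter, with the hole marking deleted bindings. A rough C++ model, object layout assumed:

#include <cstddef>
#include <optional>
#include <vector>

struct SloppyArgumentsSketch {
  std::vector<int> context;                // entry 0: function context
  std::vector<int> backing_store;          // entry 1: plain arguments store
  std::vector<std::optional<int>> mapped;  // entries 2..n: slot index or hole
};

int LoadSloppyArgument(const SloppyArgumentsSketch& args, size_t key) {
  if (key < args.mapped.size() && args.mapped[key].has_value()) {
    return args.context[*args.mapped[key]];  // mapped: read the context slot
  }
  // Unmapped: read the backing store; out of range stands in for the
  // slow case (at() throws here, the stub branches to slow_case).
  return args.backing_store.at(key);
}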
-
-
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
-                                                  Register key,
-                                                  Register parameter_map,
-                                                  Register scratch,
-                                                  Label* slow_case) {
-  // Element is in the arguments backing store, which is referenced by the
-  // second element of the parameter_map. The parameter_map register
-  // must be loaded with the parameter map of the arguments object and is
-  // overwritten.
-  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
-  Register backing_store = parameter_map;
-  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
-  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
-  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
-              DONT_DO_SMI_CHECK);
-  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(scratch));
-  __ b(cs, slow_case);
-  __ mov(scratch, Operand(kPointerSize >> 1));
-  __ mul(scratch, key, scratch);
-  __ add(scratch,
-         scratch,
-         Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  return MemOperand(backing_store, scratch);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  Label slow, notin;
-  MemOperand mapped_location =
-      GenerateMappedArgumentsLookup(masm, r1, r0, r2, r3, r4, &notin, &slow);
-  __ ldr(r0, mapped_location);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in r2.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, r0, r2, r3, &slow);
-  __ ldr(r2, unmapped_location);
-  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
-  __ cmp(r2, r3);
-  __ b(eq, &slow);
-  __ mov(r0, r2);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow, notin;
-  MemOperand mapped_location =
-      GenerateMappedArgumentsLookup(masm, r2, r1, r3, r4, r5, &notin, &slow);
-  __ str(r0, mapped_location);
-  __ add(r6, r3, r5);
-  __ mov(r9, r0);
-  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in r3.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, r1, r3, r4, &slow);
-  __ str(r0, unmapped_location);
-  __ add(r6, r3, r4);
-  __ mov(r9, r0);
-  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  Isolate* isolate = masm->isolate();
-
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
-
-  __ Push(r1, r0);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-
-  __ Push(r1, r0);
-
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register key = r0;
-  Register receiver = r1;
-
-  Isolate* isolate = masm->isolate();
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, receiver, r2, r3, Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(r2, r3, &check_number_dictionary);
-
-  GenerateFastArrayLoad(
-      masm, receiver, key, r4, r3, r2, r0, NULL, &slow);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r2, r3);
-  __ Ret();
-
-  __ bind(&check_number_dictionary);
-  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
-
-  // Check whether the elements object is a number dictionary.
-  // r0: key
-  // r3: elements map
-  // r4: elements
-  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-  __ cmp(r3, ip);
-  __ b(ne, &slow);
-  __ SmiUntag(r2, r0);
-  __ LoadFromNumberDictionary(&slow, r4, r0, r0, r2, r3, r5);
-  __ Ret();
-
-  // Slow case, key and receiver still in r0 and r1.
-  __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
-                      1, r2, r3);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, r2, r3, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, receiver, r2, r3, Map::kHasNamedInterceptor, &slow);
-
-  // If the receiver is a fast-case object, check the keyed lookup
-  // cache. Otherwise probe the dictionary.
-  __ ldr(r3, FieldMemOperand(r1, JSObject::kPropertiesOffset));
-  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
-  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
-  __ cmp(r4, ip);
-  __ b(eq, &probe_dictionary);
-
-  // Load the map of the receiver, compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the name hash.
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ mov(r3, Operand(r2, ASR, KeyedLookupCache::kMapHashShift));
-  __ ldr(r4, FieldMemOperand(r0, Name::kHashFieldOffset));
-  __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
-  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
-  __ And(r3, r3, Operand(mask));
-
-  // Load the key (consisting of map and unique name) from the cache and
-  // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
-  ExternalReference cache_keys =
-      ExternalReference::keyed_lookup_cache_keys(isolate);
-
-  __ mov(r4, Operand(cache_keys));
-  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    // Load map and move r4 to next entry.
-    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
-    __ cmp(r2, r5);
-    __ b(ne, &try_next_entry);
-    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load name
-    __ cmp(r0, r5);
-    __ b(eq, &hit_on_nth_entry[i]);
-    __ bind(&try_next_entry);
-  }
-
-  // Last entry: Load map and move r4 to name.
-  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
-  __ cmp(r2, r5);
-  __ b(ne, &slow);
-  __ ldr(r5, MemOperand(r4));
-  __ cmp(r0, r5);
-  __ b(ne, &slow);
-
-  // Get field offset.
-  // r0     : key
-  // r1     : receiver
-  // r2     : receiver's map
-  // r3     : lookup cache index
-  ExternalReference cache_field_offsets =
-      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    __ mov(r4, Operand(cache_field_offsets));
-    if (i != 0) {
-      __ add(r3, r3, Operand(i));
-    }
-    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
-    __ ldrb(r6, FieldMemOperand(r2, Map::kInObjectPropertiesOffset));
-    __ sub(r5, r5, r6, SetCC);
-    __ b(ge, &property_array_property);
-    if (i != 0) {
-      __ jmp(&load_in_object_property);
-    }
-  }
-
-  // Load in-object property.
-  __ bind(&load_in_object_property);
-  __ ldrb(r6, FieldMemOperand(r2, Map::kInstanceSizeOffset));
-  __ add(r6, r6, r5);  // Index from start of object.
-  __ sub(r1, r1, Operand(kHeapObjectTag));  // Remove the heap tag.
-  __ ldr(r0, MemOperand(r1, r6, LSL, kPointerSizeLog2));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
-                      1, r2, r3);
-  __ Ret();
-
-  // Load property array property.
-  __ bind(&property_array_property);
-  __ ldr(r1, FieldMemOperand(r1, JSObject::kPropertiesOffset));
-  __ add(r1, r1, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(r0, MemOperand(r1, r5, LSL, kPointerSizeLog2));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
-                      1, r2, r3);
-  __ Ret();
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // r1: receiver
-  // r0: key
-  // r3: elements
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, r2, &slow);
-  // Load the property to r0.
-  GenerateDictionaryLoad(masm, &slow, r3, r0, r0, r2, r4);
-  __ IncrementCounter(
-      isolate->counters()->keyed_load_generic_symbol(), 1, r2, r3);
-  __ Ret();
-
-  __ bind(&index_name);
-  __ IndexFromHash(r3, key);
-  // Now jump to the place where smi keys are handled.
-  __ jmp(&index_smi);
-}
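
The keyed lookup cache probe above hashes the receiver's map pointer against the name's hash field and masks down to a bucket index; each bucket then holds kEntriesPerBucket (map, name) pairs probed in order. A sketch of the index computation, shift and mask constants assumed:

#include <cstdint>

constexpr int kMapHashShiftSketch = 2;            // assumed
constexpr int kNameHashShiftSketch = 2;           // assumed
constexpr uint32_t kCacheIndexMaskSketch = 0x7F;  // assumed capacity mask

// Mirrors: (map >> kMapHashShift) ^ (hash_field >> kHashShift), masked.
uint32_t KeyedLookupCacheIndex(uintptr_t map_bits, uint32_t name_hash_field) {
  uint32_t h = static_cast<uint32_t>(map_bits >> kMapHashShiftSketch) ^
               (name_hash_field >> kNameHashShiftSketch);
  return h & kCacheIndexMaskSketch;
}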
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key (index)
-  //  -- r1     : receiver
-  // -----------------------------------
-  Label miss;
-
-  Register receiver = r1;
-  Register index = r0;
-  Register scratch = r3;
-  Register result = r0;
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, call_helper);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  Label slow;
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(r1, &slow);
-
-  // Check that the key is an array index, that is, a Uint32.
-  __ NonNegativeSmiTst(r0);
-  __ b(ne, &slow);
-
-  // Get the map of the receiver.
-  __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
-
-  // Check that it has indexed interceptor and access checks
-  // are not enabled for this object.
-  __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
-  __ and_(r3, r3, Operand(kSlowCaseBitFieldMask));
-  __ cmp(r3, Operand(1 << Map::kHasIndexedInterceptor));
-  __ b(ne, &slow);
-
-  // Everything is fine, call runtime.
-  __ Push(r1, r0);  // Receiver, key.
-
-  // Perform tail call to the entry.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
-                        masm->isolate()),
-      2,
-      1);
-
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(r2, r1, r0);
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r2     : key
-  //  -- r1     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(r1, r2, r0);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(r2, r1, r0);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                              StrictMode strict_mode) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(r2, r1, r0);
-
-  __ mov(r1, Operand(Smi::FromInt(NONE)));          // PropertyAttributes
-  __ mov(r0, Operand(Smi::FromInt(strict_mode)));   // Strict mode.
-  __ Push(r1, r0);
-
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
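
Note the argument marshalling here: the receiver, key, and value are joined by two extra smis, so the tail call hands Runtime::kSetProperty exactly five arguments and expects one result (the 5 and 1 in TailCallRuntime). Schematically:

// Stack as seen by Runtime::kSetProperty (pushed first to last):
//   receiver, key, value, Smi(NONE /* PropertyAttributes */), Smi(strict_mode)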
-
-
-static void KeyedStoreGenerateGenericHelper(
-    MacroAssembler* masm,
-    Label* fast_object,
-    Label* fast_double,
-    Label* slow,
-    KeyedStoreCheckMap check_map,
-    KeyedStoreIncrementLength increment_length,
-    Register value,
-    Register key,
-    Register receiver,
-    Register receiver_map,
-    Register elements_map,
-    Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store; the value could be either an Object or a double.
-  __ bind(fast_object);
-  Register scratch_value = r4;
-  Register address = r5;
-  if (check_map == kCheckMap) {
-    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ cmp(elements_map,
-           Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ b(ne, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole, because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ ldr(scratch_value,
-         MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
-  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
-  __ b(ne, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
-                                      slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch_value,
-                             &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
-  __ str(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch_value, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements,
-                 address,
-                 scratch_value,
-                 kLRHasNotBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-    __ b(ne, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  __ add(address, elements,
-         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32))
-                 - kHeapObjectTag));
-  __ ldr(scratch_value,
-         MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
-  __ cmp(scratch_value, Operand(kHoleNanUpper32));
-  __ b(ne, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
-                                      slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
-  __ b(ne, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
-                                                    FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
-                                                                   slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         r4,
-                                         slow);
-  ASSERT(receiver_map.is(r3));  // Transition code expects map in r3
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
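
The double hole check above reads only the upper 32 bits of each element: the hole is a NaN with a distinguished upper word, so a single 32-bit compare decides it. A sketch, with the bit pattern assumed rather than taken from this patch:

#include <cstdint>

constexpr uint32_t kHoleNanUpper32Sketch = 0x7FF7FFFF;  // assumed encoding

// Mirrors "ldr upper word; cmp kHoleNanUpper32; b ne, fast path".
bool IsDoubleHole(uint64_t raw_double_bits) {
  return static_cast<uint32_t>(raw_double_bits >> 32) == kHoleNanUpper32Sketch;
}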
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
-                                   StrictMode strict_mode) {
-  // ---------- S t a t e --------------
-  //  -- r0     : value
-  //  -- r1     : key
-  //  -- r2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array;
-
-  // Register usage.
-  Register value = r0;
-  Register key = r1;
-  Register receiver = r2;
-  Register receiver_map = r3;
-  Register elements_map = r6;
-  Register elements = r9;  // Elements array of the receiver.
-  // r4 and r5 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &slow);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
-  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
-  __ b(ne, &slow);
-  // Check if the object is a JS array or not.
-  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ cmp(r4, Operand(JS_ARRAY_TYPE));
-  __ b(eq, &array);
-  // Check that the object is some kind of JSObject.
-  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
-  __ b(lt, &slow);
-
-  // Object case: Check key against length in the elements array.
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(lo, &fast_object);
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // r0: value.
-  // r1: key.
-  // r2: receiver.
-  GenerateRuntimeSetProperty(masm, strict_mode);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  __ b(ne, &slow);  // Only support writing to array[array.length].
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(hs, &slow);
-  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ b(ne, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ b(ne, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ cmp(key, Operand(ip));
-  __ b(hs, &extra);
-
-  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
-                                  &slow, kCheckMap, kDontIncrementLength,
-                                  value, key, receiver, receiver_map,
-                                  elements_map, elements);
-  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
-                                  &slow, kDontCheckMap, kIncrementLength,
-                                  value, key, receiver, receiver_map,
-                                  elements_map, elements);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : receiver
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
-
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, r1, r2, r3, r4, r5, r6);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : receiver
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  __ Push(r1, r2, r0);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : receiver
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-  Label miss;
-
-  GenerateNameDictionaryReceiverCheck(masm, r1, r3, r4, r5, &miss);
-
-  GenerateDictionaryStore(masm, &miss, r3, r2, r0, r4, r5);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(),
-                      1, r4, r5);
-  __ Ret();
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- r0    : value
-  //  -- r1    : receiver
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  __ Push(r1, r2, r0);
-
-  __ mov(r1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes
-  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
-  __ Push(r1, r0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return kNoCondition;
-  }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
-  // The address of the instruction following the call.
-  Address cmp_instruction_address =
-      Assembler::return_address_from_call_start(address);
-
-  // If the instruction following the call is not a cmp rx, #yyy, nothing
-  // was inlined.
-  Instr instr = Assembler::instr_at(cmp_instruction_address);
-  return Assembler::IsCmpImmediate(instr);
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
-  Address cmp_instruction_address =
-      Assembler::return_address_from_call_start(address);
-
-  // If the instruction following the call is not a cmp rx, #yyy, nothing
-  // was inlined.
-  Instr instr = Assembler::instr_at(cmp_instruction_address);
-  if (!Assembler::IsCmpImmediate(instr)) {
-    return;
-  }
-
-  // The delta to the start of the map check instruction, and the
-  // condition code used at the patched jump.
-  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
-  delta +=
-      Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
-  // If the delta is 0, the instruction is cmp r0, #0, which also signals that
-  // nothing was inlined.
-  if (delta == 0) {
-    return;
-  }
-
-  if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n",
-           address, cmp_instruction_address, delta);
-  }
-
-  Address patch_address =
-      cmp_instruction_address - delta * Instruction::kInstrSize;
-  Instr instr_at_patch = Assembler::instr_at(patch_address);
-  Instr branch_instr =
-      Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  // This is patching a conditional "jump if not smi/jump if smi" site.
-  // Enabling by changing from
-  //   cmp rx, rx
-  //   b eq/ne, <target>
-  // to
-  //   tst rx, #kSmiTagMask
-  //   b ne/eq, <target>
-  // and vice-versa to be disabled again.
-  CodePatcher patcher(patch_address, 2);
-  Register reg = Assembler::GetRn(instr_at_patch);
-  if (check == ENABLE_INLINED_SMI_CHECK) {
-    ASSERT(Assembler::IsCmpRegister(instr_at_patch));
-    ASSERT_EQ(Assembler::GetRn(instr_at_patch).code(),
-              Assembler::GetRm(instr_at_patch).code());
-    patcher.masm()->tst(reg, Operand(kSmiTagMask));
-  } else {
-    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
-    ASSERT(Assembler::IsTstImmediate(instr_at_patch));
-    patcher.masm()->cmp(reg, reg);
-  }
-  ASSERT(Assembler::IsBranch(branch_instr));
-  if (Assembler::GetCondition(branch_instr) == eq) {
-    patcher.EmitCondition(ne);
-  } else {
-    ASSERT(Assembler::GetCondition(branch_instr) == ne);
-    patcher.EmitCondition(eq);
-  }
-}
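
The patch site always flips the instruction and the branch condition together, so control flow stays consistent in both states. A small C++ state model of that invariant (a sketch of the logic, not the actual patcher):

// Two-state model of an inlined smi-check site.
struct SmiCheckSite {
  bool enabled;       // tst rx, #kSmiTagMask  vs.  cmp rx, rx
  bool branch_on_eq;  // condition carried by the following branch
};

// Mirrors the pairing in PatchInlinedSmiCode: toggling the check also
// swaps eq <-> ne, as patcher.EmitCondition does above.
void ToggleSmiCheck(SmiCheckSite* site) {
  site->enabled = !site->enabled;
  site->branch_on_eq = !site->branch_on_eq;
}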
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/interface-descriptors-arm.cc b/src/arm/interface-descriptors-arm.cc
new file mode 100644
index 0000000..9bbc1f5
--- /dev/null
+++ b/src/arm/interface-descriptors-arm.cc
@@ -0,0 +1,323 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return r1; }
+const Register LoadDescriptor::NameRegister() { return r2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return r3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return r1; }
+const Register StoreDescriptor::NameRegister() { return r2; }
+const Register StoreDescriptor::ValueRegister() { return r0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r3; }
+
+
+const Register InstanceofDescriptor::left() { return r0; }
+const Register InstanceofDescriptor::right() { return r1; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return r1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return r0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return r2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return r2; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+  return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
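
arraysize() in these initializers is a compile-time element count, so each descriptor registers exactly the registers listed. A minimal equivalent sketch of such a helper (the real macro may differ in mechanism):

#include <cstddef>

// Deduces N from the array reference at compile time.
template <typename T, size_t N>
constexpr size_t arraysize_sketch(T (&)[N]) { return N; }

// Usage: Register registers[] = {cp, r2}; arraysize_sketch(registers) == 2.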
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r3, r2, r1};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r3, r2, r1, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r2, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r3, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r1, r3};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Smi()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // r0 : number of arguments
+  // r1 : the function to call
+  // r2 : feedback vector
+  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
+  //      vector (Smi)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {cp, r0, r1, r2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r2, r1, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r0, r1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  // r0 -- number of arguments
+  // r1 -- function
+  // r2 -- allocation site with elements kind
+  Register registers[] = {cp, r1, r2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // The stack parameter count needs the constructor pointer and a single argument.
+  Register registers[] = {cp, r1, r2, r0};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  // r0 -- number of arguments
+  // r1 -- constructor function
+  Register registers[] = {cp, r1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // The stack parameter count needs the constructor pointer and a single argument.
+  Register registers[] = {cp, r1, r0};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r1, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r2, r1, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r1, r0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor noInlineDescriptor =
+      PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      r2,  // key
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // key
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &noInlineDescriptor);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor noInlineDescriptor =
+      PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      r2,  // name
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // name
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &noInlineDescriptor);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor default_descriptor =
+      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      r0,  // receiver
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // receiver
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &default_descriptor);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor default_descriptor =
+      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      r1,  // JSFunction
+      r0,  // actual number of arguments
+      r2,  // expected number of arguments
+  };
+  Representation representations[] = {
+      Representation::Tagged(),     // context
+      Representation::Tagged(),     // JSFunction
+      Representation::Integer32(),  // actual number of arguments
+      Representation::Integer32(),  // expected number of arguments
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &default_descriptor);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor default_descriptor =
+      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      r0,  // callee
+      r4,  // call_data
+      r2,  // holder
+      r1,  // api_function_address
+  };
+  Representation representations[] = {
+      Representation::Tagged(),    // context
+      Representation::Tagged(),    // callee
+      Representation::Tagged(),    // call_data
+      Representation::Tagged(),    // holder
+      Representation::External(),  // api_function_address
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &default_descriptor);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm/interface-descriptors-arm.h b/src/arm/interface-descriptors-arm.h
new file mode 100644
index 0000000..6201adc
--- /dev/null
+++ b/src/arm/interface-descriptors-arm.h
@@ -0,0 +1,26 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
+#define V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformInterfaceDescriptor {
+ public:
+  explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
+      : storage_mode_(storage_mode) {}
+
+  TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+  TargetAddressStorageMode storage_mode_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_ARM_INTERFACE_DESCRIPTORS_ARM_H_
diff --git a/src/arm/lithium-arm.cc b/src/arm/lithium-arm.cc
index 93dc830..13a46a2 100644
--- a/src/arm/lithium-arm.cc
+++ b/src/arm/lithium-arm.cc
@@ -4,10 +4,9 @@
 
 #include "src/v8.h"
 
-#include "src/lithium-allocator-inl.h"
-#include "src/arm/lithium-arm.h"
 #include "src/arm/lithium-codegen-arm.h"
 #include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -25,17 +24,17 @@
   // outputs because all registers are blocked by the calling convention.
   // Input operands must use a fixed register or use-at-start policy or
   // a non-register policy.
-  ASSERT(Output() == NULL ||
+  DCHECK(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||
+    DCHECK(operand->HasFixedPolicy() ||
            operand->IsUsedAtStart());
   }
   for (TempIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+    DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -317,8 +316,9 @@
 
 void LStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
-  hydrogen()->access().PrintTo(stream);
-  stream->Add(" <- ");
+  OStringStream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.c_str());
   value()->PrintTo(stream);
 }
 
@@ -355,7 +355,7 @@
   }
 
   if (value() == NULL) {
-    ASSERT(hydrogen()->IsConstantHoleStore() &&
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
            hydrogen()->value()->representation().IsDouble());
     stream->Add("<the hole(nan)>");
   } else {
@@ -391,14 +391,14 @@
   if (kind == DOUBLE_REGISTERS) {
     return LDoubleStackSlot::Create(index, zone());
   } else {
-    ASSERT(kind == GENERAL_REGISTERS);
+    DCHECK(kind == GENERAL_REGISTERS);
     return LStackSlot::Create(index, zone());
   }
 }
 
 
 LPlatformChunk* LChunkBuilder::Build() {
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
   LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
@@ -423,12 +423,6 @@
 }
 
 
-void LChunkBuilder::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
-  status_ = ABORTED;
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
   return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
                                   Register::ToAllocationIndex(reg));
@@ -609,7 +603,7 @@
 
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
-  ASSERT(!instr->HasPointerMap());
+  DCHECK(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(zone()));
   return instr;
 }
@@ -643,14 +637,14 @@
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
@@ -679,8 +673,8 @@
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
 
     HValue* right_value = instr->right();
@@ -721,9 +715,9 @@
 
 LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                            HArithmeticBinaryOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->left()->representation().IsDouble());
-  ASSERT(instr->right()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
   if (op == Token::MOD) {
     LOperand* left = UseFixedDouble(instr->left(), d0);
     LOperand* right = UseFixedDouble(instr->right(), d1);
@@ -742,8 +736,8 @@
                                            HBinaryOperation* instr) {
   HValue* left = instr->left();
   HValue* right = instr->right();
-  ASSERT(left->representation().IsTagged());
-  ASSERT(right->representation().IsTagged());
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left_operand = UseFixed(left, r1);
   LOperand* right_operand = UseFixed(right, r0);
@@ -754,7 +748,7 @@
 
 
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
-  ASSERT(is_building());
+  DCHECK(is_building());
   current_block_ = block;
   next_block_ = next_block;
   if (block->IsStartBlock()) {
@@ -763,13 +757,13 @@
   } else if (block->predecessors()->length() == 1) {
     // We have a single predecessor => copy environment and outgoing
     // argument count from the predecessor.
-    ASSERT(block->phis()->length() == 0);
+    DCHECK(block->phis()->length() == 0);
     HBasicBlock* pred = block->predecessors()->at(0);
     HEnvironment* last_environment = pred->last_environment();
-    ASSERT(last_environment != NULL);
+    DCHECK(last_environment != NULL);
     // Only copy the environment, if it is later used again.
     if (pred->end()->SecondSuccessor() == NULL) {
-      ASSERT(pred->end()->FirstSuccessor() == block);
+      DCHECK(pred->end()->FirstSuccessor() == block);
     } else {
       if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
           pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
@@ -777,7 +771,7 @@
       }
     }
     block->UpdateEnvironment(last_environment);
-    ASSERT(pred->argument_count() >= 0);
+    DCHECK(pred->argument_count() >= 0);
     argument_count_ = pred->argument_count();
   } else {
     // We are at a state join => process phis.
@@ -829,7 +823,7 @@
     if (current->OperandCount() == 0) {
       instr = DefineAsRegister(new(zone()) LDummy());
     } else {
-      ASSERT(!current->OperandAt(0)->IsControlInstruction());
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
       instr = DefineAsRegister(new(zone())
           LDummyUse(UseAny(current->OperandAt(0))));
     }
@@ -852,7 +846,7 @@
   }
 
   argument_count_ += current->argument_delta();
-  ASSERT(argument_count_ >= 0);
+  DCHECK(argument_count_ >= 0);
 
   if (instr != NULL) {
     AddInstruction(instr, current);
@@ -894,7 +888,7 @@
       LUnallocated* operand = LUnallocated::cast(it.Current());
       if (operand->HasFixedPolicy()) ++fixed;
     }
-    ASSERT(fixed == 0 || used_at_start == 0);
+    DCHECK(fixed == 0 || used_at_start == 0);
   }
 #endif
 
@@ -958,7 +952,7 @@
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
   return new(zone()) LCmpMapAndBranch(value, temp);
@@ -1082,14 +1076,14 @@
 
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
-  const CallInterfaceDescriptor* descriptor = instr->descriptor();
+  CallInterfaceDescriptor descriptor = instr->descriptor();
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
   ops.Add(target, zone());
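+  // Each remaining operand is pinned to the fixed register that the call
+  // descriptor assigns to the corresponding parameter.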
   for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op = UseFixed(instr->OperandAt(i),
-        descriptor->GetParameterRegister(i - 1));
+    LOperand* op =
+        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
     ops.Add(op, zone());
   }
 
@@ -1099,6 +1093,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+    HTailCallThroughMegamorphicCache* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
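+  // The receiver and name must sit in the fixed registers the megamorphic
+  // cache probe expects.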
+  LOperand* receiver_register =
+      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+  LOperand* name_register =
+      UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  // Not marked as a call: it can't deoptimize, and it never returns.
+  return new (zone()) LTailCallThroughMegamorphicCache(
+      context, receiver_register, name_register);
+}
+
+
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), r1);
@@ -1109,14 +1116,24 @@
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   switch (instr->op()) {
-    case kMathFloor: return DoMathFloor(instr);
-    case kMathRound: return DoMathRound(instr);
-    case kMathAbs: return DoMathAbs(instr);
-    case kMathLog: return DoMathLog(instr);
-    case kMathExp: return DoMathExp(instr);
-    case kMathSqrt: return DoMathSqrt(instr);
-    case kMathPowHalf: return DoMathPowHalf(instr);
-    case kMathClz32: return DoMathClz32(instr);
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
     default:
       UNREACHABLE();
       return NULL;
@@ -1139,6 +1156,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
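+  // Math.fround rounds its input to the nearest float32-representable value.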
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
 LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
   Representation r = instr->value()->representation();
   LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
@@ -1154,8 +1178,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseFixedDouble(instr->value(), d0);
   return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), d0), instr);
 }
@@ -1169,8 +1193,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegister(instr->value());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
@@ -1246,9 +1270,9 @@
 
 LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
-    ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
 
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1260,9 +1284,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
@@ -1278,9 +1302,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
@@ -1295,9 +1319,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp =
@@ -1348,9 +1372,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp =
@@ -1368,9 +1392,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp =
@@ -1392,14 +1416,15 @@
 
 
 LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
           dividend, divisor));
-  if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
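+    // A power-of-2 modulus yields -0 only for a negative dividend, so the
+    // deopt environment is only needed when the left side can be negative.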
     result = AssignEnvironment(result);
   }
   return result;
@@ -1407,9 +1432,9 @@
 
 
 LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
@@ -1422,9 +1447,9 @@
 
 
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp =
@@ -1460,8 +1485,8 @@
 
 LInstruction* LChunkBuilder::DoMul(HMul* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     HValue* left = instr->BetterLeftOperand();
     HValue* right = instr->BetterRightOperand();
     LOperand* left_op;
@@ -1500,8 +1525,8 @@
     return DefineAsRegister(mul);
 
   } else if (instr->representation().IsDouble()) {
-    if (instr->UseCount() == 1 && (instr->uses().value()->IsAdd() ||
-                                   instr->uses().value()->IsSub())) {
+    if (instr->HasOneUse() && (instr->uses().value()->IsAdd() ||
+                               instr->uses().value()->IsSub())) {
       HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
 
       if (use->IsAdd() && instr == use->left()) {
@@ -1530,8 +1555,8 @@
 
 LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
 
     if (instr->left()->IsConstant()) {
       // If lhs is constant, do reverse subtraction instead.
@@ -1547,7 +1572,7 @@
     }
     return result;
   } else if (instr->representation().IsDouble()) {
-    if (instr->right()->IsMul()) {
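+    // Only fuse the multiply into a multiply-subtract when this is its sole
+    // use; otherwise the product would still be computed separately.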
+    if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
       return DoMultiplySub(instr->left(), HMul::cast(instr->right()));
     }
 
@@ -1559,9 +1584,9 @@
 
 
 LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
 
   // Note: The lhs of the subtraction becomes the rhs of the
   // reverse-subtraction.
@@ -1598,8 +1623,8 @@
 
 LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
     LAddI* add = new(zone()) LAddI(left, right);
@@ -1609,21 +1634,21 @@
     }
     return result;
   } else if (instr->representation().IsExternal()) {
-    ASSERT(instr->left()->representation().IsExternal());
-    ASSERT(instr->right()->representation().IsInteger32());
-    ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+    DCHECK(instr->left()->representation().IsExternal());
+    DCHECK(instr->right()->representation().IsInteger32());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LAddI* add = new(zone()) LAddI(left, right);
     LInstruction* result = DefineAsRegister(add);
     return result;
   } else if (instr->representation().IsDouble()) {
-    if (instr->left()->IsMul()) {
+    if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
       return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
     }
 
-    if (instr->right()->IsMul()) {
-      ASSERT(!instr->left()->IsMul());
+    if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
+      DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
       return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
     }
 
@@ -1638,14 +1663,14 @@
   LOperand* left = NULL;
   LOperand* right = NULL;
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     left = UseRegisterAtStart(instr->BetterLeftOperand());
     right = UseOrConstantAtStart(instr->BetterRightOperand());
   } else {
-    ASSERT(instr->representation().IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     left = UseRegisterAtStart(instr->left());
     right = UseRegisterAtStart(instr->right());
   }
@@ -1654,15 +1679,16 @@
 
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  ASSERT(instr->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
   // We call a C function for double power. It can't trigger a GC.
   // We need to use a fixed result register for the call.
   Representation exponent_type = instr->right()->representation();
-  ASSERT(instr->left()->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
   LOperand* left = UseFixedDouble(instr->left(), d0);
-  LOperand* right = exponent_type.IsDouble() ?
-      UseFixedDouble(instr->right(), d1) :
-      UseFixed(instr->right(), r2);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), d1)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
   LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, d2),
                     instr,
@@ -1671,8 +1697,8 @@
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), r1);
   LOperand* right = UseFixed(instr->right(), r0);
@@ -1685,15 +1711,15 @@
     HCompareNumericAndBranch* instr) {
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(r));
-    ASSERT(instr->right()->representation().Equals(r));
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseRegisterOrConstantAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
   } else {
-    ASSERT(r.IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1725,7 +1751,7 @@
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
   return new(zone()) LIsObjectAndBranch(value, temp);
@@ -1733,7 +1759,7 @@
 
 
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
   return new(zone()) LIsStringAndBranch(value, temp);
@@ -1741,14 +1767,14 @@
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
 }
@@ -1756,8 +1782,8 @@
 
 LInstruction* LChunkBuilder::DoStringCompareAndBranch(
     HStringCompareAndBranch* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), r1);
   LOperand* right = UseFixed(instr->right(), r0);
@@ -1769,7 +1795,7 @@
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LHasInstanceTypeAndBranch(value);
 }
@@ -1777,7 +1803,7 @@
 
 LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
     HGetCachedArrayIndex* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
   return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
@@ -1786,7 +1812,7 @@
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
@@ -1794,7 +1820,7 @@
 
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegister(instr->value());
   return new(zone()) LClassOfTestAndBranch(value, TempRegister());
 }
@@ -1897,7 +1923,7 @@
       }
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       if (val->type().IsSmi() || val->representation().IsSmi()) {
         LOperand* value = UseRegisterAtStart(val);
         return DefineAsRegister(new(zone()) LSmiUntag(value, false));
@@ -1925,7 +1951,7 @@
       return AssignEnvironment(
           DefineAsRegister(new(zone()) LDoubleToSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       LOperand* value = UseRegister(val);
       LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
       if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
@@ -1958,7 +1984,7 @@
       }
       return result;
     } else {
-      ASSERT(to.IsDouble());
+      DCHECK(to.IsDouble());
       if (val->CheckFlag(HInstruction::kUint32)) {
         return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
       } else {
@@ -2021,7 +2047,7 @@
   } else if (input_rep.IsInteger32()) {
     return DefineAsRegister(new(zone()) LClampIToUint8(reg));
   } else {
-    ASSERT(input_rep.IsSmiOrTagged());
+    DCHECK(input_rep.IsSmiOrTagged());
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve d1 explicitly.
     LClampTToUint8* result =
@@ -2033,7 +2059,7 @@
 
 LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
   HValue* value = instr->value();
-  ASSERT(value->representation().IsDouble());
+  DCHECK(value->representation().IsDouble());
   return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
 }
 
@@ -2084,9 +2110,14 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object = UseFixed(instr->global_object(), r0);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
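+  // With --vector_ics the IC also receives its type feedback vector in a
+  // fixed register.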
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
   LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object);
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
   return MarkAsCall(DefineFixed(result, r0), instr);
 }
 
@@ -2138,9 +2169,15 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object = UseFixed(instr->object(), r0);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
   LInstruction* result =
-      DefineFixed(new(zone()) LLoadNamedGeneric(context, object), r0);
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), r0);
   return MarkAsCall(result, instr);
 }
 
@@ -2158,7 +2195,7 @@
 
 
 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  ASSERT(instr->key()->representation().IsSmiOrInteger32());
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LInstruction* result = NULL;
@@ -2168,12 +2205,12 @@
     if (instr->representation().IsDouble()) {
       obj = UseRegister(instr->elements());
     } else {
-      ASSERT(instr->representation().IsSmiOrTagged());
+      DCHECK(instr->representation().IsSmiOrTagged());
       obj = UseRegisterAtStart(instr->elements());
     }
     result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
   } else {
-    ASSERT(
+    DCHECK(
         (instr->representation().IsInteger32() &&
          !IsDoubleOrFloatElementsKind(elements_kind)) ||
         (instr->representation().IsDouble() &&
@@ -2198,18 +2235,24 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object = UseFixed(instr->object(), r1);
-  LOperand* key = UseFixed(instr->key(), r0);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
 
   LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), r0);
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+                  r0);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_typed_elements()) {
-    ASSERT(instr->elements()->representation().IsTagged());
+    DCHECK(instr->elements()->representation().IsTagged());
     bool needs_write_barrier = instr->NeedsWriteBarrier();
     LOperand* object = NULL;
     LOperand* key = NULL;
@@ -2220,7 +2263,7 @@
       val = UseRegister(instr->value());
       key = UseRegisterOrConstantAtStart(instr->key());
     } else {
-      ASSERT(instr->value()->representation().IsSmiOrTagged());
+      DCHECK(instr->value()->representation().IsSmiOrTagged());
       if (needs_write_barrier) {
         object = UseTempRegister(instr->elements());
         val = UseTempRegister(instr->value());
@@ -2235,12 +2278,12 @@
     return new(zone()) LStoreKeyed(object, key, val);
   }
 
-  ASSERT(
+  DCHECK(
       (instr->value()->representation().IsInteger32() &&
        !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
       (instr->value()->representation().IsDouble() &&
        IsDoubleOrFloatElementsKind(instr->elements_kind())));
-  ASSERT((instr->is_fixed_typed_array() &&
+  DCHECK((instr->is_fixed_typed_array() &&
           instr->elements()->representation().IsTagged()) ||
          (instr->is_external() &&
           instr->elements()->representation().IsExternal()));
@@ -2253,13 +2296,14 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj = UseFixed(instr->object(), r2);
-  LOperand* key = UseFixed(instr->key(), r1);
-  LOperand* val = UseFixed(instr->value(), r0);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsTagged());
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
 
   return MarkAsCall(
       new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
@@ -2312,7 +2356,7 @@
   }
 
   LOperand* val;
-  if (needs_write_barrier || instr->field_representation().IsSmi()) {
+  if (needs_write_barrier) {
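+    // The write barrier can clobber the value register, so use a temp copy.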
     val = UseTempRegister(instr->value());
   } else if (instr->field_representation().IsDouble()) {
     val = UseRegisterAtStart(instr->value());
@@ -2329,8 +2373,9 @@
 
 LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj = UseFixed(instr->object(), r1);
-  LOperand* val = UseFixed(instr->value(), r0);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
   LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
   return MarkAsCall(result, instr);
@@ -2369,9 +2414,7 @@
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
   LOperand* context = UseAny(instr->context());
-  LOperand* size = instr->size()->IsConstant()
-      ? UseConstant(instr->size())
-      : UseTempRegister(instr->size());
+  LOperand* size = UseRegisterOrConstant(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
   LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
@@ -2394,7 +2437,7 @@
 
 
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
-  ASSERT(argument_count_ == 0);
+  DCHECK(argument_count_ == 0);
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
   return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2407,11 +2450,11 @@
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor();
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
-    Register reg = descriptor->GetParameterRegister(index);
+    Register reg = descriptor.GetEnvironmentParameterRegister(index);
     return DefineFixed(result, reg);
   }
 }
@@ -2427,7 +2470,7 @@
   } else {
     spill_index = env_index - instr->environment()->first_local_index();
     if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
-      Abort(kTooManySpillSlotsNeededForOSR);
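+      // Retry instead of aborting, so optimization can be attempted again.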
+      Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
   }
@@ -2503,7 +2546,7 @@
     LOperand* context = UseFixed(instr->context(), cp);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
-    ASSERT(instr->is_backwards_branch());
+    DCHECK(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
     return AssignEnvironment(
         AssignPointerMap(new(zone()) LStackCheck(context)));
@@ -2524,6 +2567,7 @@
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
   }
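+  // Record the inlined function's context in the newly built environment.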
+  inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2539,7 +2583,7 @@
   if (env->entry()->arguments_pushed()) {
     int argument_count = env->arguments_environment()->parameter_count();
     pop = new(zone()) LDrop(argument_count);
-    ASSERT(instr->argument_delta() == -argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
   }
 
   HEnvironment* outer = current_block_->last_environment()->
diff --git a/src/arm/lithium-arm.h b/src/arm/lithium-arm.h
index e385893..f9feaf6 100644
--- a/src/arm/lithium-arm.h
+++ b/src/arm/lithium-arm.h
@@ -6,8 +6,8 @@
 #define V8_ARM_LITHIUM_ARM_H_
 
 #include "src/hydrogen.h"
-#include "src/lithium-allocator.h"
 #include "src/lithium.h"
+#include "src/lithium-allocator.h"
 #include "src/safepoint-table.h"
 #include "src/utils.h"
 
@@ -17,163 +17,165 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
-  V(AccessArgumentsAt)                          \
-  V(AddI)                                       \
-  V(Allocate)                                   \
-  V(AllocateBlockContext)                       \
-  V(ApplyArguments)                             \
-  V(ArgumentsElements)                          \
-  V(ArgumentsLength)                            \
-  V(ArithmeticD)                                \
-  V(ArithmeticT)                                \
-  V(BitI)                                       \
-  V(BoundsCheck)                                \
-  V(Branch)                                     \
-  V(CallJSFunction)                             \
-  V(CallWithDescriptor)                         \
-  V(CallFunction)                               \
-  V(CallNew)                                    \
-  V(CallNewArray)                               \
-  V(CallRuntime)                                \
-  V(CallStub)                                   \
-  V(CheckInstanceType)                          \
-  V(CheckNonSmi)                                \
-  V(CheckMaps)                                  \
-  V(CheckMapValue)                              \
-  V(CheckSmi)                                   \
-  V(CheckValue)                                 \
-  V(ClampDToUint8)                              \
-  V(ClampIToUint8)                              \
-  V(ClampTToUint8)                              \
-  V(ClassOfTestAndBranch)                       \
-  V(CompareMinusZeroAndBranch)                  \
-  V(CompareNumericAndBranch)                    \
-  V(CmpObjectEqAndBranch)                       \
-  V(CmpHoleAndBranch)                           \
-  V(CmpMapAndBranch)                            \
-  V(CmpT)                                       \
-  V(ConstantD)                                  \
-  V(ConstantE)                                  \
-  V(ConstantI)                                  \
-  V(ConstantS)                                  \
-  V(ConstantT)                                  \
-  V(ConstructDouble)                            \
-  V(Context)                                    \
-  V(DateField)                                  \
-  V(DebugBreak)                                 \
-  V(DeclareGlobals)                             \
-  V(Deoptimize)                                 \
-  V(DivByConstI)                                \
-  V(DivByPowerOf2I)                             \
-  V(DivI)                                       \
-  V(DoubleBits)                                 \
-  V(DoubleToI)                                  \
-  V(DoubleToSmi)                                \
-  V(Drop)                                       \
-  V(Dummy)                                      \
-  V(DummyUse)                                   \
-  V(FlooringDivByConstI)                        \
-  V(FlooringDivByPowerOf2I)                     \
-  V(FlooringDivI)                               \
-  V(ForInCacheArray)                            \
-  V(ForInPrepareMap)                            \
-  V(FunctionLiteral)                            \
-  V(GetCachedArrayIndex)                        \
-  V(Goto)                                       \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceTypeAndBranch)                   \
-  V(InnerAllocatedObject)                       \
-  V(InstanceOf)                                 \
-  V(InstanceOfKnownGlobal)                      \
-  V(InstructionGap)                             \
-  V(Integer32ToDouble)                          \
-  V(InvokeFunction)                             \
-  V(IsConstructCallAndBranch)                   \
-  V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
-  V(IsSmiAndBranch)                             \
-  V(IsUndetectableAndBranch)                    \
-  V(Label)                                      \
-  V(LazyBailout)                                \
-  V(LoadContextSlot)                            \
-  V(LoadRoot)                                   \
-  V(LoadFieldByIndex)                           \
-  V(LoadFunctionPrototype)                      \
-  V(LoadGlobalCell)                             \
-  V(LoadGlobalGeneric)                          \
-  V(LoadKeyed)                                  \
-  V(LoadKeyedGeneric)                           \
-  V(LoadNamedField)                             \
-  V(LoadNamedGeneric)                           \
-  V(MapEnumLength)                              \
-  V(MathAbs)                                    \
-  V(MathClz32)                                  \
-  V(MathExp)                                    \
-  V(MathFloor)                                  \
-  V(MathLog)                                    \
-  V(MathMinMax)                                 \
-  V(MathPowHalf)                                \
-  V(MathRound)                                  \
-  V(MathSqrt)                                   \
-  V(ModByConstI)                                \
-  V(ModByPowerOf2I)                             \
-  V(ModI)                                       \
-  V(MulI)                                       \
-  V(MultiplyAddD)                               \
-  V(MultiplySubD)                               \
-  V(NumberTagD)                                 \
-  V(NumberTagI)                                 \
-  V(NumberTagU)                                 \
-  V(NumberUntagD)                               \
-  V(OsrEntry)                                   \
-  V(Parameter)                                  \
-  V(Power)                                      \
-  V(PushArgument)                               \
-  V(RegExpLiteral)                              \
-  V(Return)                                     \
-  V(SeqStringGetChar)                           \
-  V(SeqStringSetChar)                           \
-  V(ShiftI)                                     \
-  V(SmiTag)                                     \
-  V(SmiUntag)                                   \
-  V(StackCheck)                                 \
-  V(StoreCodeEntry)                             \
-  V(StoreContextSlot)                           \
-  V(StoreFrameContext)                          \
-  V(StoreGlobalCell)                            \
-  V(StoreKeyed)                                 \
-  V(StoreKeyedGeneric)                          \
-  V(StoreNamedField)                            \
-  V(StoreNamedGeneric)                          \
-  V(StringAdd)                                  \
-  V(StringCharCodeAt)                           \
-  V(StringCharFromCode)                         \
-  V(StringCompareAndBranch)                     \
-  V(SubI)                                       \
-  V(RSubI)                                      \
-  V(TaggedToI)                                  \
-  V(ThisFunction)                               \
-  V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
-  V(TrapAllocationMemento)                      \
-  V(Typeof)                                     \
-  V(TypeofIsAndBranch)                          \
-  V(Uint32ToDouble)                             \
-  V(UnknownOSRValue)                            \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckInstanceType)                       \
+  V(CheckNonSmi)                             \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MultiplyAddD)                            \
+  V(MultiplySubD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PushArgument)                            \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(RSubI)                                   \
+  V(TaggedToI)                               \
+  V(TailCallThroughMegamorphicCache)         \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {                      \
+  virtual Opcode opcode() const FINAL OVERRIDE {                            \
     return LInstruction::k##type;                                           \
   }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;   \
-  virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {               \
+  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;         \
+  virtual const char* Mnemonic() const FINAL OVERRIDE {                     \
     return mnemonic;                                                        \
   }                                                                         \
   static L##type* cast(LInstruction* instr) {                               \
-    ASSERT(instr->Is##type());                                              \
+    DCHECK(instr->Is##type());                                              \
     return reinterpret_cast<L##type*>(instr);                               \
   }
 
@@ -222,6 +224,9 @@
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -260,11 +265,12 @@
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator support.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
@@ -285,7 +291,7 @@
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+  virtual bool HasResult() const FINAL OVERRIDE {
     return R != 0 && result() != NULL;
   }
   void set_result(LOperand* operand) { results_[0] = operand; }
@@ -307,11 +313,11 @@
 
  private:
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return I; }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+  virtual int TempCount() FINAL OVERRIDE { return T; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
 };
 
 
@@ -326,10 +332,10 @@
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const V8_OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual bool IsGap() const OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
-    ASSERT(instr->IsGap());
+    DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
   }
 
@@ -363,11 +369,11 @@
 };
 
 
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -375,14 +381,14 @@
 };
 
 
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  virtual bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -391,7 +397,7 @@
 };
 
 
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LLazyBailout() : gap_instructions_size_(0) { }
 
@@ -407,14 +413,14 @@
 };
 
 
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LDummy() { }
+  LDummy() {}
   DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
 };
 
 
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDummyUse(LOperand* value) {
     inputs_[0] = value;
@@ -423,25 +429,25 @@
 };
 
 
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
  public:
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -457,14 +463,14 @@
 };
 
 
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
 
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallStub(LOperand* context) {
     inputs_[0] = context;
@@ -477,9 +483,29 @@
 };
 
 
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+    : public LTemplateInstruction<0, 3, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name) {
+    inputs_[0] = context;
+    inputs_[1] = receiver;
+    inputs_[2] = name;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* name() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+                               "tail-call-through-megamorphic-cache")
+  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -491,7 +517,7 @@
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+  virtual bool IsControl() const FINAL OVERRIDE { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -530,7 +556,7 @@
 };
 
 
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LWrapReceiver(LOperand* receiver, LOperand* function) {
     inputs_[0] = receiver;
@@ -545,7 +571,7 @@
 };
 
 
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
@@ -566,7 +592,7 @@
 };
 
 
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
     inputs_[0] = arguments;
@@ -580,11 +606,11 @@
   LOperand* length() { return inputs_[1]; }
   LOperand* index() { return inputs_[2]; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArgumentsLength(LOperand* elements) {
     inputs_[0] = elements;
@@ -596,14 +622,14 @@
 };
 
 
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
   DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -621,7 +647,7 @@
 };
 
 
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LModByConstI(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -639,7 +665,7 @@
 };
 
 
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 2> {
  public:
   LModI(LOperand* left, LOperand* right, LOperand* temp, LOperand* temp2) {
     inputs_[0] = left;
@@ -658,7 +684,7 @@
 };
 
 
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -676,7 +702,7 @@
 };
 
 
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDivByConstI(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -694,7 +720,7 @@
 };
 
 
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -711,7 +737,7 @@
 };
 
 
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -730,7 +756,7 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -750,7 +776,7 @@
 };
 
 
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -767,7 +793,7 @@
 };
 
 
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMulI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -783,7 +809,7 @@
 
 
 // Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LMultiplyAddD(LOperand* addend, LOperand* multiplier,
                 LOperand* multiplicand) {
@@ -801,7 +827,7 @@
 
 
 // Instruction for computing minuend - multiplier * multiplicand.
-class LMultiplySubD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LMultiplySubD FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LMultiplySubD(LOperand* minuend, LOperand* multiplier,
                 LOperand* multiplicand) {
@@ -818,13 +844,13 @@
 };
 
 
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
 };
 
 
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCompareNumericAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -843,11 +869,11 @@
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathFloor(LOperand* value) {
     inputs_[0] = value;
@@ -860,7 +886,7 @@
 };
 
 
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LMathRound(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -875,7 +901,17 @@
 };
 
 
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathAbs(LOperand* context, LOperand* value) {
     inputs_[1] = context;
@@ -890,7 +926,7 @@
 };
 
 
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathLog(LOperand* value) {
     inputs_[0] = value;
@@ -902,7 +938,7 @@
 };
 
 
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathClz32(LOperand* value) {
     inputs_[0] = value;
@@ -914,7 +950,7 @@
 };
 
 
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
  public:
   LMathExp(LOperand* value,
            LOperand* double_temp,
@@ -936,7 +972,7 @@
 };
 
 
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathSqrt(LOperand* value) {
     inputs_[0] = value;
@@ -948,7 +984,7 @@
 };
 
 
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathPowHalf(LOperand* value) {
     inputs_[0] = value;
@@ -960,7 +996,7 @@
 };
 
 
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -975,7 +1011,7 @@
 };
 
 
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpHoleAndBranch(LOperand* object) {
     inputs_[0] = object;
@@ -988,7 +1024,7 @@
 };
 
 
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1004,7 +1040,7 @@
 };
 
 
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsObjectAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1017,11 +1053,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1034,11 +1070,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1049,11 +1085,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1067,11 +1103,11 @@
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
  public:
   LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1089,11 +1125,11 @@
 
   Token::Value op() const { return hydrogen()->token(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1105,11 +1141,11 @@
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGetCachedArrayIndex(LOperand* value) {
     inputs_[0] = value;
@@ -1122,7 +1158,7 @@
 };
 
 
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
     : public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1135,11 +1171,11 @@
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1153,11 +1189,11 @@
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LCmpT(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1176,7 +1212,7 @@
 };
 
 
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1192,7 +1228,7 @@
 };
 
 
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -1213,7 +1249,7 @@
     return lazy_deopt_env_;
   }
   virtual void SetDeferredLazyDeoptimizationEnvironment(
-      LEnvironment* env) V8_OVERRIDE {
+      LEnvironment* env) OVERRIDE {
     lazy_deopt_env_ = env;
   }
 
@@ -1222,7 +1258,7 @@
 };
 
 
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LBoundsCheck(LOperand* index, LOperand* length) {
     inputs_[0] = index;
@@ -1237,7 +1273,7 @@
 };
 
 
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1254,7 +1290,7 @@
 };
 
 
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
       : op_(op), can_deopt_(can_deopt) {
@@ -1275,7 +1311,7 @@
 };
 
 
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSubI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1290,7 +1326,7 @@
 };
 
 
-class LRSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LRSubI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LRSubI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1305,7 +1341,7 @@
 };
 
 
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1314,7 +1350,7 @@
 };
 
 
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1323,7 +1359,7 @@
 };
 
 
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1332,7 +1368,7 @@
 };
 
 
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1343,7 +1379,7 @@
 };
 
 
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1354,7 +1390,7 @@
 };
 
 
-class LBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1365,11 +1401,11 @@
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LCmpMapAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1386,7 +1422,7 @@
 };
 
 
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMapEnumLength(LOperand* value) {
     inputs_[0] = value;
@@ -1398,7 +1434,7 @@
 };
 
 
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
     inputs_[0] = date;
@@ -1417,7 +1453,7 @@
 };
 
 
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
     inputs_[0] = string;
@@ -1432,7 +1468,7 @@
 };
 
 
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LSeqStringSetChar(LOperand* context,
                     LOperand* string,
@@ -1453,7 +1489,7 @@
 };
 
 
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LAddI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1468,7 +1504,7 @@
 };
 
 
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathMinMax(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1483,7 +1519,7 @@
 };
 
 
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LPower(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1498,7 +1534,7 @@
 };
 
 
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
       : op_(op) {
@@ -1510,18 +1546,18 @@
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticD;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LArithmeticT(Token::Value op,
                LOperand* context,
@@ -1538,18 +1574,18 @@
   LOperand* right() { return inputs_[2]; }
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticT;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
     inputs_[0] = value;
@@ -1563,7 +1599,7 @@
     return parameter_count()->IsConstantOperand();
   }
   LConstantOperand* constant_parameter_count() {
-    ASSERT(has_constant_parameter_count());
+    DCHECK(has_constant_parameter_count());
     return LConstantOperand::cast(parameter_count());
   }
   LOperand* parameter_count() { return inputs_[2]; }
@@ -1572,7 +1608,7 @@
 };
 
 
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedField(LOperand* object) {
     inputs_[0] = object;
@@ -1585,15 +1621,17 @@
 };
 
 
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object) {
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1602,7 +1640,7 @@
 };
 
 
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) {
     inputs_[0] = function;
@@ -1615,7 +1653,7 @@
 };
 
 
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
   DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1624,7 +1662,7 @@
 };
 
 
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyed(LOperand* elements, LOperand* key) {
     inputs_[0] = elements;
@@ -1649,43 +1687,50 @@
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
 
 
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
  public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
     inputs_[2] = key;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
 
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = global_object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
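These load-IC hunks thread the type feedback vector through the generic loads as an extra temp operand, so the vector reaches the IC stub in a fixed register instead of being refetched at the call site. A sketch of how the chunk builder would supply it, assuming this era's FLAG_vector_ics flag and VectorLoadICDescriptor naming:

    LOperand* vector = NULL;
    if (FLAG_vector_ics) {
      // Pin the vector to the register the IC calling convention expects.
      vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
    }
    LLoadGlobalGeneric* result =
        new (zone()) LLoadGlobalGeneric(context, global_object, vector);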
@@ -1695,7 +1740,7 @@
 };
 
 
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LStoreGlobalCell(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1710,7 +1755,7 @@
 };
 
 
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
     inputs_[0] = context;
@@ -1723,11 +1768,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LStoreContextSlot(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -1742,11 +1787,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
     inputs_[0] = value;
@@ -1758,7 +1803,7 @@
 };
 
 
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LDrop(int count) : count_(count) { }
 
@@ -1771,7 +1816,7 @@
 };
 
 
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
  public:
   LStoreCodeEntry(LOperand* function, LOperand* code_object) {
     inputs_[0] = function;
@@ -1788,7 +1833,7 @@
 };
 
 
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
  public:
   LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
     inputs_[0] = base_object;
@@ -1798,27 +1843,27 @@
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
 
 
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
   DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
   DECLARE_HYDROGEN_ACCESSOR(Context)
 };
 
 
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LDeclareGlobals(LOperand* context) {
     inputs_[0] = context;
@@ -1831,7 +1876,7 @@
 };
 
 
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallJSFunction(LOperand* function) {
     inputs_[0] = function;
@@ -1842,48 +1887,47 @@
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  public:
-  LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
-                      const ZoneList<LOperand*>& operands,
-                      Zone* zone)
-    : descriptor_(descriptor),
-      inputs_(descriptor->environment_length() + 1, zone) {
-    ASSERT(descriptor->environment_length() + 1 == operands.length());
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
     inputs_.AddAll(operands, zone);
   }
 
   LOperand* target() const { return inputs_[0]; }
 
-  const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+  const CallInterfaceDescriptor descriptor() { return descriptor_; }
 
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
   DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
-  const CallInterfaceDescriptor* descriptor_;
+  CallInterfaceDescriptor descriptor_;
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+  virtual int TempCount() FINAL OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
 };
 
 
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInvokeFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
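The LCallWithDescriptor hunk above also switches the descriptor from a pointer to a cheap by-value handle and sizes its operand list from GetRegisterParameterCount() instead of the removed environment_length(). The extra +1 slot is the call target, which sits ahead of the register parameters:

    // Layout of inputs_ after construction (sketch, derived from the code
    // above): length == descriptor.GetRegisterParameterCount() + 1.
    //   inputs_[0]     -> code or function target (see target())
    //   inputs_[1..N]  -> the descriptor's N register parameters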
@@ -1896,13 +1940,13 @@
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1919,7 +1963,7 @@
 };
 
 
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNew(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1932,13 +1976,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1951,13 +1995,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallRuntime(LOperand* context) {
     inputs_[0] = context;
@@ -1968,7 +2012,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -1978,7 +2022,7 @@
 };
 
 
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LInteger32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -1990,7 +2034,7 @@
 };
 
 
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LUint32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -2002,7 +2046,7 @@
 };
 
 
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -2018,7 +2062,7 @@
 };
 
 
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -2034,7 +2078,7 @@
 };
 
 
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
     inputs_[0] = value;
@@ -2051,7 +2095,7 @@
 };
 
 
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2067,7 +2111,7 @@
 
 
 // Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToI(LOperand* value) {
     inputs_[0] = value;
@@ -2083,7 +2127,7 @@
 
 
 // Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LTaggedToI(LOperand* value,
              LOperand* temp,
@@ -2104,7 +2148,7 @@
 };
 
 
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LSmiTag(LOperand* value) {
     inputs_[0] = value;
@@ -2117,7 +2161,7 @@
 };
 
 
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LNumberUntagD(LOperand* value) {
     inputs_[0] = value;
@@ -2130,7 +2174,7 @@
 };
 
 
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LSmiUntag(LOperand* value, bool needs_check)
       : needs_check_(needs_check) {
@@ -2147,7 +2191,7 @@
 };
 
 
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
     inputs_[0] = object;
@@ -2162,7 +2206,7 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2170,7 +2214,7 @@
 };
 
 
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
     inputs_[0] = context;
@@ -2185,14 +2229,14 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
     inputs_[0] = object;
@@ -2217,7 +2261,7 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   bool NeedsCanonicalization() {
     if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
         hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
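NeedsCanonicalization exists because double arithmetic may produce a NaN with an arbitrary payload, while the double backing store reserves one specific NaN bit pattern to mark the hole; a non-canonical NaN written raw could be misread as a hole. A portable sketch of the squashing step, using only the standard library:

    #include <cmath>
    #include <limits>

    static inline double CanonicalizeNaNSketch(double value) {
      // Collapse every NaN payload to the canonical quiet NaN so it can
      // never collide with the hole's reserved encoding.
      return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN()
                               : value;
    }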
@@ -2229,7 +2273,7 @@
 };
 
 
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyedGeneric(LOperand* context,
                      LOperand* obj,
@@ -2249,13 +2293,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* context,
@@ -2273,7 +2317,7 @@
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2284,7 +2328,7 @@
 };
 
 
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LTrapAllocationMemento(LOperand* object,
                          LOperand* temp) {
@@ -2300,7 +2344,7 @@
 };
 
 
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -2318,7 +2362,7 @@
 
 
 
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
     inputs_[0] = context;
@@ -2335,7 +2379,7 @@
 };
 
 
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
     inputs_[0] = context;
@@ -2350,7 +2394,7 @@
 };
 
 
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckValue(LOperand* value) {
     inputs_[0] = value;
@@ -2363,7 +2407,7 @@
 };
 
 
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckInstanceType(LOperand* value) {
     inputs_[0] = value;
@@ -2376,7 +2420,7 @@
 };
 
 
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckMaps(LOperand* value = NULL) {
     inputs_[0] = value;
@@ -2389,7 +2433,7 @@
 };
 
 
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2401,7 +2445,7 @@
 };
 
 
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2414,7 +2458,7 @@
 };
 
 
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampDToUint8(LOperand* unclamped) {
     inputs_[0] = unclamped;
@@ -2426,7 +2470,7 @@
 };
 
 
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampIToUint8(LOperand* unclamped) {
     inputs_[0] = unclamped;
@@ -2438,7 +2482,7 @@
 };
 
 
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LClampTToUint8(LOperand* unclamped, LOperand* temp) {
     inputs_[0] = unclamped;
@@ -2452,7 +2496,7 @@
 };
 
 
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleBits(LOperand* value) {
     inputs_[0] = value;
@@ -2465,7 +2509,7 @@
 };
 
 
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LConstructDouble(LOperand* hi, LOperand* lo) {
     inputs_[0] = hi;
@@ -2479,7 +2523,7 @@
 };
 
 
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* context,
             LOperand* size,
@@ -2501,7 +2545,7 @@
 };
 
 
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LRegExpLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2514,7 +2558,7 @@
 };
 
 
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFunctionLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2527,7 +2571,7 @@
 };
 
 
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LToFastProperties(LOperand* value) {
     inputs_[0] = value;
@@ -2540,7 +2584,7 @@
 };
 
 
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -2554,7 +2598,7 @@
 };
 
 
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -2567,11 +2611,11 @@
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
  public:
   explicit LIsConstructCallAndBranch(LOperand* temp) {
     temps_[0] = temp;
@@ -2584,18 +2628,18 @@
 };
 
 
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
 
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStackCheck(LOperand* context) {
     inputs_[0] = context;
@@ -2613,7 +2657,7 @@
 };
 
 
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LForInPrepareMap(LOperand* context, LOperand* object) {
     inputs_[0] = context;
@@ -2627,7 +2671,7 @@
 };
 
 
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LForInCacheArray(LOperand* map) {
     inputs_[0] = map;
@@ -2643,7 +2687,7 @@
 };
 
 
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LCheckMapValue(LOperand* value, LOperand* map) {
     inputs_[0] = value;
@@ -2657,7 +2701,7 @@
 };
 
 
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadFieldByIndex(LOperand* object, LOperand* index) {
     inputs_[0] = object;
@@ -2701,7 +2745,7 @@
 
 
 class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
  public:
   LPlatformChunk(CompilationInfo* info, HGraph* graph)
       : LChunk(info, graph) { }
@@ -2711,20 +2755,14 @@
 };
 
 
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
  public:
   LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
-      : LChunkBuilderBase(graph->zone()),
-        chunk_(NULL),
-        info_(info),
-        graph_(graph),
-        status_(UNUSED),
+      : LChunkBuilderBase(info, graph),
         current_instruction_(NULL),
         current_block_(NULL),
         next_block_(NULL),
-        allocator_(allocator) { }
-
-  Isolate* isolate() const { return graph_->isolate(); }
+        allocator_(allocator) {}
 
   // Build the sequence for the graph.
   LPlatformChunk* Build();
@@ -2742,6 +2780,7 @@
 
   LInstruction* DoMathFloor(HUnaryMathOperation* instr);
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
@@ -2759,24 +2798,6 @@
   LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
 
  private:
-  enum Status {
-    UNUSED,
-    BUILDING,
-    DONE,
-    ABORTED
-  };
-
-  LPlatformChunk* chunk() const { return chunk_; }
-  CompilationInfo* info() const { return info_; }
-  HGraph* graph() const { return graph_; }
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_building() const { return status_ == BUILDING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  void Abort(BailoutReason reason);
-
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2818,7 +2839,7 @@
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2861,10 +2882,6 @@
   LInstruction* DoArithmeticT(Token::Value op,
                               HBinaryOperation* instr);
 
-  LPlatformChunk* chunk_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
   HBasicBlock* next_block_;
diff --git a/src/arm/lithium-codegen-arm.cc b/src/arm/lithium-codegen-arm.cc
index e98fcf4..a06ed73 100644
--- a/src/arm/lithium-codegen-arm.cc
+++ b/src/arm/lithium-codegen-arm.cc
@@ -6,15 +6,18 @@
 
 #include "src/arm/lithium-codegen-arm.h"
 #include "src/arm/lithium-gap-resolver-arm.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
-#include "src/stub-cache.h"
 #include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
@@ -24,9 +27,9 @@
         deopt_mode_(mode) { }
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+  virtual void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const V8_OVERRIDE {
+  virtual void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
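SafepointGenerator above is a CallWrapper hook: the macro-assembler invokes BeforeCall just before it emits a call and AfterCall at the return address, which is exactly where the pointer map must be recorded. Usage elsewhere in this file looks roughly like:

    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
    ParameterCount count(arity);
    // The assembler runs generator.AfterCall() right after the call,
    // recording a safepoint at the return pc.
    __ InvokeFunction(r1, count, CALL_FUNCTION, generator);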
 
@@ -41,7 +44,7 @@
 
 bool LCodeGen::GenerateCode() {
   LPhase phase("Z_Code generation", chunk());
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   status_ = GENERATING;
 
   // Open a frame scope to indicate that there is a frame on the stack.  The
@@ -49,16 +52,13 @@
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::NONE);
 
-  return GeneratePrologue() &&
-      GenerateBody() &&
-      GenerateDeferredCode() &&
-      GenerateDeoptJumpTable() &&
-      GenerateSafepointTable();
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
 }
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
-  ASSERT(is_done());
+  DCHECK(is_done());
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
@@ -67,8 +67,8 @@
 
 
 void LCodeGen::SaveCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Save clobbered callee double registers");
   int count = 0;
   BitVector* doubles = chunk()->allocated_double_registers();
@@ -83,8 +83,8 @@
 
 
 void LCodeGen::RestoreCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Restore clobbered callee double registers");
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
@@ -99,7 +99,7 @@
 
 
 bool LCodeGen::GeneratePrologue() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -130,7 +130,7 @@
       __ b(ne, &ok);
 
       __ ldr(r2, GlobalObjectOperand());
-      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalReceiverOffset));
+      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
 
       __ str(r2, MemOperand(sp, receiver_offset));
 
@@ -188,7 +188,7 @@
       need_write_barrier = false;
     } else {
       __ push(r1);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in both r0 and cp.  It replaces the context
@@ -247,7 +247,7 @@
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  ASSERT(slots >= 0);
+  DCHECK(slots >= 0);
   __ sub(sp, sp, Operand(slots * kPointerSize));
 }
 
@@ -263,7 +263,7 @@
 
 
 bool LCodeGen::GenerateDeferredCode() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
@@ -281,8 +281,8 @@
       __ bind(code->entry());
       if (NeedsDeferredFrame()) {
         Comment(";;; Build frame");
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
         frame_is_built_ = true;
         __ PushFixedFrame();
         __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
@@ -293,7 +293,7 @@
       code->Generate();
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
-        ASSERT(frame_is_built_);
+        DCHECK(frame_is_built_);
         __ pop(ip);
         __ PopFixedFrame();
         frame_is_built_ = false;
@@ -310,7 +310,7 @@
 }
 
 
-bool LCodeGen::GenerateDeoptJumpTable() {
+bool LCodeGen::GenerateJumpTable() {
   // Check that the jump table is accessible from everywhere in the function
   // code, i.e. that offsets to the table can be encoded in the 24bit signed
   // immediate of a branch instruction.
@@ -319,53 +319,82 @@
   // Each entry in the jump table generates one instruction and inlines one
   // 32bit data after it.
   if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
-      deopt_jump_table_.length() * 7)) {
+                jump_table_.length() * 7)) {
     Abort(kGeneratedCodeIsTooLarge);
   }
 
-  if (deopt_jump_table_.length() > 0) {
+  if (jump_table_.length() > 0) {
+    Label needs_frame, call_deopt_entry;
+
     Comment(";;; -------------------- Jump table --------------------");
-  }
-  Label table_start;
-  __ bind(&table_start);
-  Label needs_frame;
-  for (int i = 0; i < deopt_jump_table_.length(); i++) {
-    __ bind(&deopt_jump_table_[i].label);
-    Address entry = deopt_jump_table_[i].address;
-    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    if (id == Deoptimizer::kNotDeoptimizationEntry) {
-      Comment(";;; jump table entry %d.", i);
-    } else {
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    }
-    if (deopt_jump_table_[i].needs_frame) {
-      ASSERT(!info()->saves_caller_doubles());
-      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(entry)));
-      if (needs_frame.is_bound()) {
-        __ b(&needs_frame);
+    Address base = jump_table_[0].address;
+
+    Register entry_offset = scratch0();
+
+    int length = jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      __ bind(&table_entry->label);
+
+      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->reason);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load an immediate
+      // offset which will be added to the base address later.
+      __ mov(entry_offset, Operand(entry - base));
+
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
+        if (needs_frame.is_bound()) {
+          __ b(&needs_frame);
+        } else {
+          __ bind(&needs_frame);
+          Comment(";;; call deopt with frame");
+          __ PushFixedFrame();
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          DCHECK(info()->IsStub());
+          __ mov(ip, Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(ip);
+          __ add(fp, sp,
+                 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+          __ bind(&call_deopt_entry);
+          // Add the base address to the offset previously loaded in
+          // entry_offset.
+          __ add(entry_offset, entry_offset,
+                 Operand(ExternalReference::ForDeoptEntry(base)));
+          __ blx(entry_offset);
+        }
+
+        masm()->CheckConstPool(false, false);
       } else {
-        __ bind(&needs_frame);
-        __ PushFixedFrame();
-        // This variant of deopt can only be used with stubs. Since we don't
-        // have a function pointer to install in the stack frame that we're
-        // building, install a special marker there instead.
-        ASSERT(info()->IsStub());
-        __ mov(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-        __ push(scratch0());
-        __ add(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-        __ mov(lr, Operand(pc), LeaveCC, al);
-        __ mov(pc, ip);
+        // The last entry can fall through into `call_deopt_entry`, avoiding a
+        // branch.
+        bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
+
+        if (need_branch) __ b(&call_deopt_entry);
+
+        masm()->CheckConstPool(false, !need_branch);
       }
-    } else {
+    }
+
+    if (!call_deopt_entry.is_bound()) {
+      Comment(";;; call deopt");
+      __ bind(&call_deopt_entry);
+
       if (info()->saves_caller_doubles()) {
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         RestoreCallerDoubles();
       }
-      __ mov(lr, Operand(pc), LeaveCC, al);
-      __ mov(pc, Operand(ExternalReference::ForDeoptEntry(entry)));
+
+      // Add the base address to the offset previously loaded in entry_offset.
+      __ add(entry_offset, entry_offset,
+             Operand(ExternalReference::ForDeoptEntry(base)));
+      __ blx(entry_offset);
     }
-    masm()->CheckConstPool(false, false);
   }
 
   // Force constant pool emission at the end of the deopt jump table to make
@@ -380,7 +409,7 @@
 
 
 bool LCodeGen::GenerateSafepointTable() {
-  ASSERT(is_done());
+  DCHECK(is_done());
   safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
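
The rewritten jump table above exploits the fact that second-level deopt entries are contiguous: each slot materializes only the small immediate entry - base, and the shared call_deopt_entry sequence adds the base address back exactly once. A rough standalone illustration of the same idea (types and names here are hypothetical):

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

using Address = std::uintptr_t;

// Resolve entry i from a base address plus a small per-entry offset, the
// way the generated code does at call_deopt_entry.
Address ResolveDeoptEntry(const std::vector<Address>& entries, std::size_t i) {
  assert(!entries.empty() && i < entries.size());
  const Address base = entries[0];  // jump_table_[0].address
  const std::uint32_t offset =
      static_cast<std::uint32_t>(entries[i] - base);  // small mov immediate
  return base + offset;  // base added once, not per entry
}
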
@@ -397,7 +426,7 @@
 
 
 Register LCodeGen::ToRegister(LOperand* op) const {
-  ASSERT(op->IsRegister());
+  DCHECK(op->IsRegister());
   return ToRegister(op->index());
 }
 
@@ -411,12 +440,12 @@
     Handle<Object> literal = constant->handle(isolate());
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsInteger32()) {
-      ASSERT(literal->IsNumber());
+      DCHECK(literal->IsNumber());
       __ mov(scratch, Operand(static_cast<int32_t>(literal->Number())));
     } else if (r.IsDouble()) {
       Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
     } else {
-      ASSERT(r.IsSmiOrTagged());
+      DCHECK(r.IsSmiOrTagged());
       __ Move(scratch, literal);
     }
     return scratch;
@@ -430,7 +459,7 @@
 
 
 DwVfpRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  ASSERT(op->IsDoubleRegister());
+  DCHECK(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }
 
@@ -446,7 +475,7 @@
     Handle<Object> literal = constant->handle(isolate());
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsInteger32()) {
-      ASSERT(literal->IsNumber());
+      DCHECK(literal->IsNumber());
       __ mov(ip, Operand(static_cast<int32_t>(literal->Number())));
       __ vmov(flt_scratch, ip);
       __ vcvt_f64_s32(dbl_scratch, flt_scratch);
@@ -470,7 +499,7 @@
 
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }
 
@@ -495,7 +524,7 @@
   HConstant* constant = chunk_->LookupConstant(op);
   int32_t value = constant->Integer32Value();
   if (r.IsInteger32()) return value;
-  ASSERT(r.IsSmiOrTagged());
+  DCHECK(r.IsSmiOrTagged());
   return reinterpret_cast<int32_t>(Smi::FromInt(value));
 }
 
@@ -508,7 +537,7 @@
 
 double LCodeGen::ToDouble(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasDoubleValue());
+  DCHECK(constant->HasDoubleValue());
   return constant->DoubleValue();
 }
 
@@ -519,15 +548,15 @@
     HConstant* constant = chunk()->LookupConstant(const_op);
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsSmi()) {
-      ASSERT(constant->HasSmiValue());
+      DCHECK(constant->HasSmiValue());
       return Operand(Smi::FromInt(constant->Integer32Value()));
     } else if (r.IsInteger32()) {
-      ASSERT(constant->HasInteger32Value());
+      DCHECK(constant->HasInteger32Value());
       return Operand(constant->Integer32Value());
     } else if (r.IsDouble()) {
       Abort(kToOperandUnsupportedDoubleImmediate);
     }
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     return Operand(constant->handle(isolate()));
   } else if (op->IsRegister()) {
     return Operand(ToRegister(op));
@@ -542,15 +571,15 @@
 
 
 static int ArgumentsOffsetWithoutFrame(int index) {
-  ASSERT(index < 0);
+  DCHECK(index < 0);
   return -(index + 1) * kPointerSize;
 }
 
 
 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
-  ASSERT(!op->IsRegister());
-  ASSERT(!op->IsDoubleRegister());
-  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return MemOperand(fp, StackSlotOffset(op->index()));
   } else {
@@ -562,7 +591,7 @@
 
 
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
-  ASSERT(op->IsDoubleStackSlot());
+  DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
   } else {
@@ -598,13 +627,13 @@
       translation->BeginConstructStubFrame(closure_id, translation_size);
       break;
     case JS_GETTER:
-      ASSERT(translation_size == 1);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
       translation->BeginGetterStubFrame(closure_id);
       break;
     case JS_SETTER:
-      ASSERT(translation_size == 2);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
     case STUB:
@@ -721,7 +750,7 @@
                                LInstruction* instr,
                                SafepointMode safepoint_mode,
                                TargetAddressStorageMode storage_mode) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
   // Block literal pool emission to ensure the nop indicating no inlined smi
   // code is in the correct position.
   Assembler::BlockConstPoolScope block_const_pool(masm());
@@ -741,7 +770,7 @@
                            int num_arguments,
                            LInstruction* instr,
                            SaveFPRegsMode save_doubles) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
 
   __ CallRuntime(function, num_arguments, save_doubles);
 
@@ -812,13 +841,14 @@
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition condition,
-                            LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            const char* detail,
                             Deoptimizer::BailoutType bailout_type) {
+  LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(environment->HasBeenRegistered());
+  DCHECK(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
   Address entry =
       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
   if (entry == NULL) {
@@ -841,7 +871,7 @@
     __ mov(scratch, Operand(count));
     __ ldr(r1, MemOperand(scratch));
     __ sub(r1, r1, Operand(1), SetCC);
-    __ movw(r1, FLAG_deopt_every_n_times, eq);
+    __ mov(r1, Operand(FLAG_deopt_every_n_times), LeaveCC, eq);
     __ str(r1, MemOperand(scratch));
     __ pop(r1);
 
@@ -865,35 +895,35 @@
     __ stop("trap_on_deopt", condition);
   }
 
-  ASSERT(info()->IsStub() || frame_is_built_);
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
+  DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (condition == al && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
+    DeoptComment(reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry; reuse the last
     // jump entry if this is the case.
-    if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last().address != entry) ||
-        (deopt_jump_table_.last().bailout_type != bailout_type) ||
-        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
-      Deoptimizer::JumpTableEntry table_entry(entry,
-                                              bailout_type,
-                                              !frame_is_built_);
-      deopt_jump_table_.Add(table_entry, zone());
+    if (jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
     }
-    __ b(condition, &deopt_jump_table_.last().label);
+    __ b(condition, &jump_table_.last().label);
   }
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition condition,
-                            LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(condition, environment, bailout_type);
+  DeoptimizeIf(condition, instr, detail, bailout_type);
 }
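
DeoptimizeIf now constructs the JumpTableEntry first and reuses the previous slot whenever the new entry is equivalent, replacing the field-by-field comparison in the removed lines. A hedged sketch of that equivalence test (fields taken from the removed code; the real entry also carries a deopt reason and a label):

#include <cstdint>

struct JumpTableEntrySketch {
  std::uintptr_t address;
  int bailout_type;  // Deoptimizer::BailoutType in the real code
  bool needs_frame;

  bool IsEquivalentTo(const JumpTableEntrySketch& other) const {
    return address == other.address && bailout_type == other.bailout_type &&
           needs_frame == other.needs_frame;
  }
};
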
 
 
@@ -952,7 +982,7 @@
 
 
 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
+  DCHECK(deoptimization_literals_.length() == 0);
 
   const ZoneList<Handle<JSFunction> >* inlined_closures =
       chunk()->inlined_closures();
@@ -972,7 +1002,7 @@
   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
   } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
@@ -984,7 +1014,7 @@
     Safepoint::Kind kind,
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
-  ASSERT(expected_safepoint_kind_ == kind);
+  DCHECK(expected_safepoint_kind_ == kind);
 
   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
@@ -1024,15 +1054,6 @@
 }
 
 
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
-    LPointerMap* pointers,
-    int arguments,
-    Safepoint::DeoptMode deopt_mode) {
-  RecordSafepoint(
-      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
@@ -1086,8 +1107,8 @@
 
 
 void LCodeGen::DoCallStub(LCallStub* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(r0));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
       RegExpExecStub stub(isolate());
@@ -1118,7 +1139,7 @@
 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));
 
   // Theoretically, a variation of the branch-free code for integer division by
   // a power of 2 (calculating the remainder via an additional multiplication
@@ -1137,7 +1158,7 @@
     __ and_(dividend, dividend, Operand(mask));
     __ rsb(dividend, dividend, Operand::Zero(), SetCC);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
     }
     __ b(&done);
   }
@@ -1152,10 +1173,10 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(!dividend.is(result));
+  DCHECK(!dividend.is(result));
 
   if (divisor == 0) {
-    DeoptimizeIf(al, instr->environment());
+    DeoptimizeIf(al, instr);
     return;
   }
 
@@ -1170,7 +1191,7 @@
     Label remainder_not_zero;
     __ b(ne, &remainder_not_zero);
     __ cmp(dividend, Operand::Zero());
-    DeoptimizeIf(lt, instr->environment());
+    DeoptimizeIf(lt, instr);
     __ bind(&remainder_not_zero);
   }
 }
@@ -1190,7 +1211,7 @@
     // case because we can't return a NaN.
     if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
       __ cmp(right_reg, Operand::Zero());
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
     }
 
     // Check for kMinInt % -1; sdiv will return kMinInt, which is not what we
@@ -1201,7 +1222,7 @@
       __ b(ne, &no_overflow_possible);
       __ cmp(right_reg, Operand(-1));
       if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-        DeoptimizeIf(eq, instr->environment());
+        DeoptimizeIf(eq, instr);
       } else {
         __ b(ne, &no_overflow_possible);
         __ mov(result_reg, Operand::Zero());
@@ -1222,7 +1243,7 @@
       __ cmp(result_reg, Operand::Zero());
       __ b(ne, &done);
       __ cmp(left_reg, Operand::Zero());
-      DeoptimizeIf(lt, instr->environment());
+      DeoptimizeIf(lt, instr);
     }
     __ bind(&done);
 
@@ -1232,22 +1253,22 @@
     Register right_reg = ToRegister(instr->right());
     Register result_reg = ToRegister(instr->result());
     Register scratch = scratch0();
-    ASSERT(!scratch.is(left_reg));
-    ASSERT(!scratch.is(right_reg));
-    ASSERT(!scratch.is(result_reg));
+    DCHECK(!scratch.is(left_reg));
+    DCHECK(!scratch.is(right_reg));
+    DCHECK(!scratch.is(result_reg));
     DwVfpRegister dividend = ToDoubleRegister(instr->temp());
     DwVfpRegister divisor = ToDoubleRegister(instr->temp2());
-    ASSERT(!divisor.is(dividend));
+    DCHECK(!divisor.is(dividend));
     LowDwVfpRegister quotient = double_scratch0();
-    ASSERT(!quotient.is(dividend));
-    ASSERT(!quotient.is(divisor));
+    DCHECK(!quotient.is(dividend));
+    DCHECK(!quotient.is(divisor));
 
     Label done;
     // Check for x % 0; we have to deopt in this case because we can't return a
     // NaN.
     if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
       __ cmp(right_reg, Operand::Zero());
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
     }
 
     __ Move(result_reg, left_reg);
@@ -1277,7 +1298,7 @@
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ b(ne, &done);
       __ cmp(left_reg, Operand::Zero());
-      DeoptimizeIf(mi, instr->environment());
+      DeoptimizeIf(mi, instr);
     }
     __ bind(&done);
   }
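
The kBailoutOnMinusZero paths in this modulo lowering exist because JavaScript distinguishes -0 from 0: a remainder such as -8 % 4 is -0, which an int32 result cannot represent, so the code deopts when a zero result has a negative left operand. A quick host-side illustration of the sign-preserving remainder:

#include <cmath>
#include <cstdio>

int main() {
  // fmod matches JS % here: the result takes the dividend's sign, so
  // -8 % 4 yields negative zero.
  double r = std::fmod(-8.0, 4.0);
  std::printf("signbit(%g) = %d\n", r, std::signbit(r) ? 1 : 0);
}
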
@@ -1288,26 +1309,26 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
-  ASSERT(!result.is(dividend));
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ cmp(dividend, Operand::Zero());
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     __ cmp(dividend, Operand(kMinInt));
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ tst(dividend, Operand(mask));
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   }
 
   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
@@ -1332,10 +1353,10 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(!dividend.is(result));
+  DCHECK(!dividend.is(result));
 
   if (divisor == 0) {
-    DeoptimizeIf(al, instr->environment());
+    DeoptimizeIf(al, instr);
     return;
   }
 
@@ -1343,7 +1364,7 @@
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ cmp(dividend, Operand::Zero());
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   __ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1353,7 +1374,7 @@
     __ mov(ip, Operand(divisor));
     __ smull(scratch0(), ip, result, ip);
     __ sub(scratch0(), scratch0(), dividend, SetCC);
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   }
 }
 
@@ -1368,7 +1389,7 @@
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ cmp(divisor, Operand::Zero());
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1380,7 +1401,7 @@
     }
     __ b(pl, &positive);
     __ cmp(dividend, Operand::Zero());
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
     __ bind(&positive);
   }
 
@@ -1392,7 +1413,7 @@
     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
     __ cmp(dividend, Operand(kMinInt));
     __ cmp(divisor, Operand(-1), eq);
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1415,7 +1436,7 @@
     Register remainder = scratch0();
     __ Mls(remainder, result, divisor, dividend);
     __ cmp(remainder, Operand::Zero());
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   }
 }
 
@@ -1426,7 +1447,7 @@
   DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
 
   // This is computed in-place.
-  ASSERT(addend.is(ToDoubleRegister(instr->result())));
+  DCHECK(addend.is(ToDoubleRegister(instr->result())));
 
   __ vmla(addend, multiplier, multiplicand);
 }
@@ -1438,7 +1459,7 @@
   DwVfpRegister multiplicand = ToDoubleRegister(instr->multiplicand());
 
   // This is computed in-place.
-  ASSERT(minuend.is(ToDoubleRegister(instr->result())));
+  DCHECK(minuend.is(ToDoubleRegister(instr->result())));
 
   __ vmls(minuend, multiplier, multiplicand);
 }
@@ -1466,13 +1487,13 @@
   // If the divisor is negative, we have to negate and handle edge cases.
   __ rsb(result, dividend, Operand::Zero(), SetCC);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(vs, instr->environment());
+      DeoptimizeIf(vs, instr);
     }
     return;
   }
@@ -1492,10 +1513,10 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(!dividend.is(result));
+  DCHECK(!dividend.is(result));
 
   if (divisor == 0) {
-    DeoptimizeIf(al, instr->environment());
+    DeoptimizeIf(al, instr);
     return;
   }
 
@@ -1503,7 +1524,7 @@
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ cmp(dividend, Operand::Zero());
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -1518,7 +1539,7 @@
   // In the general case we may need to adjust before and after the truncating
   // division to get a flooring division.
   Register temp = ToRegister(instr->temp());
-  ASSERT(!temp.is(dividend) && !temp.is(result));
+  DCHECK(!temp.is(dividend) && !temp.is(result));
   Label needs_adjustment, done;
   __ cmp(dividend, Operand::Zero());
   __ b(divisor > 0 ? lt : gt, &needs_adjustment);
@@ -1544,7 +1565,7 @@
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ cmp(right, Operand::Zero());
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1556,7 +1577,7 @@
     }
     __ b(pl, &positive);
     __ cmp(left, Operand::Zero());
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
     __ bind(&positive);
   }
 
@@ -1568,7 +1589,7 @@
     // support because, on ARM, sdiv kMinInt, -1 -> kMinInt.
     __ cmp(left, Operand(kMinInt));
     __ cmp(right, Operand(-1), eq);
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   if (CpuFeatures::IsSupported(SUDIV)) {
@@ -1614,14 +1635,14 @@
       // The case of a null constant will be handled separately.
       // If constant is negative and left is null, the result should be -0.
       __ cmp(left, Operand::Zero());
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
     }
 
     switch (constant) {
       case -1:
         if (overflow) {
           __ rsb(result, left, Operand::Zero(), SetCC);
-          DeoptimizeIf(vs, instr->environment());
+          DeoptimizeIf(vs, instr);
         } else {
           __ rsb(result, left, Operand::Zero());
         }
@@ -1631,7 +1652,7 @@
           // If left is strictly negative and the constant is null, the
           // result is -0. Deoptimize if required, otherwise return 0.
           __ cmp(left, Operand::Zero());
-          DeoptimizeIf(mi, instr->environment());
+          DeoptimizeIf(mi, instr);
         }
         __ mov(result, Operand::Zero());
         break;
@@ -1645,17 +1666,17 @@
         int32_t mask = constant >> 31;
         uint32_t constant_abs = (constant + mask) ^ mask;
 
-        if (IsPowerOf2(constant_abs)) {
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
           int32_t shift = WhichPowerOf2(constant_abs);
           __ mov(result, Operand(left, LSL, shift));
           // Correct the sign of the result if the constant is negative.
           if (constant < 0)  __ rsb(result, result, Operand::Zero());
-        } else if (IsPowerOf2(constant_abs - 1)) {
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
           int32_t shift = WhichPowerOf2(constant_abs - 1);
           __ add(result, left, Operand(left, LSL, shift));
           // Correct the sign of the result if the constant is negative.
           if (constant < 0)  __ rsb(result, result, Operand::Zero());
-        } else if (IsPowerOf2(constant_abs + 1)) {
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
           int32_t shift = WhichPowerOf2(constant_abs + 1);
           __ rsb(result, left, Operand(left, LSL, shift));
           // Correct the sign of the result is the constant is negative.
@@ -1668,7 +1689,7 @@
     }
 
   } else {
-    ASSERT(right_op->IsRegister());
+    DCHECK(right_op->IsRegister());
     Register right = ToRegister(right_op);
 
     if (overflow) {
@@ -1681,7 +1702,7 @@
         __ smull(result, scratch, left, right);
       }
       __ cmp(scratch, Operand(result, ASR, 31));
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr);
     } else {
       if (instr->hydrogen()->representation().IsSmi()) {
         __ SmiUntag(result, left);
@@ -1697,7 +1718,7 @@
       __ b(pl, &done);
       // Bail out if the result is minus zero.
       __ cmp(result, Operand::Zero());
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
       __ bind(&done);
     }
   }
@@ -1707,7 +1728,7 @@
 void LCodeGen::DoBitI(LBitI* instr) {
   LOperand* left_op = instr->left();
   LOperand* right_op = instr->right();
-  ASSERT(left_op->IsRegister());
+  DCHECK(left_op->IsRegister());
   Register left = ToRegister(left_op);
   Register result = ToRegister(instr->result());
   Operand right(no_reg);
@@ -1715,7 +1736,7 @@
   if (right_op->IsStackSlot()) {
     right = Operand(EmitLoadRegister(right_op, ip));
   } else {
-    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
     right = ToOperand(right_op);
   }
 
@@ -1760,7 +1781,7 @@
       case Token::SHR:
         if (instr->can_deopt()) {
           __ mov(result, Operand(left, LSR, scratch), SetCC);
-          DeoptimizeIf(mi, instr->environment());
+          DeoptimizeIf(mi, instr);
         } else {
           __ mov(result, Operand(left, LSR, scratch));
         }
@@ -1797,7 +1818,7 @@
         } else {
           if (instr->can_deopt()) {
             __ tst(left, Operand(0x80000000));
-            DeoptimizeIf(ne, instr->environment());
+            DeoptimizeIf(ne, instr);
           }
           __ Move(result, left);
         }
@@ -1812,7 +1833,7 @@
             } else {
               __ SmiTag(result, left, SetCC);
             }
-            DeoptimizeIf(vs, instr->environment());
+            DeoptimizeIf(vs, instr);
           } else {
             __ mov(result, Operand(left, LSL, shift_count));
           }
@@ -1839,12 +1860,12 @@
     Register right_reg = EmitLoadRegister(right, ip);
     __ sub(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   } else {
-    ASSERT(right->IsRegister() || right->IsConstantOperand());
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
     __ sub(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   }
 
   if (can_overflow) {
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   }
 }
 
@@ -1860,12 +1881,12 @@
     Register right_reg = EmitLoadRegister(right, ip);
     __ rsb(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   } else {
-    ASSERT(right->IsRegister() || right->IsConstantOperand());
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
     __ rsb(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   }
 
   if (can_overflow) {
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   }
 }
 
@@ -1881,7 +1902,7 @@
 
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
-  ASSERT(instr->result()->IsDoubleRegister());
+  DCHECK(instr->result()->IsDoubleRegister());
   DwVfpRegister result = ToDoubleRegister(instr->result());
   double v = instr->value();
   __ Vmov(result, v, scratch0());
@@ -1913,15 +1934,15 @@
   Register scratch = ToRegister(instr->temp());
   Smi* index = instr->index();
   Label runtime, done;
-  ASSERT(object.is(result));
-  ASSERT(object.is(r0));
-  ASSERT(!scratch.is(scratch0()));
-  ASSERT(!scratch.is(object));
+  DCHECK(object.is(result));
+  DCHECK(object.is(r0));
+  DCHECK(!scratch.is(scratch0()));
+  DCHECK(!scratch.is(object));
 
   __ SmiTst(object);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
   __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 
   if (index->value() == 0) {
     __ ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1958,8 +1979,8 @@
     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   }
   Register scratch = scratch0();
-  ASSERT(!scratch.is(string));
-  ASSERT(!scratch.is(ToRegister(index)));
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
   if (encoding == String::ONE_BYTE_ENCODING) {
     __ add(scratch, string, Operand(ToRegister(index)));
   } else {
@@ -2033,12 +2054,12 @@
     Register right_reg = EmitLoadRegister(right, ip);
     __ add(ToRegister(result), ToRegister(left), Operand(right_reg), set_cond);
   } else {
-    ASSERT(right->IsRegister() || right->IsConstantOperand());
+    DCHECK(right->IsRegister() || right->IsConstantOperand());
     __ add(ToRegister(result), ToRegister(left), ToOperand(right), set_cond);
   }
 
   if (can_overflow) {
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   }
 }
 
@@ -2058,7 +2079,7 @@
     __ Move(result_reg, left_reg, condition);
     __ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
   } else {
-    ASSERT(instr->hydrogen()->representation().IsDouble());
+    DCHECK(instr->hydrogen()->representation().IsDouble());
     DwVfpRegister left_reg = ToDoubleRegister(left);
     DwVfpRegister right_reg = ToDoubleRegister(right);
     DwVfpRegister result_reg = ToDoubleRegister(instr->result());
@@ -2145,16 +2166,17 @@
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->left()).is(r1));
-  ASSERT(ToRegister(instr->right()).is(r0));
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r1));
+  DCHECK(ToRegister(instr->right()).is(r0));
+  DCHECK(ToRegister(instr->result()).is(r0));
 
-  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
   // Block literal pool emission to ensure the nop indicating no inlined smi
   // code is in the correct position.
   Assembler::BlockConstPoolScope block_const_pool(masm());
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2193,34 +2215,34 @@
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32() || r.IsSmi()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     Register reg = ToRegister(instr->value());
     __ cmp(reg, Operand::Zero());
     EmitBranch(instr, ne);
   } else if (r.IsDouble()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     DwVfpRegister reg = ToDoubleRegister(instr->value());
     // Test the double value. Zero and NaN are false.
     __ VFPCompareAndSetFlags(reg, 0.0);
     __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
     EmitBranch(instr, ne);
   } else {
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     Register reg = ToRegister(instr->value());
     HType type = instr->hydrogen()->value()->type();
     if (type.IsBoolean()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
       EmitBranch(instr, eq);
     } else if (type.IsSmi()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ cmp(reg, Operand::Zero());
       EmitBranch(instr, ne);
     } else if (type.IsJSArray()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       EmitBranch(instr, al);
     } else if (type.IsHeapNumber()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       DwVfpRegister dbl_scratch = double_scratch0();
       __ vldr(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
       // Test the double value. Zero and NaN are false.
@@ -2228,7 +2250,7 @@
       __ cmp(r0, r0, vs);  // If NaN, set the Z flag. (NaN -> false)
       EmitBranch(instr, ne);
     } else if (type.IsString()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ ldr(ip, FieldMemOperand(reg, String::kLengthOffset));
       __ cmp(ip, Operand::Zero());
       EmitBranch(instr, ne);
@@ -2263,7 +2285,7 @@
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ SmiTst(reg);
-        DeoptimizeIf(eq, instr->environment());
+        DeoptimizeIf(eq, instr);
       }
 
       const Register map = scratch0();
@@ -2319,7 +2341,7 @@
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
-        DeoptimizeIf(al, instr->environment());
+        DeoptimizeIf(al, instr);
       }
     }
   }
@@ -2450,7 +2472,7 @@
 
 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   Representation rep = instr->hydrogen()->value()->representation();
-  ASSERT(!rep.IsInteger32());
+  DCHECK(!rep.IsInteger32());
   Register scratch = ToRegister(instr->temp());
 
   if (rep.IsDouble()) {
@@ -2583,10 +2605,10 @@
 
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // This instruction also signals no smi code inlined.
   __ cmp(r0, Operand::Zero());
@@ -2601,7 +2623,7 @@
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
-  ASSERT(from == to || to == LAST_TYPE);
+  DCHECK(from == to || to == LAST_TYPE);
   return from;
 }
 
@@ -2661,13 +2683,13 @@
                                Register input,
                                Register temp,
                                Register temp2) {
-  ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
 
   __ JumpIfSmi(input, is_false);
 
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
     // Assuming the following assertions, we can use the same compares to test
     // for both being a function type and being in the object type range.
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2698,7 +2720,7 @@
 
   // Objects with a non-function constructor have class 'Object'.
   __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+  if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
     __ b(ne, is_true);
   } else {
     __ b(ne, is_false);
@@ -2744,9 +2766,9 @@
 
 
 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->left()).is(r0));  // Object is in r0.
-  ASSERT(ToRegister(instr->right()).is(r1));  // Function is in r1.
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r0));  // Object is in r0.
+  DCHECK(ToRegister(instr->right()).is(r1));  // Function is in r1.
 
   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -2758,19 +2780,23 @@
 
 
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
-  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
    public:
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
-      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
+                                                 &load_bool_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
+    Label* load_bool() { return &load_bool_; }
+
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
+    Label load_bool_;
   };
 
   DeferredInstanceOfKnownGlobal* deferred;
@@ -2798,12 +2824,12 @@
     // We use Factory::the_hole_value() on purpose instead of loading from the
     // root array to force relocation to be able to later patch with
     // the cached map.
-    PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
     Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
     __ mov(ip, Operand(Handle<Object>(cell)));
     __ ldr(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
     __ cmp(map, Operand(ip));
     __ b(ne, &cache_miss);
+    __ bind(deferred->load_bool());  // Label for calculating code patching.
     // We use Factory::the_hole_value() on purpose instead of loading from the
     // root array to force relocation to be able to later patch
     // with true or false.
@@ -2837,7 +2863,8 @@
 
 
 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                               Label* map_check) {
+                                               Label* map_check,
+                                               Label* bool_load) {
   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   flags = static_cast<InstanceofStub::Flags>(
       flags | InstanceofStub::kArgsInRegisters);
@@ -2847,25 +2874,39 @@
       flags | InstanceofStub::kReturnTrueFalseObject);
   InstanceofStub stub(isolate(), flags);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   LoadContextFromDeferred(instr->context());
 
   __ Move(InstanceofStub::right(), instr->function());
-  static const int kAdditionalDelta = 4;
+
+  int call_size = CallCodeSize(stub.GetCode(), RelocInfo::CODE_TARGET);
+  int additional_delta = (call_size / Assembler::kInstrSize) + 4;
   // Make sure that the code size is predictable, since we use specific
   // constant offsets in the code to find embedded values.
-  PredictableCodeSizeScope predictable(masm_, 5 * Assembler::kInstrSize);
-  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
-  Label before_push_delta;
-  __ bind(&before_push_delta);
-  __ BlockConstPoolFor(kAdditionalDelta);
-  // r5 is used to communicate the offset to the location of the map check.
-  __ mov(r5, Operand(delta * kPointerSize));
-  // The mov above can generate one or two instructions. The delta was computed
-  // for two instructions, so we need to pad here in case of one instruction.
-  if (masm_->InstructionsGeneratedSince(&before_push_delta) != 2) {
-    ASSERT_EQ(1, masm_->InstructionsGeneratedSince(&before_push_delta));
-    __ nop();
+  PredictableCodeSizeScope predictable(
+      masm_, (additional_delta + 1) * Assembler::kInstrSize);
+  // Make sure we don't emit any additional entries in the constant pool before
+  // the call to ensure that the CallCodeSize() calculated the correct number of
+  // instructions for the constant pool load.
+  {
+    ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+    int map_check_delta =
+        masm_->InstructionsGeneratedSince(map_check) + additional_delta;
+    int bool_load_delta =
+        masm_->InstructionsGeneratedSince(bool_load) + additional_delta;
+    Label before_push_delta;
+    __ bind(&before_push_delta);
+    __ BlockConstPoolFor(additional_delta);
+    // r5 is used to communicate the offset to the location of the map check.
+    __ mov(r5, Operand(map_check_delta * kPointerSize));
+    // r6 is used to communicate the offset to the location of the bool load.
+    __ mov(r6, Operand(bool_load_delta * kPointerSize));
+    // The mov above can generate one or two instructions. The delta was
+    // computed for two instructions, so we need to pad here in case of one
+    // instruction.
+    while (masm_->InstructionsGeneratedSince(&before_push_delta) != 4) {
+      __ nop();
+    }
   }
   CallCodeGeneric(stub.GetCode(),
                   RelocInfo::CODE_TARGET,
@@ -2880,10 +2921,10 @@
 
 
 void LCodeGen::DoCmpT(LCmpT* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // This instruction also signals no smi code inlined.
   __ cmp(r0, Operand::Zero());
@@ -2915,23 +2956,25 @@
   if (NeedsEagerFrame()) {
     no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
   }
-  if (instr->has_constant_parameter_count()) {
-    int parameter_count = ToInteger32(instr->constant_parameter_count());
-    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
-    if (sp_delta != 0) {
-      __ add(sp, sp, Operand(sp_delta));
+  { ConstantPoolUnavailableScope constant_pool_unavailable(masm());
+    if (instr->has_constant_parameter_count()) {
+      int parameter_count = ToInteger32(instr->constant_parameter_count());
+      int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+      if (sp_delta != 0) {
+        __ add(sp, sp, Operand(sp_delta));
+      }
+    } else {
+      Register reg = ToRegister(instr->parameter_count());
+      // The argument count parameter is a smi.
+      __ SmiUntag(reg);
+      __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
     }
-  } else {
-    Register reg = ToRegister(instr->parameter_count());
-    // The argument count parameter is a smi
-    __ SmiUntag(reg);
-    __ add(sp, sp, Operand(reg, LSL, kPointerSizeLog2));
-  }
 
-  __ Jump(lr);
+    __ Jump(lr);
 
-  if (no_frame_start != -1) {
-    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+    if (no_frame_start != -1) {
+      info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+    }
   }
 }
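
In the constant-count return path above, the stack adjustment is (parameter_count + 1) * kPointerSize: the arguments plus the implicit receiver. A small check of that arithmetic, assuming the 32-bit ARM pointer size of 4 bytes:

// Illustration of the sp_delta math; kPointerSize == 4 on 32-bit ARM.
constexpr int kPointerSize = 4;
constexpr int SpDelta(int parameter_count) {
  return (parameter_count + 1) * kPointerSize;
}
static_assert(SpDelta(2) == 12, "two arguments plus the receiver");
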
 
@@ -2943,19 +2986,36 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     __ cmp(result, ip);
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 }
 
 
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->global_object()).is(r0));
-  ASSERT(ToRegister(instr->result()).is(r0));
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ Move(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
+  __ mov(VectorLoadICDescriptor::SlotRegister(),
+         Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
 
-  __ mov(r2, Operand(instr->name()));
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r0));
+
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  }
   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2976,7 +3036,7 @@
     Register payload = ToRegister(instr->temp());
     __ ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
     __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Store the value.
@@ -2993,7 +3053,7 @@
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     __ cmp(result, ip);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
     } else {
       __ mov(result, Operand(factory()->undefined_value()), LeaveCC, eq);
     }
@@ -3014,7 +3074,7 @@
     __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
     __ cmp(scratch, ip);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
     } else {
       __ b(ne, &skip_assignment);
     }
@@ -3068,13 +3128,16 @@
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).is(r0));
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r0));
 
   // Name is always in r2.
-  __ mov(r2, Operand(instr->name()));
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  }
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -3084,17 +3147,6 @@
   Register function = ToRegister(instr->function());
   Register result = ToRegister(instr->result());
 
-  // Check that the function really is a function. Load map into the
-  // result register.
-  __ CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
-  DeoptimizeIf(ne, instr->environment());
-
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  __ ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
-  __ tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
-  __ b(ne, &non_instance);
-
   // Get the prototype or initial map from the function.
   __ ldr(result,
          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
@@ -3102,7 +3154,7 @@
   // Check that the function has a prototype or an initial map.
   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
   __ cmp(result, ip);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3111,12 +3163,6 @@
 
   // Get the prototype from the initial map.
   __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-  __ jmp(&done);
-
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in initial map.
-  __ bind(&non_instance);
-  __ ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
 
   // All done.
   __ bind(&done);
@@ -3234,7 +3280,7 @@
         __ ldr(result, mem_operand);
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
           __ cmp(result, Operand(0x80000000));
-          DeoptimizeIf(cs, instr->environment());
+          DeoptimizeIf(cs, instr);
         }
         break;
       case FLOAT32_ELEMENTS:
@@ -3287,7 +3333,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ ldr(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
     __ cmp(scratch, Operand(kHoleNanUpper32));
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 }
 
@@ -3321,11 +3367,11 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
       __ SmiTst(result);
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr);
     } else {
       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
       __ cmp(result, scratch);
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
     }
   }
 }
@@ -3357,7 +3403,7 @@
     if (shift_size >= 0) {
       return MemOperand(base, key, LSL, shift_size);
     } else {
-      ASSERT_EQ(-1, shift_size);
+      DCHECK_EQ(-1, shift_size);
       return MemOperand(base, key, LSR, 1);
     }
   }
@@ -3366,7 +3412,7 @@
     __ add(scratch0(), base, Operand(key, LSL, shift_size));
     return MemOperand(scratch0(), base_offset);
   } else {
-    ASSERT_EQ(-1, shift_size);
+    DCHECK_EQ(-1, shift_size);
     __ add(scratch0(), base, Operand(key, ASR, 1));
     return MemOperand(scratch0(), base_offset);
   }
@@ -3374,11 +3420,15 @@
 
 
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).is(r1));
-  ASSERT(ToRegister(instr->key()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -3463,17 +3513,16 @@
 
   // Deoptimize if the receiver is not a JS object.
   __ SmiTst(receiver);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
   __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
-  DeoptimizeIf(lt, instr->environment());
+  DeoptimizeIf(lt, instr);
 
   __ b(&result_in_receiver);
   __ bind(&global_object);
   __ ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
   __ ldr(result,
          ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
-  __ ldr(result,
-         FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+  __ ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
 
   if (result.is(receiver)) {
     __ bind(&result_in_receiver);
@@ -3493,15 +3542,15 @@
   Register length = ToRegister(instr->length());
   Register elements = ToRegister(instr->elements());
   Register scratch = scratch0();
-  ASSERT(receiver.is(r0));  // Used for parameter count.
-  ASSERT(function.is(r1));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(receiver.is(r0));  // Used for parameter count.
+  DCHECK(function.is(r1));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(r0));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
   __ cmp(length, Operand(kArgumentsLimit));
-  DeoptimizeIf(hi, instr->environment());
+  DeoptimizeIf(hi, instr);
 
   // Push the receiver and use the register to keep the original
   // number of arguments.
@@ -3523,7 +3572,7 @@
   __ b(ne, &loop);
 
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
@@ -3563,19 +3612,19 @@
     __ ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
     // If there is no frame, the context must be in cp.
-    ASSERT(result.is(cp));
+    DCHECK(result.is(cp));
   }
 }
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   __ push(cp);  // The context is the first argument.
   __ Move(scratch0(), instr->hydrogen()->pairs());
   __ push(scratch0());
   __ mov(scratch0(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   __ push(scratch0());
-  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }
 
 
@@ -3621,8 +3670,8 @@
 
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
-  ASSERT(instr->context() != NULL);
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
@@ -3631,7 +3680,7 @@
   __ ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
   __ cmp(scratch, Operand(ip));
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 
   Label done;
   Register exponent = scratch0();
@@ -3647,7 +3696,7 @@
   // Input is negative. Reverse its sign.
   // Preserve the value of all registers.
   {
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    PushSafepointRegistersScope scope(this);
 
     // Registers were saved at the safepoint, so we can use
     // many scratch registers.
@@ -3666,7 +3715,7 @@
     // Slow case: Call the runtime system to do the number allocation.
     __ bind(&slow);
 
-    CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                             instr->context());
     // Set the pointer to the new heap number in tmp.
     if (!tmp1.is(r0)) __ mov(tmp1, Operand(r0));
@@ -3699,20 +3748,20 @@
   // if input is positive.
   __ rsb(result, input, Operand::Zero(), SetCC, mi);
   // Deoptimize on overflow.
-  DeoptimizeIf(vs, instr->environment());
+  DeoptimizeIf(vs, instr);
 }
 
 
 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   // Class for deferred case.
-  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LMathAbs* instr_;
   };
@@ -3745,7 +3794,7 @@
   Label done, exact;
 
   __ TryInt32Floor(result, input, input_high, double_scratch0(), &done, &exact);
-  DeoptimizeIf(al, instr->environment());
+  DeoptimizeIf(al, instr);
 
   __ bind(&exact);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3753,7 +3802,7 @@
     __ cmp(result, Operand::Zero());
     __ b(ne, &done);
     __ cmp(input_high, Operand::Zero());
-    DeoptimizeIf(mi, instr->environment());
+    DeoptimizeIf(mi, instr);
   }
   __ bind(&done);
 }
@@ -3778,7 +3827,7 @@
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     __ VmovHigh(input_high, input);
     __ cmp(input_high, Operand::Zero());
-    DeoptimizeIf(mi, instr->environment());  // [-0.5, -0].
+    DeoptimizeIf(mi, instr);  // [-0.5, -0].
   }
   __ VFPCompareAndSetFlags(input, dot_five);
   __ mov(result, Operand(1), LeaveCC, eq);  // +0.5.
@@ -3792,11 +3841,20 @@
   // Reuse dot_five (double_scratch0) as we no longer need this value.
   __ TryInt32Floor(result, input_plus_dot_five, input_high, double_scratch0(),
                    &done, &done);
-  DeoptimizeIf(al, instr->environment());
+  DeoptimizeIf(al, instr);
   __ bind(&done);
 }
 
 
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DwVfpRegister input_reg = ToDoubleRegister(instr->value());
+  DwVfpRegister output_reg = ToDoubleRegister(instr->result());
+  LowDwVfpRegister scratch = double_scratch0();
+  __ vcvt_f32_f64(scratch.low(), input_reg);
+  __ vcvt_f64_f32(output_reg, scratch.low());
+}
+
+
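// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of this patch): the vcvt_f32_f64 /
// vcvt_f64_f32 pair in DoMathFround above is Math.fround -- round a double
// to the nearest single-precision value, then widen it back. In portable C++:

#include <cassert>

double FroundSketch(double x) {
  return static_cast<double>(static_cast<float>(x));  // double->float->double
}

void FroundExample() {
  assert(FroundSketch(1.5) == 1.5);  // exactly representable as a float
  assert(FroundSketch(0.1) != 0.1);  // 0.1 is not; it picks up float rounding
}
// ---------------------------------------------------------------------------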
 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   DwVfpRegister input = ToDoubleRegister(instr->value());
   DwVfpRegister result = ToDoubleRegister(instr->result());
@@ -3829,23 +3887,25 @@
   Representation exponent_type = instr->hydrogen()->right()->representation();
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
-  ASSERT(!instr->right()->IsDoubleRegister() ||
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
          ToDoubleRegister(instr->right()).is(d1));
-  ASSERT(!instr->right()->IsRegister() ||
-         ToRegister(instr->right()).is(r2));
-  ASSERT(ToDoubleRegister(instr->left()).is(d0));
-  ASSERT(ToDoubleRegister(instr->result()).is(d2));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(d0));
+  DCHECK(ToDoubleRegister(instr->result()).is(d2));
 
   if (exponent_type.IsSmi()) {
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
-    __ JumpIfSmi(r2, &no_deopt);
-    __ ldr(r6, FieldMemOperand(r2, HeapObject::kMapOffset));
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!r6.is(tagged_exponent));
+    __ ldr(r6, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
     __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
     __ cmp(r6, Operand(ip));
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
     __ bind(&no_deopt);
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
@@ -3853,7 +3913,7 @@
     MathPowStub stub(isolate(), MathPowStub::INTEGER);
     __ CallStub(&stub);
   } else {
-    ASSERT(exponent_type.IsDouble());
+    DCHECK(exponent_type.IsDouble());
     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
     __ CallStub(&stub);
   }
@@ -3891,9 +3951,9 @@
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->function()).is(r1));
-  ASSERT(instr->HasPointerMap());
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r1));
+  DCHECK(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
@@ -3911,8 +3971,36 @@
 }
 
 
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+  DCHECK(receiver.is(r1));
+  DCHECK(name.is(r2));
+
+  Register scratch = r3;
+  Register extra = r4;
+  Register extra2 = r5;
+  Register extra3 = r6;
+
+  // Important for the tail-call.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, extra, extra2, extra3);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+  LoadIC::GenerateMiss(masm());
+}
+
+
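// ---------------------------------------------------------------------------
// Aside (illustrative sketch; the table layout below is an assumption, not
// code from this patch): GenerateProbe above hashes the receiver's map and
// the property name into a fixed-size cache of compiled handlers and, on a
// hit, tail-calls the handler; on a miss it falls through to GenerateMiss.
// Schematically:

#include <cstddef>
#include <cstdint>

typedef void (*HandlerSketch)();

struct StubCacheSketch {
  struct Entry { const void* map; const void* name; HandlerSketch handler; };
  static const size_t kSize = 2048;  // some power of two
  Entry entries[kSize];

  HandlerSketch Probe(const void* map, const void* name) const {
    // Fold both keys into an index; the real hash also mixes in the code
    // flags passed to GenerateProbe.
    size_t index = (reinterpret_cast<uintptr_t>(map) ^
                    reinterpret_cast<uintptr_t>(name)) & (kSize - 1);
    const Entry& e = entries[index];
    return (e.map == map && e.name == name) ? e.handler : NULL;
  }
};
// ---------------------------------------------------------------------------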
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(ToRegister(instr->result()).is(r0));
 
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -3921,15 +4009,21 @@
     LConstantOperand* target = LConstantOperand::cast(instr->target());
     Handle<Code> code = Handle<Code>::cast(ToHandle(target));
     generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-    PlatformCallInterfaceDescriptor* call_descriptor =
-        instr->descriptor()->platform_specific_descriptor();
+    PlatformInterfaceDescriptor* call_descriptor =
+        instr->descriptor().platform_specific_descriptor();
     __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
             call_descriptor->storage_mode());
   } else {
-    ASSERT(instr->target()->IsRegister());
+    DCHECK(instr->target()->IsRegister());
     Register target = ToRegister(instr->target());
     generator.BeforeCall(__ CallSize(target));
-    __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Make sure we don't emit any additional entries in the constant pool
+    // before the call, so that the instruction count computed by
+    // CallCodeSize() for the constant pool load stays correct.
+    {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+      __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+    }
     __ Call(target);
   }
   generator.AfterCall();
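
// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of this patch): the new
// ConstantPoolUnavailableScope is an RAII guard -- while it is alive the
// assembler must not emit constant pool entries, so the instruction count
// that CallCodeSize() predicted stays valid. A stand-in with the same shape,
// assuming a simple boolean toggle:

class ConstantPoolGuardSketch {
 public:
  explicit ConstantPoolGuardSketch(bool* pool_available)
      : flag_(pool_available), old_value_(*pool_available) {
    *flag_ = false;  // block constant pool emission for this scope
  }
  ~ConstantPoolGuardSketch() { *flag_ = old_value_; }  // restore on exit

 private:
  bool* flag_;
  bool old_value_;
};
// ---------------------------------------------------------------------------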
@@ -3937,8 +4031,8 @@
 
 
 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(r1));
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(ToRegister(instr->function()).is(r1));
+  DCHECK(ToRegister(instr->result()).is(r0));
 
   if (instr->hydrogen()->pass_argument_count()) {
     __ mov(r0, Operand(instr->arity()));
@@ -3956,9 +4050,9 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->function()).is(r1));
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r1));
+  DCHECK(ToRegister(instr->result()).is(r0));
 
   int arity = instr->arity();
   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
@@ -3967,9 +4061,9 @@
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->constructor()).is(r1));
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(r1));
+  DCHECK(ToRegister(instr->result()).is(r0));
 
   __ mov(r0, Operand(instr->arity()));
   // No cell in r2 for construct type feedback in optimized code
@@ -3980,9 +4074,9 @@
 
 
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->constructor()).is(r1));
-  ASSERT(ToRegister(instr->result()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(r1));
+  DCHECK(ToRegister(instr->result()).is(r0));
 
   __ mov(r0, Operand(instr->arity()));
   __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
@@ -4068,13 +4162,13 @@
 
   __ AssertNotSmi(object);
 
-  ASSERT(!representation.IsSmi() ||
+  DCHECK(!representation.IsSmi() ||
          !instr->value()->IsConstantOperand() ||
          IsSmi(LConstantOperand::cast(instr->value())));
   if (representation.IsDouble()) {
-    ASSERT(access.IsInobject());
-    ASSERT(!instr->hydrogen()->has_transition());
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     DwVfpRegister value = ToDoubleRegister(instr->value());
     __ vstr(value, FieldMemOperand(object, offset));
     return;
@@ -4135,12 +4229,11 @@
 
 
 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).is(r1));
-  ASSERT(ToRegister(instr->value()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  // Name is always in r2.
-  __ mov(r2, Operand(instr->name()));
+  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
@@ -4164,7 +4257,7 @@
     __ stop("eliminated bounds check failed");
     __ bind(&done);
   } else {
-    DeoptimizeIf(cc, instr->environment());
+    DeoptimizeIf(cc, instr);
   }
 }
 
@@ -4309,7 +4402,7 @@
 
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     offset += ToInteger32(const_operand) * kPointerSize;
     store_base = elements;
@@ -4357,14 +4450,13 @@
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).is(r2));
-  ASSERT(ToRegister(instr->key()).is(r1));
-  ASSERT(ToRegister(instr->value()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  Handle<Code> ic = instr->strict_mode() == STRICT
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr, NEVER_INLINE_TARGET_ADDRESS);
 }
 
@@ -4394,15 +4486,14 @@
                          GetLinkRegisterState(),
                          kDontSaveFPRegs);
   } else {
-    ASSERT(ToRegister(instr->context()).is(cp));
-    ASSERT(object_reg.is(r0));
-    PushSafepointRegistersScope scope(
-        this, Safepoint::kWithRegistersAndDoubles);
+    DCHECK(ToRegister(instr->context()).is(cp));
+    DCHECK(object_reg.is(r0));
+    PushSafepointRegistersScope scope(this);
     __ Move(r1, to_map);
     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
     __ CallStub(&stub);
-    RecordSafepointWithRegistersAndDoubles(
+    RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
   __ bind(&not_applicable);
@@ -4414,15 +4505,15 @@
   Register temp = ToRegister(instr->temp());
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
   __ bind(&no_memento_found);
 }
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->left()).is(r1));
-  ASSERT(ToRegister(instr->right()).is(r0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r1));
+  DCHECK(ToRegister(instr->right()).is(r0));
   StringAddStub stub(isolate(),
                      instr->hydrogen()->flags(),
                      instr->hydrogen()->pretenure_flag());
@@ -4431,14 +4522,14 @@
 
 
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
-  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharCodeAt(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4465,7 +4556,7 @@
   // contained in the register pointer map.
   __ mov(result, Operand::Zero());
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -4478,7 +4569,7 @@
     __ SmiTag(index);
     __ push(index);
   }
-  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                           instr->context());
   __ AssertSmi(r0);
   __ SmiUntag(r0);
@@ -4487,14 +4578,14 @@
 
 
 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
-  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+  class DeferredStringCharFromCode FINAL : public LDeferredCode {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -4502,10 +4593,10 @@
   DeferredStringCharFromCode* deferred =
       new(zone()) DeferredStringCharFromCode(this, instr);
 
-  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   Register char_code = ToRegister(instr->char_code());
   Register result = ToRegister(instr->result());
-  ASSERT(!char_code.is(result));
+  DCHECK(!char_code.is(result));
 
   __ cmp(char_code, Operand(String::kMaxOneByteCharCode));
   __ b(hi, deferred->entry());
@@ -4528,7 +4619,7 @@
   // contained in the register pointer map.
   __ mov(result, Operand::Zero());
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ SmiTag(char_code);
   __ push(char_code);
   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
@@ -4538,9 +4629,9 @@
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() || input->IsStackSlot());
+  DCHECK(input->IsRegister() || input->IsStackSlot());
   LOperand* output = instr->result();
-  ASSERT(output->IsDoubleRegister());
+  DCHECK(output->IsDoubleRegister());
   SwVfpRegister single_scratch = double_scratch0().low();
   if (input->IsStackSlot()) {
     Register scratch = scratch0();
@@ -4564,18 +4655,18 @@
 
 
 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
-  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagI FINAL : public LDeferredCode {
    public:
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        SIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagI* instr_;
   };
@@ -4591,18 +4682,18 @@
 
 
 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
-  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagU FINAL : public LDeferredCode {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagU* instr_;
   };
@@ -4661,15 +4752,15 @@
     __ mov(dst, Operand::Zero());
 
     // Preserve the value of all registers.
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    PushSafepointRegistersScope scope(this);
 
     // NumberTagI and NumberTagD use the context from the frame, rather than
     // the environment's HContext or HInlinedContext value.
-    // They only call Runtime::kHiddenAllocateHeapNumber.
+    // They only call Runtime::kAllocateHeapNumber.
     // The corresponding HChange instructions are added in a phase that does
     // not have easy access to the local context.
     __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
     __ sub(r0, r0, Operand(kHeapObjectTag));
@@ -4685,14 +4776,14 @@
 
 
 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
-  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagD FINAL : public LDeferredCode {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagD(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -4726,14 +4817,14 @@
   Register reg = ToRegister(instr->result());
   __ mov(reg, Operand::Zero());
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   // NumberTagI and NumberTagD use the context from the frame, rather than
   // the environment's HContext or HInlinedContext value.
-  // They only call Runtime::kHiddenAllocateHeapNumber.
+  // They only call Runtime::kAllocateHeapNumber.
   // The corresponding HChange instructions are added in a phase that does
   // not have easy access to the local context.
   __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ sub(r0, r0, Operand(kHeapObjectTag));
@@ -4748,12 +4839,12 @@
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
     __ tst(input, Operand(0xc0000000));
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   }
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       !hchange->value()->CheckFlag(HValue::kUint32)) {
     __ SmiTag(output, input, SetCC);
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   } else {
     __ SmiTag(output, input);
   }
@@ -4767,22 +4858,23 @@
     STATIC_ASSERT(kHeapObjectTag == 1);
     // If the input is a HeapObject, SmiUntag will set the carry flag.
     __ SmiUntag(result, input, SetCC);
-    DeoptimizeIf(cs, instr->environment());
+    DeoptimizeIf(cs, instr);
   } else {
     __ SmiUntag(result, input);
   }
 }
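
// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of this patch): on 32-bit ARM a smi
// is a 31-bit payload shifted left by one, with tag bit 0 clear; heap
// objects have bit 0 set. The SetCC forms above let the deopt test ride on
// the shift itself: SmiTag deopts on signed overflow (vs), SmiUntag deopts
// when the shifted-out bit was set, i.e. the input was a heap object (cs).
// In portable C++:

#include <cstdint>

bool TrySmiTagSketch(int32_t value, int32_t* tagged) {
  // Fits iff value is in [-2^30, 2^30 - 1]; the uint32 path above instead
  // tests the top two bits with the 0xc0000000 mask.
  if (value < -(1 << 30) || value > (1 << 30) - 1) return false;  // deopt
  *tagged = value * 2;  // shift left one; tag bit 0 stays clear
  return true;
}

bool TrySmiUntagSketch(int32_t tagged, int32_t* value) {
  if (tagged & 1) return false;  // tag bit set: a heap object, deopt
  *value = tagged >> 1;          // arithmetic shift restores the payload
  return true;
}
// ---------------------------------------------------------------------------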
 
 
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 DwVfpRegister result_reg,
-                                bool can_convert_undefined_to_nan,
-                                bool deoptimize_on_minus_zero,
-                                LEnvironment* env,
                                 NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
   Register scratch = scratch0();
   SwVfpRegister flt_scratch = double_scratch0().low();
-  ASSERT(!result_reg.is(double_scratch0()));
+  DCHECK(!result_reg.is(double_scratch0()));
   Label convert, load_smi, done;
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
@@ -4794,7 +4886,7 @@
     if (can_convert_undefined_to_nan) {
       __ b(ne, &convert);
     } else {
-      DeoptimizeIf(ne, env);
+      DeoptimizeIf(ne, instr);
     }
     // load heap number
     __ vldr(result_reg, input_reg, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -4804,7 +4896,7 @@
       __ b(ne, &done);
       __ VmovHigh(scratch, result_reg);
       __ cmp(scratch, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(eq, env);
+      DeoptimizeIf(eq, instr);
     }
     __ jmp(&done);
     if (can_convert_undefined_to_nan) {
@@ -4812,14 +4904,14 @@
       // Convert undefined (and hole) to NaN.
       __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
       __ cmp(input_reg, Operand(ip));
-      DeoptimizeIf(ne, env);
+      DeoptimizeIf(ne, instr);
       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
       __ vldr(result_reg, scratch, HeapNumber::kValueOffset - kHeapObjectTag);
       __ jmp(&done);
     }
   } else {
     __ SmiUntag(scratch, input_reg);
-    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   }
   // Smi to double register conversion
   __ bind(&load_smi);
@@ -4837,8 +4929,8 @@
   LowDwVfpRegister double_scratch = double_scratch0();
   DwVfpRegister double_scratch2 = ToDoubleRegister(instr->temp2());
 
-  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
-  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
 
   Label done;
 
@@ -4880,24 +4972,22 @@
     __ bind(&check_false);
     __ LoadRoot(ip, Heap::kFalseValueRootIndex);
     __ cmp(scratch2, Operand(ip));
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr, "cannot truncate");
     __ mov(input_reg, Operand::Zero());
-    __ b(&done);
   } else {
-    // Deoptimize if we don't have a heap number.
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr, "not a heap number");
 
     __ sub(ip, scratch2, Operand(kHeapObjectTag));
     __ vldr(double_scratch2, ip, HeapNumber::kValueOffset);
     __ TryDoubleToInt32Exact(input_reg, double_scratch2, double_scratch);
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr, "lost precision or NaN");
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ cmp(input_reg, Operand::Zero());
       __ b(ne, &done);
       __ VmovHigh(scratch1, double_scratch2);
       __ tst(scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr, "minus zero");
     }
   }
   __ bind(&done);
@@ -4905,21 +4995,21 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+  class DeferredTaggedToI FINAL : public LDeferredCode {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredTaggedToI(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LTaggedToI* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
-  ASSERT(input->Equals(instr->result()));
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
 
   Register input_reg = ToRegister(input);
 
@@ -4941,9 +5031,9 @@
 
 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsDoubleRegister());
+  DCHECK(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
   DwVfpRegister result_reg = ToDoubleRegister(result);
@@ -4952,11 +5042,7 @@
   NumberUntagDMode mode = value->representation().IsSmi()
       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
 
-  EmitNumberUntagD(input_reg, result_reg,
-                   instr->hydrogen()->can_convert_undefined_to_nan(),
-                   instr->hydrogen()->deoptimize_on_minus_zero(),
-                   instr->environment(),
-                   mode);
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
 }
 
 
@@ -4971,14 +5057,14 @@
   } else {
     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       Label done;
       __ cmp(result_reg, Operand::Zero());
       __ b(ne, &done);
       __ VmovHigh(scratch1, double_input);
       __ tst(scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr);
       __ bind(&done);
     }
   }
@@ -4996,26 +5082,26 @@
   } else {
     __ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
    // Deoptimize if the input wasn't an int32 (inside a double).
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       Label done;
       __ cmp(result_reg, Operand::Zero());
       __ b(ne, &done);
       __ VmovHigh(scratch1, double_input);
       __ tst(scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr);
       __ bind(&done);
     }
   }
   __ SmiTag(result_reg, SetCC);
-  DeoptimizeIf(vs, instr->environment());
+  DeoptimizeIf(vs, instr);
 }
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
   __ SmiTst(ToRegister(input));
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 }
 
 
@@ -5023,7 +5109,7 @@
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
     LOperand* input = instr->value();
     __ SmiTst(ToRegister(input));
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 }
 
@@ -5044,13 +5130,13 @@
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr);
     } else {
-      DeoptimizeIf(lo, instr->environment());
+      DeoptimizeIf(lo, instr);
       // Omit check for the last type.
       if (last != LAST_TYPE) {
         __ cmp(scratch, Operand(last));
-        DeoptimizeIf(hi, instr->environment());
+        DeoptimizeIf(hi, instr);
       }
     }
   } else {
@@ -5058,14 +5144,14 @@
     uint8_t tag;
     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
 
-    if (IsPowerOf2(mask)) {
-      ASSERT(tag == 0 || IsPowerOf2(tag));
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
       __ tst(scratch, Operand(mask));
-      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment());
+      DeoptimizeIf(tag == 0 ? ne : eq, instr);
     } else {
       __ and_(scratch, scratch, Operand(mask));
       __ cmp(scratch, Operand(tag));
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr);
     }
   }
 }
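
// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of this patch): IsPowerOfTwo32 is the
// usual bit trick, and it is what justifies the single-tst fast path above:
// when the mask has exactly one bit set, `tst scratch, mask` alone decides
// the check, with the expected condition chosen by whether tag is zero.

#include <cstdint>

bool IsPowerOfTwo32Sketch(uint32_t value) {
  // A power of two has exactly one bit set; value & (value - 1) clears the
  // lowest set bit, so the result is zero exactly for powers of two.
  return value != 0 && (value & (value - 1)) == 0;
}
// ---------------------------------------------------------------------------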
@@ -5084,13 +5170,13 @@
   } else {
     __ cmp(reg, Operand(object));
   }
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 }
 
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   {
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    PushSafepointRegistersScope scope(this);
     __ push(object);
     __ mov(cp, Operand::Zero());
     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
@@ -5099,22 +5185,22 @@
     __ StoreToSafepointRegisterSlot(r0, scratch0());
   }
   __ tst(scratch0(), Operand(kSmiTagMask));
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
 }
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+  class DeferredCheckMaps FINAL : public LDeferredCode {
    public:
     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5132,7 +5218,7 @@
   Register map_reg = scratch0();
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   Register reg = ToRegister(input);
 
   __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
@@ -5156,7 +5242,7 @@
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ b(ne, deferred->entry());
   } else {
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   }
 
   __ bind(&success);
@@ -5195,7 +5281,7 @@
   // Check for undefined. Undefined is converted to zero for clamping
   // conversions.
   __ cmp(input_reg, Operand(factory()->undefined_value()));
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
   __ mov(result_reg, Operand::Zero());
   __ jmp(&done);
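
// ---------------------------------------------------------------------------
// Aside (illustrative sketch of the surrounding clamp semantics; the
// rounding details are an assumption, not code from this patch): clamping to
// a uint8 sends undefined and NaN to 0, saturates at both ends, and rounds
// in between. Roughly:

#include <cmath>
#include <cstdint>

uint8_t ClampToUint8Sketch(double value) {
  if (!(value > 0.0)) return 0;    // NaN and non-positive values clamp to 0
  if (value >= 255.0) return 255;  // saturate at the top
  // std::nearbyint uses the current rounding mode, which defaults to
  // round-to-nearest-even (assumed here to match the generated code).
  return static_cast<uint8_t>(std::nearbyint(value));
}
// ---------------------------------------------------------------------------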
 
@@ -5234,14 +5320,14 @@
 
 
 void LCodeGen::DoAllocate(LAllocate* instr) {
-  class DeferredAllocate V8_FINAL : public LDeferredCode {
+  class DeferredAllocate FINAL : public LDeferredCode {
    public:
     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredAllocate(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LAllocate* instr_;
   };
@@ -5259,11 +5345,11 @@
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   }
 
@@ -5276,33 +5362,25 @@
     }
   } else {
     Register size = ToRegister(instr->size());
-    __ Allocate(size,
-                result,
-                scratch,
-                scratch2,
-                deferred->entry(),
-                flags);
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   }
 
   __ bind(deferred->exit());
 
   if (instr->hydrogen()->MustPrefillWithFiller()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
     if (instr->size()->IsConstantOperand()) {
       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-      __ mov(scratch, Operand(size));
+      __ mov(scratch, Operand(size - kHeapObjectTag));
     } else {
-      scratch = ToRegister(instr->size());
+      __ sub(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
     }
-    __ sub(scratch, scratch, Operand(kPointerSize));
-    __ sub(result, result, Operand(kHeapObjectTag));
+    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
     Label loop;
     __ bind(&loop);
-    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    __ sub(scratch, scratch, Operand(kPointerSize), SetCC);
     __ str(scratch2, MemOperand(result, scratch));
-    __ sub(scratch, scratch, Operand(kPointerSize));
-    __ cmp(scratch, Operand(0));
     __ b(ge, &loop);
-    __ add(result, result, Operand(kHeapObjectTag));
   }
 }
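
// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of this patch): the tightened filler
// loop above leans on two tricks. STATIC_ASSERT(kHeapObjectTag == 1) lets
// the -1 folded into scratch cancel the +1 tag still carried by result, so
// `result + scratch` addresses untagged words with no extra sub/add pair;
// and `sub ..., SetCC` updates the flags, so the separate cmp disappears.
// A C-level model (size must be a positive multiple of the word size):

#include <cstdint>
#include <cstring>

// tagged_result points one byte past the true start, mirroring the +1 tag.
void PrefillSketch(unsigned char* tagged_result, int32_t size,
                   uint32_t filler) {
  const int32_t kHeapObjectTag = 1;
  const int32_t kPointerSize = sizeof(uint32_t);  // 4 on ARM32
  int32_t offset = size - kHeapObjectTag;         // initial value of scratch
  do {
    offset -= kPointerSize;  // sub scratch, scratch, #kPointerSize, SetCC
    // The +1 in tagged_result and the -1 in offset cancel: this walks the
    // untagged words [0, size), with the last store at offset == -1, i.e.
    // the untagged base.
    std::memcpy(tagged_result + offset, &filler, sizeof(filler));
  } while (offset >= 0);     // b ge, &loop
}
// ---------------------------------------------------------------------------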
 
@@ -5315,10 +5393,10 @@
   // contained in the register pointer map.
   __ mov(result, Operand(Smi::FromInt(0)));
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
     Register size = ToRegister(instr->size());
-    ASSERT(!size.is(result));
+    DCHECK(!size.is(result));
     __ SmiTag(size);
     __ push(size);
   } else {
@@ -5335,11 +5413,11 @@
   int flags = AllocateDoubleAlignFlag::encode(
       instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
@@ -5347,20 +5425,20 @@
   __ Push(Smi::FromInt(flags));
 
   CallRuntimeFromDeferred(
-      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(r0, result);
 }
 
 
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  ASSERT(ToRegister(instr->value()).is(r0));
+  DCHECK(ToRegister(instr->value()).is(r0));
   __ push(r0);
   CallRuntime(Runtime::kToFastProperties, 1, instr);
 }
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Label materialized;
   // Registers will be used as follows:
   // r6 = literals array.
@@ -5381,7 +5459,7 @@
   __ mov(r4, Operand(instr->hydrogen()->pattern()));
   __ mov(r3, Operand(instr->hydrogen()->flags()));
   __ Push(r6, r5, r4, r3);
-  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ mov(r1, r0);
 
   __ bind(&materialized);
@@ -5394,7 +5472,7 @@
   __ bind(&runtime_allocate);
   __ mov(r0, Operand(Smi::FromInt(size)));
   __ Push(r1, r0);
-  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ pop(r1);
 
   __ bind(&allocated);
@@ -5404,14 +5482,13 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && instr->hydrogen()->has_no_literals()) {
-    FastNewClosureStub stub(isolate(),
-                            instr->hydrogen()->strict_mode(),
-                            instr->hydrogen()->is_generator());
+    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+                            instr->hydrogen()->kind());
     __ mov(r2, Operand(instr->hydrogen()->shared_info()));
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -5419,7 +5496,7 @@
     __ mov(r1, Operand(pretenure ? factory()->true_value()
                                  : factory()->false_value()));
     __ Push(cp, r2, r1);
-    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
   }
 }
 
@@ -5476,11 +5553,6 @@
     __ CompareRoot(input, Heap::kFalseValueRootIndex);
     final_branch_condition = eq;
 
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(type_name, factory->null_string())) {
-    __ CompareRoot(input, Heap::kNullValueRootIndex);
-    final_branch_condition = eq;
-
   } else if (String::Equals(type_name, factory->undefined_string())) {
     __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
     __ b(eq, true_label);
@@ -5503,10 +5575,8 @@
   } else if (String::Equals(type_name, factory->object_string())) {
     Register map = scratch;
     __ JumpIfSmi(input, false_label);
-    if (!FLAG_harmony_typeof) {
-      __ CompareRoot(input, Heap::kNullValueRootIndex);
-      __ b(eq, true_label);
-    }
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ b(eq, true_label);
     __ CheckObjectTypeRange(input,
                             map,
                             FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
@@ -5534,7 +5604,7 @@
 
 
 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
-  ASSERT(!temp1.is(temp2));
+  DCHECK(!temp1.is(temp2));
   // Get the frame pointer for the calling frame.
   __ ldr(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
 
@@ -5558,7 +5628,7 @@
       // Block literal pool emission for duration of padding.
       Assembler::BlockConstPoolScope block_const_pool(masm());
       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
       while (padding_size > 0) {
         __ nop();
         padding_size -= Assembler::kInstrSize;
@@ -5571,7 +5641,7 @@
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   last_lazy_deopt_pc_ = masm()->pc_offset();
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5588,8 +5658,7 @@
     type = Deoptimizer::LAZY;
   }
 
-  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
-  DeoptimizeIf(al, instr->environment(), type);
+  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
 }
 
 
@@ -5604,31 +5673,31 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   LoadContextFromDeferred(instr->context());
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  class DeferredStackCheck V8_FINAL : public LDeferredCode {
+  class DeferredStackCheck FINAL : public LDeferredCode {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStackCheck(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStackCheck* instr_;
   };
 
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   // There is no LLazyBailout instruction for stack-checks. We have to
   // prepare for lazy deoptimization explicitly here.
@@ -5641,12 +5710,12 @@
     Handle<Code> stack_check = isolate()->builtins()->StackCheck();
     PredictableCodeSizeScope predictable(masm(),
         CallCodeSize(stack_check, RelocInfo::CODE_TARGET));
-    ASSERT(instr->context()->IsRegister());
-    ASSERT(ToRegister(instr->context()).is(cp));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
     CallCode(stack_check, RelocInfo::CODE_TARGET, instr);
     __ bind(&done);
   } else {
-    ASSERT(instr->hydrogen()->is_backwards_branch());
+    DCHECK(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
     DeferredStackCheck* deferred_stack_check =
         new(zone()) DeferredStackCheck(this, instr);
@@ -5672,7 +5741,7 @@
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
-  ASSERT(!environment->HasBeenRegistered());
+  DCHECK(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
 
   GenerateOsrPrologue();
@@ -5682,19 +5751,19 @@
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   __ cmp(r0, ip);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
 
   Register null_value = r5;
   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   __ cmp(r0, null_value);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
 
   __ SmiTst(r0);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CompareObjectType(r0, r1, r1, LAST_JS_PROXY_TYPE);
-  DeoptimizeIf(le, instr->environment());
+  DeoptimizeIf(le, instr);
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(null_value, &call_runtime);
@@ -5710,7 +5779,7 @@
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
   __ LoadRoot(ip, Heap::kMetaMapRootIndex);
   __ cmp(r1, ip);
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
   __ bind(&use_cache);
 }
 
@@ -5732,7 +5801,7 @@
   __ ldr(result,
          FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
   __ cmp(result, Operand::Zero());
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
 
   __ bind(&done);
 }
@@ -5743,7 +5812,7 @@
   Register map = ToRegister(instr->map());
   __ ldr(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
   __ cmp(map, scratch0());
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 }
 
 
@@ -5751,7 +5820,7 @@
                                            Register result,
                                            Register object,
                                            Register index) {
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ Push(object);
   __ Push(index);
   __ mov(cp, Operand::Zero());
@@ -5763,7 +5832,7 @@
 
 
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
    public:
     DeferredLoadMutableDouble(LCodeGen* codegen,
                               LLoadFieldByIndex* instr,
@@ -5776,10 +5845,10 @@
           object_(object),
           index_(index) {
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LLoadFieldByIndex* instr_;
     Register result_;
@@ -5832,7 +5901,7 @@
   Handle<ScopeInfo> scope_info = instr->scope_info();
   __ Push(scope_info);
   __ push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+  CallRuntime(Runtime::kPushBlockContext, 2, instr);
   RecordSafepoint(Safepoint::kNoLazyDeopt);
 }
 
diff --git a/src/arm/lithium-codegen-arm.h b/src/arm/lithium-codegen-arm.h
index b20b3f2..cb137d1 100644
--- a/src/arm/lithium-codegen-arm.h
+++ b/src/arm/lithium-codegen-arm.h
@@ -26,7 +26,7 @@
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
-        deopt_jump_table_(4, info->zone()),
+        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -116,7 +116,7 @@
   void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredAllocate(LAllocate* instr);
   void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
-                                       Label* map_check);
+                                       Label* map_check, Label* bool_load);
   void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
   void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
                                    Register result,
@@ -169,10 +169,10 @@
 
   // Code generation passes.  Returns true if code generation should
   // continue.
-  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
   bool GeneratePrologue();
   bool GenerateDeferredCode();
-  bool GenerateDeoptJumpTable();
+  bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
   // Generates the custom OSR entrypoint and sets the osr_pc_offset.
@@ -234,10 +234,10 @@
 
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition condition,
-                    LEnvironment* environment,
-                    Deoptimizer::BailoutType bailout_type);
-  void DeoptimizeIf(Condition condition, LEnvironment* environment);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    const char* detail, Deoptimizer::BailoutType bailout_type);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    const char* detail = NULL);
 
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
@@ -270,11 +270,8 @@
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
                                     Safepoint::DeoptMode mode);
-  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
-                                              int arguments,
-                                              Safepoint::DeoptMode mode);
 
-  void RecordAndWritePosition(int position) V8_OVERRIDE;
+  void RecordAndWritePosition(int position) OVERRIDE;
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
@@ -284,12 +281,8 @@
   void EmitBranch(InstrType instr, Condition condition);
   template<class InstrType>
   void EmitFalseBranch(InstrType instr, Condition condition);
-  void EmitNumberUntagD(Register input,
-                        DwVfpRegister result,
-                        bool allow_undefined_as_nan,
-                        bool deoptimize_on_minus_zero,
-                        LEnvironment* env,
-                        NumberUntagDMode mode);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DwVfpRegister result, NumberUntagDMode mode);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
@@ -327,7 +320,7 @@
                     int* offset,
                     AllocationSiteMode mode);
 
-  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
   void DoLoadKeyedExternalArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -335,8 +328,11 @@
   void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
   void DoStoreKeyedFixedArray(LStoreKeyed* instr);
 
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
@@ -354,40 +350,19 @@
 
   Safepoint::Kind expected_safepoint_kind_;
 
-  class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
    public:
-    PushSafepointRegistersScope(LCodeGen* codegen,
-                                Safepoint::Kind kind)
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
-      ASSERT(codegen_->info()->is_calling());
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = kind;
-
-      switch (codegen_->expected_safepoint_kind_) {
-        case Safepoint::kWithRegisters:
-          codegen_->masm_->PushSafepointRegisters();
-          break;
-        case Safepoint::kWithRegistersAndDoubles:
-          codegen_->masm_->PushSafepointRegistersAndDoubles();
-          break;
-        default:
-          UNREACHABLE();
-      }
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      codegen_->masm_->PushSafepointRegisters();
     }
 
     ~PushSafepointRegistersScope() {
-      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
-      ASSERT((kind & Safepoint::kWithRegisters) != 0);
-      switch (kind) {
-        case Safepoint::kWithRegisters:
-          codegen_->masm_->PopSafepointRegisters();
-          break;
-        case Safepoint::kWithRegistersAndDoubles:
-          codegen_->masm_->PopSafepointRegistersAndDoubles();
-          break;
-        default:
-          UNREACHABLE();
-      }
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      codegen_->masm_->PopSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
     }
 
diff --git a/src/arm/lithium-gap-resolver-arm.cc b/src/arm/lithium-gap-resolver-arm.cc
index 8ca235a..2fceec9 100644
--- a/src/arm/lithium-gap-resolver-arm.cc
+++ b/src/arm/lithium-gap-resolver-arm.cc
@@ -4,8 +4,8 @@
 
 #include "src/v8.h"
 
-#include "src/arm/lithium-gap-resolver-arm.h"
 #include "src/arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-gap-resolver-arm.h"
 
 namespace v8 {
 namespace internal {
@@ -29,7 +29,7 @@
 
 
 void LGapResolver::Resolve(LParallelMove* parallel_move) {
-  ASSERT(moves_.is_empty());
+  DCHECK(moves_.is_empty());
   // Build up a worklist of moves.
   BuildInitialMoveList(parallel_move);
 
@@ -50,13 +50,13 @@
   // Perform the moves with constant sources.
   for (int i = 0; i < moves_.length(); ++i) {
     if (!moves_[i].IsEliminated()) {
-      ASSERT(moves_[i].source()->IsConstantOperand());
+      DCHECK(moves_[i].source()->IsConstantOperand());
       EmitMove(i);
     }
   }
 
   if (need_to_restore_root_) {
-    ASSERT(kSavedValueRegister.is(kRootRegister));
+    DCHECK(kSavedValueRegister.is(kRootRegister));
     __ InitializeRootRegister();
     need_to_restore_root_ = false;
   }
@@ -94,13 +94,13 @@
   // An additional complication is that moves to MemOperands with large
  // offsets (more than 1K or 4K) require us to spill the saved value to
  // the stack, to free up the register.
-  ASSERT(!moves_[index].IsPending());
-  ASSERT(!moves_[index].IsRedundant());
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
 
   // Clear this move's destination to indicate a pending move.  The actual
   // destination is saved in a stack allocated local.  Multiple moves can
   // be pending because this function is recursive.
-  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
   LOperand* destination = moves_[index].destination();
   moves_[index].set_destination(NULL);
 
@@ -127,7 +127,7 @@
   // a scratch register to break it.
   LMoveOperands other_move = moves_[root_index_];
   if (other_move.Blocks(destination)) {
-    ASSERT(other_move.IsPending());
+    DCHECK(other_move.IsPending());
     BreakCycle(index);
     return;
   }
@@ -138,12 +138,12 @@
 
 
 void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   // No operand should be the destination for more than one move.
   for (int i = 0; i < moves_.length(); ++i) {
     LOperand* destination = moves_[i].destination();
     for (int j = i + 1; j < moves_.length(); ++j) {
-      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
     }
   }
 #endif
@@ -154,8 +154,8 @@
   // We save in a register the source of that move and we remember its
   // destination. Then we mark this move as resolved so the cycle is
   // broken and we can perform the other moves.
-  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
-  ASSERT(!in_cycle_);
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
   in_cycle_ = true;
   LOperand* source = moves_[index].source();
   saved_destination_ = moves_[index].destination();
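
// ---------------------------------------------------------------------------
// Aside (illustrative sketch, not part of this patch): BreakCycle and
// RestoreValue resolve a dependency cycle in a parallel move by parking one
// source in a scratch location, performing the remaining moves, and then
// completing the broken move. The two-element case, in miniature:

void ResolveSwapSketch(int* a, int* b) {
  int saved = *a;  // BreakCycle: park the value the cycle would clobber
  *a = *b;         // the rest of the cycle can now be performed
  *b = saved;      // RestoreValue: finish the move that was broken
}
// ---------------------------------------------------------------------------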
@@ -178,8 +178,8 @@
 
 
 void LGapResolver::RestoreValue() {
-  ASSERT(in_cycle_);
-  ASSERT(saved_destination_ != NULL);
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
 
   if (saved_destination_->IsRegister()) {
     __ mov(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
@@ -210,7 +210,7 @@
     if (destination->IsRegister()) {
       __ mov(cgen_->ToRegister(destination), source_register);
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       __ str(source_register, cgen_->ToMemOperand(destination));
     }
   } else if (source->IsStackSlot()) {
@@ -218,7 +218,7 @@
     if (destination->IsRegister()) {
       __ ldr(cgen_->ToRegister(destination), source_operand);
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (!destination_operand.OffsetIsUint12Encodable()) {
         // ip is overwritten while saving the value to the destination.
@@ -248,8 +248,8 @@
       double v = cgen_->ToDouble(constant_source);
       __ Vmov(result, v, ip);
     } else {
-      ASSERT(destination->IsStackSlot());
-      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
       need_to_restore_root_ = true;
       Representation r = cgen_->IsSmi(constant_source)
           ? Representation::Smi() : Representation::Integer32();
@@ -267,7 +267,7 @@
     if (destination->IsDoubleRegister()) {
       __ vmov(cgen_->ToDoubleRegister(destination), source_register);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsDoubleStackSlot());
       __ vstr(source_register, cgen_->ToMemOperand(destination));
     }
 
@@ -276,7 +276,7 @@
     if (destination->IsDoubleRegister()) {
       __ vldr(cgen_->ToDoubleRegister(destination), source_operand);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsDoubleStackSlot());
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
         // kScratchDoubleReg was used to break the cycle.
diff --git a/src/arm/lithium-gap-resolver-arm.h b/src/arm/lithium-gap-resolver-arm.h
index 909ea64..9d7d843 100644
--- a/src/arm/lithium-gap-resolver-arm.h
+++ b/src/arm/lithium-gap-resolver-arm.h
@@ -15,7 +15,7 @@
 class LCodeGen;
 class LGapResolver;
 
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
  public:
   explicit LGapResolver(LCodeGen* owner);
 
diff --git a/src/arm/macro-assembler-arm.cc b/src/arm/macro-assembler-arm.cc
index c34a7f7..c845a3d 100644
--- a/src/arm/macro-assembler-arm.cc
+++ b/src/arm/macro-assembler-arm.cc
@@ -8,6 +8,8 @@
 
 #if V8_TARGET_ARCH_ARM
 
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/cpu-profiler.h"
@@ -36,21 +38,21 @@
 
 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
                           Condition cond) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
   mov(pc, Operand(target, rmode), LeaveCC, cond);
 }
 
 
 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode,
                           Condition cond) {
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
   Jump(reinterpret_cast<intptr_t>(target), rmode, cond);
 }
 
 
 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
                           Condition cond) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
   // 'code' is always generated ARM code, never THUMB code
   AllowDeferredHandleDereference embedding_raw_address;
   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
@@ -68,19 +70,16 @@
   Label start;
   bind(&start);
   blx(target, cond);
-  ASSERT_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
+  DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
 }
 
 
 int MacroAssembler::CallSize(
     Address target, RelocInfo::Mode rmode, Condition cond) {
-  int size = 2 * kInstrSize;
   Instr mov_instr = cond | MOV | LeaveCC;
-  intptr_t immediate = reinterpret_cast<intptr_t>(target);
-  if (!Operand(immediate, rmode).is_single_instruction(this, mov_instr)) {
-    size += kInstrSize;
-  }
-  return size;
+  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
+  return kInstrSize +
+         mov_operand.instructions_required(this, mov_instr) * kInstrSize;
 }
 
 
@@ -94,13 +93,10 @@
                                                    Address target,
                                                    RelocInfo::Mode rmode,
                                                    Condition cond) {
-  int size = 2 * kInstrSize;
   Instr mov_instr = cond | MOV | LeaveCC;
-  intptr_t immediate = reinterpret_cast<intptr_t>(target);
-  if (!Operand(immediate, rmode).is_single_instruction(NULL, mov_instr)) {
-    size += kInstrSize;
-  }
-  return size;
+  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
+  return kInstrSize +
+         mov_operand.instructions_required(NULL, mov_instr) * kInstrSize;
 }
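Both CallSize helpers now ask the operand how many instructions are needed to
materialize the target address instead of hard-coding two and conditionally
adding one. A sketch of the arithmetic, assuming instructions_required()
reports the length of the mov/movw/movt sequence in instructions:

    // Illustrative: total size = one blx instruction plus however many 4-byte
    // instructions the address-materializing mov sequence needs (1 for a
    // simple mov, 2 for a movw/movt pair).
    int CallSizeSketch(int mov_instructions) {
      const int kInstrSize = 4;  // every ARM-mode instruction is 4 bytes
      return kInstrSize + mov_instructions * kInstrSize;
    }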
 
 
@@ -144,7 +140,7 @@
   mov(ip, Operand(reinterpret_cast<int32_t>(target), rmode));
   blx(ip, cond);
 
-  ASSERT_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
   if (mode == NEVER_INLINE_TARGET_ADDRESS) {
     set_predictable_code_size(old_predictable_code_size);
   }
@@ -167,7 +163,7 @@
                           TargetAddressStorageMode mode) {
   Label start;
   bind(&start);
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
     SetRecordedAstId(ast_id);
     rmode = RelocInfo::CODE_TARGET_WITH_ID;
@@ -228,7 +224,7 @@
   if (value->IsSmi()) {
     mov(dst, Operand(value));
   } else {
-    ASSERT(value->IsHeapObject());
+    DCHECK(value->IsHeapObject());
     if (isolate()->heap()->InNewSpace(*value)) {
       Handle<Cell> cell = isolate()->factory()->NewCell(value);
       mov(dst, Operand(cell));
@@ -260,7 +256,7 @@
     CpuFeatureScope scope(this, MLS);
     mls(dst, src1, src2, srcA, cond);
   } else {
-    ASSERT(!dst.is(srcA));
+    DCHECK(!srcA.is(ip));
     mul(ip, src1, src2, LeaveCC, cond);
     sub(dst, srcA, ip, LeaveCC, cond);
   }
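The guard change above reflects how the fallback is synthesized: without the
MLS instruction, the product must first land in the ip scratch register, so it
is srcA (still live across the mul) that must not alias ip. A C-level sketch:

    #include <cstdint>

    // mls dst, src1, src2, srcA computes dst = srcA - src1 * src2.
    int32_t MlsFallback(int32_t src1, int32_t src2, int32_t srcA) {
      int32_t ip = src1 * src2;  // mul ip, src1, src2
      return srcA - ip;          // sub dst, srcA, ip
    }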
@@ -273,10 +269,10 @@
       !src2.must_output_reloc_info(this) &&
       src2.immediate() == 0) {
     mov(dst, Operand::Zero(), LeaveCC, cond);
-  } else if (!src2.is_single_instruction(this) &&
+  } else if (!(src2.instructions_required(this) == 1) &&
              !src2.must_output_reloc_info(this) &&
              CpuFeatures::IsSupported(ARMv7) &&
-             IsPowerOf2(src2.immediate() + 1)) {
+             base::bits::IsPowerOfTwo32(src2.immediate() + 1)) {
     ubfx(dst, src1, 0,
         WhichPowerOf2(static_cast<uint32_t>(src2.immediate()) + 1), cond);
   } else {
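The rewritten condition tests whether the immediate has the form 2^n - 1: an
AND with such a mask keeps the low n bits, which a single ubfx of width n from
bit 0 computes without materializing the constant. A sketch of the test,
assuming base::bits::IsPowerOfTwo32 has the usual bit-trick definition:

    #include <cstdint>

    // True if mask == 2^n - 1 for some 1 <= n <= 31; *width receives n, the
    // ubfx width (WhichPowerOf2(mask + 1) in the code above).
    bool IsLowBitMask(uint32_t mask, unsigned* width) {
      uint32_t m = mask + 1;
      if (m == 0 || (m & (m - 1)) != 0) return false;  // not a power of two
      *width = __builtin_ctz(m);  // GCC/Clang builtin: index of the set bit
      return true;
    }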
@@ -287,7 +283,7 @@
 
 void MacroAssembler::Ubfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
-  ASSERT(lsb < 32);
+  DCHECK(lsb < 32);
   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
@@ -302,7 +298,7 @@
 
 void MacroAssembler::Sbfx(Register dst, Register src1, int lsb, int width,
                           Condition cond) {
-  ASSERT(lsb < 32);
+  DCHECK(lsb < 32);
   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     and_(dst, src1, Operand(mask), LeaveCC, cond);
@@ -326,10 +322,10 @@
                          int lsb,
                          int width,
                          Condition cond) {
-  ASSERT(0 <= lsb && lsb < 32);
-  ASSERT(0 <= width && width < 32);
-  ASSERT(lsb + width < 32);
-  ASSERT(!scratch.is(dst));
+  DCHECK(0 <= lsb && lsb < 32);
+  DCHECK(0 <= width && width < 32);
+  DCHECK(lsb + width < 32);
+  DCHECK(!scratch.is(dst));
   if (width == 0) return;
   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
@@ -345,7 +341,7 @@
 
 void MacroAssembler::Bfc(Register dst, Register src, int lsb, int width,
                          Condition cond) {
-  ASSERT(lsb < 32);
+  DCHECK(lsb < 32);
   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
     int mask = (1 << (width + lsb)) - 1 - ((1 << lsb) - 1);
     bic(dst, src, Operand(mask));
@@ -359,13 +355,13 @@
 void MacroAssembler::Usat(Register dst, int satpos, const Operand& src,
                           Condition cond) {
   if (!CpuFeatures::IsSupported(ARMv7) || predictable_code_size()) {
-    ASSERT(!dst.is(pc) && !src.rm().is(pc));
-    ASSERT((satpos >= 0) && (satpos <= 31));
+    DCHECK(!dst.is(pc) && !src.rm().is(pc));
+    DCHECK((satpos >= 0) && (satpos <= 31));
 
     // These asserts are required to ensure compatibility with the ARMv7
     // implementation.
-    ASSERT((src.shift_op() == ASR) || (src.shift_op() == LSL));
-    ASSERT(src.rs().is(no_reg));
+    DCHECK((src.shift_op() == ASR) || (src.shift_op() == LSL));
+    DCHECK(src.rs().is(no_reg));
 
     Label done;
     int satval = (1 << satpos) - 1;
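For targets without ARMv7's usat, the saturate is emulated with an explicit
clamp; a sketch of the value usat produces:

    #include <cstdint>

    // usat dst, #satpos, src clamps a signed value into [0, 2^satpos - 1].
    int32_t UsatSketch(int32_t value, int satpos) {
      int32_t satval = (1 << satpos) - 1;
      if (value < 0) return 0;
      if (value > satval) return satval;
      return value;
    }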
@@ -390,7 +386,7 @@
 void MacroAssembler::Load(Register dst,
                           const MemOperand& src,
                           Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8()) {
     ldrsb(dst, src);
   } else if (r.IsUInteger8()) {
@@ -408,7 +404,7 @@
 void MacroAssembler::Store(Register src,
                            const MemOperand& dst,
                            Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8() || r.IsUInteger8()) {
     strb(src, dst);
   } else if (r.IsInteger16() || r.IsUInteger16()) {
@@ -451,7 +447,7 @@
                                 Register scratch,
                                 Condition cond,
                                 Label* branch) {
-  ASSERT(cond == eq || cond == ne);
+  DCHECK(cond == eq || cond == ne);
   and_(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
   cmp(scratch, Operand(ExternalReference::new_space_start(isolate())));
   b(cond, branch);
@@ -479,7 +475,7 @@
 
   // Although the object register is tagged, the offset is relative to the start
  // of the object, so the offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
+  DCHECK(IsAligned(offset, kPointerSize));
 
   add(dst, object, Operand(offset - kHeapObjectTag));
   if (emit_debug_code()) {
@@ -504,8 +500,8 @@
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Operand(BitCast<int32_t>(kZapValue + 4)));
-    mov(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+    mov(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -527,10 +523,6 @@
     return;
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  // TODO(mstarzinger): Dynamic counter missing.
-
   if (emit_debug_code()) {
     ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset));
     cmp(ip, map);
@@ -571,11 +563,15 @@
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
-    mov(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+    mov(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+    mov(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
   }
 }
 
@@ -592,7 +588,7 @@
     RememberedSetAction remembered_set_action,
     SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
-  ASSERT(!object.is(value));
+  DCHECK(!object.is(value));
   if (emit_debug_code()) {
     ldr(ip, MemOperand(address));
     cmp(ip, value);
@@ -604,10 +600,6 @@
     return;
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  // TODO(mstarzinger): Dynamic counter missing.
-
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
@@ -642,11 +634,16 @@
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
+                   value);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(address, Operand(BitCast<int32_t>(kZapValue + 12)));
-    mov(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+    mov(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+    mov(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
   }
 }
 
@@ -678,12 +675,11 @@
   if (and_then == kFallThroughAtEnd) {
     b(eq, &done);
   } else {
-    ASSERT(and_then == kReturnAtEnd);
+    DCHECK(and_then == kReturnAtEnd);
     Ret(eq);
   }
   push(lr);
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(isolate(), fp_mode);
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
   CallStub(&store_buffer_overflow);
   pop(lr);
   bind(&done);
@@ -694,7 +690,7 @@
 
 
 void MacroAssembler::PushFixedFrame(Register marker_reg) {
-  ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
+  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
   stm(db_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                 cp.bit() |
                 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
@@ -704,7 +700,7 @@
 
 
 void MacroAssembler::PopFixedFrame(Register marker_reg) {
-  ASSERT(!marker_reg.is_valid() || marker_reg.code() < cp.code());
+  DCHECK(!marker_reg.is_valid() || marker_reg.code() < cp.code());
   ldm(ia_w, sp, (marker_reg.is_valid() ? marker_reg.bit() : 0) |
                 cp.bit() |
                 (FLAG_enable_ool_constant_pool ? pp.bit() : 0) |
@@ -716,11 +712,11 @@
 // Push and pop all registers that can hold pointers.
 void MacroAssembler::PushSafepointRegisters() {
   // Safepoints expect a block of contiguous register values starting with r0:
-  ASSERT(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
+  DCHECK(((1 << kNumSafepointSavedRegisters) - 1) == kSafepointSavedRegisters);
   // Safepoints expect a block of kNumSafepointRegisters values on the
   // stack, so adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
-  ASSERT(num_unsaved >= 0);
+  DCHECK(num_unsaved >= 0);
   sub(sp, sp, Operand(num_unsaved * kPointerSize));
   stm(db_w, sp, kSafepointSavedRegisters);
 }
@@ -733,39 +729,6 @@
 }
 
 
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
-  // Number of d-regs not known at snapshot time.
-  ASSERT(!serializer_enabled());
-  PushSafepointRegisters();
-  // Only save allocatable registers.
-  ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
-  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
-  if (CpuFeatures::IsSupported(VFP32DREGS)) {
-    vstm(db_w, sp, d16, d31);
-  }
-  vstm(db_w, sp, d0, d13);
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
-  // Number of d-regs not known at snapshot time.
-  ASSERT(!serializer_enabled());
-  // Only save allocatable registers.
-  ASSERT(kScratchDoubleReg.is(d15) && kDoubleRegZero.is(d14));
-  ASSERT(DwVfpRegister::NumReservedRegisters() == 2);
-  vldm(ia_w, sp, d0, d13);
-  if (CpuFeatures::IsSupported(VFP32DREGS)) {
-    vldm(ia_w, sp, d16, d31);
-  }
-  PopSafepointRegisters();
-}
-
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
-                                                             Register dst) {
-  str(src, SafepointRegistersAndDoublesSlot(dst));
-}
-
-
 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
   str(src, SafepointRegisterSlot(dst));
 }
@@ -779,7 +742,7 @@
 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
   // The registers are pushed starting with the highest encoding,
   // which means that lowest encodings are closest to the stack pointer.
-  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
   return reg_code;
 }
 
@@ -791,7 +754,7 @@
 
 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
   // Number of d-regs not known at snapshot time.
-  ASSERT(!serializer_enabled());
+  DCHECK(!serializer_enabled());
   // General purpose registers are pushed last on the stack.
   int doubles_size = DwVfpRegister::NumAllocatableRegisters() * kDoubleSize;
   int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
@@ -801,12 +764,12 @@
 
 void MacroAssembler::Ldrd(Register dst1, Register dst2,
                           const MemOperand& src, Condition cond) {
-  ASSERT(src.rm().is(no_reg));
-  ASSERT(!dst1.is(lr));  // r14.
+  DCHECK(src.rm().is(no_reg));
+  DCHECK(!dst1.is(lr));  // r14.
 
   // V8 does not use this addressing mode, so the fallback code
   // below doesn't support it yet.
-  ASSERT((src.am() != PreIndex) && (src.am() != NegPreIndex));
+  DCHECK((src.am() != PreIndex) && (src.am() != NegPreIndex));
 
   // Generate two ldr instructions if ldrd is not available.
   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
@@ -825,7 +788,7 @@
         ldr(dst2, src2, cond);
       }
     } else {  // PostIndex or NegPostIndex.
-      ASSERT((src.am() == PostIndex) || (src.am() == NegPostIndex));
+      DCHECK((src.am() == PostIndex) || (src.am() == NegPostIndex));
       if (dst1.is(src.rn())) {
         ldr(dst2, MemOperand(src.rn(), 4, Offset), cond);
         ldr(dst1, src, cond);
@@ -842,12 +805,12 @@
 
 void MacroAssembler::Strd(Register src1, Register src2,
                           const MemOperand& dst, Condition cond) {
-  ASSERT(dst.rm().is(no_reg));
-  ASSERT(!src1.is(lr));  // r14.
+  DCHECK(dst.rm().is(no_reg));
+  DCHECK(!src1.is(lr));  // r14.
 
   // V8 does not use this addressing mode, so the fallback code
   // below doesn't support it yet.
-  ASSERT((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
+  DCHECK((dst.am() != PreIndex) && (dst.am() != NegPreIndex));
 
   // Generate two str instructions if strd is not available.
   if (CpuFeatures::IsSupported(ARMv7) && !predictable_code_size() &&
@@ -861,7 +824,7 @@
       str(src1, dst, cond);
       str(src2, dst2, cond);
     } else {  // PostIndex or NegPostIndex.
-      ASSERT((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
+      DCHECK((dst.am() == PostIndex) || (dst.am() == NegPostIndex));
       dst2.set_offset(dst2.offset() - 4);
       str(src1, MemOperand(dst.rn(), 4, PostIndex), cond);
       str(src2, dst2, cond);
@@ -991,7 +954,7 @@
   if (FLAG_enable_ool_constant_pool) {
     int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize -
         pc_offset() - Instruction::kPCReadOffset;
-    ASSERT(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
+    DCHECK(ImmediateFitsAddrMode2Instruction(constant_pool_offset));
     ldr(pp, MemOperand(pc, constant_pool_offset));
   }
 }
@@ -1075,9 +1038,9 @@
 
 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
   // Set up the frame structure on the stack.
-  ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
-  ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
-  ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
   Push(lr, fp);
   mov(fp, Operand(sp));  // Set up new frame pointer.
   // Reserve room for saved entry sp and code object.
@@ -1113,7 +1076,7 @@
   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
   if (frame_alignment > 0) {
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     and_(sp, sp, Operand(-frame_alignment));
   }
 
@@ -1144,7 +1107,7 @@
   // environment.
   // Note: This will break if we ever start generating snapshots on one ARM
   // platform for another ARM platform with a different alignment.
-  return OS::ActivationFrameAlignment();
+  return base::OS::ActivationFrameAlignment();
 #else  // V8_HOST_ARCH_ARM
   // If we are using the simulator then we should always align to the expected
   // alignment. As the simulator is used to generate snapshots we do not know
@@ -1232,12 +1195,12 @@
   // The code below is made a lot easier because the calling code already sets
   // up actual and expected registers according to the contract if values are
   // passed in registers.
-  ASSERT(actual.is_immediate() || actual.reg().is(r0));
-  ASSERT(expected.is_immediate() || expected.reg().is(r2));
-  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
+  DCHECK(actual.is_immediate() || actual.reg().is(r0));
+  DCHECK(expected.is_immediate() || expected.reg().is(r2));
+  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(r3));
 
   if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
+    DCHECK(actual.is_immediate());
     if (expected.immediate() == actual.immediate()) {
       definitely_matches = true;
     } else {
@@ -1294,7 +1257,7 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
   bool definitely_mismatches = false;
@@ -1307,7 +1270,7 @@
       Call(code);
       call_wrapper.AfterCall();
     } else {
-      ASSERT(flag == JUMP_FUNCTION);
+      DCHECK(flag == JUMP_FUNCTION);
       Jump(code);
     }
 
@@ -1323,10 +1286,10 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Contract with called JS functions requires that function is passed in r1.
-  ASSERT(fun.is(r1));
+  DCHECK(fun.is(r1));
 
   Register expected_reg = r2;
   Register code_reg = r3;
@@ -1351,10 +1314,10 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Contract with called JS functions requires that function is passed in r1.
-  ASSERT(function.is(r1));
+  DCHECK(function.is(r1));
 
   // Get the function and setup the context.
   ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
@@ -1400,7 +1363,7 @@
 void MacroAssembler::IsObjectJSStringType(Register object,
                                           Register scratch,
                                           Label* fail) {
-  ASSERT(kNotStringTag != 0);
+  DCHECK(kNotStringTag != 0);
 
   ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -1423,7 +1386,7 @@
   mov(r0, Operand::Zero());
   mov(r1, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
   CEntryStub ces(isolate(), 1);
-  ASSERT(AllowThisStubCall(&ces));
+  DCHECK(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 
@@ -1571,9 +1534,9 @@
                                             Label* miss) {
   Label same_contexts;
 
-  ASSERT(!holder_reg.is(scratch));
-  ASSERT(!holder_reg.is(ip));
-  ASSERT(!scratch.is(ip));
+  DCHECK(!holder_reg.is(scratch));
+  DCHECK(!holder_reg.is(ip));
+  DCHECK(!scratch.is(ip));
 
   // Load current lexical context from the stack frame.
   ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -1643,7 +1606,7 @@
 
 
 // Compute the hash code from the untagged key.  This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stubs-hydrogen.cc
 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
   // First of all we assign the hash seed to scratch.
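For reference, the hash that GetNumberHash mirrors is a seeded Wang-style
32-bit integer mix. The version below is recalled from the utils.h of this
era and should be treated as a sketch, not the authoritative definition:

    #include <cstdint>

    // Sketch of ComputeIntegerHash (from memory).
    uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;
      hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
      hash = hash ^ (hash >> 12);
      hash = hash + (hash << 2);
      hash = hash ^ (hash >> 4);
      hash = hash * 2057;           // hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }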
@@ -1721,7 +1684,7 @@
     and_(t2, t2, Operand(t1));
 
     // Scale the index by multiplying by the element size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
     add(t2, t2, Operand(t2, LSL, 1));  // t2 = t2 * 3
 
     // Check if the key is identical to the name.
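The scaling by kEntrySize uses shift-add strength reduction instead of a
multiply:

    #include <cstdint>

    // t2 = t2 * 3, expressed above as add(t2, t2, Operand(t2, LSL, 1)).
    uint32_t TimesEntrySize(uint32_t index) {
      return index + (index << 1);
    }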
@@ -1757,7 +1720,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1769,17 +1732,17 @@
     return;
   }
 
-  ASSERT(!result.is(scratch1));
-  ASSERT(!result.is(scratch2));
-  ASSERT(!scratch1.is(scratch2));
-  ASSERT(!scratch1.is(ip));
-  ASSERT(!scratch2.is(ip));
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
+  DCHECK(!scratch1.is(ip));
+  DCHECK(!scratch2.is(ip));
 
   // Make object size into bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
     object_size *= kPointerSize;
   }
-  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
+  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
 
   // Check relative positions of allocation top and limit addresses.
   // The values must be adjacent in memory to allow the use of LDM.
@@ -1794,8 +1757,8 @@
       reinterpret_cast<intptr_t>(allocation_top.address());
   intptr_t limit =
       reinterpret_cast<intptr_t>(allocation_limit.address());
-  ASSERT((limit - top) == kPointerSize);
-  ASSERT(result.code() < ip.code());
+  DCHECK((limit - top) == kPointerSize);
+  DCHECK(result.code() < ip.code());
 
   // Set up allocation top address register.
   Register topaddr = scratch1;
@@ -1822,7 +1785,7 @@
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
     // Align the next allocation. Storing the filler map without checking top is
     // safe in new-space because the limit of the heap is aligned there.
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
     STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
     Label aligned;
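The DOUBLE_ALIGNMENT path that follows stores a one-word filler whenever the
fresh allocation top is pointer-aligned but not double-aligned. A sketch under
the assumed constants kPointerAlignment == 4 and kDoubleAlignment == 8:

    #include <cstdint>

    // If the low bits show 4- but not 8-byte alignment, emit a filler word and
    // bump the object start to the next 8-byte boundary.
    uintptr_t DoubleAlignSketch(uintptr_t top,
                                void (*store_filler_map)(uintptr_t)) {
      if ((top & 7u) != 0) {    // the and_(..., kDoubleAlignmentMask) test
        store_filler_map(top);  // the one-pointer filler object
        top += 4u;
      }
      return top;
    }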
@@ -1839,7 +1802,7 @@
   // Calculate new top and bail out if new space is exhausted. Use result
   // to calculate the new top. We must preserve the ip register at this
   // point, so we cannot just use add().
-  ASSERT(object_size > 0);
+  DCHECK(object_size > 0);
   Register source = result;
   Condition cond = al;
   int shift = 0;
@@ -1851,7 +1814,7 @@
       object_size -= bits;
       shift += 8;
       Operand bits_operand(bits);
-      ASSERT(bits_operand.is_single_instruction(this));
+      DCHECK(bits_operand.instructions_required(this) == 1);
       add(scratch2, source, bits_operand, SetCC, cond);
       source = scratch2;
       cond = cc;
@@ -1888,13 +1851,13 @@
 
   // Assert that the register arguments are different and that none of
   // them are ip. ip is used explicitly in the code generated below.
-  ASSERT(!result.is(scratch1));
-  ASSERT(!result.is(scratch2));
-  ASSERT(!scratch1.is(scratch2));
-  ASSERT(!object_size.is(ip));
-  ASSERT(!result.is(ip));
-  ASSERT(!scratch1.is(ip));
-  ASSERT(!scratch2.is(ip));
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
+  DCHECK(!object_size.is(ip));
+  DCHECK(!result.is(ip));
+  DCHECK(!scratch1.is(ip));
+  DCHECK(!scratch2.is(ip));
 
   // Check relative positions of allocation top and limit addresses.
   // The values must be adjacent in memory to allow the use of LDM.
@@ -1908,8 +1871,8 @@
       reinterpret_cast<intptr_t>(allocation_top.address());
   intptr_t limit =
       reinterpret_cast<intptr_t>(allocation_limit.address());
-  ASSERT((limit - top) == kPointerSize);
-  ASSERT(result.code() < ip.code());
+  DCHECK((limit - top) == kPointerSize);
+  DCHECK(result.code() < ip.code());
 
   // Set up allocation top address.
   Register topaddr = scratch1;
@@ -1936,8 +1899,8 @@
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
     // Align the next allocation. Storing the filler map without checking top is
     // safe in new-space because the limit of the heap is aligned there.
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     and_(scratch2, result, Operand(kDoubleAlignmentMask), SetCC);
     Label aligned;
     b(eq, &aligned);
@@ -2004,7 +1967,7 @@
                                            Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   mov(scratch1, Operand(length, LSL, 1));  // Length in bytes, not chars.
   add(scratch1, scratch1,
       Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
@@ -2027,21 +1990,19 @@
 }
 
 
-void MacroAssembler::AllocateAsciiString(Register result,
-                                         Register length,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  ASSERT(kCharSize == 1);
+  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK(kCharSize == 1);
   add(scratch1, length,
       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
   and_(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
-  // Allocate ASCII string in new space.
+  // Allocate one-byte string in new space.
   Allocate(scratch1,
            result,
            scratch2,
@@ -2050,11 +2011,8 @@
            TAG_OBJECT);
 
   // Set the map, length and hash field.
-  InitializeNewString(result,
-                      length,
-                      Heap::kAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
 
 
@@ -2074,11 +2032,10 @@
 }
 
 
-void MacroAssembler::AllocateAsciiConsString(Register result,
-                                             Register length,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
   Allocate(ConsString::kSize,
            result,
            scratch1,
@@ -2086,11 +2043,8 @@
            gc_required,
            TAG_OBJECT);
 
-  InitializeNewString(result,
-                      length,
-                      Heap::kConsAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
 
 
@@ -2110,19 +2064,16 @@
 }
 
 
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
-                                               Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
-  InitializeNewString(result,
-                      length,
-                      Heap::kSlicedAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
 
 
@@ -2167,7 +2118,7 @@
 
 void MacroAssembler::CompareRoot(Register obj,
                                  Heap::RootListIndex index) {
-  ASSERT(!obj.is(ip));
+  DCHECK(!obj.is(ip));
   LoadRoot(ip, index);
   cmp(obj, ip);
 }
@@ -2322,14 +2273,15 @@
                                              Register scratch,
                                              Label* miss,
                                              bool miss_on_bound_function) {
-  // Check that the receiver isn't a smi.
-  JumpIfSmi(function, miss);
-
-  // Check that the function really is a function.  Load map into result reg.
-  CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
-  b(ne, miss);
-
+  Label non_instance;
   if (miss_on_bound_function) {
+    // Check that the receiver isn't a smi.
+    JumpIfSmi(function, miss);
+
+    // Check that the function really is a function.  Load map into result reg.
+    CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
+    b(ne, miss);
+
     ldr(scratch,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
     ldr(scratch,
@@ -2337,13 +2289,12 @@
     tst(scratch,
         Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
     b(ne, miss);
-  }
 
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
-  tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
-  b(ne, &non_instance);
+    // Make sure that the function has an instance prototype.
+    ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+    tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
+    b(ne, &non_instance);
+  }
 
   // Get the prototype or initial map from the function.
   ldr(result,
@@ -2363,12 +2314,15 @@
 
   // Get the prototype from the initial map.
   ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-  jmp(&done);
 
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in initial map.
-  bind(&non_instance);
-  ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+  if (miss_on_bound_function) {
+    jmp(&done);
+
+    // Non-instance prototype: Fetch prototype from constructor field
+    // in initial map.
+    bind(&non_instance);
+    ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+  }
 
   // All done.
   bind(&done);
@@ -2378,7 +2332,7 @@
 void MacroAssembler::CallStub(CodeStub* stub,
                               TypeFeedbackId ast_id,
                               Condition cond) {
-  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
+  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
 }
 
@@ -2409,7 +2363,7 @@
       ExternalReference::handle_scope_level_address(isolate()),
       next_address);
 
-  ASSERT(function_address.is(r1) || function_address.is(r2));
+  DCHECK(function_address.is(r1) || function_address.is(r2));
 
   Label profiler_disabled;
   Label end_profiler_check;
@@ -2503,7 +2457,7 @@
   {
     FrameScope frame(this, StackFrame::INTERNAL);
     CallExternalReference(
-        ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
+        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
         0);
   }
   jmp(&exception_handled);
@@ -2531,7 +2485,7 @@
   // that the constants for the maximum number of digits for an array index
  // cached in the hash field and the number of bits reserved for it do not
  // conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
 }
@@ -2551,7 +2505,7 @@
 
 void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
                                        LowDwVfpRegister double_scratch) {
-  ASSERT(!double_input.is(double_scratch));
+  DCHECK(!double_input.is(double_scratch));
   vcvt_s32_f64(double_scratch.low(), double_input);
   vcvt_f64_s32(double_scratch, double_scratch.low());
   VFPCompareAndSetFlags(double_input, double_scratch);
@@ -2561,7 +2515,7 @@
 void MacroAssembler::TryDoubleToInt32Exact(Register result,
                                            DwVfpRegister double_input,
                                            LowDwVfpRegister double_scratch) {
-  ASSERT(!double_input.is(double_scratch));
+  DCHECK(!double_input.is(double_scratch));
   vcvt_s32_f64(double_scratch.low(), double_input);
   vmov(result, double_scratch.low());
   vcvt_f64_s32(double_scratch, double_scratch.low());
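TryDoubleToInt32Exact relies on a round trip: truncate to int32, convert back,
and compare. A C-level sketch (out-of-range inputs are undefined behavior in
the cast below but saturate in the VFP instruction; -0 handling is left to
callers):

    #include <cstdint>

    // Exact iff converting the truncated result back reproduces the input;
    // NaN compares unequal and is therefore reported as inexact.
    bool DoubleToInt32ExactSketch(double input, int32_t* result) {
      *result = static_cast<int32_t>(input);         // vcvt_s32_f64
      return static_cast<double>(*result) == input;  // vcvt_f64_s32 + compare
    }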
@@ -2575,8 +2529,8 @@
                                    LowDwVfpRegister double_scratch,
                                    Label* done,
                                    Label* exact) {
-  ASSERT(!result.is(input_high));
-  ASSERT(!double_input.is(double_scratch));
+  DCHECK(!result.is(input_high));
+  DCHECK(!double_input.is(double_scratch));
   Label negative, exception;
 
   VmovHigh(input_high, double_input);
@@ -2654,7 +2608,7 @@
                                            Register object) {
   Label done;
   LowDwVfpRegister double_scratch = kScratchDoubleReg;
-  ASSERT(!result.is(object));
+  DCHECK(!result.is(object));
 
   vldr(double_scratch,
        MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
@@ -2681,7 +2635,7 @@
                                        Register scratch1,
                                        Label* not_number) {
   Label done;
-  ASSERT(!result.is(object));
+  DCHECK(!result.is(object));
 
   UntagAndJumpIfSmi(result, object, &done);
   JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
@@ -2765,7 +2719,7 @@
 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
 #if defined(__thumb__)
   // Thumb mode builtin.
-  ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
+  DCHECK((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
 #endif
   mov(r1, Operand(builtin));
   CEntryStub stub(isolate(), 1);
@@ -2777,7 +2731,7 @@
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
   // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   GetBuiltinEntry(r2, id);
   if (flag == CALL_FUNCTION) {
@@ -2785,7 +2739,7 @@
     Call(r2);
     call_wrapper.AfterCall();
   } else {
-    ASSERT(flag == JUMP_FUNCTION);
+    DCHECK(flag == JUMP_FUNCTION);
     Jump(r2);
   }
 }
@@ -2804,7 +2758,7 @@
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  ASSERT(!target.is(r1));
+  DCHECK(!target.is(r1));
   GetBuiltinFunction(r1, id);
   // Load the code entry point from the builtins object.
   ldr(target, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
@@ -2823,7 +2777,7 @@
 
 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     mov(scratch2, Operand(ExternalReference(counter)));
     ldr(scratch1, MemOperand(scratch2));
@@ -2835,7 +2789,7 @@
 
 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     mov(scratch2, Operand(ExternalReference(counter)));
     ldr(scratch1, MemOperand(scratch2));
@@ -2853,7 +2807,7 @@
 
 void MacroAssembler::AssertFastElements(Register elements) {
   if (emit_debug_code()) {
-    ASSERT(!elements.is(ip));
+    DCHECK(!elements.is(ip));
     Label ok;
     push(elements);
     ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
@@ -2917,7 +2871,7 @@
     // of the Abort macro constant.
     static const int kExpectedAbortInstructions = 7;
     int abort_instructions = InstructionsGeneratedSince(&abort_start);
-    ASSERT(abort_instructions <= kExpectedAbortInstructions);
+    DCHECK(abort_instructions <= kExpectedAbortInstructions);
     while (abort_instructions++ < kExpectedAbortInstructions) {
       nop();
     }
@@ -3218,44 +3172,35 @@
 }
 
 
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
-    Register first,
-    Register second,
-    Register scratch1,
-    Register scratch2,
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+    Register first, Register second, Register scratch1, Register scratch2,
     Label* failure) {
-  // Test that both first and second are sequential ASCII strings.
+  // Test that both first and second are sequential one-byte strings.
   // Assume that they are non-smis.
   ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
   ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
 
-  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
-                                               scratch2,
-                                               scratch1,
-                                               scratch2,
-                                               failure);
+  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+                                                 scratch2, failure);
 }
 
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
-                                                         Register second,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+                                                           Register second,
+                                                           Register scratch1,
+                                                           Register scratch2,
+                                                           Label* failure) {
   // Check that neither is a smi.
   and_(scratch1, first, Operand(second));
   JumpIfSmi(scratch1, failure);
-  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
-                                             second,
-                                             scratch1,
-                                             scratch2,
-                                             failure);
+  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+                                               scratch2, failure);
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
-                                         Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   tst(reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -3274,14 +3219,19 @@
                                         Register scratch2,
                                         Register heap_number_map,
                                         Label* gc_required,
-                                        TaggingMode tagging_mode) {
+                                        TaggingMode tagging_mode,
+                                        MutableMode mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
 
+  Heap::RootListIndex map_index = mode == MUTABLE
+      ? Heap::kMutableHeapNumberMapRootIndex
+      : Heap::kHeapNumberMapRootIndex;
+  AssertIsRoot(heap_number_map, map_index);
+
   // Store heap number map in the allocated object.
-  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   if (tagging_mode == TAG_RESULT) {
     str(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
   } else {
@@ -3415,34 +3365,31 @@
 }
 
 
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
-    Register first,
-    Register second,
-    Register scratch1,
-    Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+    Register first, Register second, Register scratch1, Register scratch2,
     Label* failure) {
-  const int kFlatAsciiStringMask =
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
-  and_(scratch1, first, Operand(kFlatAsciiStringMask));
-  and_(scratch2, second, Operand(kFlatAsciiStringMask));
-  cmp(scratch1, Operand(kFlatAsciiStringTag));
+  and_(scratch1, first, Operand(kFlatOneByteStringMask));
+  and_(scratch2, second, Operand(kFlatOneByteStringMask));
+  cmp(scratch1, Operand(kFlatOneByteStringTag));
   // Ignore second test if first test failed.
-  cmp(scratch2, Operand(kFlatAsciiStringTag), eq);
+  cmp(scratch2, Operand(kFlatOneByteStringTag), eq);
   b(ne, failure);
 }
 
 
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
-                                                            Register scratch,
-                                                            Label* failure) {
-  const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+                                                              Register scratch,
+                                                              Label* failure) {
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
-  and_(scratch, type, Operand(kFlatAsciiStringMask));
-  cmp(scratch, Operand(kFlatAsciiStringTag));
+  and_(scratch, type, Operand(kFlatOneByteStringMask));
+  cmp(scratch, Operand(kFlatOneByteStringTag));
   b(ne, failure);
 }
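The renamed helpers keep the same shape: mask the instance type down to its
string, encoding and representation bits, then compare against the sequential
one-byte tag. In the two-operand variant the eq-conditioned second cmp lets a
single branch cover both operands. A sketch of the predicate:

    #include <cstdint>

    // Both types must satisfy (type & mask) == tag; the generated code fuses
    // the two comparisons via the conditional second cmp.
    bool BothSequentialOneByte(uint32_t type1, uint32_t type2, uint32_t mask,
                               uint32_t tag) {
      return (type1 & mask) == tag && (type2 & mask) == tag;
    }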
 
@@ -3519,7 +3466,7 @@
     // and the original value of sp.
     mov(scratch, sp);
     sub(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     and_(sp, sp, Operand(-frame_alignment));
     str(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
@@ -3535,7 +3482,7 @@
 
 
 void MacroAssembler::MovToFloatParameter(DwVfpRegister src) {
-  ASSERT(src.is(d0));
+  DCHECK(src.is(d0));
   if (!use_eabi_hardfloat()) {
     vmov(r0, r1, src);
   }
@@ -3550,8 +3497,8 @@
 
 void MacroAssembler::MovToFloatParameters(DwVfpRegister src1,
                                           DwVfpRegister src2) {
-  ASSERT(src1.is(d0));
-  ASSERT(src2.is(d1));
+  DCHECK(src1.is(d0));
+  DCHECK(src2.is(d1));
   if (!use_eabi_hardfloat()) {
     vmov(r0, r1, src1);
     vmov(r2, r3, src2);
@@ -3589,16 +3536,16 @@
 void MacroAssembler::CallCFunctionHelper(Register function,
                                          int num_reg_arguments,
                                          int num_double_arguments) {
-  ASSERT(has_frame());
+  DCHECK(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
 #if V8_HOST_ARCH_ARM
   if (emit_debug_code()) {
-    int frame_alignment = OS::ActivationFrameAlignment();
+    int frame_alignment = base::OS::ActivationFrameAlignment();
     int frame_alignment_mask = frame_alignment - 1;
     if (frame_alignment > kPointerSize) {
-      ASSERT(IsPowerOf2(frame_alignment));
+      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
       Label alignment_as_expected;
       tst(sp, Operand(frame_alignment_mask));
       b(eq, &alignment_as_expected);
@@ -3625,25 +3572,65 @@
 
 
 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
-                                               Register result) {
-  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
+                                               Register result,
+                                               Register scratch) {
+  Label small_constant_pool_load, load_result;
   ldr(result, MemOperand(ldr_location));
+
+  if (FLAG_enable_ool_constant_pool) {
+    // Check if this is an extended constant pool load.
+    and_(scratch, result, Operand(GetConsantPoolLoadMask()));
+    teq(scratch, Operand(GetConsantPoolLoadPattern()));
+    b(eq, &small_constant_pool_load);
+    if (emit_debug_code()) {
+      // Check that the instruction sequence is:
+      //   movw reg, #offset_low
+      //   movt reg, #offset_high
+      //   ldr reg, [pp, reg]
+      Instr patterns[] = {GetMovWPattern(), GetMovTPattern(),
+                          GetLdrPpRegOffsetPattern()};
+      for (int i = 0; i < 3; i++) {
+        ldr(result, MemOperand(ldr_location, i * kInstrSize));
+        and_(result, result, Operand(patterns[i]));
+        cmp(result, Operand(patterns[i]));
+        Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+      }
+      // Result was clobbered. Restore it.
+      ldr(result, MemOperand(ldr_location));
+    }
+
+    // Get the offset into the constant pool. First extract the movw
+    // immediate into result.
+    and_(scratch, result, Operand(0xfff));
+    mov(ip, Operand(result, LSR, 4));
+    and_(ip, ip, Operand(0xf000));
+    orr(result, scratch, Operand(ip));
+    // Then extract the movt immediate and OR it into result.
+    ldr(scratch, MemOperand(ldr_location, kInstrSize));
+    and_(ip, scratch, Operand(0xf0000));
+    orr(result, result, Operand(ip, LSL, 12));
+    and_(scratch, scratch, Operand(0xfff));
+    orr(result, result, Operand(scratch, LSL, 16));
+
+    b(&load_result);
+  }
+
+  bind(&small_constant_pool_load);
   if (emit_debug_code()) {
     // Check that the instruction is a ldr reg, [<pc or pp> + offset].
-    if (FLAG_enable_ool_constant_pool) {
-      and_(result, result, Operand(kLdrPpPattern));
-      cmp(result, Operand(kLdrPpPattern));
-      Check(eq, kTheInstructionToPatchShouldBeALoadFromPp);
-    } else {
-      and_(result, result, Operand(kLdrPCPattern));
-      cmp(result, Operand(kLdrPCPattern));
-      Check(eq, kTheInstructionToPatchShouldBeALoadFromPc);
-    }
+    and_(result, result, Operand(GetConsantPoolLoadPattern()));
+    cmp(result, Operand(GetConsantPoolLoadPattern()));
+    Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
     // Result was clobbered. Restore it.
     ldr(result, MemOperand(ldr_location));
   }
-  // Get the address of the constant.
+
+  // Get the offset into the constant pool.
+  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
   and_(result, result, Operand(kLdrOffsetMask));
+
+  bind(&load_result);
+  // Get the address of the constant.
   if (FLAG_enable_ool_constant_pool) {
     add(result, pp, Operand(result));
   } else {
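The extended constant pool path above reassembles a 32-bit pool offset from a
movw/movt pair. A sketch of the bit surgery, assuming the standard ARM A1
encoding (imm4 in bits 19:16, imm12 in bits 11:0):

    #include <cstdint>

    // Recover the 16-bit immediate encoded by a movw or movt instruction.
    uint32_t DecodeImm16(uint32_t instr) {
      return (instr & 0xfff) | ((instr >> 4) & 0xf000);  // imm12 | imm4 << 12
    }

    // movw holds the low half of the offset, movt the high half.
    uint32_t DecodePoolOffset(uint32_t movw_instr, uint32_t movt_instr) {
      return DecodeImm16(movw_instr) | (DecodeImm16(movt_instr) << 16);
    }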
@@ -3683,7 +3670,7 @@
                                  Register scratch1,
                                  Label* on_black) {
   HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
 }
 
 
@@ -3693,7 +3680,7 @@
                               Label* has_color,
                               int first_bit,
                               int second_bit) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
 
   GetMarkBits(object, bitmap_scratch, mask_scratch);
 
@@ -3726,8 +3713,8 @@
   ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
   CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
   b(eq, &is_data_object);
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -3740,7 +3727,7 @@
 void MacroAssembler::GetMarkBits(Register addr_reg,
                                  Register bitmap_reg,
                                  Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
   and_(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
   Ubfx(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
@@ -3757,14 +3744,14 @@
     Register mask_scratch,
     Register load_scratch,
     Label* value_is_white_and_not_data) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
   GetMarkBits(value, bitmap_scratch, mask_scratch);
 
   // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
 
   Label done;
 
@@ -3797,8 +3784,8 @@
   b(eq, &is_data_object);
 
   // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   Register instance_type = load_scratch;
@@ -3810,18 +3797,18 @@
   // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
   // External strings are the only ones with the kExternalStringTag bit
   // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
   tst(instance_type, Operand(kExternalStringTag));
   mov(length, Operand(ExternalString::kSize), LeaveCC, ne);
   b(ne, &is_data_object);
 
-  // Sequential string, either ASCII or UC16.
-  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+  // Sequential string, either Latin1 or UC16.
+  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
   // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
   // getting the length multiplied by 2.
-  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+  DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
   ldr(ip, FieldMemOperand(value, String::kLengthOffset));
   tst(instance_type, Operand(kStringEncodingMask));
   mov(ip, Operand(ip, LSR, 1), LeaveCC, ne);
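The comment above leans on the smi representation: with 31-bit smis the tagged
length is already length * 2, which is exactly the payload size in bytes for
two-byte strings, and one logical shift right yields the size for one-byte
strings. A sketch:

    #include <cstdint>

    // tagged_length == length << 1 (smi tag in the low bit position).
    int32_t PayloadBytesSketch(int32_t tagged_length, bool is_one_byte) {
      return is_one_byte ? (tagged_length >> 1)  // length * 1
                         : tagged_length;        // length * 2
    }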
@@ -3984,7 +3971,7 @@
     Register scratch0,
     Register scratch1,
     Label* found) {
-  ASSERT(!scratch1.is(scratch0));
+  DCHECK(!scratch1.is(scratch0));
   Factory* factory = isolate()->factory();
   Register current = scratch0;
   Label loop_again;
@@ -4011,9 +3998,12 @@
                 Register reg3,
                 Register reg4,
                 Register reg5,
-                Register reg6) {
+                Register reg6,
+                Register reg7,
+                Register reg8) {
   int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
-    reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid();
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
 
   RegList regs = 0;
   if (reg1.is_valid()) regs |= reg1.bit();
@@ -4022,6 +4012,8 @@
   if (reg4.is_valid()) regs |= reg4.bit();
   if (reg5.is_valid()) regs |= reg5.bit();
   if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
   int n_of_non_aliasing_regs = NumRegs(regs);
 
   return n_of_valid_regs != n_of_non_aliasing_regs;
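A note for readers of this hunk: AreAliased works because RegList is a plain bitmask, so two arguments naming the same register collapse onto one bit and the number of distinct bits drops below the count of valid arguments. A standalone sketch (register codes assumed non-negative and below 32, no_reg modelled as -1):

    #include <cstdint>
    #include <initializer_list>

    bool AreAliasedSketch(std::initializer_list<int> codes) {
      uint32_t bits = 0;
      int valid = 0, distinct = 0;
      for (int code : codes) {
        if (code < 0) continue;                  // skip no_reg
        ++valid;
        if ((bits & (1u << code)) == 0) ++distinct;
        bits |= 1u << code;
      }
      return valid != distinct;                  // aliasing collapsed a bit
    }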
@@ -4039,19 +4031,19 @@
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
   if (flush_cache_ == FLUSH) {
-    CPU::FlushICache(address_, size_);
+    CpuFeatures::FlushICache(address_, size_);
   }
 
   // Check that the code was patched as expected.
-  ASSERT(masm_.pc_ == address_ + size_);
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
@@ -4075,19 +4067,21 @@
 void MacroAssembler::TruncatingDiv(Register result,
                                    Register dividend,
                                    int32_t divisor) {
-  ASSERT(!dividend.is(result));
-  ASSERT(!dividend.is(ip));
-  ASSERT(!result.is(ip));
-  MultiplierAndShift ms(divisor);
-  mov(ip, Operand(ms.multiplier()));
+  DCHECK(!dividend.is(result));
+  DCHECK(!dividend.is(ip));
+  DCHECK(!result.is(ip));
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+  mov(ip, Operand(mag.multiplier));
   smull(ip, result, dividend, ip);
-  if (divisor > 0 && ms.multiplier() < 0) {
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) {
     add(result, result, Operand(dividend));
   }
-  if (divisor < 0 && ms.multiplier() > 0) {
+  if (divisor < 0 && !neg && mag.multiplier > 0) {
     sub(result, result, Operand(dividend));
   }
-  if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift()));
+  if (mag.shift > 0) mov(result, Operand(result, ASR, mag.shift));
   add(result, result, Operand(dividend, LSR, 31));
 }
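The rewrite above swaps the old MultiplierAndShift helper for base::SignedDivisionByConstant while keeping the emitted shape: a smull to take the high word of the product, sign corrections, an arithmetic shift, and a final add of the dividend's sign bit. A minimal C++ model of that sequence, assuming a Granlund/Montgomery-style (multiplier, shift) pair such as the new helper computes:

    #include <cstdint>

    int32_t TruncatingDivModel(int32_t dividend, uint32_t multiplier,
                               int shift, int32_t divisor) {
      // smull: high 32 bits of the signed 64-bit product.
      int32_t result = static_cast<int32_t>(
          (static_cast<int64_t>(dividend) *
           static_cast<int32_t>(multiplier)) >> 32);
      bool neg = (multiplier & 0x80000000u) != 0;
      if (divisor > 0 && neg) result += dividend;
      if (divisor < 0 && !neg && multiplier > 0) result -= dividend;
      if (shift > 0) result >>= shift;           // ASR; arithmetic shift assumed
      return result +
             static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
    }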
 
diff --git a/src/arm/macro-assembler-arm.h b/src/arm/macro-assembler-arm.h
index dbf305a..d2a1786 100644
--- a/src/arm/macro-assembler-arm.h
+++ b/src/arm/macro-assembler-arm.h
@@ -6,6 +6,7 @@
 #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
 
 #include "src/assembler.h"
+#include "src/bailout-reason.h"
 #include "src/frames.h"
 #include "src/globals.h"
 
@@ -58,7 +59,9 @@
                 Register reg3 = no_reg,
                 Register reg4 = no_reg,
                 Register reg5 = no_reg,
-                Register reg6 = no_reg);
+                Register reg6 = no_reg,
+                Register reg7 = no_reg,
+                Register reg8 = no_reg);
 #endif
 
 
@@ -76,12 +79,11 @@
   // macro assembler.
   MacroAssembler(Isolate* isolate, void* buffer, int size);
 
-  // Jump, Call, and Ret pseudo instructions implementing inter-working.
-  void Jump(Register target, Condition cond = al);
-  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
-  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+
+  // Returns the size of a call in instructions. Note that the value returned is
+  // only valid as long as no entries are added to the constant pool between
+  // checking the call size and emitting the actual call.
   static int CallSize(Register target, Condition cond = al);
-  void Call(Register target, Condition cond = al);
   int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
   int CallStubSize(CodeStub* stub,
                    TypeFeedbackId ast_id = TypeFeedbackId::None(),
@@ -90,6 +92,12 @@
                                             Address target,
                                             RelocInfo::Mode rmode,
                                             Condition cond = al);
+
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
+  void Jump(Register target, Condition cond = al);
+  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al);
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(Register target, Condition cond = al);
   void Call(Address target, RelocInfo::Mode rmode,
             Condition cond = al,
             TargetAddressStorageMode mode = CAN_INLINE_TARGET_ADDRESS);
@@ -145,6 +153,12 @@
   // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
   void Move(Register dst, Register src, Condition cond = al);
+  void Move(Register dst, const Operand& src, SBit sbit = LeaveCC,
+            Condition cond = al) {
+    if (!src.is_reg() || !src.rm().is(dst) || sbit != LeaveCC) {
+      mov(dst, src, sbit, cond);
+    }
+  }
   void Move(DwVfpRegister dst, DwVfpRegister src);
 
   void Load(Register dst, const MemOperand& src, Representation r);
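The new Move(Register, const Operand&) overload skips the mov only when it is provably a no-op. Its rule, restated as a standalone predicate with hypothetical names:

    // True when the mov may be elided: a plain register operand, moved
    // onto itself, with the condition flags left untouched (LeaveCC).
    bool MovCanBeElided(bool src_is_plain_reg, int src_code, int dst_code,
                        bool updates_flags) {
      return src_is_plain_reg && src_code == dst_code && !updates_flags;
    }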
@@ -304,7 +318,7 @@
 
   // Push two registers.  Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Condition cond = al) {
-    ASSERT(!src1.is(src2));
+    DCHECK(!src1.is(src2));
     if (src1.code() > src2.code()) {
       stm(db_w, sp, src1.bit() | src2.bit(), cond);
     } else {
@@ -315,9 +329,9 @@
 
   // Push three registers.  Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Register src3, Condition cond = al) {
-    ASSERT(!src1.is(src2));
-    ASSERT(!src2.is(src3));
-    ASSERT(!src1.is(src3));
+    DCHECK(!src1.is(src2));
+    DCHECK(!src2.is(src3));
+    DCHECK(!src1.is(src3));
     if (src1.code() > src2.code()) {
       if (src2.code() > src3.code()) {
         stm(db_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -337,12 +351,12 @@
             Register src3,
             Register src4,
             Condition cond = al) {
-    ASSERT(!src1.is(src2));
-    ASSERT(!src2.is(src3));
-    ASSERT(!src1.is(src3));
-    ASSERT(!src1.is(src4));
-    ASSERT(!src2.is(src4));
-    ASSERT(!src3.is(src4));
+    DCHECK(!src1.is(src2));
+    DCHECK(!src2.is(src3));
+    DCHECK(!src1.is(src3));
+    DCHECK(!src1.is(src4));
+    DCHECK(!src2.is(src4));
+    DCHECK(!src3.is(src4));
     if (src1.code() > src2.code()) {
       if (src2.code() > src3.code()) {
         if (src3.code() > src4.code()) {
@@ -366,7 +380,7 @@
 
   // Pop two registers. Pops rightmost register first (from lower address).
   void Pop(Register src1, Register src2, Condition cond = al) {
-    ASSERT(!src1.is(src2));
+    DCHECK(!src1.is(src2));
     if (src1.code() > src2.code()) {
       ldm(ia_w, sp, src1.bit() | src2.bit(), cond);
     } else {
@@ -377,9 +391,9 @@
 
   // Pop three registers.  Pops rightmost register first (from lower address).
   void Pop(Register src1, Register src2, Register src3, Condition cond = al) {
-    ASSERT(!src1.is(src2));
-    ASSERT(!src2.is(src3));
-    ASSERT(!src1.is(src3));
+    DCHECK(!src1.is(src2));
+    DCHECK(!src2.is(src3));
+    DCHECK(!src1.is(src3));
     if (src1.code() > src2.code()) {
       if (src2.code() > src3.code()) {
         ldm(ia_w, sp, src1.bit() | src2.bit() | src3.bit(), cond);
@@ -399,12 +413,12 @@
            Register src3,
            Register src4,
            Condition cond = al) {
-    ASSERT(!src1.is(src2));
-    ASSERT(!src2.is(src3));
-    ASSERT(!src1.is(src3));
-    ASSERT(!src1.is(src4));
-    ASSERT(!src2.is(src4));
-    ASSERT(!src3.is(src4));
+    DCHECK(!src1.is(src2));
+    DCHECK(!src2.is(src3));
+    DCHECK(!src1.is(src3));
+    DCHECK(!src1.is(src4));
+    DCHECK(!src2.is(src4));
+    DCHECK(!src3.is(src4));
     if (src1.code() > src2.code()) {
       if (src2.code() > src3.code()) {
         if (src3.code() > src4.code()) {
@@ -436,12 +450,9 @@
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
   void PopSafepointRegisters();
-  void PushSafepointRegistersAndDoubles();
-  void PopSafepointRegistersAndDoubles();
   // Store value in register src in the safepoint stack slot for
   // register dst.
   void StoreToSafepointRegisterSlot(Register src, Register dst);
-  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
   // Load the value of the src register from its safepoint stack slot
   // into register dst.
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
@@ -680,7 +691,7 @@
   // These instructions are generated to mark special location in the code,
   // like some special IC code.
   static inline bool IsMarkedCode(Instr instr, int type) {
-    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
     return IsNop(instr, type);
   }
 
@@ -700,7 +711,7 @@
                (FIRST_IC_MARKER <= dst_reg) && (dst_reg < LAST_CODE_MARKER)
                    ? src_reg
                    : -1;
-    ASSERT((type == -1) ||
+    DCHECK((type == -1) ||
            ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
     return type;
   }
@@ -743,32 +754,25 @@
                              Register scratch2,
                              Register scratch3,
                              Label* gc_required);
-  void AllocateAsciiString(Register result,
-                           Register length,
-                           Register scratch1,
-                           Register scratch2,
-                           Register scratch3,
-                           Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
   void AllocateTwoByteConsString(Register result,
                                  Register length,
                                  Register scratch1,
                                  Register scratch2,
                                  Label* gc_required);
-  void AllocateAsciiConsString(Register result,
-                               Register length,
-                               Register scratch1,
-                               Register scratch2,
-                               Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register length,
+                                 Register scratch1, Register scratch2,
+                                 Label* gc_required);
   void AllocateTwoByteSlicedString(Register result,
                                    Register length,
                                    Register scratch1,
                                    Register scratch2,
                                    Label* gc_required);
-  void AllocateAsciiSlicedString(Register result,
-                                 Register length,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register length,
+                                   Register scratch1, Register scratch2,
+                                   Label* gc_required);
 
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are clobbered also
@@ -778,7 +782,8 @@
                           Register scratch2,
                           Register heap_number_map,
                           Label* gc_required,
-                          TaggingMode tagging_mode = TAG_RESULT);
+                          TaggingMode tagging_mode = TAG_RESULT,
+                          MutableMode mode = IMMUTABLE);
   void AllocateHeapNumberWithValue(Register result,
                                    DwVfpRegister value,
                                    Register scratch1,
@@ -939,7 +944,7 @@
     ldr(type, FieldMemOperand(obj, HeapObject::kMapOffset), cond);
     ldrb(type, FieldMemOperand(type, Map::kInstanceTypeOffset), cond);
     tst(type, Operand(kIsNotStringMask), cond);
-    ASSERT_EQ(0, kStringTag);
+    DCHECK_EQ(0, kStringTag);
     return eq;
   }
 
@@ -1136,7 +1141,7 @@
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);
 
   Handle<Object> CodeObject() {
-    ASSERT(!code_object_.is_null());
+    DCHECK(!code_object_.is_null());
     return code_object_;
   }
 
@@ -1180,7 +1185,7 @@
   // EABI variant for double arguments in use.
   bool use_eabi_hardfloat() {
 #ifdef __arm__
-    return OS::ArmUsingHardFloat();
+    return base::OS::ArmUsingHardFloat();
 #elif USE_EABI_HARDFLOAT
     return true;
 #else
@@ -1310,38 +1315,33 @@
                                Register scratch3,
                                Label* not_found);
 
-  // Checks if both objects are sequential ASCII strings and jumps to label
+  // Checks if both objects are sequential one-byte strings and jumps to label
   // if either is not. Assumes that neither object is a smi.
-  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register object1,
-                                                  Register object2,
-                                                  Register scratch1,
-                                                  Register scratch2,
-                                                  Label* failure);
+  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
+                                                    Register object2,
+                                                    Register scratch1,
+                                                    Register scratch2,
+                                                    Label* failure);
 
-  // Checks if both objects are sequential ASCII strings and jumps to label
+  // Checks if both objects are sequential one-byte strings and jumps to label
   // if either is not.
-  void JumpIfNotBothSequentialAsciiStrings(Register first,
-                                           Register second,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* not_flat_ascii_strings);
+  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* not_flat_one_byte_strings);
 
-  // Checks if both instance types are sequential ASCII strings and jumps to
+  // Checks if both instance types are sequential one-byte strings and jumps to
   // label if either is not.
-  void JumpIfBothInstanceTypesAreNotSequentialAscii(
-      Register first_object_instance_type,
-      Register second_object_instance_type,
-      Register scratch1,
-      Register scratch2,
-      Label* failure);
+  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+      Register first_object_instance_type, Register second_object_instance_type,
+      Register scratch1, Register scratch2, Label* failure);
 
-  // Check if instance type is sequential ASCII string and jump to label if
+  // Check if instance type is sequential one-byte string and jump to label if
   // it is not.
-  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
-                                              Register scratch,
-                                              Label* failure);
+  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+                                                Label* failure);
 
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
@@ -1353,8 +1353,8 @@
 
   // Get the location of a relocated constant (its address in the constant pool)
   // from its load site.
-  void GetRelocatedValueLocation(Register ldr_location,
-                                 Register result);
+  void GetRelocatedValueLocation(Register ldr_location, Register result,
+                                 Register scratch);
 
 
   void ClampUint8(Register output_reg, Register input_reg);
@@ -1539,7 +1539,7 @@
         old_constant_pool_available_(masm->is_constant_pool_available())  {
     // We only want to enable constant pool access for non-manual frame scopes
     // to ensure the constant pool pointer is valid throughout the scope.
-    ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+    DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
     masm->set_has_frame(true);
     masm->set_constant_pool_available(true);
     masm->EnterFrame(type, !old_constant_pool_available_);
@@ -1557,7 +1557,7 @@
   // scope, the MacroAssembler is still marked as being in a frame scope, and
   // the code will be generated again when it goes out of scope.
   void GenerateLeaveFrame() {
-    ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+    DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
     masm_->LeaveFrame(type_);
   }
 
diff --git a/src/arm/regexp-macro-assembler-arm.cc b/src/arm/regexp-macro-assembler-arm.cc
index e494305..f4918fe 100644
--- a/src/arm/regexp-macro-assembler-arm.cc
+++ b/src/arm/regexp-macro-assembler-arm.cc
@@ -6,13 +6,14 @@
 
 #if V8_TARGET_ARCH_ARM
 
-#include "src/cpu-profiler.h"
-#include "src/unicode.h"
-#include "src/log.h"
 #include "src/code-stubs.h"
-#include "src/regexp-stack.h"
+#include "src/cpu-profiler.h"
+#include "src/log.h"
 #include "src/macro-assembler.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
 #include "src/arm/regexp-macro-assembler-arm.h"
 
 namespace v8 {
@@ -109,7 +110,7 @@
       success_label_(),
       backtrack_label_(),
       exit_label_() {
-  ASSERT_EQ(0, registers_to_save % 2);
+  DCHECK_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code later.
   __ bind(&start_label_);  // And then continue from here.
 }
@@ -142,8 +143,8 @@
 
 
 void RegExpMacroAssemblerARM::AdvanceRegister(int reg, int by) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < num_registers_);
+  DCHECK(reg >= 0);
+  DCHECK(reg < num_registers_);
   if (by != 0) {
     __ ldr(r0, register_location(reg));
     __ add(r0, r0, Operand(by));
@@ -237,7 +238,7 @@
   __ cmn(r1, Operand(current_input_offset()));
   BranchOrBacktrack(gt, on_no_match);
 
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     Label success;
     Label fail;
     Label loop_check;
@@ -286,7 +287,7 @@
     // Compute new value of character position after the matched part.
     __ sub(current_input_offset(), r2, end_of_input_address());
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     int argument_count = 4;
     __ PrepareCallCFunction(argument_count, r2);
 
@@ -353,11 +354,11 @@
 
   Label loop;
   __ bind(&loop);
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     __ ldrb(r3, MemOperand(r0, char_size(), PostIndex));
     __ ldrb(r4, MemOperand(r2, char_size(), PostIndex));
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     __ ldrh(r3, MemOperand(r0, char_size(), PostIndex));
     __ ldrh(r4, MemOperand(r2, char_size(), PostIndex));
   }
@@ -410,7 +411,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
   __ sub(r0, current_character(), Operand(minus));
   __ and_(r0, r0, Operand(mask));
   __ cmp(r0, Operand(c));
@@ -442,7 +443,7 @@
     Handle<ByteArray> table,
     Label* on_bit_set) {
   __ mov(r0, Operand(table));
-  if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
     __ and_(r1, current_character(), Operand(kTableSize - 1));
     __ add(r1, r1, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
   } else {
@@ -463,7 +464,7 @@
   switch (type) {
   case 's':
     // Match space-characters
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       // One byte space characters are '\t'..'\r', ' ' and \u00a0.
       Label success;
       __ cmp(current_character(), Operand(' '));
@@ -517,7 +518,7 @@
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
     __ sub(r0, r0, Operand(0x0b));
     __ cmp(r0, Operand(0x0c - 0x0b));
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       BranchOrBacktrack(hi, on_no_match);
     } else {
       Label done;
@@ -533,8 +534,8 @@
     return true;
   }
   case 'w': {
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ cmp(current_character(), Operand('z'));
       BranchOrBacktrack(hi, on_no_match);
     }
@@ -547,8 +548,8 @@
   }
   case 'W': {
     Label done;
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ cmp(current_character(), Operand('z'));
       __ b(hi, &done);
     }
@@ -557,7 +558,7 @@
     __ ldrb(r0, MemOperand(r0, current_character()));
     __ cmp(r0, Operand::Zero());
     BranchOrBacktrack(ne, on_no_match);
-    if (mode_ != ASCII) {
+    if (mode_ != LATIN1) {
       __ bind(&done);
     }
     return true;
@@ -709,7 +710,7 @@
       __ add(r1, r1, Operand(r2));
       // r1 is length of string in characters.
 
-      ASSERT_EQ(0, num_saved_registers_ % 2);
+      DCHECK_EQ(0, num_saved_registers_ % 2);
       // Always an even number of capture registers. This allows us to
       // unroll the loop once to add an operation between a load of a register
       // and the following use of that register.
@@ -894,8 +895,8 @@
                                                    Label* on_end_of_input,
                                                    bool check_bounds,
                                                    int characters) {
-  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
-  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
+  DCHECK(cp_offset >= -1);      // ^ and \b can look behind one character.
+  DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     CheckPosition(cp_offset + characters - 1, on_end_of_input);
   }
@@ -960,7 +961,7 @@
 
 
 void RegExpMacroAssemblerARM::SetRegister(int register_index, int to) {
-  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
   __ mov(r0, Operand(to));
   __ str(r0, register_location(register_index));
 }
@@ -984,7 +985,7 @@
 
 
 void RegExpMacroAssemblerARM::ClearRegisters(int reg_from, int reg_to) {
-  ASSERT(reg_from <= reg_to);
+  DCHECK(reg_from <= reg_to);
   __ ldr(r0, MemOperand(frame_pointer(), kInputStartMinusOne));
   for (int reg = reg_from; reg <= reg_to; reg++) {
     __ str(r0, register_location(reg));
@@ -1010,8 +1011,8 @@
   __ mov(r1, Operand(masm_->CodeObject()));
 
   // We need to make room for the return address on the stack.
-  int stack_alignment = OS::ActivationFrameAlignment();
-  ASSERT(IsAligned(stack_alignment, kPointerSize));
+  int stack_alignment = base::OS::ActivationFrameAlignment();
+  DCHECK(IsAligned(stack_alignment, kPointerSize));
   __ sub(sp, sp, Operand(stack_alignment));
 
   // r0 will point to the return address, placed by DirectCEntry.
@@ -1026,7 +1027,7 @@
   // Drop the return address from the stack.
   __ add(sp, sp, Operand(stack_alignment));
 
-  ASSERT(stack_alignment != 0);
+  DCHECK(stack_alignment != 0);
   __ ldr(sp, MemOperand(sp, 0));
 
   __ mov(code_pointer(), Operand(masm_->CodeObject()));
@@ -1066,10 +1067,10 @@
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
 
   // Current string.
-  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
 
-  ASSERT(re_code->instruction_start() <= *return_address);
-  ASSERT(*return_address <=
+  DCHECK(re_code->instruction_start() <= *return_address);
+  DCHECK(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
   Object* result = isolate->stack_guard()->HandleInterrupts();
@@ -1097,8 +1098,8 @@
   }
 
   // String might have changed.
-  if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
-    // If we changed between an ASCII and an UC16 string, the specialized
+  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a Latin1 and a UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
     return RETRY;
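A sketch of the decision above, under the assumption that generated regexp code is specialised for exactly one character width:

    enum SketchResult { PROCEED, RETRY_FROM_SCRATCH };

    SketchResult AfterGcCheck(bool was_one_byte, bool is_one_byte_now) {
      // Code compiled for Latin1 cannot run on a UC16 subject (and vice
      // versa), so a representation change forces a full retry.
      return was_one_byte == is_one_byte_now ? PROCEED : RETRY_FROM_SCRATCH;
    }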
@@ -1108,7 +1109,7 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+  DCHECK(StringShape(*subject_tmp).IsSequential() ||
       StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
@@ -1140,7 +1141,7 @@
 
 
 MemOperand RegExpMacroAssemblerARM::register_location(int register_index) {
-  ASSERT(register_index < (1<<30));
+  DCHECK(register_index < (1<<30));
   if (num_registers_ <= register_index) {
     num_registers_ = register_index + 1;
   }
@@ -1193,14 +1194,14 @@
 
 
 void RegExpMacroAssemblerARM::Push(Register source) {
-  ASSERT(!source.is(backtrack_stackpointer()));
+  DCHECK(!source.is(backtrack_stackpointer()));
   __ str(source,
          MemOperand(backtrack_stackpointer(), kPointerSize, NegPreIndex));
 }
 
 
 void RegExpMacroAssemblerARM::Pop(Register target) {
-  ASSERT(!target.is(backtrack_stackpointer()));
+  DCHECK(!target.is(backtrack_stackpointer()));
   __ ldr(target,
          MemOperand(backtrack_stackpointer(), kPointerSize, PostIndex));
 }
@@ -1245,24 +1246,24 @@
   // If unaligned load/stores are not supported then this function must only
   // be used to load a single character at a time.
   if (!CanReadUnaligned()) {
-    ASSERT(characters == 1);
+    DCHECK(characters == 1);
   }
 
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     if (characters == 4) {
       __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
     } else if (characters == 2) {
       __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ ldrb(current_character(), MemOperand(end_of_input_address(), offset));
     }
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     if (characters == 2) {
       __ ldr(current_character(), MemOperand(end_of_input_address(), offset));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ ldrh(current_character(), MemOperand(end_of_input_address(), offset));
     }
   }
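For orientation, the ldr/ldrh/ldrb selection above simply packs 'characters' subject characters of the current width into one register load; a sketch of the arithmetic, with LATIN1 meaning one byte per character:

    // LATIN1: ldr carries 4 chars, ldrh 2, ldrb 1; UC16: ldr 2, ldrh 1.
    int LoadSizeBytes(bool latin1, int characters) {
      return characters * (latin1 ? 1 : 2);
    }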
diff --git a/src/arm/regexp-macro-assembler-arm.h b/src/arm/regexp-macro-assembler-arm.h
index fef8413..7414e54 100644
--- a/src/arm/regexp-macro-assembler-arm.h
+++ b/src/arm/regexp-macro-assembler-arm.h
@@ -190,7 +190,7 @@
 
   MacroAssembler* masm_;
 
-  // Which mode to generate code for (ASCII or UC16).
+  // Which mode to generate code for (Latin1 or UC16).
   Mode mode_;
 
   // One greater than maximal register index actually used.
diff --git a/src/arm/simulator-arm.cc b/src/arm/simulator-arm.cc
index 1ef4a9c..0444025 100644
--- a/src/arm/simulator-arm.cc
+++ b/src/arm/simulator-arm.cc
@@ -10,11 +10,11 @@
 
 #if V8_TARGET_ARCH_ARM
 
-#include "src/disasm.h"
-#include "src/assembler.h"
-#include "src/codegen.h"
 #include "src/arm/constants-arm.h"
 #include "src/arm/simulator-arm.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
 
 #if defined(USE_SIMULATOR)
 
@@ -87,7 +87,7 @@
   char** msg_address =
     reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
   char* msg = *msg_address;
-  ASSERT(msg != NULL);
+  DCHECK(msg != NULL);
 
   // Update this stop description.
   if (isWatchedStop(code) && !watched_stops_[code].desc) {
@@ -311,7 +311,7 @@
             }
             for (int i = 0; i < DwVfpRegister::NumRegisters(); i++) {
               dvalue = GetVFPDoubleRegisterValue(i);
-              uint64_t as_words = BitCast<uint64_t>(dvalue);
+              uint64_t as_words = bit_cast<uint64_t>(dvalue);
               PrintF("%3s: %f 0x%08x %08x\n",
                      VFPRegisters::Name(i, true),
                      dvalue,
@@ -322,10 +322,10 @@
             if (GetValue(arg1, &value)) {
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
             } else if (GetVFPSingleValue(arg1, &svalue)) {
-              uint32_t as_word = BitCast<uint32_t>(svalue);
+              uint32_t as_word = bit_cast<uint32_t>(svalue);
               PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
             } else if (GetVFPDoubleValue(arg1, &dvalue)) {
-              uint64_t as_words = BitCast<uint64_t>(dvalue);
+              uint64_t as_words = bit_cast<uint64_t>(dvalue);
               PrintF("%s: %f 0x%08x %08x\n",
                      arg1,
                      dvalue,
@@ -342,17 +342,18 @@
                  || (strcmp(cmd, "printobject") == 0)) {
         if (argc == 2) {
           int32_t value;
+          OFStream os(stdout);
           if (GetValue(arg1, &value)) {
             Object* obj = reinterpret_cast<Object*>(value);
-            PrintF("%s: \n", arg1);
+            os << arg1 << ": \n";
 #ifdef DEBUG
-            obj->PrintLn();
+            obj->Print(os);
+            os << "\n";
 #else
-            obj->ShortPrint();
-            PrintF("\n");
+            os << Brief(obj) << "\n";
 #endif
           } else {
-            PrintF("%s unrecognized\n", arg1);
+            os << arg1 << " unrecognized\n";
           }
         } else {
           PrintF("printobject <value>\n");
@@ -451,7 +452,7 @@
         }
       } else if (strcmp(cmd, "gdb") == 0) {
         PrintF("relinquishing control to gdb\n");
-        v8::internal::OS::DebugBreak();
+        v8::base::OS::DebugBreak();
         PrintF("regaining control from gdb\n");
       } else if (strcmp(cmd, "break") == 0) {
         if (argc == 2) {
@@ -607,8 +608,8 @@
 
 
 static bool ICacheMatch(void* one, void* two) {
-  ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
-  ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
   return one == two;
 }
 
@@ -645,7 +646,7 @@
     FlushOnePage(i_cache, start, bytes_to_flush);
     start += bytes_to_flush;
     size -= bytes_to_flush;
-    ASSERT_EQ(0, start & CachePage::kPageMask);
+    DCHECK_EQ(0, start & CachePage::kPageMask);
     offset = 0;
   }
   if (size != 0) {
@@ -670,10 +671,10 @@
 void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
                              intptr_t start,
                              int size) {
-  ASSERT(size <= CachePage::kPageSize);
-  ASSERT(AllOnOnePage(start, size - 1));
-  ASSERT((start & CachePage::kLineMask) == 0);
-  ASSERT((size & CachePage::kLineMask) == 0);
+  DCHECK(size <= CachePage::kPageSize);
+  DCHECK(AllOnOnePage(start, size - 1));
+  DCHECK((start & CachePage::kLineMask) == 0);
+  DCHECK((size & CachePage::kLineMask) == 0);
   void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
   int offset = (start & CachePage::kPageMask);
   CachePage* cache_page = GetCachePage(i_cache, page);
@@ -813,7 +814,7 @@
     Redirection* current = isolate->simulator_redirection();
     for (; current != NULL; current = current->next_) {
       if (current->external_function_ == external_function) {
-        ASSERT_EQ(current->type(), type);
+        DCHECK_EQ(current->type(), type);
         return current;
       }
     }
@@ -852,7 +853,7 @@
 Simulator* Simulator::current(Isolate* isolate) {
   v8::internal::Isolate::PerIsolateThreadData* isolate_data =
       isolate->FindOrAllocatePerThreadDataForThisThread();
-  ASSERT(isolate_data != NULL);
+  DCHECK(isolate_data != NULL);
 
   Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
@@ -867,7 +868,7 @@
 // Sets the register in the architecture state. It will also deal with updating
 // Simulator internal state for special registers such as PC.
 void Simulator::set_register(int reg, int32_t value) {
-  ASSERT((reg >= 0) && (reg < num_registers));
+  DCHECK((reg >= 0) && (reg < num_registers));
   if (reg == pc) {
     pc_modified_ = true;
   }
@@ -878,7 +879,7 @@
 // Get the register from the architecture state. This function does handle
 // the special case of accessing the PC register.
 int32_t Simulator::get_register(int reg) const {
-  ASSERT((reg >= 0) && (reg < num_registers));
+  DCHECK((reg >= 0) && (reg < num_registers));
   // Stupid code added to avoid a bug in GCC.
   // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
   if (reg >= num_registers) return 0;
@@ -888,7 +889,7 @@
 
 
 double Simulator::get_double_from_register_pair(int reg) {
-  ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+  DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
 
   double dm_val = 0.0;
   // Read the bits from the unsigned integer register_[] array
@@ -901,62 +902,62 @@
 
 
 void Simulator::set_register_pair_from_double(int reg, double* value) {
-  ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
+  DCHECK((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
   memcpy(registers_ + reg, value, sizeof(*value));
 }
 
 
 void Simulator::set_dw_register(int dreg, const int* dbl) {
-  ASSERT((dreg >= 0) && (dreg < num_d_registers));
+  DCHECK((dreg >= 0) && (dreg < num_d_registers));
   registers_[dreg] = dbl[0];
   registers_[dreg + 1] = dbl[1];
 }
 
 
 void Simulator::get_d_register(int dreg, uint64_t* value) {
-  ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+  DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
   memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value));
 }
 
 
 void Simulator::set_d_register(int dreg, const uint64_t* value) {
-  ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+  DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
   memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value));
 }
 
 
 void Simulator::get_d_register(int dreg, uint32_t* value) {
-  ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+  DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
   memcpy(value, vfp_registers_ + dreg * 2, sizeof(*value) * 2);
 }
 
 
 void Simulator::set_d_register(int dreg, const uint32_t* value) {
-  ASSERT((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
+  DCHECK((dreg >= 0) && (dreg < DwVfpRegister::NumRegisters()));
   memcpy(vfp_registers_ + dreg * 2, value, sizeof(*value) * 2);
 }
 
 
 void Simulator::get_q_register(int qreg, uint64_t* value) {
-  ASSERT((qreg >= 0) && (qreg < num_q_registers));
+  DCHECK((qreg >= 0) && (qreg < num_q_registers));
   memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 2);
 }
 
 
 void Simulator::set_q_register(int qreg, const uint64_t* value) {
-  ASSERT((qreg >= 0) && (qreg < num_q_registers));
+  DCHECK((qreg >= 0) && (qreg < num_q_registers));
   memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 2);
 }
 
 
 void Simulator::get_q_register(int qreg, uint32_t* value) {
-  ASSERT((qreg >= 0) && (qreg < num_q_registers));
+  DCHECK((qreg >= 0) && (qreg < num_q_registers));
   memcpy(value, vfp_registers_ + qreg * 4, sizeof(*value) * 4);
 }
 
 
 void Simulator::set_q_register(int qreg, const uint32_t* value) {
-  ASSERT((qreg >= 0) && (qreg < num_q_registers));
+  DCHECK((qreg >= 0) && (qreg < num_q_registers));
   memcpy(vfp_registers_ + qreg * 4, value, sizeof(*value) * 4);
 }
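All the register accessors above funnel reinterpretation through memcpy, the one well-defined way to reinterpret object representations in C++ (the same reason BitCast became bit_cast earlier in this file). A minimal sketch in that style:

    #include <cstring>

    template <class Dest, class Source>
    Dest bit_cast_sketch(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source),
                    "bit_cast requires equal sizes");
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));  // defined, unlike a
      return dest;                                // pointer-cast type pun
    }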
 
@@ -981,22 +982,22 @@
 
 // Getting from and setting into VFP registers.
 void Simulator::set_s_register(int sreg, unsigned int value) {
-  ASSERT((sreg >= 0) && (sreg < num_s_registers));
+  DCHECK((sreg >= 0) && (sreg < num_s_registers));
   vfp_registers_[sreg] = value;
 }
 
 
 unsigned int Simulator::get_s_register(int sreg) const {
-  ASSERT((sreg >= 0) && (sreg < num_s_registers));
+  DCHECK((sreg >= 0) && (sreg < num_s_registers));
   return vfp_registers_[sreg];
 }
 
 
 template<class InputType, int register_size>
 void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
-  ASSERT(reg_index >= 0);
-  if (register_size == 1) ASSERT(reg_index < num_s_registers);
-  if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
+  DCHECK(reg_index >= 0);
+  if (register_size == 1) DCHECK(reg_index < num_s_registers);
+  if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
 
   char buffer[register_size * sizeof(vfp_registers_[0])];
   memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
@@ -1007,9 +1008,9 @@
 
 template<class ReturnType, int register_size>
 ReturnType Simulator::GetFromVFPRegister(int reg_index) {
-  ASSERT(reg_index >= 0);
-  if (register_size == 1) ASSERT(reg_index < num_s_registers);
-  if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
+  DCHECK(reg_index >= 0);
+  if (register_size == 1) DCHECK(reg_index < num_s_registers);
+  if (register_size == 2) DCHECK(reg_index < DwVfpRegister::NumRegisters());
 
   ReturnType value = 0;
   char buffer[register_size * sizeof(vfp_registers_[0])];
@@ -1429,7 +1430,7 @@
           *carry_out = (result & 1) == 1;
           result >>= 1;
         } else {
-          ASSERT(shift_amount >= 32);
+          DCHECK(shift_amount >= 32);
           if (result < 0) {
             *carry_out = true;
             result = 0xffffffff;
@@ -1452,7 +1453,7 @@
           *carry_out = (result & 1) == 1;
           result = 0;
         } else {
-          ASSERT(shift_amount > 32);
+          DCHECK(shift_amount > 32);
           *carry_out = false;
           result = 0;
         }
@@ -1574,7 +1575,7 @@
 
   intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
   // Catch null pointers a little earlier.
-  ASSERT(start_address > 8191 || start_address < 0);
+  DCHECK(start_address > 8191 || start_address < 0);
   int reg = 0;
   while (rlist != 0) {
     if ((rlist & 1) != 0) {
@@ -1588,7 +1589,7 @@
     reg++;
     rlist >>= 1;
   }
-  ASSERT(end_address == ((intptr_t)address) - 4);
+  DCHECK(end_address == ((intptr_t)address) - 4);
   if (instr->HasW()) {
     set_register(instr->RnValue(), rn_val);
   }
@@ -1647,7 +1648,7 @@
       address += 2;
     }
   }
-  ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
+  DCHECK(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
   if (instr->HasW()) {
     set_register(instr->RnValue(), rn_val);
   }
@@ -1852,7 +1853,7 @@
         target(arg0, arg1, Redirection::ReverseRedirection(arg2));
       } else {
         // builtin call.
-        ASSERT(redirection->type() == ExternalReference::BUILTIN_CALL);
+        DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
         SimulatorRuntimeCall target =
             reinterpret_cast<SimulatorRuntimeCall>(external);
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
@@ -1928,13 +1929,13 @@
 
 
 bool Simulator::isWatchedStop(uint32_t code) {
-  ASSERT(code <= kMaxStopCode);
+  DCHECK(code <= kMaxStopCode);
   return code < kNumOfWatchedStops;
 }
 
 
 bool Simulator::isEnabledStop(uint32_t code) {
-  ASSERT(code <= kMaxStopCode);
+  DCHECK(code <= kMaxStopCode);
   // Unwatched stops are always enabled.
   return !isWatchedStop(code) ||
     !(watched_stops_[code].count & kStopDisabledBit);
@@ -1942,7 +1943,7 @@
 
 
 void Simulator::EnableStop(uint32_t code) {
-  ASSERT(isWatchedStop(code));
+  DCHECK(isWatchedStop(code));
   if (!isEnabledStop(code)) {
     watched_stops_[code].count &= ~kStopDisabledBit;
   }
@@ -1950,7 +1951,7 @@
 
 
 void Simulator::DisableStop(uint32_t code) {
-  ASSERT(isWatchedStop(code));
+  DCHECK(isWatchedStop(code));
   if (isEnabledStop(code)) {
     watched_stops_[code].count |= kStopDisabledBit;
   }
@@ -1958,8 +1959,8 @@
 
 
 void Simulator::IncreaseStopCounter(uint32_t code) {
-  ASSERT(code <= kMaxStopCode);
-  ASSERT(isWatchedStop(code));
+  DCHECK(code <= kMaxStopCode);
+  DCHECK(isWatchedStop(code));
   if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
    PrintF("Stop counter for code %i has overflowed.\n"
           "Enabling this code and resetting the counter to 0.\n", code);
@@ -1973,7 +1974,7 @@
 
 // Print a stop status.
 void Simulator::PrintStopInfo(uint32_t code) {
-  ASSERT(code <= kMaxStopCode);
+  DCHECK(code <= kMaxStopCode);
   if (!isWatchedStop(code)) {
     PrintF("Stop not watched.");
   } else {
@@ -2091,7 +2092,7 @@
         switch (instr->PUField()) {
           case da_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn], -'rm");
-            ASSERT(!instr->HasW());
+            DCHECK(!instr->HasW());
             addr = rn_val;
             rn_val -= rm_val;
             set_register(rn, rn_val);
@@ -2099,7 +2100,7 @@
           }
           case ia_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn], +'rm");
-            ASSERT(!instr->HasW());
+            DCHECK(!instr->HasW());
             addr = rn_val;
             rn_val += rm_val;
             set_register(rn, rn_val);
@@ -2134,7 +2135,7 @@
         switch (instr->PUField()) {
           case da_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #-'off8");
-            ASSERT(!instr->HasW());
+            DCHECK(!instr->HasW());
             addr = rn_val;
             rn_val -= imm_val;
             set_register(rn, rn_val);
@@ -2142,7 +2143,7 @@
           }
           case ia_x: {
             // Format(instr, "'memop'cond'sign'h 'rd, ['rn], #+'off8");
-            ASSERT(!instr->HasW());
+            DCHECK(!instr->HasW());
             addr = rn_val;
             rn_val += imm_val;
             set_register(rn, rn_val);
@@ -2174,7 +2175,7 @@
         }
       }
       if (((instr->Bits(7, 4) & 0xd) == 0xd) && (instr->Bit(20) == 0)) {
-        ASSERT((rd % 2) == 0);
+        DCHECK((rd % 2) == 0);
         if (instr->HasH()) {
           // The strd instruction.
           int32_t value1 = get_register(rd);
@@ -2205,8 +2206,8 @@
         }
       } else {
         // signed byte loads
-        ASSERT(instr->HasSign());
-        ASSERT(instr->HasL());
+        DCHECK(instr->HasSign());
+        DCHECK(instr->HasL());
         int8_t val = ReadB(addr);
         set_register(rd, val);
       }
@@ -2270,7 +2271,7 @@
     if (type == 0) {
       shifter_operand = GetShiftRm(instr, &shifter_carry_out);
     } else {
-      ASSERT(instr->TypeValue() == 1);
+      DCHECK(instr->TypeValue() == 1);
       shifter_operand = GetImm(instr, &shifter_carry_out);
     }
     int32_t alu_out;
@@ -2493,7 +2494,7 @@
   switch (instr->PUField()) {
     case da_x: {
       // Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
-      ASSERT(!instr->HasW());
+      DCHECK(!instr->HasW());
       addr = rn_val;
       rn_val -= im_val;
       set_register(rn, rn_val);
@@ -2501,7 +2502,7 @@
     }
     case ia_x: {
       // Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
-      ASSERT(!instr->HasW());
+      DCHECK(!instr->HasW());
       addr = rn_val;
       rn_val += im_val;
       set_register(rn, rn_val);
@@ -2557,7 +2558,7 @@
   int32_t addr = 0;
   switch (instr->PUField()) {
     case da_x: {
-      ASSERT(!instr->HasW());
+      DCHECK(!instr->HasW());
       Format(instr, "'memop'cond'b 'rd, ['rn], -'shift_rm");
       UNIMPLEMENTED();
       break;
@@ -2710,28 +2711,30 @@
     }
     case db_x: {
       if (FLAG_enable_sudiv) {
-        if (!instr->HasW()) {
-          if (instr->Bits(5, 4) == 0x1) {
-             if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
-               // sdiv (in V8 notation matching ARM ISA format) rn = rm/rs
-               // Format(instr, "'sdiv'cond'b 'rn, 'rm, 'rs);
-               int rm = instr->RmValue();
-               int32_t rm_val = get_register(rm);
-               int rs = instr->RsValue();
-               int32_t rs_val = get_register(rs);
-               int32_t ret_val = 0;
-               ASSERT(rs_val != 0);
-               if ((rm_val == kMinInt) && (rs_val == -1)) {
-                 ret_val = kMinInt;
-               } else {
-                 ret_val = rm_val / rs_val;
-               }
-               set_register(rn, ret_val);
-               return;
-             }
-           }
-         }
-       }
+        if (instr->Bits(5, 4) == 0x1) {
+          if ((instr->Bit(22) == 0x0) && (instr->Bit(20) == 0x1)) {
+            // (s/u)div (in V8 notation matching ARM ISA format) rn = rm/rs
+            // Format(instr, "'(s/u)div'cond'b 'rn, 'rm, 'rs);
+            int rm = instr->RmValue();
+            int32_t rm_val = get_register(rm);
+            int rs = instr->RsValue();
+            int32_t rs_val = get_register(rs);
+            int32_t ret_val = 0;
+            DCHECK(rs_val != 0);
+            // udiv
+            if (instr->Bit(21) == 0x1) {
+              ret_val = static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
+                                             static_cast<uint32_t>(rs_val));
+            } else if ((rm_val == kMinInt) && (rs_val == -1)) {
+              ret_val = kMinInt;
+            } else {
+              ret_val = rm_val / rs_val;
+            }
+            set_register(rn, ret_val);
+            return;
+          }
+        }
+      }
       // Format(instr, "'memop'cond'b 'rd, ['rn, -'shift_rm]'w");
       addr = rn_val - shifter_operand;
       if (instr->HasW()) {
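The rewritten block above folds udiv into the old sdiv-only path: bit 21 selects unsigned division, and the kMinInt / -1 case is pinned to kMinInt, which matches the ARM definition and avoids signed overflow in the host C++. A sketch of the semantics being modelled (note the simulator DCHECKs rs_val != 0 rather than modelling ARM's divide-by-zero-yields-zero rule):

    #include <cstdint>
    #include <limits>

    int32_t DivModel(bool is_udiv, int32_t rm_val, int32_t rs_val) {
      if (is_udiv) {
        return static_cast<int32_t>(static_cast<uint32_t>(rm_val) /
                                    static_cast<uint32_t>(rs_val));
      }
      if (rm_val == std::numeric_limits<int32_t>::min() && rs_val == -1) {
        return rm_val;                           // saturates, no UB
      }
      return rm_val / rs_val;
    }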
@@ -2771,7 +2774,7 @@
           uint32_t rd_val =
               static_cast<uint32_t>(get_register(instr->RdValue()));
           uint32_t bitcount = msbit - lsbit + 1;
-          uint32_t mask = (1 << bitcount) - 1;
+          uint32_t mask = 0xffffffffu >> (32 - bitcount);
           rd_val &= ~(mask << lsbit);
           if (instr->RmValue() != 15) {
             // bfi - bitfield insert.
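The mask change above is a correctness fix, not a style one: for a full-width field (msbit = 31, lsbit = 0) the old expression evaluated 1 << 32, which is undefined behaviour in C++, while the new form is defined for every bitcount in [1, 32]:

    #include <cstdint>

    // Precondition: 1 <= bitcount <= 32.
    uint32_t LowBitMask(uint32_t bitcount) {
      return 0xffffffffu >> (32 - bitcount);     // well-defined at 32
    }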
@@ -2818,7 +2821,7 @@
 
 
 void Simulator::DecodeType4(Instruction* instr) {
-  ASSERT(instr->Bit(22) == 0);  // only allowed to be set in privileged mode
+  DCHECK(instr->Bit(22) == 0);  // only allowed to be set in privileged mode
   if (instr->HasL()) {
     // Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
     HandleRList(instr, true);
@@ -2872,8 +2875,8 @@
 // vmrs
 // Dd = vsqrt(Dm)
 void Simulator::DecodeTypeVFP(Instruction* instr) {
-  ASSERT((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
-  ASSERT(instr->Bits(11, 9) == 0x5);
+  DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
+  DCHECK(instr->Bits(11, 9) == 0x5);
 
   // Obtain double precision register codes.
   int vm = instr->VFPMRegValue(kDoublePrecision);
@@ -3088,7 +3091,7 @@
 
 void Simulator::DecodeVMOVBetweenCoreAndSinglePrecisionRegisters(
     Instruction* instr) {
-  ASSERT((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
+  DCHECK((instr->Bit(4) == 1) && (instr->VCValue() == 0x0) &&
          (instr->VAValue() == 0x0));
 
   int t = instr->RtValue();
@@ -3106,8 +3109,8 @@
 
 
 void Simulator::DecodeVCMP(Instruction* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
-  ASSERT(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
+  DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  DCHECK(((instr->Opc2Value() == 0x4) || (instr->Opc2Value() == 0x5)) &&
          (instr->Opc3Value() & 0x1));
   // Comparison.
 
@@ -3144,8 +3147,8 @@
 
 
 void Simulator::DecodeVCVTBetweenDoubleAndSingle(Instruction* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
-  ASSERT((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
+  DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7));
+  DCHECK((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3));
 
   VFPRegPrecision dst_precision = kDoublePrecision;
   VFPRegPrecision src_precision = kSinglePrecision;
@@ -3169,7 +3172,7 @@
 bool get_inv_op_vfp_flag(VFPRoundingMode mode,
                          double val,
                          bool unsigned_) {
-  ASSERT((mode == RN) || (mode == RM) || (mode == RZ));
+  DCHECK((mode == RN) || (mode == RM) || (mode == RZ));
   double max_uint = static_cast<double>(0xffffffffu);
   double max_int = static_cast<double>(kMaxInt);
   double min_int = static_cast<double>(kMinInt);
@@ -3222,9 +3225,9 @@
 
 
 void Simulator::DecodeVCVTBetweenFloatingPointAndInteger(Instruction* instr) {
-  ASSERT((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
+  DCHECK((instr->Bit(4) == 0) && (instr->Opc1Value() == 0x7) &&
          (instr->Bits(27, 23) == 0x1D));
-  ASSERT(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
+  DCHECK(((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) ||
          (((instr->Opc2Value() >> 1) == 0x6) && (instr->Opc3Value() & 0x1)));
 
   // Conversion between floating-point and integer.
@@ -3248,7 +3251,7 @@
     // mode or the default Round to Zero mode.
     VFPRoundingMode mode = (instr->Bit(7) != 1) ? FPSCR_rounding_mode_
                                                 : RZ;
-    ASSERT((mode == RM) || (mode == RZ) || (mode == RN));
+    DCHECK((mode == RM) || (mode == RZ) || (mode == RN));
 
     bool unsigned_integer = (instr->Bit(16) == 0);
     bool double_precision = (src_precision == kDoublePrecision);
@@ -3332,7 +3335,7 @@
 // Ddst = MEM(Rbase + 4*offset).
 // MEM(Rbase + 4*offset) = Dsrc.
 void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
-  ASSERT((instr->TypeValue() == 6));
+  DCHECK((instr->TypeValue() == 6));
 
   if (instr->CoprocessorValue() == 0xA) {
     switch (instr->OpcodeValue()) {
@@ -3753,7 +3756,7 @@
   // Set up arguments
 
   // First four arguments passed in registers.
-  ASSERT(argument_count >= 4);
+  DCHECK(argument_count >= 4);
   set_register(r0, va_arg(parameters, int32_t));
   set_register(r1, va_arg(parameters, int32_t));
   set_register(r2, va_arg(parameters, int32_t));
@@ -3763,8 +3766,8 @@
   int original_stack = get_register(sp);
   // Compute position of stack on entry to generated code.
   int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t));
-  if (OS::ActivationFrameAlignment() != 0) {
-    entry_stack &= -OS::ActivationFrameAlignment();
+  if (base::OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -base::OS::ActivationFrameAlignment();
   }
   // Store remaining arguments on stack, from low to high memory.
   intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
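The '&= -Alignment' idiom above relies on two's complement: for a power-of-two A, -A has ones in every bit at or above log2(A), so the AND rounds the stack pointer down, the right direction for a downward-growing stack. As a sketch:

    #include <cassert>
    #include <cstdint>

    int32_t AlignDown(int32_t x, int32_t a) {
      assert(a > 0 && (a & (a - 1)) == 0);   // a must be a power of two
      return x & -a;                         // AlignDown(0x1005, 8) == 0x1000
    }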
diff --git a/src/arm/simulator-arm.h b/src/arm/simulator-arm.h
index 9a2f192..76865bc 100644
--- a/src/arm/simulator-arm.h
+++ b/src/arm/simulator-arm.h
@@ -61,8 +61,8 @@
 // Running with a simulator.
 
 #include "src/arm/constants-arm.h"
-#include "src/hashmap.h"
 #include "src/assembler.h"
+#include "src/hashmap.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/arm/stub-cache-arm.cc b/src/arm/stub-cache-arm.cc
deleted file mode 100644
index edc6953..0000000
--- a/src/arm/stub-cache-arm.cc
+++ /dev/null
@@ -1,1516 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM
-
-#include "src/ic-inl.h"
-#include "src/codegen.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
-                       MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register receiver,
-                       Register name,
-                       // Number of the cache entry, not scaled.
-                       Register offset,
-                       Register scratch,
-                       Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
-  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
-  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  ASSERT(value_off_addr > key_off_addr);
-  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
-  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
-  ASSERT(map_off_addr > key_off_addr);
-  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
-  ASSERT((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ mov(base_addr, Operand(key_offset));
-  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
-
-  // Check that the key in the entry matches the name.
-  __ ldr(ip, MemOperand(base_addr, 0));
-  __ cmp(name, ip);
-  __ b(ne, &miss);
-
-  // Check the map matches.
-  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ cmp(ip, scratch2);
-  __ b(ne, &miss);
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-  // Check that the flags match what we're looking for.
-  Register flags_reg = base_addr;
-  base_addr = no_reg;
-  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
-  // It's a nice optimization if this constant is encodable in the bic insn.
-
-  uint32_t mask = Code::kFlagsNotUsedInLookup;
-  ASSERT(__ ImmediateFitsAddrMode1Instruction(mask));
-  __ bic(flags_reg, flags_reg, Operand(mask));
-  __ cmp(flags_reg, Operand(flags));
-  __ b(ne, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
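Before its removal, ProbeTable scaled the cache-entry index by three (one word each for the name, code and map fields of a 12-byte entry) with an add-plus-shift instead of a multiply; the index math it relied on, as a sketch:

    #include <cstdint>

    uint32_t EntryToWordIndex(uint32_t entry) {
      return entry + (entry << 1);           // entry * 3, as add ... LSL #1
    }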
-
-
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                    Label* miss_label,
-                                                    Register receiver,
-                                                    Handle<Name> name,
-                                                    Register scratch0,
-                                                    Register scratch1) {
-  ASSERT(name->IsUniqueName());
-  ASSERT(!receiver.is(scratch0));
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  Label done;
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  Register map = scratch1;
-  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
-  __ b(ne, miss_label);
-
-  // Check that receiver is a JSObject.
-  __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-  __ b(lt, miss_label);
-
-  // Load properties array.
-  Register properties = scratch0;
-  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  // Check that the properties array is a dictionary.
-  __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
-  Register tmp = properties;
-  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
-  __ cmp(map, tmp);
-  __ b(ne, miss_label);
-
-  // Restore the temporarily used register.
-  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   receiver,
-                                                   properties,
-                                                   name,
-                                                   scratch1);
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra,
-                              Register extra2,
-                              Register extra3) {
-  Isolate* isolate = masm->isolate();
-  Label miss;
-
-  // Make sure the code below stays valid: the multiplying code relies on
-  // the entry size being 12.
-  ASSERT(sizeof(Entry) == 12);
-
-  // Make sure the flags value does not name a specific type.
-  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Make sure that there are no register conflicts.
-  ASSERT(!scratch.is(receiver));
-  ASSERT(!scratch.is(name));
-  ASSERT(!extra.is(receiver));
-  ASSERT(!extra.is(name));
-  ASSERT(!extra.is(scratch));
-  ASSERT(!extra2.is(receiver));
-  ASSERT(!extra2.is(name));
-  ASSERT(!extra2.is(scratch));
-  ASSERT(!extra2.is(extra));
-
-  // Check scratch, extra and extra2 registers are valid.
-  ASSERT(!scratch.is(no_reg));
-  ASSERT(!extra.is(no_reg));
-  ASSERT(!extra2.is(no_reg));
-  ASSERT(!extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
-                      extra2, extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ add(scratch, scratch, Operand(ip));
-  uint32_t mask = kPrimaryTableSize - 1;
-  // We shift out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.
-  __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize));
-  // Mask down the eor argument to the minimum to keep the immediate
-  // ARM-encodable.
-  __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
-  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
-  __ and_(scratch, scratch, Operand(mask));
-
-  // Probe the primary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kPrimary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
-  uint32_t mask2 = kSecondaryTableSize - 1;
-  __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
-  __ and_(scratch, scratch, Operand(mask2));
-
-  // Probe the secondary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kSecondary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
-                      extra2, extra3);
-}
-
-
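// Hedged C++ rendering of the two hash computations emitted above. The table
// sizes are illustrative stand-ins, not the values from stub-cache.h; the
// final mask makes the pre-masking of the flags immediate (done above only to
// keep it ARM-encodable) semantically irrelevant.
static const uint32_t kTagSizeSketch = 2;        // kHeapObjectTagSize
static const uint32_t kPrimarySizeSketch = 2048;   // illustrative table size
static const uint32_t kSecondarySizeSketch = 512;  // illustrative table size

static uint32_t PrimaryOffsetSketch(uint32_t hash_field, uint32_t map_word,
                                    uint32_t flags) {
  uint32_t scratch = (hash_field + map_word) >> kTagSizeSketch;
  return (scratch ^ (flags >> kTagSizeSketch)) & (kPrimarySizeSketch - 1);
}

static uint32_t SecondaryOffsetSketch(uint32_t primary, uint32_t name_word,
                                      uint32_t flags) {
  uint32_t scratch = primary - (name_word >> kTagSizeSketch);
  return (scratch + (flags >> kTagSizeSketch)) & (kSecondarySizeSketch - 1);
}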
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                       int index,
-                                                       Register prototype) {
-  // Load the global or builtins object from the current context.
-  __ ldr(prototype,
-         MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  // Load the native context from the global or builtins object.
-  __ ldr(prototype,
-         FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
-  // Load the function from the native context.
-  __ ldr(prototype, MemOperand(prototype, Context::SlotOffset(index)));
-  // Load the initial map.  The global functions all have initial maps.
-  __ ldr(prototype,
-         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
-  // Check we're still in the same context.
-  Register scratch = prototype;
-  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ ldr(scratch, MemOperand(cp, offset));
-  __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
-  __ Move(ip, function);
-  __ cmp(ip, scratch);
-  __ b(ne, miss);
-
-  // Load its initial map. The global functions all have initial maps.
-  __ Move(prototype, Handle<Map>(function->initial_map()));
-  // Load the prototype from the initial map.
-  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            bool inobject,
-                                            int index,
-                                            Representation representation) {
-  ASSERT(!representation.IsDouble());
-  int offset = index * kPointerSize;
-  if (!inobject) {
-    // Calculate the offset into the properties array.
-    offset = offset + FixedArray::kHeaderSize;
-    __ ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
-    src = dst;
-  }
-  __ ldr(dst, FieldMemOperand(src, offset));
-}
-
-
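// The offset rule GenerateFastPropertyLoad applies, as a standalone helper.
// Constants are local sketch values (kPointerSize is 4 on ARM; the FixedArray
// header size is illustrative). Out-of-object fields additionally require the
// extra load of the properties array that the code above emits.
static int FieldDisplacementSketch(bool inobject, int index) {
  const int kPointerSizeSketch = 4;
  const int kFixedArrayHeaderSketch = 8;  // illustrative header size
  int offset = index * kPointerSizeSketch;
  return inobject ? offset : offset + kFixedArrayHeaderSketch;
}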
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register scratch,
-                                           Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss_label);
-
-  // Check that the object is a JS array.
-  __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE);
-  __ b(ne, miss_label);
-
-  // Load length directly from the JS array.
-  __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                                 Register receiver,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(r0, scratch1);
-  __ Ret();
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
-                                             Handle<JSGlobalObject> global,
-                                             Handle<Name> name,
-                                             Register scratch,
-                                             Label* miss) {
-  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  ASSERT(cell->value()->IsTheHole());
-  __ mov(scratch, Operand(cell));
-  __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
-  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-  __ cmp(scratch, ip);
-  __ b(ne, miss);
-}
-
-
-void StoreStubCompiler::GenerateNegativeHolderLookup(
-    MacroAssembler* masm,
-    Handle<JSObject> holder,
-    Register holder_reg,
-    Handle<Name> name,
-    Label* miss) {
-  if (holder->IsJSGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
-  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
-    GenerateDictionaryNegativeLookup(
-        masm, miss, holder_reg, name, scratch1(), scratch2());
-  }
-}
-
-
-// Generate StoreTransition code; the value is passed in the r0 register.
-// When leaving generated code after success, the receiver_reg and name_reg
-// may be clobbered.  Upon branch to miss_label, the receiver and name
-// registers have their original values.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
-                                                Handle<JSObject> object,
-                                                LookupResult* lookup,
-                                                Handle<Map> transition,
-                                                Handle<Name> name,
-                                                Register receiver_reg,
-                                                Register storage_reg,
-                                                Register value_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                Label* miss_label,
-                                                Label* slow) {
-  // r0 : value
-  Label exit;
-
-  int descriptor = transition->LastAdded();
-  DescriptorArray* descriptors = transition->instance_descriptors();
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  Representation representation = details.representation();
-  ASSERT(!representation.IsNone());
-
-  if (details.type() == CONSTANT) {
-    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
-    __ Move(scratch1, constant);
-    __ cmp(value_reg, scratch1);
-    __ b(ne, miss_label);
-  } else if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = descriptors->GetFieldType(descriptor);
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-      Label do_store;
-      while (true) {
-        __ CompareMap(scratch1, it.Current(), &do_store);
-        it.Advance();
-        if (it.Done()) {
-          __ b(ne, miss_label);
-          break;
-        }
-        __ b(eq, &do_store);
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    Label do_store, heap_number;
-    __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
-
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiUntag(scratch1, value_reg);
-    __ vmov(s0, scratch1);
-    __ vcvt_f64_s32(d0, s0);
-    __ jmp(&do_store);
-
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
-                miss_label, DONT_DO_SMI_CHECK);
-    __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ bind(&do_store);
-    __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
-  }
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  // Perform map transition for the receiver if necessary.
-  if (details.type() == FIELD &&
-      object->map()->unused_property_fields() == 0) {
-    // The properties must be extended before we can store the value.
-    // We jump to a runtime call that extends the properties array.
-    __ push(receiver_reg);
-    __ mov(r2, Operand(transition));
-    __ Push(r2, r0);
-    __ TailCallExternalReference(
-        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
-                          masm->isolate()),
-        3,
-        1);
-    return;
-  }
-
-  // Update the map of the object.
-  __ mov(scratch1, Operand(transition));
-  __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
-  // Update the write barrier for the map field.
-  __ RecordWriteField(receiver_reg,
-                      HeapObject::kMapOffset,
-                      scratch1,
-                      scratch2,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  if (details.type() == CONSTANT) {
-    ASSERT(value_reg.is(r0));
-    __ Ret();
-    return;
-  }
-
-  int index = transition->instance_descriptors()->GetFieldIndex(
-      transition->LastAdded());
-
-  // Adjust for the number of properties stored in the object. Even in the
-  // face of a transition we can use the old map here because the size of the
-  // object and the number of in-object properties is not going to change.
-  index -= object->map()->inobject_properties();
-
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index < 0) {
-    // Set the property straight into the object.
-    int offset = object->map()->instance_size() + (index * kPointerSize);
-    if (representation.IsDouble()) {
-      __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
-    } else {
-      __ str(value_reg, FieldMemOperand(receiver_reg, offset));
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(receiver_reg,
-                          offset,
-                          storage_reg,
-                          scratch1,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    // Get the properties array.
-    __ ldr(scratch1,
-           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-    if (representation.IsDouble()) {
-      __ str(storage_reg, FieldMemOperand(scratch1, offset));
-    } else {
-      __ str(value_reg, FieldMemOperand(scratch1, offset));
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(scratch1,
-                          offset,
-                          storage_reg,
-                          receiver_reg,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  // Return the value (register r0).
-  ASSERT(value_reg.is(r0));
-  __ bind(&exit);
-  __ Ret();
-}
-
-
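// The signed-index convention used in GenerateStoreTransition above, reduced
// to plain arithmetic: after subtracting the in-object capacity, negative
// indices address slots counted back from the end of the object, while
// non-negative ones address the external properties array. Constants are
// local sketch values, not V8 declarations.
static int TransitionStoreOffsetSketch(int field_index,
                                       int inobject_properties,
                                       int instance_size) {
  const int kPointerSizeSketch = 4;
  const int kFixedArrayHeaderSketch = 8;
  int index = field_index - inobject_properties;
  if (index < 0) {
    return instance_size + index * kPointerSizeSketch;  // in-object slot
  }
  return index * kPointerSizeSketch + kFixedArrayHeaderSketch;  // properties array
}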
-// Generate StoreField code; the value is passed in the r0 register.
-// When leaving generated code after success, the receiver_reg and name_reg
-// may be clobbered.  Upon branch to miss_label, the receiver and name
-// registers have their original values.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                           Handle<JSObject> object,
-                                           LookupResult* lookup,
-                                           Register receiver_reg,
-                                           Register name_reg,
-                                           Register value_reg,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* miss_label) {
-  // r0 : value
-  Label exit;
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  FieldIndex index = lookup->GetFieldIndex();
-
-  Representation representation = lookup->representation();
-  ASSERT(!representation.IsNone());
-  if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = lookup->GetFieldType();
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-      Label do_store;
-      while (true) {
-        __ CompareMap(scratch1, it.Current(), &do_store);
-        it.Advance();
-        if (it.Done()) {
-          __ b(ne, miss_label);
-          break;
-        }
-        __ b(eq, &do_store);
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    // Load the double storage.
-    if (index.is_inobject()) {
-      __ ldr(scratch1, FieldMemOperand(receiver_reg, index.offset()));
-    } else {
-      __ ldr(scratch1,
-             FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-      __ ldr(scratch1, FieldMemOperand(scratch1, index.offset()));
-    }
-
-    // Store the value into the storage.
-    Label do_store, heap_number;
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiUntag(scratch2, value_reg);
-    __ vmov(s0, scratch2);
-    __ vcvt_f64_s32(d0, s0);
-    __ jmp(&do_store);
-
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
-                miss_label, DONT_DO_SMI_CHECK);
-    __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ bind(&do_store);
-    __ vstr(d0, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
-    // Return the value (register r0).
-    ASSERT(value_reg.is(r0));
-    __ Ret();
-    return;
-  }
-
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index.is_inobject()) {
-    // Set the property straight into the object.
-    __ str(value_reg, FieldMemOperand(receiver_reg, index.offset()));
-
-    if (!representation.IsSmi()) {
-      // Skip updating write barrier if storing a smi.
-      __ JumpIfSmi(value_reg, &exit);
-
-      // Update the write barrier for the array address.
-      // Pass the now unused name_reg as a scratch register.
-      __ mov(name_reg, value_reg);
-      __ RecordWriteField(receiver_reg,
-                          index.offset(),
-                          name_reg,
-                          scratch1,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    // Get the properties array.
-    __ ldr(scratch1,
-           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ str(value_reg, FieldMemOperand(scratch1, index.offset()));
-
-    if (!representation.IsSmi()) {
-      // Skip updating write barrier if storing a smi.
-      __ JumpIfSmi(value_reg, &exit);
-
-      // Update the write barrier for the array address.
-      // Ok to clobber receiver_reg and name_reg, since we return.
-      __ mov(name_reg, value_reg);
-      __ RecordWriteField(scratch1,
-                          index.offset(),
-                          name_reg,
-                          receiver_reg,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  // Return the value (register r0).
-  ASSERT(value_reg.is(r0));
-  __ bind(&exit);
-  __ Ret();
-}
-
-
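// Scalar sketch of the smi/heap-number split in the double paths above. On
// 32-bit ARM a smi is its value shifted left by one with a zero low bit, so
// the check is a bit test and untagging is an arithmetic shift; anything else
// must already be a heap number whose payload is read as an IEEE double.
// Local stand-ins, purely illustrative.
static bool IsSmiSketch(int32_t tagged) { return (tagged & 1) == 0; }

static double SmiAsDoubleSketch(int32_t tagged) {
  return static_cast<double>(tagged >> 1);  // SmiUntag + vmov/vcvt_f64_s32
}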
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
-                                            Label* label,
-                                            Handle<Name> name) {
-  if (!label->is_unused()) {
-    __ bind(label);
-    __ mov(this->name(), Operand(name));
-  }
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
-                                     Register receiver,
-                                     Register holder,
-                                     Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
-  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
-  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
-  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
-  __ push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
-  Register scratch = name;
-  __ mov(scratch, Operand(interceptor));
-  __ push(scratch);
-  __ push(receiver);
-  __ push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj,
-    IC::UtilityId id) {
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-  __ CallExternalReference(
-      ExternalReference(IC_Utility(id), masm->isolate()),
-      StubCache::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
-                                       const CallOptimization& optimization,
-                                       Handle<Map> receiver_map,
-                                       Register receiver,
-                                       Register scratch_in,
-                                       bool is_store,
-                                       int argc,
-                                       Register* values) {
-  ASSERT(!receiver.is(scratch_in));
-  __ push(receiver);
-  // Write the arguments to stack frame.
-  for (int i = 0; i < argc; i++) {
-    Register arg = values[argc-1-i];
-    ASSERT(!receiver.is(arg));
-    ASSERT(!scratch_in.is(arg));
-    __ push(arg);
-  }
-  ASSERT(optimization.is_simple_api_call());
-
-  // Abi for CallApiFunctionStub.
-  Register callee = r0;
-  Register call_data = r4;
-  Register holder = r2;
-  Register api_function_address = r1;
-
-  // Put holder in place.
-  CallOptimization::HolderLookup holder_lookup;
-  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
-      receiver_map,
-      &holder_lookup);
-  switch (holder_lookup) {
-    case CallOptimization::kHolderIsReceiver:
-      __ Move(holder, receiver);
-      break;
-    case CallOptimization::kHolderFound:
-      __ Move(holder, api_holder);
-      break;
-    case CallOptimization::kHolderNotFound:
-      UNREACHABLE();
-      break;
-  }
-
-  Isolate* isolate = masm->isolate();
-  Handle<JSFunction> function = optimization.constant_function();
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
-  // Put callee in place.
-  __ Move(callee, function);
-
-  bool call_data_undefined = false;
-  // Put call_data in place.
-  if (isolate->heap()->InNewSpace(*call_data_obj)) {
-    __ Move(call_data, api_call_info);
-    __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
-  } else if (call_data_obj->IsUndefined()) {
-    call_data_undefined = true;
-    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
-  } else {
-    __ Move(call_data, call_data_obj);
-  }
-
-  // Put api_function_address in place.
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  ApiFunction fun(function_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
-  ExternalReference ref = ExternalReference(&fun,
-                                            type,
-                                            masm->isolate());
-  __ mov(api_function_address, Operand(ref));
-
-  // Jump to stub.
-  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
-  __ TailCallStub(&stub);
-}
-
-
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
-  __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
-                                       Register object_reg,
-                                       Handle<JSObject> holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       Handle<Name> name,
-                                       Label* miss,
-                                       PrototypeCheckType check) {
-  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
-
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
-         !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  Handle<JSObject> current = Handle<JSObject>::null();
-  if (type->IsConstant()) {
-    current = Handle<JSObject>::cast(type->AsConstant()->Value());
-  }
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
-  Handle<Map> holder_map(holder->map());
-  // Traverse the prototype chain and check the maps in the prototype chain for
-  // fast and global objects or do negative lookup for normal objects.
-  while (!current_map.is_identical_to(holder_map)) {
-    ++depth;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap() &&
-        !current_map->IsJSGlobalProxyMap()) {
-      if (!name->IsUniqueName()) {
-        ASSERT(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
-      ASSERT(current.is_null() ||
-             current->property_dictionary()->FindEntry(name) ==
-             NameDictionary::kNotFound);
-
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
-
-      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      Register map_reg = scratch1;
-      if (depth != 1 || check == CHECK_ALL_MAPS) {
-        // CheckMap implicitly loads the map of |reg| into |map_reg|.
-        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
-      } else {
-        __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
-      if (current_map->IsJSGlobalProxyMap()) {
-        __ CheckAccessGlobalProxy(reg, scratch2, miss);
-      } else if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(
-            masm(), Handle<JSGlobalObject>::cast(current), name,
-            scratch2, miss);
-      }
-
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (heap()->InNewSpace(*prototype)) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ mov(reg, Operand(prototype));
-      }
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-    current_map = handle(current->map());
-  }
-
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  if (depth != 0 || check == CHECK_ALL_MAPS) {
-    // Check the holder map.
-    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
-  }
-
-  // Perform security check for access to the global object.
-  ASSERT(current_map->IsJSGlobalProxyMap() ||
-         !current_map->is_access_check_needed());
-  if (current_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
-
-  // Return the register containing the holder.
-  return reg;
-}
-
-
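// The prototype walk in CheckPrototypes above runs entirely at compile time;
// only the per-hop checks are emitted. A minimal model of the loop structure
// (hypothetical types, not V8 declarations):
struct MapModelSketch {
  MapModelSketch* prototype_map;  // map of this map's prototype
};

static int CheckDepthSketch(MapModelSketch* current, MapModelSketch* holder) {
  int depth = 0;  // logged above as "check-maps-depth" with depth + 1
  while (current != holder) {
    ++depth;  // one map check or negative lookup per hop
    current = current->prototype_map;
  }
  return depth;
}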
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ b(&success);
-    __ bind(miss);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ b(&success);
-    GenerateRestoreName(masm(), miss, name);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-Register LoadStubCompiler::CallbackHandlerFrontend(
-    Handle<HeapType> type,
-    Register object_reg,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<Object> callback) {
-  Label miss;
-
-  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-
-  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
-    ASSERT(!reg.is(scratch2()));
-    ASSERT(!reg.is(scratch3()));
-    ASSERT(!reg.is(scratch4()));
-
-    // Load the properties dictionary.
-    Register dictionary = scratch4();
-    __ ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
-    // Probe the dictionary.
-    Label probe_done;
-    NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
-                                                     &miss,
-                                                     &probe_done,
-                                                     dictionary,
-                                                     this->name(),
-                                                     scratch2(),
-                                                     scratch3());
-    __ bind(&probe_done);
-
-    // If probing finds an entry in the dictionary, scratch3 contains the
-    // pointer into the dictionary. Check that the value is the callback.
-    Register pointer = scratch3();
-    const int kElementsStartOffset = NameDictionary::kHeaderSize +
-        NameDictionary::kElementsStartIndex * kPointerSize;
-    const int kValueOffset = kElementsStartOffset + kPointerSize;
-    __ ldr(scratch2(), FieldMemOperand(pointer, kValueOffset));
-    __ cmp(scratch2(), Operand(callback));
-    __ b(ne, &miss);
-  }
-
-  HandlerFrontendFooter(name, &miss);
-  return reg;
-}
-
-
-void LoadStubCompiler::GenerateLoadField(Register reg,
-                                         Handle<JSObject> holder,
-                                         FieldIndex field,
-                                         Representation representation) {
-  if (!reg.is(receiver())) __ mov(receiver(), reg);
-  if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  } else {
-    KeyedLoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  }
-}
-
-
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(r0, value);
-  __ Ret();
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
-    Register reg,
-    Handle<ExecutableAccessorInfo> callback) {
-  // Build the AccessorInfo::args_ list on the stack and push the property
-  // name below the exit frame, so the GC is aware of these slots and the
-  // pointers stored in them.
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-  ASSERT(!scratch2().is(reg));
-  ASSERT(!scratch3().is(reg));
-  ASSERT(!scratch4().is(reg));
-  __ push(receiver());
-  if (heap()->InNewSpace(callback->data())) {
-    __ Move(scratch3(), callback);
-    __ ldr(scratch3(), FieldMemOperand(scratch3(),
-                                       ExecutableAccessorInfo::kDataOffset));
-  } else {
-    __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
-  }
-  __ push(scratch3());
-  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
-  __ mov(scratch4(), scratch3());
-  __ Push(scratch3(), scratch4());
-  __ mov(scratch4(),
-         Operand(ExternalReference::isolate_address(isolate())));
-  __ Push(scratch4(), reg);
-  __ mov(scratch2(), sp);  // scratch2 = PropertyCallbackArguments::args_
-  __ push(name());
-
-  // Abi for CallApiGetter
-  Register getter_address_reg = r2;
-
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ mov(getter_address_reg, Operand(ref));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
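// For reference, the frame the pushes above build, low addresses first
// (scratch2 == args_ points at the holder, so the name sits just below the
// PropertyCallbackArguments block). Reconstructed from the push sequence and
// the STATIC_ASSERTed indices:
//   name                <- sp after the final push
//   holder (reg)        <- args_[0], kHolderIndex
//   isolate address     <- args_[1], kIsolateIndex
//   undefined           <- args_[2], kReturnValueDefaultValueIndex
//   undefined           <- args_[3], kReturnValueOffset
//   callback data       <- args_[4], kDataIndex
//   receiver            <- args_[5], kThisIndex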
-void LoadStubCompiler::GenerateLoadInterceptor(
-    Register holder_reg,
-    Handle<Object> object,
-    Handle<JSObject> interceptor_holder,
-    LookupResult* lookup,
-    Handle<Name> name) {
-  ASSERT(interceptor_holder->HasNamedInterceptor());
-  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
-  // So far the most popular follow-ups for interceptor loads are FIELD and
-  // CALLBACKS, so inline only those; other cases may be added later.
-  bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
-    if (lookup->IsField()) {
-      compile_followup_inline = true;
-    } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
-      ExecutableAccessorInfo* callback =
-          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
-      compile_followup_inline = callback->getter() != NULL &&
-          callback->IsCompatibleReceiver(*object);
-    }
-  }
-
-  if (compile_followup_inline) {
-    // Compile the interceptor call, followed by inline code to load the
-    // property from further up the prototype chain if the call fails.
-    // Check that the maps haven't changed.
-    ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
-    // Preserve the receiver register explicitly whenever it differs from the
-    // holder and is needed in case the interceptor returns without a result:
-    // the CALLBACKS case needs the receiver to be passed into C++ code, and
-    // the FIELD case might cause a miss during the prototype check.
-    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
-    bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
-        (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
-    // Save necessary data before invoking an interceptor.
-    // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (must_preserve_receiver_reg) {
-        __ Push(receiver(), holder_reg, this->name());
-      } else {
-        __ Push(holder_reg, this->name());
-      }
-      // Invoke an interceptor.  Note: map checks from the receiver to the
-      // interceptor's holder have been compiled before (see a caller of
-      // this method).
-      CompileCallLoadPropertyWithInterceptor(
-          masm(), receiver(), holder_reg, this->name(), interceptor_holder,
-          IC::kLoadPropertyWithInterceptorOnly);
-
-      // Check if interceptor provided a value for property.  If it's
-      // the case, return immediately.
-      Label interceptor_failed;
-      __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
-      __ cmp(r0, scratch1());
-      __ b(eq, &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ Ret();
-
-      __ bind(&interceptor_failed);
-      __ pop(this->name());
-      __ pop(holder_reg);
-      if (must_preserve_receiver_reg) {
-        __ pop(receiver());
-      }
-      // Leave the internal frame.
-    }
-
-    GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
-  } else {  // !compile_followup_inline
-    // Call the runtime system to load the interceptor.
-    // Check that the maps haven't changed.
-    PushInterceptorArguments(masm(), receiver(), holder_reg,
-                             this->name(), interceptor_holder);
-
-    ExternalReference ref =
-        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
-                          isolate());
-    __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
-  }
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = HandlerFrontend(
-      IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
-
-  // Stub never generated for non-global objects that require access checks.
-  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
-  __ push(receiver());  // receiver
-  __ push(holder_reg);
-  __ mov(ip, Operand(callback));  // callback info
-  __ push(ip);
-  __ mov(ip, Operand(name));
-  __ Push(ip, value());
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_callback_property =
-      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
-  __ TailCallExternalReference(store_callback_property, 5, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
-    MacroAssembler* masm,
-    Handle<HeapType> type,
-    Register receiver,
-    Handle<JSFunction> setter) {
-  // ----------- S t a t e -------------
-  //  -- lr    : return address
-  // -----------------------------------
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
-    // Save value register, so we can restore it later.
-    __ push(value());
-
-    if (!setter.is_null()) {
-      // Call the JavaScript setter with receiver and value on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ ldr(receiver,
-               FieldMemOperand(
-                   receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ Push(receiver, value());
-      ParameterCount actual(1);
-      ParameterCount expected(setter);
-      __ InvokeFunction(setter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // We have to return the passed value, not the return value of the setter.
-    __ pop(r0);
-
-    // Restore context register.
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  }
-  __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> object,
-    Handle<Name> name) {
-  __ Push(receiver(), this->name(), value());
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_ic_property =
-      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
-  __ TailCallExternalReference(store_ic_property, 3, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
-                                                      Handle<JSObject> last,
-                                                      Handle<Name> name) {
-  NonexistentHandlerFrontend(type, last, name);
-
-  // Return undefined if the maps of the full prototype chain are still the
-  // same and no global property with this name contains a value.
-  __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
-  __ Ret();
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { r0, r2, r3, r1, r4, r5 };
-  return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { r1, r0, r2, r3, r4, r5 };
-  return registers;
-}
-
-
-Register StoreStubCompiler::value() {
-  return r0;
-}
-
-
-Register* StoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { r1, r2, r3, r4, r5 };
-  return registers;
-}
-
-
-Register* KeyedStoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { r2, r1, r3, r4, r5 };
-  return registers;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
-                                             Handle<HeapType> type,
-                                             Register receiver,
-                                             Handle<JSFunction> getter) {
-  // ----------- S t a t e -------------
-  //  -- r0    : receiver
-  //  -- r2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-  {
-    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
-
-    if (!getter.is_null()) {
-      // Call the JavaScript getter with the receiver on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ ldr(receiver,
-               FieldMemOperand(
-                   receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(getter);
-      __ InvokeFunction(getter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // Restore context register.
-    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  }
-  __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<HeapType> type,
-    Handle<GlobalObject> global,
-    Handle<PropertyCell> cell,
-    Handle<Name> name,
-    bool is_dont_delete) {
-  Label miss;
-  HandlerFrontendHeader(type, receiver(), global, name, &miss);
-
-  // Get the value from the cell.
-  __ mov(r3, Operand(cell));
-  __ ldr(r4, FieldMemOperand(r3, Cell::kValueOffset));
-
-  // Check for deleted property if property can actually be deleted.
-  if (!is_dont_delete) {
-    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
-    __ cmp(r4, ip);
-    __ b(eq, &miss);
-  }
-
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
-  __ mov(r0, r4);
-  __ Ret();
-
-  HandlerFrontendFooter(name, &miss);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    TypeHandleList* types,
-    CodeHandleList* handlers,
-    Handle<Name> name,
-    Code::StubType type,
-    IcCheckType check) {
-  Label miss;
-
-  if (check == PROPERTY &&
-      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    __ cmp(this->name(), Operand(name));
-    __ b(ne, &miss);
-  }
-
-  Label number_case;
-  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
-  __ JumpIfSmi(receiver(), smi_target);
-
-  Register map_reg = scratch1();
-
-  int receiver_count = types->length();
-  int number_of_handled_maps = 0;
-  __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<HeapType> type = types->at(current);
-    Handle<Map> map = IC::TypeToMap(*type, isolate());
-    if (!map->is_deprecated()) {
-      number_of_handled_maps++;
-      __ mov(ip, Operand(map));
-      __ cmp(map_reg, ip);
-      if (type->Is(HeapType::Number())) {
-        ASSERT(!number_case.is_unused());
-        __ bind(&number_case);
-      }
-      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
-    }
-  }
-  ASSERT(number_of_handled_maps != 0);
-
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  InlineCacheState state =
-      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
-  return GetICCode(kind(), type, name, state);
-}
-
-
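// Shape of the dispatch emitted above, as straight-line C++: one map compare
// per non-deprecated map, all sharing a single miss exit (hypothetical
// helper, not a V8 declaration).
static void* PolymorphicDispatchSketch(void* receiver_map, void* const maps[],
                                       void* const handlers[], int n) {
  for (int i = 0; i < n; ++i) {
    if (receiver_map == maps[i]) return handlers[i];  // __ Jump(..., eq)
  }
  return 0;  // miss: TailCallBuiltin(MissBuiltin(kind()))
}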
-void StoreStubCompiler::GenerateStoreArrayLength() {
-  // Prepare tail call to StoreIC_ArrayLength.
-  __ Push(receiver(), value());
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
-                        masm()->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
-  Label miss;
-  __ JumpIfSmi(receiver(), &miss);
-
-  int receiver_count = receiver_maps->length();
-  __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
-  for (int i = 0; i < receiver_count; ++i) {
-    __ mov(ip, Operand(receiver_maps->at(i)));
-    __ cmp(scratch1(), ip);
-    if (transitioned_maps->at(i).is_null()) {
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
-    } else {
-      Label next_map;
-      __ b(ne, &next_map);
-      __ mov(transition_map(), Operand(transitioned_maps->at(i)));
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
-      __ bind(&next_map);
-    }
-  }
-
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  return GetICCode(
-      kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
-    MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  Label slow, miss;
-
-  Register key = r0;
-  Register receiver = r1;
-
-  __ UntagAndJumpIfNotSmi(r2, key, &miss);
-  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5);
-  __ Ret();
-
-  __ bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_external_array_slow(),
-      1, r2, r3);
-
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
-  // Miss case, call the runtime.
-  __ bind(&miss);
-
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- r0     : key
-  //  -- r1     : receiver
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/arm64/assembler-arm64-inl.h b/src/arm64/assembler-arm64-inl.h
index 135858d..5e1bed1 100644
--- a/src/arm64/assembler-arm64-inl.h
+++ b/src/arm64/assembler-arm64-inl.h
@@ -6,7 +6,7 @@
 #define V8_ARM64_ASSEMBLER_ARM64_INL_H_
 
 #include "src/arm64/assembler-arm64.h"
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/debug.h"
 
 
@@ -25,7 +25,7 @@
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
       IsCodeTarget(rmode_)) {
@@ -37,54 +37,54 @@
 
 
 inline unsigned CPURegister::code() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return reg_code;
 }
 
 
 inline CPURegister::RegisterType CPURegister::type() const {
-  ASSERT(IsValidOrNone());
+  DCHECK(IsValidOrNone());
   return reg_type;
 }
 
 
 inline RegList CPURegister::Bit() const {
-  ASSERT(reg_code < (sizeof(RegList) * kBitsPerByte));
+  DCHECK(reg_code < (sizeof(RegList) * kBitsPerByte));
   return IsValid() ? 1UL << reg_code : 0;
 }
 
 
 inline unsigned CPURegister::SizeInBits() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return reg_size;
 }
 
 
 inline int CPURegister::SizeInBytes() const {
-  ASSERT(IsValid());
-  ASSERT(SizeInBits() % 8 == 0);
+  DCHECK(IsValid());
+  DCHECK(SizeInBits() % 8 == 0);
   return reg_size / 8;
 }
 
 
 inline bool CPURegister::Is32Bits() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return reg_size == 32;
 }
 
 
 inline bool CPURegister::Is64Bits() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return reg_size == 64;
 }
 
 
 inline bool CPURegister::IsValid() const {
   if (IsValidRegister() || IsValidFPRegister()) {
-    ASSERT(!IsNone());
+    DCHECK(!IsNone());
     return true;
   } else {
-    ASSERT(IsNone());
+    DCHECK(IsNone());
     return false;
   }
 }
@@ -106,21 +106,21 @@
 
 inline bool CPURegister::IsNone() const {
   // kNoRegister types should always have size 0 and code 0.
-  ASSERT((reg_type != kNoRegister) || (reg_code == 0));
-  ASSERT((reg_type != kNoRegister) || (reg_size == 0));
+  DCHECK((reg_type != kNoRegister) || (reg_code == 0));
+  DCHECK((reg_type != kNoRegister) || (reg_size == 0));
 
   return reg_type == kNoRegister;
 }
 
 
 inline bool CPURegister::Is(const CPURegister& other) const {
-  ASSERT(IsValidOrNone() && other.IsValidOrNone());
+  DCHECK(IsValidOrNone() && other.IsValidOrNone());
   return Aliases(other) && (reg_size == other.reg_size);
 }
 
 
 inline bool CPURegister::Aliases(const CPURegister& other) const {
-  ASSERT(IsValidOrNone() && other.IsValidOrNone());
+  DCHECK(IsValidOrNone() && other.IsValidOrNone());
   return (reg_code == other.reg_code) && (reg_type == other.reg_type);
 }
 
@@ -146,27 +146,27 @@
 
 
 inline bool CPURegister::IsZero() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return IsRegister() && (reg_code == kZeroRegCode);
 }
 
 
 inline bool CPURegister::IsSP() const {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   return IsRegister() && (reg_code == kSPRegInternalCode);
 }
 
 
 inline void CPURegList::Combine(const CPURegList& other) {
-  ASSERT(IsValid());
-  ASSERT(other.type() == type_);
-  ASSERT(other.RegisterSizeInBits() == size_);
+  DCHECK(IsValid());
+  DCHECK(other.type() == type_);
+  DCHECK(other.RegisterSizeInBits() == size_);
   list_ |= other.list();
 }
 
 
 inline void CPURegList::Remove(const CPURegList& other) {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   if (other.type() == type_) {
     list_ &= ~other.list();
   }
@@ -174,8 +174,8 @@
 
 
 inline void CPURegList::Combine(const CPURegister& other) {
-  ASSERT(other.type() == type_);
-  ASSERT(other.SizeInBits() == size_);
+  DCHECK(other.type() == type_);
+  DCHECK(other.SizeInBits() == size_);
   Combine(other.code());
 }
 
@@ -192,15 +192,15 @@
 
 
 inline void CPURegList::Combine(int code) {
-  ASSERT(IsValid());
-  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+  DCHECK(IsValid());
+  DCHECK(CPURegister::Create(code, size_, type_).IsValid());
   list_ |= (1UL << code);
 }
 
 
 inline void CPURegList::Remove(int code) {
-  ASSERT(IsValid());
-  ASSERT(CPURegister::Create(code, size_, type_).IsValid());
+  DCHECK(IsValid());
+  DCHECK(CPURegister::Create(code, size_, type_).IsValid());
   list_ &= ~(1UL << code);
 }
 
@@ -209,7 +209,7 @@
   if (code == kSPRegInternalCode) {
     return csp;
   } else {
-    ASSERT(code < kNumberOfRegisters);
+    DCHECK(code < kNumberOfRegisters);
     return Register::Create(code, kXRegSizeInBits);
   }
 }
@@ -219,44 +219,44 @@
   if (code == kSPRegInternalCode) {
     return wcsp;
   } else {
-    ASSERT(code < kNumberOfRegisters);
+    DCHECK(code < kNumberOfRegisters);
     return Register::Create(code, kWRegSizeInBits);
   }
 }
 
 
 inline FPRegister FPRegister::SRegFromCode(unsigned code) {
-  ASSERT(code < kNumberOfFPRegisters);
+  DCHECK(code < kNumberOfFPRegisters);
   return FPRegister::Create(code, kSRegSizeInBits);
 }
 
 
 inline FPRegister FPRegister::DRegFromCode(unsigned code) {
-  ASSERT(code < kNumberOfFPRegisters);
+  DCHECK(code < kNumberOfFPRegisters);
   return FPRegister::Create(code, kDRegSizeInBits);
 }
 
 
 inline Register CPURegister::W() const {
-  ASSERT(IsValidRegister());
+  DCHECK(IsValidRegister());
   return Register::WRegFromCode(reg_code);
 }
 
 
 inline Register CPURegister::X() const {
-  ASSERT(IsValidRegister());
+  DCHECK(IsValidRegister());
   return Register::XRegFromCode(reg_code);
 }
 
 
 inline FPRegister CPURegister::S() const {
-  ASSERT(IsValidFPRegister());
+  DCHECK(IsValidFPRegister());
   return FPRegister::SRegFromCode(reg_code);
 }
 
 
 inline FPRegister CPURegister::D() const {
-  ASSERT(IsValidFPRegister());
+  DCHECK(IsValidFPRegister());
   return FPRegister::DRegFromCode(reg_code);
 }
 
@@ -341,9 +341,9 @@
       shift_(shift),
       extend_(NO_EXTEND),
       shift_amount_(shift_amount) {
-  ASSERT(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
-  ASSERT(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
-  ASSERT(!reg.IsSP());
+  DCHECK(reg.Is64Bits() || (shift_amount < kWRegSizeInBits));
+  DCHECK(reg.Is32Bits() || (shift_amount < kXRegSizeInBits));
+  DCHECK(!reg.IsSP());
 }
 
 
@@ -353,12 +353,12 @@
       shift_(NO_SHIFT),
       extend_(extend),
       shift_amount_(shift_amount) {
-  ASSERT(reg.IsValid());
-  ASSERT(shift_amount <= 4);
-  ASSERT(!reg.IsSP());
+  DCHECK(reg.IsValid());
+  DCHECK(shift_amount <= 4);
+  DCHECK(!reg.IsSP());
 
   // Extend modes SXTX and UXTX require a 64-bit register.
-  ASSERT(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
+  DCHECK(reg.Is64Bits() || ((extend != SXTX) && (extend != UXTX)));
 }
 
 
@@ -387,57 +387,61 @@
 
 
 Operand Operand::ToExtendedRegister() const {
-  ASSERT(IsShiftedRegister());
-  ASSERT((shift_ == LSL) && (shift_amount_ <= 4));
+  DCHECK(IsShiftedRegister());
+  DCHECK((shift_ == LSL) && (shift_amount_ <= 4));
   return Operand(reg_, reg_.Is64Bits() ? UXTX : UXTW, shift_amount_);
 }
 
 
 Immediate Operand::immediate() const {
-  ASSERT(IsImmediate());
+  DCHECK(IsImmediate());
   return immediate_;
 }
 
 
 int64_t Operand::ImmediateValue() const {
-  ASSERT(IsImmediate());
+  DCHECK(IsImmediate());
   return immediate_.value();
 }
 
 
 Register Operand::reg() const {
-  ASSERT(IsShiftedRegister() || IsExtendedRegister());
+  DCHECK(IsShiftedRegister() || IsExtendedRegister());
   return reg_;
 }
 
 
 Shift Operand::shift() const {
-  ASSERT(IsShiftedRegister());
+  DCHECK(IsShiftedRegister());
   return shift_;
 }
 
 
 Extend Operand::extend() const {
-  ASSERT(IsExtendedRegister());
+  DCHECK(IsExtendedRegister());
   return extend_;
 }
 
 
 unsigned Operand::shift_amount() const {
-  ASSERT(IsShiftedRegister() || IsExtendedRegister());
+  DCHECK(IsShiftedRegister() || IsExtendedRegister());
   return shift_amount_;
 }
 
 
 Operand Operand::UntagSmi(Register smi) {
-  ASSERT(smi.Is64Bits());
+  STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
+                                                         kSmiValueSize));
+  DCHECK(smi.Is64Bits());
   return Operand(smi, ASR, kSmiShift);
 }
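// Editor's sketch, not part of the patch: why ASR by kSmiShift untags. The
// STATIC_ASSERT above pins the arm64 smi layout to kSmiShift == 32 and
// kSmiValueSize == 32, so the payload sits in the upper half of the X
// register. A stand-alone illustration under those assumptions:
#include <cassert>
#include <cstdint>

int64_t TagSmi(int32_t value) {
  // Payload in the upper 32 bits; the low 32 bits hold the all-zero smi tag.
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}

int32_t UntagSmi(int64_t smi) {
  // Arithmetic right shift (what Operand(smi, ASR, kSmiShift) encodes)
  // recovers the signed payload.
  return static_cast<int32_t>(smi >> 32);
}

int main() {
  assert(UntagSmi(TagSmi(-42)) == -42);
  return 0;
}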
 
 
 Operand Operand::UntagSmiAndScale(Register smi, int scale) {
-  ASSERT(smi.Is64Bits());
-  ASSERT((scale >= 0) && (scale <= (64 - kSmiValueSize)));
+  STATIC_ASSERT(kXRegSizeInBits == static_cast<unsigned>(kSmiShift +
+                                                         kSmiValueSize));
+  DCHECK(smi.Is64Bits());
+  DCHECK((scale >= 0) && (scale <= (64 - kSmiValueSize)));
   if (scale > kSmiShift) {
     return Operand(smi, LSL, scale - kSmiShift);
   } else if (scale < kSmiShift) {
@@ -453,10 +457,10 @@
 }
 
 
-MemOperand::MemOperand(Register base, ptrdiff_t offset, AddrMode addrmode)
+MemOperand::MemOperand(Register base, int64_t offset, AddrMode addrmode)
   : base_(base), regoffset_(NoReg), offset_(offset), addrmode_(addrmode),
     shift_(NO_SHIFT), extend_(NO_EXTEND), shift_amount_(0) {
-  ASSERT(base.Is64Bits() && !base.IsZero());
+  DCHECK(base.Is64Bits() && !base.IsZero());
 }
 
 
@@ -466,12 +470,12 @@
                        unsigned shift_amount)
   : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
     shift_(NO_SHIFT), extend_(extend), shift_amount_(shift_amount) {
-  ASSERT(base.Is64Bits() && !base.IsZero());
-  ASSERT(!regoffset.IsSP());
-  ASSERT((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
+  DCHECK(base.Is64Bits() && !base.IsZero());
+  DCHECK(!regoffset.IsSP());
+  DCHECK((extend == UXTW) || (extend == SXTW) || (extend == SXTX));
 
   // SXTX extend mode requires a 64-bit offset register.
-  ASSERT(regoffset.Is64Bits() || (extend != SXTX));
+  DCHECK(regoffset.Is64Bits() || (extend != SXTX));
 }
 
 
@@ -481,22 +485,22 @@
                        unsigned shift_amount)
   : base_(base), regoffset_(regoffset), offset_(0), addrmode_(Offset),
     shift_(shift), extend_(NO_EXTEND), shift_amount_(shift_amount) {
-  ASSERT(base.Is64Bits() && !base.IsZero());
-  ASSERT(regoffset.Is64Bits() && !regoffset.IsSP());
-  ASSERT(shift == LSL);
+  DCHECK(base.Is64Bits() && !base.IsZero());
+  DCHECK(regoffset.Is64Bits() && !regoffset.IsSP());
+  DCHECK(shift == LSL);
 }
 
 
 MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
   : base_(base), addrmode_(addrmode) {
-  ASSERT(base.Is64Bits() && !base.IsZero());
+  DCHECK(base.Is64Bits() && !base.IsZero());
 
   if (offset.IsImmediate()) {
     offset_ = offset.ImmediateValue();
 
     regoffset_ = NoReg;
   } else if (offset.IsShiftedRegister()) {
-    ASSERT(addrmode == Offset);
+    DCHECK(addrmode == Offset);
 
     regoffset_ = offset.reg();
+    shift_ = offset.shift();
@@ -506,11 +510,11 @@
     offset_ = 0;
 
     // These assertions match those in the shifted-register constructor.
-    ASSERT(regoffset_.Is64Bits() && !regoffset_.IsSP());
-    ASSERT(shift_ == LSL);
+    DCHECK(regoffset_.Is64Bits() && !regoffset_.IsSP());
+    DCHECK(shift_ == LSL);
   } else {
-    ASSERT(offset.IsExtendedRegister());
-    ASSERT(addrmode == Offset);
+    DCHECK(offset.IsExtendedRegister());
+    DCHECK(addrmode == Offset);
 
     regoffset_ = offset.reg();
     extend_ = offset.extend();
@@ -520,9 +524,9 @@
     offset_ = 0;
 
     // These assertions match those in the extended-register constructor.
-    ASSERT(!regoffset_.IsSP());
-    ASSERT((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
-    ASSERT((regoffset_.Is64Bits() || (extend_ != SXTX)));
+    DCHECK(!regoffset_.IsSP());
+    DCHECK((extend_ == UXTW) || (extend_ == SXTW) || (extend_ == SXTX));
+    DCHECK((regoffset_.Is64Bits() || (extend_ != SXTX)));
   }
 }
 
@@ -549,7 +553,7 @@
   if (IsImmediateOffset()) {
     return offset();
   } else {
-    ASSERT(IsRegisterOffset());
+    DCHECK(IsRegisterOffset());
     if (extend() == NO_EXTEND) {
       return Operand(regoffset(), shift(), shift_amount());
     } else {
@@ -571,7 +575,7 @@
 
 Address Assembler::target_pointer_address_at(Address pc) {
   Instruction* instr = reinterpret_cast<Instruction*>(pc);
-  ASSERT(instr->IsLdrLiteralX());
+  DCHECK(instr->IsLdrLiteralX());
   return reinterpret_cast<Address>(instr->ImmPCOffsetTarget());
 }
 
@@ -598,11 +602,16 @@
   Address candidate = pc - 2 * kInstructionSize;
   Instruction* instr = reinterpret_cast<Instruction*>(candidate);
   USE(instr);
-  ASSERT(instr->IsLdrLiteralX());
+  DCHECK(instr->IsLdrLiteralX());
   return candidate;
 }
 
 
+Address Assembler::break_address_from_return_address(Address pc) {
+  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
+}
+
+
 Address Assembler::return_address_from_call_start(Address pc) {
   // The call, generated by MacroAssembler::Call, is one of two possible
   // sequences:
@@ -626,14 +635,14 @@
   Instruction* instr = reinterpret_cast<Instruction*>(pc);
   if (instr->IsMovz()) {
     // Verify the instruction sequence.
-    ASSERT(instr->following(1)->IsMovk());
-    ASSERT(instr->following(2)->IsMovk());
-    ASSERT(instr->following(3)->IsBranchAndLinkToRegister());
+    DCHECK(instr->following(1)->IsMovk());
+    DCHECK(instr->following(2)->IsMovk());
+    DCHECK(instr->following(3)->IsBranchAndLinkToRegister());
     return pc + Assembler::kCallSizeWithoutRelocation;
   } else {
     // Verify the instruction sequence.
-    ASSERT(instr->IsLdrLiteralX());
-    ASSERT(instr->following(1)->IsBranchAndLinkToRegister());
+    DCHECK(instr->IsLdrLiteralX());
+    DCHECK(instr->following(1)->IsBranchAndLinkToRegister());
     return pc + Assembler::kCallSizeWithRelocation;
   }
 }
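// Editor's sketch, not authoritative: the two call sequences verified above
// imply these return-address offsets, assuming 4-byte instructions and that
// kCallSizeWithoutRelocation / kCallSizeWithRelocation span four and two
// instructions respectively.
constexpr int kInstrSize = 4;

int ReturnAddressOffset(bool starts_with_movz) {
  // movz; movk; movk; blr      -> return address after 4 instructions
  // ldr xN, <literal>; blr     -> return address after 2 instructions
  return (starts_with_movz ? 4 : 2) * kInstrSize;
}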
@@ -652,7 +661,7 @@
   Memory::Address_at(target_pointer_address_at(pc)) = target;
   // Intuitively, we would think it is necessary to always flush the
   // instruction cache after patching a target address in the code as follows:
-  //   CPU::FlushICache(pc, sizeof(target));
+  //   CpuFeatures::FlushICache(pc, sizeof(target));
   // However, on ARM, an instruction is actually patched in the case of
   // embedded constants of the form:
   // ldr   ip, [pc, #...]
@@ -676,13 +685,13 @@
 
 
 Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
                               || rmode_ == EXTERNAL_REFERENCE);
   return Assembler::target_pointer_address_at(pc_);
@@ -690,19 +699,19 @@
 
 
 Address RelocInfo::constant_pool_entry_address() {
-  ASSERT(IsInConstantPool());
+  DCHECK(IsInConstantPool());
   return Assembler::target_pointer_address_at(pc_);
 }
 
 
 Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
 }
 
 
 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Handle<Object>(reinterpret_cast<Object**>(
       Assembler::target_address_at(pc_, host_)));
 }
@@ -711,8 +720,7 @@
 void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  ASSERT(!target->IsConsString());
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, host_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
@@ -726,13 +734,13 @@
 
 
 Address RelocInfo::target_reference() {
-  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  DCHECK(rmode_ == EXTERNAL_REFERENCE);
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   return target_address();
 }
 
@@ -740,7 +748,7 @@
 void RelocInfo::set_target_runtime_entry(Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target) {
     set_target_address(target, write_barrier_mode, icache_flush_mode);
   }
@@ -755,7 +763,7 @@
 
 
 Cell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   return Cell::FromValueAddress(Memory::Address_at(pc_));
 }
 
@@ -778,7 +786,7 @@
 
 
 Code* RelocInfo::code_age_stub() {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   // Read the stub entry point from the code age sequence.
   Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
   return Code::GetCodeFromTargetAddress(Memory::Address_at(stub_entry_address));
@@ -787,8 +795,8 @@
 
 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(!Code::IsYoungSequence(stub->GetIsolate(), pc_));
   // Overwrite the stub entry point in the code age sequence. This is loaded as
   // a literal so there is no need to call FlushICache here.
   Address stub_entry_address = pc_ + kCodeAgeStubEntryOffset;
@@ -797,7 +805,7 @@
 
 
 Address RelocInfo::call_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   // For the above sequences, the RelocInfo points to the load literal loading
   // the call address.
@@ -806,7 +814,7 @@
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Assembler::set_target_address_at(pc_, host_, target);
   if (host() != NULL) {
@@ -818,7 +826,7 @@
 
 
 void RelocInfo::WipeOut() {
-  ASSERT(IsEmbeddedObject(rmode_) ||
+  DCHECK(IsEmbeddedObject(rmode_) ||
          IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) ||
          IsExternalReference(rmode_));
@@ -890,11 +898,11 @@
 
 
 LoadStoreOp Assembler::LoadOpFor(const CPURegister& rt) {
-  ASSERT(rt.IsValid());
+  DCHECK(rt.IsValid());
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDR_x : LDR_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDR_d : LDR_s;
   }
 }
@@ -902,23 +910,23 @@
 
 LoadStorePairOp Assembler::LoadPairOpFor(const CPURegister& rt,
                                          const CPURegister& rt2) {
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
   USE(rt2);
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDP_x : LDP_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDP_d : LDP_s;
   }
 }
 
 
 LoadStoreOp Assembler::StoreOpFor(const CPURegister& rt) {
-  ASSERT(rt.IsValid());
+  DCHECK(rt.IsValid());
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? STR_x : STR_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? STR_d : STR_s;
   }
 }
@@ -926,12 +934,12 @@
 
 LoadStorePairOp Assembler::StorePairOpFor(const CPURegister& rt,
                                           const CPURegister& rt2) {
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
   USE(rt2);
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? STP_x : STP_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? STP_d : STP_s;
   }
 }
@@ -939,12 +947,12 @@
 
 LoadStorePairNonTemporalOp Assembler::LoadPairNonTemporalOpFor(
     const CPURegister& rt, const CPURegister& rt2) {
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
   USE(rt2);
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDNP_x : LDNP_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDNP_d : LDNP_s;
   }
 }
@@ -952,12 +960,12 @@
 
 LoadStorePairNonTemporalOp Assembler::StorePairNonTemporalOpFor(
     const CPURegister& rt, const CPURegister& rt2) {
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
   USE(rt2);
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? STNP_x : STNP_w;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? STNP_d : STNP_s;
   }
 }
@@ -967,16 +975,16 @@
   if (rt.IsRegister()) {
     return rt.Is64Bits() ? LDR_x_lit : LDR_w_lit;
   } else {
-    ASSERT(rt.IsFPRegister());
+    DCHECK(rt.IsFPRegister());
     return rt.Is64Bits() ? LDR_d_lit : LDR_s_lit;
   }
 }
 
 
 int Assembler::LinkAndGetInstructionOffsetTo(Label* label) {
-  ASSERT(kStartOfLabelLinkChain == 0);
+  DCHECK(kStartOfLabelLinkChain == 0);
   int offset = LinkAndGetByteOffsetTo(label);
-  ASSERT(IsAligned(offset, kInstructionSize));
+  DCHECK(IsAligned(offset, kInstructionSize));
   return offset >> kInstructionSizeLog2;
 }
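// Editor's note: the shift above relies on arithmetic right-shift so that
// backward (negative) byte offsets stay negative. A minimal stand-in,
// assuming kInstructionSizeLog2 == 2:
int ByteToInstructionOffset(int byte_offset) {
  return byte_offset >> 2;  // e.g. -16 bytes -> -4 instructions
}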
 
@@ -1031,7 +1039,7 @@
 
 
 Instr Assembler::ImmTestBranchBit(unsigned bit_pos) {
-  ASSERT(is_uint6(bit_pos));
+  DCHECK(is_uint6(bit_pos));
   // Subtract five from the shift offset, as we need bit 5 from bit_pos.
   unsigned b5 = bit_pos << (ImmTestBranchBit5_offset - 5);
   unsigned b40 = bit_pos << ImmTestBranchBit40_offset;
@@ -1047,7 +1055,7 @@
 
 
 Instr Assembler::ImmAddSub(int64_t imm) {
-  ASSERT(IsImmAddSub(imm));
+  DCHECK(IsImmAddSub(imm));
   if (is_uint12(imm)) {  // No shift required.
     return imm << ImmAddSub_offset;
   } else {
@@ -1057,7 +1065,7 @@
 
 
 Instr Assembler::ImmS(unsigned imms, unsigned reg_size) {
-  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
+  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(imms)) ||
          ((reg_size == kWRegSizeInBits) && is_uint5(imms)));
   USE(reg_size);
   return imms << ImmS_offset;
@@ -1065,26 +1073,26 @@
 
 
 Instr Assembler::ImmR(unsigned immr, unsigned reg_size) {
-  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
          ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
   USE(reg_size);
-  ASSERT(is_uint6(immr));
+  DCHECK(is_uint6(immr));
   return immr << ImmR_offset;
 }
 
 
 Instr Assembler::ImmSetBits(unsigned imms, unsigned reg_size) {
-  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
-  ASSERT(is_uint6(imms));
-  ASSERT((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
+  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+  DCHECK(is_uint6(imms));
+  DCHECK((reg_size == kXRegSizeInBits) || is_uint6(imms + 3));
   USE(reg_size);
   return imms << ImmSetBits_offset;
 }
 
 
 Instr Assembler::ImmRotate(unsigned immr, unsigned reg_size) {
-  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
-  ASSERT(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
+  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+  DCHECK(((reg_size == kXRegSizeInBits) && is_uint6(immr)) ||
          ((reg_size == kWRegSizeInBits) && is_uint5(immr)));
   USE(reg_size);
   return immr << ImmRotate_offset;
@@ -1098,21 +1106,21 @@
 
 
 Instr Assembler::BitN(unsigned bitn, unsigned reg_size) {
-  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
-  ASSERT((reg_size == kXRegSizeInBits) || (bitn == 0));
+  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+  DCHECK((reg_size == kXRegSizeInBits) || (bitn == 0));
   USE(reg_size);
   return bitn << BitN_offset;
 }
 
 
 Instr Assembler::ShiftDP(Shift shift) {
-  ASSERT(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
+  DCHECK(shift == LSL || shift == LSR || shift == ASR || shift == ROR);
   return shift << ShiftDP_offset;
 }
 
 
 Instr Assembler::ImmDPShift(unsigned amount) {
-  ASSERT(is_uint6(amount));
+  DCHECK(is_uint6(amount));
   return amount << ImmDPShift_offset;
 }
 
@@ -1123,13 +1131,13 @@
 
 
 Instr Assembler::ImmExtendShift(unsigned left_shift) {
-  ASSERT(left_shift <= 4);
+  DCHECK(left_shift <= 4);
   return left_shift << ImmExtendShift_offset;
 }
 
 
 Instr Assembler::ImmCondCmp(unsigned imm) {
-  ASSERT(is_uint5(imm));
+  DCHECK(is_uint5(imm));
   return imm << ImmCondCmp_offset;
 }
 
@@ -1140,75 +1148,75 @@
 
 
 Instr Assembler::ImmLSUnsigned(int imm12) {
-  ASSERT(is_uint12(imm12));
+  DCHECK(is_uint12(imm12));
   return imm12 << ImmLSUnsigned_offset;
 }
 
 
 Instr Assembler::ImmLS(int imm9) {
-  ASSERT(is_int9(imm9));
+  DCHECK(is_int9(imm9));
   return truncate_to_int9(imm9) << ImmLS_offset;
 }
 
 
 Instr Assembler::ImmLSPair(int imm7, LSDataSize size) {
-  ASSERT(((imm7 >> size) << size) == imm7);
+  DCHECK(((imm7 >> size) << size) == imm7);
   int scaled_imm7 = imm7 >> size;
-  ASSERT(is_int7(scaled_imm7));
+  DCHECK(is_int7(scaled_imm7));
   return truncate_to_int7(scaled_imm7) << ImmLSPair_offset;
 }
 
 
 Instr Assembler::ImmShiftLS(unsigned shift_amount) {
-  ASSERT(is_uint1(shift_amount));
+  DCHECK(is_uint1(shift_amount));
   return shift_amount << ImmShiftLS_offset;
 }
 
 
 Instr Assembler::ImmException(int imm16) {
-  ASSERT(is_uint16(imm16));
+  DCHECK(is_uint16(imm16));
   return imm16 << ImmException_offset;
 }
 
 
 Instr Assembler::ImmSystemRegister(int imm15) {
-  ASSERT(is_uint15(imm15));
+  DCHECK(is_uint15(imm15));
   return imm15 << ImmSystemRegister_offset;
 }
 
 
 Instr Assembler::ImmHint(int imm7) {
-  ASSERT(is_uint7(imm7));
+  DCHECK(is_uint7(imm7));
   return imm7 << ImmHint_offset;
 }
 
 
 Instr Assembler::ImmBarrierDomain(int imm2) {
-  ASSERT(is_uint2(imm2));
+  DCHECK(is_uint2(imm2));
   return imm2 << ImmBarrierDomain_offset;
 }
 
 
 Instr Assembler::ImmBarrierType(int imm2) {
-  ASSERT(is_uint2(imm2));
+  DCHECK(is_uint2(imm2));
   return imm2 << ImmBarrierType_offset;
 }
 
 
 LSDataSize Assembler::CalcLSDataSize(LoadStoreOp op) {
-  ASSERT((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
+  DCHECK((SizeLS_offset + SizeLS_width) == (kInstructionSize * 8));
   return static_cast<LSDataSize>(op >> SizeLS_offset);
 }
 
 
 Instr Assembler::ImmMoveWide(uint64_t imm) {
-  ASSERT(is_uint16(imm));
+  DCHECK(is_uint16(imm));
   return imm << ImmMoveWide_offset;
 }
 
 
 Instr Assembler::ShiftMoveWide(int64_t shift) {
-  ASSERT(is_uint2(shift));
+  DCHECK(is_uint2(shift));
   return shift << ShiftMoveWide_offset;
 }
 
@@ -1219,7 +1227,7 @@
 
 
 Instr Assembler::FPScale(unsigned scale) {
-  ASSERT(is_uint6(scale));
+  DCHECK(is_uint6(scale));
   return scale << FPScale_offset;
 }
 
@@ -1230,7 +1238,7 @@
 
 
 inline void Assembler::CheckBufferSpace() {
-  ASSERT(pc_ < (buffer_ + buffer_size_));
+  DCHECK(pc_ < (buffer_ + buffer_size_));
   if (buffer_space() < kGap) {
     GrowBuffer();
   }
@@ -1249,7 +1257,7 @@
 
 
 TypeFeedbackId Assembler::RecordedAstId() {
-  ASSERT(!recorded_ast_id_.IsNone());
+  DCHECK(!recorded_ast_id_.IsNone());
   return recorded_ast_id_;
 }
 
diff --git a/src/arm64/assembler-arm64.cc b/src/arm64/assembler-arm64.cc
index 90cff59..c1213e9 100644
--- a/src/arm64/assembler-arm64.cc
+++ b/src/arm64/assembler-arm64.cc
@@ -33,6 +33,8 @@
 #define ARM64_DEFINE_REG_STATICS
 
 #include "src/arm64/assembler-arm64-inl.h"
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
 
 namespace v8 {
 namespace internal {
@@ -47,9 +49,9 @@
     // csp will always be aligned if it is enabled by probing at runtime.
     if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
   } else {
-    CPU cpu;
-    if (FLAG_enable_always_align_csp && (cpu.implementer() == CPU::NVIDIA ||
-                                         FLAG_debug_code)) {
+    base::CPU cpu;
+    if (FLAG_enable_always_align_csp &&
+        (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
       supported_ |= 1u << ALWAYS_ALIGN_CSP;
     }
   }
@@ -64,25 +66,25 @@
 // CPURegList utilities.
 
 CPURegister CPURegList::PopLowestIndex() {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   if (IsEmpty()) {
     return NoCPUReg;
   }
   int index = CountTrailingZeros(list_, kRegListSizeInBits);
-  ASSERT((1 << index) & list_);
+  DCHECK((1 << index) & list_);
   Remove(index);
   return CPURegister::Create(index, size_, type_);
 }
 
 
 CPURegister CPURegList::PopHighestIndex() {
-  ASSERT(IsValid());
+  DCHECK(IsValid());
   if (IsEmpty()) {
     return NoCPUReg;
   }
   int index = CountLeadingZeros(list_, kRegListSizeInBits);
   index = kRegListSizeInBits - 1 - index;
-  ASSERT((1 << index) & list_);
+  DCHECK((1 << index) & list_);
   Remove(index);
   return CPURegister::Create(index, size_, type_);
 }
@@ -94,8 +96,8 @@
   } else if (type() == CPURegister::kFPRegister) {
     Remove(GetCalleeSavedFP(RegisterSizeInBits()));
   } else {
-    ASSERT(type() == CPURegister::kNoRegister);
-    ASSERT(IsEmpty());
+    DCHECK(type() == CPURegister::kNoRegister);
+    DCHECK(IsEmpty());
     // The list must already be empty, so do nothing.
   }
 }
@@ -190,7 +192,7 @@
   }
 
   // Indicate that code has changed.
-  CPU::FlushICache(pc_, instruction_count * kInstructionSize);
+  CpuFeatures::FlushICache(pc_, instruction_count * kInstructionSize);
 }
 
 
@@ -226,7 +228,7 @@
 
   const CPURegister regs[] = {reg1, reg2, reg3, reg4, reg5, reg6, reg7, reg8};
 
-  for (unsigned i = 0; i < sizeof(regs) / sizeof(regs[0]); i++) {
+  for (unsigned i = 0; i < arraysize(regs); i++) {
     if (regs[i].IsRegister()) {
       number_of_valid_regs++;
       unique_regs |= regs[i].Bit();
@@ -234,7 +236,7 @@
       number_of_valid_fpregs++;
       unique_fpregs |= regs[i].Bit();
     } else {
-      ASSERT(!regs[i].IsValid());
+      DCHECK(!regs[i].IsValid());
     }
   }
 
@@ -243,8 +245,8 @@
   int number_of_unique_fpregs =
     CountSetBits(unique_fpregs, sizeof(unique_fpregs) * kBitsPerByte);
 
-  ASSERT(number_of_valid_regs >= number_of_unique_regs);
-  ASSERT(number_of_valid_fpregs >= number_of_unique_fpregs);
+  DCHECK(number_of_valid_regs >= number_of_unique_regs);
+  DCHECK(number_of_valid_fpregs >= number_of_unique_fpregs);
 
   return (number_of_valid_regs != number_of_unique_regs) ||
          (number_of_valid_fpregs != number_of_unique_fpregs);
@@ -255,7 +257,7 @@
                         const CPURegister& reg3, const CPURegister& reg4,
                         const CPURegister& reg5, const CPURegister& reg6,
                         const CPURegister& reg7, const CPURegister& reg8) {
-  ASSERT(reg1.IsValid());
+  DCHECK(reg1.IsValid());
   bool match = true;
   match &= !reg2.IsValid() || reg2.IsSameSizeAndType(reg1);
   match &= !reg3.IsValid() || reg3.IsSameSizeAndType(reg1);
@@ -274,7 +276,7 @@
   // Verify all Objects referred to by code are NOT in new space.
   Object* obj = *handle;
   if (obj->IsHeapObject()) {
-    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
     value_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
   } else {
@@ -296,10 +298,257 @@
 }
 
 
-// Assembler
+// Constant Pool.
+void ConstPool::RecordEntry(intptr_t data,
+                            RelocInfo::Mode mode) {
+  DCHECK(mode != RelocInfo::COMMENT &&
+         mode != RelocInfo::POSITION &&
+         mode != RelocInfo::STATEMENT_POSITION &&
+         mode != RelocInfo::CONST_POOL &&
+         mode != RelocInfo::VENEER_POOL &&
+         mode != RelocInfo::CODE_AGE_SEQUENCE);
 
+  uint64_t raw_data = static_cast<uint64_t>(data);
+  int offset = assm_->pc_offset();
+  if (IsEmpty()) {
+    first_use_ = offset;
+  }
+
+  std::pair<uint64_t, int> entry = std::make_pair(raw_data, offset);
+  if (CanBeShared(mode)) {
+    shared_entries_.insert(entry);
+    if (shared_entries_.count(entry.first) == 1) {
+      shared_entries_count++;
+    }
+  } else {
+    unique_entries_.push_back(entry);
+  }
+
+  if (EntryCount() > Assembler::kApproxMaxPoolEntryCount) {
+    // Request constant pool emission after the next instruction.
+    assm_->SetNextConstPoolCheckIn(1);
+  }
+}
+
+
+int ConstPool::DistanceToFirstUse() {
+  DCHECK(first_use_ >= 0);
+  return assm_->pc_offset() - first_use_;
+}
+
+
+int ConstPool::MaxPcOffset() {
+  // If there are no pending entries in the pool, we can never go out of
+  // range.
+  if (IsEmpty()) return kMaxInt;
+
+  // Entries are not necessarily emitted in the order they are added, so in
+  // the worst case the first constant pool use will be accessing the last
+  // entry.
+  return first_use_ + kMaxLoadLiteralRange - WorstCaseSize();
+}
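// Editor's worked example with hypothetical numbers: if the first use was
// recorded at pc offset 1000, the ldr-literal range (kMaxLoadLiteralRange)
// is 1 MB, and the worst-case pool size is 200 bytes, the pool stays
// reachable only while pc <= 1000 + 1048576 - 200.
int MaxSafePcOffset(int first_use, int max_range, int worst_case_size) {
  // Past this point the earliest load could no longer reach the last entry.
  return first_use + max_range - worst_case_size;
}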
+
+
+int ConstPool::WorstCaseSize() {
+  if (IsEmpty()) return 0;
+
+  // Max size prologue:
+  //   b   over
+  //   ldr xzr, #pool_size
+  //   blr xzr
+  //   nop
+  // All entries are 64-bit for now.
+  return 4 * kInstructionSize + EntryCount() * kPointerSize;
+}
+
+
+int ConstPool::SizeIfEmittedAtCurrentPc(bool require_jump) {
+  if (IsEmpty()) return 0;
+
+  // Prologue is:
+  //   b   over  ;; if require_jump
+  //   ldr xzr, #pool_size
+  //   blr xzr
+  //   nop       ;; if not 64-bit aligned
+  int prologue_size = require_jump ? kInstructionSize : 0;
+  prologue_size += 2 * kInstructionSize;
+  prologue_size += IsAligned(assm_->pc_offset() + prologue_size, 8) ?
+                   0 : kInstructionSize;
+
+  // All entries are 64-bit for now.
+  return prologue_size + EntryCount() * kPointerSize;
+}
+
+
+void ConstPool::Emit(bool require_jump) {
+  DCHECK(!assm_->is_const_pool_blocked());
+  // Prevent recursive pool emission and protect from veneer pools.
+  Assembler::BlockPoolsScope block_pools(assm_);
+
+  int size = SizeIfEmittedAtCurrentPc(require_jump);
+  Label size_check;
+  assm_->bind(&size_check);
+
+  assm_->RecordConstPool(size);
+  // Emit the constant pool. It is preceded by an optional branch (when
+  // require_jump is set) and by a header which will:
+  //  1) Encode the size of the constant pool, for use by the disassembler.
+  //  2) Terminate the program, to try to prevent execution from accidentally
+  //     flowing into the constant pool.
+  //  3) Align the pool entries to a 64-bit boundary.
+  // The header is therefore made of up to three arm64 instructions:
+  //   ldr xzr, #<size of the constant pool in 32-bit words>
+  //   blr xzr
+  //   nop
+  //
+  // If executed, the header will likely segfault and lr will point to the
+  // instruction following the offending blr.
+  // TODO(all): Make the alignment part less fragile. Code is currently
+  // allocated as a byte array, so there is no guarantee the alignment will
+  // be preserved on compaction. It works for now because allocation appears
+  // to be 64-bit aligned.
+
+  // Emit the branch if required.
+  Label after_pool;
+  if (require_jump) {
+    assm_->b(&after_pool);
+  }
+
+  // Emit the header.
+  assm_->RecordComment("[ Constant Pool");
+  EmitMarker();
+  EmitGuard();
+  assm_->Align(8);
+
+  // Emit constant pool entries.
+  // TODO(all): Currently each relocated constant is 64 bits; consider adding
+  // support for 32-bit entries.
+  EmitEntries();
+  assm_->RecordComment("]");
+
+  if (after_pool.is_linked()) {
+    assm_->bind(&after_pool);
+  }
+
+  DCHECK(assm_->SizeOfCodeGeneratedSince(&size_check) ==
+         static_cast<unsigned>(size));
+}
+
+
+void ConstPool::Clear() {
+  shared_entries_.clear();
+  shared_entries_count = 0;
+  unique_entries_.clear();
+  first_use_ = -1;
+}
+
+
+bool ConstPool::CanBeShared(RelocInfo::Mode mode) {
+  // Constant pool currently does not support 32-bit entries.
+  DCHECK(mode != RelocInfo::NONE32);
+
+  return RelocInfo::IsNone(mode) ||
+         (!assm_->serializer_enabled() && (mode >= RelocInfo::CELL));
+}
+
+
+void ConstPool::EmitMarker() {
+  // A constant pool size is expressed in 32-bit words.
+  // Currently all entries are 64-bit.
+  // + 1 is for the crash guard.
+  // + 0/1 for alignment.
+  int word_count = EntryCount() * 2 + 1 +
+                   (IsAligned(assm_->pc_offset(), 8) ? 0 : 1);
+  assm_->Emit(LDR_x_lit                          |
+              Assembler::ImmLLiteral(word_count) |
+              Assembler::Rt(xzr));
+}
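// Editor's note (illustrative arithmetic only): each 64-bit entry is two
// 32-bit words, plus one word for the blr-xzr guard and one more when an
// alignment nop is needed.
int MarkerWordCount(int entry_count, bool pc_is_8_byte_aligned) {
  // e.g. MarkerWordCount(3, false) == 8
  return entry_count * 2 + 1 + (pc_is_8_byte_aligned ? 0 : 1);
}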
+
+
+MemOperand::PairResult MemOperand::AreConsistentForPair(
+    const MemOperand& operandA,
+    const MemOperand& operandB,
+    int access_size_log2) {
+  DCHECK(access_size_log2 >= 0);
+  DCHECK(access_size_log2 <= 3);
+  // Step one: check that they share the same base, that the mode is Offset,
+  // and that the offset is a multiple of the access size.
+  if (!operandA.base().Is(operandB.base()) ||
+      (operandA.addrmode() != Offset) ||
+      (operandB.addrmode() != Offset) ||
+      ((operandA.offset() & ((1 << access_size_log2) - 1)) != 0)) {
+    return kNotPair;
+  }
+  // Step two: check that the offsets are contiguous and that the range
+  // is OK for ldp/stp.
+  if ((operandB.offset() == operandA.offset() + (1 << access_size_log2)) &&
+      is_int7(operandA.offset() >> access_size_log2)) {
+    return kPairAB;
+  }
+  if ((operandA.offset() == operandB.offset() + (1 << access_size_log2)) &&
+      is_int7(operandB.offset() >> access_size_log2)) {
+    return kPairBA;
+  }
+  return kNotPair;
+}
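// Editor's sketch of the same test on raw offsets (hypothetical helper;
// access_size_log2 == 3 corresponds to 64-bit accesses, and the -64..63
// bound mirrors is_int7 on the scaled offset):
#include <cstdint>

enum PairKind { kNoPair, kABPair, kBAPair };

PairKind ClassifyPair(int64_t a, int64_t b, int access_size_log2) {
  const int64_t size = int64_t{1} << access_size_log2;
  if ((a & (size - 1)) != 0) return kNoPair;  // must be size-aligned
  const int64_t scaled_min = -64, scaled_max = 63;  // imm7 range of ldp/stp
  if (b == a + size) {
    int64_t scaled = a >> access_size_log2;
    if (scaled >= scaled_min && scaled <= scaled_max) return kABPair;
  }
  if (a == b + size) {
    int64_t scaled = b >> access_size_log2;
    if (scaled >= scaled_min && scaled <= scaled_max) return kBAPair;
  }
  return kNoPair;
}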
+
+
+void ConstPool::EmitGuard() {
+#ifdef DEBUG
+  Instruction* instr = reinterpret_cast<Instruction*>(assm_->pc());
+  DCHECK(instr->preceding()->IsLdrLiteralX() &&
+         instr->preceding()->Rt() == xzr.code());
+#endif
+  assm_->EmitPoolGuard();
+}
+
+
+void ConstPool::EmitEntries() {
+  DCHECK(IsAligned(assm_->pc_offset(), 8));
+
+  typedef std::multimap<uint64_t, int>::const_iterator SharedEntriesIterator;
+  SharedEntriesIterator value_it;
+  // Iterate through the keys (constant pool values).
+  for (value_it = shared_entries_.begin();
+       value_it != shared_entries_.end();
+       value_it = shared_entries_.upper_bound(value_it->first)) {
+    std::pair<SharedEntriesIterator, SharedEntriesIterator> range;
+    uint64_t data = value_it->first;
+    range = shared_entries_.equal_range(data);
+    SharedEntriesIterator offset_it;
+    // Iterate through the offsets of a given key.
+    for (offset_it = range.first; offset_it != range.second; offset_it++) {
+      Instruction* instr = assm_->InstructionAt(offset_it->second);
+
+      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+      DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+      instr->SetImmPCOffsetTarget(assm_->pc());
+    }
+    assm_->dc64(data);
+  }
+  shared_entries_.clear();
+  shared_entries_count = 0;
+
+  // Emit unique entries.
+  std::vector<std::pair<uint64_t, int> >::const_iterator unique_it;
+  for (unique_it = unique_entries_.begin();
+       unique_it != unique_entries_.end();
+       unique_it++) {
+    Instruction* instr = assm_->InstructionAt(unique_it->second);
+
+    // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
+    DCHECK(instr->IsLdrLiteral() && instr->ImmLLiteral() == 0);
+    instr->SetImmPCOffsetTarget(assm_->pc());
+    assm_->dc64(unique_it->first);
+  }
+  unique_entries_.clear();
+  first_use_ = -1;
+}
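// Editor's sketch: the upper_bound stride above visits each distinct constant
// exactly once. A stand-alone equivalent for counting pool slots might look
// like this:
#include <cstdint>
#include <map>

int DistinctSharedConstants(const std::multimap<uint64_t, int>& shared) {
  int distinct = 0;
  for (auto it = shared.begin(); it != shared.end();
       it = shared.upper_bound(it->first)) {
    ++distinct;  // one 64-bit pool slot per unique value
  }
  return distinct;
}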
+
+
+// Assembler
 Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
     : AssemblerBase(isolate, buffer, buffer_size),
+      constpool_(this),
       recorded_ast_id_(TypeFeedbackId::None()),
       unresolved_branches_(),
       positions_recorder_(this) {
@@ -310,28 +559,27 @@
 
 
 Assembler::~Assembler() {
-  ASSERT(num_pending_reloc_info_ == 0);
-  ASSERT(const_pool_blocked_nesting_ == 0);
-  ASSERT(veneer_pool_blocked_nesting_ == 0);
+  DCHECK(constpool_.IsEmpty());
+  DCHECK(const_pool_blocked_nesting_ == 0);
+  DCHECK(veneer_pool_blocked_nesting_ == 0);
 }
 
 
 void Assembler::Reset() {
 #ifdef DEBUG
-  ASSERT((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
-  ASSERT(const_pool_blocked_nesting_ == 0);
-  ASSERT(veneer_pool_blocked_nesting_ == 0);
-  ASSERT(unresolved_branches_.empty());
+  DCHECK((pc_ >= buffer_) && (pc_ < buffer_ + buffer_size_));
+  DCHECK(const_pool_blocked_nesting_ == 0);
+  DCHECK(veneer_pool_blocked_nesting_ == 0);
+  DCHECK(unresolved_branches_.empty());
   memset(buffer_, 0, pc_ - buffer_);
 #endif
   pc_ = buffer_;
   reloc_info_writer.Reposition(reinterpret_cast<byte*>(buffer_ + buffer_size_),
                                reinterpret_cast<byte*>(pc_));
-  num_pending_reloc_info_ = 0;
+  constpool_.Clear();
   next_constant_pool_check_ = 0;
   next_veneer_pool_check_ = kMaxInt;
   no_const_pool_before_ = 0;
-  first_const_pool_use_ = -1;
   ClearRecordedAstId();
 }
 
@@ -339,7 +587,7 @@
 void Assembler::GetCode(CodeDesc* desc) {
   // Emit constant pool if necessary.
   CheckConstPool(true, false);
-  ASSERT(num_pending_reloc_info_ == 0);
+  DCHECK(constpool_.IsEmpty());
 
   // Set up code descriptor.
   if (desc) {
@@ -354,7 +602,7 @@
 
 
 void Assembler::Align(int m) {
-  ASSERT(m >= 4 && IsPowerOf2(m));
+  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
   while ((pc_offset() & (m - 1)) != 0) {
     nop();
   }
@@ -382,7 +630,7 @@
 void Assembler::RemoveBranchFromLabelLinkChain(Instruction* branch,
                                                Label* label,
                                                Instruction* label_veneer) {
-  ASSERT(label->is_linked());
+  DCHECK(label->is_linked());
 
   CheckLabelLinkChain(label);
 
@@ -398,7 +646,7 @@
     link = next_link;
   }
 
-  ASSERT(branch == link);
+  DCHECK(branch == link);
   next_link = branch->ImmPCOffsetTarget();
 
   if (branch == prev_link) {
@@ -464,8 +712,8 @@
   // that are linked to this label will be updated to point to the newly-bound
   // label.
 
-  ASSERT(!label->is_near_linked());
-  ASSERT(!label->is_bound());
+  DCHECK(!label->is_near_linked());
+  DCHECK(!label->is_bound());
 
   DeleteUnresolvedBranchInfoForLabel(label);
 
@@ -488,11 +736,11 @@
 
     CheckLabelLinkChain(label);
 
-    ASSERT(linkoffset >= 0);
-    ASSERT(linkoffset < pc_offset());
-    ASSERT((linkoffset > prevlinkoffset) ||
+    DCHECK(linkoffset >= 0);
+    DCHECK(linkoffset < pc_offset());
+    DCHECK((linkoffset > prevlinkoffset) ||
            (linkoffset - prevlinkoffset == kStartOfLabelLinkChain));
-    ASSERT(prevlinkoffset >= 0);
+    DCHECK(prevlinkoffset >= 0);
 
     // Update the link to point to the label.
     link->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
@@ -508,13 +756,13 @@
   }
   label->bind_to(pc_offset());
 
-  ASSERT(label->is_bound());
-  ASSERT(!label->is_linked());
+  DCHECK(label->is_bound());
+  DCHECK(!label->is_linked());
 }
 
 
 int Assembler::LinkAndGetByteOffsetTo(Label* label) {
-  ASSERT(sizeof(*pc_) == 1);
+  DCHECK(sizeof(*pc_) == 1);
   CheckLabelLinkChain(label);
 
   int offset;
@@ -529,7 +777,7 @@
     // Note that offset can be zero for self-referential instructions. (This
     // could be useful for ADR, for example.)
     offset = label->pos() - pc_offset();
-    ASSERT(offset <= 0);
+    DCHECK(offset <= 0);
   } else {
     if (label->is_linked()) {
       // The label is linked, so the referring instruction should be added onto
@@ -538,7 +786,7 @@
       // In this case, label->pos() returns the offset of the last linked
       // instruction from the start of the buffer.
       offset = label->pos() - pc_offset();
-      ASSERT(offset != kStartOfLabelLinkChain);
+      DCHECK(offset != kStartOfLabelLinkChain);
       // Note that the offset here needs to be PC-relative only so that the
       // first instruction in a buffer can link to an unbound label. Otherwise,
       // the offset would be 0 for this case, and 0 is reserved for
@@ -557,7 +805,7 @@
 
 
 void Assembler::DeleteUnresolvedBranchInfoForLabelTraverse(Label* label) {
-  ASSERT(label->is_linked());
+  DCHECK(label->is_linked());
   CheckLabelLinkChain(label);
 
   int link_offset = label->pos();
@@ -592,7 +840,7 @@
 
 void Assembler::DeleteUnresolvedBranchInfoForLabel(Label* label) {
   if (unresolved_branches_.empty()) {
-    ASSERT(next_veneer_pool_check_ == kMaxInt);
+    DCHECK(next_veneer_pool_check_ == kMaxInt);
     return;
   }
 
@@ -622,8 +870,7 @@
 void Assembler::EndBlockConstPool() {
   if (--const_pool_blocked_nesting_ == 0) {
     // Check the constant pool hasn't been blocked for too long.
-    ASSERT((num_pending_reloc_info_ == 0) ||
-           (pc_offset() < (first_const_pool_use_ + kMaxDistToConstPool)));
+    DCHECK(pc_offset() < constpool_.MaxPcOffset());
     // Two cases:
     //  * no_const_pool_before_ >= next_constant_pool_check_ and the emission is
     //    still blocked
@@ -648,7 +895,7 @@
 
   // It is still worth asserting the marker is complete.
   // 4: blr xzr
-  ASSERT(!result || (instr->following()->IsBranchAndLinkToRegister() &&
+  DCHECK(!result || (instr->following()->IsBranchAndLinkToRegister() &&
                      instr->following()->Rn() == xzr.code()));
 
   return result;
@@ -682,13 +929,6 @@
 }
 
 
-void Assembler::ConstantPoolMarker(uint32_t size) {
-  ASSERT(is_const_pool_blocked());
-  // + 1 is for the crash guard.
-  Emit(LDR_x_lit | ImmLLiteral(size + 1) | Rt(xzr));
-}
-
-
 void Assembler::EmitPoolGuard() {
   // We must generate only one instruction as this is used in scopes that
   // control the size of the code generated.
@@ -696,18 +936,6 @@
 }
 
 
-void Assembler::ConstantPoolGuard() {
-#ifdef DEBUG
-  // Currently this is only used after a constant pool marker.
-  ASSERT(is_const_pool_blocked());
-  Instruction* instr = reinterpret_cast<Instruction*>(pc_);
-  ASSERT(instr->preceding()->IsLdrLiteralX() &&
-         instr->preceding()->Rt() == xzr.code());
-#endif
-  EmitPoolGuard();
-}
-
-
 void Assembler::StartBlockVeneerPool() {
   ++veneer_pool_blocked_nesting_;
 }
@@ -716,7 +944,7 @@
 void Assembler::EndBlockVeneerPool() {
   if (--veneer_pool_blocked_nesting_ == 0) {
     // Check the veneer pool hasn't been blocked for too long.
-    ASSERT(unresolved_branches_.empty() ||
+    DCHECK(unresolved_branches_.empty() ||
            (pc_offset() < unresolved_branches_first_limit()));
   }
 }
@@ -724,24 +952,24 @@
 
 void Assembler::br(const Register& xn) {
   positions_recorder()->WriteRecordedPositions();
-  ASSERT(xn.Is64Bits());
+  DCHECK(xn.Is64Bits());
   Emit(BR | Rn(xn));
 }
 
 
 void Assembler::blr(const Register& xn) {
   positions_recorder()->WriteRecordedPositions();
-  ASSERT(xn.Is64Bits());
+  DCHECK(xn.Is64Bits());
   // The pattern 'blr xzr' is used as a guard to detect when execution falls
   // through the constant pool. It should not be emitted.
-  ASSERT(!xn.Is(xzr));
+  DCHECK(!xn.Is(xzr));
   Emit(BLR | Rn(xn));
 }
 
 
 void Assembler::ret(const Register& xn) {
   positions_recorder()->WriteRecordedPositions();
-  ASSERT(xn.Is64Bits());
+  DCHECK(xn.Is64Bits());
   Emit(RET | Rn(xn));
 }
 
@@ -812,7 +1040,7 @@
                     unsigned bit_pos,
                     int imm14) {
   positions_recorder()->WriteRecordedPositions();
-  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
   Emit(TBZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
 
@@ -829,7 +1057,7 @@
                      unsigned bit_pos,
                      int imm14) {
   positions_recorder()->WriteRecordedPositions();
-  ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
+  DCHECK(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSizeInBits)));
   Emit(TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
 }
 
@@ -843,7 +1071,7 @@
 
 
 void Assembler::adr(const Register& rd, int imm21) {
-  ASSERT(rd.Is64Bits());
+  DCHECK(rd.Is64Bits());
   Emit(ADR | ImmPCRelAddress(imm21) | Rd(rd));
 }
 
@@ -1012,8 +1240,8 @@
 void Assembler::lslv(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == rm.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rm.SizeInBits());
   Emit(SF(rd) | LSLV | Rm(rm) | Rn(rn) | Rd(rd));
 }
 
@@ -1021,8 +1249,8 @@
 void Assembler::lsrv(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == rm.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rm.SizeInBits());
   Emit(SF(rd) | LSRV | Rm(rm) | Rn(rn) | Rd(rd));
 }
 
@@ -1030,8 +1258,8 @@
 void Assembler::asrv(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == rm.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rm.SizeInBits());
   Emit(SF(rd) | ASRV | Rm(rm) | Rn(rn) | Rd(rd));
 }
 
@@ -1039,8 +1267,8 @@
 void Assembler::rorv(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == rm.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rm.SizeInBits());
   Emit(SF(rd) | RORV | Rm(rm) | Rn(rn) | Rd(rd));
 }
 
@@ -1050,7 +1278,7 @@
                      const Register& rn,
                      unsigned immr,
                      unsigned imms) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
   Emit(SF(rd) | BFM | N |
        ImmR(immr, rd.SizeInBits()) |
@@ -1063,7 +1291,7 @@
                      const Register& rn,
                      unsigned immr,
                      unsigned imms) {
-  ASSERT(rd.Is64Bits() || rn.Is32Bits());
+  DCHECK(rd.Is64Bits() || rn.Is32Bits());
   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
   Emit(SF(rd) | SBFM | N |
        ImmR(immr, rd.SizeInBits()) |
@@ -1076,7 +1304,7 @@
                      const Register& rn,
                      unsigned immr,
                      unsigned imms) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
   Emit(SF(rd) | UBFM | N |
        ImmR(immr, rd.SizeInBits()) |
@@ -1089,8 +1317,8 @@
                      const Register& rn,
                      const Register& rm,
                      unsigned lsb) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == rm.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rm.SizeInBits());
   Instr N = SF(rd) >> (kSFOffset - kBitfieldNOffset);
   Emit(SF(rd) | EXTR | N | Rm(rm) |
        ImmS(lsb, rn.SizeInBits()) | Rn(rn) | Rd(rd));
@@ -1130,33 +1358,33 @@
 
 
 void Assembler::cset(const Register &rd, Condition cond) {
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK((cond != al) && (cond != nv));
   Register zr = AppropriateZeroRegFor(rd);
   csinc(rd, zr, zr, NegateCondition(cond));
 }
 
 
 void Assembler::csetm(const Register &rd, Condition cond) {
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK((cond != al) && (cond != nv));
   Register zr = AppropriateZeroRegFor(rd);
   csinv(rd, zr, zr, NegateCondition(cond));
 }
 
 
 void Assembler::cinc(const Register &rd, const Register &rn, Condition cond) {
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK((cond != al) && (cond != nv));
   csinc(rd, rn, rn, NegateCondition(cond));
 }
 
 
 void Assembler::cinv(const Register &rd, const Register &rn, Condition cond) {
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK((cond != al) && (cond != nv));
   csinv(rd, rn, rn, NegateCondition(cond));
 }
 
 
 void Assembler::cneg(const Register &rd, const Register &rn, Condition cond) {
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK((cond != al) && (cond != nv));
   csneg(rd, rn, rn, NegateCondition(cond));
 }
 
@@ -1166,8 +1394,8 @@
                                   const Register& rm,
                                   Condition cond,
                                   ConditionalSelectOp op) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == rm.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rm.SizeInBits());
   Emit(SF(rd) | op | Rm(rm) | Cond(cond) | Rn(rn) | Rd(rd));
 }
 
@@ -1200,7 +1428,7 @@
 void Assembler::mul(const Register& rd,
                     const Register& rn,
                     const Register& rm) {
-  ASSERT(AreSameSizeAndType(rd, rn, rm));
+  DCHECK(AreSameSizeAndType(rd, rn, rm));
   Register zr = AppropriateZeroRegFor(rn);
   DataProcessing3Source(rd, rn, rm, zr, MADD);
 }
@@ -1210,7 +1438,7 @@
                      const Register& rn,
                      const Register& rm,
                      const Register& ra) {
-  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
   DataProcessing3Source(rd, rn, rm, ra, MADD);
 }
 
@@ -1218,7 +1446,7 @@
 void Assembler::mneg(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
-  ASSERT(AreSameSizeAndType(rd, rn, rm));
+  DCHECK(AreSameSizeAndType(rd, rn, rm));
   Register zr = AppropriateZeroRegFor(rn);
   DataProcessing3Source(rd, rn, rm, zr, MSUB);
 }
@@ -1228,7 +1456,7 @@
                      const Register& rn,
                      const Register& rm,
                      const Register& ra) {
-  ASSERT(AreSameSizeAndType(rd, rn, rm, ra));
+  DCHECK(AreSameSizeAndType(rd, rn, rm, ra));
   DataProcessing3Source(rd, rn, rm, ra, MSUB);
 }
 
@@ -1237,8 +1465,8 @@
                        const Register& rn,
                        const Register& rm,
                        const Register& ra) {
-  ASSERT(rd.Is64Bits() && ra.Is64Bits());
-  ASSERT(rn.Is32Bits() && rm.Is32Bits());
+  DCHECK(rd.Is64Bits() && ra.Is64Bits());
+  DCHECK(rn.Is32Bits() && rm.Is32Bits());
   DataProcessing3Source(rd, rn, rm, ra, SMADDL_x);
 }
 
@@ -1247,8 +1475,8 @@
                        const Register& rn,
                        const Register& rm,
                        const Register& ra) {
-  ASSERT(rd.Is64Bits() && ra.Is64Bits());
-  ASSERT(rn.Is32Bits() && rm.Is32Bits());
+  DCHECK(rd.Is64Bits() && ra.Is64Bits());
+  DCHECK(rn.Is32Bits() && rm.Is32Bits());
   DataProcessing3Source(rd, rn, rm, ra, SMSUBL_x);
 }
 
@@ -1257,8 +1485,8 @@
                        const Register& rn,
                        const Register& rm,
                        const Register& ra) {
-  ASSERT(rd.Is64Bits() && ra.Is64Bits());
-  ASSERT(rn.Is32Bits() && rm.Is32Bits());
+  DCHECK(rd.Is64Bits() && ra.Is64Bits());
+  DCHECK(rn.Is32Bits() && rm.Is32Bits());
   DataProcessing3Source(rd, rn, rm, ra, UMADDL_x);
 }
 
@@ -1267,8 +1495,8 @@
                        const Register& rn,
                        const Register& rm,
                        const Register& ra) {
-  ASSERT(rd.Is64Bits() && ra.Is64Bits());
-  ASSERT(rn.Is32Bits() && rm.Is32Bits());
+  DCHECK(rd.Is64Bits() && ra.Is64Bits());
+  DCHECK(rn.Is32Bits() && rm.Is32Bits());
   DataProcessing3Source(rd, rn, rm, ra, UMSUBL_x);
 }
 
@@ -1276,8 +1504,8 @@
 void Assembler::smull(const Register& rd,
                       const Register& rn,
                       const Register& rm) {
-  ASSERT(rd.Is64Bits());
-  ASSERT(rn.Is32Bits() && rm.Is32Bits());
+  DCHECK(rd.Is64Bits());
+  DCHECK(rn.Is32Bits() && rm.Is32Bits());
   DataProcessing3Source(rd, rn, rm, xzr, SMADDL_x);
 }
 
@@ -1285,7 +1513,7 @@
 void Assembler::smulh(const Register& rd,
                       const Register& rn,
                       const Register& rm) {
-  ASSERT(AreSameSizeAndType(rd, rn, rm));
+  DCHECK(AreSameSizeAndType(rd, rn, rm));
   DataProcessing3Source(rd, rn, rm, xzr, SMULH_x);
 }
 
@@ -1293,8 +1521,8 @@
 void Assembler::sdiv(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == rm.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rm.SizeInBits());
   Emit(SF(rd) | SDIV | Rm(rm) | Rn(rn) | Rd(rd));
 }
 
@@ -1302,8 +1530,8 @@
 void Assembler::udiv(const Register& rd,
                      const Register& rn,
                      const Register& rm) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == rm.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rm.SizeInBits());
   Emit(SF(rd) | UDIV | Rm(rm) | Rn(rn) | Rd(rd));
 }
 
@@ -1322,7 +1550,7 @@
 
 void Assembler::rev32(const Register& rd,
                       const Register& rn) {
-  ASSERT(rd.Is64Bits());
+  DCHECK(rd.Is64Bits());
   DataProcessing1Source(rd, rn, REV);
 }
 
@@ -1362,7 +1590,7 @@
 void Assembler::ldpsw(const Register& rt,
                       const Register& rt2,
                       const MemOperand& src) {
-  ASSERT(rt.Is64Bits());
+  DCHECK(rt.Is64Bits());
   LoadStorePair(rt, rt2, src, LDPSW_x);
 }
 
@@ -1372,8 +1600,8 @@
                               const MemOperand& addr,
                               LoadStorePairOp op) {
   // 'rt' and 'rt2' can only be aliased for stores.
-  ASSERT(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
-  ASSERT(AreSameSizeAndType(rt, rt2));
+  DCHECK(((op & LoadStorePairLBit) == 0) || !rt.Is(rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
 
   Instr memop = op | Rt(rt) | Rt2(rt2) | RnSP(addr.base()) |
                 ImmLSPair(addr.offset(), CalcLSPairDataSize(op));
@@ -1383,13 +1611,13 @@
     addrmodeop = LoadStorePairOffsetFixed;
   } else {
     // Pre-index and post-index modes.
-    ASSERT(!rt.Is(addr.base()));
-    ASSERT(!rt2.Is(addr.base()));
-    ASSERT(addr.offset() != 0);
+    DCHECK(!rt.Is(addr.base()));
+    DCHECK(!rt2.Is(addr.base()));
+    DCHECK(addr.offset() != 0);
     if (addr.IsPreIndex()) {
       addrmodeop = LoadStorePairPreIndexFixed;
     } else {
-      ASSERT(addr.IsPostIndex());
+      DCHECK(addr.IsPostIndex());
       addrmodeop = LoadStorePairPostIndexFixed;
     }
   }
@@ -1417,9 +1645,9 @@
                                          const CPURegister& rt2,
                                          const MemOperand& addr,
                                          LoadStorePairNonTemporalOp op) {
-  ASSERT(!rt.Is(rt2));
-  ASSERT(AreSameSizeAndType(rt, rt2));
-  ASSERT(addr.IsImmediateOffset());
+  DCHECK(!rt.Is(rt2));
+  DCHECK(AreSameSizeAndType(rt, rt2));
+  DCHECK(addr.IsImmediateOffset());
 
   LSDataSize size = CalcLSPairDataSize(
     static_cast<LoadStorePairOp>(op & LoadStorePairMask));
@@ -1470,7 +1698,7 @@
 
 
 void Assembler::ldrsw(const Register& rt, const MemOperand& src) {
-  ASSERT(rt.Is64Bits());
+  DCHECK(rt.Is64Bits());
   LoadStore(rt, src, LDRSW_x);
 }
 
@@ -1478,14 +1706,14 @@
 void Assembler::ldr_pcrel(const CPURegister& rt, int imm19) {
   // The pattern 'ldr xzr, #offset' is used to indicate the beginning of a
   // constant pool. It should not be emitted.
-  ASSERT(!rt.IsZero());
+  DCHECK(!rt.IsZero());
   Emit(LoadLiteralOpFor(rt) | ImmLLiteral(imm19) | Rt(rt));
 }
 
 
 void Assembler::ldr(const CPURegister& rt, const Immediate& imm) {
   // Currently we only support 64-bit literals.
-  ASSERT(rt.Is64Bits());
+  DCHECK(rt.Is64Bits());
 
   RecordRelocInfo(imm.rmode(), imm.value());
   BlockConstPoolFor(1);
@@ -1513,13 +1741,13 @@
 
 
 void Assembler::mrs(const Register& rt, SystemRegister sysreg) {
-  ASSERT(rt.Is64Bits());
+  DCHECK(rt.Is64Bits());
   Emit(MRS | ImmSystemRegister(sysreg) | Rt(rt));
 }
 
 
 void Assembler::msr(SystemRegister sysreg, const Register& rt) {
-  ASSERT(rt.Is64Bits());
+  DCHECK(rt.Is64Bits());
   Emit(MSR | Rt(rt) | ImmSystemRegister(sysreg));
 }
 
@@ -1545,35 +1773,35 @@
 
 
 void Assembler::fmov(FPRegister fd, double imm) {
-  ASSERT(fd.Is64Bits());
-  ASSERT(IsImmFP64(imm));
+  DCHECK(fd.Is64Bits());
+  DCHECK(IsImmFP64(imm));
   Emit(FMOV_d_imm | Rd(fd) | ImmFP64(imm));
 }
 
 
 void Assembler::fmov(FPRegister fd, float imm) {
-  ASSERT(fd.Is32Bits());
-  ASSERT(IsImmFP32(imm));
+  DCHECK(fd.Is32Bits());
+  DCHECK(IsImmFP32(imm));
   Emit(FMOV_s_imm | Rd(fd) | ImmFP32(imm));
 }
 
 
 void Assembler::fmov(Register rd, FPRegister fn) {
-  ASSERT(rd.SizeInBits() == fn.SizeInBits());
+  DCHECK(rd.SizeInBits() == fn.SizeInBits());
   FPIntegerConvertOp op = rd.Is32Bits() ? FMOV_ws : FMOV_xd;
   Emit(op | Rd(rd) | Rn(fn));
 }
 
 
 void Assembler::fmov(FPRegister fd, Register rn) {
-  ASSERT(fd.SizeInBits() == rn.SizeInBits());
+  DCHECK(fd.SizeInBits() == rn.SizeInBits());
   FPIntegerConvertOp op = fd.Is32Bits() ? FMOV_sw : FMOV_dx;
   Emit(op | Rd(fd) | Rn(rn));
 }
 
 
 void Assembler::fmov(FPRegister fd, FPRegister fn) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
   Emit(FPType(fd) | FMOV | Rd(fd) | Rn(fn));
 }
 
@@ -1668,56 +1896,56 @@
 
 void Assembler::fabs(const FPRegister& fd,
                      const FPRegister& fn) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
   FPDataProcessing1Source(fd, fn, FABS);
 }
 
 
 void Assembler::fneg(const FPRegister& fd,
                      const FPRegister& fn) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
   FPDataProcessing1Source(fd, fn, FNEG);
 }
 
 
 void Assembler::fsqrt(const FPRegister& fd,
                       const FPRegister& fn) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
   FPDataProcessing1Source(fd, fn, FSQRT);
 }
 
 
 void Assembler::frinta(const FPRegister& fd,
                        const FPRegister& fn) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
   FPDataProcessing1Source(fd, fn, FRINTA);
 }
 
 
 void Assembler::frintm(const FPRegister& fd,
                        const FPRegister& fn) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
   FPDataProcessing1Source(fd, fn, FRINTM);
 }
 
 
 void Assembler::frintn(const FPRegister& fd,
                        const FPRegister& fn) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
   FPDataProcessing1Source(fd, fn, FRINTN);
 }
 
 
 void Assembler::frintz(const FPRegister& fd,
                        const FPRegister& fn) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
   FPDataProcessing1Source(fd, fn, FRINTZ);
 }
 
 
 void Assembler::fcmp(const FPRegister& fn,
                      const FPRegister& fm) {
-  ASSERT(fn.SizeInBits() == fm.SizeInBits());
+  DCHECK(fn.SizeInBits() == fm.SizeInBits());
   Emit(FPType(fn) | FCMP | Rm(fm) | Rn(fn));
 }
 
@@ -1728,7 +1956,7 @@
   // Although the fcmp instruction can strictly only take an immediate value of
   // +0.0, we don't need to check for -0.0 because the sign of 0.0 doesn't
   // affect the result of the comparison.
-  ASSERT(value == 0.0);
+  DCHECK(value == 0.0);
   Emit(FPType(fn) | FCMP_zero | Rn(fn));
 }
 
@@ -1737,7 +1965,7 @@
                       const FPRegister& fm,
                       StatusFlags nzcv,
                       Condition cond) {
-  ASSERT(fn.SizeInBits() == fm.SizeInBits());
+  DCHECK(fn.SizeInBits() == fm.SizeInBits());
   Emit(FPType(fn) | FCCMP | Rm(fm) | Cond(cond) | Rn(fn) | Nzcv(nzcv));
 }
 
@@ -1746,8 +1974,8 @@
                       const FPRegister& fn,
                       const FPRegister& fm,
                       Condition cond) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
-  ASSERT(fd.SizeInBits() == fm.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fm.SizeInBits());
   Emit(FPType(fd) | FCSEL | Rm(fm) | Cond(cond) | Rn(fn) | Rd(fd));
 }
 
@@ -1763,11 +1991,11 @@
                      const FPRegister& fn) {
   if (fd.Is64Bits()) {
     // Convert float to double.
-    ASSERT(fn.Is32Bits());
+    DCHECK(fn.Is32Bits());
     FPDataProcessing1Source(fd, fn, FCVT_ds);
   } else {
     // Convert double to float.
-    ASSERT(fn.Is64Bits());
+    DCHECK(fn.Is64Bits());
     FPDataProcessing1Source(fd, fn, FCVT_sd);
   }
 }
@@ -1842,7 +2070,7 @@
 // negated bit.
 // If b is 1, then B is 0.
 Instr Assembler::ImmFP32(float imm) {
-  ASSERT(IsImmFP32(imm));
+  DCHECK(IsImmFP32(imm));
   // bits: aBbb.bbbc.defg.h000.0000.0000.0000.0000
   uint32_t bits = float_to_rawbits(imm);
   // bit7: a000.0000
@@ -1857,7 +2085,7 @@
 
 
 Instr Assembler::ImmFP64(double imm) {
-  ASSERT(IsImmFP64(imm));
+  DCHECK(IsImmFP64(imm));
   // bits: aBbb.bbbb.bbcd.efgh.0000.0000.0000.0000
   //       0000.0000.0000.0000.0000.0000.0000.0000
   uint64_t bits = double_to_rawbits(imm);
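The aBbb.bbbc.defg.h pattern above can be checked without any assembler state. A minimal sketch, assuming plain C++ with memcpy standing in for float_to_rawbits (FitsFP32Immediate is an illustrative name, not a V8 helper):

#include <cstdint>
#include <cstring>

static bool FitsFP32Immediate(float imm) {
  uint32_t bits;
  std::memcpy(&bits, &imm, sizeof(bits));  // raw IEEE-754 bits
  // bits[18:0] (the trailing zeros after 'h') must all be clear.
  if ((bits & 0x7ffff) != 0) return false;
  // bits[29:25] (the 'b' run) must be all zeros or all ones.
  uint32_t b_pattern = (bits >> 16) & 0x3e00;
  if ((b_pattern != 0) && (b_pattern != 0x3e00)) return false;
  // bit[30] ('B') must be the inverse of bit[29] ('b').
  if (((bits ^ (bits << 1)) & 0x40000000) == 0) return false;
  return true;
}

Under this check 1.0f (0x3f800000) is encodable while 0.1f fails the low-bits test, matching which constants an fmov immediate can carry.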
@@ -1877,10 +2105,19 @@
                          uint64_t imm,
                          int shift,
                          MoveWideImmediateOp mov_op) {
+  // Ignore the top 32 bits of an immediate if we're moving to a W register.
+  if (rd.Is32Bits()) {
+    // Check that the top 32 bits are zero (a positive 32-bit number) or the
+    // top 33 bits are one (a negative 32-bit number, sign extended to 64 bits).

+    DCHECK(((imm >> kWRegSizeInBits) == 0) ||
+           ((imm >> (kWRegSizeInBits - 1)) == 0x1ffffffff));
+    imm &= kWRegMask;
+  }
+
   if (shift >= 0) {
     // Explicit shift specified.
-    ASSERT((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
-    ASSERT(rd.Is64Bits() || (shift == 0) || (shift == 16));
+    DCHECK((shift == 0) || (shift == 16) || (shift == 32) || (shift == 48));
+    DCHECK(rd.Is64Bits() || (shift == 0) || (shift == 16));
     shift /= 16;
   } else {
     // Calculate a new immediate and shift combination to encode the immediate
@@ -1892,17 +2129,17 @@
       imm >>= 16;
       shift = 1;
     } else if ((imm & ~(0xffffUL << 32)) == 0) {
-      ASSERT(rd.Is64Bits());
+      DCHECK(rd.Is64Bits());
       imm >>= 32;
       shift = 2;
     } else if ((imm & ~(0xffffUL << 48)) == 0) {
-      ASSERT(rd.Is64Bits());
+      DCHECK(rd.Is64Bits());
       imm >>= 48;
       shift = 3;
     }
   }
 
-  ASSERT(is_uint16(imm));
+  DCHECK(is_uint16(imm));
 
   Emit(SF(rd) | MoveWideImmediateFixed | mov_op |
        Rd(rd) | ImmMoveWide(imm) | ShiftMoveWide(shift));
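When no explicit shift is given, the branch above scans for the single non-zero 16-bit halfword to derive the shift. A stand-alone sketch of that scan, with illustrative names rather than V8's API:

#include <cstdint>

// True if a single movz can materialize imm, i.e. at most one 16-bit
// halfword is non-zero; *imm16 and *lsl receive the encoding inputs.
static bool SingleHalfwordImmediate(uint64_t imm, uint16_t* imm16, int* lsl) {
  for (int half = 0; half < 4; half++) {
    uint64_t mask = UINT64_C(0xffff) << (16 * half);
    if ((imm & ~mask) == 0) {  // everything outside this halfword is zero
      *imm16 = static_cast<uint16_t>(imm >> (16 * half));
      *lsl = 16 * half;        // the instruction encodes this as lsl / 16
      return true;
    }
  }
  return false;  // needs a movz/movk sequence instead
}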
@@ -1914,17 +2151,17 @@
                        const Operand& operand,
                        FlagsUpdate S,
                        AddSubOp op) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(!operand.NeedsRelocation(this));
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(!operand.NeedsRelocation(this));
   if (operand.IsImmediate()) {
     int64_t immediate = operand.ImmediateValue();
-    ASSERT(IsImmAddSub(immediate));
+    DCHECK(IsImmAddSub(immediate));
     Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
     Emit(SF(rd) | AddSubImmediateFixed | op | Flags(S) |
          ImmAddSub(immediate) | dest_reg | RnSP(rn));
   } else if (operand.IsShiftedRegister()) {
-    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
-    ASSERT(operand.shift() != ROR);
+    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
+    DCHECK(operand.shift() != ROR);
 
     // For instructions of the form:
     //   add/sub   wsp, <Wn>, <Wm> [, LSL #0-3 ]
@@ -1934,14 +2171,14 @@
     // or their 64-bit register equivalents, convert the operand from shifted to
     // extended register mode, and emit an add/sub extended instruction.
     if (rn.IsSP() || rd.IsSP()) {
-      ASSERT(!(rd.IsSP() && (S == SetFlags)));
+      DCHECK(!(rd.IsSP() && (S == SetFlags)));
       DataProcExtendedRegister(rd, rn, operand.ToExtendedRegister(), S,
                                AddSubExtendedFixed | op);
     } else {
       DataProcShiftedRegister(rd, rn, operand, S, AddSubShiftedFixed | op);
     }
   } else {
-    ASSERT(operand.IsExtendedRegister());
+    DCHECK(operand.IsExtendedRegister());
     DataProcExtendedRegister(rd, rn, operand, S, AddSubExtendedFixed | op);
   }
 }
@@ -1952,26 +2189,37 @@
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 AddSubWithCarryOp op) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(rd.SizeInBits() == operand.reg().SizeInBits());
-  ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
-  ASSERT(!operand.NeedsRelocation(this));
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == operand.reg().SizeInBits());
+  DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+  DCHECK(!operand.NeedsRelocation(this));
   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) | Rn(rn) | Rd(rd));
 }
 
 
 void Assembler::hlt(int code) {
-  ASSERT(is_uint16(code));
+  DCHECK(is_uint16(code));
   Emit(HLT | ImmException(code));
 }
 
 
 void Assembler::brk(int code) {
-  ASSERT(is_uint16(code));
+  DCHECK(is_uint16(code));
   Emit(BRK | ImmException(code));
 }
 
 
+void Assembler::EmitStringData(const char* string) {
+  size_t len = strlen(string) + 1;
+  DCHECK(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
+  EmitData(string, len);
+  // Pad with NULL characters until pc_ is aligned.
+  const char pad[] = {'\0', '\0', '\0', '\0'};
+  STATIC_ASSERT(sizeof(pad) == kInstructionSize);
+  EmitData(pad, RoundUp(pc_offset(), kInstructionSize) - pc_offset());
+}
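The padding arithmetic in EmitStringData, isolated into a sketch (kInstrSize mirrors arm64's 4-byte kInstructionSize; this is not the assembler itself):

// Number of trailing '\0' bytes needed to realign pc after the string.
static int PaddingBytes(int pc_offset) {
  const int kInstrSize = 4;
  int rounded = (pc_offset + kInstrSize - 1) & ~(kInstrSize - 1);
  return rounded - pc_offset;  // always 0 to 3
}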
+
+
 void Assembler::debug(const char* message, uint32_t code, Instr params) {
 #ifdef USE_SIMULATOR
   // Don't generate simulator specific code if we are building a snapshot, which
@@ -1987,11 +2235,11 @@
     // Refer to instructions-arm64.h for a description of the marker and its
     // arguments.
     hlt(kImmExceptionIsDebug);
-    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
+    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugCodeOffset);
     dc32(code);
-    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
+    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugParamsOffset);
     dc32(params);
-    ASSERT(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
+    DCHECK(SizeOfCodeGeneratedSince(&start) == kDebugMessageOffset);
     EmitStringData(message);
     hlt(kImmExceptionIsUnreachable);
 
@@ -2010,15 +2258,15 @@
                         const Register& rn,
                         const Operand& operand,
                         LogicalOp op) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
-  ASSERT(!operand.NeedsRelocation(this));
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(!operand.NeedsRelocation(this));
   if (operand.IsImmediate()) {
     int64_t immediate = operand.ImmediateValue();
     unsigned reg_size = rd.SizeInBits();
 
-    ASSERT(immediate != 0);
-    ASSERT(immediate != -1);
-    ASSERT(rd.Is64Bits() || is_uint32(immediate));
+    DCHECK(immediate != 0);
+    DCHECK(immediate != -1);
+    DCHECK(rd.Is64Bits() || is_uint32(immediate));
 
     // If the operation is NOT, invert the operation and immediate.
     if ((op & NOT) == NOT) {
@@ -2035,8 +2283,8 @@
       UNREACHABLE();
     }
   } else {
-    ASSERT(operand.IsShiftedRegister());
-    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
+    DCHECK(operand.IsShiftedRegister());
+    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
     Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
     DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
   }
@@ -2063,13 +2311,13 @@
                                    Condition cond,
                                    ConditionalCompareOp op) {
   Instr ccmpop;
-  ASSERT(!operand.NeedsRelocation(this));
+  DCHECK(!operand.NeedsRelocation(this));
   if (operand.IsImmediate()) {
     int64_t immediate = operand.ImmediateValue();
-    ASSERT(IsImmConditionalCompare(immediate));
+    DCHECK(IsImmConditionalCompare(immediate));
     ccmpop = ConditionalCompareImmediateFixed | op | ImmCondCmp(immediate);
   } else {
-    ASSERT(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
+    DCHECK(operand.IsShiftedRegister() && (operand.shift_amount() == 0));
     ccmpop = ConditionalCompareRegisterFixed | op | Rm(operand.reg());
   }
   Emit(SF(rn) | ccmpop | Cond(cond) | Rn(rn) | Nzcv(nzcv));
@@ -2079,7 +2327,7 @@
 void Assembler::DataProcessing1Source(const Register& rd,
                                       const Register& rn,
                                       DataProcessing1SourceOp op) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
   Emit(SF(rn) | op | Rn(rn) | Rd(rd));
 }
 
@@ -2095,8 +2343,8 @@
                                         const FPRegister& fn,
                                         const FPRegister& fm,
                                         FPDataProcessing2SourceOp op) {
-  ASSERT(fd.SizeInBits() == fn.SizeInBits());
-  ASSERT(fd.SizeInBits() == fm.SizeInBits());
+  DCHECK(fd.SizeInBits() == fn.SizeInBits());
+  DCHECK(fd.SizeInBits() == fm.SizeInBits());
   Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd));
 }
 
@@ -2106,7 +2354,7 @@
                                         const FPRegister& fm,
                                         const FPRegister& fa,
                                         FPDataProcessing3SourceOp op) {
-  ASSERT(AreSameSizeAndType(fd, fn, fm, fa));
+  DCHECK(AreSameSizeAndType(fd, fn, fm, fa));
   Emit(FPType(fd) | op | Rm(fm) | Rn(fn) | Rd(fd) | Ra(fa));
 }
 
@@ -2138,7 +2386,7 @@
                                 const Register& rn,
                                 Extend extend,
                                 unsigned left_shift) {
-  ASSERT(rd.SizeInBits() >= rn.SizeInBits());
+  DCHECK(rd.SizeInBits() >= rn.SizeInBits());
   unsigned reg_size = rd.SizeInBits();
   // Use the correct size of register.
   Register rn_ = Register::Create(rn.code(), rd.SizeInBits());
@@ -2157,7 +2405,7 @@
       case SXTW: sbfm(rd, rn_, non_shift_bits, high_bit); break;
       case UXTX:
       case SXTX: {
-        ASSERT(rn.SizeInBits() == kXRegSizeInBits);
+        DCHECK(rn.SizeInBits() == kXRegSizeInBits);
         // Nothing to extend. Just shift.
         lsl(rd, rn_, left_shift);
         break;
@@ -2176,9 +2424,9 @@
                                         const Operand& operand,
                                         FlagsUpdate S,
                                         Instr op) {
-  ASSERT(operand.IsShiftedRegister());
-  ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
-  ASSERT(!operand.NeedsRelocation(this));
+  DCHECK(operand.IsShiftedRegister());
+  DCHECK(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
+  DCHECK(!operand.NeedsRelocation(this));
   Emit(SF(rd) | op | Flags(S) |
        ShiftDP(operand.shift()) | ImmDPShift(operand.shift_amount()) |
        Rm(operand.reg()) | Rn(rn) | Rd(rd));
@@ -2190,7 +2438,7 @@
                                          const Operand& operand,
                                          FlagsUpdate S,
                                          Instr op) {
-  ASSERT(!operand.NeedsRelocation(this));
+  DCHECK(!operand.NeedsRelocation(this));
   Instr dest_reg = (S == SetFlags) ? Rd(rd) : RdSP(rd);
   Emit(SF(rd) | op | Flags(S) | Rm(operand.reg()) |
        ExtendMode(operand.extend()) | ImmExtendShift(operand.shift_amount()) |
@@ -2207,7 +2455,7 @@
                           const MemOperand& addr,
                           LoadStoreOp op) {
   Instr memop = op | Rt(rt) | RnSP(addr.base());
-  ptrdiff_t offset = addr.offset();
+  int64_t offset = addr.offset();
 
   if (addr.IsImmediateOffset()) {
     LSDataSize size = CalcLSDataSize(op);
@@ -2234,18 +2482,18 @@
 
     // Shifts are encoded in one bit, indicating a left shift by the memory
     // access size.
-    ASSERT((shift_amount == 0) ||
+    DCHECK((shift_amount == 0) ||
            (shift_amount == static_cast<unsigned>(CalcLSDataSize(op))));
     Emit(LoadStoreRegisterOffsetFixed | memop | Rm(addr.regoffset()) |
          ExtendMode(ext) | ImmShiftLS((shift_amount > 0) ? 1 : 0));
   } else {
     // Pre-index and post-index modes.
-    ASSERT(!rt.Is(addr.base()));
+    DCHECK(!rt.Is(addr.base()));
     if (IsImmLSUnscaled(offset)) {
       if (addr.IsPreIndex()) {
         Emit(LoadStorePreIndexFixed | memop | ImmLS(offset));
       } else {
-        ASSERT(addr.IsPostIndex());
+        DCHECK(addr.IsPostIndex());
         Emit(LoadStorePostIndexFixed | memop | ImmLS(offset));
       }
     } else {
@@ -2256,17 +2504,23 @@
 }
 
 
-bool Assembler::IsImmLSUnscaled(ptrdiff_t offset) {
+bool Assembler::IsImmLSUnscaled(int64_t offset) {
   return is_int9(offset);
 }
 
 
-bool Assembler::IsImmLSScaled(ptrdiff_t offset, LSDataSize size) {
+bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
   bool offset_is_size_multiple = (((offset >> size) << size) == offset);
   return offset_is_size_multiple && is_uint12(offset >> size);
 }
 
 
+bool Assembler::IsImmLSPair(int64_t offset, LSDataSize size) {
+  bool offset_is_size_multiple = (((offset >> size) << size) == offset);
+  return offset_is_size_multiple && is_int7(offset >> size);
+}
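Self-contained versions of the three offset predicates above, assuming size is log2 of the access size in bytes (3 for an X register); handy for checking which addressing form a given offset can use:

#include <cstdint>

static bool IsInt9(int64_t x)   { return (x >= -256) && (x <= 255); }
static bool IsUint12(int64_t x) { return (x >= 0) && (x <= 4095); }
static bool IsInt7(int64_t x)   { return (x >= -64) && (x <= 63); }

// ldur/stur: unscaled signed 9-bit byte offset.
static bool UnscaledOk(int64_t offset) { return IsInt9(offset); }
// ldr/str: unsigned 12-bit offset, scaled by the access size.
static bool ScaledOk(int64_t offset, unsigned size) {
  return (((offset >> size) << size) == offset) && IsUint12(offset >> size);
}
// ldp/stp: signed 7-bit offset, scaled by the access size.
static bool PairOk(int64_t offset, unsigned size) {
  return (((offset >> size) << size) == offset) && IsInt7(offset >> size);
}

For 8-byte accesses this gives ldur/stur a -256..255 byte range, ldr/str 0..32760 in multiples of 8, and ldp/stp -512..504 in multiples of 8.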
+
+
 // Test if a given value can be encoded in the immediate field of a logical
 // instruction.
 // If it can be encoded, the function returns true, and values pointed to by n,
@@ -2279,94 +2533,200 @@
                              unsigned* n,
                              unsigned* imm_s,
                              unsigned* imm_r) {
-  ASSERT((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
-  ASSERT((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
+  DCHECK((n != NULL) && (imm_s != NULL) && (imm_r != NULL));
+  DCHECK((width == kWRegSizeInBits) || (width == kXRegSizeInBits));
+
+  bool negate = false;
 
   // Logical immediates are encoded using parameters n, imm_s and imm_r using
   // the following table:
   //
-  //  N   imms    immr    size        S             R
-  //  1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
-  //  0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
-  //  0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
-  //  0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
-  //  0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
-  //  0  11110s  xxxxxr     2    UInt(s)       UInt(r)
+  //    N   imms    immr    size        S             R
+  //    1  ssssss  rrrrrr    64    UInt(ssssss)  UInt(rrrrrr)
+  //    0  0sssss  xrrrrr    32    UInt(sssss)   UInt(rrrrr)
+  //    0  10ssss  xxrrrr    16    UInt(ssss)    UInt(rrrr)
+  //    0  110sss  xxxrrr     8    UInt(sss)     UInt(rrr)
+  //    0  1110ss  xxxxrr     4    UInt(ss)      UInt(rr)
+  //    0  11110s  xxxxxr     2    UInt(s)       UInt(r)
   // (s bits must not be all set)
   //
-  // A pattern is constructed of size bits, where the least significant S+1
-  // bits are set. The pattern is rotated right by R, and repeated across a
-  // 32 or 64-bit value, depending on destination register width.
+  // A pattern is constructed of size bits, where the least significant S+1 bits
+  // are set. The pattern is rotated right by R, and repeated across a 32 or
+  // 64-bit value, depending on destination register width.
   //
-  // To test if an arbitary immediate can be encoded using this scheme, an
-  // iterative algorithm is used.
+  // Put another way: the basic format of a logical immediate is a single
+  // contiguous stretch of 1 bits, repeated across the whole word at intervals
+  // given by a power of 2. To identify them quickly, we first locate the
+  // lowest stretch of 1 bits, then the next 1 bit above that; that combination
+  // is different for every logical immediate, so it gives us all the
+  // information we need to identify the only logical immediate that our input
+  // could be, and then we simply check if that's the value we actually have.
   //
-  // TODO(mcapewel) This code does not consider using X/W register overlap to
-  // support 64-bit immediates where the top 32-bits are zero, and the bottom
-  // 32-bits are an encodable logical immediate.
+  // (The rotation parameter does give the possibility of the stretch of 1 bits
+  // going 'round the end' of the word. To deal with that, we observe that in
+  // any situation where that happens the bitwise NOT of the value is also a
+  // valid logical immediate. So we simply invert the input whenever its low bit
+  // is set, and then we know that the rotated case can't arise.)
 
-  // 1. If the value has all set or all clear bits, it can't be encoded.
-  if ((value == 0) || (value == 0xffffffffffffffffUL) ||
-      ((width == kWRegSizeInBits) && (value == 0xffffffff))) {
-    return false;
+  if (value & 1) {
+    // If the low bit is 1, negate the value, and set a flag to remember that we
+    // did (so that we can adjust the return values appropriately).
+    negate = true;
+    value = ~value;
   }
 
-  unsigned lead_zero = CountLeadingZeros(value, width);
-  unsigned lead_one = CountLeadingZeros(~value, width);
-  unsigned trail_zero = CountTrailingZeros(value, width);
-  unsigned trail_one = CountTrailingZeros(~value, width);
-  unsigned set_bits = CountSetBits(value, width);
+  if (width == kWRegSizeInBits) {
+    // To handle 32-bit logical immediates, the very easiest thing is to repeat
+    // the input value twice to make a 64-bit word. The correct encoding of that
+    // as a logical immediate will also be the correct encoding of the 32-bit
+    // value.
 
-  // The fixed bits in the immediate s field.
-  // If width == 64 (X reg), start at 0xFFFFFF80.
-  // If width == 32 (W reg), start at 0xFFFFFFC0, as the iteration for 64-bit
-  // widths won't be executed.
-  int imm_s_fixed = (width == kXRegSizeInBits) ? -128 : -64;
-  int imm_s_mask = 0x3F;
+    // The most-significant 32 bits may not be zero (i.e. negate is true), so
+    // shift the value left before duplicating it.
+    value <<= kWRegSizeInBits;
+    value |= value >> kWRegSizeInBits;
+  }
 
-  for (;;) {
-    // 2. If the value is two bits wide, it can be encoded.
-    if (width == 2) {
-      *n = 0;
-      *imm_s = 0x3C;
-      *imm_r = (value & 3) - 1;
-      return true;
-    }
+  // The basic analysis idea: imagine our input word looks like this.
+  //
+  //    0011111000111110001111100011111000111110001111100011111000111110
+  //                                                          c  b    a
+  //                                                          |<--d-->|
+  //
+  // We find the lowest set bit (as an actual power-of-2 value, not its index)
+  // and call it a. Then we add a to our original number, which wipes out the
+  // bottommost stretch of set bits and replaces it with a 1 carried into the
+  // next zero bit. Then we look for the new lowest set bit, which is in
+  // position b, and subtract it, so now our number is just like the original
+  // but with the lowest stretch of set bits completely gone. Now we find the
+  // lowest set bit again, which is position c in the diagram above. Then we'll
+  // measure the distance d between bit positions a and c (using CLZ), and that
+  // tells us that the only valid logical immediate that could possibly be equal
+  // to this number is the one in which a stretch of bits running from a to just
+  // below b is replicated every d bits.
+  uint64_t a = LargestPowerOf2Divisor(value);
+  uint64_t value_plus_a = value + a;
+  uint64_t b = LargestPowerOf2Divisor(value_plus_a);
+  uint64_t value_plus_a_minus_b = value_plus_a - b;
+  uint64_t c = LargestPowerOf2Divisor(value_plus_a_minus_b);
 
-    *n = (width == 64) ? 1 : 0;
-    *imm_s = ((imm_s_fixed | (set_bits - 1)) & imm_s_mask);
-    if ((lead_zero + set_bits) == width) {
-      *imm_r = 0;
+  int d, clz_a, out_n;
+  uint64_t mask;
+
+  if (c != 0) {
+    // The general case, in which there is more than one stretch of set bits.
+    // Compute the repeat distance d, and set up a bitmask covering the basic
+    // unit of repetition (i.e. a word with the bottom d bits set). Also, in all
+    // of these cases the N bit of the output will be zero.
+    clz_a = CountLeadingZeros(a, kXRegSizeInBits);
+    int clz_c = CountLeadingZeros(c, kXRegSizeInBits);
+    d = clz_a - clz_c;
+    mask = ((V8_UINT64_C(1) << d) - 1);
+    out_n = 0;
+  } else {
+    // Handle degenerate cases.
+    //
+    // If any of those 'find lowest set bit' operations didn't find a set bit at
+    // all, then the word will have been zero thereafter, so in particular the
+    // last lowest_set_bit operation will have returned zero. So we can test for
+    // all the special case conditions in one go by seeing if c is zero.
+    if (a == 0) {
+      // The input was zero (or all 1 bits, which will come to here too after we
+      // inverted it at the start of the function), for which we just return
+      // false.
+      return false;
     } else {
-      *imm_r = (lead_zero > 0) ? (width - trail_zero) : lead_one;
+      // Otherwise, if c was zero but a was not, then there's just one stretch
+      // of set bits in our word, meaning that we have the trivial case of
+      // d == 64 and only one 'repetition'. Set up all the same variables as in
+      // the general case above, and set the N bit in the output.
+      clz_a = CountLeadingZeros(a, kXRegSizeInBits);
+      d = 64;
+      mask = ~V8_UINT64_C(0);
+      out_n = 1;
     }
+  }
 
-    // 3. If the sum of leading zeros, trailing zeros and set bits is equal to
-    //    the bit width of the value, it can be encoded.
-    if (lead_zero + trail_zero + set_bits == width) {
-      return true;
-    }
-
-    // 4. If the sum of leading ones, trailing ones and unset bits in the
-    //    value is equal to the bit width of the value, it can be encoded.
-    if (lead_one + trail_one + (width - set_bits) == width) {
-      return true;
-    }
-
-    // 5. If the most-significant half of the bitwise value is equal to the
-    //    least-significant half, return to step 2 using the least-significant
-    //    half of the value.
-    uint64_t mask = (1UL << (width >> 1)) - 1;
-    if ((value & mask) == ((value >> (width >> 1)) & mask)) {
-      width >>= 1;
-      set_bits >>= 1;
-      imm_s_fixed >>= 1;
-      continue;
-    }
-
-    // 6. Otherwise, the value can't be encoded.
+  // If the repeat period d is not a power of two, it can't be encoded.
+  if (!IS_POWER_OF_TWO(d)) {
     return false;
   }
+
+  if (((b - a) & ~mask) != 0) {
+    // If the bit stretch (b - a) does not fit within the mask derived from the
+    // repeat period, then fail.
+    return false;
+  }
+
+  // The only possible option is b - a repeated every d bits. Now we're going to
+  // actually construct the valid logical immediate derived from that
+  // specification, and see if it equals our original input.
+  //
+  // To repeat a value every d bits, we multiply it by a number of the form
+  // (1 + 2^d + 2^(2d) + ...), i.e. 0x0001000100010001 or similar. These can
+  // be derived using a table lookup on CLZ(d).
+  static const uint64_t multipliers[] = {
+    0x0000000000000001UL,
+    0x0000000100000001UL,
+    0x0001000100010001UL,
+    0x0101010101010101UL,
+    0x1111111111111111UL,
+    0x5555555555555555UL,
+  };
+  int multiplier_idx = CountLeadingZeros(d, kXRegSizeInBits) - 57;
+  // Ensure that the index to the multipliers array is within bounds.
+  DCHECK((multiplier_idx >= 0) &&
+         (static_cast<size_t>(multiplier_idx) < arraysize(multipliers)));
+  uint64_t multiplier = multipliers[multiplier_idx];
+  uint64_t candidate = (b - a) * multiplier;
+
+  if (value != candidate) {
+    // The candidate pattern doesn't match our input value, so fail.
+    return false;
+  }
+
+  // We have a match! This is a valid logical immediate, so now we have to
+  // construct the bits and pieces of the instruction encoding that generates
+  // it.
+
+  // Count the set bits in our basic stretch. The special case of clz(0) == -1
+  // makes the answer come out right for stretches that reach the very top of
+  // the word (e.g. numbers like 0xffffc00000000000).
+  int clz_b = (b == 0) ? -1 : CountLeadingZeros(b, kXRegSizeInBits);
+  int s = clz_a - clz_b;
+
+  // Decide how many bits to rotate right by, to put the low bit of that basic
+  // stretch in position a.
+  int r;
+  if (negate) {
+    // If we inverted the input right at the start of this function, here's
+    // where we compensate: the number of set bits becomes the number of clear
+    // bits, and the rotation count is based on position b rather than position
+    // a (since b is the location of the 'lowest' 1 bit after inversion).
+    s = d - s;
+    r = (clz_b + 1) & (d - 1);
+  } else {
+    r = (clz_a + 1) & (d - 1);
+  }
+
+  // Now we're done, except for having to encode the S output in such a way that
+  // it gives both the number of set bits and the length of the repeated
+  // segment. The s field is encoded like this:
+  //
+  //     imms    size        S
+  //    ssssss    64    UInt(ssssss)
+  //    0sssss    32    UInt(sssss)
+  //    10ssss    16    UInt(ssss)
+  //    110sss     8    UInt(sss)
+  //    1110ss     4    UInt(ss)
+  //    11110s     2    UInt(s)
+  //
+  // So we 'or' (-d << 1) with our computed s to form imms.
+  *n = out_n;
+  *imm_s = ((-d << 1) | (s - 1)) & 0x3f;
+  *imm_r = r;
+
+  return true;
 }
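A runnable trace of the algorithm on one concrete input, 0x0f0f0f0f0f0f0f0f (the low four bits of every byte set). This is an independent sketch: GCC/Clang's __builtin_clzll stands in for CountLeadingZeros, and the multiplier for d == 8 is hardcoded where the code above looks it up by CLZ(d).

#include <cassert>
#include <cstdint>
#include <cstdio>

static int Clz64(uint64_t x) { return x ? __builtin_clzll(x) : 64; }
static uint64_t LowestSetBit(uint64_t x) { return x & (~x + 1); }

int main() {
  uint64_t value = UINT64_C(0x0f0f0f0f0f0f0f0f);
  bool negate = (value & 1) != 0;  // low bit set, so work on the complement
  if (negate) value = ~value;      // 0xf0f0f0f0f0f0f0f0

  uint64_t a = LowestSetBit(value);          // 0x10
  uint64_t b = LowestSetBit(value + a);      // 0x100
  uint64_t c = LowestSetBit(value + a - b);  // 0x1000

  int d = Clz64(a) - Clz64(c);               // repeat distance: 8 bits
  uint64_t mask = (UINT64_C(1) << d) - 1;    // 0xff
  assert(((b - a) & ~mask) == 0);            // the stretch fits in one period

  // 0xf0 replicated into every byte reproduces the (negated) input.
  uint64_t candidate = (b - a) * UINT64_C(0x0101010101010101);
  assert(candidate == value);

  int s = Clz64(a) - Clz64(b);               // 4 set bits per period
  int r;
  if (negate) { s = d - s; r = (Clz64(b) + 1) & (d - 1); }
  else        { r = (Clz64(a) + 1) & (d - 1); }

  unsigned imms = ((-d << 1) | (s - 1)) & 0x3f;
  printf("N=0 imms=0x%x immr=%d\n", imms, r);  // N=0 imms=0x33 immr=0
  return 0;
}

It prints N=0 imms=0x33 immr=0, i.e. the 110sss row of the table: an 8-bit element with four set bits and no rotation.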
 
 
@@ -2429,9 +2789,7 @@
 
   // Compute new buffer size.
   CodeDesc desc;  // the new buffer
-  if (buffer_size_ < 4 * KB) {
-    desc.buffer_size = 4 * KB;
-  } else if (buffer_size_ < 1 * MB) {
+  if (buffer_size_ < 1 * MB) {
     desc.buffer_size = 2 * buffer_size_;
   } else {
     desc.buffer_size = buffer_size_ + 1 * MB;
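The revised growth policy in one hedged helper: the old 4 KB floor is gone, buffers double while small and then grow by fixed 1 MB steps to bound slack on large ones.

// A sketch of the size computation only; MB here is 1 << 20.
static int NextBufferSize(int current_size) {
  const int MB = 1 << 20;
  if (current_size < 1 * MB) return 2 * current_size;  // geometric phase
  return current_size + 1 * MB;                        // linear phase
}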
@@ -2466,15 +2824,7 @@
   // buffer nor pc absolute pointing inside the code buffer, so there is no need
   // to relocate any emitted relocation entries.
 
-  // Relocate pending relocation entries.
-  for (int i = 0; i < num_pending_reloc_info_; i++) {
-    RelocInfo& rinfo = pending_reloc_info_[i];
-    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-           rinfo.rmode() != RelocInfo::POSITION);
-    if (rinfo.rmode() != RelocInfo::JS_RETURN) {
-      rinfo.set_pc(rinfo.pc() + pc_delta);
-    }
-  }
+  // Pending relocation entries are also relative, no need to relocate.
 }
 
 
@@ -2486,7 +2836,7 @@
       (rmode == RelocInfo::CONST_POOL) ||
       (rmode == RelocInfo::VENEER_POOL)) {
     // Adjust code for new modes.
-    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
            || RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode)
@@ -2494,11 +2844,7 @@
            || RelocInfo::IsVeneerPool(rmode));
     // These modes do not need an entry in the constant pool.
   } else {
-    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
-    if (num_pending_reloc_info_ == 0) {
-      first_const_pool_use_ = pc_offset();
-    }
-    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+    constpool_.RecordEntry(data, rmode);
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
     BlockConstPoolFor(1);
@@ -2510,7 +2856,7 @@
         !serializer_enabled() && !emit_debug_code()) {
       return;
     }
-    ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
       RelocInfo reloc_info_with_ast_id(
           reinterpret_cast<byte*>(pc_), rmode, RecordedAstId().ToInt(), NULL);
@@ -2526,11 +2872,9 @@
 void Assembler::BlockConstPoolFor(int instructions) {
   int pc_limit = pc_offset() + instructions * kInstructionSize;
   if (no_const_pool_before_ < pc_limit) {
-    // If there are some pending entries, the constant pool cannot be blocked
-    // further than first_const_pool_use_ + kMaxDistToConstPool
-    ASSERT((num_pending_reloc_info_ == 0) ||
-           (pc_limit < (first_const_pool_use_ + kMaxDistToConstPool)));
     no_const_pool_before_ = pc_limit;
+    // Make sure the pool won't be blocked for too long.
+    DCHECK(pc_limit < constpool_.MaxPcOffset());
   }
 
   if (next_constant_pool_check_ < no_const_pool_before_) {
@@ -2545,111 +2889,53 @@
   // BlockConstPoolScope.
   if (is_const_pool_blocked()) {
     // Something is wrong if emission is forced and blocked at the same time.
-    ASSERT(!force_emit);
+    DCHECK(!force_emit);
     return;
   }
 
   // There is nothing to do if there are no pending constant pool entries.
-  if (num_pending_reloc_info_ == 0)  {
+  if (constpool_.IsEmpty())  {
     // Calculate the offset of the next check.
-    next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
+    SetNextConstPoolCheckIn(kCheckConstPoolInterval);
     return;
   }
 
   // We emit a constant pool when:
   //  * requested to do so by parameter force_emit (e.g. after each function).
   //  * the distance to the first instruction accessing the constant pool is
-  //    kAvgDistToConstPool or more.
-  //  * no jump is required and the distance to the first instruction accessing
-  //    the constant pool is at least kMaxDistToPConstool / 2.
-  ASSERT(first_const_pool_use_ >= 0);
-  int dist = pc_offset() - first_const_pool_use_;
-  if (!force_emit && dist < kAvgDistToConstPool &&
-      (require_jump || (dist < (kMaxDistToConstPool / 2)))) {
+  //    kApproxMaxDistToConstPool or more.
+  //  * the number of entries in the pool is kApproxMaxPoolEntryCount or more.
+  int dist = constpool_.DistanceToFirstUse();
+  int count = constpool_.EntryCount();
+  if (!force_emit &&
+      (dist < kApproxMaxDistToConstPool) &&
+      (count < kApproxMaxPoolEntryCount)) {
     return;
   }
 
-  int jump_instr = require_jump ? kInstructionSize : 0;
-  int size_pool_marker = kInstructionSize;
-  int size_pool_guard = kInstructionSize;
-  int pool_size = jump_instr + size_pool_marker + size_pool_guard +
-    num_pending_reloc_info_ * kPointerSize;
-  int needed_space = pool_size + kGap;
 
   // Emit veneers for branches that would go out of range during emission of the
   // constant pool.
-  CheckVeneerPool(false, require_jump, kVeneerDistanceMargin + pool_size);
-
-  Label size_check;
-  bind(&size_check);
+  int worst_case_size = constpool_.WorstCaseSize();
+  CheckVeneerPool(false, require_jump,
+                  kVeneerDistanceMargin + worst_case_size);
 
   // Check that the code buffer is large enough before emitting the constant
-  // pool (include the jump over the pool, the constant pool marker, the
-  // constant pool guard, and the gap to the relocation information).
+  // pool (this includes the gap to the relocation information).
+  int needed_space = worst_case_size + kGap + 1 * kInstructionSize;
   while (buffer_space() <= needed_space) {
     GrowBuffer();
   }
 
-  {
-    // Block recursive calls to CheckConstPool and protect from veneer pools.
-    BlockPoolsScope block_pools(this);
-    RecordConstPool(pool_size);
-
-    // Emit jump over constant pool if necessary.
-    Label after_pool;
-    if (require_jump) {
-      b(&after_pool);
-    }
-
-    // Emit a constant pool header. The header has two goals:
-    //  1) Encode the size of the constant pool, for use by the disassembler.
-    //  2) Terminate the program, to try to prevent execution from accidentally
-    //     flowing into the constant pool.
-    // The header is therefore made of two arm64 instructions:
-    //   ldr xzr, #<size of the constant pool in 32-bit words>
-    //   blr xzr
-    // If executed the code will likely segfault and lr will point to the
-    // beginning of the constant pool.
-    // TODO(all): currently each relocated constant is 64 bits, consider adding
-    // support for 32-bit entries.
-    RecordComment("[ Constant Pool");
-    ConstantPoolMarker(2 * num_pending_reloc_info_);
-    ConstantPoolGuard();
-
-    // Emit constant pool entries.
-    for (int i = 0; i < num_pending_reloc_info_; i++) {
-      RelocInfo& rinfo = pending_reloc_info_[i];
-      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-             rinfo.rmode() != RelocInfo::POSITION &&
-             rinfo.rmode() != RelocInfo::STATEMENT_POSITION &&
-             rinfo.rmode() != RelocInfo::CONST_POOL &&
-             rinfo.rmode() != RelocInfo::VENEER_POOL);
-
-      Instruction* instr = reinterpret_cast<Instruction*>(rinfo.pc());
-      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
-      ASSERT(instr->IsLdrLiteral() &&
-             instr->ImmLLiteral() == 0);
-
-      instr->SetImmPCOffsetTarget(reinterpret_cast<Instruction*>(pc_));
-      dc64(rinfo.data());
-    }
-
-    num_pending_reloc_info_ = 0;
-    first_const_pool_use_ = -1;
-
-    RecordComment("]");
-
-    if (after_pool.is_linked()) {
-      bind(&after_pool);
-    }
-  }
+  Label size_check;
+  bind(&size_check);
+  constpool_.Emit(require_jump);
+  DCHECK(SizeOfCodeGeneratedSince(&size_check) <=
+         static_cast<unsigned>(worst_case_size));
 
   // Since a constant pool was just emitted, move the check offset forward by
   // the standard interval.
-  next_constant_pool_check_ = pc_offset() + kCheckConstPoolInterval;
-
-  ASSERT(SizeOfCodeGeneratedSince(&size_check) ==
-         static_cast<unsigned>(pool_size));
+  SetNextConstPoolCheckIn(kCheckConstPoolInterval);
 }
 
 
@@ -2709,7 +2995,7 @@
       branch->SetImmPCOffsetTarget(veneer);
       b(label);
 #ifdef DEBUG
-      ASSERT(SizeOfCodeGeneratedSince(&veneer_size_check) <=
+      DCHECK(SizeOfCodeGeneratedSince(&veneer_size_check) <=
              static_cast<uint64_t>(kMaxVeneerCodeSize));
       veneer_size_check.Unuse();
 #endif
@@ -2742,17 +3028,17 @@
                                 int margin) {
   // There is nothing to do if there are no pending veneer pool entries.
   if (unresolved_branches_.empty())  {
-    ASSERT(next_veneer_pool_check_ == kMaxInt);
+    DCHECK(next_veneer_pool_check_ == kMaxInt);
     return;
   }
 
-  ASSERT(pc_offset() < unresolved_branches_first_limit());
+  DCHECK(pc_offset() < unresolved_branches_first_limit());
 
  // Some short instruction sequences mustn't be broken up by veneer pool
  // emission; such sequences are protected by calls to BlockVeneerPoolFor and
   // BlockVeneerPoolScope.
   if (is_veneer_pool_blocked()) {
-    ASSERT(!force_emit);
+    DCHECK(!force_emit);
     return;
   }
 
@@ -2805,43 +3091,24 @@
 
 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return isolate->factory()->empty_constant_pool_array();
 }
 
 
 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return;
 }
 
 
-void PatchingAssembler::MovInt64(const Register& rd, int64_t imm) {
-  Label start;
-  bind(&start);
-
-  ASSERT(rd.Is64Bits());
-  ASSERT(!rd.IsSP());
-
-  for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
-    uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
-    movk(rd, imm16, 16 * i);
-  }
-
-  ASSERT(SizeOfCodeGeneratedSince(&start) ==
-         kMovInt64NInstrs * kInstructionSize);
-}
-
-
-void PatchingAssembler::PatchAdrFar(Instruction* target) {
+void PatchingAssembler::PatchAdrFar(int64_t target_offset) {
   // The code at the current instruction should be:
   //   adr  rd, 0
   //   nop  (adr_far)
   //   nop  (adr_far)
-  //   nop  (adr_far)
   //   movz scratch, 0
-  //   add  rd, rd, scratch
 
   // Verify the expected code.
   Instruction* expected_adr = InstructionAt(0);
@@ -2851,39 +3118,21 @@
     CHECK(InstructionAt((i + 1) * kInstructionSize)->IsNop(ADR_FAR_NOP));
   }
   Instruction* expected_movz =
-      InstructionAt((kAdrFarPatchableNInstrs - 2) * kInstructionSize);
+      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
   CHECK(expected_movz->IsMovz() &&
         (expected_movz->ImmMoveWide() == 0) &&
         (expected_movz->ShiftMoveWide() == 0));
   int scratch_code = expected_movz->Rd();
-  Instruction* expected_add =
-      InstructionAt((kAdrFarPatchableNInstrs - 1) * kInstructionSize);
-  CHECK(expected_add->IsAddSubShifted() &&
-        (expected_add->Mask(AddSubOpMask) == ADD) &&
-        expected_add->SixtyFourBits() &&
-        (expected_add->Rd() == rd_code) && (expected_add->Rn() == rd_code) &&
-        (expected_add->Rm() == scratch_code) &&
-        (static_cast<Shift>(expected_add->ShiftDP()) == LSL) &&
-        (expected_add->ImmDPShift() == 0));
 
   // Patch to load the correct address.
-  Label start;
-  bind(&start);
   Register rd = Register::XRegFromCode(rd_code);
-  // If the target is in range, we only patch the adr. Otherwise we patch the
-  // nops with fixup instructions.
-  int target_offset = expected_adr->DistanceTo(target);
-  if (Instruction::IsValidPCRelOffset(target_offset)) {
-    adr(rd, target_offset);
-    for (int i = 0; i < kAdrFarPatchableNInstrs - 2; ++i) {
-      nop(ADR_FAR_NOP);
-    }
-  } else {
-    Register scratch = Register::XRegFromCode(scratch_code);
-    adr(rd, 0);
-    MovInt64(scratch, target_offset);
-    add(rd, rd, scratch);
-  }
+  Register scratch = Register::XRegFromCode(scratch_code);
+  // Addresses are only 48 bits.
+  adr(rd, target_offset & 0xFFFF);
+  movz(scratch, (target_offset >> 16) & 0xFFFF, 16);
+  movk(scratch, (target_offset >> 32) & 0xFFFF, 32);
+  DCHECK((target_offset >> 48) == 0);
+  add(rd, rd, scratch);
 }
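A quick sanity check of the 48-bit split used in the patched sequence: the adr supplies the low 16 bits, movz/movk build bits [47:16] in the scratch register, and the final add recombines them. The offset below is illustrative only.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t target_offset = UINT64_C(0x0000123456789abc);
  assert((target_offset >> 48) == 0);  // addresses are only 48 bits

  uint64_t adr_part = target_offset & 0xffff;                   // adr rd, #lo16
  uint64_t scratch  = ((target_offset >> 16) & 0xffff) << 16;   // movz ..., lsl 16
  scratch          |= ((target_offset >> 32) & 0xffff) << 32;   // movk ..., lsl 32

  assert(adr_part + scratch == target_offset);  // add rd, rd, scratch
  return 0;
}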
 
 
diff --git a/src/arm64/assembler-arm64.h b/src/arm64/assembler-arm64.h
index c0ad4d0..82b4500 100644
--- a/src/arm64/assembler-arm64.h
+++ b/src/arm64/assembler-arm64.h
@@ -7,13 +7,13 @@
 
 #include <list>
 #include <map>
+#include <vector>
 
-#include "src/cpu.h"
-#include "src/globals.h"
-#include "src/utils.h"
-#include "src/assembler.h"
-#include "src/serialize.h"
 #include "src/arm64/instructions-arm64.h"
+#include "src/assembler.h"
+#include "src/globals.h"
+#include "src/serialize.h"
+#include "src/utils.h"
 
 
 namespace v8 {
@@ -106,18 +106,18 @@
     reg_code = r.reg_code;
     reg_size = r.reg_size;
     reg_type = r.reg_type;
-    ASSERT(IsValidOrNone());
+    DCHECK(IsValidOrNone());
   }
 
   Register(const Register& r) {  // NOLINT(runtime/explicit)
     reg_code = r.reg_code;
     reg_size = r.reg_size;
     reg_type = r.reg_type;
-    ASSERT(IsValidOrNone());
+    DCHECK(IsValidOrNone());
   }
 
   bool IsValid() const {
-    ASSERT(IsRegister() || IsNone());
+    DCHECK(IsRegister() || IsNone());
     return IsValidRegister();
   }
 
@@ -169,7 +169,7 @@
   }
 
   static Register FromAllocationIndex(unsigned index) {
-    ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
     // cp is the last allocatable register.
     if (index == (static_cast<unsigned>(NumAllocatableRegisters() - 1))) {
       return from_code(kAllocatableContext);
@@ -182,8 +182,8 @@
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
-    ASSERT((kAllocatableLowRangeBegin == 0) &&
+    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
+    DCHECK((kAllocatableLowRangeBegin == 0) &&
            (kAllocatableLowRangeEnd == 15) &&
            (kAllocatableHighRangeBegin == 18) &&
            (kAllocatableHighRangeEnd == 24) &&
@@ -199,7 +199,7 @@
   }
 
   static int ToAllocationIndex(Register reg) {
-    ASSERT(reg.IsAllocatable());
+    DCHECK(reg.IsAllocatable());
     unsigned code = reg.code();
     if (code == kAllocatableContext) {
       return NumAllocatableRegisters() - 1;
@@ -235,18 +235,18 @@
     reg_code = r.reg_code;
     reg_size = r.reg_size;
     reg_type = r.reg_type;
-    ASSERT(IsValidOrNone());
+    DCHECK(IsValidOrNone());
   }
 
   FPRegister(const FPRegister& r) {  // NOLINT(runtime/explicit)
     reg_code = r.reg_code;
     reg_size = r.reg_size;
     reg_type = r.reg_type;
-    ASSERT(IsValidOrNone());
+    DCHECK(IsValidOrNone());
   }
 
   bool IsValid() const {
-    ASSERT(IsFPRegister() || IsNone());
+    DCHECK(IsFPRegister() || IsNone());
     return IsValidFPRegister();
   }
 
@@ -282,7 +282,7 @@
   }
 
   static FPRegister FromAllocationIndex(unsigned int index) {
-    ASSERT(index < static_cast<unsigned>(NumAllocatableRegisters()));
+    DCHECK(index < static_cast<unsigned>(NumAllocatableRegisters()));
 
     return (index <= kAllocatableLowRangeEnd)
         ? from_code(index)
@@ -290,8 +290,8 @@
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT((index >= 0) && (index < NumAllocatableRegisters()));
-    ASSERT((kAllocatableLowRangeBegin == 0) &&
+    DCHECK((index >= 0) && (index < NumAllocatableRegisters()));
+    DCHECK((kAllocatableLowRangeBegin == 0) &&
            (kAllocatableLowRangeEnd == 14) &&
            (kAllocatableHighRangeBegin == 16) &&
            (kAllocatableHighRangeEnd == 28));
@@ -305,7 +305,7 @@
   }
 
   static int ToAllocationIndex(FPRegister reg) {
-    ASSERT(reg.IsAllocatable());
+    DCHECK(reg.IsAllocatable());
     unsigned code = reg.code();
 
     return (code <= kAllocatableLowRangeEnd)
@@ -451,40 +451,40 @@
                       CPURegister reg4 = NoCPUReg)
       : list_(reg1.Bit() | reg2.Bit() | reg3.Bit() | reg4.Bit()),
         size_(reg1.SizeInBits()), type_(reg1.type()) {
-    ASSERT(AreSameSizeAndType(reg1, reg2, reg3, reg4));
-    ASSERT(IsValid());
+    DCHECK(AreSameSizeAndType(reg1, reg2, reg3, reg4));
+    DCHECK(IsValid());
   }
 
   CPURegList(CPURegister::RegisterType type, unsigned size, RegList list)
       : list_(list), size_(size), type_(type) {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
   }
 
   CPURegList(CPURegister::RegisterType type, unsigned size,
              unsigned first_reg, unsigned last_reg)
       : size_(size), type_(type) {
-    ASSERT(((type == CPURegister::kRegister) &&
+    DCHECK(((type == CPURegister::kRegister) &&
             (last_reg < kNumberOfRegisters)) ||
            ((type == CPURegister::kFPRegister) &&
             (last_reg < kNumberOfFPRegisters)));
-    ASSERT(last_reg >= first_reg);
+    DCHECK(last_reg >= first_reg);
     list_ = (1UL << (last_reg + 1)) - 1;
     list_ &= ~((1UL << first_reg) - 1);
-    ASSERT(IsValid());
+    DCHECK(IsValid());
   }
 
   CPURegister::RegisterType type() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return type_;
   }
 
   RegList list() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return list_;
   }
 
   inline void set_list(RegList new_list) {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     list_ = new_list;
   }
 
@@ -529,7 +529,7 @@
   static CPURegList GetSafepointSavedRegisters();
 
   bool IsEmpty() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return list_ == 0;
   }
 
@@ -537,7 +537,7 @@
                        const CPURegister& other2 = NoCPUReg,
                        const CPURegister& other3 = NoCPUReg,
                        const CPURegister& other4 = NoCPUReg) const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     RegList list = 0;
     if (!other1.IsNone() && (other1.type() == type_)) list |= other1.Bit();
     if (!other2.IsNone() && (other2.type() == type_)) list |= other2.Bit();
@@ -547,23 +547,23 @@
   }
 
   int Count() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return CountSetBits(list_, kRegListSizeInBits);
   }
 
   unsigned RegisterSizeInBits() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return size_;
   }
 
   unsigned RegisterSizeInBytes() const {
     int size_in_bits = RegisterSizeInBits();
-    ASSERT((size_in_bits % kBitsPerByte) == 0);
+    DCHECK((size_in_bits % kBitsPerByte) == 0);
     return size_in_bits / kBitsPerByte;
   }
 
   unsigned TotalSizeInBytes() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return RegisterSizeInBytes() * Count();
   }
 
@@ -697,9 +697,9 @@
 // MemOperand represents a memory operand in a load or store instruction.
 class MemOperand {
  public:
-  inline explicit MemOperand();
+  inline MemOperand();
   inline explicit MemOperand(Register base,
-                             ptrdiff_t offset = 0,
+                             int64_t offset = 0,
                              AddrMode addrmode = Offset);
   inline explicit MemOperand(Register base,
                              Register regoffset,
@@ -715,7 +715,7 @@
 
   const Register& base() const { return base_; }
   const Register& regoffset() const { return regoffset_; }
-  ptrdiff_t offset() const { return offset_; }
+  int64_t offset() const { return offset_; }
   AddrMode addrmode() const { return addrmode_; }
   Shift shift() const { return shift_; }
   Extend extend() const { return extend_; }
@@ -729,10 +729,20 @@
   // handle indexed modes.
   inline Operand OffsetAsOperand() const;
 
+  enum PairResult {
+    kNotPair,   // Can't use a pair instruction.
+    kPairAB,    // Can use a pair instruction (operandA has lower address).
+    kPairBA     // Can use a pair instruction (operandB has lower address).
+  };
+  // Check if two MemOperand are consistent for stp/ldp use.
+  static PairResult AreConsistentForPair(const MemOperand& operandA,
+                                         const MemOperand& operandB,
+                                         int access_size_log2 = kXRegSizeLog2);
+
  private:
   Register base_;
   Register regoffset_;
-  ptrdiff_t offset_;
+  int64_t offset_;
   AddrMode addrmode_;
   Shift shift_;
   Extend extend_;
@@ -740,6 +750,55 @@
 };
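An assumed sketch of what AreConsistentForPair has to establish (the implementation is not shown in this diff): the same base register and offsets exactly one access apart, with the direction deciding which operand takes the lower address. A full version would also require the folded offset to satisfy IsImmLSPair. All names below are illustrative.

#include <cstdint>

enum PairResult { kNotPair, kPairAB, kPairBA };

struct SimpleOperand {
  int base_code;   // register code of the base register
  int64_t offset;  // immediate byte offset
};

static PairResult AreConsistentForPairSketch(const SimpleOperand& a,
                                             const SimpleOperand& b,
                                             int access_size_log2) {
  if (a.base_code != b.base_code) return kNotPair;
  int64_t size = INT64_C(1) << access_size_log2;
  if (b.offset - a.offset == size) return kPairAB;  // a is the lower address
  if (a.offset - b.offset == size) return kPairBA;  // b is the lower address
  return kNotPair;
}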
 
 
+class ConstPool {
+ public:
+  explicit ConstPool(Assembler* assm)
+      : assm_(assm),
+        first_use_(-1),
+        shared_entries_count(0) {}
+  void RecordEntry(intptr_t data, RelocInfo::Mode mode);
+  int EntryCount() const {
+    return shared_entries_count + unique_entries_.size();
+  }
+  bool IsEmpty() const {
+    return shared_entries_.empty() && unique_entries_.empty();
+  }
+  // Distance in bytes between the current pc and the first instruction
+  // using the pool. If there are no pending entries, returns kMaxInt.
+  int DistanceToFirstUse();
+  // Offset after which instructions using the pool will be out of range.
+  int MaxPcOffset();
+  // Maximum size the constant pool can be with current entries. It always
+  // includes alignment padding and the branch over the pool.
+  int WorstCaseSize();
+  // Size in bytes of the literal pool *if* it is emitted at the current
+  // pc. The size will include the branch over the pool if it was requested.
+  int SizeIfEmittedAtCurrentPc(bool require_jump);
+  // Emit the literal pool at the current pc with a branch over the pool if
+  // requested.
+  void Emit(bool require_jump);
+  // Discard any pending pool entries.
+  void Clear();
+
+ private:
+  bool CanBeShared(RelocInfo::Mode mode);
+  void EmitMarker();
+  void EmitGuard();
+  void EmitEntries();
+
+  Assembler* assm_;
+  // Keep track of the first instruction requiring a constant pool entry
+  // since the previous constant pool was emitted.
+  int first_use_;
+  // Values and pc offsets of entries which can be shared.
+  std::multimap<uint64_t, int> shared_entries_;
+  // Number of distinct literals in shared entries.
+  int shared_entries_count;
+  // Values and pc offsets of entries which cannot be shared.
+  std::vector<std::pair<uint64_t, int> > unique_entries_;
+};
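A hedged sketch of the bookkeeping this class declares: shareable literals are deduplicated through the multimap (many pc offsets per value, one pool slot), while relocation modes that forbid sharing get one slot per use. MiniConstPool and its members are illustrative, not V8 code.

#include <cstdint>
#include <map>
#include <utility>
#include <vector>

class MiniConstPool {
 public:
  void RecordEntry(uint64_t data, bool can_be_shared, int pc_offset) {
    if (first_use_ < 0) first_use_ = pc_offset;
    if (can_be_shared) {
      if (shared_entries_.count(data) == 0) shared_entries_count_++;
      shared_entries_.emplace(data, pc_offset);       // many pcs, one slot
    } else {
      unique_entries_.emplace_back(data, pc_offset);  // one slot per use
    }
  }
  // Pool slots actually emitted: one per distinct shared value, plus one
  // per unique entry.
  size_t SlotCount() const {
    return shared_entries_count_ + unique_entries_.size();
  }

 private:
  int first_use_ = -1;
  std::multimap<uint64_t, int> shared_entries_;
  size_t shared_entries_count_ = 0;
  std::vector<std::pair<uint64_t, int>> unique_entries_;
};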
+
+
 // -----------------------------------------------------------------------------
 // Assembler.
 
@@ -763,14 +822,14 @@
   virtual ~Assembler();
 
   virtual void AbortedCodeGeneration() {
-    num_pending_reloc_info_ = 0;
+    constpool_.Clear();
   }
 
   // System functions ---------------------------------------------------------
   // Start generating code from the beginning of the buffer, discarding any code
   // and data that has already been emitted into the buffer.
   //
-  // In order to avoid any accidental transfer of state, Reset ASSERTs that the
+  // In order to avoid any accidental transfer of state, Reset DCHECKs that the
   // constant pool is not blocked.
   void Reset();
 
@@ -828,6 +887,9 @@
   // instruction stream that call will return from.
   inline static Address return_address_from_call_start(Address pc);
 
+  // Return the code target address of the patch debug break slot
+  inline static Address break_address_from_return_address(Address pc);
+
   // This sets the branch destination (which is in the constant pool on ARM).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
@@ -854,15 +916,15 @@
 
   // Size of the generated code in bytes
   uint64_t SizeOfGeneratedCode() const {
-    ASSERT((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
+    DCHECK((pc_ >= buffer_) && (pc_ < (buffer_ + buffer_size_)));
     return pc_ - buffer_;
   }
 
   // Return the code size generated from label to the current position.
   uint64_t SizeOfCodeGeneratedSince(const Label* label) {
-    ASSERT(label->is_bound());
-    ASSERT(pc_offset() >= label->pos());
-    ASSERT(pc_offset() < buffer_size_);
+    DCHECK(label->is_bound());
+    DCHECK(pc_offset() >= label->pos());
+    DCHECK(pc_offset() < buffer_size_);
     return pc_offset() - label->pos();
   }
 
@@ -872,8 +934,8 @@
   // TODO(jbramley): Work out what sign to use for these things and if possible,
   // change things to be consistent.
   void AssertSizeOfCodeGeneratedSince(const Label* label, ptrdiff_t size) {
-    ASSERT(size >= 0);
-    ASSERT(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
+    DCHECK(size >= 0);
+    DCHECK(static_cast<uint64_t>(size) == SizeOfCodeGeneratedSince(label));
   }
 
   // Return the number of instructions generated from label to the
@@ -912,9 +974,7 @@
   static bool IsConstantPoolAt(Instruction* instr);
   static int ConstantPoolSizeAt(Instruction* instr);
   // See Assembler::CheckConstPool for more info.
-  void ConstantPoolMarker(uint32_t size);
   void EmitPoolGuard();
-  void ConstantPoolGuard();
 
   // Prevent veneer pool emission until EndBlockVeneerPool is called.
   // Call to this function can be nested but must be followed by an equal
@@ -1157,8 +1217,8 @@
            const Register& rn,
            unsigned lsb,
            unsigned width) {
-    ASSERT(width >= 1);
-    ASSERT(lsb + width <= rn.SizeInBits());
+    DCHECK(width >= 1);
+    DCHECK(lsb + width <= rn.SizeInBits());
     bfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
   }
 
@@ -1167,15 +1227,15 @@
              const Register& rn,
              unsigned lsb,
              unsigned width) {
-    ASSERT(width >= 1);
-    ASSERT(lsb + width <= rn.SizeInBits());
+    DCHECK(width >= 1);
+    DCHECK(lsb + width <= rn.SizeInBits());
     bfm(rd, rn, lsb, lsb + width - 1);
   }
 
   // Sbfm aliases.
   // Arithmetic shift right.
   void asr(const Register& rd, const Register& rn, unsigned shift) {
-    ASSERT(shift < rd.SizeInBits());
+    DCHECK(shift < rd.SizeInBits());
     sbfm(rd, rn, shift, rd.SizeInBits() - 1);
   }
 
@@ -1184,8 +1244,8 @@
              const Register& rn,
              unsigned lsb,
              unsigned width) {
-    ASSERT(width >= 1);
-    ASSERT(lsb + width <= rn.SizeInBits());
+    DCHECK(width >= 1);
+    DCHECK(lsb + width <= rn.SizeInBits());
     sbfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
   }
 
@@ -1194,8 +1254,8 @@
             const Register& rn,
             unsigned lsb,
             unsigned width) {
-    ASSERT(width >= 1);
-    ASSERT(lsb + width <= rn.SizeInBits());
+    DCHECK(width >= 1);
+    DCHECK(lsb + width <= rn.SizeInBits());
     sbfm(rd, rn, lsb, lsb + width - 1);
   }
 
@@ -1218,13 +1278,13 @@
   // Logical shift left.
   void lsl(const Register& rd, const Register& rn, unsigned shift) {
     unsigned reg_size = rd.SizeInBits();
-    ASSERT(shift < reg_size);
+    DCHECK(shift < reg_size);
     ubfm(rd, rn, (reg_size - shift) % reg_size, reg_size - shift - 1);
   }
 
   // Logical shift right.
   void lsr(const Register& rd, const Register& rn, unsigned shift) {
-    ASSERT(shift < rd.SizeInBits());
+    DCHECK(shift < rd.SizeInBits());
     ubfm(rd, rn, shift, rd.SizeInBits() - 1);
   }
 
@@ -1233,8 +1293,8 @@
              const Register& rn,
              unsigned lsb,
              unsigned width) {
-    ASSERT(width >= 1);
-    ASSERT(lsb + width <= rn.SizeInBits());
+    DCHECK(width >= 1);
+    DCHECK(lsb + width <= rn.SizeInBits());
     ubfm(rd, rn, (rd.SizeInBits() - lsb) & (rd.SizeInBits() - 1), width - 1);
   }
 
@@ -1243,8 +1303,8 @@
             const Register& rn,
             unsigned lsb,
             unsigned width) {
-    ASSERT(width >= 1);
-    ASSERT(lsb + width <= rn.SizeInBits());
+    DCHECK(width >= 1);
+    DCHECK(lsb + width <= rn.SizeInBits());
     ubfm(rd, rn, lsb, lsb + width - 1);
   }
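The alias arithmetic above, shown numerically: lsl #shift is ubfm with immr = (size - shift) % size and imms = size - shift - 1, while lsr #shift uses immr = shift, imms = size - 1. A small sketch that prints one mapping:

#include <cstdio>

int main() {
  unsigned reg_size = 64, shift = 3;
  unsigned lsl_immr = (reg_size - shift) % reg_size;  // 61
  unsigned lsl_imms = reg_size - shift - 1;           // 60
  printf("lsl x0, x1, #%u  ==  ubfm x0, x1, #%u, #%u\n",
         shift, lsl_immr, lsl_imms);
  return 0;
}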
 
@@ -1514,7 +1574,7 @@
   };
 
   void nop(NopMarkerTypes n) {
-    ASSERT((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
+    DCHECK((FIRST_NOP_MARKER <= n) && (n <= LAST_NOP_MARKER));
     mov(Register::XRegFromCode(n), Register::XRegFromCode(n));
   }
 
@@ -1673,16 +1733,7 @@
   // Copy a string into the instruction stream, including the terminating NULL
   // character. The instruction pointer (pc_) is then aligned correctly for
   // subsequent instructions.
-  void EmitStringData(const char * string) {
-    size_t len = strlen(string) + 1;
-    ASSERT(RoundUp(len, kInstructionSize) <= static_cast<size_t>(kGap));
-    EmitData(string, len);
-    // Pad with NULL characters until pc_ is aligned.
-    const char pad[] = {'\0', '\0', '\0', '\0'};
-    STATIC_ASSERT(sizeof(pad) == kInstructionSize);
-    byte* next_pc = AlignUp(pc_, kInstructionSize);
-    EmitData(&pad, next_pc - pc_);
-  }
+  void EmitStringData(const char* string);
 
   // Pseudo-instructions ------------------------------------------------------
 
@@ -1695,7 +1746,9 @@
 
   // Code generation helpers --------------------------------------------------
 
-  unsigned num_pending_reloc_info() const { return num_pending_reloc_info_; }
+  bool IsConstPoolEmpty() const { return constpool_.IsEmpty(); }
+
+  Instruction* pc() const { return Instruction::Cast(pc_); }
 
   Instruction* InstructionAt(int offset) const {
     return reinterpret_cast<Instruction*>(buffer_ + offset);
@@ -1707,44 +1760,44 @@
 
   // Register encoding.
   static Instr Rd(CPURegister rd) {
-    ASSERT(rd.code() != kSPRegInternalCode);
+    DCHECK(rd.code() != kSPRegInternalCode);
     return rd.code() << Rd_offset;
   }
 
   static Instr Rn(CPURegister rn) {
-    ASSERT(rn.code() != kSPRegInternalCode);
+    DCHECK(rn.code() != kSPRegInternalCode);
     return rn.code() << Rn_offset;
   }
 
   static Instr Rm(CPURegister rm) {
-    ASSERT(rm.code() != kSPRegInternalCode);
+    DCHECK(rm.code() != kSPRegInternalCode);
     return rm.code() << Rm_offset;
   }
 
   static Instr Ra(CPURegister ra) {
-    ASSERT(ra.code() != kSPRegInternalCode);
+    DCHECK(ra.code() != kSPRegInternalCode);
     return ra.code() << Ra_offset;
   }
 
   static Instr Rt(CPURegister rt) {
-    ASSERT(rt.code() != kSPRegInternalCode);
+    DCHECK(rt.code() != kSPRegInternalCode);
     return rt.code() << Rt_offset;
   }
 
   static Instr Rt2(CPURegister rt2) {
-    ASSERT(rt2.code() != kSPRegInternalCode);
+    DCHECK(rt2.code() != kSPRegInternalCode);
     return rt2.code() << Rt2_offset;
   }
 
   // These encoding functions allow the stack pointer to be encoded, and
   // disallow the zero register.
   static Instr RdSP(Register rd) {
-    ASSERT(!rd.IsZero());
+    DCHECK(!rd.IsZero());
     return (rd.code() & kRegCodeMask) << Rd_offset;
   }
 
   static Instr RnSP(Register rn) {
-    ASSERT(!rn.IsZero());
+    DCHECK(!rn.IsZero());
     return (rn.code() & kRegCodeMask) << Rn_offset;
   }
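 
   // For illustration (not part of the patch): each helper simply places the
   // register code in its instruction field, e.g. Rd(x3) == 3 << Rd_offset.
   // RdSP/RnSP additionally mask the code with kRegCodeMask, so csp (assuming
   // its internal code is the out-of-range sentinel checked above) encodes as
   // 31, the architectural SP/ZR field value.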
 
@@ -1797,6 +1850,9 @@
   inline static Instr ImmBarrierType(int imm2);
   inline static LSDataSize CalcLSDataSize(LoadStoreOp op);
 
+  static bool IsImmLSUnscaled(int64_t offset);
+  static bool IsImmLSScaled(int64_t offset, LSDataSize size);
+
   // Move immediates encoding.
   inline static Instr ImmMoveWide(uint64_t imm);
   inline static Instr ShiftMoveWide(int64_t shift);
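 
   // A sketch of what these predicates check, assuming the standard AArch64
   // load/store immediate fields (signed 9-bit for unscaled offsets, unsigned
   // 12-bit after scaling otherwise):
   //
   //   bool Assembler::IsImmLSUnscaled(int64_t offset) {
   //     return is_int9(offset);
   //   }
   //   bool Assembler::IsImmLSScaled(int64_t offset, LSDataSize size) {
   //     bool offset_is_size_multiple = (((offset >> size) << size) == offset);
   //     return offset_is_size_multiple && is_uint12(offset >> size);
   //   }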
@@ -1880,8 +1936,10 @@
   void LoadStore(const CPURegister& rt,
                  const MemOperand& addr,
                  LoadStoreOp op);
-  static bool IsImmLSUnscaled(ptrdiff_t offset);
-  static bool IsImmLSScaled(ptrdiff_t offset, LSDataSize size);
+
+  void LoadStorePair(const CPURegister& rt, const CPURegister& rt2,
+                     const MemOperand& addr, LoadStorePairOp op);
+  static bool IsImmLSPair(int64_t offset, LSDataSize size);
 
   void Logical(const Register& rd,
                const Register& rn,
@@ -1965,10 +2023,6 @@
                                 const Operand& operand,
                                 FlagsUpdate S,
                                 Instr op);
-  void LoadStorePair(const CPURegister& rt,
-                     const CPURegister& rt2,
-                     const MemOperand& addr,
-                     LoadStorePairOp op);
   void LoadStorePairNonTemporal(const CPURegister& rt,
                                 const CPURegister& rt2,
                                 const MemOperand& addr,
@@ -2019,11 +2073,16 @@
   // instructions.
   void BlockConstPoolFor(int instructions);
 
+  // Set how far from the current pc the next constant pool check will be.
+  void SetNextConstPoolCheckIn(int instructions) {
+    next_constant_pool_check_ = pc_offset() + instructions * kInstructionSize;
+  }
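+
+  // Illustrative use (an assumption about call sites): after emitting a pool,
+  // the next pc_offset()-based check can be deferred by one interval, e.g.
+  //   SetNextConstPoolCheckIn(kCheckConstPoolInterval);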
+
   // Emit the instruction at pc_.
   void Emit(Instr instruction) {
     STATIC_ASSERT(sizeof(*pc_) == 1);
     STATIC_ASSERT(sizeof(instruction) == kInstructionSize);
-    ASSERT((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
+    DCHECK((pc_ + sizeof(instruction)) <= (buffer_ + buffer_size_));
 
     memcpy(pc_, &instruction, sizeof(instruction));
     pc_ += sizeof(instruction);
@@ -2032,8 +2091,8 @@
 
   // Emit data inline in the instruction stream.
   void EmitData(void const * data, unsigned size) {
-    ASSERT(sizeof(*pc_) == 1);
-    ASSERT((pc_ + size) <= (buffer_ + buffer_size_));
+    DCHECK(sizeof(*pc_) == 1);
+    DCHECK((pc_ + size) <= (buffer_ + buffer_size_));
 
     // TODO(all): Somehow register that we have some data here. Then we can
     // disassemble it correctly.
@@ -2050,12 +2109,13 @@
   int next_constant_pool_check_;
 
   // Constant pool generation
-  // Pools are emitted in the instruction stream, preferably after unconditional
-  // jumps or after returns from functions (in dead code locations).
-  // If a long code sequence does not contain unconditional jumps, it is
-  // necessary to emit the constant pool before the pool gets too far from the
-  // location it is accessed from. In this case, we emit a jump over the emitted
-  // constant pool.
+  // Pools are emitted in the instruction stream. They are emitted when:
+  //  * the distance to the first use is above a pre-defined distance, or
+  //  * the number of entries in the pool is above a pre-defined size, or
+  //  * code generation is finished.
+  // If a pool needs to be emitted before code generation is finished, a
+  // branch over the emitted pool will be inserted.
+
   // Constants in the pool may be addresses of functions that get relocated;
   // if so, a relocation info entry is associated with the constant pool entry.
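 
   // An illustrative sketch of the resulting emission decision (an assumption;
   // ConstPool::EntryCount() and ConstPool::DistanceToFirstUse() are presumed
   // accessors, not shown in this header):
   //
   //   void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
   //     if (is_const_pool_blocked() || constpool_.IsEmpty()) return;
   //     if (force_emit ||
   //         (constpool_.EntryCount() > kApproxMaxPoolEntryCount) ||
   //         (constpool_.DistanceToFirstUse() > kApproxMaxDistToConstPool)) {
   //       constpool_.Emit(require_jump);  // May emit a branch over the pool.
   //     }
   //     SetNextConstPoolCheckIn(kCheckConstPoolInterval);
   //   }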
 
@@ -2063,34 +2123,22 @@
   // expensive. By default we only check again once a number of instructions
   // has been generated. That also means that the sizing of the buffers is not
   // an exact science, and that we rely on some slop to not overrun buffers.
-  static const int kCheckConstPoolIntervalInst = 128;
-  static const int kCheckConstPoolInterval =
-    kCheckConstPoolIntervalInst * kInstructionSize;
+  static const int kCheckConstPoolInterval = 128;
 
-  // Constants in pools are accessed via pc relative addressing, which can
-  // reach +/-4KB thereby defining a maximum distance between the instruction
-  // and the accessed constant.
-  static const int kMaxDistToConstPool = 4 * KB;
-  static const int kMaxNumPendingRelocInfo =
-    kMaxDistToConstPool / kInstructionSize;
+  // Distance to first use after which a pool will be emitted. Pool entries
+  // are accessed with a pc-relative load, so this cannot be more than
+  // 1 * MB. Since constant pool emission checks are interval-based, this
+  // value is an approximation.
+  static const int kApproxMaxDistToConstPool = 64 * KB;
 
-
-  // Average distance beetween a constant pool and the first instruction
-  // accessing the constant pool. Longer distance should result in less I-cache
-  // pollution.
-  // In practice the distance will be smaller since constant pool emission is
-  // forced after function return and sometimes after unconditional branches.
-  static const int kAvgDistToConstPool =
-    kMaxDistToConstPool - kCheckConstPoolInterval;
+  // Number of pool entries after which a pool will be emitted. Since constant
+  // pool emission checks are interval-based, this value is an approximation.
+  static const int kApproxMaxPoolEntryCount = 512;
 
   // Emission of the constant pool may be blocked in some code sequences.
   int const_pool_blocked_nesting_;  // Block emission if this is not zero.
   int no_const_pool_before_;  // Block emission before this pc offset.
 
-  // Keep track of the first instruction requiring a constant pool entry
-  // since the previous constant pool was emitted.
-  int first_const_pool_use_;
-
   // Emission of the veneer pools may be blocked in some code sequences.
   int veneer_pool_blocked_nesting_;  // Block emission if this is not zero.
 
@@ -2106,10 +2154,8 @@
   // If every instruction in a long sequence is accessing the pool, we need one
   // pending relocation entry per instruction.
 
-  // the buffer of pending relocation info
-  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
-  // number of pending reloc info entries in the buffer
-  int num_pending_reloc_info_;
+  // The pending constant pool.
+  ConstPool constpool_;
 
   // Relocation for a type-recording IC has the AST id added to it.  This
   // member variable is a way to pass the information from the call site to
@@ -2123,7 +2169,7 @@
   // Record the AST id of the CallIC being compiled, so that it can be placed
   // in the relocation information.
   void SetRecordedAstId(TypeFeedbackId ast_id) {
-    ASSERT(recorded_ast_id_.IsNone());
+    DCHECK(recorded_ast_id_.IsNone());
     recorded_ast_id_ = ast_id;
   }
 
@@ -2171,7 +2217,7 @@
   static const int kVeneerDistanceCheckMargin =
     kVeneerNoProtectionFactor * kVeneerDistanceMargin;
   int unresolved_branches_first_limit() const {
-    ASSERT(!unresolved_branches_.empty());
+    DCHECK(!unresolved_branches_.empty());
     return unresolved_branches_.begin()->first;
   }
   // This is similar to next_constant_pool_check_ and helps reduce the overhead
@@ -2196,6 +2242,7 @@
   PositionsRecorder positions_recorder_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
+  friend class ConstPool;
 };
 
 class PatchingAssembler : public Assembler {
@@ -2223,24 +2270,21 @@
 
   ~PatchingAssembler() {
     // Const pool should still be blocked.
-    ASSERT(is_const_pool_blocked());
+    DCHECK(is_const_pool_blocked());
     EndBlockPools();
     // Verify we have generated the number of instructions we expected.
-    ASSERT((pc_offset() + kGap) == buffer_size_);
+    DCHECK((pc_offset() + kGap) == buffer_size_);
     // Verify no relocation information has been emitted.
-    ASSERT(num_pending_reloc_info() == 0);
+    DCHECK(IsConstPoolEmpty());
     // Flush the Instruction cache.
     size_t length = buffer_size_ - kGap;
-    CPU::FlushICache(buffer_, length);
+    CpuFeatures::FlushICache(buffer_, length);
   }
 
-  static const int kMovInt64NInstrs = 4;
-  void MovInt64(const Register& rd, int64_t imm);
-
   // See definition of PatchAdrFar() for details.
-  static const int kAdrFarPatchableNNops = kMovInt64NInstrs - 1;
-  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 3;
-  void PatchAdrFar(Instruction* target);
+  static const int kAdrFarPatchableNNops = 2;
+  static const int kAdrFarPatchableNInstrs = kAdrFarPatchableNNops + 2;
+  void PatchAdrFar(int64_t target_offset);
 };
 
 
diff --git a/src/arm64/builtins-arm64.cc b/src/arm64/builtins-arm64.cc
index 9dc7221..0013e24 100644
--- a/src/arm64/builtins-arm64.cc
+++ b/src/arm64/builtins-arm64.cc
@@ -11,7 +11,6 @@
 #include "src/deoptimizer.h"
 #include "src/full-codegen.h"
 #include "src/runtime.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -66,7 +65,7 @@
     num_extra_args = 1;
     __ Push(x1);
   } else {
-    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
   // JumpToExternalReference expects x0 to contain the number of arguments
@@ -294,7 +293,7 @@
   __ CompareRoot(masm->StackPointer(), Heap::kStackLimitRootIndex);
   __ B(hs, &ok);
 
-  CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
   GenerateTailCallToReturnedCode(masm);
 
   __ Bind(&ok);
@@ -315,7 +314,7 @@
 
   ASM_LOCATION("Builtins::Generate_JSConstructStubHelper");
   // Should never create mementos for api functions.
-  ASSERT(!is_api_function || !create_memento);
+  DCHECK(!is_api_function || !create_memento);
 
   Isolate* isolate = masm->isolate();
 
@@ -380,7 +379,7 @@
         // Push the constructor and map to the stack, and the constructor again
         // as argument to the runtime call.
         __ Push(constructor, init_map, constructor);
-        __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
         __ Pop(init_map, constructor);
         __ Mov(constructon_count, Operand(JSFunction::kNoSlackTracking));
         __ Bind(&allocate);
@@ -465,11 +464,11 @@
         __ FillFields(first_prop, prop_fields, filler);
         __ Add(first_prop, new_obj, Operand(obj_size, LSL, kPointerSizeLog2));
         __ LoadRoot(x14, Heap::kAllocationMementoMapRootIndex);
-        ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+        DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
         __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
         // Load the AllocationSite
         __ Peek(x14, 2 * kXRegSize);
-        ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+        DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
         __ Str(x14, MemOperand(first_prop, kPointerSize, PostIndex));
         first_prop = NoReg;
       } else {
@@ -542,7 +541,7 @@
       __ Peek(x4, 2 * kXRegSize);
       __ Push(x4);
       __ Push(constructor);  // Argument for Runtime_NewObject.
-      __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
       __ Mov(x4, x0);
       // If we ended up using the runtime, and we want a memento, then the
       // runtime call made it for us, and we shouldn't do the create count
       // increment.
@@ -550,7 +549,7 @@
       __ jmp(&count_incremented);
     } else {
       __ Push(constructor);  // Argument for Runtime_NewObject.
-      __ CallRuntime(Runtime::kHiddenNewObject, 1);
+      __ CallRuntime(Runtime::kNewObject, 1);
       __ Mov(x4, x0);
     }
 
@@ -781,8 +780,8 @@
 }
 
 
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
   GenerateTailCallToReturnedCode(masm);
 }
 
@@ -792,11 +791,11 @@
   Register function = x1;
 
   // Preserve function. At the same time, push arguments for
-  // kHiddenCompileOptimized.
+  // kCompileOptimized.
   __ LoadObject(x10, masm->isolate()->factory()->ToBoolean(concurrent));
   __ Push(function, function, x10);
 
-  __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
 
   // Restore receiver.
   __ Pop(function);
@@ -906,7 +905,7 @@
     // preserve the registers with parameters.
     __ PushXRegList(kSafepointSavedRegisters);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ PopXRegList(kSafepointSavedRegisters);
   }
 
@@ -936,7 +935,7 @@
     // Pass the deoptimization type to the runtime system.
     __ Mov(x0, Smi::FromInt(static_cast<int>(type)));
     __ Push(x0);
-    __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
   }
 
   // Get the full codegen state from the stack and untag it.
@@ -1021,7 +1020,7 @@
   __ B(hs, &ok);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+    __ CallRuntime(Runtime::kStackGuard, 0);
   }
   __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
           RelocInfo::CODE_TARGET);
@@ -1065,7 +1064,7 @@
   // 3a. Patch the first argument if necessary when calling a function.
   Label shift_arguments;
   __ Mov(call_type, static_cast<int>(call_type_JS_func));
-  { Label convert_to_object, use_global_receiver, patch_receiver;
+  { Label convert_to_object, use_global_proxy, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ Ldr(cp, FieldMemOperand(function, JSFunction::kContextOffset));
 
@@ -1089,8 +1088,8 @@
     __ JumpIfSmi(receiver, &convert_to_object);
 
     __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
-                  &use_global_receiver);
-    __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+                  &use_global_proxy);
+    __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy);
 
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
     __ JumpIfObjectType(receiver, scratch1, scratch2,
@@ -1118,10 +1117,10 @@
     __ Mov(call_type, static_cast<int>(call_type_JS_func));
     __ B(&patch_receiver);
 
-    __ Bind(&use_global_receiver);
+    __ Bind(&use_global_proxy);
     __ Ldr(receiver, GlobalObjectMemOperand());
     __ Ldr(receiver,
-           FieldMemOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+           FieldMemOperand(receiver, GlobalObject::kGlobalProxyOffset));
 
 
     __ Bind(&patch_receiver);
@@ -1246,7 +1245,7 @@
     // TODO(jbramley): Check that the stack usage here is safe.
     __ Sub(x10, jssp, x10);
     // Check if the arguments will overflow the stack.
-    __ Cmp(x10, Operand(argc, LSR, kSmiShift - kPointerSizeLog2));
+    __ Cmp(x10, Operand::UntagSmiAndScale(argc, kPointerSizeLog2));
     __ B(gt, &enough_stack_space);
     // There is not enough stack space, so use a builtin to throw an appropriate
     // error.
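 
     // For illustration (not part of the patch): because a smi's low tag bits
     // are zero, untagging then scaling folds into a single shift:
     //   (argc >> kSmiShift) << kPointerSizeLog2
     //       == argc >> (kSmiShift - kPointerSizeLog2),
     // which is exactly the LSR operand the old code used.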
@@ -1278,7 +1277,7 @@
 
     // Compute and push the receiver.
     // Do not transform the receiver for strict mode functions.
-    Label convert_receiver_to_object, use_global_receiver;
+    Label convert_receiver_to_object, use_global_proxy;
     __ Ldr(w10, FieldMemOperand(x2, SharedFunctionInfo::kCompilerHintsOffset));
     __ Tbnz(x10, SharedFunctionInfo::kStrictModeFunction, &push_receiver);
     // Do not transform the receiver for native functions.
@@ -1286,9 +1285,9 @@
 
     // Compute the receiver in sloppy mode.
     __ JumpIfSmi(receiver, &convert_receiver_to_object);
-    __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_receiver);
+    __ JumpIfRoot(receiver, Heap::kNullValueRootIndex, &use_global_proxy);
     __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex,
-                  &use_global_receiver);
+                  &use_global_proxy);
 
     // Check if the receiver is already a JavaScript object.
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
@@ -1302,9 +1301,9 @@
     __ Mov(receiver, x0);
     __ B(&push_receiver);
 
-    __ Bind(&use_global_receiver);
+    __ Bind(&use_global_proxy);
     __ Ldr(x10, GlobalObjectMemOperand());
-    __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+    __ Ldr(receiver, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
 
     // Push the receiver
     __ Bind(&push_receiver);
diff --git a/src/arm64/code-stubs-arm64.cc b/src/arm64/code-stubs-arm64.cc
index fc8d91b..4978e5e 100644
--- a/src/arm64/code-stubs-arm64.cc
+++ b/src/arm64/code-stubs-arm64.cc
@@ -8,521 +8,114 @@
 
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
 #include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime.h"
 
 namespace v8 {
 namespace internal {
 
 
-void FastNewClosureStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x2: function info
-  static Register registers[] = { x2 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x1: function
-  static Register registers[] = { x1 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x0: value
-  static Register registers[] = { x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x0: value
-  static Register registers[] = { x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x3: array literals array
-  // x2: array literal index
-  // x1: constant elements
-  static Register registers[] = { x3, x2, x1 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(
-          Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x3: object literals array
-  // x2: object literal index
-  // x1: constant properties
-  // x0: object literal flags
-  static Register registers[] = { x3, x2, x1, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x2: feedback vector
-  // x3: call feedback slot
-  static Register registers[] = { x2, x3 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { x1, x0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x1: receiver
-  // x0: key
-  static Register registers[] = { x1, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x1: receiver
-  // x0: key
-  static Register registers[] = { x1, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x2: length
-  // x1: index (of last match)
-  // x0: string
-  static Register registers[] = { x2, x1, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x0: receiver
-  static Register registers[] = { x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x1: receiver
-  static Register registers[] = { x1 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void StringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { x0, x2 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { x1, x0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x2: receiver
-  // x1: key
-  // x0: value
-  static Register registers[] = { x2, x1, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x0: value (js_array)
-  // x1: to_map
-  static Register registers[] = { x0, x1 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  Address entry =
-      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x0: value to compare
-  static Register registers[] = { x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(CompareNilIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
+  // cp: context
   // x1: function
   // x2: allocation site with elements kind
   // x0: number of arguments to the constructor function
-  static Register registers_variable_args[] = { x1, x2, x0 };
-  static Register registers_no_args[] = { x1, x2 };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ =
-        sizeof(registers_no_args) / sizeof(registers_no_args[0]);
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = x0;
-    descriptor->register_param_count_ =
-        sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
-  // x1: constructor function
-  // x0: number of arguments to the constructor function
-  static Register registers_variable_args[] = { x1, x0 };
-  static Register registers_no_args[] = { x1 };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kInternalArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ =
-        sizeof(registers_no_args) / sizeof(registers_no_args[0]);
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = x0;
-    descriptor->register_param_count_ =
-        sizeof(registers_variable_args) / sizeof(registers_variable_args[0]);
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(x0, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
-}
-
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x0: value
-  static Register registers[] = { x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(ToBooleanIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
-}
-
-
-void StoreGlobalStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x1: receiver
-  // x2: key (unused)
-  // x0: value
-  static Register registers[] = { x1, x2, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x0: value
-  // x3: target map
-  // x1: key
-  // x2: receiver
-  static Register registers[] = { x0, x3, x1, x2 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x1: left operand
-  // x0: right operand
-  static Register registers[] = { x1, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x2: allocation site
-  // x1: left operand
-  // x0: right operand
-  static Register registers[] = { x2, x1, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  // x1: left operand
-  // x0: right operand
-  static Register registers[] = { x1, x0 };
-  descriptor->register_param_count_ = sizeof(registers) / sizeof(registers[0]);
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
-  static PlatformCallInterfaceDescriptor default_descriptor =
-      PlatformCallInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
-  static PlatformCallInterfaceDescriptor noInlineDescriptor =
-      PlatformCallInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
-    static Register registers[] = { x1,  // JSFunction
-                                    cp,  // context
-                                    x0,  // actual number of arguments
-                                    x2,  // expected number of arguments
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // JSFunction
-        Representation::Tagged(),     // context
-        Representation::Integer32(),  // actual number of arguments
-        Representation::Integer32(),  // expected number of arguments
-    };
-    descriptor->register_param_count_ = 4;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &default_descriptor;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::KeyedCall);
-    static Register registers[] = { cp,  // context
-                                    x2,  // key
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // key
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::NamedCall);
-    static Register registers[] = { cp,  // context
-                                    x2,  // name
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // name
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &noInlineDescriptor;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::CallHandler);
-    static Register registers[] = { cp,  // context
-                                    x0,  // receiver
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),  // context
-        Representation::Tagged(),  // receiver
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &default_descriptor;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ApiFunctionCall);
-    static Register registers[] = { x0,  // callee
-                                    x4,  // call_data
-                                    x2,  // holder
-                                    x1,  // api_function_address
-                                    cp,  // context
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),    // callee
-        Representation::Tagged(),    // call_data
-        Representation::Tagged(),    // holder
-        Representation::External(),  // api_function_address
-        Representation::Tagged(),    // context
-    };
-    descriptor->register_param_count_ = 5;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-    descriptor->platform_specific_descriptor_ = &default_descriptor;
-  }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
 #define __ ACCESS_MASM(masm)
 
 
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
   isolate()->counters()->code_stubs()->Increment();
 
-  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
-  int param_count = descriptor->register_param_count_;
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetEnvironmentParameterCount();
   {
     // Call the runtime system in a fresh internal frame.
     FrameScope scope(masm, StackFrame::INTERNAL);
-    ASSERT((descriptor->register_param_count_ == 0) ||
-           x0.Is(descriptor->register_params_[param_count - 1]));
+    DCHECK((param_count == 0) ||
+           x0.Is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
 
     // Push arguments
     MacroAssembler::PushPopQueue queue(masm);
     for (int i = 0; i < param_count; ++i) {
-      queue.Queue(descriptor->register_params_[i]);
+      queue.Queue(descriptor.GetEnvironmentParameterRegister(i));
     }
     queue.PushQueued();
 
-    ExternalReference miss = descriptor->miss_handler();
-    __ CallExternalReference(miss, descriptor->register_param_count_);
+    __ CallExternalReference(miss, param_count);
   }
 
   __ Ret();
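 
   // Illustrative call shape (an assumption about callers): a stub routes its
   // miss case to GenerateLightweightMiss with the matching IC utility, e.g.
   //   GenerateLightweightMiss(
   //       masm,
   //       ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));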
@@ -533,10 +126,10 @@
   Label done;
   Register input = source();
   Register result = destination();
-  ASSERT(is_truncating());
+  DCHECK(is_truncating());
 
-  ASSERT(result.Is64Bits());
-  ASSERT(jssp.Is(masm->StackPointer()));
+  DCHECK(result.Is64Bits());
+  DCHECK(jssp.Is(masm->StackPointer()));
 
   int double_offset = offset();
 
@@ -616,7 +209,7 @@
                                           FPRegister double_scratch,
                                           Label* slow,
                                           Condition cond) {
-  ASSERT(!AreAliased(left, right, scratch));
+  DCHECK(!AreAliased(left, right, scratch));
   Label not_identical, return_equal, heap_number;
   Register result = x0;
 
@@ -630,30 +223,30 @@
   if ((cond == lt) || (cond == gt)) {
     __ JumpIfObjectType(right, scratch, scratch, FIRST_SPEC_OBJECT_TYPE, slow,
                         ge);
+  } else if (cond == eq) {
+    __ JumpIfHeapNumber(right, &heap_number);
   } else {
     Register right_type = scratch;
     __ JumpIfObjectType(right, right_type, right_type, HEAP_NUMBER_TYPE,
                         &heap_number);
     // Comparing JS objects with <=, >= is complicated.
-    if (cond != eq) {
-      __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
-      __ B(ge, slow);
-      // Normally here we fall through to return_equal, but undefined is
-      // special: (undefined == undefined) == true, but
-      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
-      if ((cond == le) || (cond == ge)) {
-        __ Cmp(right_type, ODDBALL_TYPE);
-        __ B(ne, &return_equal);
-        __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
-        if (cond == le) {
-          // undefined <= undefined should fail.
-          __ Mov(result, GREATER);
-        } else  {
-          // undefined >= undefined should fail.
-          __ Mov(result, LESS);
-        }
-        __ Ret();
+    __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE);
+    __ B(ge, slow);
+    // Normally here we fall through to return_equal, but undefined is
+    // special: (undefined == undefined) == true, but
+    // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
+    if ((cond == le) || (cond == ge)) {
+      __ Cmp(right_type, ODDBALL_TYPE);
+      __ B(ne, &return_equal);
+      __ JumpIfNotRoot(right, Heap::kUndefinedValueRootIndex, &return_equal);
+      if (cond == le) {
+        // undefined <= undefined should fail.
+        __ Mov(result, GREATER);
+      } else {
+        // undefined >= undefined should fail.
+        __ Mov(result, LESS);
       }
+      __ Ret();
     }
   }
 
@@ -671,7 +264,7 @@
   // it is handled in the parser (see Parser::ParseBinaryExpression). We are
   // only concerned with cases ge, le and eq here.
   if ((cond != lt) && (cond != gt)) {
-    ASSERT((cond == ge) || (cond == le) || (cond == eq));
+    DCHECK((cond == ge) || (cond == le) || (cond == eq));
     __ Bind(&heap_number);
     // Left and right are identical pointers to a heap number object. Return
     // non-equal if the heap number is a NaN, and equal otherwise. Comparing
@@ -704,7 +297,7 @@
                                            Register left_type,
                                            Register right_type,
                                            Register scratch) {
-  ASSERT(!AreAliased(left, right, left_type, right_type, scratch));
+  DCHECK(!AreAliased(left, right, left_type, right_type, scratch));
 
   if (masm->emit_debug_code()) {
     // We assume that the arguments are not identical.
@@ -722,7 +315,7 @@
   __ B(lt, &right_non_object);
 
   // Return non-zero - x0 already contains a non-zero pointer.
-  ASSERT(left.is(x0) || right.is(x0));
+  DCHECK(left.is(x0) || right.is(x0));
   Label return_not_equal;
   __ Bind(&return_not_equal);
   __ Ret();
@@ -757,12 +350,10 @@
                                     Register right,
                                     FPRegister left_d,
                                     FPRegister right_d,
-                                    Register scratch,
                                     Label* slow,
                                     bool strict) {
-  ASSERT(!AreAliased(left, right, scratch));
-  ASSERT(!AreAliased(left_d, right_d));
-  ASSERT((left.is(x0) && right.is(x1)) ||
+  DCHECK(!AreAliased(left_d, right_d));
+  DCHECK((left.is(x0) && right.is(x1)) ||
          (right.is(x0) && left.is(x1)));
   Register result = x0;
 
@@ -774,8 +365,7 @@
     // If right is not a number and left is a smi, then strict equality cannot
     // succeed. Return non-equal.
     Label is_heap_number;
-    __ JumpIfObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE,
-                        &is_heap_number);
+    __ JumpIfHeapNumber(right, &is_heap_number);
     // Register right is a non-zero pointer, which is a valid NOT_EQUAL result.
     if (!right.is(result)) {
       __ Mov(result, NOT_EQUAL);
@@ -785,7 +375,7 @@
   } else {
     // Smi compared non-strictly with a non-smi, non-heap-number. Call the
     // runtime.
-    __ JumpIfNotObjectType(right, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+    __ JumpIfNotHeapNumber(right, slow);
   }
 
   // Left is the smi. Right is a heap number. Load right value into right_d, and
@@ -800,8 +390,7 @@
     // If left is not a number and right is a smi then strict equality cannot
     // succeed. Return non-equal.
     Label is_heap_number;
-    __ JumpIfObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE,
-                        &is_heap_number);
+    __ JumpIfHeapNumber(left, &is_heap_number);
     // Register left is a non-zero pointer, which is a valid NOT_EQUAL result.
     if (!left.is(result)) {
       __ Mov(result, NOT_EQUAL);
@@ -811,7 +400,7 @@
   } else {
     // Smi compared non-strictly with a non-smi, non-heap-number. Call the
     // runtime.
-    __ JumpIfNotObjectType(left, scratch, scratch, HEAP_NUMBER_TYPE, slow);
+    __ JumpIfNotHeapNumber(left, slow);
   }
 
   // Right is the smi. Left is a heap number. Load left value into left_d, and
@@ -835,7 +424,7 @@
                                                      Register right_type,
                                                      Label* possible_strings,
                                                      Label* not_both_strings) {
-  ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type));
+  DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type));
   Register result = x0;
 
   Label object_test;
@@ -878,18 +467,15 @@
 }
 
 
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
-                                         Register input,
-                                         Register scratch,
-                                         CompareIC::State expected,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+                                         CompareICState::State expected,
                                          Label* fail) {
   Label ok;
-  if (expected == CompareIC::SMI) {
+  if (expected == CompareICState::SMI) {
     __ JumpIfNotSmi(input, fail);
-  } else if (expected == CompareIC::NUMBER) {
+  } else if (expected == CompareICState::NUMBER) {
     __ JumpIfSmi(input, &ok);
-    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
-                DONT_DO_SMI_CHECK);
+    __ JumpIfNotHeapNumber(input, fail);
   }
   // We could be strict about internalized/non-internalized here, but as long as
   // hydrogen doesn't care, the stub doesn't have to care either.
@@ -897,15 +483,15 @@
 }
 
 
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   Register lhs = x1;
   Register rhs = x0;
   Register result = x0;
   Condition cond = GetCondition();
 
   Label miss;
-  ICCompareStub_CheckInputType(masm, lhs, x2, left_, &miss);
-  ICCompareStub_CheckInputType(masm, rhs, x3, right_, &miss);
+  CompareICStub_CheckInputType(masm, lhs, left(), &miss);
+  CompareICStub_CheckInputType(masm, rhs, right(), &miss);
 
   Label slow;  // Call builtin.
   Label not_smis, both_loaded_as_doubles;
@@ -938,7 +524,7 @@
   // rhs_d, left into lhs_d.
   FPRegister rhs_d = d0;
   FPRegister lhs_d = d1;
-  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, x10, &slow, strict());
+  EmitSmiNonsmiComparison(masm, lhs, rhs, lhs_d, rhs_d, &slow, strict());
 
   __ Bind(&both_loaded_as_doubles);
   // The arguments have been converted to doubles and stored in rhs_d and
@@ -955,7 +541,7 @@
   // Left and/or right is a NaN. Load the result register with whatever makes
   // the comparison fail, since comparisons with NaN always fail (except ne,
   // which is filtered out at a higher level).
-  ASSERT(cond != ne);
+  DCHECK(cond != ne);
   if ((cond == lt) || (cond == le)) {
     __ Mov(result, GREATER);
   } else {
@@ -1012,20 +598,20 @@
                                              &flat_string_check, &slow);
   }
 
-  // Check for both being sequential ASCII strings, and inline if that is the
-  // case.
+  // Check for both being sequential one-byte strings,
+  // and inline if that is the case.
   __ Bind(&flat_string_check);
-  __ JumpIfBothInstanceTypesAreNotSequentialAscii(lhs_type, rhs_type, x14,
-                                                  x15, &slow);
+  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x14,
+                                                    x15, &slow);
 
   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, x10,
                       x11);
   if (cond == eq) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(masm, lhs, rhs,
-                                                     x10, x11, x12);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
+                                                  x12);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(masm, lhs, rhs,
-                                                       x10, x11, x12, x13);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
+                                                    x12, x13);
   }
 
   // Never fall through to here.
@@ -1046,7 +632,7 @@
     if ((cond == lt) || (cond == le)) {
       ncr = GREATER;
     } else {
-      ASSERT((cond == gt) || (cond == ge));  // remaining cases
+      DCHECK((cond == gt) || (cond == ge));  // remaining cases
       ncr = LESS;
     }
     __ Mov(x10, Smi::FromInt(ncr));
@@ -1075,7 +661,7 @@
   saved_fp_regs.Remove(*(masm->FPTmpList()));
 
   __ PushCPURegList(saved_regs);
-  if (save_doubles_ == kSaveFPRegs) {
+  if (save_doubles()) {
     __ PushCPURegList(saved_fp_regs);
   }
 
@@ -1084,7 +670,7 @@
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()), 1, 0);
 
-  if (save_doubles_ == kSaveFPRegs) {
+  if (save_doubles()) {
     __ PopCPURegList(saved_fp_regs);
   }
   __ PopCPURegList(saved_regs);
@@ -1110,11 +696,7 @@
   // Restore lr with the value it had before the call to this stub (the value
   // which must be pushed).
   __ Mov(lr, saved_lr);
-  if (save_doubles_ == kSaveFPRegs) {
-    __ PushSafepointRegistersAndDoubles();
-  } else {
-    __ PushSafepointRegisters();
-  }
+  __ PushSafepointRegisters();
   __ Ret(return_address);
 }
 
@@ -1125,11 +707,7 @@
   Register return_address = temps.AcquireX();
   // Preserve the return address (lr will be clobbered by the pop).
   __ Mov(return_address, lr);
-  if (save_doubles_ == kSaveFPRegs) {
-    __ PopSafepointRegistersAndDoubles();
-  } else {
-    __ PopSafepointRegisters();
-  }
+  __ PopSafepointRegisters();
   __ Ret(return_address);
 }
 
@@ -1143,8 +721,10 @@
 
   Register result_tagged = x0;
   Register base_tagged = x10;
-  Register exponent_tagged = x11;
-  Register exponent_integer = x12;
+  Register exponent_tagged = MathPowTaggedDescriptor::exponent();
+  DCHECK(exponent_tagged.is(x11));
+  Register exponent_integer = MathPowIntegerDescriptor::exponent();
+  DCHECK(exponent_integer.is(x12));
   Register scratch1 = x14;
   Register scratch0 = x15;
   Register saved_lr = x19;
@@ -1163,7 +743,7 @@
   Label done;
 
   // Unpack the inputs.
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     Label base_is_smi;
     Label unpack_exponent;
 
@@ -1187,20 +767,20 @@
     // exponent_tagged is a heap number, so load its double value.
     __ Ldr(exponent_double,
            FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
+  } else if (exponent_type() == TAGGED) {
     __ JumpIfSmi(exponent_tagged, &exponent_is_smi);
     __ Ldr(exponent_double,
            FieldMemOperand(exponent_tagged, HeapNumber::kValueOffset));
   }
 
   // Handle double (heap number) exponents.
-  if (exponent_type_ != INTEGER) {
+  if (exponent_type() != INTEGER) {
     // Detect integer exponents stored as doubles and handle those in the
     // integer fast-path.
     __ TryRepresentDoubleAsInt64(exponent_integer, exponent_double,
                                  scratch0_double, &exponent_is_integer);
 
-    if (exponent_type_ == ON_STACK) {
+    if (exponent_type() == ON_STACK) {
       FPRegister  half_double = d3;
       FPRegister  minus_half_double = d4;
       // Detect square root case. Crankshaft detects constant +/-0.5 at compile
@@ -1351,18 +931,18 @@
   __ Fcmp(result_double, 0.0);
   __ B(&done, ne);
 
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     // Bail out to runtime code.
     __ Bind(&call_runtime);
     // Put the arguments back on the stack.
     __ Push(base_tagged, exponent_tagged);
-    __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
+    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
 
     // Return.
     __ Bind(&done);
     __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1,
                           result_double);
-    ASSERT(result_tagged.is(x0));
+    DCHECK(result_tagged.is(x0));
     __ IncrementCounter(
         isolate()->counters()->math_pow(), 1, scratch0, scratch1);
     __ Ret();
@@ -1401,18 +981,14 @@
 
 
 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
-  StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
-  stub1.GetCode();
-  StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
-  stub2.GetCode();
+  StoreRegistersStateStub stub(isolate);
+  stub.GetCode();
 }
 
 
 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
-  RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
-  stub1.GetCode();
-  RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
-  stub2.GetCode();
+  RestoreRegistersStateStub stub(isolate);
+  stub.GetCode();
 }
 
 
@@ -1470,7 +1046,7 @@
   //
   // The arguments are in reverse order, so that arg[argc-2] is actually the
   // first argument to the target function and arg[0] is the last.
-  ASSERT(jssp.Is(__ StackPointer()));
+  DCHECK(jssp.Is(__ StackPointer()));
   const Register& argc_input = x0;
   const Register& target_input = x1;
 
@@ -1496,8 +1072,8 @@
   // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved
   // registers.
   FrameScope scope(masm, StackFrame::MANUAL);
-  __ EnterExitFrame(save_doubles_, x10, 3);
-  ASSERT(csp.Is(__ StackPointer()));
+  __ EnterExitFrame(save_doubles(), x10, 3);
+  DCHECK(csp.Is(__ StackPointer()));
 
   // Poke callee-saved registers into reserved space.
   __ Poke(argv, 1 * kPointerSize);
@@ -1547,7 +1123,7 @@
   // untouched, and the stub either throws an exception by jumping to one of
   // the exception_returned label.
 
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
 
   // Prepare AAPCS64 arguments to pass to the builtin.
   __ Mov(x0, argc);
@@ -1593,8 +1169,8 @@
   __ Peek(argc, 2 * kPointerSize);
   __ Peek(target, 3 * kPointerSize);
 
-  __ LeaveExitFrame(save_doubles_, x10, true);
-  ASSERT(jssp.Is(__ StackPointer()));
+  __ LeaveExitFrame(save_doubles(), x10, true);
+  DCHECK(jssp.Is(__ StackPointer()));
   // Pop or drop the remaining stack slots and return from the stub.
   //         jssp[24]:    Arguments array (of size argc), including receiver.
   //         jssp[16]:    Preserved x23 (used for target).
@@ -1665,8 +1241,8 @@
 //   x4: argv.
 // Output:
 //   x0: result.
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
-  ASSERT(jssp.Is(__ StackPointer()));
+void JSEntryStub::Generate(MacroAssembler* masm) {
+  DCHECK(jssp.Is(__ StackPointer()));
   Register code_entry = x0;
 
   // Enable instruction instrumentation. This only works on the simulator, and
@@ -1696,7 +1272,7 @@
   __ Fmov(fp_zero, 0.0);
 
   // Build an entry frame (see layout below).
-  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  int marker = type();
   int64_t bad_frame_pointer = -1L;  // Bad frame pointer to fail if it is used.
   __ Mov(x13, bad_frame_pointer);
   __ Mov(x12, Smi::FromInt(marker));
@@ -1720,7 +1296,7 @@
   __ B(&done);
   __ Bind(&non_outermost_js);
   // We spare one instruction by pushing xzr since the marker is 0.
-  ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
+  DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL);
   __ Push(xzr);
   __ Bind(&done);
 
@@ -1781,8 +1357,9 @@
   // x2: receiver.
   // x3: argc.
   // x4: argv.
-  ExternalReference entry(is_construct ? Builtins::kJSConstructEntryTrampoline
-                                       : Builtins::kJSEntryTrampoline,
+  ExternalReference entry(type() == StackFrame::ENTRY_CONSTRUCT
+                              ? Builtins::kJSConstructEntryTrampoline
+                              : Builtins::kJSEntryTrampoline,
                           isolate());
   __ Mov(x10, entry);
 
@@ -1822,7 +1399,7 @@
   // Reset the stack to the callee saved registers.
   __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes);
   // Restore the callee-saved registers and return.
-  ASSERT(jssp.Is(__ StackPointer()));
+  DCHECK(jssp.Is(__ StackPointer()));
   __ Mov(csp, jssp);
   __ SetStackPointer(csp);
   __ PopCalleeSavedRegisters();
@@ -1834,33 +1411,14 @@
 
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
-  Register receiver;
-  if (kind() == Code::KEYED_LOAD_IC) {
-    // ----------- S t a t e -------------
-    //  -- lr    : return address
-    //  -- x1    : receiver
-    //  -- x0    : key
-    // -----------------------------------
-    Register key = x0;
-    receiver = x1;
-    __ Cmp(key, Operand(isolate()->factory()->prototype_string()));
-    __ B(ne, &miss);
-  } else {
-    ASSERT(kind() == Code::LOAD_IC);
-    // ----------- S t a t e -------------
-    //  -- lr    : return address
-    //  -- x2    : name
-    //  -- x0    : receiver
-    //  -- sp[0] : receiver
-    // -----------------------------------
-    receiver = x0;
-  }
+  Register receiver = LoadDescriptor::ReceiverRegister();
 
-  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10, x11, &miss);
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
+                                                          x11, &miss);
 
   __ Bind(&miss);
-  StubCompiler::TailCallBuiltin(masm,
-                                BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
 }
 
 
@@ -1908,7 +1466,7 @@
 
   // If there is a call site cache, don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     Label miss;
     __ JumpIfNotRoot(function, Heap::kInstanceofCacheFunctionRootIndex, &miss);
     __ JumpIfNotRoot(map, Heap::kInstanceofCacheMapRootIndex, &miss);
@@ -1940,6 +1498,7 @@
   }
 
   Label return_true, return_result;
+  Register smi_value = scratch1;
   {
     // Loop through the prototype chain looking for the function prototype.
     Register chain_map = x1;
@@ -1950,6 +1509,10 @@
     __ LoadRoot(null_value, Heap::kNullValueRootIndex);
     // Speculatively set a result.
     __ Mov(result, res_false);
+    if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
+      // Value to store in the cache cannot be an object.
+      __ Mov(smi_value, Smi::FromInt(1));
+    }
 
     __ Bind(&loop);
 
@@ -1972,14 +1535,19 @@
   // We cannot fall through to here.
   __ Bind(&return_true);
   __ Mov(result, res_true);
+  if (!HasCallSiteInlineCheck() && ReturnTrueFalseObject()) {
+    // Value to store in the cache cannot be an object.
+    __ Mov(smi_value, Smi::FromInt(0));
+  }
   __ Bind(&return_result);
   if (HasCallSiteInlineCheck()) {
-    ASSERT(ReturnTrueFalseObject());
+    DCHECK(ReturnTrueFalseObject());
     __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult);
     __ GetRelocatedValueLocation(map_check_site, scratch2);
     __ Str(result, MemOperand(scratch2));
   } else {
-    __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex);
+    Register cached_value = ReturnTrueFalseObject() ? smi_value : result;
+    __ StoreRoot(cached_value, Heap::kInstanceofCacheAnswerRootIndex);
   }
   __ Ret();
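
An aside on the caching change above: once the stub must return true/false objects, the Heap::kInstanceofCacheAnswerRootIndex slot stores a smi (0 for "is an instance", 1 for "is not") rather than the result object itself. A minimal sketch of that one-entry cache, using placeholder void* handles instead of real V8 types:

#include <cstdint>

struct InstanceofCacheSketch {
  const void* function = nullptr;  // cached constructor (right-hand side)
  const void* map = nullptr;       // cached map of the object (left-hand side)
  int32_t answer = 1;              // smi: 0 => instance, 1 => not an instance
};

bool CachedInstanceOf(InstanceofCacheSketch* cache, const void* function,
                      const void* map,
                      bool (*walk_prototype_chain)(const void*, const void*)) {
  if (cache->function == function && cache->map == map) {
    return cache->answer == 0;  // fast path: reuse the cached smi
  }
  bool result = walk_prototype_chain(map, function);
  cache->function = function;
  cache->map = map;
  cache->answer = result ? 0 : 1;  // cache a smi, never a heap object
  return result;
}
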
 
@@ -2034,21 +1602,11 @@
 }
 
 
-Register InstanceofStub::left() {
-  // Object to check (instanceof lhs).
-  return x11;
-}
-
-
-Register InstanceofStub::right() {
-  // Constructor function (instanceof rhs).
-  return x10;
-}
-
-
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
-  Register arg_count = x0;
-  Register key = x1;
+  Register arg_count = ArgumentsAccessReadDescriptor::parameter_count();
+  Register key = ArgumentsAccessReadDescriptor::index();
+  DCHECK(arg_count.is(x0));
+  DCHECK(key.is(x1));
 
   // The displacement is the offset of the last parameter (if any) relative
   // to the frame pointer.
@@ -2106,9 +1664,8 @@
   Register caller_fp = x10;
   __ Ldr(caller_fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
   // Load and untag the context.
-  STATIC_ASSERT((kSmiShift / kBitsPerByte) == 4);
-  __ Ldr(w11, MemOperand(caller_fp, StandardFrameConstants::kContextOffset +
-                         (kSmiShift / kBitsPerByte)));
+  __ Ldr(w11, UntagSmiMemOperand(caller_fp,
+                                 StandardFrameConstants::kContextOffset));
   __ Cmp(w11, StackFrame::ARGUMENTS_ADAPTOR);
   __ B(ne, &runtime);
 
@@ -2121,7 +1678,7 @@
   __ Poke(x10, 1 * kXRegSize);
 
   __ Bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
 
 
@@ -2221,41 +1778,42 @@
 
   // Get the arguments boilerplate from the current (global) context.
 
-  //   x0   alloc_obj     pointer to allocated objects (param map, backing
-  //                      store, arguments)
-  //   x1   mapped_params number of mapped parameters, min(params, args)
-  //   x2   arg_count     number of function arguments
-  //   x3   arg_count_smi number of function arguments (smi)
-  //   x4   function      function pointer
-  //   x7   param_count   number of function parameters
-  //   x11  args_offset   offset to args (or aliased args) boilerplate (uninit)
-  //   x14  recv_arg      pointer to receiver arguments
+  //   x0   alloc_obj       pointer to allocated objects (param map, backing
+  //                        store, arguments)
+  //   x1   mapped_params   number of mapped parameters, min(params, args)
+  //   x2   arg_count       number of function arguments
+  //   x3   arg_count_smi   number of function arguments (smi)
+  //   x4   function        function pointer
+  //   x7   param_count     number of function parameters
+  //   x11  sloppy_args_map offset to args (or aliased args) map (uninit)
+  //   x14  recv_arg        pointer to receiver arguments
 
   Register global_object = x10;
   Register global_ctx = x10;
-  Register args_offset = x11;
-  Register aliased_args_offset = x10;
+  Register sloppy_args_map = x11;
+  Register aliased_args_map = x10;
   __ Ldr(global_object, GlobalObjectMemOperand());
   __ Ldr(global_ctx, FieldMemOperand(global_object,
                                      GlobalObject::kNativeContextOffset));
 
-  __ Ldr(args_offset,
-         ContextMemOperand(global_ctx,
-                           Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX));
-  __ Ldr(aliased_args_offset,
-         ContextMemOperand(global_ctx,
-                           Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX));
+  __ Ldr(sloppy_args_map,
+         ContextMemOperand(global_ctx, Context::SLOPPY_ARGUMENTS_MAP_INDEX));
+  __ Ldr(aliased_args_map,
+         ContextMemOperand(global_ctx, Context::ALIASED_ARGUMENTS_MAP_INDEX));
   __ Cmp(mapped_params, 0);
-  __ CmovX(args_offset, aliased_args_offset, ne);
+  __ CmovX(sloppy_args_map, aliased_args_map, ne);
 
   // Copy the JS object part.
-  __ CopyFields(alloc_obj, args_offset, CPURegList(x10, x12, x13),
-                JSObject::kHeaderSize / kPointerSize);
+  __ Str(sloppy_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
+  __ LoadRoot(x10, Heap::kEmptyFixedArrayRootIndex);
+  __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
+  __ Str(x10, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
 
   // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   const int kCalleeOffset = JSObject::kHeaderSize +
                             Heap::kArgumentsCalleeIndex * kPointerSize;
+  __ AssertNotSmi(function);
   __ Str(function, FieldMemOperand(alloc_obj, kCalleeOffset));
 
   // Use the length and set that as an in-object property.
@@ -2393,7 +1951,30 @@
   // Do the runtime call to allocate the arguments object.
   __ Bind(&runtime);
   __ Push(function, recv_arg, arg_count_smi);
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+  // Return address is in lr.
+  Label slow;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+
+  // Check that the key is an array index, that is, a Uint32.
+  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
+
+  // Everything is fine, call runtime.
+  __ Push(receiver, key);
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+                        masm->isolate()),
+      2, 1);
+
+  __ Bind(&slow);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
 }
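
The TestAndBranchIfAnySet above folds two checks into a single mask test. In scalar form, assuming the usual arm64 smi constants (tag in bit 0, sign in bit 63), both of which are assumptions here:

#include <cstdint>

constexpr uint64_t kSmiTagMaskSketch = 0x1;          // assumed tag bit
constexpr uint64_t kSmiSignMaskSketch = 1ULL << 63;  // assumed sign bit

// A key qualifies as an array index only if it is a smi (tag bit clear)
// and non-negative (sign bit clear); one AND tests both bits at once.
bool IsArrayIndexKey(uint64_t key_bits) {
  return (key_bits & (kSmiTagMaskSketch | kSmiSignMaskSketch)) == 0;
}
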
 
 
@@ -2456,25 +2037,24 @@
   // Get the arguments boilerplate from the current (native) context.
   Register global_object = x10;
   Register global_ctx = x10;
-  Register args_offset = x4;
+  Register strict_args_map = x4;
   __ Ldr(global_object, GlobalObjectMemOperand());
   __ Ldr(global_ctx, FieldMemOperand(global_object,
                                      GlobalObject::kNativeContextOffset));
-  __ Ldr(args_offset,
-         ContextMemOperand(global_ctx,
-                           Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX));
+  __ Ldr(strict_args_map,
+         ContextMemOperand(global_ctx, Context::STRICT_ARGUMENTS_MAP_INDEX));
 
   //   x0   alloc_obj         pointer to allocated objects: parameter array and
   //                          arguments object
   //   x1   param_count_smi   number of parameters passed to function (smi)
   //   x2   params            pointer to parameters
   //   x3   function          function pointer
-  //   x4   args_offset       offset to arguments boilerplate
+  //   x4   strict_args_map   offset to arguments map
   //   x13  param_count       number of parameters passed to function
-
-  // Copy the JS object part.
-  __ CopyFields(alloc_obj, args_offset, CPURegList(x5, x6, x7),
-                JSObject::kHeaderSize / kPointerSize);
+  __ Str(strict_args_map, FieldMemOperand(alloc_obj, JSObject::kMapOffset));
+  __ LoadRoot(x5, Heap::kEmptyFixedArrayRootIndex);
+  __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kPropertiesOffset));
+  __ Str(x5, FieldMemOperand(alloc_obj, JSObject::kElementsOffset));
 
   // Set the smi-tagged length as an in-object property.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
@@ -2526,13 +2106,13 @@
   // Do the runtime call to allocate the arguments object.
   __ Bind(&runtime);
   __ Push(function, params, param_count_smi);
-  __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
 }
 
 
 void RegExpExecStub::Generate(MacroAssembler* masm) {
 #ifdef V8_INTERPRETED_REGEXP
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
 
   // Stack frame on entry.
@@ -2549,7 +2129,7 @@
   //   w0       string_type                     type of subject string
   //   x2       jsstring_length                 subject string length
   //   x3       jsregexp_object                 JSRegExp object
-  //   w4       string_encoding                 ASCII or UC16
+  //   w4       string_encoding                 Latin1 or UC16
   //   w5       sliced_string_offset            if the string is a SlicedString
   //                                            offset to the underlying string
   //   w6       string_representation           groups attributes of the string:
@@ -2611,7 +2191,7 @@
   __ Cbz(x10, &runtime);
 
   // Check that the first argument is a JSRegExp object.
-  ASSERT(jssp.Is(__ StackPointer()));
+  DCHECK(jssp.Is(__ StackPointer()));
   __ Peek(jsregexp_object, kJSRegExpOffset);
   __ JumpIfSmi(jsregexp_object, &runtime);
   __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime);
@@ -2648,7 +2228,7 @@
   // Initialize offset for possibly sliced string.
   __ Mov(sliced_string_offset, 0);
 
-  ASSERT(jssp.Is(__ StackPointer()));
+  DCHECK(jssp.Is(__ StackPointer()));
   __ Peek(subject, kSubjectOffset);
   __ JumpIfSmi(subject, &runtime);
 
@@ -2731,7 +2311,7 @@
 
   // Check that the third argument is a positive smi less than the subject
   // string length. A negative value will be greater (unsigned comparison).
-  ASSERT(jssp.Is(__ StackPointer()));
+  DCHECK(jssp.Is(__ StackPointer()));
   __ Peek(x10, kPreviousIndexOffset);
   __ JumpIfNotSmi(x10, &runtime);
   __ Cmp(jsstring_length, x10);
@@ -2747,17 +2327,17 @@
   STATIC_ASSERT(kStringEncodingMask == 0x04);
 
   // Find the code object based on the assumptions above.
-  // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset
+  // kDataOneByteCodeOffset and kDataUC16CodeOffset are adjacent; add an offset
   // of kPointerSize to reach the latter.
-  ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize,
+  DCHECK_EQ(JSRegExp::kDataOneByteCodeOffset + kPointerSize,
             JSRegExp::kDataUC16CodeOffset);
   __ Mov(x10, kPointerSize);
-  // We will need the encoding later: ASCII = 0x04
-  //                                  UC16  = 0x00
+  // We will need the encoding later: Latin1 = 0x04
+  //                                  UC16   = 0x00
   __ Ands(string_encoding, string_type, kStringEncodingMask);
   __ CzeroX(x10, ne);
   __ Add(x10, regexp_data, x10);
-  __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset));
+  __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataOneByteCodeOffset));
 
   // (E) Carry on.  String handling is done.
 
@@ -2773,7 +2353,7 @@
 
   // Isolates: note we add an additional parameter here (isolate pointer).
   __ EnterExitFrame(false, x10, 1);
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
 
   // We have 9 arguments to pass to the regexp code, therefore we have to pass
  // one on the stack and the rest in registers.
@@ -2800,13 +2380,13 @@
   __ Ldr(length, UntagSmiFieldMemOperand(subject, String::kLengthOffset));
 
   // Handle UC16 encoding, two bytes make one character.
-  //   string_encoding: if ASCII: 0x04
-  //                    if UC16:  0x00
+  //   string_encoding: if Latin1: 0x04
+  //                    if UC16:   0x00
   STATIC_ASSERT(kStringEncodingMask == 0x04);
   __ Ubfx(string_encoding, string_encoding, 2, 1);
   __ Eor(string_encoding, string_encoding, 1);
-  //   string_encoding: if ASCII: 0
-  //                    if UC16:  1
+  //   string_encoding: if Latin1: 0
+  //                    if UC16:   1
 
   // Convert string positions from characters to bytes.
   // Previous index is in x1.
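
The Ubfx/Eor pair above turns the encoding flag into a shift amount. A scalar sketch of the same computation, with the mask value taken from the STATIC_ASSERT above:

constexpr unsigned kStringEncodingMaskSketch = 0x04;  // per STATIC_ASSERT above

// Latin1 => 0, UC16 => 1; the result then serves as the shift that converts
// character indices to byte offsets (bytes = chars << shift).
unsigned EncodingShift(unsigned string_encoding) {
  unsigned bit = (string_encoding & kStringEncodingMaskSketch) >> 2;  // Ubfx
  return bit ^ 1;                                                     // Eor
}
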
@@ -2877,7 +2457,7 @@
   __ Add(number_of_capture_registers, x10, 2);
 
   // Check that the fourth object is a JSArray object.
-  ASSERT(jssp.Is(__ StackPointer()));
+  DCHECK(jssp.Is(__ StackPointer()));
   __ Peek(x10, kLastMatchInfoOffset);
   __ JumpIfSmi(x10, &runtime);
   __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime);
@@ -2956,8 +2536,8 @@
   // Store the smi values in the last match info.
   __ SmiTag(x10, current_offset);
   // Clearing the 32 bottom bits gives us a Smi.
-  STATIC_ASSERT(kSmiShift == 32);
-  __ And(x11, current_offset, ~kWRegMask);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Bic(x11, current_offset, kSmiShiftMask);
   __ Stp(x10,
          x11,
          MemOperand(last_match_offsets, kXRegSize * 2, PostIndex));
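
For context: arm64 V8 keeps a smi's 32-bit payload in the upper word of a register (the STATIC_ASSERT(kSmiShift == 32) being replaced above relies on this), so the Bic manufactures a smi by clearing the low bits. A sketch, assuming a 0xFFFFFFFF shift mask:

#include <cstdint>

constexpr uint64_t kSmiShiftMaskSketch = 0xFFFFFFFFull;  // assumes kSmiShift == 32

// Equivalent of Bic(x11, current_offset, kSmiShiftMask): with the payload in
// the upper word, clearing the low 32 bits of any value yields a valid smi.
uint64_t ToSmiByClearingLowBits(uint64_t raw) {
  return raw & ~kSmiShiftMaskSketch;
}
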
@@ -3006,7 +2586,7 @@
 
   __ Bind(&runtime);
   __ PopCPURegList(used_callee_saved_registers);
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 
   // Deferred code for string handling.
   // (6) Not a long external string?  If yes, go to (8).
@@ -3059,7 +2639,7 @@
                                      Register scratch1,
                                      Register scratch2) {
   ASM_LOCATION("GenerateRecordCallTarget");
-  ASSERT(!AreAliased(scratch1, scratch2,
+  DCHECK(!AreAliased(scratch1, scratch2,
                      argc, function, feedback_vector, index));
   // Cache the called function in a feedback vector slot. Cache states are
   // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic.
@@ -3069,9 +2649,9 @@
   //  index :           slot in feedback vector (smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   // Load the cache state.
@@ -3136,7 +2716,7 @@
 
       // CreateAllocationSiteStub expect the feedback vector in x2 and the slot
       // index in x3.
-      ASSERT(feedback_vector.Is(x2) && index.Is(x3));
+      DCHECK(feedback_vector.Is(x2) && index.Is(x3));
       __ CallStub(&create_stub);
 
       __ Pop(index, feedback_vector, function, argc);
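
The surrounding code implements the usual feedback-slot lattice (uninitialized, monomorphic, megamorphic). A schematic of the transitions it encodes, with stand-in names rather than the real V8 API:

enum class SlotStateSketch { kUninitialized, kMonomorphic, kMegamorphic };

SlotStateSketch RecordCall(SlotStateSketch state, bool same_function_as_cached) {
  switch (state) {
    case SlotStateSketch::kUninitialized:
      return SlotStateSketch::kMonomorphic;  // cache the called JSFunction
    case SlotStateSketch::kMonomorphic:
      return same_function_as_cached ? SlotStateSketch::kMonomorphic
                                     : SlotStateSketch::kMegamorphic;
    case SlotStateSketch::kMegamorphic:
      return SlotStateSketch::kMegamorphic;  // generic; never downgraded here
  }
  return state;  // unreachable
}
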
@@ -3276,7 +2856,7 @@
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
   ASM_LOCATION("CallFunctionStub::Generate");
-  CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
 }
 
 
@@ -3391,7 +2971,7 @@
   __ TailCallStub(&stub);
 
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+  GenerateMiss(masm);
 
   // The slow case, we need this no matter what to complete a call after a miss.
   CallFunctionNoFeedback(masm,
@@ -3411,7 +2991,7 @@
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   Register function = x1;
@@ -3430,7 +3010,7 @@
   __ B(ne, &extra_checks_or_miss);
 
   __ bind(&have_js_function);
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     EmitContinueIfStrictOrNative(masm, &cont);
 
     // Compute the receiver in sloppy mode.
@@ -3450,7 +3030,7 @@
   __ bind(&slow);
   EmitSlowCase(masm, argc, function, type, &non_function);
 
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     __ bind(&wrap);
     EmitWrapCase(masm, argc, &cont);
   }
@@ -3475,7 +3055,7 @@
 
   // We are here because tracing is on or we are going monomorphic.
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Miss);
+  GenerateMiss(masm);
 
   // the slow case
   __ bind(&slow_start);
@@ -3489,11 +3069,11 @@
 }
 
 
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
   ASM_LOCATION("CallICStub[Miss]");
 
   // Get the receiver of the function from the stack; 1 ~ return address.
-  __ Peek(x4, (state_.arg_count() + 1) * kPointerSize);
+  __ Peek(x4, (arg_count() + 1) * kPointerSize);
 
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -3502,6 +3082,9 @@
     __ Push(x4, x1, x2, x3);
 
     // Call the entry.
+    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+                                               : IC::kCallIC_Customization_Miss;
+
     ExternalReference miss = ExternalReference(IC_Utility(id),
                                                masm->isolate());
     __ CallExternalReference(miss, 4);
@@ -3551,20 +3134,16 @@
 
   __ Bind(&index_not_smi_);
   // If index is a heap number, try converting it to an integer.
-  __ CheckMap(index_,
-              result_,
-              Heap::kHeapNumberMapRootIndex,
-              index_not_number_,
-              DONT_DO_SMI_CHECK);
+  __ JumpIfNotHeapNumber(index_, index_not_number_);
   call_helper.BeforeCall(masm);
   // Save object_ on the stack and pass index_ as argument for runtime call.
   __ Push(object_, index_);
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
-    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
     // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
   // Save the conversion result before the pop instructions below
   // have a chance to overwrite it.
@@ -3587,7 +3166,7 @@
   call_helper.BeforeCall(masm);
   __ SmiTag(index_);
   __ Push(object_, index_);
-  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
   __ Mov(result_, x0);
   call_helper.AfterCall(masm);
   __ B(&exit_);
@@ -3602,9 +3181,8 @@
   __ B(hi, &slow_case_);
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged ASCII char code.
-  STATIC_ASSERT(kSmiShift > kPointerSizeLog2);
-  __ Add(result_, result_, Operand(code_, LSR, kSmiShift - kPointerSizeLog2));
+  // At this point code register contains smi tagged one-byte char code.
+  __ Add(result_, result_, Operand::UntagSmiAndScale(code_, kPointerSizeLog2));
   __ Ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
   __ JumpIfRoot(result_, Heap::kUndefinedValueRootIndex, &slow_case_);
   __ Bind(&exit_);
@@ -3628,10 +3206,10 @@
 }
 
 
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
   // Inputs are in x0 (lhs) and x1 (rhs).
-  ASSERT(state_ == CompareIC::SMI);
-  ASM_LOCATION("ICCompareStub[Smis]");
+  DCHECK(state() == CompareICState::SMI);
+  ASM_LOCATION("CompareICStub[Smis]");
   Label miss;
   // Bail out (to 'miss') unless both x0 and x1 are smis.
   __ JumpIfEitherNotSmi(x0, x1, &miss);
@@ -3651,9 +3229,9 @@
 }
 
 
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::NUMBER);
-  ASM_LOCATION("ICCompareStub[HeapNumbers]");
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
+  ASM_LOCATION("CompareICStub[HeapNumbers]");
 
   Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss, handle_lhs, values_in_d_regs;
@@ -3665,10 +3243,10 @@
   FPRegister rhs_d = d0;
   FPRegister lhs_d = d1;
 
-  if (left_ == CompareIC::SMI) {
+  if (left() == CompareICState::SMI) {
     __ JumpIfNotSmi(lhs, &miss);
   }
-  if (right_ == CompareIC::SMI) {
+  if (right() == CompareICState::SMI) {
     __ JumpIfNotSmi(rhs, &miss);
   }
 
@@ -3677,15 +3255,13 @@
 
   // Load rhs if it's a heap number.
   __ JumpIfSmi(rhs, &handle_lhs);
-  __ CheckMap(rhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
-              DONT_DO_SMI_CHECK);
+  __ JumpIfNotHeapNumber(rhs, &maybe_undefined1);
   __ Ldr(rhs_d, FieldMemOperand(rhs, HeapNumber::kValueOffset));
 
   // Load lhs if it's a heap number.
   __ Bind(&handle_lhs);
   __ JumpIfSmi(lhs, &values_in_d_regs);
-  __ CheckMap(lhs, x10, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
-              DONT_DO_SMI_CHECK);
+  __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
   __ Ldr(lhs_d, FieldMemOperand(lhs, HeapNumber::kValueOffset));
 
   __ Bind(&values_in_d_regs);
@@ -3697,20 +3273,20 @@
   __ Ret();
 
   __ Bind(&unordered);
-  ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
-                     CompareIC::GENERIC);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
   __ Bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ JumpIfNotRoot(rhs, Heap::kUndefinedValueRootIndex, &miss);
     __ JumpIfSmi(lhs, &unordered);
-    __ JumpIfNotObjectType(lhs, x10, x10, HEAP_NUMBER_TYPE, &maybe_undefined2);
+    __ JumpIfNotHeapNumber(lhs, &maybe_undefined2);
     __ B(&unordered);
   }
 
   __ Bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered);
   }
 
@@ -3719,9 +3295,9 @@
 }
 
 
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
-  ASM_LOCATION("ICCompareStub[InternalizedStrings]");
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+  ASM_LOCATION("CompareICStub[InternalizedStrings]");
   Label miss;
 
   Register result = x0;
@@ -3757,10 +3333,10 @@
 }
 
 
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::UNIQUE_NAME);
-  ASM_LOCATION("ICCompareStub[UniqueNames]");
-  ASSERT(GetCondition() == eq);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  ASM_LOCATION("CompareICStub[UniqueNames]");
+  DCHECK(GetCondition() == eq);
   Label miss;
 
   Register result = x0;
@@ -3782,8 +3358,8 @@
 
   // To avoid a miss, each instance type should be either SYMBOL_TYPE or it
   // should have kInternalizedTag set.
-  __ JumpIfNotUniqueName(lhs_instance_type, &miss);
-  __ JumpIfNotUniqueName(rhs_instance_type, &miss);
+  __ JumpIfNotUniqueNameInstanceType(lhs_instance_type, &miss);
+  __ JumpIfNotUniqueNameInstanceType(rhs_instance_type, &miss);
 
   // Unique names are compared by identity.
   STATIC_ASSERT(EQUAL == 0);
@@ -3796,13 +3372,13 @@
 }
 
 
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::STRING);
-  ASM_LOCATION("ICCompareStub[Strings]");
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
+  ASM_LOCATION("CompareICStub[Strings]");
 
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
+  bool equality = Token::IsEqualityOp(op());
 
   Register result = x0;
   Register rhs = x0;
@@ -3838,7 +3414,7 @@
   // because we already know they are not identical. We know they are both
   // strings.
   if (equality) {
-    ASSERT(GetCondition() == eq);
+    DCHECK(GetCondition() == eq);
     STATIC_ASSERT(kInternalizedTag == 0);
     Label not_internalized_strings;
     __ Orr(x12, lhs_type, rhs_type);
@@ -3849,18 +3425,18 @@
     __ Bind(&not_internalized_strings);
   }
 
-  // Check that both strings are sequential ASCII.
+  // Check that both strings are sequential one-byte.
   Label runtime;
-  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
-      lhs_type, rhs_type, x12, x13, &runtime);
+  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(lhs_type, rhs_type, x12,
+                                                    x13, &runtime);
 
-  // Compare flat ASCII strings. Returns when done.
+  // Compare flat one-byte strings. Returns when done.
   if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, lhs, rhs, x10, x11, x12);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, x10, x11,
+                                                  x12);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, lhs, rhs, x10, x11, x12, x13);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, x10, x11,
+                                                    x12, x13);
   }
 
   // Handle more complex cases in runtime.
@@ -3869,7 +3445,7 @@
   if (equality) {
     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
   } else {
-    __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
   }
 
   __ Bind(&miss);
@@ -3877,9 +3453,9 @@
 }
 
 
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::OBJECT);
-  ASM_LOCATION("ICCompareStub[Objects]");
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::OBJECT);
+  ASM_LOCATION("CompareICStub[Objects]");
 
   Label miss;
 
@@ -3892,7 +3468,7 @@
   __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss);
   __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss);
 
-  ASSERT(GetCondition() == eq);
+  DCHECK(GetCondition() == eq);
   __ Sub(result, rhs, lhs);
   __ Ret();
 
@@ -3901,8 +3477,8 @@
 }
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
-  ASM_LOCATION("ICCompareStub[KnownObjects]");
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+  ASM_LOCATION("CompareICStub[KnownObjects]");
 
   Label miss;
 
@@ -3931,10 +3507,10 @@
 
 // This method handles the case where a compare stub had the wrong
 // implementation. It calls a miss handler, which re-writes the stub. All other
-// ICCompareStub::Generate* methods should fall back into this one if their
+// CompareICStub::Generate* methods should fall back into this one if their
 // operands were not the expected types.
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
-  ASM_LOCATION("ICCompareStub[Miss]");
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+  ASM_LOCATION("CompareICStub[Miss]");
 
   Register stub_entry = x11;
   {
@@ -3948,7 +3524,7 @@
     // Preserve some caller-saved registers.
     __ Push(x1, x0, lr);
     // Push the arguments.
-    __ Mov(op, Smi::FromInt(op_));
+    __ Mov(op, Smi::FromInt(this->op()));
     __ Push(left, right, op);
 
     // Call the miss handler. This also pops the arguments.
@@ -3965,67 +3541,6 @@
 }
 
 
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
-                                    Register hash,
-                                    Register character) {
-  ASSERT(!AreAliased(hash, character));
-
-  // hash = character + (character << 10);
-  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
-  // Untag smi seed and add the character.
-  __ Add(hash, character, Operand(hash, LSR, kSmiShift));
-
-  // Compute hashes modulo 2^32 using a 32-bit W register.
-  Register hash_w = hash.W();
-
-  // hash += hash << 10;
-  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
-  // hash ^= hash >> 6;
-  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
-                                            Register hash,
-                                            Register character) {
-  ASSERT(!AreAliased(hash, character));
-
-  // hash += character;
-  __ Add(hash, hash, character);
-
-  // Compute hashes modulo 2^32 using a 32-bit W register.
-  Register hash_w = hash.W();
-
-  // hash += hash << 10;
-  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10));
-  // hash ^= hash >> 6;
-  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6));
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
-                                       Register hash,
-                                       Register scratch) {
-  // Compute hashes modulo 2^32 using a 32-bit W register.
-  Register hash_w = hash.W();
-  Register scratch_w = scratch.W();
-  ASSERT(!AreAliased(hash_w, scratch_w));
-
-  // hash += hash << 3;
-  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3));
-  // hash ^= hash >> 11;
-  __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11));
-  // hash += hash << 15;
-  __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15));
-
-  __ Ands(hash_w, hash_w, String::kHashBitMask);
-
-  // if (hash == 0) hash = 27;
-  __ Mov(scratch_w, StringHasher::kZeroHash);
-  __ Csel(hash_w, scratch_w, hash_w, eq);
-}
-
-
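
The three helpers deleted above are the assembly form of the classic one-at-a-time string hash. A scalar C++ rendering for reference, assuming the usual StringHasher constants (kZeroHash == 27, and a 30-bit hash mask, which is an assumption here):

#include <cstdint>

constexpr uint32_t kZeroHashSketch = 27;                 // StringHasher::kZeroHash
constexpr uint32_t kHashBitMaskSketch = (1u << 30) - 1;  // assumed mask width

uint32_t HashInit(uint32_t seed, uint32_t character) {
  uint32_t hash = seed + character;  // untagged seed plus first character
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashGetHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMaskSketch;
  return hash == 0 ? kZeroHashSketch : hash;  // if (hash == 0) hash = 27
}
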
 void SubStringStub::Generate(MacroAssembler* masm) {
   ASM_LOCATION("SubStringStub::Generate");
   Label runtime;
@@ -4168,8 +3683,8 @@
     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_slice);
-    __ AllocateAsciiSlicedString(result_string, result_length, x3, x4,
-                                 &runtime);
+    __ AllocateOneByteSlicedString(result_string, result_length, x3, x4,
+                                   &runtime);
     __ B(&set_slice_header);
 
     __ Bind(&two_byte_slice);
@@ -4219,12 +3734,12 @@
          SeqOneByteString::kHeaderSize - kHeapObjectTag);
 
   __ Bind(&allocate_result);
-  // Sequential ASCII string. Allocate the result.
+  // Sequential one-byte string. Allocate the result.
   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
   __ Tbz(input_type, MaskToBit(kStringEncodingMask), &two_byte_sequential);
 
-  // Allocate and copy the resulting ASCII string.
-  __ AllocateAsciiString(result_string, result_length, x3, x4, x5, &runtime);
+  // Allocate and copy the resulting one-byte string.
+  __ AllocateOneByteString(result_string, result_length, x3, x4, x5, &runtime);
 
   // Locate first character of substring to copy.
   __ Add(substring_char0, unpacked_char0, from);
@@ -4259,7 +3774,7 @@
   __ Ret();
 
   __ Bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 
   __ bind(&single_char);
   // x1: result_length
@@ -4277,13 +3792,10 @@
 }
 
 
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                                      Register left,
-                                                      Register right,
-                                                      Register scratch1,
-                                                      Register scratch2,
-                                                      Register scratch3) {
-  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3));
+void StringHelper::GenerateFlatOneByteStringEquals(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3) {
+  DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3));
   Register result = x0;
   Register left_length = scratch1;
   Register right_length = scratch2;
@@ -4310,8 +3822,8 @@
 
   // Compare characters. Falls through if all characters are equal.
   __ Bind(&compare_chars);
-  GenerateAsciiCharsCompareLoop(masm, left, right, left_length, scratch2,
-                                scratch3, &strings_not_equal);
+  GenerateOneByteCharsCompareLoop(masm, left, right, left_length, scratch2,
+                                  scratch3, &strings_not_equal);
 
   // Characters in strings are equal.
   __ Mov(result, Smi::FromInt(EQUAL));
@@ -4319,14 +3831,10 @@
 }
 
 
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register left,
-                                                        Register right,
-                                                        Register scratch1,
-                                                        Register scratch2,
-                                                        Register scratch3,
-                                                        Register scratch4) {
-  ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3, Register scratch4) {
+  DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4));
   Label result_not_equal, compare_lengths;
 
   // Find minimum length and length difference.
@@ -4340,14 +3848,13 @@
   __ Cbz(min_length, &compare_lengths);
 
   // Compare loop.
-  GenerateAsciiCharsCompareLoop(masm,
-                                left, right, min_length, scratch2, scratch4,
-                                &result_not_equal);
+  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                  scratch4, &result_not_equal);
 
   // Compare lengths - strings up to min-length are equal.
   __ Bind(&compare_lengths);
 
-  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
 
   // Use length_delta as result if it's zero.
   Register result = x0;
@@ -4364,15 +3871,10 @@
 }
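
The compare-then-tie-break structure above is easier to see in scalar form. A sketch, where -1/0/1 stand in for the stub's smi-tagged comparison results (EQUAL is the case shown above):

#include <cstdint>
#include <cstring>

int CompareFlatOneByte(const uint8_t* left, int left_len,
                       const uint8_t* right, int right_len) {
  int min_len = left_len < right_len ? left_len : right_len;
  int cmp = std::memcmp(left, right, static_cast<size_t>(min_len));
  if (cmp != 0) return cmp < 0 ? -1 : 1;    // characters differ before min_len
  int length_delta = left_len - right_len;  // tie-break on length
  return length_delta == 0 ? 0 : (length_delta < 0 ? -1 : 1);
}
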
 
 
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
-    MacroAssembler* masm,
-    Register left,
-    Register right,
-    Register length,
-    Register scratch1,
-    Register scratch2,
-    Label* chars_not_equal) {
-  ASSERT(!AreAliased(left, right, length, scratch1, scratch2));
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch1, Register scratch2, Label* chars_not_equal) {
+  DCHECK(!AreAliased(left, right, length, scratch1, scratch2));
 
   // Change index to run from -length to -1 by adding length to string
   // start. This means that loop ends when index reaches zero, which
@@ -4419,13 +3921,14 @@
 
   __ Bind(&not_same);
 
-  // Check that both objects are sequential ASCII strings.
-  __ JumpIfEitherIsNotSequentialAsciiStrings(left, right, x12, x13, &runtime);
+  // Check that both objects are sequential one-byte strings.
+  __ JumpIfEitherIsNotSequentialOneByteStrings(left, right, x12, x13, &runtime);
 
-  // Compare flat ASCII strings natively. Remove arguments from stack first,
+  // Compare flat one-byte strings natively. Remove arguments from stack first,
   // as this function will generate a return.
   __ IncrementCounter(counters->string_compare_native(), 1, x3, x4);
-  GenerateCompareFlatAsciiStrings(masm, left, right, x12, x13, x14, x15);
+  StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, x12, x13,
+                                                  x14, x15);
 
   __ Bind(&runtime);
 
@@ -4436,7 +3939,7 @@
 
   // Call the runtime.
   // Returns -1 (less), 0 (equal), or 1 (greater) tagged as a small integer.
-  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
@@ -4462,7 +3965,7 @@
 
   // Tail call into the stub that handles binary operations with allocation
   // sites.
-  BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
   __ TailCallStub(&stub);
 }
 
@@ -4472,16 +3975,14 @@
   // but we need to save them before using them.
   regs_.Save(masm);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
     Label dont_need_remembered_set;
 
-    Register value = regs_.scratch0();
-    __ Ldr(value, MemOperand(regs_.address()));
-    __ JumpIfNotInNewSpace(value, &dont_need_remembered_set);
+    Register val = regs_.scratch0();
+    __ Ldr(val, MemOperand(regs_.address()));
+    __ JumpIfNotInNewSpace(val, &dont_need_remembered_set);
 
-    __ CheckPageFlagSet(regs_.object(),
-                        value,
-                        1 << MemoryChunk::SCAN_ON_SCAVENGE,
+    __ CheckPageFlagSet(regs_.object(), val, 1 << MemoryChunk::SCAN_ON_SCAVENGE,
                         &dont_need_remembered_set);
 
     // First notify the incremental marker if necessary, then update the
@@ -4491,11 +3992,9 @@
     InformIncrementalMarker(masm);
     regs_.Restore(masm);  // Restore the extra scratch registers we used.
 
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,            // scratch1
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
+    __ RememberedSetHelper(object(), address(),
+                           value(),  // scratch1
+                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
 
     __ Bind(&dont_need_remembered_set);
   }
@@ -4509,11 +4008,11 @@
 
 
 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   Register address =
     x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address();
-  ASSERT(!address.Is(regs_.object()));
-  ASSERT(!address.Is(x0));
+  DCHECK(!address.Is(regs_.object()));
+  DCHECK(!address.Is(x0));
   __ Mov(address, regs_.address());
   __ Mov(x0, regs_.object());
   __ Mov(x1, address);
@@ -4525,7 +4024,7 @@
           isolate());
   __ CallCFunction(function, 3, 0);
 
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
 }
 
 
@@ -4552,25 +4051,22 @@
 
   regs_.Restore(masm);  // Restore the extra scratch registers we used.
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,            // scratch1
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
+    __ RememberedSetHelper(object(), address(),
+                           value(),  // scratch1
+                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
   } else {
     __ Ret();
   }
 
   __ Bind(&on_black);
   // Get the value from the slot.
-  Register value = regs_.scratch0();
-  __ Ldr(value, MemOperand(regs_.address()));
+  Register val = regs_.scratch0();
+  __ Ldr(val, MemOperand(regs_.address()));
 
   if (mode == INCREMENTAL_COMPACTION) {
     Label ensure_not_white;
 
-    __ CheckPageFlagClear(value,
-                          regs_.scratch1(),
+    __ CheckPageFlagClear(val, regs_.scratch1(),
                           MemoryChunk::kEvacuationCandidateMask,
                           &ensure_not_white);
 
@@ -4585,7 +4081,7 @@
   // We need extra registers for this, so we push the object and the address
   // register temporarily.
   __ Push(regs_.address(), regs_.object());
-  __ EnsureNotWhite(value,
+  __ EnsureNotWhite(val,
                     regs_.scratch1(),  // Scratch.
                     regs_.object(),    // Scratch.
                     regs_.address(),   // Scratch.
@@ -4595,11 +4091,9 @@
 
   regs_.Restore(masm);  // Restore the extra scratch registers we used.
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,            // scratch1
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
+    __ RememberedSetHelper(object(), address(),
+                           value(),  // scratch1
+                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
   } else {
     __ Ret();
   }
@@ -4627,12 +4121,10 @@
     __ adr(xzr, &skip_to_incremental_compacting);
   }
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,            // scratch1
-                           save_fp_regs_mode_,
-                           MacroAssembler::kReturnAtEnd);
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(), address(),
+                           value(),  // scratch1
+                           save_fp_regs_mode(), MacroAssembler::kReturnAtEnd);
   }
   __ Ret();
 
@@ -4720,7 +4212,7 @@
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   __ Ldr(x1, MemOperand(fp, parameter_count_offset));
-  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+  if (function_mode() == JS_FUNCTION_STUB_MODE) {
     __ Add(x1, x1, 1);
   }
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
@@ -4730,6 +4222,20 @@
 }
 
 
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorLoadStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorKeyedLoadStub stub(isolate());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
   // The entry hook is a "BumpSystemStackPointer" instruction (sub),
   // followed by a "Push lr" instruction, followed by a call.
@@ -4753,7 +4259,7 @@
     __ Bind(&entry_hook_call_start);
     __ Push(lr);
     __ CallStub(&stub);
-    ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
+    DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
            GetProfileEntryHookCallSize(masm));
 
     __ Pop(lr);
@@ -4768,7 +4274,7 @@
   // from anywhere.
   // TODO(jbramley): What about FP registers?
   __ PushCPURegList(kCallerSaved);
-  ASSERT(kCallerSaved.IncludesAliasOf(lr));
+  DCHECK(kCallerSaved.IncludesAliasOf(lr));
   const int kNumSavedRegs = kCallerSaved.Count();
 
   // Compute the function's address as the first argument.
@@ -4829,7 +4335,7 @@
                                     Register target) {
   // Make sure the caller configured the stack pointer (see comment in
   // DirectCEntryStub::Generate).
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
 
   intptr_t code =
       reinterpret_cast<intptr_t>(GetCode().location());
@@ -4854,7 +4360,7 @@
     Register name,
     Register scratch1,
     Register scratch2) {
-  ASSERT(!AreAliased(elements, name, scratch1, scratch2));
+  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
 
   // Assert that name contains a string.
   __ AssertName(name);
@@ -4871,7 +4377,7 @@
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the And instruction that follows.
-      ASSERT(NameDictionary::GetProbeOffset(i) <
+      DCHECK(NameDictionary::GetProbeOffset(i) <
           1 << (32 - Name::kHashFieldOffset));
       __ Add(scratch2, scratch2, Operand(
           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@@ -4879,7 +4385,7 @@
     __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift));
 
     // Scale the index by multiplying by the element size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
 
     // Check if the key is identical to the name.
@@ -4902,7 +4408,7 @@
   __ PushCPURegList(spill_list);
 
   if (name.is(x0)) {
-    ASSERT(!elements.is(x1));
+    DCHECK(!elements.is(x1));
     __ Mov(x1, name);
     __ Mov(x0, elements);
   } else {
@@ -4931,8 +4437,8 @@
                                                       Register properties,
                                                       Handle<Name> name,
                                                       Register scratch0) {
-  ASSERT(!AreAliased(receiver, properties, scratch0));
-  ASSERT(name->IsUniqueName());
+  DCHECK(!AreAliased(receiver, properties, scratch0));
+  DCHECK(name->IsUniqueName());
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
@@ -4948,7 +4454,7 @@
     __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.
 
     Register entity_name = scratch0;
@@ -4970,7 +4476,7 @@
     __ Ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ Ldrb(entity_name,
             FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueName(entity_name, miss);
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
     __ Bind(&good);
   }
 
@@ -5029,7 +4535,7 @@
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
      // shifted in the And instruction that follows.
-      ASSERT(NameDictionary::GetProbeOffset(i) <
+      DCHECK(NameDictionary::GetProbeOffset(i) <
              1 << (32 - Name::kHashFieldOffset));
       __ Add(index, hash,
              NameDictionary::GetProbeOffset(i) << Name::kHashShift);
@@ -5039,7 +4545,7 @@
     __ And(index, mask, Operand(index, LSR, Name::kHashShift));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ Add(index, index, Operand(index, LSL, 1));  // index *= 3.
 
     __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2));
@@ -5053,11 +4559,11 @@
     __ Cmp(entry_key, key);
     __ B(eq, &in_dictionary);
 
-    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
       // Check if the entry name is not a unique name.
       __ Ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ Ldrb(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
     }
   }
 
@@ -5065,7 +4571,7 @@
   // If we are doing negative lookup then probing failure should be
   // treated as a lookup success. For positive lookup, probing failure
   // should be treated as lookup failure.
-  if (mode_ == POSITIVE_LOOKUP) {
+  if (mode() == POSITIVE_LOOKUP) {
     __ Mov(result, 0);
     __ Ret();
   }
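
Both lookup paths above use the (i + i * i) probe offsets named in the comments. A sketch of one probe step, eliding the Name::kHashShift scaling that the assembly folds into its shift operands:

#include <cstdint>

constexpr uint32_t kEntrySizeSketch = 3;  // NameDictionary::kEntrySize

uint32_t ProbeEntryIndex(uint32_t hash, uint32_t i, uint32_t capacity_mask) {
  uint32_t slot = (hash + i + i * i) & capacity_mask;  // hash + probe offset
  return slot * kEntrySizeSketch;  // index *= 3, done as LSL #1 plus add
}
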
@@ -5245,7 +4751,7 @@
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
   Register argc = x0;
-  if (argument_count_ == ANY) {
+  if (argument_count() == ANY) {
     Label zero_case, n_case;
     __ Cbz(argc, &zero_case);
     __ Cmp(argc, 1);
@@ -5262,11 +4768,11 @@
     // N arguments.
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
 
-  } else if (argument_count_ == NONE) {
+  } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count_ == ONE) {
+  } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count_ == MORE_THAN_ONE) {
+  } else if (argument_count() == MORE_THAN_ONE) {
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   } else {
     UNREACHABLE();
@@ -5277,7 +4783,7 @@
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   ASM_LOCATION("ArrayConstructorStub::Generate");
   // ----------- S t a t e -------------
-  //  -- x0 : argc (only if argument_count_ == ANY)
+  //  -- x0 : argc (only if argument_count() == ANY)
   //  -- x1 : constructor
   //  -- x2 : AllocationSite or undefined
   //  -- sp[0] : return address
@@ -5429,9 +4935,9 @@
   Register api_function_address = x1;
   Register context = cp;
 
-  int argc = ArgumentBits::decode(bit_field_);
-  bool is_store = IsStoreBits::decode(bit_field_);
-  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+  int argc = this->argc();
+  bool is_store = this->is_store();
+  bool call_data_undefined = this->call_data_undefined();
 
   typedef FunctionCallbackArguments FCA;
 
@@ -5475,7 +4981,7 @@
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace);
 
-  ASSERT(!AreAliased(x0, api_function_address));
+  DCHECK(!AreAliased(x0, api_function_address));
   // x0 = FunctionCallbackInfo&
   // Arguments is after the return address.
   __ Add(x0, masm->StackPointer(), 1 * kPointerSize);
@@ -5521,7 +5027,8 @@
   //  -- x2                     : api_function_address
   // -----------------------------------
 
-  Register api_function_address = x2;
+  Register api_function_address = ApiGetterDescriptor::function_address();
+  DCHECK(api_function_address.is(x2));
 
   __ Mov(x0, masm->StackPointer());  // x0 = Handle<Name>
   __ Add(x1, x0, 1 * kPointerSize);  // x1 = PCA
diff --git a/src/arm64/code-stubs-arm64.h b/src/arm64/code-stubs-arm64.h
index 6baf969..03dab5b 100644
--- a/src/arm64/code-stubs-arm64.h
+++ b/src/arm64/code-stubs-arm64.h
@@ -5,8 +5,6 @@
 #ifndef V8_ARM64_CODE_STUBS_ARM64_H_
 #define V8_ARM64_CODE_STUBS_ARM64_H_
 
-#include "src/ic-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -14,74 +12,54 @@
 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
 
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
-  StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
-      : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
-
-  void Generate(MacroAssembler* masm);
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
 class StringHelper : public AllStatic {
  public:
-  // TODO(all): These don't seem to be used any more. Delete them.
+  // Compares two flat one-byte strings and returns the result in x0.
+  static void GenerateCompareFlatOneByteStrings(
+      MacroAssembler* masm, Register left, Register right, Register scratch1,
+      Register scratch2, Register scratch3, Register scratch4);
 
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character);
-
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character);
-
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash,
-                                  Register scratch);
+  // Compares two flat one-byte strings for equality; the result is in x0.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3);
 
  private:
+  static void GenerateOneByteCharsCompareLoop(
+      MacroAssembler* masm, Register left, Register right, Register length,
+      Register scratch1, Register scratch2, Label* chars_not_equal);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
 class StoreRegistersStateStub: public PlatformCodeStub {
  public:
-  StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
-      : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+  explicit StoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
 
   static Register to_be_pushed_lr() { return ip0; }
-  static void GenerateAheadOfTime(Isolate* isolate);
- private:
-  Major MajorKey() { return StoreRegistersState; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-  SaveFPRegsMode save_doubles_;
 
-  void Generate(MacroAssembler* masm);
+  static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
 };
 
 
 class RestoreRegistersStateStub: public PlatformCodeStub {
  public:
-  RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
-      : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+  explicit RestoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
 
   static void GenerateAheadOfTime(Isolate* isolate);
- private:
-  Major MajorKey() { return RestoreRegistersState; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-  SaveFPRegsMode save_doubles_;
 
-  void Generate(MacroAssembler* masm);
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
 };
 
 
@@ -97,16 +75,22 @@
                   RememberedSetAction remembered_set_action,
                   SaveFPRegsMode fp_mode)
       : PlatformCodeStub(isolate),
-        object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
         regs_(object,   // An input reg.
               address,  // An input reg.
               value) {  // One scratch reg.
+    DCHECK(object.Is64Bits());
+    DCHECK(value.Is64Bits());
+    DCHECK(address.Is64Bits());
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
   }
 
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
   enum Mode {
     STORE_BUFFER_ONLY,
     INCREMENTAL,
@@ -122,17 +106,17 @@
     Instruction* instr2 = instr1->following();
 
     if (instr1->IsUncondBranchImm()) {
-      ASSERT(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
+      DCHECK(instr2->IsPCRelAddressing() && (instr2->Rd() == xzr.code()));
       return INCREMENTAL;
     }
 
-    ASSERT(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
+    DCHECK(instr1->IsPCRelAddressing() && (instr1->Rd() == xzr.code()));
 
     if (instr2->IsUncondBranchImm()) {
       return INCREMENTAL_COMPACTION;
     }
 
-    ASSERT(instr2->IsPCRelAddressing());
+    DCHECK(instr2->IsPCRelAddressing());
 
     return STORE_BUFFER_ONLY;
   }
@@ -151,33 +135,35 @@
     Instruction* instr1 = patcher.InstructionAt(0);
     Instruction* instr2 = patcher.InstructionAt(kInstructionSize);
     // Instructions must be either 'adr' or 'b'.
-    ASSERT(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
-    ASSERT(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
+    DCHECK(instr1->IsPCRelAddressing() || instr1->IsUncondBranchImm());
+    DCHECK(instr2->IsPCRelAddressing() || instr2->IsUncondBranchImm());
     // Retrieve the offsets to the labels.
     int32_t offset_to_incremental_noncompacting = instr1->ImmPCOffset();
     int32_t offset_to_incremental_compacting = instr2->ImmPCOffset();
 
     switch (mode) {
       case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
+        DCHECK(GetMode(stub) == INCREMENTAL ||
                GetMode(stub) == INCREMENTAL_COMPACTION);
         patcher.adr(xzr, offset_to_incremental_noncompacting);
         patcher.adr(xzr, offset_to_incremental_compacting);
         break;
       case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         patcher.b(offset_to_incremental_noncompacting >> kInstructionSizeLog2);
         patcher.adr(xzr, offset_to_incremental_compacting);
         break;
       case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         patcher.adr(xzr, offset_to_incremental_noncompacting);
         patcher.b(offset_to_incremental_compacting >> kInstructionSizeLog2);
         break;
     }
-    ASSERT(GetMode(stub) == mode);
+    DCHECK(GetMode(stub) == mode);
   }
 
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
  private:
   // This is a helper class to manage the registers associated with the stub.
   // The 'object' and 'address' registers must be preserved.
@@ -191,7 +177,7 @@
           scratch0_(scratch),
           saved_regs_(kCallerSaved),
           saved_fp_regs_(kCallerSavedFP) {
-      ASSERT(!AreAliased(scratch, object, address));
+      DCHECK(!AreAliased(scratch, object, address));
 
       // The SaveCallerSaveRegisters method needs to save caller-saved
       // registers, but we don't bother saving MacroAssembler scratch registers.
@@ -284,62 +270,51 @@
     friend class RecordWriteStub;
   };
 
-  // A list of stub variants which are pregenerated.
-  // The variants are stored in the same format as the minor key, so
-  // MinorKeyFor() can be used to populate and check this list.
-  static const int kAheadOfTime[];
-
-  void Generate(MacroAssembler* masm);
-  void GenerateIncremental(MacroAssembler* masm, Mode mode);
-
   enum OnNoNeedToInformIncrementalMarker {
     kReturnOnNoNeedToInformIncrementalMarker,
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
+  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
       OnNoNeedToInformIncrementalMarker on_no_need,
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return MinorKeyFor(object_, value_, address_, remembered_set_action_,
-                       save_fp_regs_mode_);
-  }
-
-  static int MinorKeyFor(Register object,
-                         Register value,
-                         Register address,
-                         RememberedSetAction action,
-                         SaveFPRegsMode fp_mode) {
-    ASSERT(object.Is64Bits());
-    ASSERT(value.Is64Bits());
-    ASSERT(address.Is64Bits());
-    return ObjectBits::encode(object.code()) |
-        ValueBits::encode(value.code()) |
-        AddressBits::encode(address.code()) |
-        RememberedSetActionBits::encode(action) |
-        SaveFPRegsModeBits::encode(fp_mode);
-  }
-
   void Activate(Code* code) {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
   class ObjectBits: public BitField<int, 0, 5> {};
   class ValueBits: public BitField<int, 5, 5> {};
   class AddressBits: public BitField<int, 10, 5> {};
   class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
   class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
 
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
   Label slow_;
   RegisterAllocation regs_;
 };
@@ -350,14 +325,13 @@
 class DirectCEntryStub: public PlatformCodeStub {
  public:
   explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-  void Generate(MacroAssembler* masm);
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
-  Major MajorKey() { return DirectCEntry; }
-  int MinorKey() { return 0; }
-
   bool NeedsImmovableCode() { return true; }
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
 };
 
 
@@ -366,9 +340,9 @@
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
   NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
-      : PlatformCodeStub(isolate), mode_(mode) { }
-
-  void Generate(MacroAssembler* masm);
+      : PlatformCodeStub(isolate) {
+    minor_key_ = LookupModeBits::encode(mode);
+  }
 
   static void GenerateNegativeLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -400,79 +374,14 @@
       NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return NameDictionaryLookup; }
-
-  int MinorKey() {
-    return LookupModeBits::encode(mode_);
-  }
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
 
   class LookupModeBits: public BitField<LookupMode, 0, 1> {};
 
-  LookupMode mode_;
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
 };
 
-
-class SubStringStub: public PlatformCodeStub {
- public:
-  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
-  Major MajorKey() { return SubString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
-  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
-  // Compares two flat ASCII strings and returns result in x0.
-  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                              Register left,
-                                              Register right,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Register scratch3,
-                                              Register scratch4);
-
-  // Compare two flat ASCII strings for equality and returns result
-  // in x0.
-  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Register scratch3);
-
- private:
-  virtual Major MajorKey() { return StringCompare; }
-  virtual int MinorKey() { return 0; }
-  virtual void Generate(MacroAssembler* masm);
-
-  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register length,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* chars_not_equal);
-};
-
-
-struct PlatformCallInterfaceDescriptor {
-  explicit PlatformCallInterfaceDescriptor(
-      TargetAddressStorageMode storage_mode)
-      : storage_mode_(storage_mode) { }
-
-  TargetAddressStorageMode storage_mode() { return storage_mode_; }
-
- private:
-  TargetAddressStorageMode storage_mode_;
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_ARM64_CODE_STUBS_ARM64_H_
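A recurring change in this header is that stubs stop caching Register and mode members and instead pack them into minor_key_ via BitField, decoding on demand (object(), value(), save_fp_regs_mode() above). A rough, self-contained sketch of that encode/decode idiom — the template is a simplification of V8's real BitField, with the two field layouts copied from the stub's typedefs:

#include <cassert>
#include <cstdint>

// Simplified BitField: packs a value of type T into bits
// [kShift, kShift + kSize) of a 32-bit key.
template <typename T, int kShift, int kSize>
struct BitField {
  static uint32_t mask() { return ((1u << kSize) - 1) << kShift; }
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << kShift) & mask();
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & mask()) >> kShift);
  }
};

typedef BitField<int, 0, 5> ObjectBits;  // register code of 'object'
typedef BitField<int, 5, 5> ValueBits;   // register code of 'value'

int main() {
  // Encode at construction time (as the RecordWriteStub constructor does)...
  uint32_t minor_key = ObjectBits::encode(2) | ValueBits::encode(7);
  // ...decode on demand (as the object()/value() accessors do).
  assert(ObjectBits::decode(minor_key) == 2);
  assert(ValueBits::decode(minor_key) == 7);
  return 0;
}

This is also what makes the RecordWriteStub(uint32_t key, Isolate*) constructor above possible: the whole stub configuration round-trips through one integer.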
diff --git a/src/arm64/codegen-arm64.cc b/src/arm64/codegen-arm64.cc
index 9eb0d4a..91eaba7 100644
--- a/src/arm64/codegen-arm64.cc
+++ b/src/arm64/codegen-arm64.cc
@@ -6,9 +6,9 @@
 
 #if V8_TARGET_ARCH_ARM64
 
+#include "src/arm64/simulator-arm64.h"
 #include "src/codegen.h"
 #include "src/macro-assembler.h"
-#include "src/arm64/simulator-arm64.h"
 
 namespace v8 {
 namespace internal {
@@ -35,7 +35,8 @@
   // an AAPCS64-compliant exp() function. This will be faster than the C
   // library's exp() function, but probably less accurate.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::exp;
 
   ExternalReference::InitializeMathExpData();
@@ -61,10 +62,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
 
 #if !defined(USE_SIMULATOR)
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
@@ -85,14 +86,14 @@
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
+  DCHECK(!masm->has_frame());
   masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
+  DCHECK(masm->has_frame());
   masm->set_has_frame(false);
 }
 
@@ -101,26 +102,28 @@
 // Code generators
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- x2    : receiver
-  //  -- x3    : target map
-  // -----------------------------------
-  Register receiver = x2;
-  Register map = x3;
+  ASM_LOCATION(
+      "ElementsTransitionGenerator::GenerateMapChangeElementsTransition");
+  DCHECK(!AreAliased(receiver, key, value, target_map));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
+    DCHECK(allocation_memento_found != NULL);
     __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                          allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
   __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      map,
+                      target_map,
                       x10,
                       kLRHasNotBeenSaved,
                       kDontSaveFPRegs,
@@ -130,19 +133,25 @@
 
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
   ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
-  // ----------- S t a t e -------------
-  //  -- lr    : return address
-  //  -- x0    : value
-  //  -- x1    : key
-  //  -- x2    : receiver
-  //  -- x3    : target map, scratch for subsequent call
-  // -----------------------------------
-  Register receiver = x2;
-  Register target_map = x3;
-
   Label gc_required, only_change_map;
+  Register elements = x4;
+  Register length = x5;
+  Register array_size = x6;
+  Register array = x7;
+
+  Register scratch = x6;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     elements, length, array_size, array));
 
   if (mode == TRACK_ALLOCATION_SITE) {
     __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
@@ -150,32 +159,28 @@
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  Register elements = x4;
   __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
 
   __ Push(lr);
-  Register length = x5;
   __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                            FixedArray::kLengthOffset));
 
   // Allocate new FixedDoubleArray.
-  Register array_size = x6;
-  Register array = x7;
   __ Lsl(array_size, length, kDoubleSizeLog2);
   __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
   __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
   // Register array is non-tagged heap object.
 
   // Set the destination FixedDoubleArray's length and map.
-  Register map_root = x6;
+  Register map_root = array_size;
   __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
   __ SmiTag(x11, length);
   __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
   __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));
 
   __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                       kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
 
@@ -183,7 +188,7 @@
   __ Add(x10, array, kHeapObjectTag);
   __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
-                      x6, kLRHasBeenSaved, kDontSaveFPRegs,
+                      scratch, kLRHasBeenSaved, kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 
   // Prepare for conversion loop.
@@ -202,7 +207,7 @@
 
   __ Bind(&only_change_map);
   __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                       kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ B(&done);
@@ -234,20 +239,22 @@
 
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
   ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
-  // ----------- S t a t e -------------
-  //  -- x0    : value
-  //  -- x1    : key
-  //  -- x2    : receiver
-  //  -- lr    : return address
-  //  -- x3    : target map, scratch for subsequent call
-  //  -- x4    : scratch (elements)
-  // -----------------------------------
-  Register value = x0;
-  Register key = x1;
-  Register receiver = x2;
-  Register target_map = x3;
+  Register elements = x4;
+  Register array_size = x6;
+  Register array = x7;
+  Register length = x5;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     elements, array_size, array, length));
 
   if (mode == TRACK_ALLOCATION_SITE) {
     __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
@@ -256,7 +263,7 @@
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
   Label only_change_map;
-  Register elements = x4;
+
   __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);
 
@@ -264,20 +271,16 @@
   // TODO(all): These registers may not need to be pushed. Examine
   // RecordWriteStub and check whether it's needed.
   __ Push(target_map, receiver, key, value);
-  Register length = x5;
   __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                            FixedArray::kLengthOffset));
-
   // Allocate new FixedArray.
-  Register array_size = x6;
-  Register array = x7;
   Label gc_required;
   __ Mov(array_size, FixedDoubleArray::kHeaderSize);
   __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
   __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);
 
   // Set destination FixedDoubleArray's length and map.
-  Register map_root = x6;
+  Register map_root = array_size;
   __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
   __ SmiTag(x11, length);
   __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
@@ -315,8 +318,10 @@
     __ B(eq, &convert_hole);
 
     // Non-hole double, copy value into a heap number.
-    Register heap_num = x5;
-    __ AllocateHeapNumber(heap_num, &gc_required, x6, x4,
+    Register heap_num = length;
+    Register scratch = array_size;
+    Register scratch2 = elements;
+    __ AllocateHeapNumber(heap_num, &gc_required, scratch, scratch2,
                           x13, heap_num_map);
     __ Mov(x13, dst_elements);
     __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
@@ -351,7 +356,7 @@
 
 
 CodeAgingHelper::CodeAgingHelper() {
-  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
   // The sequence of instructions that is patched out for aging code is the
   // following boilerplate stack-building prologue that is found both in
   // FUNCTION and OPTIMIZED_FUNCTION code:
@@ -363,7 +368,7 @@
 
 #ifdef DEBUG
   const int length = kCodeAgeStubEntryOffset / kInstructionSize;
-  ASSERT(old_sequence_.length() >= kCodeAgeStubEntryOffset);
+  DCHECK(old_sequence_.length() >= kCodeAgeStubEntryOffset);
   PatchingAssembler patcher_old(old_sequence_.start(), length);
   MacroAssembler::EmitCodeAgeSequence(&patcher_old, NULL);
 #endif
@@ -415,7 +420,7 @@
                                        Register index,
                                        Register result,
                                        Label* call_runtime) {
-  ASSERT(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
+  DCHECK(string.Is64Bits() && index.Is32Bits() && result.Is64Bits());
   // Fetch the instance type of the receiver into result register.
   __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
   __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
@@ -480,15 +485,15 @@
   __ B(ne, call_runtime);
   __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
 
-  Label ascii, done;
+  Label one_byte, done;
   __ Bind(&check_encoding);
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
+  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &one_byte);
   // Two-byte string.
   __ Ldrh(result, MemOperand(string, index, SXTW, 1));
   __ B(&done);
-  __ Bind(&ascii);
-  // Ascii string.
+  __ Bind(&one_byte);
+  // One-byte string.
   __ Ldrb(result, MemOperand(string, index, SXTW));
   __ Bind(&done);
 }
@@ -511,10 +516,11 @@
   // instead of fmul and fsub. Doing this changes the result, but since this is
   // an estimation anyway, does it matter?
 
-  ASSERT(!AreAliased(input, result,
+  DCHECK(!AreAliased(input, result,
                      double_temp1, double_temp2,
                      temp1, temp2, temp3));
-  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!masm->serializer_enabled());  // External references not serializable.
 
   Label done;
   DoubleRegister double_temp3 = result;
@@ -534,7 +540,7 @@
   Label result_is_finite_non_zero;
   // Assert that we can load offset 0 (the small input threshold) and offset 1
   // (the large input threshold) with a single ldp.
-  ASSERT(kDRegSize == (ExpConstant(constants, 1).offset() -
+  DCHECK(kDRegSize == (ExpConstant(constants, 1).offset() -
                               ExpConstant(constants, 0).offset()));
   __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));
 
@@ -564,7 +570,7 @@
   __ Bind(&result_is_finite_non_zero);
 
   // Assert that we can load offset 3 and offset 4 with a single ldp.
-  ASSERT(kDRegSize == (ExpConstant(constants, 4).offset() -
+  DCHECK(kDRegSize == (ExpConstant(constants, 4).offset() -
                               ExpConstant(constants, 3).offset()));
   __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
   __ Fmadd(double_temp1, double_temp1, input, double_temp3);
@@ -572,7 +578,7 @@
   __ Fsub(double_temp1, double_temp1, double_temp3);
 
   // Assert that we can load offset 5 and offset 6 with a single ldp.
-  ASSERT(kDRegSize == (ExpConstant(constants, 6).offset() -
+  DCHECK(kDRegSize == (ExpConstant(constants, 6).offset() -
                               ExpConstant(constants, 5).offset()));
   __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
   // TODO(jbramley): Consider using Fnmsub here.
diff --git a/src/arm64/codegen-arm64.h b/src/arm64/codegen-arm64.h
index 9ef148c..2f01c51 100644
--- a/src/arm64/codegen-arm64.h
+++ b/src/arm64/codegen-arm64.h
@@ -6,7 +6,7 @@
 #define V8_ARM64_CODEGEN_ARM64_H_
 
 #include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/arm64/constants-arm64.h b/src/arm64/constants-arm64.h
index f459b4b..8db120b 100644
--- a/src/arm64/constants-arm64.h
+++ b/src/arm64/constants-arm64.h
@@ -15,7 +15,9 @@
 
 
 // Get the standard printf format macros for C99 stdint types.
+#ifndef __STDC_FORMAT_MACROS
 #define __STDC_FORMAT_MACROS
+#endif
 #include <inttypes.h>
 
 
@@ -260,7 +262,7 @@
 inline Condition NegateCondition(Condition cond) {
   // Conditions al and nv behave identically, as "always true". They can't be
   // inverted, because there is no never condition.
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK((cond != al) && (cond != nv));
   return static_cast<Condition>(cond ^ 1);
 }
 
@@ -398,7 +400,7 @@
 //
 // The enumerations can be used like this:
 //
-// ASSERT(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
+// DCHECK(instr->Mask(PCRelAddressingFMask) == PCRelAddressingFixed);
 // switch(instr->Mask(PCRelAddressingMask)) {
 //   case ADR:  Format("adr 'Xd, 'AddrPCRelByte"); break;
 //   case ADRP: Format("adrp 'Xd, 'AddrPCRelPage"); break;
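For context on the NegateCondition hunk above: the cond ^ 1 trick works because A64 encodes each condition adjacent to its logical inverse (eq/ne, mi/pl, and so on), so flipping the low bit negates the condition; al and nv are excluded because "always" has no inverse. A minimal sketch using the architectural encodings:

#include <cassert>

// A64 condition encodings (ARMv8 architectural values).
enum Condition {
  eq = 0, ne = 1, hs = 2, lo = 3, mi = 4, pl = 5, vs = 6, vc = 7,
  hi = 8, ls = 9, ge = 10, lt = 11, gt = 12, le = 13, al = 14, nv = 15
};

Condition NegateCondition(Condition cond) {
  // al and nv both mean "always"; there is no "never", so they cannot be
  // negated. Every other code pairs with its inverse at (code ^ 1).
  assert(cond != al && cond != nv);
  return static_cast<Condition>(cond ^ 1);
}

int main() {
  assert(NegateCondition(eq) == ne);
  assert(NegateCondition(lt) == ge);
  return 0;
}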
diff --git a/src/arm64/cpu-arm64.cc b/src/arm64/cpu-arm64.cc
index 4cfc4f0..39beb6d 100644
--- a/src/arm64/cpu-arm64.cc
+++ b/src/arm64/cpu-arm64.cc
@@ -8,8 +8,8 @@
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/cpu.h"
 #include "src/arm64/utils-arm64.h"
+#include "src/assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -40,7 +40,7 @@
 };
 
 
-void CPU::FlushICache(void* address, size_t length) {
+void CpuFeatures::FlushICache(void* address, size_t length) {
   if (length == 0) return;
 
 #ifdef USE_SIMULATOR
@@ -59,8 +59,8 @@
   uintptr_t dsize = sizes.dcache_line_size();
   uintptr_t isize = sizes.icache_line_size();
   // Cache line sizes are always a power of 2.
-  ASSERT(CountSetBits(dsize, 64) == 1);
-  ASSERT(CountSetBits(isize, 64) == 1);
+  DCHECK(CountSetBits(dsize, 64) == 1);
+  DCHECK(CountSetBits(isize, 64) == 1);
   uintptr_t dstart = start & ~(dsize - 1);
   uintptr_t istart = start & ~(isize - 1);
   uintptr_t end = start + length;
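The FlushICache body above rounds the start address down to a cache-line boundary before walking the range line by line; since line sizes are powers of two (the DCHECKs on CountSetBits), the rounding is a single mask. A small sketch of just that address arithmetic:

#include <cassert>
#include <cstdint>

// Round addr down to the start of its cache line. line_size must be a
// power of two, as FlushICache asserts.
uintptr_t LineStart(uintptr_t addr, uintptr_t line_size) {
  assert((line_size & (line_size - 1)) == 0);
  return addr & ~(line_size - 1);
}

int main() {
  // With 64-byte lines, addresses 0x1000..0x103f share one line.
  assert(LineStart(0x1027, 64) == 0x1000);
  // A maintenance loop then steps from LineStart(start) up to start + length
  // in line_size increments, issuing dc/ic operations per line.
  return 0;
}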
diff --git a/src/arm64/debug-arm64.cc b/src/arm64/debug-arm64.cc
index 5562316..f57d5b5 100644
--- a/src/arm64/debug-arm64.cc
+++ b/src/arm64/debug-arm64.cc
@@ -67,13 +67,13 @@
 
 
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
   return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   // Check whether the debug break slot instructions have been patched.
   return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
@@ -118,7 +118,7 @@
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   rinfo()->PatchCode(original_rinfo()->pc(),
                      Assembler::kDebugBreakSlotInstructions);
 }
@@ -131,6 +131,12 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Load padding words on stack.
+    __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingValue));
+    __ PushMultipleTimes(scratch, LiveEdit::kFramePaddingInitialSize);
+    __ Mov(scratch, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+    __ Push(scratch);
+
     // Any live values (object_regs and non_object_regs) in caller-saved
     // registers (or lr) need to be stored on the stack so that their values are
     // safely preserved for a call into C code.
@@ -144,12 +150,12 @@
     //    collector doesn't try to interpret them as pointers.
     //
     // TODO(jbramley): Why can't this handle callee-saved registers?
-    ASSERT((~kCallerSaved.list() & object_regs) == 0);
-    ASSERT((~kCallerSaved.list() & non_object_regs) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
-    ASSERT((scratch.Bit() & object_regs) == 0);
-    ASSERT((scratch.Bit() & non_object_regs) == 0);
-    ASSERT((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
+    DCHECK((~kCallerSaved.list() & object_regs) == 0);
+    DCHECK((~kCallerSaved.list() & non_object_regs) == 0);
+    DCHECK((object_regs & non_object_regs) == 0);
+    DCHECK((scratch.Bit() & object_regs) == 0);
+    DCHECK((scratch.Bit() & non_object_regs) == 0);
+    DCHECK((masm->TmpList()->list() & (object_regs | non_object_regs)) == 0);
     STATIC_ASSERT(kSmiValueSize == 32);
 
     CPURegList non_object_list =
@@ -165,7 +171,8 @@
       //  jssp[8]: 0x00000000 (SMI tag & padding)
       //  jssp[4]: reg[31:0]
       //  jssp[0]: 0x00000000 (SMI tag & padding)
-      STATIC_ASSERT((kSmiTag == 0) && (kSmiShift == 32));
+      STATIC_ASSERT(kSmiTag == 0);
+      STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == kWRegSizeInBits);
     }
 
     if (object_regs != 0) {
@@ -200,6 +207,9 @@
       __ Bfxil(reg, scratch, 32, 32);
     }
 
+    // Don't bother removing padding bytes pushed on the stack
+    // as the frame is going to be restored right away.
+
     // Leave the internal frame.
   }
 
@@ -226,48 +236,35 @@
 
 void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC load (from ic-arm.cc).
-  // ----------- S t a t e -------------
-  //  -- x2    : name
-  //  -- lr    : return address
-  //  -- x0    : receiver
-  //  -- [sp]  : receiver
-  // -----------------------------------
-  // Registers x0 and x2 contain objects that need to be pushed on the
-  // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, x0.Bit() | x2.Bit(), 0, x10);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.Bit() | name.Bit(), 0, x10);
 }
 
 
 void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
-  // Calling convention for IC store (from ic-arm.cc).
-  // ----------- S t a t e -------------
-  //  -- x0    : value
-  //  -- x1    : receiver
-  //  -- x2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-  // Registers x0, x1, and x2 contain objects that need to be pushed on the
-  // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+  // Calling convention for IC store (from ic-arm64.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(
+      masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
 }
 
 
 void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : key
-  //  -- x1     : receiver
-  Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit(), 0, x10);
+  // Calling convention for keyed IC load (from ic-arm64.cc).
+  GenerateLoadICDebugBreak(masm);
 }
 
 
 void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- x0     : value
-  //  -- x1     : key
-  //  -- x2     : receiver
-  //  -- lr     : return address
-  Generate_DebugBreakCallHelper(masm, x0.Bit() | x1.Bit() | x2.Bit(), 0, x10);
+  // Calling convention for keyed IC store (from ic-arm64.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(
+      masm, receiver.Bit() | name.Bit() | value.Bit(), 0, x10);
 }
 
 
@@ -341,16 +338,40 @@
 
 
 void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+  __ Ret();
 }
 
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnARM64);
+  ExternalReference restarter_frame_function_slot =
+      ExternalReference::debug_restarter_frame_function_pointer_address(
+          masm->isolate());
+  UseScratchRegisterScope temps(masm);
+  Register scratch = temps.AcquireX();
+
+  __ Mov(scratch, restarter_frame_function_slot);
+  __ Str(xzr, MemOperand(scratch));
+
+  // We do not know our frame height, but set sp based on fp.
+  __ Sub(masm->StackPointer(), fp, kPointerSize);
+  __ AssertStackConsistency();
+
+  __ Pop(x1, fp, lr);  // Function, Frame, Return address.
+
+  // Load context from the function.
+  __ Ldr(cp, FieldMemOperand(x1, JSFunction::kContextOffset));
+
+  // Get function code.
+  __ Ldr(scratch, FieldMemOperand(x1, JSFunction::kSharedFunctionInfoOffset));
+  __ Ldr(scratch, FieldMemOperand(scratch, SharedFunctionInfo::kCodeOffset));
+  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+
+  // Re-run JSFunction, x1 is function, cp is context.
+  __ Br(scratch);
 }
 
 
-const bool LiveEdit::kFrameDropperSupported = false;
+const bool LiveEdit::kFrameDropperSupported = true;
 
 } }  // namespace v8::internal
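The padding words pushed in Generate_DebugBreakCallHelper above are smis so the GC can scan the live-edit frame safely; on arm64 a smi keeps its 32-bit payload in the upper word, which is exactly what the STATIC_ASSERTs on kSmiTag and kSmiShift encode. A sketch of that tagging scheme, assuming kSmiTag == 0 and kSmiShift == 32 as asserted above:

#include <cassert>
#include <cstdint>

int64_t SmiFromInt(int32_t value) {
  return static_cast<int64_t>(value) << 32;  // payload in the upper word
}

int32_t SmiToInt(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}

int main() {
  int64_t padding = SmiFromInt(1);
  assert((padding & 1) == 0);  // low tag bit clear: not a heap pointer
  assert(SmiToInt(padding) == 1);
  return 0;
}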
 
diff --git a/src/arm64/decoder-arm64-inl.h b/src/arm64/decoder-arm64-inl.h
index e8eef5e..5dd2fd9 100644
--- a/src/arm64/decoder-arm64-inl.h
+++ b/src/arm64/decoder-arm64-inl.h
@@ -96,17 +96,17 @@
 
 template<typename V>
 void Decoder<V>::DecodePCRelAddressing(Instruction* instr) {
-  ASSERT(instr->Bits(27, 24) == 0x0);
+  DCHECK(instr->Bits(27, 24) == 0x0);
   // We know bit 28 is set, as <b28:b27> = 0 is filtered out at the top level
   // decode.
-  ASSERT(instr->Bit(28) == 0x1);
+  DCHECK(instr->Bit(28) == 0x1);
   V::VisitPCRelAddressing(instr);
 }
 
 
 template<typename V>
 void Decoder<V>::DecodeBranchSystemException(Instruction* instr) {
-  ASSERT((instr->Bits(27, 24) == 0x4) ||
+  DCHECK((instr->Bits(27, 24) == 0x4) ||
          (instr->Bits(27, 24) == 0x5) ||
          (instr->Bits(27, 24) == 0x6) ||
          (instr->Bits(27, 24) == 0x7) );
@@ -208,7 +208,7 @@
 
 template<typename V>
 void Decoder<V>::DecodeLoadStore(Instruction* instr) {
-  ASSERT((instr->Bits(27, 24) == 0x8) ||
+  DCHECK((instr->Bits(27, 24) == 0x8) ||
          (instr->Bits(27, 24) == 0x9) ||
          (instr->Bits(27, 24) == 0xC) ||
          (instr->Bits(27, 24) == 0xD) );
@@ -328,7 +328,7 @@
 
 template<typename V>
 void Decoder<V>::DecodeLogical(Instruction* instr) {
-  ASSERT(instr->Bits(27, 24) == 0x2);
+  DCHECK(instr->Bits(27, 24) == 0x2);
 
   if (instr->Mask(0x80400000) == 0x00400000) {
     V::VisitUnallocated(instr);
@@ -348,7 +348,7 @@
 
 template<typename V>
 void Decoder<V>::DecodeBitfieldExtract(Instruction* instr) {
-  ASSERT(instr->Bits(27, 24) == 0x3);
+  DCHECK(instr->Bits(27, 24) == 0x3);
 
   if ((instr->Mask(0x80400000) == 0x80000000) ||
       (instr->Mask(0x80400000) == 0x00400000) ||
@@ -374,7 +374,7 @@
 
 template<typename V>
 void Decoder<V>::DecodeAddSubImmediate(Instruction* instr) {
-  ASSERT(instr->Bits(27, 24) == 0x1);
+  DCHECK(instr->Bits(27, 24) == 0x1);
   if (instr->Bit(23) == 1) {
     V::VisitUnallocated(instr);
   } else {
@@ -385,7 +385,7 @@
 
 template<typename V>
 void Decoder<V>::DecodeDataProcessing(Instruction* instr) {
-  ASSERT((instr->Bits(27, 24) == 0xA) ||
+  DCHECK((instr->Bits(27, 24) == 0xA) ||
          (instr->Bits(27, 24) == 0xB) );
 
   if (instr->Bit(24) == 0) {
@@ -501,7 +501,7 @@
 
 template<typename V>
 void Decoder<V>::DecodeFP(Instruction* instr) {
-  ASSERT((instr->Bits(27, 24) == 0xE) ||
+  DCHECK((instr->Bits(27, 24) == 0xE) ||
          (instr->Bits(27, 24) == 0xF) );
 
   if (instr->Bit(28) == 0) {
@@ -614,7 +614,7 @@
           }
         } else {
           // Bit 30 == 1 has been handled earlier.
-          ASSERT(instr->Bit(30) == 0);
+          DCHECK(instr->Bit(30) == 0);
           if (instr->Mask(0xA0800000) != 0) {
             V::VisitUnallocated(instr);
           } else {
@@ -630,7 +630,7 @@
 template<typename V>
 void Decoder<V>::DecodeAdvSIMDLoadStore(Instruction* instr) {
   // TODO(all): Implement Advanced SIMD load/store instruction decode.
-  ASSERT(instr->Bits(29, 25) == 0x6);
+  DCHECK(instr->Bits(29, 25) == 0x6);
   V::VisitUnimplemented(instr);
 }
 
@@ -638,7 +638,7 @@
 template<typename V>
 void Decoder<V>::DecodeAdvSIMDDataProcessing(Instruction* instr) {
   // TODO(all): Implement Advanced SIMD data processing instruction decode.
-  ASSERT(instr->Bits(27, 25) == 0x7);
+  DCHECK(instr->Bits(27, 25) == 0x7);
   V::VisitUnimplemented(instr);
 }
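Each Decode* routine above guards on instr->Bits(27, 24), the top-level opcode field that the decoder dispatches on. A minimal sketch of the bit-extraction helper behind those DCHECKs (field values taken from them):

#include <cassert>
#include <cstdint>

// Extract the inclusive bit range [lsb, msb] of an instruction word, as
// instr->Bits(msb, lsb) does.
uint32_t Bits(uint32_t instr, int msb, int lsb) {
  return (instr >> lsb) & ((1u << (msb - lsb + 1)) - 1);
}

int main() {
  // The decoder routes on bits <27:24>: 0x2 selects DecodeLogical, while
  // 0x8/0x9/0xC/0xD select DecodeLoadStore, per the DCHECKs above.
  uint32_t instr = 0x12000000;  // bits<27:24> == 0x2
  assert(Bits(instr, 27, 24) == 0x2);
  return 0;
}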
 
diff --git a/src/arm64/decoder-arm64.cc b/src/arm64/decoder-arm64.cc
index 08881c2..5cca85e 100644
--- a/src/arm64/decoder-arm64.cc
+++ b/src/arm64/decoder-arm64.cc
@@ -6,9 +6,9 @@
 
 #if V8_TARGET_ARCH_ARM64
 
+#include "src/arm64/decoder-arm64.h"
 #include "src/globals.h"
 #include "src/utils.h"
-#include "src/arm64/decoder-arm64.h"
 
 
 namespace v8 {
@@ -17,13 +17,13 @@
 
 void DispatchingDecoderVisitor::AppendVisitor(DecoderVisitor* new_visitor) {
   visitors_.remove(new_visitor);
-  visitors_.push_front(new_visitor);
+  visitors_.push_back(new_visitor);
 }
 
 
 void DispatchingDecoderVisitor::PrependVisitor(DecoderVisitor* new_visitor) {
   visitors_.remove(new_visitor);
-  visitors_.push_back(new_visitor);
+  visitors_.push_front(new_visitor);
 }
 
 
@@ -39,7 +39,7 @@
   }
   // We reached the end of the list. The last element must be
   // registered_visitor.
-  ASSERT(*it == registered_visitor);
+  DCHECK(*it == registered_visitor);
   visitors_.insert(it, new_visitor);
 }
 
@@ -57,7 +57,7 @@
   }
   // We reached the end of the list. The last element must be
   // registered_visitor.
-  ASSERT(*it == registered_visitor);
+  DCHECK(*it == registered_visitor);
   visitors_.push_back(new_visitor);
 }
 
@@ -70,7 +70,7 @@
 #define DEFINE_VISITOR_CALLERS(A)                                \
   void DispatchingDecoderVisitor::Visit##A(Instruction* instr) { \
     if (!(instr->Mask(A##FMask) == A##Fixed)) {                  \
-      ASSERT(instr->Mask(A##FMask) == A##Fixed);                 \
+      DCHECK(instr->Mask(A##FMask) == A##Fixed);                 \
     }                                                            \
     std::list<DecoderVisitor*>::iterator it;                     \
     for (it = visitors_.begin(); it != visitors_.end(); it++) {  \
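The first hunk above fixes AppendVisitor and PrependVisitor, whose list operations had been swapped: append should make the visitor run last (push_back), prepend first (push_front), and re-registering a visitor moves it rather than duplicating it. A toy model of the intended semantics:

#include <cassert>
#include <list>
#include <string>

struct VisitorList {
  std::list<std::string> visitors;
  void Append(const std::string& v) {   // visitor runs last
    visitors.remove(v);
    visitors.push_back(v);
  }
  void Prepend(const std::string& v) {  // visitor runs first
    visitors.remove(v);
    visitors.push_front(v);
  }
};

int main() {
  VisitorList list;
  list.Append("disasm");
  list.Prepend("tracer");
  list.Append("disasm");  // re-appending moves it; no duplicate
  assert(list.visitors.front() == "tracer");
  assert(list.visitors.back() == "disasm");
  assert(list.visitors.size() == 2);
  return 0;
}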
diff --git a/src/arm64/decoder-arm64.h b/src/arm64/decoder-arm64.h
index 0ce8425..af6bcc6 100644
--- a/src/arm64/decoder-arm64.h
+++ b/src/arm64/decoder-arm64.h
@@ -7,8 +7,8 @@
 
 #include <list>
 
-#include "src/globals.h"
 #include "src/arm64/instructions-arm64.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/arm64/delayed-masm-arm64-inl.h b/src/arm64/delayed-masm-arm64-inl.h
new file mode 100644
index 0000000..2c44630
--- /dev/null
+++ b/src/arm64/delayed-masm-arm64-inl.h
@@ -0,0 +1,55 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_DELAYED_MASM_ARM64_INL_H_
+#define V8_ARM64_DELAYED_MASM_ARM64_INL_H_
+
+#include "src/arm64/delayed-masm-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+void DelayedMasm::EndDelayedUse() {
+  EmitPending();
+  DCHECK(!scratch_register_acquired_);
+  ResetSavedValue();
+}
+
+
+void DelayedMasm::Mov(const Register& rd,
+                      const Operand& operand,
+                      DiscardMoveMode discard_mode) {
+  EmitPending();
+  DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+  __ Mov(rd, operand, discard_mode);
+}
+
+
+void DelayedMasm::Fmov(FPRegister fd, FPRegister fn) {
+  EmitPending();
+  __ Fmov(fd, fn);
+}
+
+
+void DelayedMasm::Fmov(FPRegister fd, double imm) {
+  EmitPending();
+  __ Fmov(fd, imm);
+}
+
+
+void DelayedMasm::LoadObject(Register result, Handle<Object> object) {
+  EmitPending();
+  DCHECK(!IsScratchRegister(result) || scratch_register_acquired_);
+  __ LoadObject(result, object);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM64_DELAYED_MASM_ARM64_INL_H_
diff --git a/src/arm64/delayed-masm-arm64.cc b/src/arm64/delayed-masm-arm64.cc
new file mode 100644
index 0000000..c3bda91
--- /dev/null
+++ b/src/arm64/delayed-masm-arm64.cc
@@ -0,0 +1,198 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/arm64/delayed-masm-arm64.h"
+#include "src/arm64/lithium-codegen-arm64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+void DelayedMasm::StackSlotMove(LOperand* src, LOperand* dst) {
+  DCHECK(src->IsStackSlot());
+  DCHECK(dst->IsStackSlot());
+  MemOperand src_operand = cgen_->ToMemOperand(src);
+  MemOperand dst_operand = cgen_->ToMemOperand(dst);
+  if (pending_ == kStackSlotMove) {
+    DCHECK(pending_pc_ == masm_->pc_offset());
+    UseScratchRegisterScope scope(masm_);
+    DoubleRegister temp1 = scope.AcquireD();
+    DoubleRegister temp2 = scope.AcquireD();
+    switch (MemOperand::AreConsistentForPair(pending_address_src_,
+                                             src_operand)) {
+      case MemOperand::kNotPair:
+        __ Ldr(temp1, pending_address_src_);
+        __ Ldr(temp2, src_operand);
+        break;
+      case MemOperand::kPairAB:
+        __ Ldp(temp1, temp2, pending_address_src_);
+        break;
+      case MemOperand::kPairBA:
+        __ Ldp(temp2, temp1, src_operand);
+        break;
+    }
+    switch (MemOperand::AreConsistentForPair(pending_address_dst_,
+                                             dst_operand)) {
+      case MemOperand::kNotPair:
+        __ Str(temp1, pending_address_dst_);
+        __ Str(temp2, dst_operand);
+        break;
+      case MemOperand::kPairAB:
+        __ Stp(temp1, temp2, pending_address_dst_);
+        break;
+      case MemOperand::kPairBA:
+        __ Stp(temp2, temp1, dst_operand);
+        break;
+    }
+    ResetPending();
+    return;
+  }
+
+  EmitPending();
+  pending_ = kStackSlotMove;
+  pending_address_src_ = src_operand;
+  pending_address_dst_ = dst_operand;
+#ifdef DEBUG
+  pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::StoreConstant(uint64_t value, const MemOperand& operand) {
+  DCHECK(!scratch_register_acquired_);
+  if ((pending_ == kStoreConstant) && (value == pending_value_)) {
+    MemOperand::PairResult result =
+        MemOperand::AreConsistentForPair(pending_address_dst_, operand);
+    if (result != MemOperand::kNotPair) {
+      const MemOperand& dst =
+          (result == MemOperand::kPairAB) ?
+              pending_address_dst_ :
+              operand;
+      DCHECK(pending_pc_ == masm_->pc_offset());
+      if (pending_value_ == 0) {
+        __ Stp(xzr, xzr, dst);
+      } else {
+        SetSavedValue(pending_value_);
+        __ Stp(ScratchRegister(), ScratchRegister(), dst);
+      }
+      ResetPending();
+      return;
+    }
+  }
+
+  EmitPending();
+  pending_ = kStoreConstant;
+  pending_address_dst_ = operand;
+  pending_value_ = value;
+#ifdef DEBUG
+  pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::Load(const CPURegister& rd, const MemOperand& operand) {
+  if ((pending_ == kLoad) &&
+      pending_register_.IsSameSizeAndType(rd)) {
+    switch (MemOperand::AreConsistentForPair(pending_address_src_, operand)) {
+      case MemOperand::kNotPair:
+        break;
+      case MemOperand::kPairAB:
+        DCHECK(pending_pc_ == masm_->pc_offset());
+        DCHECK(!IsScratchRegister(pending_register_) ||
+               scratch_register_acquired_);
+        DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+        __ Ldp(pending_register_, rd, pending_address_src_);
+        ResetPending();
+        return;
+      case MemOperand::kPairBA:
+        DCHECK(pending_pc_ == masm_->pc_offset());
+        DCHECK(!IsScratchRegister(pending_register_) ||
+               scratch_register_acquired_);
+        DCHECK(!IsScratchRegister(rd) || scratch_register_acquired_);
+        __ Ldp(rd, pending_register_, operand);
+        ResetPending();
+        return;
+    }
+  }
+
+  EmitPending();
+  pending_ = kLoad;
+  pending_register_ = rd;
+  pending_address_src_ = operand;
+#ifdef DEBUG
+  pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::Store(const CPURegister& rd, const MemOperand& operand) {
+  if ((pending_ == kStore) &&
+      pending_register_.IsSameSizeAndType(rd)) {
+    switch (MemOperand::AreConsistentForPair(pending_address_dst_, operand)) {
+      case MemOperand::kNotPair:
+        break;
+      case MemOperand::kPairAB:
+        DCHECK(pending_pc_ == masm_->pc_offset());
+        __ Stp(pending_register_, rd, pending_address_dst_);
+        ResetPending();
+        return;
+      case MemOperand::kPairBA:
+        DCHECK(pending_pc_ == masm_->pc_offset());
+        __ Stp(rd, pending_register_, operand);
+        ResetPending();
+        return;
+    }
+  }
+
+  EmitPending();
+  pending_ = kStore;
+  pending_register_ = rd;
+  pending_address_dst_ = operand;
+#ifdef DEBUG
+  pending_pc_ = masm_->pc_offset();
+#endif
+}
+
+
+void DelayedMasm::EmitPending() {
+  DCHECK((pending_ == kNone) || (pending_pc_ == masm_->pc_offset()));
+  switch (pending_) {
+    case kNone:
+      return;
+    case kStoreConstant:
+      if (pending_value_ == 0) {
+        __ Str(xzr, pending_address_dst_);
+      } else {
+        SetSavedValue(pending_value_);
+        __ Str(ScratchRegister(), pending_address_dst_);
+      }
+      break;
+    case kLoad:
+      DCHECK(!IsScratchRegister(pending_register_) ||
+              scratch_register_acquired_);
+      __ Ldr(pending_register_, pending_address_src_);
+      break;
+    case kStore:
+      __ Str(pending_register_, pending_address_dst_);
+      break;
+    case kStackSlotMove: {
+      UseScratchRegisterScope scope(masm_);
+      DoubleRegister temp = scope.AcquireD();
+      __ Ldr(temp, pending_address_src_);
+      __ Str(temp, pending_address_dst_);
+      break;
+    }
+  }
+  ResetPending();
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/arm64/delayed-masm-arm64.h b/src/arm64/delayed-masm-arm64.h
new file mode 100644
index 0000000..76227a3
--- /dev/null
+++ b/src/arm64/delayed-masm-arm64.h
@@ -0,0 +1,164 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_DELAYED_MASM_ARM64_H_
+#define V8_ARM64_DELAYED_MASM_ARM64_H_
+
+#include "src/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+
+// This class delays the generation of some instructions. This way, we have a
+// chance to merge two instructions into one (a load/store pair).
+// Each instruction must either:
+//  - merge with the pending instruction and generate just one instruction.
+//  - emit the pending instruction and then generate the instruction (or set the
+//    pending instruction).
+class DelayedMasm BASE_EMBEDDED {
+ public:
+  DelayedMasm(LCodeGen* owner,
+              MacroAssembler* masm,
+              const Register& scratch_register)
+    : cgen_(owner), masm_(masm), scratch_register_(scratch_register),
+      scratch_register_used_(false), pending_(kNone), saved_value_(0) {
+#ifdef DEBUG
+    pending_register_ = no_reg;
+    pending_value_ = 0;
+    pending_pc_ = 0;
+    scratch_register_acquired_ = false;
+#endif
+  }
+  ~DelayedMasm() {
+    DCHECK(!scratch_register_acquired_);
+    DCHECK(!scratch_register_used_);
+    DCHECK(!pending());
+  }
+  inline void EndDelayedUse();
+
+  const Register& ScratchRegister() {
+    scratch_register_used_ = true;
+    return scratch_register_;
+  }
+  bool IsScratchRegister(const CPURegister& reg) {
+    return reg.Is(scratch_register_);
+  }
+  bool scratch_register_used() const { return scratch_register_used_; }
+  void reset_scratch_register_used() { scratch_register_used_ = false; }
+  // Acquire/Release scratch register for use outside this class.
+  void AcquireScratchRegister() {
+    EmitPending();
+    ResetSavedValue();
+#ifdef DEBUG
+    DCHECK(!scratch_register_acquired_);
+    scratch_register_acquired_ = true;
+#endif
+  }
+  void ReleaseScratchRegister() {
+#ifdef DEBUG
+    DCHECK(scratch_register_acquired_);
+    scratch_register_acquired_ = false;
+#endif
+  }
+  bool pending() { return pending_ != kNone; }
+
+  // Extra layer over the macro-assembler instructions; each wrapper first
+  // emits any pending instruction.
+  inline void Mov(const Register& rd,
+                  const Operand& operand,
+                  DiscardMoveMode discard_mode = kDontDiscardForSameWReg);
+  inline void Fmov(FPRegister fd, FPRegister fn);
+  inline void Fmov(FPRegister fd, double imm);
+  inline void LoadObject(Register result, Handle<Object> object);
+  // Instructions which try to merge with the pending instruction.
+  void StackSlotMove(LOperand* src, LOperand* dst);
+  // StoreConstant can only be used if the scratch register is not acquired.
+  void StoreConstant(uint64_t value, const MemOperand& operand);
+  void Load(const CPURegister& rd, const MemOperand& operand);
+  void Store(const CPURegister& rd, const MemOperand& operand);
+  // Emit the pending instruction, if any.
+  void EmitPending();
+  // Reset the pending state.
+  void ResetPending() {
+    pending_ = kNone;
+#ifdef DEBUG
+    pending_register_ = no_reg;
+    MemOperand tmp;
+    pending_address_src_ = tmp;
+    pending_address_dst_ = tmp;
+    pending_value_ = 0;
+    pending_pc_ = 0;
+#endif
+  }
+  void InitializeRootRegister() {
+    masm_->InitializeRootRegister();
+  }
+
+ private:
+  // Set the saved value and load the ScratchRegister with it.
+  void SetSavedValue(uint64_t saved_value) {
+    DCHECK(saved_value != 0);
+    if (saved_value_ != saved_value) {
+      masm_->Mov(ScratchRegister(), saved_value);
+      saved_value_ = saved_value;
+    }
+  }
+  // Reset the saved value (i.e. the value of ScratchRegister is no longer
+  // known).
+  void ResetSavedValue() {
+    saved_value_ = 0;
+  }
+
+  LCodeGen* cgen_;
+  MacroAssembler* masm_;
+
+  // Register used to store a constant.
+  Register scratch_register_;
+  bool scratch_register_used_;
+
+  // Sometimes we store or load two values in two contiguous stack slots.
+  // In this case, we try to use the ldp/stp instructions to reduce code size.
+  // To do that, instead of emitting an instruction directly, we record in
+  // the following fields that an instruction is pending. When the next
+  // instruction arrives, if it can form an ldp/stp pair with the pending
+  // one, we emit the pair. Otherwise we emit the pending instruction, and
+  // the new instruction becomes pending in its turn.
+
+  // Enumeration of instructions which can be pending.
+  enum Pending {
+    kNone,
+    kStoreConstant,
+    kLoad, kStore,
+    kStackSlotMove
+  };
+  // The pending instruction.
+  Pending pending_;
+  // For kLoad, kStore: register which must be loaded/stored.
+  CPURegister pending_register_;
+  // For kLoad, kStackSlotMove: address of the load.
+  MemOperand pending_address_src_;
+  // For kStoreConstant, kStore, kStackSlotMove: address of the store.
+  MemOperand pending_address_dst_;
+  // For kStoreConstant: value to be stored.
+  uint64_t pending_value_;
+  // Value held in the ScratchRegister if saved_value_ is not 0.
+  // For 0, we use xzr.
+  uint64_t saved_value_;
+#ifdef DEBUG
+  // Address where the pending instruction must be generated. It's only used to
+  // check that nothing else has been generated since we set the pending
+  // instruction.
+  int pending_pc_;
+  // If true, the scratch register has been acquired outside this class. The
+  // scratch register can no longer be used for constants.
+  bool scratch_register_acquired_;
+#endif
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM64_DELAYED_MASM_ARM64_H_
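DelayedMasm's purpose, per the class comment above, is to hold back one load or store so that an adjacent one can fuse into a single ldp/stp. A stripped-down sketch of that state machine, with AreConsistentForPair reduced to an adjacent-slot check on offsets — the real MemOperand test also compares base registers and addressing modes, and handles the reversed kPairBA order:

#include <cstdint>
#include <cstdio>

struct ToyDelayedMasm {
  bool has_pending = false;
  int pending_reg = 0;
  int64_t pending_offset = 0;

  void Store(int reg, int64_t offset) {
    // Pair only when the new store lands exactly one 8-byte slot above the
    // pending one (the kPairAB case).
    if (has_pending && offset == pending_offset + 8) {
      std::printf("stp x%d, x%d, [sp, #%lld]\n", pending_reg, reg,
                  static_cast<long long>(pending_offset));
      has_pending = false;
      return;
    }
    EmitPending();  // can't pair: flush, then this store becomes pending
    has_pending = true;
    pending_reg = reg;
    pending_offset = offset;
  }

  void EmitPending() {
    if (!has_pending) return;
    std::printf("str x%d, [sp, #%lld]\n", pending_reg,
                static_cast<long long>(pending_offset));
    has_pending = false;
  }
};

int main() {
  ToyDelayedMasm masm;
  masm.Store(0, 16);
  masm.Store(1, 24);   // pairs with the pending store: one stp
  masm.Store(2, 48);   // not adjacent: becomes pending
  masm.EmitPending();  // flush, as EndDelayedUse() does
  return 0;
}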
diff --git a/src/arm64/deoptimizer-arm64.cc b/src/arm64/deoptimizer-arm64.cc
index 7ac5bd0..d67dc8f 100644
--- a/src/arm64/deoptimizer-arm64.cc
+++ b/src/arm64/deoptimizer-arm64.cc
@@ -32,9 +32,6 @@
 
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
   Address code_start_address = code->instruction_start();
 #ifdef DEBUG
   Address prev_call_address = NULL;
@@ -52,9 +49,9 @@
     patcher.blr(ip0);
     patcher.dc64(reinterpret_cast<intptr_t>(deopt_entry));
 
-    ASSERT((prev_call_address == NULL) ||
+    DCHECK((prev_call_address == NULL) ||
            (call_address >= prev_call_address + patch_size()));
-    ASSERT(call_address + patch_size() <= code->instruction_end());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
@@ -92,8 +89,8 @@
 
 
 void Deoptimizer::SetPlatformCompiledStubRegisters(
-    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
-  ApiFunction function(descriptor->deoptimization_handler_);
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+  ApiFunction function(descriptor->deoptimization_handler());
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
   int params = descriptor->GetHandlerParameterCount();
@@ -110,42 +107,6 @@
 }
 
 
-#define __ masm->
-
-static void CopyRegisterDumpToFrame(MacroAssembler* masm,
-                                    Register frame,
-                                    CPURegList reg_list,
-                                    Register scratch1,
-                                    Register scratch2,
-                                    int src_offset,
-                                    int dst_offset) {
-  int offset0, offset1;
-  CPURegList copy_to_input = reg_list;
-  int reg_count = reg_list.Count();
-  int reg_size = reg_list.RegisterSizeInBytes();
-  for (int i = 0; i < (reg_count / 2); i++) {
-    __ PeekPair(scratch1, scratch2, src_offset + (i * reg_size * 2));
-
-    offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
-    offset1 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
-
-    if ((offset0 + reg_size) == offset1) {
-      // Registers are adjacent: store in pairs.
-      __ Stp(scratch1, scratch2, MemOperand(frame, offset0));
-    } else {
-      // Registers are not adjacent: store individually.
-      __ Str(scratch1, MemOperand(frame, offset0));
-      __ Str(scratch2, MemOperand(frame, offset1));
-    }
-  }
-  if ((reg_count & 1) != 0) {
-    __ Peek(scratch1, src_offset + (reg_count - 1) * reg_size);
-    offset0 = (copy_to_input.PopLowestIndex().code() * reg_size) + dst_offset;
-    __ Str(scratch1, MemOperand(frame, offset0));
-  }
-}
-
-#undef __
 
 #define __ masm()->
 
@@ -209,13 +170,23 @@
   __ Ldr(x1, MemOperand(deoptimizer, Deoptimizer::input_offset()));
 
   // Copy core registers into the input frame.
-  CopyRegisterDumpToFrame(masm(), x1, saved_registers, x2, x4, 0,
-                          FrameDescription::registers_offset());
+  CPURegList copy_to_input = saved_registers;
+  for (int i = 0; i < saved_registers.Count(); i++) {
+    __ Peek(x2, i * kPointerSize);
+    CPURegister current_reg = copy_to_input.PopLowestIndex();
+    int offset = (current_reg.code() * kPointerSize) +
+        FrameDescription::registers_offset();
+    __ Str(x2, MemOperand(x1, offset));
+  }
 
   // Copy FP registers to the input frame.
-  CopyRegisterDumpToFrame(masm(), x1, saved_fp_registers, x2, x4,
-                          kFPRegistersOffset,
-                          FrameDescription::double_registers_offset());
+  for (int i = 0; i < saved_fp_registers.Count(); i++) {
+    int dst_offset = FrameDescription::double_registers_offset() +
+        (i * kDoubleSize);
+    int src_offset = kFPRegistersOffset + (i * kDoubleSize);
+    __ Peek(x2, src_offset);
+    __ Str(x2, MemOperand(x1, dst_offset));
+  }
 
   // Remove the bailout id and the saved registers from the stack.
   __ Drop(1 + (kSavedRegistersAreaSize / kXRegSize));
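
The two loops above replace the removed CopyRegisterDumpToFrame helper, which stored registers in pairs when their frame slots happened to be adjacent, with one load and store per register; a register's slot is simply its encoding times the slot size plus the section's base offset. A stand-alone sketch of that offset arithmetic, using hypothetical constants in place of FrameDescription's real layout:

    #include <cstdio>

    const int kPointerSize = 8;         // 64-bit slots, as on arm64
    const int kRegistersOffset = 0x20;  // assumption, not V8's actual offset

    // Mirrors (current_reg.code() * kPointerSize) +
    // FrameDescription::registers_offset() from the loop above.
    int RegisterSlotOffset(int reg_code) {
      return reg_code * kPointerSize + kRegistersOffset;
    }

    int main() {
      for (int code = 0; code < 4; ++code) {
        std::printf("x%d -> frame offset %d\n", code, RegisterSlotOffset(code));
      }
      return 0;
    }
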
@@ -279,7 +250,7 @@
   __ B(lt, &outer_push_loop);
 
   __ Ldr(x1, MemOperand(x4, Deoptimizer::input_offset()));
-  ASSERT(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
+  DCHECK(!saved_fp_registers.IncludesAliasOf(crankshaft_fp_scratch) &&
          !saved_fp_registers.IncludesAliasOf(fp_zero) &&
          !saved_fp_registers.IncludesAliasOf(fp_scratch));
   int src_offset = FrameDescription::double_registers_offset();
@@ -306,7 +277,7 @@
   // Note that lr is not in the list of saved_registers and will be restored
  // later. We can use it to hold the address of the last output frame while
   // reloading the other registers.
-  ASSERT(!saved_registers.IncludesAliasOf(lr));
+  DCHECK(!saved_registers.IncludesAliasOf(lr));
   Register last_output_frame = lr;
   __ Mov(last_output_frame, current_frame);
 
@@ -349,14 +320,14 @@
    // The number of entries will never exceed kMaxNumberOfEntries.
    // As long as kMaxNumberOfEntries is a valid 16-bit immediate you can use
     // a movz instruction to load the entry id.
-    ASSERT(is_uint16(Deoptimizer::kMaxNumberOfEntries));
+    DCHECK(is_uint16(Deoptimizer::kMaxNumberOfEntries));
 
     for (int i = 0; i < count(); i++) {
       int start = masm()->pc_offset();
       USE(start);
       __ movz(entry_id, i);
       __ b(&done);
-      ASSERT(masm()->pc_offset() - start == table_entry_size_);
+      DCHECK(masm()->pc_offset() - start == table_entry_size_);
     }
   }
   __ Bind(&done);
diff --git a/src/arm64/disasm-arm64.cc b/src/arm64/disasm-arm64.cc
index e6a30b4..ac7cb37 100644
--- a/src/arm64/disasm-arm64.cc
+++ b/src/arm64/disasm-arm64.cc
@@ -3,19 +3,19 @@
 // found in the LICENSE file.
 
 #include <assert.h>
-#include <stdio.h>
 #include <stdarg.h>
+#include <stdio.h>
 #include <string.h>
 
 #include "src/v8.h"
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/disasm.h"
 #include "src/arm64/decoder-arm64-inl.h"
 #include "src/arm64/disasm-arm64.h"
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
 
 namespace v8 {
 namespace internal {
@@ -258,7 +258,7 @@
 
 
 bool Disassembler::IsMovzMovnImm(unsigned reg_size, uint64_t value) {
-  ASSERT((reg_size == kXRegSizeInBits) ||
+  DCHECK((reg_size == kXRegSizeInBits) ||
          ((reg_size == kWRegSizeInBits) && (value <= 0xffffffff)));
 
   // Test for movz: 16-bits set at positions 0, 16, 32 or 48.
@@ -1176,7 +1176,7 @@
       }
     }
   } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
-    ASSERT(instr->Mask(SystemHintMask) == HINT);
+    DCHECK(instr->Mask(SystemHintMask) == HINT);
     switch (instr->ImmHint()) {
       case NOP: {
         mnemonic = "nop";
@@ -1246,7 +1246,7 @@
                           const char* format) {
  // TODO(mcapewel): I don't think I can use the instr address here - there
  //                 needs to be a base address too.
-  ASSERT(mnemonic != NULL);
+  DCHECK(mnemonic != NULL);
   ResetOutput();
   Substitute(instr, mnemonic);
   if (format != NULL) {
@@ -1364,7 +1364,7 @@
 
 int Disassembler::SubstituteImmediateField(Instruction* instr,
                                            const char* format) {
-  ASSERT(format[0] == 'I');
+  DCHECK(format[0] == 'I');
 
   switch (format[1]) {
     case 'M': {  // IMoveImm or IMoveLSL.
@@ -1372,7 +1372,7 @@
         uint64_t imm = instr->ImmMoveWide() << (16 * instr->ShiftMoveWide());
         AppendToOutput("#0x%" PRIx64, imm);
       } else {
-        ASSERT(format[5] == 'L');
+        DCHECK(format[5] == 'L');
         AppendToOutput("#0x%" PRIx64, instr->ImmMoveWide());
         if (instr->ShiftMoveWide() > 0) {
           AppendToOutput(", lsl #%d", 16 * instr->ShiftMoveWide());
@@ -1417,7 +1417,7 @@
       return 6;
     }
     case 'A': {  // IAddSub.
-      ASSERT(instr->ShiftAddSub() <= 1);
+      DCHECK(instr->ShiftAddSub() <= 1);
       int64_t imm = instr->ImmAddSub() << (12 * instr->ShiftAddSub());
       AppendToOutput("#0x%" PRIx64 " (%" PRId64 ")", imm, imm);
       return 7;
@@ -1474,7 +1474,7 @@
 
 int Disassembler::SubstituteBitfieldImmediateField(Instruction* instr,
                                                    const char* format) {
-  ASSERT((format[0] == 'I') && (format[1] == 'B'));
+  DCHECK((format[0] == 'I') && (format[1] == 'B'));
   unsigned r = instr->ImmR();
   unsigned s = instr->ImmS();
 
@@ -1488,13 +1488,13 @@
         AppendToOutput("#%d", s + 1);
         return 5;
       } else {
-        ASSERT(format[3] == '-');
+        DCHECK(format[3] == '-');
         AppendToOutput("#%d", s - r + 1);
         return 7;
       }
     }
     case 'Z': {  // IBZ-r.
-      ASSERT((format[3] == '-') && (format[4] == 'r'));
+      DCHECK((format[3] == '-') && (format[4] == 'r'));
       unsigned reg_size = (instr->SixtyFourBits() == 1) ? kXRegSizeInBits
                                                         : kWRegSizeInBits;
       AppendToOutput("#%d", reg_size - r);
@@ -1510,14 +1510,16 @@
 
 int Disassembler::SubstituteLiteralField(Instruction* instr,
                                          const char* format) {
-  ASSERT(strncmp(format, "LValue", 6) == 0);
+  DCHECK(strncmp(format, "LValue", 6) == 0);
   USE(format);
 
   switch (instr->Mask(LoadLiteralMask)) {
     case LDR_w_lit:
     case LDR_x_lit:
     case LDR_s_lit:
-    case LDR_d_lit: AppendToOutput("(addr %p)", instr->LiteralAddress()); break;
+    case LDR_d_lit:
+      AppendToOutput("(addr 0x%016" PRIxPTR ")", instr->LiteralAddress());
+      break;
     default: UNREACHABLE();
   }
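
The literal-address change above replaces %p, whose textual format is implementation-defined and varies across platforms, with an explicit zero-padded hexadecimal format built from PRIxPTR. The same idiom in isolation:

    #include <cinttypes>
    #include <cstdio>

    int main() {
      int object = 42;
      uintptr_t addr = reinterpret_cast<uintptr_t>(&object);
      // Fixed-width 16-digit hex, matching "(addr 0x%016" PRIxPTR ")" above.
      std::printf("(addr 0x%016" PRIxPTR ")\n", addr);
      return 0;
    }
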
 
@@ -1526,12 +1528,12 @@
 
 
 int Disassembler::SubstituteShiftField(Instruction* instr, const char* format) {
-  ASSERT(format[0] == 'H');
-  ASSERT(instr->ShiftDP() <= 0x3);
+  DCHECK(format[0] == 'H');
+  DCHECK(instr->ShiftDP() <= 0x3);
 
   switch (format[1]) {
     case 'D': {  // HDP.
-      ASSERT(instr->ShiftDP() != ROR);
+      DCHECK(instr->ShiftDP() != ROR);
     }  // Fall through.
     case 'L': {  // HLo.
       if (instr->ImmDPShift() != 0) {
@@ -1550,7 +1552,7 @@
 
 int Disassembler::SubstituteConditionField(Instruction* instr,
                                            const char* format) {
-  ASSERT(format[0] == 'C');
+  DCHECK(format[0] == 'C');
   const char* condition_code[] = { "eq", "ne", "hs", "lo",
                                    "mi", "pl", "vs", "vc",
                                    "hi", "ls", "ge", "lt",
@@ -1572,12 +1574,12 @@
 int Disassembler::SubstitutePCRelAddressField(Instruction* instr,
                                               const char* format) {
   USE(format);
-  ASSERT(strncmp(format, "AddrPCRel", 9) == 0);
+  DCHECK(strncmp(format, "AddrPCRel", 9) == 0);
 
   int offset = instr->ImmPCRel();
 
   // Only ADR (AddrPCRelByte) is supported.
-  ASSERT(strcmp(format, "AddrPCRelByte") == 0);
+  DCHECK(strcmp(format, "AddrPCRelByte") == 0);
 
   char sign = '+';
   if (offset < 0) {
@@ -1592,7 +1594,7 @@
 
 int Disassembler::SubstituteBranchTargetField(Instruction* instr,
                                               const char* format) {
-  ASSERT(strncmp(format, "BImm", 4) == 0);
+  DCHECK(strncmp(format, "BImm", 4) == 0);
 
   int64_t offset = 0;
   switch (format[5]) {
@@ -1619,8 +1621,8 @@
 
 int Disassembler::SubstituteExtendField(Instruction* instr,
                                         const char* format) {
-  ASSERT(strncmp(format, "Ext", 3) == 0);
-  ASSERT(instr->ExtendMode() <= 7);
+  DCHECK(strncmp(format, "Ext", 3) == 0);
+  DCHECK(instr->ExtendMode() <= 7);
   USE(format);
 
   const char* extend_mode[] = { "uxtb", "uxth", "uxtw", "uxtx",
@@ -1646,7 +1648,7 @@
 
 int Disassembler::SubstituteLSRegOffsetField(Instruction* instr,
                                              const char* format) {
-  ASSERT(strncmp(format, "Offsetreg", 9) == 0);
+  DCHECK(strncmp(format, "Offsetreg", 9) == 0);
   const char* extend_mode[] = { "undefined", "undefined", "uxtw", "lsl",
                                 "undefined", "undefined", "sxtw", "sxtx" };
   USE(format);
@@ -1675,7 +1677,7 @@
 
 int Disassembler::SubstitutePrefetchField(Instruction* instr,
                                           const char* format) {
-  ASSERT(format[0] == 'P');
+  DCHECK(format[0] == 'P');
   USE(format);
 
   int prefetch_mode = instr->PrefetchMode();
@@ -1690,7 +1692,7 @@
 
 int Disassembler::SubstituteBarrierField(Instruction* instr,
                                          const char* format) {
-  ASSERT(format[0] == 'M');
+  DCHECK(format[0] == 'M');
   USE(format);
 
   static const char* options[4][4] = {
@@ -1797,7 +1799,7 @@
     : converter_(converter) {}
 
 
-Disassembler::~Disassembler() {}
+Disassembler::~Disassembler() { USE(converter_); }
 
 
 int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
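
The destructor change above references converter_ through USE() so the member does not trigger unused-field warnings in configurations where nothing else in the translation unit reads it. A sketch of the idiom, with a hypothetical stand-in for V8's USE helper:

    // An empty inline function: the call optimizes away entirely, but the
    // argument expression counts as a use for the compiler's warning passes.
    template <typename T>
    inline void Use(const T&) {}

    class Example {
     public:
      ~Example() { Use(converter_); }  // silences unused-member diagnostics
     private:
      int converter_;
    };
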
diff --git a/src/arm64/disasm-arm64.h b/src/arm64/disasm-arm64.h
index a7db4d2..8cd3b80 100644
--- a/src/arm64/disasm-arm64.h
+++ b/src/arm64/disasm-arm64.h
@@ -7,10 +7,10 @@
 
 #include "src/v8.h"
 
+#include "src/arm64/decoder-arm64.h"
+#include "src/arm64/instructions-arm64.h"
 #include "src/globals.h"
 #include "src/utils.h"
-#include "src/arm64/instructions-arm64.h"
-#include "src/arm64/decoder-arm64.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/arm64/frames-arm64.cc b/src/arm64/frames-arm64.cc
index 122ac21..b3633e0 100644
--- a/src/arm64/frames-arm64.cc
+++ b/src/arm64/frames-arm64.cc
@@ -6,9 +6,9 @@
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/assembler.h"
-#include "src/arm64/assembler-arm64.h"
 #include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/assembler-arm64.h"
+#include "src/assembler.h"
 #include "src/frames.h"
 
 namespace v8 {
diff --git a/src/arm64/frames-arm64.h b/src/arm64/frames-arm64.h
index 557c955..8d4ce86 100644
--- a/src/arm64/frames-arm64.h
+++ b/src/arm64/frames-arm64.h
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/arm64/constants-arm64.h"
 #include "src/arm64/assembler-arm64.h"
+#include "src/arm64/constants-arm64.h"
 
 #ifndef V8_ARM64_FRAMES_ARM64_H_
 #define V8_ARM64_FRAMES_ARM64_H_
diff --git a/src/arm64/full-codegen-arm64.cc b/src/arm64/full-codegen-arm64.cc
index 4a44e35..25a9efd 100644
--- a/src/arm64/full-codegen-arm64.cc
+++ b/src/arm64/full-codegen-arm64.cc
@@ -6,15 +6,16 @@
 
 #if V8_TARGET_ARCH_ARM64
 
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
 #include "src/debug.h"
 #include "src/full-codegen.h"
+#include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 #include "src/parser.h"
 #include "src/scopes.h"
-#include "src/stub-cache.h"
 
 #include "src/arm64/code-stubs-arm64.h"
 #include "src/arm64/macro-assembler-arm64.h"
@@ -34,18 +35,18 @@
 
   ~JumpPatchSite() {
     if (patch_site_.is_bound()) {
-      ASSERT(info_emitted_);
+      DCHECK(info_emitted_);
     } else {
-      ASSERT(reg_.IsNone());
+      DCHECK(reg_.IsNone());
     }
   }
 
   void EmitJumpIfNotSmi(Register reg, Label* target) {
     // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
     InstructionAccurateScope scope(masm_, 1);
-    ASSERT(!info_emitted_);
-    ASSERT(reg.Is64Bits());
-    ASSERT(!reg.Is(csp));
+    DCHECK(!info_emitted_);
+    DCHECK(reg.Is64Bits());
+    DCHECK(!reg.Is(csp));
     reg_ = reg;
     __ bind(&patch_site_);
     __ tbz(xzr, 0, target);   // Always taken before patched.
@@ -54,9 +55,9 @@
   void EmitJumpIfSmi(Register reg, Label* target) {
     // This code will be patched by PatchInlinedSmiCode, in ic-arm64.cc.
     InstructionAccurateScope scope(masm_, 1);
-    ASSERT(!info_emitted_);
-    ASSERT(reg.Is64Bits());
-    ASSERT(!reg.Is(csp));
+    DCHECK(!info_emitted_);
+    DCHECK(reg.Is64Bits());
+    DCHECK(!reg.Is(csp));
     reg_ = reg;
     __ bind(&patch_site_);
     __ tbnz(xzr, 0, target);  // Never taken before patched.
@@ -130,7 +131,7 @@
     __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
 
     __ Ldr(x10, GlobalObjectMemOperand());
-    __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+    __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
     __ Poke(x10, receiver_offset);
 
     __ Bind(&ok);
@@ -154,12 +155,12 @@
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
-    ASSERT(!info->function()->is_generator() || locals_count == 0);
+    DCHECK(!info->function()->is_generator() || locals_count == 0);
 
     if (locals_count > 0) {
       if (locals_count >= 128) {
         Label ok;
-        ASSERT(jssp.Is(__ StackPointer()));
+        DCHECK(jssp.Is(__ StackPointer()));
         __ Sub(x10, jssp, locals_count * kPointerSize);
         __ CompareRoot(x10, Heap::kRealStackLimitRootIndex);
         __ B(hs, &ok);
@@ -198,7 +199,7 @@
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
       __ Push(x1, x10);
-      __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewGlobalContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -206,7 +207,7 @@
       need_write_barrier = false;
     } else {
       __ Push(x1);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register_x1 = false;
     // Context is returned in x0.  It replaces the context passed to us.
@@ -291,9 +292,9 @@
     { Comment cmnt(masm_, "[ Declarations");
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         VariableDeclaration* function = scope()->function();
-        ASSERT(function->proxy()->var()->mode() == CONST ||
+        DCHECK(function->proxy()->var()->mode() == CONST ||
                function->proxy()->var()->mode() == CONST_LEGACY);
-        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
         VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
@@ -303,7 +304,7 @@
   { Comment cmnt(masm_, "[ Stack check");
     PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
     Label ok;
-    ASSERT(jssp.Is(__ StackPointer()));
+    DCHECK(jssp.Is(__ StackPointer()));
     __ CompareRoot(jssp, Heap::kStackLimitRootIndex);
     __ B(hs, &ok);
     PredictableCodeSizeScope predictable(masm_,
@@ -313,9 +314,9 @@
   }
 
   { Comment cmnt(masm_, "[ Body");
-    ASSERT(loop_depth() == 0);
+    DCHECK(loop_depth() == 0);
     VisitStatements(function()->body());
-    ASSERT(loop_depth() == 0);
+    DCHECK(loop_depth() == 0);
   }
 
   // Always emit a 'return undefined' in case control fell off the end of
@@ -359,13 +360,13 @@
 
 void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
                                                 Label* back_edge_target) {
-  ASSERT(jssp.Is(__ StackPointer()));
+  DCHECK(jssp.Is(__ StackPointer()));
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   // Block literal pools whilst emitting back edge code.
   Assembler::BlockPoolsScope block_const_pool(masm_);
   Label ok;
 
-  ASSERT(back_edge_target->is_bound());
+  DCHECK(back_edge_target->is_bound());
   // We want to do a round rather than a floor of distance/kCodeSizeMultiplier
   // to reduce the absolute error due to the integer division. To do that,
   // we add kCodeSizeMultiplier/2 to the distance (equivalent to adding 0.5 to
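
The back-edge comment above (continued in the source) relies on the standard trick for rounding an integer division to the nearest value rather than flooring it: add half the divisor before dividing. A small worked example:

    #include <cstdio>

    // Round-to-nearest integer division, as in the back-edge weight
    // computation: (distance + multiplier / 2) / multiplier.
    int RoundedDiv(int distance, int multiplier) {
      return (distance + multiplier / 2) / multiplier;
    }

    int main() {
      std::printf("%d\n", RoundedDiv(7, 4));  // prints 2: 7/4 = 1.75 -> 2
      std::printf("%d\n", RoundedDiv(5, 4));  // prints 1: 5/4 = 1.25 -> 1
      return 0;
    }
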
@@ -407,7 +408,7 @@
       // Runtime::TraceExit returns its parameter in x0.
       __ Push(result_register());
       __ CallRuntime(Runtime::kTraceExit, 1);
-      ASSERT(x0.Is(result_register()));
+      DCHECK(x0.Is(result_register()));
     }
     // Pretend that the exit is a backwards jump to the entry.
     int weight = 1;
@@ -441,7 +442,7 @@
       // of the generated code must be consistent.
       const Register& current_sp = __ StackPointer();
      // Nothing ensures 16-byte alignment here.
-      ASSERT(!current_sp.Is(csp));
+      DCHECK(!current_sp.Is(csp));
       __ mov(current_sp, fp);
       int no_frame_start = masm_->pc_offset();
       __ ldp(fp, lr, MemOperand(current_sp, 2 * kXRegSize, PostIndex));
@@ -460,25 +461,25 @@
 
 
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
   __ Push(result_register());
 }
 
 
 void FullCodeGenerator::TestContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   // For simplicity we always test the accumulator register.
   codegen()->GetVar(result_register(), var);
   codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
@@ -542,7 +543,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ B(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -569,7 +570,7 @@
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
 }
 
@@ -577,7 +578,7 @@
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
 }
@@ -585,7 +586,7 @@
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ Poke(reg, 0);
 }
@@ -593,7 +594,7 @@
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Mov(result_register(), reg);
@@ -604,7 +605,7 @@
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT(materialize_true == materialize_false);
+  DCHECK(materialize_true == materialize_false);
   __ Bind(materialize_true);
 }
 
@@ -638,8 +639,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_true == true_label_);
-  ASSERT(materialize_false == false_label_);
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
 }
 
 
@@ -700,7 +701,7 @@
   if (if_false == fall_through) {
     __ B(cond, if_true);
   } else if (if_true == fall_through) {
-    ASSERT(if_false != fall_through);
+    DCHECK(if_false != fall_through);
     __ B(NegateCondition(cond), if_false);
   } else {
     __ B(cond, if_true);
@@ -723,7 +724,7 @@
 
 
 MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   if (var->IsContextSlot()) {
     int context_chain_length = scope()->ContextChainLength(var->scope());
     __ LoadContext(scratch, context_chain_length);
@@ -745,8 +746,8 @@
                                Register src,
                                Register scratch0,
                                Register scratch1) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
-  ASSERT(!AreAliased(src, scratch0, scratch1));
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!AreAliased(src, scratch0, scratch1));
   MemOperand location = VarOperand(var, scratch0);
   __ Str(src, location);
 
@@ -789,7 +790,7 @@
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current function
   // context.
-  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
   if (generate_debug_code_) {
     // Check that we're not inside a with or catch context.
     __ Ldr(x1, FieldMemOperand(cp, HeapObject::kMapOffset));
@@ -844,7 +845,7 @@
       Comment cmnt(masm_, "[ VariableDeclaration");
       __ Mov(x2, Operand(variable->name()));
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(IsDeclaredVariableMode(mode));
+      DCHECK(IsDeclaredVariableMode(mode));
       PropertyAttributes attr = IsImmutableVariableMode(mode) ? READ_ONLY
                                                               : NONE;
       __ Mov(x1, Smi::FromInt(attr));
@@ -859,7 +860,7 @@
         // Pushing 0 (xzr) indicates no initial value.
         __ Push(cp, x2, x1, xzr);
       }
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -874,7 +875,7 @@
     case Variable::UNALLOCATED: {
       globals_->Add(variable->name(), zone());
       Handle<SharedFunctionInfo> function =
-          Compiler::BuildFunctionInfo(declaration->fun(), script());
+          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack overflow exception.
       if (function.is_null()) return SetStackOverflow();
       globals_->Add(function, zone());
@@ -915,7 +916,7 @@
       __ Push(cp, x2, x1);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -924,8 +925,8 @@
 
 void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   Variable* variable = declaration->proxy()->var();
-  ASSERT(variable->location() == Variable::CONTEXT);
-  ASSERT(variable->interface()->IsFrozen());
+  DCHECK(variable->location() == Variable::CONTEXT);
+  DCHECK(variable->interface()->IsFrozen());
 
   Comment cmnt(masm_, "[ ModuleDeclaration");
   EmitDebugCheckDeclarationContext(variable);
@@ -990,7 +991,7 @@
   __ Mov(flags, Smi::FromInt(DeclareGlobalsFlags()));
   }
   __ Push(cp, x11, flags);
-  __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
@@ -998,7 +999,7 @@
 void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
   // Call the runtime to declare the modules.
   __ Push(descriptions);
-  __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
   // Return value is ignored.
 }
 
@@ -1051,7 +1052,8 @@
 
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
-    Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1165,8 +1167,9 @@
          FieldMemOperand(x2, DescriptorArray::kEnumCacheBridgeCacheOffset));
 
   // Set up the four remaining stack slots.
-  __ Push(x0, x2);              // Map, enumeration cache.
-  __ SmiTagAndPush(x1, xzr);    // Enum cache length, zero (both as smis).
+  __ SmiTag(x1);
+  // Map, enumeration cache, enum cache length, zero (the last two as smis).
+  __ Push(x0, x2, x1, xzr);
   __ B(&loop);
 
   __ Bind(&no_descriptors);
@@ -1177,7 +1180,7 @@
   __ Bind(&fixed_array);
 
   __ LoadObject(x1, FeedbackVector());
-  __ Mov(x10, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+  __ Mov(x10, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ Str(x10, FieldMemOperand(x1, FixedArray::OffsetOfElementAt(slot)));
 
   __ Mov(x1, Smi::FromInt(1));  // Smi indicates slow check.
@@ -1185,11 +1188,11 @@
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
  // TODO(all): A similar check was done already. Can we avoid it here?
   __ CompareObjectType(x10, x11, x12, LAST_JS_PROXY_TYPE);
-  ASSERT(Smi::FromInt(0) == 0);
+  DCHECK(Smi::FromInt(0) == 0);
   __ CzeroX(x1, le);  // Zero indicates proxy.
-  __ Push(x1, x0);  // Smi and array
-  __ Ldr(x1, FieldMemOperand(x0, FixedArray::kLengthOffset));
-  __ Push(x1, xzr);  // Fixed array length (as smi) and initial index.
+  __ Ldr(x2, FieldMemOperand(x0, FixedArray::kLengthOffset));
+  // Smi and array, fixed array length (as smi) and initial index.
+  __ Push(x1, x0, x2, xzr);
 
   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
@@ -1270,16 +1273,6 @@
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
-  // var iterable = subject
-  VisitForAccumulatorValue(stmt->assign_iterable());
-
-  // As with for-in, skip the loop if the iterator is null or undefined.
-  Register iterator = x0;
-  __ JumpIfRoot(iterator, Heap::kUndefinedValueRootIndex,
-                loop_statement.break_label());
-  __ JumpIfRoot(iterator, Heap::kNullValueRootIndex,
-                loop_statement.break_label());
-
   // var iterator = iterable[Symbol.iterator]();
   VisitForEffect(stmt->assign_iterator());
 
@@ -1328,9 +1321,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(),
-                            info->strict_mode(),
-                            info->is_generator());
+    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
     __ Mov(x2, Operand(info));
     __ CallStub(&stub);
   } else {
@@ -1338,7 +1329,7 @@
     __ LoadRoot(x10, pretenure ? Heap::kTrueValueRootIndex
                                : Heap::kFalseValueRootIndex);
     __ Push(cp, x11, x10);
-    __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+    __ CallRuntime(Runtime::kNewClosure, 3);
   }
   context()->Plug(x0);
 }
@@ -1350,7 +1341,27 @@
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cnmt(masm_, "[ SuperReference ");
+
+  __ ldr(LoadDescriptor::ReceiverRegister(),
+         MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ Mov(LoadDescriptor::NameRegister(), Operand(home_object_symbol));
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  __ Mov(x10, Operand(isolate()->factory()->undefined_value()));
+  __ cmp(x0, x10);
+  Label done;
+  __ b(&done, ne);
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
   Register current = cp;
@@ -1393,8 +1404,13 @@
     __ Bind(&fast);
   }
 
-  __ Ldr(x0, GlobalObjectMemOperand());
-  __ Mov(x2, Operand(var->name()));
+  __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+  __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
+  if (FLAG_vector_ics) {
+    __ Mov(VectorLoadICDescriptor::SlotRegister(),
+           Smi::FromInt(proxy->VariableFeedbackSlot()));
+  }
+
   ContextualMode mode = (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL
                                                         : CONTEXTUAL;
   CallLoadIC(mode);
@@ -1403,7 +1419,7 @@
 
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
                                                                 Label* slow) {
-  ASSERT(var->IsContextSlot());
+  DCHECK(var->IsContextSlot());
   Register context = cp;
   Register next = x10;
   Register temp = x11;
@@ -1431,7 +1447,7 @@
 }
 
 
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
                                                   TypeofState typeof_state,
                                                   Label* slow,
                                                   Label* done) {
@@ -1440,8 +1456,9 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
+  Variable* var = proxy->var();
   if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
     __ B(done);
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
@@ -1454,7 +1471,7 @@
       } else {  // LET || CONST
         __ Mov(x0, Operand(var->name()));
         __ Push(x0);
-        __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
       }
     }
     __ B(done);
@@ -1472,10 +1489,12 @@
   switch (var->location()) {
     case Variable::UNALLOCATED: {
       Comment cmnt(masm_, "Global variable");
-      // Use inline caching. Variable name is passed in x2 and the global
-      // object (receiver) in x0.
-      __ Ldr(x0, GlobalObjectMemOperand());
-      __ Mov(x2, Operand(var->name()));
+      __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+      __ Mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+      if (FLAG_vector_ics) {
+        __ Mov(VectorLoadICDescriptor::SlotRegister(),
+               Smi::FromInt(proxy->VariableFeedbackSlot()));
+      }
       CallLoadIC(CONTEXTUAL);
       context()->Plug(x0);
       break;
@@ -1493,7 +1512,7 @@
         // always looked up dynamically, i.e. in that case
         //     var->location() == LOOKUP.
         // always holds.
-        ASSERT(var->scope() != NULL);
+        DCHECK(var->scope() != NULL);
 
         // Check if the binding really needs an initialization check. The check
         // can be skipped in the following situation: we have a LET or CONST
@@ -1516,8 +1535,8 @@
           skip_init_check = false;
         } else {
           // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+          DCHECK(proxy->position() != RelocInfo::kNoPosition);
           skip_init_check = var->mode() != CONST_LEGACY &&
               var->initializer_position() < proxy->position();
         }
@@ -1532,11 +1551,11 @@
             // binding in harmony mode.
             __ Mov(x0, Operand(var->name()));
             __ Push(x0);
-            __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
             __ Bind(&done);
           } else {
            // Uninitialized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST_LEGACY);
+            DCHECK(var->mode() == CONST_LEGACY);
             __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
             __ Bind(&done);
           }
@@ -1552,12 +1571,12 @@
       Label done, slow;
       // Generate code for loading from variables potentially shadowed by
       // eval-introduced variables.
-      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
       __ Bind(&slow);
       Comment cmnt(masm_, "Lookup variable");
       __ Mov(x1, Operand(var->name()));
       __ Push(cp, x1);  // Context and name.
-      __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
       __ Bind(&done);
       context()->Plug(x0);
       break;
@@ -1589,7 +1608,7 @@
   __ Mov(x2, Operand(expr->pattern()));
   __ Mov(x1, Operand(expr->flags()));
   __ Push(x4, x3, x2, x1);
-  __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ Mov(x5, x0);
 
   __ Bind(&materialized);
@@ -1601,7 +1620,7 @@
   __ Bind(&runtime_allocate);
   __ Mov(x10, Smi::FromInt(size));
   __ Push(x5, x10);
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ Pop(x5);
 
   __ Bind(&allocated);
@@ -1647,7 +1666,7 @@
       masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
       properties_count > max_cloned_properties) {
     __ Push(x3, x2, x1, x0);
-    __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
     FastCloneShallowObjectStub stub(isolate(), properties_count);
     __ CallStub(&stub);
@@ -1677,14 +1696,15 @@
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ Mov(x2, Operand(key->value()));
-            __ Peek(x1, 0);
+            DCHECK(StoreDescriptor::ValueRegister().is(x0));
+            __ Mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+            __ Peek(StoreDescriptor::ReceiverRegister(), 0);
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1698,7 +1718,7 @@
           __ Push(x0);
           VisitForStackValue(key);
           VisitForStackValue(value);
-          __ Mov(x0, Smi::FromInt(NONE));  // PropertyAttributes
+          __ Mov(x0, Smi::FromInt(SLOPPY));  // Language mode
           __ Push(x0);
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
@@ -1738,11 +1758,11 @@
       EmitAccessor(it->second->setter);
       __ Mov(x10, Smi::FromInt(NONE));
       __ Push(x10);
-      __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+      __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
 
   if (expr->has_function()) {
-    ASSERT(result_saved);
+    DCHECK(result_saved);
     __ Peek(x0, 0);
     __ Push(x0);
     __ CallRuntime(Runtime::kToFastProperties, 1);
@@ -1766,7 +1786,7 @@
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
   Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
+  DCHECK_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
   bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
@@ -1787,7 +1807,7 @@
   if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
     __ Mov(x0, Smi::FromInt(flags));
     __ Push(x3, x2, x1, x0);
-    __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
@@ -1804,8 +1824,8 @@
     if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
 
     if (!result_saved) {
-      __ Push(x0);
-      __ Push(Smi::FromInt(expr->literal_index()));
+      __ Mov(x1, Smi::FromInt(expr->literal_index()));
+      __ Push(x0, x1);
       result_saved = true;
     }
     VisitForAccumulatorValue(subexpr);
@@ -1838,7 +1858,7 @@
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  ASSERT(expr->target()->IsValidReferenceExpression());
+  DCHECK(expr->target()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ Assignment");
 
@@ -1860,9 +1880,9 @@
       break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
-        // We need the receiver both on the stack and in the accumulator.
-        VisitForAccumulatorValue(property->obj());
-        __ Push(result_register());
+        // We need the receiver both on the stack and in the register.
+        VisitForStackValue(property->obj());
+        __ Peek(LoadDescriptor::ReceiverRegister(), 0);
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1870,9 +1890,9 @@
     case KEYED_PROPERTY:
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
-        __ Peek(x1, 0);
-        __ Push(x0);
+        VisitForStackValue(property->key());
+        __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
+        __ Peek(LoadDescriptor::NameRegister(), 0);
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1949,17 +1969,45 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  __ Mov(x2, Operand(key->value()));
-  // Call load IC. It has arguments receiver and property name x0 and x2.
-  CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  DCHECK(!prop->IsSuperAccess());
+
+  __ Mov(LoadDescriptor::NameRegister(), Operand(key->value()));
+  if (FLAG_vector_ics) {
+    __ Mov(VectorLoadICDescriptor::SlotRegister(),
+           Smi::FromInt(prop->PropertyFeedbackSlot()));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(x0);
+  VisitForStackValue(super_ref->this_var());
+  __ Push(key->value());
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
  // Call keyed load IC. It has arguments key and receiver in x0 and x1.
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, prop->PropertyFeedbackId());
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  if (FLAG_vector_ics) {
+    __ Mov(VectorLoadICDescriptor::SlotRegister(),
+           Smi::FromInt(prop->PropertyFeedbackSlot()));
+    CallIC(ic);
+  } else {
+    CallIC(ic, prop->PropertyFeedbackId());
+  }
 }
 
 
@@ -1982,10 +2030,11 @@
   patch_site.EmitJumpIfSmi(x10, &both_smis);
 
   __ Bind(&stub_call);
-  BinaryOpICStub stub(isolate(), op, mode);
+
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
   {
     Assembler::BlockPoolsScope scope(masm_);
-    CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+    CallIC(code, expr->BinaryOperationFeedbackId());
     patch_site.EmitPatchInfo();
   }
   __ B(&done);
@@ -2008,16 +2057,14 @@
       __ Ubfx(right, right, kSmiShift, 5);
       __ Lsl(result, left, right);
       break;
-    case Token::SHR: {
-      Label right_not_zero;
-      __ Cbnz(right, &right_not_zero);
-      __ Tbnz(left, kXSignBit, &stub_call);
-      __ Bind(&right_not_zero);
+    case Token::SHR:
+      // If `left >>> right` >= 0x80000000, the result is not representable in a
+      // signed 32-bit smi.
       __ Ubfx(right, right, kSmiShift, 5);
-      __ Lsr(result, left, right);
-      __ Bic(result, result, kSmiShiftMask);
+      __ Lsr(x10, left, right);
+      __ Tbnz(x10, kXSignBit, &stub_call);
+      __ Bic(result, x10, kSmiShiftMask);
       break;
-    }
     case Token::ADD:
       __ Adds(x10, left, right);
       __ B(vs, &stub_call);
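
The rewritten Token::SHR fast path above drops the old special case (a zero shift of a negative input) in favor of one rule: do the unsigned shift, then bail out to the BinaryOpIC stub whenever the result's sign bit is set, since such values exceed the non-negative range of the smi payload. A sketch of the check on plain 32-bit values, assuming the 32-bit smi payload this port uses:

    #include <stdint.h>

    // Attempt the smi fast path for `left >>> right`. Returns false when the
    // unsigned result has bit 31 set (only possible for a zero shift of a
    // negative input), in which case the generated code falls back to the
    // BinaryOpIC stub.
    bool TrySmiShiftRightLogical(int32_t left, int32_t right, int32_t* result) {
      uint32_t shifted = static_cast<uint32_t>(left) >> (right & 0x1f);
      if (shifted & 0x80000000u) return false;  // analogue of Tbnz(kXSignBit)
      *result = static_cast<int32_t>(shifted);
      return true;
    }
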
@@ -2030,11 +2077,12 @@
       break;
     case Token::MUL: {
       Label not_minus_zero, done;
+      STATIC_ASSERT(static_cast<unsigned>(kSmiShift) == (kXRegSizeInBits / 2));
+      STATIC_ASSERT(kSmiTag == 0);
       __ Smulh(x10, left, right);
       __ Cbnz(x10, &not_minus_zero);
       __ Eor(x11, left, right);
       __ Tbnz(x11, kXSignBit, &stub_call);
-      STATIC_ASSERT(kSmiTag == 0);
       __ Mov(result, x10);
       __ B(&done);
       __ Bind(&not_minus_zero);
@@ -2067,11 +2115,11 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ Pop(x1);
-  BinaryOpICStub stub(isolate(), op, mode);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
   JumpPatchSite patch_site(masm_);    // Unbound, signals no inlined smi code.
   {
     Assembler::BlockPoolsScope scope(masm_);
-    CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+    CallIC(code, expr->BinaryOperationFeedbackId());
     patch_site.EmitPatchInfo();
   }
   context()->Plug(x0);
@@ -2079,7 +2127,7 @@
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  ASSERT(expr->IsValidReferenceExpression());
+  DCHECK(expr->IsValidReferenceExpression());
 
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot.
@@ -2104,9 +2152,10 @@
       VisitForAccumulatorValue(prop->obj());
       // TODO(all): We could introduce a VisitForRegValue(reg, expr) to avoid
       // this copy.
-      __ Mov(x1, x0);
-      __ Pop(x0);  // Restore value.
-      __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
+      __ Mov(StoreDescriptor::ReceiverRegister(), x0);
+      __ Pop(StoreDescriptor::ValueRegister());  // Restore value.
+      __ Mov(StoreDescriptor::NameRegister(),
+             Operand(prop->key()->AsLiteral()->value()));
       CallStoreIC();
       break;
     }
@@ -2114,11 +2163,11 @@
       __ Push(x0);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
-      __ Mov(x1, x0);
-      __ Pop(x2, x0);
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ Mov(StoreDescriptor::NameRegister(), x0);
+      __ Pop(StoreDescriptor::ReceiverRegister(),
+             StoreDescriptor::ValueRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic);
       break;
     }
@@ -2140,38 +2189,24 @@
 }
 
 
-void FullCodeGenerator::EmitCallStoreContextSlot(
-    Handle<String> name, StrictMode strict_mode) {
-  __ Mov(x11, Operand(name));
-  __ Mov(x10, Smi::FromInt(strict_mode));
-  // jssp[0]  : mode.
-  // jssp[8]  : name.
-  // jssp[16] : context.
-  // jssp[24] : value.
-  __ Push(x0, cp, x11, x10);
-  __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
-}
-
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
   ASM_LOCATION("FullCodeGenerator::EmitVariableAssignment");
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ Mov(x2, Operand(var->name()));
-    __ Ldr(x1, GlobalObjectMemOperand());
+    __ Mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+    __ Ldr(StoreDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
     CallStoreIC();
 
   } else if (op == Token::INIT_CONST_LEGACY) {
     // Const initializers need a write barrier.
-    ASSERT(!var->IsParameter());  // No const parameters.
+    DCHECK(!var->IsParameter());  // No const parameters.
     if (var->IsLookupSlot()) {
-      __ Push(x0);
-      __ Mov(x0, Operand(var->name()));
-      __ Push(cp, x0);  // Context and name.
-      __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+      __ Mov(x1, Operand(var->name()));
+      __ Push(x0, cp, x1);
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
     } else {
-      ASSERT(var->IsStackLocal() || var->IsContextSlot());
+      DCHECK(var->IsStackLocal() || var->IsContextSlot());
       Label skip;
       MemOperand location = VarOperand(var, x1);
       __ Ldr(x10, location);
@@ -2182,29 +2217,34 @@
 
   } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
-    if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
-    } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-      Label assign;
-      MemOperand location = VarOperand(var, x1);
-      __ Ldr(x10, location);
-      __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
-      __ Mov(x10, Operand(var->name()));
-      __ Push(x10);
-      __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
-      // Perform the assignment.
-      __ Bind(&assign);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-    }
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, x1);
+    __ Ldr(x10, location);
+    __ JumpIfNotRoot(x10, Heap::kTheHoleValueRootIndex, &assign);
+    __ Mov(x10, Operand(var->name()));
+    __ Push(x10);
+    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    // Perform the assignment.
+    __ Bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
 
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
     if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
+      // Assignment to var.
+      __ Mov(x11, Operand(var->name()));
+      __ Mov(x10, Smi::FromInt(strict_mode()));
+      // jssp[0]  : mode.
+      // jssp[8]  : name.
+      // jssp[16] : context.
+      // jssp[24] : value.
+      __ Push(x0, cp, x11, x10);
+      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
     } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       MemOperand location = VarOperand(var, x1);
       if (FLAG_debug_code && op == Token::INIT_LET) {
         __ Ldr(x10, location);
@@ -2222,14 +2262,14 @@
   ASM_LOCATION("FullCodeGenerator::EmitNamedPropertyAssignment");
   // Assignment to a property, using a named store IC.
   Property* prop = expr->target()->AsProperty();
-  ASSERT(prop != NULL);
-  ASSERT(prop->key()->IsLiteral());
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
-  __ Pop(x1);
-
+  __ Mov(StoreDescriptor::NameRegister(),
+         Operand(prop->key()->AsLiteral()->value()));
+  __ Pop(StoreDescriptor::ReceiverRegister());
   CallStoreIC(expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2244,11 +2284,10 @@
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
   // TODO(all): Could we pass this in registers rather than on the stack?
-  __ Pop(x1, x2);  // Key and object holding the property.
+  __ Pop(StoreDescriptor::NameRegister(), StoreDescriptor::ReceiverRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(x0));
 
-  Handle<Code> ic = strict_mode() == SLOPPY
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
   CallIC(ic, expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2261,14 +2300,20 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForAccumulatorValue(expr->obj());
-    EmitNamedPropertyLoad(expr);
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), x0);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
     PrepareForBailoutForId(expr->LoadId(), TOS_REG);
     context()->Plug(x0);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
-    __ Pop(x1);
+    __ Move(LoadDescriptor::NameRegister(), x0);
+    __ Pop(LoadDescriptor::ReceiverRegister());
     EmitKeyedPropertyLoad(expr);
     context()->Plug(x0);
   }
@@ -2288,12 +2333,11 @@
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
 
-  CallIC::CallType call_type = callee->IsVariableProxy()
-      ? CallIC::FUNCTION
-      : CallIC::METHOD;
+  CallICState::CallType call_type =
+      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
 
   // Get the target function.
-  if (call_type == CallIC::FUNCTION) {
+  if (call_type == CallICState::FUNCTION) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
       PrepareForBailout(callee, NO_REGISTERS);
@@ -2303,8 +2347,9 @@
     __ Push(isolate()->factory()->undefined_value());
   } else {
     // Load the function from the receiver.
-    ASSERT(callee->IsProperty());
-    __ Peek(x0, 0);
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ Peek(LoadDescriptor::ReceiverRegister(), 0);
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
@@ -2316,6 +2361,45 @@
 }
 
 
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+
+  // Load the function from the receiver.
+  const Register scratch = x10;
+  SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(x0);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(x0);
+  __ Peek(scratch, kPointerSize);
+  __ Push(scratch, x0);
+  __ Push(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ Poke(x0, kPointerSize);
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2325,8 +2409,9 @@
   Expression* callee = expr->expression();
 
   // Load the function from the receiver.
-  ASSERT(callee->IsProperty());
-  __ Peek(x1, 0);
+  DCHECK(callee->IsProperty());
+  __ Peek(LoadDescriptor::ReceiverRegister(), 0);
+  __ Move(LoadDescriptor::NameRegister(), x0);
   EmitKeyedPropertyLoad(callee->AsProperty());
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
@@ -2334,11 +2419,11 @@
   __ Pop(x10);
   __ Push(x0, x10);
 
-  EmitCall(expr, CallIC::METHOD);
+  EmitCall(expr, CallICState::METHOD);
 }
 
 
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2370,28 +2455,26 @@
   // Prepare to push a copy of the first argument or undefined if it doesn't
   // exist.
   if (arg_count > 0) {
-    __ Peek(x10, arg_count * kXRegSize);
+    __ Peek(x9, arg_count * kXRegSize);
   } else {
-    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(x9, Heap::kUndefinedValueRootIndex);
   }
 
+  __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   // Prepare to push the receiver of the enclosing function.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ Ldr(x11, MemOperand(fp, receiver_offset * kPointerSize));
 
-  // Push.
-  __ Push(x10, x11);
-
   // Prepare to push the language mode.
-  __ Mov(x10, Smi::FromInt(strict_mode()));
+  __ Mov(x12, Smi::FromInt(strict_mode()));
  // Prepare to push the start position of the scope the call resides in.
-  __ Mov(x11, Smi::FromInt(scope()->start_position()));
+  __ Mov(x13, Smi::FromInt(scope()->start_position()));
 
   // Push.
-  __ Push(x10, x11);
+  __ Push(x9, x10, x11, x12, x13);
 
   // Do the runtime call.
-  __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
 }
 
 
@@ -2459,16 +2542,15 @@
     { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
     }
 
     __ Bind(&slow);
     // Call the runtime to find the function to call (returned in x0)
     // and the object holding it (returned in x1).
-    __ Push(context_register());
     __ Mov(x10, Operand(proxy->name()));
-    __ Push(x10);
-    __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+    __ Push(context_register(), x10);
+    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
     __ Push(x0, x1);  // Receiver, function.
 
     // If fast case code has been generated, emit code to push the
@@ -2479,11 +2561,10 @@
       __ B(&call);
       __ Bind(&done);
       // Push function.
-      __ Push(x0);
       // The receiver is implicitly the global receiver. Indicate this
       // by passing the undefined to the call function stub.
       __ LoadRoot(x1, Heap::kUndefinedValueRootIndex);
-      __ Push(x1);
+      __ Push(x0, x1);
       __ Bind(&call);
     }
 
@@ -2492,17 +2573,23 @@
     EmitCall(expr);
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(property->obj());
-    }
-    if (property->key()->IsPropertyName()) {
-      EmitCallWithLoadIC(expr);
+    bool is_named_call = property->key()->IsPropertyName();
+    // super.x() is handled in EmitSuperCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithLoadIC(expr, property->key());
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
     }
-
   } else {
-    ASSERT(call_type == Call::OTHER_CALL);
+    DCHECK(call_type == Call::OTHER_CALL);
     // Call to an arbitrary expression not handled specially above.
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(callee);
@@ -2515,7 +2602,7 @@
 
 #ifdef DEBUG
   // RecordJSReturnSite should have been called.
-  ASSERT(expr->return_is_recorded_);
+  DCHECK(expr->return_is_recorded_);
 #endif
 }
 
@@ -2549,7 +2636,7 @@
   // Record call targets in unoptimized code.
   if (FLAG_pretenuring_call_new) {
     EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
-    ASSERT(expr->AllocationSiteFeedbackSlot() ==
+    DCHECK(expr->AllocationSiteFeedbackSlot() ==
            expr->CallNewFeedbackSlot() + 1);
   }
 
@@ -2565,7 +2652,7 @@
 
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2585,7 +2672,7 @@
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2596,9 +2683,10 @@
   context()->PrepareTest(&materialize_true, &materialize_false,
                          &if_true, &if_false, &fall_through);
 
+  uint64_t sign_mask = V8_UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
+
   PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
-  __ TestAndSplit(x0, kSmiTagMask | (0x80000000UL << kSmiShift), if_true,
-                  if_false, fall_through);
+  __ TestAndSplit(x0, kSmiTagMask | sign_mask, if_true, if_false, fall_through);
 
   context()->Plug(if_true, if_false);
 }
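
 // A standalone sketch of the mask arithmetic above, assuming the 64-bit smi
 // layout used on this port (kSmiShift == 32, kSmiValueSize == 32, so the smi
 // sign bit is bit 63); it checks that the new expression matches the constant
 // the old code hard-coded:
 #include <cassert>
 #include <cstdint>
 int main() {
   const int kSmiShift = 32;
   const int kSmiValueSize = 32;
   uint64_t sign_mask = UINT64_C(1) << (kSmiShift + kSmiValueSize - 1);
   assert(sign_mask == (UINT64_C(0x80000000) << kSmiShift));  // old constant
   return 0;
 }
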
@@ -2606,7 +2694,7 @@
 
 void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2636,7 +2724,7 @@
 
 void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2659,7 +2747,7 @@
 void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitIsUndetectableObject");
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2684,7 +2772,7 @@
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
   Label materialize_true, materialize_false, skip_lookup;
@@ -2785,7 +2873,7 @@
 
 void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2807,7 +2895,7 @@
 
 void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2819,7 +2907,7 @@
                          &if_true, &if_false, &fall_through);
 
   // Only a HeapNumber can be -0.0, so return false if we have something else.
-  __ CheckMap(x0, x1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+  __ JumpIfNotHeapNumber(x0, if_false, DO_SMI_CHECK);
 
   // Test the bit pattern.
   __ Ldr(x10, FieldMemOperand(x0, HeapNumber::kValueOffset));
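
 // The bit test that follows relies on -0.0 being the only double whose bit
 // pattern is exactly the IEEE-754 sign bit. A standalone host-side check
 // (plain C++, not V8 API):
 #include <cassert>
 #include <cstdint>
 #include <cstring>
 int main() {
   double minus_zero = -0.0;
   uint64_t bits;
   std::memcpy(&bits, &minus_zero, sizeof(bits));
   assert(bits == (UINT64_C(1) << 63));  // only the sign bit is set
   return 0;
 }
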
@@ -2834,7 +2922,7 @@
 
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2856,7 +2944,7 @@
 
 void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2878,7 +2966,7 @@
 
 
 void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -2910,7 +2998,7 @@
 
 void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
   VisitForStackValue(args->at(0));
@@ -2934,7 +3022,7 @@
 
 void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in x1.
   VisitForAccumulatorValue(args->at(0));
@@ -2947,7 +3035,7 @@
 
 
 void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
   Label exit;
   // Get the number of formal parameters.
   __ Mov(x0, Smi::FromInt(info_->scope()->num_parameters()));
@@ -2970,7 +3058,7 @@
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitClassOf");
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
   VisitForAccumulatorValue(args->at(0));
@@ -3012,7 +3100,7 @@
 
   // Functions have class 'Function'.
   __ Bind(&function);
-  __ LoadRoot(x0, Heap::kfunction_class_stringRootIndex);
+  __ LoadRoot(x0, Heap::kFunction_stringRootIndex);
   __ B(&done);
 
   // Objects with a non-function constructor have class 'Object'.
@@ -3035,7 +3123,7 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3048,7 +3136,7 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 4);
+  DCHECK(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3061,7 +3149,7 @@
 void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitValueOf");
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
   Label done;
@@ -3078,8 +3166,8 @@
 
 void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  DCHECK(args->length() == 2);
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -3116,7 +3204,7 @@
   }
 
   __ Bind(&not_date_object);
-  __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
   __ Bind(&done);
   context()->Plug(x0);
 }
@@ -3124,16 +3212,16 @@
 
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = x0;
   Register index = x1;
   Register value = x2;
   Register scratch = x10;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
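   // With the reordered evaluation, index and value sit on the stack (index
   // deeper) and the string is in the accumulator, so the Pop below restores
   // value and then index.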
   __ Pop(value, index);
 
   if (FLAG_debug_code) {
@@ -3154,16 +3242,16 @@
 
 void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = x0;
   Register index = x1;
   Register value = x2;
   Register scratch = x10;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ Pop(value, index);
 
   if (FLAG_debug_code) {
@@ -3185,7 +3273,7 @@
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the MathPow stub.
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   MathPowStub stub(isolate(), MathPowStub::ON_STACK);
@@ -3196,7 +3284,7 @@
 
 void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
   __ Pop(x1);
@@ -3225,7 +3313,7 @@
 
 void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(args->length(), 1);
+  DCHECK_EQ(args->length(), 1);
 
   // Load the argument into x0 and call the stub.
   VisitForAccumulatorValue(args->at(0));
@@ -3238,7 +3326,7 @@
 
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3260,7 +3348,7 @@
 
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3305,7 +3393,7 @@
 
 void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3352,7 +3440,7 @@
 void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitStringAdd");
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3367,7 +3455,7 @@
 
 void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3380,7 +3468,7 @@
 void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
   ASM_LOCATION("FullCodeGenerator::EmitCallFunction");
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() >= 2);
+  DCHECK(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
   for (int i = 0; i < arg_count + 1; i++) {
@@ -3412,7 +3500,7 @@
 void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(2));
@@ -3424,8 +3512,8 @@
 
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  DCHECK_EQ(2, args->length());
+  DCHECK_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
@@ -3463,7 +3551,7 @@
 
   // Call runtime to perform the lookup.
   __ Push(cache, key);
-  __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ Bind(&done);
   context()->Plug(x0);
@@ -3492,7 +3580,7 @@
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
   __ AssertString(x0);
@@ -3504,11 +3592,11 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
-  ASM_LOCATION("FullCodeGenerator::EmitFastAsciiArrayJoin");
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
+  ASM_LOCATION("FullCodeGenerator::EmitFastOneByteArrayJoin");
 
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
 
@@ -3557,7 +3645,7 @@
   // Get the FixedArray containing array's elements.
   __ Ldr(elements, FieldMemOperand(array, JSArray::kElementsOffset));
 
-  // Check that all array elements are sequential ASCII strings, and
+  // Check that all array elements are sequential one-byte strings, and
   // accumulate the sum of their lengths.
   __ Mov(string_length, 0);
   __ Add(element, elements, FixedArray::kHeaderSize - kHeapObjectTag);
@@ -3572,14 +3660,14 @@
   //   elements_end: Array end.
   if (FLAG_debug_code) {
     __ Cmp(array_length, 0);
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
   }
   __ Bind(&loop);
   __ Ldr(string, MemOperand(element, kPointerSize, PostIndex));
   __ JumpIfSmi(string, &bailout);
   __ Ldr(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
   __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
   __ Ldrsw(scratch1,
            UntagSmiFieldMemOperand(string, SeqOneByteString::kLengthOffset));
   __ Adds(string_length, string_length, scratch1);
@@ -3601,11 +3689,11 @@
   //   string_length: Sum of string lengths (not smi).
   //   elements: FixedArray of strings.
 
-  // Check that the separator is a flat ASCII string.
+  // Check that the separator is a flat one-byte string.
   __ JumpIfSmi(separator, &bailout);
   __ Ldr(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
   __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
 
   // Add (separator length times array_length) - separator length to the
   // string_length to get the length of the result string.
@@ -3625,13 +3713,13 @@
   //   separator: Separator string
   //   string_length: Length of result string (not smi)
   //   array_length: Length of the array (not smi).
-  __ AllocateAsciiString(result, string_length, scratch1, scratch2, scratch3,
-                         &bailout);
+  __ AllocateOneByteString(result, string_length, scratch1, scratch2, scratch3,
+                           &bailout);
 
   // Prepare for looping. Set up elements_end to end of the array. Set
   // result_pos to the position in the result where the first character
   // is to be written.
-  // TODO(all): useless unless AllocateAsciiString trashes the register.
+  // TODO(all): useless unless AllocateOneByteString trashes the register.
   __ Add(elements_end, element, Operand(array_length, LSL, kPointerSizeLog2));
   __ Add(result_pos, result, SeqOneByteString::kHeaderSize - kHeapObjectTag);
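
 // The result length computed above follows the usual join formula: every
 // element except the first is preceded by one separator. A standalone sketch
 // (plain C++, values illustrative):
 #include <cassert>
 #include <cstdint>
 int main() {
   uint64_t string_length = 10;    // sum of all element lengths
   uint64_t array_length = 4;      // number of array elements
   uint64_t separator_length = 2;
   // string_length + (separator_length * array_length) - separator_length
   uint64_t result_length =
       string_length + separator_length * (array_length - 1);
   assert(result_length == 16);
   return 0;
 }
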
 
@@ -3659,7 +3747,7 @@
 
   // One-character separator case
   __ Bind(&one_char_separator);
-  // Replace separator with its ASCII character value.
+  // Replace separator with its one-byte character value.
   __ Ldrb(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator.
@@ -3670,7 +3758,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ASCII char (in lower byte).
+  //   separator: Single separator one-byte char (in lower byte).
 
   // Copy the separator character to the result.
   __ Strb(separator, MemOperand(result_pos, 1, PostIndex));
@@ -3720,6 +3808,17 @@
 }
 
 
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
+  __ Mov(x10, debug_is_active);
+  __ Ldrb(x0, MemOperand(x10));
+  __ SmiTag(x0);
+  context()->Plug(x0);
+}
+
+
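 // The EmitDebugIsActive helper above reads a byte-sized flag through an
 // ExternalReference and returns it smi-tagged, so %_DebugIsActive costs a
 // load rather than a runtime call.
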
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   if (expr->function() != NULL &&
       expr->function()->intrinsic_type == Runtime::INLINE) {
@@ -3735,13 +3834,20 @@
   if (expr->is_jsruntime()) {
     // Push the builtins object as the receiver.
     __ Ldr(x10, GlobalObjectMemOperand());
-    __ Ldr(x0, FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
-    __ Push(x0);
+    __ Ldr(LoadDescriptor::ReceiverRegister(),
+           FieldMemOperand(x10, GlobalObject::kBuiltinsOffset));
+    __ Push(LoadDescriptor::ReceiverRegister());
 
     // Load the function from the receiver.
     Handle<String> name = expr->name();
-    __ Mov(x2, Operand(name));
-    CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    __ Mov(LoadDescriptor::NameRegister(), Operand(name));
+    if (FLAG_vector_ics) {
+      __ Mov(VectorLoadICDescriptor::SlotRegister(),
+             Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
+      CallLoadIC(NOT_CONTEXTUAL);
+    } else {
+      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    }
 
     // Push the target function under the receiver.
     __ Pop(x10);
@@ -3793,7 +3899,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode() == SLOPPY || var->is_this());
+        DCHECK(strict_mode() == SLOPPY || var->is_this());
         if (var->IsUnallocated()) {
           __ Ldr(x12, GlobalObjectMemOperand());
           __ Mov(x11, Operand(var->name()));
@@ -3810,7 +3916,7 @@
           // context where the variable was introduced.
           __ Mov(x2, Operand(var->name()));
           __ Push(context_register(), x2);
-          __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
           context()->Plug(x0);
         }
       } else {
@@ -3843,7 +3949,7 @@
                         test->fall_through());
         context()->Plug(test->true_label(), test->false_label());
       } else {
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
         // TODO(jbramley): This could be much more efficient using (for
         // example) the CSEL instruction.
         Label materialize_true, materialize_false, done;
@@ -3886,7 +3992,7 @@
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  ASSERT(expr->expression()->IsValidReferenceExpression());
+  DCHECK(expr->expression()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -3905,7 +4011,7 @@
 
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
-    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
     EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
@@ -3914,16 +4020,16 @@
       __ Push(xzr);
     }
     if (assign_type == NAMED_PROPERTY) {
-      // Put the object both on the stack and in the accumulator.
-      VisitForAccumulatorValue(prop->obj());
-      __ Push(x0);
+      // Put the object both on the stack and in the receiver register.
+      VisitForStackValue(prop->obj());
+      __ Peek(LoadDescriptor::ReceiverRegister(), 0);
       EmitNamedPropertyLoad(prop);
     } else {
       // KEYED_PROPERTY
       VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
-      __ Peek(x1, 0);
-      __ Push(x0);
+      VisitForStackValue(prop->key());
+      __ Peek(LoadDescriptor::ReceiverRegister(), 1 * kPointerSize);
+      __ Peek(LoadDescriptor::NameRegister(), 0);
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4004,8 +4110,9 @@
 
   {
     Assembler::BlockPoolsScope scope(masm_);
-    BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
-    CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+    Handle<Code> code =
+        CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+    CallIC(code, expr->CountBinOpFeedbackId());
     patch_site.EmitPatchInfo();
   }
   __ Bind(&done);
@@ -4033,8 +4140,9 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ Mov(x2, Operand(prop->key()->AsLiteral()->value()));
-      __ Pop(x1);
+      __ Mov(StoreDescriptor::NameRegister(),
+             Operand(prop->key()->AsLiteral()->value()));
+      __ Pop(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4047,11 +4155,10 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Pop(x1);  // Key.
-      __ Pop(x2);  // Receiver.
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ Pop(StoreDescriptor::NameRegister());
+      __ Pop(StoreDescriptor::ReceiverRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic, expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4068,13 +4175,17 @@
 
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
-  ASSERT(!context()->IsEffect());
-  ASSERT(!context()->IsTest());
+  DCHECK(!context()->IsEffect());
+  DCHECK(!context()->IsTest());
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "Global variable");
-    __ Ldr(x0, GlobalObjectMemOperand());
-    __ Mov(x2, Operand(proxy->name()));
+    __ Ldr(LoadDescriptor::ReceiverRegister(), GlobalObjectMemOperand());
+    __ Mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
+    if (FLAG_vector_ics) {
+      __ Mov(VectorLoadICDescriptor::SlotRegister(),
+             Smi::FromInt(proxy->VariableFeedbackSlot()));
+    }
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     CallLoadIC(NOT_CONTEXTUAL);
@@ -4085,12 +4196,12 @@
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
 
     __ Bind(&slow);
     __ Mov(x0, Operand(proxy->name()));
     __ Push(cp, x0);
-    __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
     PrepareForBailout(expr, TOS_REG);
     __ Bind(&done);
 
@@ -4144,11 +4255,6 @@
     __ JumpIfRoot(x0, Heap::kTrueValueRootIndex, if_true);
     __ CompareRoot(x0, Heap::kFalseValueRootIndex);
     Split(eq, if_true, if_false, fall_through);
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(check, factory->null_string())) {
-    ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof null_string");
-    __ CompareRoot(x0, Heap::kNullValueRootIndex);
-    Split(eq, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
     ASM_LOCATION(
         "FullCodeGenerator::EmitLiteralCompareTypeof undefined_string");
@@ -4170,9 +4276,7 @@
   } else if (String::Equals(check, factory->object_string())) {
     ASM_LOCATION("FullCodeGenerator::EmitLiteralCompareTypeof object_string");
     __ JumpIfSmi(x0, if_false);
-    if (!FLAG_harmony_typeof) {
-      __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
-    }
+    __ JumpIfRoot(x0, Heap::kNullValueRootIndex, if_true);
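     // With --harmony-typeof removed, typeof null is unconditionally "object"
     // (the ES5 behaviour), so null always takes the branch above.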
     // Check for JS objects => true.
     Register map = x10;
     __ JumpIfObjectType(x0, map, x11, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
@@ -4252,7 +4356,7 @@
 
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
-      Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4313,12 +4417,12 @@
   // don't want to spend too much time on it now.
 
   switch (expr->yield_kind()) {
-    case Yield::SUSPEND:
+    case Yield::kSuspend:
       // Pop value from top-of-stack slot; box result into result register.
       EmitCreateIteratorResult(false);
       __ Push(result_register());
       // Fall through.
-    case Yield::INITIAL: {
+    case Yield::kInitial: {
       Label suspend, continuation, post_runtime, resume;
 
       __ B(&suspend);
@@ -4331,7 +4435,7 @@
 
       __ Bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
-      ASSERT((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
+      DCHECK((continuation.pos() > 0) && Smi::IsValid(continuation.pos()));
       __ Mov(x1, Smi::FromInt(continuation.pos()));
       __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
       __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
@@ -4342,7 +4446,7 @@
       __ Cmp(__ StackPointer(), x1);
       __ B(eq, &post_runtime);
       __ Push(x0);  // generator object
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ Bind(&post_runtime);
       __ Pop(result_register());
@@ -4353,7 +4457,7 @@
       break;
     }
 
-    case Yield::FINAL: {
+    case Yield::kFinal: {
       VisitForAccumulatorValue(expr->generator_object());
       __ Mov(x1, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
       __ Str(x1, FieldMemOperand(result_register(),
@@ -4365,7 +4469,7 @@
       break;
     }
 
-    case Yield::DELEGATING: {
+    case Yield::kDelegating: {
       VisitForStackValue(expr->generator_object());
 
       // Initial stack layout is as follows:
@@ -4374,6 +4478,9 @@
 
       Label l_catch, l_try, l_suspend, l_continuation, l_resume;
       Label l_next, l_call, l_loop;
+      Register load_receiver = LoadDescriptor::ReceiverRegister();
+      Register load_name = LoadDescriptor::NameRegister();
+
       // Initial send value is undefined.
       __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
       __ B(&l_next);
@@ -4381,9 +4488,9 @@
       // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
       __ Bind(&l_catch);
       handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
-      __ LoadRoot(x2, Heap::kthrow_stringRootIndex);  // "throw"
-      __ Peek(x3, 1 * kPointerSize);                  // iter
-      __ Push(x2, x3, x0);                            // "throw", iter, except
+      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
+      __ Peek(x3, 1 * kPointerSize);                         // iter
+      __ Push(load_name, x3, x0);                       // "throw", iter, except
       __ B(&l_call);
 
       // try { received = %yield result }
@@ -4406,14 +4513,14 @@
       const int generator_object_depth = kPointerSize + handler_size;
       __ Peek(x0, generator_object_depth);
       __ Push(x0);                                       // g
-      ASSERT((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
+      DCHECK((l_continuation.pos() > 0) && Smi::IsValid(l_continuation.pos()));
       __ Mov(x1, Smi::FromInt(l_continuation.pos()));
       __ Str(x1, FieldMemOperand(x0, JSGeneratorObject::kContinuationOffset));
       __ Str(cp, FieldMemOperand(x0, JSGeneratorObject::kContextOffset));
       __ Mov(x1, cp);
       __ RecordWriteField(x0, JSGeneratorObject::kContextOffset, x1, x2,
                           kLRHasBeenSaved, kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ Pop(x0);                                        // result
       EmitReturnSequence();
@@ -4422,15 +4529,20 @@
 
       // receiver = iter; f = 'next'; arg = received;
       __ Bind(&l_next);
-      __ LoadRoot(x2, Heap::knext_stringRootIndex);  // "next"
-      __ Peek(x3, 1 * kPointerSize);                 // iter
-      __ Push(x2, x3, x0);                           // "next", iter, received
+
+      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
+      __ Peek(x3, 1 * kPointerSize);                        // iter
+      __ Push(load_name, x3, x0);                      // "next", iter, received
 
       // result = receiver[f](arg);
       __ Bind(&l_call);
-      __ Peek(x1, 1 * kPointerSize);
-      __ Peek(x0, 2 * kPointerSize);
-      Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+      __ Peek(load_receiver, 1 * kPointerSize);
+      __ Peek(load_name, 2 * kPointerSize);
+      if (FLAG_vector_ics) {
+        __ Mov(VectorLoadICDescriptor::SlotRegister(),
+               Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
+      }
+      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
       CallIC(ic, TypeFeedbackId::None());
       __ Mov(x1, x0);
       __ Poke(x1, 2 * kPointerSize);
@@ -4442,19 +4554,29 @@
 
       // if (!result.done) goto l_try;
       __ Bind(&l_loop);
-      __ Push(x0);                                       // save result
-      __ LoadRoot(x2, Heap::kdone_stringRootIndex);      // "done"
-      CallLoadIC(NOT_CONTEXTUAL);                        // result.done in x0
+      __ Move(load_receiver, x0);
+
+      __ Push(load_receiver);                               // save result
+      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
+      if (FLAG_vector_ics) {
+        __ Mov(VectorLoadICDescriptor::SlotRegister(),
+               Smi::FromInt(expr->DoneFeedbackSlot()));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                           // x0=result.done
       // The ToBooleanStub argument (result.done) is in x0.
       Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
       CallIC(bool_ic);
       __ Cbz(x0, &l_try);
 
       // result.value
-      __ Pop(x0);                                        // result
-      __ LoadRoot(x2, Heap::kvalue_stringRootIndex);     // "value"
-      CallLoadIC(NOT_CONTEXTUAL);                        // result.value in x0
-      context()->DropAndPlug(2, x0);                     // drop iter and g
+      __ Pop(load_receiver);                                 // result
+      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
+      if (FLAG_vector_ics) {
+        __ Mov(VectorLoadICDescriptor::SlotRegister(),
+               Smi::FromInt(expr->ValueFeedbackSlot()));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                            // x0=result.value
+      context()->DropAndPlug(2, x0);                         // drop iter and g
       break;
     }
   }
@@ -4472,7 +4594,7 @@
   Register function = x4;
 
   // The value stays in x0, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
   // is read to throw the value when the resumed generator is already closed. x1
   // will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
@@ -4554,7 +4676,7 @@
 
   __ Mov(x10, Smi::FromInt(resume_mode));
   __ Push(generator_object, result_register(), x10);
-  __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
   // Not reached: the runtime call returns elsewhere.
   __ Unreachable();
 
@@ -4569,14 +4691,14 @@
   } else {
     // Throw the provided value.
     __ Push(value_reg);
-    __ CallRuntime(Runtime::kHiddenThrow, 1);
+    __ CallRuntime(Runtime::kThrow, 1);
   }
   __ B(&done);
 
   // Throw error if we attempt to operate on a running generator.
   __ Bind(&wrong_state);
   __ Push(generator_object);
-  __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
 
   __ Bind(&done);
   context()->Plug(result_register());
@@ -4597,7 +4719,7 @@
 
   __ Bind(&gc_required);
   __ Push(Smi::FromInt(map->instance_size()));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ Ldr(context_register(),
          MemOperand(fp, StandardFrameConstants::kContextOffset));
 
@@ -4611,7 +4733,7 @@
   __ Pop(result_value);
   __ Mov(boolean_done, Operand(isolate()->factory()->ToBoolean(done)));
   __ Mov(empty_fixed_array, Operand(isolate()->factory()->empty_fixed_array()));
-  ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+  DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
   STATIC_ASSERT(JSObject::kPropertiesOffset + kPointerSize ==
                 JSObject::kElementsOffset);
   STATIC_ASSERT(JSGeneratorObject::kResultValuePropertyOffset + kPointerSize ==
@@ -4651,7 +4773,7 @@
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  ASSERT(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
+  DCHECK(POINTER_SIZE_ALIGN(frame_offset) == frame_offset);
   __ Str(value, MemOperand(fp, frame_offset));
 }
 
@@ -4669,7 +4791,7 @@
     // as their closure, not the anonymous closure containing the global
     // code.  Pass a smi sentinel and let the runtime look up the empty
     // function.
-    ASSERT(kSmiTag == 0);
+    DCHECK(kSmiTag == 0);
     __ Push(xzr);
   } else if (declaration_scope->is_eval_scope()) {
     // Contexts created by a call to eval have the same closure as the
@@ -4678,7 +4800,7 @@
     __ Ldr(x10, ContextMemOperand(cp, Context::CLOSURE_INDEX));
     __ Push(x10);
   } else {
-    ASSERT(declaration_scope->is_function_scope());
+    DCHECK(declaration_scope->is_function_scope());
     __ Ldr(x10, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
     __ Push(x10);
   }
@@ -4687,7 +4809,7 @@
 
 void FullCodeGenerator::EnterFinallyBlock() {
   ASM_LOCATION("FullCodeGenerator::EnterFinallyBlock");
-  ASSERT(!result_register().is(x10));
+  DCHECK(!result_register().is(x10));
   // Preserve the result register while executing finally block.
   // Also cook the return address in lr to the stack (smi encoded Code* delta).
   __ Sub(x10, lr, Operand(masm_->CodeObject()));
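   // Storing lr as a code-relative, smi-encoded delta keeps the saved return
   // address valid even if the code object is moved by the garbage collector.
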
@@ -4719,7 +4841,7 @@
 
 void FullCodeGenerator::ExitFinallyBlock() {
   ASM_LOCATION("FullCodeGenerator::ExitFinallyBlock");
-  ASSERT(!result_register().is(x10));
+  DCHECK(!result_register().is(x10));
 
   // Restore pending message from stack.
   __ Pop(x10, x11, x12);
@@ -4761,7 +4883,7 @@
   Address branch_address = pc - 3 * kInstructionSize;
   PatchingAssembler patcher(branch_address, 1);
 
-  ASSERT(Instruction::Cast(branch_address)
+  DCHECK(Instruction::Cast(branch_address)
              ->IsNop(Assembler::INTERRUPT_CODE_NOP) ||
          (Instruction::Cast(branch_address)->IsCondBranchImm() &&
           Instruction::Cast(branch_address)->ImmPCOffset() ==
@@ -4792,7 +4914,7 @@
   Instruction* load = Instruction::Cast(pc)->preceding(2);
   Address interrupt_address_pointer =
       reinterpret_cast<Address>(load) + load->ImmPCOffset();
-  ASSERT((Memory::uint64_at(interrupt_address_pointer) ==
+  DCHECK((Memory::uint64_at(interrupt_address_pointer) ==
           reinterpret_cast<uint64_t>(unoptimized_code->GetIsolate()
                                          ->builtins()
                                          ->OnStackReplacement()
diff --git a/src/arm64/ic-arm64.cc b/src/arm64/ic-arm64.cc
deleted file mode 100644
index 842b3e7..0000000
--- a/src/arm64/ic-arm64.cc
+++ /dev/null
@@ -1,1387 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/arm64/assembler-arm64.h"
-#include "src/code-stubs.h"
-#include "src/codegen.h"
-#include "src/disasm.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-// "type" holds an instance type on entry and is not clobbered.
-// Generated code branches to "global_object" if type is any kind of global
-// JS object.
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
-                                            Register type,
-                                            Label* global_object) {
-  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
-  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
-  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
-  __ B(eq, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-//
-// "receiver" holds the receiver on entry and is unchanged.
-// "elements" holds the property dictionary on fall through.
-static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register elements,
-                                                Register scratch0,
-                                                Register scratch1,
-                                                Label* miss) {
-  ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss);
-
-  // Check that the receiver is a valid JS object.
-  // Let t be the object instance type, we want:
-  //   FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
-  // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
-  // check the lower bound.
-  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
-  __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
-                      miss, lt);
-
-  // scratch0 now contains the map of the receiver and scratch1 the object type.
-  Register map = scratch0;
-  Register type = scratch1;
-
-  // Check if the receiver is a global JS object.
-  GenerateGlobalInstanceTypeCheck(masm, type, miss);
-
-  // Check that the object does not require access checks.
-  __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
-  __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);
-
-  // Check that the properties dictionary is valid.
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
-}
-
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done.
-// The scratch registers need to be different from elements, name and result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss,
-                                   Register elements,
-                                   Register name,
-                                   Register result,
-                                   Register scratch1,
-                                   Register scratch2) {
-  ASSERT(!AreAliased(elements, name, scratch1, scratch2));
-  ASSERT(!AreAliased(result, scratch1, scratch2));
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch1,
-                                                   scratch2);
-
-  // If probing finds an entry check that the value is a normal property.
-  __ Bind(&done);
-
-  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ B(ne, miss);
-
-  // Get the value at the masked, scaled index and return.
-  __ Ldr(result,
-         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
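
 // (NameDictionary entries are (key, value, details) triples, which is why
 // the details word is read at kElementsStartOffset + 2 * kPointerSize and
 // the value at kElementsStartOffset + 1 * kPointerSize from the probe
 // result above.)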
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// value:    The value to store (never clobbered).
-//
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
-                                    Label* miss,
-                                    Register elements,
-                                    Register name,
-                                    Register value,
-                                    Register scratch1,
-                                    Register scratch2) {
-  ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch1,
-                                                   scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ Bind(&done);
-
-  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  static const int kTypeAndReadOnlyMask =
-      PropertyDetails::TypeField::kMask |
-      PropertyDetails::AttributesField::encode(READ_ONLY);
-  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
-  __ Tst(scratch1, kTypeAndReadOnlyMask);
-  __ B(ne, miss);
-
-  // Store the value at the masked, scaled index and return.
-  static const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
-  __ Str(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ Mov(scratch1, value);
-  __ RecordWrite(
-      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for a regular JS object and returns the map of the
-// receiver in 'map_scratch' if the receiver is not a smi.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register map_scratch,
-                                           Register scratch,
-                                           int interceptor_bit,
-                                           Label* slow) {
-  ASSERT(!AreAliased(map_scratch, scratch));
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
-  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
-  __ Tbnz(scratch, interceptor_bit, slow);
-
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object, we enter the
-  // runtime system to make sure that indexing into string objects works
-  // as intended.
-  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
-  __ Cmp(scratch, JS_OBJECT_TYPE);
-  __ B(lt, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-//
-// receiver     - holds the receiver on entry.
-//                Unchanged unless 'result' is the same register.
-//
-// key          - holds the smi key on entry.
-//                Unchanged unless 'result' is the same register.
-//
-// elements     - holds the elements of the receiver on exit.
-//
-// elements_map - holds the elements map on exit if the not_fast_array branch is
-//                taken. Otherwise, this is used as a scratch register.
-//
-// result       - holds the result on exit if the load succeeded.
-//                Allowed to be the same as 'receiver' or 'key'.
-//                Unchanged on bailout so 'receiver' and 'key' can be safely
-//                used by further computation.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
-                                  Register receiver,
-                                  Register key,
-                                  Register elements,
-                                  Register elements_map,
-                                  Register scratch2,
-                                  Register result,
-                                  Label* not_fast_array,
-                                  Label* slow) {
-  ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));
-
-  // Check for fast array.
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  if (not_fast_array != NULL) {
-    // Check that the object is in fast mode and writable.
-    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
-                     not_fast_array);
-  } else {
-    __ AssertFastElements(elements);
-  }
-
-  // The elements_map register is only used for the not_fast_array path, which
-  // was handled above. From this point onward it is a scratch register.
-  Register scratch1 = elements_map;
-
-  // Check that the key (index) is within bounds.
-  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Cmp(key, scratch1);
-  __ B(hs, slow);
-
-  // Fast case: Do the load.
-  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ SmiUntag(scratch2, key);
-  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
-
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
-
-  // Move the value to the result register.
-  // 'result' can alias with 'receiver' or 'key' but these two must be
-  // preserved if we jump to 'slow'.
-  __ Mov(result, scratch2);
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if a key is a unique name.
-// The map of the key is returned in 'map_scratch'.
-// If the jump to 'index_string' is done the hash of the key is left
-// in 'hash_scratch'.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
-                                 Register key,
-                                 Register map_scratch,
-                                 Register hash_scratch,
-                                 Label* index_string,
-                                 Label* not_unique) {
-  ASSERT(!AreAliased(key, map_scratch, hash_scratch));
-
-  // Is the key a name?
-  Label unique;
-  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
-                      not_unique, hi);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ B(eq, &unique);
-
-  // Is the string an array index with cached numeric value?
-  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
-  __ TestAndBranchIfAllClear(hash_scratch,
-                             Name::kContainsCachedArrayIndexMask,
-                             index_string);
-
-  // Is the string internalized? We know it's a string, so a single bit test is
-  // enough.
-  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
-
-  __ Bind(&unique);
-  // Fall through if the key is a unique name.
-}
-
-
-// Neither 'object' nor 'key' is modified by this function.
-//
-// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
-// left with the object's elements map. Otherwise, it is used as a scratch
-// register.
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
-                                                Register object,
-                                                Register key,
-                                                Register map,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Label* unmapped_case,
-                                                Label* slow_case) {
-  ASSERT(!AreAliased(object, key, map, scratch1, scratch2));
-
-  Heap* heap = masm->isolate()->heap();
-
-  // Check that the receiver is a JSObject. Because of the elements
-  // map check later, we do not need to check for interceptors or
-  // whether it requires access checks.
-  __ JumpIfSmi(object, slow_case);
-  // Check that the object is some kind of JSObject.
-  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
-                      slow_case, lt);
-
-  // Check that the key is a positive smi.
-  __ JumpIfNotSmi(key, slow_case);
-  __ Tbnz(key, kXSignBit, slow_case);
-
-  // Load the elements object and check its map.
-  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
-  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
-  // Check if element is in the range of mapped arguments. If not, jump
-  // to the unmapped lookup.
-  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
-  __ Sub(scratch1, scratch1, Smi::FromInt(2));
-  __ Cmp(key, scratch1);
-  __ B(hs, unmapped_case);
-
-  // Load element index and check whether it is the hole.
-  static const int offset =
-      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
-  __ Add(scratch1, map, offset);
-  __ SmiUntag(scratch2, key);
-  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
-  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
-
-  // Load value from context and return it.
-  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
-  __ SmiUntag(scratch1);
-  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
-  __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
-  // The base of the result (scratch2) is passed to RecordWrite in
-  // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
-  return MemOperand(scratch2, scratch1);
-}
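
 // The bounds check and offsets above follow the sloppy-arguments elements
 // layout: slot 0 holds the context, slot 1 the arguments backing store, and
 // slots 2..n the context indices of the mapped parameters; hence the
 // 'length - 2' bound and the FixedArray::kHeaderSize + 2 * kPointerSize
 // base offset.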
-
-
-// The 'parameter_map' register must be loaded with the parameter map of the
-// arguments object and is overwritten.
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
-                                                  Register key,
-                                                  Register parameter_map,
-                                                  Register scratch,
-                                                  Label* slow_case) {
-  ASSERT(!AreAliased(key, parameter_map, scratch));
-
-  // Element is in arguments backing store, which is referenced by the
-  // second element of the parameter_map.
-  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
-  Register backing_store = parameter_map;
-  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
-  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
-  __ CheckMap(
-      backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
-  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
-  __ Cmp(key, scratch);
-  __ B(hs, slow_case);
-
-  __ Add(backing_store,
-         backing_store,
-         FixedArray::kHeaderSize - kHeapObjectTag);
-  __ SmiUntag(scratch, key);
-  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x2    : name
-  //  -- lr    : return address
-  //  -- x0    : receiver
-  // -----------------------------------
-
-  // Probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, x0, x2, x3, x4, x5, x6);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x2    : name
-  //  -- lr    : return address
-  //  -- x0    : receiver
-  // -----------------------------------
-  Label miss, slow;
-
-  GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);
-
-  // x1 now holds the property dictionary.
-  GenerateDictionaryLoad(masm, &slow, x1, x2, x0, x3, x4);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ Bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-
-  // Cache miss: Jump to runtime.
-  __ Bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x2    : name
-  //  -- lr    : return address
-  //  -- x0    : receiver
-  // -----------------------------------
-  Isolate* isolate = masm->isolate();
-  ASM_LOCATION("LoadIC::GenerateMiss");
-
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
-
-  // Perform tail call to the entry.
-  __ Push(x0, x2);
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- x2    : name
-  //  -- lr    : return address
-  //  -- x0    : receiver
-  // -----------------------------------
-
-  __ Push(x0, x2);
-  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : key
-  //  -- x1     : receiver
-  // -----------------------------------
-  Register result = x0;
-  Register key = x0;
-  Register receiver = x1;
-  Label miss, unmapped;
-
-  Register map_scratch = x2;
-  MemOperand mapped_location = GenerateMappedArgumentsLookup(
-      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
-  __ Ldr(result, mapped_location);
-  __ Ret();
-
-  __ Bind(&unmapped);
-  // Parameter map is left in map_scratch when the unmapped jump is taken.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
-  __ Ldr(x2, unmapped_location);
-  __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
-  // Move the result into x0; x0 must be preserved on miss.
-  __ Mov(result, x2);
-  __ Ret();
-
-  __ Bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : value
-  //  -- x1     : key
-  //  -- x2     : receiver
-  // -----------------------------------
-
-  Label slow, notin;
-
-  Register value = x0;
-  Register key = x1;
-  Register receiver = x2;
-  Register map = x3;
-
-  // These registers are used by GenerateMappedArgumentsLookup to build a
-  // MemOperand. They are live for as long as the MemOperand is live.
-  Register mapped1 = x4;
-  Register mapped2 = x5;
-
-  MemOperand mapped =
-      GenerateMappedArgumentsLookup(masm, receiver, key, map,
-                                    mapped1, mapped2,
-                                    &notin, &slow);
-  Operand mapped_offset = mapped.OffsetAsOperand();
-  __ Str(value, mapped);
-  __ Add(x10, mapped.base(), mapped_offset);
-  __ Mov(x11, value);
-  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
-  __ Ret();
-
-  __ Bind(&notin);
-
-  // These registers are used by GenerateMappedArgumentsLookup to build a
-  // MemOperand. They are live for as long as the MemOperand is live.
-  Register unmapped1 = map;   // This is assumed to alias 'map'.
-  Register unmapped2 = x4;
-  MemOperand unmapped =
-      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
-  Operand unmapped_offset = unmapped.OffsetAsOperand();
-  __ Str(value, unmapped);
-  __ Add(x10, unmapped.base(), unmapped_offset);
-  __ Mov(x11, value);
-  __ RecordWrite(unmapped.base(), x10, x11,
-                 kLRHasNotBeenSaved, kDontSaveFPRegs);
-  __ Ret();
-  __ Bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : key
-  //  -- x1     : receiver
-  // -----------------------------------
-  Isolate* isolate = masm->isolate();
-
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
-
-  __ Push(x1, x0);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : key
-  //  -- x1     : receiver
-  // -----------------------------------
-  Register key = x0;
-  Register receiver = x1;
-
-  __ Push(receiver, key);
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
-                                        Register key,
-                                        Register receiver,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        Register scratch4,
-                                        Register scratch5,
-                                        Label *slow) {
-  ASSERT(!AreAliased(
-      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
-
-  Isolate* isolate = masm->isolate();
-  Label check_number_dictionary;
-  // If we can load the value, it should be returned in x0.
-  Register result = x0;
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
-
-  GenerateFastArrayLoad(
-      masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
-  __ IncrementCounter(
-      isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
-  __ Ret();
-
-  __ Bind(&check_number_dictionary);
-  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
-
-  // Check whether we have a number dictionary.
-  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
-
-  __ LoadFromNumberDictionary(
-      slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
-  __ Ret();
-}
-
-static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
-                                         Register key,
-                                         Register receiver,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Register scratch4,
-                                         Register scratch5,
-                                         Label *slow) {
-  ASSERT(!AreAliased(
-      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));
-
-  Isolate* isolate = masm->isolate();
-  Label probe_dictionary, property_array_property;
-  // If we can load the value, it should be returned in x0.
-  Register result = x0;
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);
-
-  // If the receiver is a fast-case object, check the keyed lookup cache.
-  // Otherwise probe the dictionary.
-  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
-  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
-
-  // We keep the map of the receiver in scratch1.
-  Register receiver_map = scratch1;
-
-  // Load the map of the receiver, compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the name hash.
-  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
-  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
-  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
-  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
-  __ And(scratch2, scratch2, mask);
-
-  // Load the key (consisting of map and unique name) from the cache and
-  // check for a match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
-  ExternalReference cache_keys =
-      ExternalReference::keyed_lookup_cache_keys(isolate);
-
-  __ Mov(scratch3, cache_keys);
-  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    // Load the map and make scratch3 point to the next entry.
-    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
-    __ Cmp(receiver_map, scratch4);
-    __ B(ne, &try_next_entry);
-    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load name
-    __ Cmp(key, scratch4);
-    __ B(eq, &hit_on_nth_entry[i]);
-    __ Bind(&try_next_entry);
-  }
-
-  // Last entry.
-  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
-  __ Cmp(receiver_map, scratch4);
-  __ B(ne, slow);
-  __ Ldr(scratch4, MemOperand(scratch3));
-  __ Cmp(key, scratch4);
-  __ B(ne, slow);
-
-  // Get field offset.
-  ExternalReference cache_field_offsets =
-      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ Bind(&hit_on_nth_entry[i]);
-    __ Mov(scratch3, cache_field_offsets);
-    if (i != 0) {
-      __ Add(scratch2, scratch2, i);
-    }
-    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
-    __ Ldrb(scratch5,
-            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
-    __ Subs(scratch4, scratch4, scratch5);
-    __ B(ge, &property_array_property);
-    if (i != 0) {
-      __ B(&load_in_object_property);
-    }
-  }
-
-  // Load in-object property.
-  __ Bind(&load_in_object_property);
-  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
-  __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
-  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
-  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
-                      1, scratch1, scratch2);
-  __ Ret();
-
-  // Load property array property.
-  __ Bind(&property_array_property);
-  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
-                      1, scratch1, scratch2);
-  __ Ret();
-
-  // Do a quick inline probe of the receiver's dictionary, if it exists.
-  __ Bind(&probe_dictionary);
-  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
-  // Load the property.
-  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
-                      1, scratch1, scratch2);
-  __ Ret();
-}
-
-
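The cache probe above hashes the receiver map together with the name's hash
field to pick a bucket. A sketch of that index computation; the shifts and mask
are passed in here since the real constants live in KeyedLookupCache:

#include <cstdint>

// Sketch of the bucket index computed inline above; values illustrative.
int BucketIndex(uintptr_t receiver_map, uint32_t name_hash_field,
                int map_hash_shift, int name_hash_shift, int mask) {
  uintptr_t h = (receiver_map >> map_hash_shift) ^
                (name_hash_field >> name_hash_shift);
  return static_cast<int>(h & mask);  // selects one bucket of entries
}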
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : key
-  //  -- x1     : receiver
-  // -----------------------------------
-  Label slow, check_name, index_smi, index_name;
-
-  Register key = x0;
-  Register receiver = x1;
-
-  __ JumpIfNotSmi(key, &check_name);
-  __ Bind(&index_smi);
-  // The key is now known to be a smi. This point is also reached from below,
-  // where a numeric string key is converted to a smi.
-  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
-
-  // Slow case, key and receiver still in x0 and x1.
-  __ Bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
-  GenerateRuntimeGetProperty(masm);
-
-  __ Bind(&check_name);
-  GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);
-
-  GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);
-
-  __ Bind(&index_name);
-  __ IndexFromHash(x3, key);
-  // Now jump to the place where smi keys are handled.
-  __ B(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : key (index)
-  //  -- x1     : receiver
-  // -----------------------------------
-  Label miss;
-
-  Register index = x0;
-  Register receiver = x1;
-  Register result = x0;
-  Register scratch = x3;
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, call_helper);
-
-  __ Bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : key
-  //  -- x1     : receiver
-  // -----------------------------------
-  Label slow;
-  Register key = x0;
-  Register receiver = x1;
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-
-  // Check that the key is an array index, that is, a Uint32.
-  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);
-
-  // Get the map of the receiver.
-  Register map = x2;
-  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-  // Check that it has an indexed interceptor and that access checks are not
-  // enabled for this object.
-  __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
-  ASSERT(kSlowCaseBitFieldMask ==
-      ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
-  __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
-  __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);
-
-  // Everything is fine, call runtime.
-  __ Push(receiver, key);
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
-                        masm->isolate()),
-      2,
-      1);
-
-  __ Bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
-  // ---------- S t a t e --------------
-  //  -- x0     : value
-  //  -- x1     : key
-  //  -- x2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(x2, x1, x0);
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : value
-  //  -- x1     : key
-  //  -- x2     : receiver
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(x2, x1, x0);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                              StrictMode strict_mode) {
-  ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
-  // ---------- S t a t e --------------
-  //  -- x0     : value
-  //  -- x1     : key
-  //  -- x2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(x2, x1, x0);
-
-  // Push PropertyAttributes(NONE) and strict_mode for runtime call.
-  STATIC_ASSERT(NONE == 0);
-  __ Mov(x10, Smi::FromInt(strict_mode));
-  __ Push(xzr, x10);
-
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
-    MacroAssembler* masm,
-    Label* fast_object,
-    Label* fast_double,
-    Label* slow,
-    KeyedStoreCheckMap check_map,
-    KeyedStoreIncrementLength increment_length,
-    Register value,
-    Register key,
-    Register receiver,
-    Register receiver_map,
-    Register elements_map,
-    Register elements) {
-  ASSERT(!AreAliased(
-      value, key, receiver, receiver_map, elements_map, elements, x10, x11));
-
-  Label transition_smi_elements;
-  Label transition_double_elements;
-  Label fast_double_without_map_check;
-  Label non_double_value;
-  Label finish_store;
-
-  __ Bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Cmp(elements_map,
-           Operand(masm->isolate()->factory()->fixed_array_map()));
-    __ B(ne, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because there
-  // may be a callback on the element.
-  Label holecheck_passed;
-  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Ldr(x11, MemOperand(x10));
-  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-  __ bind(&holecheck_passed);
-
-  // Smi stores don't require further checks.
-  __ JumpIfSmi(value, &finish_store);
-
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
-
-  __ Bind(&finish_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Add(x10, key, Smi::FromInt(1));
-    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-
-  Register address = x11;
-  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Str(value, MemOperand(address));
-
-  Label dont_record_write;
-  __ JumpIfSmi(value, &dont_record_write);
-
-  // Update write barrier for the elements array address.
-  __ Mov(x10, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements,
-                 address,
-                 x10,
-                 kLRHasNotBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-
-  __ Bind(&dont_record_write);
-  __ Ret();
-
-
-  __ Bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so, go
-  // to the runtime.
-  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
-  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
-  __ Ldr(x11, MemOperand(x10));
-  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
-
-  __ Bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value,
-                                 key,
-                                 elements,
-                                 x10,
-                                 d0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Add(x10, key, Smi::FromInt(1));
-    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-
-  __ Bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         receiver_map,
-                                         x10,
-                                         x11,
-                                         slow);
-  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
-  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
-                                                    FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&fast_double_without_map_check);
-
-  __ Bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         x10,
-                                         x11,
-                                         slow);
-  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
-                                                                   slow);
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&finish_store);
-
-  __ Bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS,
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         x10,
-                                         x11,
-                                         slow);
-  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ B(&finish_store);
-}
-
-
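Several stores above compute element addresses with Operand::UntagSmiAndScale.
A sketch of that arithmetic, assuming the arm64 smi layout in which a 64-bit
word carries the 32-bit payload in its upper half, so untagging is an
arithmetic shift right by 32:

#include <cstdint>

const int kSmiShiftSketch = 32;        // assumption: 64-bit smis, as on arm64
const int kPointerSizeLog2Sketch = 3;  // 8-byte pointers

intptr_t ElementOffset(intptr_t smi_key, intptr_t header_size) {
  intptr_t index = smi_key >> kSmiShiftSketch;             // untag
  return header_size + (index << kPointerSizeLog2Sketch);  // scale and offset
}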
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
-                                   StrictMode strict_mode) {
-  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
-  // ---------- S t a t e --------------
-  //  -- x0     : value
-  //  -- x1     : key
-  //  -- x2     : receiver
-  //  -- lr     : return address
-  // -----------------------------------
-  Label slow;
-  Label array;
-  Label fast_object;
-  Label extra;
-  Label fast_object_grow;
-  Label fast_double_grow;
-  Label fast_double;
-
-  Register value = x0;
-  Register key = x1;
-  Register receiver = x2;
-  Register receiver_map = x3;
-  Register elements = x4;
-  Register elements_map = x5;
-
-  __ JumpIfNotSmi(key, &slow);
-  __ JumpIfSmi(receiver, &slow);
-  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
-  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ TestAndBranchIfAnySet(
-      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
-
-  // Check if the object is a JS array or not.
-  Register instance_type = x10;
-  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
-  __ B(eq, &array);
-  // Check that the object is some kind of JSObject.
-  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
-  __ B(lt, &slow);
-
-  // Object case: Check key against length in the elements array.
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the FixedArray length are smis.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(hi, &fast_object);
-
-
-  __ Bind(&slow);
-  // Slow case: jump to the runtime.
-  // Live values:
-  //  x0: value
-  //  x1: key
-  //  x2: receiver
-  GenerateRuntimeSetProperty(masm, strict_mode);
-
-
-  __ Bind(&extra);
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-
-  // Check for room in the elements backing store.
-  // Both the key and the FixedArray length are smis.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(ls, &slow);
-
-  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
-  __ B(eq, &fast_object_grow);
-  __ Cmp(elements_map,
-         Operand(masm->isolate()->factory()->fixed_double_array_map()));
-  __ B(eq, &fast_double_grow);
-  __ B(&slow);
-
-
-  __ Bind(&array);
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-
-  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Cmp(x10, Operand::UntagSmi(key));
-  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
-  __ B(lo, &slow);
-
-  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
-                                  &slow, kCheckMap, kDontIncrementLength,
-                                  value, key, receiver, receiver_map,
-                                  elements_map, elements);
-  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
-                                  &slow, kDontCheckMap, kIncrementLength,
-                                  value, key, receiver, receiver_map,
-                                  elements_map, elements);
-}
-
-
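The bounds checks in GenerateGeneric above amount to a three-way dispatch on
the key. A compact restatement of that logic (sketch only; "capacity" stands
for the backing FixedArray length):

#include <cstddef>

enum class StorePath { kFastObject, kGrowByOne, kSlow };

// key < length: in bounds; key == length with spare capacity: append one
// element and bump the length; anything else falls back to the runtime.
StorePath Classify(size_t key, size_t length, size_t capacity) {
  if (key < length) return StorePath::kFastObject;
  if (key == length && key < capacity) return StorePath::kGrowByOne;
  return StorePath::kSlow;
}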
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x0    : value
-  //  -- x1    : receiver
-  //  -- x2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  // Probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, x1, x2, x3, x4, x5, x6);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x0    : value
-  //  -- x1    : receiver
-  //  -- x2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  __ Push(x1, x2, x0);
-
-  // Tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- x0    : value
-  //  -- x1    : receiver
-  //  -- x2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-  Label miss;
-  Register value = x0;
-  Register receiver = x1;
-  Register name = x2;
-  Register dictionary = x3;
-
-  GenerateNameDictionaryReceiverCheck(
-      masm, receiver, dictionary, x4, x5, &miss);
-
-  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
-  __ Ret();
-
-  // Cache miss: Jump to runtime.
-  __ Bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         StrictMode strict_mode) {
-  ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
-  // ----------- S t a t e -------------
-  //  -- x0    : value
-  //  -- x1    : receiver
-  //  -- x2    : name
-  //  -- lr    : return address
-  // -----------------------------------
-
-  __ Push(x1, x2, x0);
-
-  __ Mov(x11, Smi::FromInt(NONE));  // PropertyAttributes
-  __ Mov(x10, Smi::FromInt(strict_mode));
-  __ Push(x11, x10);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- x0     : value
-  //  -- x1     : receiver
-  //  -- x2     : name
-  //  -- lr     : return address
-  // -----------------------------------
-
-  // Push receiver, name and value for runtime call.
-  __ Push(x1, x2, x0);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise cause a transition to the generic stub.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return al;
-  }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
-  // The address of the instruction following the call.
-  Address info_address =
-      Assembler::return_address_from_call_start(address);
-
-  InstructionSequence* patch_info = InstructionSequence::At(info_address);
-  return patch_info->IsInlineData();
-}
-
-
-// Activate a smi fast-path by patching the instructions generated by
-// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
-// JumpPatchSite::EmitPatchInfo().
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
-  // The patch information is encoded in the instruction stream using
-  // instructions which have no side effects, so we can safely execute them.
-  // The patch information is encoded directly after the call to the helper
-  // function which is requesting this patch operation.
-  Address info_address =
-      Assembler::return_address_from_call_start(address);
-  InlineSmiCheckInfo info(info_address);
-
-  // Check and decode the patch information instruction.
-  if (!info.HasSmiCheck()) {
-    return;
-  }
-
-  if (FLAG_trace_ic) {
-    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n",
-           address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
-  }
-
-  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
-  // and JumpPatchSite::EmitJumpIfSmi().
-  // Changing
-  //   tb(n)z xzr, #0, <target>
-  // to
-  //   tb(!n)z test_reg, #0, <target>
-  Instruction* to_patch = info.SmiCheck();
-  PatchingAssembler patcher(to_patch, 1);
-  ASSERT(to_patch->IsTestBranch());
-  ASSERT(to_patch->ImmTestBranchBit5() == 0);
-  ASSERT(to_patch->ImmTestBranchBit40() == 0);
-
-  STATIC_ASSERT(kSmiTag == 0);
-  STATIC_ASSERT(kSmiTagMask == 1);
-
-  int branch_imm = to_patch->ImmTestBranch();
-  Register smi_reg;
-  if (check == ENABLE_INLINED_SMI_CHECK) {
-    ASSERT(to_patch->Rt() == xzr.code());
-    smi_reg = info.SmiRegister();
-  } else {
-    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
-    ASSERT(to_patch->Rt() != xzr.code());
-    smi_reg = xzr;
-  }
-
-  if (to_patch->Mask(TestBranchMask) == TBZ) {
-    // This is JumpIfNotSmi(smi_reg, branch_imm).
-    patcher.tbnz(smi_reg, 0, branch_imm);
-  } else {
-    ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
-    // This is JumpIfSmi(smi_reg, branch_imm).
-    patcher.tbz(smi_reg, 0, branch_imm);
-  }
-}
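A sketch of the patch performed above, at the level of decoded fields rather
than real A64 encodings: the target offset is preserved, while the tested
register and the branch polarity both change.

// Simplified stand-in for a decoded TBZ/TBNZ; not the real bit layout.
struct TestBranch {
  bool branch_if_bit_set;  // true: TBNZ, false: TBZ
  int reg;                 // 31 encodes xzr in this sketch
  int target_offset;       // unchanged by the patch
};

// enable == true swaps in the real smi register; false restores xzr, making
// the check inert again. Polarity inverts in both directions, mirroring the
// tbz <-> tbnz swap above.
void PatchSmiCheck(TestBranch* tb, int smi_reg, bool enable) {
  tb->branch_if_bit_set = !tb->branch_if_bit_set;
  tb->reg = enable ? smi_reg : 31;
}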
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/arm64/instructions-arm64.cc b/src/arm64/instructions-arm64.cc
index c7334ed..71094ba 100644
--- a/src/arm64/instructions-arm64.cc
+++ b/src/arm64/instructions-arm64.cc
@@ -8,8 +8,8 @@
 
 #define ARM64_DEFINE_FP_STATICS
 
-#include "src/arm64/instructions-arm64.h"
 #include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/instructions-arm64.h"
 
 namespace v8 {
 namespace internal {
@@ -67,7 +67,7 @@
 static uint64_t RotateRight(uint64_t value,
                             unsigned int rotate,
                             unsigned int width) {
-  ASSERT(width <= 64);
+  DCHECK(width <= 64);
   rotate &= 63;
   return ((value & ((1UL << rotate) - 1UL)) << (width - rotate)) |
          (value >> rotate);
@@ -77,9 +77,9 @@
 static uint64_t RepeatBitsAcrossReg(unsigned reg_size,
                                     uint64_t value,
                                     unsigned width) {
-  ASSERT((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
+  DCHECK((width == 2) || (width == 4) || (width == 8) || (width == 16) ||
          (width == 32));
-  ASSERT((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
+  DCHECK((reg_size == kWRegSizeInBits) || (reg_size == kXRegSizeInBits));
   uint64_t result = value & ((1UL << width) - 1UL);
   for (unsigned i = width; i < reg_size; i *= 2) {
     result |= (result << i);
@@ -182,8 +182,8 @@
 }
 
 
-ptrdiff_t Instruction::ImmPCOffset() {
-  ptrdiff_t offset;
+int64_t Instruction::ImmPCOffset() {
+  int64_t offset;
   if (IsPCRelAddressing()) {
     // PC-relative addressing. Only ADR is supported.
     offset = ImmPCRel();
@@ -193,7 +193,7 @@
     offset = ImmBranch() << kInstructionSizeLog2;
   } else {
     // Load literal (offset from PC).
-    ASSERT(IsLdrLiteral());
+    DCHECK(IsLdrLiteral());
     // The offset is always shifted by 2 bits, even for loads to 64-bits
     // registers.
     offset = ImmLLiteral() << kInstructionSizeLog2;
@@ -231,9 +231,9 @@
 
 void Instruction::SetPCRelImmTarget(Instruction* target) {
   // ADRP is not supported, so 'this' must point to an ADR instruction.
-  ASSERT(IsAdr());
+  DCHECK(IsAdr());
 
-  int target_offset = DistanceTo(target);
+  ptrdiff_t target_offset = DistanceTo(target);
   Instr imm;
   if (Instruction::IsValidPCRelOffset(target_offset)) {
     imm = Assembler::ImmPCRelAddress(target_offset);
@@ -241,13 +241,13 @@
   } else {
     PatchingAssembler patcher(this,
                               PatchingAssembler::kAdrFarPatchableNInstrs);
-    patcher.PatchAdrFar(target);
+    patcher.PatchAdrFar(target_offset);
   }
 }
 
 
 void Instruction::SetBranchImmTarget(Instruction* target) {
-  ASSERT(IsAligned(DistanceTo(target), kInstructionSize));
+  DCHECK(IsAligned(DistanceTo(target), kInstructionSize));
   Instr branch_imm = 0;
   uint32_t imm_mask = 0;
   ptrdiff_t offset = DistanceTo(target) >> kInstructionSizeLog2;
@@ -279,7 +279,7 @@
 
 
 void Instruction::SetImmLLiteral(Instruction* source) {
-  ASSERT(IsAligned(DistanceTo(source), kInstructionSize));
+  DCHECK(IsAligned(DistanceTo(source), kInstructionSize));
   ptrdiff_t offset = DistanceTo(source) >> kLoadLiteralScaleLog2;
   Instr imm = Assembler::ImmLLiteral(offset);
   Instr mask = ImmLLiteral_mask;
@@ -304,7 +304,7 @@
 // xzr and Register are not defined in that header. Consider adding
 // instructions-arm64-inl.h to work around this.
 uint64_t InstructionSequence::InlineData() const {
-  ASSERT(IsInlineData());
+  DCHECK(IsInlineData());
   uint64_t payload = ImmMoveWide();
   // TODO(all): If we extend ::InlineData() to support bigger data, we need
   // to update this method too.
diff --git a/src/arm64/instructions-arm64.h b/src/arm64/instructions-arm64.h
index b3d9b79..374e246 100644
--- a/src/arm64/instructions-arm64.h
+++ b/src/arm64/instructions-arm64.h
@@ -5,10 +5,10 @@
 #ifndef V8_ARM64_INSTRUCTIONS_ARM64_H_
 #define V8_ARM64_INSTRUCTIONS_ARM64_H_
 
-#include "src/globals.h"
-#include "src/utils.h"
 #include "src/arm64/constants-arm64.h"
 #include "src/arm64/utils-arm64.h"
+#include "src/globals.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -137,7 +137,7 @@
   // ImmPCRel is a compound field (not present in INSTRUCTION_FIELDS_LIST),
   // formed from ImmPCRelLo and ImmPCRelHi.
   int ImmPCRel() const {
-    ASSERT(IsPCRelAddressing());
+    DCHECK(IsPCRelAddressing());
     int const offset = ((ImmPCRelHi() << ImmPCRelLo_width) | ImmPCRelLo());
     int const width = ImmPCRelLo_width + ImmPCRelHi_width;
     return signed_bitextract_32(width - 1, 0, offset);
@@ -338,7 +338,7 @@
   // Find the PC offset encoded in this instruction. 'this' may be a branch or
   // a PC-relative addressing instruction.
   // The offset returned is unscaled.
-  ptrdiff_t ImmPCOffset();
+  int64_t ImmPCOffset();
 
   // Find the target of this instruction. 'this' may be a branch or a
   // PC-relative addressing instruction.
@@ -352,9 +352,9 @@
   // Patch a literal load instruction to load from 'source'.
   void SetImmLLiteral(Instruction* source);
 
-  uint8_t* LiteralAddress() {
+  uintptr_t LiteralAddress() {
     int offset = ImmLLiteral() << kLoadLiteralScaleLog2;
-    return reinterpret_cast<uint8_t*>(this) + offset;
+    return reinterpret_cast<uintptr_t>(this) + offset;
   }
 
   enum CheckAlignment { NO_CHECK, CHECK_ALIGNMENT };
@@ -364,7 +364,7 @@
       CheckAlignment check = CHECK_ALIGNMENT) {
     Address addr = reinterpret_cast<Address>(this) + offset;
     // The FUZZ_disasm test relies on no check being done.
-    ASSERT(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
+    DCHECK(check == NO_CHECK || IsAddressAligned(addr, kInstructionSize));
     return Cast(addr);
   }
 
diff --git a/src/arm64/instrument-arm64.cc b/src/arm64/instrument-arm64.cc
index 631556f..da505ff 100644
--- a/src/arm64/instrument-arm64.cc
+++ b/src/arm64/instrument-arm64.cc
@@ -9,7 +9,7 @@
 
 Counter::Counter(const char* name, CounterType type)
     : count_(0), enabled_(false), type_(type) {
-  ASSERT(name != NULL);
+  DCHECK(name != NULL);
   strncpy(name_, name, kCounterNameMaxLength);
 }
 
@@ -107,8 +107,7 @@
     }
   }
 
-  static const int num_counters =
-    sizeof(kCounterList) / sizeof(CounterDescriptor);
+  static const int num_counters = arraysize(kCounterList);
 
   // Dump an instrumentation description comment at the top of the file.
   fprintf(output_stream_, "# counters=%d\n", num_counters);
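The arraysize() adopted here is the usual function-template idiom; unlike
sizeof(a)/sizeof(a[0]), it refuses to compile when given a pointer. A
self-contained sketch of the same trick:

#include <cstddef>

// The char(&)[N] return type carries the array bound; only the declaration
// is needed, since the expression appears inside sizeof.
template <typename T, size_t N>
char (&ArraySizeHelperSketch(T (&array)[N]))[N];
#define ARRAYSIZE_SKETCH(array) (sizeof(ArraySizeHelperSketch(array)))

// Usage: int counters[7]; static_assert(ARRAYSIZE_SKETCH(counters) == 7, "");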
@@ -144,7 +143,7 @@
   // Increment the instruction counter, and dump all counters if a sample period
   // has elapsed.
   static Counter* counter = GetCounter("Instruction");
-  ASSERT(counter->type() == Cumulative);
+  DCHECK(counter->type() == Cumulative);
   counter->Increment();
 
   if (counter->IsEnabled() && (counter->count() % sample_period_) == 0) {
diff --git a/src/arm64/instrument-arm64.h b/src/arm64/instrument-arm64.h
index 7458310..86ddfcb 100644
--- a/src/arm64/instrument-arm64.h
+++ b/src/arm64/instrument-arm64.h
@@ -7,8 +7,9 @@
 
 #include "src/globals.h"
 #include "src/utils.h"
-#include "src/arm64/decoder-arm64.h"
+
 #include "src/arm64/constants-arm64.h"
+#include "src/arm64/decoder-arm64.h"
 
 namespace v8 {
 namespace internal {
@@ -31,7 +32,7 @@
 
 class Counter {
  public:
-  Counter(const char* name, CounterType type = Gauge);
+  explicit Counter(const char* name, CounterType type = Gauge);
 
   void Increment();
   void Enable();
diff --git a/src/arm64/interface-descriptors-arm64.cc b/src/arm64/interface-descriptors-arm64.cc
new file mode 100644
index 0000000..690c8c2
--- /dev/null
+++ b/src/arm64/interface-descriptors-arm64.cc
@@ -0,0 +1,368 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return x1; }
+const Register LoadDescriptor::NameRegister() { return x2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return x0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return x3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return x1; }
+const Register StoreDescriptor::NameRegister() { return x2; }
+const Register StoreDescriptor::ValueRegister() { return x0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return x3; }
+
+
+const Register InstanceofDescriptor::left() {
+  // Object to check (instanceof lhs).
+  return x11;
+}
+
+
+const Register InstanceofDescriptor::right() {
+  // Constructor function (instanceof rhs).
+  return x10;
+}
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return x1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return x0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return x2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return x11; }
+
+
+const Register MathPowIntegerDescriptor::exponent() { return x12; }
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x2: function info
+  Register registers[] = {cp, x2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x1: function
+  Register registers[] = {cp, x1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x0: value
+  Register registers[] = {cp, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x0: value
+  Register registers[] = {cp, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x3: array literals array
+  // x2: array literal index
+  // x1: constant elements
+  Register registers[] = {cp, x3, x2, x1};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x3: object literals array
+  // x2: object literal index
+  // x1: constant properties
+  // x0: object literal flags
+  Register registers[] = {cp, x3, x2, x1, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x2: feedback vector
+  // x3: call feedback slot
+  Register registers[] = {cp, x2, x3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, x3, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // x1  function    the function to call
+  Register registers[] = {cp, x1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, x1, x3};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Smi()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // x0 : number of arguments
+  // x1 : the function to call
+  // x2 : feedback vector
+  // x3 : slot in feedback vector (smi) (if x2 is not the megamorphic symbol)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {cp, x0, x1, x2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x2: length
+  // x1: index (of last match)
+  // x0: string
+  Register registers[] = {cp, x2, x1, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x0: value (js_array)
+  // x1: to_map
+  Register registers[] = {cp, x0, x1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x1: function
+  // x2: allocation site with elements kind
+  // x0: number of arguments to the constructor function
+  Register registers[] = {cp, x1, x2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // The stack param count covers the constructor pointer and single argument.
+  Register registers[] = {cp, x1, x2, x0};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x1: constructor function
+  // x0: number of arguments to the constructor function
+  Register registers[] = {cp, x1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // The stack param count covers the constructor pointer and single argument.
+  Register registers[] = {cp, x1, x0};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x0: value to compare
+  Register registers[] = {cp, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x0: value
+  Register registers[] = {cp, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x1: left operand
+  // x0: right operand
+  Register registers[] = {cp, x1, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x2: allocation site
+  // x1: left operand
+  // x0: right operand
+  Register registers[] = {cp, x2, x1, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // cp: context
+  // x1: left operand
+  // x0: right operand
+  Register registers[] = {cp, x1, x0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor noInlineDescriptor =
+      PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      x2,  // key
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // key
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &noInlineDescriptor);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor noInlineDescriptor =
+      PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      x2,  // name
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // name
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &noInlineDescriptor);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor default_descriptor =
+      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      x0,  // receiver
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // receiver
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &default_descriptor);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor default_descriptor =
+      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      x1,  // JSFunction
+      x0,  // actual number of arguments
+      x2,  // expected number of arguments
+  };
+  Representation representations[] = {
+      Representation::Tagged(),     // context
+      Representation::Tagged(),     // JSFunction
+      Representation::Integer32(),  // actual number of arguments
+      Representation::Integer32(),  // expected number of arguments
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &default_descriptor);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  static PlatformInterfaceDescriptor default_descriptor =
+      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
+
+  Register registers[] = {
+      cp,  // context
+      x0,  // callee
+      x4,  // call_data
+      x2,  // holder
+      x1,  // api_function_address
+  };
+  Representation representations[] = {
+      Representation::Tagged(),    // context
+      Representation::Tagged(),    // callee
+      Representation::Tagged(),    // call_data
+      Representation::Tagged(),    // holder
+      Representation::External(),  // api_function_address
+  };
+  data->Initialize(arraysize(registers), registers, representations,
+                   &default_descriptor);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
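Each Initialize() above records, per calling convention, which registers carry
which parameters and optionally their representations. A simplified stand-in
for what CallInterfaceDescriptorData might hold (the real types live in
src/interface-descriptors.h; names here are illustrative):

#include <cstddef>

struct DescriptorDataSketch {
  size_t register_count = 0;
  const int* register_codes = nullptr;   // allocation indices, e.g. {cp,x1,x0}
  const int* representations = nullptr;  // nullptr => all tagged

  void Initialize(size_t count, const int* regs, const int* reps) {
    register_count = count;
    register_codes = regs;
    representations = reps;  // callers above pass NULL to mean "all tagged"
  }
};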
diff --git a/src/arm64/interface-descriptors-arm64.h b/src/arm64/interface-descriptors-arm64.h
new file mode 100644
index 0000000..76def88
--- /dev/null
+++ b/src/arm64/interface-descriptors-arm64.h
@@ -0,0 +1,26 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
+#define V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformInterfaceDescriptor {
+ public:
+  explicit PlatformInterfaceDescriptor(TargetAddressStorageMode storage_mode)
+      : storage_mode_(storage_mode) {}
+
+  TargetAddressStorageMode storage_mode() { return storage_mode_; }
+
+ private:
+  TargetAddressStorageMode storage_mode_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_ARM64_INTERFACE_DESCRIPTORS_ARM64_H_
diff --git a/src/arm64/lithium-arm64.cc b/src/arm64/lithium-arm64.cc
index 8446edf..502b046 100644
--- a/src/arm64/lithium-arm64.cc
+++ b/src/arm64/lithium-arm64.cc
@@ -4,15 +4,13 @@
 
 #include "src/v8.h"
 
-#include "src/lithium-allocator-inl.h"
-#include "src/arm64/lithium-arm64.h"
 #include "src/arm64/lithium-codegen-arm64.h"
 #include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
 
 namespace v8 {
 namespace internal {
 
-
 #define DEFINE_COMPILE(type)                            \
   void L##type::CompileToNative(LCodeGen* generator) {  \
     generator->Do##type(this);                          \
@@ -26,17 +24,17 @@
   // outputs because all registers are blocked by the calling convention.
   // Inputs operands must use a fixed register or use-at-start policy or
   // a non-register policy.
-  ASSERT(Output() == NULL ||
+  DCHECK(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||
+    DCHECK(operand->HasFixedPolicy() ||
            operand->IsUsedAtStart());
   }
   for (TempIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+    DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -284,7 +282,9 @@
 
 void LStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
-  hydrogen()->access().PrintTo(stream);
+  OStringStream os;
+  os << hydrogen()->access();
+  stream->Add(os.c_str());
   stream->Add(" <- ");
   value()->PrintTo(stream);
 }
@@ -354,12 +354,6 @@
 }
 
 
-void LChunkBuilder::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
-  status_ = ABORTED;
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
   return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
                                   Register::ToAllocationIndex(reg));
@@ -501,7 +495,7 @@
 
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
-  ASSERT(!instr->HasPointerMap());
+  DCHECK(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(zone()));
   return instr;
 }
@@ -543,21 +537,28 @@
   if (kind == DOUBLE_REGISTERS) {
     return LDoubleStackSlot::Create(index, zone());
   } else {
-    ASSERT(kind == GENERAL_REGISTERS);
+    DCHECK(kind == GENERAL_REGISTERS);
     return LStackSlot::Create(index, zone());
   }
 }
 
 
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
 LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LPlatformChunk* LChunkBuilder::Build() {
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info_, graph_);
   LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
@@ -583,7 +584,7 @@
 
 
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block) {
-  ASSERT(is_building());
+  DCHECK(is_building());
   current_block_ = block;
 
   if (block->IsStartBlock()) {
@@ -592,14 +593,14 @@
   } else if (block->predecessors()->length() == 1) {
     // We have a single predecessor => copy environment and outgoing
     // argument count from the predecessor.
-    ASSERT(block->phis()->length() == 0);
+    DCHECK(block->phis()->length() == 0);
     HBasicBlock* pred = block->predecessors()->at(0);
     HEnvironment* last_environment = pred->last_environment();
-    ASSERT(last_environment != NULL);
+    DCHECK(last_environment != NULL);
 
     // Only copy the environment, if it is later used again.
     if (pred->end()->SecondSuccessor() == NULL) {
-      ASSERT(pred->end()->FirstSuccessor() == block);
+      DCHECK(pred->end()->FirstSuccessor() == block);
     } else {
       if ((pred->end()->FirstSuccessor()->block_id() > block->block_id()) ||
           (pred->end()->SecondSuccessor()->block_id() > block->block_id())) {
@@ -607,7 +608,7 @@
       }
     }
     block->UpdateEnvironment(last_environment);
-    ASSERT(pred->argument_count() >= 0);
+    DCHECK(pred->argument_count() >= 0);
     argument_count_ = pred->argument_count();
   } else {
     // We are at a state join => process phis.
@@ -660,7 +661,7 @@
     if (current->OperandCount() == 0) {
       instr = DefineAsRegister(new(zone()) LDummy());
     } else {
-      ASSERT(!current->OperandAt(0)->IsControlInstruction());
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
       instr = DefineAsRegister(new(zone())
           LDummyUse(UseAny(current->OperandAt(0))));
     }
@@ -683,7 +684,7 @@
   }
 
   argument_count_ += current->argument_delta();
-  ASSERT(argument_count_ >= 0);
+  DCHECK(argument_count_ >= 0);
 
   if (instr != NULL) {
     AddInstruction(instr, current);
@@ -725,7 +726,7 @@
       LUnallocated* operand = LUnallocated::cast(it.Current());
       if (operand->HasFixedPolicy()) ++fixed;
     }
-    ASSERT(fixed == 0 || used_at_start == 0);
+    DCHECK(fixed == 0 || used_at_start == 0);
   }
 #endif
 
@@ -779,9 +780,9 @@
 
 LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                            HArithmeticBinaryOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->left()->representation().IsDouble());
-  ASSERT(instr->right()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
 
   if (op == Token::MOD) {
     LOperand* left = UseFixedDouble(instr->left(), d0);
@@ -799,7 +800,7 @@
 
 LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
                                            HBinaryOperation* instr) {
-  ASSERT((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
+  DCHECK((op == Token::ADD) || (op == Token::SUB) || (op == Token::MUL) ||
          (op == Token::DIV) || (op == Token::MOD) || (op == Token::SHR) ||
          (op == Token::SHL) || (op == Token::SAR) || (op == Token::ROR) ||
          (op == Token::BIT_OR) || (op == Token::BIT_AND) ||
@@ -809,9 +810,9 @@
 
   // TODO(jbramley): Once we've implemented smi support for all arithmetic
   // operations, these assertions should check IsTagged().
-  ASSERT(instr->representation().IsSmiOrTagged());
-  ASSERT(left->representation().IsSmiOrTagged());
-  ASSERT(right->representation().IsSmiOrTagged());
+  DCHECK(instr->representation().IsSmiOrTagged());
+  DCHECK(left->representation().IsSmiOrTagged());
+  DCHECK(right->representation().IsSmiOrTagged());
 
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left_operand = UseFixed(left, x1);
@@ -851,8 +852,8 @@
 
 LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
 
     LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
     if (shifted_operation != NULL) {
@@ -870,16 +871,16 @@
     }
     return result;
   } else if (instr->representation().IsExternal()) {
-    ASSERT(instr->left()->representation().IsExternal());
-    ASSERT(instr->right()->representation().IsInteger32());
-    ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+    DCHECK(instr->left()->representation().IsExternal());
+    DCHECK(instr->right()->representation().IsInteger32());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterOrConstantAtStart(instr->right());
     return DefineAsRegister(new(zone()) LAddE(left, right));
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::ADD, instr);
   } else {
-    ASSERT(instr->representation().IsTagged());
+    DCHECK(instr->representation().IsTagged());
     return DoArithmeticT(Token::ADD, instr);
   }
 }
@@ -935,9 +936,9 @@
 
 LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
-    ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
 
     LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
     if (shifted_operation != NULL) {
@@ -987,7 +988,7 @@
     // These representations have simple checks that cannot deoptimize.
     return new(zone()) LBranch(UseRegister(value), NULL, NULL);
   } else {
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     if (type.IsBoolean() || type.IsSmi() || type.IsJSArray() ||
         type.IsHeapNumber()) {
       // These types have simple checks that cannot deoptimize.
@@ -1007,7 +1008,7 @@
     if (expected.IsGeneric() || expected.IsEmpty()) {
       // The generic case cannot deoptimize because it already supports every
       // possible input type.
-      ASSERT(needs_temps);
+      DCHECK(needs_temps);
       return new(zone()) LBranch(UseRegister(value), temp1, temp2);
     } else {
       return AssignEnvironment(
@@ -1029,14 +1030,14 @@
 
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
-  const CallInterfaceDescriptor* descriptor = instr->descriptor();
+  CallInterfaceDescriptor descriptor = instr->descriptor();
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
   ops.Add(target, zone());
   for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op = UseFixed(instr->OperandAt(i),
-        descriptor->GetParameterRegister(i - 1));
+    LOperand* op =
+        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
     ops.Add(op, zone());
   }
 
@@ -1119,7 +1120,7 @@
       }
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       if (val->type().IsSmi() || val->representation().IsSmi()) {
         LOperand* value = UseRegisterAtStart(val);
         return DefineAsRegister(new(zone()) LSmiUntag(value, false));
@@ -1143,7 +1144,7 @@
       LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
       return AssignPointerMap(DefineAsRegister(result));
     } else {
-      ASSERT(to.IsSmi() || to.IsInteger32());
+      DCHECK(to.IsSmi() || to.IsInteger32());
       if (instr->CanTruncateToInt32()) {
         LOperand* value = UseRegister(val);
         return DefineAsRegister(new(zone()) LTruncateDoubleToIntOrSmi(value));
@@ -1175,7 +1176,7 @@
       }
       return result;
     } else {
-      ASSERT(to.IsDouble());
+      DCHECK(to.IsDouble());
       if (val->CheckFlag(HInstruction::kUint32)) {
         return DefineAsRegister(
             new(zone()) LUint32ToDouble(UseRegisterAtStart(val)));
@@ -1242,10 +1243,9 @@
   } else if (input_rep.IsInteger32()) {
     return DefineAsRegister(new(zone()) LClampIToUint8(reg));
   } else {
-    ASSERT(input_rep.IsSmiOrTagged());
+    DCHECK(input_rep.IsSmiOrTagged());
     return AssignEnvironment(
         DefineAsRegister(new(zone()) LClampTToUint8(reg,
-                                                    TempRegister(),
                                                     TempDoubleRegister())));
   }
 }
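
ClampTToUint8 and its D/I siblings implement the uint8-clamped store semantics (as used by Uint8ClampedArray): pin to [0, 255], send NaN to 0, and round ties to even. A scalar sketch of that rule, assuming the default FE_TONEAREST rounding mode:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampDoubleToUint8(double value) {
      if (!(value > 0.0)) return 0;    // catches NaN and every non-positive value
      if (value >= 255.0) return 255;
      // std::nearbyint rounds to nearest with ties to even under FE_TONEAREST.
      return static_cast<uint8_t>(std::nearbyint(value));
    }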
@@ -1253,7 +1253,7 @@
 
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LClassOfTestAndBranch(value,
                                            TempRegister(),
@@ -1265,32 +1265,30 @@
     HCompareNumericAndBranch* instr) {
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(r));
-    ASSERT(instr->right()->representation().Equals(r));
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseRegisterOrConstantAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
   } else {
-    ASSERT(r.IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
-    // TODO(all): In fact the only case that we can handle more efficiently is
-    // when one of the operand is the constant 0. Currently the MacroAssembler
-    // will be able to cope with any constant by loading it into an internal
-    // scratch register. This means that if the constant is used more that once,
-    // it will be loaded multiple times. Unfortunatly crankshaft already
-    // duplicates constant loads, but we should modify the code below once this
-    // issue has been addressed in crankshaft.
-    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
-    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
+      LOperand* left = UseConstant(instr->left());
+      LOperand* right = UseConstant(instr->right());
+      return new(zone()) LCompareNumericAndBranch(left, right);
+    }
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
   }
 }
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), x1);
   LOperand* right = UseFixed(instr->right(), x0);
@@ -1320,7 +1318,7 @@
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
   return new(zone()) LCmpMapAndBranch(value, temp);
@@ -1381,9 +1379,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
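
A dedicated DoDivByPowerOf2I path exists because signed division by 2^k is not just an arithmetic shift: the shift rounds toward negative infinity while JavaScript's truncating integer division rounds toward zero, so negative dividends need a bias first. A sketch of the correction, ignoring the INT32_MIN edge case:

    #include <cstdint>

    int32_t TruncatingDivByPowerOf2(int32_t x, int k) {
      int32_t bias = (x < 0) ? ((1 << k) - 1) : 0;
      return (x + bias) >> k;  // e.g. -7/2: (-7 + 1) >> 1 == -3, not -4
    }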
@@ -1399,9 +1397,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
@@ -1418,9 +1416,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivI(HBinaryOperation* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
@@ -1470,6 +1468,7 @@
       instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
   }
+  inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -1501,7 +1500,7 @@
 
 LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
     HGetCachedArrayIndex* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
 }
@@ -1514,7 +1513,7 @@
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()), TempRegister());
 }
@@ -1522,7 +1521,7 @@
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LHasInstanceTypeAndBranch(value, TempRegister());
 }
@@ -1556,6 +1555,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+    HTailCallThroughMegamorphicCache* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* receiver_register =
+      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+  LOperand* name_register =
+      UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  // Not marked as call. It can't deoptimize, and it never returns.
+  return new (zone()) LTailCallThroughMegamorphicCache(
+      context, receiver_register, name_register);
+}
+
+
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   // The function is required (by MacroAssembler::InvokeFunction) to be in x1.
@@ -1580,7 +1592,7 @@
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
@@ -1589,7 +1601,7 @@
 
 
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
   return new(zone()) LIsStringAndBranch(value, temp);
@@ -1597,14 +1609,14 @@
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsSmiAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LIsUndetectableAndBranch(value, TempRegister());
 }
@@ -1617,7 +1629,7 @@
   if (env->entry()->arguments_pushed()) {
     int argument_count = env->arguments_environment()->parameter_count();
     pop = new(zone()) LDrop(argument_count);
-    ASSERT(instr->argument_delta() == -argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
   }
 
   HEnvironment* outer =
@@ -1658,15 +1670,21 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object = UseFixed(instr->global_object(), x0);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
   LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object);
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
   return MarkAsCall(DefineFixed(result, x0), instr);
 }
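
The FixedTemp(Register) overload introduced earlier in this file exists for the pattern that DoLoadGlobalGeneric above and the keyed and named generic loads below now share: when --vector-ics is enabled, the type-feedback vector is pinned to whatever register the IC's interface descriptor names, replacing the hard-coded x0/x1 operands the hunks delete. The recurring shape, taken verbatim from these hunks:

    LOperand* vector = NULL;
    if (FLAG_vector_ics) {
      vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
    }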
 
 
 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  ASSERT(instr->key()->representation().IsSmiOrInteger32());
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* elements = UseRegister(instr->elements());
   LOperand* key = UseRegisterOrConstant(instr->key());
@@ -1684,7 +1702,7 @@
           ? AssignEnvironment(DefineAsRegister(result))
           : DefineAsRegister(result);
     } else {
-      ASSERT(instr->representation().IsSmiOrTagged() ||
+      DCHECK(instr->representation().IsSmiOrTagged() ||
              instr->representation().IsInteger32());
       LOperand* temp = instr->key()->IsConstant() ? NULL : TempRegister();
       LLoadKeyedFixed* result =
@@ -1694,7 +1712,7 @@
           : DefineAsRegister(result);
     }
   } else {
-    ASSERT((instr->representation().IsInteger32() &&
+    DCHECK((instr->representation().IsInteger32() &&
             !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
            (instr->representation().IsDouble() &&
             IsDoubleOrFloatElementsKind(instr->elements_kind())));
@@ -1714,11 +1732,17 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object = UseFixed(instr->object(), x1);
-  LOperand* key = UseFixed(instr->key(), x0);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
 
   LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), x0);
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+                  x0);
   return MarkAsCall(result, instr);
 }
 
@@ -1731,9 +1755,15 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object = UseFixed(instr->object(), x0);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
   LInstruction* result =
-      DefineFixed(new(zone()) LLoadNamedGeneric(context, object), x0);
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), x0);
   return MarkAsCall(result, instr);
 }
 
@@ -1750,9 +1780,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
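
By contrast, the flooring division behind Math.floor(x / 2^k) is exactly the arithmetic shift, because the shift already rounds toward negative infinity; that is why FlooringDivByPowerOf2I can be cheaper than the truncating form sketched earlier:

    #include <cstdint>

    int32_t FlooringDivByPowerOf2(int32_t x, int k) {
      return x >> k;  // -7 >> 1 == -4 == floor(-7 / 2.0)
    }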
@@ -1766,9 +1796,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp =
@@ -1810,14 +1840,14 @@
   LOperand* left = NULL;
   LOperand* right = NULL;
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     left = UseRegisterAtStart(instr->BetterLeftOperand());
     right = UseRegisterOrConstantAtStart(instr->BetterRightOperand());
   } else {
-    ASSERT(instr->representation().IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     left = UseRegisterAtStart(instr->left());
     right = UseRegisterAtStart(instr->right());
   }
@@ -1826,14 +1856,15 @@
 
 
 LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
           dividend, divisor));
-  if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
     result = AssignEnvironment(result);
   }
   return result;
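
The added kLeftCanBeNegative test narrows the minus-zero bailout: x % 2^k can only produce -0 when the dividend is negative, since the result takes the dividend's sign. A worked check using C's fmod, which follows the same sign rule as JavaScript's %:

    #include <cmath>
    #include <cstdio>

    int main() {
      double neg = std::fmod(-8.0, 8.0);  // -0.0: sign comes from the dividend
      double pos = std::fmod(8.0, 8.0);   // +0.0
      std::printf("signbit: %d vs %d\n", std::signbit(neg), std::signbit(pos));
      return 0;
    }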
@@ -1841,9 +1872,9 @@
 
 
 LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp = TempRegister();
@@ -1857,9 +1888,9 @@
 
 
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LInstruction* result = DefineAsRegister(new(zone()) LModI(dividend, divisor));
@@ -1890,8 +1921,8 @@
 
 LInstruction* LChunkBuilder::DoMul(HMul* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
 
     bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
     bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
@@ -1912,12 +1943,12 @@
       int32_t constant_abs = Abs(constant);
 
       if (!end_range_constant &&
-          (small_constant ||
-           (IsPowerOf2(constant_abs)) ||
-           (!can_overflow && (IsPowerOf2(constant_abs + 1) ||
-                              IsPowerOf2(constant_abs - 1))))) {
+          (small_constant || (base::bits::IsPowerOfTwo32(constant_abs)) ||
+           (!can_overflow && (base::bits::IsPowerOfTwo32(constant_abs + 1) ||
+                              base::bits::IsPowerOfTwo32(constant_abs - 1))))) {
         LConstantOperand* right = UseConstant(most_const);
-        bool need_register = IsPowerOf2(constant_abs) && !small_constant;
+        bool need_register =
+            base::bits::IsPowerOfTwo32(constant_abs) && !small_constant;
         LOperand* left = need_register ? UseRegister(least_const)
                                        : UseRegisterAtStart(least_const);
         LInstruction* result =
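
The switch from IsPowerOf2 to base::bits::IsPowerOfTwo32 gates the same strength reduction as before: multiplication by 2^k lowers to a shift, and by 2^k plus or minus one to a shift combined with an add or subtract, provided no overflow check is needed. A sketch of the predicate and one reduced form:

    #include <cstdint>

    bool IsPowerOfTwo32(uint32_t v) {
      return v != 0 && (v & (v - 1)) == 0;  // exactly one bit set
    }

    int32_t MulBy7(int32_t x) {
      return (x << 3) - x;  // 7 == 8 - 1: one shift plus one subtract
    }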
@@ -1949,7 +1980,7 @@
 
 
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
-  ASSERT(argument_count_ == 0);
+  DCHECK(argument_count_ == 0);
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
   return AssignEnvironment(new(zone()) LOsrEntry);
@@ -1962,28 +1993,31 @@
     int spill_index = chunk_->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor();
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
-    Register reg = descriptor->GetParameterRegister(index);
+    Register reg = descriptor.GetEnvironmentParameterRegister(index);
     return DefineFixed(result, reg);
   }
 }
 
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  ASSERT(instr->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
   // We call a C function for double power. It can't trigger a GC.
   // We need to use fixed result register for the call.
   Representation exponent_type = instr->right()->representation();
-  ASSERT(instr->left()->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
   LOperand* left = UseFixedDouble(instr->left(), d0);
-  LOperand* right = exponent_type.IsInteger32()
-                        ? UseFixed(instr->right(), x12)
-                        : exponent_type.IsDouble()
-                            ? UseFixedDouble(instr->right(), d1)
-                            : UseFixed(instr->right(), x11);
+  LOperand* right;
+  if (exponent_type.IsInteger32()) {
+    right = UseFixed(instr->right(), MathPowIntegerDescriptor::exponent());
+  } else if (exponent_type.IsDouble()) {
+    right = UseFixedDouble(instr->right(), d1);
+  } else {
+    right = UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  }
   LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, d0),
                     instr,
@@ -2018,7 +2052,7 @@
 
 LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
   HValue* value = instr->value();
-  ASSERT(value->representation().IsDouble());
+  DCHECK(value->representation().IsDouble());
   return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
 }
 
@@ -2072,8 +2106,8 @@
   HBinaryOperation* hinstr = HBinaryOperation::cast(val);
   HValue* hleft = hinstr->left();
   HValue* hright = hinstr->right();
-  ASSERT(hleft->representation().Equals(hinstr->representation()));
-  ASSERT(hright->representation().Equals(hinstr->representation()));
+  DCHECK(hleft->representation().Equals(hinstr->representation()));
+  DCHECK(hright->representation().Equals(hinstr->representation()));
 
   if ((hright->IsConstant() &&
        LikelyFitsImmField(hinstr, HConstant::cast(hright)->Integer32Value())) ||
@@ -2145,8 +2179,8 @@
 
 LInstruction* LChunkBuilder::DoShiftedBinaryOp(
     HBinaryOperation* hinstr, HValue* hleft, HBitwiseBinaryOperation* hshift) {
-  ASSERT(hshift->IsBitwiseBinaryShift());
-  ASSERT(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
+  DCHECK(hshift->IsBitwiseBinaryShift());
+  DCHECK(!hshift->IsShr() || (JSShiftAmountFromHConstant(hshift->right()) > 0));
 
   LTemplateResultInstruction<1>* res;
   LOperand* left = UseRegisterAtStart(hleft);
@@ -2165,7 +2199,7 @@
   } else if (hinstr->IsAdd()) {
     res = new(zone()) LAddI(left, right, shift_op, shift_amount);
   } else {
-    ASSERT(hinstr->IsSub());
+    DCHECK(hinstr->IsSub());
     res = new(zone()) LSubI(left, right, shift_op, shift_amount);
   }
   if (hinstr->CheckFlag(HValue::kCanOverflow)) {
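
DoShiftedBinaryOp pays off because ARM64 data-processing instructions accept a shifted-register operand, so a shift whose only use feeds an add, subtract, or bitwise operation folds into that operation for free. Both steps in the function below fit a single instruction:

    #include <cstdint>

    int64_t AddShifted(int64_t a, int64_t b) {
      // One ARM64 instruction: add x0, x0, x1, lsl #2
      return a + (b << 2);
    }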
@@ -2181,10 +2215,9 @@
     return DoArithmeticT(op, instr);
   }
 
-  ASSERT(instr->representation().IsInteger32() ||
-         instr->representation().IsSmi());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
 
   if (ShiftCanBeOptimizedAway(instr)) {
     return NULL;
@@ -2193,42 +2226,30 @@
   LOperand* left = instr->representation().IsSmi()
       ? UseRegister(instr->left())
       : UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterOrConstantAtStart(instr->right());
 
-  HValue* right_value = instr->right();
-  LOperand* right = NULL;
-  LOperand* temp = NULL;
-  int constant_value = 0;
-  if (right_value->IsConstant()) {
-    right = UseConstant(right_value);
-    constant_value = JSShiftAmountFromHConstant(right_value);
-  } else {
-    right = UseRegisterAtStart(right_value);
-    if (op == Token::ROR) {
-      temp = TempRegister();
-    }
-  }
-
-  // Shift operations can only deoptimize if we do a logical shift by 0 and the
-  // result cannot be truncated to int32.
-  bool does_deopt = false;
-  if ((op == Token::SHR) && (constant_value == 0)) {
+  // The only shift that can deoptimize is `left >>> 0`, where left is negative.
+  // In these cases, the result is a uint32 that is too large for an int32.
+  bool right_can_be_zero = !instr->right()->IsConstant() ||
+                           (JSShiftAmountFromHConstant(instr->right()) == 0);
+  bool can_deopt = false;
+  if ((op == Token::SHR) && right_can_be_zero) {
     if (FLAG_opt_safe_uint32_operations) {
-      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+      can_deopt = !instr->CheckFlag(HInstruction::kUint32);
     } else {
-      does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+      can_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
     }
   }
 
   LInstruction* result;
   if (instr->representation().IsInteger32()) {
-    result = DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+    result = DefineAsRegister(new (zone()) LShiftI(op, left, right, can_deopt));
   } else {
-    ASSERT(instr->representation().IsSmi());
-    result = DefineAsRegister(
-        new(zone()) LShiftS(op, left, right, temp, does_deopt));
+    DCHECK(instr->representation().IsSmi());
+    result = DefineAsRegister(new (zone()) LShiftS(op, left, right, can_deopt));
   }
 
-  return does_deopt ? AssignEnvironment(result) : result;
+  return can_deopt ? AssignEnvironment(result) : result;
 }
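
The rewritten condition spells out why only `left >>> 0` can deoptimize: any nonzero shift amount brings the result back into int32 range, but an unsigned shift by zero of a negative value yields a uint32 above INT32_MAX, which the int32-typed fast path cannot represent. The arithmetic, worked through:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int32_t left = -1;
      uint32_t result = static_cast<uint32_t>(left) >> 0;  // 4294967295
      std::printf("%u fits in int32? %s\n", result,
                  result <= static_cast<uint32_t>(INT32_MAX) ? "yes" : "no");
      return 0;
    }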
 
 
@@ -2263,7 +2284,7 @@
     LOperand* context = UseFixed(instr->context(), cp);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
-    ASSERT(instr->is_backwards_branch());
+    DCHECK(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
     return AssignEnvironment(
         AssignPointerMap(new(zone()) LStackCheck(context)));
@@ -2332,23 +2353,23 @@
   }
 
   if (instr->is_typed_elements()) {
-    ASSERT((instr->value()->representation().IsInteger32() &&
+    DCHECK((instr->value()->representation().IsInteger32() &&
             !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
            (instr->value()->representation().IsDouble() &&
             IsDoubleOrFloatElementsKind(instr->elements_kind())));
-    ASSERT((instr->is_fixed_typed_array() &&
+    DCHECK((instr->is_fixed_typed_array() &&
             instr->elements()->representation().IsTagged()) ||
            (instr->is_external() &&
             instr->elements()->representation().IsExternal()));
     return new(zone()) LStoreKeyedExternal(elements, key, val, temp);
 
   } else if (instr->value()->representation().IsDouble()) {
-    ASSERT(instr->elements()->representation().IsTagged());
+    DCHECK(instr->elements()->representation().IsTagged());
     return new(zone()) LStoreKeyedFixedDouble(elements, key, val, temp);
 
   } else {
-    ASSERT(instr->elements()->representation().IsTagged());
-    ASSERT(instr->value()->representation().IsSmiOrTagged() ||
+    DCHECK(instr->elements()->representation().IsTagged());
+    DCHECK(instr->value()->representation().IsSmiOrTagged() ||
            instr->value()->representation().IsInteger32());
     return new(zone()) LStoreKeyedFixed(elements, key, val, temp);
   }
@@ -2357,13 +2378,14 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object = UseFixed(instr->object(), x2);
-  LOperand* key = UseFixed(instr->key(), x1);
-  LOperand* value = UseFixed(instr->value(), x0);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsTagged());
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
 
   return MarkAsCall(
       new(zone()) LStoreKeyedGeneric(context, object, key, value), instr);
@@ -2401,8 +2423,10 @@
 
 LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object = UseFixed(instr->object(), x1);
-  LOperand* value = UseFixed(instr->value(), x0);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
   LInstruction* result = new(zone()) LStoreNamedGeneric(context, object, value);
   return MarkAsCall(result, instr);
 }
@@ -2439,8 +2463,8 @@
 
 LInstruction* LChunkBuilder::DoStringCompareAndBranch(
     HStringCompareAndBranch* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), x1);
   LOperand* right = UseFixed(instr->right(), x0);
@@ -2452,8 +2476,8 @@
 
 LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
 
     LInstruction* shifted_operation = TryDoOpWithShiftedRightOperand(instr);
     if (shifted_operation != NULL) {
@@ -2574,8 +2598,8 @@
       }
     }
     case kMathExp: {
-      ASSERT(instr->representation().IsDouble());
-      ASSERT(instr->value()->representation().IsDouble());
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
       LOperand* input = UseRegister(instr->value());
       LOperand* double_temp1 = TempDoubleRegister();
       LOperand* temp1 = TempRegister();
@@ -2586,52 +2610,58 @@
       return DefineAsRegister(result);
     }
     case kMathFloor: {
-      ASSERT(instr->value()->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
       LOperand* input = UseRegisterAtStart(instr->value());
       if (instr->representation().IsInteger32()) {
         LMathFloorI* result = new(zone()) LMathFloorI(input);
         return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
       } else {
-        ASSERT(instr->representation().IsDouble());
+        DCHECK(instr->representation().IsDouble());
         LMathFloorD* result = new(zone()) LMathFloorD(input);
         return DefineAsRegister(result);
       }
     }
     case kMathLog: {
-      ASSERT(instr->representation().IsDouble());
-      ASSERT(instr->value()->representation().IsDouble());
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
       LOperand* input = UseFixedDouble(instr->value(), d0);
       LMathLog* result = new(zone()) LMathLog(input);
       return MarkAsCall(DefineFixedDouble(result, d0), instr);
     }
     case kMathPowHalf: {
-      ASSERT(instr->representation().IsDouble());
-      ASSERT(instr->value()->representation().IsDouble());
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
       LOperand* input = UseRegister(instr->value());
       return DefineAsRegister(new(zone()) LMathPowHalf(input));
     }
     case kMathRound: {
-      ASSERT(instr->value()->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
       LOperand* input = UseRegister(instr->value());
       if (instr->representation().IsInteger32()) {
         LOperand* temp = TempDoubleRegister();
         LMathRoundI* result = new(zone()) LMathRoundI(input, temp);
         return AssignEnvironment(DefineAsRegister(result));
       } else {
-        ASSERT(instr->representation().IsDouble());
+        DCHECK(instr->representation().IsDouble());
         LMathRoundD* result = new(zone()) LMathRoundD(input);
         return DefineAsRegister(result);
       }
     }
+    case kMathFround: {
+      DCHECK(instr->value()->representation().IsDouble());
+      LOperand* input = UseRegister(instr->value());
+      LMathFround* result = new (zone()) LMathFround(input);
+      return DefineAsRegister(result);
+    }
     case kMathSqrt: {
-      ASSERT(instr->representation().IsDouble());
-      ASSERT(instr->value()->representation().IsDouble());
+      DCHECK(instr->representation().IsDouble());
+      DCHECK(instr->value()->representation().IsDouble());
       LOperand* input = UseRegisterAtStart(instr->value());
       return DefineAsRegister(new(zone()) LMathSqrt(input));
     }
     case kMathClz32: {
-      ASSERT(instr->representation().IsInteger32());
-      ASSERT(instr->value()->representation().IsInteger32());
+      DCHECK(instr->representation().IsInteger32());
+      DCHECK(instr->value()->representation().IsInteger32());
       LOperand* input = UseRegisterAtStart(instr->value());
       return DefineAsRegister(new(zone()) LMathClz32(input));
     }
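
The new kMathFround case lowers Math.fround, which rounds a double to the nearest float32 and widens it back, i.e. one round trip through single precision:

    #include <cstdio>

    double Fround(double x) {
      return static_cast<double>(static_cast<float>(x));
    }

    int main() {
      std::printf("%.17g\n", Fround(0.1));  // prints 0.10000000149011612
      return 0;
    }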
@@ -2652,7 +2682,7 @@
   } else {
     spill_index = env_index - instr->environment()->first_local_index();
     if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
-      Abort(kTooManySpillSlotsNeededForOSR);
+      Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
   }
diff --git a/src/arm64/lithium-arm64.h b/src/arm64/lithium-arm64.h
index 18dd927..6ead3fe 100644
--- a/src/arm64/lithium-arm64.h
+++ b/src/arm64/lithium-arm64.h
@@ -6,8 +6,8 @@
 #define V8_ARM64_LITHIUM_ARM64_H_
 
 #include "src/hydrogen.h"
-#include "src/lithium-allocator.h"
 #include "src/lithium.h"
+#include "src/lithium-allocator.h"
 #include "src/safepoint-table.h"
 #include "src/utils.h"
 
@@ -17,175 +17,177 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
-  V(AccessArgumentsAt)                          \
-  V(AddE)                                       \
-  V(AddI)                                       \
-  V(AddS)                                       \
-  V(Allocate)                                   \
-  V(AllocateBlockContext)                       \
-  V(ApplyArguments)                             \
-  V(ArgumentsElements)                          \
-  V(ArgumentsLength)                            \
-  V(ArithmeticD)                                \
-  V(ArithmeticT)                                \
-  V(BitI)                                       \
-  V(BitS)                                       \
-  V(BoundsCheck)                                \
-  V(Branch)                                     \
-  V(CallFunction)                               \
-  V(CallJSFunction)                             \
-  V(CallNew)                                    \
-  V(CallNewArray)                               \
-  V(CallRuntime)                                \
-  V(CallStub)                                   \
-  V(CallWithDescriptor)                         \
-  V(CheckInstanceType)                          \
-  V(CheckMapValue)                              \
-  V(CheckMaps)                                  \
-  V(CheckNonSmi)                                \
-  V(CheckSmi)                                   \
-  V(CheckValue)                                 \
-  V(ClampDToUint8)                              \
-  V(ClampIToUint8)                              \
-  V(ClampTToUint8)                              \
-  V(ClassOfTestAndBranch)                       \
-  V(CmpHoleAndBranchD)                          \
-  V(CmpHoleAndBranchT)                          \
-  V(CmpMapAndBranch)                            \
-  V(CmpObjectEqAndBranch)                       \
-  V(CmpT)                                       \
-  V(CompareMinusZeroAndBranch)                  \
-  V(CompareNumericAndBranch)                    \
-  V(ConstantD)                                  \
-  V(ConstantE)                                  \
-  V(ConstantI)                                  \
-  V(ConstantS)                                  \
-  V(ConstantT)                                  \
-  V(ConstructDouble)                            \
-  V(Context)                                    \
-  V(DateField)                                  \
-  V(DebugBreak)                                 \
-  V(DeclareGlobals)                             \
-  V(Deoptimize)                                 \
-  V(DivByConstI)                                \
-  V(DivByPowerOf2I)                             \
-  V(DivI)                                       \
-  V(DoubleBits)                                 \
-  V(DoubleToIntOrSmi)                           \
-  V(Drop)                                       \
-  V(Dummy)                                      \
-  V(DummyUse)                                   \
-  V(FlooringDivByConstI)                        \
-  V(FlooringDivByPowerOf2I)                     \
-  V(FlooringDivI)                               \
-  V(ForInCacheArray)                            \
-  V(ForInPrepareMap)                            \
-  V(FunctionLiteral)                            \
-  V(GetCachedArrayIndex)                        \
-  V(Goto)                                       \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceTypeAndBranch)                   \
-  V(InnerAllocatedObject)                       \
-  V(InstanceOf)                                 \
-  V(InstanceOfKnownGlobal)                      \
-  V(InstructionGap)                             \
-  V(Integer32ToDouble)                          \
-  V(InvokeFunction)                             \
-  V(IsConstructCallAndBranch)                   \
-  V(IsObjectAndBranch)                          \
-  V(IsSmiAndBranch)                             \
-  V(IsStringAndBranch)                          \
-  V(IsUndetectableAndBranch)                    \
-  V(Label)                                      \
-  V(LazyBailout)                                \
-  V(LoadContextSlot)                            \
-  V(LoadFieldByIndex)                           \
-  V(LoadFunctionPrototype)                      \
-  V(LoadGlobalCell)                             \
-  V(LoadGlobalGeneric)                          \
-  V(LoadKeyedExternal)                          \
-  V(LoadKeyedFixed)                             \
-  V(LoadKeyedFixedDouble)                       \
-  V(LoadKeyedGeneric)                           \
-  V(LoadNamedField)                             \
-  V(LoadNamedGeneric)                           \
-  V(LoadRoot)                                   \
-  V(MapEnumLength)                              \
-  V(MathAbs)                                    \
-  V(MathAbsTagged)                              \
-  V(MathClz32)                                  \
-  V(MathExp)                                    \
-  V(MathFloorD)                                 \
-  V(MathFloorI)                                 \
-  V(MathLog)                                    \
-  V(MathMinMax)                                 \
-  V(MathPowHalf)                                \
-  V(MathRoundD)                                 \
-  V(MathRoundI)                                 \
-  V(MathSqrt)                                   \
-  V(ModByConstI)                                \
-  V(ModByPowerOf2I)                             \
-  V(ModI)                                       \
-  V(MulConstIS)                                 \
-  V(MulI)                                       \
-  V(MulS)                                       \
-  V(NumberTagD)                                 \
-  V(NumberTagU)                                 \
-  V(NumberUntagD)                               \
-  V(OsrEntry)                                   \
-  V(Parameter)                                  \
-  V(Power)                                      \
-  V(PreparePushArguments)                       \
-  V(PushArguments)                              \
-  V(RegExpLiteral)                              \
-  V(Return)                                     \
-  V(SeqStringGetChar)                           \
-  V(SeqStringSetChar)                           \
-  V(ShiftI)                                     \
-  V(ShiftS)                                     \
-  V(SmiTag)                                     \
-  V(SmiUntag)                                   \
-  V(StackCheck)                                 \
-  V(StoreCodeEntry)                             \
-  V(StoreContextSlot)                           \
-  V(StoreFrameContext)                          \
-  V(StoreGlobalCell)                            \
-  V(StoreKeyedExternal)                         \
-  V(StoreKeyedFixed)                            \
-  V(StoreKeyedFixedDouble)                      \
-  V(StoreKeyedGeneric)                          \
-  V(StoreNamedField)                            \
-  V(StoreNamedGeneric)                          \
-  V(StringAdd)                                  \
-  V(StringCharCodeAt)                           \
-  V(StringCharFromCode)                         \
-  V(StringCompareAndBranch)                     \
-  V(SubI)                                       \
-  V(SubS)                                       \
-  V(TaggedToI)                                  \
-  V(ThisFunction)                               \
-  V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
-  V(TrapAllocationMemento)                      \
-  V(TruncateDoubleToIntOrSmi)                   \
-  V(Typeof)                                     \
-  V(TypeofIsAndBranch)                          \
-  V(Uint32ToDouble)                             \
-  V(UnknownOSRValue)                            \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddE)                                    \
+  V(AddI)                                    \
+  V(AddS)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BitS)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallFunction)                            \
+  V(CallJSFunction)                          \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CallWithDescriptor)                      \
+  V(CheckInstanceType)                       \
+  V(CheckMapValue)                           \
+  V(CheckMaps)                               \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CmpHoleAndBranchD)                       \
+  V(CmpHoleAndBranchT)                       \
+  V(CmpMapAndBranch)                         \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpT)                                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToIntOrSmi)                        \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsStringAndBranch)                       \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyedExternal)                       \
+  V(LoadKeyedFixed)                          \
+  V(LoadKeyedFixedDouble)                    \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(LoadRoot)                                \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathAbsTagged)                           \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloorD)                              \
+  V(MathFloorI)                              \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRoundD)                              \
+  V(MathRoundI)                              \
+  V(MathSqrt)                                \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulConstIS)                              \
+  V(MulI)                                    \
+  V(MulS)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PreparePushArguments)                    \
+  V(PushArguments)                           \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(ShiftS)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyedExternal)                      \
+  V(StoreKeyedFixed)                         \
+  V(StoreKeyedFixedDouble)                   \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(SubS)                                    \
+  V(TaggedToI)                               \
+  V(TailCallThroughMegamorphicCache)         \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(TruncateDoubleToIntOrSmi)                \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
 
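[Editor's note, not part of the patch] Throughout the hunks below, the patch renames V8_FINAL/V8_OVERRIDE to FINAL/OVERRIDE and ASSERT to DCHECK. These spellings are portability macros rather than keywords; the real definitions live elsewhere in the tree (base/macros.h and the logging header), so the following is a hedged sketch of their typical shape, not V8's literal code:

// Sketch only: expand to the C++11 keywords when available, else to nothing.
#if __cplusplus >= 201103L
#define FINAL final
#define OVERRIDE override
#else
#define FINAL
#define OVERRIDE
#endif
// DCHECK is a debug-only assertion; a simplified stand-in:
#include <cassert>
#ifdef DEBUG
#define DCHECK(condition) assert(condition)
#else
#define DCHECK(condition) ((void)0)
#endif
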
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {                      \
+  virtual Opcode opcode() const FINAL OVERRIDE {                            \
     return LInstruction::k##type;                                           \
   }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;   \
-  virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {               \
+  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;         \
+  virtual const char* Mnemonic() const FINAL OVERRIDE {                     \
     return mnemonic;                                                        \
   }                                                                         \
   static L##type* cast(LInstruction* instr) {                               \
-    ASSERT(instr->Is##type());                                              \
+    DCHECK(instr->Is##type());                                              \
     return reinterpret_cast<L##type*>(instr);                               \
   }
 
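[Editor's note, not part of the patch] The V(...) list above plus DECLARE_CONCRETE_INSTRUCTION form an X-macro: the list is re-expanded with different definitions of V to stamp out the opcode enum, the Is##type() predicates, and the per-class boilerplate. A minimal sketch of one such expansion (the list macro's own name sits outside this hunk, so LITHIUM_CONCRETE_INSTRUCTION_LIST is an assumption borrowed from the other backends):

// Sketch only: generating the opcode enum from the instruction list.
enum Opcode {
#define DECLARE_OPCODE(type) k##type,
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
  kNumberOfInstructions
};
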
@@ -233,6 +235,9 @@
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
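[Editor's note, not part of the patch] The new TryDelete() hook lets an instruction remove itself when an optimization proves it dead; the base implementation conservatively refuses. The calling pass is not part of this header, so the loop below is only an illustrative sketch:

// Sketch only: offer every instruction the chance to delete itself;
// anything that cannot prove it is dead returns false and is kept.
void TryDeleteDeadInstructions(const ZoneList<LInstruction*>* instructions) {
  for (int i = 0; i < instructions->length(); i++) {
    instructions->at(i)->TryDelete();  // no-op unless an override succeeds
  }
}
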
@@ -290,7 +295,7 @@
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+  virtual bool HasResult() const FINAL OVERRIDE {
     return (R != 0) && (result() != NULL);
   }
   void set_result(LOperand* operand) { results_[0] = operand; }
@@ -312,17 +317,38 @@
 
  private:
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return I; }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+  virtual int TempCount() FINAL OVERRIDE { return T; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
 };
 
 
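[Editor's note, not part of the patch] A reading aid for the declarations that follow: the template arguments of LTemplateInstruction<R, I, T> fix the result, input, and temp counts at compile time, so each instruction embeds fixed-size operand arrays instead of heap lists. The new LTailCallThroughMegamorphicCache below is <0, 3, 0>: no result, three inputs (context, receiver, name), no temps. A compilable sketch of the storage shape this implies (the zero-size guard is an artifact of the sketch; V8's own container handles that case):

// Sketch only: the members behind results_[] / inputs_[] / temps_[].
template <int R, int I, int T>
class LTemplateInstructionSketch {
 protected:
  LOperand* results_[R == 0 ? 1 : R];  // at most one result slot is used
  LOperand* inputs_[I == 0 ? 1 : I];
  LOperand* temps_[T == 0 ? 1 : T];
};
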
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+    : public LTemplateInstruction<0, 3, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  explicit LTailCallThroughMegamorphicCache(LOperand* context,
+                                            LOperand* receiver,
+                                            LOperand* name) {
+    inputs_[0] = context;
+    inputs_[1] = receiver;
+    inputs_[2] = name;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* name() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+                               "tail-call-through-megamorphic-cache")
+  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -334,7 +360,7 @@
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+  virtual bool IsControl() const FINAL OVERRIDE { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -384,10 +410,10 @@
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const V8_OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual bool IsGap() const OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
-    ASSERT(instr->IsGap());
+    DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
   }
 
@@ -421,11 +447,11 @@
 };
 
 
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -433,7 +459,7 @@
 };
 
 
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LDrop(int count) : count_(count) { }
 
@@ -446,14 +472,14 @@
 };
 
 
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LDummy() { }
+  LDummy() {}
   DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
 };
 
 
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDummyUse(LOperand* value) {
     inputs_[0] = value;
@@ -462,14 +488,14 @@
 };
 
 
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  virtual bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -478,7 +504,7 @@
 };
 
 
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LLazyBailout() : gap_instructions_size_(0) { }
 
@@ -494,17 +520,17 @@
 };
 
 
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
  public:
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -520,18 +546,18 @@
 };
 
 
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
 
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LAccessArgumentsAt(LOperand* arguments,
                      LOperand* length,
@@ -547,11 +573,11 @@
   LOperand* length() { return inputs_[1]; }
   LOperand* index() { return inputs_[2]; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LAddE V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddE FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LAddE(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -566,7 +592,7 @@
 };
 
 
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LAddI(LOperand* left, LOperand* right)
       : shift_(NO_SHIFT), shift_amount_(0) {
@@ -595,7 +621,7 @@
 };
 
 
-class LAddS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddS FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LAddS(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -610,7 +636,7 @@
 };
 
 
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 3> {
  public:
   LAllocate(LOperand* context,
             LOperand* size,
@@ -635,7 +661,7 @@
 };
 
 
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
@@ -656,7 +682,7 @@
 };
 
 
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 1> {
  public:
   explicit LArgumentsElements(LOperand* temp) {
     temps_[0] = temp;
@@ -669,7 +695,7 @@
 };
 
 
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArgumentsLength(LOperand* elements) {
     inputs_[0] = elements;
@@ -681,7 +707,7 @@
 };
 
 
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op,
                LOperand* left,
@@ -695,18 +721,18 @@
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticD;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LArithmeticT(Token::Value op,
                LOperand* context,
@@ -723,18 +749,18 @@
   LOperand* right() { return inputs_[2]; }
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticT;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   explicit LBoundsCheck(LOperand* index, LOperand* length) {
     inputs_[0] = index;
@@ -749,7 +775,7 @@
 };
 
 
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LBitI(LOperand* left, LOperand* right)
       : shift_(NO_SHIFT), shift_amount_(0) {
@@ -780,7 +806,7 @@
 };
 
 
-class LBitS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitS FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LBitS(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -797,7 +823,7 @@
 };
 
 
-class LBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LBranch FINAL : public LControlInstruction<1, 2> {
  public:
   explicit LBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -812,11 +838,11 @@
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallJSFunction(LOperand* function) {
     inputs_[0] = function;
@@ -827,13 +853,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -850,7 +876,7 @@
 };
 
 
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNew(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -863,13 +889,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -882,13 +908,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallRuntime(LOperand* context) {
     inputs_[0] = context;
@@ -899,7 +925,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -909,7 +935,7 @@
 };
 
 
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallStub(LOperand* context) {
     inputs_[0] = context;
@@ -922,7 +948,7 @@
 };
 
 
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   explicit LCheckInstanceType(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -937,9 +963,9 @@
 };
 
 
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
-  LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
+  explicit LCheckMaps(LOperand* value = NULL, LOperand* temp = NULL) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
@@ -952,7 +978,7 @@
 };
 
 
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
     inputs_[0] = value;
@@ -965,7 +991,7 @@
 };
 
 
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
@@ -977,7 +1003,7 @@
 };
 
 
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckValue(LOperand* value) {
     inputs_[0] = value;
@@ -990,7 +1016,7 @@
 };
 
 
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampDToUint8(LOperand* unclamped) {
     inputs_[0] = unclamped;
@@ -1002,7 +1028,7 @@
 };
 
 
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampIToUint8(LOperand* unclamped) {
     inputs_[0] = unclamped;
@@ -1014,23 +1040,21 @@
 };
 
 
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
-  LClampTToUint8(LOperand* unclamped, LOperand* temp1, LOperand* temp2) {
+  LClampTToUint8(LOperand* unclamped, LOperand* temp1) {
     inputs_[0] = unclamped;
     temps_[0] = temp1;
-    temps_[1] = temp2;
   }
 
   LOperand* unclamped() { return inputs_[0]; }
   LOperand* temp1() { return temps_[0]; }
-  LOperand* temp2() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
 };
 
 
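[Editor's note, not part of the patch] The Clamp*ToUint8 instructions implement Uint8ClampedArray store semantics for integer, double, and tagged inputs; this hunk also drops one of LClampTToUint8's two temps. A hedged sketch of the value semantics (the treatment of exact .5 cases below relies on the default round-to-nearest-even mode and is an assumption about the real helper):

// Sketch only: saturate to [0, 255]; NaN clamps to 0.
#include <cmath>
int ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;    // catches NaN as well as value <= 0
  if (value >= 255.0) return 255;  // saturate the high end
  return static_cast<int>(std::nearbyint(value));  // round to nearest
}
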
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleBits(LOperand* value) {
     inputs_[0] = value;
@@ -1043,7 +1067,7 @@
 };
 
 
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LConstructDouble(LOperand* hi, LOperand* lo) {
     inputs_[0] = hi;
@@ -1057,7 +1081,7 @@
 };
 
 
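[Editor's note, not part of the patch] LDoubleBits and LConstructDouble move between a double and the two 32-bit halves of its IEEE-754 encoding. A self-contained sketch of the underlying bit operations (illustrative only):

// Sketch only: split/reassemble a double without aliasing UB.
#include <cstdint>
#include <cstring>
uint32_t DoubleHi(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof bits);
  return static_cast<uint32_t>(bits >> 32);
}
double ConstructDouble(uint32_t hi, uint32_t lo) {
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  return d;
}
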
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -1073,11 +1097,11 @@
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpHoleAndBranchD V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpHoleAndBranchD FINAL : public LControlInstruction<1, 1> {
  public:
   explicit LCmpHoleAndBranchD(LOperand* object, LOperand* temp) {
     inputs_[0] = object;
@@ -1092,7 +1116,7 @@
 };
 
 
-class LCmpHoleAndBranchT V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranchT FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpHoleAndBranchT(LOperand* object) {
     inputs_[0] = object;
@@ -1105,7 +1129,7 @@
 };
 
 
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LCmpMapAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1122,7 +1146,7 @@
 };
 
 
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1137,7 +1161,7 @@
 };
 
 
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LCmpT(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1156,7 +1180,7 @@
 };
 
 
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1172,7 +1196,7 @@
 };
 
 
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCompareNumericAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1191,11 +1215,11 @@
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1204,7 +1228,7 @@
 };
 
 
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1215,7 +1239,7 @@
 };
 
 
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1224,7 +1248,7 @@
 };
 
 
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1233,7 +1257,7 @@
 };
 
 
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1244,14 +1268,14 @@
 };
 
 
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
   DECLARE_HYDROGEN_ACCESSOR(Context)
 };
 
 
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDateField(LOperand* date, Smi* index) : index_(index) {
     inputs_[0] = date;
@@ -1268,13 +1292,13 @@
 };
 
 
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
 };
 
 
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LDeclareGlobals(LOperand* context) {
     inputs_[0] = context;
@@ -1287,15 +1311,15 @@
 };
 
 
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -1313,7 +1337,7 @@
 };
 
 
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -1333,7 +1357,7 @@
 };
 
 
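[Editor's note, not part of the patch] Division is split into three instructions because dividing by a statically known constant can be strength-reduced: powers of two become shifts, and other constants become a multiply by a precomputed magic reciprocal (hence the extra temp on LDivByConstI). A sketch of the power-of-two case, showing the bias needed to truncate toward zero for negative dividends (the standard technique, not V8's literal code):

// Sketch only: signed division by 1 << shift, rounding toward zero.
#include <cstdint>
int32_t DivByPowerOf2(int32_t dividend, int shift) {
  // An arithmetic shift alone rounds toward -infinity, so bias negative
  // dividends by (2^shift - 1) first.
  int32_t bias = (dividend >> 31) & ((1 << shift) - 1);
  return (dividend + bias) >> shift;
}
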
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -1350,7 +1374,7 @@
 };
 
 
-class LDoubleToIntOrSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToIntOrSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToIntOrSmi(LOperand* value) {
     inputs_[0] = value;
@@ -1365,7 +1389,7 @@
 };
 
 
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LForInCacheArray(LOperand* map) {
     inputs_[0] = map;
@@ -1381,7 +1405,7 @@
 };
 
 
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LForInPrepareMap(LOperand* context, LOperand* object) {
     inputs_[0] = context;
@@ -1395,7 +1419,7 @@
 };
 
 
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGetCachedArrayIndex(LOperand* value) {
     inputs_[0] = value;
@@ -1408,7 +1432,7 @@
 };
 
 
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
     : public LControlInstruction<1, 1> {
  public:
   LHasCachedArrayIndexAndBranch(LOperand* value, LOperand* temp) {
@@ -1423,11 +1447,11 @@
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1441,11 +1465,11 @@
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LInnerAllocatedObject V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
     inputs_[0] = base_object;
@@ -1455,13 +1479,13 @@
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
 
 
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1477,7 +1501,7 @@
 };
 
 
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInstanceOfKnownGlobal(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -1496,7 +1520,7 @@
     return lazy_deopt_env_;
   }
   virtual void SetDeferredLazyDeoptimizationEnvironment(
-      LEnvironment* env) V8_OVERRIDE {
+      LEnvironment* env) OVERRIDE {
     lazy_deopt_env_ = env;
   }
 
@@ -1505,7 +1529,7 @@
 };
 
 
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LInteger32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -1517,42 +1541,41 @@
 };
 
 
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  public:
-  LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
-                      const ZoneList<LOperand*>& operands,
-                      Zone* zone)
-    : descriptor_(descriptor),
-      inputs_(descriptor->environment_length() + 1, zone) {
-    ASSERT(descriptor->environment_length() + 1 == operands.length());
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
     inputs_.AddAll(operands, zone);
   }
 
   LOperand* target() const { return inputs_[0]; }
 
-  const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+  CallInterfaceDescriptor descriptor() { return descriptor_; }
 
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
   DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
-  const CallInterfaceDescriptor* descriptor_;
+  CallInterfaceDescriptor descriptor_;
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+  virtual int TempCount() FINAL OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
 };
 
 
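[Editor's note, not part of the patch] Unlike the fixed-arity templates above, LCallWithDescriptor sizes its operand list from the call descriptor at construction time, which is why it overrides the iterator hooks itself; this hunk also switches the descriptor from a pointer to a small by-value handle. A self-contained analog of the DCHECKed invariant (types are illustrative, not V8's):

// Sketch only: inputs_[0] holds the call target, so the operand list is
// always one longer than the descriptor's register parameter count.
#include <cassert>
#include <vector>
struct DescriptorLike { int register_parameter_count; };
void CheckOperandCount(const DescriptorLike& d,
                       const std::vector<void*>& operands) {
  assert(d.register_parameter_count + 1 ==
         static_cast<int>(operands.size()));
}
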
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInvokeFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1565,13 +1588,13 @@
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 2> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 2> {
  public:
   LIsConstructCallAndBranch(LOperand* temp1, LOperand* temp2) {
     temps_[0] = temp1;
@@ -1586,7 +1609,7 @@
 };
 
 
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 2> {
  public:
   LIsObjectAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -1601,11 +1624,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1618,11 +1641,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1633,11 +1656,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1651,11 +1674,11 @@
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
     inputs_[0] = context;
@@ -1668,11 +1691,11 @@
 
   int slot_index() const { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedField(LOperand* object) {
     inputs_[0] = object;
@@ -1685,7 +1708,7 @@
 };
 
 
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFunctionLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -1698,7 +1721,7 @@
 };
 
 
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
     inputs_[0] = function;
@@ -1713,22 +1736,25 @@
 };
 
 
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = global_object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1763,7 +1789,7 @@
   uint32_t base_offset() const {
     return this->hydrogen()->base_offset();
   }
-  void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+  void PrintDataTo(StringStream* stream) OVERRIDE {
     this->elements()->PrintTo(stream);
     stream->Add("[");
     this->key()->PrintTo(stream);
@@ -1817,31 +1843,37 @@
 };
 
 
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
  public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
     inputs_[2] = key;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
 
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object) {
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
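[Editor's note, not part of the patch] LLoadGlobalGeneric, LLoadKeyedGeneric, and LLoadNamedGeneric each gain a temp in this patch to carry the type-feedback vector consumed by vector-based load ICs. The builder side lives in the corresponding .cc file; a hedged sketch of how the temp is presumably populated (FLAG_vector_ics and VectorLoadICDescriptor are assumptions based on the V8 of this era):

// Sketch only (assumed builder-side counterpart): leave the temp NULL unless
// vector-based ICs are enabled, else pin it to the IC's vector register.
LOperand* vector = NULL;
if (FLAG_vector_ics) {
  vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
}
LLoadNamedGeneric* result =
    new (zone()) LLoadNamedGeneric(context, object, vector);
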
@@ -1850,7 +1882,7 @@
 };
 
 
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
   DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1859,7 +1891,7 @@
 };
 
 
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMapEnumLength(LOperand* value) {
     inputs_[0] = value;
@@ -1881,13 +1913,13 @@
   LOperand* value() { return this->inputs_[0]; }
   BuiltinFunctionId op() const { return this->hydrogen()->op(); }
 
-  void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
 
-class LMathAbs V8_FINAL : public LUnaryMathOperation<0> {
+class LMathAbs FINAL : public LUnaryMathOperation<0> {
  public:
   explicit LMathAbs(LOperand* value) : LUnaryMathOperation<0>(value) {}
 
@@ -1917,7 +1949,7 @@
 };
 
 
-class LMathExp V8_FINAL : public LUnaryMathOperation<4> {
+class LMathExp FINAL : public LUnaryMathOperation<4> {
  public:
   LMathExp(LOperand* value,
            LOperand* double_temp1,
@@ -1942,7 +1974,7 @@
 
 
 // Math.floor with a double result.
-class LMathFloorD V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFloorD FINAL : public LUnaryMathOperation<0> {
  public:
   explicit LMathFloorD(LOperand* value) : LUnaryMathOperation<0>(value) { }
   DECLARE_CONCRETE_INSTRUCTION(MathFloorD, "math-floor-d")
@@ -1950,14 +1982,14 @@
 
 
 // Math.floor with an integer result.
-class LMathFloorI V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFloorI FINAL : public LUnaryMathOperation<0> {
  public:
   explicit LMathFloorI(LOperand* value) : LUnaryMathOperation<0>(value) { }
   DECLARE_CONCRETE_INSTRUCTION(MathFloorI, "math-floor-i")
 };
 
 
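[Editor's note, not part of the patch] The D/I suffixes split each of these math operations by result representation: the D form keeps a double result, the I form produces an untagged integer, and the compiler picks one from how the result is used. Illustrative semantics only (JS Math.round sends halves toward +infinity; the sketch ignores the -0 and double-rounding corner cases the real code handles):

// Sketch only: the double-result flavors.
#include <cmath>
double MathFloorD(double x) { return std::floor(x); }
double MathRoundD(double x) { return std::floor(x + 0.5); }  // halves go up
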
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -1976,7 +2008,7 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -1996,7 +2028,7 @@
 };
 
 
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -2013,21 +2045,21 @@
 };
 
 
-class LMathLog V8_FINAL : public LUnaryMathOperation<0> {
+class LMathLog FINAL : public LUnaryMathOperation<0> {
  public:
   explicit LMathLog(LOperand* value) : LUnaryMathOperation<0>(value) { }
   DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
 };
 
 
-class LMathClz32 V8_FINAL : public LUnaryMathOperation<0> {
+class LMathClz32 FINAL : public LUnaryMathOperation<0> {
  public:
   explicit LMathClz32(LOperand* value) : LUnaryMathOperation<0>(value) { }
   DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
 };
 
 
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathMinMax(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -2042,7 +2074,7 @@
 };
 
 
-class LMathPowHalf V8_FINAL : public LUnaryMathOperation<0> {
+class LMathPowHalf FINAL : public LUnaryMathOperation<0> {
  public:
   explicit LMathPowHalf(LOperand* value) : LUnaryMathOperation<0>(value) { }
   DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
@@ -2050,7 +2082,7 @@
 
 
 // Math.round with a double result.
-class LMathRoundD V8_FINAL : public LUnaryMathOperation<0> {
+class LMathRoundD FINAL : public LUnaryMathOperation<0> {
  public:
   explicit LMathRoundD(LOperand* value)
       : LUnaryMathOperation<0>(value) {
@@ -2061,7 +2093,7 @@
 
 
 // Math.round with an integer result.
-class LMathRoundI V8_FINAL : public LUnaryMathOperation<1> {
+class LMathRoundI FINAL : public LUnaryMathOperation<1> {
  public:
   LMathRoundI(LOperand* value, LOperand* temp1)
       : LUnaryMathOperation<1>(value) {
@@ -2074,14 +2106,22 @@
 };
 
 
-class LMathSqrt V8_FINAL : public LUnaryMathOperation<0> {
+class LMathFround FINAL : public LUnaryMathOperation<0> {
+ public:
+  explicit LMathFround(LOperand* value) : LUnaryMathOperation<0>(value) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathSqrt FINAL : public LUnaryMathOperation<0> {
  public:
   explicit LMathSqrt(LOperand* value) : LUnaryMathOperation<0>(value) { }
   DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
 };
 
 
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -2099,7 +2139,7 @@
 };
 
 
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LModByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -2119,7 +2159,7 @@
 };
 
 
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LModI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -2134,7 +2174,7 @@
 };
 
 
-class LMulConstIS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulConstIS FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMulConstIS(LOperand* left, LConstantOperand* right) {
     inputs_[0] = left;
@@ -2149,7 +2189,7 @@
 };
 
 
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMulI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -2164,7 +2204,7 @@
 };
 
 
-class LMulS V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulS FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMulS(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -2179,7 +2219,7 @@
 };
 
 
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagD(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -2196,7 +2236,7 @@
 };
 
 
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   explicit LNumberTagU(LOperand* value,
                        LOperand* temp1,
@@ -2214,7 +2254,7 @@
 };
 
 
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LNumberUntagD(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2230,14 +2270,14 @@
 };
 
 
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
 
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LPower(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -2252,7 +2292,7 @@
 };
 
 
-class LPreparePushArguments V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LPreparePushArguments FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LPreparePushArguments(int argc) : argc_(argc) {}
 
@@ -2265,7 +2305,7 @@
 };
 
 
-class LPushArguments V8_FINAL : public LTemplateResultInstruction<0> {
+class LPushArguments FINAL : public LTemplateResultInstruction<0> {
  public:
   explicit LPushArguments(Zone* zone,
                           int capacity = kRecommendedMaxPushedArgs)
@@ -2291,15 +2331,15 @@
 
  private:
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+  virtual int TempCount() FINAL OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
 };
 
 
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LRegExpLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2312,7 +2352,7 @@
 };
 
 
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
     inputs_[0] = value;
@@ -2327,7 +2367,7 @@
     return parameter_count()->IsConstantOperand();
   }
   LConstantOperand* constant_parameter_count() {
-    ASSERT(has_constant_parameter_count());
+    DCHECK(has_constant_parameter_count());
     return LConstantOperand::cast(parameter_count());
   }
 
@@ -2335,7 +2375,7 @@
 };
 
 
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LSeqStringGetChar(LOperand* string,
                     LOperand* index,
@@ -2354,7 +2394,7 @@
 };
 
 
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 1> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 1> {
  public:
   LSeqStringSetChar(LOperand* context,
                     LOperand* string,
@@ -2379,7 +2419,7 @@
 };
 
 
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LSmiTag(LOperand* value) {
     inputs_[0] = value;
@@ -2392,7 +2432,7 @@
 };
 
 
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LSmiUntag(LOperand* value, bool needs_check)
       : needs_check_(needs_check) {
@@ -2409,7 +2449,7 @@
 };
 
 
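[Editor's note, not part of the patch] LSmiTag/LSmiUntag convert between raw integers and small-integer (Smi) tagged values. On arm64's 64-bit tagging scheme the 32-bit payload lives in the upper word with a clear low tag bit, so both directions are a single shift; a minimal sketch under that assumption:

// Sketch only: 64-bit Smi encoding (payload << 32).
#include <cstdint>
intptr_t SmiTag(int32_t value) {
  return static_cast<intptr_t>(value) << 32;  // low 32 tag bits stay zero
}
int32_t SmiUntag(intptr_t smi) {
  return static_cast<int32_t>(smi >> 32);  // arithmetic shift keeps the sign
}
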
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStackCheck(LOperand* context) {
     inputs_[0] = context;
@@ -2459,7 +2499,7 @@
   }
   uint32_t base_offset() const { return this->hydrogen()->base_offset(); }
 
-  void PrintDataTo(StringStream* stream) V8_OVERRIDE {
+  void PrintDataTo(StringStream* stream) OVERRIDE {
     this->elements()->PrintTo(stream);
     stream->Add("[");
     this->key()->PrintTo(stream);
@@ -2470,7 +2510,7 @@
     }
 
     if (this->value() == NULL) {
-      ASSERT(hydrogen()->IsConstantHoleStore() &&
+      DCHECK(hydrogen()->IsConstantHoleStore() &&
              hydrogen()->value()->representation().IsDouble());
       stream->Add("<the hole(nan)>");
     } else {
@@ -2482,7 +2522,7 @@
 };
 
 
-class LStoreKeyedExternal V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedExternal FINAL : public LStoreKeyed<1> {
  public:
   LStoreKeyedExternal(LOperand* elements, LOperand* key, LOperand* value,
                       LOperand* temp) :
@@ -2496,7 +2536,7 @@
 };
 
 
-class LStoreKeyedFixed V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedFixed FINAL : public LStoreKeyed<1> {
  public:
   LStoreKeyedFixed(LOperand* elements, LOperand* key, LOperand* value,
                    LOperand* temp) :
@@ -2510,7 +2550,7 @@
 };
 
 
-class LStoreKeyedFixedDouble V8_FINAL : public LStoreKeyed<1> {
+class LStoreKeyedFixedDouble FINAL : public LStoreKeyed<1> {
  public:
   LStoreKeyedFixedDouble(LOperand* elements, LOperand* key, LOperand* value,
                          LOperand* temp) :
@@ -2525,7 +2565,7 @@
 };
 
 
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyedGeneric(LOperand* context,
                      LOperand* obj,
@@ -2545,13 +2585,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
  public:
   LStoreNamedField(LOperand* object, LOperand* value,
                    LOperand* temp0, LOperand* temp1) {
@@ -2569,7 +2609,7 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2577,7 +2617,7 @@
 };
 
 
-class LStoreNamedGeneric V8_FINAL: public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
     inputs_[0] = context;
@@ -2592,14 +2632,14 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -2617,7 +2657,7 @@
 
 
 
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
     inputs_[0] = context;
@@ -2634,7 +2674,7 @@
 };
 
 
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharFromCode(LOperand* context, LOperand* char_code) {
     inputs_[0] = context;
@@ -2649,7 +2689,7 @@
 };
 
 
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
  public:
   LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -2667,12 +2707,12 @@
 
   Token::Value op() const { return hydrogen()->token(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
 // Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   explicit LTaggedToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -2691,7 +2731,7 @@
 };
 
 
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
       : op_(op), can_deopt_(can_deopt) {
@@ -2712,19 +2752,17 @@
 };
 
 
-class LShiftS V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LShiftS FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
-  LShiftS(Token::Value op, LOperand* left, LOperand* right, LOperand* temp,
-          bool can_deopt) : op_(op), can_deopt_(can_deopt) {
+  LShiftS(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
     inputs_[0] = left;
     inputs_[1] = right;
-    temps_[0] = temp;
   }
 
   Token::Value op() const { return op_; }
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
-  LOperand* temp() { return temps_[0]; }
   bool can_deopt() const { return can_deopt_; }
 
   DECLARE_CONCRETE_INSTRUCTION(ShiftS, "shift-s")
@@ -2735,7 +2773,7 @@
 };
 
 
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 1> {
+class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LStoreCodeEntry(LOperand* function, LOperand* code_object,
                   LOperand* temp) {
@@ -2748,14 +2786,14 @@
   LOperand* code_object() { return inputs_[1]; }
   LOperand* temp() { return temps_[0]; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
   DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
 };
 
 
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -2772,11 +2810,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 2> {
  public:
   LStoreGlobalCell(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -2793,7 +2831,7 @@
 };
 
 
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSubI(LOperand* left, LOperand* right)
       : shift_(NO_SHIFT), shift_amount_(0) {
@@ -2837,14 +2875,14 @@
 };
 
 
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
   DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LToFastProperties(LOperand* value) {
     inputs_[0] = value;
@@ -2857,7 +2895,7 @@
 };
 
 
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* context,
@@ -2878,7 +2916,7 @@
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2889,7 +2927,7 @@
 };
 
 
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 2> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 2> {
  public:
   LTrapAllocationMemento(LOperand* object, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = object;
@@ -2905,7 +2943,7 @@
 };
 
 
-class LTruncateDoubleToIntOrSmi V8_FINAL
+class LTruncateDoubleToIntOrSmi FINAL
     : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LTruncateDoubleToIntOrSmi(LOperand* value) {
@@ -2922,7 +2960,7 @@
 };
 
 
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -2936,7 +2974,7 @@
 };
 
 
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 2> {
  public:
   LTypeofIsAndBranch(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -2953,11 +2991,11 @@
 
   Handle<String> type_literal() const { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LUint32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -2969,7 +3007,7 @@
 };
 
 
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LCheckMapValue(LOperand* value, LOperand* map, LOperand* temp) {
     inputs_[0] = value;
@@ -2985,7 +3023,7 @@
 };
 
 
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadFieldByIndex(LOperand* object, LOperand* index) {
     inputs_[0] = object;
@@ -3028,7 +3066,7 @@
 };
 
 
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LWrapReceiver(LOperand* receiver, LOperand* function) {
     inputs_[0] = receiver;
@@ -3044,7 +3082,7 @@
 
 
 class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
  public:
   LPlatformChunk(CompilationInfo* info, HGraph* graph)
       : LChunk(info, graph) { }
@@ -3054,17 +3092,13 @@
 };
 
 
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
  public:
   LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
-      : LChunkBuilderBase(graph->zone()),
-        chunk_(NULL),
-        info_(info),
-        graph_(graph),
-        status_(UNUSED),
+      : LChunkBuilderBase(info, graph),
         current_instruction_(NULL),
         current_block_(NULL),
-        allocator_(allocator) { }
+        allocator_(allocator) {}
 
   // Build the sequence for the graph.
   LPlatformChunk* Build();
@@ -3087,27 +3121,6 @@
   static bool HasMagicNumberForDivision(int32_t divisor);
 
  private:
-  enum Status {
-    UNUSED,
-    BUILDING,
-    DONE,
-    ABORTED
-  };
-
-  HGraph* graph() const { return graph_; }
-  Isolate* isolate() const { return info_->isolate(); }
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_building() const { return status_ == BUILDING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  int argument_count() const { return argument_count_; }
-  CompilationInfo* info() const { return info_; }
-  Heap* heap() const { return isolate()->heap(); }
-
-  void Abort(BailoutReason reason);
-
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -3156,6 +3169,8 @@
   // Temporary operand that must be in a double register.
   MUST_USE_RESULT LUnallocated* TempDoubleRegister();
 
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+
   // Temporary operand that must be in a fixed double register.
   MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
 
@@ -3197,7 +3212,7 @@
     if (instr->IsAdd() || instr->IsSub()) {
       return Assembler::IsImmAddSub(imm) || Assembler::IsImmAddSub(-imm);
     } else {
-      ASSERT(instr->IsBitwise());
+      DCHECK(instr->IsBitwise());
       unsigned unused_n, unused_imm_s, unused_imm_r;
       return Assembler::IsImmLogical(imm, kWRegSizeInBits,
                                      &unused_n, &unused_imm_s, &unused_imm_r);
@@ -3229,10 +3244,6 @@
   LInstruction* DoArithmeticT(Token::Value op,
                               HBinaryOperation* instr);
 
-  LPlatformChunk* chunk_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
   LAllocator* allocator_;
diff --git a/src/arm64/lithium-codegen-arm64.cc b/src/arm64/lithium-codegen-arm64.cc
index 29c13ac..2998642 100644
--- a/src/arm64/lithium-codegen-arm64.cc
+++ b/src/arm64/lithium-codegen-arm64.cc
@@ -6,15 +6,18 @@
 
 #include "src/arm64/lithium-codegen-arm64.h"
 #include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
-#include "src/stub-cache.h"
 #include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
@@ -238,13 +241,13 @@
       translation->BeginConstructStubFrame(closure_id, translation_size);
       break;
     case JS_GETTER:
-      ASSERT(translation_size == 1);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
       translation->BeginGetterStubFrame(closure_id);
       break;
     case JS_SETTER:
-      ASSERT(translation_size == 2);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
     case STUB:
@@ -386,7 +389,7 @@
                                RelocInfo::Mode mode,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
 
   Assembler::BlockPoolsScope scope(masm_);
   __ Call(code, mode);
@@ -402,9 +405,9 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->function()).Is(x1));
-  ASSERT(ToRegister(instr->result()).Is(x0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).Is(x1));
+  DCHECK(ToRegister(instr->result()).Is(x0));
 
   int arity = instr->arity();
   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
@@ -414,9 +417,9 @@
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(instr->IsMarkedAsCall());
-  ASSERT(ToRegister(instr->constructor()).is(x1));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToRegister(instr->constructor()).is(x1));
 
   __ Mov(x0, instr->arity());
   // No cell in x2 for construct type feedback in optimized code.
@@ -426,14 +429,14 @@
   CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
   after_push_argument_ = false;
 
-  ASSERT(ToRegister(instr->result()).is(x0));
+  DCHECK(ToRegister(instr->result()).is(x0));
 }
 
 
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  ASSERT(instr->IsMarkedAsCall());
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->constructor()).is(x1));
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(x1));
 
   __ Mov(x0, Operand(instr->arity()));
   __ LoadRoot(x2, Heap::kUndefinedValueRootIndex);
@@ -474,7 +477,7 @@
   }
   after_push_argument_ = false;
 
-  ASSERT(ToRegister(instr->result()).is(x0));
+  DCHECK(ToRegister(instr->result()).is(x0));
 }
 
 
@@ -482,7 +485,7 @@
                            int num_arguments,
                            LInstruction* instr,
                            SaveFPRegsMode save_doubles) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
 
   __ CallRuntime(function, num_arguments, save_doubles);
 
@@ -529,7 +532,7 @@
   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
   } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
@@ -540,7 +543,7 @@
                                Safepoint::Kind kind,
                                int arguments,
                                Safepoint::DeoptMode deopt_mode) {
-  ASSERT(expected_safepoint_kind_ == kind);
+  DCHECK(expected_safepoint_kind_ == kind);
 
   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint = safepoints_.DefineSafepoint(
@@ -580,16 +583,9 @@
 }
 
 
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
-    LPointerMap* pointers, int arguments, Safepoint::DeoptMode deopt_mode) {
-  RecordSafepoint(
-      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
 bool LCodeGen::GenerateCode() {
   LPhase phase("Z_Code generation", chunk());
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   status_ = GENERATING;
 
   // Open a frame scope to indicate that there is a frame on the stack.  The
@@ -597,17 +593,14 @@
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::NONE);
 
-  return GeneratePrologue() &&
-      GenerateBody() &&
-      GenerateDeferredCode() &&
-      GenerateDeoptJumpTable() &&
-      GenerateSafepointTable();
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
 }
 
 
 void LCodeGen::SaveCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Save clobbered callee double registers");
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator iterator(doubles);
@@ -624,8 +617,8 @@
 
 
 void LCodeGen::RestoreCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Restore clobbered callee double registers");
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator iterator(doubles);
@@ -642,7 +635,7 @@
 
 
 bool LCodeGen::GeneratePrologue() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -661,14 +654,14 @@
       __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
 
       __ Ldr(x10, GlobalObjectMemOperand());
-      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalReceiverOffset));
+      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
       __ Poke(x10, receiver_offset);
 
       __ Bind(&ok);
     }
   }
 
-  ASSERT(__ StackPointer().Is(jssp));
+  DCHECK(__ StackPointer().Is(jssp));
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
     if (info()->IsStub()) {
@@ -703,7 +696,7 @@
       need_write_barrier = false;
     } else {
       __ Push(x1);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in x0. It replaces the context passed to us. It's
@@ -761,7 +754,7 @@
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  ASSERT(slots >= 0);
+  DCHECK(slots >= 0);
   __ Claim(slots);
 }
 
@@ -777,7 +770,7 @@
 
 
 bool LCodeGen::GenerateDeferredCode() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && (i < deferred_.length()); i++) {
       LDeferredCode* code = deferred_[i];
@@ -797,8 +790,8 @@
 
       if (NeedsDeferredFrame()) {
         Comment(";;; Build frame");
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
         frame_is_built_ = true;
         __ Push(lr, fp, cp);
         __ Mov(fp, Smi::FromInt(StackFrame::STUB));
@@ -812,7 +805,7 @@
 
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
-        ASSERT(frame_is_built_);
+        DCHECK(frame_is_built_);
         __ Pop(xzr, cp, fp, lr);
         frame_is_built_ = false;
       }
@@ -831,28 +824,23 @@
 }
 
 
-bool LCodeGen::GenerateDeoptJumpTable() {
+bool LCodeGen::GenerateJumpTable() {
   Label needs_frame, restore_caller_doubles, call_deopt_entry;
 
-  if (deopt_jump_table_.length() > 0) {
+  if (jump_table_.length() > 0) {
     Comment(";;; -------------------- Jump table --------------------");
-    Address base = deopt_jump_table_[0]->address;
+    Address base = jump_table_[0]->address;
 
     UseScratchRegisterScope temps(masm());
     Register entry_offset = temps.AcquireX();
 
-    int length = deopt_jump_table_.length();
+    int length = jump_table_.length();
     for (int i = 0; i < length; i++) {
-      __ Bind(&deopt_jump_table_[i]->label);
+      Deoptimizer::JumpTableEntry* table_entry = jump_table_[i];
+      __ Bind(&table_entry->label);
 
-      Deoptimizer::BailoutType type = deopt_jump_table_[i]->bailout_type;
-      Address entry = deopt_jump_table_[i]->address;
-      int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-      if (id == Deoptimizer::kNotDeoptimizationEntry) {
-        Comment(";;; jump table entry %d.", i);
-      } else {
-        Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-      }
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->reason);
 
       // Second-level deopt table entries are contiguous and small, so instead
       // of loading the full, absolute address of each one, load the base
@@ -863,13 +851,13 @@
       // branch.
       bool last_entry = (i + 1) == length;
 
-      if (deopt_jump_table_[i]->needs_frame) {
-        ASSERT(!info()->saves_caller_doubles());
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
         if (!needs_frame.is_bound()) {
           // This variant of deopt can only be used with stubs. Since we don't
           // have a function pointer to install in the stack frame that we're
           // building, install a special marker there instead.
-          ASSERT(info()->IsStub());
+          DCHECK(info()->IsStub());
 
           UseScratchRegisterScope temps(masm());
           Register stub_marker = temps.AcquireX();
@@ -883,7 +871,7 @@
           __ B(&needs_frame);
         }
       } else if (info()->saves_caller_doubles()) {
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         if (!restore_caller_doubles.is_bound()) {
           __ Bind(&restore_caller_doubles);
           RestoreCallerDoubles();
@@ -922,7 +910,7 @@
 
 
 bool LCodeGen::GenerateSafepointTable() {
-  ASSERT(is_done());
+  DCHECK(is_done());
   // We do not know how much data will be emitted for the safepoint table, so
   // force emission of the veneer pool.
   masm()->CheckVeneerPool(true, true);
@@ -932,7 +920,7 @@
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
-  ASSERT(is_done());
+  DCHECK(is_done());
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
@@ -987,7 +975,7 @@
 
 
 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
+  DCHECK(deoptimization_literals_.length() == 0);
 
   const ZoneList<Handle<JSFunction> >* inlined_closures =
       chunk()->inlined_closures();
@@ -1001,9 +989,9 @@
 
 
 void LCodeGen::DeoptimizeBranch(
-    LEnvironment* environment,
-    BranchType branch_type, Register reg, int bit,
-    Deoptimizer::BailoutType* override_bailout_type) {
+    LInstruction* instr, const char* detail, BranchType branch_type,
+    Register reg, int bit, Deoptimizer::BailoutType* override_bailout_type) {
+  LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
   Deoptimizer::BailoutType bailout_type =
     info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
@@ -1012,8 +1000,8 @@
     bailout_type = *override_bailout_type;
   }
 
-  ASSERT(environment->HasBeenRegistered());
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  DCHECK(environment->HasBeenRegistered());
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
   int id = environment->deoptimization_index();
   Address entry =
       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
@@ -1035,7 +1023,7 @@
     __ Mov(w1, FLAG_deopt_every_n_times);
     __ Str(w1, MemOperand(x0));
     __ Pop(x2, x1, x0);
-    ASSERT(frame_is_built_);
+    DCHECK(frame_is_built_);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
     __ Unreachable();
 
@@ -1052,102 +1040,109 @@
     __ Bind(&dont_trap);
   }
 
-  ASSERT(info()->IsStub() || frame_is_built_);
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
+  DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to build frame, or restore caller doubles.
   if (branch_type == always &&
       frame_is_built_ && !info()->saves_caller_doubles()) {
+    DeoptComment(reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry* table_entry =
+        new (zone()) Deoptimizer::JumpTableEntry(entry, reason, bailout_type,
+                                                 !frame_is_built_);
     // We often have several deopts to the same entry; reuse the last
     // jump entry if this is the case.
-    if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last()->address != entry) ||
-        (deopt_jump_table_.last()->bailout_type != bailout_type) ||
-        (deopt_jump_table_.last()->needs_frame != !frame_is_built_)) {
-      Deoptimizer::JumpTableEntry* table_entry =
-        new(zone()) Deoptimizer::JumpTableEntry(entry,
-                                                bailout_type,
-                                                !frame_is_built_);
-      deopt_jump_table_.Add(table_entry, zone());
+    if (jump_table_.is_empty() ||
+        !table_entry->IsEquivalentTo(*jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
     }
-    __ B(&deopt_jump_table_.last()->label,
-         branch_type, reg, bit);
+    __ B(&jump_table_.last()->label, branch_type, reg, bit);
   }
 }
 
 
-void LCodeGen::Deoptimize(LEnvironment* environment,
-                          Deoptimizer::BailoutType* override_bailout_type) {
-  DeoptimizeBranch(environment, always, NoReg, -1, override_bailout_type);
+void LCodeGen::Deoptimize(LInstruction* instr,
+                          Deoptimizer::BailoutType* override_bailout_type,
+                          const char* detail) {
+  DeoptimizeBranch(instr, detail, always, NoReg, -1, override_bailout_type);
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition cond, LEnvironment* environment) {
-  DeoptimizeBranch(environment, static_cast<BranchType>(cond));
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+                            const char* detail) {
+  DeoptimizeBranch(instr, detail, static_cast<BranchType>(cond));
 }
 
 
-void LCodeGen::DeoptimizeIfZero(Register rt, LEnvironment* environment) {
-  DeoptimizeBranch(environment, reg_zero, rt);
+void LCodeGen::DeoptimizeIfZero(Register rt, LInstruction* instr,
+                                const char* detail) {
+  DeoptimizeBranch(instr, detail, reg_zero, rt);
 }
 
 
-void LCodeGen::DeoptimizeIfNotZero(Register rt, LEnvironment* environment) {
-  DeoptimizeBranch(environment, reg_not_zero, rt);
+void LCodeGen::DeoptimizeIfNotZero(Register rt, LInstruction* instr,
+                                   const char* detail) {
+  DeoptimizeBranch(instr, detail, reg_not_zero, rt);
 }
 
 
-void LCodeGen::DeoptimizeIfNegative(Register rt, LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfNegative(Register rt, LInstruction* instr,
+                                    const char* detail) {
   int sign_bit = rt.Is64Bits() ? kXSignBit : kWSignBit;
-  DeoptimizeIfBitSet(rt, sign_bit, environment);
+  DeoptimizeIfBitSet(rt, sign_bit, instr, detail);
 }
 
 
-void LCodeGen::DeoptimizeIfSmi(Register rt,
-                               LEnvironment* environment) {
-  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), environment);
+void LCodeGen::DeoptimizeIfSmi(Register rt, LInstruction* instr,
+                               const char* detail) {
+  DeoptimizeIfBitClear(rt, MaskToBit(kSmiTagMask), instr, detail);
 }
 
 
-void LCodeGen::DeoptimizeIfNotSmi(Register rt, LEnvironment* environment) {
-  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), environment);
+void LCodeGen::DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
+                                  const char* detail) {
+  DeoptimizeIfBitSet(rt, MaskToBit(kSmiTagMask), instr, detail);
 }
 
 
-void LCodeGen::DeoptimizeIfRoot(Register rt,
-                                Heap::RootListIndex index,
-                                LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+                                LInstruction* instr, const char* detail) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(eq, environment);
+  DeoptimizeIf(eq, instr, detail);
 }
 
 
-void LCodeGen::DeoptimizeIfNotRoot(Register rt,
-                                   Heap::RootListIndex index,
-                                   LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+                                   LInstruction* instr, const char* detail) {
   __ CompareRoot(rt, index);
-  DeoptimizeIf(ne, environment);
+  DeoptimizeIf(ne, instr, detail);
 }
 
 
-void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input,
-                                     LEnvironment* environment) {
+void LCodeGen::DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
+                                     const char* detail) {
   __ TestForMinusZero(input);
-  DeoptimizeIf(vs, environment);
+  DeoptimizeIf(vs, instr, detail);
 }
 
 
-void LCodeGen::DeoptimizeIfBitSet(Register rt,
-                                  int bit,
-                                  LEnvironment* environment) {
-  DeoptimizeBranch(environment, reg_bit_set, rt, bit);
+void LCodeGen::DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr) {
+  __ CompareObjectMap(object, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr, "not heap number");
 }
 
 
-void LCodeGen::DeoptimizeIfBitClear(Register rt,
-                                    int bit,
-                                    LEnvironment* environment) {
-  DeoptimizeBranch(environment, reg_bit_clear, rt, bit);
+void LCodeGen::DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
+                                  const char* detail) {
+  DeoptimizeBranch(instr, detail, reg_bit_set, rt, bit);
+}
+
+
+void LCodeGen::DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
+                                    const char* detail) {
+  DeoptimizeBranch(instr, detail, reg_bit_clear, rt, bit);
 }
 
 
@@ -1159,7 +1154,7 @@
 
     if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
       ptrdiff_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      ASSERT((padding_size % kInstructionSize) == 0);
+      DCHECK((padding_size % kInstructionSize) == 0);
       InstructionAccurateScope instruction_accurate(
           masm(), padding_size / kInstructionSize);
 
@@ -1175,16 +1170,16 @@
 
 Register LCodeGen::ToRegister(LOperand* op) const {
   // TODO(all): support zero register results, as ToRegister32.
-  ASSERT((op != NULL) && op->IsRegister());
+  DCHECK((op != NULL) && op->IsRegister());
   return Register::FromAllocationIndex(op->index());
 }
 
 
 Register LCodeGen::ToRegister32(LOperand* op) const {
-  ASSERT(op != NULL);
+  DCHECK(op != NULL);
   if (op->IsConstantOperand()) {
     // If this is a constant operand, the result must be the zero register.
-    ASSERT(ToInteger32(LConstantOperand::cast(op)) == 0);
+    DCHECK(ToInteger32(LConstantOperand::cast(op)) == 0);
     return wzr;
   } else {
     return ToRegister(op).W();
@@ -1199,27 +1194,27 @@
 
 
 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  ASSERT((op != NULL) && op->IsDoubleRegister());
+  DCHECK((op != NULL) && op->IsDoubleRegister());
   return DoubleRegister::FromAllocationIndex(op->index());
 }
 
 
 Operand LCodeGen::ToOperand(LOperand* op) {
-  ASSERT(op != NULL);
+  DCHECK(op != NULL);
   if (op->IsConstantOperand()) {
     LConstantOperand* const_op = LConstantOperand::cast(op);
     HConstant* constant = chunk()->LookupConstant(const_op);
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsSmi()) {
-      ASSERT(constant->HasSmiValue());
+      DCHECK(constant->HasSmiValue());
       return Operand(Smi::FromInt(constant->Integer32Value()));
     } else if (r.IsInteger32()) {
-      ASSERT(constant->HasInteger32Value());
+      DCHECK(constant->HasInteger32Value());
       return Operand(constant->Integer32Value());
     } else if (r.IsDouble()) {
       Abort(kToOperandUnsupportedDoubleImmediate);
     }
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     return Operand(constant->handle(isolate()));
   } else if (op->IsRegister()) {
     return Operand(ToRegister(op));
@@ -1233,18 +1228,8 @@
 }
 
 
-Operand LCodeGen::ToOperand32I(LOperand* op) {
-  return ToOperand32(op, SIGNED_INT32);
-}
-
-
-Operand LCodeGen::ToOperand32U(LOperand* op) {
-  return ToOperand32(op, UNSIGNED_INT32);
-}
-
-
-Operand LCodeGen::ToOperand32(LOperand* op, IntegerSignedness signedness) {
-  ASSERT(op != NULL);
+Operand LCodeGen::ToOperand32(LOperand* op) {
+  DCHECK(op != NULL);
   if (op->IsRegister()) {
     return Operand(ToRegister32(op));
   } else if (op->IsConstantOperand()) {
@@ -1252,10 +1237,7 @@
     HConstant* constant = chunk()->LookupConstant(const_op);
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsInteger32()) {
-      ASSERT(constant->HasInteger32Value());
-      return (signedness == SIGNED_INT32)
-          ? Operand(constant->Integer32Value())
-          : Operand(static_cast<uint32_t>(constant->Integer32Value()));
+      return Operand(constant->Integer32Value());
     } else {
       // Other constants not implemented.
       Abort(kToOperand32UnsupportedImmediate);
@@ -1267,17 +1249,17 @@
 }
 
 
-static ptrdiff_t ArgumentsOffsetWithoutFrame(ptrdiff_t index) {
-  ASSERT(index < 0);
+static int64_t ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
   return -(index + 1) * kPointerSize;
 }
 
 
 MemOperand LCodeGen::ToMemOperand(LOperand* op, StackMode stack_mode) const {
-  ASSERT(op != NULL);
-  ASSERT(!op->IsRegister());
-  ASSERT(!op->IsDoubleRegister());
-  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  DCHECK(op != NULL);
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     int fp_offset = StackSlotOffset(op->index());
     if (op->index() >= 0) {
@@ -1316,17 +1298,15 @@
 
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }
 
 
-template<class LI>
-Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info,
-                                          IntegerSignedness signedness) {
+template <class LI>
+Operand LCodeGen::ToShiftedRightOperand32(LOperand* right, LI* shift_info) {
   if (shift_info->shift() == NO_SHIFT) {
-    return (signedness == SIGNED_INT32) ? ToOperand32I(right)
-                                        : ToOperand32U(right);
+    return ToOperand32(right);
   } else {
     return Operand(
         ToRegister32(right),
@@ -1354,7 +1334,7 @@
 
 double LCodeGen::ToDouble(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasDoubleValue());
+  DCHECK(constant->HasDoubleValue());
   return constant->DoubleValue();
 }
 
@@ -1403,18 +1383,18 @@
     EmitGoto(left_block);
   } else if (left_block == next_block) {
     branch.EmitInverted(chunk_->GetAssemblyLabel(right_block));
-  } else if (right_block == next_block) {
-    branch.Emit(chunk_->GetAssemblyLabel(left_block));
   } else {
     branch.Emit(chunk_->GetAssemblyLabel(left_block));
-    __ B(chunk_->GetAssemblyLabel(right_block));
+    if (right_block != next_block) {
+      __ B(chunk_->GetAssemblyLabel(right_block));
+    }
   }
 }
 
 
 template<class InstrType>
 void LCodeGen::EmitBranch(InstrType instr, Condition condition) {
-  ASSERT((condition != al) && (condition != nv));
+  DCHECK((condition != al) && (condition != nv));
   BranchOnCondition branch(this, condition);
   EmitBranchGeneric(instr, branch);
 }
@@ -1425,7 +1405,7 @@
                                     Condition condition,
                                     const Register& lhs,
                                     const Operand& rhs) {
-  ASSERT((condition != al) && (condition != nv));
+  DCHECK((condition != al) && (condition != nv));
   CompareAndBranch branch(this, condition, lhs, rhs);
   EmitBranchGeneric(instr, branch);
 }
@@ -1436,7 +1416,7 @@
                                  Condition condition,
                                  const Register& value,
                                  uint64_t mask) {
-  ASSERT((condition != al) && (condition != nv));
+  DCHECK((condition != al) && (condition != nv));
   TestAndBranch branch(this, condition, value, mask);
   EmitBranchGeneric(instr, branch);
 }
@@ -1508,7 +1488,7 @@
     }
   } else {
     Register length = ToRegister32(instr->length());
-    Operand index = ToOperand32I(instr->index());
+    Operand index = ToOperand32(instr->index());
     __ Sub(result.W(), length, index);
     __ Add(result.W(), result.W(), 1);
     __ Ldr(result, MemOperand(arguments, result, UXTW, kPointerSizeLog2));
@@ -1523,7 +1503,7 @@
       ? ToInteger32(LConstantOperand::cast(instr->right()))
       : Operand(ToRegister32(instr->right()), SXTW);
 
-  ASSERT(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
+  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow));
   __ Add(result, left, right);
 }
 
@@ -1532,11 +1512,11 @@
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   Register result = ToRegister32(instr->result());
   Register left = ToRegister32(instr->left());
-  Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+  Operand right = ToShiftedRightOperand32(instr->right(), instr);
 
   if (can_overflow) {
     __ Adds(result, left, right);
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   } else {
     __ Add(result, left, right);
   }
@@ -1550,7 +1530,7 @@
   Operand right = ToOperand(instr->right());
   if (can_overflow) {
     __ Adds(result, left, right);
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   } else {
     __ Add(result, left, right);
   }
@@ -1581,11 +1561,11 @@
   }
 
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   }
 
@@ -1620,7 +1600,7 @@
     __ Mov(filler, Operand(isolate()->factory()->one_pointer_filler_map()));
     __ FillFields(untagged_result, filler_count, filler);
   } else {
-    ASSERT(instr->temp3() == NULL);
+    DCHECK(instr->temp3() == NULL);
   }
 }
 
@@ -1631,7 +1611,7 @@
   // contained in the register pointer map.
   __ Mov(ToRegister(instr->result()), Smi::FromInt(0));
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   // We're in a SafepointRegistersScope so we can use any scratch registers.
   Register size = x0;
   if (instr->size()->IsConstantOperand()) {
@@ -1642,11 +1622,11 @@
   int flags = AllocateDoubleAlignFlag::encode(
       instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
@@ -1655,7 +1635,7 @@
   __ Push(size, x10);
 
   CallRuntimeFromDeferred(
-      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(x0, ToRegister(instr->result()));
 }
 
@@ -1667,16 +1647,16 @@
 
   Register elements = ToRegister(instr->elements());
   Register scratch = x5;
-  ASSERT(receiver.Is(x0));  // Used for parameter count.
-  ASSERT(function.Is(x1));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).Is(x0));
-  ASSERT(instr->IsMarkedAsCall());
+  DCHECK(receiver.Is(x0));  // Used for parameter count.
+  DCHECK(function.Is(x1));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).Is(x0));
+  DCHECK(instr->IsMarkedAsCall());
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
   __ Cmp(length, kArgumentsLimit);
-  DeoptimizeIf(hi, instr->environment());
+  DeoptimizeIf(hi, instr);
 
   // Push the receiver and use the register to keep the original
   // number of arguments.
@@ -1699,7 +1679,7 @@
   __ B(ne, &loop);
 
   __ Bind(&invoke);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
   // The number of arguments is stored in argc (receiver) which is x0, as
@@ -1725,10 +1705,10 @@
     // LAccessArgumentsAt implementation take that into account.
     // In the inlined case we need to subtract the size of 2 words to jssp to
     // get a pointer which will work well with LAccessArgumentsAt.
-    ASSERT(masm()->StackPointer().Is(jssp));
+    DCHECK(masm()->StackPointer().Is(jssp));
     __ Sub(result, jssp, 2 * kPointerSize);
   } else {
-    ASSERT(instr->temp() != NULL);
+    DCHECK(instr->temp() != NULL);
     Register previous_fp = ToRegister(instr->temp());
 
     __ Ldr(previous_fp,
@@ -1782,12 +1762,12 @@
       // precision), it should be possible. However, we would need support for
       // fdiv in round-towards-zero mode, and the ARM64 simulator doesn't
       // support that yet.
-      ASSERT(left.Is(d0));
-      ASSERT(right.Is(d1));
+      DCHECK(left.Is(d0));
+      DCHECK(right.Is(d1));
       __ CallCFunction(
           ExternalReference::mod_two_doubles_operation(isolate()),
           0, 2);
-      ASSERT(result.Is(d0));
+      DCHECK(result.Is(d0));
       break;
     }
     default:
@@ -1798,20 +1778,21 @@
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->left()).is(x1));
-  ASSERT(ToRegister(instr->right()).is(x0));
-  ASSERT(ToRegister(instr->result()).is(x0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(x1));
+  DCHECK(ToRegister(instr->right()).is(x0));
+  DCHECK(ToRegister(instr->result()).is(x0));
 
-  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
 
 void LCodeGen::DoBitI(LBitI* instr) {
   Register result = ToRegister32(instr->result());
   Register left = ToRegister32(instr->left());
-  Operand right = ToShiftedRightOperand32U(instr->right(), instr);
+  Operand right = ToShiftedRightOperand32(instr->right(), instr);
 
   switch (instr->op()) {
     case Token::BIT_AND: __ And(result, left, right); break;
@@ -1842,22 +1823,22 @@
 
 void LCodeGen::DoBoundsCheck(LBoundsCheck *instr) {
   Condition cond = instr->hydrogen()->allow_equality() ? hi : hs;
-  ASSERT(instr->hydrogen()->index()->representation().IsInteger32());
-  ASSERT(instr->hydrogen()->length()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->index()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->length()->representation().IsInteger32());
   if (instr->index()->IsConstantOperand()) {
-    Operand index = ToOperand32I(instr->index());
+    Operand index = ToOperand32(instr->index());
     Register length = ToRegister32(instr->length());
     __ Cmp(length, index);
     cond = CommuteCondition(cond);
   } else {
     Register index = ToRegister32(instr->index());
-    Operand length = ToOperand32I(instr->length());
+    Operand length = ToOperand32(instr->length());
     __ Cmp(index, length);
   }
   if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
     __ Assert(NegateCondition(cond), kEliminatedBoundsCheckFailed);
   } else {
-    DeoptimizeIf(cond, instr->environment());
+    DeoptimizeIf(cond, instr);
   }
 }
 
@@ -1868,10 +1849,10 @@
   Label* false_label = instr->FalseLabel(chunk_);
 
   if (r.IsInteger32()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     EmitCompareAndBranch(instr, ne, ToRegister32(instr->value()), 0);
   } else if (r.IsSmi()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     STATIC_ASSERT(kSmiTag == 0);
     EmitCompareAndBranch(instr, ne, ToRegister(instr->value()), 0);
   } else if (r.IsDouble()) {
@@ -1879,28 +1860,28 @@
     // Test the double value. Zero and NaN are false.
     EmitBranchIfNonZeroNumber(instr, value, double_scratch());
   } else {
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     Register value = ToRegister(instr->value());
     HType type = instr->hydrogen()->value()->type();
 
     if (type.IsBoolean()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ CompareRoot(value, Heap::kTrueValueRootIndex);
       EmitBranch(instr, eq);
     } else if (type.IsSmi()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       EmitCompareAndBranch(instr, ne, value, Smi::FromInt(0));
     } else if (type.IsJSArray()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       EmitGoto(instr->TrueDestination(chunk()));
     } else if (type.IsHeapNumber()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ Ldr(double_scratch(), FieldMemOperand(value,
                                                HeapNumber::kValueOffset));
       // Test the double value. Zero and NaN are false.
       EmitBranchIfNonZeroNumber(instr, double_scratch(), double_scratch());
     } else if (type.IsString()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       Register temp = ToRegister(instr->temp1());
       __ Ldr(temp, FieldMemOperand(value, String::kLengthOffset));
       EmitCompareAndBranch(instr, ne, temp, 0);
@@ -1931,19 +1912,19 @@
 
       if (expected.Contains(ToBooleanStub::SMI)) {
         // Smis: 0 -> false, all other -> true.
-        ASSERT(Smi::FromInt(0) == 0);
+        DCHECK(Smi::FromInt(0) == 0);
         __ Cbz(value, false_label);
         __ JumpIfSmi(value, true_label);
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a smi, deopt.
-        DeoptimizeIfSmi(value, instr->environment());
+        DeoptimizeIfSmi(value, instr);
       }
 
       Register map = NoReg;
       Register scratch = NoReg;
 
       if (expected.NeedsMap()) {
-        ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+        DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
         map = ToRegister(instr->temp1());
         scratch = ToRegister(instr->temp2());
 
@@ -1997,7 +1978,7 @@
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
-        Deoptimize(instr->environment());
+        Deoptimize(instr);
       }
     }
   }
@@ -2015,7 +1996,7 @@
       dont_adapt_arguments || formal_parameter_count == arity;
 
   // The function interface relies on the following register assignments.
-  ASSERT(function_reg.Is(x1) || function_reg.IsNone());
+  DCHECK(function_reg.Is(x1) || function_reg.IsNone());
   Register arity_reg = x0;
 
   LPointerMap* pointers = instr->pointer_map();
@@ -2059,9 +2040,37 @@
 }
 
 
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+  DCHECK(receiver.is(x1));
+  DCHECK(name.is(x2));
+
+  Register scratch = x3;
+  Register extra = x4;
+  Register extra2 = x5;
+  Register extra3 = x6;
+
+  // Important for the tail-call.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, extra, extra2, extra3);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+  LoadIC::GenerateMiss(masm());
+}
+
+
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  ASSERT(instr->IsMarkedAsCall());
-  ASSERT(ToRegister(instr->result()).Is(x0));
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToRegister(instr->result()).Is(x0));
 
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -2075,7 +2084,7 @@
     // this understanding is correct.
     __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
   } else {
-    ASSERT(instr->target()->IsRegister());
+    DCHECK(instr->target()->IsRegister());
     Register target = ToRegister(instr->target());
     generator.BeforeCall(__ CallSize(target));
     __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
@@ -2087,8 +2096,8 @@
 
 
 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  ASSERT(instr->IsMarkedAsCall());
-  ASSERT(ToRegister(instr->function()).is(x1));
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToRegister(instr->function()).is(x1));
 
   if (instr->hydrogen()->pass_argument_count()) {
     __ Mov(x0, Operand(instr->arity()));
@@ -2113,8 +2122,8 @@
 
 
 void LCodeGen::DoCallStub(LCallStub* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->result()).is(x0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(x0));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
       RegExpExecStub stub(isolate());
@@ -2146,7 +2155,7 @@
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   Register temp = ToRegister(instr->temp());
   {
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    PushSafepointRegistersScope scope(this);
     __ Push(object);
     __ Mov(cp, 0);
     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
@@ -2154,7 +2163,7 @@
         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(x0, temp);
   }
-  DeoptimizeIfSmi(temp, instr->environment());
+  DeoptimizeIfSmi(temp, instr);
 }
 
 
@@ -2209,7 +2218,7 @@
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ B(ne, deferred->entry());
   } else {
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   }
 
   __ Bind(&success);
@@ -2218,15 +2227,15 @@
 
 void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
-    DeoptimizeIfSmi(ToRegister(instr->value()), instr->environment());
+    DeoptimizeIfSmi(ToRegister(instr->value()), instr);
   }
 }
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   Register value = ToRegister(instr->value());
-  ASSERT(!instr->result() || ToRegister(instr->result()).Is(value));
-  DeoptimizeIfNotSmi(value, instr->environment());
+  DCHECK(!instr->result() || ToRegister(instr->result()).Is(value));
+  DeoptimizeIfNotSmi(value, instr);
 }
 
 
@@ -2244,27 +2253,27 @@
     __ Cmp(scratch, first);
     if (first == last) {
       // If there is only one type in the interval check for equality.
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr);
     } else if (last == LAST_TYPE) {
       // We don't need to compare with the higher bound of the interval.
-      DeoptimizeIf(lo, instr->environment());
+      DeoptimizeIf(lo, instr);
     } else {
       // If we are below the lower bound, set the C flag and clear the Z flag
       // to force a deopt.
       __ Ccmp(scratch, last, CFlag, hs);
-      DeoptimizeIf(hi, instr->environment());
+      DeoptimizeIf(hi, instr);
     }
   } else {
     uint8_t mask;
     uint8_t tag;
     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
 
-    if (IsPowerOf2(mask)) {
-      ASSERT((tag == 0) || (tag == mask));
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK((tag == 0) || (tag == mask));
       if (tag == 0) {
-        DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr->environment());
+        DeoptimizeIfBitSet(scratch, MaskToBit(mask), instr);
       } else {
-        DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr->environment());
+        DeoptimizeIfBitClear(scratch, MaskToBit(mask), instr);
       }
     } else {
       if (tag == 0) {
@@ -2273,7 +2282,7 @@
         __ And(scratch, scratch, mask);
         __ Cmp(scratch, tag);
       }
-      DeoptimizeIf(ne, instr->environment());
+      DeoptimizeIf(ne, instr);
     }
   }
 }
@@ -2296,7 +2305,6 @@
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
   Register input = ToRegister(instr->unclamped());
   Register result = ToRegister32(instr->result());
-  Register scratch = ToRegister(instr->temp1());
   Label done;
 
   // Both smi and heap number cases are handled.
@@ -2310,19 +2318,17 @@
 
   // Check for heap number.
   Label is_heap_number;
-  __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
-  __ JumpIfRoot(scratch, Heap::kHeapNumberMapRootIndex, &is_heap_number);
+  __ JumpIfHeapNumber(input, &is_heap_number);
 
   // Check for undefined. Undefined is converted to zero for clamping conversion.
-  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
-                         instr->environment());
+  DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
   __ Mov(result, 0);
   __ B(&done);
 
   // Heap number case.
   __ Bind(&is_heap_number);
   DoubleRegister dbl_scratch = double_scratch();
-  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp2());
+  DoubleRegister dbl_scratch2 = ToDoubleRegister(instr->temp1());
   __ Ldr(dbl_scratch, FieldMemOperand(input, HeapNumber::kValueOffset));
   __ ClampDoubleToUint8(result, dbl_scratch, dbl_scratch2);
 
@@ -2365,7 +2371,7 @@
   __ JumpIfSmi(input, false_label);
 
   Register map = scratch2;
-  if (class_name->IsUtf8EqualTo(CStrVector("Function"))) {
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
     // Assuming the following assertions, we can use the same compares to test
     // for both being a function type and being in the object type range.
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2390,7 +2396,7 @@
   __ Ldr(scratch1, FieldMemOperand(map, Map::kConstructorOffset));
 
   // Objects with a non-function constructor have class 'Object'.
-  if (class_name->IsUtf8EqualTo(CStrVector("Object"))) {
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
     __ JumpIfNotObjectType(
         scratch1, scratch2, scratch2, JS_FUNCTION_TYPE, true_label);
   } else {
@@ -2416,7 +2422,7 @@
 
 
 void LCodeGen::DoCmpHoleAndBranchD(LCmpHoleAndBranchD* instr) {
-  ASSERT(instr->hydrogen()->representation().IsDouble());
+  DCHECK(instr->hydrogen()->representation().IsDouble());
   FPRegister object = ToDoubleRegister(instr->object());
   Register temp = ToRegister(instr->temp());
 
@@ -2432,7 +2438,7 @@
 
 
 void LCodeGen::DoCmpHoleAndBranchT(LCmpHoleAndBranchT* instr) {
-  ASSERT(instr->hydrogen()->representation().IsTagged());
+  DCHECK(instr->hydrogen()->representation().IsTagged());
   Register object = ToRegister(instr->object());
 
   EmitBranchIfRoot(instr, object, Heap::kTheHoleValueRootIndex);
@@ -2450,7 +2456,7 @@
 
 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   Representation rep = instr->hydrogen()->value()->representation();
-  ASSERT(!rep.IsInteger32());
+  DCHECK(!rep.IsInteger32());
   Register scratch = ToRegister(instr->temp());
 
   if (rep.IsDouble()) {
@@ -2458,8 +2464,7 @@
                        instr->TrueLabel(chunk()));
   } else {
     Register value = ToRegister(instr->value());
-    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
-                instr->FalseLabel(chunk()), DO_SMI_CHECK);
+    __ JumpIfNotHeapNumber(value, instr->FalseLabel(chunk()), DO_SMI_CHECK);
     __ Ldr(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
     __ JumpIfMinusZero(scratch, instr->TrueLabel(chunk()));
   }
@@ -2484,17 +2489,7 @@
     EmitGoto(next_block);
   } else {
     if (instr->is_double()) {
-      if (right->IsConstantOperand()) {
-        __ Fcmp(ToDoubleRegister(left),
-                ToDouble(LConstantOperand::cast(right)));
-      } else if (left->IsConstantOperand()) {
-        // Commute the operands and the condition.
-        __ Fcmp(ToDoubleRegister(right),
-                ToDouble(LConstantOperand::cast(left)));
-        cond = CommuteCondition(cond);
-      } else {
-        __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
-      }
+      __ Fcmp(ToDoubleRegister(left), ToDoubleRegister(right));
 
       // If a NaN is involved, i.e. the result is unordered (V set),
       // jump to false block label.
@@ -2503,19 +2498,15 @@
     } else {
       if (instr->hydrogen_value()->representation().IsInteger32()) {
         if (right->IsConstantOperand()) {
-          EmitCompareAndBranch(instr,
-                               cond,
-                               ToRegister32(left),
-                               ToOperand32I(right));
+          EmitCompareAndBranch(instr, cond, ToRegister32(left),
+                               ToOperand32(right));
         } else {
           // Commute the operands and the condition.
-          EmitCompareAndBranch(instr,
-                               CommuteCondition(cond),
-                               ToRegister32(right),
-                               ToOperand32I(left));
+          EmitCompareAndBranch(instr, CommuteCondition(cond),
+                               ToRegister32(right), ToOperand32(left));
         }
       } else {
-        ASSERT(instr->hydrogen_value()->representation().IsSmi());
+        DCHECK(instr->hydrogen_value()->representation().IsSmi());
         if (right->IsConstantOperand()) {
           int32_t value = ToInteger32(LConstantOperand::cast(right));
           EmitCompareAndBranch(instr,
@@ -2549,20 +2540,20 @@
 
 
 void LCodeGen::DoCmpT(LCmpT* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
   Condition cond = TokenToCondition(op, false);
 
-  ASSERT(ToRegister(instr->left()).Is(x1));
-  ASSERT(ToRegister(instr->right()).Is(x0));
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  DCHECK(ToRegister(instr->left()).Is(x1));
+  DCHECK(ToRegister(instr->right()).Is(x0));
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // Signal that we don't inline smi code before this stub.
   InlineSmiCheckInfo::EmitNotInlined(masm());
 
   // Return true or false depending on CompareIC result.
   // This instruction is marked as call. We can clobber any register.
-  ASSERT(instr->IsMarkedAsCall());
+  DCHECK(instr->IsMarkedAsCall());
   __ LoadTrueFalseRoots(x1, x2);
   __ Cmp(x0, 0);
   __ Csel(ToRegister(instr->result()), x1, x2, cond);
@@ -2570,7 +2561,7 @@
 
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
-  ASSERT(instr->result()->IsDoubleRegister());
+  DCHECK(instr->result()->IsDoubleRegister());
   DoubleRegister result = ToDoubleRegister(instr->result());
   if (instr->value() == 0) {
     if (copysign(1.0, instr->value()) == 1.0) {
@@ -2590,7 +2581,7 @@
 
 
 void LCodeGen::DoConstantI(LConstantI* instr) {
-  ASSERT(is_int32(instr->value()));
+  DCHECK(is_int32(instr->value()));
   // Cast the value here to ensure that the value isn't sign extended by the
   // implicit Operand constructor.
   __ Mov(ToRegister32(instr->result()), static_cast<uint32_t>(instr->value()));
@@ -2616,7 +2607,7 @@
     __ Ldr(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
     // If there is no frame, the context must be in cp.
-    ASSERT(result.is(cp));
+    DCHECK(result.is(cp));
   }
 }
 
@@ -2635,13 +2626,13 @@
   } else {
     __ Cmp(reg, Operand(object));
   }
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 }
 
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   last_lazy_deopt_pc_ = masm()->pc_offset();
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -2656,12 +2647,12 @@
   Smi* index = instr->index();
   Label runtime, done;
 
-  ASSERT(object.is(result) && object.Is(x0));
-  ASSERT(instr->IsMarkedAsCall());
+  DCHECK(object.is(result) && object.Is(x0));
+  DCHECK(instr->IsMarkedAsCall());
 
-  DeoptimizeIfSmi(object, instr->environment());
+  DeoptimizeIfSmi(object, instr);
   __ CompareObjectType(object, temp1, temp1, JS_DATE_TYPE);
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 
   if (index->value() == 0) {
     __ Ldr(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -2697,8 +2688,7 @@
     type = Deoptimizer::LAZY;
   }
 
-  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
-  Deoptimize(instr->environment(), &type);
+  Deoptimize(instr, &type, instr->hydrogen()->reason());
 }
 
 
@@ -2706,27 +2696,27 @@
   Register dividend = ToRegister32(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister32(instr->result());
-  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
-  ASSERT(!result.is(dividend));
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIfZero(dividend, instr->environment());
+    DeoptimizeIfZero(dividend, instr);
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     // Test dividend for kMinInt by subtracting one (cmp) and checking for
     // overflow.
     __ Cmp(dividend, 1);
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ Tst(dividend, mask);
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   }
 
   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
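
A minimal C++ model of the three bailouts in this hunk (a sketch only; the helper name is invented and the hydrogen flag checks are folded into plain booleans):

#include <cstdint>
#include <limits>

static bool DivByPowerOf2WouldDeopt(int32_t dividend, int32_t divisor,
                                    bool bailout_on_minus_zero,
                                    bool can_overflow,
                                    bool all_uses_truncating) {
  if (bailout_on_minus_zero && divisor < 0 && dividend == 0) {
    return true;  // 0 / -x would produce -0.
  }
  if (can_overflow && divisor == -1 &&
      dividend == std::numeric_limits<int32_t>::min()) {
    return true;  // kMinInt / -1 overflows; Cmp(dividend, 1) sets V only here.
  }
  // mask = |divisor| - 1; Tst(dividend, mask) detects a nonzero remainder.
  int32_t mask = (divisor < 0) ? -(divisor + 1) : (divisor - 1);
  return !all_uses_truncating && divisor != 1 && divisor != -1 &&
         (dividend & mask) != 0;
}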
@@ -2751,17 +2741,17 @@
   Register dividend = ToRegister32(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister32(instr->result());
-  ASSERT(!AreAliased(dividend, result));
+  DCHECK(!AreAliased(dividend, result));
 
   if (divisor == 0) {
-    Deoptimize(instr->environment());
+    Deoptimize(instr);
     return;
   }
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIfZero(dividend, instr->environment());
+    DeoptimizeIfZero(dividend, instr);
   }
 
   __ TruncatingDiv(result, dividend, Abs(divisor));
@@ -2769,11 +2759,11 @@
 
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
     Register temp = ToRegister32(instr->temp());
-    ASSERT(!AreAliased(dividend, result, temp));
+    DCHECK(!AreAliased(dividend, result, temp));
     __ Sxtw(dividend.X(), dividend);
     __ Mov(temp, divisor);
     __ Smsubl(temp.X(), result, temp, dividend.X());
-    DeoptimizeIfNotZero(temp, instr->environment());
+    DeoptimizeIfNotZero(temp, instr);
   }
 }
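
The Sxtw/Mov/Smsubl remainder check above is equivalent to the following sketch (helper name invented; the 64-bit arithmetic mirrors Smsubl's widening multiply-subtract):

#include <cstdint>

static bool ConstDivisionIsExact(int32_t dividend, int32_t quotient,
                                 int32_t divisor) {
  // remainder = dividend - quotient * divisor, evaluated in 64 bits so the
  // multiply cannot overflow.
  int64_t remainder = static_cast<int64_t>(dividend) -
                      static_cast<int64_t>(quotient) * divisor;
  return remainder == 0;  // A nonzero remainder triggers DeoptimizeIfNotZero.
}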
 
@@ -2790,13 +2780,13 @@
   __ Sdiv(result, dividend, divisor);
 
   if (hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
-    ASSERT_EQ(NULL, instr->temp());
+    DCHECK_EQ(NULL, instr->temp());
     return;
   }
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIfZero(divisor, instr->environment());
+    DeoptimizeIfZero(divisor, instr);
   }
 
   // Check for (0 / -x) as that will produce negative zero.
@@ -2808,7 +2798,7 @@

     // If the divisor >= 0 (pl, the opposite of mi), set the flags to
     // condition ne, so we don't deopt, i.e. a positive divisor doesn't deopt.
     __ Ccmp(dividend, 0, NoFlag, mi);
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Check for (kMinInt / -1).
@@ -2820,13 +2810,13 @@
     // -1. If overflow is clear, set the flags for condition ne, as the
     // dividend isn't -1, and thus we shouldn't deopt.
     __ Ccmp(divisor, -1, NoFlag, vs);
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Compute remainder and deopt if it's not zero.
   Register remainder = ToRegister32(instr->temp());
   __ Msub(remainder, result, divisor, dividend);
-  DeoptimizeIfNotZero(remainder, instr->environment());
+  DeoptimizeIfNotZero(remainder, instr);
 }
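
For reference, the Cmp/Ccmp pairs in DoDivI fold the classic division hazards into conditional deopts. A plain C++ restatement (a sketch; the helper name is invented and the flag guards are reduced to one boolean):

#include <cstdint>
#include <limits>

static bool GenericDivWouldDeopt(int32_t dividend, int32_t divisor,
                                 bool all_uses_truncating) {
  if (divisor == 0) return true;                   // x / 0.
  if (dividend == 0 && divisor < 0) return true;   // 0 / -x would be -0.
  if (dividend == std::numeric_limits<int32_t>::min() && divisor == -1) {
    return true;                                   // kMinInt / -1 overflows.
  }
  // Msub computes the remainder; a nonzero remainder deopts unless every
  // use truncates to int32.
  return !all_uses_truncating && (dividend % divisor) != 0;
}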
 
 
@@ -2835,11 +2825,11 @@
   Register result = ToRegister32(instr->result());
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIfMinusZero(input, instr->environment());
+    DeoptimizeIfMinusZero(input, instr);
   }
 
   __ TryRepresentDoubleAsInt32(result, input, double_scratch());
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 
   if (instr->tag_result()) {
     __ SmiTag(result.X());
@@ -2863,17 +2853,16 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   // The FunctionLiteral instruction is marked as a call, so we can trash
   // any register.
-  ASSERT(instr->IsMarkedAsCall());
+  DCHECK(instr->IsMarkedAsCall());
 
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && instr->hydrogen()->has_no_literals()) {
-    FastNewClosureStub stub(isolate(),
-                            instr->hydrogen()->strict_mode(),
-                            instr->hydrogen()->is_generator());
+    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+                            instr->hydrogen()->kind());
     __ Mov(x2, Operand(instr->hydrogen()->shared_info()));
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -2881,7 +2870,7 @@
     __ Mov(x1, Operand(pretenure ? factory()->true_value()
                                  : factory()->false_value()));
     __ Push(cp, x2, x1);
-    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
   }
 }
 
@@ -2901,7 +2890,7 @@
   __ LoadInstanceDescriptors(map, result);
   __ Ldr(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
   __ Ldr(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
-  DeoptimizeIfZero(result, instr->environment());
+  DeoptimizeIfZero(result, instr);
 
   __ Bind(&done);
 }
@@ -2911,21 +2900,20 @@
   Register object = ToRegister(instr->object());
   Register null_value = x5;
 
-  ASSERT(instr->IsMarkedAsCall());
-  ASSERT(object.Is(x0));
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(object.Is(x0));
 
-  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex,
-                   instr->environment());
+  DeoptimizeIfRoot(object, Heap::kUndefinedValueRootIndex, instr);
 
   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   __ Cmp(object, null_value);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
 
-  DeoptimizeIfSmi(object, instr->environment());
+  DeoptimizeIfSmi(object, instr);
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CompareObjectType(object, x1, x1, LAST_JS_PROXY_TYPE);
-  DeoptimizeIf(le, instr->environment());
+  DeoptimizeIf(le, instr);
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(object, null_value, x1, x2, x3, x4, &call_runtime);
@@ -2939,7 +2927,7 @@
   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
 
   __ Ldr(x1, FieldMemOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr->environment());
+  DeoptimizeIfNotRoot(x1, Heap::kMetaMapRootIndex, instr);
 
   __ Bind(&use_cache);
 }
@@ -2952,7 +2940,7 @@
   __ AssertString(input);
 
   // Assert that we can use a W register load to get the hash.
-  ASSERT((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
+  DCHECK((String::kHashShift + String::kArrayIndexValueBits) < kWRegSizeInBits);
   __ Ldr(result.W(), FieldMemOperand(input, String::kHashFieldOffset));
   __ IndexFromHash(result, result);
 }
@@ -2977,7 +2965,7 @@
   Register temp = ToRegister32(instr->temp());
 
   // Assert that the cache status bits fit in a W register.
-  ASSERT(is_uint32(String::kContainsCachedArrayIndexMask));
+  DCHECK(is_uint32(String::kContainsCachedArrayIndexMask));
   __ Ldr(temp, FieldMemOperand(input, String::kHashFieldOffset));
   __ Tst(temp, String::kContainsCachedArrayIndexMask);
   EmitBranch(instr, eq);
@@ -3001,7 +2989,7 @@
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
-  ASSERT((from == to) || (to == LAST_TYPE));
+  DCHECK((from == to) || (to == LAST_TYPE));
   return from;
 }
 
@@ -3034,7 +3022,7 @@
   Register result = ToRegister(instr->result());
   Register base = ToRegister(instr->base_object());
   if (instr->offset()->IsConstantOperand()) {
-    __ Add(result, base, ToOperand32I(instr->offset()));
+    __ Add(result, base, ToOperand32(instr->offset()));
   } else {
     __ Add(result, base, Operand(ToRegister32(instr->offset()), SXTW));
   }
@@ -3042,10 +3030,10 @@
 
 
 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   // Assert that the arguments are in the registers expected by InstanceofStub.
-  ASSERT(ToRegister(instr->left()).Is(InstanceofStub::left()));
-  ASSERT(ToRegister(instr->right()).Is(InstanceofStub::right()));
+  DCHECK(ToRegister(instr->left()).Is(InstanceofStub::left()));
+  DCHECK(ToRegister(instr->right()).Is(InstanceofStub::right()));
 
   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -3084,10 +3072,10 @@
   Register map = x5;
 
   // This instruction is marked as call. We can clobber any register.
-  ASSERT(instr->IsMarkedAsCall());
+  DCHECK(instr->IsMarkedAsCall());
 
   // We must take into account that object is in x11.
-  ASSERT(object.Is(x11));
+  DCHECK(object.Is(x11));
   Register scratch = x10;
 
   // A Smi is not instance of anything.
@@ -3111,7 +3099,7 @@
     __ b(&cache_miss, ne);
     // The address of this instruction is computed relative to the map check
     // above, so check the size of the code generated.
-    ASSERT(masm()->InstructionsGeneratedSince(&map_check) == 4);
+    DCHECK(masm()->InstructionsGeneratedSince(&map_check) == 4);
     // Will be patched with the cached result.
     __ ldr(result, Immediate(factory()->the_hole_value()));
   }
@@ -3146,7 +3134,7 @@
 
 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
   Register result = ToRegister(instr->result());
-  ASSERT(result.Is(x0));  // InstanceofStub returns its result in x0.
+  DCHECK(result.Is(x0));  // InstanceofStub returns its result in x0.
   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   flags = static_cast<InstanceofStub::Flags>(
       flags | InstanceofStub::kArgsInRegisters);
@@ -3155,11 +3143,11 @@
   flags = static_cast<InstanceofStub::Flags>(
       flags | InstanceofStub::kCallSiteInlineCheck);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   LoadContextFromDeferred(instr->context());
 
   // Prepare InstanceofStub arguments.
-  ASSERT(ToRegister(instr->value()).Is(InstanceofStub::left()));
+  DCHECK(ToRegister(instr->value()).Is(InstanceofStub::left()));
   __ LoadObject(InstanceofStub::right(), instr->function());
 
   InstanceofStub stub(isolate(), flags);
@@ -3188,10 +3176,10 @@
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   // The function is required to be in x1.
-  ASSERT(ToRegister(instr->function()).is(x1));
-  ASSERT(instr->HasPointerMap());
+  DCHECK(ToRegister(instr->function()).is(x1));
+  DCHECK(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
@@ -3332,8 +3320,7 @@
   __ Ldr(result, ContextMemOperand(context, instr->slot_index()));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
-                       instr->environment());
+      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
     } else {
       Label not_the_hole;
       __ JumpIfNotRoot(result, Heap::kTheHoleValueRootIndex, &not_the_hole);
@@ -3349,23 +3336,12 @@
   Register result = ToRegister(instr->result());
   Register temp = ToRegister(instr->temp());
 
-  // Check that the function really is a function. Leaves map in the result
-  // register.
-  __ CompareObjectType(function, result, temp, JS_FUNCTION_TYPE);
-  DeoptimizeIf(ne, instr->environment());
-
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  __ Ldrb(temp, FieldMemOperand(result, Map::kBitFieldOffset));
-  __ Tbnz(temp, Map::kHasNonInstancePrototype, &non_instance);
-
   // Get the prototype or initial map from the function.
   __ Ldr(result, FieldMemOperand(function,
                                  JSFunction::kPrototypeOrInitialMapOffset));
 
   // Check that the function has a prototype or an initial map.
-  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
-                   instr->environment());
+  DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3374,12 +3350,6 @@
 
   // Get the prototype from the initial map.
   __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-  __ B(&done);
-
-  // Non-instance prototype: fetch prototype from constructor field in initial
-  // map.
-  __ Bind(&non_instance);
-  __ Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
 
   // All done.
   __ Bind(&done);
@@ -3391,19 +3361,35 @@
   __ Mov(result, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
   __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
   if (instr->hydrogen()->RequiresHoleCheck()) {
-    DeoptimizeIfRoot(
-        result, Heap::kTheHoleValueRootIndex, instr->environment());
+    DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
   }
 }
 
 
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ Mov(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
+  __ Mov(VectorLoadICDescriptor::SlotRegister(),
+         Smi::FromInt(instr->hydrogen()->slot()));
+}
+
+
 void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->global_object()).Is(x0));
-  ASSERT(ToRegister(instr->result()).Is(x0));
-  __ Mov(x2, Operand(instr->name()));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).Is(x0));
+  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  }
   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3433,7 +3419,7 @@
     return MemOperand(base, key, SXTW, element_size_shift);
   }
 
-  ASSERT(!AreAliased(scratch, key));
+  DCHECK(!AreAliased(scratch, key));
   __ Add(scratch, base, base_offset);
   return MemOperand(scratch, key, SXTW, element_size_shift);
 }
@@ -3449,7 +3435,7 @@
   Register key = no_reg;
   int constant_key = 0;
   if (key_is_constant) {
-    ASSERT(instr->temp() == NULL);
+    DCHECK(instr->temp() == NULL);
     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     if (constant_key & 0xf0000000) {
       Abort(kArrayIndexConstantValueTooBig);
@@ -3506,7 +3492,7 @@
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
           // Deopt if value > 0x80000000.
           __ Tst(result, 0xFFFFFFFF80000000);
-          DeoptimizeIf(ne, instr->environment());
+          DeoptimizeIf(ne, instr);
         }
         break;
       case FLOAT32_ELEMENTS:
@@ -3535,7 +3521,8 @@
                                               ElementsKind elements_kind,
                                               Representation representation,
                                               int base_offset) {
-  STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+  STATIC_ASSERT(kSmiTag == 0);
   int element_size_shift = ElementsKindToShiftSize(elements_kind);
 
   // Even though the HLoad/StoreKeyed instructions force the input
@@ -3545,9 +3532,8 @@
   if (key_is_tagged) {
     __ Add(base, elements, Operand::UntagSmiAndScale(key, element_size_shift));
     if (representation.IsInteger32()) {
-      ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the most-significant 32 bits in the case of fast smi
-      // arrays.
+      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
+      // Read or write only the smi payload in the case of fast smi arrays.
       return UntagSmiMemOperand(base, base_offset);
     } else {
       return MemOperand(base, base_offset);
@@ -3555,11 +3541,10 @@
   } else {
     // Sign-extend the key because it could be a 32-bit negative value or
     // contain garbage in the top 32 bits. The address computation happens
     // in 64 bits.
-    ASSERT((element_size_shift >= 0) && (element_size_shift <= 4));
+    DCHECK((element_size_shift >= 0) && (element_size_shift <= 4));
     if (representation.IsInteger32()) {
-      ASSERT(elements_kind == FAST_SMI_ELEMENTS);
-      // Read or write only the most-significant 32 bits in the case of fast smi
-      // arrays.
+      DCHECK(elements_kind == FAST_SMI_ELEMENTS);
+      // Read or write only the smi payload in the case of fast smi arrays.
       __ Add(base, elements, Operand(key, SXTW, element_size_shift));
       return UntagSmiMemOperand(base, base_offset);
     } else {
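
The SXTW addressing mode above can be modeled as follows (a sketch; the names are invented and little-endian arm64 is assumed):

#include <cstdint>

static uint64_t KeyedElementAddress(uint64_t elements, uint64_t key_reg,
                                    int element_size_shift,
                                    uint64_t base_offset) {
  // SXTW keeps only the low 32 bits of the key register and sign-extends
  // them, discarding any garbage in the upper half.
  int64_t key = static_cast<int32_t>(static_cast<uint32_t>(key_reg));
  return elements + base_offset +
         (static_cast<uint64_t>(key) << element_size_shift);
}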
@@ -3576,7 +3561,7 @@
   MemOperand mem_op;
 
   if (instr->key()->IsConstantOperand()) {
-    ASSERT(instr->hydrogen()->RequiresHoleCheck() ||
+    DCHECK(instr->hydrogen()->RequiresHoleCheck() ||
            (instr->temp() == NULL));
 
     int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
@@ -3604,7 +3589,7 @@
     STATIC_ASSERT(kHoleNanInt64 == 0x7fffffffffffffff);
     __ Ldr(scratch, mem_op);
     __ Cmn(scratch, 1);
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   }
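
Why Cmn(scratch, 1) works as the hole test: kHoleNanInt64 is INT64_MAX, the only 64-bit value whose increment sets the signed-overflow flag that DeoptimizeIf(vs, ...) examines. As a sketch (helper name invented):

#include <cstdint>

static bool IsHoleNanBits(int64_t bits) {
  // bits + 1 overflows iff bits == 0x7fffffffffffffff.
  return bits == INT64_MAX;
}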
 }
 
@@ -3616,14 +3601,14 @@
 
   Representation representation = instr->hydrogen()->representation();
   if (instr->key()->IsConstantOperand()) {
-    ASSERT(instr->temp() == NULL);
+    DCHECK(instr->temp() == NULL);
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     int offset = instr->base_offset() +
         ToInteger32(const_operand) * kPointerSize;
     if (representation.IsInteger32()) {
-      ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
-                    (kSmiTag == 0));
+      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+      STATIC_ASSERT(kSmiTag == 0);
       mem_op = UntagSmiMemOperand(elements, offset);
     } else {
       mem_op = MemOperand(elements, offset);
@@ -3642,24 +3627,26 @@
 
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
-      DeoptimizeIfNotSmi(result, instr->environment());
+      DeoptimizeIfNotSmi(result, instr);
     } else {
-      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex,
-                       instr->environment());
+      DeoptimizeIfRoot(result, Heap::kTheHoleValueRootIndex, instr);
     }
   }
 }
 
 
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).Is(x1));
-  ASSERT(ToRegister(instr->key()).Is(x0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
 
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
-  ASSERT(ToRegister(instr->result()).Is(x0));
+  DCHECK(ToRegister(instr->result()).Is(x0));
 }
 
 
@@ -3693,7 +3680,8 @@
   if (access.representation().IsSmi() &&
       instr->hydrogen()->representation().IsInteger32()) {
     // Read int value directly from upper half of the smi.
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+    STATIC_ASSERT(kSmiTag == 0);
     __ Load(result, UntagSmiFieldMemOperand(source, offset),
             Representation::Integer32());
   } else {
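
The "upper half of the smi" load relies on the arm64 smi layout (kSmiShift == 32, kSmiTag == 0). A sketch of the equivalent untagging (helper name invented):

#include <cstdint>

static int32_t LoadInt32FromSmiWord(int64_t tagged_smi) {
  // The payload sits in the upper word, so a W-register load at
  // offset + 4 (little-endian) -- or this shift -- recovers the value.
  return static_cast<int32_t>(tagged_smi >> 32);
}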
@@ -3703,15 +3691,18 @@
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  // LoadIC expects x2 to hold the name, and x0 to hold the receiver.
-  ASSERT(ToRegister(instr->object()).is(x0));
-  __ Mov(x2, Operand(instr->name()));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  // LoadIC expects name and receiver in registers.
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  __ Mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  }
 
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
-  ASSERT(ToRegister(instr->result()).is(x0));
+  DCHECK(ToRegister(instr->result()).is(x0));
 }
 
 
@@ -3740,7 +3731,7 @@
     Register result = r.IsSmi() ? ToRegister(instr->result())
                                 : ToRegister32(instr->result());
     __ Abs(result, input);
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   }
 }
 
@@ -3757,8 +3748,8 @@
   //  - The (smi) input -0x80000000, produces +0x80000000, which does not fit
   //    a smi. In this case, the inline code sets the result and jumps directly
   //    to the allocation_entry label.
-  ASSERT(instr->context() != NULL);
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
   Register input = ToRegister(instr->value());
   Register temp1 = ToRegister(instr->temp1());
   Register temp2 = ToRegister(instr->temp2());
@@ -3768,9 +3759,7 @@
   Label runtime_allocation;
 
   // Deoptimize if the input is not a HeapNumber.
-  __ Ldr(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
-  DeoptimizeIfNotRoot(temp1, Heap::kHeapNumberMapRootIndex,
-                      instr->environment());
+  DeoptimizeIfNotHeapNumber(input, instr);
 
   // If the argument is positive, we can return it as-is, without any need to
   // allocate a new HeapNumber for the result. We have to do this in integer
@@ -3804,8 +3793,8 @@
     __ Bind(&result_ok);
   }
 
-  { PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
-    CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
+  { PushSafepointRegistersScope scope(this);
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                             instr->context());
     __ StoreToSafepointRegisterSlot(x0, result);
   }
@@ -3832,12 +3821,12 @@
 
   // TODO(jbramley): The early-exit mechanism would skip the new frame handling
   // in GenerateDeferredCode. Tidy this up.
-  ASSERT(!NeedsDeferredFrame());
+  DCHECK(!NeedsDeferredFrame());
 
   DeferredMathAbsTagged* deferred =
       new(zone()) DeferredMathAbsTagged(this, instr);
 
-  ASSERT(instr->hydrogen()->value()->representation().IsTagged() ||
+  DCHECK(instr->hydrogen()->value()->representation().IsTagged() ||
          instr->hydrogen()->value()->representation().IsSmi());
   Register input = ToRegister(instr->value());
   Register result_bits = ToRegister(instr->temp3());
@@ -3894,7 +3883,7 @@
   Register result = ToRegister(instr->result());
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIfMinusZero(input, instr->environment());
+    DeoptimizeIfMinusZero(input, instr);
   }
 
   __ Fcvtms(result, input);
@@ -3904,7 +3893,7 @@
   __ Cmp(result, Operand(result, SXTW));
   //  - The input was not NaN.
   __ Fccmp(input, input, NoFlag, eq);
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 }
 
 
@@ -3930,13 +3919,13 @@
   // If the divisor is negative, we have to negate and handle edge cases.
   __ Negs(result, dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(vs, instr->environment());
+      DeoptimizeIf(vs, instr);
     }
     return;
   }
@@ -3956,17 +3945,17 @@
   Register dividend = ToRegister32(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister32(instr->result());
-  ASSERT(!AreAliased(dividend, result));
+  DCHECK(!AreAliased(dividend, result));
 
   if (divisor == 0) {
-    Deoptimize(instr->environment());
+    Deoptimize(instr);
     return;
   }
 
   // Check for (0 / -x) that will produce negative zero.
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIfZero(dividend, instr->environment());
+    DeoptimizeIfZero(dividend, instr);
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -3981,7 +3970,7 @@
   // In the general case we may need to adjust before and after the truncating
   // division to get a flooring division.
   Register temp = ToRegister32(instr->temp());
-  ASSERT(!AreAliased(temp, dividend, result));
+  DCHECK(!AreAliased(temp, dividend, result));
   Label needs_adjustment, done;
   __ Cmp(dividend, 0);
   __ B(divisor > 0 ? lt : gt, &needs_adjustment);
@@ -4009,14 +3998,14 @@
   __ Sdiv(result, dividend, divisor);
 
   // Check for x / 0.
-  DeoptimizeIfZero(divisor, instr->environment());
+  DeoptimizeIfZero(divisor, instr);
 
   // Check for (kMinInt / -1).
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
     // The V flag will be set iff dividend == kMinInt.
     __ Cmp(dividend, 1);
     __ Ccmp(divisor, -1, NoFlag, vs);
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -4026,7 +4015,7 @@
     // "divisor" can't be null because the code would have already been
     // deoptimized. The Z flag is set only if (divisor < 0) and (dividend == 0).
     // In this case we need to deoptimize to produce a -0.
-    DeoptimizeIf(eq, instr->environment());
+    DeoptimizeIf(eq, instr);
   }
 
   Label done;
@@ -4044,11 +4033,11 @@
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
-  ASSERT(instr->IsMarkedAsCall());
-  ASSERT(ToDoubleRegister(instr->value()).is(d0));
+  DCHECK(instr->IsMarkedAsCall());
+  DCHECK(ToDoubleRegister(instr->value()).is(d0));
   __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
                    0, 1);
-  ASSERT(ToDoubleRegister(instr->result()).Is(d0));
+  DCHECK(ToDoubleRegister(instr->result()).Is(d0));
 }
 
 
@@ -4087,35 +4076,35 @@
   Representation exponent_type = instr->hydrogen()->right()->representation();
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
-  ASSERT(!instr->right()->IsDoubleRegister() ||
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  Register integer_exponent = MathPowIntegerDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
          ToDoubleRegister(instr->right()).is(d1));
-  ASSERT(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
-         ToRegister(instr->right()).is(x11));
-  ASSERT(!exponent_type.IsInteger32() || ToRegister(instr->right()).is(x12));
-  ASSERT(ToDoubleRegister(instr->left()).is(d0));
-  ASSERT(ToDoubleRegister(instr->result()).is(d0));
+  DCHECK(exponent_type.IsInteger32() || !instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(!exponent_type.IsInteger32() ||
+         ToRegister(instr->right()).is(integer_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(d0));
+  DCHECK(ToDoubleRegister(instr->result()).is(d0));
 
   if (exponent_type.IsSmi()) {
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
-    __ JumpIfSmi(x11, &no_deopt);
-    __ Ldr(x0, FieldMemOperand(x11, HeapObject::kMapOffset));
-    DeoptimizeIfNotRoot(x0, Heap::kHeapNumberMapRootIndex,
-                        instr->environment());
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DeoptimizeIfNotHeapNumber(tagged_exponent, instr);
     __ Bind(&no_deopt);
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
   } else if (exponent_type.IsInteger32()) {
     // Ensure integer exponent has no garbage in top 32-bits, as MathPowStub
     // supports large integer exponents.
-    Register exponent = ToRegister(instr->right());
-    __ Sxtw(exponent, exponent);
+    __ Sxtw(integer_exponent, integer_exponent);
     MathPowStub stub(isolate(), MathPowStub::INTEGER);
     __ CallStub(&stub);
   } else {
-    ASSERT(exponent_type.IsDouble());
+    DCHECK(exponent_type.IsDouble());
     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
     __ CallStub(&stub);
   }
@@ -4127,7 +4116,7 @@
   DoubleRegister result = ToDoubleRegister(instr->result());
   DoubleRegister scratch_d = double_scratch();
 
-  ASSERT(!AreAliased(input, result, scratch_d));
+  DCHECK(!AreAliased(input, result, scratch_d));
 
   Label done;
 
@@ -4185,18 +4174,18 @@
 
   // Deoptimize if the result > 1, as it must be larger than 32 bits.
   __ Cmp(result, 1);
-  DeoptimizeIf(hi, instr->environment());
+  DeoptimizeIf(hi, instr);
 
   // Deoptimize for negative inputs, which at this point are only numbers in
   // the range [-0.5, -0.0].
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     __ Fmov(result, input);
-    DeoptimizeIfNegative(result, instr->environment());
+    DeoptimizeIfNegative(result, instr);
   }
 
   // Deoptimize if the input was NaN.
   __ Fcmp(input, dot_five);
-  DeoptimizeIf(vs, instr->environment());
+  DeoptimizeIf(vs, instr);
 
   // Now, the only unhandled inputs are in the range [0.0, 1.5) (or [-0.5, 1.5)
   // if we didn't generate a -0.0 bailout). If input >= 0.5 then return 1,
@@ -4206,6 +4195,14 @@
 }
 
 
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ Fcvt(result.S(), input);
+  __ Fcvt(result, result.S());
+}
+
+
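
The new DoMathFround lowers Math.fround to a narrowing/widening Fcvt pair, which in portable C++ is simply (a sketch; helper name invented):

static double MathFroundModel(double x) {
  // Round to the nearest representable float, then widen back to double.
  return static_cast<double>(static_cast<float>(x));
}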
 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   DoubleRegister input = ToDoubleRegister(instr->value());
   DoubleRegister result = ToDoubleRegister(instr->result());
@@ -4218,7 +4215,7 @@
   if (instr->hydrogen()->representation().IsInteger32()) {
     Register result = ToRegister32(instr->result());
     Register left = ToRegister32(instr->left());
-    Operand right = ToOperand32I(instr->right());
+    Operand right = ToOperand32(instr->right());
 
     __ Cmp(left, right);
     __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
@@ -4230,7 +4227,7 @@
     __ Cmp(left, right);
     __ Csel(result, left, right, (op == HMathMinMax::kMathMax) ? ge : le);
   } else {
-    ASSERT(instr->hydrogen()->representation().IsDouble());
+    DCHECK(instr->hydrogen()->representation().IsDouble());
     DoubleRegister result = ToDoubleRegister(instr->result());
     DoubleRegister left = ToDoubleRegister(instr->left());
     DoubleRegister right = ToDoubleRegister(instr->right());
@@ -4238,7 +4235,7 @@
     if (op == HMathMinMax::kMathMax) {
       __ Fmax(result, left, right);
     } else {
-      ASSERT(op == HMathMinMax::kMathMin);
+      DCHECK(op == HMathMinMax::kMathMin);
       __ Fmin(result, left, right);
     }
   }
@@ -4248,7 +4245,7 @@
 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   Register dividend = ToRegister32(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister32(instr->result())));
+  DCHECK(dividend.is(ToRegister32(instr->result())));
 
   // Theoretically, a variation of the branch-free code for integer division by
   // a power of 2 (calculating the remainder via an additional multiplication
@@ -4266,7 +4263,7 @@
     __ And(dividend, dividend, mask);
     __ Negs(dividend, dividend);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr->environment());
+      DeoptimizeIf(eq, instr);
     }
     __ B(&done);
   }
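
The negative-dividend path above (negate, mask, negate back) matches JS remainder semantics, where the sign follows the dividend. A sketch (helper name invented; kMinInt handled via 64-bit/unsigned arithmetic):

#include <cstdint>
#include <cstdlib>

static int32_t ModByPowerOf2(int32_t dividend, int32_t divisor) {
  // mask = |divisor| - 1, computed in 64 bits so divisor == kMinInt works.
  uint32_t mask =
      static_cast<uint32_t>(std::llabs(static_cast<int64_t>(divisor)) - 1);
  if (dividend < 0) {
    uint32_t abs_dividend = 0u - static_cast<uint32_t>(dividend);
    // A zero result here is really -0, hence the DeoptimizeIf(eq) above.
    return -static_cast<int32_t>(abs_dividend & mask);
  }
  return static_cast<int32_t>(static_cast<uint32_t>(dividend) & mask);
}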
@@ -4282,10 +4279,10 @@
   int32_t divisor = instr->divisor();
   Register result = ToRegister32(instr->result());
   Register temp = ToRegister32(instr->temp());
-  ASSERT(!AreAliased(dividend, result, temp));
+  DCHECK(!AreAliased(dividend, result, temp));
 
   if (divisor == 0) {
-    Deoptimize(instr->environment());
+    Deoptimize(instr);
     return;
   }
 
@@ -4299,7 +4296,7 @@
   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label remainder_not_zero;
     __ Cbnz(result, &remainder_not_zero);
-    DeoptimizeIfNegative(dividend, instr->environment());
+    DeoptimizeIfNegative(dividend, instr);
     __ bind(&remainder_not_zero);
   }
 }
@@ -4314,26 +4311,26 @@
   // modulo = dividend - quotient * divisor
   __ Sdiv(result, dividend, divisor);
   if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIfZero(divisor, instr->environment());
+    DeoptimizeIfZero(divisor, instr);
   }
   __ Msub(result, result, divisor, dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     __ Cbnz(result, &done);
-    DeoptimizeIfNegative(dividend, instr->environment());
+    DeoptimizeIfNegative(dividend, instr);
   }
   __ Bind(&done);
 }
 
 
 void LCodeGen::DoMulConstIS(LMulConstIS* instr) {
-  ASSERT(instr->hydrogen()->representation().IsSmiOrInteger32());
+  DCHECK(instr->hydrogen()->representation().IsSmiOrInteger32());
   bool is_smi = instr->hydrogen()->representation().IsSmi();
   Register result =
       is_smi ? ToRegister(instr->result()) : ToRegister32(instr->result());
   Register left =
       is_smi ? ToRegister(instr->left()) : ToRegister32(instr->left());
   int32_t right = ToInteger32(instr->right());
-  ASSERT((right > -kMaxInt) || (right < kMaxInt));
+  DCHECK((right > -kMaxInt) && (right < kMaxInt));
 
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   bool bailout_on_minus_zero =
@@ -4342,10 +4339,10 @@
   if (bailout_on_minus_zero) {
     if (right < 0) {
       // The result is -0 if right is negative and left is zero.
-      DeoptimizeIfZero(left, instr->environment());
+      DeoptimizeIfZero(left, instr);
     } else if (right == 0) {
       // The result is -0 if right is zero and left is negative.
-      DeoptimizeIfNegative(left, instr->environment());
+      DeoptimizeIfNegative(left, instr);
     }
   }
 
@@ -4355,7 +4352,7 @@
       if (can_overflow) {
         // Only 0x80000000 can overflow here.
         __ Negs(result, left);
-        DeoptimizeIf(vs, instr->environment());
+        DeoptimizeIf(vs, instr);
       } else {
         __ Neg(result, left);
       }
@@ -4371,7 +4368,7 @@
     case 2:
       if (can_overflow) {
         __ Adds(result, left, left);
-        DeoptimizeIf(vs, instr->environment());
+        DeoptimizeIf(vs, instr);
       } else {
         __ Add(result, left, left);
       }
@@ -4382,15 +4379,15 @@
       // can be done efficiently with shifted operands.
       int32_t right_abs = Abs(right);
 
-      if (IsPowerOf2(right_abs)) {
+      if (base::bits::IsPowerOfTwo32(right_abs)) {
         int right_log2 = WhichPowerOf2(right_abs);
 
         if (can_overflow) {
           Register scratch = result;
-          ASSERT(!AreAliased(scratch, left));
+          DCHECK(!AreAliased(scratch, left));
           __ Cls(scratch, left);
           __ Cmp(scratch, right_log2);
-          DeoptimizeIf(lt, instr->environment());
+          DeoptimizeIf(lt, instr);
         }
 
         if (right >= 0) {
@@ -4400,7 +4397,7 @@
           // result = -left << log2(-right)
           if (can_overflow) {
             __ Negs(result, Operand(left, LSL, right_log2));
-            DeoptimizeIf(vs, instr->environment());
+            DeoptimizeIf(vs, instr);
           } else {
             __ Neg(result, Operand(left, LSL, right_log2));
           }
@@ -4412,13 +4409,13 @@
       // For the following cases, we could perform a conservative overflow check
       // with CLS as above. However the few cycles saved are likely not worth
       // the risk of deoptimizing more often than required.
-      ASSERT(!can_overflow);
+      DCHECK(!can_overflow);
 
       if (right >= 0) {
-        if (IsPowerOf2(right - 1)) {
+        if (base::bits::IsPowerOfTwo32(right - 1)) {
           // result = left + left << log2(right - 1)
           __ Add(result, left, Operand(left, LSL, WhichPowerOf2(right - 1)));
-        } else if (IsPowerOf2(right + 1)) {
+        } else if (base::bits::IsPowerOfTwo32(right + 1)) {
           // result = -left + left << log2(right + 1)
           __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(right + 1)));
           __ Neg(result, result);
@@ -4426,10 +4423,10 @@
           UNREACHABLE();
         }
       } else {
-        if (IsPowerOf2(-right + 1)) {
+        if (base::bits::IsPowerOfTwo32(-right + 1)) {
           // result = left - left << log2(-right + 1)
           __ Sub(result, left, Operand(left, LSL, WhichPowerOf2(-right + 1)));
-        } else if (IsPowerOf2(-right - 1)) {
+        } else if (base::bits::IsPowerOfTwo32(-right - 1)) {
           // result = -left - left << log2(-right - 1)
           __ Add(result, left, Operand(left, LSL, WhichPowerOf2(-right - 1)));
           __ Neg(result, result);
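
The shifted-operand cases above are standard strength reduction. As a sketch of the two positive-constant identities (helper name invented; this path is only taken when overflow is impossible):

#include <cstdint>

// right == (1 << k) + 1  =>  left * right == left + (left << k)
// right == (1 << k) - 1  =>  left * right == (left << k) - left
static int32_t MulByShiftedConst(int32_t left, int k, bool plus_one) {
  int32_t shifted = static_cast<int32_t>(static_cast<uint32_t>(left) << k);
  return plus_one ? shifted + left : shifted - left;
}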
@@ -4458,13 +4455,13 @@
     //  - If so (eq), set N (mi) if left + right is negative.
     //  - Otherwise, clear N.
     __ Ccmn(left, right, NoFlag, eq);
-    DeoptimizeIf(mi, instr->environment());
+    DeoptimizeIf(mi, instr);
   }
 
   if (can_overflow) {
     __ Smull(result.X(), left, right);
     __ Cmp(result.X(), Operand(result, SXTW));
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   } else {
     __ Mul(result, left, right);
   }
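
The Ccmn-based minus-zero test above encodes: the product is zero and one factor is negative, so a JS multiply would yield -0. A sketch (helper name invented):

#include <cstdint>

static bool MulProducesMinusZero(int32_t left, int32_t right) {
  // When one factor is 0, left + right is just the other factor, so its
  // sign tells us whether the result would be -0.
  return (left == 0 || right == 0) &&
         (static_cast<int64_t>(left) + right) < 0;
}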
@@ -4488,7 +4485,7 @@
     //  - If so (eq), set N (mi) if left + right is negative.
     //  - Otherwise, clear N.
     __ Ccmn(left, right, NoFlag, eq);
-    DeoptimizeIf(mi, instr->environment());
+    DeoptimizeIf(mi, instr);
   }
 
   STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
@@ -4496,7 +4493,7 @@
     __ Smulh(result, left, right);
     __ Cmp(result, Operand(result.W(), SXTW));
     __ SmiTag(result);
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr);
   } else {
     if (AreAliased(result, left, right)) {
       // All three registers are the same: half untag the input and then
@@ -4510,7 +4507,7 @@
       __ SmiUntag(result, left);
       __ Mul(result, result, right);
     } else {
-      ASSERT(!left.Is(result));
+      DCHECK(!left.Is(result));
       // Registers result and right alias, left is distinct, or all registers
       // are distinct: untag right into result, and then multiply by left,
       // giving a tagged result.
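
The Smulh path above exploits the tagged representation directly. A sketch, assuming a compiler with __int128 (GCC/Clang); the helper name is invented and the Cmp/SXTW overflow check is omitted:

#include <cstdint>

static int64_t SmiMulViaSmulh(int64_t tagged_a, int64_t tagged_b) {
  // tagged = value << 32, so the high 64 bits of the 128-bit product are
  // (a << 32) * (b << 32) / 2^64 == a * b -- already untagged.
  __int128 product = static_cast<__int128>(tagged_a) * tagged_b;
  int64_t untagged = static_cast<int64_t>(product >> 64);
  return static_cast<int64_t>(static_cast<uint64_t>(untagged) << 32);  // Retag.
}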
@@ -4528,14 +4525,14 @@
   Register result = ToRegister(instr->result());
   __ Mov(result, 0);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   // NumberTagU and NumberTagD use the context from the frame, rather than
   // the environment's HContext or HInlinedContext value.
-  // They only call Runtime::kHiddenAllocateHeapNumber.
+  // They only call Runtime::kAllocateHeapNumber.
   // The corresponding HChange instructions are added in a phase that does
   // not have easy access to the local context.
   __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ StoreToSafepointRegisterSlot(x0, result);
@@ -4593,15 +4590,15 @@
   __ Mov(dst, 0);
   {
     // Preserve the value of all registers.
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    PushSafepointRegistersScope scope(this);
 
     // NumberTagU and NumberTagD use the context from the frame, rather than
     // the environment's HContext or HInlinedContext value.
-    // They only call Runtime::kHiddenAllocateHeapNumber.
+    // They only call Runtime::kAllocateHeapNumber.
     // The corresponding HChange instructions are added in a phase that does
     // not have easy access to the local context.
     __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(x0, dst);
@@ -4663,26 +4660,22 @@
     Label convert_undefined;
 
     // Heap number map check.
-    __ Ldr(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
     if (can_convert_undefined_to_nan) {
-      __ JumpIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
-                       &convert_undefined);
+      __ JumpIfNotHeapNumber(input, &convert_undefined);
     } else {
-      DeoptimizeIfNotRoot(scratch, Heap::kHeapNumberMapRootIndex,
-                          instr->environment());
+      DeoptimizeIfNotHeapNumber(input, instr);
     }
 
     // Load heap number.
     __ Ldr(result, FieldMemOperand(input, HeapNumber::kValueOffset));
     if (instr->hydrogen()->deoptimize_on_minus_zero()) {
-      DeoptimizeIfMinusZero(result, instr->environment());
+      DeoptimizeIfMinusZero(result, instr);
     }
     __ B(&done);
 
     if (can_convert_undefined_to_nan) {
       __ Bind(&convert_undefined);
-      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
-                          instr->environment());
+      DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
 
       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
       __ Ldr(result, FieldMemOperand(scratch, HeapNumber::kValueOffset));
@@ -4690,7 +4683,7 @@
     }
 
   } else {
-    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
     // Fall through to load_smi.
   }
 
@@ -4710,7 +4703,7 @@
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
-  ASSERT(!environment->HasBeenRegistered());
+  DCHECK(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
 
   GenerateOsrPrologue();
@@ -4849,7 +4842,7 @@
   Register temp = ToRegister(instr->temp());
 
   if (FLAG_debug_code) {
-    ASSERT(ToRegister(instr->context()).is(cp));
+    DCHECK(ToRegister(instr->context()).is(cp));
     Register index = ToRegister(instr->index());
     static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
     static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
@@ -4875,7 +4868,7 @@
   Register output = ToRegister(instr->result());
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
-    DeoptimizeIfNegative(input.W(), instr->environment());
+    DeoptimizeIfNegative(input.W(), instr);
   }
   __ SmiTag(output, input);
 }
@@ -4887,7 +4880,7 @@
   Label done, untag;
 
   if (instr->needs_check()) {
-    DeoptimizeIfNotSmi(input, instr->environment());
+    DeoptimizeIfNotSmi(input, instr);
   }
 
   __ Bind(&untag);
@@ -4908,22 +4901,21 @@
       case Token::SAR: __ Asr(result, left, right); break;
       case Token::SHL: __ Lsl(result, left, right); break;
       case Token::SHR:
-        if (instr->can_deopt()) {
-          Label right_not_zero;
-          __ Cbnz(right, &right_not_zero);
-          DeoptimizeIfNegative(left, instr->environment());
-          __ Bind(&right_not_zero);
-        }
         __ Lsr(result, left, right);
+        if (instr->can_deopt()) {
+          // If `left >>> right` >= 0x80000000, the result is not representable
+          // in a signed 32-bit smi.
+          DeoptimizeIfNegative(result, instr);
+        }
         break;
       default: UNREACHABLE();
     }
   } else {
-    ASSERT(right_op->IsConstantOperand());
+    DCHECK(right_op->IsConstantOperand());
     int shift_count = JSShiftAmountFromLConstant(right_op);
     if (shift_count == 0) {
       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
-        DeoptimizeIfNegative(left, instr->environment());
+        DeoptimizeIfNegative(left, instr);
       }
       __ Mov(result, left, kDiscardForSameWReg);
     } else {
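
The relocated SHR deopt now tests the result rather than the operands: `left >>> right` is unsigned in JS, so any result with bit 31 set has no int32 representation. A sketch (helper name invented):

#include <cstdint>

static bool ShrResultWouldDeopt(uint32_t shr_result) {
  // DeoptimizeIfNegative fires on the sign bit of the W view.
  return (shr_result & 0x80000000u) != 0;
}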
@@ -4944,49 +4936,49 @@
   Register left = ToRegister(instr->left());
   Register result = ToRegister(instr->result());
 
-  // Only ROR by register needs a temp.
-  ASSERT(((instr->op() == Token::ROR) && right_op->IsRegister()) ||
-         (instr->temp() == NULL));
-
   if (right_op->IsRegister()) {
     Register right = ToRegister(instr->right());
+
+    // JavaScript shifts only look at the bottom 5 bits of the 'right' operand.
+    // Since we're handling smis in X registers, we have to extract these bits
+    // explicitly.
+    __ Ubfx(result, right, kSmiShift, 5);
+
     switch (instr->op()) {
       case Token::ROR: {
-        Register temp = ToRegister(instr->temp());
-        __ Ubfx(temp, right, kSmiShift, 5);
-        __ SmiUntag(result, left);
-        __ Ror(result.W(), result.W(), temp.W());
+        // This is the only case that needs a scratch register. To keep things
+        // simple for the other cases, borrow a MacroAssembler scratch register.
+        UseScratchRegisterScope temps(masm());
+        Register temp = temps.AcquireW();
+        __ SmiUntag(temp, left);
+        __ Ror(result.W(), temp.W(), result.W());
         __ SmiTag(result);
         break;
       }
       case Token::SAR:
-        __ Ubfx(result, right, kSmiShift, 5);
         __ Asr(result, left, result);
         __ Bic(result, result, kSmiShiftMask);
         break;
       case Token::SHL:
-        __ Ubfx(result, right, kSmiShift, 5);
         __ Lsl(result, left, result);
         break;
       case Token::SHR:
-        if (instr->can_deopt()) {
-          Label right_not_zero;
-          __ Cbnz(right, &right_not_zero);
-          DeoptimizeIfNegative(left, instr->environment());
-          __ Bind(&right_not_zero);
-        }
-        __ Ubfx(result, right, kSmiShift, 5);
         __ Lsr(result, left, result);
         __ Bic(result, result, kSmiShiftMask);
+        if (instr->can_deopt()) {
+          // If `left >>> right` >= 0x80000000, the result is not representable
+          // in a signed 32-bit smi.
+          DeoptimizeIfNegative(result, instr);
+        }
         break;
       default: UNREACHABLE();
     }
   } else {
-    ASSERT(right_op->IsConstantOperand());
+    DCHECK(right_op->IsConstantOperand());
     int shift_count = JSShiftAmountFromLConstant(right_op);
     if (shift_count == 0) {
       if ((instr->op() == Token::SHR) && instr->can_deopt()) {
-        DeoptimizeIfNegative(left, instr->environment());
+        DeoptimizeIfNegative(left, instr);
       }
       __ Mov(result, left);
     } else {
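
The hoisted Ubfx above extracts the JS shift amount straight from the tagged operand. A sketch (helper name invented):

#include <cstdint>

static int SmiShiftAmount(uint64_t tagged_right) {
  // Ubfx(result, right, kSmiShift, 5): bits [32, 37) of the tagged smi,
  // i.e. the untagged value's low five bits (`right & 31`).
  return static_cast<int>((tagged_right >> 32) & 0x1f);
}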
@@ -5020,10 +5012,10 @@
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Register scratch1 = x5;
   Register scratch2 = x6;
-  ASSERT(instr->IsMarkedAsCall());
+  DCHECK(instr->IsMarkedAsCall());
 
   ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
   // TODO(all): if Mov could handle object in new space then it could be used
@@ -5031,17 +5023,17 @@
   __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
   __ Mov(scratch2, Smi::FromInt(instr->hydrogen()->flags()));
   __ Push(cp, scratch1, scratch2);  // The context is the first argument.
-  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }
 
 
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   LoadContextFromDeferred(instr->context());
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
@@ -5058,7 +5050,7 @@
     LStackCheck* instr_;
   };
 
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   // There is no LLazyBailout instruction for stack-checks. We have to
   // prepare for lazy deoptimization explicitly here.
@@ -5070,14 +5062,14 @@
 
     PredictableCodeSizeScope predictable(masm_,
                                          Assembler::kCallSizeWithRelocation);
-    ASSERT(instr->context()->IsRegister());
-    ASSERT(ToRegister(instr->context()).is(cp));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
     CallCode(isolate()->builtins()->StackCheck(),
              RelocInfo::CODE_TARGET,
              instr);
     __ Bind(&done);
   } else {
-    ASSERT(instr->hydrogen()->is_backwards_branch());
+    DCHECK(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
     DeferredStackCheck* deferred_stack_check =
         new(zone()) DeferredStackCheck(this, instr);
@@ -5115,8 +5107,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ Ldr(scratch, target);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex,
-                       instr->environment());
+      DeoptimizeIfRoot(scratch, Heap::kTheHoleValueRootIndex, instr);
     } else {
       __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, &skip_assignment);
     }
@@ -5154,8 +5145,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     Register payload = ToRegister(instr->temp2());
     __ Ldr(payload, FieldMemOperand(cell, Cell::kValueOffset));
-    DeoptimizeIfRoot(
-        payload, Heap::kTheHoleValueRootIndex, instr->environment());
+    DeoptimizeIfRoot(payload, Heap::kTheHoleValueRootIndex, instr);
   }
 
   // Store the value.
@@ -5174,7 +5164,7 @@
   bool key_is_constant = instr->key()->IsConstantOperand();
   int constant_key = 0;
   if (key_is_constant) {
-    ASSERT(instr->temp() == NULL);
+    DCHECK(instr->temp() == NULL);
     constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
     if (constant_key & 0xf0000000) {
       Abort(kArrayIndexConstantValueTooBig);
@@ -5294,10 +5284,10 @@
         ToInteger32(const_operand) * kPointerSize;
     store_base = elements;
     if (representation.IsInteger32()) {
-      ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-      ASSERT(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
-      STATIC_ASSERT((kSmiValueSize == 32) && (kSmiShift == 32) &&
-                    (kSmiTag == 0));
+      DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+      DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+      STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+      STATIC_ASSERT(kSmiTag == 0);
       mem_op = UntagSmiMemOperand(store_base, offset);
     } else {
       mem_op = MemOperand(store_base, offset);
@@ -5315,7 +5305,7 @@
   __ Store(value, mem_op, representation);
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    ASSERT(representation.IsTagged());
+    DCHECK(representation.IsTagged());
     // This assignment may cause element_addr to alias store_base.
     Register element_addr = scratch;
     SmiCheck check_needed =
@@ -5331,14 +5321,13 @@
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).Is(x2));
-  ASSERT(ToRegister(instr->key()).Is(x1));
-  ASSERT(ToRegister(instr->value()).Is(x0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  Handle<Code> ic = instr->strict_mode() == STRICT
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -5351,8 +5340,8 @@
   int offset = access.offset();
 
   if (access.IsExternalMemory()) {
-    ASSERT(!instr->hydrogen()->has_transition());
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     Register value = ToRegister(instr->value());
     __ Store(value, MemOperand(object, offset), representation);
     return;
@@ -5361,9 +5350,9 @@
   __ AssertNotSmi(object);
 
   if (representation.IsDouble()) {
-    ASSERT(access.IsInobject());
-    ASSERT(!instr->hydrogen()->has_transition());
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     FPRegister value = ToDoubleRegister(instr->value());
     __ Str(value, FieldMemOperand(object, offset));
     return;
@@ -5371,7 +5360,7 @@
 
   Register value = ToRegister(instr->value());
 
-  ASSERT(!representation.IsSmi() ||
+  DCHECK(!representation.IsSmi() ||
          !instr->value()->IsConstantOperand() ||
          IsInteger32Constant(LConstantOperand::cast(instr->value())));
 
@@ -5404,7 +5393,7 @@
 
   if (representation.IsSmi() &&
      instr->hydrogen()->value()->representation().IsInteger32()) {
-    ASSERT(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
 #ifdef DEBUG
     Register temp0 = ToRegister(instr->temp0());
     __ Ldr(temp0, FieldMemOperand(destination, offset));
@@ -5412,11 +5401,12 @@
     // If destination aliased temp0, restore it to the address calculated
     // earlier.
     if (destination.Is(temp0)) {
-      ASSERT(!access.IsInobject());
+      DCHECK(!access.IsInobject());
       __ Ldr(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
     }
 #endif
-    STATIC_ASSERT(kSmiValueSize == 32 && kSmiShift == 32 && kSmiTag == 0);
+    STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
+    STATIC_ASSERT(kSmiTag == 0);
     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
              Representation::Integer32());
   } else {
@@ -5437,21 +5427,20 @@
 
 
 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->value()).is(x0));
-  ASSERT(ToRegister(instr->object()).is(x1));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  // Name must be in x2.
-  __ Mov(x2, Operand(instr->name()));
+  __ Mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->left()).Is(x1));
-  ASSERT(ToRegister(instr->right()).Is(x0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).Is(x1));
+  DCHECK(ToRegister(instr->right()).Is(x0));
   StringAddStub stub(isolate(),
                      instr->hydrogen()->flags(),
                      instr->hydrogen()->pretenure_flag());
@@ -5491,14 +5480,14 @@
   // contained in the register pointer map.
   __ Mov(result, 0);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ Push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
   Register index = ToRegister(instr->index());
   __ SmiTagAndPush(index);
 
-  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                           instr->context());
   __ AssertSmi(x0);
   __ SmiUntag(x0);
@@ -5520,7 +5509,7 @@
   DeferredStringCharFromCode* deferred =
       new(zone()) DeferredStringCharFromCode(this, instr);
 
-  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   Register char_code = ToRegister32(instr->char_code());
   Register result = ToRegister(instr->result());
 
@@ -5544,7 +5533,7 @@
   // contained in the register pointer map.
   __ Mov(result, 0);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ SmiTagAndPush(char_code);
   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
   __ StoreToSafepointRegisterSlot(x0, result);
@@ -5552,10 +5541,10 @@
 
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   InlineSmiCheckInfo::EmitNotInlined(masm());
 
@@ -5569,11 +5558,11 @@
   bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
   Register result = ToRegister32(instr->result());
   Register left = ToRegister32(instr->left());
-  Operand right = ToShiftedRightOperand32I(instr->right(), instr);
+  Operand right = ToShiftedRightOperand32(instr->right(), instr);
 
   if (can_overflow) {
     __ Subs(result, left, right);
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   } else {
     __ Sub(result, left, right);
   }
@@ -5587,7 +5576,7 @@
   Operand right = ToOperand(instr->right());
   if (can_overflow) {
     __ Subs(result, left, right);
-    DeoptimizeIf(vs, instr->environment());
+    DeoptimizeIf(vs, instr);
   } else {
     __ Sub(result, left, right);
   }
@@ -5604,15 +5593,12 @@
 
   Label done;
 
-  // Load heap object map.
-  __ Ldr(scratch1, FieldMemOperand(input, HeapObject::kMapOffset));
-
   if (instr->truncating()) {
     Register output = ToRegister(instr->result());
     Label check_bools;
 
     // If it's not a heap number, jump to undefined check.
-    __ JumpIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex, &check_bools);
+    __ JumpIfNotHeapNumber(input, &check_bools);
 
     // A heap number: load value and convert to int32 using truncating function.
     __ TruncateHeapNumberToI(output, input);
@@ -5630,28 +5616,24 @@
 
     // Output contains zero; undefined is converted to zero for truncating
     // conversions.
-    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex,
-                        instr->environment());
+    DeoptimizeIfNotRoot(input, Heap::kUndefinedValueRootIndex, instr);
   } else {
     Register output = ToRegister32(instr->result());
-
     DoubleRegister dbl_scratch2 = ToDoubleRegister(temp2);
 
-    // Deoptimized if it's not a heap number.
-    DeoptimizeIfNotRoot(scratch1, Heap::kHeapNumberMapRootIndex,
-                        instr->environment());
+    DeoptimizeIfNotHeapNumber(input, instr);
 
     // A heap number: load value and convert to int32 using non-truncating
     // function. If the result is out of range, branch to deoptimize.
     __ Ldr(dbl_scratch1, FieldMemOperand(input, HeapNumber::kValueOffset));
     __ TryRepresentDoubleAsInt32(output, dbl_scratch1, dbl_scratch2);
-    DeoptimizeIf(ne, instr->environment());
+    DeoptimizeIf(ne, instr, "lost precision or NaN");
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ Cmp(output, 0);
       __ B(ne, &done);
       __ Fmov(scratch1, dbl_scratch1);
-      DeoptimizeIfNegative(scratch1, instr->environment());
+      DeoptimizeIfNegative(scratch1, instr, "minus zero");
     }
   }
   __ Bind(&done);
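
Note: the DeoptimizeIf calls in this hunk now attach human-readable detail
strings ("lost precision or NaN", "minus zero") to their jump-table entries.
The non-truncating path bails out exactly when the heap number's double
cannot round-trip through int32; a plain-C++ sketch of that test (not V8's
generated code):

    #include <cstdint>

    bool RepresentableAsInt32(double input, int32_t* out) {
      // NaN and out-of-range values fail here (both int32 bounds are exactly
      // representable as doubles).
      if (!(input >= -2147483648.0 && input <= 2147483647.0)) return false;
      int32_t candidate = static_cast<int32_t>(input);
      if (static_cast<double>(candidate) != input) return false;  // lost precision
      *out = candidate;
      return true;
    }

-0.0 round-trips to 0, which is why the kBailoutOnMinusZero case above needs
the extra sign-bit check (Fmov followed by DeoptimizeIfNegative).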
@@ -5695,15 +5677,15 @@
 
 
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  ASSERT(ToRegister(instr->value()).Is(x0));
-  ASSERT(ToRegister(instr->result()).Is(x0));
+  DCHECK(ToRegister(instr->value()).Is(x0));
+  DCHECK(ToRegister(instr->result()).Is(x0));
   __ Push(x0);
   CallRuntime(Runtime::kToFastProperties, 1, instr);
 }
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Label materialized;
   // Registers will be used as follows:
   // x7 = literals array.
@@ -5722,7 +5704,7 @@
   __ Mov(x11, Operand(instr->hydrogen()->pattern()));
   __ Mov(x10, Operand(instr->hydrogen()->flags()));
   __ Push(x7, x12, x11, x10);
-  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ Mov(x1, x0);
 
   __ Bind(&materialized);
@@ -5735,7 +5717,7 @@
   __ Bind(&runtime_allocate);
   __ Mov(x0, Smi::FromInt(size));
   __ Push(x1, x0);
-  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ Pop(x1);
 
   __ Bind(&allocated);
@@ -5771,15 +5753,14 @@
       __ CheckMap(object, temps.AcquireX(), from_map, &not_applicable,
                   DONT_DO_SMI_CHECK);
     }
-    ASSERT(object.is(x0));
-    ASSERT(ToRegister(instr->context()).is(cp));
-    PushSafepointRegistersScope scope(
-        this, Safepoint::kWithRegistersAndDoubles);
+    DCHECK(object.is(x0));
+    DCHECK(ToRegister(instr->context()).is(cp));
+    PushSafepointRegistersScope scope(this);
     __ Mov(x1, Operand(to_map));
     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
     __ CallStub(&stub);
-    RecordSafepointWithRegistersAndDoubles(
+    RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
   __ Bind(&not_applicable);
@@ -5793,7 +5774,7 @@
 
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp1, temp2, &no_memento_found);
-  DeoptimizeIf(eq, instr->environment());
+  DeoptimizeIf(eq, instr);
   __ Bind(&no_memento_found);
 }
 
@@ -5823,16 +5804,25 @@
 
   Factory* factory = isolate()->factory();
   if (String::Equals(type_name, factory->number_string())) {
-    ASSERT(instr->temp1() != NULL);
-    Register map = ToRegister(instr->temp1());
-
     __ JumpIfSmi(value, true_label);
-    __ Ldr(map, FieldMemOperand(value, HeapObject::kMapOffset));
-    __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
-    EmitBranch(instr, eq);
+
+    int true_block = instr->TrueDestination(chunk_);
+    int false_block = instr->FalseDestination(chunk_);
+    int next_block = GetNextEmittedBlock();
+
+    if (true_block == false_block) {
+      EmitGoto(true_block);
+    } else if (true_block == next_block) {
+      __ JumpIfNotHeapNumber(value, chunk_->GetAssemblyLabel(false_block));
+    } else {
+      __ JumpIfHeapNumber(value, chunk_->GetAssemblyLabel(true_block));
+      if (false_block != next_block) {
+        __ B(chunk_->GetAssemblyLabel(false_block));
+      }
+    }
 
   } else if (String::Equals(type_name, factory->string_string())) {
-    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
     Register map = ToRegister(instr->temp1());
     Register scratch = ToRegister(instr->temp2());
 
@@ -5843,7 +5833,7 @@
     EmitTestAndBranch(instr, eq, scratch, 1 << Map::kIsUndetectable);
 
   } else if (String::Equals(type_name, factory->symbol_string())) {
-    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
     Register map = ToRegister(instr->temp1());
     Register scratch = ToRegister(instr->temp2());
 
@@ -5856,13 +5846,8 @@
     __ CompareRoot(value, Heap::kFalseValueRootIndex);
     EmitBranch(instr, eq);
 
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(type_name, factory->null_string())) {
-    __ CompareRoot(value, Heap::kNullValueRootIndex);
-    EmitBranch(instr, eq);
-
   } else if (String::Equals(type_name, factory->undefined_string())) {
-    ASSERT(instr->temp1() != NULL);
+    DCHECK(instr->temp1() != NULL);
     Register scratch = ToRegister(instr->temp1());
 
     __ JumpIfRoot(value, Heap::kUndefinedValueRootIndex, true_label);
@@ -5874,7 +5859,7 @@
 
   } else if (String::Equals(type_name, factory->function_string())) {
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
-    ASSERT(instr->temp1() != NULL);
+    DCHECK(instr->temp1() != NULL);
     Register type = ToRegister(instr->temp1());
 
     __ JumpIfSmi(value, false_label);
@@ -5883,14 +5868,12 @@
     EmitCompareAndBranch(instr, eq, type, JS_FUNCTION_PROXY_TYPE);
 
   } else if (String::Equals(type_name, factory->object_string())) {
-    ASSERT((instr->temp1() != NULL) && (instr->temp2() != NULL));
+    DCHECK((instr->temp1() != NULL) && (instr->temp2() != NULL));
     Register map = ToRegister(instr->temp1());
     Register scratch = ToRegister(instr->temp2());
 
     __ JumpIfSmi(value, false_label);
-    if (!FLAG_harmony_typeof) {
-      __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
-    }
+    __ JumpIfRoot(value, Heap::kNullValueRootIndex, true_label);
     __ JumpIfObjectType(value, map, scratch,
                         FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label, lt);
     __ CompareInstanceType(map, scratch, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -5916,7 +5899,7 @@
   Register temp = ToRegister(instr->temp());
   __ Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
   __ Cmp(map, temp);
-  DeoptimizeIf(ne, instr->environment());
+  DeoptimizeIf(ne, instr);
 }
 
 
@@ -5950,15 +5933,15 @@
   __ JumpIfRoot(receiver, Heap::kUndefinedValueRootIndex, &global_object);
 
   // Deoptimize if the receiver is not a JS object.
-  DeoptimizeIfSmi(receiver, instr->environment());
+  DeoptimizeIfSmi(receiver, instr);
   __ CompareObjectType(receiver, result, result, FIRST_SPEC_OBJECT_TYPE);
   __ B(ge, &copy_receiver);
-  Deoptimize(instr->environment());
+  Deoptimize(instr);
 
   __ Bind(&global_object);
   __ Ldr(result, FieldMemOperand(function, JSFunction::kContextOffset));
   __ Ldr(result, ContextMemOperand(result, Context::GLOBAL_OBJECT_INDEX));
-  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+  __ Ldr(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
   __ B(&done);
 
   __ Bind(&copy_receiver);
@@ -5971,7 +5954,7 @@
                                            Register result,
                                            Register object,
                                            Register index) {
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ Push(object);
   __ Push(index);
   __ Mov(cp, 0);
@@ -5983,7 +5966,7 @@
 
 
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
    public:
     DeferredLoadMutableDouble(LCodeGen* codegen,
                               LLoadFieldByIndex* instr,
@@ -5996,10 +5979,10 @@
           object_(object),
           index_(index) {
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LLoadFieldByIndex* instr_;
     Register result_;
@@ -6052,7 +6035,7 @@
   Handle<ScopeInfo> scope_info = instr->scope_info();
   __ Push(scope_info);
   __ Push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+  CallRuntime(Runtime::kPushBlockContext, 2, instr);
   RecordSafepoint(Safepoint::kNoLazyDeopt);
 }
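
Note: the blanket ASSERT-to-DCHECK rename throughout this change tracks V8's
logging-macro cleanup; the semantics are unchanged, i.e. the check only
exists in debug builds. A stripped-down sketch of the idiom (not V8's actual
macro definition):

    #include <cstdio>
    #include <cstdlib>

    #ifdef DEBUG
    #define DCHECK(condition)                                        \
      do {                                                            \
        if (!(condition)) {                                           \
          std::fprintf(stderr, "DCHECK failed: %s\n", #condition);    \
          std::abort();                                               \
        }                                                             \
      } while (0)
    #else
    // Compiled out in release builds, so the condition must be free of
    // side effects the program relies on (the same rule ASSERT had).
    #define DCHECK(condition) ((void)0)
    #endif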
 
diff --git a/src/arm64/lithium-codegen-arm64.h b/src/arm64/lithium-codegen-arm64.h
index 43cf13f..a141dfa 100644
--- a/src/arm64/lithium-codegen-arm64.h
+++ b/src/arm64/lithium-codegen-arm64.h
@@ -27,7 +27,7 @@
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
-        deopt_jump_table_(4, info->zone()),
+        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -44,7 +44,7 @@
   }
 
   ~LCodeGen() {
-    ASSERT(!after_push_argument_ || inlined_arguments_);
+    DCHECK(!after_push_argument_ || inlined_arguments_);
   }
 
   // Simple accessors.
@@ -83,31 +83,17 @@
 
   enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
   // Support for converting LOperands to assembler types.
-  // LOperand must be a register.
   Register ToRegister(LOperand* op) const;
   Register ToRegister32(LOperand* op) const;
   Operand ToOperand(LOperand* op);
-  Operand ToOperand32I(LOperand* op);
-  Operand ToOperand32U(LOperand* op);
+  Operand ToOperand32(LOperand* op);
   enum StackMode { kMustUseFramePointer, kCanUseStackPointer };
   MemOperand ToMemOperand(LOperand* op,
                           StackMode stack_mode = kCanUseStackPointer) const;
   Handle<Object> ToHandle(LConstantOperand* op) const;
 
-  template<class LI>
-  Operand ToShiftedRightOperand32I(LOperand* right,
-                                   LI* shift_info) {
-    return ToShiftedRightOperand32(right, shift_info, SIGNED_INT32);
-  }
-  template<class LI>
-  Operand ToShiftedRightOperand32U(LOperand* right,
-                                   LI* shift_info) {
-    return ToShiftedRightOperand32(right, shift_info, UNSIGNED_INT32);
-  }
-  template<class LI>
-  Operand ToShiftedRightOperand32(LOperand* right,
-                                  LI* shift_info,
-                                  IntegerSignedness signedness);
+  template <class LI>
+  Operand ToShiftedRightOperand32(LOperand* right, LI* shift_info);
 
   int JSShiftAmountFromLConstant(LOperand* constant) {
     return ToInteger32(LConstantOperand::cast(constant)) & 0x1f;
@@ -158,8 +144,6 @@
                                    Register object,
                                    Register index);
 
-  Operand ToOperand32(LOperand* op, IntegerSignedness signedness);
-
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
   void DoGap(LGap* instr);
@@ -212,6 +196,9 @@
                     int* offset,
                     AllocationSiteMode mode);
 
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+
   // Emits optimized code for %_IsString(x).  Preserves input register.
   // Returns the condition on which a final split to
   // true and false labels should be made, to optimize fallthrough.
@@ -226,27 +213,36 @@
                                    Register temp,
                                    LOperand* index,
                                    String::Encoding encoding);
-  void DeoptimizeBranch(
-      LEnvironment* environment,
-      BranchType branch_type, Register reg = NoReg, int bit = -1,
-      Deoptimizer::BailoutType* override_bailout_type = NULL);
-  void Deoptimize(LEnvironment* environment,
-                  Deoptimizer::BailoutType* override_bailout_type = NULL);
-  void DeoptimizeIf(Condition cond, LEnvironment* environment);
-  void DeoptimizeIfZero(Register rt, LEnvironment* environment);
-  void DeoptimizeIfNotZero(Register rt, LEnvironment* environment);
-  void DeoptimizeIfNegative(Register rt, LEnvironment* environment);
-  void DeoptimizeIfSmi(Register rt, LEnvironment* environment);
-  void DeoptimizeIfNotSmi(Register rt, LEnvironment* environment);
-  void DeoptimizeIfRoot(Register rt,
-                        Heap::RootListIndex index,
-                        LEnvironment* environment);
-  void DeoptimizeIfNotRoot(Register rt,
-                           Heap::RootListIndex index,
-                           LEnvironment* environment);
-  void DeoptimizeIfMinusZero(DoubleRegister input, LEnvironment* environment);
-  void DeoptimizeIfBitSet(Register rt, int bit, LEnvironment* environment);
-  void DeoptimizeIfBitClear(Register rt, int bit, LEnvironment* environment);
+  void DeoptimizeBranch(LInstruction* instr, const char* detail,
+                        BranchType branch_type, Register reg = NoReg,
+                        int bit = -1,
+                        Deoptimizer::BailoutType* override_bailout_type = NULL);
+  void Deoptimize(LInstruction* instr,
+                  Deoptimizer::BailoutType* override_bailout_type = NULL,
+                  const char* detail = NULL);
+  void DeoptimizeIf(Condition cond, LInstruction* instr,
+                    const char* detail = NULL);
+  void DeoptimizeIfZero(Register rt, LInstruction* instr,
+                        const char* detail = NULL);
+  void DeoptimizeIfNotZero(Register rt, LInstruction* instr,
+                           const char* detail = NULL);
+  void DeoptimizeIfNegative(Register rt, LInstruction* instr,
+                            const char* detail = NULL);
+  void DeoptimizeIfSmi(Register rt, LInstruction* instr,
+                       const char* detail = NULL);
+  void DeoptimizeIfNotSmi(Register rt, LInstruction* instr,
+                          const char* detail = NULL);
+  void DeoptimizeIfRoot(Register rt, Heap::RootListIndex index,
+                        LInstruction* instr, const char* detail = NULL);
+  void DeoptimizeIfNotRoot(Register rt, Heap::RootListIndex index,
+                           LInstruction* instr, const char* detail = NULL);
+  void DeoptimizeIfNotHeapNumber(Register object, LInstruction* instr);
+  void DeoptimizeIfMinusZero(DoubleRegister input, LInstruction* instr,
+                             const char* detail = NULL);
+  void DeoptimizeIfBitSet(Register rt, int bit, LInstruction* instr,
+                          const char* detail = NULL);
+  void DeoptimizeIfBitClear(Register rt, int bit, LInstruction* instr,
+                            const char* detail = NULL);
 
   MemOperand PrepareKeyedExternalArrayOperand(Register key,
                                               Register base,
@@ -286,10 +282,10 @@
   void RestoreCallerDoubles();
 
   // Code generation steps.  Returns true if code generation should continue.
-  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
   bool GeneratePrologue();
   bool GenerateDeferredCode();
-  bool GenerateDeoptJumpTable();
+  bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
   // Generates the custom OSR entrypoint and sets the osr_pc_offset.
@@ -338,7 +334,7 @@
                          Register function_reg = NoReg);
 
   // Support for recording safepoint and position information.
-  void RecordAndWritePosition(int position) V8_OVERRIDE;
+  void RecordAndWritePosition(int position) OVERRIDE;
   void RecordSafepoint(LPointerMap* pointers,
                        Safepoint::Kind kind,
                        int arguments,
@@ -348,16 +344,13 @@
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
                                     Safepoint::DeoptMode mode);
-  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
-                                              int arguments,
-                                              Safepoint::DeoptMode mode);
   void RecordSafepointWithLazyDeopt(LInstruction* instr,
                                     SafepointMode safepoint_mode);
 
-  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
 
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry*> deopt_jump_table_;
+  ZoneList<Deoptimizer::JumpTableEntry*> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
@@ -388,12 +381,11 @@
 
   class PushSafepointRegistersScope BASE_EMBEDDED {
    public:
-    PushSafepointRegistersScope(LCodeGen* codegen,
-                                Safepoint::Kind kind)
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
-      ASSERT(codegen_->info()->is_calling());
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = kind;
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
 
       UseScratchRegisterScope temps(codegen_->masm_);
       // Preserve the value of lr which must be saved on the stack (the call to
@@ -401,39 +393,14 @@
       Register to_be_pushed_lr =
           temps.UnsafeAcquire(StoreRegistersStateStub::to_be_pushed_lr());
       codegen_->masm_->Mov(to_be_pushed_lr, lr);
-      switch (codegen_->expected_safepoint_kind_) {
-        case Safepoint::kWithRegisters: {
-          StoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
-          codegen_->masm_->CallStub(&stub);
-          break;
-        }
-        case Safepoint::kWithRegistersAndDoubles: {
-          StoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
-          codegen_->masm_->CallStub(&stub);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
     }
 
     ~PushSafepointRegistersScope() {
-      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
-      ASSERT((kind & Safepoint::kWithRegisters) != 0);
-      switch (kind) {
-        case Safepoint::kWithRegisters: {
-          RestoreRegistersStateStub stub(codegen_->isolate(), kDontSaveFPRegs);
-          codegen_->masm_->CallStub(&stub);
-          break;
-        }
-        case Safepoint::kWithRegistersAndDoubles: {
-          RestoreRegistersStateStub stub(codegen_->isolate(), kSaveFPRegs);
-          codegen_->masm_->CallStub(&stub);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
       codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
     }
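
Note: PushSafepointRegistersScope loses its Safepoint::Kind parameter because
kWithRegisters is the only surviving mode; the kWithRegistersAndDoubles
variant and its paired stub selection are gone. Typical usage, as in the
DoDeferredStringCharCodeAt hunk earlier in this change:

    {
      PushSafepointRegistersScope scope(this);  // implicitly kWithRegisters
      __ Push(string);
      __ SmiTagAndPush(index);
      CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                              instr->context());
      __ StoreToSafepointRegisterSlot(x0, result);
    }  // destructor calls RestoreRegistersStateStub here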
 
diff --git a/src/arm64/lithium-gap-resolver-arm64.cc b/src/arm64/lithium-gap-resolver-arm64.cc
index bd655ea..d06a37b 100644
--- a/src/arm64/lithium-gap-resolver-arm64.cc
+++ b/src/arm64/lithium-gap-resolver-arm64.cc
@@ -4,36 +4,36 @@
 
 #include "src/v8.h"
 
-#include "src/arm64/lithium-gap-resolver-arm64.h"
+#include "src/arm64/delayed-masm-arm64-inl.h"
 #include "src/arm64/lithium-codegen-arm64.h"
+#include "src/arm64/lithium-gap-resolver-arm64.h"
 
 namespace v8 {
 namespace internal {
 
-// We use the root register to spill a value while breaking a cycle in parallel
-// moves. We don't need access to roots while resolving the move list and using
-// the root register has two advantages:
-//  - It is not in crankshaft allocatable registers list, so it can't interfere
-//    with any of the moves we are resolving.
-//  - We don't need to push it on the stack, as we can reload it with its value
-//    once we have resolved a cycle.
-#define kSavedValue root
+#define __ ACCESS_MASM((&masm_))
 
-// We use the MacroAssembler floating-point scratch register to break a cycle
-// involving double values as the MacroAssembler will not need it for the
-// operations performed by the gap resolver.
-#define kSavedDoubleValue fp_scratch
+
+void DelayedGapMasm::EndDelayedUse() {
+  DelayedMasm::EndDelayedUse();
+  if (scratch_register_used()) {
+    DCHECK(ScratchRegister().Is(root));
+    DCHECK(!pending());
+    InitializeRootRegister();
+    reset_scratch_register_used();
+  }
+}
 
 
 LGapResolver::LGapResolver(LCodeGen* owner)
-    : cgen_(owner), moves_(32, owner->zone()), root_index_(0), in_cycle_(false),
-      saved_destination_(NULL), need_to_restore_root_(false) { }
+    : cgen_(owner), masm_(owner, owner->masm()), moves_(32, owner->zone()),
+      root_index_(0), in_cycle_(false), saved_destination_(NULL) {
+}
 
 
-#define __ ACCESS_MASM(cgen_->masm())
-
 void LGapResolver::Resolve(LParallelMove* parallel_move) {
-  ASSERT(moves_.is_empty());
+  DCHECK(moves_.is_empty());
+  DCHECK(!masm_.pending());
 
   // Build up a worklist of moves.
   BuildInitialMoveList(parallel_move);
@@ -56,16 +56,12 @@
     LMoveOperands move = moves_[i];
 
     if (!move.IsEliminated()) {
-      ASSERT(move.source()->IsConstantOperand());
+      DCHECK(move.source()->IsConstantOperand());
       EmitMove(i);
     }
   }
 
-  if (need_to_restore_root_) {
-    ASSERT(kSavedValue.Is(root));
-    __ InitializeRootRegister();
-    need_to_restore_root_ = false;
-  }
+  __ EndDelayedUse();
 
   moves_.Rewind(0);
 }
@@ -92,13 +88,13 @@
   // cycles in the move graph.
   LMoveOperands& current_move = moves_[index];
 
-  ASSERT(!current_move.IsPending());
-  ASSERT(!current_move.IsRedundant());
+  DCHECK(!current_move.IsPending());
+  DCHECK(!current_move.IsRedundant());
 
   // Clear this move's destination to indicate a pending move.  The actual
   // destination is saved in a stack-allocated local. Multiple moves can
   // be pending because this function is recursive.
-  ASSERT(current_move.source() != NULL);  // Otherwise it will look eliminated.
+  DCHECK(current_move.source() != NULL);  // Otherwise it will look eliminated.
   LOperand* destination = current_move.destination();
   current_move.set_destination(NULL);
 
@@ -125,7 +121,7 @@
   // a scratch register to break it.
   LMoveOperands other_move = moves_[root_index_];
   if (other_move.Blocks(destination)) {
-    ASSERT(other_move.IsPending());
+    DCHECK(other_move.IsPending());
     BreakCycle(index);
     return;
   }
@@ -136,12 +132,12 @@
 
 
 void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   // No operand should be the destination for more than one move.
   for (int i = 0; i < moves_.length(); ++i) {
     LOperand* destination = moves_[i].destination();
     for (int j = i + 1; j < moves_.length(); ++j) {
-      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
     }
   }
 #endif
@@ -149,13 +145,8 @@
 
 
 void LGapResolver::BreakCycle(int index) {
-  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
-  ASSERT(!in_cycle_);
-
-  // We use registers which are not allocatable by crankshaft to break the cycle
-  // to be sure they don't interfere with the moves we are resolving.
-  ASSERT(!kSavedValue.IsAllocatable());
-  ASSERT(!kSavedDoubleValue.IsAllocatable());
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
 
   // We save in a register the source of that move and we remember its
   // destination. Then we mark this move as resolved so the cycle is
@@ -165,19 +156,15 @@
   saved_destination_ = moves_[index].destination();
 
   if (source->IsRegister()) {
-    need_to_restore_root_ = true;
-    __ Mov(kSavedValue, cgen_->ToRegister(source));
+    AcquireSavedValueRegister();
+    __ Mov(SavedValueRegister(), cgen_->ToRegister(source));
   } else if (source->IsStackSlot()) {
-    need_to_restore_root_ = true;
-    __ Ldr(kSavedValue, cgen_->ToMemOperand(source));
+    AcquireSavedValueRegister();
+    __ Load(SavedValueRegister(), cgen_->ToMemOperand(source));
   } else if (source->IsDoubleRegister()) {
-    ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
-    cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
-    __ Fmov(kSavedDoubleValue, cgen_->ToDoubleRegister(source));
+    __ Fmov(SavedFPValueRegister(), cgen_->ToDoubleRegister(source));
   } else if (source->IsDoubleStackSlot()) {
-    ASSERT(cgen_->masm()->FPTmpList()->IncludesAliasOf(kSavedDoubleValue));
-    cgen_->masm()->FPTmpList()->Remove(kSavedDoubleValue);
-    __ Ldr(kSavedDoubleValue, cgen_->ToMemOperand(source));
+    __ Load(SavedFPValueRegister(), cgen_->ToMemOperand(source));
   } else {
     UNREACHABLE();
   }
@@ -190,19 +177,20 @@
 
 
 void LGapResolver::RestoreValue() {
-  ASSERT(in_cycle_);
-  ASSERT(saved_destination_ != NULL);
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
 
   if (saved_destination_->IsRegister()) {
-    __ Mov(cgen_->ToRegister(saved_destination_), kSavedValue);
+    __ Mov(cgen_->ToRegister(saved_destination_), SavedValueRegister());
+    ReleaseSavedValueRegister();
   } else if (saved_destination_->IsStackSlot()) {
-    __ Str(kSavedValue, cgen_->ToMemOperand(saved_destination_));
+    __ Store(SavedValueRegister(), cgen_->ToMemOperand(saved_destination_));
+    ReleaseSavedValueRegister();
   } else if (saved_destination_->IsDoubleRegister()) {
-    __ Fmov(cgen_->ToDoubleRegister(saved_destination_), kSavedDoubleValue);
-    cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+    __ Fmov(cgen_->ToDoubleRegister(saved_destination_),
+            SavedFPValueRegister());
   } else if (saved_destination_->IsDoubleStackSlot()) {
-    __ Str(kSavedDoubleValue, cgen_->ToMemOperand(saved_destination_));
-    cgen_->masm()->FPTmpList()->Combine(kSavedDoubleValue);
+    __ Store(SavedFPValueRegister(), cgen_->ToMemOperand(saved_destination_));
   } else {
     UNREACHABLE();
   }
@@ -224,16 +212,16 @@
     if (destination->IsRegister()) {
       __ Mov(cgen_->ToRegister(destination), source_register);
     } else {
-      ASSERT(destination->IsStackSlot());
-      __ Str(source_register, cgen_->ToMemOperand(destination));
+      DCHECK(destination->IsStackSlot());
+      __ Store(source_register, cgen_->ToMemOperand(destination));
     }
 
   } else if (source->IsStackSlot()) {
     MemOperand source_operand = cgen_->ToMemOperand(source);
     if (destination->IsRegister()) {
-      __ Ldr(cgen_->ToRegister(destination), source_operand);
+      __ Load(cgen_->ToRegister(destination), source_operand);
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       EmitStackSlotMove(index);
     }
 
@@ -252,17 +240,30 @@
       DoubleRegister result = cgen_->ToDoubleRegister(destination);
       __ Fmov(result, cgen_->ToDouble(constant_source));
     } else {
-      ASSERT(destination->IsStackSlot());
-      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
-      need_to_restore_root_ = true;
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
       if (cgen_->IsSmi(constant_source)) {
-        __ Mov(kSavedValue, cgen_->ToSmi(constant_source));
+        Smi* smi = cgen_->ToSmi(constant_source);
+        __ StoreConstant(reinterpret_cast<intptr_t>(smi),
+                         cgen_->ToMemOperand(destination));
       } else if (cgen_->IsInteger32Constant(constant_source)) {
-        __ Mov(kSavedValue, cgen_->ToInteger32(constant_source));
+        __ StoreConstant(cgen_->ToInteger32(constant_source),
+                         cgen_->ToMemOperand(destination));
       } else {
-        __ LoadObject(kSavedValue, cgen_->ToHandle(constant_source));
+        Handle<Object> handle = cgen_->ToHandle(constant_source);
+        AllowDeferredHandleDereference smi_object_check;
+        if (handle->IsSmi()) {
+          Object* obj = *handle;
+          DCHECK(!obj->IsHeapObject());
+          __ StoreConstant(reinterpret_cast<intptr_t>(obj),
+                           cgen_->ToMemOperand(destination));
+        } else {
+          AcquireSavedValueRegister();
+          __ LoadObject(SavedValueRegister(), handle);
+          __ Store(SavedValueRegister(), cgen_->ToMemOperand(destination));
+          ReleaseSavedValueRegister();
+        }
       }
-      __ Str(kSavedValue, cgen_->ToMemOperand(destination));
     }
 
   } else if (source->IsDoubleRegister()) {
@@ -270,16 +271,16 @@
     if (destination->IsDoubleRegister()) {
       __ Fmov(cgen_->ToDoubleRegister(destination), src);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
-      __ Str(src, cgen_->ToMemOperand(destination));
+      DCHECK(destination->IsDoubleStackSlot());
+      __ Store(src, cgen_->ToMemOperand(destination));
     }
 
   } else if (source->IsDoubleStackSlot()) {
     MemOperand src = cgen_->ToMemOperand(source);
     if (destination->IsDoubleRegister()) {
-      __ Ldr(cgen_->ToDoubleRegister(destination), src);
+      __ Load(cgen_->ToDoubleRegister(destination), src);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsDoubleStackSlot());
       EmitStackSlotMove(index);
     }
 
@@ -291,21 +292,4 @@
   moves_[index].Eliminate();
 }
 
-
-void LGapResolver::EmitStackSlotMove(int index) {
-  // We need a temp register to perform a stack slot to stack slot move, and
-  // the register must not be involved in breaking cycles.
-
-  // Use the Crankshaft double scratch register as the temporary.
-  DoubleRegister temp = crankshaft_fp_scratch;
-
-  LOperand* src = moves_[index].source();
-  LOperand* dst = moves_[index].destination();
-
-  ASSERT(src->IsStackSlot());
-  ASSERT(dst->IsStackSlot());
-  __ Ldr(temp, cgen_->ToMemOperand(src));
-  __ Str(temp, cgen_->ToMemOperand(dst));
-}
-
 } }  // namespace v8::internal
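
Note: for readers new to the gap resolver, a parallel move such as
{x0->x1, x1->x0} has no valid serial order without a temporary, which is what
BreakCycle/RestoreValue provide. The control flow, reduced to plain values
(illustrative only, not V8 code):

    // Resolving the cycle a->b, b->a with one scratch location.
    void ResolveSwap(int& a, int& b, int& scratch) {
      scratch = b;  // BreakCycle: spill one source into the scratch
      b = a;        // the remaining move of the cycle is now unblocked
      a = scratch;  // RestoreValue: deliver the saved value last
    }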
diff --git a/src/arm64/lithium-gap-resolver-arm64.h b/src/arm64/lithium-gap-resolver-arm64.h
index 55d4ecb..2eb651b 100644
--- a/src/arm64/lithium-gap-resolver-arm64.h
+++ b/src/arm64/lithium-gap-resolver-arm64.h
@@ -7,6 +7,7 @@
 
 #include "src/v8.h"
 
+#include "src/arm64/delayed-masm-arm64.h"
 #include "src/lithium.h"
 
 namespace v8 {
@@ -15,6 +16,21 @@
 class LCodeGen;
 class LGapResolver;
 
+class DelayedGapMasm : public DelayedMasm {
+ public:
+  DelayedGapMasm(LCodeGen* owner, MacroAssembler* masm)
+    : DelayedMasm(owner, masm, root) {
+    // We use the root register as an extra scratch register.
+    // The root register has two advantages:
+    //  - It is not in the crankshaft allocatable registers list, so it
+    //    cannot interfere with the allocatable registers.
+    //  - We don't need to push it on the stack, as we can reload it with
+    //    its value once we have finished.
+  }
+  void EndDelayedUse();
+};
+
+
 class LGapResolver BASE_EMBEDDED {
  public:
   explicit LGapResolver(LCodeGen* owner);
@@ -43,12 +59,32 @@
   void EmitMove(int index);
 
   // Emit a move from one stack slot to another.
-  void EmitStackSlotMove(int index);
+  void EmitStackSlotMove(int index) {
+    masm_.StackSlotMove(moves_[index].source(), moves_[index].destination());
+  }
 
   // Verify the move list before performing moves.
   void Verify();
 
+  // Registers used to break cycles.
+  const Register& SavedValueRegister() {
+    DCHECK(!masm_.ScratchRegister().IsAllocatable());
+    return masm_.ScratchRegister();
+  }
+  // The scratch register is used to break cycles and to store constants.
+  // These two methods switch from one mode to the other.
+  void AcquireSavedValueRegister() { masm_.AcquireScratchRegister(); }
+  void ReleaseSavedValueRegister() { masm_.ReleaseScratchRegister(); }
+  const FPRegister& SavedFPValueRegister() {
+    // We use the Crankshaft floating-point scratch register to break a cycle
+    // involving double values as the MacroAssembler will not need it for the
+    // operations performed by the gap resolver.
+    DCHECK(!crankshaft_fp_scratch.IsAllocatable());
+    return crankshaft_fp_scratch;
+  }
+
   LCodeGen* cgen_;
+  DelayedGapMasm masm_;
 
   // List of moves not yet resolved.
   ZoneList<LMoveOperands> moves_;
@@ -56,10 +92,6 @@
   int root_index_;
   bool in_cycle_;
   LOperand* saved_destination_;
-
-  // We use the root register as a scratch in a few places. When that happens,
-  // this flag is set to indicate that it needs to be restored.
-  bool need_to_restore_root_;
 };
 
 } }  // namespace v8::internal
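
Note: the design point behind DelayedGapMasm's scratch choice is that root is
uniquely cheap to borrow: its contents are a per-isolate constant, so
"restoring" it is a re-materialization rather than a stack save/restore.
EndDelayedUse in the .cc hunk above reduces to this shape (a sketch of the
same logic):

    if (scratch_register_used()) {  // root was clobbered by a move
      InitializeRootRegister();     // reload the known constant value
      reset_scratch_register_used();
    }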
diff --git a/src/arm64/macro-assembler-arm64-inl.h b/src/arm64/macro-assembler-arm64-inl.h
index 0c6aadf..23767e4 100644
--- a/src/arm64/macro-assembler-arm64-inl.h
+++ b/src/arm64/macro-assembler-arm64-inl.h
@@ -9,10 +9,11 @@
 
 #include "src/globals.h"
 
-#include "src/arm64/assembler-arm64.h"
 #include "src/arm64/assembler-arm64-inl.h"
-#include "src/arm64/macro-assembler-arm64.h"
+#include "src/arm64/assembler-arm64.h"
 #include "src/arm64/instrument-arm64.h"
+#include "src/arm64/macro-assembler-arm64.h"
+#include "src/base/bits.h"
 
 
 namespace v8 {
@@ -37,7 +38,7 @@
 
 
 Handle<Object> MacroAssembler::CodeObject() {
-  ASSERT(!code_object_.is_null());
+  DCHECK(!code_object_.is_null());
   return code_object_;
 }
 
@@ -45,8 +46,8 @@
 void MacroAssembler::And(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   LogicalMacro(rd, rn, operand, AND);
 }
 
@@ -54,15 +55,15 @@
 void MacroAssembler::Ands(const Register& rd,
                           const Register& rn,
                           const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   LogicalMacro(rd, rn, operand, ANDS);
 }
 
 
 void MacroAssembler::Tst(const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   LogicalMacro(AppropriateZeroRegFor(rn), rn, operand, ANDS);
 }
 
@@ -70,8 +71,8 @@
 void MacroAssembler::Bic(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   LogicalMacro(rd, rn, operand, BIC);
 }
 
@@ -79,8 +80,8 @@
 void MacroAssembler::Bics(const Register& rd,
                           const Register& rn,
                           const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   LogicalMacro(rd, rn, operand, BICS);
 }
 
@@ -88,8 +89,8 @@
 void MacroAssembler::Orr(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   LogicalMacro(rd, rn, operand, ORR);
 }
 
@@ -97,8 +98,8 @@
 void MacroAssembler::Orn(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   LogicalMacro(rd, rn, operand, ORN);
 }
 
@@ -106,8 +107,8 @@
 void MacroAssembler::Eor(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   LogicalMacro(rd, rn, operand, EOR);
 }
 
@@ -115,8 +116,8 @@
 void MacroAssembler::Eon(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   LogicalMacro(rd, rn, operand, EON);
 }
 
@@ -125,7 +126,7 @@
                           const Operand& operand,
                           StatusFlags nzcv,
                           Condition cond) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
     ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMN);
   } else {
@@ -138,7 +139,7 @@
                           const Operand& operand,
                           StatusFlags nzcv,
                           Condition cond) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
     ConditionalCompareMacro(rn, -operand.ImmediateValue(), nzcv, cond, CCMP);
   } else {
@@ -150,8 +151,9 @@
 void MacroAssembler::Add(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+  DCHECK(allow_macro_instructions_);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+      IsImmAddSub(-operand.ImmediateValue())) {
     AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, SUB);
   } else {
     AddSubMacro(rd, rn, operand, LeaveFlags, ADD);
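
Note on the IsImmAddSub guard added to Add above and to Adds/Sub/Subs in the
hunks that follow: folding Add(rd, rn, #-imm) into Sub(rd, rn, #imm) only
pays off when the negated value still encodes as an ARM64 add/sub immediate,
and negating the most negative value overflows. A self-contained sketch of
the decision (IsImmAddSubSketch is a stand-in for the real encodability
check, assumed to be imm12 optionally shifted left by 12):

    #include <cstdint>
    #include <limits>

    static bool IsImmAddSubSketch(int64_t imm) {
      return (imm >= 0) &&
             (((imm & ~0xfffLL) == 0) ||         // fits unshifted imm12
              ((imm & ~(0xfffLL << 12)) == 0));  // fits imm12, LSL #12
    }

    static bool CanRewriteAddAsSub(int64_t imm) {
      if (imm >= 0) return false;
      // -imm overflows for INT64_MIN, so it must never reach the negation.
      if (imm == std::numeric_limits<int64_t>::min()) return false;
      return IsImmAddSubSketch(-imm);
    }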
@@ -161,8 +163,9 @@
 void MacroAssembler::Adds(const Register& rd,
                           const Register& rn,
                           const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+  DCHECK(allow_macro_instructions_);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+      IsImmAddSub(-operand.ImmediateValue())) {
     AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, SUB);
   } else {
     AddSubMacro(rd, rn, operand, SetFlags, ADD);
@@ -173,8 +176,9 @@
 void MacroAssembler::Sub(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+  DCHECK(allow_macro_instructions_);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+      IsImmAddSub(-operand.ImmediateValue())) {
     AddSubMacro(rd, rn, -operand.ImmediateValue(), LeaveFlags, ADD);
   } else {
     AddSubMacro(rd, rn, operand, LeaveFlags, SUB);
@@ -185,8 +189,9 @@
 void MacroAssembler::Subs(const Register& rd,
                           const Register& rn,
                           const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  if (operand.IsImmediate() && (operand.ImmediateValue() < 0)) {
+  DCHECK(allow_macro_instructions_);
+  if (operand.IsImmediate() && (operand.ImmediateValue() < 0) &&
+      IsImmAddSub(-operand.ImmediateValue())) {
     AddSubMacro(rd, rn, -operand.ImmediateValue(), SetFlags, ADD);
   } else {
     AddSubMacro(rd, rn, operand, SetFlags, SUB);
@@ -195,21 +200,21 @@
 
 
 void MacroAssembler::Cmn(const Register& rn, const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   Adds(AppropriateZeroRegFor(rn), rn, operand);
 }
 
 
 void MacroAssembler::Cmp(const Register& rn, const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   Subs(AppropriateZeroRegFor(rn), rn, operand);
 }
 
 
 void MacroAssembler::Neg(const Register& rd,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   if (operand.IsImmediate()) {
     Mov(rd, -operand.ImmediateValue());
   } else {
@@ -220,7 +225,7 @@
 
 void MacroAssembler::Negs(const Register& rd,
                           const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   Subs(rd, AppropriateZeroRegFor(rd), operand);
 }
 
@@ -228,8 +233,8 @@
 void MacroAssembler::Adc(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, ADC);
 }
 
@@ -237,8 +242,8 @@
 void MacroAssembler::Adcs(const Register& rd,
                           const Register& rn,
                           const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   AddSubWithCarryMacro(rd, rn, operand, SetFlags, ADC);
 }
 
@@ -246,8 +251,8 @@
 void MacroAssembler::Sbc(const Register& rd,
                          const Register& rn,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   AddSubWithCarryMacro(rd, rn, operand, LeaveFlags, SBC);
 }
 
@@ -255,16 +260,16 @@
 void MacroAssembler::Sbcs(const Register& rd,
                           const Register& rn,
                           const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   AddSubWithCarryMacro(rd, rn, operand, SetFlags, SBC);
 }
 
 
 void MacroAssembler::Ngc(const Register& rd,
                          const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   Register zr = AppropriateZeroRegFor(rd);
   Sbc(rd, zr, operand);
 }
@@ -272,34 +277,44 @@
 
 void MacroAssembler::Ngcs(const Register& rd,
                           const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   Register zr = AppropriateZeroRegFor(rd);
   Sbcs(rd, zr, operand);
 }
 
 
 void MacroAssembler::Mvn(const Register& rd, uint64_t imm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   Mov(rd, ~imm);
 }
 
 
 #define DEFINE_FUNCTION(FN, REGTYPE, REG, OP)                         \
 void MacroAssembler::FN(const REGTYPE REG, const MemOperand& addr) {  \
-  ASSERT(allow_macro_instructions_);                                  \
+  DCHECK(allow_macro_instructions_);                                  \
   LoadStoreMacro(REG, addr, OP);                                      \
 }
 LS_MACRO_LIST(DEFINE_FUNCTION)
 #undef DEFINE_FUNCTION
 
 
+#define DEFINE_FUNCTION(FN, REGTYPE, REG, REG2, OP)              \
+  void MacroAssembler::FN(const REGTYPE REG, const REGTYPE REG2, \
+                          const MemOperand& addr) {              \
+    DCHECK(allow_macro_instructions_);                           \
+    LoadStorePairMacro(REG, REG2, addr, OP);                     \
+  }
+LSPAIR_MACRO_LIST(DEFINE_FUNCTION)
+#undef DEFINE_FUNCTION
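
Note: the added block extends the file's existing X-macro pattern:
LSPAIR_MACRO_LIST expands DEFINE_FUNCTION once per load/store-pair
instruction, so every Ldp/Stp-style wrapper is generated instead of
hand-written. The idiom, reduced to a standalone example (hypothetical list
and function names):

    #include <cstdio>

    #define PAIR_LIST(V) \
      V(LoadPair)        \
      V(StorePair)

    #define DEFINE_FUNCTION(FN) \
      void FN() { std::printf(#FN "\n"); }
    PAIR_LIST(DEFINE_FUNCTION)  // defines LoadPair() and StorePair()
    #undef DEFINE_FUNCTION
    #undef PAIR_LIST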
+
+
 void MacroAssembler::Asr(const Register& rd,
                          const Register& rn,
                          unsigned shift) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   asr(rd, rn, shift);
 }
 
@@ -307,8 +322,8 @@
 void MacroAssembler::Asr(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   asrv(rd, rn, rm);
 }
 
@@ -320,7 +335,7 @@
 
 
 void MacroAssembler::B(Condition cond, Label* label) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   B(label, cond);
 }
 
@@ -329,8 +344,8 @@
                          const Register& rn,
                          unsigned lsb,
                          unsigned width) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   bfi(rd, rn, lsb, width);
 }
 
@@ -339,40 +354,40 @@
                            const Register& rn,
                            unsigned lsb,
                            unsigned width) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   bfxil(rd, rn, lsb, width);
 }
 
 
 void MacroAssembler::Bind(Label* label) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   bind(label);
 }
 
 
 void MacroAssembler::Bl(Label* label) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   bl(label);
 }
 
 
 void MacroAssembler::Blr(const Register& xn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!xn.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!xn.IsZero());
   blr(xn);
 }
 
 
 void MacroAssembler::Br(const Register& xn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!xn.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!xn.IsZero());
   br(xn);
 }
 
 
 void MacroAssembler::Brk(int code) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   brk(code);
 }
 
@@ -380,9 +395,9 @@
 void MacroAssembler::Cinc(const Register& rd,
                           const Register& rn,
                           Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   cinc(rd, rn, cond);
 }
 
@@ -390,23 +405,23 @@
 void MacroAssembler::Cinv(const Register& rd,
                           const Register& rn,
                           Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   cinv(rd, rn, cond);
 }
 
 
 void MacroAssembler::Cls(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   cls(rd, rn);
 }
 
 
 void MacroAssembler::Clz(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   clz(rd, rn);
 }
 
@@ -414,9 +429,9 @@
 void MacroAssembler::Cneg(const Register& rd,
                           const Register& rn,
                           Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   cneg(rd, rn, cond);
 }
 
@@ -425,9 +440,9 @@
 // due to the truncation side-effect when used on W registers.
 void MacroAssembler::CzeroX(const Register& rd,
                             Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsSP() && rd.Is64Bits());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsSP() && rd.Is64Bits());
+  DCHECK((cond != al) && (cond != nv));
   csel(rd, xzr, rd, cond);
 }
 
@@ -437,10 +452,10 @@
 void MacroAssembler::CmovX(const Register& rd,
                            const Register& rn,
                            Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsSP());
-  ASSERT(rd.Is64Bits() && rn.Is64Bits());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsSP());
+  DCHECK(rd.Is64Bits() && rn.Is64Bits());
+  DCHECK((cond != al) && (cond != nv));
   if (!rd.is(rn)) {
     csel(rd, rn, rd, cond);
   }
@@ -448,17 +463,17 @@
 
 
 void MacroAssembler::Cset(const Register& rd, Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   cset(rd, cond);
 }
 
 
 void MacroAssembler::Csetm(const Register& rd, Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   csetm(rd, cond);
 }
 
@@ -467,9 +482,9 @@
                            const Register& rn,
                            const Register& rm,
                            Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   csinc(rd, rn, rm, cond);
 }
 
@@ -478,9 +493,9 @@
                            const Register& rn,
                            const Register& rm,
                            Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   csinv(rd, rn, rm, cond);
 }
 
@@ -489,27 +504,27 @@
                            const Register& rn,
                            const Register& rm,
                            Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   csneg(rd, rn, rm, cond);
 }
 
 
 void MacroAssembler::Dmb(BarrierDomain domain, BarrierType type) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   dmb(domain, type);
 }
 
 
 void MacroAssembler::Dsb(BarrierDomain domain, BarrierType type) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   dsb(domain, type);
 }
 
 
 void MacroAssembler::Debug(const char* message, uint32_t code, Instr params) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   debug(message, code, params);
 }
 
@@ -518,14 +533,14 @@
                           const Register& rn,
                           const Register& rm,
                           unsigned lsb) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   extr(rd, rn, rm, lsb);
 }
 
 
 void MacroAssembler::Fabs(const FPRegister& fd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fabs(fd, fn);
 }
 
@@ -533,7 +548,7 @@
 void MacroAssembler::Fadd(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fadd(fd, fn, fm);
 }
 
@@ -542,20 +557,20 @@
                            const FPRegister& fm,
                            StatusFlags nzcv,
                            Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK((cond != al) && (cond != nv));
   fccmp(fn, fm, nzcv, cond);
 }
 
 
 void MacroAssembler::Fcmp(const FPRegister& fn, const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fcmp(fn, fm);
 }
 
 
 void MacroAssembler::Fcmp(const FPRegister& fn, double value) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   if (value != 0.0) {
     UseScratchRegisterScope temps(this);
     FPRegister tmp = temps.AcquireSameSizeAs(fn);
@@ -571,68 +586,68 @@
                            const FPRegister& fn,
                            const FPRegister& fm,
                            Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK((cond != al) && (cond != nv));
   fcsel(fd, fn, fm, cond);
 }
 
 
 void MacroAssembler::Fcvt(const FPRegister& fd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fcvt(fd, fn);
 }
 
 
 void MacroAssembler::Fcvtas(const Register& rd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fcvtas(rd, fn);
 }
 
 
 void MacroAssembler::Fcvtau(const Register& rd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fcvtau(rd, fn);
 }
 
 
 void MacroAssembler::Fcvtms(const Register& rd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fcvtms(rd, fn);
 }
 
 
 void MacroAssembler::Fcvtmu(const Register& rd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fcvtmu(rd, fn);
 }
 
 
 void MacroAssembler::Fcvtns(const Register& rd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fcvtns(rd, fn);
 }
 
 
 void MacroAssembler::Fcvtnu(const Register& rd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fcvtnu(rd, fn);
 }
 
 
 void MacroAssembler::Fcvtzs(const Register& rd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fcvtzs(rd, fn);
 }
 void MacroAssembler::Fcvtzu(const Register& rd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fcvtzu(rd, fn);
 }
 
@@ -640,7 +655,7 @@
 void MacroAssembler::Fdiv(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fdiv(fd, fn, fm);
 }
 
@@ -649,7 +664,7 @@
                            const FPRegister& fn,
                            const FPRegister& fm,
                            const FPRegister& fa) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fmadd(fd, fn, fm, fa);
 }
 
@@ -657,7 +672,7 @@
 void MacroAssembler::Fmax(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fmax(fd, fn, fm);
 }
 
@@ -665,7 +680,7 @@
 void MacroAssembler::Fmaxnm(const FPRegister& fd,
                             const FPRegister& fn,
                             const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fmaxnm(fd, fn, fm);
 }
 
@@ -673,7 +688,7 @@
 void MacroAssembler::Fmin(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fmin(fd, fn, fm);
 }
 
@@ -681,13 +696,13 @@
 void MacroAssembler::Fminnm(const FPRegister& fd,
                             const FPRegister& fn,
                             const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fminnm(fd, fn, fm);
 }
 
 
 void MacroAssembler::Fmov(FPRegister fd, FPRegister fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   // Only emit an instruction if fd and fn are different, and they are both D
   // registers. fmov(s0, s0) is not a no-op because it clears the top word of
   // d0. Technically, fmov(d0, d0) is not a no-op either because it clears the
@@ -699,19 +714,19 @@
 
 
 void MacroAssembler::Fmov(FPRegister fd, Register rn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fmov(fd, rn);
 }
 
 
 void MacroAssembler::Fmov(FPRegister fd, double imm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   if (fd.Is32Bits()) {
     Fmov(fd, static_cast<float>(imm));
     return;
   }
 
-  ASSERT(fd.Is64Bits());
+  DCHECK(fd.Is64Bits());
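   // The copysign test below distinguishes the zeroes: only +0.0 can be
   // materialized from the zero register, while -0.0 (sign bit set) is not
   // an encodable FP immediate and is loaded as a literal instead (the
   // fallback lies past this hunk).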
   if (IsImmFP64(imm)) {
     fmov(fd, imm);
   } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
@@ -723,13 +738,13 @@
 
 
 void MacroAssembler::Fmov(FPRegister fd, float imm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   if (fd.Is64Bits()) {
     Fmov(fd, static_cast<double>(imm));
     return;
   }
 
-  ASSERT(fd.Is32Bits());
+  DCHECK(fd.Is32Bits());
   if (IsImmFP32(imm)) {
     fmov(fd, imm);
   } else if ((imm == 0.0) && (copysign(1.0, imm) == 1.0)) {
@@ -745,8 +760,8 @@
 
 
 void MacroAssembler::Fmov(Register rd, FPRegister fn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   fmov(rd, fn);
 }
 
@@ -755,7 +770,7 @@
                            const FPRegister& fn,
                            const FPRegister& fm,
                            const FPRegister& fa) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fmsub(fd, fn, fm, fa);
 }
 
@@ -763,13 +778,13 @@
 void MacroAssembler::Fmul(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fmul(fd, fn, fm);
 }
 
 
 void MacroAssembler::Fneg(const FPRegister& fd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fneg(fd, fn);
 }
 
@@ -778,7 +793,7 @@
                             const FPRegister& fn,
                             const FPRegister& fm,
                             const FPRegister& fa) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fnmadd(fd, fn, fm, fa);
 }
 
@@ -787,37 +802,37 @@
                             const FPRegister& fn,
                             const FPRegister& fm,
                             const FPRegister& fa) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fnmsub(fd, fn, fm, fa);
 }
 
 
 void MacroAssembler::Frinta(const FPRegister& fd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   frinta(fd, fn);
 }
 
 
 void MacroAssembler::Frintm(const FPRegister& fd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   frintm(fd, fn);
 }
 
 
 void MacroAssembler::Frintn(const FPRegister& fd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   frintn(fd, fn);
 }
 
 
 void MacroAssembler::Frintz(const FPRegister& fd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   frintz(fd, fn);
 }
 
 
 void MacroAssembler::Fsqrt(const FPRegister& fd, const FPRegister& fn) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fsqrt(fd, fn);
 }
 
@@ -825,25 +840,25 @@
 void MacroAssembler::Fsub(const FPRegister& fd,
                           const FPRegister& fn,
                           const FPRegister& fm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   fsub(fd, fn, fm);
 }
 
 
 void MacroAssembler::Hint(SystemHint code) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   hint(code);
 }
 
 
 void MacroAssembler::Hlt(int code) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   hlt(code);
 }
 
 
 void MacroAssembler::Isb() {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   isb();
 }
 
@@ -851,40 +866,21 @@
 void MacroAssembler::Ldnp(const CPURegister& rt,
                           const CPURegister& rt2,
                           const MemOperand& src) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!AreAliased(rt, rt2));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!AreAliased(rt, rt2));
   ldnp(rt, rt2, src);
 }
 
 
-void MacroAssembler::Ldp(const CPURegister& rt,
-                         const CPURegister& rt2,
-                         const MemOperand& src) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!AreAliased(rt, rt2));
-  ldp(rt, rt2, src);
-}
-
-
-void MacroAssembler::Ldpsw(const Register& rt,
-                           const Register& rt2,
-                           const MemOperand& src) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rt.IsZero());
-  ASSERT(!rt2.IsZero());
-  ldpsw(rt, rt2, src);
-}
-
-
 void MacroAssembler::Ldr(const CPURegister& rt, const Immediate& imm) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   ldr(rt, imm);
 }
 
 
 void MacroAssembler::Ldr(const CPURegister& rt, double imm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(rt.Is64Bits());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(rt.Is64Bits());
   ldr(rt, Immediate(double_to_rawbits(imm)));
 }
 
@@ -892,8 +888,8 @@
 void MacroAssembler::Lsl(const Register& rd,
                          const Register& rn,
                          unsigned shift) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   lsl(rd, rn, shift);
 }
 
@@ -901,8 +897,8 @@
 void MacroAssembler::Lsl(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   lslv(rd, rn, rm);
 }
 
@@ -910,8 +906,8 @@
 void MacroAssembler::Lsr(const Register& rd,
                          const Register& rn,
                          unsigned shift) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   lsr(rd, rn, shift);
 }
 
@@ -919,8 +915,8 @@
 void MacroAssembler::Lsr(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   lsrv(rd, rn, rm);
 }
 
@@ -929,8 +925,8 @@
                           const Register& rn,
                           const Register& rm,
                           const Register& ra) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   madd(rd, rn, rm, ra);
 }
 
@@ -938,15 +934,15 @@
 void MacroAssembler::Mneg(const Register& rd,
                           const Register& rn,
                           const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   mneg(rd, rn, rm);
 }
 
 
 void MacroAssembler::Mov(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   // Emit a register move only if the registers are distinct, or if they are
   // not X registers. Note that mov(w0, w0) is not a no-op because it clears
   // the top word of x0.
@@ -957,21 +953,21 @@
 
 
 void MacroAssembler::Movk(const Register& rd, uint64_t imm, int shift) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   movk(rd, imm, shift);
 }
 
 
 void MacroAssembler::Mrs(const Register& rt, SystemRegister sysreg) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rt.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rt.IsZero());
   mrs(rt, sysreg);
 }
 
 
 void MacroAssembler::Msr(SystemRegister sysreg, const Register& rt) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   msr(sysreg, rt);
 }
 
@@ -980,8 +976,8 @@
                           const Register& rn,
                           const Register& rm,
                           const Register& ra) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   msub(rd, rn, rm, ra);
 }
 
@@ -989,44 +985,44 @@
 void MacroAssembler::Mul(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   mul(rd, rn, rm);
 }
 
 
 void MacroAssembler::Rbit(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   rbit(rd, rn);
 }
 
 
 void MacroAssembler::Ret(const Register& xn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!xn.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!xn.IsZero());
   ret(xn);
   CheckVeneerPool(false, false);
 }
 
 
 void MacroAssembler::Rev(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   rev(rd, rn);
 }
 
 
 void MacroAssembler::Rev16(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   rev16(rd, rn);
 }
 
 
 void MacroAssembler::Rev32(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   rev32(rd, rn);
 }
 
@@ -1034,8 +1030,8 @@
 void MacroAssembler::Ror(const Register& rd,
                          const Register& rs,
                          unsigned shift) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   ror(rd, rs, shift);
 }
 
@@ -1043,8 +1039,8 @@
 void MacroAssembler::Ror(const Register& rd,
                          const Register& rn,
                          const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   rorv(rd, rn, rm);
 }
 
@@ -1053,8 +1049,8 @@
                            const Register& rn,
                            unsigned lsb,
                            unsigned width) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   sbfiz(rd, rn, lsb, width);
 }
 
@@ -1063,8 +1059,8 @@
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   sbfx(rd, rn, lsb, width);
 }
 
@@ -1072,7 +1068,7 @@
 void MacroAssembler::Scvtf(const FPRegister& fd,
                            const Register& rn,
                            unsigned fbits) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   scvtf(fd, rn, fbits);
 }
 
@@ -1080,8 +1076,8 @@
 void MacroAssembler::Sdiv(const Register& rd,
                           const Register& rn,
                           const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   sdiv(rd, rn, rm);
 }
 
@@ -1090,8 +1086,8 @@
                             const Register& rn,
                             const Register& rm,
                             const Register& ra) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   smaddl(rd, rn, rm, ra);
 }
 
@@ -1100,8 +1096,8 @@
                             const Register& rn,
                             const Register& rm,
                             const Register& ra) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   smsubl(rd, rn, rm, ra);
 }
 
@@ -1109,8 +1105,8 @@
 void MacroAssembler::Smull(const Register& rd,
                            const Register& rn,
                            const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   smull(rd, rn, rm);
 }
 
@@ -1118,8 +1114,8 @@
 void MacroAssembler::Smulh(const Register& rd,
                            const Register& rn,
                            const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   smulh(rd, rn, rm);
 }
 
@@ -1127,36 +1123,28 @@
 void MacroAssembler::Stnp(const CPURegister& rt,
                           const CPURegister& rt2,
                           const MemOperand& dst) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   stnp(rt, rt2, dst);
 }
 
 
-void MacroAssembler::Stp(const CPURegister& rt,
-                         const CPURegister& rt2,
-                         const MemOperand& dst) {
-  ASSERT(allow_macro_instructions_);
-  stp(rt, rt2, dst);
-}
-
-
 void MacroAssembler::Sxtb(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   sxtb(rd, rn);
 }
 
 
 void MacroAssembler::Sxth(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   sxth(rd, rn);
 }
 
 
 void MacroAssembler::Sxtw(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   sxtw(rd, rn);
 }
 
@@ -1165,8 +1153,8 @@
                            const Register& rn,
                            unsigned lsb,
                            unsigned width) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   ubfiz(rd, rn, lsb, width);
 }
 
@@ -1175,8 +1163,8 @@
                           const Register& rn,
                           unsigned lsb,
                           unsigned width) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   ubfx(rd, rn, lsb, width);
 }
 
@@ -1184,7 +1172,7 @@
 void MacroAssembler::Ucvtf(const FPRegister& fd,
                            const Register& rn,
                            unsigned fbits) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
   ucvtf(fd, rn, fbits);
 }
 
@@ -1192,8 +1180,8 @@
 void MacroAssembler::Udiv(const Register& rd,
                           const Register& rn,
                           const Register& rm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   udiv(rd, rn, rm);
 }
 
@@ -1202,8 +1190,8 @@
                             const Register& rn,
                             const Register& rm,
                             const Register& ra) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   umaddl(rd, rn, rm, ra);
 }
 
@@ -1212,35 +1200,35 @@
                             const Register& rn,
                             const Register& rm,
                             const Register& ra) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   umsubl(rd, rn, rm, ra);
 }
 
 
 void MacroAssembler::Uxtb(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   uxtb(rd, rn);
 }
 
 
 void MacroAssembler::Uxth(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   uxth(rd, rn);
 }
 
 
 void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
   uxtw(rd, rn);
 }
 
 
 void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
-  ASSERT(!csp.Is(sp_));
+  DCHECK(!csp.Is(sp_));
   if (!TmpList()->IsEmpty()) {
     if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
       UseScratchRegisterScope temps(this);
@@ -1258,10 +1246,10 @@
     // instruction are accepted.) Once we implement our flexible scratch
     // register idea, we could greatly simplify this function.
     InstructionAccurateScope scope(this);
-    ASSERT(space.IsImmediate());
+    DCHECK(space.IsImmediate());
     // Align to 16 bytes.
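     // (E.g. a 40-byte request is rounded up to 48; csp must stay 16-byte
     // aligned for memory accesses under the AArch64 ABI.)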
     uint64_t imm = RoundUp(space.ImmediateValue(), 0x10);
-    ASSERT(is_uint24(imm));
+    DCHECK(is_uint24(imm));
 
     Register source = StackPointer();
     if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
@@ -1283,8 +1271,8 @@
 
 
 void MacroAssembler::SyncSystemStackPointer() {
-  ASSERT(emit_debug_code());
-  ASSERT(!csp.Is(sp_));
+  DCHECK(emit_debug_code());
+  DCHECK(!csp.Is(sp_));
   { InstructionAccurateScope scope(this);
     if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
       bic(csp, StackPointer(), 0xf);
@@ -1304,7 +1292,9 @@
 
 
 void MacroAssembler::SmiTag(Register dst, Register src) {
-  ASSERT(dst.Is64Bits() && src.Is64Bits());
+  STATIC_ASSERT(kXRegSizeInBits ==
+                static_cast<unsigned>(kSmiShift + kSmiValueSize));
+  DCHECK(dst.Is64Bits() && src.Is64Bits());
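   // A worked example: with kSmiShift == 32 (see the STATIC_ASSERTs in
   // SmiTagAndPush below), tagging the value 42 yields
   // 42 << 32 == 0x0000002A00000000, i.e. the payload sits in the upper word.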
   Lsl(dst, src, kSmiShift);
 }
 
@@ -1313,7 +1303,9 @@
 
 
 void MacroAssembler::SmiUntag(Register dst, Register src) {
-  ASSERT(dst.Is64Bits() && src.Is64Bits());
+  STATIC_ASSERT(kXRegSizeInBits ==
+                static_cast<unsigned>(kSmiShift + kSmiValueSize));
+  DCHECK(dst.Is64Bits() && src.Is64Bits());
   if (FLAG_enable_slow_asserts) {
     AssertSmi(src);
   }
@@ -1327,7 +1319,7 @@
 void MacroAssembler::SmiUntagToDouble(FPRegister dst,
                                       Register src,
                                       UntagMode mode) {
-  ASSERT(dst.Is64Bits() && src.Is64Bits());
+  DCHECK(dst.Is64Bits() && src.Is64Bits());
   if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
     AssertSmi(src);
   }
@@ -1338,7 +1330,7 @@
 void MacroAssembler::SmiUntagToFloat(FPRegister dst,
                                      Register src,
                                      UntagMode mode) {
-  ASSERT(dst.Is32Bits() && src.Is64Bits());
+  DCHECK(dst.Is32Bits() && src.Is64Bits());
   if (FLAG_enable_slow_asserts && (mode == kNotSpeculativeUntag)) {
     AssertSmi(src);
   }
@@ -1347,13 +1339,17 @@
 
 
 void MacroAssembler::SmiTagAndPush(Register src) {
-  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
+                (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
+                (kSmiTag == 0));
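   // Pushing src.W() next to wzr assembles the tagged value directly in
   // memory: wzr supplies the clear low word and src.W() the payload word
   // above it, so no explicit shift is needed.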
   Push(src.W(), wzr);
 }
 
 
 void MacroAssembler::SmiTagAndPush(Register src1, Register src2) {
-  STATIC_ASSERT((kSmiShift == 32) && (kSmiTag == 0));
+  STATIC_ASSERT((static_cast<unsigned>(kSmiShift) == kWRegSizeInBits) &&
+                (static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits) &&
+                (kSmiTag == 0));
   Push(src1.W(), wzr, src2.W(), wzr);
 }
 
@@ -1369,7 +1365,7 @@
       B(not_smi_label);
     }
   } else {
-    ASSERT(not_smi_label);
+    DCHECK(not_smi_label);
     Tbnz(value, 0, not_smi_label);
   }
 }
@@ -1486,7 +1482,7 @@
   Ldrb(type.W(), FieldMemOperand(type, Map::kInstanceTypeOffset));
 
   STATIC_ASSERT(kStringTag == 0);
-  ASSERT((string != NULL) || (not_string != NULL));
+  DCHECK((string != NULL) || (not_string != NULL));
   if (string == NULL) {
     TestAndBranchIfAnySet(type.W(), kIsNotStringMask, not_string);
   } else if (not_string == NULL) {
@@ -1514,7 +1510,7 @@
   }
 
   if (csp.Is(StackPointer())) {
-    ASSERT(size % 16 == 0);
+    DCHECK(size % 16 == 0);
   } else {
     BumpSystemStackPointer(size);
   }
@@ -1525,7 +1521,7 @@
 
 void MacroAssembler::Claim(const Register& count, uint64_t unit_size) {
   if (unit_size == 0) return;
-  ASSERT(IsPowerOf2(unit_size));
+  DCHECK(base::bits::IsPowerOfTwo64(unit_size));
 
   const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
   const Operand size(count, LSL, shift);
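   // E.g. with unit_size == 8, shift == 3 and the stack moves by count << 3
   // bytes; the multiply comes for free from the shifted operand.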
@@ -1543,7 +1539,7 @@
 
 
 void MacroAssembler::ClaimBySMI(const Register& count_smi, uint64_t unit_size) {
-  ASSERT(unit_size == 0 || IsPowerOf2(unit_size));
+  DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
   const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
   const Operand size(count_smi,
                      (shift >= 0) ? (LSL) : (LSR),
@@ -1571,7 +1567,7 @@
   Add(StackPointer(), StackPointer(), size);
 
   if (csp.Is(StackPointer())) {
-    ASSERT(size % 16 == 0);
+    DCHECK(size % 16 == 0);
   } else if (emit_debug_code()) {
     // It is safe to leave csp where it is when unwinding the JavaScript stack,
     // but if we keep it matching StackPointer, the simulator can detect memory
@@ -1583,7 +1579,7 @@
 
 void MacroAssembler::Drop(const Register& count, uint64_t unit_size) {
   if (unit_size == 0) return;
-  ASSERT(IsPowerOf2(unit_size));
+  DCHECK(base::bits::IsPowerOfTwo64(unit_size));
 
   const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits);
   const Operand size(count, LSL, shift);
@@ -1604,7 +1600,7 @@
 
 
 void MacroAssembler::DropBySMI(const Register& count_smi, uint64_t unit_size) {
-  ASSERT(unit_size == 0 || IsPowerOf2(unit_size));
+  DCHECK(unit_size == 0 || base::bits::IsPowerOfTwo64(unit_size));
   const int shift = CountTrailingZeros(unit_size, kXRegSizeInBits) - kSmiShift;
   const Operand size(count_smi,
                      (shift >= 0) ? (LSL) : (LSR),
@@ -1647,7 +1643,7 @@
                                            const uint64_t bit_pattern,
                                            Label* label) {
   int bits = reg.SizeInBits();
-  ASSERT(CountSetBits(bit_pattern, bits) > 0);
+  DCHECK(CountSetBits(bit_pattern, bits) > 0);
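   // A single-bit mask such as 0x8 becomes one tbnz on bit 3; multi-bit
   // masks take the general test-and-branch path below.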
   if (CountSetBits(bit_pattern, bits) == 1) {
     Tbnz(reg, MaskToBit(bit_pattern), label);
   } else {
@@ -1661,7 +1657,7 @@
                                              const uint64_t bit_pattern,
                                              Label* label) {
   int bits = reg.SizeInBits();
-  ASSERT(CountSetBits(bit_pattern, bits) > 0);
+  DCHECK(CountSetBits(bit_pattern, bits) > 0);
   if (CountSetBits(bit_pattern, bits) == 1) {
     Tbz(reg, MaskToBit(bit_pattern), label);
   } else {
@@ -1672,7 +1668,7 @@
 
 
 void MacroAssembler::InlineData(uint64_t data) {
-  ASSERT(is_uint16(data));
+  DCHECK(is_uint16(data));
   InstructionAccurateScope scope(this, 1);
   movz(xzr, data);
 }
@@ -1691,11 +1687,11 @@
 
 
 void MacroAssembler::AnnotateInstrumentation(const char* marker_name) {
-  ASSERT(strlen(marker_name) == 2);
+  DCHECK(strlen(marker_name) == 2);
 
   // We allow only printable characters in the marker names. Unprintable
   // characters are reserved for controlling features of the instrumentation.
-  ASSERT(isprint(marker_name[0]) && isprint(marker_name[1]));
+  DCHECK(isprint(marker_name[0]) && isprint(marker_name[1]));
 
   InstructionAccurateScope scope(this, 1);
   movn(xzr, (marker_name[1] << 8) | marker_name[0]);
diff --git a/src/arm64/macro-assembler-arm64.cc b/src/arm64/macro-assembler-arm64.cc
index f2e49b4..f670403 100644
--- a/src/arm64/macro-assembler-arm64.cc
+++ b/src/arm64/macro-assembler-arm64.cc
@@ -6,6 +6,8 @@
 
 #if V8_TARGET_ARCH_ARM64
 
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/cpu-profiler.h"
@@ -64,17 +66,23 @@
   } else if (operand.IsImmediate()) {
     int64_t immediate = operand.ImmediateValue();
     unsigned reg_size = rd.SizeInBits();
-    ASSERT(rd.Is64Bits() || is_uint32(immediate));
 
     // If the operation is NOT, invert the operation and immediate.
     if ((op & NOT) == NOT) {
       op = static_cast<LogicalOp>(op & ~NOT);
       immediate = ~immediate;
-      if (rd.Is32Bits()) {
-        immediate &= kWRegMask;
-      }
     }
 
+    // Ignore the top 32 bits of an immediate if we're moving to a W register.
+    if (rd.Is32Bits()) {
+      // Check that the top 32 bits are consistent.
+      DCHECK(((immediate >> kWRegSizeInBits) == 0) ||
+             ((immediate >> kWRegSizeInBits) == -1));
+      immediate &= kWRegMask;
+    }
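+    // E.g. a W-register Bic inverts a small mask into 0xFFFFFFFFFFFFF000:
+    // the top word is all ones, so the check passes and the value is masked
+    // down to 0xFFFFF000. Any other top word indicates a caller bug.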
+
+    DCHECK(rd.Is64Bits() || is_uint32(immediate));
+
     // Special cases for all set or all clear immediates.
     if (immediate == 0) {
       switch (op) {
@@ -118,24 +126,24 @@
     } else {
       // Immediate can't be encoded: synthesize using move immediate.
       Register temp = temps.AcquireSameSizeAs(rn);
-      Mov(temp, immediate);
+      Operand imm_operand = MoveImmediateForShiftedOp(temp, immediate);
       if (rd.Is(csp)) {
         // If rd is the stack pointer we cannot use it as the destination
         // register so we use the temp register as an intermediate again.
-        Logical(temp, rn, temp, op);
+        Logical(temp, rn, imm_operand, op);
         Mov(csp, temp);
         AssertStackConsistency();
       } else {
-        Logical(rd, rn, temp, op);
+        Logical(rd, rn, imm_operand, op);
       }
     }
 
   } else if (operand.IsExtendedRegister()) {
-    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
     // Add/sub extended supports shift <= 4. We want to support exactly the
     // same modes here.
-    ASSERT(operand.shift_amount() <= 4);
-    ASSERT(operand.reg().Is64Bits() ||
+    DCHECK(operand.shift_amount() <= 4);
+    DCHECK(operand.reg().Is64Bits() ||
            ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
     Register temp = temps.AcquireSameSizeAs(rn);
     EmitExtendShift(temp, operand.reg(), operand.extend(),
@@ -144,16 +152,16 @@
 
   } else {
     // The operand can be encoded in the instruction.
-    ASSERT(operand.IsShiftedRegister());
+    DCHECK(operand.IsShiftedRegister());
     Logical(rd, rn, operand, op);
   }
 }
 
 
 void MacroAssembler::Mov(const Register& rd, uint64_t imm) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(is_uint32(imm) || is_int32(imm) || rd.Is64Bits());
+  DCHECK(!rd.IsZero());
 
   // TODO(all) extend to support more immediates.
   //
@@ -172,20 +180,11 @@
   // applying move-keep operations to move-zero and move-inverted initial
   // values.
 
-  unsigned reg_size = rd.SizeInBits();
-  unsigned n, imm_s, imm_r;
-  if (IsImmMovz(imm, reg_size) && !rd.IsSP()) {
-    // Immediate can be represented in a move zero instruction. Movz can't
-    // write to the stack pointer.
-    movz(rd, imm);
-  } else if (IsImmMovn(imm, reg_size) && !rd.IsSP()) {
-    // Immediate can be represented in a move inverted instruction. Movn can't
-    // write to the stack pointer.
-    movn(rd, rd.Is64Bits() ? ~imm : (~imm & kWRegMask));
-  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
-    // Immediate can be represented in a logical orr instruction.
-    LogicalImmediate(rd, AppropriateZeroRegFor(rd), n, imm_s, imm_r, ORR);
-  } else {
+  // Try to move the immediate in one instruction, and if that fails, switch to
+  // using multiple instructions.
+  if (!TryOneInstrMoveImmediate(rd, imm)) {
+    unsigned reg_size = rd.SizeInBits();
+
     // Generic immediate case. Imm will be represented by
     //   [imm3, imm2, imm1, imm0], where each imm is 16 bits.
     // A move-zero or move-inverted is generated for the first non-zero or
@@ -208,7 +207,7 @@
 
     // Iterate through the halfwords. Use movn/movz for the first non-ignored
     // halfword, and movk for subsequent halfwords.
-    ASSERT((reg_size % 16) == 0);
+    DCHECK((reg_size % 16) == 0);
     bool first_mov_done = false;
     for (unsigned i = 0; i < (rd.SizeInBits() / 16); i++) {
       uint64_t imm16 = (imm >> (16 * i)) & 0xffffL;
@@ -226,7 +225,7 @@
         }
       }
     }
-    ASSERT(first_mov_done);
+    DCHECK(first_mov_done);
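     // A sketch of the result: once the one-instruction forms have failed,
     // moving 0x0000CAFE00005678 emits
     //   movz rd, #0x5678
     //   movk rd, #0xCAFE, lsl #32
     // with the two all-zero halfwords skipped entirely.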
 
     // Move the temporary if the original destination register was the stack
     // pointer.
@@ -241,8 +240,8 @@
 void MacroAssembler::Mov(const Register& rd,
                          const Operand& operand,
                          DiscardMoveMode discard_mode) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
 
   // Provide a swap register for instructions that need to write into the
   // system stack pointer (and can't do this inherently).
@@ -288,14 +287,14 @@
 
   // Copy the result to the system stack pointer.
   if (!dst.Is(rd)) {
-    ASSERT(rd.IsSP());
+    DCHECK(rd.IsSP());
     Assembler::mov(rd, dst);
   }
 }
 
 
 void MacroAssembler::Mvn(const Register& rd, const Operand& operand) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
 
   if (operand.NeedsRelocation(this)) {
     Ldr(rd, operand.immediate());
@@ -319,7 +318,7 @@
 
 
 unsigned MacroAssembler::CountClearHalfWords(uint64_t imm, unsigned reg_size) {
-  ASSERT((reg_size % 8) == 0);
+  DCHECK((reg_size % 8) == 0);
   int count = 0;
   for (unsigned i = 0; i < (reg_size / 16); i++) {
     if ((imm & 0xffff) == 0) {
@@ -334,7 +333,7 @@
 // The movz instruction can generate immediates containing an arbitrary 16-bit
 // half-word, with remaining bits clear, e.g. 0x00001234, 0x0000123400000000.
 bool MacroAssembler::IsImmMovz(uint64_t imm, unsigned reg_size) {
-  ASSERT((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
+  DCHECK((reg_size == kXRegSizeInBits) || (reg_size == kWRegSizeInBits));
   return CountClearHalfWords(imm, reg_size) >= ((reg_size / 16) - 1);
 }
 
@@ -351,7 +350,7 @@
                                              StatusFlags nzcv,
                                              Condition cond,
                                              ConditionalCompareOp op) {
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK((cond != al) && (cond != nv));
   if (operand.NeedsRelocation(this)) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireX();
@@ -380,9 +379,9 @@
                           const Register& rn,
                           const Operand& operand,
                           Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
+  DCHECK((cond != al) && (cond != nv));
   if (operand.IsImmediate()) {
     // Immediate argument. Handle special cases of 0, 1 and -1 using zero
     // register.
@@ -413,6 +412,66 @@
 }
 
 
+bool MacroAssembler::TryOneInstrMoveImmediate(const Register& dst,
+                                              int64_t imm) {
+  unsigned n, imm_s, imm_r;
+  int reg_size = dst.SizeInBits();
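+  // Examples of the three single-instruction classes handled below (a
+  // sketch): 0x0000000012340000 fits movz, 0xFFFFFFFFFFFF1234 fits movn, and
+  // 0x00FF00FF00FF00FF is a repeating bit pattern that orr can encode.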
+  if (IsImmMovz(imm, reg_size) && !dst.IsSP()) {
+    // Immediate can be represented in a move zero instruction. Movz can't write
+    // to the stack pointer.
+    movz(dst, imm);
+    return true;
+  } else if (IsImmMovn(imm, reg_size) && !dst.IsSP()) {
+    // Immediate can be represented in a move not instruction. Movn can't write
+    // to the stack pointer.
+    movn(dst, dst.Is64Bits() ? ~imm : (~imm & kWRegMask));
+    return true;
+  } else if (IsImmLogical(imm, reg_size, &n, &imm_s, &imm_r)) {
+    // Immediate can be represented in a logical orr instruction.
+    LogicalImmediate(dst, AppropriateZeroRegFor(dst), n, imm_s, imm_r, ORR);
+    return true;
+  }
+  return false;
+}
+
+
+Operand MacroAssembler::MoveImmediateForShiftedOp(const Register& dst,
+                                                  int64_t imm) {
+  int reg_size = dst.SizeInBits();
+
+  // Encode the immediate in a single move instruction, if possible.
+  if (TryOneInstrMoveImmediate(dst, imm)) {
+    // The move was successful; nothing to do here.
+  } else {
+    // Pre-shift the immediate to the least-significant bits of the register.
+    int shift_low = CountTrailingZeros(imm, reg_size);
+    int64_t imm_low = imm >> shift_low;
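+    // E.g. (a sketch): 0x12340 is neither a move nor a logical immediate,
+    // but imm_low == 0x48D (shift_low == 6) fits a movz, so the caller gets
+    // Operand(dst, LSL, 6) back and the shift is folded into the following
+    // instruction for free.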
+
+    // Pre-shift the immediate to the most-significant bits of the register. We
+    // insert set bits in the least-significant bits, as this creates a
+    // different immediate that may be encodable using movn or orr-immediate.
+    // If this new immediate is encodable, the set bits will be eliminated by
+    // the post shift on the following instruction.
+    int shift_high = CountLeadingZeros(imm, reg_size);
+    int64_t imm_high = (imm << shift_high) | ((INT64_C(1) << shift_high) - 1);
+
+    if (TryOneInstrMoveImmediate(dst, imm_low)) {
+      // The new immediate has been moved into the destination's low bits:
+      // return a new leftward-shifting operand.
+      return Operand(dst, LSL, shift_low);
+    } else if (TryOneInstrMoveImmediate(dst, imm_high)) {
+      // The new immediate has been moved into the destination's high bits:
+      // return a new rightward-shifting operand.
+      return Operand(dst, LSR, shift_high);
+    } else {
+      // Use the generic move operation to set up the immediate.
+      Mov(dst, imm);
+    }
+  }
+  return Operand(dst);
+}
+
+
 void MacroAssembler::AddSubMacro(const Register& rd,
                                  const Register& rn,
                                  const Operand& operand,
@@ -435,8 +494,14 @@
              (operand.IsShiftedRegister() && (operand.shift() == ROR))) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireSameSizeAs(rn);
-    Mov(temp, operand);
-    AddSub(rd, rn, temp, S, op);
+    if (operand.IsImmediate()) {
+      Operand imm_operand =
+          MoveImmediateForShiftedOp(temp, operand.ImmediateValue());
+      AddSub(rd, rn, imm_operand, S, op);
+    } else {
+      Mov(temp, operand);
+      AddSub(rd, rn, temp, S, op);
+    }
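+    // Folding the immediate into a shifted operand this way can save the
+    // extra instructions that a full Mov(temp, operand) would emit when the
+    // immediate has a convenient pre-shifted form.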
   } else {
     AddSub(rd, rn, operand, S, op);
   }
@@ -448,7 +513,7 @@
                                           const Operand& operand,
                                           FlagsUpdate S,
                                           AddSubWithCarryOp op) {
-  ASSERT(rd.SizeInBits() == rn.SizeInBits());
+  DCHECK(rd.SizeInBits() == rn.SizeInBits());
   UseScratchRegisterScope temps(this);
 
   if (operand.NeedsRelocation(this)) {
@@ -465,9 +530,9 @@
 
   } else if (operand.IsShiftedRegister() && (operand.shift_amount() != 0)) {
     // Add/sub with carry (shifted register).
-    ASSERT(operand.reg().SizeInBits() == rd.SizeInBits());
-    ASSERT(operand.shift() != ROR);
-    ASSERT(is_uintn(operand.shift_amount(),
+    DCHECK(operand.reg().SizeInBits() == rd.SizeInBits());
+    DCHECK(operand.shift() != ROR);
+    DCHECK(is_uintn(operand.shift_amount(),
           rd.SizeInBits() == kXRegSizeInBits ? kXRegSizeInBitsLog2
                                              : kWRegSizeInBitsLog2));
     Register temp = temps.AcquireSameSizeAs(rn);
@@ -476,11 +541,11 @@
 
   } else if (operand.IsExtendedRegister()) {
     // Add/sub with carry (extended register).
-    ASSERT(operand.reg().SizeInBits() <= rd.SizeInBits());
+    DCHECK(operand.reg().SizeInBits() <= rd.SizeInBits());
     // Add/sub extended supports a shift <= 4. We want to support exactly the
     // same modes.
-    ASSERT(operand.shift_amount() <= 4);
-    ASSERT(operand.reg().Is64Bits() ||
+    DCHECK(operand.shift_amount() <= 4);
+    DCHECK(operand.reg().Is64Bits() ||
            ((operand.extend() != UXTX) && (operand.extend() != SXTX)));
     Register temp = temps.AcquireSameSizeAs(rn);
     EmitExtendShift(temp, operand.reg(), operand.extend(),
@@ -525,11 +590,44 @@
   }
 }
 
+void MacroAssembler::LoadStorePairMacro(const CPURegister& rt,
+                                        const CPURegister& rt2,
+                                        const MemOperand& addr,
+                                        LoadStorePairOp op) {
+  // TODO(all): Should we support register offset for load-store-pair?
+  DCHECK(!addr.IsRegisterOffset());
+
+  int64_t offset = addr.offset();
+  LSDataSize size = CalcLSPairDataSize(op);
+
+  // Check if the offset fits in the immediate field of the appropriate
+  // instruction. If not, emit two instructions to perform the operation.
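+  // For X registers the pair offset is a signed 7-bit field scaled by 8,
+  // covering [-512, 504] bytes, so e.g. an offset of 1024 takes the
+  // two-instruction path below.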
+  if (IsImmLSPair(offset, size)) {
+    // Encodable in one load/store pair instruction.
+    LoadStorePair(rt, rt2, addr, op);
+  } else {
+    Register base = addr.base();
+    if (addr.IsImmediateOffset()) {
+      UseScratchRegisterScope temps(this);
+      Register temp = temps.AcquireSameSizeAs(base);
+      Add(temp, base, offset);
+      LoadStorePair(rt, rt2, MemOperand(temp), op);
+    } else if (addr.IsPostIndex()) {
+      LoadStorePair(rt, rt2, MemOperand(base), op);
+      Add(base, base, offset);
+    } else {
+      DCHECK(addr.IsPreIndex());
+      Add(base, base, offset);
+      LoadStorePair(rt, rt2, MemOperand(base), op);
+    }
+  }
+}
+
 
 void MacroAssembler::Load(const Register& rt,
                           const MemOperand& addr,
                           Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
 
   if (r.IsInteger8()) {
     Ldrsb(rt, addr);
@@ -542,7 +640,7 @@
   } else if (r.IsInteger32()) {
     Ldr(rt.W(), addr);
   } else {
-    ASSERT(rt.Is64Bits());
+    DCHECK(rt.Is64Bits());
     Ldr(rt, addr);
   }
 }
@@ -551,7 +649,7 @@
 void MacroAssembler::Store(const Register& rt,
                            const MemOperand& addr,
                            Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
 
   if (r.IsInteger8() || r.IsUInteger8()) {
     Strb(rt, addr);
@@ -560,7 +658,7 @@
   } else if (r.IsInteger32()) {
     Str(rt.W(), addr);
   } else {
-    ASSERT(rt.Is64Bits());
+    DCHECK(rt.Is64Bits());
     if (r.IsHeapObject()) {
       AssertNotSmi(rt);
     } else if (r.IsSmi()) {
@@ -598,30 +696,29 @@
 
 
 void MacroAssembler::Adr(const Register& rd, Label* label, AdrHint hint) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(!rd.IsZero());
+  DCHECK(allow_macro_instructions_);
+  DCHECK(!rd.IsZero());
 
   if (hint == kAdrNear) {
     adr(rd, label);
     return;
   }
 
-  ASSERT(hint == kAdrFar);
-  UseScratchRegisterScope temps(this);
-  Register scratch = temps.AcquireX();
-  ASSERT(!AreAliased(rd, scratch));
-
+  DCHECK(hint == kAdrFar);
   if (label->is_bound()) {
     int label_offset = label->pos() - pc_offset();
     if (Instruction::IsValidPCRelOffset(label_offset)) {
       adr(rd, label);
     } else {
-      ASSERT(label_offset <= 0);
+      DCHECK(label_offset <= 0);
       int min_adr_offset = -(1 << (Instruction::ImmPCRelRangeBitwidth - 1));
       adr(rd, min_adr_offset);
       Add(rd, rd, label_offset - min_adr_offset);
     }
   } else {
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.AcquireX();
+
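     // The adr and nop padding below form a fixed-size patchable window;
     // once the label is bound outside adr's +-1MB range, the patcher
     // rewrites the window into a longer-range computation via the scratch
     // register.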
     InstructionAccurateScope scope(
         this, PatchingAssembler::kAdrFarPatchableNInstrs);
     adr(rd, label);
@@ -629,13 +726,12 @@
       nop(ADR_FAR_NOP);
     }
     movz(scratch, 0);
-    add(rd, rd, scratch);
   }
 }
 
 
 void MacroAssembler::B(Label* label, BranchType type, Register reg, int bit) {
-  ASSERT((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
+  DCHECK((reg.Is(NoReg) || type >= kBranchTypeFirstUsingReg) &&
          (bit == -1 || type >= kBranchTypeFirstUsingBit));
   if (kBranchTypeFirstCondition <= type && type <= kBranchTypeLastCondition) {
     B(static_cast<Condition>(type), label);
@@ -655,8 +751,8 @@
 
 
 void MacroAssembler::B(Label* label, Condition cond) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT((cond != al) && (cond != nv));
+  DCHECK(allow_macro_instructions_);
+  DCHECK((cond != al) && (cond != nv));
 
   Label done;
   bool need_extra_instructions =
@@ -673,7 +769,7 @@
 
 
 void MacroAssembler::Tbnz(const Register& rt, unsigned bit_pos, Label* label) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
 
   Label done;
   bool need_extra_instructions =
@@ -690,7 +786,7 @@
 
 
 void MacroAssembler::Tbz(const Register& rt, unsigned bit_pos, Label* label) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
 
   Label done;
   bool need_extra_instructions =
@@ -707,7 +803,7 @@
 
 
 void MacroAssembler::Cbnz(const Register& rt, Label* label) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
 
   Label done;
   bool need_extra_instructions =
@@ -724,7 +820,7 @@
 
 
 void MacroAssembler::Cbz(const Register& rt, Label* label) {
-  ASSERT(allow_macro_instructions_);
+  DCHECK(allow_macro_instructions_);
 
   Label done;
   bool need_extra_instructions =
@@ -746,8 +842,8 @@
 void MacroAssembler::Abs(const Register& rd, const Register& rm,
                          Label* is_not_representable,
                          Label* is_representable) {
-  ASSERT(allow_macro_instructions_);
-  ASSERT(AreSameSizeAndType(rd, rm));
+  DCHECK(allow_macro_instructions_);
+  DCHECK(AreSameSizeAndType(rd, rm));
 
   Cmp(rm, 1);
   Cneg(rd, rm, lt);
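   // Comparing against 1 rather than 0 is deliberate: rm - 1 overflows only
   // for the most negative input, whose absolute value is unrepresentable,
   // so the V flag singles out that case for the branches that follow this
   // hunk.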
@@ -771,7 +867,7 @@
 
 void MacroAssembler::Push(const CPURegister& src0, const CPURegister& src1,
                           const CPURegister& src2, const CPURegister& src3) {
-  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
+  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
 
   int count = 1 + src1.IsValid() + src2.IsValid() + src3.IsValid();
   int size = src0.SizeInBytes();
@@ -785,7 +881,7 @@
                           const CPURegister& src2, const CPURegister& src3,
                           const CPURegister& src4, const CPURegister& src5,
                           const CPURegister& src6, const CPURegister& src7) {
-  ASSERT(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
+  DCHECK(AreSameSizeAndType(src0, src1, src2, src3, src4, src5, src6, src7));
 
   int count = 5 + src5.IsValid() + src6.IsValid() + src7.IsValid();
   int size = src0.SizeInBytes();
@@ -800,9 +896,9 @@
                          const CPURegister& dst2, const CPURegister& dst3) {
   // It is not valid to pop into the same register more than once in one
   // instruction, not even into the zero register.
-  ASSERT(!AreAliased(dst0, dst1, dst2, dst3));
-  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
-  ASSERT(dst0.IsValid());
+  DCHECK(!AreAliased(dst0, dst1, dst2, dst3));
+  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+  DCHECK(dst0.IsValid());
 
   int count = 1 + dst1.IsValid() + dst2.IsValid() + dst3.IsValid();
   int size = dst0.SizeInBytes();
@@ -812,6 +908,17 @@
 }
 
 
+void MacroAssembler::Push(const Register& src0, const FPRegister& src1) {
+  int size = src0.SizeInBytes() + src1.SizeInBytes();
+
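+  // The layout matches Push(src0, src1): src0 ends up at the higher address
+  // and src1 on top of the stack.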
+  PushPreamble(size);
+  // Reserve room for src0 and push src1.
+  str(src1, MemOperand(StackPointer(), -size, PreIndex));
+  // Fill the gap with src0.
+  str(src0, MemOperand(StackPointer(), src1.SizeInBytes()));
+}
+
+
 void MacroAssembler::PushPopQueue::PushQueued(
     PreambleDirective preamble_directive) {
   if (queued_.empty()) return;
@@ -936,7 +1043,7 @@
     PushHelper(1, size, src, NoReg, NoReg, NoReg);
     count -= 1;
   }
-  ASSERT(count == 0);
+  DCHECK(count == 0);
 }
 
 
@@ -994,22 +1101,22 @@
   // Ensure that we don't unintentionally modify scratch or debug registers.
   InstructionAccurateScope scope(this);
 
-  ASSERT(AreSameSizeAndType(src0, src1, src2, src3));
-  ASSERT(size == src0.SizeInBytes());
+  DCHECK(AreSameSizeAndType(src0, src1, src2, src3));
+  DCHECK(size == src0.SizeInBytes());
 
   // When pushing multiple registers, the store order is chosen such that
   // Push(a, b) is equivalent to Push(a) followed by Push(b).
   switch (count) {
     case 1:
-      ASSERT(src1.IsNone() && src2.IsNone() && src3.IsNone());
+      DCHECK(src1.IsNone() && src2.IsNone() && src3.IsNone());
       str(src0, MemOperand(StackPointer(), -1 * size, PreIndex));
       break;
     case 2:
-      ASSERT(src2.IsNone() && src3.IsNone());
+      DCHECK(src2.IsNone() && src3.IsNone());
       stp(src1, src0, MemOperand(StackPointer(), -2 * size, PreIndex));
       break;
     case 3:
-      ASSERT(src3.IsNone());
+      DCHECK(src3.IsNone());
       stp(src2, src1, MemOperand(StackPointer(), -3 * size, PreIndex));
       str(src0, MemOperand(StackPointer(), 2 * size));
       break;
@@ -1034,22 +1141,22 @@
   // Ensure that we don't unintentionally modify scratch or debug registers.
   InstructionAccurateScope scope(this);
 
-  ASSERT(AreSameSizeAndType(dst0, dst1, dst2, dst3));
-  ASSERT(size == dst0.SizeInBytes());
+  DCHECK(AreSameSizeAndType(dst0, dst1, dst2, dst3));
+  DCHECK(size == dst0.SizeInBytes());
 
   // When popping multiple registers, the load order is chosen such that
   // Pop(a, b) is equivalent to Pop(a) followed by Pop(b).
   switch (count) {
     case 1:
-      ASSERT(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
+      DCHECK(dst1.IsNone() && dst2.IsNone() && dst3.IsNone());
       ldr(dst0, MemOperand(StackPointer(), 1 * size, PostIndex));
       break;
     case 2:
-      ASSERT(dst2.IsNone() && dst3.IsNone());
+      DCHECK(dst2.IsNone() && dst3.IsNone());
       ldp(dst0, dst1, MemOperand(StackPointer(), 2 * size, PostIndex));
       break;
     case 3:
-      ASSERT(dst3.IsNone());
+      DCHECK(dst3.IsNone());
       ldr(dst2, MemOperand(StackPointer(), 2 * size));
       ldp(dst0, dst1, MemOperand(StackPointer(), 3 * size, PostIndex));
       break;
@@ -1073,7 +1180,7 @@
     // on entry and the total size of the specified registers must also be a
     // multiple of 16 bytes.
     if (total_size.IsImmediate()) {
-      ASSERT((total_size.ImmediateValue() % 16) == 0);
+      DCHECK((total_size.ImmediateValue() % 16) == 0);
     }
 
     // Don't check access size for non-immediate sizes. It's difficult to do
@@ -1093,7 +1200,7 @@
     // on entry and the total size of the specified registers must also be a
     // multiple of 16 bytes.
     if (total_size.IsImmediate()) {
-      ASSERT((total_size.ImmediateValue() % 16) == 0);
+      DCHECK((total_size.ImmediateValue() % 16) == 0);
     }
 
     // Don't check access size for non-immediate sizes. It's difficult to do
@@ -1109,7 +1216,7 @@
 
 void MacroAssembler::Poke(const CPURegister& src, const Operand& offset) {
   if (offset.IsImmediate()) {
-    ASSERT(offset.ImmediateValue() >= 0);
+    DCHECK(offset.ImmediateValue() >= 0);
   } else if (emit_debug_code()) {
     Cmp(xzr, offset);
     Check(le, kStackAccessBelowStackPointer);
@@ -1121,7 +1228,7 @@
 
 void MacroAssembler::Peek(const CPURegister& dst, const Operand& offset) {
   if (offset.IsImmediate()) {
-    ASSERT(offset.ImmediateValue() >= 0);
+    DCHECK(offset.ImmediateValue() >= 0);
   } else if (emit_debug_code()) {
     Cmp(xzr, offset);
     Check(le, kStackAccessBelowStackPointer);
@@ -1134,8 +1241,8 @@
 void MacroAssembler::PokePair(const CPURegister& src1,
                               const CPURegister& src2,
                               int offset) {
-  ASSERT(AreSameSizeAndType(src1, src2));
-  ASSERT((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
+  DCHECK(AreSameSizeAndType(src1, src2));
+  DCHECK((offset >= 0) && ((offset % src1.SizeInBytes()) == 0));
   Stp(src1, src2, MemOperand(StackPointer(), offset));
 }
 
@@ -1143,8 +1250,8 @@
 void MacroAssembler::PeekPair(const CPURegister& dst1,
                               const CPURegister& dst2,
                               int offset) {
-  ASSERT(AreSameSizeAndType(dst1, dst2));
-  ASSERT((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
+  DCHECK(AreSameSizeAndType(dst1, dst2));
+  DCHECK((offset >= 0) && ((offset % dst1.SizeInBytes()) == 0));
   Ldp(dst1, dst2, MemOperand(StackPointer(), offset));
 }
 
@@ -1155,7 +1262,7 @@
 
   // This method must not be called unless the current stack pointer is the
   // system stack pointer (csp).
-  ASSERT(csp.Is(StackPointer()));
+  DCHECK(csp.Is(StackPointer()));
 
   MemOperand tos(csp, -2 * kXRegSize, PreIndex);
 
@@ -1179,7 +1286,7 @@
 
   // This method must not be called unless the current stack pointer is the
   // system stack pointer (csp).
-  ASSERT(csp.Is(StackPointer()));
+  DCHECK(csp.Is(StackPointer()));
 
   MemOperand tos(csp, 2 * kXRegSize, PostIndex);
 
@@ -1353,7 +1460,7 @@
                                     Register scratch2,
                                     Register scratch3,
                                     Label* call_runtime) {
-  ASSERT(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
+  DCHECK(!AreAliased(object, null_value, scratch0, scratch1, scratch2,
                      scratch3));
 
   Register empty_fixed_array_value = scratch0;
@@ -1435,7 +1542,7 @@
                                         Register scratch1,
                                         Register scratch2) {
   // Handler expects argument in x0.
-  ASSERT(exception.Is(x0));
+  DCHECK(exception.Is(x0));
 
   // Compute the handler entry address and jump to it. The handler table is
   // a fixed array of (smi-tagged) code offsets.
@@ -1453,7 +1560,7 @@
 void MacroAssembler::InNewSpace(Register object,
                                 Condition cond,
                                 Label* branch) {
-  ASSERT(cond == eq || cond == ne);
+  DCHECK(cond == eq || cond == ne);
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireX();
   And(temp, object, ExternalReference::new_space_mask(isolate()));
@@ -1476,10 +1583,10 @@
   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
   // The handler expects the exception in x0.
-  ASSERT(value.Is(x0));
+  DCHECK(value.Is(x0));
 
   // Drop the stack pointer to the top of the top handler.
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(jssp.Is(StackPointer()));
   Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                           isolate())));
   Ldr(jssp, MemOperand(scratch1));
@@ -1518,10 +1625,10 @@
   STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
 
   // The handler expects the exception in x0.
-  ASSERT(value.Is(x0));
+  DCHECK(value.Is(x0));
 
   // Drop the stack pointer to the top of the top stack handler.
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(jssp.Is(StackPointer()));
   Mov(scratch1, Operand(ExternalReference(Isolate::kHandlerAddress,
                                           isolate())));
   Ldr(jssp, MemOperand(scratch1));
@@ -1551,12 +1658,6 @@
 }
 
 
-void MacroAssembler::SmiAbs(const Register& smi, Label* slow) {
-  ASSERT(smi.Is64Bits());
-  Abs(smi, smi, slow);
-}
-
-
 void MacroAssembler::AssertSmi(Register object, BailoutReason reason) {
   if (emit_debug_code()) {
     STATIC_ASSERT(kSmiTag == 0);
@@ -1618,7 +1719,7 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
-  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
+  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
@@ -1670,7 +1771,7 @@
       ExternalReference::handle_scope_level_address(isolate()),
       next_address);
 
-  ASSERT(function_address.is(x1) || function_address.is(x2));
+  DCHECK(function_address.is(x1) || function_address.is(x2));
 
   Label profiler_disabled;
   Label end_profiler_check;
@@ -1779,7 +1880,7 @@
     FrameScope frame(this, StackFrame::INTERNAL);
     CallExternalReference(
         ExternalReference(
-            Runtime::kHiddenPromoteScheduledException, isolate()), 0);
+            Runtime::kPromoteScheduledException, isolate()), 0);
   }
   B(&exception_handled);
 
@@ -1828,7 +1929,7 @@
 void MacroAssembler::GetBuiltinEntry(Register target,
                                      Register function,
                                      Builtins::JavaScript id) {
-  ASSERT(!AreAliased(target, function));
+  DCHECK(!AreAliased(target, function));
   GetBuiltinFunction(function, id);
   // Load the code entry point from the builtins object.
   Ldr(target, FieldMemOperand(function, JSFunction::kCodeEntryOffset));
@@ -1840,7 +1941,7 @@
                                    const CallWrapper& call_wrapper) {
   ASM_LOCATION("MacroAssembler::InvokeBuiltin");
   // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Get the builtin entry in x2 and setup the function object in x1.
   GetBuiltinEntry(x2, x1, id);
@@ -1849,7 +1950,7 @@
     Call(x2);
     call_wrapper.AfterCall();
   } else {
-    ASSERT(flag == JUMP_FUNCTION);
+    DCHECK(flag == JUMP_FUNCTION);
     Jump(x2);
   }
 }
@@ -1881,7 +1982,7 @@
                                          Heap::RootListIndex map_index,
                                          Register scratch1,
                                          Register scratch2) {
-  ASSERT(!AreAliased(string, length, scratch1, scratch2));
+  DCHECK(!AreAliased(string, length, scratch1, scratch2));
   LoadRoot(scratch2, map_index);
   SmiTag(scratch1, length);
   Str(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
@@ -1898,7 +1999,7 @@
   // environment.
   // Note: This will break if we ever start generating snapshots on one ARM
   // platform for another ARM platform with a different alignment.
-  return OS::ActivationFrameAlignment();
+  return base::OS::ActivationFrameAlignment();
 #else  // V8_HOST_ARCH_ARM64
   // If we are using the simulator then we should always align to the expected
   // alignment. As the simulator is used to generate snapshots we do not know
@@ -1928,10 +2029,10 @@
 void MacroAssembler::CallCFunction(Register function,
                                    int num_of_reg_args,
                                    int num_of_double_args) {
-  ASSERT(has_frame());
+  DCHECK(has_frame());
   // We can pass 8 integer arguments in registers. If we need to pass more than
   // that, we'll need to implement support for passing them on the stack.
-  ASSERT(num_of_reg_args <= 8);
+  DCHECK(num_of_reg_args <= 8);
 
   // If we're passing doubles, we're limited to the following prototypes
   // (defined by ExternalReference::Type):
@@ -1940,8 +2041,8 @@
   //  BUILTIN_FP_CALL:       double f(double)
   //  BUILTIN_FP_INT_CALL:   double f(double, int)
   if (num_of_double_args > 0) {
-    ASSERT(num_of_reg_args <= 1);
-    ASSERT((num_of_double_args + num_of_reg_args) <= 2);
+    DCHECK(num_of_reg_args <= 1);
+    DCHECK((num_of_double_args + num_of_reg_args) <= 2);
   }
 
 
@@ -1953,12 +2054,12 @@
 
     int sp_alignment = ActivationFrameAlignment();
     // The ABI mandates at least 16-byte alignment.
-    ASSERT(sp_alignment >= 16);
-    ASSERT(IsPowerOf2(sp_alignment));
+    DCHECK(sp_alignment >= 16);
+    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
 
     // The current stack pointer is a callee saved register, and is preserved
     // across the call.
-    ASSERT(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
+    DCHECK(kCalleeSaved.IncludesAliasOf(old_stack_pointer));
 
     // Align and synchronize the system stack pointer with jssp.
     Bic(csp, old_stack_pointer, sp_alignment - 1);
@@ -1976,7 +2077,7 @@
       // where we only pushed one W register on top of an aligned jssp.
       UseScratchRegisterScope temps(this);
       Register temp = temps.AcquireX();
-      ASSERT(ActivationFrameAlignment() == 16);
+      DCHECK(ActivationFrameAlignment() == 16);
       Sub(temp, csp, old_stack_pointer);
       // We want temp <= 0 && temp >= -12.
       Cmp(temp, 0);
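The Bic a few lines up aligns csp by clearing the low bits of the old stack pointer. A minimal C++ sketch of that rounding, assuming (as the DCHECKs do) a power-of-two alignment; AlignDown is an illustrative name, not a V8 helper:

#include <cassert>
#include <cstdint>

uint64_t AlignDown(uint64_t sp, uint64_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // power of two, as DCHECKed
  return sp & ~(alignment - 1);                // Bic(csp, sp, alignment - 1)
}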
@@ -2002,13 +2103,13 @@
 
 
 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode) {
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
   Jump(reinterpret_cast<intptr_t>(target), rmode);
 }
 
 
 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
   AllowDeferredHandleDereference embedding_raw_address;
   Jump(reinterpret_cast<intptr_t>(code.location()), rmode);
 }
@@ -2057,7 +2158,7 @@
   positions_recorder()->WriteRecordedPositions();
 
   // Addresses always have 64 bits, so we shouldn't encounter NONE32.
-  ASSERT(rmode != RelocInfo::NONE32);
+  DCHECK(rmode != RelocInfo::NONE32);
 
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireX();
@@ -2066,7 +2167,7 @@
     // Addresses are 48 bits so we never need to load the upper 16 bits.
     uint64_t imm = reinterpret_cast<uint64_t>(target);
     // If we don't use ARM tagged addresses, the 16 higher bits must be 0.
-    ASSERT(((imm >> 48) & 0xffff) == 0);
+    DCHECK(((imm >> 48) & 0xffff) == 0);
     movz(temp, (imm >> 0) & 0xffff, 0);
     movk(temp, (imm >> 16) & 0xffff, 16);
     movk(temp, (imm >> 32) & 0xffff, 32);
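The movz/movk sequence materializes the 48-bit address 16 bits at a time, after checking that bits 48-63 are zero. The same decomposition as standalone C++ (a sketch; BuildAddress is a hypothetical name):

#include <cassert>
#include <cstdint>

uint64_t BuildAddress(uint64_t imm) {
  assert(((imm >> 48) & 0xffff) == 0);   // upper 16 bits must be unused
  uint64_t temp = (imm >> 0) & 0xffff;   // movz temp, #imm[15:0]
  temp |= ((imm >> 16) & 0xffff) << 16;  // movk temp, #imm[31:16], lsl #16
  temp |= ((imm >> 32) & 0xffff) << 32;  // movk temp, #imm[47:32], lsl #32
  return temp;
}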
@@ -2119,7 +2220,7 @@
   USE(target);
 
   // Addresses always have 64 bits, so we shouldn't encounter NONE32.
-  ASSERT(rmode != RelocInfo::NONE32);
+  DCHECK(rmode != RelocInfo::NONE32);
 
   if (rmode == RelocInfo::NONE64) {
     return kCallSizeWithoutRelocation;
@@ -2136,7 +2237,7 @@
   USE(ast_id);
 
   // Addresses always have 64 bits, so we shouldn't encounter NONE32.
-  ASSERT(rmode != RelocInfo::NONE32);
+  DCHECK(rmode != RelocInfo::NONE32);
 
   if (rmode == RelocInfo::NONE64) {
     return kCallSizeWithoutRelocation;
@@ -2146,58 +2247,38 @@
 }
 
 
+void MacroAssembler::JumpIfHeapNumber(Register object, Label* on_heap_number,
+                                      SmiCheckType smi_check_type) {
+  Label on_not_heap_number;
 
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(object, &on_not_heap_number);
+  }
 
-
-void MacroAssembler::JumpForHeapNumber(Register object,
-                                       Register heap_number_map,
-                                       Label* on_heap_number,
-                                       Label* on_not_heap_number) {
-  ASSERT(on_heap_number || on_not_heap_number);
   AssertNotSmi(object);
 
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireX();
-
-  // Load the HeapNumber map if it is not passed.
-  if (heap_number_map.Is(NoReg)) {
-    heap_number_map = temps.AcquireX();
-    LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  } else {
-    AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
-  }
-
-  ASSERT(!AreAliased(temp, heap_number_map));
-
   Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
-  Cmp(temp, heap_number_map);
+  JumpIfRoot(temp, Heap::kHeapNumberMapRootIndex, on_heap_number);
 
-  if (on_heap_number) {
-    B(eq, on_heap_number);
-  }
-  if (on_not_heap_number) {
-    B(ne, on_not_heap_number);
-  }
-}
-
-
-void MacroAssembler::JumpIfHeapNumber(Register object,
-                                      Label* on_heap_number,
-                                      Register heap_number_map) {
-  JumpForHeapNumber(object,
-                    heap_number_map,
-                    on_heap_number,
-                    NULL);
+  Bind(&on_not_heap_number);
 }
 
 
 void MacroAssembler::JumpIfNotHeapNumber(Register object,
                                          Label* on_not_heap_number,
-                                         Register heap_number_map) {
-  JumpForHeapNumber(object,
-                    heap_number_map,
-                    NULL,
-                    on_not_heap_number);
+                                         SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(object, on_not_heap_number);
+  }
+
+  AssertNotSmi(object);
+
+  UseScratchRegisterScope temps(this);
+  Register temp = temps.AcquireX();
+  Ldr(temp, FieldMemOperand(object, HeapObject::kMapOffset));
+  JumpIfNotRoot(temp, Heap::kHeapNumberMapRootIndex, on_not_heap_number);
 }
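The rewritten heap-number helpers take a SmiCheckType instead of a caller-supplied map register. A hedged usage fragment (not standalone; it assumes MacroAssembler-emitting code, and the register and label names are illustrative):

Label is_number, not_number;
// Value of unknown type: the helper performs the smi check itself.
JumpIfHeapNumber(x0, &is_number, DO_SMI_CHECK);
// Value already known to be a heap object: skip the smi check.
JumpIfNotHeapNumber(x1, &not_number, DONT_DO_SMI_CHECK);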
 
 
@@ -2207,7 +2288,7 @@
                                              Register scratch2,
                                              Register scratch3,
                                              Label* not_found) {
-  ASSERT(!AreAliased(object, result, scratch1, scratch2, scratch3));
+  DCHECK(!AreAliased(object, result, scratch1, scratch2, scratch3));
 
   // Use of registers. Register result is used as a temporary.
   Register number_string_cache = result;
@@ -2231,8 +2312,7 @@
   Label load_result_from_cache;
 
   JumpIfSmi(object, &is_smi);
-  CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
-           DONT_DO_SMI_CHECK);
+  JumpIfNotHeapNumber(object, not_found);
 
   STATIC_ASSERT(kDoubleSize == (kWRegSize * 2));
   Add(scratch1, object, HeapNumber::kValueOffset - kHeapObjectTag);
@@ -2313,7 +2393,7 @@
 
 void MacroAssembler::JumpIfMinusZero(Register input,
                                      Label* on_negative_zero) {
-  ASSERT(input.Is64Bits());
+  DCHECK(input.Is64Bits());
   // Floating point value is in an integer register. Detect -0.0 by subtracting
   // 1 (cmp), which will cause overflow.
   Cmp(input, 1);
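The single Cmp works because the bit pattern of -0.0, reinterpreted as a signed 64-bit integer, is INT64_MIN, the only value for which subtracting 1 overflows. A standalone sketch of that fact (IsMinusZeroBits is a hypothetical helper):

#include <cstdint>
#include <cstring>

bool IsMinusZeroBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  // Sign bit only: 0x8000000000000000, i.e. INT64_MIN as a signed integer.
  return bits == 0x8000000000000000ULL;
}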
@@ -2366,9 +2446,9 @@
                                                Register scratch5) {
   // Untag src and dst into scratch registers.
   // Copy src->dst in a tight loop.
-  ASSERT(!AreAliased(dst, src,
+  DCHECK(!AreAliased(dst, src,
                      scratch1, scratch2, scratch3, scratch4, scratch5));
-  ASSERT(count >= 2);
+  DCHECK(count >= 2);
 
   const Register& remaining = scratch3;
   Mov(remaining, count / 2);
@@ -2405,7 +2485,7 @@
                                                    Register scratch4) {
   // Untag src and dst into scratch registers.
   // Copy src->dst in an unrolled loop.
-  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
+  DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3, scratch4));
 
   const Register& dst_untagged = scratch1;
   const Register& src_untagged = scratch2;
@@ -2434,7 +2514,7 @@
                                               Register scratch3) {
   // Untag src and dst into scratch registers.
   // Copy src->dst in an unrolled loop.
-  ASSERT(!AreAliased(dst, src, scratch1, scratch2, scratch3));
+  DCHECK(!AreAliased(dst, src, scratch1, scratch2, scratch3));
 
   const Register& dst_untagged = scratch1;
   const Register& src_untagged = scratch2;
@@ -2463,10 +2543,10 @@
   //
   // In both cases, fields are copied in pairs if possible, and left-overs are
   // handled separately.
-  ASSERT(!AreAliased(dst, src));
-  ASSERT(!temps.IncludesAliasOf(dst));
-  ASSERT(!temps.IncludesAliasOf(src));
-  ASSERT(!temps.IncludesAliasOf(xzr));
+  DCHECK(!AreAliased(dst, src));
+  DCHECK(!temps.IncludesAliasOf(dst));
+  DCHECK(!temps.IncludesAliasOf(src));
+  DCHECK(!temps.IncludesAliasOf(xzr));
 
   if (emit_debug_code()) {
     Cmp(dst, src);
@@ -2510,8 +2590,8 @@
   UseScratchRegisterScope temps(this);
   Register tmp1 = temps.AcquireX();
   Register tmp2 = temps.AcquireX();
-  ASSERT(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
-  ASSERT(!AreAliased(src, dst, csp));
+  DCHECK(!AreAliased(src, dst, length, scratch, tmp1, tmp2));
+  DCHECK(!AreAliased(src, dst, csp));
 
   if (emit_debug_code()) {
     // Check copy length.
@@ -2560,7 +2640,7 @@
 void MacroAssembler::FillFields(Register dst,
                                 Register field_count,
                                 Register filler) {
-  ASSERT(!dst.Is(csp));
+  DCHECK(!dst.Is(csp));
   UseScratchRegisterScope temps(this);
   Register field_ptr = temps.AcquireX();
   Register counter = temps.AcquireX();
@@ -2594,18 +2674,13 @@
 }
 
 
-void MacroAssembler::JumpIfEitherIsNotSequentialAsciiStrings(
-    Register first,
-    Register second,
-    Register scratch1,
-    Register scratch2,
-    Label* failure,
-    SmiCheckType smi_check) {
-
+void MacroAssembler::JumpIfEitherIsNotSequentialOneByteStrings(
+    Register first, Register second, Register scratch1, Register scratch2,
+    Label* failure, SmiCheckType smi_check) {
   if (smi_check == DO_SMI_CHECK) {
     JumpIfEitherSmi(first, second, failure);
   } else if (emit_debug_code()) {
-    ASSERT(smi_check == DONT_DO_SMI_CHECK);
+    DCHECK(smi_check == DONT_DO_SMI_CHECK);
     Label not_smi;
     JumpIfEitherSmi(first, second, NULL, &not_smi);
 
@@ -2616,73 +2691,64 @@
     Bind(&not_smi);
   }
 
-  // Test that both first and second are sequential ASCII strings.
+  // Test that both first and second are sequential one-byte strings.
   Ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
   Ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   Ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
 
-  JumpIfEitherInstanceTypeIsNotSequentialAscii(scratch1,
-                                               scratch2,
-                                               scratch1,
-                                               scratch2,
-                                               failure);
+  JumpIfEitherInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, scratch1,
+                                                 scratch2, failure);
 }
 
 
-void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialAscii(
-    Register first,
-    Register second,
-    Register scratch1,
-    Register scratch2,
+void MacroAssembler::JumpIfEitherInstanceTypeIsNotSequentialOneByte(
+    Register first, Register second, Register scratch1, Register scratch2,
     Label* failure) {
-  ASSERT(!AreAliased(scratch1, second));
-  ASSERT(!AreAliased(scratch1, scratch2));
-  static const int kFlatAsciiStringMask =
+  DCHECK(!AreAliased(scratch1, second));
+  DCHECK(!AreAliased(scratch1, scratch2));
+  static const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  static const int kFlatAsciiStringTag = ASCII_STRING_TYPE;
-  And(scratch1, first, kFlatAsciiStringMask);
-  And(scratch2, second, kFlatAsciiStringMask);
-  Cmp(scratch1, kFlatAsciiStringTag);
-  Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+  static const int kFlatOneByteStringTag = ONE_BYTE_STRING_TYPE;
+  And(scratch1, first, kFlatOneByteStringMask);
+  And(scratch2, second, kFlatOneByteStringMask);
+  Cmp(scratch1, kFlatOneByteStringTag);
+  Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
   B(ne, failure);
 }
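The Cmp/Ccmp pair folds two masked comparisons into one branch: Ccmp re-compares only when the first compare set eq, and otherwise forces a not-equal result via NoFlag. The equivalent predicate in plain C++ (a sketch):

#include <cstdint>

bool BothSequentialOneByte(uint32_t type1, uint32_t type2,
                           uint32_t mask, uint32_t tag) {
  return ((type1 & mask) == tag) && ((type2 & mask) == tag);
}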
 
 
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
-                                                            Register scratch,
-                                                            Label* failure) {
-  const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+                                                              Register scratch,
+                                                              Label* failure) {
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
-  And(scratch, type, kFlatAsciiStringMask);
-  Cmp(scratch, kFlatAsciiStringTag);
+  And(scratch, type, kFlatOneByteStringMask);
+  Cmp(scratch, kFlatOneByteStringTag);
   B(ne, failure);
 }
 
 
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
-    Register first,
-    Register second,
-    Register scratch1,
-    Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+    Register first, Register second, Register scratch1, Register scratch2,
     Label* failure) {
-  ASSERT(!AreAliased(first, second, scratch1, scratch2));
-  const int kFlatAsciiStringMask =
+  DCHECK(!AreAliased(first, second, scratch1, scratch2));
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
-  And(scratch1, first, kFlatAsciiStringMask);
-  And(scratch2, second, kFlatAsciiStringMask);
-  Cmp(scratch1, kFlatAsciiStringTag);
-  Ccmp(scratch2, kFlatAsciiStringTag, NoFlag, eq);
+  And(scratch1, first, kFlatOneByteStringMask);
+  And(scratch2, second, kFlatOneByteStringMask);
+  Cmp(scratch1, kFlatOneByteStringTag);
+  Ccmp(scratch2, kFlatOneByteStringTag, NoFlag, eq);
   B(ne, failure);
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register type,
-                                         Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register type,
+                                                     Label* not_unique_name) {
   STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0));
   // if ((type is string && type is internalized) || type == SYMBOL_TYPE) {
   //   continue
@@ -2716,12 +2782,12 @@
   // The code below is made a lot easier because the calling code already sets
   // up actual and expected registers according to the contract if values are
   // passed in registers.
-  ASSERT(actual.is_immediate() || actual.reg().is(x0));
-  ASSERT(expected.is_immediate() || expected.reg().is(x2));
-  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
+  DCHECK(actual.is_immediate() || actual.reg().is(x0));
+  DCHECK(expected.is_immediate() || expected.reg().is(x2));
+  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(x3));
 
   if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
+    DCHECK(actual.is_immediate());
     if (expected.immediate() == actual.immediate()) {
       definitely_matches = true;
 
@@ -2784,7 +2850,7 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
 
@@ -2801,7 +2867,7 @@
       Call(code);
       call_wrapper.AfterCall();
     } else {
-      ASSERT(flag == JUMP_FUNCTION);
+      DCHECK(flag == JUMP_FUNCTION);
       Jump(code);
     }
   }
@@ -2817,11 +2883,11 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Contract with called JS functions requires that function is passed in x1.
   // (See FullCodeGenerator::Generate().)
-  ASSERT(function.is(x1));
+  DCHECK(function.is(x1));
 
   Register expected_reg = x2;
   Register code_reg = x3;
@@ -2849,11 +2915,11 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Contract with called JS functions requires that function is passed in x1.
   // (See FullCodeGenerator::Generate().)
-  ASSERT(function.Is(x1));
+  DCHECK(function.Is(x1));
 
   Register code_reg = x3;
 
@@ -2908,15 +2974,24 @@
 void MacroAssembler::TruncateDoubleToI(Register result,
                                        DoubleRegister double_input) {
   Label done;
-  ASSERT(jssp.Is(StackPointer()));
 
   // Try to convert the double to an int64. If successful, the bottom 32 bits
   // contain our truncated int32 result.
   TryConvertDoubleToInt64(result, double_input, &done);
 
+  const Register old_stack_pointer = StackPointer();
+  if (csp.Is(old_stack_pointer)) {
+    // This currently only happens during compiler unit tests. If it arises
+    // during regular code generation, the DoubleToI stub should be updated to
+    // cope with csp and have an extra parameter indicating which stack pointer
+    // it should use.
+    Push(jssp, xzr);  // Push xzr to maintain csp's required 16-byte alignment.
+    Mov(jssp, csp);
+    SetStackPointer(jssp);
+  }
+
   // If we fell through, the inline version didn't succeed; call the stub.
-  Push(lr);
-  Push(double_input);  // Put input on stack.
+  Push(lr, double_input);
 
   DoubleToIStub stub(isolate(),
                      jssp,
@@ -2926,8 +3001,15 @@
                      true);  // skip_fastpath
   CallStub(&stub);  // DoubleToIStub preserves any registers it needs to clobber
 
-  Drop(1, kDoubleSize);  // Drop the double input on the stack.
-  Pop(lr);
+  DCHECK_EQ(xzr.SizeInBytes(), double_input.SizeInBytes());
+  Pop(xzr, lr);  // xzr to drop the double input on the stack.
+
+  if (csp.Is(old_stack_pointer)) {
+    Mov(csp, jssp);
+    SetStackPointer(csp);
+    AssertStackConsistency();
+    Pop(xzr, jssp);
+  }
 
   Bind(&done);
 }
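In C++ terms, the fast path attempts an inline int64 conversion and only falls back to the DoubleToI stub when the input is out of range. A rough analogue (illustrative only; the generated code relies on fcvtzs and its flag behavior, not explicit range checks):

#include <cmath>
#include <cstdint>

bool TryTruncateToInt32(double input, int32_t* result) {
  const double kTwo63 = 9223372036854775808.0;  // 2^63
  if (std::isnan(input) || input >= kTwo63 || input < -kTwo63) {
    return false;  // outside int64 range: fall back to the DoubleToI stub
  }
  // Keep the bottom 32 bits of the truncated int64, as the inline path does.
  *result = static_cast<int32_t>(
      static_cast<uint32_t>(static_cast<int64_t>(input)));
  return true;
}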
@@ -2936,8 +3018,8 @@
 void MacroAssembler::TruncateHeapNumberToI(Register result,
                                            Register object) {
   Label done;
-  ASSERT(!result.is(object));
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(!result.is(object));
+  DCHECK(jssp.Is(StackPointer()));
 
   Ldr(fp_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
 
@@ -2961,7 +3043,7 @@
 
 
 void MacroAssembler::StubPrologue() {
-  ASSERT(StackPointer().Is(jssp));
+  DCHECK(StackPointer().Is(jssp));
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireX();
   __ Mov(temp, Smi::FromInt(StackFrame::STUB));
@@ -2983,7 +3065,7 @@
 
 
 void MacroAssembler::EnterFrame(StackFrame::Type type) {
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(jssp.Is(StackPointer()));
   UseScratchRegisterScope temps(this);
   Register type_reg = temps.AcquireX();
   Register code_reg = temps.AcquireX();
@@ -3004,7 +3086,7 @@
 
 
 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(jssp.Is(StackPointer()));
   // Drop the execution stack down to the frame pointer and restore
   // the caller frame pointer and return address.
   Mov(jssp, fp);
@@ -3022,7 +3104,7 @@
   // Read the registers from the stack without popping them. The stack pointer
   // will be reset as part of the unwinding process.
   CPURegList saved_fp_regs = kCallerSavedFP;
-  ASSERT(saved_fp_regs.Count() % 2 == 0);
+  DCHECK(saved_fp_regs.Count() % 2 == 0);
 
   int offset = ExitFrameConstants::kLastExitFrameField;
   while (!saved_fp_regs.IsEmpty()) {
@@ -3037,7 +3119,7 @@
 void MacroAssembler::EnterExitFrame(bool save_doubles,
                                     const Register& scratch,
                                     int extra_space) {
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(jssp.Is(StackPointer()));
 
   // Set up the new stack frame.
   Mov(scratch, Operand(CodeObject()));
@@ -3083,7 +3165,7 @@
 
   // Align and synchronize the system stack pointer with jssp.
   AlignAndSetCSPForFrame();
-  ASSERT(csp.Is(StackPointer()));
+  DCHECK(csp.Is(StackPointer()));
 
   //         fp[8]: CallerPC (lr)
   //   fp -> fp[0]: CallerFP (old fp)
@@ -3107,7 +3189,7 @@
 void MacroAssembler::LeaveExitFrame(bool restore_doubles,
                                     const Register& scratch,
                                     bool restore_context) {
-  ASSERT(csp.Is(StackPointer()));
+  DCHECK(csp.Is(StackPointer()));
 
   if (restore_doubles) {
     ExitFrameRestoreFPRegs();
@@ -3154,7 +3236,7 @@
 
 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-  ASSERT(value != 0);
+  DCHECK(value != 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Mov(scratch2, ExternalReference(counter));
     Ldr(scratch1, MemOperand(scratch2));
@@ -3190,14 +3272,14 @@
   Mov(x0, 0);
   Mov(x1, ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(isolate(), 1);
-  ASSERT(AllowThisStubCall(&ces));
+  DCHECK(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 
 
 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
                                     int handler_index) {
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(jssp.Is(StackPointer()));
   // Adjust this code if the asserts don't hold.
   STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
   STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
@@ -3219,7 +3301,7 @@
 
   // Push the frame pointer, context, state, and code object.
   if (kind == StackHandler::JS_ENTRY) {
-    ASSERT(Smi::FromInt(0) == 0);
+    DCHECK(Smi::FromInt(0) == 0);
     Push(xzr, xzr, x11, x10);
   } else {
     Push(fp, cp, x11, x10);
@@ -3249,7 +3331,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -3265,14 +3347,14 @@
   UseScratchRegisterScope temps(this);
   Register scratch3 = temps.AcquireX();
 
-  ASSERT(!AreAliased(result, scratch1, scratch2, scratch3));
-  ASSERT(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
+  DCHECK(!AreAliased(result, scratch1, scratch2, scratch3));
+  DCHECK(result.Is64Bits() && scratch1.Is64Bits() && scratch2.Is64Bits());
 
   // Make object size into bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
     object_size *= kPointerSize;
   }
-  ASSERT(0 == (object_size & kObjectAlignmentMask));
+  DCHECK(0 == (object_size & kObjectAlignmentMask));
 
   // Check relative positions of allocation top and limit addresses.
   // The values must be adjacent in memory to allow the use of LDP.
@@ -3282,7 +3364,7 @@
       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
   intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
-  ASSERT((limit - top) == kPointerSize);
+  DCHECK((limit - top) == kPointerSize);
 
   // Set up allocation top address and object size registers.
   Register top_address = scratch1;
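The DCHECK above encodes a layout requirement: the allocation top and limit words must be adjacent so a single ldp can fetch both. A sketch of that requirement as a static assertion (AllocationInfo is an illustrative stand-in, not the V8 type):

#include <cstddef>
#include <cstdint>

struct AllocationInfo {
  uintptr_t top;    // next free address
  uintptr_t limit;  // end of the current allocation area
};
static_assert(offsetof(AllocationInfo, limit) ==
                  offsetof(AllocationInfo, top) + sizeof(uintptr_t),
              "top and limit must be adjacent for ldp");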
@@ -3341,8 +3423,8 @@
   UseScratchRegisterScope temps(this);
   Register scratch3 = temps.AcquireX();
 
-  ASSERT(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
-  ASSERT(object_size.Is64Bits() && result.Is64Bits() &&
+  DCHECK(!AreAliased(object_size, result, scratch1, scratch2, scratch3));
+  DCHECK(object_size.Is64Bits() && result.Is64Bits() &&
          scratch1.Is64Bits() && scratch2.Is64Bits());
 
   // Check relative positions of allocation top and limit addresses.
@@ -3353,7 +3435,7 @@
       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   intptr_t top = reinterpret_cast<intptr_t>(heap_allocation_top.address());
   intptr_t limit = reinterpret_cast<intptr_t>(heap_allocation_limit.address());
-  ASSERT((limit - top) == kPointerSize);
+  DCHECK((limit - top) == kPointerSize);
 
   // Set up allocation top address and object size registers.
   Register top_address = scratch1;
@@ -3427,7 +3509,7 @@
                                            Register scratch2,
                                            Register scratch3,
                                            Label* gc_required) {
-  ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+  DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -3452,13 +3534,11 @@
 }
 
 
-void MacroAssembler::AllocateAsciiString(Register result,
-                                         Register length,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Label* gc_required) {
-  ASSERT(!AreAliased(result, length, scratch1, scratch2, scratch3));
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  DCHECK(!AreAliased(result, length, scratch1, scratch2, scratch3));
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
@@ -3466,7 +3546,7 @@
   Add(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
   Bic(scratch1, scratch1, kObjectAlignmentMask);
 
-  // Allocate ASCII string in new space.
+  // Allocate one-byte string in new space.
   Allocate(scratch1,
            result,
            scratch2,
@@ -3475,11 +3555,8 @@
            TAG_OBJECT);
 
   // Set the map, length and hash field.
-  InitializeNewString(result,
-                      length,
-                      Heap::kAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
 
 
@@ -3499,11 +3576,10 @@
 }
 
 
-void MacroAssembler::AllocateAsciiConsString(Register result,
-                                             Register length,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
   Allocate(ConsString::kSize,
            result,
            scratch1,
@@ -3511,11 +3587,8 @@
            gc_required,
            TAG_OBJECT);
 
-  InitializeNewString(result,
-                      length,
-                      Heap::kConsAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
 
 
@@ -3524,7 +3597,7 @@
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* gc_required) {
-  ASSERT(!AreAliased(result, length, scratch1, scratch2));
+  DCHECK(!AreAliased(result, length, scratch1, scratch2));
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
@@ -3536,20 +3609,17 @@
 }
 
 
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
-                                               Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
-  ASSERT(!AreAliased(result, length, scratch1, scratch2));
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  DCHECK(!AreAliased(result, length, scratch1, scratch2));
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
-  InitializeNewString(result,
-                      length,
-                      Heap::kSlicedAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
 
 
@@ -3560,8 +3630,9 @@
                                         Register scratch1,
                                         Register scratch2,
                                         CPURegister value,
-                                        CPURegister heap_number_map) {
-  ASSERT(!value.IsValid() || value.Is64Bits());
+                                        CPURegister heap_number_map,
+                                        MutableMode mode) {
+  DCHECK(!value.IsValid() || value.Is64Bits());
   UseScratchRegisterScope temps(this);
 
   // Allocate an object in the heap for the heap number and tag it as a heap
@@ -3569,6 +3640,10 @@
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
            NO_ALLOCATION_FLAGS);
 
+  Heap::RootListIndex map_index = mode == MUTABLE
+      ? Heap::kMutableHeapNumberMapRootIndex
+      : Heap::kHeapNumberMapRootIndex;
+
   // Prepare the heap number map.
   if (!heap_number_map.IsValid()) {
     // If we have a valid value register, use the same type of register to store
@@ -3578,7 +3653,7 @@
     } else {
       heap_number_map = scratch1;
     }
-    LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+    LoadRoot(heap_number_map, map_index);
   }
   if (emit_debug_code()) {
     Register map;
@@ -3588,7 +3663,7 @@
     } else {
       map = Register(heap_number_map);
     }
-    AssertRegisterIsRoot(map, Heap::kHeapNumberMapRootIndex);
+    AssertRegisterIsRoot(map, map_index);
   }
 
   // Store the heap number map and the value in the allocated object.
@@ -3645,9 +3720,16 @@
 }
 
 
-void MacroAssembler::CompareMap(Register obj,
-                                Register scratch,
-                                Handle<Map> map) {
+void MacroAssembler::CompareObjectMap(Register obj, Heap::RootListIndex index) {
+  UseScratchRegisterScope temps(this);
+  Register obj_map = temps.AcquireX();
+  Ldr(obj_map, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CompareRoot(obj_map, index);
+}
+
+
+void MacroAssembler::CompareObjectMap(Register obj, Register scratch,
+                                      Handle<Map> map) {
   Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
   CompareMap(scratch, map);
 }
@@ -3668,7 +3750,7 @@
     JumpIfSmi(obj, fail);
   }
 
-  CompareMap(obj, scratch, map);
+  CompareObjectMap(obj, scratch, map);
   B(ne, fail);
 }
 
@@ -3738,15 +3820,16 @@
                                              Register scratch,
                                              Label* miss,
                                              BoundFunctionAction action) {
-  ASSERT(!AreAliased(function, result, scratch));
+  DCHECK(!AreAliased(function, result, scratch));
 
-  // Check that the receiver isn't a smi.
-  JumpIfSmi(function, miss);
-
-  // Check that the function really is a function. Load map into result reg.
-  JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
-
+  Label non_instance;
   if (action == kMissOnBoundFunction) {
+    // Check that the receiver isn't a smi.
+    JumpIfSmi(function, miss);
+
+    // Check that the function really is a function. Load map into result reg.
+    JumpIfNotObjectType(function, result, scratch, JS_FUNCTION_TYPE, miss);
+
     Register scratch_w = scratch.W();
     Ldr(scratch,
         FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
@@ -3755,12 +3838,11 @@
     Ldr(scratch_w,
         FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
     Tbnz(scratch, SharedFunctionInfo::kBoundFunction, miss);
-  }
 
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
-  Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+    // Make sure that the function has an instance prototype.
+    Ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+    Tbnz(scratch, Map::kHasNonInstancePrototype, &non_instance);
+  }
 
   // Get the prototype or initial map from the function.
   Ldr(result,
@@ -3777,12 +3859,15 @@
 
   // Get the prototype from the initial map.
   Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
-  B(&done);
 
-  // Non-instance prototype: fetch prototype from constructor field in initial
-  // map.
-  Bind(&non_instance);
-  Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+  if (action == kMissOnBoundFunction) {
+    B(&done);
+
+    // Non-instance prototype: fetch prototype from constructor field in initial
+    // map.
+    Bind(&non_instance);
+    Ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
+  }
 
   // All done.
   Bind(&done);
@@ -3793,7 +3878,7 @@
                                  Heap::RootListIndex index) {
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireX();
-  ASSERT(!AreAliased(obj, temp));
+  DCHECK(!AreAliased(obj, temp));
   LoadRoot(temp, index);
   Cmp(obj, temp);
 }
@@ -3894,7 +3979,7 @@
                                                  FPRegister fpscratch1,
                                                  Label* fail,
                                                  int elements_offset) {
-  ASSERT(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
+  DCHECK(!AreAliased(value_reg, key_reg, elements_reg, scratch1));
   Label store_num;
 
   // Speculatively convert the smi to a double - all smis can be exactly
@@ -3905,8 +3990,7 @@
   JumpIfSmi(value_reg, &store_num);
 
   // Ensure that the object is a heap number.
-  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(),
-           fail, DONT_DO_SMI_CHECK);
+  JumpIfNotHeapNumber(value_reg, fail);
 
   Ldr(fpscratch1, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
 
@@ -3933,7 +4017,7 @@
   // that the constants for the maximum number of digits for an array index
   // cached in the hash field and the number of bits reserved for it do not
   // conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   DecodeField<String::ArrayIndexValueBits>(index, hash);
   SmiTag(index, index);
@@ -3946,7 +4030,7 @@
     SeqStringSetCharCheckIndexType index_type,
     Register scratch,
     uint32_t encoding_mask) {
-  ASSERT(!AreAliased(string, index, scratch));
+  DCHECK(!AreAliased(string, index, scratch));
 
   if (index_type == kIndexIsSmi) {
     AssertSmi(index);
@@ -3967,7 +4051,7 @@
   Cmp(index, index_type == kIndexIsSmi ? scratch : Operand::UntagSmi(scratch));
   Check(lt, kIndexIsTooLarge);
 
-  ASSERT_EQ(0, Smi::FromInt(0));
+  DCHECK_EQ(0, Smi::FromInt(0));
   Cmp(index, 0);
   Check(ge, kIndexIsNegative);
 }
@@ -3977,7 +4061,7 @@
                                             Register scratch1,
                                             Register scratch2,
                                             Label* miss) {
-  ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
+  DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
   Label same_contexts;
 
   // Load current lexical context from the stack frame.
@@ -4039,10 +4123,10 @@
 
 
 // Compute the hash code from the untagged key. This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stub-hydrogen.cc
 void MacroAssembler::GetNumberHash(Register key, Register scratch) {
-  ASSERT(!AreAliased(key, scratch));
+  DCHECK(!AreAliased(key, scratch));
 
   // Xor original key with a seed.
   LoadRoot(scratch, Heap::kHashSeedRootIndex);
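The emitted instruction sequence mirrors the C++ ComputeIntegerHash step for step, which is why the comment demands they stay in sync. A sketch of the mixing function it tracks (the final masking of the result in utils.h may differ):

#include <cstdint>

uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;   // Eor with the heap's hash seed
  hash = ~hash + (hash << 15);  // Mvn, then Add with a shifted operand
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // == hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}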
@@ -4081,7 +4165,7 @@
                                               Register scratch1,
                                               Register scratch2,
                                               Register scratch3) {
-  ASSERT(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
+  DCHECK(!AreAliased(elements, key, scratch0, scratch1, scratch2, scratch3));
 
   Label done;
 
@@ -4105,7 +4189,7 @@
     And(scratch2, scratch2, scratch1);
 
     // Scale the index by multiplying by the element size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
     Add(scratch2, scratch2, Operand(scratch2, LSL, 1));
 
     // Check if the key is identical to the name.
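With kEntrySize == 3, the Add with a shifted operand above scales the probe index by the entry size without a multiply:

#include <cstdint>

// x * 3 == x + (x << 1), matching Add(scratch2, scratch2, scratch2 LSL #1).
uint64_t ScaleByEntrySize(uint64_t index) { return index + (index << 1); }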
@@ -4140,7 +4224,7 @@
                                          Register scratch1,
                                          SaveFPRegsMode fp_mode,
                                          RememberedSetFinalAction and_then) {
-  ASSERT(!AreAliased(object, address, scratch1));
+  DCHECK(!AreAliased(object, address, scratch1));
   Label done, store_buffer_overflow;
   if (emit_debug_code()) {
     Label ok;
@@ -4160,20 +4244,19 @@
   Str(scratch1, MemOperand(scratch2));
   // Call stub on end of buffer.
   // Check for end of buffer.
-  ASSERT(StoreBuffer::kStoreBufferOverflowBit ==
+  DCHECK(StoreBuffer::kStoreBufferOverflowBit ==
          (1 << (14 + kPointerSizeLog2)));
   if (and_then == kFallThroughAtEnd) {
     Tbz(scratch1, (14 + kPointerSizeLog2), &done);
   } else {
-    ASSERT(and_then == kReturnAtEnd);
+    DCHECK(and_then == kReturnAtEnd);
     Tbnz(scratch1, (14 + kPointerSizeLog2), &store_buffer_overflow);
     Ret();
   }
 
   Bind(&store_buffer_overflow);
   Push(lr);
-  StoreBufferOverflowStub store_buffer_overflow_stub =
-      StoreBufferOverflowStub(isolate(), fp_mode);
+  StoreBufferOverflowStub store_buffer_overflow_stub(isolate(), fp_mode);
   CallStub(&store_buffer_overflow_stub);
   Pop(lr);
 
@@ -4195,7 +4278,7 @@
   // Safepoints expect a block of kNumSafepointRegisters values on the stack, so
   // adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
-  ASSERT(num_unsaved >= 0);
+  DCHECK(num_unsaved >= 0);
   Claim(num_unsaved);
   PushXRegList(kSafepointSavedRegisters);
 }
@@ -4217,7 +4300,7 @@
 
 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
   // Make sure the safepoint registers list is what we expect.
-  ASSERT(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
+  DCHECK(CPURegList::GetSafepointSavedRegisters().list() == 0x6ffcffff);
 
   // Safepoint registers are stored contiguously on the stack, but not all the
   // registers are saved. The following registers are excluded:
@@ -4287,7 +4370,7 @@
 
   // Although the object register is tagged, the offset is relative to the start
   // of the object, so offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
+  DCHECK(IsAligned(offset, kPointerSize));
 
   Add(scratch, object, offset - kHeapObjectTag);
   if (emit_debug_code()) {
@@ -4312,8 +4395,8 @@
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    Mov(value, Operand(BitCast<int64_t>(kZapValue + 4)));
-    Mov(scratch, Operand(BitCast<int64_t>(kZapValue + 8)));
+    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+    Mov(scratch, Operand(bit_cast<int64_t>(kZapValue + 8)));
   }
 }
 
@@ -4326,13 +4409,13 @@
                                        LinkRegisterStatus lr_status,
                                        SaveFPRegsMode fp_mode) {
   ASM_LOCATION("MacroAssembler::RecordWrite");
-  ASSERT(!AreAliased(object, map));
+  DCHECK(!AreAliased(object, map));
 
   if (emit_debug_code()) {
     UseScratchRegisterScope temps(this);
     Register temp = temps.AcquireX();
 
-    CompareMap(map, temp, isolate()->factory()->meta_map());
+    CompareObjectMap(map, temp, isolate()->factory()->meta_map());
     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
   }
 
@@ -4349,10 +4432,6 @@
     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  // TODO(mstarzinger): Dynamic counter missing.
-
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
@@ -4380,11 +4459,16 @@
 
   Bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, map,
+                   dst);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    Mov(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
-    Mov(map, Operand(BitCast<int64_t>(kZapValue + 16)));
+    Mov(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
+    Mov(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
   }
 }
 
@@ -4404,7 +4488,7 @@
     SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
   ASM_LOCATION("MacroAssembler::RecordWrite");
-  ASSERT(!AreAliased(object, value));
+  DCHECK(!AreAliased(object, value));
 
   if (emit_debug_code()) {
     UseScratchRegisterScope temps(this);
@@ -4415,16 +4499,12 @@
     Check(eq, kWrongAddressOrValuePassedToRecordWrite);
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  // TODO(mstarzinger): Dynamic counter missing.
-
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
 
   if (smi_check == INLINE_SMI_CHECK) {
-    ASSERT_EQ(0, kSmiTag);
+    DCHECK_EQ(0, kSmiTag);
     JumpIfSmi(value, &done);
   }
 
@@ -4452,11 +4532,16 @@
 
   Bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, address,
+                   value);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    Mov(address, Operand(BitCast<int64_t>(kZapValue + 12)));
-    Mov(value, Operand(BitCast<int64_t>(kZapValue + 16)));
+    Mov(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+    Mov(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
   }
 }
 
@@ -4465,7 +4550,7 @@
   if (emit_debug_code()) {
     // The bit sequence is backward. The first character in the string
     // represents the least significant bit.
-    ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+    DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
 
     Label color_is_valid;
     Tbnz(reg, 0, &color_is_valid);
@@ -4479,8 +4564,8 @@
 void MacroAssembler::GetMarkBits(Register addr_reg,
                                  Register bitmap_reg,
                                  Register shift_reg) {
-  ASSERT(!AreAliased(addr_reg, bitmap_reg, shift_reg));
-  ASSERT(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
+  DCHECK(!AreAliased(addr_reg, bitmap_reg, shift_reg));
+  DCHECK(addr_reg.Is64Bits() && bitmap_reg.Is64Bits() && shift_reg.Is64Bits());
   // addr_reg is divided into fields:
   // |63        page base        20|19    high      8|7   shift   3|2  0|
   // 'high' gives the index of the cell holding color bits for the object.
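A sketch of that decomposition in plain C++, with field widths read off the diagram above (the constants and names are illustrative, not V8's):

#include <cstdint>

void GetMarkBitCoordinates(uintptr_t addr, uintptr_t* page_base,
                           uint32_t* cell_index, uint32_t* bit_shift) {
  *page_base  = addr & ~((uintptr_t{1} << 20) - 1);         // bits [63:20]
  *cell_index = static_cast<uint32_t>((addr >> 8) & 0xfff);  // bits [19:8]
  *bit_shift  = static_cast<uint32_t>((addr >> 3) & 0x1f);   // bits [7:3]
}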
@@ -4504,7 +4589,7 @@
                               int first_bit,
                               int second_bit) {
   // See mark-compact.h for color definitions.
-  ASSERT(!AreAliased(object, bitmap_scratch, shift_scratch));
+  DCHECK(!AreAliased(object, bitmap_scratch, shift_scratch));
 
   GetMarkBits(object, bitmap_scratch, shift_scratch);
   Ldr(bitmap_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
@@ -4515,14 +4600,14 @@
 
   // These bit sequences are backwards. The first character in the string
   // represents the least significant bit.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
 
   // Check for the color.
   if (first_bit == 0) {
     // Checking for white.
-    ASSERT(second_bit == 0);
+    DCHECK(second_bit == 0);
     // We only need to test the first bit.
     Tbz(bitmap_scratch, 0, has_color);
   } else {
@@ -4556,7 +4641,7 @@
                                  Register scratch0,
                                  Register scratch1,
                                  Label* on_black) {
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
   HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
 }
 
@@ -4566,7 +4651,7 @@
     Register scratch0,
     Register scratch1,
     Label* found) {
-  ASSERT(!AreAliased(object, scratch0, scratch1));
+  DCHECK(!AreAliased(object, scratch0, scratch1));
   Factory* factory = isolate()->factory();
   Register current = scratch0;
   Label loop_again;
@@ -4587,7 +4672,7 @@
 
 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
                                                Register result) {
-  ASSERT(!result.Is(ldr_location));
+  DCHECK(!result.Is(ldr_location));
   const uint32_t kLdrLitOffset_lsb = 5;
   const uint32_t kLdrLitOffset_width = 19;
   Ldr(result, MemOperand(ldr_location));
@@ -4610,14 +4695,14 @@
     Register load_scratch,
     Register length_scratch,
     Label* value_is_white_and_not_data) {
-  ASSERT(!AreAliased(
+  DCHECK(!AreAliased(
       value, bitmap_scratch, shift_scratch, load_scratch, length_scratch));
 
   // These bit sequences are backwards. The first character in the string
   // represents the least significant bit.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
 
   GetMarkBits(value, bitmap_scratch, shift_scratch);
   Ldr(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
@@ -4641,8 +4726,8 @@
   JumpIfRoot(map, Heap::kHeapNumberMapRootIndex, &is_data_object);
 
   // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   Register instance_type = load_scratch;
@@ -4656,16 +4741,16 @@
   // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
   // External strings are the only ones with the kExternalStringTag bit
   // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
   Mov(length_scratch, ExternalString::kSize);
   TestAndBranchIfAnySet(instance_type, kExternalStringTag, &is_data_object);
 
-  // Sequential string, either ASCII or UC16.
-  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+  // Sequential string, either Latin1 or UC16.
+  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
   // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
   // getting the length multiplied by 2.
-  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
   Ldrsw(length_scratch, UntagSmiFieldMemOperand(value,
                                                 String::kLengthOffset));
   Tst(instance_type, kStringEncodingMask);
@@ -4891,7 +4976,7 @@
                                       const CPURegister& arg3) {
   // We cannot handle a caller-saved stack pointer. It doesn't make much sense
   // in most cases anyway, so this restriction shouldn't be too serious.
-  ASSERT(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
+  DCHECK(!kCallerSaved.IncludesAliasOf(__ StackPointer()));
 
   // The provided arguments, and their proper procedure-call standard registers.
   CPURegister args[kPrintfMaxArgCount] = {arg0, arg1, arg2, arg3};
@@ -4942,7 +5027,7 @@
       // In C, floats are always cast to doubles for varargs calls.
       pcs[i] = pcs_varargs_fp.PopLowestIndex().D();
     } else {
-      ASSERT(args[i].IsNone());
+      DCHECK(args[i].IsNone());
       arg_count = i;
       break;
     }
@@ -4971,11 +5056,11 @@
   // Do a second pass to move values into their final positions and perform any
   // conversions that may be required.
   for (int i = 0; i < arg_count; i++) {
-    ASSERT(pcs[i].type() == args[i].type());
+    DCHECK(pcs[i].type() == args[i].type());
     if (pcs[i].IsRegister()) {
       Mov(Register(pcs[i]), Register(args[i]), kDiscardForSameWReg);
     } else {
-      ASSERT(pcs[i].IsFPRegister());
+      DCHECK(pcs[i].IsFPRegister());
       if (pcs[i].SizeInBytes() == args[i].SizeInBytes()) {
         Fmov(FPRegister(pcs[i]), FPRegister(args[i]));
       } else {
@@ -5029,10 +5114,10 @@
       if (args[i].IsRegister()) {
         arg_pattern = args[i].Is32Bits() ? kPrintfArgW : kPrintfArgX;
       } else {
-        ASSERT(args[i].Is64Bits());
+        DCHECK(args[i].Is64Bits());
         arg_pattern = kPrintfArgD;
       }
-      ASSERT(arg_pattern < (1 << kPrintfArgPatternBits));
+      DCHECK(arg_pattern < (1 << kPrintfArgPatternBits));
       arg_pattern_list |= (arg_pattern << (kPrintfArgPatternBits * i));
     }
     dc32(arg_pattern_list);   // kPrintfArgPatternListOffset
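Each argument contributes a small fixed-width type pattern to a single descriptor word. A sketch of the packing the loop above performs (the 2-bit width is an assumption consistent with the DCHECK on arg_pattern):

#include <cstdint>

uint32_t PackArgPatterns(const uint32_t* patterns, int count) {
  const uint32_t kPatternBits = 2;  // assumed kPrintfArgPatternBits
  uint32_t list = 0;
  for (int i = 0; i < count; i++) {
    list |= patterns[i] << (kPatternBits * i);
  }
  return list;
}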
@@ -5050,10 +5135,10 @@
                             CPURegister arg3) {
   // We can only print sp if it is the current stack pointer.
   if (!csp.Is(StackPointer())) {
-    ASSERT(!csp.Aliases(arg0));
-    ASSERT(!csp.Aliases(arg1));
-    ASSERT(!csp.Aliases(arg2));
-    ASSERT(!csp.Aliases(arg3));
+    DCHECK(!csp.Aliases(arg0));
+    DCHECK(!csp.Aliases(arg1));
+    DCHECK(!csp.Aliases(arg2));
+    DCHECK(!csp.Aliases(arg3));
   }
 
   // Printf is expected to preserve all registers, so make sure that none are
@@ -5128,7 +5213,7 @@
   // the sequence and copying it in the same way.
   InstructionAccurateScope scope(this,
                                  kNoCodeAgeSequenceLength / kInstructionSize);
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(jssp.Is(StackPointer()));
   EmitFrameSetupForCodeAgePatching(this);
 }
 
@@ -5137,7 +5222,7 @@
 void MacroAssembler::EmitCodeAgeSequence(Code* stub) {
   InstructionAccurateScope scope(this,
                                  kNoCodeAgeSequenceLength / kInstructionSize);
-  ASSERT(jssp.Is(StackPointer()));
+  DCHECK(jssp.Is(StackPointer()));
   EmitCodeAgeSequence(this, stub);
 }
 
@@ -5190,7 +5275,7 @@
 
 bool MacroAssembler::IsYoungSequence(Isolate* isolate, byte* sequence) {
   bool is_young = isolate->code_aging_helper()->IsYoung(sequence);
-  ASSERT(is_young ||
+  DCHECK(is_young ||
          isolate->code_aging_helper()->IsOld(sequence));
   return is_young;
 }
@@ -5199,15 +5284,17 @@
 void MacroAssembler::TruncatingDiv(Register result,
                                    Register dividend,
                                    int32_t divisor) {
-  ASSERT(!AreAliased(result, dividend));
-  ASSERT(result.Is32Bits() && dividend.Is32Bits());
-  MultiplierAndShift ms(divisor);
-  Mov(result, ms.multiplier());
+  DCHECK(!AreAliased(result, dividend));
+  DCHECK(result.Is32Bits() && dividend.Is32Bits());
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+  Mov(result, mag.multiplier);
   Smull(result.X(), dividend, result);
   Asr(result.X(), result.X(), 32);
-  if (divisor > 0 && ms.multiplier() < 0) Add(result, result, dividend);
-  if (divisor < 0 && ms.multiplier() > 0) Sub(result, result, dividend);
-  if (ms.shift() > 0) Asr(result, result, ms.shift());
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) Add(result, result, dividend);
+  if (divisor < 0 && !neg && mag.multiplier > 0) Sub(result, result, dividend);
+  if (mag.shift > 0) Asr(result, result, mag.shift);
   Add(result, result, Operand(dividend, LSR, 31));
 }
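The sequence emitted above is the classic magic-number division from Hacker's Delight, now parameterized by base::SignedDivisionByConstant. A standalone C++ sketch of the same quotient computation, assuming the well-known magic pair for a divisor of 7 (multiplier 0x92492493, shift 2); nothing here uses the V8 API:

#include <cassert>
#include <cstdint>

// Truncating n / 7 without a divide, mirroring the Smull/Asr/Add sequence.
int32_t TruncatingDiv7(int32_t n) {
  const int32_t multiplier = static_cast<int32_t>(0x92492493);  // negative
  const int shift = 2;
  // Smull result.X, dividend, result; Asr result.X, result.X, #32.
  int32_t q =
      static_cast<int32_t>((static_cast<int64_t>(multiplier) * n) >> 32);
  q += n;                               // divisor > 0 and multiplier < 0
  q >>= shift;                          // mag.shift > 0
  q += static_cast<uint32_t>(n) >> 31;  // Add(result, result, dividend LSR 31)
  return q;
}

int main() {
  for (int32_t n : {-100, -7, -1, 0, 1, 6, 7, 100}) {
    assert(TruncatingDiv7(n) == n / 7);
  }
  return 0;
}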
 
@@ -5237,14 +5324,14 @@
     CPURegList* available) {
   CHECK(!available->IsEmpty());
   CPURegister result = available->PopLowestIndex();
-  ASSERT(!AreAliased(result, xzr, csp));
+  DCHECK(!AreAliased(result, xzr, csp));
   return result;
 }
 
 
 CPURegister UseScratchRegisterScope::UnsafeAcquire(CPURegList* available,
                                                    const CPURegister& reg) {
-  ASSERT(available->IncludesAliasOf(reg));
+  DCHECK(available->IncludesAliasOf(reg));
   available->Remove(reg);
   return reg;
 }
@@ -5257,8 +5344,8 @@
                               const Label* smi_check) {
   Assembler::BlockPoolsScope scope(masm);
   if (reg.IsValid()) {
-    ASSERT(smi_check->is_bound());
-    ASSERT(reg.Is64Bits());
+    DCHECK(smi_check->is_bound());
+    DCHECK(reg.Is64Bits());
 
     // Encode the register (x0-x30) in the lowest 5 bits, then the offset to
     // 'check' in the other bits. The possible offset is limited in that we
@@ -5267,7 +5354,7 @@
     uint32_t delta = __ InstructionsGeneratedSince(smi_check);
     __ InlineData(RegisterBits::encode(reg.code()) | DeltaBits::encode(delta));
   } else {
-    ASSERT(!smi_check->is_bound());
+    DCHECK(!smi_check->is_bound());
 
     // An offset of 0 indicates that there is no patch site.
     __ InlineData(0);
@@ -5278,17 +5365,17 @@
 InlineSmiCheckInfo::InlineSmiCheckInfo(Address info)
     : reg_(NoReg), smi_check_(NULL) {
   InstructionSequence* inline_data = InstructionSequence::At(info);
-  ASSERT(inline_data->IsInlineData());
+  DCHECK(inline_data->IsInlineData());
   if (inline_data->IsInlineData()) {
     uint64_t payload = inline_data->InlineData();
     // We use BitField to decode the payload, and BitField can only handle
     // 32-bit values.
-    ASSERT(is_uint32(payload));
+    DCHECK(is_uint32(payload));
     if (payload != 0) {
       int reg_code = RegisterBits::decode(payload);
       reg_ = Register::XRegFromCode(reg_code);
       uint64_t smi_check_delta = DeltaBits::decode(payload);
-      ASSERT(smi_check_delta != 0);
+      DCHECK(smi_check_delta != 0);
       smi_check_ = inline_data->preceding(smi_check_delta);
     }
   }
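The payload decoded above packs two fields into 32 bits; per the comments, the register code (x0-x30) sits in the lowest 5 bits and the offset to the smi check in the rest. A round-trip sketch of that packing, with the field width inferred from the comments rather than taken from the actual BitField definitions:

#include <cassert>
#include <cstdint>

const uint32_t kRegBits = 5;  // x0-x30 fit in 5 bits
const uint32_t kRegMask = (1u << kRegBits) - 1;

uint32_t EncodePayload(uint32_t reg_code, uint32_t delta) {
  return (delta << kRegBits) | (reg_code & kRegMask);
}

void DecodePayload(uint32_t payload, uint32_t* reg_code, uint32_t* delta) {
  *reg_code = payload & kRegMask;  // cf. RegisterBits::decode
  *delta = payload >> kRegBits;    // cf. DeltaBits::decode
}

int main() {
  uint32_t reg = 0, delta = 0;
  DecodePayload(EncodePayload(17, 42), &reg, &delta);
  assert(reg == 17 && delta == 42);
  return 0;
}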
diff --git a/src/arm64/macro-assembler-arm64.h b/src/arm64/macro-assembler-arm64.h
index 34182c0..7a106a1 100644
--- a/src/arm64/macro-assembler-arm64.h
+++ b/src/arm64/macro-assembler-arm64.h
@@ -7,9 +7,29 @@
 
 #include <vector>
 
+#include "src/bailout-reason.h"
 #include "src/globals.h"
 
 #include "src/arm64/assembler-arm64-inl.h"
+#include "src/base/bits.h"
+
+// Simulator-specific helpers.
+#if USE_SIMULATOR
+  // TODO(all): If possible, automatically prepend an indicator like
+  // UNIMPLEMENTED or LOCATION.
+  #define ASM_UNIMPLEMENTED(message)                                         \
+  __ Debug(message, __LINE__, NO_PARAM)
+  #define ASM_UNIMPLEMENTED_BREAK(message)                                   \
+  __ Debug(message, __LINE__,                                                \
+           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
+  #define ASM_LOCATION(message)                                              \
+  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
+#else
+  #define ASM_UNIMPLEMENTED(message)
+  #define ASM_UNIMPLEMENTED_BREAK(message)
+  #define ASM_LOCATION(message)
+#endif
+
 
 namespace v8 {
 namespace internal {
@@ -25,6 +45,11 @@
   V(Str, CPURegister&, rt, StoreOpFor(rt))                    \
   V(Ldrsw, Register&, rt, LDRSW_x)
 
+#define LSPAIR_MACRO_LIST(V)                             \
+  V(Ldp, CPURegister&, rt, rt2, LoadPairOpFor(rt, rt2))  \
+  V(Stp, CPURegister&, rt, rt2, StorePairOpFor(rt, rt2)) \
+  V(Ldpsw, CPURegister&, rt, rt2, LDPSW_x)
+
 
 // ----------------------------------------------------------------------------
 // Static helper functions
@@ -202,6 +227,18 @@
   static bool IsImmMovz(uint64_t imm, unsigned reg_size);
   static unsigned CountClearHalfWords(uint64_t imm, unsigned reg_size);
 
+  // Try to move an immediate into the destination register in a single
+  // instruction. Returns true on success and updates the contents of dst;
+  // returns false otherwise.
+  bool TryOneInstrMoveImmediate(const Register& dst, int64_t imm);
+
+  // Move an immediate into register dst, and return an Operand object for use
+  // with a subsequent instruction that accepts a shift. The value moved into
+  // dst is not necessarily equal to imm; it may have had a shifting operation
+  // applied to it that will be subsequently undone by the shift applied in the
+  // Operand.
+  Operand MoveImmediateForShiftedOp(const Register& dst, int64_t imm);
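A worked example of the contract described above, with illustrative values not taken from V8: the immediate 0x5555000 equals 0x5555 << 12 and cannot be materialized by a single MOVZ, whose shift must be a multiple of 16. MoveImmediateForShiftedOp can instead emit

  movz dst, #0x5555

and return Operand(dst, LSL, 12), so a subsequent Add(rd, rn, operand) still computes rn + 0x5555000, with the residual shift folded into the add.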
+
   // Conditional macros.
   inline void Ccmp(const Register& rn,
                    const Operand& operand,
@@ -231,6 +268,14 @@
                       const MemOperand& addr,
                       LoadStoreOp op);
 
+#define DECLARE_FUNCTION(FN, REGTYPE, REG, REG2, OP) \
+  inline void FN(const REGTYPE REG, const REGTYPE REG2, const MemOperand& addr);
+  LSPAIR_MACRO_LIST(DECLARE_FUNCTION)
+#undef DECLARE_FUNCTION
+
+  void LoadStorePairMacro(const CPURegister& rt, const CPURegister& rt2,
+                          const MemOperand& addr, LoadStorePairOp op);
+
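The DECLARE_FUNCTION/LSPAIR_MACRO_LIST pairing above is the usual X-macro pattern: each V(...) row of the list is substituted into the declaring macro, so a single list drives the Ldp, Stp and Ldpsw declarations. A reduced, self-contained sketch of the same pattern; the names here are invented for illustration:

#include <cstdio>

#define OP_LIST(V) \
  V(Add, +)        \
  V(Sub, -)

// Each row of OP_LIST expands through DECLARE into a full definition.
#define DECLARE(NAME, OP) \
  int NAME(int a, int b) { return a OP b; }
OP_LIST(DECLARE)
#undef DECLARE

int main() {
  std::printf("%d %d\n", Add(2, 3), Sub(2, 3));  // prints "5 -1"
  return 0;
}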
   // V8-specific load/store helpers.
   void Load(const Register& rt, const MemOperand& addr, Representation r);
   void Store(const Register& rt, const MemOperand& addr, Representation r);
@@ -354,7 +399,7 @@
   // Provide a template to allow other types to be converted automatically.
   template<typename T>
   void Fmov(FPRegister fd, T imm) {
-    ASSERT(allow_macro_instructions_);
+    DCHECK(allow_macro_instructions_);
     Fmov(fd, static_cast<double>(imm));
   }
   inline void Fmov(Register rd, FPRegister fn);
@@ -388,12 +433,6 @@
   inline void Ldnp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& src);
-  inline void Ldp(const CPURegister& rt,
-                  const CPURegister& rt2,
-                  const MemOperand& src);
-  inline void Ldpsw(const Register& rt,
-                    const Register& rt2,
-                    const MemOperand& src);
   // Load a literal from the inline constant pool.
   inline void Ldr(const CPURegister& rt, const Immediate& imm);
   // Helper function for double immediate.
@@ -453,9 +492,6 @@
   inline void Stnp(const CPURegister& rt,
                    const CPURegister& rt2,
                    const MemOperand& dst);
-  inline void Stp(const CPURegister& rt,
-                  const CPURegister& rt2,
-                  const MemOperand& dst);
   inline void Sxtb(const Register& rd, const Register& rn);
   inline void Sxth(const Register& rd, const Register& rn);
   inline void Sxtw(const Register& rd, const Register& rn);
@@ -531,6 +567,7 @@
             const CPURegister& src6 = NoReg, const CPURegister& src7 = NoReg);
   void Pop(const CPURegister& dst0, const CPURegister& dst1 = NoReg,
            const CPURegister& dst2 = NoReg, const CPURegister& dst3 = NoReg);
+  void Push(const Register& src0, const FPRegister& src1);
 
   // Alternative forms of Push and Pop, taking a RegList or CPURegList that
   // specifies the registers that are to be pushed or popped. Higher-numbered
@@ -606,7 +643,7 @@
     explicit PushPopQueue(MacroAssembler* masm) : masm_(masm), size_(0) { }
 
     ~PushPopQueue() {
-      ASSERT(queued_.empty());
+      DCHECK(queued_.empty());
     }
 
     void Queue(const CPURegister& rt) {
@@ -758,7 +795,7 @@
 
   // Set the current stack pointer, but don't generate any code.
   inline void SetStackPointer(const Register& stack_pointer) {
-    ASSERT(!TmpList()->IncludesAliasOf(stack_pointer));
+    DCHECK(!TmpList()->IncludesAliasOf(stack_pointer));
     sp_ = stack_pointer;
   }
 
@@ -772,8 +809,8 @@
   inline void AlignAndSetCSPForFrame() {
     int sp_alignment = ActivationFrameAlignment();
     // AAPCS64 mandates at least 16-byte alignment.
-    ASSERT(sp_alignment >= 16);
-    ASSERT(IsPowerOf2(sp_alignment));
+    DCHECK(sp_alignment >= 16);
+    DCHECK(base::bits::IsPowerOfTwo32(sp_alignment));
     Bic(csp, StackPointer(), sp_alignment - 1);
     SetStackPointer(csp);
   }
@@ -828,7 +865,7 @@
     if (object->IsHeapObject()) {
       LoadHeapObject(result, Handle<HeapObject>::cast(object));
     } else {
-      ASSERT(object->IsSmi());
+      DCHECK(object->IsSmi());
       Mov(result, Operand(object));
     }
   }
@@ -874,11 +911,6 @@
   inline void SmiTagAndPush(Register src);
   inline void SmiTagAndPush(Register src1, Register src2);
 
-  // Compute the absolute value of 'smi' and leave the result in 'smi'
-  // register. If 'smi' is the most negative SMI, the absolute value cannot
-  // be represented as a SMI and a jump to 'slow' is done.
-  void SmiAbs(const Register& smi, Label* slow);
-
   inline void JumpIfSmi(Register value,
                         Label* smi_label,
                         Label* not_smi_label = NULL);
@@ -915,16 +947,10 @@
   // Abort execution if argument is not a string, enabled via --debug-code.
   void AssertString(Register object);
 
-  void JumpForHeapNumber(Register object,
-                         Register heap_number_map,
-                         Label* on_heap_number,
-                         Label* on_not_heap_number = NULL);
-  void JumpIfHeapNumber(Register object,
-                        Label* on_heap_number,
-                        Register heap_number_map = NoReg);
-  void JumpIfNotHeapNumber(Register object,
-                           Label* on_not_heap_number,
-                           Register heap_number_map = NoReg);
+  void JumpIfHeapNumber(Register object, Label* on_heap_number,
+                        SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
+  void JumpIfNotHeapNumber(Register object, Label* on_not_heap_number,
+                           SmiCheckType smi_check_type = DONT_DO_SMI_CHECK);
 
   // Sets the vs flag if the input is -0.0.
   void TestForMinusZero(DoubleRegister input);
@@ -968,7 +994,7 @@
                                  FPRegister scratch_d,
                                  Label* on_successful_conversion = NULL,
                                  Label* on_failed_conversion = NULL) {
-    ASSERT(as_int.Is32Bits());
+    DCHECK(as_int.Is32Bits());
     TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                             on_failed_conversion);
   }
@@ -983,7 +1009,7 @@
                                  FPRegister scratch_d,
                                  Label* on_successful_conversion = NULL,
                                  Label* on_failed_conversion = NULL) {
-    ASSERT(as_int.Is64Bits());
+    DCHECK(as_int.Is64Bits());
     TryRepresentDoubleAsInt(as_int, value, scratch_d, on_successful_conversion,
                             on_failed_conversion);
   }
@@ -1020,41 +1046,30 @@
   // ---- String Utilities ----
 
 
-  // Jump to label if either object is not a sequential ASCII string.
+  // Jump to label if either object is not a sequential one-byte string.
   // Optionally perform a smi check on the objects first.
-  void JumpIfEitherIsNotSequentialAsciiStrings(
-      Register first,
-      Register second,
-      Register scratch1,
-      Register scratch2,
-      Label* failure,
-      SmiCheckType smi_check = DO_SMI_CHECK);
+  void JumpIfEitherIsNotSequentialOneByteStrings(
+      Register first, Register second, Register scratch1, Register scratch2,
+      Label* failure, SmiCheckType smi_check = DO_SMI_CHECK);
 
-  // Check if instance type is sequential ASCII string and jump to label if
+  // Check if instance type is sequential one-byte string and jump to label if
   // it is not.
-  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
-                                              Register scratch,
-                                              Label* failure);
+  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+                                                Label* failure);
 
-  // Checks if both instance types are sequential ASCII strings and jumps to
+  // Checks if both instance types are sequential one-byte strings and jumps to
   // label if either is not.
-  void JumpIfEitherInstanceTypeIsNotSequentialAscii(
-      Register first_object_instance_type,
-      Register second_object_instance_type,
-      Register scratch1,
-      Register scratch2,
-      Label* failure);
+  void JumpIfEitherInstanceTypeIsNotSequentialOneByte(
+      Register first_object_instance_type, Register second_object_instance_type,
+      Register scratch1, Register scratch2, Label* failure);
 
-  // Checks if both instance types are sequential ASCII strings and jumps to
+  // Checks if both instance types are sequential one-byte strings and jumps to
   // label if either is not.
-  void JumpIfBothInstanceTypesAreNotSequentialAscii(
-      Register first_object_instance_type,
-      Register second_object_instance_type,
-      Register scratch1,
-      Register scratch2,
-      Label* failure);
+  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+      Register first_object_instance_type, Register second_object_instance_type,
+      Register scratch1, Register scratch2, Label* failure);
 
-  void JumpIfNotUniqueName(Register type, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register type, Label* not_unique_name);
 
   // ---- Calling / Jumping helpers ----
 
@@ -1334,32 +1349,25 @@
                              Register scratch2,
                              Register scratch3,
                              Label* gc_required);
-  void AllocateAsciiString(Register result,
-                           Register length,
-                           Register scratch1,
-                           Register scratch2,
-                           Register scratch3,
-                           Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
   void AllocateTwoByteConsString(Register result,
                                  Register length,
                                  Register scratch1,
                                  Register scratch2,
                                  Label* gc_required);
-  void AllocateAsciiConsString(Register result,
-                               Register length,
-                               Register scratch1,
-                               Register scratch2,
-                               Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register length,
+                                 Register scratch1, Register scratch2,
+                                 Label* gc_required);
   void AllocateTwoByteSlicedString(Register result,
                                    Register length,
                                    Register scratch1,
                                    Register scratch2,
                                    Label* gc_required);
-  void AllocateAsciiSlicedString(Register result,
-                                 Register length,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register length,
+                                   Register scratch1, Register scratch2,
+                                   Label* gc_required);
 
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed.
@@ -1371,7 +1379,8 @@
                           Register scratch1,
                           Register scratch2,
                           CPURegister value = NoFPReg,
-                          CPURegister heap_number_map = NoReg);
+                          CPURegister heap_number_map = NoReg,
+                          MutableMode mode = IMMUTABLE);
 
   // ---------------------------------------------------------------------------
   // Support functions.
@@ -1434,9 +1443,11 @@
 
   // Compare an object's map with the specified map. Condition flags are set
   // with result of map compare.
-  void CompareMap(Register obj,
-                  Register scratch,
-                  Handle<Map> map);
+  void CompareObjectMap(Register obj, Heap::RootListIndex index);
+
+  // Compare an object's map with the specified map. Condition flags are set
+  // with result of map compare.
+  void CompareObjectMap(Register obj, Register scratch, Handle<Map> map);
 
   // As above, but the map of the object is already loaded into the register
   // which is preserved by the code generated.
@@ -2165,7 +2176,7 @@
 // emitted is what you specified when creating the scope.
 class InstructionAccurateScope BASE_EMBEDDED {
  public:
-  InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
+  explicit InstructionAccurateScope(MacroAssembler* masm, size_t count = 0)
       : masm_(masm)
 #ifdef DEBUG
         ,
@@ -2190,7 +2201,7 @@
     masm_->EndBlockPools();
 #ifdef DEBUG
     if (start_.is_bound()) {
-      ASSERT(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
+      DCHECK(masm_->SizeOfCodeGeneratedSince(&start_) == size_);
     }
     masm_->set_allow_macro_instructions(previous_allow_macro_instructions_);
 #endif
@@ -2220,8 +2231,8 @@
         availablefp_(masm->FPTmpList()),
         old_available_(available_->list()),
         old_availablefp_(availablefp_->list()) {
-    ASSERT(available_->type() == CPURegister::kRegister);
-    ASSERT(availablefp_->type() == CPURegister::kFPRegister);
+    DCHECK(available_->type() == CPURegister::kRegister);
+    DCHECK(availablefp_->type() == CPURegister::kFPRegister);
   }
 
   ~UseScratchRegisterScope();
diff --git a/src/arm64/regexp-macro-assembler-arm64.cc b/src/arm64/regexp-macro-assembler-arm64.cc
index a772ef2..e9a485d 100644
--- a/src/arm64/regexp-macro-assembler-arm64.cc
+++ b/src/arm64/regexp-macro-assembler-arm64.cc
@@ -6,13 +6,14 @@
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/cpu-profiler.h"
-#include "src/unicode.h"
-#include "src/log.h"
 #include "src/code-stubs.h"
-#include "src/regexp-stack.h"
+#include "src/cpu-profiler.h"
+#include "src/log.h"
 #include "src/macro-assembler.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
 #include "src/arm64/regexp-macro-assembler-arm64.h"
 
 namespace v8 {
@@ -125,7 +126,7 @@
       backtrack_label_(),
       exit_label_() {
   __ SetStackPointer(csp);
-  ASSERT_EQ(0, registers_to_save % 2);
+  DCHECK_EQ(0, registers_to_save % 2);
   // We can cache at most 16 W registers in x0-x7.
   STATIC_ASSERT(kNumCachedRegisters <= 16);
   STATIC_ASSERT((kNumCachedRegisters % 2) == 0);
@@ -160,7 +161,7 @@
 
 
 void RegExpMacroAssemblerARM64::AdvanceRegister(int reg, int by) {
-  ASSERT((reg >= 0) && (reg < num_registers_));
+  DCHECK((reg >= 0) && (reg < num_registers_));
   if (by != 0) {
     Register to_advance;
     RegisterState register_state = GetRegisterState(reg);
@@ -259,9 +260,9 @@
   }
 
   for (int i = 0; i < str.length(); i++) {
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       __ Ldrb(w10, MemOperand(characters_address, 1, PostIndex));
-      ASSERT(str[i] <= String::kMaxOneByteCharCode);
+      DCHECK(str[i] <= String::kMaxOneByteCharCode);
     } else {
       __ Ldrh(w10, MemOperand(characters_address, 2, PostIndex));
     }
@@ -288,10 +289,10 @@
   // Save the capture length in a callee-saved register so it will
   // be preserved if we call a C helper.
   Register capture_length = w19;
-  ASSERT(kCalleeSaved.IncludesAliasOf(capture_length));
+  DCHECK(kCalleeSaved.IncludesAliasOf(capture_length));
 
   // Find length of back-referenced capture.
-  ASSERT((start_reg % 2) == 0);
+  DCHECK((start_reg % 2) == 0);
   if (start_reg < kNumCachedRegisters) {
     __ Mov(capture_start_offset.X(), GetCachedRegister(start_reg));
     __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
@@ -306,7 +307,7 @@
   __ Cmn(capture_length, current_input_offset());
   BranchOrBacktrack(gt, on_no_match);
 
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     Label success;
     Label fail;
     Label loop_check;
@@ -364,12 +365,12 @@
       __ Check(le, kOffsetOutOfRange);
     }
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     int argument_count = 4;
 
     // The cached registers need to be retained.
     CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
-    ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+    DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
     __ PushCPURegList(cached_registers);
 
     // Put arguments into arguments registers.
@@ -396,11 +397,14 @@
     }
 
     // Check if function returned non-zero for success or zero for failure.
-    CompareAndBranchOrBacktrack(x0, 0, eq, on_no_match);
+    // x0 is one of the registers used as a cache, so it must be tested
+    // before the cache is restored.
+    __ Cmp(x0, 0);
+    __ PopCPURegList(cached_registers);
+    BranchOrBacktrack(eq, on_no_match);
+
     // On success, increment position by length of capture.
     __ Add(current_input_offset(), current_input_offset(), capture_length);
-    // Reset the cached registers.
-    __ PopCPURegList(cached_registers);
   }
 
   __ Bind(&fallthrough);
@@ -417,7 +421,7 @@
   Register capture_length = w15;
 
   // Find length of back-referenced capture.
-  ASSERT((start_reg % 2) == 0);
+  DCHECK((start_reg % 2) == 0);
   if (start_reg < kNumCachedRegisters) {
     __ Mov(x10, GetCachedRegister(start_reg));
     __ Lsr(x11, GetCachedRegister(start_reg), kWRegSizeInBits);
@@ -443,11 +447,11 @@
 
   Label loop;
   __ Bind(&loop);
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     __ Ldrb(w10, MemOperand(capture_start_address, 1, PostIndex));
     __ Ldrb(w11, MemOperand(current_position_address, 1, PostIndex));
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     __ Ldrh(w10, MemOperand(capture_start_address, 2, PostIndex));
     __ Ldrh(w11, MemOperand(current_position_address, 2, PostIndex));
   }
@@ -495,7 +499,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
   __ Sub(w10, current_character(), minus);
   __ And(w10, w10, mask);
   CompareAndBranchOrBacktrack(w10, c, ne, on_not_equal);
@@ -526,7 +530,7 @@
     Handle<ByteArray> table,
     Label* on_bit_set) {
   __ Mov(x11, Operand(table));
-  if ((mode_ != ASCII) || (kTableMask != String::kMaxOneByteCharCode)) {
+  if ((mode_ != LATIN1) || (kTableMask != String::kMaxOneByteCharCode)) {
     __ And(w10, current_character(), kTableMask);
     __ Add(w10, w10, ByteArray::kHeaderSize - kHeapObjectTag);
   } else {
@@ -544,7 +548,7 @@
   switch (type) {
   case 's':
     // Match space-characters
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
+      // One-byte space characters are '\t'..'\r', ' ' and \u00a0.
       Label success;
       // Check for ' ' or 0x00a0.
@@ -607,8 +611,8 @@
     return true;
   }
   case 'w': {
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       CompareAndBranchOrBacktrack(current_character(), 'z', hi, on_no_match);
     }
     ExternalReference map = ExternalReference::re_word_character_map();
@@ -619,8 +623,8 @@
   }
   case 'W': {
     Label done;
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ Cmp(current_character(), 'z');
       __ B(hi, &done);
     }
@@ -677,10 +681,10 @@
   CPURegList argument_registers(x0, x5, x6, x7);
 
   CPURegList registers_to_retain = kCalleeSaved;
-  ASSERT(kCalleeSaved.Count() == 11);
+  DCHECK(kCalleeSaved.Count() == 11);
   registers_to_retain.Combine(lr);
 
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
   __ PushCPURegList(registers_to_retain);
   __ PushCPURegList(argument_registers);
 
@@ -704,7 +708,7 @@
 
   // Make sure the stack alignment will be respected.
   int alignment = masm_->ActivationFrameAlignment();
-  ASSERT_EQ(alignment % 16, 0);
+  DCHECK_EQ(alignment % 16, 0);
   int align_mask = (alignment / kWRegSize) - 1;
   num_wreg_to_allocate = (num_wreg_to_allocate + align_mask) & ~align_mask;
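The two lines above are the standard power-of-two round-up idiom: with 16-byte frame alignment and 4-byte W registers, align_mask is 3 and the count is rounded up to the next multiple of 4. A sketch, assuming those ARM64 sizes:

#include <cassert>

int main() {
  const int alignment = 16;  // AAPCS64 frame alignment, in bytes
  const int kWRegSize = 4;   // bytes per W register
  const int align_mask = (alignment / kWRegSize) - 1;  // == 3
  // (n + mask) & ~mask rounds n up to the next multiple of (mask + 1).
  assert(((5 + align_mask) & ~align_mask) == 8);
  assert(((8 + align_mask) & ~align_mask) == 8);
  return 0;
}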
 
@@ -857,7 +861,7 @@
         Register base = x10;
         // There are always an even number of capture registers. A couple of
         // registers determine one match with two offsets.
-        ASSERT_EQ(0, num_registers_left_on_stack % 2);
+        DCHECK_EQ(0, num_registers_left_on_stack % 2);
         __ Add(base, frame_pointer(), kFirstCaptureOnStack);
 
         // We can unroll the loop here, we should not unroll for less than 2
@@ -974,7 +978,7 @@
   __ Bind(&return_w0);
 
   // Set stack pointer back to first register to retain
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
   __ Mov(csp, fp);
   __ AssertStackConsistency();
 
@@ -987,7 +991,7 @@
   // Registers x0 to x7 are used to store the first captures, they need to be
   // retained over calls to C++ code.
   CPURegList cached_registers(CPURegister::kRegister, kXRegSizeInBits, 0, 7);
-  ASSERT((cached_registers.Count() * 2) == kNumCachedRegisters);
+  DCHECK((cached_registers.Count() * 2) == kNumCachedRegisters);
 
   if (check_preempt_label_.is_linked()) {
     __ Bind(&check_preempt_label_);
@@ -1080,9 +1084,9 @@
                                                      int characters) {
   // TODO(pielan): Make sure long strings are caught before this, and not
   // just asserted in debug mode.
-  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
+  DCHECK(cp_offset >= -1);      // ^ and \b can look behind one character.
   // Be sane! (And ensure that an int32_t can be used to index the string)
-  ASSERT(cp_offset < (1<<30));
+  DCHECK(cp_offset < (1<<30));
   if (check_bounds) {
     CheckPosition(cp_offset + characters - 1, on_end_of_input);
   }
@@ -1175,7 +1179,7 @@
 
 
 void RegExpMacroAssemblerARM64::SetRegister(int register_index, int to) {
-  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
   Register set_to = wzr;
   if (to != 0) {
     set_to = w10;
@@ -1203,7 +1207,7 @@
 
 
 void RegExpMacroAssemblerARM64::ClearRegisters(int reg_from, int reg_to) {
-  ASSERT(reg_from <= reg_to);
+  DCHECK(reg_from <= reg_to);
   int num_registers = reg_to - reg_from + 1;
 
   // If the first capture register is cached in a hardware register but not
@@ -1216,7 +1220,7 @@
 
   // Clear cached registers in pairs as far as possible.
   while ((num_registers >= 2) && (reg_from < kNumCachedRegisters)) {
-    ASSERT(GetRegisterState(reg_from) == CACHED_LSW);
+    DCHECK(GetRegisterState(reg_from) == CACHED_LSW);
     __ Mov(GetCachedRegister(reg_from), twice_non_position_value());
     reg_from += 2;
     num_registers -= 2;
@@ -1230,7 +1234,7 @@
 
   if (num_registers > 0) {
     // If there are some remaining registers, they are stored on the stack.
-    ASSERT(reg_from >= kNumCachedRegisters);
+    DCHECK(reg_from >= kNumCachedRegisters);
 
     // Move down the indexes of the registers on stack to get the correct offset
     // in memory.
@@ -1311,10 +1315,10 @@
   Handle<String> subject(frame_entry<String*>(re_frame, kInput));
 
   // Current string.
-  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
 
-  ASSERT(re_code->instruction_start() <= *return_address);
-  ASSERT(*return_address <=
+  DCHECK(re_code->instruction_start() <= *return_address);
+  DCHECK(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
   Object* result = isolate->stack_guard()->HandleInterrupts();
@@ -1342,8 +1346,8 @@
   }
 
   // String might have changed.
-  if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
-    // If we changed between an ASCII and an UC16 string, the specialized
+  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a Latin1 and a UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
     return RETRY;
@@ -1353,7 +1357,7 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+  DCHECK(StringShape(*subject_tmp).IsSequential() ||
          StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
@@ -1406,11 +1410,11 @@
   // moved. Allocate extra space for 2 arguments passed by pointers.
   // AAPCS64 requires the stack to be 16 byte aligned.
   int alignment = masm_->ActivationFrameAlignment();
-  ASSERT_EQ(alignment % 16, 0);
+  DCHECK_EQ(alignment % 16, 0);
   int align_mask = (alignment / kXRegSize) - 1;
   int xreg_to_claim = (3 + align_mask) & ~align_mask;
 
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
   __ Claim(xreg_to_claim);
 
   // CheckStackGuardState needs the end and start addresses of the input string.
@@ -1440,7 +1444,7 @@
   __ Peek(input_start(), kPointerSize);
   __ Peek(input_end(), 2 * kPointerSize);
 
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
   __ Drop(xreg_to_claim);
 
   // Reload the Code pointer.
@@ -1460,12 +1464,7 @@
   if (to == NULL) {
     to = &backtrack_label_;
   }
-  // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
-  Condition inverted_condition = NegateCondition(condition);
-  Label no_branch;
-  __ B(inverted_condition, &no_branch);
-  __ B(to);
-  __ Bind(&no_branch);
+  __ B(condition, to);
 }
 
 void RegExpMacroAssemblerARM64::CompareAndBranchOrBacktrack(Register reg,
@@ -1476,15 +1475,11 @@
     if (to == NULL) {
       to = &backtrack_label_;
     }
-    // TODO(ulan): do direct jump when jump distance is known and fits in imm19.
-    Label no_branch;
     if (condition == eq) {
-      __ Cbnz(reg, &no_branch);
+      __ Cbz(reg, to);
     } else {
-      __ Cbz(reg, &no_branch);
+      __ Cbnz(reg, to);
     }
-    __ B(to);
-    __ Bind(&no_branch);
   } else {
     __ Cmp(reg, immediate);
     BranchOrBacktrack(condition, to);
@@ -1498,7 +1493,7 @@
       ExternalReference::address_of_stack_limit(isolate());
   __ Mov(x10, stack_limit);
   __ Ldr(x10, MemOperand(x10));
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
   __ Cmp(csp, x10);
   CallIf(&check_preempt_label_, ls);
 }
@@ -1515,8 +1510,8 @@
 
 
 void RegExpMacroAssemblerARM64::Push(Register source) {
-  ASSERT(source.Is32Bits());
-  ASSERT(!source.is(backtrack_stackpointer()));
+  DCHECK(source.Is32Bits());
+  DCHECK(!source.is(backtrack_stackpointer()));
   __ Str(source,
          MemOperand(backtrack_stackpointer(),
                     -static_cast<int>(kWRegSize),
@@ -1525,23 +1520,23 @@
 
 
 void RegExpMacroAssemblerARM64::Pop(Register target) {
-  ASSERT(target.Is32Bits());
-  ASSERT(!target.is(backtrack_stackpointer()));
+  DCHECK(target.Is32Bits());
+  DCHECK(!target.is(backtrack_stackpointer()));
   __ Ldr(target,
          MemOperand(backtrack_stackpointer(), kWRegSize, PostIndex));
 }
 
 
 Register RegExpMacroAssemblerARM64::GetCachedRegister(int register_index) {
-  ASSERT(register_index < kNumCachedRegisters);
+  DCHECK(register_index < kNumCachedRegisters);
   return Register::Create(register_index / 2, kXRegSizeInBits);
 }
 
 
 Register RegExpMacroAssemblerARM64::GetRegister(int register_index,
                                                 Register maybe_result) {
-  ASSERT(maybe_result.Is32Bits());
-  ASSERT(register_index >= 0);
+  DCHECK(maybe_result.Is32Bits());
+  DCHECK(register_index >= 0);
   if (num_registers_ <= register_index) {
     num_registers_ = register_index + 1;
   }
@@ -1564,15 +1559,15 @@
       UNREACHABLE();
       break;
   }
-  ASSERT(result.Is32Bits());
+  DCHECK(result.Is32Bits());
   return result;
 }
 
 
 void RegExpMacroAssemblerARM64::StoreRegister(int register_index,
                                               Register source) {
-  ASSERT(source.Is32Bits());
-  ASSERT(register_index >= 0);
+  DCHECK(source.Is32Bits());
+  DCHECK(register_index >= 0);
   if (num_registers_ <= register_index) {
     num_registers_ = register_index + 1;
   }
@@ -1609,22 +1604,22 @@
 
 
 void RegExpMacroAssemblerARM64::RestoreLinkRegister() {
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
   __ Pop(lr, xzr);
   __ Add(lr, lr, Operand(masm_->CodeObject()));
 }
 
 
 void RegExpMacroAssemblerARM64::SaveLinkRegister() {
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
   __ Sub(lr, lr, Operand(masm_->CodeObject()));
   __ Push(xzr, lr);
 }
 
 
 MemOperand RegExpMacroAssemblerARM64::register_location(int register_index) {
-  ASSERT(register_index < (1<<30));
-  ASSERT(register_index >= kNumCachedRegisters);
+  DCHECK(register_index < (1<<30));
+  DCHECK(register_index >= kNumCachedRegisters);
   if (num_registers_ <= register_index) {
     num_registers_ = register_index + 1;
   }
@@ -1635,10 +1630,10 @@
 
 MemOperand RegExpMacroAssemblerARM64::capture_location(int register_index,
                                                      Register scratch) {
-  ASSERT(register_index < (1<<30));
-  ASSERT(register_index < num_saved_registers_);
-  ASSERT(register_index >= kNumCachedRegisters);
-  ASSERT_EQ(register_index % 2, 0);
+  DCHECK(register_index < (1<<30));
+  DCHECK(register_index < num_saved_registers_);
+  DCHECK(register_index >= kNumCachedRegisters);
+  DCHECK_EQ(register_index % 2, 0);
   register_index -= kNumCachedRegisters;
   int offset = kFirstCaptureOnStack - register_index * kWRegSize;
   // capture_location is used with Stp instructions to load/store 2 registers.
@@ -1664,7 +1659,7 @@
   // disable it.
   // TODO(pielan): See whether or not we should disable unaligned accesses.
   if (!CanReadUnaligned()) {
-    ASSERT(characters == 1);
+    DCHECK(characters == 1);
   }
 
   if (cp_offset != 0) {
@@ -1680,21 +1675,21 @@
     offset = w10;
   }
 
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     if (characters == 4) {
       __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
     } else if (characters == 2) {
       __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ Ldrb(current_character(), MemOperand(input_end(), offset, SXTW));
     }
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     if (characters == 2) {
       __ Ldr(current_character(), MemOperand(input_end(), offset, SXTW));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ Ldrh(current_character(), MemOperand(input_end(), offset, SXTW));
     }
   }
diff --git a/src/arm64/regexp-macro-assembler-arm64.h b/src/arm64/regexp-macro-assembler-arm64.h
index c319eae..632c513 100644
--- a/src/arm64/regexp-macro-assembler-arm64.h
+++ b/src/arm64/regexp-macro-assembler-arm64.h
@@ -5,9 +5,10 @@
 #ifndef V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
 #define V8_ARM64_REGEXP_MACRO_ASSEMBLER_ARM64_H_
 
+#include "src/macro-assembler.h"
+
 #include "src/arm64/assembler-arm64.h"
 #include "src/arm64/assembler-arm64-inl.h"
-#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -230,7 +231,7 @@
   };
 
   RegisterState GetRegisterState(int register_index) {
-    ASSERT(register_index >= 0);
+    DCHECK(register_index >= 0);
     if (register_index >= kNumCachedRegisters) {
       return STACKED;
     } else {
@@ -264,7 +265,7 @@
 
   MacroAssembler* masm_;
 
-  // Which mode to generate code for (ASCII or UC16).
+  // Which mode to generate code for (LATIN1 or UC16).
   Mode mode_;
 
   // One greater than maximal register index actually used.
diff --git a/src/arm64/simulator-arm64.cc b/src/arm64/simulator-arm64.cc
index 488b91e..129252b 100644
--- a/src/arm64/simulator-arm64.cc
+++ b/src/arm64/simulator-arm64.cc
@@ -9,11 +9,12 @@
 
 #if V8_TARGET_ARCH_ARM64
 
-#include "src/disasm.h"
-#include "src/assembler.h"
 #include "src/arm64/decoder-arm64-inl.h"
 #include "src/arm64/simulator-arm64.h"
+#include "src/assembler.h"
+#include "src/disasm.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -29,30 +30,28 @@
 
 
 // Helpers for colors.
-// Depending on your terminal configuration, the colour names may not match the
-// observed colours.
-#define COLOUR(colour_code)  "\033[" colour_code "m"
-#define BOLD(colour_code)    "1;" colour_code
-#define NORMAL ""
-#define GREY   "30"
-#define GREEN  "32"
-#define ORANGE "33"
-#define BLUE   "34"
-#define PURPLE "35"
-#define INDIGO "36"
-#define WHITE  "37"
+#define COLOUR(colour_code)       "\033[0;" colour_code "m"
+#define COLOUR_BOLD(colour_code)  "\033[1;" colour_code "m"
+#define NORMAL  ""
+#define GREY    "30"
+#define RED     "31"
+#define GREEN   "32"
+#define YELLOW  "33"
+#define BLUE    "34"
+#define MAGENTA "35"
+#define CYAN    "36"
+#define WHITE   "37"
 typedef char const * const TEXT_COLOUR;
 TEXT_COLOUR clr_normal         = FLAG_log_colour ? COLOUR(NORMAL)       : "";
-TEXT_COLOUR clr_flag_name      = FLAG_log_colour ? COLOUR(BOLD(GREY))   : "";
-TEXT_COLOUR clr_flag_value     = FLAG_log_colour ? COLOUR(BOLD(WHITE))  : "";
-TEXT_COLOUR clr_reg_name       = FLAG_log_colour ? COLOUR(BOLD(BLUE))   : "";
-TEXT_COLOUR clr_reg_value      = FLAG_log_colour ? COLOUR(BOLD(INDIGO)) : "";
-TEXT_COLOUR clr_fpreg_name     = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
-TEXT_COLOUR clr_fpreg_value    = FLAG_log_colour ? COLOUR(BOLD(PURPLE)) : "";
-TEXT_COLOUR clr_memory_value   = FLAG_log_colour ? COLOUR(BOLD(GREEN))  : "";
-TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR(GREEN)        : "";
-TEXT_COLOUR clr_debug_number   = FLAG_log_colour ? COLOUR(BOLD(ORANGE)) : "";
-TEXT_COLOUR clr_debug_message  = FLAG_log_colour ? COLOUR(ORANGE)       : "";
+TEXT_COLOUR clr_flag_name      = FLAG_log_colour ? COLOUR_BOLD(WHITE)   : "";
+TEXT_COLOUR clr_flag_value     = FLAG_log_colour ? COLOUR(NORMAL)       : "";
+TEXT_COLOUR clr_reg_name       = FLAG_log_colour ? COLOUR_BOLD(CYAN)    : "";
+TEXT_COLOUR clr_reg_value      = FLAG_log_colour ? COLOUR(CYAN)         : "";
+TEXT_COLOUR clr_fpreg_name     = FLAG_log_colour ? COLOUR_BOLD(MAGENTA) : "";
+TEXT_COLOUR clr_fpreg_value    = FLAG_log_colour ? COLOUR(MAGENTA)      : "";
+TEXT_COLOUR clr_memory_address = FLAG_log_colour ? COLOUR_BOLD(BLUE)    : "";
+TEXT_COLOUR clr_debug_number   = FLAG_log_colour ? COLOUR_BOLD(YELLOW)  : "";
+TEXT_COLOUR clr_debug_message  = FLAG_log_colour ? COLOUR(YELLOW)       : "";
 TEXT_COLOUR clr_printf         = FLAG_log_colour ? COLOUR(GREEN)        : "";
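The rewritten macros expand, via string-literal concatenation, to standard ANSI SGR escape sequences: COLOUR(CYAN) becomes "\033[0;36m" and COLOUR_BOLD(CYAN) becomes "\033[1;36m". A minimal sketch of the same mechanism outside the simulator:

#include <cstdio>

#define COLOUR(colour_code)       "\033[0;" colour_code "m"
#define COLOUR_BOLD(colour_code)  "\033[1;" colour_code "m"
#define CYAN "36"

int main() {
  // Prints "bold cyan" in bold cyan, then resets the terminal attributes.
  std::printf("%sbold cyan%s\n", COLOUR_BOLD(CYAN), "\033[0m");
  return 0;
}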
 
 
@@ -61,7 +60,7 @@
   if (FLAG_trace_sim) {
     va_list arguments;
     va_start(arguments, format);
-    OS::VFPrint(stream_, format, arguments);
+    base::OS::VFPrint(stream_, format, arguments);
     va_end(arguments);
   }
 }
@@ -72,11 +71,11 @@
 
 void SimSystemRegister::SetBits(int msb, int lsb, uint32_t bits) {
   int width = msb - lsb + 1;
-  ASSERT(is_uintn(bits, width) || is_intn(bits, width));
+  DCHECK(is_uintn(bits, width) || is_intn(bits, width));
 
   bits <<= lsb;
   uint32_t mask = ((1 << width) - 1) << lsb;
-  ASSERT((mask & write_ignore_mask_) == 0);
+  DCHECK((mask & write_ignore_mask_) == 0);
 
   value_ = (value_ & ~mask) | (bits & mask);
 }
@@ -106,7 +105,7 @@
 Simulator* Simulator::current(Isolate* isolate) {
   Isolate::PerIsolateThreadData* isolate_data =
       isolate->FindOrAllocatePerThreadDataForThisThread();
-  ASSERT(isolate_data != NULL);
+  DCHECK(isolate_data != NULL);
 
   Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
@@ -134,7 +133,7 @@
     } else if (arg.IsD() && (index_d < 8)) {
       set_dreg_bits(index_d++, arg.bits());
     } else {
-      ASSERT(arg.IsD() || arg.IsX());
+      DCHECK(arg.IsD() || arg.IsX());
       stack_args.push_back(arg.bits());
     }
   }
@@ -143,8 +142,8 @@
   uintptr_t original_stack = sp();
   uintptr_t entry_stack = original_stack -
                           stack_args.size() * sizeof(stack_args[0]);
-  if (OS::ActivationFrameAlignment() != 0) {
-    entry_stack &= -OS::ActivationFrameAlignment();
+  if (base::OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -base::OS::ActivationFrameAlignment();
   }
   char * stack = reinterpret_cast<char*>(entry_stack);
   std::vector<int64_t>::const_iterator it;
@@ -153,7 +152,7 @@
     stack += sizeof(*it);
   }
 
-  ASSERT(reinterpret_cast<uintptr_t>(stack) <= original_stack);
+  DCHECK(reinterpret_cast<uintptr_t>(stack) <= original_stack);
   set_sp(entry_stack);
 
   // Call the generated code.
@@ -255,7 +254,7 @@
     CHECK_EQ(saved_registers[i], xreg(register_list.PopLowestIndex().code()));
   }
   for (int i = 0; i < kNumberOfCalleeSavedFPRegisters; i++) {
-    ASSERT(saved_fpregisters[i] ==
+    DCHECK(saved_fpregisters[i] ==
            dreg_bits(fpregister_list.PopLowestIndex().code()));
   }
 
@@ -288,7 +287,7 @@
       set_xreg(code, value | code);
     }
   } else {
-    ASSERT(list->type() == CPURegister::kFPRegister);
+    DCHECK(list->type() == CPURegister::kFPRegister);
     while (!list->IsEmpty()) {
       unsigned code = list->PopLowestIndex().code();
       set_dreg_bits(code, value | code);
@@ -310,7 +309,7 @@
 
 // Extending the stack by 2 * 64 bits is required for stack alignment purposes.
 uintptr_t Simulator::PushAddress(uintptr_t address) {
-  ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+  DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
   intptr_t new_sp = sp() - 2 * kXRegSize;
   uintptr_t* alignment_slot =
     reinterpret_cast<uintptr_t*>(new_sp + kXRegSize);
@@ -326,7 +325,7 @@
   intptr_t current_sp = sp();
   uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
   uintptr_t address = *stack_slot;
-  ASSERT(sizeof(uintptr_t) < 2 * kXRegSize);
+  DCHECK(sizeof(uintptr_t) < 2 * kXRegSize);
   set_sp(current_sp + 2 * kXRegSize);
   return address;
 }
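The layout implied by PushAddress/PopAddress above, sketched under the stated constraint that csp stays 16-byte aligned: each pushed 8-byte address consumes a full 16-byte slot, with the word above it written only for alignment.

  new_sp + kXRegSize -> [ alignment slot (ignored by PopAddress) ]
  new_sp             -> [ address (read back by PopAddress)      ]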
@@ -336,7 +335,7 @@
 uintptr_t Simulator::StackLimit() const {
   // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
   // pushing values.
-  return reinterpret_cast<uintptr_t>(stack_limit_) + 1024;
+  return stack_limit_ + 1024;
 }
 
 
@@ -379,11 +378,11 @@
 
   // Allocate and setup the simulator stack.
   stack_size_ = (FLAG_sim_stack_size * KB) + (2 * stack_protection_size_);
-  stack_ = new byte[stack_size_];
+  stack_ = reinterpret_cast<uintptr_t>(new byte[stack_size_]);
   stack_limit_ = stack_ + stack_protection_size_;
-  byte* tos = stack_ + stack_size_ - stack_protection_size_;
-  // The stack pointer must be 16 bytes aligned.
-  set_sp(reinterpret_cast<int64_t>(tos) & ~0xfUL);
+  uintptr_t tos = stack_ + stack_size_ - stack_protection_size_;
+  // The stack pointer must be 16-byte aligned.
+  set_sp(tos & ~0xfUL);
 
   stream_ = stream;
   print_disasm_ = new PrintDisassembler(stream_);
@@ -419,7 +418,7 @@
 
 
 Simulator::~Simulator() {
-  delete[] stack_;
+  delete[] reinterpret_cast<byte*>(stack_);
   if (FLAG_log_instruction_stats) {
     delete instrument_;
   }
@@ -480,7 +479,7 @@
     Redirection* current = isolate->simulator_redirection();
     for (; current != NULL; current = current->next_) {
       if (current->external_function_ == external_function) {
-        ASSERT_EQ(current->type(), type);
+        DCHECK_EQ(current->type(), type);
         return current;
       }
     }
@@ -703,7 +702,7 @@
 
     case ExternalReference::PROFILING_GETTER_CALL: {
       // void f(Local<String> property, PropertyCallbackInfo& info,
-      //        AccessorGetterCallback callback)
+      //        AccessorNameGetterCallback callback)
       TraceSim("Type: PROFILING_GETTER_CALL\n");
       SimulatorRuntimeProfilingGetterCall target =
         reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(
@@ -733,15 +732,15 @@
 
 
 const char* Simulator::xreg_names[] = {
-"x0",  "x1",  "x2",  "x3",  "x4",  "x5",  "x6",  "x7",
-"x8",  "x9",  "x10", "x11", "x12", "x13", "x14", "x15",
-"ip0", "ip1", "x18", "x19", "x20", "x21", "x22", "x23",
-"x24", "x25", "x26", "cp", "jssp", "fp", "lr",  "xzr", "csp"};
+"x0",  "x1",  "x2",  "x3",  "x4",   "x5",  "x6",  "x7",
+"x8",  "x9",  "x10", "x11", "x12",  "x13", "x14", "x15",
+"ip0", "ip1", "x18", "x19", "x20",  "x21", "x22", "x23",
+"x24", "x25", "x26", "cp",  "jssp", "fp",  "lr",  "xzr", "csp"};
 
 const char* Simulator::wreg_names[] = {
-"w0",  "w1",  "w2",  "w3",  "w4",  "w5",  "w6",  "w7",
-"w8",  "w9",  "w10", "w11", "w12", "w13", "w14", "w15",
-"w16", "w17", "w18", "w19", "w20", "w21", "w22", "w23",
+"w0",  "w1",  "w2",  "w3",  "w4",    "w5",  "w6",  "w7",
+"w8",  "w9",  "w10", "w11", "w12",   "w13", "w14", "w15",
+"w16", "w17", "w18", "w19", "w20",   "w21", "w22", "w23",
 "w24", "w25", "w26", "wcp", "wjssp", "wfp", "wlr", "wzr", "wcsp"};
 
 const char* Simulator::sreg_names[] = {
@@ -764,7 +763,12 @@
 
 
 const char* Simulator::WRegNameForCode(unsigned code, Reg31Mode mode) {
-  ASSERT(code < kNumberOfRegisters);
+  STATIC_ASSERT(arraysize(Simulator::wreg_names) == (kNumberOfRegisters + 1));
+  DCHECK(code < kNumberOfRegisters);
+  // The modulo operator has no effect here, but it silences a broken GCC
+  // warning about out-of-bounds array accesses.
+  code %= kNumberOfRegisters;
+
   // If the code represents the stack pointer, index the name after zr.
   if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
     code = kZeroRegCode + 1;
@@ -774,7 +778,10 @@
 
 
 const char* Simulator::XRegNameForCode(unsigned code, Reg31Mode mode) {
-  ASSERT(code < kNumberOfRegisters);
+  STATIC_ASSERT(arraysize(Simulator::xreg_names) == (kNumberOfRegisters + 1));
+  DCHECK(code < kNumberOfRegisters);
+  code %= kNumberOfRegisters;
+
   // If the code represents the stack pointer, index the name after zr.
   if ((code == kZeroRegCode) && (mode == Reg31IsStackPointer)) {
     code = kZeroRegCode + 1;
@@ -784,20 +791,23 @@
 
 
 const char* Simulator::SRegNameForCode(unsigned code) {
-  ASSERT(code < kNumberOfFPRegisters);
-  return sreg_names[code];
+  STATIC_ASSERT(arraysize(Simulator::sreg_names) == kNumberOfFPRegisters);
+  DCHECK(code < kNumberOfFPRegisters);
+  return sreg_names[code % kNumberOfFPRegisters];
 }
 
 
 const char* Simulator::DRegNameForCode(unsigned code) {
-  ASSERT(code < kNumberOfFPRegisters);
-  return dreg_names[code];
+  STATIC_ASSERT(arraysize(Simulator::dreg_names) == kNumberOfFPRegisters);
+  DCHECK(code < kNumberOfFPRegisters);
+  return dreg_names[code % kNumberOfFPRegisters];
 }
 
 
 const char* Simulator::VRegNameForCode(unsigned code) {
-  ASSERT(code < kNumberOfFPRegisters);
-  return vreg_names[code];
+  STATIC_ASSERT(arraysize(Simulator::vreg_names) == kNumberOfFPRegisters);
+  DCHECK(code < kNumberOfFPRegisters);
+  return vreg_names[code % kNumberOfFPRegisters];
 }
 
 
@@ -829,7 +839,7 @@
                           T src2,
                           T carry_in) {
   typedef typename make_unsigned<T>::type unsignedT;
-  ASSERT((carry_in == 0) || (carry_in == 1));
+  DCHECK((carry_in == 0) || (carry_in == 1));
 
   T signed_sum = src1 + src2 + carry_in;
   T result = signed_sum;
@@ -854,6 +864,7 @@
     nzcv().SetZ(Z);
     nzcv().SetC(C);
     nzcv().SetV(V);
+    LogSystemRegister(NZCV);
   }
   return result;
 }
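The flag update above follows the usual ARM NZCV definitions. A standalone check of one case, as a sketch: a 32-bit add of 0x7fffffff and 1 yields a negative result with no unsigned carry but with signed overflow.

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t src1 = 0x7fffffffu;
  const uint32_t src2 = 1u;
  const uint32_t result = src1 + src2;  // 0x80000000
  const bool N = (result >> 31) != 0;   // sign bit of the result
  const bool Z = result == 0;
  const bool C = result < src1;         // unsigned wrap-around occurred
  // Overflow: the operands share a sign and the result has the other sign.
  const bool V = ((~(src1 ^ src2) & (src1 ^ result)) >> 31) != 0;
  assert(N && !Z && !C && V);
  return 0;
}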
@@ -977,6 +988,7 @@
   } else {
     UNREACHABLE();
   }
+  LogSystemRegister(NZCV);
 }
 
 
@@ -1043,118 +1055,206 @@
 }
 
 
-void Simulator::PrintSystemRegisters(bool print_all) {
-  static bool first_run = true;
-
-  static SimSystemRegister last_nzcv;
-  if (print_all || first_run || (last_nzcv.RawValue() != nzcv().RawValue())) {
-    fprintf(stream_, "# %sFLAGS: %sN:%d Z:%d C:%d V:%d%s\n",
-            clr_flag_name,
-            clr_flag_value,
-            nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
-            clr_normal);
-  }
-  last_nzcv = nzcv();
-
-  static SimSystemRegister last_fpcr;
-  if (print_all || first_run || (last_fpcr.RawValue() != fpcr().RawValue())) {
-    static const char * rmode[] = {
-      "0b00 (Round to Nearest)",
-      "0b01 (Round towards Plus Infinity)",
-      "0b10 (Round towards Minus Infinity)",
-      "0b11 (Round towards Zero)"
-    };
-    ASSERT(fpcr().RMode() <= (sizeof(rmode) / sizeof(rmode[0])));
-    fprintf(stream_, "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
-            clr_flag_name,
-            clr_flag_value,
-            fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
-            clr_normal);
-  }
-  last_fpcr = fpcr();
-
-  first_run = false;
+void Simulator::PrintSystemRegisters() {
+  PrintSystemRegister(NZCV);
+  PrintSystemRegister(FPCR);
 }
 
 
-void Simulator::PrintRegisters(bool print_all_regs) {
-  static bool first_run = true;
-  static int64_t last_regs[kNumberOfRegisters];
-
+void Simulator::PrintRegisters() {
   for (unsigned i = 0; i < kNumberOfRegisters; i++) {
-    if (print_all_regs || first_run ||
-        (last_regs[i] != xreg(i, Reg31IsStackPointer))) {
-      fprintf(stream_,
-              "# %s%4s:%s 0x%016" PRIx64 "%s\n",
-              clr_reg_name,
-              XRegNameForCode(i, Reg31IsStackPointer),
-              clr_reg_value,
-              xreg(i, Reg31IsStackPointer),
-              clr_normal);
-    }
-    // Cache the new register value so the next run can detect any changes.
-    last_regs[i] = xreg(i, Reg31IsStackPointer);
+    PrintRegister(i);
   }
-  first_run = false;
 }
 
 
-void Simulator::PrintFPRegisters(bool print_all_regs) {
-  static bool first_run = true;
-  static uint64_t last_regs[kNumberOfFPRegisters];
-
-  // Print as many rows of registers as necessary, keeping each individual
-  // register in the same column each time (to make it easy to visually scan
-  // for changes).
+void Simulator::PrintFPRegisters() {
   for (unsigned i = 0; i < kNumberOfFPRegisters; i++) {
-    if (print_all_regs || first_run || (last_regs[i] != dreg_bits(i))) {
-      fprintf(stream_,
-              "# %s %4s:%s 0x%016" PRIx64 "%s (%s%s:%s %g%s %s:%s %g%s)\n",
-              clr_fpreg_name,
-              VRegNameForCode(i),
-              clr_fpreg_value,
-              dreg_bits(i),
-              clr_normal,
-              clr_fpreg_name,
-              DRegNameForCode(i),
-              clr_fpreg_value,
-              dreg(i),
-              clr_fpreg_name,
-              SRegNameForCode(i),
-              clr_fpreg_value,
-              sreg(i),
-              clr_normal);
-    }
-    // Cache the new register value so the next run can detect any changes.
-    last_regs[i] = dreg_bits(i);
+    PrintFPRegister(i);
   }
-  first_run = false;
 }
 
 
-void Simulator::PrintProcessorState() {
-  PrintSystemRegisters();
-  PrintRegisters();
-  PrintFPRegisters();
+void Simulator::PrintRegister(unsigned code, Reg31Mode r31mode) {
+  // Don't print writes into xzr.
+  if ((code == kZeroRegCode) && (r31mode == Reg31IsZeroRegister)) {
+    return;
+  }
+
+  // The template is "# x<code>:value".
+  fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s\n",
+          clr_reg_name, XRegNameForCode(code, r31mode),
+          clr_reg_value, reg<uint64_t>(code, r31mode), clr_normal);
 }
 
 
-void Simulator::PrintWrite(uint8_t* address,
-                           uint64_t value,
-                           unsigned num_bytes) {
-  // The template is "# value -> address". The template is not directly used
-  // in the printf since compilers tend to struggle with the parametrized
-  // width (%0*).
-  const char* format = "# %s0x%0*" PRIx64 "%s -> %s0x%016" PRIx64 "%s\n";
-  fprintf(stream_,
-          format,
-          clr_memory_value,
-          num_bytes * 2,  // The width in hexa characters.
-          value,
-          clr_normal,
-          clr_memory_address,
-          address,
-          clr_normal);
+void Simulator::PrintFPRegister(unsigned code, PrintFPRegisterSizes sizes) {
+  // The template is "# v<code>:bits (d<code>:value, ...)".
+
+  DCHECK(sizes != 0);
+  DCHECK((sizes & kPrintAllFPRegValues) == sizes);
+
+  // Print the raw bits.
+  fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (",
+          clr_fpreg_name, VRegNameForCode(code),
+          clr_fpreg_value, fpreg<uint64_t>(code), clr_normal);
+
+  // Print all requested value interpretations.
+  bool need_separator = false;
+  if (sizes & kPrintDRegValue) {
+    fprintf(stream_, "%s%s%s: %s%g%s",
+            need_separator ? ", " : "",
+            clr_fpreg_name, DRegNameForCode(code),
+            clr_fpreg_value, fpreg<double>(code), clr_normal);
+    need_separator = true;
+  }
+
+  if (sizes & kPrintSRegValue) {
+    fprintf(stream_, "%s%s%s: %s%g%s",
+            need_separator ? ", " : "",
+            clr_fpreg_name, SRegNameForCode(code),
+            clr_fpreg_value, fpreg<float>(code), clr_normal);
+    need_separator = true;
+  }
+
+  // End the value list.
+  fprintf(stream_, ")\n");
+}
+
+
+void Simulator::PrintSystemRegister(SystemRegister id) {
+  switch (id) {
+    case NZCV:
+      fprintf(stream_, "# %sNZCV: %sN:%d Z:%d C:%d V:%d%s\n",
+              clr_flag_name, clr_flag_value,
+              nzcv().N(), nzcv().Z(), nzcv().C(), nzcv().V(),
+              clr_normal);
+      break;
+    case FPCR: {
+      static const char * rmode[] = {
+        "0b00 (Round to Nearest)",
+        "0b01 (Round towards Plus Infinity)",
+        "0b10 (Round towards Minus Infinity)",
+        "0b11 (Round towards Zero)"
+      };
+      DCHECK(fpcr().RMode() < arraysize(rmode));
+      fprintf(stream_,
+              "# %sFPCR: %sAHP:%d DN:%d FZ:%d RMode:%s%s\n",
+              clr_flag_name, clr_flag_value,
+              fpcr().AHP(), fpcr().DN(), fpcr().FZ(), rmode[fpcr().RMode()],
+              clr_normal);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void Simulator::PrintRead(uintptr_t address,
+                          size_t size,
+                          unsigned reg_code) {
+  USE(size);  // Size is unused here.
+
+  // The template is "# x<code>:value <- address".
+  fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
+          clr_reg_name, XRegNameForCode(reg_code),
+          clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
+
+  fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+          clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintReadFP(uintptr_t address,
+                            size_t size,
+                            unsigned reg_code) {
+  // The template is "# reg:bits (reg:value) <- address".
+  switch (size) {
+    case kSRegSize:
+      fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%gf%s)",
+              clr_fpreg_name, VRegNameForCode(reg_code),
+              clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+              clr_fpreg_name, SRegNameForCode(reg_code),
+              clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
+      break;
+    case kDRegSize:
+      fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
+              clr_fpreg_name, VRegNameForCode(reg_code),
+              clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+              clr_fpreg_name, DRegNameForCode(reg_code),
+              clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  fprintf(stream_, " <- %s0x%016" PRIxPTR "%s\n",
+          clr_memory_address, address, clr_normal);
+}
+
+
+void Simulator::PrintWrite(uintptr_t address,
+                           size_t size,
+                           unsigned reg_code) {
+  // The template is "# reg:value -> address". To keep the trace tidy and
+  // readable, the value is aligned with the values in the register trace.
+  switch (size) {
+    case kByteSizeInBytes:
+      fprintf(stream_, "# %s%5s<7:0>:          %s0x%02" PRIx8 "%s",
+              clr_reg_name, WRegNameForCode(reg_code),
+              clr_reg_value, reg<uint8_t>(reg_code), clr_normal);
+      break;
+    case kHalfWordSizeInBytes:
+      fprintf(stream_, "# %s%5s<15:0>:       %s0x%04" PRIx16 "%s",
+              clr_reg_name, WRegNameForCode(reg_code),
+              clr_reg_value, reg<uint16_t>(reg_code), clr_normal);
+      break;
+    case kWRegSize:
+      fprintf(stream_, "# %s%5s:         %s0x%08" PRIx32 "%s",
+              clr_reg_name, WRegNameForCode(reg_code),
+              clr_reg_value, reg<uint32_t>(reg_code), clr_normal);
+      break;
+    case kXRegSize:
+      fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s",
+              clr_reg_name, XRegNameForCode(reg_code),
+              clr_reg_value, reg<uint64_t>(reg_code), clr_normal);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+          clr_memory_address, address, clr_normal);
+}
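
The uneven runs of spaces in these format strings are deliberate: narrower accesses are padded so every hex value ends in the same column as the 64-bit lines of the register trace. A compilable illustration of the spacing, with the register names hard-coded for brevity:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      uint8_t b = 0xab;
      uint64_t x = UINT64_C(0x0123456789abcdef);
      printf("#    w0<7:0>:          0x%02" PRIx8 "\n", b);  // byte store
      printf("#    x1: 0x%016" PRIx64 "\n", x);              // full register
      // Both values end in the same column:
      // #    w0<7:0>:          0xab
      // #    x1: 0x0123456789abcdef
    }
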
+
+
+void Simulator::PrintWriteFP(uintptr_t address,
+                             size_t size,
+                             unsigned reg_code) {
+  // The template is "# reg:bits (reg:value) -> address". To keep the trace tidy
+  // and readable, the value is aligned with the values in the register trace.
+  switch (size) {
+    case kSRegSize:
+      fprintf(stream_, "# %s%5s<31:0>:   %s0x%08" PRIx32 "%s (%s%s: %s%gf%s)",
+              clr_fpreg_name, VRegNameForCode(reg_code),
+              clr_fpreg_value, fpreg<uint32_t>(reg_code), clr_normal,
+              clr_fpreg_name, SRegNameForCode(reg_code),
+              clr_fpreg_value, fpreg<float>(reg_code), clr_normal);
+      break;
+    case kDRegSize:
+      fprintf(stream_, "# %s%5s: %s0x%016" PRIx64 "%s (%s%s: %s%g%s)",
+              clr_fpreg_name, VRegNameForCode(reg_code),
+              clr_fpreg_value, fpreg<uint64_t>(reg_code), clr_normal,
+              clr_fpreg_name, DRegNameForCode(reg_code),
+              clr_fpreg_value, fpreg<double>(reg_code), clr_normal);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  fprintf(stream_, " -> %s0x%016" PRIxPTR "%s\n",
+          clr_memory_address, address, clr_normal);
 }
 
 
@@ -1204,7 +1304,7 @@
 
 
 void Simulator::VisitConditionalBranch(Instruction* instr) {
-  ASSERT(instr->Mask(ConditionalBranchMask) == B_cond);
+  DCHECK(instr->Mask(ConditionalBranchMask) == B_cond);
   if (ConditionPassed(static_cast<Condition>(instr->ConditionBranch()))) {
     set_pc(instr->ImmPCOffsetTarget());
   }
@@ -1383,6 +1483,7 @@
     nzcv().SetZ(CalcZFlag(result));
     nzcv().SetC(0);
     nzcv().SetV(0);
+    LogSystemRegister(NZCV);
   }
 
   set_reg<T>(instr->Rd(), result, instr->RdMode());
@@ -1417,12 +1518,13 @@
     if (instr->Mask(ConditionalCompareMask) == CCMP) {
       AddWithCarry<T>(true, op1, ~op2, 1);
     } else {
-      ASSERT(instr->Mask(ConditionalCompareMask) == CCMN);
+      DCHECK(instr->Mask(ConditionalCompareMask) == CCMN);
       AddWithCarry<T>(true, op1, op2, 0);
     }
   } else {
     // If the condition fails, set the status flags to the nzcv immediate.
     nzcv().SetFlags(instr->Nzcv());
+    LogSystemRegister(NZCV);
   }
 }
 
@@ -1450,7 +1552,7 @@
 
 void Simulator::VisitLoadStoreRegisterOffset(Instruction* instr) {
   Extend ext = static_cast<Extend>(instr->ExtendMode());
-  ASSERT((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
+  DCHECK((ext == UXTW) || (ext == UXTX) || (ext == SXTW) || (ext == SXTX));
   unsigned shift_amount = instr->ImmShiftLS() * instr->SizeLS();
 
   int64_t offset = ExtendValue(xreg(instr->Rm()), ext, shift_amount);
@@ -1463,9 +1565,8 @@
                                 AddrMode addrmode) {
   unsigned srcdst = instr->Rt();
   unsigned addr_reg = instr->Rn();
-  uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
-  int num_bytes = 1 << instr->SizeLS();
-  uint8_t* stack = NULL;
+  uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
+  uintptr_t stack = 0;
 
   // Handle the writeback for stores before the store. On a CPU the writeback
   // and the store are atomic, but when running on the simulator it is possible
@@ -1479,46 +1580,52 @@
 
     // For stores, the address after writeback is used to check for access
     // below the stack.
-    stack = reinterpret_cast<uint8_t*>(sp());
+    stack = sp();
   }
 
   LoadStoreOp op = static_cast<LoadStoreOp>(instr->Mask(LoadStoreOpMask));
   switch (op) {
-    case LDRB_w:
-    case LDRH_w:
-    case LDR_w:
-    case LDR_x: set_xreg(srcdst, MemoryRead(address, num_bytes)); break;
-    case STRB_w:
-    case STRH_w:
-    case STR_w:
-    case STR_x: MemoryWrite(address, xreg(srcdst), num_bytes); break;
-    case LDRSB_w: {
-      set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead8(address), SXTB));
-      break;
-    }
-    case LDRSB_x: {
-      set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead8(address), SXTB));
-      break;
-    }
-    case LDRSH_w: {
-      set_wreg(srcdst, ExtendValue<int32_t>(MemoryRead16(address), SXTH));
-      break;
-    }
-    case LDRSH_x: {
-      set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead16(address), SXTH));
-      break;
-    }
-    case LDRSW_x: {
-      set_xreg(srcdst, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
-      break;
-    }
-    case LDR_s: set_sreg(srcdst, MemoryReadFP32(address)); break;
-    case LDR_d: set_dreg(srcdst, MemoryReadFP64(address)); break;
-    case STR_s: MemoryWriteFP32(address, sreg(srcdst)); break;
-    case STR_d: MemoryWriteFP64(address, dreg(srcdst)); break;
+    // Use _no_log variants to suppress the register trace (LOG_REGS,
+    // LOG_FP_REGS). We will print a more detailed log.
+    case LDRB_w:  set_wreg_no_log(srcdst, MemoryRead<uint8_t>(address)); break;
+    case LDRH_w:  set_wreg_no_log(srcdst, MemoryRead<uint16_t>(address)); break;
+    case LDR_w:   set_wreg_no_log(srcdst, MemoryRead<uint32_t>(address)); break;
+    case LDR_x:   set_xreg_no_log(srcdst, MemoryRead<uint64_t>(address)); break;
+    case LDRSB_w: set_wreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
+    case LDRSH_w: set_wreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
+    case LDRSB_x: set_xreg_no_log(srcdst, MemoryRead<int8_t>(address)); break;
+    case LDRSH_x: set_xreg_no_log(srcdst, MemoryRead<int16_t>(address)); break;
+    case LDRSW_x: set_xreg_no_log(srcdst, MemoryRead<int32_t>(address)); break;
+    case LDR_s:   set_sreg_no_log(srcdst, MemoryRead<float>(address)); break;
+    case LDR_d:   set_dreg_no_log(srcdst, MemoryRead<double>(address)); break;
+
+    case STRB_w:  MemoryWrite<uint8_t>(address, wreg(srcdst)); break;
+    case STRH_w:  MemoryWrite<uint16_t>(address, wreg(srcdst)); break;
+    case STR_w:   MemoryWrite<uint32_t>(address, wreg(srcdst)); break;
+    case STR_x:   MemoryWrite<uint64_t>(address, xreg(srcdst)); break;
+    case STR_s:   MemoryWrite<float>(address, sreg(srcdst)); break;
+    case STR_d:   MemoryWrite<double>(address, dreg(srcdst)); break;
+
     default: UNIMPLEMENTED();
   }
 
+  // Print a detailed trace (including the memory address) instead of the basic
+  // register:value trace generated by set_*reg().
+  size_t access_size = 1 << instr->SizeLS();
+  if (instr->IsLoad()) {
+    if ((op == LDR_s) || (op == LDR_d)) {
+      LogReadFP(address, access_size, srcdst);
+    } else {
+      LogRead(address, access_size, srcdst);
+    }
+  } else {
+    if ((op == STR_s) || (op == STR_d)) {
+      LogWriteFP(address, access_size, srcdst);
+    } else {
+      LogWrite(address, access_size, srcdst);
+    }
+  }
+
   // Handle the writeback for loads after the load to ensure safe pop
   // operation even when interrupted in the middle of it. The stack pointer
   // is only updated after the load so pop(fp) will never break the invariant
@@ -1526,7 +1633,7 @@
   if (instr->IsLoad()) {
     // For loads, the address before writeback is used to check for access
     // below the stack.
-    stack = reinterpret_cast<uint8_t*>(sp());
+    stack = sp();
 
     LoadStoreWriteBack(addr_reg, offset, addrmode);
   }
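
Every case above follows the same shape: update the register with a _no_log setter, then emit one richer line that also records the memory address. A runnable sketch of that shape; registers, set_xreg_no_log and LogRead here are simplified stand-ins for the simulator's versions:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    uint64_t registers[32];  // stand-in register file

    template <typename T>
    T MemoryRead(uintptr_t address) {
      T value;
      memcpy(&value, reinterpret_cast<const void*>(address), sizeof(value));
      return value;
    }

    // Update the register without emitting the basic "reg: value" trace.
    void set_xreg_no_log(unsigned code, uint64_t value) {
      registers[code] = value;
    }

    // Emit one detailed line that also shows where the value came from.
    void LogRead(uintptr_t address, unsigned code) {
      printf("# x%u: 0x%016" PRIx64 " <- 0x%016" PRIxPTR "\n",
             code, registers[code], address);
    }

    int main() {
      uint64_t cell = UINT64_C(0x0123456789abcdef);
      uintptr_t addr = reinterpret_cast<uintptr_t>(&cell);
      set_xreg_no_log(0, MemoryRead<uint64_t>(addr));  // silent update
      LogRead(addr, 0);                                // one detailed line
    }
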
@@ -1562,9 +1669,11 @@
   unsigned rt = instr->Rt();
   unsigned rt2 = instr->Rt2();
   unsigned addr_reg = instr->Rn();
-  int offset = instr->ImmLSPair() << instr->SizeLSPair();
-  uint8_t* address = LoadStoreAddress(addr_reg, offset, addrmode);
-  uint8_t* stack = NULL;
+  size_t access_size = 1 << instr->SizeLSPair();
+  int64_t offset = instr->ImmLSPair() * access_size;
+  uintptr_t address = LoadStoreAddress(addr_reg, offset, addrmode);
+  uintptr_t address2 = address + access_size;
+  uintptr_t stack = 0;
 
   // Handle the writeback for stores before the store. On a CPU the writeback
   // and the store are atomic, but when running on the simulator it is possible
@@ -1578,65 +1687,95 @@
 
     // For stores, the address after writeback is used to check for access
     // below the stack.
-    stack = reinterpret_cast<uint8_t*>(sp());
+    stack = sp();
   }
 
   LoadStorePairOp op =
     static_cast<LoadStorePairOp>(instr->Mask(LoadStorePairMask));
 
   // 'rt' and 'rt2' can only be aliased for stores.
-  ASSERT(((op & LoadStorePairLBit) == 0) || (rt != rt2));
+  DCHECK(((op & LoadStorePairLBit) == 0) || (rt != rt2));
 
   switch (op) {
+    // Use _no_log variants to suppress the register trace (LOG_REGS,
+    // LOG_FP_REGS). We will print a more detailed log.
     case LDP_w: {
-      set_wreg(rt, MemoryRead32(address));
-      set_wreg(rt2, MemoryRead32(address + kWRegSize));
+      DCHECK(access_size == kWRegSize);
+      set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
+      set_wreg_no_log(rt2, MemoryRead<uint32_t>(address2));
       break;
     }
     case LDP_s: {
-      set_sreg(rt, MemoryReadFP32(address));
-      set_sreg(rt2, MemoryReadFP32(address + kSRegSize));
+      DCHECK(access_size == kSRegSize);
+      set_sreg_no_log(rt, MemoryRead<float>(address));
+      set_sreg_no_log(rt2, MemoryRead<float>(address2));
       break;
     }
     case LDP_x: {
-      set_xreg(rt, MemoryRead64(address));
-      set_xreg(rt2, MemoryRead64(address + kXRegSize));
+      DCHECK(access_size == kXRegSize);
+      set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
+      set_xreg_no_log(rt2, MemoryRead<uint64_t>(address2));
       break;
     }
     case LDP_d: {
-      set_dreg(rt, MemoryReadFP64(address));
-      set_dreg(rt2, MemoryReadFP64(address + kDRegSize));
+      DCHECK(access_size == kDRegSize);
+      set_dreg_no_log(rt, MemoryRead<double>(address));
+      set_dreg_no_log(rt2, MemoryRead<double>(address2));
       break;
     }
     case LDPSW_x: {
-      set_xreg(rt, ExtendValue<int64_t>(MemoryRead32(address), SXTW));
-      set_xreg(rt2, ExtendValue<int64_t>(
-               MemoryRead32(address + kWRegSize), SXTW));
+      DCHECK(access_size == kWRegSize);
+      set_xreg_no_log(rt, MemoryRead<int32_t>(address));
+      set_xreg_no_log(rt2, MemoryRead<int32_t>(address2));
       break;
     }
     case STP_w: {
-      MemoryWrite32(address, wreg(rt));
-      MemoryWrite32(address + kWRegSize, wreg(rt2));
+      DCHECK(access_size == kWRegSize);
+      MemoryWrite<uint32_t>(address, wreg(rt));
+      MemoryWrite<uint32_t>(address2, wreg(rt2));
       break;
     }
     case STP_s: {
-      MemoryWriteFP32(address, sreg(rt));
-      MemoryWriteFP32(address + kSRegSize, sreg(rt2));
+      DCHECK(access_size == kSRegSize);
+      MemoryWrite<float>(address, sreg(rt));
+      MemoryWrite<float>(address2, sreg(rt2));
       break;
     }
     case STP_x: {
-      MemoryWrite64(address, xreg(rt));
-      MemoryWrite64(address + kXRegSize, xreg(rt2));
+      DCHECK(access_size == kXRegSize);
+      MemoryWrite<uint64_t>(address, xreg(rt));
+      MemoryWrite<uint64_t>(address2, xreg(rt2));
       break;
     }
     case STP_d: {
-      MemoryWriteFP64(address, dreg(rt));
-      MemoryWriteFP64(address + kDRegSize, dreg(rt2));
+      DCHECK(access_size == kDRegSize);
+      MemoryWrite<double>(address, dreg(rt));
+      MemoryWrite<double>(address2, dreg(rt2));
       break;
     }
     default: UNREACHABLE();
   }
 
+  // Print a detailed trace (including the memory address) instead of the basic
+  // register:value trace generated by set_*reg().
+  if (instr->IsLoad()) {
+    if ((op == LDP_s) || (op == LDP_d)) {
+      LogReadFP(address, access_size, rt);
+      LogReadFP(address2, access_size, rt2);
+    } else {
+      LogRead(address, access_size, rt);
+      LogRead(address2, access_size, rt2);
+    }
+  } else {
+    if ((op == STP_s) || (op == STP_d)) {
+      LogWriteFP(address, access_size, rt);
+      LogWriteFP(address2, access_size, rt2);
+    } else {
+      LogWrite(address, access_size, rt);
+      LogWrite(address2, access_size, rt2);
+    }
+  }
+
   // Handle the writeback for loads after the load to ensure safe pop
   // operation even when interrupted in the middle of it. The stack pointer
   // is only updated after the load so pop(fp) will never break the invariant
@@ -1644,7 +1783,7 @@
   if (instr->IsLoad()) {
     // For loads, the address before writeback is used to check for access
     // below the stack.
-    stack = reinterpret_cast<uint8_t*>(sp());
+    stack = sp();
 
     LoadStoreWriteBack(addr_reg, offset, addrmode);
   }
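
The pair variants derive both slots from a single scaled immediate: the encoded offset is multiplied by the access size, and the second register uses the adjacent slot. A small sketch of the address arithmetic with illustrative values:

    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t imm_ls_pair = 2;    // encoded immediate (illustrative value)
      unsigned size_ls_pair = 3;  // log2 of the access size: 8-byte X/D slots
      size_t access_size = size_t{1} << size_ls_pair;
      int64_t offset = imm_ls_pair * static_cast<int64_t>(access_size);
      uintptr_t base = 0x1000;                     // pretend base register
      uintptr_t address = base + offset;           // first slot:  0x1010
      uintptr_t address2 = address + access_size;  // second slot: 0x1018
      printf("address: 0x%" PRIxPTR ", address2: 0x%" PRIxPTR "\n",
             address, address2);
    }
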
@@ -1656,24 +1795,37 @@
 
 
 void Simulator::VisitLoadLiteral(Instruction* instr) {
-  uint8_t* address = instr->LiteralAddress();
+  uintptr_t address = instr->LiteralAddress();
   unsigned rt = instr->Rt();
 
   switch (instr->Mask(LoadLiteralMask)) {
-    case LDR_w_lit: set_wreg(rt, MemoryRead32(address));  break;
-    case LDR_x_lit: set_xreg(rt, MemoryRead64(address));  break;
-    case LDR_s_lit: set_sreg(rt, MemoryReadFP32(address));  break;
-    case LDR_d_lit: set_dreg(rt, MemoryReadFP64(address));  break;
+    // Use _no_log variants to suppress the register trace (LOG_REGS,
+    // LOG_FP_REGS), then print a more detailed log.
+    case LDR_w_lit:
+      set_wreg_no_log(rt, MemoryRead<uint32_t>(address));
+      LogRead(address, kWRegSize, rt);
+      break;
+    case LDR_x_lit:
+      set_xreg_no_log(rt, MemoryRead<uint64_t>(address));
+      LogRead(address, kXRegSize, rt);
+      break;
+    case LDR_s_lit:
+      set_sreg_no_log(rt, MemoryRead<float>(address));
+      LogReadFP(address, kSRegSize, rt);
+      break;
+    case LDR_d_lit:
+      set_dreg_no_log(rt, MemoryRead<double>(address));
+      LogReadFP(address, kDRegSize, rt);
+      break;
     default: UNREACHABLE();
   }
 }
 
 
-uint8_t* Simulator::LoadStoreAddress(unsigned addr_reg,
-                                     int64_t offset,
-                                     AddrMode addrmode) {
+uintptr_t Simulator::LoadStoreAddress(unsigned addr_reg, int64_t offset,
+                                      AddrMode addrmode) {
   const unsigned kSPRegCode = kSPRegInternalCode & kRegCodeMask;
-  int64_t address = xreg(addr_reg, Reg31IsStackPointer);
+  uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
   if ((addr_reg == kSPRegCode) && ((address % 16) != 0)) {
     // When the base register is SP the stack pointer is required to be
     // quadword aligned prior to the address calculation and write-backs.
@@ -1685,7 +1837,7 @@
     address += offset;
   }
 
-  return reinterpret_cast<uint8_t*>(address);
+  return address;
 }
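
The "(address % 16) != 0" check above enforces the AArch64 rule that csp must be quadword (16-byte) aligned whenever it is used as a load/store base. The same predicate in isolation, using the mask form that is equivalent to the modulo for unsigned values:

    #include <cstdint>

    // True when a csp-based access satisfies AArch64's 16-byte alignment
    // rule; (address & 15) == 0 matches (address % 16) == 0 for unsigned
    // operands.
    inline bool IsQuadWordAligned(uint64_t address) {
      return (address & 15) == 0;
    }
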
 
 
@@ -1693,95 +1845,28 @@
                                    int64_t offset,
                                    AddrMode addrmode) {
   if ((addrmode == PreIndex) || (addrmode == PostIndex)) {
-    ASSERT(offset != 0);
+    DCHECK(offset != 0);
     uint64_t address = xreg(addr_reg, Reg31IsStackPointer);
     set_reg(addr_reg, address + offset, Reg31IsStackPointer);
   }
 }
 
 
-void Simulator::CheckMemoryAccess(uint8_t* address, uint8_t* stack) {
+void Simulator::CheckMemoryAccess(uintptr_t address, uintptr_t stack) {
   if ((address >= stack_limit_) && (address < stack)) {
     fprintf(stream_, "ACCESS BELOW STACK POINTER:\n");
-    fprintf(stream_, "  sp is here:          0x%16p\n", stack);
-    fprintf(stream_, "  access was here:     0x%16p\n", address);
-    fprintf(stream_, "  stack limit is here: 0x%16p\n", stack_limit_);
+    fprintf(stream_, "  sp is here:          0x%016" PRIx64 "\n",
+            static_cast<uint64_t>(stack));
+    fprintf(stream_, "  access was here:     0x%016" PRIx64 "\n",
+            static_cast<uint64_t>(address));
+    fprintf(stream_, "  stack limit is here: 0x%016" PRIx64 "\n",
+            static_cast<uint64_t>(stack_limit_));
     fprintf(stream_, "\n");
     FATAL("ACCESS BELOW STACK POINTER");
   }
 }
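
The guard is an interval test: any access at or above the stack limit but still below the current stack pointer is fatal. A self-contained sketch of the same check, with the simulator state passed as parameters:

    #include <cstdint>
    #include <cstdio>
    #include <cstdlib>

    // Abort on any access in [stack_limit, sp): below the stack pointer but
    // still inside the simulator's guarded stack region.
    void CheckAccess(uintptr_t address, uintptr_t sp, uintptr_t stack_limit) {
      if (address >= stack_limit && address < sp) {
        fprintf(stderr, "ACCESS BELOW STACK POINTER\n");
        abort();
      }
    }
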
 
 
-uint64_t Simulator::MemoryRead(uint8_t* address, unsigned num_bytes) {
-  ASSERT(address != NULL);
-  ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
-  uint64_t read = 0;
-  memcpy(&read, address, num_bytes);
-  return read;
-}
-
-
-uint8_t Simulator::MemoryRead8(uint8_t* address) {
-  return MemoryRead(address, sizeof(uint8_t));
-}
-
-
-uint16_t Simulator::MemoryRead16(uint8_t* address) {
-  return MemoryRead(address, sizeof(uint16_t));
-}
-
-
-uint32_t Simulator::MemoryRead32(uint8_t* address) {
-  return MemoryRead(address, sizeof(uint32_t));
-}
-
-
-float Simulator::MemoryReadFP32(uint8_t* address) {
-  return rawbits_to_float(MemoryRead32(address));
-}
-
-
-uint64_t Simulator::MemoryRead64(uint8_t* address) {
-  return MemoryRead(address, sizeof(uint64_t));
-}
-
-
-double Simulator::MemoryReadFP64(uint8_t* address) {
-  return rawbits_to_double(MemoryRead64(address));
-}
-
-
-void Simulator::MemoryWrite(uint8_t* address,
-                            uint64_t value,
-                            unsigned num_bytes) {
-  ASSERT(address != NULL);
-  ASSERT((num_bytes > 0) && (num_bytes <= sizeof(uint64_t)));
-
-  LogWrite(address, value, num_bytes);
-  memcpy(address, &value, num_bytes);
-}
-
-
-void Simulator::MemoryWrite32(uint8_t* address, uint32_t value) {
-  MemoryWrite(address, value, sizeof(uint32_t));
-}
-
-
-void Simulator::MemoryWriteFP32(uint8_t* address, float value) {
-  MemoryWrite32(address, float_to_rawbits(value));
-}
-
-
-void Simulator::MemoryWrite64(uint8_t* address, uint64_t value) {
-  MemoryWrite(address, value, sizeof(uint64_t));
-}
-
-
-void Simulator::MemoryWriteFP64(uint8_t* address, double value) {
-  MemoryWrite64(address, double_to_rawbits(value));
-}
-
-
 void Simulator::VisitMoveWideImmediate(Instruction* instr) {
   MoveWideImmediateOp mov_op =
     static_cast<MoveWideImmediateOp>(instr->Mask(MoveWideImmediateMask));
@@ -1789,7 +1874,7 @@
 
   bool is_64_bits = instr->SixtyFourBits() == 1;
   // Shift is limited for W operations.
-  ASSERT(is_64_bits || (instr->ShiftMoveWide() < 2));
+  DCHECK(is_64_bits || (instr->ShiftMoveWide() < 2));
 
   // Get the shifted immediate.
   int64_t shift = instr->ShiftMoveWide() * 16;
@@ -1879,7 +1964,7 @@
 
 
 uint64_t Simulator::ReverseBits(uint64_t value, unsigned num_bits) {
-  ASSERT((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
+  DCHECK((num_bits == kWRegSizeInBits) || (num_bits == kXRegSizeInBits));
   uint64_t result = 0;
   for (unsigned i = 0; i < num_bits; i++) {
     result = (result << 1) | (value & 1);
@@ -1903,7 +1988,7 @@
   //  permute_table[Reverse16] is used by REV16_x, REV16_w
   //  permute_table[Reverse32] is used by REV32_x, REV_w
   //  permute_table[Reverse64] is used by REV_x
-  ASSERT((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
+  DCHECK((Reverse16 == 0) && (Reverse32 == 1) && (Reverse64 == 2));
   static const uint8_t permute_table[3][8] = { {6, 7, 4, 5, 2, 3, 0, 1},
                                                {4, 5, 6, 7, 0, 1, 2, 3},
                                                {0, 1, 2, 3, 4, 5, 6, 7} };
@@ -2026,7 +2111,7 @@
     case UMADDL_x: result = xreg(instr->Ra()) + (rn_u32 * rm_u32); break;
     case UMSUBL_x: result = xreg(instr->Ra()) - (rn_u32 * rm_u32); break;
     case SMULH_x:
-      ASSERT(instr->Ra() == kZeroRegCode);
+      DCHECK(instr->Ra() == kZeroRegCode);
       result = MultiplyHighSigned(xreg(instr->Rn()), xreg(instr->Rm()));
       break;
     default: UNIMPLEMENTED();
@@ -2330,6 +2415,7 @@
       } else {
         // If the condition fails, set the status flags to the nzcv immediate.
         nzcv().SetFlags(instr->Nzcv());
+        LogSystemRegister(NZCV);
       }
       break;
     }
@@ -2406,10 +2492,10 @@
 template <class T, int ebits, int mbits>
 static T FPRound(int64_t sign, int64_t exponent, uint64_t mantissa,
                  FPRounding round_mode) {
-  ASSERT((sign == 0) || (sign == 1));
+  DCHECK((sign == 0) || (sign == 1));
 
   // Only the FPTieEven rounding mode is implemented.
-  ASSERT(round_mode == FPTieEven);
+  DCHECK(round_mode == FPTieEven);
   USE(round_mode);
 
   // Rounding can promote subnormals to normals, and normals to infinities. For
@@ -2726,7 +2812,7 @@
 
 float Simulator::FPToFloat(double value, FPRounding round_mode) {
   // Only the FPTieEven rounding mode is implemented.
-  ASSERT(round_mode == FPTieEven);
+  DCHECK(round_mode == FPTieEven);
   USE(round_mode);
 
   switch (std::fpclassify(value)) {
@@ -2855,7 +2941,7 @@
 template <typename T>
 T Simulator::FPAdd(T op1, T op2) {
   // NaNs should be handled elsewhere.
-  ASSERT(!std::isnan(op1) && !std::isnan(op2));
+  DCHECK(!std::isnan(op1) && !std::isnan(op2));
 
   if (std::isinf(op1) && std::isinf(op2) && (op1 != op2)) {
     // inf + -inf returns the default NaN.
@@ -2870,7 +2956,7 @@
 template <typename T>
 T Simulator::FPDiv(T op1, T op2) {
   // NaNs should be handled elsewhere.
-  ASSERT(!std::isnan(op1) && !std::isnan(op2));
+  DCHECK(!std::isnan(op1) && !std::isnan(op2));
 
   if ((std::isinf(op1) && std::isinf(op2)) || ((op1 == 0.0) && (op2 == 0.0))) {
     // inf / inf and 0.0 / 0.0 return the default NaN.
@@ -2885,7 +2971,7 @@
 template <typename T>
 T Simulator::FPMax(T a, T b) {
   // NaNs should be handled elsewhere.
-  ASSERT(!std::isnan(a) && !std::isnan(b));
+  DCHECK(!std::isnan(a) && !std::isnan(b));
 
   if ((a == 0.0) && (b == 0.0) &&
       (copysign(1.0, a) != copysign(1.0, b))) {
@@ -2912,7 +2998,7 @@
 template <typename T>
 T Simulator::FPMin(T a, T b) {
   // NaNs should be handled elsewhere.
-  ASSERT(!isnan(a) && !isnan(b));
+  DCHECK(!std::isnan(a) && !std::isnan(b));
 
   if ((a == 0.0) && (b == 0.0) &&
       (copysign(1.0, a) != copysign(1.0, b))) {
@@ -2940,7 +3026,7 @@
 template <typename T>
 T Simulator::FPMul(T op1, T op2) {
   // NaNs should be handled elsewhere.
-  ASSERT(!std::isnan(op1) && !std::isnan(op2));
+  DCHECK(!std::isnan(op1) && !std::isnan(op2));
 
   if ((std::isinf(op1) && (op2 == 0.0)) || (std::isinf(op2) && (op1 == 0.0))) {
     // inf * 0.0 returns the default NaN.
@@ -2985,7 +3071,7 @@
   }
 
   result = FusedMultiplyAdd(op1, op2, a);
-  ASSERT(!std::isnan(result));
+  DCHECK(!std::isnan(result));
 
   // Work around broken fma implementations for rounded zero results: If a is
   // 0.0, the sign of the result is the sign of op1 * op2 before rounding.
@@ -3012,7 +3098,7 @@
 template <typename T>
 T Simulator::FPSub(T op1, T op2) {
   // NaNs should be handled elsewhere.
-  ASSERT(!std::isnan(op1) && !std::isnan(op2));
+  DCHECK(!std::isnan(op1) && !std::isnan(op2));
 
   if (std::isinf(op1) && std::isinf(op2) && (op1 == op2)) {
     // inf - inf returns the default NaN.
@@ -3026,7 +3112,7 @@
 
 template <typename T>
 T Simulator::FPProcessNaN(T op) {
-  ASSERT(std::isnan(op));
+  DCHECK(std::isnan(op));
   return fpcr().DN() ? FPDefaultNaN<T>() : ToQuietNaN(op);
 }
 
@@ -3038,10 +3124,10 @@
   } else if (IsSignallingNaN(op2)) {
     return FPProcessNaN(op2);
   } else if (std::isnan(op1)) {
-    ASSERT(IsQuietNaN(op1));
+    DCHECK(IsQuietNaN(op1));
     return FPProcessNaN(op1);
   } else if (std::isnan(op2)) {
-    ASSERT(IsQuietNaN(op2));
+    DCHECK(IsQuietNaN(op2));
     return FPProcessNaN(op2);
   } else {
     return 0.0;
@@ -3058,13 +3144,13 @@
   } else if (IsSignallingNaN(op3)) {
     return FPProcessNaN(op3);
   } else if (std::isnan(op1)) {
-    ASSERT(IsQuietNaN(op1));
+    DCHECK(IsQuietNaN(op1));
     return FPProcessNaN(op1);
   } else if (std::isnan(op2)) {
-    ASSERT(IsQuietNaN(op2));
+    DCHECK(IsQuietNaN(op2));
     return FPProcessNaN(op2);
   } else if (std::isnan(op3)) {
-    ASSERT(IsQuietNaN(op3));
+    DCHECK(IsQuietNaN(op3));
     return FPProcessNaN(op3);
   } else {
     return 0.0;
@@ -3112,15 +3198,21 @@
       }
       case MSR: {
         switch (instr->ImmSystemRegister()) {
-          case NZCV: nzcv().SetRawValue(xreg(instr->Rt())); break;
-          case FPCR: fpcr().SetRawValue(xreg(instr->Rt())); break;
+          case NZCV:
+            nzcv().SetRawValue(xreg(instr->Rt()));
+            LogSystemRegister(NZCV);
+            break;
+          case FPCR:
+            fpcr().SetRawValue(xreg(instr->Rt()));
+            LogSystemRegister(FPCR);
+            break;
           default: UNIMPLEMENTED();
         }
         break;
       }
     }
   } else if (instr->Mask(SystemHintFMask) == SystemHintFixed) {
-    ASSERT(instr->Mask(SystemHintMask) == HINT);
+    DCHECK(instr->Mask(SystemHintMask) == HINT);
     switch (instr->ImmHint()) {
       case NOP: break;
       default: UNIMPLEMENTED();
@@ -3163,12 +3255,12 @@
 
 bool Simulator::PrintValue(const char* desc) {
   if (strcmp(desc, "csp") == 0) {
-    ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+    DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
     PrintF(stream_, "%s csp:%s 0x%016" PRIx64 "%s\n",
         clr_reg_name, clr_reg_value, xreg(31, Reg31IsStackPointer), clr_normal);
     return true;
   } else if (strcmp(desc, "wcsp") == 0) {
-    ASSERT(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
+    DCHECK(CodeFromName(desc) == static_cast<int>(kSPRegInternalCode));
     PrintF(stream_, "%s wcsp:%s 0x%08" PRIx32 "%s\n",
         clr_reg_name, clr_reg_value, wreg(31, Reg31IsStackPointer), clr_normal);
     return true;
@@ -3324,8 +3416,8 @@
       } else if ((strcmp(cmd, "print") == 0) || (strcmp(cmd, "p") == 0)) {
         if (argc == 2) {
           if (strcmp(arg1, "all") == 0) {
-            PrintRegisters(true);
-            PrintFPRegisters(true);
+            PrintRegisters();
+            PrintFPRegisters();
           } else {
             if (!PrintValue(arg1)) {
               PrintF("%s unrecognized\n", arg1);
@@ -3344,17 +3436,18 @@
                  (strcmp(cmd, "po") == 0)) {
         if (argc == 2) {
           int64_t value;
+          OFStream os(stdout);
           if (GetValue(arg1, &value)) {
             Object* obj = reinterpret_cast<Object*>(value);
-            PrintF("%s: \n", arg1);
+            os << arg1 << ": \n";
 #ifdef DEBUG
-            obj->PrintLn();
+            obj->Print(os);
+            os << "\n";
 #else
-            obj->ShortPrint();
-            PrintF("\n");
+            os << Brief(obj) << "\n";
 #endif
           } else {
-            PrintF("%s unrecognized\n", arg1);
+            os << arg1 << " unrecognized\n";
           }
         } else {
           PrintF("printobject <value>\n"
@@ -3444,7 +3537,7 @@
       // gdb -------------------------------------------------------------------
       } else if (strcmp(cmd, "gdb") == 0) {
         PrintF("Relinquishing control to gdb.\n");
-        OS::DebugBreak();
+        base::OS::DebugBreak();
         PrintF("Regaining control from gdb.\n");
 
       // sysregs ---------------------------------------------------------------
@@ -3528,7 +3621,7 @@
         if (FLAG_trace_sim_messages || FLAG_trace_sim || (parameters & BREAK)) {
           if (message != NULL) {
             PrintF(stream_,
-                   "%sDebugger hit %d: %s%s%s\n",
+                   "# %sDebugger hit %d: %s%s%s\n",
                    clr_debug_number,
                    code,
                    clr_debug_message,
@@ -3536,7 +3629,7 @@
                    clr_normal);
           } else {
             PrintF(stream_,
-                   "%sDebugger hit %d.%s\n",
+                   "# %sDebugger hit %d.%s\n",
                    clr_debug_number,
                    code,
                    clr_normal);
@@ -3559,13 +3652,13 @@
             break;
           default:
             // We don't support a one-shot LOG_DISASM.
-            ASSERT((parameters & LOG_DISASM) == 0);
+            DCHECK((parameters & LOG_DISASM) == 0);
             // Don't print information that is already being traced.
             parameters &= ~log_parameters();
             // Print the requested information.
-            if (parameters & LOG_SYS_REGS) PrintSystemRegisters(true);
-            if (parameters & LOG_REGS) PrintRegisters(true);
-            if (parameters & LOG_FP_REGS) PrintFPRegisters(true);
+            if (parameters & LOG_SYS_REGS) PrintSystemRegisters();
+            if (parameters & LOG_REGS) PrintRegisters();
+            if (parameters & LOG_FP_REGS) PrintFPRegisters();
         }
 
         // The stop parameters are inlined in the code. Skip them:
@@ -3573,8 +3666,8 @@
         size_t size = kDebugMessageOffset + strlen(message) + 1;
         pc_ = pc_->InstructionAtOffset(RoundUp(size, kInstructionSize));
         //  - Verify that the unreachable marker is present.
-        ASSERT(pc_->Mask(ExceptionMask) == HLT);
-        ASSERT(pc_->ImmException() ==  kImmExceptionIsUnreachable);
+        DCHECK(pc_->Mask(ExceptionMask) == HLT);
+        DCHECK(pc_->ImmException() ==  kImmExceptionIsUnreachable);
         //  - Skip past the unreachable marker.
         set_pc(pc_->following());
 
@@ -3592,7 +3685,7 @@
         abort();
 
       } else {
-        OS::DebugBreak();
+        base::OS::DebugBreak();
       }
       break;
     }
@@ -3604,7 +3697,7 @@
 
 
 void Simulator::DoPrintf(Instruction* instr) {
-  ASSERT((instr->Mask(ExceptionMask) == HLT) &&
+  DCHECK((instr->Mask(ExceptionMask) == HLT) &&
               (instr->ImmException() == kImmExceptionIsPrintf));
 
   // Read the arguments encoded inline in the instruction stream.
@@ -3618,8 +3711,8 @@
          instr + kPrintfArgPatternListOffset,
          sizeof(arg_pattern_list));
 
-  ASSERT(arg_count <= kPrintfMaxArgCount);
-  ASSERT((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
+  DCHECK(arg_count <= kPrintfMaxArgCount);
+  DCHECK((arg_pattern_list >> (kPrintfArgPatternBits * arg_count)) == 0);
 
   // We need to call the host printf function with a set of arguments defined by
   // arg_pattern_list. Because we don't know the types and sizes of the
@@ -3631,7 +3724,7 @@
   // Leave enough space for one extra character per expected argument (plus the
   // '\0' termination).
   const char * format_base = reg<const char *>(0);
-  ASSERT(format_base != NULL);
+  DCHECK(format_base != NULL);
   size_t length = strlen(format_base) + 1;
   char * const format = new char[length + arg_count];
 
@@ -3666,7 +3759,7 @@
       }
     }
   }
-  ASSERT(format_scratch <= (format + length + arg_count));
+  DCHECK(format_scratch <= (format + length + arg_count));
   CHECK(placeholder_count == arg_count);
 
   // Finally, call printf with each chunk, passing the appropriate register
diff --git a/src/arm64/simulator-arm64.h b/src/arm64/simulator-arm64.h
index bf74de8..108f6f2 100644
--- a/src/arm64/simulator-arm64.h
+++ b/src/arm64/simulator-arm64.h
@@ -10,14 +10,14 @@
 
 #include "src/v8.h"
 
-#include "src/globals.h"
-#include "src/utils.h"
 #include "src/allocation.h"
-#include "src/assembler.h"
 #include "src/arm64/assembler-arm64.h"
 #include "src/arm64/decoder-arm64.h"
 #include "src/arm64/disasm-arm64.h"
 #include "src/arm64/instrument-arm64.h"
+#include "src/assembler.h"
+#include "src/globals.h"
+#include "src/utils.h"
 
 #define REGISTER_CODE_LIST(R)                                                  \
 R(0)  R(1)  R(2)  R(3)  R(4)  R(5)  R(6)  R(7)                                 \
@@ -211,13 +211,14 @@
    public:
     template<typename T>
     explicit CallArgument(T argument) {
-      ASSERT(sizeof(argument) <= sizeof(bits_));
+      bits_ = 0;
+      DCHECK(sizeof(argument) <= sizeof(bits_));
       memcpy(&bits_, &argument, sizeof(argument));
       type_ = X_ARG;
     }
 
     explicit CallArgument(double argument) {
-      ASSERT(sizeof(argument) == sizeof(bits_));
+      DCHECK(sizeof(argument) == sizeof(bits_));
       memcpy(&bits_, &argument, sizeof(argument));
       type_ = D_ARG;
     }
@@ -228,10 +229,10 @@
       UNIMPLEMENTED();
       // Make the D register a NaN to try to trap errors if the callee expects a
       // double. If it expects a float, the callee should ignore the top word.
-      ASSERT(sizeof(kFP64SignallingNaN) == sizeof(bits_));
+      DCHECK(sizeof(kFP64SignallingNaN) == sizeof(bits_));
       memcpy(&bits_, &kFP64SignallingNaN, sizeof(kFP64SignallingNaN));
       // Write the float payload to the S register.
-      ASSERT(sizeof(argument) <= sizeof(bits_));
+      DCHECK(sizeof(argument) <= sizeof(bits_));
       memcpy(&bits_, &argument, sizeof(argument));
       type_ = D_ARG;
     }
@@ -289,7 +290,7 @@
   // Simulation helpers.
   template <typename T>
   void set_pc(T new_pc) {
-    ASSERT(sizeof(T) == sizeof(pc_));
+    DCHECK(sizeof(T) == sizeof(pc_));
     memcpy(&pc_, &new_pc, sizeof(T));
     pc_modified_ = true;
   }
@@ -308,10 +309,9 @@
   }
 
   void ExecuteInstruction() {
-    ASSERT(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
+    DCHECK(IsAligned(reinterpret_cast<uintptr_t>(pc_), kInstructionSize));
     CheckBreakNext();
     Decode(pc_);
-    LogProcessorState();
     increment_pc();
     CheckBreakpoints();
   }
@@ -331,7 +331,7 @@
   //
   template<typename T>
   T reg(unsigned code, Reg31Mode r31mode = Reg31IsZeroRegister) const {
-    ASSERT(code < kNumberOfRegisters);
+    DCHECK(code < kNumberOfRegisters);
     if (IsZeroRegister(code, r31mode)) {
       return 0;
     }
@@ -347,16 +347,13 @@
     return reg<int64_t>(code, r31mode);
   }
 
-  // Write 'size' bits of 'value' into an integer register. The value is
-  // zero-extended. This behaviour matches AArch64 register writes.
-
-  // Like set_reg(), but infer the access size from the template type.
+  // Write 'value' into an integer register. The value is zero-extended. This
+  // behaviour matches AArch64 register writes.
   template<typename T>
   void set_reg(unsigned code, T value,
                Reg31Mode r31mode = Reg31IsZeroRegister) {
-    ASSERT(code < kNumberOfRegisters);
-    if (!IsZeroRegister(code, r31mode))
-      registers_[code].Set(value);
+    set_reg_no_log(code, value, r31mode);
+    LogRegister(code, r31mode);
   }
 
   // Common specialized accessors for the set_reg() template.
@@ -370,16 +367,36 @@
     set_reg(code, value, r31mode);
   }
 
+  // As above, but don't automatically log the register update.
+  template <typename T>
+  void set_reg_no_log(unsigned code, T value,
+                      Reg31Mode r31mode = Reg31IsZeroRegister) {
+    DCHECK(code < kNumberOfRegisters);
+    if (!IsZeroRegister(code, r31mode)) {
+      registers_[code].Set(value);
+    }
+  }
+
+  void set_wreg_no_log(unsigned code, int32_t value,
+                       Reg31Mode r31mode = Reg31IsZeroRegister) {
+    set_reg_no_log(code, value, r31mode);
+  }
+
+  void set_xreg_no_log(unsigned code, int64_t value,
+                       Reg31Mode r31mode = Reg31IsZeroRegister) {
+    set_reg_no_log(code, value, r31mode);
+  }
+
   // Commonly-used special cases.
   template<typename T>
   void set_lr(T value) {
-    ASSERT(sizeof(T) == kPointerSize);
+    DCHECK(sizeof(T) == kPointerSize);
     set_reg(kLinkRegCode, value);
   }
 
   template<typename T>
   void set_sp(T value) {
-    ASSERT(sizeof(T) == kPointerSize);
+    DCHECK(sizeof(T) == kPointerSize);
     set_reg(31, value, Reg31IsStackPointer);
   }
 
@@ -394,7 +411,7 @@
 
   template<typename T>
   T fpreg(unsigned code) const {
-    ASSERT(code < kNumberOfRegisters);
+    DCHECK(code < kNumberOfRegisters);
     return fpregisters_[code].Get<T>();
   }
 
@@ -429,9 +446,13 @@
   // This behaviour matches AArch64 register writes.
   template<typename T>
   void set_fpreg(unsigned code, T value) {
-    ASSERT((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
-    ASSERT(code < kNumberOfFPRegisters);
-    fpregisters_[code].Set(value);
+    set_fpreg_no_log(code, value);
+
+    if (sizeof(value) <= kSRegSize) {
+      LogFPRegister(code, kPrintSRegValue);
+    } else {
+      LogFPRegister(code, kPrintDRegValue);
+    }
   }
 
   // Common specialized accessors for the set_fpreg() template.
@@ -451,6 +472,22 @@
     set_fpreg(code, value);
   }
 
+  // As above, but don't automatically log the register update.
+  template <typename T>
+  void set_fpreg_no_log(unsigned code, T value) {
+    DCHECK((sizeof(value) == kDRegSize) || (sizeof(value) == kSRegSize));
+    DCHECK(code < kNumberOfFPRegisters);
+    fpregisters_[code].Set(value);
+  }
+
+  void set_sreg_no_log(unsigned code, float value) {
+    set_fpreg_no_log(code, value);
+  }
+
+  void set_dreg_no_log(unsigned code, double value) {
+    set_fpreg_no_log(code, value);
+  }
+
   SimSystemRegister& nzcv() { return nzcv_; }
   SimSystemRegister& fpcr() { return fpcr_; }
 
@@ -477,27 +514,68 @@
   // Disassemble instruction at the given address.
   void PrintInstructionsAt(Instruction* pc, uint64_t count);
 
-  void PrintSystemRegisters(bool print_all = false);
-  void PrintRegisters(bool print_all_regs = false);
-  void PrintFPRegisters(bool print_all_regs = false);
-  void PrintProcessorState();
-  void PrintWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
+  // Print all registers of the specified types.
+  void PrintRegisters();
+  void PrintFPRegisters();
+  void PrintSystemRegisters();
+
+  // Like Print* (above), but respect log_parameters().
   void LogSystemRegisters() {
-    if (log_parameters_ & LOG_SYS_REGS) PrintSystemRegisters();
+    if (log_parameters() & LOG_SYS_REGS) PrintSystemRegisters();
   }
   void LogRegisters() {
-    if (log_parameters_ & LOG_REGS) PrintRegisters();
+    if (log_parameters() & LOG_REGS) PrintRegisters();
   }
   void LogFPRegisters() {
-    if (log_parameters_ & LOG_FP_REGS) PrintFPRegisters();
+    if (log_parameters() & LOG_FP_REGS) PrintFPRegisters();
   }
-  void LogProcessorState() {
-    LogSystemRegisters();
-    LogRegisters();
-    LogFPRegisters();
+
+  // Specify relevant register sizes, for PrintFPRegister.
+  //
+  // These values are bit masks; they can be combined in case multiple views of
+  // a machine register are interesting.
+  enum PrintFPRegisterSizes {
+    kPrintDRegValue = 1 << kDRegSize,
+    kPrintSRegValue = 1 << kSRegSize,
+    kPrintAllFPRegValues = kPrintDRegValue | kPrintSRegValue
+  };
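
The shift amounts make the two masks disjoint. Assuming kSRegSize and kDRegSize are the register widths in bytes (4 and 8; the constants are defined outside this diff), the values work out as follows:

    #include <cassert>

    int main() {
      // Assumption: kSRegSize == 4 and kDRegSize == 8 (widths in bytes).
      const int kPrintSRegValue = 1 << 4;  // 0x010
      const int kPrintDRegValue = 1 << 8;  // 0x100
      assert((kPrintSRegValue & kPrintDRegValue) == 0);      // disjoint bits
      assert((kPrintSRegValue | kPrintDRegValue) == 0x110);  // combinable
    }
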
+
+  // Print individual register values (after update).
+  void PrintRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer);
+  void PrintFPRegister(unsigned code,
+                       PrintFPRegisterSizes sizes = kPrintAllFPRegValues);
+  void PrintSystemRegister(SystemRegister id);
+
+  // Like Print* (above), but respect log_parameters().
+  void LogRegister(unsigned code, Reg31Mode r31mode = Reg31IsStackPointer) {
+    if (log_parameters() & LOG_REGS) PrintRegister(code, r31mode);
   }
-  void LogWrite(uint8_t* address, uint64_t value, unsigned num_bytes) {
-    if (log_parameters_ & LOG_WRITE) PrintWrite(address, value, num_bytes);
+  void LogFPRegister(unsigned code,
+                     PrintFPRegisterSizes sizes = kPrintAllFPRegValues) {
+    if (log_parameters() & LOG_FP_REGS) PrintFPRegister(code, sizes);
+  }
+  void LogSystemRegister(SystemRegister id) {
+    if (log_parameters() & LOG_SYS_REGS) PrintSystemRegister(id);
+  }
+
+  // Print memory accesses.
+  void PrintRead(uintptr_t address, size_t size, unsigned reg_code);
+  void PrintReadFP(uintptr_t address, size_t size, unsigned reg_code);
+  void PrintWrite(uintptr_t address, size_t size, unsigned reg_code);
+  void PrintWriteFP(uintptr_t address, size_t size, unsigned reg_code);
+
+  // Like Print* (above), but respect log_parameters().
+  void LogRead(uintptr_t address, size_t size, unsigned reg_code) {
+    if (log_parameters() & LOG_REGS) PrintRead(address, size, reg_code);
+  }
+  void LogReadFP(uintptr_t address, size_t size, unsigned reg_code) {
+    if (log_parameters() & LOG_FP_REGS) PrintReadFP(address, size, reg_code);
+  }
+  void LogWrite(uintptr_t address, size_t size, unsigned reg_code) {
+    if (log_parameters() & LOG_WRITE) PrintWrite(address, size, reg_code);
+  }
+  void LogWriteFP(uintptr_t address, size_t size, unsigned reg_code) {
+    if (log_parameters() & LOG_WRITE) PrintWriteFP(address, size, reg_code);
   }
 
   int log_parameters() { return log_parameters_; }
@@ -588,28 +666,30 @@
                        int64_t offset,
                        AddrMode addrmode);
   void LoadStorePairHelper(Instruction* instr, AddrMode addrmode);
-  uint8_t* LoadStoreAddress(unsigned addr_reg,
-                            int64_t offset,
-                            AddrMode addrmode);
+  uintptr_t LoadStoreAddress(unsigned addr_reg, int64_t offset,
+                             AddrMode addrmode);
   void LoadStoreWriteBack(unsigned addr_reg,
                           int64_t offset,
                           AddrMode addrmode);
-  void CheckMemoryAccess(uint8_t* address, uint8_t* stack);
+  void CheckMemoryAccess(uintptr_t address, uintptr_t stack);
 
-  uint64_t MemoryRead(uint8_t* address, unsigned num_bytes);
-  uint8_t MemoryRead8(uint8_t* address);
-  uint16_t MemoryRead16(uint8_t* address);
-  uint32_t MemoryRead32(uint8_t* address);
-  float MemoryReadFP32(uint8_t* address);
-  uint64_t MemoryRead64(uint8_t* address);
-  double MemoryReadFP64(uint8_t* address);
+  // Memory read helpers.
+  template <typename T, typename A>
+  T MemoryRead(A address) {
+    T value;
+    STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+                  (sizeof(value) == 4) || (sizeof(value) == 8));
+    memcpy(&value, reinterpret_cast<const void*>(address), sizeof(value));
+    return value;
+  }
 
-  void MemoryWrite(uint8_t* address, uint64_t value, unsigned num_bytes);
-  void MemoryWrite32(uint8_t* address, uint32_t value);
-  void MemoryWriteFP32(uint8_t* address, float value);
-  void MemoryWrite64(uint8_t* address, uint64_t value);
-  void MemoryWriteFP64(uint8_t* address, double value);
-
+  // Memory write helpers.
+  template <typename T, typename A>
+  void MemoryWrite(A address, T value) {
+    STATIC_ASSERT((sizeof(value) == 1) || (sizeof(value) == 2) ||
+                  (sizeof(value) == 4) || (sizeof(value) == 8));
+    memcpy(reinterpret_cast<void*>(address), &value, sizeof(value));
+  }
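
Both helpers copy through memcpy rather than casting the pointer and dereferencing: that is the portable way to reinterpret bytes in C++ without violating strict-aliasing rules, and compilers lower the fixed-size copy to a single load or store. A quick round-trip check of the read helper:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    template <typename T>
    T MemoryRead(uintptr_t address) {
      T value;
      memcpy(&value, reinterpret_cast<const void*>(address), sizeof(value));
      return value;
    }

    int main() {
      double d = 1.5;
      uint64_t bits = MemoryRead<uint64_t>(reinterpret_cast<uintptr_t>(&d));
      assert(bits == 0x3FF8000000000000ull);  // IEEE-754 pattern of 1.5
    }
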
 
   template <typename T>
   T ShiftOperand(T value,
@@ -743,8 +823,8 @@
   // functions, or to save and restore it when entering and leaving generated
   // code.
   void AssertSupportedFPCR() {
-    ASSERT(fpcr().FZ() == 0);             // No flush-to-zero support.
-    ASSERT(fpcr().RMode() == FPTieEven);  // Ties-to-even rounding only.
+    DCHECK(fpcr().FZ() == 0);             // No flush-to-zero support.
+    DCHECK(fpcr().RMode() == FPTieEven);  // Ties-to-even rounding only.
 
     // The simulator does not support half-precision operations so fpcr().AHP()
     // is irrelevant, and is not checked here.
@@ -762,10 +842,10 @@
   static const uint32_t kConditionFlagsMask = 0xf0000000;
 
   // Stack
-  byte* stack_;
-  static const intptr_t stack_protection_size_ = KB;
-  intptr_t stack_size_;
-  byte* stack_limit_;
+  uintptr_t stack_;
+  static const size_t stack_protection_size_ = KB;
+  size_t stack_size_;
+  uintptr_t stack_limit_;
 
   Decoder<DispatchingDecoderVisitor>* decoder_;
   Decoder<DispatchingDecoderVisitor>* disassembler_decoder_;
diff --git a/src/arm64/stub-cache-arm64.cc b/src/arm64/stub-cache-arm64.cc
deleted file mode 100644
index b0f58fd..0000000
--- a/src/arm64/stub-cache-arm64.cc
+++ /dev/null
@@ -1,1477 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_ARM64
-
-#include "src/ic-inl.h"
-#include "src/codegen.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-#define __ ACCESS_MASM(masm)
-
-
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                    Label* miss_label,
-                                                    Register receiver,
-                                                    Handle<Name> name,
-                                                    Register scratch0,
-                                                    Register scratch1) {
-  ASSERT(!AreAliased(receiver, scratch0, scratch1));
-  ASSERT(name->IsUniqueName());
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  Label done;
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  Register map = scratch1;
-  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
-  __ B(ne, miss_label);
-
-  // Check that receiver is a JSObject.
-  __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
-  __ B(lt, miss_label);
-
-  // Load properties array.
-  Register properties = scratch0;
-  __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  // Check that the properties array is a dictionary.
-  __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
-  __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
-
-  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                     miss_label,
-                                                     &done,
-                                                     receiver,
-                                                     properties,
-                                                     name,
-                                                     scratch1);
-  __ Bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-// Probe the primary or secondary table.
-// If the entry is found in the cache, the generated code jumps to the first
-// instruction of the stub in the cache.
-// If there is a miss, the code falls through.
-//
-// 'receiver', 'name' and 'offset' registers are preserved on miss.
-static void ProbeTable(Isolate* isolate,
-                       MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register receiver,
-                       Register name,
-                       Register offset,
-                       Register scratch,
-                       Register scratch2,
-                       Register scratch3) {
-  // Some code below relies on the fact that the Entry struct contains
-  // 3 pointers (name, code, map).
-  STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
-
-  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
-  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
-  uintptr_t value_off_addr =
-      reinterpret_cast<uintptr_t>(value_offset.address());
-  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
-
-  Label miss;
-
-  ASSERT(!AreAliased(name, offset, scratch, scratch2, scratch3));
-
-  // Multiply by 3 because there are 3 fields per entry.
-  __ Add(scratch3, offset, Operand(offset, LSL, 1));
-
-  // Calculate the base address of the entry.
-  __ Mov(scratch, key_offset);
-  __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
-
-  // Check that the key in the entry matches the name.
-  __ Ldr(scratch2, MemOperand(scratch));
-  __ Cmp(name, scratch2);
-  __ B(ne, &miss);
-
-  // Check the map matches.
-  __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
-  __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Cmp(scratch2, scratch3);
-  __ B(ne, &miss);
-
-  // Get the code entry from the cache.
-  __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
-
-  // Check that the flags match what we're looking for.
-  __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
-  __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
-  __ Cmp(scratch2.W(), flags);
-  __ B(ne, &miss);
-
-#ifdef DEBUG
-  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-    __ B(&miss);
-  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-    __ B(&miss);
-  }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
-  __ Br(scratch);
-
-  // Miss: fall through.
-  __ Bind(&miss);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra,
-                              Register extra2,
-                              Register extra3) {
-  Isolate* isolate = masm->isolate();
-  Label miss;
-
-  // Make sure the flags do not name a specific type.
-  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Make sure that there are no register conflicts.
-  ASSERT(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
-
-  // Make sure extra and extra2 registers are valid.
-  ASSERT(!extra.is(no_reg));
-  ASSERT(!extra2.is(no_reg));
-  ASSERT(!extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
-                      extra2, extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Compute the hash for primary table.
-  __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Add(scratch, scratch, extra);
-  __ Eor(scratch, scratch, flags);
-  // We shift out the last two bits because they are not part of the hash.
-  __ Ubfx(scratch, scratch, kHeapObjectTagSize,
-          CountTrailingZeros(kPrimaryTableSize, 64));
-
-  // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, receiver, name,
-      scratch, extra, extra2, extra3);
-
-  // Primary miss: Compute hash for secondary table.
-  __ Sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize));
-  __ Add(scratch, scratch, flags >> kHeapObjectTagSize);
-  __ And(scratch, scratch, kSecondaryTableSize - 1);
-
-  // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, receiver, name,
-      scratch, extra, extra2, extra3);
-
-  // Cache miss: Fall through and let the caller handle the miss by
-  // entering the runtime system.
-  __ Bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
-                      extra2, extra3);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                       int index,
-                                                       Register prototype) {
-  // Load the global or builtins object from the current context.
-  __ Ldr(prototype, GlobalObjectMemOperand());
-  // Load the native context from the global or builtins object.
-  __ Ldr(prototype,
-         FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
-  // Load the function from the native context.
-  __ Ldr(prototype, ContextMemOperand(prototype, index));
-  // Load the initial map. The global functions all have initial maps.
-  __ Ldr(prototype,
-         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
-  // Check we're still in the same context.
-  Register scratch = prototype;
-  __ Ldr(scratch, GlobalObjectMemOperand());
-  __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ Ldr(scratch, ContextMemOperand(scratch, index));
-  __ Cmp(scratch, Operand(function));
-  __ B(ne, miss);
-
-  // Load its initial map. The global functions all have initial maps.
-  __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
-  // Load the prototype from the initial map.
-  __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            bool inobject,
-                                            int index,
-                                            Representation representation) {
-  ASSERT(!representation.IsDouble());
-  USE(representation);
-  if (inobject) {
-    int offset = index * kPointerSize;
-    __ Ldr(dst, FieldMemOperand(src, offset));
-  } else {
-    // Calculate the offset into the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    __ Ldr(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
-    __ Ldr(dst, FieldMemOperand(dst, offset));
-  }
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register scratch,
-                                           Label* miss_label) {
-  ASSERT(!AreAliased(receiver, scratch));
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss_label);
-
-  // Check that the object is a JS array.
-  __ JumpIfNotObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE,
-                         miss_label);
-
-  // Load length directly from the JS array.
-  __ Ldr(x0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Ret();
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                                 Register receiver,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  // TryGetFunctionPrototype can't put the result directly in x0 because the
-  // three input registers can't alias, and we call this function from
-  // LoadIC::GenerateFunctionPrototype, where receiver is x0. So we explicitly
-  // move the result into x0.
-  __ Mov(x0, scratch1);
-  __ Ret();
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
-                                             Handle<JSGlobalObject> global,
-                                             Handle<Name> name,
-                                             Register scratch,
-                                             Label* miss) {
-  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  ASSERT(cell->value()->IsTheHole());
-  __ Mov(scratch, Operand(cell));
-  __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
-  __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
-}
-
-
-void StoreStubCompiler::GenerateNegativeHolderLookup(
-    MacroAssembler* masm,
-    Handle<JSObject> holder,
-    Register holder_reg,
-    Handle<Name> name,
-    Label* miss) {
-  if (holder->IsJSGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
-  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
-    GenerateDictionaryNegativeLookup(
-        masm, miss, holder_reg, name, scratch1(), scratch2());
-  }
-}
-
-
-// Generate StoreTransition code; the value is passed in the x0 register.
-// When leaving the generated code after success, the receiver_reg and
-// storage_reg may be clobbered. Upon a branch to miss_label, the receiver and
-// name registers have their original values.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
-                                                Handle<JSObject> object,
-                                                LookupResult* lookup,
-                                                Handle<Map> transition,
-                                                Handle<Name> name,
-                                                Register receiver_reg,
-                                                Register storage_reg,
-                                                Register value_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                Label* miss_label,
-                                                Label* slow) {
-  Label exit;
-
-  ASSERT(!AreAliased(receiver_reg, storage_reg, value_reg,
-                     scratch1, scratch2, scratch3));
-
-  // We don't need scratch3.
-  scratch3 = NoReg;
-
-  int descriptor = transition->LastAdded();
-  DescriptorArray* descriptors = transition->instance_descriptors();
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  Representation representation = details.representation();
-  ASSERT(!representation.IsNone());
-
-  if (details.type() == CONSTANT) {
-    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
-    __ LoadObject(scratch1, constant);
-    __ Cmp(value_reg, scratch1);
-    __ B(ne, miss_label);
-  } else if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = descriptors->GetFieldType(descriptor);
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-      Label do_store;
-      while (true) {
-        __ CompareMap(scratch1, it.Current());
-        it.Advance();
-        if (it.Done()) {
-          __ B(ne, miss_label);
-          break;
-        }
-        __ B(eq, &do_store);
-      }
-      __ Bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    UseScratchRegisterScope temps(masm);
-    DoubleRegister temp_double = temps.AcquireD();
-    __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
-
-    Label do_store;
-    __ JumpIfSmi(value_reg, &do_store);
-
-    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
-                miss_label, DONT_DO_SMI_CHECK);
-    __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ Bind(&do_store);
-    __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double);
-  }
-
-  // Stub never generated for non-global objects that require access checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  // Perform map transition for the receiver if necessary.
-  if ((details.type() == FIELD) &&
-      (object->map()->unused_property_fields() == 0)) {
-    // The properties must be extended before we can store the value.
-    // We jump to a runtime call that extends the properties array.
-    __ Mov(scratch1, Operand(transition));
-    __ Push(receiver_reg, scratch1, value_reg);
-    __ TailCallExternalReference(
-        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
-                          masm->isolate()),
-        3,
-        1);
-    return;
-  }
-
-  // Update the map of the object.
-  __ Mov(scratch1, Operand(transition));
-  __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
-  // Update the write barrier for the map field.
-  __ RecordWriteField(receiver_reg,
-                      HeapObject::kMapOffset,
-                      scratch1,
-                      scratch2,
-                      kLRHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  if (details.type() == CONSTANT) {
-    ASSERT(value_reg.is(x0));
-    __ Ret();
-    return;
-  }
-
-  int index = transition->instance_descriptors()->GetFieldIndex(
-      transition->LastAdded());
-
-  // Adjust for the number of properties stored in the object. Even in the
-  // face of a transition we can use the old map here because the size of the
-  // object and the number of in-object properties are not going to change.
-  index -= object->map()->inobject_properties();
-
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
-  if (index < 0) {
-    // Set the property straight into the object.
-    int offset = object->map()->instance_size() + (index * kPointerSize);
-    __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ Mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(receiver_reg,
-                          offset,
-                          storage_reg,
-                          scratch1,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    // Get the properties array
-    __ Ldr(scratch1,
-           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ Str(prop_reg, FieldMemOperand(scratch1, offset));
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ Mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(scratch1,
-                          offset,
-                          storage_reg,
-                          receiver_reg,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  __ Bind(&exit);
-  // Return the value (register x0).
-  ASSERT(value_reg.is(x0));
-  __ Ret();
-}
-
-
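The HeapType class-check loop used above has a slightly unusual shape: every candidate map except the last branches to the store on equality, and only the last one branches to the miss label on inequality. A hedged C++ restatement of the same control flow, with maps reduced to plain integer ids:

#include <cstddef>
#include <vector>

// Sketch only: 'maps' stands in for the HeapType::Iterator sequence.
bool MatchesFieldType(int value_map, const std::vector<int>& maps) {
  if (maps.empty()) return true;  // no class constraint recorded
  for (std::size_t i = 0; i + 1 < maps.size(); ++i) {
    if (value_map == maps[i]) return true;  // __ B(eq, &do_store)
  }
  return value_map == maps.back();          // otherwise __ B(ne, miss_label)
}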
-// Generate StoreField code; the value is passed in the x0 register.
-// When leaving the generated code after success, the receiver_reg and name_reg
-// may be clobbered. Upon a branch to miss_label, the receiver and name
-// registers have their original values.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                           Handle<JSObject> object,
-                                           LookupResult* lookup,
-                                           Register receiver_reg,
-                                           Register name_reg,
-                                           Register value_reg,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* miss_label) {
-  // x0 : value
-  Label exit;
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  FieldIndex index = lookup->GetFieldIndex();
-
-  Representation representation = lookup->representation();
-  ASSERT(!representation.IsNone());
-  if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = lookup->GetFieldType();
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-      Label do_store;
-      while (true) {
-        __ CompareMap(scratch1, it.Current());
-        it.Advance();
-        if (it.Done()) {
-          __ B(ne, miss_label);
-          break;
-        }
-        __ B(eq, &do_store);
-      }
-      __ Bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    UseScratchRegisterScope temps(masm);
-    DoubleRegister temp_double = temps.AcquireD();
-
-    __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
-
-    // Load the double storage.
-    if (index.is_inobject()) {
-      __ Ldr(scratch1, FieldMemOperand(receiver_reg, index.offset()));
-    } else {
-      __ Ldr(scratch1,
-             FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-      __ Ldr(scratch1, FieldMemOperand(scratch1, index.offset()));
-    }
-
-    // Store the value into the storage.
-    Label do_store, heap_number;
-
-    __ JumpIfSmi(value_reg, &do_store);
-
-    __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
-                miss_label, DONT_DO_SMI_CHECK);
-    __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ Bind(&do_store);
-    __ Str(temp_double, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
-
-    // Return the value (register x0).
-    ASSERT(value_reg.is(x0));
-    __ Ret();
-    return;
-  }
-
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index.is_inobject()) {
-    // Set the property straight into the object.
-    __ Str(value_reg, FieldMemOperand(receiver_reg, index.offset()));
-
-    if (!representation.IsSmi()) {
-      // Skip updating write barrier if storing a smi.
-      __ JumpIfSmi(value_reg, &exit);
-
-      // Update the write barrier for the array address.
-      // Pass the now unused name_reg as a scratch register.
-      __ Mov(name_reg, value_reg);
-      __ RecordWriteField(receiver_reg,
-                          index.offset(),
-                          name_reg,
-                          scratch1,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    // Get the properties array
-    __ Ldr(scratch1,
-           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ Str(value_reg, FieldMemOperand(scratch1, index.offset()));
-
-    if (!representation.IsSmi()) {
-      // Skip updating write barrier if storing a smi.
-      __ JumpIfSmi(value_reg, &exit);
-
-      // Update the write barrier for the array address.
-      // Ok to clobber receiver_reg and name_reg, since we return.
-      __ Mov(name_reg, value_reg);
-      __ RecordWriteField(scratch1,
-                          index.offset(),
-                          name_reg,
-                          receiver_reg,
-                          kLRHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  __ Bind(&exit);
-  // Return the value (register x0).
-  ASSERT(value_reg.is(x0));
-  __ Ret();
-}
-
-
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
-                                            Label* label,
-                                            Handle<Name> name) {
-  if (!label->is_unused()) {
-    __ Bind(label);
-    __ Mov(this->name(), Operand(name));
-  }
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
-                                     Register receiver,
-                                     Register holder,
-                                     Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
-  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
-  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
-  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
-
-  __ Push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
-  Register scratch = name;
-  __ Mov(scratch, Operand(interceptor));
-  __ Push(scratch, receiver, holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj,
-    IC::UtilityId id) {
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-
-  __ CallExternalReference(
-      ExternalReference(IC_Utility(id), masm->isolate()),
-      StubCache::kInterceptorArgsLength);
-}
-
-
-// Generate a call to an API function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
-                                       const CallOptimization& optimization,
-                                       Handle<Map> receiver_map,
-                                       Register receiver,
-                                       Register scratch,
-                                       bool is_store,
-                                       int argc,
-                                       Register* values) {
-  ASSERT(!AreAliased(receiver, scratch));
-
-  MacroAssembler::PushPopQueue queue(masm);
-  queue.Queue(receiver);
-  // Write the arguments to the stack frame.
-  for (int i = 0; i < argc; i++) {
-    Register arg = values[argc-1-i];
-    ASSERT(!AreAliased(receiver, scratch, arg));
-    queue.Queue(arg);
-  }
-  queue.PushQueued();
-
-  ASSERT(optimization.is_simple_api_call());
-
-  // ABI for CallApiFunctionStub.
-  Register callee = x0;
-  Register call_data = x4;
-  Register holder = x2;
-  Register api_function_address = x1;
-
-  // Put holder in place.
-  CallOptimization::HolderLookup holder_lookup;
-  Handle<JSObject> api_holder =
-      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
-  switch (holder_lookup) {
-    case CallOptimization::kHolderIsReceiver:
-      __ Mov(holder, receiver);
-      break;
-    case CallOptimization::kHolderFound:
-      __ LoadObject(holder, api_holder);
-      break;
-    case CallOptimization::kHolderNotFound:
-      UNREACHABLE();
-      break;
-  }
-
-  Isolate* isolate = masm->isolate();
-  Handle<JSFunction> function = optimization.constant_function();
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
-  // Put callee in place.
-  __ LoadObject(callee, function);
-
-  bool call_data_undefined = false;
-  // Put call_data in place.
-  if (isolate->heap()->InNewSpace(*call_data_obj)) {
-    __ LoadObject(call_data, api_call_info);
-    __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
-  } else if (call_data_obj->IsUndefined()) {
-    call_data_undefined = true;
-    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
-  } else {
-    __ LoadObject(call_data, call_data_obj);
-  }
-
-  // Put api_function_address in place.
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  ApiFunction fun(function_address);
-  ExternalReference ref = ExternalReference(&fun,
-                                            ExternalReference::DIRECT_API_CALL,
-                                            masm->isolate());
-  __ Mov(api_function_address, ref);
-
-  // Jump to stub.
-  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
-  __ TailCallStub(&stub);
-}
-
-
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
-  __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
-                                       Register object_reg,
-                                       Handle<JSObject> holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       Handle<Name> name,
-                                       Label* miss,
-                                       PrototypeCheckType check) {
-  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
-
-  // object_reg and holder_reg registers can alias.
-  ASSERT(!AreAliased(object_reg, scratch1, scratch2));
-  ASSERT(!AreAliased(holder_reg, scratch1, scratch2));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  Handle<JSObject> current = Handle<JSObject>::null();
-  if (type->IsConstant()) {
-    current = Handle<JSObject>::cast(type->AsConstant()->Value());
-  }
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
-  Handle<Map> holder_map(holder->map());
-  // Traverse the prototype chain and check the maps in the prototype chain for
-  // fast and global objects or do negative lookup for normal objects.
-  while (!current_map.is_identical_to(holder_map)) {
-    ++depth;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap() &&
-        !current_map->IsJSGlobalProxyMap()) {
-      if (!name->IsUniqueName()) {
-        ASSERT(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
-      ASSERT(current.is_null() ||
-             (current->property_dictionary()->FindEntry(name) ==
-              NameDictionary::kNotFound));
-
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
-
-      __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-      __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      bool need_map = (depth != 1 || check == CHECK_ALL_MAPS) ||
-                      heap()->InNewSpace(*prototype);
-      Register map_reg = NoReg;
-      if (need_map) {
-        map_reg = scratch1;
-        __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      if (depth != 1 || check == CHECK_ALL_MAPS) {
-        __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
-      }
-
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
-      if (current_map->IsJSGlobalProxyMap()) {
-        UseScratchRegisterScope temps(masm());
-        __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
-      } else if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(
-            masm(), Handle<JSGlobalObject>::cast(current), name,
-            scratch2, miss);
-      }
-
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (heap()->InNewSpace(*prototype)) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ Mov(reg, Operand(prototype));
-      }
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-    current_map = handle(current->map());
-  }
-
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  // Check the holder map.
-  if (depth != 0 || check == CHECK_ALL_MAPS) {
-    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
-  }
-
-  // Perform security check for access to the global object.
-  ASSERT(current_map->IsJSGlobalProxyMap() ||
-         !current_map->is_access_check_needed());
-  if (current_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
-  }
-
-  // Return the register containing the holder.
-  return reg;
-}
-
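At a high level, CheckPrototypes walks from the receiver's map to the holder's map, validating each map (or performing a negative dictionary lookup) along the way; any mismatch means the chain has changed since the stub was compiled, and control goes to the miss path. A simplified, hedged C++ model of that walk, with a map reduced to an id plus a prototype link:

struct MapModel {
  int id;
  const MapModel* prototype_map;  // map of this map's prototype object
};

// Sketch only: expected_ids mirrors the maps baked into the stub at
// compile time; a mismatch corresponds to branching to the miss label.
bool CheckPrototypeChain(const MapModel* receiver_map,
                         const MapModel* holder_map,
                         const int* expected_ids) {
  int depth = 0;
  for (const MapModel* current = receiver_map; current != holder_map;
       current = current->prototype_map, ++depth) {
    if (current->id != expected_ids[depth]) return false;  // miss
  }
  return true;  // reached the holder with every map intact
}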
-
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ B(&success);
-
-    __ Bind(miss);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-    __ Bind(&success);
-  }
-}
-
-
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ B(&success);
-
-    GenerateRestoreName(masm(), miss, name);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-    __ Bind(&success);
-  }
-}
-
-
-Register LoadStubCompiler::CallbackHandlerFrontend(Handle<HeapType> type,
-                                                   Register object_reg,
-                                                   Handle<JSObject> holder,
-                                                   Handle<Name> name,
-                                                   Handle<Object> callback) {
-  Label miss;
-
-  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-  // HandlerFrontendHeader can return its result into scratch1() so do not
-  // use it.
-  Register scratch2 = this->scratch2();
-  Register scratch3 = this->scratch3();
-  Register dictionary = this->scratch4();
-  ASSERT(!AreAliased(reg, scratch2, scratch3, dictionary));
-
-  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
-    // Load the properties dictionary.
-    __ Ldr(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
-    // Probe the dictionary.
-    Label probe_done;
-    NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
-                                                     &miss,
-                                                     &probe_done,
-                                                     dictionary,
-                                                     this->name(),
-                                                     scratch2,
-                                                     scratch3);
-    __ Bind(&probe_done);
-
-    // If probing finds an entry in the dictionary, scratch3 contains the
-    // pointer into the dictionary. Check that the value is the callback.
-    Register pointer = scratch3;
-    const int kElementsStartOffset = NameDictionary::kHeaderSize +
-        NameDictionary::kElementsStartIndex * kPointerSize;
-    const int kValueOffset = kElementsStartOffset + kPointerSize;
-    __ Ldr(scratch2, FieldMemOperand(pointer, kValueOffset));
-    __ Cmp(scratch2, Operand(callback));
-    __ B(ne, &miss);
-  }
-
-  HandlerFrontendFooter(name, &miss);
-  return reg;
-}
-
-
-void LoadStubCompiler::GenerateLoadField(Register reg,
-                                         Handle<JSObject> holder,
-                                         FieldIndex field,
-                                         Representation representation) {
-  __ Mov(receiver(), reg);
-  if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  } else {
-    KeyedLoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  }
-}
-
-
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(x0, value);
-  __ Ret();
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
-    Register reg,
-    Handle<ExecutableAccessorInfo> callback) {
-  ASSERT(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
-
-  // Build the ExecutableAccessorInfo::args_ list on the stack and push the
-  // property name below the exit frame, to make the GC aware of them and
-  // store pointers to them.
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-
-  __ Push(receiver());
-
-  if (heap()->InNewSpace(callback->data())) {
-    __ Mov(scratch3(), Operand(callback));
-    __ Ldr(scratch3(), FieldMemOperand(scratch3(),
-                                       ExecutableAccessorInfo::kDataOffset));
-  } else {
-    __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
-  }
-  __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
-  __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
-  __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
-
-  Register args_addr = scratch2();
-  __ Add(args_addr, __ StackPointer(), kPointerSize);
-
-  // Stack at this point:
-  //              sp[40] callback data
-  //              sp[32] undefined
-  //              sp[24] undefined
-  //              sp[16] isolate
-  // args_addr -> sp[8]  reg
-  //              sp[0]  name
-
-  // ABI for CallApiGetter.
-  Register getter_address_reg = x2;
-
-  // Set up the call.
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ Mov(getter_address_reg, ref);
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void LoadStubCompiler::GenerateLoadInterceptor(
-    Register holder_reg,
-    Handle<Object> object,
-    Handle<JSObject> interceptor_holder,
-    LookupResult* lookup,
-    Handle<Name> name) {
-  ASSERT(!AreAliased(receiver(), this->name(),
-                     scratch1(), scratch2(), scratch3()));
-  ASSERT(interceptor_holder->HasNamedInterceptor());
-  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
-  // So far the most popular follow-ups for interceptor loads are FIELD
-  // and CALLBACKS, so inline only those; other cases may be added later.
-  bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
-    if (lookup->IsField()) {
-      compile_followup_inline = true;
-    } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
-      ExecutableAccessorInfo* callback =
-          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
-      compile_followup_inline = callback->getter() != NULL &&
-          callback->IsCompatibleReceiver(*object);
-    }
-  }
-
-  if (compile_followup_inline) {
-    // Compile the interceptor call, followed by inline code to load the
-    // property from further up the prototype chain if the call fails.
-    // Check that the maps haven't changed.
-    ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
-    // Preserve the receiver register explicitly whenever it is different from
-    // the holder and it is needed should the interceptor return without any
-    // result. The CALLBACKS case needs the receiver to be passed into C++
-    // code; the FIELD case might cause a miss during the prototype check.
-    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
-    bool must_preserve_receiver_reg = !receiver().Is(holder_reg) &&
-        (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
-    // Save necessary data before invoking an interceptor.
-    // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (must_preserve_receiver_reg) {
-        __ Push(receiver(), holder_reg, this->name());
-      } else {
-        __ Push(holder_reg, this->name());
-      }
-      // Invoke an interceptor.  Note: map checks from the receiver to the
-      // interceptor's holder have been compiled before (see the caller
-      // of this method).
-      CompileCallLoadPropertyWithInterceptor(
-          masm(), receiver(), holder_reg, this->name(), interceptor_holder,
-          IC::kLoadPropertyWithInterceptorOnly);
-
-      // Check if the interceptor provided a value for the property.
-      // If so, return immediately.
-      Label interceptor_failed;
-      __ JumpIfRoot(x0,
-                    Heap::kNoInterceptorResultSentinelRootIndex,
-                    &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ Ret();
-
-      __ Bind(&interceptor_failed);
-      if (must_preserve_receiver_reg) {
-        __ Pop(this->name(), holder_reg, receiver());
-      } else {
-        __ Pop(this->name(), holder_reg);
-      }
-      // Leave the internal frame.
-    }
-    GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
-  } else {  // !compile_followup_inline
-    // Call the runtime system to load the interceptor.
-    // Check that the maps haven't changed.
-    PushInterceptorArguments(
-        masm(), receiver(), holder_reg, this->name(), interceptor_holder);
-
-    ExternalReference ref =
-        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
-                          isolate());
-    __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
-  }
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
-  ASM_LOCATION("StoreStubCompiler::CompileStoreCallback");
-  Register holder_reg = HandlerFrontend(
-      IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
-
-  // Stub never generated for non-global objects that require access checks.
-  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
-  // receiver() and holder_reg can alias.
-  ASSERT(!AreAliased(receiver(), scratch1(), scratch2(), value()));
-  ASSERT(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
-  __ Mov(scratch1(), Operand(callback));
-  __ Mov(scratch2(), Operand(name));
-  __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_callback_property =
-      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
-  __ TailCallExternalReference(store_callback_property, 5, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
-    MacroAssembler* masm,
-    Handle<HeapType> type,
-    Register receiver,
-    Handle<JSFunction> setter) {
-  // ----------- S t a t e -------------
-  //  -- lr    : return address
-  // -----------------------------------
-  Label miss;
-
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Save value register, so we can restore it later.
-    __ Push(value());
-
-    if (!setter.is_null()) {
-      // Call the JavaScript setter with receiver and value on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ Ldr(receiver,
-               FieldMemOperand(
-                   receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ Push(receiver, value());
-      ParameterCount actual(1);
-      ParameterCount expected(setter);
-      __ InvokeFunction(setter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // We have to return the passed value, not the return value of the setter.
-    __ Pop(x0);
-
-    // Restore context register.
-    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  }
-  __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> object,
-    Handle<Name> name) {
-  Label miss;
-
-  ASM_LOCATION("StoreStubCompiler::CompileStoreInterceptor");
-
-  __ Push(receiver(), this->name(), value());
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_ic_property =
-      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
-  __ TailCallExternalReference(store_ic_property, 3, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
-                                                      Handle<JSObject> last,
-                                                      Handle<Name> name) {
-  NonexistentHandlerFrontend(type, last, name);
-
-  // Return undefined if the maps of the full prototype chain are still the
-  // same and no global property with this name contains a value.
-  __ LoadRoot(x0, Heap::kUndefinedValueRootIndex);
-  __ Ret();
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-// TODO(all): The so-called scratch registers are significant in some cases.
-// For example, KeyedStoreStubCompiler::registers()[3] (x3) is actually used
-// for KeyedStoreStubCompiler::transition_map(). We should verify which
-// registers are actually scratch registers, and which are important. For now,
-// we use the same assignments as ARM to remain on the safe side.
-
-Register* LoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { x0, x2, x3, x1, x4, x5 };
-  return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
-  // receiver, name/key, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { x1, x0, x2, x3, x4, x5 };
-  return registers;
-}
-
-
-Register StoreStubCompiler::value() {
-  return x0;
-}
-
-
-Register* StoreStubCompiler::registers() {
-  // receiver, value, scratch1, scratch2, scratch3.
-  static Register registers[] = { x1, x2, x3, x4, x5 };
-  return registers;
-}
-
-
-Register* KeyedStoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { x2, x1, x3, x4, x5 };
-  return registers;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
-                                             Handle<HeapType> type,
-                                             Register receiver,
-                                             Handle<JSFunction> getter) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    if (!getter.is_null()) {
-      // Call the JavaScript getter with the receiver on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ Ldr(receiver,
-                FieldMemOperand(
-                    receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ Push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(getter);
-      __ InvokeFunction(getter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // Restore context register.
-    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  }
-  __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<HeapType> type,
-    Handle<GlobalObject> global,
-    Handle<PropertyCell> cell,
-    Handle<Name> name,
-    bool is_dont_delete) {
-  Label miss;
-  HandlerFrontendHeader(type, receiver(), global, name, &miss);
-
-  // Get the value from the cell.
-  __ Mov(x3, Operand(cell));
-  __ Ldr(x4, FieldMemOperand(x3, Cell::kValueOffset));
-
-  // Check for a deleted property if the property can actually be deleted.
-  if (!is_dont_delete) {
-    __ JumpIfRoot(x4, Heap::kTheHoleValueRootIndex, &miss);
-  }
-
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
-  __ Mov(x0, x4);
-  __ Ret();
-
-  HandlerFrontendFooter(name, &miss);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    TypeHandleList* types,
-    CodeHandleList* handlers,
-    Handle<Name> name,
-    Code::StubType type,
-    IcCheckType check) {
-  Label miss;
-
-  if (check == PROPERTY &&
-      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
-  }
-
-  Label number_case;
-  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
-  __ JumpIfSmi(receiver(), smi_target);
-
-  Register map_reg = scratch1();
-  __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
-  int receiver_count = types->length();
-  int number_of_handled_maps = 0;
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<HeapType> type = types->at(current);
-    Handle<Map> map = IC::TypeToMap(*type, isolate());
-    if (!map->is_deprecated()) {
-      number_of_handled_maps++;
-      Label try_next;
-      __ Cmp(map_reg, Operand(map));
-      __ B(ne, &try_next);
-      if (type->Is(HeapType::Number())) {
-        ASSERT(!number_case.is_unused());
-        __ Bind(&number_case);
-      }
-      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
-      __ Bind(&try_next);
-    }
-  }
-  ASSERT(number_of_handled_maps != 0);
-
-  __ Bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  InlineCacheState state =
-      (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
-  return GetICCode(kind(), type, name, state);
-}
-
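The emitted code is effectively a linear map-compare dispatch: each non-deprecated map in 'types' gets a compare-and-jump to its handler, and falling past every comparison reaches the miss builtin. A hedged C++ sketch with maps and handlers reduced to ids and callbacks:

#include <functional>
#include <utility>
#include <vector>

// Sketch only: each pair is (map id, handler), mirroring the
// Cmp/B(ne)/Jump sequence generated per handled map.
void PolymorphicDispatch(
    int receiver_map,
    const std::vector<std::pair<int, std::function<void()> > >& cases,
    const std::function<void()>& miss) {
  for (const auto& entry : cases) {
    if (receiver_map == entry.first) {
      entry.second();  // __ Jump(handlers->at(current), CODE_TARGET)
      return;
    }
  }
  miss();  // TailCallBuiltin(masm(), MissBuiltin(kind()))
}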
-
-void StoreStubCompiler::GenerateStoreArrayLength() {
-  // Prepare tail call to StoreIC_ArrayLength.
-  __ Push(receiver(), value());
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
-                        masm()->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
-  Label miss;
-
-  ASM_LOCATION("KeyedStoreStubCompiler::CompileStorePolymorphic");
-
-  __ JumpIfSmi(receiver(), &miss);
-
-  int receiver_count = receiver_maps->length();
-  __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
-  for (int i = 0; i < receiver_count; i++) {
-    __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
-
-    Label skip;
-    __ B(&skip, ne);
-    if (!transitioned_maps->at(i).is_null()) {
-      // This argument is used by the handler stub. For example, see
-      // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
-      __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
-    }
-    __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
-    __ Bind(&skip);
-  }
-
-  __ Bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  return GetICCode(
-      kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
-    MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- lr     : return address
-  //  -- x0     : key
-  //  -- x1     : receiver
-  // -----------------------------------
-  Label slow, miss;
-
-  Register result = x0;
-  Register key = x0;
-  Register receiver = x1;
-
-  __ JumpIfNotSmi(key, &miss);
-  __ Ldr(x4, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ LoadFromNumberDictionary(&slow, x4, key, result, x2, x3, x5, x6);
-  __ Ret();
-
-  __ Bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_external_array_slow(), 1, x2, x3);
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
-  // Miss case, call the runtime.
-  __ Bind(&miss);
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/arm64/utils-arm64.cc b/src/arm64/utils-arm64.cc
index 0cb4ea5..dbfb876 100644
--- a/src/arm64/utils-arm64.cc
+++ b/src/arm64/utils-arm64.cc
@@ -15,7 +15,7 @@
 
 int CountLeadingZeros(uint64_t value, int width) {
   // TODO(jbramley): Optimize this for ARM64 hosts.
-  ASSERT((width == 32) || (width == 64));
+  DCHECK((width == 32) || (width == 64));
   int count = 0;
   uint64_t bit_test = 1UL << (width - 1);
   while ((count < width) && ((bit_test & value) == 0)) {
@@ -28,7 +28,7 @@
 
 int CountLeadingSignBits(int64_t value, int width) {
   // TODO(jbramley): Optimize this for ARM64 hosts.
-  ASSERT((width == 32) || (width == 64));
+  DCHECK((width == 32) || (width == 64));
   if (value >= 0) {
     return CountLeadingZeros(value, width) - 1;
   } else {
@@ -39,7 +39,7 @@
 
 int CountTrailingZeros(uint64_t value, int width) {
   // TODO(jbramley): Optimize this for ARM64 hosts.
-  ASSERT((width == 32) || (width == 64));
+  DCHECK((width == 32) || (width == 64));
   int count = 0;
   while ((count < width) && (((value >> count) & 1) == 0)) {
     count++;
@@ -51,7 +51,7 @@
 int CountSetBits(uint64_t value, int width) {
   // TODO(jbramley): Would it be useful to allow other widths? The
   // implementation already supports them.
-  ASSERT((width == 32) || (width == 64));
+  DCHECK((width == 32) || (width == 64));
 
   // Mask out unused bits to ensure that they are not counted.
   value &= (0xffffffffffffffffUL >> (64-width));
@@ -78,8 +78,13 @@
 }
 
 
+uint64_t LargestPowerOf2Divisor(uint64_t value) {
+  return value & -value;
+}
+
+
 int MaskToBit(uint64_t mask) {
-  ASSERT(CountSetBits(mask, 64) == 1);
+  DCHECK(CountSetBits(mask, 64) == 1);
   return CountTrailingZeros(mask, 64);
 }
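The new LargestPowerOf2Divisor helper relies on a two's-complement identity: negating a value flips every bit above its lowest set bit, so value & -value isolates that bit, which is exactly the largest power of two dividing the value. A small self-contained check (assumption: nothing beyond standard C++):

#include <cassert>
#include <cstdint>

uint64_t LargestPowerOf2Divisor(uint64_t value) {
  return value & (~value + 1);  // same as value & -value, spelled out
}

int main() {
  assert(LargestPowerOf2Divisor(12) == 4);   // 12 = 0b1100
  assert(LargestPowerOf2Divisor(40) == 8);   // 40 = 0b101000
  assert(LargestPowerOf2Divisor(1) == 1);
  return 0;
}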
 
diff --git a/src/arm64/utils-arm64.h b/src/arm64/utils-arm64.h
index b940710..c22ed9a 100644
--- a/src/arm64/utils-arm64.h
+++ b/src/arm64/utils-arm64.h
@@ -7,6 +7,7 @@
 
 #include <cmath>
 #include "src/v8.h"
+
 #include "src/arm64/constants-arm64.h"
 
 #define REGISTER_CODE_LIST(R)                                                  \
@@ -56,6 +57,7 @@
 int CountLeadingSignBits(int64_t value, int width);
 int CountTrailingZeros(uint64_t value, int width);
 int CountSetBits(uint64_t value, int width);
+uint64_t LargestPowerOf2Divisor(uint64_t value);
 int MaskToBit(uint64_t mask);
 
 
@@ -86,13 +88,13 @@
 
 // Convert the NaN in 'num' to a quiet NaN.
 inline double ToQuietNaN(double num) {
-  ASSERT(isnan(num));
+  DCHECK(std::isnan(num));
   return rawbits_to_double(double_to_rawbits(num) | kDQuietNanMask);
 }
 
 
 inline float ToQuietNaN(float num) {
-  ASSERT(isnan(num));
+  DCHECK(std::isnan(num));
   return rawbits_to_float(float_to_rawbits(num) | kSQuietNanMask);
 }
 
diff --git a/src/array-iterator.js b/src/array-iterator.js
index 9511b6d..5ced9da 100644
--- a/src/array-iterator.js
+++ b/src/array-iterator.js
@@ -50,7 +50,7 @@
 function ArrayIteratorNext() {
   var iterator = ToObject(this);
 
-  if (!HAS_PRIVATE(iterator, arrayIteratorObjectSymbol)) {
+  if (!HAS_DEFINED_PRIVATE(iterator, arrayIteratorNextIndexSymbol)) {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Array Iterator.prototype.next']);
   }
@@ -110,8 +110,8 @@
     'next', ArrayIteratorNext
   ));
   %FunctionSetName(ArrayIteratorIterator, '[Symbol.iterator]');
-  %SetProperty(ArrayIterator.prototype, symbolIterator, ArrayIteratorIterator,
-      DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %AddNamedProperty(ArrayIterator.prototype, symbolIterator,
+                    ArrayIteratorIterator, DONT_ENUM);
 }
 SetUpArrayIterator();
 
@@ -120,9 +120,38 @@
   %CheckIsBootstrapping();
 
   InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+    // No 'values' since it breaks webcompat: http://crbug.com/409858
     'entries', ArrayEntries,
-    'values', ArrayValues,
     'keys', ArrayKeys
   ));
+
+  %AddNamedProperty($Array.prototype, symbolIterator, ArrayValues, DONT_ENUM);
 }
 ExtendArrayPrototype();
+
+
+function ExtendTypedArrayPrototypes() {
+  %CheckIsBootstrapping();
+
+macro TYPED_ARRAYS(FUNCTION)
+  FUNCTION(Uint8Array)
+  FUNCTION(Int8Array)
+  FUNCTION(Uint16Array)
+  FUNCTION(Int16Array)
+  FUNCTION(Uint32Array)
+  FUNCTION(Int32Array)
+  FUNCTION(Float32Array)
+  FUNCTION(Float64Array)
+  FUNCTION(Uint8ClampedArray)
+endmacro
+
+macro EXTEND_TYPED_ARRAY(NAME)
+  %AddNamedProperty($NAME.prototype, 'entries', ArrayEntries, DONT_ENUM);
+  %AddNamedProperty($NAME.prototype, 'values', ArrayValues, DONT_ENUM);
+  %AddNamedProperty($NAME.prototype, 'keys', ArrayKeys, DONT_ENUM);
+  %AddNamedProperty($NAME.prototype, symbolIterator, ArrayValues, DONT_ENUM);
+endmacro
+
+  TYPED_ARRAYS(EXTEND_TYPED_ARRAY)
+}
+ExtendTypedArrayPrototypes();
diff --git a/src/array.js b/src/array.js
index ef7aae4..81f1f65 100644
--- a/src/array.js
+++ b/src/array.js
@@ -86,11 +86,20 @@
 }
 
 
-function UseSparseVariant(object, length, is_array) {
-   return is_array &&
-       length > 1000 &&
-       (!%_IsSmi(length) ||
-        %EstimateNumberOfElements(object) < (length >> 2));
+function UseSparseVariant(array, length, is_array, touched) {
+  // Only use the sparse variant on arrays that are likely to be sparse, and
+  // only when the operation touches a range much larger than the number of
+  // actual elements in it.
+  if (!is_array || length < 1000 || %IsObserved(array)) {
+    return false;
+  }
+  if (!%_IsSmi(length)) {
+    return true;
+  }
+  var elements_threshold = length >> 2;  // Sparse only if more than 75% holes
+  var estimated_elements = %EstimateNumberOfElements(array);
+  return (estimated_elements < elements_threshold) &&
+    (touched > estimated_elements * 4);
 }
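The rewritten heuristic can be summarized as: only large, non-observed real arrays qualify, and then only when the array is mostly holes and the operation touches far more slots than the array has real elements. A hedged C++ restatement (simplified: the non-Smi-length early-true case is folded out, and plain integers stand in for V8's tagged values):

#include <cstdint>

bool UseSparseVariantModel(bool is_array, bool is_observed, int64_t length,
                           int64_t estimated_elements, int64_t touched) {
  if (!is_array || length < 1000 || is_observed) return false;
  int64_t elements_threshold = length / 4;  // sparse: over 75% holes
  return estimated_elements < elements_threshold &&
         touched > estimated_elements * 4;
}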
 
 
@@ -107,7 +116,8 @@
 
   // Attempt to convert the elements.
   try {
-    if (UseSparseVariant(array, length, is_array)) {
+    if (UseSparseVariant(array, length, is_array, length)) {
+      %NormalizeElements(array);
       if (separator.length == 0) {
         return SparseJoin(array, length, convert);
       } else {
@@ -134,7 +144,7 @@
         elements[elements_length++] = e;
       }
       elements.length = elements_length;
-      var result = %_FastAsciiArrayJoin(elements, '');
+      var result = %_FastOneByteArrayJoin(elements, '');
       if (!IS_UNDEFINED(result)) return result;
       return %StringBuilderConcat(elements, elements_length, '');
     }
@@ -158,7 +168,7 @@
         elements[i] = e;
       }
     }
-    var result = %_FastAsciiArrayJoin(elements, separator);
+    var result = %_FastOneByteArrayJoin(elements, separator);
     if (!IS_UNDEFINED(result)) return result;
 
     return %StringBuilderJoin(elements, length, separator);
@@ -365,7 +375,7 @@
     separator = NonStringToString(separator);
   }
 
-  var result = %_FastAsciiArrayJoin(array, separator);
+  var result = %_FastOneByteArrayJoin(array, separator);
   if (!IS_UNDEFINED(result)) return result;
 
   return Join(array, length, separator, ConvertToString);
@@ -443,9 +453,7 @@
   var m = %_ArgumentsLength();
 
   for (var i = 0; i < m; i++) {
-    // Use SetProperty rather than a direct keyed store to ensure that the store
-    // site doesn't become poisened with an elements transition KeyedStoreIC.
-    %SetProperty(array, i+n, %_Arguments(i), 0, kStrictMode);
+    array[i+n] = %_Arguments(i);
   }
 
   var new_length = n + m;
@@ -520,13 +528,15 @@
   CHECK_OBJECT_COERCIBLE(this, "Array.prototype.reverse");
 
   var array = TO_OBJECT_INLINE(this);
-  var j = TO_UINT32(array.length) - 1;
+  var len = TO_UINT32(array.length);
 
-  if (UseSparseVariant(array, j, IS_ARRAY(array))) {
-    SparseReverse(array, j+1);
+  if (UseSparseVariant(array, len, IS_ARRAY(array), len)) {
+    %NormalizeElements(array);
+    SparseReverse(array, len);
     return array;
   }
 
+  var j = len - 1;
   for (var i = 0; i < j; i++, j--) {
     var current_i = array[i];
     if (!IS_UNDEFINED(current_i) || i in array) {
@@ -672,10 +682,9 @@
 
   if (end_i < start_i) return result;
 
-  if (IS_ARRAY(array) &&
-      !%IsObserved(array) &&
-      (end_i > 1000) &&
-      (%EstimateNumberOfElements(array) < end_i)) {
+  if (UseSparseVariant(array, len, IS_ARRAY(array), end_i - start_i)) {
+    %NormalizeElements(array);
+    %NormalizeElements(result);
     SmartSlice(array, start_i, end_i - start_i, len, result);
   } else {
     SimpleSlice(array, start_i, end_i - start_i, len, result);
@@ -783,24 +792,20 @@
                         ["Array.prototype.splice"]);
   }
 
-  var use_simple_splice = true;
-  if (IS_ARRAY(array) &&
-      num_elements_to_add !== del_count) {
-    // If we are only deleting/moving a few things near the end of the
-    // array then the simple version is going to be faster, because it
-    // doesn't touch most of the array.
-    var estimated_non_hole_elements = %EstimateNumberOfElements(array);
-    if (len > 20 && (estimated_non_hole_elements >> 2) < (len - start_i)) {
-      use_simple_splice = false;
-    }
+  var changed_elements = del_count;
+  if (num_elements_to_add != del_count) {
+    // If the splice actually needs to move elements after the insertion
+    // point, then include those in the estimate of changed elements.
+    changed_elements += len - start_i - del_count;
   }
-
-  if (use_simple_splice) {
-    SimpleSlice(array, start_i, del_count, len, deleted_elements);
-    SimpleMove(array, start_i, del_count, len, num_elements_to_add);
-  } else {
+  if (UseSparseVariant(array, len, IS_ARRAY(array), changed_elements)) {
+    %NormalizeElements(array);
+    %NormalizeElements(deleted_elements);
     SmartSlice(array, start_i, del_count, len, deleted_elements);
     SmartMove(array, start_i, del_count, len, num_elements_to_add);
+  } else {
+    SimpleSlice(array, start_i, del_count, len, deleted_elements);
+    SimpleMove(array, start_i, del_count, len, num_elements_to_add);
   }
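Concretely, the estimate counts the deleted entries plus, whenever the insert and delete counts differ, every element after the gap, since those all have to be shifted. A hedged sketch of the arithmetic; splicing one element out of the middle of a large array therefore counts roughly half the array as touched, steering big holey arrays to the smart path:

#include <cstdint>

int64_t ChangedElements(int64_t len, int64_t start_i, int64_t del_count,
                        int64_t num_elements_to_add) {
  int64_t changed = del_count;
  if (num_elements_to_add != del_count) {
    changed += len - start_i - del_count;  // tail that must be moved
  }
  return changed;
}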
 
   // Insert the arguments into the resulting array in
@@ -858,11 +863,12 @@
     var t_array = [];
     // Use both 'from' and 'to' to determine the pivot candidates.
     var increment = 200 + ((to - from) & 15);
-    for (var i = from + 1; i < to - 1; i += increment) {
-      t_array.push([i, a[i]]);
+    for (var i = from + 1, j = 0; i < to - 1; i += increment, j++) {
+      t_array[j] = [i, a[i]];
     }
-    t_array.sort(function(a, b) {
-        return %_CallFunction(receiver, a[1], b[1], comparefn) } );
+    %_CallFunction(t_array, function(a, b) {
+      return %_CallFunction(receiver, a[1], b[1], comparefn);
+    }, ArraySort);
     var third_index = t_array[t_array.length >> 1][0];
     return third_index;
   }
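For very large ranges, the pivot selection above samples roughly one element every ~200 slots, sorts the (index, value) samples by value, and returns the index of the median sample as the third pivot candidate. A hedged C++ analogue over a plain vector (assumption: the empty-sample guard is added for safety; the JS version only runs on ranges large enough to sample):

#include <algorithm>
#include <cstdint>
#include <utility>
#include <vector>

int64_t GetThirdIndex(const std::vector<int>& a, int64_t from, int64_t to) {
  std::vector<std::pair<int64_t, int> > samples;  // (index, value)
  const int64_t increment = 200 + ((to - from) & 15);
  for (int64_t i = from + 1; i < to - 1; i += increment) {
    samples.push_back(std::make_pair(i, a[i]));
  }
  if (samples.empty()) return from + 1;  // tiny range; degenerate fallback
  std::sort(samples.begin(), samples.end(),
            [](const std::pair<int64_t, int>& x,
               const std::pair<int64_t, int>& y) {
              return x.second < y.second;
            });
  return samples[samples.size() / 2].first;  // index of the median sample
}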
@@ -964,7 +970,7 @@
         // It's an interval.
         var proto_length = indices;
         for (var i = 0; i < proto_length; i++) {
-          if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
+          if (!HAS_OWN_PROPERTY(obj, i) && HAS_OWN_PROPERTY(proto, i)) {
             obj[i] = proto[i];
             if (i >= max) { max = i + 1; }
           }
@@ -972,8 +978,8 @@
       } else {
         for (var i = 0; i < indices.length; i++) {
           var index = indices[i];
-          if (!IS_UNDEFINED(index) &&
-              !obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
+          if (!IS_UNDEFINED(index) && !HAS_OWN_PROPERTY(obj, index)
+              && HAS_OWN_PROPERTY(proto, index)) {
             obj[index] = proto[index];
             if (index >= max) { max = index + 1; }
           }
@@ -993,7 +999,7 @@
         // It's an interval.
         var proto_length = indices;
         for (var i = from; i < proto_length; i++) {
-          if (proto.hasOwnProperty(i)) {
+          if (HAS_OWN_PROPERTY(proto, i)) {
             obj[i] = UNDEFINED;
           }
         }
@@ -1001,7 +1007,7 @@
         for (var i = 0; i < indices.length; i++) {
           var index = indices[i];
           if (!IS_UNDEFINED(index) && from <= index &&
-              proto.hasOwnProperty(index)) {
+              HAS_OWN_PROPERTY(proto, index)) {
             obj[index] = UNDEFINED;
           }
         }
@@ -1024,14 +1030,14 @@
       }
       // Maintain the invariant num_holes = the number of holes in the original
       // array with indices <= first_undefined or > last_defined.
-      if (!obj.hasOwnProperty(first_undefined)) {
+      if (!HAS_OWN_PROPERTY(obj, first_undefined)) {
         num_holes++;
       }
 
       // Find last defined element.
       while (first_undefined < last_defined &&
              IS_UNDEFINED(obj[last_defined])) {
-        if (!obj.hasOwnProperty(last_defined)) {
+        if (!HAS_OWN_PROPERTY(obj, last_defined)) {
           num_holes++;
         }
         last_defined--;
@@ -1128,7 +1134,7 @@
   var result = new $Array();
   var accumulator = new InternalArray();
   var accumulator_length = 0;
-  var stepping = %_DebugCallbackSupportsStepping(f);
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
     if (i in array) {
       var element = array[i];
@@ -1161,7 +1167,7 @@
     receiver = ToObject(receiver);
   }
 
-  var stepping = %_DebugCallbackSupportsStepping(f);
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
     if (i in array) {
       var element = array[i];
@@ -1192,7 +1198,7 @@
     receiver = ToObject(receiver);
   }
 
-  var stepping = %_DebugCallbackSupportsStepping(f);
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
     if (i in array) {
       var element = array[i];
@@ -1222,7 +1228,7 @@
     receiver = ToObject(receiver);
   }
 
-  var stepping = %_DebugCallbackSupportsStepping(f);
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
     if (i in array) {
       var element = array[i];
@@ -1253,7 +1259,7 @@
 
   var result = new $Array();
   var accumulator = new InternalArray(length);
-  var stepping = %_DebugCallbackSupportsStepping(f);
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
     if (i in array) {
       var element = array[i];
@@ -1285,7 +1291,8 @@
   }
   var min = index;
   var max = length;
-  if (UseSparseVariant(this, length, IS_ARRAY(this))) {
+  if (UseSparseVariant(this, length, IS_ARRAY(this), max - min)) {
+    %NormalizeElements(this);
     var indices = %GetArrayKeys(this, length);
     if (IS_NUMBER(indices)) {
       // It's an interval.
@@ -1340,7 +1347,8 @@
   }
   var min = 0;
   var max = index;
-  if (UseSparseVariant(this, length, IS_ARRAY(this))) {
+  if (UseSparseVariant(this, length, IS_ARRAY(this), index)) {
+    %NormalizeElements(this);
     var indices = %GetArrayKeys(this, index + 1);
     if (IS_NUMBER(indices)) {
       // It's an interval.
@@ -1400,7 +1408,7 @@
   }
 
   var receiver = %GetDefaultReceiver(callback);
-  var stepping = %_DebugCallbackSupportsStepping(callback);
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
   for (; i < length; i++) {
     if (i in array) {
       var element = array[i];
@@ -1437,7 +1445,7 @@
   }
 
   var receiver = %GetDefaultReceiver(callback);
-  var stepping = %_DebugCallbackSupportsStepping(callback);
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
   for (; i >= 0; i--) {
     if (i in array) {
       var element = array[i];
@@ -1462,7 +1470,20 @@
 
   // Set up non-enumerable constructor property on the Array.prototype
   // object.
-  %SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
+  %AddNamedProperty($Array.prototype, "constructor", $Array, DONT_ENUM);
+
+  // Set up unscopable properties on the Array.prototype object.
+  var unscopables = {
+    __proto__: null,
+    copyWithin: true,
+    entries: true,
+    fill: true,
+    find: true,
+    findIndex: true,
+    keys: true,
+  };
+  %AddNamedProperty($Array.prototype, symbolUnscopables, unscopables,
+      DONT_ENUM | READ_ONLY);
 
   // Set up non-enumerable functions on the Array object.
   InstallFunctions($Array, DONT_ENUM, $Array(
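
The repeated change in the loops above swaps the %_DebugCallbackSupportsStepping intrinsic for a cheap DEBUG_IS_ACTIVE flag test that short-circuits the heavier runtime call when no debugger is attached. A minimal, self-contained C++ sketch of that guard pattern follows; every name in it is invented for illustration and none is V8's actual API.

#include <cstdio>

// Sketch of the short-circuit guard (all names invented): read a cheap
// "debugger active" flag first and only make the expensive per-callback
// query when it is set.
static bool g_debug_is_active = false;

static bool DebugCallbackSupportsStepping(void (*f)()) {
  std::puts("expensive runtime query");  // stands in for the %Debug... call
  return f != nullptr;
}

static void ForEachElement(void (*f)(), int length) {
  bool stepping = g_debug_is_active && DebugCallbackSupportsStepping(f);
  for (int i = 0; i < length; i++) {
    if (stepping) {
      // Prepare the debugger for step-in before invoking the callback.
    }
    f();
  }
}

int main() {
  ForEachElement([] {}, 3);  // flag clear, so the expensive query never runs
}
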
diff --git a/src/arraybuffer.js b/src/arraybuffer.js
index d1324bb..e1c887f 100644
--- a/src/arraybuffer.js
+++ b/src/arraybuffer.js
@@ -74,7 +74,8 @@
   %FunctionSetPrototype($ArrayBuffer, new $Object());
 
   // Set up the constructor property on the ArrayBuffer prototype object.
-  %SetProperty($ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
+  %AddNamedProperty(
+      $ArrayBuffer.prototype, "constructor", $ArrayBuffer, DONT_ENUM);
 
   InstallGetter($ArrayBuffer.prototype, "byteLength", ArrayBufferGetByteLen);
 
diff --git a/src/assembler.cc b/src/assembler.cc
index e8fa728..a705300 100644
--- a/src/assembler.cc
+++ b/src/assembler.cc
@@ -36,38 +36,40 @@
 
 #include <cmath>
 #include "src/api.h"
+#include "src/base/cpu.h"
 #include "src/base/lazy-instance.h"
+#include "src/base/platform/platform.h"
 #include "src/builtins.h"
+#include "src/codegen.h"
 #include "src/counters.h"
-#include "src/cpu.h"
 #include "src/cpu-profiler.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
-#include "src/ic.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 #include "src/isolate-inl.h"
 #include "src/jsregexp.h"
-#include "src/platform.h"
 #include "src/regexp-macro-assembler.h"
 #include "src/regexp-stack.h"
 #include "src/runtime.h"
 #include "src/serialize.h"
-#include "src/store-buffer-inl.h"
-#include "src/stub-cache.h"
 #include "src/token.h"
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/assembler-ia32-inl.h"
+#include "src/ia32/assembler-ia32-inl.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/assembler-x64-inl.h"
+#include "src/x64/assembler-x64-inl.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/assembler-arm64-inl.h"
+#include "src/arm64/assembler-arm64-inl.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm-inl.h"
+#include "src/arm/assembler-arm-inl.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/assembler-mips-inl.h"
+#include "src/mips/assembler-mips-inl.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64-inl.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/assembler-x87-inl.h"
+#include "src/x87/assembler-x87-inl.h"  // NOLINT
 #else
 #error "Unknown architecture."
 #endif
@@ -75,17 +77,19 @@
 // Include native regexp-macro-assembler.
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/regexp-macro-assembler-ia32.h"
+#include "src/ia32/regexp-macro-assembler-ia32.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/regexp-macro-assembler-x64.h"
+#include "src/x64/regexp-macro-assembler-x64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/regexp-macro-assembler-arm64.h"
+#include "src/arm64/regexp-macro-assembler-arm64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/regexp-macro-assembler-arm.h"
+#include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/regexp-macro-assembler-mips.h"
+#include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/regexp-macro-assembler-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/regexp-macro-assembler-x87.h"
+#include "src/x87/regexp-macro-assembler-x87.h"  // NOLINT
 #else  // Unknown architecture.
 #error "Unknown architecture."
 #endif  // Target architecture.
@@ -98,16 +102,13 @@
 // Common double constants.
 
 struct DoubleConstant BASE_EMBEDDED {
-  double min_int;
-  double one_half;
-  double minus_one_half;
-  double minus_zero;
-  double zero;
-  double uint8_max_value;
-  double negative_infinity;
-  double canonical_non_hole_nan;
-  double the_hole_nan;
-  double uint32_bias;
+  double min_int;
+  double one_half;
+  double minus_one_half;
+  double negative_infinity;
+  double canonical_non_hole_nan;
+  double the_hole_nan;
+  double uint32_bias;
 };
 
 static DoubleConstant double_constants;
@@ -115,7 +116,7 @@
 const char* const RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
 
 static bool math_exp_data_initialized = false;
-static Mutex* math_exp_data_mutex = NULL;
+static base::Mutex* math_exp_data_mutex = NULL;
 static double* math_exp_constants_array = NULL;
 static double* math_exp_log_table_array = NULL;
 
@@ -133,22 +134,10 @@
   if (FLAG_mask_constants_with_cookie && isolate != NULL)  {
     jit_cookie_ = isolate->random_number_generator()->NextInt();
   }
-  if (buffer == NULL) {
-    // Do our own buffer management.
-    if (buffer_size <= kMinimalBufferSize) {
-      buffer_size = kMinimalBufferSize;
-      if (isolate->assembler_spare_buffer() != NULL) {
-        buffer = isolate->assembler_spare_buffer();
-        isolate->set_assembler_spare_buffer(NULL);
-      }
-    }
-    if (buffer == NULL) buffer = NewArray<byte>(buffer_size);
-    own_buffer_ = true;
-  } else {
-    // Use externally provided buffer instead.
-    ASSERT(buffer_size > 0);
-    own_buffer_ = false;
-  }
+  own_buffer_ = buffer == NULL;
+  if (buffer_size == 0) buffer_size = kMinimalBufferSize;
+  DCHECK(buffer_size > 0);
+  if (own_buffer_) buffer = NewArray<byte>(buffer_size);
   buffer_ = static_cast<byte*>(buffer);
   buffer_size_ = buffer_size;
 
@@ -157,15 +146,7 @@
 
 
 AssemblerBase::~AssemblerBase() {
-  if (own_buffer_) {
-    if (isolate() != NULL &&
-        isolate()->assembler_spare_buffer() == NULL &&
-        buffer_size_ == kMinimalBufferSize) {
-      isolate()->set_assembler_spare_buffer(buffer_);
-    } else {
-      DeleteArray(buffer_);
-    }
-  }
+  if (own_buffer_) DeleteArray(buffer_);
 }
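
The rewritten constructor and destructor above replace the spare-buffer cache with a single ownership flag. A self-contained sketch of the resulting own-or-borrow pattern follows; the class name is invented and the 4096 constant stands in for kMinimalBufferSize.

#include <cstddef>

// Own-or-borrow rule the new code follows: with no caller buffer we
// allocate and later free; with a caller buffer we borrow and never free.
class BufferHolder {
 public:
  BufferHolder(unsigned char* buffer, size_t buffer_size)
      : own_buffer_(buffer == nullptr),
        size_(buffer_size == 0 ? 4096 : buffer_size),  // ~ kMinimalBufferSize
        buffer_(own_buffer_ ? new unsigned char[size_] : buffer) {}

  ~BufferHolder() {
    if (own_buffer_) delete[] buffer_;  // a borrowed buffer is the caller's job
  }

  unsigned char* data() { return buffer_; }

 private:
  bool own_buffer_;
  size_t size_;
  unsigned char* buffer_;
};

int main() {
  BufferHolder owned(nullptr, 0);       // allocates; freed in the destructor
  unsigned char external[64];
  BufferHolder borrowed(external, 64);  // borrows; destructor leaves it alone
  owned.data()[0] = borrowed.data()[0] = 0;
}
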
 
 
@@ -197,7 +178,7 @@
 #ifdef DEBUG
 CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
     : assembler_(assembler) {
-  ASSERT(CpuFeatures::IsSupported(f));
+  DCHECK(CpuFeatures::IsSupported(f));
   old_enabled_ = assembler_->enabled_cpu_features();
   uint64_t mask = static_cast<uint64_t>(1) << f;
   // TODO(svenpanne) This special case below doesn't belong here!
@@ -354,7 +335,7 @@
   if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
   WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
   uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
-  ASSERT(pc_jump > 0);
+  DCHECK(pc_jump > 0);
   // Write kChunkBits size chunks of the pc_jump.
   for (; pc_jump > 0; pc_jump = pc_jump >> kChunkBits) {
     byte b = pc_jump & kChunkMask;
@@ -428,9 +409,9 @@
 #ifdef DEBUG
   byte* begin_pos = pos_;
 #endif
-  ASSERT(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
-  ASSERT(rinfo->pc() - last_pc_ >= 0);
-  ASSERT(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
+  DCHECK(rinfo->rmode() < RelocInfo::NUMBER_OF_MODES);
+  DCHECK(rinfo->pc() - last_pc_ >= 0);
+  DCHECK(RelocInfo::LAST_STANDARD_NONCOMPACT_ENUM - RelocInfo::LAST_COMPACT_ENUM
          <= kMaxStandardNonCompactModes);
   // Use unsigned delta-encoding for pc.
   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
@@ -441,10 +422,10 @@
     WriteTaggedPC(pc_delta, kEmbeddedObjectTag);
   } else if (rmode == RelocInfo::CODE_TARGET) {
     WriteTaggedPC(pc_delta, kCodeTargetTag);
-    ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
+    DCHECK(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
   } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
     // Use signed delta-encoding for id.
-    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
+    DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
     int id_delta = static_cast<int>(rinfo->data()) - last_id_;
     // Check if delta is small enough to fit in a tagged byte.
     if (is_intn(id_delta, kSmallDataBits)) {
@@ -458,7 +439,7 @@
     last_id_ = static_cast<int>(rinfo->data());
   } else if (RelocInfo::IsPosition(rmode)) {
     // Use signed delta-encoding for position.
-    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
+    DCHECK(static_cast<int>(rinfo->data()) == rinfo->data());
     int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
     int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
                                                       : kStatementPositionTag;
@@ -476,23 +457,23 @@
     // Comments are normally not generated, so we use the costly encoding.
     WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
     WriteExtraTaggedData(rinfo->data(), kCommentTag);
-    ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
+    DCHECK(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
   } else if (RelocInfo::IsConstPool(rmode) || RelocInfo::IsVeneerPool(rmode)) {
       WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
       WriteExtraTaggedPoolData(static_cast<int>(rinfo->data()),
                                RelocInfo::IsConstPool(rmode) ? kConstPoolTag
                                                              : kVeneerPoolTag);
   } else {
-    ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
+    DCHECK(rmode > RelocInfo::LAST_COMPACT_ENUM);
     int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
     // For all other modes we simply use the mode as the extra tag.
     // None of these modes need a data component.
-    ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
+    DCHECK(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
     WriteExtraTaggedPC(pc_delta, saved_mode);
   }
   last_pc_ = rinfo->pc();
 #ifdef DEBUG
-  ASSERT(begin_pos - pos_ <= kMaxSize);
+  DCHECK(begin_pos - pos_ <= kMaxSize);
 #endif
 }
 
@@ -598,7 +579,7 @@
 
 
 static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
-  ASSERT(tag == kNonstatementPositionTag ||
+  DCHECK(tag == kNonstatementPositionTag ||
          tag == kStatementPositionTag);
   return (tag == kNonstatementPositionTag) ?
          RelocInfo::POSITION :
@@ -607,7 +588,7 @@
 
 
 void RelocIterator::next() {
-  ASSERT(!done());
+  DCHECK(!done());
   // Basically, do the opposite of RelocInfoWriter::Write.
   // Reading of data is as far as possible avoided for unwanted modes,
   // but we must always update the pc.
@@ -633,7 +614,7 @@
       } else {
         // Compact encoding is never used for comments,
         // so it must be a position.
-        ASSERT(locatable_tag == kNonstatementPositionTag ||
+        DCHECK(locatable_tag == kNonstatementPositionTag ||
                locatable_tag == kStatementPositionTag);
         if (mode_mask_ & RelocInfo::kPositionMask) {
           ReadTaggedPosition();
@@ -641,7 +622,7 @@
         }
       }
     } else {
-      ASSERT(tag == kDefaultTag);
+      DCHECK(tag == kDefaultTag);
       int extra_tag = GetExtraTag();
       if (extra_tag == kPCJumpExtraTag) {
         if (GetTopTag() == kVariableLengthPCJumpTopTag) {
@@ -658,7 +639,7 @@
           }
           Advance(kIntSize);
         } else if (locatable_tag != kCommentTag) {
-          ASSERT(locatable_tag == kNonstatementPositionTag ||
+          DCHECK(locatable_tag == kNonstatementPositionTag ||
                  locatable_tag == kStatementPositionTag);
           if (mode_mask_ & RelocInfo::kPositionMask) {
             AdvanceReadPosition();
@@ -667,7 +648,7 @@
             Advance(kIntSize);
           }
         } else {
-          ASSERT(locatable_tag == kCommentTag);
+          DCHECK(locatable_tag == kCommentTag);
           if (SetMode(RelocInfo::COMMENT)) {
             AdvanceReadData();
             return;
@@ -676,7 +657,7 @@
         }
       } else if (extra_tag == kPoolExtraTag) {
         int pool_type = GetTopTag();
-        ASSERT(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
+        DCHECK(pool_type == kConstPoolTag || pool_type == kVeneerPoolTag);
         RelocInfo::Mode rmode = (pool_type == kConstPoolTag) ?
           RelocInfo::CONST_POOL : RelocInfo::VENEER_POOL;
         if (SetMode(rmode)) {
@@ -813,39 +794,36 @@
 }
 
 
-void RelocInfo::Print(Isolate* isolate, FILE* out) {
-  PrintF(out, "%p  %s", pc_, RelocModeName(rmode_));
+void RelocInfo::Print(Isolate* isolate, OStream& os) {  // NOLINT
+  os << pc_ << "  " << RelocModeName(rmode_);
   if (IsComment(rmode_)) {
-    PrintF(out, "  (%s)", reinterpret_cast<char*>(data_));
+    os << "  (" << reinterpret_cast<char*>(data_) << ")";
   } else if (rmode_ == EMBEDDED_OBJECT) {
-    PrintF(out, "  (");
-    target_object()->ShortPrint(out);
-    PrintF(out, ")");
+    os << "  (" << Brief(target_object()) << ")";
   } else if (rmode_ == EXTERNAL_REFERENCE) {
     ExternalReferenceEncoder ref_encoder(isolate);
-    PrintF(out, " (%s)  (%p)",
-           ref_encoder.NameOfAddress(target_reference()),
-           target_reference());
+    os << " (" << ref_encoder.NameOfAddress(target_reference()) << ")  ("
+       << target_reference() << ")";
   } else if (IsCodeTarget(rmode_)) {
     Code* code = Code::GetCodeFromTargetAddress(target_address());
-    PrintF(out, " (%s)  (%p)", Code::Kind2String(code->kind()),
-           target_address());
+    os << " (" << Code::Kind2String(code->kind()) << ")  (" << target_address()
+       << ")";
     if (rmode_ == CODE_TARGET_WITH_ID) {
-      PrintF(out, " (id=%d)", static_cast<int>(data_));
+      os << " (id=" << static_cast<int>(data_) << ")";
     }
   } else if (IsPosition(rmode_)) {
-    PrintF(out, "  (%" V8_PTR_PREFIX "d)", data());
+    os << "  (" << data() << ")";
   } else if (IsRuntimeEntry(rmode_) &&
              isolate->deoptimizer_data() != NULL) {
     // Deoptimization bailouts are stored as runtime entries.
     int id = Deoptimizer::GetDeoptimizationId(
         isolate, target_address(), Deoptimizer::EAGER);
     if (id != Deoptimizer::kNotDeoptimizationEntry) {
-      PrintF(out, "  (deoptimization bailout %d)", id);
+      os << "  (deoptimization bailout " << id << ")";
     }
   }
 
-  PrintF(out, "\n");
+  os << "\n";
 }
 #endif  // ENABLE_DISASSEMBLER
 
@@ -890,7 +868,7 @@
       UNREACHABLE();
       break;
     case CODE_AGE_SEQUENCE:
-      ASSERT(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode());
+      DCHECK(Code::IsYoungSequence(isolate, pc_) || code_age_stub()->IsCode());
       break;
   }
 }
@@ -904,16 +882,13 @@
   double_constants.min_int = kMinInt;
   double_constants.one_half = 0.5;
   double_constants.minus_one_half = -0.5;
-  double_constants.minus_zero = -0.0;
-  double_constants.uint8_max_value = 255;
-  double_constants.zero = 0.0;
-  double_constants.canonical_non_hole_nan = OS::nan_value();
-  double_constants.the_hole_nan = BitCast<double>(kHoleNanInt64);
+  double_constants.canonical_non_hole_nan = base::OS::nan_value();
+  double_constants.the_hole_nan = bit_cast<double>(kHoleNanInt64);
   double_constants.negative_infinity = -V8_INFINITY;
   double_constants.uint32_bias =
     static_cast<double>(static_cast<uint32_t>(0xFFFFFFFF)) + 1;
 
-  math_exp_data_mutex = new Mutex();
+  math_exp_data_mutex = new base::Mutex();
 }
 
 
@@ -921,7 +896,7 @@
   // Early return?
   if (math_exp_data_initialized) return;
 
-  LockGuard<Mutex> lock_guard(math_exp_data_mutex);
+  base::LockGuard<base::Mutex> lock_guard(math_exp_data_mutex);
   if (!math_exp_data_initialized) {
     // If this is changed, generated code must be adapted too.
     const int kTableSizeBits = 11;
@@ -949,9 +924,9 @@
     math_exp_log_table_array = new double[kTableSize];
     for (int i = 0; i < kTableSize; i++) {
       double value = std::pow(2, i / kTableSizeDouble);
-      uint64_t bits = BitCast<uint64_t, double>(value);
+      uint64_t bits = bit_cast<uint64_t, double>(value);
       bits &= (static_cast<uint64_t>(1) << 52) - 1;
-      double mantissa = BitCast<double, uint64_t>(bits);
+      double mantissa = bit_cast<double, uint64_t>(bits);
       math_exp_log_table_array[i] = mantissa;
     }
 
@@ -962,8 +937,11 @@
 
 void ExternalReference::TearDownMathExpData() {
   delete[] math_exp_constants_array;
+  math_exp_constants_array = NULL;
   delete[] math_exp_log_table_array;
+  math_exp_log_table_array = NULL;
   delete math_exp_data_mutex;
+  math_exp_data_mutex = NULL;
 }
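
The three added assignments make TearDownMathExpData idempotent: each global is reset to NULL right after it is freed. A tiny sketch of the same null-after-delete idiom, with invented globals:

#include <mutex>

// Nulling each pointer after delete makes a second TearDown call a
// harmless no-op instead of a double free.
static double* g_table = nullptr;
static std::mutex* g_lock = nullptr;

void Init() {
  g_table = new double[2048];
  g_lock = new std::mutex();
}

void TearDown() {
  delete[] g_table;
  g_table = nullptr;
  delete g_lock;
  g_lock = nullptr;
}

int main() {
  Init();
  TearDown();
  TearDown();  // safe: every pointer was reset to null
}
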
 
 
@@ -1031,7 +1009,8 @@
 
 
 ExternalReference ExternalReference::flush_icache_function(Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(CPU::FlushICache)));
+  return ExternalReference(
+      Redirect(isolate, FUNCTION_ADDR(CpuFeatures::FlushICache)));
 }
 
 
@@ -1163,13 +1142,6 @@
 }
 
 
-ExternalReference ExternalReference::heap_always_allocate_scope_depth(
-    Isolate* isolate) {
-  Heap* heap = isolate->heap();
-  return ExternalReference(heap->always_allocate_scope_depth_address());
-}
-
-
 ExternalReference ExternalReference::new_space_allocation_limit_address(
     Isolate* isolate) {
   return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
@@ -1262,23 +1234,6 @@
 }
 
 
-ExternalReference ExternalReference::address_of_minus_zero() {
-  return ExternalReference(
-      reinterpret_cast<void*>(&double_constants.minus_zero));
-}
-
-
-ExternalReference ExternalReference::address_of_zero() {
-  return ExternalReference(reinterpret_cast<void*>(&double_constants.zero));
-}
-
-
-ExternalReference ExternalReference::address_of_uint8_max_value() {
-  return ExternalReference(
-      reinterpret_cast<void*>(&double_constants.uint8_max_value));
-}
-
-
 ExternalReference ExternalReference::address_of_negative_infinity() {
   return ExternalReference(
       reinterpret_cast<void*>(&double_constants.negative_infinity));
@@ -1342,6 +1297,8 @@
   function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
 #elif V8_TARGET_ARCH_MIPS
   function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
+#elif V8_TARGET_ARCH_MIPS64
+  function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
 #elif V8_TARGET_ARCH_X87
   function = FUNCTION_ADDR(RegExpMacroAssemblerX87::CheckStackGuardState);
 #else
@@ -1399,14 +1356,14 @@
 
 
 ExternalReference ExternalReference::math_exp_constants(int constant_index) {
-  ASSERT(math_exp_data_initialized);
+  DCHECK(math_exp_data_initialized);
   return ExternalReference(
       reinterpret_cast<void*>(math_exp_constants_array + constant_index));
 }
 
 
 ExternalReference ExternalReference::math_exp_log_table() {
-  ASSERT(math_exp_data_initialized);
+  DCHECK(math_exp_data_initialized);
   return ExternalReference(reinterpret_cast<void*>(math_exp_log_table_array));
 }
 
@@ -1423,11 +1380,17 @@
 
 
 ExternalReference ExternalReference::cpu_features() {
-  ASSERT(CpuFeatures::initialized_);
+  DCHECK(CpuFeatures::initialized_);
   return ExternalReference(&CpuFeatures::supported_);
 }
 
 
+ExternalReference ExternalReference::debug_is_active_address(
+    Isolate* isolate) {
+  return ExternalReference(isolate->debug()->is_active_address());
+}
+
+
 ExternalReference ExternalReference::debug_after_break_target_address(
     Isolate* isolate) {
   return ExternalReference(isolate->debug()->after_break_target_address());
@@ -1500,7 +1463,7 @@
   // The checks for special cases can be dropped in ia32 because it has already
   // been done in generated code before bailing out here.
   if (std::isnan(y) || ((x == 1 || x == -1) && std::isinf(y))) {
-    return OS::nan_value();
+    return base::OS::nan_value();
   }
   return std::pow(x, y);
 }
@@ -1523,7 +1486,7 @@
 
 
 bool EvalComparison(Token::Value op, double op1, double op2) {
-  ASSERT(Token::IsCompareOp(op));
+  DCHECK(Token::IsCompareOp(op));
   switch (op) {
     case Token::EQ:
     case Token::EQ_STRICT: return (op1 == op2);
@@ -1559,14 +1522,9 @@
 
 
 void PositionsRecorder::RecordPosition(int pos) {
-  ASSERT(pos != RelocInfo::kNoPosition);
-  ASSERT(pos >= 0);
+  DCHECK(pos != RelocInfo::kNoPosition);
+  DCHECK(pos >= 0);
   state_.current_position = pos;
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  if (gdbjit_lineinfo_ != NULL) {
-    gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, false);
-  }
-#endif
   LOG_CODE_EVENT(assembler_->isolate(),
                  CodeLinePosInfoAddPositionEvent(jit_handler_data_,
                                                  assembler_->pc_offset(),
@@ -1575,14 +1533,9 @@
 
 
 void PositionsRecorder::RecordStatementPosition(int pos) {
-  ASSERT(pos != RelocInfo::kNoPosition);
-  ASSERT(pos >= 0);
+  DCHECK(pos != RelocInfo::kNoPosition);
+  DCHECK(pos >= 0);
   state_.current_statement_position = pos;
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  if (gdbjit_lineinfo_ != NULL) {
-    gdbjit_lineinfo_->SetPosition(assembler_->pc_offset(), pos, true);
-  }
-#endif
   LOG_CODE_EVENT(assembler_->isolate(),
                  CodeLinePosInfoAddStatementPositionEvent(
                      jit_handler_data_,
@@ -1618,38 +1571,4 @@
   return written;
 }
 
-
-MultiplierAndShift::MultiplierAndShift(int32_t d) {
-  ASSERT(d <= -2 || 2 <= d);
-  const uint32_t two31 = 0x80000000;
-  uint32_t ad = Abs(d);
-  uint32_t t = two31 + (uint32_t(d) >> 31);
-  uint32_t anc = t - 1 - t % ad;   // Absolute value of nc.
-  int32_t p = 31;                  // Init. p.
-  uint32_t q1 = two31 / anc;       // Init. q1 = 2**p/|nc|.
-  uint32_t r1 = two31 - q1 * anc;  // Init. r1 = rem(2**p, |nc|).
-  uint32_t q2 = two31 / ad;        // Init. q2 = 2**p/|d|.
-  uint32_t r2 = two31 - q2 * ad;   // Init. r2 = rem(2**p, |d|).
-  uint32_t delta;
-  do {
-    p++;
-    q1 *= 2;          // Update q1 = 2**p/|nc|.
-    r1 *= 2;          // Update r1 = rem(2**p, |nc|).
-    if (r1 >= anc) {  // Must be an unsigned comparison here.
-      q1++;
-      r1 = r1 - anc;
-    }
-    q2 *= 2;          // Update q2 = 2**p/|d|.
-    r2 *= 2;          // Update r2 = rem(2**p, |d|).
-    if (r2 >= ad) {   // Must be an unsigned comparison here.
-      q2++;
-      r2 = r2 - ad;
-    }
-    delta = ad - r2;
-  } while (q1 < delta || (q1 == delta && r1 == 0));
-  int32_t mul = static_cast<int32_t>(q2 + 1);
-  multiplier_ = (d < 0) ? -mul : mul;
-  shift_ = p - 32;
-}
-
 } }  // namespace v8::internal
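
The MultiplierAndShift class deleted above computed the magic (multiplier, shift) pair for signed division by a constant, per chapter 10 of Warren's Hacker's Delight; presumably an equivalent helper lives elsewhere after this refactor. Below is a worked instance of the technique for the fixed divisor 5, using the well-known magic pair that compilers emit.

#include <cassert>
#include <cstdint>
#include <initializer_list>

// Multiply-and-shift division: (0x66666667, 1) is the standard magic pair
// for a signed 32-bit divide by 5.
int32_t DivideBy5(int32_t n) {
  const int32_t kMultiplier = 0x66666667;
  const int kShift = 1;
  int64_t product = static_cast<int64_t>(n) * kMultiplier;
  int32_t hi = static_cast<int32_t>(product >> 32);  // multiply-high
  // Shift, then add the sign bit so negative results round toward zero.
  return (hi >> kShift) + static_cast<int32_t>(static_cast<uint32_t>(n) >> 31);
}

int main() {
  for (int32_t n : {-123456789, -17, -5, -1, 0, 1, 4, 5, 123456789}) {
    assert(DivideBy5(n) == n / 5);
  }
}
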
diff --git a/src/assembler.h b/src/assembler.h
index bbca793..3d2f7d9 100644
--- a/src/assembler.h
+++ b/src/assembler.h
@@ -66,6 +66,7 @@
   void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
 
   bool serializer_enabled() const { return serializer_enabled_; }
+  void enable_serializer() { serializer_enabled_ = true; }
 
   bool predictable_code_size() const { return predictable_code_size_; }
   void set_predictable_code_size(bool value) { predictable_code_size_ = value; }
@@ -175,6 +176,11 @@
     ProbeImpl(cross_compile);
   }
 
+  static unsigned SupportedFeatures() {
+    Probe(false);
+    return supported_;
+  }
+
   static bool IsSupported(CpuFeature f) {
     return (supported_ & (1u << f)) != 0;
   }
@@ -182,13 +188,16 @@
   static inline bool SupportsCrankshaft();
 
   static inline unsigned cache_line_size() {
-    ASSERT(cache_line_size_ != 0);
+    DCHECK(cache_line_size_ != 0);
     return cache_line_size_;
   }
 
   static void PrintTarget();
   static void PrintFeatures();
 
+  // Flush instruction cache.
+  static void FlushICache(void* start, size_t size);
+
  private:
   // Platform-dependent implementation.
   static void ProbeImpl(bool cross_compile);
@@ -219,8 +228,8 @@
   }
 
   INLINE(~Label()) {
-    ASSERT(!is_linked());
-    ASSERT(!is_near_linked());
+    DCHECK(!is_linked());
+    DCHECK(!is_near_linked());
   }
 
   INLINE(void Unuse()) { pos_ = 0; }
@@ -250,15 +259,15 @@
 
   void bind_to(int pos)  {
     pos_ = -pos - 1;
-    ASSERT(is_bound());
+    DCHECK(is_bound());
   }
   void link_to(int pos, Distance distance = kFar) {
     if (distance == kNear) {
       near_link_pos_ = pos + 1;
-      ASSERT(is_near_linked());
+      DCHECK(is_near_linked());
     } else {
       pos_ = pos + 1;
-      ASSERT(is_linked());
+      DCHECK(is_linked());
     }
   }
 
@@ -380,7 +389,7 @@
         mode <= LAST_REAL_RELOC_MODE;
   }
   static inline bool IsPseudoRelocMode(Mode mode) {
-    ASSERT(!IsRealRelocMode(mode));
+    DCHECK(!IsRealRelocMode(mode));
     return mode >= FIRST_PSEUDO_RELOC_MODE &&
         mode <= LAST_PSEUDO_RELOC_MODE;
   }
@@ -450,9 +459,7 @@
   Mode rmode() const {  return rmode_; }
   intptr_t data() const { return data_; }
   double data64() const { return data64_; }
-  uint64_t raw_data64() {
-    return BitCast<uint64_t>(data64_);
-  }
+  uint64_t raw_data64() { return bit_cast<uint64_t>(data64_); }
   Code* host() const { return host_; }
   void set_host(Code* host) { host_ = host; }
 
@@ -571,7 +578,7 @@
 #ifdef ENABLE_DISASSEMBLER
   // Printing
   static const char* RelocModeName(Mode rmode);
-  void Print(Isolate* isolate, FILE* out);
+  void Print(Isolate* isolate, OStream& os);  // NOLINT
 #endif  // ENABLE_DISASSEMBLER
 #ifdef VERIFY_HEAP
   void Verify(Isolate* isolate);
@@ -677,7 +684,7 @@
 
   // Return pointer valid until next next().
   RelocInfo* rinfo() {
-    ASSERT(!done());
+    DCHECK(!done());
     return &rinfo_;
   }
 
@@ -765,12 +772,12 @@
     PROFILING_API_CALL,
 
     // Direct call to accessor getter callback.
-    // void f(Local<String> property, PropertyCallbackInfo& info)
+    // void f(Local<Name> property, PropertyCallbackInfo& info)
     DIRECT_GETTER_CALL,
 
     // Call to accessor getter callback via InvokeAccessorGetterCallback.
-    // void f(Local<String> property, PropertyCallbackInfo& info,
-    //     AccessorGetterCallback callback)
+    // void f(Local<Name> property, PropertyCallbackInfo& info,
+    //     AccessorNameGetterCallback callback)
     PROFILING_GETTER_CALL
   };
 
@@ -857,8 +864,6 @@
   // Static variable Heap::NewSpaceStart()
   static ExternalReference new_space_start(Isolate* isolate);
   static ExternalReference new_space_mask(Isolate* isolate);
-  static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
-  static ExternalReference new_space_mark_bits(Isolate* isolate);
 
   // Write barrier.
   static ExternalReference store_buffer_top(Isolate* isolate);
@@ -892,9 +897,6 @@
   static ExternalReference address_of_min_int();
   static ExternalReference address_of_one_half();
   static ExternalReference address_of_minus_one_half();
-  static ExternalReference address_of_minus_zero();
-  static ExternalReference address_of_zero();
-  static ExternalReference address_of_uint8_max_value();
   static ExternalReference address_of_negative_infinity();
   static ExternalReference address_of_canonical_non_hole_nan();
   static ExternalReference address_of_the_hole_nan();
@@ -911,6 +913,7 @@
 
   static ExternalReference cpu_features();
 
+  static ExternalReference debug_is_active_address(Isolate* isolate);
   static ExternalReference debug_after_break_target_address(Isolate* isolate);
   static ExternalReference debug_restarter_frame_function_pointer_address(
       Isolate* isolate);
@@ -949,7 +952,7 @@
   static void set_redirector(Isolate* isolate,
                              ExternalReferenceRedirector* redirector) {
     // We can't stack them.
-    ASSERT(isolate->external_reference_redirector() == NULL);
+    DCHECK(isolate->external_reference_redirector() == NULL);
     isolate->set_external_reference_redirector(
         reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
   }
@@ -969,17 +972,6 @@
       : address_(address) {}
 
   static void* Redirect(Isolate* isolate,
-                        void* address,
-                        Type type = ExternalReference::BUILTIN_CALL) {
-    ExternalReferenceRedirector* redirector =
-        reinterpret_cast<ExternalReferenceRedirector*>(
-            isolate->external_reference_redirector());
-    if (redirector == NULL) return address;
-    void* answer = (*redirector)(address, type);
-    return answer;
-  }
-
-  static void* Redirect(Isolate* isolate,
                         Address address_arg,
                         Type type = ExternalReference::BUILTIN_CALL) {
     ExternalReferenceRedirector* redirector =
@@ -1017,29 +1009,9 @@
  public:
   explicit PositionsRecorder(Assembler* assembler)
       : assembler_(assembler) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
-    gdbjit_lineinfo_ = NULL;
-#endif
     jit_handler_data_ = NULL;
   }
 
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  ~PositionsRecorder() {
-    delete gdbjit_lineinfo_;
-  }
-
-  void StartGDBJITLineInfoRecording() {
-    if (FLAG_gdbjit) {
-      gdbjit_lineinfo_ = new GDBJITLineInfo();
-    }
-  }
-
-  GDBJITLineInfo* DetachGDBJITLineInfo() {
-    GDBJITLineInfo* lineinfo = gdbjit_lineinfo_;
-    gdbjit_lineinfo_ = NULL;  // To prevent deallocation in destructor.
-    return lineinfo;
-  }
-#endif
   void AttachJITHandlerData(void* user_data) {
     jit_handler_data_ = user_data;
   }
@@ -1067,9 +1039,6 @@
  private:
   Assembler* assembler_;
   PositionState state_;
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  GDBJITLineInfo* gdbjit_lineinfo_;
-#endif
 
   // Currently jit_handler_data_ is used to store JITHandler-specific data
   // over the lifetime of a PositionsRecorder
@@ -1139,20 +1108,6 @@
 };
 
 
-// The multiplier and shift for signed division via multiplication, see Warren's
-// "Hacker's Delight", chapter 10.
-class MultiplierAndShift {
- public:
-  explicit MultiplierAndShift(int32_t d);
-  int32_t multiplier() const { return multiplier_; }
-  int32_t shift() const { return shift_; }
-
- private:
-  int32_t multiplier_;
-  int32_t shift_;
-};
-
-
 } }  // namespace v8::internal
 
 #endif  // V8_ASSEMBLER_H_
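
Much of the churn in assembler.h and assembler.cc is the mechanical ASSERT-to-DCHECK rename. A simplified sketch of what a debug-only check macro of this kind expands to (V8's real definition differs in detail):

#include <cstdio>
#include <cstdlib>

// Debug-only check: active when DEBUG is defined, compiled away otherwise,
// in contrast to a hard CHECK that always runs.
#ifdef DEBUG
#define DCHECK(condition)                                      \
  do {                                                         \
    if (!(condition)) {                                        \
      std::fprintf(stderr, "DCHECK failed: %s\n", #condition); \
      std::abort();                                            \
    }                                                          \
  } while (false)
#else
#define DCHECK(condition) ((void)0)
#endif

int main() {
  DCHECK(1 + 1 == 2);  // fatal in debug builds, a no-op in release builds
}
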
diff --git a/src/assert-scope.h b/src/assert-scope.h
index 14e1194..7cfec56 100644
--- a/src/assert-scope.h
+++ b/src/assert-scope.h
@@ -6,7 +6,7 @@
 #define V8_ASSERT_SCOPE_H_
 
 #include "src/allocation.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -28,7 +28,8 @@
   JAVASCRIPT_EXECUTION_ASSERT,
   JAVASCRIPT_EXECUTION_THROWS,
   ALLOCATION_FAILURE_ASSERT,
-  DEOPTIMIZATION_ASSERT
+  DEOPTIMIZATION_ASSERT,
+  COMPILATION_ASSERT
 };
 
 
@@ -73,7 +74,7 @@
   ~PerThreadAssertScopeBase() {
     if (!data_->decrement_level()) return;
     for (int i = 0; i < LAST_PER_THREAD_ASSERT_TYPE; i++) {
-      ASSERT(data_->get(static_cast<PerThreadAssertType>(i)));
+      DCHECK(data_->get(static_cast<PerThreadAssertType>(i)));
     }
     delete data_;
     SetThreadLocalData(NULL);
@@ -81,16 +82,16 @@
 
   static PerThreadAssertData* GetAssertData() {
     return reinterpret_cast<PerThreadAssertData*>(
-        Thread::GetThreadLocal(thread_local_key));
+        base::Thread::GetThreadLocal(thread_local_key));
   }
 
-  static Thread::LocalStorageKey thread_local_key;
+  static base::Thread::LocalStorageKey thread_local_key;
   PerThreadAssertData* data_;
   friend class Isolate;
 
  private:
   static void SetThreadLocalData(PerThreadAssertData* data) {
-    Thread::SetThreadLocal(thread_local_key, data);
+    base::Thread::SetThreadLocal(thread_local_key, data);
   }
 };
 
@@ -254,6 +255,13 @@
 typedef PerIsolateAssertScopeDebugOnly<DEOPTIMIZATION_ASSERT, true>
     AllowDeoptimization;
 
+// Scope to document where we do not expect compilation.
+typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, false>
+    DisallowCompilation;
+
+// Scope to introduce an exception to DisallowCompilation.
+typedef PerIsolateAssertScopeDebugOnly<COMPILATION_ASSERT, true>
+    AllowCompilation;
 } }  // namespace v8::internal
 
 #endif  // V8_ASSERT_SCOPE_H_
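
DisallowCompilation and AllowCompilation follow the same save-override-restore scope-guard shape as the other assert scopes in this header. A self-contained sketch of that shape, with all names invented rather than V8's:

#include <cassert>

// The constructor saves the current flag and overrides it; the destructor
// restores it, so Allow and Disallow scopes nest correctly.
struct CompilationAllowedFlag {
  static bool value;
};
bool CompilationAllowedFlag::value = true;

template <bool kAllow>
class CompilationScope {
 public:
  CompilationScope() : old_(CompilationAllowedFlag::value) {
    CompilationAllowedFlag::value = kAllow;
  }
  ~CompilationScope() { CompilationAllowedFlag::value = old_; }

 private:
  bool old_;
};

using DisallowCompilationSketch = CompilationScope<false>;
using AllowCompilationSketch = CompilationScope<true>;

int main() {
  {
    DisallowCompilationSketch no_compile;
    assert(!CompilationAllowedFlag::value);
    {
      AllowCompilationSketch allow;  // narrow exception inside a Disallow scope
      assert(CompilationAllowedFlag::value);
    }
    assert(!CompilationAllowedFlag::value);
  }
  assert(CompilationAllowedFlag::value);  // fully restored on scope exit
}
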
diff --git a/src/ast-value-factory.cc b/src/ast-value-factory.cc
new file mode 100644
index 0000000..ea8474f
--- /dev/null
+++ b/src/ast-value-factory.cc
@@ -0,0 +1,409 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/ast-value-factory.h"
+
+#include "src/api.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+// For using StringToArrayIndex.
+class OneByteStringStream {
+ public:
+  explicit OneByteStringStream(Vector<const byte> lb) :
+      literal_bytes_(lb), pos_(0) {}
+
+  bool HasMore() { return pos_ < literal_bytes_.length(); }
+  uint16_t GetNext() { return literal_bytes_[pos_++]; }
+
+ private:
+  Vector<const byte> literal_bytes_;
+  int pos_;
+};
+
+}  // namespace
+
+class AstRawStringInternalizationKey : public HashTableKey {
+ public:
+  explicit AstRawStringInternalizationKey(const AstRawString* string)
+      : string_(string) {}
+
+  virtual bool IsMatch(Object* other) OVERRIDE {
+    if (string_->is_one_byte_)
+      return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
+    return String::cast(other)->IsTwoByteEqualTo(
+        Vector<const uint16_t>::cast(string_->literal_bytes_));
+  }
+
+  virtual uint32_t Hash() OVERRIDE {
+    return string_->hash() >> Name::kHashShift;
+  }
+
+  virtual uint32_t HashForObject(Object* key) OVERRIDE {
+    return String::cast(key)->Hash();
+  }
+
+  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
+    if (string_->is_one_byte_)
+      return isolate->factory()->NewOneByteInternalizedString(
+          string_->literal_bytes_, string_->hash());
+    return isolate->factory()->NewTwoByteInternalizedString(
+        Vector<const uint16_t>::cast(string_->literal_bytes_), string_->hash());
+  }
+
+ private:
+  const AstRawString* string_;
+};
+
+
+void AstRawString::Internalize(Isolate* isolate) {
+  if (!string_.is_null()) return;
+  if (literal_bytes_.length() == 0) {
+    string_ = isolate->factory()->empty_string();
+  } else {
+    AstRawStringInternalizationKey key(this);
+    string_ = StringTable::LookupKey(isolate, &key);
+  }
+}
+
+
+bool AstRawString::AsArrayIndex(uint32_t* index) const {
+  if (!string_.is_null())
+    return string_->AsArrayIndex(index);
+  if (!is_one_byte_ || literal_bytes_.length() == 0 ||
+      literal_bytes_.length() > String::kMaxArrayIndexSize)
+    return false;
+  OneByteStringStream stream(literal_bytes_);
+  return StringToArrayIndex(&stream, index);
+}
+
+
+bool AstRawString::IsOneByteEqualTo(const char* data) const {
+  int length = static_cast<int>(strlen(data));
+  if (is_one_byte_ && literal_bytes_.length() == length) {
+    const char* token = reinterpret_cast<const char*>(literal_bytes_.start());
+    return !strncmp(token, data, length);
+  }
+  return false;
+}
+
+
+bool AstRawString::Compare(void* a, void* b) {
+  AstRawString* string1 = reinterpret_cast<AstRawString*>(a);
+  AstRawString* string2 = reinterpret_cast<AstRawString*>(b);
+  if (string1->is_one_byte_ != string2->is_one_byte_) return false;
+  if (string1->hash_ != string2->hash_) return false;
+  int length = string1->literal_bytes_.length();
+  if (string2->literal_bytes_.length() != length) return false;
+  return memcmp(string1->literal_bytes_.start(),
+                string2->literal_bytes_.start(), length) == 0;
+}
+
+
+void AstConsString::Internalize(Isolate* isolate) {
+  // AstRawStrings are internalized before AstConsStrings so left and right are
+  // already internalized.
+  string_ = isolate->factory()
+                ->NewConsString(left_->string(), right_->string())
+                .ToHandleChecked();
+}
+
+
+bool AstValue::IsPropertyName() const {
+  if (type_ == STRING) {
+    uint32_t index;
+    return !string_->AsArrayIndex(&index);
+  }
+  return false;
+}
+
+
+bool AstValue::BooleanValue() const {
+  switch (type_) {
+    case STRING:
+      DCHECK(string_ != NULL);
+      return !string_->IsEmpty();
+    case SYMBOL:
+      UNREACHABLE();
+      break;
+    case NUMBER:
+      return DoubleToBoolean(number_);
+    case SMI:
+      return smi_ != 0;
+    case STRING_ARRAY:
+      UNREACHABLE();
+      break;
+    case BOOLEAN:
+      return bool_;
+    case NULL_TYPE:
+      return false;
+    case THE_HOLE:
+      UNREACHABLE();
+      break;
+    case UNDEFINED:
+      return false;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+void AstValue::Internalize(Isolate* isolate) {
+  switch (type_) {
+    case STRING:
+      DCHECK(string_ != NULL);
+      // Strings are already internalized.
+      DCHECK(!string_->string().is_null());
+      break;
+    case SYMBOL:
+      value_ = Object::GetProperty(
+                   isolate, handle(isolate->native_context()->builtins()),
+                   symbol_name_).ToHandleChecked();
+      break;
+    case NUMBER:
+      value_ = isolate->factory()->NewNumber(number_, TENURED);
+      break;
+    case SMI:
+      value_ = handle(Smi::FromInt(smi_), isolate);
+      break;
+    case BOOLEAN:
+      if (bool_) {
+        value_ = isolate->factory()->true_value();
+      } else {
+        value_ = isolate->factory()->false_value();
+      }
+      break;
+    case STRING_ARRAY: {
+      DCHECK(strings_ != NULL);
+      Factory* factory = isolate->factory();
+      int len = strings_->length();
+      Handle<FixedArray> elements = factory->NewFixedArray(len, TENURED);
+      for (int i = 0; i < len; i++) {
+        const AstRawString* string = (*strings_)[i];
+        Handle<Object> element = string->string();
+        // Strings are already internalized.
+        DCHECK(!element.is_null());
+        elements->set(i, *element);
+      }
+      value_ =
+          factory->NewJSArrayWithElements(elements, FAST_ELEMENTS, TENURED);
+      break;
+    }
+    case NULL_TYPE:
+      value_ = isolate->factory()->null_value();
+      break;
+    case THE_HOLE:
+      value_ = isolate->factory()->the_hole_value();
+      break;
+    case UNDEFINED:
+      value_ = isolate->factory()->undefined_value();
+      break;
+  }
+}
+
+
+const AstRawString* AstValueFactory::GetOneByteString(
+    Vector<const uint8_t> literal) {
+  uint32_t hash = StringHasher::HashSequentialString<uint8_t>(
+      literal.start(), literal.length(), hash_seed_);
+  return GetString(hash, true, literal);
+}
+
+
+const AstRawString* AstValueFactory::GetTwoByteString(
+    Vector<const uint16_t> literal) {
+  uint32_t hash = StringHasher::HashSequentialString<uint16_t>(
+      literal.start(), literal.length(), hash_seed_);
+  return GetString(hash, false, Vector<const byte>::cast(literal));
+}
+
+
+const AstRawString* AstValueFactory::GetString(Handle<String> literal) {
+  DisallowHeapAllocation no_gc;
+  String::FlatContent content = literal->GetFlatContent();
+  if (content.IsOneByte()) {
+    return GetOneByteString(content.ToOneByteVector());
+  }
+  DCHECK(content.IsTwoByte());
+  return GetTwoByteString(content.ToUC16Vector());
+}
+
+
+const AstConsString* AstValueFactory::NewConsString(
+    const AstString* left, const AstString* right) {
+  // This Vector will be valid as long as the Collector is alive (meaning that
+  // the AstRawString will not be moved).
+  AstConsString* new_string = new (zone_) AstConsString(left, right);
+  strings_.Add(new_string);
+  if (isolate_) {
+    new_string->Internalize(isolate_);
+  }
+  return new_string;
+}
+
+
+void AstValueFactory::Internalize(Isolate* isolate) {
+  if (isolate_) {
+    // Everything is already internalized.
+    return;
+  }
+  // Strings need to be internalized before values, because values refer to
+  // strings.
+  for (int i = 0; i < strings_.length(); ++i) {
+    strings_[i]->Internalize(isolate);
+  }
+  for (int i = 0; i < values_.length(); ++i) {
+    values_[i]->Internalize(isolate);
+  }
+  isolate_ = isolate;
+}
+
+
+const AstValue* AstValueFactory::NewString(const AstRawString* string) {
+  AstValue* value = new (zone_) AstValue(string);
+  DCHECK(string != NULL);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstValue* AstValueFactory::NewSymbol(const char* name) {
+  AstValue* value = new (zone_) AstValue(name);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstValue* AstValueFactory::NewNumber(double number) {
+  AstValue* value = new (zone_) AstValue(number);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstValue* AstValueFactory::NewSmi(int number) {
+  AstValue* value =
+      new (zone_) AstValue(AstValue::SMI, number);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstValue* AstValueFactory::NewBoolean(bool b) {
+  AstValue* value = new (zone_) AstValue(b);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstValue* AstValueFactory::NewStringList(
+    ZoneList<const AstRawString*>* strings) {
+  AstValue* value = new (zone_) AstValue(strings);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstValue* AstValueFactory::NewNull() {
+  AstValue* value = new (zone_) AstValue(AstValue::NULL_TYPE);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstValue* AstValueFactory::NewUndefined() {
+  AstValue* value = new (zone_) AstValue(AstValue::UNDEFINED);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstValue* AstValueFactory::NewTheHole() {
+  AstValue* value = new (zone_) AstValue(AstValue::THE_HOLE);
+  if (isolate_) {
+    value->Internalize(isolate_);
+  }
+  values_.Add(value);
+  return value;
+}
+
+
+const AstRawString* AstValueFactory::GetString(
+    uint32_t hash, bool is_one_byte, Vector<const byte> literal_bytes) {
+  // literal_bytes here points to whatever the user passed, and this is OK
+  // because we use vector_compare (which checks the contents) to compare
+  // against the AstRawStrings which are in the string_table_. We should not
+  // return this AstRawString.
+  AstRawString key(is_one_byte, literal_bytes, hash);
+  HashMap::Entry* entry = string_table_.Lookup(&key, hash, true);
+  if (entry->value == NULL) {
+    // Copy literal contents for later comparison.
+    int length = literal_bytes.length();
+    byte* new_literal_bytes = zone_->NewArray<byte>(length);
+    memcpy(new_literal_bytes, literal_bytes.start(), length);
+    AstRawString* new_string = new (zone_) AstRawString(
+        is_one_byte, Vector<const byte>(new_literal_bytes, length), hash);
+    entry->key = new_string;
+    strings_.Add(new_string);
+    if (isolate_) {
+      new_string->Internalize(isolate_);
+    }
+    entry->value = reinterpret_cast<void*>(1);
+  }
+  return reinterpret_cast<AstRawString*>(entry->key);
+}
+
+
+} }  // namespace v8::internal
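
AstValueFactory::GetString above is a classic interning (hash-consing) table: the first lookup copies the literal bytes into the table, and later lookups with equal contents return the same canonical AstRawString, so equality can be a pointer compare. A self-contained sketch of the idea, with std::unordered_set standing in for the zone-backed HashMap:

#include <cassert>
#include <string>
#include <unordered_set>

class InternTable {
 public:
  const std::string* Intern(const std::string& s) {
    // insert() copies on a miss and finds the existing element on a hit;
    // element addresses in unordered_set stay stable across rehashing.
    return &*table_.insert(s).first;
  }

 private:
  std::unordered_set<std::string> table_;
};

int main() {
  InternTable table;
  const std::string* a = table.Intern("prototype");
  const std::string* b = table.Intern("prototype");
  assert(a == b);  // one canonical copy, so pointer equality suffices
}
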
diff --git a/src/ast-value-factory.h b/src/ast-value-factory.h
new file mode 100644
index 0000000..2f84163
--- /dev/null
+++ b/src/ast-value-factory.h
@@ -0,0 +1,345 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_AST_VALUE_FACTORY_H_
+#define V8_AST_VALUE_FACTORY_H_
+
+#include "src/api.h"
+#include "src/hashmap.h"
+#include "src/utils.h"
+
+// AstString, AstValue and AstValueFactory are for storing strings and values
+// independent of the V8 heap and internalizing them later. During parsing,
+// AstStrings and AstValues are created and stored outside the heap, in
+// AstValueFactory. After parsing, the strings and values are internalized
+// (moved into the V8 heap).
+namespace v8 {
+namespace internal {
+
+class AstString : public ZoneObject {
+ public:
+  virtual ~AstString() {}
+
+  virtual int length() const = 0;
+  bool IsEmpty() const { return length() == 0; }
+
+  // Puts the string into the V8 heap.
+  virtual void Internalize(Isolate* isolate) = 0;
+
+  // This function can be called after internalizing.
+  V8_INLINE Handle<String> string() const {
+    DCHECK(!string_.is_null());
+    return string_;
+  }
+
+ protected:
+  // This is null until the string is internalized.
+  Handle<String> string_;
+};
+
+
+class AstRawString : public AstString {
+ public:
+  virtual int length() const OVERRIDE {
+    if (is_one_byte_)
+      return literal_bytes_.length();
+    return literal_bytes_.length() / 2;
+  }
+
+  virtual void Internalize(Isolate* isolate) OVERRIDE;
+
+  bool AsArrayIndex(uint32_t* index) const;
+
+  // The string is not null-terminated; use length() to find its length.
+  const unsigned char* raw_data() const {
+    return literal_bytes_.start();
+  }
+  bool is_one_byte() const { return is_one_byte_; }
+  bool IsOneByteEqualTo(const char* data) const;
+  uint16_t FirstCharacter() const {
+    if (is_one_byte_)
+      return literal_bytes_[0];
+    const uint16_t* c =
+        reinterpret_cast<const uint16_t*>(literal_bytes_.start());
+    return *c;
+  }
+
+  // For storing AstRawStrings in a hash map.
+  uint32_t hash() const {
+    return hash_;
+  }
+  static bool Compare(void* a, void* b);
+
+ private:
+  friend class AstValueFactory;
+  friend class AstRawStringInternalizationKey;
+
+  AstRawString(bool is_one_byte, const Vector<const byte>& literal_bytes,
+            uint32_t hash)
+      : is_one_byte_(is_one_byte), literal_bytes_(literal_bytes), hash_(hash) {}
+
+  AstRawString()
+      : is_one_byte_(true),
+        hash_(0) {}
+
+  bool is_one_byte_;
+
+  // Points to memory owned by Zone.
+  Vector<const byte> literal_bytes_;
+  uint32_t hash_;
+};
+
+
+class AstConsString : public AstString {
+ public:
+  AstConsString(const AstString* left, const AstString* right)
+      : left_(left),
+        right_(right) {}
+
+  virtual int length() const OVERRIDE {
+    return left_->length() + right_->length();
+  }
+
+  virtual void Internalize(Isolate* isolate) OVERRIDE;
+
+ private:
+  friend class AstValueFactory;
+
+  const AstString* left_;
+  const AstString* right_;
+};
+
+
+// AstValue is either a string, a number, a string array, a boolean, or a
+// special value (null, undefined, the hole).
+class AstValue : public ZoneObject {
+ public:
+  bool IsString() const {
+    return type_ == STRING;
+  }
+
+  bool IsNumber() const {
+    return type_ == NUMBER || type_ == SMI;
+  }
+
+  const AstRawString* AsString() const {
+    if (type_ == STRING)
+      return string_;
+    UNREACHABLE();
+    return 0;
+  }
+
+  double AsNumber() const {
+    if (type_ == NUMBER)
+      return number_;
+    if (type_ == SMI)
+      return smi_;
+    UNREACHABLE();
+    return 0;
+  }
+
+  bool EqualsString(const AstRawString* string) const {
+    return type_ == STRING && string_ == string;
+  }
+
+  bool IsPropertyName() const;
+
+  bool BooleanValue() const;
+
+  void Internalize(Isolate* isolate);
+
+  // Can be called after Internalize has been called.
+  V8_INLINE Handle<Object> value() const {
+    if (type_ == STRING) {
+      return string_->string();
+    }
+    DCHECK(!value_.is_null());
+    return value_;
+  }
+
+ private:
+  friend class AstValueFactory;
+
+  enum Type {
+    STRING,
+    SYMBOL,
+    NUMBER,
+    SMI,
+    BOOLEAN,
+    STRING_ARRAY,
+    NULL_TYPE,
+    UNDEFINED,
+    THE_HOLE
+  };
+
+  explicit AstValue(const AstRawString* s) : type_(STRING) { string_ = s; }
+
+  explicit AstValue(const char* name) : type_(SYMBOL) { symbol_name_ = name; }
+
+  explicit AstValue(double n) : type_(NUMBER) { number_ = n; }
+
+  AstValue(Type t, int i) : type_(t) {
+    DCHECK(type_ == SMI);
+    smi_ = i;
+  }
+
+  explicit AstValue(bool b) : type_(BOOLEAN) { bool_ = b; }
+
+  explicit AstValue(ZoneList<const AstRawString*>* s) : type_(STRING_ARRAY) {
+    strings_ = s;
+  }
+
+  explicit AstValue(Type t) : type_(t) {
+    DCHECK(t == NULL_TYPE || t == UNDEFINED || t == THE_HOLE);
+  }
+
+  Type type_;
+
+  // Uninternalized value.
+  union {
+    const AstRawString* string_;
+    double number_;
+    int smi_;
+    bool bool_;
+    ZoneList<const AstRawString*>* strings_;
+    const char* symbol_name_;
+  };
+
+  // Internalized value (empty before internalized).
+  Handle<Object> value_;
+};
+
+
+// For generating string constants.
+#define STRING_CONSTANTS(F)                           \
+  F(anonymous_function, "(anonymous function)")       \
+  F(arguments, "arguments")                           \
+  F(constructor, "constructor")                       \
+  F(done, "done")                                     \
+  F(dot, ".")                                         \
+  F(dot_for, ".for")                                  \
+  F(dot_generator, ".generator")                      \
+  F(dot_generator_object, ".generator_object")        \
+  F(dot_iterator, ".iterator")                        \
+  F(dot_module, ".module")                            \
+  F(dot_result, ".result")                            \
+  F(empty, "")                                        \
+  F(eval, "eval")                                     \
+  F(initialize_const_global, "initializeConstGlobal") \
+  F(initialize_var_global, "initializeVarGlobal")     \
+  F(make_reference_error, "MakeReferenceError")       \
+  F(make_syntax_error, "MakeSyntaxError")             \
+  F(make_type_error, "MakeTypeError")                 \
+  F(module, "module")                                 \
+  F(native, "native")                                 \
+  F(next, "next")                                     \
+  F(proto, "__proto__")                               \
+  F(prototype, "prototype")                           \
+  F(this, "this")                                     \
+  F(use_asm, "use asm")                               \
+  F(use_strict, "use strict")                         \
+  F(value, "value")
+
+
+class AstValueFactory {
+ public:
+  AstValueFactory(Zone* zone, uint32_t hash_seed)
+      : string_table_(AstRawString::Compare),
+        zone_(zone),
+        isolate_(NULL),
+        hash_seed_(hash_seed) {
+#define F(name, str) \
+    name##_string_ = NULL;
+    STRING_CONSTANTS(F)
+#undef F
+  }
+
+  const AstRawString* GetOneByteString(Vector<const uint8_t> literal);
+  const AstRawString* GetOneByteString(const char* string) {
+    return GetOneByteString(Vector<const uint8_t>(
+        reinterpret_cast<const uint8_t*>(string), StrLength(string)));
+  }
+  const AstRawString* GetTwoByteString(Vector<const uint16_t> literal);
+  const AstRawString* GetString(Handle<String> literal);
+  const AstConsString* NewConsString(const AstString* left,
+                                     const AstString* right);
+
+  void Internalize(Isolate* isolate);
+  bool IsInternalized() {
+    return isolate_ != NULL;
+  }
+
+#define F(name, str) \
+  const AstRawString* name##_string() { \
+    if (name##_string_ == NULL) { \
+      const char* data = str; \
+      name##_string_ = GetOneByteString( \
+          Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(data), \
+                                static_cast<int>(strlen(data)))); \
+    } \
+    return name##_string_; \
+  }
+  STRING_CONSTANTS(F)
+#undef F
+
+  const AstValue* NewString(const AstRawString* string);
+  // A JavaScript symbol (ECMA-262 edition 6).
+  const AstValue* NewSymbol(const char* name);
+  const AstValue* NewNumber(double number);
+  const AstValue* NewSmi(int number);
+  const AstValue* NewBoolean(bool b);
+  const AstValue* NewStringList(ZoneList<const AstRawString*>* strings);
+  const AstValue* NewNull();
+  const AstValue* NewUndefined();
+  const AstValue* NewTheHole();
+
+ private:
+  const AstRawString* GetString(uint32_t hash, bool is_one_byte,
+                                Vector<const byte> literal_bytes);
+
+  // All strings are copied here, one after another (no NULLs in between).
+  HashMap string_table_;
+  // For keeping track of all AstValues and AstRawStrings we've created (so that
+  // they can be internalized later).
+  List<AstValue*> values_;
+  List<AstString*> strings_;
+  Zone* zone_;
+  Isolate* isolate_;
+
+  uint32_t hash_seed_;
+
+#define F(name, str) \
+  const AstRawString* name##_string_;
+  STRING_CONSTANTS(F)
+#undef F
+};
+
+} }  // namespace v8::internal
+
+#undef STRING_CONSTANTS
+
+#endif  // V8_AST_VALUE_FACTORY_H_
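
The header's two-phase design, building AstStrings and AstValues off-heap while parsing and then internalizing them into the V8 heap in one pass, can be sketched with ordinary containers (all types here are invented stand-ins):

#include <cassert>
#include <string>
#include <utility>
#include <vector>

struct DeferredString {
  std::string bytes;  // off-heap copy made while parsing
  int heap_index;     // -1 until internalized

  explicit DeferredString(std::string b)
      : bytes(std::move(b)), heap_index(-1) {}

  void Internalize(std::vector<std::string>* heap) {
    heap_index = static_cast<int>(heap->size());
    heap->push_back(bytes);
  }
};

int main() {
  std::vector<DeferredString> parsed;
  parsed.emplace_back("use strict");  // phase 1: created off-heap during parse
  parsed.emplace_back("arguments");
  std::vector<std::string> heap;      // stands in for the V8 heap
  for (DeferredString& s : parsed) s.Internalize(&heap);  // phase 2
  assert(parsed[1].heap_index == 1 && heap[1] == "arguments");
}
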
diff --git a/src/ast.cc b/src/ast.cc
index d332f4a..6816992 100644
--- a/src/ast.cc
+++ b/src/ast.cc
@@ -11,8 +11,8 @@
 #include "src/conversions.h"
 #include "src/hashmap.h"
 #include "src/parser.h"
-#include "src/property-details.h"
 #include "src/property.h"
+#include "src/property-details.h"
 #include "src/scopes.h"
 #include "src/string-stream.h"
 #include "src/type-info.h"
@@ -55,68 +55,59 @@
   // The global identifier "undefined" is immutable. Everything
   // else could be reassigned.
   return var != NULL && var->location() == Variable::UNALLOCATED &&
-         String::Equals(var_proxy->name(),
-                        isolate->factory()->undefined_string());
+         var_proxy->raw_name()->IsOneByteEqualTo("undefined");
 }
 
 
-VariableProxy::VariableProxy(Zone* zone, Variable* var, int position)
-    : Expression(zone, position),
-      name_(var->name()),
+VariableProxy::VariableProxy(Zone* zone, Variable* var, int position,
+                             IdGen* id_gen)
+    : Expression(zone, position, id_gen),
+      name_(var->raw_name()),
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
-      is_trivial_(false),
-      is_lvalue_(false),
-      interface_(var->interface()) {
+      is_assigned_(false),
+      interface_(var->interface()),
+      variable_feedback_slot_(kInvalidFeedbackSlot) {
   BindTo(var);
 }
 
 
-VariableProxy::VariableProxy(Zone* zone,
-                             Handle<String> name,
-                             bool is_this,
-                             Interface* interface,
-                             int position)
-    : Expression(zone, position),
+VariableProxy::VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
+                             Interface* interface, int position, IdGen* id_gen)
+    : Expression(zone, position, id_gen),
       name_(name),
       var_(NULL),
       is_this_(is_this),
-      is_trivial_(false),
-      is_lvalue_(false),
-      interface_(interface) {
-  // Names must be canonicalized for fast equality checks.
-  ASSERT(name->IsInternalizedString());
-}
+      is_assigned_(false),
+      interface_(interface),
+      variable_feedback_slot_(kInvalidFeedbackSlot) {}
 
 
 void VariableProxy::BindTo(Variable* var) {
-  ASSERT(var_ == NULL);  // must be bound only once
-  ASSERT(var != NULL);  // must bind
-  ASSERT(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
-  ASSERT((is_this() && var->is_this()) || name_.is_identical_to(var->name()));
+  DCHECK(var_ == NULL);  // must be bound only once
+  DCHECK(var != NULL);  // must bind
+  DCHECK(!FLAG_harmony_modules || interface_->IsUnified(var->interface()));
+  DCHECK((is_this() && var->is_this()) || name_ == var->raw_name());
   // Ideally CONST-ness should match. However, this is very hard to achieve
   // because we don't know the exact semantics of conflicting (const and
   // non-const) multiple variable declarations, const vars introduced via
   // eval() etc.  Const-ness and variable declarations are a complete mess
   // in JS. Sigh...
   var_ = var;
-  var->set_is_used(true);
+  var->set_is_used();
 }
 
 
-Assignment::Assignment(Zone* zone,
-                       Token::Value op,
-                       Expression* target,
-                       Expression* value,
-                       int pos)
-    : Expression(zone, pos),
+Assignment::Assignment(Zone* zone, Token::Value op, Expression* target,
+                       Expression* value, int pos, IdGen* id_gen)
+    : Expression(zone, pos, id_gen),
       op_(op),
       target_(target),
       value_(value),
       binary_operation_(NULL),
-      assignment_id_(GetNextId(zone)),
+      assignment_id_(id_gen->GetNextId()),
       is_uninitialized_(false),
-      store_mode_(STANDARD_STORE) { }
+      store_mode_(STANDARD_STORE) {}
 
 
 Token::Value Assignment::binary_op() const {
@@ -180,15 +171,15 @@
 }
 
 
-ObjectLiteralProperty::ObjectLiteralProperty(
-    Zone* zone, Literal* key, Expression* value) {
+ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone,
+                                             AstValueFactory* ast_value_factory,
+                                             Literal* key, Expression* value,
+                                             bool is_static) {
   emit_store_ = true;
   key_ = key;
   value_ = value;
-  Handle<Object> k = key->value();
-  if (k->IsInternalizedString() &&
-      String::Equals(Handle<String>::cast(k),
-                     zone->isolate()->factory()->proto_string())) {
+  is_static_ = is_static;
+  if (key->raw_value()->EqualsString(ast_value_factory->proto_string())) {
     kind_ = PROTOTYPE;
   } else if (value_->AsMaterializedLiteral() != NULL) {
     kind_ = MATERIALIZED_LITERAL;
@@ -200,11 +191,13 @@
 }
 
 
-ObjectLiteralProperty::ObjectLiteralProperty(
-    Zone* zone, bool is_getter, FunctionLiteral* value) {
+ObjectLiteralProperty::ObjectLiteralProperty(Zone* zone, bool is_getter,
+                                             FunctionLiteral* value,
+                                             bool is_static) {
   emit_store_ = true;
   value_ = value;
   kind_ = is_getter ? GETTER : SETTER;
+  is_static_ = is_static;
 }
 
 
@@ -407,8 +400,8 @@
   if (IsObjectLiteral()) {
     return AsObjectLiteral()->BuildConstantProperties(isolate);
   }
-  ASSERT(IsRegExpLiteral());
-  ASSERT(depth() >= 1);  // Depth should be initialized.
+  DCHECK(IsRegExpLiteral());
+  DCHECK(depth() >= 1);  // Depth should be initialized.
 }
 
 
@@ -595,18 +588,16 @@
 
 
 bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
-                               LookupResult* lookup) {
+                               LookupIterator* it) {
   target_ = Handle<JSFunction>::null();
   cell_ = Handle<Cell>::null();
-  ASSERT(lookup->IsFound() &&
-         lookup->type() == NORMAL &&
-         lookup->holder() == *global);
-  cell_ = Handle<Cell>(global->GetPropertyCell(lookup));
+  DCHECK(it->IsFound() && it->GetHolder<JSObject>().is_identical_to(global));
+  cell_ = it->GetPropertyCell();
   if (cell_->value()->IsJSFunction()) {
     Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
     // If the function is in new space we assume it's more likely to
     // change and thus prefer the general IC code.
-    if (!lookup->isolate()->heap()->InNewSpace(*candidate)) {
+    if (!it->isolate()->heap()->InNewSpace(*candidate)) {
       target_ = candidate;
       return true;
     }
@@ -804,53 +795,46 @@
 // in as many cases as possible, to make it more difficult for incorrect
 // parses to look like correct ones, which is likely if the input and
 // output formats are alike.
-class RegExpUnparser V8_FINAL : public RegExpVisitor {
+class RegExpUnparser FINAL : public RegExpVisitor {
  public:
-  explicit RegExpUnparser(Zone* zone);
+  RegExpUnparser(OStream& os, Zone* zone) : os_(os), zone_(zone) {}
   void VisitCharacterRange(CharacterRange that);
-  SmartArrayPointer<const char> ToString() { return stream_.ToCString(); }
 #define MAKE_CASE(Name) virtual void* Visit##Name(RegExp##Name*,          \
-                                                  void* data) V8_OVERRIDE;
+                                                  void* data) OVERRIDE;
   FOR_EACH_REG_EXP_TREE_TYPE(MAKE_CASE)
 #undef MAKE_CASE
  private:
-  StringStream* stream() { return &stream_; }
-  HeapStringAllocator alloc_;
-  StringStream stream_;
+  OStream& os_;
   Zone* zone_;
 };
 
 
-RegExpUnparser::RegExpUnparser(Zone* zone) : stream_(&alloc_), zone_(zone) {
-}
-
-
 void* RegExpUnparser::VisitDisjunction(RegExpDisjunction* that, void* data) {
-  stream()->Add("(|");
+  os_ << "(|";
   for (int i = 0; i <  that->alternatives()->length(); i++) {
-    stream()->Add(" ");
+    os_ << " ";
     that->alternatives()->at(i)->Accept(this, data);
   }
-  stream()->Add(")");
+  os_ << ")";
   return NULL;
 }
 
 
 void* RegExpUnparser::VisitAlternative(RegExpAlternative* that, void* data) {
-  stream()->Add("(:");
+  os_ << "(:";
   for (int i = 0; i <  that->nodes()->length(); i++) {
-    stream()->Add(" ");
+    os_ << " ";
     that->nodes()->at(i)->Accept(this, data);
   }
-  stream()->Add(")");
+  os_ << ")";
   return NULL;
 }
 
 
 void RegExpUnparser::VisitCharacterRange(CharacterRange that) {
-  stream()->Add("%k", that.from());
+  os_ << AsUC16(that.from());
   if (!that.IsSingleton()) {
-    stream()->Add("-%k", that.to());
+    os_ << "-" << AsUC16(that.to());
   }
 }
 
@@ -858,14 +842,13 @@
 
 void* RegExpUnparser::VisitCharacterClass(RegExpCharacterClass* that,
                                           void* data) {
-  if (that->is_negated())
-    stream()->Add("^");
-  stream()->Add("[");
+  if (that->is_negated()) os_ << "^";
+  os_ << "[";
   for (int i = 0; i < that->ranges(zone_)->length(); i++) {
-    if (i > 0) stream()->Add(" ");
+    if (i > 0) os_ << " ";
     VisitCharacterRange(that->ranges(zone_)->at(i));
   }
-  stream()->Add("]");
+  os_ << "]";
   return NULL;
 }
 
@@ -873,22 +856,22 @@
 void* RegExpUnparser::VisitAssertion(RegExpAssertion* that, void* data) {
   switch (that->assertion_type()) {
     case RegExpAssertion::START_OF_INPUT:
-      stream()->Add("@^i");
+      os_ << "@^i";
       break;
     case RegExpAssertion::END_OF_INPUT:
-      stream()->Add("@$i");
+      os_ << "@$i";
       break;
     case RegExpAssertion::START_OF_LINE:
-      stream()->Add("@^l");
+      os_ << "@^l";
       break;
     case RegExpAssertion::END_OF_LINE:
-      stream()->Add("@$l");
+      os_ << "@$l";
        break;
     case RegExpAssertion::BOUNDARY:
-      stream()->Add("@b");
+      os_ << "@b";
       break;
     case RegExpAssertion::NON_BOUNDARY:
-      stream()->Add("@B");
+      os_ << "@B";
       break;
   }
   return NULL;
@@ -896,12 +879,12 @@
 
 
 void* RegExpUnparser::VisitAtom(RegExpAtom* that, void* data) {
-  stream()->Add("'");
+  os_ << "'";
   Vector<const uc16> chardata = that->data();
   for (int i = 0; i < chardata.length(); i++) {
-    stream()->Add("%k", chardata[i]);
+    os_ << AsUC16(chardata[i]);
   }
-  stream()->Add("'");
+  os_ << "'";
   return NULL;
 }
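
Throughout this unparser the rewrite replaces the accumulate-into-StringStream pattern with a single OStream& threaded through the visitor, so each Visit* method appends directly to the caller's stream and RegExpTree::Print (below) just wraps Accept. A toy self-contained visitor in the same shape, with invented node types; the output format imitates VisitDisjunction/VisitAtom above:

    #include <iostream>
    #include <sstream>

    // Toy ostream-threading visitor; Node/Atom/Alt are invented for the
    // sketch and only mimic the printing shape used above.
    struct Node {
      virtual ~Node() = default;
      virtual void Print(std::ostream& os) const = 0;
    };

    struct Atom : Node {  // like RegExpAtom: prints 'chars'
      explicit Atom(const char* s) : s_(s) {}
      void Print(std::ostream& os) const override { os << '\'' << s_ << '\''; }
      const char* s_;
    };

    struct Alt : Node {  // like RegExpDisjunction: prints (| a b)
      Alt(const Node* a, const Node* b) : a_(a), b_(b) {}
      void Print(std::ostream& os) const override {
        os << "(|";
        os << ' ';
        a_->Print(os);
        os << ' ';
        b_->Print(os);
        os << ')';
      }
      const Node* a_;
      const Node* b_;
    };

    int main() {
      Atom ab("ab"), c("c");
      Alt root(&ab, &c);       // roughly what /ab|c/ parses to
      std::ostringstream oss;  // any std::ostream works: cout, a file, ...
      root.Print(oss);
      std::cout << oss.str() << "\n";  // prints: (| 'ab' 'c')
      return 0;
    }

The gain over the old StringStream version is that the printer now composes with any output sink and needs no HeapStringAllocator plumbing.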
 
@@ -910,71 +893,70 @@
   if (that->elements()->length() == 1) {
     that->elements()->at(0).tree()->Accept(this, data);
   } else {
-    stream()->Add("(!");
+    os_ << "(!";
     for (int i = 0; i < that->elements()->length(); i++) {
-      stream()->Add(" ");
+      os_ << " ";
       that->elements()->at(i).tree()->Accept(this, data);
     }
-    stream()->Add(")");
+    os_ << ")";
   }
   return NULL;
 }
 
 
 void* RegExpUnparser::VisitQuantifier(RegExpQuantifier* that, void* data) {
-  stream()->Add("(# %i ", that->min());
+  os_ << "(# " << that->min() << " ";
   if (that->max() == RegExpTree::kInfinity) {
-    stream()->Add("- ");
+    os_ << "- ";
   } else {
-    stream()->Add("%i ", that->max());
+    os_ << that->max() << " ";
   }
-  stream()->Add(that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
+  os_ << (that->is_greedy() ? "g " : that->is_possessive() ? "p " : "n ");
   that->body()->Accept(this, data);
-  stream()->Add(")");
+  os_ << ")";
   return NULL;
 }
 
 
 void* RegExpUnparser::VisitCapture(RegExpCapture* that, void* data) {
-  stream()->Add("(^ ");
+  os_ << "(^ ";
   that->body()->Accept(this, data);
-  stream()->Add(")");
+  os_ << ")";
   return NULL;
 }
 
 
 void* RegExpUnparser::VisitLookahead(RegExpLookahead* that, void* data) {
-  stream()->Add("(-> ");
-  stream()->Add(that->is_positive() ? "+ " : "- ");
+  os_ << "(-> " << (that->is_positive() ? "+ " : "- ");
   that->body()->Accept(this, data);
-  stream()->Add(")");
+  os_ << ")";
   return NULL;
 }
 
 
 void* RegExpUnparser::VisitBackReference(RegExpBackReference* that,
                                          void* data) {
-  stream()->Add("(<- %i)", that->index());
+  os_ << "(<- " << that->index() << ")";
   return NULL;
 }
 
 
 void* RegExpUnparser::VisitEmpty(RegExpEmpty* that, void* data) {
-  stream()->Put('%');
+  os_ << '%';
   return NULL;
 }
 
 
-SmartArrayPointer<const char> RegExpTree::ToString(Zone* zone) {
-  RegExpUnparser unparser(zone);
+OStream& RegExpTree::Print(OStream& os, Zone* zone) {  // NOLINT
+  RegExpUnparser unparser(os, zone);
   Accept(&unparser, NULL);
-  return unparser.ToString();
+  return os;
 }
 
 
 RegExpDisjunction::RegExpDisjunction(ZoneList<RegExpTree*>* alternatives)
     : alternatives_(alternatives) {
-  ASSERT(alternatives->length() > 1);
+  DCHECK(alternatives->length() > 1);
   RegExpTree* first_alternative = alternatives->at(0);
   min_match_ = first_alternative->min_match();
   max_match_ = first_alternative->max_match();
@@ -996,7 +978,7 @@
 
 RegExpAlternative::RegExpAlternative(ZoneList<RegExpTree*>* nodes)
     : nodes_(nodes) {
-  ASSERT(nodes->length() > 1);
+  DCHECK(nodes->length() > 1);
   min_match_ = 0;
   max_match_ = 0;
   for (int i = 0; i < nodes->length(); i++) {
@@ -1009,53 +991,62 @@
 }
 
 
-CaseClause::CaseClause(Zone* zone,
-                       Expression* label,
-                       ZoneList<Statement*>* statements,
-                       int pos)
-    : Expression(zone, pos),
+CaseClause::CaseClause(Zone* zone, Expression* label,
+                       ZoneList<Statement*>* statements, int pos, IdGen* id_gen)
+    : Expression(zone, pos, id_gen),
       label_(label),
       statements_(statements),
       compare_type_(Type::None(zone)),
-      compare_id_(AstNode::GetNextId(zone)),
-      entry_id_(AstNode::GetNextId(zone)) {
-}
+      compare_id_(id_gen->GetNextId()),
+      entry_id_(id_gen->GetNextId()) {}
 
 
-#define REGULAR_NODE(NodeType) \
+#define REGULAR_NODE(NodeType)                                   \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    increase_node_count(); \
+    increase_node_count();                                       \
   }
-#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType)               \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    increase_node_count(); \
-    add_slot_node(node); \
+    increase_node_count();                                       \
+    add_slot_node(node);                                         \
   }
-#define DONT_OPTIMIZE_NODE(NodeType) \
+#define DONT_OPTIMIZE_NODE(NodeType)                             \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    increase_node_count(); \
-    set_dont_optimize_reason(k##NodeType); \
-    add_flag(kDontInline); \
-    add_flag(kDontSelfOptimize); \
+    increase_node_count();                                       \
+    set_dont_crankshaft_reason(k##NodeType);                     \
+    add_flag(kDontSelfOptimize);                                 \
   }
-#define DONT_SELFOPTIMIZE_NODE(NodeType) \
+#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType)         \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    increase_node_count(); \
-    add_flag(kDontSelfOptimize); \
+    increase_node_count();                                       \
+    add_slot_node(node);                                         \
+    set_dont_crankshaft_reason(k##NodeType);                     \
+    add_flag(kDontSelfOptimize);                                 \
   }
-#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType) \
+#define DONT_TURBOFAN_NODE(NodeType)                             \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    increase_node_count(); \
-    add_slot_node(node); \
-    add_flag(kDontSelfOptimize); \
+    increase_node_count();                                       \
+    set_dont_crankshaft_reason(k##NodeType);                     \
+    set_dont_turbofan_reason(k##NodeType);                       \
+    add_flag(kDontSelfOptimize);                                 \
   }
-#define DONT_CACHE_NODE(NodeType) \
+#define DONT_SELFOPTIMIZE_NODE(NodeType)                         \
   void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    increase_node_count(); \
-    set_dont_optimize_reason(k##NodeType); \
-    add_flag(kDontInline); \
-    add_flag(kDontSelfOptimize); \
-    add_flag(kDontCache); \
+    increase_node_count();                                       \
+    add_flag(kDontSelfOptimize);                                 \
+  }
+#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType)     \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count();                                       \
+    add_slot_node(node);                                         \
+    add_flag(kDontSelfOptimize);                                 \
+  }
+#define DONT_CACHE_NODE(NodeType)                                \
+  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
+    increase_node_count();                                       \
+    set_dont_crankshaft_reason(k##NodeType);                     \
+    add_flag(kDontSelfOptimize);                                 \
+    add_flag(kDontCache);                                        \
   }
 
 REGULAR_NODE(VariableDeclaration)
@@ -1077,19 +1068,21 @@
 REGULAR_NODE(FunctionLiteral)
 REGULAR_NODE(Assignment)
 REGULAR_NODE(Throw)
-REGULAR_NODE(Property)
 REGULAR_NODE(UnaryOperation)
 REGULAR_NODE(CountOperation)
 REGULAR_NODE(BinaryOperation)
 REGULAR_NODE(CompareOperation)
 REGULAR_NODE(ThisFunction)
+
 REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
 REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(Property)
 // In theory, for VariableProxy we'd have to add:
-// if (node->var()->IsLookupSlot()) add_flag(kDontInline);
+// if (node->var()->IsLookupSlot())
+//   set_dont_optimize_reason(kReferenceToAVariableWhichRequiresDynamicLookup);
 // But node->var() is usually not bound yet at VariableProxy creation time, and
 // LOOKUP variables only result from constructs that cannot be inlined anyway.
-REGULAR_NODE(VariableProxy)
+REGULAR_NODE_WITH_FEEDBACK_SLOTS(VariableProxy)
 
 // We currently do not optimize any modules.
 DONT_OPTIMIZE_NODE(ModuleDeclaration)
@@ -1099,36 +1092,34 @@
 DONT_OPTIMIZE_NODE(ModulePath)
 DONT_OPTIMIZE_NODE(ModuleUrl)
 DONT_OPTIMIZE_NODE(ModuleStatement)
-DONT_OPTIMIZE_NODE(Yield)
 DONT_OPTIMIZE_NODE(WithStatement)
-DONT_OPTIMIZE_NODE(TryCatchStatement)
-DONT_OPTIMIZE_NODE(TryFinallyStatement)
 DONT_OPTIMIZE_NODE(DebuggerStatement)
+DONT_OPTIMIZE_NODE(ClassLiteral)
 DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
+DONT_OPTIMIZE_NODE(SuperReference)
+
+DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield)
+
+// TODO(turbofan): Remove the dont_turbofan_reason once this list is empty.
+DONT_TURBOFAN_NODE(ForOfStatement)
+DONT_TURBOFAN_NODE(TryCatchStatement)
+DONT_TURBOFAN_NODE(TryFinallyStatement)
 
 DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
 DONT_SELFOPTIMIZE_NODE(WhileStatement)
 DONT_SELFOPTIMIZE_NODE(ForStatement)
+
 DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
-DONT_SELFOPTIMIZE_NODE(ForOfStatement)
 
 DONT_CACHE_NODE(ModuleLiteral)
 
 
 void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
   increase_node_count();
+  add_slot_node(node);
   if (node->is_jsruntime()) {
-    // Don't try to inline JS runtime calls because we don't (currently) even
-    // optimize them.
-    add_flag(kDontInline);
-  } else if (node->function()->intrinsic_type == Runtime::INLINE &&
-      (node->name()->IsOneByteEqualTo(
-          STATIC_ASCII_VECTOR("_ArgumentsLength")) ||
-       node->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_Arguments")))) {
-    // Don't inline the %_ArgumentsLength or %_Arguments because their
-    // implementation will not work.  There is no stack frame to get them
-    // from.
-    add_flag(kDontInline);
+    // Don't try to optimize JS runtime calls because we bail out on them.
+    set_dont_crankshaft_reason(kCallToAJavaScriptRuntimeFunction);
   }
 }
 
@@ -1139,17 +1130,17 @@
 
 
 Handle<String> Literal::ToString() {
-  if (value_->IsString()) return Handle<String>::cast(value_);
-  ASSERT(value_->IsNumber());
+  if (value_->IsString()) return value_->AsString()->string();
+  DCHECK(value_->IsNumber());
   char arr[100];
-  Vector<char> buffer(arr, ARRAY_SIZE(arr));
+  Vector<char> buffer(arr, arraysize(arr));
   const char* str;
-  if (value_->IsSmi()) {
+  if (value()->IsSmi()) {
     // Optimization only; the heap number case would subsume this.
-    SNPrintF(buffer, "%d", Smi::cast(*value_)->value());
+    SNPrintF(buffer, "%d", Smi::cast(*value())->value());
     str = arr;
   } else {
-    str = DoubleToCString(value_->Number(), buffer);
+    str = DoubleToCString(value()->Number(), buffer);
   }
   return isolate_->factory()->NewStringFromAsciiChecked(str);
 }
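
The thread running through this file's changes: bailout and type-feedback IDs now come from an explicit AstNode::IdGen handed to every constructor, rather than from the isolate-wide counter behind the old static GetNextId(zone). IdGen itself (defined in the ast.h hunk below) is a plain cursor; here is a sketch of its behavior, mirroring how Expression reserves id_ and test_id_ and how ArrayLiteral grabs a contiguous block with ReserveIdRange (the surrounding main() is invented for the sketch):

    #include <cassert>

    // Copy of the IdGen shape from the ast.h diff below, exercised in
    // isolation.
    class IdGen {
     public:
      explicit IdGen(int id = 0) : id_(id) {}
      int GetNextId() { return ReserveIdRange(1); }
      int ReserveIdRange(int n) {  // hands out [id_, id_ + n) and advances
        int tmp = id_;
        id_ += n;
        return tmp;
      }
     private:
      int id_;
    };

    int main() {
      IdGen gen;
      int id = gen.GetNextId();                   // 0 (Expression::id_)
      int test_id = gen.GetNextId();              // 1 (Expression::test_id_)
      int first_element = gen.ReserveIdRange(3);  // 2, spanning IDs 2..4
      assert(id == 0 && test_id == 1 && first_element == 2);
      assert(gen.GetNextId() == 5);               // next free ID
      return 0;
    }
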
diff --git a/src/ast.h b/src/ast.h
index 3036fcc..03f43ad 100644
--- a/src/ast.h
+++ b/src/ast.h
@@ -8,8 +8,11 @@
 #include "src/v8.h"
 
 #include "src/assembler.h"
+#include "src/ast-value-factory.h"
+#include "src/bailout-reason.h"
 #include "src/factory.h"
 #include "src/feedback-slots.h"
+#include "src/interface.h"
 #include "src/isolate.h"
 #include "src/jsregexp.h"
 #include "src/list-inl.h"
@@ -20,7 +23,6 @@
 #include "src/types.h"
 #include "src/utils.h"
 #include "src/variables.h"
-#include "src/interface.h"
 #include "src/zone-inl.h"
 
 namespace v8 {
@@ -39,12 +41,12 @@
 // Nodes of the abstract syntax tree. Only concrete classes are
 // enumerated here.
 
-#define DECLARATION_NODE_LIST(V)                \
-  V(VariableDeclaration)                        \
-  V(FunctionDeclaration)                        \
-  V(ModuleDeclaration)                          \
-  V(ImportDeclaration)                          \
-  V(ExportDeclaration)                          \
+#define DECLARATION_NODE_LIST(V) \
+  V(VariableDeclaration)         \
+  V(FunctionDeclaration)         \
+  V(ModuleDeclaration)           \
+  V(ImportDeclaration)           \
+  V(ExportDeclaration)
 
 #define MODULE_NODE_LIST(V)                     \
   V(ModuleLiteral)                              \
@@ -72,27 +74,29 @@
   V(TryFinallyStatement)                        \
   V(DebuggerStatement)
 
-#define EXPRESSION_NODE_LIST(V)                 \
-  V(FunctionLiteral)                            \
-  V(NativeFunctionLiteral)                      \
-  V(Conditional)                                \
-  V(VariableProxy)                              \
-  V(Literal)                                    \
-  V(RegExpLiteral)                              \
-  V(ObjectLiteral)                              \
-  V(ArrayLiteral)                               \
-  V(Assignment)                                 \
-  V(Yield)                                      \
-  V(Throw)                                      \
-  V(Property)                                   \
-  V(Call)                                       \
-  V(CallNew)                                    \
-  V(CallRuntime)                                \
-  V(UnaryOperation)                             \
-  V(CountOperation)                             \
-  V(BinaryOperation)                            \
-  V(CompareOperation)                           \
-  V(ThisFunction)                               \
+#define EXPRESSION_NODE_LIST(V) \
+  V(FunctionLiteral)            \
+  V(ClassLiteral)               \
+  V(NativeFunctionLiteral)      \
+  V(Conditional)                \
+  V(VariableProxy)              \
+  V(Literal)                    \
+  V(RegExpLiteral)              \
+  V(ObjectLiteral)              \
+  V(ArrayLiteral)               \
+  V(Assignment)                 \
+  V(Yield)                      \
+  V(Throw)                      \
+  V(Property)                   \
+  V(Call)                       \
+  V(CallNew)                    \
+  V(CallRuntime)                \
+  V(UnaryOperation)             \
+  V(CountOperation)             \
+  V(BinaryOperation)            \
+  V(CompareOperation)           \
+  V(ThisFunction)               \
+  V(SuperReference)             \
   V(CaseClause)
 
 #define AST_NODE_LIST(V)                        \
@@ -111,6 +115,7 @@
 class Expression;
 class IterationStatement;
 class MaterializedLiteral;
+class OStream;
 class Statement;
 class TargetCollector;
 class TypeFeedbackOracle;
@@ -140,22 +145,21 @@
 
 
 #define DECLARE_NODE_TYPE(type)                                 \
-  virtual void Accept(AstVisitor* v) V8_OVERRIDE;                  \
-  virtual AstNode::NodeType node_type() const V8_FINAL V8_OVERRIDE {  \
+  virtual void Accept(AstVisitor* v) OVERRIDE;                  \
+  virtual AstNode::NodeType node_type() const FINAL OVERRIDE {  \
     return AstNode::k##type;                                    \
   }                                                             \
   template<class> friend class AstNodeFactory;
 
 
 enum AstPropertiesFlag {
-  kDontInline,
   kDontSelfOptimize,
   kDontSoftInline,
   kDontCache
 };
 
 
-class AstProperties V8_FINAL BASE_EMBEDDED {
+class AstProperties FINAL BASE_EMBEDDED {
  public:
   class Flags : public EnumSet<AstPropertiesFlag, int> {};
 
@@ -179,6 +183,22 @@
 
 class AstNode: public ZoneObject {
  public:
+  // For generating IDs for AstNodes.
+  class IdGen {
+   public:
+    explicit IdGen(int id = 0) : id_(id) {}
+
+    int GetNextId() { return ReserveIdRange(1); }
+    int ReserveIdRange(int n) {
+      int tmp = id_;
+      id_ += n;
+      return tmp;
+    }
+
+   private:
+    int id_;
+  };
+
 #define DECLARE_TYPE_ENUM(type) k##type,
   enum NodeType {
     AST_NODE_LIST(DECLARE_TYPE_ENUM)
@@ -215,16 +235,6 @@
   virtual MaterializedLiteral* AsMaterializedLiteral() { return NULL; }
 
  protected:
-  static int GetNextId(Zone* zone) {
-    return ReserveIdRange(zone, 1);
-  }
-
-  static int ReserveIdRange(Zone* zone, int n) {
-    int tmp = zone->isolate()->ast_node_id();
-    zone->isolate()->set_ast_node_id(tmp + n);
-    return tmp;
-  }
-
   // Some nodes re-use bailout IDs for type feedback.
   static TypeFeedbackId reuse(BailoutId id) {
     return TypeFeedbackId(id.ToInt());
@@ -251,7 +261,7 @@
 };
 
 
-class SmallMapList V8_FINAL {
+class SmallMapList FINAL {
  public:
   SmallMapList() {}
   SmallMapList(int capacity, Zone* zone) : list_(capacity, zone) {}
@@ -264,7 +274,7 @@
   int length() const { return list_.length(); }
 
   void AddMapIfMissing(Handle<Map> map, Zone* zone) {
-    if (!Map::CurrentMapForDeprecated(map).ToHandle(&map)) return;
+    if (!Map::TryUpdate(map).ToHandle(&map)) return;
     for (int i = 0; i < length(); ++i) {
       if (at(i).is_identical_to(map)) return;
     }
@@ -343,6 +353,11 @@
   Bounds bounds() const { return bounds_; }
   void set_bounds(Bounds bounds) { bounds_ = bounds; }
 
+  // Whether the expression is parenthesized.
+  unsigned parenthesization_level() const { return parenthesization_level_; }
+  bool is_parenthesized() const { return parenthesization_level_ > 0; }
+  void increase_parenthesization_level() { ++parenthesization_level_; }
+
   // Type feedback information for assignments and properties.
   virtual bool IsMonomorphic() {
     UNREACHABLE();
@@ -365,16 +380,18 @@
   TypeFeedbackId test_id() const { return test_id_; }
 
  protected:
-  Expression(Zone* zone, int pos)
+  Expression(Zone* zone, int pos, IdGen* id_gen)
       : AstNode(pos),
         bounds_(Bounds::Unbounded(zone)),
-        id_(GetNextId(zone)),
-        test_id_(GetNextId(zone)) {}
+        parenthesization_level_(0),
+        id_(id_gen->GetNextId()),
+        test_id_(id_gen->GetNextId()) {}
   void set_to_boolean_types(byte types) { to_boolean_types_ = types; }
 
  private:
   Bounds bounds_;
   byte to_boolean_types_;
+  unsigned parenthesization_level_;
 
   const BailoutId id_;
   const TypeFeedbackId test_id_;
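
The new parenthesization_level_ counter starts at zero and records how many parenthesis pairs directly wrap the expression; the callers of increase_parenthesization_level() are outside this diff, but the intended bookkeeping is easy to show in isolation (Expr below is invented for the sketch):

    #include <cassert>

    // Invented stand-in for the parenthesization bookkeeping added to
    // Expression above.
    struct Expr {
      unsigned parenthesization_level = 0;
      bool is_parenthesized() const { return parenthesization_level > 0; }
      void increase_parenthesization_level() { ++parenthesization_level; }
    };

    int main() {
      Expr e;  // e.g. the literal 1 in "((1))"
      e.increase_parenthesization_level();  // inner pair
      e.increase_parenthesization_level();  // outer pair
      assert(e.is_parenthesized() && e.parenthesization_level == 2);
      return 0;
    }
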
@@ -390,10 +407,10 @@
 
   // The labels associated with this statement. May be NULL;
   // if it is != NULL, guaranteed to contain at least one entry.
-  ZoneStringList* labels() const { return labels_; }
+  ZoneList<const AstRawString*>* labels() const { return labels_; }
 
   // Type testing & conversion.
-  virtual BreakableStatement* AsBreakableStatement() V8_FINAL V8_OVERRIDE {
+  virtual BreakableStatement* AsBreakableStatement() FINAL OVERRIDE {
     return this;
   }
 
@@ -409,20 +426,19 @@
   BailoutId ExitId() const { return exit_id_; }
 
  protected:
-  BreakableStatement(
-      Zone* zone, ZoneStringList* labels,
-      BreakableType breakable_type, int position)
+  BreakableStatement(Zone* zone, ZoneList<const AstRawString*>* labels,
+                     BreakableType breakable_type, int position, IdGen* id_gen)
       : Statement(zone, position),
         labels_(labels),
         breakable_type_(breakable_type),
-        entry_id_(GetNextId(zone)),
-        exit_id_(GetNextId(zone)) {
-    ASSERT(labels == NULL || labels->length() > 0);
+        entry_id_(id_gen->GetNextId()),
+        exit_id_(id_gen->GetNextId()) {
+    DCHECK(labels == NULL || labels->length() > 0);
   }
 
 
  private:
-  ZoneStringList* labels_;
+  ZoneList<const AstRawString*>* labels_;
   BreakableType breakable_type_;
   Label break_target_;
   const BailoutId entry_id_;
@@ -430,7 +446,7 @@
 };
 
 
-class Block V8_FINAL : public BreakableStatement {
+class Block FINAL : public BreakableStatement {
  public:
   DECLARE_NODE_TYPE(Block)
 
@@ -443,7 +459,7 @@
 
   BailoutId DeclsId() const { return decls_id_; }
 
-  virtual bool IsJump() const V8_OVERRIDE {
+  virtual bool IsJump() const OVERRIDE {
     return !statements_.is_empty() && statements_.last()->IsJump()
         && labels() == NULL;  // Good enough as an approximation...
   }
@@ -452,17 +468,13 @@
   void set_scope(Scope* scope) { scope_ = scope; }
 
  protected:
-  Block(Zone* zone,
-        ZoneStringList* labels,
-        int capacity,
-        bool is_initializer_block,
-        int pos)
-      : BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos),
+  Block(Zone* zone, ZoneList<const AstRawString*>* labels, int capacity,
+        bool is_initializer_block, int pos, IdGen* id_gen)
+      : BreakableStatement(zone, labels, TARGET_FOR_NAMED_ONLY, pos, id_gen),
         statements_(capacity, zone),
         is_initializer_block_(is_initializer_block),
-        decls_id_(GetNextId(zone)),
-        scope_(NULL) {
-  }
+        decls_id_(id_gen->GetNextId()),
+        scope_(NULL) {}
 
  private:
   ZoneList<Statement*> statements_;
@@ -490,7 +502,7 @@
         proxy_(proxy),
         mode_(mode),
         scope_(scope) {
-    ASSERT(IsDeclaredVariableMode(mode));
+    DCHECK(IsDeclaredVariableMode(mode));
   }
 
  private:
@@ -502,11 +514,11 @@
 };
 
 
-class VariableDeclaration V8_FINAL : public Declaration {
+class VariableDeclaration FINAL : public Declaration {
  public:
   DECLARE_NODE_TYPE(VariableDeclaration)
 
-  virtual InitializationFlag initialization() const V8_OVERRIDE {
+  virtual InitializationFlag initialization() const OVERRIDE {
     return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
   }
 
@@ -521,15 +533,15 @@
 };
 
 
-class FunctionDeclaration V8_FINAL : public Declaration {
+class FunctionDeclaration FINAL : public Declaration {
  public:
   DECLARE_NODE_TYPE(FunctionDeclaration)
 
   FunctionLiteral* fun() const { return fun_; }
-  virtual InitializationFlag initialization() const V8_OVERRIDE {
+  virtual InitializationFlag initialization() const OVERRIDE {
     return kCreatedInitialized;
   }
-  virtual bool IsInlineable() const V8_OVERRIDE;
+  virtual bool IsInlineable() const OVERRIDE;
 
  protected:
   FunctionDeclaration(Zone* zone,
@@ -541,8 +553,8 @@
       : Declaration(zone, proxy, mode, scope, pos),
         fun_(fun) {
     // At the moment there are no "const functions" in JavaScript...
-    ASSERT(mode == VAR || mode == LET);
-    ASSERT(fun != NULL);
+    DCHECK(mode == VAR || mode == LET);
+    DCHECK(fun != NULL);
   }
 
  private:
@@ -550,12 +562,12 @@
 };
 
 
-class ModuleDeclaration V8_FINAL : public Declaration {
+class ModuleDeclaration FINAL : public Declaration {
  public:
   DECLARE_NODE_TYPE(ModuleDeclaration)
 
   Module* module() const { return module_; }
-  virtual InitializationFlag initialization() const V8_OVERRIDE {
+  virtual InitializationFlag initialization() const OVERRIDE {
     return kCreatedInitialized;
   }
 
@@ -574,12 +586,12 @@
 };
 
 
-class ImportDeclaration V8_FINAL : public Declaration {
+class ImportDeclaration FINAL : public Declaration {
  public:
   DECLARE_NODE_TYPE(ImportDeclaration)
 
   Module* module() const { return module_; }
-  virtual InitializationFlag initialization() const V8_OVERRIDE {
+  virtual InitializationFlag initialization() const OVERRIDE {
     return kCreatedInitialized;
   }
 
@@ -598,11 +610,11 @@
 };
 
 
-class ExportDeclaration V8_FINAL : public Declaration {
+class ExportDeclaration FINAL : public Declaration {
  public:
   DECLARE_NODE_TYPE(ExportDeclaration)
 
-  virtual InitializationFlag initialization() const V8_OVERRIDE {
+  virtual InitializationFlag initialization() const OVERRIDE {
     return kCreatedInitialized;
   }
 
@@ -633,7 +645,7 @@
 };
 
 
-class ModuleLiteral V8_FINAL : public Module {
+class ModuleLiteral FINAL : public Module {
  public:
   DECLARE_NODE_TYPE(ModuleLiteral)
 
@@ -643,7 +655,7 @@
 };
 
 
-class ModuleVariable V8_FINAL : public Module {
+class ModuleVariable FINAL : public Module {
  public:
   DECLARE_NODE_TYPE(ModuleVariable)
 
@@ -657,27 +669,24 @@
 };
 
 
-class ModulePath V8_FINAL : public Module {
+class ModulePath FINAL : public Module {
  public:
   DECLARE_NODE_TYPE(ModulePath)
 
   Module* module() const { return module_; }
-  Handle<String> name() const { return name_; }
+  Handle<String> name() const { return name_->string(); }
 
  protected:
-  ModulePath(Zone* zone, Module* module, Handle<String> name, int pos)
-      : Module(zone, pos),
-        module_(module),
-        name_(name) {
-  }
+  ModulePath(Zone* zone, Module* module, const AstRawString* name, int pos)
+      : Module(zone, pos), module_(module), name_(name) {}
 
  private:
   Module* module_;
-  Handle<String> name_;
+  const AstRawString* name_;
 };
 
 
-class ModuleUrl V8_FINAL : public Module {
+class ModuleUrl FINAL : public Module {
  public:
   DECLARE_NODE_TYPE(ModuleUrl)
 
@@ -693,7 +702,7 @@
 };
 
 
-class ModuleStatement V8_FINAL : public Statement {
+class ModuleStatement FINAL : public Statement {
  public:
   DECLARE_NODE_TYPE(ModuleStatement)
 
@@ -716,7 +725,7 @@
 class IterationStatement : public BreakableStatement {
  public:
   // Type testing & conversion.
-  virtual IterationStatement* AsIterationStatement() V8_FINAL V8_OVERRIDE {
+  virtual IterationStatement* AsIterationStatement() FINAL OVERRIDE {
     return this;
   }
 
@@ -730,11 +739,11 @@
   Label* continue_target()  { return &continue_target_; }
 
  protected:
-  IterationStatement(Zone* zone, ZoneStringList* labels, int pos)
-      : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
+  IterationStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+                     IdGen* id_gen)
+      : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos, id_gen),
         body_(NULL),
-        osr_entry_id_(GetNextId(zone)) {
-  }
+        osr_entry_id_(id_gen->GetNextId()) {}
 
   void Initialize(Statement* body) {
     body_ = body;
@@ -748,7 +757,7 @@
 };
 
 
-class DoWhileStatement V8_FINAL : public IterationStatement {
+class DoWhileStatement FINAL : public IterationStatement {
  public:
   DECLARE_NODE_TYPE(DoWhileStatement)
 
@@ -759,17 +768,17 @@
 
   Expression* cond() const { return cond_; }
 
-  virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
-  virtual BailoutId StackCheckId() const V8_OVERRIDE { return back_edge_id_; }
+  virtual BailoutId ContinueId() const OVERRIDE { return continue_id_; }
+  virtual BailoutId StackCheckId() const OVERRIDE { return back_edge_id_; }
   BailoutId BackEdgeId() const { return back_edge_id_; }
 
  protected:
-  DoWhileStatement(Zone* zone, ZoneStringList* labels, int pos)
-      : IterationStatement(zone, labels, pos),
+  DoWhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+                   IdGen* id_gen)
+      : IterationStatement(zone, labels, pos, id_gen),
         cond_(NULL),
-        continue_id_(GetNextId(zone)),
-        back_edge_id_(GetNextId(zone)) {
-  }
+        continue_id_(id_gen->GetNextId()),
+        back_edge_id_(id_gen->GetNextId()) {}
 
  private:
   Expression* cond_;
@@ -779,7 +788,7 @@
 };
 
 
-class WhileStatement V8_FINAL : public IterationStatement {
+class WhileStatement FINAL : public IterationStatement {
  public:
   DECLARE_NODE_TYPE(WhileStatement)
 
@@ -796,17 +805,17 @@
     may_have_function_literal_ = value;
   }
 
-  virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
-  virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
+  virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+  virtual BailoutId StackCheckId() const OVERRIDE { return body_id_; }
   BailoutId BodyId() const { return body_id_; }
 
  protected:
-  WhileStatement(Zone* zone, ZoneStringList* labels, int pos)
-      : IterationStatement(zone, labels, pos),
+  WhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+                 IdGen* id_gen)
+      : IterationStatement(zone, labels, pos, id_gen),
         cond_(NULL),
         may_have_function_literal_(true),
-        body_id_(GetNextId(zone)) {
-  }
+        body_id_(id_gen->GetNextId()) {}
 
  private:
   Expression* cond_;
@@ -818,7 +827,7 @@
 };
 
 
-class ForStatement V8_FINAL : public IterationStatement {
+class ForStatement FINAL : public IterationStatement {
  public:
   DECLARE_NODE_TYPE(ForStatement)
 
@@ -843,8 +852,8 @@
     may_have_function_literal_ = value;
   }
 
-  virtual BailoutId ContinueId() const V8_OVERRIDE { return continue_id_; }
-  virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
+  virtual BailoutId ContinueId() const OVERRIDE { return continue_id_; }
+  virtual BailoutId StackCheckId() const OVERRIDE { return body_id_; }
   BailoutId BodyId() const { return body_id_; }
 
   bool is_fast_smi_loop() { return loop_variable_ != NULL; }
@@ -852,16 +861,16 @@
   void set_loop_variable(Variable* var) { loop_variable_ = var; }
 
  protected:
-  ForStatement(Zone* zone, ZoneStringList* labels, int pos)
-      : IterationStatement(zone, labels, pos),
+  ForStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+               IdGen* id_gen)
+      : IterationStatement(zone, labels, pos, id_gen),
         init_(NULL),
         cond_(NULL),
         next_(NULL),
         may_have_function_literal_(true),
         loop_variable_(NULL),
-        continue_id_(GetNextId(zone)),
-        body_id_(GetNextId(zone)) {
-  }
+        continue_id_(id_gen->GetNextId()),
+        body_id_(id_gen->GetNextId()) {}
 
  private:
   Statement* init_;
@@ -894,11 +903,11 @@
   Expression* subject() const { return subject_; }
 
  protected:
-  ForEachStatement(Zone* zone, ZoneStringList* labels, int pos)
-      : IterationStatement(zone, labels, pos),
+  ForEachStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+                   IdGen* id_gen)
+      : IterationStatement(zone, labels, pos, id_gen),
         each_(NULL),
-        subject_(NULL) {
-  }
+        subject_(NULL) {}
 
  private:
   Expression* each_;
@@ -906,7 +915,7 @@
 };
 
 
-class ForInStatement V8_FINAL : public ForEachStatement,
+class ForInStatement FINAL : public ForEachStatement,
     public FeedbackSlotInterface {
  public:
   DECLARE_NODE_TYPE(ForInStatement)
@@ -920,7 +929,7 @@
   virtual void SetFirstFeedbackSlot(int slot) { for_in_feedback_slot_ = slot; }
 
   int ForInFeedbackSlot() {
-    ASSERT(for_in_feedback_slot_ != kInvalidFeedbackSlot);
+    DCHECK(for_in_feedback_slot_ != kInvalidFeedbackSlot);
     return for_in_feedback_slot_;
   }
 
@@ -930,17 +939,17 @@
 
   BailoutId BodyId() const { return body_id_; }
   BailoutId PrepareId() const { return prepare_id_; }
-  virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
-  virtual BailoutId StackCheckId() const V8_OVERRIDE { return body_id_; }
+  virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+  virtual BailoutId StackCheckId() const OVERRIDE { return body_id_; }
 
  protected:
-  ForInStatement(Zone* zone, ZoneStringList* labels, int pos)
-      : ForEachStatement(zone, labels, pos),
+  ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+                 IdGen* id_gen)
+      : ForEachStatement(zone, labels, pos, id_gen),
         for_in_type_(SLOW_FOR_IN),
         for_in_feedback_slot_(kInvalidFeedbackSlot),
-        body_id_(GetNextId(zone)),
-        prepare_id_(GetNextId(zone)) {
-  }
+        body_id_(id_gen->GetNextId()),
+        prepare_id_(id_gen->GetNextId()) {}
 
   ForInType for_in_type_;
   int for_in_feedback_slot_;
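
ForInStatement here, and VariableProxy and Property further down, all implement FeedbackSlotInterface: a node advertises how many type-feedback slots it needs via ComputeFeedbackSlotCount() and is later handed the index of its first slot via SetFirstFeedbackSlot(), with kInvalidFeedbackSlot as the unassigned sentinel. The numbering pass itself is not in this diff; a sketch of the implied protocol, with invented SlotNode/AssignSlots names:

    #include <cassert>
    #include <vector>

    // Invented stand-ins: 'needed' plays ComputeFeedbackSlotCount(),
    // 'first' records what SetFirstFeedbackSlot() would store.
    struct SlotNode {
      int needed;
      int first = -1;  // mirrors kInvalidFeedbackSlot
    };

    // One linear pass assigns each node a contiguous range of slots.
    int AssignSlots(std::vector<SlotNode>& nodes) {
      int next = 0;
      for (SlotNode& n : nodes) {
        if (n.needed == 0) continue;  // e.g. VariableProxy without vector ICs
        n.first = next;
        next += n.needed;
      }
      return next;  // total slots to reserve in the feedback vector
    }

    int main() {
      std::vector<SlotNode> nodes = {{1}, {0}, {2}};
      int total = AssignSlots(nodes);
      assert(total == 3);
      assert(nodes[0].first == 0 && nodes[1].first == -1 && nodes[2].first == 1);
      return 0;
    }
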
@@ -949,20 +958,18 @@
 };
 
 
-class ForOfStatement V8_FINAL : public ForEachStatement {
+class ForOfStatement FINAL : public ForEachStatement {
  public:
   DECLARE_NODE_TYPE(ForOfStatement)
 
   void Initialize(Expression* each,
                   Expression* subject,
                   Statement* body,
-                  Expression* assign_iterable,
                   Expression* assign_iterator,
                   Expression* next_result,
                   Expression* result_done,
                   Expression* assign_each) {
     ForEachStatement::Initialize(each, subject, body);
-    assign_iterable_ = assign_iterable;
     assign_iterator_ = assign_iterator;
     next_result_ = next_result;
     result_done_ = result_done;
@@ -973,12 +980,7 @@
     return subject();
   }
 
-  // var iterable = subject;
-  Expression* assign_iterable() const {
-    return assign_iterable_;
-  }
-
-  // var iterator = iterable[Symbol.iterator]();
+  // var iterator = subject[Symbol.iterator]();
   Expression* assign_iterator() const {
     return assign_iterator_;
   }
@@ -998,22 +1000,21 @@
     return assign_each_;
   }
 
-  virtual BailoutId ContinueId() const V8_OVERRIDE { return EntryId(); }
-  virtual BailoutId StackCheckId() const V8_OVERRIDE { return BackEdgeId(); }
+  virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+  virtual BailoutId StackCheckId() const OVERRIDE { return BackEdgeId(); }
 
   BailoutId BackEdgeId() const { return back_edge_id_; }
 
  protected:
-  ForOfStatement(Zone* zone, ZoneStringList* labels, int pos)
-      : ForEachStatement(zone, labels, pos),
+  ForOfStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+                 IdGen* id_gen)
+      : ForEachStatement(zone, labels, pos, id_gen),
         assign_iterator_(NULL),
         next_result_(NULL),
         result_done_(NULL),
         assign_each_(NULL),
-        back_edge_id_(GetNextId(zone)) {
-  }
+        back_edge_id_(id_gen->GetNextId()) {}
 
-  Expression* assign_iterable_;
   Expression* assign_iterator_;
   Expression* next_result_;
   Expression* result_done_;
@@ -1022,13 +1023,13 @@
 };
 
 
-class ExpressionStatement V8_FINAL : public Statement {
+class ExpressionStatement FINAL : public Statement {
  public:
   DECLARE_NODE_TYPE(ExpressionStatement)
 
   void set_expression(Expression* e) { expression_ = e; }
   Expression* expression() const { return expression_; }
-  virtual bool IsJump() const V8_OVERRIDE { return expression_->IsThrow(); }
+  virtual bool IsJump() const OVERRIDE { return expression_->IsThrow(); }
 
  protected:
   ExpressionStatement(Zone* zone, Expression* expression, int pos)
@@ -1041,14 +1042,14 @@
 
 class JumpStatement : public Statement {
  public:
-  virtual bool IsJump() const V8_FINAL V8_OVERRIDE { return true; }
+  virtual bool IsJump() const FINAL OVERRIDE { return true; }
 
  protected:
   explicit JumpStatement(Zone* zone, int pos) : Statement(zone, pos) {}
 };
 
 
-class ContinueStatement V8_FINAL : public JumpStatement {
+class ContinueStatement FINAL : public JumpStatement {
  public:
   DECLARE_NODE_TYPE(ContinueStatement)
 
@@ -1063,7 +1064,7 @@
 };
 
 
-class BreakStatement V8_FINAL : public JumpStatement {
+class BreakStatement FINAL : public JumpStatement {
  public:
   DECLARE_NODE_TYPE(BreakStatement)
 
@@ -1078,7 +1079,7 @@
 };
 
 
-class ReturnStatement V8_FINAL : public JumpStatement {
+class ReturnStatement FINAL : public JumpStatement {
  public:
   DECLARE_NODE_TYPE(ReturnStatement)
 
@@ -1093,7 +1094,7 @@
 };
 
 
-class WithStatement V8_FINAL : public Statement {
+class WithStatement FINAL : public Statement {
  public:
   DECLARE_NODE_TYPE(WithStatement)
 
@@ -1117,7 +1118,7 @@
 };
 
 
-class CaseClause V8_FINAL : public Expression {
+class CaseClause FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(CaseClause)
 
@@ -1137,10 +1138,8 @@
   void set_compare_type(Type* type) { compare_type_ = type; }
 
  private:
-  CaseClause(Zone* zone,
-             Expression* label,
-             ZoneList<Statement*>* statements,
-             int pos);
+  CaseClause(Zone* zone, Expression* label, ZoneList<Statement*>* statements,
+             int pos, IdGen* id_gen);
 
   Expression* label_;
   Label body_target_;
@@ -1152,7 +1151,7 @@
 };
 
 
-class SwitchStatement V8_FINAL : public BreakableStatement {
+class SwitchStatement FINAL : public BreakableStatement {
  public:
   DECLARE_NODE_TYPE(SwitchStatement)
 
@@ -1165,10 +1164,11 @@
   ZoneList<CaseClause*>* cases() const { return cases_; }
 
  protected:
-  SwitchStatement(Zone* zone, ZoneStringList* labels, int pos)
-      : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos),
+  SwitchStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos,
+                  IdGen* id_gen)
+      : BreakableStatement(zone, labels, TARGET_FOR_ANONYMOUS, pos, id_gen),
         tag_(NULL),
-        cases_(NULL) { }
+        cases_(NULL) {}
 
  private:
   Expression* tag_;
@@ -1181,7 +1181,7 @@
 // the parser implicitly creates an empty statement. Use the
 // HasThenStatement() and HasElseStatement() functions to check if a
 // given if-statement has a then- or an else-part containing code.
-class IfStatement V8_FINAL : public Statement {
+class IfStatement FINAL : public Statement {
  public:
   DECLARE_NODE_TYPE(IfStatement)
 
@@ -1192,7 +1192,7 @@
   Statement* then_statement() const { return then_statement_; }
   Statement* else_statement() const { return else_statement_; }
 
-  virtual bool IsJump() const V8_OVERRIDE {
+  virtual bool IsJump() const OVERRIDE {
     return HasThenStatement() && then_statement()->IsJump()
         && HasElseStatement() && else_statement()->IsJump();
   }
@@ -1202,19 +1202,15 @@
   BailoutId ElseId() const { return else_id_; }
 
  protected:
-  IfStatement(Zone* zone,
-              Expression* condition,
-              Statement* then_statement,
-              Statement* else_statement,
-              int pos)
+  IfStatement(Zone* zone, Expression* condition, Statement* then_statement,
+              Statement* else_statement, int pos, IdGen* id_gen)
       : Statement(zone, pos),
         condition_(condition),
         then_statement_(then_statement),
         else_statement_(else_statement),
-        if_id_(GetNextId(zone)),
-        then_id_(GetNextId(zone)),
-        else_id_(GetNextId(zone)) {
-  }
+        if_id_(id_gen->GetNextId()),
+        then_id_(id_gen->GetNextId()),
+        else_id_(id_gen->GetNextId()) {}
 
  private:
   Expression* condition_;
@@ -1228,7 +1224,7 @@
 
 // NOTE: TargetCollectors are represented as nodes to fit in the target
 // stack in the compiler; this should probably be reworked.
-class TargetCollector V8_FINAL : public AstNode {
+class TargetCollector FINAL : public AstNode {
  public:
   explicit TargetCollector(Zone* zone)
       : AstNode(RelocInfo::kNoPosition), targets_(0, zone) { }
@@ -1239,9 +1235,9 @@
   void AddTarget(Label* target, Zone* zone);
 
   // Virtual behaviour. TargetCollectors are never part of the AST.
-  virtual void Accept(AstVisitor* v) V8_OVERRIDE { UNREACHABLE(); }
-  virtual NodeType node_type() const V8_OVERRIDE { return kInvalid; }
-  virtual TargetCollector* AsTargetCollector() V8_OVERRIDE { return this; }
+  virtual void Accept(AstVisitor* v) OVERRIDE { UNREACHABLE(); }
+  virtual NodeType node_type() const OVERRIDE { return kInvalid; }
+  virtual TargetCollector* AsTargetCollector() OVERRIDE { return this; }
 
   ZoneList<Label*>* targets() { return &targets_; }
 
@@ -1276,7 +1272,7 @@
 };
 
 
-class TryCatchStatement V8_FINAL : public TryStatement {
+class TryCatchStatement FINAL : public TryStatement {
  public:
   DECLARE_NODE_TYPE(TryCatchStatement)
 
@@ -1305,7 +1301,7 @@
 };
 
 
-class TryFinallyStatement V8_FINAL : public TryStatement {
+class TryFinallyStatement FINAL : public TryStatement {
  public:
   DECLARE_NODE_TYPE(TryFinallyStatement)
 
@@ -1322,16 +1318,22 @@
 };
 
 
-class DebuggerStatement V8_FINAL : public Statement {
+class DebuggerStatement FINAL : public Statement {
  public:
   DECLARE_NODE_TYPE(DebuggerStatement)
 
+  BailoutId DebugBreakId() const { return debugger_id_; }
+
  protected:
-  explicit DebuggerStatement(Zone* zone, int pos): Statement(zone, pos) {}
+  explicit DebuggerStatement(Zone* zone, int pos, IdGen* id_gen)
+      : Statement(zone, pos), debugger_id_(id_gen->GetNextId()) {}
+
+ private:
+  const BailoutId debugger_id_;
 };
 
 
-class EmptyStatement V8_FINAL : public Statement {
+class EmptyStatement FINAL : public Statement {
  public:
   DECLARE_NODE_TYPE(EmptyStatement)
 
@@ -1340,31 +1342,33 @@
 };
 
 
-class Literal V8_FINAL : public Expression {
+class Literal FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Literal)
 
-  virtual bool IsPropertyName() const V8_OVERRIDE {
-    if (value_->IsInternalizedString()) {
-      uint32_t ignored;
-      return !String::cast(*value_)->AsArrayIndex(&ignored);
-    }
-    return false;
+  virtual bool IsPropertyName() const OVERRIDE {
+    return value_->IsPropertyName();
   }
 
   Handle<String> AsPropertyName() {
-    ASSERT(IsPropertyName());
-    return Handle<String>::cast(value_);
+    DCHECK(IsPropertyName());
+    return Handle<String>::cast(value());
   }
 
-  virtual bool ToBooleanIsTrue() const V8_OVERRIDE {
-    return value_->BooleanValue();
-  }
-  virtual bool ToBooleanIsFalse() const V8_OVERRIDE {
-    return !value_->BooleanValue();
+  const AstRawString* AsRawPropertyName() {
+    DCHECK(IsPropertyName());
+    return value_->AsString();
   }
 
-  Handle<Object> value() const { return value_; }
+  virtual bool ToBooleanIsTrue() const OVERRIDE {
+    return value()->BooleanValue();
+  }
+  virtual bool ToBooleanIsFalse() const OVERRIDE {
+    return !value()->BooleanValue();
+  }
+
+  Handle<Object> value() const { return value_->value(); }
+  const AstValue* raw_value() const { return value_; }
 
   // Support for using Literal as a HashMap key. NOTE: Currently, this works
   // only for string and number literals!
@@ -1379,15 +1383,15 @@
   TypeFeedbackId LiteralFeedbackId() const { return reuse(id()); }
 
  protected:
-  Literal(Zone* zone, Handle<Object> value, int position)
-      : Expression(zone, position),
+  Literal(Zone* zone, const AstValue* value, int position, IdGen* id_gen)
+      : Expression(zone, position, id_gen),
         value_(value),
-        isolate_(zone->isolate()) { }
+        isolate_(zone->isolate()) {}
 
  private:
   Handle<String> ToString();
 
-  Handle<Object> value_;
+  const AstValue* value_;
   // TODO(dcarney): remove. This is only needed for Match and Hash.
   Isolate* isolate_;
 };
@@ -1402,15 +1406,13 @@
 
   int depth() const {
     // only callable after initialization.
-    ASSERT(depth_ >= 1);
+    DCHECK(depth_ >= 1);
     return depth_;
   }
 
  protected:
-  MaterializedLiteral(Zone* zone,
-                      int literal_index,
-                      int pos)
-      : Expression(zone, pos),
+  MaterializedLiteral(Zone* zone, int literal_index, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         literal_index_(literal_index),
         is_simple_(false),
         depth_(0) {}
@@ -1422,7 +1424,7 @@
   friend class CompileTimeValue;
 
   void set_depth(int depth) {
-    ASSERT(depth >= 1);
+    DCHECK(depth >= 1);
     depth_ = depth;
   }
 
@@ -1448,7 +1450,7 @@
 // Property is used for passing information
 // about an object literal's properties from the parser
 // to the code generator.
-class ObjectLiteralProperty V8_FINAL : public ZoneObject {
+class ObjectLiteralProperty FINAL : public ZoneObject {
  public:
   enum Kind {
     CONSTANT,              // Property with constant value (compile time).
@@ -1458,7 +1460,8 @@
     PROTOTYPE              // Property is __proto__.
   };
 
-  ObjectLiteralProperty(Zone* zone, Literal* key, Expression* value);
+  ObjectLiteralProperty(Zone* zone, AstValueFactory* ast_value_factory,
+                        Literal* key, Expression* value, bool is_static);
 
   Literal* key() { return key_; }
   Expression* value() { return value_; }
@@ -1477,7 +1480,8 @@
  protected:
   template<class> friend class AstNodeFactory;
 
-  ObjectLiteralProperty(Zone* zone, bool is_getter, FunctionLiteral* value);
+  ObjectLiteralProperty(Zone* zone, bool is_getter, FunctionLiteral* value,
+                        bool is_static);
   void set_key(Literal* key) { key_ = key; }
 
  private:
@@ -1485,13 +1489,14 @@
   Expression* value_;
   Kind kind_;
   bool emit_store_;
+  bool is_static_;
   Handle<Map> receiver_type_;
 };
 
 
 // An object literal has a boilerplate object that is used
 // for minimizing the work when constructing it at runtime.
-class ObjectLiteral V8_FINAL : public MaterializedLiteral {
+class ObjectLiteral FINAL : public MaterializedLiteral {
  public:
   typedef ObjectLiteralProperty Property;
 
@@ -1516,6 +1521,13 @@
   // marked expressions, no store code is emitted.
   void CalculateEmitStore(Zone* zone);
 
+  // Assemble bitfield of flags for the CreateObjectLiteral helper.
+  int ComputeFlags() const {
+    int flags = fast_elements() ? kFastElements : kNoFlags;
+    flags |= has_function() ? kHasFunction : kNoFlags;
+    return flags;
+  }
+
   enum Flags {
     kNoFlags = 0,
     kFastElements = 1,
@@ -1529,13 +1541,10 @@
   };
 
  protected:
-  ObjectLiteral(Zone* zone,
-                ZoneList<Property*>* properties,
-                int literal_index,
-                int boilerplate_properties,
-                bool has_function,
-                int pos)
-      : MaterializedLiteral(zone, literal_index, pos),
+  ObjectLiteral(Zone* zone, ZoneList<Property*>* properties, int literal_index,
+                int boilerplate_properties, bool has_function, int pos,
+                IdGen* id_gen)
+      : MaterializedLiteral(zone, literal_index, pos, id_gen),
         properties_(properties),
         boilerplate_properties_(boilerplate_properties),
         fast_elements_(false),
@@ -1553,34 +1562,32 @@
 
 
 // Node for capturing a regexp literal.
-class RegExpLiteral V8_FINAL : public MaterializedLiteral {
+class RegExpLiteral FINAL : public MaterializedLiteral {
  public:
   DECLARE_NODE_TYPE(RegExpLiteral)
 
-  Handle<String> pattern() const { return pattern_; }
-  Handle<String> flags() const { return flags_; }
+  Handle<String> pattern() const { return pattern_->string(); }
+  Handle<String> flags() const { return flags_->string(); }
 
  protected:
-  RegExpLiteral(Zone* zone,
-                Handle<String> pattern,
-                Handle<String> flags,
-                int literal_index,
-                int pos)
-      : MaterializedLiteral(zone, literal_index, pos),
+  RegExpLiteral(Zone* zone, const AstRawString* pattern,
+                const AstRawString* flags, int literal_index, int pos,
+                IdGen* id_gen)
+      : MaterializedLiteral(zone, literal_index, pos, id_gen),
         pattern_(pattern),
         flags_(flags) {
     set_depth(1);
   }
 
  private:
-  Handle<String> pattern_;
-  Handle<String> flags_;
+  const AstRawString* pattern_;
+  const AstRawString* flags_;
 };
 
 
 // An array literal has a literals object that is used
 // for minimizing the work when constructing it at runtime.
-class ArrayLiteral V8_FINAL : public MaterializedLiteral {
+class ArrayLiteral FINAL : public MaterializedLiteral {
  public:
   DECLARE_NODE_TYPE(ArrayLiteral)
 
@@ -1595,6 +1602,13 @@
   // Populate the constant elements fixed array.
   void BuildConstantElements(Isolate* isolate);
 
+  // Assemble bitfield of flags for the CreateArrayLiteral helper.
+  int ComputeFlags() const {
+    int flags = depth() == 1 ? kShallowElements : kNoFlags;
+    flags |= ArrayLiteral::kDisableMementos;
+    return flags;
+  }
+
   enum Flags {
     kNoFlags = 0,
     kShallowElements = 1,
@@ -1602,13 +1616,11 @@
   };
 
  protected:
-  ArrayLiteral(Zone* zone,
-               ZoneList<Expression*>* values,
-               int literal_index,
-               int pos)
-      : MaterializedLiteral(zone, literal_index, pos),
+  ArrayLiteral(Zone* zone, ZoneList<Expression*>* values, int literal_index,
+               int pos, IdGen* id_gen)
+      : MaterializedLiteral(zone, literal_index, pos, id_gen),
         values_(values),
-        first_element_id_(ReserveIdRange(zone, values->length())) {}
+        first_element_id_(id_gen->ReserveIdRange(values->length())) {}
 
  private:
   Handle<FixedArray> constant_elements_;
@@ -1617,59 +1629,55 @@
 };
 
 
-class VariableProxy V8_FINAL : public Expression {
+class VariableProxy FINAL : public Expression, public FeedbackSlotInterface {
  public:
   DECLARE_NODE_TYPE(VariableProxy)
 
-  virtual bool IsValidReferenceExpression() const V8_OVERRIDE {
+  virtual bool IsValidReferenceExpression() const OVERRIDE {
     return var_ == NULL ? true : var_->IsValidReference();
   }
 
-  bool IsVariable(Handle<String> n) const {
-    return !is_this() && name().is_identical_to(n);
-  }
-
   bool IsArguments() const { return var_ != NULL && var_->is_arguments(); }
 
-  bool IsLValue() const { return is_lvalue_; }
-
-  Handle<String> name() const { return name_; }
+  Handle<String> name() const { return name_->string(); }
+  const AstRawString* raw_name() const { return name_; }
   Variable* var() const { return var_; }
   bool is_this() const { return is_this_; }
   Interface* interface() const { return interface_; }
 
-
-  void MarkAsTrivial() { is_trivial_ = true; }
-  void MarkAsLValue() { is_lvalue_ = true; }
+  bool is_assigned() const { return is_assigned_; }
+  void set_is_assigned() { is_assigned_ = true; }
 
   // Bind this proxy to the variable var. Interfaces must match.
   void BindTo(Variable* var);
 
+  virtual int ComputeFeedbackSlotCount() { return FLAG_vector_ics ? 1 : 0; }
+  virtual void SetFirstFeedbackSlot(int slot) {
+    variable_feedback_slot_ = slot;
+  }
+
+  int VariableFeedbackSlot() { return variable_feedback_slot_; }
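+  // Illustrative: with --vector-ics enabled, each variable load reserves one
+  // slot in the type feedback vector, and SetFirstFeedbackSlot() records
+  // where this node's slots begin.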
+
  protected:
-  VariableProxy(Zone* zone, Variable* var, int position);
+  VariableProxy(Zone* zone, Variable* var, int position, IdGen* id_gen);
 
-  VariableProxy(Zone* zone,
-                Handle<String> name,
-                bool is_this,
-                Interface* interface,
-                int position);
+  VariableProxy(Zone* zone, const AstRawString* name, bool is_this,
+                Interface* interface, int position, IdGen* id_gen);
 
-  Handle<String> name_;
+  const AstRawString* name_;
   Variable* var_;  // resolved variable, or NULL
   bool is_this_;
-  bool is_trivial_;
-  // True if this variable proxy is being used in an assignment
-  // or with a increment/decrement operator.
-  bool is_lvalue_;
+  bool is_assigned_;
   Interface* interface_;
+  int variable_feedback_slot_;
 };
 
 
-class Property V8_FINAL : public Expression {
+class Property FINAL : public Expression, public FeedbackSlotInterface {
  public:
   DECLARE_NODE_TYPE(Property)
 
-  virtual bool IsValidReferenceExpression() const V8_OVERRIDE { return true; }
+  virtual bool IsValidReferenceExpression() const OVERRIDE { return true; }
 
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
@@ -1677,16 +1685,15 @@
   BailoutId LoadId() const { return load_id_; }
 
   bool IsStringAccess() const { return is_string_access_; }
-  bool IsFunctionPrototype() const { return is_function_prototype_; }
 
   // Type feedback information.
-  virtual bool IsMonomorphic() V8_OVERRIDE {
+  virtual bool IsMonomorphic() OVERRIDE {
     return receiver_types_.length() == 1;
   }
-  virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+  virtual SmallMapList* GetReceiverTypes() OVERRIDE {
     return &receiver_types_;
   }
-  virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+  virtual KeyedAccessStoreMode GetStoreMode() OVERRIDE {
     return STANDARD_STORE;
   }
   bool IsUninitialized() { return !is_for_call_ && is_uninitialized_; }
@@ -1695,40 +1702,47 @@
   }
   void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
   void set_is_string_access(bool b) { is_string_access_ = b; }
-  void set_is_function_prototype(bool b) { is_function_prototype_ = b; }
   void mark_for_call() { is_for_call_ = true; }
   bool IsForCall() { return is_for_call_; }
 
+  bool IsSuperAccess() { return obj()->IsSuperReference(); }
+
   TypeFeedbackId PropertyFeedbackId() { return reuse(id()); }
 
+  virtual int ComputeFeedbackSlotCount() { return FLAG_vector_ics ? 1 : 0; }
+  virtual void SetFirstFeedbackSlot(int slot) {
+    property_feedback_slot_ = slot;
+  }
+
+  int PropertyFeedbackSlot() const { return property_feedback_slot_; }
+
  protected:
-  Property(Zone* zone,
-           Expression* obj,
-           Expression* key,
-           int pos)
-      : Expression(zone, pos),
+  Property(Zone* zone, Expression* obj, Expression* key, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         obj_(obj),
         key_(key),
-        load_id_(GetNextId(zone)),
+        load_id_(id_gen->GetNextId()),
+        property_feedback_slot_(kInvalidFeedbackSlot),
         is_for_call_(false),
         is_uninitialized_(false),
-        is_string_access_(false),
-        is_function_prototype_(false) { }
+        is_string_access_(false) {}
 
  private:
   Expression* obj_;
   Expression* key_;
   const BailoutId load_id_;
+  int property_feedback_slot_;
 
   SmallMapList receiver_types_;
   bool is_for_call_ : 1;
   bool is_uninitialized_ : 1;
   bool is_string_access_ : 1;
-  bool is_function_prototype_ : 1;
 };
 
 
-class Call V8_FINAL : public Expression, public FeedbackSlotInterface {
+class Call FINAL : public Expression, public FeedbackSlotInterface {
  public:
   DECLARE_NODE_TYPE(Call)
 
@@ -1746,14 +1760,14 @@
   }
   int CallFeedbackSlot() const { return call_feedback_slot_; }
 
-  virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+  virtual SmallMapList* GetReceiverTypes() OVERRIDE {
     if (expression()->IsProperty()) {
       return expression()->AsProperty()->GetReceiverTypes();
     }
     return NULL;
   }
 
-  virtual bool IsMonomorphic() V8_OVERRIDE {
+  virtual bool IsMonomorphic() OVERRIDE {
     if (expression()->IsProperty()) {
       return expression()->AsProperty()->IsMonomorphic();
     }
@@ -1779,7 +1793,7 @@
   void set_allocation_site(Handle<AllocationSite> site) {
     allocation_site_ = site;
   }
-  bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupResult* lookup);
+  bool ComputeGlobalTarget(Handle<GlobalObject> global, LookupIterator* it);
 
   BailoutId ReturnId() const { return return_id_; }
 
@@ -1801,15 +1815,13 @@
 #endif
 
  protected:
-  Call(Zone* zone,
-       Expression* expression,
-       ZoneList<Expression*>* arguments,
-       int pos)
-      : Expression(zone, pos),
+  Call(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
+       int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         expression_(expression),
         arguments_(arguments),
         call_feedback_slot_(kInvalidFeedbackSlot),
-        return_id_(GetNextId(zone)) {
+        return_id_(id_gen->GetNextId()) {
     if (expression->IsProperty()) {
       expression->AsProperty()->mark_for_call();
     }
@@ -1828,7 +1840,7 @@
 };
 
 
-class CallNew V8_FINAL : public Expression, public FeedbackSlotInterface {
+class CallNew FINAL : public Expression, public FeedbackSlotInterface {
  public:
   DECLARE_NODE_TYPE(CallNew)
 
@@ -1844,17 +1856,17 @@
   }
 
   int CallNewFeedbackSlot() {
-    ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+    DCHECK(callnew_feedback_slot_ != kInvalidFeedbackSlot);
     return callnew_feedback_slot_;
   }
   int AllocationSiteFeedbackSlot() {
-    ASSERT(callnew_feedback_slot_ != kInvalidFeedbackSlot);
-    ASSERT(FLAG_pretenuring_call_new);
+    DCHECK(callnew_feedback_slot_ != kInvalidFeedbackSlot);
+    DCHECK(FLAG_pretenuring_call_new);
     return callnew_feedback_slot_ + 1;
   }
 
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  virtual bool IsMonomorphic() V8_OVERRIDE { return is_monomorphic_; }
+  virtual bool IsMonomorphic() OVERRIDE { return is_monomorphic_; }
   Handle<JSFunction> target() const { return target_; }
   ElementsKind elements_kind() const { return elements_kind_; }
   Handle<AllocationSite> allocation_site() const {
@@ -1866,17 +1878,15 @@
   BailoutId ReturnId() const { return return_id_; }
 
  protected:
-  CallNew(Zone* zone,
-          Expression* expression,
-          ZoneList<Expression*>* arguments,
-          int pos)
-      : Expression(zone, pos),
+  CallNew(Zone* zone, Expression* expression, ZoneList<Expression*>* arguments,
+          int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         expression_(expression),
         arguments_(arguments),
         is_monomorphic_(false),
         elements_kind_(GetInitialFastElementsKind()),
         callnew_feedback_slot_(kInvalidFeedbackSlot),
-        return_id_(GetNextId(zone)) { }
+        return_id_(id_gen->GetNextId()) {}
 
  private:
   Expression* expression_;
@@ -1896,36 +1906,50 @@
 // language construct. Instead it is used to call a C or JS function
 // with a set of arguments. This is used from the builtins that are
 // implemented in JavaScript (see "v8natives.js").
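+// A call like %Foo(arg) in such a builtin (the %-prefixed runtime-call
+// syntax; names illustrative) is parsed into a CallRuntime node.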
-class CallRuntime V8_FINAL : public Expression {
+class CallRuntime FINAL : public Expression, public FeedbackSlotInterface {
  public:
   DECLARE_NODE_TYPE(CallRuntime)
 
-  Handle<String> name() const { return name_; }
+  Handle<String> name() const { return raw_name_->string(); }
+  const AstRawString* raw_name() const { return raw_name_; }
   const Runtime::Function* function() const { return function_; }
   ZoneList<Expression*>* arguments() const { return arguments_; }
   bool is_jsruntime() const { return function_ == NULL; }
 
+  // Type feedback information.
+  virtual int ComputeFeedbackSlotCount() {
+    return (FLAG_vector_ics && is_jsruntime()) ? 1 : 0;
+  }
+  virtual void SetFirstFeedbackSlot(int slot) {
+    callruntime_feedback_slot_ = slot;
+  }
+
+  int CallRuntimeFeedbackSlot() {
+    DCHECK(!is_jsruntime() ||
+           callruntime_feedback_slot_ != kInvalidFeedbackSlot);
+    return callruntime_feedback_slot_;
+  }
+
   TypeFeedbackId CallRuntimeFeedbackId() const { return reuse(id()); }
 
  protected:
-  CallRuntime(Zone* zone,
-              Handle<String> name,
+  CallRuntime(Zone* zone, const AstRawString* name,
               const Runtime::Function* function,
-              ZoneList<Expression*>* arguments,
-              int pos)
-      : Expression(zone, pos),
-        name_(name),
+              ZoneList<Expression*>* arguments, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
+        raw_name_(name),
         function_(function),
-        arguments_(arguments) { }
+        arguments_(arguments) {}
 
  private:
-  Handle<String> name_;
+  const AstRawString* raw_name_;
   const Runtime::Function* function_;
   ZoneList<Expression*>* arguments_;
+  int callruntime_feedback_slot_;
 };
 
 
-class UnaryOperation V8_FINAL : public Expression {
+class UnaryOperation FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(UnaryOperation)
 
@@ -1936,19 +1960,17 @@
   BailoutId MaterializeFalseId() { return materialize_false_id_; }
 
   virtual void RecordToBooleanTypeFeedback(
-      TypeFeedbackOracle* oracle) V8_OVERRIDE;
+      TypeFeedbackOracle* oracle) OVERRIDE;
 
  protected:
-  UnaryOperation(Zone* zone,
-                 Token::Value op,
-                 Expression* expression,
-                 int pos)
-      : Expression(zone, pos),
+  UnaryOperation(Zone* zone, Token::Value op, Expression* expression, int pos,
+                 IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         op_(op),
         expression_(expression),
-        materialize_true_id_(GetNextId(zone)),
-        materialize_false_id_(GetNextId(zone)) {
-    ASSERT(Token::IsUnaryOp(op));
+        materialize_true_id_(id_gen->GetNextId()),
+        materialize_false_id_(id_gen->GetNextId()) {
+    DCHECK(Token::IsUnaryOp(op));
   }
 
  private:
@@ -1962,11 +1984,11 @@
 };
 
 
-class BinaryOperation V8_FINAL : public Expression {
+class BinaryOperation FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(BinaryOperation)
 
-  virtual bool ResultOverwriteAllowed() const V8_OVERRIDE;
+  virtual bool ResultOverwriteAllowed() const OVERRIDE;
 
   Token::Value op() const { return op_; }
   Expression* left() const { return left_; }
@@ -1983,20 +2005,17 @@
   void set_fixed_right_arg(Maybe<int> arg) { fixed_right_arg_ = arg; }
 
   virtual void RecordToBooleanTypeFeedback(
-      TypeFeedbackOracle* oracle) V8_OVERRIDE;
+      TypeFeedbackOracle* oracle) OVERRIDE;
 
  protected:
-  BinaryOperation(Zone* zone,
-                  Token::Value op,
-                  Expression* left,
-                  Expression* right,
-                  int pos)
-      : Expression(zone, pos),
+  BinaryOperation(Zone* zone, Token::Value op, Expression* left,
+                  Expression* right, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         op_(op),
         left_(left),
         right_(right),
-        right_id_(GetNextId(zone)) {
-    ASSERT(Token::IsBinaryOp(op));
+        right_id_(id_gen->GetNextId()) {
+    DCHECK(Token::IsBinaryOp(op));
   }
 
  private:
@@ -2015,7 +2034,7 @@
 };
 
 
-class CountOperation V8_FINAL : public Expression {
+class CountOperation FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(CountOperation)
 
@@ -2029,13 +2048,13 @@
 
   Expression* expression() const { return expression_; }
 
-  virtual bool IsMonomorphic() V8_OVERRIDE {
+  virtual bool IsMonomorphic() OVERRIDE {
     return receiver_types_.length() == 1;
   }
-  virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+  virtual SmallMapList* GetReceiverTypes() OVERRIDE {
     return &receiver_types_;
   }
-  virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+  virtual KeyedAccessStoreMode GetStoreMode() OVERRIDE {
     return store_mode_;
   }
   Type* type() const { return type_; }
@@ -2048,18 +2067,15 @@
   TypeFeedbackId CountStoreFeedbackId() const { return reuse(id()); }
 
  protected:
-  CountOperation(Zone* zone,
-                 Token::Value op,
-                 bool is_prefix,
-                 Expression* expr,
-                 int pos)
-      : Expression(zone, pos),
+  CountOperation(Zone* zone, Token::Value op, bool is_prefix, Expression* expr,
+                 int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         op_(op),
         is_prefix_(is_prefix),
         store_mode_(STANDARD_STORE),
         expression_(expr),
-        assignment_id_(GetNextId(zone)),
-        count_id_(GetNextId(zone)) {}
+        assignment_id_(id_gen->GetNextId()),
+        count_id_(id_gen->GetNextId()) {}
 
  private:
   Token::Value op_;
@@ -2075,7 +2091,7 @@
 };
 
 
-class CompareOperation V8_FINAL : public Expression {
+class CompareOperation FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(CompareOperation)
 
@@ -2094,17 +2110,14 @@
   bool IsLiteralCompareNull(Expression** expr);
 
  protected:
-  CompareOperation(Zone* zone,
-                   Token::Value op,
-                   Expression* left,
-                   Expression* right,
-                   int pos)
-      : Expression(zone, pos),
+  CompareOperation(Zone* zone, Token::Value op, Expression* left,
+                   Expression* right, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         op_(op),
         left_(left),
         right_(right),
         combined_type_(Type::None(zone)) {
-    ASSERT(Token::IsCompareOp(op));
+    DCHECK(Token::IsCompareOp(op));
   }
 
  private:
@@ -2116,7 +2129,7 @@
 };
 
 
-class Conditional V8_FINAL : public Expression {
+class Conditional FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Conditional)
 
@@ -2128,17 +2141,14 @@
   BailoutId ElseId() const { return else_id_; }
 
  protected:
-  Conditional(Zone* zone,
-              Expression* condition,
-              Expression* then_expression,
-              Expression* else_expression,
-              int position)
-      : Expression(zone, position),
+  Conditional(Zone* zone, Expression* condition, Expression* then_expression,
+              Expression* else_expression, int position, IdGen* id_gen)
+      : Expression(zone, position, id_gen),
         condition_(condition),
         then_expression_(then_expression),
         else_expression_(else_expression),
-        then_id_(GetNextId(zone)),
-        else_id_(GetNextId(zone)) { }
+        then_id_(id_gen->GetNextId()),
+        else_id_(id_gen->GetNextId()) {}
 
  private:
   Expression* condition_;
@@ -2149,7 +2159,7 @@
 };
 
 
-class Assignment V8_FINAL : public Expression {
+class Assignment FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Assignment)
 
@@ -2169,32 +2179,29 @@
 
   // Type feedback information.
   TypeFeedbackId AssignmentFeedbackId() { return reuse(id()); }
-  virtual bool IsMonomorphic() V8_OVERRIDE {
+  virtual bool IsMonomorphic() OVERRIDE {
     return receiver_types_.length() == 1;
   }
   bool IsUninitialized() { return is_uninitialized_; }
   bool HasNoTypeInformation() {
     return is_uninitialized_;
   }
-  virtual SmallMapList* GetReceiverTypes() V8_OVERRIDE {
+  virtual SmallMapList* GetReceiverTypes() OVERRIDE {
     return &receiver_types_;
   }
-  virtual KeyedAccessStoreMode GetStoreMode() V8_OVERRIDE {
+  virtual KeyedAccessStoreMode GetStoreMode() OVERRIDE {
     return store_mode_;
   }
   void set_is_uninitialized(bool b) { is_uninitialized_ = b; }
   void set_store_mode(KeyedAccessStoreMode mode) { store_mode_ = mode; }
 
  protected:
-  Assignment(Zone* zone,
-             Token::Value op,
-             Expression* target,
-             Expression* value,
-             int pos);
+  Assignment(Zone* zone, Token::Value op, Expression* target, Expression* value,
+             int pos, IdGen* id_gen);
 
   template<class Visitor>
   void Init(Zone* zone, AstNodeFactory<Visitor>* factory) {
-    ASSERT(Token::IsAssignmentOp(op_));
+    DCHECK(Token::IsAssignmentOp(op_));
     if (is_compound()) {
       binary_operation_ = factory->NewBinaryOperation(
           binary_op(), target_, value_, position() + 1);
@@ -2215,15 +2222,15 @@
 };
 
 
-class Yield V8_FINAL : public Expression {
+class Yield FINAL : public Expression, public FeedbackSlotInterface {
  public:
   DECLARE_NODE_TYPE(Yield)
 
   enum Kind {
-    INITIAL,     // The initial yield that returns the unboxed generator object.
-    SUSPEND,     // A normal yield: { value: EXPRESSION, done: false }
-    DELEGATING,  // A yield*.
-    FINAL        // A return: { value: EXPRESSION, done: true }
+    kInitial,  // The initial yield that returns the unboxed generator object.
+    kSuspend,  // A normal yield: { value: EXPRESSION, done: false }
+    kDelegating,  // A yield*.
+    kFinal        // A return: { value: EXPRESSION, done: true }
   };
 
   Expression* generator_object() const { return generator_object_; }
@@ -2234,50 +2241,72 @@
   // locates the catch handler in the handler table, and is equivalent to
   // TryCatchStatement::index().
   int index() const {
-    ASSERT(yield_kind() == DELEGATING);
+    DCHECK_EQ(kDelegating, yield_kind());
     return index_;
   }
   void set_index(int index) {
-    ASSERT(yield_kind() == DELEGATING);
+    DCHECK_EQ(kDelegating, yield_kind());
     index_ = index;
   }
 
+  // Type feedback information.
+  virtual int ComputeFeedbackSlotCount() {
+    return (FLAG_vector_ics && yield_kind() == kDelegating) ? 3 : 0;
+  }
+  virtual void SetFirstFeedbackSlot(int slot) {
+    yield_first_feedback_slot_ = slot;
+  }
+
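+  // Illustrative layout of the three consecutive slots reserved for a
+  // delegating yield* under --vector-ics: [first] the keyed load of the
+  // iteration result, [first + 1] its "done" load, [first + 2] its
+  // "value" load.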
+  int KeyedLoadFeedbackSlot() {
+    DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
+    return yield_first_feedback_slot_;
+  }
+
+  int DoneFeedbackSlot() {
+    DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
+    return yield_first_feedback_slot_ + 1;
+  }
+
+  int ValueFeedbackSlot() {
+    DCHECK(yield_first_feedback_slot_ != kInvalidFeedbackSlot);
+    return yield_first_feedback_slot_ + 2;
+  }
+
  protected:
-  Yield(Zone* zone,
-        Expression* generator_object,
-        Expression* expression,
-        Kind yield_kind,
-        int pos)
-      : Expression(zone, pos),
+  Yield(Zone* zone, Expression* generator_object, Expression* expression,
+        Kind yield_kind, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen),
         generator_object_(generator_object),
         expression_(expression),
         yield_kind_(yield_kind),
-        index_(-1) { }
+        index_(-1),
+        yield_first_feedback_slot_(kInvalidFeedbackSlot) {}
 
  private:
   Expression* generator_object_;
   Expression* expression_;
   Kind yield_kind_;
   int index_;
+  int yield_first_feedback_slot_;
 };
 
 
-class Throw V8_FINAL : public Expression {
+class Throw FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Throw)
 
   Expression* exception() const { return exception_; }
 
  protected:
-  Throw(Zone* zone, Expression* exception, int pos)
-      : Expression(zone, pos), exception_(exception) {}
+  Throw(Zone* zone, Expression* exception, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen), exception_(exception) {}
 
  private:
   Expression* exception_;
 };
 
 
-class FunctionLiteral V8_FINAL : public Expression {
+class FunctionLiteral FINAL : public Expression {
  public:
   enum FunctionType {
     ANONYMOUS_EXPRESSION,
@@ -2300,11 +2329,6 @@
     kNotParenthesized
   };
 
-  enum IsGeneratorFlag {
-    kIsGenerator,
-    kNotGenerator
-  };
-
   enum ArityRestriction {
     NORMAL_ARITY,
     GETTER_ARITY,
@@ -2313,7 +2337,8 @@
 
   DECLARE_NODE_TYPE(FunctionLiteral)
 
-  Handle<String> name() const { return name_; }
+  Handle<String> name() const { return raw_name_->string(); }
+  const AstRawString* raw_name() const { return raw_name_; }
   Scope* scope() const { return scope_; }
   ZoneList<Statement*>* body() const { return body_; }
   void set_function_token_position(int pos) { function_token_position_ = pos; }
@@ -2336,13 +2361,37 @@
   void InitializeSharedInfo(Handle<Code> code);
 
   Handle<String> debug_name() const {
-    if (name_->length() > 0) return name_;
+    if (raw_name_ != NULL && !raw_name_->IsEmpty()) {
+      return raw_name_->string();
+    }
     return inferred_name();
   }
 
-  Handle<String> inferred_name() const { return inferred_name_; }
+  Handle<String> inferred_name() const {
+    if (!inferred_name_.is_null()) {
+      DCHECK(raw_inferred_name_ == NULL);
+      return inferred_name_;
+    }
+    if (raw_inferred_name_ != NULL) {
+      return raw_inferred_name_->string();
+    }
+    UNREACHABLE();
+    return Handle<String>();
+  }
+
+  // Only one of {set_inferred_name, set_raw_inferred_name} should be called.
   void set_inferred_name(Handle<String> inferred_name) {
+    DCHECK(!inferred_name.is_null());
     inferred_name_ = inferred_name;
+    DCHECK(raw_inferred_name_ == NULL || raw_inferred_name_->IsEmpty());
+    raw_inferred_name_ = NULL;
+  }
+
+  void set_raw_inferred_name(const AstString* raw_inferred_name) {
+    DCHECK(raw_inferred_name != NULL);
+    raw_inferred_name_ = raw_inferred_name;
+    DCHECK(inferred_name_.is_null());
+    inferred_name_ = Handle<String>();
   }
 
   // shared_info may be null if it's not cached in full code.
@@ -2369,8 +2418,15 @@
     bitfield_ = IsParenthesized::update(bitfield_, kIsParenthesized);
   }
 
+  FunctionKind kind() { return FunctionKindBits::decode(bitfield_); }
+  bool is_arrow() {
+    return IsArrowFunction(FunctionKindBits::decode(bitfield_));
+  }
   bool is_generator() {
-    return IsGenerator::decode(bitfield_) == kIsGenerator;
+    return IsGeneratorFunction(FunctionKindBits::decode(bitfield_));
+  }
+  bool is_concise_method() {
+    return IsConciseMethod(FunctionKindBits::decode(bitfield_));
   }
 
   int ast_node_count() { return ast_properties_.node_count(); }
@@ -2388,46 +2444,43 @@
   }
 
  protected:
-  FunctionLiteral(Zone* zone,
-                  Handle<String> name,
-                  Scope* scope,
-                  ZoneList<Statement*>* body,
-                  int materialized_literal_count,
-                  int expected_property_count,
-                  int handler_count,
-                  int parameter_count,
-                  FunctionType function_type,
+  FunctionLiteral(Zone* zone, const AstRawString* name,
+                  AstValueFactory* ast_value_factory, Scope* scope,
+                  ZoneList<Statement*>* body, int materialized_literal_count,
+                  int expected_property_count, int handler_count,
+                  int parameter_count, FunctionType function_type,
                   ParameterFlag has_duplicate_parameters,
                   IsFunctionFlag is_function,
-                  IsParenthesizedFlag is_parenthesized,
-                  IsGeneratorFlag is_generator,
-                  int position)
-      : Expression(zone, position),
-        name_(name),
+                  IsParenthesizedFlag is_parenthesized, FunctionKind kind,
+                  int position, IdGen* id_gen)
+      : Expression(zone, position, id_gen),
+        raw_name_(name),
         scope_(scope),
         body_(body),
-        inferred_name_(zone->isolate()->factory()->empty_string()),
+        raw_inferred_name_(ast_value_factory->empty_string()),
         dont_optimize_reason_(kNoReason),
         materialized_literal_count_(materialized_literal_count),
         expected_property_count_(expected_property_count),
         handler_count_(handler_count),
         parameter_count_(parameter_count),
         function_token_position_(RelocInfo::kNoPosition) {
-    bitfield_ =
-        IsExpression::encode(function_type != DECLARATION) |
-        IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) |
-        Pretenure::encode(false) |
-        HasDuplicateParameters::encode(has_duplicate_parameters) |
-        IsFunction::encode(is_function) |
-        IsParenthesized::encode(is_parenthesized) |
-        IsGenerator::encode(is_generator);
+    bitfield_ = IsExpression::encode(function_type != DECLARATION) |
+                IsAnonymous::encode(function_type == ANONYMOUS_EXPRESSION) |
+                Pretenure::encode(false) |
+                HasDuplicateParameters::encode(has_duplicate_parameters) |
+                IsFunction::encode(is_function) |
+                IsParenthesized::encode(is_parenthesized) |
+                FunctionKindBits::encode(kind);
+    DCHECK(IsValidFunctionKind(kind));
   }
 
  private:
+  const AstRawString* raw_name_;
   Handle<String> name_;
   Handle<SharedFunctionInfo> shared_info_;
   Scope* scope_;
   ZoneList<Statement*>* body_;
+  const AstString* raw_inferred_name_;
   Handle<String> inferred_name_;
   AstProperties ast_properties_;
   BailoutReason dont_optimize_reason_;
@@ -2439,42 +2492,92 @@
   int function_token_position_;
 
   unsigned bitfield_;
-  class IsExpression: public BitField<bool, 0, 1> {};
-  class IsAnonymous: public BitField<bool, 1, 1> {};
-  class Pretenure: public BitField<bool, 2, 1> {};
-  class HasDuplicateParameters: public BitField<ParameterFlag, 3, 1> {};
-  class IsFunction: public BitField<IsFunctionFlag, 4, 1> {};
-  class IsParenthesized: public BitField<IsParenthesizedFlag, 5, 1> {};
-  class IsGenerator: public BitField<IsGeneratorFlag, 6, 1> {};
+  class IsExpression : public BitField<bool, 0, 1> {};
+  class IsAnonymous : public BitField<bool, 1, 1> {};
+  class Pretenure : public BitField<bool, 2, 1> {};
+  class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
+  class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
+  class IsParenthesized : public BitField<IsParenthesizedFlag, 5, 1> {};
+  class FunctionKindBits : public BitField<FunctionKind, 6, 3> {};
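+  // Illustrative packing of bitfield_: bit 0 IsExpression, bit 1 IsAnonymous,
+  // bit 2 Pretenure, bit 3 HasDuplicateParameters, bit 4 IsFunction,
+  // bit 5 IsParenthesized, bits 6-8 FunctionKindBits.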
 };
 
 
-class NativeFunctionLiteral V8_FINAL : public Expression {
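+// Node for an ES6 class literal, e.g. (illustrative)
+//   class C extends B { m() {} }
+// where raw_name() is "C", extends() is the expression B, constructor() is
+// the (possibly synthesized) constructor function, and properties() holds
+// the remaining members such as m.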
+class ClassLiteral FINAL : public Expression {
+ public:
+  typedef ObjectLiteralProperty Property;
+
+  DECLARE_NODE_TYPE(ClassLiteral)
+
+  Handle<String> name() const { return raw_name_->string(); }
+  const AstRawString* raw_name() const { return raw_name_; }
+  Expression* extends() const { return extends_; }
+  Expression* constructor() const { return constructor_; }
+  ZoneList<Property*>* properties() const { return properties_; }
+
+ protected:
+  ClassLiteral(Zone* zone, const AstRawString* name, Expression* extends,
+               Expression* constructor, ZoneList<Property*>* properties,
+               int position, IdGen* id_gen)
+      : Expression(zone, position, id_gen),
+        raw_name_(name),
+        extends_(extends),
+        constructor_(constructor),
+        properties_(properties) {}
+
+ private:
+  const AstRawString* raw_name_;
+  Expression* extends_;
+  Expression* constructor_;
+  ZoneList<Property*>* properties_;
+};
+
+
+class NativeFunctionLiteral FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(NativeFunctionLiteral)
 
-  Handle<String> name() const { return name_; }
+  Handle<String> name() const { return name_->string(); }
   v8::Extension* extension() const { return extension_; }
 
  protected:
-  NativeFunctionLiteral(
-      Zone* zone, Handle<String> name, v8::Extension* extension, int pos)
-      : Expression(zone, pos), name_(name), extension_(extension) {}
+  NativeFunctionLiteral(Zone* zone, const AstRawString* name,
+                        v8::Extension* extension, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen), name_(name), extension_(extension) {}
 
  private:
-  Handle<String> name_;
+  const AstRawString* name_;
   v8::Extension* extension_;
 };
 
 
-class ThisFunction V8_FINAL : public Expression {
+class ThisFunction FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(ThisFunction)
 
  protected:
-  explicit ThisFunction(Zone* zone, int pos): Expression(zone, pos) {}
+  ThisFunction(Zone* zone, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen) {}
 };
 
+
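+// Node for the "super" keyword in a method, e.g. (illustrative) the receiver
+// part of super.f(). It carries the enclosing method's "this" proxy so the
+// home object can be loaded where the reference is compiled.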
+class SuperReference FINAL : public Expression {
+ public:
+  DECLARE_NODE_TYPE(SuperReference)
+
+  VariableProxy* this_var() const { return this_var_; }
+
+  TypeFeedbackId HomeObjectFeedbackId() { return reuse(id()); }
+
+ protected:
+  SuperReference(Zone* zone, VariableProxy* this_var, int pos, IdGen* id_gen)
+      : Expression(zone, pos, id_gen), this_var_(this_var) {
+    DCHECK(this_var->is_this());
+  }
+
+  VariableProxy* this_var_;
+};
+
+
 #undef DECLARE_NODE_TYPE
 
 
@@ -2508,7 +2611,7 @@
   // expression.
   virtual Interval CaptureRegisters() { return Interval::Empty(); }
   virtual void AppendToText(RegExpText* text, Zone* zone);
-  SmartArrayPointer<const char> ToString(Zone* zone);
+  OStream& Print(OStream& os, Zone* zone);  // NOLINT
 #define MAKE_ASTYPE(Name)                                                  \
   virtual RegExp##Name* As##Name();                                        \
   virtual bool Is##Name();
@@ -2517,19 +2620,19 @@
 };
 
 
-class RegExpDisjunction V8_FINAL : public RegExpTree {
+class RegExpDisjunction FINAL : public RegExpTree {
  public:
   explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpDisjunction* AsDisjunction() V8_OVERRIDE;
-  virtual Interval CaptureRegisters() V8_OVERRIDE;
-  virtual bool IsDisjunction() V8_OVERRIDE;
-  virtual bool IsAnchoredAtStart() V8_OVERRIDE;
-  virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
-  virtual int min_match() V8_OVERRIDE { return min_match_; }
-  virtual int max_match() V8_OVERRIDE { return max_match_; }
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpDisjunction* AsDisjunction() OVERRIDE;
+  virtual Interval CaptureRegisters() OVERRIDE;
+  virtual bool IsDisjunction() OVERRIDE;
+  virtual bool IsAnchoredAtStart() OVERRIDE;
+  virtual bool IsAnchoredAtEnd() OVERRIDE;
+  virtual int min_match() OVERRIDE { return min_match_; }
+  virtual int max_match() OVERRIDE { return max_match_; }
   ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
  private:
   ZoneList<RegExpTree*>* alternatives_;
@@ -2538,19 +2641,19 @@
 };
 
 
-class RegExpAlternative V8_FINAL : public RegExpTree {
+class RegExpAlternative FINAL : public RegExpTree {
  public:
   explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpAlternative* AsAlternative() V8_OVERRIDE;
-  virtual Interval CaptureRegisters() V8_OVERRIDE;
-  virtual bool IsAlternative() V8_OVERRIDE;
-  virtual bool IsAnchoredAtStart() V8_OVERRIDE;
-  virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
-  virtual int min_match() V8_OVERRIDE { return min_match_; }
-  virtual int max_match() V8_OVERRIDE { return max_match_; }
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpAlternative* AsAlternative() OVERRIDE;
+  virtual Interval CaptureRegisters() OVERRIDE;
+  virtual bool IsAlternative() OVERRIDE;
+  virtual bool IsAnchoredAtStart() OVERRIDE;
+  virtual bool IsAnchoredAtEnd() OVERRIDE;
+  virtual int min_match() OVERRIDE { return min_match_; }
+  virtual int max_match() OVERRIDE { return max_match_; }
   ZoneList<RegExpTree*>* nodes() { return nodes_; }
  private:
   ZoneList<RegExpTree*>* nodes_;
@@ -2559,7 +2662,7 @@
 };
 
 
-class RegExpAssertion V8_FINAL : public RegExpTree {
+class RegExpAssertion FINAL : public RegExpTree {
  public:
   enum AssertionType {
     START_OF_LINE,
@@ -2570,22 +2673,22 @@
     NON_BOUNDARY
   };
   explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpAssertion* AsAssertion() V8_OVERRIDE;
-  virtual bool IsAssertion() V8_OVERRIDE;
-  virtual bool IsAnchoredAtStart() V8_OVERRIDE;
-  virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
-  virtual int min_match() V8_OVERRIDE { return 0; }
-  virtual int max_match() V8_OVERRIDE { return 0; }
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpAssertion* AsAssertion() OVERRIDE;
+  virtual bool IsAssertion() OVERRIDE;
+  virtual bool IsAnchoredAtStart() OVERRIDE;
+  virtual bool IsAnchoredAtEnd() OVERRIDE;
+  virtual int min_match() OVERRIDE { return 0; }
+  virtual int max_match() OVERRIDE { return 0; }
   AssertionType assertion_type() { return assertion_type_; }
  private:
   AssertionType assertion_type_;
 };
 
 
-class CharacterSet V8_FINAL BASE_EMBEDDED {
+class CharacterSet FINAL BASE_EMBEDDED {
  public:
   explicit CharacterSet(uc16 standard_set_type)
       : ranges_(NULL),
@@ -2608,7 +2711,7 @@
 };
 
 
-class RegExpCharacterClass V8_FINAL : public RegExpTree {
+class RegExpCharacterClass FINAL : public RegExpTree {
  public:
   RegExpCharacterClass(ZoneList<CharacterRange>* ranges, bool is_negated)
       : set_(ranges),
@@ -2616,15 +2719,15 @@
   explicit RegExpCharacterClass(uc16 type)
       : set_(type),
         is_negated_(false) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpCharacterClass* AsCharacterClass() V8_OVERRIDE;
-  virtual bool IsCharacterClass() V8_OVERRIDE;
-  virtual bool IsTextElement() V8_OVERRIDE { return true; }
-  virtual int min_match() V8_OVERRIDE { return 1; }
-  virtual int max_match() V8_OVERRIDE { return 1; }
-  virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpCharacterClass* AsCharacterClass() OVERRIDE;
+  virtual bool IsCharacterClass() OVERRIDE;
+  virtual bool IsTextElement() OVERRIDE { return true; }
+  virtual int min_match() OVERRIDE { return 1; }
+  virtual int max_match() OVERRIDE { return 1; }
+  virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
   CharacterSet character_set() { return set_; }
   // TODO(lrn): Remove the need for the complex version of is_standard that
   // recognizes a mangled standard set, and just do { return set_.is_special(); }
@@ -2650,18 +2753,18 @@
 };
 
 
-class RegExpAtom V8_FINAL : public RegExpTree {
+class RegExpAtom FINAL : public RegExpTree {
  public:
   explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpAtom* AsAtom() V8_OVERRIDE;
-  virtual bool IsAtom() V8_OVERRIDE;
-  virtual bool IsTextElement() V8_OVERRIDE { return true; }
-  virtual int min_match() V8_OVERRIDE { return data_.length(); }
-  virtual int max_match() V8_OVERRIDE { return data_.length(); }
-  virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpAtom* AsAtom() OVERRIDE;
+  virtual bool IsAtom() OVERRIDE;
+  virtual bool IsTextElement() OVERRIDE { return true; }
+  virtual int min_match() OVERRIDE { return data_.length(); }
+  virtual int max_match() OVERRIDE { return data_.length(); }
+  virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
   Vector<const uc16> data() { return data_; }
   int length() { return data_.length(); }
  private:
@@ -2669,18 +2772,18 @@
 };
 
 
-class RegExpText V8_FINAL : public RegExpTree {
+class RegExpText FINAL : public RegExpTree {
  public:
   explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpText* AsText() V8_OVERRIDE;
-  virtual bool IsText() V8_OVERRIDE;
-  virtual bool IsTextElement() V8_OVERRIDE { return true; }
-  virtual int min_match() V8_OVERRIDE { return length_; }
-  virtual int max_match() V8_OVERRIDE { return length_; }
-  virtual void AppendToText(RegExpText* text, Zone* zone) V8_OVERRIDE;
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpText* AsText() OVERRIDE;
+  virtual bool IsText() OVERRIDE;
+  virtual bool IsTextElement() OVERRIDE { return true; }
+  virtual int min_match() OVERRIDE { return length_; }
+  virtual int max_match() OVERRIDE { return length_; }
+  virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
   void AddElement(TextElement elm, Zone* zone) {
     elements_.Add(elm, zone);
     length_ += elm.length();
@@ -2692,7 +2795,7 @@
 };
 
 
-class RegExpQuantifier V8_FINAL : public RegExpTree {
+class RegExpQuantifier FINAL : public RegExpTree {
  public:
   enum QuantifierType { GREEDY, NON_GREEDY, POSSESSIVE };
   RegExpQuantifier(int min, int max, QuantifierType type, RegExpTree* body)
@@ -2707,9 +2810,9 @@
       max_match_ = max * body->max_match();
     }
   }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
+                             RegExpNode* on_success) OVERRIDE;
   static RegExpNode* ToNode(int min,
                             int max,
                             bool is_greedy,
@@ -2717,11 +2820,11 @@
                             RegExpCompiler* compiler,
                             RegExpNode* on_success,
                             bool not_at_start = false);
-  virtual RegExpQuantifier* AsQuantifier() V8_OVERRIDE;
-  virtual Interval CaptureRegisters() V8_OVERRIDE;
-  virtual bool IsQuantifier() V8_OVERRIDE;
-  virtual int min_match() V8_OVERRIDE { return min_match_; }
-  virtual int max_match() V8_OVERRIDE { return max_match_; }
+  virtual RegExpQuantifier* AsQuantifier() OVERRIDE;
+  virtual Interval CaptureRegisters() OVERRIDE;
+  virtual bool IsQuantifier() OVERRIDE;
+  virtual int min_match() OVERRIDE { return min_match_; }
+  virtual int max_match() OVERRIDE { return max_match_; }
   int min() { return min_; }
   int max() { return max_; }
   bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
@@ -2739,24 +2842,24 @@
 };
 
 
-class RegExpCapture V8_FINAL : public RegExpTree {
+class RegExpCapture FINAL : public RegExpTree {
  public:
   explicit RegExpCapture(RegExpTree* body, int index)
       : body_(body), index_(index) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
+                             RegExpNode* on_success) OVERRIDE;
   static RegExpNode* ToNode(RegExpTree* body,
                             int index,
                             RegExpCompiler* compiler,
                             RegExpNode* on_success);
-  virtual RegExpCapture* AsCapture() V8_OVERRIDE;
-  virtual bool IsAnchoredAtStart() V8_OVERRIDE;
-  virtual bool IsAnchoredAtEnd() V8_OVERRIDE;
-  virtual Interval CaptureRegisters() V8_OVERRIDE;
-  virtual bool IsCapture() V8_OVERRIDE;
-  virtual int min_match() V8_OVERRIDE { return body_->min_match(); }
-  virtual int max_match() V8_OVERRIDE { return body_->max_match(); }
+  virtual RegExpCapture* AsCapture() OVERRIDE;
+  virtual bool IsAnchoredAtStart() OVERRIDE;
+  virtual bool IsAnchoredAtEnd() OVERRIDE;
+  virtual Interval CaptureRegisters() OVERRIDE;
+  virtual bool IsCapture() OVERRIDE;
+  virtual int min_match() OVERRIDE { return body_->min_match(); }
+  virtual int max_match() OVERRIDE { return body_->max_match(); }
   RegExpTree* body() { return body_; }
   int index() { return index_; }
   static int StartRegister(int index) { return index * 2; }
@@ -2768,7 +2871,7 @@
 };
 
 
-class RegExpLookahead V8_FINAL : public RegExpTree {
+class RegExpLookahead FINAL : public RegExpTree {
  public:
   RegExpLookahead(RegExpTree* body,
                   bool is_positive,
@@ -2779,15 +2882,15 @@
         capture_count_(capture_count),
         capture_from_(capture_from) { }
 
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpLookahead* AsLookahead() V8_OVERRIDE;
-  virtual Interval CaptureRegisters() V8_OVERRIDE;
-  virtual bool IsLookahead() V8_OVERRIDE;
-  virtual bool IsAnchoredAtStart() V8_OVERRIDE;
-  virtual int min_match() V8_OVERRIDE { return 0; }
-  virtual int max_match() V8_OVERRIDE { return 0; }
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpLookahead* AsLookahead() OVERRIDE;
+  virtual Interval CaptureRegisters() OVERRIDE;
+  virtual bool IsLookahead() OVERRIDE;
+  virtual bool IsAnchoredAtStart() OVERRIDE;
+  virtual int min_match() OVERRIDE { return 0; }
+  virtual int max_match() OVERRIDE { return 0; }
   RegExpTree* body() { return body_; }
   bool is_positive() { return is_positive_; }
   int capture_count() { return capture_count_; }
@@ -2801,17 +2904,17 @@
 };
 
 
-class RegExpBackReference V8_FINAL : public RegExpTree {
+class RegExpBackReference FINAL : public RegExpTree {
  public:
   explicit RegExpBackReference(RegExpCapture* capture)
       : capture_(capture) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpBackReference* AsBackReference() V8_OVERRIDE;
-  virtual bool IsBackReference() V8_OVERRIDE;
-  virtual int min_match() V8_OVERRIDE { return 0; }
-  virtual int max_match() V8_OVERRIDE { return capture_->max_match(); }
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpBackReference* AsBackReference() OVERRIDE;
+  virtual bool IsBackReference() OVERRIDE;
+  virtual int min_match() OVERRIDE { return 0; }
+  virtual int max_match() OVERRIDE { return capture_->max_match(); }
   int index() { return capture_->index(); }
   RegExpCapture* capture() { return capture_; }
  private:
@@ -2819,16 +2922,16 @@
 };
 
 
-class RegExpEmpty V8_FINAL : public RegExpTree {
+class RegExpEmpty FINAL : public RegExpTree {
  public:
   RegExpEmpty() { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) V8_OVERRIDE;
+  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
-                             RegExpNode* on_success) V8_OVERRIDE;
-  virtual RegExpEmpty* AsEmpty() V8_OVERRIDE;
-  virtual bool IsEmpty() V8_OVERRIDE;
-  virtual int min_match() V8_OVERRIDE { return 0; }
-  virtual int max_match() V8_OVERRIDE { return 0; }
+                             RegExpNode* on_success) OVERRIDE;
+  virtual RegExpEmpty* AsEmpty() OVERRIDE;
+  virtual bool IsEmpty() OVERRIDE;
+  virtual int min_match() OVERRIDE { return 0; }
+  virtual int max_match() OVERRIDE { return 0; }
   static RegExpEmpty* GetInstance() {
     static RegExpEmpty* instance = ::new RegExpEmpty();
     return instance;
@@ -2872,7 +2975,7 @@
 
 #define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS()                       \
 public:                                                             \
-  virtual void Visit(AstNode* node) V8_FINAL V8_OVERRIDE {          \
+  virtual void Visit(AstNode* node) FINAL OVERRIDE {                \
     if (!CheckStackOverflow()) node->Accept(this);                  \
   }                                                                 \
                                                                     \
@@ -2904,10 +3007,17 @@
 
 class AstConstructionVisitor BASE_EMBEDDED {
  public:
-  AstConstructionVisitor() : dont_optimize_reason_(kNoReason) { }
+  AstConstructionVisitor()
+      : dont_crankshaft_reason_(kNoReason), dont_turbofan_reason_(kNoReason) {}
 
   AstProperties* ast_properties() { return &properties_; }
-  BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
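+  // If both pipelines recorded a bailout reason, report the TurboFan one
+  // first.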
+  BailoutReason dont_optimize_reason() {
+    if (dont_turbofan_reason_ != kNoReason) {
+      return dont_turbofan_reason_;
+    } else {
+      return dont_crankshaft_reason_;
+    }
+  }
 
  private:
   template<class> friend class AstNodeFactory;
@@ -2920,8 +3030,11 @@
 
   void increase_node_count() { properties_.add_node_count(1); }
   void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
-  void set_dont_optimize_reason(BailoutReason reason) {
-      dont_optimize_reason_ = reason;
+  void set_dont_crankshaft_reason(BailoutReason reason) {
+    dont_crankshaft_reason_ = reason;
+  }
+  void set_dont_turbofan_reason(BailoutReason reason) {
+    dont_turbofan_reason_ = reason;
   }
 
   void add_slot_node(FeedbackSlotInterface* slot_node) {
@@ -2933,7 +3046,8 @@
   }
 
   AstProperties properties_;
-  BailoutReason dont_optimize_reason_;
+  BailoutReason dont_crankshaft_reason_;
+  BailoutReason dont_turbofan_reason_;
 };
 
 
@@ -2952,9 +3066,11 @@
 // AstNode factory
 
 template<class Visitor>
-class AstNodeFactory V8_FINAL BASE_EMBEDDED {
+class AstNodeFactory FINAL BASE_EMBEDDED {
  public:
-  explicit AstNodeFactory(Zone* zone) : zone_(zone) { }
+  AstNodeFactory(Zone* zone, AstValueFactory* ast_value_factory,
+                 AstNode::IdGen* id_gen)
+      : zone_(zone), ast_value_factory_(ast_value_factory), id_gen_(id_gen) {}
 
   Visitor* visitor() { return &visitor_; }
 
@@ -3018,8 +3134,8 @@
     VISIT_AND_RETURN(ModuleVariable, module)
   }
 
-  ModulePath* NewModulePath(Module* origin, Handle<String> name, int pos) {
-    ModulePath* module = new(zone_) ModulePath(zone_, origin, name, pos);
+  ModulePath* NewModulePath(Module* origin, const AstRawString* name, int pos) {
+    ModulePath* module = new (zone_) ModulePath(zone_, origin, name, pos);
     VISIT_AND_RETURN(ModulePath, module)
   }
 
@@ -3028,19 +3144,19 @@
     VISIT_AND_RETURN(ModuleUrl, module)
   }
 
-  Block* NewBlock(ZoneStringList* labels,
+  Block* NewBlock(ZoneList<const AstRawString*>* labels,
                   int capacity,
                   bool is_initializer_block,
                   int pos) {
-    Block* block = new(zone_) Block(
-        zone_, labels, capacity, is_initializer_block, pos);
+    Block* block = new (zone_)
+        Block(zone_, labels, capacity, is_initializer_block, pos, id_gen_);
     VISIT_AND_RETURN(Block, block)
   }
 
-#define STATEMENT_WITH_LABELS(NodeType) \
-  NodeType* New##NodeType(ZoneStringList* labels, int pos) { \
-    NodeType* stmt = new(zone_) NodeType(zone_, labels, pos); \
-    VISIT_AND_RETURN(NodeType, stmt); \
+#define STATEMENT_WITH_LABELS(NodeType)                                     \
+  NodeType* New##NodeType(ZoneList<const AstRawString*>* labels, int pos) { \
+    NodeType* stmt = new (zone_) NodeType(zone_, labels, pos, id_gen_);     \
+    VISIT_AND_RETURN(NodeType, stmt);                                       \
   }
   STATEMENT_WITH_LABELS(DoWhileStatement)
   STATEMENT_WITH_LABELS(WhileStatement)
@@ -3049,15 +3165,17 @@
 #undef STATEMENT_WITH_LABELS
 
   ForEachStatement* NewForEachStatement(ForEachStatement::VisitMode visit_mode,
-                                        ZoneStringList* labels,
+                                        ZoneList<const AstRawString*>* labels,
                                         int pos) {
     switch (visit_mode) {
       case ForEachStatement::ENUMERATE: {
-        ForInStatement* stmt = new(zone_) ForInStatement(zone_, labels, pos);
+        ForInStatement* stmt =
+            new (zone_) ForInStatement(zone_, labels, pos, id_gen_);
         VISIT_AND_RETURN(ForInStatement, stmt);
       }
       case ForEachStatement::ITERATE: {
-        ForOfStatement* stmt = new(zone_) ForOfStatement(zone_, labels, pos);
+        ForOfStatement* stmt =
+            new (zone_) ForOfStatement(zone_, labels, pos, id_gen_);
         VISIT_AND_RETURN(ForOfStatement, stmt);
       }
     }
@@ -3105,8 +3223,8 @@
                               Statement* then_statement,
                               Statement* else_statement,
                               int pos) {
-    IfStatement* stmt = new(zone_) IfStatement(
-        zone_, condition, then_statement, else_statement, pos);
+    IfStatement* stmt = new (zone_) IfStatement(
+        zone_, condition, then_statement, else_statement, pos, id_gen_);
     VISIT_AND_RETURN(IfStatement, stmt)
   }
 
@@ -3131,7 +3249,8 @@
   }
 
   DebuggerStatement* NewDebuggerStatement(int pos) {
-    DebuggerStatement* stmt = new(zone_) DebuggerStatement(zone_, pos);
+    DebuggerStatement* stmt =
+        new (zone_) DebuggerStatement(zone_, pos, id_gen_);
     VISIT_AND_RETURN(DebuggerStatement, stmt)
   }
 
@@ -3142,18 +3261,64 @@
   CaseClause* NewCaseClause(
       Expression* label, ZoneList<Statement*>* statements, int pos) {
     CaseClause* clause =
-        new(zone_) CaseClause(zone_, label, statements, pos);
+        new (zone_) CaseClause(zone_, label, statements, pos, id_gen_);
     VISIT_AND_RETURN(CaseClause, clause)
   }
 
-  Literal* NewLiteral(Handle<Object> handle, int pos) {
-    Literal* lit = new(zone_) Literal(zone_, handle, pos);
+  Literal* NewStringLiteral(const AstRawString* string, int pos) {
+    Literal* lit = new (zone_)
+        Literal(zone_, ast_value_factory_->NewString(string), pos, id_gen_);
+    VISIT_AND_RETURN(Literal, lit)
+  }
+
+  // A JavaScript symbol (ECMA-262 edition 6).
+  Literal* NewSymbolLiteral(const char* name, int pos) {
+    Literal* lit = new (zone_)
+        Literal(zone_, ast_value_factory_->NewSymbol(name), pos, id_gen_);
     VISIT_AND_RETURN(Literal, lit)
   }
 
   Literal* NewNumberLiteral(double number, int pos) {
-    return NewLiteral(
-        zone_->isolate()->factory()->NewNumber(number, TENURED), pos);
+    Literal* lit = new (zone_)
+        Literal(zone_, ast_value_factory_->NewNumber(number), pos, id_gen_);
+    VISIT_AND_RETURN(Literal, lit)
+  }
+
+  Literal* NewSmiLiteral(int number, int pos) {
+    Literal* lit = new (zone_)
+        Literal(zone_, ast_value_factory_->NewSmi(number), pos, id_gen_);
+    VISIT_AND_RETURN(Literal, lit)
+  }
+
+  Literal* NewBooleanLiteral(bool b, int pos) {
+    Literal* lit = new (zone_)
+        Literal(zone_, ast_value_factory_->NewBoolean(b), pos, id_gen_);
+    VISIT_AND_RETURN(Literal, lit)
+  }
+
+  Literal* NewStringListLiteral(ZoneList<const AstRawString*>* strings,
+                                int pos) {
+    Literal* lit = new (zone_) Literal(
+        zone_, ast_value_factory_->NewStringList(strings), pos, id_gen_);
+    VISIT_AND_RETURN(Literal, lit)
+  }
+
+  Literal* NewNullLiteral(int pos) {
+    Literal* lit =
+        new (zone_) Literal(zone_, ast_value_factory_->NewNull(), pos, id_gen_);
+    VISIT_AND_RETURN(Literal, lit)
+  }
+
+  Literal* NewUndefinedLiteral(int pos) {
+    Literal* lit = new (zone_)
+        Literal(zone_, ast_value_factory_->NewUndefined(), pos, id_gen_);
+    VISIT_AND_RETURN(Literal, lit)
+  }
+
+  Literal* NewTheHoleLiteral(int pos) {
+    Literal* lit = new (zone_)
+        Literal(zone_, ast_value_factory_->NewTheHole(), pos, id_gen_);
+    VISIT_AND_RETURN(Literal, lit)
   }
 
   ObjectLiteral* NewObjectLiteral(
@@ -3162,83 +3327,86 @@
       int boilerplate_properties,
       bool has_function,
       int pos) {
-    ObjectLiteral* lit = new(zone_) ObjectLiteral(
-        zone_, properties, literal_index, boilerplate_properties,
-        has_function, pos);
+    ObjectLiteral* lit = new (zone_)
+        ObjectLiteral(zone_, properties, literal_index, boilerplate_properties,
+                      has_function, pos, id_gen_);
     VISIT_AND_RETURN(ObjectLiteral, lit)
   }
 
   ObjectLiteral::Property* NewObjectLiteralProperty(Literal* key,
-                                                    Expression* value) {
-    return new(zone_) ObjectLiteral::Property(zone_, key, value);
+                                                    Expression* value,
+                                                    bool is_static) {
+    return new (zone_) ObjectLiteral::Property(zone_, ast_value_factory_, key,
+                                               value, is_static);
   }
 
   ObjectLiteral::Property* NewObjectLiteralProperty(bool is_getter,
                                                     FunctionLiteral* value,
-                                                    int pos) {
+                                                    int pos, bool is_static) {
     ObjectLiteral::Property* prop =
-        new(zone_) ObjectLiteral::Property(zone_, is_getter, value);
-    prop->set_key(NewLiteral(value->name(), pos));
+        new (zone_) ObjectLiteral::Property(zone_, is_getter, value, is_static);
+    prop->set_key(NewStringLiteral(value->raw_name(), pos));
     return prop;  // Not an AST node, will not be visited.
   }
 
-  RegExpLiteral* NewRegExpLiteral(Handle<String> pattern,
-                                  Handle<String> flags,
+  RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern,
+                                  const AstRawString* flags,
                                   int literal_index,
                                   int pos) {
-    RegExpLiteral* lit =
-        new(zone_) RegExpLiteral(zone_, pattern, flags, literal_index, pos);
+    RegExpLiteral* lit = new (zone_)
+        RegExpLiteral(zone_, pattern, flags, literal_index, pos, id_gen_);
     VISIT_AND_RETURN(RegExpLiteral, lit);
   }
 
   ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
                                 int literal_index,
                                 int pos) {
-    ArrayLiteral* lit = new(zone_) ArrayLiteral(
-        zone_, values, literal_index, pos);
+    ArrayLiteral* lit =
+        new (zone_) ArrayLiteral(zone_, values, literal_index, pos, id_gen_);
     VISIT_AND_RETURN(ArrayLiteral, lit)
   }
 
   VariableProxy* NewVariableProxy(Variable* var,
                                   int pos = RelocInfo::kNoPosition) {
-    VariableProxy* proxy = new(zone_) VariableProxy(zone_, var, pos);
+    VariableProxy* proxy = new (zone_) VariableProxy(zone_, var, pos, id_gen_);
     VISIT_AND_RETURN(VariableProxy, proxy)
   }
 
-  VariableProxy* NewVariableProxy(Handle<String> name,
+  VariableProxy* NewVariableProxy(const AstRawString* name,
                                   bool is_this,
                                   Interface* interface = Interface::NewValue(),
                                   int position = RelocInfo::kNoPosition) {
-    VariableProxy* proxy =
-        new(zone_) VariableProxy(zone_, name, is_this, interface, position);
+    VariableProxy* proxy = new (zone_)
+        VariableProxy(zone_, name, is_this, interface, position, id_gen_);
     VISIT_AND_RETURN(VariableProxy, proxy)
   }
 
   Property* NewProperty(Expression* obj, Expression* key, int pos) {
-    Property* prop = new(zone_) Property(zone_, obj, key, pos);
+    Property* prop = new (zone_) Property(zone_, obj, key, pos, id_gen_);
     VISIT_AND_RETURN(Property, prop)
   }
 
   Call* NewCall(Expression* expression,
                 ZoneList<Expression*>* arguments,
                 int pos) {
-    Call* call = new(zone_) Call(zone_, expression, arguments, pos);
+    Call* call = new (zone_) Call(zone_, expression, arguments, pos, id_gen_);
     VISIT_AND_RETURN(Call, call)
   }
 
   CallNew* NewCallNew(Expression* expression,
                       ZoneList<Expression*>* arguments,
                       int pos) {
-    CallNew* call = new(zone_) CallNew(zone_, expression, arguments, pos);
+    CallNew* call =
+        new (zone_) CallNew(zone_, expression, arguments, pos, id_gen_);
     VISIT_AND_RETURN(CallNew, call)
   }
 
-  CallRuntime* NewCallRuntime(Handle<String> name,
+  CallRuntime* NewCallRuntime(const AstRawString* name,
                               const Runtime::Function* function,
                               ZoneList<Expression*>* arguments,
                               int pos) {
     CallRuntime* call =
-        new(zone_) CallRuntime(zone_, name, function, arguments, pos);
+        new (zone_) CallRuntime(zone_, name, function, arguments, pos, id_gen_);
     VISIT_AND_RETURN(CallRuntime, call)
   }
 
@@ -3246,7 +3414,7 @@
                                     Expression* expression,
                                     int pos) {
     UnaryOperation* node =
-        new(zone_) UnaryOperation(zone_, op, expression, pos);
+        new (zone_) UnaryOperation(zone_, op, expression, pos, id_gen_);
     VISIT_AND_RETURN(UnaryOperation, node)
   }
 
@@ -3255,7 +3423,7 @@
                                       Expression* right,
                                       int pos) {
     BinaryOperation* node =
-        new(zone_) BinaryOperation(zone_, op, left, right, pos);
+        new (zone_) BinaryOperation(zone_, op, left, right, pos, id_gen_);
     VISIT_AND_RETURN(BinaryOperation, node)
   }
 
@@ -3264,7 +3432,7 @@
                                     Expression* expr,
                                     int pos) {
     CountOperation* node =
-        new(zone_) CountOperation(zone_, op, is_prefix, expr, pos);
+        new (zone_) CountOperation(zone_, op, is_prefix, expr, pos, id_gen_);
     VISIT_AND_RETURN(CountOperation, node)
   }
 
@@ -3273,7 +3441,7 @@
                                         Expression* right,
                                         int pos) {
     CompareOperation* node =
-        new(zone_) CompareOperation(zone_, op, left, right, pos);
+        new (zone_) CompareOperation(zone_, op, left, right, pos, id_gen_);
     VISIT_AND_RETURN(CompareOperation, node)
   }
 
@@ -3281,8 +3449,8 @@
                               Expression* then_expression,
                               Expression* else_expression,
                               int position) {
-    Conditional* cond = new(zone_) Conditional(
-        zone_, condition, then_expression, else_expression, position);
+    Conditional* cond = new (zone_) Conditional(
+        zone_, condition, then_expression, else_expression, position, id_gen_);
     VISIT_AND_RETURN(Conditional, cond)
   }
 
@@ -3291,7 +3459,7 @@
                             Expression* value,
                             int pos) {
     Assignment* assign =
-        new(zone_) Assignment(zone_, op, target, value, pos);
+        new (zone_) Assignment(zone_, op, target, value, pos, id_gen_);
     assign->Init(zone_, this);
     VISIT_AND_RETURN(Assignment, assign)
   }
@@ -3300,35 +3468,31 @@
                   Expression* expression,
                   Yield::Kind yield_kind,
                   int pos) {
-    Yield* yield = new(zone_) Yield(
-        zone_, generator_object, expression, yield_kind, pos);
+    if (!expression) expression = NewUndefinedLiteral(pos);
+    Yield* yield = new (zone_)
+        Yield(zone_, generator_object, expression, yield_kind, pos, id_gen_);
     VISIT_AND_RETURN(Yield, yield)
   }
 
   Throw* NewThrow(Expression* exception, int pos) {
-    Throw* t = new(zone_) Throw(zone_, exception, pos);
+    Throw* t = new (zone_) Throw(zone_, exception, pos, id_gen_);
     VISIT_AND_RETURN(Throw, t)
   }
 
   FunctionLiteral* NewFunctionLiteral(
-      Handle<String> name,
-      Scope* scope,
-      ZoneList<Statement*>* body,
-      int materialized_literal_count,
-      int expected_property_count,
-      int handler_count,
-      int parameter_count,
+      const AstRawString* name, AstValueFactory* ast_value_factory,
+      Scope* scope, ZoneList<Statement*>* body, int materialized_literal_count,
+      int expected_property_count, int handler_count, int parameter_count,
       FunctionLiteral::ParameterFlag has_duplicate_parameters,
       FunctionLiteral::FunctionType function_type,
       FunctionLiteral::IsFunctionFlag is_function,
-      FunctionLiteral::IsParenthesizedFlag is_parenthesized,
-      FunctionLiteral::IsGeneratorFlag is_generator,
+      FunctionLiteral::IsParenthesizedFlag is_parenthesized, FunctionKind kind,
       int position) {
-    FunctionLiteral* lit = new(zone_) FunctionLiteral(
-        zone_, name, scope, body,
-        materialized_literal_count, expected_property_count, handler_count,
-        parameter_count, function_type, has_duplicate_parameters, is_function,
-        is_parenthesized, is_generator, position);
+    FunctionLiteral* lit = new (zone_) FunctionLiteral(
+        zone_, name, ast_value_factory, scope, body, materialized_literal_count,
+        expected_property_count, handler_count, parameter_count, function_type,
+        has_duplicate_parameters, is_function, is_parenthesized, kind, position,
+        id_gen_);
     // Top-level literal doesn't count for the AST's properties.
     if (is_function == FunctionLiteral::kIsFunction) {
       visitor_.VisitFunctionLiteral(lit);
@@ -3336,23 +3500,41 @@
     return lit;
   }
 
-  NativeFunctionLiteral* NewNativeFunctionLiteral(
-      Handle<String> name, v8::Extension* extension, int pos) {
+  ClassLiteral* NewClassLiteral(const AstRawString* name, Expression* extends,
+                                Expression* constructor,
+                                ZoneList<ObjectLiteral::Property*>* properties,
+                                int position) {
+    ClassLiteral* lit = new (zone_) ClassLiteral(
+        zone_, name, extends, constructor, properties, position, id_gen_);
+    VISIT_AND_RETURN(ClassLiteral, lit)
+  }
+
+  NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
+                                                  v8::Extension* extension,
+                                                  int pos) {
     NativeFunctionLiteral* lit =
-        new(zone_) NativeFunctionLiteral(zone_, name, extension, pos);
+        new (zone_) NativeFunctionLiteral(zone_, name, extension, pos, id_gen_);
     VISIT_AND_RETURN(NativeFunctionLiteral, lit)
   }
 
   ThisFunction* NewThisFunction(int pos) {
-    ThisFunction* fun = new(zone_) ThisFunction(zone_, pos);
+    ThisFunction* fun = new (zone_) ThisFunction(zone_, pos, id_gen_);
     VISIT_AND_RETURN(ThisFunction, fun)
   }
 
+  SuperReference* NewSuperReference(VariableProxy* this_var, int pos) {
+    SuperReference* super =
+        new (zone_) SuperReference(zone_, this_var, pos, id_gen_);
+    VISIT_AND_RETURN(SuperReference, super);
+  }
+
 #undef VISIT_AND_RETURN
 
  private:
   Zone* zone_;
   Visitor visitor_;
+  AstValueFactory* ast_value_factory_;
+  AstNode::IdGen* id_gen_;
 };
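
The factory above pairs zone (arena) allocation with an immediate visitor callback: every New* method placement-news the node into the Zone, lets the templated Visitor see it via VISIT_AND_RETURN, and returns it. A minimal self-contained sketch of the same idiom — Zone, NodeFactory, and CountingVisitor are illustrative names, not V8's:

#include <cstddef>
#include <vector>

// Toy bump-style Zone: nodes live exactly as long as the zone does and are
// never individually freed, mirroring V8's arena allocation.
class Zone {
 public:
  void* Allocate(size_t size) {
    storage_.push_back(std::vector<char>(size));
    return storage_.back().data();
  }
 private:
  std::vector<std::vector<char> > storage_;
};

inline void* operator new(size_t size, Zone* zone) {
  return zone->Allocate(size);
}

struct Literal { int value; };

// The Visitor is a template parameter, as in AstNodeFactory<Visitor>: each
// factory method allocates in the zone, visits the node, then returns it.
template <typename Visitor>
class NodeFactory {
 public:
  explicit NodeFactory(Zone* zone) : zone_(zone) {}
  Literal* NewLiteral(int value) {
    Literal* lit = new (zone_) Literal;
    lit->value = value;
    visitor_.VisitLiteral(lit);  // the VISIT_AND_RETURN step
    return lit;
  }
 private:
  Zone* zone_;
  Visitor visitor_;
};

struct CountingVisitor {
  CountingVisitor() : count(0) {}
  void VisitLiteral(Literal*) { ++count; }
  int count;
};

int main() {
  Zone zone;
  NodeFactory<CountingVisitor> factory(&zone);
  factory.NewLiteral(42);  // allocated in the zone, visited, returned
  return 0;
}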
 
 
diff --git a/src/background-parsing-task.cc b/src/background-parsing-task.cc
new file mode 100644
index 0000000..c7602a7
--- /dev/null
+++ b/src/background-parsing-task.cc
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/background-parsing-task.h"
+
+namespace v8 {
+namespace internal {
+
+BackgroundParsingTask::BackgroundParsingTask(
+    StreamedSource* source, ScriptCompiler::CompileOptions options,
+    int stack_size, Isolate* isolate)
+    : source_(source), options_(options), stack_size_(stack_size) {
+  // Prepare the data for the internalization phase and compilation phase, which
+  // will happen in the main thread after parsing.
+  source->info.Reset(new i::CompilationInfoWithZone(source->source_stream.get(),
+                                                    source->encoding, isolate));
+  source->info->MarkAsGlobal();
+
+  // We don't set the context to the CompilationInfo yet, because the background
+  // thread cannot do anything with it anyway. We set it just before compilation
+  // on the foreground thread.
+  DCHECK(options == ScriptCompiler::kProduceParserCache ||
+         options == ScriptCompiler::kProduceCodeCache ||
+         options == ScriptCompiler::kNoCompileOptions);
+  source->allow_lazy =
+      !i::Compiler::DebuggerWantsEagerCompilation(source->info.get());
+  source->hash_seed = isolate->heap()->HashSeed();
+}
+
+
+void BackgroundParsingTask::Run() {
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+
+  ScriptData* script_data = NULL;
+  if (options_ == ScriptCompiler::kProduceParserCache ||
+      options_ == ScriptCompiler::kProduceCodeCache) {
+    source_->info->SetCachedData(&script_data, options_);
+  }
+
+  uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - stack_size_ * KB;
+  Parser::ParseInfo parse_info = {limit, source_->hash_seed,
+                                  &source_->unicode_cache};
+
+  // Parser needs to stay alive for finalizing the parsing on the main
+  // thread. Passing &parse_info is OK because Parser doesn't store it.
+  source_->parser.Reset(new Parser(source_->info.get(), &parse_info));
+  source_->parser->set_allow_lazy(source_->allow_lazy);
+  source_->parser->ParseOnBackground();
+
+  if (script_data != NULL) {
+    source_->cached_data.Reset(new ScriptCompiler::CachedData(
+        script_data->data(), script_data->length(),
+        ScriptCompiler::CachedData::BufferOwned));
+    script_data->ReleaseDataOwnership();
+    delete script_data;
+  }
+}
+}
+}  // namespace v8::internal
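
Two details of Run() are worth spelling out: the Disallow* scopes assert that the background thread never touches the V8 heap or handles, and the stack limit is derived from the address of a local variable, which approximates the current stack pointer on the worker thread. A sketch of that stack-limit trick, assuming a downward-growing stack (illustrative, not V8 API):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// The address of a local approximates the current stack pointer; the limit
// is that address minus the allowed budget (the stack grows downward here).
uintptr_t ComputeStackLimit(size_t stack_size_bytes) {
  int here;
  return reinterpret_cast<uintptr_t>(&here) - stack_size_bytes;
}

// Deep in a recursion, compare a fresh local's address against the limit.
bool NearStackLimit(uintptr_t limit) {
  int probe;
  return reinterpret_cast<uintptr_t>(&probe) < limit;
}

int main() {
  uintptr_t limit = ComputeStackLimit(512 * 1024);  // 512 KB budget
  std::printf("near limit yet: %d\n", NearStackLimit(limit));
  return 0;
}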
diff --git a/src/background-parsing-task.h b/src/background-parsing-task.h
new file mode 100644
index 0000000..19c93a8
--- /dev/null
+++ b/src/background-parsing-task.h
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BACKGROUND_PARSING_TASK_H_
+#define V8_BACKGROUND_PARSING_TASK_H_
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
+#include "src/compiler.h"
+#include "src/parser.h"
+#include "src/smart-pointers.h"
+
+namespace v8 {
+namespace internal {
+
+class Parser;
+
+// Internal representation of v8::ScriptCompiler::StreamedSource. Contains all
+// data which needs to be transmitted between threads for background parsing,
+// finalizing it on the main thread, and compiling on the main thread.
+struct StreamedSource {
+  StreamedSource(ScriptCompiler::ExternalSourceStream* source_stream,
+                 ScriptCompiler::StreamedSource::Encoding encoding)
+      : source_stream(source_stream),
+        encoding(encoding),
+        hash_seed(0),
+        allow_lazy(false) {}
+
+  // Internal implementation of v8::ScriptCompiler::StreamedSource.
+  SmartPointer<ScriptCompiler::ExternalSourceStream> source_stream;
+  ScriptCompiler::StreamedSource::Encoding encoding;
+  SmartPointer<ScriptCompiler::CachedData> cached_data;
+
+  // Data needed for parsing, and data needed to be passed between threads
+  // between parsing and compilation. These need to be initialized before the
+  // compilation starts.
+  UnicodeCache unicode_cache;
+  SmartPointer<CompilationInfo> info;
+  uint32_t hash_seed;
+  bool allow_lazy;
+  SmartPointer<Parser> parser;
+
+ private:
+  // Prevent copying. Not implemented.
+  StreamedSource(const StreamedSource&);
+  StreamedSource& operator=(const StreamedSource&);
+};
+
+
+class BackgroundParsingTask : public ScriptCompiler::ScriptStreamingTask {
+ public:
+  BackgroundParsingTask(StreamedSource* source,
+                        ScriptCompiler::CompileOptions options, int stack_size,
+                        Isolate* isolate);
+
+  virtual void Run();
+
+ private:
+  StreamedSource* source_;  // Not owned.
+  ScriptCompiler::CompileOptions options_;
+  int stack_size_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_BACKGROUND_PARSING_TASK_H_
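
From the embedder's side, StreamedSource and BackgroundParsingTask are driven through the public v8.h streaming API: StartStreamingScript hands back a ScriptStreamingTask to run on a worker thread, after which Compile finalizes on the main thread. A rough sketch of that flow, assuming an initialized Isolate with an entered Context (error handling and threading omitted; signatures per the v8.h of this era):

#include <cstring>
#include "include/v8.h"

using namespace v8;

// One-shot stream: hands the whole source to the parser in a single chunk.
class OneShotStream : public ScriptCompiler::ExternalSourceStream {
 public:
  explicit OneShotStream(const char* src) : src_(src), done_(false) {}
  virtual size_t GetMoreData(const uint8_t** dest) {
    if (done_) return 0;  // returning 0 signals end of stream
    done_ = true;
    size_t length = strlen(src_);
    uint8_t* chunk = new uint8_t[length];  // per v8.h, V8 takes ownership
    memcpy(chunk, src_, length);
    *dest = chunk;
    return length;
  }
 private:
  const char* src_;
  bool done_;
};

Local<Script> StreamingCompile(Isolate* isolate, const char* src) {
  // StreamedSource takes ownership of the stream (see SmartPointer above).
  ScriptCompiler::StreamedSource source(new OneShotStream(src),
                                        ScriptCompiler::StreamedSource::UTF8);
  ScriptCompiler::ScriptStreamingTask* task =
      ScriptCompiler::StartStreamingScript(isolate, &source);
  task->Run();  // a real embedder runs this on a background thread
  delete task;
  Local<String> full_source = String::NewFromUtf8(isolate, src);
  return ScriptCompiler::Compile(isolate, &source, full_source,
                                 ScriptOrigin(full_source));
}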
diff --git a/src/bailout-reason.cc b/src/bailout-reason.cc
new file mode 100644
index 0000000..93d43dd
--- /dev/null
+++ b/src/bailout-reason.cc
@@ -0,0 +1,20 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bailout-reason.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace internal {
+
+const char* GetBailoutReason(BailoutReason reason) {
+  DCHECK(reason < kLastErrorMessage);
+#define ERROR_MESSAGES_TEXTS(C, T) T,
+  static const char* error_messages_[] = {
+      ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
+#undef ERROR_MESSAGES_TEXTS
+  return error_messages_[reason];
+}
+}
+}  // namespace v8::internal
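
ERROR_MESSAGES_LIST is a classic X-macro: the same list is expanded once with ERROR_MESSAGES_CONSTANTS to produce the BailoutReason enum (in the header below) and once with ERROR_MESSAGES_TEXTS to produce the parallel string table, so the enum and messages cannot drift out of sync. A minimal self-contained sketch of the idiom, with illustrative names:

#include <cstdio>

// One list macro drives both the enum and the string table.
#define FRUIT_LIST(V)    \
  V(kApple, "an apple")  \
  V(kBanana, "a banana")

#define FRUIT_CONSTANTS(C, T) C,
enum Fruit { FRUIT_LIST(FRUIT_CONSTANTS) kLastFruit };
#undef FRUIT_CONSTANTS

const char* FruitName(Fruit fruit) {
#define FRUIT_TEXTS(C, T) T,
  static const char* names[] = {FRUIT_LIST(FRUIT_TEXTS)};
#undef FRUIT_TEXTS
  return names[fruit];  // the enum value indexes the parallel table
}

int main() {
  std::printf("%s\n", FruitName(kBanana));  // prints "a banana"
  return 0;
}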
diff --git a/src/bailout-reason.h b/src/bailout-reason.h
new file mode 100644
index 0000000..7287d62
--- /dev/null
+++ b/src/bailout-reason.h
@@ -0,0 +1,339 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BAILOUT_REASON_H_
+#define V8_BAILOUT_REASON_H_
+
+namespace v8 {
+namespace internal {
+
+#define ERROR_MESSAGES_LIST(V)                                                 \
+  V(kNoReason, "no reason")                                                    \
+                                                                               \
+  V(k32BitValueInRegisterIsNotZeroExtended,                                    \
+    "32 bit value in register is not zero-extended")                           \
+  V(kAlignmentMarkerExpected, "Alignment marker expected")                     \
+  V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned")         \
+  V(kAPICallReturnedInvalidObject, "API call returned invalid object")         \
+  V(kArgumentsObjectValueInATestContext,                                       \
+    "Arguments object value in a test context")                                \
+  V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed")      \
+  V(kArrayIndexConstantValueTooBig, "Array index constant value too big")      \
+  V(kAssignmentToArguments, "Assignment to arguments")                         \
+  V(kAssignmentToLetVariableBeforeInitialization,                              \
+    "Assignment to let variable before initialization")                        \
+  V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable")              \
+  V(kAssignmentToParameterFunctionUsesArgumentsObject,                         \
+    "Assignment to parameter, function uses arguments object")                 \
+  V(kAssignmentToParameterInArgumentsObject,                                   \
+    "Assignment to parameter in arguments object")                             \
+  V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache")             \
+  V(kBadValueContextForArgumentsObjectValue,                                   \
+    "Bad value context for arguments object value")                            \
+  V(kBadValueContextForArgumentsValue,                                         \
+    "Bad value context for arguments value")                                   \
+  V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change")    \
+  V(kBailoutWasNotPrepared, "Bailout was not prepared")                        \
+  V(kBinaryStubGenerateFloatingPointCode,                                      \
+    "BinaryStub_GenerateFloatingPointCode")                                    \
+  V(kBothRegistersWereSmisInSelectNonSmi,                                      \
+    "Both registers were smis in SelectNonSmi")                                \
+  V(kCallToAJavaScriptRuntimeFunction,                                         \
+    "Call to a JavaScript runtime function")                                   \
+  V(kCannotTranslatePositionInChangedArea,                                     \
+    "Cannot translate position in changed area")                               \
+  V(kClassLiteral, "Class literal")                                            \
+  V(kCodeGenerationFailed, "Code generation failed")                           \
+  V(kCodeObjectNotProperlyPatched, "Code object not properly patched")         \
+  V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot")     \
+  V(kContextAllocatedArguments, "Context-allocated arguments")                 \
+  V(kCopyBuffersOverlap, "Copy buffers overlap")                               \
+  V(kCouldNotGenerateZero, "Could not generate +0.0")                          \
+  V(kCouldNotGenerateNegativeZero, "Could not generate -0.0")                  \
+  V(kDebuggerHasBreakPoints, "Debugger has break points")                      \
+  V(kDebuggerStatement, "DebuggerStatement")                                   \
+  V(kDeclarationInCatchContext, "Declaration in catch context")                \
+  V(kDeclarationInWithContext, "Declaration in with context")                  \
+  V(kDefaultNaNModeNotSet, "Default NaN mode not set")                         \
+  V(kDeleteWithGlobalVariable, "Delete with global variable")                  \
+  V(kDeleteWithNonGlobalVariable, "Delete with non-global variable")           \
+  V(kDestinationOfCopyNotAligned, "Destination of copy not aligned")           \
+  V(kDontDeleteCellsCannotContainTheHole,                                      \
+    "DontDelete cells can't contain the hole")                                 \
+  V(kDoPushArgumentNotImplementedForDoubleType,                                \
+    "DoPushArgument not implemented for double type")                          \
+  V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed")            \
+  V(kEmitLoadRegisterUnsupportedDoubleImmediate,                               \
+    "EmitLoadRegister: Unsupported double immediate")                          \
+  V(kEval, "eval")                                                             \
+  V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel")                  \
+  V(kExpectedAlignmentMarker, "Expected alignment marker")                     \
+  V(kExpectedAllocationSite, "Expected allocation site")                       \
+  V(kExpectedFunctionObject, "Expected function object in register")           \
+  V(kExpectedHeapNumber, "Expected HeapNumber")                                \
+  V(kExpectedNativeContext, "Expected native context")                         \
+  V(kExpectedNonIdenticalObjects, "Expected non-identical objects")            \
+  V(kExpectedNonNullContext, "Expected non-null context")                      \
+  V(kExpectedPositiveZero, "Expected +0.0")                                    \
+  V(kExpectedAllocationSiteInCell, "Expected AllocationSite in property cell") \
+  V(kExpectedFixedArrayInFeedbackVector,                                       \
+    "Expected fixed array in feedback vector")                                 \
+  V(kExpectedFixedArrayInRegisterA2, "Expected fixed array in register a2")    \
+  V(kExpectedFixedArrayInRegisterEbx, "Expected fixed array in register ebx")  \
+  V(kExpectedFixedArrayInRegisterR2, "Expected fixed array in register r2")    \
+  V(kExpectedFixedArrayInRegisterRbx, "Expected fixed array in register rbx")  \
+  V(kExpectedNewSpaceObject, "Expected new space object")                      \
+  V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber")                    \
+  V(kExpectedUndefinedOrCell, "Expected undefined or cell in register")        \
+  V(kExpectingAlignmentForCopyBytes, "Expecting alignment for CopyBytes")      \
+  V(kExportDeclaration, "Export declaration")                                  \
+  V(kExternalStringExpectedButNotFound,                                        \
+    "External string expected, but not found")                                 \
+  V(kFailedBailedOutLastTime, "Failed/bailed out last time")                   \
+  V(kForInStatementIsNotFastCase, "ForInStatement is not fast case")           \
+  V(kForInStatementOptimizationIsDisabled,                                     \
+    "ForInStatement optimization is disabled")                                 \
+  V(kForInStatementWithNonLocalEachVariable,                                   \
+    "ForInStatement with non-local each variable")                             \
+  V(kForOfStatement, "ForOfStatement")                                         \
+  V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned")            \
+  V(kFunctionCallsEval, "Function calls eval")                                 \
+  V(kFunctionIsAGenerator, "Function is a generator")                          \
+  V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration")  \
+  V(kGeneratedCodeIsTooLarge, "Generated code is too large")                   \
+  V(kGeneratorFailedToResume, "Generator failed to resume")                    \
+  V(kGenerator, "Generator")                                                   \
+  V(kGlobalFunctionsMustHaveInitialMap,                                        \
+    "Global functions must have initial map")                                  \
+  V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered")       \
+  V(kHydrogenFilter, "Optimization disabled by filter")                        \
+  V(kImportDeclaration, "Import declaration")                                  \
+  V(kImproperObjectOnPrototypeChainForStore,                                   \
+    "Improper object on prototype chain for store")                            \
+  V(kIndexIsNegative, "Index is negative")                                     \
+  V(kIndexIsTooLarge, "Index is too large")                                    \
+  V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf")       \
+  V(kInlinedRuntimeFunctionFastOneByteArrayJoin,                               \
+    "Inlined runtime function: FastOneByteArrayJoin")                          \
+  V(kInlinedRuntimeFunctionGeneratorNext,                                      \
+    "Inlined runtime function: GeneratorNext")                                 \
+  V(kInlinedRuntimeFunctionGeneratorThrow,                                     \
+    "Inlined runtime function: GeneratorThrow")                                \
+  V(kInlinedRuntimeFunctionGetFromCache,                                       \
+    "Inlined runtime function: GetFromCache")                                  \
+  V(kInlinedRuntimeFunctionIsNonNegativeSmi,                                   \
+    "Inlined runtime function: IsNonNegativeSmi")                              \
+  V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf,               \
+    "Inlined runtime function: IsStringWrapperSafeForDefaultValueOf")          \
+  V(kInliningBailedOut, "Inlining bailed out")                                 \
+  V(kInputGPRIsExpectedToHaveUpper32Cleared,                                   \
+    "Input GPR is expected to have upper32 cleared")                           \
+  V(kInputStringTooLong, "Input string too long")                              \
+  V(kInstanceofStubUnexpectedCallSiteCacheCheck,                               \
+    "InstanceofStub unexpected call site cache (check)")                       \
+  V(kInstanceofStubUnexpectedCallSiteCacheCmp1,                                \
+    "InstanceofStub unexpected call site cache (cmp 1)")                       \
+  V(kInstanceofStubUnexpectedCallSiteCacheCmp2,                                \
+    "InstanceofStub unexpected call site cache (cmp 2)")                       \
+  V(kInstanceofStubUnexpectedCallSiteCacheMov,                                 \
+    "InstanceofStub unexpected call site cache (mov)")                         \
+  V(kInteger32ToSmiFieldWritingToNonSmiLocation,                               \
+    "Integer32ToSmiField writing to non-smi location")                         \
+  V(kInvalidCaptureReferenced, "Invalid capture referenced")                   \
+  V(kInvalidElementsKindForInternalArrayOrInternalPackedArray,                 \
+    "Invalid ElementsKind for InternalArray or InternalPackedArray")           \
+  V(kInvalidFullCodegenState, "invalid full-codegen state")                    \
+  V(kInvalidHandleScopeLevel, "Invalid HandleScope level")                     \
+  V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment")  \
+  V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment")     \
+  V(kInvalidLhsInCountOperation, "Invalid lhs in count operation")             \
+  V(kInvalidMinLength, "Invalid min_length")                                   \
+  V(kJSGlobalObjectNativeContextShouldBeANativeContext,                        \
+    "JSGlobalObject::native_context should be a native context")               \
+  V(kJSGlobalProxyContextShouldNotBeNull,                                      \
+    "JSGlobalProxy::context() should not be null")                             \
+  V(kJSObjectWithFastElementsMapHasSlowElements,                               \
+    "JSObject with fast elements map has slow elements")                       \
+  V(kLetBindingReInitialization, "Let binding re-initialization")              \
+  V(kLhsHasBeenClobbered, "lhs has been clobbered")                            \
+  V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size")  \
+  V(kLiveEdit, "LiveEdit")                                                     \
+  V(kLookupVariableInCountOperation, "Lookup variable in count operation")     \
+  V(kMapBecameDeprecated, "Map became deprecated")                             \
+  V(kMapBecameUnstable, "Map became unstable")                                 \
+  V(kMapIsNoLongerInEax, "Map is no longer in eax")                            \
+  V(kModuleDeclaration, "Module declaration")                                  \
+  V(kModuleLiteral, "Module literal")                                          \
+  V(kModulePath, "Module path")                                                \
+  V(kModuleStatement, "Module statement")                                      \
+  V(kModuleVariable, "Module variable")                                        \
+  V(kModuleUrl, "Module url")                                                  \
+  V(kNativeFunctionLiteral, "Native function literal")                         \
+  V(kSuperReference, "Super reference")                                        \
+  V(kNeedSmiLiteral, "Need a Smi literal here")                                \
+  V(kNoCasesLeft, "No cases left")                                             \
+  V(kNoEmptyArraysHereInEmitFastOneByteArrayJoin,                              \
+    "No empty arrays here in EmitFastOneByteArrayJoin")                        \
+  V(kNonInitializerAssignmentToConst, "Non-initializer assignment to const")   \
+  V(kNonSmiIndex, "Non-smi index")                                             \
+  V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal")                  \
+  V(kNonSmiValue, "Non-smi value")                                             \
+  V(kNonObject, "Non-object value")                                            \
+  V(kNotEnoughVirtualRegistersForValues,                                       \
+    "Not enough virtual registers for values")                                 \
+  V(kNotEnoughSpillSlotsForOsr, "Not enough spill slots for OSR")              \
+  V(kNotEnoughVirtualRegistersRegalloc,                                        \
+    "Not enough virtual registers (regalloc)")                                 \
+  V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array")              \
+  V(kObjectLiteralWithComplexProperty, "Object literal with complex property") \
+  V(kOddballInStringTableIsNotUndefinedOrTheHole,                              \
+    "Oddball in string table is not undefined or the hole")                    \
+  V(kOffsetOutOfRange, "Offset out of range")                                  \
+  V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name")              \
+  V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string")          \
+  V(kOperandIsASmi, "Operand is a smi")                                        \
+  V(kOperandIsNotAName, "Operand is not a name")                               \
+  V(kOperandIsNotANumber, "Operand is not a number")                           \
+  V(kOperandIsNotASmi, "Operand is not a smi")                                 \
+  V(kOperandIsNotAString, "Operand is not a string")                           \
+  V(kOperandIsNotSmi, "Operand is not smi")                                    \
+  V(kOperandNotANumber, "Operand not a number")                                \
+  V(kObjectTagged, "The object is tagged")                                     \
+  V(kObjectNotTagged, "The object is not tagged")                              \
+  V(kOptimizationDisabled, "Optimization is disabled")                         \
+  V(kOptimizedTooManyTimes, "Optimized too many times")                        \
+  V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister,                   \
+    "Out of virtual registers while trying to allocate temp register")         \
+  V(kParseScopeError, "Parse/scope error")                                     \
+  V(kPossibleDirectCallToEval, "Possible direct call to eval")                 \
+  V(kPreconditionsWereNotMet, "Preconditions were not met")                    \
+  V(kPropertyAllocationCountFailed, "Property allocation count failed")        \
+  V(kReceivedInvalidReturnAddress, "Received invalid return address")          \
+  V(kReferenceToAVariableWhichRequiresDynamicLookup,                           \
+    "Reference to a variable which requires dynamic lookup")                   \
+  V(kReferenceToGlobalLexicalVariable, "Reference to global lexical variable") \
+  V(kReferenceToUninitializedVariable, "Reference to uninitialized variable")  \
+  V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root")  \
+  V(kRegisterWasClobbered, "Register was clobbered")                           \
+  V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
+  V(kReturnAddressNotFoundInFrame, "Return address not found in frame")        \
+  V(kRhsHasBeenClobbered, "Rhs has been clobbered")                            \
+  V(kScopedBlock, "ScopedBlock")                                               \
+  V(kSmiAdditionOverflow, "Smi addition overflow")                             \
+  V(kSmiSubtractionOverflow, "Smi subtraction overflow")                       \
+  V(kStackAccessBelowStackPointer, "Stack access below stack pointer")         \
+  V(kStackFrameTypesMustMatch, "Stack frame types must match")                 \
+  V(kTheCurrentStackPointerIsBelowCsp,                                         \
+    "The current stack pointer is below csp")                                  \
+  V(kTheInstructionShouldBeALui, "The instruction should be a lui")            \
+  V(kTheInstructionShouldBeAnOri, "The instruction should be an ori")          \
+  V(kTheInstructionToPatchShouldBeALoadFromConstantPool,                       \
+    "The instruction to patch should be a load from the constant pool")        \
+  V(kTheInstructionToPatchShouldBeAnLdrLiteral,                                \
+    "The instruction to patch should be a ldr literal")                        \
+  V(kTheInstructionToPatchShouldBeALui,                                        \
+    "The instruction to patch should be a lui")                                \
+  V(kTheInstructionToPatchShouldBeAnOri,                                       \
+    "The instruction to patch should be an ori")                               \
+  V(kTheSourceAndDestinationAreTheSame,                                        \
+    "The source and destination are the same")                                 \
+  V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.")         \
+  V(kTheStackWasCorruptedByMacroAssemblerCall,                                 \
+    "The stack was corrupted by MacroAssembler::Call()")                       \
+  V(kTooManyParametersLocals, "Too many parameters/locals")                    \
+  V(kTooManyParameters, "Too many parameters")                                 \
+  V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR")     \
+  V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.")    \
+  V(kToOperandIsDoubleRegisterUnimplemented,                                   \
+    "ToOperand IsDoubleRegister unimplemented")                                \
+  V(kToOperandUnsupportedDoubleImmediate,                                      \
+    "ToOperand Unsupported double immediate")                                  \
+  V(kTryCatchStatement, "TryCatchStatement")                                   \
+  V(kTryFinallyStatement, "TryFinallyStatement")                               \
+  V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi")                \
+  V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space")       \
+  V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier")           \
+  V(kUndefinedValueNotLoaded, "Undefined value not loaded")                    \
+  V(kUndoAllocationOfNonAllocatedMemory,                                       \
+    "Undo allocation of non allocated memory")                                 \
+  V(kUnexpectedAllocationTop, "Unexpected allocation top")                     \
+  V(kUnexpectedColorFound, "Unexpected color bit pattern found")               \
+  V(kUnexpectedElementsKindInArrayConstructor,                                 \
+    "Unexpected ElementsKind in array constructor")                            \
+  V(kUnexpectedFallthroughFromCharCodeAtSlowCase,                              \
+    "Unexpected fallthrough from CharCodeAt slow case")                        \
+  V(kUnexpectedFallthroughFromCharFromCodeSlowCase,                            \
+    "Unexpected fallthrough from CharFromCode slow case")                      \
+  V(kUnexpectedFallThroughFromStringComparison,                                \
+    "Unexpected fall-through from string comparison")                          \
+  V(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode,               \
+    "Unexpected fall-through in BinaryStub_GenerateFloatingPointCode")         \
+  V(kUnexpectedFallthroughToCharCodeAtSlowCase,                                \
+    "Unexpected fallthrough to CharCodeAt slow case")                          \
+  V(kUnexpectedFallthroughToCharFromCodeSlowCase,                              \
+    "Unexpected fallthrough to CharFromCode slow case")                        \
+  V(kUnexpectedFPUStackDepthAfterInstruction,                                  \
+    "Unexpected FPU stack depth after instruction")                            \
+  V(kUnexpectedInitialMapForArrayFunction1,                                    \
+    "Unexpected initial map for Array function (1)")                           \
+  V(kUnexpectedInitialMapForArrayFunction2,                                    \
+    "Unexpected initial map for Array function (2)")                           \
+  V(kUnexpectedInitialMapForArrayFunction,                                     \
+    "Unexpected initial map for Array function")                               \
+  V(kUnexpectedInitialMapForInternalArrayFunction,                             \
+    "Unexpected initial map for InternalArray function")                       \
+  V(kUnexpectedLevelAfterReturnFromApiCall,                                    \
+    "Unexpected level after return from api call")                             \
+  V(kUnexpectedNegativeValue, "Unexpected negative value")                     \
+  V(kUnexpectedNumberOfPreAllocatedPropertyFields,                             \
+    "Unexpected number of pre-allocated property fields")                      \
+  V(kUnexpectedFPCRMode, "Unexpected FPCR mode.")                              \
+  V(kUnexpectedSmi, "Unexpected smi value")                                    \
+  V(kUnexpectedStringFunction, "Unexpected String function")                   \
+  V(kUnexpectedStringType, "Unexpected string type")                           \
+  V(kUnexpectedStringWrapperInstanceSize,                                      \
+    "Unexpected string wrapper instance size")                                 \
+  V(kUnexpectedTypeForRegExpDataFixedArrayExpected,                            \
+    "Unexpected type for RegExp data, FixedArray expected")                    \
+  V(kUnexpectedValue, "Unexpected value")                                      \
+  V(kUnexpectedUnusedPropertiesOfStringWrapper,                                \
+    "Unexpected unused properties of string wrapper")                          \
+  V(kUnimplemented, "unimplemented")                                           \
+  V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister")  \
+  V(kUnknown, "Unknown")                                                       \
+  V(kUnsupportedConstCompoundAssignment,                                       \
+    "Unsupported const compound assignment")                                   \
+  V(kUnsupportedCountOperationWithConst,                                       \
+    "Unsupported count operation with const")                                  \
+  V(kUnsupportedDoubleImmediate, "Unsupported double immediate")               \
+  V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment")  \
+  V(kUnsupportedLookupSlotInDeclaration,                                       \
+    "Unsupported lookup slot in declaration")                                  \
+  V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare")      \
+  V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments")         \
+  V(kUnsupportedPhiUseOfConstVariable,                                         \
+    "Unsupported phi use of const variable")                                   \
+  V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate")               \
+  V(kVariableResolvedToWithContext, "Variable resolved to with context")       \
+  V(kWeShouldNotHaveAnEmptyLexicalContext,                                     \
+    "We should not have an empty lexical context")                             \
+  V(kWithStatement, "WithStatement")                                           \
+  V(kWrongFunctionContext, "Wrong context passed to function")                 \
+  V(kWrongAddressOrValuePassedToRecordWrite,                                   \
+    "Wrong address or value passed to RecordWrite")                            \
+  V(kYield, "Yield")
+
+
+#define ERROR_MESSAGES_CONSTANTS(C, T) C,
+enum BailoutReason {
+  ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS) kLastErrorMessage
+};
+#undef ERROR_MESSAGES_CONSTANTS
+
+
+const char* GetBailoutReason(BailoutReason reason);
+}
+}  // namespace v8::internal
+
+#endif  // V8_BAILOUT_REASON_H_
diff --git a/src/base/DEPS b/src/base/DEPS
index 6548030..e53cadf 100644
--- a/src/base/DEPS
+++ b/src/base/DEPS
@@ -1,4 +1,7 @@
 include_rules = [
+  "-include",
+  "+include/v8config.h",
+  "+include/v8stdint.h",
   "-src",
   "+src/base",
 ]
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index b26fc4c..eba172f 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -25,7 +25,7 @@
 #ifndef V8_BASE_ATOMICOPS_H_
 #define V8_BASE_ATOMICOPS_H_
 
-#include "include/v8.h"
+#include "include/v8stdint.h"
 #include "src/base/build_config.h"
 
 #if defined(_WIN32) && defined(V8_HOST_ARCH_64_BIT)
@@ -148,6 +148,8 @@
 #include "src/base/atomicops_internals_x86_gcc.h"
 #elif defined(__GNUC__) && V8_HOST_ARCH_MIPS
 #include "src/base/atomicops_internals_mips_gcc.h"
+#elif defined(__GNUC__) && V8_HOST_ARCH_MIPS64
+#include "src/base/atomicops_internals_mips64_gcc.h"
 #else
 #error "Atomic operations are not supported on your platform"
 #endif
diff --git a/src/base/atomicops_internals_mips64_gcc.h b/src/base/atomicops_internals_mips64_gcc.h
new file mode 100644
index 0000000..1f629b6
--- /dev/null
+++ b/src/base/atomicops_internals_mips64_gcc.h
@@ -0,0 +1,307 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is an internal atomic implementation, use atomicops.h instead.
+
+#ifndef V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
+#define V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
+
+namespace v8 {
+namespace base {
+
+// Atomically execute:
+//      result = *ptr;
+//      if (*ptr == old_value)
+//        *ptr = new_value;
+//      return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+                                         Atomic32 old_value,
+                                         Atomic32 new_value) {
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+                                         Atomic32 new_value) {
+  Atomic32 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %1, %2\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+                                          Atomic32 increment) {
+  Atomic32 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %2\n"  // temp = *ptr
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
+  MemoryBarrier();
+  Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value,
+                                       Atomic32 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+  *ptr = value;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+}
+
+inline void MemoryBarrier() {
+  __asm__ __volatile__("sync" : : : "memory");
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+  return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
+  Atomic32 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+
+// 64-bit versions of the atomic ops.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr.  This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %1, %2\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment".  Returns the new value of
+// *ptr with the increment applied.  This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %2\n"  // temp = *ptr
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation.  "Barrier" operations have both "Acquire" and "Release"
+// semantics.   A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_ATOMICOPS_INTERNALS_MIPS64_GCC_H_
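
Each ll/sc (load-linked/store-conditional) loop above retries until the store succeeds, giving the same contract as a relaxed compare-exchange: write new_value only if *ptr still holds old_value, and always return the value that was observed. The equivalent in portable C++11 atomics, as a sketch rather than how V8 itself is built:

#include <atomic>
#include <cassert>

// Same contract as NoBarrier_CompareAndSwap: store new_value only if *ptr
// still equals old_value; in every case return the value that was observed.
int CompareAndSwap(std::atomic<int>* ptr, int old_value, int new_value) {
  int observed = old_value;
  ptr->compare_exchange_strong(observed, new_value,
                               std::memory_order_relaxed);
  return observed;  // updated to the current value when the swap fails
}

int main() {
  std::atomic<int> cell(1);
  assert(CompareAndSwap(&cell, 1, 2) == 1);  // swap succeeded
  assert(CompareAndSwap(&cell, 1, 3) == 2);  // failed; current value returned
  assert(cell.load() == 2);
  return 0;
}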
diff --git a/src/base/atomicops_internals_mips_gcc.h b/src/base/atomicops_internals_mips_gcc.h
index 0d3a0e3..d33b668 100644
--- a/src/base/atomicops_internals_mips_gcc.h
+++ b/src/base/atomicops_internals_mips_gcc.h
@@ -27,16 +27,16 @@
   __asm__ __volatile__(".set push\n"
                        ".set noreorder\n"
                        "1:\n"
-                       "ll %0, %5\n"  // prev = *ptr
-                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "move %2, %4\n"  // tmp = new_value
-                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
-                       "beqz %2, 1b\n"  // start again on atomic error
+                       "ll %0, 0(%4)\n"  // prev = *ptr
+                       "bne %0, %2, 2f\n"  // if (prev != old_value) goto 2
+                       "move %1, %3\n"  // tmp = new_value
+                       "sc %1, 0(%4)\n"  // *ptr = tmp (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        "2:\n"
                        ".set pop\n"
-                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
-                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "=&r" (prev), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "r" (ptr)
                        : "memory");
   return prev;
 }
@@ -48,15 +48,16 @@
   Atomic32 temp, old;
   __asm__ __volatile__(".set push\n"
                        ".set noreorder\n"
+                       ".set at\n"
                        "1:\n"
-                       "ll %1, %2\n"  // old = *ptr
-                       "move %0, %3\n"  // temp = new_value
-                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
+                       "ll %1, 0(%3)\n"  // old = *ptr
+                       "move %0, %2\n"  // temp = new_value
+                       "sc %0, 0(%3)\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        ".set pop\n"
-                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
-                       : "r" (new_value), "m" (*ptr)
+                       : "=&r" (temp), "=&r" (old)
+                       : "r" (new_value), "r" (ptr)
                        : "memory");
 
   return old;
@@ -71,14 +72,14 @@
   __asm__ __volatile__(".set push\n"
                        ".set noreorder\n"
                        "1:\n"
-                       "ll %0, %2\n"  // temp = *ptr
-                       "addu %1, %0, %3\n"  // temp2 = temp + increment
-                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "ll %0, 0(%3)\n"  // temp = *ptr
+                       "addu %1, %0, %2\n"  // temp2 = temp + increment
+                       "sc %1, 0(%3)\n"  // *ptr = temp2 (with atomic check)
                        "beqz %1, 1b\n"  // start again on atomic error
-                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "addu %1, %0, %2\n"  // temp2 = temp + increment
                        ".set pop\n"
-                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
-                       : "Ir" (increment), "m" (*ptr)
+                       : "=&r" (temp), "=&r" (temp2)
+                       : "Ir" (increment), "r" (ptr)
                        : "memory");
   // temp2 now holds the final value.
   return temp2;
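
All three hunks above make the same change: instead of describing the memory
operand to the assembler with an "m" constraint, the pointer is now passed in
a register ("r" (ptr)) and dereferenced explicitly as 0(%N). The contract of
the ll/sc compare-and-swap loop is easiest to read in portable form; a
sequential sketch (the hardware retry on a failed sc has no equivalent here):

    // Sketch only; the real implementation is the inline assembly above.
    Atomic32 NoBarrier_CompareAndSwap_Sketch(volatile Atomic32* ptr,
                                             Atomic32 old_value,
                                             Atomic32 new_value) {
      Atomic32 prev = *ptr;       // "ll   %0, 0(%4)"
      if (prev == old_value) {    // "bne  %0, %2, 2f" (skip store if unequal)
        *ptr = new_value;         // "move %1, %3" + "sc %1, 0(%4)"
      }
      return prev;                // callers compare this against old_value
    }
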
diff --git a/src/base/atomicops_internals_tsan.h b/src/base/atomicops_internals_tsan.h
index 363668d..646e5bd 100644
--- a/src/base/atomicops_internals_tsan.h
+++ b/src/base/atomicops_internals_tsan.h
@@ -15,20 +15,6 @@
 #ifndef TSAN_INTERFACE_ATOMIC_H
 #define TSAN_INTERFACE_ATOMIC_H
 
-// This struct is not part of the public API of this module; clients may not
-// use it.  (However, it's exported via BASE_EXPORT because clients implicitly
-// do use it at link time by inlining these functions.)
-// Features of this x86.  Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
-  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
-                             // after acquire compare-and-swap.
-  bool has_sse2;             // Processor has SSE2.
-};
-extern struct AtomicOps_x86CPUFeatureStruct
-    AtomicOps_Internalx86CPUFeatures;
-
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
 extern "C" {
 typedef char  __tsan_atomic8;
@@ -374,6 +360,4 @@
 }  // namespace base
 }  // namespace v8
 
-#undef ATOMICOPS_COMPILER_BARRIER
-
 #endif  // V8_BASE_ATOMICOPS_INTERNALS_TSAN_H_
diff --git a/src/base/atomicops_internals_x86_gcc.cc b/src/base/atomicops_internals_x86_gcc.cc
index b8ba0c3..969f237 100644
--- a/src/base/atomicops_internals_x86_gcc.cc
+++ b/src/base/atomicops_internals_x86_gcc.cc
@@ -42,7 +42,9 @@
 // default values should hopefully be pretty safe.
 struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
   false,          // bug can't exist before process spawns multiple threads
+#if !defined(__SSE2__)
   false,          // no SSE2
+#endif
 };
 
 } }  // namespace v8::base
@@ -89,8 +91,10 @@
     AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug = false;
   }
 
+#if !defined(__SSE2__)
   // edx bit 26 is SSE2 which we use to tell us whether we can use mfence
   AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
+#endif
 }
 
 class AtomicOpsx86Initializer {
diff --git a/src/base/atomicops_internals_x86_gcc.h b/src/base/atomicops_internals_x86_gcc.h
index 00b6448..ec87c42 100644
--- a/src/base/atomicops_internals_x86_gcc.h
+++ b/src/base/atomicops_internals_x86_gcc.h
@@ -17,7 +17,9 @@
 struct AtomicOps_x86CPUFeatureStruct {
   bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                              // after acquire compare-and-swap.
+#if !defined(__SSE2__)
   bool has_sse2;             // Processor has SSE2.
+#endif
 };
 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
 
@@ -92,7 +94,7 @@
   *ptr = value;
 }
 
-#if defined(__x86_64__)
+#if defined(__x86_64__) || defined(__SSE2__)
 
 // 64-bit implementations of memory barrier can be simpler, because
 // "mfence" is guaranteed to exist.
diff --git a/src/base/base.gyp b/src/base/base.gyp
new file mode 100644
index 0000000..e391e2e
--- /dev/null
+++ b/src/base/base.gyp
@@ -0,0 +1,46 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'v8_code': 1,
+  },
+  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'base-unittests',
+      'type': 'executable',
+      'dependencies': [
+        '../../testing/gtest.gyp:gtest',
+        '../../testing/gtest.gyp:gtest_main',
+        '../../tools/gyp/v8.gyp:v8_libbase',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        'bits-unittest.cc',
+        'cpu-unittest.cc',
+        'division-by-constant-unittest.cc',
+        'flags-unittest.cc',
+        'platform/condition-variable-unittest.cc',
+        'platform/mutex-unittest.cc',
+        'platform/platform-unittest.cc',
+        'platform/semaphore-unittest.cc',
+        'platform/time-unittest.cc',
+        'sys-info-unittest.cc',
+        'utils/random-number-generator-unittest.cc',
+      ],
+      'conditions': [
+        ['os_posix == 1', {
+          # TODO(svenpanne): This is a temporary work-around to fix the warnings
+          # that show up because we use -std=gnu++0x instead of -std=c++11.
+          'cflags!': [
+            '-pedantic',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/src/base/bits-unittest.cc b/src/base/bits-unittest.cc
new file mode 100644
index 0000000..06c1183
--- /dev/null
+++ b/src/base/bits-unittest.cc
@@ -0,0 +1,167 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/base/macros.h"
+#include "testing/gtest-support.h"
+
+#ifdef DEBUG
+#define DISABLE_IN_RELEASE(Name) Name
+#else
+#define DISABLE_IN_RELEASE(Name) DISABLED_##Name
+#endif
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+TEST(Bits, CountPopulation32) {
+  EXPECT_EQ(0u, CountPopulation32(0));
+  EXPECT_EQ(1u, CountPopulation32(1));
+  EXPECT_EQ(8u, CountPopulation32(0x11111111));
+  EXPECT_EQ(16u, CountPopulation32(0xf0f0f0f0));
+  EXPECT_EQ(24u, CountPopulation32(0xfff0f0ff));
+  EXPECT_EQ(32u, CountPopulation32(0xffffffff));
+}
+
+
+TEST(Bits, CountLeadingZeros32) {
+  EXPECT_EQ(32u, CountLeadingZeros32(0));
+  EXPECT_EQ(31u, CountLeadingZeros32(1));
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(31u - shift, CountLeadingZeros32(1u << shift));
+  }
+  EXPECT_EQ(4u, CountLeadingZeros32(0x0f0f0f0f));
+}
+
+
+TEST(Bits, CountTrailingZeros32) {
+  EXPECT_EQ(32u, CountTrailingZeros32(0));
+  EXPECT_EQ(31u, CountTrailingZeros32(0x80000000));
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(shift, CountTrailingZeros32(1u << shift));
+  }
+  EXPECT_EQ(4u, CountTrailingZeros32(0xf0f0f0f0));
+}
+
+
+TEST(Bits, IsPowerOfTwo32) {
+  EXPECT_FALSE(IsPowerOfTwo32(0U));
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_TRUE(IsPowerOfTwo32(1U << shift));
+    EXPECT_FALSE(IsPowerOfTwo32((1U << shift) + 5U));
+    EXPECT_FALSE(IsPowerOfTwo32(~(1U << shift)));
+  }
+  TRACED_FORRANGE(uint32_t, shift, 2, 31) {
+    EXPECT_FALSE(IsPowerOfTwo32((1U << shift) - 1U));
+  }
+  EXPECT_FALSE(IsPowerOfTwo32(0xffffffff));
+}
+
+
+TEST(Bits, IsPowerOfTwo64) {
+  EXPECT_FALSE(IsPowerOfTwo64(0U));
+  TRACED_FORRANGE(uint32_t, shift, 0, 63) {
+    EXPECT_TRUE(IsPowerOfTwo64(V8_UINT64_C(1) << shift));
+    EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << shift) + 5U));
+    EXPECT_FALSE(IsPowerOfTwo64(~(V8_UINT64_C(1) << shift)));
+  }
+  TRACED_FORRANGE(uint32_t, shift, 2, 63) {
+    EXPECT_FALSE(IsPowerOfTwo64((V8_UINT64_C(1) << shift) - 1U));
+  }
+  EXPECT_FALSE(IsPowerOfTwo64(V8_UINT64_C(0xffffffffffffffff)));
+}
+
+
+TEST(Bits, RoundUpToPowerOfTwo32) {
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(1u << shift, RoundUpToPowerOfTwo32(1u << shift));
+  }
+  EXPECT_EQ(0u, RoundUpToPowerOfTwo32(0));
+  EXPECT_EQ(4u, RoundUpToPowerOfTwo32(3));
+  EXPECT_EQ(0x80000000u, RoundUpToPowerOfTwo32(0x7fffffffu));
+}
+
+
+TEST(BitsDeathTest, DISABLE_IN_RELEASE(RoundUpToPowerOfTwo32)) {
+  ASSERT_DEATH_IF_SUPPORTED({ RoundUpToPowerOfTwo32(0x80000001u); },
+                            "0x80000000");
+}
+
+
+TEST(Bits, RoundDownToPowerOfTwo32) {
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(1u << shift, RoundDownToPowerOfTwo32(1u << shift));
+  }
+  EXPECT_EQ(0u, RoundDownToPowerOfTwo32(0));
+  EXPECT_EQ(4u, RoundDownToPowerOfTwo32(5));
+  EXPECT_EQ(0x80000000u, RoundDownToPowerOfTwo32(0x80000001u));
+}
+
+
+TEST(Bits, RotateRight32) {
+  TRACED_FORRANGE(uint32_t, shift, 0, 31) {
+    EXPECT_EQ(0u, RotateRight32(0u, shift));
+  }
+  EXPECT_EQ(1u, RotateRight32(1, 0));
+  EXPECT_EQ(1u, RotateRight32(2, 1));
+  EXPECT_EQ(0x80000000u, RotateRight32(1, 1));
+}
+
+
+TEST(Bits, RotateRight64) {
+  TRACED_FORRANGE(uint64_t, shift, 0, 63) {
+    EXPECT_EQ(0u, RotateRight64(0u, shift));
+  }
+  EXPECT_EQ(1u, RotateRight64(1, 0));
+  EXPECT_EQ(1u, RotateRight64(2, 1));
+  EXPECT_EQ(V8_UINT64_C(0x8000000000000000), RotateRight64(1, 1));
+}
+
+
+TEST(Bits, SignedAddOverflow32) {
+  int32_t val = 0;
+  EXPECT_FALSE(SignedAddOverflow32(0, 0, &val));
+  EXPECT_EQ(0, val);
+  EXPECT_TRUE(
+      SignedAddOverflow32(std::numeric_limits<int32_t>::max(), 1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+  EXPECT_TRUE(
+      SignedAddOverflow32(std::numeric_limits<int32_t>::min(), -1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+  EXPECT_TRUE(SignedAddOverflow32(std::numeric_limits<int32_t>::max(),
+                                  std::numeric_limits<int32_t>::max(), &val));
+  EXPECT_EQ(-2, val);
+  TRACED_FORRANGE(int32_t, i, 1, 50) {
+    TRACED_FORRANGE(int32_t, j, 1, i) {
+      EXPECT_FALSE(SignedAddOverflow32(i, j, &val));
+      EXPECT_EQ(i + j, val);
+    }
+  }
+}
+
+
+TEST(Bits, SignedSubOverflow32) {
+  int32_t val = 0;
+  EXPECT_FALSE(SignedSubOverflow32(0, 0, &val));
+  EXPECT_EQ(0, val);
+  EXPECT_TRUE(
+      SignedSubOverflow32(std::numeric_limits<int32_t>::min(), 1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::max(), val);
+  EXPECT_TRUE(
+      SignedSubOverflow32(std::numeric_limits<int32_t>::max(), -1, &val));
+  EXPECT_EQ(std::numeric_limits<int32_t>::min(), val);
+  TRACED_FORRANGE(int32_t, i, 1, 50) {
+    TRACED_FORRANGE(int32_t, j, 1, i) {
+      EXPECT_FALSE(SignedSubOverflow32(i, j, &val));
+      EXPECT_EQ(i - j, val);
+    }
+  }
+}
+
+}  // namespace bits
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/bits.cc b/src/base/bits.cc
new file mode 100644
index 0000000..6daee53
--- /dev/null
+++ b/src/base/bits.cc
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/base/logging.h"
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
+  DCHECK_LE(value, 0x80000000u);
+  value = value - 1;
+  value = value | (value >> 1);
+  value = value | (value >> 2);
+  value = value | (value >> 4);
+  value = value | (value >> 8);
+  value = value | (value >> 16);
+  return value + 1;
+}
+
+}  // namespace bits
+}  // namespace base
+}  // namespace v8
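
The shift/or cascade in RoundUpToPowerOfTwo32 smears the highest set bit of
value - 1 into every position below it, producing 2**k - 1; adding one then
yields the next power of two. A worked trace on an arbitrary input (the
input is chosen for illustration):

    // RoundUpToPowerOfTwo32(37):
    //   value = 37 - 1               -> 0b00100100  (36)
    //   value |= value >> 1          -> 0b00110110  (54)
    //   value |= value >> 2          -> 0b00111111  (63)
    //   value |= value >> 4, 8, 16   -> 0b00111111  (no change, saturated)
    //   return value + 1             -> 0b01000000  (64)
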
diff --git a/src/base/bits.h b/src/base/bits.h
new file mode 100644
index 0000000..e6a733a
--- /dev/null
+++ b/src/base/bits.h
@@ -0,0 +1,150 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_BITS_H_
+#define V8_BASE_BITS_H_
+
+#include "include/v8stdint.h"
+#include "src/base/macros.h"
+#if V8_CC_MSVC
+#include <intrin.h>
+#endif
+#if V8_OS_WIN32
+#include "src/base/win32-headers.h"
+#endif
+
+namespace v8 {
+namespace base {
+namespace bits {
+
+// CountPopulation32(value) returns the number of bits set in |value|.
+inline uint32_t CountPopulation32(uint32_t value) {
+#if V8_HAS_BUILTIN_POPCOUNT
+  return __builtin_popcount(value);
+#else
+  value = ((value >> 1) & 0x55555555) + (value & 0x55555555);
+  value = ((value >> 2) & 0x33333333) + (value & 0x33333333);
+  value = ((value >> 4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
+  value = ((value >> 8) & 0x00ff00ff) + (value & 0x00ff00ff);
+  value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
+  return value;
+#endif
+}
+
+
+// CountLeadingZeros32(value) returns the number of zero bits preceding the
+// most significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns 32.
+inline uint32_t CountLeadingZeros32(uint32_t value) {
+#if V8_HAS_BUILTIN_CLZ
+  return value ? __builtin_clz(value) : 32;
+#elif V8_CC_MSVC
+  unsigned long result;  // NOLINT(runtime/int)
+  if (!_BitScanReverse(&result, value)) return 32;
+  return static_cast<uint32_t>(31 - result);
+#else
+  value = value | (value >> 1);
+  value = value | (value >> 2);
+  value = value | (value >> 4);
+  value = value | (value >> 8);
+  value = value | (value >> 16);
+  return CountPopulation32(~value);
+#endif
+}
+
+
+// CountTrailingZeros32(value) returns the number of zero bits following the
+// least significant 1 bit in |value| if |value| is non-zero, otherwise it
+// returns 32.
+inline uint32_t CountTrailingZeros32(uint32_t value) {
+#if V8_HAS_BUILTIN_CTZ
+  return value ? __builtin_ctz(value) : 32;
+#elif V8_CC_MSVC
+  unsigned long result;  // NOLINT(runtime/int)
+  if (!_BitScanForward(&result, value)) return 32;
+  return static_cast<uint32_t>(result);
+#else
+  if (value == 0) return 32;
+  unsigned count = 0;
+  for (value ^= value - 1; value >>= 1; ++count)
+    ;
+  return count;
+#endif
+}
+
+
+// Returns true iff |value| is a power of 2.
+inline bool IsPowerOfTwo32(uint32_t value) {
+  return value && !(value & (value - 1));
+}
+
+
+// Returns true iff |value| is a power of 2.
+inline bool IsPowerOfTwo64(uint64_t value) {
+  return value && !(value & (value - 1));
+}
+
+
+// RoundUpToPowerOfTwo32(value) returns the smallest power of two which is
+// greater than or equal to |value|. If you pass in a |value| that is already a
+// power of two, it is returned as is. |value| must be less than or equal to
+// 0x80000000u. Implementation is from "Hacker's Delight" by Henry S. Warren,
+// Jr., figure 3-3, page 48, where the function is called clp2.
+uint32_t RoundUpToPowerOfTwo32(uint32_t value);
+
+
+// RoundDownToPowerOfTwo32(value) returns the greatest power of two which is
+// less than or equal to |value|. If you pass in a |value| that is already a
+// power of two, it is returned as is.
+inline uint32_t RoundDownToPowerOfTwo32(uint32_t value) {
+  if (value > 0x80000000u) return 0x80000000u;
+  uint32_t result = RoundUpToPowerOfTwo32(value);
+  if (result > value) result >>= 1;
+  return result;
+}
+
+
+inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
+  if (shift == 0) return value;
+  return (value >> shift) | (value << (32 - shift));
+}
+
+
+inline uint64_t RotateRight64(uint64_t value, uint64_t shift) {
+  if (shift == 0) return value;
+  return (value >> shift) | (value << (64 - shift));
+}
+
+
+// SignedAddOverflow32(lhs, rhs, val) performs a signed summation of |lhs|
+// and |rhs|, stores the result in the variable pointed to by |val|, and
+// returns true if the signed summation resulted in an overflow.
+inline bool SignedAddOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+#if V8_HAS_BUILTIN_SADD_OVERFLOW
+  return __builtin_sadd_overflow(lhs, rhs, val);
+#else
+  uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
+  *val = bit_cast<int32_t>(res);
+  return ((res ^ lhs) & (res ^ rhs) & (1U << 31)) != 0;
+#endif
+}
+
+
+// SignedSubOverflow32(lhs, rhs, val) performs a signed subtraction of |lhs|
+// and |rhs|, stores the result in the variable pointed to by |val|, and
+// returns true if the signed subtraction resulted in an overflow.
+inline bool SignedSubOverflow32(int32_t lhs, int32_t rhs, int32_t* val) {
+#if V8_HAS_BUILTIN_SSUB_OVERFLOW
+  return __builtin_ssub_overflow(lhs, rhs, val);
+#else
+  uint32_t res = static_cast<uint32_t>(lhs) - static_cast<uint32_t>(rhs);
+  *val = bit_cast<int32_t>(res);
+  return ((res ^ lhs) & (res ^ ~rhs) & (1U << 31)) != 0;
+#endif
+}
+
+}  // namespace bits
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_BITS_H_
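
The fallback in SignedAddOverflow32 relies on a classic sign-bit identity:
signed addition overflows exactly when both operands have the same sign and
the result's sign differs, i.e. when bit 31 is set in both (res ^ lhs) and
(res ^ rhs). A self-contained check of that identity (names here are
illustrative, not part of the header):

    #include <assert.h>
    #include <stdint.h>

    static bool AddOverflows(int32_t lhs, int32_t rhs) {
      uint32_t res = static_cast<uint32_t>(lhs) + static_cast<uint32_t>(rhs);
      return ((res ^ static_cast<uint32_t>(lhs)) &
              (res ^ static_cast<uint32_t>(rhs)) & (1U << 31)) != 0;
    }

    int main() {
      assert(!AddOverflows(1, 2));                // 3, in range
      assert(AddOverflows(0x7fffffff, 1));        // INT32_MAX + 1 wraps
      assert(AddOverflows(-0x7fffffff - 1, -1));  // INT32_MIN - 1 wraps
      return 0;
    }
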
diff --git a/src/base/build_config.h b/src/base/build_config.h
index e412b92d..2bf57c9 100644
--- a/src/base/build_config.h
+++ b/src/base/build_config.h
@@ -21,23 +21,26 @@
 // V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
 #define V8_HOST_ARCH_IA32 1
 #define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
 #else
 #define V8_HOST_ARCH_X64 1
+#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4  // Check for x32.
+#define V8_HOST_ARCH_32_BIT 1
+#else
 #define V8_HOST_ARCH_64_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
+#endif
 #endif  // __native_client__
 #elif defined(_M_IX86) || defined(__i386__)
 #define V8_HOST_ARCH_IA32 1
 #define V8_HOST_ARCH_32_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
 #elif defined(__AARCH64EL__)
 #define V8_HOST_ARCH_ARM64 1
 #define V8_HOST_ARCH_64_BIT 1
-#define V8_HOST_CAN_READ_UNALIGNED 1
 #elif defined(__ARMEL__)
 #define V8_HOST_ARCH_ARM 1
 #define V8_HOST_ARCH_32_BIT 1
+#elif defined(__mips64)
+#define V8_HOST_ARCH_MIPS64 1
+#define V8_HOST_ARCH_64_BIT 1
 #elif defined(__MIPSEB__) || defined(__MIPSEL__)
 #define V8_HOST_ARCH_MIPS 1
 #define V8_HOST_ARCH_32_BIT 1
@@ -59,7 +62,8 @@
 // in the same way as the host architecture, that is, target the native
 // environment as presented by the compiler.
 #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_X87 && \
-    !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS
+    !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && \
+    !V8_TARGET_ARCH_MIPS64
 #if defined(_M_X64) || defined(__x86_64__)
 #define V8_TARGET_ARCH_X64 1
 #elif defined(_M_IX86) || defined(__i386__)
@@ -68,6 +72,8 @@
 #define V8_TARGET_ARCH_ARM64 1
 #elif defined(__ARMEL__)
 #define V8_TARGET_ARCH_ARM 1
+#elif defined(__mips64)
+#define V8_TARGET_ARCH_MIPS64 1
 #elif defined(__MIPSEB__) || defined(__MIPSEL__)
 #define V8_TARGET_ARCH_MIPS 1
 #else
@@ -75,13 +81,43 @@
 #endif
 #endif
 
+// Determine architecture pointer size.
+#if V8_TARGET_ARCH_IA32
+#define V8_TARGET_ARCH_32_BIT 1
+#elif V8_TARGET_ARCH_X64
+#if !V8_TARGET_ARCH_32_BIT && !V8_TARGET_ARCH_64_BIT
+#if defined(__x86_64__) && __SIZEOF_POINTER__ == 4  // Check for x32.
+#define V8_TARGET_ARCH_32_BIT 1
+#else
+#define V8_TARGET_ARCH_64_BIT 1
+#endif
+#endif
+#elif V8_TARGET_ARCH_ARM
+#define V8_TARGET_ARCH_32_BIT 1
+#elif V8_TARGET_ARCH_ARM64
+#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_MIPS
+#define V8_TARGET_ARCH_32_BIT 1
+#elif V8_TARGET_ARCH_MIPS64
+#define V8_TARGET_ARCH_64_BIT 1
+#elif V8_TARGET_ARCH_X87
+#define V8_TARGET_ARCH_32_BIT 1
+#else
+#error Unknown target architecture pointer size
+#endif
+
 // Check for supported combinations of host and target architectures.
 #if V8_TARGET_ARCH_IA32 && !V8_HOST_ARCH_IA32
 #error Target architecture ia32 is only supported on ia32 host
 #endif
-#if V8_TARGET_ARCH_X64 && !V8_HOST_ARCH_X64
+#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT && \
+     !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_64_BIT))
 #error Target architecture x64 is only supported on x64 host
 #endif
+#if (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT && \
+     !(V8_HOST_ARCH_X64 && V8_HOST_ARCH_32_BIT))
+#error Target architecture x32 is only supported on x64 host with x32 support
+#endif
 #if (V8_TARGET_ARCH_ARM && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_ARM))
 #error Target architecture arm is only supported on arm and ia32 host
 #endif
@@ -91,6 +127,9 @@
 #if (V8_TARGET_ARCH_MIPS && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_MIPS))
 #error Target architecture mips is only supported on mips and ia32 host
 #endif
+#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64))
+#error Target architecture mips64 is only supported on mips64 and x64 host
+#endif
 
 // Determine architecture endianness.
 #if V8_TARGET_ARCH_IA32
@@ -107,14 +146,16 @@
 #else
 #define V8_TARGET_LITTLE_ENDIAN 1
 #endif
+#elif V8_TARGET_ARCH_MIPS64
+#define V8_TARGET_LITTLE_ENDIAN 1
 #elif V8_TARGET_ARCH_X87
 #define V8_TARGET_LITTLE_ENDIAN 1
 #else
 #error Unknown target architecture endianness
 #endif
 
-#if V8_OS_MACOSX || defined(__FreeBSD__) || defined(__OpenBSD__)
-#define USING_BSD_ABI
-#endif
+// Number of bits used to represent the page size for paged spaces. The
+// value of 20 gives 1MB per page.
+const int kPageSizeBits = 20;
 
 #endif  // V8_BASE_BUILD_CONFIG_H_
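
With kPageSizeBits replacing the old USING_BSD_ABI define, the page size is
derived rather than stated. A one-line sanity check of the comment above
(illustrative, not from the source):

    #include "src/base/build_config.h"

    // 1 << 20 == 1048576 bytes, i.e. 1MB per paged-space page.
    static const int kPageSize = 1 << kPageSizeBits;
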
diff --git a/src/base/compiler-specific.h b/src/base/compiler-specific.h
new file mode 100644
index 0000000..475a32c
--- /dev/null
+++ b/src/base/compiler-specific.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_COMPILER_SPECIFIC_H_
+#define V8_BASE_COMPILER_SPECIFIC_H_
+
+#include "include/v8config.h"
+
+// Annotate a variable indicating it's ok if the variable is not used.
+// (Typically used to silence a compiler warning when the assignment
+// is important for some other reason.)
+// Use like:
+//   int x ALLOW_UNUSED = ...;
+#if V8_HAS_ATTRIBUTE_UNUSED
+#define ALLOW_UNUSED __attribute__((unused))
+#else
+#define ALLOW_UNUSED
+#endif
+
+
+// Annotate a virtual method indicating it must be overriding a virtual
+// method in the parent class.
+// Use like:
+//   virtual void bar() OVERRIDE;
+#if V8_HAS_CXX11_OVERRIDE
+#define OVERRIDE override
+#else
+#define OVERRIDE /* NOT SUPPORTED */
+#endif
+
+
+// Annotate a virtual method indicating that subclasses must not override it,
+// or annotate a class to indicate that it cannot be subclassed.
+// Use like:
+//   class B FINAL : public A {};
+//   virtual void bar() FINAL;
+#if V8_HAS_CXX11_FINAL
+#define FINAL final
+#elif V8_HAS___FINAL
+#define FINAL __final
+#elif V8_HAS_SEALED
+#define FINAL sealed
+#else
+#define FINAL /* NOT SUPPORTED */
+#endif
+
+
+// Annotate a function indicating the caller must examine the return value.
+// Use like:
+//   int foo() WARN_UNUSED_RESULT;
+#if V8_HAS_ATTRIBUTE_WARN_UNUSED_RESULT
+#define WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#else
+#define WARN_UNUSED_RESULT /* NOT SUPPORTED */
+#endif
+
+#endif  // V8_BASE_COMPILER_SPECIFIC_H_
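
The per-macro "Use like" snippets above compose naturally. A combined sketch
of one class hierarchy using all three portability annotations (the classes
are illustrative):

    #include "src/base/compiler-specific.h"

    class Shape {
     public:
      virtual ~Shape() {}
      virtual int Area() WARN_UNUSED_RESULT;  // callers must use the result
    };

    class Square FINAL : public Shape {      // no further subclassing
     public:
      virtual int Area() OVERRIDE;           // must override a Shape virtual
    };
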
diff --git a/src/base/cpu-unittest.cc b/src/base/cpu-unittest.cc
new file mode 100644
index 0000000..5c58f86
--- /dev/null
+++ b/src/base/cpu-unittest.cc
@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/cpu.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(CPUTest, FeatureImplications) {
+  CPU cpu;
+
+  // ia32 and x64 features
+  EXPECT_TRUE(!cpu.has_sse() || cpu.has_mmx());
+  EXPECT_TRUE(!cpu.has_sse2() || cpu.has_sse());
+  EXPECT_TRUE(!cpu.has_sse3() || cpu.has_sse2());
+  EXPECT_TRUE(!cpu.has_ssse3() || cpu.has_sse3());
+  EXPECT_TRUE(!cpu.has_sse41() || cpu.has_sse3());
+  EXPECT_TRUE(!cpu.has_sse42() || cpu.has_sse41());
+
+  // arm features
+  EXPECT_TRUE(!cpu.has_vfp3_d32() || cpu.has_vfp3());
+}
+
+
+TEST(CPUTest, RequiredFeatures) {
+  CPU cpu;
+
+#if V8_HOST_ARCH_ARM
+  EXPECT_TRUE(cpu.has_fpu());
+#endif
+
+#if V8_HOST_ARCH_IA32
+  EXPECT_TRUE(cpu.has_fpu());
+  EXPECT_TRUE(cpu.has_sahf());
+#endif
+
+#if V8_HOST_ARCH_X64
+  EXPECT_TRUE(cpu.has_fpu());
+  EXPECT_TRUE(cpu.has_cmov());
+  EXPECT_TRUE(cpu.has_mmx());
+  EXPECT_TRUE(cpu.has_sse());
+  EXPECT_TRUE(cpu.has_sse2());
+#endif
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/cpu.cc b/src/base/cpu.cc
new file mode 100644
index 0000000..fbfbcf6
--- /dev/null
+++ b/src/base/cpu.cc
@@ -0,0 +1,528 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/cpu.h"
+
+#if V8_LIBC_MSVCRT
+#include <intrin.h>  // __cpuid()
+#endif
+#if V8_OS_POSIX
+#include <unistd.h>  // sysconf()
+#endif
+#if V8_OS_QNX
+#include <sys/syspage.h>  // cpuinfo
+#endif
+
+#include <ctype.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <algorithm>
+
+#include "src/base/logging.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"  // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+// Define __cpuid() for non-MSVC libraries.
+#if !V8_LIBC_MSVCRT
+
+static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
+#if defined(__i386__) && defined(__pic__)
+  // Make sure to preserve ebx, which contains the pointer
+  // to the GOT in case we're generating PIC.
+  __asm__ volatile (
+    "mov %%ebx, %%edi\n\t"
+    "cpuid\n\t"
+    "xchg %%edi, %%ebx\n\t"
+    : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type)
+  );
+#else
+  __asm__ volatile (
+    "cpuid \n\t"
+    : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
+    : "a"(info_type)
+  );
+#endif  // defined(__i386__) && defined(__pic__)
+}
+
+#endif  // !V8_LIBC_MSVCRT
+
+#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 \
+    || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+#if V8_OS_LINUX
+
+#if V8_HOST_ARCH_ARM
+
+// See <uapi/asm/hwcap.h> kernel header.
+/*
+ * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
+ */
+#define HWCAP_SWP (1 << 0)
+#define HWCAP_HALF  (1 << 1)
+#define HWCAP_THUMB (1 << 2)
+#define HWCAP_26BIT (1 << 3)  /* Play it safe */
+#define HWCAP_FAST_MULT (1 << 4)
+#define HWCAP_FPA (1 << 5)
+#define HWCAP_VFP (1 << 6)
+#define HWCAP_EDSP  (1 << 7)
+#define HWCAP_JAVA  (1 << 8)
+#define HWCAP_IWMMXT  (1 << 9)
+#define HWCAP_CRUNCH  (1 << 10)
+#define HWCAP_THUMBEE (1 << 11)
+#define HWCAP_NEON  (1 << 12)
+#define HWCAP_VFPv3 (1 << 13)
+#define HWCAP_VFPv3D16  (1 << 14) /* also set for VFPv4-D16 */
+#define HWCAP_TLS (1 << 15)
+#define HWCAP_VFPv4 (1 << 16)
+#define HWCAP_IDIVA (1 << 17)
+#define HWCAP_IDIVT (1 << 18)
+#define HWCAP_VFPD32  (1 << 19) /* set if VFP has 32 regs (not 16) */
+#define HWCAP_IDIV  (HWCAP_IDIVA | HWCAP_IDIVT)
+#define HWCAP_LPAE  (1 << 20)
+
+#define AT_HWCAP 16
+
+// Read the ELF HWCAP flags by parsing /proc/self/auxv.
+static uint32_t ReadELFHWCaps() {
+  uint32_t result = 0;
+  FILE* fp = fopen("/proc/self/auxv", "r");
+  if (fp != NULL) {
+    struct { uint32_t tag; uint32_t value; } entry;
+    for (;;) {
+      size_t n = fread(&entry, sizeof(entry), 1, fp);
+      if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
+        break;
+      }
+      if (entry.tag == AT_HWCAP) {
+        result = entry.value;
+        break;
+      }
+    }
+    fclose(fp);
+  }
+  return result;
+}
+
+#endif  // V8_HOST_ARCH_ARM
+
+#if V8_HOST_ARCH_MIPS
+int __detect_fp64_mode(void) {
+  double result = 0;
+  // Bit representation of (double)1 is 0x3FF0000000000000.
+  asm(
+    "lui $t0, 0x3FF0\n\t"
+    "ldc1 $f0, %0\n\t"
+    "mtc1 $t0, $f1\n\t"
+    "sdc1 $f0, %0\n\t"
+    : "+m" (result)
+    : : "t0", "$f0", "$f1", "memory");
+
+  return !(result == 1);
+}
+
+
+int __detect_mips_arch_revision(void) {
+  // TODO(dusmil): Do the specific syscall as soon as it is implemented in the
+  // mips kernel. Currently fall back to the least common denominator, which
+  // is mips32 revision 1.
+  return 1;
+}
+#endif
+
+// Extract the information exposed by the kernel via /proc/cpuinfo.
+class CPUInfo FINAL {
+ public:
+  CPUInfo() : datalen_(0) {
+    // Get the size of the cpuinfo file by reading it until the end. This is
+    // required because files under /proc do not always return a valid size
+    // when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
+    static const char PATHNAME[] = "/proc/cpuinfo";
+    FILE* fp = fopen(PATHNAME, "r");
+    if (fp != NULL) {
+      for (;;) {
+        char buffer[256];
+        size_t n = fread(buffer, 1, sizeof(buffer), fp);
+        if (n == 0) {
+          break;
+        }
+        datalen_ += n;
+      }
+      fclose(fp);
+    }
+
+    // Read the contents of the cpuinfo file.
+    data_ = new char[datalen_ + 1];
+    fp = fopen(PATHNAME, "r");
+    if (fp != NULL) {
+      for (size_t offset = 0; offset < datalen_; ) {
+        size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
+        if (n == 0) {
+          break;
+        }
+        offset += n;
+      }
+      fclose(fp);
+    }
+
+    // Zero-terminate the data.
+    data_[datalen_] = '\0';
+  }
+
+  ~CPUInfo() {
+    delete[] data_;
+  }
+
+  // Extract the content of the first occurrence of a given field in
+  // the cpuinfo file and return it as a heap-allocated string that
+  // must be freed by the caller using delete[].
+  // Returns NULL if not found.
+  char* ExtractField(const char* field) const {
+    DCHECK(field != NULL);
+
+    // Look for the first field occurrence, and ensure it starts the line.
+    size_t fieldlen = strlen(field);
+    char* p = data_;
+    for (;;) {
+      p = strstr(p, field);
+      if (p == NULL) {
+        return NULL;
+      }
+      if (p == data_ || p[-1] == '\n') {
+        break;
+      }
+      p += fieldlen;
+    }
+
+    // Skip to the first colon followed by a space.
+    p = strchr(p + fieldlen, ':');
+    if (p == NULL || !isspace(p[1])) {
+      return NULL;
+    }
+    p += 2;
+
+    // Find the end of the line.
+    char* q = strchr(p, '\n');
+    if (q == NULL) {
+      q = data_ + datalen_;
+    }
+
+    // Copy the line into a heap-allocated buffer.
+    size_t len = q - p;
+    char* result = new char[len + 1];
+    if (result != NULL) {
+      memcpy(result, p, len);
+      result[len] = '\0';
+    }
+    return result;
+  }
+
+ private:
+  char* data_;
+  size_t datalen_;
+};
+
+#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+// Checks whether a space-separated list of items contains the given 'item'.
+static bool HasListItem(const char* list, const char* item) {
+  ssize_t item_len = strlen(item);
+  const char* p = list;
+  if (p != NULL) {
+    while (*p != '\0') {
+      // Skip whitespace.
+      while (isspace(*p)) ++p;
+
+      // Find end of current list item.
+      const char* q = p;
+      while (*q != '\0' && !isspace(*q)) ++q;
+
+      if (item_len == q - p && memcmp(p, item, item_len) == 0) {
+        return true;
+      }
+
+      // Skip to next item.
+      p = q;
+    }
+  }
+  return false;
+}
+
+#endif  // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+#endif  // V8_OS_LINUX
+
+#endif  // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+
+CPU::CPU() : stepping_(0),
+             model_(0),
+             ext_model_(0),
+             family_(0),
+             ext_family_(0),
+             type_(0),
+             implementer_(0),
+             architecture_(0),
+             part_(0),
+             has_fpu_(false),
+             has_cmov_(false),
+             has_sahf_(false),
+             has_mmx_(false),
+             has_sse_(false),
+             has_sse2_(false),
+             has_sse3_(false),
+             has_ssse3_(false),
+             has_sse41_(false),
+             has_sse42_(false),
+             has_idiva_(false),
+             has_neon_(false),
+             has_thumb2_(false),
+             has_vfp_(false),
+             has_vfp3_(false),
+             has_vfp3_d32_(false),
+             is_fp64_mode_(false) {
+  memcpy(vendor_, "Unknown", 8);
+#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
+  int cpu_info[4];
+
+  // __cpuid with an InfoType argument of 0 returns the number of
+  // valid Ids in CPUInfo[0] and the CPU identification string in
+  // the other three array elements. The CPU identification string is
+  // not in linear order. The code below arranges the information
+  // in a human readable form. The human readable order is CPUInfo[1] |
+  // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
+  // before using memcpy to copy these three array elements to cpu_string.
+  __cpuid(cpu_info, 0);
+  unsigned num_ids = cpu_info[0];
+  std::swap(cpu_info[2], cpu_info[3]);
+  memcpy(vendor_, cpu_info + 1, 12);
+  vendor_[12] = '\0';
+
+  // Interpret CPU feature information.
+  if (num_ids > 0) {
+    __cpuid(cpu_info, 1);
+    stepping_ = cpu_info[0] & 0xf;
+    model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
+    family_ = (cpu_info[0] >> 8) & 0xf;
+    type_ = (cpu_info[0] >> 12) & 0x3;
+    ext_model_ = (cpu_info[0] >> 16) & 0xf;
+    ext_family_ = (cpu_info[0] >> 20) & 0xff;
+    has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
+    has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
+    has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
+    has_sse_ = (cpu_info[3] & 0x02000000) != 0;
+    has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
+    has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
+    has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
+    has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
+    has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+  }
+
+#if V8_HOST_ARCH_IA32
+  // SAHF is always available in compat/legacy mode.
+  has_sahf_ = true;
+#else
+  // Query extended IDs.
+  __cpuid(cpu_info, 0x80000000);
+  unsigned num_ext_ids = cpu_info[0];
+
+  // Interpret extended CPU feature information.
+  if (num_ext_ids > 0x80000000) {
+    __cpuid(cpu_info, 0x80000001);
+    // SAHF must be probed in long mode.
+    has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
+  }
+#endif
+
+#elif V8_HOST_ARCH_ARM
+
+#if V8_OS_LINUX
+
+  CPUInfo cpu_info;
+
+  // Extract the implementer from the "CPU implementer" field.
+  char* implementer = cpu_info.ExtractField("CPU implementer");
+  if (implementer != NULL) {
+    char* end;
+    implementer_ = strtol(implementer, &end, 0);
+    if (end == implementer) {
+      implementer_ = 0;
+    }
+    delete[] implementer;
+  }
+
+  // Extract part number from the "CPU part" field.
+  char* part = cpu_info.ExtractField("CPU part");
+  if (part != NULL) {
+    char* end;
+    part_ = strtol(part, &end, 0);
+    if (end == part) {
+      part_ = 0;
+    }
+    delete[] part;
+  }
+
+  // Extract architecture from the "CPU architecture" field.
+  // The list is well-known, unlike the output of the 'Processor'
+  // field, which can vary greatly.
+  // See the definition of the 'proc_arch' array in
+  // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
+  // same file.
+  char* architecture = cpu_info.ExtractField("CPU architecture");
+  if (architecture != NULL) {
+    char* end;
+    architecture_ = strtol(architecture, &end, 10);
+    if (end == architecture) {
+      architecture_ = 0;
+    }
+    delete[] architecture;
+
+    // Unfortunately, it seems that certain ARMv6-based CPUs
+    // report an incorrect architecture number of 7!
+    //
+    // See http://code.google.com/p/android/issues/detail?id=10812
+    //
+    // We try to correct this by looking at the 'elf_format'
+    // field reported by the 'Processor' field, which is of the
+    // form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
+    // an ARMv6 one. For example, the Raspberry Pi is one popular
+    // ARMv6 device that reports architecture 7.
+    if (architecture_ == 7) {
+      char* processor = cpu_info.ExtractField("Processor");
+      if (HasListItem(processor, "(v6l)")) {
+        architecture_ = 6;
+      }
+      delete[] processor;
+    }
+  }
+
+  // Try to extract the list of CPU features from ELF hwcaps.
+  uint32_t hwcaps = ReadELFHWCaps();
+  if (hwcaps != 0) {
+    has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
+    has_neon_ = (hwcaps & HWCAP_NEON) != 0;
+    has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
+    has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
+    has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
+                                   (hwcaps & HWCAP_VFPD32) != 0));
+  } else {
+    // Try to fall back to the "Features" cpuinfo field.
+    char* features = cpu_info.ExtractField("Features");
+    has_idiva_ = HasListItem(features, "idiva");
+    has_neon_ = HasListItem(features, "neon");
+    has_thumb2_ = HasListItem(features, "thumb2");
+    has_vfp_ = HasListItem(features, "vfp");
+    if (HasListItem(features, "vfpv3d16")) {
+      has_vfp3_ = true;
+    } else if (HasListItem(features, "vfpv3")) {
+      has_vfp3_ = true;
+      has_vfp3_d32_ = true;
+    }
+    delete[] features;
+  }
+
+  // Some old kernels will report vfp not vfpv3. Here we make an attempt
+  // to detect vfpv3 by checking for vfp *and* neon, since neon is only
+  // available on architectures with vfpv3. Checking neon on its own is
+  // not enough as it is possible to have neon without vfp.
+  if (has_vfp_ && has_neon_) {
+    has_vfp3_ = true;
+  }
+
+  // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+  if (architecture_ < 7 && has_vfp3_) {
+    architecture_ = 7;
+  }
+
+  // ARMv7 implies Thumb2.
+  if (architecture_ >= 7) {
+    has_thumb2_ = true;
+  }
+
+  // The earliest architecture with Thumb2 is ARMv6T2.
+  if (has_thumb2_ && architecture_ < 6) {
+    architecture_ = 6;
+  }
+
+  // We don't support any FPUs other than VFP.
+  has_fpu_ = has_vfp_;
+
+#elif V8_OS_QNX
+
+  uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
+  if (cpu_flags & ARM_CPU_FLAG_V7) {
+    architecture_ = 7;
+    has_thumb2_ = true;
+  } else if (cpu_flags & ARM_CPU_FLAG_V6) {
+    architecture_ = 6;
+    // QNX doesn't say if Thumb2 is available.
+    // Assume false for architectures older than ARMv7.
+  }
+  DCHECK(architecture_ >= 6);
+  has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
+  has_vfp_ = has_fpu_;
+  if (cpu_flags & ARM_CPU_FLAG_NEON) {
+    has_neon_ = true;
+    has_vfp3_ = has_vfp_;
+#ifdef ARM_CPU_FLAG_VFP_D32
+    has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0;
+#endif
+  }
+  has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0;
+
+#endif  // V8_OS_LINUX
+
+#elif V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+  // Simple detection of FPU at runtime for Linux.
+  // It is based on /proc/cpuinfo, which reveals hardware configuration
+  // to user-space applications.  According to MIPS (early 2010), no similar
+  // facility is universally available on the MIPS architectures,
+  // so it is up to individual OSes to provide one.
+  CPUInfo cpu_info;
+  char* cpu_model = cpu_info.ExtractField("cpu model");
+  has_fpu_ = HasListItem(cpu_model, "FPU");
+  delete[] cpu_model;
+#ifdef V8_HOST_ARCH_MIPS
+  is_fp64_mode_ = __detect_fp64_mode();
+  architecture_ = __detect_mips_arch_revision();
+#endif
+
+#elif V8_HOST_ARCH_ARM64
+
+  CPUInfo cpu_info;
+
+  // Extract the implementer from the "CPU implementer" field.
+  char* implementer = cpu_info.ExtractField("CPU implementer");
+  if (implementer != NULL) {
+    char* end;
+    implementer_ = strtol(implementer, &end, 0);
+    if (end == implementer) {
+      implementer_ = 0;
+    }
+    delete[] implementer;
+  }
+
+  // Extract part number from the "CPU part" field.
+  char* part = cpu_info.ExtractField("CPU part");
+  if (part != NULL) {
+    char* end;
+    part_ = strtol(part, &end, 0);
+    if (end == part) {
+      part_ = 0;
+    }
+    delete[] part;
+  }
+
+#endif
+}
+
+} }  // namespace v8::base
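
ReadELFHWCaps() above parses /proc/self/auxv by hand because it predates a
portable accessor. On glibc 2.16 and later the same HWCAP word is available
directly; a sketch of that shortcut (an assumption about the target libc,
not what this file does):

    #include <stdint.h>
    #include <sys/auxv.h>  // getauxval(), glibc >= 2.16

    static uint32_t ReadELFHWCaps_Getauxval() {
      return static_cast<uint32_t>(getauxval(AT_HWCAP));
    }
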
diff --git a/src/base/cpu.h b/src/base/cpu.h
new file mode 100644
index 0000000..dc0eaf4
--- /dev/null
+++ b/src/base/cpu.h
@@ -0,0 +1,115 @@
+// Copyright 2006-2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module contains the architecture-specific code. This makes the rest
+// of the code less dependent on differences between different processor
+// architectures.
+// The classes have the same definition for all architectures. The
+// implementation for a particular architecture is put in cpu_<arch>.cc.
+// The build system then uses the implementation for the target architecture.
+//
+
+#ifndef V8_BASE_CPU_H_
+#define V8_BASE_CPU_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// CPU
+//
+// Query information about the processor.
+//
+// This class also has static methods for the architecture specific functions.
+// Add methods here to cope with differences between the supported
+// architectures. For each architecture the file cpu_<arch>.cc contains the
+// implementation of these static functions.
+
+class CPU FINAL {
+ public:
+  CPU();
+
+  // x86 CPUID information
+  const char* vendor() const { return vendor_; }
+  int stepping() const { return stepping_; }
+  int model() const { return model_; }
+  int ext_model() const { return ext_model_; }
+  int family() const { return family_; }
+  int ext_family() const { return ext_family_; }
+  int type() const { return type_; }
+
+  // arm implementer/part information
+  int implementer() const { return implementer_; }
+  static const int ARM = 0x41;
+  static const int NVIDIA = 0x4e;
+  static const int QUALCOMM = 0x51;
+  int architecture() const { return architecture_; }
+  int part() const { return part_; }
+  static const int ARM_CORTEX_A5 = 0xc05;
+  static const int ARM_CORTEX_A7 = 0xc07;
+  static const int ARM_CORTEX_A8 = 0xc08;
+  static const int ARM_CORTEX_A9 = 0xc09;
+  static const int ARM_CORTEX_A12 = 0xc0c;
+  static const int ARM_CORTEX_A15 = 0xc0f;
+
+  // General features
+  bool has_fpu() const { return has_fpu_; }
+
+  // x86 features
+  bool has_cmov() const { return has_cmov_; }
+  bool has_sahf() const { return has_sahf_; }
+  bool has_mmx() const { return has_mmx_; }
+  bool has_sse() const { return has_sse_; }
+  bool has_sse2() const { return has_sse2_; }
+  bool has_sse3() const { return has_sse3_; }
+  bool has_ssse3() const { return has_ssse3_; }
+  bool has_sse41() const { return has_sse41_; }
+  bool has_sse42() const { return has_sse42_; }
+
+  // arm features
+  bool has_idiva() const { return has_idiva_; }
+  bool has_neon() const { return has_neon_; }
+  bool has_thumb2() const { return has_thumb2_; }
+  bool has_vfp() const { return has_vfp_; }
+  bool has_vfp3() const { return has_vfp3_; }
+  bool has_vfp3_d32() const { return has_vfp3_d32_; }
+
+  // mips features
+  bool is_fp64_mode() const { return is_fp64_mode_; }
+
+ private:
+  char vendor_[13];
+  int stepping_;
+  int model_;
+  int ext_model_;
+  int family_;
+  int ext_family_;
+  int type_;
+  int implementer_;
+  int architecture_;
+  int part_;
+  bool has_fpu_;
+  bool has_cmov_;
+  bool has_sahf_;
+  bool has_mmx_;
+  bool has_sse_;
+  bool has_sse2_;
+  bool has_sse3_;
+  bool has_ssse3_;
+  bool has_sse41_;
+  bool has_sse42_;
+  bool has_idiva_;
+  bool has_neon_;
+  bool has_thumb2_;
+  bool has_vfp_;
+  bool has_vfp3_;
+  bool has_vfp3_d32_;
+  bool is_fp64_mode_;
+};
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_CPU_H_
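
Typical usage is to construct a CPU once (all probing happens in the
constructor) and branch on the feature accessors. A sketch with an
illustrative caller:

    #include "src/base/cpu.h"

    bool CanUseSSE2CodePath() {
      v8::base::CPU cpu;  // runs cpuid / cpuinfo probing
      return cpu.has_sse2();
    }
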
diff --git a/src/base/division-by-constant-unittest.cc b/src/base/division-by-constant-unittest.cc
new file mode 100644
index 0000000..47c2483
--- /dev/null
+++ b/src/base/division-by-constant-unittest.cc
@@ -0,0 +1,132 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Check all examples from table 10-1 of "Hacker's Delight".
+
+#include "src/base/division-by-constant.h"
+
+#include <ostream>  // NOLINT
+
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace base {
+
+template <class T>
+std::ostream& operator<<(std::ostream& os,
+                         const MagicNumbersForDivision<T>& mag) {
+  return os << "{ multiplier: " << mag.multiplier << ", shift: " << mag.shift
+            << ", add: " << mag.add << " }";
+}
+
+
+// Some abbreviations...
+
+typedef MagicNumbersForDivision<uint32_t> M32;
+typedef MagicNumbersForDivision<uint64_t> M64;
+
+
+static M32 s32(int32_t d) {
+  return SignedDivisionByConstant<uint32_t>(static_cast<uint32_t>(d));
+}
+
+
+static M64 s64(int64_t d) {
+  return SignedDivisionByConstant<uint64_t>(static_cast<uint64_t>(d));
+}
+
+
+static M32 u32(uint32_t d) { return UnsignedDivisionByConstant<uint32_t>(d); }
+static M64 u64(uint64_t d) { return UnsignedDivisionByConstant<uint64_t>(d); }
+
+
+TEST(DivisionByConstant, Signed32) {
+  EXPECT_EQ(M32(0x99999999U, 1, false), s32(-5));
+  EXPECT_EQ(M32(0x55555555U, 1, false), s32(-3));
+  int32_t d = -1;
+  for (unsigned k = 1; k <= 32 - 1; ++k) {
+    d *= 2;
+    EXPECT_EQ(M32(0x7FFFFFFFU, k - 1, false), s32(d));
+  }
+  for (unsigned k = 1; k <= 32 - 2; ++k) {
+    EXPECT_EQ(M32(0x80000001U, k - 1, false), s32(1 << k));
+  }
+  EXPECT_EQ(M32(0x55555556U, 0, false), s32(3));
+  EXPECT_EQ(M32(0x66666667U, 1, false), s32(5));
+  EXPECT_EQ(M32(0x2AAAAAABU, 0, false), s32(6));
+  EXPECT_EQ(M32(0x92492493U, 2, false), s32(7));
+  EXPECT_EQ(M32(0x38E38E39U, 1, false), s32(9));
+  EXPECT_EQ(M32(0x66666667U, 2, false), s32(10));
+  EXPECT_EQ(M32(0x2E8BA2E9U, 1, false), s32(11));
+  EXPECT_EQ(M32(0x2AAAAAABU, 1, false), s32(12));
+  EXPECT_EQ(M32(0x51EB851FU, 3, false), s32(25));
+  EXPECT_EQ(M32(0x10624DD3U, 3, false), s32(125));
+  EXPECT_EQ(M32(0x68DB8BADU, 8, false), s32(625));
+}
+
+
+TEST(DivisionByConstant, Unsigned32) {
+  EXPECT_EQ(M32(0x00000000U, 0, true), u32(1));
+  for (unsigned k = 1; k <= 30; ++k) {
+    EXPECT_EQ(M32(1U << (32 - k), 0, false), u32(1U << k));
+  }
+  EXPECT_EQ(M32(0xAAAAAAABU, 1, false), u32(3));
+  EXPECT_EQ(M32(0xCCCCCCCDU, 2, false), u32(5));
+  EXPECT_EQ(M32(0xAAAAAAABU, 2, false), u32(6));
+  EXPECT_EQ(M32(0x24924925U, 3, true), u32(7));
+  EXPECT_EQ(M32(0x38E38E39U, 1, false), u32(9));
+  EXPECT_EQ(M32(0xCCCCCCCDU, 3, false), u32(10));
+  EXPECT_EQ(M32(0xBA2E8BA3U, 3, false), u32(11));
+  EXPECT_EQ(M32(0xAAAAAAABU, 3, false), u32(12));
+  EXPECT_EQ(M32(0x51EB851FU, 3, false), u32(25));
+  EXPECT_EQ(M32(0x10624DD3U, 3, false), u32(125));
+  EXPECT_EQ(M32(0xD1B71759U, 9, false), u32(625));
+}
+
+
+TEST(DivisionByConstant, Signed64) {
+  EXPECT_EQ(M64(0x9999999999999999ULL, 1, false), s64(-5));
+  EXPECT_EQ(M64(0x5555555555555555ULL, 1, false), s64(-3));
+  int64_t d = -1;
+  for (unsigned k = 1; k <= 64 - 1; ++k) {
+    d *= 2;
+    EXPECT_EQ(M64(0x7FFFFFFFFFFFFFFFULL, k - 1, false), s64(d));
+  }
+  for (unsigned k = 1; k <= 64 - 2; ++k) {
+    EXPECT_EQ(M64(0x8000000000000001ULL, k - 1, false), s64(1LL << k));
+  }
+  EXPECT_EQ(M64(0x5555555555555556ULL, 0, false), s64(3));
+  EXPECT_EQ(M64(0x6666666666666667ULL, 1, false), s64(5));
+  EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 0, false), s64(6));
+  EXPECT_EQ(M64(0x4924924924924925ULL, 1, false), s64(7));
+  EXPECT_EQ(M64(0x1C71C71C71C71C72ULL, 0, false), s64(9));
+  EXPECT_EQ(M64(0x6666666666666667ULL, 2, false), s64(10));
+  EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), s64(11));
+  EXPECT_EQ(M64(0x2AAAAAAAAAAAAAABULL, 1, false), s64(12));
+  EXPECT_EQ(M64(0xA3D70A3D70A3D70BULL, 4, false), s64(25));
+  EXPECT_EQ(M64(0x20C49BA5E353F7CFULL, 4, false), s64(125));
+  EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), s64(625));
+}
+
+
+TEST(DivisionByConstant, Unsigned64) {
+  EXPECT_EQ(M64(0x0000000000000000ULL, 0, true), u64(1));
+  for (unsigned k = 1; k <= 64 - 2; ++k) {
+    EXPECT_EQ(M64(1ULL << (64 - k), 0, false), u64(1ULL << k));
+  }
+  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 1, false), u64(3));
+  EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 2, false), u64(5));
+  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 2, false), u64(6));
+  EXPECT_EQ(M64(0x2492492492492493ULL, 3, true), u64(7));
+  EXPECT_EQ(M64(0xE38E38E38E38E38FULL, 3, false), u64(9));
+  EXPECT_EQ(M64(0xCCCCCCCCCCCCCCCDULL, 3, false), u64(10));
+  EXPECT_EQ(M64(0x2E8BA2E8BA2E8BA3ULL, 1, false), u64(11));
+  EXPECT_EQ(M64(0xAAAAAAAAAAAAAAABULL, 3, false), u64(12));
+  EXPECT_EQ(M64(0x47AE147AE147AE15ULL, 5, true), u64(25));
+  EXPECT_EQ(M64(0x0624DD2F1A9FBE77ULL, 7, true), u64(125));
+  EXPECT_EQ(M64(0x346DC5D63886594BULL, 7, false), u64(625));
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/division-by-constant.cc b/src/base/division-by-constant.cc
new file mode 100644
index 0000000..235d39f
--- /dev/null
+++ b/src/base/division-by-constant.cc
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/division-by-constant.h"
+
+#include <stdint.h>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+template <class T>
+bool MagicNumbersForDivision<T>::operator==(
+    const MagicNumbersForDivision& rhs) const {
+  return multiplier == rhs.multiplier && shift == rhs.shift && add == rhs.add;
+}
+
+
+template <class T>
+MagicNumbersForDivision<T> SignedDivisionByConstant(T d) {
+  STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
+  DCHECK(d != static_cast<T>(-1) && d != 0 && d != 1);
+  const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
+  const T min = (static_cast<T>(1) << (bits - 1));
+  const bool neg = (min & d) != 0;
+  const T ad = neg ? (0 - d) : d;
+  const T t = min + (d >> (bits - 1));
+  const T anc = t - 1 - t % ad;  // Absolute value of nc
+  unsigned p = bits - 1;         // Init. p.
+  T q1 = min / anc;              // Init. q1 = 2**p/|nc|.
+  T r1 = min - q1 * anc;         // Init. r1 = rem(2**p, |nc|).
+  T q2 = min / ad;               // Init. q2 = 2**p/|d|.
+  T r2 = min - q2 * ad;          // Init. r2 = rem(2**p, |d|).
+  T delta;
+  do {
+    p = p + 1;
+    q1 = 2 * q1;      // Update q1 = 2**p/|nc|.
+    r1 = 2 * r1;      // Update r1 = rem(2**p, |nc|).
+    if (r1 >= anc) {  // Must be an unsigned comparison here.
+      q1 = q1 + 1;
+      r1 = r1 - anc;
+    }
+    q2 = 2 * q2;     // Update q2 = 2**p/|d|.
+    r2 = 2 * r2;     // Update r2 = rem(2**p, |d|).
+    if (r2 >= ad) {  // Must be an unsigned comparison here.
+      q2 = q2 + 1;
+      r2 = r2 - ad;
+    }
+    delta = ad - r2;
+  } while (q1 < delta || (q1 == delta && r1 == 0));
+  T mul = q2 + 1;
+  return {neg ? (0 - mul) : mul, p - bits, false};
+}
+
+
+template <class T>
+MagicNumbersForDivision<T> UnsignedDivisionByConstant(T d,
+                                                      unsigned leading_zeros) {
+  STATIC_ASSERT(static_cast<T>(0) < static_cast<T>(-1));
+  DCHECK(d != 0);
+  const unsigned bits = static_cast<unsigned>(sizeof(T)) * 8;
+  const T ones = ~static_cast<T>(0) >> leading_zeros;
+  const T min = static_cast<T>(1) << (bits - 1);
+  const T max = ~static_cast<T>(0) >> 1;
+  const T nc = ones - (ones - d) % d;
+  bool a = false;         // Init. "add" indicator.
+  unsigned p = bits - 1;  // Init. p.
+  T q1 = min / nc;        // Init. q1 = 2**p/nc
+  T r1 = min - q1 * nc;   // Init. r1 = rem(2**p,nc)
+  T q2 = max / d;         // Init. q2 = (2**p - 1)/d.
+  T r2 = max - q2 * d;    // Init. r2 = rem(2**p - 1, d).
+  T delta;
+  do {
+    p = p + 1;
+    if (r1 >= nc - r1) {
+      q1 = 2 * q1 + 1;
+      r1 = 2 * r1 - nc;
+    } else {
+      q1 = 2 * q1;
+      r1 = 2 * r1;
+    }
+    if (r2 + 1 >= d - r2) {
+      if (q2 >= max) a = true;
+      q2 = 2 * q2 + 1;
+      r2 = 2 * r2 + 1 - d;
+    } else {
+      if (q2 >= min) a = true;
+      q2 = 2 * q2;
+      r2 = 2 * r2 + 1;
+    }
+    delta = d - 1 - r2;
+  } while (p < bits * 2 && (q1 < delta || (q1 == delta && r1 == 0)));
+  return {q2 + 1, p - bits, a};
+}
+
+
+// -----------------------------------------------------------------------------
+// Instantiations.
+
+template struct MagicNumbersForDivision<uint32_t>;
+template struct MagicNumbersForDivision<uint64_t>;
+
+template MagicNumbersForDivision<uint32_t> SignedDivisionByConstant(uint32_t d);
+template MagicNumbersForDivision<uint64_t> SignedDivisionByConstant(uint64_t d);
+
+template MagicNumbersForDivision<uint32_t> UnsignedDivisionByConstant(
+    uint32_t d, unsigned leading_zeros);
+template MagicNumbersForDivision<uint64_t> UnsignedDivisionByConstant(
+    uint64_t d, unsigned leading_zeros);
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/division-by-constant.h b/src/base/division-by-constant.h
new file mode 100644
index 0000000..02e7e14
--- /dev/null
+++ b/src/base/division-by-constant.h
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_DIVISION_BY_CONSTANT_H_
+#define V8_BASE_DIVISION_BY_CONSTANT_H_
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+
+// The magic numbers for division via multiplication, see Warren's "Hacker's
+// Delight", chapter 10. The template parameter must be one of the unsigned
+// integral types.
+template <class T>
+struct MagicNumbersForDivision {
+  MagicNumbersForDivision(T m, unsigned s, bool a)
+      : multiplier(m), shift(s), add(a) {}
+  bool operator==(const MagicNumbersForDivision& rhs) const;
+
+  T multiplier;
+  unsigned shift;
+  bool add;
+};
+
+
+// Calculate the multiplier and shift for signed division via multiplication.
+// The divisor must not be -1, 0 or 1 when interpreted as a signed value.
+template <class T>
+MagicNumbersForDivision<T> SignedDivisionByConstant(T d);
+
+
+// Calculate the multiplier and shift for unsigned division via multiplication,
+// see Warren's "Hacker's Delight", chapter 10. The divisor must not be 0 and
+// leading_zeros can be used to speed up the calculation if the given number of
+// upper bits of the dividend value are known to be zero.
+template <class T>
+MagicNumbersForDivision<T> UnsignedDivisionByConstant(
+    T d, unsigned leading_zeros = 0);
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_DIVISION_BY_CONSTANT_H_
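
Consumers turn these magic numbers back into code as a widening multiply, an
optional add fixup, and a shift. A sketch of the expansion for the unsigned
case (the fixup sequence is from "Hacker's Delight", chapter 10; it assumes
shift >= 1 whenever add is set, which holds for every divisor except 1):

    #include <assert.h>
    #include <stdint.h>
    #include "src/base/division-by-constant.h"

    static uint32_t ApplyUnsignedMagic(
        uint32_t n, const v8::base::MagicNumbersForDivision<uint32_t>& mag) {
      uint32_t q = static_cast<uint32_t>(
          (static_cast<uint64_t>(n) * mag.multiplier) >> 32);
      if (mag.add) {
        // The encoded multiplier really stands for multiplier + 2**32; this
        // rewrite avoids the 64-bit overflow the direct form would hit.
        q = ((n - q) >> 1) + q;
        return q >> (mag.shift - 1);
      }
      return q >> mag.shift;
    }

    int main() {
      using v8::base::UnsignedDivisionByConstant;
      const uint32_t inputs[] = {0, 1, 6, 7, 100, 0xffffffffu};
      for (size_t i = 0; i < sizeof(inputs) / sizeof(inputs[0]); ++i) {
        uint32_t n = inputs[i];
        assert(ApplyUnsignedMagic(n, UnsignedDivisionByConstant<uint32_t>(3)) ==
               n / 3);
        assert(ApplyUnsignedMagic(n, UnsignedDivisionByConstant<uint32_t>(7)) ==
               n / 7);
      }
      return 0;
    }
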
diff --git a/src/base/flags-unittest.cc b/src/base/flags-unittest.cc
new file mode 100644
index 0000000..a1d6f37
--- /dev/null
+++ b/src/base/flags-unittest.cc
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8stdint.h"
+#include "src/base/flags.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+enum Flag1 {
+  kFlag1None = 0,
+  kFlag1First = 1u << 1,
+  kFlag1Second = 1u << 2,
+  kFlag1All = kFlag1None | kFlag1First | kFlag1Second
+};
+typedef Flags<Flag1> Flags1;
+
+
+DEFINE_OPERATORS_FOR_FLAGS(Flags1)
+
+
+Flags1 bar(Flags1 flags1) { return flags1; }
+
+}  // namespace
+
+
+TEST(FlagsTest, BasicOperations) {
+  Flags1 a;
+  EXPECT_EQ(kFlag1None, static_cast<int>(a));
+  a |= kFlag1First;
+  EXPECT_EQ(kFlag1First, static_cast<int>(a));
+  a = a | kFlag1Second;
+  EXPECT_EQ(kFlag1All, static_cast<int>(a));
+  a &= kFlag1Second;
+  EXPECT_EQ(kFlag1Second, static_cast<int>(a));
+  a = kFlag1None & a;
+  EXPECT_EQ(kFlag1None, static_cast<int>(a));
+  a ^= (kFlag1All | kFlag1None);
+  EXPECT_EQ(kFlag1All, static_cast<int>(a));
+  Flags1 b = ~a;
+  EXPECT_EQ(kFlag1All, static_cast<int>(a));
+  EXPECT_EQ(~static_cast<int>(a), static_cast<int>(b));
+  Flags1 c = a;
+  EXPECT_EQ(a, c);
+  EXPECT_NE(a, b);
+  EXPECT_EQ(a, bar(a));
+  EXPECT_EQ(a, bar(kFlag1All));
+}
+
+
+namespace {
+namespace foo {
+
+enum Option {
+  kNoOptions = 0,
+  kOption1 = 1,
+  kOption2 = 2,
+  kAllOptions = kNoOptions | kOption1 | kOption2
+};
+typedef Flags<Option> Options;
+
+}  // namespace foo
+
+
+DEFINE_OPERATORS_FOR_FLAGS(foo::Options)
+
+}  // namespace
+
+
+TEST(FlagsTest, NamespaceScope) {
+  foo::Options options;
+  options ^= foo::kNoOptions;
+  options |= foo::kOption1 | foo::kOption2;
+  EXPECT_EQ(foo::kAllOptions, static_cast<int>(options));
+}
+
+
+namespace {
+
+struct Foo {
+  enum Enum { kEnum1 = 1, kEnum2 = 2 };
+  typedef Flags<Enum, uint32_t> Enums;
+};
+
+
+DEFINE_OPERATORS_FOR_FLAGS(Foo::Enums)
+
+}  // namespace
+
+
+TEST(FlagsTest, ClassScope) {
+  Foo::Enums enums;
+  enums |= Foo::kEnum1;
+  enums |= Foo::kEnum2;
+  EXPECT_TRUE(enums & Foo::kEnum1);
+  EXPECT_TRUE(enums & Foo::kEnum2);
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/flags.h b/src/base/flags.h
new file mode 100644
index 0000000..f3420ee
--- /dev/null
+++ b/src/base/flags.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_FLAGS_H_
+#define V8_BASE_FLAGS_H_
+
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+// The Flags class provides a type-safe way of storing OR-combinations of enum
+// values. The Flags<T, S> class is a template class, where T is an enum type,
+// and S is the underlying storage type (usually int).
+//
+// The traditional C++ approach for storing OR-combinations of enum values is to
+// use an int or unsigned int variable. The inconvenience with this approach is
+// that there's no type checking at all; any enum value can be OR'd with any
+// other enum value and passed on to a function that takes an int or unsigned
+// int.
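+//
+// Illustrative usage (hypothetical enum, mirroring flags-unittest.cc):
+//
+//   enum Option { kNoOptions = 0, kOption1 = 1 << 0, kOption2 = 1 << 1 };
+//   typedef Flags<Option> Options;
+//   DEFINE_OPERATORS_FOR_FLAGS(Options)
+//
+//   Options options = kOption1 | kOption2;
+//   if (options & kOption1) { /* ... */ }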
+template <typename T, typename S = int>
+class Flags FINAL {
+ public:
+  typedef T flag_type;
+  typedef S mask_type;
+
+  Flags() : mask_(0) {}
+  Flags(flag_type flag) : mask_(flag) {}  // NOLINT(runtime/explicit)
+  explicit Flags(mask_type mask) : mask_(mask) {}
+
+  Flags& operator&=(const Flags& flags) {
+    mask_ &= flags.mask_;
+    return *this;
+  }
+  Flags& operator|=(const Flags& flags) {
+    mask_ |= flags.mask_;
+    return *this;
+  }
+  Flags& operator^=(const Flags& flags) {
+    mask_ ^= flags.mask_;
+    return *this;
+  }
+
+  Flags operator&(const Flags& flags) const { return Flags(*this) &= flags; }
+  Flags operator|(const Flags& flags) const { return Flags(*this) |= flags; }
+  Flags operator^(const Flags& flags) const { return Flags(*this) ^= flags; }
+
+  Flags& operator&=(flag_type flag) { return operator&=(Flags(flag)); }
+  Flags& operator|=(flag_type flag) { return operator|=(Flags(flag)); }
+  Flags& operator^=(flag_type flag) { return operator^=(Flags(flag)); }
+
+  Flags operator&(flag_type flag) const { return operator&(Flags(flag)); }
+  Flags operator|(flag_type flag) const { return operator|(Flags(flag)); }
+  Flags operator^(flag_type flag) const { return operator^(Flags(flag)); }
+
+  Flags operator~() const { return Flags(~mask_); }
+
+  operator mask_type() const { return mask_; }
+  bool operator!() const { return !mask_; }
+
+ private:
+  mask_type mask_;
+};
+
+
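+// Defines the free operators so that a flag value can also appear on the
+// left-hand side of &, | and ^. The overloads taking Type::mask_type return
+// void, presumably to poison accidental combinations of a flag with a plain
+// integer, which would otherwise silently bypass the type check.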
+#define DEFINE_OPERATORS_FOR_FLAGS(Type)                                       \
+  inline Type operator&(Type::flag_type lhs, Type::flag_type rhs)              \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                         \
+  inline Type operator&(Type::flag_type lhs, Type::flag_type rhs) {            \
+    return Type(lhs) & rhs;                                                    \
+  }                                                                            \
+  inline Type operator&(Type::flag_type lhs, const Type& rhs)                  \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                         \
+  inline Type operator&(Type::flag_type lhs, const Type& rhs) {                \
+    return rhs & lhs;                                                          \
+  }                                                                            \
+  inline void operator&(Type::flag_type lhs, Type::mask_type rhs)              \
+      ALLOW_UNUSED;                                                            \
+  inline void operator&(Type::flag_type lhs, Type::mask_type rhs) {}           \
+  inline Type operator|(Type::flag_type lhs, Type::flag_type rhs)              \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                         \
+  inline Type operator|(Type::flag_type lhs, Type::flag_type rhs) {            \
+    return Type(lhs) | rhs;                                                    \
+  }                                                                            \
+  inline Type operator|(Type::flag_type lhs, const Type& rhs)                  \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                         \
+  inline Type operator|(Type::flag_type lhs, const Type& rhs) {                \
+    return rhs | lhs;                                                          \
+  }                                                                            \
+  inline void operator|(Type::flag_type lhs, Type::mask_type rhs)              \
+      ALLOW_UNUSED;                                                            \
+  inline void operator|(Type::flag_type lhs, Type::mask_type rhs) {}           \
+  inline Type operator^(Type::flag_type lhs, Type::flag_type rhs)              \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                         \
+  inline Type operator^(Type::flag_type lhs, Type::flag_type rhs) {            \
+    return Type(lhs) ^ rhs;                                                    \
+  }                                                                            \
+  inline Type operator^(Type::flag_type lhs, const Type& rhs)                  \
+      ALLOW_UNUSED WARN_UNUSED_RESULT;                                         \
+  inline Type operator^(Type::flag_type lhs, const Type& rhs) {                \
+    return rhs ^ lhs;                                                          \
+  }                                                                            \
+  inline void operator^(Type::flag_type lhs, Type::mask_type rhs)              \
+      ALLOW_UNUSED;                                                            \
+  inline void operator^(Type::flag_type lhs, Type::mask_type rhs) {}
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_FLAGS_H_
diff --git a/src/base/logging.cc b/src/base/logging.cc
new file mode 100644
index 0000000..c3f609f
--- /dev/null
+++ b/src/base/logging.cc
@@ -0,0 +1,88 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/logging.h"
+
+#if V8_LIBC_GLIBC || V8_OS_BSD
+# include <cxxabi.h>
+# include <execinfo.h>
+#elif V8_OS_QNX
+# include <backtrace.h>
+#endif  // V8_LIBC_GLIBC || V8_OS_BSD
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+// Attempts to dump a backtrace (if supported).
+void DumpBacktrace() {
+#if V8_LIBC_GLIBC || V8_OS_BSD
+  void* trace[100];
+  int size = backtrace(trace, arraysize(trace));
+  char** symbols = backtrace_symbols(trace, size);
+  OS::PrintError("\n==== C stack trace ===============================\n\n");
+  if (size == 0) {
+    OS::PrintError("(empty)\n");
+  } else if (symbols == NULL) {
+    OS::PrintError("(no symbols)\n");
+  } else {
+    for (int i = 1; i < size; ++i) {
+      OS::PrintError("%2d: ", i);
+      char mangled[201];
+      if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
+        int status;
+        size_t length;
+        char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
+        OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
+        free(demangled);
+      } else {
+        OS::PrintError("??\n");
+      }
+    }
+  }
+  free(symbols);
+#elif V8_OS_QNX
+  char out[1024];
+  bt_accessor_t acc;
+  bt_memmap_t memmap;
+  bt_init_accessor(&acc, BT_SELF);
+  bt_load_memmap(&acc, &memmap);
+  bt_sprn_memmap(&memmap, out, sizeof(out));
+  OS::PrintError(out);
+  bt_addr_t trace[100];
+  int size = bt_get_backtrace(&acc, trace, arraysize(trace));
+  OS::PrintError("\n==== C stack trace ===============================\n\n");
+  if (size == 0) {
+    OS::PrintError("(empty)\n");
+  } else {
+    bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
+                   out, sizeof(out), NULL);
+    OS::PrintError(out);
+  }
+  bt_unload_memmap(&memmap);
+  bt_release_accessor(&acc);
+#endif  // V8_LIBC_GLIBC || V8_OS_BSD
+}
+
+} }  // namespace v8::base
+
+
+// Contains protection against recursive calls (faults while handling faults).
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
+  fflush(stdout);
+  fflush(stderr);
+  v8::base::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file,
+                           line);
+  va_list arguments;
+  va_start(arguments, format);
+  v8::base::OS::VPrintError(format, arguments);
+  va_end(arguments);
+  v8::base::OS::PrintError("\n#\n");
+  v8::base::DumpBacktrace();
+  fflush(stderr);
+  v8::base::OS::Abort();
+}
diff --git a/src/base/logging.h b/src/base/logging.h
new file mode 100644
index 0000000..8e24bb0
--- /dev/null
+++ b/src/base/logging.h
@@ -0,0 +1,223 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_LOGGING_H_
+#define V8_BASE_LOGGING_H_
+
+#include <string.h>
+
+#include "include/v8stdint.h"
+#include "src/base/build_config.h"
+
+extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
+
+
+// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
+// development, but they should not be relied on in the final product.
+#ifdef DEBUG
+#define FATAL(msg)                              \
+  V8_Fatal(__FILE__, __LINE__, "%s", (msg))
+#define UNIMPLEMENTED()                         \
+  V8_Fatal(__FILE__, __LINE__, "unimplemented code")
+#define UNREACHABLE()                           \
+  V8_Fatal(__FILE__, __LINE__, "unreachable code")
+#else
+#define FATAL(msg)                              \
+  V8_Fatal("", 0, "%s", (msg))
+#define UNIMPLEMENTED()                         \
+  V8_Fatal("", 0, "unimplemented code")
+#define UNREACHABLE() ((void) 0)
+#endif
+
+
+// The CHECK macro checks that the given condition is true; if not, it
+// prints a message to stderr and aborts.
+#define CHECK(condition) do {                                       \
+    if (!(condition)) {                                             \
+      V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
+    }                                                               \
+  } while (0)
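+//
+// For example (illustrative): CHECK(index >= 0) is active in all build modes
+// and aborts via V8_Fatal with the message "CHECK(index >= 0) failed" when
+// the condition does not hold.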
+
+
+// Helper function used by the CHECK_EQ function when given int
+// arguments.  Should not be called directly.
+inline void CheckEqualsHelper(const char* file, int line,
+                              const char* expected_source, int expected,
+                              const char* value_source, int value) {
+  if (expected != value) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %i\n#   Found: %i",
+             expected_source, value_source, expected, value);
+  }
+}
+
+
+// Helper function used by the CHECK_EQ function when given int64_t
+// arguments.  Should not be called directly.
+inline void CheckEqualsHelper(const char* file, int line,
+                              const char* expected_source,
+                              int64_t expected,
+                              const char* value_source,
+                              int64_t value) {
+  if (expected != value) {
+    // Print int64_t values in hex, as two int32s,
+    // to avoid platform-dependencies.
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#"
+             "   Expected: 0x%08x%08x\n#   Found: 0x%08x%08x",
+             expected_source, value_source,
+             static_cast<uint32_t>(expected >> 32),
+             static_cast<uint32_t>(expected),
+             static_cast<uint32_t>(value >> 32),
+             static_cast<uint32_t>(value));
+  }
+}
+
+
+// Helper function used by the CHECK_NE function when given int
+// arguments.  Should not be called directly.
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* unexpected_source,
+                                 int unexpected,
+                                 const char* value_source,
+                                 int value) {
+  if (unexpected == value) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
+             unexpected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK function when given string
+// arguments.  Should not be called directly.
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              const char* expected,
+                              const char* value_source,
+                              const char* value) {
+  if ((expected == NULL && value != NULL) ||
+      (expected != NULL && value == NULL) ||
+      (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %s\n#   Found: %s",
+             expected_source, value_source, expected, value);
+  }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 const char* expected,
+                                 const char* value_source,
+                                 const char* value) {
+  if (expected == value ||
+      (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
+             expected_source, value_source, value);
+  }
+}
+
+
+// Helper function used by the CHECK function when given pointer
+// arguments.  Should not be called directly.
+inline void CheckEqualsHelper(const char* file,
+                              int line,
+                              const char* expected_source,
+                              const void* expected,
+                              const char* value_source,
+                              const void* value) {
+  if (expected != value) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
+             expected_source, value_source,
+             expected, value);
+  }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 const void* expected,
+                                 const char* value_source,
+                                 const void* value) {
+  if (expected == value) {
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
+             expected_source, value_source, value);
+  }
+}
+
+
+inline void CheckNonEqualsHelper(const char* file,
+                                 int line,
+                                 const char* expected_source,
+                                 int64_t expected,
+                                 const char* value_source,
+                                 int64_t value) {
+  if (expected == value) {
+    // Print int64_t values in hex, as two int32s, to avoid
+    // platform-dependencies.
+    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: 0x%08x%08x",
+             expected_source, value_source,
+             static_cast<uint32_t>(value >> 32), static_cast<uint32_t>(value));
+  }
+}
+
+
+#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
+  #expected, expected, #value, value)
+
+
+#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
+  #unexpected, unexpected, #value, value)
+
+
+#define CHECK_GT(a, b) CHECK((a) > (b))
+#define CHECK_GE(a, b) CHECK((a) >= (b))
+#define CHECK_LT(a, b) CHECK((a) < (b))
+#define CHECK_LE(a, b) CHECK((a) <= (b))
+
+
+namespace v8 {
+namespace base {
+
+// Exposed for making debugging easier (to see where your function is being
+// called, just add a call to DumpBacktrace).
+void DumpBacktrace();
+
+} }  // namespace v8::base
+
+
+// The DCHECK macro is equivalent to CHECK except that it only
+// generates code in debug builds.
+#ifdef DEBUG
+#define DCHECK_RESULT(expr)    CHECK(expr)
+#define DCHECK(condition)      CHECK(condition)
+#define DCHECK_EQ(v1, v2)      CHECK_EQ(v1, v2)
+#define DCHECK_NE(v1, v2)      CHECK_NE(v1, v2)
+#define DCHECK_GT(v1, v2)      CHECK_GT(v1, v2)
+#define DCHECK_GE(v1, v2)      CHECK_GE(v1, v2)
+#define DCHECK_LT(v1, v2)      CHECK_LT(v1, v2)
+#define DCHECK_LE(v1, v2)      CHECK_LE(v1, v2)
+#else
+#define DCHECK_RESULT(expr)    (expr)
+#define DCHECK(condition)      ((void) 0)
+#define DCHECK_EQ(v1, v2)      ((void) 0)
+#define DCHECK_NE(v1, v2)      ((void) 0)
+#define DCHECK_GT(v1, v2)      ((void) 0)
+#define DCHECK_GE(v1, v2)      ((void) 0)
+#define DCHECK_LT(v1, v2)      ((void) 0)
+#define DCHECK_LE(v1, v2)      ((void) 0)
+#endif
+
+#define DCHECK_NOT_NULL(p)  DCHECK_NE(NULL, p)
+
+// "Extra checks" are lightweight checks that are enabled in some release
+// builds.
+#ifdef ENABLE_EXTRA_CHECKS
+#define EXTRA_CHECK(condition) CHECK(condition)
+#else
+#define EXTRA_CHECK(condition) ((void) 0)
+#endif
+
+#endif  // V8_BASE_LOGGING_H_
diff --git a/src/base/macros.h b/src/base/macros.h
index 736a656..cef088c 100644
--- a/src/base/macros.h
+++ b/src/base/macros.h
@@ -5,7 +5,12 @@
 #ifndef V8_BASE_MACROS_H_
 #define V8_BASE_MACROS_H_
 
+#include <cstring>
+
 #include "include/v8stdint.h"
+#include "src/base/build_config.h"
+#include "src/base/compiler-specific.h"
+#include "src/base/logging.h"
 
 
 // The expression OFFSET_OF(type, field) computes the byte-offset
@@ -18,13 +23,220 @@
   (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
 
 
-// The expression ARRAY_SIZE(a) is a compile-time constant of type
-// size_t which represents the number of elements of the given
-// array. You should only use ARRAY_SIZE on statically allocated
-// arrays.
-#define ARRAY_SIZE(a)                                   \
-  ((sizeof(a) / sizeof(*(a))) /                         \
-  static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))
+// ARRAYSIZE_UNSAFE performs essentially the same calculation as arraysize,
+// but can be used on anonymous types or types defined inside
+// functions.  It's less safe than arraysize as it accepts some
+// (although not all) pointers.  Therefore, you should use arraysize
+// whenever possible.
+//
+// The expression ARRAYSIZE_UNSAFE(a) is a compile-time constant of type
+// size_t.
+//
+// ARRAYSIZE_UNSAFE catches a few type errors.  If you see a compiler error
+//
+//   "warning: division by zero in ..."
+//
+// when using ARRAYSIZE_UNSAFE, you are (wrongfully) giving it a pointer.
+// You should only use ARRAYSIZE_UNSAFE on statically allocated arrays.
+//
+// The following comments are on the implementation details, and can
+// be ignored by the users.
+//
+// ARRAYSIZE_UNSAFE(arr) works by inspecting sizeof(arr) (the # of bytes in
+// the array) and sizeof(*(arr)) (the # of bytes in one array
+// element).  If the former is divisible by the latter, perhaps arr is
+// indeed an array, in which case the division result is the # of
+// elements in the array.  Otherwise, arr cannot possibly be an array,
+// and we generate a compiler error to prevent the code from
+// compiling.
+//
+// Since the size of bool is implementation-defined, we need to cast
+// !(sizeof(a) % sizeof(*(a))) to size_t in order to ensure the final
+// result has type size_t.
+//
+// This macro is not perfect as it wrongfully accepts certain
+// pointers, namely where the pointer size is divisible by the pointee
+// size.  Since all our code has to go through a 32-bit compiler,
+// where a pointer is 4 bytes, this means all pointers to a type whose
+// size is 3 or greater than 4 will be (righteously) rejected.
+#define ARRAYSIZE_UNSAFE(a)     \
+  ((sizeof(a) / sizeof(*(a))) / \
+   static_cast<size_t>(!(sizeof(a) % sizeof(*(a)))))  // NOLINT
+
+
+#if V8_OS_NACL
+
+// TODO(bmeurer): For some reason, the NaCl toolchain cannot handle the correct
+// definition of arraysize() below, so we have to use the unsafe version for
+// now.
+#define arraysize ARRAYSIZE_UNSAFE
+
+#else  // V8_OS_NACL
+
+// The arraysize(arr) macro returns the # of elements in an array arr.
+// The expression is a compile-time constant, and therefore can be
+// used in defining new arrays, for example.  If you use arraysize on
+// a pointer by mistake, you will get a compile-time error.
+//
+// One caveat is that arraysize() doesn't accept any array of an
+// anonymous type or a type defined inside a function.  In these rare
+// cases, you have to use the unsafe ARRAYSIZE_UNSAFE() macro below.  This is
+// due to a limitation in C++'s template system.  The limitation might
+// eventually be removed, but it hasn't happened yet.
+#define arraysize(array) (sizeof(ArraySizeHelper(array)))
+
+
+// This template function declaration is used in defining arraysize.
+// Note that the function doesn't need an implementation, as we only
+// use its type.
+template <typename T, size_t N>
+char (&ArraySizeHelper(T (&array)[N]))[N];
+
+
+#if !V8_CC_MSVC
+// That gcc wants both of these prototypes seems mysterious. VC, for
+// its part, can't decide which to use (another mystery). Matching of
+// template overloads: the final frontier.
+template <typename T, size_t N>
+char (&ArraySizeHelper(const T (&array)[N]))[N];
+#endif
+
+#endif  // V8_OS_NACL
+
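+// Illustrative usage (hypothetical array):
+//
+//   static const int kPrimes[] = {2, 3, 5, 7};
+//   for (size_t i = 0; i < arraysize(kPrimes); ++i) Use(kPrimes[i]);
+//
+// Passing a pointer instead of an array is rejected at compile time.
+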
+
+// The COMPILE_ASSERT macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   COMPILE_ASSERT(ARRAYSIZE_UNSAFE(content_type_names) == CONTENT_NUM_TYPES,
+//                  content_type_names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   COMPILE_ASSERT(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+#if V8_HAS_CXX11_STATIC_ASSERT
+
+// Under C++11, just use static_assert.
+#define COMPILE_ASSERT(expr, msg) static_assert(expr, #msg)
+
+#else
+
+template <bool>
+struct CompileAssert {};
+
+#define COMPILE_ASSERT(expr, msg)                \
+  typedef CompileAssert<static_cast<bool>(expr)> \
+      msg[static_cast<bool>(expr) ? 1 : -1] ALLOW_UNUSED
+
+// Implementation details of COMPILE_ASSERT:
+//
+// - COMPILE_ASSERT works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//     #define COMPILE_ASSERT(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     COMPILE_ASSERT(foo, msg); // not supposed to compile as foo is
+//                               // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers will refuse to compile
+//
+//     COMPILE_ASSERT(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+#endif
+
+
+// bit_cast<Dest,Source> is a template function that implements the
+// equivalent of "*reinterpret_cast<Dest*>(&source)".  We need this in
+// very low-level functions like the protobuf library and fast math
+// support.
+//
+//   float f = 3.14159265358979;
+//   int i = bit_cast<int32>(f);
+//   // i = 0x40490fdb
+//
+// The classical address-casting method is:
+//
+//   // WRONG
+//   float f = 3.14159265358979;            // WRONG
+//   int i = * reinterpret_cast<int*>(&f);  // WRONG
+//
+// The address-casting method actually produces undefined behavior
+// according to ISO C++ specification section 3.10 -15-.  Roughly, this
+// section says: if an object in memory has one type, and a program
+// accesses it with a different type, then the result is undefined
+// behavior for most values of "different type".
+//
+// This is true for any cast syntax, either *(int*)&f or
+// *reinterpret_cast<int*>(&f).  And it is particularly true for
+// conversions between integral lvalues and floating-point lvalues.
+//
+// The purpose of 3.10 -15- is to allow optimizing compilers to assume
+// that expressions with different types refer to different memory.  gcc
+// 4.0.1 has an optimizer that takes advantage of this.  So a
+// non-conforming program quietly produces wildly incorrect output.
+//
+// The problem is not the use of reinterpret_cast.  The problem is type
+// punning: holding an object in memory of one type and reading its bits
+// back using a different type.
+//
+// The C++ standard is more subtle and complex than this, but that
+// is the basic idea.
+//
+// Anyways ...
+//
+// bit_cast<> calls memcpy() which is blessed by the standard,
+// especially by the example in section 3.9 .  Also, of course,
+// bit_cast<> wraps up the nasty logic in one place.
+//
+// Fortunately memcpy() is very fast.  In optimized mode, with a
+// constant size, gcc 2.95.3, gcc 4.0.1, and msvc 7.1 produce inline
+// code with the minimal amount of data movement.  On a 32-bit system,
+// memcpy(d,s,4) compiles to one load and one store, and memcpy(d,s,8)
+// compiles to two loads and two stores.
+//
+// I tested this code with gcc 2.95.3, gcc 4.0.1, icc 8.1, and msvc 7.1.
+//
+// WARNING: if Dest or Source is a non-POD type, the result of the memcpy
+// is likely to surprise you.
+template <class Dest, class Source>
+V8_INLINE Dest bit_cast(Source const& source) {
+  COMPILE_ASSERT(sizeof(Dest) == sizeof(Source), VerifySizesAreEqual);
+
+  Dest dest;
+  memcpy(&dest, &source, sizeof(dest));
+  return dest;
+}
 
 
 // A macro to disallow the evil copy constructor and operator= functions
@@ -50,8 +262,8 @@
 #define NO_INLINE(declarator) V8_NOINLINE declarator
 
 
-// Newly written code should use V8_WARN_UNUSED_RESULT.
-#define MUST_USE_RESULT V8_WARN_UNUSED_RESULT
+// Newly written code should use WARN_UNUSED_RESULT.
+#define MUST_USE_RESULT WARN_UNUSED_RESULT
 
 
 // Define V8_USE_ADDRESS_SANITIZER macros.
@@ -99,7 +311,7 @@
 #define STATIC_ASSERT(test)                                                    \
   typedef                                                                     \
     StaticAssertionHelper<sizeof(StaticAssertion<static_cast<bool>((test))>)> \
-    SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) V8_UNUSED
+    SEMI_STATIC_JOIN(__StaticAssertTypedef__, __LINE__) ALLOW_UNUSED
 
 #endif
 
@@ -112,9 +324,88 @@
 
 #define IS_POWER_OF_TWO(x) ((x) != 0 && (((x) & ((x) - 1)) == 0))
 
+
+// Define our own macros for writing 64-bit constants.  This is less fragile
+// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
+// works on compilers that don't have it (like MSVC).
+#if V8_CC_MSVC
+# define V8_UINT64_C(x)   (x ## UI64)
+# define V8_INT64_C(x)    (x ## I64)
+# if V8_HOST_ARCH_64_BIT
+#  define V8_INTPTR_C(x)  (x ## I64)
+#  define V8_PTR_PREFIX   "ll"
+# else
+#  define V8_INTPTR_C(x)  (x)
+#  define V8_PTR_PREFIX   ""
+# endif  // V8_HOST_ARCH_64_BIT
+#elif V8_CC_MINGW64
+# define V8_UINT64_C(x)   (x ## ULL)
+# define V8_INT64_C(x)    (x ## LL)
+# define V8_INTPTR_C(x)   (x ## LL)
+# define V8_PTR_PREFIX    "I64"
+#elif V8_HOST_ARCH_64_BIT
+# if V8_OS_MACOSX
+#  define V8_UINT64_C(x)   (x ## ULL)
+#  define V8_INT64_C(x)    (x ## LL)
+# else
+#  define V8_UINT64_C(x)   (x ## UL)
+#  define V8_INT64_C(x)    (x ## L)
+# endif
+# define V8_INTPTR_C(x)   (x ## L)
+# define V8_PTR_PREFIX    "l"
+#else
+# define V8_UINT64_C(x)   (x ## ULL)
+# define V8_INT64_C(x)    (x ## LL)
+# define V8_INTPTR_C(x)   (x)
+# define V8_PTR_PREFIX    ""
+#endif
+
+#define V8PRIxPTR V8_PTR_PREFIX "x"
+#define V8PRIdPTR V8_PTR_PREFIX "d"
+#define V8PRIuPTR V8_PTR_PREFIX "u"
+
+// Fix for Mac OS X defining uintptr_t as "unsigned long":
+#if V8_OS_MACOSX
+#undef V8PRIxPTR
+#define V8PRIxPTR "lx"
+#endif
+
 // The following macro works on both 32 and 64-bit platforms.
 // Usage: instead of writing 0x1234567890123456
 //      write V8_2PART_UINT64_C(0x12345678,90123456);
 #define V8_2PART_UINT64_C(a, b) (((static_cast<uint64_t>(a) << 32) + 0x##b##u))
 
+
+// Compute the 0-relative offset of some absolute value x of type T.
+// This allows conversion of Addresses and integral types into
+// 0-relative int offsets.
+template <typename T>
+inline intptr_t OffsetFrom(T x) {
+  return x - static_cast<T>(0);
+}
+
+
+// Compute the absolute value of type T for some 0-relative offset x.
+// This allows conversion of 0-relative int offsets into Addresses and
+// integral types.
+template <typename T>
+inline T AddressFrom(intptr_t x) {
+  return static_cast<T>(static_cast<T>(0) + x);
+}
+
+
+// Return the largest multiple of m which is <= x.
+template <typename T>
+inline T RoundDown(T x, intptr_t m) {
+  DCHECK(IS_POWER_OF_TWO(m));
+  return AddressFrom<T>(OffsetFrom(x) & -m);
+}
+
+
+// Return the smallest multiple of m which is >= x.
+template <typename T>
+inline T RoundUp(T x, intptr_t m) {
+  return RoundDown<T>(static_cast<T>(x + m - 1), m);
+}
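+
+// For example (illustrative): RoundDown(13, 8) == 8 and RoundUp(13, 8) == 16.
+// |m| must be a power of two, which RoundDown checks in debug mode.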
+
 #endif   // V8_BASE_MACROS_H_
diff --git a/src/base/platform/condition-variable-unittest.cc b/src/base/platform/condition-variable-unittest.cc
new file mode 100644
index 0000000..fe0ad2a
--- /dev/null
+++ b/src/base/platform/condition-variable-unittest.cc
@@ -0,0 +1,301 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/condition-variable.h"
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(ConditionVariable, WaitForAfterNotifyOnSameThread) {
+  for (int n = 0; n < 10; ++n) {
+    Mutex mutex;
+    ConditionVariable cv;
+
+    LockGuard<Mutex> lock_guard(&mutex);
+
+    cv.NotifyOne();
+    EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
+
+    cv.NotifyAll();
+    EXPECT_FALSE(cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
+  }
+}
+
+
+namespace {
+
+class ThreadWithMutexAndConditionVariable FINAL : public Thread {
+ public:
+  ThreadWithMutexAndConditionVariable()
+      : Thread(Options("ThreadWithMutexAndConditionVariable")),
+        running_(false),
+        finished_(false) {}
+  virtual ~ThreadWithMutexAndConditionVariable() {}
+
+  virtual void Run() OVERRIDE {
+    LockGuard<Mutex> lock_guard(&mutex_);
+    running_ = true;
+    cv_.NotifyOne();
+    while (running_) {
+      cv_.Wait(&mutex_);
+    }
+    finished_ = true;
+    cv_.NotifyAll();
+  }
+
+  bool running_;
+  bool finished_;
+  ConditionVariable cv_;
+  Mutex mutex_;
+};
+
+}  // namespace
+
+
+TEST(ConditionVariable, MultipleThreadsWithSeparateConditionVariables) {
+  static const int kThreadCount = 128;
+  ThreadWithMutexAndConditionVariable threads[kThreadCount];
+
+  for (int n = 0; n < kThreadCount; ++n) {
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    EXPECT_FALSE(threads[n].running_);
+    EXPECT_FALSE(threads[n].finished_);
+    threads[n].Start();
+    // Wait for nth thread to start.
+    while (!threads[n].running_) {
+      threads[n].cv_.Wait(&threads[n].mutex_);
+    }
+  }
+
+  for (int n = kThreadCount - 1; n >= 0; --n) {
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    EXPECT_TRUE(threads[n].running_);
+    EXPECT_FALSE(threads[n].finished_);
+  }
+
+  for (int n = 0; n < kThreadCount; ++n) {
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    EXPECT_TRUE(threads[n].running_);
+    EXPECT_FALSE(threads[n].finished_);
+    // Tell the nth thread to quit.
+    threads[n].running_ = false;
+    threads[n].cv_.NotifyOne();
+  }
+
+  for (int n = kThreadCount - 1; n >= 0; --n) {
+    // Wait for nth thread to quit.
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    while (!threads[n].finished_) {
+      threads[n].cv_.Wait(&threads[n].mutex_);
+    }
+    EXPECT_FALSE(threads[n].running_);
+    EXPECT_TRUE(threads[n].finished_);
+  }
+
+  for (int n = 0; n < kThreadCount; ++n) {
+    threads[n].Join();
+    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
+    EXPECT_FALSE(threads[n].running_);
+    EXPECT_TRUE(threads[n].finished_);
+  }
+}
+
+
+namespace {
+
+class ThreadWithSharedMutexAndConditionVariable FINAL : public Thread {
+ public:
+  ThreadWithSharedMutexAndConditionVariable()
+      : Thread(Options("ThreadWithSharedMutexAndConditionVariable")),
+        running_(false),
+        finished_(false),
+        cv_(NULL),
+        mutex_(NULL) {}
+  virtual ~ThreadWithSharedMutexAndConditionVariable() {}
+
+  virtual void Run() OVERRIDE {
+    LockGuard<Mutex> lock_guard(mutex_);
+    running_ = true;
+    cv_->NotifyAll();
+    while (running_) {
+      cv_->Wait(mutex_);
+    }
+    finished_ = true;
+    cv_->NotifyAll();
+  }
+
+  bool running_;
+  bool finished_;
+  ConditionVariable* cv_;
+  Mutex* mutex_;
+};
+
+}  // namespace
+
+
+TEST(ConditionVariable, MultipleThreadsWithSharedMutexAndConditionVariable) {
+  static const int kThreadCount = 128;
+  ThreadWithSharedMutexAndConditionVariable threads[kThreadCount];
+  ConditionVariable cv;
+  Mutex mutex;
+
+  for (int n = 0; n < kThreadCount; ++n) {
+    threads[n].mutex_ = &mutex;
+    threads[n].cv_ = &cv;
+  }
+
+  // Start all threads.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = 0; n < kThreadCount; ++n) {
+      EXPECT_FALSE(threads[n].running_);
+      EXPECT_FALSE(threads[n].finished_);
+      threads[n].Start();
+    }
+  }
+
+  // Wait for all threads to start.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = kThreadCount - 1; n >= 0; --n) {
+      while (!threads[n].running_) {
+        cv.Wait(&mutex);
+      }
+    }
+  }
+
+  // Make sure that all threads are running.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = 0; n < kThreadCount; ++n) {
+      EXPECT_TRUE(threads[n].running_);
+      EXPECT_FALSE(threads[n].finished_);
+    }
+  }
+
+  // Tell all threads to quit.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = kThreadCount - 1; n >= 0; --n) {
+      EXPECT_TRUE(threads[n].running_);
+      EXPECT_FALSE(threads[n].finished_);
+      // Tell the nth thread to quit.
+      threads[n].running_ = false;
+    }
+    cv.NotifyAll();
+  }
+
+  // Wait for all threads to quit.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = 0; n < kThreadCount; ++n) {
+      while (!threads[n].finished_) {
+        cv.Wait(&mutex);
+      }
+    }
+  }
+
+  // Make sure all threads are finished.
+  {
+    LockGuard<Mutex> lock_guard(&mutex);
+    for (int n = kThreadCount - 1; n >= 0; --n) {
+      EXPECT_FALSE(threads[n].running_);
+      EXPECT_TRUE(threads[n].finished_);
+    }
+  }
+
+  // Join all threads.
+  for (int n = 0; n < kThreadCount; ++n) {
+    threads[n].Join();
+  }
+}
+
+
+namespace {
+
+class LoopIncrementThread FINAL : public Thread {
+ public:
+  LoopIncrementThread(int rem, int* counter, int limit, int thread_count,
+                      ConditionVariable* cv, Mutex* mutex)
+      : Thread(Options("LoopIncrementThread")),
+        rem_(rem),
+        counter_(counter),
+        limit_(limit),
+        thread_count_(thread_count),
+        cv_(cv),
+        mutex_(mutex) {
+    EXPECT_LT(rem, thread_count);
+    EXPECT_EQ(0, limit % thread_count);
+  }
+
+  virtual void Run() OVERRIDE {
+    int last_count = -1;
+    while (true) {
+      LockGuard<Mutex> lock_guard(mutex_);
+      int count = *counter_;
+      while (count % thread_count_ != rem_ && count < limit_) {
+        cv_->Wait(mutex_);
+        count = *counter_;
+      }
+      if (count >= limit_) break;
+      EXPECT_EQ(*counter_, count);
+      if (last_count != -1) {
+        EXPECT_EQ(last_count + (thread_count_ - 1), count);
+      }
+      count++;
+      *counter_ = count;
+      last_count = count;
+      cv_->NotifyAll();
+    }
+  }
+
+ private:
+  const int rem_;
+  int* counter_;
+  const int limit_;
+  const int thread_count_;
+  ConditionVariable* cv_;
+  Mutex* mutex_;
+};
+
+}  // namespace
+
+
+TEST(ConditionVariable, LoopIncrement) {
+  static const int kMaxThreadCount = 16;
+  Mutex mutex;
+  ConditionVariable cv;
+  for (int thread_count = 1; thread_count < kMaxThreadCount; ++thread_count) {
+    int limit = thread_count * 10;
+    int counter = 0;
+
+    // Setup the threads.
+    Thread** threads = new Thread* [thread_count];
+    for (int n = 0; n < thread_count; ++n) {
+      threads[n] = new LoopIncrementThread(n, &counter, limit, thread_count,
+                                           &cv, &mutex);
+    }
+
+    // Start all threads.
+    for (int n = thread_count - 1; n >= 0; --n) {
+      threads[n]->Start();
+    }
+
+    // Join and cleanup all threads.
+    for (int n = 0; n < thread_count; ++n) {
+      threads[n]->Join();
+      delete threads[n];
+    }
+    delete[] threads;
+
+    EXPECT_EQ(limit, counter);
+  }
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/condition-variable.cc b/src/base/platform/condition-variable.cc
new file mode 100644
index 0000000..4547b66
--- /dev/null
+++ b/src/base/platform/condition-variable.cc
@@ -0,0 +1,322 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/condition-variable.h"
+
+#include <errno.h>
+#include <time.h>
+
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_POSIX
+
+ConditionVariable::ConditionVariable() {
+  // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+  // hack to support cross-compiling Chrome for Android in AOSP. Remove
+  // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+     (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+  // On Free/Net/OpenBSD and Linux with glibc we can change the time
+  // source for pthread_cond_timedwait() to use the monotonic clock.
+  pthread_condattr_t attr;
+  int result = pthread_condattr_init(&attr);
+  DCHECK_EQ(0, result);
+  result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
+  DCHECK_EQ(0, result);
+  result = pthread_cond_init(&native_handle_, &attr);
+  DCHECK_EQ(0, result);
+  result = pthread_condattr_destroy(&attr);
+#else
+  int result = pthread_cond_init(&native_handle_, NULL);
+#endif
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+ConditionVariable::~ConditionVariable() {
+  int result = pthread_cond_destroy(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void ConditionVariable::NotifyOne() {
+  int result = pthread_cond_signal(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void ConditionVariable::NotifyAll() {
+  int result = pthread_cond_broadcast(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+  mutex->AssertHeldAndUnmark();
+  int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
+  DCHECK_EQ(0, result);
+  USE(result);
+  mutex->AssertUnheldAndMark();
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+  struct timespec ts;
+  int result;
+  mutex->AssertHeldAndUnmark();
+#if V8_OS_MACOSX
+  // Mac OS X provides pthread_cond_timedwait_relative_np(), which does
+  // not depend on the real time clock, which is what you really WANT here!
+  ts = rel_time.ToTimespec();
+  DCHECK_GE(ts.tv_sec, 0);
+  DCHECK_GE(ts.tv_nsec, 0);
+  result = pthread_cond_timedwait_relative_np(
+      &native_handle_, &mutex->native_handle(), &ts);
+#else
+  // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
+  // hack to support cross-compiling Chrome for Android in AOSP. Remove
+  // this once AOSP is fixed.
+#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
+     (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
+  // On Free/Net/OpenBSD and Linux with glibc we can change the time
+  // source for pthread_cond_timedwait() to use the monotonic clock.
+  result = clock_gettime(CLOCK_MONOTONIC, &ts);
+  DCHECK_EQ(0, result);
+  Time now = Time::FromTimespec(ts);
+#else
+  // The timeout argument to pthread_cond_timedwait() is in absolute time.
+  Time now = Time::NowFromSystemTime();
+#endif
+  Time end_time = now + rel_time;
+  DCHECK_GE(end_time, now);
+  ts = end_time.ToTimespec();
+  result = pthread_cond_timedwait(
+      &native_handle_, &mutex->native_handle(), &ts);
+#endif  // V8_OS_MACOSX
+  mutex->AssertUnheldAndMark();
+  if (result == ETIMEDOUT) {
+    return false;
+  }
+  DCHECK_EQ(0, result);
+  return true;
+}
+
+#elif V8_OS_WIN
+
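+// Windows only gained native condition variables (CONDITION_VARIABLE) with
+// Vista, so this implementation emulates one: each waiting thread parks on a
+// manual-reset event taken from a free list, and notification signals the
+// events on the wait list.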
+struct ConditionVariable::Event {
+  Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
+    DCHECK(handle_ != NULL);
+  }
+
+  ~Event() {
+    BOOL ok = ::CloseHandle(handle_);
+    DCHECK(ok);
+    USE(ok);
+  }
+
+  bool WaitFor(DWORD timeout_ms) {
+    DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
+    if (result == WAIT_OBJECT_0) {
+      return true;
+    }
+    DCHECK(result == WAIT_TIMEOUT);
+    return false;
+  }
+
+  HANDLE handle_;
+  Event* next_;
+  HANDLE thread_;
+  volatile bool notified_;
+};
+
+
+ConditionVariable::NativeHandle::~NativeHandle() {
+  DCHECK(waitlist_ == NULL);
+
+  while (freelist_ != NULL) {
+    Event* event = freelist_;
+    freelist_ = event->next_;
+    delete event;
+  }
+}
+
+
+ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
+  LockGuard<Mutex> lock_guard(&mutex_);
+
+  // Grab an event from the free list or create a new one.
+  Event* event = freelist_;
+  if (event != NULL) {
+    freelist_ = event->next_;
+  } else {
+    event = new Event;
+  }
+  event->thread_ = GetCurrentThread();
+  event->notified_ = false;
+
+#ifdef DEBUG
+  // The event must not be on the wait list.
+  for (Event* we = waitlist_; we != NULL; we = we->next_) {
+    DCHECK_NE(event, we);
+  }
+#endif
+
+  // Prepend the event to the wait list.
+  event->next_ = waitlist_;
+  waitlist_ = event;
+
+  return event;
+}
+
+
+void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
+  LockGuard<Mutex> lock_guard(&mutex_);
+
+  // Remove the event from the wait list.
+  for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
+    DCHECK_NE(NULL, *wep);
+    if (*wep == event) {
+      *wep = event->next_;
+      break;
+    }
+  }
+
+#ifdef DEBUG
+  // The event must not be on the free list.
+  for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
+    DCHECK_NE(event, fe);
+  }
+#endif
+
+  // Reset the event.
+  BOOL ok = ::ResetEvent(event->handle_);
+  DCHECK(ok);
+  USE(ok);
+
+  // Insert the event into the free list.
+  event->next_ = freelist_;
+  freelist_ = event;
+
+  // Forward signals delivered after the timeout to the next waiting event.
+  if (!result && event->notified_ && waitlist_ != NULL) {
+    ok = ::SetEvent(waitlist_->handle_);
+    DCHECK(ok);
+    USE(ok);
+    waitlist_->notified_ = true;
+  }
+}
+
+
+ConditionVariable::ConditionVariable() {}
+
+
+ConditionVariable::~ConditionVariable() {}
+
+
+void ConditionVariable::NotifyOne() {
+  // Notify the thread with the highest priority in the waitlist
+  // that was not already signalled.
+  LockGuard<Mutex> lock_guard(native_handle_.mutex());
+  Event* highest_event = NULL;
+  int highest_priority = std::numeric_limits<int>::min();
+  for (Event* event = native_handle().waitlist();
+       event != NULL;
+       event = event->next_) {
+    if (event->notified_) {
+      continue;
+    }
+    int priority = GetThreadPriority(event->thread_);
+    DCHECK_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
+    if (priority >= highest_priority) {
+      highest_priority = priority;
+      highest_event = event;
+    }
+  }
+  if (highest_event != NULL) {
+    DCHECK(!highest_event->notified_);
+    ::SetEvent(highest_event->handle_);
+    highest_event->notified_ = true;
+  }
+}
+
+
+void ConditionVariable::NotifyAll() {
+  // Notify all threads on the waitlist.
+  LockGuard<Mutex> lock_guard(native_handle_.mutex());
+  for (Event* event = native_handle().waitlist();
+       event != NULL;
+       event = event->next_) {
+    if (!event->notified_) {
+      ::SetEvent(event->handle_);
+      event->notified_ = true;
+    }
+  }
+}
+
+
+void ConditionVariable::Wait(Mutex* mutex) {
+  // Create and setup the wait event.
+  Event* event = native_handle_.Pre();
+
+  // Release the user mutex.
+  mutex->Unlock();
+
+  // Wait on the wait event.
+  while (!event->WaitFor(INFINITE))
+    ;
+
+  // Reacquire the user mutex.
+  mutex->Lock();
+
+  // Release the wait event (we must have been notified).
+  DCHECK(event->notified_);
+  native_handle_.Post(event, true);
+}
+
+
+bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
+  // Create and setup the wait event.
+  Event* event = native_handle_.Pre();
+
+  // Release the user mutex.
+  mutex->Unlock();
+
+  // Wait on the wait event.
+  TimeTicks now = TimeTicks::Now();
+  TimeTicks end = now + rel_time;
+  bool result = false;
+  while (true) {
+    int64_t msec = (end - now).InMilliseconds();
+    if (msec >= static_cast<int64_t>(INFINITE)) {
+      result = event->WaitFor(INFINITE - 1);
+      if (result) {
+        break;
+      }
+      now = TimeTicks::Now();
+    } else {
+      result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
+      break;
+    }
+  }
+
+  // Reacquire the user mutex.
+  mutex->Lock();
+
+  // Release the wait event.
+  DCHECK(!result || event->notified_);
+  native_handle_.Post(event, result);
+
+  return result;
+}
+
+#endif  // V8_OS_POSIX
+
+} }  // namespace v8::base
diff --git a/src/base/platform/condition-variable.h b/src/base/platform/condition-variable.h
new file mode 100644
index 0000000..b5a6c3f
--- /dev/null
+++ b/src/base/platform/condition-variable.h
@@ -0,0 +1,118 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
+#define V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
+
+#include "src/base/lazy-instance.h"
+#include "src/base/platform/mutex.h"
+
+namespace v8 {
+namespace base {
+
+// Forward declarations.
+class ConditionVariableEvent;
+class TimeDelta;
+
+// -----------------------------------------------------------------------------
+// ConditionVariable
+//
+// This class is a synchronization primitive that can be used to block a thread,
+// or multiple threads at the same time, until:
+// - a notification is received from another thread,
+// - a timeout expires, or
+// - a spurious wakeup occurs
+// Any thread that intends to wait on a ConditionVariable has to acquire a lock
+// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release
+// the mutex and suspend the execution of the calling thread. When the condition
+// variable is notified, the thread is awakened, and the mutex is reacquired.
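+//
+// Illustrative wait loop (hypothetical |ready| flag guarded by |mutex|):
+//
+//   LockGuard<Mutex> lock_guard(&mutex);
+//   while (!ready) cv.Wait(&mutex);  // Loop to tolerate spurious wakeups.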
+
+class ConditionVariable FINAL {
+ public:
+  ConditionVariable();
+  ~ConditionVariable();
+
+  // If any threads are waiting on this condition variable, calling
+  // |NotifyOne()| unblocks one of the waiting threads.
+  void NotifyOne();
+
+  // Unblocks all threads currently waiting for this condition variable.
+  void NotifyAll();
+
+  // |Wait()| causes the calling thread to block until the condition variable is
+  // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks
+  // the current executing thread, and adds it to the list of threads waiting on
+  // this condition variable. The thread will be unblocked when |NotifyAll()| or
+  // |NotifyOne()| is executed. It may also be unblocked spuriously. When
+  // unblocked, regardless of the reason, the lock on the mutex is reacquired
+  // and |Wait()| exits.
+  void Wait(Mutex* mutex);
+
+  // Atomically releases the mutex, blocks the current executing thread, and
+  // adds it to the list of threads waiting on this condition variable. The
+  // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed,
+  // or when the relative timeout |rel_time| expires. It may also be unblocked
+  // spuriously. When unblocked, regardless of the reason, the lock on the mutex
+  // is reacquired and |WaitFor()| exits. Returns true if the condition variable
+  // was notified prior to the timeout.
+  bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) WARN_UNUSED_RESULT;
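+  //
+  // Illustrative use (hypothetical |ready| flag guarded by |mutex|):
+  //
+  //   while (!ready) {
+  //     if (!cv.WaitFor(&mutex, timeout)) break;  // Timed out.
+  //   }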
+
+  // The implementation-defined native handle type.
+#if V8_OS_POSIX
+  typedef pthread_cond_t NativeHandle;
+#elif V8_OS_WIN
+  struct Event;
+  class NativeHandle FINAL {
+   public:
+    NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
+    ~NativeHandle();
+
+    Event* Pre() WARN_UNUSED_RESULT;
+    void Post(Event* event, bool result);
+
+    Mutex* mutex() { return &mutex_; }
+    Event* waitlist() { return waitlist_; }
+
+   private:
+    Event* waitlist_;
+    Event* freelist_;
+    Mutex mutex_;
+
+    DISALLOW_COPY_AND_ASSIGN(NativeHandle);
+  };
+#endif
+
+  NativeHandle& native_handle() {
+    return native_handle_;
+  }
+  const NativeHandle& native_handle() const {
+    return native_handle_;
+  }
+
+ private:
+  NativeHandle native_handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
+};
+
+
+// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+//   static LazyConditionVariable my_condvar =
+//       LAZY_CONDITION_VARIABLE_INITIALIZER;
+//
+//   void my_function() {
+//     LockGuard<Mutex> lock_guard(&my_mutex);
+//     my_condvar.Pointer()->Wait(&my_mutex);
+//   }
+typedef LazyStaticInstance<
+    ConditionVariable, DefaultConstructTrait<ConditionVariable>,
+    ThreadSafeInitOnceTrait>::type LazyConditionVariable;
+
+#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_CONDITION_VARIABLE_H_
diff --git a/src/base/platform/elapsed-timer.h b/src/base/platform/elapsed-timer.h
new file mode 100644
index 0000000..dccba3a
--- /dev/null
+++ b/src/base/platform/elapsed-timer.h
@@ -0,0 +1,97 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_ELAPSED_TIMER_H_
+#define V8_BASE_PLATFORM_ELAPSED_TIMER_H_
+
+#include "src/base/logging.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+class ElapsedTimer FINAL {
+ public:
+#ifdef DEBUG
+  ElapsedTimer() : started_(false) {}
+#endif
+
+  // Starts this timer. Once started a timer can be checked with
+  // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
+  // This method must not be called on an already started timer.
+  void Start() {
+    DCHECK(!IsStarted());
+    start_ticks_ = Now();
+#ifdef DEBUG
+    started_ = true;
+#endif
+    DCHECK(IsStarted());
+  }
+
+  // Stops this timer. Must not be called on a timer that was not
+  // started before.
+  void Stop() {
+    DCHECK(IsStarted());
+    start_ticks_ = TimeTicks();
+#ifdef DEBUG
+    started_ = false;
+#endif
+    DCHECK(!IsStarted());
+  }
+
+  // Returns |true| if this timer was started previously.
+  bool IsStarted() const {
+    DCHECK(started_ || start_ticks_.IsNull());
+    DCHECK(!started_ || !start_ticks_.IsNull());
+    return !start_ticks_.IsNull();
+  }
+
+  // Restarts the timer and returns the time elapsed since the previous start.
+  // This method is equivalent to obtaining the elapsed time with |Elapsed()|
+  // and then starting the timer again, but does so in one single operation,
+  // avoiding the need to obtain the clock value twice. It may only be called
+  // on a previously started timer.
+  TimeDelta Restart() {
+    DCHECK(IsStarted());
+    TimeTicks ticks = Now();
+    TimeDelta elapsed = ticks - start_ticks_;
+    DCHECK(elapsed.InMicroseconds() >= 0);
+    start_ticks_ = ticks;
+    DCHECK(IsStarted());
+    return elapsed;
+  }
+
+  // Returns the time elapsed since the previous start. This method may only
+  // be called on a previously started timer.
+  TimeDelta Elapsed() const {
+    DCHECK(IsStarted());
+    TimeDelta elapsed = Now() - start_ticks_;
+    DCHECK(elapsed.InMicroseconds() >= 0);
+    return elapsed;
+  }
+
+  // Returns |true| if the specified |time_delta| has elapsed since the
+  // previous start, or |false| if not. This method may only be called on
+  // a previously started timer.
+  bool HasExpired(TimeDelta time_delta) const {
+    DCHECK(IsStarted());
+    return Elapsed() >= time_delta;
+  }
+
+ private:
+  static V8_INLINE TimeTicks Now() {
+    TimeTicks now = TimeTicks::HighResolutionNow();
+    DCHECK(!now.IsNull());
+    return now;
+  }
+
+  TimeTicks start_ticks_;
+#ifdef DEBUG
+  bool started_;
+#endif
+};
+
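+// Illustrative usage (hypothetical work function):
+//
+//   ElapsedTimer timer;
+//   timer.Start();
+//   DoWork();
+//   if (timer.HasExpired(TimeDelta::FromMilliseconds(5))) { /* Slow path. */ }
+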
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_ELAPSED_TIMER_H_
diff --git a/src/base/platform/mutex-unittest.cc b/src/base/platform/mutex-unittest.cc
new file mode 100644
index 0000000..5af5efb
--- /dev/null
+++ b/src/base/platform/mutex-unittest.cc
@@ -0,0 +1,91 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/mutex.h"
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(Mutex, LockGuardMutex) {
+  Mutex mutex;
+  { LockGuard<Mutex> lock_guard(&mutex); }
+  { LockGuard<Mutex> lock_guard(&mutex); }
+}
+
+
+TEST(Mutex, LockGuardRecursiveMutex) {
+  RecursiveMutex recursive_mutex;
+  { LockGuard<RecursiveMutex> lock_guard(&recursive_mutex); }
+  {
+    LockGuard<RecursiveMutex> lock_guard1(&recursive_mutex);
+    LockGuard<RecursiveMutex> lock_guard2(&recursive_mutex);
+  }
+}
+
+
+TEST(Mutex, LockGuardLazyMutex) {
+  LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
+  { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
+  { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer()); }
+}
+
+
+TEST(Mutex, LockGuardLazyRecursiveMutex) {
+  LazyRecursiveMutex lazy_recursive_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+  { LockGuard<RecursiveMutex> lock_guard(lazy_recursive_mutex.Pointer()); }
+  {
+    LockGuard<RecursiveMutex> lock_guard1(lazy_recursive_mutex.Pointer());
+    LockGuard<RecursiveMutex> lock_guard2(lazy_recursive_mutex.Pointer());
+  }
+}
+
+
+TEST(Mutex, MultipleMutexes) {
+  Mutex mutex1;
+  Mutex mutex2;
+  Mutex mutex3;
+  // Order 1
+  mutex1.Lock();
+  mutex2.Lock();
+  mutex3.Lock();
+  mutex1.Unlock();
+  mutex2.Unlock();
+  mutex3.Unlock();
+  // Order 2
+  mutex1.Lock();
+  mutex2.Lock();
+  mutex3.Lock();
+  mutex3.Unlock();
+  mutex2.Unlock();
+  mutex1.Unlock();
+}
+
+
+TEST(Mutex, MultipleRecursiveMutexes) {
+  RecursiveMutex recursive_mutex1;
+  RecursiveMutex recursive_mutex2;
+  // Order 1
+  recursive_mutex1.Lock();
+  recursive_mutex2.Lock();
+  EXPECT_TRUE(recursive_mutex1.TryLock());
+  EXPECT_TRUE(recursive_mutex2.TryLock());
+  recursive_mutex1.Unlock();
+  recursive_mutex1.Unlock();
+  recursive_mutex2.Unlock();
+  recursive_mutex2.Unlock();
+  // Order 2
+  recursive_mutex1.Lock();
+  EXPECT_TRUE(recursive_mutex1.TryLock());
+  recursive_mutex2.Lock();
+  EXPECT_TRUE(recursive_mutex2.TryLock());
+  recursive_mutex2.Unlock();
+  recursive_mutex1.Unlock();
+  recursive_mutex2.Unlock();
+  recursive_mutex1.Unlock();
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/mutex.cc b/src/base/platform/mutex.cc
new file mode 100644
index 0000000..8b1e305
--- /dev/null
+++ b/src/base/platform/mutex.cc
@@ -0,0 +1,191 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/mutex.h"
+
+#include <errno.h>
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_POSIX
+
+static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
+  int result;
+#if defined(DEBUG)
+  // Use an error checking mutex in debug mode.
+  pthread_mutexattr_t attr;
+  result = pthread_mutexattr_init(&attr);
+  DCHECK_EQ(0, result);
+  result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+  DCHECK_EQ(0, result);
+  result = pthread_mutex_init(mutex, &attr);
+  DCHECK_EQ(0, result);
+  result = pthread_mutexattr_destroy(&attr);
+#else
+  // Use a fast mutex (default attributes).
+  result = pthread_mutex_init(mutex, NULL);
+#endif  // defined(DEBUG)
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
+  pthread_mutexattr_t attr;
+  int result = pthread_mutexattr_init(&attr);
+  DCHECK_EQ(0, result);
+  result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+  DCHECK_EQ(0, result);
+  result = pthread_mutex_init(mutex, &attr);
+  DCHECK_EQ(0, result);
+  result = pthread_mutexattr_destroy(&attr);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
+  int result = pthread_mutex_destroy(mutex);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
+  int result = pthread_mutex_lock(mutex);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
+  int result = pthread_mutex_unlock(mutex);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
+  int result = pthread_mutex_trylock(mutex);
+  if (result == EBUSY) {
+    return false;
+  }
+  DCHECK_EQ(0, result);
+  return true;
+}
+
+#elif V8_OS_WIN
+
+static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
+  InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
+  InitializeCriticalSection(cs);
+}
+
+
+static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
+  DeleteCriticalSection(cs);
+}
+
+
+static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
+  EnterCriticalSection(cs);
+}
+
+
+static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
+  LeaveCriticalSection(cs);
+}
+
+
+static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
+  return TryEnterCriticalSection(cs) != FALSE;
+}
+
+#endif  // V8_OS_POSIX
+
+
+Mutex::Mutex() {
+  InitializeNativeHandle(&native_handle_);
+#ifdef DEBUG
+  level_ = 0;
+#endif
+}
+
+
+Mutex::~Mutex() {
+  DestroyNativeHandle(&native_handle_);
+  DCHECK_EQ(0, level_);
+}
+
+
+void Mutex::Lock() {
+  LockNativeHandle(&native_handle_);
+  AssertUnheldAndMark();
+}
+
+
+void Mutex::Unlock() {
+  AssertHeldAndUnmark();
+  UnlockNativeHandle(&native_handle_);
+}
+
+
+bool Mutex::TryLock() {
+  if (!TryLockNativeHandle(&native_handle_)) {
+    return false;
+  }
+  AssertUnheldAndMark();
+  return true;
+}
+
+
+RecursiveMutex::RecursiveMutex() {
+  InitializeRecursiveNativeHandle(&native_handle_);
+#ifdef DEBUG
+  level_ = 0;
+#endif
+}
+
+
+RecursiveMutex::~RecursiveMutex() {
+  DestroyNativeHandle(&native_handle_);
+  DCHECK_EQ(0, level_);
+}
+
+
+void RecursiveMutex::Lock() {
+  LockNativeHandle(&native_handle_);
+#ifdef DEBUG
+  DCHECK_LE(0, level_);
+  level_++;
+#endif
+}
+
+
+void RecursiveMutex::Unlock() {
+#ifdef DEBUG
+  DCHECK_LT(0, level_);
+  level_--;
+#endif
+  UnlockNativeHandle(&native_handle_);
+}
+
+
+bool RecursiveMutex::TryLock() {
+  if (!TryLockNativeHandle(&native_handle_)) {
+    return false;
+  }
+#ifdef DEBUG
+  DCHECK_LE(0, level_);
+  level_++;
+#endif
+  return true;
+}
+
+} }  // namespace v8::base
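
The DEBUG build above initializes POSIX mutexes with PTHREAD_MUTEX_ERRORCHECK
so that misuse fails loudly instead of deadlocking. A minimal sketch of what
that buys, independent of V8 (POSIX only, link with -pthread):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    int main() {
      pthread_mutexattr_t attr;
      pthread_mutexattr_init(&attr);
      pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
      pthread_mutex_t mutex;
      pthread_mutex_init(&mutex, &attr);
      pthread_mutexattr_destroy(&attr);

      pthread_mutex_lock(&mutex);
      // Relocking a non-recursive error-checking mutex fails instead of
      // deadlocking; POSIX specifies EDEADLK for this case.
      int result = pthread_mutex_lock(&mutex);
      printf("second lock: %s\n", result == EDEADLK ? "EDEADLK" : "unexpected");
      pthread_mutex_unlock(&mutex);
      pthread_mutex_destroy(&mutex);
      return 0;
    }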
diff --git a/src/base/platform/mutex.h b/src/base/platform/mutex.h
new file mode 100644
index 0000000..5d0e57b
--- /dev/null
+++ b/src/base/platform/mutex.h
@@ -0,0 +1,215 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_MUTEX_H_
+#define V8_BASE_PLATFORM_MUTEX_H_
+
+#include "src/base/lazy-instance.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+#include "src/base/logging.h"
+
+#if V8_OS_POSIX
+#include <pthread.h>  // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// Mutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A mutex offers
+// exclusive, non-recursive ownership semantics:
+// - A calling thread owns a mutex from the time that it successfully calls
+//   either |Lock()| or |TryLock()| until it calls |Unlock()|.
+// - When a thread owns a mutex, all other threads will block (for calls to
+//   |Lock()|) or receive a |false| return value (for |TryLock()|) if they
+//   attempt to claim ownership of the mutex.
+// A calling thread must not own the mutex prior to calling |Lock()| or
+// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
+// while still owned by some thread. The Mutex class is non-copyable.
+
+class Mutex FINAL {
+ public:
+  Mutex();
+  ~Mutex();
+
+  // Locks the given mutex. If the mutex is currently unlocked, it becomes
+  // locked and owned by the calling thread, and the call returns
+  // immediately. If the mutex is already locked by another thread, the
+  // calling thread is suspended until the mutex is unlocked.
+  void Lock();
+
+  // Unlocks the given mutex. The mutex is assumed to be locked and owned by
+  // the calling thread on entrance.
+  void Unlock();
+
+  // Tries to lock the given mutex. Returns whether the mutex was
+  // successfully locked.
+  bool TryLock() WARN_UNUSED_RESULT;
+
+  // The implementation-defined native handle type.
+#if V8_OS_POSIX
+  typedef pthread_mutex_t NativeHandle;
+#elif V8_OS_WIN
+  typedef CRITICAL_SECTION NativeHandle;
+#endif
+
+  NativeHandle& native_handle() {
+    return native_handle_;
+  }
+  const NativeHandle& native_handle() const {
+    return native_handle_;
+  }
+
+ private:
+  NativeHandle native_handle_;
+#ifdef DEBUG
+  int level_;
+#endif
+
+  V8_INLINE void AssertHeldAndUnmark() {
+#ifdef DEBUG
+    DCHECK_EQ(1, level_);
+    level_--;
+#endif
+  }
+
+  V8_INLINE void AssertUnheldAndMark() {
+#ifdef DEBUG
+    DCHECK_EQ(0, level_);
+    level_++;
+#endif
+  }
+
+  friend class ConditionVariable;
+
+  DISALLOW_COPY_AND_ASSIGN(Mutex);
+};
+
+
+// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+//   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
+//
+//   void my_function() {
+//     LockGuard<Mutex> guard(my_mutex.Pointer());
+//     // Do something.
+//   }
+//
+typedef LazyStaticInstance<Mutex, DefaultConstructTrait<Mutex>,
+                           ThreadSafeInitOnceTrait>::type LazyMutex;
+
+#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// RecursiveMutex
+//
+// This class is a synchronization primitive that can be used to protect shared
+// data from being simultaneously accessed by multiple threads. A recursive
+// mutex offers exclusive, recursive ownership semantics:
+// - A calling thread owns a recursive mutex for a period of time that starts
+//   when it successfully calls either |Lock()| or |TryLock()|. During this
+//   period, the thread may make additional calls to |Lock()| or |TryLock()|.
+//   The period of ownership ends when the thread makes a matching number of
+//   calls to |Unlock()|.
+// - When a thread owns a recursive mutex, all other threads will block (for
+//   calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
+//   they attempt to claim ownership of the recursive mutex.
+// - The maximum number of times that a recursive mutex may be locked is
+//   unspecified, but after that number is reached, calls to |Lock()| will
+//   probably abort the process and calls to |TryLock()| will return false.
+// The behavior of a program is undefined if a recursive mutex is destroyed
+// while still owned by some thread. The RecursiveMutex class is non-copyable.
+
+class RecursiveMutex FINAL {
+ public:
+  RecursiveMutex();
+  ~RecursiveMutex();
+
+  // Locks the mutex. If another thread has already locked the mutex, a call to
+  // |Lock()| will block execution until the lock is acquired. A thread may call
+  // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
+  // after the thread makes a matching number of calls to |Unlock()|.
+  // The behavior is undefined if the mutex is not unlocked before being
+  // destroyed, i.e. some thread still owns it.
+  void Lock();
+
+  // Unlocks the mutex if its level of ownership is 1 (there was exactly one
+  // more call to |Lock()| than there were calls to |Unlock()| made by this
+  // thread); otherwise reduces the level of ownership by 1. The mutex must be
+  // locked by the current thread of execution, otherwise the behavior is
+  // undefined.
+  void Unlock();
+
+  // Tries to lock the given mutex. Returns whether the mutex was
+  // successfully locked.
+  bool TryLock() WARN_UNUSED_RESULT;
+
+  // The implementation-defined native handle type.
+  typedef Mutex::NativeHandle NativeHandle;
+
+  NativeHandle& native_handle() {
+    return native_handle_;
+  }
+  const NativeHandle& native_handle() const {
+    return native_handle_;
+  }
+
+ private:
+  NativeHandle native_handle_;
+#ifdef DEBUG
+  int level_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
+};
+
+
+// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
+// called).
+// Usage:
+//   static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
+//
+//   void my_function() {
+//     LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
+//     // Do something.
+//   }
+//
+typedef LazyStaticInstance<RecursiveMutex,
+                           DefaultConstructTrait<RecursiveMutex>,
+                           ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
+
+#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
+
+
+// -----------------------------------------------------------------------------
+// LockGuard
+//
+// This class is a mutex wrapper that provides a convenient RAII-style mechanism
+// for owning a mutex for the duration of a scoped block.
+// When a LockGuard object is created, it attempts to take ownership of the
+// mutex it is given. When control leaves the scope in which the LockGuard
+// object was created, the LockGuard is destructed and the mutex is released.
+// The LockGuard class is non-copyable.
+
+template <typename Mutex>
+class LockGuard FINAL {
+ public:
+  explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
+  ~LockGuard() { mutex_->Unlock(); }
+
+ private:
+  Mutex* mutex_;
+
+  DISALLOW_COPY_AND_ASSIGN(LockGuard);
+};
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_MUTEX_H_
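
Since TryLock() is annotated WARN_UNUSED_RESULT, callers are expected to
branch on the result and only call Unlock() after a successful acquisition.
A minimal sketch of that pattern; DoFallbackWork is a hypothetical
placeholder, not part of this patch:

    #include "src/base/platform/mutex.h"

    void DoFallbackWork();  // Hypothetical: any non-blocking fallback path.

    void PollSharedState(v8::base::Mutex* mutex) {
      if (mutex->TryLock()) {
        // ... read or mutate the shared state ...
        mutex->Unlock();  // Only legal after a successful TryLock().
      } else {
        DoFallbackWork();  // Contended: back off without blocking.
      }
    }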
diff --git a/src/base/platform/platform-cygwin.cc b/src/base/platform/platform-cygwin.cc
new file mode 100644
index 0000000..8a767cf
--- /dev/null
+++ b/src/base/platform/platform-cygwin.cc
@@ -0,0 +1,303 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Cygwin goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <errno.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <stdarg.h>
+#include <strings.h>    // index
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/time.h>
+#include <unistd.h>     // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/win32-headers.h"
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return tzname[0];  // The location of the timezone string on Cygwin.
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  // On Cygwin, struct tm does not contain a tm_gmtoff field.
+  time_t utc = time(NULL);
+  DCHECK(utc != -1);
+  struct tm* loc = localtime(&utc);
+  DCHECK(loc != NULL);
+  // time - localtime includes any daylight savings offset, so subtract it.
+  return static_cast<double>((mktime(loc) - utc) * msPerSecond -
+                             (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return result;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  // This loop will terminate once the scanning hits an EOF.
+  while (true) {
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/'));
+      if (c == EOF) break;  // EOF: Was unexpected, just exit.
+
+      // Process the filename if found.
+      if (c == '/') {
+        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      result.push_back(SharedLibraryAddress(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to end of line to set up
+      // reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
+  }
+  free(lib_name);
+  fclose(fp);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+  // Nothing to do on Cygwin.
+}
+
+
+// The VirtualMemory implementation is taken from platform-win32.cc.
+// The mmap-based virtual memory implementation used on most POSIX platforms
+// does not work well here because Cygwin does not support MAP_FIXED, so
+// VirtualMemory::Commit could not reliably commit the memory region
+// specified.
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages, try to randomize the allocation address.
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size);
+  if (address == NULL) return;
+  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  DCHECK(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != NULL) {
+    request_size = size;
+    DCHECK(base == static_cast<uint8_t*>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address_, size_);
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  DCHECK(IsReserved());
+  return UncommitRegion(address, size);
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_NOACCESS)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+} }  // namespace v8::base
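
The randomized-reservation retry in RandomizedVirtualAlloc above generalizes
to any Win32 allocator. A hedged standalone sketch (Windows only; random_base
stands in for OS::GetRandomMmapAddr()):

    #include <windows.h>

    static void* ReserveWithRandomBase(size_t size, void* (*random_base)()) {
      void* base = NULL;
      // Try a handful of randomized addresses; VirtualAlloc returns NULL
      // when the requested range is unavailable.
      for (int attempts = 0; base == NULL && attempts < 3; ++attempts) {
        base = VirtualAlloc(random_base(), size, MEM_RESERVE, PAGE_NOACCESS);
      }
      // Give up on randomization and let the OS pick an address.
      if (base == NULL) {
        base = VirtualAlloc(NULL, size, MEM_RESERVE, PAGE_NOACCESS);
      }
      return base;
    }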
diff --git a/src/base/platform/platform-freebsd.cc b/src/base/platform/platform-freebsd.cc
new file mode 100644
index 0000000..507b946
--- /dev/null
+++ b/src/base/platform/platform-freebsd.cc
@@ -0,0 +1,307 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for FreeBSD goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <sys/ucontext.h>
+
+#include <sys/fcntl.h>  // open
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <sys/types.h>  // mmap & munmap
+#include <unistd.h>     // getpagesize
+// If you don't have execinfo.h then you need devel/libexecinfo from ports.
+#include <errno.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <strings.h>    // index
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+static unsigned StringToLong(char* buffer) {
+  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  static const int MAP_LENGTH = 1024;
+  int fd = open("/proc/self/maps", O_RDONLY);
+  if (fd < 0) return result;
+  while (true) {
+    char addr_buffer[11];
+    addr_buffer[0] = '0';
+    addr_buffer[1] = 'x';
+    addr_buffer[10] = 0;
+    ssize_t bytes_read = read(fd, addr_buffer + 2, 8);
+    if (bytes_read < 8) break;
+    unsigned start = StringToLong(addr_buffer);
+    bytes_read = read(fd, addr_buffer + 2, 1);
+    if (bytes_read < 1) break;
+    if (addr_buffer[2] != '-') break;
+    bytes_read = read(fd, addr_buffer + 2, 8);
+    if (bytes_read < 8) break;
+    unsigned end = StringToLong(addr_buffer);
+    char buffer[MAP_LENGTH];
+    bytes_read = -1;
+    do {
+      bytes_read++;
+      if (bytes_read >= MAP_LENGTH - 1)
+        break;
+      // Keep bytes_read as the buffer index; use a separate variable for
+      // the read() result so the index is not clobbered.
+      ssize_t chars_read = read(fd, buffer + bytes_read, 1);
+      if (chars_read < 1) break;
+    } while (buffer[bytes_read] != '\n');
+    buffer[bytes_read] = 0;
+    // Ignore mappings that are not executable.
+    if (buffer[3] != 'x') continue;
+    char* start_of_path = index(buffer, '/');
+    // There may be no filename in this line.  Skip to next.
+    if (start_of_path == NULL) continue;
+    buffer[bytes_read] = 0;
+    result.push_back(SharedLibraryAddress(start_of_path, start, end));
+  }
+  close(fd);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+} }  // namespace v8::base
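
The address fields above are read as fixed-width hex into a "0x########"
buffer and converted with strtol. A standalone sketch of that conversion,
using a made-up sample address:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static unsigned StringToLong(char* buffer) {
      return (unsigned)strtol(buffer, NULL, 16);
    }

    int main() {
      char addr_buffer[11] = "0x";
      memcpy(addr_buffer + 2, "0804a000", 8);  // As read() would fill it.
      addr_buffer[10] = 0;
      printf("parsed: 0x%x\n", StringToLong(addr_buffer));
      return 0;
    }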
diff --git a/src/base/platform/platform-linux.cc b/src/base/platform/platform-linux.cc
new file mode 100644
index 0000000..eff5ced
--- /dev/null
+++ b/src/base/platform/platform-linux.cc
@@ -0,0 +1,446 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Linux goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+// Ubuntu Dapper requires memory pages to be marked as
+// executable. Otherwise, OS raises an exception when executing code
+// in that page.
+#include <errno.h>
+#include <fcntl.h>      // open
+#include <stdarg.h>
+#include <strings.h>    // index
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <sys/types.h>  // mmap & munmap
+#include <unistd.h>     // sysconf
+
+// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
+// Old versions of the C library <signal.h> didn't define the type.
+#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
+    (defined(__arm__) || defined(__aarch64__)) && \
+    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
+#include <asm/sigcontext.h>  // NOLINT
+#endif
+
+#if defined(LEAK_SANITIZER)
+#include <sanitizer/lsan_interface.h>
+#endif
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+#if V8_OS_NACL
+#if !defined(MAP_NORESERVE)
+// PNaCL doesn't have this, so we always grab all of the memory, which is bad.
+#define MAP_NORESERVE 0
+#endif
+#else
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+
+#ifdef __arm__
+
+bool OS::ArmUsingHardFloat() {
+  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+  // We use these as well as a couple of other defines to statically determine
+  // what FP ABI is used.
+  // GCC versions 4.4 and below don't support hard-fp.
+  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+  // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000                                          \
+                     + __GNUC_MINOR__ * 100                                    \
+                     + __GNUC_PATCHLEVEL__)
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+  return true;
+#else
+  return false;
+#endif
+
+#elif GCC_VERSION < 40500
+  return false;
+
+#else
+#if defined(__ARM_PCS_VFP)
+  return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+      !defined(__VFP_FP__)
+  return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for."          \
+       "Please report it on this issue"                                        \
+       "http://code.google.com/p/v8/issues/detail?id=2140"
+
+#endif
+#endif
+#undef GCC_VERSION
+}
+
+#endif  // def __arm__
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+#if V8_OS_NACL
+  // Missing support for tm_zone field.
+  return "";
+#else
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+#endif
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+#if V8_OS_NACL
+  // Missing support for tm_zone field.
+  return 0;
+#else
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+#endif
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, AllocateAlignment());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* addr = OS::GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) OS::Free(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return result;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  // This loop will terminate once the scanning hits an EOF.
+  while (true) {
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
+      if (c == EOF) break;  // EOF: Was unexpected, just exit.
+
+      // Process the filename if found.
+      if ((c == '/') || (c == '[')) {
+        // Push the '/' or '[' back into the stream to be read below.
+        ungetc(c, fp);
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' or '[' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      result.push_back(SharedLibraryAddress(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to end of line to set up
+      // reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
+  }
+  free(lib_name);
+  fclose(fp);
+  return result;
+}
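
The /proc/self/maps layout that this parser assumes can be exercised in
isolation. A sketch using sscanf on an illustrative (made-up) line, mirroring
the r/-/x permission filter above:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const char* line =
          "7f3a8c000000-7f3a8c021000 r-xp 00000000 08:01 123 "
          "/lib/x86_64-linux-gnu/libm.so.6";
      uintptr_t start, end;
      char r, w, x, p;
      char path[256];
      // Fields: start-end perms offset dev inode [path].
      if (sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %c%c%c%c %*s %*s %*s %255s",
                 &start, &end, &r, &w, &x, &p, path) == 7 &&
          r == 'r' && w != 'w' && x == 'x') {
        // Only read-only executable mappings are recorded.
        printf("%s: %" PRIxPTR "-%" PRIxPTR "\n", path, start, end);
      }
      return 0;
    }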
+
+
+void OS::SignalCodeMovingGC() {
+  // Support for ll_prof.py.
+  //
+  // The Linux profiler built into the kernel logs all mmap's with
+  // PROT_EXEC so that analysis tools can properly attribute ticks. We
+  // do a mmap with a name known by ll_prof.py and immediately munmap
+  // it. This injects a GC marker into the stream of events generated
+  // by the kernel and allows us to synchronize V8 code log and the
+  // kernel log.
+  int size = sysconf(_SC_PAGESIZE);
+  FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
+  if (f == NULL) {
+    OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
+    OS::Abort();
+  }
+  void* addr = mmap(OS::GetRandomMmapAddr(), size,
+#if V8_OS_NACL
+                    // The Native Client port of V8 uses an interpreter,
+                    // so code pages don't need PROT_EXEC.
+                    PROT_READ,
+#else
+                    PROT_READ | PROT_EXEC,
+#endif
+                    MAP_PRIVATE, fileno(f), 0);
+  DCHECK(addr != MAP_FAILED);
+  OS::Free(addr, size);
+  fclose(f);
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+#if defined(LEAK_SANITIZER)
+  __lsan_register_root_region(address_, size_);
+#endif
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+#if defined(LEAK_SANITIZER)
+  __lsan_register_root_region(result, size);
+#endif
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+#if V8_OS_NACL
+  // The Native Client port of V8 uses an interpreter,
+  // so code pages don't need PROT_EXEC.
+  int prot = PROT_READ | PROT_WRITE;
+#else
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+#endif
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+#if defined(LEAK_SANITIZER)
+  __lsan_unregister_root_region(base, size);
+#endif
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  return true;
+}
+
+} }  // namespace v8::base
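
The over-reserve-then-trim trick in VirtualMemory(size, alignment) above is
self-contained enough to demonstrate standalone. A sketch under the
assumptions that alignment is a power of two and that both size and alignment
are multiples of the page size (Linux/POSIX only):

    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>

    static void* ReserveAligned(size_t size, size_t alignment) {
      // Reserve enough extra space that an aligned block must fit inside.
      size_t request = size + alignment;
      uint8_t* base = (uint8_t*)mmap(NULL, request, PROT_NONE,
                                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (base == MAP_FAILED) return NULL;
      uintptr_t raw = (uintptr_t)base;
      uint8_t* aligned = (uint8_t*)((raw + alignment - 1) & ~(alignment - 1));
      size_t prefix = (size_t)(aligned - base);
      if (prefix != 0) munmap(base, prefix);            // Trim before block.
      size_t suffix = request - prefix - size;
      if (suffix != 0) munmap(aligned + size, suffix);  // Trim after block.
      return aligned;
    }

    int main() {
      void* p = ReserveAligned(1 << 20, 1 << 16);
      printf("aligned base: %p\n", p);
      return 0;
    }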
diff --git a/src/base/platform/platform-macos.cc b/src/base/platform/platform-macos.cc
new file mode 100644
index 0000000..77893ee
--- /dev/null
+++ b/src/base/platform/platform-macos.cc
@@ -0,0 +1,310 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for MacOS goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#include <dlfcn.h>
+#include <mach/mach_init.h>
+#include <mach-o/dyld.h>
+#include <mach-o/getsect.h>
+#include <sys/mman.h>
+#include <unistd.h>
+
+#include <AvailabilityMacros.h>
+
+#include <errno.h>
+#include <libkern/OSAtomic.h>
+#include <mach/mach.h>
+#include <mach/semaphore.h>
+#include <mach/task.h>
+#include <mach/vm_statistics.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sys/resource.h>
+#include <sys/sysctl.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+// Constants used for mmap.
+// kMmapFd is used to pass vm_alloc flags to tag the region with the
+// user-defined tag 255. This helps identify V8-allocated regions in memory
+// analysis tools like vmmap(1).
+static const int kMmapFd = VM_MAKE_TAG(255);
+static const off_t kMmapFdOffset = 0;
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(OS::GetRandomMmapAddr(),
+                     msize,
+                     prot,
+                     MAP_PRIVATE | MAP_ANON,
+                     kMmapFd,
+                     kMmapFdOffset);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) OS::Free(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  unsigned int images_count = _dyld_image_count();
+  for (unsigned int i = 0; i < images_count; ++i) {
+    const mach_header* header = _dyld_get_image_header(i);
+    if (header == NULL) continue;
+#if V8_HOST_ARCH_X64
+    uint64_t size;
+    char* code_ptr = getsectdatafromheader_64(
+        reinterpret_cast<const mach_header_64*>(header),
+        SEG_TEXT,
+        SECT_TEXT,
+        &size);
+#else
+    unsigned int size;
+    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
+#endif
+    if (code_ptr == NULL) continue;
+    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
+    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
+    result.push_back(
+        SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
+  }
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* address,
+                                 size_t size,
+                                 bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(address,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* address, size_t size) {
+  return mmap(address,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
+  return munmap(address, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  return false;
+}
+
+} }  // namespace v8::base
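
The tm_gmtoff arithmetic in OS::LocalTimeOffset above can be checked with a
few lines. A sketch (tm_gmtoff is a BSD/glibc extension, so this is
POSIX-flavored, not portable C):

    #include <stdio.h>
    #include <time.h>

    int main() {
      time_t now = time(NULL);
      struct tm* local = localtime(&now);
      // tm_gmtoff is seconds east of UTC, including DST; subtract the DST
      // hour to recover the standard-time offset, as the code above does.
      long offset_ms = local->tm_gmtoff * 1000L -
                       (local->tm_isdst > 0 ? 3600L * 1000L : 0);
      printf("standard-time UTC offset: %ld ms\n", offset_ms);
      return 0;
    }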
diff --git a/src/base/platform/platform-openbsd.cc b/src/base/platform/platform-openbsd.cc
new file mode 100644
index 0000000..4e706cb
--- /dev/null
+++ b/src/base/platform/platform-openbsd.cc
@@ -0,0 +1,338 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for OpenBSD and NetBSD goes here. For the
+// POSIX-compatible parts, the implementation is in platform-posix.cc.
+
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <sys/types.h>
+
+#include <errno.h>
+#include <fcntl.h>      // open
+#include <stdarg.h>
+#include <strings.h>    // index
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/stat.h>   // open
+#include <sys/types.h>  // mmap & munmap
+#include <unistd.h>     // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, AllocateAlignment());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* addr = OS::GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) OS::Free(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  std::vector<SharedLibraryAddress> result;
+  // This function assumes that the layout of the file is as follows:
+  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
+  // If we encounter an unexpected situation we abort scanning further entries.
+  FILE* fp = fopen("/proc/self/maps", "r");
+  if (fp == NULL) return result;
+
+  // Allocate enough room to be able to store a full file name.
+  const int kLibNameLen = FILENAME_MAX + 1;
+  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
+
+  // This loop will terminate once the scanning hits an EOF.
+  while (true) {
+    uintptr_t start, end;
+    char attr_r, attr_w, attr_x, attr_p;
+    // Parse the addresses and permission bits at the beginning of the line.
+    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
+    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
+
+    int c;
+    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
+      // Found a read-only executable entry. Skip characters until we reach
+      // the beginning of the filename or the end of the line.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n') && (c != '/'));
+      if (c == EOF) break;  // EOF: Was unexpected, just exit.
+
+      // Process the filename if found.
+      if (c == '/') {
+        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
+
+        // Read to the end of the line. Exit if the read fails.
+        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
+
+        // Drop the newline character read by fgets. We do not need to check
+        // for a zero-length string because we know that we at least read the
+        // '/' character.
+        lib_name[strlen(lib_name) - 1] = '\0';
+      } else {
+        // No library name found, just record the raw address range.
+        snprintf(lib_name, kLibNameLen,
+                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
+      }
+      result.push_back(SharedLibraryAddress(lib_name, start, end));
+    } else {
+      // Entry not describing executable data. Skip to end of line to set up
+      // reading the next entry.
+      do {
+        c = getc(fp);
+      } while ((c != EOF) && (c != '\n'));
+      if (c == EOF) break;
+    }
+  }
+  free(lib_name);
+  fclose(fp);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+  // Support for ll_prof.py.
+  //
+  // The Linux profiler built into the kernel logs all mmap's with
+  // PROT_EXEC so that analysis tools can properly attribute ticks. We
+  // do a mmap with a name known by ll_prof.py and immediately munmap
+  // it. This injects a GC marker into the stream of events generated
+  // by the kernel and allows us to synchronize V8 code log and the
+  // kernel log.
+  int size = sysconf(_SC_PAGESIZE);
+  FILE* f = fopen(OS::GetGCFakeMMapFile(), "w+");
+  if (f == NULL) {
+    OS::PrintError("Failed to open %s\n", OS::GetGCFakeMMapFile());
+    OS::Abort();
+  }
+  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
+                    fileno(f), 0);
+  DCHECK(addr != MAP_FAILED);
+  OS::Free(addr, size);
+  fclose(f);
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
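+  // Worked example (assuming 4 KB pages): size = 8 KB with alignment = 16 KB
+  // gives request_size = 24 KB. If mmap returns ...0x7000, aligned_base is
+  // ...0x8000, so a 4 KB prefix and a 12 KB suffix are unmapped below,
+  // leaving an 8 KB block aligned to 16 KB.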
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
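+// Committing maps fresh anonymous pages with MAP_FIXED on top of the
+// reserved PROT_NONE region, replacing the placeholder mapping in place.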
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-posix.cc b/src/base/platform/platform-posix.cc
new file mode 100644
index 0000000..0fc04fc
--- /dev/null
+++ b/src/base/platform/platform-posix.cc
@@ -0,0 +1,678 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for POSIX goes here. This is not a platform on its
+// own, but contains the parts which are the same across the POSIX platforms
+// Linux, Mac OS X, FreeBSD, OpenBSD, NetBSD, and QNX.
+
+#include <errno.h>
+#include <limits.h>
+#include <pthread.h>
+#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <pthread_np.h>  // for pthread_set_name_np
+#endif
+#include <sched.h>  // for sched_yield
+#include <time.h>
+#include <unistd.h>
+
+#include <sys/mman.h>
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
+    defined(__NetBSD__) || defined(__OpenBSD__)
+#include <sys/sysctl.h>  // NOLINT, for sysctl
+#endif
+
+#undef MAP_TYPE
+
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+#define LOG_TAG "v8"
+#include <android/log.h>  // NOLINT
+#endif
+
+#include <cmath>
+#include <cstdlib>
+
+#include "src/base/lazy-instance.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/utils/random-number-generator.h"
+
+#ifdef V8_FAST_TLS_SUPPORTED
+#include "src/base/atomicops.h"
+#endif
+
+#if V8_OS_MACOSX
+#include <dlfcn.h>
+#endif
+
+#if V8_OS_LINUX
+#include <sys/prctl.h>  // NOLINT, for prctl
+#endif
+
+#if !V8_OS_NACL
+#include <sys/syscall.h>
+#endif
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+// 0 is never a valid thread id.
+const pthread_t kNoThread = (pthread_t) 0;
+
+bool g_hard_abort = false;
+
+const char* g_gc_fake_mmap = NULL;
+
+}  // namespace
+
+
+int OS::ActivationFrameAlignment() {
+#if V8_TARGET_ARCH_ARM
+  // On EABI ARM targets this is required for fp correctness in the
+  // runtime system.
+  return 8;
+#elif V8_TARGET_ARCH_MIPS
+  return 8;
+#else
+  // Otherwise we just assume 16 byte alignment, i.e.:
+  // - With gcc 4.4 the tree vectorization optimizer can generate code
+  //   that requires 16 byte alignment such as movdqa on x86.
+  // - Mac OS X and Solaris (64-bit) activation frames must be 16 byte-aligned;
+  //   see "Mac OS X ABI Function Call Guide"
+  return 16;
+#endif
+}
+
+
+intptr_t OS::CommitPageSize() {
+  static intptr_t page_size = getpagesize();
+  return page_size;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): munmap's return value is checked only by the DCHECK below,
+  // so failures are silently ignored in release builds.
+  int result = munmap(address, size);
+  USE(result);
+  DCHECK(result == 0);
+}
+
+
+// Get rid of writable permission on code allocations.
+void OS::ProtectCode(void* address, const size_t size) {
+#if V8_OS_CYGWIN
+  DWORD old_protect;
+  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+#elif V8_OS_NACL
+  // The Native Client port of V8 uses an interpreter, so
+  // code pages don't need PROT_EXEC.
+  mprotect(address, size, PROT_READ);
+#else
+  mprotect(address, size, PROT_READ | PROT_EXEC);
+#endif
+}
+
+
+// Create guard pages.
+void OS::Guard(void* address, const size_t size) {
+#if V8_OS_CYGWIN
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+#else
+  mprotect(address, size, PROT_NONE);
+#endif
+}
+
+
+static LazyInstance<RandomNumberGenerator>::type
+    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+                    const char* const gc_fake_mmap) {
+  if (random_seed) {
+    platform_random_number_generator.Pointer()->SetSeed(random_seed);
+  }
+  g_hard_abort = hard_abort;
+  g_gc_fake_mmap = gc_fake_mmap;
+}
+
+
+const char* OS::GetGCFakeMMapFile() {
+  return g_gc_fake_mmap;
+}
+
+
+void* OS::GetRandomMmapAddr() {
+#if V8_OS_NACL
+  // TODO(bradchen): restore randomization once Native Client gets
+  // smarter about using mmap address hints.
+  // See http://code.google.com/p/nativeclient/issues/3341
+  return NULL;
+#endif
+#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
+    defined(THREAD_SANITIZER)
+  // Dynamic tools do not support custom mmap addresses.
+  return NULL;
+#endif
+  uintptr_t raw_addr;
+  platform_random_number_generator.Pointer()->NextBytes(&raw_addr,
+                                                        sizeof(raw_addr));
+#if V8_TARGET_ARCH_X64
+  // Currently available CPUs have 48 bits of virtual addressing.  Truncate
+  // the hint address to 46 bits to give the kernel a fighting chance of
+  // fulfilling our placement request.
+  raw_addr &= V8_UINT64_C(0x3ffffffff000);
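+  // The mask is 2^46 - 2^12: it keeps bits 12..45, yielding a page-aligned
+  // hint below 2^46.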
+#else
+  raw_addr &= 0x3ffff000;
+
+# ifdef __sun
+  // For our Solaris/illumos mmap hint, we pick a random address in the bottom
+  // half of the top half of the address space (that is, the third quarter).
+  // Because we do not MAP_FIXED, this will be treated only as a hint -- the
+  // system will not fail to mmap() because something else happens to already
+  // be mapped at our random address. We deliberately set the hint high enough
+  // to get well above the system's break (that is, the heap); Solaris and
+  // illumos will try the hint and if that fails allocate as if there were
+  // no hint at all. The high hint prevents the break from getting hemmed in
+  // at low values, ceding half of the address space to the system heap.
+  raw_addr += 0x80000000;
+# else
+  // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
+  // variety of ASLR modes (PAE kernel, NX compat mode, etc.) and on Mac OS X
+  // 10.6 and 10.7.
+  raw_addr += 0x20000000;
+# endif
+#endif
+  return reinterpret_cast<void*>(raw_addr);
+}
+
+
+size_t OS::AllocateAlignment() {
+  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
+}
+
+
+void OS::Sleep(int milliseconds) {
+  useconds_t ms = static_cast<useconds_t>(milliseconds);
+  usleep(1000 * ms);
+}
+
+
+void OS::Abort() {
+  if (g_hard_abort) {
+    V8_IMMEDIATE_CRASH();
+  }
+  // Redirect to std abort to signal abnormal program termination.
+  abort();
+}
+
+
+void OS::DebugBreak() {
+#if V8_HOST_ARCH_ARM
+  asm("bkpt 0");
+#elif V8_HOST_ARCH_ARM64
+  asm("brk 0");
+#elif V8_HOST_ARCH_MIPS
+  asm("break");
+#elif V8_HOST_ARCH_MIPS64
+  asm("break");
+#elif V8_HOST_ARCH_IA32
+#if V8_OS_NACL
+  asm("hlt");
+#else
+  asm("int $3");
+#endif  // V8_OS_NACL
+#elif V8_HOST_ARCH_X64
+  asm("int $3");
+#else
+#error Unsupported host architecture.
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+// Math functions
+
+double OS::nan_value() {
+  // NAN from math.h is defined in C99 and not in POSIX.
+  return NAN;
+}
+
+
+int OS::GetCurrentProcessId() {
+  return static_cast<int>(getpid());
+}
+
+
+int OS::GetCurrentThreadId() {
+#if defined(ANDROID)
+  return static_cast<int>(syscall(__NR_gettid));
+#elif defined(SYS_gettid)
+  return static_cast<int>(syscall(SYS_gettid));
+#else
+  // PNaCL doesn't have a way to get an integral thread ID, but it doesn't
+  // really matter, because we only need it in PerfJitLogger::LogRecordedBuffer.
+  return 0;
+#endif
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX date/time support.
+//
+
+int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
+#if V8_OS_NACL
+  // Optionally used in Logger::ResourceEvent.
+  return -1;
+#else
+  struct rusage usage;
+
+  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
+  *secs = usage.ru_utime.tv_sec;
+  *usecs = usage.ru_utime.tv_usec;
+  return 0;
+#endif
+}
+
+
+double OS::TimeCurrentMillis() {
+  return Time::Now().ToJsTime();
+}
+
+
+class TimezoneCache {};
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+  return NULL;
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+  DCHECK(cache == NULL);
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+  DCHECK(cache == NULL);
+}
+
+
+double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
+  if (std::isnan(time)) return nan_value();
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return nan_value();
+  return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
+}
+
+
+int OS::GetLastError() {
+  return errno;
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX stdio support.
+//
+
+FILE* OS::FOpen(const char* path, const char* mode) {
+  FILE* file = fopen(path, mode);
+  if (file == NULL) return NULL;
+  struct stat file_stat;
+  if (fstat(fileno(file), &file_stat) != 0) return NULL;
+  bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
+  if (is_regular_file) return file;
+  fclose(file);
+  return NULL;
+}
+
+
+bool OS::Remove(const char* path) {
+  return (remove(path) == 0);
+}
+
+
+FILE* OS::OpenTemporaryFile() {
+  return tmpfile();
+}
+
+
+const char* const OS::LogFileOpenMode = "w";
+
+
+void OS::Print(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrint(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrint(const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
+  vprintf(format, args);
+#endif
+}
+
+
+void OS::FPrint(FILE* out, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VFPrint(out, format, args);
+  va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
+#else
+  vfprintf(out, format, args);
+#endif
+}
+
+
+void OS::PrintError(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrintError(const char* format, va_list args) {
+#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
+  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
+#else
+  vfprintf(stderr, format, args);
+#endif
+}
+
+
+int OS::SNPrintF(char* str, int length, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  int result = VSNPrintF(str, length, format, args);
+  va_end(args);
+  return result;
+}
+
+
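+// Note: unlike plain vsnprintf(), VSNPrintF() returns -1 on truncation (or
+// error) and guarantees NUL-termination whenever length > 0.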
+int OS::VSNPrintF(char* str,
+                  int length,
+                  const char* format,
+                  va_list args) {
+  int n = vsnprintf(str, length, format, args);
+  if (n < 0 || n >= length) {
+    // If length is zero there is no room even for the terminating NUL.
+    if (length > 0)
+      str[length - 1] = '\0';
+    return -1;
+  } else {
+    return n;
+  }
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX string support.
+//
+
+char* OS::StrChr(char* str, int c) {
+  return strchr(str, c);
+}
+
+
+void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
+  strncpy(dest, src, n);
+}
+
+
+// ----------------------------------------------------------------------------
+// POSIX thread support.
+//
+
+class Thread::PlatformData {
+ public:
+  PlatformData() : thread_(kNoThread) {}
+  pthread_t thread_;  // Thread handle for pthread.
+  // Synchronizes thread creation
+  Mutex thread_creation_mutex_;
+};
+
+Thread::Thread(const Options& options)
+    : data_(new PlatformData),
+      stack_size_(options.stack_size()),
+      start_semaphore_(NULL) {
+  if (stack_size_ > 0 && static_cast<size_t>(stack_size_) < PTHREAD_STACK_MIN) {
+    stack_size_ = PTHREAD_STACK_MIN;
+  }
+  set_name(options.name());
+}
+
+
+Thread::~Thread() {
+  delete data_;
+}
+
+
+static void SetThreadName(const char* name) {
+#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
+  pthread_set_name_np(pthread_self(), name);
+#elif V8_OS_NETBSD
+  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
+  pthread_setname_np(pthread_self(), "%s", name);
+#elif V8_OS_MACOSX
+  // pthread_setname_np is only available in 10.6 or later, so test
+  // for it at runtime.
+  int (*dynamic_pthread_setname_np)(const char*);
+  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
+    dlsym(RTLD_DEFAULT, "pthread_setname_np");
+  if (dynamic_pthread_setname_np == NULL)
+    return;
+
+  // Mac OS X does not expose the length limit of the name, so hardcode it.
+  static const int kMaxNameLength = 63;
+  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
+  dynamic_pthread_setname_np(name);
+#elif defined(PR_SET_NAME)
+  prctl(PR_SET_NAME,
+        reinterpret_cast<unsigned long>(name),  // NOLINT
+        0, 0, 0);
+#endif
+}
+
+
+static void* ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  // We take the lock here to make sure that pthread_create finished first since
+  // we don't know which thread will run first (the original thread or the new
+  // one).
+  { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
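+  // The empty scope acquires and immediately releases the mutex; it only
+  // blocks until Start() has finished pthread_create() and stored the
+  // thread handle.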
+  SetThreadName(thread->name());
+  DCHECK(thread->data()->thread_ != kNoThread);
+  thread->NotifyStartedAndRun();
+  return NULL;
+}
+
+
+void Thread::set_name(const char* name) {
+  strncpy(name_, name, sizeof(name_));
+  name_[sizeof(name_) - 1] = '\0';
+}
+
+
+void Thread::Start() {
+  int result;
+  pthread_attr_t attr;
+  memset(&attr, 0, sizeof(attr));
+  result = pthread_attr_init(&attr);
+  DCHECK_EQ(0, result);
+  // Native client uses default stack size.
+#if !V8_OS_NACL
+  if (stack_size_ > 0) {
+    result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
+    DCHECK_EQ(0, result);
+  }
+#endif
+  {
+    LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
+    result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
+  }
+  DCHECK_EQ(0, result);
+  result = pthread_attr_destroy(&attr);
+  DCHECK_EQ(0, result);
+  DCHECK(data_->thread_ != kNoThread);
+  USE(result);
+}
+
+
+void Thread::Join() {
+  pthread_join(data_->thread_, NULL);
+}
+
+
+void Thread::YieldCPU() {
+  int result = sched_yield();
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
+#if V8_OS_CYGWIN
+  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
+  // because pthread_key_t is a pointer type on Cygwin. This will probably not
+  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
+  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
+  return static_cast<Thread::LocalStorageKey>(ptr_key);
+#else
+  return static_cast<Thread::LocalStorageKey>(pthread_key);
+#endif
+}
+
+
+static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
+#if V8_OS_CYGWIN
+  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
+  intptr_t ptr_key = static_cast<intptr_t>(local_key);
+  return reinterpret_cast<pthread_key_t>(ptr_key);
+#else
+  return static_cast<pthread_key_t>(local_key);
+#endif
+}
+
+
+#ifdef V8_FAST_TLS_SUPPORTED
+
+static Atomic32 tls_base_offset_initialized = 0;
+intptr_t kMacTlsBaseOffset = 0;
+
+// It's safe to do the initialization more than once, but it has to be
+// done at least once.
+static void InitializeTlsBaseOffset() {
+  const size_t kBufferSize = 128;
+  char buffer[kBufferSize];
+  size_t buffer_size = kBufferSize;
+  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
+  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
+    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
+  }
+  // The buffer now contains a string of the form XX.YY.ZZ, where
+  // XX is the major kernel version component.
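+  // (For example, Darwin 10.x.x is Snow Leopard and 11.x.x is Lion.)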
+  // Make sure the buffer is 0-terminated.
+  buffer[kBufferSize - 1] = '\0';
+  char* period_pos = strchr(buffer, '.');
+  *period_pos = '\0';
+  int kernel_version_major =
+      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
+  // The constants below are taken from pthreads.s from the XNU kernel
+  // sources archive at www.opensource.apple.com.
+  if (kernel_version_major < 11) {
+    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
+    // same offsets.
+#if V8_HOST_ARCH_IA32
+    kMacTlsBaseOffset = 0x48;
+#else
+    kMacTlsBaseOffset = 0x60;
+#endif
+  } else {
+    // 11.x.x (Lion) changed the offset.
+    kMacTlsBaseOffset = 0;
+  }
+
+  Release_Store(&tls_base_offset_initialized, 1);
+}
+
+
+static void CheckFastTls(Thread::LocalStorageKey key) {
+  void* expected = reinterpret_cast<void*>(0x1234CAFE);
+  Thread::SetThreadLocal(key, expected);
+  void* actual = Thread::GetExistingThreadLocal(key);
+  if (expected != actual) {
+    V8_Fatal(__FILE__, __LINE__,
+             "V8 failed to initialize fast TLS on current kernel");
+  }
+  Thread::SetThreadLocal(key, NULL);
+}
+
+#endif  // V8_FAST_TLS_SUPPORTED
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+#ifdef V8_FAST_TLS_SUPPORTED
+  bool check_fast_tls = false;
+  if (tls_base_offset_initialized == 0) {
+    check_fast_tls = true;
+    InitializeTlsBaseOffset();
+  }
+#endif
+  pthread_key_t key;
+  int result = pthread_key_create(&key, NULL);
+  DCHECK_EQ(0, result);
+  USE(result);
+  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
+#ifdef V8_FAST_TLS_SUPPORTED
+  // If we just initialized fast TLS support, make sure it works.
+  if (check_fast_tls) CheckFastTls(local_key);
+#endif
+  return local_key;
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  int result = pthread_key_delete(pthread_key);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  return pthread_getspecific(pthread_key);
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
+  int result = pthread_setspecific(pthread_key, value);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-qnx.cc b/src/base/platform/platform-qnx.cc
new file mode 100644
index 0000000..2cb3228
--- /dev/null
+++ b/src/base/platform/platform-qnx.cc
@@ -0,0 +1,374 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for QNX goes here. For the POSIX-compatible
+// parts the implementation is in platform-posix.cc.
+
+#include <backtrace.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/resource.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <ucontext.h>
+
+// QNX requires memory pages to be marked as executable.
+// Otherwise, the OS raises an exception when executing code in that page.
+#include <errno.h>
+#include <fcntl.h>      // open
+#include <stdarg.h>
+#include <strings.h>    // index
+#include <sys/mman.h>   // mmap & munmap
+#include <sys/procfs.h>
+#include <sys/stat.h>   // open
+#include <sys/types.h>  // mmap & munmap
+#include <unistd.h>     // sysconf
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+namespace v8 {
+namespace base {
+
+// 0 is never a valid thread id on QNX since tids and pids share a
+// namespace and pid 0 is reserved (see man 2 kill).
+static const pthread_t kNoThread = (pthread_t) 0;
+
+
+#ifdef __arm__
+
+bool OS::ArmUsingHardFloat() {
+  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
+  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
+  // We use these as well as a couple of other defines to statically determine
+  // what FP ABI is used.
+  // GCC versions 4.4 and below don't support hard-fp.
+  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
+  // __ARM_PCS_VFP.
+
+#define GCC_VERSION (__GNUC__ * 10000                                          \
+                     + __GNUC_MINOR__ * 100                                    \
+                     + __GNUC_PATCHLEVEL__)
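+  // For example, GCC 4.6.3 yields GCC_VERSION == 40603.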
+#if GCC_VERSION >= 40600
+#if defined(__ARM_PCS_VFP)
+  return true;
+#else
+  return false;
+#endif
+
+#elif GCC_VERSION < 40500
+  return false;
+
+#else
+#if defined(__ARM_PCS_VFP)
+  return true;
+#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
+      !defined(__VFP_FP__)
+  return false;
+#else
+#error "Your version of GCC does not report the FP ABI compiled for."          \
+       "Please report it on this issue"                                        \
+       "http://code.google.com/p/v8/issues/detail?id=2140"
+
+#endif
+#endif
+#undef GCC_VERSION
+}
+
+#endif  // __arm__
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return t->tm_zone;
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  time_t tv = time(NULL);
+  struct tm* t = localtime(&tv);
+  // tm_gmtoff includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t->tm_gmtoff * msPerSecond -
+                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, AllocateAlignment());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* addr = OS::GetRandomMmapAddr();
+  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(OS::GetRandomMmapAddr(),
+           size,
+           PROT_READ | PROT_WRITE,
+           MAP_SHARED,
+           fileno(file),
+           0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) OS::Free(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
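+  // Procedure: open /proc/<pid>/as, query the number of map entries
+  // (DCMD_PROC_MAPINFO), fetch them all (DCMD_PROC_PAGEDATA), then resolve
+  // the path of each ELF mapping (DCMD_PROC_MAPDEBUG).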
+  std::vector<SharedLibraryAddress> result;
+  procfs_mapinfo *mapinfos = NULL, *mapinfo;
+  int proc_fd, num, i;
+
+  struct {
+    procfs_debuginfo info;
+    char buff[PATH_MAX];
+  } map;
+
+  char buf[PATH_MAX + 1];
+  snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
+
+  if ((proc_fd = open(buf, O_RDONLY)) == -1) {
+    // open() failed, so there is no valid descriptor to close.
+    return result;
+  }
+
+  /* Get the number of map entries.  */
+  if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
+    close(proc_fd);
+    return result;
+  }
+
+  mapinfos = reinterpret_cast<procfs_mapinfo *>(
+      malloc(num * sizeof(procfs_mapinfo)));
+  if (mapinfos == NULL) {
+    close(proc_fd);
+    return result;
+  }
+
+  /* Fill the map entries.  */
+  if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
+      mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
+    free(mapinfos);
+    close(proc_fd);
+    return result;
+  }
+
+  for (i = 0; i < num; i++) {
+    mapinfo = mapinfos + i;
+    if (mapinfo->flags & MAP_ELF) {
+      map.info.vaddr = mapinfo->vaddr;
+      if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
+        continue;
+      }
+      result.push_back(SharedLibraryAddress(
+          map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
+    }
+  }
+  free(mapinfos);
+  close(proc_fd);
+  return result;
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-solaris.cc b/src/base/platform/platform-solaris.cc
new file mode 100644
index 0000000..b9ef465
--- /dev/null
+++ b/src/base/platform/platform-solaris.cc
@@ -0,0 +1,279 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible
+// parts, the implementation is in platform-posix.cc.
+
+#ifdef __sparc
+# error "V8 does not support the SPARC CPU architecture."
+#endif
+
+#include <dlfcn.h>  // dladdr
+#include <errno.h>
+#include <ieeefp.h>  // finite()
+#include <pthread.h>
+#include <semaphore.h>
+#include <signal.h>  // sigemptyset(), etc
+#include <sys/mman.h>  // mmap()
+#include <sys/regset.h>
+#include <sys/stack.h>  // for stack alignment
+#include <sys/time.h>  // gettimeofday(), timeradd()
+#include <time.h>
+#include <ucontext.h>  // walkstack(), getcontext()
+#include <unistd.h>  // getpagesize(), usleep()
+
+#include <cmath>
+
+#undef MAP_TYPE
+
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+
+
+// It seems there is a bug in some Solaris distributions (experienced in
+// SunOS 5.10 Generic_141445-09) which makes it difficult or impossible to
+// access signbit() despite the availability of other C99 math functions.
+#ifndef signbit
+namespace std {
+// Test sign - usually defined in math.h
+int signbit(double x) {
+  // We need to take care of the special case of both positive and negative
+  // versions of zero.
+  if (x == 0) {
+    return fpclass(x) & FP_NZERO;
+  } else {
+    // This won't detect negative NaN but that should be okay since we don't
+    // assume that behavior.
+    return x < 0;
+  }
+}
+}  // namespace std
+#endif  // signbit
+
+namespace v8 {
+namespace base {
+
+
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  if (std::isnan(time)) return "";
+  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
+  struct tm* t = localtime(&tv);
+  if (NULL == t) return "";
+  return tzname[0];  // The location of the timezone string on Solaris.
+}
+
+
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  tzset();
+  return -static_cast<double>(timezone * msPerSecond);
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  const size_t msize = RoundUp(requested, getpagesize());
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
+
+  if (mbase == MAP_FAILED) return NULL;
+  *allocated = msize;
+  return mbase;
+}
+
+
+class PosixMemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  PosixMemoryMappedFile(FILE* file, void* memory, int size)
+    : file_(file), memory_(memory), size_(size) { }
+  virtual ~PosixMemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  FILE* file_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  FILE* file = fopen(name, "r+");
+  if (file == NULL) return NULL;
+
+  fseek(file, 0, SEEK_END);
+  int size = ftell(file);
+
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  FILE* file = fopen(name, "w+");
+  if (file == NULL) return NULL;
+  int result = fwrite(initial, size, 1, file);
+  if (result < 1) {
+    fclose(file);
+    return NULL;
+  }
+  void* memory =
+      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
+  return new PosixMemoryMappedFile(file, memory, size);
+}
+
+
+PosixMemoryMappedFile::~PosixMemoryMappedFile() {
+  if (memory_) munmap(memory_, size_);
+  fclose(file_);
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  return std::vector<SharedLibraryAddress>();
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+// Constants used for mmap.
+static const int kMmapFd = -1;
+static const int kMmapFdOffset = 0;
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* reservation = mmap(OS::GetRandomMmapAddr(),
+                           request_size,
+                           PROT_NONE,
+                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                           kMmapFd,
+                           kMmapFdOffset);
+  if (reservation == MAP_FAILED) return;
+
+  uint8_t* base = static_cast<uint8_t*>(reservation);
+  uint8_t* aligned_base = RoundUp(base, alignment);
+  DCHECK_LE(base, aligned_base);
+
+  // Unmap extra memory reserved before and after the desired block.
+  if (aligned_base != base) {
+    size_t prefix_size = static_cast<size_t>(aligned_base - base);
+    OS::Free(base, prefix_size);
+    request_size -= prefix_size;
+  }
+
+  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
+  DCHECK_LE(aligned_size, request_size);
+
+  if (aligned_size != request_size) {
+    size_t suffix_size = request_size - aligned_size;
+    OS::Free(aligned_base + aligned_size, suffix_size);
+    request_size -= suffix_size;
+  }
+
+  DCHECK(aligned_size == request_size);
+
+  address_ = static_cast<void*>(aligned_base);
+  size_ = aligned_size;
+}
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  OS::Guard(address, OS::CommitPageSize());
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  void* result = mmap(OS::GetRandomMmapAddr(),
+                      size,
+                      PROT_NONE,
+                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
+                      kMmapFd,
+                      kMmapFdOffset);
+
+  if (result == MAP_FAILED) return NULL;
+
+  return result;
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
+  if (MAP_FAILED == mmap(base,
+                         size,
+                         prot,
+                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
+                         kMmapFd,
+                         kMmapFdOffset)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return mmap(base,
+              size,
+              PROT_NONE,
+              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
+              kMmapFd,
+              kMmapFdOffset) != MAP_FAILED;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return munmap(base, size) == 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform-unittest.cc b/src/base/platform/platform-unittest.cc
new file mode 100644
index 0000000..06fbee0
--- /dev/null
+++ b/src/base/platform/platform-unittest.cc
@@ -0,0 +1,110 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/platform.h"
+
+#if V8_OS_POSIX
+#include <unistd.h>  // NOLINT
+#endif
+
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(OS, GetCurrentProcessId) {
+#if V8_OS_POSIX
+  EXPECT_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
+#endif
+
+#if V8_OS_WIN
+  EXPECT_EQ(static_cast<int>(::GetCurrentProcessId()),
+            OS::GetCurrentProcessId());
+#endif
+}
+
+
+namespace {
+
+class SelfJoinThread FINAL : public Thread {
+ public:
+  SelfJoinThread() : Thread(Options("SelfJoinThread")) {}
+  virtual void Run() OVERRIDE { Join(); }
+};
+
+}  // namespace
+
+
+TEST(Thread, SelfJoin) {
+  SelfJoinThread thread;
+  thread.Start();
+  thread.Join();
+}
+
+
+namespace {
+
+class ThreadLocalStorageTest : public Thread, public ::testing::Test {
+ public:
+  ThreadLocalStorageTest() : Thread(Options("ThreadLocalStorageTest")) {
+    for (size_t i = 0; i < arraysize(keys_); ++i) {
+      keys_[i] = Thread::CreateThreadLocalKey();
+    }
+  }
+  ~ThreadLocalStorageTest() {
+    for (size_t i = 0; i < arraysize(keys_); ++i) {
+      Thread::DeleteThreadLocalKey(keys_[i]);
+    }
+  }
+
+  virtual void Run() FINAL OVERRIDE {
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK(!Thread::HasThreadLocal(keys_[i]));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      Thread::SetThreadLocal(keys_[i], GetValue(i));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK(Thread::HasThreadLocal(keys_[i]));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK_EQ(GetValue(i), Thread::GetThreadLocal(keys_[i]));
+      CHECK_EQ(GetValue(i), Thread::GetExistingThreadLocal(keys_[i]));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      Thread::SetThreadLocal(keys_[i], GetValue(arraysize(keys_) - i - 1));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK(Thread::HasThreadLocal(keys_[i]));
+    }
+    for (size_t i = 0; i < arraysize(keys_); i++) {
+      CHECK_EQ(GetValue(arraysize(keys_) - i - 1),
+               Thread::GetThreadLocal(keys_[i]));
+      CHECK_EQ(GetValue(arraysize(keys_) - i - 1),
+               Thread::GetExistingThreadLocal(keys_[i]));
+    }
+  }
+
+ private:
+  static void* GetValue(size_t x) {
+    return reinterpret_cast<void*>(static_cast<uintptr_t>(x + 1));
+  }
+
+  Thread::LocalStorageKey keys_[256];
+};
+
+}  // namespace
+
+
+TEST_F(ThreadLocalStorageTest, DoTest) {
+  Run();
+  Start();
+  Join();
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/platform-win32.cc b/src/base/platform/platform-win32.cc
new file mode 100644
index 0000000..10f89de
--- /dev/null
+++ b/src/base/platform/platform-win32.cc
@@ -0,0 +1,1406 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Platform-specific code for Win32.
+
+// Secure API functions are not available using MinGW with msvcrt.dll
+// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined to
+// disable definition of secure API functions in standard headers that
+// would conflict with our own implementation.
+#ifdef __MINGW32__
+#include <_mingw.h>
+#ifdef MINGW_HAS_SECURE_API
+#undef MINGW_HAS_SECURE_API
+#endif  // MINGW_HAS_SECURE_API
+#endif  // __MINGW32__
+
+#ifdef _MSC_VER
+#include <limits>
+#endif
+
+#include "src/base/win32-headers.h"
+
+#include "src/base/bits.h"
+#include "src/base/lazy-instance.h"
+#include "src/base/macros.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
+#include "src/base/utils/random-number-generator.h"
+
+#ifdef _MSC_VER
+
+// Case-insensitive bounded string comparison, usually declared in strings.h.
+// Implemented here with _strnicmp() on Win32.
+int strncasecmp(const char* s1, const char* s2, int n) {
+  return _strnicmp(s1, s2, n);
+}
+
+#endif  // _MSC_VER
+
+
+// Extra functions for MinGW. Most of these are the _s functions which are in
+// the Microsoft Visual Studio C++ CRT.
+#ifdef __MINGW32__
+
+
+#ifndef __MINGW64_VERSION_MAJOR
+
+#define _TRUNCATE 0
+#define STRUNCATE 80
+
+inline void MemoryBarrier() {
+  int barrier = 0;
+  __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
+}
+
+#endif  // __MINGW64_VERSION_MAJOR
+
+
+int localtime_s(tm* out_tm, const time_t* time) {
+  tm* posix_local_time_struct = localtime(time);
+  if (posix_local_time_struct == NULL) return 1;
+  *out_tm = *posix_local_time_struct;
+  return 0;
+}
+
+
+int fopen_s(FILE** pFile, const char* filename, const char* mode) {
+  *pFile = fopen(filename, mode);
+  return *pFile != NULL ? 0 : 1;
+}
+
+int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
+                 const char* format, va_list argptr) {
+  DCHECK(count == _TRUNCATE);
+  return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
+}
+
+
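+// Minimal emulation of the MSVC strncpy_s() behavior that V8 relies on:
+// with count == _TRUNCATE the copy is truncated to fit and STRUNCATE is
+// returned; otherwise at most count characters are copied. The result is
+// always NUL-terminated.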
+int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
+  CHECK(source != NULL);
+  CHECK(dest != NULL);
+  CHECK_GT(dest_size, 0);
+
+  if (count == _TRUNCATE) {
+    while (dest_size > 0 && *source != 0) {
+      *(dest++) = *(source++);
+      --dest_size;
+    }
+    if (dest_size == 0) {
+      *(dest - 1) = 0;
+      return STRUNCATE;
+    }
+  } else {
+    while (dest_size > 0 && count > 0 && *source != 0) {
+      *(dest++) = *(source++);
+      --dest_size;
+      --count;
+    }
+  }
+  CHECK_GT(dest_size, 0);
+  *dest = 0;
+  return 0;
+}
+
+#endif  // __MINGW32__
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+bool g_hard_abort = false;
+
+}  // namespace
+
+class TimezoneCache {
+ public:
+  TimezoneCache() : initialized_(false) { }
+
+  void Clear() {
+    initialized_ = false;
+  }
+
+  // Initialize timezone information. The timezone information is obtained from
+  // Windows. If we cannot get the timezone information we fall back to CET.
+  void InitializeIfNeeded() {
+    // Just return if timezone information has already been initialized.
+    if (initialized_) return;
+
+    // Initialize POSIX time zone data.
+    _tzset();
+    // Obtain timezone information from operating system.
+    memset(&tzinfo_, 0, sizeof(tzinfo_));
+    if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
+      // If we cannot get timezone information we fall back to CET.
+      tzinfo_.Bias = -60;
+      tzinfo_.StandardDate.wMonth = 10;
+      tzinfo_.StandardDate.wDay = 5;
+      tzinfo_.StandardDate.wHour = 3;
+      tzinfo_.StandardBias = 0;
+      tzinfo_.DaylightDate.wMonth = 3;
+      tzinfo_.DaylightDate.wDay = 5;
+      tzinfo_.DaylightDate.wHour = 2;
+      tzinfo_.DaylightBias = -60;
+    }
+
+    // Make standard and DST timezone names.
+    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
+                        std_tz_name_, kTzNameSize, NULL, NULL);
+    std_tz_name_[kTzNameSize - 1] = '\0';
+    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
+                        dst_tz_name_, kTzNameSize, NULL, NULL);
+    dst_tz_name_[kTzNameSize - 1] = '\0';
+
+    // If OS returned empty string or resource id (like "@tzres.dll,-211")
+    // simply guess the name from the UTC bias of the timezone.
+    // To properly resolve the resource identifier requires a library load,
+    // which is not possible in a sandbox.
+    if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
+      OS::SNPrintF(std_tz_name_, kTzNameSize - 1,
+                   "%s Standard Time",
+                   GuessTimezoneNameFromBias(tzinfo_.Bias));
+    }
+    if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
+      OS::SNPrintF(dst_tz_name_, kTzNameSize - 1,
+                   "%s Daylight Time",
+                   GuessTimezoneNameFromBias(tzinfo_.Bias));
+    }
+    // Timezone information initialized.
+    initialized_ = true;
+  }
+
+  // Guess the name of the timezone from the bias.
+  // The guess is very biased towards the northern hemisphere.
+  const char* GuessTimezoneNameFromBias(int bias) {
+    static const int kHour = 60;
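+    // Windows reports Bias as the number of minutes to add to local time to
+    // reach UTC, so -bias is the zone's offset east of UTC in minutes.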
+    switch (-bias) {
+      case -9*kHour: return "Alaska";
+      case -8*kHour: return "Pacific";
+      case -7*kHour: return "Mountain";
+      case -6*kHour: return "Central";
+      case -5*kHour: return "Eastern";
+      case -4*kHour: return "Atlantic";
+      case  0*kHour: return "GMT";
+      case +1*kHour: return "Central Europe";
+      case +2*kHour: return "Eastern Europe";
+      case +3*kHour: return "Russia";
+      case +5*kHour + 30: return "India";
+      case +8*kHour: return "China";
+      case +9*kHour: return "Japan";
+      case +12*kHour: return "New Zealand";
+      default: return "Local";
+    }
+  }
+
+ private:
+  static const int kTzNameSize = 128;
+  bool initialized_;
+  char std_tz_name_[kTzNameSize];
+  char dst_tz_name_[kTzNameSize];
+  TIME_ZONE_INFORMATION tzinfo_;
+  friend class Win32Time;
+};
+
+
+// ----------------------------------------------------------------------------
+// The Time class represents time on win32. A timestamp is represented as
+// a 64-bit integer in 100 nanoseconds since January 1, 1601 (UTC). JavaScript
+// timestamps are represented as doubles in milliseconds since 00:00:00 UTC,
+// January 1, 1970.
+
+class Win32Time {
+ public:
+  // Constructors.
+  Win32Time();
+  explicit Win32Time(double jstime);
+  Win32Time(int year, int mon, int day, int hour, int min, int sec);
+
+  // Convert timestamp to JavaScript representation.
+  double ToJSTime();
+
+  // Set timestamp to current time.
+  void SetToCurrentTime();
+
+  // Returns the local timezone offset in milliseconds east of UTC. This is
+  // the number of milliseconds you must add to UTC to get local time, i.e.
+  // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
+// routine also takes into account whether daylight saving is in effect
+  // at the time.
+  int64_t LocalOffset(TimezoneCache* cache);
+
+  // Returns the daylight savings time offset for the time in milliseconds.
+  int64_t DaylightSavingsOffset(TimezoneCache* cache);
+
+  // Returns a string identifying the current timezone for the
+  // timestamp taking into account daylight saving.
+  char* LocalTimezone(TimezoneCache* cache);
+
+ private:
+  // Constants for time conversion.
+  static const int64_t kTimeEpoc = 116444736000000000LL;
+  static const int64_t kTimeScaler = 10000;
+  static const int64_t kMsPerMinute = 60000;
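+  // kTimeEpoc is the number of 100 ns intervals between the Windows epoch
+  // (1601-01-01) and the Unix epoch (1970-01-01); kTimeScaler converts
+  // 100 ns units to milliseconds (10000 per ms).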
+
+  // Constants for timezone information.
+  static const bool kShortTzNames = false;
+
+  // Return whether or not daylight savings time is in effect at this time.
+  bool InDST(TimezoneCache* cache);
+
+  // Accessor for FILETIME representation.
+  FILETIME& ft() { return time_.ft_; }
+
+  // Accessor for integer representation.
+  int64_t& t() { return time_.t_; }
+
+  // Although win32 uses 64-bit integers for representing timestamps,
+  // these are packed into a FILETIME structure. The FILETIME structure
+  // is just a struct representing a 64-bit integer. The TimeStamp union
+  // allows access to both a FILETIME and an integer representation of
+  // the timestamp.
+  union TimeStamp {
+    FILETIME ft_;
+    int64_t t_;
+  };
+
+  TimeStamp time_;
+};
+
+
+// Initialize timestamp to start of epoch.
+Win32Time::Win32Time() {
+  t() = 0;
+}
+
+
+// Initialize timestamp from a JavaScript timestamp.
+Win32Time::Win32Time(double jstime) {
+  t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
+}
+
+
+// Initialize timestamp from date/time components.
+Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
+  SYSTEMTIME st;
+  st.wYear = year;
+  st.wMonth = mon;
+  st.wDay = day;
+  st.wHour = hour;
+  st.wMinute = min;
+  st.wSecond = sec;
+  st.wMilliseconds = 0;
+  SystemTimeToFileTime(&st, &ft());
+}
+
+
+// Convert timestamp to JavaScript timestamp.
+double Win32Time::ToJSTime() {
+  return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
+}
+
+
+// Set timestamp to current time.
+void Win32Time::SetToCurrentTime() {
+  // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
+  // Because we're fast, we like fast timers which have at least a
+  // 1ms resolution.
+  //
+  // timeGetTime() provides 1ms granularity when combined with
+  // timeBeginPeriod().  If the host application for v8 wants fast
+  // timers, it can use timeBeginPeriod to increase the resolution.
+  //
+  // Using timeGetTime() has a drawback because it is a 32-bit value
+  // and hence rolls over every ~49 days.
+  //
+  // To use the clock, we use GetSystemTimeAsFileTime as our base;
+  // and then use timeGetTime to extrapolate current time from the
+  // start time.  To deal with rollovers, we resync the clock
+  // any time when more than kMaxClockElapsedTime has passed or
+  // whenever timeGetTime creates a rollover.
+
+  static bool initialized = false;
+  static TimeStamp init_time;
+  static DWORD init_ticks;
+  static const int64_t kHundredNanosecondsPerSecond = 10000000;
+  static const int64_t kMaxClockElapsedTime =
+      60*kHundredNanosecondsPerSecond;  // 1 minute
+
+  // If we are uninitialized, we need to resync the clock.
+  bool needs_resync = !initialized;
+
+  // Get the current time.
+  TimeStamp time_now;
+  GetSystemTimeAsFileTime(&time_now.ft_);
+  DWORD ticks_now = timeGetTime();
+
+  // Check if we need to resync due to clock rollover.
+  needs_resync |= ticks_now < init_ticks;
+
+  // Check if we need to resync due to elapsed time.
+  needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
+
+  // Check if we need to resync due to backwards time change.
+  needs_resync |= time_now.t_ < init_time.t_;
+
+  // Resync the clock if necessary.
+  if (needs_resync) {
+    GetSystemTimeAsFileTime(&init_time.ft_);
+    init_ticks = ticks_now = timeGetTime();
+    initialized = true;
+  }
+
+  // Finally, compute the actual time.  Why is this so hard.
+  DWORD elapsed = ticks_now - init_ticks;
+  this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
+}
+
+
+// Return the local timezone offset in milliseconds east of UTC. This
+// takes into account whether daylight saving is in effect at the time.
+// Only times in the 32-bit Unix range may be passed to this function.
+// Also, adding the time-zone offset to the input must not overflow.
+// The function EquivalentTime() in date.js guarantees this.
+int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
+  cache->InitializeIfNeeded();
+
+  Win32Time rounded_to_second(*this);
+  rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
+      1000 * kTimeScaler;
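+  // Integer division drops the sub-second part: 1000 * kTimeScaler is one
+  // second expressed in 100 ns units.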
+  // Convert to local time using POSIX localtime function.
+  // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
+  // very slow.  Other browsers use localtime().
+
+  // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
+  // POSIX seconds past 1/1/1970 0:00:00.
+  double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
+  if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
+    return 0;
+  }
+  // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
+  time_t posix_time = static_cast<time_t>(unchecked_posix_time);
+
+  // Convert to local time, as struct with fields for day, hour, year, etc.
+  tm posix_local_time_struct;
+  if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
+
+  if (posix_local_time_struct.tm_isdst > 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
+  } else if (posix_local_time_struct.tm_isdst == 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
+  } else {
+    return cache->tzinfo_.Bias * -kMsPerMinute;
+  }
+}
+
+
+// Return whether or not daylight savings time is in effect at this time.
+bool Win32Time::InDST(TimezoneCache* cache) {
+  cache->InitializeIfNeeded();
+
+  // Determine if DST is in effect at the specified time.
+  bool in_dst = false;
+  if (cache->tzinfo_.StandardDate.wMonth != 0 ||
+      cache->tzinfo_.DaylightDate.wMonth != 0) {
+    // Get the local timezone offset for the timestamp in milliseconds.
+    int64_t offset = LocalOffset(cache);
+
+    // Compute the offset for DST. The bias parameters in the timezone info
+    // are specified in minutes. These must be converted to milliseconds.
+    int64_t dstofs =
+        -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
+
+    // If the local time offset equals the timezone bias plus the daylight
+    // bias then DST is in effect.
+    in_dst = offset == dstofs;
+  }
+
+  return in_dst;
+}
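+
+// Worked example (illustrative, assuming hypothetical TIME_ZONE_INFORMATION
+// values): for US Pacific time, Bias == 480 and DaylightBias == -60, so
+// dstofs == -(480 + (-60)) * kMsPerMinute, i.e. UTC-7 expressed in
+// milliseconds. If LocalOffset() yields the same value for the timestamp,
+// DST is considered in effect.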
+
+
+// Return the daylight savings time offset for this time.
+int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
+  return InDST(cache) ? 60 * kMsPerMinute : 0;
+}
+
+
+// Returns a string identifying the current timezone for the
+// timestamp taking into account daylight saving.
+char* Win32Time::LocalTimezone(TimezoneCache* cache) {
+  // Return the standard or DST time zone name based on whether daylight
+  // saving is in effect at the given time.
+  return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
+}
+
+
+// Returns the accumulated user time for the thread.
+int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
+  FILETIME dummy;
+  uint64_t usertime;
+
+  // Get the amount of time that the thread has executed in user mode.
+  if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
+                      reinterpret_cast<FILETIME*>(&usertime))) return -1;
+
+  // Adjust the resolution to micro-seconds.
+  usertime /= 10;
+
+  // Convert to seconds and microseconds
+  *secs = static_cast<uint32_t>(usertime / 1000000);
+  *usecs = static_cast<uint32_t>(usertime % 1000000);
+  return 0;
+}
+
+
+// Returns current time as the number of milliseconds since
+// 00:00:00 UTC, January 1, 1970.
+double OS::TimeCurrentMillis() {
+  return Time::Now().ToJsTime();
+}
+
+
+TimezoneCache* OS::CreateTimezoneCache() {
+  return new TimezoneCache();
+}
+
+
+void OS::DisposeTimezoneCache(TimezoneCache* cache) {
+  delete cache;
+}
+
+
+void OS::ClearTimezoneCache(TimezoneCache* cache) {
+  cache->Clear();
+}
+
+
+// Returns a string identifying the current timezone taking into
+// account daylight saving.
+const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
+  return Win32Time(time).LocalTimezone(cache);
+}
+
+
+// Returns the local time offset in milliseconds east of UTC without
+// taking daylight savings time into account.
+double OS::LocalTimeOffset(TimezoneCache* cache) {
+  // Use current time, rounded to the millisecond.
+  Win32Time t(TimeCurrentMillis());
+  // Time::LocalOffset includes any daylight savings offset, so subtract it.
+  return static_cast<double>(t.LocalOffset(cache) -
+                             t.DaylightSavingsOffset(cache));
+}
+
+
+// Returns the daylight savings offset in milliseconds for the given
+// time.
+double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
+  int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
+  return static_cast<double>(offset);
+}
+
+
+int OS::GetLastError() {
+  return ::GetLastError();
+}
+
+
+int OS::GetCurrentProcessId() {
+  return static_cast<int>(::GetCurrentProcessId());
+}
+
+
+int OS::GetCurrentThreadId() {
+  return static_cast<int>(::GetCurrentThreadId());
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 console output.
+//
+// If a Win32 application is linked as a console application it has a normal
+// standard output and standard error. In this case normal printf works fine
+// for output. However, if the application is linked as a GUI application,
+// the process doesn't have a console, and therefore (debugging) output is lost.
+// This is the case if we are embedded in a windows program (like a browser).
+// In order to get debug output in this case, we use the debugging
+// facility OutputDebugString. This output goes to the active debugger
+// for the process (if any). Otherwise the output can be monitored using
+// DBMON.EXE.
+
+enum OutputMode {
+  UNKNOWN,  // Output method has not yet been determined.
+  CONSOLE,  // Output is written to stdout.
+  ODS       // Output is written to debug facility.
+};
+
+static OutputMode output_mode = UNKNOWN;  // Current output mode.
+
+
+// Determine if the process has a console for output.
+static bool HasConsole() {
+  // Only check the first time. Possible race conditions are not a problem,
+  // because all threads will eventually determine the same mode.
+  if (output_mode == UNKNOWN) {
+    // We cannot just check that the standard output is attached to a console
+    // because this would fail if output is redirected to a file. Therefore we
+    // say that a process does not have an output console if either the
+    // standard output handle is invalid or its file type is unknown.
+    if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
+        GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
+      output_mode = CONSOLE;
+    else
+      output_mode = ODS;
+  }
+  return output_mode == CONSOLE;
+}
+
+
+static void VPrintHelper(FILE* stream, const char* format, va_list args) {
+  if ((stream == stdout || stream == stderr) && !HasConsole()) {
+    // It is important to use safe print here in order to avoid
+    // overflowing the buffer. We might truncate the output, but this
+    // does not crash.
+    char buffer[4096];
+    OS::VSNPrintF(buffer, sizeof(buffer), format, args);
+    OutputDebugStringA(buffer);
+  } else {
+    vfprintf(stream, format, args);
+  }
+}
+
+
+FILE* OS::FOpen(const char* path, const char* mode) {
+  FILE* result;
+  if (fopen_s(&result, path, mode) == 0) {
+    return result;
+  } else {
+    return NULL;
+  }
+}
+
+
+bool OS::Remove(const char* path) {
+  return (DeleteFileA(path) != 0);
+}
+
+
+FILE* OS::OpenTemporaryFile() {
+  // tmpfile_s tries to use the root dir, don't use it.
+  char tempPathBuffer[MAX_PATH];
+  DWORD path_result = 0;
+  path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
+  if (path_result > MAX_PATH || path_result == 0) return NULL;
+  UINT name_result = 0;
+  char tempNameBuffer[MAX_PATH];
+  name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
+  if (name_result == 0) return NULL;
+  FILE* result = FOpen(tempNameBuffer, "w+");  // Same mode as tmpfile uses.
+  if (result != NULL) {
+    Remove(tempNameBuffer);  // Delete on close.
+  }
+  return result;
+}
+
+
+// Open log file in binary mode to avoid \n -> \r\n conversion.
+const char* const OS::LogFileOpenMode = "wb";
+
+
+// Print (debug) message to console.
+void OS::Print(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrint(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrint(const char* format, va_list args) {
+  VPrintHelper(stdout, format, args);
+}
+
+
+void OS::FPrint(FILE* out, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VFPrint(out, format, args);
+  va_end(args);
+}
+
+
+void OS::VFPrint(FILE* out, const char* format, va_list args) {
+  VPrintHelper(out, format, args);
+}
+
+
+// Print error message to console.
+void OS::PrintError(const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  VPrintError(format, args);
+  va_end(args);
+}
+
+
+void OS::VPrintError(const char* format, va_list args) {
+  VPrintHelper(stderr, format, args);
+}
+
+
+int OS::SNPrintF(char* str, int length, const char* format, ...) {
+  va_list args;
+  va_start(args, format);
+  int result = VSNPrintF(str, length, format, args);
+  va_end(args);
+  return result;
+}
+
+
+int OS::VSNPrintF(char* str, int length, const char* format, va_list args) {
+  int n = _vsnprintf_s(str, length, _TRUNCATE, format, args);
+  // Make sure to zero-terminate the string if the output was
+  // truncated or if there was an error.
+  if (n < 0 || n >= length) {
+    if (length > 0)
+      str[length - 1] = '\0';
+    return -1;
+  } else {
+    return n;
+  }
+}
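+
+// Usage sketch (illustrative, not part of the build):
+//   char buf[8];
+//   int n = OS::SNPrintF(buf, sizeof(buf), "%s", "truncated output");
+//   // n == -1: the output did not fit, but buf is still zero-terminated
+//   // and holds "truncat".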
+
+
+char* OS::StrChr(char* str, int c) {
+  return const_cast<char*>(strchr(str, c));
+}
+
+
+void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
+  // Use _TRUNCATE; otherwise strncpy_s crashes (by design) if the buffer is
+  // too small.
+  size_t buffer_size = static_cast<size_t>(length);
+  if (n + 1 > buffer_size)  // Account for the trailing '\0'.
+    n = _TRUNCATE;
+  int result = strncpy_s(dest, length, src, n);
+  USE(result);
+  DCHECK(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
+}
+
+
+#undef _TRUNCATE
+#undef STRUNCATE
+
+
+// Get the system's page size used by VirtualAlloc() or the next power
+// of two. The reason for always returning a power of two is that the
+// rounding up in OS::Allocate expects that.
+static size_t GetPageSize() {
+  static size_t page_size = 0;
+  if (page_size == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    page_size = base::bits::RoundUpToPowerOfTwo32(info.dwPageSize);
+  }
+  return page_size;
+}
+
+
+// The allocation alignment is the guaranteed alignment for
+// VirtualAlloc'ed blocks of memory.
+size_t OS::AllocateAlignment() {
+  static size_t allocate_alignment = 0;
+  if (allocate_alignment == 0) {
+    SYSTEM_INFO info;
+    GetSystemInfo(&info);
+    allocate_alignment = info.dwAllocationGranularity;
+  }
+  return allocate_alignment;
+}
+
+
+static LazyInstance<RandomNumberGenerator>::type
+    platform_random_number_generator = LAZY_INSTANCE_INITIALIZER;
+
+
+void OS::Initialize(int64_t random_seed, bool hard_abort,
+                    const char* const gc_fake_mmap) {
+  if (random_seed) {
+    platform_random_number_generator.Pointer()->SetSeed(random_seed);
+  }
+  g_hard_abort = hard_abort;
+}
+
+
+void* OS::GetRandomMmapAddr() {
+  // The address range used to randomize RWX allocations in OS::Allocate.
+  // Try not to map pages into the default range that Windows loads DLLs
+  // into. Use a multiple of 64KB to prevent committing unused memory.
+  // Note: this does not guarantee RWX regions will be within the
+  // range kAllocationRandomAddressMin to kAllocationRandomAddressMax.
+#ifdef V8_HOST_ARCH_64_BIT
+  static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
+  static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
+#else
+  static const intptr_t kAllocationRandomAddressMin = 0x04000000;
+  static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
+#endif
+  uintptr_t address =
+      (platform_random_number_generator.Pointer()->NextInt() << kPageSizeBits) |
+      kAllocationRandomAddressMin;
+  address &= kAllocationRandomAddressMax;
+  return reinterpret_cast<void *>(address);
+}
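+
+// Worked example for the 32-bit case (illustrative, with a hypothetical
+// random value): if NextInt() returns 0x1234 and kPageSizeBits is 12, then
+//   address = (0x1234 << 12) | 0x04000000;  // == 0x05234000
+//   address &= 0x3FFF0000;                  // == 0x05230000
+// which keeps the hint inside the intended range at 64KB granularity.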
+
+
+static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
+  LPVOID base = NULL;
+
+  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
+    // For executable pages, try to randomize the allocation address.
+    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
+      base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
+    }
+  }
+
+  // After three attempts give up and let the OS find an address to use.
+  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
+
+  return base;
+}
+
+
+void* OS::Allocate(const size_t requested,
+                   size_t* allocated,
+                   bool is_executable) {
+  // VirtualAlloc rounds allocated size to page size automatically.
+  size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
+
+  // Windows XP SP2 allows Data Execution Prevention (DEP).
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+
+  LPVOID mbase = RandomizedVirtualAlloc(msize,
+                                        MEM_COMMIT | MEM_RESERVE,
+                                        prot);
+
+  if (mbase == NULL) return NULL;
+
+  DCHECK((reinterpret_cast<uintptr_t>(mbase) % OS::AllocateAlignment()) == 0);
+
+  *allocated = msize;
+  return mbase;
+}
+
+
+void OS::Free(void* address, const size_t size) {
+  // TODO(1240712): VirtualFree has a return value which is ignored here.
+  VirtualFree(address, 0, MEM_RELEASE);
+  USE(size);
+}
+
+
+intptr_t OS::CommitPageSize() {
+  return 4096;
+}
+
+
+void OS::ProtectCode(void* address, const size_t size) {
+  DWORD old_protect;
+  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
+}
+
+
+void OS::Guard(void* address, const size_t size) {
+  DWORD oldprotect;
+  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
+}
+
+
+void OS::Sleep(int milliseconds) {
+  ::Sleep(milliseconds);
+}
+
+
+void OS::Abort() {
+  if (g_hard_abort) {
+    V8_IMMEDIATE_CRASH();
+  }
+  // Make the MSVCRT do a silent abort.
+  raise(SIGABRT);
+}
+
+
+void OS::DebugBreak() {
+#ifdef _MSC_VER
+  // To avoid Visual Studio runtime support the following code can be used
+  // instead
+  // __asm { int 3 }
+  __debugbreak();
+#else
+  ::DebugBreak();
+#endif
+}
+
+
+class Win32MemoryMappedFile : public OS::MemoryMappedFile {
+ public:
+  Win32MemoryMappedFile(HANDLE file,
+                        HANDLE file_mapping,
+                        void* memory,
+                        int size)
+      : file_(file),
+        file_mapping_(file_mapping),
+        memory_(memory),
+        size_(size) { }
+  virtual ~Win32MemoryMappedFile();
+  virtual void* memory() { return memory_; }
+  virtual int size() { return size_; }
+ private:
+  HANDLE file_;
+  HANDLE file_mapping_;
+  void* memory_;
+  int size_;
+};
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
+  // Open a physical file
+  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
+  if (file == INVALID_HANDLE_VALUE) return NULL;
+
+  int size = static_cast<int>(GetFileSize(file, NULL));
+
+  // Create a file mapping for the physical file
+  HANDLE file_mapping = CreateFileMapping(file, NULL,
+      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+  if (file_mapping == NULL) return NULL;
+
+  // Map a view of the file into memory
+  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
+}
+
+
+OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
+    void* initial) {
+  // Open a physical file
+  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
+      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
+  if (file == NULL) return NULL;
+  // Create a file mapping for the physical file
+  HANDLE file_mapping = CreateFileMapping(file, NULL,
+      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
+  if (file_mapping == NULL) return NULL;
+  // Map a view of the file into memory
+  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
+  if (memory) memmove(memory, initial, size);
+  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
+}
+
+
+Win32MemoryMappedFile::~Win32MemoryMappedFile() {
+  if (memory_ != NULL)
+    UnmapViewOfFile(memory_);
+  CloseHandle(file_mapping_);
+  CloseHandle(file_);
+}
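+
+// Usage sketch (illustrative; "snapshot.dat" is a hypothetical file name):
+//   int dummy = 42;
+//   OS::MemoryMappedFile* mmf =
+//       OS::MemoryMappedFile::create("snapshot.dat", sizeof(dummy), &dummy);
+//   if (mmf != NULL) {
+//     // mmf->memory() aliases the file contents until the object is deleted.
+//     delete mmf;  // Unmaps the view and closes both handles.
+//   }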
+
+
+// The following code loads functions defined in DbgHelp.h and TlHelp32.h
+// dynamically. This is to avoid depending on dbghelp.dll and
+// tlhelp32.dll when running (the functions in tlhelp32.dll have been moved to
+// kernel32.dll at some point, so loading the functions defined in TlHelp32.h
+// dynamically might not be necessary any more - for some versions of Windows?).
+
+// Function pointers to functions dynamically loaded from dbghelp.dll.
+#define DBGHELP_FUNCTION_LIST(V)  \
+  V(SymInitialize)                \
+  V(SymGetOptions)                \
+  V(SymSetOptions)                \
+  V(SymGetSearchPath)             \
+  V(SymLoadModule64)              \
+  V(StackWalk64)                  \
+  V(SymGetSymFromAddr64)          \
+  V(SymGetLineFromAddr64)         \
+  V(SymFunctionTableAccess64)     \
+  V(SymGetModuleBase64)
+
+// Function pointers to functions dynamically loaded from tlhelp32.dll
+// (these functions now live in kernel32.dll).
+#define TLHELP32_FUNCTION_LIST(V)  \
+  V(CreateToolhelp32Snapshot)      \
+  V(Module32FirstW)                \
+  V(Module32NextW)
+
+// Define the decoration to use for the type and variable name used for
+// a dynamically loaded DLL function.
+#define DLL_FUNC_TYPE(name) _##name##_
+#define DLL_FUNC_VAR(name) _##name
+
+// Define the type for each dynamically loaded DLL function. The function
+// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
+// from the Windows include files are redefined here to have the function
+// definitions to be as close to the ones in the original .h files as possible.
+#ifndef IN
+#define IN
+#endif
+#ifndef VOID
+#define VOID void
+#endif
+
+// DbgHelp isn't supported on MinGW yet
+#ifndef __MINGW32__
+// DbgHelp.h functions.
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
+                                                       IN PSTR UserSearchPath,
+                                                       IN BOOL fInvadeProcess);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
+typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
+    IN HANDLE hProcess,
+    OUT PSTR SearchPath,
+    IN DWORD SearchPathLength);
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
+    IN HANDLE hProcess,
+    IN HANDLE hFile,
+    IN PSTR ImageName,
+    IN PSTR ModuleName,
+    IN DWORD64 BaseOfDll,
+    IN DWORD SizeOfDll);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
+    DWORD MachineType,
+    HANDLE hProcess,
+    HANDLE hThread,
+    LPSTACKFRAME64 StackFrame,
+    PVOID ContextRecord,
+    PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
+    PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
+    PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
+    PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
+    IN HANDLE hProcess,
+    IN DWORD64 qwAddr,
+    OUT PDWORD64 pdwDisplacement,
+    OUT PIMAGEHLP_SYMBOL64 Symbol);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
+    IN HANDLE hProcess,
+    IN DWORD64 qwAddr,
+    OUT PDWORD pdwDisplacement,
+    OUT PIMAGEHLP_LINE64 Line64);
+// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
+typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
+    HANDLE hProcess,
+    DWORD64 AddrBase);  // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
+typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
+    HANDLE hProcess,
+    DWORD64 AddrBase);  // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
+
+// TlHelp32.h functions.
+typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
+    DWORD dwFlags,
+    DWORD th32ProcessID);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
+                                                        LPMODULEENTRY32W lpme);
+typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
+                                                       LPMODULEENTRY32W lpme);
+
+#undef IN
+#undef VOID
+
+// Declare a variable for each dynamically loaded DLL function.
+#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
+DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
+TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
+#undef DEF_DLL_FUNCTION
+
+// Load the functions. This function has a lot of "ugly" macros in order to
+// keep down code duplication.
+
+static bool LoadDbgHelpAndTlHelp32() {
+  static bool dbghelp_loaded = false;
+
+  if (dbghelp_loaded) return true;
+
+  HMODULE module;
+
+  // Load functions from the dbghelp.dll module.
+  module = LoadLibrary(TEXT("dbghelp.dll"));
+  if (module == NULL) {
+    return false;
+  }
+
+#define LOAD_DLL_FUNC(name)                                                 \
+  DLL_FUNC_VAR(name) =                                                      \
+      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
+
+DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
+
+#undef LOAD_DLL_FUNC
+
+  // Load functions from the kernel32.dll module (the TlHelp32.h functions used
+  // to be in tlhelp32.dll but have now moved to kernel32.dll).
+  module = LoadLibrary(TEXT("kernel32.dll"));
+  if (module == NULL) {
+    return false;
+  }
+
+#define LOAD_DLL_FUNC(name)                                                 \
+  DLL_FUNC_VAR(name) =                                                      \
+      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
+
+TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
+
+#undef LOAD_DLL_FUNC
+
+  // Check that all functions were loaded.
+  bool result =
+#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
+
+DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
+TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
+
+#undef DLL_FUNC_LOADED
+  true;
+
+  dbghelp_loaded = result;
+  return result;
+  // NOTE: The modules are never unloaded and will stay around until the
+  // application is closed.
+}
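+
+// For clarity (illustrative expansion, not code in the build): the
+// DLL_FUNC_LOADED chain above expands to a single expression of the form
+//   bool result = (_SymInitialize != NULL) && (_SymGetOptions != NULL) &&
+//                 ... && (_Module32NextW != NULL) && true;
+// where the trailing "true" terminates the chain of "&&" clauses emitted by
+// the macro.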
+
+#undef DBGHELP_FUNCTION_LIST
+#undef TLHELP32_FUNCTION_LIST
+#undef DLL_FUNC_VAR
+#undef DLL_FUNC_TYPE
+
+
+// Load the symbols for generating stack traces.
+static std::vector<OS::SharedLibraryAddress> LoadSymbols(
+    HANDLE process_handle) {
+  static std::vector<OS::SharedLibraryAddress> result;
+
+  static bool symbols_loaded = false;
+
+  if (symbols_loaded) return result;
+
+  BOOL ok;
+
+  // Initialize the symbol engine.
+  ok = _SymInitialize(process_handle,  // hProcess
+                      NULL,            // UserSearchPath
+                      false);          // fInvadeProcess
+  if (!ok) return result;
+
+  DWORD options = _SymGetOptions();
+  options |= SYMOPT_LOAD_LINES;
+  options |= SYMOPT_FAIL_CRITICAL_ERRORS;
+  options = _SymSetOptions(options);
+
+  char buf[OS::kStackWalkMaxNameLen] = {0};
+  ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
+  if (!ok) {
+    int err = GetLastError();
+    OS::Print("%d\n", err);
+    return result;
+  }
+
+  HANDLE snapshot = _CreateToolhelp32Snapshot(
+      TH32CS_SNAPMODULE,       // dwFlags
+      GetCurrentProcessId());  // th32ProcessId
+  if (snapshot == INVALID_HANDLE_VALUE) return result;
+  MODULEENTRY32W module_entry;
+  module_entry.dwSize = sizeof(module_entry);  // Set the size of the structure.
+  BOOL cont = _Module32FirstW(snapshot, &module_entry);
+  while (cont) {
+    DWORD64 base;
+    // NOTE: the SymLoadModule64 function has the peculiarity of accepting
+    // both Unicode and ASCII strings even though the parameter is PSTR.
+    base = _SymLoadModule64(
+        process_handle,                                       // hProcess
+        0,                                                    // hFile
+        reinterpret_cast<PSTR>(module_entry.szExePath),       // ImageName
+        reinterpret_cast<PSTR>(module_entry.szModule),        // ModuleName
+        reinterpret_cast<DWORD64>(module_entry.modBaseAddr),  // BaseOfDll
+        module_entry.modBaseSize);                            // SizeOfDll
+    if (base == 0) {
+      int err = GetLastError();
+      if (err != ERROR_MOD_NOT_FOUND &&
+          err != ERROR_INVALID_HANDLE) {
+        result.clear();
+        return result;
+      }
+    }
+    int lib_name_length = WideCharToMultiByte(
+        CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
+    std::string lib_name(lib_name_length, 0);
+    WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
+                        lib_name_length, NULL, NULL);
+    result.push_back(OS::SharedLibraryAddress(
+        lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
+        reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
+                                       module_entry.modBaseSize)));
+    cont = _Module32NextW(snapshot, &module_entry);
+  }
+  CloseHandle(snapshot);
+
+  symbols_loaded = true;
+  return result;
+}
+
+
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  // SharedLibraryEvents are logged when loading symbol information.
+  // Only the shared libraries loaded at the time of the call to
+  // GetSharedLibraryAddresses are logged.  DLLs loaded after
+  // initialization are not accounted for.
+  if (!LoadDbgHelpAndTlHelp32()) return std::vector<OS::SharedLibraryAddress>();
+  HANDLE process_handle = GetCurrentProcess();
+  return LoadSymbols(process_handle);
+}
+
+
+void OS::SignalCodeMovingGC() {
+}
+
+
+#else  // __MINGW32__
+std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
+  return std::vector<OS::SharedLibraryAddress>();
+}
+
+
+void OS::SignalCodeMovingGC() { }
+#endif  // __MINGW32__
+
+
+double OS::nan_value() {
+#ifdef _MSC_VER
+  return std::numeric_limits<double>::quiet_NaN();
+#else  // _MSC_VER
+  return NAN;
+#endif  // _MSC_VER
+}
+
+
+int OS::ActivationFrameAlignment() {
+#ifdef _WIN64
+  return 16;  // Windows 64-bit ABI requires the stack to be 16-byte aligned.
+#elif defined(__MINGW32__)
+  // With gcc 4.4 the tree vectorization optimizer can generate code
+  // that requires 16 byte alignment such as movdqa on x86.
+  return 16;
+#else
+  return 8;  // Floating-point math runs faster with 8-byte alignment.
+#endif
+}
+
+
+VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
+
+
+VirtualMemory::VirtualMemory(size_t size)
+    : address_(ReserveRegion(size)), size_(size) { }
+
+
+VirtualMemory::VirtualMemory(size_t size, size_t alignment)
+    : address_(NULL), size_(0) {
+  DCHECK((alignment % OS::AllocateAlignment()) == 0);
+  size_t request_size = RoundUp(size + alignment,
+                                static_cast<intptr_t>(OS::AllocateAlignment()));
+  void* address = ReserveRegion(request_size);
+  if (address == NULL) return;
+  uint8_t* base = RoundUp(static_cast<uint8_t*>(address), alignment);
+  // Try reducing the size by freeing and then reallocating a specific area.
+  bool result = ReleaseRegion(address, request_size);
+  USE(result);
+  DCHECK(result);
+  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
+  if (address != NULL) {
+    request_size = size;
+    DCHECK(base == static_cast<uint8_t*>(address));
+  } else {
+    // Resizing failed, just go with a bigger area.
+    address = ReserveRegion(request_size);
+    if (address == NULL) return;
+  }
+  address_ = address;
+  size_ = request_size;
+}
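+
+// Worked example (illustrative, with hypothetical sizes): to reserve 1 MB
+// aligned to 1 MB, the constructor reserves 2 MB, rounds the returned base
+// up to the next 1 MB boundary, releases the oversized region, and then
+// tries to re-reserve exactly 1 MB at the aligned base. Only if that
+// re-reservation fails (e.g. another allocation raced in) does it fall back
+// to keeping a full-sized reservation.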
+
+
+VirtualMemory::~VirtualMemory() {
+  if (IsReserved()) {
+    bool result = ReleaseRegion(address(), size());
+    DCHECK(result);
+    USE(result);
+  }
+}
+
+
+bool VirtualMemory::IsReserved() {
+  return address_ != NULL;
+}
+
+
+void VirtualMemory::Reset() {
+  address_ = NULL;
+  size_ = 0;
+}
+
+
+bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
+  return CommitRegion(address, size, is_executable);
+}
+
+
+bool VirtualMemory::Uncommit(void* address, size_t size) {
+  DCHECK(IsReserved());
+  return UncommitRegion(address, size);
+}
+
+
+bool VirtualMemory::Guard(void* address) {
+  if (NULL == VirtualAlloc(address,
+                           OS::CommitPageSize(),
+                           MEM_COMMIT,
+                           PAGE_NOACCESS)) {
+    return false;
+  }
+  return true;
+}
+
+
+void* VirtualMemory::ReserveRegion(size_t size) {
+  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
+}
+
+
+bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
+  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
+  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
+    return false;
+  }
+  return true;
+}
+
+
+bool VirtualMemory::UncommitRegion(void* base, size_t size) {
+  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
+}
+
+
+bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
+  return VirtualFree(base, 0, MEM_RELEASE) != 0;
+}
+
+
+bool VirtualMemory::HasLazyCommits() {
+  // TODO(alph): implement for the platform.
+  return false;
+}
+
+
+// ----------------------------------------------------------------------------
+// Win32 thread support.
+
+// Definition of invalid thread handle and id.
+static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
+
+// Entry point for threads. The supplied argument is a pointer to the thread
+// object. The entry function dispatches to the run method in the thread
+// object. It is important that this function has the __stdcall calling
+// convention.
+static unsigned int __stdcall ThreadEntry(void* arg) {
+  Thread* thread = reinterpret_cast<Thread*>(arg);
+  thread->NotifyStartedAndRun();
+  return 0;
+}
+
+
+class Thread::PlatformData {
+ public:
+  explicit PlatformData(HANDLE thread) : thread_(thread) {}
+  HANDLE thread_;
+  unsigned thread_id_;
+};
+
+
+// Initialize a Win32 thread object. The thread has an invalid thread
+// handle until it is started.
+
+Thread::Thread(const Options& options)
+    : stack_size_(options.stack_size()),
+      start_semaphore_(NULL) {
+  data_ = new PlatformData(kNoThread);
+  set_name(options.name());
+}
+
+
+void Thread::set_name(const char* name) {
+  OS::StrNCpy(name_, sizeof(name_), name, strlen(name));
+  name_[sizeof(name_) - 1] = '\0';
+}
+
+
+// Close our own handle for the thread.
+Thread::~Thread() {
+  if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
+  delete data_;
+}
+
+
+// Create a new thread. It is important to use _beginthreadex() instead of
+// the Win32 function CreateThread(), because CreateThread() does not
+// initialize thread specific structures in the C runtime library.
+void Thread::Start() {
+  data_->thread_ = reinterpret_cast<HANDLE>(
+      _beginthreadex(NULL,
+                     static_cast<unsigned>(stack_size_),
+                     ThreadEntry,
+                     this,
+                     0,
+                     &data_->thread_id_));
+}
+
+
+// Wait for thread to terminate.
+void Thread::Join() {
+  if (data_->thread_id_ != GetCurrentThreadId()) {
+    WaitForSingleObject(data_->thread_, INFINITE);
+  }
+}
+
+
+Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
+  DWORD result = TlsAlloc();
+  DCHECK(result != TLS_OUT_OF_INDEXES);
+  return static_cast<LocalStorageKey>(result);
+}
+
+
+void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
+  BOOL result = TlsFree(static_cast<DWORD>(key));
+  USE(result);
+  DCHECK(result);
+}
+
+
+void* Thread::GetThreadLocal(LocalStorageKey key) {
+  return TlsGetValue(static_cast<DWORD>(key));
+}
+
+
+void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
+  BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
+  USE(result);
+  DCHECK(result);
+}
+
+
+
+void Thread::YieldCPU() {
+  Sleep(0);
+}
+
+} }  // namespace v8::base
diff --git a/src/base/platform/platform.h b/src/base/platform/platform.h
new file mode 100644
index 0000000..9e20c08
--- /dev/null
+++ b/src/base/platform/platform.h
@@ -0,0 +1,516 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This module contains the platform-specific code. This makes the rest of the
+// code less dependent on operating systems, compilers and runtime libraries.
+// This module specifically does not deal with differences between different
+// processor architectures.
+// The platform classes have the same definition for all platforms. The
+// implementation for a particular platform is put in platform_<os>.cc.
+// The build system then uses the implementation for the target platform.
+//
+// This design has been chosen because it is simple and fast. Alternatively,
+// the platform dependent classes could have been implemented using abstract
+// superclasses with virtual methods and having specializations for each
+// platform. This design was rejected because it was more complicated and
+// slower. It would require factory methods for selecting the right
+// implementation and the overhead of virtual methods for
+// performance-sensitive operations like mutex locking/unlocking.
+
+#ifndef V8_BASE_PLATFORM_PLATFORM_H_
+#define V8_BASE_PLATFORM_PLATFORM_H_
+
+#include <stdarg.h>
+#include <string>
+#include <vector>
+
+#include "src/base/build_config.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
+
+#ifdef __sun
+# ifndef signbit
+namespace std {
+int signbit(double x);
+}
+# endif
+#endif
+
+#if V8_OS_QNX
+#include "src/base/qnx-math.h"
+#endif
+
+// Microsoft Visual C++ specific stuff.
+#if V8_LIBC_MSVCRT
+
+#include "src/base/win32-headers.h"
+#include "src/base/win32-math.h"
+
+int strncasecmp(const char* s1, const char* s2, int n);
+
+// Visual C++ 2013 and higher implement this function.
+#if (_MSC_VER < 1800)
+inline int lrint(double flt) {
+  int intgr;
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
+  __asm {
+    fld flt
+    fistp intgr
+  };
+#else
+  intgr = static_cast<int>(flt + 0.5);
+  if ((intgr & 1) != 0 && intgr - flt == 0.5) {
+    // If the number is halfway between two integers, round to the even one.
+    intgr--;
+  }
+#endif
+  return intgr;
+}
+#endif  // _MSC_VER < 1800
+
+#endif  // V8_LIBC_MSVCRT
+
+namespace v8 {
+namespace base {
+
+// ----------------------------------------------------------------------------
+// Fast TLS support
+
+#ifndef V8_NO_FAST_TLS
+
+#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+  const intptr_t kTibInlineTlsOffset = 0xE10;
+  const intptr_t kTibExtraTlsOffset = 0xF94;
+  const intptr_t kMaxInlineSlots = 64;
+  const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
+  const intptr_t kPointerSize = sizeof(void*);
+  DCHECK(0 <= index && index < kMaxSlots);
+  if (index < kMaxInlineSlots) {
+    return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
+                                               kPointerSize * index));
+  }
+  intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
+  DCHECK(extra != 0);
+  return *reinterpret_cast<intptr_t*>(extra +
+                                      kPointerSize * (index - kMaxInlineSlots));
+}
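+
+// Illustrative reading of the offsets above (they describe the Win32 x86
+// Thread Information Block): slots 0..63 live inline in the TIB at
+// fs:[0xE10 + 4 * index]; higher slots are reached indirectly through the
+// expansion array pointer stored at fs:[0xF94].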
+
+#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
+
+#define V8_FAST_TLS_SUPPORTED 1
+
+extern intptr_t kMacTlsBaseOffset;
+
+INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
+
+inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
+  intptr_t result;
+#if V8_HOST_ARCH_IA32
+  asm("movl %%gs:(%1,%2,4), %0;"
+      :"=r"(result)  // Output must be a writable register.
+      :"r"(kMacTlsBaseOffset), "r"(index));
+#else
+  asm("movq %%gs:(%1,%2,8), %0;"
+      :"=r"(result)
+      :"r"(kMacTlsBaseOffset), "r"(index));
+#endif
+  return result;
+}
+
+#endif
+
+#endif  // V8_NO_FAST_TLS
+
+
+class TimezoneCache;
+
+
+// ----------------------------------------------------------------------------
+// OS
+//
+// This class has static methods for the different platform specific
+// functions. Add methods here to cope with differences between the
+// supported platforms.
+
+class OS {
+ public:
+  // Initialize the OS class.
+  // - random_seed: Used for the GetRandomMmapAddress() if non-zero.
+  // - hard_abort: If true, OS::Abort() will crash instead of aborting.
+  // - gc_fake_mmap: Name of the file for fake gc mmap used in ll_prof.
+  static void Initialize(int64_t random_seed,
+                         bool hard_abort,
+                         const char* const gc_fake_mmap);
+
+  // Returns the accumulated user time for the thread. This routine
+  // can be used for profiling. The implementation should
+  // strive for high-precision timer resolution, preferably
+  // micro-second resolution.
+  static int GetUserTime(uint32_t* secs,  uint32_t* usecs);
+
+  // Returns current time as the number of milliseconds since
+  // 00:00:00 UTC, January 1, 1970.
+  static double TimeCurrentMillis();
+
+  static TimezoneCache* CreateTimezoneCache();
+  static void DisposeTimezoneCache(TimezoneCache* cache);
+  static void ClearTimezoneCache(TimezoneCache* cache);
+
+  // Returns a string identifying the current time zone. The
+  // timestamp is used for determining if DST is in effect.
+  static const char* LocalTimezone(double time, TimezoneCache* cache);
+
+  // Returns the local time offset in milliseconds east of UTC without
+  // taking daylight savings time into account.
+  static double LocalTimeOffset(TimezoneCache* cache);
+
+  // Returns the daylight savings offset for the given time.
+  static double DaylightSavingsOffset(double time, TimezoneCache* cache);
+
+  // Returns last OS error.
+  static int GetLastError();
+
+  static FILE* FOpen(const char* path, const char* mode);
+  static bool Remove(const char* path);
+
+  // Opens a temporary file, the file is auto removed on close.
+  static FILE* OpenTemporaryFile();
+
+  // Log file open mode is platform-dependent due to line-ending issues.
+  static const char* const LogFileOpenMode;
+
+  // Print output to console. This is mostly used for debugging output.
+  // On platforms that have standard terminal output, the output
+  // should go to stdout.
+  static void Print(const char* format, ...);
+  static void VPrint(const char* format, va_list args);
+
+  // Print output to a file. This is mostly used for debugging output.
+  static void FPrint(FILE* out, const char* format, ...);
+  static void VFPrint(FILE* out, const char* format, va_list args);
+
+  // Print error output to console. This is mostly used for error message
+  // output. On platforms that have standard terminal output, the output
+  // should go to stderr.
+  static void PrintError(const char* format, ...);
+  static void VPrintError(const char* format, va_list args);
+
+  // Allocate/Free memory used by JS heap. Pages are readable/writable, but
+  // they are not guaranteed to be executable unless 'executable' is true.
+  // Returns the address of allocated memory, or NULL if failed.
+  static void* Allocate(const size_t requested,
+                        size_t* allocated,
+                        bool is_executable);
+  static void Free(void* address, const size_t size);
+
+  // This is the granularity at which the ProtectCode(...) call can set page
+  // permissions.
+  static intptr_t CommitPageSize();
+
+  // Mark code segments non-writable.
+  static void ProtectCode(void* address, const size_t size);
+
+  // Assign memory as a guard page so that access will cause an exception.
+  static void Guard(void* address, const size_t size);
+
+  // Generate a random address to be used for hinting mmap().
+  static void* GetRandomMmapAddr();
+
+  // Get the alignment guaranteed by Allocate().
+  static size_t AllocateAlignment();
+
+  // Sleep for a number of milliseconds.
+  static void Sleep(const int milliseconds);
+
+  // Abort the current process.
+  static void Abort();
+
+  // Debug break.
+  static void DebugBreak();
+
+  // Walk the stack.
+  static const int kStackWalkError = -1;
+  static const int kStackWalkMaxNameLen = 256;
+  static const int kStackWalkMaxTextLen = 256;
+  struct StackFrame {
+    void* address;
+    char text[kStackWalkMaxTextLen];
+  };
+
+  class MemoryMappedFile {
+   public:
+    static MemoryMappedFile* open(const char* name);
+    static MemoryMappedFile* create(const char* name, int size, void* initial);
+    virtual ~MemoryMappedFile() { }
+    virtual void* memory() = 0;
+    virtual int size() = 0;
+  };
+
+  // Safe formatting print. Ensures that str is always null-terminated.
+  // Returns the number of chars written, or -1 if output was truncated.
+  static int SNPrintF(char* str, int length, const char* format, ...);
+  static int VSNPrintF(char* str,
+                       int length,
+                       const char* format,
+                       va_list args);
+
+  static char* StrChr(char* str, int c);
+  static void StrNCpy(char* dest, int length, const char* src, size_t n);
+
+  // Support for the profiler.  Can do nothing, in which case ticks
+  // occurring in shared libraries will not be properly accounted for.
+  struct SharedLibraryAddress {
+    SharedLibraryAddress(
+        const std::string& library_path, uintptr_t start, uintptr_t end)
+        : library_path(library_path), start(start), end(end) {}
+
+    std::string library_path;
+    uintptr_t start;
+    uintptr_t end;
+  };
+
+  static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses();
+
+  // Support for the profiler.  Notifies the external profiling
+  // process that a code moving garbage collection starts.  Can do
+  // nothing, in which case the code objects must not move (e.g., by
+  // using --never-compact) if accurate profiling is desired.
+  static void SignalCodeMovingGC();
+
+  // Returns the double constant NAN
+  static double nan_value();
+
+  // Support runtime detection of whether the hard float option of the
+  // EABI is used.
+  static bool ArmUsingHardFloat();
+
+  // Returns the activation frame alignment constraint or zero if
+  // the platform doesn't care. Guaranteed to be a power of two.
+  static int ActivationFrameAlignment();
+
+  static int GetCurrentProcessId();
+
+  static int GetCurrentThreadId();
+
+ private:
+  static const int msPerSecond = 1000;
+
+#if V8_OS_POSIX
+  static const char* GetGCFakeMMapFile();
+#endif
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
+};
+
+// Represents and controls an area of reserved memory.
+// Control of the reserved memory can be assigned to another VirtualMemory
+// object by assignment or copy-constructing. This removes the reserved memory
+// from the original object.
+class VirtualMemory {
+ public:
+  // Empty VirtualMemory object, controlling no reserved memory.
+  VirtualMemory();
+
+  // Reserves virtual memory with size.
+  explicit VirtualMemory(size_t size);
+
+  // Reserves virtual memory containing an area of the given size that
+  // is aligned per alignment. This may not be at the position returned
+  // by address().
+  VirtualMemory(size_t size, size_t alignment);
+
+  // Releases the reserved memory, if any, controlled by this VirtualMemory
+  // object.
+  ~VirtualMemory();
+
+  // Returns whether the memory has been reserved.
+  bool IsReserved();
+
+  // Initializes or resets an embedded VirtualMemory object.
+  void Reset();
+
+  // Returns the start address of the reserved memory.
+  // If the memory was reserved with an alignment, this address is not
+  // necessarily aligned. The user might need to round it up to a multiple of
+  // the alignment to get the start of the aligned block.
+  void* address() {
+    DCHECK(IsReserved());
+    return address_;
+  }
+
+  // Returns the size of the reserved memory. The returned value is only
+  // meaningful when IsReserved() returns true.
+  // If the memory was reserved with an alignment, this size may be larger
+  // than the requested size.
+  size_t size() { return size_; }
+
+  // Commits real memory. Returns whether the operation succeeded.
+  bool Commit(void* address, size_t size, bool is_executable);
+
+  // Uncommit real memory.  Returns whether the operation succeeded.
+  bool Uncommit(void* address, size_t size);
+
+  // Creates a single guard page at the given address.
+  bool Guard(void* address);
+
+  void Release() {
+    DCHECK(IsReserved());
+    // Notice: Order is important here. The VirtualMemory object might live
+    // inside the allocated region.
+    void* address = address_;
+    size_t size = size_;
+    Reset();
+    bool result = ReleaseRegion(address, size);
+    USE(result);
+    DCHECK(result);
+  }
+
+  // Assign control of the reserved region to a different VirtualMemory object.
+  // The old object is no longer functional (IsReserved() returns false).
+  void TakeControl(VirtualMemory* from) {
+    DCHECK(!IsReserved());
+    address_ = from->address_;
+    size_ = from->size_;
+    from->Reset();
+  }
+
+  static void* ReserveRegion(size_t size);
+
+  static bool CommitRegion(void* base, size_t size, bool is_executable);
+
+  static bool UncommitRegion(void* base, size_t size);
+
+  // Must be called with a base pointer that has been returned by ReserveRegion
+  // and the same size it was reserved with.
+  static bool ReleaseRegion(void* base, size_t size);
+
+  // Returns true if OS performs lazy commits, i.e. the memory allocation call
+  // defers actual physical memory allocation till the first memory access.
+  // Otherwise returns false.
+  static bool HasLazyCommits();
+
+ private:
+  void* address_;  // Start address of the virtual memory.
+  size_t size_;  // Size of the virtual memory.
+};
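+
+// Usage sketch (illustrative, not part of the build):
+//   VirtualMemory reservation(1024 * 1024);
+//   if (reservation.IsReserved()) {
+//     reservation.Commit(reservation.address(), 4096, false);
+//     // ... use the committed page ...
+//     reservation.Uncommit(reservation.address(), 4096);
+//   }  // The destructor releases the whole reservation.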
+
+
+// ----------------------------------------------------------------------------
+// Thread
+//
+// Thread objects are used for creating and running threads. When the start()
+// method is called the new thread starts running the run() method in the new
+// thread. The Thread object should not be deallocated before the thread has
+// terminated.
+
+class Thread {
+ public:
+  // Opaque data type for thread-local storage keys.
+  typedef int32_t LocalStorageKey;
+
+  class Options {
+   public:
+    Options() : name_("v8:<unknown>"), stack_size_(0) {}
+    explicit Options(const char* name, int stack_size = 0)
+        : name_(name), stack_size_(stack_size) {}
+
+    const char* name() const { return name_; }
+    int stack_size() const { return stack_size_; }
+
+   private:
+    const char* name_;
+    int stack_size_;
+  };
+
+  // Create new thread.
+  explicit Thread(const Options& options);
+  virtual ~Thread();
+
+  // Start new thread by calling the Run() method on the new thread.
+  void Start();
+
+  // Start new thread and wait until Run() method is called on the new thread.
+  void StartSynchronously() {
+    start_semaphore_ = new Semaphore(0);
+    Start();
+    start_semaphore_->Wait();
+    delete start_semaphore_;
+    start_semaphore_ = NULL;
+  }
+
+  // Wait until thread terminates.
+  void Join();
+
+  inline const char* name() const {
+    return name_;
+  }
+
+  // Abstract method for run handler.
+  virtual void Run() = 0;
+
+  // Thread-local storage.
+  static LocalStorageKey CreateThreadLocalKey();
+  static void DeleteThreadLocalKey(LocalStorageKey key);
+  static void* GetThreadLocal(LocalStorageKey key);
+  static int GetThreadLocalInt(LocalStorageKey key) {
+    return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
+  }
+  static void SetThreadLocal(LocalStorageKey key, void* value);
+  static void SetThreadLocalInt(LocalStorageKey key, int value) {
+    SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
+  }
+  static bool HasThreadLocal(LocalStorageKey key) {
+    return GetThreadLocal(key) != NULL;
+  }
+
+#ifdef V8_FAST_TLS_SUPPORTED
+  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
+    void* result = reinterpret_cast<void*>(
+        InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
+    DCHECK(result == GetThreadLocal(key));
+    return result;
+  }
+#else
+  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
+    return GetThreadLocal(key);
+  }
+#endif
+
+  // A hint to the scheduler to let another thread run.
+  static void YieldCPU();
+
+
+  // The thread name length is limited to 16 based on Linux's implementation of
+  // prctl().
+  static const int kMaxThreadNameLength = 16;
+
+  class PlatformData;
+  PlatformData* data() { return data_; }
+
+  void NotifyStartedAndRun() {
+    if (start_semaphore_) start_semaphore_->Signal();
+    Run();
+  }
+
+ private:
+  void set_name(const char* name);
+
+  PlatformData* data_;
+
+  char name_[kMaxThreadNameLength];
+  int stack_size_;
+  Semaphore* start_semaphore_;
+
+  DISALLOW_COPY_AND_ASSIGN(Thread);
+};
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_PLATFORM_H_
diff --git a/src/base/platform/semaphore-unittest.cc b/src/base/platform/semaphore-unittest.cc
new file mode 100644
index 0000000..c68435f
--- /dev/null
+++ b/src/base/platform/semaphore-unittest.cc
@@ -0,0 +1,145 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <cstring>
+
+#include "src/base/platform/platform.h"
+#include "src/base/platform/semaphore.h"
+#include "src/base/platform/time.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+namespace {
+
+static const char kAlphabet[] = "XKOAD";
+static const size_t kAlphabetSize = sizeof(kAlphabet) - 1;
+static const size_t kBufferSize = 987;  // GCD(buffer size, alphabet size) = 1
+static const size_t kDataSize = kBufferSize * kAlphabetSize * 10;
+
+
+class ProducerThread FINAL : public Thread {
+ public:
+  ProducerThread(char* buffer, Semaphore* free_space, Semaphore* used_space)
+      : Thread(Options("ProducerThread")),
+        buffer_(buffer),
+        free_space_(free_space),
+        used_space_(used_space) {}
+  virtual ~ProducerThread() {}
+
+  virtual void Run() OVERRIDE {
+    for (size_t n = 0; n < kDataSize; ++n) {
+      free_space_->Wait();
+      buffer_[n % kBufferSize] = kAlphabet[n % kAlphabetSize];
+      used_space_->Signal();
+    }
+  }
+
+ private:
+  char* buffer_;
+  Semaphore* const free_space_;
+  Semaphore* const used_space_;
+};
+
+
+class ConsumerThread FINAL : public Thread {
+ public:
+  ConsumerThread(const char* buffer, Semaphore* free_space,
+                 Semaphore* used_space)
+      : Thread(Options("ConsumerThread")),
+        buffer_(buffer),
+        free_space_(free_space),
+        used_space_(used_space) {}
+  virtual ~ConsumerThread() {}
+
+  virtual void Run() OVERRIDE {
+    for (size_t n = 0; n < kDataSize; ++n) {
+      used_space_->Wait();
+      EXPECT_EQ(kAlphabet[n % kAlphabetSize], buffer_[n % kBufferSize]);
+      free_space_->Signal();
+    }
+  }
+
+ private:
+  const char* buffer_;
+  Semaphore* const free_space_;
+  Semaphore* const used_space_;
+};
+
+
+class WaitAndSignalThread FINAL : public Thread {
+ public:
+  explicit WaitAndSignalThread(Semaphore* semaphore)
+      : Thread(Options("WaitAndSignalThread")), semaphore_(semaphore) {}
+  virtual ~WaitAndSignalThread() {}
+
+  virtual void Run() OVERRIDE {
+    for (int n = 0; n < 100; ++n) {
+      semaphore_->Wait();
+      ASSERT_FALSE(semaphore_->WaitFor(TimeDelta::FromMicroseconds(1)));
+      semaphore_->Signal();
+    }
+  }
+
+ private:
+  Semaphore* const semaphore_;
+};
+
+}  // namespace
+
+
+TEST(Semaphore, ProducerConsumer) {
+  char buffer[kBufferSize];
+  std::memset(buffer, 0, sizeof(buffer));
+  Semaphore free_space(kBufferSize);
+  Semaphore used_space(0);
+  ProducerThread producer_thread(buffer, &free_space, &used_space);
+  ConsumerThread consumer_thread(buffer, &free_space, &used_space);
+  producer_thread.Start();
+  consumer_thread.Start();
+  producer_thread.Join();
+  consumer_thread.Join();
+}
+
+
+TEST(Semaphore, WaitAndSignal) {
+  Semaphore semaphore(0);
+  WaitAndSignalThread t1(&semaphore);
+  WaitAndSignalThread t2(&semaphore);
+
+  t1.Start();
+  t2.Start();
+
+  // Make something available.
+  semaphore.Signal();
+
+  t1.Join();
+  t2.Join();
+
+  semaphore.Wait();
+
+  EXPECT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1)));
+}
+
+
+TEST(Semaphore, WaitFor) {
+  Semaphore semaphore(0);
+
+  // Semaphore not signalled - timeout.
+  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
+  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
+  ASSERT_FALSE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
+
+  // Semaphore signalled - no timeout.
+  semaphore.Signal();
+  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(0)));
+  semaphore.Signal();
+  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(100)));
+  semaphore.Signal();
+  ASSERT_TRUE(semaphore.WaitFor(TimeDelta::FromMicroseconds(1000)));
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/semaphore.cc b/src/base/platform/semaphore.cc
new file mode 100644
index 0000000..0679c00
--- /dev/null
+++ b/src/base/platform/semaphore.cc
@@ -0,0 +1,204 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/semaphore.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_init.h>
+#include <mach/task.h>
+#endif
+
+#include <errno.h>
+
+#include "src/base/logging.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+#if V8_OS_MACOSX
+
+Semaphore::Semaphore(int count) {
+  kern_return_t result = semaphore_create(
+      mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
+  DCHECK_EQ(KERN_SUCCESS, result);
+  USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+  kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
+  DCHECK_EQ(KERN_SUCCESS, result);
+  USE(result);
+}
+
+
+void Semaphore::Signal() {
+  kern_return_t result = semaphore_signal(native_handle_);
+  DCHECK_EQ(KERN_SUCCESS, result);
+  USE(result);
+}
+
+
+void Semaphore::Wait() {
+  while (true) {
+    kern_return_t result = semaphore_wait(native_handle_);
+    if (result == KERN_SUCCESS) return;  // Semaphore was signalled.
+    DCHECK_EQ(KERN_ABORTED, result);
+  }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+  TimeTicks now = TimeTicks::Now();
+  TimeTicks end = now + rel_time;
+  while (true) {
+    mach_timespec_t ts;
+    if (now >= end) {
+      // Return immediately if semaphore was not signalled.
+      ts.tv_sec = 0;
+      ts.tv_nsec = 0;
+    } else {
+      ts = (end - now).ToMachTimespec();
+    }
+    kern_return_t result = semaphore_timedwait(native_handle_, ts);
+    if (result == KERN_SUCCESS) return true;  // Semaphore was signalled.
+    if (result == KERN_OPERATION_TIMED_OUT) return false;  // Timeout.
+    DCHECK_EQ(KERN_ABORTED, result);
+    now = TimeTicks::Now();
+  }
+}
+
+#elif V8_OS_POSIX
+
+Semaphore::Semaphore(int count) {
+  DCHECK(count >= 0);
+  int result = sem_init(&native_handle_, 0, count);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+Semaphore::~Semaphore() {
+  int result = sem_destroy(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void Semaphore::Signal() {
+  int result = sem_post(&native_handle_);
+  DCHECK_EQ(0, result);
+  USE(result);
+}
+
+
+void Semaphore::Wait() {
+  while (true) {
+    int result = sem_wait(&native_handle_);
+    if (result == 0) return;  // Semaphore was signalled.
+    // Signal caused spurious wakeup.
+    DCHECK_EQ(-1, result);
+    DCHECK_EQ(EINTR, errno);
+  }
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+#if V8_OS_NACL
+  // PNaCL doesn't support sem_timedwait, so do ugly busy waiting.
+  ElapsedTimer timer;
+  timer.Start();
+  do {
+    int result = sem_trywait(&native_handle_);
+    if (result == 0) return true;
+    DCHECK(errno == EAGAIN || errno == EINTR);
+  } while (!timer.HasExpired(rel_time));
+  return false;
+#else
+  // Compute the time for end of timeout.
+  const Time time = Time::NowFromSystemTime() + rel_time;
+  const struct timespec ts = time.ToTimespec();
+
+  // Wait for semaphore signalled or timeout.
+  while (true) {
+    int result = sem_timedwait(&native_handle_, &ts);
+    if (result == 0) return true;  // Semaphore was signalled.
+#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
+    if (result > 0) {
+      // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1.
+      errno = result;
+      result = -1;
+    }
+#endif
+    if (result == -1 && errno == ETIMEDOUT) {
+      // Timed out while waiting for semaphore.
+      return false;
+    }
+    // Signal caused spurious wakeup.
+    DCHECK_EQ(-1, result);
+    DCHECK_EQ(EINTR, errno);
+  }
+#endif
+}
+
+#elif V8_OS_WIN
+
+Semaphore::Semaphore(int count) {
+  DCHECK(count >= 0);
+  native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
+  DCHECK(native_handle_ != NULL);
+}
+
+
+Semaphore::~Semaphore() {
+  BOOL result = CloseHandle(native_handle_);
+  DCHECK(result);
+  USE(result);
+}
+
+
+void Semaphore::Signal() {
+  LONG dummy;
+  BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
+  DCHECK(result);
+  USE(result);
+}
+
+
+void Semaphore::Wait() {
+  DWORD result = WaitForSingleObject(native_handle_, INFINITE);
+  DCHECK(result == WAIT_OBJECT_0);
+  USE(result);
+}
+
+
+bool Semaphore::WaitFor(const TimeDelta& rel_time) {
+  TimeTicks now = TimeTicks::Now();
+  TimeTicks end = now + rel_time;
+  while (true) {
+    int64_t msec = (end - now).InMilliseconds();
+    if (msec >= static_cast<int64_t>(INFINITE)) {
+      DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1);
+      if (result == WAIT_OBJECT_0) {
+        return true;
+      }
+      DCHECK(result == WAIT_TIMEOUT);
+      now = TimeTicks::Now();
+    } else {
+      DWORD result = WaitForSingleObject(
+          native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec));
+      if (result == WAIT_TIMEOUT) {
+        return false;
+      }
+      DCHECK(result == WAIT_OBJECT_0);
+      return true;
+    }
+  }
+}
+
+#endif  // V8_OS_MACOSX
+
+} }  // namespace v8::base
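A standalone sketch of the absolute-deadline pattern that Semaphore::WaitFor()
builds on (plain POSIX here, not the V8 wrapper above; error handling elided):

#include <errno.h>
#include <semaphore.h>
#include <time.h>

// Wait on |sem| for at most |ms| milliseconds. Returns true if the semaphore
// was signalled, false on timeout.
bool WaitForMs(sem_t* sem, long ms) {
  struct timespec ts;
  clock_gettime(CLOCK_REALTIME, &ts);  // sem_timedwait expects CLOCK_REALTIME.
  ts.tv_sec += ms / 1000;
  ts.tv_nsec += (ms % 1000) * 1000000L;
  if (ts.tv_nsec >= 1000000000L) {  // Normalize the nanosecond field.
    ts.tv_sec += 1;
    ts.tv_nsec -= 1000000000L;
  }
  for (;;) {
    if (sem_timedwait(sem, &ts) == 0) return true;  // Signalled.
    if (errno == ETIMEDOUT) return false;           // Deadline passed.
    // errno == EINTR: interrupted by a signal; retry with the same deadline,
    // which is why the absolute timespec is computed only once.
  }
}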
diff --git a/src/base/platform/semaphore.h b/src/base/platform/semaphore.h
new file mode 100644
index 0000000..cbf8df2
--- /dev/null
+++ b/src/base/platform/semaphore.h
@@ -0,0 +1,101 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_SEMAPHORE_H_
+#define V8_BASE_PLATFORM_SEMAPHORE_H_
+
+#include "src/base/lazy-instance.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+#if V8_OS_MACOSX
+#include <mach/semaphore.h>  // NOLINT
+#elif V8_OS_POSIX
+#include <semaphore.h>  // NOLINT
+#endif
+
+namespace v8 {
+namespace base {
+
+// Forward declarations.
+class TimeDelta;
+
+// ----------------------------------------------------------------------------
+// Semaphore
+//
+// A semaphore object is a synchronization object that maintains a count. The
+// count is decremented each time a thread completes a wait for the semaphore
+// object and incremented each time a thread signals the semaphore. When the
+// count reaches zero, threads waiting for the semaphore block until the
+// count becomes non-zero.
+
+class Semaphore FINAL {
+ public:
+  explicit Semaphore(int count);
+  ~Semaphore();
+
+  // Increments the semaphore counter.
+  void Signal();
+
+  // Suspends the calling thread until the semaphore counter is non-zero
+  // and then decrements the semaphore counter.
+  void Wait();
+
+  // Suspends the calling thread until the counter is non-zero or the timeout
+  // has expired. If the timeout happens, the return value is false and the
+  // counter is unchanged. Otherwise the semaphore counter is decremented and
+  // true is returned.
+  bool WaitFor(const TimeDelta& rel_time) WARN_UNUSED_RESULT;
+
+#if V8_OS_MACOSX
+  typedef semaphore_t NativeHandle;
+#elif V8_OS_POSIX
+  typedef sem_t NativeHandle;
+#elif V8_OS_WIN
+  typedef HANDLE NativeHandle;
+#endif
+
+  NativeHandle& native_handle() {
+    return native_handle_;
+  }
+  const NativeHandle& native_handle() const {
+    return native_handle_;
+  }
+
+ private:
+  NativeHandle native_handle_;
+
+  DISALLOW_COPY_AND_ASSIGN(Semaphore);
+};
+
+
+// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
+// Usage:
+//   // The following semaphore starts at 0.
+//   static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
+//
+//   void my_function() {
+//     // Do something with my_semaphore.Pointer().
+//   }
+//
+
+template <int N>
+struct CreateSemaphoreTrait {
+  static Semaphore* Create() {
+    return new Semaphore(N);
+  }
+};
+
+template <int N>
+struct LazySemaphore {
+  typedef typename LazyDynamicInstance<Semaphore, CreateSemaphoreTrait<N>,
+                                       ThreadSafeInitOnceTrait>::type type;
+};
+
+#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_SEMAPHORE_H_
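A hedged usage sketch for the API declared above (the handoff scenario and
names are illustrative, not from the patch):

#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"

using v8::base::Semaphore;
using v8::base::TimeDelta;

Semaphore ready(0);  // Starts at 0: Wait() blocks until someone signals.

void Producer() {
  // ... publish a work item ...
  ready.Signal();  // Increment the counter, waking one waiter.
}

bool ConsumeOrGiveUp() {
  // Block for at most 100ms; false means nothing was produced in time.
  if (!ready.WaitFor(TimeDelta::FromMilliseconds(100))) return false;
  // ... consume the work item ...
  return true;
}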
diff --git a/src/base/platform/time-unittest.cc b/src/base/platform/time-unittest.cc
new file mode 100644
index 0000000..b3bfbab
--- /dev/null
+++ b/src/base/platform/time-unittest.cc
@@ -0,0 +1,186 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/time.h"
+
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+#if V8_OS_POSIX
+#include <sys/time.h>
+#endif
+
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+#include "src/base/platform/elapsed-timer.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+TEST(TimeDelta, FromAndIn) {
+  EXPECT_EQ(TimeDelta::FromDays(2), TimeDelta::FromHours(48));
+  EXPECT_EQ(TimeDelta::FromHours(3), TimeDelta::FromMinutes(180));
+  EXPECT_EQ(TimeDelta::FromMinutes(2), TimeDelta::FromSeconds(120));
+  EXPECT_EQ(TimeDelta::FromSeconds(2), TimeDelta::FromMilliseconds(2000));
+  EXPECT_EQ(TimeDelta::FromMilliseconds(2), TimeDelta::FromMicroseconds(2000));
+  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromDays(13).InDays());
+  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromHours(13).InHours());
+  EXPECT_EQ(static_cast<int>(13), TimeDelta::FromMinutes(13).InMinutes());
+  EXPECT_EQ(static_cast<int64_t>(13), TimeDelta::FromSeconds(13).InSeconds());
+  EXPECT_DOUBLE_EQ(13.0, TimeDelta::FromSeconds(13).InSecondsF());
+  EXPECT_EQ(static_cast<int64_t>(13),
+            TimeDelta::FromMilliseconds(13).InMilliseconds());
+  EXPECT_DOUBLE_EQ(13.0, TimeDelta::FromMilliseconds(13).InMillisecondsF());
+  EXPECT_EQ(static_cast<int64_t>(13),
+            TimeDelta::FromMicroseconds(13).InMicroseconds());
+}
+
+
+#if V8_OS_MACOSX
+TEST(TimeDelta, MachTimespec) {
+  TimeDelta null = TimeDelta();
+  EXPECT_EQ(null, TimeDelta::FromMachTimespec(null.ToMachTimespec()));
+  TimeDelta delta1 = TimeDelta::FromMilliseconds(42);
+  EXPECT_EQ(delta1, TimeDelta::FromMachTimespec(delta1.ToMachTimespec()));
+  TimeDelta delta2 = TimeDelta::FromDays(42);
+  EXPECT_EQ(delta2, TimeDelta::FromMachTimespec(delta2.ToMachTimespec()));
+}
+#endif
+
+
+TEST(Time, JsTime) {
+  Time t = Time::FromJsTime(700000.3);
+  EXPECT_DOUBLE_EQ(700000.3, t.ToJsTime());
+}
+
+
+#if V8_OS_POSIX
+TEST(Time, Timespec) {
+  Time null;
+  EXPECT_TRUE(null.IsNull());
+  EXPECT_EQ(null, Time::FromTimespec(null.ToTimespec()));
+  Time now = Time::Now();
+  EXPECT_EQ(now, Time::FromTimespec(now.ToTimespec()));
+  Time now_sys = Time::NowFromSystemTime();
+  EXPECT_EQ(now_sys, Time::FromTimespec(now_sys.ToTimespec()));
+  Time unix_epoch = Time::UnixEpoch();
+  EXPECT_EQ(unix_epoch, Time::FromTimespec(unix_epoch.ToTimespec()));
+  Time max = Time::Max();
+  EXPECT_TRUE(max.IsMax());
+  EXPECT_EQ(max, Time::FromTimespec(max.ToTimespec()));
+}
+
+
+TEST(Time, Timeval) {
+  Time null;
+  EXPECT_TRUE(null.IsNull());
+  EXPECT_EQ(null, Time::FromTimeval(null.ToTimeval()));
+  Time now = Time::Now();
+  EXPECT_EQ(now, Time::FromTimeval(now.ToTimeval()));
+  Time now_sys = Time::NowFromSystemTime();
+  EXPECT_EQ(now_sys, Time::FromTimeval(now_sys.ToTimeval()));
+  Time unix_epoch = Time::UnixEpoch();
+  EXPECT_EQ(unix_epoch, Time::FromTimeval(unix_epoch.ToTimeval()));
+  Time max = Time::Max();
+  EXPECT_TRUE(max.IsMax());
+  EXPECT_EQ(max, Time::FromTimeval(max.ToTimeval()));
+}
+#endif
+
+
+#if V8_OS_WIN
+TEST(Time, Filetime) {
+  Time null;
+  EXPECT_TRUE(null.IsNull());
+  EXPECT_EQ(null, Time::FromFiletime(null.ToFiletime()));
+  Time now = Time::Now();
+  EXPECT_EQ(now, Time::FromFiletime(now.ToFiletime()));
+  Time now_sys = Time::NowFromSystemTime();
+  EXPECT_EQ(now_sys, Time::FromFiletime(now_sys.ToFiletime()));
+  Time unix_epoch = Time::UnixEpoch();
+  EXPECT_EQ(unix_epoch, Time::FromFiletime(unix_epoch.ToFiletime()));
+  Time max = Time::Max();
+  EXPECT_TRUE(max.IsMax());
+  EXPECT_EQ(max, Time::FromFiletime(max.ToFiletime()));
+}
+#endif
+
+
+namespace {
+
+template <typename T>
+static void ResolutionTest(T (*Now)(), TimeDelta target_granularity) {
+  // We're trying to measure that intervals increment in a VERY small amount
+  // of time -- according to the specified target granularity. Unfortunately,
+  // if we happen to have a context switch in the middle of our test, the
+  // context switch could easily exceed our limit. So, we iterate on this
+  // several times. As long as we're able to detect the fine-granularity
+  // timers at least once, then the test has succeeded.
+  static const TimeDelta kExpirationTimeout = TimeDelta::FromSeconds(1);
+  ElapsedTimer timer;
+  timer.Start();
+  TimeDelta delta;
+  do {
+    T start = Now();
+    T now = start;
+    // Loop until we can detect that the clock has changed. Non-HighRes timers
+    // will increment in chunks, e.g. 15ms. By spinning until we see a clock
+    // change, we detect the minimum time between measurements.
+    do {
+      now = Now();
+      delta = now - start;
+    } while (now <= start);
+    EXPECT_NE(static_cast<int64_t>(0), delta.InMicroseconds());
+  } while (delta > target_granularity && !timer.HasExpired(kExpirationTimeout));
+  EXPECT_LE(delta, target_granularity);
+}
+
+}  // namespace
+
+
+TEST(Time, NowResolution) {
+  // We assume that Time::Now() has at least 16ms resolution.
+  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+  ResolutionTest<Time>(&Time::Now, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, NowResolution) {
+  // We assume that TimeTicks::Now() has at least 16ms resolution.
+  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
+  ResolutionTest<TimeTicks>(&TimeTicks::Now, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, HighResolutionNowResolution) {
+  if (!TimeTicks::IsHighResolutionClockWorking()) return;
+
+  // We assume that TimeTicks::HighResolutionNow() has sub-ms resolution.
+  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(1);
+  ResolutionTest<TimeTicks>(&TimeTicks::HighResolutionNow, kTargetGranularity);
+}
+
+
+TEST(TimeTicks, IsMonotonic) {
+  TimeTicks previous_normal_ticks;
+  TimeTicks previous_highres_ticks;
+  ElapsedTimer timer;
+  timer.Start();
+  while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
+    TimeTicks normal_ticks = TimeTicks::Now();
+    TimeTicks highres_ticks = TimeTicks::HighResolutionNow();
+    EXPECT_GE(normal_ticks, previous_normal_ticks);
+    EXPECT_GE((normal_ticks - previous_normal_ticks).InMicroseconds(), 0);
+    EXPECT_GE(highres_ticks, previous_highres_ticks);
+    EXPECT_GE((highres_ticks - previous_highres_ticks).InMicroseconds(), 0);
+    previous_normal_ticks = normal_ticks;
+    previous_highres_ticks = highres_ticks;
+  }
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/platform/time.cc b/src/base/platform/time.cc
new file mode 100644
index 0000000..d47ccaf
--- /dev/null
+++ b/src/base/platform/time.cc
@@ -0,0 +1,654 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/platform/time.h"
+
+#if V8_OS_POSIX
+#include <fcntl.h>  // for O_RDONLY
+#include <sys/time.h>
+#include <unistd.h>
+#endif
+#if V8_OS_MACOSX
+#include <mach/mach_time.h>
+#endif
+
+#include <string.h>
+
+#if V8_OS_WIN
+#include "src/base/lazy-instance.h"
+#include "src/base/win32-headers.h"
+#endif
+#include "src/base/cpu.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace base {
+
+TimeDelta TimeDelta::FromDays(int days) {
+  return TimeDelta(days * Time::kMicrosecondsPerDay);
+}
+
+
+TimeDelta TimeDelta::FromHours(int hours) {
+  return TimeDelta(hours * Time::kMicrosecondsPerHour);
+}
+
+
+TimeDelta TimeDelta::FromMinutes(int minutes) {
+  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
+}
+
+
+TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
+  return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
+}
+
+
+TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
+  return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
+}
+
+
+TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
+  return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
+}
+
+
+int TimeDelta::InDays() const {
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
+}
+
+
+int TimeDelta::InHours() const {
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
+}
+
+
+int TimeDelta::InMinutes() const {
+  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
+}
+
+
+double TimeDelta::InSecondsF() const {
+  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
+}
+
+
+int64_t TimeDelta::InSeconds() const {
+  return delta_ / Time::kMicrosecondsPerSecond;
+}
+
+
+double TimeDelta::InMillisecondsF() const {
+  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InMilliseconds() const {
+  return delta_ / Time::kMicrosecondsPerMillisecond;
+}
+
+
+int64_t TimeDelta::InNanoseconds() const {
+  return delta_ * Time::kNanosecondsPerMicrosecond;
+}
+
+
+#if V8_OS_MACOSX
+
+TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
+  DCHECK_GE(ts.tv_nsec, 0);
+  DCHECK_LT(ts.tv_nsec,
+            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
+  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct mach_timespec TimeDelta::ToMachTimespec() const {
+  struct mach_timespec ts;
+  DCHECK(delta_ >= 0);
+  ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+      Time::kNanosecondsPerMicrosecond;
+  return ts;
+}
+
+#endif  // V8_OS_MACOSX
+
+
+#if V8_OS_POSIX
+
+TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
+  DCHECK_GE(ts.tv_nsec, 0);
+  DCHECK_LT(ts.tv_nsec,
+            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
+  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
+                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec TimeDelta::ToTimespec() const {
+  struct timespec ts;
+  ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
+  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
+      Time::kNanosecondsPerMicrosecond;
+  return ts;
+}
+
+#endif  // V8_OS_POSIX
+
+
+#if V8_OS_WIN
+
+// We implement time using the high-resolution timers so that we can get
+// timeouts which are smaller than 10-15ms. To avoid any drift, we
+// periodically resync the internal clock to the system clock.
+class Clock FINAL {
+ public:
+  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
+
+  Time Now() {
+    // Time between resampling the un-granular clock for this API (1 minute).
+    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
+
+    LockGuard<Mutex> lock_guard(&mutex_);
+
+    // Determine current time and ticks.
+    TimeTicks ticks = GetSystemTicks();
+    Time time = GetSystemTime();
+
+    // Check if we need to synchronize with the system clock due to a backwards
+    // time change or the amount of time elapsed.
+    TimeDelta elapsed = ticks - initial_ticks_;
+    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
+      initial_ticks_ = ticks;
+      initial_time_ = time;
+      return time;
+    }
+
+    return initial_time_ + elapsed;
+  }
+
+  Time NowFromSystemTime() {
+    LockGuard<Mutex> lock_guard(&mutex_);
+    initial_ticks_ = GetSystemTicks();
+    initial_time_ = GetSystemTime();
+    return initial_time_;
+  }
+
+ private:
+  static TimeTicks GetSystemTicks() {
+    return TimeTicks::Now();
+  }
+
+  static Time GetSystemTime() {
+    FILETIME ft;
+    ::GetSystemTimeAsFileTime(&ft);
+    return Time::FromFiletime(ft);
+  }
+
+  TimeTicks initial_ticks_;
+  Time initial_time_;
+  Mutex mutex_;
+};
+
+
+static LazyStaticInstance<Clock, DefaultConstructTrait<Clock>,
+                          ThreadSafeInitOnceTrait>::type clock =
+    LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+Time Time::Now() {
+  return clock.Pointer()->Now();
+}
+
+
+Time Time::NowFromSystemTime() {
+  return clock.Pointer()->NowFromSystemTime();
+}
+
+
+// Time between windows epoch and standard epoch.
+static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
+
+
+Time Time::FromFiletime(FILETIME ft) {
+  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
+    return Time();
+  }
+  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
+      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
+    return Max();
+  }
+  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
+                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
+  return Time(us - kTimeToEpochInMicroseconds);
+}
+
+
+FILETIME Time::ToFiletime() const {
+  DCHECK(us_ >= 0);
+  FILETIME ft;
+  if (IsNull()) {
+    ft.dwLowDateTime = 0;
+    ft.dwHighDateTime = 0;
+    return ft;
+  }
+  if (IsMax()) {
+    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
+    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
+    return ft;
+  }
+  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
+  ft.dwLowDateTime = static_cast<DWORD>(us);
+  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
+  return ft;
+}
+
+#elif V8_OS_POSIX
+
+Time Time::Now() {
+  struct timeval tv;
+  int result = gettimeofday(&tv, NULL);
+  DCHECK_EQ(0, result);
+  USE(result);
+  return FromTimeval(tv);
+}
+
+
+Time Time::NowFromSystemTime() {
+  return Now();
+}
+
+
+Time Time::FromTimespec(struct timespec ts) {
+  DCHECK(ts.tv_nsec >= 0);
+  DCHECK(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond));  // NOLINT
+  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
+    return Time();
+  }
+  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
+      ts.tv_sec == std::numeric_limits<time_t>::max()) {
+    return Max();
+  }
+  return Time(ts.tv_sec * kMicrosecondsPerSecond +
+              ts.tv_nsec / kNanosecondsPerMicrosecond);
+}
+
+
+struct timespec Time::ToTimespec() const {
+  struct timespec ts;
+  if (IsNull()) {
+    ts.tv_sec = 0;
+    ts.tv_nsec = 0;
+    return ts;
+  }
+  if (IsMax()) {
+    ts.tv_sec = std::numeric_limits<time_t>::max();
+    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
+    return ts;
+  }
+  ts.tv_sec = us_ / kMicrosecondsPerSecond;
+  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
+  return ts;
+}
+
+
+Time Time::FromTimeval(struct timeval tv) {
+  DCHECK(tv.tv_usec >= 0);
+  DCHECK(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
+  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
+    return Time();
+  }
+  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
+      tv.tv_sec == std::numeric_limits<time_t>::max()) {
+    return Max();
+  }
+  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
+}
+
+
+struct timeval Time::ToTimeval() const {
+  struct timeval tv;
+  if (IsNull()) {
+    tv.tv_sec = 0;
+    tv.tv_usec = 0;
+    return tv;
+  }
+  if (IsMax()) {
+    tv.tv_sec = std::numeric_limits<time_t>::max();
+    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
+    return tv;
+  }
+  tv.tv_sec = us_ / kMicrosecondsPerSecond;
+  tv.tv_usec = us_ % kMicrosecondsPerSecond;
+  return tv;
+}
+
+#endif  // V8_OS_WIN
+
+
+Time Time::FromJsTime(double ms_since_epoch) {
+  // The epoch is a valid time, so this constructor doesn't interpret
+  // 0 as the null time.
+  if (ms_since_epoch == std::numeric_limits<double>::max()) {
+    return Max();
+  }
+  return Time(
+      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
+}
+
+
+double Time::ToJsTime() const {
+  if (IsNull()) {
+    // Preserve 0 so the invalid result doesn't depend on the platform.
+    return 0;
+  }
+  if (IsMax()) {
+    // Preserve max without offset to prevent overflow.
+    return std::numeric_limits<double>::max();
+  }
+  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
+}
+
+
+#if V8_OS_WIN
+
+class TickClock {
+ public:
+  virtual ~TickClock() {}
+  virtual int64_t Now() = 0;
+  virtual bool IsHighResolution() = 0;
+};
+
+
+// Overview of time counters:
+// (1) CPU cycle counter. (Retrieved via RDTSC)
+// The CPU counter provides the highest resolution time stamp and is the least
+// expensive to retrieve. However, the CPU counter is unreliable and should not
+// be used in production. Its biggest issue is that it is per processor and it
+// is not synchronized between processors. Also, on some computers, the counters
+// will change frequency due to thermal and power changes, and stop in some
+// states.
+//
+// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
+// resolution (100 nanoseconds) time stamp but is comparatively more expensive
+// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
+// (with some help from ACPI).
+// According to http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
+// in the worst case, it gets the counter from the rollover interrupt on the
+// programmable interrupt timer. In best cases, the HAL may conclude that the
+// RDTSC counter runs at a constant frequency, then it uses that instead. On
+// multiprocessor machines, it will try to verify the values returned from
+// RDTSC on each processor are consistent with each other, and apply a handful
+// of workarounds for known buggy hardware. In other words, QPC is supposed to
+// give consistent result on a multiprocessor computer, but it is unreliable in
+// reality due to bugs in BIOS or HAL on some, especially old computers.
+// With recent updates on HAL and newer BIOS, QPC is getting more reliable but
+// it should be used with caution.
+//
+// (3) System time. The system time provides a low-resolution (typically 10ms
+// to 55ms) time stamp but is comparatively less expensive to
+// retrieve and more reliable.
+class HighResolutionTickClock FINAL : public TickClock {
+ public:
+  explicit HighResolutionTickClock(int64_t ticks_per_second)
+      : ticks_per_second_(ticks_per_second) {
+    DCHECK_LT(0, ticks_per_second);
+  }
+  virtual ~HighResolutionTickClock() {}
+
+  virtual int64_t Now() OVERRIDE {
+    LARGE_INTEGER now;
+    BOOL result = QueryPerformanceCounter(&now);
+    DCHECK(result);
+    USE(result);
+
+    // Intentionally calculate microseconds in a roundabout manner to avoid
+    // overflow and precision issues. Think twice before simplifying!
+    int64_t whole_seconds = now.QuadPart / ticks_per_second_;
+    int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
+    int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
+        ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
+
+    // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
+    // will never return 0.
+    return ticks + 1;
+  }
+
+  virtual bool IsHighResolution() OVERRIDE {
+    return true;
+  }
+
+ private:
+  int64_t ticks_per_second_;
+};
+
+
+class RolloverProtectedTickClock FINAL : public TickClock {
+ public:
+  // We initialize rollover_ms_ to 1 to ensure that we will never
+  // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
+  RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
+  virtual ~RolloverProtectedTickClock() {}
+
+  virtual int64_t Now() OVERRIDE {
+    LockGuard<Mutex> lock_guard(&mutex_);
+    // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
+    // every ~49.7 days. We try to track rollover ourselves, which works if
+    // TimeTicks::Now() is called at least every 49 days.
+    // Note that we do not use GetTickCount() here, since timeGetTime() gives
+    // more predictable delta values, as described here:
+    // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
+    // timeGetTime() provides 1ms granularity when combined with
+    // timeBeginPeriod(). If the host application for V8 wants fast timers, it
+    // can use timeBeginPeriod() to increase the resolution.
+    DWORD now = timeGetTime();
+    if (now < last_seen_now_) {
+      rollover_ms_ += V8_INT64_C(0x100000000);  // ~49.7 days.
+    }
+    last_seen_now_ = now;
+    return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
+  }
+
+  virtual bool IsHighResolution() OVERRIDE {
+    return false;
+  }
+
+ private:
+  Mutex mutex_;
+  DWORD last_seen_now_;
+  int64_t rollover_ms_;
+};
+
+
+static LazyStaticInstance<RolloverProtectedTickClock,
+                          DefaultConstructTrait<RolloverProtectedTickClock>,
+                          ThreadSafeInitOnceTrait>::type tick_clock =
+    LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+struct CreateHighResTickClockTrait {
+  static TickClock* Create() {
+    // Check if the installed hardware supports a high-resolution performance
+    // counter, and if not fall back to the low-resolution tick clock.
+    LARGE_INTEGER ticks_per_second;
+    if (!QueryPerformanceFrequency(&ticks_per_second)) {
+      return tick_clock.Pointer();
+    }
+
+    // On Athlon X2 CPUs (e.g. model 15) the QueryPerformanceCounter
+    // is unreliable, so fall back to the low-resolution tick clock.
+    CPU cpu;
+    if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
+      return tick_clock.Pointer();
+    }
+
+    return new HighResolutionTickClock(ticks_per_second.QuadPart);
+  }
+};
+
+
+static LazyDynamicInstance<TickClock, CreateHighResTickClockTrait,
+                           ThreadSafeInitOnceTrait>::type high_res_tick_clock =
+    LAZY_DYNAMIC_INSTANCE_INITIALIZER;
+
+
+TimeTicks TimeTicks::Now() {
+  // Make sure we never return 0 here.
+  TimeTicks ticks(tick_clock.Pointer()->Now());
+  DCHECK(!ticks.IsNull());
+  return ticks;
+}
+
+
+TimeTicks TimeTicks::HighResolutionNow() {
+  // Make sure we never return 0 here.
+  TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
+  DCHECK(!ticks.IsNull());
+  return ticks;
+}
+
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+  return high_res_tick_clock.Pointer()->IsHighResolution();
+}
+
+
+// static
+TimeTicks TimeTicks::KernelTimestampNow() { return TimeTicks(0); }
+
+
+// static
+bool TimeTicks::KernelTimestampAvailable() { return false; }
+
+#else  // V8_OS_WIN
+
+TimeTicks TimeTicks::Now() {
+  return HighResolutionNow();
+}
+
+
+TimeTicks TimeTicks::HighResolutionNow() {
+  int64_t ticks;
+#if V8_OS_MACOSX
+  static struct mach_timebase_info info;
+  if (info.denom == 0) {
+    kern_return_t result = mach_timebase_info(&info);
+    DCHECK_EQ(KERN_SUCCESS, result);
+    USE(result);
+  }
+  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
+           info.numer / info.denom);
+#elif V8_OS_SOLARIS
+  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
+#elif V8_LIBRT_NOT_AVAILABLE
+  // TODO(bmeurer): This is a temporary hack to support cross-compiling
+  // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
+  // cleanup the tools/gyp/v8.gyp file.
+  struct timeval tv;
+  int result = gettimeofday(&tv, NULL);
+  DCHECK_EQ(0, result);
+  USE(result);
+  ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
+#elif V8_OS_POSIX
+  struct timespec ts;
+  int result = clock_gettime(CLOCK_MONOTONIC, &ts);
+  DCHECK_EQ(0, result);
+  USE(result);
+  ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
+           ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
+#endif  // V8_OS_MACOSX
+  // Make sure we never return 0 here.
+  return TimeTicks(ticks + 1);
+}
+
+
+// static
+bool TimeTicks::IsHighResolutionClockWorking() {
+  return true;
+}
+
+
+#if V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE
+
+class KernelTimestampClock {
+ public:
+  KernelTimestampClock() : clock_fd_(-1), clock_id_(kClockInvalid) {
+    clock_fd_ = open(kTraceClockDevice, O_RDONLY);
+    if (clock_fd_ == -1) {
+      return;
+    }
+    clock_id_ = get_clockid(clock_fd_);
+  }
+
+  virtual ~KernelTimestampClock() {
+    if (clock_fd_ != -1) {
+      close(clock_fd_);
+    }
+  }
+
+  int64_t Now() {
+    if (clock_id_ == kClockInvalid) {
+      return 0;
+    }
+
+    struct timespec ts;
+
+    clock_gettime(clock_id_, &ts);
+    return ((int64_t)ts.tv_sec * kNsecPerSec) + ts.tv_nsec;
+  }
+
+  bool Available() { return clock_id_ != kClockInvalid; }
+
+ private:
+  static const clockid_t kClockInvalid = -1;
+  static const char kTraceClockDevice[];
+  static const uint64_t kNsecPerSec = 1000000000;
+
+  int clock_fd_;
+  clockid_t clock_id_;
+
+  static int get_clockid(int fd) { return ((~(clockid_t)(fd) << 3) | 3); }
+};
+
+
+// Timestamp module name
+const char KernelTimestampClock::kTraceClockDevice[] = "/dev/trace_clock";
+
+#else
+
+class KernelTimestampClock {
+ public:
+  KernelTimestampClock() {}
+
+  int64_t Now() { return 0; }
+  bool Available() { return false; }
+};
+
+#endif  // V8_OS_LINUX && !V8_LIBRT_NOT_AVAILABLE
+
+static LazyStaticInstance<KernelTimestampClock,
+                          DefaultConstructTrait<KernelTimestampClock>,
+                          ThreadSafeInitOnceTrait>::type kernel_tick_clock =
+    LAZY_STATIC_INSTANCE_INITIALIZER;
+
+
+// static
+TimeTicks TimeTicks::KernelTimestampNow() {
+  return TimeTicks(kernel_tick_clock.Pointer()->Now());
+}
+
+
+// static
+bool TimeTicks::KernelTimestampAvailable() {
+  return kernel_tick_clock.Pointer()->Available();
+}
+
+#endif  // V8_OS_WIN
+
+} }  // namespace v8::base
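The rollover bookkeeping in RolloverProtectedTickClock above distills to this
standalone sketch (uint32_t stands in for the DWORD returned by timeGetTime();
the mutex is omitted):

#include <stdint.h>

// timeGetTime() wraps at 2^32 ms (~49.7 days). As long as this function is
// called at least once per wrap period, the running offset keeps the result
// monotonic.
static uint32_t last_seen_ms = 0;
static int64_t rollover_ms = 1;  // Start at 1 so the result is never 0.

int64_t MonotonicMs(uint32_t raw_now_ms) {
  if (raw_now_ms < last_seen_ms) {
    rollover_ms += INT64_C(0x100000000);  // One full 32-bit wrap.
  }
  last_seen_ms = raw_now_ms;
  return raw_now_ms + rollover_ms;
}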
diff --git a/src/base/platform/time.h b/src/base/platform/time.h
new file mode 100644
index 0000000..9dfa47d
--- /dev/null
+++ b/src/base/platform/time.h
@@ -0,0 +1,400 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_PLATFORM_TIME_H_
+#define V8_BASE_PLATFORM_TIME_H_
+
+#include <time.h>
+#include <limits>
+
+#include "src/base/macros.h"
+
+// Forward declarations.
+extern "C" {
+struct _FILETIME;
+struct mach_timespec;
+struct timespec;
+struct timeval;
+}
+
+namespace v8 {
+namespace base {
+
+class Time;
+class TimeTicks;
+
+// -----------------------------------------------------------------------------
+// TimeDelta
+//
+// This class represents a duration of time, internally represented in
+// microseconds.
+
+class TimeDelta FINAL {
+ public:
+  TimeDelta() : delta_(0) {}
+
+  // Converts units of time to TimeDeltas.
+  static TimeDelta FromDays(int days);
+  static TimeDelta FromHours(int hours);
+  static TimeDelta FromMinutes(int minutes);
+  static TimeDelta FromSeconds(int64_t seconds);
+  static TimeDelta FromMilliseconds(int64_t milliseconds);
+  static TimeDelta FromMicroseconds(int64_t microseconds) {
+    return TimeDelta(microseconds);
+  }
+  static TimeDelta FromNanoseconds(int64_t nanoseconds);
+
+  // Returns the time delta in some unit. The F versions return a floating
+  // point value; the "regular" versions return a rounded-down value.
+  //
+  // InMillisecondsRoundedUp() instead returns an integer that is rounded up
+  // to the next full millisecond.
+  int InDays() const;
+  int InHours() const;
+  int InMinutes() const;
+  double InSecondsF() const;
+  int64_t InSeconds() const;
+  double InMillisecondsF() const;
+  int64_t InMilliseconds() const;
+  int64_t InMillisecondsRoundedUp() const;
+  int64_t InMicroseconds() const { return delta_; }
+  int64_t InNanoseconds() const;
+
+  // Converts to/from Mach time specs.
+  static TimeDelta FromMachTimespec(struct mach_timespec ts);
+  struct mach_timespec ToMachTimespec() const;
+
+  // Converts to/from POSIX time specs.
+  static TimeDelta FromTimespec(struct timespec ts);
+  struct timespec ToTimespec() const;
+
+  TimeDelta& operator=(const TimeDelta& other) {
+    delta_ = other.delta_;
+    return *this;
+  }
+
+  // Computations with other deltas.
+  TimeDelta operator+(const TimeDelta& other) const {
+    return TimeDelta(delta_ + other.delta_);
+  }
+  TimeDelta operator-(const TimeDelta& other) const {
+    return TimeDelta(delta_ - other.delta_);
+  }
+
+  TimeDelta& operator+=(const TimeDelta& other) {
+    delta_ += other.delta_;
+    return *this;
+  }
+  TimeDelta& operator-=(const TimeDelta& other) {
+    delta_ -= other.delta_;
+    return *this;
+  }
+  TimeDelta operator-() const {
+    return TimeDelta(-delta_);
+  }
+
+  double TimesOf(const TimeDelta& other) const {
+    return static_cast<double>(delta_) / static_cast<double>(other.delta_);
+  }
+  double PercentOf(const TimeDelta& other) const {
+    return TimesOf(other) * 100.0;
+  }
+
+  // Computations with ints, note that we only allow multiplicative operations
+  // with ints, and additive operations with other deltas.
+  TimeDelta operator*(int64_t a) const {
+    return TimeDelta(delta_ * a);
+  }
+  TimeDelta operator/(int64_t a) const {
+    return TimeDelta(delta_ / a);
+  }
+  TimeDelta& operator*=(int64_t a) {
+    delta_ *= a;
+    return *this;
+  }
+  TimeDelta& operator/=(int64_t a) {
+    delta_ /= a;
+    return *this;
+  }
+  int64_t operator/(const TimeDelta& other) const {
+    return delta_ / other.delta_;
+  }
+
+  // Comparison operators.
+  bool operator==(const TimeDelta& other) const {
+    return delta_ == other.delta_;
+  }
+  bool operator!=(const TimeDelta& other) const {
+    return delta_ != other.delta_;
+  }
+  bool operator<(const TimeDelta& other) const {
+    return delta_ < other.delta_;
+  }
+  bool operator<=(const TimeDelta& other) const {
+    return delta_ <= other.delta_;
+  }
+  bool operator>(const TimeDelta& other) const {
+    return delta_ > other.delta_;
+  }
+  bool operator>=(const TimeDelta& other) const {
+    return delta_ >= other.delta_;
+  }
+
+ private:
+  // Constructs a delta given the duration in microseconds. This is private
+  // to avoid confusion by callers with an integer constructor. Use
+  // FromSeconds, FromMilliseconds, etc. instead.
+  explicit TimeDelta(int64_t delta) : delta_(delta) {}
+
+  // Delta in microseconds.
+  int64_t delta_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Time
+//
+// This class represents an absolute point in time, internally represented as
+// microseconds (s/1,000,000) since 00:00:00 UTC, January 1, 1970.
+
+class Time FINAL {
+ public:
+  static const int64_t kMillisecondsPerSecond = 1000;
+  static const int64_t kMicrosecondsPerMillisecond = 1000;
+  static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
+                                                kMillisecondsPerSecond;
+  static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
+  static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
+  static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
+  static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
+  static const int64_t kNanosecondsPerMicrosecond = 1000;
+  static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
+                                               kMicrosecondsPerSecond;
+
+  // Contains the NULL time. Use Time::Now() to get the current time.
+  Time() : us_(0) {}
+
+  // Returns true if the time object has not been initialized.
+  bool IsNull() const { return us_ == 0; }
+
+  // Returns true if the time object is the maximum time.
+  bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
+
+  // Returns the current time. Watch out, the system might adjust its clock
+  // in which case time will actually go backwards. We don't guarantee that
+  // times are increasing, or that two calls to Now() won't be the same.
+  static Time Now();
+
+  // Returns the current time. Same as Now() except that this function always
+  // uses system time so that there are no discrepancies between the returned
+  // time and system time even on virtual environments including our test bot.
+  // For timing sensitive unittests, this function should be used.
+  static Time NowFromSystemTime();
+
+  // Returns the time for the epoch in Unix-like systems (Jan 1, 1970).
+  static Time UnixEpoch() { return Time(0); }
+
+  // Returns the maximum time, which should be greater than any reasonable time
+  // with which we might compare it.
+  static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
+
+  // Converts to/from internal values. The meaning of the "internal value" is
+  // completely up to the implementation, so it should be treated as opaque.
+  static Time FromInternalValue(int64_t value) {
+    return Time(value);
+  }
+  int64_t ToInternalValue() const {
+    return us_;
+  }
+
+  // Converts to/from POSIX time specs.
+  static Time FromTimespec(struct timespec ts);
+  struct timespec ToTimespec() const;
+
+  // Converts to/from POSIX time values.
+  static Time FromTimeval(struct timeval tv);
+  struct timeval ToTimeval() const;
+
+  // Converts to/from Windows file times.
+  static Time FromFiletime(struct _FILETIME ft);
+  struct _FILETIME ToFiletime() const;
+
+  // Converts to/from the JavaScript convention for times, a number of
+  // milliseconds since the epoch.
+  static Time FromJsTime(double ms_since_epoch);
+  double ToJsTime() const;
+
+  Time& operator=(const Time& other) {
+    us_ = other.us_;
+    return *this;
+  }
+
+  // Compute the difference between two times.
+  TimeDelta operator-(const Time& other) const {
+    return TimeDelta::FromMicroseconds(us_ - other.us_);
+  }
+
+  // Modify by some time delta.
+  Time& operator+=(const TimeDelta& delta) {
+    us_ += delta.InMicroseconds();
+    return *this;
+  }
+  Time& operator-=(const TimeDelta& delta) {
+    us_ -= delta.InMicroseconds();
+    return *this;
+  }
+
+  // Return a new time modified by some delta.
+  Time operator+(const TimeDelta& delta) const {
+    return Time(us_ + delta.InMicroseconds());
+  }
+  Time operator-(const TimeDelta& delta) const {
+    return Time(us_ - delta.InMicroseconds());
+  }
+
+  // Comparison operators
+  bool operator==(const Time& other) const {
+    return us_ == other.us_;
+  }
+  bool operator!=(const Time& other) const {
+    return us_ != other.us_;
+  }
+  bool operator<(const Time& other) const {
+    return us_ < other.us_;
+  }
+  bool operator<=(const Time& other) const {
+    return us_ <= other.us_;
+  }
+  bool operator>(const Time& other) const {
+    return us_ > other.us_;
+  }
+  bool operator>=(const Time& other) const {
+    return us_ >= other.us_;
+  }
+
+ private:
+  explicit Time(int64_t us) : us_(us) {}
+
+  // Time in microseconds in UTC.
+  int64_t us_;
+};
+
+inline Time operator+(const TimeDelta& delta, const Time& time) {
+  return time + delta;
+}
+
+
+// -----------------------------------------------------------------------------
+// TimeTicks
+//
+// This class represents an abstract time that is usually incrementing, for
+// use in measuring time durations. It is internally represented in
+// microseconds.  It cannot be converted to a human-readable time, but is
+// guaranteed not to decrease (if the user changes the computer clock,
+// Time::Now() may actually decrease or jump).  But note that TimeTicks may
+// "stand still", for example if the computer is suspended.
+
+class TimeTicks FINAL {
+ public:
+  TimeTicks() : ticks_(0) {}
+
+  // Platform-dependent tick count representing "right now."
+  // The resolution of this clock is ~1-15ms.  Resolution varies depending
+  // on hardware/operating system configuration.
+  // This method never returns a null TimeTicks.
+  static TimeTicks Now();
+
+  // Returns a platform-dependent high-resolution tick count. Implementation
+  // is hardware dependent and may or may not return sub-millisecond
+  // resolution.  THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
+  // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
+  // This method never returns a null TimeTicks.
+  static TimeTicks HighResolutionNow();
+
+  // Returns true if the high-resolution clock is working on this system.
+  static bool IsHighResolutionClockWorking();
+
+  // Returns Linux kernel timestamp for generating profiler events. This method
+  // returns null TimeTicks if the kernel cannot provide the timestamps (e.g.,
+  // on non-Linux OS or if the kernel module for timestamps is not loaded).
+  static TimeTicks KernelTimestampNow();
+  static bool KernelTimestampAvailable();
+
+  // Returns true if this object has not been initialized.
+  bool IsNull() const { return ticks_ == 0; }
+
+  // Converts to/from internal values. The meaning of the "internal value" is
+  // completely up to the implementation, so it should be treated as opaque.
+  static TimeTicks FromInternalValue(int64_t value) {
+    return TimeTicks(value);
+  }
+  int64_t ToInternalValue() const {
+    return ticks_;
+  }
+
+  TimeTicks& operator=(const TimeTicks other) {
+    ticks_ = other.ticks_;
+    return *this;
+  }
+
+  // Compute the difference between two times.
+  TimeDelta operator-(const TimeTicks other) const {
+    return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
+  }
+
+  // Modify by some time delta.
+  TimeTicks& operator+=(const TimeDelta& delta) {
+    ticks_ += delta.InMicroseconds();
+    return *this;
+  }
+  TimeTicks& operator-=(const TimeDelta& delta) {
+    ticks_ -= delta.InMicroseconds();
+    return *this;
+  }
+
+  // Return a new TimeTicks modified by some delta.
+  TimeTicks operator+(const TimeDelta& delta) const {
+    return TimeTicks(ticks_ + delta.InMicroseconds());
+  }
+  TimeTicks operator-(const TimeDelta& delta) const {
+    return TimeTicks(ticks_ - delta.InMicroseconds());
+  }
+
+  // Comparison operators
+  bool operator==(const TimeTicks& other) const {
+    return ticks_ == other.ticks_;
+  }
+  bool operator!=(const TimeTicks& other) const {
+    return ticks_ != other.ticks_;
+  }
+  bool operator<(const TimeTicks& other) const {
+    return ticks_ < other.ticks_;
+  }
+  bool operator<=(const TimeTicks& other) const {
+    return ticks_ <= other.ticks_;
+  }
+  bool operator>(const TimeTicks& other) const {
+    return ticks_ > other.ticks_;
+  }
+  bool operator>=(const TimeTicks& other) const {
+    return ticks_ >= other.ticks_;
+  }
+
+ private:
+  // Please use Now() to create a new object. This is for internal use
+  // and testing. Ticks is in microseconds.
+  explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
+
+  // Tick count in microseconds.
+  int64_t ticks_;
+};
+
+inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
+  return ticks + delta;
+}
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_PLATFORM_TIME_H_
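A short usage sketch of the three classes declared above (illustrative only;
the include path is taken from the diff):

#include "src/base/platform/time.h"

using v8::base::Time;
using v8::base::TimeDelta;
using v8::base::TimeTicks;

void Example() {
  // Wall-clock time: comparable to calendar time, but may jump backwards.
  Time deadline = Time::Now() + TimeDelta::FromSeconds(30);

  // Monotonic time: the right choice for measuring durations.
  TimeTicks start = TimeTicks::Now();
  // ... do some work ...
  TimeDelta elapsed = TimeTicks::Now() - start;
  double ms = elapsed.InMillisecondsF();
  (void)deadline;
  (void)ms;
}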
diff --git a/src/base/qnx-math.h b/src/base/qnx-math.h
new file mode 100644
index 0000000..6ff18f8
--- /dev/null
+++ b/src/base/qnx-math.h
@@ -0,0 +1,19 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_QNX_MATH_H_
+#define V8_BASE_QNX_MATH_H_
+
+#include <cmath>
+
+#undef fpclassify
+#undef isfinite
+#undef isinf
+#undef isnan
+#undef isnormal
+#undef signbit
+
+using std::lrint;
+
+#endif  // V8_BASE_QNX_MATH_H_
diff --git a/src/base/safe_conversions_impl.h b/src/base/safe_conversions_impl.h
index 2226f17..90c8e19 100644
--- a/src/base/safe_conversions_impl.h
+++ b/src/base/safe_conversions_impl.h
@@ -10,6 +10,7 @@
 
 #include <limits>
 
+#include "src/base/logging.h"
 #include "src/base/macros.h"
 
 namespace v8 {
@@ -98,9 +99,8 @@
 
 // Helper function for coercing an int back to a RangeConstraint.
 inline RangeConstraint GetRangeConstraint(int integer_range_constraint) {
-  // TODO(jochen/jkummerow): Re-enable this when checks.h is available in base.
-  // ASSERT(integer_range_constraint >= RANGE_VALID &&
-  //        integer_range_constraint <= RANGE_INVALID);
+  DCHECK(integer_range_constraint >= RANGE_VALID &&
+         integer_range_constraint <= RANGE_INVALID);
   return static_cast<RangeConstraint>(integer_range_constraint);
 }
 
diff --git a/src/base/sys-info-unittest.cc b/src/base/sys-info-unittest.cc
new file mode 100644
index 0000000..a760f94
--- /dev/null
+++ b/src/base/sys-info-unittest.cc
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sys-info.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+#if V8_OS_NACL
+#define DISABLE_ON_NACL(Name) DISABLED_##Name
+#else
+#define DISABLE_ON_NACL(Name) Name
+#endif
+
+namespace v8 {
+namespace base {
+
+TEST(SysInfoTest, NumberOfProcessors) {
+  EXPECT_LT(0, SysInfo::NumberOfProcessors());
+}
+
+
+TEST(SysInfoTest, DISABLE_ON_NACL(AmountOfPhysicalMemory)) {
+  EXPECT_LT(0, SysInfo::AmountOfPhysicalMemory());
+}
+
+
+TEST(SysInfoTest, AmountOfVirtualMemory) {
+  EXPECT_LE(0, SysInfo::AmountOfVirtualMemory());
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/sys-info.cc b/src/base/sys-info.cc
new file mode 100644
index 0000000..06c4f24
--- /dev/null
+++ b/src/base/sys-info.cc
@@ -0,0 +1,125 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/sys-info.h"
+
+#if V8_OS_POSIX
+#include <sys/resource.h>
+#include <sys/stat.h>
+#include <sys/time.h>
+#include <sys/types.h>
+#include <unistd.h>
+#endif
+
+#if V8_OS_BSD
+#include <sys/sysctl.h>
+#endif
+
+#include <limits>
+
+#include "src/base/logging.h"
+#include "src/base/macros.h"
+#if V8_OS_WIN
+#include "src/base/win32-headers.h"
+#endif
+
+namespace v8 {
+namespace base {
+
+// static
+int SysInfo::NumberOfProcessors() {
+#if V8_OS_OPENBSD
+  int mib[2] = {CTL_HW, HW_NCPU};
+  int ncpu = 0;
+  size_t len = sizeof(ncpu);
+  if (sysctl(mib, arraysize(mib), &ncpu, &len, NULL, 0) != 0) {
+    UNREACHABLE();
+    return 1;
+  }
+  return ncpu;
+#elif V8_OS_POSIX
+  long result = sysconf(_SC_NPROCESSORS_ONLN);  // NOLINT(runtime/int)
+  if (result == -1) {
+    UNREACHABLE();
+    return 1;
+  }
+  return static_cast<int>(result);
+#elif V8_OS_WIN
+  SYSTEM_INFO system_info = {0};
+  ::GetNativeSystemInfo(&system_info);
+  return static_cast<int>(system_info.dwNumberOfProcessors);
+#endif
+}
+
+
+// static
+int64_t SysInfo::AmountOfPhysicalMemory() {
+#if V8_OS_MACOSX
+  int mib[2] = {CTL_HW, HW_MEMSIZE};
+  int64_t memsize = 0;
+  size_t len = sizeof(memsize);
+  if (sysctl(mib, arraysize(mib), &memsize, &len, NULL, 0) != 0) {
+    UNREACHABLE();
+    return 0;
+  }
+  return memsize;
+#elif V8_OS_FREEBSD
+  int pages, page_size;
+  size_t size = sizeof(pages);
+  sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
+  sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
+  if (pages == -1 || page_size == -1) {
+    UNREACHABLE();
+    return 0;
+  }
+  return static_cast<int64_t>(pages) * page_size;
+#elif V8_OS_CYGWIN || V8_OS_WIN
+  MEMORYSTATUSEX memory_info;
+  memory_info.dwLength = sizeof(memory_info);
+  if (!GlobalMemoryStatusEx(&memory_info)) {
+    UNREACHABLE();
+    return 0;
+  }
+  int64_t result = static_cast<int64_t>(memory_info.ullTotalPhys);
+  if (result < 0) result = std::numeric_limits<int64_t>::max();
+  return result;
+#elif V8_OS_QNX
+  struct stat stat_buf;
+  if (stat("/proc", &stat_buf) != 0) {
+    UNREACHABLE();
+    return 0;
+  }
+  return static_cast<int64_t>(stat_buf.st_size);
+#elif V8_OS_NACL
+  // No support for _SC_PHYS_PAGES, assume 2GB.
+  return static_cast<int64_t>(1) << 31;
+#elif V8_OS_POSIX
+  long pages = sysconf(_SC_PHYS_PAGES);    // NOLINT(runtime/int)
+  long page_size = sysconf(_SC_PAGESIZE);  // NOLINT(runtime/int)
+  if (pages == -1 || page_size == -1) {
+    UNREACHABLE();
+    return 0;
+  }
+  return static_cast<int64_t>(pages) * page_size;
+#endif
+}
+
+
+// static
+int64_t SysInfo::AmountOfVirtualMemory() {
+#if V8_OS_NACL || V8_OS_WIN
+  return 0;
+#elif V8_OS_POSIX
+  struct rlimit rlim;
+  int result = getrlimit(RLIMIT_DATA, &rlim);
+  if (result != 0) {
+    UNREACHABLE();
+    return 0;
+  }
+  return (rlim.rlim_cur == RLIM_INFINITY) ? 0 : rlim.rlim_cur;
+#endif
+}
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/sys-info.h b/src/base/sys-info.h
new file mode 100644
index 0000000..d1658fc
--- /dev/null
+++ b/src/base/sys-info.h
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_SYS_INFO_H_
+#define V8_BASE_SYS_INFO_H_
+
+#include "include/v8stdint.h"
+#include "src/base/compiler-specific.h"
+
+namespace v8 {
+namespace base {
+
+class SysInfo FINAL {
+ public:
+  // Returns the number of logical processors/cores on the current machine.
+  static int NumberOfProcessors();
+
+  // Returns the number of bytes of physical memory on the current machine.
+  static int64_t AmountOfPhysicalMemory();
+
+  // Returns the number of bytes of virtual memory of this process. A return
+  // value of zero means that there is no limit on the available virtual memory.
+  static int64_t AmountOfVirtualMemory();
+};
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_SYS_INFO_H_
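A minimal usage sketch of the SysInfo API declared above (the worker-pool
framing is illustrative):

#include "src/base/sys-info.h"

void SizeWorkerPool() {
  // One worker per logical processor is a common default.
  int workers = v8::base::SysInfo::NumberOfProcessors();
  // Zero means "no limit", per the comment in the header above.
  int64_t vm_limit = v8::base::SysInfo::AmountOfVirtualMemory();
  (void)workers;
  (void)vm_limit;
}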
diff --git a/src/base/utils/random-number-generator-unittest.cc b/src/base/utils/random-number-generator-unittest.cc
new file mode 100644
index 0000000..7c533db
--- /dev/null
+++ b/src/base/utils/random-number-generator-unittest.cc
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <climits>
+
+#include "src/base/utils/random-number-generator.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace base {
+
+class RandomNumberGeneratorTest : public ::testing::TestWithParam<int> {};
+
+
+static const int kMaxRuns = 12345;
+
+
+TEST_P(RandomNumberGeneratorTest, NextIntWithMaxValue) {
+  RandomNumberGenerator rng(GetParam());
+  for (int max = 1; max <= kMaxRuns; ++max) {
+    int n = rng.NextInt(max);
+    EXPECT_LE(0, n);
+    EXPECT_LT(n, max);
+  }
+}
+
+
+TEST_P(RandomNumberGeneratorTest, NextBooleanReturnsFalseOrTrue) {
+  RandomNumberGenerator rng(GetParam());
+  for (int k = 0; k < kMaxRuns; ++k) {
+    bool b = rng.NextBool();
+    EXPECT_TRUE(b == false || b == true);
+  }
+}
+
+
+TEST_P(RandomNumberGeneratorTest, NextDoubleReturnsValueBetween0And1) {
+  RandomNumberGenerator rng(GetParam());
+  for (int k = 0; k < kMaxRuns; ++k) {
+    double d = rng.NextDouble();
+    EXPECT_LE(0.0, d);
+    EXPECT_LT(d, 1.0);
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(RandomSeeds, RandomNumberGeneratorTest,
+                        ::testing::Values(INT_MIN, -1, 0, 1, 42, 100,
+                                          1234567890, 987654321, INT_MAX));
+
+}  // namespace base
+}  // namespace v8
diff --git a/src/base/utils/random-number-generator.cc b/src/base/utils/random-number-generator.cc
new file mode 100644
index 0000000..9454936
--- /dev/null
+++ b/src/base/utils/random-number-generator.cc
@@ -0,0 +1,132 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <new>
+
+#include "src/base/macros.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/time.h"
+
+namespace v8 {
+namespace base {
+
+static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
+static RandomNumberGenerator::EntropySource entropy_source = NULL;
+
+
+// static
+void RandomNumberGenerator::SetEntropySource(EntropySource source) {
+  LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+  entropy_source = source;
+}
+
+
+RandomNumberGenerator::RandomNumberGenerator() {
+  // Check if embedder supplied an entropy source.
+  { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
+    if (entropy_source != NULL) {
+      int64_t seed;
+      if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
+                         sizeof(seed))) {
+        SetSeed(seed);
+        return;
+      }
+    }
+  }
+
+#if V8_OS_CYGWIN || V8_OS_WIN
+  // Use rand_s() to gather entropy on Windows. See:
+  // https://code.google.com/p/v8/issues/detail?id=2905
+  unsigned first_half, second_half;
+  errno_t result = rand_s(&first_half);
+  DCHECK_EQ(0, result);
+  result = rand_s(&second_half);
+  DCHECK_EQ(0, result);
+  SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
+#else
+  // Gather entropy from /dev/urandom if available.
+  FILE* fp = fopen("/dev/urandom", "rb");
+  if (fp != NULL) {
+    int64_t seed;
+    size_t n = fread(&seed, sizeof(seed), 1, fp);
+    fclose(fp);
+    if (n == 1) {
+      SetSeed(seed);
+      return;
+    }
+  }
+
+  // We cannot assume that random() or rand() were seeded
+  // properly, so instead of relying on random() or rand(),
+  // we just seed our PRNG using timing data as fallback.
+  // This is weak entropy, but it's sufficient, because
+  // it is the responsibility of the embedder to install
+  // an entropy source using v8::V8::SetEntropySource(),
+  // which provides reasonable entropy, see:
+  // https://code.google.com/p/v8/issues/detail?id=2905
+  int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
+  seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
+  seed ^= TimeTicks::Now().ToInternalValue() << 8;
+  SetSeed(seed);
+#endif  // V8_OS_CYGWIN || V8_OS_WIN
+}
+
+
+int RandomNumberGenerator::NextInt(int max) {
+  DCHECK_LE(0, max);
+
+  // Fast path if max is a power of 2.
+  if (IS_POWER_OF_TWO(max)) {
+    return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
+  }
+
+  while (true) {
+    int rnd = Next(31);
+    int val = rnd % max;
+    if (rnd - val + (max - 1) >= 0) {
+      return val;
+    }
+  }
+}
+
+
+double RandomNumberGenerator::NextDouble() {
+  return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
+      static_cast<double>(static_cast<int64_t>(1) << 53);
+}
+
+
+void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
+  for (size_t n = 0; n < buflen; ++n) {
+    static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
+  }
+}
+
+
+int RandomNumberGenerator::Next(int bits) {
+  DCHECK_LT(0, bits);
+  DCHECK_GE(32, bits);
+  // Do unsigned multiplication, which has the intended modulo semantics, while
+  // signed multiplication would expose undefined behavior.
+  uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
+  // Assigning a uint64_t to an int64_t is implementation defined, but this
+  // should be OK. Use a static_cast to explicitly state that we know what we're
+  // doing. (Famous last words...)
+  int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
+  seed_ = seed;
+  return static_cast<int>(seed >> (48 - bits));
+}
+
+
+void RandomNumberGenerator::SetSeed(int64_t seed) {
+  initial_seed_ = seed;
+  seed_ = (seed ^ kMultiplier) & kMask;
+}
+
+} }  // namespace v8::base
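The rejection test in NextInt() above is compact; here is the same logic with
the overflow condition spelled out (next31() is a hypothetical stand-in for
the private Next(31)):

int next31();  // Hypothetical: returns a uniform value in [0, 2^31).

// Returns a value uniform in [0, max). Plain modulo would favor small
// residues whenever 2^31 is not a multiple of max; the retry discards draws
// from the final, incomplete bucket instead.
int NextIntUnbiased(int max) {
  for (;;) {
    int rnd = next31();
    int val = rnd % max;
    // rnd - val is the base of rnd's bucket of size max. If base + (max - 1)
    // exceeds 2^31 - 1 (the sum wraps negative in 32-bit int arithmetic, the
    // same trick the code above relies on), the bucket is truncated at 2^31
    // and rnd must be rejected.
    if (rnd - val + (max - 1) >= 0) return val;
  }
}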
diff --git a/src/base/utils/random-number-generator.h b/src/base/utils/random-number-generator.h
new file mode 100644
index 0000000..479423d
--- /dev/null
+++ b/src/base/utils/random-number-generator.h
@@ -0,0 +1,92 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+#define V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// -----------------------------------------------------------------------------
+// RandomNumberGenerator
+//
+// This class is used to generate a stream of pseudorandom numbers. The class
+// uses a 48-bit seed, which is modified using a linear congruential formula.
+// (See Donald Knuth, The Art of Computer Programming, Volume 3, Section 3.2.1.)
+// If two instances of RandomNumberGenerator are created with the same seed, and
+// the same sequence of method calls is made for each, they will generate and
+// return identical sequences of numbers.
+// This class uses (probably) weak entropy by default, but it's sufficient,
+// because it is the responsibility of the embedder to install an entropy source
+// using v8::V8::SetEntropySource(), which provides reasonable entropy, see:
+// https://code.google.com/p/v8/issues/detail?id=2905
+// This class is neither reentrant nor threadsafe.
+
+class RandomNumberGenerator FINAL {
+ public:
+  // EntropySource is used as a callback function when V8 needs a source of
+  // entropy.
+  typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen);
+  static void SetEntropySource(EntropySource entropy_source);
+
+  RandomNumberGenerator();
+  explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); }
+
+  // Returns the next pseudorandom, uniformly distributed int value from this
+  // random number generator's sequence. The general contract of |NextInt()| is
+  // that one int value is pseudorandomly generated and returned.
+  // All 2^32 possible integer values are produced with (approximately) equal
+  // probability.
+  V8_INLINE int NextInt() WARN_UNUSED_RESULT {
+    return Next(32);
+  }
+
+  // Returns a pseudorandom, uniformly distributed int value between 0
+  // (inclusive) and the specified max value (exclusive), drawn from this random
+  // number generator's sequence. The general contract of |NextInt(int)| is that
+  // one int value in the specified range is pseudorandomly generated and
+  // returned. All max possible int values are produced with (approximately)
+  // equal probability.
+  int NextInt(int max) WARN_UNUSED_RESULT;
+
+  // Returns the next pseudorandom, uniformly distributed boolean value from
+  // this random number generator's sequence. The general contract of
+  // |NextBool()| is that one boolean value is pseudorandomly generated and
+  // returned. The values true and false are produced with (approximately) equal
+  // probability.
+  V8_INLINE bool NextBool() WARN_UNUSED_RESULT {
+    return Next(1) != 0;
+  }
+
+  // Returns the next pseudorandom, uniformly distributed double value between
+  // 0.0 and 1.0 from this random number generator's sequence.
+  // The general contract of |NextDouble()| is that one double value, chosen
+  // (approximately) uniformly from the range 0.0 (inclusive) to 1.0
+  // (exclusive), is pseudorandomly generated and returned.
+  double NextDouble() WARN_UNUSED_RESULT;
+
+  // Fills the elements of a specified array of bytes with random numbers.
+  void NextBytes(void* buffer, size_t buflen);
+
+  // Overrides the current seed.
+  void SetSeed(int64_t seed);
+
+  int64_t initial_seed() const { return initial_seed_; }
+
+ private:
+  static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
+  static const int64_t kAddend = 0xb;
+  static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
+
+  int Next(int bits) WARN_UNUSED_RESULT;
+
+  int64_t initial_seed_;
+  int64_t seed_;
+};
+
+} }  // namespace v8::base
+
+#endif  // V8_BASE_UTILS_RANDOM_NUMBER_GENERATOR_H_
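
Two contract details in this interface are easy to miss: the WARN_UNUSED_RESULT annotations require callers to consume every drawn value, and NextDouble() (see the .cc above) splices 26 + 27 freshly drawn bits into a 53-bit numerator over 2^53, so it covers the full double mantissa uniformly in [0, 1). A hypothetical usage sketch relying only on these declarations (not part of the patch):

    #include "src/base/utils/random-number-generator.h"

    #include <cstdio>

    void Demo() {
      // Identical seeds plus identical call sequences yield identical
      // streams, per the class comment above.
      v8::base::RandomNumberGenerator a(42);
      v8::base::RandomNumberGenerator b(42);
      for (int i = 0; i < 4; ++i) {
        printf("%d %d\n", a.NextInt(100), b.NextInt(100));  // columns match
      }
      double d = a.NextDouble();  // uniform in [0.0, 1.0)
      bool coin = a.NextBool();   // a single bit of the stream
      printf("%f %d\n", d, coin ? 1 : 0);
    }
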
diff --git a/src/base/win32-headers.h b/src/base/win32-headers.h
index 5432de1..2d94abd 100644
--- a/src/base/win32-headers.h
+++ b/src/base/win32-headers.h
@@ -35,9 +35,9 @@
 
 #include <windows.h>
 
+#include <mmsystem.h>  // For timeGetTime().
 #include <signal.h>  // For raise().
 #include <time.h>  // For LocalOffset() implementation.
-#include <mmsystem.h>  // For timeGetTime().
 #ifdef __MINGW32__
 // Require Windows XP or higher when compiling with MinGW. This is for MinGW
 // header files to expose getaddrinfo.
@@ -75,5 +75,7 @@
 #undef GetObject
 #undef CreateSemaphore
 #undef Yield
+#undef RotateRight32
+#undef RotateRight64
 
 #endif  // V8_BASE_WIN32_HEADERS_H_
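
The include shuffle keeps the list alphabetical, and the two new #undef lines extend the existing pattern (GetObject, CreateSemaphore, Yield): Windows SDK headers define function-like macros that would otherwise expand inside ordinary V8 identifiers, and RotateRight32/RotateRight64 would presumably collide with V8's own rotation helpers. A sketch of the failure mode being avoided (assumed scenario, simplified):

    #include <cstdint>
    #include <windows.h>  // Some SDK headers #define RotateRight32/64 as
                          // macros wrapping _rotr/_rotr64.

    #ifdef RotateRight32
    #undef RotateRight32  // Without the #undef, the macro would expand
    #endif                // inside the declaration below and break it.

    inline uint32_t RotateRight32(uint32_t value, uint32_t shift) {
      shift &= 31;
      return (value >> shift) | (value << ((32 - shift) & 31));
    }
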
diff --git a/src/base/win32-math.cc b/src/base/win32-math.cc
new file mode 100644
index 0000000..d6fc78b
--- /dev/null
+++ b/src/base/win32-math.cc
@@ -0,0 +1,82 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+#if defined(_MSC_VER) && (_MSC_VER < 1800)
+
+#include "src/base/win32-headers.h"
+#include <float.h>         // Required for DBL_MAX and on Win32 for finite()
+#include <limits.h>        // Required for INT_MAX etc.
+#include <cmath>
+#include "src/base/win32-math.h"
+
+#include "src/base/logging.h"
+
+
+namespace std {
+
+// Test for a NaN (not a number) value - usually defined in math.h
+int isnan(double x) {
+  return _isnan(x);
+}
+
+
+// Test for infinity - usually defined in math.h
+int isinf(double x) {
+  return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
+}
+
+
+// Test for finite value - usually defined in math.h
+int isfinite(double x) {
+  return _finite(x);
+}
+
+
+// Test if x is less than y and neither is NaN - usually defined in math.h
+int isless(double x, double y) {
+  return isnan(x) || isnan(y) ? 0 : x < y;
+}
+
+
+// Test if x is greater than y and neither is NaN - usually defined in math.h
+int isgreater(double x, double y) {
+  return isnan(x) || isnan(y) ? 0 : x > y;
+}
+
+
+// Classify floating point number - usually defined in math.h
+int fpclassify(double x) {
+  // Use the MS-specific _fpclass() for classification.
+  int flags = _fpclass(x);
+
+  // Determine class. We cannot use a switch statement because
+  // the _FPCLASS_ constants are defined as flags.
+  if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
+  if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
+  if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
+  if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
+
+  // All cases should be covered by the code above.
+  DCHECK(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
+  return FP_NAN;
+}
+
+
+// Test sign - usually defined in math.h
+int signbit(double x) {
+  // We need to take care of the special case of both positive
+  // and negative versions of zero.
+  if (x == 0)
+    return _fpclass(x) & _FPCLASS_NZ;
+  else
+    return x < 0;
+}
+
+}  // namespace std
+
+#endif  // _MSC_VER
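
These shims rebuild the C99 classification functions on pre-2013 MSVC from the MS-specific _fpclass() flags. The delicate contracts are isless()/isgreater(), which must quietly return false when either operand is NaN, and signbit(), which must detect -0.0 even though -0.0 == 0.0 compares true. A hypothetical sanity check of exactly those contracts (on old MSVC this would rely on the shims above and the header that follows; the checks themselves are illustrative, not part of the patch):

    #include <cassert>
    #include <cmath>
    #include <limits>

    void CheckShims() {
      const double inf = std::numeric_limits<double>::infinity();
      const double nan = std::numeric_limits<double>::quiet_NaN();

      assert(std::isnan(nan) && !std::isnan(1.0));
      assert(std::isinf(inf) && !std::isfinite(inf) && std::isfinite(1.0));
      assert(std::fpclassify(0.0) == FP_ZERO);
      assert(std::fpclassify(1.0) == FP_NORMAL);
      assert(std::fpclassify(inf) == FP_INFINITE);
      // Unordered comparisons: NaN is neither less nor greater.
      assert(!std::isless(nan, 1.0) && !std::isgreater(nan, 1.0));
      // -0.0 == 0.0 is true, hence the _FPCLASS_NZ special case above.
      assert(std::signbit(-0.0) && !std::signbit(0.0));
    }
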
diff --git a/src/base/win32-math.h b/src/base/win32-math.h
new file mode 100644
index 0000000..e1c0350
--- /dev/null
+++ b/src/base/win32-math.h
@@ -0,0 +1,42 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
+// refer to The Open Group Base Specification for specification of the correct
+// semantics for these functions.
+// (http://www.opengroup.org/onlinepubs/000095399/)
+
+#ifndef V8_BASE_WIN32_MATH_H_
+#define V8_BASE_WIN32_MATH_H_
+
+#ifndef _MSC_VER
+#error Wrong environment, expected MSVC.
+#endif  // _MSC_VER
+
+// MSVC 2013+ provides implementations of all standard math functions.
+#if (_MSC_VER < 1800)
+enum {
+  FP_NAN,
+  FP_INFINITE,
+  FP_ZERO,
+  FP_SUBNORMAL,
+  FP_NORMAL
+};
+
+
+namespace std {
+
+int isfinite(double x);
+int isinf(double x);
+int isnan(double x);
+int isless(double x, double y);
+int isgreater(double x, double y);
+int fpclassify(double x);
+int signbit(double x);
+
+}  // namespace std
+
+#endif  // _MSC_VER < 1800
+
+#endif  // V8_BASE_WIN32_MATH_H_
diff --git a/src/bignum-dtoa.cc b/src/bignum-dtoa.cc
index 8860a9b..53bf418 100644
--- a/src/bignum-dtoa.cc
+++ b/src/bignum-dtoa.cc
@@ -5,7 +5,7 @@
 #include <cmath>
 
 #include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/utils.h"
 
 #include "src/bignum-dtoa.h"
@@ -17,7 +17,7 @@
 namespace internal {
 
 static int NormalizedExponent(uint64_t significand, int exponent) {
-  ASSERT(significand != 0);
+  DCHECK(significand != 0);
   while ((significand & Double::kHiddenBit) == 0) {
     significand = significand << 1;
     exponent = exponent - 1;
@@ -68,8 +68,8 @@
 
 void BignumDtoa(double v, BignumDtoaMode mode, int requested_digits,
                 Vector<char> buffer, int* length, int* decimal_point) {
-  ASSERT(v > 0);
-  ASSERT(!Double(v).IsSpecial());
+  DCHECK(v > 0);
+  DCHECK(!Double(v).IsSpecial());
   uint64_t significand = Double(v).Significand();
   bool is_even = (significand & 1) == 0;
   int exponent = Double(v).Exponent();
@@ -99,7 +99,7 @@
   // 4e-324. In this case the denominator needs fewer than 324*4 binary digits.
   // The maximum double is 1.7976931348623157e308 which needs fewer than
   // 308*4 binary digits.
-  ASSERT(Bignum::kMaxSignificantBits >= 324*4);
+  DCHECK(Bignum::kMaxSignificantBits >= 324*4);
   bool need_boundary_deltas = (mode == BIGNUM_DTOA_SHORTEST);
   InitialScaledStartValues(v, estimated_power, need_boundary_deltas,
                            &numerator, &denominator,
@@ -159,7 +159,7 @@
   while (true) {
     uint16_t digit;
     digit = numerator->DivideModuloIntBignum(*denominator);
-    ASSERT(digit <= 9);  // digit is a uint16_t and therefore always positive.
+    DCHECK(digit <= 9);  // digit is a uint16_t and therefore always positive.
     // digit = numerator / denominator (integer division).
     // numerator = numerator % denominator.
     buffer[(*length)++] = digit + '0';
@@ -205,7 +205,7 @@
         // loop would have stopped earlier.
         // We still have an assert here in case the preconditions were not
         // satisfied.
-        ASSERT(buffer[(*length) - 1] != '9');
+        DCHECK(buffer[(*length) - 1] != '9');
         buffer[(*length) - 1]++;
       } else {
         // Halfway case.
@@ -216,7 +216,7 @@
         if ((buffer[(*length) - 1] - '0') % 2 == 0) {
           // Round down => Do nothing.
         } else {
-          ASSERT(buffer[(*length) - 1] != '9');
+          DCHECK(buffer[(*length) - 1] != '9');
           buffer[(*length) - 1]++;
         }
       }
@@ -228,9 +228,9 @@
       // Round up.
       // Note again that the last digit could not be '9' since this would have
       // stopped the loop earlier.
-      // We still have an ASSERT here, in case the preconditions were not
+      // We still have a DCHECK here, in case the preconditions were not
       // satisfied.
-      ASSERT(buffer[(*length) -1] != '9');
+      DCHECK(buffer[(*length) - 1] != '9');
       buffer[(*length) - 1]++;
       return;
     }
@@ -247,11 +247,11 @@
 static void GenerateCountedDigits(int count, int* decimal_point,
                                   Bignum* numerator, Bignum* denominator,
                                   Vector<char>(buffer), int* length) {
-  ASSERT(count >= 0);
+  DCHECK(count >= 0);
   for (int i = 0; i < count - 1; ++i) {
     uint16_t digit;
     digit = numerator->DivideModuloIntBignum(*denominator);
-    ASSERT(digit <= 9);  // digit is a uint16_t and therefore always positive.
+    DCHECK(digit <= 9);  // digit is a uint16_t and therefore always positive.
     // digit = numerator / denominator (integer division).
     // numerator = numerator % denominator.
     buffer[i] = digit + '0';
@@ -304,7 +304,7 @@
   } else if (-(*decimal_point) == requested_digits) {
     // We only need to verify if the number rounds down or up.
     // Ex: 0.04 and 0.06 with requested_digits == 1.
-    ASSERT(*decimal_point == -requested_digits);
+    DCHECK(*decimal_point == -requested_digits);
     // Initially the fraction lies in range (1, 10]. Multiply the denominator
     // by 10 so that we can compare more easily.
     denominator->Times10();
@@ -383,7 +383,7 @@
     Bignum* numerator, Bignum* denominator,
     Bignum* delta_minus, Bignum* delta_plus) {
   // A positive exponent implies a positive power.
-  ASSERT(estimated_power >= 0);
+  DCHECK(estimated_power >= 0);
   // Since the estimated_power is positive we simply multiply the denominator
   // by 10^estimated_power.
 
@@ -502,7 +502,7 @@
   // numerator = v * 10^-estimated_power * 2 * 2^-exponent.
   // Remember: numerator has been abused as power_ten. So no need to assign it
   //  to itself.
-  ASSERT(numerator == power_ten);
+  DCHECK(numerator == power_ten);
   numerator->MultiplyByUInt64(significand);
 
   // denominator = 2 * 2^-exponent with exponent < 0.
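
The rounding hunks above are the round-half-to-even step of BIGNUM_DTOA_SHORTEST: on an exact tie the last generated digit is kept if it is even and bumped if it is odd, and the surrounding invariants guarantee that digit is never '9' on this path, so the bump stays a decimal digit. A minimal sketch of just that tie-breaking rule (illustrative, not V8 code):

    #include <cassert>

    // Apply the half-way rule to the last digit of an already generated
    // decimal buffer. The caller guarantees the digit is not '9' here,
    // mirroring the DCHECKs in the patched code.
    void RoundTieToEven(char* buffer, int length) {
      char last = buffer[length - 1];
      if ((last - '0') % 2 != 0) {
        assert(last != '9');
        buffer[length - 1] = last + 1;  // Odd digit: round up to even.
      }
      // Even digit: round down, i.e. leave the buffer untouched.
    }
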
diff --git a/src/bignum.cc b/src/bignum.cc
index a44a672..254cb01 100644
--- a/src/bignum.cc
+++ b/src/bignum.cc
@@ -2,9 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "include/v8stdint.h"
-#include "src/utils.h"
+#include "src/v8.h"
+
 #include "src/bignum.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -25,7 +26,7 @@
 
 // Guaranteed to lie in one Bigit.
 void Bignum::AssignUInt16(uint16_t value) {
-  ASSERT(kBigitSize >= BitSize(value));
+  DCHECK(kBigitSize >= BitSize(value));
   Zero();
   if (value == 0) return;
 
@@ -71,7 +72,7 @@
   uint64_t result = 0;
   for (int i = from; i < from + digits_to_read; ++i) {
     int digit = buffer[i] - '0';
-    ASSERT(0 <= digit && digit <= 9);
+    DCHECK(0 <= digit && digit <= 9);
     result = result * 10 + digit;
   }
   return result;
@@ -147,8 +148,8 @@
 
 
 void Bignum::AddBignum(const Bignum& other) {
-  ASSERT(IsClamped());
-  ASSERT(other.IsClamped());
+  DCHECK(IsClamped());
+  DCHECK(other.IsClamped());
 
   // If this has a greater exponent than other append zero-bigits to this.
   // After this call exponent_ <= other.exponent_.
@@ -169,7 +170,7 @@
   EnsureCapacity(1 + Max(BigitLength(), other.BigitLength()) - exponent_);
   Chunk carry = 0;
   int bigit_pos = other.exponent_ - exponent_;
-  ASSERT(bigit_pos >= 0);
+  DCHECK(bigit_pos >= 0);
   for (int i = 0; i < other.used_digits_; ++i) {
     Chunk sum = bigits_[bigit_pos] + other.bigits_[i] + carry;
     bigits_[bigit_pos] = sum & kBigitMask;
@@ -184,15 +185,15 @@
     bigit_pos++;
   }
   used_digits_ = Max(bigit_pos, used_digits_);
-  ASSERT(IsClamped());
+  DCHECK(IsClamped());
 }
 
 
 void Bignum::SubtractBignum(const Bignum& other) {
-  ASSERT(IsClamped());
-  ASSERT(other.IsClamped());
+  DCHECK(IsClamped());
+  DCHECK(other.IsClamped());
   // We require this to be bigger than other.
-  ASSERT(LessEqual(other, *this));
+  DCHECK(LessEqual(other, *this));
 
   Align(other);
 
@@ -200,7 +201,7 @@
   Chunk borrow = 0;
   int i;
   for (i = 0; i < other.used_digits_; ++i) {
-    ASSERT((borrow == 0) || (borrow == 1));
+    DCHECK((borrow == 0) || (borrow == 1));
     Chunk difference = bigits_[i + offset] - other.bigits_[i] - borrow;
     bigits_[i + offset] = difference & kBigitMask;
     borrow = difference >> (kChunkSize - 1);
@@ -234,7 +235,7 @@
 
   // The product of a bigit with the factor is of size kBigitSize + 32.
   // Assert that this number + 1 (for the carry) fits into double chunk.
-  ASSERT(kDoubleChunkSize >= kBigitSize + 32 + 1);
+  DCHECK(kDoubleChunkSize >= kBigitSize + 32 + 1);
   DoubleChunk carry = 0;
   for (int i = 0; i < used_digits_; ++i) {
     DoubleChunk product = static_cast<DoubleChunk>(factor) * bigits_[i] + carry;
@@ -256,7 +257,7 @@
     Zero();
     return;
   }
-  ASSERT(kBigitSize < 32);
+  DCHECK(kBigitSize < 32);
   uint64_t carry = 0;
   uint64_t low = factor & 0xFFFFFFFF;
   uint64_t high = factor >> 32;
@@ -296,7 +297,7 @@
       { kFive1, kFive2, kFive3, kFive4, kFive5, kFive6,
         kFive7, kFive8, kFive9, kFive10, kFive11, kFive12 };
 
-  ASSERT(exponent >= 0);
+  DCHECK(exponent >= 0);
   if (exponent == 0) return;
   if (used_digits_ == 0) return;
 
@@ -318,7 +319,7 @@
 
 
 void Bignum::Square() {
-  ASSERT(IsClamped());
+  DCHECK(IsClamped());
   int product_length = 2 * used_digits_;
   EnsureCapacity(product_length);
 
@@ -380,7 +381,7 @@
   }
   // Since the result was guaranteed to lie inside the number the
   // accumulator must be 0 now.
-  ASSERT(accumulator == 0);
+  DCHECK(accumulator == 0);
 
   // Don't forget to update the used_digits and the exponent.
   used_digits_ = product_length;
@@ -390,8 +391,8 @@
 
 
 void Bignum::AssignPowerUInt16(uint16_t base, int power_exponent) {
-  ASSERT(base != 0);
-  ASSERT(power_exponent >= 0);
+  DCHECK(base != 0);
+  DCHECK(power_exponent >= 0);
   if (power_exponent == 0) {
     AssignUInt16(1);
     return;
@@ -464,9 +465,9 @@
 
 // Precondition: this/other < 16bit.
 uint16_t Bignum::DivideModuloIntBignum(const Bignum& other) {
-  ASSERT(IsClamped());
-  ASSERT(other.IsClamped());
-  ASSERT(other.used_digits_ > 0);
+  DCHECK(IsClamped());
+  DCHECK(other.IsClamped());
+  DCHECK(other.used_digits_ > 0);
 
   // Easy case: if we have fewer digits than the divisor, then the result is 0.
   // Note: this handles the case where this == 0, too.
@@ -484,14 +485,14 @@
     // This naive approach is extremely inefficient if the quotient of this
     // divided by other might be big. This function is implemented for
     // doubleToString where the result should be small (less than 10).
-    ASSERT(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
+    DCHECK(other.bigits_[other.used_digits_ - 1] >= ((1 << kBigitSize) / 16));
     // Remove the multiples of the first digit.
     // Example this = 23 and other equals 9. -> Remove 2 multiples.
     result += bigits_[used_digits_ - 1];
     SubtractTimes(other, bigits_[used_digits_ - 1]);
   }
 
-  ASSERT(BigitLength() == other.BigitLength());
+  DCHECK(BigitLength() == other.BigitLength());
 
   // Both bignums are at the same length now.
   // Since other has more than 0 digits we know that the access to
@@ -528,7 +529,7 @@
 
 template<typename S>
 static int SizeInHexChars(S number) {
-  ASSERT(number > 0);
+  DCHECK(number > 0);
   int result = 0;
   while (number != 0) {
     number >>= 4;
@@ -539,16 +540,16 @@
 
 
 static char HexCharOfValue(int value) {
-  ASSERT(0 <= value && value <= 16);
+  DCHECK(0 <= value && value <= 16);
   if (value < 10) return value + '0';
   return value - 10 + 'A';
 }
 
 
 bool Bignum::ToHexString(char* buffer, int buffer_size) const {
-  ASSERT(IsClamped());
+  DCHECK(IsClamped());
   // Each bigit must be printable as a whole number of hex characters.
-  ASSERT(kBigitSize % 4 == 0);
+  DCHECK(kBigitSize % 4 == 0);
   const int kHexCharsPerBigit = kBigitSize / 4;
 
   if (used_digits_ == 0) {
@@ -593,8 +594,8 @@
 
 
 int Bignum::Compare(const Bignum& a, const Bignum& b) {
-  ASSERT(a.IsClamped());
-  ASSERT(b.IsClamped());
+  DCHECK(a.IsClamped());
+  DCHECK(b.IsClamped());
   int bigit_length_a = a.BigitLength();
   int bigit_length_b = b.BigitLength();
   if (bigit_length_a < bigit_length_b) return -1;
@@ -611,9 +612,9 @@
 
 
 int Bignum::PlusCompare(const Bignum& a, const Bignum& b, const Bignum& c) {
-  ASSERT(a.IsClamped());
-  ASSERT(b.IsClamped());
-  ASSERT(c.IsClamped());
+  DCHECK(a.IsClamped());
+  DCHECK(b.IsClamped());
+  DCHECK(c.IsClamped());
   if (a.BigitLength() < b.BigitLength()) {
     return PlusCompare(b, a, c);
   }
@@ -690,15 +691,15 @@
     }
     used_digits_ += zero_digits;
     exponent_ -= zero_digits;
-    ASSERT(used_digits_ >= 0);
-    ASSERT(exponent_ >= 0);
+    DCHECK(used_digits_ >= 0);
+    DCHECK(exponent_ >= 0);
   }
 }
 
 
 void Bignum::BigitsShiftLeft(int shift_amount) {
-  ASSERT(shift_amount < kBigitSize);
-  ASSERT(shift_amount >= 0);
+  DCHECK(shift_amount < kBigitSize);
+  DCHECK(shift_amount >= 0);
   Chunk carry = 0;
   for (int i = 0; i < used_digits_; ++i) {
     Chunk new_carry = bigits_[i] >> (kBigitSize - shift_amount);
@@ -720,7 +721,7 @@
   b.MultiplyByUInt32(factor);
   a.SubtractBignum(b);
 #endif
-  ASSERT(exponent_ <= other.exponent_);
+  DCHECK(exponent_ <= other.exponent_);
   if (factor < 3) {
     for (int i = 0; i < factor; ++i) {
       SubtractBignum(other);
@@ -745,7 +746,7 @@
     borrow = difference >> (kChunkSize - 1);
   }
   Clamp();
-  ASSERT(Bignum::Equal(a, *this));
+  DCHECK(Bignum::Equal(a, *this));
 }
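
Bignum stores its magnitude little-endian in base-2^kBigitSize chunks ("bigits") plus a bigit-level exponent, and AddBignum() above propagates a carry by masking each sum to kBigitSize bits. A simplified sketch of that carry loop (illustrative; kBigitSize = 28 is assumed here to match V8's Bignum, while the DCHECKs above only require it to be below 32):

    #include <cstdint>
    #include <vector>

    static const int kBigitSize = 28;  // Assumed; any value < 32 works here.
    static const uint32_t kBigitMask = (1u << kBigitSize) - 1;

    // Little-endian base-2^kBigitSize addition with the same carry handling
    // as AddBignum: the mask keeps the low bits, the shift is the carry.
    std::vector<uint32_t> Add(std::vector<uint32_t> a,
                              const std::vector<uint32_t>& b) {
      if (a.size() < b.size()) a.resize(b.size(), 0);
      uint32_t carry = 0;
      for (size_t i = 0; i < a.size(); ++i) {
        uint32_t sum = a[i] + (i < b.size() ? b[i] : 0) + carry;
        a[i] = sum & kBigitMask;
        carry = sum >> kBigitSize;  // Either 0 or 1.
      }
      if (carry != 0) a.push_back(carry);  // Grow by one bigit on overflow.
      return a;
    }
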
 
 
diff --git a/src/bootstrapper.cc b/src/bootstrapper.cc
index 4d7ce52..250562a 100644
--- a/src/bootstrapper.cc
+++ b/src/bootstrapper.cc
@@ -5,16 +5,16 @@
 #include "src/bootstrapper.h"
 
 #include "src/accessors.h"
-#include "src/isolate-inl.h"
-#include "src/natives.h"
-#include "src/snapshot.h"
-#include "src/trig-table.h"
+#include "src/code-stubs.h"
 #include "src/extensions/externalize-string-extension.h"
 #include "src/extensions/free-buffer-extension.h"
 #include "src/extensions/gc-extension.h"
 #include "src/extensions/statistics-extension.h"
 #include "src/extensions/trigger-failure-extension.h"
-#include "src/code-stubs.h"
+#include "src/isolate-inl.h"
+#include "src/natives.h"
+#include "src/snapshot.h"
+#include "third_party/fdlibm/fdlibm.h"
 
 namespace v8 {
 namespace internal {
@@ -44,7 +44,7 @@
 
 
 Handle<String> Bootstrapper::NativesSourceLookup(int index) {
-  ASSERT(0 <= index && index < Natives::GetBuiltinsCount());
+  DCHECK(0 <= index && index < Natives::GetBuiltinsCount());
   Heap* heap = isolate_->heap();
   if (heap->natives_source_cache()->get(index)->IsUndefined()) {
     // We can use external strings for the natives.
@@ -54,9 +54,9 @@
                                           source.start(),
                                           source.length());
     // We do not expect this to throw an exception. Change this if it does.
-    Handle<String> source_code =
-        isolate_->factory()->NewExternalStringFromAscii(
-            resource).ToHandleChecked();
+    Handle<String> source_code = isolate_->factory()
+                                     ->NewExternalStringFromOneByte(resource)
+                                     .ToHandleChecked();
     heap->natives_source_cache()->set(index, *source_code);
   }
   Handle<Object> cached_source(heap->natives_source_cache()->get(index),
@@ -99,10 +99,15 @@
 
 void Bootstrapper::TearDownExtensions() {
   delete free_buffer_extension_;
+  free_buffer_extension_ = NULL;
   delete gc_extension_;
+  gc_extension_ = NULL;
   delete externalize_string_extension_;
+  externalize_string_extension_ = NULL;
   delete statistics_extension_;
+  statistics_extension_ = NULL;
   delete trigger_failure_extension_;
+  trigger_failure_extension_ = NULL;
 }
 
 
@@ -121,7 +126,7 @@
 void Bootstrapper::TearDown() {
   if (delete_these_non_arrays_on_tear_down_ != NULL) {
     int len = delete_these_non_arrays_on_tear_down_->length();
-    ASSERT(len < 24);  // Don't use this mechanism for unbounded allocations.
+    DCHECK(len < 28);  // Don't use this mechanism for unbounded allocations.
     for (int i = 0; i < len; i++) {
       delete delete_these_non_arrays_on_tear_down_->at(i);
       delete_these_non_arrays_on_tear_down_->at(i) = NULL;
@@ -132,7 +137,7 @@
 
   if (delete_these_arrays_on_tear_down_ != NULL) {
     int len = delete_these_arrays_on_tear_down_->length();
-    ASSERT(len < 1000);  // Don't use this mechanism for unbounded allocations.
+    DCHECK(len < 1000);  // Don't use this mechanism for unbounded allocations.
     for (int i = 0; i < len; i++) {
       delete[] delete_these_arrays_on_tear_down_->at(i);
       delete_these_arrays_on_tear_down_->at(i) = NULL;
@@ -148,8 +153,8 @@
 class Genesis BASE_EMBEDDED {
  public:
   Genesis(Isolate* isolate,
-          Handle<Object> global_object,
-          v8::Handle<v8::ObjectTemplate> global_template,
+          MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+          v8::Handle<v8::ObjectTemplate> global_proxy_template,
           v8::ExtensionConfiguration* extensions);
   ~Genesis() { }
 
@@ -183,26 +188,25 @@
   // we have to used the deserialized ones that are linked together with the
   // rest of the context snapshot.
   Handle<JSGlobalProxy> CreateNewGlobals(
-      v8::Handle<v8::ObjectTemplate> global_template,
-      Handle<Object> global_object,
-      Handle<GlobalObject>* global_proxy_out);
+      v8::Handle<v8::ObjectTemplate> global_proxy_template,
+      MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+      Handle<GlobalObject>* global_object_out);
   // Hooks the given global proxy into the context.  If the context was created
   // by deserialization then this will unhook the global proxy that was
   // deserialized, leaving the GC to pick it up.
-  void HookUpGlobalProxy(Handle<GlobalObject> inner_global,
+  void HookUpGlobalProxy(Handle<GlobalObject> global_object,
                          Handle<JSGlobalProxy> global_proxy);
-  // Similarly, we want to use the inner global that has been created by the
-  // templates passed through the API.  The inner global from the snapshot is
-  // detached from the other objects in the snapshot.
-  void HookUpInnerGlobal(Handle<GlobalObject> inner_global);
+  // Similarly, we want to use the global that has been created by the templates
+  // passed through the API.  The global from the snapshot is detached from the
+  // other objects in the snapshot.
+  void HookUpGlobalObject(Handle<GlobalObject> global_object);
   // New context initialization.  Used for creating a context from scratch.
-  void InitializeGlobal(Handle<GlobalObject> inner_global,
+  void InitializeGlobal(Handle<GlobalObject> global_object,
                         Handle<JSFunction> empty_function);
   void InitializeExperimentalGlobal();
   // Installs the contents of the native .js files on the global objects.
   // Used for creating a context from scratch.
   void InstallNativeFunctions();
-  void InstallExperimentalBuiltinFunctionIds();
   void InstallExperimentalNativeFunctions();
   Handle<JSFunction> InstallInternalArray(Handle<JSBuiltinsObject> builtins,
                                           const char* name,
@@ -253,7 +257,8 @@
   bool InstallJSBuiltins(Handle<JSBuiltinsObject> builtins);
   bool ConfigureApiObject(Handle<JSObject> object,
                           Handle<ObjectTemplateInfo> object_template);
-  bool ConfigureGlobalObjects(v8::Handle<v8::ObjectTemplate> global_template);
+  bool ConfigureGlobalObjects(
+      v8::Handle<v8::ObjectTemplate> global_proxy_template);
 
   // Migrates all properties from the 'from' object to the 'to'
   // object and overrides the prototype in 'to' with the one from
@@ -327,11 +332,12 @@
 
 
 Handle<Context> Bootstrapper::CreateEnvironment(
-    Handle<Object> global_object,
-    v8::Handle<v8::ObjectTemplate> global_template,
+    MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+    v8::Handle<v8::ObjectTemplate> global_proxy_template,
     v8::ExtensionConfiguration* extensions) {
   HandleScope scope(isolate_);
-  Genesis genesis(isolate_, global_object, global_template, extensions);
+  Genesis genesis(
+      isolate_, maybe_global_proxy, global_proxy_template, extensions);
   Handle<Context> env = genesis.result();
   if (env.is_null() || !InstallExtensions(env, extensions)) {
     return Handle<Context>();
@@ -342,10 +348,10 @@
 
 static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
   // object.__proto__ = proto;
-  Handle<Map> old_to_map = Handle<Map>(object->map());
-  Handle<Map> new_to_map = Map::Copy(old_to_map);
-  new_to_map->set_prototype(*proto);
-  object->set_map(*new_to_map);
+  Handle<Map> old_map = Handle<Map>(object->map());
+  Handle<Map> new_map = Map::Copy(old_map);
+  new_map->set_prototype(*proto);
+  JSObject::MigrateToMap(object, new_map);
 }
 
 
@@ -354,6 +360,7 @@
   Handle<JSGlobalProxy> global_proxy(JSGlobalProxy::cast(env->global_proxy()));
   global_proxy->set_native_context(*factory->null_value());
   SetObjectPrototype(global_proxy, factory->null_value());
+  global_proxy->map()->set_constructor(*factory->null_value());
 }
 
 
@@ -379,8 +386,7 @@
   } else {
     attributes = DONT_ENUM;
   }
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      target, internalized_name, function, attributes).Check();
+  JSObject::AddProperty(target, internalized_name, function, attributes);
   if (target->IsJSGlobalObject()) {
     function->shared()->set_instance_class_name(*internalized_name);
   }
@@ -477,12 +483,14 @@
 
   {  // --- O b j e c t ---
     Handle<JSFunction> object_fun = factory->NewFunction(object_name);
+    int unused = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
+    int instance_size = JSObject::kHeaderSize + kPointerSize * unused;
     Handle<Map> object_function_map =
-        factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-    object_fun->set_initial_map(*object_function_map);
-    object_function_map->set_constructor(*object_fun);
-    object_function_map->set_unused_property_fields(
-        JSObject::kInitialGlobalObjectUnusedPropertiesCount);
+        factory->NewMap(JS_OBJECT_TYPE, instance_size);
+    object_function_map->set_inobject_properties(unused);
+    JSFunction::SetInitialMap(object_fun, object_function_map,
+                              isolate->factory()->null_value());
+    object_function_map->set_unused_property_fields(unused);
 
     native_context()->set_object_function(*object_fun);
 
@@ -490,6 +498,9 @@
     Handle<JSObject> prototype = factory->NewJSObject(
         isolate->object_function(),
         TENURED);
+    Handle<Map> map = Map::Copy(handle(prototype->map()));
+    map->set_is_prototype_map(true);
+    prototype->set_map(*map);
 
     native_context()->set_initial_object_prototype(*prototype);
     // For bootstrapping set the array prototype to be the same as the object
@@ -502,13 +513,22 @@
   // Allocate the empty function as the prototype for function ECMAScript
   // 262 15.3.4.
   Handle<String> empty_string =
-      factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
+      factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("Empty"));
   Handle<Code> code(isolate->builtins()->builtin(Builtins::kEmptyFunction));
   Handle<JSFunction> empty_function = factory->NewFunctionWithoutPrototype(
       empty_string, code);
 
+  // Allocate the function map first and then patch the prototype later
+  Handle<Map> empty_function_map =
+      CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
+  DCHECK(!empty_function_map->is_dictionary_map());
+  empty_function_map->set_prototype(
+      native_context()->object_function()->prototype());
+  empty_function_map->set_is_prototype_map(true);
+  empty_function->set_map(*empty_function_map);
+
   // --- E m p t y ---
-  Handle<String> source = factory->NewStringFromStaticAscii("() {}");
+  Handle<String> source = factory->NewStringFromStaticChars("() {}");
   Handle<Script> script = factory->NewScript(source);
   script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
   empty_function->shared()->set_script(*script);
@@ -521,13 +541,6 @@
   native_context()->sloppy_function_without_prototype_map()->
       set_prototype(*empty_function);
   sloppy_function_map_writable_prototype_->set_prototype(*empty_function);
-
-  // Allocate the function map first and then patch the prototype later
-  Handle<Map> empty_function_map =
-      CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
-  empty_function_map->set_prototype(
-      native_context()->object_function()->prototype());
-  empty_function->set_map(*empty_function_map);
   return empty_function;
 }
 
@@ -550,7 +563,7 @@
     FieldDescriptor d(length_string, 0, ro_attribs, Representation::Tagged());
     map->AppendDescriptor(&d);
   } else {
-    ASSERT(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
+    DCHECK(function_mode == FUNCTION_WITH_WRITEABLE_PROTOTYPE ||
            function_mode == FUNCTION_WITH_READONLY_PROTOTYPE ||
            function_mode == FUNCTION_WITHOUT_PROTOTYPE);
     Handle<AccessorInfo> length =
@@ -593,7 +606,7 @@
 Handle<JSFunction> Genesis::GetStrictPoisonFunction() {
   if (strict_poison_function.is_null()) {
     Handle<String> name = factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("ThrowTypeError"));
+        STATIC_CHAR_VECTOR("ThrowTypeError"));
     Handle<Code> code(isolate()->builtins()->builtin(
         Builtins::kStrictModePoisonPill));
     strict_poison_function = factory()->NewFunctionWithoutPrototype(name, code);
@@ -609,7 +622,7 @@
 Handle<JSFunction> Genesis::GetGeneratorPoisonFunction() {
   if (generator_poison_function.is_null()) {
     Handle<String> name = factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("ThrowTypeError"));
+        STATIC_CHAR_VECTOR("ThrowTypeError"));
     Handle<Code> code(isolate()->builtins()->builtin(
         Builtins::kGeneratorPoisonPill));
     generator_poison_function = factory()->NewFunctionWithoutPrototype(
@@ -694,16 +707,16 @@
 
 
 static void AddToWeakNativeContextList(Context* context) {
-  ASSERT(context->IsNativeContext());
+  DCHECK(context->IsNativeContext());
   Heap* heap = context->GetIsolate()->heap();
 #ifdef DEBUG
   { // NOLINT
-    ASSERT(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
+    DCHECK(context->get(Context::NEXT_CONTEXT_LINK)->IsUndefined());
     // Check that context is not in the list yet.
     for (Object* current = heap->native_contexts_list();
          !current->IsUndefined();
          current = Context::cast(current)->get(Context::NEXT_CONTEXT_LINK)) {
-      ASSERT(current != context);
+      DCHECK(current != context);
     }
   }
 #endif
@@ -730,73 +743,75 @@
 
 
 Handle<JSGlobalProxy> Genesis::CreateNewGlobals(
-    v8::Handle<v8::ObjectTemplate> global_template,
-    Handle<Object> global_object,
-    Handle<GlobalObject>* inner_global_out) {
-  // The argument global_template aka data is an ObjectTemplateInfo.
+    v8::Handle<v8::ObjectTemplate> global_proxy_template,
+    MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+    Handle<GlobalObject>* global_object_out) {
+  // The argument global_proxy_template aka data is an ObjectTemplateInfo.
   // It has a constructor pointer that points at global_constructor which is a
   // FunctionTemplateInfo.
-  // The global_constructor is used to create or reinitialize the global_proxy.
-  // The global_constructor also has a prototype_template pointer that points at
-  // js_global_template which is an ObjectTemplateInfo.
+  // The global_proxy_constructor is used to create or reinitialize the
+  // global_proxy. The global_proxy_constructor also has a prototype_template
+  // pointer that points at js_global_object_template which is an
+  // ObjectTemplateInfo.
   // That in turn has a constructor pointer that points at
-  // js_global_constructor which is a FunctionTemplateInfo.
-  // js_global_constructor is used to make js_global_function
-  // js_global_function is used to make the new inner_global.
+  // js_global_object_constructor which is a FunctionTemplateInfo.
+  // js_global_object_constructor is used to make js_global_object_function
+  // js_global_object_function is used to make the new global_object.
   //
   // --- G l o b a l ---
-  // Step 1: Create a fresh inner JSGlobalObject.
-  Handle<JSFunction> js_global_function;
-  Handle<ObjectTemplateInfo> js_global_template;
-  if (!global_template.IsEmpty()) {
-    // Get prototype template of the global_template.
+  // Step 1: Create a fresh JSGlobalObject.
+  Handle<JSFunction> js_global_object_function;
+  Handle<ObjectTemplateInfo> js_global_object_template;
+  if (!global_proxy_template.IsEmpty()) {
+    // Get prototype template of the global_proxy_template.
     Handle<ObjectTemplateInfo> data =
-        v8::Utils::OpenHandle(*global_template);
+        v8::Utils::OpenHandle(*global_proxy_template);
     Handle<FunctionTemplateInfo> global_constructor =
         Handle<FunctionTemplateInfo>(
             FunctionTemplateInfo::cast(data->constructor()));
     Handle<Object> proto_template(global_constructor->prototype_template(),
                                   isolate());
     if (!proto_template->IsUndefined()) {
-      js_global_template =
+      js_global_object_template =
           Handle<ObjectTemplateInfo>::cast(proto_template);
     }
   }
 
-  if (js_global_template.is_null()) {
+  if (js_global_object_template.is_null()) {
     Handle<String> name = Handle<String>(heap()->empty_string());
     Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
         Builtins::kIllegal));
-    js_global_function = factory()->NewFunction(
-        name, code, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize);
-    // Change the constructor property of the prototype of the
-    // hidden global function to refer to the Object function.
     Handle<JSObject> prototype =
-        Handle<JSObject>(
-            JSObject::cast(js_global_function->instance_prototype()));
-    JSObject::SetOwnPropertyIgnoreAttributes(
-        prototype, factory()->constructor_string(),
-        isolate()->object_function(), NONE).Check();
+        factory()->NewFunctionPrototype(isolate()->object_function());
+    js_global_object_function = factory()->NewFunction(
+        name, code, prototype, JS_GLOBAL_OBJECT_TYPE, JSGlobalObject::kSize);
+#ifdef DEBUG
+    LookupIterator it(prototype, factory()->constructor_string(),
+                      LookupIterator::OWN_SKIP_INTERCEPTOR);
+    Handle<Object> value = JSReceiver::GetProperty(&it).ToHandleChecked();
+    DCHECK(it.IsFound());
+    DCHECK_EQ(*isolate()->object_function(), *value);
+#endif
   } else {
-    Handle<FunctionTemplateInfo> js_global_constructor(
-        FunctionTemplateInfo::cast(js_global_template->constructor()));
-    js_global_function =
-        factory()->CreateApiFunction(js_global_constructor,
+    Handle<FunctionTemplateInfo> js_global_object_constructor(
+        FunctionTemplateInfo::cast(js_global_object_template->constructor()));
+    js_global_object_function =
+        factory()->CreateApiFunction(js_global_object_constructor,
                                      factory()->the_hole_value(),
-                                     factory()->InnerGlobalObject);
+                                     factory()->GlobalObjectType);
   }
 
-  js_global_function->initial_map()->set_is_hidden_prototype();
-  js_global_function->initial_map()->set_dictionary_map(true);
-  Handle<GlobalObject> inner_global =
-      factory()->NewGlobalObject(js_global_function);
-  if (inner_global_out != NULL) {
-    *inner_global_out = inner_global;
+  js_global_object_function->initial_map()->set_is_hidden_prototype();
+  js_global_object_function->initial_map()->set_dictionary_map(true);
+  Handle<GlobalObject> global_object =
+      factory()->NewGlobalObject(js_global_object_function);
+  if (global_object_out != NULL) {
+    *global_object_out = global_object;
   }
 
   // Step 2: create or re-initialize the global proxy object.
   Handle<JSFunction> global_proxy_function;
-  if (global_template.IsEmpty()) {
+  if (global_proxy_template.IsEmpty()) {
     Handle<String> name = Handle<String>(heap()->empty_string());
     Handle<Code> code = Handle<Code>(isolate()->builtins()->builtin(
         Builtins::kIllegal));
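
For orientation, the template chain documented in this hunk is what an embedder sets in motion from the public API: the ObjectTemplate handed to v8::Context::New() is the global proxy template, and its constructor's prototype template describes the global object proper. A hypothetical embedder-side sketch against the public API of this era (illustrative only; the property name "answer" is made up):

    #include <v8.h>

    v8::Local<v8::Context> MakeContext(v8::Isolate* isolate) {
      v8::EscapableHandleScope scope(isolate);
      v8::Local<v8::ObjectTemplate> global_proxy_template =
          v8::ObjectTemplate::New(isolate);
      // Properties installed on the template surface on the new global.
      global_proxy_template->Set(
          v8::String::NewFromUtf8(isolate, "answer"),
          v8::Number::New(isolate, 42));
      v8::Local<v8::Context> context =
          v8::Context::New(isolate, NULL, global_proxy_template);
      return scope.Escape(context);
    }
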
@@ -804,17 +819,16 @@
         name, code, JS_GLOBAL_PROXY_TYPE, JSGlobalProxy::kSize);
   } else {
     Handle<ObjectTemplateInfo> data =
-        v8::Utils::OpenHandle(*global_template);
+        v8::Utils::OpenHandle(*global_proxy_template);
     Handle<FunctionTemplateInfo> global_constructor(
             FunctionTemplateInfo::cast(data->constructor()));
     global_proxy_function =
         factory()->CreateApiFunction(global_constructor,
                                      factory()->the_hole_value(),
-                                     factory()->OuterGlobalObject);
+                                     factory()->GlobalProxyType);
   }
 
-  Handle<String> global_name = factory()->InternalizeOneByteString(
-      STATIC_ASCII_VECTOR("global"));
+  Handle<String> global_name = factory()->global_string();
   global_proxy_function->shared()->set_instance_class_name(*global_name);
   global_proxy_function->initial_map()->set_is_access_check_needed(true);
 
@@ -822,9 +836,7 @@
   // Return the global proxy.
 
   Handle<JSGlobalProxy> global_proxy;
-  if (global_object.location() != NULL) {
-    ASSERT(global_object->IsJSGlobalProxy());
-    global_proxy = Handle<JSGlobalProxy>::cast(global_object);
+  if (maybe_global_proxy.ToHandle(&global_proxy)) {
     factory()->ReinitializeJSGlobalProxy(global_proxy, global_proxy_function);
   } else {
     global_proxy = Handle<JSGlobalProxy>::cast(
@@ -835,63 +847,58 @@
 }
 
 
-void Genesis::HookUpGlobalProxy(Handle<GlobalObject> inner_global,
+void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object,
                                 Handle<JSGlobalProxy> global_proxy) {
   // Set the native context for the global object.
-  inner_global->set_native_context(*native_context());
-  inner_global->set_global_context(*native_context());
-  inner_global->set_global_receiver(*global_proxy);
+  global_object->set_native_context(*native_context());
+  global_object->set_global_context(*native_context());
+  global_object->set_global_proxy(*global_proxy);
   global_proxy->set_native_context(*native_context());
   native_context()->set_global_proxy(*global_proxy);
 }
 
 
-void Genesis::HookUpInnerGlobal(Handle<GlobalObject> inner_global) {
-  Handle<GlobalObject> inner_global_from_snapshot(
+void Genesis::HookUpGlobalObject(Handle<GlobalObject> global_object) {
+  Handle<GlobalObject> global_object_from_snapshot(
       GlobalObject::cast(native_context()->extension()));
   Handle<JSBuiltinsObject> builtins_global(native_context()->builtins());
-  native_context()->set_extension(*inner_global);
-  native_context()->set_global_object(*inner_global);
-  native_context()->set_security_token(*inner_global);
+  native_context()->set_extension(*global_object);
+  native_context()->set_global_object(*global_object);
+  native_context()->set_security_token(*global_object);
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
-  Runtime::ForceSetObjectProperty(builtins_global,
-                                  factory()->InternalizeOneByteString(
-                                      STATIC_ASCII_VECTOR("global")),
-                                  inner_global,
-                                  attributes).Assert();
+  Runtime::DefineObjectProperty(builtins_global, factory()->global_string(),
+                                global_object, attributes).Assert();
   // Set up the reference from the global object to the builtins object.
-  JSGlobalObject::cast(*inner_global)->set_builtins(*builtins_global);
-  TransferNamedProperties(inner_global_from_snapshot, inner_global);
-  TransferIndexedProperties(inner_global_from_snapshot, inner_global);
+  JSGlobalObject::cast(*global_object)->set_builtins(*builtins_global);
+  TransferNamedProperties(global_object_from_snapshot, global_object);
+  TransferIndexedProperties(global_object_from_snapshot, global_object);
 }
 
 
 // This is only called if we are not using snapshots.  The equivalent
-// work in the snapshot case is done in HookUpInnerGlobal.
-void Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
+// work in the snapshot case is done in HookUpGlobalObject.
+void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
                                Handle<JSFunction> empty_function) {
   // --- N a t i v e   C o n t e x t ---
   // Use the empty function as closure (no scope info).
   native_context()->set_closure(*empty_function);
   native_context()->set_previous(NULL);
   // Set extension and global object.
-  native_context()->set_extension(*inner_global);
-  native_context()->set_global_object(*inner_global);
-  // Security setup: Set the security token of the global object to
-  // its the inner global. This makes the security check between two
-  // different contexts fail by default even in case of global
-  // object reinitialization.
-  native_context()->set_security_token(*inner_global);
+  native_context()->set_extension(*global_object);
+  native_context()->set_global_object(*global_object);
+  // Security setup: Set the security token of the native context to the global
+  // object. This makes the security check between two different contexts fail
+  // by default even in case of global object reinitialization.
+  native_context()->set_security_token(*global_object);
 
-  Isolate* isolate = inner_global->GetIsolate();
+  Isolate* isolate = global_object->GetIsolate();
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
   Handle<String> object_name = factory->Object_string();
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      inner_global, object_name,
-      isolate->object_function(), DONT_ENUM).Check();
+  JSObject::AddProperty(
+      global_object, object_name, isolate->object_function(), DONT_ENUM);
 
   Handle<JSObject> global(native_context()->global_object());
 
@@ -915,7 +922,7 @@
 
     // This assert protects an optimization in
     // HGraphBuilder::JSArrayBuilder::EmitMapCode()
-    ASSERT(initial_map->elements_kind() == GetInitialFastElementsKind());
+    DCHECK(initial_map->elements_kind() == GetInitialFastElementsKind());
     Map::EnsureDescriptorSlack(initial_map, 1);
 
     PropertyAttributes attribs = static_cast<PropertyAttributes>(
@@ -983,6 +990,14 @@
     }
   }
 
+  {
+    // --- S y m b o l ---
+    Handle<JSFunction> symbol_fun = InstallFunction(
+        global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
+        isolate->initial_object_prototype(), Builtins::kIllegal);
+    native_context()->set_symbol_function(*symbol_fun);
+  }
+
   {  // --- D a t e ---
     // Builtin functions for Date.prototype.
     Handle<JSFunction> date_fun =
@@ -1002,10 +1017,10 @@
                         Builtins::kIllegal);
     native_context()->set_regexp_function(*regexp_fun);
 
-    ASSERT(regexp_fun->has_initial_map());
+    DCHECK(regexp_fun->has_initial_map());
     Handle<Map> initial_map(regexp_fun->initial_map());
 
-    ASSERT_EQ(0, initial_map->inobject_properties());
+    DCHECK_EQ(0, initial_map->inobject_properties());
 
     PropertyAttributes final =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1076,6 +1091,7 @@
     proto->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
                                  Smi::FromInt(0),
                                  SKIP_WRITE_BARRIER);  // It's a Smi.
+    proto_map->set_is_prototype_map(true);
     initial_map->set_prototype(*proto);
     factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
                                    JSRegExp::IRREGEXP, factory->empty_string(),
@@ -1089,9 +1105,8 @@
         Handle<Object>(native_context()->initial_object_prototype(), isolate));
     cons->SetInstanceClassName(*name);
     Handle<JSObject> json_object = factory->NewJSObject(cons, TENURED);
-    ASSERT(json_object->IsJSObject());
-    JSObject::SetOwnPropertyIgnoreAttributes(
-        global, name, json_object, DONT_ENUM).Check();
+    DCHECK(json_object->IsJSObject());
+    JSObject::AddProperty(global, name, json_object, DONT_ENUM);
     native_context()->set_json_object(*json_object);
   }
 
@@ -1129,6 +1144,44 @@
     native_context()->set_data_view_fun(*data_view_fun);
   }
 
+  // -- M a p
+  InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
+                  isolate->initial_object_prototype(), Builtins::kIllegal);
+
+  // -- S e t
+  InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
+                  isolate->initial_object_prototype(), Builtins::kIllegal);
+
+  {  // Set up the iterator result object
+    STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
+    Handle<JSFunction> object_function(native_context()->object_function());
+    Handle<Map> iterator_result_map =
+        Map::Create(isolate, JSGeneratorObject::kResultPropertyCount);
+    DCHECK_EQ(JSGeneratorObject::kResultSize,
+              iterator_result_map->instance_size());
+    DCHECK_EQ(JSGeneratorObject::kResultPropertyCount,
+              iterator_result_map->inobject_properties());
+    Map::EnsureDescriptorSlack(iterator_result_map,
+                               JSGeneratorObject::kResultPropertyCount);
+
+    FieldDescriptor value_descr(factory->value_string(),
+                                JSGeneratorObject::kResultValuePropertyIndex,
+                                NONE, Representation::Tagged());
+    iterator_result_map->AppendDescriptor(&value_descr);
+
+    FieldDescriptor done_descr(factory->done_string(),
+                               JSGeneratorObject::kResultDonePropertyIndex,
+                               NONE, Representation::Tagged());
+    iterator_result_map->AppendDescriptor(&done_descr);
+
+    iterator_result_map->set_unused_property_fields(0);
+    iterator_result_map->set_pre_allocated_property_fields(
+        JSGeneratorObject::kResultPropertyCount);
+    DCHECK_EQ(JSGeneratorObject::kResultSize,
+              iterator_result_map->instance_size());
+    native_context()->set_iterator_result_map(*iterator_result_map);
+  }
+
   // -- W e a k M a p
   InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
                   isolate->initial_object_prototype(), Builtins::kIllegal);
@@ -1136,80 +1189,56 @@
   InstallFunction(global, "WeakSet", JS_WEAK_SET_TYPE, JSWeakSet::kSize,
                   isolate->initial_object_prototype(), Builtins::kIllegal);
 
-  {  // --- arguments_boilerplate_
+  {  // --- sloppy arguments map
     // Make sure we can recognize argument objects at runtime.
     // This is done by introducing an anonymous function with
     // class_name equal to 'Arguments'.
-    Handle<String> arguments_string = factory->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("Arguments"));
+    Handle<String> arguments_string = factory->Arguments_string();
     Handle<Code> code(isolate->builtins()->builtin(Builtins::kIllegal));
-
     Handle<JSFunction> function = factory->NewFunctionWithoutPrototype(
         arguments_string, code);
-    ASSERT(!function->has_initial_map());
     function->shared()->set_instance_class_name(*arguments_string);
-    function->shared()->set_expected_nof_properties(2);
-    function->set_prototype_or_initial_map(
-        native_context()->object_function()->prototype());
-    Handle<JSObject> result = factory->NewJSObject(function);
 
-    native_context()->set_sloppy_arguments_boilerplate(*result);
-    // Note: length must be added as the first property and
-    //       callee must be added as the second property.
-    JSObject::SetOwnPropertyIgnoreAttributes(
-        result, factory->length_string(),
-        factory->undefined_value(), DONT_ENUM,
-        Object::FORCE_TAGGED, FORCE_FIELD).Check();
-    JSObject::SetOwnPropertyIgnoreAttributes(
-        result, factory->callee_string(),
-        factory->undefined_value(), DONT_ENUM,
-        Object::FORCE_TAGGED, FORCE_FIELD).Check();
+    Handle<Map> map =
+        factory->NewMap(JS_OBJECT_TYPE, Heap::kSloppyArgumentsObjectSize);
+    // Create the descriptor array for the arguments object.
+    Map::EnsureDescriptorSlack(map, 2);
 
-#ifdef DEBUG
-    LookupResult lookup(isolate);
-    result->LookupOwn(factory->callee_string(), &lookup);
-    ASSERT(lookup.IsField());
-    ASSERT(lookup.GetFieldIndex().property_index() ==
-           Heap::kArgumentsCalleeIndex);
+    {  // length
+      FieldDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
+                        DONT_ENUM, Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+    {  // callee
+      FieldDescriptor d(factory->callee_string(), Heap::kArgumentsCalleeIndex,
+                        DONT_ENUM, Representation::Tagged());
+      map->AppendDescriptor(&d);
+    }
+    // @@iterator method is added later.
 
-    result->LookupOwn(factory->length_string(), &lookup);
-    ASSERT(lookup.IsField());
-    ASSERT(lookup.GetFieldIndex().property_index() ==
-           Heap::kArgumentsLengthIndex);
+    map->set_function_with_prototype(true);
+    map->set_pre_allocated_property_fields(2);
+    map->set_inobject_properties(2);
+    native_context()->set_sloppy_arguments_map(*map);
 
-    ASSERT(result->map()->inobject_properties() > Heap::kArgumentsCalleeIndex);
-    ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
+    DCHECK(!function->has_initial_map());
+    JSFunction::SetInitialMap(function, map,
+                              isolate->initial_object_prototype());
 
-    // Check the state of the object.
-    ASSERT(result->HasFastProperties());
-    ASSERT(result->HasFastObjectElements());
-#endif
+    DCHECK(map->inobject_properties() > Heap::kArgumentsCalleeIndex);
+    DCHECK(map->inobject_properties() > Heap::kArgumentsLengthIndex);
+    DCHECK(!map->is_dictionary_map());
+    DCHECK(IsFastObjectElementsKind(map->elements_kind()));
   }
 
-  {  // --- aliased_arguments_boilerplate_
-    // Set up a well-formed parameter map to make assertions happy.
-    Handle<FixedArray> elements = factory->NewFixedArray(2);
-    elements->set_map(heap->sloppy_arguments_elements_map());
-    Handle<FixedArray> array;
-    array = factory->NewFixedArray(0);
-    elements->set(0, *array);
-    array = factory->NewFixedArray(0);
-    elements->set(1, *array);
-
-    Handle<Map> old_map(
-        native_context()->sloppy_arguments_boilerplate()->map());
-    Handle<Map> new_map = Map::Copy(old_map);
-    new_map->set_pre_allocated_property_fields(2);
-    Handle<JSObject> result = factory->NewJSObjectFromMap(new_map);
-    // Set elements kind after allocating the object because
-    // NewJSObjectFromMap assumes a fast elements map.
-    new_map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
-    result->set_elements(*elements);
-    ASSERT(result->HasSloppyArgumentsElements());
-    native_context()->set_aliased_arguments_boilerplate(*result);
+  {  // --- aliased arguments map
+    Handle<Map> map = Map::Copy(isolate->sloppy_arguments_map());
+    map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
+    DCHECK_EQ(2, map->pre_allocated_property_fields());
+    native_context()->set_aliased_arguments_map(*map);
   }
 
-  {  // --- strict mode arguments boilerplate
+  {  // --- strict mode arguments map
     const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
 
@@ -1232,22 +1261,19 @@
     Map::EnsureDescriptorSlack(map, 3);
 
     {  // length
-      FieldDescriptor d(
-          factory->length_string(), 0, DONT_ENUM, Representation::Tagged());
+      FieldDescriptor d(factory->length_string(), Heap::kArgumentsLengthIndex,
+                        DONT_ENUM, Representation::Tagged());
       map->AppendDescriptor(&d);
     }
     {  // callee
-      CallbacksDescriptor d(factory->callee_string(),
-                            callee,
-                            attributes);
+      CallbacksDescriptor d(factory->callee_string(), callee, attributes);
       map->AppendDescriptor(&d);
     }
     {  // caller
-      CallbacksDescriptor d(factory->caller_string(),
-                            caller,
-                            attributes);
+      CallbacksDescriptor d(factory->caller_string(), caller, attributes);
       map->AppendDescriptor(&d);
     }
+    // @@iterator method is added later.
 
     map->set_function_with_prototype(true);
     map->set_prototype(native_context()->object_function()->prototype());
@@ -1256,30 +1282,13 @@
 
     // Copy constructor from the sloppy arguments boilerplate.
     map->set_constructor(
-      native_context()->sloppy_arguments_boilerplate()->map()->constructor());
+        native_context()->sloppy_arguments_map()->constructor());
 
-    // Allocate the arguments boilerplate object.
-    Handle<JSObject> result = factory->NewJSObjectFromMap(map);
-    native_context()->set_strict_arguments_boilerplate(*result);
+    native_context()->set_strict_arguments_map(*map);
 
-    // Add length property only for strict mode boilerplate.
-    JSObject::SetOwnPropertyIgnoreAttributes(
-        result, factory->length_string(),
-        factory->undefined_value(), DONT_ENUM).Check();
-
-#ifdef DEBUG
-    LookupResult lookup(isolate);
-    result->LookupOwn(factory->length_string(), &lookup);
-    ASSERT(lookup.IsField());
-    ASSERT(lookup.GetFieldIndex().property_index() ==
-           Heap::kArgumentsLengthIndex);
-
-    ASSERT(result->map()->inobject_properties() > Heap::kArgumentsLengthIndex);
-
-    // Check the state of the object.
-    ASSERT(result->HasFastProperties());
-    ASSERT(result->HasFastObjectElements());
-#endif
+    DCHECK(map->inobject_properties() > Heap::kArgumentsLengthIndex);
+    DCHECK(!map->is_dictionary_map());
+    DCHECK(IsFastObjectElementsKind(map->elements_kind()));
   }
 
   {  // --- context extension
@@ -1291,7 +1300,7 @@
         JSObject::kHeaderSize);
 
     Handle<String> name = factory->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("context_extension"));
+        STATIC_CHAR_VECTOR("context_extension"));
     context_extension_fun->shared()->set_instance_class_name(*name);
     native_context()->set_context_extension_function(*context_extension_fun);
   }
@@ -1339,8 +1348,8 @@
       JS_TYPED_ARRAY_TYPE,
       JSTypedArray::kSizeWithInternalFields,
       elements_kind);
-  result->set_initial_map(*initial_map);
-  initial_map->set_constructor(*result);
+  JSFunction::SetInitialMap(result, initial_map,
+                            handle(initial_map->prototype(), isolate()));
   *fun = result;
 
   ElementsKind external_kind = GetNextTransitionElementsKind(elements_kind);
@@ -1349,141 +1358,16 @@
 
 
 void Genesis::InitializeExperimentalGlobal() {
-  Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
+  // TODO(erikcorry): Move this into Genesis::InitializeGlobal once we no
+  // longer need to live behind a flag.
+  Handle<JSObject> builtins(native_context()->builtins());
 
-  // TODO(mstarzinger): Move this into Genesis::InitializeGlobal once we no
-  // longer need to live behind flags, so functions get added to the snapshot.
-
-  if (FLAG_harmony_symbols) {
-    // --- S y m b o l ---
-    Handle<JSFunction> symbol_fun = InstallFunction(
-        global, "Symbol", JS_VALUE_TYPE, JSValue::kSize,
-        isolate()->initial_object_prototype(), Builtins::kIllegal);
-    native_context()->set_symbol_function(*symbol_fun);
-  }
-
-  if (FLAG_harmony_collections) {
-    // -- M a p
-    InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
-                    isolate()->initial_object_prototype(), Builtins::kIllegal);
-    // -- S e t
-    InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
-                    isolate()->initial_object_prototype(), Builtins::kIllegal);
-    {   // -- S e t I t e r a t o r
-      Handle<JSObject> builtins(native_context()->builtins());
-      Handle<JSFunction> set_iterator_function =
-          InstallFunction(builtins, "SetIterator", JS_SET_ITERATOR_TYPE,
-                          JSSetIterator::kSize,
-                          isolate()->initial_object_prototype(),
-                          Builtins::kIllegal);
-      native_context()->set_set_iterator_map(
-          set_iterator_function->initial_map());
-    }
-    {   // -- M a p I t e r a t o r
-      Handle<JSObject> builtins(native_context()->builtins());
-      Handle<JSFunction> map_iterator_function =
-          InstallFunction(builtins, "MapIterator", JS_MAP_ITERATOR_TYPE,
-                          JSMapIterator::kSize,
-                          isolate()->initial_object_prototype(),
-                          Builtins::kIllegal);
-      native_context()->set_map_iterator_map(
-          map_iterator_function->initial_map());
-    }
-  }
-
-  if (FLAG_harmony_generators) {
-    // Create generator meta-objects and install them on the builtins object.
-    Handle<JSObject> builtins(native_context()->builtins());
-    Handle<JSObject> generator_object_prototype =
-        factory()->NewJSObject(isolate()->object_function(), TENURED);
-    Handle<JSFunction> generator_function_prototype = InstallFunction(
-        builtins, "GeneratorFunctionPrototype", JS_FUNCTION_TYPE,
-        JSFunction::kHeaderSize, generator_object_prototype,
-        Builtins::kIllegal);
-    InstallFunction(builtins, "GeneratorFunction",
-                    JS_FUNCTION_TYPE, JSFunction::kSize,
-                    generator_function_prototype, Builtins::kIllegal);
-
-    // Create maps for generator functions and their prototypes.  Store those
-    // maps in the native context.
-    Handle<Map> sloppy_function_map(native_context()->sloppy_function_map());
-    Handle<Map> generator_function_map = Map::Copy(sloppy_function_map);
-    generator_function_map->set_prototype(*generator_function_prototype);
-    native_context()->set_sloppy_generator_function_map(
-        *generator_function_map);
-
-    // The "arguments" and "caller" instance properties aren't specified, so
-    // technically we could leave them out.  They make even less sense for
-    // generators than for functions.  Still, the same argument that it makes
-    // sense to keep them around but poisoned in strict mode applies to
-    // generators as well.  With poisoned accessors, naive callers can still
-    // iterate over the properties without accessing them.
-    //
-    // We can't use PoisonArgumentsAndCaller because that mutates accessor pairs
-    // in place, and the initial state of the generator function map shares the
-    // accessor pair with sloppy functions.  Also the error message should be
-    // different.  Also unhappily, we can't use the API accessors to implement
-    // poisoning, because API accessors present themselves as data properties,
-    // not accessor properties, and so getOwnPropertyDescriptor raises an
-    // exception as it tries to get the values.  Sadness.
-    Handle<AccessorPair> poison_pair(factory()->NewAccessorPair());
-    PropertyAttributes rw_attribs =
-        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
-    Handle<JSFunction> poison_function = GetGeneratorPoisonFunction();
-    poison_pair->set_getter(*poison_function);
-    poison_pair->set_setter(*poison_function);
-    ReplaceAccessors(generator_function_map, factory()->arguments_string(),
-        rw_attribs, poison_pair);
-    ReplaceAccessors(generator_function_map, factory()->caller_string(),
-        rw_attribs, poison_pair);
-
-    Handle<Map> strict_function_map(native_context()->strict_function_map());
-    Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
-    // "arguments" and "caller" already poisoned.
-    strict_generator_function_map->set_prototype(*generator_function_prototype);
-    native_context()->set_strict_generator_function_map(
-        *strict_generator_function_map);
-
-    Handle<JSFunction> object_function(native_context()->object_function());
-    Handle<Map> generator_object_prototype_map = Map::Create(
-        object_function, 0);
-    generator_object_prototype_map->set_prototype(
-        *generator_object_prototype);
-    native_context()->set_generator_object_prototype_map(
-        *generator_object_prototype_map);
-  }
-
-  if (FLAG_harmony_collections || FLAG_harmony_generators) {
-    // Collection forEach uses an iterator result object.
-    // Generators return iterator result objects.
-
-    STATIC_ASSERT(JSGeneratorObject::kResultPropertyCount == 2);
-    Handle<JSFunction> object_function(native_context()->object_function());
-    ASSERT(object_function->initial_map()->inobject_properties() == 0);
-    Handle<Map> iterator_result_map = Map::Create(
-        object_function, JSGeneratorObject::kResultPropertyCount);
-    ASSERT(iterator_result_map->inobject_properties() ==
-        JSGeneratorObject::kResultPropertyCount);
-    Map::EnsureDescriptorSlack(
-        iterator_result_map, JSGeneratorObject::kResultPropertyCount);
-
-    FieldDescriptor value_descr(isolate()->factory()->value_string(),
-                                JSGeneratorObject::kResultValuePropertyIndex,
-                                NONE,
-                                Representation::Tagged());
-    iterator_result_map->AppendDescriptor(&value_descr);
-
-    FieldDescriptor done_descr(isolate()->factory()->done_string(),
-                               JSGeneratorObject::kResultDonePropertyIndex,
-                               NONE,
-                               Representation::Tagged());
-    iterator_result_map->AppendDescriptor(&done_descr);
-
-    iterator_result_map->set_unused_property_fields(0);
-    ASSERT_EQ(JSGeneratorObject::kResultSize,
-              iterator_result_map->instance_size());
-    native_context()->set_iterator_result_map(*iterator_result_map);
-  }
+  Handle<HeapObject> flag(
+      FLAG_harmony_regexps ? heap()->true_value() : heap()->false_value());
+  PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+  Runtime::DefineObjectProperty(builtins, factory()->harmony_regexps_string(),
+                                flag, attributes).Assert();
 }
 
 
@@ -1500,9 +1384,8 @@
   Factory* factory = isolate->factory();
   Handle<String> source_code;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, source_code,
-      factory->NewStringFromAscii(
-          ExperimentalNatives::GetRawScriptSource(index)),
+      isolate, source_code, factory->NewStringFromAscii(
+                                ExperimentalNatives::GetRawScriptSource(index)),
       false);
   return CompileNative(isolate, name, source_code);
 }
@@ -1526,7 +1409,7 @@
                                     NULL,
                                     Handle<Context>(isolate->context()),
                                     true);
-  ASSERT(isolate->has_pending_exception() != result);
+  DCHECK(isolate->has_pending_exception() != result);
   if (!result) isolate->clear_pending_exception();
   return result;
 }
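
The DCHECK above encodes an exclusive-or invariant: compilation either succeeds or leaves a pending exception, never both and never neither. A standalone sketch of the contract (illustrative names, not V8 API):

    #include <cassert>

    // Models DCHECK(isolate->has_pending_exception() != result).
    static bool Compile(bool ok, bool* has_pending_exception) {
      *has_pending_exception = !ok;  // failure and pending exception coincide
      return ok;
    }

    int main() {
      bool pending = false;
      bool result = Compile(false, &pending);
      assert(pending != result);      // the invariant checked in CompileNative
      if (!result) pending = false;   // mirrors clear_pending_exception()
      return 0;
    }
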
@@ -1546,19 +1429,12 @@
   // If we can't find the function in the cache, we compile a new
   // function and insert it into the cache.
   if (cache == NULL || !cache->Lookup(name, &function_info)) {
-    ASSERT(source->IsOneByteRepresentation());
+    DCHECK(source->IsOneByteRepresentation());
     Handle<String> script_name =
         factory->NewStringFromUtf8(name).ToHandleChecked();
     function_info = Compiler::CompileScript(
-        source,
-        script_name,
-        0,
-        0,
-        false,
-        top_context,
-        extension,
-        NULL,
-        NO_CACHED_DATA,
+        source, script_name, 0, 0, false, top_context, extension, NULL,
+        ScriptCompiler::kNoCompileOptions,
         use_runtime_context ? NATIVES_CODE : NOT_NATIVES_CODE);
     if (function_info.is_null()) return false;
     if (cache != NULL) cache->Add(name, function_info);
@@ -1567,7 +1443,7 @@
   // Set up the function context. Conceptually, we should clone the
   // function before overwriting the context but since we're in a
   // single-threaded environment it is not strictly necessary.
-  ASSERT(top_context->IsNativeContext());
+  DCHECK(top_context->IsNativeContext());
   Handle<Context> context =
       Handle<Context>(use_runtime_context
                       ? Handle<Context>(top_context->runtime_context())
@@ -1587,13 +1463,52 @@
 }
 
 
-#define INSTALL_NATIVE(Type, name, var)                                        \
-  Handle<String> var##_name =                                                  \
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR(name));          \
-  Handle<Object> var##_native = Object::GetProperty(                           \
-      handle(native_context()->builtins()), var##_name).ToHandleChecked();     \
+static Handle<JSObject> ResolveBuiltinIdHolder(Handle<Context> native_context,
+                                               const char* holder_expr) {
+  Isolate* isolate = native_context->GetIsolate();
+  Factory* factory = isolate->factory();
+  Handle<GlobalObject> global(native_context->global_object());
+  const char* period_pos = strchr(holder_expr, '.');
+  if (period_pos == NULL) {
+    return Handle<JSObject>::cast(
+        Object::GetPropertyOrElement(
+            global, factory->InternalizeUtf8String(holder_expr))
+            .ToHandleChecked());
+  }
+  const char* inner = period_pos + 1;
+  DCHECK_EQ(NULL, strchr(inner, '.'));
+  Vector<const char> property(holder_expr,
+                              static_cast<int>(period_pos - holder_expr));
+  Handle<String> property_string = factory->InternalizeUtf8String(property);
+  DCHECK(!property_string.is_null());
+  Handle<JSObject> object = Handle<JSObject>::cast(
+      Object::GetProperty(global, property_string).ToHandleChecked());
+  if (strcmp("prototype", inner) == 0) {
+    Handle<JSFunction> function = Handle<JSFunction>::cast(object);
+    return Handle<JSObject>(JSObject::cast(function->prototype()));
+  }
+  Handle<String> inner_string = factory->InternalizeUtf8String(inner);
+  DCHECK(!inner_string.is_null());
+  Handle<Object> value =
+      Object::GetProperty(object, inner_string).ToHandleChecked();
+  return Handle<JSObject>::cast(value);
+}
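
The rewritten ResolveBuiltinIdHolder accepts three holder shapes, a plain global name ("Math"), a function prototype ("String.prototype"), and a dotted property ("Math.abs"), with at most one period. A standalone sketch of just the string-splitting step (C standard library only; the V8-internal lookup calls are omitted):

    #include <cassert>
    #include <cstdio>
    #include <cstring>

    // Mirrors the strchr-based split at the top of ResolveBuiltinIdHolder.
    static void SplitHolderExpr(const char* holder_expr) {
      const char* period_pos = strchr(holder_expr, '.');
      if (period_pos == NULL) {
        printf("global property: %s\n", holder_expr);
        return;
      }
      const char* inner = period_pos + 1;
      assert(strchr(inner, '.') == NULL);  // at most one period is supported
      printf("outer: %.*s, inner: %s\n",
             static_cast<int>(period_pos - holder_expr), holder_expr, inner);
    }

    int main() {
      SplitHolderExpr("Math");              // -> global property "Math"
      SplitHolderExpr("String.prototype");  // -> prototype of function "String"
      SplitHolderExpr("Math.abs");          // -> property "abs" of object "Math"
      return 0;
    }
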
+
+
+#define INSTALL_NATIVE(Type, name, var)                                     \
+  Handle<String> var##_name =                                               \
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR(name));        \
+  Handle<Object> var##_native =                                             \
+      Object::GetProperty(handle(native_context()->builtins()), var##_name) \
+          .ToHandleChecked();                                               \
   native_context()->set_##var(Type::cast(*var##_native));
 
+#define INSTALL_NATIVE_MATH(name)                                    \
+  {                                                                  \
+    Handle<Object> fun =                                             \
+        ResolveBuiltinIdHolder(native_context(), "Math." #name);     \
+    native_context()->set_math_##name##_fun(JSFunction::cast(*fun)); \
+  }
 
 void Genesis::InstallNativeFunctions() {
   HandleScope scope(isolate());
@@ -1636,6 +1551,30 @@
                  native_object_get_notifier);
   INSTALL_NATIVE(JSFunction, "NativeObjectNotifierPerformChange",
                  native_object_notifier_perform_change);
+
+  INSTALL_NATIVE(Symbol, "symbolIterator", iterator_symbol);
+  INSTALL_NATIVE(Symbol, "symbolUnscopables", unscopables_symbol);
+  INSTALL_NATIVE(JSFunction, "ArrayValues", array_values_iterator);
+
+  INSTALL_NATIVE_MATH(abs)
+  INSTALL_NATIVE_MATH(acos)
+  INSTALL_NATIVE_MATH(asin)
+  INSTALL_NATIVE_MATH(atan)
+  INSTALL_NATIVE_MATH(atan2)
+  INSTALL_NATIVE_MATH(ceil)
+  INSTALL_NATIVE_MATH(cos)
+  INSTALL_NATIVE_MATH(exp)
+  INSTALL_NATIVE_MATH(floor)
+  INSTALL_NATIVE_MATH(imul)
+  INSTALL_NATIVE_MATH(log)
+  INSTALL_NATIVE_MATH(max)
+  INSTALL_NATIVE_MATH(min)
+  INSTALL_NATIVE_MATH(pow)
+  INSTALL_NATIVE_MATH(random)
+  INSTALL_NATIVE_MATH(round)
+  INSTALL_NATIVE_MATH(sin)
+  INSTALL_NATIVE_MATH(sqrt)
+  INSTALL_NATIVE_MATH(tan)
 }
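
Each INSTALL_NATIVE_MATH(name) line above expands, per the macro defined earlier in this hunk, into a holder lookup plus a native-context setter. The literal preprocessor expansion of one entry:

    // INSTALL_NATIVE_MATH(floor) expands to:
    {
      Handle<Object> fun =
          ResolveBuiltinIdHolder(native_context(), "Math.floor");
      native_context()->set_math_floor_fun(JSFunction::cast(*fun));
    }
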
 
 
@@ -1646,10 +1585,6 @@
     INSTALL_NATIVE(JSFunction, "DerivedSetTrap", derived_set_trap);
     INSTALL_NATIVE(JSFunction, "ProxyEnumerate", proxy_enumerate);
   }
-
-  if (FLAG_harmony_symbols) {
-    INSTALL_NATIVE(Symbol, "symbolIterator", iterator_symbol);
-  }
 }
 
 #undef INSTALL_NATIVE
@@ -1679,7 +1614,7 @@
   Handle<Map> original_map(array_function->initial_map());
   Handle<Map> initial_map = Map::Copy(original_map);
   initial_map->set_elements_kind(elements_kind);
-  array_function->set_initial_map(*initial_map);
+  JSFunction::SetInitialMap(array_function, initial_map, prototype);
 
   // Make "length" magic on instances.
   Map::EnsureDescriptorSlack(initial_map, 1);
@@ -1712,7 +1647,7 @@
       JSBuiltinsObject::kSize);
 
   Handle<String> name =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("builtins"));
   builtins_fun->shared()->set_instance_class_name(*name);
   builtins_fun->initial_map()->set_dictionary_map(true);
   builtins_fun->initial_map()->set_prototype(heap()->null_value());
@@ -1723,8 +1658,7 @@
   builtins->set_builtins(*builtins);
   builtins->set_native_context(*native_context());
   builtins->set_global_context(*native_context());
-  builtins->set_global_receiver(*builtins);
-  builtins->set_global_receiver(native_context()->global_proxy());
+  builtins->set_global_proxy(native_context()->global_proxy());
 
 
   // Set up the 'global' properties of the builtins object. The
@@ -1734,14 +1668,12 @@
   static const PropertyAttributes attributes =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
   Handle<String> global_string =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("global"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("global"));
   Handle<Object> global_obj(native_context()->global_object(), isolate());
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      builtins, global_string, global_obj, attributes).Check();
+  JSObject::AddProperty(builtins, global_string, global_obj, attributes);
   Handle<String> builtins_string =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("builtins"));
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      builtins, builtins_string, builtins, attributes).Check();
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("builtins"));
+  JSObject::AddProperty(builtins, builtins_string, builtins, attributes);
 
   // Set up the reference from the global object to the builtins object.
   JSGlobalObject::cast(native_context()->global_object())->
@@ -1749,7 +1681,7 @@
 
   // Create a bridge function that has context in the native context.
   Handle<JSFunction> bridge = factory()->NewFunction(factory()->empty_string());
-  ASSERT(bridge->context() == *isolate()->native_context());
+  DCHECK(bridge->context() == *isolate()->native_context());
 
   // Allocate the builtins context.
   Handle<Context> context =
@@ -1769,7 +1701,7 @@
     native_context()->set_script_function(*script_fun);
 
     Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
-    Map::EnsureDescriptorSlack(script_map, 13);
+    Map::EnsureDescriptorSlack(script_map, 14);
 
     PropertyAttributes attribs =
         static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
@@ -1876,6 +1808,23 @@
       script_map->AppendDescriptor(&d);
     }
 
+    Handle<AccessorInfo> script_source_url =
+        Accessors::ScriptSourceUrlInfo(isolate(), attribs);
+    {
+      CallbacksDescriptor d(Handle<Name>(Name::cast(script_source_url->name())),
+                            script_source_url, attribs);
+      script_map->AppendDescriptor(&d);
+    }
+
+    Handle<AccessorInfo> script_source_mapping_url =
+        Accessors::ScriptSourceMappingUrlInfo(isolate(), attribs);
+    {
+      CallbacksDescriptor d(
+          Handle<Name>(Name::cast(script_source_mapping_url->name())),
+          script_source_mapping_url, attribs);
+      script_map->AppendDescriptor(&d);
+    }
+
     // Allocate the empty script.
     Handle<Script> script = factory()->NewScript(factory()->empty_string());
     script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
@@ -1909,6 +1858,82 @@
     InstallInternalArray(builtins, "InternalPackedArray", FAST_ELEMENTS);
   }
 
+  {  // -- S e t I t e r a t o r
+    Handle<JSFunction> set_iterator_function = InstallFunction(
+        builtins, "SetIterator", JS_SET_ITERATOR_TYPE, JSSetIterator::kSize,
+        isolate()->initial_object_prototype(), Builtins::kIllegal);
+    native_context()->set_set_iterator_map(
+        set_iterator_function->initial_map());
+  }
+
+  {  // -- M a p I t e r a t o r
+    Handle<JSFunction> map_iterator_function = InstallFunction(
+        builtins, "MapIterator", JS_MAP_ITERATOR_TYPE, JSMapIterator::kSize,
+        isolate()->initial_object_prototype(), Builtins::kIllegal);
+    native_context()->set_map_iterator_map(
+        map_iterator_function->initial_map());
+  }
+
+  {
+    // Create generator meta-objects and install them on the builtins object.
+    Handle<JSObject> builtins(native_context()->builtins());
+    Handle<JSObject> generator_object_prototype =
+        factory()->NewJSObject(isolate()->object_function(), TENURED);
+    Handle<JSFunction> generator_function_prototype =
+        InstallFunction(builtins, "GeneratorFunctionPrototype",
+                        JS_FUNCTION_TYPE, JSFunction::kHeaderSize,
+                        generator_object_prototype, Builtins::kIllegal);
+    InstallFunction(builtins, "GeneratorFunction", JS_FUNCTION_TYPE,
+                    JSFunction::kSize, generator_function_prototype,
+                    Builtins::kIllegal);
+
+    // Create maps for generator functions and their prototypes.  Store those
+    // maps in the native context.
+    Handle<Map> generator_function_map =
+        Map::Copy(sloppy_function_map_writable_prototype_);
+    generator_function_map->set_prototype(*generator_function_prototype);
+    native_context()->set_sloppy_generator_function_map(
+        *generator_function_map);
+
+    // The "arguments" and "caller" instance properties aren't specified, so
+    // technically we could leave them out.  They make even less sense for
+    // generators than for functions.  Still, the same argument that it makes
+    // sense to keep them around but poisoned in strict mode applies to
+    // generators as well.  With poisoned accessors, naive callers can still
+    // iterate over the properties without accessing them.
+    //
+    // We can't use PoisonArgumentsAndCaller because that mutates accessor pairs
+    // in place, and the initial state of the generator function map shares the
+    // accessor pair with sloppy functions.  Also the error message should be
+    // different.  Also unhappily, we can't use the API accessors to implement
+    // poisoning, because API accessors present themselves as data properties,
+    // not accessor properties, and so getOwnPropertyDescriptor raises an
+    // exception as it tries to get the values.  Sadness.
+    Handle<AccessorPair> poison_pair(factory()->NewAccessorPair());
+    PropertyAttributes rw_attribs =
+        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+    Handle<JSFunction> poison_function = GetGeneratorPoisonFunction();
+    poison_pair->set_getter(*poison_function);
+    poison_pair->set_setter(*poison_function);
+    ReplaceAccessors(generator_function_map, factory()->arguments_string(),
+                     rw_attribs, poison_pair);
+    ReplaceAccessors(generator_function_map, factory()->caller_string(),
+                     rw_attribs, poison_pair);
+
+    Handle<Map> strict_function_map(native_context()->strict_function_map());
+    Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
+    // "arguments" and "caller" already poisoned.
+    strict_generator_function_map->set_prototype(*generator_function_prototype);
+    native_context()->set_strict_generator_function_map(
+        *strict_generator_function_map);
+
+    Handle<JSFunction> object_function(native_context()->object_function());
+    Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
+    generator_object_prototype_map->set_prototype(*generator_object_prototype);
+    native_context()->set_generator_object_prototype_map(
+        *generator_object_prototype_map);
+  }
+
   if (FLAG_disable_native_files) {
     PrintF("Warning: Running without installed natives!\n");
     return true;
@@ -1930,16 +1955,17 @@
   // Store the map for the string prototype after the natives has been compiled
   // and the String function has been set up.
   Handle<JSFunction> string_function(native_context()->string_function());
-  ASSERT(JSObject::cast(
+  DCHECK(JSObject::cast(
       string_function->initial_map()->prototype())->HasFastProperties());
   native_context()->set_string_function_prototype_map(
       HeapObject::cast(string_function->initial_map()->prototype())->map());
 
   // Install Function.prototype.call and apply.
-  { Handle<String> key = factory()->function_class_string();
+  {
+    Handle<String> key = factory()->Function_string();
     Handle<JSFunction> function =
         Handle<JSFunction>::cast(Object::GetProperty(
-            isolate()->global_object(), key).ToHandleChecked());
+            handle(native_context()->global_object()), key).ToHandleChecked());
     Handle<JSObject> proto =
         Handle<JSObject>(JSObject::cast(function->instance_prototype()));
 
@@ -1950,12 +1976,19 @@
     Handle<JSFunction> apply =
         InstallFunction(proto, "apply", JS_OBJECT_TYPE, JSObject::kHeaderSize,
                         MaybeHandle<JSObject>(), Builtins::kFunctionApply);
+    if (FLAG_vector_ics) {
+      // Apply embeds an IC, so we need a type vector of size 1 in the shared
+      // function info.
+      Handle<TypeFeedbackVector> feedback_vector =
+          factory()->NewTypeFeedbackVector(1);
+      apply->shared()->set_feedback_vector(*feedback_vector);
+    }
 
     // Make sure that Function.prototype.call appears to be compiled.
     // The code will never be called, but inline caching for call will
     // only work if it appears to be compiled.
     call->shared()->DontAdaptArguments();
-    ASSERT(call->is_compiled());
+    DCHECK(call->is_compiled());
 
     // Set the expected parameters for apply to 2; required by builtin.
     apply->shared()->set_formal_parameter_count(2);
@@ -1996,7 +2029,7 @@
       Handle<String> length = factory()->length_string();
       int old = array_descriptors->SearchWithCache(
           *length, array_function->initial_map());
-      ASSERT(old != DescriptorArray::kNotFound);
+      DCHECK(old != DescriptorArray::kNotFound);
       CallbacksDescriptor desc(length,
                                handle(array_descriptors->GetValue(old),
                                       isolate()),
@@ -2026,6 +2059,34 @@
     native_context()->set_regexp_result_map(*initial_map);
   }
 
+  // Add @@iterator method to the arguments object maps.
+  {
+    PropertyAttributes attribs = DONT_ENUM;
+    Handle<AccessorInfo> arguments_iterator =
+        Accessors::ArgumentsIteratorInfo(isolate(), attribs);
+    {
+      CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
+                            arguments_iterator, attribs);
+      Handle<Map> map(native_context()->sloppy_arguments_map());
+      Map::EnsureDescriptorSlack(map, 1);
+      map->AppendDescriptor(&d);
+    }
+    {
+      CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
+                            arguments_iterator, attribs);
+      Handle<Map> map(native_context()->aliased_arguments_map());
+      Map::EnsureDescriptorSlack(map, 1);
+      map->AppendDescriptor(&d);
+    }
+    {
+      CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
+                            arguments_iterator, attribs);
+      Handle<Map> map(native_context()->strict_arguments_map());
+      Map::EnsureDescriptorSlack(map, 1);
+      map->AppendDescriptor(&d);
+    }
+  }
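
The three inner blocks above append the same @@iterator descriptor to the sloppy, aliased, and strict arguments maps in turn. A hedged equivalent written as a loop, a sketch using only calls that appear in this hunk (plus arraysize, used elsewhere in this diff), not the committed code:

    Handle<Name> name(native_context()->iterator_symbol());
    Handle<Map> arguments_maps[] = {
        Handle<Map>(native_context()->sloppy_arguments_map()),
        Handle<Map>(native_context()->aliased_arguments_map()),
        Handle<Map>(native_context()->strict_arguments_map())};
    for (size_t i = 0; i < arraysize(arguments_maps); i++) {
      CallbacksDescriptor d(name, arguments_iterator, attribs);
      Map::EnsureDescriptorSlack(arguments_maps[i], 1);
      arguments_maps[i]->AppendDescriptor(&d);
    }
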
+
 #ifdef VERIFY_HEAP
   builtins->ObjectVerify();
 #endif
@@ -2046,45 +2107,17 @@
   for (int i = ExperimentalNatives::GetDebuggerCount();
        i < ExperimentalNatives::GetBuiltinsCount();
        i++) {
-    INSTALL_EXPERIMENTAL_NATIVE(i, symbols, "symbol.js")
     INSTALL_EXPERIMENTAL_NATIVE(i, proxies, "proxy.js")
-    INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection.js")
-    INSTALL_EXPERIMENTAL_NATIVE(i, collections, "collection-iterator.js")
-    INSTALL_EXPERIMENTAL_NATIVE(i, generators, "generator.js")
-    INSTALL_EXPERIMENTAL_NATIVE(i, iteration, "array-iterator.js")
     INSTALL_EXPERIMENTAL_NATIVE(i, strings, "harmony-string.js")
     INSTALL_EXPERIMENTAL_NATIVE(i, arrays, "harmony-array.js")
-    INSTALL_EXPERIMENTAL_NATIVE(i, maths, "harmony-math.js")
+    INSTALL_EXPERIMENTAL_NATIVE(i, classes, "harmony-classes.js")
   }
 
   InstallExperimentalNativeFunctions();
-  InstallExperimentalBuiltinFunctionIds();
   return true;
 }
 
 
-static Handle<JSObject> ResolveBuiltinIdHolder(
-    Handle<Context> native_context,
-    const char* holder_expr) {
-  Isolate* isolate = native_context->GetIsolate();
-  Factory* factory = isolate->factory();
-  Handle<GlobalObject> global(native_context->global_object());
-  const char* period_pos = strchr(holder_expr, '.');
-  if (period_pos == NULL) {
-    return Handle<JSObject>::cast(Object::GetPropertyOrElement(
-        global, factory->InternalizeUtf8String(holder_expr)).ToHandleChecked());
-  }
-  ASSERT_EQ(".prototype", period_pos);
-  Vector<const char> property(holder_expr,
-                              static_cast<int>(period_pos - holder_expr));
-  Handle<String> property_string = factory->InternalizeUtf8String(property);
-  ASSERT(!property_string.is_null());
-  Handle<JSFunction> function = Handle<JSFunction>::cast(
-      Object::GetProperty(global, property_string).ToHandleChecked());
-  return Handle<JSObject>(JSObject::cast(function->prototype()));
-}
-
-
 static void InstallBuiltinFunctionId(Handle<JSObject> holder,
                                      const char* function_name,
                                      BuiltinFunctionId id) {
@@ -2110,15 +2143,6 @@
 }
 
 
-void Genesis::InstallExperimentalBuiltinFunctionIds() {
-  HandleScope scope(isolate());
-  if (FLAG_harmony_maths) {
-    Handle<JSObject> holder = ResolveBuiltinIdHolder(native_context(), "Math");
-    InstallBuiltinFunctionId(holder, "clz32", kMathClz32);
-  }
-}
-
-
 // Do not forget to update macros.py with named constant
 // of cache id.
 #define JSFUNCTION_RESULT_CACHE_LIST(F) \
@@ -2181,34 +2205,39 @@
 
 bool Genesis::InstallSpecialObjects(Handle<Context> native_context) {
   Isolate* isolate = native_context->GetIsolate();
+  // Don't install extensions into the snapshot.
+  if (isolate->serializer_enabled()) return true;
+
   Factory* factory = isolate->factory();
   HandleScope scope(isolate);
   Handle<JSGlobalObject> global(JSGlobalObject::cast(
       native_context->global_object()));
+
+  Handle<JSObject> Error = Handle<JSObject>::cast(
+      Object::GetProperty(isolate, global, "Error").ToHandleChecked());
+  Handle<String> name =
+      factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("stackTraceLimit"));
+  Handle<Smi> stack_trace_limit(Smi::FromInt(FLAG_stack_trace_limit), isolate);
+  JSObject::AddProperty(Error, name, stack_trace_limit, NONE);
+
   // Expose the natives in global if a name for it is specified.
   if (FLAG_expose_natives_as != NULL && strlen(FLAG_expose_natives_as) != 0) {
     Handle<String> natives =
         factory->InternalizeUtf8String(FLAG_expose_natives_as);
-    RETURN_ON_EXCEPTION_VALUE(
-        isolate,
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            global, natives, Handle<JSObject>(global->builtins()), DONT_ENUM),
-        false);
+    uint32_t dummy_index;
+    if (natives->AsArrayIndex(&dummy_index)) return true;
+    JSObject::AddProperty(global, natives, handle(global->builtins()),
+                          DONT_ENUM);
   }
 
-  Handle<Object> Error = Object::GetProperty(
-      isolate, global, "Error").ToHandleChecked();
-  if (Error->IsJSObject()) {
-    Handle<String> name = factory->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("stackTraceLimit"));
-    Handle<Smi> stack_trace_limit(
-        Smi::FromInt(FLAG_stack_trace_limit), isolate);
-    RETURN_ON_EXCEPTION_VALUE(
-        isolate,
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            Handle<JSObject>::cast(Error), name, stack_trace_limit, NONE),
-        false);
-  }
+  // Expose the stack trace symbol to native JS.
+  RETURN_ON_EXCEPTION_VALUE(isolate,
+                            JSObject::SetOwnPropertyIgnoreAttributes(
+                                handle(native_context->builtins(), isolate),
+                                factory->InternalizeOneByteString(
+                                    STATIC_CHAR_VECTOR("stack_trace_symbol")),
+                                factory->stack_trace_symbol(), NONE),
+                            false);
 
   // Expose the debug global object in global if a name for it is specified.
   if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
@@ -2223,12 +2252,10 @@
     debug_context->set_security_token(native_context->security_token());
     Handle<String> debug_string =
         factory->InternalizeUtf8String(FLAG_expose_debug_as);
+    uint32_t index;
+    if (debug_string->AsArrayIndex(&index)) return true;
     Handle<Object> global_proxy(debug_context->global_proxy(), isolate);
-    RETURN_ON_EXCEPTION_VALUE(
-        isolate,
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            global, debug_string, global_proxy, DONT_ENUM),
-        false);
+    JSObject::AddProperty(global, debug_string, global_proxy, DONT_ENUM);
   }
   return true;
 }
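
The AsArrayIndex guards above skip names such as --expose-natives-as=42: a name that parses as a 32-bit array index cannot be installed as a named property this way, so the function bails out early. A standalone analog of the check (illustrative, not V8's implementation):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // A valid JS array index is a canonical decimal in [0, 2^32 - 2].
    static bool IsArrayIndex(const char* s, uint32_t* out) {
      if (*s == '\0' || strlen(s) > 10) return false;
      if (s[0] == '0' && s[1] != '\0') return false;  // no leading zeros
      uint64_t value = 0;
      for (const char* p = s; *p; ++p) {
        if (*p < '0' || *p > '9') return false;
        value = value * 10 + static_cast<uint64_t>(*p - '0');
        if (value > 4294967294ULL) return false;  // max index is 2^32 - 2
      }
      *out = static_cast<uint32_t>(value);
      return true;
    }

    int main() {
      uint32_t index;
      printf("%d\n", IsArrayIndex("42", &index));       // 1: guard triggers
      printf("%d\n", IsArrayIndex("natives", &index));  // 0: installed normally
      return 0;
    }
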
@@ -2332,7 +2359,7 @@
                        "Circular extension dependency")) {
     return false;
   }
-  ASSERT(extension_states->get_state(current) == UNVISITED);
+  DCHECK(extension_states->get_state(current) == UNVISITED);
   extension_states->set_state(current, VISITED);
   v8::Extension* extension = current->extension();
   // Install the extension's dependencies
@@ -2345,8 +2372,9 @@
   }
   // We do not expect this to throw an exception. Change this if it does.
   Handle<String> source_code =
-      isolate->factory()->NewExternalStringFromAscii(
-          extension->source()).ToHandleChecked();
+      isolate->factory()
+          ->NewExternalStringFromOneByte(extension->source())
+          .ToHandleChecked();
   bool result = CompileScriptCached(isolate,
                                     CStrVector(extension->name()),
                                     source_code,
@@ -2354,14 +2382,14 @@
                                     extension,
                                     Handle<Context>(isolate->context()),
                                     false);
-  ASSERT(isolate->has_pending_exception() != result);
+  DCHECK(isolate->has_pending_exception() != result);
   if (!result) {
     // We print out the name of the extension that failed to install.
     // When an error is thrown during bootstrapping, the isolate's error
     // throwing machinery automatically prints to the console the line
     // number at which it happened.
-    OS::PrintError("Error installing extension '%s'.\n",
-                   current->extension()->name());
+    base::OS::PrintError("Error installing extension '%s'.\n",
+                         current->extension()->name());
     isolate->clear_pending_exception();
   }
   extension_states->set_state(current, INSTALLED);
@@ -2378,6 +2406,10 @@
         isolate(), builtins, Builtins::GetName(id)).ToHandleChecked();
     Handle<JSFunction> function = Handle<JSFunction>::cast(function_object);
     builtins->set_javascript_builtin(id, *function);
+    // TODO(mstarzinger): This is just a temporary hack to make TurboFan work,
+    // the correct solution is to restore the context register after invoking
+    // builtins from full-codegen.
+    function->shared()->set_optimization_disabled(true);
     if (!Compiler::EnsureCompiled(function, CLEAR_EXCEPTION)) {
       return false;
     }
@@ -2391,26 +2423,26 @@
     v8::Handle<v8::ObjectTemplate> global_proxy_template) {
   Handle<JSObject> global_proxy(
       JSObject::cast(native_context()->global_proxy()));
-  Handle<JSObject> inner_global(
+  Handle<JSObject> global_object(
       JSObject::cast(native_context()->global_object()));
 
   if (!global_proxy_template.IsEmpty()) {
     // Configure the global proxy object.
-    Handle<ObjectTemplateInfo> proxy_data =
+    Handle<ObjectTemplateInfo> global_proxy_data =
         v8::Utils::OpenHandle(*global_proxy_template);
-    if (!ConfigureApiObject(global_proxy, proxy_data)) return false;
+    if (!ConfigureApiObject(global_proxy, global_proxy_data)) return false;
 
-    // Configure the inner global object.
+    // Configure the global object.
     Handle<FunctionTemplateInfo> proxy_constructor(
-        FunctionTemplateInfo::cast(proxy_data->constructor()));
+        FunctionTemplateInfo::cast(global_proxy_data->constructor()));
     if (!proxy_constructor->prototype_template()->IsUndefined()) {
-      Handle<ObjectTemplateInfo> inner_data(
+      Handle<ObjectTemplateInfo> global_object_data(
           ObjectTemplateInfo::cast(proxy_constructor->prototype_template()));
-      if (!ConfigureApiObject(inner_global, inner_data)) return false;
+      if (!ConfigureApiObject(global_object, global_object_data)) return false;
     }
   }
 
-  SetObjectPrototype(global_proxy, inner_global);
+  SetObjectPrototype(global_proxy, global_object);
 
   native_context()->set_initial_array_prototype(
       JSArray::cast(native_context()->array_function()->prototype()));
@@ -2420,16 +2452,16 @@
 
 
 bool Genesis::ConfigureApiObject(Handle<JSObject> object,
-    Handle<ObjectTemplateInfo> object_template) {
-  ASSERT(!object_template.is_null());
-  ASSERT(FunctionTemplateInfo::cast(object_template->constructor())
+                                 Handle<ObjectTemplateInfo> object_template) {
+  DCHECK(!object_template.is_null());
+  DCHECK(FunctionTemplateInfo::cast(object_template->constructor())
             ->IsTemplateFor(object->map()));
 
   MaybeHandle<JSObject> maybe_obj =
       Execution::InstantiateObject(object_template);
   Handle<JSObject> obj;
   if (!maybe_obj.ToHandle(&obj)) {
-    ASSERT(isolate()->has_pending_exception());
+    DCHECK(isolate()->has_pending_exception());
     isolate()->clear_pending_exception();
     return false;
   }
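
ConfigureApiObject shows the MaybeHandle pattern used throughout this diff (and adopted by the new CreateEnvironment signature in bootstrapper.h below): a possibly-empty value that must be unwrapped with ToHandle before use. A standalone analog (illustrative types, not V8's MaybeHandle):

    #include <cstdio>

    template <typename T>
    struct Maybe {  // stand-in for MaybeHandle<T>
      T value;
      bool has_value;
      bool ToHandle(T* out) const {
        if (has_value) *out = value;
        return has_value;
      }
    };

    int main() {
      Maybe<int> empty = {0, false};
      Maybe<int> full = {7, true};
      int v = 0;
      if (!empty.ToHandle(&v)) printf("empty: take the failure path\n");
      if (full.ToHandle(&v)) printf("full: proceed with %d\n", v);
      return 0;
    }
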
@@ -2450,29 +2482,27 @@
           HandleScope inner(isolate());
           Handle<Name> key = Handle<Name>(descs->GetKey(i));
           FieldIndex index = FieldIndex::ForDescriptor(from->map(), i);
-          ASSERT(!descs->GetDetails(i).representation().IsDouble());
+          DCHECK(!descs->GetDetails(i).representation().IsDouble());
           Handle<Object> value = Handle<Object>(from->RawFastPropertyAt(index),
                                                 isolate());
-          JSObject::SetOwnPropertyIgnoreAttributes(
-              to, key, value, details.attributes()).Check();
+          JSObject::AddProperty(to, key, value, details.attributes());
           break;
         }
         case CONSTANT: {
           HandleScope inner(isolate());
           Handle<Name> key = Handle<Name>(descs->GetKey(i));
           Handle<Object> constant(descs->GetConstant(i), isolate());
-          JSObject::SetOwnPropertyIgnoreAttributes(
-              to, key, constant, details.attributes()).Check();
+          JSObject::AddProperty(to, key, constant, details.attributes());
           break;
         }
         case CALLBACKS: {
-          LookupResult result(isolate());
-          Handle<Name> key(Name::cast(descs->GetKey(i)), isolate());
-          to->LookupOwn(key, &result);
+          Handle<Name> key(descs->GetKey(i));
+          LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+          CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
           // If the property is already there we skip it
-          if (result.IsFound()) continue;
+          if (it.IsFound()) continue;
           HandleScope inner(isolate());
-          ASSERT(!to->HasFastProperties());
+          DCHECK(!to->HasFastProperties());
           // Add to dictionary.
           Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
           PropertyDetails d = PropertyDetails(
@@ -2480,12 +2510,8 @@
           JSObject::SetNormalizedProperty(to, key, callbacks, d);
           break;
         }
+        // Does not occur since the from object has fast properties.
         case NORMAL:
-          // Do not occur since the from object has fast properties.
-        case HANDLER:
-        case INTERCEPTOR:
-        case NONEXISTENT:
-          // No element in instance descriptors have proxy or interceptor type.
           UNREACHABLE();
           break;
       }
@@ -2497,23 +2523,22 @@
     for (int i = 0; i < capacity; i++) {
       Object* raw_key(properties->KeyAt(i));
       if (properties->IsKey(raw_key)) {
-        ASSERT(raw_key->IsName());
+        DCHECK(raw_key->IsName());
         // If the property is already there we skip it.
-        LookupResult result(isolate());
         Handle<Name> key(Name::cast(raw_key));
-        to->LookupOwn(key, &result);
-        if (result.IsFound()) continue;
+        LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+        CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+        if (it.IsFound()) continue;
         // Set the property.
         Handle<Object> value = Handle<Object>(properties->ValueAt(i),
                                               isolate());
-        ASSERT(!value->IsCell());
+        DCHECK(!value->IsCell());
         if (value->IsPropertyCell()) {
           value = Handle<Object>(PropertyCell::cast(*value)->value(),
                                  isolate());
         }
         PropertyDetails details = properties->DetailsAt(i);
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            to, key, value, details.attributes()).Check();
+        JSObject::AddProperty(to, key, value, details.attributes());
       }
     }
   }
@@ -2533,17 +2558,15 @@
 void Genesis::TransferObject(Handle<JSObject> from, Handle<JSObject> to) {
   HandleScope outer(isolate());
 
-  ASSERT(!from->IsJSArray());
-  ASSERT(!to->IsJSArray());
+  DCHECK(!from->IsJSArray());
+  DCHECK(!to->IsJSArray());
 
   TransferNamedProperties(from, to);
   TransferIndexedProperties(from, to);
 
   // Transfer the prototype (new map is needed).
-  Handle<Map> old_to_map = Handle<Map>(to->map());
-  Handle<Map> new_to_map = Map::Copy(old_to_map);
-  new_to_map->set_prototype(from->map()->prototype());
-  to->set_map(*new_to_map);
+  Handle<Object> proto(from->map()->prototype(), isolate());
+  SetObjectPrototype(to, proto);
 }
 
 
@@ -2551,8 +2574,8 @@
   // The maps with writable prototype are created in CreateEmptyFunction
   // and CreateStrictModeFunctionMaps respectively. Initially the maps are
   // created with read-only prototype for JS builtins processing.
-  ASSERT(!sloppy_function_map_writable_prototype_.is_null());
-  ASSERT(!strict_function_map_writable_prototype_.is_null());
+  DCHECK(!sloppy_function_map_writable_prototype_.is_null());
+  DCHECK(!strict_function_map_writable_prototype_.is_null());
 
   // Replace function instance maps to make prototype writable.
   native_context()->set_sloppy_function_map(
@@ -2583,16 +2606,13 @@
 
 
 Genesis::Genesis(Isolate* isolate,
-                 Handle<Object> global_object,
-                 v8::Handle<v8::ObjectTemplate> global_template,
+                 MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+                 v8::Handle<v8::ObjectTemplate> global_proxy_template,
                  v8::ExtensionConfiguration* extensions)
     : isolate_(isolate),
       active_(isolate->bootstrapper()) {
   NoTrackDoubleFieldsForSerializerScope disable_scope(isolate);
   result_ = Handle<Context>::null();
-  // If V8 cannot be initialized, just return.
-  if (!V8::Initialize(NULL)) return;
-
   // Before creating the roots we must save the context and restore it
   // on all function exits.
   SaveContext saved_context(isolate);
@@ -2615,41 +2635,39 @@
     AddToWeakNativeContextList(*native_context());
     isolate->set_context(*native_context());
     isolate->counters()->contexts_created_by_snapshot()->Increment();
-    Handle<GlobalObject> inner_global;
-    Handle<JSGlobalProxy> global_proxy =
-        CreateNewGlobals(global_template,
-                         global_object,
-                         &inner_global);
+    Handle<GlobalObject> global_object;
+    Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(
+        global_proxy_template, maybe_global_proxy, &global_object);
 
-    HookUpGlobalProxy(inner_global, global_proxy);
-    HookUpInnerGlobal(inner_global);
-    native_context()->builtins()->set_global_receiver(
+    HookUpGlobalProxy(global_object, global_proxy);
+    HookUpGlobalObject(global_object);
+    native_context()->builtins()->set_global_proxy(
         native_context()->global_proxy());
 
-    if (!ConfigureGlobalObjects(global_template)) return;
+    if (!ConfigureGlobalObjects(global_proxy_template)) return;
   } else {
     // We get here if there was no context snapshot.
     CreateRoots();
     Handle<JSFunction> empty_function = CreateEmptyFunction(isolate);
     CreateStrictModeFunctionMaps(empty_function);
-    Handle<GlobalObject> inner_global;
-    Handle<JSGlobalProxy> global_proxy =
-        CreateNewGlobals(global_template, global_object, &inner_global);
-    HookUpGlobalProxy(inner_global, global_proxy);
-    InitializeGlobal(inner_global, empty_function);
+    Handle<GlobalObject> global_object;
+    Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(
+        global_proxy_template, maybe_global_proxy, &global_object);
+    HookUpGlobalProxy(global_object, global_proxy);
+    InitializeGlobal(global_object, empty_function);
     InstallJSFunctionResultCaches();
     InitializeNormalizedMapCaches();
     if (!InstallNatives()) return;
 
     MakeFunctionInstancePrototypeWritable();
 
-    if (!ConfigureGlobalObjects(global_template)) return;
+    if (!ConfigureGlobalObjects(global_proxy_template)) return;
     isolate->counters()->contexts_created_from_scratch()->Increment();
   }
 
-  // Initialize experimental globals and install experimental natives.
-  InitializeExperimentalGlobal();
+  // Install experimental natives.
   if (!InstallExperimentalNatives()) return;
+  InitializeExperimentalGlobal();
 
   // We can't (de-)serialize typed arrays currently, but we are lucky: The state
   // of the random number generator needs no initialization during snapshot
@@ -2670,50 +2688,23 @@
     Utils::OpenHandle(*buffer)->set_should_be_freed(true);
     v8::Local<v8::Uint32Array> ta = v8::Uint32Array::New(buffer, 0, num_elems);
     Handle<JSBuiltinsObject> builtins(native_context()->builtins());
-    Runtime::ForceSetObjectProperty(builtins,
-                                    factory()->InternalizeOneByteString(
-                                        STATIC_ASCII_VECTOR("rngstate")),
-                                    Utils::OpenHandle(*ta),
-                                    NONE).Assert();
+    Runtime::DefineObjectProperty(builtins, factory()->InternalizeOneByteString(
+                                                STATIC_CHAR_VECTOR("rngstate")),
+                                  Utils::OpenHandle(*ta), NONE).Assert();
 
     // Initialize trigonometric lookup tables and constants.
-    const int table_num_bytes = TrigonometricLookupTable::table_num_bytes();
-    v8::Local<v8::ArrayBuffer> sin_buffer = v8::ArrayBuffer::New(
+    const int constants_size = arraysize(fdlibm::MathConstants::constants);
+    const int table_num_bytes = constants_size * kDoubleSize;
+    v8::Local<v8::ArrayBuffer> trig_buffer = v8::ArrayBuffer::New(
         reinterpret_cast<v8::Isolate*>(isolate),
-        TrigonometricLookupTable::sin_table(), table_num_bytes);
-    v8::Local<v8::ArrayBuffer> cos_buffer = v8::ArrayBuffer::New(
-        reinterpret_cast<v8::Isolate*>(isolate),
-        TrigonometricLookupTable::cos_x_interval_table(), table_num_bytes);
-    v8::Local<v8::Float64Array> sin_table = v8::Float64Array::New(
-        sin_buffer, 0, TrigonometricLookupTable::table_size());
-    v8::Local<v8::Float64Array> cos_table = v8::Float64Array::New(
-        cos_buffer, 0, TrigonometricLookupTable::table_size());
+        const_cast<double*>(fdlibm::MathConstants::constants), table_num_bytes);
+    v8::Local<v8::Float64Array> trig_table =
+        v8::Float64Array::New(trig_buffer, 0, constants_size);
 
-    Runtime::ForceSetObjectProperty(builtins,
-                                    factory()->InternalizeOneByteString(
-                                        STATIC_ASCII_VECTOR("kSinTable")),
-                                    Utils::OpenHandle(*sin_table),
-                                    NONE).Assert();
-    Runtime::ForceSetObjectProperty(
+    Runtime::DefineObjectProperty(
         builtins,
-        factory()->InternalizeOneByteString(
-            STATIC_ASCII_VECTOR("kCosXIntervalTable")),
-        Utils::OpenHandle(*cos_table),
-        NONE).Assert();
-    Runtime::ForceSetObjectProperty(
-        builtins,
-        factory()->InternalizeOneByteString(
-            STATIC_ASCII_VECTOR("kSamples")),
-        factory()->NewHeapNumber(
-            TrigonometricLookupTable::samples()),
-        NONE).Assert();
-    Runtime::ForceSetObjectProperty(
-        builtins,
-        factory()->InternalizeOneByteString(
-            STATIC_ASCII_VECTOR("kIndexConvert")),
-        factory()->NewHeapNumber(
-            TrigonometricLookupTable::samples_over_pi_half()),
-        NONE).Assert();
+        factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("kMath")),
+        Utils::OpenHandle(*trig_table), NONE).Assert();
   }
 
   result_ = native_context();
@@ -2745,7 +2736,7 @@
 
 // Called when the top-level V8 mutex is destroyed.
 void Bootstrapper::FreeThreadResources() {
-  ASSERT(!IsActive());
+  DCHECK(!IsActive());
 }
 
 } }  // namespace v8::internal
diff --git a/src/bootstrapper.h b/src/bootstrapper.h
index 5988382..0cc8486 100644
--- a/src/bootstrapper.h
+++ b/src/bootstrapper.h
@@ -11,11 +11,11 @@
 namespace internal {
 
 // A SourceCodeCache uses a FixedArray to store pairs of
-// (AsciiString*, JSFunction*), mapping names of native code files
+// (OneByteString*, JSFunction*), mapping names of native code files
 // (runtime.js, etc.) to precompiled functions. Instead of mapping
 // names to functions it might make sense to let the JS2C tool
 // generate an index for each native JS file.
-class SourceCodeCache V8_FINAL BASE_EMBEDDED {
+class SourceCodeCache FINAL BASE_EMBEDDED {
  public:
   explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
 
@@ -24,7 +24,7 @@
   }
 
   void Iterate(ObjectVisitor* v) {
-    v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
+    v->VisitPointer(bit_cast<Object**, FixedArray**>(&cache_));
   }
 
   bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
@@ -49,7 +49,7 @@
     cache_ = *new_array;
     Handle<String> str =
         factory->NewStringFromAscii(name, TENURED).ToHandleChecked();
-    ASSERT(!str.is_null());
+    DCHECK(!str.is_null());
     cache_->set(length, *str);
     cache_->set(length + 1, *shared);
     Script::cast(shared->script())->set_type(Smi::FromInt(type_));
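
SourceCodeCache keeps its entries as adjacent (name, function) pairs in a single flat FixedArray: Add grows the array by two slots and appends both halves of the pair, and Lookup scans the even slots linearly. A standalone analog of that layout (std::string payloads stand in for the SharedFunctionInfo handles):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct FlatCache {
      std::vector<std::string> slots;  // even index: name, odd index: payload

      bool Lookup(const std::string& name, std::string* out) const {
        for (size_t i = 0; i + 1 < slots.size(); i += 2) {
          if (slots[i] == name) {
            *out = slots[i + 1];
            return true;
          }
        }
        return false;
      }
      void Add(const std::string& name, const std::string& payload) {
        slots.push_back(name);
        slots.push_back(payload);
      }
    };

    int main() {
      FlatCache cache;
      cache.Add("runtime.js", "compiled-fn-1");
      std::string hit;
      printf("%d\n", cache.Lookup("runtime.js", &hit) ? 1 : 0);  // 1
      printf("%d\n", cache.Lookup("math.js", &hit) ? 1 : 0);     // 0
      return 0;
    }
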
@@ -64,7 +64,7 @@
 
 // The Bootstrapper is the public interface for creating a JavaScript global
 // context.
-class Bootstrapper V8_FINAL {
+class Bootstrapper FINAL {
  public:
   static void InitializeOncePerProcess();
   static void TearDownExtensions();
@@ -76,8 +76,8 @@
   // Creates a JavaScript Global Context with initial object graph.
   // The returned value is a global handle casted to V8Environment*.
   Handle<Context> CreateEnvironment(
-      Handle<Object> global_object,
-      v8::Handle<v8::ObjectTemplate> global_template,
+      MaybeHandle<JSGlobalProxy> maybe_global_proxy,
+      v8::Handle<v8::ObjectTemplate> global_object_template,
       v8::ExtensionConfiguration* extensions);
 
   // Detach the environment from its outer global object.
@@ -134,7 +134,7 @@
 };
 
 
-class BootstrapperActive V8_FINAL BASE_EMBEDDED {
+class BootstrapperActive FINAL BASE_EMBEDDED {
  public:
   explicit BootstrapperActive(Bootstrapper* bootstrapper)
       : bootstrapper_(bootstrapper) {
@@ -152,14 +152,14 @@
 };
 
 
-class NativesExternalStringResource V8_FINAL
-    : public v8::String::ExternalAsciiStringResource {
+class NativesExternalStringResource FINAL
+    : public v8::String::ExternalOneByteStringResource {
  public:
   NativesExternalStringResource(Bootstrapper* bootstrapper,
                                 const char* source,
                                 size_t length);
-  virtual const char* data() const V8_OVERRIDE { return data_; }
-  virtual size_t length() const V8_OVERRIDE { return length_; }
+  virtual const char* data() const OVERRIDE { return data_; }
+  virtual size_t length() const OVERRIDE { return length_; }
 
  private:
   const char* data_;
diff --git a/src/builtins.cc b/src/builtins.cc
index 503d9a6..d0c19e5 100644
--- a/src/builtins.cc
+++ b/src/builtins.cc
@@ -11,10 +11,11 @@
 #include "src/builtins.h"
 #include "src/cpu-profiler.h"
 #include "src/gdb-jit.h"
-#include "src/ic-inl.h"
+#include "src/heap/mark-compact.h"
 #include "src/heap-profiler.h"
-#include "src/mark-compact.h"
-#include "src/stub-cache.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/prototype.h"
 #include "src/vm-state-inl.h"
 
 namespace v8 {
@@ -30,12 +31,12 @@
       : Arguments(length, arguments) { }
 
   Object*& operator[] (int index) {
-    ASSERT(index < length());
+    DCHECK(index < length());
     return Arguments::operator[](index);
   }
 
   template <class S> Handle<S> at(int index) {
-    ASSERT(index < length());
+    DCHECK(index < length());
     return Arguments::at<S>(index);
   }
 
@@ -58,7 +59,7 @@
 #ifdef DEBUG
   void Verify() {
     // Check we have at least the receiver.
-    ASSERT(Arguments::length() >= 1);
+    DCHECK(Arguments::length() >= 1);
   }
 #endif
 };
@@ -75,7 +76,7 @@
 template <>
 void BuiltinArguments<NEEDS_CALLED_FUNCTION>::Verify() {
   // Check we have at least the receiver and the called function.
-  ASSERT(Arguments::length() >= 2);
+  DCHECK(Arguments::length() >= 2);
   // Make sure cast to JSFunction succeeds.
   called_function();
 }
@@ -137,7 +138,7 @@
   // that the state of the stack is as we assume it to be in the
   // code below.
   StackFrameIterator it(isolate);
-  ASSERT(it.frame()->is_exit());
+  DCHECK(it.frame()->is_exit());
   it.Advance();
   StackFrame* frame = it.frame();
   bool reference_result = frame->is_construct();
@@ -154,7 +155,7 @@
   const Smi* kConstructMarker = Smi::FromInt(StackFrame::CONSTRUCT);
   Object* marker = Memory::Object_at(caller_fp + kMarkerOffset);
   bool result = (marker == kConstructMarker);
-  ASSERT_EQ(result, reference_result);
+  DCHECK_EQ(result, reference_result);
   return result;
 }
 #endif
@@ -181,74 +182,6 @@
 }
 
 
-static FixedArrayBase* LeftTrimFixedArray(Heap* heap,
-                                          FixedArrayBase* elms,
-                                          int to_trim) {
-  ASSERT(heap->CanMoveObjectStart(elms));
-
-  Map* map = elms->map();
-  int entry_size;
-  if (elms->IsFixedArray()) {
-    entry_size = kPointerSize;
-  } else {
-    entry_size = kDoubleSize;
-  }
-  ASSERT(elms->map() != heap->fixed_cow_array_map());
-  // For now this trick is only applied to fixed arrays in new and paged space.
-  // In large object space the object's start must coincide with chunk
-  // and thus the trick is just not applicable.
-  ASSERT(!heap->lo_space()->Contains(elms));
-
-  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
-  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
-  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
-
-  Object** former_start = HeapObject::RawField(elms, 0);
-
-  const int len = elms->length();
-
-  if (to_trim * entry_size > FixedArrayBase::kHeaderSize &&
-      elms->IsFixedArray() &&
-      !heap->new_space()->Contains(elms)) {
-    // If we are doing a big trim in old space then we zap the space that was
-    // formerly part of the array so that the GC (aided by the card-based
-    // remembered set) won't find pointers to new-space there.
-    Object** zap = reinterpret_cast<Object**>(elms->address());
-    zap++;  // Header of filler must be at least one word so skip that.
-    for (int i = 1; i < to_trim; i++) {
-      *zap++ = Smi::FromInt(0);
-    }
-  }
-  // Technically in new space this write might be omitted (except for
-  // debug mode which iterates through the heap), but to play safer
-  // we still do it.
-  // Since left trimming is only performed on pages which are not concurrently
-  // swept creating a filler object does not require synchronization.
-  heap->CreateFillerObjectAt(elms->address(), to_trim * entry_size);
-
-  int new_start_index = to_trim * (entry_size / kPointerSize);
-  former_start[new_start_index] = map;
-  former_start[new_start_index + 1] = Smi::FromInt(len - to_trim);
-
-  // Maintain marking consistency for HeapObjectIterator and
-  // IncrementalMarking.
-  int size_delta = to_trim * entry_size;
-  Address new_start = elms->address() + size_delta;
-  heap->marking()->TransferMark(elms->address(), new_start);
-  heap->AdjustLiveBytes(new_start, -size_delta, Heap::FROM_MUTATOR);
-
-  FixedArrayBase* new_elms =
-      FixedArrayBase::cast(HeapObject::FromAddress(new_start));
-  HeapProfiler* profiler = heap->isolate()->heap_profiler();
-  if (profiler->is_tracking_object_moves()) {
-    profiler->ObjectMoveEvent(elms->address(),
-                              new_elms->address(),
-                              new_elms->Size());
-  }
-  return new_elms;
-}
-
-
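
The function removed above (now Heap::LeftTrimFixedArray, per the call site changed later in this file) avoids copying the surviving elements: it writes a fresh (map, length) header immediately in front of them, so the trimmed prefix becomes dead filler space for the GC. A standalone analog of the header move (plain words in a buffer, no filler or GC bookkeeping):

    #include <cstdint>
    #include <cstdio>

    static const int kHeaderWords = 2;  // map + length, as in FixedArrayBase

    int main() {
      // word 0: "map", word 1: length, words 2..: elements
      intptr_t heap[kHeaderWords + 5] = {0xA11, 5, 10, 20, 30, 40, 50};
      int to_trim = 2;

      intptr_t old_length = heap[1];
      intptr_t* new_start = heap + to_trim;   // header moves forward
      new_start[0] = heap[0];                 // map survives in the new header
      new_start[1] = old_length - to_trim;    // length shrinks by to_trim

      printf("length=%ld first=%ld\n", static_cast<long>(new_start[1]),
             static_cast<long>(new_start[kHeaderWords]));  // length=3 first=30
      return 0;
    }
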
 static bool ArrayPrototypeHasNoElements(Heap* heap,
                                         Context* native_context,
                                         JSObject* array_proto) {
@@ -257,12 +190,15 @@
   // fields.
   if (array_proto->elements() != heap->empty_fixed_array()) return false;
   // Object.prototype
-  Object* proto = array_proto->GetPrototype();
-  if (proto == heap->null_value()) return false;
-  array_proto = JSObject::cast(proto);
+  PrototypeIterator iter(heap->isolate(), array_proto);
+  if (iter.IsAtEnd()) {
+    return false;
+  }
+  array_proto = JSObject::cast(iter.GetCurrent());
   if (array_proto != native_context->initial_object_prototype()) return false;
   if (array_proto->elements() != heap->empty_fixed_array()) return false;
-  return array_proto->GetPrototype()->IsNull();
+  iter.Advance();
+  return iter.IsAtEnd();
 }
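
PrototypeIterator replaces the raw GetPrototype() chain walking here and in IsJSArrayFastElementMovingAllowed below. A standalone analog of the traversal protocol (a stand-in chain, not V8 objects):

    #include <cstdio>

    struct Proto {  // stand-in for an object on the prototype chain
      const char* name;
      Proto* next;
    };

    // Mirrors the calls used above: IsAtEnd(), GetCurrent(), Advance().
    struct PrototypeIter {
      Proto* current;
      bool IsAtEnd() const { return current == NULL; }
      Proto* GetCurrent() const { return current; }
      void Advance() { current = current->next; }
    };

    int main() {
      Proto object_proto = {"Object.prototype", NULL};
      Proto array_proto = {"Array.prototype", &object_proto};
      PrototypeIter iter = {&array_proto};
      while (!iter.IsAtEnd()) {
        printf("%s\n", iter.GetCurrent()->name);
        iter.Advance();
      }
      return 0;
    }
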
 
 
@@ -302,7 +238,7 @@
   if (first_added_arg >= args_length) return handle(array->elements(), isolate);
 
   ElementsKind origin_kind = array->map()->elements_kind();
-  ASSERT(!IsFastObjectElementsKind(origin_kind));
+  DCHECK(!IsFastObjectElementsKind(origin_kind));
   ElementsKind target_kind = origin_kind;
   {
     DisallowHeapAllocation no_gc;
@@ -335,7 +271,8 @@
   Context* native_context = heap->isolate()->context()->native_context();
   JSObject* array_proto =
       JSObject::cast(native_context->array_function()->prototype());
-  return receiver->GetPrototype() == array_proto &&
+  PrototypeIterator iter(heap->isolate(), receiver);
+  return iter.GetCurrent() == array_proto &&
          ArrayPrototypeHasNoElements(heap, native_context, array_proto);
 }
 
@@ -384,7 +321,7 @@
   if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
     return CallJsBuiltin(isolate, "ArrayPush", args);
   }
-  ASSERT(!array->map()->is_observed());
+  DCHECK(!array->map()->is_observed());
 
   ElementsKind kind = array->GetElementsKind();
 
@@ -395,7 +332,7 @@
     }
     // Currently fixed arrays cannot grow too big, so
     // we should never hit this case.
-    ASSERT(to_add <= (Smi::kMaxValue - len));
+    DCHECK(to_add <= (Smi::kMaxValue - len));
 
     int new_length = len + to_add;
 
@@ -434,7 +371,7 @@
     }
     // Currently fixed arrays cannot grow too big, so
     // we should never hit this case.
-    ASSERT(to_add <= (Smi::kMaxValue - len));
+    DCHECK(to_add <= (Smi::kMaxValue - len));
 
     int new_length = len + to_add;
 
@@ -489,7 +426,7 @@
   }
 
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  ASSERT(!array->map()->is_observed());
+  DCHECK(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return isolate->heap()->undefined_value();
@@ -521,7 +458,7 @@
     return CallJsBuiltin(isolate, "ArrayShift", args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  ASSERT(!array->map()->is_observed());
+  DCHECK(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
@@ -535,7 +472,7 @@
   }
 
   if (heap->CanMoveObjectStart(*elms_obj)) {
-    array->set_elements(LeftTrimFixedArray(heap, *elms_obj, 1));
+    array->set_elements(heap->LeftTrimFixedArray(*elms_obj, 1));
   } else {
     // Shift the elements.
     if (elms_obj->IsFixedArray()) {
@@ -570,7 +507,7 @@
     return CallJsBuiltin(isolate, "ArrayUnshift", args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  ASSERT(!array->map()->is_observed());
+  DCHECK(!array->map()->is_observed());
   if (!array->HasFastSmiOrObjectElements()) {
     return CallJsBuiltin(isolate, "ArrayUnshift", args);
   }
@@ -579,7 +516,7 @@
   int new_length = len + to_add;
   // Currently fixed arrays cannot grow too big, so
   // we should never hit this case.
-  ASSERT(to_add <= (Smi::kMaxValue - len));
+  DCHECK(to_add <= (Smi::kMaxValue - len));
 
   if (to_add > 0 && JSArray::WouldChangeReadOnlyLength(array, len + to_add)) {
     return CallJsBuiltin(isolate, "ArrayUnshift", args);
@@ -647,8 +584,8 @@
     } else {
       // Array.slice(arguments, ...) is quite a common idiom (notably more
       // than 50% of invocations in Web apps).  Treat it in C++ as well.
-      Map* arguments_map = isolate->context()->native_context()->
-          sloppy_arguments_boilerplate()->map();
+      Map* arguments_map =
+          isolate->context()->native_context()->sloppy_arguments_map();
 
       bool is_arguments_object_with_fast_elements =
           receiver->IsJSObject() &&
@@ -676,7 +613,7 @@
       }
     }
 
-    ASSERT(len >= 0);
+    DCHECK(len >= 0);
     int n_arguments = args.length() - 1;
 
     // Note carefully chosen defaults---if argument is missing,
@@ -777,7 +714,7 @@
     return CallJsBuiltin(isolate, "ArraySplice", args);
   }
   Handle<JSArray> array = Handle<JSArray>::cast(receiver);
-  ASSERT(!array->map()->is_observed());
+  DCHECK(!array->map()->is_observed());
 
   int len = Smi::cast(array->length())->value();
 
@@ -811,7 +748,7 @@
   // compatibility.
   int actual_delete_count;
   if (n_arguments == 1) {
-    ASSERT(len - actual_start >= 0);
+    DCHECK(len - actual_start >= 0);
     actual_delete_count = len - actual_start;
   } else {
     int value = 0;  // ToInteger(undefined) == 0
@@ -880,7 +817,7 @@
 
       if (heap->CanMoveObjectStart(*elms_obj)) {
         // On the fast path we move the start of the object in memory.
-        elms_obj = handle(LeftTrimFixedArray(heap, *elms_obj, delta), isolate);
+        elms_obj = handle(heap->LeftTrimFixedArray(*elms_obj, delta));
       } else {
         // This is the slow path. We are going to move the elements to the left
         // by copying them. For trimmed values we store the hole.
@@ -918,7 +855,7 @@
     Handle<FixedArray> elms = Handle<FixedArray>::cast(elms_obj);
     // Currently fixed arrays cannot grow too big, so
     // we should never hit this case.
-    ASSERT((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
+    DCHECK((item_count - actual_delete_count) <= (Smi::kMaxValue - len));
 
     // Check if array needs to grow.
     if (new_length > elms->length()) {
@@ -1003,9 +940,9 @@
     bool is_holey = false;
     for (int i = 0; i < n_arguments; i++) {
       Object* arg = args[i];
-      if (!arg->IsJSArray() ||
-          !JSArray::cast(arg)->HasFastElements() ||
-          JSArray::cast(arg)->GetPrototype() != array_proto) {
+      PrototypeIterator iter(isolate, arg);
+      if (!arg->IsJSArray() || !JSArray::cast(arg)->HasFastElements() ||
+          iter.GetCurrent() != array_proto) {
         AllowHeapAllocation allow_allocation;
         return CallJsBuiltin(isolate, "ArrayConcatJS", args);
       }
@@ -1016,7 +953,7 @@
       STATIC_ASSERT(FixedArray::kMaxLength < kHalfOfMaxInt);
       USE(kHalfOfMaxInt);
       result_len += len;
-      ASSERT(result_len >= 0);
+      DCHECK(result_len >= 0);
 
       if (result_len > FixedDoubleArray::kMaxLength) {
         AllowHeapAllocation allow_allocation;
@@ -1061,7 +998,7 @@
     }
   }
 
-  ASSERT(j == result_len);
+  DCHECK(j == result_len);
 
   return *result_array;
 }
@@ -1073,15 +1010,17 @@
 
 BUILTIN(StrictModePoisonPill) {
   HandleScope scope(isolate);
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "strict_poison_pill", HandleVector<Object>(NULL, 0)));
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError("strict_poison_pill", HandleVector<Object>(NULL, 0)));
 }
 
 
 BUILTIN(GeneratorPoisonPill) {
   HandleScope scope(isolate);
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "generator_poison_pill", HandleVector<Object>(NULL, 0)));
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError("generator_poison_pill", HandleVector<Object>(NULL, 0)));
 }
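
Both poison pills now use THROW_NEW_ERROR_RETURN_FAILURE instead of spelling out the throw. A self-contained sketch of the idiom; the expansion here is an assumption modeled on the code being replaced (schedule the exception on the isolate, then return the failure sentinel in one statement), not a copy of V8's macros:

// Sketch of the throw-and-bail idiom; Isolate, Error, and the macro body
// are stand-ins, not the V8 definitions.
#include <cstdio>

struct Error { const char* msg; };
struct Isolate {
  Error pending{nullptr};
  int Throw(Error e) { pending = e; return -1; }  // -1 = failure sentinel
};

// Assumed shape of the macro: throw on the isolate, then return the
// failure sentinel from the enclosing builtin body in one statement.
#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, error) \
  do { return (isolate)->Throw(error); } while (false)

int PoisonPill(Isolate* isolate) {
  THROW_NEW_ERROR_RETURN_FAILURE(isolate, Error{"strict_poison_pill"});
}

int main() {
  Isolate iso;
  int result = PoisonPill(&iso);  // returns the sentinel, records the error
  std::printf("result=%d pending=%s\n", result, iso.pending.msg);
  return 0;
}
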
 
 
@@ -1095,11 +1034,12 @@
 static inline Object* FindHidden(Heap* heap,
                                  Object* object,
                                  FunctionTemplateInfo* type) {
-  if (type->IsTemplateFor(object)) return object;
-  Object* proto = object->GetPrototype(heap->isolate());
-  if (proto->IsJSObject() &&
-      JSObject::cast(proto)->map()->is_hidden_prototype()) {
-    return FindHidden(heap, proto, type);
+  for (PrototypeIterator iter(heap->isolate(), object,
+                              PrototypeIterator::START_AT_RECEIVER);
+       !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
+    if (type->IsTemplateFor(iter.GetCurrent())) {
+      return iter.GetCurrent();
+    }
   }
   return heap->null_value();
 }
@@ -1150,12 +1090,12 @@
 template <bool is_construct>
 MUST_USE_RESULT static Object* HandleApiCallHelper(
     BuiltinArguments<NEEDS_CALLED_FUNCTION> args, Isolate* isolate) {
-  ASSERT(is_construct == CalledAsConstructor(isolate));
+  DCHECK(is_construct == CalledAsConstructor(isolate));
   Heap* heap = isolate->heap();
 
   HandleScope scope(isolate);
   Handle<JSFunction> function = args.called_function();
-  ASSERT(function->shared()->IsApiFunction());
+  DCHECK(function->shared()->IsApiFunction());
 
   Handle<FunctionTemplateInfo> fun_data(
       function->shared()->get_api_func_data(), isolate);
@@ -1169,20 +1109,17 @@
   SharedFunctionInfo* shared = function->shared();
   if (shared->strict_mode() == SLOPPY && !shared->native()) {
     Object* recv = args[0];
-    ASSERT(!recv->IsNull());
-    if (recv->IsUndefined()) {
-      args[0] = function->context()->global_object()->global_receiver();
-    }
+    DCHECK(!recv->IsNull());
+    if (recv->IsUndefined()) args[0] = function->global_proxy();
   }
 
   Object* raw_holder = TypeCheck(heap, args.length(), &args[0], *fun_data);
 
   if (raw_holder->IsNull()) {
     // This function cannot be called with the given receiver.  Abort!
-    Handle<Object> obj =
-        isolate->factory()->NewTypeError(
-            "illegal_invocation", HandleVector(&function, 1));
-    return isolate->Throw(*obj);
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewTypeError("illegal_invocation", HandleVector(&function, 1)));
   }
 
   Object* raw_call_data = fun_data->call_code();
@@ -1195,7 +1132,7 @@
     Object* result;
 
     LOG(isolate, ApiObjectAccess("call", JSObject::cast(*args.receiver())));
-    ASSERT(raw_holder->IsJSObject());
+    DCHECK(raw_holder->IsJSObject());
 
     FunctionCallbackArguments custom(isolate,
                                      data_obj,
@@ -1240,7 +1177,7 @@
     BuiltinArguments<NO_EXTRA_ARGUMENTS> args) {
   // Non-functions are never called as constructors. Even if this is an object
   // called as a constructor the delegate call is not a construct call.
-  ASSERT(!CalledAsConstructor(isolate));
+  DCHECK(!CalledAsConstructor(isolate));
   Heap* heap = isolate->heap();
 
   Handle<Object> receiver = args.receiver();
@@ -1250,12 +1187,12 @@
 
   // Get the invocation callback from the function descriptor that was
   // used to create the called object.
-  ASSERT(obj->map()->has_instance_call_handler());
+  DCHECK(obj->map()->has_instance_call_handler());
   JSFunction* constructor = JSFunction::cast(obj->map()->constructor());
-  ASSERT(constructor->shared()->IsApiFunction());
+  DCHECK(constructor->shared()->IsApiFunction());
   Object* handler =
       constructor->shared()->get_api_func_data()->instance_call_handler();
-  ASSERT(!handler->IsUndefined());
+  DCHECK(!handler->IsUndefined());
   CallHandlerInfo* call_data = CallHandlerInfo::cast(handler);
   Object* callback_obj = call_data->callback();
   v8::FunctionCallback callback =
@@ -1313,7 +1250,7 @@
 
 
 static void Generate_LoadIC_Getter_ForDeopt(MacroAssembler* masm) {
-  LoadStubCompiler::GenerateLoadViaGetterForDeopt(masm);
+  NamedLoadHandlerCompiler::GenerateLoadViaGetterForDeopt(masm);
 }
 
 
@@ -1352,21 +1289,6 @@
 }
 
 
-static void Generate_KeyedLoadIC_IndexedInterceptor(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateIndexedInterceptor(masm);
-}
-
-
-static void Generate_KeyedLoadIC_SloppyArguments(MacroAssembler* masm) {
-  KeyedLoadIC::GenerateSloppyArguments(masm);
-}
-
-
-static void Generate_StoreIC_Slow(MacroAssembler* masm) {
-  StoreIC::GenerateSlow(masm);
-}
-
-
 static void Generate_StoreIC_Miss(MacroAssembler* masm) {
   StoreIC::GenerateMiss(masm);
 }
@@ -1377,8 +1299,18 @@
 }
 
 
+static void Generate_StoreIC_Slow(MacroAssembler* masm) {
+  NamedStoreHandlerCompiler::GenerateSlow(masm);
+}
+
+
+static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
+  ElementHandlerCompiler::GenerateStoreSlow(masm);
+}
+
+
 static void Generate_StoreIC_Setter_ForDeopt(MacroAssembler* masm) {
-  StoreStubCompiler::GenerateStoreViaSetterForDeopt(masm);
+  NamedStoreHandlerCompiler::GenerateStoreViaSetterForDeopt(masm);
 }
 
 
@@ -1397,11 +1329,6 @@
 }
 
 
-static void Generate_KeyedStoreIC_Slow(MacroAssembler* masm) {
-  KeyedStoreIC::GenerateSlow(masm);
-}
-
-
 static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
   KeyedStoreIC::GenerateInitialize(masm);
 }
@@ -1601,7 +1528,7 @@
 
 
 void Builtins::SetUp(Isolate* isolate, bool create_heap_objects) {
-  ASSERT(!initialized_);
+  DCHECK(!initialized_);
 
   // Create a scope for the handles in the builtins.
   HandleScope scope(isolate);
@@ -1630,7 +1557,7 @@
       // We pass all arguments to the generator, but it may not use all of
       // them.  This works because the first arguments are on top of the
       // stack.
-      ASSERT(!masm.has_frame());
+      DCHECK(!masm.has_frame());
       g(&masm, functions[i].name, functions[i].extra_args);
       // Move the code into the object heap.
       CodeDesc desc;
@@ -1641,14 +1568,15 @@
       // Log the event and add the code to the builtins array.
       PROFILE(isolate,
               CodeCreateEvent(Logger::BUILTIN_TAG, *code, functions[i].s_name));
-      GDBJIT(AddCode(GDBJITInterface::BUILTIN, functions[i].s_name, *code));
       builtins_[i] = *code;
+      if (code->kind() == Code::BUILTIN) code->set_builtin_index(i);
 #ifdef ENABLE_DISASSEMBLER
       if (FLAG_print_builtin_code) {
         CodeTracer::Scope trace_scope(isolate->GetCodeTracer());
-        PrintF(trace_scope.file(), "Builtin: %s\n", functions[i].s_name);
-        code->Disassemble(functions[i].s_name, trace_scope.file());
-        PrintF(trace_scope.file(), "\n");
+        OFStream os(trace_scope.file());
+        os << "Builtin: " << functions[i].s_name << "\n";
+        code->Disassemble(functions[i].s_name, os);
+        os << "\n";
       }
 #endif
     } else {
@@ -1688,12 +1616,12 @@
 
 
 void Builtins::Generate_InterruptCheck(MacroAssembler* masm) {
-  masm->TailCallRuntime(Runtime::kHiddenInterrupt, 0, 1);
+  masm->TailCallRuntime(Runtime::kInterrupt, 0, 1);
 }
 
 
 void Builtins::Generate_StackCheck(MacroAssembler* masm) {
-  masm->TailCallRuntime(Runtime::kHiddenStackGuard, 0, 1);
+  masm->TailCallRuntime(Runtime::kStackGuard, 0, 1);
 }
 
 
diff --git a/src/builtins.h b/src/builtins.h
index a2ed12d..c1ed91d 100644
--- a/src/builtins.h
+++ b/src/builtins.h
@@ -63,105 +63,65 @@
   V(GeneratorPoisonPill, NO_EXTRA_ARGUMENTS)
 
 // Define list of builtins implemented in assembly.
-#define BUILTIN_LIST_A(V)                                               \
-  V(ArgumentsAdaptorTrampoline,     BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(InOptimizationQueue,            BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(JSConstructStubGeneric,         BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(JSConstructStubApi,             BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(JSEntryTrampoline,              BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(JSConstructEntryTrampoline,     BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(CompileUnoptimized,             BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(CompileOptimized,               BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(CompileOptimizedConcurrent,     BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(NotifyDeoptimized,              BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(NotifySoftDeoptimized,          BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(NotifyLazyDeoptimized,          BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(NotifyStubFailure,              BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(NotifyStubFailureSaveDoubles,   BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-                                                                        \
-  V(LoadIC_Miss,                    BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(KeyedLoadIC_Miss,               BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(StoreIC_Miss,                   BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(KeyedStoreIC_Miss,              BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(LoadIC_Getter_ForDeopt,         LOAD_IC, MONOMORPHIC,               \
-                                    kNoExtraICState)                    \
-  V(KeyedLoadIC_Initialize,         KEYED_LOAD_IC, UNINITIALIZED,       \
-                                    kNoExtraICState)                    \
-  V(KeyedLoadIC_PreMonomorphic,     KEYED_LOAD_IC, PREMONOMORPHIC,      \
-                                    kNoExtraICState)                    \
-  V(KeyedLoadIC_Generic,            KEYED_LOAD_IC, GENERIC,             \
-                                    kNoExtraICState)                    \
-  V(KeyedLoadIC_String,             KEYED_LOAD_IC, MEGAMORPHIC,         \
-                                    kNoExtraICState)                    \
-  V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MONOMORPHIC,         \
-                                    kNoExtraICState)                    \
-  V(KeyedLoadIC_SloppyArguments,    KEYED_LOAD_IC, MONOMORPHIC,         \
-                                    kNoExtraICState)                    \
-                                                                        \
-  V(StoreIC_Setter_ForDeopt,        STORE_IC, MONOMORPHIC,              \
-                                    StoreIC::kStrictModeState)          \
-                                                                        \
-  V(KeyedStoreIC_Initialize,        KEYED_STORE_IC, UNINITIALIZED,      \
-                                    kNoExtraICState)                    \
-  V(KeyedStoreIC_PreMonomorphic,    KEYED_STORE_IC, PREMONOMORPHIC,     \
-                                    kNoExtraICState)                    \
-  V(KeyedStoreIC_Generic,           KEYED_STORE_IC, GENERIC,            \
-                                    kNoExtraICState)                    \
-                                                                        \
-  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED,      \
-                                    StoreIC::kStrictModeState)          \
-  V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC, \
-                                    StoreIC::kStrictModeState)          \
-  V(KeyedStoreIC_Generic_Strict,    KEYED_STORE_IC, GENERIC,            \
-                                    StoreIC::kStrictModeState)          \
-  V(KeyedStoreIC_SloppyArguments,   KEYED_STORE_IC, MONOMORPHIC,        \
-                                    kNoExtraICState)                    \
-                                                                        \
-  /* Uses KeyedLoadIC_Initialize; must be after in list. */             \
-  V(FunctionCall,                   BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(FunctionApply,                  BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-                                                                        \
-  V(InternalArrayCode,              BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(ArrayCode,                      BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-                                                                        \
-  V(StringConstructCode,            BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-                                                                        \
-  V(OnStackReplacement,             BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(InterruptCheck,                 BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(OsrAfterStackCheck,             BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(StackCheck,                     BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-                                                                        \
-  V(MarkCodeAsExecutedOnce,         BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
-  V(MarkCodeAsExecutedTwice,        BUILTIN, UNINITIALIZED,             \
-                                    kNoExtraICState)                    \
+#define BUILTIN_LIST_A(V)                                                      \
+  V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
+  V(InOptimizationQueue, BUILTIN, UNINITIALIZED, kNoExtraICState)              \
+  V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
+  V(JSConstructStubApi, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
+  V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
+  V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
+  V(CompileLazy, BUILTIN, UNINITIALIZED, kNoExtraICState)                      \
+  V(CompileOptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)                 \
+  V(CompileOptimizedConcurrent, BUILTIN, UNINITIALIZED, kNoExtraICState)       \
+  V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
+  V(NotifySoftDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
+  V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, kNoExtraICState)            \
+  V(NotifyStubFailure, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
+  V(NotifyStubFailureSaveDoubles, BUILTIN, UNINITIALIZED, kNoExtraICState)     \
+                                                                               \
+  V(LoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                      \
+  V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                 \
+  V(StoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                     \
+  V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
+  V(LoadIC_Getter_ForDeopt, LOAD_IC, MONOMORPHIC, kNoExtraICState)             \
+  V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, kNoExtraICState)     \
+  V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC,                 \
+    kNoExtraICState)                                                           \
+  V(KeyedLoadIC_Generic, KEYED_LOAD_IC, GENERIC, kNoExtraICState)              \
+  V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, kNoExtraICState)           \
+                                                                               \
+  V(StoreIC_Setter_ForDeopt, STORE_IC, MONOMORPHIC, StoreIC::kStrictModeState) \
+                                                                               \
+  V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, kNoExtraICState)   \
+  V(KeyedStoreIC_PreMonomorphic, KEYED_STORE_IC, PREMONOMORPHIC,               \
+    kNoExtraICState)                                                           \
+  V(KeyedStoreIC_Generic, KEYED_STORE_IC, GENERIC, kNoExtraICState)            \
+                                                                               \
+  V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED,             \
+    StoreIC::kStrictModeState)                                                 \
+  V(KeyedStoreIC_PreMonomorphic_Strict, KEYED_STORE_IC, PREMONOMORPHIC,        \
+    StoreIC::kStrictModeState)                                                 \
+  V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, GENERIC,                      \
+    StoreIC::kStrictModeState)                                                 \
+  V(KeyedStoreIC_SloppyArguments, KEYED_STORE_IC, MONOMORPHIC,                 \
+    kNoExtraICState)                                                           \
+                                                                               \
+  /* Uses KeyedLoadIC_Initialize; must be after in list. */                    \
+  V(FunctionCall, BUILTIN, UNINITIALIZED, kNoExtraICState)                     \
+  V(FunctionApply, BUILTIN, UNINITIALIZED, kNoExtraICState)                    \
+                                                                               \
+  V(InternalArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState)                \
+  V(ArrayCode, BUILTIN, UNINITIALIZED, kNoExtraICState)                        \
+                                                                               \
+  V(StringConstructCode, BUILTIN, UNINITIALIZED, kNoExtraICState)              \
+                                                                               \
+  V(OnStackReplacement, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
+  V(InterruptCheck, BUILTIN, UNINITIALIZED, kNoExtraICState)                   \
+  V(OsrAfterStackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState)               \
+  V(StackCheck, BUILTIN, UNINITIALIZED, kNoExtraICState)                       \
+                                                                               \
+  V(MarkCodeAsExecutedOnce, BUILTIN, UNINITIALIZED, kNoExtraICState)           \
+  V(MarkCodeAsExecutedTwice, BUILTIN, UNINITIALIZED, kNoExtraICState)          \
   CODE_AGE_LIST_WITH_ARG(DECLARE_CODE_AGE_BUILTIN, V)
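
BUILTIN_LIST_A above is an X-macro: the same list is expanded several times with different definitions of V to generate enums, name tables, and generator declarations that stay in sync by construction. A toy demonstration of the technique with an illustrative three-entry list:

// Minimal X-macro sketch: one list, expanded twice with different
// definitions of V, yielding an enum and a matching name table.
#include <cstdio>

#define BUILTIN_LIST(V) \
  V(ArrayCode)          \
  V(FunctionCall)       \
  V(StackCheck)

enum BuiltinId {
#define DEFINE_ID(name) k##name,
  BUILTIN_LIST(DEFINE_ID)
#undef DEFINE_ID
  kBuiltinCount
};

static const char* kBuiltinNames[] = {
#define DEFINE_NAME(name) #name,
    BUILTIN_LIST(DEFINE_NAME)
#undef DEFINE_NAME
};

int main() {
  // Prints "0: ArrayCode", "1: FunctionCall", "2: StackCheck".
  for (int i = 0; i < kBuiltinCount; i++)
    std::printf("%d: %s\n", i, kBuiltinNames[i]);
  return 0;
}
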
 
 // Define list of builtin handlers implemented in assembly.
@@ -307,8 +267,8 @@
 
   static const char* GetName(JavaScript id) { return javascript_names_[id]; }
   const char* name(int index) {
-    ASSERT(index >= 0);
-    ASSERT(index < builtin_count);
+    DCHECK(index >= 0);
+    DCHECK(index < builtin_count);
     return names_[index];
   }
   static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
@@ -334,7 +294,7 @@
   static void Generate_Adaptor(MacroAssembler* masm,
                                CFunctionId id,
                                BuiltinExtraArguments extra_args);
-  static void Generate_CompileUnoptimized(MacroAssembler* masm);
+  static void Generate_CompileLazy(MacroAssembler* masm);
   static void Generate_InOptimizationQueue(MacroAssembler* masm);
   static void Generate_CompileOptimized(MacroAssembler* masm);
   static void Generate_CompileOptimizedConcurrent(MacroAssembler* masm);
diff --git a/src/cached-powers.cc b/src/cached-powers.cc
index f572087..dd9e3b4 100644
--- a/src/cached-powers.cc
+++ b/src/cached-powers.cc
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include <stdarg.h>
 #include <limits.h>
+#include <stdarg.h>
 #include <cmath>
 
 #include "include/v8stdint.h"
-#include "src/globals.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/cached-powers.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
@@ -111,7 +111,7 @@
 };
 
 #ifdef DEBUG
-static const int kCachedPowersLength = ARRAY_SIZE(kCachedPowers);
+static const int kCachedPowersLength = arraysize(kCachedPowers);
 #endif
 
 static const int kCachedPowersOffset = 348;  // -1 * the first decimal_exponent.
@@ -133,10 +133,10 @@
   int foo = kCachedPowersOffset;
   int index =
       (foo + static_cast<int>(k) - 1) / kDecimalExponentDistance + 1;
-  ASSERT(0 <= index && index < kCachedPowersLength);
+  DCHECK(0 <= index && index < kCachedPowersLength);
   CachedPower cached_power = kCachedPowers[index];
-  ASSERT(min_exponent <= cached_power.binary_exponent);
-  ASSERT(cached_power.binary_exponent <= max_exponent);
+  DCHECK(min_exponent <= cached_power.binary_exponent);
+  DCHECK(cached_power.binary_exponent <= max_exponent);
   *decimal_exponent = cached_power.decimal_exponent;
   *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
 }
@@ -145,15 +145,15 @@
 void PowersOfTenCache::GetCachedPowerForDecimalExponent(int requested_exponent,
                                                         DiyFp* power,
                                                         int* found_exponent) {
-  ASSERT(kMinDecimalExponent <= requested_exponent);
-  ASSERT(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
+  DCHECK(kMinDecimalExponent <= requested_exponent);
+  DCHECK(requested_exponent < kMaxDecimalExponent + kDecimalExponentDistance);
   int index =
       (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
   CachedPower cached_power = kCachedPowers[index];
   *power = DiyFp(cached_power.significand, cached_power.binary_exponent);
   *found_exponent = cached_power.decimal_exponent;
-  ASSERT(*found_exponent <= requested_exponent);
-  ASSERT(requested_exponent < *found_exponent + kDecimalExponentDistance);
+  DCHECK(*found_exponent <= requested_exponent);
+  DCHECK(requested_exponent < *found_exponent + kDecimalExponentDistance);
 }
 
 } }  // namespace v8::internal
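
GetCachedPowerForDecimalExponent maps a requested decimal exponent straight to a table slot with one division. A worked sketch of that index arithmetic; kCachedPowersOffset is 348 as in this file, while kDecimalExponentDistance = 8 is an assumption about the spacing of the real kCachedPowers table:

// Worked sketch of the index computation above.
#include <cassert>

static const int kCachedPowersOffset = 348;     // -1 * first decimal exponent
static const int kDecimalExponentDistance = 8;  // assumed table spacing

int IndexForDecimalExponent(int requested_exponent) {
  return (requested_exponent + kCachedPowersOffset) / kDecimalExponentDistance;
}

int main() {
  // The first table entry covers decimal exponent -348, so:
  assert(IndexForDecimalExponent(-348) == 0);
  // Eight decimal exponents later we move exactly one entry along the table.
  assert(IndexForDecimalExponent(-340) == 1);
  assert(IndexForDecimalExponent(0) == 43);  // (0 + 348) / 8, truncated
  return 0;
}
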
diff --git a/src/cached-powers.h b/src/cached-powers.h
index dc681bf..bfe3635 100644
--- a/src/cached-powers.h
+++ b/src/cached-powers.h
@@ -5,6 +5,7 @@
 #ifndef V8_CACHED_POWERS_H_
 #define V8_CACHED_POWERS_H_
 
+#include "src/base/logging.h"
 #include "src/diy-fp.h"
 
 namespace v8 {
diff --git a/src/char-predicates-inl.h b/src/char-predicates-inl.h
index 19d96db..71d1b06 100644
--- a/src/char-predicates-inl.h
+++ b/src/char-predicates-inl.h
@@ -30,7 +30,7 @@
 
 
 inline bool IsInRange(int value, int lower_limit, int higher_limit) {
-  ASSERT(lower_limit <= higher_limit);
+  DCHECK(lower_limit <= higher_limit);
   return static_cast<unsigned int>(value - lower_limit) <=
       static_cast<unsigned int>(higher_limit - lower_limit);
 }
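
IsInRange folds the two signed bounds checks into a single unsigned comparison: if value is below lower_limit, the subtraction wraps around to a huge unsigned number and the test fails on the same branch as the upper bound. A standalone illustration:

// Why the single unsigned compare works: value - lower_limit wraps to a
// huge unsigned number whenever value < lower_limit, so one comparison
// covers both ends of the closed range.
#include <cassert>

inline bool IsInRange(int value, int lower_limit, int higher_limit) {
  return static_cast<unsigned int>(value - lower_limit) <=
         static_cast<unsigned int>(higher_limit - lower_limit);
}

int main() {
  assert(IsInRange('5', '0', '9'));   // inside the range
  assert(!IsInRange('/', '0', '9'));  // below: subtraction wraps to ~4e9
  assert(!IsInRange(':', '0', '9'));  // above: difference exceeds the width
  return 0;
}
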
diff --git a/src/checks.cc b/src/checks.cc
index 23c9305..e5a4caa 100644
--- a/src/checks.cc
+++ b/src/checks.cc
@@ -4,15 +4,6 @@
 
 #include "src/checks.h"
 
-#if V8_LIBC_GLIBC || V8_OS_BSD
-# include <cxxabi.h>
-# include <execinfo.h>
-#elif V8_OS_QNX
-# include <backtrace.h>
-#endif  // V8_LIBC_GLIBC || V8_OS_BSD
-#include <stdio.h>
-
-#include "src/platform.h"
 #include "src/v8.h"
 
 namespace v8 {
@@ -20,72 +11,50 @@
 
 intptr_t HeapObjectTagMask() { return kHeapObjectTagMask; }
 
-// Attempts to dump a backtrace (if supported).
-void DumpBacktrace() {
-#if V8_LIBC_GLIBC || V8_OS_BSD
-  void* trace[100];
-  int size = backtrace(trace, ARRAY_SIZE(trace));
-  char** symbols = backtrace_symbols(trace, size);
-  OS::PrintError("\n==== C stack trace ===============================\n\n");
-  if (size == 0) {
-    OS::PrintError("(empty)\n");
-  } else if (symbols == NULL) {
-    OS::PrintError("(no symbols)\n");
-  } else {
-    for (int i = 1; i < size; ++i) {
-      OS::PrintError("%2d: ", i);
-      char mangled[201];
-      if (sscanf(symbols[i], "%*[^(]%*[(]%200[^)+]", mangled) == 1) {  // NOLINT
-        int status;
-        size_t length;
-        char* demangled = abi::__cxa_demangle(mangled, NULL, &length, &status);
-        OS::PrintError("%s\n", demangled != NULL ? demangled : mangled);
-        free(demangled);
-      } else {
-        OS::PrintError("??\n");
-      }
-    }
-  }
-  free(symbols);
-#elif V8_OS_QNX
-  char out[1024];
-  bt_accessor_t acc;
-  bt_memmap_t memmap;
-  bt_init_accessor(&acc, BT_SELF);
-  bt_load_memmap(&acc, &memmap);
-  bt_sprn_memmap(&memmap, out, sizeof(out));
-  OS::PrintError(out);
-  bt_addr_t trace[100];
-  int size = bt_get_backtrace(&acc, trace, ARRAY_SIZE(trace));
-  OS::PrintError("\n==== C stack trace ===============================\n\n");
-  if (size == 0) {
-    OS::PrintError("(empty)\n");
-  } else {
-    bt_sprnf_addrs(&memmap, trace, size, const_cast<char*>("%a\n"),
-                   out, sizeof(out), NULL);
-    OS::PrintError(out);
-  }
-  bt_unload_memmap(&memmap);
-  bt_release_accessor(&acc);
-#endif  // V8_LIBC_GLIBC || V8_OS_BSD
-}
-
 } }  // namespace v8::internal
 
 
-// Contains protection against recursive calls (faults while handling faults).
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
-  fflush(stdout);
-  fflush(stderr);
-  i::OS::PrintError("\n\n#\n# Fatal error in %s, line %d\n# ", file, line);
-  va_list arguments;
-  va_start(arguments, format);
-  i::OS::VPrintError(format, arguments);
-  va_end(arguments);
-  i::OS::PrintError("\n#\n");
-  v8::internal::DumpBacktrace();
-  fflush(stderr);
-  i::OS::Abort();
+static bool CheckEqualsStrict(volatile double* exp, volatile double* val) {
+  v8::internal::DoubleRepresentation exp_rep(*exp);
+  v8::internal::DoubleRepresentation val_rep(*val);
+  if (std::isnan(exp_rep.value) && std::isnan(val_rep.value)) return true;
+  return exp_rep.bits == val_rep.bits;
+}
+
+
+void CheckEqualsHelper(const char* file, int line, const char* expected_source,
+                       double expected, const char* value_source,
+                       double value) {
+  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+  volatile double* exp = new double[1];
+  *exp = expected;
+  volatile double* val = new double[1];
+  *val = value;
+  if (!CheckEqualsStrict(exp, val)) {
+    V8_Fatal(file, line,
+             "CHECK_EQ(%s, %s) failed\n#   Expected: %f\n#   Found: %f",
+             expected_source, value_source, *exp, *val);
+  }
+  delete[] exp;
+  delete[] val;
+}
+
+
+void CheckNonEqualsHelper(const char* file, int line,
+                          const char* expected_source, double expected,
+                          const char* value_source, double value) {
+  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
+  volatile double* exp = new double[1];
+  *exp = expected;
+  volatile double* val = new double[1];
+  *val = value;
+  if (CheckEqualsStrict(exp, val)) {
+    V8_Fatal(file, line,
+             "CHECK_NE(%s, %s) failed\n#   Value: %f",
+             expected_source, value_source, *val);
+  }
+  delete[] exp;
+  delete[] val;
 }
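
Two details above are worth unpacking. The values are copied through heap-allocated volatile doubles so that on IA32 they leave the x87's 80-bit registers and are truncated to genuine 64-bit doubles before comparison. CheckEqualsStrict then compares bit patterns rather than using operator==, which makes any two NaNs compare equal and distinguishes +0.0 from -0.0. A portable sketch of the bit-level comparison, with memcpy standing in for the DoubleRepresentation union:

// Bit-pattern equality for doubles: any two NaNs are equal, everything
// else must match bit for bit (so +0.0 != -0.0, unlike operator==).
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

static bool EqualsStrict(double a, double b) {
  if (std::isnan(a) && std::isnan(b)) return true;  // NaN == NaN here
  uint64_t ab, bb;
  std::memcpy(&ab, &a, sizeof(ab));  // portable stand-in for the
  std::memcpy(&bb, &b, sizeof(bb));  // DoubleRepresentation union
  return ab == bb;
}

int main() {
  assert(EqualsStrict(1.5, 1.5));
  assert(EqualsStrict(std::nan(""), std::nan("1")));  // both NaN: equal
  assert(!EqualsStrict(0.0, -0.0));  // same value, different bits
  assert(0.0 == -0.0);               // but operator== says equal
  return 0;
}
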
 
 
diff --git a/src/checks.h b/src/checks.h
index dd7a395..6303855 100644
--- a/src/checks.h
+++ b/src/checks.h
@@ -5,295 +5,58 @@
 #ifndef V8_CHECKS_H_
 #define V8_CHECKS_H_
 
-#include <string.h>
-
-#include "include/v8stdint.h"
-#include "src/base/build_config.h"
-
-extern "C" void V8_Fatal(const char* file, int line, const char* format, ...);
-
-
-// The FATAL, UNREACHABLE and UNIMPLEMENTED macros are useful during
-// development, but they should not be relied on in the final product.
-#ifdef DEBUG
-#define FATAL(msg)                              \
-  V8_Fatal(__FILE__, __LINE__, "%s", (msg))
-#define UNIMPLEMENTED()                         \
-  V8_Fatal(__FILE__, __LINE__, "unimplemented code")
-#define UNREACHABLE()                           \
-  V8_Fatal(__FILE__, __LINE__, "unreachable code")
-#else
-#define FATAL(msg)                              \
-  V8_Fatal("", 0, "%s", (msg))
-#define UNIMPLEMENTED()                         \
-  V8_Fatal("", 0, "unimplemented code")
-#define UNREACHABLE() ((void) 0)
-#endif
-
-// Simulator specific helpers.
-// We can't use USE_SIMULATOR here because it isn't defined yet.
-#if V8_TARGET_ARCH_ARM64 && !V8_HOST_ARCH_ARM64
-  // TODO(all): If possible automatically prepend an indicator like
-  // UNIMPLEMENTED or LOCATION.
-  #define ASM_UNIMPLEMENTED(message)                                         \
-  __ Debug(message, __LINE__, NO_PARAM)
-  #define ASM_UNIMPLEMENTED_BREAK(message)                                   \
-  __ Debug(message, __LINE__,                                                \
-           FLAG_ignore_asm_unimplemented_break ? NO_PARAM : BREAK)
-  #define ASM_LOCATION(message)                                              \
-  __ Debug("LOCATION: " message, __LINE__, NO_PARAM)
-#else
-  #define ASM_UNIMPLEMENTED(message)
-  #define ASM_UNIMPLEMENTED_BREAK(message)
-  #define ASM_LOCATION(message)
-#endif
-
-
-// The CHECK macro checks that the given condition is true; if not, it
-// prints a message to stderr and aborts.
-#define CHECK(condition) do {                                       \
-    if (!(condition)) {                                             \
-      V8_Fatal(__FILE__, __LINE__, "CHECK(%s) failed", #condition); \
-    }                                                               \
-  } while (0)
-
-
-// Helper function used by the CHECK_EQ function when given int
-// arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
-                              const char* expected_source, int expected,
-                              const char* value_source, int value) {
-  if (expected != value) {
-    V8_Fatal(file, line,
-             "CHECK_EQ(%s, %s) failed\n#   Expected: %i\n#   Found: %i",
-             expected_source, value_source, expected, value);
-  }
-}
-
-
-// Helper function used by the CHECK_EQ function when given int64_t
-// arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file, int line,
-                              const char* expected_source,
-                              int64_t expected,
-                              const char* value_source,
-                              int64_t value) {
-  if (expected != value) {
-    // Print int64_t values in hex, as two int32s,
-    // to avoid platform-dependencies.
-    V8_Fatal(file, line,
-             "CHECK_EQ(%s, %s) failed\n#"
-             "   Expected: 0x%08x%08x\n#   Found: 0x%08x%08x",
-             expected_source, value_source,
-             static_cast<uint32_t>(expected >> 32),
-             static_cast<uint32_t>(expected),
-             static_cast<uint32_t>(value >> 32),
-             static_cast<uint32_t>(value));
-  }
-}
-
-
-// Helper function used by the CHECK_NE function when given int
-// arguments.  Should not be called directly.
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* unexpected_source,
-                                 int unexpected,
-                                 const char* value_source,
-                                 int value) {
-  if (unexpected == value) {
-    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %i",
-             unexpected_source, value_source, value);
-  }
-}
-
-
-// Helper function used by the CHECK function when given string
-// arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              const char* expected,
-                              const char* value_source,
-                              const char* value) {
-  if ((expected == NULL && value != NULL) ||
-      (expected != NULL && value == NULL) ||
-      (expected != NULL && value != NULL && strcmp(expected, value) != 0)) {
-    V8_Fatal(file, line,
-             "CHECK_EQ(%s, %s) failed\n#   Expected: %s\n#   Found: %s",
-             expected_source, value_source, expected, value);
-  }
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* expected_source,
-                                 const char* expected,
-                                 const char* value_source,
-                                 const char* value) {
-  if (expected == value ||
-      (expected != NULL && value != NULL && strcmp(expected, value) == 0)) {
-    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %s",
-             expected_source, value_source, value);
-  }
-}
-
-
-// Helper function used by the CHECK function when given pointer
-// arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              const void* expected,
-                              const char* value_source,
-                              const void* value) {
-  if (expected != value) {
-    V8_Fatal(file, line,
-             "CHECK_EQ(%s, %s) failed\n#   Expected: %p\n#   Found: %p",
-             expected_source, value_source,
-             expected, value);
-  }
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* expected_source,
-                                 const void* expected,
-                                 const char* value_source,
-                                 const void* value) {
-  if (expected == value) {
-    V8_Fatal(file, line, "CHECK_NE(%s, %s) failed\n#   Value: %p",
-             expected_source, value_source, value);
-  }
-}
-
-
-// Helper function used by the CHECK function when given floating
-// point arguments.  Should not be called directly.
-inline void CheckEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              double expected,
-                              const char* value_source,
-                              double value) {
-  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
-  volatile double* exp = new double[1];
-  *exp = expected;
-  volatile double* val = new double[1];
-  *val = value;
-  if (*exp != *val) {
-    V8_Fatal(file, line,
-             "CHECK_EQ(%s, %s) failed\n#   Expected: %f\n#   Found: %f",
-             expected_source, value_source, *exp, *val);
-  }
-  delete[] exp;
-  delete[] val;
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
-                              int line,
-                              const char* expected_source,
-                              int64_t expected,
-                              const char* value_source,
-                              int64_t value) {
-  if (expected == value) {
-    V8_Fatal(file, line,
-             "CHECK_EQ(%s, %s) failed\n#   Expected: %f\n#   Found: %f",
-             expected_source, value_source, expected, value);
-  }
-}
-
-
-inline void CheckNonEqualsHelper(const char* file,
-                                 int line,
-                                 const char* expected_source,
-                                 double expected,
-                                 const char* value_source,
-                                 double value) {
-  // Force values to 64 bit memory to truncate 80 bit precision on IA32.
-  volatile double* exp = new double[1];
-  *exp = expected;
-  volatile double* val = new double[1];
-  *val = value;
-  if (*exp == *val) {
-    V8_Fatal(file, line,
-             "CHECK_NE(%s, %s) failed\n#   Value: %f",
-             expected_source, value_source, *val);
-  }
-  delete[] exp;
-  delete[] val;
-}
-
-
-#define CHECK_EQ(expected, value) CheckEqualsHelper(__FILE__, __LINE__, \
-  #expected, expected, #value, value)
-
-
-#define CHECK_NE(unexpected, value) CheckNonEqualsHelper(__FILE__, __LINE__, \
-  #unexpected, unexpected, #value, value)
-
-
-#define CHECK_GT(a, b) CHECK((a) > (b))
-#define CHECK_GE(a, b) CHECK((a) >= (b))
-#define CHECK_LT(a, b) CHECK((a) < (b))
-#define CHECK_LE(a, b) CHECK((a) <= (b))
-
+#include "src/base/logging.h"
 
 #ifdef DEBUG
 #ifndef OPTIMIZED_DEBUG
-#define ENABLE_SLOW_ASSERTS    1
+#define ENABLE_SLOW_DCHECKS    1
 #endif
 #endif
 
 namespace v8 {
+
+class Value;
+template <class T> class Handle;
+
 namespace internal {
-#ifdef ENABLE_SLOW_ASSERTS
-#define SLOW_ASSERT(condition) \
+
+intptr_t HeapObjectTagMask();
+
+#ifdef ENABLE_SLOW_DCHECKS
+#define SLOW_DCHECK(condition) \
   CHECK(!v8::internal::FLAG_enable_slow_asserts || (condition))
 extern bool FLAG_enable_slow_asserts;
 #else
-#define SLOW_ASSERT(condition) ((void) 0)
+#define SLOW_DCHECK(condition) ((void) 0)
 const bool FLAG_enable_slow_asserts = false;
 #endif
 
-// Exposed for making debugging easier (to see where your function is being
-// called, just add a call to DumpBacktrace).
-void DumpBacktrace();
-
 } }  // namespace v8::internal
 
 
-// The ASSERT macro is equivalent to CHECK except that it only
-// generates code in debug builds.
-#ifdef DEBUG
-#define ASSERT_RESULT(expr)    CHECK(expr)
-#define ASSERT(condition)      CHECK(condition)
-#define ASSERT_EQ(v1, v2)      CHECK_EQ(v1, v2)
-#define ASSERT_NE(v1, v2)      CHECK_NE(v1, v2)
-#define ASSERT_GE(v1, v2)      CHECK_GE(v1, v2)
-#define ASSERT_LT(v1, v2)      CHECK_LT(v1, v2)
-#define ASSERT_LE(v1, v2)      CHECK_LE(v1, v2)
-#else
-#define ASSERT_RESULT(expr)    (expr)
-#define ASSERT(condition)      ((void) 0)
-#define ASSERT_EQ(v1, v2)      ((void) 0)
-#define ASSERT_NE(v1, v2)      ((void) 0)
-#define ASSERT_GE(v1, v2)      ((void) 0)
-#define ASSERT_LT(v1, v2)      ((void) 0)
-#define ASSERT_LE(v1, v2)      ((void) 0)
-#endif
+void CheckNonEqualsHelper(const char* file, int line,
+                          const char* expected_source, double expected,
+                          const char* value_source, double value);
 
-#define ASSERT_NOT_NULL(p)  ASSERT_NE(NULL, p)
+void CheckEqualsHelper(const char* file, int line, const char* expected_source,
+                       double expected, const char* value_source, double value);
 
-// "Extra checks" are lightweight checks that are enabled in some release
-// builds.
-#ifdef ENABLE_EXTRA_CHECKS
-#define EXTRA_CHECK(condition) CHECK(condition)
-#else
-#define EXTRA_CHECK(condition) ((void) 0)
-#endif
+void CheckNonEqualsHelper(const char* file, int line,
+                          const char* unexpected_source,
+                          v8::Handle<v8::Value> unexpected,
+                          const char* value_source,
+                          v8::Handle<v8::Value> value);
+
+void CheckEqualsHelper(const char* file,
+                       int line,
+                       const char* expected_source,
+                       v8::Handle<v8::Value> expected,
+                       const char* value_source,
+                       v8::Handle<v8::Value> value);
+
+#define DCHECK_TAG_ALIGNED(address) \
+  DCHECK((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0)
+
+#define DCHECK_SIZE_TAG_ALIGNED(size) DCHECK((size & HeapObjectTagMask()) == 0)
 
 #endif  // V8_CHECKS_H_
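
checks.h now keeps only SLOW_DCHECK and the tag-alignment helpers; everything else moved to src/base/logging.h. The scheme behind SLOW_DCHECK is a debug-only macro that disappears entirely in release builds, so the guarded condition is never even evaluated. A toy version under an illustrative ENABLE_SLOW_CHECKS flag (names here are stand-ins, not the V8 macros):

// Toy debug-only check macro: free in release builds, aborts with the
// stringified condition when the build opts in.
#include <cstdio>
#include <cstdlib>

#ifdef ENABLE_SLOW_CHECKS
#define SLOW_CHECK(cond)                                 \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "Check failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (false)
#else
#define SLOW_CHECK(cond) ((void)0)  // compiled away entirely
#endif

int main() {
  int x = 1;
  SLOW_CHECK(x == 1);  // no code emitted unless ENABLE_SLOW_CHECKS is set
  std::printf("ok, x=%d\n", x);
  return 0;
}
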
diff --git a/src/code-factory.cc b/src/code-factory.cc
new file mode 100644
index 0000000..c969c8f
--- /dev/null
+++ b/src/code-factory.cc
@@ -0,0 +1,92 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/bootstrapper.h"
+#include "src/code-factory.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+Callable CodeFactory::LoadIC(Isolate* isolate, ContextualMode mode) {
+  return Callable(
+      LoadIC::initialize_stub(isolate, LoadICState(mode).GetExtraICState()),
+      LoadDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::KeyedLoadIC(Isolate* isolate) {
+  return Callable(isolate->builtins()->KeyedLoadIC_Initialize(),
+                  LoadDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::StoreIC(Isolate* isolate, StrictMode mode) {
+  return Callable(StoreIC::initialize_stub(isolate, mode),
+                  StoreDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::KeyedStoreIC(Isolate* isolate, StrictMode mode) {
+  Handle<Code> ic = mode == SLOPPY
+                        ? isolate->builtins()->KeyedStoreIC_Initialize()
+                        : isolate->builtins()->KeyedStoreIC_Initialize_Strict();
+  return Callable(ic, StoreDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::CompareIC(Isolate* isolate, Token::Value op) {
+  Handle<Code> code = CompareIC::GetUninitialized(isolate, op);
+  return Callable(code, BinaryOpDescriptor(isolate));
+}
+
+
+// static
+Callable CodeFactory::BinaryOpIC(Isolate* isolate, Token::Value op,
+                                 OverwriteMode mode) {
+  BinaryOpICStub stub(isolate, op, mode);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::ToBoolean(Isolate* isolate,
+                                ToBooleanStub::ResultMode mode,
+                                ToBooleanStub::Types types) {
+  ToBooleanStub stub(isolate, mode, types);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::ToNumber(Isolate* isolate) {
+  ToNumberStub stub(isolate);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::StringAdd(Isolate* isolate, StringAddFlags flags,
+                                PretenureFlag pretenure_flag) {
+  StringAddStub stub(isolate, flags, pretenure_flag);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+
+// static
+Callable CodeFactory::CallFunction(Isolate* isolate, int argc,
+                                   CallFunctionFlags flags) {
+  CallFunctionStub stub(isolate, argc, flags);
+  return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/code-factory.h b/src/code-factory.h
new file mode 100644
index 0000000..3add384
--- /dev/null
+++ b/src/code-factory.h
@@ -0,0 +1,61 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CODE_FACTORY_H_
+#define V8_CODE_FACTORY_H_
+
+#include "src/allocation.h"
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/globals.h"
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+// Associates a body of code with an interface descriptor.
+class Callable FINAL BASE_EMBEDDED {
+ public:
+  Callable(Handle<Code> code, CallInterfaceDescriptor descriptor)
+      : code_(code), descriptor_(descriptor) {}
+
+  Handle<Code> code() const { return code_; }
+  CallInterfaceDescriptor descriptor() const { return descriptor_; }
+
+ private:
+  const Handle<Code> code_;
+  const CallInterfaceDescriptor descriptor_;
+};
+
+
+class CodeFactory FINAL {
+ public:
+  // Initial states for ICs.
+  static Callable LoadIC(Isolate* isolate, ContextualMode mode);
+  static Callable KeyedLoadIC(Isolate* isolate);
+  static Callable StoreIC(Isolate* isolate, StrictMode mode);
+  static Callable KeyedStoreIC(Isolate* isolate, StrictMode mode);
+
+  static Callable CompareIC(Isolate* isolate, Token::Value op);
+
+  static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
+                             OverwriteMode mode = NO_OVERWRITE);
+
+  // Code stubs. Add methods here as needed to reduce dependency on
+  // code-stubs.h.
+  static Callable ToBoolean(
+      Isolate* isolate, ToBooleanStub::ResultMode mode,
+      ToBooleanStub::Types types = ToBooleanStub::Types());
+
+  static Callable ToNumber(Isolate* isolate);
+
+  static Callable StringAdd(Isolate* isolate, StringAddFlags flags,
+                            PretenureFlag pretenure_flag);
+
+  static Callable CallFunction(Isolate* isolate, int argc,
+                               CallFunctionFlags flags);
+};
+}
+}
+#endif  // V8_CODE_FACTORY_H_
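
The new Callable/CodeFactory pair gives call sites a single value bundling which code object to invoke with the CallInterfaceDescriptor describing how its arguments are passed. A toy restatement of that design with plain C++ types standing in for the V8 classes; the factory is the one place that decides which stub backs an operation:

// Toy Callable: pair "what code to call" with "how to pass arguments" so
// call sites need neither detail (types are stand-ins, not V8 classes).
#include <cstdio>
#include <functional>

struct Descriptor { int register_params; };  // calling-convention summary

class Callable {
 public:
  Callable(std::function<int(int)> code, Descriptor descriptor)
      : code_(code), descriptor_(descriptor) {}
  int Call(int arg) const { return code_(arg); }
  const Descriptor& descriptor() const { return descriptor_; }
 private:
  std::function<int(int)> code_;
  Descriptor descriptor_;
};

struct CodeFactory {
  static Callable ToNumberish() {
    // One place decides which "stub" implements the operation.
    return Callable([](int x) { return x * 2; }, Descriptor{1});
  }
};

int main() {
  Callable c = CodeFactory::ToNumberish();
  std::printf("params=%d result=%d\n", c.descriptor().register_params,
              c.Call(21));
  return 0;
}
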
diff --git a/src/code-stubs-hydrogen.cc b/src/code-stubs-hydrogen.cc
index 8342f9f..dafef52 100644
--- a/src/code-stubs-hydrogen.cc
+++ b/src/code-stubs-hydrogen.cc
@@ -4,6 +4,7 @@
 
 #include "src/v8.h"
 
+#include "src/bailout-reason.h"
 #include "src/code-stubs.h"
 #include "src/field-index.h"
 #include "src/hydrogen.h"
@@ -18,7 +19,7 @@
   DisallowHandleAllocation no_handles;
   DisallowHandleDereference no_deref;
 
-  ASSERT(graph != NULL);
+  DCHECK(graph != NULL);
   BailoutReason bailout_reason = kNoReason;
   if (!graph->Optimize(&bailout_reason)) {
     FATAL(GetBailoutReason(bailout_reason));
@@ -37,21 +38,22 @@
       : HGraphBuilder(&info_),
         arguments_length_(NULL),
         info_(stub, isolate),
+        descriptor_(stub),
         context_(NULL) {
-    descriptor_ = stub->GetInterfaceDescriptor();
-    parameters_.Reset(new HParameter*[descriptor_->register_param_count_]);
+    int parameter_count = descriptor_.GetEnvironmentParameterCount();
+    parameters_.Reset(new HParameter*[parameter_count]);
   }
   virtual bool BuildGraph();
 
  protected:
   virtual HValue* BuildCodeStub() = 0;
   HParameter* GetParameter(int parameter) {
-    ASSERT(parameter < descriptor_->register_param_count_);
+    DCHECK(parameter < descriptor_.GetEnvironmentParameterCount());
     return parameters_[parameter];
   }
   HValue* GetArgumentsLength() {
     // This is initialized in BuildGraph()
-    ASSERT(arguments_length_ != NULL);
+    DCHECK(arguments_length_ != NULL);
     return arguments_length_;
   }
   CompilationInfo* info() { return &info_; }
@@ -61,6 +63,8 @@
 
   HLoadNamedField* BuildLoadNamedField(HValue* object,
                                        FieldIndex index);
+  void BuildStoreNamedField(HValue* object, HValue* value, FieldIndex index,
+                            Representation representation);
 
   enum ArgumentClass {
     NONE,
@@ -68,6 +72,8 @@
     MULTIPLE
   };
 
+  HValue* UnmappedCase(HValue* elements, HValue* key);
+
   HValue* BuildArrayConstructor(ElementsKind kind,
                                 AllocationSiteOverrideMode override_mode,
                                 ArgumentClass argument_class);
@@ -100,7 +106,7 @@
   SmartArrayPointer<HParameter*> parameters_;
   HValue* arguments_length_;
   CompilationInfoWithZone info_;
-  CodeStubInterfaceDescriptor* descriptor_;
+  CodeStubDescriptor descriptor_;
   HContext* context_;
 };
 
@@ -116,30 +122,29 @@
     isolate()->GetHTracer()->TraceCompilation(&info_);
   }
 
-  int param_count = descriptor_->register_param_count_;
+  int param_count = descriptor_.GetEnvironmentParameterCount();
   HEnvironment* start_environment = graph()->start_environment();
   HBasicBlock* next_block = CreateBasicBlock(start_environment);
   Goto(next_block);
   next_block->SetJoinId(BailoutId::StubEntry());
   set_current_block(next_block);
 
-  bool runtime_stack_params = descriptor_->stack_parameter_count_.is_valid();
+  bool runtime_stack_params = descriptor_.stack_parameter_count().is_valid();
   HInstruction* stack_parameter_count = NULL;
   for (int i = 0; i < param_count; ++i) {
-    Representation r = descriptor_->register_param_representations_ == NULL
-        ? Representation::Tagged()
-        : descriptor_->register_param_representations_[i];
-    HParameter* param = Add<HParameter>(i, HParameter::REGISTER_PARAMETER, r);
+    Representation r = descriptor_.GetEnvironmentParameterRepresentation(i);
+    HParameter* param = Add<HParameter>(i,
+                                        HParameter::REGISTER_PARAMETER, r);
     start_environment->Bind(i, param);
     parameters_[i] = param;
-    if (descriptor_->IsParameterCountRegister(i)) {
+    if (descriptor_.IsEnvironmentParameterCountRegister(i)) {
       param->set_type(HType::Smi());
       stack_parameter_count = param;
       arguments_length_ = stack_parameter_count;
     }
   }
 
-  ASSERT(!runtime_stack_params || arguments_length_ != NULL);
+  DCHECK(!runtime_stack_params || arguments_length_ != NULL);
   if (!runtime_stack_params) {
     stack_parameter_count = graph()->GetConstantMinus1();
     arguments_length_ = graph()->GetConstant0();
@@ -157,16 +162,16 @@
   // We might have extra expressions to pop from the stack in addition to the
   // arguments above.
   HInstruction* stack_pop_count = stack_parameter_count;
-  if (descriptor_->function_mode_ == JS_FUNCTION_STUB_MODE) {
+  if (descriptor_.function_mode() == JS_FUNCTION_STUB_MODE) {
     if (!stack_parameter_count->IsConstant() &&
-        descriptor_->hint_stack_parameter_count_ < 0) {
+        descriptor_.hint_stack_parameter_count() < 0) {
       HInstruction* constant_one = graph()->GetConstant1();
       stack_pop_count = AddUncasted<HAdd>(stack_parameter_count, constant_one);
       stack_pop_count->ClearFlag(HValue::kCanOverflow);
       // TODO(mvstanton): verify that stack_parameter_count+1 really fits in a
       // smi.
     } else {
-      int count = descriptor_->hint_stack_parameter_count_;
+      int count = descriptor_.hint_stack_parameter_count();
       stack_pop_count = Add<HConstant>(count);
     }
   }
@@ -214,7 +219,8 @@
 };
 
 
-Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode() {
+Handle<Code> HydrogenCodeStub::GenerateLightweightMissCode(
+    ExternalReference miss) {
   Factory* factory = isolate()->factory();
 
   // Generate the new code.
@@ -227,7 +233,7 @@
     // Generate the code for the stub.
     masm.set_generating_stub(true);
     NoCurrentFrameScope scope(&masm);
-    GenerateLightweightMiss(&masm);
+    GenerateLightweightMiss(&masm, miss);
   }
 
   // Create the code object.
@@ -249,32 +255,28 @@
 template <class Stub>
 static Handle<Code> DoGenerateCode(Stub* stub) {
   Isolate* isolate = stub->isolate();
-  CodeStub::Major  major_key =
-      static_cast<HydrogenCodeStub*>(stub)->MajorKey();
-  CodeStubInterfaceDescriptor* descriptor =
-      isolate->code_stub_interface_descriptor(major_key);
-  if (descriptor->register_param_count_ < 0) {
-    stub->InitializeInterfaceDescriptor(descriptor);
-  }
+  CodeStubDescriptor descriptor(stub);
 
   // If the stub is uninitialized, we can use a lightweight stub to enter
   // the runtime; that is significantly faster than using the standard
   // stub-failure deopt mechanism.
-  if (stub->IsUninitialized() && descriptor->has_miss_handler()) {
-    ASSERT(!descriptor->stack_parameter_count_.is_valid());
-    return stub->GenerateLightweightMissCode();
+  if (stub->IsUninitialized() && descriptor.has_miss_handler()) {
+    DCHECK(!descriptor.stack_parameter_count().is_valid());
+    return stub->GenerateLightweightMissCode(descriptor.miss_handler());
   }
-  ElapsedTimer timer;
+  base::ElapsedTimer timer;
   if (FLAG_profile_hydrogen_code_stub_compilation) {
     timer.Start();
   }
   CodeStubGraphBuilder<Stub> builder(isolate, stub);
   LChunk* chunk = OptimizeGraph(builder.CreateGraph());
+  // TODO(yangguo): remove this once the code serializer handles code stubs.
+  if (FLAG_serialize_toplevel) chunk->info()->PrepareForSerializing();
   Handle<Code> code = chunk->Codegen();
   if (FLAG_profile_hydrogen_code_stub_compilation) {
-    double ms = timer.Elapsed().InMillisecondsF();
-    PrintF("[Lazy compilation of %s took %0.3f ms]\n",
-           stub->GetName().get(), ms);
+    OFStream os(stdout);
+    os << "[Lazy compilation of " << stub << " took "
+       << timer.Elapsed().InMillisecondsF() << " ms]" << endl;
   }
   return code;
 }
@@ -442,7 +444,7 @@
             boilerplate, static_cast<HValue*>(NULL), access));
   }
 
-  ASSERT(FLAG_allocation_site_pretenuring || (size == object_size));
+  DCHECK(FLAG_allocation_site_pretenuring || (size == object_size));
   if (FLAG_allocation_site_pretenuring) {
     BuildCreateAllocationMemento(
         object, Add<HConstant>(object_size), allocation_site);
@@ -537,16 +539,17 @@
 
 
 template <>
-HValue* CodeStubGraphBuilder<KeyedLoadFastElementStub>::BuildCodeStub() {
+HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() {
   HInstruction* load = BuildUncheckedMonomorphicElementAccess(
-      GetParameter(0), GetParameter(1), NULL,
-      casted_stub()->is_js_array(), casted_stub()->elements_kind(),
-      LOAD, NEVER_RETURN_HOLE, STANDARD_STORE);
+      GetParameter(LoadDescriptor::kReceiverIndex),
+      GetParameter(LoadDescriptor::kNameIndex), NULL,
+      casted_stub()->is_js_array(), casted_stub()->elements_kind(), LOAD,
+      NEVER_RETURN_HOLE, STANDARD_STORE);
   return load;
 }
 
 
-Handle<Code> KeyedLoadFastElementStub::GenerateCode() {
+Handle<Code> LoadFastElementStub::GenerateCode() {
   return DoGenerateCode(this);
 }
 
@@ -583,7 +586,176 @@
 }
 
 
-template<>
+template <>
+HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
+  HValue* map = AddLoadMap(GetParameter(0), NULL);
+  HObjectAccess descriptors_access = HObjectAccess::ForObservableJSObjectOffset(
+      Map::kDescriptorsOffset, Representation::Tagged());
+  HValue* descriptors =
+      Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), descriptors_access);
+  HObjectAccess value_access = HObjectAccess::ForObservableJSObjectOffset(
+      DescriptorArray::GetValueOffset(casted_stub()->constant_index()));
+  return Add<HLoadNamedField>(descriptors, static_cast<HValue*>(NULL),
+                              value_access);
+}
+
+
+Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
+
+
+HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
+  HValue* result;
+  HInstruction* backing_store = Add<HLoadKeyed>(
+      elements, graph()->GetConstant1(), static_cast<HValue*>(NULL),
+      FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+  Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
+  HValue* backing_store_length =
+      Add<HLoadNamedField>(backing_store, static_cast<HValue*>(NULL),
+                           HObjectAccess::ForFixedArrayLength());
+  IfBuilder in_unmapped_range(this);
+  in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
+                                                 Token::LT);
+  in_unmapped_range.Then();
+  {
+    result = Add<HLoadKeyed>(backing_store, key, static_cast<HValue*>(NULL),
+                             FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
+  }
+  in_unmapped_range.ElseDeopt("Outside of range");
+  in_unmapped_range.End();
+  return result;
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
+  HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+  HValue* key = GetParameter(LoadDescriptor::kNameIndex);
+
+  // Mapped arguments are actual arguments. Unmapped arguments are values added
+  // to the arguments object after it was created for the call. Mapped arguments
+  // are stored in the context at indexes given by elements[key + 2]. Unmapped
+  // arguments are stored as regular indexed properties in the arguments array,
+  // held at elements[1]. See NewSloppyArguments() in runtime.cc for a detailed
+  // look at argument object construction.
+  //
+  // The sloppy arguments elements array has a special format:
+  //
+  // 0: context
+  // 1: unmapped arguments array
+  // 2: mapped_index0,
+  // 3: mapped_index1,
+  // ...
+  //
+  // length is 2 + min(number_of_actual_arguments, number_of_formal_arguments).
+  // If key + 2 >= elements.length then attempt to look in the unmapped
+  // arguments array (given by elements[1]) and return the value at key, missing
+  // to the runtime if the unmapped arguments array is not a fixed array or if
+  // key >= unmapped_arguments_array.length.
+  //
+  // Otherwise, t = elements[key + 2]. If t is the hole, then look up the value
+  // in the unmapped arguments array, as described above. Otherwise, t is a Smi
+  // index into the context array given at elements[0]. Return the value at
+  // context[t].
+
+  key = AddUncasted<HForceRepresentation>(key, Representation::Smi());
+  IfBuilder positive_smi(this);
+  positive_smi.If<HCompareNumericAndBranch>(key, graph()->GetConstant0(),
+                                            Token::LT);
+  positive_smi.ThenDeopt("key is negative");
+  positive_smi.End();
+
+  HValue* constant_two = Add<HConstant>(2);
+  HValue* elements = AddLoadElements(receiver, static_cast<HValue*>(NULL));
+  HValue* elements_length =
+      Add<HLoadNamedField>(elements, static_cast<HValue*>(NULL),
+                           HObjectAccess::ForFixedArrayLength());
+  HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
+  IfBuilder in_range(this);
+  in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
+  in_range.Then();
+  {
+    HValue* index = AddUncasted<HAdd>(key, constant_two);
+    HInstruction* mapped_index =
+        Add<HLoadKeyed>(elements, index, static_cast<HValue*>(NULL),
+                        FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+
+    IfBuilder is_valid(this);
+    is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
+                                              graph()->GetConstantHole());
+    is_valid.Then();
+    {
+      // TODO(mvstanton): I'd like to assert from this point on that if the
+      // mapped_index is not the hole, it is indeed a smi. An unnecessary
+      // smi check is currently being emitted.
+      HValue* the_context =
+          Add<HLoadKeyed>(elements, graph()->GetConstant0(),
+                          static_cast<HValue*>(NULL), FAST_ELEMENTS);
+      DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
+      HValue* result =
+          Add<HLoadKeyed>(the_context, mapped_index, static_cast<HValue*>(NULL),
+                          FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+      environment()->Push(result);
+    }
+    is_valid.Else();
+    {
+      HValue* result = UnmappedCase(elements, key);
+      environment()->Push(result);
+    }
+    is_valid.End();
+  }
+  in_range.Else();
+  {
+    HValue* result = UnmappedCase(elements, key);
+    environment()->Push(result);
+  }
+  in_range.End();
+
+  return environment()->Pop();
+}
+
+
+Handle<Code> KeyedLoadSloppyArgumentsStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
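// The lookup implemented above is easier to follow against a plain C++
// model of the same algorithm. The sketch below is illustrative only:
// std::vector stands in for FixedArray/Context, kHole stands in for the
// hole value, and none of these names are V8 API.

#include <cstdio>
#include <vector>

namespace sloppy_arguments_model {

static const int kHole = -1;  // Stand-in for the hole value.

struct SloppyArguments {
  std::vector<int> context;   // context slots holding mapped argument values
  std::vector<int> mapped;    // elements[2..]: context slot per parameter
  std::vector<int> unmapped;  // elements[1]: plain indexed backing store

  // Mirrors KeyedLoadSloppyArgumentsStub::BuildCodeStub above; returns kHole
  // where the stub deopts or misses to the runtime.
  int Load(int key) const {
    if (key < 0) return kHole;  // positive_smi.ThenDeopt("key is negative")
    if (key < static_cast<int>(mapped.size())) {  // key < elements.length - 2
      int slot = mapped[key];
      if (slot != kHole) return context[slot];  // mapped: return context[t]
    }
    // UnmappedCase(): bounds-check against the unmapped backing store.
    if (key < static_cast<int>(unmapped.size())) return unmapped[key];
    return kHole;  // in_unmapped_range.ElseDeopt("Outside of range")
  }
};

}  // namespace sloppy_arguments_model

// Usage: for f(a, b) called as f(1, 2, 3), arguments[0..1] alias context
// slots while arguments[2] lives only in the unmapped store.
int main() {
  sloppy_arguments_model::SloppyArguments args;
  args.context.push_back(1);
  args.context.push_back(2);
  args.mapped.push_back(0);  // arguments[0] -> context[0]
  args.mapped.push_back(1);  // arguments[1] -> context[1]
  args.unmapped.push_back(1);
  args.unmapped.push_back(2);
  args.unmapped.push_back(3);
  std::printf("%d %d %d\n", args.Load(0), args.Load(1), args.Load(2));
  return 0;
}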
+void CodeStubGraphBuilderBase::BuildStoreNamedField(
+    HValue* object, HValue* value, FieldIndex index,
+    Representation representation) {
+  DCHECK(!index.is_double() || representation.IsDouble());
+  int offset = index.offset();
+  HObjectAccess access =
+      index.is_inobject()
+          ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
+          : HObjectAccess::ForBackingStoreOffset(offset, representation);
+
+  if (representation.IsDouble()) {
+    // Load the heap number.
+    object = Add<HLoadNamedField>(
+        object, static_cast<HValue*>(NULL),
+        access.WithRepresentation(Representation::Tagged()));
+    // Store the double value into it.
+    access = HObjectAccess::ForHeapNumberValue();
+  } else if (representation.IsHeapObject()) {
+    BuildCheckHeapObject(value);
+  }
+
+  Add<HStoreNamedField>(object, access, value, INITIALIZING_STORE);
+}
+
+
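// Note on the double branch in BuildStoreNamedField above: the store does
// not write a raw double into the field itself. An in-object double field
// holds a tagged pointer to a (mutable) HeapNumber box, so the code
// re-points |object| at the box and writes the unboxed value inside it.
// Minimal stand-alone model of that indirection (HeapNumberBox/FieldSlot
// are invented names, not V8 types):

#include <cassert>
#include <cstddef>

namespace field_store_model {

struct HeapNumberBox { double value; };  // models a mutable HeapNumber

struct FieldSlot {
  void* tagged;                 // the field as a tagged word
  HeapNumberBox* boxed_double;  // what that word points at in the double case
};

inline void StoreNamedField(FieldSlot* slot, bool representation_is_double,
                            void* tagged_value, double double_value) {
  if (representation_is_double) {
    assert(slot->boxed_double != NULL);        // "Load the heap number."
    slot->boxed_double->value = double_value;  // "Store the double value."
  } else {
    slot->tagged = tagged_value;  // every other representation: direct store
  }
}

}  // namespace field_store_model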
+template <>
+HValue* CodeStubGraphBuilder<StoreFieldStub>::BuildCodeStub() {
+  BuildStoreNamedField(GetParameter(0), GetParameter(2), casted_stub()->index(),
+                       casted_stub()->representation());
+  return GetParameter(2);
+}
+
+
+Handle<Code> StoreFieldStub::GenerateCode() { return DoGenerateCode(this); }
+
+
+template <>
 HValue* CodeStubGraphBuilder<StringLengthStub>::BuildCodeStub() {
   HValue* string = BuildLoadNamedField(GetParameter(0),
       FieldIndex::ForInObjectOffset(JSValue::kValueOffset));
@@ -598,17 +770,19 @@
 
 
 template <>
-HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
+HValue* CodeStubGraphBuilder<StoreFastElementStub>::BuildCodeStub() {
   BuildUncheckedMonomorphicElementAccess(
-      GetParameter(0), GetParameter(1), GetParameter(2),
-      casted_stub()->is_js_array(), casted_stub()->elements_kind(),
-      STORE, NEVER_RETURN_HOLE, casted_stub()->store_mode());
+      GetParameter(StoreDescriptor::kReceiverIndex),
+      GetParameter(StoreDescriptor::kNameIndex),
+      GetParameter(StoreDescriptor::kValueIndex), casted_stub()->is_js_array(),
+      casted_stub()->elements_kind(), STORE, NEVER_RETURN_HOLE,
+      casted_stub()->store_mode());
 
   return GetParameter(2);
 }
 
 
-Handle<Code> KeyedStoreFastElementStub::GenerateCode() {
+Handle<Code> StoreFastElementStub::GenerateCode() {
   return DoGenerateCode(this);
 }
 
@@ -725,7 +899,7 @@
                                                     checked_length,
                                                     fill_mode);
   HValue* elements = array_builder->GetElementsLocation();
-  ASSERT(elements != NULL);
+  DCHECK(elements != NULL);
 
   // Now populate the elements correctly.
   LoopBuilder builder(this,
@@ -850,7 +1024,7 @@
 
 template <>
 HValue* CodeStubGraphBuilder<BinaryOpICStub>::BuildCodeInitializedStub() {
-  BinaryOpIC::State state = casted_stub()->state();
+  BinaryOpICState state = casted_stub()->state();
 
   HValue* left = GetParameter(BinaryOpICStub::kLeft);
   HValue* right = GetParameter(BinaryOpICStub::kRight);
@@ -859,7 +1033,7 @@
   Type* right_type = state.GetRightType(zone());
   Type* result_type = state.GetResultType(zone());
 
-  ASSERT(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
+  DCHECK(!left_type->Is(Type::None()) && !right_type->Is(Type::None()) &&
          (state.HasSideEffects() || !result_type->Is(Type::None())));
 
   HValue* result = NULL;
@@ -949,7 +1123,7 @@
 
 template <>
 HValue* CodeStubGraphBuilder<BinaryOpWithAllocationSiteStub>::BuildCodeStub() {
-  BinaryOpIC::State state = casted_stub()->state();
+  BinaryOpICState state = casted_stub()->state();
 
   HValue* allocation_site = GetParameter(
       BinaryOpWithAllocationSiteStub::kAllocationSite);
@@ -1001,14 +1175,31 @@
 template <>
 HValue* CodeStubGraphBuilder<ToBooleanStub>::BuildCodeInitializedStub() {
   ToBooleanStub* stub = casted_stub();
+  HValue* true_value = NULL;
+  HValue* false_value = NULL;
+
+  switch (stub->mode()) {
+    case ToBooleanStub::RESULT_AS_SMI:
+      true_value = graph()->GetConstant1();
+      false_value = graph()->GetConstant0();
+      break;
+    case ToBooleanStub::RESULT_AS_ODDBALL:
+      true_value = graph()->GetConstantTrue();
+      false_value = graph()->GetConstantFalse();
+      break;
+    case ToBooleanStub::RESULT_AS_INVERSE_ODDBALL:
+      true_value = graph()->GetConstantFalse();
+      false_value = graph()->GetConstantTrue();
+      break;
+  }
 
   IfBuilder if_true(this);
-  if_true.If<HBranch>(GetParameter(0), stub->GetTypes());
+  if_true.If<HBranch>(GetParameter(0), stub->types());
   if_true.Then();
-  if_true.Return(graph()->GetConstant1());
+  if_true.Return(true_value);
   if_true.Else();
   if_true.End();
-  return graph()->GetConstant0();
+  return false_value;
 }
 
 
@@ -1020,12 +1211,11 @@
 template <>
 HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
   StoreGlobalStub* stub = casted_stub();
-  Handle<Object> hole(isolate()->heap()->the_hole_value(), isolate());
   Handle<Object> placeholder_value(Smi::FromInt(0), isolate());
   Handle<PropertyCell> placeholder_cell =
       isolate()->factory()->NewPropertyCell(placeholder_value);
 
-  HParameter* value = GetParameter(2);
+  HParameter* value = GetParameter(StoreDescriptor::kValueIndex);
 
   if (stub->check_global()) {
     // Check that the map of the global has not changed: use a placeholder map
@@ -1052,7 +1242,7 @@
     // property has been deleted and that the store must be handled by the
     // runtime.
     IfBuilder builder(this);
-    HValue* hole_value = Add<HConstant>(hole);
+    HValue* hole_value = graph()->GetConstantHole();
     builder.If<HCompareObjectEqAndBranch>(cell_contents, hole_value);
     builder.Then();
     builder.Deopt("Unexpected cell contents in global store");
@@ -1072,10 +1262,10 @@
 
 template<>
 HValue* CodeStubGraphBuilder<ElementsTransitionAndStoreStub>::BuildCodeStub() {
-  HValue* value = GetParameter(0);
-  HValue* map = GetParameter(1);
-  HValue* key = GetParameter(2);
-  HValue* object = GetParameter(3);
+  HValue* value = GetParameter(ElementsTransitionAndStoreStub::kValueIndex);
+  HValue* map = GetParameter(ElementsTransitionAndStoreStub::kMapIndex);
+  HValue* key = GetParameter(ElementsTransitionAndStoreStub::kKeyIndex);
+  HValue* object = GetParameter(ElementsTransitionAndStoreStub::kObjectIndex);
 
   if (FLAG_trace_elements_transitions) {
     // Tracing elements transitions is the job of the runtime.
@@ -1169,7 +1359,7 @@
     int field_offset) {
   // By making sure to express these loads in the form [<hvalue> + constant]
   // the keyed load can be hoisted.
-  ASSERT(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
+  DCHECK(field_offset >= 0 && field_offset < SharedFunctionInfo::kEntryLength);
   HValue* field_slot = iterator;
   if (field_offset > 0) {
     HValue* field_offset_value = Add<HConstant>(field_offset);
@@ -1271,7 +1461,7 @@
                                              NOT_TENURED, JS_FUNCTION_TYPE);
 
   int map_index = Context::FunctionMapIndex(casted_stub()->strict_mode(),
-                                            casted_stub()->is_generator());
+                                            casted_stub()->kind());
 
   // Compute the function map in the current native context and set that
   // as the map of the allocated object.
@@ -1369,10 +1559,10 @@
 }
 
 
-template<>
-HValue* CodeStubGraphBuilder<KeyedLoadDictionaryElementStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(0);
-  HValue* key = GetParameter(1);
+template <>
+HValue* CodeStubGraphBuilder<LoadDictionaryElementStub>::BuildCodeStub() {
+  HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+  HValue* key = GetParameter(LoadDescriptor::kNameIndex);
 
   Add<HCheckSmi>(key);
 
@@ -1384,7 +1574,7 @@
 }
 
 
-Handle<Code> KeyedLoadDictionaryElementStub::GenerateCode() {
+Handle<Code> LoadDictionaryElementStub::GenerateCode() {
   return DoGenerateCode(this);
 }
 
@@ -1408,12 +1598,11 @@
 
 
 template <>
-class CodeStubGraphBuilder<KeyedLoadGenericElementStub>
-  : public CodeStubGraphBuilderBase {
+class CodeStubGraphBuilder<KeyedLoadGenericStub>
+    : public CodeStubGraphBuilderBase {
  public:
-  CodeStubGraphBuilder(Isolate* isolate,
-                       KeyedLoadGenericElementStub* stub)
-    : CodeStubGraphBuilderBase(isolate, stub) {}
+  CodeStubGraphBuilder(Isolate* isolate, KeyedLoadGenericStub* stub)
+      : CodeStubGraphBuilderBase(isolate, stub) {}
 
  protected:
   virtual HValue* BuildCodeStub();
@@ -1436,16 +1625,14 @@
                                 HValue* bit_field2,
                                 ElementsKind kind);
 
-  KeyedLoadGenericElementStub* casted_stub() {
-    return static_cast<KeyedLoadGenericElementStub*>(stub());
+  KeyedLoadGenericStub* casted_stub() {
+    return static_cast<KeyedLoadGenericStub*>(stub());
   }
 };
 
 
-void CodeStubGraphBuilder<
-  KeyedLoadGenericElementStub>::BuildElementsKindLimitCheck(
-    HGraphBuilder::IfBuilder* if_builder,
-    HValue* bit_field2,
+void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildElementsKindLimitCheck(
+    HGraphBuilder::IfBuilder* if_builder, HValue* bit_field2,
     ElementsKind kind) {
   ElementsKind next_kind = static_cast<ElementsKind>(kind + 1);
   HValue* kind_limit = Add<HConstant>(
@@ -1456,14 +1643,10 @@
 }
 
 
-void CodeStubGraphBuilder<KeyedLoadGenericElementStub>::BuildFastElementLoad(
-    HGraphBuilder::IfBuilder* if_builder,
-    HValue* receiver,
-    HValue* key,
-    HValue* instance_type,
-    HValue* bit_field2,
-    ElementsKind kind) {
-  ASSERT(!IsExternalArrayElementsKind(kind));
+void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildFastElementLoad(
+    HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key,
+    HValue* instance_type, HValue* bit_field2, ElementsKind kind) {
+  DCHECK(!IsExternalArrayElementsKind(kind));
 
   BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
 
@@ -1484,15 +1667,10 @@
 }
 
 
-void CodeStubGraphBuilder<
-  KeyedLoadGenericElementStub>::BuildExternalElementLoad(
-    HGraphBuilder::IfBuilder* if_builder,
-    HValue* receiver,
-    HValue* key,
-    HValue* instance_type,
-    HValue* bit_field2,
-    ElementsKind kind) {
-  ASSERT(IsExternalArrayElementsKind(kind));
+void CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildExternalElementLoad(
+    HGraphBuilder::IfBuilder* if_builder, HValue* receiver, HValue* key,
+    HValue* instance_type, HValue* bit_field2, ElementsKind kind) {
+  DCHECK(IsExternalArrayElementsKind(kind));
 
   BuildElementsKindLimitCheck(if_builder, bit_field2, kind);
 
@@ -1503,9 +1681,9 @@
 }
 
 
-HValue* CodeStubGraphBuilder<KeyedLoadGenericElementStub>::BuildCodeStub() {
-  HValue* receiver = GetParameter(0);
-  HValue* key = GetParameter(1);
+HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
+  HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+  HValue* key = GetParameter(LoadDescriptor::kNameIndex);
 
   // Split into a smi/integer case and unique string case.
   HIfContinuation index_name_split_continuation(graph()->CreateBasicBlock(),
@@ -1560,7 +1738,7 @@
     BuildElementsKindLimitCheck(&kind_if, bit_field2,
                                 SLOPPY_ARGUMENTS_ELEMENTS);
     // Non-strict elements are not handled.
-    Add<HDeoptimize>("non-strict elements in KeyedLoadGenericElementStub",
+    Add<HDeoptimize>("non-strict elements in KeyedLoadGenericStub",
                      Deoptimizer::EAGER);
     Push(graph()->GetConstant0());
 
@@ -1600,7 +1778,7 @@
     BuildExternalElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
                              EXTERNAL_UINT8_CLAMPED_ELEMENTS);
 
-    kind_if.ElseDeopt("ElementsKind unhandled in KeyedLoadGenericElementStub");
+    kind_if.ElseDeopt("ElementsKind unhandled in KeyedLoadGenericStub");
 
     kind_if.End();
   }
@@ -1629,6 +1807,8 @@
           Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
           HObjectAccess::ForNameHashField());
 
+      hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
+
       HValue* value = BuildUncheckedDictionaryElementLoad(receiver,
                                                           properties,
                                                           key,
@@ -1649,50 +1829,67 @@
       HValue* base_index = AddUncasted<HMul>(hash, Add<HConstant>(2));
       base_index->ClearFlag(HValue::kCanOverflow);
 
-      IfBuilder lookup_if(this);
-      for (int probe = 0; probe < KeyedLookupCache::kEntriesPerBucket;
-           ++probe) {
-        int probe_base = probe * KeyedLookupCache::kEntryLength;
-        HValue* map_index = AddUncasted<HAdd>(base_index,
-            Add<HConstant>(probe_base + KeyedLookupCache::kMapIndex));
-        map_index->ClearFlag(HValue::kCanOverflow);
-        HValue* key_index = AddUncasted<HAdd>(base_index,
-            Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
-        key_index->ClearFlag(HValue::kCanOverflow);
-        HValue* map_to_check = Add<HLoadKeyed>(cache_keys,
-                                               map_index,
-                                               static_cast<HValue*>(NULL),
-                                               FAST_ELEMENTS,
-                                               NEVER_RETURN_HOLE, 0);
-        lookup_if.If<HCompareObjectEqAndBranch>(map_to_check, map);
-        lookup_if.And();
-        HValue* key_to_check = Add<HLoadKeyed>(cache_keys,
-                                               key_index,
-                                               static_cast<HValue*>(NULL),
-                                               FAST_ELEMENTS,
-                                               NEVER_RETURN_HOLE, 0);
-        lookup_if.If<HCompareObjectEqAndBranch>(key_to_check, key);
-        lookup_if.Then();
-        {
-          ExternalReference cache_field_offsets_ref =
-              ExternalReference::keyed_lookup_cache_field_offsets(isolate());
-          HValue* cache_field_offsets = Add<HConstant>(cache_field_offsets_ref);
-          HValue* index = AddUncasted<HAdd>(hash,
-                                            Add<HConstant>(probe));
-          index->ClearFlag(HValue::kCanOverflow);
-          HValue* property_index = Add<HLoadKeyed>(cache_field_offsets,
-                                                   index,
-                                                   static_cast<HValue*>(NULL),
-                                                   EXTERNAL_INT32_ELEMENTS,
-                                                   NEVER_RETURN_HOLE, 0);
-          Push(property_index);
+      HIfContinuation inline_or_runtime_continuation(
+          graph()->CreateBasicBlock(), graph()->CreateBasicBlock());
+      {
+        IfBuilder lookup_ifs[KeyedLookupCache::kEntriesPerBucket];
+        for (int probe = 0; probe < KeyedLookupCache::kEntriesPerBucket;
+             ++probe) {
+          IfBuilder* lookup_if = &lookup_ifs[probe];
+          lookup_if->Initialize(this);
+          int probe_base = probe * KeyedLookupCache::kEntryLength;
+          HValue* map_index = AddUncasted<HAdd>(
+              base_index,
+              Add<HConstant>(probe_base + KeyedLookupCache::kMapIndex));
+          map_index->ClearFlag(HValue::kCanOverflow);
+          HValue* key_index = AddUncasted<HAdd>(
+              base_index,
+              Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
+          key_index->ClearFlag(HValue::kCanOverflow);
+          HValue* map_to_check =
+              Add<HLoadKeyed>(cache_keys, map_index, static_cast<HValue*>(NULL),
+                              FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
+          lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map);
+          lookup_if->And();
+          HValue* key_to_check =
+              Add<HLoadKeyed>(cache_keys, key_index, static_cast<HValue*>(NULL),
+                              FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
+          lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key);
+          lookup_if->Then();
+          {
+            ExternalReference cache_field_offsets_ref =
+                ExternalReference::keyed_lookup_cache_field_offsets(isolate());
+            HValue* cache_field_offsets =
+                Add<HConstant>(cache_field_offsets_ref);
+            HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe));
+            index->ClearFlag(HValue::kCanOverflow);
+            HValue* property_index = Add<HLoadKeyed>(
+                cache_field_offsets, index, static_cast<HValue*>(NULL),
+                EXTERNAL_INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
+            Push(property_index);
+          }
+          lookup_if->Else();
         }
-        lookup_if.Else();
+        for (int i = 0; i < KeyedLookupCache::kEntriesPerBucket; ++i) {
+          lookup_ifs[i].JoinContinuation(&inline_or_runtime_continuation);
+        }
       }
-      Add<HDeoptimize>("KeyedLoad fall-back", Deoptimizer::EAGER);
-      Push(graph()->GetConstant0());
-      lookup_if.End();
-      Push(Add<HLoadFieldByIndex>(receiver, Pop()));
+
+      IfBuilder inline_or_runtime(this, &inline_or_runtime_continuation);
+      inline_or_runtime.Then();
+      {
+        // Found a cached index, load property inline.
+        Push(Add<HLoadFieldByIndex>(receiver, Pop()));
+      }
+      inline_or_runtime.Else();
+      {
+        // KeyedLookupCache miss; call runtime.
+        Add<HPushArguments>(receiver, key);
+        Push(Add<HCallRuntime>(
+            isolate()->factory()->empty_string(),
+            Runtime::FunctionForId(Runtime::kKeyedGetProperty), 2));
+      }
+      inline_or_runtime.End();
     }
     if_dict_properties.End();
   }
@@ -1702,9 +1899,52 @@
 }
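// The probe rewrite above is Hydrogen plumbing around a small data
// structure: a bucketed (map, name) -> field-index cache with
// kEntriesPerBucket ways per bucket, probed in order, falling back to
// Runtime::kKeyedGetProperty on a miss. Compilable stand-alone model (the
// bucket count, hash, and Update policy here are invented; only the probe
// shape mirrors KeyedLookupCache):

#include <cstddef>

namespace lookup_cache_model {

static const int kEntriesPerBucket = 4;
static const int kBuckets = 64;

struct Entry { const void* map; const void* name; int field_index; };

class KeyedLookupCacheModel {
 public:
  KeyedLookupCacheModel() {
    for (int i = 0; i < kBuckets * kEntriesPerBucket; ++i) {
      entries_[i].map = NULL;
      entries_[i].name = NULL;
      entries_[i].field_index = -1;
    }
  }

  // Returns the cached field index, or -1 where the stub calls the runtime.
  int Lookup(const void* map, const void* name) const {
    const Entry* bucket = entries_ + BucketBase(map, name);
    for (int probe = 0; probe < kEntriesPerBucket; ++probe) {
      if (bucket[probe].map == map && bucket[probe].name == name) {
        return bucket[probe].field_index;
      }
    }
    return -1;
  }

  void Update(const void* map, const void* name, int field_index) {
    Entry* bucket = entries_ + BucketBase(map, name);
    // Invented replacement policy: shift entries down, insert at the front.
    for (int i = kEntriesPerBucket - 1; i > 0; --i) bucket[i] = bucket[i - 1];
    bucket[0].map = map;
    bucket[0].name = name;
    bucket[0].field_index = field_index;
  }

 private:
  static size_t BucketBase(const void* map, const void* name) {
    size_t hash =
        reinterpret_cast<size_t>(map) ^ reinterpret_cast<size_t>(name);
    return (hash % kBuckets) * kEntriesPerBucket;
  }

  Entry entries_[kBuckets * kEntriesPerBucket];
};

}  // namespace lookup_cache_model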
 
 
-Handle<Code> KeyedLoadGenericElementStub::GenerateCode() {
+Handle<Code> KeyedLoadGenericStub::GenerateCode() {
   return DoGenerateCode(this);
 }
 
 
+template <>
+HValue* CodeStubGraphBuilder<VectorLoadStub>::BuildCodeStub() {
+  HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
+  Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
+  return receiver;
+}
+
+
+Handle<Code> VectorLoadStub::GenerateCode() { return DoGenerateCode(this); }
+
+
+template <>
+HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
+  HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
+  Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
+  return receiver;
+}
+
+
+Handle<Code> VectorKeyedLoadStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
+Handle<Code> MegamorphicLoadStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<MegamorphicLoadStub>::BuildCodeStub() {
+  // The return address is on the stack.
+  HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
+  HValue* name = GetParameter(LoadDescriptor::kNameIndex);
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  Add<HTailCallThroughMegamorphicCache>(receiver, name, flags);
+
+  // We never continue.
+  return graph()->GetConstant0();
+}
 } }  // namespace v8::internal
diff --git a/src/code-stubs.cc b/src/code-stubs.cc
index acd877d..5c9e1a2 100644
--- a/src/code-stubs.cc
+++ b/src/code-stubs.cc
@@ -7,26 +7,59 @@
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/cpu-profiler.h"
-#include "src/stub-cache.h"
 #include "src/factory.h"
 #include "src/gdb-jit.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
 
-CodeStubInterfaceDescriptor::CodeStubInterfaceDescriptor()
-    : register_param_count_(-1),
+CodeStubDescriptor::CodeStubDescriptor(CodeStub* stub)
+    : call_descriptor_(stub->GetCallInterfaceDescriptor()),
       stack_parameter_count_(no_reg),
       hint_stack_parameter_count_(-1),
       function_mode_(NOT_JS_FUNCTION_STUB_MODE),
-      register_params_(NULL),
-      register_param_representations_(NULL),
       deoptimization_handler_(NULL),
       handler_arguments_mode_(DONT_PASS_ARGUMENTS),
       miss_handler_(),
-      has_miss_handler_(false) { }
+      has_miss_handler_(false) {
+  stub->InitializeDescriptor(this);
+}
+
+
+CodeStubDescriptor::CodeStubDescriptor(Isolate* isolate, uint32_t stub_key)
+    : stack_parameter_count_(no_reg),
+      hint_stack_parameter_count_(-1),
+      function_mode_(NOT_JS_FUNCTION_STUB_MODE),
+      deoptimization_handler_(NULL),
+      handler_arguments_mode_(DONT_PASS_ARGUMENTS),
+      miss_handler_(),
+      has_miss_handler_(false) {
+  CodeStub::InitializeDescriptor(isolate, stub_key, this);
+}
+
+
+void CodeStubDescriptor::Initialize(Address deoptimization_handler,
+                                    int hint_stack_parameter_count,
+                                    StubFunctionMode function_mode) {
+  deoptimization_handler_ = deoptimization_handler;
+  hint_stack_parameter_count_ = hint_stack_parameter_count;
+  function_mode_ = function_mode;
+}
+
+
+void CodeStubDescriptor::Initialize(Register stack_parameter_count,
+                                    Address deoptimization_handler,
+                                    int hint_stack_parameter_count,
+                                    StubFunctionMode function_mode,
+                                    HandlerArgumentsMode handler_mode) {
+  Initialize(deoptimization_handler, hint_stack_parameter_count, function_mode);
+  stack_parameter_count_ = stack_parameter_count;
+  handler_arguments_mode_ = handler_mode;
+}
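// The two constructors above invert the old flow: instead of the isolate
// caching one CodeStubInterfaceDescriptor per major key and initializing it
// lazily, a CodeStubDescriptor is a short-lived value object that asks the
// stub (or the stub reconstructed from its key) to fill it in. Minimal model
// of that shape (the *Model names are invented for illustration):

#include <cstddef>

namespace descriptor_model {

struct CodeStubDescriptorModel;

struct CodeStubModel {
  virtual ~CodeStubModel() {}
  virtual void InitializeDescriptor(CodeStubDescriptorModel* d) const = 0;
};

struct CodeStubDescriptorModel {
  explicit CodeStubDescriptorModel(const CodeStubModel* stub)
      : hint_stack_parameter_count(-1), deoptimization_handler(NULL) {
    stub->InitializeDescriptor(this);  // the stub fills in its own descriptor
  }
  int hint_stack_parameter_count;
  void* deoptimization_handler;
};

// A stub with nothing to declare keeps the defaults, exactly like the empty
// InitializeDescriptor bodies later in this file.
struct ToNumberStubModel : CodeStubModel {
  virtual void InitializeDescriptor(CodeStubDescriptorModel* d) const {}
};

}  // namespace descriptor_model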
 
 
 bool CodeStub::FindCodeInCache(Code** code_out) {
@@ -40,21 +73,11 @@
 }
 
 
-SmartArrayPointer<const char> CodeStub::GetName() {
-  char buffer[100];
-  NoAllocationStringAllocator allocator(buffer,
-                                        static_cast<unsigned>(sizeof(buffer)));
-  StringStream stream(&allocator);
-  PrintName(&stream);
-  return stream.ToCString();
-}
-
-
 void CodeStub::RecordCodeGeneration(Handle<Code> code) {
   IC::RegisterWeakMapDependency(code);
-  SmartArrayPointer<const char> name = GetName();
-  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, name.get()));
-  GDBJIT(AddCode(GDBJITInterface::STUB, name.get(), *code));
+  OStringStream os;
+  os << *this;
+  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, os.c_str()));
   Counters* counters = isolate()->counters();
   counters->total_stubs_code_size()->Increment(code->instruction_size());
 }
@@ -80,6 +103,9 @@
   // Generate the new code.
   MacroAssembler masm(isolate(), NULL, 256);
 
+  // TODO(yangguo): remove this once the code serializer handles code stubs.
+  if (FLAG_serialize_toplevel) masm.enable_serializer();
+
   {
     // Update the static counter each time a new code stub is generated.
     isolate()->counters()->code_stubs()->Increment();
@@ -109,10 +135,9 @@
 Handle<Code> CodeStub::GetCode() {
   Heap* heap = isolate()->heap();
   Code* code;
-  if (UseSpecialCache()
-      ? FindCodeInSpecialCache(&code)
-      : FindCodeInCache(&code)) {
-    ASSERT(GetCodeKind() == code->kind());
+  if (UseSpecialCache() ? FindCodeInSpecialCache(&code)
+                        : FindCodeInCache(&code)) {
+    DCHECK(GetCodeKind() == code->kind());
     return Handle<Code>(code);
   }
 
@@ -120,15 +145,18 @@
     HandleScope scope(isolate());
 
     Handle<Code> new_object = GenerateCode();
-    new_object->set_major_key(MajorKey());
+    new_object->set_stub_key(GetKey());
     FinishCode(new_object);
     RecordCodeGeneration(new_object);
 
 #ifdef ENABLE_DISASSEMBLER
     if (FLAG_print_code_stubs) {
       CodeTracer::Scope trace_scope(isolate()->GetCodeTracer());
-      new_object->Disassemble(GetName().get(), trace_scope.file());
-      PrintF(trace_scope.file(), "\n");
+      OFStream os(trace_scope.file());
+      OStringStream name;
+      name << *this;
+      new_object->Disassemble(name.c_str(), os);
+      os << "\n";
     }
 #endif
 
@@ -147,7 +175,7 @@
   }
 
   Activate(code);
-  ASSERT(!NeedsImmovableCode() ||
+  DCHECK(!NeedsImmovableCode() ||
          heap->lo_space()->Contains(code) ||
          heap->code_space()->FirstPage()->Contains(code->address()));
   return Handle<Code>(code, isolate());
@@ -160,24 +188,77 @@
 #define DEF_CASE(name) case name: return #name "Stub";
     CODE_STUB_LIST(DEF_CASE)
 #undef DEF_CASE
-    case UninitializedMajorKey: return "<UninitializedMajorKey>Stub";
-    default:
-      if (!allow_unknown_keys) {
-        UNREACHABLE();
-      }
+    case NoCache:
+      return "<NoCache>Stub";
+    case NUMBER_OF_IDS:
+      UNREACHABLE();
       return NULL;
   }
+  return NULL;
+}
+
+
+void CodeStub::PrintBaseName(OStream& os) const {  // NOLINT
+  os << MajorName(MajorKey(), false);
+}
+
+
+void CodeStub::PrintName(OStream& os) const {  // NOLINT
+  PrintBaseName(os);
+  PrintState(os);
+}
+
+
+void CodeStub::Dispatch(Isolate* isolate, uint32_t key, void** value_out,
+                        DispatchedCall call) {
+  switch (MajorKeyFromKey(key)) {
+#define DEF_CASE(NAME)             \
+  case NAME: {                     \
+    NAME##Stub stub(key, isolate); \
+    CodeStub* pstub = &stub;       \
+    call(pstub, value_out);        \
+    break;                         \
+  }
+    CODE_STUB_LIST(DEF_CASE)
+#undef DEF_CASE
+    case NUMBER_OF_IDS:
+      UNREACHABLE();
+    case NoCache:
+      *value_out = NULL;
+      break;
+  }
 }
 
 
-void CodeStub::PrintBaseName(StringStream* stream) {
-  stream->Add("%s", MajorName(MajorKey(), false));
+static void InitializeDescriptorDispatchedCall(CodeStub* stub,
+                                               void** value_out) {
+  CodeStubDescriptor* descriptor_out =
+      reinterpret_cast<CodeStubDescriptor*>(value_out);
+  stub->InitializeDescriptor(descriptor_out);
+  descriptor_out->set_call_descriptor(stub->GetCallInterfaceDescriptor());
 }
 
 
-void CodeStub::PrintName(StringStream* stream) {
-  PrintBaseName(stream);
-  PrintState(stream);
+void CodeStub::InitializeDescriptor(Isolate* isolate, uint32_t key,
+                                    CodeStubDescriptor* desc) {
+  void** value_out = reinterpret_cast<void**>(desc);
+  Dispatch(isolate, key, value_out, &InitializeDescriptorDispatchedCall);
+}
+
+
+void CodeStub::GetCodeDispatchCall(CodeStub* stub, void** value_out) {
+  Handle<Code>* code_out = reinterpret_cast<Handle<Code>*>(value_out);
+  // Code stubs with special cache cannot be recreated from stub key.
+  *code_out = stub->UseSpecialCache() ? Handle<Code>() : stub->GetCode();
+}
+
+
+MaybeHandle<Code> CodeStub::GetCode(Isolate* isolate, uint32_t key) {
+  HandleScope scope(isolate);
+  Handle<Code> code;
+  void** value_out = reinterpret_cast<void**>(&code);
+  Dispatch(isolate, key, value_out, &GetCodeDispatchCall);
+  return scope.CloseAndEscape(code);
 }
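// Dispatch() above funnels every per-key operation through the single
// DispatchedCall(stub, void**) signature, so one macro-generated switch can
// serve both InitializeDescriptor and GetCode. Stand-alone model of the
// pattern with two toy stubs in place of CODE_STUB_LIST (all *Model names
// are invented):

#include <cstddef>
#include <cstdio>
#include <stdint.h>

namespace dispatch_model {

enum MajorKeyModel { ADD = 0, LOAD = 1, NUMBER_OF_IDS_MODEL = 2 };

struct StubModel {
  virtual ~StubModel() {}
  virtual int DescriptorValue() const = 0;
};
struct AddStubModel : StubModel {
  virtual int DescriptorValue() const { return 1; }
};
struct LoadStubModel : StubModel {
  virtual int DescriptorValue() const { return 2; }
};

typedef void (*DispatchedCallModel)(StubModel* stub, void** value_out);

// One switch, a stack-constructed stub, a type-erased out-parameter: the
// same shape the DEF_CASE macro expands to above.
inline void Dispatch(uint32_t key, void** value_out,
                     DispatchedCallModel call) {
  switch (static_cast<MajorKeyModel>(key)) {
    case ADD: { AddStubModel stub; call(&stub, value_out); break; }
    case LOAD: { LoadStubModel stub; call(&stub, value_out); break; }
    case NUMBER_OF_IDS_MODEL: *value_out = NULL; break;
  }
}

// Counterpart of InitializeDescriptorDispatchedCall: cast the out-pointer
// back to the real result type and write through it.
inline void InitializeCall(StubModel* stub, void** value_out) {
  *reinterpret_cast<int*>(value_out) = stub->DescriptorValue();
}

}  // namespace dispatch_model

int main() {
  int descriptor = 0;
  dispatch_model::Dispatch(dispatch_model::LOAD,
                           reinterpret_cast<void**>(&descriptor),
                           &dispatch_model::InitializeCall);
  std::printf("descriptor = %d\n", descriptor);  // prints 2
  return 0;
}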
 
 
@@ -194,18 +275,18 @@
   }
 
   // Generate special versions of the stub.
-  BinaryOpIC::State::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
+  BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
 }
 
 
-void BinaryOpICStub::PrintState(StringStream* stream) {
-  state_.Print(stream);
+void BinaryOpICStub::PrintState(OStream& os) const {  // NOLINT
+  os << state();
 }
 
 
 // static
 void BinaryOpICStub::GenerateAheadOfTime(Isolate* isolate,
-                                         const BinaryOpIC::State& state) {
+                                         const BinaryOpICState& state) {
   BinaryOpICStub stub(isolate, state);
   stub.GetCode();
 }
@@ -214,18 +295,19 @@
 // static
 void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(Isolate* isolate) {
   // Generate special versions of the stub.
-  BinaryOpIC::State::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
+  BinaryOpICState::GenerateAheadOfTime(isolate, &GenerateAheadOfTime);
 }
 
 
-void BinaryOpICWithAllocationSiteStub::PrintState(StringStream* stream) {
-  state_.Print(stream);
+void BinaryOpICWithAllocationSiteStub::PrintState(
+    OStream& os) const {  // NOLINT
+  os << state();
 }
 
 
 // static
 void BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(
-    Isolate* isolate, const BinaryOpIC::State& state) {
+    Isolate* isolate, const BinaryOpICState& state) {
   if (state.CouldCreateAllocationMementos()) {
     BinaryOpICWithAllocationSiteStub stub(isolate, state);
     stub.GetCode();
@@ -233,35 +315,35 @@
 }
 
 
-void StringAddStub::PrintBaseName(StringStream* stream) {
-  stream->Add("StringAddStub");
+void StringAddStub::PrintBaseName(OStream& os) const {  // NOLINT
+  os << "StringAddStub";
   if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
-    stream->Add("_CheckBoth");
+    os << "_CheckBoth";
   } else if ((flags() & STRING_ADD_CHECK_LEFT) == STRING_ADD_CHECK_LEFT) {
-    stream->Add("_CheckLeft");
+    os << "_CheckLeft";
   } else if ((flags() & STRING_ADD_CHECK_RIGHT) == STRING_ADD_CHECK_RIGHT) {
-    stream->Add("_CheckRight");
+    os << "_CheckRight";
   }
   if (pretenure_flag() == TENURED) {
-    stream->Add("_Tenured");
+    os << "_Tenured";
   }
 }
 
 
-InlineCacheState ICCompareStub::GetICState() {
-  CompareIC::State state = Max(left_, right_);
+InlineCacheState CompareICStub::GetICState() const {
+  CompareICState::State state = Max(left(), right());
   switch (state) {
-    case CompareIC::UNINITIALIZED:
+    case CompareICState::UNINITIALIZED:
       return ::v8::internal::UNINITIALIZED;
-    case CompareIC::SMI:
-    case CompareIC::NUMBER:
-    case CompareIC::INTERNALIZED_STRING:
-    case CompareIC::STRING:
-    case CompareIC::UNIQUE_NAME:
-    case CompareIC::OBJECT:
-    case CompareIC::KNOWN_OBJECT:
+    case CompareICState::SMI:
+    case CompareICState::NUMBER:
+    case CompareICState::INTERNALIZED_STRING:
+    case CompareICState::STRING:
+    case CompareICState::UNIQUE_NAME:
+    case CompareICState::OBJECT:
+    case CompareICState::KNOWN_OBJECT:
       return MONOMORPHIC;
-    case CompareIC::GENERIC:
+    case CompareICState::GENERIC:
       return ::v8::internal::GENERIC;
   }
   UNREACHABLE();
@@ -269,8 +351,13 @@
 }
 
 
-void ICCompareStub::AddToSpecialCache(Handle<Code> new_object) {
-  ASSERT(*known_map_ != NULL);
+Condition CompareICStub::GetCondition() const {
+  return CompareIC::ComputeCondition(op());
+}
+
+
+void CompareICStub::AddToSpecialCache(Handle<Code> new_object) {
+  DCHECK(*known_map_ != NULL);
   Isolate* isolate = new_object->GetIsolate();
   Factory* factory = isolate->factory();
   return Map::UpdateCodeCache(known_map_,
@@ -281,12 +368,12 @@
 }
 
 
-bool ICCompareStub::FindCodeInSpecialCache(Code** code_out) {
+bool CompareICStub::FindCodeInSpecialCache(Code** code_out) {
   Factory* factory = isolate()->factory();
   Code::Flags flags = Code::ComputeFlags(
       GetCodeKind(),
       UNINITIALIZED);
-  ASSERT(op_ == Token::EQ || op_ == Token::EQ_STRICT);
+  DCHECK(op() == Token::EQ || op() == Token::EQ_STRICT);
   Handle<Object> probe(
       known_map_->FindInCodeCache(
         strict() ?
@@ -297,10 +384,11 @@
   if (probe->IsCode()) {
     *code_out = Code::cast(*probe);
 #ifdef DEBUG
-    Token::Value cached_op;
-    ICCompareStub::DecodeMinorKey((*code_out)->stub_info(), NULL, NULL, NULL,
-                                  &cached_op);
-    ASSERT(op_ == cached_op);
+    CompareICStub decode((*code_out)->stub_key(), isolate());
+    DCHECK(op() == decode.op());
+    DCHECK(left() == decode.left());
+    DCHECK(right() == decode.right());
+    DCHECK(state() == decode.state());
 #endif
     return true;
   }
@@ -308,65 +396,34 @@
 }
 
 
-int ICCompareStub::MinorKey() {
-  return OpField::encode(op_ - Token::EQ) |
-         LeftStateField::encode(left_) |
-         RightStateField::encode(right_) |
-         HandlerStateField::encode(state_);
-}
-
-
-void ICCompareStub::DecodeMinorKey(int minor_key,
-                                   CompareIC::State* left_state,
-                                   CompareIC::State* right_state,
-                                   CompareIC::State* handler_state,
-                                   Token::Value* op) {
-  if (left_state) {
-    *left_state =
-        static_cast<CompareIC::State>(LeftStateField::decode(minor_key));
-  }
-  if (right_state) {
-    *right_state =
-        static_cast<CompareIC::State>(RightStateField::decode(minor_key));
-  }
-  if (handler_state) {
-    *handler_state =
-        static_cast<CompareIC::State>(HandlerStateField::decode(minor_key));
-  }
-  if (op) {
-    *op = static_cast<Token::Value>(OpField::decode(minor_key) + Token::EQ);
-  }
-}
-
-
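// The deleted MinorKey()/DecodeMinorKey() pair hand-packed four small enums
// into one integer with BitFields; the replacement simply reconstructs the
// whole stub from (*code_out)->stub_key() (see the DEBUG block earlier in
// this hunk). For reference, the packing idiom looks like this (compilable
// sketch modeled on V8's BitField template; the 4-bit widths here are
// invented):

#include <stdint.h>

namespace minor_key_model {

template <class T, int shift, int size>
struct BitFieldModel {
  static const uint32_t kMask = ((1U << size) - 1) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t value) {
    return static_cast<T>((value & kMask) >> shift);
  }
};

typedef BitFieldModel<int, 0, 4> OpFieldModel;
typedef BitFieldModel<int, 4, 4> LeftStateFieldModel;
typedef BitFieldModel<int, 8, 4> RightStateFieldModel;
typedef BitFieldModel<int, 12, 4> HandlerStateFieldModel;

inline uint32_t EncodeMinorKey(int op, int left, int right, int handler) {
  return OpFieldModel::encode(op) | LeftStateFieldModel::encode(left) |
         RightStateFieldModel::encode(right) |
         HandlerStateFieldModel::encode(handler);
}

inline int DecodeOp(uint32_t minor_key) {
  return OpFieldModel::decode(minor_key);
}

}  // namespace minor_key_model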
-void ICCompareStub::Generate(MacroAssembler* masm) {
-  switch (state_) {
-    case CompareIC::UNINITIALIZED:
+void CompareICStub::Generate(MacroAssembler* masm) {
+  switch (state()) {
+    case CompareICState::UNINITIALIZED:
       GenerateMiss(masm);
       break;
-    case CompareIC::SMI:
+    case CompareICState::SMI:
       GenerateSmis(masm);
       break;
-    case CompareIC::NUMBER:
+    case CompareICState::NUMBER:
       GenerateNumbers(masm);
       break;
-    case CompareIC::STRING:
+    case CompareICState::STRING:
       GenerateStrings(masm);
       break;
-    case CompareIC::INTERNALIZED_STRING:
+    case CompareICState::INTERNALIZED_STRING:
       GenerateInternalizedStrings(masm);
       break;
-    case CompareIC::UNIQUE_NAME:
+    case CompareICState::UNIQUE_NAME:
       GenerateUniqueNames(masm);
       break;
-    case CompareIC::OBJECT:
+    case CompareICState::OBJECT:
       GenerateObjects(masm);
       break;
-    case CompareIC::KNOWN_OBJECT:
-      ASSERT(*known_map_ != NULL);
+    case CompareICState::KNOWN_OBJECT:
+      DCHECK(*known_map_ != NULL);
       GenerateKnownObjects(masm);
       break;
-    case CompareIC::GENERIC:
+    case CompareICState::GENERIC:
       GenerateGeneric(masm);
       break;
   }
@@ -374,24 +431,26 @@
 
 
 void CompareNilICStub::UpdateStatus(Handle<Object> object) {
-  ASSERT(!state_.Contains(GENERIC));
-  State old_state(state_);
+  State state = this->state();
+  DCHECK(!state.Contains(GENERIC));
+  State old_state = state;
   if (object->IsNull()) {
-    state_.Add(NULL_TYPE);
+    state.Add(NULL_TYPE);
   } else if (object->IsUndefined()) {
-    state_.Add(UNDEFINED);
+    state.Add(UNDEFINED);
   } else if (object->IsUndetectableObject() ||
              object->IsOddball() ||
              !object->IsHeapObject()) {
-    state_.RemoveAll();
-    state_.Add(GENERIC);
+    state.RemoveAll();
+    state.Add(GENERIC);
   } else if (IsMonomorphic()) {
-    state_.RemoveAll();
-    state_.Add(GENERIC);
+    state.RemoveAll();
+    state.Add(GENERIC);
   } else {
-    state_.Add(MONOMORPHIC_MAP);
+    state.Add(MONOMORPHIC_MAP);
   }
-  TraceTransition(old_state, state_);
+  TraceTransition(old_state, state);
+  set_sub_minor_key(TypesBits::update(sub_minor_key(), state.ToIntegral()));
 }
 
 
@@ -399,60 +458,70 @@
 void HydrogenCodeStub::TraceTransition(StateType from, StateType to) {
   // Note: Although a no-op transition is semantically OK, it hints at a bug
   // somewhere in our state transition machinery.
-  ASSERT(from != to);
+  DCHECK(from != to);
   if (!FLAG_trace_ic) return;
-  char buffer[100];
-  NoAllocationStringAllocator allocator(buffer,
-                                        static_cast<unsigned>(sizeof(buffer)));
-  StringStream stream(&allocator);
-  stream.Add("[");
-  PrintBaseName(&stream);
-  stream.Add(": ");
-  from.Print(&stream);
-  stream.Add("=>");
-  to.Print(&stream);
-  stream.Add("]\n");
-  stream.OutputToStdOut();
+  OFStream os(stdout);
+  os << "[";
+  PrintBaseName(os);
+  os << ": " << from << "=>" << to << "]" << endl;
 }
 
 
-void CompareNilICStub::PrintBaseName(StringStream* stream) {
-  CodeStub::PrintBaseName(stream);
-  stream->Add((nil_value_ == kNullValue) ? "(NullValue)":
-                                           "(UndefinedValue)");
+void CompareNilICStub::PrintBaseName(OStream& os) const {  // NOLINT
+  CodeStub::PrintBaseName(os);
+  os << ((nil_value() == kNullValue) ? "(NullValue)" : "(UndefinedValue)");
 }
 
 
-void CompareNilICStub::PrintState(StringStream* stream) {
-  state_.Print(stream);
+void CompareNilICStub::PrintState(OStream& os) const {  // NOLINT
+  os << state();
 }
 
 
-void CompareNilICStub::State::Print(StringStream* stream) const {
-  stream->Add("(");
-  SimpleListPrinter printer(stream);
-  if (IsEmpty()) printer.Add("None");
-  if (Contains(UNDEFINED)) printer.Add("Undefined");
-  if (Contains(NULL_TYPE)) printer.Add("Null");
-  if (Contains(MONOMORPHIC_MAP)) printer.Add("MonomorphicMap");
-  if (Contains(GENERIC)) printer.Add("Generic");
-  stream->Add(")");
+// TODO(svenpanne): Make this a real infix_ostream_iterator.
+class SimpleListPrinter {
+ public:
+  explicit SimpleListPrinter(OStream& os) : os_(os), first_(true) {}
+
+  void Add(const char* s) {
+    if (first_) {
+      first_ = false;
+    } else {
+      os_ << ",";
+    }
+    os_ << s;
+  }
+
+ private:
+  OStream& os_;
+  bool first_;
+};
+
+
+OStream& operator<<(OStream& os, const CompareNilICStub::State& s) {
+  os << "(";
+  SimpleListPrinter p(os);
+  if (s.IsEmpty()) p.Add("None");
+  if (s.Contains(CompareNilICStub::UNDEFINED)) p.Add("Undefined");
+  if (s.Contains(CompareNilICStub::NULL_TYPE)) p.Add("Null");
+  if (s.Contains(CompareNilICStub::MONOMORPHIC_MAP)) p.Add("MonomorphicMap");
+  if (s.Contains(CompareNilICStub::GENERIC)) p.Add("Generic");
+  return os << ")";
 }
 
 
 Type* CompareNilICStub::GetType(Zone* zone, Handle<Map> map) {
-  if (state_.Contains(CompareNilICStub::GENERIC)) {
-    return Type::Any(zone);
-  }
+  State state = this->state();
+  if (state.Contains(CompareNilICStub::GENERIC)) return Type::Any(zone);
 
   Type* result = Type::None(zone);
-  if (state_.Contains(CompareNilICStub::UNDEFINED)) {
+  if (state.Contains(CompareNilICStub::UNDEFINED)) {
     result = Type::Union(result, Type::Undefined(zone), zone);
   }
-  if (state_.Contains(CompareNilICStub::NULL_TYPE)) {
+  if (state.Contains(CompareNilICStub::NULL_TYPE)) {
     result = Type::Union(result, Type::Null(zone), zone);
   }
-  if (state_.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
+  if (state.Contains(CompareNilICStub::MONOMORPHIC_MAP)) {
     Type* type =
         map.is_null() ? Type::Detectable(zone) : Type::Class(map, zone);
     result = Type::Union(result, type, zone);
@@ -465,42 +534,26 @@
 Type* CompareNilICStub::GetInputType(Zone* zone, Handle<Map> map) {
   Type* output_type = GetType(zone, map);
   Type* nil_type =
-      nil_value_ == kNullValue ? Type::Null(zone) : Type::Undefined(zone);
+      nil_value() == kNullValue ? Type::Null(zone) : Type::Undefined(zone);
   return Type::Union(output_type, nil_type, zone);
 }
 
 
-void CallIC_ArrayStub::PrintState(StringStream* stream) {
-  state_.Print(stream);
-  stream->Add(" (Array)");
+void CallIC_ArrayStub::PrintState(OStream& os) const {  // NOLINT
+  os << state() << " (Array)";
 }
 
 
-void CallICStub::PrintState(StringStream* stream) {
-  state_.Print(stream);
+void CallICStub::PrintState(OStream& os) const {  // NOLINT
+  os << state();
 }
 
 
-void InstanceofStub::PrintName(StringStream* stream) {
-  const char* args = "";
-  if (HasArgsInRegisters()) {
-    args = "_REGS";
-  }
-
-  const char* inline_check = "";
-  if (HasCallSiteInlineCheck()) {
-    inline_check = "_INLINE";
-  }
-
-  const char* return_true_false_object = "";
-  if (ReturnTrueFalseObject()) {
-    return_true_false_object = "_TRUEFALSE";
-  }
-
-  stream->Add("InstanceofStub%s%s%s",
-              args,
-              inline_check,
-              return_true_false_object);
+void InstanceofStub::PrintName(OStream& os) const {  // NOLINT
+  os << "InstanceofStub";
+  if (HasArgsInRegisters()) os << "_REGS";
+  if (HasCallSiteInlineCheck()) os << "_INLINE";
+  if (ReturnTrueFalseObject()) os << "_TRUEFALSE";
 }
 
 
@@ -512,9 +565,160 @@
 }
 
 
-void KeyedLoadDictionaryElementPlatformStub::Generate(
-    MacroAssembler* masm) {
-  KeyedLoadStubCompiler::GenerateLoadDictionaryElement(masm);
+void LoadFastElementStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+}
+
+
+void LoadDictionaryElementStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+}
+
+
+void KeyedLoadGenericStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(
+      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry);
+}
+
+
+void HandlerStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  if (kind() == Code::STORE_IC) {
+    descriptor->Initialize(FUNCTION_ADDR(StoreIC_MissFromStubFailure));
+  } else if (kind() == Code::KEYED_LOAD_IC) {
+    descriptor->Initialize(FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure));
+  }
+}
+
+
+CallInterfaceDescriptor HandlerStub::GetCallInterfaceDescriptor() {
+  if (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC) {
+    return LoadDescriptor(isolate());
+  } else {
+    DCHECK_EQ(Code::STORE_IC, kind());
+    return StoreDescriptor(isolate());
+  }
+}
+
+
+void StoreFastElementStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure));
+}
+
+
+void ElementsTransitionAndStoreStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss));
+}
+
+
+static void InitializeVectorLoadStub(Isolate* isolate,
+                                     CodeStubDescriptor* descriptor,
+                                     Address deoptimization_handler) {
+  DCHECK(FLAG_vector_ics);
+  descriptor->Initialize(deoptimization_handler);
+}
+
+
+void VectorLoadStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  InitializeVectorLoadStub(isolate(), descriptor,
+                           FUNCTION_ADDR(VectorLoadIC_MissFromStubFailure));
+}
+
+
+void VectorKeyedLoadStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  InitializeVectorLoadStub(
+      isolate(), descriptor,
+      FUNCTION_ADDR(VectorKeyedLoadIC_MissFromStubFailure));
+}
+
+
+void MegamorphicLoadStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
+void FastNewClosureStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(
+      Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
+}
+
+
+void FastNewContextStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
+void ToNumberStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
+void NumberToStringStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  NumberToStringDescriptor call_descriptor(isolate());
+  descriptor->Initialize(
+      Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
+}
+
+
+void FastCloneShallowArrayStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  FastCloneShallowArrayDescriptor call_descriptor(isolate());
+  descriptor->Initialize(
+      Runtime::FunctionForId(Runtime::kCreateArrayLiteralStubBailout)->entry);
+}
+
+
+void FastCloneShallowObjectStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  FastCloneShallowObjectDescriptor call_descriptor(isolate());
+  descriptor->Initialize(
+      Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
+}
+
+
+void CreateAllocationSiteStub::InitializeDescriptor(CodeStubDescriptor* d) {}
+
+
+void RegExpConstructResultStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(
+      Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
+}
+
+
+void TransitionElementsKindStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(
+      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry);
+}
+
+
+void CompareNilICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(FUNCTION_ADDR(CompareNilIC_Miss));
+  descriptor->SetMissHandler(
+      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
+}
+
+
+void ToBooleanStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(FUNCTION_ADDR(ToBooleanIC_Miss));
+  descriptor->SetMissHandler(
+      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+}
+
+
+void BinaryOpICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(FUNCTION_ADDR(BinaryOpIC_Miss));
+  descriptor->SetMissHandler(
+      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
+}
+
+
+void BinaryOpWithAllocationSiteStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
+}
+
+
+void StringAddStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
+  descriptor->Initialize(Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
 
 
@@ -524,8 +728,8 @@
 }
 
 
-void KeyedStoreElementStub::Generate(MacroAssembler* masm) {
-  switch (elements_kind_) {
+void StoreElementStub::Generate(MacroAssembler* masm) {
+  switch (elements_kind()) {
     case FAST_ELEMENTS:
     case FAST_HOLEY_ELEMENTS:
     case FAST_SMI_ELEMENTS:
@@ -541,7 +745,7 @@
       UNREACHABLE();
       break;
     case DICTIONARY_ELEMENTS:
-      KeyedStoreStubCompiler::GenerateStoreDictionaryElement(masm);
+      ElementHandlerCompiler::GenerateStoreSlow(masm);
       break;
     case SLOPPY_ARGUMENTS_ELEMENTS:
       UNREACHABLE();
@@ -550,76 +754,113 @@
 }
 
 
-void ArgumentsAccessStub::PrintName(StringStream* stream) {
-  stream->Add("ArgumentsAccessStub_");
-  switch (type_) {
-    case READ_ELEMENT: stream->Add("ReadElement"); break;
-    case NEW_SLOPPY_FAST: stream->Add("NewSloppyFast"); break;
-    case NEW_SLOPPY_SLOW: stream->Add("NewSloppySlow"); break;
-    case NEW_STRICT: stream->Add("NewStrict"); break;
+void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
+  switch (type()) {
+    case READ_ELEMENT:
+      GenerateReadElement(masm);
+      break;
+    case NEW_SLOPPY_FAST:
+      GenerateNewSloppyFast(masm);
+      break;
+    case NEW_SLOPPY_SLOW:
+      GenerateNewSloppySlow(masm);
+      break;
+    case NEW_STRICT:
+      GenerateNewStrict(masm);
+      break;
   }
 }
 
 
-void CallFunctionStub::PrintName(StringStream* stream) {
-  stream->Add("CallFunctionStub_Args%d", argc_);
-}
-
-
-void CallConstructStub::PrintName(StringStream* stream) {
-  stream->Add("CallConstructStub");
-  if (RecordCallTarget()) stream->Add("_Recording");
-}
-
-
-void ArrayConstructorStub::PrintName(StringStream* stream) {
-  stream->Add("ArrayConstructorStub");
-  switch (argument_count_) {
-    case ANY: stream->Add("_Any"); break;
-    case NONE: stream->Add("_None"); break;
-    case ONE: stream->Add("_One"); break;
-    case MORE_THAN_ONE: stream->Add("_More_Than_One"); break;
+void ArgumentsAccessStub::PrintName(OStream& os) const {  // NOLINT
+  os << "ArgumentsAccessStub_";
+  switch (type()) {
+    case READ_ELEMENT:
+      os << "ReadElement";
+      break;
+    case NEW_SLOPPY_FAST:
+      os << "NewSloppyFast";
+      break;
+    case NEW_SLOPPY_SLOW:
+      os << "NewSloppySlow";
+      break;
+    case NEW_STRICT:
+      os << "NewStrict";
+      break;
   }
 }
 
 
-void ArrayConstructorStubBase::BasePrintName(const char* name,
-                                             StringStream* stream) {
-  stream->Add(name);
-  stream->Add("_");
-  stream->Add(ElementsKindToString(elements_kind()));
+void CallFunctionStub::PrintName(OStream& os) const {  // NOLINT
+  os << "CallFunctionStub_Args" << argc();
+}
+
+
+void CallConstructStub::PrintName(OStream& os) const {  // NOLINT
+  os << "CallConstructStub";
+  if (RecordCallTarget()) os << "_Recording";
+}
+
+
+void ArrayConstructorStub::PrintName(OStream& os) const {  // NOLINT
+  os << "ArrayConstructorStub";
+  switch (argument_count()) {
+    case ANY:
+      os << "_Any";
+      break;
+    case NONE:
+      os << "_None";
+      break;
+    case ONE:
+      os << "_One";
+      break;
+    case MORE_THAN_ONE:
+      os << "_More_Than_One";
+      break;
+  }
+}
+
+
+OStream& ArrayConstructorStubBase::BasePrintName(OStream& os,  // NOLINT
+                                                 const char* name) const {
+  os << name << "_" << ElementsKindToString(elements_kind());
   if (override_mode() == DISABLE_ALLOCATION_SITES) {
-    stream->Add("_DISABLE_ALLOCATION_SITES");
+    os << "_DISABLE_ALLOCATION_SITES";
   }
+  return os;
 }
 
 
 bool ToBooleanStub::UpdateStatus(Handle<Object> object) {
-  Types old_types(types_);
-  bool to_boolean_value = types_.UpdateStatus(object);
-  TraceTransition(old_types, types_);
+  Types new_types = types();
+  Types old_types = new_types;
+  bool to_boolean_value = new_types.UpdateStatus(object);
+  TraceTransition(old_types, new_types);
+  set_sub_minor_key(TypesBits::update(sub_minor_key(), new_types.ToByte()));
   return to_boolean_value;
 }
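
UpdateStatus() above captures a pattern used throughout this file: the set of observed types is a monotonically widening bitset whose byte encoding is folded back into the stub's sub-minor key. A freestanding sketch of that pattern, simplified and not V8's actual EnumSet/Types API:

// Illustrative sketch (not V8's EnumSet): a byte-backed type set that only
// ever widens as new inputs are observed, mirroring how UpdateStatus()
// folds each seen type back into the stub's sub-minor key.
#include <cassert>
#include <cstdint>

enum SimpleType { kUndefined = 1 << 0, kBoolean = 1 << 1, kSmi = 1 << 2 };

class TypeSet {
 public:
  bool Contains(SimpleType t) const { return (bits_ & t) != 0; }
  void Add(SimpleType t) { bits_ |= t; }    // widening only, never shrinks
  uint8_t ToByte() const { return bits_; }  // what gets packed into the key
 private:
  uint8_t bits_ = 0;
};

int main() {
  TypeSet types;
  types.Add(kSmi);      // first input observed was a Smi
  types.Add(kBoolean);  // a boolean flows through later
  assert(types.Contains(kSmi) && types.Contains(kBoolean));
  assert(types.ToByte() == 0x06);  // both bits recorded in the key byte
  return 0;
}

Because the set only ever grows, two stubs that have observed the same inputs encode to the same key byte, which keeps stub lookup and hashing stable.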
 
 
-void ToBooleanStub::PrintState(StringStream* stream) {
-  types_.Print(stream);
+void ToBooleanStub::PrintState(OStream& os) const {  // NOLINT
+  os << types();
 }
 
 
-void ToBooleanStub::Types::Print(StringStream* stream) const {
-  stream->Add("(");
-  SimpleListPrinter printer(stream);
-  if (IsEmpty()) printer.Add("None");
-  if (Contains(UNDEFINED)) printer.Add("Undefined");
-  if (Contains(BOOLEAN)) printer.Add("Bool");
-  if (Contains(NULL_TYPE)) printer.Add("Null");
-  if (Contains(SMI)) printer.Add("Smi");
-  if (Contains(SPEC_OBJECT)) printer.Add("SpecObject");
-  if (Contains(STRING)) printer.Add("String");
-  if (Contains(SYMBOL)) printer.Add("Symbol");
-  if (Contains(HEAP_NUMBER)) printer.Add("HeapNumber");
-  stream->Add(")");
+OStream& operator<<(OStream& os, const ToBooleanStub::Types& s) {
+  os << "(";
+  SimpleListPrinter p(os);
+  if (s.IsEmpty()) p.Add("None");
+  if (s.Contains(ToBooleanStub::UNDEFINED)) p.Add("Undefined");
+  if (s.Contains(ToBooleanStub::BOOLEAN)) p.Add("Bool");
+  if (s.Contains(ToBooleanStub::NULL_TYPE)) p.Add("Null");
+  if (s.Contains(ToBooleanStub::SMI)) p.Add("Smi");
+  if (s.Contains(ToBooleanStub::SPEC_OBJECT)) p.Add("SpecObject");
+  if (s.Contains(ToBooleanStub::STRING)) p.Add("String");
+  if (s.Contains(ToBooleanStub::SYMBOL)) p.Add("Symbol");
+  if (s.Contains(ToBooleanStub::HEAP_NUMBER)) p.Add("HeapNumber");
+  return os << ")";
 }
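
The operator<< above delegates comma placement to SimpleListPrinter. A minimal stand-in for that helper, using std::ostream rather than V8's OStream (illustrative only):

// Minimal stand-in for a comma-separating list printer (illustrative only;
// V8's SimpleListPrinter wraps its own OStream abstraction).
#include <iostream>

class ListPrinter {
 public:
  explicit ListPrinter(std::ostream& os) : os_(os) {}
  void Add(const char* item) {
    if (!first_) os_ << ",";  // separator before every item but the first
    first_ = false;
    os_ << item;
  }

 private:
  std::ostream& os_;
  bool first_ = true;
};

int main() {
  std::cout << "(";
  ListPrinter p(std::cout);
  p.Add("Smi");
  p.Add("Bool");  // prints "Smi,Bool" between the parentheses
  std::cout << ")" << std::endl;
  return 0;
}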
 
 
@@ -647,7 +888,7 @@
     Add(SYMBOL);
     return true;
   } else if (object->IsHeapNumber()) {
-    ASSERT(!object->IsUndetectableObject());
+    DCHECK(!object->IsUndetectableObject());
     Add(HEAP_NUMBER);
     double value = HeapNumber::cast(*object)->value();
     return value != 0 && !std::isnan(value);
@@ -685,94 +926,14 @@
                                                intptr_t stack_pointer,
                                                Isolate* isolate) {
   FunctionEntryHook entry_hook = isolate->function_entry_hook();
-  ASSERT(entry_hook != NULL);
+  DCHECK(entry_hook != NULL);
   entry_hook(function, stack_pointer);
 }
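
The trampoline above only forwards to whatever hook the embedder registered on the isolate. In miniature (hook registration and the logging callback are invented for this demo):

// Miniature function-entry-hook dispatch: a stored function pointer invoked
// with the callee address and current stack pointer. Demo names only.
#include <cstdint>
#include <cstdio>

typedef void (*FunctionEntryHook)(uintptr_t function, uintptr_t stack_pointer);

static FunctionEntryHook g_entry_hook = NULL;

void EntryHookTrampoline(uintptr_t function, uintptr_t stack_pointer) {
  if (g_entry_hook != NULL) g_entry_hook(function, stack_pointer);
}

static void LogHook(uintptr_t fn, uintptr_t sp) {
  std::printf("enter fn=%llx sp=%llx\n",
              static_cast<unsigned long long>(fn),
              static_cast<unsigned long long>(sp));
}

int main() {
  g_entry_hook = &LogHook;  // embedder registers its hook
  EntryHookTrampoline(0x1234, 0x7fff0000);
  return 0;
}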
 
 
-static void InstallDescriptor(Isolate* isolate, HydrogenCodeStub* stub) {
-  int major_key = stub->MajorKey();
-  CodeStubInterfaceDescriptor* descriptor =
-      isolate->code_stub_interface_descriptor(major_key);
-  if (!descriptor->initialized()) {
-    stub->InitializeInterfaceDescriptor(descriptor);
-  }
-}
-
-
-void ArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
-  ArrayNoArgumentConstructorStub stub1(isolate, GetInitialFastElementsKind());
-  InstallDescriptor(isolate, &stub1);
-  ArraySingleArgumentConstructorStub stub2(isolate,
-                                           GetInitialFastElementsKind());
-  InstallDescriptor(isolate, &stub2);
-  ArrayNArgumentsConstructorStub stub3(isolate, GetInitialFastElementsKind());
-  InstallDescriptor(isolate, &stub3);
-}
-
-
-void NumberToStringStub::InstallDescriptors(Isolate* isolate) {
-  NumberToStringStub stub(isolate);
-  InstallDescriptor(isolate, &stub);
-}
-
-
-void FastNewClosureStub::InstallDescriptors(Isolate* isolate) {
-  FastNewClosureStub stub(isolate, STRICT, false);
-  InstallDescriptor(isolate, &stub);
-}
-
-
-void FastNewContextStub::InstallDescriptors(Isolate* isolate) {
-  FastNewContextStub stub(isolate, FastNewContextStub::kMaximumSlots);
-  InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void FastCloneShallowArrayStub::InstallDescriptors(Isolate* isolate) {
-  FastCloneShallowArrayStub stub(isolate, DONT_TRACK_ALLOCATION_SITE);
-  InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void BinaryOpICStub::InstallDescriptors(Isolate* isolate) {
-  BinaryOpICStub stub(isolate, Token::ADD, NO_OVERWRITE);
-  InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void BinaryOpWithAllocationSiteStub::InstallDescriptors(Isolate* isolate) {
-  BinaryOpWithAllocationSiteStub stub(isolate, Token::ADD, NO_OVERWRITE);
-  InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void StringAddStub::InstallDescriptors(Isolate* isolate) {
-  StringAddStub stub(isolate, STRING_ADD_CHECK_NONE, NOT_TENURED);
-  InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void RegExpConstructResultStub::InstallDescriptors(Isolate* isolate) {
-  RegExpConstructResultStub stub(isolate);
-  InstallDescriptor(isolate, &stub);
-}
-
-
-// static
-void KeyedLoadGenericElementStub::InstallDescriptors(Isolate* isolate) {
-  KeyedLoadGenericElementStub stub(isolate);
-  InstallDescriptor(isolate, &stub);
-}
-
-
 ArrayConstructorStub::ArrayConstructorStub(Isolate* isolate)
-    : PlatformCodeStub(isolate), argument_count_(ANY) {
+    : PlatformCodeStub(isolate) {
+  minor_key_ = ArgumentCountBits::encode(ANY);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
 }
 
@@ -781,11 +942,11 @@
                                            int argument_count)
     : PlatformCodeStub(isolate) {
   if (argument_count == 0) {
-    argument_count_ = NONE;
+    minor_key_ = ArgumentCountBits::encode(NONE);
   } else if (argument_count == 1) {
-    argument_count_ = ONE;
+    minor_key_ = ArgumentCountBits::encode(ONE);
   } else if (argument_count >= 2) {
-    argument_count_ = MORE_THAN_ONE;
+    minor_key_ = ArgumentCountBits::encode(MORE_THAN_ONE);
   } else {
     UNREACHABLE();
   }
@@ -793,15 +954,6 @@
 }
 
 
-void InternalArrayConstructorStubBase::InstallDescriptors(Isolate* isolate) {
-  InternalArrayNoArgumentConstructorStub stub1(isolate, FAST_ELEMENTS);
-  InstallDescriptor(isolate, &stub1);
-  InternalArraySingleArgumentConstructorStub stub2(isolate, FAST_ELEMENTS);
-  InstallDescriptor(isolate, &stub2);
-  InternalArrayNArgumentsConstructorStub stub3(isolate, FAST_ELEMENTS);
-  InstallDescriptor(isolate, &stub3);
-}
-
 InternalArrayConstructorStub::InternalArrayConstructorStub(
     Isolate* isolate) : PlatformCodeStub(isolate) {
   InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
diff --git a/src/code-stubs.h b/src/code-stubs.h
index b243b56..3b31399 100644
--- a/src/code-stubs.h
+++ b/src/code-stubs.h
@@ -9,96 +9,101 @@
 #include "src/assembler.h"
 #include "src/codegen.h"
 #include "src/globals.h"
+#include "src/ic/ic-state.h"
+#include "src/interface-descriptors.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
 
 // List of code stubs used on all platforms.
-#define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
-  V(CallFunction)                        \
-  V(CallConstruct)                       \
-  V(BinaryOpIC)                          \
-  V(BinaryOpICWithAllocationSite)        \
-  V(BinaryOpWithAllocationSite)          \
-  V(StringAdd)                           \
-  V(SubString)                           \
-  V(StringCompare)                       \
-  V(Compare)                             \
-  V(CompareIC)                           \
-  V(CompareNilIC)                        \
-  V(MathPow)                             \
-  V(CallIC)                              \
-  V(CallIC_Array)                        \
-  V(FunctionPrototype)                   \
-  V(RecordWrite)                         \
-  V(StoreBufferOverflow)                 \
-  V(RegExpExec)                          \
-  V(Instanceof)                          \
-  V(ConvertToDouble)                     \
-  V(WriteInt32ToHeapNumber)              \
-  V(StackCheck)                          \
-  V(Interrupt)                           \
-  V(FastNewClosure)                      \
-  V(FastNewContext)                      \
-  V(FastCloneShallowArray)               \
-  V(FastCloneShallowObject)              \
-  V(CreateAllocationSite)                \
-  V(ToBoolean)                           \
-  V(ToNumber)                            \
-  V(ArgumentsAccess)                     \
-  V(RegExpConstructResult)               \
-  V(NumberToString)                      \
-  V(DoubleToI)                           \
-  V(CEntry)                              \
-  V(JSEntry)                             \
-  V(KeyedLoadElement)                    \
-  V(KeyedLoadGeneric)                    \
-  V(ArrayNoArgumentConstructor)          \
-  V(ArraySingleArgumentConstructor)      \
-  V(ArrayNArgumentsConstructor)          \
-  V(InternalArrayNoArgumentConstructor)  \
-  V(InternalArraySingleArgumentConstructor)      \
-  V(InternalArrayNArgumentsConstructor)  \
-  V(KeyedStoreElement)                   \
-  V(DebuggerStatement)                   \
-  V(NameDictionaryLookup)                \
-  V(ElementsTransitionAndStore)          \
-  V(TransitionElementsKind)              \
-  V(StoreArrayLiteralElement)            \
-  V(StubFailureTrampoline)               \
-  V(ArrayConstructor)                    \
-  V(InternalArrayConstructor)            \
-  V(ProfileEntryHook)                    \
-  V(StoreGlobal)                         \
-  V(CallApiFunction)                     \
-  V(CallApiGetter)                       \
-  /* IC Handler stubs */                 \
-  V(LoadField)                           \
-  V(KeyedLoadField)                      \
-  V(StringLength)                        \
-  V(KeyedStringLength)
+#define CODE_STUB_LIST_ALL_PLATFORMS(V)     \
+  /* PlatformCodeStubs */                   \
+  V(ArgumentsAccess)                        \
+  V(ArrayConstructor)                       \
+  V(BinaryOpICWithAllocationSite)           \
+  V(CallApiFunction)                        \
+  V(CallApiGetter)                          \
+  V(CallConstruct)                          \
+  V(CallFunction)                           \
+  V(CallIC)                                 \
+  V(CallIC_Array)                           \
+  V(CEntry)                                 \
+  V(CompareIC)                              \
+  V(DoubleToI)                              \
+  V(FunctionPrototype)                      \
+  V(Instanceof)                             \
+  V(InternalArrayConstructor)               \
+  V(JSEntry)                                \
+  V(KeyedLoadICTrampoline)                  \
+  V(LoadICTrampoline)                       \
+  V(LoadIndexedInterceptor)                 \
+  V(MathPow)                                \
+  V(ProfileEntryHook)                       \
+  V(RecordWrite)                            \
+  V(RegExpExec)                             \
+  V(StoreArrayLiteralElement)               \
+  V(StoreBufferOverflow)                    \
+  V(StoreElement)                           \
+  V(StringCompare)                          \
+  V(StubFailureTrampoline)                  \
+  V(SubString)                              \
+  /* HydrogenCodeStubs */                   \
+  V(ArrayNArgumentsConstructor)             \
+  V(ArrayNoArgumentConstructor)             \
+  V(ArraySingleArgumentConstructor)         \
+  V(BinaryOpIC)                             \
+  V(BinaryOpWithAllocationSite)             \
+  V(CompareNilIC)                           \
+  V(CreateAllocationSite)                   \
+  V(ElementsTransitionAndStore)             \
+  V(FastCloneShallowArray)                  \
+  V(FastCloneShallowObject)                 \
+  V(FastNewClosure)                         \
+  V(FastNewContext)                         \
+  V(InternalArrayNArgumentsConstructor)     \
+  V(InternalArrayNoArgumentConstructor)     \
+  V(InternalArraySingleArgumentConstructor) \
+  V(KeyedLoadGeneric)                       \
+  V(LoadDictionaryElement)                  \
+  V(LoadFastElement)                        \
+  V(MegamorphicLoad)                        \
+  V(NameDictionaryLookup)                   \
+  V(NumberToString)                         \
+  V(RegExpConstructResult)                  \
+  V(StoreFastElement)                       \
+  V(StringAdd)                              \
+  V(ToBoolean)                              \
+  V(ToNumber)                               \
+  V(TransitionElementsKind)                 \
+  V(VectorKeyedLoad)                        \
+  V(VectorLoad)                             \
+  /* IC Handler stubs */                    \
+  V(LoadConstant)                           \
+  V(LoadField)                              \
+  V(KeyedLoadSloppyArguments)               \
+  V(StoreField)                             \
+  V(StoreGlobal)                            \
+  V(StringLength)
 
 // List of code stubs only used on 32-bit ARM platforms.
 #if V8_TARGET_ARCH_ARM
-#define CODE_STUB_LIST_ARM(V)  \
-  V(GetProperty)               \
-  V(SetProperty)               \
-  V(InvokeBuiltin)             \
-  V(DirectCEntry)
+#define CODE_STUB_LIST_ARM(V) \
+  V(DirectCEntry)             \
+  V(WriteInt32ToHeapNumber)
+
 #else
 #define CODE_STUB_LIST_ARM(V)
 #endif
 
 // List of code stubs only used on 64-bit ARM platforms.
 #if V8_TARGET_ARCH_ARM64
-#define CODE_STUB_LIST_ARM64(V)  \
-  V(GetProperty)               \
-  V(SetProperty)               \
-  V(InvokeBuiltin)             \
-  V(DirectCEntry)              \
-  V(StoreRegistersState)       \
-  V(RestoreRegistersState)
+#define CODE_STUB_LIST_ARM64(V) \
+  V(DirectCEntry)               \
+  V(RestoreRegistersState)      \
+  V(StoreRegistersState)
+
 #else
 #define CODE_STUB_LIST_ARM64(V)
 #endif
@@ -106,10 +111,16 @@
 // List of code stubs only used on MIPS platforms.
 #if V8_TARGET_ARCH_MIPS
 #define CODE_STUB_LIST_MIPS(V)  \
-  V(RegExpCEntry)               \
   V(DirectCEntry)               \
+  V(RestoreRegistersState)      \
   V(StoreRegistersState)        \
-  V(RestoreRegistersState)
+  V(WriteInt32ToHeapNumber)
+#elif V8_TARGET_ARCH_MIPS64
+#define CODE_STUB_LIST_MIPS(V)  \
+  V(DirectCEntry)               \
+  V(RestoreRegistersState)      \
+  V(StoreRegistersState)        \
+  V(WriteInt32ToHeapNumber)
 #else
 #define CODE_STUB_LIST_MIPS(V)
 #endif
@@ -125,7 +136,6 @@
 class CodeStub BASE_EMBEDDED {
  public:
   enum Major {
-    UninitializedMajorKey = 0,
 #define DEF_ENUM(name) name,
     CODE_STUB_LIST(DEF_ENUM)
 #undef DEF_ENUM
@@ -142,18 +152,20 @@
   static Major MajorKeyFromKey(uint32_t key) {
     return static_cast<Major>(MajorKeyBits::decode(key));
   }
-  static int MinorKeyFromKey(uint32_t key) {
+  static uint32_t MinorKeyFromKey(uint32_t key) {
     return MinorKeyBits::decode(key);
   }
 
   // Gets the major key from a code object that is a code stub or binary op IC.
   static Major GetMajorKey(Code* code_stub) {
-    return static_cast<Major>(code_stub->major_key());
+    return MajorKeyFromKey(code_stub->stub_key());
   }
 
+  static uint32_t NoCacheKey() { return MajorKeyBits::encode(NoCache); }
+
   static const char* MajorName(Major major_key, bool allow_unknown_keys);
 
-  explicit CodeStub(Isolate* isolate) : isolate_(isolate) { }
+  explicit CodeStub(Isolate* isolate) : minor_key_(0), isolate_(isolate) {}
   virtual ~CodeStub() {}
 
   static void GenerateStubsAheadOfTime(Isolate* isolate);
@@ -170,28 +182,36 @@
   // Lookup the code in the (possibly custom) cache.
   bool FindCodeInCache(Code** code_out);
 
-  // Returns information for computing the number key.
-  virtual Major MajorKey() = 0;
-  virtual int MinorKey() = 0;
+  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() = 0;
 
-  virtual InlineCacheState GetICState() {
-    return UNINITIALIZED;
-  }
-  virtual ExtraICState GetExtraICState() {
-    return kNoExtraICState;
-  }
+  virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) {}
+
+  static void InitializeDescriptor(Isolate* isolate, uint32_t key,
+                                   CodeStubDescriptor* desc);
+
+  static MaybeHandle<Code> GetCode(Isolate* isolate, uint32_t key);
+
+  // Returns information for computing the number key.
+  virtual Major MajorKey() const = 0;
+  uint32_t MinorKey() const { return minor_key_; }
+
+  virtual InlineCacheState GetICState() const { return UNINITIALIZED; }
+  virtual ExtraICState GetExtraICState() const { return kNoExtraICState; }
   virtual Code::StubType GetStubType() {
     return Code::NORMAL;
   }
 
-  virtual void PrintName(StringStream* stream);
-
-  // Returns a name for logging/debugging purposes.
-  SmartArrayPointer<const char> GetName();
+  friend OStream& operator<<(OStream& os, const CodeStub& s) {
+    s.PrintName(os);
+    return os;
+  }
 
   Isolate* isolate() const { return isolate_; }
 
  protected:
+  CodeStub(uint32_t key, Isolate* isolate)
+      : minor_key_(MinorKeyFromKey(key)), isolate_(isolate) {}
+
   // Generates the assembler code for the stub.
   virtual Handle<Code> GenerateCode() = 0;
 
@@ -199,8 +219,17 @@
   // a fixed (non-moveable) code object.
   virtual bool NeedsImmovableCode() { return false; }
 
-  virtual void PrintBaseName(StringStream* stream);
-  virtual void PrintState(StringStream* stream) { }
+  virtual void PrintName(OStream& os) const;        // NOLINT
+  virtual void PrintBaseName(OStream& os) const;    // NOLINT
+  virtual void PrintState(OStream& os) const { ; }  // NOLINT
+
+  // Computes the key based on major and minor.
+  uint32_t GetKey() {
+    DCHECK(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
+    return MinorKeyBits::encode(MinorKey()) | MajorKeyBits::encode(MajorKey());
+  }
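
A self-contained approximation of the BitField packing that GetKey() relies on; the 7/25 bit split is an assumption for illustration, the real widths being kStubMajorKeyBits and kStubMinorKeyBits from globals.h:

// Self-contained sketch of the BitField packing behind CodeStub::GetKey().
// The 7/25 split is assumed for illustration; the real constants are
// kStubMajorKeyBits and kStubMinorKeyBits from globals.h.
#include <cassert>
#include <cstdint>

template <class T, int kShift, int kSize>
struct BitField {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    assert((static_cast<uint32_t>(value) & ~((1u << kSize) - 1)) == 0);
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t packed) {
    return static_cast<T>((packed & kMask) >> kShift);
  }
};

typedef BitField<uint32_t, 0, 7> MajorKeyBits;   // low bits: which stub
typedef BitField<uint32_t, 7, 25> MinorKeyBits;  // high bits: its parameters

int main() {
  uint32_t key = MinorKeyBits::encode(42) | MajorKeyBits::encode(5);
  assert(MajorKeyBits::decode(key) == 5);  // round-trips cleanly
  assert(MinorKeyBits::decode(key) == 42);
  return 0;
}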
+
+  uint32_t minor_key_;
 
  private:
   // Perform bookkeeping required after code generation when stub code is
@@ -230,12 +259,13 @@
   // If a stub uses a special cache override this.
   virtual bool UseSpecialCache() { return false; }
 
-  // Computes the key based on major and minor.
-  uint32_t GetKey() {
-    ASSERT(static_cast<int>(MajorKey()) < NUMBER_OF_IDS);
-    return MinorKeyBits::encode(MinorKey()) |
-           MajorKeyBits::encode(MajorKey());
-  }
+  // We use this dispatch to statically instantiate the correct code stub for
+  // the given stub key and call the passed function with that code stub.
+  typedef void (*DispatchedCall)(CodeStub* stub, void** value_out);
+  static void Dispatch(Isolate* isolate, uint32_t key, void** value_out,
+                       DispatchedCall call);
+
+  static void GetCodeDispatchCall(CodeStub* stub, void** value_out);
 
   STATIC_ASSERT(NUMBER_OF_IDS < (1 << kStubMajorKeyBits));
   class MajorKeyBits: public BitField<uint32_t, 0, kStubMajorKeyBits> {};
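
A toy standalone version of the Dispatch/DispatchedCall technique declared above: switch on the major key, instantiate the matching stub on the stack, and hand it to the callback through a void** out-parameter (all names demo-only):

// Toy version of dispatch-by-stub-key: decode the major key, statically
// instantiate the matching stub type, and pass it to a callback through a
// void** out-parameter. All names here are demo-only.
#include <cassert>
#include <cstdint>

struct Stub {
  virtual ~Stub() {}
  virtual int Id() const = 0;
};
struct AddStub : public Stub { virtual int Id() const { return 0; } };
struct SubStub : public Stub { virtual int Id() const { return 1; } };

typedef void (*DispatchedCall)(Stub* stub, void** value_out);

void Dispatch(uint32_t major_key, void** value_out, DispatchedCall call) {
  switch (major_key) {  // statically instantiate the right stub type
    case 0: { AddStub stub; call(&stub, value_out); break; }
    case 1: { SubStub stub; call(&stub, value_out); break; }
    default: assert(false);
  }
}

void GetIdCall(Stub* stub, void** value_out) {
  int* out = reinterpret_cast<int*>(value_out);
  *out = stub->Id();
}

int main() {
  int id = -1;
  Dispatch(1, reinterpret_cast<void**>(&id), &GetIdCall);
  assert(id == 1);  // callback saw a SubStub
  return 0;
}
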
@@ -248,123 +278,154 @@
 };
 
 
+#define DEFINE_CODE_STUB_BASE(NAME, SUPER)                      \
+ public:                                                        \
+  NAME(uint32_t key, Isolate* isolate) : SUPER(key, isolate) {} \
+                                                                \
+ private:                                                       \
+  DISALLOW_COPY_AND_ASSIGN(NAME)
+
+
+#define DEFINE_CODE_STUB(NAME, SUPER)              \
+ protected:                                        \
+  virtual inline Major MajorKey() const OVERRIDE { \
+    return NAME;                                   \
+  }                                                \
+  DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
+
+
+#define DEFINE_PLATFORM_CODE_STUB(NAME, SUPER)          \
+ private:                                               \
+  virtual void Generate(MacroAssembler* masm) OVERRIDE; \
+  DEFINE_CODE_STUB(NAME, SUPER)
+
+
+#define DEFINE_HYDROGEN_CODE_STUB(NAME, SUPER)                                \
+ public:                                                                      \
+  virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE; \
+  virtual Handle<Code> GenerateCode() OVERRIDE;                               \
+  DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER)   \
+ public:                                        \
+  virtual Handle<Code> GenerateCode() OVERRIDE; \
+  DEFINE_CODE_STUB(NAME, SUPER)
+
+#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME)                            \
+ public:                                                                  \
+  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
+    return NAME##Descriptor(isolate());                                   \
+  }
+
+// There are some code stubs we just can't describe right now with a
+// CallInterfaceDescriptor. Isolate the behavior for those cases with this
+// macro; any attempt to retrieve such a descriptor will fail.
+#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR()                           \
+ public:                                                                  \
+  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
+    UNREACHABLE();                                                        \
+    return CallInterfaceDescriptor();                                     \
+  }
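
These DEFINE_* macros let each stub class declare only what varies; everything derivable from the stub's name, such as the MajorKey() override, is generated by token pasting. The same technique in a standalone demo (enum and class names invented):

// Standalone demo of the DEFINE_*_CODE_STUB technique: a macro token-pastes
// the stub name to generate the MajorKey() override boilerplate.
#include <cstdio>

enum Major { ToNumberDemo, ToBooleanDemo, NUMBER_OF_IDS };

class Stub {
 public:
  virtual ~Stub() {}
  virtual Major MajorKey() const = 0;
};

#define DEFINE_DEMO_STUB(NAME)                          \
 public:                                                \
  virtual Major MajorKey() const { return NAME##Demo; }

class ToBooleanStub : public Stub {
  DEFINE_DEMO_STUB(ToBoolean)
};

int main() {
  ToBooleanStub stub;
  std::printf("major key: %d\n", static_cast<int>(stub.MajorKey()));  // 1
  return 0;
}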
+
+
 class PlatformCodeStub : public CodeStub {
  public:
-  explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) { }
-
   // Retrieve the code for the stub. Generate the code if needed.
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+  virtual Handle<Code> GenerateCode() OVERRIDE;
 
   virtual Code::Kind GetCodeKind() const { return Code::STUB; }
 
  protected:
+  explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) {}
+
   // Generates the assembler code for the stub.
   virtual void Generate(MacroAssembler* masm) = 0;
+
+  DEFINE_CODE_STUB_BASE(PlatformCodeStub, CodeStub);
 };
 
 
 enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
 enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
 
-struct CodeStubInterfaceDescriptor {
-  CodeStubInterfaceDescriptor();
-  int register_param_count_;
 
-  Register stack_parameter_count_;
-  // if hint_stack_parameter_count_ > 0, the code stub can optimize the
-  // return sequence. Default value is -1, which means it is ignored.
-  int hint_stack_parameter_count_;
-  StubFunctionMode function_mode_;
-  Register* register_params_;
-  // Specifies Representations for the stub's parameter. Points to an array of
-  // Representations of the same length of the numbers of parameters to the
-  // stub, or if NULL (the default value), Representation of each parameter
-  // assumed to be Tagged()
-  Representation* register_param_representations_;
+class CodeStubDescriptor {
+ public:
+  explicit CodeStubDescriptor(CodeStub* stub);
 
-  Address deoptimization_handler_;
-  HandlerArgumentsMode handler_arguments_mode_;
+  CodeStubDescriptor(Isolate* isolate, uint32_t stub_key);
 
-  bool initialized() const { return register_param_count_ >= 0; }
-
-  int environment_length() const {
-    return register_param_count_;
-  }
+  void Initialize(Address deoptimization_handler = NULL,
+                  int hint_stack_parameter_count = -1,
+                  StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE);
+  void Initialize(Register stack_parameter_count,
+                  Address deoptimization_handler = NULL,
+                  int hint_stack_parameter_count = -1,
+                  StubFunctionMode function_mode = NOT_JS_FUNCTION_STUB_MODE,
+                  HandlerArgumentsMode handler_mode = DONT_PASS_ARGUMENTS);
 
   void SetMissHandler(ExternalReference handler) {
     miss_handler_ = handler;
     has_miss_handler_ = true;
     // Our miss handler infrastructure doesn't currently support
     // variable stack parameter counts.
-    ASSERT(!stack_parameter_count_.is_valid());
+    DCHECK(!stack_parameter_count_.is_valid());
   }
 
-  ExternalReference miss_handler() {
-    ASSERT(has_miss_handler_);
+  void set_call_descriptor(CallInterfaceDescriptor d) { call_descriptor_ = d; }
+  CallInterfaceDescriptor call_descriptor() const { return call_descriptor_; }
+
+  int GetEnvironmentParameterCount() const {
+    return call_descriptor().GetEnvironmentParameterCount();
+  }
+
+  Representation GetEnvironmentParameterRepresentation(int index) const {
+    return call_descriptor().GetEnvironmentParameterRepresentation(index);
+  }
+
+  ExternalReference miss_handler() const {
+    DCHECK(has_miss_handler_);
     return miss_handler_;
   }
 
-  bool has_miss_handler() {
+  bool has_miss_handler() const {
     return has_miss_handler_;
   }
 
-  Register GetParameterRegister(int index) const {
-    return register_params_[index];
+  bool IsEnvironmentParameterCountRegister(int index) const {
+    return call_descriptor().GetEnvironmentParameterRegister(index).is(
+        stack_parameter_count_);
   }
 
-  bool IsParameterCountRegister(int index) {
-    return GetParameterRegister(index).is(stack_parameter_count_);
-  }
-
-  int GetHandlerParameterCount() {
-    int params = environment_length();
+  int GetHandlerParameterCount() const {
+    int params = call_descriptor().GetEnvironmentParameterCount();
     if (handler_arguments_mode_ == PASS_ARGUMENTS) {
       params += 1;
     }
     return params;
   }
 
+  int hint_stack_parameter_count() const { return hint_stack_parameter_count_; }
+  Register stack_parameter_count() const { return stack_parameter_count_; }
+  StubFunctionMode function_mode() const { return function_mode_; }
+  Address deoptimization_handler() const { return deoptimization_handler_; }
+
  private:
+  CallInterfaceDescriptor call_descriptor_;
+  Register stack_parameter_count_;
+  // If hint_stack_parameter_count_ > 0, the code stub can optimize the
+  // return sequence. Default value is -1, which means it is ignored.
+  int hint_stack_parameter_count_;
+  StubFunctionMode function_mode_;
+
+  Address deoptimization_handler_;
+  HandlerArgumentsMode handler_arguments_mode_;
+
   ExternalReference miss_handler_;
   bool has_miss_handler_;
 };
 
 
-struct PlatformCallInterfaceDescriptor;
-
-
-struct CallInterfaceDescriptor {
-  CallInterfaceDescriptor()
-      : register_param_count_(-1),
-        register_params_(NULL),
-        param_representations_(NULL),
-        platform_specific_descriptor_(NULL) { }
-
-  bool initialized() const { return register_param_count_ >= 0; }
-
-  int environment_length() const {
-    return register_param_count_;
-  }
-
-  Representation GetParameterRepresentation(int index) const {
-    return param_representations_[index];
-  }
-
-  Register GetParameterRegister(int index) const {
-    return register_params_[index];
-  }
-
-  PlatformCallInterfaceDescriptor* platform_specific_descriptor() const {
-    return platform_specific_descriptor_;
-  }
-
-  int register_param_count_;
-  Register* register_params_;
-  Representation* param_representations_;
-  PlatformCallInterfaceDescriptor* platform_specific_descriptor_;
-};
-
-
 class HydrogenCodeStub : public CodeStub {
  public:
   enum InitializationState {
@@ -372,49 +433,46 @@
     INITIALIZED
   };
 
-  HydrogenCodeStub(Isolate* isolate, InitializationState state = INITIALIZED)
-      : CodeStub(isolate) {
-    is_uninitialized_ = (state == UNINITIALIZED);
-  }
-
   virtual Code::Kind GetCodeKind() const { return Code::STUB; }
 
-  CodeStubInterfaceDescriptor* GetInterfaceDescriptor() {
-    return isolate()->code_stub_interface_descriptor(MajorKey());
-  }
-
-  bool IsUninitialized() { return is_uninitialized_; }
-
   template<class SubClass>
   static Handle<Code> GetUninitialized(Isolate* isolate) {
     SubClass::GenerateAheadOfTime(isolate);
     return SubClass().GetCode(isolate);
   }
 
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) = 0;
-
   // Retrieve the code for the stub. Generate the code if needed.
   virtual Handle<Code> GenerateCode() = 0;
 
-  virtual int NotMissMinorKey() = 0;
+  bool IsUninitialized() const { return IsMissBits::decode(minor_key_); }
 
-  Handle<Code> GenerateLightweightMissCode();
+  Handle<Code> GenerateLightweightMissCode(ExternalReference miss);
 
   template<class StateType>
   void TraceTransition(StateType from, StateType to);
 
- private:
-  class MinorKeyBits: public BitField<int, 0, kStubMinorKeyBits - 1> {};
-  class IsMissBits: public BitField<bool, kStubMinorKeyBits - 1, 1> {};
-
-  void GenerateLightweightMiss(MacroAssembler* masm);
-  virtual int MinorKey() {
-    return IsMissBits::encode(is_uninitialized_) |
-        MinorKeyBits::encode(NotMissMinorKey());
+ protected:
+  explicit HydrogenCodeStub(Isolate* isolate,
+                            InitializationState state = INITIALIZED)
+      : CodeStub(isolate) {
+    minor_key_ = IsMissBits::encode(state == UNINITIALIZED);
   }
 
-  bool is_uninitialized_;
+  void set_sub_minor_key(uint32_t key) {
+    minor_key_ = SubMinorKeyBits::update(minor_key_, key);
+  }
+
+  uint32_t sub_minor_key() const { return SubMinorKeyBits::decode(minor_key_); }
+
+  static const int kSubMinorKeyBits = kStubMinorKeyBits - 1;
+
+ private:
+  class IsMissBits : public BitField<bool, kSubMinorKeyBits, 1> {};
+  class SubMinorKeyBits : public BitField<int, 0, kSubMinorKeyBits> {};
+
+  void GenerateLightweightMiss(MacroAssembler* masm, ExternalReference miss);
+
+  DEFINE_CODE_STUB_BASE(HydrogenCodeStub, CodeStub);
 };
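
HydrogenCodeStub thus splits the minor key: the top bit records the uninitialized (lightweight-miss) state and subclasses own the remaining bits via set_sub_minor_key(). A freestanding sketch of that layout, with the 25-bit total width assumed for illustration:

// Freestanding sketch of HydrogenCodeStub's minor-key layout: the top bit
// is the is-miss flag, the low bits are the subclass's sub-minor key.
// The 25-bit total width is an assumption; see kStubMinorKeyBits.
#include <cassert>
#include <cstdint>

const int kMinorKeyBits = 25;  // assumption; real value is in globals.h
const int kSubMinorKeyBits = kMinorKeyBits - 1;

const uint32_t kSubMinorMask = (1u << kSubMinorKeyBits) - 1;
const uint32_t kIsMissMask = 1u << kSubMinorKeyBits;

uint32_t SetSubMinorKey(uint32_t minor_key, uint32_t sub_key) {
  assert((sub_key & ~kSubMinorMask) == 0);  // must fit below the miss bit
  return (minor_key & kIsMissMask) | sub_key;
}

int main() {
  uint32_t minor_key = kIsMissMask;               // stub starts uninitialized
  minor_key = SetSubMinorKey(minor_key, 0x1234);  // subclass stores its state
  assert((minor_key & kIsMissMask) != 0);         // miss bit preserved
  assert((minor_key & kSubMinorMask) == 0x1234);
  return 0;
}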
 
 
@@ -447,6 +505,8 @@
 #include "src/arm/code-stubs-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "src/mips/code-stubs-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/code-stubs-mips64.h"
 #elif V8_TARGET_ARCH_X87
 #include "src/x87/code-stubs-x87.h"
 #else
@@ -484,103 +544,72 @@
  public:
   explicit ToNumberStub(Isolate* isolate) : HydrogenCodeStub(isolate) { }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate) {
-    ToNumberStub stub(isolate);
-    stub.InitializeInterfaceDescriptor(
-        isolate->code_stub_interface_descriptor(CodeStub::ToNumber));
-  }
-
- private:
-  Major MajorKey() { return ToNumber; }
-  int NotMissMinorKey() { return 0; }
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToNumber);
+  DEFINE_HYDROGEN_CODE_STUB(ToNumber, HydrogenCodeStub);
 };
 
 
-class NumberToStringStub V8_FINAL : public HydrogenCodeStub {
+class NumberToStringStub FINAL : public HydrogenCodeStub {
  public:
   explicit NumberToStringStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate);
-
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kNumber = 0;
 
- private:
-  virtual Major MajorKey() V8_OVERRIDE { return NumberToString; }
-  virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(NumberToString);
+  DEFINE_HYDROGEN_CODE_STUB(NumberToString, HydrogenCodeStub);
 };
 
 
 class FastNewClosureStub : public HydrogenCodeStub {
  public:
-  FastNewClosureStub(Isolate* isolate,
-                     StrictMode strict_mode,
-                     bool is_generator)
-      : HydrogenCodeStub(isolate),
-        strict_mode_(strict_mode),
-        is_generator_(is_generator) { }
-
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate);
-
-  StrictMode strict_mode() const { return strict_mode_; }
-  bool is_generator() const { return is_generator_; }
-
- private:
-  class StrictModeBits: public BitField<bool, 0, 1> {};
-  class IsGeneratorBits: public BitField<bool, 1, 1> {};
-
-  Major MajorKey() { return FastNewClosure; }
-  int NotMissMinorKey() {
-    return StrictModeBits::encode(strict_mode_ == STRICT) |
-      IsGeneratorBits::encode(is_generator_);
+  FastNewClosureStub(Isolate* isolate, StrictMode strict_mode,
+                     FunctionKind kind)
+      : HydrogenCodeStub(isolate) {
+    DCHECK(IsValidFunctionKind(kind));
+    set_sub_minor_key(StrictModeBits::encode(strict_mode) |
+                      FunctionKindBits::encode(kind));
   }
 
-  StrictMode strict_mode_;
-  bool is_generator_;
+  StrictMode strict_mode() const {
+    return StrictModeBits::decode(sub_minor_key());
+  }
+
+  FunctionKind kind() const {
+    return FunctionKindBits::decode(sub_minor_key());
+  }
+  bool is_arrow() const { return IsArrowFunction(kind()); }
+  bool is_generator() const { return IsGeneratorFunction(kind()); }
+  bool is_concise_method() const { return IsConciseMethod(kind()); }
+
+ private:
+  class StrictModeBits : public BitField<StrictMode, 0, 1> {};
+  class FunctionKindBits : public BitField<FunctionKind, 1, 3> {};
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
+  DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
 };
 
 
-class FastNewContextStub V8_FINAL : public HydrogenCodeStub {
+class FastNewContextStub FINAL : public HydrogenCodeStub {
  public:
   static const int kMaximumSlots = 64;
 
-  FastNewContextStub(Isolate* isolate, int slots)
-      : HydrogenCodeStub(isolate), slots_(slots) {
-    ASSERT(slots_ > 0 && slots_ <= kMaximumSlots);
+  FastNewContextStub(Isolate* isolate, int slots) : HydrogenCodeStub(isolate) {
+    DCHECK(slots > 0 && slots <= kMaximumSlots);
+    set_sub_minor_key(SlotsBits::encode(slots));
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate);
-
-  int slots() const { return slots_; }
-
-  virtual Major MajorKey() V8_OVERRIDE { return FastNewContext; }
-  virtual int NotMissMinorKey() V8_OVERRIDE { return slots_; }
+  int slots() const { return SlotsBits::decode(sub_minor_key()); }
 
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kFunction = 0;
 
  private:
-  int slots_;
+  class SlotsBits : public BitField<int, 0, 8> {};
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewContext);
+  DEFINE_HYDROGEN_CODE_STUB(FastNewContext, HydrogenCodeStub);
 };
 
 
@@ -588,29 +617,19 @@
  public:
   FastCloneShallowArrayStub(Isolate* isolate,
                             AllocationSiteMode allocation_site_mode)
-      : HydrogenCodeStub(isolate),
-      allocation_site_mode_(allocation_site_mode) {}
+      : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(AllocationSiteModeBits::encode(allocation_site_mode));
+  }
 
   AllocationSiteMode allocation_site_mode() const {
-    return allocation_site_mode_;
+    return AllocationSiteModeBits::decode(sub_minor_key());
   }
 
-  virtual Handle<Code> GenerateCode();
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate);
-
  private:
-  AllocationSiteMode allocation_site_mode_;
-
   class AllocationSiteModeBits: public BitField<AllocationSiteMode, 0, 1> {};
-  // Ensure data fits within available bits.
-  Major MajorKey() { return FastCloneShallowArray; }
-  int NotMissMinorKey() {
-    return AllocationSiteModeBits::encode(allocation_site_mode_);
-  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowArray);
+  DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowArray, HydrogenCodeStub);
 };
 
 
@@ -620,25 +639,19 @@
   static const int kMaximumClonedProperties = 6;
 
   FastCloneShallowObjectStub(Isolate* isolate, int length)
-      : HydrogenCodeStub(isolate), length_(length) {
-    ASSERT_GE(length_, 0);
-    ASSERT_LE(length_, kMaximumClonedProperties);
+      : HydrogenCodeStub(isolate) {
+    DCHECK_GE(length, 0);
+    DCHECK_LE(length, kMaximumClonedProperties);
+    set_sub_minor_key(LengthBits::encode(length));
   }
 
-  int length() const { return length_; }
-
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+  int length() const { return LengthBits::decode(sub_minor_key()); }
 
  private:
-  int length_;
+  class LengthBits : public BitField<int, 0, 4> {};
 
-  Major MajorKey() { return FastCloneShallowObject; }
-  int NotMissMinorKey() { return length_; }
-
-  DISALLOW_COPY_AND_ASSIGN(FastCloneShallowObjectStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(FastCloneShallowObject);
+  DEFINE_HYDROGEN_CODE_STUB(FastCloneShallowObject, HydrogenCodeStub);
 };
 
 
@@ -647,18 +660,10 @@
   explicit CreateAllocationSiteStub(Isolate* isolate)
       : HydrogenCodeStub(isolate) { }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
   static void GenerateAheadOfTime(Isolate* isolate);
 
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
-  Major MajorKey() { return CreateAllocationSite; }
-  int NotMissMinorKey() { return 0; }
-
-  DISALLOW_COPY_AND_ASSIGN(CreateAllocationSiteStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CreateAllocationSite);
+  DEFINE_HYDROGEN_CODE_STUB(CreateAllocationSite, HydrogenCodeStub);
 };
 
 
@@ -671,33 +676,38 @@
     kReturnTrueFalseObject = 1 << 2
   };
 
-  InstanceofStub(Isolate* isolate, Flags flags)
-      : PlatformCodeStub(isolate), flags_(flags) { }
-
-  static Register left();
-  static Register right();
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return Instanceof; }
-  int MinorKey() { return static_cast<int>(flags_); }
-
-  bool HasArgsInRegisters() const {
-    return (flags_ & kArgsInRegisters) != 0;
+  InstanceofStub(Isolate* isolate, Flags flags) : PlatformCodeStub(isolate) {
+    minor_key_ = FlagBits::encode(flags);
   }
 
+  static Register left() { return InstanceofDescriptor::left(); }
+  static Register right() { return InstanceofDescriptor::right(); }
+
+  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+    if (HasArgsInRegisters()) {
+      return InstanceofDescriptor(isolate());
+    }
+    return ContextOnlyDescriptor(isolate());
+  }
+
+ private:
+  Flags flags() const { return FlagBits::decode(minor_key_); }
+
+  bool HasArgsInRegisters() const { return (flags() & kArgsInRegisters) != 0; }
+
   bool HasCallSiteInlineCheck() const {
-    return (flags_ & kCallSiteInlineCheck) != 0;
+    return (flags() & kCallSiteInlineCheck) != 0;
   }
 
   bool ReturnTrueFalseObject() const {
-    return (flags_ & kReturnTrueFalseObject) != 0;
+    return (flags() & kReturnTrueFalseObject) != 0;
   }
 
-  virtual void PrintName(StringStream* stream);
+  virtual void PrintName(OStream& os) const OVERRIDE;  // NOLINT
 
-  Flags flags_;
+  class FlagBits : public BitField<Flags, 0, 3> {};
+
+  DEFINE_PLATFORM_CODE_STUB(Instanceof, PlatformCodeStub);
 };
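
InstanceofStub folds its three boolean options into a flags enum stored through FlagBits. The same pattern in miniature (demo names, with the 3-bit mask hard-coded):

// Minimal version of the InstanceofStub flags pattern: an enum of
// single-bit options packed into the low bits of a key. Demo names only.
#include <cassert>
#include <cstdint>

enum Flags {
  kNone = 0,
  kArgsInRegisters = 1 << 0,
  kCallSiteInlineCheck = 1 << 1,
  kReturnTrueFalseObject = 1 << 2
};

struct DemoStub {
  explicit DemoStub(Flags flags) : key_(static_cast<uint32_t>(flags) & 0x7) {}
  Flags flags() const { return static_cast<Flags>(key_ & 0x7); }
  bool HasArgsInRegisters() const { return (flags() & kArgsInRegisters) != 0; }
  uint32_t key_;  // low 3 bits hold the flags (the FlagBits equivalent)
};

int main() {
  DemoStub stub(static_cast<Flags>(kArgsInRegisters | kReturnTrueFalseObject));
  assert(stub.HasArgsInRegisters());
  assert((stub.flags() & kCallSiteInlineCheck) == 0);
  return 0;
}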
 
 
@@ -711,20 +721,25 @@
 class ArrayConstructorStub: public PlatformCodeStub {
  public:
   enum ArgumentCountKey { ANY, NONE, ONE, MORE_THAN_ONE };
+
   ArrayConstructorStub(Isolate* isolate, int argument_count);
+
   explicit ArrayConstructorStub(Isolate* isolate);
 
-  void Generate(MacroAssembler* masm);
-
  private:
+  ArgumentCountKey argument_count() const {
+    return ArgumentCountBits::decode(minor_key_);
+  }
+
   void GenerateDispatchToArrayStub(MacroAssembler* masm,
                                    AllocationSiteOverrideMode mode);
-  virtual void PrintName(StringStream* stream);
 
-  virtual CodeStub::Major MajorKey() { return ArrayConstructor; }
-  virtual int MinorKey() { return argument_count_; }
+  virtual void PrintName(OStream& os) const OVERRIDE;  // NOLINT
 
-  ArgumentCountKey argument_count_;
+  class ArgumentCountBits : public BitField<ArgumentCountKey, 0, 2> {};
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
+  DEFINE_PLATFORM_CODE_STUB(ArrayConstructor, PlatformCodeStub);
 };
 
 
@@ -732,13 +747,11 @@
  public:
   explicit InternalArrayConstructorStub(Isolate* isolate);
 
-  void Generate(MacroAssembler* masm);
-
  private:
-  virtual CodeStub::Major MajorKey() { return InternalArrayConstructor; }
-  virtual int MinorKey() { return 0; }
-
   void GenerateCase(MacroAssembler* masm, ElementsKind kind);
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
+  DEFINE_PLATFORM_CODE_STUB(InternalArrayConstructor, PlatformCodeStub);
 };
 
 
@@ -747,236 +760,239 @@
   enum ExponentType { INTEGER, DOUBLE, TAGGED, ON_STACK };
 
   MathPowStub(Isolate* isolate, ExponentType exponent_type)
-      : PlatformCodeStub(isolate), exponent_type_(exponent_type) { }
-  virtual void Generate(MacroAssembler* masm);
-
- private:
-  virtual CodeStub::Major MajorKey() { return MathPow; }
-  virtual int MinorKey() { return exponent_type_; }
-
-  ExponentType exponent_type_;
-};
-
-
-class ICStub: public PlatformCodeStub {
- public:
-  ICStub(Isolate* isolate, Code::Kind kind)
-      : PlatformCodeStub(isolate), kind_(kind) { }
-  virtual Code::Kind GetCodeKind() const { return kind_; }
-  virtual InlineCacheState GetICState() { return MONOMORPHIC; }
-
-  bool Describes(Code* code) {
-    return GetMajorKey(code) == MajorKey() && code->stub_info() == MinorKey();
+      : PlatformCodeStub(isolate) {
+    minor_key_ = ExponentTypeBits::encode(exponent_type);
   }
 
- protected:
-  class KindBits: public BitField<Code::Kind, 0, 4> {};
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_stub_info(MinorKey());
-  }
-  Code::Kind kind() { return kind_; }
-
-  virtual int MinorKey() {
-    return KindBits::encode(kind_);
+  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+    if (exponent_type() == TAGGED) {
+      return MathPowTaggedDescriptor(isolate());
+    } else if (exponent_type() == INTEGER) {
+      return MathPowIntegerDescriptor(isolate());
+    }
+    // A CallInterfaceDescriptor doesn't specify double registers (yet).
+    return ContextOnlyDescriptor(isolate());
   }
 
  private:
-  Code::Kind kind_;
+  ExponentType exponent_type() const {
+    return ExponentTypeBits::decode(minor_key_);
+  }
+
+  class ExponentTypeBits : public BitField<ExponentType, 0, 2> {};
+
+  DEFINE_PLATFORM_CODE_STUB(MathPow, PlatformCodeStub);
 };
 
 
 class CallICStub: public PlatformCodeStub {
  public:
-  CallICStub(Isolate* isolate, const CallIC::State& state)
-      : PlatformCodeStub(isolate), state_(state) {}
-
-  bool CallAsMethod() const { return state_.CallAsMethod(); }
-
-  int arg_count() const { return state_.arg_count(); }
+  CallICStub(Isolate* isolate, const CallICState& state)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = state.GetExtraICState();
+  }
 
   static int ExtractArgcFromMinorKey(int minor_key) {
-    CallIC::State state((ExtraICState) minor_key);
+    CallICState state(static_cast<ExtraICState>(minor_key));
     return state.arg_count();
   }
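
CallICStub round-trips its CallICState through the minor key: the state is serialized with GetExtraICState() at construction and reconstructed from the raw integer when needed, as in ExtractArgcFromMinorKey() above. A toy equivalent with an invented 1-bit/31-bit field layout:

// Toy round-trip of an IC state through an integer key, mirroring
// CallICStub's use of CallICState. The 1-bit/31-bit layout is invented.
#include <cassert>
#include <cstdint>

class DemoCallState {
 public:
  DemoCallState(int arg_count, bool call_as_method)
      : bits_((static_cast<uint32_t>(arg_count) << 1) |
              (call_as_method ? 1u : 0u)) {}
  explicit DemoCallState(uint32_t extra_ic_state) : bits_(extra_ic_state) {}
  uint32_t GetExtraICState() const { return bits_; }
  int arg_count() const { return static_cast<int>(bits_ >> 1); }
  bool call_as_method() const { return (bits_ & 1u) != 0; }

 private:
  uint32_t bits_;
};

int main() {
  uint32_t minor_key = DemoCallState(3, true).GetExtraICState();
  DemoCallState decoded(minor_key);  // reconstruct from the key alone
  assert(decoded.arg_count() == 3);
  assert(decoded.call_as_method());
  return 0;
}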
 
-  virtual void Generate(MacroAssembler* masm);
+  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::CALL_IC; }
 
-  virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
-    return Code::CALL_IC;
-  }
+  virtual InlineCacheState GetICState() const OVERRIDE { return DEFAULT; }
 
-  virtual InlineCacheState GetICState() V8_FINAL V8_OVERRIDE {
-    return state_.GetICState();
-  }
-
-  virtual ExtraICState GetExtraICState() V8_FINAL V8_OVERRIDE {
-    return state_.GetExtraICState();
+  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+    return static_cast<ExtraICState>(minor_key_);
   }
 
  protected:
-  virtual int MinorKey() { return GetExtraICState(); }
-  virtual void PrintState(StringStream* stream) V8_OVERRIDE;
+  bool CallAsMethod() const {
+    return state().call_type() == CallICState::METHOD;
+  }
 
-  virtual CodeStub::Major MajorKey() { return CallIC; }
+  int arg_count() const { return state().arg_count(); }
+
+  CallICState state() const {
+    return CallICState(static_cast<ExtraICState>(minor_key_));
+  }
 
   // Code generation helpers.
-  void GenerateMiss(MacroAssembler* masm, IC::UtilityId id);
+  void GenerateMiss(MacroAssembler* masm);
 
-  const CallIC::State state_;
+ private:
+  virtual void PrintState(OStream& os) const OVERRIDE;  // NOLINT
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedback);
+  DEFINE_PLATFORM_CODE_STUB(CallIC, PlatformCodeStub);
 };
 
 
 class CallIC_ArrayStub: public CallICStub {
  public:
-  CallIC_ArrayStub(Isolate* isolate, const CallIC::State& state_in)
+  CallIC_ArrayStub(Isolate* isolate, const CallICState& state_in)
       : CallICStub(isolate, state_in) {}
 
-  virtual void Generate(MacroAssembler* masm);
-
- protected:
-  virtual void PrintState(StringStream* stream) V8_OVERRIDE;
-
-  virtual CodeStub::Major MajorKey() { return CallIC_Array; }
-};
-
-
-class FunctionPrototypeStub: public ICStub {
- public:
-  FunctionPrototypeStub(Isolate* isolate, Code::Kind kind)
-      : ICStub(isolate, kind) { }
-  virtual void Generate(MacroAssembler* masm);
-
- private:
-  virtual CodeStub::Major MajorKey() { return FunctionPrototype; }
-};
-
-
-class StoreICStub: public ICStub {
- public:
-  StoreICStub(Isolate* isolate, Code::Kind kind, StrictMode strict_mode)
-      : ICStub(isolate, kind), strict_mode_(strict_mode) { }
-
- protected:
-  virtual ExtraICState GetExtraICState() {
-    return StoreIC::ComputeExtraICState(strict_mode_);
+  virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+    return MONOMORPHIC;
   }
 
  private:
-  STATIC_ASSERT(KindBits::kSize == 4);
-  class StrictModeBits: public BitField<bool, 4, 1> {};
-  virtual int MinorKey() {
-    return KindBits::encode(kind()) | StrictModeBits::encode(strict_mode_);
-  }
+  virtual void PrintState(OStream& os) const OVERRIDE;  // NOLINT
 
-  StrictMode strict_mode_;
+  DEFINE_PLATFORM_CODE_STUB(CallIC_Array, CallICStub);
 };
 
 
-class HICStub: public HydrogenCodeStub {
+// TODO(verwaest): Translate to hydrogen code stub.
+class FunctionPrototypeStub : public PlatformCodeStub {
  public:
-  explicit HICStub(Isolate* isolate) : HydrogenCodeStub(isolate) { }
-  virtual Code::Kind GetCodeKind() const { return kind(); }
-  virtual InlineCacheState GetICState() { return MONOMORPHIC; }
+  explicit FunctionPrototypeStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
 
- protected:
-  class KindBits: public BitField<Code::Kind, 0, 4> {};
-  virtual Code::Kind kind() const = 0;
+  virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
+
+  // TODO(mvstanton): only the receiver register is accessed. When this is
+  // translated to a hydrogen code stub, a new CallInterfaceDescriptor
+  // should be created that just uses that register for more efficient code.
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+  DEFINE_PLATFORM_CODE_STUB(FunctionPrototype, PlatformCodeStub);
 };
 
 
-class HandlerStub: public HICStub {
+// TODO(mvstanton): Translate to hydrogen code stub.
+class LoadIndexedInterceptorStub : public PlatformCodeStub {
+ public:
+  explicit LoadIndexedInterceptorStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
+  virtual Code::StubType GetStubType() { return Code::FAST; }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+  DEFINE_PLATFORM_CODE_STUB(LoadIndexedInterceptor, PlatformCodeStub);
+};
+
+
+class HandlerStub : public HydrogenCodeStub {
  public:
   virtual Code::Kind GetCodeKind() const { return Code::HANDLER; }
-  virtual ExtraICState GetExtraICState() { return kind(); }
+  virtual ExtraICState GetExtraICState() const { return kind(); }
+  virtual InlineCacheState GetICState() const { return MONOMORPHIC; }
+
+  virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE;
+
+  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
 
  protected:
-  explicit HandlerStub(Isolate* isolate) : HICStub(isolate) { }
-  virtual int NotMissMinorKey() { return bit_field_; }
-  int bit_field_;
+  explicit HandlerStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
+
+  virtual Code::Kind kind() const = 0;
+
+  DEFINE_CODE_STUB_BASE(HandlerStub, HydrogenCodeStub);
 };
 
 
 class LoadFieldStub: public HandlerStub {
  public:
-  LoadFieldStub(Isolate* isolate, FieldIndex index)
-    : HandlerStub(isolate), index_(index) {
-    Initialize(Code::LOAD_IC);
+  LoadFieldStub(Isolate* isolate, FieldIndex index) : HandlerStub(isolate) {
+    int property_index_key = index.GetFieldAccessStubKey();
+    set_sub_minor_key(LoadFieldByIndexBits::encode(property_index_key));
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  Representation representation() {
-    if (unboxed_double()) return Representation::Double();
-    return Representation::Tagged();
+  FieldIndex index() const {
+    int property_index_key = LoadFieldByIndexBits::decode(sub_minor_key());
+    return FieldIndex::FromFieldAccessStubKey(property_index_key);
   }
 
-  virtual Code::Kind kind() const {
-    return KindBits::decode(bit_field_);
-  }
-
-  FieldIndex index() const { return index_; }
-
-  bool unboxed_double() {
-    return index_.is_double();
-  }
-
-  virtual Code::StubType GetStubType() { return Code::FAST; }
-
  protected:
-  explicit LoadFieldStub(Isolate* isolate);
-
-  void Initialize(Code::Kind kind) {
-    int property_index_key = index_.GetLoadFieldStubKey();
-    // Save a copy of the essence of the property index into the bit field to
-    // make sure that hashing of unique stubs works correctly.
-    bit_field_ = KindBits::encode(kind) |
-        EncodedLoadFieldByIndexBits::encode(property_index_key);
-  }
+  virtual Code::Kind kind() const { return Code::LOAD_IC; }
+  virtual Code::StubType GetStubType() { return Code::FAST; }
 
  private:
-  STATIC_ASSERT(KindBits::kSize == 4);
-  class EncodedLoadFieldByIndexBits: public BitField<int, 4, 13> {};
-  virtual CodeStub::Major MajorKey() { return LoadField; }
-  FieldIndex index_;
+  class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
+
+  DEFINE_HANDLER_CODE_STUB(LoadField, HandlerStub);
+};
+
+
+class KeyedLoadSloppyArgumentsStub : public HandlerStub {
+ public:
+  explicit KeyedLoadSloppyArgumentsStub(Isolate* isolate)
+      : HandlerStub(isolate) {}
+
+ protected:
+  virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
+  virtual Code::StubType GetStubType() { return Code::FAST; }
+
+ private:
+  DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
+};
+
+
+class LoadConstantStub : public HandlerStub {
+ public:
+  LoadConstantStub(Isolate* isolate, int constant_index)
+      : HandlerStub(isolate) {
+    set_sub_minor_key(ConstantIndexBits::encode(constant_index));
+  }
+
+  int constant_index() const {
+    return ConstantIndexBits::decode(sub_minor_key());
+  }
+
+ protected:
+  virtual Code::Kind kind() const { return Code::LOAD_IC; }
+  virtual Code::StubType GetStubType() { return Code::FAST; }
+
+ private:
+  class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
+
+  DEFINE_HANDLER_CODE_STUB(LoadConstant, HandlerStub);
 };
 
 
 class StringLengthStub: public HandlerStub {
  public:
-  explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {
-    Initialize(Code::LOAD_IC);
-  }
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+  explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {}
 
  protected:
-  virtual Code::Kind kind() const {
-    return KindBits::decode(bit_field_);
-  }
+  virtual Code::Kind kind() const { return Code::LOAD_IC; }
+  virtual Code::StubType GetStubType() { return Code::FAST; }
 
-  void Initialize(Code::Kind kind) {
-    bit_field_ = KindBits::encode(kind);
-  }
-
- private:
-  virtual CodeStub::Major MajorKey() { return StringLength; }
+  DEFINE_HANDLER_CODE_STUB(StringLength, HandlerStub);
 };
 
 
-class KeyedStringLengthStub: public StringLengthStub {
+class StoreFieldStub : public HandlerStub {
  public:
-  explicit KeyedStringLengthStub(Isolate* isolate) : StringLengthStub(isolate) {
-    Initialize(Code::KEYED_LOAD_IC);
+  StoreFieldStub(Isolate* isolate, FieldIndex index,
+                 Representation representation)
+      : HandlerStub(isolate) {
+    int property_index_key = index.GetFieldAccessStubKey();
+    uint8_t repr = PropertyDetails::EncodeRepresentation(representation);
+    set_sub_minor_key(StoreFieldByIndexBits::encode(property_index_key) |
+                      RepresentationBits::encode(repr));
   }
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+
+  FieldIndex index() const {
+    int property_index_key = StoreFieldByIndexBits::decode(sub_minor_key());
+    return FieldIndex::FromFieldAccessStubKey(property_index_key);
+  }
+
+  Representation representation() {
+    uint8_t repr = RepresentationBits::decode(sub_minor_key());
+    return PropertyDetails::DecodeRepresentation(repr);
+  }
+
+ protected:
+  virtual Code::Kind kind() const { return Code::STORE_IC; }
+  virtual Code::StubType GetStubType() { return Code::FAST; }
 
  private:
-  virtual CodeStub::Major MajorKey() { return KeyedStringLength; }
+  class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
+  class RepresentationBits : public BitField<uint8_t, 13, 4> {};
+
+  DEFINE_HANDLER_CODE_STUB(StoreField, HandlerStub);
 };
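
StoreFieldStub packs two values into its sub-minor key: a 13-bit field-access key and a 4-bit encoded representation. A freestanding sketch of exactly that layout (the concrete values are invented):

// Freestanding sketch of StoreFieldStub's sub-minor-key layout:
// bits [0,13) hold the field-access key, bits [13,17) the representation.
#include <cassert>
#include <cstdint>

const uint32_t kIndexMask = (1u << 13) - 1;

uint32_t Pack(int field_key, uint8_t repr) {
  assert(field_key >= 0 && static_cast<uint32_t>(field_key) <= kIndexMask);
  assert(repr < 16);  // must fit in the 4-bit representation field
  return static_cast<uint32_t>(field_key) |
         (static_cast<uint32_t>(repr) << 13);
}

int main() {
  uint32_t sub = Pack(/*field_key=*/257, /*repr=*/3);
  assert((sub & kIndexMask) == 257);  // decode the field-access key
  assert((sub >> 13) == 3);           // decode the representation tag
  return 0;
}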
 
 
@@ -984,8 +1000,8 @@
  public:
   StoreGlobalStub(Isolate* isolate, bool is_constant, bool check_global)
       : HandlerStub(isolate) {
-    bit_field_ = IsConstantBits::encode(is_constant) |
-        CheckGlobalBits::encode(check_global);
+    set_sub_minor_key(IsConstantBits::encode(is_constant) |
+                      CheckGlobalBits::encode(check_global));
   }
 
   static Handle<HeapObject> global_placeholder(Isolate* isolate) {
@@ -1009,36 +1025,29 @@
 
   virtual Code::Kind kind() const { return Code::STORE_IC; }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
+  bool is_constant() const { return IsConstantBits::decode(sub_minor_key()); }
 
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+  bool check_global() const { return CheckGlobalBits::decode(sub_minor_key()); }
 
-  bool is_constant() const {
-    return IsConstantBits::decode(bit_field_);
-  }
-  bool check_global() const {
-    return CheckGlobalBits::decode(bit_field_);
-  }
   void set_is_constant(bool value) {
-    bit_field_ = IsConstantBits::update(bit_field_, value);
+    set_sub_minor_key(IsConstantBits::update(sub_minor_key(), value));
   }
 
   Representation representation() {
-    return Representation::FromKind(RepresentationBits::decode(bit_field_));
+    return Representation::FromKind(
+        RepresentationBits::decode(sub_minor_key()));
   }
+
   void set_representation(Representation r) {
-    bit_field_ = RepresentationBits::update(bit_field_, r.kind());
+    set_sub_minor_key(RepresentationBits::update(sub_minor_key(), r.kind()));
   }
 
  private:
-  Major MajorKey() { return StoreGlobal; }
-
   class IsConstantBits: public BitField<bool, 0, 1> {};
   class RepresentationBits: public BitField<Representation::Kind, 1, 8> {};
   class CheckGlobalBits: public BitField<bool, 9, 1> {};
 
-  DISALLOW_COPY_AND_ASSIGN(StoreGlobalStub);
+  DEFINE_HANDLER_CODE_STUB(StoreGlobal, HandlerStub);
 };
 
 
@@ -1048,25 +1057,26 @@
                       bool is_store,
                       bool call_data_undefined,
                       int argc) : PlatformCodeStub(isolate) {
-    bit_field_ =
-        IsStoreBits::encode(is_store) |
-        CallDataUndefinedBits::encode(call_data_undefined) |
-        ArgumentBits::encode(argc);
-    ASSERT(!is_store || argc == 1);
+    minor_key_ = IsStoreBits::encode(is_store) |
+                 CallDataUndefinedBits::encode(call_data_undefined) |
+                 ArgumentBits::encode(argc);
+    DCHECK(!is_store || argc == 1);
   }
 
  private:
-  virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
-  virtual Major MajorKey() V8_OVERRIDE { return CallApiFunction; }
-  virtual int MinorKey() V8_OVERRIDE { return bit_field_; }
+  bool is_store() const { return IsStoreBits::decode(minor_key_); }
+  bool call_data_undefined() const {
+    return CallDataUndefinedBits::decode(minor_key_);
+  }
+  int argc() const { return ArgumentBits::decode(minor_key_); }
 
   class IsStoreBits: public BitField<bool, 0, 1> {};
   class CallDataUndefinedBits: public BitField<bool, 1, 1> {};
   class ArgumentBits: public BitField<int, 2, Code::kArgumentsBits> {};
+  STATIC_ASSERT(Code::kArgumentsBits + 2 <= kStubMinorKeyBits);
 
-  int bit_field_;
-
-  DISALLOW_COPY_AND_ASSIGN(CallApiFunctionStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiFunction);
+  DEFINE_PLATFORM_CODE_STUB(CallApiFunction, PlatformCodeStub);
 };
 
 
@@ -1074,89 +1084,67 @@
  public:
   explicit CallApiGetterStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
- private:
-  virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
-  virtual Major MajorKey() V8_OVERRIDE { return CallApiGetter; }
-  virtual int MinorKey() V8_OVERRIDE { return 0; }
-
-  DISALLOW_COPY_AND_ASSIGN(CallApiGetterStub);
-};
-
-
-class KeyedLoadFieldStub: public LoadFieldStub {
- public:
-  KeyedLoadFieldStub(Isolate* isolate, FieldIndex index)
-      : LoadFieldStub(isolate, index) {
-    Initialize(Code::KEYED_LOAD_IC);
-  }
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
-  virtual CodeStub::Major MajorKey() { return KeyedLoadField; }
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ApiGetter);
+  DEFINE_PLATFORM_CODE_STUB(CallApiGetter, PlatformCodeStub);
 };
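
Every stub in this hunk now ends with DEFINE_CALL_INTERFACE_DESCRIPTOR and a
DEFINE_*_CODE_STUB macro in place of hand-written MajorKey()/MinorKey()/
Generate() members. The macro definitions sit earlier in code-stubs.h, outside
this hunk; the sketch below is an assumed shape inferred from how they are
used here, with stand-in types so the pattern compiles on its own (the real
macros only declare Generate() rather than defining it, and also suppress
copy/assign):

#include <cassert>

struct MacroAssembler {};                       // stand-in
enum Major { CallApiGetter, CallApiFunction };  // stand-in for CodeStub::Major

// Assumed shape; the real definition differs in detail.
#define DEFINE_PLATFORM_CODE_STUB(NAME, SUPER)   \
 public:                                         \
  virtual void Generate(MacroAssembler* masm) {} \
  virtual Major MajorKey() const { return NAME; }

struct PlatformCodeStub {
  virtual ~PlatformCodeStub() {}
  virtual Major MajorKey() const = 0;
};

struct CallApiGetterStub : PlatformCodeStub {
  DEFINE_PLATFORM_CODE_STUB(CallApiGetter, PlatformCodeStub)
};

int main() {
  CallApiGetterStub stub;
  assert(stub.MajorKey() == CallApiGetter);  // major key supplied by the macro
  return 0;
}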
 
 
 class BinaryOpICStub : public HydrogenCodeStub {
  public:
-  BinaryOpICStub(Isolate* isolate, Token::Value op, OverwriteMode mode)
-      : HydrogenCodeStub(isolate, UNINITIALIZED), state_(isolate, op, mode) {}
+  BinaryOpICStub(Isolate* isolate, Token::Value op,
+                 OverwriteMode mode = NO_OVERWRITE)
+      : HydrogenCodeStub(isolate, UNINITIALIZED) {
+    BinaryOpICState state(isolate, op, mode);
+    set_sub_minor_key(state.GetExtraICState());
+  }
 
-  BinaryOpICStub(Isolate* isolate, const BinaryOpIC::State& state)
-      : HydrogenCodeStub(isolate), state_(state) {}
+  BinaryOpICStub(Isolate* isolate, const BinaryOpICState& state)
+      : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(state.GetExtraICState());
+  }
 
   static void GenerateAheadOfTime(Isolate* isolate);
 
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate);
-
-  virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+  virtual Code::Kind GetCodeKind() const OVERRIDE {
     return Code::BINARY_OP_IC;
   }
 
-  virtual InlineCacheState GetICState() V8_FINAL V8_OVERRIDE {
-    return state_.GetICState();
+  virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+    return state().GetICState();
   }
 
-  virtual ExtraICState GetExtraICState() V8_FINAL V8_OVERRIDE {
-    return state_.GetExtraICState();
+  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+    return static_cast<ExtraICState>(sub_minor_key());
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  const BinaryOpIC::State& state() const { return state_; }
-
-  virtual void PrintState(StringStream* stream) V8_FINAL V8_OVERRIDE;
-
-  virtual Major MajorKey() V8_OVERRIDE { return BinaryOpIC; }
-  virtual int NotMissMinorKey() V8_FINAL V8_OVERRIDE {
-    return GetExtraICState();
+  BinaryOpICState state() const {
+    return BinaryOpICState(isolate(), GetExtraICState());
   }
 
+  virtual void PrintState(OStream& os) const FINAL OVERRIDE;  // NOLINT
+
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kLeft = 0;
   static const int kRight = 1;
 
  private:
   static void GenerateAheadOfTime(Isolate* isolate,
-                                  const BinaryOpIC::State& state);
+                                  const BinaryOpICState& state);
 
-  BinaryOpIC::State state_;
-
-  DISALLOW_COPY_AND_ASSIGN(BinaryOpICStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_HYDROGEN_CODE_STUB(BinaryOpIC, HydrogenCodeStub);
 };
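
BinaryOpICStub stops holding a BinaryOpICState member: the constructor
flattens the state to its ExtraICState bits and state() rebuilds a fresh value
on demand. That is only sound if the state's integer encoding is lossless,
which the sketch below models with an invented two-field state (the field
split is not V8's encoding):

#include <cassert>
#include <cstdint>

typedef uint32_t ExtraICState;

// Invented stand-in for BinaryOpICState: two small fields, losslessly
// serialized to an integer.
struct FakeState {
  int op;    // plays the role of Token::Value
  int mode;  // plays the role of OverwriteMode
  ExtraICState GetExtraICState() const {
    return (static_cast<uint32_t>(op) << 2) | static_cast<uint32_t>(mode);
  }
  explicit FakeState(ExtraICState bits)  // what state() does
      : op(static_cast<int>(bits >> 2)), mode(static_cast<int>(bits & 3)) {}
  FakeState(int op, int mode) : op(op), mode(mode) {}
};

int main() {
  FakeState in(7, 2);
  ExtraICState key = in.GetExtraICState();  // stored via set_sub_minor_key()
  FakeState out(key);                       // rebuilt on each state() call
  assert(out.op == in.op && out.mode == in.mode);  // round-trip is lossless
  return 0;
}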
 
 
 // TODO(bmeurer): Merge this into the BinaryOpICStub once we have proper tail
 // call support for stubs in Hydrogen.
-class BinaryOpICWithAllocationSiteStub V8_FINAL : public PlatformCodeStub {
+class BinaryOpICWithAllocationSiteStub FINAL : public PlatformCodeStub {
  public:
   BinaryOpICWithAllocationSiteStub(Isolate* isolate,
-                                   const BinaryOpIC::State& state)
-      : PlatformCodeStub(isolate), state_(state) {}
+                                   const BinaryOpICState& state)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = state.GetExtraICState();
+  }
 
   static void GenerateAheadOfTime(Isolate* isolate);
 
@@ -1166,65 +1154,54 @@
     return CodeStub::GetCodeCopy(pattern);
   }
 
-  virtual Code::Kind GetCodeKind() const V8_OVERRIDE {
+  virtual Code::Kind GetCodeKind() const OVERRIDE {
     return Code::BINARY_OP_IC;
   }
 
-  virtual InlineCacheState GetICState() V8_OVERRIDE {
-    return state_.GetICState();
+  virtual InlineCacheState GetICState() const OVERRIDE {
+    return state().GetICState();
   }
 
-  virtual ExtraICState GetExtraICState() V8_OVERRIDE {
-    return state_.GetExtraICState();
+  virtual ExtraICState GetExtraICState() const OVERRIDE {
+    return static_cast<ExtraICState>(minor_key_);
   }
 
-  virtual void Generate(MacroAssembler* masm) V8_OVERRIDE;
-
-  virtual void PrintState(StringStream* stream) V8_OVERRIDE;
-
-  virtual Major MajorKey() V8_OVERRIDE { return BinaryOpICWithAllocationSite; }
-  virtual int MinorKey() V8_OVERRIDE { return GetExtraICState(); }
+  virtual void PrintState(OStream& os) const OVERRIDE;  // NOLINT
 
  private:
+  BinaryOpICState state() const {
+    return BinaryOpICState(isolate(), static_cast<ExtraICState>(minor_key_));
+  }
+
   static void GenerateAheadOfTime(Isolate* isolate,
-                                  const BinaryOpIC::State& state);
+                                  const BinaryOpICState& state);
 
-  BinaryOpIC::State state_;
-
-  DISALLOW_COPY_AND_ASSIGN(BinaryOpICWithAllocationSiteStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithAllocationSite);
+  DEFINE_PLATFORM_CODE_STUB(BinaryOpICWithAllocationSite, PlatformCodeStub);
 };
 
 
-class BinaryOpWithAllocationSiteStub V8_FINAL : public BinaryOpICStub {
+class BinaryOpWithAllocationSiteStub FINAL : public BinaryOpICStub {
  public:
   BinaryOpWithAllocationSiteStub(Isolate* isolate,
                                  Token::Value op,
                                  OverwriteMode mode)
       : BinaryOpICStub(isolate, op, mode) {}
 
-  BinaryOpWithAllocationSiteStub(Isolate* isolate,
-                                 const BinaryOpIC::State& state)
+  BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
       : BinaryOpICStub(isolate, state) {}
 
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate);
-
-  virtual Code::Kind GetCodeKind() const V8_FINAL V8_OVERRIDE {
+  virtual Code::Kind GetCodeKind() const FINAL OVERRIDE {
     return Code::STUB;
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual Major MajorKey() V8_OVERRIDE {
-    return BinaryOpWithAllocationSite;
-  }
-
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kAllocationSite = 0;
   static const int kLeft = 1;
   static const int kRight = 2;
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOpWithAllocationSite);
+  DEFINE_HYDROGEN_CODE_STUB(BinaryOpWithAllocationSite, BinaryOpICStub);
 };
 
 
@@ -1240,30 +1217,23 @@
 };
 
 
-class StringAddStub V8_FINAL : public HydrogenCodeStub {
+class StringAddStub FINAL : public HydrogenCodeStub {
  public:
-  StringAddStub(Isolate* isolate,
-                StringAddFlags flags,
+  StringAddStub(Isolate* isolate, StringAddFlags flags,
                 PretenureFlag pretenure_flag)
-      : HydrogenCodeStub(isolate),
-        bit_field_(StringAddFlagsBits::encode(flags) |
-                   PretenureFlagBits::encode(pretenure_flag)) {}
+      : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(StringAddFlagsBits::encode(flags) |
+                      PretenureFlagBits::encode(pretenure_flag));
+  }
 
   StringAddFlags flags() const {
-    return StringAddFlagsBits::decode(bit_field_);
+    return StringAddFlagsBits::decode(sub_minor_key());
   }
 
   PretenureFlag pretenure_flag() const {
-    return PretenureFlagBits::decode(bit_field_);
+    return PretenureFlagBits::decode(sub_minor_key());
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate);
-
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kLeft = 0;
   static const int kRight = 1;
@@ -1271,57 +1241,41 @@
  private:
   class StringAddFlagsBits: public BitField<StringAddFlags, 0, 2> {};
   class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> {};
-  uint32_t bit_field_;
 
-  virtual Major MajorKey() V8_OVERRIDE { return StringAdd; }
-  virtual int NotMissMinorKey() V8_OVERRIDE { return bit_field_; }
+  virtual void PrintBaseName(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual void PrintBaseName(StringStream* stream) V8_OVERRIDE;
-
-  DISALLOW_COPY_AND_ASSIGN(StringAddStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
+  DEFINE_HYDROGEN_CODE_STUB(StringAdd, HydrogenCodeStub);
 };
 
 
-class ICCompareStub: public PlatformCodeStub {
+class CompareICStub : public PlatformCodeStub {
  public:
-  ICCompareStub(Isolate* isolate,
-                Token::Value op,
-                CompareIC::State left,
-                CompareIC::State right,
-                CompareIC::State handler)
-      : PlatformCodeStub(isolate),
-        op_(op),
-        left_(left),
-        right_(right),
-        state_(handler) {
-    ASSERT(Token::IsCompareOp(op));
+  CompareICStub(Isolate* isolate, Token::Value op, CompareICState::State left,
+                CompareICState::State right, CompareICState::State state)
+      : PlatformCodeStub(isolate) {
+    DCHECK(Token::IsCompareOp(op));
+    minor_key_ = OpBits::encode(op - Token::EQ) | LeftStateBits::encode(left) |
+                 RightStateBits::encode(right) | StateBits::encode(state);
   }
 
-  virtual void Generate(MacroAssembler* masm);
-
   void set_known_map(Handle<Map> map) { known_map_ = map; }
 
-  static void DecodeMinorKey(int minor_key,
-                             CompareIC::State* left_state,
-                             CompareIC::State* right_state,
-                             CompareIC::State* handler_state,
-                             Token::Value* op);
+  virtual InlineCacheState GetICState() const;
 
-  virtual InlineCacheState GetICState();
-
- private:
-  class OpField: public BitField<int, 0, 3> { };
-  class LeftStateField: public BitField<int, 3, 4> { };
-  class RightStateField: public BitField<int, 7, 4> { };
-  class HandlerStateField: public BitField<int, 11, 4> { };
-
-  virtual void FinishCode(Handle<Code> code) {
-    code->set_stub_info(MinorKey());
+  Token::Value op() const {
+    return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
   }
 
-  virtual CodeStub::Major MajorKey() { return CompareIC; }
-  virtual int MinorKey();
+  CompareICState::State left() const {
+    return LeftStateBits::decode(minor_key_);
+  }
+  CompareICState::State right() const {
+    return RightStateBits::decode(minor_key_);
+  }
+  CompareICState::State state() const { return StateBits::decode(minor_key_); }
 
+ private:
   virtual Code::Kind GetCodeKind() const { return Code::COMPARE_IC; }
 
   void GenerateSmis(MacroAssembler* masm);
@@ -1334,18 +1288,24 @@
   void GenerateKnownObjects(MacroAssembler* masm);
   void GenerateGeneric(MacroAssembler* masm);
 
-  bool strict() const { return op_ == Token::EQ_STRICT; }
-  Condition GetCondition() const { return CompareIC::ComputeCondition(op_); }
+  bool strict() const { return op() == Token::EQ_STRICT; }
+  Condition GetCondition() const;
 
   virtual void AddToSpecialCache(Handle<Code> new_object);
   virtual bool FindCodeInSpecialCache(Code** code_out);
-  virtual bool UseSpecialCache() { return state_ == CompareIC::KNOWN_OBJECT; }
+  virtual bool UseSpecialCache() {
+    return state() == CompareICState::KNOWN_OBJECT;
+  }
 
-  Token::Value op_;
-  CompareIC::State left_;
-  CompareIC::State right_;
-  CompareIC::State state_;
+  class OpBits : public BitField<int, 0, 3> {};
+  class LeftStateBits : public BitField<CompareICState::State, 3, 4> {};
+  class RightStateBits : public BitField<CompareICState::State, 7, 4> {};
+  class StateBits : public BitField<CompareICState::State, 11, 4> {};
+
   Handle<Map> known_map_;
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(BinaryOp);
+  DEFINE_PLATFORM_CODE_STUB(CompareIC, PlatformCodeStub);
 };
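
CompareICStub stores the token as op minus Token::EQ in the 3-bit OpBits
field and has op() add Token::EQ back. That relies on the compare tokens being
numbered consecutively from EQ, with at most eight of them reaching this stub;
token values below are illustrative (the real ones live in src/token.h):

#include <cassert>

// Illustrative, consecutively numbered compare tokens.
enum Token { EQ, NE, EQ_STRICT, NE_STRICT, LT, GT, LTE, GTE };

int main() {
  for (int op = EQ; op <= GTE; ++op) {
    int encoded = op - EQ;                // what the constructor stores
    assert(encoded >= 0 && encoded < 8);  // fits OpBits = BitField<int, 0, 3>
    assert(EQ + encoded == op);           // what CompareICStub::op() recovers
  }
  return 0;
}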
 
 
@@ -1354,35 +1314,26 @@
   Type* GetType(Zone* zone, Handle<Map> map = Handle<Map>());
   Type* GetInputType(Zone* zone, Handle<Map> map);
 
-  CompareNilICStub(Isolate* isolate, NilValue nil)
-      : HydrogenCodeStub(isolate), nil_value_(nil) { }
+  CompareNilICStub(Isolate* isolate, NilValue nil) : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(NilValueBits::encode(nil));
+  }
 
-  CompareNilICStub(Isolate* isolate,
-                   ExtraICState ic_state,
+  CompareNilICStub(Isolate* isolate, ExtraICState ic_state,
                    InitializationState init_state = INITIALIZED)
-      : HydrogenCodeStub(isolate, init_state),
-        nil_value_(NilValueField::decode(ic_state)),
-        state_(State(TypesField::decode(ic_state))) {
-      }
+      : HydrogenCodeStub(isolate, init_state) {
+    set_sub_minor_key(ic_state);
+  }
 
   static Handle<Code> GetUninitialized(Isolate* isolate,
                                        NilValue nil) {
     return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode();
   }
 
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate) {
-    CompareNilICStub compare_stub(isolate, kNullValue, UNINITIALIZED);
-    compare_stub.InitializeInterfaceDescriptor(
-        isolate->code_stub_interface_descriptor(CodeStub::CompareNilIC));
-  }
-
-  virtual InlineCacheState GetICState() {
-    if (state_.Contains(GENERIC)) {
+  virtual InlineCacheState GetICState() const {
+    State state = this->state();
+    if (state.Contains(GENERIC)) {
       return MEGAMORPHIC;
-    } else if (state_.Contains(MONOMORPHIC_MAP)) {
+    } else if (state.Contains(MONOMORPHIC_MAP)) {
       return MONOMORPHIC;
     } else {
       return PREMONOMORPHIC;
@@ -1391,24 +1342,27 @@
 
   virtual Code::Kind GetCodeKind() const { return Code::COMPARE_NIL_IC; }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual ExtraICState GetExtraICState() {
-    return NilValueField::encode(nil_value_) |
-           TypesField::encode(state_.ToIntegral());
-  }
+  virtual ExtraICState GetExtraICState() const { return sub_minor_key(); }
 
   void UpdateStatus(Handle<Object> object);
 
-  bool IsMonomorphic() const { return state_.Contains(MONOMORPHIC_MAP); }
-  NilValue GetNilValue() const { return nil_value_; }
-  void ClearState() { state_.RemoveAll(); }
+  bool IsMonomorphic() const { return state().Contains(MONOMORPHIC_MAP); }
 
-  virtual void PrintState(StringStream* stream);
-  virtual void PrintBaseName(StringStream* stream);
+  NilValue nil_value() const { return NilValueBits::decode(sub_minor_key()); }
+
+  void ClearState() {
+    set_sub_minor_key(TypesBits::update(sub_minor_key(), 0));
+  }
+
+  virtual void PrintState(OStream& os) const OVERRIDE;     // NOLINT
+  virtual void PrintBaseName(OStream& os) const OVERRIDE;  // NOLINT
 
  private:
-  friend class CompareNilIC;
+  CompareNilICStub(Isolate* isolate, NilValue nil,
+                   InitializationState init_state)
+      : HydrogenCodeStub(isolate, init_state) {
+    set_sub_minor_key(NilValueBits::encode(nil));
+  }
 
   enum CompareNilType {
     UNDEFINED,
@@ -1427,38 +1381,35 @@
    public:
     State() : EnumSet<CompareNilType, byte>(0) { }
     explicit State(byte bits) : EnumSet<CompareNilType, byte>(bits) { }
-
-    void Print(StringStream* stream) const;
   };
+  friend OStream& operator<<(OStream& os, const State& s);
 
-  CompareNilICStub(Isolate* isolate,
-                   NilValue nil,
-                   InitializationState init_state)
-      : HydrogenCodeStub(isolate, init_state), nil_value_(nil) { }
+  State state() const { return State(TypesBits::decode(sub_minor_key())); }
 
-  class NilValueField : public BitField<NilValue, 0, 1> {};
-  class TypesField    : public BitField<byte,     1, NUMBER_OF_TYPES> {};
+  class NilValueBits : public BitField<NilValue, 0, 1> {};
+  class TypesBits : public BitField<byte, 1, NUMBER_OF_TYPES> {};
 
-  virtual CodeStub::Major MajorKey() { return CompareNilIC; }
-  virtual int NotMissMinorKey() { return GetExtraICState(); }
+  friend class CompareNilIC;
 
-  NilValue nil_value_;
-  State state_;
-
-  DISALLOW_COPY_AND_ASSIGN(CompareNilICStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CompareNil);
+  DEFINE_HYDROGEN_CODE_STUB(CompareNilIC, HydrogenCodeStub);
 };
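
GetICState() above now decodes its type set from the sub-minor key, but the
decision itself is unchanged: GENERIC forces MEGAMORPHIC even when
MONOMORPHIC_MAP is also present. The same logic modeled standalone, with
illustrative bit values:

#include <cassert>

enum { MONOMORPHIC_MAP_BIT = 1 << 0, GENERIC_BIT = 1 << 1 };  // illustrative
enum ICState { PREMONOMORPHIC, MONOMORPHIC, MEGAMORPHIC };

// Mirrors CompareNilICStub::GetICState(): the order of the checks matters.
static ICState GetICState(unsigned state) {
  if (state & GENERIC_BIT) return MEGAMORPHIC;
  if (state & MONOMORPHIC_MAP_BIT) return MONOMORPHIC;
  return PREMONOMORPHIC;
}

int main() {
  assert(GetICState(0) == PREMONOMORPHIC);
  assert(GetICState(MONOMORPHIC_MAP_BIT) == MONOMORPHIC);
  assert(GetICState(GENERIC_BIT | MONOMORPHIC_MAP_BIT) == MEGAMORPHIC);
  return 0;
}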
 
 
+OStream& operator<<(OStream& os, const CompareNilICStub::State& s);
+
+
 class CEntryStub : public PlatformCodeStub {
  public:
-  CEntryStub(Isolate* isolate,
-             int result_size,
+  CEntryStub(Isolate* isolate, int result_size,
              SaveFPRegsMode save_doubles = kDontSaveFPRegs)
-      : PlatformCodeStub(isolate),
-        result_size_(result_size),
-        save_doubles_(save_doubles) { }
-
-  void Generate(MacroAssembler* masm);
+      : PlatformCodeStub(isolate) {
+    minor_key_ = SaveDoublesBits::encode(save_doubles == kSaveFPRegs);
+    DCHECK(result_size == 1 || result_size == 2);
+#ifdef _WIN64
+    minor_key_ = ResultSizeBits::update(minor_key_, result_size);
+#endif  // _WIN64
+  }
 
   // The version of this stub that doesn't save doubles is generated ahead of
   // time, so it's OK to call it from other stubs that can't cope with GC during
@@ -1467,48 +1418,47 @@
   static void GenerateAheadOfTime(Isolate* isolate);
 
  private:
-  // Number of pointers/values returned.
-  const int result_size_;
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return CEntry; }
-  int MinorKey();
+  bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
+#ifdef _WIN64
+  int result_size() const { return ResultSizeBits::decode(minor_key_); }
+#endif  // _WIN64
 
   bool NeedsImmovableCode();
+
+  class SaveDoublesBits : public BitField<bool, 0, 1> {};
+  class ResultSizeBits : public BitField<int, 1, 3> {};
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(CEntry, PlatformCodeStub);
 };
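
CEntryStub encodes result_size into the minor key only under _WIN64: elsewhere
the generated code does not depend on it, so one- and two-word results can
share a cached stub, while on the Windows x64 calling convention the code
differs and the keys must too. The effect on key equality, using the bit
layout from the class (the win64 flag replaces the preprocessor test for the
example):

#include <cassert>
#include <cstdint>

// Bit 0: save doubles; bits 1..3: result size (encoded only on _WIN64).
static uint32_t MakeMinorKey(bool save_doubles, int result_size, bool win64) {
  uint32_t key = save_doubles ? 1u : 0u;
  if (win64) key |= static_cast<uint32_t>(result_size) << 1;
  return key;
}

int main() {
  // Off Windows x64 the two result sizes collapse onto one cached stub...
  assert(MakeMinorKey(false, 1, false) == MakeMinorKey(false, 2, false));
  // ...on Windows x64 they stay distinct, matching the #ifdef _WIN64 update.
  assert(MakeMinorKey(false, 1, true) != MakeMinorKey(false, 2, true));
  return 0;
}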
 
 
 class JSEntryStub : public PlatformCodeStub {
  public:
-  explicit JSEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
-  void Generate(MacroAssembler* masm) { GenerateBody(masm, false); }
-
- protected:
-  void GenerateBody(MacroAssembler* masm, bool is_construct);
+  JSEntryStub(Isolate* isolate, StackFrame::Type type)
+      : PlatformCodeStub(isolate) {
+    DCHECK(type == StackFrame::ENTRY || type == StackFrame::ENTRY_CONSTRUCT);
+    minor_key_ = StackFrameTypeBits::encode(type);
+  }
 
  private:
-  Major MajorKey() { return JSEntry; }
-  int MinorKey() { return 0; }
-
   virtual void FinishCode(Handle<Code> code);
 
-  int handler_offset_;
-};
-
-
-class JSConstructEntryStub : public JSEntryStub {
- public:
-  explicit JSConstructEntryStub(Isolate* isolate) : JSEntryStub(isolate) { }
-
-  void Generate(MacroAssembler* masm) { GenerateBody(masm, true); }
-
- private:
-  int MinorKey() { return 1; }
-
-  virtual void PrintName(StringStream* stream) {
-    stream->Add("JSConstructEntryStub");
+  virtual void PrintName(OStream& os) const OVERRIDE {  // NOLINT
+    os << (type() == StackFrame::ENTRY ? "JSEntryStub"
+                                       : "JSConstructEntryStub");
   }
+
+  StackFrame::Type type() const {
+    return StackFrameTypeBits::decode(minor_key_);
+  }
+
+  class StackFrameTypeBits : public BitField<StackFrame::Type, 0, 5> {};
+
+  int handler_offset_;
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(JSEntry, PlatformCodeStub);
 };
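
JSConstructEntryStub is gone as a class: both entry flavors become a single
JSEntryStub keyed by StackFrame::Type, replacing the old hard-coded MinorKey()
values 0 and 1. Distinct frame types give distinct minor keys, so the stub
cache still holds two code objects; enumerator values below are illustrative
(the real ones come from src/frames.h):

#include <cassert>
#include <cstdint>

enum StackFrameType { ENTRY, ENTRY_CONSTRUCT };  // illustrative values

// StackFrameTypeBits is BitField<StackFrame::Type, 0, 5>: five low bits.
static uint32_t Encode(StackFrameType type) {
  return static_cast<uint32_t>(type) & 0x1fu;
}

int main() {
  assert(Encode(ENTRY) != Encode(ENTRY_CONSTRUCT));  // two cache entries
  return 0;
}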
 
 
@@ -1521,22 +1471,30 @@
     NEW_STRICT
   };
 
-  ArgumentsAccessStub(Isolate* isolate, Type type)
-      : PlatformCodeStub(isolate), type_(type) { }
+  ArgumentsAccessStub(Isolate* isolate, Type type) : PlatformCodeStub(isolate) {
+    minor_key_ = TypeBits::encode(type);
+  }
+
+  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+    if (type() == READ_ELEMENT) {
+      return ArgumentsAccessReadDescriptor(isolate());
+    }
+    return ContextOnlyDescriptor(isolate());
+  }
 
  private:
-  Type type_;
+  Type type() const { return TypeBits::decode(minor_key_); }
 
-  Major MajorKey() { return ArgumentsAccess; }
-  int MinorKey() { return type_; }
-
-  void Generate(MacroAssembler* masm);
   void GenerateReadElement(MacroAssembler* masm);
   void GenerateNewStrict(MacroAssembler* masm);
   void GenerateNewSloppyFast(MacroAssembler* masm);
   void GenerateNewSloppySlow(MacroAssembler* masm);
 
-  virtual void PrintName(StringStream* stream);
+  virtual void PrintName(OStream& os) const OVERRIDE;  // NOLINT
+
+  class TypeBits : public BitField<Type, 0, 2> {};
+
+  DEFINE_PLATFORM_CODE_STUB(ArgumentsAccess, PlatformCodeStub);
 };
 
 
@@ -1544,98 +1502,84 @@
  public:
   explicit RegExpExecStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
 
- private:
-  Major MajorKey() { return RegExpExec; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+  DEFINE_PLATFORM_CODE_STUB(RegExpExec, PlatformCodeStub);
 };
 
 
-class RegExpConstructResultStub V8_FINAL : public HydrogenCodeStub {
+class RegExpConstructResultStub FINAL : public HydrogenCodeStub {
  public:
   explicit RegExpConstructResultStub(Isolate* isolate)
       : HydrogenCodeStub(isolate) { }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  virtual Major MajorKey() V8_OVERRIDE { return RegExpConstructResult; }
-  virtual int NotMissMinorKey() V8_OVERRIDE { return 0; }
-
-  static void InstallDescriptors(Isolate* isolate);
-
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kLength = 0;
   static const int kIndex = 1;
   static const int kInput = 2;
 
- private:
-  DISALLOW_COPY_AND_ASSIGN(RegExpConstructResultStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(RegExpConstructResult);
+  DEFINE_HYDROGEN_CODE_STUB(RegExpConstructResult, HydrogenCodeStub);
 };
 
 
 class CallFunctionStub: public PlatformCodeStub {
  public:
   CallFunctionStub(Isolate* isolate, int argc, CallFunctionFlags flags)
-      : PlatformCodeStub(isolate), argc_(argc), flags_(flags) { }
-
-  void Generate(MacroAssembler* masm);
+      : PlatformCodeStub(isolate) {
+    DCHECK(argc >= 0 && argc <= Code::kMaxArguments);
+    minor_key_ = ArgcBits::encode(argc) | FlagBits::encode(flags);
+  }
 
   static int ExtractArgcFromMinorKey(int minor_key) {
     return ArgcBits::decode(minor_key);
   }
 
  private:
-  int argc_;
-  CallFunctionFlags flags_;
+  int argc() const { return ArgcBits::decode(minor_key_); }
+  int flags() const { return FlagBits::decode(minor_key_); }
 
-  virtual void PrintName(StringStream* stream);
+  bool CallAsMethod() const {
+    return flags() == CALL_AS_METHOD || flags() == WRAP_AND_CALL;
+  }
+
+  bool NeedsChecks() const { return flags() != WRAP_AND_CALL; }
+
+  virtual void PrintName(OStream& os) const OVERRIDE;  // NOLINT
 
   // Minor key encoding in 32 bits with BitField<Type, shift, size>.
-  class FlagBits: public BitField<CallFunctionFlags, 0, 2> {};
-  class ArgcBits: public BitField<unsigned, 2, 32 - 2> {};
+  class FlagBits : public BitField<CallFunctionFlags, 0, 2> {};
+  class ArgcBits : public BitField<unsigned, 2, Code::kArgumentsBits> {};
+  STATIC_ASSERT(Code::kArgumentsBits + 2 <= kStubMinorKeyBits);
 
-  Major MajorKey() { return CallFunction; }
-  int MinorKey() {
-    // Encode the parameters in a unique 32 bit value.
-    return FlagBits::encode(flags_) | ArgcBits::encode(argc_);
-  }
-
-  bool CallAsMethod() {
-    return flags_ == CALL_AS_METHOD || flags_ == WRAP_AND_CALL;
-  }
-
-  bool NeedsChecks() {
-    return flags_ != WRAP_AND_CALL;
-  }
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunction);
+  DEFINE_PLATFORM_CODE_STUB(CallFunction, PlatformCodeStub);
 };
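
ArgcBits narrows from BitField<unsigned, 2, 30> to Code::kArgumentsBits wide,
and the new STATIC_ASSERT records why: minor_key_ no longer has a full 32 bits
to itself, since the stub key shares its word with the major key. A
compile-time restatement with assumed constants (only the inequality is the
point; V8 derives the real values from the Smi payload width and the number of
major keys):

// Assumed values for illustration only.
const int kArgumentsBits = 16;     // stands in for Code::kArgumentsBits
const int kStubMinorKeyBits = 24;  // stands in for the real minor-key width

// The constraint the new STATIC_ASSERT enforces: two flag bits plus the argc
// field must fit inside the minor key, or encode() would silently truncate.
static_assert(kArgumentsBits + 2 <= kStubMinorKeyBits,
              "flags plus argc must fit in the stub minor key");

int main() { return 0; }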
 
 
 class CallConstructStub: public PlatformCodeStub {
  public:
   CallConstructStub(Isolate* isolate, CallConstructorFlags flags)
-      : PlatformCodeStub(isolate), flags_(flags) {}
-
-  void Generate(MacroAssembler* masm);
+      : PlatformCodeStub(isolate) {
+    minor_key_ = FlagBits::encode(flags);
+  }
 
   virtual void FinishCode(Handle<Code> code) {
     code->set_has_function_cache(RecordCallTarget());
   }
 
  private:
-  CallConstructorFlags flags_;
+  CallConstructorFlags flags() const { return FlagBits::decode(minor_key_); }
 
-  virtual void PrintName(StringStream* stream);
-
-  Major MajorKey() { return CallConstruct; }
-  int MinorKey() { return flags_; }
-
-  bool RecordCallTarget() {
-    return (flags_ & RECORD_CONSTRUCTOR_TARGET) != 0;
+  bool RecordCallTarget() const {
+    return (flags() & RECORD_CONSTRUCTOR_TARGET) != 0;
   }
+
+  virtual void PrintName(OStream& os) const OVERRIDE;  // NOLINT
+
+  class FlagBits : public BitField<CallConstructorFlags, 0, 1> {};
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CallConstruct);
+  DEFINE_PLATFORM_CODE_STUB(CallConstruct, PlatformCodeStub);
 };
 
 
@@ -1676,8 +1620,8 @@
         index_not_number_(index_not_number),
         index_out_of_range_(index_out_of_range),
         index_flags_(index_flags) {
-    ASSERT(!result_.is(object_));
-    ASSERT(!result_.is(index_));
+    DCHECK(!result_.is(object_));
+    DCHECK(!result_.is(index_));
   }
 
   // Generates the fast case code. On the fallthrough path |result|
@@ -1724,7 +1668,7 @@
                               Register result)
       : code_(code),
         result_(result) {
-    ASSERT(!code_.is(result_));
+    DCHECK(!code_.is(result_));
   }
 
   // Generates the fast case code. On the fallthrough path |result|
@@ -1813,104 +1757,155 @@
 };
 
 
-class KeyedLoadDictionaryElementStub : public HydrogenCodeStub {
+class LoadDictionaryElementStub : public HydrogenCodeStub {
  public:
-  explicit KeyedLoadDictionaryElementStub(Isolate* isolate)
+  explicit LoadDictionaryElementStub(Isolate* isolate)
       : HydrogenCodeStub(isolate) {}
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
-  Major MajorKey() { return KeyedLoadElement; }
-  int NotMissMinorKey() { return DICTIONARY_ELEMENTS; }
-
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+  DEFINE_HYDROGEN_CODE_STUB(LoadDictionaryElement, HydrogenCodeStub);
 };
 
 
-class KeyedLoadDictionaryElementPlatformStub : public PlatformCodeStub {
+class KeyedLoadGenericStub : public HydrogenCodeStub {
  public:
-  explicit KeyedLoadDictionaryElementPlatformStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) {}
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Major MajorKey() { return KeyedLoadElement; }
-  int MinorKey() { return DICTIONARY_ELEMENTS; }
-
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadDictionaryElementPlatformStub);
-};
-
-
-class KeyedLoadGenericElementStub : public HydrogenCodeStub {
- public:
-  explicit KeyedLoadGenericElementStub(Isolate *isolate)
-      : HydrogenCodeStub(isolate) {}
-
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
-  static void InstallDescriptors(Isolate* isolate);
+  explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
 
   virtual Code::Kind GetCodeKind() const { return Code::KEYED_LOAD_IC; }
-  virtual InlineCacheState GetICState() { return GENERIC; }
+  virtual InlineCacheState GetICState() const { return GENERIC; }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+  DEFINE_HYDROGEN_CODE_STUB(KeyedLoadGeneric, HydrogenCodeStub);
+};
+
+
+class LoadICTrampolineStub : public PlatformCodeStub {
+ public:
+  LoadICTrampolineStub(Isolate* isolate, const LoadICState& state)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = state.GetExtraICState();
+  }
+
+  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
+
+  virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+    return GENERIC;
+  }
+
+  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+    return static_cast<ExtraICState>(minor_key_);
+  }
 
  private:
-  Major MajorKey() { return KeyedLoadGeneric; }
-  int NotMissMinorKey() { return 0; }
+  LoadICState state() const {
+    return LoadICState(static_cast<ExtraICState>(minor_key_));
+  }
 
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadGenericElementStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadICTrampoline);
+  DEFINE_PLATFORM_CODE_STUB(LoadICTrampoline, PlatformCodeStub);
+};
+
+
+class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
+ public:
+  explicit KeyedLoadICTrampolineStub(Isolate* isolate)
+      : LoadICTrampolineStub(isolate, LoadICState(0)) {}
+
+  virtual Code::Kind GetCodeKind() const OVERRIDE {
+    return Code::KEYED_LOAD_IC;
+  }
+
+  DEFINE_PLATFORM_CODE_STUB(KeyedLoadICTrampoline, LoadICTrampolineStub);
+};
+
+
+class MegamorphicLoadStub : public HydrogenCodeStub {
+ public:
+  MegamorphicLoadStub(Isolate* isolate, const LoadICState& state)
+      : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(state.GetExtraICState());
+  }
+
+  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
+
+  virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+    return MEGAMORPHIC;
+  }
+
+  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+    return static_cast<ExtraICState>(sub_minor_key());
+  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+  DEFINE_HYDROGEN_CODE_STUB(MegamorphicLoad, HydrogenCodeStub);
+};
+
+
+class VectorLoadStub : public HydrogenCodeStub {
+ public:
+  explicit VectorLoadStub(Isolate* isolate, const LoadICState& state)
+      : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(state.GetExtraICState());
+  }
+
+  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
+
+  virtual InlineCacheState GetICState() const FINAL OVERRIDE {
+    return GENERIC;
+  }
+
+  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+    return static_cast<ExtraICState>(sub_minor_key());
+  }
+
+ private:
+  LoadICState state() const { return LoadICState(GetExtraICState()); }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
+  DEFINE_HYDROGEN_CODE_STUB(VectorLoad, HydrogenCodeStub);
+};
+
+
+class VectorKeyedLoadStub : public VectorLoadStub {
+ public:
+  explicit VectorKeyedLoadStub(Isolate* isolate)
+      : VectorLoadStub(isolate, LoadICState(0)) {}
+
+  virtual Code::Kind GetCodeKind() const OVERRIDE {
+    return Code::KEYED_LOAD_IC;
+  }
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
+  DEFINE_HYDROGEN_CODE_STUB(VectorKeyedLoad, VectorLoadStub);
 };
 
 
 class DoubleToIStub : public PlatformCodeStub {
  public:
-  DoubleToIStub(Isolate* isolate,
-                Register source,
-                Register destination,
-                int offset,
-                bool is_truncating,
-                bool skip_fastpath = false)
-      : PlatformCodeStub(isolate), bit_field_(0) {
-    bit_field_ = SourceRegisterBits::encode(source.code()) |
-      DestinationRegisterBits::encode(destination.code()) |
-      OffsetBits::encode(offset) |
-      IsTruncatingBits::encode(is_truncating) |
-      SkipFastPathBits::encode(skip_fastpath) |
-      SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
+  DoubleToIStub(Isolate* isolate, Register source, Register destination,
+                int offset, bool is_truncating, bool skip_fastpath = false)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = SourceRegisterBits::encode(source.code()) |
+                 DestinationRegisterBits::encode(destination.code()) |
+                 OffsetBits::encode(offset) |
+                 IsTruncatingBits::encode(is_truncating) |
+                 SkipFastPathBits::encode(skip_fastpath) |
+                 SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
   }
 
-  Register source() {
-    return Register::from_code(SourceRegisterBits::decode(bit_field_));
-  }
-
-  Register destination() {
-    return Register::from_code(DestinationRegisterBits::decode(bit_field_));
-  }
-
-  bool is_truncating() {
-    return IsTruncatingBits::decode(bit_field_);
-  }
-
-  bool skip_fastpath() {
-    return SkipFastPathBits::decode(bit_field_);
-  }
-
-  int offset() {
-    return OffsetBits::decode(bit_field_);
-  }
-
-  void Generate(MacroAssembler* masm);
-
   virtual bool SometimesSetsUpAFrame() { return false; }
 
  private:
+  Register source() const {
+    return Register::from_code(SourceRegisterBits::decode(minor_key_));
+  }
+  Register destination() const {
+    return Register::from_code(DestinationRegisterBits::decode(minor_key_));
+  }
+  bool is_truncating() const { return IsTruncatingBits::decode(minor_key_); }
+  bool skip_fastpath() const { return SkipFastPathBits::decode(minor_key_); }
+  int offset() const { return OffsetBits::decode(minor_key_); }
+
   static const int kBitsPerRegisterNumber = 6;
   STATIC_ASSERT((1L << kBitsPerRegisterNumber) >= Register::kNumRegisters);
   class SourceRegisterBits:
@@ -1927,89 +1922,62 @@
   class SSE3Bits:
       public BitField<int, 2 * kBitsPerRegisterNumber + 5, 1> {};  // NOLINT
 
-  Major MajorKey() { return DoubleToI; }
-  int MinorKey() { return bit_field_; }
-
-  int bit_field_;
-
-  DISALLOW_COPY_AND_ASSIGN(DoubleToIStub);
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(DoubleToI, PlatformCodeStub);
 };
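
DoubleToIStub packs two 6-bit register codes plus offset and flag bits into
the minor key; SSE3Bits, the topmost field, starts at bit
2 * kBitsPerRegisterNumber + 5 and is one bit wide, so the whole layout spans
18 bits. The budget, checked (field positions inferred from the declarations
visible above):

#include <cassert>

const int kBitsPerRegisterNumber = 6;  // from DoubleToIStub

int main() {
  // Two register codes, five offset/flag bits between them and SSE3Bits,
  // then SSE3Bits itself:
  const int used_bits = 2 * kBitsPerRegisterNumber + 5 + 1;
  assert(used_bits == 18);
  assert(used_bits <= 32);  // comfortably inside the 32-bit minor key
  // The STATIC_ASSERT in the class guarantees 6 bits cover every port's
  // register file (up to 64 registers).
  assert((1 << kBitsPerRegisterNumber) == 64);
  return 0;
}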
 
 
-class KeyedLoadFastElementStub : public HydrogenCodeStub {
+class LoadFastElementStub : public HydrogenCodeStub {
  public:
-  KeyedLoadFastElementStub(Isolate* isolate,
-                           bool is_js_array,
-                           ElementsKind elements_kind)
+  LoadFastElementStub(Isolate* isolate, bool is_js_array,
+                      ElementsKind elements_kind)
       : HydrogenCodeStub(isolate) {
-    bit_field_ = ElementsKindBits::encode(elements_kind) |
-        IsJSArrayBits::encode(is_js_array);
+    set_sub_minor_key(ElementsKindBits::encode(elements_kind) |
+                      IsJSArrayBits::encode(is_js_array));
   }
 
-  bool is_js_array() const {
-    return IsJSArrayBits::decode(bit_field_);
-  }
+  bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
 
   ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(bit_field_);
+    return ElementsKindBits::decode(sub_minor_key());
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
  private:
   class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
   class IsJSArrayBits: public BitField<bool, 8, 1> {};
-  uint32_t bit_field_;
 
-  Major MajorKey() { return KeyedLoadElement; }
-  int NotMissMinorKey() { return bit_field_; }
-
-  DISALLOW_COPY_AND_ASSIGN(KeyedLoadFastElementStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
+  DEFINE_HYDROGEN_CODE_STUB(LoadFastElement, HydrogenCodeStub);
 };
 
 
-class KeyedStoreFastElementStub : public HydrogenCodeStub {
+class StoreFastElementStub : public HydrogenCodeStub {
  public:
-  KeyedStoreFastElementStub(Isolate* isolate,
-                            bool is_js_array,
-                            ElementsKind elements_kind,
-                            KeyedAccessStoreMode mode)
+  StoreFastElementStub(Isolate* isolate, bool is_js_array,
+                       ElementsKind elements_kind, KeyedAccessStoreMode mode)
       : HydrogenCodeStub(isolate) {
-    bit_field_ = ElementsKindBits::encode(elements_kind) |
-        IsJSArrayBits::encode(is_js_array) |
-        StoreModeBits::encode(mode);
+    set_sub_minor_key(ElementsKindBits::encode(elements_kind) |
+                      IsJSArrayBits::encode(is_js_array) |
+                      StoreModeBits::encode(mode));
   }
 
-  bool is_js_array() const {
-    return IsJSArrayBits::decode(bit_field_);
-  }
+  bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
 
   ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(bit_field_);
+    return ElementsKindBits::decode(sub_minor_key());
   }
 
   KeyedAccessStoreMode store_mode() const {
-    return StoreModeBits::decode(bit_field_);
+    return StoreModeBits::decode(sub_minor_key());
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
  private:
   class ElementsKindBits: public BitField<ElementsKind,      0, 8> {};
   class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
   class IsJSArrayBits: public BitField<bool,                12, 1> {};
-  uint32_t bit_field_;
 
-  Major MajorKey() { return KeyedStoreElement; }
-  int NotMissMinorKey() { return bit_field_; }
-
-  DISALLOW_COPY_AND_ASSIGN(KeyedStoreFastElementStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+  DEFINE_HYDROGEN_CODE_STUB(StoreFastElement, HydrogenCodeStub);
 };
 
 
@@ -2019,38 +1987,26 @@
                              ElementsKind from_kind,
                              ElementsKind to_kind,
                              bool is_js_array) : HydrogenCodeStub(isolate) {
-    bit_field_ = FromKindBits::encode(from_kind) |
-                 ToKindBits::encode(to_kind) |
-                 IsJSArrayBits::encode(is_js_array);
+    set_sub_minor_key(FromKindBits::encode(from_kind) |
+                      ToKindBits::encode(to_kind) |
+                      IsJSArrayBits::encode(is_js_array));
   }
 
   ElementsKind from_kind() const {
-    return FromKindBits::decode(bit_field_);
+    return FromKindBits::decode(sub_minor_key());
   }
 
-  ElementsKind to_kind() const {
-    return ToKindBits::decode(bit_field_);
-  }
+  ElementsKind to_kind() const { return ToKindBits::decode(sub_minor_key()); }
 
-  bool is_js_array() const {
-    return IsJSArrayBits::decode(bit_field_);
-  }
-
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+  bool is_js_array() const { return IsJSArrayBits::decode(sub_minor_key()); }
 
  private:
   class FromKindBits: public BitField<ElementsKind, 8, 8> {};
   class ToKindBits: public BitField<ElementsKind, 0, 8> {};
   class IsJSArrayBits: public BitField<bool, 16, 1> {};
-  uint32_t bit_field_;
 
-  Major MajorKey() { return TransitionElementsKind; }
-  int NotMissMinorKey() { return bit_field_; }
-
-  DISALLOW_COPY_AND_ASSIGN(TransitionElementsKindStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(TransitionElementsKind);
+  DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
 };
 
 
@@ -2063,42 +2019,38 @@
     // It only makes sense to override local allocation site behavior
     // if there is a difference between the global allocation site policy
     // for an ElementsKind and the desired usage of the stub.
-    ASSERT(override_mode != DISABLE_ALLOCATION_SITES ||
+    DCHECK(override_mode != DISABLE_ALLOCATION_SITES ||
            AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE);
-    bit_field_ = ElementsKindBits::encode(kind) |
-        AllocationSiteOverrideModeBits::encode(override_mode);
+    set_sub_minor_key(ElementsKindBits::encode(kind) |
+                      AllocationSiteOverrideModeBits::encode(override_mode));
   }
 
   ElementsKind elements_kind() const {
-    return ElementsKindBits::decode(bit_field_);
+    return ElementsKindBits::decode(sub_minor_key());
   }
 
   AllocationSiteOverrideMode override_mode() const {
-    return AllocationSiteOverrideModeBits::decode(bit_field_);
+    return AllocationSiteOverrideModeBits::decode(sub_minor_key());
   }
 
   static void GenerateStubsAheadOfTime(Isolate* isolate);
-  static void InstallDescriptors(Isolate* isolate);
 
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kConstructor = 0;
   static const int kAllocationSite = 1;
 
  protected:
-  void BasePrintName(const char* name, StringStream* stream);
+  OStream& BasePrintName(OStream& os, const char* name) const;  // NOLINT
 
  private:
-  int NotMissMinorKey() { return bit_field_; }
-
   // Ensure data fits within available bits.
   STATIC_ASSERT(LAST_ALLOCATION_SITE_OVERRIDE_MODE == 1);
 
   class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
   class AllocationSiteOverrideModeBits: public
       BitField<AllocationSiteOverrideMode, 8, 1> {};  // NOLINT
-  uint32_t bit_field_;
 
-  DISALLOW_COPY_AND_ASSIGN(ArrayConstructorStubBase);
+  DEFINE_CODE_STUB_BASE(ArrayConstructorStubBase, HydrogenCodeStub);
 };
 
 
@@ -2111,19 +2063,14 @@
       : ArrayConstructorStubBase(isolate, kind, override_mode) {
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
  private:
-  Major MajorKey() { return ArrayNoArgumentConstructor; }
-
-  virtual void PrintName(StringStream* stream) {
-    BasePrintName("ArrayNoArgumentConstructorStub", stream);
+  virtual void PrintName(OStream& os) const OVERRIDE {  // NOLINT
+    BasePrintName(os, "ArrayNoArgumentConstructorStub");
   }
 
-  DISALLOW_COPY_AND_ASSIGN(ArrayNoArgumentConstructorStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructorConstantArgCount);
+  DEFINE_HYDROGEN_CODE_STUB(ArrayNoArgumentConstructor,
+                            ArrayConstructorStubBase);
 };
 
 
@@ -2136,19 +2083,14 @@
       : ArrayConstructorStubBase(isolate, kind, override_mode) {
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
  private:
-  Major MajorKey() { return ArraySingleArgumentConstructor; }
-
-  virtual void PrintName(StringStream* stream) {
-    BasePrintName("ArraySingleArgumentConstructorStub", stream);
+  virtual void PrintName(OStream& os) const OVERRIDE {  // NOLINT
+    BasePrintName(os, "ArraySingleArgumentConstructorStub");
   }
 
-  DISALLOW_COPY_AND_ASSIGN(ArraySingleArgumentConstructorStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
+  DEFINE_HYDROGEN_CODE_STUB(ArraySingleArgumentConstructor,
+                            ArrayConstructorStubBase);
 };
 
 
@@ -2161,19 +2103,14 @@
       : ArrayConstructorStubBase(isolate, kind, override_mode) {
   }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
  private:
-  Major MajorKey() { return ArrayNArgumentsConstructor; }
-
-  virtual void PrintName(StringStream* stream) {
-    BasePrintName("ArrayNArgumentsConstructorStub", stream);
+  virtual void PrintName(OStream& os) const OVERRIDE {  // NOLINT
+    BasePrintName(os, "ArrayNArgumentsConstructorStub");
   }
 
-  DISALLOW_COPY_AND_ASSIGN(ArrayNArgumentsConstructorStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ArrayConstructor);
+  DEFINE_HYDROGEN_CODE_STUB(ArrayNArgumentsConstructor,
+                            ArrayConstructorStubBase);
 };
 
 
@@ -2181,23 +2118,22 @@
  public:
   InternalArrayConstructorStubBase(Isolate* isolate, ElementsKind kind)
       : HydrogenCodeStub(isolate) {
-    kind_ = kind;
+    set_sub_minor_key(ElementsKindBits::encode(kind));
   }
 
   static void GenerateStubsAheadOfTime(Isolate* isolate);
-  static void InstallDescriptors(Isolate* isolate);
 
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kConstructor = 0;
 
-  ElementsKind elements_kind() const { return kind_; }
+  ElementsKind elements_kind() const {
+    return ElementsKindBits::decode(sub_minor_key());
+  }
 
  private:
-  int NotMissMinorKey() { return kind_; }
+  class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
 
-  ElementsKind kind_;
-
-  DISALLOW_COPY_AND_ASSIGN(InternalArrayConstructorStubBase);
+  DEFINE_CODE_STUB_BASE(InternalArrayConstructorStubBase, HydrogenCodeStub);
 };
 
 
@@ -2208,15 +2144,9 @@
                                          ElementsKind kind)
       : InternalArrayConstructorStubBase(isolate, kind) { }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
-  Major MajorKey() { return InternalArrayNoArgumentConstructor; }
-
-  DISALLOW_COPY_AND_ASSIGN(InternalArrayNoArgumentConstructorStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructorConstantArgCount);
+  DEFINE_HYDROGEN_CODE_STUB(InternalArrayNoArgumentConstructor,
+                            InternalArrayConstructorStubBase);
 };
 
 
@@ -2227,15 +2157,9 @@
                                              ElementsKind kind)
       : InternalArrayConstructorStubBase(isolate, kind) { }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
-  Major MajorKey() { return InternalArraySingleArgumentConstructor; }
-
-  DISALLOW_COPY_AND_ASSIGN(InternalArraySingleArgumentConstructorStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
+  DEFINE_HYDROGEN_CODE_STUB(InternalArraySingleArgumentConstructor,
+                            InternalArrayConstructorStubBase);
 };
 
 
@@ -2245,48 +2169,28 @@
   InternalArrayNArgumentsConstructorStub(Isolate* isolate, ElementsKind kind)
       : InternalArrayConstructorStubBase(isolate, kind) { }
 
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
-  Major MajorKey() { return InternalArrayNArgumentsConstructor; }
-
-  DISALLOW_COPY_AND_ASSIGN(InternalArrayNArgumentsConstructorStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(InternalArrayConstructor);
+  DEFINE_HYDROGEN_CODE_STUB(InternalArrayNArgumentsConstructor,
+                            InternalArrayConstructorStubBase);
 };
 
 
-class KeyedStoreElementStub : public PlatformCodeStub {
+class StoreElementStub : public PlatformCodeStub {
  public:
-  KeyedStoreElementStub(Isolate* isolate,
-                        bool is_js_array,
-                        ElementsKind elements_kind,
-                        KeyedAccessStoreMode store_mode)
-      : PlatformCodeStub(isolate),
-        is_js_array_(is_js_array),
-        elements_kind_(elements_kind),
-        store_mode_(store_mode) { }
-
-  Major MajorKey() { return KeyedStoreElement; }
-  int MinorKey() {
-    return ElementsKindBits::encode(elements_kind_) |
-        IsJSArrayBits::encode(is_js_array_) |
-        StoreModeBits::encode(store_mode_);
+  StoreElementStub(Isolate* isolate, ElementsKind elements_kind)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = ElementsKindBits::encode(elements_kind);
   }
 
-  void Generate(MacroAssembler* masm);
-
  private:
-  class ElementsKindBits: public BitField<ElementsKind,      0, 8> {};
-  class StoreModeBits: public BitField<KeyedAccessStoreMode, 8, 4> {};
-  class IsJSArrayBits: public BitField<bool,                12, 1> {};
+  ElementsKind elements_kind() const {
+    return ElementsKindBits::decode(minor_key_);
+  }
 
-  bool is_js_array_;
-  ElementsKind elements_kind_;
-  KeyedAccessStoreMode store_mode_;
+  class ElementsKindBits : public BitField<ElementsKind, 0, 8> {};
 
-  DISALLOW_COPY_AND_ASSIGN(KeyedStoreElementStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(Store);
+  DEFINE_PLATFORM_CODE_STUB(StoreElement, PlatformCodeStub);
 };
 
 
@@ -2304,6 +2208,12 @@
     NUMBER_OF_TYPES
   };
 
+  enum ResultMode {
+    RESULT_AS_SMI,             // For Smi(1) on truthy value, Smi(0) otherwise.
+    RESULT_AS_ODDBALL,         // For {true} on truthy value, {false} otherwise.
+    RESULT_AS_INVERSE_ODDBALL  // For {false} on truthy value, {true} otherwise.
+  };
+
   // At most 8 different types can be distinguished, because the Code object
   // only has room for a single byte to hold a set of these types. :-P
   STATIC_ASSERT(NUMBER_OF_TYPES <= 8);
@@ -2314,7 +2224,6 @@
     explicit Types(byte bits) : EnumSet<Type, byte>(bits) {}
 
     byte ToByte() const { return ToIntegral(); }
-    void Print(StringStream* stream) const;
     bool UpdateStatus(Handle<Object> object);
     bool NeedsMap() const;
     bool CanBeUndetectable() const;
@@ -2323,39 +2232,35 @@
     static Types Generic() { return Types((1 << NUMBER_OF_TYPES) - 1); }
   };
 
-  ToBooleanStub(Isolate* isolate, Types types = Types())
-      : HydrogenCodeStub(isolate), types_(types) { }
+  ToBooleanStub(Isolate* isolate, ResultMode mode, Types types = Types())
+      : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(TypesBits::encode(types.ToByte()) |
+                      ResultModeBits::encode(mode));
+  }
+
   ToBooleanStub(Isolate* isolate, ExtraICState state)
-      : HydrogenCodeStub(isolate), types_(static_cast<byte>(state)) { }
+      : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(TypesBits::encode(static_cast<byte>(state)) |
+                      ResultModeBits::encode(RESULT_AS_SMI));
+  }
 
   bool UpdateStatus(Handle<Object> object);
-  Types GetTypes() { return types_; }
-
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
+  Types types() const { return Types(TypesBits::decode(sub_minor_key())); }
+  ResultMode mode() const { return ResultModeBits::decode(sub_minor_key()); }
 
   virtual Code::Kind GetCodeKind() const { return Code::TO_BOOLEAN_IC; }
-  virtual void PrintState(StringStream* stream);
+  virtual void PrintState(OStream& os) const OVERRIDE;  // NOLINT
 
   virtual bool SometimesSetsUpAFrame() { return false; }
 
-  static void InstallDescriptors(Isolate* isolate) {
-    ToBooleanStub stub(isolate);
-    stub.InitializeInterfaceDescriptor(
-        isolate->code_stub_interface_descriptor(CodeStub::ToBoolean));
-  }
-
   static Handle<Code> GetUninitialized(Isolate* isolate) {
     return ToBooleanStub(isolate, UNINITIALIZED).GetCode();
   }
 
-  virtual ExtraICState GetExtraICState() {
-    return types_.ToIntegral();
-  }
+  virtual ExtraICState GetExtraICState() const { return types().ToIntegral(); }
 
-  virtual InlineCacheState GetICState() {
-    if (types_.IsEmpty()) {
+  virtual InlineCacheState GetICState() const {
+    if (types().IsEmpty()) {
       return ::v8::internal::UNINITIALIZED;
     } else {
       return MONOMORPHIC;
@@ -2363,59 +2268,70 @@
   }
 
  private:
-  Major MajorKey() { return ToBoolean; }
-  int NotMissMinorKey() { return GetExtraICState(); }
+  ToBooleanStub(Isolate* isolate, InitializationState init_state)
+      : HydrogenCodeStub(isolate, init_state) {
+    set_sub_minor_key(ResultModeBits::encode(RESULT_AS_SMI));
+  }
 
-  ToBooleanStub(Isolate* isolate, InitializationState init_state) :
-      HydrogenCodeStub(isolate, init_state) {}
+  class TypesBits : public BitField<byte, 0, NUMBER_OF_TYPES> {};
+  class ResultModeBits : public BitField<ResultMode, NUMBER_OF_TYPES, 2> {};
 
-  Types types_;
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ToBoolean);
+  DEFINE_HYDROGEN_CODE_STUB(ToBoolean, HydrogenCodeStub);
 };
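
ToBooleanStub now carries a ResultMode next to the Types byte: TypesBits takes
the low NUMBER_OF_TYPES bits of the sub-minor key and ResultModeBits the two
above them. GetExtraICState() still returns only types().ToIntegral(), so the
IC machinery keeps seeing the same byte it saw before the mode existed. The
packing, with an arbitrary type bitset (NUMBER_OF_TYPES is taken as 8 here,
the maximum the STATIC_ASSERT allows; the enum itself is cut off by the hunk):

#include <cassert>
#include <cstdint>

const int NUMBER_OF_TYPES = 8;  // assumed; see the STATIC_ASSERT in the class
enum ResultMode { RESULT_AS_SMI, RESULT_AS_ODDBALL, RESULT_AS_INVERSE_ODDBALL };

int main() {
  uint8_t types = 0x25;  // arbitrary EnumSet<Type, byte> bit pattern
  uint32_t sub_minor_key =
      static_cast<uint32_t>(types) |
      (static_cast<uint32_t>(RESULT_AS_ODDBALL) << NUMBER_OF_TYPES);
  assert((sub_minor_key & 0xffu) == types);                        // types()
  assert((sub_minor_key >> NUMBER_OF_TYPES) == RESULT_AS_ODDBALL);  // mode()
  // GetExtraICState() deliberately exposes only the low byte:
  assert(static_cast<uint8_t>(sub_minor_key) == types);
  return 0;
}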
 
 
+OStream& operator<<(OStream& os, const ToBooleanStub::Types& t);
+
+
 class ElementsTransitionAndStoreStub : public HydrogenCodeStub {
  public:
-  ElementsTransitionAndStoreStub(Isolate* isolate,
-                                 ElementsKind from_kind,
-                                 ElementsKind to_kind,
-                                 bool is_jsarray,
+  ElementsTransitionAndStoreStub(Isolate* isolate, ElementsKind from_kind,
+                                 ElementsKind to_kind, bool is_jsarray,
                                  KeyedAccessStoreMode store_mode)
-      : HydrogenCodeStub(isolate),
-        from_kind_(from_kind),
-        to_kind_(to_kind),
-        is_jsarray_(is_jsarray),
-        store_mode_(store_mode) {}
-
-  ElementsKind from_kind() const { return from_kind_; }
-  ElementsKind to_kind() const { return to_kind_; }
-  bool is_jsarray() const { return is_jsarray_; }
-  KeyedAccessStoreMode store_mode() const { return store_mode_; }
-
-  virtual Handle<Code> GenerateCode() V8_OVERRIDE;
-
-  virtual void InitializeInterfaceDescriptor(
-      CodeStubInterfaceDescriptor* descriptor) V8_OVERRIDE;
-
- private:
-  class FromBits:      public BitField<ElementsKind,          0, 8> {};
-  class ToBits:        public BitField<ElementsKind,          8, 8> {};
-  class IsJSArrayBits: public BitField<bool,                 16, 1> {};
-  class StoreModeBits: public BitField<KeyedAccessStoreMode, 17, 4> {};
-
-  Major MajorKey() { return ElementsTransitionAndStore; }
-  int NotMissMinorKey() {
-    return FromBits::encode(from_kind_) |
-        ToBits::encode(to_kind_) |
-        IsJSArrayBits::encode(is_jsarray_) |
-        StoreModeBits::encode(store_mode_);
+      : HydrogenCodeStub(isolate) {
+    set_sub_minor_key(FromBits::encode(from_kind) | ToBits::encode(to_kind) |
+                      IsJSArrayBits::encode(is_jsarray) |
+                      StoreModeBits::encode(store_mode));
   }
 
-  ElementsKind from_kind_;
-  ElementsKind to_kind_;
-  bool is_jsarray_;
-  KeyedAccessStoreMode store_mode_;
+  ElementsKind from_kind() const { return FromBits::decode(sub_minor_key()); }
+  ElementsKind to_kind() const { return ToBits::decode(sub_minor_key()); }
+  bool is_jsarray() const { return IsJSArrayBits::decode(sub_minor_key()); }
+  KeyedAccessStoreMode store_mode() const {
+    return StoreModeBits::decode(sub_minor_key());
+  }
 
-  DISALLOW_COPY_AND_ASSIGN(ElementsTransitionAndStoreStub);
+  // Parameters accessed via CodeStubGraphBuilder::GetParameter()
+  enum ParameterIndices {
+    kValueIndex,
+    kMapIndex,
+    kKeyIndex,
+    kObjectIndex,
+    kParameterCount
+  };
+
+  static const Register ValueRegister() {
+    return ElementTransitionAndStoreDescriptor::ValueRegister();
+  }
+  static const Register MapRegister() {
+    return ElementTransitionAndStoreDescriptor::MapRegister();
+  }
+  static const Register KeyRegister() {
+    return ElementTransitionAndStoreDescriptor::NameRegister();
+  }
+  static const Register ObjectRegister() {
+    return ElementTransitionAndStoreDescriptor::ReceiverRegister();
+  }
+
+ private:
+  class FromBits : public BitField<ElementsKind, 0, 8> {};
+  class ToBits : public BitField<ElementsKind, 8, 8> {};
+  class IsJSArrayBits : public BitField<bool, 16, 1> {};
+  class StoreModeBits : public BitField<KeyedAccessStoreMode, 17, 4> {};
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ElementTransitionAndStore);
+  DEFINE_HYDROGEN_CODE_STUB(ElementsTransitionAndStore, HydrogenCodeStub);
 };
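
ElementsTransitionAndStoreStub packs four fields (8 + 8 + 1 + 4 bits) into the
sub-minor key, and its ParameterIndices enum leans on default enumerator
numbering so that kParameterCount doubles as the number of real parameters.
Both facts, checked:

#include <cassert>

enum ParameterIndices {  // same shape as above: default values 0, 1, 2, 3, 4
  kValueIndex,
  kMapIndex,
  kKeyIndex,
  kObjectIndex,
  kParameterCount
};

int main() {
  assert(kValueIndex == 0 && kObjectIndex == 3);
  assert(kParameterCount == 4);  // count of real parameters
  // FromBits(8) + ToBits(8) + IsJSArrayBits(1) + StoreModeBits(4) = 21 bits:
  assert(8 + 8 + 1 + 4 <= 32);
  return 0;
}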
 
 
@@ -2424,37 +2340,29 @@
   explicit StoreArrayLiteralElementStub(Isolate* isolate)
       : PlatformCodeStub(isolate) { }
 
- private:
-  Major MajorKey() { return StoreArrayLiteralElement; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-
-  DISALLOW_COPY_AND_ASSIGN(StoreArrayLiteralElementStub);
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(StoreArrayLiteralElement);
+  DEFINE_PLATFORM_CODE_STUB(StoreArrayLiteralElement, PlatformCodeStub);
 };
 
 
 class StubFailureTrampolineStub : public PlatformCodeStub {
  public:
   StubFailureTrampolineStub(Isolate* isolate, StubFunctionMode function_mode)
-      : PlatformCodeStub(isolate),
-        function_mode_(function_mode) {}
+      : PlatformCodeStub(isolate) {
+    minor_key_ = FunctionModeField::encode(function_mode);
+  }
 
   static void GenerateAheadOfTime(Isolate* isolate);
 
  private:
-  class FunctionModeField: public BitField<StubFunctionMode,    0, 1> {};
-
-  Major MajorKey() { return StubFailureTrampoline; }
-  int MinorKey() {
-    return FunctionModeField::encode(function_mode_);
+  StubFunctionMode function_mode() const {
+    return FunctionModeField::decode(minor_key_);
   }
 
-  void Generate(MacroAssembler* masm);
+  class FunctionModeField : public BitField<StubFunctionMode, 0, 1> {};
 
-  StubFunctionMode function_mode_;
-
-  DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(StubFailureTrampoline, PlatformCodeStub);
 };
 
 
@@ -2473,20 +2381,57 @@
                                   intptr_t stack_pointer,
                                   Isolate* isolate);
 
-  Major MajorKey() { return ProfileEntryHook; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-
-  DISALLOW_COPY_AND_ASSIGN(ProfileEntryHookStub);
+  // ProfileEntryHookStub is called at the start of a function, so it has the
+  // same register set.
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunction);
+  DEFINE_PLATFORM_CODE_STUB(ProfileEntryHook, PlatformCodeStub);
 };
 
 
-class CallDescriptors {
+class StoreBufferOverflowStub : public PlatformCodeStub {
  public:
-  static void InitializeForIsolate(Isolate* isolate);
+  StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = SaveDoublesBits::encode(save_fp == kSaveFPRegs);
+  }
+
+  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
+
+  class SaveDoublesBits : public BitField<bool, 0, 1> {};
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(StoreBufferOverflow, PlatformCodeStub);
 };
 
+
+class SubStringStub : public PlatformCodeStub {
+ public:
+  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+  DEFINE_PLATFORM_CODE_STUB(SubString, PlatformCodeStub);
+};
+
+
+class StringCompareStub : public PlatformCodeStub {
+ public:
+  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+
+  DEFINE_CALL_INTERFACE_DESCRIPTOR(ContextOnly);
+  DEFINE_PLATFORM_CODE_STUB(StringCompare, PlatformCodeStub);
+};
+
+
+#undef DEFINE_CALL_INTERFACE_DESCRIPTOR
+#undef DEFINE_PLATFORM_CODE_STUB
+#undef DEFINE_HANDLER_CODE_STUB
+#undef DEFINE_HYDROGEN_CODE_STUB
+#undef DEFINE_CODE_STUB
+#undef DEFINE_CODE_STUB_BASE
 } }  // namespace v8::internal
 
 #endif  // V8_CODE_STUBS_H_
diff --git a/src/code.h b/src/code.h
index 2b821c6..d0a5fec 100644
--- a/src/code.h
+++ b/src/code.h
@@ -30,11 +30,11 @@
   bool is_immediate() const { return !is_reg(); }
 
   Register reg() const {
-    ASSERT(is_reg());
+    DCHECK(is_reg());
     return reg_;
   }
   int immediate() const {
-    ASSERT(is_immediate());
+    DCHECK(is_immediate());
     return immediate_;
   }
 
diff --git a/src/codegen.cc b/src/codegen.cc
index c039e40..1362232 100644
--- a/src/codegen.cc
+++ b/src/codegen.cc
@@ -12,7 +12,6 @@
 #include "src/prettyprinter.h"
 #include "src/rewriter.h"
 #include "src/runtime.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -117,6 +116,7 @@
           CodeStub::MajorName(info->code_stub()->MajorKey(), true);
       PrintF("%s", name == NULL ? "<unknown>" : name);
     } else {
+      AllowDeferredHandleDereference allow_dereference_for_trace;
       PrintF("%s", info->function()->debug_name()->ToCString().get());
     }
     PrintF("]\n");
@@ -175,10 +175,11 @@
         code->kind() == Code::FUNCTION;
 
     CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
     if (print_source) {
       Handle<Script> script = info->script();
       if (!script->IsUndefined() && !script->source()->IsUndefined()) {
-        PrintF(tracing_scope.file(), "--- Raw source ---\n");
+        os << "--- Raw source ---\n";
         ConsStringIteratorOp op;
         StringCharacterStream stream(String::cast(script->source()),
                                      &op,
@@ -189,37 +190,33 @@
             function->end_position() - function->start_position() + 1;
         for (int i = 0; i < source_len; i++) {
           if (stream.HasMore()) {
-            PrintF(tracing_scope.file(), "%c", stream.GetNext());
+            os << AsReversiblyEscapedUC16(stream.GetNext());
           }
         }
-        PrintF(tracing_scope.file(), "\n\n");
+        os << "\n\n";
       }
     }
     if (info->IsOptimizing()) {
       if (FLAG_print_unopt_code) {
-        PrintF(tracing_scope.file(), "--- Unoptimized code ---\n");
+        os << "--- Unoptimized code ---\n";
         info->closure()->shared()->code()->Disassemble(
-            function->debug_name()->ToCString().get(), tracing_scope.file());
+            function->debug_name()->ToCString().get(), os);
       }
-      PrintF(tracing_scope.file(), "--- Optimized code ---\n");
-      PrintF(tracing_scope.file(),
-             "optimization_id = %d\n", info->optimization_id());
+      os << "--- Optimized code ---\n"
+         << "optimization_id = " << info->optimization_id() << "\n";
     } else {
-      PrintF(tracing_scope.file(), "--- Code ---\n");
+      os << "--- Code ---\n";
     }
     if (print_source) {
-      PrintF(tracing_scope.file(),
-             "source_position = %d\n", function->start_position());
+      os << "source_position = " << function->start_position() << "\n";
     }
     if (info->IsStub()) {
       CodeStub::Major major_key = info->code_stub()->MajorKey();
-      code->Disassemble(CodeStub::MajorName(major_key, false),
-                        tracing_scope.file());
+      code->Disassemble(CodeStub::MajorName(major_key, false), os);
     } else {
-      code->Disassemble(function->debug_name()->ToCString().get(),
-                        tracing_scope.file());
+      code->Disassemble(function->debug_name()->ToCString().get(), os);
     }
-    PrintF(tracing_scope.file(), "--- End code ---\n");
+    os << "--- End code ---\n";
   }
 #endif  // ENABLE_DISASSEMBLER
 }
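
The tracing hunk above replaces the repeated PrintF(tracing_scope.file(), ...)
calls with a single OFStream wrapped around the trace file, so everything
below it uses operator<<. OFStream is a V8-internal class; the following is
only a rough standalone sketch of the same wrap-a-FILE*-in-an-ostream
pattern using the standard library:

#include <cstdio>
#include <ostream>
#include <streambuf>

// Minimal streambuf that forwards every character to a FILE*, so callers
// can use C++ stream insertion on a C stdio handle.
class FileStreambuf : public std::streambuf {
 public:
  explicit FileStreambuf(std::FILE* file) : file_(file) {}

 protected:
  virtual int overflow(int c) {
    if (c != EOF) std::fputc(c, file_);
    return c;
  }

 private:
  std::FILE* file_;
};

int main() {
  FileStreambuf buf(stdout);
  std::ostream os(&buf);
  os << "--- Code ---\n";                     // same call shape as above
  os << "optimization_id = " << 42 << "\n";
  return 0;
}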
@@ -238,34 +235,4 @@
   return false;
 }
 
-
-void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
-  switch (type_) {
-    case READ_ELEMENT:
-      GenerateReadElement(masm);
-      break;
-    case NEW_SLOPPY_FAST:
-      GenerateNewSloppyFast(masm);
-      break;
-    case NEW_SLOPPY_SLOW:
-      GenerateNewSloppySlow(masm);
-      break;
-    case NEW_STRICT:
-      GenerateNewStrict(masm);
-      break;
-  }
-}
-
-
-int CEntryStub::MinorKey() {
-  int result = (save_doubles_ == kSaveFPRegs) ? 1 : 0;
-  ASSERT(result_size_ == 1 || result_size_ == 2);
-#ifdef _WIN64
-  return result | ((result_size_ == 1) ? 0 : 2);
-#else
-  return result;
-#endif
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/codegen.h b/src/codegen.h
index ec36b15..e01a398 100644
--- a/src/codegen.h
+++ b/src/codegen.h
@@ -46,17 +46,19 @@
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/codegen-ia32.h"
+#include "src/ia32/codegen-ia32.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/codegen-x64.h"
+#include "src/x64/codegen-x64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/codegen-arm64.h"
+#include "src/arm64/codegen-arm64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/codegen-arm.h"
+#include "src/arm/codegen-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/codegen-mips.h"
+#include "src/mips/codegen-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/codegen-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/codegen-x87.h"
+#include "src/x87/codegen-x87.h"  // NOLINT
 #else
 #error Unsupported target architecture.
 #endif
@@ -115,15 +117,30 @@
  public:
   // If |mode| is set to DONT_TRACK_ALLOCATION_SITE,
   // |allocation_memento_found| may be NULL.
-  static void GenerateMapChangeElementsTransition(MacroAssembler* masm,
+  static void GenerateMapChangeElementsTransition(
+      MacroAssembler* masm,
+      Register receiver,
+      Register key,
+      Register value,
+      Register target_map,
       AllocationSiteMode mode,
       Label* allocation_memento_found);
-  static void GenerateSmiToDouble(MacroAssembler* masm,
-                                  AllocationSiteMode mode,
-                                  Label* fail);
-  static void GenerateDoubleToObject(MacroAssembler* masm,
-                                     AllocationSiteMode mode,
-                                     Label* fail);
+  static void GenerateSmiToDouble(
+      MacroAssembler* masm,
+      Register receiver,
+      Register key,
+      Register value,
+      Register target_map,
+      AllocationSiteMode mode,
+      Label* fail);
+  static void GenerateDoubleToObject(
+      MacroAssembler* masm,
+      Register receiver,
+      Register key,
+      Register value,
+      Register target_map,
+      AllocationSiteMode mode,
+      Label* fail);
 
  private:
   DISALLOW_COPY_AND_ASSIGN(ElementsTransitionGenerator);
diff --git a/src/collection-iterator.js b/src/collection-iterator.js
index 2436a93..2bccc8d 100644
--- a/src/collection-iterator.js
+++ b/src/collection-iterator.js
@@ -21,7 +21,23 @@
     throw MakeTypeError('incompatible_method_receiver',
                         ['Set Iterator.prototype.next', this]);
   }
-  return %SetIteratorNext(this);
+
+  var value_array = [UNDEFINED, UNDEFINED];
+  var entry = {value: value_array, done: false};
+  switch (%SetIteratorNext(this, value_array)) {
+    case 0:
+      entry.value = UNDEFINED;
+      entry.done = true;
+      break;
+    case ITERATOR_KIND_VALUES:
+      entry.value = value_array[0];
+      break;
+    case ITERATOR_KIND_ENTRIES:
+      value_array[1] = value_array[0];
+      break;
+  }
+
+  return entry;
 }
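
Observable effect of the rewrite above: %SetIteratorNext now fills a
caller-provided two-element array and returns the iterator kind (0 once the
iterator is exhausted), and this wrapper rebuilds the spec-shaped
{value, done} object from it. A quick sketch of the results for a
one-element Set:

var s = new Set();
s.add('a');

var values = s.values();
values.next();   // {value: 'a', done: false}
values.next();   // {value: undefined, done: true}

// For the ENTRIES kind the element is duplicated into both slots:
var entries = s.entries();
entries.next();  // {value: ['a', 'a'], done: false}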
 
 
@@ -59,7 +75,7 @@
   ));
 
   %FunctionSetName(SetIteratorSymbolIterator, '[Symbol.iterator]');
-  %SetProperty(SetIterator.prototype, symbolIterator,
+  %AddNamedProperty(SetIterator.prototype, symbolIterator,
       SetIteratorSymbolIterator, DONT_ENUM);
 }
 
@@ -71,11 +87,11 @@
 
   InstallFunctions($Set.prototype, DONT_ENUM, $Array(
     'entries', SetEntries,
+    'keys', SetValues,
     'values', SetValues
   ));
 
-  %SetProperty($Set.prototype, symbolIterator, SetValues,
-      DONT_ENUM);
+  %AddNamedProperty($Set.prototype, symbolIterator, SetValues, DONT_ENUM);
 }
 
 ExtendSetPrototype();
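
Also worth noting in ExtendSetPrototype above: 'keys' is installed as the
very same SetValues function as 'values', and the Symbol.iterator property
is SetValues too, matching ES6's requirement that the three coincide for
Set. So, observably:

var proto = Set.prototype;
proto.keys === proto.values;   // true: both are SetValues
// for-of over a Set therefore yields its values.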
@@ -97,7 +113,24 @@
     throw MakeTypeError('incompatible_method_receiver',
                         ['Map Iterator.prototype.next', this]);
   }
-  return %MapIteratorNext(this);
+
+  var value_array = [UNDEFINED, UNDEFINED];
+  var entry = {value: value_array, done: false};
+  switch (%MapIteratorNext(this, value_array)) {
+    case 0:
+      entry.value = UNDEFINED;
+      entry.done = true;
+      break;
+    case ITERATOR_KIND_KEYS:
+      entry.value = value_array[0];
+      break;
+    case ITERATOR_KIND_VALUES:
+      entry.value = value_array[1];
+      break;
+    // ITERATOR_KIND_ENTRIES does not need any processing.
+  }
+
+  return entry;
 }
 
 
@@ -139,7 +172,7 @@
   ));
 
   %FunctionSetName(MapIteratorSymbolIterator, '[Symbol.iterator]');
-  %SetProperty(MapIterator.prototype, symbolIterator,
+  %AddNamedProperty(MapIterator.prototype, symbolIterator,
       MapIteratorSymbolIterator, DONT_ENUM);
 }
 
@@ -155,8 +188,7 @@
     'values', MapValues
   ));
 
-  %SetProperty($Map.prototype, symbolIterator, MapEntries,
-      DONT_ENUM);
+  %AddNamedProperty($Map.prototype, symbolIterator, MapEntries, DONT_ENUM);
 }
 
 ExtendMapPrototype();
diff --git a/src/collection.js b/src/collection.js
index 0d8dd77..0027bd7 100644
--- a/src/collection.js
+++ b/src/collection.js
@@ -15,12 +15,32 @@
 // -------------------------------------------------------------------
 // Harmony Set
 
-function SetConstructor() {
-  if (%_IsConstructCall()) {
-    %SetInitialize(this);
-  } else {
+function SetConstructor(iterable) {
+  if (!%_IsConstructCall()) {
     throw MakeTypeError('constructor_not_function', ['Set']);
   }
+
+  var iter, adder;
+
+  if (!IS_NULL_OR_UNDEFINED(iterable)) {
+    iter = GetIterator(ToObject(iterable));
+    adder = this.add;
+    if (!IS_SPEC_FUNCTION(adder)) {
+      throw MakeTypeError('property_not_function', ['add', this]);
+    }
+  }
+
+  %SetInitialize(this);
+
+  if (IS_UNDEFINED(iter)) return;
+
+  var next;
+  while (!(next = iter.next()).done) {
+    if (!IS_SPEC_OBJECT(next)) {
+      throw MakeTypeError('iterator_result_not_an_object', [next]);
+    }
+    %_CallFunction(this, next.value, adder);
+  }
 }
 
 
@@ -29,6 +49,13 @@
     throw MakeTypeError('incompatible_method_receiver',
                         ['Set.prototype.add', this]);
   }
+  // Normalize -0 to +0 as required by the spec.
+  // Even though we use SameValueZero as the comparison for the keys, we
+  // don't want to ever store -0 as the key, since the key is directly
+  // exposed during iteration.
+  if (key === 0) {
+    key = 0;
+  }
   return %SetAdd(this, key);
 }
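
The normalization above is observable through iteration: after add(-0) the
stored key is +0, while membership is unaffected because the backing store
compares with SameValueZero anyway. For example:

var s = new Set();
s.add(-0);
s.has(0);    // true
s.has(-0);   // true
s.forEach(function(key) {
  1 / key;   // Infinity, not -Infinity: the stored key is +0
});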
 
@@ -80,11 +107,13 @@
   }
 
   var iterator = new SetIterator(this, ITERATOR_KIND_VALUES);
-  var entry;
-  var stepping = %_DebugCallbackSupportsStepping(f);
-  while (!(entry = %SetIteratorNext(iterator)).done) {
+  var key;
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+  var value_array = [UNDEFINED];
+  while (%SetIteratorNext(iterator, value_array)) {
     if (stepping) %DebugPrepareStepInIfStepping(f);
-    %_CallFunction(receiver, entry.value, entry.value, this, f);
+    key = value_array[0];
+    %_CallFunction(receiver, key, key, this, f);
   }
 }
 
@@ -96,7 +125,7 @@
 
   %SetCode($Set, SetConstructor);
   %FunctionSetPrototype($Set, new $Object());
-  %SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
+  %AddNamedProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
 
   %FunctionSetLength(SetForEach, 1);
 
@@ -117,12 +146,36 @@
 // -------------------------------------------------------------------
 // Harmony Map
 
-function MapConstructor() {
-  if (%_IsConstructCall()) {
-    %MapInitialize(this);
-  } else {
+function MapConstructor(iterable) {
+  if (!%_IsConstructCall()) {
     throw MakeTypeError('constructor_not_function', ['Map']);
   }
+
+  var iter, adder;
+
+  if (!IS_NULL_OR_UNDEFINED(iterable)) {
+    iter = GetIterator(ToObject(iterable));
+    adder = this.set;
+    if (!IS_SPEC_FUNCTION(adder)) {
+      throw MakeTypeError('property_not_function', ['set', this]);
+    }
+  }
+
+  %MapInitialize(this);
+
+  if (IS_UNDEFINED(iter)) return;
+
+  var next, nextItem;
+  while (!(next = iter.next()).done) {
+    if (!IS_SPEC_OBJECT(next)) {
+      throw MakeTypeError('iterator_result_not_an_object', [next]);
+    }
+    nextItem = next.value;
+    if (!IS_SPEC_OBJECT(nextItem)) {
+      throw MakeTypeError('iterator_value_not_an_object', [nextItem]);
+    }
+    %_CallFunction(this, nextItem[0], nextItem[1], adder);
+  }
 }
 
 
@@ -140,6 +193,13 @@
     throw MakeTypeError('incompatible_method_receiver',
                         ['Map.prototype.set', this]);
   }
+  // Normalize -0 to +0 as required by the spec.
+  // Even though we use SameValueZero as the comparison for the keys, we
+  // don't want to ever store -0 as the key, since the key is directly
+  // exposed during iteration.
+  if (key === 0) {
+    key = 0;
+  }
   return %MapSet(this, key, value);
 }
 
@@ -191,11 +251,11 @@
   }
 
   var iterator = new MapIterator(this, ITERATOR_KIND_ENTRIES);
-  var entry;
-  var stepping = %_DebugCallbackSupportsStepping(f);
-  while (!(entry = %MapIteratorNext(iterator)).done) {
+  var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
+  var value_array = [UNDEFINED, UNDEFINED];
+  while (%MapIteratorNext(iterator, value_array)) {
     if (stepping) %DebugPrepareStepInIfStepping(f);
-    %_CallFunction(receiver, entry.value[1], entry.value[0], this, f);
+    %_CallFunction(receiver, value_array[1], value_array[0], this, f);
   }
 }
 
@@ -207,7 +267,7 @@
 
   %SetCode($Map, MapConstructor);
   %FunctionSetPrototype($Map, new $Object());
-  %SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
+  %AddNamedProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
 
   %FunctionSetLength(MapForEach, 1);
 
diff --git a/src/compilation-cache.cc b/src/compilation-cache.cc
index 6b22bf2..aab2fe5 100644
--- a/src/compilation-cache.cc
+++ b/src/compilation-cache.cc
@@ -43,7 +43,7 @@
 
 
 Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
-  ASSERT(generation < generations_);
+  DCHECK(generation < generations_);
   Handle<CompilationCacheTable> result;
   if (tables_[generation]->IsUndefined()) {
     result = CompilationCacheTable::New(isolate(), kInitialCacheSize);
@@ -193,7 +193,7 @@
   if (result != NULL) {
     Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
                                       isolate());
-    ASSERT(HasOrigin(shared,
+    DCHECK(HasOrigin(shared,
                      name,
                      line_offset,
                      column_offset,
@@ -221,10 +221,8 @@
 
 
 MaybeHandle<SharedFunctionInfo> CompilationCacheEval::Lookup(
-    Handle<String> source,
-    Handle<Context> context,
-    StrictMode strict_mode,
-    int scope_position) {
+    Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+    StrictMode strict_mode, int scope_position) {
   HandleScope scope(isolate());
   // Make sure not to leak the table into the surrounding handle
   // scope. Otherwise, we risk keeping old tables around even after
@@ -233,14 +231,14 @@
   int generation;
   for (generation = 0; generation < generations(); generation++) {
     Handle<CompilationCacheTable> table = GetTable(generation);
-    result = table->LookupEval(source, context, strict_mode, scope_position);
+    result = table->LookupEval(source, outer_info, strict_mode, scope_position);
     if (result->IsSharedFunctionInfo()) break;
   }
   if (result->IsSharedFunctionInfo()) {
     Handle<SharedFunctionInfo> function_info =
         Handle<SharedFunctionInfo>::cast(result);
     if (generation != 0) {
-      Put(source, context, function_info, scope_position);
+      Put(source, outer_info, function_info, scope_position);
     }
     isolate()->counters()->compilation_cache_hits()->Increment();
     return scope.CloseAndEscape(function_info);
@@ -252,12 +250,12 @@
 
 
 void CompilationCacheEval::Put(Handle<String> source,
-                               Handle<Context> context,
+                               Handle<SharedFunctionInfo> outer_info,
                                Handle<SharedFunctionInfo> function_info,
                                int scope_position) {
   HandleScope scope(isolate());
   Handle<CompilationCacheTable> table = GetFirstTable();
-  table = CompilationCacheTable::PutEval(table, source, context,
+  table = CompilationCacheTable::PutEval(table, source, outer_info,
                                          function_info, scope_position);
   SetFirstTable(table);
 }
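
Context for the Lookup/Put pair above: the eval cache is generational.
Lookup scans from the newest generation outward, and a hit found in an
older generation is re-inserted into the first (newest) table so it
survives the next ageing cycle. A compact sketch of that promotion policy
(container types heavily simplified, not V8's hash tables):

#include <cstddef>
#include <map>
#include <string>
#include <vector>

// Each generation maps a source string to a cached value; generation 0 is
// the newest. A hit in an older generation is promoted back to generation 0.
struct GenerationalCache {
  std::vector<std::map<std::string, int> > tables;

  explicit GenerationalCache(int generations) : tables(generations) {}

  bool Lookup(const std::string& source, int* out) {
    for (std::size_t g = 0; g < tables.size(); ++g) {
      std::map<std::string, int>::iterator it = tables[g].find(source);
      if (it != tables[g].end()) {
        *out = it->second;
        if (g != 0) tables[0][source] = it->second;  // promote on old-gen hit
        return true;
      }
    }
    return false;
  }
};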
@@ -324,27 +322,25 @@
 
 
 MaybeHandle<SharedFunctionInfo> CompilationCache::LookupEval(
-    Handle<String> source,
-    Handle<Context> context,
-    StrictMode strict_mode,
-    int scope_position) {
+    Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+    Handle<Context> context, StrictMode strict_mode, int scope_position) {
   if (!IsEnabled()) return MaybeHandle<SharedFunctionInfo>();
 
   MaybeHandle<SharedFunctionInfo> result;
   if (context->IsNativeContext()) {
-    result = eval_global_.Lookup(
-        source, context, strict_mode, scope_position);
+    result =
+        eval_global_.Lookup(source, outer_info, strict_mode, scope_position);
   } else {
-    ASSERT(scope_position != RelocInfo::kNoPosition);
-    result = eval_contextual_.Lookup(
-        source, context, strict_mode, scope_position);
+    DCHECK(scope_position != RelocInfo::kNoPosition);
+    result = eval_contextual_.Lookup(source, outer_info, strict_mode,
+                                     scope_position);
   }
   return result;
 }
 
 
 MaybeHandle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
-                                                  JSRegExp::Flags flags) {
+                                                       JSRegExp::Flags flags) {
   if (!IsEnabled()) return MaybeHandle<FixedArray>();
 
   return reg_exp_.Lookup(source, flags);
@@ -361,6 +357,7 @@
 
 
 void CompilationCache::PutEval(Handle<String> source,
+                               Handle<SharedFunctionInfo> outer_info,
                                Handle<Context> context,
                                Handle<SharedFunctionInfo> function_info,
                                int scope_position) {
@@ -368,10 +365,10 @@
 
   HandleScope scope(isolate());
   if (context->IsNativeContext()) {
-    eval_global_.Put(source, context, function_info, scope_position);
+    eval_global_.Put(source, outer_info, function_info, scope_position);
   } else {
-    ASSERT(scope_position != RelocInfo::kNoPosition);
-    eval_contextual_.Put(source, context, function_info, scope_position);
+    DCHECK(scope_position != RelocInfo::kNoPosition);
+    eval_contextual_.Put(source, outer_info, function_info, scope_position);
   }
 }
 
diff --git a/src/compilation-cache.h b/src/compilation-cache.h
index baa53fb..6799b1c 100644
--- a/src/compilation-cache.h
+++ b/src/compilation-cache.h
@@ -34,7 +34,7 @@
     return GetTable(kFirstGeneration);
   }
   void SetFirstTable(Handle<CompilationCacheTable> value) {
-    ASSERT(kFirstGeneration < generations_);
+    DCHECK(kFirstGeneration < generations_);
     tables_[kFirstGeneration] = *value;
   }
 
@@ -114,14 +114,12 @@
       : CompilationSubCache(isolate, generations) { }
 
   MaybeHandle<SharedFunctionInfo> Lookup(Handle<String> source,
-                                         Handle<Context> context,
+                                         Handle<SharedFunctionInfo> outer_info,
                                          StrictMode strict_mode,
                                          int scope_position);
 
-  void Put(Handle<String> source,
-           Handle<Context> context,
-           Handle<SharedFunctionInfo> function_info,
-           int scope_position);
+  void Put(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+           Handle<SharedFunctionInfo> function_info, int scope_position);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
@@ -161,8 +159,8 @@
   // given context.  Returns an empty handle if the cache doesn't
   // contain a script for the given source string.
   MaybeHandle<SharedFunctionInfo> LookupEval(
-      Handle<String> source, Handle<Context> context, StrictMode strict_mode,
-      int scope_position);
+      Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+      Handle<Context> context, StrictMode strict_mode, int scope_position);
 
   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
@@ -177,10 +175,9 @@
 
   // Associate the (source, context->closure()->shared(), kind) triple
   // with the shared function info. This may overwrite an existing mapping.
-  void PutEval(Handle<String> source,
+  void PutEval(Handle<String> source, Handle<SharedFunctionInfo> outer_info,
                Handle<Context> context,
-               Handle<SharedFunctionInfo> function_info,
-               int scope_position);
+               Handle<SharedFunctionInfo> function_info, int scope_position);
 
   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
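
A note on the signature churn through this file: the eval caches are now
keyed by the calling function's SharedFunctionInfo (outer_info) instead of
the Context. Conceptually the lookup key becomes the following tuple (an
illustration only, not a real V8 type):

// Hypothetical sketch of the eval-cache key after this change.
struct EvalCacheKey {
  const void* source;       // Handle<String>: the eval source text
  const void* outer_info;   // Handle<SharedFunctionInfo>: replaces Context
  int strict_mode;          // StrictMode at the call site
  int scope_position;       // distinguishes eval positions within a scope
};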
diff --git a/src/compiler-intrinsics.h b/src/compiler-intrinsics.h
deleted file mode 100644
index f31895e..0000000
--- a/src/compiler-intrinsics.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_INTRINSICS_H_
-#define V8_COMPILER_INTRINSICS_H_
-
-namespace v8 {
-namespace internal {
-
-class CompilerIntrinsics {
- public:
-  // Returns number of zero bits preceding least significant 1 bit.
-  // Undefined for zero value.
-  INLINE(static int CountTrailingZeros(uint32_t value));
-
-  // Returns number of zero bits following most significant 1 bit.
-  // Undefined for zero value.
-  INLINE(static int CountLeadingZeros(uint32_t value));
-
-  // Returns the number of bits set.
-  INLINE(static int CountSetBits(uint32_t value));
-};
-
-#ifdef __GNUC__
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
-  return __builtin_ctz(value);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
-  return __builtin_clz(value);
-}
-
-int CompilerIntrinsics::CountSetBits(uint32_t value) {
-  return __builtin_popcount(value);
-}
-
-#elif defined(_MSC_VER)
-
-#pragma intrinsic(_BitScanForward)
-#pragma intrinsic(_BitScanReverse)
-
-int CompilerIntrinsics::CountTrailingZeros(uint32_t value) {
-  unsigned long result;  //NOLINT
-  _BitScanForward(&result, static_cast<long>(value));  //NOLINT
-  return static_cast<int>(result);
-}
-
-int CompilerIntrinsics::CountLeadingZeros(uint32_t value) {
-  unsigned long result;  //NOLINT
-  _BitScanReverse(&result, static_cast<long>(value));  //NOLINT
-  return 31 - static_cast<int>(result);
-}
-
-int CompilerIntrinsics::CountSetBits(uint32_t value) {
-  // Manually count set bits.
-  value = ((value >>  1) & 0x55555555) + (value & 0x55555555);
-  value = ((value >>  2) & 0x33333333) + (value & 0x33333333);
-  value = ((value >>  4) & 0x0f0f0f0f) + (value & 0x0f0f0f0f);
-  value = ((value >>  8) & 0x00ff00ff) + (value & 0x00ff00ff);
-  value = ((value >> 16) & 0x0000ffff) + (value & 0x0000ffff);
-  return value;
-}
-
-#else
-#error Unsupported compiler
-#endif
-
-} }  // namespace v8::internal
-
-#endif  // V8_COMPILER_INTRINSICS_H_
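
The deleted header only wrapped compiler builtins; the diff itself does not
show where replacements live, so no claim is made here beyond the old
semantics. As a standalone check of those semantics on a sample value
(GCC/Clang builtins, mirroring the deleted __GNUC__ branch):

#include <cassert>

int main() {
  unsigned value = 0x000000F0u;  // bits 4..7 set
  assert(__builtin_ctz(value) == 4);       // CountTrailingZeros
  assert(__builtin_clz(value) == 24);      // CountLeadingZeros (32-bit int)
  assert(__builtin_popcount(value) == 4);  // CountSetBits
  return 0;
}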
diff --git a/src/compiler.cc b/src/compiler.cc
index 0d3f146..ea604c9 100644
--- a/src/compiler.cc
+++ b/src/compiler.cc
@@ -9,12 +9,12 @@
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
+#include "src/compiler/pipeline.h"
 #include "src/cpu-profiler.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen.h"
 #include "src/gdb-jit.h"
-#include "src/typing.h"
 #include "src/hydrogen.h"
 #include "src/isolate-inl.h"
 #include "src/lithium.h"
@@ -25,65 +25,118 @@
 #include "src/scanner-character-streams.h"
 #include "src/scopeinfo.h"
 #include "src/scopes.h"
+#include "src/typing.h"
 #include "src/vm-state-inl.h"
 
 namespace v8 {
 namespace internal {
 
 
-CompilationInfo::CompilationInfo(Handle<Script> script,
-                                 Zone* zone)
-    : flags_(StrictModeField::encode(SLOPPY)),
+ScriptData::ScriptData(const byte* data, int length)
+    : owns_data_(false), data_(data), length_(length) {
+  if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
+    byte* copy = NewArray<byte>(length);
+    DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
+    CopyBytes(copy, data, length);
+    data_ = copy;
+    AcquireDataOwnership();
+  }
+}
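
The new ScriptData constructor guards against unaligned cached-data
buffers: if the incoming pointer is not pointer-aligned, it copies the
bytes into a freshly allocated (hence aligned) buffer and takes ownership
of the copy. A standalone sketch of that pattern (kPointerAlignment,
NewArray and CopyBytes are V8 helpers; standard C++ stands in for them
here):

#include <cstdint>
#include <cstring>

// True when 'p' is aligned for pointer-sized access.
bool IsPointerAligned(const void* p) {
  return reinterpret_cast<uintptr_t>(p) % sizeof(void*) == 0;
}

// Returns 'data' directly when aligned; otherwise returns an owned, aligned
// copy and sets *owned so the caller knows to free it with delete[].
const unsigned char* AlignedCopyIfNeeded(const unsigned char* data,
                                         int length, bool* owned) {
  if (IsPointerAligned(data)) {
    *owned = false;
    return data;
  }
  unsigned char* copy = new unsigned char[length];  // new[] is suitably aligned
  std::memcpy(copy, data, length);
  *owned = true;
  return copy;
}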
+
+
+CompilationInfo::CompilationInfo(Handle<Script> script, Zone* zone)
+    : flags_(kThisHasUses),
       script_(script),
+      source_stream_(NULL),
       osr_ast_id_(BailoutId::None()),
       parameter_count_(0),
-      this_has_uses_(true),
-      optimization_id_(-1) {
+      optimization_id_(-1),
+      ast_value_factory_(NULL),
+      ast_value_factory_owned_(false),
+      aborted_due_to_dependency_change_(false) {
   Initialize(script->GetIsolate(), BASE, zone);
 }
 
 
+CompilationInfo::CompilationInfo(Isolate* isolate, Zone* zone)
+    : flags_(kThisHasUses),
+      script_(Handle<Script>::null()),
+      source_stream_(NULL),
+      osr_ast_id_(BailoutId::None()),
+      parameter_count_(0),
+      optimization_id_(-1),
+      ast_value_factory_(NULL),
+      ast_value_factory_owned_(false),
+      aborted_due_to_dependency_change_(false) {
+  Initialize(isolate, STUB, zone);
+}
+
+
 CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info,
                                  Zone* zone)
-    : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
+    : flags_(kLazy | kThisHasUses),
       shared_info_(shared_info),
       script_(Handle<Script>(Script::cast(shared_info->script()))),
+      source_stream_(NULL),
       osr_ast_id_(BailoutId::None()),
       parameter_count_(0),
-      this_has_uses_(true),
-      optimization_id_(-1) {
+      optimization_id_(-1),
+      ast_value_factory_(NULL),
+      ast_value_factory_owned_(false),
+      aborted_due_to_dependency_change_(false) {
   Initialize(script_->GetIsolate(), BASE, zone);
 }
 
 
-CompilationInfo::CompilationInfo(Handle<JSFunction> closure,
-                                 Zone* zone)
-    : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
+CompilationInfo::CompilationInfo(Handle<JSFunction> closure, Zone* zone)
+    : flags_(kLazy | kThisHasUses),
       closure_(closure),
       shared_info_(Handle<SharedFunctionInfo>(closure->shared())),
       script_(Handle<Script>(Script::cast(shared_info_->script()))),
+      source_stream_(NULL),
       context_(closure->context()),
       osr_ast_id_(BailoutId::None()),
       parameter_count_(0),
-      this_has_uses_(true),
-      optimization_id_(-1) {
+      optimization_id_(-1),
+      ast_value_factory_(NULL),
+      ast_value_factory_owned_(false),
+      aborted_due_to_dependency_change_(false) {
   Initialize(script_->GetIsolate(), BASE, zone);
 }
 
 
-CompilationInfo::CompilationInfo(HydrogenCodeStub* stub,
-                                 Isolate* isolate,
+CompilationInfo::CompilationInfo(HydrogenCodeStub* stub, Isolate* isolate,
                                  Zone* zone)
-    : flags_(StrictModeField::encode(SLOPPY) | IsLazy::encode(true)),
+    : flags_(kLazy | kThisHasUses),
+      source_stream_(NULL),
       osr_ast_id_(BailoutId::None()),
       parameter_count_(0),
-      this_has_uses_(true),
-      optimization_id_(-1) {
+      optimization_id_(-1),
+      ast_value_factory_(NULL),
+      ast_value_factory_owned_(false),
+      aborted_due_to_dependency_change_(false) {
   Initialize(isolate, STUB, zone);
   code_stub_ = stub;
 }
 
 
+CompilationInfo::CompilationInfo(
+    ScriptCompiler::ExternalSourceStream* stream,
+    ScriptCompiler::StreamedSource::Encoding encoding, Isolate* isolate,
+    Zone* zone)
+    : flags_(kThisHasUses),
+      source_stream_(stream),
+      source_stream_encoding_(encoding),
+      osr_ast_id_(BailoutId::None()),
+      parameter_count_(0),
+      optimization_id_(-1),
+      ast_value_factory_(NULL),
+      ast_value_factory_owned_(false),
+      aborted_due_to_dependency_change_(false) {
+  Initialize(isolate, BASE, zone);
+}
+
+
 void CompilationInfo::Initialize(Isolate* isolate,
                                  Mode mode,
                                  Zone* zone) {
@@ -93,7 +146,7 @@
   global_scope_ = NULL;
   extension_ = NULL;
   cached_data_ = NULL;
-  cached_data_mode_ = NO_CACHED_DATA;
+  compile_options_ = ScriptCompiler::kNoCompileOptions;
   zone_ = zone;
   deferred_handles_ = NULL;
   code_stub_ = NULL;
@@ -109,33 +162,41 @@
     return;
   }
   mode_ = mode;
-  abort_due_to_dependency_ = false;
-  if (script_->type()->value() == Script::TYPE_NATIVE) MarkAsNative();
+  if (!script_.is_null() && script_->type()->value() == Script::TYPE_NATIVE) {
+    MarkAsNative();
+  }
   if (isolate_->debug()->is_active()) MarkAsDebug();
+  if (FLAG_context_specialization) MarkAsContextSpecializing();
+  if (FLAG_turbo_inlining) MarkAsInliningEnabled();
+  if (FLAG_turbo_types) MarkAsTypingEnabled();
 
   if (!shared_info_.is_null()) {
-    ASSERT(strict_mode() == SLOPPY);
+    DCHECK(strict_mode() == SLOPPY);
     SetStrictMode(shared_info_->strict_mode());
   }
-  set_bailout_reason(kUnknown);
+  bailout_reason_ = kUnknown;
 
   if (!shared_info().is_null() && shared_info()->is_compiled()) {
     // We should initialize the CompilationInfo feedback vector from the
     // passed in shared info, rather than creating a new one.
-    feedback_vector_ = Handle<FixedArray>(shared_info()->feedback_vector(),
-                                          isolate);
+    feedback_vector_ =
+        Handle<TypeFeedbackVector>(shared_info()->feedback_vector(), isolate);
   }
 }
 
 
 CompilationInfo::~CompilationInfo() {
+  if (GetFlag(kDisableFutureOptimization)) {
+    shared_info()->DisableOptimization(bailout_reason());
+  }
   delete deferred_handles_;
   delete no_frame_ranges_;
+  if (ast_value_factory_owned_) delete ast_value_factory_;
 #ifdef DEBUG
   // Check that no dependent maps have been added or added dependent maps have
   // been rolled back or committed.
   for (int i = 0; i < DependentCode::kGroupCount; i++) {
-    ASSERT_EQ(NULL, dependencies_[i]);
+    DCHECK_EQ(NULL, dependencies_[i]);
   }
 #endif  // DEBUG
 }
@@ -145,7 +206,7 @@
   for (int i = 0; i < DependentCode::kGroupCount; i++) {
     ZoneList<Handle<HeapObject> >* group_objects = dependencies_[i];
     if (group_objects == NULL) continue;
-    ASSERT(!object_wrapper_.is_null());
+    DCHECK(!object_wrapper_.is_null());
     for (int j = 0; j < group_objects->length(); j++) {
       DependentCode::DependencyGroup group =
           static_cast<DependentCode::DependencyGroup>(i);
@@ -177,7 +238,7 @@
 
 int CompilationInfo::num_parameters() const {
   if (IsStub()) {
-    ASSERT(parameter_count_ > 0);
+    DCHECK(parameter_count_ > 0);
     return parameter_count_;
   } else {
     return scope()->num_parameters();
@@ -206,18 +267,6 @@
 }
 
 
-// Disable optimization for the rest of the compilation pipeline.
-void CompilationInfo::DisableOptimization() {
-  bool is_optimizable_closure =
-    FLAG_optimize_closures &&
-    closure_.is_null() &&
-    !scope_->HasTrivialOuterContext() &&
-    !scope_->outer_scope_calls_sloppy_eval() &&
-    !scope_->inside_with();
-  SetMode(is_optimizable_closure ? BASE : NONOPT);
-}
-
-
 // Primitive functions are unlikely to be picked up by the stack-walking
 // profiler, so they trigger their own optimization when they're called
 // for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
@@ -231,7 +280,7 @@
 
 
 void CompilationInfo::PrepareForCompilation(Scope* scope) {
-  ASSERT(scope_ == NULL);
+  DCHECK(scope_ == NULL);
   scope_ = scope;
 
   int length = function()->slot_count();
@@ -239,7 +288,7 @@
     // Allocate the feedback vector too.
     feedback_vector_ = isolate()->factory()->NewTypeFeedbackVector(length);
   }
-  ASSERT(feedback_vector_->length() == length);
+  DCHECK(feedback_vector_->length() == length);
 }
 
 
@@ -250,7 +299,7 @@
   }
 
 #define DEF_VISIT(type)                                 \
-  virtual void Visit##type(type* node) V8_OVERRIDE {    \
+  virtual void Visit##type(type* node) OVERRIDE {       \
     if (node->position() != RelocInfo::kNoPosition) {   \
       SetSourcePosition(node->position());              \
     }                                                   \
@@ -260,7 +309,7 @@
 #undef DEF_VISIT
 
 #define DEF_VISIT(type)                                          \
-  virtual void Visit##type(type* node) V8_OVERRIDE {             \
+  virtual void Visit##type(type* node) OVERRIDE {                \
     if (node->position() != RelocInfo::kNoPosition) {            \
       SetSourcePosition(node->position());                       \
     }                                                            \
@@ -270,7 +319,7 @@
 #undef DEF_VISIT
 
 #define DEF_VISIT(type)                                            \
-  virtual void Visit##type(type* node) V8_OVERRIDE {               \
+  virtual void Visit##type(type* node) OVERRIDE {                  \
     HOptimizedGraphBuilder::Visit##type(node);                     \
   }
   MODULE_NODE_LIST(DEF_VISIT)
@@ -280,26 +329,17 @@
 
 
 OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
-  ASSERT(isolate()->use_crankshaft());
-  ASSERT(info()->IsOptimizing());
-  ASSERT(!info()->IsCompilingForDebugging());
-
-  // We should never arrive here if there is no code object on the
-  // shared function object.
-  ASSERT(info()->shared_info()->code()->kind() == Code::FUNCTION);
+  DCHECK(isolate()->use_crankshaft());
+  DCHECK(info()->IsOptimizing());
+  DCHECK(!info()->IsCompilingForDebugging());
 
   // We should never arrive here if optimization has been disabled on the
   // shared function info.
-  ASSERT(!info()->shared_info()->optimization_disabled());
-
-  // Fall back to using the full code generator if it's not possible
-  // to use the Hydrogen-based optimizing compiler. We already have
-  // generated code for this from the shared function object.
-  if (FLAG_always_full_compiler) return AbortOptimization();
+  DCHECK(!info()->shared_info()->optimization_disabled());
 
   // Do not use crankshaft if we need to be able to set break points.
   if (isolate()->DebuggerHasBreakPoints()) {
-    return AbortOptimization(kDebuggerHasBreakPoints);
+    return RetryOptimization(kDebuggerHasBreakPoints);
   }
 
   // Limit the number of times we re-compile a function with
@@ -307,7 +347,7 @@
   const int kMaxOptCount =
       FLAG_deopt_every_n_times == 0 ? FLAG_max_opt_count : 1000;
   if (info()->opt_count() > kMaxOptCount) {
-    return AbortAndDisableOptimization(kOptimizedTooManyTimes);
+    return AbortOptimization(kOptimizedTooManyTimes);
   }
 
   // Due to an encoding limit on LUnallocated operands in the Lithium
@@ -320,62 +360,57 @@
   const int parameter_limit = -LUnallocated::kMinFixedSlotIndex;
   Scope* scope = info()->scope();
   if ((scope->num_parameters() + 1) > parameter_limit) {
-    return AbortAndDisableOptimization(kTooManyParameters);
+    return AbortOptimization(kTooManyParameters);
   }
 
   const int locals_limit = LUnallocated::kMaxFixedSlotIndex;
   if (info()->is_osr() &&
       scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit) {
-    return AbortAndDisableOptimization(kTooManyParametersLocals);
+    return AbortOptimization(kTooManyParametersLocals);
   }
 
   if (scope->HasIllegalRedeclaration()) {
-    return AbortAndDisableOptimization(kFunctionWithIllegalRedeclaration);
+    return AbortOptimization(kFunctionWithIllegalRedeclaration);
   }
 
-  // Take --hydrogen-filter into account.
+  // Check the whitelist for Crankshaft.
   if (!info()->closure()->PassesFilter(FLAG_hydrogen_filter)) {
     return AbortOptimization(kHydrogenFilter);
   }
 
+  // Crankshaft requires a version of fullcode with deoptimization support.
   // Recompile the unoptimized version of the code if the current version
-  // doesn't have deoptimization support. Alternatively, we may decide to
-  // run the full code generator to get a baseline for the compile-time
-  // performance of the hydrogen-based compiler.
+  // doesn't have deoptimization support already.
+  // Otherwise, if we are gathering compilation time and space statistics
+  // for hydrogen, gather baseline statistics for a fullcode compilation.
   bool should_recompile = !info()->shared_info()->has_deoptimization_support();
   if (should_recompile || FLAG_hydrogen_stats) {
-    ElapsedTimer timer;
+    base::ElapsedTimer timer;
     if (FLAG_hydrogen_stats) {
       timer.Start();
     }
-    CompilationInfoWithZone unoptimized(info()->shared_info());
-    // Note that we use the same AST that we will use for generating the
-    // optimized code.
-    unoptimized.SetFunction(info()->function());
-    unoptimized.PrepareForCompilation(info()->scope());
-    unoptimized.SetContext(info()->context());
-    if (should_recompile) unoptimized.EnableDeoptimizationSupport();
-    bool succeeded = FullCodeGenerator::MakeCode(&unoptimized);
-    if (should_recompile) {
-      if (!succeeded) return SetLastStatus(FAILED);
-      Handle<SharedFunctionInfo> shared = info()->shared_info();
-      shared->EnableDeoptimizationSupport(*unoptimized.code());
-      // The existing unoptimized code was replaced with the new one.
-      Compiler::RecordFunctionCompilation(
-          Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
+    if (!Compiler::EnsureDeoptimizationSupport(info())) {
+      return SetLastStatus(FAILED);
     }
     if (FLAG_hydrogen_stats) {
       isolate()->GetHStatistics()->IncrementFullCodeGen(timer.Elapsed());
     }
   }
 
-  // Check that the unoptimized, shared code is ready for
-  // optimizations.  When using the always_opt flag we disregard the
-  // optimizable marker in the code object and optimize anyway. This
-  // is safe as long as the unoptimized code has deoptimization
-  // support.
-  ASSERT(FLAG_always_opt || info()->shared_info()->code()->optimizable());
-  ASSERT(info()->shared_info()->has_deoptimization_support());
+  DCHECK(info()->shared_info()->has_deoptimization_support());
+
+  // Check the whitelist for TurboFan.
+  if ((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
+      info()->closure()->PassesFilter(FLAG_turbo_filter)) {
+    compiler::Pipeline pipeline(info());
+    pipeline.GenerateCode();
+    if (!info()->code().is_null()) {
+      if (FLAG_turbo_deoptimization) {
+        info()->context()->native_context()->AddOptimizedCode(*info()->code());
+      }
+      return SetLastStatus(SUCCEEDED);
+    }
+  }
 
   if (FLAG_trace_hydrogen) {
     Handle<String> name = info()->function()->debug_name();
@@ -387,7 +422,7 @@
   // Type-check the function.
   AstTyper::Run(info());
 
-  graph_builder_ = FLAG_hydrogen_track_positions
+  graph_builder_ = (FLAG_hydrogen_track_positions || FLAG_trace_ic)
       ? new(info()->zone()) HOptimizedGraphBuilderWithPositions(info())
       : new(info()->zone()) HOptimizedGraphBuilder(info());
 
@@ -399,20 +434,11 @@
     return SetLastStatus(FAILED);
   }
 
-  // The function being compiled may have bailed out due to an inline
-  // candidate bailing out.  In such a case, we don't disable
-  // optimization on the shared_info.
-  ASSERT(!graph_builder_->inline_bailout() || graph_ == NULL);
-  if (graph_ == NULL) {
-    if (graph_builder_->inline_bailout()) {
-      return AbortOptimization();
-    } else {
-      return AbortAndDisableOptimization();
-    }
-  }
+  if (graph_ == NULL) return SetLastStatus(BAILED_OUT);
 
   if (info()->HasAbortedDueToDependencyChange()) {
-    return AbortOptimization(kBailedOutDueToDependencyChange);
+    // Dependency has changed during graph creation. Let's try again later.
+    return RetryOptimization(kBailedOutDueToDependencyChange);
   }
 
   return SetLastStatus(SUCCEEDED);
@@ -425,9 +451,14 @@
   DisallowHandleDereference no_deref;
   DisallowCodeDependencyChange no_dependency_change;
 
-  ASSERT(last_status() == SUCCEEDED);
+  DCHECK(last_status() == SUCCEEDED);
+  // TODO(turbofan): Currently everything is done in the first phase.
+  if (!info()->code().is_null()) {
+    return last_status();
+  }
+
   Timer t(this, &time_taken_to_optimize_);
-  ASSERT(graph_ != NULL);
+  DCHECK(graph_ != NULL);
   BailoutReason bailout_reason = kNoReason;
 
   if (graph_->Optimize(&bailout_reason)) {
@@ -437,18 +468,25 @@
     graph_builder_->Bailout(bailout_reason);
   }
 
-  return AbortOptimization();
+  return SetLastStatus(BAILED_OUT);
 }
 
 
 OptimizedCompileJob::Status OptimizedCompileJob::GenerateCode() {
-  ASSERT(last_status() == SUCCEEDED);
-  ASSERT(!info()->HasAbortedDueToDependencyChange());
+  DCHECK(last_status() == SUCCEEDED);
+  // TODO(turbofan): Currently everything is done in the first phase.
+  if (!info()->code().is_null()) {
+    RecordOptimizationStats();
+    return last_status();
+  }
+
+  DCHECK(!info()->HasAbortedDueToDependencyChange());
   DisallowCodeDependencyChange no_dependency_change;
+  DisallowJavascriptExecution no_js(isolate());
   {  // Scope for timer.
     Timer timer(this, &time_taken_to_codegen_);
-    ASSERT(chunk_ != NULL);
-    ASSERT(graph_ != NULL);
+    DCHECK(chunk_ != NULL);
+    DCHECK(graph_ != NULL);
     // Deferred handles reference objects that were accessible during
     // graph creation.  To make sure that we don't encounter inconsistencies
     // between graph creation and code generation, we disallow accessing
@@ -457,23 +495,9 @@
     Handle<Code> optimized_code = chunk_->Codegen();
     if (optimized_code.is_null()) {
       if (info()->bailout_reason() == kNoReason) {
-        info_->set_bailout_reason(kCodeGenerationFailed);
-      } else if (info()->bailout_reason() == kMapBecameDeprecated) {
-        if (FLAG_trace_opt) {
-          PrintF("[aborted optimizing ");
-          info()->closure()->ShortPrint();
-          PrintF(" because a map became deprecated]\n");
-        }
-        return AbortOptimization();
-      } else if (info()->bailout_reason() == kMapBecameUnstable) {
-        if (FLAG_trace_opt) {
-          PrintF("[aborted optimizing ");
-          info()->closure()->ShortPrint();
-          PrintF(" because a map became unstable]\n");
-        }
-        return AbortOptimization();
+        return AbortOptimization(kCodeGenerationFailed);
       }
-      return AbortAndDisableOptimization();
+      return SetLastStatus(BAILED_OUT);
     }
     info()->SetCode(optimized_code);
   }
@@ -545,38 +569,6 @@
 }
 
 
-static void UpdateSharedFunctionInfo(CompilationInfo* info) {
-  // Update the shared function info with the compiled code and the
-  // scope info.  Please note, that the order of the shared function
-  // info initialization is important since set_scope_info might
-  // trigger a GC, causing the ASSERT below to be invalid if the code
-  // was flushed. By setting the code object last we avoid this.
-  Handle<SharedFunctionInfo> shared = info->shared_info();
-  Handle<ScopeInfo> scope_info =
-      ScopeInfo::Create(info->scope(), info->zone());
-  shared->set_scope_info(*scope_info);
-
-  Handle<Code> code = info->code();
-  CHECK(code->kind() == Code::FUNCTION);
-  shared->ReplaceCode(*code);
-  if (shared->optimization_disabled()) code->set_optimizable(false);
-
-  shared->set_feedback_vector(*info->feedback_vector());
-
-  // Set the expected number of properties for instances.
-  FunctionLiteral* lit = info->function();
-  int expected = lit->expected_property_count();
-  SetExpectedNofPropertiesFromEstimate(shared, expected);
-
-  // Check the function has compiled code.
-  ASSERT(shared->is_compiled());
-  shared->set_dont_optimize_reason(lit->dont_optimize_reason());
-  shared->set_dont_inline(lit->flags()->Contains(kDontInline));
-  shared->set_ast_node_count(lit->ast_node_count());
-  shared->set_strict_mode(lit->strict_mode());
-}
-
-
 // Sets the function info on a function.
 // The start_position points to the first '(' character after the function name
 // in the full script source. When counting characters in the script source the
@@ -603,18 +595,53 @@
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
   function_info->set_ast_node_count(lit->ast_node_count());
   function_info->set_is_function(lit->is_function());
-  function_info->set_dont_optimize_reason(lit->dont_optimize_reason());
-  function_info->set_dont_inline(lit->flags()->Contains(kDontInline));
+  function_info->set_bailout_reason(lit->dont_optimize_reason());
   function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
-  function_info->set_is_generator(lit->is_generator());
+  function_info->set_kind(lit->kind());
+  function_info->set_asm_function(lit->scope()->asm_function());
+}
+
+
+static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
+                                      CompilationInfo* info,
+                                      Handle<SharedFunctionInfo> shared) {
+  // SharedFunctionInfo is passed separately, because if CompilationInfo
+  // was created using Script object, it will not have it.
+
+  // Log the code generation. If source information is available include
+  // script name and line number. Check explicitly whether logging is
+  // enabled as finding the line number is not free.
+  if (info->isolate()->logger()->is_logging_code_events() ||
+      info->isolate()->cpu_profiler()->is_profiling()) {
+    Handle<Script> script = info->script();
+    Handle<Code> code = info->code();
+    if (code.is_identical_to(info->isolate()->builtins()->CompileLazy())) {
+      return;
+    }
+    int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
+    int column_num =
+        Script::GetColumnNumber(script, shared->start_position()) + 1;
+    String* script_name = script->name()->IsString()
+                              ? String::cast(script->name())
+                              : info->isolate()->heap()->empty_string();
+    Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
+    PROFILE(info->isolate(),
+            CodeCreateEvent(log_tag, *code, *shared, info, script_name,
+                            line_num, column_num));
+  }
+
+  GDBJIT(AddCode(Handle<String>(shared->DebugName()),
+                 Handle<Script>(info->script()), Handle<Code>(info->code()),
+                 info));
 }
 
 
 static bool CompileUnoptimizedCode(CompilationInfo* info) {
-  ASSERT(info->function() != NULL);
+  DCHECK(AllowCompilation::IsAllowed(info->isolate()));
+  DCHECK(info->function() != NULL);
   if (!Rewriter::Rewrite(info)) return false;
   if (!Scope::Analyze(info)) return false;
-  ASSERT(info->scope() != NULL);
+  DCHECK(info->scope() != NULL);
 
   if (!FullCodeGenerator::MakeCode(info)) {
     Isolate* isolate = info->isolate();
@@ -629,420 +656,42 @@
     CompilationInfo* info) {
   VMState<COMPILER> state(info->isolate());
   PostponeInterruptsScope postpone(info->isolate());
-  if (!Parser::Parse(info)) return MaybeHandle<Code>();
-  info->SetStrictMode(info->function()->strict_mode());
 
+  // Parse and update CompilationInfo with the results.
+  if (!Parser::Parse(info)) return MaybeHandle<Code>();
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  FunctionLiteral* lit = info->function();
+  shared->set_strict_mode(lit->strict_mode());
+  SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
+  shared->set_bailout_reason(lit->dont_optimize_reason());
+  shared->set_ast_node_count(lit->ast_node_count());
+
+  // Compile unoptimized code.
   if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
-  Compiler::RecordFunctionCompilation(
-      Logger::LAZY_COMPILE_TAG, info, info->shared_info());
-  UpdateSharedFunctionInfo(info);
-  ASSERT_EQ(Code::FUNCTION, info->code()->kind());
+
+  CHECK_EQ(Code::FUNCTION, info->code()->kind());
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
+
+  // Update the shared function info with the scope info. Allocating the
+  // ScopeInfo object may cause a GC.
+  Handle<ScopeInfo> scope_info = ScopeInfo::Create(info->scope(), info->zone());
+  shared->set_scope_info(*scope_info);
+
+  // Update the code and feedback vector for the shared function info.
+  shared->ReplaceCode(*info->code());
+  if (shared->optimization_disabled()) info->code()->set_optimizable(false);
+  shared->set_feedback_vector(*info->feedback_vector());
+
   return info->code();
 }
 
 
-MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
-  ASSERT(!function->GetIsolate()->has_pending_exception());
-  ASSERT(!function->is_compiled());
-  if (function->shared()->is_compiled()) {
-    return Handle<Code>(function->shared()->code());
-  }
-
-  CompilationInfoWithZone info(function);
-  Handle<Code> result;
-  ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
-                             GetUnoptimizedCodeCommon(&info),
-                             Code);
-
-  if (FLAG_always_opt &&
-      info.isolate()->use_crankshaft() &&
-      !info.shared_info()->optimization_disabled() &&
-      !info.isolate()->DebuggerHasBreakPoints()) {
-    Handle<Code> opt_code;
-    if (Compiler::GetOptimizedCode(
-            function, result,
-            Compiler::NOT_CONCURRENT).ToHandle(&opt_code)) {
-      result = opt_code;
-    }
-  }
-
-  return result;
-}
-
-
-MaybeHandle<Code> Compiler::GetUnoptimizedCode(
-    Handle<SharedFunctionInfo> shared) {
-  ASSERT(!shared->GetIsolate()->has_pending_exception());
-  ASSERT(!shared->is_compiled());
-
-  CompilationInfoWithZone info(shared);
-  return GetUnoptimizedCodeCommon(&info);
-}
-
-
-bool Compiler::EnsureCompiled(Handle<JSFunction> function,
-                              ClearExceptionFlag flag) {
-  if (function->is_compiled()) return true;
-  MaybeHandle<Code> maybe_code = Compiler::GetUnoptimizedCode(function);
-  Handle<Code> code;
-  if (!maybe_code.ToHandle(&code)) {
-    if (flag == CLEAR_EXCEPTION) {
-      function->GetIsolate()->clear_pending_exception();
-    }
-    return false;
-  }
-  function->ReplaceCode(*code);
-  ASSERT(function->is_compiled());
-  return true;
-}
-
-
-// Compile full code for debugging. This code will have debug break slots
-// and deoptimization information. Deoptimization information is required
-// in case that an optimized version of this function is still activated on
-// the stack. It will also make sure that the full code is compiled with
-// the same flags as the previous version, that is flags which can change
-// the code generated. The current method of mapping from already compiled
-// full code without debug break slots to full code with debug break slots
-// depends on the generated code is otherwise exactly the same.
-// If compilation fails, just keep the existing code.
-MaybeHandle<Code> Compiler::GetCodeForDebugging(Handle<JSFunction> function) {
-  CompilationInfoWithZone info(function);
-  Isolate* isolate = info.isolate();
-  VMState<COMPILER> state(isolate);
-
-  info.MarkAsDebug();
-
-  ASSERT(!isolate->has_pending_exception());
-  Handle<Code> old_code(function->shared()->code());
-  ASSERT(old_code->kind() == Code::FUNCTION);
-  ASSERT(!old_code->has_debug_break_slots());
-
-  info.MarkCompilingForDebugging();
-  if (old_code->is_compiled_optimizable()) {
-    info.EnableDeoptimizationSupport();
-  } else {
-    info.MarkNonOptimizable();
-  }
-  MaybeHandle<Code> maybe_new_code = GetUnoptimizedCodeCommon(&info);
-  Handle<Code> new_code;
-  if (!maybe_new_code.ToHandle(&new_code)) {
-    isolate->clear_pending_exception();
-  } else {
-    ASSERT_EQ(old_code->is_compiled_optimizable(),
-              new_code->is_compiled_optimizable());
-  }
-  return maybe_new_code;
-}
-
-
-void Compiler::CompileForLiveEdit(Handle<Script> script) {
-  // TODO(635): support extensions.
-  CompilationInfoWithZone info(script);
-  PostponeInterruptsScope postpone(info.isolate());
-  VMState<COMPILER> state(info.isolate());
-
-  info.MarkAsGlobal();
-  if (!Parser::Parse(&info)) return;
-  info.SetStrictMode(info.function()->strict_mode());
-
-  LiveEditFunctionTracker tracker(info.isolate(), info.function());
-  if (!CompileUnoptimizedCode(&info)) return;
-  if (!info.shared_info().is_null()) {
-    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info.scope(),
-                                                     info.zone());
-    info.shared_info()->set_scope_info(*scope_info);
-  }
-  tracker.RecordRootFunctionInfo(info.code());
-}
-
-
-static bool DebuggerWantsEagerCompilation(CompilationInfo* info,
-                                          bool allow_lazy_without_ctx = false) {
-  return LiveEditFunctionTracker::IsActive(info->isolate()) ||
-         (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
-}
-
-
-static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
-  Isolate* isolate = info->isolate();
-  PostponeInterruptsScope postpone(isolate);
-  ASSERT(!isolate->native_context().is_null());
-  Handle<Script> script = info->script();
-
-  // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
-  FixedArray* array = isolate->native_context()->embedder_data();
-  script->set_context_data(array->get(0));
-
-  isolate->debug()->OnBeforeCompile(script);
-
-  ASSERT(info->is_eval() || info->is_global());
-
-  bool parse_allow_lazy =
-      (info->cached_data_mode() == CONSUME_CACHED_DATA ||
-       String::cast(script->source())->length() > FLAG_min_preparse_length) &&
-      !DebuggerWantsEagerCompilation(info);
-
-  if (!parse_allow_lazy && info->cached_data_mode() != NO_CACHED_DATA) {
-    // We are going to parse eagerly, but we either 1) have cached data produced
-    // by lazy parsing or 2) are asked to generate cached data. We cannot use
-    // the existing data, since it won't contain all the symbols we need for
-    // eager parsing. In addition, it doesn't make sense to produce the data
-    // when parsing eagerly. That data would contain all symbols, but no
-    // functions, so it cannot be used to aid lazy parsing later.
-    info->SetCachedData(NULL, NO_CACHED_DATA);
-  }
-
-  Handle<SharedFunctionInfo> result;
-
-  { VMState<COMPILER> state(info->isolate());
-    if (!Parser::Parse(info, parse_allow_lazy)) {
-      return Handle<SharedFunctionInfo>::null();
-    }
-
-    FunctionLiteral* lit = info->function();
-    LiveEditFunctionTracker live_edit_tracker(isolate, lit);
-
-    // Measure how long it takes to do the compilation; only take the
-    // rest of the function into account to avoid overlap with the
-    // parsing statistics.
-    HistogramTimer* rate = info->is_eval()
-          ? info->isolate()->counters()->compile_eval()
-          : info->isolate()->counters()->compile();
-    HistogramTimerScope timer(rate);
-
-    // Compile the code.
-    if (!CompileUnoptimizedCode(info)) {
-      return Handle<SharedFunctionInfo>::null();
-    }
-
-    // Allocate function.
-    ASSERT(!info->code().is_null());
-    result = isolate->factory()->NewSharedFunctionInfo(
-        lit->name(),
-        lit->materialized_literal_count(),
-        lit->is_generator(),
-        info->code(),
-        ScopeInfo::Create(info->scope(), info->zone()),
-        info->feedback_vector());
-
-    ASSERT_EQ(RelocInfo::kNoPosition, lit->function_token_position());
-    SetFunctionInfo(result, lit, true, script);
-
-    Handle<String> script_name = script->name()->IsString()
-        ? Handle<String>(String::cast(script->name()))
-        : isolate->factory()->empty_string();
-    Logger::LogEventsAndTags log_tag = info->is_eval()
-        ? Logger::EVAL_TAG
-        : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
-
-    PROFILE(isolate, CodeCreateEvent(
-                log_tag, *info->code(), *result, info, *script_name));
-    GDBJIT(AddCode(script_name, script, info->code(), info));
-
-    // Set the expected number of properties on instances of the function;
-    // the runtime system uses this hint when allocating space for the
-    // initial properties.
-    SetExpectedNofPropertiesFromEstimate(result,
-                                         lit->expected_property_count());
-
-    script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
-
-    live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
-  }
-
-  isolate->debug()->OnAfterCompile(script, Debug::NO_AFTER_COMPILE_FLAGS);
-
-  return result;
-}
-
-
-MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
-    Handle<String> source,
-    Handle<Context> context,
-    StrictMode strict_mode,
-    ParseRestriction restriction,
-    int scope_position) {
-  Isolate* isolate = source->GetIsolate();
-  int source_length = source->length();
-  isolate->counters()->total_eval_size()->Increment(source_length);
-  isolate->counters()->total_compile_size()->Increment(source_length);
-
-  CompilationCache* compilation_cache = isolate->compilation_cache();
-  MaybeHandle<SharedFunctionInfo> maybe_shared_info =
-      compilation_cache->LookupEval(source, context, strict_mode,
-                                    scope_position);
-  Handle<SharedFunctionInfo> shared_info;
-
-  if (!maybe_shared_info.ToHandle(&shared_info)) {
-    Handle<Script> script = isolate->factory()->NewScript(source);
-    CompilationInfoWithZone info(script);
-    info.MarkAsEval();
-    if (context->IsNativeContext()) info.MarkAsGlobal();
-    info.SetStrictMode(strict_mode);
-    info.SetParseRestriction(restriction);
-    info.SetContext(context);
-
-    Debug::RecordEvalCaller(script);
-
-    shared_info = CompileToplevel(&info);
-
-    if (shared_info.is_null()) {
-      return MaybeHandle<JSFunction>();
-    } else {
-      // Explicitly disable optimization for eval code. We're not yet prepared
-      // to handle eval-code in the optimizing compiler.
-      shared_info->DisableOptimization(kEval);
-
-      // If caller is strict mode, the result must be in strict mode as well.
-      ASSERT(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT);
-      if (!shared_info->dont_cache()) {
-        compilation_cache->PutEval(
-            source, context, shared_info, scope_position);
-      }
-    }
-  } else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
-    shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
-  }
-
-  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
-      shared_info, context, NOT_TENURED);
-}
-
-
-Handle<SharedFunctionInfo> Compiler::CompileScript(
-    Handle<String> source,
-    Handle<Object> script_name,
-    int line_offset,
-    int column_offset,
-    bool is_shared_cross_origin,
-    Handle<Context> context,
-    v8::Extension* extension,
-    ScriptData** cached_data,
-    CachedDataMode cached_data_mode,
-    NativesFlag natives) {
-  if (cached_data_mode == NO_CACHED_DATA) {
-    cached_data = NULL;
-  } else if (cached_data_mode == PRODUCE_CACHED_DATA) {
-    ASSERT(cached_data && !*cached_data);
-  } else {
-    ASSERT(cached_data_mode == CONSUME_CACHED_DATA);
-    ASSERT(cached_data && *cached_data);
-  }
-  Isolate* isolate = source->GetIsolate();
-  int source_length = source->length();
-  isolate->counters()->total_load_size()->Increment(source_length);
-  isolate->counters()->total_compile_size()->Increment(source_length);
-
-  CompilationCache* compilation_cache = isolate->compilation_cache();
-
-  // Do a lookup in the compilation cache but not for extensions.
-  MaybeHandle<SharedFunctionInfo> maybe_result;
-  Handle<SharedFunctionInfo> result;
-  if (extension == NULL) {
-    maybe_result = compilation_cache->LookupScript(
-        source, script_name, line_offset, column_offset,
-        is_shared_cross_origin, context);
-  }
-
-  if (!maybe_result.ToHandle(&result)) {
-    // No cache entry found. Compile the script.
-
-    // Create a script object describing the script to be compiled.
-    Handle<Script> script = isolate->factory()->NewScript(source);
-    if (natives == NATIVES_CODE) {
-      script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
-    }
-    if (!script_name.is_null()) {
-      script->set_name(*script_name);
-      script->set_line_offset(Smi::FromInt(line_offset));
-      script->set_column_offset(Smi::FromInt(column_offset));
-    }
-    script->set_is_shared_cross_origin(is_shared_cross_origin);
-
-    // Compile the function and add it to the cache.
-    CompilationInfoWithZone info(script);
-    info.MarkAsGlobal();
-    info.SetExtension(extension);
-    info.SetCachedData(cached_data, cached_data_mode);
-    info.SetContext(context);
-    if (FLAG_use_strict) info.SetStrictMode(STRICT);
-    result = CompileToplevel(&info);
-    if (extension == NULL && !result.is_null() && !result->dont_cache()) {
-      compilation_cache->PutScript(source, context, result);
-    }
-    if (result.is_null()) isolate->ReportPendingMessages();
-  } else if (result->ic_age() != isolate->heap()->global_ic_age()) {
-      result->ResetForNewContext(isolate->heap()->global_ic_age());
-  }
-  return result;
-}
-
-
-Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
-                                                       Handle<Script> script) {
-  // Precondition: code has been parsed and scopes have been analyzed.
-  CompilationInfoWithZone info(script);
-  info.SetFunction(literal);
-  info.PrepareForCompilation(literal->scope());
-  info.SetStrictMode(literal->scope()->strict_mode());
-
-  Isolate* isolate = info.isolate();
-  Factory* factory = isolate->factory();
-  LiveEditFunctionTracker live_edit_tracker(isolate, literal);
-  // Determine if the function can be lazily compiled. This is necessary to
-  // allow some of our builtin JS files to be lazily compiled. These
-  // builtins cannot be handled lazily by the parser, since we have to know
-  // if a function uses the special natives syntax, which is something the
-  // parser records.
-  // If the debugger requests compilation for break points, we cannot be
-  // aggressive about lazy compilation, because it might trigger compilation
-  // of functions without an outer context when setting a breakpoint through
-  // Debug::FindSharedFunctionInfoInScript.
-  bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
-  bool allow_lazy = literal->AllowsLazyCompilation() &&
-      !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
-
-  // Generate code
-  Handle<ScopeInfo> scope_info;
-  if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
-    Handle<Code> code = isolate->builtins()->CompileUnoptimized();
-    info.SetCode(code);
-    scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
-  } else if (FullCodeGenerator::MakeCode(&info)) {
-    ASSERT(!info.code().is_null());
-    scope_info = ScopeInfo::Create(info.scope(), info.zone());
-  } else {
-    return Handle<SharedFunctionInfo>::null();
-  }
-
-  // Create a shared function info object.
-  Handle<SharedFunctionInfo> result =
-      factory->NewSharedFunctionInfo(literal->name(),
-                                     literal->materialized_literal_count(),
-                                     literal->is_generator(),
-                                     info.code(),
-                                     scope_info,
-                                     info.feedback_vector());
-  SetFunctionInfo(result, literal, false, script);
-  RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
-  result->set_allows_lazy_compilation(allow_lazy);
-  result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
-
-  // Set the expected number of properties for instances and return
-  // the resulting function.
-  SetExpectedNofPropertiesFromEstimate(result,
-                                       literal->expected_property_count());
-  live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
-  return result;
-}
-
-
 MUST_USE_RESULT static MaybeHandle<Code> GetCodeFromOptimizedCodeMap(
-    Handle<JSFunction> function,
-    BailoutId osr_ast_id) {
+    Handle<JSFunction> function, BailoutId osr_ast_id) {
   if (FLAG_cache_optimized_code) {
     Handle<SharedFunctionInfo> shared(function->shared());
+    // Bound functions are not cached.
+    if (shared->bound()) return MaybeHandle<Code>();
     DisallowHeapAllocation no_gc;
     int index = shared->SearchOptimizedCodeMap(
         function->context()->native_context(), osr_ast_id);
@@ -1068,25 +717,28 @@
   Handle<Code> code = info->code();
   if (code->kind() != Code::OPTIMIZED_FUNCTION) return;  // Nothing to do.
 
+  // Context specialization folds in the context, so no sharing can occur.
+  if (code->is_turbofanned() && info->is_context_specializing()) return;
+
   // Cache optimized code.
   if (FLAG_cache_optimized_code) {
     Handle<JSFunction> function = info->closure();
     Handle<SharedFunctionInfo> shared(function->shared());
+    // Do not cache bound functions.
+    if (shared->bound()) return;
     Handle<FixedArray> literals(function->literals());
     Handle<Context> native_context(function->context()->native_context());
-    SharedFunctionInfo::AddToOptimizedCodeMap(
-        shared, native_context, code, literals, info->osr_ast_id());
+    SharedFunctionInfo::AddToOptimizedCodeMap(shared, native_context, code,
+                                              literals, info->osr_ast_id());
   }
 }
 
 
 static bool CompileOptimizedPrologue(CompilationInfo* info) {
   if (!Parser::Parse(info)) return false;
-  info->SetStrictMode(info->function()->strict_mode());
-
   if (!Rewriter::Rewrite(info)) return false;
   if (!Scope::Analyze(info)) return false;
-  ASSERT(info->scope() != NULL);
+  DCHECK(info->scope() != NULL);
   return true;
 }
 
@@ -1094,19 +746,30 @@
 static bool GetOptimizedCodeNow(CompilationInfo* info) {
   if (!CompileOptimizedPrologue(info)) return false;
 
-  Logger::TimerEventScope timer(
-      info->isolate(), Logger::TimerEventScope::v8_recompile_synchronous);
+  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
 
   OptimizedCompileJob job(info);
-  if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED) return false;
-  if (job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED) return false;
-  if (job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) return false;
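+  // The job proceeds in three phases: CreateGraph builds the compiler
+  // graph, OptimizeGraph runs the optimization passes, and GenerateCode
+  // emits machine code. Here all three run synchronously; the concurrent
+  // path below queues the job on the optimizing compiler thread after
+  // CreateGraph succeeds.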
+  if (job.CreateGraph() != OptimizedCompileJob::SUCCEEDED ||
+      job.OptimizeGraph() != OptimizedCompileJob::SUCCEEDED ||
+      job.GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
+    if (FLAG_trace_opt) {
+      PrintF("[aborted optimizing ");
+      info->closure()->ShortPrint();
+      PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
+    }
+    return false;
+  }
 
   // Success!
-  ASSERT(!info->isolate()->has_pending_exception());
+  DCHECK(!info->isolate()->has_pending_exception());
   InsertCodeIntoOptimizedCodeMap(info);
-  Compiler::RecordFunctionCompilation(
-      Logger::LAZY_COMPILE_TAG, info, info->shared_info());
+  RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
+                            info->shared_info());
+  if (FLAG_trace_opt) {
+    PrintF("[completed optimizing ");
+    info->closure()->ShortPrint();
+    PrintF("]\n");
+  }
   return true;
 }
 
@@ -1116,7 +779,7 @@
   if (!isolate->optimizing_compiler_thread()->IsQueueAvailable()) {
     if (FLAG_trace_concurrent_recompilation) {
       PrintF("  ** Compilation queue full, will retry optimizing ");
-      info->closure()->PrintName();
+      info->closure()->ShortPrint();
       PrintF(" later.\n");
     }
     return false;
@@ -1126,17 +789,16 @@
   if (!CompileOptimizedPrologue(info)) return false;
   info->SaveHandles();  // Copy handles to the compilation handle scope.
 
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_recompile_synchronous);
+  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
 
-  OptimizedCompileJob* job = new(info->zone()) OptimizedCompileJob(info);
+  OptimizedCompileJob* job = new (info->zone()) OptimizedCompileJob(info);
   OptimizedCompileJob::Status status = job->CreateGraph();
   if (status != OptimizedCompileJob::SUCCEEDED) return false;
   isolate->optimizing_compiler_thread()->QueueForOptimization(job);
 
   if (FLAG_trace_concurrent_recompilation) {
     PrintF("  ** Queued ");
-     info->closure()->PrintName();
+    info->closure()->ShortPrint();
     if (info->is_osr()) {
       PrintF(" for concurrent OSR at %d.\n", info->osr_ast_id().ToInt());
     } else {
@@ -1147,6 +809,497 @@
 }
 
 
+MaybeHandle<Code> Compiler::GetUnoptimizedCode(Handle<JSFunction> function) {
+  DCHECK(!function->GetIsolate()->has_pending_exception());
+  DCHECK(!function->is_compiled());
+  if (function->shared()->is_compiled()) {
+    return Handle<Code>(function->shared()->code());
+  }
+
+  CompilationInfoWithZone info(function);
+  Handle<Code> result;
+  ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
+                             GetUnoptimizedCodeCommon(&info),
+                             Code);
+  return result;
+}
+
+
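+// Lazy compilation entry point. With --turbo-asm, asm.js functions are
+// optimized immediately (context-specializing, typing enabled, inlining
+// disabled); all other functions get unoptimized code first, optionally
+// followed by eager optimization when --always-opt is set.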
+MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
+  DCHECK(!function->GetIsolate()->has_pending_exception());
+  DCHECK(!function->is_compiled());
+
+  if (FLAG_turbo_asm && function->shared()->asm_function()) {
+    CompilationInfoWithZone info(function);
+
+    VMState<COMPILER> state(info.isolate());
+    PostponeInterruptsScope postpone(info.isolate());
+
+    info.SetOptimizing(BailoutId::None(),
+                       Handle<Code>(function->shared()->code()));
+
+    info.MarkAsContextSpecializing();
+    info.MarkAsTypingEnabled();
+    info.MarkAsInliningDisabled();
+
+    if (GetOptimizedCodeNow(&info)) return info.code();
+  }
+
+  if (function->shared()->is_compiled()) {
+    return Handle<Code>(function->shared()->code());
+  }
+
+  CompilationInfoWithZone info(function);
+  Handle<Code> result;
+  ASSIGN_RETURN_ON_EXCEPTION(info.isolate(), result,
+                             GetUnoptimizedCodeCommon(&info), Code);
+
+  if (FLAG_always_opt &&
+      info.isolate()->use_crankshaft() &&
+      !info.shared_info()->optimization_disabled() &&
+      !info.isolate()->DebuggerHasBreakPoints()) {
+    Handle<Code> opt_code;
+    if (Compiler::GetOptimizedCode(
+            function, result,
+            Compiler::NOT_CONCURRENT).ToHandle(&opt_code)) {
+      result = opt_code;
+    }
+  }
+
+  return result;
+}
+
+
+MaybeHandle<Code> Compiler::GetUnoptimizedCode(
+    Handle<SharedFunctionInfo> shared) {
+  DCHECK(!shared->GetIsolate()->has_pending_exception());
+  DCHECK(!shared->is_compiled());
+
+  CompilationInfoWithZone info(shared);
+  return GetUnoptimizedCodeCommon(&info);
+}
+
+
+bool Compiler::EnsureCompiled(Handle<JSFunction> function,
+                              ClearExceptionFlag flag) {
+  if (function->is_compiled()) return true;
+  MaybeHandle<Code> maybe_code = Compiler::GetLazyCode(function);
+  Handle<Code> code;
+  if (!maybe_code.ToHandle(&code)) {
+    if (flag == CLEAR_EXCEPTION) {
+      function->GetIsolate()->clear_pending_exception();
+    }
+    return false;
+  }
+  function->ReplaceCode(*code);
+  DCHECK(function->is_compiled());
+  return true;
+}
+
+
+// TODO(turbofan): In the future, unoptimized code with deopt support could
+// be generated lazily once deopt is triggered.
+bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
+  if (!info->shared_info()->has_deoptimization_support()) {
+    CompilationInfoWithZone unoptimized(info->shared_info());
+    // Note that we use the same AST that we will use for generating the
+    // optimized code.
+    unoptimized.SetFunction(info->function());
+    unoptimized.PrepareForCompilation(info->scope());
+    unoptimized.SetContext(info->context());
+    unoptimized.EnableDeoptimizationSupport();
+    if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
+
+    Handle<SharedFunctionInfo> shared = info->shared_info();
+    shared->EnableDeoptimizationSupport(*unoptimized.code());
+    shared->set_feedback_vector(*unoptimized.feedback_vector());
+
+    // The scope info might not have been set if a lazily compiled
+    // function is inlined before being called for the first time.
+    if (shared->scope_info() == ScopeInfo::Empty(info->isolate())) {
+      Handle<ScopeInfo> target_scope_info =
+          ScopeInfo::Create(info->scope(), info->zone());
+      shared->set_scope_info(*target_scope_info);
+    }
+
+    // The existing unoptimized code was replaced with the new one.
+    RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, &unoptimized, shared);
+  }
+  return true;
+}
+
+
+// Compile full code for debugging. This code will have debug break slots
+// and deoptimization information. Deoptimization information is required
+// in case an optimized version of this function is still activated on
+// the stack. It will also make sure that the full code is compiled with
+// the same flags as the previous version, that is, flags which can change
+// the generated code. The current method of mapping from already compiled
+// full code without debug break slots to full code with debug break slots
+// depends on the generated code being otherwise exactly the same.
+// If compilation fails, just keep the existing code.
+MaybeHandle<Code> Compiler::GetDebugCode(Handle<JSFunction> function) {
+  CompilationInfoWithZone info(function);
+  Isolate* isolate = info.isolate();
+  VMState<COMPILER> state(isolate);
+
+  info.MarkAsDebug();
+
+  DCHECK(!isolate->has_pending_exception());
+  Handle<Code> old_code(function->shared()->code());
+  DCHECK(old_code->kind() == Code::FUNCTION);
+  DCHECK(!old_code->has_debug_break_slots());
+
+  info.MarkCompilingForDebugging();
+  if (old_code->is_compiled_optimizable()) {
+    info.EnableDeoptimizationSupport();
+  } else {
+    info.MarkNonOptimizable();
+  }
+  MaybeHandle<Code> maybe_new_code = GetUnoptimizedCodeCommon(&info);
+  Handle<Code> new_code;
+  if (!maybe_new_code.ToHandle(&new_code)) {
+    isolate->clear_pending_exception();
+  } else {
+    DCHECK_EQ(old_code->is_compiled_optimizable(),
+              new_code->is_compiled_optimizable());
+  }
+  return maybe_new_code;
+}
+
+
+void Compiler::CompileForLiveEdit(Handle<Script> script) {
+  // TODO(635): support extensions.
+  CompilationInfoWithZone info(script);
+  PostponeInterruptsScope postpone(info.isolate());
+  VMState<COMPILER> state(info.isolate());
+
+  info.MarkAsGlobal();
+  if (!Parser::Parse(&info)) return;
+
+  LiveEditFunctionTracker tracker(info.isolate(), info.function());
+  if (!CompileUnoptimizedCode(&info)) return;
+  if (!info.shared_info().is_null()) {
+    Handle<ScopeInfo> scope_info = ScopeInfo::Create(info.scope(),
+                                                     info.zone());
+    info.shared_info()->set_scope_info(*scope_info);
+  }
+  tracker.RecordRootFunctionInfo(info.code());
+}
+
+
+static Handle<SharedFunctionInfo> CompileToplevel(CompilationInfo* info) {
+  Isolate* isolate = info->isolate();
+  PostponeInterruptsScope postpone(isolate);
+  DCHECK(!isolate->native_context().is_null());
+  Handle<Script> script = info->script();
+
+  // TODO(svenpanne) Obscure place for this, perhaps move to OnBeforeCompile?
+  FixedArray* array = isolate->native_context()->embedder_data();
+  script->set_context_data(array->get(0));
+
+  isolate->debug()->OnBeforeCompile(script);
+
+  DCHECK(info->is_eval() || info->is_global());
+
+  Handle<SharedFunctionInfo> result;
+
+  { VMState<COMPILER> state(info->isolate());
+    if (info->function() == NULL) {
+      // Parse the script if needed (if it's already parsed, function() is
+      // non-NULL).
+      bool parse_allow_lazy =
+          (info->compile_options() == ScriptCompiler::kConsumeParserCache ||
+           String::cast(script->source())->length() >
+               FLAG_min_preparse_length) &&
+          !Compiler::DebuggerWantsEagerCompilation(info);
+
+      if (!parse_allow_lazy &&
+          (info->compile_options() == ScriptCompiler::kProduceParserCache ||
+           info->compile_options() == ScriptCompiler::kConsumeParserCache)) {
+        // We are going to parse eagerly, but we either 1) have cached data
+        // produced by lazy parsing or 2) are asked to generate cached data.
+        // Eager parsing cannot benefit from cached data, and producing cached
+        // data while parsing eagerly is not implemented.
+        info->SetCachedData(NULL, ScriptCompiler::kNoCompileOptions);
+      }
+      if (!Parser::Parse(info, parse_allow_lazy)) {
+        return Handle<SharedFunctionInfo>::null();
+      }
+    }
+
+    FunctionLiteral* lit = info->function();
+    LiveEditFunctionTracker live_edit_tracker(isolate, lit);
+
+    // Measure how long it takes to do the compilation; only take the
+    // rest of the function into account to avoid overlap with the
+    // parsing statistics.
+    HistogramTimer* rate = info->is_eval()
+          ? info->isolate()->counters()->compile_eval()
+          : info->isolate()->counters()->compile();
+    HistogramTimerScope timer(rate);
+
+    // Compile the code.
+    if (!CompileUnoptimizedCode(info)) {
+      return Handle<SharedFunctionInfo>::null();
+    }
+
+    // Allocate function.
+    DCHECK(!info->code().is_null());
+    result = isolate->factory()->NewSharedFunctionInfo(
+        lit->name(), lit->materialized_literal_count(), lit->kind(),
+        info->code(), ScopeInfo::Create(info->scope(), info->zone()),
+        info->feedback_vector());
+
+    DCHECK_EQ(RelocInfo::kNoPosition, lit->function_token_position());
+    SetFunctionInfo(result, lit, true, script);
+
+    Handle<String> script_name = script->name()->IsString()
+        ? Handle<String>(String::cast(script->name()))
+        : isolate->factory()->empty_string();
+    Logger::LogEventsAndTags log_tag = info->is_eval()
+        ? Logger::EVAL_TAG
+        : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script);
+
+    PROFILE(isolate, CodeCreateEvent(
+                log_tag, *info->code(), *result, info, *script_name));
+    GDBJIT(AddCode(script_name, script, info->code(), info));
+
+    // Set the expected number of properties on instances of the function;
+    // the runtime system uses this hint when allocating space for the
+    // initial properties.
+    SetExpectedNofPropertiesFromEstimate(result,
+                                         lit->expected_property_count());
+
+    if (!script.is_null())
+      script->set_compilation_state(Script::COMPILATION_STATE_COMPILED);
+
+    live_edit_tracker.RecordFunctionInfo(result, lit, info->zone());
+  }
+
+  isolate->debug()->OnAfterCompile(script);
+
+  return result;
+}
+
+
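+// Eval code is cached per (source, outer shared info, calling context,
+// strict mode, scope position); on a cache hit only a fresh JSFunction
+// needs to be materialized from the cached SharedFunctionInfo.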
+MaybeHandle<JSFunction> Compiler::GetFunctionFromEval(
+    Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+    Handle<Context> context, StrictMode strict_mode,
+    ParseRestriction restriction, int scope_position) {
+  Isolate* isolate = source->GetIsolate();
+  int source_length = source->length();
+  isolate->counters()->total_eval_size()->Increment(source_length);
+  isolate->counters()->total_compile_size()->Increment(source_length);
+
+  CompilationCache* compilation_cache = isolate->compilation_cache();
+  MaybeHandle<SharedFunctionInfo> maybe_shared_info =
+      compilation_cache->LookupEval(source, outer_info, context, strict_mode,
+                                    scope_position);
+  Handle<SharedFunctionInfo> shared_info;
+
+  if (!maybe_shared_info.ToHandle(&shared_info)) {
+    Handle<Script> script = isolate->factory()->NewScript(source);
+    CompilationInfoWithZone info(script);
+    info.MarkAsEval();
+    if (context->IsNativeContext()) info.MarkAsGlobal();
+    info.SetStrictMode(strict_mode);
+    info.SetParseRestriction(restriction);
+    info.SetContext(context);
+
+    Debug::RecordEvalCaller(script);
+
+    shared_info = CompileToplevel(&info);
+
+    if (shared_info.is_null()) {
+      return MaybeHandle<JSFunction>();
+    } else {
+      // Explicitly disable optimization for eval code. We're not yet prepared
+      // to handle eval-code in the optimizing compiler.
+      shared_info->DisableOptimization(kEval);
+
+      // If caller is strict mode, the result must be in strict mode as well.
+      DCHECK(strict_mode == SLOPPY || shared_info->strict_mode() == STRICT);
+      if (!shared_info->dont_cache()) {
+        compilation_cache->PutEval(source, outer_info, context, shared_info,
+                                   scope_position);
+      }
+    }
+  } else if (shared_info->ic_age() != isolate->heap()->global_ic_age()) {
+    shared_info->ResetForNewContext(isolate->heap()->global_ic_age());
+  }
+
+  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+      shared_info, context, NOT_TENURED);
+}
+
+
+Handle<SharedFunctionInfo> Compiler::CompileScript(
+    Handle<String> source, Handle<Object> script_name, int line_offset,
+    int column_offset, bool is_shared_cross_origin, Handle<Context> context,
+    v8::Extension* extension, ScriptData** cached_data,
+    ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
+  if (compile_options == ScriptCompiler::kNoCompileOptions) {
+    cached_data = NULL;
+  } else if (compile_options == ScriptCompiler::kProduceParserCache ||
+             compile_options == ScriptCompiler::kProduceCodeCache) {
+    DCHECK(cached_data && !*cached_data);
+    DCHECK(extension == NULL);
+  } else {
+    DCHECK(compile_options == ScriptCompiler::kConsumeParserCache ||
+           compile_options == ScriptCompiler::kConsumeCodeCache);
+    DCHECK(cached_data && *cached_data);
+    DCHECK(extension == NULL);
+  }
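+  // In short: cached_data is in use only when producing or consuming a
+  // parser or code cache, and caching is mutually exclusive with
+  // extensions.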
+  Isolate* isolate = source->GetIsolate();
+  int source_length = source->length();
+  isolate->counters()->total_load_size()->Increment(source_length);
+  isolate->counters()->total_compile_size()->Increment(source_length);
+
+  CompilationCache* compilation_cache = isolate->compilation_cache();
+
+  // Do a lookup in the compilation cache but not for extensions.
+  MaybeHandle<SharedFunctionInfo> maybe_result;
+  Handle<SharedFunctionInfo> result;
+  if (extension == NULL) {
+    if (FLAG_serialize_toplevel &&
+        compile_options == ScriptCompiler::kConsumeCodeCache &&
+        !isolate->debug()->is_loaded()) {
+      HistogramTimerScope timer(isolate->counters()->compile_deserialize());
+      return CodeSerializer::Deserialize(isolate, *cached_data, source);
+    } else {
+      maybe_result = compilation_cache->LookupScript(
+          source, script_name, line_offset, column_offset,
+          is_shared_cross_origin, context);
+    }
+  }
+
+  base::ElapsedTimer timer;
+  if (FLAG_profile_deserialization && FLAG_serialize_toplevel &&
+      compile_options == ScriptCompiler::kProduceCodeCache) {
+    timer.Start();
+  }
+
+  if (!maybe_result.ToHandle(&result)) {
+    // No cache entry found. Compile the script.
+
+    // Create a script object describing the script to be compiled.
+    Handle<Script> script = isolate->factory()->NewScript(source);
+    if (natives == NATIVES_CODE) {
+      script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
+    }
+    if (!script_name.is_null()) {
+      script->set_name(*script_name);
+      script->set_line_offset(Smi::FromInt(line_offset));
+      script->set_column_offset(Smi::FromInt(column_offset));
+    }
+    script->set_is_shared_cross_origin(is_shared_cross_origin);
+
+    // Compile the function and add it to the cache.
+    CompilationInfoWithZone info(script);
+    info.MarkAsGlobal();
+    info.SetCachedData(cached_data, compile_options);
+    info.SetExtension(extension);
+    info.SetContext(context);
+    if (FLAG_serialize_toplevel &&
+        compile_options == ScriptCompiler::kProduceCodeCache) {
+      info.PrepareForSerializing();
+    }
+    if (FLAG_use_strict) info.SetStrictMode(STRICT);
+
+    result = CompileToplevel(&info);
+    if (extension == NULL && !result.is_null() && !result->dont_cache()) {
+      compilation_cache->PutScript(source, context, result);
+      if (FLAG_serialize_toplevel &&
+          compile_options == ScriptCompiler::kProduceCodeCache) {
+        HistogramTimerScope histogram_timer(
+            isolate->counters()->compile_serialize());
+        *cached_data = CodeSerializer::Serialize(isolate, result, source);
+        if (FLAG_profile_deserialization) {
+          PrintF("[Compiling and serializing %d bytes took %0.3f ms]\n",
+                 (*cached_data)->length(), timer.Elapsed().InMillisecondsF());
+        }
+      }
+    }
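+    // Producing a code cache thus adds one serialization pass over the
+    // freshly compiled top-level code, timed under compile_serialize.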
+
+    if (result.is_null()) isolate->ReportPendingMessages();
+  } else if (result->ic_age() != isolate->heap()->global_ic_age()) {
+    result->ResetForNewContext(isolate->heap()->global_ic_age());
+  }
+  return result;
+}
+
+
+Handle<SharedFunctionInfo> Compiler::CompileStreamedScript(
+    CompilationInfo* info, int source_length) {
+  Isolate* isolate = info->isolate();
+  isolate->counters()->total_load_size()->Increment(source_length);
+  isolate->counters()->total_compile_size()->Increment(source_length);
+
+  if (FLAG_use_strict) info->SetStrictMode(STRICT);
+  // TODO(marja): FLAG_serialize_toplevel is not honoured and won't be; when the
+  // real code caching lands, streaming needs to be adapted to use it.
+  return CompileToplevel(info);
+}
+
+
+Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
+    FunctionLiteral* literal, Handle<Script> script,
+    CompilationInfo* outer_info) {
+  // Precondition: code has been parsed and scopes have been analyzed.
+  CompilationInfoWithZone info(script);
+  info.SetFunction(literal);
+  info.PrepareForCompilation(literal->scope());
+  info.SetStrictMode(literal->scope()->strict_mode());
+  if (outer_info->will_serialize()) info.PrepareForSerializing();
+
+  Isolate* isolate = info.isolate();
+  Factory* factory = isolate->factory();
+  LiveEditFunctionTracker live_edit_tracker(isolate, literal);
+  // Determine if the function can be lazily compiled. This is necessary to
+  // allow some of our builtin JS files to be lazily compiled. These
+  // builtins cannot be handled lazily by the parser, since we have to know
+  // if a function uses the special natives syntax, which is something the
+  // parser records.
+  // If the debugger requests compilation for break points, we cannot be
+  // aggressive about lazy compilation, because it might trigger compilation
+  // of functions without an outer context when setting a breakpoint through
+  // Debug::FindSharedFunctionInfoInScript.
+  bool allow_lazy_without_ctx = literal->AllowsLazyCompilationWithoutContext();
+  bool allow_lazy = literal->AllowsLazyCompilation() &&
+      !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
+
+  // Generate code
+  Handle<ScopeInfo> scope_info;
+  if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
+    Handle<Code> code = isolate->builtins()->CompileLazy();
+    info.SetCode(code);
+    scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
+  } else if (FullCodeGenerator::MakeCode(&info)) {
+    DCHECK(!info.code().is_null());
+    scope_info = ScopeInfo::Create(info.scope(), info.zone());
+  } else {
+    return Handle<SharedFunctionInfo>::null();
+  }
+
+  // Create a shared function info object.
+  Handle<SharedFunctionInfo> result = factory->NewSharedFunctionInfo(
+      literal->name(), literal->materialized_literal_count(), literal->kind(),
+      info.code(), scope_info, info.feedback_vector());
+  SetFunctionInfo(result, literal, false, script);
+  RecordFunctionCompilation(Logger::FUNCTION_TAG, &info, result);
+  result->set_allows_lazy_compilation(allow_lazy);
+  result->set_allows_lazy_compilation_without_context(allow_lazy_without_ctx);
+
+  // Set the expected number of properties for instances and return
+  // the resulting function.
+  SetExpectedNofPropertiesFromEstimate(result,
+                                       literal->expected_property_count());
+  live_edit_tracker.RecordFunctionInfo(result, literal, info.zone());
+  return result;
+}
+
+
 MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
                                              Handle<Code> current_code,
                                              ConcurrencyMode mode,
@@ -1159,14 +1312,23 @@
 
   SmartPointer<CompilationInfo> info(new CompilationInfoWithZone(function));
   Isolate* isolate = info->isolate();
+  DCHECK(AllowCompilation::IsAllowed(isolate));
   VMState<COMPILER> state(isolate);
-  ASSERT(!isolate->has_pending_exception());
+  DCHECK(!isolate->has_pending_exception());
   PostponeInterruptsScope postpone(isolate);
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
-  ASSERT_NE(ScopeInfo::Empty(isolate), shared->scope_info());
-  int compiled_size = shared->end_position() - shared->start_position();
-  isolate->counters()->total_compile_size()->Increment(compiled_size);
+  if (shared->code()->kind() != Code::FUNCTION ||
+      ScopeInfo::Empty(isolate) == shared->scope_info()) {
+    // The function was never compiled. Compile it unoptimized first.
+    // TODO(titzer): reuse the AST and scope info from this compile.
+    CompilationInfoWithZone nested(function);
+    nested.EnableDeoptimizationSupport();
+    if (!GetUnoptimizedCodeCommon(&nested).ToHandle(&current_code)) {
+      return MaybeHandle<Code>();
+    }
+    shared->ReplaceCode(*current_code);
+  }
   current_code->set_profiler_ticks(0);
 
   info->SetOptimizing(osr_ast_id, current_code);
@@ -1180,13 +1342,6 @@
     if (GetOptimizedCodeNow(info.get())) return info->code();
   }
 
-  // Failed.
-  if (FLAG_trace_opt) {
-    PrintF("[failed to optimize ");
-    function->PrintName();
-    PrintF(": %s]\n", GetBailoutReason(info->bailout_reason()));
-  }
-
   if (isolate->has_pending_exception()) isolate->clear_pending_exception();
   return MaybeHandle<Code>();
 }
@@ -1199,78 +1354,53 @@
   Isolate* isolate = info->isolate();
 
   VMState<COMPILER> state(isolate);
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_recompile_synchronous);
+  TimerEventScope<TimerEventRecompileSynchronous> timer(info->isolate());
 
   Handle<SharedFunctionInfo> shared = info->shared_info();
   shared->code()->set_profiler_ticks(0);
 
-  // 1) Optimization may have failed.
+  // 1) Optimization on the concurrent thread may have failed.
   // 2) The function may have already been optimized by OSR.  Simply continue.
   //    Except when OSR already disabled optimization for some reason.
   // 3) The code may have already been invalidated due to dependency change.
   // 4) Debugger may have been activated.
-
-  if (job->last_status() != OptimizedCompileJob::SUCCEEDED ||
-      shared->optimization_disabled() ||
-      info->HasAbortedDueToDependencyChange() ||
-      isolate->DebuggerHasBreakPoints()) {
-    return Handle<Code>::null();
+  // 5) Code generation may have failed.
+  if (job->last_status() == OptimizedCompileJob::SUCCEEDED) {
+    if (shared->optimization_disabled()) {
+      job->RetryOptimization(kOptimizationDisabled);
+    } else if (info->HasAbortedDueToDependencyChange()) {
+      job->RetryOptimization(kBailedOutDueToDependencyChange);
+    } else if (isolate->DebuggerHasBreakPoints()) {
+      job->RetryOptimization(kDebuggerHasBreakPoints);
+    } else if (job->GenerateCode() == OptimizedCompileJob::SUCCEEDED) {
+      RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info.get(), shared);
+      if (info->shared_info()->SearchOptimizedCodeMap(
+              info->context()->native_context(), info->osr_ast_id()) == -1) {
+        InsertCodeIntoOptimizedCodeMap(info.get());
+      }
+      if (FLAG_trace_opt) {
+        PrintF("[completed optimizing ");
+        info->closure()->ShortPrint();
+        PrintF("]\n");
+      }
+      return Handle<Code>(*info->code());
+    }
   }
 
-  if (job->GenerateCode() != OptimizedCompileJob::SUCCEEDED) {
-    return Handle<Code>::null();
+  DCHECK(job->last_status() != OptimizedCompileJob::SUCCEEDED);
+  if (FLAG_trace_opt) {
+    PrintF("[aborted optimizing ");
+    info->closure()->ShortPrint();
+    PrintF(" because: %s]\n", GetBailoutReason(info->bailout_reason()));
   }
-
-  Compiler::RecordFunctionCompilation(
-      Logger::LAZY_COMPILE_TAG, info.get(), shared);
-  if (info->shared_info()->SearchOptimizedCodeMap(
-          info->context()->native_context(), info->osr_ast_id()) == -1) {
-    InsertCodeIntoOptimizedCodeMap(info.get());
-  }
-
-  if (FLAG_trace_concurrent_recompilation) {
-    PrintF("  ** Optimized code for ");
-    info->closure()->PrintName();
-    PrintF(" generated.\n");
-  }
-
-  return Handle<Code>(*info->code());
+  return Handle<Code>::null();
 }
 
 
-void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
-                                         CompilationInfo* info,
-                                         Handle<SharedFunctionInfo> shared) {
-  // SharedFunctionInfo is passed separately, because if CompilationInfo
-  // was created using Script object, it will not have it.
-
-  // Log the code generation. If source information is available include
-  // script name and line number. Check explicitly whether logging is
-  // enabled as finding the line number is not free.
-  if (info->isolate()->logger()->is_logging_code_events() ||
-      info->isolate()->cpu_profiler()->is_profiling()) {
-    Handle<Script> script = info->script();
-    Handle<Code> code = info->code();
-    if (code.is_identical_to(
-            info->isolate()->builtins()->CompileUnoptimized())) {
-      return;
-    }
-    int line_num = Script::GetLineNumber(script, shared->start_position()) + 1;
-    int column_num =
-        Script::GetColumnNumber(script, shared->start_position()) + 1;
-    String* script_name = script->name()->IsString()
-        ? String::cast(script->name())
-        : info->isolate()->heap()->empty_string();
-    Logger::LogEventsAndTags log_tag = Logger::ToNativeByScript(tag, *script);
-    PROFILE(info->isolate(), CodeCreateEvent(
-        log_tag, *code, *shared, info, script_name, line_num, column_num));
-  }
-
-  GDBJIT(AddCode(Handle<String>(shared->DebugName()),
-                 Handle<Script>(info->script()),
-                 Handle<Code>(info->code()),
-                 info));
+bool Compiler::DebuggerWantsEagerCompilation(CompilationInfo* info,
+                                             bool allow_lazy_without_ctx) {
+  return LiveEditFunctionTracker::IsActive(info->isolate()) ||
+         (info->isolate()->DebuggerHasBreakPoints() && !allow_lazy_without_ctx);
 }
 
 
@@ -1301,7 +1431,7 @@
       : (FLAG_trace_hydrogen &&
          info()->closure()->PassesFilter(FLAG_trace_hydrogen_filter));
   return (tracing_on &&
-      OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
+      base::OS::StrChr(const_cast<char*>(FLAG_trace_phase), name_[0]) != NULL);
 }
 
 } }  // namespace v8::internal
diff --git a/src/compiler.h b/src/compiler.h
index 6531474..f950ef7 100644
--- a/src/compiler.h
+++ b/src/compiler.h
@@ -7,12 +7,13 @@
 
 #include "src/allocation.h"
 #include "src/ast.h"
+#include "src/bailout-reason.h"
 #include "src/zone.h"
 
 namespace v8 {
 namespace internal {
 
-class ScriptData;
+class AstValueFactory;
 class HydrogenCodeStub;
 
 // ParseRestriction is used to restrict the set of valid statements in a
@@ -22,23 +23,72 @@
   ONLY_SINGLE_FUNCTION_LITERAL  // Only a single FunctionLiteral expression.
 };
 
-enum CachedDataMode {
-  NO_CACHED_DATA,
-  CONSUME_CACHED_DATA,
-  PRODUCE_CACHED_DATA
-};
-
 struct OffsetRange {
   OffsetRange(int from, int to) : from(from), to(to) {}
   int from;
   int to;
 };
 
+
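+// ScriptData wraps the byte buffer backing cached parser or code data.
+// Ownership is explicit: the destructor frees the buffer only while
+// owns_data_ is set, and Acquire/ReleaseDataOwnership hand that
+// responsibility back and forth.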
+class ScriptData {
+ public:
+  ScriptData(const byte* data, int length);
+  ~ScriptData() {
+    if (owns_data_) DeleteArray(data_);
+  }
+
+  const byte* data() const { return data_; }
+  int length() const { return length_; }
+
+  void AcquireDataOwnership() {
+    DCHECK(!owns_data_);
+    owns_data_ = true;
+  }
+
+  void ReleaseDataOwnership() {
+    DCHECK(owns_data_);
+    owns_data_ = false;
+  }
+
+ private:
+  bool owns_data_;
+  const byte* data_;
+  int length_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScriptData);
+};
+
 // CompilationInfo encapsulates some information known at compile time.  It
 // is constructed based on the resources available at compile-time.
 class CompilationInfo {
  public:
+  // Various configuration flags for a compilation, as well as some properties
+  // of the compiled code produced by a compilation.
+  enum Flag {
+    kLazy = 1 << 0,
+    kEval = 1 << 1,
+    kGlobal = 1 << 2,
+    kStrictMode = 1 << 3,
+    kThisHasUses = 1 << 4,
+    kNative = 1 << 5,
+    kDeferredCalling = 1 << 6,
+    kNonDeferredCalling = 1 << 7,
+    kSavesCallerDoubles = 1 << 8,
+    kRequiresFrame = 1 << 9,
+    kMustNotHaveEagerFrame = 1 << 10,
+    kDeoptimizationSupport = 1 << 11,
+    kDebug = 1 << 12,
+    kCompilingForDebugging = 1 << 13,
+    kParseRestriction = 1 << 14,
+    kSerializing = 1 << 15,
+    kContextSpecializing = 1 << 16,
+    kInliningEnabled = 1 << 17,
+    kTypingEnabled = 1 << 18,
+    kDisableFutureOptimization = 1 << 19
+  };
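+
+  // Each Flag value occupies a single bit: SetFlag() ors a flag into the
+  // unsigned flags_ word, and GetFlag() tests it with a mask (see the
+  // private helpers below). E.g. MarkAsEval() sets kEval and is_eval()
+  // reads it back.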
+
   CompilationInfo(Handle<JSFunction> closure, Zone* zone);
+  CompilationInfo(Isolate* isolate, Zone* zone);
   virtual ~CompilationInfo();
 
   Isolate* isolate() const {
@@ -46,10 +96,12 @@
   }
   Zone* zone() { return zone_; }
   bool is_osr() const { return !osr_ast_id_.IsNone(); }
-  bool is_lazy() const { return IsLazy::decode(flags_); }
-  bool is_eval() const { return IsEval::decode(flags_); }
-  bool is_global() const { return IsGlobal::decode(flags_); }
-  StrictMode strict_mode() const { return StrictModeField::decode(flags_); }
+  bool is_lazy() const { return GetFlag(kLazy); }
+  bool is_eval() const { return GetFlag(kEval); }
+  bool is_global() const { return GetFlag(kGlobal); }
+  StrictMode strict_mode() const {
+    return GetFlag(kStrictMode) ? STRICT : SLOPPY;
+  }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
   Scope* global_scope() const { return global_scope_; }
@@ -57,11 +109,18 @@
   Handle<JSFunction> closure() const { return closure_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   Handle<Script> script() const { return script_; }
+  void set_script(Handle<Script> script) { script_ = script; }
   HydrogenCodeStub* code_stub() const {return code_stub_; }
   v8::Extension* extension() const { return extension_; }
   ScriptData** cached_data() const { return cached_data_; }
-  CachedDataMode cached_data_mode() const {
-    return cached_data_mode_;
+  ScriptCompiler::CompileOptions compile_options() const {
+    return compile_options_;
+  }
+  ScriptCompiler::ExternalSourceStream* source_stream() const {
+    return source_stream_;
+  }
+  ScriptCompiler::StreamedSource::Encoding source_stream_encoding() const {
+    return source_stream_encoding_;
   }
   Handle<Context> context() const { return context_; }
   BailoutId osr_ast_id() const { return osr_ast_id_; }
@@ -72,124 +131,120 @@
   Code::Flags flags() const;
 
   void MarkAsEval() {
-    ASSERT(!is_lazy());
-    flags_ |= IsEval::encode(true);
+    DCHECK(!is_lazy());
+    SetFlag(kEval);
   }
+
   void MarkAsGlobal() {
-    ASSERT(!is_lazy());
-    flags_ |= IsGlobal::encode(true);
+    DCHECK(!is_lazy());
+    SetFlag(kGlobal);
   }
+
   void set_parameter_count(int parameter_count) {
-    ASSERT(IsStub());
+    DCHECK(IsStub());
     parameter_count_ = parameter_count;
   }
 
   void set_this_has_uses(bool has_no_uses) {
-    this_has_uses_ = has_no_uses;
-  }
-  bool this_has_uses() {
-    return this_has_uses_;
-  }
-  void SetStrictMode(StrictMode strict_mode) {
-    ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
-    flags_ = StrictModeField::update(flags_, strict_mode);
-  }
-  void MarkAsNative() {
-    flags_ |= IsNative::encode(true);
+    SetFlag(kThisHasUses, has_no_uses);
   }
 
-  bool is_native() const {
-    return IsNative::decode(flags_);
+  bool this_has_uses() { return GetFlag(kThisHasUses); }
+
+  void SetStrictMode(StrictMode strict_mode) {
+    SetFlag(kStrictMode, strict_mode == STRICT);
   }
 
+  void MarkAsNative() { SetFlag(kNative); }
+
+  bool is_native() const { return GetFlag(kNative); }
+
   bool is_calling() const {
-    return is_deferred_calling() || is_non_deferred_calling();
+    return GetFlag(kDeferredCalling) || GetFlag(kNonDeferredCalling);
   }
 
-  void MarkAsDeferredCalling() {
-    flags_ |= IsDeferredCalling::encode(true);
-  }
+  void MarkAsDeferredCalling() { SetFlag(kDeferredCalling); }
 
-  bool is_deferred_calling() const {
-    return IsDeferredCalling::decode(flags_);
-  }
+  bool is_deferred_calling() const { return GetFlag(kDeferredCalling); }
 
-  void MarkAsNonDeferredCalling() {
-    flags_ |= IsNonDeferredCalling::encode(true);
-  }
+  void MarkAsNonDeferredCalling() { SetFlag(kNonDeferredCalling); }
 
-  bool is_non_deferred_calling() const {
-    return IsNonDeferredCalling::decode(flags_);
-  }
+  bool is_non_deferred_calling() const { return GetFlag(kNonDeferredCalling); }
 
-  void MarkAsSavesCallerDoubles() {
-    flags_ |= SavesCallerDoubles::encode(true);
-  }
+  void MarkAsSavesCallerDoubles() { SetFlag(kSavesCallerDoubles); }
 
-  bool saves_caller_doubles() const {
-    return SavesCallerDoubles::decode(flags_);
-  }
+  bool saves_caller_doubles() const { return GetFlag(kSavesCallerDoubles); }
 
-  void MarkAsRequiresFrame() {
-    flags_ |= RequiresFrame::encode(true);
-  }
+  void MarkAsRequiresFrame() { SetFlag(kRequiresFrame); }
 
-  bool requires_frame() const {
-    return RequiresFrame::decode(flags_);
-  }
+  bool requires_frame() const { return GetFlag(kRequiresFrame); }
 
-  void MarkMustNotHaveEagerFrame() {
-    flags_ |= MustNotHaveEagerFrame::encode(true);
-  }
+  void MarkMustNotHaveEagerFrame() { SetFlag(kMustNotHaveEagerFrame); }
 
   bool GetMustNotHaveEagerFrame() const {
-    return MustNotHaveEagerFrame::decode(flags_);
+    return GetFlag(kMustNotHaveEagerFrame);
   }
 
-  void MarkAsDebug() {
-    flags_ |= IsDebug::encode(true);
-  }
+  void MarkAsDebug() { SetFlag(kDebug); }
 
-  bool is_debug() const {
-    return IsDebug::decode(flags_);
-  }
+  bool is_debug() const { return GetFlag(kDebug); }
+
+  void PrepareForSerializing() { SetFlag(kSerializing); }
+
+  bool will_serialize() const { return GetFlag(kSerializing); }
+
+  void MarkAsContextSpecializing() { SetFlag(kContextSpecializing); }
+
+  bool is_context_specializing() const { return GetFlag(kContextSpecializing); }
+
+  void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
+
+  void MarkAsInliningDisabled() { SetFlag(kInliningEnabled, false); }
+
+  bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
+
+  void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
+
+  bool is_typing_enabled() const { return GetFlag(kTypingEnabled); }
 
   bool IsCodePreAgingActive() const {
-    return FLAG_optimize_for_size && FLAG_age_code && !is_debug();
+    return FLAG_optimize_for_size && FLAG_age_code && !will_serialize() &&
+           !is_debug();
   }
 
   void SetParseRestriction(ParseRestriction restriction) {
-    flags_ = ParseRestricitonField::update(flags_, restriction);
+    SetFlag(kParseRestriction, restriction != NO_PARSE_RESTRICTION);
   }
 
   ParseRestriction parse_restriction() const {
-    return ParseRestricitonField::decode(flags_);
+    return GetFlag(kParseRestriction) ? ONLY_SINGLE_FUNCTION_LITERAL
+                                      : NO_PARSE_RESTRICTION;
   }
 
   void SetFunction(FunctionLiteral* literal) {
-    ASSERT(function_ == NULL);
+    DCHECK(function_ == NULL);
     function_ = literal;
   }
   void PrepareForCompilation(Scope* scope);
   void SetGlobalScope(Scope* global_scope) {
-    ASSERT(global_scope_ == NULL);
+    DCHECK(global_scope_ == NULL);
     global_scope_ = global_scope;
   }
-  Handle<FixedArray> feedback_vector() const {
+  Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
   void SetCode(Handle<Code> code) { code_ = code; }
   void SetExtension(v8::Extension* extension) {
-    ASSERT(!is_lazy());
+    DCHECK(!is_lazy());
     extension_ = extension;
   }
   void SetCachedData(ScriptData** cached_data,
-                     CachedDataMode cached_data_mode) {
-    cached_data_mode_ = cached_data_mode;
-    if (cached_data_mode == NO_CACHED_DATA) {
+                     ScriptCompiler::CompileOptions compile_options) {
+    compile_options_ = compile_options;
+    if (compile_options == ScriptCompiler::kNoCompileOptions) {
       cached_data_ = NULL;
     } else {
-      ASSERT(!is_lazy());
+      DCHECK(!is_lazy());
       cached_data_ = cached_data;
     }
   }
@@ -197,12 +252,8 @@
     context_ = context;
   }
 
-  void MarkCompilingForDebugging() {
-    flags_ |= IsCompilingForDebugging::encode(true);
-  }
-  bool IsCompilingForDebugging() {
-    return IsCompilingForDebugging::decode(flags_);
-  }
+  void MarkCompilingForDebugging() { SetFlag(kCompilingForDebugging); }
+  bool IsCompilingForDebugging() { return GetFlag(kCompilingForDebugging); }
   void MarkNonOptimizable() {
     SetMode(CompilationInfo::NONOPT);
   }
@@ -226,28 +277,27 @@
   bool IsOptimizable() const { return mode_ == BASE; }
   bool IsStub() const { return mode_ == STUB; }
   void SetOptimizing(BailoutId osr_ast_id, Handle<Code> unoptimized) {
-    ASSERT(!shared_info_.is_null());
+    DCHECK(!shared_info_.is_null());
     SetMode(OPTIMIZE);
     osr_ast_id_ = osr_ast_id;
     unoptimized_code_ = unoptimized;
     optimization_id_ = isolate()->NextOptimizationId();
   }
-  void DisableOptimization();
 
   // Deoptimization support.
   bool HasDeoptimizationSupport() const {
-    return SupportsDeoptimization::decode(flags_);
+    return GetFlag(kDeoptimizationSupport);
   }
   void EnableDeoptimizationSupport() {
-    ASSERT(IsOptimizable());
-    flags_ |= SupportsDeoptimization::encode(true);
+    DCHECK(IsOptimizable());
+    SetFlag(kDeoptimizationSupport);
   }
 
   // Determines whether or not to insert a self-optimization header.
   bool ShouldSelfOptimize();
 
   void set_deferred_handles(DeferredHandles* deferred_handles) {
-    ASSERT(deferred_handles_ == NULL);
+    DCHECK(deferred_handles_ == NULL);
     deferred_handles_ = deferred_handles;
   }
 
@@ -271,16 +321,24 @@
     SaveHandle(&unoptimized_code_);
   }
 
+  void AbortOptimization(BailoutReason reason) {
+    // Keep only the first recorded bailout reason.
+    if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
+    SetFlag(kDisableFutureOptimization);
+  }
+
+  void RetryOptimization(BailoutReason reason) {
+    if (bailout_reason_ == kNoReason) bailout_reason_ = reason;
+  }
+
   BailoutReason bailout_reason() const { return bailout_reason_; }
-  void set_bailout_reason(BailoutReason reason) { bailout_reason_ = reason; }
 
   int prologue_offset() const {
-    ASSERT_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
+    DCHECK_NE(Code::kPrologueOffsetNotSet, prologue_offset_);
     return prologue_offset_;
   }
 
   void set_prologue_offset(int prologue_offset) {
-    ASSERT_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
+    DCHECK_EQ(Code::kPrologueOffsetNotSet, prologue_offset_);
     prologue_offset_ = prologue_offset;
   }
 
@@ -306,13 +364,13 @@
   }
 
   void AbortDueToDependencyChange() {
-    ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
-    abort_due_to_dependency_ = true;
+    DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
+    aborted_due_to_dependency_change_ = true;
   }
 
-  bool HasAbortedDueToDependencyChange() {
-    ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
-    return abort_due_to_dependency_;
+  bool HasAbortedDueToDependencyChange() const {
+    DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
+    return aborted_due_to_dependency_change_;
   }
 
   bool HasSameOsrEntry(Handle<JSFunction> function, BailoutId osr_ast_id) {
@@ -321,6 +379,15 @@
 
   int optimization_id() const { return optimization_id_; }
 
+  AstValueFactory* ast_value_factory() const { return ast_value_factory_; }
+  void SetAstValueFactory(AstValueFactory* ast_value_factory,
+                          bool owned = true) {
+    ast_value_factory_ = ast_value_factory;
+    ast_value_factory_owned_ = owned;
+  }
+
+  AstNode::IdGen* ast_node_id_gen() { return &ast_node_id_gen_; }
+
  protected:
   CompilationInfo(Handle<Script> script,
                   Zone* zone);
@@ -329,6 +396,10 @@
   CompilationInfo(HydrogenCodeStub* stub,
                   Isolate* isolate,
                   Zone* zone);
+  CompilationInfo(ScriptCompiler::ExternalSourceStream* source_stream,
+                  ScriptCompiler::StreamedSource::Encoding encoding,
+                  Isolate* isolate, Zone* zone);
+
 
  private:
   Isolate* isolate_;
@@ -348,43 +419,16 @@
   void Initialize(Isolate* isolate, Mode mode, Zone* zone);
 
   void SetMode(Mode mode) {
-    ASSERT(isolate()->use_crankshaft());
     mode_ = mode;
   }
 
-  // Flags using template class BitField<type, start, length>.  All are
-  // false by default.
-  //
-  // Compilation is either eager or lazy.
-  class IsLazy:   public BitField<bool, 0, 1> {};
-  // Flags that can be set for eager compilation.
-  class IsEval:   public BitField<bool, 1, 1> {};
-  class IsGlobal: public BitField<bool, 2, 1> {};
-  // If the function is being compiled for the debugger.
-  class IsDebug: public BitField<bool, 3, 1> {};
-  // Strict mode - used in eager compilation.
-  class StrictModeField: public BitField<StrictMode, 4, 1> {};
-  // Is this a function from our natives.
-  class IsNative: public BitField<bool, 5, 1> {};
-  // Is this code being compiled with support for deoptimization.
-  class SupportsDeoptimization: public BitField<bool, 6, 1> {};
-  // If compiling for debugging produce just full code matching the
-  // initial mode setting.
-  class IsCompilingForDebugging: public BitField<bool, 7, 1> {};
-  // If the compiled code contains calls that require building a frame
-  class IsCalling: public BitField<bool, 8, 1> {};
-  // If the compiled code contains calls that require building a frame
-  class IsDeferredCalling: public BitField<bool, 9, 1> {};
-  // If the compiled code contains calls that require building a frame
-  class IsNonDeferredCalling: public BitField<bool, 10, 1> {};
-  // If the compiled code saves double caller registers that it clobbers.
-  class SavesCallerDoubles: public BitField<bool, 11, 1> {};
-  // If the set of valid statements is restricted.
-  class ParseRestricitonField: public BitField<ParseRestriction, 12, 1> {};
-  // If the function requires a frame (for unspecified reasons)
-  class RequiresFrame: public BitField<bool, 13, 1> {};
-  // If the function cannot build a frame (for unspecified reasons)
-  class MustNotHaveEagerFrame: public BitField<bool, 14, 1> {};
+  void SetFlag(Flag flag) { flags_ |= flag; }
+
+  void SetFlag(Flag flag, bool value) {
+    flags_ = value ? flags_ | flag : flags_ & ~flag;
+  }
+
+  bool GetFlag(Flag flag) const { return (flags_ & flag) != 0; }
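+
+  // Usage sketch (illustrative only): with a Flag enumerator such as
+  // kDisableFutureOptimization (used in AbortOptimization above),
+  //
+  //   SetFlag(kDisableFutureOptimization);         // set the bit
+  //   SetFlag(kDisableFutureOptimization, false);  // clear the bit
+  //   if (GetFlag(kDisableFutureOptimization)) { /* ... */ }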
 
   unsigned flags_;
 
@@ -405,18 +449,20 @@
   Handle<JSFunction> closure_;
   Handle<SharedFunctionInfo> shared_info_;
   Handle<Script> script_;
+  ScriptCompiler::ExternalSourceStream* source_stream_;  // Not owned.
+  ScriptCompiler::StreamedSource::Encoding source_stream_encoding_;
 
   // Fields possibly needed for eager compilation, NULL by default.
   v8::Extension* extension_;
   ScriptData** cached_data_;
-  CachedDataMode cached_data_mode_;
+  ScriptCompiler::CompileOptions compile_options_;
 
   // The context of the caller for eval code, and the global context for a
   // global script. Will be a null handle otherwise.
   Handle<Context> context_;
 
   // Used by codegen, ultimately kept rooted by the SharedFunctionInfo.
-  Handle<FixedArray> feedback_vector_;
+  Handle<TypeFeedbackVector> feedback_vector_;
 
   // Compilation mode flag and whether deoptimization is allowed.
   Mode mode_;
@@ -426,9 +472,6 @@
   // data.  Keep track which code we patched.
   Handle<Code> unoptimized_code_;
 
-  // Flag whether compilation needs to be aborted due to dependency change.
-  bool abort_due_to_dependency_;
-
   // The zone from which the compilation pipeline working on this
   // CompilationInfo allocates.
   Zone* zone_;
@@ -458,12 +501,18 @@
   // Number of parameters used for compilation of stubs that require arguments.
   int parameter_count_;
 
-  bool this_has_uses_;
-
   Handle<Foreign> object_wrapper_;
 
   int optimization_id_;
 
+  AstValueFactory* ast_value_factory_;
+  bool ast_value_factory_owned_;
+  AstNode::IdGen ast_node_id_gen_;
+
+  // This flag is used by the main thread to track whether this compilation
+  // should be abandoned due to a dependency change.
+  bool aborted_due_to_dependency_change_;
+
   DISALLOW_COPY_AND_ASSIGN(CompilationInfo);
 };
 
@@ -484,6 +533,10 @@
   CompilationInfoWithZone(HydrogenCodeStub* stub, Isolate* isolate)
       : CompilationInfo(stub, isolate, &zone_),
         zone_(isolate) {}
+  CompilationInfoWithZone(ScriptCompiler::ExternalSourceStream* stream,
+                          ScriptCompiler::StreamedSource::Encoding encoding,
+                          Isolate* isolate)
+      : CompilationInfo(stream, encoding, isolate, &zone_), zone_(isolate) {}
 
   // Virtual destructor because a CompilationInfoWithZone has to exit the
   // zone scope and get rid of dependent maps even when the destructor is
@@ -546,23 +599,18 @@
   CompilationInfo* info() const { return info_; }
   Isolate* isolate() const { return info()->isolate(); }
 
-  MUST_USE_RESULT Status AbortOptimization(
-      BailoutReason reason = kNoReason) {
-    if (reason != kNoReason) info_->set_bailout_reason(reason);
+  Status RetryOptimization(BailoutReason reason) {
+    info_->RetryOptimization(reason);
     return SetLastStatus(BAILED_OUT);
   }
 
-  MUST_USE_RESULT Status AbortAndDisableOptimization(
-      BailoutReason reason = kNoReason) {
-    if (reason != kNoReason) info_->set_bailout_reason(reason);
-    // Reference to shared function info does not change between phases.
-    AllowDeferredHandleDereference allow_handle_dereference;
-    info_->shared_info()->DisableOptimization(info_->bailout_reason());
+  Status AbortOptimization(BailoutReason reason) {
+    info_->AbortOptimization(reason);
     return SetLastStatus(BAILED_OUT);
   }
 
   void WaitForInstall() {
-    ASSERT(info_->is_osr());
+    DCHECK(info_->is_osr());
     awaiting_install_ = true;
   }
 
@@ -573,9 +621,9 @@
   HOptimizedGraphBuilder* graph_builder_;
   HGraph* graph_;
   LChunk* chunk_;
-  TimeDelta time_taken_to_create_graph_;
-  TimeDelta time_taken_to_optimize_;
-  TimeDelta time_taken_to_codegen_;
+  base::TimeDelta time_taken_to_create_graph_;
+  base::TimeDelta time_taken_to_optimize_;
+  base::TimeDelta time_taken_to_codegen_;
   Status last_status_;
   bool awaiting_install_;
 
@@ -586,9 +634,9 @@
   void RecordOptimizationStats();
 
   struct Timer {
-    Timer(OptimizedCompileJob* job, TimeDelta* location)
+    Timer(OptimizedCompileJob* job, base::TimeDelta* location)
         : job_(job), location_(location) {
-      ASSERT(location_ != NULL);
+      DCHECK(location_ != NULL);
       timer_.Start();
     }
 
@@ -597,8 +645,8 @@
     }
 
     OptimizedCompileJob* job_;
-    ElapsedTimer timer_;
-    TimeDelta* location_;
+    base::ElapsedTimer timer_;
+    base::TimeDelta* location_;
   };
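+
+  // Usage sketch (illustrative): a pipeline phase scopes a Timer on the
+  // stack so the elapsed time is accumulated into the matching counter when
+  // the Timer is destroyed, e.g.
+  //
+  //   { Timer t(this, &time_taken_to_optimize_); /* run the phase */ }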
 };
 
@@ -618,39 +666,41 @@
  public:
   MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
       Handle<JSFunction> function);
+  MUST_USE_RESULT static MaybeHandle<Code> GetLazyCode(
+      Handle<JSFunction> function);
   MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCode(
       Handle<SharedFunctionInfo> shared);
+  MUST_USE_RESULT static MaybeHandle<Code> GetDebugCode(
+      Handle<JSFunction> function);
+
   static bool EnsureCompiled(Handle<JSFunction> function,
                              ClearExceptionFlag flag);
-  MUST_USE_RESULT static MaybeHandle<Code> GetCodeForDebugging(
-      Handle<JSFunction> function);
+
+  static bool EnsureDeoptimizationSupport(CompilationInfo* info);
 
   static void CompileForLiveEdit(Handle<Script> script);
 
   // Compile a String source within a context for eval.
   MUST_USE_RESULT static MaybeHandle<JSFunction> GetFunctionFromEval(
-      Handle<String> source,
-      Handle<Context> context,
-      StrictMode strict_mode,
-      ParseRestriction restriction,
-      int scope_position);
+      Handle<String> source, Handle<SharedFunctionInfo> outer_info,
+      Handle<Context> context, StrictMode strict_mode,
+      ParseRestriction restriction, int scope_position);
 
   // Compile a String source within a context.
   static Handle<SharedFunctionInfo> CompileScript(
-      Handle<String> source,
-      Handle<Object> script_name,
-      int line_offset,
-      int column_offset,
-      bool is_shared_cross_origin,
-      Handle<Context> context,
-      v8::Extension* extension,
-      ScriptData** cached_data,
-      CachedDataMode cached_data_mode,
+      Handle<String> source, Handle<Object> script_name, int line_offset,
+      int column_offset, bool is_shared_cross_origin, Handle<Context> context,
+      v8::Extension* extension, ScriptData** cached_data,
+      ScriptCompiler::CompileOptions compile_options,
       NativesFlag is_natives_code);
 
+  static Handle<SharedFunctionInfo> CompileStreamedScript(CompilationInfo* info,
+                                                          int source_length);
+
   // Create a shared function info object (the code may be lazily compiled).
   static Handle<SharedFunctionInfo> BuildFunctionInfo(FunctionLiteral* node,
-                                                      Handle<Script> script);
+                                                      Handle<Script> script,
+                                                      CompilationInfo* outer);
 
   enum ConcurrencyMode { NOT_CONCURRENT, CONCURRENT };
 
@@ -667,9 +717,8 @@
   // On failure, return the empty handle.
   static Handle<Code> GetConcurrentlyOptimizedCode(OptimizedCompileJob* job);
 
-  static void RecordFunctionCompilation(Logger::LogEventsAndTags tag,
-                                        CompilationInfo* info,
-                                        Handle<SharedFunctionInfo> shared);
+  static bool DebuggerWantsEagerCompilation(
+      CompilationInfo* info, bool allow_lazy_without_ctx = false);
 };
 
 
@@ -691,12 +740,11 @@
   CompilationInfo* info_;
   Zone zone_;
   unsigned info_zone_start_allocation_size_;
-  ElapsedTimer timer_;
+  base::ElapsedTimer timer_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilationPhase);
 };
 
-
 } }  // namespace v8::internal
 
 #endif  // V8_COMPILER_H_
diff --git a/src/compiler/access-builder.cc b/src/compiler/access-builder.cc
new file mode 100644
index 0000000..ac9cfa8
--- /dev/null
+++ b/src/compiler/access-builder.cc
@@ -0,0 +1,97 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// static
+FieldAccess AccessBuilder::ForMap() {
+  return {kTaggedBase, HeapObject::kMapOffset, Handle<Name>(), Type::Any(),
+          kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectProperties() {
+  return {kTaggedBase, JSObject::kPropertiesOffset, Handle<Name>(), Type::Any(),
+          kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSObjectElements() {
+  return {kTaggedBase, JSObject::kElementsOffset, Handle<Name>(),
+          Type::Internal(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSFunctionContext() {
+  return {kTaggedBase, JSFunction::kContextOffset, Handle<Name>(),
+          Type::Internal(), kMachAnyTagged};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
+  return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, Handle<Name>(),
+          Type::UntaggedPtr(), kMachPtr};
+}
+
+
+// static
+FieldAccess AccessBuilder::ForExternalArrayPointer() {
+  return {kTaggedBase, ExternalArray::kExternalPointerOffset, Handle<Name>(),
+          Type::UntaggedPtr(), kMachPtr};
+}
+
+
+// static
+ElementAccess AccessBuilder::ForFixedArrayElement() {
+  return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
+}
+
+
+// static
+ElementAccess AccessBuilder::ForBackingStoreElement(MachineType rep) {
+  return {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+          rep};
+}
+
+
+// static
+ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
+                                                  bool is_external) {
+  BaseTaggedness taggedness = is_external ? kUntaggedBase : kTaggedBase;
+  int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
+  switch (type) {
+    case kExternalInt8Array:
+      return {taggedness, header_size, Type::Signed32(), kMachInt8};
+    case kExternalUint8Array:
+    case kExternalUint8ClampedArray:
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
+    case kExternalInt16Array:
+      return {taggedness, header_size, Type::Signed32(), kMachInt16};
+    case kExternalUint16Array:
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
+    case kExternalInt32Array:
+      return {taggedness, header_size, Type::Signed32(), kMachInt32};
+    case kExternalUint32Array:
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
+    case kExternalFloat32Array:
+      return {taggedness, header_size, Type::Number(), kRepFloat32};
+    case kExternalFloat64Array:
+      return {taggedness, header_size, Type::Number(), kRepFloat64};
+  }
+  UNREACHABLE();
+  return {kUntaggedBase, 0, Type::None(), kMachNone};
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/access-builder.h b/src/compiler/access-builder.h
new file mode 100644
index 0000000..7d0bda1
--- /dev/null
+++ b/src/compiler/access-builder.h
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ACCESS_BUILDER_H_
+#define V8_COMPILER_ACCESS_BUILDER_H_
+
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// This access builder provides a set of static methods constructing commonly
+// used FieldAccess and ElementAccess descriptors. These descriptors serve as
+// parameters to simplified load/store operators.
+class AccessBuilder FINAL : public AllStatic {
+ public:
+  // Provides access to HeapObject::map() field.
+  static FieldAccess ForMap();
+
+  // Provides access to JSObject::properties() field.
+  static FieldAccess ForJSObjectProperties();
+
+  // Provides access to JSObject::elements() field.
+  static FieldAccess ForJSObjectElements();
+
+  // Provides access to JSFunction::context() field.
+  static FieldAccess ForJSFunctionContext();
+
+  // Provides access to JSArrayBuffer::backing_store() field.
+  static FieldAccess ForJSArrayBufferBackingStore();
+
+  // Provides access to ExternalArray::external_pointer() field.
+  static FieldAccess ForExternalArrayPointer();
+
+  // Provides access to FixedArray elements.
+  static ElementAccess ForFixedArrayElement();
+
+  // TODO(mstarzinger): Raw access only for testing, drop me.
+  static ElementAccess ForBackingStoreElement(MachineType rep);
+
+  // Provides access to Fixed{type}TypedArray and External{type}Array elements.
+  static ElementAccess ForTypedArrayElement(ExternalArrayType type,
+                                            bool is_external);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(AccessBuilder);
+};
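+
+// Usage sketch (illustrative; the LoadField operator and the surrounding
+// graph plumbing are assumptions, not defined in this header):
+//
+//   FieldAccess access = AccessBuilder::ForJSObjectElements();
+//   // ... |access| then parameterizes a simplified LoadField/StoreField
+//   // operator, e.g. simplified->LoadField(access).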
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ACCESS_BUILDER_H_
diff --git a/src/compiler/arm/code-generator-arm.cc b/src/compiler/arm/code-generator-arm.cc
new file mode 100644
index 0000000..1ec174d
--- /dev/null
+++ b/src/compiler/arm/code-generator-arm.cc
@@ -0,0 +1,876 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm/macro-assembler-arm.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+#define kScratchReg r9
+
+
+// Adds Arm-specific methods to convert InstructionOperands.
+class ArmOperandConverter : public InstructionOperandConverter {
+ public:
+  ArmOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  SBit OutputSBit() const {
+    switch (instr_->flags_mode()) {
+      case kFlags_branch:
+      case kFlags_set:
+        return SetCC;
+      case kFlags_none:
+        return LeaveCC;
+    }
+    UNREACHABLE();
+    return LeaveCC;
+  }
+
+  Operand InputImmediate(int index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kInt64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        break;
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  Operand InputOperand2(int first_index) {
+    const int index = first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+      case kMode_Offset_RI:
+      case kMode_Offset_RR:
+        break;
+      case kMode_Operand2_I:
+        return InputImmediate(index + 0);
+      case kMode_Operand2_R:
+        return Operand(InputRegister(index + 0));
+      case kMode_Operand2_R_ASR_I:
+        return Operand(InputRegister(index + 0), ASR, InputInt5(index + 1));
+      case kMode_Operand2_R_ASR_R:
+        return Operand(InputRegister(index + 0), ASR, InputRegister(index + 1));
+      case kMode_Operand2_R_LSL_I:
+        return Operand(InputRegister(index + 0), LSL, InputInt5(index + 1));
+      case kMode_Operand2_R_LSL_R:
+        return Operand(InputRegister(index + 0), LSL, InputRegister(index + 1));
+      case kMode_Operand2_R_LSR_I:
+        return Operand(InputRegister(index + 0), LSR, InputInt5(index + 1));
+      case kMode_Operand2_R_LSR_R:
+        return Operand(InputRegister(index + 0), LSR, InputRegister(index + 1));
+      case kMode_Operand2_R_ROR_I:
+        return Operand(InputRegister(index + 0), ROR, InputInt5(index + 1));
+      case kMode_Operand2_R_ROR_R:
+        return Operand(InputRegister(index + 0), ROR, InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return Operand::Zero();
+  }
+
+  MemOperand InputOffset(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+      case kMode_Operand2_I:
+      case kMode_Operand2_R:
+      case kMode_Operand2_R_ASR_I:
+      case kMode_Operand2_R_ASR_R:
+      case kMode_Operand2_R_LSL_I:
+      case kMode_Operand2_R_LSL_R:
+      case kMode_Operand2_R_LSR_I:
+      case kMode_Operand2_R_LSR_R:
+      case kMode_Operand2_R_ROR_I:
+      case kMode_Operand2_R_ROR_R:
+        break;
+      case kMode_Offset_RI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_Offset_RR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1));
+    }
+    UNREACHABLE();
+    return MemOperand(r0);
+  }
+
+  MemOperand InputOffset() {
+    int index = 0;
+    return InputOffset(&index);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  ArmOperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        __ add(ip, i.InputRegister(0),
+               Operand(Code::kHeaderSize - kHeapObjectTag));
+        __ Call(ip);
+      }
+      AddSafepointAndDeopt(instr);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ ldr(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ cmp(cp, kScratchReg);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      __ ldr(ip, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(ip);
+      AddSafepointAndDeopt(instr);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArchJmp:
+      __ b(code_->GetLabel(i.InputBlock(0)));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchRet:
+      AssembleReturn();
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmAdd:
+      __ add(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmAnd:
+      __ and_(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+              i.OutputSBit());
+      break;
+    case kArmBic:
+      __ bic(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmMul:
+      __ mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.OutputSBit());
+      break;
+    case kArmMla:
+      __ mla(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputRegister(2), i.OutputSBit());
+      break;
+    case kArmMls: {
+      CpuFeatureScope scope(masm(), MLS);
+      __ mls(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+             i.InputRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmSdiv: {
+      CpuFeatureScope scope(masm(), SUDIV);
+      __ sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmUdiv: {
+      CpuFeatureScope scope(masm(), SUDIV);
+      __ udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmMov:
+      __ Move(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+      break;
+    case kArmMvn:
+      __ mvn(i.OutputRegister(), i.InputOperand2(0), i.OutputSBit());
+      break;
+    case kArmOrr:
+      __ orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmEor:
+      __ eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmSub:
+      __ sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmRsb:
+      __ rsb(i.OutputRegister(), i.InputRegister(0), i.InputOperand2(1),
+             i.OutputSBit());
+      break;
+    case kArmBfc: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ bfc(i.OutputRegister(), i.InputInt8(1), i.InputInt8(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmUbfx: {
+      CpuFeatureScope scope(masm(), ARMv7);
+      __ ubfx(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmCmp:
+      __ cmp(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmCmn:
+      __ cmn(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmTst:
+      __ tst(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmTeq:
+      __ teq(i.InputRegister(0), i.InputOperand2(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmVcmpF64:
+      __ VFPCompareAndSetFlags(i.InputDoubleRegister(0),
+                               i.InputDoubleRegister(1));
+      DCHECK_EQ(SetCC, i.OutputSBit());
+      break;
+    case kArmVaddF64:
+      __ vadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVsubF64:
+      __ vsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmulF64:
+      __ vmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmlaF64:
+      __ vmla(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmlsF64:
+      __ vmls(i.OutputDoubleRegister(), i.InputDoubleRegister(1),
+              i.InputDoubleRegister(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVdivF64:
+      __ vdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVmodF64: {
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      __ PrepareCallCFunction(0, 2, kScratchReg);
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result into the double result register.
+      __ MovFromFloatResult(i.OutputDoubleRegister());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVnegF64:
+      __ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArmVsqrtF64:
+      __ vsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArmVcvtF64S32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f64_s32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtF64U32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vmov(scratch, i.InputRegister(0));
+      __ vcvt_f64_u32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtS32F64: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_s32_f64(scratch, i.InputDoubleRegister(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVcvtU32F64: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vcvt_u32_f64(scratch, i.InputDoubleRegister(0));
+      __ vmov(i.OutputRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLdrb:
+      __ ldrb(i.OutputRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmLdrsb:
+      __ ldrsb(i.OutputRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmStrb: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ strb(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLdrh:
+      __ ldrh(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmLdrsh:
+      __ ldrsh(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmStrh: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ strh(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmLdr:
+      __ ldr(i.OutputRegister(), i.InputOffset());
+      break;
+    case kArmStr: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ str(i.InputRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVldr32: {
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      __ vldr(scratch, i.InputOffset());
+      __ vcvt_f64_f32(i.OutputDoubleRegister(), scratch);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVstr32: {
+      int index = 0;
+      SwVfpRegister scratch = kScratchDoubleReg.low();
+      MemOperand operand = i.InputOffset(&index);
+      __ vcvt_f32_f64(scratch, i.InputDoubleRegister(index));
+      __ vstr(scratch, operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmVldr64:
+      __ vldr(i.OutputDoubleRegister(), i.InputOffset());
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmVstr64: {
+      int index = 0;
+      MemOperand operand = i.InputOffset(&index);
+      __ vstr(i.InputDoubleRegister(index), operand);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+    case kArmPush:
+      __ Push(i.InputRegister(0));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmStoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ add(index, object, index);
+      __ str(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+      __ RecordWrite(object, index, value, lr_status, mode);
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  ArmOperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  switch (condition) {
+    case kUnorderedEqual:
+      __ b(vs, flabel);
+    // Fall through.
+    case kEqual:
+      __ b(eq, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ b(ne, tlabel);
+      break;
+    case kSignedLessThan:
+      __ b(lt, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ b(ge, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ b(le, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ b(gt, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ b(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ b(lo, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ b(hs, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ b(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ b(ls, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ b(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ b(hi, tlabel);
+      break;
+    case kOverflow:
+      __ b(vs, tlabel);
+      break;
+    case kNotOverflow:
+      __ b(vc, tlabel);
+      break;
+  }
+  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  ArmOperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = kNoCondition;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kEqual:
+      cc = eq;
+      break;
+    case kUnorderedNotEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kNotEqual:
+      cc = ne;
+      break;
+    case kSignedLessThan:
+      cc = lt;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = ge;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = le;
+      break;
+    case kSignedGreaterThan:
+      cc = gt;
+      break;
+    case kUnorderedLessThan:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = lo;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = hs;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ b(vc, &check);
+      __ mov(reg, Operand(0));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = ls;
+      break;
+    case kUnorderedGreaterThan:
+      __ b(vc, &check);
+      __ mov(reg, Operand(1));
+      __ b(&done);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = hi;
+      break;
+    case kOverflow:
+      cc = vs;
+      break;
+    case kNotOverflow:
+      cc = vc;
+      break;
+  }
+  __ bind(&check);
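+  // Write 0 unconditionally, then conditionally overwrite it with 1 when
+  // |cc| holds. The unordered cases above branch here only for ordered
+  // comparisons; otherwise they materialize their fixed result and jump
+  // straight to |done|.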
+  __ mov(reg, Operand(0));
+  __ mov(reg, Operand(1), LeaveCC, cc);
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    bool saved_pp;
+    if (FLAG_enable_ool_constant_pool) {
+      __ Push(lr, fp, pp);
+      // Adjust FP to point to saved FP.
+      __ sub(fp, sp, Operand(StandardFrameConstants::kConstantPoolOffset));
+      saved_pp = true;
+    } else {
+      __ Push(lr, fp);
+      __ mov(fp, sp);
+      saved_pp = false;
+    }
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0 || saved_pp) {
+      // Save callee-saved registers.
+      int register_save_area_size = saved_pp ? kPointerSize : 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+      __ stm(db_w, sp, saves);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
+      __ b(ne, &ok);
+      __ ldr(r2, GlobalObjectOperand());
+      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
+      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    __ sub(sp, sp, Operand(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ add(sp, sp, Operand(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      if (saves != 0) {
+        __ ldm(ia_w, sp, saves);
+      }
+    }
+    __ LeaveFrame(StackFrame::MANUAL);
+    __ Ret();
+  } else {
+    __ LeaveFrame(StackFrame::MANUAL);
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ Drop(pop_count);
+    __ Ret();
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  ArmOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(g.ToRegister(destination), src);
+    } else {
+      __ str(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ldr(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ ldr(temp, src);
+      __ str(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      Constant src = g.ToConstant(source);
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ mov(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kInt64:
+          UNREACHABLE();
+          break;
+        case Constant::kFloat64:
+          __ Move(dst,
+                  isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ mov(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject:
+          __ Move(dst, src.ToHeapObject());
+          break;
+      }
+      if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
+    } else if (destination->IsDoubleRegister()) {
+      DwVfpRegister result = g.ToDoubleRegister(destination);
+      __ vmov(result, g.ToDouble(source));
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      DwVfpRegister temp = kScratchDoubleReg;
+      __ vmov(temp, g.ToDouble(source));
+      __ vstr(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ vstr(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ vldr(g.ToDoubleRegister(destination), src);
+    } else {
+      DwVfpRegister temp = kScratchDoubleReg;
+      __ vldr(temp, src);
+      __ vstr(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  ArmOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mov(temp, src);
+      __ ldr(src, dst);
+      __ str(temp, dst);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+    Register temp_0 = kScratchReg;
+    SwVfpRegister temp_1 = kScratchDoubleReg.low();
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ ldr(temp_0, src);
+    __ vldr(temp_1, dst);
+    __ str(temp_0, dst);
+    __ vstr(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    DwVfpRegister temp = kScratchDoubleReg;
+    DwVfpRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      DwVfpRegister dst = g.ToDoubleRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ vldr(src, dst);
+      __ vstr(temp, dst);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    Register temp_0 = kScratchReg;
+    DwVfpRegister temp_1 = kScratchDoubleReg;
+    MemOperand src0 = g.ToMemOperand(source);
+    MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
+    MemOperand dst0 = g.ToMemOperand(destination);
+    MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
+    __ vldr(temp_1, dst0);  // Save destination in temp_1.
+    __ ldr(temp_0, src0);   // Then use temp_0 to copy source to destination.
+    __ str(temp_0, dst0);
+    __ ldr(temp_0, src1);
+    __ str(temp_0, dst1);
+    __ vstr(temp_1, src0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // On 32-bit ARM we do not insert nops for inlined Smi code.
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block literal pool emission for duration of padding.
+      v8::internal::Assembler::BlockConstPoolScope block_const_pool(masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= v8::internal::Assembler::kInstrSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm/instruction-codes-arm.h b/src/compiler/arm/instruction-codes-arm.h
new file mode 100644
index 0000000..7849ca9
--- /dev/null
+++ b/src/compiler/arm/instruction-codes-arm.h
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+#define V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(ArmAdd)                        \
+  V(ArmAnd)                        \
+  V(ArmBic)                        \
+  V(ArmCmp)                        \
+  V(ArmCmn)                        \
+  V(ArmTst)                        \
+  V(ArmTeq)                        \
+  V(ArmOrr)                        \
+  V(ArmEor)                        \
+  V(ArmSub)                        \
+  V(ArmRsb)                        \
+  V(ArmMul)                        \
+  V(ArmMla)                        \
+  V(ArmMls)                        \
+  V(ArmSdiv)                       \
+  V(ArmUdiv)                       \
+  V(ArmMov)                        \
+  V(ArmMvn)                        \
+  V(ArmBfc)                        \
+  V(ArmUbfx)                       \
+  V(ArmVcmpF64)                    \
+  V(ArmVaddF64)                    \
+  V(ArmVsubF64)                    \
+  V(ArmVmulF64)                    \
+  V(ArmVmlaF64)                    \
+  V(ArmVmlsF64)                    \
+  V(ArmVdivF64)                    \
+  V(ArmVmodF64)                    \
+  V(ArmVnegF64)                    \
+  V(ArmVsqrtF64)                   \
+  V(ArmVcvtF64S32)                 \
+  V(ArmVcvtF64U32)                 \
+  V(ArmVcvtS32F64)                 \
+  V(ArmVcvtU32F64)                 \
+  V(ArmVldr32)                     \
+  V(ArmVstr32)                     \
+  V(ArmVldr64)                     \
+  V(ArmVstr64)                     \
+  V(ArmLdrb)                       \
+  V(ArmLdrsb)                      \
+  V(ArmStrb)                       \
+  V(ArmLdrh)                       \
+  V(ArmLdrsh)                      \
+  V(ArmStrh)                       \
+  V(ArmLdr)                        \
+  V(ArmStr)                        \
+  V(ArmPush)                       \
+  V(ArmStoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define TARGET_ADDRESSING_MODE_LIST(V)  \
+  V(Offset_RI)        /* [%r0 + K] */   \
+  V(Offset_RR)        /* [%r0 + %r1] */ \
+  V(Operand2_I)       /* K */           \
+  V(Operand2_R)       /* %r0 */         \
+  V(Operand2_R_ASR_I) /* %r0 ASR K */   \
+  V(Operand2_R_LSL_I) /* %r0 LSL K */   \
+  V(Operand2_R_LSR_I) /* %r0 LSR K */   \
+  V(Operand2_R_ROR_I) /* %r0 ROR K */   \
+  V(Operand2_R_ASR_R) /* %r0 ASR %r1 */ \
+  V(Operand2_R_LSL_R) /* %r0 LSL %r1 */ \
+  V(Operand2_R_LSR_R) /* %r0 LSR %r1 */ \
+  V(Operand2_R_ROR_R) /* %r0 ROR %r1 */
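+
+// Decoding sketch (illustrative): after register allocation the code
+// generator recovers the mode from the instruction and dispatches on it, as
+// in code-generator-arm.cc:
+//
+//   switch (AddressingModeField::decode(instr->opcode())) {
+//     case kMode_Offset_RI:  // [%r0 + K]
+//       ...
+//   }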
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ARM_INSTRUCTION_CODES_ARM_H_
diff --git a/src/compiler/arm/instruction-selector-arm-unittest.cc b/src/compiler/arm/instruction-selector-arm-unittest.cc
new file mode 100644
index 0000000..208d2e9
--- /dev/null
+++ b/src/compiler/arm/instruction-selector-arm-unittest.cc
@@ -0,0 +1,1900 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+typedef Node* (RawMachineAssembler::*Constructor)(Node*, Node*);
+
+
+// Data processing instructions.
+struct DPI {
+  Constructor constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;
+  ArchOpcode reverse_arch_opcode;
+  ArchOpcode test_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const DPI& dpi) {
+  return os << dpi.constructor_name;
+}
+
+
+static const DPI kDPIs[] = {
+    {&RawMachineAssembler::Word32And, "Word32And", kArmAnd, kArmAnd, kArmTst},
+    {&RawMachineAssembler::Word32Or, "Word32Or", kArmOrr, kArmOrr, kArmOrr},
+    {&RawMachineAssembler::Word32Xor, "Word32Xor", kArmEor, kArmEor, kArmTeq},
+    {&RawMachineAssembler::Int32Add, "Int32Add", kArmAdd, kArmAdd, kArmCmn},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArmSub, kArmRsb, kArmCmp}};
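+
+// Note (illustrative): reverse_arch_opcode is expected when the commuted
+// form is selected, e.g. Int32Sub(imm, reg) maps to kArmRsb, while
+// test_arch_opcode is the flag-setting form used for branches, e.g. kArmTst
+// for Word32And.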
+
+
+// Data processing instructions with overflow.
+struct ODPI {
+  Constructor constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;
+  ArchOpcode reverse_arch_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const ODPI& odpi) {
+  return os << odpi.constructor_name;
+}
+
+
+static const ODPI kODPIs[] = {{&RawMachineAssembler::Int32AddWithOverflow,
+                               "Int32AddWithOverflow", kArmAdd, kArmAdd},
+                              {&RawMachineAssembler::Int32SubWithOverflow,
+                               "Int32SubWithOverflow", kArmSub, kArmRsb}};
+
+
+// Shifts.
+struct Shift {
+  Constructor constructor;
+  const char* constructor_name;
+  int32_t i_low;          // lowest possible immediate
+  int32_t i_high;         // highest possible immediate
+  AddressingMode i_mode;  // Operand2_R_<shift>_I
+  AddressingMode r_mode;  // Operand2_R_<shift>_R
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Shift& shift) {
+  return os << shift.constructor_name;
+}
+
+
+static const Shift kShifts[] = {
+    {&RawMachineAssembler::Word32Sar, "Word32Sar", 1, 32,
+     kMode_Operand2_R_ASR_I, kMode_Operand2_R_ASR_R},
+    {&RawMachineAssembler::Word32Shl, "Word32Shl", 0, 31,
+     kMode_Operand2_R_LSL_I, kMode_Operand2_R_LSL_R},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr", 1, 32,
+     kMode_Operand2_R_LSR_I, kMode_Operand2_R_LSR_R},
+    {&RawMachineAssembler::Word32Ror, "Word32Ror", 1, 31,
+     kMode_Operand2_R_ROR_I, kMode_Operand2_R_ROR_R}};
+
+
+// Immediates (random subset).
+static const int32_t kImmediates[] = {
+    -2147483617, -2147483606, -2113929216, -2080374784, -1996488704,
+    -1879048192, -1459617792, -1358954496, -1342177265, -1275068414,
+    -1073741818, -1073741777, -855638016,  -805306368,  -402653184,
+    -268435444,  -16777216,   0,           35,          61,
+    105,         116,         171,         245,         255,
+    692,         1216,        1248,        1520,        1600,
+    1888,        3744,        4080,        5888,        8384,
+    9344,        9472,        9792,        13312,       15040,
+    15360,       20736,       22272,       23296,       32000,
+    33536,       37120,       45824,       47872,       56320,
+    59392,       65280,       72704,       101376,      147456,
+    161792,      164864,      167936,      173056,      195584,
+    209920,      212992,      356352,      655360,      704512,
+    716800,      851968,      901120,      1044480,     1523712,
+    2572288,     3211264,     3588096,     3833856,     3866624,
+    4325376,     5177344,     6488064,     7012352,     7471104,
+    14090240,    16711680,    19398656,    22282240,    28573696,
+    30408704,    30670848,    43253760,    54525952,    55312384,
+    56623104,    68157440,    115343360,   131072000,   187695104,
+    188743680,   195035136,   197132288,   203423744,   218103808,
+    267386880,   268435470,   285212672,   402653185,   415236096,
+    595591168,   603979776,   603979778,   629145600,   1073741835,
+    1073741855,  1073741861,  1073741884,  1157627904,  1476395008,
+    1476395010,  1610612741,  2030043136,  2080374785,  2097152000};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions.
+
+
+typedef InstructionSelectorTestWithParam<DPI> InstructionSelectorDPITest;
+
+
+TEST_P(InstructionSelectorDPITest, Parameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorDPITest, Immediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, ShiftByParameter) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(
+        m.Parameter(0),
+        (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return((m.*dpi.constructor)(
+        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+        m.Parameter(2)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, ShiftByImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return((m.*dpi.constructor)(
+          m.Parameter(0),
+          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return((m.*dpi.constructor)(
+          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+          m.Parameter(1)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)), &a,
+             &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)), &a,
+             &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByParameter) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(
+                 m.Parameter(0),
+                 (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch((m.*dpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+                 m.Parameter(2)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchWithShiftByImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      MLabel a, b;
+      m.Branch((m.*dpi.constructor)(m.Parameter(0),
+                                    (m.*shift.constructor)(
+                                        m.Parameter(1), m.Int32Constant(imm))),
+               &a, &b);
+      m.Bind(&a);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&b);
+      m.Return(m.Int32Constant(0));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(5U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+      EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      MLabel a, b;
+      m.Branch((m.*dpi.constructor)(
+                   (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                   m.Parameter(1)),
+               &a, &b);
+      m.Bind(&a);
+      m.Return(m.Int32Constant(1));
+      m.Bind(&b);
+      m.Return(m.Int32Constant(0));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(5U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+      EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+    }
+  }
+}
+
+
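+// An explicit comparison against zero should fold into the same flag-setting
+// instruction, with the expected condition flipped accordingly.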
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch(m.Word32Equal((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+                         m.Int32Constant(0)),
+           &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithParameters) {
+  const DPI dpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  m.Branch(
+      m.Word32NotEqual((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)),
+                       m.Int32Constant(0)),
+      &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfZeroWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32Equal(
+                 (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32Equal(
+                 (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorDPITest, BranchIfNotZeroWithImmediate) {
+  const DPI dpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32NotEqual(
+                 (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32NotEqual(
+                 (m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)),
+                 m.Int32Constant(0)),
+             &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.test_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorDPITest,
+                        ::testing::ValuesIn(kDPIs));
+
+
+// -----------------------------------------------------------------------------
+// Data processing instructions with overflow.
+
+
+typedef InstructionSelectorTestWithParam<ODPI> InstructionSelectorODPITest;
+
+
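+// Overflow ops yield a (value, overflow) pair as projections 0 and 1. The
+// tests below cover consuming only the overflow bit (Ovf), only the value
+// (Val), both, and branching on the overflow bit.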
+TEST_P(InstructionSelectorODPITest, OvfWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Projection(1, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(
+               m.Parameter(0),
+               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, (m.*odpi.constructor)(
+               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+               m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, OvfWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          1, (m.*odpi.constructor)(m.Parameter(0),
+                                   (m.*shift.constructor)(
+                                       m.Parameter(1), m.Int32Constant(imm)))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          1, (m.*odpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+                 m.Parameter(0))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+}
+
+
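+// Consuming only projection 0 (the value) should drop the flag setting
+// entirely (kFlags_none).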
+TEST_P(InstructionSelectorODPITest, ValWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Projection(0, (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(
+               m.Parameter(0),
+               (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, (m.*odpi.constructor)(
+               (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)),
+               m.Parameter(0))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, ValWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          0, (m.*odpi.constructor)(m.Parameter(0),
+                                   (m.*shift.constructor)(
+                                       m.Parameter(1), m.Int32Constant(imm)))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      m.Return(m.Projection(
+          0, (m.*odpi.constructor)(
+                 (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+                 m.Parameter(0))));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_LE(1U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+    }
+  }
+}
+
+
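+// Consuming both projections forces a single instruction with two outputs:
+// the value and the materialized overflow bit.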
+TEST_P(InstructionSelectorODPITest, BothWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+  Stream s = m.Build();
+  ASSERT_LE(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithShiftByParameter) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(
+        m.Parameter(0), (m.*shift.constructor)(m.Parameter(1), m.Parameter(2)));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    Node* n = (m.*odpi.constructor)(
+        (m.*shift.constructor)(m.Parameter(0), m.Parameter(1)), m.Parameter(2));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BothWithShiftByImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      Node* n = (m.*odpi.constructor)(
+          m.Parameter(0),
+          (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)));
+      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+      Stream s = m.Build();
+      ASSERT_LE(1U, s.size());
+      EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(2U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+  TRACED_FOREACH(Shift, shift, kShifts) {
+    TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+      StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+      Node* n = (m.*odpi.constructor)(
+          (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)),
+          m.Parameter(1));
+      m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+      Stream s = m.Build();
+      ASSERT_LE(1U, s.size());
+      EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+      EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+      EXPECT_EQ(2U, s[0]->OutputCount());
+      EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+      EXPECT_EQ(kOverflow, s[0]->flags_condition());
+    }
+  }
+}
+
+
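+// Branching on the overflow projection should fuse the branch into the
+// arithmetic instruction itself (kFlags_branch with kOverflow).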
+TEST_P(InstructionSelectorODPITest, BranchWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Projection(1, n), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(0));
+  m.Bind(&b);
+  m.Return(m.Projection(0, n));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchWithImmediate) {
+  const ODPI odpi = GetParam();
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = (m.*odpi.constructor)(m.Int32Constant(imm), m.Parameter(0));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(odpi.reverse_arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchIfZeroWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Word32Equal(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Projection(0, n));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorODPITest, BranchIfNotZeroWithParameters) {
+  const ODPI odpi = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  MLabel a, b;
+  Node* n = (m.*odpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Word32NotEqual(m.Projection(1, n), m.Int32Constant(0)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Projection(0, n));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(odpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorODPITest,
+                        ::testing::ValuesIn(kODPIs));
+
+
+// -----------------------------------------------------------------------------
+// Shifts.
+
+
+typedef InstructionSelectorTestWithParam<Shift> InstructionSelectorShiftTest;
+
+
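+// ARM has no stand-alone shift instructions; a plain shift should select as
+// a mov with a shifted Operand2.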
+TEST_P(InstructionSelectorShiftTest, Parameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return((m.*shift.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return((m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameter) {
+  const Shift shift = GetParam();
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Word32Equal(m.Parameter(0),
+                      (m.*shift.constructor)(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Word32Equal((m.*shift.constructor)(m.Parameter(1), m.Parameter(2)),
+                      m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualWithParameterAndImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm)),
+        m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        m.Parameter(0),
+        (m.*shift.constructor)(m.Parameter(1), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Word32Equal(m.Int32Constant(0),
+                    (m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32EqualToZeroWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(
+        m.Int32Constant(0),
+        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMov, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32NotWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32Not((m.*shift.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32NotWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Not(
+        (m.*shift.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithParameters) {
+  const Shift shift = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32And(m.Parameter(0), m.Word32Not((m.*shift.constructor)(
+                                           m.Parameter(1), m.Parameter(2)))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+  EXPECT_EQ(shift.r_mode, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Word32AndWithWord32NotWithImmediate) {
+  const Shift shift = GetParam();
+  TRACED_FORRANGE(int32_t, imm, shift.i_low, shift.i_high) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Word32Not((m.*shift.constructor)(
+                             m.Parameter(1), m.Int32Constant(imm)))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(shift.i_mode, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+                        ::testing::ValuesIn(kShifts));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
+
+
+namespace {
+
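+// Pairs a machine type with its load/store opcodes, a predicate classifying
+// the result register, and a sample of immediate offsets to exercise.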
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode ldr_opcode;
+  ArchOpcode str_opcode;
+  bool (InstructionSelectorTest::Stream::*val_predicate)(
+      const InstructionOperand*) const;
+  const int32_t immediates[40];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+
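+// The offset lists sample the immediate range each access type can encode
+// directly; note the narrower ranges for halfword and VFP accesses.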
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8,
+     kArmLdrsb,
+     kArmStrb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachUint8,
+     kArmLdrb,
+     kArmStrb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3914, -3536, -3234, -3185, -3169, -1073, -990, -859, -720, -434,
+      -127, -124, -122, -105, -91, -86, -64, -55, -53, -30, -10, -3, 0, 20, 28,
+      39, 58, 64, 73, 75, 100, 108, 121, 686, 963, 1363, 2759, 3449, 4095}},
+    {kMachInt16,
+     kArmLdrsh,
+     kArmStrh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-255, -251, -232, -220, -144, -138, -130, -126, -116, -115, -102, -101,
+      -98, -69, -59, -56, -39, -35, -23, -19, -7, 0, 22, 26, 37, 68, 83, 87, 98,
+      102, 108, 111, 117, 171, 195, 203, 204, 245, 246, 255}},
+    {kMachUint16,
+     kArmLdrh,
+     kArmStrh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-255, -230, -201, -172, -125, -119, -118, -105, -98, -79, -54, -42, -41,
+      -32, -12, -11, -5, -4, 0, 5, 9, 25, 28, 51, 58, 60, 89, 104, 108, 109,
+      114, 116, 120, 138, 150, 161, 166, 172, 228, 255}},
+    {kMachInt32,
+     kArmLdr,
+     kArmStr,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -1898, -1685, -1562, -1408, -1313, -344, -128, -116, -100, -92,
+      -80, -72, -71, -56, -25, -21, -11, -9, 0, 3, 5, 27, 28, 42, 52, 63, 88,
+      93, 97, 125, 846, 1037, 2102, 2403, 2597, 2632, 2997, 3935, 4095}},
+    {kMachFloat32,
+     kArmVldr32,
+     kArmVstr32,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-1020, -928, -896, -772, -728, -680, -660, -488, -372, -112, -100, -92,
+      -84, -80, -72, -64, -60, -56, -52, -48, -36, -32, -20, -8, -4, 0, 8, 20,
+      24, 40, 64, 112, 204, 388, 516, 852, 856, 976, 988, 1020}},
+    {kMachFloat64,
+     kArmVldr64,
+     kArmVstr64,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-1020, -948, -796, -696, -612, -364, -320, -308, -128, -112, -108, -104,
+      -96, -84, -80, -56, -48, -40, -20, 0, 24, 28, 36, 48, 64, 84, 96, 100,
+      108, 116, 120, 140, 156, 408, 432, 444, 772, 832, 940, 1020}}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Offset_RR, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Offset_RI, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous.
+
+
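+// An add of a multiply should fuse into a multiply-accumulate, e.g.
+// add(p0, mul(p1, p2)) ==> mla rd, rn, rm, ra (illustrative), in either
+// operand order.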
+TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Add(m.Int32Mul(m.Parameter(1), m.Parameter(2)), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmMla, s[0]->arch_opcode());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
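+// Without hardware division support, Int32Div lowers to a float64 round
+// trip: convert both inputs, divide, convert back -- four instructions
+// chained through virtual registers, as checked below.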
+TEST_F(InstructionSelectorTest, Int32DivWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(4U, s.size());
+  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32DivWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Div(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+}
+
+
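+// Int32Mod builds on the same division sequence and recovers the remainder
+// with a multiply and a subtract, for six instructions in total.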
+TEST_F(InstructionSelectorTest, Int32ModWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(6U, s.size());
+  EXPECT_EQ(kArmVcvtF64S32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64S32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtS32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+  ASSERT_EQ(1U, s[4]->OutputCount());
+  ASSERT_EQ(2U, s[4]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+  ASSERT_EQ(1U, s[5]->OutputCount());
+  ASSERT_EQ(2U, s[5]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
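+// With SUDIV the division collapses to a single sdiv; when MLS is also
+// available, the trailing mul+sub pair fuses into a single mls.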
+TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32ModWithParametersForSUDIVAndMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(MLS, SUDIV);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmSdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(3U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32MulWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Mul(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
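+// Multiplications by 2^k +/- 1 are strength-reduced to a single add/rsb with
+// a left-shifted operand.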
+TEST_F(InstructionSelectorTest, Int32MulWithImmediate) {
+  // x * (2^k + 1) -> x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 1, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) + 1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // x * (2^k - 1) -> -x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 3, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant((1 << k) - 1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // (2^k + 1) * x -> x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 1, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) + 1), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmAdd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  // (2^k - 1) * x -> -x + (x << k)
+  TRACED_FORRANGE(int32_t, k, 3, 30) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Mul(m.Int32Constant((1 << k) - 1), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmRsb, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R_LSL_I, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(k, s.ToInt32(s[0]->InputAt(2)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
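+// Without MLS, a subtract of a multiply remains two instructions; with MLS
+// (below) it fuses into a single mls.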
+TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  Stream s = m.Build();
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmMul, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmSub, s[1]->arch_opcode());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithInt32MulForMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(
+      m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+  Stream s = m.Build(MLS);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMls, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(3U, s[0]->InputCount());
+}
+
+
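+// The unsigned division and modulus lowerings mirror the signed ones, using
+// the unsigned <-> float64 conversions instead.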
+TEST_F(InstructionSelectorTest, Int32UDivWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(4U, s.size());
+  EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UDivWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UDiv(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(6U, s.size());
+  EXPECT_EQ(kArmVcvtF64U32, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArmVcvtF64U32, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArmVdivF64, s[2]->arch_opcode());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+  EXPECT_EQ(kArmVcvtU32F64, s[3]->arch_opcode());
+  ASSERT_EQ(1U, s[3]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[2]->Output()), s.ToVreg(s[3]->InputAt(0)));
+  EXPECT_EQ(kArmMul, s[4]->arch_opcode());
+  ASSERT_EQ(1U, s[4]->OutputCount());
+  ASSERT_EQ(2U, s[4]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[3]->Output()), s.ToVreg(s[4]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->InputAt(0)), s.ToVreg(s[4]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[5]->arch_opcode());
+  ASSERT_EQ(1U, s[5]->OutputCount());
+  ASSERT_EQ(2U, s[5]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[5]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[4]->Output()), s.ToVreg(s[5]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIV) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(SUDIV);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMul, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(kArmSub, s[2]->arch_opcode());
+  ASSERT_EQ(1U, s[2]->OutputCount());
+  ASSERT_EQ(2U, s[2]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[2]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[1]->Output()), s.ToVreg(s[2]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32UModWithParametersForSUDIVAndMLS) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32UMod(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build(MLS, SUDIV);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArmUdiv, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(kArmMls, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_EQ(3U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(s[0]->Output()), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(1)), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[1]->InputAt(2)));
+}
+
+
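+// On ARMv7, masking with a contiguous low-bit mask should select an unsigned
+// bitfield extract, e.g. (p0 & 0xff) ==> ubfx rd, rn, #0, #8 (illustrative).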
+TEST_F(InstructionSelectorTest, Word32AndWithUbfxImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, width, 1, 32) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0),
+                         m.Int32Constant(0xffffffffu >> (32 - width))));
+    Stream s = m.Build(ARMv7);
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+  }
+  TRACED_FORRANGE(int32_t, width, 1, 32) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+                         m.Parameter(0)));
+    Stream s = m.Build(ARMv7);
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+  }
+}
+
+
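+// AND with the complement of a contiguous mask should select bfc, which
+// clears the field in place -- hence the same-as-input output policy.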
+TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(
+          m.Parameter(0),
+          m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+      ASSERT_EQ(1U, s[0]->OutputCount());
+      EXPECT_TRUE(
+          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, (32 - lsb) - 1) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(
+          m.Word32And(m.Int32Constant(~((0xffffffffu >> (32 - width)) << lsb)),
+                      m.Parameter(0)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmBfc, s[0]->arch_opcode());
+      ASSERT_EQ(1U, s[0]->OutputCount());
+      EXPECT_TRUE(
+          UnallocatedOperand::cast(s[0]->Output())->HasSameAsInputPolicy());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
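+// A right shift of a masked value also matches ubfx: the bits below lsb are
+// shifted out anyway, so the mask's low bits are filled with random junk to
+// verify that they are ignored.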
+TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      uint32_t max = 1u << lsb;  // 1u: avoid signed-shift overflow at 31.
+      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+      uint32_t jnk = rng()->NextInt(max);
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32Shr(m.Word32And(m.Parameter(0), m.Int32Constant(msk)),
+                           m.Int32Constant(lsb)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      uint32_t max = 1u << lsb;  // 1u: avoid signed-shift overflow at 31.
+      if (max > static_cast<uint32_t>(kMaxInt)) max -= 1;
+      uint32_t jnk = rng()->NextInt(max);
+      uint32_t msk = ((0xffffffffu >> (32 - width)) << lsb) | jnk;
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32Shr(m.Word32And(m.Int32Constant(msk), m.Parameter(0)),
+                           m.Int32Constant(lsb)));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32Not) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Parameter(0), m.Word32Not(m.Parameter(1))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Word32And(m.Word32Not(m.Parameter(0)), m.Parameter(1)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmBic, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithParameters) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Word32Equal(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithImmediate) {
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    if (imm == 0) continue;
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    if (imm == 0) continue;
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmCmp, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_I, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmTst, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32NotWithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  m.Return(m.Word32Not(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmMvn, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_Operand2_R, s[0]->addressing_mode());
+  EXPECT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Word32AndWithWord32ShrWithImmediateForARMv7) {
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb)),
+                           m.Int32Constant(0xffffffffu >> (32 - width))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+  TRACED_FORRANGE(int32_t, lsb, 0, 31) {
+    TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Word32And(m.Int32Constant(0xffffffffu >> (32 - width)),
+                           m.Word32Shr(m.Parameter(0), m.Int32Constant(lsb))));
+      Stream s = m.Build(ARMv7);
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kArmUbfx, s[0]->arch_opcode());
+      ASSERT_EQ(3U, s[0]->InputCount());
+      EXPECT_EQ(lsb, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(width, s.ToInt32(s[0]->InputAt(2)));
+    }
+  }
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm/instruction-selector-arm.cc b/src/compiler/arm/instruction-selector-arm.cc
new file mode 100644
index 0000000..ae93b27
--- /dev/null
+++ b/src/compiler/arm/instruction-selector-arm.cc
@@ -0,0 +1,983 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds Arm-specific methods for generating InstructionOperands.
+class ArmOperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit ArmOperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+    if (CanBeImmediate(node, opcode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
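+  // Returns true if |node| is a constant that can be encoded directly in the
+  // immediate field of |opcode|. Addressing-mode-1 immediates are an 8-bit
+  // value rotated right by an even amount; opcodes with a complementary form
+  // (MOV/MVN, ADD/SUB, AND/BIC) also accept the inverted or negated value,
+  // and loads/stores have per-width signed offset ranges.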
+  bool CanBeImmediate(Node* node, InstructionCode opcode) {
+    Int32Matcher m(node);
+    if (!m.HasValue()) return false;
+    int32_t value = m.Value();
+    switch (ArchOpcodeField::decode(opcode)) {
+      case kArmAnd:
+      case kArmMov:
+      case kArmMvn:
+      case kArmBic:
+        return ImmediateFitsAddrMode1Instruction(value) ||
+               ImmediateFitsAddrMode1Instruction(~value);
+
+      case kArmAdd:
+      case kArmSub:
+      case kArmCmp:
+      case kArmCmn:
+        return ImmediateFitsAddrMode1Instruction(value) ||
+               ImmediateFitsAddrMode1Instruction(-value);
+
+      case kArmTst:
+      case kArmTeq:
+      case kArmOrr:
+      case kArmEor:
+      case kArmRsb:
+        return ImmediateFitsAddrMode1Instruction(value);
+
+      case kArmVldr32:
+      case kArmVstr32:
+      case kArmVldr64:
+      case kArmVstr64:
+        return value >= -1020 && value <= 1020 && (value % 4) == 0;
+
+      case kArmLdrb:
+      case kArmLdrsb:
+      case kArmStrb:
+      case kArmLdr:
+      case kArmStr:
+      case kArmStoreWriteBarrier:
+        return value >= -4095 && value <= 4095;
+
+      case kArmLdrh:
+      case kArmLdrsh:
+      case kArmStrh:
+        return value >= -255 && value <= 255;
+
+      case kArchCallCodeObject:
+      case kArchCallJSFunction:
+      case kArchJmp:
+      case kArchNop:
+      case kArchRet:
+      case kArchTruncateDoubleToI:
+      case kArmMul:
+      case kArmMla:
+      case kArmMls:
+      case kArmSdiv:
+      case kArmUdiv:
+      case kArmBfc:
+      case kArmUbfx:
+      case kArmVcmpF64:
+      case kArmVaddF64:
+      case kArmVsubF64:
+      case kArmVmulF64:
+      case kArmVmlaF64:
+      case kArmVmlsF64:
+      case kArmVdivF64:
+      case kArmVmodF64:
+      case kArmVnegF64:
+      case kArmVsqrtF64:
+      case kArmVcvtF64S32:
+      case kArmVcvtF64U32:
+      case kArmVcvtS32F64:
+      case kArmVcvtU32F64:
+      case kArmPush:
+        return false;
+    }
+    UNREACHABLE();
+    return false;
+  }
+
+ private:
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+    return Assembler::ImmediateFitsAddrMode1Instruction(imm);
+  }
+};
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  ArmOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
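+// The TryMatch* helpers below fold a Word32 shift node into the flexible
+// second operand (Operand2) of a data-processing instruction, using the
+// shift-by-immediate form when the shift amount is a suitable constant and
+// the shift-by-register form otherwise.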
+static bool TryMatchROR(InstructionSelector* selector,
+                        InstructionCode* opcode_return, Node* node,
+                        InstructionOperand** value_return,
+                        InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Ror) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 31)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+static inline bool TryMatchASR(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Sar) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 32)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+static inline bool TryMatchLSL(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Shl) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(0, 31)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+static inline bool TryMatchLSR(InstructionSelector* selector,
+                               InstructionCode* opcode_return, Node* node,
+                               InstructionOperand** value_return,
+                               InstructionOperand** shift_return) {
+  ArmOperandGenerator g(selector);
+  if (node->opcode() != IrOpcode::kWord32Shr) return false;
+  Int32BinopMatcher m(node);
+  *value_return = g.UseRegister(m.left().node());
+  if (m.right().IsInRange(1, 32)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
+    *shift_return = g.UseImmediate(m.right().node());
+  } else {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
+    *shift_return = g.UseRegister(m.right().node());
+  }
+  return true;
+}
+
+
+static inline bool TryMatchShift(InstructionSelector* selector,
+                                 InstructionCode* opcode_return, Node* node,
+                                 InstructionOperand** value_return,
+                                 InstructionOperand** shift_return) {
+  return (
+      TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchLSR(selector, opcode_return, node, value_return, shift_return) ||
+      TryMatchROR(selector, opcode_return, node, value_return, shift_return));
+}
+
+
+static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
+                                            InstructionCode* opcode_return,
+                                            Node* node,
+                                            size_t* input_count_return,
+                                            InstructionOperand** inputs) {
+  ArmOperandGenerator g(selector);
+  if (g.CanBeImmediate(node, *opcode_return)) {
+    *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
+    inputs[0] = g.UseImmediate(node);
+    *input_count_return = 1;
+    return true;
+  }
+  if (TryMatchShift(selector, opcode_return, node, &inputs[0], &inputs[1])) {
+    *input_count_return = 2;
+    return true;
+  }
+  return false;
+}
+
+
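+// Shared routine for binary operations with a flexible second operand.
+// TryMatchImmediateOrShift fills inputs[1] (and inputs[2] for a register
+// shift) first; inputs[0] then receives the remaining register operand. When
+// only the left operand can be encoded as Operand2, |reverse_opcode| (e.g.
+// RSB for SUB) is used so the operands can be swapped.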
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, InstructionCode reverse_opcode,
+                       FlagsContinuation* cont) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[5];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
+                                      m.left().node(), &input_count,
+                                      &inputs[1])) {
+    inputs[0] = g.UseRegister(m.right().node());
+    opcode = reverse_opcode;
+    input_count++;
+  } else {
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, InstructionCode reverse_opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, reverse_opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArmVldr32;
+      break;
+    case kRepFloat64:
+      opcode = kArmVldr64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeUint32 ? kArmLdrb : kArmLdrsb;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeUint32 ? kArmLdrh : kArmLdrsh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kArmLdr;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  ArmOperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    // and pass them here instead of using fixed regs.
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(r5), g.TempRegister(r6)};
+    Emit(kArmStoreWriteBarrier, NULL, g.UseFixed(base, r4),
+         g.UseFixed(index, r5), g.UseFixed(value, r6), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArmVstr32;
+      break;
+    case kRepFloat64:
+      opcode = kArmVstr64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kArmStrb;
+      break;
+    case kRepWord16:
+      opcode = kArmStrh;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kArmStr;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), NULL,
+         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+  }
+}
+
+
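+// Emits left & ~right as BIC (bit clear), folding a shift on |right| into
+// Operand2 when possible.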
+static inline void EmitBic(InstructionSelector* selector, Node* node,
+                           Node* left, Node* right) {
+  ArmOperandGenerator g(selector);
+  InstructionCode opcode = kArmBic;
+  InstructionOperand* value_operand;
+  InstructionOperand* shift_operand;
+  if (TryMatchShift(selector, &opcode, right, &value_operand, &shift_operand)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.UseRegister(left),
+                   value_operand, shift_operand);
+    return;
+  }
+  selector->Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+                 g.DefineAsRegister(node), g.UseRegister(left),
+                 g.UseRegister(right));
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsWord32Xor() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(-1)) {
+      EmitBic(this, node, m.right().node(), mleft.left().node());
+      return;
+    }
+  }
+  if (m.right().IsWord32Xor() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    if (mright.right().Is(-1)) {
+      EmitBic(this, node, m.left().node(), mright.left().node());
+      return;
+    }
+  }
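+  // On ARMv7, an AND with a mask of |width| contiguous set bits starting at
+  // bit 0 is an unsigned bitfield extract (UBFX), and a mask that clears a
+  // contiguous run of bits is a bitfield clear (BFC).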
+  if (IsSupported(ARMv7) && m.right().HasValue()) {
+    uint32_t value = m.right().Value();
+    uint32_t width = base::bits::CountPopulation32(value);
+    uint32_t msb = base::bits::CountLeadingZeros32(value);
+    if (width != 0 && msb + width == 32) {
+      DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
+      if (m.left().IsWord32Shr()) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().IsInRange(0, 31)) {
+          Emit(kArmUbfx, g.DefineAsRegister(node),
+               g.UseRegister(mleft.left().node()),
+               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
+          return;
+        }
+      }
+      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(0), g.TempImmediate(width));
+      return;
+    }
+    // Try to interpret this AND as BFC.
+    width = 32 - width;
+    msb = base::bits::CountLeadingZeros32(~value);
+    uint32_t lsb = base::bits::CountTrailingZeros32(~value);
+    if (msb + width + lsb == 32) {
+      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(lsb), g.TempImmediate(width));
+      return;
+    }
+  }
+  VisitBinop(this, node, kArmAnd, kArmAnd);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kArmOrr, kArmOrr);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    InstructionCode opcode = kArmMvn;
+    InstructionOperand* value_operand;
+    InstructionOperand* shift_operand;
+    if (TryMatchShift(this, &opcode, m.left().node(), &value_operand,
+                      &shift_operand)) {
+      Emit(opcode, g.DefineAsRegister(node), value_operand, shift_operand);
+      return;
+    }
+    Emit(opcode | AddressingModeField::encode(kMode_Operand2_R),
+         g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmEor, kArmEor);
+}
+
+
+template <typename TryMatchShift>
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              TryMatchShift try_match_shift,
+                              FlagsContinuation* cont) {
+  ArmOperandGenerator g(selector);
+  InstructionCode opcode = kArmMov;
+  InstructionOperand* inputs[4];
+  size_t input_count = 2;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  CHECK(try_match_shift(selector, &opcode, node, &inputs[0], &inputs[1]));
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+  DCHECK_NE(kMode_None, AddressingModeField::decode(opcode));
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+template <typename TryMatchShift>
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              TryMatchShift try_match_shift) {
+  FlagsContinuation cont;
+  VisitShift(selector, node, try_match_shift, &cont);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitShift(this, node, TryMatchLSL);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (IsSupported(ARMv7) && m.left().IsWord32And() &&
+      m.right().IsInRange(0, 31)) {
+    int32_t lsb = m.right().Value();
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {
+      uint32_t value = (mleft.right().Value() >> lsb) << lsb;
+      uint32_t width = base::bits::CountPopulation32(value);
+      uint32_t msb = base::bits::CountLeadingZeros32(value);
+      if (msb + width + lsb == 32) {
+        DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
+        Emit(kArmUbfx, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
+             g.TempImmediate(width));
+        return;
+      }
+    }
+  }
+  VisitShift(this, node, TryMatchLSR);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitShift(this, node, TryMatchASR);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitShift(this, node, TryMatchROR);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
+    return;
+  }
+  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmAdd, kArmAdd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (IsSupported(MLS) && m.right().IsInt32Mul() &&
+      CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmMls, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
+    return;
+  }
+  VisitBinop(this, node, kArmSub, kArmRsb);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
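+  // Strength-reduce multiplication by 2^k + 1 to x + (x << k) via ADD, and
+  // by 2^k - 1 to (x << k) - x via RSB.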
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int32_t value = m.right().Value();
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      Emit(kArmAdd | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      return;
+    }
+    if (value < kMaxInt && base::bits::IsPowerOfTwo32(value + 1)) {
+      Emit(kArmRsb | AddressingModeField::encode(kMode_Operand2_R_LSL_I),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      return;
+    }
+  }
+  Emit(kArmMul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
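+// Emits an integer division. With hardware SDIV/UDIV (the SUDIV feature)
+// this is a single instruction; otherwise both operands are converted to
+// float64, divided with VDIV, and the quotient converted back to an integer.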
+static void EmitDiv(InstructionSelector* selector, ArchOpcode div_opcode,
+                    ArchOpcode f64i32_opcode, ArchOpcode i32f64_opcode,
+                    InstructionOperand* result_operand,
+                    InstructionOperand* left_operand,
+                    InstructionOperand* right_operand) {
+  ArmOperandGenerator g(selector);
+  if (selector->IsSupported(SUDIV)) {
+    selector->Emit(div_opcode, result_operand, left_operand, right_operand);
+    return;
+  }
+  InstructionOperand* left_double_operand = g.TempDoubleRegister();
+  InstructionOperand* right_double_operand = g.TempDoubleRegister();
+  InstructionOperand* result_double_operand = g.TempDoubleRegister();
+  selector->Emit(f64i32_opcode, left_double_operand, left_operand);
+  selector->Emit(f64i32_opcode, right_double_operand, right_operand);
+  selector->Emit(kArmVdivF64, result_double_operand, left_double_operand,
+                 right_double_operand);
+  selector->Emit(i32f64_opcode, result_operand, result_double_operand);
+}
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+                     ArchOpcode i32f64_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode,
+          g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+          g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
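+// Emits an integer modulus as left - (left / right) * right: a single MLS
+// (multiply-subtract) after the division when available, otherwise a MUL
+// followed by a SUB.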
+static void VisitMod(InstructionSelector* selector, Node* node,
+                     ArchOpcode div_opcode, ArchOpcode f64i32_opcode,
+                     ArchOpcode i32f64_opcode) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* div_operand = g.TempRegister();
+  InstructionOperand* result_operand = g.DefineAsRegister(node);
+  InstructionOperand* left_operand = g.UseRegister(m.left().node());
+  InstructionOperand* right_operand = g.UseRegister(m.right().node());
+  EmitDiv(selector, div_opcode, f64i32_opcode, i32f64_opcode, div_operand,
+          left_operand, right_operand);
+  if (selector->IsSupported(MLS)) {
+    selector->Emit(kArmMls, result_operand, div_operand, right_operand,
+                   left_operand);
+    return;
+  }
+  InstructionOperand* mul_operand = g.TempRegister();
+  selector->Emit(kArmMul, mul_operand, div_operand, right_operand);
+  selector->Emit(kArmSub, result_operand, left_operand, mul_operand);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kArmSdiv, kArmVcvtF64S32, kArmVcvtS32F64);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kArmUdiv, kArmVcvtF64U32, kArmVcvtU32F64);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF64S32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtF64U32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtS32F64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVcvtU32F64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
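+// A Float64Add with a Float64Mul operand is folded into VMLA
+// (multiply-accumulate); VMLA overwrites its accumulator, hence the
+// DefineSameAsFirst output constraint.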
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
+    Int32BinopMatcher mleft(m.left().node());
+    Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
+         g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
+         g.UseRegister(mleft.right().node()));
+    return;
+  }
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
+    return;
+  }
+  VisitRRRFloat64(this, kArmVaddF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
+    Int32BinopMatcher mright(m.right().node());
+    Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+         g.UseRegister(mright.left().node()),
+         g.UseRegister(mright.right().node()));
+    return;
+  }
+  VisitRRRFloat64(this, kArmVsubF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (m.right().Is(-1.0)) {
+    Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitRRRFloat64(this, kArmVmulF64, node);
+  }
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRRFloat64(this, kArmVdivF64, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVmodF64, g.DefineAsFixed(node, d0), g.UseFixed(node->InputAt(0), d0),
+       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  ArmOperandGenerator g(this);
+  Emit(kArmVsqrtF64, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  ArmOperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  // TODO(turbofan): on ARM it's probably better to use the code object in a
+  // register if there are multiple uses of it. Improve constant pool and the
+  // heuristics in the register allocator for where to emit constants.
+  InitializeCallBuffer(call, &buffer, true, false);
+
+  // TODO(dcarney): might be possible to use claim/poke instead
+  // Push any stack arguments.
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    Emit(kArmPush, NULL, g.UseRegister(*input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kArmAdd, kArmAdd, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kArmSub, kArmRsb, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  ArmOperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[5];
+  size_t input_count = 0;
+  InstructionOperand* outputs[1];
+  size_t output_count = 0;
+
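+  // Prefer encoding the right operand as an immediate or shift; if only the
+  // left operand can be encoded, swap the operands and commute the condition
+  // for non-commutative comparisons.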
+  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                               &input_count, &inputs[1])) {
+    inputs[0] = g.UseRegister(m.left().node());
+    input_count++;
+  } else if (TryMatchImmediateOrShift(selector, &opcode, m.left().node(),
+                                      &input_count, &inputs[1])) {
+    if (!commutative) cont->Commute();
+    inputs[0] = g.UseRegister(m.right().node());
+    input_count++;
+  } else {
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.UseRegister(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  } else {
+    DCHECK(cont->IsSet());
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Add:
+      return VisitWordCompare(this, node, kArmCmn, cont, true);
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kArmCmp, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kArmTst, cont, true);
+    case IrOpcode::kWord32Or:
+      return VisitBinop(this, node, kArmOrr, kArmOrr, cont);
+    case IrOpcode::kWord32Xor:
+      return VisitWordCompare(this, node, kArmTeq, cont, true);
+    case IrOpcode::kWord32Sar:
+      return VisitShift(this, node, TryMatchASR, cont);
+    case IrOpcode::kWord32Shl:
+      return VisitShift(this, node, TryMatchLSL, cont);
+    case IrOpcode::kWord32Shr:
+      return VisitShift(this, node, TryMatchLSR, cont);
+    case IrOpcode::kWord32Ror:
+      return VisitShift(this, node, TryMatchROR, cont);
+    default:
+      break;
+  }
+
+  ArmOperandGenerator g(this);
+  InstructionCode opcode =
+      cont->Encode(kArmTst) | AddressingModeField::encode(kMode_Operand2_R);
+  if (cont->IsBranch()) {
+    Emit(opcode, NULL, g.UseRegister(node), g.UseRegister(node),
+         g.Label(cont->true_block()),
+         g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    Emit(opcode, g.DefineAsRegister(cont->result()), g.UseRegister(node),
+         g.UseRegister(node));
+  }
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArmCmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  ArmOperandGenerator g(this);
+  Float64BinopMatcher m(node);
+  if (cont->IsBranch()) {
+    Emit(cont->Encode(kArmVcmpF64), NULL, g.UseRegister(m.left().node()),
+         g.UseRegister(m.right().node()), g.Label(cont->true_block()),
+         g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    Emit(cont->Encode(kArmVcmpF64), g.DefineAsRegister(cont->result()),
+         g.UseRegister(m.left().node()), g.UseRegister(m.right().node()));
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm/linkage-arm.cc b/src/compiler/arm/linkage-arm.cc
new file mode 100644
index 0000000..6673a47
--- /dev/null
+++ b/src/compiler/arm/linkage-arm.cc
@@ -0,0 +1,69 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
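+// Pins the registers used by the ARM calling convention (AAPCS): r0-r3 carry
+// arguments, r0/r1 the return value, and r4-r10 are treated as callee-saved
+// for C calls.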
+struct ArmLinkageHelperTraits {
+  static Register ReturnValueReg() { return r0; }
+  static Register ReturnValue2Reg() { return r1; }
+  static Register JSCallFunctionReg() { return r1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return r1; }
+  static Register RuntimeCallArgCountReg() { return r0; }
+  static RegList CCalleeSaveRegisters() {
+    return r4.bit() | r5.bit() | r6.bit() | r7.bit() | r8.bit() | r9.bit() |
+           r10.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {r0, r1, r2, r3};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 4; }
+};
+
+
+typedef LinkageHelper<ArmLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/code-generator-arm64.cc b/src/compiler/arm64/code-generator-arm64.cc
new file mode 100644
index 0000000..31c53d3
--- /dev/null
+++ b/src/compiler/arm64/code-generator-arm64.cc
@@ -0,0 +1,886 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/arm64/macro-assembler-arm64.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds Arm64-specific methods to convert InstructionOperands.
+class Arm64OperandConverter FINAL : public InstructionOperandConverter {
+ public:
+  Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
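+  // The *32 accessors return W-register (low 32 bits) views of the
+  // underlying X registers, so 32- and 64-bit operations can share operands.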
+  Register InputRegister32(int index) {
+    return ToRegister(instr_->InputAt(index)).W();
+  }
+
+  Register InputRegister64(int index) { return InputRegister(index); }
+
+  Operand InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+  Operand InputOperand64(int index) { return InputOperand(index); }
+
+  Operand InputOperand32(int index) {
+    return ToOperand32(instr_->InputAt(index));
+  }
+
+  Register OutputRegister64() { return OutputRegister(); }
+
+  Register OutputRegister32() { return ToRegister(instr_->Output()).W(); }
+
+  MemOperand MemoryOperand(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputRegister(index + 1),
+                          SXTW);
+    }
+    UNREACHABLE();
+    return MemOperand(no_reg);
+  }
+
+  MemOperand MemoryOperand() {
+    int index = 0;
+    return MemoryOperand(&index);
+  }
+
+  Operand ToOperand(InstructionOperand* op) {
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op));
+    }
+    return ToImmediate(op);
+  }
+
+  Operand ToOperand32(InstructionOperand* op) {
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op).W());
+    }
+    return ToImmediate(op);
+  }
+
+  Operand ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kInt64:
+        return Operand(constant.ToInt64());
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kExternalReference:
+        return Operand(constant.ToExternalReference());
+      case Constant::kHeapObject:
+        return Operand(constant.ToHeapObject());
+    }
+    UNREACHABLE();
+    return Operand(-1);
+  }
+
+  MemOperand ToMemOperand(InstructionOperand* op, MacroAssembler* masm) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? masm->StackPointer() : fp,
+                      offset.offset());
+  }
+};
+
+
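+// Emits a shift, using the register form when the shift amount is in a
+// register and the immediate form otherwise; |width| selects the 32-bit (W)
+// or 64-bit (X) operand accessors.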
+#define ASSEMBLE_SHIFT(asm_instr, width)                                       \
+  do {                                                                         \
+    if (instr->InputAt(1)->IsRegister()) {                                     \
+      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0),       \
+                   i.InputRegister##width(1));                                 \
+    } else {                                                                   \
+      int64_t imm = i.InputOperand##width(1).immediate().value();              \
+      __ asm_instr(i.OutputRegister##width(), i.InputRegister##width(0), imm); \
+    }                                                                          \
+  } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  Arm64OperandConverter i(this, instr);
+  InstructionCode opcode = instr->opcode();
+  switch (ArchOpcodeField::decode(opcode)) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        Register target = i.InputRegister(0);
+        __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+        __ Call(target);
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        UseScratchRegisterScope scope(masm());
+        Register temp = scope.AcquireX();
+        __ Ldr(temp, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ cmp(cp, temp);
+        __ Assert(eq, kWrongFunctionContext);
+      }
+      __ Ldr(x10, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(x10);
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      __ B(code_->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Add:
+      __ Add(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Add32:
+      if (FlagsModeField::decode(opcode) != kFlags_none) {
+        __ Adds(i.OutputRegister32(), i.InputRegister32(0),
+                i.InputOperand32(1));
+      } else {
+        __ Add(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      }
+      break;
+    case kArm64And:
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64And32:
+      __ And(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Mul:
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Mul32:
+      __ Mul(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Idiv:
+      __ Sdiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Idiv32:
+      __ Sdiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
+    case kArm64Udiv:
+      __ Udiv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      break;
+    case kArm64Udiv32:
+      __ Udiv(i.OutputRegister32(), i.InputRegister32(0), i.InputRegister32(1));
+      break;
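+    // The *mod cases compute dividend - (dividend / divisor) * divisor with
+    // a division followed by MSUB (multiply-subtract).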
+    case kArm64Imod: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Sdiv(temp, i.InputRegister(0), i.InputRegister(1));
+      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+      break;
+    }
+    case kArm64Imod32: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireW();
+      __ Sdiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+              i.InputRegister32(0));
+      break;
+    }
+    case kArm64Umod: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Udiv(temp, i.InputRegister(0), i.InputRegister(1));
+      __ Msub(i.OutputRegister(), temp, i.InputRegister(1), i.InputRegister(0));
+      break;
+    }
+    case kArm64Umod32: {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireW();
+      __ Udiv(temp, i.InputRegister32(0), i.InputRegister32(1));
+      __ Msub(i.OutputRegister32(), temp, i.InputRegister32(1),
+              i.InputRegister32(0));
+      break;
+    }
+    // TODO(dcarney): use an mvn instruction?
+    case kArm64Not:
+      __ Orn(i.OutputRegister(), xzr, i.InputOperand(0));
+      break;
+    case kArm64Not32:
+      __ Orn(i.OutputRegister32(), wzr, i.InputOperand32(0));
+      break;
+    case kArm64Neg:
+      __ Neg(i.OutputRegister(), i.InputOperand(0));
+      break;
+    case kArm64Neg32:
+      __ Neg(i.OutputRegister32(), i.InputOperand32(0));
+      break;
+    case kArm64Or:
+      __ Orr(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Or32:
+      __ Orr(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Xor:
+      __ Eor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Xor32:
+      __ Eor(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Sub:
+      __ Sub(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Sub32:
+      if (FlagsModeField::decode(opcode) != kFlags_none) {
+        __ Subs(i.OutputRegister32(), i.InputRegister32(0),
+                i.InputOperand32(1));
+      } else {
+        __ Sub(i.OutputRegister32(), i.InputRegister32(0), i.InputOperand32(1));
+      }
+      break;
+    case kArm64Shl:
+      ASSEMBLE_SHIFT(Lsl, 64);
+      break;
+    case kArm64Shl32:
+      ASSEMBLE_SHIFT(Lsl, 32);
+      break;
+    case kArm64Shr:
+      ASSEMBLE_SHIFT(Lsr, 64);
+      break;
+    case kArm64Shr32:
+      ASSEMBLE_SHIFT(Lsr, 32);
+      break;
+    case kArm64Sar:
+      ASSEMBLE_SHIFT(Asr, 64);
+      break;
+    case kArm64Sar32:
+      ASSEMBLE_SHIFT(Asr, 32);
+      break;
+    case kArm64Ror:
+      ASSEMBLE_SHIFT(Ror, 64);
+      break;
+    case kArm64Ror32:
+      ASSEMBLE_SHIFT(Ror, 32);
+      break;
+    case kArm64Mov32:
+      __ Mov(i.OutputRegister32(), i.InputRegister32(0));
+      break;
+    case kArm64Sxtw:
+      __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
+      break;
+    case kArm64Claim: {
+      int words = MiscField::decode(instr->opcode());
+      __ Claim(words);
+      break;
+    }
+    case kArm64Poke: {
+      int slot = MiscField::decode(instr->opcode());
+      Operand operand(slot * kPointerSize);
+      __ Poke(i.InputRegister(0), operand);
+      break;
+    }
+    case kArm64PokePairZero: {
+      // TODO(dcarney): test slot offset and register order.
+      int slot = MiscField::decode(instr->opcode()) - 1;
+      __ PokePair(i.InputRegister(0), xzr, slot * kPointerSize);
+      break;
+    }
+    case kArm64PokePair: {
+      int slot = MiscField::decode(instr->opcode()) - 1;
+      __ PokePair(i.InputRegister(1), i.InputRegister(0), slot * kPointerSize);
+      break;
+    }
+    case kArm64Cmp:
+      __ Cmp(i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Cmp32:
+      __ Cmp(i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Cmn:
+      __ Cmn(i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Cmn32:
+      __ Cmn(i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Tst:
+      __ Tst(i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kArm64Tst32:
+      __ Tst(i.InputRegister32(0), i.InputOperand32(1));
+      break;
+    case kArm64Float64Cmp:
+      __ Fcmp(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Add:
+      __ Fadd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Sub:
+      __ Fsub(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Mul:
+      __ Fmul(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Div:
+      __ Fdiv(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+              i.InputDoubleRegister(1));
+      break;
+    case kArm64Float64Mod: {
+      // TODO(dcarney): implement directly. See note in lithium-codegen-arm64.cc
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      DCHECK(d0.is(i.InputDoubleRegister(0)));
+      DCHECK(d1.is(i.InputDoubleRegister(1)));
+      DCHECK(d0.is(i.OutputDoubleRegister()));
+      // TODO(dcarney): make sure this saves all relevant registers.
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      break;
+    }
+    case kArm64Float64Sqrt:
+      __ Fsqrt(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64ToInt32:
+      __ Fcvtzs(i.OutputRegister32(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Float64ToUint32:
+      __ Fcvtzu(i.OutputRegister32(), i.InputDoubleRegister(0));
+      break;
+    case kArm64Int32ToFloat64:
+      __ Scvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
+      break;
+    case kArm64Uint32ToFloat64:
+      __ Ucvtf(i.OutputDoubleRegister(), i.InputRegister32(0));
+      break;
+    case kArm64Ldrb:
+      __ Ldrb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Ldrsb:
+      __ Ldrsb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Strb:
+      __ Strb(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64Ldrh:
+      __ Ldrh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Ldrsh:
+      __ Ldrsh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Strh:
+      __ Strh(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64LdrW:
+      __ Ldr(i.OutputRegister32(), i.MemoryOperand());
+      break;
+    case kArm64StrW:
+      __ Str(i.InputRegister32(2), i.MemoryOperand());
+      break;
+    case kArm64Ldr:
+      __ Ldr(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kArm64Str:
+      __ Str(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kArm64LdrS: {
+      UseScratchRegisterScope scope(masm());
+      FPRegister scratch = scope.AcquireS();
+      __ Ldr(scratch, i.MemoryOperand());
+      __ Fcvt(i.OutputDoubleRegister(), scratch);
+      break;
+    }
+    case kArm64StrS: {
+      UseScratchRegisterScope scope(masm());
+      FPRegister scratch = scope.AcquireS();
+      __ Fcvt(scratch, i.InputDoubleRegister(2));
+      __ Str(scratch, i.MemoryOperand());
+      break;
+    }
+    case kArm64LdrD:
+      __ Ldr(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kArm64StrD:
+      __ Str(i.InputDoubleRegister(2), i.MemoryOperand());
+      break;
+    case kArm64StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ Add(index, object, Operand(index, SXTW));
+      __ Str(value, MemOperand(index));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      // TODO(dcarney): we shouldn't test write barriers from c calls.
+      LinkRegisterStatus lr_status = kLRHasNotBeenSaved;
+      UseScratchRegisterScope scope(masm());
+      Register temp = no_reg;
+      if (csp.is(masm()->StackPointer())) {
+        temp = scope.AcquireX();
+        lr_status = kLRHasBeenSaved;
+        __ Push(lr, temp);  // Need to push a pair
+      }
+      __ RecordWrite(object, index, value, lr_status, mode);
+      if (csp.is(masm()->StackPointer())) {
+        __ Pop(temp, lr);
+      }
+      break;
+    }
+  }
+}
+
+
+// Assemble branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  Arm64OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
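+  // For the unordered conditions below, the NaN case (V flag set after a
+  // floating-point compare) is dispatched first; the C++ cases then fall
+  // through to emit the branch for the corresponding ordered condition.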
+  switch (condition) {
+    case kUnorderedEqual:
+      __ B(vs, flabel);
+    // Fall through.
+    case kEqual:
+      __ B(eq, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ B(ne, tlabel);
+      break;
+    case kSignedLessThan:
+      __ B(lt, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ B(ge, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ B(le, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ B(gt, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ B(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ B(lo, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ B(hs, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ B(vs, flabel);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ B(ls, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ B(vs, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ B(hi, tlabel);
+      break;
+    case kOverflow:
+      __ B(vs, tlabel);
+      break;
+    case kNotOverflow:
+      __ B(vc, tlabel);
+      break;
+  }
+  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
+  __ Bind(&done);
+}
+
+
+// Assemble boolean materializations after this instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  Arm64OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 64-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = nv;
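+  // For unordered conditions, a fixed 0 or 1 is materialized directly when
+  // the inputs were unordered (V flag set); otherwise control branches to
+  // 'check', where Cset materializes the result of the ordered condition
+  // selected by the fall-through below.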
+  switch (condition) {
+    case kUnorderedEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kEqual:
+      cc = eq;
+      break;
+    case kUnorderedNotEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kNotEqual:
+      cc = ne;
+      break;
+    case kSignedLessThan:
+      cc = lt;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = ge;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = le;
+      break;
+    case kSignedGreaterThan:
+      cc = gt;
+      break;
+    case kUnorderedLessThan:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = lo;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = hs;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ B(vc, &check);
+      __ Mov(reg, 0);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = ls;
+      break;
+    case kUnorderedGreaterThan:
+      __ B(vc, &check);
+      __ Mov(reg, 1);
+      __ B(&done);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = hi;
+      break;
+    case kOverflow:
+      cc = vs;
+      break;
+    case kNotOverflow:
+      cc = vc;
+      break;
+  }
+  __ Bind(&check);
+  __ Cset(reg, cc);
+  __ Bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+// TODO(dcarney): increase stack slots in frame once before first use.
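+// csp must remain 16-byte aligned on ARM64, so an odd number of spill slots
+// is rounded up to an even count before the stack is adjusted.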
+static int AlignedStackSlots(int stack_slots) {
+  if (stack_slots & 1) stack_slots++;
+  return stack_slots;
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ SetStackPointer(csp);
+    __ Push(lr, fp);
+    __ Mov(fp, csp);
+    // TODO(dcarney): correct callee saved registers.
+    __ PushCalleeSavedRegisters();
+    frame()->SetRegisterSaveAreaSize(20 * kPointerSize);
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ SetStackPointer(jssp);
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
+      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
+      __ Ldr(x10, GlobalObjectMemOperand());
+      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
+      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
+      __ Bind(&ok);
+    }
+
+  } else {
+    __ SetStackPointer(jssp);
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
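+  // Reserve the frame's spill slots. csp is always adjusted by a 16-byte
+  // aligned amount, even when the frame itself is built on jssp.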
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    Register sp = __ StackPointer();
+    if (!sp.Is(csp)) {
+      __ Sub(sp, sp, stack_slots * kPointerSize);
+    }
+    __ Sub(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ Add(csp, csp, AlignedStackSlots(stack_slots) * kPointerSize);
+      }
+      // Restore registers.
+      // TODO(dcarney): correct callee saved registers.
+      __ PopCalleeSavedRegisters();
+    }
+    __ Mov(csp, fp);
+    __ Pop(fp, lr);
+    __ Ret();
+  } else {
+    __ Mov(jssp, fp);
+    __ Pop(fp, lr);
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ Drop(pop_count);
+    __ Ret();
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  Arm64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ Mov(g.ToRegister(destination), src);
+    } else {
+      __ Str(src, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand src = g.ToMemOperand(source, masm());
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    if (destination->IsRegister()) {
+      __ Ldr(g.ToRegister(destination), src);
+    } else {
+      UseScratchRegisterScope scope(masm());
+      Register temp = scope.AcquireX();
+      __ Ldr(temp, src);
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsConstant()) {
+    ConstantOperand* constant_source = ConstantOperand::cast(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      UseScratchRegisterScope scope(masm());
+      Register dst = destination->IsRegister() ? g.ToRegister(destination)
+                                               : scope.AcquireX();
+      Constant src = g.ToConstant(source);
+      if (src.type() == Constant::kHeapObject) {
+        __ LoadObject(dst, src.ToHeapObject());
+      } else {
+        __ Mov(dst, g.ToImmediate(source));
+      }
+      if (destination->IsStackSlot()) {
+        __ Str(dst, g.ToMemOperand(destination, masm()));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      FPRegister result = g.ToDoubleRegister(destination);
+      __ Fmov(result, g.ToDouble(constant_source));
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      UseScratchRegisterScope scope(masm());
+      FPRegister temp = scope.AcquireD();
+      __ Fmov(temp, g.ToDouble(constant_source));
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsDoubleRegister()) {
+    FPRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPRegister dst = g.ToDoubleRegister(destination);
+      __ Fmov(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ Str(src, g.ToMemOperand(destination, masm()));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source, masm());
+    if (destination->IsDoubleRegister()) {
+      __ Ldr(g.ToDoubleRegister(destination), src);
+    } else {
+      UseScratchRegisterScope scope(masm());
+      FPRegister temp = scope.AcquireD();
+      __ Ldr(temp, src);
+      __ Str(temp, g.ToMemOperand(destination, masm()));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  Arm64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    UseScratchRegisterScope scope(masm());
+    Register temp = scope.AcquireX();
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Mov(temp, src);
+      __ Mov(src, dst);
+      __ Mov(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination, masm());
+      __ Mov(temp, src);
+      __ Ldr(src, dst);
+      __ Str(temp, dst);
+    }
+  } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+    UseScratchRegisterScope scope(masm());
+    CPURegister temp_0 = scope.AcquireX();
+    CPURegister temp_1 = scope.AcquireX();
+    MemOperand src = g.ToMemOperand(source, masm());
+    MemOperand dst = g.ToMemOperand(destination, masm());
+    __ Ldr(temp_0, src);
+    __ Ldr(temp_1, dst);
+    __ Str(temp_0, dst);
+    __ Str(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    UseScratchRegisterScope scope(masm());
+    FPRegister temp = scope.AcquireD();
+    FPRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPRegister dst = g.ToDoubleRegister(destination);
+      __ Fmov(temp, src);
+      __ Fmov(src, dst);
+      __ Fmov(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination, masm());
+      __ Fmov(temp, src);
+      __ Ldr(src, dst);
+      __ Str(temp, dst);
+    }
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
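+// 'movz xzr, #0' is architecturally a no-op, but its encoding differs from
+// the canonical nop, presumably so the patcher can recognize inlined smi
+// code.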
+void CodeGenerator::AddNopForSmiCodeInlining() { __ movz(xzr, 0); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    intptr_t current_pc = masm()->pc_offset();
+
+    if (current_pc < (last_lazy_deopt_pc_ + space_needed)) {
+      intptr_t padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK((padding_size % kInstructionSize) == 0);
+      InstructionAccurateScope instruction_accurate(
+          masm(), padding_size / kInstructionSize);
+
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= kInstructionSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/instruction-codes-arm64.h b/src/compiler/arm64/instruction-codes-arm64.h
new file mode 100644
index 0000000..0a9a2ed
--- /dev/null
+++ b/src/compiler/arm64/instruction-codes-arm64.h
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+#define V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// ARM64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(Arm64Add)                      \
+  V(Arm64Add32)                    \
+  V(Arm64And)                      \
+  V(Arm64And32)                    \
+  V(Arm64Cmp)                      \
+  V(Arm64Cmp32)                    \
+  V(Arm64Cmn)                      \
+  V(Arm64Cmn32)                    \
+  V(Arm64Tst)                      \
+  V(Arm64Tst32)                    \
+  V(Arm64Or)                       \
+  V(Arm64Or32)                     \
+  V(Arm64Xor)                      \
+  V(Arm64Xor32)                    \
+  V(Arm64Sub)                      \
+  V(Arm64Sub32)                    \
+  V(Arm64Mul)                      \
+  V(Arm64Mul32)                    \
+  V(Arm64Idiv)                     \
+  V(Arm64Idiv32)                   \
+  V(Arm64Udiv)                     \
+  V(Arm64Udiv32)                   \
+  V(Arm64Imod)                     \
+  V(Arm64Imod32)                   \
+  V(Arm64Umod)                     \
+  V(Arm64Umod32)                   \
+  V(Arm64Not)                      \
+  V(Arm64Not32)                    \
+  V(Arm64Neg)                      \
+  V(Arm64Neg32)                    \
+  V(Arm64Shl)                      \
+  V(Arm64Shl32)                    \
+  V(Arm64Shr)                      \
+  V(Arm64Shr32)                    \
+  V(Arm64Sar)                      \
+  V(Arm64Sar32)                    \
+  V(Arm64Ror)                      \
+  V(Arm64Ror32)                    \
+  V(Arm64Mov32)                    \
+  V(Arm64Sxtw)                     \
+  V(Arm64Claim)                    \
+  V(Arm64Poke)                     \
+  V(Arm64PokePairZero)             \
+  V(Arm64PokePair)                 \
+  V(Arm64Float64Cmp)               \
+  V(Arm64Float64Add)               \
+  V(Arm64Float64Sub)               \
+  V(Arm64Float64Mul)               \
+  V(Arm64Float64Div)               \
+  V(Arm64Float64Mod)               \
+  V(Arm64Float64Sqrt)              \
+  V(Arm64Float64ToInt32)           \
+  V(Arm64Float64ToUint32)          \
+  V(Arm64Int32ToFloat64)           \
+  V(Arm64Uint32ToFloat64)          \
+  V(Arm64LdrS)                     \
+  V(Arm64StrS)                     \
+  V(Arm64LdrD)                     \
+  V(Arm64StrD)                     \
+  V(Arm64Ldrb)                     \
+  V(Arm64Ldrsb)                    \
+  V(Arm64Strb)                     \
+  V(Arm64Ldrh)                     \
+  V(Arm64Ldrsh)                    \
+  V(Arm64Strh)                     \
+  V(Arm64LdrW)                     \
+  V(Arm64StrW)                     \
+  V(Arm64Ldr)                      \
+  V(Arm64Str)                      \
+  V(Arm64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
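+// For example, a load selected with mode MRI is emitted as an instruction of
+// the form 'Ldr x0, [x1, #imm]', while MRR yields 'Ldr x0, [x1, x2]'.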
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_ARM64_INSTRUCTION_CODES_ARM64_H_
diff --git a/src/compiler/arm64/instruction-selector-arm64-unittest.cc b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
new file mode 100644
index 0000000..b5562c2
--- /dev/null
+++ b/src/compiler/arm64/instruction-selector-arm64-unittest.cc
@@ -0,0 +1,1121 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <list>
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+template <typename T>
+struct MachInst {
+  T constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;
+  MachineType machine_type;
+};
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+  return os << mi.constructor_name;
+}
+
+
+// Helper to build Int32Constant or Int64Constant depending on the given
+// machine type.
+Node* BuildConstant(InstructionSelectorTest::StreamBuilder& m, MachineType type,
+                    int64_t value) {
+  switch (type) {
+    case kMachInt32:
+      return m.Int32Constant(value);
+
+    case kMachInt64:
+      return m.Int64Constant(value);
+
+    default:
+      UNIMPLEMENTED();
+  }
+  return NULL;
+}
+
+
+// ARM64 logical instructions.
+static const MachInst2 kLogicalInstructions[] = {
+    {&RawMachineAssembler::Word32And, "Word32And", kArm64And32, kMachInt32},
+    {&RawMachineAssembler::Word64And, "Word64And", kArm64And, kMachInt64},
+    {&RawMachineAssembler::Word32Or, "Word32Or", kArm64Or32, kMachInt32},
+    {&RawMachineAssembler::Word64Or, "Word64Or", kArm64Or, kMachInt64},
+    {&RawMachineAssembler::Word32Xor, "Word32Xor", kArm64Xor32, kMachInt32},
+    {&RawMachineAssembler::Word64Xor, "Word64Xor", kArm64Xor, kMachInt64}};
+
+
+// ARM64 logical immediates: a contiguous run of set bits, rotated within a
+// power-of-two-sized block that is then replicated across the word. Below is
+// a random subset of the 32-bit immediates.
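+// For example, 0x01800180 is a two-bit run starting at bit 7, replicated in
+// each 16-bit half of the word.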
+static const uint32_t kLogicalImmediates[] = {
+    0x00000002, 0x00000003, 0x00000070, 0x00000080, 0x00000100, 0x000001c0,
+    0x00000300, 0x000007e0, 0x00003ffc, 0x00007fc0, 0x0003c000, 0x0003f000,
+    0x0003ffc0, 0x0003fff8, 0x0007ff00, 0x0007ffe0, 0x000e0000, 0x001e0000,
+    0x001ffffc, 0x003f0000, 0x003f8000, 0x00780000, 0x007fc000, 0x00ff0000,
+    0x01800000, 0x01800180, 0x01f801f8, 0x03fe0000, 0x03ffffc0, 0x03fffffc,
+    0x06000000, 0x07fc0000, 0x07ffc000, 0x07ffffc0, 0x07ffffe0, 0x0ffe0ffe,
+    0x0ffff800, 0x0ffffff0, 0x0fffffff, 0x18001800, 0x1f001f00, 0x1f801f80,
+    0x30303030, 0x3ff03ff0, 0x3ff83ff8, 0x3fff0000, 0x3fff8000, 0x3fffffc0,
+    0x70007000, 0x7f7f7f7f, 0x7fc00000, 0x7fffffc0, 0x8000001f, 0x800001ff,
+    0x81818181, 0x9fff9fff, 0xc00007ff, 0xc0ffffff, 0xdddddddd, 0xe00001ff,
+    0xe00003ff, 0xe007ffff, 0xefffefff, 0xf000003f, 0xf001f001, 0xf3fff3ff,
+    0xf800001f, 0xf80fffff, 0xf87ff87f, 0xfbfbfbfb, 0xfc00001f, 0xfc0000ff,
+    0xfc0001ff, 0xfc03fc03, 0xfe0001ff, 0xff000001, 0xff03ff03, 0xff800000,
+    0xff800fff, 0xff801fff, 0xff87ffff, 0xffc0003f, 0xffc007ff, 0xffcfffcf,
+    0xffe00003, 0xffe1ffff, 0xfff0001f, 0xfff07fff, 0xfff80007, 0xfff87fff,
+    0xfffc00ff, 0xfffe07ff, 0xffff00ff, 0xffffc001, 0xfffff007, 0xfffff3ff,
+    0xfffff807, 0xfffff9ff, 0xfffffc0f, 0xfffffeff};
+
+
+// ARM64 arithmetic instructions.
+static const MachInst2 kAddSubInstructions[] = {
+    {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Add32, kMachInt32},
+    {&RawMachineAssembler::Int64Add, "Int64Add", kArm64Add, kMachInt64},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Sub32, kMachInt32},
+    {&RawMachineAssembler::Int64Sub, "Int64Sub", kArm64Sub, kMachInt64}};
+
+
+// ARM64 Add/Sub immediates: 12-bit immediate optionally shifted by 12.
+// Below is a combination of a random subset and some edge values.
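+// The encodable values are 0 through 4095 and multiples of 4096 up to
+// 16773120 (0xfff000, i.e. 4095 << 12).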
+static const int32_t kAddSubImmediates[] = {
+    0,        1,        69,       493,      599,      701,      719,
+    768,      818,      842,      945,      1246,     1286,     1429,
+    1669,     2171,     2179,     2182,     2254,     2334,     2338,
+    2343,     2396,     2449,     2610,     2732,     2855,     2876,
+    2944,     3377,     3458,     3475,     3476,     3540,     3574,
+    3601,     3813,     3871,     3917,     4095,     4096,     16384,
+    364544,   462848,   970752,   1523712,  1863680,  2363392,  3219456,
+    3280896,  4247552,  4526080,  4575232,  4960256,  5505024,  5894144,
+    6004736,  6193152,  6385664,  6795264,  7114752,  7233536,  7348224,
+    7499776,  7573504,  7729152,  8634368,  8937472,  9465856,  10354688,
+    10682368, 11059200, 11460608, 13168640, 13176832, 14336000, 15028224,
+    15597568, 15892480, 16773120};
+
+
+// ARM64 flag setting data processing instructions.
+static const MachInst2 kDPFlagSetInstructions[] = {
+    {&RawMachineAssembler::Word32And, "Word32And", kArm64Tst32, kMachInt32},
+    {&RawMachineAssembler::Int32Add, "Int32Add", kArm64Cmn32, kMachInt32},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kArm64Cmp32, kMachInt32}};
+
+
+// ARM64 arithmetic with overflow instructions.
+static const MachInst2 kOvfAddSubInstructions[] = {
+    {&RawMachineAssembler::Int32AddWithOverflow, "Int32AddWithOverflow",
+     kArm64Add32, kMachInt32},
+    {&RawMachineAssembler::Int32SubWithOverflow, "Int32SubWithOverflow",
+     kArm64Sub32, kMachInt32}};
+
+
+// ARM64 shift instructions.
+static const MachInst2 kShiftInstructions[] = {
+    {&RawMachineAssembler::Word32Shl, "Word32Shl", kArm64Shl32, kMachInt32},
+    {&RawMachineAssembler::Word64Shl, "Word64Shl", kArm64Shl, kMachInt64},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr", kArm64Shr32, kMachInt32},
+    {&RawMachineAssembler::Word64Shr, "Word64Shr", kArm64Shr, kMachInt64},
+    {&RawMachineAssembler::Word32Sar, "Word32Sar", kArm64Sar32, kMachInt32},
+    {&RawMachineAssembler::Word64Sar, "Word64Sar", kArm64Sar, kMachInt64},
+    {&RawMachineAssembler::Word32Ror, "Word32Ror", kArm64Ror32, kMachInt32},
+    {&RawMachineAssembler::Word64Ror, "Word64Ror", kArm64Ror, kMachInt64}};
+
+
+// ARM64 Mul/Div instructions.
+static const MachInst2 kMulDivInstructions[] = {
+    {&RawMachineAssembler::Int32Mul, "Int32Mul", kArm64Mul32, kMachInt32},
+    {&RawMachineAssembler::Int64Mul, "Int64Mul", kArm64Mul, kMachInt64},
+    {&RawMachineAssembler::Int32Div, "Int32Div", kArm64Idiv32, kMachInt32},
+    {&RawMachineAssembler::Int64Div, "Int64Div", kArm64Idiv, kMachInt64},
+    {&RawMachineAssembler::Int32UDiv, "Int32UDiv", kArm64Udiv32, kMachInt32},
+    {&RawMachineAssembler::Int64UDiv, "Int64UDiv", kArm64Udiv, kMachInt64}};
+
+
+// ARM64 FP arithmetic instructions.
+static const MachInst2 kFPArithInstructions[] = {
+    {&RawMachineAssembler::Float64Add, "Float64Add", kArm64Float64Add,
+     kMachFloat64},
+    {&RawMachineAssembler::Float64Sub, "Float64Sub", kArm64Float64Sub,
+     kMachFloat64},
+    {&RawMachineAssembler::Float64Mul, "Float64Mul", kArm64Float64Mul,
+     kMachFloat64},
+    {&RawMachineAssembler::Float64Div, "Float64Div", kArm64Float64Div,
+     kMachFloat64}};
+
+
+struct FPCmp {
+  MachInst2 mi;
+  FlagsCondition cond;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const FPCmp& cmp) {
+  return os << cmp.mi;
+}
+
+
+// ARM64 FP comparison instructions.
+static const FPCmp kFPCmpInstructions[] = {
+    {{&RawMachineAssembler::Float64Equal, "Float64Equal", kArm64Float64Cmp,
+      kMachFloat64},
+     kUnorderedEqual},
+    {{&RawMachineAssembler::Float64LessThan, "Float64LessThan",
+      kArm64Float64Cmp, kMachFloat64},
+     kUnorderedLessThan},
+    {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+      kArm64Float64Cmp, kMachFloat64},
+     kUnorderedLessThanOrEqual}};
+
+
+struct Conversion {
+  // The machine_type field in MachInst1 represents the destination type.
+  MachInst1 mi;
+  MachineType src_machine_type;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Conversion& conv) {
+  return os << conv.mi;
+}
+
+
+// ARM64 type conversion instructions.
+static const Conversion kConversionInstructions[] = {
+    {{&RawMachineAssembler::ChangeInt32ToInt64, "ChangeInt32ToInt64",
+      kArm64Sxtw, kMachInt64},
+     kMachInt32},
+    {{&RawMachineAssembler::ChangeUint32ToUint64, "ChangeUint32ToUint64",
+      kArm64Mov32, kMachUint64},
+     kMachUint32},
+    {{&RawMachineAssembler::TruncateInt64ToInt32, "TruncateInt64ToInt32",
+      kArm64Mov32, kMachInt32},
+     kMachInt64},
+    {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+      kArm64Int32ToFloat64, kMachFloat64},
+     kMachInt32},
+    {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+      kArm64Uint32ToFloat64, kMachFloat64},
+     kMachUint32},
+    {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+      kArm64Float64ToInt32, kMachInt32},
+     kMachFloat64},
+    {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+      kArm64Float64ToUint32, kMachUint32},
+     kMachFloat64}};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Logical instructions.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorLogicalTest;
+
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorLogicalTest, Immediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  // TODO(all): Add support for testing 64-bit immediates.
+  if (type == kMachInt32) {
+    // Immediate on the right.
+    TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+      StreamBuilder m(this, type, type);
+      m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+
+    // Immediate on the left; all logical ops should commute.
+    TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+      StreamBuilder m(this, type, type);
+      m.Return((m.*dpi.constructor)(m.Int32Constant(imm), m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
+                        ::testing::ValuesIn(kLogicalInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Add and Sub instructions.
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorAddSubTest;
+
+
+TEST_P(InstructionSelectorAddSubTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, ImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorAddSubTest, ImmediateOnLeft) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    Stream s = m.Build();
+
+    // Add can support an immediate on the left by commuting, but Sub can't
+    // commute. We test zero-on-left Sub later.
+    if (strstr(dpi.constructor_name, "Add") != NULL) {
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+      EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+      EXPECT_EQ(1U, s[0]->OutputCount());
+    }
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorAddSubTest,
+                        ::testing::ValuesIn(kAddSubInstructions));
+
+
+TEST_F(InstructionSelectorTest, SubZeroOnLeft) {
+  // Subtraction with zero on the left maps to Neg.
+  {
+    // 32-bit subtract.
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    m.Return(m.Int32Sub(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Neg32, s[0]->arch_opcode());
+    EXPECT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+  {
+    // 64-bit subtract.
+    StreamBuilder m(this, kMachInt64, kMachInt64, kMachInt64);
+    m.Return(m.Int64Sub(m.Int64Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Neg, s[0]->arch_opcode());
+    EXPECT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Data processing controlled branches.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorDPFlagSetTest;
+
+
+TEST_P(InstructionSelectorDPFlagSetTest, BranchWithParameters) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  MLabel a, b;
+  m.Branch((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(1));
+  m.Bind(&b);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorDPFlagSetTest,
+                        ::testing::ValuesIn(kDPFlagSetInstructions));
+
+
+TEST_F(InstructionSelectorTest, AndBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32And(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, SubBranchWithImmediateOnRight) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmp32, s[0]->arch_opcode());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, AndBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kLogicalImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Word32And(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_LE(1U, s[0]->InputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, AddBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    m.Branch(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Cmn32, s[0]->arch_opcode());
+    ASSERT_LE(1U, s[0]->InputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Add and subtract instructions with overflow.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorOvfAddSubTest;
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, OvfParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return(
+      m.Projection(1, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, OvfImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return(m.Projection(
+        1, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, ValParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return(
+      m.Projection(0, (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1))));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_LE(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, ValImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    m.Return(m.Projection(
+        0, (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm))));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BothParameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+  Stream s = m.Build();
+  ASSERT_LE(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(2U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BothImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BranchWithParameters) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  MLabel a, b;
+  Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Parameter(1));
+  m.Branch(m.Projection(1, n), &a, &b);
+  m.Bind(&a);
+  m.Return(m.Int32Constant(0));
+  m.Bind(&b);
+  m.Return(m.Projection(0, n));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(4U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+  EXPECT_EQ(kOverflow, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorOvfAddSubTest, BranchWithImmediateOnRight) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, type, type);
+    MLabel a, b;
+    Node* n = (m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorOvfAddSubTest,
+                        ::testing::ValuesIn(kOvfAddSubInstructions));
+
+
+TEST_F(InstructionSelectorTest, OvfFlagAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        1, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfValAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Projection(
+        0, m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0))));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_LE(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_none, s[0]->flags_mode());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfBothAddImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
+    m.Return(m.Word32Equal(m.Projection(0, n), m.Projection(1, n)));
+    Stream s = m.Build();
+
+    ASSERT_LE(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(2U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, OvfBranchWithImmediateOnLeft) {
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* n = m.Int32AddWithOverflow(m.Int32Constant(imm), m.Parameter(0));
+    m.Branch(m.Projection(1, n), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(0));
+    m.Bind(&b);
+    m.Return(m.Projection(0, n));
+    Stream s = m.Build();
+
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Add32, s[0]->arch_opcode());
+    ASSERT_EQ(4U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_branch, s[0]->flags_mode());
+    EXPECT_EQ(kOverflow, s[0]->flags_condition());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Shift instructions.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorShiftTest;
+
+
+TEST_P(InstructionSelectorShiftTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+                        ::testing::ValuesIn(kShiftInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Mul and Div instructions.
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorMulDivTest;
+
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+                        ::testing::ValuesIn(kMulDivInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Floating point instructions.
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorFPArithTest;
+
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+  const MachInst2 fpa = GetParam();
+  StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+  m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
+                        ::testing::ValuesIn(kFPArithInstructions));
+
+
+typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+  const FPCmp cmp = GetParam();
+  StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+  m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+                        ::testing::ValuesIn(kFPCmpInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+typedef InstructionSelectorTestWithParam<Conversion>
+    InstructionSelectorConversionTest;
+
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+  const Conversion conv = GetParam();
+  StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+  m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorConversionTest,
+                        ::testing::ValuesIn(kConversionInstructions));
+
+
+// -----------------------------------------------------------------------------
+// Memory access instructions.
+
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode ldr_opcode;
+  ArchOpcode str_opcode;
+  const int32_t immediates[20];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+}  // namespace
+
+
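+// Each entry pairs a machine type with its load/store opcodes and a sample of
+// immediate offsets. The positive maxima track ARM64's scaled unsigned 12-bit
+// offset encoding, which grows with the access size (4095 for byte accesses
+// up to 32760 for doublewords); the small negative offsets exercise the
+// unscaled signed form.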
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kArm64Ldrsb, kArm64Strb,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001,
+      2121, 2442, 4093, 4094, 4095}},
+    {kMachUint8, kArm64Ldrb, kArm64Strb,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 257, 258, 1000, 1001,
+      2121, 2442, 4093, 4094, 4095}},
+    {kMachInt16, kArm64Ldrsh, kArm64Strh,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098,
+      4100, 4242, 6786, 8188, 8190}},
+    {kMachUint16, kArm64Ldrh, kArm64Strh,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 258, 260, 4096, 4098,
+      4100, 4242, 6786, 8188, 8190}},
+    {kMachInt32, kArm64LdrW, kArm64StrW,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachUint32, kArm64LdrW, kArm64StrW,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachInt64, kArm64Ldr, kArm64Str,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}},
+    {kMachUint64, kArm64Ldr, kArm64Str,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}},
+    {kMachFloat32, kArm64LdrS, kArm64StrS,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 260, 4096, 4100, 8192,
+      8196, 3276, 3280, 16376, 16380}},
+    {kMachFloat64, kArm64LdrD, kArm64StrD,
+     {-256, -255, -3, -2, -1, 0, 1, 2, 3, 255, 256, 264, 4096, 4104, 8192,
+      8200, 16384, 16392, 32752, 32760}}};
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.ldr_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRR, s[0]->addressing_mode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.str_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// -----------------------------------------------------------------------------
+// Comparison instructions.
+
+static const MachInst2 kComparisonInstructions[] = {
+    {&RawMachineAssembler::Word32Equal, "Word32Equal", kArm64Cmp32, kMachInt32},
+    {&RawMachineAssembler::Word64Equal, "Word64Equal", kArm64Cmp, kMachInt64},
+};
+
+
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorComparisonTest;
+
+
+TEST_P(InstructionSelectorComparisonTest, WithParameters) {
+  const MachInst2 cmp = GetParam();
+  const MachineType type = cmp.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*cmp.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(kEqual, s[0]->flags_condition());
+}
+
+
+TEST_P(InstructionSelectorComparisonTest, WithImmediate) {
+  const MachInst2 cmp = GetParam();
+  const MachineType type = cmp.machine_type;
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into tst instructions.
+    if (imm == 0) continue;
+    StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(m.Parameter(0), BuildConstant(m, type, imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  TRACED_FOREACH(int32_t, imm, kAddSubImmediates) {
+    // Comparisons with 0 are turned into tst instructions.
+    if (imm == 0) continue;
+    // Same test with the immediate on the left; the selector must commute it.
+    StreamBuilder m(this, type, type);
+    m.Return((m.*cmp.constructor)(BuildConstant(m, type, imm), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(cmp.arch_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(imm, s.ToInt64(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorComparisonTest,
+                        ::testing::ValuesIn(kComparisonInstructions));
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst32, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt64, kMachInt64);
+    m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt64, kMachInt64);
+    m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Tst, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(s[0]->InputAt(0)), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/instruction-selector-arm64.cc b/src/compiler/arm64/instruction-selector-arm64.cc
new file mode 100644
index 0000000..472ce6f
--- /dev/null
+++ b/src/compiler/arm64/instruction-selector-arm64.cc
@@ -0,0 +1,697 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+enum ImmediateMode {
+  kArithmeticImm,  // 12 bit unsigned immediate shifted left 0 or 12 bits
+  kShift32Imm,     // 0 - 31
+  kShift64Imm,     // 0 - 63
+  kLogical32Imm,
+  kLogical64Imm,
+  kLoadStoreImm8,   // signed 9 bit unscaled or 12 bit unsigned scaled by access size
+  kLoadStoreImm16,
+  kLoadStoreImm32,
+  kLoadStoreImm64,
+  kNoImmediate
+};
+
+
+// Adds Arm64-specific methods for generating operands.
+class Arm64OperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit Arm64OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, ImmediateMode mode) {
+    if (CanBeImmediate(node, mode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
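+  // Returns true if |node| is an integer constant that can be encoded as an
+  // immediate operand in the given mode; all other nodes need a register.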
+  bool CanBeImmediate(Node* node, ImmediateMode mode) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    unsigned ignored;
+    switch (mode) {
+      case kLogical32Imm:
+        // TODO(dcarney): some unencodable values can be handled by
+        // switching instructions.
+        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 32,
+                                       &ignored, &ignored, &ignored);
+      case kLogical64Imm:
+        return Assembler::IsImmLogical(static_cast<uint64_t>(value), 64,
+                                       &ignored, &ignored, &ignored);
+      case kArithmeticImm:
+        // TODO(dcarney): -values can be handled by instruction swapping
+        return Assembler::IsImmAddSub(value);
+      case kShift32Imm:
+        return 0 <= value && value < 32;
+      case kShift64Imm:
+        return 0 <= value && value < 64;
+      case kLoadStoreImm8:
+        return IsLoadStoreImmediate(value, LSByte);
+      case kLoadStoreImm16:
+        return IsLoadStoreImmediate(value, LSHalfword);
+      case kLoadStoreImm32:
+        return IsLoadStoreImmediate(value, LSWord);
+      case kLoadStoreImm64:
+        return IsLoadStoreImmediate(value, LSDoubleWord);
+      case kNoImmediate:
+        return false;
+    }
+    return false;
+  }
+
+ private:
+  bool IsLoadStoreImmediate(int64_t value, LSDataSize size) {
+    return Assembler::IsImmLSScaled(value, size) ||
+           Assembler::IsImmLSUnscaled(value);
+  }
+};
+
+
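+// Emits an instruction with one register output and two register inputs.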
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                            Node* node) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node, ImmediateMode operand_mode) {
+  Arm64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), operand_mode));
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, ImmediateMode operand_mode,
+                       FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  Matcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), operand_mode);
+
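+  // A branch continuation consumes the true/false target labels as extra
+  // inputs; a set continuation instead defines an extra boolean output below.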
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+template <typename Matcher>
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       ArchOpcode opcode, ImmediateMode operand_mode) {
+  FlagsContinuation cont;
+  VisitBinop<Matcher>(selector, node, opcode, operand_mode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  ArchOpcode opcode;
+  ImmediateMode immediate_mode = kNoImmediate;
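+  // Pick the load opcode and the immediate-offset encoding that matches the
+  // representation being loaded.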
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArm64LdrS;
+      immediate_mode = kLoadStoreImm32;
+      break;
+    case kRepFloat64:
+      opcode = kArm64LdrD;
+      immediate_mode = kLoadStoreImm64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kArm64Ldrsb : kArm64Ldrb;
+      immediate_mode = kLoadStoreImm8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kArm64Ldrsh : kArm64Ldrh;
+      immediate_mode = kLoadStoreImm16;
+      break;
+    case kRepWord32:
+      opcode = kArm64LdrW;
+      immediate_mode = kLoadStoreImm32;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kArm64Ldr;
+      immediate_mode = kLoadStoreImm64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(index, immediate_mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  Arm64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(x11), g.TempRegister(x12)};
+    Emit(kArm64StoreWriteBarrier, NULL, g.UseFixed(base, x10),
+         g.UseFixed(index, x11), g.UseFixed(value, x12), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+  ArchOpcode opcode;
+  ImmediateMode immediate_mode = kNoImmediate;
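+  // Pick the store opcode and the immediate-offset encoding that matches the
+  // representation being stored.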
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kArm64StrS;
+      immediate_mode = kLoadStoreImm32;
+      break;
+    case kRepFloat64:
+      opcode = kArm64StrD;
+      immediate_mode = kLoadStoreImm64;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kArm64Strb;
+      immediate_mode = kLoadStoreImm8;
+      break;
+    case kRepWord16:
+      opcode = kArm64Strh;
+      immediate_mode = kLoadStoreImm16;
+      break;
+    case kRepWord32:
+      opcode = kArm64StrW;
+      immediate_mode = kLoadStoreImm32;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kArm64Str;
+      immediate_mode = kLoadStoreImm64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(index, immediate_mode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MRR), NULL,
+         g.UseRegister(base), g.UseRegister(index), g.UseRegister(value));
+  }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64And32, kLogical32Imm);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64And, kLogical64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Or32, kLogical32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64Or, kLogical64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
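+  // x ^ -1 is bitwise negation, so emit a single not instruction instead.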
+  if (m.right().Is(-1)) {
+    Emit(kArm64Not32, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kArm64Xor32, kLogical32Imm);
+  }
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kArm64Not, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kArm64Xor, kLogical64Imm);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitRRO(this, kArm64Shl32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  VisitRRO(this, kArm64Shl, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitRRO(this, kArm64Shr32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  VisitRRO(this, kArm64Shr, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitRRO(this, kArm64Sar32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitRRO(this, kArm64Sar, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kArm64Ror32, node, kShift32Imm);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitRRO(this, kArm64Ror, node, kShift64Imm);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop<Int64BinopMatcher>(this, node, kArm64Add, kArithmeticImm);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
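+  // 0 - x is negation, so emit a single neg instruction instead.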
+  if (m.left().Is(0)) {
+    Emit(kArm64Neg32, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm);
+  }
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kArm64Neg, g.DefineAsRegister(node), g.UseRegister(m.right().node()));
+  } else {
+    VisitBinop<Int64BinopMatcher>(this, node, kArm64Sub, kArithmeticImm);
+  }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitRRR(this, kArm64Mul32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitRRR(this, kArm64Mul, node);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitRRR(this, kArm64Idiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitRRR(this, kArm64Idiv, node);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitRRR(this, kArm64Udiv32, node);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+  VisitRRR(this, kArm64Udiv, node);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitRRR(this, kArm64Imod32, node);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitRRR(this, kArm64Imod, node);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitRRR(this, kArm64Umod32, node);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+  VisitRRR(this, kArm64Umod, node);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Int32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Uint32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64ToInt32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64ToUint32, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Sxtw, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  Arm64OperandGenerator g(this);
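+  // Writing a W register zeroes the upper 32 bits, so a 32-bit move performs
+  // the zero-extension.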
+  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Add, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Sub, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Mul, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRRFloat64(this, kArm64Float64Div, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  Arm64OperandGenerator g(this);
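+  // There is no ARM64 instruction for floating-point modulus, so lower this
+  // to a call that takes its arguments in the FP argument registers d0/d1.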
+  Emit(kArm64Float64Mod, g.DefineAsFixed(node, d0),
+       g.UseFixed(node->InputAt(0), d0),
+       g.UseFixed(node->InputAt(1), d1))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  Arm64OperandGenerator g(this);
+  Emit(kArm64Float64Sqrt, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Add32, kArithmeticImm, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop<Int32BinopMatcher>(this, node, kArm64Sub32, kArithmeticImm, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  Arm64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  Arm64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, kArithmeticImm)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, kArithmeticImm)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
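+// Tests combine with the node computing the value where possible: add, sub
+// and and can set the flags directly via cmn, cmp and tst; any other value
+// is tested against itself.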
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Add:
+      return VisitWordCompare(this, node, kArm64Cmn32, cont, true);
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kArm64Cmp32, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kArm64Tst32, cont, true);
+    default:
+      break;
+  }
+
+  Arm64OperandGenerator g(this);
+  VisitCompare(this, kArm64Tst32, g.UseRegister(node), g.UseRegister(node),
+               cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kWord64And:
+      return VisitWordCompare(this, node, kArm64Tst, cont, true);
+    default:
+      break;
+  }
+
+  Arm64OperandGenerator g(this);
+  VisitCompare(this, kArm64Tst, g.UseRegister(node), g.UseRegister(node), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArm64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kArm64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  Arm64OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kArm64Float64Cmp, g.UseRegister(left),
+               g.UseRegister(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  Arm64OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  // TODO(turbofan): on ARM64 it's probably better to use the code object in a
+  // register if there are multiple uses of it. Improve constant pool and the
+  // heuristics in the register allocator for where to emit constants.
+  InitializeCallBuffer(call, &buffer, true, false);
+
+  // Push the arguments to the stack.
+  bool pushed_count_uneven = buffer.pushed_nodes.size() & 1;
+  int aligned_push_count = buffer.pushed_nodes.size();
+  // TODO(dcarney): claim and poke probably take small immediates,
+  //                loop here or whatever.
+  // Bump the stack pointer(s).
+  if (aligned_push_count > 0) {
+    // TODO(dcarney): it would be better to bump the csp here only
+    //                and emit paired stores with increment for non c frames.
+    Emit(kArm64Claim | MiscField::encode(aligned_push_count), NULL);
+  }
+  // Move arguments to the stack.
+  {
+    int slot = buffer.pushed_nodes.size() - 1;
+    // Emit the uneven pushes.
+    if (pushed_count_uneven) {
+      Node* input = buffer.pushed_nodes[slot];
+      Emit(kArm64Poke | MiscField::encode(slot), NULL, g.UseRegister(input));
+      slot--;
+    }
+    // Now all pushes can be done in pairs.
+    for (; slot >= 0; slot -= 2) {
+      Emit(kArm64PokePair | MiscField::encode(slot), NULL,
+           g.UseRegister(buffer.pushed_nodes[slot]),
+           g.UseRegister(buffer.pushed_nodes[slot - 1]));
+    }
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/arm64/linkage-arm64.cc b/src/compiler/arm64/linkage-arm64.cc
new file mode 100644
index 0000000..2be2cb1
--- /dev/null
+++ b/src/compiler/arm64/linkage-arm64.cc
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct Arm64LinkageHelperTraits {
+  static Register ReturnValueReg() { return x0; }
+  static Register ReturnValue2Reg() { return x1; }
+  static Register JSCallFunctionReg() { return x1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return x1; }
+  static Register RuntimeCallArgCountReg() { return x0; }
+  static RegList CCalleeSaveRegisters() {
+    // TODO(dcarney): correct callee saved registers.
+    return 0;
+  }
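+  // AAPCS64 passes the first eight integer arguments in registers x0-x7.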
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {x0, x1, x2, x3, x4, x5, x6, x7};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 8; }
+};
+
+
+typedef LinkageHelper<Arm64LinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ast-graph-builder.cc b/src/compiler/ast-graph-builder.cc
new file mode 100644
index 0000000..74fb0ae
--- /dev/null
+++ b/src/compiler/ast-graph-builder.cc
@@ -0,0 +1,2034 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/ast-graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/control-builders.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+AstGraphBuilder::AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph)
+    : StructuredGraphBuilder(jsgraph->graph(), jsgraph->common()),
+      info_(info),
+      jsgraph_(jsgraph),
+      globals_(0, info->zone()),
+      breakable_(NULL),
+      execution_context_(NULL) {
+  InitializeAstVisitor(info->zone());
+}
+
+
+Node* AstGraphBuilder::GetFunctionClosure() {
+  if (!function_closure_.is_set()) {
+    // Parameter -1 is special for the function closure.
+    const Operator* op = common()->Parameter(-1);
+    Node* node = NewNode(op, graph()->start());
+    function_closure_.set(node);
+  }
+  return function_closure_.get();
+}
+
+
+Node* AstGraphBuilder::GetFunctionContext() {
+  if (!function_context_.is_set()) {
+    // Parameter (arity + 1) is special for the outer context of the function.
+    const Operator* op = common()->Parameter(info()->num_parameters() + 1);
+    Node* node = NewNode(op, graph()->start());
+    function_context_.set(node);
+  }
+  return function_context_.get();
+}
+
+
+bool AstGraphBuilder::CreateGraph() {
+  Scope* scope = info()->scope();
+  DCHECK(graph() != NULL);
+
+  // Set up the basic structure of the graph.
+  int parameter_count = info()->num_parameters();
+  graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
+
+  // Initialize the top-level environment.
+  Environment env(this, scope, graph()->start());
+  set_environment(&env);
+
+  // Build node to initialize local function context.
+  Node* closure = GetFunctionClosure();
+  Node* outer = GetFunctionContext();
+  Node* inner = BuildLocalFunctionContext(outer, closure);
+
+  // Push top-level function scope for the function body.
+  ContextScope top_context(this, scope, inner);
+
+  // Build the arguments object if it is used.
+  BuildArgumentsObject(scope->arguments());
+
+  // Emit tracing call if requested to do so.
+  if (FLAG_trace) {
+    NewNode(javascript()->Runtime(Runtime::kTraceEnter, 0));
+  }
+
+  // Visit implicit declaration of the function name.
+  if (scope->is_function_scope() && scope->function() != NULL) {
+    VisitVariableDeclaration(scope->function());
+  }
+
+  // Visit declarations within the function scope.
+  VisitDeclarations(scope->declarations());
+
+  // TODO(mstarzinger): This should do an inlined stack check.
+  Node* node = NewNode(javascript()->Runtime(Runtime::kStackGuard, 0));
+  PrepareFrameState(node, BailoutId::FunctionEntry());
+
+  // Visit statements in the function body.
+  VisitStatements(info()->function()->body());
+  if (HasStackOverflow()) return false;
+
+  // Emit tracing call if requested to do so.
+  if (FLAG_trace) {
+    // TODO(mstarzinger): Only traces implicit return.
+    Node* return_value = jsgraph()->UndefinedConstant();
+    NewNode(javascript()->Runtime(Runtime::kTraceExit, 1), return_value);
+  }
+
+  // Return 'undefined' in case we can fall off the end.
+  Node* control = NewNode(common()->Return(), jsgraph()->UndefinedConstant());
+  UpdateControlDependencyToLeaveFunction(control);
+
+  // Finish the basic structure of the graph.
+  environment()->UpdateControlDependency(exit_control());
+  graph()->SetEnd(NewNode(common()->End()));
+
+  return true;
+}
+
+
+// Left-hand side can only be a property, a global or a variable slot.
+enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+
+
+// Determine the left-hand side kind of an assignment.
+static LhsKind DetermineLhsKind(Expression* expr) {
+  Property* property = expr->AsProperty();
+  DCHECK(expr->IsValidReferenceExpression());
+  if (property == NULL) return VARIABLE;
+  return property->key()->IsPropertyName() ? NAMED_PROPERTY : KEYED_PROPERTY;
+}
+
+
+// Helper to find an existing shared function info in the baseline code for the
+// given function literal. Used to canonicalize SharedFunctionInfo objects.
+static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
+    Code* unoptimized_code, FunctionLiteral* expr) {
+  int start_position = expr->start_position();
+  for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
+    RelocInfo* rinfo = it.rinfo();
+    if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
+    Object* obj = rinfo->target_object();
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
+      if (shared->start_position() == start_position) {
+        return Handle<SharedFunctionInfo>(shared);
+      }
+    }
+  }
+  return Handle<SharedFunctionInfo>();
+}
+
+
+StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
+    StructuredGraphBuilder::Environment* env) {
+  return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
+}
+
+
+AstGraphBuilder::Environment::Environment(AstGraphBuilder* builder,
+                                          Scope* scope,
+                                          Node* control_dependency)
+    : StructuredGraphBuilder::Environment(builder, control_dependency),
+      parameters_count_(scope->num_parameters() + 1),
+      locals_count_(scope->num_stack_slots()),
+      parameters_node_(NULL),
+      locals_node_(NULL),
+      stack_node_(NULL) {
+  DCHECK_EQ(scope->num_parameters() + 1, parameters_count());
+
+  // Bind the receiver variable.
+  Node* receiver = builder->graph()->NewNode(common()->Parameter(0),
+                                             builder->graph()->start());
+  values()->push_back(receiver);
+
+  // Bind all parameter variables. The parameter indices are shifted by 1
+  // (receiver is parameter index -1 but environment index 0).
+  for (int i = 0; i < scope->num_parameters(); ++i) {
+    Node* parameter = builder->graph()->NewNode(common()->Parameter(i + 1),
+                                                builder->graph()->start());
+    values()->push_back(parameter);
+  }
+
+  // Bind all local variables to undefined.
+  Node* undefined_constant = builder->jsgraph()->UndefinedConstant();
+  values()->insert(values()->end(), locals_count(), undefined_constant);
+}
+
+
+AstGraphBuilder::Environment::Environment(const Environment& copy)
+    : StructuredGraphBuilder::Environment(
+          static_cast<StructuredGraphBuilder::Environment>(copy)),
+      parameters_count_(copy.parameters_count_),
+      locals_count_(copy.locals_count_),
+      parameters_node_(copy.parameters_node_),
+      locals_node_(copy.locals_node_),
+      stack_node_(copy.stack_node_) {}
+
+
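+// Refreshes the cached StateValues node for a slice of the environment,
+// reusing the old node when none of the covered values changed.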
+void AstGraphBuilder::Environment::UpdateStateValues(Node** state_values,
+                                                     int offset, int count) {
+  bool should_update = false;
+  Node** env_values = (count == 0) ? NULL : &values()->at(offset);
+  if (*state_values == NULL || (*state_values)->InputCount() != count) {
+    should_update = true;
+  } else {
+    DCHECK(static_cast<size_t>(offset + count) <= values()->size());
+    for (int i = 0; i < count; i++) {
+      if ((*state_values)->InputAt(i) != env_values[i]) {
+        should_update = true;
+        break;
+      }
+    }
+  }
+  if (should_update) {
+    const Operator* op = common()->StateValues(count);
+    (*state_values) = graph()->NewNode(op, count, env_values);
+  }
+}
+
+
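+// Builds a FrameState node that captures parameters, locals, operand stack
+// and context, so the deoptimizer can reconstruct the frame at |ast_id|.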
+Node* AstGraphBuilder::Environment::Checkpoint(
+    BailoutId ast_id, OutputFrameStateCombine combine) {
+  UpdateStateValues(&parameters_node_, 0, parameters_count());
+  UpdateStateValues(&locals_node_, parameters_count(), locals_count());
+  UpdateStateValues(&stack_node_, parameters_count() + locals_count(),
+                    stack_height());
+
+  const Operator* op = common()->FrameState(JS_FRAME, ast_id, combine);
+
+  return graph()->NewNode(op, parameters_node_, locals_node_, stack_node_,
+                          GetContext(),
+                          builder()->jsgraph()->UndefinedConstant());
+}
+
+
+AstGraphBuilder::AstContext::AstContext(AstGraphBuilder* own,
+                                        Expression::Context kind)
+    : kind_(kind), owner_(own), outer_(own->ast_context()) {
+  owner()->set_ast_context(this);  // Push.
+#ifdef DEBUG
+  original_height_ = environment()->stack_height();
+#endif
+}
+
+
+AstGraphBuilder::AstContext::~AstContext() {
+  owner()->set_ast_context(outer_);  // Pop.
+}
+
+
+AstGraphBuilder::AstEffectContext::~AstEffectContext() {
+  DCHECK(environment()->stack_height() == original_height_);
+}
+
+
+AstGraphBuilder::AstValueContext::~AstValueContext() {
+  DCHECK(environment()->stack_height() == original_height_ + 1);
+}
+
+
+AstGraphBuilder::AstTestContext::~AstTestContext() {
+  DCHECK(environment()->stack_height() == original_height_ + 1);
+}
+
+
+void AstGraphBuilder::AstEffectContext::ProduceValue(Node* value) {
+  // The value is ignored.
+}
+
+
+void AstGraphBuilder::AstValueContext::ProduceValue(Node* value) {
+  environment()->Push(value);
+}
+
+
+void AstGraphBuilder::AstTestContext::ProduceValue(Node* value) {
+  environment()->Push(owner()->BuildToBoolean(value));
+}
+
+
+Node* AstGraphBuilder::AstEffectContext::ConsumeValue() { return NULL; }
+
+
+Node* AstGraphBuilder::AstValueContext::ConsumeValue() {
+  return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::AstTestContext::ConsumeValue() {
+  return environment()->Pop();
+}
+
+
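+// Walks the chain of breakable scopes to the one guarding |target|, dropping
+// the extra operands recorded by every scope passed over on the way.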
+AstGraphBuilder::BreakableScope* AstGraphBuilder::BreakableScope::FindBreakable(
+    BreakableStatement* target) {
+  BreakableScope* current = this;
+  while (current != NULL && current->target_ != target) {
+    owner_->environment()->Drop(current->drop_extra_);
+    current = current->next_;
+  }
+  DCHECK(current != NULL);  // Always found (unless stack is malformed).
+  return current;
+}
+
+
+void AstGraphBuilder::BreakableScope::BreakTarget(BreakableStatement* stmt) {
+  FindBreakable(stmt)->control_->Break();
+}
+
+
+void AstGraphBuilder::BreakableScope::ContinueTarget(BreakableStatement* stmt) {
+  FindBreakable(stmt)->control_->Continue();
+}
+
+
+void AstGraphBuilder::VisitForValueOrNull(Expression* expr) {
+  if (expr == NULL) {
+    return environment()->Push(jsgraph()->NullConstant());
+  }
+  VisitForValue(expr);
+}
+
+
+void AstGraphBuilder::VisitForValues(ZoneList<Expression*>* exprs) {
+  for (int i = 0; i < exprs->length(); ++i) {
+    VisitForValue(exprs->at(i));
+  }
+}
+
+
+void AstGraphBuilder::VisitForValue(Expression* expr) {
+  AstValueContext for_value(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitForEffect(Expression* expr) {
+  AstEffectContext for_effect(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitForTest(Expression* expr) {
+  AstTestContext for_condition(this);
+  if (!HasStackOverflow()) {
+    expr->Accept(this);
+  }
+}
+
+
+void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
+  Variable* variable = decl->proxy()->var();
+  VariableMode mode = decl->mode();
+  bool hole_init = mode == CONST || mode == CONST_LEGACY || mode == LET;
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Handle<Oddball> value = variable->binding_needs_init()
+                                  ? isolate()->factory()->the_hole_value()
+                                  : isolate()->factory()->undefined_value();
+      globals()->Add(variable->name(), zone());
+      globals()->Add(value, zone());
+      break;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (hole_init) {
+        Node* value = jsgraph()->TheHoleConstant();
+        environment()->Bind(variable, value);
+      }
+      break;
+    case Variable::CONTEXT:
+      if (hole_init) {
+        Node* value = jsgraph()->TheHoleConstant();
+        const Operator* op = javascript()->StoreContext(0, variable->index());
+        NewNode(op, current_context(), value);
+      }
+      break;
+    case Variable::LOOKUP:
+      UNIMPLEMENTED();
+  }
+}
+
+
+void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
+  Variable* variable = decl->proxy()->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info());
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals()->Add(variable->name(), zone());
+      globals()->Add(function, zone());
+      break;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      VisitForValue(decl->fun());
+      Node* value = environment()->Pop();
+      environment()->Bind(variable, value);
+      break;
+    }
+    case Variable::CONTEXT: {
+      VisitForValue(decl->fun());
+      Node* value = environment()->Pop();
+      const Operator* op = javascript()->StoreContext(0, variable->index());
+      NewNode(op, current_context(), value);
+      break;
+    }
+    case Variable::LOOKUP:
+      UNIMPLEMENTED();
+  }
+}
+
+
+void AstGraphBuilder::VisitModuleDeclaration(ModuleDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitImportDeclaration(ImportDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExportDeclaration(ExportDeclaration* decl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModuleLiteral(ModuleLiteral* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleVariable(ModuleVariable* modl) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitModulePath(ModulePath* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitModuleUrl(ModuleUrl* modl) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitBlock(Block* stmt) {
+  BlockBuilder block(this);
+  BreakableScope scope(this, stmt, &block, 0);
+  if (stmt->labels() != NULL) block.BeginBlock();
+  if (stmt->scope() == NULL) {
+    // Visit statements in the same scope, no declarations.
+    VisitStatements(stmt->statements());
+  } else {
+    const Operator* op = javascript()->CreateBlockContext();
+    Node* scope_info = jsgraph()->Constant(stmt->scope()->GetScopeInfo());
+    Node* context = NewNode(op, scope_info, GetFunctionClosure());
+    ContextScope scope(this, stmt->scope(), context);
+
+    // Visit declarations and statements in a block scope.
+    VisitDeclarations(stmt->scope()->declarations());
+    VisitStatements(stmt->statements());
+  }
+  if (stmt->labels() != NULL) block.EndBlock();
+}
+
+
+void AstGraphBuilder::VisitModuleStatement(ModuleStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitExpressionStatement(ExpressionStatement* stmt) {
+  VisitForEffect(stmt->expression());
+}
+
+
+void AstGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
+  // Do nothing.
+}
+
+
+void AstGraphBuilder::VisitIfStatement(IfStatement* stmt) {
+  IfBuilder compare_if(this);
+  VisitForTest(stmt->condition());
+  Node* condition = environment()->Pop();
+  compare_if.If(condition);
+  compare_if.Then();
+  Visit(stmt->then_statement());
+  compare_if.Else();
+  Visit(stmt->else_statement());
+  compare_if.End();
+}
+
+
+void AstGraphBuilder::VisitContinueStatement(ContinueStatement* stmt) {
+  StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+  breakable()->ContinueTarget(stmt->target());
+  set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
+  StructuredGraphBuilder::Environment* env = environment()->CopyAsUnreachable();
+  breakable()->BreakTarget(stmt->target());
+  set_environment(env);
+}
+
+
+void AstGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
+  VisitForValue(stmt->expression());
+  Node* result = environment()->Pop();
+  Node* control = NewNode(common()->Return(), result);
+  UpdateControlDependencyToLeaveFunction(control);
+}
+
+
+void AstGraphBuilder::VisitWithStatement(WithStatement* stmt) {
+  VisitForValue(stmt->expression());
+  Node* value = environment()->Pop();
+  const Operator* op = javascript()->CreateWithContext();
+  Node* context = NewNode(op, value, GetFunctionClosure());
+  ContextScope scope(this, stmt->scope(), context);
+  Visit(stmt->statement());
+}
+
+
+void AstGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  SwitchBuilder compare_switch(this, clauses->length());
+  BreakableScope scope(this, stmt, &compare_switch, 0);
+  compare_switch.BeginSwitch();
+  int default_index = -1;
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForValue(stmt->tag());
+  Node* tag = environment()->Top();
+
+  // Iterate over all cases and create nodes for label comparison.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+
+    // The default is not a test, remember index.
+    if (clause->is_default()) {
+      default_index = i;
+      continue;
+    }
+
+    // Create nodes to perform label comparison as if via '==='. The switch
+    // value is still on the operand stack while the label is evaluated.
+    VisitForValue(clause->label());
+    Node* label = environment()->Pop();
+    const Operator* op = javascript()->StrictEqual();
+    Node* condition = NewNode(op, tag, label);
+    compare_switch.BeginLabel(i, condition);
+
+    // Discard the switch value at label match.
+    environment()->Pop();
+    compare_switch.EndLabel();
+  }
+
+  // Discard the switch value and mark the default case.
+  environment()->Pop();
+  if (default_index >= 0) {
+    compare_switch.DefaultAt(default_index);
+  }
+
+  // Iterate over all cases and create nodes for case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    compare_switch.BeginCase(i);
+    VisitStatements(clause->statements());
+    compare_switch.EndCase();
+  }
+
+  compare_switch.EndSwitch();
+}
+
+
+void AstGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
+  LoopBuilder while_loop(this);
+  while_loop.BeginLoop();
+  VisitIterationBody(stmt, &while_loop, 0);
+  while_loop.EndBody();
+  VisitForTest(stmt->cond());
+  Node* condition = environment()->Pop();
+  while_loop.BreakUnless(condition);
+  while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
+  LoopBuilder while_loop(this);
+  while_loop.BeginLoop();
+  VisitForTest(stmt->cond());
+  Node* condition = environment()->Pop();
+  while_loop.BreakUnless(condition);
+  VisitIterationBody(stmt, &while_loop, 0);
+  while_loop.EndBody();
+  while_loop.EndLoop();
+}
+
+
+void AstGraphBuilder::VisitForStatement(ForStatement* stmt) {
+  LoopBuilder for_loop(this);
+  VisitIfNotNull(stmt->init());
+  for_loop.BeginLoop();
+  if (stmt->cond() != NULL) {
+    VisitForTest(stmt->cond());
+    Node* condition = environment()->Pop();
+    for_loop.BreakUnless(condition);
+  }
+  VisitIterationBody(stmt, &for_loop, 0);
+  for_loop.EndBody();
+  VisitIfNotNull(stmt->next());
+  for_loop.EndLoop();
+}
+
+
+// TODO(dcarney): this is a big function.  Try to clean up some.
+void AstGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
+  VisitForValue(stmt->subject());
+  Node* obj = environment()->Pop();
+  // Check for undefined or null before entering loop.
+  IfBuilder is_undefined(this);
+  Node* is_undefined_cond =
+      NewNode(javascript()->StrictEqual(), obj, jsgraph()->UndefinedConstant());
+  is_undefined.If(is_undefined_cond);
+  is_undefined.Then();
+  is_undefined.Else();
+  {
+    IfBuilder is_null(this);
+    Node* is_null_cond =
+        NewNode(javascript()->StrictEqual(), obj, jsgraph()->NullConstant());
+    is_null.If(is_null_cond);
+    is_null.Then();
+    is_null.Else();
+    // Convert the subject to a JSObject.
+    // PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+    obj = NewNode(javascript()->ToObject(), obj);
+    environment()->Push(obj);
+    // TODO(dcarney): should do a fast enum cache check here to skip runtime.
+    environment()->Push(obj);
+    Node* cache_type = ProcessArguments(
+        javascript()->Runtime(Runtime::kGetPropertyNamesFast, 1), 1);
+    // TODO(dcarney): these next runtime calls should be removed in favour of
+    //                a few simplified instructions.
+    environment()->Push(obj);
+    environment()->Push(cache_type);
+    Node* cache_pair =
+        ProcessArguments(javascript()->Runtime(Runtime::kForInInit, 2), 2);
+    // cache_type may have been replaced.
+    Node* cache_array = NewNode(common()->Projection(0), cache_pair);
+    cache_type = NewNode(common()->Projection(1), cache_pair);
+    environment()->Push(cache_type);
+    environment()->Push(cache_array);
+    Node* cache_length = ProcessArguments(
+        javascript()->Runtime(Runtime::kForInCacheArrayLength, 2), 2);
+    {
+      // TODO(dcarney): this check is actually supposed to be for the
+      //                empty enum case only.
+      IfBuilder have_no_properties(this);
+      Node* empty_array_cond = NewNode(javascript()->StrictEqual(),
+                                       cache_length, jsgraph()->ZeroConstant());
+      have_no_properties.If(empty_array_cond);
+      have_no_properties.Then();
+      // Pop obj and skip loop.
+      environment()->Pop();
+      have_no_properties.Else();
+      {
+        // Construct the rest of the environment.
+        environment()->Push(cache_type);
+        environment()->Push(cache_array);
+        environment()->Push(cache_length);
+        environment()->Push(jsgraph()->ZeroConstant());
+        // PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+        LoopBuilder for_loop(this);
+        for_loop.BeginLoop();
+        // Check loop termination condition.
+        Node* index = environment()->Peek(0);
+        Node* exit_cond =
+            NewNode(javascript()->LessThan(), index, cache_length);
+        // TODO(jarin): provide real bailout id.
+        PrepareFrameState(exit_cond, BailoutId::None());
+        for_loop.BreakUnless(exit_cond);
+        // TODO(dcarney): this runtime call should be a handful of
+        //                simplified instructions that
+        //                basically produce
+        //                    value = array[index]
+        environment()->Push(obj);
+        environment()->Push(cache_array);
+        environment()->Push(cache_type);
+        environment()->Push(index);
+        Node* pair =
+            ProcessArguments(javascript()->Runtime(Runtime::kForInNext, 4), 4);
+        Node* value = NewNode(common()->Projection(0), pair);
+        Node* should_filter = NewNode(common()->Projection(1), pair);
+        environment()->Push(value);
+        {
+          // Test if FILTER_KEY needs to be called.
+          IfBuilder test_should_filter(this);
+          Node* should_filter_cond =
+              NewNode(javascript()->StrictEqual(), should_filter,
+                      jsgraph()->TrueConstant());
+          test_should_filter.If(should_filter_cond);
+          test_should_filter.Then();
+          value = environment()->Pop();
+          Node* builtins = BuildLoadBuiltinsObject();
+          Node* function = BuildLoadObjectField(
+              builtins,
+              JSBuiltinsObject::OffsetOfFunctionWithId(Builtins::FILTER_KEY));
+          // Callee.
+          environment()->Push(function);
+          // Receiver.
+          environment()->Push(obj);
+          // Args.
+          environment()->Push(value);
+          // result is either the string key or Smi(0) indicating the property
+          // is gone.
+          Node* res = ProcessArguments(
+              javascript()->Call(3, NO_CALL_FUNCTION_FLAGS), 3);
+          // TODO(jarin): provide real bailout id.
+          PrepareFrameState(res, BailoutId::None());
+          Node* property_missing = NewNode(javascript()->StrictEqual(), res,
+                                           jsgraph()->ZeroConstant());
+          {
+            IfBuilder is_property_missing(this);
+            is_property_missing.If(property_missing);
+            is_property_missing.Then();
+            // Inc counter and continue.
+            Node* index_inc =
+                NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+            // TODO(jarin): provide real bailout id.
+            PrepareFrameState(index_inc, BailoutId::None());
+            environment()->Poke(0, index_inc);
+            for_loop.Continue();
+            is_property_missing.Else();
+            is_property_missing.End();
+          }
+          // Replace 'value' in environment.
+          environment()->Push(res);
+          test_should_filter.Else();
+          test_should_filter.End();
+        }
+        value = environment()->Pop();
+        // Bind value and do loop body.
+        VisitForInAssignment(stmt->each(), value);
+        VisitIterationBody(stmt, &for_loop, 5);
+        for_loop.EndBody();
+        // Inc counter and continue.
+        Node* index_inc =
+            NewNode(javascript()->Add(), index, jsgraph()->OneConstant());
+        // TODO(jarin): provide real bailout id.
+        PrepareFrameState(index_inc, BailoutId::None());
+        environment()->Poke(0, index_inc);
+        for_loop.EndLoop();
+        environment()->Drop(5);
+        // PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+      }
+      have_no_properties.End();
+    }
+    is_null.End();
+  }
+  is_undefined.End();
+}
+
+
+void AstGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
+  VisitForValue(stmt->subject());
+  environment()->Pop();
+  // TODO(turbofan): create and use loop builder.
+}
+
+
+void AstGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
+  // TODO(turbofan): Do we really need a separate reloc-info for this?
+  Node* node = NewNode(javascript()->Runtime(Runtime::kDebugBreak, 0));
+  PrepareFrameState(node, stmt->DebugBreakId());
+}
+
+
+void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
+  Node* context = current_context();
+
+  // Build a new shared function info if we cannot find one in the baseline
+  // code. We also have a stack overflow if the recursive compilation did.
+  Handle<SharedFunctionInfo> shared_info =
+      SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+  if (shared_info.is_null()) {
+    shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info());
+    CHECK(!shared_info.is_null());  // TODO(mstarzinger): Set stack overflow?
+  }
+
+  // Create node to instantiate a new closure.
+  Node* info = jsgraph()->Constant(shared_info);
+  Node* pretenure = expr->pretenure() ? jsgraph()->TrueConstant()
+                                      : jsgraph()->FalseConstant();
+  const Operator* op = javascript()->Runtime(Runtime::kNewClosure, 3);
+  Node* value = NewNode(op, context, info, pretenure);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitClassLiteral(ClassLiteral* expr) {
+  // TODO(arv): Implement.
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitConditional(Conditional* expr) {
+  IfBuilder compare_if(this);
+  VisitForTest(expr->condition());
+  Node* condition = environment()->Pop();
+  compare_if.If(condition);
+  compare_if.Then();
+  Visit(expr->then_expression());
+  compare_if.Else();
+  Visit(expr->else_expression());
+  compare_if.End();
+  ast_context()->ReplaceValue();
+}
+
+
+void AstGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
+  Node* value = BuildVariableLoad(expr->var(), expr->id());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitLiteral(Literal* expr) {
+  Node* value = jsgraph()->Constant(expr->value());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Node* closure = GetFunctionClosure();
+
+  // Create node to materialize a regular expression literal.
+  Node* literals_array =
+      BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* pattern = jsgraph()->Constant(expr->pattern());
+  Node* flags = jsgraph()->Constant(expr->flags());
+  const Operator* op =
+      javascript()->Runtime(Runtime::kMaterializeRegExpLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, pattern, flags);
+  ast_context()->ProduceValue(literal);
+}
+
+
+void AstGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
+  Node* closure = GetFunctionClosure();
+
+  // Create node to deep-copy the literal boilerplate.
+  expr->BuildConstantProperties(isolate());
+  Node* literals_array =
+      BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* constants = jsgraph()->Constant(expr->constant_properties());
+  Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+  const Operator* op = javascript()->Runtime(Runtime::kCreateObjectLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+  // The object is expected on the operand stack during computation of the
+  // property values and is the value of the entire expression.
+  environment()->Push(literal);
+
+  // Mark all computed expressions that are bound to a key that is shadowed by
+  // a later occurrence of the same key. For the marked expressions, no store
+  // code is emitted.
+  expr->CalculateEmitStore(zone());
+
+  // Create nodes to store computed values into the literal.
+  AccessorTable accessor_table(zone());
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+      // Fall through.
+      case ObjectLiteral::Property::COMPUTED: {
+        // It is safe to use [[Put]] here because the boilerplate already
+        // contains computed properties with an uninitialized value.
+        if (key->value()->IsInternalizedString()) {
+          if (property->emit_store()) {
+            VisitForValue(property->value());
+            Node* value = environment()->Pop();
+            Unique<Name> name = MakeUnique(key->AsPropertyName());
+            Node* store = NewNode(javascript()->StoreNamed(strict_mode(), name),
+                                  literal, value);
+            PrepareFrameState(store, key->id());
+          } else {
+            VisitForEffect(property->value());
+          }
+          break;
+        }
+        environment()->Push(literal);  // Duplicate receiver.
+        VisitForValue(property->key());
+        VisitForValue(property->value());
+        Node* value = environment()->Pop();
+        Node* key = environment()->Pop();
+        Node* receiver = environment()->Pop();
+        if (property->emit_store()) {
+          Node* strict = jsgraph()->Constant(SLOPPY);
+          const Operator* op = javascript()->Runtime(Runtime::kSetProperty, 4);
+          NewNode(op, receiver, key, value, strict);
+        }
+        break;
+      }
+      case ObjectLiteral::Property::PROTOTYPE: {
+        environment()->Push(literal);  // Duplicate receiver.
+        VisitForValue(property->value());
+        Node* value = environment()->Pop();
+        Node* receiver = environment()->Pop();
+        if (property->emit_store()) {
+          const Operator* op = javascript()->Runtime(Runtime::kSetPrototype, 2);
+          NewNode(op, receiver, value);
+        }
+        break;
+      }
+      case ObjectLiteral::Property::GETTER:
+        accessor_table.lookup(key)->second->getter = property->value();
+        break;
+      case ObjectLiteral::Property::SETTER:
+        accessor_table.lookup(key)->second->setter = property->value();
+        break;
+    }
+  }
+
+  // Create nodes to define accessors, using only a single call to the runtime
+  // for each pair of corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end(); ++it) {
+    VisitForValue(it->first);
+    VisitForValueOrNull(it->second->getter);
+    VisitForValueOrNull(it->second->setter);
+    Node* setter = environment()->Pop();
+    Node* getter = environment()->Pop();
+    Node* name = environment()->Pop();
+    Node* attr = jsgraph()->Constant(NONE);
+    const Operator* op =
+        javascript()->Runtime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+    Node* call = NewNode(op, literal, name, getter, setter, attr);
+    PrepareFrameState(call, it->first->id());
+  }
+
+  // Transform literals that contain functions to fast properties.
+  if (expr->has_function()) {
+    const Operator* op = javascript()->Runtime(Runtime::kToFastProperties, 1);
+    NewNode(op, literal);
+  }
+
+  ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
+  Node* closure = GetFunctionClosure();
+
+  // Create node to deep-copy the literal boilerplate.
+  expr->BuildConstantElements(isolate());
+  Node* literals_array =
+      BuildLoadObjectField(closure, JSFunction::kLiteralsOffset);
+  Node* literal_index = jsgraph()->Constant(expr->literal_index());
+  Node* constants = jsgraph()->Constant(expr->constant_elements());
+  Node* flags = jsgraph()->Constant(expr->ComputeFlags());
+  const Operator* op = javascript()->Runtime(Runtime::kCreateArrayLiteral, 4);
+  Node* literal = NewNode(op, literals_array, literal_index, constants, flags);
+
+  // The array and the literal index are both expected on the operand stack
+  // during computation of the element values.
+  environment()->Push(literal);
+  environment()->Push(literal_index);
+
+  // Create nodes to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  for (int i = 0; i < expr->values()->length(); i++) {
+    Expression* subexpr = expr->values()->at(i);
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    VisitForValue(subexpr);
+    Node* value = environment()->Pop();
+    Node* index = jsgraph()->Constant(i);
+    Node* store = NewNode(javascript()->StoreProperty(strict_mode()), literal,
+                          index, value);
+    PrepareFrameState(store, expr->GetIdForElement(i));
+  }
+
+  environment()->Pop();  // Array literal index.
+  ast_context()->ProduceValue(environment()->Pop());
+}
+
+
+void AstGraphBuilder::VisitForInAssignment(Expression* expr, Node* value) {
+  DCHECK(expr->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr);
+
+  // Evaluate LHS expression and store the value.
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      // TODO(jarin) Fill in the correct bailout id.
+      BuildVariableAssignment(var, value, Token::ASSIGN, BailoutId::None());
+      break;
+    }
+    case NAMED_PROPERTY: {
+      environment()->Push(value);
+      VisitForValue(property->obj());
+      Node* object = environment()->Pop();
+      value = environment()->Pop();
+      Unique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      Node* store =
+          NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+      // TODO(jarin) Fill in the correct bailout id.
+      PrepareFrameState(store, BailoutId::None());
+      break;
+    }
+    case KEYED_PROPERTY: {
+      environment()->Push(value);
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      value = environment()->Pop();
+      Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+                            key, value);
+      // TODO(jarin) Fill in the correct bailout id.
+      PrepareFrameState(store, BailoutId::None());
+      break;
+    }
+  }
+}
+
+
+void AstGraphBuilder::VisitAssignment(Assignment* expr) {
+  DCHECK(expr->target()->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->target()->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr->target());
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      VisitForValue(property->obj());
+      break;
+    case KEYED_PROPERTY: {
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      break;
+    }
+  }
+
+  // Evaluate the value and potentially handle compound assignments by loading
+  // the left-hand side value and performing a binary operation.
+  if (expr->is_compound()) {
+    Node* old_value = NULL;
+    switch (assign_type) {
+      case VARIABLE: {
+        Variable* variable = expr->target()->AsVariableProxy()->var();
+        old_value = BuildVariableLoad(variable, expr->target()->id());
+        break;
+      }
+      case NAMED_PROPERTY: {
+        Node* object = environment()->Top();
+        Unique<Name> name =
+            MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+        old_value = NewNode(javascript()->LoadNamed(name), object);
+        PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+        break;
+      }
+      case KEYED_PROPERTY: {
+        Node* key = environment()->Top();
+        Node* object = environment()->Peek(1);
+        old_value = NewNode(javascript()->LoadProperty(), object, key);
+        PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+        break;
+      }
+    }
+    environment()->Push(old_value);
+    VisitForValue(expr->value());
+    Node* right = environment()->Pop();
+    Node* left = environment()->Pop();
+    Node* value = BuildBinaryOp(left, right, expr->binary_op());
+    PrepareFrameState(value, expr->binary_operation()->id(), kPushOutput);
+    environment()->Push(value);
+  } else {
+    VisitForValue(expr->value());
+  }
+
+  // Store the value.
+  Node* value = environment()->Pop();
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->target()->AsVariableProxy()->var();
+      BuildVariableAssignment(variable, value, expr->op(),
+                              expr->AssignmentId());
+      break;
+    }
+    case NAMED_PROPERTY: {
+      Node* object = environment()->Pop();
+      Unique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      Node* store =
+          NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+      PrepareFrameState(store, expr->AssignmentId());
+      break;
+    }
+    case KEYED_PROPERTY: {
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+                            key, value);
+      PrepareFrameState(store, expr->AssignmentId());
+      break;
+    }
+  }
+
+  ast_context()->ProduceValue(value);
+}
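+
+// Illustrative note (not part of the original change): for a compound
+// assignment such as `o.x += 1`, the code above evaluates the target
+// reference first (pushing `o`), loads the old value with LoadNamed, combines
+// it with the right-hand side via BuildBinaryOp, and only then emits the
+// StoreNamed; the target reference is thus evaluated exactly once.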
+
+
+void AstGraphBuilder::VisitYield(Yield* expr) {
+  VisitForValue(expr->generator_object());
+  VisitForValue(expr->expression());
+  environment()->Pop();
+  environment()->Pop();
+  // TODO(turbofan): VisitYield
+  ast_context()->ProduceValue(jsgraph()->UndefinedConstant());
+}
+
+
+void AstGraphBuilder::VisitThrow(Throw* expr) {
+  VisitForValue(expr->exception());
+  Node* exception = environment()->Pop();
+  const Operator* op = javascript()->Runtime(Runtime::kThrow, 1);
+  Node* value = NewNode(op, exception);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitProperty(Property* expr) {
+  Node* value;
+  if (expr->key()->IsPropertyName()) {
+    VisitForValue(expr->obj());
+    Node* object = environment()->Pop();
+    Unique<Name> name = MakeUnique(expr->key()->AsLiteral()->AsPropertyName());
+    value = NewNode(javascript()->LoadNamed(name), object);
+  } else {
+    VisitForValue(expr->obj());
+    VisitForValue(expr->key());
+    Node* key = environment()->Pop();
+    Node* object = environment()->Pop();
+    value = NewNode(javascript()->LoadProperty(), object, key);
+  }
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCall(Call* expr) {
+  Expression* callee = expr->expression();
+  Call::CallType call_type = expr->GetCallType(isolate());
+
+  // Prepare the callee and the receiver to the function call. This depends on
+  // the semantics of the underlying call type.
+  CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
+  Node* receiver_value = NULL;
+  Node* callee_value = NULL;
+  bool possibly_eval = false;
+  switch (call_type) {
+    case Call::GLOBAL_CALL: {
+      Variable* variable = callee->AsVariableProxy()->var();
+      callee_value = BuildVariableLoad(variable, expr->expression()->id());
+      receiver_value = jsgraph()->UndefinedConstant();
+      break;
+    }
+    case Call::LOOKUP_SLOT_CALL: {
+      Variable* variable = callee->AsVariableProxy()->var();
+      DCHECK(variable->location() == Variable::LOOKUP);
+      Node* name = jsgraph()->Constant(variable->name());
+      const Operator* op = javascript()->Runtime(Runtime::kLoadLookupSlot, 2);
+      Node* pair = NewNode(op, current_context(), name);
+      callee_value = NewNode(common()->Projection(0), pair);
+      receiver_value = NewNode(common()->Projection(1), pair);
+      break;
+    }
+    case Call::PROPERTY_CALL: {
+      Property* property = callee->AsProperty();
+      VisitForValue(property->obj());
+      Node* object = environment()->Top();
+      if (property->key()->IsPropertyName()) {
+        Unique<Name> name =
+            MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+        callee_value = NewNode(javascript()->LoadNamed(name), object);
+      } else {
+        VisitForValue(property->key());
+        Node* key = environment()->Pop();
+        callee_value = NewNode(javascript()->LoadProperty(), object, key);
+      }
+      PrepareFrameState(callee_value, property->LoadId(), kPushOutput);
+      receiver_value = environment()->Pop();
+      // Note that a PROPERTY_CALL requires the receiver to be wrapped into an
+      // object for sloppy callees. This could also be modeled explicitly
+      // here, thereby removing the need for a flag on the call operator.
+      flags = CALL_AS_METHOD;
+      break;
+    }
+    case Call::POSSIBLY_EVAL_CALL:
+      possibly_eval = true;
+    // Fall through.
+    case Call::OTHER_CALL:
+      VisitForValue(callee);
+      callee_value = environment()->Pop();
+      receiver_value = jsgraph()->UndefinedConstant();
+      break;
+  }
+
+  // The callee and the receiver both have to be pushed onto the operand stack
+  // before arguments are being evaluated.
+  environment()->Push(callee_value);
+  environment()->Push(receiver_value);
+
+  // Evaluate all arguments to the function call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Resolve callee and receiver for a potential direct eval call. This block
+  // will mutate the callee and receiver values pushed onto the environment.
+  if (possibly_eval && args->length() > 0) {
+    int arg_count = args->length();
+
+    // Extract callee and source string from the environment.
+    Node* callee = environment()->Peek(arg_count + 1);
+    Node* source = environment()->Peek(arg_count - 1);
+
+    // Create node to ask for help resolving potential eval call. This will
+    // provide a fully resolved callee and the corresponding receiver.
+    Node* function = GetFunctionClosure();
+    Node* receiver = environment()->Lookup(info()->scope()->receiver());
+    Node* strict = jsgraph()->Constant(strict_mode());
+    Node* position = jsgraph()->Constant(info()->scope()->start_position());
+    const Operator* op =
+        javascript()->Runtime(Runtime::kResolvePossiblyDirectEval, 6);
+    Node* pair =
+        NewNode(op, callee, source, function, receiver, strict, position);
+    Node* new_callee = NewNode(common()->Projection(0), pair);
+    Node* new_receiver = NewNode(common()->Projection(1), pair);
+
+    // Patch callee and receiver on the environment.
+    environment()->Poke(arg_count + 1, new_callee);
+    environment()->Poke(arg_count + 0, new_receiver);
+  }
+
+  // Create node to perform the function call.
+  const Operator* call = javascript()->Call(args->length() + 2, flags);
+  Node* value = ProcessArguments(call, args->length() + 2);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
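+
+// Illustrative note (not part of the original change): while resolving a
+// potential direct eval above, the operand stack holds
+//
+//   [... callee receiver arg0 ... arg{n-1}]        with n == arg_count,
+//
+// so Peek(arg_count + 1) addresses the callee, Peek/Poke(arg_count) the
+// receiver, and Peek(arg_count - 1) the first argument, which is the source
+// string handed to Runtime::kResolvePossiblyDirectEval.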
+
+
+void AstGraphBuilder::VisitCallNew(CallNew* expr) {
+  VisitForValue(expr->expression());
+
+  // Evaluate all arguments to the construct call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the construct call.
+  const Operator* call = javascript()->CallNew(args->length() + 1);
+  Node* value = ProcessArguments(call, args->length() + 1);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCallJSRuntime(CallRuntime* expr) {
+  Handle<String> name = expr->name();
+
+  // The callee and the receiver both have to be pushed onto the operand stack
+  // before arguments are being evaluated.
+  CallFunctionFlags flags = NO_CALL_FUNCTION_FLAGS;
+  Node* receiver_value = BuildLoadBuiltinsObject();
+  Unique<String> unique = MakeUnique(name);
+  Node* callee_value = NewNode(javascript()->LoadNamed(unique), receiver_value);
+  // TODO(jarin): Find/create a bailout id to deoptimize to (crankshaft
+  // refuses to optimize functions with jsruntime calls).
+  PrepareFrameState(callee_value, BailoutId::None(), kPushOutput);
+  environment()->Push(callee_value);
+  environment()->Push(receiver_value);
+
+  // Evaluate all arguments to the JS runtime call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the JS runtime call.
+  const Operator* call = javascript()->Call(args->length() + 2, flags);
+  Node* value = ProcessArguments(call, args->length() + 2);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
+  const Runtime::Function* function = expr->function();
+
+  // Handle calls to runtime functions implemented in JavaScript separately as
+  // the call follows JavaScript ABI and the callee is statically unknown.
+  if (expr->is_jsruntime()) {
+    DCHECK(function == NULL && expr->name()->length() > 0);
+    return VisitCallJSRuntime(expr);
+  }
+
+  // Evaluate all arguments to the runtime call.
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForValues(args);
+
+  // Create node to perform the runtime call.
+  Runtime::FunctionId functionId = function->function_id;
+  const Operator* call = javascript()->Runtime(functionId, args->length());
+  Node* value = ProcessArguments(call, args->length());
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::DELETE:
+      return VisitDelete(expr);
+    case Token::VOID:
+      return VisitVoid(expr);
+    case Token::TYPEOF:
+      return VisitTypeof(expr);
+    case Token::NOT:
+      return VisitNot(expr);
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void AstGraphBuilder::VisitCountOperation(CountOperation* expr) {
+  DCHECK(expr->expression()->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a variable slot.
+  Property* property = expr->expression()->AsProperty();
+  LhsKind assign_type = DetermineLhsKind(expr->expression());
+
+  // Reserve space for result of postfix operation.
+  bool is_postfix = expr->is_postfix() && !ast_context()->IsEffect();
+  if (is_postfix) environment()->Push(jsgraph()->UndefinedConstant());
+
+  // Evaluate LHS expression and get old value.
+  Node* old_value = NULL;
+  int stack_depth = -1;
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->expression()->AsVariableProxy()->var();
+      old_value = BuildVariableLoad(variable, expr->expression()->id());
+      stack_depth = 0;
+      break;
+    }
+    case NAMED_PROPERTY: {
+      VisitForValue(property->obj());
+      Node* object = environment()->Top();
+      Unique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      old_value = NewNode(javascript()->LoadNamed(name), object);
+      PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+      stack_depth = 1;
+      break;
+    }
+    case KEYED_PROPERTY: {
+      VisitForValue(property->obj());
+      VisitForValue(property->key());
+      Node* key = environment()->Top();
+      Node* object = environment()->Peek(1);
+      old_value = NewNode(javascript()->LoadProperty(), object, key);
+      PrepareFrameState(old_value, property->LoadId(), kPushOutput);
+      stack_depth = 2;
+      break;
+    }
+  }
+
+  // Convert old value into a number.
+  old_value = NewNode(javascript()->ToNumber(), old_value);
+
+  // Save result for postfix expressions at correct stack depth.
+  if (is_postfix) environment()->Poke(stack_depth, old_value);
+
+  // Create node to perform +1/-1 operation.
+  Node* value =
+      BuildBinaryOp(old_value, jsgraph()->OneConstant(), expr->binary_op());
+  // TODO(jarin) Insert proper bailout id here (will need to change
+  // full code generator).
+  PrepareFrameState(value, BailoutId::None());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* variable = expr->expression()->AsVariableProxy()->var();
+      environment()->Push(value);
+      BuildVariableAssignment(variable, value, expr->op(),
+                              expr->AssignmentId());
+      environment()->Pop();
+      break;
+    }
+    case NAMED_PROPERTY: {
+      Node* object = environment()->Pop();
+      Unique<Name> name =
+          MakeUnique(property->key()->AsLiteral()->AsPropertyName());
+      Node* store =
+          NewNode(javascript()->StoreNamed(strict_mode(), name), object, value);
+      environment()->Push(value);
+      PrepareFrameState(store, expr->AssignmentId());
+      environment()->Pop();
+      break;
+    }
+    case KEYED_PROPERTY: {
+      Node* key = environment()->Pop();
+      Node* object = environment()->Pop();
+      Node* store = NewNode(javascript()->StoreProperty(strict_mode()), object,
+                            key, value);
+      environment()->Push(value);
+      PrepareFrameState(store, expr->AssignmentId());
+      environment()->Pop();
+      break;
+    }
+  }
+
+  // Restore old value for postfix expressions.
+  if (is_postfix) value = environment()->Pop();
+
+  ast_context()->ProduceValue(value);
+}
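+
+// Illustrative note (not part of the original change): for a postfix count
+// operation such as `o[k]++` in a value context, the operand stack evolves as
+//
+//   [undef]            reserved result slot
+//   [undef, o, k]      after evaluating object and key (stack_depth == 2)
+//   [old#, o, k]       Poke saves ToNumber(old) into the reserved slot
+//   [old#]             the store consumes o and k
+//
+// and the final Pop returns the saved old value as the expression result,
+// which is why stack_depth records how far below the top the slot sits.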
+
+
+void AstGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::COMMA:
+      return VisitComma(expr);
+    case Token::OR:
+    case Token::AND:
+      return VisitLogicalExpression(expr);
+    default: {
+      VisitForValue(expr->left());
+      VisitForValue(expr->right());
+      Node* right = environment()->Pop();
+      Node* left = environment()->Pop();
+      Node* value = BuildBinaryOp(left, right, expr->op());
+      PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+      ast_context()->ProduceValue(value);
+    }
+  }
+}
+
+
+void AstGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
+  const Operator* op;
+  switch (expr->op()) {
+    case Token::EQ:
+      op = javascript()->Equal();
+      break;
+    case Token::NE:
+      op = javascript()->NotEqual();
+      break;
+    case Token::EQ_STRICT:
+      op = javascript()->StrictEqual();
+      break;
+    case Token::NE_STRICT:
+      op = javascript()->StrictNotEqual();
+      break;
+    case Token::LT:
+      op = javascript()->LessThan();
+      break;
+    case Token::GT:
+      op = javascript()->GreaterThan();
+      break;
+    case Token::LTE:
+      op = javascript()->LessThanOrEqual();
+      break;
+    case Token::GTE:
+      op = javascript()->GreaterThanOrEqual();
+      break;
+    case Token::INSTANCEOF:
+      op = javascript()->InstanceOf();
+      break;
+    case Token::IN:
+      op = javascript()->HasProperty();
+      break;
+    default:
+      op = NULL;
+      UNREACHABLE();
+  }
+  VisitForValue(expr->left());
+  VisitForValue(expr->right());
+  Node* right = environment()->Pop();
+  Node* left = environment()->Pop();
+  Node* value = NewNode(op, left, right);
+  PrepareFrameState(value, expr->id(), ast_context()->GetStateCombine());
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitThisFunction(ThisFunction* expr) {
+  Node* value = GetFunctionClosure();
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitSuperReference(SuperReference* expr) {
+  UNREACHABLE();
+}
+
+
+void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
+
+
+void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
+  DCHECK(globals()->is_empty());
+  AstVisitor::VisitDeclarations(declarations);
+  if (globals()->is_empty()) return;
+  Handle<FixedArray> data =
+      isolate()->factory()->NewFixedArray(globals()->length(), TENURED);
+  for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i));
+  int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
+                      DeclareGlobalsNativeFlag::encode(info()->is_native()) |
+                      DeclareGlobalsStrictMode::encode(strict_mode());
+  Node* flags = jsgraph()->Constant(encoded_flags);
+  Node* pairs = jsgraph()->Constant(data);
+  const Operator* op = javascript()->Runtime(Runtime::kDeclareGlobals, 3);
+  NewNode(op, current_context(), pairs, flags);
+  globals()->Rewind(0);
+}
+
+
+void AstGraphBuilder::VisitIfNotNull(Statement* stmt) {
+  if (stmt == NULL) return;
+  Visit(stmt);
+}
+
+
+void AstGraphBuilder::VisitIterationBody(IterationStatement* stmt,
+                                         LoopBuilder* loop, int drop_extra) {
+  BreakableScope scope(this, stmt, loop, drop_extra);
+  Visit(stmt->body());
+}
+
+
+void AstGraphBuilder::VisitDelete(UnaryOperation* expr) {
+  Node* value;
+  if (expr->expression()->IsVariableProxy()) {
+    // Delete of an unqualified identifier is only allowed in sloppy mode, but
+    // deleting "this" is allowed in all language modes.
+    Variable* variable = expr->expression()->AsVariableProxy()->var();
+    DCHECK(strict_mode() == SLOPPY || variable->is_this());
+    value = BuildVariableDelete(variable);
+  } else if (expr->expression()->IsProperty()) {
+    Property* property = expr->expression()->AsProperty();
+    VisitForValue(property->obj());
+    VisitForValue(property->key());
+    Node* key = environment()->Pop();
+    Node* object = environment()->Pop();
+    value = NewNode(javascript()->DeleteProperty(strict_mode()), object, key);
+  } else {
+    VisitForEffect(expr->expression());
+    value = jsgraph()->TrueConstant();
+  }
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitVoid(UnaryOperation* expr) {
+  VisitForEffect(expr->expression());
+  Node* value = jsgraph()->UndefinedConstant();
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitTypeof(UnaryOperation* expr) {
+  Node* operand;
+  if (expr->expression()->IsVariableProxy()) {
+    // Typeof does not throw a reference error on global variables, hence we
+    // perform a non-contextual load in case the operand is a variable proxy.
+    Variable* variable = expr->expression()->AsVariableProxy()->var();
+    operand =
+        BuildVariableLoad(variable, expr->expression()->id(), NOT_CONTEXTUAL);
+  } else {
+    VisitForValue(expr->expression());
+    operand = environment()->Pop();
+  }
+  Node* value = NewNode(javascript()->TypeOf(), operand);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitNot(UnaryOperation* expr) {
+  VisitForValue(expr->expression());
+  Node* operand = environment()->Pop();
+  // TODO(mstarzinger): Possible optimization when we are in effect context.
+  Node* value = NewNode(javascript()->UnaryNot(), operand);
+  ast_context()->ProduceValue(value);
+}
+
+
+void AstGraphBuilder::VisitComma(BinaryOperation* expr) {
+  VisitForEffect(expr->left());
+  Visit(expr->right());
+  ast_context()->ReplaceValue();
+}
+
+
+void AstGraphBuilder::VisitLogicalExpression(BinaryOperation* expr) {
+  bool is_logical_and = expr->op() == Token::AND;
+  IfBuilder compare_if(this);
+  VisitForValue(expr->left());
+  Node* condition = environment()->Top();
+  compare_if.If(BuildToBoolean(condition));
+  compare_if.Then();
+  if (is_logical_and) {
+    environment()->Pop();
+    Visit(expr->right());
+  } else if (ast_context()->IsEffect()) {
+    environment()->Pop();
+  }
+  compare_if.Else();
+  if (!is_logical_and) {
+    environment()->Pop();
+    Visit(expr->right());
+  } else if (ast_context()->IsEffect()) {
+    environment()->Pop();
+  }
+  compare_if.End();
+  ast_context()->ReplaceValue();
+}
+
+
+Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
+  DCHECK(environment()->stack_height() >= arity);
+  Node** all = info()->zone()->NewArray<Node*>(arity);
+  for (int i = arity - 1; i >= 0; --i) {
+    all[i] = environment()->Pop();
+  }
+  Node* value = NewNode(op, arity, all);
+  return value;
+}
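+
+// Illustrative note (not part of the original change): the loop above pops in
+// reverse so that all[0] receives the value pushed first. For JS calls that
+// is the callee, followed by the receiver and then the arguments, which is
+// exactly the input order the call operator expects.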
+
+
+Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots <= 0) return context;
+  set_current_context(context);
+
+  // Allocate a new local context.
+  const Operator* op = javascript()->CreateFunctionContext();
+  Node* local_context = NewNode(op, closure);
+  set_current_context(local_context);
+
+  // Copy parameters into context if necessary.
+  int num_parameters = info()->scope()->num_parameters();
+  for (int i = 0; i < num_parameters; i++) {
+    Variable* variable = info()->scope()->parameter(i);
+    if (!variable->IsContextSlot()) continue;
+    // Temporary parameter node. The parameter indices are shifted by 1
+    // (receiver is parameter index -1 but environment index 0).
+    Node* parameter = NewNode(common()->Parameter(i + 1), graph()->start());
+    // Context variable (at bottom of the context chain).
+    DCHECK_EQ(0, info()->scope()->ContextChainLength(variable->scope()));
+    const Operator* op = javascript()->StoreContext(0, variable->index());
+    NewNode(op, local_context, parameter);
+  }
+
+  return local_context;
+}
+
+
+Node* AstGraphBuilder::BuildArgumentsObject(Variable* arguments) {
+  if (arguments == NULL) return NULL;
+
+  // Allocate and initialize a new arguments object.
+  Node* callee = GetFunctionClosure();
+  const Operator* op = javascript()->Runtime(Runtime::kNewArguments, 1);
+  Node* object = NewNode(op, callee);
+
+  // Assign the object to the arguments variable.
+  DCHECK(arguments->IsContextSlot() || arguments->IsStackAllocated());
+  // This should never lazily deoptimize, so it is fine to pass an invalid
+  // bailout id.
+  BuildVariableAssignment(arguments, object, Token::ASSIGN, BailoutId::None());
+
+  return object;
+}
+
+
+Node* AstGraphBuilder::BuildHoleCheckSilent(Node* value, Node* for_hole,
+                                            Node* not_hole) {
+  IfBuilder hole_check(this);
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+  hole_check.If(check);
+  hole_check.Then();
+  environment()->Push(for_hole);
+  hole_check.Else();
+  environment()->Push(not_hole);
+  hole_check.End();
+  return environment()->Pop();
+}
+
+
+Node* AstGraphBuilder::BuildHoleCheckThrow(Node* value, Variable* variable,
+                                           Node* not_hole) {
+  IfBuilder hole_check(this);
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  Node* check = NewNode(javascript()->StrictEqual(), value, the_hole);
+  hole_check.If(check);
+  hole_check.Then();
+  environment()->Push(BuildThrowReferenceError(variable));
+  hole_check.Else();
+  environment()->Push(not_hole);
+  hole_check.End();
+  return environment()->Pop();
+}
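+
+// Illustrative note (not part of the original change): the throwing variant
+// above is what implements the temporal dead zone for let/const, e.g.
+//
+//   { x; let x = 1; }
+//
+// reads `x` while it still holds the hole, so the generated check routes the
+// load through BuildThrowReferenceError instead of producing a value.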
+
+
+Node* AstGraphBuilder::BuildVariableLoad(Variable* variable,
+                                         BailoutId bailout_id,
+                                         ContextualMode contextual_mode) {
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  VariableMode mode = variable->mode();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      Node* global = BuildLoadGlobalObject();
+      Unique<Name> name = MakeUnique(variable->name());
+      const Operator* op = javascript()->LoadNamed(name, contextual_mode);
+      Node* node = NewNode(op, global);
+      PrepareFrameState(node, bailout_id, kPushOutput);
+      return node;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      // Local var, const, or let variable.
+      Node* value = environment()->Lookup(variable);
+      if (mode == CONST_LEGACY) {
+        // Perform check for uninitialized legacy const variables.
+        if (value->op() == the_hole->op()) {
+          value = jsgraph()->UndefinedConstant();
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          Node* undefined = jsgraph()->UndefinedConstant();
+          value = BuildHoleCheckSilent(value, undefined, value);
+        }
+      } else if (mode == LET || mode == CONST) {
+        // Perform check for uninitialized let/const variables.
+        if (value->op() == the_hole->op()) {
+          value = BuildThrowReferenceError(variable);
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          value = BuildHoleCheckThrow(value, variable, value);
+        }
+      }
+      return value;
+    }
+    case Variable::CONTEXT: {
+      // Context variable (potentially up the context chain).
+      int depth = current_scope()->ContextChainLength(variable->scope());
+      bool immutable = variable->maybe_assigned() == kNotAssigned;
+      const Operator* op =
+          javascript()->LoadContext(depth, variable->index(), immutable);
+      Node* value = NewNode(op, current_context());
+      // TODO(titzer): initialization checks are redundant for already
+      // initialized immutable context loads, but only specialization knows.
+      // Maybe specializer should be a parameter to the graph builder?
+      if (mode == CONST_LEGACY) {
+        // Perform check for uninitialized legacy const variables.
+        Node* undefined = jsgraph()->UndefinedConstant();
+        value = BuildHoleCheckSilent(value, undefined, value);
+      } else if (mode == LET || mode == CONST) {
+        // Perform check for uninitialized let/const variables.
+        value = BuildHoleCheckThrow(value, variable, value);
+      }
+      return value;
+    }
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      Runtime::FunctionId function_id =
+          (contextual_mode == CONTEXTUAL)
+              ? Runtime::kLoadLookupSlot
+              : Runtime::kLoadLookupSlotNoReferenceError;
+      const Operator* op = javascript()->Runtime(function_id, 2);
+      Node* pair = NewNode(op, current_context(), name);
+      return NewNode(common()->Projection(0), pair);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildVariableDelete(Variable* variable) {
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      Node* global = BuildLoadGlobalObject();
+      Node* name = jsgraph()->Constant(variable->name());
+      const Operator* op = javascript()->DeleteProperty(strict_mode());
+      return NewNode(op, global, name);
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT:
+      // Local var, const, or let variable or context variable.
+      return variable->is_this() ? jsgraph()->TrueConstant()
+                                 : jsgraph()->FalseConstant();
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      const Operator* op = javascript()->Runtime(Runtime::kDeleteLookupSlot, 2);
+      return NewNode(op, current_context(), name);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildVariableAssignment(Variable* variable, Node* value,
+                                               Token::Value op,
+                                               BailoutId bailout_id) {
+  Node* the_hole = jsgraph()->TheHoleConstant();
+  VariableMode mode = variable->mode();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      // Global var, const, or let variable.
+      Node* global = BuildLoadGlobalObject();
+      Unique<Name> name = MakeUnique(variable->name());
+      const Operator* op = javascript()->StoreNamed(strict_mode(), name);
+      Node* store = NewNode(op, global, value);
+      PrepareFrameState(store, bailout_id);
+      return store;
+    }
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      // Local var, const, or let variable.
+      if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+        // Perform an initialization check for legacy const variables.
+        Node* current = environment()->Lookup(variable);
+        if (current->op() != the_hole->op()) {
+          value = BuildHoleCheckSilent(current, value, current);
+        }
+      } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+        // Non-initializing assignments to legacy const are ignored.
+        return value;
+      } else if (mode == LET && op != Token::INIT_LET) {
+        // Perform an initialization check for let declared variables.
+        // Also note that the dynamic hole-check is only done to ensure that
+        // this does not break in the presence of do-expressions within the
+        // temporal dead zone of a let declared variable.
+        Node* current = environment()->Lookup(variable);
+        if (current->op() == the_hole->op()) {
+          value = BuildThrowReferenceError(variable);
+        } else if (value->opcode() == IrOpcode::kPhi) {
+          value = BuildHoleCheckThrow(current, variable, value);
+        }
+      } else if (mode == CONST && op != Token::INIT_CONST) {
+        // All assignments to const variables are early errors.
+        UNREACHABLE();
+      }
+      environment()->Bind(variable, value);
+      return value;
+    case Variable::CONTEXT: {
+      // Context variable (potentially up the context chain).
+      int depth = current_scope()->ContextChainLength(variable->scope());
+      if (mode == CONST_LEGACY && op == Token::INIT_CONST_LEGACY) {
+        // Perform an initialization check for legacy const variables.
+        const Operator* op =
+            javascript()->LoadContext(depth, variable->index(), false);
+        Node* current = NewNode(op, current_context());
+        value = BuildHoleCheckSilent(current, value, current);
+      } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
+        // Non-initializing assignments to legacy const are ignored.
+        return value;
+      } else if (mode == LET && op != Token::INIT_LET) {
+        // Perform an initialization check for let declared variables.
+        const Operator* op =
+            javascript()->LoadContext(depth, variable->index(), false);
+        Node* current = NewNode(op, current_context());
+        value = BuildHoleCheckThrow(current, variable, value);
+      } else if (mode == CONST && op != Token::INIT_CONST) {
+        // All assignments to const variables are early errors.
+        UNREACHABLE();
+      }
+      const Operator* op = javascript()->StoreContext(depth, variable->index());
+      return NewNode(op, current_context(), value);
+    }
+    case Variable::LOOKUP: {
+      // Dynamic lookup of context variable (anywhere in the chain).
+      Node* name = jsgraph()->Constant(variable->name());
+      Node* strict = jsgraph()->Constant(strict_mode());
+      // TODO(mstarzinger): Use Runtime::kInitializeLegacyConstLookupSlot for
+      // initializations of const declarations.
+      const Operator* op = javascript()->Runtime(Runtime::kStoreLookupSlot, 4);
+      return NewNode(op, value, current_context(), name, strict);
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Node* AstGraphBuilder::BuildLoadObjectField(Node* object, int offset) {
+  // TODO(sigurds) Use simplified load here once it is ready.
+  Node* field_load = NewNode(jsgraph()->machine()->Load(kMachAnyTagged), object,
+                             jsgraph()->Int32Constant(offset - kHeapObjectTag));
+  return field_load;
+}
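+
+// Illustrative note (not part of the original change): V8 heap object
+// pointers carry a one-bit tag (kHeapObjectTag == 1), so a field at byte
+// offset k of an object at tagged address p lives at machine address
+// p + k - 1. Folding the tag into the offset lets the raw machine Load
+// address the field without untagging the pointer first.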
+
+
+Node* AstGraphBuilder::BuildLoadBuiltinsObject() {
+  Node* global = BuildLoadGlobalObject();
+  Node* builtins =
+      BuildLoadObjectField(global, JSGlobalObject::kBuiltinsOffset);
+  return builtins;
+}
+
+
+Node* AstGraphBuilder::BuildLoadGlobalObject() {
+  Node* context = GetFunctionContext();
+  const Operator* load_op =
+      javascript()->LoadContext(0, Context::GLOBAL_OBJECT_INDEX, true);
+  return NewNode(load_op, context);
+}
+
+
+Node* AstGraphBuilder::BuildToBoolean(Node* value) {
+  // TODO(mstarzinger): Possible optimization is to NOP for boolean values.
+  return NewNode(javascript()->ToBoolean(), value);
+}
+
+
+Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable) {
+  // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
+  Node* variable_name = jsgraph()->Constant(variable->name());
+  const Operator* op = javascript()->Runtime(Runtime::kThrowReferenceError, 1);
+  return NewNode(op, variable_name);
+}
+
+
+Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
+  const Operator* js_op;
+  switch (op) {
+    case Token::BIT_OR:
+      js_op = javascript()->BitwiseOr();
+      break;
+    case Token::BIT_AND:
+      js_op = javascript()->BitwiseAnd();
+      break;
+    case Token::BIT_XOR:
+      js_op = javascript()->BitwiseXor();
+      break;
+    case Token::SHL:
+      js_op = javascript()->ShiftLeft();
+      break;
+    case Token::SAR:
+      js_op = javascript()->ShiftRight();
+      break;
+    case Token::SHR:
+      js_op = javascript()->ShiftRightLogical();
+      break;
+    case Token::ADD:
+      js_op = javascript()->Add();
+      break;
+    case Token::SUB:
+      js_op = javascript()->Subtract();
+      break;
+    case Token::MUL:
+      js_op = javascript()->Multiply();
+      break;
+    case Token::DIV:
+      js_op = javascript()->Divide();
+      break;
+    case Token::MOD:
+      js_op = javascript()->Modulus();
+      break;
+    default:
+      UNREACHABLE();
+      js_op = NULL;
+  }
+  return NewNode(js_op, left, right);
+}
+
+
+void AstGraphBuilder::PrepareFrameState(Node* node, BailoutId ast_id,
+                                        OutputFrameStateCombine combine) {
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    DCHECK(NodeProperties::GetFrameStateInput(node)->opcode() ==
+           IrOpcode::kDead);
+    NodeProperties::ReplaceFrameStateInput(
+        node, environment()->Checkpoint(ast_id, combine));
+  }
+}
+
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/ast-graph-builder.h b/src/compiler/ast-graph-builder.h
new file mode 100644
index 0000000..6a7e3db
--- /dev/null
+++ b/src/compiler/ast-graph-builder.h
@@ -0,0 +1,430 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_AST_GRAPH_BUILDER_H_
+#define V8_COMPILER_AST_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/ast.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ControlBuilder;
+class LoopBuilder;
+class Graph;
+
+// The AstGraphBuilder produces a high-level IR graph, based on an
+// underlying AST. The produced graph can either be compiled into a
+// stand-alone function or be wired into another graph for the purposes
+// of function inlining.
+class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
+ public:
+  AstGraphBuilder(CompilationInfo* info, JSGraph* jsgraph);
+
+  // Creates a graph by visiting the entire AST.
+  bool CreateGraph();
+
+ protected:
+  class AstContext;
+  class AstEffectContext;
+  class AstValueContext;
+  class AstTestContext;
+  class BreakableScope;
+  class ContextScope;
+  class Environment;
+
+  Environment* environment() {
+    return reinterpret_cast<Environment*>(
+        StructuredGraphBuilder::environment());
+  }
+
+  AstContext* ast_context() const { return ast_context_; }
+  BreakableScope* breakable() const { return breakable_; }
+  ContextScope* execution_context() const { return execution_context_; }
+
+  void set_ast_context(AstContext* ctx) { ast_context_ = ctx; }
+  void set_breakable(BreakableScope* brk) { breakable_ = brk; }
+  void set_execution_context(ContextScope* ctx) { execution_context_ = ctx; }
+
+  // Support for control flow builders. The concrete type of the environment
+  // depends on the graph builder, but environments themselves are not virtual.
+  typedef StructuredGraphBuilder::Environment BaseEnvironment;
+  virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env);
+
+  // TODO(mstarzinger): The pipeline only needs to be a friend to access the
+  // function context. Remove as soon as the context is a parameter.
+  friend class Pipeline;
+
+  // Getters for values in the activation record.
+  Node* GetFunctionClosure();
+  Node* GetFunctionContext();
+
+  //
+  // The following build methods all generate graph fragments and return one
+  // resulting node. The operand stack height remains the same, variables and
+  // other dependencies tracked by the environment might be mutated though.
+  //
+
+  // Builder to create a local function context.
+  Node* BuildLocalFunctionContext(Node* context, Node* closure);
+
+  // Builder to create an arguments object if it is used.
+  Node* BuildArgumentsObject(Variable* arguments);
+
+  // Builders for variable load and assignment.
+  Node* BuildVariableAssignment(Variable* var, Node* value, Token::Value op,
+                                BailoutId bailout_id);
+  Node* BuildVariableDelete(Variable* var);
+  Node* BuildVariableLoad(Variable* var, BailoutId bailout_id,
+                          ContextualMode mode = CONTEXTUAL);
+
+  // Builders for accessing the function context.
+  Node* BuildLoadBuiltinsObject();
+  Node* BuildLoadGlobalObject();
+  Node* BuildLoadClosure();
+  Node* BuildLoadObjectField(Node* object, int offset);
+
+  // Builders for automatic type conversion.
+  Node* BuildToBoolean(Node* value);
+
+  // Builders for error reporting at runtime.
+  Node* BuildThrowReferenceError(Variable* var);
+
+  // Builders for dynamic hole-checks at runtime.
+  Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
+  Node* BuildHoleCheckThrow(Node* value, Variable* var, Node* not_hole);
+
+  // Builders for binary operations.
+  Node* BuildBinaryOp(Node* left, Node* right, Token::Value op);
+
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+  // Visiting functions for AST nodes make this an AstVisitor.
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+  // Visiting function for declarations list is overridden.
+  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations);
+
+ private:
+  CompilationInfo* info_;
+  AstContext* ast_context_;
+  JSGraph* jsgraph_;
+
+  // List of global declarations for functions and variables.
+  ZoneList<Handle<Object> > globals_;
+
+  // Stack of breakable statements entered by the visitor.
+  BreakableScope* breakable_;
+
+  // Stack of context objects pushed onto the chain by the visitor.
+  ContextScope* execution_context_;
+
+  // Nodes representing values in the activation record.
+  SetOncePointer<Node> function_closure_;
+  SetOncePointer<Node> function_context_;
+
+  CompilationInfo* info() { return info_; }
+  StrictMode strict_mode() { return info()->strict_mode(); }
+  JSGraph* jsgraph() { return jsgraph_; }
+  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+  ZoneList<Handle<Object> >* globals() { return &globals_; }
+
+  // Current scope during visitation.
+  inline Scope* current_scope() const;
+
+  // Process arguments to a call by popping {arity} elements off the operand
+  // stack and building a call node using the given call operator.
+  Node* ProcessArguments(const Operator* op, int arity);
+
+  // Visit statements.
+  void VisitIfNotNull(Statement* stmt);
+
+  // Visit expressions.
+  void VisitForTest(Expression* expr);
+  void VisitForEffect(Expression* expr);
+  void VisitForValue(Expression* expr);
+  void VisitForValueOrNull(Expression* expr);
+  void VisitForValues(ZoneList<Expression*>* exprs);
+
+  // Common for all IterationStatement bodies.
+  void VisitIterationBody(IterationStatement* stmt, LoopBuilder* loop,
+                          int drop_extra);
+
+  // Dispatched from VisitCallRuntime.
+  void VisitCallJSRuntime(CallRuntime* expr);
+
+  // Dispatched from VisitUnaryOperation.
+  void VisitDelete(UnaryOperation* expr);
+  void VisitVoid(UnaryOperation* expr);
+  void VisitTypeof(UnaryOperation* expr);
+  void VisitNot(UnaryOperation* expr);
+
+  // Dispatched from VisitBinaryOperation.
+  void VisitComma(BinaryOperation* expr);
+  void VisitLogicalExpression(BinaryOperation* expr);
+  void VisitArithmeticExpression(BinaryOperation* expr);
+
+  // Dispatched from VisitForInStatement.
+  void VisitForInAssignment(Expression* expr, Node* value);
+
+  // Builds deoptimization for a given node.
+  void PrepareFrameState(Node* node, BailoutId ast_id,
+                         OutputFrameStateCombine combine = kIgnoreOutput);
+
+  OutputFrameStateCombine StateCombineFromAstContext();
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+  DISALLOW_COPY_AND_ASSIGN(AstGraphBuilder);
+};
+
+
+// The abstract execution environment for generated code consists of
+// parameter variables, local variables and the operand stack. The
+// environment will perform proper SSA-renaming of all tracked nodes
+// at split and merge points in the control flow. Internally all the
+// values are stored in one list using the following layout:
+//
+//  [parameters (+receiver)] [locals] [operand stack]
+//
+class AstGraphBuilder::Environment
+    : public StructuredGraphBuilder::Environment {
+ public:
+  Environment(AstGraphBuilder* builder, Scope* scope, Node* control_dependency);
+  Environment(const Environment& copy);
+
+  int parameters_count() const { return parameters_count_; }
+  int locals_count() const { return locals_count_; }
+  int stack_height() {
+    return static_cast<int>(values()->size()) - parameters_count_ -
+           locals_count_;
+  }
+
+  // Operations on parameter or local variables. The parameter indices are
+  // shifted by 1 (receiver is parameter index -1 but environment index 0).
+  void Bind(Variable* variable, Node* node) {
+    DCHECK(variable->IsStackAllocated());
+    if (variable->IsParameter()) {
+      values()->at(variable->index() + 1) = node;
+    } else {
+      DCHECK(variable->IsStackLocal());
+      values()->at(variable->index() + parameters_count_) = node;
+    }
+  }
+  Node* Lookup(Variable* variable) {
+    DCHECK(variable->IsStackAllocated());
+    if (variable->IsParameter()) {
+      return values()->at(variable->index() + 1);
+    } else {
+      DCHECK(variable->IsStackLocal());
+      return values()->at(variable->index() + parameters_count_);
+    }
+  }
+
+  // Operations on the operand stack.
+  void Push(Node* node) {
+    values()->push_back(node);
+  }
+  Node* Top() {
+    DCHECK(stack_height() > 0);
+    return values()->back();
+  }
+  Node* Pop() {
+    DCHECK(stack_height() > 0);
+    Node* back = values()->back();
+    values()->pop_back();
+    return back;
+  }
+
+  // Direct mutations of the operand stack.
+  void Poke(int depth, Node* node) {
+    DCHECK(depth >= 0 && depth < stack_height());
+    int index = static_cast<int>(values()->size()) - depth - 1;
+    values()->at(index) = node;
+  }
+  Node* Peek(int depth) {
+    DCHECK(depth >= 0 && depth < stack_height());
+    int index = static_cast<int>(values()->size()) - depth - 1;
+    return values()->at(index);
+  }
+  void Drop(int depth) {
+    DCHECK(depth >= 0 && depth <= stack_height());
+    values()->erase(values()->end() - depth, values()->end());
+  }
+
+  // Preserve a checkpoint of the environment for the IR graph. Any
+  // further mutation of the environment will not affect checkpoints.
+  Node* Checkpoint(BailoutId ast_id, OutputFrameStateCombine combine);
+
+ protected:
+  AstGraphBuilder* builder() const {
+    return reinterpret_cast<AstGraphBuilder*>(
+        StructuredGraphBuilder::Environment::builder());
+  }
+
+ private:
+  void UpdateStateValues(Node** state_values, int offset, int count);
+
+  int parameters_count_;
+  int locals_count_;
+  Node* parameters_node_;
+  Node* locals_node_;
+  Node* stack_node_;
+};
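+
+// Illustrative note (not part of the original change), assuming that
+// parameters_count_ counts the receiver as Bind/Lookup above imply: with two
+// parameters and three locals the backing list is laid out as
+//
+//   index:  0        1   2   3   4   5   6...
+//   value:  receiver p0  p1  l0  l1  l2  <operand stack>
+//
+// so Lookup of p1 reads index 2 (variable index 1, shifted by 1 for the
+// receiver) and Lookup of l0 reads index 3 (variable index 0 plus
+// parameters_count_).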
+
+
+// Each expression in the AST is evaluated in a specific context. This context
+// decides how the evaluation result is passed up the visitor.
+class AstGraphBuilder::AstContext BASE_EMBEDDED {
+ public:
+  bool IsEffect() const { return kind_ == Expression::kEffect; }
+  bool IsValue() const { return kind_ == Expression::kValue; }
+  bool IsTest() const { return kind_ == Expression::kTest; }
+
+  // Determines how to combine the frame state with the value
+  // that is about to be plugged into this AstContext.
+  OutputFrameStateCombine GetStateCombine() {
+    return IsEffect() ? kIgnoreOutput : kPushOutput;
+  }
+
+  // Plug a node into this expression context.  Call this function in tail
+  // position in the Visit functions for expressions.
+  virtual void ProduceValue(Node* value) = 0;
+
+  // Unplugs a node from this expression context.  Call this to retrieve the
+  // result of another Visit function that already plugged the context.
+  virtual Node* ConsumeValue() = 0;
+
+  // Shortcut for "context->ProduceValue(context->ConsumeValue())".
+  void ReplaceValue() { ProduceValue(ConsumeValue()); }
+
+ protected:
+  AstContext(AstGraphBuilder* owner, Expression::Context kind);
+  virtual ~AstContext();
+
+  AstGraphBuilder* owner() const { return owner_; }
+  Environment* environment() const { return owner_->environment(); }
+
+// We want to be able to assert, in a context-specific way, that the stack
+// height makes sense when the context is filled.
+#ifdef DEBUG
+  int original_height_;
+#endif
+
+ private:
+  Expression::Context kind_;
+  AstGraphBuilder* owner_;
+  AstContext* outer_;
+};
+
+
+// Context to evaluate expression for its side effects only.
+class AstGraphBuilder::AstEffectContext FINAL : public AstContext {
+ public:
+  explicit AstEffectContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kEffect) {}
+  virtual ~AstEffectContext();
+  virtual void ProduceValue(Node* value) OVERRIDE;
+  virtual Node* ConsumeValue() OVERRIDE;
+};
+
+
+// Context to evaluate expression for its value (and side effects).
+class AstGraphBuilder::AstValueContext FINAL : public AstContext {
+ public:
+  explicit AstValueContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kValue) {}
+  virtual ~AstValueContext();
+  virtual void ProduceValue(Node* value) OVERRIDE;
+  virtual Node* ConsumeValue() OVERRIDE;
+};
+
+
+// Context to evaluate expression for a condition value (and side effects).
+class AstGraphBuilder::AstTestContext FINAL : public AstContext {
+ public:
+  explicit AstTestContext(AstGraphBuilder* owner)
+      : AstContext(owner, Expression::kTest) {}
+  virtual ~AstTestContext();
+  virtual void ProduceValue(Node* value) OVERRIDE;
+  virtual Node* ConsumeValue() OVERRIDE;
+};
+
+
+// Scoped class tracking breakable statements entered by the visitor. Allows
+// the visitor to properly 'break' and 'continue' iteration statements as well
+// as to 'break' from blocks within switch statements.
+class AstGraphBuilder::BreakableScope BASE_EMBEDDED {
+ public:
+  BreakableScope(AstGraphBuilder* owner, BreakableStatement* target,
+                 ControlBuilder* control, int drop_extra)
+      : owner_(owner),
+        target_(target),
+        next_(owner->breakable()),
+        control_(control),
+        drop_extra_(drop_extra) {
+    owner_->set_breakable(this);  // Push.
+  }
+
+  ~BreakableScope() {
+    owner_->set_breakable(next_);  // Pop.
+  }
+
+  // Either 'break' or 'continue' the target statement.
+  void BreakTarget(BreakableStatement* target);
+  void ContinueTarget(BreakableStatement* target);
+
+ private:
+  AstGraphBuilder* owner_;
+  BreakableStatement* target_;
+  BreakableScope* next_;
+  ControlBuilder* control_;
+  int drop_extra_;
+
+  // Find the correct scope for the target statement. Note that this also drops
+  // extra operands from the environment for each scope skipped along the way.
+  BreakableScope* FindBreakable(BreakableStatement* target);
+};
+
+
+// Scoped class tracking context objects created by the visitor. Represents
+// mutations of the context chain within the function body and allows the
+// current {scope} and {context} to be changed during visitation.
+class AstGraphBuilder::ContextScope BASE_EMBEDDED {
+ public:
+  ContextScope(AstGraphBuilder* owner, Scope* scope, Node* context)
+      : owner_(owner),
+        next_(owner->execution_context()),
+        outer_(owner->current_context()),
+        scope_(scope) {
+    owner_->set_execution_context(this);  // Push.
+    owner_->set_current_context(context);
+  }
+
+  ~ContextScope() {
+    owner_->set_execution_context(next_);  // Pop.
+    owner_->set_current_context(outer_);
+  }
+
+  // Current scope during visitation.
+  Scope* scope() const { return scope_; }
+
+ private:
+  AstGraphBuilder* owner_;
+  ContextScope* next_;
+  Node* outer_;
+  Scope* scope_;
+};
+
+Scope* AstGraphBuilder::current_scope() const {
+  return execution_context_->scope();
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_AST_GRAPH_BUILDER_H_
diff --git a/src/compiler/change-lowering-unittest.cc b/src/compiler/change-lowering-unittest.cc
new file mode 100644
index 0000000..994027a
--- /dev/null
+++ b/src/compiler/change-lowering-unittest.cc
@@ -0,0 +1,476 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/compiler-test-utils.h"
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+#include "testing/gmock-support.h"
+
+using testing::_;
+using testing::AllOf;
+using testing::Capture;
+using testing::CaptureEq;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(bmeurer): Find a new home for this function.
+inline std::ostream& operator<<(std::ostream& os, const MachineType& type) {
+  OStringStream ost;
+  ost << type;
+  return os << ost.c_str();
+}
+
+
+class ChangeLoweringTest : public GraphTest {
+ public:
+  ChangeLoweringTest() : simplified_(zone()) {}
+  virtual ~ChangeLoweringTest() {}
+
+  virtual MachineType WordRepresentation() const = 0;
+
+ protected:
+  int HeapNumberValueOffset() const {
+    STATIC_ASSERT(HeapNumber::kValueOffset % kApiPointerSize == 0);
+    return (HeapNumber::kValueOffset / kApiPointerSize) * PointerSize() -
+           kHeapObjectTag;
+  }
+  bool Is32() const { return WordRepresentation() == kRepWord32; }
+  int PointerSize() const {
+    switch (WordRepresentation()) {
+      case kRepWord32:
+        return 4;
+      case kRepWord64:
+        return 8;
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return 0;
+  }
+  int SmiMaxValue() const { return -(SmiMinValue() + 1); }
+  int SmiMinValue() const {
+    return static_cast<int>(0xffffffffu << (SmiValueSize() - 1));
+  }
+  int SmiShiftAmount() const { return kSmiTagSize + SmiShiftSize(); }
+  int SmiShiftSize() const {
+    return Is32() ? SmiTagging<4>::SmiShiftSize()
+                  : SmiTagging<8>::SmiShiftSize();
+  }
+  int SmiValueSize() const {
+    return Is32() ? SmiTagging<4>::SmiValueSize()
+                  : SmiTagging<8>::SmiValueSize();
+  }
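+  // Worked example (assuming the standard V8 Smi layouts): on 32-bit targets
+  // SmiValueSize() is 31 and SmiShiftAmount() is 1, so the Smi range is
+  // [-2^30, 2^30 - 1]; on 64-bit targets SmiValueSize() is 32 and
+  // SmiShiftAmount() is 32, so the Smi range is [-2^31, 2^31 - 1].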
+
+  Node* Parameter(int32_t index = 0) {
+    return graph()->NewNode(common()->Parameter(index), graph()->start());
+  }
+
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    MachineOperatorBuilder machine(WordRepresentation());
+    JSOperatorBuilder javascript(zone());
+    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine);
+    CompilationInfo info(isolate(), zone());
+    Linkage linkage(&info);
+    ChangeLowering reducer(&jsgraph, &linkage);
+    return reducer.Reduce(node);
+  }
+
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+  Matcher<Node*> IsAllocateHeapNumber(const Matcher<Node*>& effect_matcher,
+                                      const Matcher<Node*>& control_matcher) {
+    return IsCall(
+        _, IsHeapConstant(Unique<HeapObject>::CreateImmovable(
+               CEntryStub(isolate(), 1).GetCode())),
+        IsExternalConstant(ExternalReference(
+            Runtime::FunctionForId(Runtime::kAllocateHeapNumber), isolate())),
+        IsInt32Constant(0), IsNumberConstant(0.0), effect_matcher,
+        control_matcher);
+  }
+  Matcher<Node*> IsWordEqual(const Matcher<Node*>& lhs_matcher,
+                             const Matcher<Node*>& rhs_matcher) {
+    return Is32() ? IsWord32Equal(lhs_matcher, rhs_matcher)
+                  : IsWord64Equal(lhs_matcher, rhs_matcher);
+  }
+
+ private:
+  SimplifiedOperatorBuilder simplified_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Common.
+
+
+class ChangeLoweringCommonTest
+    : public ChangeLoweringTest,
+      public ::testing::WithParamInterface<MachineType> {
+ public:
+  virtual ~ChangeLoweringCommonTest() {}
+
+  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+    return GetParam();
+  }
+};
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBitToBool) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeBitToBool(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch;
+  EXPECT_THAT(phi,
+              IsPhi(static_cast<MachineType>(kTypeBool | kRepTagged),
+                    IsTrueConstant(), IsFalseConstant(),
+                    IsMerge(IsIfTrue(AllOf(CaptureEq(&branch),
+                                           IsBranch(val, graph()->start()))),
+                            IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeBoolToBit) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeBoolToBit(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  EXPECT_THAT(reduction.replacement(), IsWordEqual(val, IsTrueConstant()));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, ChangeFloat64ToTagged) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeFloat64ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* finish = reduction.replacement();
+  Capture<Node*> heap_number;
+  EXPECT_THAT(
+      finish,
+      IsFinish(
+          AllOf(CaptureEq(&heap_number),
+                IsAllocateHeapNumber(IsValueEffect(val), graph()->start())),
+          IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+                  IsInt32Constant(HeapNumberValueOffset()), val,
+                  CaptureEq(&heap_number), graph()->start())));
+}
+
+
+TARGET_TEST_P(ChangeLoweringCommonTest, StringAdd) {
+  Node* node =
+      graph()->NewNode(simplified()->StringAdd(), Parameter(0), Parameter(1));
+  Reduction reduction = Reduce(node);
+  EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
+                        ::testing::Values(kRepWord32, kRepWord64));
+
+
+// -----------------------------------------------------------------------------
+// 32-bit
+
+
+class ChangeLowering32Test : public ChangeLoweringTest {
+ public:
+  virtual ~ChangeLowering32Test() {}
+  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+    return kRepWord32;
+  }
+};
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> add, branch, heap_number, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachAnyTagged,
+            IsFinish(
+                AllOf(CaptureEq(&heap_number),
+                      IsAllocateHeapNumber(_, CaptureEq(&if_true))),
+                IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+                        IsInt32Constant(HeapNumberValueOffset()),
+                        IsChangeInt32ToFloat64(val), CaptureEq(&heap_number),
+                        CaptureEq(&if_true))),
+            IsProjection(
+                0, AllOf(CaptureEq(&add), IsInt32AddWithOverflow(val, val))),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(CaptureEq(&branch),
+                                    IsBranch(IsProjection(1, CaptureEq(&add)),
+                                             graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(
+          kMachFloat64,
+          IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                 IsControlEffect(CaptureEq(&if_true))),
+          IsChangeInt32ToFloat64(
+              IsWord32Sar(val, IsInt32Constant(SmiShiftAmount()))),
+          IsMerge(
+              AllOf(CaptureEq(&if_true),
+                    IsIfTrue(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start())))),
+              IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToInt32) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachInt32,
+            IsChangeFloat64ToInt32(IsLoad(
+                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                IsControlEffect(CaptureEq(&if_true)))),
+            IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToUint32) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachUint32,
+            IsChangeFloat64ToUint32(IsLoad(
+                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                IsControlEffect(CaptureEq(&if_true)))),
+            IsWord32Sar(val, IsInt32Constant(SmiShiftAmount())),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord32And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, heap_number, if_false;
+  EXPECT_THAT(
+      phi,
+      IsPhi(
+          kMachAnyTagged, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())),
+          IsFinish(
+              AllOf(CaptureEq(&heap_number),
+                    IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+              IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+                      IsInt32Constant(HeapNumberValueOffset()),
+                      IsChangeUint32ToFloat64(val), CaptureEq(&heap_number),
+                      CaptureEq(&if_false))),
+          IsMerge(
+              IsIfTrue(AllOf(CaptureEq(&branch),
+                             IsBranch(IsUint32LessThanOrEqual(
+                                          val, IsInt32Constant(SmiMaxValue())),
+                                      graph()->start()))),
+              AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+
+// -----------------------------------------------------------------------------
+// 64-bit
+
+
+class ChangeLowering64Test : public ChangeLoweringTest {
+ public:
+  virtual ~ChangeLowering64Test() {}
+  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
+    return kRepWord64;
+  }
+};
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeInt32ToTagged) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  EXPECT_THAT(reduction.replacement(),
+              IsWord64Shl(IsChangeInt32ToInt64(val),
+                          IsInt32Constant(SmiShiftAmount())));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToFloat64) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToFloat64(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(
+          kMachFloat64,
+          IsLoad(kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                 IsControlEffect(CaptureEq(&if_true))),
+          IsChangeInt32ToFloat64(IsTruncateInt64ToInt32(
+              IsWord64Sar(val, IsInt32Constant(SmiShiftAmount())))),
+          IsMerge(
+              AllOf(CaptureEq(&if_true),
+                    IsIfTrue(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start())))),
+              IsIfFalse(CaptureEq(&branch)))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToInt32) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToInt32(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachInt32,
+            IsChangeFloat64ToInt32(IsLoad(
+                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                IsControlEffect(CaptureEq(&if_true)))),
+            IsTruncateInt64ToInt32(
+                IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeTaggedToUint32) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeTaggedToUint32(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, if_true;
+  EXPECT_THAT(
+      phi,
+      IsPhi(kMachUint32,
+            IsChangeFloat64ToUint32(IsLoad(
+                kMachFloat64, val, IsInt32Constant(HeapNumberValueOffset()),
+                IsControlEffect(CaptureEq(&if_true)))),
+            IsTruncateInt64ToInt32(
+                IsWord64Sar(val, IsInt32Constant(SmiShiftAmount()))),
+            IsMerge(AllOf(CaptureEq(&if_true), IsIfTrue(CaptureEq(&branch))),
+                    IsIfFalse(AllOf(
+                        CaptureEq(&branch),
+                        IsBranch(IsWord64And(val, IsInt32Constant(kSmiTagMask)),
+                                 graph()->start()))))));
+}
+
+
+TARGET_TEST_F(ChangeLowering64Test, ChangeUint32ToTagged) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeUint32ToTagged(), val);
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* phi = reduction.replacement();
+  Capture<Node*> branch, heap_number, if_false;
+  EXPECT_THAT(
+      phi,
+      IsPhi(
+          kMachAnyTagged, IsWord64Shl(IsChangeUint32ToUint64(val),
+                                      IsInt32Constant(SmiShiftAmount())),
+          IsFinish(
+              AllOf(CaptureEq(&heap_number),
+                    IsAllocateHeapNumber(_, CaptureEq(&if_false))),
+              IsStore(kMachFloat64, kNoWriteBarrier, CaptureEq(&heap_number),
+                      IsInt32Constant(HeapNumberValueOffset()),
+                      IsChangeUint32ToFloat64(val), CaptureEq(&heap_number),
+                      CaptureEq(&if_false))),
+          IsMerge(
+              IsIfTrue(AllOf(CaptureEq(&branch),
+                             IsBranch(IsUint32LessThanOrEqual(
+                                          val, IsInt32Constant(SmiMaxValue())),
+                                      graph()->start()))),
+              AllOf(CaptureEq(&if_false), IsIfFalse(CaptureEq(&branch))))));
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/change-lowering.cc b/src/compiler/change-lowering.cc
new file mode 100644
index 0000000..b13db4c
--- /dev/null
+++ b/src/compiler/change-lowering.cc
@@ -0,0 +1,256 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/machine-operator.h"
+
+#include "src/compiler/js-graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+ChangeLowering::~ChangeLowering() {}
+
+
+Reduction ChangeLowering::Reduce(Node* node) {
+  Node* control = graph()->start();
+  switch (node->opcode()) {
+    case IrOpcode::kChangeBitToBool:
+      return ChangeBitToBool(node->InputAt(0), control);
+    case IrOpcode::kChangeBoolToBit:
+      return ChangeBoolToBit(node->InputAt(0));
+    case IrOpcode::kChangeFloat64ToTagged:
+      return ChangeFloat64ToTagged(node->InputAt(0), control);
+    case IrOpcode::kChangeInt32ToTagged:
+      return ChangeInt32ToTagged(node->InputAt(0), control);
+    case IrOpcode::kChangeTaggedToFloat64:
+      return ChangeTaggedToFloat64(node->InputAt(0), control);
+    case IrOpcode::kChangeTaggedToInt32:
+      return ChangeTaggedToUI32(node->InputAt(0), control, kSigned);
+    case IrOpcode::kChangeTaggedToUint32:
+      return ChangeTaggedToUI32(node->InputAt(0), control, kUnsigned);
+    case IrOpcode::kChangeUint32ToTagged:
+      return ChangeUint32ToTagged(node->InputAt(0), control);
+    default:
+      return NoChange();
+  }
+  UNREACHABLE();
+  return NoChange();
+}
+
+
+Node* ChangeLowering::HeapNumberValueIndexConstant() {
+  STATIC_ASSERT(HeapNumber::kValueOffset % kPointerSize == 0);
+  const int heap_number_value_offset =
+      ((HeapNumber::kValueOffset / kPointerSize) * (machine()->Is64() ? 8 : 4));
+  return jsgraph()->Int32Constant(heap_number_value_offset - kHeapObjectTag);
+}
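+// Assuming the usual V8 pointer tagging (kHeapObjectTag == 1), subtracting the
+// tag makes this index, added to a tagged HeapNumber pointer, yield the
+// untagged address of the float64 value field, so the loads and stores below
+// need no separate untagging step.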
+
+
+Node* ChangeLowering::SmiMaxValueConstant() {
+  const int smi_value_size = machine()->Is32() ? SmiTagging<4>::SmiValueSize()
+                                               : SmiTagging<8>::SmiValueSize();
+  return jsgraph()->Int32Constant(
+      -(static_cast<int>(0xffffffffu << (smi_value_size - 1)) + 1));
+}
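+// The expression above computes 2^(smi_value_size - 1) - 1: shifting all-ones
+// left by (smi_value_size - 1) produces the Smi minimum as a signed integer,
+// and -(min + 1) is the corresponding maximum. For a 31-bit Smi payload this
+// is 2^30 - 1 = 1073741823.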
+
+
+Node* ChangeLowering::SmiShiftBitsConstant() {
+  const int smi_shift_size = machine()->Is32() ? SmiTagging<4>::SmiShiftSize()
+                                               : SmiTagging<8>::SmiShiftSize();
+  return jsgraph()->Int32Constant(smi_shift_size + kSmiTagSize);
+}
+
+
+Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
+  // The AllocateHeapNumber() runtime function does not use the context, so we
+  // can safely pass in Smi zero here.
+  Node* context = jsgraph()->ZeroConstant();
+  Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
+  const Runtime::Function* function =
+      Runtime::FunctionForId(Runtime::kAllocateHeapNumber);
+  DCHECK_EQ(0, function->nargs);
+  CallDescriptor* desc = linkage()->GetRuntimeCallDescriptor(
+      function->function_id, 0, Operator::kNoProperties);
+  Node* heap_number = graph()->NewNode(
+      common()->Call(desc), jsgraph()->CEntryStubConstant(),
+      jsgraph()->ExternalConstant(ExternalReference(function, isolate())),
+      jsgraph()->Int32Constant(function->nargs), context, effect, control);
+  Node* store = graph()->NewNode(
+      machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
+      heap_number, HeapNumberValueIndexConstant(), value, heap_number, control);
+  return graph()->NewNode(common()->Finish(1), heap_number, store);
+}
+
+
+Node* ChangeLowering::ChangeSmiToInt32(Node* value) {
+  value = graph()->NewNode(machine()->WordSar(), value, SmiShiftBitsConstant());
+  if (machine()->Is64()) {
+    value = graph()->NewNode(machine()->TruncateInt64ToInt32(), value);
+  }
+  return value;
+}
+
+
+Node* ChangeLowering::LoadHeapNumberValue(Node* value, Node* control) {
+  return graph()->NewNode(machine()->Load(kMachFloat64), value,
+                          HeapNumberValueIndexConstant(),
+                          graph()->NewNode(common()->ControlEffect(), control));
+}
+
+
+Reduction ChangeLowering::ChangeBitToBool(Node* val, Node* control) {
+  Node* branch = graph()->NewNode(common()->Branch(), val, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* true_value = jsgraph()->TrueConstant();
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* false_value = jsgraph()->FalseConstant();
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(
+      common()->Phi(static_cast<MachineType>(kTypeBool | kRepTagged), 2),
+      true_value, false_value, merge);
+
+  return Replace(phi);
+}
+
+
+Reduction ChangeLowering::ChangeBoolToBit(Node* val) {
+  return Replace(
+      graph()->NewNode(machine()->WordEqual(), val, jsgraph()->TrueConstant()));
+}
+
+
+Reduction ChangeLowering::ChangeFloat64ToTagged(Node* val, Node* control) {
+  return Replace(AllocateHeapNumberWithValue(val, control));
+}
+
+
+Reduction ChangeLowering::ChangeInt32ToTagged(Node* val, Node* control) {
+  if (machine()->Is64()) {
+    return Replace(
+        graph()->NewNode(machine()->Word64Shl(),
+                         graph()->NewNode(machine()->ChangeInt32ToInt64(), val),
+                         SmiShiftBitsConstant()));
+  }
+
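+  // On 32-bit targets, adding the value to itself performs the Smi tag shift
+  // by one and detects overflow at the same time: if val + val overflows, the
+  // value does not fit in a 31-bit Smi payload and is boxed in a HeapNumber.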
+  Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), val, val);
+  Node* ovf = graph()->NewNode(common()->Projection(1), add);
+
+  Node* branch = graph()->NewNode(common()->Branch(), ovf, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* heap_number = AllocateHeapNumberWithValue(
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), val), if_true);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* smi = graph()->NewNode(common()->Projection(0), add);
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), heap_number,
+                               smi, merge);
+
+  return Replace(phi);
+}
+
+
+Reduction ChangeLowering::ChangeTaggedToUI32(Node* val, Node* control,
+                                             Signedness signedness) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+
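+  // Since kSmiTag == 0, a set low bit marks a HeapObject: the true branch of
+  // the tag test loads and converts the HeapNumber's float64 value, while the
+  // false branch simply untags the Smi by shifting.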
+  Node* tag = graph()->NewNode(machine()->WordAnd(), val,
+                               jsgraph()->Int32Constant(kSmiTagMask));
+  Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  const Operator* op = (signedness == kSigned)
+                           ? machine()->ChangeFloat64ToInt32()
+                           : machine()->ChangeFloat64ToUint32();
+  Node* change = graph()->NewNode(op, LoadHeapNumberValue(val, if_true));
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* number = ChangeSmiToInt32(val);
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(
+      common()->Phi((signedness == kSigned) ? kMachInt32 : kMachUint32, 2),
+      change, number, merge);
+
+  return Replace(phi);
+}
+
+
+Reduction ChangeLowering::ChangeTaggedToFloat64(Node* val, Node* control) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+
+  Node* tag = graph()->NewNode(machine()->WordAnd(), val,
+                               jsgraph()->Int32Constant(kSmiTagMask));
+  Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* load = LoadHeapNumberValue(val, if_true);
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* number = graph()->NewNode(machine()->ChangeInt32ToFloat64(),
+                                  ChangeSmiToInt32(val));
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi =
+      graph()->NewNode(common()->Phi(kMachFloat64, 2), load, number, merge);
+
+  return Replace(phi);
+}
+
+
+Reduction ChangeLowering::ChangeUint32ToTagged(Node* val, Node* control) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+
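+  // An unsigned value fits in a Smi only if it is <= SmiMaxValue; anything
+  // larger would read back as a negative Smi after tagging, so such values
+  // are boxed in a freshly allocated HeapNumber instead.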
+  Node* cmp = graph()->NewNode(machine()->Uint32LessThanOrEqual(), val,
+                               SmiMaxValueConstant());
+  Node* branch = graph()->NewNode(common()->Branch(), cmp, control);
+
+  Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+  Node* smi = graph()->NewNode(
+      machine()->WordShl(),
+      machine()->Is64()
+          ? graph()->NewNode(machine()->ChangeUint32ToUint64(), val)
+          : val,
+      SmiShiftBitsConstant());
+
+  Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+  Node* heap_number = AllocateHeapNumberWithValue(
+      graph()->NewNode(machine()->ChangeUint32ToFloat64(), val), if_false);
+
+  Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+  Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, 2), smi,
+                               heap_number, merge);
+
+  return Replace(phi);
+}
+
+
+Isolate* ChangeLowering::isolate() const { return jsgraph()->isolate(); }
+
+
+Graph* ChangeLowering::graph() const { return jsgraph()->graph(); }
+
+
+CommonOperatorBuilder* ChangeLowering::common() const {
+  return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* ChangeLowering::machine() const {
+  return jsgraph()->machine();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/change-lowering.h b/src/compiler/change-lowering.h
new file mode 100644
index 0000000..5d7ab41
--- /dev/null
+++ b/src/compiler/change-lowering.h
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CHANGE_LOWERING_H_
+#define V8_COMPILER_CHANGE_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class Linkage;
+class MachineOperatorBuilder;
+
+class ChangeLowering FINAL : public Reducer {
+ public:
+  ChangeLowering(JSGraph* jsgraph, Linkage* linkage)
+      : jsgraph_(jsgraph), linkage_(linkage) {}
+  virtual ~ChangeLowering();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  Node* HeapNumberValueIndexConstant();
+  Node* SmiMaxValueConstant();
+  Node* SmiShiftBitsConstant();
+
+  Node* AllocateHeapNumberWithValue(Node* value, Node* control);
+  Node* ChangeSmiToInt32(Node* value);
+  Node* LoadHeapNumberValue(Node* value, Node* control);
+
+  Reduction ChangeBitToBool(Node* val, Node* control);
+  Reduction ChangeBoolToBit(Node* val);
+  Reduction ChangeFloat64ToTagged(Node* val, Node* control);
+  Reduction ChangeInt32ToTagged(Node* val, Node* control);
+  Reduction ChangeTaggedToFloat64(Node* val, Node* control);
+  Reduction ChangeTaggedToUI32(Node* val, Node* control, Signedness signedness);
+  Reduction ChangeUint32ToTagged(Node* val, Node* control);
+
+  Graph* graph() const;
+  Isolate* isolate() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Linkage* linkage() const { return linkage_; }
+  CommonOperatorBuilder* common() const;
+  MachineOperatorBuilder* machine() const;
+
+  JSGraph* jsgraph_;
+  Linkage* linkage_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CHANGE_LOWERING_H_
diff --git a/src/compiler/code-generator-impl.h b/src/compiler/code-generator-impl.h
new file mode 100644
index 0000000..a3f7e4c
--- /dev/null
+++ b/src/compiler/code-generator-impl.h
@@ -0,0 +1,132 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_IMPL_H_
+#define V8_COMPILER_CODE_GENERATOR_IMPL_H_
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Converts InstructionOperands from a given instruction to
+// architecture-specific registers and operands after they have been assigned
+// by the register allocator.
+class InstructionOperandConverter {
+ public:
+  InstructionOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : gen_(gen), instr_(instr) {}
+
+  Register InputRegister(int index) {
+    return ToRegister(instr_->InputAt(index));
+  }
+
+  DoubleRegister InputDoubleRegister(int index) {
+    return ToDoubleRegister(instr_->InputAt(index));
+  }
+
+  double InputDouble(int index) { return ToDouble(instr_->InputAt(index)); }
+
+  int32_t InputInt32(int index) {
+    return ToConstant(instr_->InputAt(index)).ToInt32();
+  }
+
+  int8_t InputInt8(int index) { return static_cast<int8_t>(InputInt32(index)); }
+
+  int16_t InputInt16(int index) {
+    return static_cast<int16_t>(InputInt32(index));
+  }
+
+  uint8_t InputInt5(int index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0x1F);
+  }
+
+  uint8_t InputInt6(int index) {
+    return static_cast<uint8_t>(InputInt32(index) & 0x3F);
+  }
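+  // The 5- and 6-bit accessors above mask immediates to the shift-amount
+  // widths of 32- and 64-bit shift instructions, respectively (an inference
+  // from the mask widths, not a documented contract).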
+
+  Handle<HeapObject> InputHeapObject(int index) {
+    return ToHeapObject(instr_->InputAt(index));
+  }
+
+  Label* InputLabel(int index) {
+    return gen_->code()->GetLabel(InputBlock(index));
+  }
+
+  BasicBlock* InputBlock(int index) {
+    NodeId block_id = static_cast<NodeId>(InputInt32(index));
+    // The operand should be a block id.
+    DCHECK(block_id >= 0);
+    DCHECK(block_id < gen_->schedule()->BasicBlockCount());
+    return gen_->schedule()->GetBlockById(block_id);
+  }
+
+  Register OutputRegister(int index = 0) {
+    return ToRegister(instr_->OutputAt(index));
+  }
+
+  DoubleRegister OutputDoubleRegister() {
+    return ToDoubleRegister(instr_->Output());
+  }
+
+  Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+
+  Register ToRegister(InstructionOperand* op) {
+    DCHECK(op->IsRegister());
+    return Register::FromAllocationIndex(op->index());
+  }
+
+  DoubleRegister ToDoubleRegister(InstructionOperand* op) {
+    DCHECK(op->IsDoubleRegister());
+    return DoubleRegister::FromAllocationIndex(op->index());
+  }
+
+  Constant ToConstant(InstructionOperand* operand) {
+    if (operand->IsImmediate()) {
+      return gen_->code()->GetImmediate(operand->index());
+    }
+    return gen_->code()->GetConstant(operand->index());
+  }
+
+  double ToDouble(InstructionOperand* operand) {
+    return ToConstant(operand).ToFloat64();
+  }
+
+  Handle<HeapObject> ToHeapObject(InstructionOperand* operand) {
+    return ToConstant(operand).ToHeapObject();
+  }
+
+  Frame* frame() const { return gen_->frame(); }
+  Isolate* isolate() const { return gen_->isolate(); }
+  Linkage* linkage() const { return gen_->linkage(); }
+
+ protected:
+  CodeGenerator* gen_;
+  Instruction* instr_;
+};
+
+
+// TODO(dcarney): generify this on bleeding_edge and replace this call
+// when merged.
+static inline void FinishCode(MacroAssembler* masm) {
+#if V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM
+  masm->CheckConstPool(true, false);
+#endif
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CODE_GENERATOR_IMPL_H_
diff --git a/src/compiler/code-generator.cc b/src/compiler/code-generator.cc
new file mode 100644
index 0000000..f22c479
--- /dev/null
+++ b/src/compiler/code-generator.cc
@@ -0,0 +1,460 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+CodeGenerator::CodeGenerator(InstructionSequence* code)
+    : code_(code),
+      current_block_(NULL),
+      current_source_position_(SourcePosition::Invalid()),
+      masm_(code->zone()->isolate(), NULL, 0),
+      resolver_(this),
+      safepoints_(code->zone()),
+      deoptimization_states_(code->zone()),
+      deoptimization_literals_(code->zone()),
+      translations_(code->zone()),
+      last_lazy_deopt_pc_(0) {}
+
+
+Handle<Code> CodeGenerator::GenerateCode() {
+  CompilationInfo* info = linkage()->info();
+
+  // Emit a code line info recording start event.
+  PositionsRecorder* recorder = masm()->positions_recorder();
+  LOG_CODE_EVENT(isolate(), CodeStartLinePosInfoRecordEvent(recorder));
+
+  // Place function entry hook if requested to do so.
+  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm());
+  }
+
+  // Architecture-specific, linkage-specific prologue.
+  info->set_prologue_offset(masm()->pc_offset());
+  AssemblePrologue();
+
+  // Assemble all instructions.
+  for (InstructionSequence::const_iterator i = code()->begin();
+       i != code()->end(); ++i) {
+    AssembleInstruction(*i);
+  }
+
+  FinishCode(masm());
+
+  // Ensure there is space for lazy deopt.
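+  // The deoptimizer patches a call over the lazy deopt site, so pad with nops
+  // until at least Deoptimizer::patch_size() bytes follow the last instruction
+  // (assumed to mirror the Lithium backend's handling).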
+  if (!info->IsStub()) {
+    int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
+    while (masm()->pc_offset() < target_offset) {
+      masm()->nop();
+    }
+  }
+
+  safepoints()->Emit(masm(), frame()->GetSpillSlotCount());
+
+  // TODO(titzer): what are the right code flags here?
+  Code::Kind kind = Code::STUB;
+  if (linkage()->GetIncomingDescriptor()->IsJSFunctionCall()) {
+    kind = Code::OPTIMIZED_FUNCTION;
+  }
+  Handle<Code> result = v8::internal::CodeGenerator::MakeCodeEpilogue(
+      masm(), Code::ComputeFlags(kind), info);
+  result->set_is_turbofanned(true);
+  result->set_stack_slots(frame()->GetSpillSlotCount());
+  result->set_safepoint_table_offset(safepoints()->GetCodeOffset());
+
+  PopulateDeoptimizationData(result);
+
+  // Emit a code line info recording stop event.
+  void* line_info = recorder->DetachJITHandlerData();
+  LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
+
+  return result;
+}
+
+
+void CodeGenerator::RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+                                    int arguments,
+                                    Safepoint::DeoptMode deopt_mode) {
+  const ZoneList<InstructionOperand*>* operands =
+      pointers->GetNormalizedOperands();
+  Safepoint safepoint =
+      safepoints()->DefineSafepoint(masm(), kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    InstructionOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      Register reg = Register::FromAllocationIndex(pointer->index());
+      safepoint.DefinePointerRegister(reg, zone());
+    }
+  }
+}
+
+
+void CodeGenerator::AssembleInstruction(Instruction* instr) {
+  if (instr->IsBlockStart()) {
+    // Bind a label for a block start and handle parallel moves.
+    BlockStartInstruction* block_start = BlockStartInstruction::cast(instr);
+    current_block_ = block_start->block();
+    if (FLAG_code_comments) {
+      // TODO(titzer): these code comments are a giant memory leak.
+      Vector<char> buffer = Vector<char>::New(32);
+      SNPrintF(buffer, "-- B%d start --", block_start->block()->id());
+      masm()->RecordComment(buffer.start());
+    }
+    masm()->bind(block_start->label());
+  }
+  if (instr->IsGapMoves()) {
+    // Handle parallel moves associated with the gap instruction.
+    AssembleGap(GapInstruction::cast(instr));
+  } else if (instr->IsSourcePosition()) {
+    AssembleSourcePosition(SourcePositionInstruction::cast(instr));
+  } else {
+    // Assemble architecture-specific code for the instruction.
+    AssembleArchInstruction(instr);
+
+    // Assemble branches or boolean materializations after this instruction.
+    FlagsMode mode = FlagsModeField::decode(instr->opcode());
+    FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
+    switch (mode) {
+      case kFlags_none:
+        return;
+      case kFlags_set:
+        return AssembleArchBoolean(instr, condition);
+      case kFlags_branch:
+        return AssembleArchBranch(instr, condition);
+    }
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSourcePosition(SourcePositionInstruction* instr) {
+  SourcePosition source_position = instr->source_position();
+  if (source_position == current_source_position_) return;
+  DCHECK(!source_position.IsInvalid());
+  if (!source_position.IsUnknown()) {
+    int code_pos = source_position.raw();
+    masm()->positions_recorder()->RecordPosition(source_position.raw());
+    masm()->positions_recorder()->WriteRecordedPositions();
+    if (FLAG_code_comments) {
+      Vector<char> buffer = Vector<char>::New(256);
+      CompilationInfo* info = linkage()->info();
+      int ln = Script::GetLineNumber(info->script(), code_pos);
+      int cn = Script::GetColumnNumber(info->script(), code_pos);
+      if (info->script()->name()->IsString()) {
+        Handle<String> file(String::cast(info->script()->name()));
+        base::OS::SNPrintF(buffer.start(), buffer.length(), "-- %s:%d:%d --",
+                           file->ToCString().get(), ln, cn);
+      } else {
+        base::OS::SNPrintF(buffer.start(), buffer.length(),
+                           "-- <unknown>:%d:%d --", ln, cn);
+      }
+      masm()->RecordComment(buffer.start());
+    }
+  }
+  current_source_position_ = source_position;
+}
+
+
+void CodeGenerator::AssembleGap(GapInstruction* instr) {
+  for (int i = GapInstruction::FIRST_INNER_POSITION;
+       i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    GapInstruction::InnerPosition inner_pos =
+        static_cast<GapInstruction::InnerPosition>(i);
+    ParallelMove* move = instr->GetParallelMove(inner_pos);
+    if (move != NULL) resolver()->Resolve(move);
+  }
+}
+
+
+void CodeGenerator::PopulateDeoptimizationData(Handle<Code> code_object) {
+  CompilationInfo* info = linkage()->info();
+  int deopt_count = static_cast<int>(deoptimization_states_.size());
+  if (deopt_count == 0) return;
+  Handle<DeoptimizationInputData> data =
+      DeoptimizationInputData::New(isolate(), deopt_count, TENURED);
+
+  Handle<ByteArray> translation_array =
+      translations_.CreateByteArray(isolate()->factory());
+
+  data->SetTranslationByteArray(*translation_array);
+  data->SetInlinedFunctionCount(Smi::FromInt(0));
+  data->SetOptimizationId(Smi::FromInt(info->optimization_id()));
+  // TODO(jarin): The following code was copied over from Lithium; it is not
+  // clear whether the scope or the IsOptimizing condition is really needed.
+  if (info->IsOptimizing()) {
+    // Reference to shared function info does not change between phases.
+    AllowDeferredHandleDereference allow_handle_dereference;
+    data->SetSharedFunctionInfo(*info->shared_info());
+  } else {
+    data->SetSharedFunctionInfo(Smi::FromInt(0));
+  }
+
+  Handle<FixedArray> literals = isolate()->factory()->NewFixedArray(
+      static_cast<int>(deoptimization_literals_.size()), TENURED);
+  {
+    AllowDeferredHandleDereference copy_handles;
+    for (unsigned i = 0; i < deoptimization_literals_.size(); i++) {
+      literals->set(i, *deoptimization_literals_[i]);
+    }
+    data->SetLiteralArray(*literals);
+  }
+
+  // No OSR in Turbofan yet...
+  BailoutId osr_ast_id = BailoutId::None();
+  data->SetOsrAstId(Smi::FromInt(osr_ast_id.ToInt()));
+  data->SetOsrPcOffset(Smi::FromInt(-1));
+
+  // Populate deoptimization entries.
+  for (int i = 0; i < deopt_count; i++) {
+    DeoptimizationState* deoptimization_state = deoptimization_states_[i];
+    data->SetAstId(i, deoptimization_state->bailout_id());
+    CHECK_NE(NULL, deoptimization_states_[i]);
+    data->SetTranslationIndex(
+        i, Smi::FromInt(deoptimization_states_[i]->translation_id()));
+    data->SetArgumentsStackHeight(i, Smi::FromInt(0));
+    data->SetPc(i, Smi::FromInt(deoptimization_state->pc_offset()));
+  }
+
+  code_object->set_deoptimization_data(*data);
+}
+
+
+void CodeGenerator::AddSafepointAndDeopt(Instruction* instr) {
+  CallDescriptor::Flags flags(MiscField::decode(instr->opcode()));
+
+  bool needs_frame_state = (flags & CallDescriptor::kNeedsFrameState);
+
+  RecordSafepoint(
+      instr->pointer_map(), Safepoint::kSimple, 0,
+      needs_frame_state ? Safepoint::kLazyDeopt : Safepoint::kNoLazyDeopt);
+
+  if (flags & CallDescriptor::kNeedsNopAfterCall) {
+    AddNopForSmiCodeInlining();
+  }
+
+  if (needs_frame_state) {
+    MarkLazyDeoptSite();
+    // If the frame state is present, it starts at argument 1 (just after the
+    // code address), so the deoptimization info offset is 1.
+    InstructionOperandConverter converter(this, instr);
+    size_t frame_state_offset = 1;
+    FrameStateDescriptor* descriptor =
+        GetFrameStateDescriptor(instr, frame_state_offset);
+    int pc_offset = masm()->pc_offset();
+    int deopt_state_id = BuildTranslation(instr, pc_offset, frame_state_offset,
+                                          descriptor->state_combine());
+    // If the pre-call frame state differs from the post-call one, produce the
+    // pre-call frame state, too.
+    // TODO(jarin) We might want to avoid building the pre-call frame state
+    // because it is only used to get locals and arguments (by the debugger and
+    // f.arguments), and those are the same in the pre-call and post-call
+    // states.
+    if (descriptor->state_combine() != kIgnoreOutput) {
+      deopt_state_id =
+          BuildTranslation(instr, -1, frame_state_offset, kIgnoreOutput);
+    }
+#if DEBUG
+    // Make sure all the values live in stack slots or they are immediates.
+    // (The values should not live in register because registers are clobbered
+    // by calls.)
+    for (size_t i = 0; i < descriptor->size(); i++) {
+      InstructionOperand* op = instr->InputAt(frame_state_offset + 1 + i);
+      CHECK(op->IsStackSlot() || op->IsImmediate());
+    }
+#endif
+    safepoints()->RecordLazyDeoptimizationIndex(deopt_state_id);
+  }
+}
+
+
+int CodeGenerator::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = static_cast<int>(deoptimization_literals_.size());
+  for (unsigned i = 0; i < deoptimization_literals_.size(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.push_back(literal);
+  return result;
+}
+
+
+FrameStateDescriptor* CodeGenerator::GetFrameStateDescriptor(
+    Instruction* instr, size_t frame_state_offset) {
+  InstructionOperandConverter i(this, instr);
+  InstructionSequence::StateId state_id = InstructionSequence::StateId::FromInt(
+      i.InputInt32(static_cast<int>(frame_state_offset)));
+  return code()->GetFrameStateDescriptor(state_id);
+}
+
+
+void CodeGenerator::BuildTranslationForFrameStateDescriptor(
+    FrameStateDescriptor* descriptor, Instruction* instr,
+    Translation* translation, size_t frame_state_offset,
+    OutputFrameStateCombine state_combine) {
+  // Outer-most state must be added to translation first.
+  if (descriptor->outer_state() != NULL) {
+    BuildTranslationForFrameStateDescriptor(descriptor->outer_state(), instr,
+                                            translation, frame_state_offset,
+                                            kIgnoreOutput);
+  }
+
+  int id = Translation::kSelfLiteralId;
+  if (!descriptor->jsfunction().is_null()) {
+    id = DefineDeoptimizationLiteral(
+        Handle<Object>::cast(descriptor->jsfunction().ToHandleChecked()));
+  }
+
+  switch (descriptor->type()) {
+    case JS_FRAME:
+      translation->BeginJSFrame(
+          descriptor->bailout_id(), id,
+          static_cast<unsigned int>(descriptor->GetHeight(state_combine)));
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(
+          id, static_cast<unsigned int>(descriptor->parameters_count()));
+      break;
+  }
+
+  frame_state_offset += descriptor->outer_state()->GetTotalSize();
+  for (size_t i = 0; i < descriptor->size(); i++) {
+    AddTranslationForOperand(
+        translation, instr,
+        instr->InputAt(static_cast<int>(frame_state_offset + i)));
+  }
+
+  switch (state_combine) {
+    case kPushOutput:
+      DCHECK(instr->OutputCount() == 1);
+      AddTranslationForOperand(translation, instr, instr->OutputAt(0));
+      break;
+    case kIgnoreOutput:
+      break;
+  }
+}
+
+
+int CodeGenerator::BuildTranslation(Instruction* instr, int pc_offset,
+                                    size_t frame_state_offset,
+                                    OutputFrameStateCombine state_combine) {
+  FrameStateDescriptor* descriptor =
+      GetFrameStateDescriptor(instr, frame_state_offset);
+  frame_state_offset++;
+
+  Translation translation(
+      &translations_, static_cast<int>(descriptor->GetFrameCount()),
+      static_cast<int>(descriptor->GetJSFrameCount()), zone());
+  BuildTranslationForFrameStateDescriptor(descriptor, instr, &translation,
+                                          frame_state_offset, state_combine);
+
+  int deoptimization_id = static_cast<int>(deoptimization_states_.size());
+
+  deoptimization_states_.push_back(new (zone()) DeoptimizationState(
+      descriptor->bailout_id(), translation.index(), pc_offset));
+
+  return deoptimization_id;
+}
+
+
+void CodeGenerator::AddTranslationForOperand(Translation* translation,
+                                             Instruction* instr,
+                                             InstructionOperand* op) {
+  if (op->IsStackSlot()) {
+    translation->StoreStackSlot(op->index());
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsRegister()) {
+    InstructionOperandConverter converter(this, instr);
+    translation->StoreRegister(converter.ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    InstructionOperandConverter converter(this, instr);
+    translation->StoreDoubleRegister(converter.ToDoubleRegister(op));
+  } else if (op->IsImmediate()) {
+    InstructionOperandConverter converter(this, instr);
+    Constant constant = converter.ToConstant(op);
+    Handle<Object> constant_object;
+    switch (constant.type()) {
+      case Constant::kInt32:
+        constant_object =
+            isolate()->factory()->NewNumberFromInt(constant.ToInt32());
+        break;
+      case Constant::kFloat64:
+        constant_object = isolate()->factory()->NewNumber(constant.ToFloat64());
+        break;
+      case Constant::kHeapObject:
+        constant_object = constant.ToHeapObject();
+        break;
+      default:
+        UNREACHABLE();
+    }
+    int literal_id = DefineDeoptimizationLiteral(constant_object);
+    translation->StoreLiteral(literal_id);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::MarkLazyDeoptSite() {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+#if !V8_TURBOFAN_BACKEND
+
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssemblePrologue() { UNIMPLEMENTED(); }
+
+
+void CodeGenerator::AssembleReturn() { UNIMPLEMENTED(); }
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  UNIMPLEMENTED();
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
+
+#endif  // !V8_TURBOFAN_BACKEND
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/code-generator.h b/src/compiler/code-generator.h
new file mode 100644
index 0000000..ddc2f9a
--- /dev/null
+++ b/src/compiler/code-generator.h
@@ -0,0 +1,138 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CODE_GENERATOR_H_
+#define V8_COMPILER_CODE_GENERATOR_H_
+
+#include <deque>
+
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/instruction.h"
+#include "src/deoptimizer.h"
+#include "src/macro-assembler.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Generates native code for a sequence of instructions.
+class CodeGenerator FINAL : public GapResolver::Assembler {
+ public:
+  explicit CodeGenerator(InstructionSequence* code);
+
+  // Generate native code.
+  Handle<Code> GenerateCode();
+
+  InstructionSequence* code() const { return code_; }
+  Frame* frame() const { return code()->frame(); }
+  Graph* graph() const { return code()->graph(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Linkage* linkage() const { return code()->linkage(); }
+  Schedule* schedule() const { return code()->schedule(); }
+
+ private:
+  MacroAssembler* masm() { return &masm_; }
+  GapResolver* resolver() { return &resolver_; }
+  SafepointTableBuilder* safepoints() { return &safepoints_; }
+  Zone* zone() const { return code()->zone(); }
+
+  // Checks if {block} will appear directly after {current_block_} when
+  // assembling code, in which case a fall-through can be used.
+  bool IsNextInAssemblyOrder(const BasicBlock* block) const {
+    return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+           block->deferred_ == current_block_->deferred_;
+  }
+
+  // Record a safepoint with the given pointer map.
+  void RecordSafepoint(PointerMap* pointers, Safepoint::Kind kind,
+                       int arguments, Safepoint::DeoptMode deopt_mode);
+
+  // Assemble code for the specified instruction.
+  void AssembleInstruction(Instruction* instr);
+  void AssembleSourcePosition(SourcePositionInstruction* instr);
+  void AssembleGap(GapInstruction* gap);
+
+  // ===========================================================================
+  // ============= Architecture-specific code generation methods. ==============
+  // ===========================================================================
+
+  void AssembleArchInstruction(Instruction* instr);
+  void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
+  void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
+
+  void AssembleDeoptimizerCall(int deoptimization_id);
+
+  // Generates an architecture-specific, descriptor-specific prologue
+  // to set up a stack frame.
+  void AssemblePrologue();
+  // Generates an architecture-specific, descriptor-specific return sequence
+  // to tear down a stack frame.
+  void AssembleReturn();
+
+  // ===========================================================================
+  // ============== Architecture-specific gap resolver methods. ================
+  // ===========================================================================
+
+  // Interface used by the gap resolver to emit moves and swaps.
+  virtual void AssembleMove(InstructionOperand* source,
+                            InstructionOperand* destination) OVERRIDE;
+  virtual void AssembleSwap(InstructionOperand* source,
+                            InstructionOperand* destination) OVERRIDE;
+
+  // ===========================================================================
+  // Deoptimization table construction
+  void AddSafepointAndDeopt(Instruction* instr);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+  FrameStateDescriptor* GetFrameStateDescriptor(Instruction* instr,
+                                                size_t frame_state_offset);
+  int BuildTranslation(Instruction* instr, int pc_offset,
+                       size_t frame_state_offset,
+                       OutputFrameStateCombine state_combine);
+  void BuildTranslationForFrameStateDescriptor(
+      FrameStateDescriptor* descriptor, Instruction* instr,
+      Translation* translation, size_t frame_state_offset,
+      OutputFrameStateCombine state_combine);
+  void AddTranslationForOperand(Translation* translation, Instruction* instr,
+                                InstructionOperand* op);
+  void AddNopForSmiCodeInlining();
+  void EnsureSpaceForLazyDeopt();
+  void MarkLazyDeoptSite();
+
+  // ===========================================================================
+  struct DeoptimizationState : ZoneObject {
+   public:
+    BailoutId bailout_id() const { return bailout_id_; }
+    int translation_id() const { return translation_id_; }
+    int pc_offset() const { return pc_offset_; }
+
+    DeoptimizationState(BailoutId bailout_id, int translation_id, int pc_offset)
+        : bailout_id_(bailout_id),
+          translation_id_(translation_id),
+          pc_offset_(pc_offset) {}
+
+   private:
+    BailoutId bailout_id_;
+    int translation_id_;
+    int pc_offset_;
+  };
+
+  InstructionSequence* code_;
+  BasicBlock* current_block_;
+  SourcePosition current_source_position_;
+  MacroAssembler masm_;
+  GapResolver resolver_;
+  SafepointTableBuilder safepoints_;
+  ZoneDeque<DeoptimizationState*> deoptimization_states_;
+  ZoneDeque<Handle<Object> > deoptimization_literals_;
+  TranslationBuffer translations_;
+  int last_lazy_deopt_pc_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CODE_GENERATOR_H_
diff --git a/src/compiler/common-node-cache.h b/src/compiler/common-node-cache.h
new file mode 100644
index 0000000..1ed2b04
--- /dev/null
+++ b/src/compiler/common-node-cache.h
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_NODE_CACHE_H_
+#define V8_COMPILER_COMMON_NODE_CACHE_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Bundles various caches for common nodes.
+class CommonNodeCache FINAL : public ZoneObject {
+ public:
+  explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
+
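+  // Each Find* method returns the cache slot for the given key; the caller
+  // is expected to allocate the node and store it there if the slot is
+  // still empty.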
+  Node** FindInt32Constant(int32_t value) {
+    return int32_constants_.Find(zone_, value);
+  }
+
+  Node** FindFloat64Constant(double value) {
+    // We canonicalize double constants at the bit representation level.
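+    // (Keying on the raw bits distinguishes 0.0 from -0.0 and gives every
+    // NaN bit pattern a stable cache key.)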
+    return float64_constants_.Find(zone_, bit_cast<int64_t>(value));
+  }
+
+  Node** FindExternalConstant(ExternalReference reference) {
+    return external_constants_.Find(zone_, reference.address());
+  }
+
+  Node** FindNumberConstant(double value) {
+    // We canonicalize double constants at the bit representation level.
+    return number_constants_.Find(zone_, bit_cast<int64_t>(value));
+  }
+
+  Zone* zone() const { return zone_; }
+
+ private:
+  Int32NodeCache int32_constants_;
+  Int64NodeCache float64_constants_;
+  PtrNodeCache external_constants_;
+  Int64NodeCache number_constants_;
+  Zone* zone_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_COMMON_NODE_CACHE_H_
diff --git a/src/compiler/common-operator-unittest.cc b/src/compiler/common-operator-unittest.cc
new file mode 100644
index 0000000..5001770
--- /dev/null
+++ b/src/compiler/common-operator-unittest.cc
@@ -0,0 +1,183 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+
+#include <limits>
+
+#include "src/compiler/operator-properties-inl.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// -----------------------------------------------------------------------------
+// Shared operators.
+
+
+namespace {
+
+struct SharedOperator {
+  const Operator* (CommonOperatorBuilder::*constructor)();
+  IrOpcode::Value opcode;
+  Operator::Properties properties;
+  int value_input_count;
+  int effect_input_count;
+  int control_input_count;
+  int effect_output_count;
+  int control_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const SharedOperator& sop) {
+  return os << IrOpcode::Mnemonic(sop.opcode);
+}
+
+
+const SharedOperator kSharedOperators[] = {
+#define SHARED(Name, properties, value_input_count, effect_input_count,        \
+               control_input_count, effect_output_count, control_output_count) \
+  {                                                                            \
+    &CommonOperatorBuilder::Name, IrOpcode::k##Name, properties,               \
+        value_input_count, effect_input_count, control_input_count,            \
+        effect_output_count, control_output_count                              \
+  }
+    SHARED(Dead, Operator::kFoldable, 0, 0, 0, 0, 1),
+    SHARED(End, Operator::kFoldable, 0, 0, 1, 0, 0),
+    SHARED(Branch, Operator::kFoldable, 1, 0, 1, 0, 2),
+    SHARED(IfTrue, Operator::kFoldable, 0, 0, 1, 0, 1),
+    SHARED(IfFalse, Operator::kFoldable, 0, 0, 1, 0, 1),
+    SHARED(Throw, Operator::kFoldable, 1, 0, 1, 0, 1),
+    SHARED(Return, Operator::kNoProperties, 1, 1, 1, 1, 1),
+    SHARED(ControlEffect, Operator::kPure, 0, 0, 1, 1, 0)
+#undef SHARED
+};
+
+
+class CommonSharedOperatorTest
+    : public TestWithZone,
+      public ::testing::WithParamInterface<SharedOperator> {};
+
+}  // namespace
+
+
+TEST_P(CommonSharedOperatorTest, InstancesAreGloballyShared) {
+  const SharedOperator& sop = GetParam();
+  CommonOperatorBuilder common1(zone());
+  CommonOperatorBuilder common2(zone());
+  EXPECT_EQ((common1.*sop.constructor)(), (common2.*sop.constructor)());
+}
+
+
+TEST_P(CommonSharedOperatorTest, NumberOfInputsAndOutputs) {
+  CommonOperatorBuilder common(zone());
+  const SharedOperator& sop = GetParam();
+  const Operator* op = (common.*sop.constructor)();
+
+  EXPECT_EQ(sop.value_input_count, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(sop.effect_input_count,
+            OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(sop.control_input_count,
+            OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(
+      sop.value_input_count + sop.effect_input_count + sop.control_input_count,
+      OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(sop.effect_output_count,
+            OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(sop.control_output_count,
+            OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(CommonSharedOperatorTest, OpcodeIsCorrect) {
+  CommonOperatorBuilder common(zone());
+  const SharedOperator& sop = GetParam();
+  const Operator* op = (common.*sop.constructor)();
+  EXPECT_EQ(sop.opcode, op->opcode());
+}
+
+
+TEST_P(CommonSharedOperatorTest, Properties) {
+  CommonOperatorBuilder common(zone());
+  const SharedOperator& sop = GetParam();
+  const Operator* op = (common.*sop.constructor)();
+  EXPECT_EQ(sop.properties, op->properties());
+}
+
+
+INSTANTIATE_TEST_CASE_P(CommonOperatorTest, CommonSharedOperatorTest,
+                        ::testing::ValuesIn(kSharedOperators));
+
+
+// -----------------------------------------------------------------------------
+// Other operators.
+
+
+namespace {
+
+class CommonOperatorTest : public TestWithZone {
+ public:
+  CommonOperatorTest() : common_(zone()) {}
+  virtual ~CommonOperatorTest() {}
+
+  CommonOperatorBuilder* common() { return &common_; }
+
+ private:
+  CommonOperatorBuilder common_;
+};
+
+
+const int kArguments[] = {1, 5, 6, 42, 100, 10000, kMaxInt};
+
+const float kFloat32Values[] = {
+    std::numeric_limits<float>::min(), -1.0f, -0.0f, 0.0f, 1.0f,
+    std::numeric_limits<float>::max()};
+
+}  // namespace
+
+
+TEST_F(CommonOperatorTest, Float32Constant) {
+  TRACED_FOREACH(float, value, kFloat32Values) {
+    const Operator* op = common()->Float32Constant(value);
+    EXPECT_FLOAT_EQ(value, OpParameter<float>(op));
+    EXPECT_EQ(0, OperatorProperties::GetValueInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetTotalInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+    EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  }
+}
+
+
+TEST_F(CommonOperatorTest, ValueEffect) {
+  TRACED_FOREACH(int, arguments, kArguments) {
+    const Operator* op = common()->ValueEffect(arguments);
+    EXPECT_EQ(arguments, OperatorProperties::GetValueInputCount(op));
+    EXPECT_EQ(arguments, OperatorProperties::GetTotalInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+    EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+  }
+}
+
+
+TEST_F(CommonOperatorTest, Finish) {
+  TRACED_FOREACH(int, arguments, kArguments) {
+    const Operator* op = common()->Finish(arguments);
+    EXPECT_EQ(1, OperatorProperties::GetValueInputCount(op));
+    EXPECT_EQ(arguments, OperatorProperties::GetEffectInputCount(op));
+    EXPECT_EQ(arguments + 1, OperatorProperties::GetTotalInputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+    EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+    EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/common-operator.cc b/src/compiler/common-operator.cc
new file mode 100644
index 0000000..19792bd
--- /dev/null
+++ b/src/compiler/common-operator.cc
@@ -0,0 +1,252 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+
+#include "src/assembler.h"
+#include "src/base/lazy-instance.h"
+#include "src/compiler/linkage.h"
+#include "src/unique.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// TODO(turbofan): Use size_t instead of int here.
+class ControlOperator : public Operator1<int> {
+ public:
+  ControlOperator(IrOpcode::Value opcode, Properties properties, int inputs,
+                  int outputs, int controls, const char* mnemonic)
+      : Operator1<int>(opcode, properties, inputs, outputs, mnemonic,
+                       controls) {}
+
+  virtual OStream& PrintParameter(OStream& os) const FINAL { return os; }
+};
+
+}  // namespace
+
+
+// Specialization for static parameters of type {ExternalReference}.
+template <>
+struct StaticParameterTraits<ExternalReference> {
+  static OStream& PrintTo(OStream& os, ExternalReference reference) {
+    os << reference.address();
+    // TODO(bmeurer): Move to operator<<(os, ExternalReference)
+    const Runtime::Function* function =
+        Runtime::FunctionForEntry(reference.address());
+    if (function) {
+      os << " <" << function->name << ".entry>";
+    }
+    return os;
+  }
+  static int HashCode(ExternalReference reference) {
+    return bit_cast<int>(static_cast<uint32_t>(
+        reinterpret_cast<uintptr_t>(reference.address())));
+  }
+  static bool Equals(ExternalReference lhs, ExternalReference rhs) {
+    return lhs == rhs;
+  }
+};
+
+
+#define SHARED_OP_LIST(V)               \
+  V(Dead, Operator::kFoldable, 0, 0)    \
+  V(End, Operator::kFoldable, 0, 1)     \
+  V(Branch, Operator::kFoldable, 1, 1)  \
+  V(IfTrue, Operator::kFoldable, 0, 1)  \
+  V(IfFalse, Operator::kFoldable, 0, 1) \
+  V(Throw, Operator::kFoldable, 1, 1)   \
+  V(Return, Operator::kNoProperties, 1, 1)
+
+
+struct CommonOperatorBuilderImpl FINAL {
+#define SHARED(Name, properties, value_input_count, control_input_count)       \
+  struct Name##Operator FINAL : public ControlOperator {                       \
+    Name##Operator()                                                           \
+        : ControlOperator(IrOpcode::k##Name, properties, value_input_count, 0, \
+                          control_input_count, #Name) {}                       \
+  };                                                                           \
+  Name##Operator k##Name##Operator;
+  SHARED_OP_LIST(SHARED)
+#undef SHARED
+
+  struct ControlEffectOperator FINAL : public SimpleOperator {
+    ControlEffectOperator()
+        : SimpleOperator(IrOpcode::kControlEffect, Operator::kPure, 0, 0,
+                         "ControlEffect") {}
+  };
+  ControlEffectOperator kControlEffectOperator;
+};
+
+
+static base::LazyInstance<CommonOperatorBuilderImpl>::type kImpl =
+    LAZY_INSTANCE_INITIALIZER;
+
+
+CommonOperatorBuilder::CommonOperatorBuilder(Zone* zone)
+    : impl_(kImpl.Get()), zone_(zone) {}
+
+
+#define SHARED(Name, properties, value_input_count, control_input_count) \
+  const Operator* CommonOperatorBuilder::Name() {                        \
+    return &impl_.k##Name##Operator;                                     \
+  }
+SHARED_OP_LIST(SHARED)
+#undef SHARED
+
+
+const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
+  // Outputs are formal parameters, plus context, receiver, and JSFunction.
+  const int value_output_count = num_formal_parameters + 3;
+  return new (zone()) ControlOperator(IrOpcode::kStart, Operator::kFoldable, 0,
+                                      value_output_count, 0, "Start");
+}
+
+
+const Operator* CommonOperatorBuilder::Merge(int controls) {
+  return new (zone()) ControlOperator(IrOpcode::kMerge, Operator::kFoldable, 0,
+                                      0, controls, "Merge");
+}
+
+
+const Operator* CommonOperatorBuilder::Loop(int controls) {
+  return new (zone()) ControlOperator(IrOpcode::kLoop, Operator::kFoldable, 0,
+                                      0, controls, "Loop");
+}
+
+
+const Operator* CommonOperatorBuilder::Parameter(int index) {
+  return new (zone()) Operator1<int>(IrOpcode::kParameter, Operator::kPure, 1,
+                                     1, "Parameter", index);
+}
+
+
+const Operator* CommonOperatorBuilder::Int32Constant(int32_t value) {
+  return new (zone()) Operator1<int32_t>(
+      IrOpcode::kInt32Constant, Operator::kPure, 0, 1, "Int32Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Int64Constant(int64_t value) {
+  return new (zone()) Operator1<int64_t>(
+      IrOpcode::kInt64Constant, Operator::kPure, 0, 1, "Int64Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Float32Constant(volatile float value) {
+  return new (zone())
+      Operator1<float>(IrOpcode::kFloat32Constant, Operator::kPure, 0, 1,
+                       "Float32Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Float64Constant(volatile double value) {
+  return new (zone())
+      Operator1<double>(IrOpcode::kFloat64Constant, Operator::kPure, 0, 1,
+                        "Float64Constant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::ExternalConstant(
+    const ExternalReference& value) {
+  return new (zone())
+      Operator1<ExternalReference>(IrOpcode::kExternalConstant, Operator::kPure,
+                                   0, 1, "ExternalConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::NumberConstant(volatile double value) {
+  return new (zone())
+      Operator1<double>(IrOpcode::kNumberConstant, Operator::kPure, 0, 1,
+                        "NumberConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::HeapConstant(
+    const Unique<Object>& value) {
+  return new (zone()) Operator1<Unique<Object> >(
+      IrOpcode::kHeapConstant, Operator::kPure, 0, 1, "HeapConstant", value);
+}
+
+
+const Operator* CommonOperatorBuilder::Phi(MachineType type, int arguments) {
+  DCHECK(arguments > 0);  // Disallow empty phis.
+  return new (zone()) Operator1<MachineType>(IrOpcode::kPhi, Operator::kPure,
+                                             arguments, 1, "Phi", type);
+}
+
+
+const Operator* CommonOperatorBuilder::EffectPhi(int arguments) {
+  DCHECK(arguments > 0);  // Disallow empty phis.
+  return new (zone()) Operator1<int>(IrOpcode::kEffectPhi, Operator::kPure, 0,
+                                     0, "EffectPhi", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::ControlEffect() {
+  return &impl_.kControlEffectOperator;
+}
+
+
+const Operator* CommonOperatorBuilder::ValueEffect(int arguments) {
+  DCHECK(arguments > 0);  // Disallow empty value effects.
+  return new (zone()) SimpleOperator(IrOpcode::kValueEffect, Operator::kPure,
+                                     arguments, 0, "ValueEffect");
+}
+
+
+const Operator* CommonOperatorBuilder::Finish(int arguments) {
+  DCHECK(arguments > 0);  // Disallow empty finishes.
+  return new (zone()) Operator1<int>(IrOpcode::kFinish, Operator::kPure, 1, 1,
+                                     "Finish", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::StateValues(int arguments) {
+  return new (zone()) Operator1<int>(IrOpcode::kStateValues, Operator::kPure,
+                                     arguments, 1, "StateValues", arguments);
+}
+
+
+const Operator* CommonOperatorBuilder::FrameState(
+    FrameStateType type, BailoutId bailout_id,
+    OutputFrameStateCombine state_combine, MaybeHandle<JSFunction> jsfunction) {
+  return new (zone()) Operator1<FrameStateCallInfo>(
+      IrOpcode::kFrameState, Operator::kPure, 4, 1, "FrameState",
+      FrameStateCallInfo(type, bailout_id, state_combine, jsfunction));
+}
+
+
+const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
+  class CallOperator FINAL : public Operator1<const CallDescriptor*> {
+   public:
+    // TODO(titzer): Operator still uses int, whereas CallDescriptor uses
+    // size_t.
+    CallOperator(const CallDescriptor* descriptor, const char* mnemonic)
+        : Operator1<const CallDescriptor*>(
+              IrOpcode::kCall, descriptor->properties(),
+              static_cast<int>(descriptor->InputCount() +
+                               descriptor->FrameStateCount()),
+              static_cast<int>(descriptor->ReturnCount()), mnemonic,
+              descriptor) {}
+
+    virtual OStream& PrintParameter(OStream& os) const OVERRIDE {
+      return os << "[" << *parameter() << "]";
+    }
+  };
+  return new (zone()) CallOperator(descriptor, "Call");
+}
+
+
+const Operator* CommonOperatorBuilder::Projection(size_t index) {
+  return new (zone()) Operator1<size_t>(IrOpcode::kProjection, Operator::kPure,
+                                        1, 1, "Projection", index);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/common-operator.h b/src/compiler/common-operator.h
new file mode 100644
index 0000000..a3659ad
--- /dev/null
+++ b/src/compiler/common-operator.h
@@ -0,0 +1,117 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_OPERATOR_H_
+#define V8_COMPILER_COMMON_OPERATOR_H_
+
+#include "src/compiler/machine-type.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class ExternalReference;
+class OStream;
+
+
+namespace compiler {
+
+// Forward declarations.
+class CallDescriptor;
+struct CommonOperatorBuilderImpl;
+class Operator;
+
+
+// Flag that describes how to combine the current environment with
+// the output of a node to obtain a framestate for lazy bailout.
+enum OutputFrameStateCombine {
+  kPushOutput,   // Push the output on the expression stack.
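+                 // (E.g. a lazy bailout right after a call pushes the
+                 // call's result onto the unoptimized frame's stack.)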
+  kIgnoreOutput  // Use the frame state as-is.
+};
+
+
+// The type of stack frame that a FrameState node represents.
+enum FrameStateType {
+  JS_FRAME,          // Represents an unoptimized JavaScriptFrame.
+  ARGUMENTS_ADAPTOR  // Represents an ArgumentsAdaptorFrame.
+};
+
+
+class FrameStateCallInfo FINAL {
+ public:
+  FrameStateCallInfo(
+      FrameStateType type, BailoutId bailout_id,
+      OutputFrameStateCombine state_combine,
+      MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>())
+      : type_(type),
+        bailout_id_(bailout_id),
+        frame_state_combine_(state_combine),
+        jsfunction_(jsfunction) {}
+
+  FrameStateType type() const { return type_; }
+  BailoutId bailout_id() const { return bailout_id_; }
+  OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
+  MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
+
+ private:
+  FrameStateType type_;
+  BailoutId bailout_id_;
+  OutputFrameStateCombine frame_state_combine_;
+  MaybeHandle<JSFunction> jsfunction_;
+};
+
+
+// Interface for building common operators that can be used at any level of IR,
+// including JavaScript, mid-level, and low-level.
+class CommonOperatorBuilder FINAL {
+ public:
+  explicit CommonOperatorBuilder(Zone* zone);
+
+  const Operator* Dead();
+  const Operator* End();
+  const Operator* Branch();
+  const Operator* IfTrue();
+  const Operator* IfFalse();
+  const Operator* Throw();
+  const Operator* Return();
+
+  const Operator* Start(int num_formal_parameters);
+  const Operator* Merge(int controls);
+  const Operator* Loop(int controls);
+  const Operator* Parameter(int index);
+
+  const Operator* Int32Constant(int32_t);
+  const Operator* Int64Constant(int64_t);
+  const Operator* Float32Constant(volatile float);
+  const Operator* Float64Constant(volatile double);
+  const Operator* ExternalConstant(const ExternalReference&);
+  const Operator* NumberConstant(volatile double);
+  const Operator* HeapConstant(const Unique<Object>&);
+
+  const Operator* Phi(MachineType type, int arguments);
+  const Operator* EffectPhi(int arguments);
+  const Operator* ControlEffect();
+  const Operator* ValueEffect(int arguments);
+  const Operator* Finish(int arguments);
+  const Operator* StateValues(int arguments);
+  const Operator* FrameState(
+      FrameStateType type, BailoutId bailout_id,
+      OutputFrameStateCombine state_combine,
+      MaybeHandle<JSFunction> jsfunction = MaybeHandle<JSFunction>());
+  const Operator* Call(const CallDescriptor* descriptor);
+  const Operator* Projection(size_t index);
+
+ private:
+  Zone* zone() const { return zone_; }
+
+  const CommonOperatorBuilderImpl& impl_;
+  Zone* const zone_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_COMMON_OPERATOR_H_
diff --git a/src/compiler/compiler-test-utils.h b/src/compiler/compiler-test-utils.h
new file mode 100644
index 0000000..437abd6
--- /dev/null
+++ b/src/compiler/compiler-test-utils.h
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMPILER_TEST_UTILS_H_
+#define V8_COMPILER_COMPILER_TEST_UTILS_H_
+
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// The TARGET_TEST(Case, Name) macro works just like
+// TEST(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST(Case, Name) TEST(Case, Name)
+#else
+#define TARGET_TEST(Case, Name) TEST(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TEST_F(Case, Name) macro works just like
+// TEST_F(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST_F(Case, Name) TEST_F(Case, Name)
+#else
+#define TARGET_TEST_F(Case, Name) TEST_F(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TEST_P(Case, Name) macro works just like
+// TEST_P(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TEST_P(Case, Name) TEST_P(Case, Name)
+#else
+#define TARGET_TEST_P(Case, Name) TEST_P(Case, DISABLED_##Name)
+#endif
+
+
+// The TARGET_TYPED_TEST(Case, Name) macro works just like
+// TYPED_TEST(Case, Name), except that the test is disabled
+// if the platform is not a supported TurboFan target.
+#if V8_TURBOFAN_TARGET
+#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, Name)
+#else
+#define TARGET_TYPED_TEST(Case, Name) TYPED_TEST(Case, DISABLED_##Name)
+#endif
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_COMPILER_TEST_UTILS_H_
diff --git a/src/compiler/compiler.gyp b/src/compiler/compiler.gyp
new file mode 100644
index 0000000..ec5ec28
--- /dev/null
+++ b/src/compiler/compiler.gyp
@@ -0,0 +1,60 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'v8_code': 1,
+  },
+  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'compiler-unittests',
+      'type': 'executable',
+      'dependencies': [
+        '../test/test.gyp:run-all-unittests',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        'change-lowering-unittest.cc',
+        'common-operator-unittest.cc',
+        'compiler-test-utils.h',
+        'graph-reducer-unittest.cc',
+        'graph-unittest.cc',
+        'graph-unittest.h',
+        'instruction-selector-unittest.cc',
+        'instruction-selector-unittest.h',
+        'js-builtin-reducer-unittest.cc',
+        'machine-operator-reducer-unittest.cc',
+        'machine-operator-unittest.cc',
+        'simplified-operator-reducer-unittest.cc',
+        'simplified-operator-unittest.cc',
+        'value-numbering-reducer-unittest.cc',
+      ],
+      'conditions': [
+        ['v8_target_arch=="arm"', {
+          'sources': [  ### gcmole(arch:arm) ###
+            'arm/instruction-selector-arm-unittest.cc',
+          ],
+        }],
+        ['v8_target_arch=="arm64"', {
+          'sources': [  ### gcmole(arch:arm64) ###
+            'arm64/instruction-selector-arm64-unittest.cc',
+          ],
+        }],
+        ['v8_target_arch=="ia32"', {
+          'sources': [  ### gcmole(arch:ia32) ###
+            'ia32/instruction-selector-ia32-unittest.cc',
+          ],
+        }],
+        ['v8_target_arch=="x64"', {
+          'sources': [  ### gcmole(arch:x64) ###
+            'x64/instruction-selector-x64-unittest.cc',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/src/compiler/control-builders.cc b/src/compiler/control-builders.cc
new file mode 100644
index 0000000..3b7d05b
--- /dev/null
+++ b/src/compiler/control-builders.cc
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "control-builders.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+void IfBuilder::If(Node* condition) {
+  builder_->NewBranch(condition);
+  else_environment_ = environment()->CopyForConditional();
+}
+
+
+void IfBuilder::Then() { builder_->NewIfTrue(); }
+
+
+void IfBuilder::Else() {
+  builder_->NewMerge();
+  then_environment_ = environment();
+  set_environment(else_environment_);
+  builder_->NewIfFalse();
+}
+
+
+void IfBuilder::End() {
+  then_environment_->Merge(environment());
+  set_environment(then_environment_);
+}
+
+
+void LoopBuilder::BeginLoop() {
+  builder_->NewLoop();
+  loop_environment_ = environment()->CopyForLoop();
+  continue_environment_ = environment()->CopyAsUnreachable();
+  break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void LoopBuilder::Continue() {
+  continue_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void LoopBuilder::EndBody() {
+  continue_environment_->Merge(environment());
+  set_environment(continue_environment_);
+}
+
+
+void LoopBuilder::EndLoop() {
+  loop_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+
+
+void LoopBuilder::BreakUnless(Node* condition) {
+  IfBuilder control_if(builder_);
+  control_if.If(condition);
+  control_if.Then();
+  control_if.Else();
+  Break();
+  control_if.End();
+}
+
+
+void SwitchBuilder::BeginSwitch() {
+  body_environment_ = environment()->CopyAsUnreachable();
+  label_environment_ = environment()->CopyAsUnreachable();
+  break_environment_ = environment()->CopyAsUnreachable();
+  body_environments_.AddBlock(NULL, case_count(), zone());
+}
+
+
+void SwitchBuilder::BeginLabel(int index, Node* condition) {
+  builder_->NewBranch(condition);
+  label_environment_ = environment()->CopyForConditional();
+  builder_->NewIfTrue();
+  body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::EndLabel() {
+  set_environment(label_environment_);
+  builder_->NewIfFalse();
+}
+
+
+void SwitchBuilder::DefaultAt(int index) {
+  label_environment_ = environment()->CopyAsUnreachable();
+  body_environments_[index] = environment();
+}
+
+
+void SwitchBuilder::BeginCase(int index) {
+  set_environment(body_environments_[index]);
+  environment()->Merge(body_environment_);
+}
+
+
+void SwitchBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void SwitchBuilder::EndCase() { body_environment_ = environment(); }
+
+
+void SwitchBuilder::EndSwitch() {
+  break_environment_->Merge(label_environment_);
+  break_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+
+
+void BlockBuilder::BeginBlock() {
+  break_environment_ = environment()->CopyAsUnreachable();
+}
+
+
+void BlockBuilder::Break() {
+  break_environment_->Merge(environment());
+  environment()->MarkAsUnreachable();
+}
+
+
+void BlockBuilder::EndBlock() {
+  break_environment_->Merge(environment());
+  set_environment(break_environment_);
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/control-builders.h b/src/compiler/control-builders.h
new file mode 100644
index 0000000..695282b
--- /dev/null
+++ b/src/compiler/control-builders.h
@@ -0,0 +1,144 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_BUILDERS_H_
+#define V8_COMPILER_CONTROL_BUILDERS_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Base class for all control builders. Also provides a common interface for
+// control builders to handle 'break' and 'continue' statements when they are
+// used to model breakable statements.
+class ControlBuilder {
+ public:
+  explicit ControlBuilder(StructuredGraphBuilder* builder)
+      : builder_(builder) {}
+  virtual ~ControlBuilder() {}
+
+  // Interface for break and continue.
+  virtual void Break() { UNREACHABLE(); }
+  virtual void Continue() { UNREACHABLE(); }
+
+ protected:
+  typedef StructuredGraphBuilder Builder;
+  typedef StructuredGraphBuilder::Environment Environment;
+
+  Zone* zone() const { return builder_->zone(); }
+  Environment* environment() { return builder_->environment(); }
+  void set_environment(Environment* env) { builder_->set_environment(env); }
+
+  Builder* builder_;
+};
+
+
+// Tracks control flow for a conditional statement.
+class IfBuilder : public ControlBuilder {
+ public:
+  explicit IfBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder),
+        then_environment_(NULL),
+        else_environment_(NULL) {}
+
+  // Primitive control commands.
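+  // Typical protocol: If(cond); Then(); <then-body>; Else(); <else-body>;
+  // End().  See LoopBuilder::BreakUnless() for a complete example.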
+  void If(Node* condition);
+  void Then();
+  void Else();
+  void End();
+
+ private:
+  Environment* then_environment_;  // Environment after the 'then' body.
+  Environment* else_environment_;  // Environment for the 'else' body.
+};
+
+
+// Tracks control flow for an iteration statement.
+class LoopBuilder : public ControlBuilder {
+ public:
+  explicit LoopBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder),
+        loop_environment_(NULL),
+        continue_environment_(NULL),
+        break_environment_(NULL) {}
+
+  // Primitive control commands.
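+  // Typical protocol for a while loop: BeginLoop(); <condition>;
+  // BreakUnless(cond); <body>; EndBody(); EndLoop();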
+  void BeginLoop();
+  void EndBody();
+  void EndLoop();
+
+  // Primitive support for break and continue.
+  virtual void Continue();
+  virtual void Break();
+
+  // Compound control command for conditional break.
+  void BreakUnless(Node* condition);
+
+ private:
+  Environment* loop_environment_;      // Environment of the loop header.
+  Environment* continue_environment_;  // Environment after the loop body.
+  Environment* break_environment_;     // Environment after the loop exits.
+};
+
+
+// Tracks control flow for a switch statement.
+class SwitchBuilder : public ControlBuilder {
+ public:
+  explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
+      : ControlBuilder(builder),
+        body_environment_(NULL),
+        label_environment_(NULL),
+        break_environment_(NULL),
+        body_environments_(case_count, zone()) {}
+
+  // Primitive control commands.
+  void BeginSwitch();
+  void BeginLabel(int index, Node* condition);
+  void EndLabel();
+  void DefaultAt(int index);
+  void BeginCase(int index);
+  void EndCase();
+  void EndSwitch();
+
+  // Primitive support for break.
+  virtual void Break();
+
+  // The number of cases within a switch is statically known.
+  int case_count() const { return body_environments_.capacity(); }
+
+ private:
+  Environment* body_environment_;   // Environment after last case body.
+  Environment* label_environment_;  // Environment for next label condition.
+  Environment* break_environment_;  // Environment after the switch exits.
+  ZoneList<Environment*> body_environments_;
+};
+
+
+// Tracks control flow for a block statement.
+class BlockBuilder : public ControlBuilder {
+ public:
+  explicit BlockBuilder(StructuredGraphBuilder* builder)
+      : ControlBuilder(builder), break_environment_(NULL) {}
+
+  // Primitive control commands.
+  void BeginBlock();
+  void EndBlock();
+
+  // Primitive support for break.
+  virtual void Break();
+
+ private:
+  Environment* break_environment_;  // Environment after the block exits.
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CONTROL_BUILDERS_H_
diff --git a/src/compiler/frame.h b/src/compiler/frame.h
new file mode 100644
index 0000000..afcbc37
--- /dev/null
+++ b/src/compiler/frame.h
@@ -0,0 +1,104 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_FRAME_H_
+#define V8_COMPILER_FRAME_H_
+
+#include "src/v8.h"
+
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Collects the spill slot requirements and the allocated general and double
+// registers for a compiled function. Frames are usually populated by the
+// register allocator and are used by Linkage to generate code for the prologue
+// and epilogue to compiled code.
+class Frame {
+ public:
+  Frame()
+      : register_save_area_size_(0),
+        spill_slot_count_(0),
+        double_spill_slot_count_(0),
+        allocated_registers_(NULL),
+        allocated_double_registers_(NULL) {}
+
+  inline int GetSpillSlotCount() { return spill_slot_count_; }
+  inline int GetDoubleSpillSlotCount() { return double_spill_slot_count_; }
+
+  void SetAllocatedRegisters(BitVector* regs) {
+    DCHECK(allocated_registers_ == NULL);
+    allocated_registers_ = regs;
+  }
+
+  void SetAllocatedDoubleRegisters(BitVector* regs) {
+    DCHECK(allocated_double_registers_ == NULL);
+    allocated_double_registers_ = regs;
+  }
+
+  bool DidAllocateDoubleRegisters() {
+    return !allocated_double_registers_->IsEmpty();
+  }
+
+  void SetRegisterSaveAreaSize(int size) {
+    DCHECK(IsAligned(size, kPointerSize));
+    register_save_area_size_ = size;
+  }
+
+  int GetRegisterSaveAreaSize() { return register_save_area_size_; }
+
+  int AllocateSpillSlot(bool is_double) {
+    // If 32-bit, skip one if the new slot is a double.
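+    // (A double then needs two pointer-size slots, so the count is rounded
+    // up to give every double slot an odd index.)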
+    if (is_double) {
+      if (kDoubleSize > kPointerSize) {
+        DCHECK(kDoubleSize == kPointerSize * 2);
+        spill_slot_count_++;
+        spill_slot_count_ |= 1;
+      }
+      double_spill_slot_count_++;
+    }
+    return spill_slot_count_++;
+  }
+
+ private:
+  int register_save_area_size_;
+  int spill_slot_count_;
+  int double_spill_slot_count_;
+  BitVector* allocated_registers_;
+  BitVector* allocated_double_registers_;
+};
+
+
+// Represents an offset from either the stack pointer or frame pointer.
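+// The choice of base register is encoded in the low bit of the offset, so
+// e.g. FromStackPointer(8) stores 8 | kFromSp, from_stack_pointer() answers
+// true, and offset() masks the tag bit away, yielding 8 again.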
+class FrameOffset {
+ public:
+  inline bool from_stack_pointer() { return (offset_ & 1) == kFromSp; }
+  inline bool from_frame_pointer() { return (offset_ & 1) == kFromFp; }
+  inline int offset() { return offset_ & ~1; }
+
+  inline static FrameOffset FromStackPointer(int offset) {
+    DCHECK((offset & 1) == 0);
+    return FrameOffset(offset | kFromSp);
+  }
+
+  inline static FrameOffset FromFramePointer(int offset) {
+    DCHECK((offset & 1) == 0);
+    return FrameOffset(offset | kFromFp);
+  }
+
+ private:
+  explicit FrameOffset(int offset) : offset_(offset) {}
+
+  int offset_;  // Encodes SP or FP in the low order bit.
+
+  static const int kFromSp = 1;
+  static const int kFromFp = 0;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_FRAME_H_
diff --git a/src/compiler/gap-resolver.cc b/src/compiler/gap-resolver.cc
new file mode 100644
index 0000000..f369607
--- /dev/null
+++ b/src/compiler/gap-resolver.cc
@@ -0,0 +1,136 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/gap-resolver.h"
+
+#include <algorithm>
+#include <functional>
+#include <set>
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef ZoneList<MoveOperands>::iterator op_iterator;
+
+#ifdef ENABLE_SLOW_DCHECKS
+// TODO(svenpanne) Brush up InstructionOperand with comparison?
+struct InstructionOperandComparator {
+  bool operator()(const InstructionOperand* x,
+                  const InstructionOperand* y) const {
+    return (x->kind() < y->kind()) ||
+           (x->kind() == y->kind() && x->index() < y->index());
+  }
+};
+#endif
+
+// No operand should be the destination for more than one move.
+static void VerifyMovesAreInjective(ZoneList<MoveOperands>* moves) {
+#ifdef ENABLE_SLOW_DCHECKS
+  std::set<InstructionOperand*, InstructionOperandComparator> seen;
+  for (op_iterator i = moves->begin(); i != moves->end(); ++i) {
+    SLOW_DCHECK(seen.find(i->destination()) == seen.end());
+    seen.insert(i->destination());
+  }
+#endif
+}
+
+
+void GapResolver::Resolve(ParallelMove* parallel_move) const {
+  ZoneList<MoveOperands>* moves = parallel_move->move_operands();
+  // TODO(svenpanne) Use the member version of remove_if when we use real lists.
+  op_iterator end =
+      std::remove_if(moves->begin(), moves->end(),
+                     std::mem_fun_ref(&MoveOperands::IsRedundant));
+  moves->Rewind(static_cast<int>(end - moves->begin()));
+
+  VerifyMovesAreInjective(moves);
+
+  for (op_iterator move = moves->begin(); move != moves->end(); ++move) {
+    if (!move->IsEliminated()) PerformMove(moves, &*move);
+  }
+}
+
+
+void GapResolver::PerformMove(ZoneList<MoveOperands>* moves,
+                              MoveOperands* move) const {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We mark a
+  // move as "pending" on entry to PerformMove in order to detect cycles in the
+  // move graph.  We use operand swaps to resolve cycles, which means that a
+  // call to PerformMove could change any source operand in the move graph.
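+  // E.g. the two-move cycle {A -> B, B -> A} is resolved by a single swap of
+  // A and B instead of introducing a temporary location.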
+  DCHECK(!move->IsPending());
+  DCHECK(!move->IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved on the side.
+  DCHECK_NOT_NULL(move->source());  // Or else it will look eliminated.
+  InstructionOperand* destination = move->destination();
+  move->set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve dependencies.
+  // Any unperformed, unpending move with a source the same as this one's
+  // destination blocks this one so recursively perform all such moves.
+  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+    if (other->Blocks(destination) && !other->IsPending()) {
+      // Though PerformMove can change any source operand in the move graph,
+      // this call cannot create a blocking move via a swap (this loop does not
+      // miss any).  Assume there is a non-blocking move with source A and this
+      // move is blocked on source B and there is a swap of A and B.  Then A and
+      // B must be involved in the same cycle (or they would not be swapped).
+      // Since this move's destination is B and there is only a single incoming
+      // edge to an operand, this move must also be involved in the same cycle.
+      // In that case, the blocking move will be created but will be "pending"
+      // when we return from PerformMove.
+      PerformMove(moves, other);
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as pending, so
+  // restore its destination.
+  move->set_destination(destination);
+
+  // This move's source may have changed due to swaps to resolve cycles and so
+  // it may now be the last move in the cycle.  If so remove it.
+  InstructionOperand* source = move->source();
+  if (source->Equals(destination)) {
+    move->Eliminate();
+    return;
+  }
+
+  // The move may be blocked on a (at most one) pending move, in which case we
+  // have a cycle.  Search for such a blocking move and perform a swap to
+  // resolve it.
+  op_iterator blocker = std::find_if(
+      moves->begin(), moves->end(),
+      std::bind2nd(std::mem_fun_ref(&MoveOperands::Blocks), destination));
+  if (blocker == moves->end()) {
+    // The easy case: This move is not blocked.
+    assembler_->AssembleMove(source, destination);
+    move->Eliminate();
+    return;
+  }
+
+  DCHECK(blocker->IsPending());
+  // Ensure source is a register or both are stack slots, to limit swap cases.
+  if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
+    std::swap(source, destination);
+  }
+  assembler_->AssembleSwap(source, destination);
+  move->Eliminate();
+
+  // Any unperformed (including pending) move with a source of either this
+  // move's source or destination needs to have their source changed to
+  // reflect the state of affairs after the swap.
+  for (op_iterator other = moves->begin(); other != moves->end(); ++other) {
+    if (other->Blocks(source)) {
+      other->set_source(destination);
+    } else if (other->Blocks(destination)) {
+      other->set_source(source);
+    }
+  }
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/gap-resolver.h b/src/compiler/gap-resolver.h
new file mode 100644
index 0000000..98aaab2
--- /dev/null
+++ b/src/compiler/gap-resolver.h
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GAP_RESOLVER_H_
+#define V8_COMPILER_GAP_RESOLVER_H_
+
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GapResolver FINAL {
+ public:
+  // Interface used by the gap resolver to emit moves and swaps.
+  class Assembler {
+   public:
+    virtual ~Assembler() {}
+
+    // Assemble move.
+    virtual void AssembleMove(InstructionOperand* source,
+                              InstructionOperand* destination) = 0;
+    // Assemble swap.
+    virtual void AssembleSwap(InstructionOperand* source,
+                              InstructionOperand* destination) = 0;
+  };
+
+  explicit GapResolver(Assembler* assembler) : assembler_(assembler) {}
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(ParallelMove* parallel_move) const;
+
+ private:
+  // Perform the given move, possibly requiring other moves to satisfy
+  // dependencies.
+  void PerformMove(ZoneList<MoveOperands>* moves, MoveOperands* move) const;
+
+  // Assembler used to emit moves and save registers.
+  Assembler* const assembler_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GAP_RESOLVER_H_
diff --git a/src/compiler/generic-algorithm-inl.h b/src/compiler/generic-algorithm-inl.h
new file mode 100644
index 0000000..a25131f
--- /dev/null
+++ b/src/compiler/generic-algorithm-inl.h
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_
+
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
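+// Traits that make GenericGraphVisit traverse a node via its input edges.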
+template <class N>
+class NodeInputIterationTraits {
+ public:
+  typedef N Node;
+  typedef typename N::Inputs::iterator Iterator;
+
+  static Iterator begin(Node* node) { return node->inputs().begin(); }
+  static Iterator end(Node* node) { return node->inputs().end(); }
+  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+  static Node* to(Iterator iterator) { return *iterator; }
+  static Node* from(Iterator iterator) { return iterator.edge().from(); }
+};
+
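+// Traits that make GenericGraphVisit traverse a node via its use edges.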
+template <class N>
+class NodeUseIterationTraits {
+ public:
+  typedef N Node;
+  typedef typename N::Uses::iterator Iterator;
+
+  static Iterator begin(Node* node) { return node->uses().begin(); }
+  static Iterator end(Node* node) { return node->uses().end(); }
+  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
+  static Node* to(Iterator iterator) { return *iterator; }
+  static Node* from(Iterator iterator) { return iterator.edge().to(); }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GENERIC_ALGORITHM_INL_H_
diff --git a/src/compiler/generic-algorithm.h b/src/compiler/generic-algorithm.h
new file mode 100644
index 0000000..cd4984f
--- /dev/null
+++ b/src/compiler/generic-algorithm.h
@@ -0,0 +1,132 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_ALGORITHM_H_
+#define V8_COMPILER_GENERIC_ALGORITHM_H_
+
+#include <stack>
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
+// post-order. Visitation uses an explicitly allocated stack rather than the
+// execution stack to avoid stack overflow. Although GenericGraphVisit is
+// primarily intended to traverse networks of nodes through their
+// dependencies and uses, it also can be used to visit any graph-like network
+// by specifying custom traits.
+class GenericGraphVisit {
+ public:
+  enum Control {
+    CONTINUE = 0x0,  // Continue depth-first normally
+    SKIP = 0x1,      // Skip this node and its successors
+    REENTER = 0x2,   // Allow reentering this node
+    DEFER = SKIP | REENTER
+  };
+
+  // struct Visitor {
+  //   Control Pre(Traits::Node* current);
+  //   Control Post(Traits::Node* current);
+  //   void PreEdge(Traits::Node* from, int index, Traits::Node* to);
+  //   void PostEdge(Traits::Node* from, int index, Traits::Node* to);
+  // }
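+  // Pre() returning SKIP prunes traversal below the current node; DEFER
+  // additionally allows the node to be visited again later.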
+  template <class Visitor, class Traits, class RootIterator>
+  static void Visit(GenericGraphBase* graph, Zone* zone,
+                    RootIterator root_begin, RootIterator root_end,
+                    Visitor* visitor) {
+    typedef typename Traits::Node Node;
+    typedef typename Traits::Iterator Iterator;
+    typedef std::pair<Iterator, Iterator> NodeState;
+    typedef std::stack<NodeState, ZoneDeque<NodeState> > NodeStateStack;
+    NodeStateStack stack((ZoneDeque<NodeState>(zone)));
+    BoolVector visited(Traits::max_id(graph), false, zone);
+    Node* current = *root_begin;
+    while (true) {
+      DCHECK(current != NULL);
+      const int id = current->id();
+      DCHECK(id >= 0);
+      DCHECK(id < Traits::max_id(graph));  // Must be a valid id.
+      bool visit = !GetVisited(&visited, id);
+      if (visit) {
+        Control control = visitor->Pre(current);
+        visit = !IsSkip(control);
+        if (!IsReenter(control)) SetVisited(&visited, id, true);
+      }
+      Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
+      Iterator end(Traits::end(current));
+      stack.push(NodeState(begin, end));
+      Node* post_order_node = current;
+      while (true) {
+        NodeState top = stack.top();
+        if (top.first == top.second) {
+          if (visit) {
+            Control control = visitor->Post(post_order_node);
+            DCHECK(!IsSkip(control));
+            SetVisited(&visited, post_order_node->id(), !IsReenter(control));
+          }
+          stack.pop();
+          if (stack.empty()) {
+            if (++root_begin == root_end) return;
+            current = *root_begin;
+            break;
+          }
+          post_order_node = Traits::from(stack.top().first);
+          visit = true;
+        } else {
+          visitor->PreEdge(Traits::from(top.first), top.first.edge().index(),
+                           Traits::to(top.first));
+          current = Traits::to(top.first);
+          if (!GetVisited(&visited, current->id())) break;
+        }
+        top = stack.top();
+        visitor->PostEdge(Traits::from(top.first), top.first.edge().index(),
+                          Traits::to(top.first));
+        ++stack.top().first;
+      }
+    }
+  }
+
+  template <class Visitor, class Traits>
+  static void Visit(GenericGraphBase* graph, Zone* zone,
+                    typename Traits::Node* current, Visitor* visitor) {
+    typename Traits::Node* array[] = {current};
+    Visit<Visitor, Traits>(graph, zone, &array[0], &array[1], visitor);
+  }
+
+  template <class B, class S>
+  struct NullNodeVisitor {
+    Control Pre(GenericNode<B, S>* node) { return CONTINUE; }
+    Control Post(GenericNode<B, S>* node) { return CONTINUE; }
+    void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+    void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+  };
+
+ private:
+  static bool IsSkip(Control c) { return c & SKIP; }
+  static bool IsReenter(Control c) { return c & REENTER; }
+
+  // TODO(turbofan): resizing could be optionally templatized away.
+  static void SetVisited(BoolVector* visited, int id, bool value) {
+    if (id >= static_cast<int>(visited->size())) {
+      // Resize and set all values to unvisited.
+      visited->resize((3 * id) / 2, false);
+    }
+    visited->at(id) = value;
+  }
+
+  static bool GetVisited(BoolVector* visited, int id) {
+    if (id >= static_cast<int>(visited->size())) return false;
+    return visited->at(id);
+  }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GENERIC_ALGORITHM_H_
diff --git a/src/compiler/generic-graph.h b/src/compiler/generic-graph.h
new file mode 100644
index 0000000..a555456
--- /dev/null
+++ b/src/compiler/generic-graph.h
@@ -0,0 +1,53 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_GRAPH_H_
+#define V8_COMPILER_GENERIC_GRAPH_H_
+
+#include "src/compiler/generic-node.h"
+
+namespace v8 {
+namespace internal {
+
+class Zone;
+
+namespace compiler {
+
+class GenericGraphBase : public ZoneObject {
+ public:
+  explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {}
+
+  Zone* zone() const { return zone_; }
+
+  NodeId NextNodeID() { return next_node_id_++; }
+  NodeId NodeCount() const { return next_node_id_; }
+
+ private:
+  Zone* zone_;
+  NodeId next_node_id_;
+};
+
+template <class V>
+class GenericGraph : public GenericGraphBase {
+ public:
+  explicit GenericGraph(Zone* zone)
+      : GenericGraphBase(zone), start_(NULL), end_(NULL) {}
+
+  V* start() { return start_; }
+  V* end() { return end_; }
+
+  void SetStart(V* start) { start_ = start; }
+  void SetEnd(V* end) { end_ = end; }
+
+ private:
+  V* start_;
+  V* end_;
+
+  DISALLOW_COPY_AND_ASSIGN(GenericGraph);
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GENERIC_GRAPH_H_
diff --git a/src/compiler/generic-node-inl.h b/src/compiler/generic-node-inl.h
new file mode 100644
index 0000000..c2dc24e
--- /dev/null
+++ b/src/compiler/generic-node-inl.h
@@ -0,0 +1,256 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_INL_H_
+#define V8_COMPILER_GENERIC_NODE_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class B, class S>
+GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count)
+    : BaseClass(graph->zone()),
+      input_count_(input_count),
+      has_appendable_inputs_(false),
+      use_count_(0),
+      first_use_(NULL),
+      last_use_(NULL) {
+  inputs_.static_ = reinterpret_cast<Input*>(this + 1);
+  AssignUniqueID(graph);
+}
+
+template <class B, class S>
+inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) {
+  id_ = graph->NextNodeID();
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::begin() {
+  return typename GenericNode<B, S>::Inputs::iterator(this->node_, 0);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Inputs::iterator
+GenericNode<B, S>::Inputs::end() {
+  return typename GenericNode<B, S>::Inputs::iterator(
+      this->node_, this->node_->InputCount());
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::begin() {
+  return typename GenericNode<B, S>::Uses::iterator(this->node_);
+}
+
+template <class B, class S>
+inline typename GenericNode<B, S>::Uses::iterator
+GenericNode<B, S>::Uses::end() {
+  return typename GenericNode<B, S>::Uses::iterator();
+}
+
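+// Transfers all uses of this node to {replace_to}: every use's input record
+// is repointed, and the whole use list is spliced onto {replace_to}'s list.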
+template <class B, class S>
+void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
+  for (Use* use = first_use_; use != NULL; use = use->next) {
+    use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+  }
+  if (replace_to->last_use_ == NULL) {
+    DCHECK_EQ(NULL, replace_to->first_use_);
+    replace_to->first_use_ = first_use_;
+    replace_to->last_use_ = last_use_;
+  } else if (first_use_ != NULL) {
+    DCHECK_NE(NULL, replace_to->first_use_);
+    replace_to->last_use_->next = first_use_;
+    first_use_->prev = replace_to->last_use_;
+    replace_to->last_use_ = last_use_;
+  }
+  replace_to->use_count_ += use_count_;
+  use_count_ = 0;
+  first_use_ = NULL;
+  last_use_ = NULL;
+}
+
+template <class B, class S>
+template <class UnaryPredicate>
+void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred,
+                                      GenericNode* replace_to) {
+  for (Use* use = first_use_; use != NULL;) {
+    Use* next = use->next;
+    if (pred(static_cast<S*>(use->from))) {
+      RemoveUse(use);
+      replace_to->AppendUse(use);
+      use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+    }
+    use = next;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveAllInputs() {
+  for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end();
+       ++iter) {
+    iter.GetInput()->Update(NULL);
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::TrimInputCount(int new_input_count) {
+  if (new_input_count == input_count_) return;  // Nothing to do.
+
+  DCHECK(new_input_count < input_count_);
+
+  // Update inline inputs.
+  for (int i = new_input_count; i < input_count_; i++) {
+    typename GenericNode<B, S>::Input* input = GetInputRecordPtr(i);
+    input->Update(NULL);
+  }
+  input_count_ = new_input_count;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) {
+  Input* input = GetInputRecordPtr(index);
+  input->Update(new_to);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) {
+  GenericNode* old_to = this->to;
+  if (new_to == old_to) return;  // Nothing to do.
+  // Snip out the use from where it used to be
+  if (old_to != NULL) {
+    old_to->RemoveUse(use);
+  }
+  to = new_to;
+  // And put it into the new node's use list.
+  if (new_to != NULL) {
+    new_to->AppendUse(use);
+  } else {
+    use->next = NULL;
+    use->prev = NULL;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
+  if (!has_appendable_inputs_) {
+    void* deque_buffer = zone->New(sizeof(InputDeque));
+    InputDeque* deque = new (deque_buffer) InputDeque(zone);
+    for (int i = 0; i < input_count_; ++i) {
+      deque->push_back(inputs_.static_[i]);
+    }
+    inputs_.appendable_ = deque;
+    has_appendable_inputs_ = true;
+  }
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
+  EnsureAppendableInputs(zone);
+  Use* new_use = new (zone) Use;
+  Input new_input;
+  new_input.to = to_append;
+  new_input.use = new_use;
+  inputs_.appendable_->push_back(new_input);
+  new_use->input_index = input_count_;
+  new_use->from = this;
+  to_append->AppendUse(new_use);
+  input_count_++;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::InsertInput(Zone* zone, int index,
+                                    GenericNode<B, S>* to_insert) {
+  DCHECK(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  AppendInput(zone, InputAt(InputCount() - 1));
+  for (int i = InputCount() - 1; i > index; --i) {
+    ReplaceInput(i, InputAt(i - 1));
+  }
+  ReplaceInput(index, to_insert);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveInput(int index) {
+  DCHECK(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  for (; index < InputCount() - 1; ++index) {
+    ReplaceInput(index, InputAt(index + 1));
+  }
+  TrimInputCount(InputCount() - 1);
+}
+
+template <class B, class S>
+void GenericNode<B, S>::AppendUse(Use* use) {
+  use->next = NULL;
+  use->prev = last_use_;
+  if (last_use_ == NULL) {
+    first_use_ = use;
+  } else {
+    last_use_->next = use;
+  }
+  last_use_ = use;
+  ++use_count_;
+}
+
+template <class B, class S>
+void GenericNode<B, S>::RemoveUse(Use* use) {
+  if (last_use_ == use) {
+    last_use_ = use->prev;
+  }
+  if (use->prev != NULL) {
+    use->prev->next = use->next;
+  } else {
+    first_use_ = use->next;
+  }
+  if (use->next != NULL) {
+    use->next->prev = use->prev;
+  }
+  --use_count_;
+}
+
+template <class B, class S>
+inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const {
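+  // True iff {owner} is the one and only user of this node.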
+  return first_use_ != NULL && first_use_->from == owner &&
+         first_use_->next == NULL;
+}
+
+template <class B, class S>
+S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count,
+                          S** inputs) {
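+  // The node, its input records and its use records are carved out of a
+  // single zone allocation, laid out as:
+  //   [ GenericNode | Input[input_count] | Use[input_count] ]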
+  size_t node_size = sizeof(GenericNode);
+  size_t inputs_size = input_count * sizeof(Input);
+  size_t uses_size = input_count * sizeof(Use);
+  int size = static_cast<int>(node_size + inputs_size + uses_size);
+  Zone* zone = graph->zone();
+  void* buffer = zone->New(size);
+  S* result = new (buffer) S(graph, input_count);
+  Input* input =
+      reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
+  Use* use =
+      reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+
+  for (int current = 0; current < input_count; ++current) {
+    GenericNode* to = *inputs++;
+    input->to = to;
+    input->use = use;
+    use->input_index = current;
+    use->from = result;
+    to->AppendUse(use);
+    ++use;
+    ++input;
+  }
+  return result;
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GENERIC_NODE_INL_H_
diff --git a/src/compiler/generic-node.h b/src/compiler/generic-node.h
new file mode 100644
index 0000000..3dc324d
--- /dev/null
+++ b/src/compiler/generic-node.h
@@ -0,0 +1,272 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GENERIC_NODE_H_
+#define V8_COMPILER_GENERIC_NODE_H_
+
+#include "src/v8.h"
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GenericGraphBase;
+
+typedef int NodeId;
+
+// A GenericNode<> is the basic primitive of graphs. GenericNodes are chained
+// together by input/use chains, but by default otherwise contain only an
+// identifying number which specific applications of graphs and nodes can use
+// to index auxiliary out-of-line data, especially transient data.
+// Specializations of the templatized GenericNode<> class must provide a base
+// class B that contains all of the members to be made available in each
+// specialized Node instance. GenericNode uses a mixin template pattern to
+// ensure that common accessors and methods expect the derived class S type
+// rather than the GenericNode<B, S> type.
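+//
+// An illustrative specialization (hypothetical names, for exposition only):
+// the base class B must be constructible from a Zone*, and the node type
+// passes itself as S so that accessors return the derived type.
+//
+//   class MyNodeData : public ZoneObject {
+//    public:
+//     explicit MyNodeData(Zone* zone) {}
+//   };
+//
+//   class MyNode FINAL : public GenericNode<MyNodeData, MyNode> {
+//    public:
+//     MyNode(GenericGraphBase* graph, int input_count)
+//         : GenericNode<MyNodeData, MyNode>(graph, input_count) {}
+//   };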
+template <class B, class S>
+class GenericNode : public B {
+ public:
+  typedef B BaseClass;
+  typedef S DerivedClass;
+
+  inline NodeId id() const { return id_; }
+
+  int InputCount() const { return input_count_; }
+  S* InputAt(int index) const {
+    return static_cast<S*>(GetInputRecordPtr(index)->to);
+  }
+  inline void ReplaceInput(int index, GenericNode* new_input);
+  inline void AppendInput(Zone* zone, GenericNode* new_input);
+  inline void InsertInput(Zone* zone, int index, GenericNode* new_input);
+  inline void RemoveInput(int index);
+
+  int UseCount() { return use_count_; }
+  S* UseAt(int index) {
+    DCHECK(index < use_count_);
+    Use* current = first_use_;
+    while (index-- != 0) {
+      current = current->next;
+    }
+    return static_cast<S*>(current->from);
+  }
+  inline void ReplaceUses(GenericNode* replace_to);
+  template <class UnaryPredicate>
+  inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
+  inline void RemoveAllInputs();
+
+  inline void TrimInputCount(int input_count);
+
+  class Inputs {
+   public:
+    class iterator;
+    iterator begin();
+    iterator end();
+
+    explicit Inputs(GenericNode* node) : node_(node) {}
+
+   private:
+    GenericNode* node_;
+  };
+
+  Inputs inputs() { return Inputs(this); }
+
+  class Uses {
+   public:
+    class iterator;
+    iterator begin();
+    iterator end();
+    bool empty() { return begin() == end(); }
+
+    explicit Uses(GenericNode* node) : node_(node) {}
+
+   private:
+    GenericNode* node_;
+  };
+
+  Uses uses() { return Uses(this); }
+
+  class Edge;
+
+  bool OwnedBy(GenericNode* owner) const;
+
+  static S* New(GenericGraphBase* graph, int input_count, S** inputs);
+
+ protected:
+  friend class GenericGraphBase;
+
+  class Use : public ZoneObject {
+   public:
+    GenericNode* from;
+    Use* next;
+    Use* prev;
+    int input_index;
+  };
+
+  class Input {
+   public:
+    GenericNode* to;
+    Use* use;
+
+    void Update(GenericNode* new_to);
+  };
+
+  void EnsureAppendableInputs(Zone* zone);
+
+  Input* GetInputRecordPtr(int index) const {
+    if (has_appendable_inputs_) {
+      return &((*inputs_.appendable_)[index]);
+    } else {
+      return inputs_.static_ + index;
+    }
+  }
+
+  inline void AppendUse(Use* use);
+  inline void RemoveUse(Use* use);
+
+  void* operator new(size_t, void* location) { return location; }
+
+  GenericNode(GenericGraphBase* graph, int input_count);
+
+ private:
+  void AssignUniqueID(GenericGraphBase* graph);
+
+  typedef ZoneDeque<Input> InputDeque;
+
+  NodeId id_;
+  int input_count_ : 31;
+  bool has_appendable_inputs_ : 1;
+  union {
+    // When a node is initially allocated, it uses a static buffer to hold its
+    // inputs under the assumption that the number of inputs will not increase.
+    // When the first input is appended, the static buffer is converted into a
+    // deque to allow for space-efficient growing.
+    Input* static_;
+    InputDeque* appendable_;
+  } inputs_;
+  int use_count_;
+  Use* first_use_;
+  Use* last_use_;
+
+  DISALLOW_COPY_AND_ASSIGN(GenericNode);
+};
+
+// An encapsulation for information associated with a single use of a node as
+// an input from another node, allowing access to both the defining node and
+// the node having the input.
+template <class B, class S>
+class GenericNode<B, S>::Edge {
+ public:
+  S* from() const { return static_cast<S*>(input_->use->from); }
+  S* to() const { return static_cast<S*>(input_->to); }
+  int index() const {
+    int index = input_->use->input_index;
+    DCHECK(index < input_->use->from->input_count_);
+    return index;
+  }
+
+ private:
+  friend class GenericNode<B, S>::Uses::iterator;
+  friend class GenericNode<B, S>::Inputs::iterator;
+
+  explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {}
+
+  typename GenericNode<B, S>::Input* input_;
+};
+
+// A forward iterator to visit the nodes on which a node depends, in the
+// order of its inputs.
+template <class B, class S>
+class GenericNode<B, S>::Inputs::iterator {
+ public:
+  iterator(const typename GenericNode<B, S>::Inputs::iterator& other)  // NOLINT
+      : node_(other.node_),
+        index_(other.index_) {}
+
+  S* operator*() { return static_cast<S*>(GetInput()->to); }
+  typename GenericNode<B, S>::Edge edge() {
+    return typename GenericNode::Edge(GetInput());
+  }
+  bool operator==(const iterator& other) const {
+    return other.index_ == index_ && other.node_ == node_;
+  }
+  bool operator!=(const iterator& other) const { return !(other == *this); }
+  iterator& operator++() {
+    DCHECK(node_ != NULL);
+    DCHECK(index_ < node_->input_count_);
+    ++index_;
+    return *this;
+  }
+  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
+    typename GenericNode<B, S>::Input* input = GetInput();
+    input->Update(new_to);
+    index_++;
+    return *this;
+  }
+  int index() { return index_; }
+
+ private:
+  friend class GenericNode;
+
+  explicit iterator(GenericNode* node, int index)
+      : node_(node), index_(index) {}
+
+  Input* GetInput() const { return node_->GetInputRecordPtr(index_); }
+
+  GenericNode* node_;
+  int index_;
+};
+
+// A forward iterator to visit the uses of a node. The uses are returned in
+// the order in which they were added as inputs.
+template <class B, class S>
+class GenericNode<B, S>::Uses::iterator {
+ public:
+  iterator(const typename GenericNode<B, S>::Uses::iterator& other)  // NOLINT
+      : current_(other.current_),
+        index_(other.index_) {}
+
+  S* operator*() { return static_cast<S*>(current_->from); }
+  typename GenericNode<B, S>::Edge edge() {
+    return typename GenericNode::Edge(CurrentInput());
+  }
+
+  bool operator==(const iterator& other) { return other.current_ == current_; }
+  bool operator!=(const iterator& other) { return other.current_ != current_; }
+  iterator& operator++() {
+    DCHECK(current_ != NULL);
+    index_++;
+    current_ = current_->next;
+    return *this;
+  }
+  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
+    DCHECK(current_ != NULL);
+    index_++;
+    typename GenericNode<B, S>::Input* input = CurrentInput();
+    current_ = current_->next;
+    input->Update(new_to);
+    return *this;
+  }
+  int index() const { return index_; }
+
+ private:
+  friend class GenericNode<B, S>::Uses;
+
+  iterator() : current_(NULL), index_(0) {}
+  explicit iterator(GenericNode<B, S>* node)
+      : current_(node->first_use_), index_(0) {}
+
+  Input* CurrentInput() const {
+    return current_->from->GetInputRecordPtr(current_->input_index);
+  }
+
+  typename GenericNode<B, S>::Use* current_;
+  int index_;
+};
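+
+// Illustrative traversal with the iterator above (a sketch, not part of this
+// change, where Node is a concrete GenericNode specialization): retarget
+// every use of {node} to {replacement}. UpdateToAndIncrement advances past
+// the current use before rewriting it, so the walk stays valid while the use
+// list is being mutated.
+//
+//   for (Node::Uses::iterator it = node->uses().begin();
+//        it != node->uses().end();) {
+//     it.UpdateToAndIncrement(replacement);
+//   }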
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GENERIC_NODE_H_
diff --git a/src/compiler/graph-builder.cc b/src/compiler/graph-builder.cc
new file mode 100644
index 0000000..8992881
--- /dev/null
+++ b/src/compiler/graph-builder.cc
@@ -0,0 +1,249 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-builder.h"
+
+#include "src/compiler.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+StructuredGraphBuilder::StructuredGraphBuilder(Graph* graph,
+                                               CommonOperatorBuilder* common)
+    : GraphBuilder(graph),
+      common_(common),
+      environment_(NULL),
+      current_context_(NULL),
+      exit_control_(NULL) {}
+
+
+Node* StructuredGraphBuilder::MakeNode(const Operator* op,
+                                       int value_input_count,
+                                       Node** value_inputs) {
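+  // The final input buffer is laid out as the value inputs first, followed
+  // by the optional context, frame state, effect and control inputs, in that
+  // order.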
+  DCHECK(op->InputCount() == value_input_count);
+
+  bool has_context = OperatorProperties::HasContextInput(op);
+  bool has_framestate = OperatorProperties::HasFrameStateInput(op);
+  bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
+  bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
+
+  DCHECK(OperatorProperties::GetControlInputCount(op) < 2);
+  DCHECK(OperatorProperties::GetEffectInputCount(op) < 2);
+
+  Node* result = NULL;
+  if (!has_context && !has_framestate && !has_control && !has_effect) {
+    result = graph()->NewNode(op, value_input_count, value_inputs);
+  } else {
+    int input_count_with_deps = value_input_count;
+    if (has_context) ++input_count_with_deps;
+    if (has_framestate) ++input_count_with_deps;
+    if (has_control) ++input_count_with_deps;
+    if (has_effect) ++input_count_with_deps;
+    Node** buffer = zone()->NewArray<Node*>(input_count_with_deps);
+    memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+    Node** current_input = buffer + value_input_count;
+    if (has_context) {
+      *current_input++ = current_context();
+    }
+    if (has_framestate) {
+      // The frame state will be inserted later. Here we misuse
+      // the dead_control node as a sentinel to be later overwritten
+      // with the real frame state.
+      *current_input++ = dead_control();
+    }
+    if (has_effect) {
+      *current_input++ = environment_->GetEffectDependency();
+    }
+    if (has_control) {
+      *current_input++ = environment_->GetControlDependency();
+    }
+    result = graph()->NewNode(op, input_count_with_deps, buffer);
+    if (has_effect) {
+      environment_->UpdateEffectDependency(result);
+    }
+    if (OperatorProperties::HasControlOutput(result->op()) &&
+        !environment()->IsMarkedAsUnreachable()) {
+      environment_->UpdateControlDependency(result);
+    }
+  }
+
+  return result;
+}
+
+
+void StructuredGraphBuilder::UpdateControlDependencyToLeaveFunction(
+    Node* exit) {
+  if (environment()->IsMarkedAsUnreachable()) return;
+  if (exit_control() != NULL) {
+    exit = MergeControl(exit_control(), exit);
+  }
+  environment()->MarkAsUnreachable();
+  set_exit_control(exit);
+}
+
+
+StructuredGraphBuilder::Environment* StructuredGraphBuilder::CopyEnvironment(
+    Environment* env) {
+  return new (zone()) Environment(*env);
+}
+
+
+StructuredGraphBuilder::Environment::Environment(
+    StructuredGraphBuilder* builder, Node* control_dependency)
+    : builder_(builder),
+      control_dependency_(control_dependency),
+      effect_dependency_(control_dependency),
+      values_(zone()) {}
+
+
+StructuredGraphBuilder::Environment::Environment(const Environment& copy)
+    : builder_(copy.builder()),
+      control_dependency_(copy.control_dependency_),
+      effect_dependency_(copy.effect_dependency_),
+      values_(copy.values_) {}
+
+
+void StructuredGraphBuilder::Environment::Merge(Environment* other) {
+  DCHECK(values_.size() == other->values_.size());
+
+  // Nothing to do if the other environment is dead.
+  if (other->IsMarkedAsUnreachable()) return;
+
+  // Resurrect a dead environment by copying the contents of the other one and
+  // placing a singleton merge as the new control dependency.
+  if (this->IsMarkedAsUnreachable()) {
+    Node* other_control = other->control_dependency_;
+    control_dependency_ = graph()->NewNode(common()->Merge(1), other_control);
+    effect_dependency_ = other->effect_dependency_;
+    values_ = other->values_;
+    return;
+  }
+
+  // Create a merge of the control dependencies of both environments and update
+  // the current environment's control dependency accordingly.
+  Node* control = builder_->MergeControl(this->GetControlDependency(),
+                                         other->GetControlDependency());
+  UpdateControlDependency(control);
+
+  // Create a merge of the effect dependencies of both environments and update
+  // the current environment's effect dependency accordingly.
+  Node* effect = builder_->MergeEffect(this->GetEffectDependency(),
+                                       other->GetEffectDependency(), control);
+  UpdateEffectDependency(effect);
+
+  // Introduce Phi nodes for values that have differing inputs at merge
+  // points, extending an existing Phi node where possible.
+  for (int i = 0; i < static_cast<int>(values_.size()); ++i) {
+    values_[i] = builder_->MergeValue(values_[i], other->values_[i], control);
+  }
+}
+
+
+void StructuredGraphBuilder::Environment::PrepareForLoop() {
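+  // Give every tracked value, and the effect dependency, a single-input phi
+  // keyed on the loop header's control; back edges extend these phis later
+  // via MergeValue/MergeEffect when the loop is merged.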
+  Node* control = GetControlDependency();
+  for (int i = 0; i < static_cast<int>(values()->size()); ++i) {
+    Node* phi = builder_->NewPhi(1, values()->at(i), control);
+    values()->at(i) = phi;
+  }
+  Node* effect = builder_->NewEffectPhi(1, GetEffectDependency(), control);
+  UpdateEffectDependency(effect);
+}
+
+
+Node* StructuredGraphBuilder::NewPhi(int count, Node* input, Node* control) {
+  const Operator* phi_op = common()->Phi(kMachAnyTagged, count);
+  Node** buffer = zone()->NewArray<Node*>(count + 1);
+  MemsetPointer(buffer, input, count);
+  buffer[count] = control;
+  return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+// TODO(mstarzinger): Revisit this once we have proper effect states.
+Node* StructuredGraphBuilder::NewEffectPhi(int count, Node* input,
+                                           Node* control) {
+  const Operator* phi_op = common()->EffectPhi(count);
+  Node** buffer = zone()->NewArray<Node*>(count + 1);
+  MemsetPointer(buffer, input, count);
+  buffer[count] = control;
+  return graph()->NewNode(phi_op, count + 1, buffer);
+}
+
+
+Node* StructuredGraphBuilder::MergeControl(Node* control, Node* other) {
+  int inputs = OperatorProperties::GetControlInputCount(control->op()) + 1;
+  if (control->opcode() == IrOpcode::kLoop) {
+    // Control node for loop exists, add input.
+    const Operator* op = common()->Loop(inputs);
+    control->AppendInput(zone(), other);
+    control->set_op(op);
+  } else if (control->opcode() == IrOpcode::kMerge) {
+    // Control node for merge exists, add input.
+    const Operator* op = common()->Merge(inputs);
+    control->AppendInput(zone(), other);
+    control->set_op(op);
+  } else {
+    // Control node is a singleton, introduce a merge.
+    const Operator* op = common()->Merge(inputs);
+    control = graph()->NewNode(op, control, other);
+  }
+  return control;
+}
+
+
+Node* StructuredGraphBuilder::MergeEffect(Node* value, Node* other,
+                                          Node* control) {
+  int inputs = OperatorProperties::GetControlInputCount(control->op());
+  if (value->opcode() == IrOpcode::kEffectPhi &&
+      NodeProperties::GetControlInput(value) == control) {
+    // Phi already exists, add input.
+    value->set_op(common()->EffectPhi(inputs));
+    value->InsertInput(zone(), inputs - 1, other);
+  } else if (value != other) {
+    // Phi does not exist yet, introduce one.
+    value = NewEffectPhi(inputs, value, control);
+    value->ReplaceInput(inputs - 1, other);
+  }
+  return value;
+}
+
+
+Node* StructuredGraphBuilder::MergeValue(Node* value, Node* other,
+                                         Node* control) {
+  int inputs = OperatorProperties::GetControlInputCount(control->op());
+  if (value->opcode() == IrOpcode::kPhi &&
+      NodeProperties::GetControlInput(value) == control) {
+    // Phi already exists, add input.
+    value->set_op(common()->Phi(kMachAnyTagged, inputs));
+    value->InsertInput(zone(), inputs - 1, other);
+  } else if (value != other) {
+    // Phi does not exist yet, introduce one.
+    value = NewPhi(inputs, value, control);
+    value->ReplaceInput(inputs - 1, other);
+  }
+  return value;
+}
+
+
+Node* StructuredGraphBuilder::dead_control() {
+  if (!dead_control_.is_set()) {
+    Node* dead_node = graph()->NewNode(common_->Dead());
+    dead_control_.set(dead_node);
+    return dead_node;
+  }
+  return dead_control_.get();
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/graph-builder.h b/src/compiler/graph-builder.h
new file mode 100644
index 0000000..c966c29
--- /dev/null
+++ b/src/compiler/graph-builder.h
@@ -0,0 +1,230 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_BUILDER_H_
+#define V8_COMPILER_GRAPH_BUILDER_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Node;
+
+// A common base class for anything that creates nodes in a graph.
+class GraphBuilder {
+ public:
+  explicit GraphBuilder(Graph* graph) : graph_(graph) {}
+  virtual ~GraphBuilder() {}
+
+  Node* NewNode(const Operator* op) {
+    return MakeNode(op, 0, static_cast<Node**>(NULL));
+  }
+
+  Node* NewNode(const Operator* op, Node* n1) { return MakeNode(op, 1, &n1); }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+    Node* buffer[] = {n1, n2};
+    return MakeNode(op, arraysize(buffer), buffer);
+  }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+    Node* buffer[] = {n1, n2, n3};
+    return MakeNode(op, arraysize(buffer), buffer);
+  }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+    Node* buffer[] = {n1, n2, n3, n4};
+    return MakeNode(op, arraysize(buffer), buffer);
+  }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5) {
+    Node* buffer[] = {n1, n2, n3, n4, n5};
+    return MakeNode(op, arraysize(buffer), buffer);
+  }
+
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+    return MakeNode(op, arraysize(nodes), nodes);
+  }
+
+  Node* NewNode(const Operator* op, int value_input_count,
+                Node** value_inputs) {
+    return MakeNode(op, value_input_count, value_inputs);
+  }
+
+  Graph* graph() const { return graph_; }
+
+ protected:
+  // Base implementation used by all factory methods.
+  virtual Node* MakeNode(const Operator* op, int value_input_count,
+                         Node** value_inputs) = 0;
+
+ private:
+  Graph* graph_;
+};
+
+
+// The StructuredGraphBuilder produces a high-level IR graph. It is used as
+// the base class for concrete implementations (e.g. the AstGraphBuilder or
+// the StubGraphBuilder).
+class StructuredGraphBuilder : public GraphBuilder {
+ public:
+  StructuredGraphBuilder(Graph* graph, CommonOperatorBuilder* common);
+  virtual ~StructuredGraphBuilder() {}
+
+  // Creates a new Phi node having {count} input values.
+  Node* NewPhi(int count, Node* input, Node* control);
+  Node* NewEffectPhi(int count, Node* input, Node* control);
+
+  // Helpers for merging control, effect or value dependencies.
+  Node* MergeControl(Node* control, Node* other);
+  Node* MergeEffect(Node* value, Node* other, Node* control);
+  Node* MergeValue(Node* value, Node* other, Node* control);
+
+  // Helpers to create new control nodes.
+  Node* NewIfTrue() { return NewNode(common()->IfTrue()); }
+  Node* NewIfFalse() { return NewNode(common()->IfFalse()); }
+  Node* NewMerge() { return NewNode(common()->Merge(1)); }
+  Node* NewLoop() { return NewNode(common()->Loop(1)); }
+  Node* NewBranch(Node* condition) {
+    return NewNode(common()->Branch(), condition);
+  }
+
+ protected:
+  class Environment;
+  friend class Environment;
+  friend class ControlBuilder;
+
+  // The following method creates a new node having the specified operator and
+  // ensures effect and control dependencies are wired up. The dependencies
+  // tracked by the environment might be mutated.
+  virtual Node* MakeNode(const Operator* op, int value_input_count,
+                         Node** value_inputs) FINAL;
+
+  Environment* environment() const { return environment_; }
+  void set_environment(Environment* env) { environment_ = env; }
+
+  Node* current_context() const { return current_context_; }
+  void set_current_context(Node* context) { current_context_ = context; }
+
+  Node* exit_control() const { return exit_control_; }
+  void set_exit_control(Node* node) { exit_control_ = node; }
+
+  Node* dead_control();
+
+  // TODO(mstarzinger): Use phase-local zone instead!
+  Zone* zone() const { return graph()->zone(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  CommonOperatorBuilder* common() const { return common_; }
+
+  // Helper to wrap a Handle<T> into a Unique<T>.
+  template <class T>
+  Unique<T> MakeUnique(Handle<T> object) {
+    return Unique<T>::CreateUninitialized(object);
+  }
+
+  // Support for control flow builders. The concrete type of the environment
+  // depends on the graph builder, but environments themselves are not virtual.
+  virtual Environment* CopyEnvironment(Environment* env);
+
+  // Helper to indicate a node exits the function body.
+  void UpdateControlDependencyToLeaveFunction(Node* exit);
+
+ private:
+  CommonOperatorBuilder* common_;
+  Environment* environment_;
+
+  // Node representing the control dependency for dead code.
+  SetOncePointer<Node> dead_control_;
+
+  // Node representing the current context within the function body.
+  Node* current_context_;
+
+  // Merge of all control nodes that exit the function body.
+  Node* exit_control_;
+
+  DISALLOW_COPY_AND_ASSIGN(StructuredGraphBuilder);
+};
+
+
+// The abstract execution environment contains static knowledge about
+// execution state at arbitrary control-flow points. It allows control flow
+// to be simulated at compile time.
+class StructuredGraphBuilder::Environment : public ZoneObject {
+ public:
+  Environment(StructuredGraphBuilder* builder, Node* control_dependency);
+  Environment(const Environment& copy);
+
+  // Control dependency tracked by this environment.
+  Node* GetControlDependency() { return control_dependency_; }
+  void UpdateControlDependency(Node* dependency) {
+    control_dependency_ = dependency;
+  }
+
+  // Effect dependency tracked by this environment.
+  Node* GetEffectDependency() { return effect_dependency_; }
+  void UpdateEffectDependency(Node* dependency) {
+    effect_dependency_ = dependency;
+  }
+
+  // Mark this environment as being unreachable.
+  void MarkAsUnreachable() {
+    UpdateControlDependency(builder()->dead_control());
+  }
+  bool IsMarkedAsUnreachable() {
+    return GetControlDependency()->opcode() == IrOpcode::kDead;
+  }
+
+  // Merge another environment into this one.
+  void Merge(Environment* other);
+
+  // Copies this environment at a control-flow split point.
+  Environment* CopyForConditional() { return builder()->CopyEnvironment(this); }
+
+  // Copies this environment to a potentially unreachable control-flow point.
+  Environment* CopyAsUnreachable() {
+    Environment* env = builder()->CopyEnvironment(this);
+    env->MarkAsUnreachable();
+    return env;
+  }
+
+  // Copies this environment at a loop header control-flow point.
+  Environment* CopyForLoop() {
+    PrepareForLoop();
+    return builder()->CopyEnvironment(this);
+  }
+
+  Node* GetContext() { return builder_->current_context(); }
+
+ protected:
+  // TODO(mstarzinger): Use phase-local zone instead!
+  Zone* zone() const { return graph()->zone(); }
+  Graph* graph() const { return builder_->graph(); }
+  StructuredGraphBuilder* builder() const { return builder_; }
+  CommonOperatorBuilder* common() { return builder_->common(); }
+  NodeVector* values() { return &values_; }
+
+  // Prepare environment to be used as loop header.
+  void PrepareForLoop();
+
+ private:
+  StructuredGraphBuilder* builder_;
+  Node* control_dependency_;
+  Node* effect_dependency_;
+  NodeVector values_;
+};
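+
+// A typical control-flow split looks roughly as follows inside a concrete
+// builder (an illustrative sketch; the real sequencing lives in subclasses
+// such as the AstGraphBuilder and their control builders):
+//
+//   Environment* else_env = environment()->CopyForConditional();
+//   // ... build nodes for the then-branch in the current environment ...
+//   Environment* then_env = environment();
+//   set_environment(else_env);
+//   // ... build nodes for the else-branch ...
+//   environment()->Merge(then_env);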
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_BUILDER_H_
diff --git a/src/compiler/graph-inl.h b/src/compiler/graph-inl.h
new file mode 100644
index 0000000..571ffb3
--- /dev/null
+++ b/src/compiler/graph-inl.h
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_INL_H_
+#define V8_COMPILER_GRAPH_INL_H_
+
+#include "src/compiler/generic-algorithm-inl.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class Visitor>
+void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
+  GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(
+      this, zone(), node, visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
+  VisitNodeUsesFrom(start(), visitor);
+}
+
+
+template <class Visitor>
+void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
+  GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
+      this, zone(), end(), visitor);
+}
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_GRAPH_INL_H_
diff --git a/src/compiler/graph-reducer-unittest.cc b/src/compiler/graph-reducer-unittest.cc
new file mode 100644
index 0000000..6567203
--- /dev/null
+++ b/src/compiler/graph-reducer-unittest.cc
@@ -0,0 +1,114 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/operator.h"
+#include "src/test/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::_;
+using testing::DefaultValue;
+using testing::Return;
+using testing::Sequence;
+using testing::StrictMock;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+SimpleOperator OP0(0, Operator::kNoWrite, 0, 1, "op0");
+SimpleOperator OP1(1, Operator::kNoProperties, 1, 1, "op1");
+
+
+struct MockReducer : public Reducer {
+  MOCK_METHOD1(Reduce, Reduction(Node*));
+};
+
+}  // namespace
+
+
+class GraphReducerTest : public TestWithZone {
+ public:
+  GraphReducerTest() : graph_(zone()) {}
+
+  static void SetUpTestCase() {
+    TestWithZone::SetUpTestCase();
+    DefaultValue<Reduction>::Set(Reducer::NoChange());
+  }
+
+  static void TearDownTestCase() {
+    DefaultValue<Reduction>::Clear();
+    TestWithZone::TearDownTestCase();
+  }
+
+ protected:
+  void ReduceNode(Node* node, Reducer* r) {
+    GraphReducer reducer(graph());
+    reducer.AddReducer(r);
+    reducer.ReduceNode(node);
+  }
+
+  void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
+    GraphReducer reducer(graph());
+    reducer.AddReducer(r1);
+    reducer.AddReducer(r2);
+    reducer.ReduceNode(node);
+  }
+
+  void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
+    GraphReducer reducer(graph());
+    reducer.AddReducer(r1);
+    reducer.AddReducer(r2);
+    reducer.AddReducer(r3);
+    reducer.ReduceNode(node);
+  }
+
+  Graph* graph() { return &graph_; }
+
+ private:
+  Graph graph_;
+};
+
+
+TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
+  StrictMock<MockReducer> r;
+  Node* node0 = graph()->NewNode(&OP0);
+  Node* node1 = graph()->NewNode(&OP1, node0);
+  Node* node2 = graph()->NewNode(&OP1, node0);
+  EXPECT_CALL(r, Reduce(node1)).WillOnce(Return(Reducer::Replace(node2)));
+  ReduceNode(node1, &r);
+  EXPECT_FALSE(node0->IsDead());
+  EXPECT_TRUE(node1->IsDead());
+  EXPECT_FALSE(node2->IsDead());
+}
+
+
+TEST_F(GraphReducerTest, ReduceOnceForEveryReducer) {
+  StrictMock<MockReducer> r1, r2;
+  Node* node0 = graph()->NewNode(&OP0);
+  EXPECT_CALL(r1, Reduce(node0));
+  EXPECT_CALL(r2, Reduce(node0));
+  ReduceNode(node0, &r1, &r2);
+}
+
+
+TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
+  Sequence s1, s2;
+  StrictMock<MockReducer> r1, r2, r3;
+  Node* node0 = graph()->NewNode(&OP0);
+  EXPECT_CALL(r1, Reduce(node0));
+  EXPECT_CALL(r2, Reduce(node0));
+  EXPECT_CALL(r3, Reduce(node0)).InSequence(s1, s2).WillOnce(
+      Return(Reducer::Changed(node0)));
+  EXPECT_CALL(r1, Reduce(node0)).InSequence(s1);
+  EXPECT_CALL(r2, Reduce(node0)).InSequence(s2);
+  ReduceNode(node0, &r1, &r2, &r3);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph-reducer.cc b/src/compiler/graph-reducer.cc
new file mode 100644
index 0000000..36a54e0
--- /dev/null
+++ b/src/compiler/graph-reducer.cc
@@ -0,0 +1,98 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-reducer.h"
+
+#include <functional>
+
+#include "src/compiler/graph-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+GraphReducer::GraphReducer(Graph* graph)
+    : graph_(graph), reducers_(graph->zone()) {}
+
+
+static bool NodeIdIsLessThan(const Node* node, NodeId id) {
+  return node->id() < id;
+}
+
+
+void GraphReducer::ReduceNode(Node* node) {
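+  // Run all reducers over {node} (and over any replacement node) until no
+  // reducer makes further progress; kMaxAttempts bounds the number of rounds
+  // so a misbehaving set of reducers cannot loop forever.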
+  ZoneVector<Reducer*>::iterator skip = reducers_.end();
+  static const unsigned kMaxAttempts = 16;
+  bool reduce = true;
+  for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
+    if (!reduce) return;
+    reduce = false;  // Assume we don't need to rerun any reducers.
+    int before = graph_->NodeCount();
+    for (ZoneVector<Reducer*>::iterator i = reducers_.begin();
+         i != reducers_.end(); ++i) {
+      if (i == skip) continue;  // Skip this reducer.
+      Reduction reduction = (*i)->Reduce(node);
+      Node* replacement = reduction.replacement();
+      if (replacement == NULL) {
+        // No change from this reducer.
+      } else if (replacement == node) {
+        // {replacement == node} represents an in-place reduction.
+        // Rerun all the reducers except the current one for this node,
+        // as now there may be more opportunities for reduction.
+        reduce = true;
+        skip = i;
+        break;
+      } else {
+        if (node == graph_->start()) graph_->SetStart(replacement);
+        if (node == graph_->end()) graph_->SetEnd(replacement);
+        // If {node} was replaced by an old node, unlink {node} and assume that
+        // {replacement} was already reduced and finish.
+        if (replacement->id() < before) {
+          node->ReplaceUses(replacement);
+          node->Kill();
+          return;
+        }
+        // Otherwise, {node} was replaced by a new node. Replace all old uses of
+        // {node} with {replacement}. New nodes created by this reduction can
+        // use {node}.
+        node->ReplaceUsesIf(
+            std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
+        // Unlink {node} if it's no longer used.
+        if (node->uses().empty()) {
+          node->Kill();
+        }
+        // Rerun all the reductions on the {replacement}.
+        skip = reducers_.end();
+        node = replacement;
+        reduce = true;
+        break;
+      }
+    }
+  }
+}
+
+
+// A helper class to reuse the node traversal algorithm.
+struct GraphReducerVisitor FINAL : public NullNodeVisitor {
+  explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
+  GenericGraphVisit::Control Post(Node* node) {
+    reducer_->ReduceNode(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  GraphReducer* reducer_;
+};
+
+
+void GraphReducer::ReduceGraph() {
+  GraphReducerVisitor visitor(this);
+  // Perform a post-order reduction of all nodes starting from the end.
+  graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+// TODO(titzer): partial graph reductions.
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph-reducer.h b/src/compiler/graph-reducer.h
new file mode 100644
index 0000000..e0e4f7a
--- /dev/null
+++ b/src/compiler/graph-reducer.h
@@ -0,0 +1,80 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REDUCER_H_
+#define V8_COMPILER_GRAPH_REDUCER_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+
+// Represents the result of trying to reduce a node in the graph.
+class Reduction FINAL {
+ public:
+  explicit Reduction(Node* replacement = NULL) : replacement_(replacement) {}
+
+  Node* replacement() const { return replacement_; }
+  bool Changed() const { return replacement() != NULL; }
+
+ private:
+  Node* replacement_;
+};
+
+
+// A reducer can reduce or simplify a given node based on its operator and
+// inputs. This class functions as an extension point for the graph reducer,
+// so that language-specific reductions (e.g. reductions based on types or
+// constant folding of low-level operators) can be integrated into the graph
+// reduction phase.
+class Reducer {
+ public:
+  Reducer() {}
+  virtual ~Reducer() {}
+
+  // Try to reduce a node if possible.
+  virtual Reduction Reduce(Node* node) = 0;
+
+  // Helper functions for subclasses to produce reductions for a node.
+  static Reduction NoChange() { return Reduction(); }
+  static Reduction Replace(Node* node) { return Reduction(node); }
+  static Reduction Changed(Node* node) { return Reduction(node); }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(Reducer);
+};
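+
+
+// An illustrative Reducer subclass (a sketch; TryFoldConstants is a
+// hypothetical helper, not part of this change):
+//
+//   class ConstantFoldingReducer FINAL : public Reducer {
+//    public:
+//     virtual Reduction Reduce(Node* node) OVERRIDE {
+//       Node* folded = TryFoldConstants(node);
+//       return folded != NULL ? Replace(folded) : NoChange();
+//     }
+//   };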
+
+
+// Performs an iterative reduction of a node graph.
+class GraphReducer FINAL {
+ public:
+  explicit GraphReducer(Graph* graph);
+
+  Graph* graph() const { return graph_; }
+
+  void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }
+
+  // Reduce a single node.
+  void ReduceNode(Node* node);
+  // Reduce the whole graph.
+  void ReduceGraph();
+
+ private:
+  Graph* graph_;
+  ZoneVector<Reducer*> reducers_;
+
+  DISALLOW_COPY_AND_ASSIGN(GraphReducer);
+};
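+
+// Usage sketch (illustrative): register reducers, then reduce to a fixed
+// point, either per node or over the whole graph.
+//
+//   GraphReducer reducer(graph);
+//   ConstantFoldingReducer folder;  // hypothetical Reducer subclass
+//   reducer.AddReducer(&folder);
+//   reducer.ReduceGraph();  // or reducer.ReduceNode(node) for one node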
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_REDUCER_H_
diff --git a/src/compiler/graph-replay.cc b/src/compiler/graph-replay.cc
new file mode 100644
index 0000000..494d431
--- /dev/null
+++ b/src/compiler/graph-replay.cc
@@ -0,0 +1,81 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-replay.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#ifdef DEBUG
+
+void GraphReplayPrinter::PrintReplay(Graph* graph) {
+  GraphReplayPrinter replay;
+  PrintF("  Node* nil = graph.NewNode(common_builder.Dead());\n");
+  graph->VisitNodeInputsFromEnd(&replay);
+}
+
+
+GenericGraphVisit::Control GraphReplayPrinter::Pre(Node* node) {
+  PrintReplayOpCreator(node->op());
+  PrintF("  Node* n%d = graph.NewNode(op", node->id());
+  for (int i = 0; i < node->InputCount(); ++i) {
+    PrintF(", nil");
+  }
+  PrintF("); USE(n%d);\n", node->id());
+  return GenericGraphVisit::CONTINUE;
+}
+
+
+void GraphReplayPrinter::PostEdge(Node* from, int index, Node* to) {
+  PrintF("  n%d->ReplaceInput(%d, n%d);\n", from->id(), index, to->id());
+}
+
+
+void GraphReplayPrinter::PrintReplayOpCreator(const Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  const char* builder =
+      IrOpcode::IsCommonOpcode(opcode) ? "common_builder" : "js_builder";
+  const char* mnemonic = IrOpcode::IsCommonOpcode(opcode)
+                             ? IrOpcode::Mnemonic(opcode)
+                             : IrOpcode::Mnemonic(opcode) + 2;
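+  // Opcodes that are not common are JS-level here, and their mnemonics carry
+  // a "JS" prefix; the "+ 2" above skips that prefix so the replay invokes
+  // the js_builder method under its plain name (e.g. "JSAdd" -> "Add").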
+  PrintF("  op = %s.%s(", builder, mnemonic);
+  switch (opcode) {
+    case IrOpcode::kParameter:
+    case IrOpcode::kNumberConstant:
+      PrintF("0");
+      break;
+    case IrOpcode::kLoad:
+      PrintF("unique_name");
+      break;
+    case IrOpcode::kHeapConstant:
+      PrintF("unique_constant");
+      break;
+    case IrOpcode::kPhi:
+      PrintF("%d", op->InputCount());
+      break;
+    case IrOpcode::kEffectPhi:
+      PrintF("%d", OperatorProperties::GetEffectInputCount(op));
+      break;
+    case IrOpcode::kLoop:
+    case IrOpcode::kMerge:
+      PrintF("%d", OperatorProperties::GetControlInputCount(op));
+      break;
+    default:
+      break;
+  }
+  PrintF(");\n");
+}
+
+#endif  // DEBUG
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/graph-replay.h b/src/compiler/graph-replay.h
new file mode 100644
index 0000000..53d5247
--- /dev/null
+++ b/src/compiler/graph-replay.h
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_REPLAY_H_
+#define V8_COMPILER_GRAPH_REPLAY_H_
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+
+// Helper class to print a full replay of a graph. This replay can be used to
+// materialize the same graph within a C++ unit test and hence test subsequent
+// optimization passes on a graph without going through the construction steps.
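+//
+// The printed replay is C++ source of roughly this shape (illustrative):
+//
+//   Node* nil = graph.NewNode(common_builder.Dead());
+//   op = common_builder.Parameter(0);
+//   Node* n2 = graph.NewNode(op, nil); USE(n2);
+//   n2->ReplaceInput(0, n1);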
+class GraphReplayPrinter FINAL : public NullNodeVisitor {
+ public:
+#ifdef DEBUG
+  static void PrintReplay(Graph* graph);
+#else
+  static void PrintReplay(Graph* graph) {}
+#endif
+
+  GenericGraphVisit::Control Pre(Node* node);
+  void PostEdge(Node* from, int index, Node* to);
+
+ private:
+  GraphReplayPrinter() {}
+
+  static void PrintReplayOpCreator(const Operator* op);
+
+  DISALLOW_COPY_AND_ASSIGN(GraphReplayPrinter);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_REPLAY_H_
diff --git a/src/compiler/graph-unittest.cc b/src/compiler/graph-unittest.cc
new file mode 100644
index 0000000..75e70cb
--- /dev/null
+++ b/src/compiler/graph-unittest.cc
@@ -0,0 +1,779 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+
+#include <ostream>  // NOLINT(readability/streams)
+
+#include "src/compiler/node-properties-inl.h"
+
+using testing::_;
+using testing::MakeMatcher;
+using testing::MatcherInterface;
+using testing::MatchResultListener;
+using testing::StringMatchResultListener;
+
+namespace v8 {
+namespace internal {
+
+// TODO(bmeurer): Find a new home for these functions.
+template <typename T>
+inline std::ostream& operator<<(std::ostream& os, const Unique<T>& value) {
+  return os << *value.handle();
+}
+inline std::ostream& operator<<(std::ostream& os,
+                                const ExternalReference& value) {
+  OStringStream ost;
+  compiler::StaticParameterTraits<ExternalReference>::PrintTo(ost, value);
+  return os << ost.c_str();
+}
+
+namespace compiler {
+
+GraphTest::GraphTest(int num_parameters) : common_(zone()), graph_(zone()) {
+  graph()->SetStart(graph()->NewNode(common()->Start(num_parameters)));
+}
+
+
+GraphTest::~GraphTest() {}
+
+
+Node* GraphTest::Parameter(int32_t index) {
+  return graph()->NewNode(common()->Parameter(index), graph()->start());
+}
+
+
+Node* GraphTest::Float32Constant(volatile float value) {
+  return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
+Node* GraphTest::Float64Constant(volatile double value) {
+  return graph()->NewNode(common()->Float64Constant(value));
+}
+
+
+Node* GraphTest::Int32Constant(int32_t value) {
+  return graph()->NewNode(common()->Int32Constant(value));
+}
+
+
+Node* GraphTest::Int64Constant(int64_t value) {
+  return graph()->NewNode(common()->Int64Constant(value));
+}
+
+
+Node* GraphTest::NumberConstant(volatile double value) {
+  return graph()->NewNode(common()->NumberConstant(value));
+}
+
+
+Node* GraphTest::HeapConstant(const Unique<HeapObject>& value) {
+  return graph()->NewNode(common()->HeapConstant(value));
+}
+
+
+Node* GraphTest::FalseConstant() {
+  return HeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+}
+
+
+Node* GraphTest::TrueConstant() {
+  return HeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+}
+
+
+Matcher<Node*> GraphTest::IsFalseConstant() {
+  return IsHeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->false_value()));
+}
+
+
+Matcher<Node*> GraphTest::IsTrueConstant() {
+  return IsHeapConstant(
+      Unique<HeapObject>::CreateImmovable(factory()->true_value()));
+}
+
+namespace {
+
+template <typename T>
+bool PrintMatchAndExplain(const T& value, const char* value_name,
+                          const Matcher<T>& value_matcher,
+                          MatchResultListener* listener) {
+  StringMatchResultListener value_listener;
+  if (!value_matcher.MatchAndExplain(value, &value_listener)) {
+    *listener << "whose " << value_name << " " << value << " doesn't match";
+    if (value_listener.str() != "") {
+      *listener << ", " << value_listener.str();
+    }
+    return false;
+  }
+  return true;
+}
+
+
+class NodeMatcher : public MatcherInterface<Node*> {
+ public:
+  explicit NodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    *os << "is a " << IrOpcode::Mnemonic(opcode_) << " node";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    if (node == NULL) {
+      *listener << "which is NULL";
+      return false;
+    }
+    if (node->opcode() != opcode_) {
+      *listener << "whose opcode is " << IrOpcode::Mnemonic(node->opcode())
+                << " but should have been " << IrOpcode::Mnemonic(opcode_);
+      return false;
+    }
+    return true;
+  }
+
+ private:
+  const IrOpcode::Value opcode_;
+};
+
+
+class IsBranchMatcher FINAL : public NodeMatcher {
+ public:
+  IsBranchMatcher(const Matcher<Node*>& value_matcher,
+                  const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kBranch),
+        value_matcher_(value_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value (";
+    value_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsMergeMatcher FINAL : public NodeMatcher {
+ public:
+  IsMergeMatcher(const Matcher<Node*>& control0_matcher,
+                 const Matcher<Node*>& control1_matcher)
+      : NodeMatcher(IrOpcode::kMerge),
+        control0_matcher_(control0_matcher),
+        control1_matcher_(control1_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose control0 (";
+    control0_matcher_.DescribeTo(os);
+    *os << ") and control1 (";
+    control1_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
+                                 "control0", control0_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node, 1),
+                                 "control1", control1_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> control0_matcher_;
+  const Matcher<Node*> control1_matcher_;
+};
+
+
+class IsControl1Matcher FINAL : public NodeMatcher {
+ public:
+  IsControl1Matcher(IrOpcode::Value opcode,
+                    const Matcher<Node*>& control_matcher)
+      : NodeMatcher(opcode), control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsFinishMatcher FINAL : public NodeMatcher {
+ public:
+  IsFinishMatcher(const Matcher<Node*>& value_matcher,
+                  const Matcher<Node*>& effect_matcher)
+      : NodeMatcher(IrOpcode::kFinish),
+        value_matcher_(value_matcher),
+        effect_matcher_(effect_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value (";
+    value_matcher_.DescribeTo(os);
+    *os << ") and effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> effect_matcher_;
+};
+
+
+template <typename T>
+class IsConstantMatcher FINAL : public NodeMatcher {
+ public:
+  IsConstantMatcher(IrOpcode::Value opcode, const Matcher<T>& value_matcher)
+      : NodeMatcher(opcode), value_matcher_(value_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value (";
+    value_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<T>(node), "value", value_matcher_,
+                                 listener));
+  }
+
+ private:
+  const Matcher<T> value_matcher_;
+};
+
+
+class IsPhiMatcher FINAL : public NodeMatcher {
+ public:
+  IsPhiMatcher(const Matcher<MachineType>& type_matcher,
+               const Matcher<Node*>& value0_matcher,
+               const Matcher<Node*>& value1_matcher,
+               const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kPhi),
+        type_matcher_(type_matcher),
+        value0_matcher_(value0_matcher),
+        value1_matcher_(value1_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose type (";
+    type_matcher_.DescribeTo(os);
+    *os << "), value0 (";
+    value0_matcher_.DescribeTo(os);
+    *os << "), value1 (";
+    value1_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
+                                 type_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value0", value0_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "value1", value1_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<MachineType> type_matcher_;
+  const Matcher<Node*> value0_matcher_;
+  const Matcher<Node*> value1_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsProjectionMatcher FINAL : public NodeMatcher {
+ public:
+  IsProjectionMatcher(const Matcher<size_t>& index_matcher,
+                      const Matcher<Node*>& base_matcher)
+      : NodeMatcher(IrOpcode::kProjection),
+        index_matcher_(index_matcher),
+        base_matcher_(base_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose index (";
+    index_matcher_.DescribeTo(os);
+    *os << ") and base (";
+    base_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<size_t>(node), "index",
+                                 index_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener));
+  }
+
+ private:
+  const Matcher<size_t> index_matcher_;
+  const Matcher<Node*> base_matcher_;
+};
+
+
+class IsCallMatcher FINAL : public NodeMatcher {
+ public:
+  IsCallMatcher(const Matcher<CallDescriptor*>& descriptor_matcher,
+                const Matcher<Node*>& value0_matcher,
+                const Matcher<Node*>& value1_matcher,
+                const Matcher<Node*>& value2_matcher,
+                const Matcher<Node*>& value3_matcher,
+                const Matcher<Node*>& effect_matcher,
+                const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kCall),
+        descriptor_matcher_(descriptor_matcher),
+        value0_matcher_(value0_matcher),
+        value1_matcher_(value1_matcher),
+        value2_matcher_(value2_matcher),
+        value3_matcher_(value3_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose value0 (";
+    value0_matcher_.DescribeTo(os);
+    *os << ") and value1 (";
+    value1_matcher_.DescribeTo(os);
+    *os << ") and value2 (";
+    value2_matcher_.DescribeTo(os);
+    *os << ") and value3 (";
+    value3_matcher_.DescribeTo(os);
+    *os << ") and effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
+                                 "descriptor", descriptor_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "value0", value0_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "value1", value1_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+                                 "value2", value2_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
+                                 "value3", value3_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<CallDescriptor*> descriptor_matcher_;
+  const Matcher<Node*> value0_matcher_;
+  const Matcher<Node*> value1_matcher_;
+  const Matcher<Node*> value2_matcher_;
+  const Matcher<Node*> value3_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsLoadMatcher FINAL : public NodeMatcher {
+ public:
+  IsLoadMatcher(const Matcher<LoadRepresentation>& rep_matcher,
+                const Matcher<Node*>& base_matcher,
+                const Matcher<Node*>& index_matcher,
+                const Matcher<Node*>& effect_matcher)
+      : NodeMatcher(IrOpcode::kLoad),
+        rep_matcher_(rep_matcher),
+        base_matcher_(base_matcher),
+        index_matcher_(index_matcher),
+        effect_matcher_(effect_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose rep (";
+    rep_matcher_.DescribeTo(os);
+    *os << "), base (";
+    base_matcher_.DescribeTo(os);
+    *os << "), index (";
+    index_matcher_.DescribeTo(os);
+    *os << ") and effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
+                                 rep_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "index", index_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener));
+  }
+
+ private:
+  const Matcher<LoadRepresentation> rep_matcher_;
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> index_matcher_;
+  const Matcher<Node*> effect_matcher_;
+};
+
+
+class IsStoreMatcher FINAL : public NodeMatcher {
+ public:
+  IsStoreMatcher(const Matcher<MachineType>& type_matcher,
+                 const Matcher<WriteBarrierKind> write_barrier_matcher,
+                 const Matcher<Node*>& base_matcher,
+                 const Matcher<Node*>& index_matcher,
+                 const Matcher<Node*>& value_matcher,
+                 const Matcher<Node*>& effect_matcher,
+                 const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kStore),
+        type_matcher_(type_matcher),
+        write_barrier_matcher_(write_barrier_matcher),
+        base_matcher_(base_matcher),
+        index_matcher_(index_matcher),
+        value_matcher_(value_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose type (";
+    type_matcher_.DescribeTo(os);
+    *os << "), write barrier (";
+    write_barrier_matcher_.DescribeTo(os);
+    *os << "), base (";
+    base_matcher_.DescribeTo(os);
+    *os << "), index (";
+    index_matcher_.DescribeTo(os);
+    *os << "), value (";
+    value_matcher_.DescribeTo(os);
+    *os << "), effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(
+                OpParameter<StoreRepresentation>(node).machine_type(), "type",
+                type_matcher_, listener) &&
+            PrintMatchAndExplain(
+                OpParameter<StoreRepresentation>(node).write_barrier_kind(),
+                "write barrier", write_barrier_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "index", index_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<MachineType> type_matcher_;
+  const Matcher<WriteBarrierKind> write_barrier_matcher_;
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> index_matcher_;
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsBinopMatcher FINAL : public NodeMatcher {
+ public:
+  IsBinopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& lhs_matcher,
+                 const Matcher<Node*>& rhs_matcher)
+      : NodeMatcher(opcode),
+        lhs_matcher_(lhs_matcher),
+        rhs_matcher_(rhs_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose lhs (";
+    lhs_matcher_.DescribeTo(os);
+    *os << ") and rhs (";
+    rhs_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
+                                 lhs_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1), "rhs",
+                                 rhs_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> lhs_matcher_;
+  const Matcher<Node*> rhs_matcher_;
+};
+
+
+class IsUnopMatcher FINAL : public NodeMatcher {
+ public:
+  IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
+      : NodeMatcher(opcode), input_matcher_(input_matcher) {}
+
+  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose input (";
+    input_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  virtual bool MatchAndExplain(Node* node, MatchResultListener* listener) const
+      OVERRIDE {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "input", input_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> input_matcher_;
+};
+}  // namespace
+
+
+Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsBranchMatcher(value_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+                       const Matcher<Node*>& control1_matcher) {
+  return MakeMatcher(new IsMergeMatcher(control0_matcher, control1_matcher));
+}
+
+
+Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsControl1Matcher(IrOpcode::kIfTrue, control_matcher));
+}
+
+
+Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(
+      new IsControl1Matcher(IrOpcode::kIfFalse, control_matcher));
+}
+
+
+Matcher<Node*> IsControlEffect(const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(
+      new IsControl1Matcher(IrOpcode::kControlEffect, control_matcher));
+}
+
+
+Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher) {
+  return MakeMatcher(new IsUnopMatcher(IrOpcode::kValueEffect, value_matcher));
+}
+
+
+Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& effect_matcher) {
+  return MakeMatcher(new IsFinishMatcher(value_matcher, effect_matcher));
+}
+
+
+Matcher<Node*> IsExternalConstant(
+    const Matcher<ExternalReference>& value_matcher) {
+  return MakeMatcher(new IsConstantMatcher<ExternalReference>(
+      IrOpcode::kExternalConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsHeapConstant(
+    const Matcher<Unique<HeapObject> >& value_matcher) {
+  return MakeMatcher(new IsConstantMatcher<Unique<HeapObject> >(
+      IrOpcode::kHeapConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<int32_t>(IrOpcode::kInt32Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<int64_t>(IrOpcode::kInt64Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<float>(IrOpcode::kFloat32Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<double>(IrOpcode::kFloat64Constant, value_matcher));
+}
+
+
+Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher) {
+  return MakeMatcher(
+      new IsConstantMatcher<double>(IrOpcode::kNumberConstant, value_matcher));
+}
+
+
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+                     const Matcher<Node*>& value0_matcher,
+                     const Matcher<Node*>& value1_matcher,
+                     const Matcher<Node*>& merge_matcher) {
+  return MakeMatcher(new IsPhiMatcher(type_matcher, value0_matcher,
+                                      value1_matcher, merge_matcher));
+}
+
+
+Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
+                            const Matcher<Node*>& base_matcher) {
+  return MakeMatcher(new IsProjectionMatcher(index_matcher, base_matcher));
+}
+
+
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+                      const Matcher<Node*>& value0_matcher,
+                      const Matcher<Node*>& value1_matcher,
+                      const Matcher<Node*>& value2_matcher,
+                      const Matcher<Node*>& value3_matcher,
+                      const Matcher<Node*>& effect_matcher,
+                      const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsCallMatcher(
+      descriptor_matcher, value0_matcher, value1_matcher, value2_matcher,
+      value3_matcher, effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+                      const Matcher<Node*>& base_matcher,
+                      const Matcher<Node*>& index_matcher,
+                      const Matcher<Node*>& effect_matcher) {
+  return MakeMatcher(new IsLoadMatcher(rep_matcher, base_matcher, index_matcher,
+                                       effect_matcher));
+}
+
+
+Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
+                       const Matcher<WriteBarrierKind>& write_barrier_matcher,
+                       const Matcher<Node*>& base_matcher,
+                       const Matcher<Node*>& index_matcher,
+                       const Matcher<Node*>& value_matcher,
+                       const Matcher<Node*>& effect_matcher,
+                       const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsStoreMatcher(
+      type_matcher, write_barrier_matcher, base_matcher, index_matcher,
+      value_matcher, effect_matcher, control_matcher));
+}
+
+
+#define IS_BINOP_MATCHER(Name)                                            \
+  Matcher<Node*> Is##Name(const Matcher<Node*>& lhs_matcher,              \
+                          const Matcher<Node*>& rhs_matcher) {            \
+    return MakeMatcher(                                                   \
+        new IsBinopMatcher(IrOpcode::k##Name, lhs_matcher, rhs_matcher)); \
+  }
+IS_BINOP_MATCHER(NumberLessThan)
+IS_BINOP_MATCHER(Word32And)
+IS_BINOP_MATCHER(Word32Sar)
+IS_BINOP_MATCHER(Word32Shl)
+IS_BINOP_MATCHER(Word32Ror)
+IS_BINOP_MATCHER(Word32Equal)
+IS_BINOP_MATCHER(Word64And)
+IS_BINOP_MATCHER(Word64Sar)
+IS_BINOP_MATCHER(Word64Shl)
+IS_BINOP_MATCHER(Word64Equal)
+IS_BINOP_MATCHER(Int32AddWithOverflow)
+IS_BINOP_MATCHER(Int32Mul)
+IS_BINOP_MATCHER(Uint32LessThanOrEqual)
+#undef IS_BINOP_MATCHER
+
+
+#define IS_UNOP_MATCHER(Name)                                                \
+  Matcher<Node*> Is##Name(const Matcher<Node*>& input_matcher) {             \
+    return MakeMatcher(new IsUnopMatcher(IrOpcode::k##Name, input_matcher)); \
+  }
+IS_UNOP_MATCHER(ChangeFloat64ToInt32)
+IS_UNOP_MATCHER(ChangeFloat64ToUint32)
+IS_UNOP_MATCHER(ChangeInt32ToFloat64)
+IS_UNOP_MATCHER(ChangeInt32ToInt64)
+IS_UNOP_MATCHER(ChangeUint32ToFloat64)
+IS_UNOP_MATCHER(ChangeUint32ToUint64)
+IS_UNOP_MATCHER(TruncateFloat64ToInt32)
+IS_UNOP_MATCHER(TruncateInt64ToInt32)
+IS_UNOP_MATCHER(Float64Sqrt)
+#undef IS_UNOP_MATCHER
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
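A usage sketch (assuming the GraphTest fixture declared in the header below
hooks into gtest as usual): the factories above return ordinary gmock
matchers, so they take plain values or nested matchers and compose freely.

    using ::testing::_;

    TEST_F(GraphTest, MatcherSketch) {
      Node* c = Int32Constant(1);
      EXPECT_THAT(c, IsInt32Constant(1));  // match the exact value
      EXPECT_THAT(c, IsInt32Constant(_));  // or any value, via a nested matcher
      // Matchers nest, so IsWord32And(IsInt32Constant(1), _) would match a
      // Word32And node whose left input is the constant 1.
    }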
diff --git a/src/compiler/graph-unittest.h b/src/compiler/graph-unittest.h
new file mode 100644
index 0000000..1dc9c3d
--- /dev/null
+++ b/src/compiler/graph-unittest.h
@@ -0,0 +1,140 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_UNITTEST_H_
+#define V8_COMPILER_GRAPH_UNITTEST_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/test/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class HeapObject;
+template <class T>
+class Unique;
+
+namespace compiler {
+
+using ::testing::Matcher;
+
+
+class GraphTest : public TestWithContext, public TestWithZone {
+ public:
+  explicit GraphTest(int parameters = 1);
+  virtual ~GraphTest();
+
+ protected:
+  Node* Parameter(int32_t index);
+  Node* Float32Constant(volatile float value);
+  Node* Float64Constant(volatile double value);
+  Node* Int32Constant(int32_t value);
+  Node* Int64Constant(int64_t value);
+  Node* NumberConstant(volatile double value);
+  Node* HeapConstant(const Unique<HeapObject>& value);
+  Node* FalseConstant();
+  Node* TrueConstant();
+
+  Matcher<Node*> IsFalseConstant();
+  Matcher<Node*> IsTrueConstant();
+
+  CommonOperatorBuilder* common() { return &common_; }
+  Graph* graph() { return &graph_; }
+
+ private:
+  CommonOperatorBuilder common_;
+  Graph graph_;
+};
+
+
+Matcher<Node*> IsBranch(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsMerge(const Matcher<Node*>& control0_matcher,
+                       const Matcher<Node*>& control1_matcher);
+Matcher<Node*> IsIfTrue(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsIfFalse(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsControlEffect(const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsValueEffect(const Matcher<Node*>& value_matcher);
+Matcher<Node*> IsFinish(const Matcher<Node*>& value_matcher,
+                        const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsExternalConstant(
+    const Matcher<ExternalReference>& value_matcher);
+Matcher<Node*> IsHeapConstant(
+    const Matcher<Unique<HeapObject> >& value_matcher);
+Matcher<Node*> IsFloat32Constant(const Matcher<float>& value_matcher);
+Matcher<Node*> IsFloat64Constant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsInt32Constant(const Matcher<int32_t>& value_matcher);
+Matcher<Node*> IsInt64Constant(const Matcher<int64_t>& value_matcher);
+Matcher<Node*> IsNumberConstant(const Matcher<double>& value_matcher);
+Matcher<Node*> IsPhi(const Matcher<MachineType>& type_matcher,
+                     const Matcher<Node*>& value0_matcher,
+                     const Matcher<Node*>& value1_matcher,
+                     const Matcher<Node*>& merge_matcher);
+Matcher<Node*> IsProjection(const Matcher<size_t>& index_matcher,
+                            const Matcher<Node*>& base_matcher);
+Matcher<Node*> IsCall(const Matcher<CallDescriptor*>& descriptor_matcher,
+                      const Matcher<Node*>& value0_matcher,
+                      const Matcher<Node*>& value1_matcher,
+                      const Matcher<Node*>& value2_matcher,
+                      const Matcher<Node*>& value3_matcher,
+                      const Matcher<Node*>& effect_matcher,
+                      const Matcher<Node*>& control_matcher);
+
+Matcher<Node*> IsNumberLessThan(const Matcher<Node*>& lhs_matcher,
+                                const Matcher<Node*>& rhs_matcher);
+
+Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
+                      const Matcher<Node*>& base_matcher,
+                      const Matcher<Node*>& index_matcher,
+                      const Matcher<Node*>& effect_matcher);
+Matcher<Node*> IsStore(const Matcher<MachineType>& type_matcher,
+                       const Matcher<WriteBarrierKind>& write_barrier_matcher,
+                       const Matcher<Node*>& base_matcher,
+                       const Matcher<Node*>& index_matcher,
+                       const Matcher<Node*>& value_matcher,
+                       const Matcher<Node*>& effect_matcher,
+                       const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsWord32And(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Sar(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Shl(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Ror(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord32Equal(const Matcher<Node*>& lhs_matcher,
+                             const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64And(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Shl(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Sar(const Matcher<Node*>& lhs_matcher,
+                           const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsWord64Equal(const Matcher<Node*>& lhs_matcher,
+                             const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32AddWithOverflow(const Matcher<Node*>& lhs_matcher,
+                                      const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsInt32Mul(const Matcher<Node*>& lhs_matcher,
+                          const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsUint32LessThanOrEqual(const Matcher<Node*>& lhs_matcher,
+                                       const Matcher<Node*>& rhs_matcher);
+Matcher<Node*> IsChangeFloat64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeFloat64ToUint32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeInt32ToInt64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToFloat64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsChangeUint32ToUint64(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateFloat64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsTruncateInt64ToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsFloat64Sqrt(const Matcher<Node*>& input_matcher);
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_UNITTEST_H_
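A derived fixture can pass a different parameter count to GraphTest and build
control flow through common(); a hedged sketch (the fixture and test names are
hypothetical, and Branch()/IfTrue()/IfFalse()/Merge(int) are assumed to have
the signatures from common-operator.h at this revision):

    class DiamondTest : public GraphTest {
     public:
      DiamondTest() : GraphTest(1) {}  // a graph with one parameter
    };

    TEST_F(DiamondTest, BranchSketch) {
      Node* branch =
          graph()->NewNode(common()->Branch(), Parameter(0), graph()->start());
      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
      Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
      // A plain Node* converts to an exact-equality Matcher<Node*>.
      EXPECT_THAT(merge, IsMerge(IsIfTrue(branch), IsIfFalse(branch)));
    }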
diff --git a/src/compiler/graph-visualizer.cc b/src/compiler/graph-visualizer.cc
new file mode 100644
index 0000000..10d6698
--- /dev/null
+++ b/src/compiler/graph-visualizer.cc
@@ -0,0 +1,282 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-visualizer.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define DEAD_COLOR "#999999"
+
+class GraphVisualizer : public NullNodeVisitor {
+ public:
+  GraphVisualizer(OStream& os, Zone* zone, const Graph* graph);  // NOLINT
+
+  void Print();
+
+  GenericGraphVisit::Control Pre(Node* node);
+  GenericGraphVisit::Control PreEdge(Node* from, int index, Node* to);
+
+ private:
+  void AnnotateNode(Node* node);
+  void PrintEdge(Node::Edge edge);
+
+  Zone* zone_;
+  NodeSet all_nodes_;
+  NodeSet white_nodes_;
+  bool use_to_def_;
+  OStream& os_;
+  const Graph* const graph_;
+
+  DISALLOW_COPY_AND_ASSIGN(GraphVisualizer);
+};
+
+
+static Node* GetControlCluster(Node* node) {
+  if (OperatorProperties::IsBasicBlockBegin(node->op())) {
+    return node;
+  } else if (OperatorProperties::GetControlInputCount(node->op()) == 1) {
+    Node* control = NodeProperties::GetControlInput(node, 0);
+    return OperatorProperties::IsBasicBlockBegin(control->op()) ? control
+                                                                : NULL;
+  } else {
+    return NULL;
+  }
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::Pre(Node* node) {
+  if (all_nodes_.count(node) == 0) {
+    Node* control_cluster = GetControlCluster(node);
+    if (control_cluster != NULL) {
+      os_ << "  subgraph cluster_BasicBlock" << control_cluster->id() << " {\n";
+    }
+    os_ << "  ID" << node->id() << " [\n";
+    AnnotateNode(node);
+    os_ << "  ]\n";
+    if (control_cluster != NULL) os_ << "  }\n";
+    all_nodes_.insert(node);
+    if (use_to_def_) white_nodes_.insert(node);
+  }
+  return GenericGraphVisit::CONTINUE;
+}
+
+
+GenericGraphVisit::Control GraphVisualizer::PreEdge(Node* from, int index,
+                                                    Node* to) {
+  if (use_to_def_) return GenericGraphVisit::CONTINUE;
+  // When going from def to use, only consider white -> other edges, which are
+  // the dead nodes that use live nodes. We're probably not interested in
+  // dead nodes that only use other dead nodes.
+  if (white_nodes_.count(from) > 0) return GenericGraphVisit::CONTINUE;
+  return GenericGraphVisit::SKIP;
+}
+
+
+class Escaped {
+ public:
+  explicit Escaped(const OStringStream& os) : str_(os.c_str()) {}
+
+  friend OStream& operator<<(OStream& os, const Escaped& e) {
+    for (const char* s = e.str_; *s != '\0'; ++s) {
+      if (needs_escape(*s)) os << "\\";
+      os << *s;
+    }
+    return os;
+  }
+
+ private:
+  static bool needs_escape(char ch) {
+    switch (ch) {
+      case '>':
+      case '<':
+      case '|':
+      case '}':
+      case '{':
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  const char* const str_;
+};
+
+
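+// Heuristic: for a phi or effect phi whose control input is not a plain
+// merge (i.e. it hangs off a loop header), every value input except the
+// first arrives around the back edge, as does every non-first control
+// input of a loop node. PrintEdge() relaxes such edges with
+// constraint=false so they do not stretch the ranking.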
+static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
+  if (from->opcode() == IrOpcode::kPhi ||
+      from->opcode() == IrOpcode::kEffectPhi) {
+    Node* control = NodeProperties::GetControlInput(from, 0);
+    return control->opcode() != IrOpcode::kMerge && control != to && index != 0;
+  } else if (from->opcode() == IrOpcode::kLoop) {
+    return index != 0;
+  } else {
+    return false;
+  }
+}
+
+
+void GraphVisualizer::AnnotateNode(Node* node) {
+  if (!use_to_def_) {
+    os_ << "    style=\"filled\"\n"
+        << "    fillcolor=\"" DEAD_COLOR "\"\n";
+  }
+
+  os_ << "    shape=\"record\"\n";
+  switch (node->opcode()) {
+    case IrOpcode::kEnd:
+    case IrOpcode::kDead:
+    case IrOpcode::kStart:
+      os_ << "    style=\"diagonals\"\n";
+      break;
+    case IrOpcode::kMerge:
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+    case IrOpcode::kLoop:
+      os_ << "    style=\"rounded\"\n";
+      break;
+    default:
+      break;
+  }
+
+  OStringStream label;
+  label << *node->op();
+  os_ << "    label=\"{{#" << node->id() << ":" << Escaped(label);
+
+  InputIter i = node->inputs().begin();
+  for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
+       ++i, j--) {
+    os_ << "|<I" << i.index() << ">#" << (*i)->id();
+  }
+  for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
+       ++i, j--) {
+    os_ << "|<I" << i.index() << ">X #" << (*i)->id();
+  }
+  for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
+       ++i, j--) {
+    os_ << "|<I" << i.index() << ">F #" << (*i)->id();
+  }
+  for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
+       ++i, j--) {
+    os_ << "|<I" << i.index() << ">E #" << (*i)->id();
+  }
+
+  if (!use_to_def_ || OperatorProperties::IsBasicBlockBegin(node->op()) ||
+      GetControlCluster(node) == NULL) {
+    for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
+         ++i, j--) {
+      os_ << "|<I" << i.index() << ">C #" << (*i)->id();
+    }
+  }
+  os_ << "}";
+
+  if (FLAG_trace_turbo_types && !NodeProperties::IsControl(node)) {
+    Bounds bounds = NodeProperties::GetBounds(node);
+    OStringStream upper;
+    bounds.upper->PrintTo(upper);
+    OStringStream lower;
+    bounds.lower->PrintTo(lower);
+    os_ << "|" << Escaped(upper) << "|" << Escaped(lower);
+  }
+  os_ << "}\"\n";
+}
+
+
+void GraphVisualizer::PrintEdge(Node::Edge edge) {
+  Node* from = edge.from();
+  int index = edge.index();
+  Node* to = edge.to();
+  bool unconstrained = IsLikelyBackEdge(from, index, to);
+  os_ << "  ID" << from->id();
+  if (all_nodes_.count(to) == 0) {
+    os_ << ":I" << index << ":n -> DEAD_INPUT";
+  } else if (OperatorProperties::IsBasicBlockBegin(from->op()) ||
+             GetControlCluster(from) == NULL ||
+             (OperatorProperties::GetControlInputCount(from->op()) > 0 &&
+              NodeProperties::GetControlInput(from) != to)) {
+    os_ << ":I" << index << ":n -> ID" << to->id() << ":s"
+        << "[" << (unconstrained ? "constraint=false, " : "")
+        << (NodeProperties::IsControlEdge(edge) ? "style=bold, " : "")
+        << (NodeProperties::IsEffectEdge(edge) ? "style=dotted, " : "")
+        << (NodeProperties::IsContextEdge(edge) ? "style=dashed, " : "") << "]";
+  } else {
+    os_ << " -> ID" << to->id() << ":s [color=transparent, "
+        << (unconstrained ? "constraint=false, " : "")
+        << (NodeProperties::IsControlEdge(edge) ? "style=dashed, " : "") << "]";
+  }
+  os_ << "\n";
+}
+
+
+void GraphVisualizer::Print() {
+  os_ << "digraph D {\n"
+      << "  node [fontsize=8,height=0.25]\n"
+      << "  rankdir=\"BT\"\n"
+      << "  ranksep=\"1.2 equally\"\n"
+      << "  overlap=\"false\"\n"
+      << "  splines=\"true\"\n"
+      << "  concentrate=\"true\"\n"
+      << "  \n";
+
+  // Make sure all nodes have been output before writing out the edges.
+  use_to_def_ = true;
+  // TODO(svenpanne) Remove the need for the const_casts.
+  const_cast<Graph*>(graph_)->VisitNodeInputsFromEnd(this);
+  white_nodes_.insert(const_cast<Graph*>(graph_)->start());
+
+  // Visit all uses of white nodes.
+  use_to_def_ = false;
+  GenericGraphVisit::Visit<GraphVisualizer, NodeUseIterationTraits<Node> >(
+      const_cast<Graph*>(graph_), zone_, white_nodes_.begin(),
+      white_nodes_.end(), this);
+
+  os_ << "  DEAD_INPUT [\n"
+      << "    style=\"filled\" \n"
+      << "    fillcolor=\"" DEAD_COLOR "\"\n"
+      << "  ]\n"
+      << "\n";
+
+  // With all the nodes written, add the edges.
+  for (NodeSetIter i = all_nodes_.begin(); i != all_nodes_.end(); ++i) {
+    Node::Inputs inputs = (*i)->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter) {
+      PrintEdge(iter.edge());
+    }
+  }
+  os_ << "}\n";
+}
+
+
+GraphVisualizer::GraphVisualizer(OStream& os, Zone* zone,
+                                 const Graph* graph)  // NOLINT
+    : zone_(zone),
+      all_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
+      white_nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)),
+      use_to_def_(true),
+      os_(os),
+      graph_(graph) {}
+
+
+OStream& operator<<(OStream& os, const AsDOT& ad) {
+  Zone tmp_zone(ad.graph.zone()->isolate());
+  GraphVisualizer(os, &tmp_zone, &ad.graph).Print();
+  return os;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph-visualizer.h b/src/compiler/graph-visualizer.h
new file mode 100644
index 0000000..12532ba
--- /dev/null
+++ b/src/compiler/graph-visualizer.h
@@ -0,0 +1,29 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_VISUALIZER_H_
+#define V8_COMPILER_GRAPH_VISUALIZER_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+class Graph;
+
+struct AsDOT {
+  explicit AsDOT(const Graph& g) : graph(g) {}
+  const Graph& graph;
+};
+
+OStream& operator<<(OStream& os, const AsDOT& ad);
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_VISUALIZER_H_
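Streaming an AsDOT wrapper renders the graph as Graphviz dot text; a minimal
sketch, assuming OFStream from src/ostreams.h is the usual FILE*-backed
OStream (the helper name here is hypothetical):

    void DumpGraphAsDot(const compiler::Graph& graph) {
      OFStream os(stdout);           // OStream writing to a FILE*
      os << compiler::AsDOT(graph);  // emits "digraph D { ... }"
    }

The emitted text is ordinary dot, so it can be piped through Graphviz (for
example "dot -Tpng") to produce an image of the graph.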
diff --git a/src/compiler/graph.cc b/src/compiler/graph.cc
new file mode 100644
index 0000000..7b5f228
--- /dev/null
+++ b/src/compiler/graph.cc
@@ -0,0 +1,37 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Graph::Graph(Zone* zone) : GenericGraph<Node>(zone), decorators_(zone) {}
+
+
+Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs) {
+  DCHECK_LE(op->InputCount(), input_count);
+  Node* result = Node::New(this, input_count, inputs);
+  result->Initialize(op);
+  for (ZoneVector<GraphDecorator*>::iterator i = decorators_.begin();
+       i != decorators_.end(); ++i) {
+    (*i)->Decorate(result);
+  }
+  return result;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/graph.h b/src/compiler/graph.h
new file mode 100644
index 0000000..07eb02f
--- /dev/null
+++ b/src/compiler/graph.h
@@ -0,0 +1,93 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_GRAPH_H_
+#define V8_COMPILER_GRAPH_H_
+
+#include <map>
+#include <set>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+#include "src/compiler/source-position.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphDecorator;
+
+
+class Graph : public GenericGraph<Node> {
+ public:
+  explicit Graph(Zone* zone);
+
+  // Base implementation used by all factory methods.
+  Node* NewNode(const Operator* op, int input_count, Node** inputs);
+
+  // Factories for nodes with static input counts.
+  Node* NewNode(const Operator* op) {
+    return NewNode(op, 0, static_cast<Node**>(NULL));
+  }
+  Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2) {
+    Node* nodes[] = {n1, n2};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3) {
+    Node* nodes[] = {n1, n2, n3};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4) {
+    Node* nodes[] = {n1, n2, n3, n4};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5) {
+    Node* nodes[] = {n1, n2, n3, n4, n5};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+  Node* NewNode(const Operator* op, Node* n1, Node* n2, Node* n3, Node* n4,
+                Node* n5, Node* n6) {
+    Node* nodes[] = {n1, n2, n3, n4, n5, n6};
+    return NewNode(op, arraysize(nodes), nodes);
+  }
+
+  template <class Visitor>
+  void VisitNodeUsesFrom(Node* node, Visitor* visitor);
+
+  template <class Visitor>
+  void VisitNodeUsesFromStart(Visitor* visitor);
+
+  template <class Visitor>
+  void VisitNodeInputsFromEnd(Visitor* visitor);
+
+  void AddDecorator(GraphDecorator* decorator) {
+    decorators_.push_back(decorator);
+  }
+
+  void RemoveDecorator(GraphDecorator* decorator) {
+    ZoneVector<GraphDecorator*>::iterator it =
+        std::find(decorators_.begin(), decorators_.end(), decorator);
+    DCHECK(it != decorators_.end());
+    decorators_.erase(it, it + 1);
+  }
+
+ private:
+  ZoneVector<GraphDecorator*> decorators_;
+};
+
+
+class GraphDecorator : public ZoneObject {
+ public:
+  virtual ~GraphDecorator() {}
+  virtual void Decorate(Node* node) = 0;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_GRAPH_H_
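Graph::NewNode runs every registered decorator on each node it creates, which
gives analyses a hook to attach per-node data as the graph grows. A minimal
sketch with a hypothetical counting decorator:

    class CountingDecorator : public GraphDecorator {
     public:
      CountingDecorator() : count_(0) {}
      virtual void Decorate(Node* node) OVERRIDE { count_++; }
      int count() const { return count_; }
     private:
      int count_;
    };

    // Usage sketch; decorators are ZoneObjects, so allocate them in a zone:
    //   CountingDecorator* counter = new (zone) CountingDecorator();
    //   graph.AddDecorator(counter);
    //   graph.NewNode(op);              // counter->count() is now 1
    //   graph.RemoveDecorator(counter);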
diff --git a/src/compiler/ia32/code-generator-ia32.cc b/src/compiler/ia32/code-generator-ia32.cc
new file mode 100644
index 0000000..deab7cd
--- /dev/null
+++ b/src/compiler/ia32/code-generator-ia32.cc
@@ -0,0 +1,959 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/ia32/assembler-ia32.h"
+#include "src/ia32/macro-assembler-ia32.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// Adds IA-32 specific methods for decoding operands.
+class IA32OperandConverter : public InstructionOperandConverter {
+ public:
+  IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  Operand InputOperand(int index) { return ToOperand(instr_->InputAt(index)); }
+
+  Immediate InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  Operand OutputOperand() { return ToOperand(instr_->Output()); }
+
+  Operand TempOperand(int index) { return ToOperand(instr_->TempAt(index)); }
+
+  Operand ToOperand(InstructionOperand* op, int extra = 0) {
+    if (op->IsRegister()) {
+      DCHECK(extra == 0);
+      return Operand(ToRegister(op));
+    } else if (op->IsDoubleRegister()) {
+      DCHECK(extra == 0);
+      return Operand(ToDoubleRegister(op));
+    }
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+    return Operand(offset.from_stack_pointer() ? esp : ebp, offset.offset());
+  }
+
+  Operand HighOperand(InstructionOperand* op) {
+    DCHECK(op->IsDoubleStackSlot());
+    return ToOperand(op, kPointerSize);
+  }
+
+  Immediate ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Immediate(constant.ToInt32());
+      case Constant::kFloat64:
+        return Immediate(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kExternalReference:
+        return Immediate(constant.ToExternalReference());
+      case Constant::kHeapObject:
+        return Immediate(constant.ToHeapObject());
+      case Constant::kInt64:
+        break;
+    }
+    UNREACHABLE();
+    return Immediate(-1);
+  }
+
+  Operand MemoryOperand(int* first_input) {
+    const int offset = *first_input;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_MR1I:
+        *first_input += 2;
+        return Operand(InputRegister(offset + 0), InputRegister(offset + 1),
+                       times_1,
+                       0);  // TODO(dcarney): K != 0
+      case kMode_MRI:
+        *first_input += 2;
+        return Operand::ForRegisterPlusImmediate(InputRegister(offset + 0),
+                                                 InputImmediate(offset + 1));
+      case kMode_MI:
+        *first_input += 1;
+        return Operand(InputImmediate(offset + 0));
+      default:
+        UNREACHABLE();
+        return Operand(no_reg);
+    }
+  }
+
+  Operand MemoryOperand() {
+    int first_input = 0;
+    return MemoryOperand(&first_input);
+  }
+};
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsImmediate();
+}
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  IA32OperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (HasImmediateInput(instr, 0)) {
+        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+        __ call(code, RelocInfo::CODE_TARGET);
+      } else {
+        Register reg = i.InputRegister(0);
+        __ call(Operand(reg, Code::kHeaderSize - kHeapObjectTag));
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
+        __ Assert(equal, kWrongFunctionContext);
+      }
+      __ call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      __ jmp(code()->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kIA32Add:
+      if (HasImmediateInput(instr, 1)) {
+        __ add(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ add(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32And:
+      if (HasImmediateInput(instr, 1)) {
+        __ and_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ and_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Cmp:
+      if (HasImmediateInput(instr, 1)) {
+        __ cmp(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ cmp(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Test:
+      if (HasImmediateInput(instr, 1)) {
+        __ test(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ test(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Imul:
+      if (HasImmediateInput(instr, 1)) {
+        __ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
+      } else {
+        __ imul(i.OutputRegister(), i.InputOperand(1));
+      }
+      break;
+    case kIA32Idiv:
+      __ cdq();
+      __ idiv(i.InputOperand(1));
+      break;
+    case kIA32Udiv:
+      __ xor_(edx, edx);
+      __ div(i.InputOperand(1));
+      break;
+    case kIA32Not:
+      __ not_(i.OutputOperand());
+      break;
+    case kIA32Neg:
+      __ neg(i.OutputOperand());
+      break;
+    case kIA32Or:
+      if (HasImmediateInput(instr, 1)) {
+        __ or_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ or_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Xor:
+      if (HasImmediateInput(instr, 1)) {
+        __ xor_(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ xor_(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Sub:
+      if (HasImmediateInput(instr, 1)) {
+        __ sub(i.InputOperand(0), i.InputImmediate(1));
+      } else {
+        __ sub(i.InputRegister(0), i.InputOperand(1));
+      }
+      break;
+    case kIA32Shl:
+      if (HasImmediateInput(instr, 1)) {
+        __ shl(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ shl_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Shr:
+      if (HasImmediateInput(instr, 1)) {
+        __ shr(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ shr_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Sar:
+      if (HasImmediateInput(instr, 1)) {
+        __ sar(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ sar_cl(i.OutputRegister());
+      }
+      break;
+    case kIA32Ror:
+      if (HasImmediateInput(instr, 1)) {
+        __ ror(i.OutputRegister(), i.InputInt5(1));
+      } else {
+        __ ror_cl(i.OutputRegister());
+      }
+      break;
+    case kSSEFloat64Cmp:
+      __ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
+      break;
+    case kSSEFloat64Add:
+      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Sub:
+      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mul:
+      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Div:
+      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mod: {
+      // TODO(dcarney): alignment is wrong.
+      __ sub(esp, Immediate(kDoubleSize));
+      // Move values to st(0) and st(1).
+      __ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
+      __ fld_d(Operand(esp, 0));
+      __ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
+      __ fld_d(Operand(esp, 0));
+      // Loop while fprem isn't done.
+      Label mod_loop;
+      __ bind(&mod_loop);
+      // This instruction traps on all kinds of inputs, but we are assuming
+      // the floating point control word is set to ignore them all.
+      __ fprem();
+      // The following two instructions implicitly use eax.
+      __ fnstsw_ax();
+      __ sahf();
+      __ j(parity_even, &mod_loop);
+      // Move output to stack and clean up.
+      __ fstp(1);
+      __ fstp_d(Operand(esp, 0));
+      __ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
+      __ add(esp, Immediate(kDoubleSize));
+      break;
+    }
+    case kSSEFloat64Sqrt:
+      __ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
+    case kSSEFloat64ToInt32:
+      __ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
+      break;
+    case kSSEFloat64ToUint32: {
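+      // cvttsd2si only produces a signed int32, so bias the input down by
+      // 2^31 before truncating and add 2^31 back afterwards; for inputs in
+      // uint32 range the two adjustments cancel modulo 2^32.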
+      XMMRegister scratch = xmm0;
+      __ Move(scratch, -2147483648.0);
+      __ addsd(scratch, i.InputOperand(0));
+      __ cvttsd2si(i.OutputRegister(), scratch);
+      __ add(i.OutputRegister(), Immediate(0x80000000));
+      break;
+    }
+    case kSSEInt32ToFloat64:
+      __ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
+      break;
+    case kSSEUint32ToFloat64:
+      // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
+      __ LoadUint32(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+    case kIA32Movsxbl:
+      __ movsx_b(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32Movzxbl:
+      __ movzx_b(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32Movb: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      if (HasImmediateInput(instr, index)) {
+        __ mov_b(operand, i.InputInt8(index));
+      } else {
+        __ mov_b(operand, i.InputRegister(index));
+      }
+      break;
+    }
+    case kIA32Movsxwl:
+      __ movsx_w(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32Movzxwl:
+      __ movzx_w(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kIA32Movw: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      if (HasImmediateInput(instr, index)) {
+        __ mov_w(operand, i.InputInt16(index));
+      } else {
+        __ mov_w(operand, i.InputRegister(index));
+      }
+      break;
+    }
+    case kIA32Movl:
+      if (instr->HasOutput()) {
+        __ mov(i.OutputRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ mov(operand, i.InputImmediate(index));
+        } else {
+          __ mov(operand, i.InputRegister(index));
+        }
+      }
+      break;
+    case kIA32Movsd:
+      if (instr->HasOutput()) {
+        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ movsd(operand, i.InputDoubleRegister(index));
+      }
+      break;
+    case kIA32Movss:
+      if (instr->HasOutput()) {
+        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+        __ movss(operand, xmm0);
+      }
+      break;
+    case kIA32Push:
+      if (HasImmediateInput(instr, 0)) {
+        __ push(i.InputImmediate(0));
+      } else {
+        __ push(i.InputOperand(0));
+      }
+      break;
+    case kIA32StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ mov(Operand(object, index, times_1, 0), value);
+      __ lea(index, Operand(object, index, times_1, 0));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      __ RecordWrite(object, index, value, mode);
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  IA32OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(instr->InputCount() - 2);
+  BasicBlock* fblock = i.InputBlock(instr->InputCount() - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kEqual:
+      __ j(equal, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ j(not_equal, tlabel);
+      break;
+    case kSignedLessThan:
+      __ j(less, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ j(greater_equal, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ j(less_equal, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ j(greater, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ j(below, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ j(above_equal, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ j(below_equal, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ j(above, tlabel);
+      break;
+    case kOverflow:
+      __ j(overflow, tlabel);
+      break;
+    case kNotOverflow:
+      __ j(no_overflow, tlabel);
+      break;
+  }
+  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  IA32OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = no_condition;
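+  // ucomisd sets the parity flag for an unordered (NaN) comparison, so the
+  // kUnordered* cases jump straight to the setcc path when parity is odd
+  // (i.e. ordered) and otherwise materialize the fixed 0 or 1 NaN result.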
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kEqual:
+      cc = equal;
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kNotEqual:
+      cc = not_equal;
+      break;
+    case kSignedLessThan:
+      cc = less;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = greater_equal;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = less_equal;
+      break;
+    case kSignedGreaterThan:
+      cc = greater;
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = below;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = above_equal;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = below_equal;
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ mov(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = above;
+      break;
+    case kOverflow:
+      cc = overflow;
+      break;
+    case kNotOverflow:
+      cc = no_overflow;
+      break;
+  }
+  __ bind(&check);
+  if (reg.is_byte_register()) {
+    // setcc for byte registers (al, bl, cl, dl).
+    __ setcc(cc, reg);
+    __ movzx_b(reg, reg);
+  } else {
+    // Emit a branch to set a register to either 1 or 0.
+    Label set;
+    __ j(cc, &set, Label::kNear);
+    __ mov(reg, Immediate(0));
+    __ jmp(&done, Label::kNear);
+    __ bind(&set);
+    __ mov(reg, Immediate(1));
+  }
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+// The calling convention for JSFunctions on IA32 passes arguments on the
+// stack and the JSFunction and context in EDI and ESI, respectively; the
+// steps of a call look as follows:
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ push arguments and setup ESI, EDI }--------------------------------------
+//                                       | args + receiver |  caller frame |
+//                                       ^ esp                             ^ ebp
+//                 [edi = JSFunction, esi = context]
+
+// --{ call [edi + kCodeEntryOffset] }------------------------------------------
+//                                 | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ esp                                        ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ ebp,esp
+
+// --{ push esi }---------------------------------------------------------------
+//                      | CTX | FP | RET | args + receiver |  caller frame |
+//                      ^esp  ^ ebp
+
+// --{ push edi }---------------------------------------------------------------
+//                | FNC | CTX | FP | RET | args + receiver |  caller frame |
+//                ^esp        ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+// | callee frame | FNC | CTX | FP | RET | args + receiver |  caller frame |
+// ^esp                       ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ mov esp, ebp }-----------------------------------------------------------
+//                            | FP | RET | args + receiver |  caller frame |
+//                            ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+// |                               | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// --{ ret #A+1 }---------------------------------------------------------------
+// |                                                       |  caller frame |
+//                                                         ^ esp           ^ ebp
+
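+// A concrete reading of the epilogue (editor's sketch): for a JSFunction with
+// A = 2 declared parameters, "ret #A+1" pops the two arguments plus the
+// receiver, i.e. "ret 12" (3 * kPointerSize bytes) on IA32.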
+
+// Runtime function calls are accomplished by doing a stub call to the
+// CEntryStub (a real code object). On IA32 this passes arguments on the
+// stack, the number of arguments in EAX, the address of the runtime function
+// in EBX, and the context in ESI.
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ push arguments and setup EAX, EBX, and ESI }-----------------------------
+//                                       | args + receiver |  caller frame |
+//                                       ^ esp                             ^ ebp
+//              [eax = #args, ebx = runtime function, esi = context]
+
+// --{ call #CEntryStub }-------------------------------------------------------
+//                                 | RET | args + receiver |  caller frame |
+//                                 ^ esp                                   ^ ebp
+
+// =={ body of runtime function }===============================================
+
+// --{ runtime returns }--------------------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// Other custom linkages (e.g. for calling directly into and out of C++) may
+// need to save callee-saved registers on the stack, which is done in the
+// function prologue of generated code.
+
+// --{ before the call instruction }--------------------------------------------
+//                                                         |  caller frame |
+//                                                         ^ esp           ^ ebp
+
+// --{ set up arguments in registers and on the stack }-------------------------
+//                                                  | args |  caller frame |
+//                                                  ^ esp                  ^ ebp
+//                  [r0 = arg0, r1 = arg1, ...]
+
+// --{ call code }--------------------------------------------------------------
+//                                            | RET | args |  caller frame |
+//                                            ^ esp                        ^ ebp
+
+// =={ prologue of called function }============================================
+// --{ push ebp }---------------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ esp                             ^ ebp
+
+// --{ mov ebp, esp }-----------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ ebp,esp
+
+// --{ save registers }---------------------------------------------------------
+//                                | regs | FP | RET | args |  caller frame |
+//                                ^ esp  ^ ebp
+
+// --{ subi esp, #N }-----------------------------------------------------------
+//                 | callee frame | regs | FP | RET | args |  caller frame |
+//                 ^esp                  ^ ebp
+
+// =={ body of called function }================================================
+
+// =={ epilogue of called function }============================================
+// --{ restore registers }------------------------------------------------------
+//                                | regs | FP | RET | args |  caller frame |
+//                                ^ esp  ^ ebp
+
+// --{ mov esp, ebp }-----------------------------------------------------------
+//                                       | FP | RET | args |  caller frame |
+//                                       ^ esp,ebp
+
+// --{ pop ebp }----------------------------------------------------------------
+//                                            | RET | args |  caller frame |
+//                                            ^ esp                        ^ ebp
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  Frame* frame = code_->frame();
+  int stack_slots = frame->GetSpillSlotCount();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    // Assemble a prologue similar to the cdecl calling convention.
+    __ push(ebp);
+    __ mov(ebp, esp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        __ push(Register::from_code(i));
+        register_save_area_size += kPointerSize;
+      }
+      frame->SetRegisterSaveAreaSize(register_save_area_size);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
+      __ cmp(ecx, isolate()->factory()->undefined_value());
+      __ j(not_equal, &ok, Label::kNear);
+      __ mov(ecx, GlobalObjectOperand());
+      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
+      __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  if (stack_slots > 0) {
+    __ sub(esp, Immediate(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ add(esp, Immediate(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      if (saves != 0) {
+        for (int i = 0; i < Register::kNumRegisters; i++) {
+          if (!((1 << i) & saves)) continue;
+          __ pop(Register::from_code(i));
+        }
+      }
+      __ pop(ebp);  // Pop caller's frame pointer.
+      __ ret(0);
+    } else {
+      // No saved registers.
+      __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
+      __ pop(ebp);       // Pop caller's frame pointer.
+      __ ret(0);
+    }
+  } else {
+    __ mov(esp, ebp);  // Move stack pointer back to frame pointer.
+    __ pop(ebp);       // Pop caller's frame pointer.
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ ret(pop_count * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  IA32OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ mov(dst, src);
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ mov(dst, src);
+    } else {
+      Operand dst = g.ToOperand(destination);
+      __ push(src);
+      __ pop(dst);
+    }
+  } else if (source->IsConstant()) {
+    Constant src_constant = g.ToConstant(source);
+    if (src_constant.type() == Constant::kHeapObject) {
+      Handle<HeapObject> src = src_constant.ToHeapObject();
+      if (destination->IsRegister()) {
+        Register dst = g.ToRegister(destination);
+        __ LoadHeapObject(dst, src);
+      } else {
+        DCHECK(destination->IsStackSlot());
+        Operand dst = g.ToOperand(destination);
+        AllowDeferredHandleDereference embedding_raw_address;
+        if (isolate()->heap()->InNewSpace(*src)) {
+          __ PushHeapObject(src);
+          __ pop(dst);
+        } else {
+          __ mov(dst, src);
+        }
+      }
+    } else if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ mov(dst, g.ToImmediate(source));
+    } else if (destination->IsStackSlot()) {
+      Operand dst = g.ToOperand(destination);
+      __ mov(dst, g.ToImmediate(source));
+    } else {
+      double v = g.ToDouble(source);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
+      int32_t lower = static_cast<int32_t>(int_val);
+      int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
+      if (destination->IsDoubleRegister()) {
+        XMMRegister dst = g.ToDoubleRegister(destination);
+        __ Move(dst, v);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        Operand dst0 = g.ToOperand(destination);
+        Operand dst1 = g.HighOperand(destination);
+        __ mov(dst0, Immediate(lower));
+        __ mov(dst1, Immediate(upper));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movaps(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      Operand dst = g.ToOperand(destination);
+      __ movsd(dst, src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      // We rely on having xmm0 available as a fixed scratch register.
+      Operand dst = g.ToOperand(destination);
+      __ movsd(xmm0, src);
+      __ movsd(dst, xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
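+
+// Editor's note on the double-constant split above: the 64-bit bit pattern is
+// stored as two 32-bit halves. For example, bit_cast<uint64_t>(1.0) is
+// 0x3FF0000000000000, so lower = 0x00000000 and upper = 0x3FF00000.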
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  IA32OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Register-register.
+    Register src = g.ToRegister(source);
+    Register dst = g.ToRegister(destination);
+    __ xchg(dst, src);
+  } else if (source->IsRegister() && destination->IsStackSlot()) {
+    // Register-memory.
+    __ xchg(g.ToRegister(source), g.ToOperand(destination));
+  } else if (source->IsStackSlot() && destination->IsStackSlot()) {
+    // Memory-memory.
+    Operand src = g.ToOperand(source);
+    Operand dst = g.ToOperand(destination);
+    __ push(dst);
+    __ push(src);
+    __ pop(dst);
+    __ pop(src);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    XMMRegister dst = g.ToDoubleRegister(destination);
+    __ movaps(xmm0, src);
+    __ movaps(src, dst);
+    __ movaps(dst, xmm0);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+    // XMM register-memory swap.  We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister reg = g.ToDoubleRegister(source);
+    Operand other = g.ToOperand(destination);
+    __ movsd(xmm0, other);
+    __ movsd(other, reg);
+    __ movaps(reg, xmm0);
+  } else if (source->IsDoubleStackSlot() && destination->IsDoubleStackSlot()) {
+    // Double-width memory-to-memory.
+    Operand src0 = g.ToOperand(source);
+    Operand src1 = g.HighOperand(source);
+    Operand dst0 = g.ToOperand(destination);
+    Operand dst1 = g.HighOperand(destination);
+    __ movsd(xmm0, dst0);  // Save destination in xmm0.
+    __ push(src0);         // Then use stack to copy source to destination.
+    __ pop(dst0);
+    __ push(src1);
+    __ pop(dst1);
+    __ movsd(src0, xmm0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  MarkLazyDeoptSite();
+}
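+
+// Editor's sketch: if the previous lazy-bailout site ended 3 bytes ago and
+// Deoptimizer::patch_size() were, say, 5 bytes, the code above emits Nop(2) so
+// that patching the previous site can never overwrite the instruction that is
+// emitted next.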
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ia32/instruction-codes-ia32.h b/src/compiler/ia32/instruction-codes-ia32.h
new file mode 100644
index 0000000..0f46088
--- /dev/null
+++ b/src/compiler/ia32/instruction-codes-ia32.h
@@ -0,0 +1,84 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+#define V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// IA32-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(IA32Add)                       \
+  V(IA32And)                       \
+  V(IA32Cmp)                       \
+  V(IA32Test)                      \
+  V(IA32Or)                        \
+  V(IA32Xor)                       \
+  V(IA32Sub)                       \
+  V(IA32Imul)                      \
+  V(IA32Idiv)                      \
+  V(IA32Udiv)                      \
+  V(IA32Not)                       \
+  V(IA32Neg)                       \
+  V(IA32Shl)                       \
+  V(IA32Shr)                       \
+  V(IA32Sar)                       \
+  V(IA32Ror)                       \
+  V(SSEFloat64Cmp)                 \
+  V(SSEFloat64Add)                 \
+  V(SSEFloat64Sub)                 \
+  V(SSEFloat64Mul)                 \
+  V(SSEFloat64Div)                 \
+  V(SSEFloat64Mod)                 \
+  V(SSEFloat64Sqrt)                \
+  V(SSEFloat64ToInt32)             \
+  V(SSEFloat64ToUint32)            \
+  V(SSEInt32ToFloat64)             \
+  V(SSEUint32ToFloat64)            \
+  V(IA32Movsxbl)                   \
+  V(IA32Movzxbl)                   \
+  V(IA32Movb)                      \
+  V(IA32Movsxwl)                   \
+  V(IA32Movzxwl)                   \
+  V(IA32Movw)                      \
+  V(IA32Movl)                      \
+  V(IA32Movss)                     \
+  V(IA32Movsd)                     \
+  V(IA32Push)                      \
+  V(IA32StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MI)   /* [K] */                    \
+  V(MR)   /* [%r0] */                  \
+  V(MRI)  /* [%r0 + K] */              \
+  V(MR1I) /* [%r0 + %r1 * 1 + K] */    \
+  V(MR2I) /* [%r0 + %r1 * 2 + K] */    \
+  V(MR4I) /* [%r0 + %r1 * 4 + K] */    \
+  V(MR8I) /* [%r0 + %r1 * 8 + K] */
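+
+// For example (editor's annotation), kMode_MR4I describes an operand such as
+// [%eax + %ebx * 4 + 12]: base register, index register scaled by 4, plus a
+// constant displacement, the typical shape for indexing a 32-bit element
+// array.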
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_IA32_INSTRUCTION_CODES_IA32_H_
diff --git a/src/compiler/ia32/instruction-selector-ia32-unittest.cc b/src/compiler/ia32/instruction-selector-ia32-unittest.cc
new file mode 100644
index 0000000..60708c1
--- /dev/null
+++ b/src/compiler/ia32/instruction-selector-ia32-unittest.cc
@@ -0,0 +1,211 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+// Immediates (random subset).
+static const int32_t kImmediates[] = {
+    kMinInt, -42, -1, 0,  1,  2,    3,      4,          5,
+    6,       7,   8,  16, 42, 0xff, 0xffff, 0x0f0f0f0f, kMaxInt};
+
+}  // namespace
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    }
+    {
+      StreamBuilder m(this, kMachInt32, kMachInt32);
+      m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
+      Stream s = m.Build();
+      ASSERT_EQ(1U, s.size());
+      EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+      ASSERT_EQ(2U, s[0]->InputCount());
+      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    }
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  m.Return(m.Int32Sub(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubWithImmediate) {
+  TRACED_FOREACH(int32_t, imm, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kIA32Sub, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Loads and stores
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kIA32Movsxbl, kIA32Movb},
+    {kMachUint8, kIA32Movzxbl, kIA32Movb},
+    {kMachInt16, kIA32Movsxwl, kIA32Movw},
+    {kMachUint16, kIA32Movzxwl, kIA32Movw},
+    {kMachInt32, kIA32Movl, kIA32Movl},
+    {kMachUint32, kIA32Movl, kIA32Movl},
+    {kMachFloat32, kIA32Movss, kIA32Movss},
+    {kMachFloat64, kIA32Movsd, kIA32Movsd}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateBase) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, base, kImmediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Int32Constant(base), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, kImmediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateBase) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, base, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachInt32, memacc.type);
+    m.Store(memacc.type, m.Int32Constant(base), m.Parameter(0), m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(base, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithImmediateIndex) {
+  const MemoryAccess memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, kImmediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ia32/instruction-selector-ia32.cc b/src/compiler/ia32/instruction-selector-ia32.cc
new file mode 100644
index 0000000..24ebc38
--- /dev/null
+++ b/src/compiler/ia32/instruction-selector-ia32.cc
@@ -0,0 +1,563 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds IA32-specific methods for generating operands.
+class IA32OperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit IA32OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseByteRegister(Node* node) {
+    // TODO(dcarney): relax constraint.
+    return UseFixed(node, edx);
+  }
+
+  bool CanBeImmediate(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kExternalConstant:
+        return true;
+      case IrOpcode::kHeapConstant: {
+        // Constants in new space cannot be used as immediates in V8 because
+        // the GC does not scan code objects when collecting the new generation.
+        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
+        return !isolate()->heap()->InNewSpace(*value.handle());
+      }
+      default:
+        return false;
+    }
+  }
+};
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  IA32OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  // TODO(titzer): signed/unsigned small loads
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kIA32Movss;
+      break;
+    case kRepFloat64:
+      opcode = kIA32Movsd;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kIA32Movsxbl : kIA32Movzxbl;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kIA32Movsxwl : kIA32Movzxwl;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kIA32Movl;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    if (Int32Matcher(index).Is(0)) {  // load [#base + #0]
+      Emit(opcode | AddressingModeField::encode(kMode_MI),
+           g.DefineAsRegister(node), g.UseImmediate(base));
+    } else {  // load [#base + %index]
+      Emit(opcode | AddressingModeField::encode(kMode_MRI),
+           g.DefineAsRegister(node), g.UseRegister(index),
+           g.UseImmediate(base));
+    }
+  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {  // load [%base + %index + K]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
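+
+// Editor's sketch of the selection above: a kRepWord32 load with an immediate
+// base #K and a register index is emitted as kIA32Movl | kMode_MRI with
+// inputs (index register, #K); two non-immediate inputs fall back to
+// kMode_MR1I, i.e. [base + index * 1].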
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  IA32OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK_EQ(kRepTagged, rep);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(ecx), g.TempRegister(edx)};
+    Emit(kIA32StoreWriteBarrier, NULL, g.UseFixed(base, ebx),
+         g.UseFixed(index, ecx), g.UseFixed(value, edx), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+  InstructionOperand* val;
+  if (g.CanBeImmediate(value)) {
+    val = g.UseImmediate(value);
+  } else if (rep == kRepWord8 || rep == kRepBit) {
+    val = g.UseByteRegister(value);
+  } else {
+    val = g.UseRegister(value);
+  }
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kIA32Movss;
+      break;
+    case kRepFloat64:
+      opcode = kIA32Movsd;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kIA32Movb;
+      break;
+    case kRepWord16:
+      opcode = kIA32Movw;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord32:
+      opcode = kIA32Movl;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    if (Int32Matcher(index).Is(0)) {  // store [#base], %|#value
+      Emit(opcode | AddressingModeField::encode(kMode_MI), NULL,
+           g.UseImmediate(base), val);
+    } else {  // store [#base + %index], %|#value
+      Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+           g.UseRegister(index), g.UseImmediate(base), val);
+    }
+  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), val);
+  } else {  // store [%base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+         g.UseRegister(base), g.UseRegister(index), val);
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  IA32OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left
+  // operand, as this might be the last use and therefore its register can be
+  // reused.
+  if (g.CanBeImmediate(m.right().node())) {
+    inputs[input_count++] = g.Use(m.left().node());
+    inputs[input_count++] = g.UseImmediate(m.right().node());
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.Use(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineSameAsFirst(node);
+  if (cont->IsSet()) {
+    // TODO(turbofan): Use byte register here.
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
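+
+// Editor's annotation: when the continuation is a branch, the instruction
+// above carries four inputs (left, right, true-block label, false-block
+// label), so the compare and the branch form a single unit for the register
+// allocator.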
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kIA32And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kIA32Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  IA32OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kIA32Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+  } else {
+    VisitBinop(this, node, kIA32Xor);
+  }
+}
+
+
+// Shared routine for multiple shift operations.
+static inline void VisitShift(InstructionSelector* selector, Node* node,
+                              ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int32BinopMatcher m(node);
+    if (m.right().IsWord32And()) {
+      Int32BinopMatcher mright(right);
+      if (mright.right().Is(0x1F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, ecx));
+  }
+}
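+
+// Editor's note on the 0x1F match above: IA32 shift instructions only use the
+// low five bits of CL, so an explicit "count & 0x1F" mask is redundant and the
+// selector strips it before fixing the shift count into ecx.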
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitShift(this, node, kIA32Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitShift(this, node, kIA32Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitShift(this, node, kIA32Sar);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitShift(this, node, kIA32Ror);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop(this, node, kIA32Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  IA32OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kIA32Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+  } else {
+    VisitBinop(this, node, kIA32Sub);
+  }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  IA32OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (g.CanBeImmediate(right)) {
+    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
+         g.UseImmediate(right));
+  } else if (g.CanBeImmediate(left)) {
+    Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(right),
+         g.UseImmediate(left));
+  } else {
+    // TODO(turbofan): select better left operand.
+    Emit(kIA32Imul, g.DefineSameAsFirst(node), g.UseRegister(left),
+         g.Use(right));
+  }
+}
+
+
+static inline void VisitDiv(InstructionSelector* selector, Node* node,
+                            ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(edx)};
+  size_t temp_count = arraysize(temps);
+  selector->Emit(opcode, g.DefineAsFixed(node, eax),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kIA32Udiv);
+}
+
+
+static inline void VisitMod(InstructionSelector* selector, Node* node,
+                            ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(eax), g.TempRegister(edx)};
+  size_t temp_count = arraysize(temps);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)), temp_count, temps);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kIA32Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kIA32Udiv);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  IA32OperandGenerator g(this);
+  // TODO(turbofan): IA32 SSE LoadUint32() should take an operand.
+  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  IA32OperandGenerator g(this);
+  InstructionOperand* temps[] = {g.TempRegister(eax)};
+  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
+       temps);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  IA32OperandGenerator g(this);
+  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kIA32Add, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kIA32Sub, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static inline void VisitCompare(InstructionSelector* selector,
+                                InstructionCode opcode,
+                                InstructionOperand* left,
+                                InstructionOperand* right,
+                                FlagsContinuation* cont) {
+  IA32OperandGenerator g(selector);
+  if (cont->IsBranch()) {
+    selector->Emit(cont->Encode(opcode), NULL, left, right,
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    // TODO(titzer): Needs byte register.
+    selector->Emit(cont->Encode(opcode), g.DefineAsRegister(cont->result()),
+                   left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static inline void VisitWordCompare(InstructionSelector* selector, Node* node,
+                                    InstructionCode opcode,
+                                    FlagsContinuation* cont, bool commutative) {
+  IA32OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+  }
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kIA32Cmp, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kIA32Test, cont, true);
+    default:
+      break;
+  }
+
+  IA32OperandGenerator g(this);
+  VisitCompare(this, kIA32Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kIA32Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  IA32OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  IA32OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(call->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(call, &buffer, true, true);
+
+  // Push any stack arguments.
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    // TODO(titzer): handle pushing double parameters.
+    Emit(kIA32Push, NULL,
+         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/ia32/linkage-ia32.cc b/src/compiler/ia32/linkage-ia32.cc
new file mode 100644
index 0000000..f2c5fab
--- /dev/null
+++ b/src/compiler/ia32/linkage-ia32.cc
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct IA32LinkageHelperTraits {
+  static Register ReturnValueReg() { return eax; }
+  static Register ReturnValue2Reg() { return edx; }
+  static Register JSCallFunctionReg() { return edi; }
+  static Register ContextReg() { return esi; }
+  static Register RuntimeCallFunctionReg() { return ebx; }
+  static Register RuntimeCallArgCountReg() { return eax; }
+  static RegList CCalleeSaveRegisters() {
+    return esi.bit() | edi.bit() | ebx.bit();
+  }
+  static Register CRegisterParameter(int i) { return no_reg; }
+  static int CRegisterParametersLength() { return 0; }
+};
+
+typedef LinkageHelper<IA32LinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction-codes.h b/src/compiler/instruction-codes.h
new file mode 100644
index 0000000..2d921bd
--- /dev/null
+++ b/src/compiler/instruction-codes.h
@@ -0,0 +1,119 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_CODES_H_
+#define V8_COMPILER_INSTRUCTION_CODES_H_
+
+#if V8_TARGET_ARCH_ARM
+#include "src/compiler/arm/instruction-codes-arm.h"
+#elif V8_TARGET_ARCH_ARM64
+#include "src/compiler/arm64/instruction-codes-arm64.h"
+#elif V8_TARGET_ARCH_IA32
+#include "src/compiler/ia32/instruction-codes-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "src/compiler/x64/instruction-codes-x64.h"
+#else
+#define TARGET_ARCH_OPCODE_LIST(V)
+#define TARGET_ADDRESSING_MODE_LIST(V)
+#endif
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+// Target-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define ARCH_OPCODE_LIST(V) \
+  V(ArchCallCodeObject)     \
+  V(ArchCallJSFunction)     \
+  V(ArchJmp)                \
+  V(ArchNop)                \
+  V(ArchRet)                \
+  V(ArchTruncateDoubleToI)  \
+  TARGET_ARCH_OPCODE_LIST(V)
+
+enum ArchOpcode {
+#define DECLARE_ARCH_OPCODE(Name) k##Name,
+  ARCH_OPCODE_LIST(DECLARE_ARCH_OPCODE)
+#undef DECLARE_ARCH_OPCODE
+#define COUNT_ARCH_OPCODE(Name) +1
+  kLastArchOpcode = -1 ARCH_OPCODE_LIST(COUNT_ARCH_OPCODE)
+#undef COUNT_ARCH_OPCODE
+};
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao);
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+#define ADDRESSING_MODE_LIST(V) \
+  V(None)                       \
+  TARGET_ADDRESSING_MODE_LIST(V)
+
+enum AddressingMode {
+#define DECLARE_ADDRESSING_MODE(Name) kMode_##Name,
+  ADDRESSING_MODE_LIST(DECLARE_ADDRESSING_MODE)
+#undef DECLARE_ADDRESSING_MODE
+#define COUNT_ADDRESSING_MODE(Name) +1
+  kLastAddressingMode = -1 ADDRESSING_MODE_LIST(COUNT_ADDRESSING_MODE)
+#undef COUNT_ADDRESSING_MODE
+};
+
+OStream& operator<<(OStream& os, const AddressingMode& am);
+
+// The mode of the flags continuation (see below).
+enum FlagsMode { kFlags_none = 0, kFlags_branch = 1, kFlags_set = 2 };
+
+OStream& operator<<(OStream& os, const FlagsMode& fm);
+
+// The condition of flags continuation (see below).
+enum FlagsCondition {
+  kEqual,
+  kNotEqual,
+  kSignedLessThan,
+  kSignedGreaterThanOrEqual,
+  kSignedLessThanOrEqual,
+  kSignedGreaterThan,
+  kUnsignedLessThan,
+  kUnsignedGreaterThanOrEqual,
+  kUnsignedLessThanOrEqual,
+  kUnsignedGreaterThan,
+  kUnorderedEqual,
+  kUnorderedNotEqual,
+  kUnorderedLessThan,
+  kUnorderedGreaterThanOrEqual,
+  kUnorderedLessThanOrEqual,
+  kUnorderedGreaterThan,
+  kOverflow,
+  kNotOverflow
+};
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc);
+
+// The InstructionCode is an opaque, target-specific integer that encodes
+// what code to emit for an instruction in the code generator. It is not
+// interesting to the register allocator, as the inputs and flags on the
+// instructions specify everything of interest.
+typedef int32_t InstructionCode;
+
+// Helpers for encoding / decoding InstructionCode into the fields needed
+// for code generation. We encode the instruction, addressing mode, and flags
+// continuation into a single InstructionCode which is stored as part of
+// the instruction.
+typedef BitField<ArchOpcode, 0, 7> ArchOpcodeField;
+typedef BitField<AddressingMode, 7, 4> AddressingModeField;
+typedef BitField<FlagsMode, 11, 2> FlagsModeField;
+typedef BitField<FlagsCondition, 13, 5> FlagsConditionField;
+typedef BitField<int, 13, 19> MiscField;
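+
+// Editor's sketch: on IA32, say, an InstructionCode might be composed as
+//   kIA32Add | AddressingModeField::encode(kMode_MRI) |
+//       FlagsModeField::encode(kFlags_branch) |
+//       FlagsConditionField::encode(kEqual);
+// Note that MiscField overlaps the flags fields starting at bit 13; this
+// works because instructions that use MiscField (e.g. calls) carry no flags
+// continuation.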
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_CODES_H_
diff --git a/src/compiler/instruction-selector-impl.h b/src/compiler/instruction-selector-impl.h
new file mode 100644
index 0000000..d00109e
--- /dev/null
+++ b/src/compiler/instruction-selector-impl.h
@@ -0,0 +1,360 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/linkage.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A helper class for the instruction selector that simplifies construction of
+// Operands. This class implements a base for architecture-specific helpers.
+class OperandGenerator {
+ public:
+  explicit OperandGenerator(InstructionSelector* selector)
+      : selector_(selector) {}
+
+  InstructionOperand* DefineAsRegister(Node* node) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+  }
+
+  InstructionOperand* DefineSameAsFirst(Node* result) {
+    return Define(result, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::SAME_AS_FIRST_INPUT));
+  }
+
+  InstructionOperand* DefineAsFixed(Node* node, Register reg) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                     Register::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* DefineAsFixed(Node* node, DoubleRegister reg) {
+    return Define(node, new (zone())
+                  UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                                     DoubleRegister::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* DefineAsConstant(Node* node) {
+    selector()->MarkAsDefined(node);
+    sequence()->AddConstant(node->id(), ToConstant(node));
+    return ConstantOperand::Create(node->id(), zone());
+  }
+
+  InstructionOperand* DefineAsLocation(Node* node, LinkageLocation location,
+                                       MachineType type) {
+    return Define(node, ToUnallocatedOperand(location, type));
+  }
+
+  InstructionOperand* Use(Node* node) {
+    return Use(node,
+               new (zone()) UnallocatedOperand(
+                   UnallocatedOperand::ANY, UnallocatedOperand::USED_AT_START));
+  }
+
+  InstructionOperand* UseRegister(Node* node) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                  UnallocatedOperand::USED_AT_START));
+  }
+
+  // Use register or operand for the node. If a register is chosen, it won't
+  // alias any temporary or output registers.
+  InstructionOperand* UseUnique(Node* node) {
+    return Use(node, new (zone()) UnallocatedOperand(UnallocatedOperand::ANY));
+  }
+
+  // Use a unique register for the node that does not alias any temporary or
+  // output registers.
+  InstructionOperand* UseUniqueRegister(Node* node) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER));
+  }
+
+  InstructionOperand* UseFixed(Node* node, Register reg) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* UseFixed(Node* node, DoubleRegister reg) {
+    return Use(node, new (zone())
+               UnallocatedOperand(UnallocatedOperand::FIXED_DOUBLE_REGISTER,
+                                  DoubleRegister::ToAllocationIndex(reg)));
+  }
+
+  InstructionOperand* UseImmediate(Node* node) {
+    int index = sequence()->AddImmediate(ToConstant(node));
+    return ImmediateOperand::Create(index, zone());
+  }
+
+  InstructionOperand* UseLocation(Node* node, LinkageLocation location,
+                                  MachineType type) {
+    return Use(node, ToUnallocatedOperand(location, type));
+  }
+
+  InstructionOperand* TempRegister() {
+    UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                        UnallocatedOperand::USED_AT_START);
+    op->set_virtual_register(sequence()->NextVirtualRegister());
+    return op;
+  }
+
+  InstructionOperand* TempDoubleRegister() {
+    UnallocatedOperand* op =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER,
+                                        UnallocatedOperand::USED_AT_START);
+    op->set_virtual_register(sequence()->NextVirtualRegister());
+    sequence()->MarkAsDouble(op->virtual_register());
+    return op;
+  }
+
+  InstructionOperand* TempRegister(Register reg) {
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           Register::ToAllocationIndex(reg));
+  }
+
+  InstructionOperand* TempImmediate(int32_t imm) {
+    int index = sequence()->AddImmediate(Constant(imm));
+    return ImmediateOperand::Create(index, zone());
+  }
+
+  InstructionOperand* Label(BasicBlock* block) {
+    // TODO(bmeurer): We misuse ImmediateOperand here.
+    return TempImmediate(block->id());
+  }
+
+ protected:
+  Graph* graph() const { return selector()->graph(); }
+  InstructionSelector* selector() const { return selector_; }
+  InstructionSequence* sequence() const { return selector()->sequence(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return selector()->instruction_zone(); }
+
+ private:
+  static Constant ToConstant(const Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return Constant(OpParameter<int32_t>(node));
+      case IrOpcode::kInt64Constant:
+        return Constant(OpParameter<int64_t>(node));
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kFloat64Constant:
+        return Constant(OpParameter<double>(node));
+      case IrOpcode::kExternalConstant:
+        return Constant(OpParameter<ExternalReference>(node));
+      case IrOpcode::kHeapConstant:
+        return Constant(OpParameter<Unique<HeapObject> >(node).handle());
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return Constant(static_cast<int32_t>(0));
+  }
+
+  UnallocatedOperand* Define(Node* node, UnallocatedOperand* operand) {
+    DCHECK_NOT_NULL(node);
+    DCHECK_NOT_NULL(operand);
+    operand->set_virtual_register(node->id());
+    selector()->MarkAsDefined(node);
+    return operand;
+  }
+
+  UnallocatedOperand* Use(Node* node, UnallocatedOperand* operand) {
+    DCHECK_NOT_NULL(node);
+    DCHECK_NOT_NULL(operand);
+    operand->set_virtual_register(node->id());
+    selector()->MarkAsUsed(node);
+    return operand;
+  }
+
+  UnallocatedOperand* ToUnallocatedOperand(LinkageLocation location,
+                                           MachineType type) {
+    if (location.location_ == LinkageLocation::ANY_REGISTER) {
+      return new (zone())
+          UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER);
+    }
+    if (location.location_ < 0) {
+      return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_SLOT,
+                                             location.location_);
+    }
+    if (RepresentationOf(type) == kRepFloat64) {
+      return new (zone()) UnallocatedOperand(
+          UnallocatedOperand::FIXED_DOUBLE_REGISTER, location.location_);
+    }
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           location.location_);
+  }
+
+  InstructionSelector* selector_;
+};
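+
+// Typical use (cf. VisitTruncateFloat64ToInt32 in instruction-selector.cc):
+// a platform visitor instantiates a generator and describes the operand
+// constraints when emitting, for example:
+//
+//   OperandGenerator g(this);
+//   Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+//        g.UseRegister(node->InputAt(0)));
+//
+// The register allocator later resolves these constraints into concrete
+// register and stack slot assignments.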
+
+
+// The flags continuation is a way to combine a branch or a materialization
+// of a boolean value with an instruction that sets the flags register.
+// The whole instruction is treated as a unit by the register allocator, and
+// thus no spills or moves can be introduced between the flags-setting
+// instruction and the branch or set it should be combined with.
+class FlagsContinuation FINAL {
+ public:
+  FlagsContinuation() : mode_(kFlags_none) {}
+
+  // Creates a new flags continuation from the given condition and true/false
+  // blocks.
+  FlagsContinuation(FlagsCondition condition, BasicBlock* true_block,
+                    BasicBlock* false_block)
+      : mode_(kFlags_branch),
+        condition_(condition),
+        true_block_(true_block),
+        false_block_(false_block) {
+    DCHECK_NOT_NULL(true_block);
+    DCHECK_NOT_NULL(false_block);
+  }
+
+  // Creates a new flags continuation from the given condition and result node.
+  FlagsContinuation(FlagsCondition condition, Node* result)
+      : mode_(kFlags_set), condition_(condition), result_(result) {
+    DCHECK_NOT_NULL(result);
+  }
+
+  bool IsNone() const { return mode_ == kFlags_none; }
+  bool IsBranch() const { return mode_ == kFlags_branch; }
+  bool IsSet() const { return mode_ == kFlags_set; }
+  FlagsCondition condition() const {
+    DCHECK(!IsNone());
+    return condition_;
+  }
+  Node* result() const {
+    DCHECK(IsSet());
+    return result_;
+  }
+  BasicBlock* true_block() const {
+    DCHECK(IsBranch());
+    return true_block_;
+  }
+  BasicBlock* false_block() const {
+    DCHECK(IsBranch());
+    return false_block_;
+  }
+
+  void Negate() {
+    DCHECK(!IsNone());
+    condition_ = static_cast<FlagsCondition>(condition_ ^ 1);
+  }
+
+  void Commute() {
+    DCHECK(!IsNone());
+    switch (condition_) {
+      case kEqual:
+      case kNotEqual:
+      case kOverflow:
+      case kNotOverflow:
+        return;
+      case kSignedLessThan:
+        condition_ = kSignedGreaterThan;
+        return;
+      case kSignedGreaterThanOrEqual:
+        condition_ = kSignedLessThanOrEqual;
+        return;
+      case kSignedLessThanOrEqual:
+        condition_ = kSignedGreaterThanOrEqual;
+        return;
+      case kSignedGreaterThan:
+        condition_ = kSignedLessThan;
+        return;
+      case kUnsignedLessThan:
+        condition_ = kUnsignedGreaterThan;
+        return;
+      case kUnsignedGreaterThanOrEqual:
+        condition_ = kUnsignedLessThanOrEqual;
+        return;
+      case kUnsignedLessThanOrEqual:
+        condition_ = kUnsignedGreaterThanOrEqual;
+        return;
+      case kUnsignedGreaterThan:
+        condition_ = kUnsignedLessThan;
+        return;
+      case kUnorderedEqual:
+      case kUnorderedNotEqual:
+        return;
+      case kUnorderedLessThan:
+        condition_ = kUnorderedGreaterThan;
+        return;
+      case kUnorderedGreaterThanOrEqual:
+        condition_ = kUnorderedLessThanOrEqual;
+        return;
+      case kUnorderedLessThanOrEqual:
+        condition_ = kUnorderedGreaterThanOrEqual;
+        return;
+      case kUnorderedGreaterThan:
+        condition_ = kUnorderedLessThan;
+        return;
+    }
+    UNREACHABLE();
+  }
+
+  void OverwriteAndNegateIfEqual(FlagsCondition condition) {
+    bool negate = condition_ == kEqual;
+    condition_ = condition;
+    if (negate) Negate();
+  }
+
+  void SwapBlocks() { std::swap(true_block_, false_block_); }
+
+  // Encodes this flags continuation into the given opcode.
+  InstructionCode Encode(InstructionCode opcode) {
+    opcode |= FlagsModeField::encode(mode_);
+    if (mode_ != kFlags_none) {
+      opcode |= FlagsConditionField::encode(condition_);
+    }
+    return opcode;
+  }
+
+ private:
+  FlagsMode mode_;
+  FlagsCondition condition_;
+  Node* result_;             // Only valid if mode_ == kFlags_set.
+  BasicBlock* true_block_;   // Only valid if mode_ == kFlags_branch.
+  BasicBlock* false_block_;  // Only valid if mode_ == kFlags_branch.
+};
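+
+// Typical use, as in VisitWord32Equal below: to materialize a boolean,
+//
+//   FlagsContinuation cont(kEqual, node);
+//   VisitWord32Compare(node, &cont);
+//
+// A platform branch visitor can analogously construct a continuation from a
+// condition and the true/false blocks to fuse a compare with the branch;
+// Encode() folds the mode and condition into the opcode either way.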
+
+
+// An internal helper class for generating the operands to calls.
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+struct CallBuffer {
+  CallBuffer(Zone* zone, CallDescriptor* descriptor,
+             FrameStateDescriptor* frame_state);
+
+  CallDescriptor* descriptor;
+  FrameStateDescriptor* frame_state_descriptor;
+  NodeVector output_nodes;
+  InstructionOperandVector outputs;
+  InstructionOperandVector instruction_args;
+  NodeVector pushed_nodes;
+
+  size_t input_count() const { return descriptor->InputCount(); }
+
+  size_t frame_state_count() const { return descriptor->FrameStateCount(); }
+
+  size_t frame_state_value_count() const {
+    return (frame_state_descriptor == NULL)
+               ? 0
+               : (frame_state_descriptor->GetTotalSize() +
+                  1);  // Include deopt id.
+  }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
diff --git a/src/compiler/instruction-selector-unittest.cc b/src/compiler/instruction-selector-unittest.cc
new file mode 100644
index 0000000..aa70735
--- /dev/null
+++ b/src/compiler/instruction-selector-unittest.cc
@@ -0,0 +1,496 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+#include "src/compiler/compiler-test-utils.h"
+#include "src/flags.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+typedef RawMachineAssembler::Label MLabel;
+
+}  // namespace
+
+
+InstructionSelectorTest::InstructionSelectorTest() : rng_(FLAG_random_seed) {}
+
+
+InstructionSelectorTest::~InstructionSelectorTest() {}
+
+
+InstructionSelectorTest::Stream InstructionSelectorTest::StreamBuilder::Build(
+    InstructionSelector::Features features,
+    InstructionSelectorTest::StreamBuilderMode mode) {
+  Schedule* schedule = Export();
+  if (FLAG_trace_turbo) {
+    OFStream out(stdout);
+    out << "=== Schedule before instruction selection ===" << endl << *schedule;
+  }
+  EXPECT_NE(0, graph()->NodeCount());
+  CompilationInfo info(test_->isolate(), test_->zone());
+  Linkage linkage(&info, call_descriptor());
+  InstructionSequence sequence(&linkage, graph(), schedule);
+  SourcePositionTable source_position_table(graph());
+  InstructionSelector selector(&sequence, &source_position_table, features);
+  selector.SelectInstructions();
+  if (FLAG_trace_turbo) {
+    OFStream out(stdout);
+    out << "=== Code sequence after instruction selection ===" << endl
+        << sequence;
+  }
+  Stream s;
+  std::set<int> virtual_registers;
+  for (InstructionSequence::const_iterator i = sequence.begin();
+       i != sequence.end(); ++i) {
+    Instruction* instr = *i;
+    if (instr->opcode() < 0) continue;
+    if (mode == kTargetInstructions) {
+      switch (instr->arch_opcode()) {
+#define CASE(Name) \
+  case k##Name:    \
+    break;
+        TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+        default:
+          continue;
+      }
+    }
+    if (mode == kAllExceptNopInstructions && instr->arch_opcode() == kArchNop) {
+      continue;
+    }
+    for (size_t i = 0; i < instr->OutputCount(); ++i) {
+      InstructionOperand* output = instr->OutputAt(i);
+      EXPECT_NE(InstructionOperand::IMMEDIATE, output->kind());
+      if (output->IsConstant()) {
+        s.constants_.insert(std::make_pair(
+            output->index(), sequence.GetConstant(output->index())));
+        virtual_registers.insert(output->index());
+      } else if (output->IsUnallocated()) {
+        virtual_registers.insert(
+            UnallocatedOperand::cast(output)->virtual_register());
+      }
+    }
+    for (size_t i = 0; i < instr->InputCount(); ++i) {
+      InstructionOperand* input = instr->InputAt(i);
+      EXPECT_NE(InstructionOperand::CONSTANT, input->kind());
+      if (input->IsImmediate()) {
+        s.immediates_.insert(std::make_pair(
+            input->index(), sequence.GetImmediate(input->index())));
+      } else if (input->IsUnallocated()) {
+        virtual_registers.insert(
+            UnallocatedOperand::cast(input)->virtual_register());
+      }
+    }
+    s.instructions_.push_back(instr);
+  }
+  for (std::set<int>::const_iterator i = virtual_registers.begin();
+       i != virtual_registers.end(); ++i) {
+    int virtual_register = *i;
+    if (sequence.IsDouble(virtual_register)) {
+      EXPECT_FALSE(sequence.IsReference(virtual_register));
+      s.doubles_.insert(virtual_register);
+    }
+    if (sequence.IsReference(virtual_register)) {
+      EXPECT_FALSE(sequence.IsDouble(virtual_register));
+      s.references_.insert(virtual_register);
+    }
+  }
+  for (int i = 0; i < sequence.GetFrameStateDescriptorCount(); i++) {
+    s.deoptimization_entries_.push_back(sequence.GetFrameStateDescriptor(
+        InstructionSequence::StateId::FromInt(i)));
+  }
+  return s;
+}
+
+
+// -----------------------------------------------------------------------------
+// Return.
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  m.Return(m.Parameter(0));
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+  EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReturnZero) {
+  StreamBuilder m(this, kMachInt32);
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(InstructionOperand::CONSTANT, s[0]->OutputAt(0)->kind());
+  EXPECT_EQ(0, s.ToInt32(s[0]->OutputAt(0)));
+  EXPECT_EQ(kArchRet, s[1]->arch_opcode());
+  EXPECT_EQ(1U, s[1]->InputCount());
+}
+
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TARGET_TEST_F(InstructionSelectorTest, TruncateFloat64ToInt32WithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachFloat64);
+  m.Return(m.TruncateFloat64ToInt32(m.Parameter(0)));
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  EXPECT_EQ(kArchTruncateDoubleToI, s[1]->arch_opcode());
+  EXPECT_EQ(1U, s[1]->InputCount());
+  EXPECT_EQ(1U, s[1]->OutputCount());
+  EXPECT_EQ(kArchRet, s[2]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Parameters.
+
+
+TARGET_TEST_F(InstructionSelectorTest, DoubleParameter) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat64);
+  Node* param = m.Parameter(0);
+  m.Return(param);
+  Stream s = m.Build(kAllInstructions);
+  EXPECT_TRUE(s.IsDouble(param->id()));
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, ReferenceParameter) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+  Node* param = m.Parameter(0);
+  m.Return(param);
+  Stream s = m.Build(kAllInstructions);
+  EXPECT_TRUE(s.IsReference(param->id()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Finish.
+
+
+TARGET_TEST_F(InstructionSelectorTest, Finish) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged);
+  Node* param = m.Parameter(0);
+  Node* finish = m.NewNode(m.common()->Finish(1), param, m.graph()->start());
+  m.Return(finish);
+  Stream s = m.Build(kAllInstructions);
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kArchNop, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  ASSERT_TRUE(s[0]->Output()->IsUnallocated());
+  EXPECT_EQ(param->id(), s.ToVreg(s[0]->Output()));
+  EXPECT_EQ(kArchNop, s[1]->arch_opcode());
+  ASSERT_EQ(1U, s[1]->InputCount());
+  ASSERT_TRUE(s[1]->InputAt(0)->IsUnallocated());
+  EXPECT_EQ(param->id(), s.ToVreg(s[1]->InputAt(0)));
+  ASSERT_EQ(1U, s[1]->OutputCount());
+  ASSERT_TRUE(s[1]->Output()->IsUnallocated());
+  EXPECT_TRUE(UnallocatedOperand::cast(s[1]->Output())->HasSameAsInputPolicy());
+  EXPECT_EQ(finish->id(), s.ToVreg(s[1]->Output()));
+  EXPECT_TRUE(s.IsReference(finish->id()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Phi.
+
+
+typedef InstructionSelectorTestWithParam<MachineType>
+    InstructionSelectorPhiTest;
+
+
+TARGET_TEST_P(InstructionSelectorPhiTest, Doubleness) {
+  const MachineType type = GetParam();
+  StreamBuilder m(this, type, type, type);
+  Node* param0 = m.Parameter(0);
+  Node* param1 = m.Parameter(1);
+  MLabel a, b, c;
+  m.Branch(m.Int32Constant(0), &a, &b);
+  m.Bind(&a);
+  m.Goto(&c);
+  m.Bind(&b);
+  m.Goto(&c);
+  m.Bind(&c);
+  Node* phi = m.Phi(type, param0, param1);
+  m.Return(phi);
+  Stream s = m.Build(kAllInstructions);
+  EXPECT_EQ(s.IsDouble(phi->id()), s.IsDouble(param0->id()));
+  EXPECT_EQ(s.IsDouble(phi->id()), s.IsDouble(param1->id()));
+}
+
+
+TARGET_TEST_P(InstructionSelectorPhiTest, Referenceness) {
+  const MachineType type = GetParam();
+  StreamBuilder m(this, type, type, type);
+  Node* param0 = m.Parameter(0);
+  Node* param1 = m.Parameter(1);
+  MLabel a, b, c;
+  m.Branch(m.Int32Constant(1), &a, &b);
+  m.Bind(&a);
+  m.Goto(&c);
+  m.Bind(&b);
+  m.Goto(&c);
+  m.Bind(&c);
+  Node* phi = m.Phi(type, param0, param1);
+  m.Return(phi);
+  Stream s = m.Build(kAllInstructions);
+  EXPECT_EQ(s.IsReference(phi->id()), s.IsReference(param0->id()));
+  EXPECT_EQ(s.IsReference(phi->id()), s.IsReference(param1->id()));
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorPhiTest,
+                        ::testing::Values(kMachFloat64, kMachInt8, kMachUint8,
+                                          kMachInt16, kMachUint16, kMachInt32,
+                                          kMachUint32, kMachInt64, kMachUint64,
+                                          kMachPtr, kMachAnyTagged));
+
+
+// -----------------------------------------------------------------------------
+// ValueEffect.
+
+
+TARGET_TEST_F(InstructionSelectorTest, ValueEffect) {
+  StreamBuilder m1(this, kMachInt32, kMachPtr);
+  Node* p1 = m1.Parameter(0);
+  m1.Return(m1.Load(kMachInt32, p1, m1.Int32Constant(0)));
+  Stream s1 = m1.Build(kAllInstructions);
+  StreamBuilder m2(this, kMachInt32, kMachPtr);
+  Node* p2 = m2.Parameter(0);
+  m2.Return(m2.NewNode(m2.machine()->Load(kMachInt32), p2, m2.Int32Constant(0),
+                       m2.NewNode(m2.common()->ValueEffect(1), p2)));
+  Stream s2 = m2.Build(kAllInstructions);
+  EXPECT_LE(3U, s1.size());
+  ASSERT_EQ(s1.size(), s2.size());
+  TRACED_FORRANGE(size_t, i, 0, s1.size() - 1) {
+    const Instruction* i1 = s1[i];
+    const Instruction* i2 = s2[i];
+    EXPECT_EQ(i1->arch_opcode(), i2->arch_opcode());
+    EXPECT_EQ(i1->InputCount(), i2->InputCount());
+    EXPECT_EQ(i1->OutputCount(), i2->OutputCount());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Calls with deoptimization.
+TARGET_TEST_F(InstructionSelectorTest, CallJSFunctionWithDeopt) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+                  kMachAnyTagged);
+
+  BailoutId bailout_id(42);
+
+  Node* function_node = m.Parameter(0);
+  Node* receiver = m.Parameter(1);
+  Node* context = m.Parameter(2);
+
+  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(1));
+  Node* locals = m.NewNode(m.common()->StateValues(0));
+  Node* stack = m.NewNode(m.common()->StateValues(0));
+  Node* context_dummy = m.Int32Constant(0);
+
+  Node* state_node = m.NewNode(
+      m.common()->FrameState(JS_FRAME, bailout_id, kPushOutput), parameters,
+      locals, stack, context_dummy, m.UndefinedConstant());
+  Node* call = m.CallJS0(function_node, receiver, context, state_node);
+  m.Return(call);
+
+  Stream s = m.Build(kAllExceptNopInstructions);
+
+  // Skip until kArchCallJSFunction.
+  size_t index = 0;
+  for (; index < s.size() && s[index]->arch_opcode() != kArchCallJSFunction;
+       index++) {
+  }
+  // Now we should have two instructions: call and return.
+  ASSERT_EQ(index + 2, s.size());
+
+  EXPECT_EQ(kArchCallJSFunction, s[index++]->arch_opcode());
+  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+
+  // TODO(jarin) Check deoptimization table.
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest, CallFunctionStubWithDeopt) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+                  kMachAnyTagged);
+
+  BailoutId bailout_id_before(42);
+
+  // Some arguments for the call node.
+  Node* function_node = m.Parameter(0);
+  Node* receiver = m.Parameter(1);
+  Node* context = m.Int32Constant(1);  // Context is ignored.
+
+  // Build frame state for the state before the call.
+  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
+  Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(44));
+  Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(45));
+
+  Node* context_sentinel = m.Int32Constant(0);
+  Node* frame_state_before = m.NewNode(
+      m.common()->FrameState(JS_FRAME, bailout_id_before, kPushOutput),
+      parameters, locals, stack, context_sentinel, m.UndefinedConstant());
+
+  // Build the call.
+  Node* call = m.CallFunctionStub0(function_node, receiver, context,
+                                   frame_state_before, CALL_AS_METHOD);
+
+  m.Return(call);
+
+  Stream s = m.Build(kAllExceptNopInstructions);
+
+  // Skip until kArchCallCodeObject.
+  size_t index = 0;
+  for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
+       index++) {
+  }
+  // Now we should have two instructions: call and return.
+  ASSERT_EQ(index + 2, s.size());
+
+  // Check the call instruction.
+  const Instruction* call_instr = s[index++];
+  EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
+  size_t num_operands =
+      1 +  // Code object.
+      1 +  // Frame state deopt id.
+      4 +  // One input for each value in frame state + context.
+      1 +  // Function.
+      1;   // Context.
+  ASSERT_EQ(num_operands, call_instr->InputCount());
+
+  // Code object.
+  EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
+
+  // Deoptimization id.
+  int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
+  FrameStateDescriptor* desc_before =
+      s.GetFrameStateDescriptor(deopt_id_before);
+  EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
+  EXPECT_EQ(kPushOutput, desc_before->state_combine());
+  EXPECT_EQ(1u, desc_before->parameters_count());
+  EXPECT_EQ(1u, desc_before->locals_count());
+  EXPECT_EQ(1u, desc_before->stack_count());
+  EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(2)));
+  EXPECT_EQ(0, s.ToInt32(call_instr->InputAt(3)));
+  EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(4)));
+  EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(5)));
+
+  // Function.
+  EXPECT_EQ(function_node->id(), s.ToVreg(call_instr->InputAt(6)));
+  // Context.
+  EXPECT_EQ(context->id(), s.ToVreg(call_instr->InputAt(7)));
+
+  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+
+  EXPECT_EQ(index, s.size());
+}
+
+
+TARGET_TEST_F(InstructionSelectorTest,
+              CallFunctionStubDeoptRecursiveFrameState) {
+  StreamBuilder m(this, kMachAnyTagged, kMachAnyTagged, kMachAnyTagged,
+                  kMachAnyTagged);
+
+  BailoutId bailout_id_before(42);
+  BailoutId bailout_id_parent(62);
+
+  // Some arguments for the call node.
+  Node* function_node = m.Parameter(0);
+  Node* receiver = m.Parameter(1);
+  Node* context = m.Int32Constant(66);
+
+  // Build frame state for the state before the call.
+  Node* parameters = m.NewNode(m.common()->StateValues(1), m.Int32Constant(63));
+  Node* locals = m.NewNode(m.common()->StateValues(1), m.Int32Constant(64));
+  Node* stack = m.NewNode(m.common()->StateValues(1), m.Int32Constant(65));
+  Node* frame_state_parent = m.NewNode(
+      m.common()->FrameState(JS_FRAME, bailout_id_parent, kIgnoreOutput),
+      parameters, locals, stack, context, m.UndefinedConstant());
+
+  Node* context2 = m.Int32Constant(46);
+  Node* parameters2 =
+      m.NewNode(m.common()->StateValues(1), m.Int32Constant(43));
+  Node* locals2 = m.NewNode(m.common()->StateValues(1), m.Int32Constant(44));
+  Node* stack2 = m.NewNode(m.common()->StateValues(1), m.Int32Constant(45));
+  Node* frame_state_before = m.NewNode(
+      m.common()->FrameState(JS_FRAME, bailout_id_before, kPushOutput),
+      parameters2, locals2, stack2, context2, frame_state_parent);
+
+  // Build the call.
+  Node* call = m.CallFunctionStub0(function_node, receiver, context2,
+                                   frame_state_before, CALL_AS_METHOD);
+
+  m.Return(call);
+
+  Stream s = m.Build(kAllExceptNopInstructions);
+
+  // Skip until kArchCallCodeObject.
+  size_t index = 0;
+  for (; index < s.size() && s[index]->arch_opcode() != kArchCallCodeObject;
+       index++) {
+  }
+  // Now we should have two instructions: call and return.
+  EXPECT_EQ(index + 2, s.size());
+
+  // Check the call instruction.
+  const Instruction* call_instr = s[index++];
+  EXPECT_EQ(kArchCallCodeObject, call_instr->arch_opcode());
+  size_t num_operands =
+      1 +  // Code object.
+      1 +  // Frame state deopt id.
+      4 +  // One input for each value in frame state + context.
+      4 +  // One input for each value in the parent frame state + context.
+      1 +  // Function.
+      1;   // Context.
+  EXPECT_EQ(num_operands, call_instr->InputCount());
+  // Code object.
+  EXPECT_TRUE(call_instr->InputAt(0)->IsImmediate());
+
+  // Deoptimization id.
+  int32_t deopt_id_before = s.ToInt32(call_instr->InputAt(1));
+  FrameStateDescriptor* desc_before =
+      s.GetFrameStateDescriptor(deopt_id_before);
+  EXPECT_EQ(bailout_id_before, desc_before->bailout_id());
+  EXPECT_EQ(1u, desc_before->parameters_count());
+  EXPECT_EQ(1u, desc_before->locals_count());
+  EXPECT_EQ(1u, desc_before->stack_count());
+  EXPECT_EQ(63, s.ToInt32(call_instr->InputAt(2)));
+  // Context:
+  EXPECT_EQ(66, s.ToInt32(call_instr->InputAt(3)));
+  EXPECT_EQ(64, s.ToInt32(call_instr->InputAt(4)));
+  EXPECT_EQ(65, s.ToInt32(call_instr->InputAt(5)));
+  // Values from parent environment should follow.
+  EXPECT_EQ(43, s.ToInt32(call_instr->InputAt(6)));
+  EXPECT_EQ(46, s.ToInt32(call_instr->InputAt(7)));
+  EXPECT_EQ(44, s.ToInt32(call_instr->InputAt(8)));
+  EXPECT_EQ(45, s.ToInt32(call_instr->InputAt(9)));
+
+  // Function.
+  EXPECT_EQ(function_node->id(), s.ToVreg(call_instr->InputAt(10)));
+  // Context.
+  EXPECT_EQ(context2->id(), s.ToVreg(call_instr->InputAt(11)));
+  // Continuation.
+
+  EXPECT_EQ(kArchRet, s[index++]->arch_opcode());
+  EXPECT_EQ(index, s.size());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction-selector-unittest.h b/src/compiler/instruction-selector-unittest.h
new file mode 100644
index 0000000..4e12dab
--- /dev/null
+++ b/src/compiler/instruction-selector-unittest.h
@@ -0,0 +1,209 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
+
+#include <deque>
+#include <set>
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionSelectorTest : public TestWithContext, public TestWithZone {
+ public:
+  InstructionSelectorTest();
+  virtual ~InstructionSelectorTest();
+
+  base::RandomNumberGenerator* rng() { return &rng_; }
+
+  class Stream;
+
+  enum StreamBuilderMode {
+    kAllInstructions,
+    kTargetInstructions,
+    kAllExceptNopInstructions
+  };
+
+  class StreamBuilder FINAL : public RawMachineAssembler {
+   public:
+    StreamBuilder(InstructionSelectorTest* test, MachineType return_type)
+        : RawMachineAssembler(new (test->zone()) Graph(test->zone()),
+                              MakeMachineSignature(test->zone(), return_type)),
+          test_(test) {}
+    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+                  MachineType parameter0_type)
+        : RawMachineAssembler(
+              new (test->zone()) Graph(test->zone()),
+              MakeMachineSignature(test->zone(), return_type, parameter0_type)),
+          test_(test) {}
+    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+                  MachineType parameter0_type, MachineType parameter1_type)
+        : RawMachineAssembler(
+              new (test->zone()) Graph(test->zone()),
+              MakeMachineSignature(test->zone(), return_type, parameter0_type,
+                                   parameter1_type)),
+          test_(test) {}
+    StreamBuilder(InstructionSelectorTest* test, MachineType return_type,
+                  MachineType parameter0_type, MachineType parameter1_type,
+                  MachineType parameter2_type)
+        : RawMachineAssembler(
+              new (test->zone()) Graph(test->zone()),
+              MakeMachineSignature(test->zone(), return_type, parameter0_type,
+                                   parameter1_type, parameter2_type)),
+          test_(test) {}
+
+    Stream Build(CpuFeature feature) {
+      return Build(InstructionSelector::Features(feature));
+    }
+    Stream Build(CpuFeature feature1, CpuFeature feature2) {
+      return Build(InstructionSelector::Features(feature1, feature2));
+    }
+    Stream Build(StreamBuilderMode mode = kTargetInstructions) {
+      return Build(InstructionSelector::Features(), mode);
+    }
+    Stream Build(InstructionSelector::Features features,
+                 StreamBuilderMode mode = kTargetInstructions);
+
+   private:
+    MachineSignature* MakeMachineSignature(Zone* zone,
+                                           MachineType return_type) {
+      MachineSignature::Builder builder(zone, 1, 0);
+      builder.AddReturn(return_type);
+      return builder.Build();
+    }
+
+    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+                                           MachineType parameter0_type) {
+      MachineSignature::Builder builder(zone, 1, 1);
+      builder.AddReturn(return_type);
+      builder.AddParam(parameter0_type);
+      return builder.Build();
+    }
+
+    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+                                           MachineType parameter0_type,
+                                           MachineType parameter1_type) {
+      MachineSignature::Builder builder(zone, 1, 2);
+      builder.AddReturn(return_type);
+      builder.AddParam(parameter0_type);
+      builder.AddParam(parameter1_type);
+      return builder.Build();
+    }
+
+    MachineSignature* MakeMachineSignature(Zone* zone, MachineType return_type,
+                                           MachineType parameter0_type,
+                                           MachineType parameter1_type,
+                                           MachineType parameter2_type) {
+      MachineSignature::Builder builder(zone, 1, 3);
+      builder.AddReturn(return_type);
+      builder.AddParam(parameter0_type);
+      builder.AddParam(parameter1_type);
+      builder.AddParam(parameter2_type);
+      return builder.Build();
+    }
+
+   private:
+    InstructionSelectorTest* test_;
+  };
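+
+  // A test typically builds a small graph through a StreamBuilder, e.g.
+  //
+  //   StreamBuilder m(this, kMachInt32, kMachInt32);
+  //   m.Return(m.Parameter(0));
+  //   Stream s = m.Build(kAllInstructions);
+  //
+  // and then inspects the resulting Stream (see the accompanying .cc file).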
+
+  class Stream FINAL {
+   public:
+    size_t size() const { return instructions_.size(); }
+    const Instruction* operator[](size_t index) const {
+      EXPECT_LT(index, size());
+      return instructions_[index];
+    }
+
+    bool IsDouble(const InstructionOperand* operand) const {
+      return IsDouble(ToVreg(operand));
+    }
+    bool IsDouble(int virtual_register) const {
+      return doubles_.find(virtual_register) != doubles_.end();
+    }
+
+    bool IsInteger(const InstructionOperand* operand) const {
+      return IsInteger(ToVreg(operand));
+    }
+    bool IsInteger(int virtual_register) const {
+      return !IsDouble(virtual_register) && !IsReference(virtual_register);
+    }
+
+    bool IsReference(const InstructionOperand* operand) const {
+      return IsReference(ToVreg(operand));
+    }
+    bool IsReference(int virtual_register) const {
+      return references_.find(virtual_register) != references_.end();
+    }
+
+    int32_t ToInt32(const InstructionOperand* operand) const {
+      return ToConstant(operand).ToInt32();
+    }
+
+    int64_t ToInt64(const InstructionOperand* operand) const {
+      return ToConstant(operand).ToInt64();
+    }
+
+    int ToVreg(const InstructionOperand* operand) const {
+      if (operand->IsConstant()) return operand->index();
+      EXPECT_EQ(InstructionOperand::UNALLOCATED, operand->kind());
+      return UnallocatedOperand::cast(operand)->virtual_register();
+    }
+
+    FrameStateDescriptor* GetFrameStateDescriptor(int deoptimization_id) {
+      EXPECT_LT(deoptimization_id, GetFrameStateDescriptorCount());
+      return deoptimization_entries_[deoptimization_id];
+    }
+
+    int GetFrameStateDescriptorCount() {
+      return static_cast<int>(deoptimization_entries_.size());
+    }
+
+   private:
+    Constant ToConstant(const InstructionOperand* operand) const {
+      ConstantMap::const_iterator i;
+      if (operand->IsConstant()) {
+        i = constants_.find(operand->index());
+        EXPECT_FALSE(constants_.end() == i);
+      } else {
+        EXPECT_EQ(InstructionOperand::IMMEDIATE, operand->kind());
+        i = immediates_.find(operand->index());
+        EXPECT_FALSE(immediates_.end() == i);
+      }
+      EXPECT_EQ(operand->index(), i->first);
+      return i->second;
+    }
+
+    friend class StreamBuilder;
+
+    typedef std::map<int, Constant> ConstantMap;
+
+    ConstantMap constants_;
+    ConstantMap immediates_;
+    std::deque<Instruction*> instructions_;
+    std::set<int> doubles_;
+    std::set<int> references_;
+    std::deque<FrameStateDescriptor*> deoptimization_entries_;
+  };
+
+  base::RandomNumberGenerator rng_;
+};
+
+
+template <typename T>
+class InstructionSelectorTestWithParam
+    : public InstructionSelectorTest,
+      public ::testing::WithParamInterface<T> {};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_UNITTEST_H_
diff --git a/src/compiler/instruction-selector.cc b/src/compiler/instruction-selector.cc
new file mode 100644
index 0000000..3c32b64
--- /dev/null
+++ b/src/compiler/instruction-selector.cc
@@ -0,0 +1,1101 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector.h"
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+InstructionSelector::InstructionSelector(InstructionSequence* sequence,
+                                         SourcePositionTable* source_positions,
+                                         Features features)
+    : zone_(sequence->isolate()),
+      sequence_(sequence),
+      source_positions_(source_positions),
+      features_(features),
+      current_block_(NULL),
+      instructions_(zone()),
+      defined_(graph()->NodeCount(), false, zone()),
+      used_(graph()->NodeCount(), false, zone()) {}
+
+
+void InstructionSelector::SelectInstructions() {
+  // Mark the inputs of all phis in loop headers as used.
+  BasicBlockVector* blocks = schedule()->rpo_order();
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+    BasicBlock* block = *i;
+    if (!block->IsLoopHeader()) continue;
+    DCHECK_NE(0, block->PredecessorCount());
+    DCHECK_NE(1, block->PredecessorCount());
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+
+      // Mark all inputs as used.
+      Node::Inputs inputs = phi->inputs();
+      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
+        MarkAsUsed(*k);
+      }
+    }
+  }
+
+  // Visit each basic block in post order.
+  for (BasicBlockVectorRIter i = blocks->rbegin(); i != blocks->rend(); ++i) {
+    VisitBlock(*i);
+  }
+
+  // Schedule the selected instructions.
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end(); ++i) {
+    BasicBlock* block = *i;
+    size_t end = block->code_end_;
+    size_t start = block->code_start_;
+    sequence()->StartBlock(block);
+    while (start-- > end) {
+      sequence()->AddInstruction(instructions_[start], block);
+    }
+    sequence()->EndBlock(block);
+  }
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  return Emit(opcode, output_count, &output, 0, NULL, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  return Emit(opcode, output_count, &output, 1, &a, temp_count, temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a,
+                                       InstructionOperand* b, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(InstructionCode opcode,
+                                       InstructionOperand* output,
+                                       InstructionOperand* a,
+                                       InstructionOperand* b,
+                                       InstructionOperand* c, size_t temp_count,
+                                       InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+    size_t temp_count, InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c, d};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
+    size_t input_count, InstructionOperand** inputs, size_t temp_count,
+    InstructionOperand** temps) {
+  Instruction* instr =
+      Instruction::New(instruction_zone(), opcode, output_count, outputs,
+                       input_count, inputs, temp_count, temps);
+  return Emit(instr);
+}
+
+
+Instruction* InstructionSelector::Emit(Instruction* instr) {
+  instructions_.push_back(instr);
+  return instr;
+}
+
+
+bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
+  return block->rpo_number_ == (current_block_->rpo_number_ + 1) &&
+         block->deferred_ == current_block_->deferred_;
+}
+
+
+bool InstructionSelector::CanCover(Node* user, Node* node) const {
+  return node->OwnedBy(user) &&
+         schedule()->block(node) == schedule()->block(user);
+}
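+
+// Covering is what allows a platform selector to fold, for example, a load
+// whose only use is an adjacent instruction in the same block into that
+// instruction (e.g. as a memory operand) instead of emitting it separately.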
+
+
+bool InstructionSelector::IsDefined(Node* node) const {
+  DCHECK_NOT_NULL(node);
+  NodeId id = node->id();
+  DCHECK(id >= 0);
+  DCHECK(id < static_cast<NodeId>(defined_.size()));
+  return defined_[id];
+}
+
+
+void InstructionSelector::MarkAsDefined(Node* node) {
+  DCHECK_NOT_NULL(node);
+  NodeId id = node->id();
+  DCHECK(id >= 0);
+  DCHECK(id < static_cast<NodeId>(defined_.size()));
+  defined_[id] = true;
+}
+
+
+bool InstructionSelector::IsUsed(Node* node) const {
+  if (!node->op()->HasProperty(Operator::kEliminatable)) return true;
+  NodeId id = node->id();
+  DCHECK(id >= 0);
+  DCHECK(id < static_cast<NodeId>(used_.size()));
+  return used_[id];
+}
+
+
+void InstructionSelector::MarkAsUsed(Node* node) {
+  DCHECK_NOT_NULL(node);
+  NodeId id = node->id();
+  DCHECK(id >= 0);
+  DCHECK(id < static_cast<NodeId>(used_.size()));
+  used_[id] = true;
+}
+
+
+bool InstructionSelector::IsDouble(const Node* node) const {
+  DCHECK_NOT_NULL(node);
+  return sequence()->IsDouble(node->id());
+}
+
+
+void InstructionSelector::MarkAsDouble(Node* node) {
+  DCHECK_NOT_NULL(node);
+  DCHECK(!IsReference(node));
+  sequence()->MarkAsDouble(node->id());
+}
+
+
+bool InstructionSelector::IsReference(const Node* node) const {
+  DCHECK_NOT_NULL(node);
+  return sequence()->IsReference(node->id());
+}
+
+
+void InstructionSelector::MarkAsReference(Node* node) {
+  DCHECK_NOT_NULL(node);
+  DCHECK(!IsDouble(node));
+  sequence()->MarkAsReference(node->id());
+}
+
+
+void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
+  DCHECK_NOT_NULL(node);
+  switch (RepresentationOf(rep)) {
+    case kRepFloat32:
+    case kRepFloat64:
+      MarkAsDouble(node);
+      break;
+    case kRepTagged:
+      MarkAsReference(node);
+      break;
+    default:
+      break;
+  }
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d,
+                       FrameStateDescriptor* frame_desc)
+    : descriptor(d),
+      frame_state_descriptor(frame_desc),
+      output_nodes(zone),
+      outputs(zone),
+      instruction_args(zone),
+      pushed_nodes(zone) {
+  output_nodes.reserve(d->ReturnCount());
+  outputs.reserve(d->ReturnCount());
+  pushed_nodes.reserve(input_count());
+  instruction_args.reserve(input_count() + frame_state_value_count());
+}
+
+
+// TODO(bmeurer): Get rid of the CallBuffer business and make
+// InstructionSelector::VisitCall platform independent instead.
+void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
+                                               bool call_code_immediate,
+                                               bool call_address_immediate) {
+  OperandGenerator g(this);
+  DCHECK_EQ(call->op()->OutputCount(), buffer->descriptor->ReturnCount());
+  DCHECK_EQ(OperatorProperties::GetValueInputCount(call->op()),
+            buffer->input_count() + buffer->frame_state_count());
+
+  if (buffer->descriptor->ReturnCount() > 0) {
+    // Collect the projections that represent multiple outputs from this call.
+    if (buffer->descriptor->ReturnCount() == 1) {
+      buffer->output_nodes.push_back(call);
+    } else {
+      buffer->output_nodes.resize(buffer->descriptor->ReturnCount(), NULL);
+      call->CollectProjections(&buffer->output_nodes);
+    }
+
+    // Filter out the outputs that aren't live because no projection uses them.
+    for (size_t i = 0; i < buffer->output_nodes.size(); i++) {
+      if (buffer->output_nodes[i] != NULL) {
+        Node* output = buffer->output_nodes[i];
+        MachineType type =
+            buffer->descriptor->GetReturnType(static_cast<int>(i));
+        LinkageLocation location =
+            buffer->descriptor->GetReturnLocation(static_cast<int>(i));
+        MarkAsRepresentation(type, output);
+        buffer->outputs.push_back(g.DefineAsLocation(output, location, type));
+      }
+    }
+  }
+
+  // The first argument is always the callee code.
+  Node* callee = call->InputAt(0);
+  switch (buffer->descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject:
+      buffer->instruction_args.push_back(
+          (call_code_immediate && callee->opcode() == IrOpcode::kHeapConstant)
+              ? g.UseImmediate(callee)
+              : g.UseRegister(callee));
+      break;
+    case CallDescriptor::kCallAddress:
+      buffer->instruction_args.push_back(
+          (call_address_immediate &&
+           (callee->opcode() == IrOpcode::kInt32Constant ||
+            callee->opcode() == IrOpcode::kInt64Constant))
+              ? g.UseImmediate(callee)
+              : g.UseRegister(callee));
+      break;
+    case CallDescriptor::kCallJSFunction:
+      buffer->instruction_args.push_back(
+          g.UseLocation(callee, buffer->descriptor->GetInputLocation(0),
+                        buffer->descriptor->GetInputType(0)));
+      break;
+  }
+  DCHECK_EQ(1, buffer->instruction_args.size());
+
+  // If the call needs a frame state, we insert the state information as
+  // follows (n is the number of value inputs to the frame state):
+  // arg 1               : deoptimization id.
+  // arg 2 - arg (n + 1) : value inputs to the frame state.
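+  // For example, a frame state with one parameter, one local and one stack
+  // value contributes five operands: the deoptimization id followed by the
+  // parameter, the context, the local and the stack value (this is the
+  // layout checked by the CallFunctionStubWithDeopt unittest).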
+  if (buffer->frame_state_descriptor != NULL) {
+    InstructionSequence::StateId state_id =
+        sequence()->AddFrameStateDescriptor(buffer->frame_state_descriptor);
+    buffer->instruction_args.push_back(g.TempImmediate(state_id.ToInt()));
+
+    Node* frame_state =
+        call->InputAt(static_cast<int>(buffer->descriptor->InputCount()));
+    AddFrameStateInputs(frame_state, &buffer->instruction_args,
+                        buffer->frame_state_descriptor);
+  }
+  DCHECK(1 + buffer->frame_state_value_count() ==
+         buffer->instruction_args.size());
+
+  size_t input_count = static_cast<size_t>(buffer->input_count());
+
+  // Split the arguments into pushed_nodes and instruction_args. Pushed
+  // arguments require an explicit push instruction before the call and do
+  // not appear as arguments to the call. Everything else ends up
+  // as an InstructionOperand argument to the call.
+  InputIter iter(call->inputs().begin());
+  int pushed_count = 0;
+  for (size_t index = 0; index < input_count; ++iter, ++index) {
+    DCHECK(iter != call->inputs().end());
+    DCHECK(index == static_cast<size_t>(iter.index()));
+    DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
+    if (index == 0) continue;  // The first argument (callee) is already done.
+    InstructionOperand* op =
+        g.UseLocation(*iter, buffer->descriptor->GetInputLocation(index),
+                      buffer->descriptor->GetInputType(index));
+    if (UnallocatedOperand::cast(op)->HasFixedSlotPolicy()) {
+      int stack_index = -UnallocatedOperand::cast(op)->fixed_slot_index() - 1;
+      if (static_cast<size_t>(stack_index) >= buffer->pushed_nodes.size()) {
+        buffer->pushed_nodes.resize(stack_index + 1, NULL);
+      }
+      DCHECK_EQ(NULL, buffer->pushed_nodes[stack_index]);
+      buffer->pushed_nodes[stack_index] = *iter;
+      pushed_count++;
+    } else {
+      buffer->instruction_args.push_back(op);
+    }
+  }
+  CHECK_EQ(pushed_count, static_cast<int>(buffer->pushed_nodes.size()));
+  DCHECK(static_cast<size_t>(input_count) ==
+         (buffer->instruction_args.size() + buffer->pushed_nodes.size() -
+          buffer->frame_state_value_count()));
+}
+
+
+void InstructionSelector::VisitBlock(BasicBlock* block) {
+  DCHECK_EQ(NULL, current_block_);
+  current_block_ = block;
+  int current_block_end = static_cast<int>(instructions_.size());
+
+  // Generate code for the block control "top down", but schedule the code
+  // "bottom up".
+  VisitControl(block);
+  std::reverse(instructions_.begin() + current_block_end, instructions_.end());
+
+  // Visit code in reverse control flow order, because architecture-specific
+  // matching may cover more than one node at a time.
+  for (BasicBlock::reverse_iterator i = block->rbegin(); i != block->rend();
+       ++i) {
+    Node* node = *i;
+    // Skip nodes that are unused or already defined.
+    if (!IsUsed(node) || IsDefined(node)) continue;
+    // Generate code for this node "top down", but schedule the code "bottom
+    // up".
+    size_t current_node_end = instructions_.size();
+    VisitNode(node);
+    std::reverse(instructions_.begin() + current_node_end, instructions_.end());
+  }
+
+  // We're done with the block.
+  // TODO(bmeurer): We should not mutate the schedule.
+  block->code_end_ = current_block_end;
+  block->code_start_ = static_cast<int>(instructions_.size());
+
+  current_block_ = NULL;
+}
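+
+// Note: since instructions_ is append-only, the per-node and per-block runs
+// are reversed in place above; SelectInstructions then replays each block's
+// run backwards (from code_start_ down to code_end_), which restores
+// execution order in the final InstructionSequence.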
+
+
+static inline void CheckNoPhis(const BasicBlock* block) {
+#ifdef DEBUG
+  // Branch targets should not have phis.
+  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+    const Node* node = *i;
+    CHECK_NE(IrOpcode::kPhi, node->opcode());
+  }
+#endif
+}
+
+
+void InstructionSelector::VisitControl(BasicBlock* block) {
+  Node* input = block->control_input_;
+  switch (block->control_) {
+    case BasicBlockData::kGoto:
+      return VisitGoto(block->SuccessorAt(0));
+    case BasicBlockData::kBranch: {
+      DCHECK_EQ(IrOpcode::kBranch, input->opcode());
+      BasicBlock* tbranch = block->SuccessorAt(0);
+      BasicBlock* fbranch = block->SuccessorAt(1);
+      // SSA deconstruction requires targets of branches not to have phis.
+      // Edge split form guarantees this property, but is more strict.
+      CheckNoPhis(tbranch);
+      CheckNoPhis(fbranch);
+      if (tbranch == fbranch) return VisitGoto(tbranch);
+      return VisitBranch(input, tbranch, fbranch);
+    }
+    case BasicBlockData::kReturn: {
+      // If the result itself is a return, return its input.
+      Node* value = (input != NULL && input->opcode() == IrOpcode::kReturn)
+                        ? input->InputAt(0)
+                        : input;
+      return VisitReturn(value);
+    }
+    case BasicBlockData::kThrow:
+      return VisitThrow(input);
+    case BasicBlockData::kNone: {
+      // TODO(titzer): exit block doesn't have control.
+      DCHECK(input == NULL);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void InstructionSelector::VisitNode(Node* node) {
+  DCHECK_NOT_NULL(schedule()->block(node));  // should only use scheduled nodes.
+  SourcePosition source_position = source_positions_->GetSourcePosition(node);
+  if (!source_position.IsUnknown()) {
+    DCHECK(!source_position.IsInvalid());
+    if (FLAG_turbo_source_positions || node->opcode() == IrOpcode::kCall) {
+      Emit(SourcePositionInstruction::New(instruction_zone(), source_position));
+    }
+  }
+  switch (node->opcode()) {
+    case IrOpcode::kStart:
+    case IrOpcode::kLoop:
+    case IrOpcode::kEnd:
+    case IrOpcode::kBranch:
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kMerge:
+      // No code needed for these graph artifacts.
+      return;
+    case IrOpcode::kFinish:
+      return MarkAsReference(node), VisitFinish(node);
+    case IrOpcode::kParameter: {
+      MachineType type = linkage()->GetParameterType(OpParameter<int>(node));
+      MarkAsRepresentation(type, node);
+      return VisitParameter(node);
+    }
+    case IrOpcode::kPhi: {
+      MachineType type = OpParameter<MachineType>(node);
+      MarkAsRepresentation(type, node);
+      return VisitPhi(node);
+    }
+    case IrOpcode::kProjection:
+      return VisitProjection(node);
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
+    case IrOpcode::kExternalConstant:
+      return VisitConstant(node);
+    case IrOpcode::kFloat64Constant:
+      return MarkAsDouble(node), VisitConstant(node);
+    case IrOpcode::kHeapConstant:
+    case IrOpcode::kNumberConstant:
+      // TODO(turbofan): only mark non-smis as references.
+      return MarkAsReference(node), VisitConstant(node);
+    case IrOpcode::kCall:
+      return VisitCall(node, NULL, NULL);
+    case IrOpcode::kFrameState:
+    case IrOpcode::kStateValues:
+      return;
+    case IrOpcode::kLoad: {
+      LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
+      MarkAsRepresentation(rep, node);
+      return VisitLoad(node);
+    }
+    case IrOpcode::kStore:
+      return VisitStore(node);
+    case IrOpcode::kWord32And:
+      return VisitWord32And(node);
+    case IrOpcode::kWord32Or:
+      return VisitWord32Or(node);
+    case IrOpcode::kWord32Xor:
+      return VisitWord32Xor(node);
+    case IrOpcode::kWord32Shl:
+      return VisitWord32Shl(node);
+    case IrOpcode::kWord32Shr:
+      return VisitWord32Shr(node);
+    case IrOpcode::kWord32Sar:
+      return VisitWord32Sar(node);
+    case IrOpcode::kWord32Ror:
+      return VisitWord32Ror(node);
+    case IrOpcode::kWord32Equal:
+      return VisitWord32Equal(node);
+    case IrOpcode::kWord64And:
+      return VisitWord64And(node);
+    case IrOpcode::kWord64Or:
+      return VisitWord64Or(node);
+    case IrOpcode::kWord64Xor:
+      return VisitWord64Xor(node);
+    case IrOpcode::kWord64Shl:
+      return VisitWord64Shl(node);
+    case IrOpcode::kWord64Shr:
+      return VisitWord64Shr(node);
+    case IrOpcode::kWord64Sar:
+      return VisitWord64Sar(node);
+    case IrOpcode::kWord64Ror:
+      return VisitWord64Ror(node);
+    case IrOpcode::kWord64Equal:
+      return VisitWord64Equal(node);
+    case IrOpcode::kInt32Add:
+      return VisitInt32Add(node);
+    case IrOpcode::kInt32AddWithOverflow:
+      return VisitInt32AddWithOverflow(node);
+    case IrOpcode::kInt32Sub:
+      return VisitInt32Sub(node);
+    case IrOpcode::kInt32SubWithOverflow:
+      return VisitInt32SubWithOverflow(node);
+    case IrOpcode::kInt32Mul:
+      return VisitInt32Mul(node);
+    case IrOpcode::kInt32Div:
+      return VisitInt32Div(node);
+    case IrOpcode::kInt32UDiv:
+      return VisitInt32UDiv(node);
+    case IrOpcode::kInt32Mod:
+      return VisitInt32Mod(node);
+    case IrOpcode::kInt32UMod:
+      return VisitInt32UMod(node);
+    case IrOpcode::kInt32LessThan:
+      return VisitInt32LessThan(node);
+    case IrOpcode::kInt32LessThanOrEqual:
+      return VisitInt32LessThanOrEqual(node);
+    case IrOpcode::kUint32LessThan:
+      return VisitUint32LessThan(node);
+    case IrOpcode::kUint32LessThanOrEqual:
+      return VisitUint32LessThanOrEqual(node);
+    case IrOpcode::kInt64Add:
+      return VisitInt64Add(node);
+    case IrOpcode::kInt64Sub:
+      return VisitInt64Sub(node);
+    case IrOpcode::kInt64Mul:
+      return VisitInt64Mul(node);
+    case IrOpcode::kInt64Div:
+      return VisitInt64Div(node);
+    case IrOpcode::kInt64UDiv:
+      return VisitInt64UDiv(node);
+    case IrOpcode::kInt64Mod:
+      return VisitInt64Mod(node);
+    case IrOpcode::kInt64UMod:
+      return VisitInt64UMod(node);
+    case IrOpcode::kInt64LessThan:
+      return VisitInt64LessThan(node);
+    case IrOpcode::kInt64LessThanOrEqual:
+      return VisitInt64LessThanOrEqual(node);
+    case IrOpcode::kChangeInt32ToFloat64:
+      return MarkAsDouble(node), VisitChangeInt32ToFloat64(node);
+    case IrOpcode::kChangeUint32ToFloat64:
+      return MarkAsDouble(node), VisitChangeUint32ToFloat64(node);
+    case IrOpcode::kChangeFloat64ToInt32:
+      return VisitChangeFloat64ToInt32(node);
+    case IrOpcode::kChangeFloat64ToUint32:
+      return VisitChangeFloat64ToUint32(node);
+    case IrOpcode::kChangeInt32ToInt64:
+      return VisitChangeInt32ToInt64(node);
+    case IrOpcode::kChangeUint32ToUint64:
+      return VisitChangeUint32ToUint64(node);
+    case IrOpcode::kTruncateFloat64ToInt32:
+      return VisitTruncateFloat64ToInt32(node);
+    case IrOpcode::kTruncateInt64ToInt32:
+      return VisitTruncateInt64ToInt32(node);
+    case IrOpcode::kFloat64Add:
+      return MarkAsDouble(node), VisitFloat64Add(node);
+    case IrOpcode::kFloat64Sub:
+      return MarkAsDouble(node), VisitFloat64Sub(node);
+    case IrOpcode::kFloat64Mul:
+      return MarkAsDouble(node), VisitFloat64Mul(node);
+    case IrOpcode::kFloat64Div:
+      return MarkAsDouble(node), VisitFloat64Div(node);
+    case IrOpcode::kFloat64Mod:
+      return MarkAsDouble(node), VisitFloat64Mod(node);
+    case IrOpcode::kFloat64Sqrt:
+      return MarkAsDouble(node), VisitFloat64Sqrt(node);
+    case IrOpcode::kFloat64Equal:
+      return VisitFloat64Equal(node);
+    case IrOpcode::kFloat64LessThan:
+      return VisitFloat64LessThan(node);
+    case IrOpcode::kFloat64LessThanOrEqual:
+      return VisitFloat64LessThanOrEqual(node);
+    default:
+      V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
+               node->opcode(), node->op()->mnemonic(), node->id());
+  }
+}
+
+
+#if V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord32Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord32Test(m.left().node(), &cont);
+  }
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* node) {
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWord64Test(m.left().node(), &cont);
+  }
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitInt32AddWithOverflow(node, &cont);
+  }
+  FlagsContinuation cont;
+  VisitInt32AddWithOverflow(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitInt32SubWithOverflow(node, &cont);
+  }
+  FlagsContinuation cont;
+  VisitInt32SubWithOverflow(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToInt32(Node* node) {
+  OperandGenerator g(this);
+  Emit(kArchTruncateDoubleToI, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(node, &cont);
+}
+
+#endif  // V8_TURBOFAN_BACKEND
+
+// 32-bit targets do not implement the following instructions.
+#if V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord64And(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Or(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Add(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Div(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) { UNIMPLEMENTED(); }
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  UNIMPLEMENTED();
+}
+
+#endif  // V8_TARGET_ARCH_32_BIT && V8_TURBOFAN_BACKEND
+
+
+// 32-bit targets and unsupported architectures need dummy implementations of
+// selected 64-bit ops.
+#if V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+#endif  // V8_TARGET_ARCH_32_BIT || !V8_TURBOFAN_BACKEND
+
+
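+// A Finish node simply forwards its value input; the nop defined as
+// same-as-first gives the value a defining instruction for the register
+// allocator without emitting any actual code.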
+void InstructionSelector::VisitFinish(Node* node) {
+  OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+}
+
+
+void InstructionSelector::VisitParameter(Node* node) {
+  OperandGenerator g(this);
+  int index = OpParameter<int>(node);
+  Emit(kArchNop,
+       g.DefineAsLocation(node, linkage()->GetParameterLocation(index),
+                          linkage()->GetParameterType(index)));
+}
+
+
+void InstructionSelector::VisitPhi(Node* node) {
+  // TODO(bmeurer): Emit a PhiInstruction here.
+  for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+    MarkAsUsed(*i);
+  }
+}
+
+
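+// Projections of <Operation>WithOverflow nodes need no code of their own:
+// the 0th projection aliases the operation's value via a nop, while the 1st
+// (overflow) projection is only marked as used -- it is materialized by the
+// flags continuation chosen when the operation itself is visited.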
+void InstructionSelector::VisitProjection(Node* node) {
+  OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    case IrOpcode::kInt32AddWithOverflow:
+    case IrOpcode::kInt32SubWithOverflow:
+      if (OpParameter<size_t>(node) == 0) {
+        Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      } else {
+        DCHECK(OpParameter<size_t>(node) == 1u);
+        MarkAsUsed(value);
+      }
+      break;
+    default:
+      break;
+  }
+}
+
+
+void InstructionSelector::VisitConstant(Node* node) {
+  // We must emit a NOP here because every live range needs a defining
+  // instruction in the register allocator.
+  OperandGenerator g(this);
+  Emit(kArchNop, g.DefineAsConstant(node));
+}
+
+
+void InstructionSelector::VisitGoto(BasicBlock* target) {
+  if (IsNextInAssemblyOrder(target)) {
+    // fall through to the next block.
+    Emit(kArchNop, NULL)->MarkAsControl();
+  } else {
+    // jump to the target block.
+    OperandGenerator g(this);
+    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
+  }
+}
+
+
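+// Branch selection works in three steps: (1) if the true block is the
+// fall-through target, the continuation is negated and its blocks swapped;
+// (2) Word{32,64}Equal comparisons against zero are peeled off, each one
+// negating the condition; (3) the branch is fused with the final comparison
+// (or overflow projection) when it can be covered, and otherwise lowered to
+// an explicit test of the value against zero.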
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  OperandGenerator g(this);
+  Node* user = branch;
+  Node* value = branch->InputAt(0);
+
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+
+  // If we can fall through to the true block, invert the branch.
+  if (IsNextInAssemblyOrder(tbranch)) {
+    cont.Negate();
+    cont.SwapBlocks();
+  }
+
+  // Try to combine with comparisons against 0 by simply inverting the branch.
+  while (CanCover(user, value)) {
+    if (value->opcode() == IrOpcode::kWord32Equal) {
+      Int32BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else if (value->opcode() == IrOpcode::kWord64Equal) {
+      Int64BinopMatcher m(value);
+      if (m.right().Is(0)) {
+        user = value;
+        value = m.left().node();
+        cont.Negate();
+      } else {
+        break;
+      }
+    } else {
+      break;
+    }
+  }
+
+  // Try to combine the branch with a comparison.
+  if (CanCover(user, value)) {
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kInt32LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kUint32LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(value, &cont);
+      case IrOpcode::kWord64Equal:
+        cont.OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kInt64LessThan:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(value, &cont);
+      case IrOpcode::kFloat64Equal:
+        cont.OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kFloat64LessThan:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont.OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(value, &cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL (which means there's no use of the
+          // actual value) or was already defined (which means it is scheduled
+          // *AFTER* this branch).
+          Node* node = value->InputAt(0);
+          Node* result = node->FindProjection(0);
+          if (result == NULL || IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont.OverwriteAndNegateIfEqual(kOverflow);
+                return VisitInt32AddWithOverflow(node, &cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont.OverwriteAndNegateIfEqual(kOverflow);
+                return VisitInt32SubWithOverflow(node, &cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      default:
+        break;
+    }
+  }
+
+  // The branch could not be combined with a compare; emit a test against 0.
+  VisitWord32Test(value, &cont);
+}
+
+
+void InstructionSelector::VisitReturn(Node* value) {
+  OperandGenerator g(this);
+  if (value != NULL) {
+    Emit(kArchRet, NULL, g.UseLocation(value, linkage()->GetReturnLocation(),
+                                       linkage()->GetReturnType()));
+  } else {
+    Emit(kArchRet, NULL);
+  }
+}
+
+
+void InstructionSelector::VisitThrow(Node* value) {
+  UNIMPLEMENTED();  // TODO(titzer)
+}
+
+
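+// A FrameState node has exactly five inputs: the parameter, local and stack
+// StateValues, the context, and an optional outer FrameState (present for
+// inlined frames), in that order. Descriptors are chained accordingly.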
+FrameStateDescriptor* InstructionSelector::GetFrameStateDescriptor(
+    Node* state) {
+  DCHECK(state->opcode() == IrOpcode::kFrameState);
+  DCHECK_EQ(5, state->InputCount());
+  FrameStateCallInfo state_info = OpParameter<FrameStateCallInfo>(state);
+  int parameters = OpParameter<int>(state->InputAt(0));
+  int locals = OpParameter<int>(state->InputAt(1));
+  int stack = OpParameter<int>(state->InputAt(2));
+
+  FrameStateDescriptor* outer_state = NULL;
+  Node* outer_node = state->InputAt(4);
+  if (outer_node->opcode() == IrOpcode::kFrameState) {
+    outer_state = GetFrameStateDescriptor(outer_node);
+  }
+
+  return new (instruction_zone())
+      FrameStateDescriptor(state_info, parameters, locals, stack, outer_state);
+}
+
+
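+// Constants can be baked directly into the frame state as immediates; any
+// other value must remain available in an operand of its own, hence the
+// UseUnique fallback.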
+static InstructionOperand* UseOrImmediate(OperandGenerator* g, Node* input) {
+  switch (input->opcode()) {
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kNumberConstant:
+    case IrOpcode::kFloat64Constant:
+    case IrOpcode::kHeapConstant:
+      return g->UseImmediate(input);
+    default:
+      return g->UseUnique(input);
+  }
+}
+
+
+void InstructionSelector::AddFrameStateInputs(
+    Node* state, InstructionOperandVector* inputs,
+    FrameStateDescriptor* descriptor) {
+  DCHECK_EQ(IrOpcode::kFrameState, state->op()->opcode());
+
+  if (descriptor->outer_state() != NULL) {
+    AddFrameStateInputs(state->InputAt(4), inputs, descriptor->outer_state());
+  }
+
+  Node* parameters = state->InputAt(0);
+  Node* locals = state->InputAt(1);
+  Node* stack = state->InputAt(2);
+  Node* context = state->InputAt(3);
+
+  DCHECK_EQ(IrOpcode::kStateValues, parameters->op()->opcode());
+  DCHECK_EQ(IrOpcode::kStateValues, locals->op()->opcode());
+  DCHECK_EQ(IrOpcode::kStateValues, stack->op()->opcode());
+
+  DCHECK_EQ(descriptor->parameters_count(), parameters->InputCount());
+  DCHECK_EQ(descriptor->locals_count(), locals->InputCount());
+  DCHECK_EQ(descriptor->stack_count(), stack->InputCount());
+
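+  // Note the operand order: parameters, then context, then locals, then
+  // stack values -- the context is spliced in between parameters and locals.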
+  OperandGenerator g(this);
+  for (int i = 0; i < static_cast<int>(descriptor->parameters_count()); i++) {
+    inputs->push_back(UseOrImmediate(&g, parameters->InputAt(i)));
+  }
+  if (descriptor->HasContext()) {
+    inputs->push_back(UseOrImmediate(&g, context));
+  }
+  for (int i = 0; i < static_cast<int>(descriptor->locals_count()); i++) {
+    inputs->push_back(UseOrImmediate(&g, locals->InputAt(i)));
+  }
+  for (int i = 0; i < static_cast<int>(descriptor->stack_count()); i++) {
+    inputs->push_back(UseOrImmediate(&g, stack->InputAt(i)));
+  }
+}
+
+
+#if !V8_TURBOFAN_BACKEND
+
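+// Without a TurboFan backend every machine-level visitor is a stub; the
+// macro below expands MACHINE_OP_LIST into one UNIMPLEMENTED() body per op.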
+#define DECLARE_UNIMPLEMENTED_SELECTOR(x) \
+  void InstructionSelector::Visit##x(Node* node) { UNIMPLEMENTED(); }
+MACHINE_OP_LIST(DECLARE_UNIMPLEMENTED_SELECTOR)
+#undef DECLARE_UNIMPLEMENTED_SELECTOR
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  UNIMPLEMENTED();
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {}
+
+#endif  // !V8_TURBOFAN_BACKEND
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction-selector.h b/src/compiler/instruction-selector.h
new file mode 100644
index 0000000..a86e156
--- /dev/null
+++ b/src/compiler/instruction-selector.h
@@ -0,0 +1,213 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_SELECTOR_H_
+#define V8_COMPILER_INSTRUCTION_SELECTOR_H_
+
+#include <deque>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/machine-operator.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+struct CallBuffer;  // TODO(bmeurer): Remove this.
+class FlagsContinuation;
+
+class InstructionSelector FINAL {
+ public:
+  // Forward declarations.
+  class Features;
+
+  InstructionSelector(InstructionSequence* sequence,
+                      SourcePositionTable* source_positions,
+                      Features features = SupportedFeatures());
+
+  // Visit code for the entire graph with the included schedule.
+  void SelectInstructions();
+
+  // ===========================================================================
+  // ============= Architecture-independent code emission methods. =============
+  // ===========================================================================
+
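+  // Fixed-arity convenience forms of Emit(); the fully general form taking
+  // output/input/temp arrays comes last. A NULL {output} produces an
+  // instruction with no outputs, as used for jumps and returns.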
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, size_t temp_count = 0,
+                    InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, size_t temp_count = 0,
+                    InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, InstructionOperand* d,
+                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, size_t output_count,
+                    InstructionOperand** outputs, size_t input_count,
+                    InstructionOperand** inputs, size_t temp_count = 0,
+                    InstructionOperand* *temps = NULL);
+  Instruction* Emit(Instruction* instr);
+
+  // ===========================================================================
+  // ============== Architecture-independent CPU feature methods. ==============
+  // ===========================================================================
+
+  class Features FINAL {
+   public:
+    Features() : bits_(0) {}
+    explicit Features(unsigned bits) : bits_(bits) {}
+    explicit Features(CpuFeature f) : bits_(1u << f) {}
+    Features(CpuFeature f1, CpuFeature f2) : bits_((1u << f1) | (1u << f2)) {}
+
+    bool Contains(CpuFeature f) const { return (bits_ & (1u << f)); }
+
+   private:
+    unsigned bits_;
+  };
+
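+  // Illustrative example (CpuFeature names are architecture-specific): on
+  // ia32/x64, a selector constructed with Features(SSE4_1) would answer
+  // IsSupported(SSE4_1) with true regardless of what the host CPU reports.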
+  bool IsSupported(CpuFeature feature) const {
+    return features_.Contains(feature);
+  }
+
+  // Returns the features supported on the target platform.
+  static Features SupportedFeatures() {
+    return Features(CpuFeatures::SupportedFeatures());
+  }
+
+ private:
+  friend class OperandGenerator;
+
+  // ===========================================================================
+  // ============ Architecture-independent graph covering methods. =============
+  // ===========================================================================
+
+  // Checks if {block} will appear directly after {current_block_} when
+  // assembling code, in which case a fall-through can be used.
+  bool IsNextInAssemblyOrder(const BasicBlock* block) const;
+
+  // Used in pattern matching during code generation.
+  // Check if {node} can be covered while generating code for the current
+  // instruction. A node can be covered if its {user} holds the node's only
+  // use edge and the two are in the same basic block.
+  bool CanCover(Node* user, Node* node) const;
+
+  // Checks if {node} was already defined, and therefore code was already
+  // generated for it.
+  bool IsDefined(Node* node) const;
+
+  // Inform the instruction selection that {node} was just defined.
+  void MarkAsDefined(Node* node);
+
+  // Checks if {node} has any uses, and therefore code has to be generated for
+  // it.
+  bool IsUsed(Node* node) const;
+
+  // Inform the instruction selection that {node} has at least one use and we
+  // will need to generate code for it.
+  void MarkAsUsed(Node* node);
+
+  // Checks if {node} is marked as double.
+  bool IsDouble(const Node* node) const;
+
+  // Inform the register allocator of a double result.
+  void MarkAsDouble(Node* node);
+
+  // Checks if {node} is marked as reference.
+  bool IsReference(const Node* node) const;
+
+  // Inform the register allocator of a reference result.
+  void MarkAsReference(Node* node);
+
+  // Inform the register allocator of the representation of the value produced
+  // by {node}.
+  void MarkAsRepresentation(MachineType rep, Node* node);
+
+  // Initialize the call buffer with the InstructionOperands, nodes, etc.
+  // corresponding to the inputs and outputs of the call.
+  // {call_code_immediate} requests an immediate operand for calls to code
+  // objects; {call_address_immediate} requests one for calls to addresses.
+  void InitializeCallBuffer(Node* call, CallBuffer* buffer,
+                            bool call_code_immediate,
+                            bool call_address_immediate);
+
+  FrameStateDescriptor* GetFrameStateDescriptor(Node* node);
+  void AddFrameStateInputs(Node* state, InstructionOperandVector* inputs,
+                           FrameStateDescriptor* descriptor);
+
+  // ===========================================================================
+  // ============= Architecture-specific graph covering methods. ===============
+  // ===========================================================================
+
+  // Visit nodes in the given block and generate code.
+  void VisitBlock(BasicBlock* block);
+
+  // Visit the node for the control flow at the end of the block, generating
+  // code if necessary.
+  void VisitControl(BasicBlock* block);
+
+  // Visit the node and generate code, if any.
+  void VisitNode(Node* node);
+
+#define DECLARE_GENERATOR(x) void Visit##x(Node* node);
+  MACHINE_OP_LIST(DECLARE_GENERATOR)
+#undef DECLARE_GENERATOR
+
+  void VisitInt32AddWithOverflow(Node* node, FlagsContinuation* cont);
+  void VisitInt32SubWithOverflow(Node* node, FlagsContinuation* cont);
+
+  void VisitWord32Test(Node* node, FlagsContinuation* cont);
+  void VisitWord64Test(Node* node, FlagsContinuation* cont);
+  void VisitWord32Compare(Node* node, FlagsContinuation* cont);
+  void VisitWord64Compare(Node* node, FlagsContinuation* cont);
+  void VisitFloat64Compare(Node* node, FlagsContinuation* cont);
+
+  void VisitFinish(Node* node);
+  void VisitParameter(Node* node);
+  void VisitPhi(Node* node);
+  void VisitProjection(Node* node);
+  void VisitConstant(Node* node);
+  void VisitCall(Node* call, BasicBlock* continuation,
+                 BasicBlock* deoptimization);
+  void VisitGoto(BasicBlock* target);
+  void VisitBranch(Node* input, BasicBlock* tbranch, BasicBlock* fbranch);
+  void VisitReturn(Node* value);
+  void VisitThrow(Node* value);
+  void VisitDeoptimize(Node* deopt);
+
+  // ===========================================================================
+
+  Graph* graph() const { return sequence()->graph(); }
+  Linkage* linkage() const { return sequence()->linkage(); }
+  Schedule* schedule() const { return sequence()->schedule(); }
+  InstructionSequence* sequence() const { return sequence_; }
+  Zone* instruction_zone() const { return sequence()->zone(); }
+  Zone* zone() { return &zone_; }
+
+  // ===========================================================================
+
+  Zone zone_;
+  InstructionSequence* sequence_;
+  SourcePositionTable* source_positions_;
+  Features features_;
+  BasicBlock* current_block_;
+  ZoneDeque<Instruction*> instructions_;
+  BoolVector defined_;
+  BoolVector used_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_SELECTOR_H_
diff --git a/src/compiler/instruction.cc b/src/compiler/instruction.cc
new file mode 100644
index 0000000..9ab81b6
--- /dev/null
+++ b/src/compiler/instruction.cc
@@ -0,0 +1,484 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const InstructionOperand& op) {
+  switch (op.kind()) {
+    case InstructionOperand::INVALID:
+      return os << "(0)";
+    case InstructionOperand::UNALLOCATED: {
+      const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
+      os << "v" << unalloc->virtual_register();
+      if (unalloc->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+        return os << "(=" << unalloc->fixed_slot_index() << "S)";
+      }
+      switch (unalloc->extended_policy()) {
+        case UnallocatedOperand::NONE:
+          return os;
+        case UnallocatedOperand::FIXED_REGISTER:
+          return os << "(=" << Register::AllocationIndexToString(
+                                   unalloc->fixed_register_index()) << ")";
+        case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+          return os << "(=" << DoubleRegister::AllocationIndexToString(
+                                   unalloc->fixed_register_index()) << ")";
+        case UnallocatedOperand::MUST_HAVE_REGISTER:
+          return os << "(R)";
+        case UnallocatedOperand::SAME_AS_FIRST_INPUT:
+          return os << "(1)";
+        case UnallocatedOperand::ANY:
+          return os << "(-)";
+      }
+    }
+    case InstructionOperand::CONSTANT:
+      return os << "[constant:" << op.index() << "]";
+    case InstructionOperand::IMMEDIATE:
+      return os << "[immediate:" << op.index() << "]";
+    case InstructionOperand::STACK_SLOT:
+      return os << "[stack:" << op.index() << "]";
+    case InstructionOperand::DOUBLE_STACK_SLOT:
+      return os << "[double_stack:" << op.index() << "]";
+    case InstructionOperand::REGISTER:
+      return os << "[" << Register::AllocationIndexToString(op.index())
+                << "|R]";
+    case InstructionOperand::DOUBLE_REGISTER:
+      return os << "[" << DoubleRegister::AllocationIndexToString(op.index())
+                << "|R]";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
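+// Operands with small indices are shared: each sub-kind keeps a lazily
+// created static cache, and Create() (see instruction.h) only allocates
+// from the zone for indices beyond the per-kind cache size.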
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+SubKindOperand<kOperandKind, kNumCachedOperands>*
+    SubKindOperand<kOperandKind, kNumCachedOperands>::cache = NULL;
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::SetUpCache() {
+  if (cache) return;
+  cache = new SubKindOperand[kNumCachedOperands];
+  for (int i = 0; i < kNumCachedOperands; i++) {
+    cache[i].ConvertTo(kOperandKind, i);
+  }
+}
+
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+void SubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
+  delete[] cache;
+  cache = NULL;
+}
+
+
+void InstructionOperand::SetUpCaches() {
+#define INSTRUCTION_OPERAND_SETUP(name, type, number) \
+  name##Operand::SetUpCache();
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_SETUP)
+#undef INSTRUCTION_OPERAND_SETUP
+}
+
+
+void InstructionOperand::TearDownCaches() {
+#define INSTRUCTION_OPERAND_TEARDOWN(name, type, number) \
+  name##Operand::TearDownCache();
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_TEARDOWN)
+#undef INSTRUCTION_OPERAND_TEARDOWN
+}
+
+
+OStream& operator<<(OStream& os, const MoveOperands& mo) {
+  os << *mo.destination();
+  if (!mo.source()->Equals(mo.destination())) os << " = " << *mo.source();
+  return os << ";";
+}
+
+
+bool ParallelMove::IsRedundant() const {
+  for (int i = 0; i < move_operands_.length(); ++i) {
+    if (!move_operands_[i].IsRedundant()) return false;
+  }
+  return true;
+}
+
+
+OStream& operator<<(OStream& os, const ParallelMove& pm) {
+  bool first = true;
+  for (ZoneList<MoveOperands>::iterator move = pm.move_operands()->begin();
+       move != pm.move_operands()->end(); ++move) {
+    if (move->IsEliminated()) continue;
+    if (!first) os << " ";
+    first = false;
+    os << *move;
+  }
+  return os;
+}
+
+
+void PointerMap::RecordPointer(InstructionOperand* op, Zone* zone) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  pointer_operands_.Add(op, zone);
+}
+
+
+void PointerMap::RemovePointer(InstructionOperand* op) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  for (int i = 0; i < pointer_operands_.length(); ++i) {
+    if (pointer_operands_[i]->Equals(op)) {
+      pointer_operands_.Remove(i);
+      --i;
+    }
+  }
+}
+
+
+void PointerMap::RecordUntagged(InstructionOperand* op, Zone* zone) {
+  // Do not record arguments as pointers.
+  if (op->IsStackSlot() && op->index() < 0) return;
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  untagged_operands_.Add(op, zone);
+}
+
+
+OStream& operator<<(OStream& os, const PointerMap& pm) {
+  os << "{";
+  for (ZoneList<InstructionOperand*>::iterator op =
+           pm.pointer_operands_.begin();
+       op != pm.pointer_operands_.end(); ++op) {
+    if (op != pm.pointer_operands_.begin()) os << ";";
+    os << *op;
+  }
+  return os << "}";
+}
+
+
+OStream& operator<<(OStream& os, const ArchOpcode& ao) {
+  switch (ao) {
+#define CASE(Name) \
+  case k##Name:    \
+    return os << #Name;
+    ARCH_OPCODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const AddressingMode& am) {
+  switch (am) {
+    case kMode_None:
+      return os;
+#define CASE(Name)   \
+  case kMode_##Name: \
+    return os << #Name;
+      TARGET_ADDRESSING_MODE_LIST(CASE)
+#undef CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsMode& fm) {
+  switch (fm) {
+    case kFlags_none:
+      return os;
+    case kFlags_branch:
+      return os << "branch";
+    case kFlags_set:
+      return os << "set";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const FlagsCondition& fc) {
+  switch (fc) {
+    case kEqual:
+      return os << "equal";
+    case kNotEqual:
+      return os << "not equal";
+    case kSignedLessThan:
+      return os << "signed less than";
+    case kSignedGreaterThanOrEqual:
+      return os << "signed greater than or equal";
+    case kSignedLessThanOrEqual:
+      return os << "signed less than or equal";
+    case kSignedGreaterThan:
+      return os << "signed greater than";
+    case kUnsignedLessThan:
+      return os << "unsigned less than";
+    case kUnsignedGreaterThanOrEqual:
+      return os << "unsigned greater than or equal";
+    case kUnsignedLessThanOrEqual:
+      return os << "unsigned less than or equal";
+    case kUnsignedGreaterThan:
+      return os << "unsigned greater than";
+    case kUnorderedEqual:
+      return os << "unordered equal";
+    case kUnorderedNotEqual:
+      return os << "unordered not equal";
+    case kUnorderedLessThan:
+      return os << "unordered less than";
+    case kUnorderedGreaterThanOrEqual:
+      return os << "unordered greater than or equal";
+    case kUnorderedLessThanOrEqual:
+      return os << "unordered less than or equal";
+    case kUnorderedGreaterThan:
+      return os << "unordered greater than";
+    case kOverflow:
+      return os << "overflow";
+    case kNotOverflow:
+      return os << "not overflow";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const Instruction& instr) {
+  if (instr.OutputCount() > 1) os << "(";
+  for (size_t i = 0; i < instr.OutputCount(); i++) {
+    if (i > 0) os << ", ";
+    os << *instr.OutputAt(i);
+  }
+
+  if (instr.OutputCount() > 1) os << ") = ";
+  if (instr.OutputCount() == 1) os << " = ";
+
+  if (instr.IsGapMoves()) {
+    const GapInstruction* gap = GapInstruction::cast(&instr);
+    os << (instr.IsBlockStart() ? "block-start " : "gap ");
+    for (int i = GapInstruction::FIRST_INNER_POSITION;
+         i <= GapInstruction::LAST_INNER_POSITION; i++) {
+      os << "(";
+      if (gap->parallel_moves_[i] != NULL) os << *gap->parallel_moves_[i];
+      os << ") ";
+    }
+  } else if (instr.IsSourcePosition()) {
+    const SourcePositionInstruction* pos =
+        SourcePositionInstruction::cast(&instr);
+    os << "position (" << pos->source_position().raw() << ")";
+  } else {
+    os << ArchOpcodeField::decode(instr.opcode());
+    AddressingMode am = AddressingModeField::decode(instr.opcode());
+    if (am != kMode_None) {
+      os << " : " << AddressingModeField::decode(instr.opcode());
+    }
+    FlagsMode fm = FlagsModeField::decode(instr.opcode());
+    if (fm != kFlags_none) {
+      os << " && " << fm << " if "
+         << FlagsConditionField::decode(instr.opcode());
+    }
+  }
+  if (instr.InputCount() > 0) {
+    for (size_t i = 0; i < instr.InputCount(); i++) {
+      os << " " << *instr.InputAt(i);
+    }
+  }
+  return os << "\n";
+}
+
+
+OStream& operator<<(OStream& os, const Constant& constant) {
+  switch (constant.type()) {
+    case Constant::kInt32:
+      return os << constant.ToInt32();
+    case Constant::kInt64:
+      return os << constant.ToInt64() << "l";
+    case Constant::kFloat64:
+      return os << constant.ToFloat64();
+    case Constant::kExternalReference:
+      return os << constant.ToExternalReference().address();
+    case Constant::kHeapObject:
+      return os << Brief(*constant.ToHeapObject());
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+Label* InstructionSequence::GetLabel(BasicBlock* block) {
+  return GetBlockStart(block)->label();
+}
+
+
+BlockStartInstruction* InstructionSequence::GetBlockStart(BasicBlock* block) {
+  return BlockStartInstruction::cast(InstructionAt(block->code_start_));
+}
+
+
+void InstructionSequence::StartBlock(BasicBlock* block) {
+  block->code_start_ = static_cast<int>(instructions_.size());
+  BlockStartInstruction* block_start =
+      BlockStartInstruction::New(zone(), block);
+  AddInstruction(block_start, block);
+}
+
+
+void InstructionSequence::EndBlock(BasicBlock* block) {
+  int end = static_cast<int>(instructions_.size());
+  DCHECK(block->code_start_ >= 0 && block->code_start_ < end);
+  block->code_end_ = end;
+}
+
+
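+// Every instruction is paired with a companion gap into which the register
+// allocator can insert parallel moves. The gap is placed *before* a control
+// instruction but *after* any other instruction, so moves are never
+// scheduled past a control transfer.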
+int InstructionSequence::AddInstruction(Instruction* instr, BasicBlock* block) {
+  // TODO(titzer): the order of these gaps is a holdover from Lithium.
+  GapInstruction* gap = GapInstruction::New(zone());
+  if (instr->IsControl()) instructions_.push_back(gap);
+  int index = static_cast<int>(instructions_.size());
+  instructions_.push_back(instr);
+  if (!instr->IsControl()) instructions_.push_back(gap);
+  if (instr->NeedsPointerMap()) {
+    DCHECK(instr->pointer_map() == NULL);
+    PointerMap* pointer_map = new (zone()) PointerMap(zone());
+    pointer_map->set_instruction_position(index);
+    instr->set_pointer_map(pointer_map);
+    pointer_maps_.push_back(pointer_map);
+  }
+  return index;
+}
+
+
+BasicBlock* InstructionSequence::GetBasicBlock(int instruction_index) {
+  // TODO(turbofan): Optimize this.
+  for (;;) {
+    DCHECK_LE(0, instruction_index);
+    Instruction* instruction = InstructionAt(instruction_index--);
+    if (instruction->IsBlockStart()) {
+      return BlockStartInstruction::cast(instruction)->block();
+    }
+  }
+}
+
+
+bool InstructionSequence::IsReference(int virtual_register) const {
+  return references_.find(virtual_register) != references_.end();
+}
+
+
+bool InstructionSequence::IsDouble(int virtual_register) const {
+  return doubles_.find(virtual_register) != doubles_.end();
+}
+
+
+void InstructionSequence::MarkAsReference(int virtual_register) {
+  references_.insert(virtual_register);
+}
+
+
+void InstructionSequence::MarkAsDouble(int virtual_register) {
+  doubles_.insert(virtual_register);
+}
+
+
+void InstructionSequence::AddGapMove(int index, InstructionOperand* from,
+                                     InstructionOperand* to) {
+  GapAt(index)->GetOrCreateParallelMove(GapInstruction::START, zone())->AddMove(
+      from, to, zone());
+}
+
+
+InstructionSequence::StateId InstructionSequence::AddFrameStateDescriptor(
+    FrameStateDescriptor* descriptor) {
+  int deoptimization_id = static_cast<int>(deoptimization_entries_.size());
+  deoptimization_entries_.push_back(descriptor);
+  return StateId::FromInt(deoptimization_id);
+}
+
+FrameStateDescriptor* InstructionSequence::GetFrameStateDescriptor(
+    InstructionSequence::StateId state_id) {
+  return deoptimization_entries_[state_id.ToInt()];
+}
+
+
+int InstructionSequence::GetFrameStateDescriptorCount() {
+  return static_cast<int>(deoptimization_entries_.size());
+}
+
+
+OStream& operator<<(OStream& os, const InstructionSequence& code) {
+  for (size_t i = 0; i < code.immediates_.size(); ++i) {
+    Constant constant = code.immediates_[i];
+    os << "IMM#" << i << ": " << constant << "\n";
+  }
+  int i = 0;
+  for (ConstantMap::const_iterator it = code.constants_.begin();
+       it != code.constants_.end(); ++i, ++it) {
+    os << "CST#" << i << ": v" << it->first << " = " << it->second << "\n";
+  }
+  for (int i = 0; i < code.BasicBlockCount(); i++) {
+    BasicBlock* block = code.BlockAt(i);
+
+    int bid = block->id();
+    os << "RPO#" << block->rpo_number_ << ": B" << bid;
+    CHECK(block->rpo_number_ == i);
+    if (block->IsLoopHeader()) {
+      os << " loop blocks: [" << block->rpo_number_ << ", " << block->loop_end_
+         << ")";
+    }
+    os << "  instructions: [" << block->code_start_ << ", " << block->code_end_
+       << ")\n  predecessors:";
+
+    BasicBlock::Predecessors predecessors = block->predecessors();
+    for (BasicBlock::Predecessors::iterator iter = predecessors.begin();
+         iter != predecessors.end(); ++iter) {
+      os << " B" << (*iter)->id();
+    }
+    os << "\n";
+
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+      os << "     phi: v" << phi->id() << " =";
+      Node::Inputs inputs = phi->inputs();
+      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+           ++iter) {
+        os << " v" << (*iter)->id();
+      }
+      os << "\n";
+    }
+
+    ScopedVector<char> buf(32);
+    for (int j = block->first_instruction_index();
+         j <= block->last_instruction_index(); j++) {
+      // TODO(svenpanne) Add some basic formatting to our streams.
+      SNPrintF(buf, "%5d", j);
+      os << "   " << buf.start() << ": " << *code.InstructionAt(j);
+    }
+
+    os << "  " << block->control_;
+
+    if (block->control_input_ != NULL) {
+      os << " v" << block->control_input_->id();
+    }
+
+    BasicBlock::Successors successors = block->successors();
+    for (BasicBlock::Successors::iterator iter = successors.begin();
+         iter != successors.end(); ++iter) {
+      os << " B" << (*iter)->id();
+    }
+    os << "\n";
+  }
+  return os;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/instruction.h b/src/compiler/instruction.h
new file mode 100644
index 0000000..6d00784
--- /dev/null
+++ b/src/compiler/instruction.h
@@ -0,0 +1,940 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_INSTRUCTION_H_
+#define V8_COMPILER_INSTRUCTION_H_
+
+#include <deque>
+#include <map>
+#include <set>
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-codes.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+// TODO(titzer): don't include the macro-assembler?
+#include "src/macro-assembler.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class OStream;
+
+namespace compiler {
+
+// Forward declarations.
+class Linkage;
+
+// A few instruction codes are reserved for internal use.
+const InstructionCode kGapInstruction = -1;
+const InstructionCode kBlockStartInstruction = -2;
+const InstructionCode kSourcePositionInstruction = -3;
+
+
+#define INSTRUCTION_OPERAND_LIST(V)              \
+  V(Constant, CONSTANT, 128)                     \
+  V(Immediate, IMMEDIATE, 128)                   \
+  V(StackSlot, STACK_SLOT, 128)                  \
+  V(DoubleStackSlot, DOUBLE_STACK_SLOT, 128)     \
+  V(Register, REGISTER, Register::kNumRegisters) \
+  V(DoubleRegister, DOUBLE_REGISTER, DoubleRegister::kMaxNumRegisters)
+
+class InstructionOperand : public ZoneObject {
+ public:
+  enum Kind {
+    INVALID,
+    UNALLOCATED,
+    CONSTANT,
+    IMMEDIATE,
+    STACK_SLOT,
+    DOUBLE_STACK_SLOT,
+    REGISTER,
+    DOUBLE_REGISTER
+  };
+
+  InstructionOperand() : value_(KindField::encode(INVALID)) {}
+  InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
+
+  Kind kind() const { return KindField::decode(value_); }
+  int index() const { return static_cast<int>(value_) >> KindField::kSize; }
+#define INSTRUCTION_OPERAND_PREDICATE(name, type, number) \
+  bool Is##name() const { return kind() == type; }
+  INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
+  INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
+  INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
+#undef INSTRUCTION_OPERAND_PREDICATE
+  bool Equals(InstructionOperand* other) const {
+    return value_ == other->value_;
+  }
+
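+  // {value_} packs the kind into its low 3 bits and the index into the
+  // remaining bits; index() uses an arithmetic shift so that negative
+  // stack-slot indices (used for arguments) survive the round-trip.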
+  void ConvertTo(Kind kind, int index) {
+    if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0);
+    value_ = KindField::encode(kind);
+    value_ |= index << KindField::kSize;
+    DCHECK(this->index() == index);
+  }
+
+  // Calls SetUpCache()/TearDownCache() for each subclass.
+  static void SetUpCaches();
+  static void TearDownCaches();
+
+ protected:
+  typedef BitField<Kind, 0, 3> KindField;
+
+  unsigned value_;
+};
+
+typedef ZoneVector<InstructionOperand*> InstructionOperandVector;
+
+OStream& operator<<(OStream& os, const InstructionOperand& op);
+
+class UnallocatedOperand : public InstructionOperand {
+ public:
+  enum BasicPolicy { FIXED_SLOT, EXTENDED_POLICY };
+
+  enum ExtendedPolicy {
+    NONE,
+    ANY,
+    FIXED_REGISTER,
+    FIXED_DOUBLE_REGISTER,
+    MUST_HAVE_REGISTER,
+    SAME_AS_FIRST_INPUT
+  };
+
+  // Lifetime of operand inside the instruction.
+  enum Lifetime {
+    // A USED_AT_START operand is guaranteed to be live only at the start of
+    // the instruction. The register allocator is free to assign the same
+    // register to some other operand used inside the instruction (i.e. a
+    // temporary or an output).
+    USED_AT_START,
+
+    // A USED_AT_END operand is treated as live until the end of the
+    // instruction. This means that the register allocator will not reuse its
+    // register for any other operand inside the instruction.
+    USED_AT_END
+  };
+
+  explicit UnallocatedOperand(ExtendedPolicy policy)
+      : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(USED_AT_END);
+  }
+
+  UnallocatedOperand(BasicPolicy policy, int index)
+      : InstructionOperand(UNALLOCATED, 0) {
+    DCHECK(policy == FIXED_SLOT);
+    value_ |= BasicPolicyField::encode(policy);
+    value_ |= index << FixedSlotIndexField::kShift;
+    DCHECK(this->fixed_slot_index() == index);
+  }
+
+  UnallocatedOperand(ExtendedPolicy policy, int index)
+      : InstructionOperand(UNALLOCATED, 0) {
+    DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(USED_AT_END);
+    value_ |= FixedRegisterField::encode(index);
+  }
+
+  UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
+      : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
+    value_ |= ExtendedPolicyField::encode(policy);
+    value_ |= LifetimeField::encode(lifetime);
+  }
+
+  UnallocatedOperand* CopyUnconstrained(Zone* zone) {
+    UnallocatedOperand* result = new (zone) UnallocatedOperand(ANY);
+    result->set_virtual_register(virtual_register());
+    return result;
+  }
+
+  static const UnallocatedOperand* cast(const InstructionOperand* op) {
+    DCHECK(op->IsUnallocated());
+    return static_cast<const UnallocatedOperand*>(op);
+  }
+
+  static UnallocatedOperand* cast(InstructionOperand* op) {
+    DCHECK(op->IsUnallocated());
+    return static_cast<UnallocatedOperand*>(op);
+  }
+
+  // The encoding used for UnallocatedOperand operands depends on the policy
+  // that is stored within the operand. The FIXED_SLOT policy uses a compact
+  // encoding so that it can accommodate a larger payload (the slot index).
+  //
+  // For FIXED_SLOT policy:
+  //     +------------------------------------------+
+  //     |       slot_index      |  vreg  | 0 | 001 |
+  //     +------------------------------------------+
+  //
+  // For all other (extended) policies:
+  //     +------------------------------------------+
+  //     |  reg_index  | L | PPP |  vreg  | 1 | 001 |    L ... Lifetime
+  //     +------------------------------------------+    P ... Policy
+  //
+  // The slot index is a signed value which requires us to decode it manually
+  // instead of using the BitField utility class.
+
+  // The superclass has a KindField.
+  STATIC_ASSERT(KindField::kSize == 3);
+
+  // BitFields for all unallocated operands.
+  class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
+  class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+
+  // BitFields specific to BasicPolicy::FIXED_SLOT.
+  class FixedSlotIndexField : public BitField<int, 22, 10> {};
+
+  // BitFields specific to BasicPolicy::EXTENDED_POLICY.
+  class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
+  class LifetimeField : public BitField<Lifetime, 25, 1> {};
+  class FixedRegisterField : public BitField<int, 26, 6> {};
+
+  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+  static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
+  static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
+  static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
+
+  // Predicates for the operand policy.
+  bool HasAnyPolicy() const {
+    return basic_policy() == EXTENDED_POLICY && extended_policy() == ANY;
+  }
+  bool HasFixedPolicy() const {
+    return basic_policy() == FIXED_SLOT ||
+           extended_policy() == FIXED_REGISTER ||
+           extended_policy() == FIXED_DOUBLE_REGISTER;
+  }
+  bool HasRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == MUST_HAVE_REGISTER;
+  }
+  bool HasSameAsInputPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == SAME_AS_FIRST_INPUT;
+  }
+  bool HasFixedSlotPolicy() const { return basic_policy() == FIXED_SLOT; }
+  bool HasFixedRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == FIXED_REGISTER;
+  }
+  bool HasFixedDoubleRegisterPolicy() const {
+    return basic_policy() == EXTENDED_POLICY &&
+           extended_policy() == FIXED_DOUBLE_REGISTER;
+  }
+
+  // [basic_policy]: Distinguish between FIXED_SLOT and all other policies.
+  BasicPolicy basic_policy() const { return BasicPolicyField::decode(value_); }
+
+  // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
+  ExtendedPolicy extended_policy() const {
+    DCHECK(basic_policy() == EXTENDED_POLICY);
+    return ExtendedPolicyField::decode(value_);
+  }
+
+  // [fixed_slot_index]: Only for FIXED_SLOT.
+  int fixed_slot_index() const {
+    DCHECK(HasFixedSlotPolicy());
+    return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+  }
+
+  // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
+  int fixed_register_index() const {
+    DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+    return FixedRegisterField::decode(value_);
+  }
+
+  // [virtual_register]: The virtual register ID for this operand.
+  int virtual_register() const { return VirtualRegisterField::decode(value_); }
+  void set_virtual_register(unsigned id) {
+    value_ = VirtualRegisterField::update(value_, id);
+  }
+
+  // [lifetime]: Only for non-FIXED_SLOT.
+  bool IsUsedAtStart() {
+    DCHECK(basic_policy() == EXTENDED_POLICY);
+    return LifetimeField::decode(value_) == USED_AT_START;
+  }
+};
+
+
+class MoveOperands FINAL {
+ public:
+  MoveOperands(InstructionOperand* source, InstructionOperand* destination)
+      : source_(source), destination_(destination) {}
+
+  InstructionOperand* source() const { return source_; }
+  void set_source(InstructionOperand* operand) { source_ = operand; }
+
+  InstructionOperand* destination() const { return destination_; }
+  void set_destination(InstructionOperand* operand) { destination_ = operand; }
+
+  // The gap resolver marks moves as "in-progress" by clearing the
+  // destination (but not the source).
+  bool IsPending() const { return destination_ == NULL && source_ != NULL; }
+
+  // True if this move blocks a move into the given destination operand,
+  // i.e. this move's source is that operand.
+  bool Blocks(InstructionOperand* operand) const {
+    return !IsEliminated() && source()->Equals(operand);
+  }
+
+  // A move is redundant if it's been eliminated, if its source and
+  // destination are the same, or if its destination is unneeded or constant.
+  bool IsRedundant() const {
+    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+           (destination_ != NULL && destination_->IsConstant());
+  }
+
+  bool IsIgnored() const {
+    return destination_ != NULL && destination_->IsIgnored();
+  }
+
+  // We clear both operands to indicate a move that has been eliminated.
+  void Eliminate() { source_ = destination_ = NULL; }
+  bool IsEliminated() const {
+    DCHECK(source_ != NULL || destination_ == NULL);
+    return source_ == NULL;
+  }
+
+ private:
+  InstructionOperand* source_;
+  InstructionOperand* destination_;
+};
+
+OStream& operator<<(OStream& os, const MoveOperands& mo);
+
+template <InstructionOperand::Kind kOperandKind, int kNumCachedOperands>
+class SubKindOperand FINAL : public InstructionOperand {
+ public:
+  static SubKindOperand* Create(int index, Zone* zone) {
+    DCHECK(index >= 0);
+    if (index < kNumCachedOperands) return &cache[index];
+    return new (zone) SubKindOperand(index);
+  }
+
+  static SubKindOperand* cast(InstructionOperand* op) {
+    DCHECK(op->kind() == kOperandKind);
+    return reinterpret_cast<SubKindOperand*>(op);
+  }
+
+  static void SetUpCache();
+  static void TearDownCache();
+
+ private:
+  static SubKindOperand* cache;
+
+  SubKindOperand() : InstructionOperand() {}
+  explicit SubKindOperand(int index)
+      : InstructionOperand(kOperandKind, index) {}
+};
+
+
+#define INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS(name, type, number) \
+  typedef SubKindOperand<InstructionOperand::type, number> name##Operand;
+INSTRUCTION_OPERAND_LIST(INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS)
+#undef INSTRUCTION_TYPEDEF_SUBKIND_OPERAND_CLASS
+
+
+class ParallelMove FINAL : public ZoneObject {
+ public:
+  explicit ParallelMove(Zone* zone) : move_operands_(4, zone) {}
+
+  void AddMove(InstructionOperand* from, InstructionOperand* to, Zone* zone) {
+    move_operands_.Add(MoveOperands(from, to), zone);
+  }
+
+  bool IsRedundant() const;
+
+  ZoneList<MoveOperands>* move_operands() { return &move_operands_; }
+  const ZoneList<MoveOperands>* move_operands() const {
+    return &move_operands_;
+  }
+
+ private:
+  ZoneList<MoveOperands> move_operands_;
+};
+
+OStream& operator<<(OStream& os, const ParallelMove& pm);
+
+class PointerMap FINAL : public ZoneObject {
+ public:
+  explicit PointerMap(Zone* zone)
+      : pointer_operands_(8, zone),
+        untagged_operands_(0, zone),
+        instruction_position_(-1) {}
+
+  const ZoneList<InstructionOperand*>* GetNormalizedOperands() {
+    for (int i = 0; i < untagged_operands_.length(); ++i) {
+      RemovePointer(untagged_operands_[i]);
+    }
+    untagged_operands_.Clear();
+    return &pointer_operands_;
+  }
+  int instruction_position() const { return instruction_position_; }
+
+  void set_instruction_position(int pos) {
+    DCHECK(instruction_position_ == -1);
+    instruction_position_ = pos;
+  }
+
+  void RecordPointer(InstructionOperand* op, Zone* zone);
+  void RemovePointer(InstructionOperand* op);
+  void RecordUntagged(InstructionOperand* op, Zone* zone);
+
+ private:
+  friend OStream& operator<<(OStream& os, const PointerMap& pm);
+
+  ZoneList<InstructionOperand*> pointer_operands_;
+  ZoneList<InstructionOperand*> untagged_operands_;
+  int instruction_position_;
+};
+
+OStream& operator<<(OStream& os, const PointerMap& pm);
+
+// TODO(titzer): s/PointerMap/ReferenceMap/
+class Instruction : public ZoneObject {
+ public:
+  size_t OutputCount() const { return OutputCountField::decode(bit_field_); }
+  InstructionOperand* OutputAt(size_t i) const {
+    DCHECK(i < OutputCount());
+    return operands_[i];
+  }
+
+  bool HasOutput() const { return OutputCount() == 1; }
+  InstructionOperand* Output() const { return OutputAt(0); }
+
+  size_t InputCount() const { return InputCountField::decode(bit_field_); }
+  InstructionOperand* InputAt(size_t i) const {
+    DCHECK(i < InputCount());
+    return operands_[OutputCount() + i];
+  }
+
+  size_t TempCount() const { return TempCountField::decode(bit_field_); }
+  InstructionOperand* TempAt(size_t i) const {
+    DCHECK(i < TempCount());
+    return operands_[OutputCount() + InputCount() + i];
+  }
+
+  InstructionCode opcode() const { return opcode_; }
+  ArchOpcode arch_opcode() const { return ArchOpcodeField::decode(opcode()); }
+  AddressingMode addressing_mode() const {
+    return AddressingModeField::decode(opcode());
+  }
+  FlagsMode flags_mode() const { return FlagsModeField::decode(opcode()); }
+  FlagsCondition flags_condition() const {
+    return FlagsConditionField::decode(opcode());
+  }
+
+  // TODO(titzer): make control and call into flags.
+  static Instruction* New(Zone* zone, InstructionCode opcode) {
+    return New(zone, opcode, 0, NULL, 0, NULL, 0, NULL);
+  }
+
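+  // Instructions are allocated with a variable-length tail: the one-element
+  // operands_ array at the end of the class is over-allocated so that all
+  // outputs, inputs and temps are stored inline, in that order.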
+  static Instruction* New(Zone* zone, InstructionCode opcode,
+                          size_t output_count, InstructionOperand** outputs,
+                          size_t input_count, InstructionOperand** inputs,
+                          size_t temp_count, InstructionOperand** temps) {
+    DCHECK(opcode >= 0);
+    DCHECK(output_count == 0 || outputs != NULL);
+    DCHECK(input_count == 0 || inputs != NULL);
+    DCHECK(temp_count == 0 || temps != NULL);
+    InstructionOperand* none = NULL;
+    USE(none);
+    int size = static_cast<int>(RoundUp(sizeof(Instruction), kPointerSize) +
+                                (output_count + input_count + temp_count - 1) *
+                                    sizeof(none));
+    return new (zone->New(size)) Instruction(
+        opcode, output_count, outputs, input_count, inputs, temp_count, temps);
+  }
+
+  // TODO(titzer): another holdover from lithium days; register allocator
+  // should not need to know about control instructions.
+  Instruction* MarkAsControl() {
+    bit_field_ = IsControlField::update(bit_field_, true);
+    return this;
+  }
+  Instruction* MarkAsCall() {
+    bit_field_ = IsCallField::update(bit_field_, true);
+    return this;
+  }
+  bool IsControl() const { return IsControlField::decode(bit_field_); }
+  bool IsCall() const { return IsCallField::decode(bit_field_); }
+  bool NeedsPointerMap() const { return IsCall(); }
+  bool HasPointerMap() const { return pointer_map_ != NULL; }
+
+  bool IsGapMoves() const {
+    return opcode() == kGapInstruction || opcode() == kBlockStartInstruction;
+  }
+  bool IsBlockStart() const { return opcode() == kBlockStartInstruction; }
+  bool IsSourcePosition() const {
+    return opcode() == kSourcePositionInstruction;
+  }
+
+  bool ClobbersRegisters() const { return IsCall(); }
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersDoubleRegisters() const { return IsCall(); }
+  PointerMap* pointer_map() const { return pointer_map_; }
+
+  void set_pointer_map(PointerMap* map) {
+    DCHECK(NeedsPointerMap());
+    DCHECK_EQ(NULL, pointer_map_);
+    pointer_map_ = map;
+  }
+
+  // Placement new operator so that we can smash instructions into
+  // zone-allocated memory.
+  void* operator new(size_t, void* location) { return location; }
+
+  void operator delete(void* pointer, void* location) { UNREACHABLE(); }
+
+ protected:
+  explicit Instruction(InstructionCode opcode)
+      : opcode_(opcode),
+        bit_field_(OutputCountField::encode(0) | InputCountField::encode(0) |
+                   TempCountField::encode(0) | IsCallField::encode(false) |
+                   IsControlField::encode(false)),
+        pointer_map_(NULL) {}
+
+  Instruction(InstructionCode opcode, size_t output_count,
+              InstructionOperand** outputs, size_t input_count,
+              InstructionOperand** inputs, size_t temp_count,
+              InstructionOperand** temps)
+      : opcode_(opcode),
+        bit_field_(OutputCountField::encode(output_count) |
+                   InputCountField::encode(input_count) |
+                   TempCountField::encode(temp_count) |
+                   IsCallField::encode(false) | IsControlField::encode(false)),
+        pointer_map_(NULL) {
+    for (size_t i = 0; i < output_count; ++i) {
+      operands_[i] = outputs[i];
+    }
+    for (size_t i = 0; i < input_count; ++i) {
+      operands_[output_count + i] = inputs[i];
+    }
+    for (size_t i = 0; i < temp_count; ++i) {
+      operands_[output_count + input_count + i] = temps[i];
+    }
+  }
+
+ protected:
+  typedef BitField<size_t, 0, 8> OutputCountField;
+  typedef BitField<size_t, 8, 16> InputCountField;
+  typedef BitField<size_t, 24, 6> TempCountField;
+  typedef BitField<bool, 30, 1> IsCallField;
+  typedef BitField<bool, 31, 1> IsControlField;
+
+  InstructionCode opcode_;
+  uint32_t bit_field_;
+  PointerMap* pointer_map_;
+  InstructionOperand* operands_[1];
+};
+
+OStream& operator<<(OStream& os, const Instruction& instr);
+
+// Represents moves inserted before an instruction due to register allocation.
+// TODO(titzer): squash GapInstruction back into Instruction, since essentially
+// every instruction can possibly have moves inserted before it.
+class GapInstruction : public Instruction {
+ public:
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
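+  // Lazily allocates a ParallelMove in {zone} for the given inner position.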
+  ParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new (zone) ParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  ParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+  static GapInstruction* New(Zone* zone) {
+    void* buffer = zone->New(sizeof(GapInstruction));
+    return new (buffer) GapInstruction(kGapInstruction);
+  }
+
+  static GapInstruction* cast(Instruction* instr) {
+    DCHECK(instr->IsGapMoves());
+    return static_cast<GapInstruction*>(instr);
+  }
+
+  static const GapInstruction* cast(const Instruction* instr) {
+    DCHECK(instr->IsGapMoves());
+    return static_cast<const GapInstruction*>(instr);
+  }
+
+ protected:
+  explicit GapInstruction(InstructionCode opcode) : Instruction(opcode) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+ private:
+  friend OStream& operator<<(OStream& os, const Instruction& instr);
+  ParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+};
+
+
+// This special kind of gap move instruction represents the beginning of a
+// block of code.
+// TODO(titzer): move code_start and code_end from BasicBlock to here.
+class BlockStartInstruction FINAL : public GapInstruction {
+ public:
+  BasicBlock* block() const { return block_; }
+  Label* label() { return &label_; }
+
+  static BlockStartInstruction* New(Zone* zone, BasicBlock* block) {
+    void* buffer = zone->New(sizeof(BlockStartInstruction));
+    return new (buffer) BlockStartInstruction(block);
+  }
+
+  static BlockStartInstruction* cast(Instruction* instr) {
+    DCHECK(instr->IsBlockStart());
+    return static_cast<BlockStartInstruction*>(instr);
+  }
+
+ private:
+  explicit BlockStartInstruction(BasicBlock* block)
+      : GapInstruction(kBlockStartInstruction), block_(block) {}
+
+  BasicBlock* block_;
+  Label label_;
+};
+
+
+class SourcePositionInstruction FINAL : public Instruction {
+ public:
+  static SourcePositionInstruction* New(Zone* zone, SourcePosition position) {
+    void* buffer = zone->New(sizeof(SourcePositionInstruction));
+    return new (buffer) SourcePositionInstruction(position);
+  }
+
+  SourcePosition source_position() const { return source_position_; }
+
+  static SourcePositionInstruction* cast(Instruction* instr) {
+    DCHECK(instr->IsSourcePosition());
+    return static_cast<SourcePositionInstruction*>(instr);
+  }
+
+  static const SourcePositionInstruction* cast(const Instruction* instr) {
+    DCHECK(instr->IsSourcePosition());
+    return static_cast<const SourcePositionInstruction*>(instr);
+  }
+
+ private:
+  explicit SourcePositionInstruction(SourcePosition source_position)
+      : Instruction(kSourcePositionInstruction),
+        source_position_(source_position) {
+    DCHECK(!source_position_.IsInvalid());
+    DCHECK(!source_position_.IsUnknown());
+  }
+
+  SourcePosition source_position_;
+};
+
+
+class Constant FINAL {
+ public:
+  enum Type { kInt32, kInt64, kFloat64, kExternalReference, kHeapObject };
+
+  explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
+  explicit Constant(int64_t v) : type_(kInt64), value_(v) {}
+  explicit Constant(double v) : type_(kFloat64), value_(bit_cast<int64_t>(v)) {}
+  explicit Constant(ExternalReference ref)
+      : type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
+  explicit Constant(Handle<HeapObject> obj)
+      : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
+
+  Type type() const { return type_; }
+
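+  // Typed accessors; ToInt64 and ToFloat64 also accept kInt32 constants and
+  // widen them.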
+  int32_t ToInt32() const {
+    DCHECK_EQ(kInt32, type());
+    return static_cast<int32_t>(value_);
+  }
+
+  int64_t ToInt64() const {
+    if (type() == kInt32) return ToInt32();
+    DCHECK_EQ(kInt64, type());
+    return value_;
+  }
+
+  double ToFloat64() const {
+    if (type() == kInt32) return ToInt32();
+    DCHECK_EQ(kFloat64, type());
+    return bit_cast<double>(value_);
+  }
+
+  ExternalReference ToExternalReference() const {
+    DCHECK_EQ(kExternalReference, type());
+    return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
+  }
+
+  Handle<HeapObject> ToHeapObject() const {
+    DCHECK_EQ(kHeapObject, type());
+    return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
+  }
+
+ private:
+  Type type_;
+  int64_t value_;
+};
+
+
+class FrameStateDescriptor : public ZoneObject {
+ public:
+  FrameStateDescriptor(const FrameStateCallInfo& state_info,
+                       size_t parameters_count, size_t locals_count,
+                       size_t stack_count,
+                       FrameStateDescriptor* outer_state = NULL)
+      : type_(state_info.type()),
+        bailout_id_(state_info.bailout_id()),
+        frame_state_combine_(state_info.state_combine()),
+        parameters_count_(parameters_count),
+        locals_count_(locals_count),
+        stack_count_(stack_count),
+        outer_state_(outer_state),
+        jsfunction_(state_info.jsfunction()) {}
+
+  FrameStateType type() const { return type_; }
+  BailoutId bailout_id() const { return bailout_id_; }
+  OutputFrameStateCombine state_combine() const { return frame_state_combine_; }
+  size_t parameters_count() const { return parameters_count_; }
+  size_t locals_count() const { return locals_count_; }
+  size_t stack_count() const { return stack_count_; }
+  FrameStateDescriptor* outer_state() const { return outer_state_; }
+  MaybeHandle<JSFunction> jsfunction() const { return jsfunction_; }
+
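+  // Slot count of this frame alone: parameters, locals, stack slots, plus a
+  // context slot for JS frames.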
+  size_t size() const {
+    return parameters_count_ + locals_count_ + stack_count_ +
+           (HasContext() ? 1 : 0);
+  }
+
+  size_t GetTotalSize() const {
+    size_t total_size = 0;
+    for (const FrameStateDescriptor* iter = this; iter != NULL;
+         iter = iter->outer_state_) {
+      total_size += iter->size();
+    }
+    return total_size;
+  }
+
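+  // Height of this frame above its parameters; kPushOutput accounts for the
+  // instruction's result being pushed onto the frame as well.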
+  size_t GetHeight(OutputFrameStateCombine override) const {
+    size_t height = size() - parameters_count();
+    switch (override) {
+      case kPushOutput:
+        ++height;
+        break;
+      case kIgnoreOutput:
+        break;
+    }
+    return height;
+  }
+
+  size_t GetFrameCount() const {
+    size_t count = 0;
+    for (const FrameStateDescriptor* iter = this; iter != NULL;
+         iter = iter->outer_state_) {
+      ++count;
+    }
+    return count;
+  }
+
+  size_t GetJSFrameCount() const {
+    size_t count = 0;
+    for (const FrameStateDescriptor* iter = this; iter != NULL;
+         iter = iter->outer_state_) {
+      if (iter->type_ == JS_FRAME) {
+        ++count;
+      }
+    }
+    return count;
+  }
+
+  bool HasContext() const { return type_ == JS_FRAME; }
+
+ private:
+  FrameStateType type_;
+  BailoutId bailout_id_;
+  OutputFrameStateCombine frame_state_combine_;
+  size_t parameters_count_;
+  size_t locals_count_;
+  size_t stack_count_;
+  FrameStateDescriptor* outer_state_;
+  MaybeHandle<JSFunction> jsfunction_;
+};
+
+OStream& operator<<(OStream& os, const Constant& constant);
+
+typedef ZoneDeque<Constant> ConstantDeque;
+typedef std::map<int, Constant, std::less<int>,
+                 zone_allocator<std::pair<int, Constant> > > ConstantMap;
+
+typedef ZoneDeque<Instruction*> InstructionDeque;
+typedef ZoneDeque<PointerMap*> PointerMapDeque;
+typedef ZoneVector<FrameStateDescriptor*> DeoptimizationVector;
+
+// Represents architecture-specific generated code before, during, and after
+// register allocation.
+// TODO(titzer): s/IsDouble/IsFloat64/
+class InstructionSequence FINAL {
+ public:
+  InstructionSequence(Linkage* linkage, Graph* graph, Schedule* schedule)
+      : graph_(graph),
+        linkage_(linkage),
+        schedule_(schedule),
+        constants_(ConstantMap::key_compare(),
+                   ConstantMap::allocator_type(zone())),
+        immediates_(zone()),
+        instructions_(zone()),
+        next_virtual_register_(graph->NodeCount()),
+        pointer_maps_(zone()),
+        doubles_(std::less<int>(), VirtualRegisterSet::allocator_type(zone())),
+        references_(std::less<int>(),
+                    VirtualRegisterSet::allocator_type(zone())),
+        deoptimization_entries_(zone()) {}
+
+  int NextVirtualRegister() { return next_virtual_register_++; }
+  int VirtualRegisterCount() const { return next_virtual_register_; }
+
+  int ValueCount() const { return graph_->NodeCount(); }
+
+  int BasicBlockCount() const {
+    return static_cast<int>(schedule_->rpo_order()->size());
+  }
+
+  BasicBlock* BlockAt(int rpo_number) const {
+    return (*schedule_->rpo_order())[rpo_number];
+  }
+
+  BasicBlock* GetContainingLoop(BasicBlock* block) {
+    return block->loop_header_;
+  }
+
+  int GetLoopEnd(BasicBlock* block) const { return block->loop_end_; }
+
+  BasicBlock* GetBasicBlock(int instruction_index);
+
+  int GetVirtualRegister(Node* node) const { return node->id(); }
+
+  bool IsReference(int virtual_register) const;
+  bool IsDouble(int virtual_register) const;
+
+  void MarkAsReference(int virtual_register);
+  void MarkAsDouble(int virtual_register);
+
+  void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
+
+  Label* GetLabel(BasicBlock* block);
+  BlockStartInstruction* GetBlockStart(BasicBlock* block);
+
+  typedef InstructionDeque::const_iterator const_iterator;
+  const_iterator begin() const { return instructions_.begin(); }
+  const_iterator end() const { return instructions_.end(); }
+
+  GapInstruction* GapAt(int index) const {
+    return GapInstruction::cast(InstructionAt(index));
+  }
+  bool IsGapAt(int index) const { return InstructionAt(index)->IsGapMoves(); }
+  Instruction* InstructionAt(int index) const {
+    DCHECK(index >= 0);
+    DCHECK(index < static_cast<int>(instructions_.size()));
+    return instructions_[index];
+  }
+
+  Frame* frame() { return &frame_; }
+  Graph* graph() const { return graph_; }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Linkage* linkage() const { return linkage_; }
+  Schedule* schedule() const { return schedule_; }
+  const PointerMapDeque* pointer_maps() const { return &pointer_maps_; }
+  Zone* zone() const { return graph_->zone(); }
+
+  // Used by the code generator while adding instructions.
+  int AddInstruction(Instruction* instr, BasicBlock* block);
+  void StartBlock(BasicBlock* block);
+  void EndBlock(BasicBlock* block);
+
+  void AddConstant(int virtual_register, Constant constant) {
+    DCHECK(constants_.find(virtual_register) == constants_.end());
+    constants_.insert(std::make_pair(virtual_register, constant));
+  }
+  Constant GetConstant(int virtual_register) const {
+    ConstantMap::const_iterator it = constants_.find(virtual_register);
+    DCHECK(it != constants_.end());
+    DCHECK_EQ(virtual_register, it->first);
+    return it->second;
+  }
+
+  typedef ConstantDeque Immediates;
+  const Immediates& immediates() const { return immediates_; }
+
+  int AddImmediate(Constant constant) {
+    int index = static_cast<int>(immediates_.size());
+    immediates_.push_back(constant);
+    return index;
+  }
+  Constant GetImmediate(int index) const {
+    DCHECK(index >= 0);
+    DCHECK(index < static_cast<int>(immediates_.size()));
+    return immediates_[index];
+  }
+
+  class StateId {
+   public:
+    static StateId FromInt(int id) { return StateId(id); }
+    int ToInt() const { return id_; }
+
+   private:
+    explicit StateId(int id) : id_(id) {}
+    int id_;
+  };
+
+  StateId AddFrameStateDescriptor(FrameStateDescriptor* descriptor);
+  FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
+  int GetFrameStateDescriptorCount();
+
+ private:
+  friend OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+  typedef std::set<int, std::less<int>, ZoneIntAllocator> VirtualRegisterSet;
+
+  Graph* graph_;
+  Linkage* linkage_;
+  Schedule* schedule_;
+  ConstantMap constants_;
+  ConstantDeque immediates_;
+  InstructionDeque instructions_;
+  int next_virtual_register_;
+  PointerMapDeque pointer_maps_;
+  VirtualRegisterSet doubles_;
+  VirtualRegisterSet references_;
+  Frame frame_;
+  DeoptimizationVector deoptimization_entries_;
+};
+
+OStream& operator<<(OStream& os, const InstructionSequence& code);
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_INSTRUCTION_H_
diff --git a/src/compiler/ir-operations.txt b/src/compiler/ir-operations.txt
new file mode 100644
index 0000000..e69de29
--- /dev/null
+++ b/src/compiler/ir-operations.txt
diff --git a/src/compiler/js-builtin-reducer-unittest.cc b/src/compiler/js-builtin-reducer-unittest.cc
new file mode 100644
index 0000000..51561d0
--- /dev/null
+++ b/src/compiler/js-builtin-reducer-unittest.cc
@@ -0,0 +1,177 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "testing/gmock-support.h"
+
+using testing::Capture;
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducerTest : public GraphTest {
+ public:
+  JSBuiltinReducerTest() : javascript_(zone()) {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    MachineOperatorBuilder machine;
+    JSGraph jsgraph(graph(), common(), javascript(), &typer, &machine);
+    JSBuiltinReducer reducer(&jsgraph);
+    return reducer.Reduce(node);
+  }
+
+  Node* Parameter(Type* t, int32_t index = 0) {
+    Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
+    NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+    return n;
+  }
+
+  Node* UndefinedConstant() {
+    return HeapConstant(
+        Unique<HeapObject>::CreateImmovable(factory()->undefined_value()));
+  }
+
+  JSOperatorBuilder* javascript() { return &javascript_; }
+
+ private:
+  JSOperatorBuilder javascript_;
+};
+
+
+namespace {
+
+// TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
+Type* const kNumberTypes[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
+    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
+    Type::OrderedNumber(),   Type::Number()};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Math.sqrt
+
+
+TEST_F(JSBuiltinReducerTest, MathSqrt) {
+  Handle<JSFunction> f(isolate()->context()->math_sqrt_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+                                  fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call);
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsFloat64Sqrt(p0));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.max
+
+
+TEST_F(JSBuiltinReducerTest, MathMax0) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+  Node* call = graph()->NewNode(javascript()->Call(2, NO_CALL_FUNCTION_FLAGS),
+                                fun, UndefinedConstant());
+  Reduction r = Reduce(call);
+
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsNumberConstant(-V8_INFINITY));
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax1) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    Node* p0 = Parameter(t0, 0);
+    Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+    Node* call = graph()->NewNode(javascript()->Call(3, NO_CALL_FUNCTION_FLAGS),
+                                  fun, UndefinedConstant(), p0);
+    Reduction r = Reduce(call);
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), p0);
+  }
+}
+
+
+TEST_F(JSBuiltinReducerTest, MathMax2) {
+  Handle<JSFunction> f(isolate()->context()->math_max_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    TRACED_FOREACH(Type*, t1, kNumberTypes) {
+      Node* p0 = Parameter(t0, 0);
+      Node* p1 = Parameter(t1, 1);
+      Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+      Node* call =
+          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+                           UndefinedConstant(), p0, p1);
+      Reduction r = Reduce(call);
+
+      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+        Capture<Node*> branch;
+        ASSERT_TRUE(r.Changed());
+        EXPECT_THAT(
+            r.replacement(),
+            IsPhi(kMachNone, p1, p0,
+                  IsMerge(IsIfTrue(CaptureEq(&branch)),
+                          IsIfFalse(AllOf(CaptureEq(&branch),
+                                          IsBranch(IsNumberLessThan(p0, p1),
+                                                   graph()->start()))))));
+      } else {
+        ASSERT_FALSE(r.Changed());
+        EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+      }
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Math.imul
+
+
+TEST_F(JSBuiltinReducerTest, MathImul) {
+  Handle<JSFunction> f(isolate()->context()->math_imul_fun());
+
+  TRACED_FOREACH(Type*, t0, kNumberTypes) {
+    TRACED_FOREACH(Type*, t1, kNumberTypes) {
+      Node* p0 = Parameter(t0, 0);
+      Node* p1 = Parameter(t1, 1);
+      Node* fun = HeapConstant(Unique<HeapObject>::CreateUninitialized(f));
+      Node* call =
+          graph()->NewNode(javascript()->Call(4, NO_CALL_FUNCTION_FLAGS), fun,
+                           UndefinedConstant(), p0, p1);
+      Reduction r = Reduce(call);
+
+      if (t0->Is(Type::Integral32()) && t1->Is(Type::Integral32())) {
+        ASSERT_TRUE(r.Changed());
+        EXPECT_THAT(r.replacement(), IsInt32Mul(p0, p1));
+      } else {
+        ASSERT_FALSE(r.Changed());
+        EXPECT_EQ(IrOpcode::kJSCallFunction, call->opcode());
+      }
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-builtin-reducer.cc b/src/compiler/js-builtin-reducer.cc
new file mode 100644
index 0000000..c57ac33
--- /dev/null
+++ b/src/compiler/js-builtin-reducer.cc
@@ -0,0 +1,174 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+// Helper method that assumes replacement nodes are pure values that don't
+// produce an effect. Replaces {node} with {reduction} and relaxes effects.
+static Reduction ReplaceWithPureReduction(Node* node, Reduction reduction) {
+  if (reduction.Changed()) {
+    NodeProperties::ReplaceWithValue(node, reduction.replacement());
+    return reduction;
+  }
+  return Reducer::NoChange();
+}
+
+
+// Helper class to access JSCallFunction nodes that are potential candidates
+// for reduction when they have a BuiltinFunctionId associated with them.
+class JSCallReduction {
+ public:
+  explicit JSCallReduction(Node* node) : node_(node) {}
+
+  // Determines whether the node is a JSCallFunction operation whose constant
+  // callee is a well-known builtin with a BuiltinFunctionId.
+  bool HasBuiltinFunctionId() {
+    if (node_->opcode() != IrOpcode::kJSCallFunction) return false;
+    HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+    if (!m.HasValue() || !m.Value().handle()->IsJSFunction()) return false;
+    Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+    return function->shared()->HasBuiltinFunctionId();
+  }
+
+  // Retrieves the BuiltinFunctionId as described above.
+  BuiltinFunctionId GetBuiltinFunctionId() {
+    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    HeapObjectMatcher<Object> m(NodeProperties::GetValueInput(node_, 0));
+    Handle<JSFunction> function = Handle<JSFunction>::cast(m.Value().handle());
+    return function->shared()->builtin_function_id();
+  }
+
+  // Determines whether the call takes zero inputs.
+  bool InputsMatchZero() { return GetJSCallArity() == 0; }
+
+  // Determines whether the call takes one input of the given type.
+  bool InputsMatchOne(Type* t1) {
+    return GetJSCallArity() == 1 &&
+           NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1);
+  }
+
+  // Determines whether the call takes two inputs of the given types.
+  bool InputsMatchTwo(Type* t1, Type* t2) {
+    return GetJSCallArity() == 2 &&
+           NodeProperties::GetBounds(GetJSCallInput(0)).upper->Is(t1) &&
+           NodeProperties::GetBounds(GetJSCallInput(1)).upper->Is(t2);
+  }
+
+  // Determines whether all of the call's inputs have the given type.
+  bool InputsMatchAll(Type* t) {
+    for (int i = 0; i < GetJSCallArity(); i++) {
+      if (!NodeProperties::GetBounds(GetJSCallInput(i)).upper->Is(t)) {
+        return false;
+      }
+    }
+    return true;
+  }
+
+  Node* left() { return GetJSCallInput(0); }
+  Node* right() { return GetJSCallInput(1); }
+
+  int GetJSCallArity() {
+    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    // Skip first (i.e. callee) and second (i.e. receiver) operand.
+    return OperatorProperties::GetValueInputCount(node_->op()) - 2;
+  }
+
+  Node* GetJSCallInput(int index) {
+    DCHECK_EQ(IrOpcode::kJSCallFunction, node_->opcode());
+    DCHECK_LT(index, GetJSCallArity());
+    // Skip first (i.e. callee) and second (i.e. receiver) operand.
+    return NodeProperties::GetValueInput(node_, index + 2);
+  }
+
+ private:
+  Node* node_;
+};
+
+
+// ECMA-262, section 15.8.2.17.
+Reduction JSBuiltinReducer::ReduceMathSqrt(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.sqrt(a:number) -> Float64Sqrt(a)
+    Node* value = graph()->NewNode(machine()->Float64Sqrt(), r.left());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+// ECMA-262, section 15.8.2.11.
+Reduction JSBuiltinReducer::ReduceMathMax(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchZero()) {
+    // Math.max() -> -Infinity
+    return Replace(jsgraph()->Constant(-V8_INFINITY));
+  }
+  if (r.InputsMatchOne(Type::Number())) {
+    // Math.max(a:number) -> a
+    return Replace(r.left());
+  }
+  if (r.InputsMatchAll(Type::Integral32())) {
+    // Math.max(a:int32, b:int32, ...)
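+    // Lower to a chain of branch/phi diamonds, each keeping the larger of
+    // the running maximum and the next argument.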
+    Node* value = r.GetJSCallInput(0);
+    for (int i = 1; i < r.GetJSCallArity(); i++) {
+      Node* p = r.GetJSCallInput(i);
+      Node* control = graph()->start();
+      Node* tag = graph()->NewNode(simplified()->NumberLessThan(), value, p);
+
+      Node* branch = graph()->NewNode(common()->Branch(), tag, control);
+      Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+      Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+      Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+
+      value = graph()->NewNode(common()->Phi(kMachNone, 2), p, value, merge);
+    }
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+// ES6 draft 08-24-14, section 20.2.2.19.
+Reduction JSBuiltinReducer::ReduceMathImul(Node* node) {
+  JSCallReduction r(node);
+  if (r.InputsMatchTwo(Type::Integral32(), Type::Integral32())) {
+    // Math.imul(a:int32, b:int32) -> Int32Mul(a, b)
+    Node* value = graph()->NewNode(machine()->Int32Mul(), r.left(), r.right());
+    return Replace(value);
+  }
+  return NoChange();
+}
+
+
+Reduction JSBuiltinReducer::Reduce(Node* node) {
+  JSCallReduction r(node);
+
+  // Dispatch according to the BuiltinFunctionId if present.
+  if (!r.HasBuiltinFunctionId()) return NoChange();
+  switch (r.GetBuiltinFunctionId()) {
+    case kMathSqrt:
+      return ReplaceWithPureReduction(node, ReduceMathSqrt(node));
+    case kMathMax:
+      return ReplaceWithPureReduction(node, ReduceMathMax(node));
+    case kMathImul:
+      return ReplaceWithPureReduction(node, ReduceMathImul(node));
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-builtin-reducer.h b/src/compiler/js-builtin-reducer.h
new file mode 100644
index 0000000..13927f6
--- /dev/null
+++ b/src/compiler/js-builtin-reducer.h
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_BUILTIN_REDUCER_H_
+#define V8_COMPILER_JS_BUILTIN_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSBuiltinReducer FINAL : public Reducer {
+ public:
+  explicit JSBuiltinReducer(JSGraph* jsgraph)
+      : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+  virtual ~JSBuiltinReducer() {}
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const { return jsgraph_->graph(); }
+  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
+  MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+  Reduction ReduceMathSqrt(Node* node);
+  Reduction ReduceMathMax(Node* node);
+  Reduction ReduceMathImul(Node* node);
+
+  JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder simplified_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_BUILTIN_REDUCER_H_
diff --git a/src/compiler/js-context-specialization.cc b/src/compiler/js-context-specialization.cc
new file mode 100644
index 0000000..cd8932b
--- /dev/null
+++ b/src/compiler/js-context-specialization.cc
@@ -0,0 +1,141 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ContextSpecializationVisitor : public NullNodeVisitor {
+ public:
+  explicit ContextSpecializationVisitor(JSContextSpecializer* spec)
+      : spec_(spec) {}
+
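+  // Post-order hook: attempts to reduce context loads/stores; on success the
+  // node's uses are redirected and its inputs removed, leaving it dead.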
+  GenericGraphVisit::Control Post(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kJSLoadContext: {
+        Reduction r = spec_->ReduceJSLoadContext(node);
+        if (r.Changed() && r.replacement() != node) {
+          NodeProperties::ReplaceWithValue(node, r.replacement());
+          node->RemoveAllInputs();
+        }
+        break;
+      }
+      case IrOpcode::kJSStoreContext: {
+        Reduction r = spec_->ReduceJSStoreContext(node);
+        if (r.Changed() && r.replacement() != node) {
+          NodeProperties::ReplaceWithValue(node, r.replacement());
+          node->RemoveAllInputs();
+        }
+        break;
+      }
+      default:
+        break;
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  JSContextSpecializer* spec_;
+};
+
+
+void JSContextSpecializer::SpecializeToContext() {
+  NodeProperties::ReplaceWithValue(context_,
+                                   jsgraph_->Constant(info_->context()));
+
+  ContextSpecializationVisitor visitor(this);
+  jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+
+  HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
+  // If the context is not constant, no reduction can occur.
+  if (!m.HasValue()) {
+    return Reducer::NoChange();
+  }
+
+  ContextAccess access = OpParameter<ContextAccess>(node);
+
+  // Find the right parent context.
+  Context* context = *m.Value().handle();
+  for (int i = access.depth(); i > 0; --i) {
+    context = context->previous();
+  }
+
+  // If the slot itself is mutable, only the walk to the parent context can
+  // be folded in.
+  if (!access.immutable()) {
+    // The access does not have to look up a parent, nothing to fold.
+    if (access.depth() == 0) {
+      return Reducer::NoChange();
+    }
+    const Operator* op = jsgraph_->javascript()->LoadContext(
+        0, access.index(), access.immutable());
+    node->set_op(op);
+    Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
+    node->ReplaceInput(0, jsgraph_->Constant(context_handle));
+    return Reducer::Changed(node);
+  }
+  Handle<Object> value =
+      Handle<Object>(context->get(access.index()), info_->isolate());
+
+  // Even though the context slot is immutable, the context might have escaped
+  // before the function to which it belongs has initialized the slot.
+  // We must be conservative and check if the value in the slot is currently the
+  // hole or undefined. If it is neither of these, then it must be initialized.
+  if (value->IsUndefined() || value->IsTheHole()) {
+    return Reducer::NoChange();
+  }
+
+  // Success. The context load can be replaced with the constant.
+  // TODO(titzer): record the specialization for sharing code across multiple
+  // contexts that have the same value in the corresponding context slot.
+  return Reducer::Replace(jsgraph_->Constant(value));
+}
+
+
+Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+
+  HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
+  // If the context is not constant, no reduction can occur.
+  if (!m.HasValue()) {
+    return Reducer::NoChange();
+  }
+
+  ContextAccess access = OpParameter<ContextAccess>(node);
+
+  // The access does not have to look up a parent, nothing to fold.
+  if (access.depth() == 0) {
+    return Reducer::NoChange();
+  }
+
+  // Find the right parent context.
+  Context* context = *m.Value().handle();
+  for (int i = access.depth(); i > 0; --i) {
+    context = context->previous();
+  }
+
+  const Operator* op = jsgraph_->javascript()->StoreContext(0, access.index());
+  node->set_op(op);
+  Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate());
+  node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
+
+  return Reducer::Changed(node);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-context-specialization.h b/src/compiler/js-context-specialization.h
new file mode 100644
index 0000000..b8b50ed
--- /dev/null
+++ b/src/compiler/js-context-specialization.h
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
+#define V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/contexts.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Specializes a given JSGraph to a given context, potentially constant folding
+// some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
+class JSContextSpecializer {
+ public:
+  JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
+      : info_(info), jsgraph_(jsgraph), context_(context) {}
+
+  void SpecializeToContext();
+  Reduction ReduceJSLoadContext(Node* node);
+  Reduction ReduceJSStoreContext(Node* node);
+
+ private:
+  CompilationInfo* info_;
+  JSGraph* jsgraph_;
+  Node* context_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_CONTEXT_SPECIALIZATION_H_
diff --git a/src/compiler/js-generic-lowering.cc b/src/compiler/js-generic-lowering.cc
new file mode 100644
index 0000000..300604e
--- /dev/null
+++ b/src/compiler/js-generic-lowering.cc
@@ -0,0 +1,403 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+JSGenericLowering::JSGenericLowering(CompilationInfo* info, JSGraph* jsgraph)
+    : info_(info),
+      jsgraph_(jsgraph),
+      linkage_(new (jsgraph->zone()) Linkage(info)) {}
+
+
+void JSGenericLowering::PatchOperator(Node* node, const Operator* op) {
+  node->set_op(op);
+}
+
+
+void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) {
+  node->InsertInput(zone(), index, input);
+}
+
+
+Node* JSGenericLowering::SmiConstant(int32_t immediate) {
+  return jsgraph()->SmiConstant(immediate);
+}
+
+
+Node* JSGenericLowering::Int32Constant(int immediate) {
+  return jsgraph()->Int32Constant(immediate);
+}
+
+
+Node* JSGenericLowering::CodeConstant(Handle<Code> code) {
+  return jsgraph()->HeapConstant(code);
+}
+
+
+Node* JSGenericLowering::FunctionConstant(Handle<JSFunction> function) {
+  return jsgraph()->HeapConstant(function);
+}
+
+
+Node* JSGenericLowering::ExternalConstant(ExternalReference ref) {
+  return jsgraph()->ExternalConstant(ref);
+}
+
+
+Reduction JSGenericLowering::Reduce(Node* node) {
+  switch (node->opcode()) {
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    Lower##x(node);     \
+    break;
+    DECLARE_CASE(Branch)
+    JS_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+    default:
+      // Nothing to see.
+      return NoChange();
+  }
+  return Changed(node);
+}
+
+
+#define REPLACE_BINARY_OP_IC_CALL(op, token)                             \
+  void JSGenericLowering::Lower##op(Node* node) {                        \
+    ReplaceWithStubCall(node, CodeFactory::BinaryOpIC(isolate(), token), \
+                        CallDescriptor::kPatchableCallSiteWithNop);      \
+  }
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseOr, Token::BIT_OR)
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseXor, Token::BIT_XOR)
+REPLACE_BINARY_OP_IC_CALL(JSBitwiseAnd, Token::BIT_AND)
+REPLACE_BINARY_OP_IC_CALL(JSShiftLeft, Token::SHL)
+REPLACE_BINARY_OP_IC_CALL(JSShiftRight, Token::SAR)
+REPLACE_BINARY_OP_IC_CALL(JSShiftRightLogical, Token::SHR)
+REPLACE_BINARY_OP_IC_CALL(JSAdd, Token::ADD)
+REPLACE_BINARY_OP_IC_CALL(JSSubtract, Token::SUB)
+REPLACE_BINARY_OP_IC_CALL(JSMultiply, Token::MUL)
+REPLACE_BINARY_OP_IC_CALL(JSDivide, Token::DIV)
+REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
+#undef REPLACE_BINARY_OP_IC_CALL
+
+
+#define REPLACE_COMPARE_IC_CALL(op, token, pure)  \
+  void JSGenericLowering::Lower##op(Node* node) { \
+    ReplaceWithCompareIC(node, token, pure);      \
+  }
+REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false)
+REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false)
+REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT, true)
+REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT, true)
+REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT, false)
+REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT, false)
+REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE, false)
+REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false)
+#undef REPLACE_COMPARE_IC_CALL
+
+
+#define REPLACE_RUNTIME_CALL(op, fun)             \
+  void JSGenericLowering::Lower##op(Node* node) { \
+    ReplaceWithRuntimeCall(node, fun);            \
+  }
+REPLACE_RUNTIME_CALL(JSTypeOf, Runtime::kTypeof)
+REPLACE_RUNTIME_CALL(JSCreate, Runtime::kAbort)
+REPLACE_RUNTIME_CALL(JSCreateFunctionContext, Runtime::kNewFunctionContext)
+REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext)
+REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
+REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
+REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
+REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort)
+#undef REPLACE_RUNTIME_CALL
+
+
+#define REPLACE_UNIMPLEMENTED(op) \
+  void JSGenericLowering::Lower##op(Node* node) { UNIMPLEMENTED(); }
+REPLACE_UNIMPLEMENTED(JSToName)
+REPLACE_UNIMPLEMENTED(JSYield)
+REPLACE_UNIMPLEMENTED(JSDebugger)
+#undef REPLACE_UNIMPLEMENTED
+
+
+static CallDescriptor::Flags FlagsForNode(Node* node) {
+  CallDescriptor::Flags result = CallDescriptor::kNoFlags;
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    result |= CallDescriptor::kNeedsFrameState;
+  }
+  return result;
+}
+
+
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
+                                             bool pure) {
+  Callable callable = CodeFactory::CompareIC(isolate(), token);
+  bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
+  CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(
+      callable.descriptor(), 0,
+      CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node));
+  NodeVector inputs(zone());
+  inputs.reserve(node->InputCount() + 1);
+  inputs.push_back(CodeConstant(callable.code()));
+  inputs.push_back(NodeProperties::GetValueInput(node, 0));
+  inputs.push_back(NodeProperties::GetValueInput(node, 1));
+  inputs.push_back(NodeProperties::GetContextInput(node));
+  if (pure) {
+    // A pure (strict) comparison doesn't have an effect, control or frame
+    // state.  But for the graph, we need to add control and effect inputs.
+    DCHECK(!has_frame_state);
+    inputs.push_back(graph()->start());
+    inputs.push_back(graph()->start());
+  } else {
+    DCHECK(has_frame_state == FLAG_turbo_deoptimization);
+    if (FLAG_turbo_deoptimization) {
+      inputs.push_back(NodeProperties::GetFrameStateInput(node));
+    }
+    inputs.push_back(NodeProperties::GetEffectInput(node));
+    inputs.push_back(NodeProperties::GetControlInput(node));
+  }
+  Node* compare =
+      graph()->NewNode(common()->Call(desc_compare),
+                       static_cast<int>(inputs.size()), &inputs.front());
+
+  node->ReplaceInput(0, compare);
+  node->ReplaceInput(1, SmiConstant(token));
+
+  if (has_frame_state) {
+    // Remove the frame state from inputs.
+    node->RemoveInput(NodeProperties::FirstFrameStateIndex(node));
+  }
+
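+  // The CompareIC result still has to be materialized as a boolean, so the
+  // node itself is reused as a Runtime::kBooleanize call on the raw result
+  // and the comparison token.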
+  ReplaceWithRuntimeCall(node, Runtime::kBooleanize);
+}
+
+
+void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
+                                            CallDescriptor::Flags flags) {
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(
+      callable.descriptor(), 0, flags | FlagsForNode(node));
+  Node* stub_code = CodeConstant(callable.code());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
+                                               Builtins::JavaScript id,
+                                               int nargs) {
+  Callable callable =
+      CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
+  CallDescriptor* desc =
+      linkage()->GetStubCallDescriptor(callable.descriptor(), nargs);
+  // TODO(mstarzinger): Accessing the builtins object this way prevents sharing
+  // of code across native contexts. Fix this by loading from given context.
+  Handle<JSFunction> function(
+      JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
+  Node* stub_code = CodeConstant(callable.code());
+  Node* function_node = FunctionConstant(function);
+  PatchInsertInput(node, 0, stub_code);
+  PatchInsertInput(node, 1, function_node);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
+                                               Runtime::FunctionId f,
+                                               int nargs_override) {
+  Operator::Properties properties = node->op()->properties();
+  const Runtime::Function* fun = Runtime::FunctionForId(f);
+  int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
+  CallDescriptor* desc =
+      linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
+  Node* ref = ExternalConstant(ExternalReference(f, isolate()));
+  Node* arity = Int32Constant(nargs);
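+  // Cache the CEntryStub code constant; it is shared by every runtime call
+  // lowered by this instance.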
+  if (!centrystub_constant_.is_set()) {
+    centrystub_constant_.set(CodeConstant(CEntryStub(isolate(), 1).GetCode()));
+  }
+  PatchInsertInput(node, 0, centrystub_constant_.get());
+  PatchInsertInput(node, nargs + 1, ref);
+  PatchInsertInput(node, nargs + 2, arity);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::LowerBranch(Node* node) {
+  if (!info()->is_typing_enabled()) {
+    // TODO(mstarzinger): If typing is enabled then simplified lowering will
+    // have inserted the correct ChangeBoolToBit, otherwise we need to perform
+    // poor-man's representation inference here and insert the change manually.
+    Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+                                  jsgraph()->TrueConstant());
+    node->ReplaceInput(0, test);
+  }
+}
+
+
+void JSGenericLowering::LowerJSUnaryNot(Node* node) {
+  Callable callable = CodeFactory::ToBoolean(
+      isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSToBoolean(Node* node) {
+  Callable callable =
+      CodeFactory::ToBoolean(isolate(), ToBooleanStub::RESULT_AS_ODDBALL);
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSToNumber(Node* node) {
+  Callable callable = CodeFactory::ToNumber(isolate());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kNoFlags);
+}
+
+
+void JSGenericLowering::LowerJSToString(Node* node) {
+  ReplaceWithBuiltinCall(node, Builtins::TO_STRING, 1);
+}
+
+
+void JSGenericLowering::LowerJSToObject(Node* node) {
+  ReplaceWithBuiltinCall(node, Builtins::TO_OBJECT, 1);
+}
+
+
+void JSGenericLowering::LowerJSLoadProperty(Node* node) {
+  Callable callable = CodeFactory::KeyedLoadIC(isolate());
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSLoadNamed(Node* node) {
+  LoadNamedParameters p = OpParameter<LoadNamedParameters>(node);
+  Callable callable = CodeFactory::LoadIC(isolate(), p.contextual_mode);
+  PatchInsertInput(node, 1, jsgraph()->HeapConstant(p.name));
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSStoreProperty(Node* node) {
+  StrictMode strict_mode = OpParameter<StrictMode>(node);
+  Callable callable = CodeFactory::KeyedStoreIC(isolate(), strict_mode);
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSStoreNamed(Node* node) {
+  StoreNamedParameters params = OpParameter<StoreNamedParameters>(node);
+  Callable callable = CodeFactory::StoreIC(isolate(), params.strict_mode);
+  PatchInsertInput(node, 1, jsgraph()->HeapConstant(params.name));
+  ReplaceWithStubCall(node, callable, CallDescriptor::kPatchableCallSite);
+}
+
+
+void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
+  StrictMode strict_mode = OpParameter<StrictMode>(node);
+  PatchInsertInput(node, 2, SmiConstant(strict_mode));
+  ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
+}
+
+
+void JSGenericLowering::LowerJSHasProperty(Node* node) {
+  ReplaceWithBuiltinCall(node, Builtins::IN, 2);
+}
+
+
+void JSGenericLowering::LowerJSInstanceOf(Node* node) {
+  InstanceofStub::Flags flags = static_cast<InstanceofStub::Flags>(
+      InstanceofStub::kReturnTrueFalseObject |
+      InstanceofStub::kArgsInRegisters);
+  InstanceofStub stub(isolate(), flags);
+  CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+  CallDescriptor* desc = linkage()->GetStubCallDescriptor(d, 0);
+  Node* stub_code = CodeConstant(stub.GetCode());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::LowerJSLoadContext(Node* node) {
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  // TODO(mstarzinger): Use simplified operators instead of machine operators
+  // here so that load/store optimization can be applied afterwards.
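+  // Walk up the context chain by loading the PREVIOUS slot once per level.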
+  for (int i = 0; i < access.depth(); ++i) {
+    node->ReplaceInput(
+        0, graph()->NewNode(
+               machine()->Load(kMachAnyTagged),
+               NodeProperties::GetValueInput(node, 0),
+               Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
+               NodeProperties::GetEffectInput(node)));
+  }
+  node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+  PatchOperator(node, machine()->Load(kMachAnyTagged));
+}
+
+
+void JSGenericLowering::LowerJSStoreContext(Node* node) {
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  // TODO(mstarzinger): Use simplified operators instead of machine operators
+  // here so that load/store optimization can be applied afterwards.
+  for (int i = 0; i < access.depth(); ++i) {
+    node->ReplaceInput(
+        0, graph()->NewNode(
+               machine()->Load(kMachAnyTagged),
+               NodeProperties::GetValueInput(node, 0),
+               Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
+               NodeProperties::GetEffectInput(node)));
+  }
+  node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
+  node->ReplaceInput(1, Int32Constant(Context::SlotOffset(access.index())));
+  PatchOperator(node, machine()->Store(StoreRepresentation(kMachAnyTagged,
+                                                           kFullWriteBarrier)));
+}
+
+
+void JSGenericLowering::LowerJSCallConstruct(Node* node) {
+  int arity = OpParameter<int>(node);
+  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+  CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+  CallDescriptor* desc =
+      linkage()->GetStubCallDescriptor(d, arity, FlagsForNode(node));
+  Node* stub_code = CodeConstant(stub.GetCode());
+  Node* construct = NodeProperties::GetValueInput(node, 0);
+  PatchInsertInput(node, 0, stub_code);
+  PatchInsertInput(node, 1, Int32Constant(arity - 1));
+  PatchInsertInput(node, 2, construct);
+  PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::LowerJSCallFunction(Node* node) {
+  CallParameters p = OpParameter<CallParameters>(node);
+  CallFunctionStub stub(isolate(), p.arity - 2, p.flags);
+  CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
+  CallDescriptor* desc =
+      linkage()->GetStubCallDescriptor(d, p.arity - 1, FlagsForNode(node));
+  Node* stub_code = CodeConstant(stub.GetCode());
+  PatchInsertInput(node, 0, stub_code);
+  PatchOperator(node, common()->Call(desc));
+}
+
+
+void JSGenericLowering::LowerJSCallRuntime(Node* node) {
+  Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(node);
+  int arity = OperatorProperties::GetValueInputCount(node->op());
+  ReplaceWithRuntimeCall(node, function, arity);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-generic-lowering.h b/src/compiler/js-generic-lowering.h
new file mode 100644
index 0000000..400f806
--- /dev/null
+++ b/src/compiler/js-generic-lowering.h
@@ -0,0 +1,77 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GENERIC_LOWERING_H_
+#define V8_COMPILER_JS_GENERIC_LOWERING_H_
+
+#include "src/v8.h"
+
+#include "src/allocation.h"
+#include "src/code-factory.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/opcodes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class MachineOperatorBuilder;
+class Linkage;
+
+// Lowers JS-level operators to runtime and IC calls in the "generic" case.
+class JSGenericLowering : public Reducer {
+ public:
+  JSGenericLowering(CompilationInfo* info, JSGraph* graph);
+  virtual ~JSGenericLowering() {}
+
+  virtual Reduction Reduce(Node* node);
+
+ protected:
+#define DECLARE_LOWER(x) void Lower##x(Node* node);
+  // Dispatched depending on opcode.
+  ALL_OP_LIST(DECLARE_LOWER)
+#undef DECLARE_LOWER
+
+  // Helpers to create new constant nodes.
+  Node* SmiConstant(int immediate);
+  Node* Int32Constant(int immediate);
+  Node* CodeConstant(Handle<Code> code);
+  Node* FunctionConstant(Handle<JSFunction> function);
+  Node* ExternalConstant(ExternalReference ref);
+
+  // Helpers to patch existing nodes in the graph.
+  void PatchOperator(Node* node, const Operator* new_op);
+  void PatchInsertInput(Node* node, int index, Node* input);
+
+  // Helpers to replace existing nodes with a generic call.
+  void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure);
+  void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
+  void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
+  void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
+
+  Zone* zone() const { return graph()->zone(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const { return jsgraph()->graph(); }
+  Linkage* linkage() const { return linkage_; }
+  CompilationInfo* info() const { return info_; }
+  CommonOperatorBuilder* common() const { return jsgraph()->common(); }
+  MachineOperatorBuilder* machine() const { return jsgraph()->machine(); }
+
+ private:
+  CompilationInfo* info_;
+  JSGraph* jsgraph_;
+  Linkage* linkage_;
+  SetOncePointer<Node> centrystub_constant_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_GENERIC_LOWERING_H_
diff --git a/src/compiler/js-graph.cc b/src/compiler/js-graph.cc
new file mode 100644
index 0000000..1309531
--- /dev/null
+++ b/src/compiler/js-graph.cc
@@ -0,0 +1,186 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node* JSGraph::ImmovableHeapConstant(Handle<Object> object) {
+  Unique<Object> unique = Unique<Object>::CreateImmovable(object);
+  return NewNode(common()->HeapConstant(unique));
+}
+
+
+Node* JSGraph::NewNode(const Operator* op) {
+  Node* node = graph()->NewNode(op);
+  typer_->Init(node);
+  return node;
+}
+
+
+Node* JSGraph::CEntryStubConstant() {
+  if (!c_entry_stub_constant_.is_set()) {
+    c_entry_stub_constant_.set(
+        ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+  }
+  return c_entry_stub_constant_.get();
+}
+
+
+Node* JSGraph::UndefinedConstant() {
+  if (!undefined_constant_.is_set()) {
+    undefined_constant_.set(
+        ImmovableHeapConstant(factory()->undefined_value()));
+  }
+  return undefined_constant_.get();
+}
+
+
+Node* JSGraph::TheHoleConstant() {
+  if (!the_hole_constant_.is_set()) {
+    the_hole_constant_.set(ImmovableHeapConstant(factory()->the_hole_value()));
+  }
+  return the_hole_constant_.get();
+}
+
+
+Node* JSGraph::TrueConstant() {
+  if (!true_constant_.is_set()) {
+    true_constant_.set(ImmovableHeapConstant(factory()->true_value()));
+  }
+  return true_constant_.get();
+}
+
+
+Node* JSGraph::FalseConstant() {
+  if (!false_constant_.is_set()) {
+    false_constant_.set(ImmovableHeapConstant(factory()->false_value()));
+  }
+  return false_constant_.get();
+}
+
+
+Node* JSGraph::NullConstant() {
+  if (!null_constant_.is_set()) {
+    null_constant_.set(ImmovableHeapConstant(factory()->null_value()));
+  }
+  return null_constant_.get();
+}
+
+
+Node* JSGraph::ZeroConstant() {
+  if (!zero_constant_.is_set()) zero_constant_.set(NumberConstant(0.0));
+  return zero_constant_.get();
+}
+
+
+Node* JSGraph::OneConstant() {
+  if (!one_constant_.is_set()) one_constant_.set(NumberConstant(1.0));
+  return one_constant_.get();
+}
+
+
+Node* JSGraph::NaNConstant() {
+  if (!nan_constant_.is_set()) {
+    nan_constant_.set(NumberConstant(base::OS::nan_value()));
+  }
+  return nan_constant_.get();
+}
+
+
+Node* JSGraph::HeapConstant(Unique<Object> value) {
+  // TODO(turbofan): canonicalize heap constants using Unique<T>
+  return NewNode(common()->HeapConstant(value));
+}
+
+
+Node* JSGraph::HeapConstant(Handle<Object> value) {
+  // TODO(titzer): We could also match against the addresses of immortal
+  // immovables here, even without access to the heap, thus always
+  // canonicalizing references to them.
+  // return HeapConstant(Unique<Object>::CreateUninitialized(value));
+  // TODO(turbofan): This is a work-around to make Unique::HashCode() work for
+  // value numbering. We need some sane way to compute a unique hash code for
+  // arbitrary handles here.
+  Unique<Object> unique(reinterpret_cast<Address>(*value.location()), value);
+  return HeapConstant(unique);
+}
+
+
+Node* JSGraph::Constant(Handle<Object> value) {
+  // Dereference the handle to determine if a number constant or other
+  // canonicalized node can be used.
+  if (value->IsNumber()) {
+    return Constant(value->Number());
+  } else if (value->IsUndefined()) {
+    return UndefinedConstant();
+  } else if (value->IsTrue()) {
+    return TrueConstant();
+  } else if (value->IsFalse()) {
+    return FalseConstant();
+  } else if (value->IsNull()) {
+    return NullConstant();
+  } else if (value->IsTheHole()) {
+    return TheHoleConstant();
+  } else {
+    return HeapConstant(value);
+  }
+}
+
+
+Node* JSGraph::Constant(double value) {
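+  // Compare bit patterns so that -0.0 is not folded into the 0.0 constant.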
+  if (bit_cast<int64_t>(value) == bit_cast<int64_t>(0.0)) return ZeroConstant();
+  if (bit_cast<int64_t>(value) == bit_cast<int64_t>(1.0)) return OneConstant();
+  return NumberConstant(value);
+}
+
+
+Node* JSGraph::Constant(int32_t value) {
+  if (value == 0) return ZeroConstant();
+  if (value == 1) return OneConstant();
+  return NumberConstant(value);
+}
+
+
+Node* JSGraph::Int32Constant(int32_t value) {
+  Node** loc = cache_.FindInt32Constant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->Int32Constant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::NumberConstant(double value) {
+  Node** loc = cache_.FindNumberConstant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->NumberConstant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::Float64Constant(double value) {
+  Node** loc = cache_.FindFloat64Constant(value);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->Float64Constant(value));
+  }
+  return *loc;
+}
+
+
+Node* JSGraph::ExternalConstant(ExternalReference reference) {
+  Node** loc = cache_.FindExternalConstant(reference);
+  if (*loc == NULL) {
+    *loc = NewNode(common()->ExternalConstant(reference));
+  }
+  return *loc;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-graph.h b/src/compiler/js-graph.h
new file mode 100644
index 0000000..2b2dfd1
--- /dev/null
+++ b/src/compiler/js-graph.h
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_GRAPH_H_
+#define V8_COMPILER_JS_GRAPH_H_
+
+#include "src/compiler/common-node-cache.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Typer;
+
+// Implements a facade on a Graph, enhancing the graph with JS-specific
+// notions, including a builder for JS* operators, canonicalized global
+// constants, and various helper methods.
+class JSGraph : public ZoneObject {
+ public:
+  JSGraph(Graph* graph, CommonOperatorBuilder* common,
+          JSOperatorBuilder* javascript, Typer* typer,
+          MachineOperatorBuilder* machine)
+      : graph_(graph),
+        common_(common),
+        javascript_(javascript),
+        typer_(typer),
+        machine_(machine),
+        cache_(zone()) {}
+
+  // Canonicalized global constants.
+  Node* CEntryStubConstant();
+  Node* UndefinedConstant();
+  Node* TheHoleConstant();
+  Node* TrueConstant();
+  Node* FalseConstant();
+  Node* NullConstant();
+  Node* ZeroConstant();
+  Node* OneConstant();
+  Node* NaNConstant();
+
+  // Creates a HeapConstant node, possibly canonicalized, without inspecting the
+  // object.
+  Node* HeapConstant(Unique<Object> value);
+
+  // Creates a HeapConstant node, possibly canonicalized, and may access the
+  // heap to inspect the object.
+  Node* HeapConstant(Handle<Object> value);
+
+  // Creates a Constant node of the appropriate type for the given object.
+  // Accesses the heap to inspect the object and determine whether one of the
+  // canonicalized globals or a number constant should be returned.
+  Node* Constant(Handle<Object> value);
+
+  // Creates a NumberConstant node, usually canonicalized.
+  Node* Constant(double value);
+
+  // Creates a NumberConstant node, usually canonicalized.
+  Node* Constant(int32_t value);
+
+  // Creates an Int32Constant node, usually canonicalized.
+  Node* Int32Constant(int32_t value);
+  Node* Uint32Constant(uint32_t value) {
+    return Int32Constant(bit_cast<int32_t>(value));
+  }
+
+  // Creates a Float64Constant node, usually canonicalized.
+  Node* Float64Constant(double value);
+
+  // Creates an ExternalConstant node, usually canonicalized.
+  Node* ExternalConstant(ExternalReference ref);
+
+  Node* SmiConstant(int32_t immediate) {
+    DCHECK(Smi::IsValid(immediate));
+    return Constant(immediate);
+  }
+
+  JSOperatorBuilder* javascript() { return javascript_; }
+  CommonOperatorBuilder* common() { return common_; }
+  MachineOperatorBuilder* machine() { return machine_; }
+  Graph* graph() { return graph_; }
+  Zone* zone() { return graph()->zone(); }
+  Isolate* isolate() { return zone()->isolate(); }
+
+ private:
+  Graph* graph_;
+  CommonOperatorBuilder* common_;
+  JSOperatorBuilder* javascript_;
+  Typer* typer_;
+  MachineOperatorBuilder* machine_;
+
+  SetOncePointer<Node> c_entry_stub_constant_;
+  SetOncePointer<Node> undefined_constant_;
+  SetOncePointer<Node> the_hole_constant_;
+  SetOncePointer<Node> true_constant_;
+  SetOncePointer<Node> false_constant_;
+  SetOncePointer<Node> null_constant_;
+  SetOncePointer<Node> zero_constant_;
+  SetOncePointer<Node> one_constant_;
+  SetOncePointer<Node> nan_constant_;
+
+  CommonNodeCache cache_;
+
+  Node* ImmovableHeapConstant(Handle<Object> value);
+  Node* NumberConstant(double value);
+  Node* NewNode(const Operator* op);
+
+  Factory* factory() { return isolate()->factory(); }
+};
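+
+// Illustrative sketch (mirrors the usage in js-inlining.cc; assumes the
+// operator builders and a Zone* zone already exist):
+//
+//   Graph graph(zone);
+//   Typer typer(zone);
+//   JSGraph jsgraph(&graph, common, javascript, &typer, machine);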
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_GRAPH_H_
diff --git a/src/compiler/js-inlining.cc b/src/compiler/js-inlining.cc
new file mode 100644
index 0000000..af02145
--- /dev/null
+++ b/src/compiler/js-inlining.cc
@@ -0,0 +1,446 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InlinerVisitor : public NullNodeVisitor {
+ public:
+  explicit InlinerVisitor(JSInliner* inliner) : inliner_(inliner) {}
+
+  GenericGraphVisit::Control Post(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kJSCallFunction:
+        inliner_->TryInlineCall(node);
+        break;
+      default:
+        break;
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  JSInliner* inliner_;
+};
+
+
+void JSInliner::Inline() {
+  InlinerVisitor visitor(this);
+  jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+}
+
+
+// TODO(sigurds) Find a home for this function and reuse it everywhere (esp. in
+// test cases, where similar code is currently duplicated).
+static void Parse(Handle<JSFunction> function, CompilationInfoWithZone* info) {
+  CHECK(Parser::Parse(info));
+  CHECK(Rewriter::Rewrite(info));
+  CHECK(Scope::Analyze(info));
+  CHECK(Compiler::EnsureDeoptimizationSupport(info));
+}
+
+
+// A facade on a JSFunction's graph to facilitate inlining. It assumes that
+// the function graph has only one return statement, and provides
+// {UnifyReturn} to convert a function graph to that end.
+class Inlinee {
+ public:
+  Inlinee(Node* start, Node* end) : start_(start), end_(end) {}
+
+  // Returns the last regular control node, that is
+  // the last control node before the end node.
+  Node* end_block() { return NodeProperties::GetControlInput(unique_return()); }
+
+  // Return the effect output of the graph,
+  // that is the effect input of the return statement of the inlinee.
+  Node* effect_output() {
+    return NodeProperties::GetEffectInput(unique_return());
+  }
+  // Return the value output of the graph,
+  // that is the value input of the return statement of the inlinee.
+  Node* value_output() {
+    return NodeProperties::GetValueInput(unique_return(), 0);
+  }
+  // Return the unique return statement of the graph.
+  Node* unique_return() {
+    Node* unique_return = NodeProperties::GetControlInput(end_);
+    DCHECK_EQ(IrOpcode::kReturn, unique_return->opcode());
+    return unique_return;
+  }
+
+  // Counts JSFunction, Receiver, arguments, and context, but not effect or
+  // control.
+  size_t total_parameters() { return start_->op()->OutputCount(); }
+
+  // Counts only formal parameters.
+  size_t formal_parameters() {
+    DCHECK_GE(total_parameters(), 3);
+    return total_parameters() - 3;
+  }
+
+  // Inline this graph at {call}, use {jsgraph} and its zone to create
+  // any new nodes.
+  void InlineAtCall(JSGraph* jsgraph, Node* call);
+
+  // Ensure that only a single return reaches the end node.
+  static void UnifyReturn(JSGraph* jsgraph);
+
+ private:
+  Node* start_;
+  Node* end_;
+};
+
+
+void Inlinee::UnifyReturn(JSGraph* jsgraph) {
+  Graph* graph = jsgraph->graph();
+
+  Node* final_merge = NodeProperties::GetControlInput(graph->end(), 0);
+  if (final_merge->opcode() == IrOpcode::kReturn) {
+    // nothing to do
+    return;
+  }
+  DCHECK_EQ(IrOpcode::kMerge, final_merge->opcode());
+
+  int predecessors =
+      OperatorProperties::GetControlInputCount(final_merge->op());
+
+  const Operator* op_phi = jsgraph->common()->Phi(kMachAnyTagged, predecessors);
+  const Operator* op_ephi = jsgraph->common()->EffectPhi(predecessors);
+
+  NodeVector values(jsgraph->zone());
+  NodeVector effects(jsgraph->zone());
+  // Iterate over all control flow predecessors,
+  // which must be return statements.
+  InputIter iter = final_merge->inputs().begin();
+  while (iter != final_merge->inputs().end()) {
+    Node* input = *iter;
+    switch (input->opcode()) {
+      case IrOpcode::kReturn:
+        values.push_back(NodeProperties::GetValueInput(input, 0));
+        effects.push_back(NodeProperties::GetEffectInput(input));
+        iter.UpdateToAndIncrement(NodeProperties::GetControlInput(input));
+        input->RemoveAllInputs();
+        break;
+      default:
+        UNREACHABLE();
+        ++iter;
+        break;
+    }
+  }
+  values.push_back(final_merge);
+  effects.push_back(final_merge);
+  Node* phi =
+      graph->NewNode(op_phi, static_cast<int>(values.size()), &values.front());
+  Node* ephi = graph->NewNode(op_ephi, static_cast<int>(effects.size()),
+                              &effects.front());
+  Node* new_return =
+      graph->NewNode(jsgraph->common()->Return(), phi, ephi, final_merge);
+  graph->end()->ReplaceInput(0, new_return);
+}
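+
+// Illustrative sketch of the rewrite performed by UnifyReturn:
+//
+//   Return(v1, e1, c1)   Return(v2, e2, c2)
+//              \            /
+//                Merge --- End
+//
+// becomes
+//
+//   Return(Phi(v1, v2, Merge), EffectPhi(e1, e2, Merge), Merge) --- End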
+
+
+class CopyVisitor : public NullNodeVisitor {
+ public:
+  CopyVisitor(Graph* source_graph, Graph* target_graph, Zone* temp_zone)
+      : copies_(source_graph->NodeCount(), NULL, temp_zone),
+        sentinels_(source_graph->NodeCount(), NULL, temp_zone),
+        source_graph_(source_graph),
+        target_graph_(target_graph),
+        temp_zone_(temp_zone),
+        sentinel_op_(IrOpcode::kDead, Operator::kNoProperties, 0, 0,
+                     "sentinel") {}
+
+  GenericGraphVisit::Control Post(Node* original) {
+    NodeVector inputs(temp_zone_);
+    for (InputIter it = original->inputs().begin();
+         it != original->inputs().end(); ++it) {
+      inputs.push_back(GetCopy(*it));
+    }
+
+    // Reuse the operator in the copy. This assumes that op lives in a zone
+    // that lives longer than graph()'s zone.
+    Node* copy =
+        target_graph_->NewNode(original->op(), static_cast<int>(inputs.size()),
+                               (inputs.empty() ? NULL : &inputs.front()));
+    copies_[original->id()] = copy;
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  Node* GetCopy(Node* original) {
+    Node* copy = copies_[original->id()];
+    if (copy == NULL) {
+      copy = GetSentinel(original);
+    }
+    DCHECK_NE(NULL, copy);
+    return copy;
+  }
+
+  void CopyGraph() {
+    source_graph_->VisitNodeInputsFromEnd(this);
+    ReplaceSentinels();
+  }
+
+  const NodeVector& copies() { return copies_; }
+
+ private:
+  void ReplaceSentinels() {
+    for (NodeId id = 0; id < source_graph_->NodeCount(); ++id) {
+      Node* sentinel = sentinels_[id];
+      if (sentinel == NULL) continue;
+      Node* copy = copies_[id];
+      DCHECK_NE(NULL, copy);
+      sentinel->ReplaceUses(copy);
+    }
+  }
+
+  Node* GetSentinel(Node* original) {
+    Node* sentinel = sentinels_[original->id()];
+    if (sentinel == NULL) {
+      sentinel = target_graph_->NewNode(&sentinel_op_);
+    }
+    return sentinel;
+  }
+
+  NodeVector copies_;
+  NodeVector sentinels_;
+  Graph* source_graph_;
+  Graph* target_graph_;
+  Zone* temp_zone_;
+  SimpleOperator sentinel_op_;
+};
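+
+// Illustrative note: sentinels stand in for inputs whose copies do not exist
+// yet (e.g. the back edge of a loop Phi is reached before the loop body has
+// been copied); ReplaceSentinels() then patches each placeholder once the
+// real copy is available.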
+
+
+void Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
+  // The scheduler is smart enough to place our code; we just ensure {control}
+  // becomes the control input of the start of the inlinee.
+  Node* control = NodeProperties::GetControlInput(call);
+
+  // The inlinee uses the context from the JSFunction object. This will
+  // also be the effect dependency for the inlinee as it produces an effect.
+  SimplifiedOperatorBuilder simplified(jsgraph->zone());
+  Node* context = jsgraph->graph()->NewNode(
+      simplified.LoadField(AccessBuilder::ForJSFunctionContext()),
+      NodeProperties::GetValueInput(call, 0),
+      NodeProperties::GetEffectInput(call));
+
+  // Context is last argument.
+  int inlinee_context_index = static_cast<int>(total_parameters()) - 1;
+  // {inliner_inputs} counts JSFunction, Receiver, arguments, but not
+  // context, effect, control.
+  int inliner_inputs = OperatorProperties::GetValueInputCount(call->op());
+  // Iterate over all uses of the start node.
+  UseIter iter = start_->uses().begin();
+  while (iter != start_->uses().end()) {
+    Node* use = *iter;
+    switch (use->opcode()) {
+      case IrOpcode::kParameter: {
+        int index = 1 + OpParameter<int>(use->op());
+        if (index < inliner_inputs && index < inlinee_context_index) {
+          // There is an input from the call, and the index is a value
+          // projection but not the context, so rewire the input.
+          NodeProperties::ReplaceWithValue(*iter, call->InputAt(index));
+        } else if (index == inlinee_context_index) {
+          // This is the context projection, rewire it to the context from the
+          // JSFunction object.
+          NodeProperties::ReplaceWithValue(*iter, context);
+        } else if (index < inlinee_context_index) {
+          // Call has fewer arguments than required, fill with undefined.
+          NodeProperties::ReplaceWithValue(*iter, jsgraph->UndefinedConstant());
+        } else {
+          // We got too many arguments; discard the extras for now.
+          // TODO(sigurds): Fix to treat arguments array correctly.
+        }
+        ++iter;
+        break;
+      }
+      default:
+        if (NodeProperties::IsEffectEdge(iter.edge())) {
+          iter.UpdateToAndIncrement(context);
+        } else if (NodeProperties::IsControlEdge(iter.edge())) {
+          iter.UpdateToAndIncrement(control);
+        } else {
+          UNREACHABLE();
+        }
+        break;
+    }
+  }
+
+  // Iterate over all uses of the call node.
+  iter = call->uses().begin();
+  while (iter != call->uses().end()) {
+    if (NodeProperties::IsEffectEdge(iter.edge())) {
+      iter.UpdateToAndIncrement(effect_output());
+    } else if (NodeProperties::IsControlEdge(iter.edge())) {
+      UNREACHABLE();
+    } else {
+      DCHECK(NodeProperties::IsValueEdge(iter.edge()));
+      iter.UpdateToAndIncrement(value_output());
+    }
+  }
+  call->RemoveAllInputs();
+  DCHECK_EQ(0, call->UseCount());
+  // TODO(sigurds) Remove this once we copy.
+  unique_return()->RemoveAllInputs();
+}
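+
+// Illustrative sketch of the index mapping above, for an inlinee f(a, b)
+// (total_parameters() == 5, inlinee_context_index == 4) called as f(x)
+// (inliner_inputs == 3):
+//
+//   Parameter(0) -> receiver            (call input 1)
+//   Parameter(1) -> x                   (call input 2)
+//   Parameter(2) -> UndefinedConstant() (missing argument)
+//   Parameter(3) -> context loaded from the JSFunction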
+
+
+// TODO(turbofan) Provide such accessors for every node, possibly even
+// generate them.
+class JSCallFunctionAccessor {
+ public:
+  explicit JSCallFunctionAccessor(Node* call) : call_(call) {
+    DCHECK_EQ(IrOpcode::kJSCallFunction, call->opcode());
+  }
+
+  Node* jsfunction() { return call_->InputAt(0); }
+
+  Node* receiver() { return call_->InputAt(1); }
+
+  Node* formal_argument(size_t index) {
+    DCHECK(index < formal_arguments());
+    return call_->InputAt(static_cast<int>(2 + index));
+  }
+
+  size_t formal_arguments() {
+    // {value_inputs} includes jsfunction and receiver.
+    size_t value_inputs = OperatorProperties::GetValueInputCount(call_->op());
+    DCHECK_GE(call_->InputCount(), 2);
+    return value_inputs - 2;
+  }
+
+  Node* frame_state() { return NodeProperties::GetFrameStateInput(call_); }
+
+ private:
+  Node* call_;
+};
+
+
+void JSInliner::AddClosureToFrameState(Node* frame_state,
+                                       Handle<JSFunction> jsfunction) {
+  FrameStateCallInfo call_info = OpParameter<FrameStateCallInfo>(frame_state);
+  const Operator* op = jsgraph_->common()->FrameState(
+      FrameStateType::JS_FRAME, call_info.bailout_id(),
+      call_info.state_combine(), jsfunction);
+  frame_state->set_op(op);
+}
+
+
+Node* JSInliner::CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
+                                                  Handle<JSFunction> jsfunction,
+                                                  Zone* temp_zone) {
+  const Operator* op =
+      jsgraph_->common()->FrameState(FrameStateType::ARGUMENTS_ADAPTOR,
+                                     BailoutId(-1), kIgnoreOutput, jsfunction);
+  const Operator* op0 = jsgraph_->common()->StateValues(0);
+  Node* node0 = jsgraph_->graph()->NewNode(op0);
+  NodeVector params(temp_zone);
+  params.push_back(call->receiver());
+  for (size_t argument = 0; argument != call->formal_arguments(); ++argument) {
+    params.push_back(call->formal_argument(argument));
+  }
+  const Operator* op_param =
+      jsgraph_->common()->StateValues(static_cast<int>(params.size()));
+  Node* params_node = jsgraph_->graph()->NewNode(
+      op_param, static_cast<int>(params.size()), &params.front());
+  return jsgraph_->graph()->NewNode(op, params_node, node0, node0,
+                                    jsgraph_->UndefinedConstant(),
+                                    call->frame_state());
+}
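+
+// Illustrative note: this frame state mirrors what the arguments adaptor
+// trampoline would have recorded at runtime, so that if the inlined code
+// deoptimizes after a call with a mismatched argument count (e.g. f(a, b)
+// called as f(1)), the deoptimizer can rebuild the adaptor frame from
+// [receiver, 1].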
+
+
+void JSInliner::TryInlineCall(Node* call_node) {
+  JSCallFunctionAccessor call(call_node);
+
+  HeapObjectMatcher<JSFunction> match(call.jsfunction());
+  if (!match.HasValue()) {
+    return;
+  }
+
+  Handle<JSFunction> function = match.Value().handle();
+
+  if (function->shared()->native()) {
+    if (FLAG_trace_turbo_inlining) {
+      SmartArrayPointer<char> name =
+          function->shared()->DebugName()->ToCString();
+      PrintF("Not Inlining %s into %s because inlinee is native\n", name.get(),
+             info_->shared_info()->DebugName()->ToCString().get());
+    }
+    return;
+  }
+
+  CompilationInfoWithZone info(function);
+  Parse(function, &info);
+
+  if (info.scope()->arguments() != NULL) {
+    // For now do not inline functions that use their arguments array.
+    SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
+    if (FLAG_trace_turbo_inlining) {
+      PrintF(
+          "Not Inlining %s into %s because inlinee uses arguments "
+          "array\n",
+          name.get(), info_->shared_info()->DebugName()->ToCString().get());
+    }
+    return;
+  }
+
+  if (FLAG_trace_turbo_inlining) {
+    SmartArrayPointer<char> name = function->shared()->DebugName()->ToCString();
+    PrintF("Inlining %s into %s\n", name.get(),
+           info_->shared_info()->DebugName()->ToCString().get());
+  }
+
+  Graph graph(info.zone());
+  Typer typer(info.zone());
+  JSGraph jsgraph(&graph, jsgraph_->common(), jsgraph_->javascript(), &typer,
+                  jsgraph_->machine());
+
+  AstGraphBuilder graph_builder(&info, &jsgraph);
+  graph_builder.CreateGraph();
+  Inlinee::UnifyReturn(&jsgraph);
+
+  CopyVisitor visitor(&graph, jsgraph_->graph(), info.zone());
+  visitor.CopyGraph();
+
+  Inlinee inlinee(visitor.GetCopy(graph.start()), visitor.GetCopy(graph.end()));
+
+  Node* outer_frame_state = call.frame_state();
+  // Insert argument adaptor frame if required.
+  if (call.formal_arguments() != inlinee.formal_parameters()) {
+    outer_frame_state =
+        CreateArgumentsAdaptorFrameState(&call, function, info.zone());
+  }
+
+  for (NodeVectorConstIter it = visitor.copies().begin();
+       it != visitor.copies().end(); ++it) {
+    Node* node = *it;
+    if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
+      AddClosureToFrameState(node, function);
+      NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
+    }
+  }
+
+  inlinee.InlineAtCall(jsgraph_, call_node);
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-inlining.h b/src/compiler/js-inlining.h
new file mode 100644
index 0000000..f135170
--- /dev/null
+++ b/src/compiler/js-inlining.h
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_INLINING_H_
+#define V8_COMPILER_JS_INLINING_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class JSCallFunctionAccessor;
+
+class JSInliner {
+ public:
+  JSInliner(CompilationInfo* info, JSGraph* jsgraph)
+      : info_(info), jsgraph_(jsgraph) {}
+
+  void Inline();
+  void TryInlineCall(Node* node);
+
+ private:
+  friend class InlinerVisitor;
+  CompilationInfo* info_;
+  JSGraph* jsgraph_;
+
+  Node* CreateArgumentsAdaptorFrameState(JSCallFunctionAccessor* call,
+                                         Handle<JSFunction> jsfunction,
+                                         Zone* temp_zone);
+  void AddClosureToFrameState(Node* frame_state, Handle<JSFunction> jsfunction);
+  static void UnifyReturn(Graph* graph);
+};
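+
+// Illustrative sketch of typical use, assuming an already built graph:
+//
+//   JSInliner inliner(info, jsgraph);
+//   inliner.Inline();  // inlines eligible JSCallFunction nodes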
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_INLINING_H_
diff --git a/src/compiler/js-operator.h b/src/compiler/js-operator.h
new file mode 100644
index 0000000..b95467f
--- /dev/null
+++ b/src/compiler/js-operator.h
@@ -0,0 +1,233 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_OPERATOR_H_
+#define V8_COMPILER_JS_OPERATOR_H_
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/unique.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Defines the location of a context slot relative to a specific scope. This is
+// used as a parameter by JSLoadContext and JSStoreContext operators and allows
+// accessing a context-allocated variable without keeping track of the scope.
+class ContextAccess {
+ public:
+  ContextAccess(int depth, int index, bool immutable)
+      : immutable_(immutable), depth_(depth), index_(index) {
+    DCHECK(0 <= depth && depth <= kMaxUInt16);
+    DCHECK(0 <= index && static_cast<uint32_t>(index) <= kMaxUInt32);
+  }
+  int depth() const { return depth_; }
+  int index() const { return index_; }
+  bool immutable() const { return immutable_; }
+
+ private:
+  // For space reasons, we keep this tightly packed; otherwise we could just
+  // use a simple int/int/bool POD.
+  const bool immutable_;
+  const uint16_t depth_;
+  const uint32_t index_;
+};
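+
+// Illustrative sketch: a variable that lives two scopes up, in slot 3, and is
+// never reassigned would be addressed as ContextAccess(2, 3, true).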
+
+// Defines the property being loaded from an object by a named load. This is
+// used as a parameter by JSLoadNamed operators.
+struct LoadNamedParameters {
+  Unique<Name> name;
+  ContextualMode contextual_mode;
+};
+
+// Defines the arity and the call flags for a JavaScript function call. This is
+// used as a parameter by JSCall operators.
+struct CallParameters {
+  int arity;
+  CallFunctionFlags flags;
+};
+
+// Defines the property being stored to an object by a named store. This is
+// used as a parameter by JSStoreNamed operators.
+struct StoreNamedParameters {
+  StrictMode strict_mode;
+  Unique<Name> name;
+};
+
+// Interface for building JavaScript-level operators, e.g. directly from the
+// AST. Most operators have no parameters, thus can be globally shared for all
+// graphs.
+class JSOperatorBuilder {
+ public:
+  explicit JSOperatorBuilder(Zone* zone) : zone_(zone) {}
+
+#define SIMPLE(name, properties, inputs, outputs) \
+  return new (zone_)                              \
+      SimpleOperator(IrOpcode::k##name, properties, inputs, outputs, #name);
+
+#define NOPROPS(name, inputs, outputs) \
+  SIMPLE(name, Operator::kNoProperties, inputs, outputs)
+
+#define OP1(name, ptype, pname, properties, inputs, outputs)                 \
+  return new (zone_) Operator1<ptype>(IrOpcode::k##name, properties, inputs, \
+                                      outputs, #name, pname)
+
+#define BINOP(name) NOPROPS(name, 2, 1)
+#define UNOP(name) NOPROPS(name, 1, 1)
+
+#define PURE_BINOP(name) SIMPLE(name, Operator::kPure, 2, 1)
+
+  const Operator* Equal() { BINOP(JSEqual); }
+  const Operator* NotEqual() { BINOP(JSNotEqual); }
+  const Operator* StrictEqual() { PURE_BINOP(JSStrictEqual); }
+  const Operator* StrictNotEqual() { PURE_BINOP(JSStrictNotEqual); }
+  const Operator* LessThan() { BINOP(JSLessThan); }
+  const Operator* GreaterThan() { BINOP(JSGreaterThan); }
+  const Operator* LessThanOrEqual() { BINOP(JSLessThanOrEqual); }
+  const Operator* GreaterThanOrEqual() { BINOP(JSGreaterThanOrEqual); }
+  const Operator* BitwiseOr() { BINOP(JSBitwiseOr); }
+  const Operator* BitwiseXor() { BINOP(JSBitwiseXor); }
+  const Operator* BitwiseAnd() { BINOP(JSBitwiseAnd); }
+  const Operator* ShiftLeft() { BINOP(JSShiftLeft); }
+  const Operator* ShiftRight() { BINOP(JSShiftRight); }
+  const Operator* ShiftRightLogical() { BINOP(JSShiftRightLogical); }
+  const Operator* Add() { BINOP(JSAdd); }
+  const Operator* Subtract() { BINOP(JSSubtract); }
+  const Operator* Multiply() { BINOP(JSMultiply); }
+  const Operator* Divide() { BINOP(JSDivide); }
+  const Operator* Modulus() { BINOP(JSModulus); }
+
+  const Operator* UnaryNot() { UNOP(JSUnaryNot); }
+  const Operator* ToBoolean() { UNOP(JSToBoolean); }
+  const Operator* ToNumber() { UNOP(JSToNumber); }
+  const Operator* ToString() { UNOP(JSToString); }
+  const Operator* ToName() { UNOP(JSToName); }
+  const Operator* ToObject() { UNOP(JSToObject); }
+  const Operator* Yield() { UNOP(JSYield); }
+
+  const Operator* Create() { SIMPLE(JSCreate, Operator::kEliminatable, 0, 1); }
+
+  const Operator* Call(int arguments, CallFunctionFlags flags) {
+    CallParameters parameters = {arguments, flags};
+    OP1(JSCallFunction, CallParameters, parameters, Operator::kNoProperties,
+        arguments, 1);
+  }
+
+  const Operator* CallNew(int arguments) {
+    return new (zone_)
+        Operator1<int>(IrOpcode::kJSCallConstruct, Operator::kNoProperties,
+                       arguments, 1, "JSCallConstruct", arguments);
+  }
+
+  const Operator* LoadProperty() { BINOP(JSLoadProperty); }
+  const Operator* LoadNamed(Unique<Name> name,
+                            ContextualMode contextual_mode = NOT_CONTEXTUAL) {
+    LoadNamedParameters parameters = {name, contextual_mode};
+    OP1(JSLoadNamed, LoadNamedParameters, parameters, Operator::kNoProperties,
+        1, 1);
+  }
+
+  const Operator* StoreProperty(StrictMode strict_mode) {
+    OP1(JSStoreProperty, StrictMode, strict_mode, Operator::kNoProperties, 3,
+        0);
+  }
+
+  const Operator* StoreNamed(StrictMode strict_mode, Unique<Name> name) {
+    StoreNamedParameters parameters = {strict_mode, name};
+    OP1(JSStoreNamed, StoreNamedParameters, parameters, Operator::kNoProperties,
+        2, 0);
+  }
+
+  const Operator* DeleteProperty(StrictMode strict_mode) {
+    OP1(JSDeleteProperty, StrictMode, strict_mode, Operator::kNoProperties, 2,
+        1);
+  }
+
+  const Operator* HasProperty() { NOPROPS(JSHasProperty, 2, 1); }
+
+  const Operator* LoadContext(uint16_t depth, uint32_t index, bool immutable) {
+    ContextAccess access(depth, index, immutable);
+    OP1(JSLoadContext, ContextAccess, access,
+        Operator::kEliminatable | Operator::kNoWrite, 1, 1);
+  }
+  const Operator* StoreContext(uint16_t depth, uint32_t index) {
+    ContextAccess access(depth, index, false);
+    OP1(JSStoreContext, ContextAccess, access, Operator::kNoProperties, 2, 0);
+  }
+
+  const Operator* TypeOf() { SIMPLE(JSTypeOf, Operator::kPure, 1, 1); }
+  const Operator* InstanceOf() { NOPROPS(JSInstanceOf, 2, 1); }
+  const Operator* Debugger() { NOPROPS(JSDebugger, 0, 0); }
+
+  // TODO(titzer): nail down the static parts of each of these context flavors.
+  const Operator* CreateFunctionContext() {
+    NOPROPS(JSCreateFunctionContext, 1, 1);
+  }
+  const Operator* CreateCatchContext(Unique<String> name) {
+    OP1(JSCreateCatchContext, Unique<String>, name, Operator::kNoProperties, 1,
+        1);
+  }
+  const Operator* CreateWithContext() { NOPROPS(JSCreateWithContext, 2, 1); }
+  const Operator* CreateBlockContext() { NOPROPS(JSCreateBlockContext, 2, 1); }
+  const Operator* CreateModuleContext() {
+    NOPROPS(JSCreateModuleContext, 2, 1);
+  }
+  const Operator* CreateGlobalContext() {
+    NOPROPS(JSCreateGlobalContext, 2, 1);
+  }
+
+  const Operator* Runtime(Runtime::FunctionId function, int arguments) {
+    const Runtime::Function* f = Runtime::FunctionForId(function);
+    DCHECK(f->nargs == -1 || f->nargs == arguments);
+    OP1(JSCallRuntime, Runtime::FunctionId, function, Operator::kNoProperties,
+        arguments, f->result_size);
+  }
+
+#undef SIMPLE
+#undef NOPROPS
+#undef OP1
+#undef BINOP
+#undef UNOP
+
+ private:
+  Zone* zone_;
+};
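+
+// Illustrative sketch, assuming a live Zone* zone:
+//
+//   JSOperatorBuilder javascript(zone);
+//   const Operator* add = javascript.Add();  // JSAdd: 2 inputs, 1 output
+//   const Operator* load = javascript.LoadContext(0, 2, true);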
+
+// Specialization for static parameters of type {ContextAccess}.
+template <>
+struct StaticParameterTraits<ContextAccess> {
+  static OStream& PrintTo(OStream& os, ContextAccess val) {  // NOLINT
+    return os << val.depth() << "," << val.index()
+              << (val.immutable() ? ",imm" : "");
+  }
+  static int HashCode(ContextAccess val) {
+    return (val.depth() << 16) | (val.index() & 0xffff);
+  }
+  static bool Equals(ContextAccess a, ContextAccess b) {
+    return a.immutable() == b.immutable() && a.depth() == b.depth() &&
+           a.index() == b.index();
+  }
+};
+
+// Specialization for static parameters of type {Runtime::FunctionId}.
+template <>
+struct StaticParameterTraits<Runtime::FunctionId> {
+  static OStream& PrintTo(OStream& os, Runtime::FunctionId val) {  // NOLINT
+    const Runtime::Function* f = Runtime::FunctionForId(val);
+    return os << (f->name ? f->name : "?Runtime?");
+  }
+  static int HashCode(Runtime::FunctionId val) { return static_cast<int>(val); }
+  static bool Equals(Runtime::FunctionId a, Runtime::FunctionId b) {
+    return a == b;
+  }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_OPERATOR_H_
diff --git a/src/compiler/js-typed-lowering.cc b/src/compiler/js-typed-lowering.cc
new file mode 100644
index 0000000..be12534
--- /dev/null
+++ b/src/compiler/js-typed-lowering.cc
@@ -0,0 +1,710 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(turbofan): js-typed-lowering improvements possible
+// - immediately put in type bounds for all new nodes
+// - relax effects from generic but not-side-effecting operations
+// - relax effects for ToNumber(mixed)
+
+
+// Relax the effects of {node} by immediately replacing effect uses of {node}
+// with the effect input to {node}.
+// TODO(turbofan): replace the effect input to {node} with {graph->start()}.
+// TODO(titzer): move into a GraphEditor?
+static void RelaxEffects(Node* node) {
+  NodeProperties::ReplaceWithValue(node, node, NULL);
+}
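+
+// Illustrative sketch: after RelaxEffects(n), an effect chain  e0 -> n -> e1
+// becomes  e0 -> e1; n keeps its value uses but no longer participates in the
+// effect chain.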
+
+
+JSTypedLowering::~JSTypedLowering() {}
+
+
+Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
+  NodeProperties::ReplaceWithValue(old, node, node);
+  return Changed(node);
+}
+
+
+// A helper class to simplify the process of reducing a single binop node with a
+// JSOperator. This class manages the rewriting of context, control, and effect
+// dependencies during lowering of a binop and contains numerous helper
+// functions for matching the types of inputs to an operation.
+class JSBinopReduction {
+ public:
+  JSBinopReduction(JSTypedLowering* lowering, Node* node)
+      : lowering_(lowering),
+        node_(node),
+        left_type_(NodeProperties::GetBounds(node->InputAt(0)).upper),
+        right_type_(NodeProperties::GetBounds(node->InputAt(1)).upper) {}
+
+  void ConvertInputsToNumber() {
+    node_->ReplaceInput(0, ConvertToNumber(left()));
+    node_->ReplaceInput(1, ConvertToNumber(right()));
+  }
+
+  void ConvertInputsToInt32(bool left_signed, bool right_signed) {
+    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
+    node_->ReplaceInput(1, ConvertToI32(right_signed, right()));
+  }
+
+  void ConvertInputsToString() {
+    node_->ReplaceInput(0, ConvertToString(left()));
+    node_->ReplaceInput(1, ConvertToString(right()));
+  }
+
+  // Convert inputs for bitwise shift operation (ES5 spec 11.7).
+  void ConvertInputsForShift(bool left_signed) {
+    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
+    Node* rnum = ConvertToI32(false, right());
+    node_->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rnum,
+                                            jsgraph()->Int32Constant(0x1F)));
+  }
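+
+  // Illustrative example of the 0x1F mask above (ES5 11.7.1):
+  //   1 << 33  evaluates as  1 << (33 & 0x1F) == 1 << 1 == 2.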
+
+  void SwapInputs() {
+    Node* l = left();
+    Node* r = right();
+    node_->ReplaceInput(0, r);
+    node_->ReplaceInput(1, l);
+    std::swap(left_type_, right_type_);
+  }
+
+  // Remove all effect and control inputs and outputs to this node and change
+  // to the pure operator {op}, possibly inserting a boolean inversion.
+  Reduction ChangeToPureOperator(const Operator* op, bool invert = false) {
+    DCHECK_EQ(0, OperatorProperties::GetEffectInputCount(op));
+    DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
+    DCHECK_EQ(0, OperatorProperties::GetControlInputCount(op));
+    DCHECK_EQ(2, OperatorProperties::GetValueInputCount(op));
+
+    // Remove the effects from the node, if any, and update its effect usages.
+    if (OperatorProperties::GetEffectInputCount(node_->op()) > 0) {
+      RelaxEffects(node_);
+    }
+    // Remove the inputs corresponding to context, effect, and control.
+    NodeProperties::RemoveNonValueInputs(node_);
+    // Finally, update the operator to the new one.
+    node_->set_op(op);
+
+    if (invert) {
+      // Insert a BooleanNot node to invert the value.
+      Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
+      node_->ReplaceUses(value);
+      // Note: ReplaceUses() smashes all uses, so smash it back here.
+      value->ReplaceInput(0, node_);
+      return lowering_->ReplaceWith(value);
+    }
+    return lowering_->Changed(node_);
+  }
+
+  bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }
+
+  bool BothInputsAre(Type* t) {
+    return left_type_->Is(t) && right_type_->Is(t);
+  }
+
+  bool OneInputCannotBe(Type* t) {
+    return !left_type_->Maybe(t) || !right_type_->Maybe(t);
+  }
+
+  bool NeitherInputCanBe(Type* t) {
+    return !left_type_->Maybe(t) && !right_type_->Maybe(t);
+  }
+
+  Node* effect() { return NodeProperties::GetEffectInput(node_); }
+  Node* control() { return NodeProperties::GetControlInput(node_); }
+  Node* context() { return NodeProperties::GetContextInput(node_); }
+  Node* left() { return NodeProperties::GetValueInput(node_, 0); }
+  Node* right() { return NodeProperties::GetValueInput(node_, 1); }
+  Type* left_type() { return left_type_; }
+  Type* right_type() { return right_type_; }
+
+  SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
+  Graph* graph() { return lowering_->graph(); }
+  JSGraph* jsgraph() { return lowering_->jsgraph(); }
+  JSOperatorBuilder* javascript() { return lowering_->javascript(); }
+  MachineOperatorBuilder* machine() { return lowering_->machine(); }
+
+ private:
+  JSTypedLowering* lowering_;  // The containing lowering instance.
+  Node* node_;                 // The original node.
+  Type* left_type_;            // Cache of the left input's type.
+  Type* right_type_;           // Cache of the right input's type.
+
+  Node* ConvertToString(Node* node) {
+    // Avoid introducing too many eager ToString() operations.
+    Reduction reduced = lowering_->ReduceJSToStringInput(node);
+    if (reduced.Changed()) return reduced.replacement();
+    Node* n = graph()->NewNode(javascript()->ToString(), node, context(),
+                               effect(), control());
+    update_effect(n);
+    return n;
+  }
+
+  Node* ConvertToNumber(Node* node) {
+    // Avoid introducing too many eager ToNumber() operations.
+    Reduction reduced = lowering_->ReduceJSToNumberInput(node);
+    if (reduced.Changed()) return reduced.replacement();
+    Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
+                               effect(), control());
+    update_effect(n);
+    return n;
+  }
+
+  // Try narrowing a double or number operation to an Int32 operation.
+  bool TryNarrowingToI32(Type* type, Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kFloat64Add:
+      case IrOpcode::kNumberAdd: {
+        JSBinopReduction r(lowering_, node);
+        if (r.BothInputsAre(Type::Integral32())) {
+          node->set_op(lowering_->machine()->Int32Add());
+          // TODO(titzer): narrow bounds instead of overwriting.
+          NodeProperties::SetBounds(node, Bounds(type));
+          return true;
+        }
+        return false;
+      }
+      case IrOpcode::kFloat64Sub:
+      case IrOpcode::kNumberSubtract: {
+        JSBinopReduction r(lowering_, node);
+        if (r.BothInputsAre(Type::Integral32())) {
+          node->set_op(lowering_->machine()->Int32Sub());
+          // TODO(titzer): narrow bounds instead of overwriting.
+          NodeProperties::SetBounds(node, Bounds(type));
+          return true;
+        }
+        return false;
+      }
+      default:
+        return false;
+    }
+  }
+
+  Node* ConvertToI32(bool is_signed, Node* node) {
+    Type* type = is_signed ? Type::Signed32() : Type::Unsigned32();
+    if (node->OwnedBy(node_)) {
+      // If this node {node_} has the only edge to {node}, then try narrowing
+      // its operation to an Int32 add or subtract.
+      if (TryNarrowingToI32(type, node)) return node;
+    } else {
+      // Otherwise, {node} has multiple uses. Leave it as is, and let the
+      // later lowering passes, which run to a full backwards fixpoint, deal
+      // with it.
+    }
+
+    // Avoid introducing too many eager NumberToInt32()/NumberToUint32()
+    // operations.
+    node = ConvertToNumber(node);
+    Type* input_type = NodeProperties::GetBounds(node).upper;
+
+    if (input_type->Is(type)) return node;  // already in the value range.
+
+    const Operator* op = is_signed ? simplified()->NumberToInt32()
+                                   : simplified()->NumberToUint32();
+    Node* n = graph()->NewNode(op, node);
+    return n;
+  }
+
+  void update_effect(Node* effect) {
+    NodeProperties::ReplaceEffectInput(node_, effect);
+  }
+};
+
+
+Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
+  JSBinopReduction r(this, node);
+  if (r.BothInputsAre(Type::Number())) {
+    // JSAdd(x:number, y:number) => NumberAdd(x, y)
+    return r.ChangeToPureOperator(simplified()->NumberAdd());
+  }
+  Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
+  if (r.NeitherInputCanBe(maybe_string)) {
+    // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(simplified()->NumberAdd());
+  }
+#if 0
+  // TODO(turbofan): Lowering of StringAdd is disabled for now because:
+  //   a) The inserted ToString operation screws up valueOf vs. toString order.
+  //   b) Deoptimization at ToString doesn't have corresponding bailout id.
+  //   c) Our current StringAddStub is actually non-pure and requires context.
+  if (r.OneInputIs(Type::String())) {
+    // JSAdd(x:string, y:string) => StringAdd(x, y)
+    // JSAdd(x:string, y) => StringAdd(x, ToString(y))
+    // JSAdd(x, y:string) => StringAdd(ToString(x), y)
+    r.ConvertInputsToString();
+    return r.ChangeToPureOperator(simplified()->StringAdd());
+  }
+#endif
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
+                                             const Operator* numberOp) {
+  JSBinopReduction r(this, node);
+  if (r.OneInputIs(Type::Primitive())) {
+    // If at least one input is a primitive, then insert appropriate conversions
+    // to number and reduce this operator to the given numeric one.
+    // TODO(turbofan): make this heuristic configurable for code size.
+    r.ConvertInputsToNumber();
+    return r.ChangeToPureOperator(numberOp);
+  }
+  // TODO(turbofan): relax/remove the effects of this operator in other cases.
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
+                                          bool right_signed,
+                                          const Operator* intOp) {
+  JSBinopReduction r(this, node);
+  // TODO(titzer): some Smi bitwise operations don't really require going
+  // all the way to int32, which can save tagging/untagging for some operations
+  // on some platforms.
+  // TODO(turbofan): make this heuristic configurable for code size.
+  r.ConvertInputsToInt32(left_signed, right_signed);
+  return r.ChangeToPureOperator(intOp);
+}
+
+
+Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed,
+                                          const Operator* shift_op) {
+  JSBinopReduction r(this, node);
+  r.ConvertInputsForShift(left_signed);
+  return r.ChangeToPureOperator(shift_op);
+}
+
+
+Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
+  JSBinopReduction r(this, node);
+  if (r.BothInputsAre(Type::String())) {
+    // If both inputs are definitely strings, perform a string comparison.
+    const Operator* stringOp;
+    switch (node->opcode()) {
+      case IrOpcode::kJSLessThan:
+        stringOp = simplified()->StringLessThan();
+        break;
+      case IrOpcode::kJSGreaterThan:
+        stringOp = simplified()->StringLessThan();
+        r.SwapInputs();  // a > b => b < a
+        break;
+      case IrOpcode::kJSLessThanOrEqual:
+        stringOp = simplified()->StringLessThanOrEqual();
+        break;
+      case IrOpcode::kJSGreaterThanOrEqual:
+        stringOp = simplified()->StringLessThanOrEqual();
+        r.SwapInputs();  // a >= b => b <= a
+        break;
+      default:
+        return NoChange();
+    }
+    return r.ChangeToPureOperator(stringOp);
+  }
+  Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
+  if (r.OneInputCannotBe(maybe_string)) {
+    // If one input cannot be a string, then emit a number comparison.
+    const Operator* less_than;
+    const Operator* less_than_or_equal;
+    if (r.BothInputsAre(Type::Unsigned32())) {
+      less_than = machine()->Uint32LessThan();
+      less_than_or_equal = machine()->Uint32LessThanOrEqual();
+    } else if (r.BothInputsAre(Type::Signed32())) {
+      less_than = machine()->Int32LessThan();
+      less_than_or_equal = machine()->Int32LessThanOrEqual();
+    } else {
+      // TODO(turbofan): mixed signed/unsigned int32 comparisons.
+      r.ConvertInputsToNumber();
+      less_than = simplified()->NumberLessThan();
+      less_than_or_equal = simplified()->NumberLessThanOrEqual();
+    }
+    const Operator* comparison;
+    switch (node->opcode()) {
+      case IrOpcode::kJSLessThan:
+        comparison = less_than;
+        break;
+      case IrOpcode::kJSGreaterThan:
+        comparison = less_than;
+        r.SwapInputs();  // a > b => b < a
+        break;
+      case IrOpcode::kJSLessThanOrEqual:
+        comparison = less_than_or_equal;
+        break;
+      case IrOpcode::kJSGreaterThanOrEqual:
+        comparison = less_than_or_equal;
+        r.SwapInputs();  // a >= b => b <= a
+        break;
+      default:
+        return NoChange();
+    }
+    return r.ChangeToPureOperator(comparison);
+  }
+  // TODO(turbofan): relax/remove effects of this operator in other cases.
+  return NoChange();  // Keep a generic comparison.
+}
+
+
+Reduction JSTypedLowering::ReduceJSEqual(Node* node, bool invert) {
+  JSBinopReduction r(this, node);
+
+  if (r.BothInputsAre(Type::Number())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::String())) {
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::Receiver())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Receiver()), invert);
+  }
+  // TODO(turbofan): js-typed-lowering of Equal(undefined)
+  // TODO(turbofan): js-typed-lowering of Equal(null)
+  // TODO(turbofan): js-typed-lowering of Equal(boolean)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
+  JSBinopReduction r(this, node);
+  if (r.left() == r.right()) {
+    // x === x is always true unless x can be NaN.
+    if (!r.left_type()->Maybe(Type::NaN())) {
+      return ReplaceEagerly(node, invert ? jsgraph()->FalseConstant()
+                                         : jsgraph()->TrueConstant());
+    }
+  }
+  if (!r.left_type()->Maybe(r.right_type())) {
+    // Type intersection is empty; === is always false unless both
+    // inputs could be strings (one internalized and one not).
+    if (r.OneInputCannotBe(Type::String())) {
+      return ReplaceEagerly(node, invert ? jsgraph()->TrueConstant()
+                                         : jsgraph()->FalseConstant());
+    }
+  }
+  if (r.OneInputIs(Type::Undefined())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Undefined()), invert);
+  }
+  if (r.OneInputIs(Type::Null())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Null()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Boolean())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Boolean()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Object())) {
+    return r.ChangeToPureOperator(simplified()->ReferenceEqual(Type::Object()),
+                                  invert);
+  }
+  if (r.OneInputIs(Type::Receiver())) {
+    return r.ChangeToPureOperator(
+        simplified()->ReferenceEqual(Type::Receiver()), invert);
+  }
+  if (r.BothInputsAre(Type::String())) {
+    return r.ChangeToPureOperator(simplified()->StringEqual(), invert);
+  }
+  if (r.BothInputsAre(Type::Number())) {
+    return r.ChangeToPureOperator(simplified()->NumberEqual(), invert);
+  }
+  // TODO(turbofan): js-typed-lowering of StrictEqual(mixed types)
+  return NoChange();
+}
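+
+// Illustrative note: (x === x) folds to true only when x's type excludes NaN,
+// because (NaN === NaN) is false; e.g. the fold applies for x:Signed32 but
+// not for x:Number.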
+
+
+Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToNumber) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToNumberInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToNumber(JSToNumber(x)) => JSToNumber(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Number())) {
+    // JSToNumber(x:number) => x
+    return Changed(input);
+  }
+  if (input_type->Is(Type::Undefined())) {
+    // JSToNumber(undefined) => #NaN
+    return ReplaceWith(jsgraph()->NaNConstant());
+  }
+  if (input_type->Is(Type::Null())) {
+    // JSToNumber(null) => #0
+    return ReplaceWith(jsgraph()->ZeroConstant());
+  }
+  if (input_type->Is(Type::Boolean())) {
+    // JSToNumber(x:boolean) => BooleanToNumber(x)
+    return ReplaceWith(
+        graph()->NewNode(simplified()->BooleanToNumber(), input));
+  }
+  // TODO(turbofan): js-typed-lowering of ToNumber(x:string)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToString) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToStringInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToString(JSToString(x)) => JSToString(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::String())) {
+    return Changed(input);  // JSToString(x:string) => x
+  }
+  if (input_type->Is(Type::Undefined())) {
+    return ReplaceWith(jsgraph()->HeapConstant(
+        graph()->zone()->isolate()->factory()->undefined_string()));
+  }
+  if (input_type->Is(Type::Null())) {
+    return ReplaceWith(jsgraph()->HeapConstant(
+        graph()->zone()->isolate()->factory()->null_string()));
+  }
+  // TODO(turbofan): js-typed-lowering of ToString(x:boolean)
+  // TODO(turbofan): js-typed-lowering of ToString(x:number)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToBoolean) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToBooleanInput(input->InputAt(0));
+    if (result.Changed()) {
+      RelaxEffects(input);
+      return result;
+    }
+    return Changed(input);  // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x)
+  }
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Boolean())) {
+    return Changed(input);  // JSToBoolean(x:boolean) => x
+  }
+  if (input_type->Is(Type::Undefined())) {
+    // JSToBoolean(undefined) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::Null())) {
+    // JSToBoolean(null) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::DetectableReceiver())) {
+    // JSToBoolean(x:detectable) => #true
+    return ReplaceWith(jsgraph()->TrueConstant());
+  }
+  if (input_type->Is(Type::Undetectable())) {
+    // JSToBoolean(x:undetectable) => #false
+    return ReplaceWith(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::OrderedNumber())) {
+    // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
+    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
+                                 jsgraph()->ZeroConstant());
+    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
+    return ReplaceWith(inv);
+  }
+  // TODO(turbofan): js-typed-lowering of ToBoolean(string)
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
+  Node* key = NodeProperties::GetValueInput(node, 1);
+  Node* base = NodeProperties::GetValueInput(node, 0);
+  Type* key_type = NodeProperties::GetBounds(key).upper;
+  Type* base_type = NodeProperties::GetBounds(base).upper;
+  // TODO(mstarzinger): This lowering is not correct if:
+  //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
+  //   b) The typed array or its buffer is neutered.
+  //   c) The index is out of bounds.
+  if (base_type->IsConstant() && key_type->Is(Type::Integral32()) &&
+      base_type->AsConstant()->Value()->IsJSTypedArray()) {
+    // JSLoadProperty(typed-array, int32)
+    JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
+    ElementsKind elements_kind = array->map()->elements_kind();
+    ExternalArrayType type = array->type();
+    uint32_t length;
+    CHECK(array->length()->ToUint32(&length));
+    ElementAccess element_access;
+    Node* elements = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
+        NodeProperties::GetEffectInput(node));
+    if (IsExternalArrayElementsKind(elements_kind)) {
+      elements = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
+          elements, NodeProperties::GetEffectInput(node));
+      element_access = AccessBuilder::ForTypedArrayElement(type, true);
+    } else {
+      DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+      element_access = AccessBuilder::ForTypedArrayElement(type, false);
+    }
+    Node* value =
+        graph()->NewNode(simplified()->LoadElement(element_access), elements,
+                         key, jsgraph()->Uint32Constant(length),
+                         NodeProperties::GetEffectInput(node));
+    return ReplaceEagerly(node, value);
+  }
+  return NoChange();
+}
+
+
+Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
+  Node* key = NodeProperties::GetValueInput(node, 1);
+  Node* base = NodeProperties::GetValueInput(node, 0);
+  Node* value = NodeProperties::GetValueInput(node, 2);
+  Type* key_type = NodeProperties::GetBounds(key).upper;
+  Type* base_type = NodeProperties::GetBounds(base).upper;
+  // TODO(mstarzinger): This lowering is not correct if:
+  //   a) The typed array turns external (i.e. MaterializeArrayBuffer)
+  //   b) The typed array or its buffer is neutered.
+  if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
+      base_type->AsConstant()->Value()->IsJSTypedArray()) {
+    // JSStoreProperty(typed-array, int32, value)
+    JSTypedArray* array = JSTypedArray::cast(*base_type->AsConstant()->Value());
+    ElementsKind elements_kind = array->map()->elements_kind();
+    ExternalArrayType type = array->type();
+    uint32_t length;
+    CHECK(array->length()->ToUint32(&length));
+    ElementAccess element_access;
+    Node* elements = graph()->NewNode(
+        simplified()->LoadField(AccessBuilder::ForJSObjectElements()), base,
+        NodeProperties::GetEffectInput(node));
+    if (IsExternalArrayElementsKind(elements_kind)) {
+      elements = graph()->NewNode(
+          simplified()->LoadField(AccessBuilder::ForExternalArrayPointer()),
+          elements, NodeProperties::GetEffectInput(node));
+      element_access = AccessBuilder::ForTypedArrayElement(type, true);
+    } else {
+      DCHECK(IsFixedTypedArrayElementsKind(elements_kind));
+      element_access = AccessBuilder::ForTypedArrayElement(type, false);
+    }
+
+    Node* check = graph()->NewNode(machine()->Uint32LessThan(), key,
+                                   jsgraph()->Uint32Constant(length));
+    Node* branch = graph()->NewNode(common()->Branch(), check,
+                                    NodeProperties::GetControlInput(node));
+
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* store =
+        graph()->NewNode(simplified()->StoreElement(element_access), elements,
+                         key, jsgraph()->Uint32Constant(length), value,
+                         NodeProperties::GetEffectInput(node), if_true);
+
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+
+    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    Node* phi = graph()->NewNode(common()->EffectPhi(2), store,
+                                 NodeProperties::GetEffectInput(node), merge);
+
+    return ReplaceWith(phi);
+  }
+  return NoChange();
+}
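+
+// Illustrative sketch of the bounds-check diamond built above:
+//
+//   Branch(Uint32LessThan(key, length))
+//     IfTrue  -> StoreElement -> Merge
+//     IfFalse -----------------> Merge -> EffectPhi(store, incoming effect)
+//
+// so an out-of-bounds store is skipped and the incoming effect flows through.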
+
+
+static Reduction ReplaceWithReduction(Node* node, Reduction reduction) {
+  if (reduction.Changed()) {
+    NodeProperties::ReplaceWithValue(node, reduction.replacement());
+    return reduction;
+  }
+  return Reducer::NoChange();
+}
+
+
+Reduction JSTypedLowering::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kJSEqual:
+      return ReduceJSEqual(node, false);
+    case IrOpcode::kJSNotEqual:
+      return ReduceJSEqual(node, true);
+    case IrOpcode::kJSStrictEqual:
+      return ReduceJSStrictEqual(node, false);
+    case IrOpcode::kJSStrictNotEqual:
+      return ReduceJSStrictEqual(node, true);
+    case IrOpcode::kJSLessThan:         // fall through
+    case IrOpcode::kJSGreaterThan:      // fall through
+    case IrOpcode::kJSLessThanOrEqual:  // fall through
+    case IrOpcode::kJSGreaterThanOrEqual:
+      return ReduceJSComparison(node);
+    case IrOpcode::kJSBitwiseOr:
+      return ReduceI32Binop(node, true, true, machine()->Word32Or());
+    case IrOpcode::kJSBitwiseXor:
+      return ReduceI32Binop(node, true, true, machine()->Word32Xor());
+    case IrOpcode::kJSBitwiseAnd:
+      return ReduceI32Binop(node, true, true, machine()->Word32And());
+    case IrOpcode::kJSShiftLeft:
+      return ReduceI32Shift(node, true, machine()->Word32Shl());
+    case IrOpcode::kJSShiftRight:
+      return ReduceI32Shift(node, true, machine()->Word32Sar());
+    case IrOpcode::kJSShiftRightLogical:
+      return ReduceI32Shift(node, false, machine()->Word32Shr());
+    case IrOpcode::kJSAdd:
+      return ReduceJSAdd(node);
+    case IrOpcode::kJSSubtract:
+      return ReduceNumberBinop(node, simplified()->NumberSubtract());
+    case IrOpcode::kJSMultiply:
+      return ReduceNumberBinop(node, simplified()->NumberMultiply());
+    case IrOpcode::kJSDivide:
+      return ReduceNumberBinop(node, simplified()->NumberDivide());
+    case IrOpcode::kJSModulus:
+      return ReduceNumberBinop(node, simplified()->NumberModulus());
+    case IrOpcode::kJSUnaryNot: {
+      Reduction result = ReduceJSToBooleanInput(node->InputAt(0));
+      Node* value;
+      if (result.Changed()) {
+        // JSUnaryNot(x:boolean) => BooleanNot(x)
+        value =
+            graph()->NewNode(simplified()->BooleanNot(), result.replacement());
+        NodeProperties::ReplaceWithValue(node, value);
+        return Changed(value);
+      } else {
+        // JSUnaryNot(x) => BooleanNot(JSToBoolean(x))
+        value = graph()->NewNode(simplified()->BooleanNot(), node);
+        node->set_op(javascript()->ToBoolean());
+        NodeProperties::ReplaceWithValue(node, value, node);
+        // Note: ReplaceWithValue() above also rewired the use of {node}
+        // inside {value} itself, so restore that input here.
+        value->ReplaceInput(0, node);
+        return Changed(node);
+      }
+    }
+    case IrOpcode::kJSToBoolean:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToBooleanInput(node->InputAt(0)));
+    case IrOpcode::kJSToNumber:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToNumberInput(node->InputAt(0)));
+    case IrOpcode::kJSToString:
+      return ReplaceWithReduction(node,
+                                  ReduceJSToStringInput(node->InputAt(0)));
+    case IrOpcode::kJSLoadProperty:
+      return ReduceJSLoadProperty(node);
+    case IrOpcode::kJSStoreProperty:
+      return ReduceJSStoreProperty(node);
+    case IrOpcode::kJSCallFunction:
+      return JSBuiltinReducer(jsgraph()).Reduce(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/js-typed-lowering.h b/src/compiler/js-typed-lowering.h
new file mode 100644
index 0000000..deaf1fa
--- /dev/null
+++ b/src/compiler/js-typed-lowering.h
@@ -0,0 +1,64 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JS_TYPED_LOWERING_H_
+#define V8_COMPILER_JS_TYPED_LOWERING_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Lowers JS-level operators to simplified operators based on types.
+class JSTypedLowering FINAL : public Reducer {
+ public:
+  explicit JSTypedLowering(JSGraph* jsgraph)
+      : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+  virtual ~JSTypedLowering();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+  JSGraph* jsgraph() { return jsgraph_; }
+  Graph* graph() { return jsgraph_->graph(); }
+  Zone* zone() { return jsgraph_->zone(); }
+
+ private:
+  friend class JSBinopReduction;
+
+  Reduction ReplaceEagerly(Node* old, Node* node);
+  Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); }
+  Reduction ReduceJSAdd(Node* node);
+  Reduction ReduceJSComparison(Node* node);
+  Reduction ReduceJSLoadProperty(Node* node);
+  Reduction ReduceJSStoreProperty(Node* node);
+  Reduction ReduceJSEqual(Node* node, bool invert);
+  Reduction ReduceJSStrictEqual(Node* node, bool invert);
+  Reduction ReduceJSToNumberInput(Node* input);
+  Reduction ReduceJSToStringInput(Node* input);
+  Reduction ReduceJSToBooleanInput(Node* input);
+  Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
+  Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed,
+                           const Operator* intOp);
+  Reduction ReduceI32Shift(Node* node, bool left_signed,
+                           const Operator* shift_op);
+
+  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
+  CommonOperatorBuilder* common() { return jsgraph_->common(); }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+  MachineOperatorBuilder* machine() { return jsgraph_->machine(); }
+
+  JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder simplified_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JS_TYPED_LOWERING_H_
diff --git a/src/compiler/linkage-impl.h b/src/compiler/linkage-impl.h
new file mode 100644
index 0000000..c32c706
--- /dev/null
+++ b/src/compiler/linkage-impl.h
@@ -0,0 +1,226 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LINKAGE_IMPL_H_
+#define V8_COMPILER_LINKAGE_IMPL_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(titzer): replace uses of int with size_t in LinkageHelper.
+template <typename LinkageTraits>
+class LinkageHelper {
+ public:
+  static const RegList kNoCalleeSaved = 0;
+
+  static void AddReturnLocations(LocationSignature::Builder* locations) {
+    DCHECK(locations->return_count_ <= 2);
+    if (locations->return_count_ > 0) {
+      locations->AddReturn(regloc(LinkageTraits::ReturnValueReg()));
+    }
+    if (locations->return_count_ > 1) {
+      locations->AddReturn(regloc(LinkageTraits::ReturnValue2Reg()));
+    }
+  }
+
+  // TODO(turbofan): cache call descriptors for JSFunction calls.
+  static CallDescriptor* GetJSCallDescriptor(Zone* zone,
+                                             int js_parameter_count) {
+    const size_t return_count = 1;
+    const size_t context_count = 1;
+    const size_t parameter_count = js_parameter_count + context_count;
+
+    LocationSignature::Builder locations(zone, return_count, parameter_count);
+    MachineSignature::Builder types(zone, return_count, parameter_count);
+
+    // Add returns.
+    AddReturnLocations(&locations);
+    for (size_t i = 0; i < return_count; i++) {
+      types.AddReturn(kMachAnyTagged);
+    }
+
+    // All parameters to JS calls go on the stack.
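+    // Spill slot indices for parameters are negative: the first parameter
+    // is addressed at -js_parameter_count and the last one at -1.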
+    for (int i = 0; i < js_parameter_count; i++) {
+      int spill_slot_index = i - js_parameter_count;
+      locations.AddParam(stackloc(spill_slot_index));
+      types.AddParam(kMachAnyTagged);
+    }
+    // Add context.
+    locations.AddParam(regloc(LinkageTraits::ContextReg()));
+    types.AddParam(kMachAnyTagged);
+
+    // The target for JS function calls is the JSFunction object.
+    MachineType target_type = kMachAnyTagged;
+    LinkageLocation target_loc = regloc(LinkageTraits::JSCallFunctionReg());
+    return new (zone) CallDescriptor(CallDescriptor::kCallJSFunction,  // kind
+                                     target_type,         // target MachineType
+                                     target_loc,          // target location
+                                     types.Build(),       // machine_sig
+                                     locations.Build(),   // location_sig
+                                     js_parameter_count,  // js_parameter_count
+                                     Operator::kNoProperties,  // properties
+                                     kNoCalleeSaved,           // callee-saved
+                                     CallDescriptor::kNeedsFrameState,  // flags
+                                     "js-call");
+  }
+
+
+  // TODO(turbofan): cache call descriptors for runtime calls.
+  static CallDescriptor* GetRuntimeCallDescriptor(
+      Zone* zone, Runtime::FunctionId function_id, int js_parameter_count,
+      Operator::Properties properties) {
+    const size_t function_count = 1;
+    const size_t num_args_count = 1;
+    const size_t context_count = 1;
+    const size_t parameter_count = function_count +
+                                   static_cast<size_t>(js_parameter_count) +
+                                   num_args_count + context_count;
+
+    const Runtime::Function* function = Runtime::FunctionForId(function_id);
+    const size_t return_count = static_cast<size_t>(function->result_size);
+
+    LocationSignature::Builder locations(zone, return_count, parameter_count);
+    MachineSignature::Builder types(zone, return_count, parameter_count);
+
+    // Add returns.
+    AddReturnLocations(&locations);
+    for (size_t i = 0; i < return_count; i++) {
+      types.AddReturn(kMachAnyTagged);
+    }
+
+    // All parameters to the runtime call go on the stack.
+    for (int i = 0; i < js_parameter_count; i++) {
+      locations.AddParam(stackloc(i - js_parameter_count));
+      types.AddParam(kMachAnyTagged);
+    }
+    // Add runtime function itself.
+    locations.AddParam(regloc(LinkageTraits::RuntimeCallFunctionReg()));
+    types.AddParam(kMachAnyTagged);
+
+    // Add runtime call argument count.
+    locations.AddParam(regloc(LinkageTraits::RuntimeCallArgCountReg()));
+    types.AddParam(kMachPtr);
+
+    // Add context.
+    locations.AddParam(regloc(LinkageTraits::ContextReg()));
+    types.AddParam(kMachAnyTagged);
+
+    CallDescriptor::Flags flags = Linkage::NeedsFrameState(function_id)
+                                      ? CallDescriptor::kNeedsFrameState
+                                      : CallDescriptor::kNoFlags;
+
+    // The target for runtime calls is a code object.
+    MachineType target_type = kMachAnyTagged;
+    LinkageLocation target_loc = LinkageLocation::AnyRegister();
+    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
+                                     target_type,         // target MachineType
+                                     target_loc,          // target location
+                                     types.Build(),       // machine_sig
+                                     locations.Build(),   // location_sig
+                                     js_parameter_count,  // js_parameter_count
+                                     properties,          // properties
+                                     kNoCalleeSaved,      // callee-saved
+                                     flags,               // flags
+                                     function->name);     // debug name
+  }
+
+
+  // TODO(turbofan): cache call descriptors for code stub calls.
+  static CallDescriptor* GetStubCallDescriptor(
+      Zone* zone, CallInterfaceDescriptor descriptor, int stack_parameter_count,
+      CallDescriptor::Flags flags) {
+    const int register_parameter_count =
+        descriptor.GetEnvironmentParameterCount();
+    const int js_parameter_count =
+        register_parameter_count + stack_parameter_count;
+    const int context_count = 1;
+    const size_t return_count = 1;
+    const size_t parameter_count =
+        static_cast<size_t>(js_parameter_count + context_count);
+
+    LocationSignature::Builder locations(zone, return_count, parameter_count);
+    MachineSignature::Builder types(zone, return_count, parameter_count);
+
+    // Add return location.
+    AddReturnLocations(&locations);
+    types.AddReturn(kMachAnyTagged);
+
+    // Add parameters in registers and on the stack.
+    for (int i = 0; i < js_parameter_count; i++) {
+      if (i < register_parameter_count) {
+        // The first parameters go in registers.
+        Register reg = descriptor.GetEnvironmentParameterRegister(i);
+        locations.AddParam(regloc(reg));
+      } else {
+        // The rest of the parameters go on the stack.
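+        // Stack slots are negative, ranging from -stack_parameter_count for
+        // the first stack parameter down to -1 for the last.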
+        int stack_slot = i - register_parameter_count - stack_parameter_count;
+        locations.AddParam(stackloc(stack_slot));
+      }
+      types.AddParam(kMachAnyTagged);
+    }
+    // Add context.
+    locations.AddParam(regloc(LinkageTraits::ContextReg()));
+    types.AddParam(kMachAnyTagged);
+
+    // The target for stub calls is a code object.
+    MachineType target_type = kMachAnyTagged;
+    LinkageLocation target_loc = LinkageLocation::AnyRegister();
+    return new (zone) CallDescriptor(CallDescriptor::kCallCodeObject,  // kind
+                                     target_type,         // target MachineType
+                                     target_loc,          // target location
+                                     types.Build(),       // machine_sig
+                                     locations.Build(),   // location_sig
+                                     js_parameter_count,  // js_parameter_count
+                                     Operator::kNoProperties,  // properties
+                                     kNoCalleeSaved,  // callee-saved registers
+                                     flags,           // flags
+                                     descriptor.DebugName(zone->isolate()));
+  }
+
+  static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* msig) {
+    LocationSignature::Builder locations(zone, msig->return_count(),
+                                         msig->parameter_count());
+    // Add return location(s).
+    AddReturnLocations(&locations);
+
+    // Add register and/or stack parameter(s).
+    const int parameter_count = static_cast<int>(msig->parameter_count());
+    for (int i = 0; i < parameter_count; i++) {
+      if (i < LinkageTraits::CRegisterParametersLength()) {
+        locations.AddParam(regloc(LinkageTraits::CRegisterParameter(i)));
+      } else {
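+        // Parameters that do not fit in registers go on the stack, at
+        // slot -(i + 1).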
+        locations.AddParam(stackloc(-1 - i));
+      }
+    }
+
+    // The target for C calls is always an address (i.e. machine pointer).
+    MachineType target_type = kMachPtr;
+    LinkageLocation target_loc = LinkageLocation::AnyRegister();
+    return new (zone) CallDescriptor(CallDescriptor::kCallAddress,  // kind
+                                     target_type,        // target MachineType
+                                     target_loc,         // target location
+                                     msig,               // machine_sig
+                                     locations.Build(),  // location_sig
+                                     0,                  // js_parameter_count
+                                     Operator::kNoProperties,  // properties
+                                     LinkageTraits::CCalleeSaveRegisters(),
+                                     CallDescriptor::kNoFlags, "c-call");
+  }
+
+  static LinkageLocation regloc(Register reg) {
+    return LinkageLocation(Register::ToAllocationIndex(reg));
+  }
+
+  static LinkageLocation stackloc(int i) {
+    DCHECK_LT(i, 0);
+    return LinkageLocation(i);
+  }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LINKAGE_IMPL_H_
diff --git a/src/compiler/linkage.cc b/src/compiler/linkage.cc
new file mode 100644
index 0000000..465a667
--- /dev/null
+++ b/src/compiler/linkage.cc
@@ -0,0 +1,170 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/linkage.h"
+
+#include "src/code-stubs.h"
+#include "src/compiler.h"
+#include "src/compiler/node.h"
+#include "src/compiler/pipeline.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+OStream& operator<<(OStream& os, const CallDescriptor::Kind& k) {
+  switch (k) {
+    case CallDescriptor::kCallCodeObject:
+      os << "Code";
+      break;
+    case CallDescriptor::kCallJSFunction:
+      os << "JS";
+      break;
+    case CallDescriptor::kCallAddress:
+      os << "Addr";
+      break;
+  }
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const CallDescriptor& d) {
+  // TODO(svenpanne) Output properties etc. and be less cryptic.
+  return os << d.kind() << ":" << d.debug_name() << ":r" << d.ReturnCount()
+            << "j" << d.JSParameterCount() << "i" << d.InputCount() << "f"
+            << d.FrameStateCount();
+}
+
+
+Linkage::Linkage(CompilationInfo* info) : info_(info) {
+  if (info->function() != NULL) {
+    // If we already have the function literal, use the number of parameters
+    // plus the receiver.
+    incoming_ = GetJSCallDescriptor(1 + info->function()->parameter_count());
+  } else if (!info->closure().is_null()) {
+    // If we are compiling a JS function, use a JS call descriptor with the
+    // shared function info's formal parameter count plus the receiver.
+    SharedFunctionInfo* shared = info->closure()->shared();
+    incoming_ = GetJSCallDescriptor(1 + shared->formal_parameter_count());
+  } else if (info->code_stub() != NULL) {
+    // Use the code stub interface descriptor.
+    CallInterfaceDescriptor descriptor =
+        info->code_stub()->GetCallInterfaceDescriptor();
+    incoming_ = GetStubCallDescriptor(descriptor);
+  } else {
+    incoming_ = NULL;  // TODO(titzer): ?
+  }
+}
+
+
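+// Computes the offset of a spill slot or incoming parameter, relative to the
+// frame pointer when a frame exists and relative to the stack pointer for
+// frameless code (which can only address incoming parameters).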
+FrameOffset Linkage::GetFrameOffset(int spill_slot, Frame* frame, int extra) {
+  if (frame->GetSpillSlotCount() > 0 || incoming_->IsJSFunctionCall() ||
+      incoming_->kind() == CallDescriptor::kCallAddress) {
+    int offset;
+    int register_save_area_size = frame->GetRegisterSaveAreaSize();
+    if (spill_slot >= 0) {
+      // Local or spill slot. Skip the frame pointer, function, and
+      // context in the fixed part of the frame.
+      offset =
+          -(spill_slot + 1) * kPointerSize - register_save_area_size + extra;
+    } else {
+      // Incoming parameter. Skip the return address.
+      offset = -(spill_slot + 1) * kPointerSize + kFPOnStackSize +
+               kPCOnStackSize + extra;
+    }
+    return FrameOffset::FromFramePointer(offset);
+  } else {
+    // No frame. Retrieve all parameters relative to stack pointer.
+    DCHECK(spill_slot < 0);  // Must be a parameter.
+    int register_save_area_size = frame->GetRegisterSaveAreaSize();
+    int offset = register_save_area_size - (spill_slot + 1) * kPointerSize +
+                 kPCOnStackSize + extra;
+    return FrameOffset::FromStackPointer(offset);
+  }
+}
+
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count) {
+  return GetJSCallDescriptor(parameter_count, this->info_->zone());
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties) {
+  return GetRuntimeCallDescriptor(function, parameter_count, properties,
+                                  this->info_->zone());
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags) {
+  return GetStubCallDescriptor(descriptor, stack_parameter_count, flags,
+                               this->info_->zone());
+}
+
+
+// static
+bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
+  if (!FLAG_turbo_deoptimization) {
+    return false;
+  }
+  // TODO(jarin) At the moment, we only add frame state for
+  // a few chosen runtime functions.
+  switch (function) {
+    case Runtime::kDebugBreak:
+    case Runtime::kDebugGetLoadedScripts:
+    case Runtime::kDeoptimizeFunction:
+    case Runtime::kInlineCallFunction:
+    case Runtime::kPrepareStep:
+    case Runtime::kSetScriptBreakPoint:
+    case Runtime::kStackGuard:
+    case Runtime::kCheckExecutionState:
+    case Runtime::kDebugEvaluate:
+    case Runtime::kCollectStackTrace:
+      return true;
+    default:
+      return false;
+  }
+}
+
+
+//==============================================================================
+// Provide unimplemented methods on unsupported architectures, to at least link.
+//==============================================================================
+#if !V8_TURBOFAN_BACKEND
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  UNIMPLEMENTED();
+  return NULL;
+}
+#endif  // !V8_TURBOFAN_BACKEND
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/linkage.h b/src/compiler/linkage.h
new file mode 100644
index 0000000..c5cef5e
--- /dev/null
+++ b/src/compiler/linkage.h
@@ -0,0 +1,232 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LINKAGE_H_
+#define V8_COMPILER_LINKAGE_H_
+
+#include "src/base/flags.h"
+#include "src/code-stubs.h"
+#include "src/compiler/frame.h"
+#include "src/compiler/machine-type.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Describes the location for a parameter or a return value to a call.
+class LinkageLocation {
+ public:
+  explicit LinkageLocation(int location) : location_(location) {}
+
+  static const int16_t ANY_REGISTER = 32767;
+
+  static LinkageLocation AnyRegister() { return LinkageLocation(ANY_REGISTER); }
+
+ private:
+  friend class CallDescriptor;
+  friend class OperandGenerator;
+  int16_t location_;  // >= 0 implies register, otherwise stack slot.
+};
+
+typedef Signature<LinkageLocation> LocationSignature;
+
+// Describes a call to various parts of the compiler. Every call has the notion
+// of a "target", which is the first input to the call.
+class CallDescriptor FINAL : public ZoneObject {
+ public:
+  // Describes the kind of this call, which determines the target.
+  enum Kind {
+    kCallCodeObject,  // target is a Code object
+    kCallJSFunction,  // target is a JSFunction object
+    kCallAddress      // target is a machine pointer
+  };
+
+  enum Flag {
+    // TODO(jarin) kLazyDeoptimization and kNeedsFrameState should be unified.
+    kNoFlags = 0u,
+    kNeedsFrameState = 1u << 0,
+    kPatchableCallSite = 1u << 1,
+    kNeedsNopAfterCall = 1u << 2,
+    kPatchableCallSiteWithNop = kPatchableCallSite | kNeedsNopAfterCall
+  };
+  typedef base::Flags<Flag> Flags;
+
+  CallDescriptor(Kind kind, MachineType target_type, LinkageLocation target_loc,
+                 MachineSignature* machine_sig, LocationSignature* location_sig,
+                 size_t js_param_count, Operator::Properties properties,
+                 RegList callee_saved_registers, Flags flags,
+                 const char* debug_name = "")
+      : kind_(kind),
+        target_type_(target_type),
+        target_loc_(target_loc),
+        machine_sig_(machine_sig),
+        location_sig_(location_sig),
+        js_param_count_(js_param_count),
+        properties_(properties),
+        callee_saved_registers_(callee_saved_registers),
+        flags_(flags),
+        debug_name_(debug_name) {
+    DCHECK(machine_sig->return_count() == location_sig->return_count());
+    DCHECK(machine_sig->parameter_count() == location_sig->parameter_count());
+  }
+
+  // Returns the kind of this call.
+  Kind kind() const { return kind_; }
+
+  // Returns {true} if this descriptor is a call to a JSFunction.
+  bool IsJSFunctionCall() const { return kind_ == kCallJSFunction; }
+
+  // The number of return values from this call.
+  size_t ReturnCount() const { return machine_sig_->return_count(); }
+
+  // The number of JavaScript parameters to this call, including the receiver
+  // object.
+  size_t JSParameterCount() const { return js_param_count_; }
+
+  // The total number of inputs to this call, which includes the target,
+  // receiver, context, etc.
+  // TODO(titzer): this should include the frame state input too.
+  size_t InputCount() const { return 1 + machine_sig_->parameter_count(); }
+
+  size_t FrameStateCount() const { return NeedsFrameState() ? 1 : 0; }
+
+  Flags flags() const { return flags_; }
+
+  bool NeedsFrameState() const { return flags() & kNeedsFrameState; }
+
+  LinkageLocation GetReturnLocation(size_t index) const {
+    return location_sig_->GetReturn(index);
+  }
+
+  LinkageLocation GetInputLocation(size_t index) const {
+    if (index == 0) return target_loc_;
+    return location_sig_->GetParam(index - 1);
+  }
+
+  const MachineSignature* GetMachineSignature() const { return machine_sig_; }
+
+  MachineType GetReturnType(size_t index) const {
+    return machine_sig_->GetReturn(index);
+  }
+
+  MachineType GetInputType(size_t index) const {
+    if (index == 0) return target_type_;
+    return machine_sig_->GetParam(index - 1);
+  }
+
+  // Operator properties describe how this call can be optimized, if at all.
+  Operator::Properties properties() const { return properties_; }
+
+  // Get the callee-saved registers, if any, across this call.
+  RegList CalleeSavedRegisters() const { return callee_saved_registers_; }
+
+  const char* debug_name() const { return debug_name_; }
+
+ private:
+  friend class Linkage;
+
+  Kind kind_;
+  MachineType target_type_;
+  LinkageLocation target_loc_;
+  MachineSignature* machine_sig_;
+  LocationSignature* location_sig_;
+  size_t js_param_count_;
+  Operator::Properties properties_;
+  RegList callee_saved_registers_;
+  Flags flags_;
+  const char* debug_name_;
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(CallDescriptor::Flags)
+
+OStream& operator<<(OStream& os, const CallDescriptor& d);
+OStream& operator<<(OStream& os, const CallDescriptor::Kind& k);
+
+// Defines the linkage for a compilation, including the calling conventions
+// for incoming parameters and return value(s) as well as the outgoing calling
+// convention for any kind of call. Linkage is generally architecture-specific.
+//
+// Can be used to translate {arg_index} (i.e. index of the call node input) as
+// well as {param_index} (i.e. as stored in parameter nodes) into an operator
+// representing the architecture-specific location. The following call node
+// layouts are supported (where {n} is the number of value inputs):
+//
+//                  #0          #1     #2     #3     [...]             #n
+// Call[CodeStub]   code,       arg 1, arg 2, arg 3, [...],            context
+// Call[JSFunction] function,   rcvr,  arg 1, arg 2, [...],            context
+// Call[Runtime]    CEntryStub, arg 1, arg 2, arg 3, [...], fun, #arg, context
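+//
+// For example, a JSFunction call with two arguments has the value inputs
+// {function, receiver, arg 1, arg 2, context}, so n == 5.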
+class Linkage : public ZoneObject {
+ public:
+  explicit Linkage(CompilationInfo* info);
+  explicit Linkage(CompilationInfo* info, CallDescriptor* incoming)
+      : info_(info), incoming_(incoming) {}
+
+  // The call descriptor for this compilation unit describes the locations
+  // of incoming parameters and the outgoing return value(s).
+  CallDescriptor* GetIncomingDescriptor() { return incoming_; }
+  CallDescriptor* GetJSCallDescriptor(int parameter_count);
+  static CallDescriptor* GetJSCallDescriptor(int parameter_count, Zone* zone);
+  CallDescriptor* GetRuntimeCallDescriptor(Runtime::FunctionId function,
+                                           int parameter_count,
+                                           Operator::Properties properties);
+  static CallDescriptor* GetRuntimeCallDescriptor(
+      Runtime::FunctionId function, int parameter_count,
+      Operator::Properties properties, Zone* zone);
+
+  CallDescriptor* GetStubCallDescriptor(
+      CallInterfaceDescriptor descriptor, int stack_parameter_count = 0,
+      CallDescriptor::Flags flags = CallDescriptor::kNoFlags);
+  static CallDescriptor* GetStubCallDescriptor(
+      CallInterfaceDescriptor descriptor, int stack_parameter_count,
+      CallDescriptor::Flags flags, Zone* zone);
+
+  // Creates a call descriptor for simplified C calls that is appropriate
+  // for the host platform. This simplified calling convention only supports
+  // integers and pointers of one word size each, i.e. no floating point,
+  // structs, pointers to members, etc.
+  static CallDescriptor* GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig);
+
+  // Get the location of an (incoming) parameter to this function.
+  LinkageLocation GetParameterLocation(int index) {
+    return incoming_->GetInputLocation(index + 1);  // + 1 to skip target.
+  }
+
+  // Get the machine type of an (incoming) parameter to this function.
+  MachineType GetParameterType(int index) {
+    return incoming_->GetInputType(index + 1);  // + 1 to skip target.
+  }
+
+  // Get the location where this function should place its return value.
+  LinkageLocation GetReturnLocation() {
+    return incoming_->GetReturnLocation(0);
+  }
+
+  // Get the machine type of this function's return value.
+  MachineType GetReturnType() { return incoming_->GetReturnType(0); }
+
+  // Get the frame offset for a given spill slot. The location depends on the
+  // calling convention and the specific frame layout, and may thus be
+  // architecture-specific. Negative spill slots indicate arguments on the
+  // caller's frame. The {extra} parameter indicates an additional offset from
+  // the frame offset, e.g. to index into part of a double slot.
+  FrameOffset GetFrameOffset(int spill_slot, Frame* frame, int extra = 0);
+
+  CompilationInfo* info() const { return info_; }
+
+  static bool NeedsFrameState(Runtime::FunctionId function);
+
+ private:
+  CompilationInfo* info_;
+  CallDescriptor* incoming_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LINKAGE_H_
diff --git a/src/compiler/machine-operator-reducer-unittest.cc b/src/compiler/machine-operator-reducer-unittest.cc
new file mode 100644
index 0000000..f3073ab
--- /dev/null
+++ b/src/compiler/machine-operator-reducer-unittest.cc
@@ -0,0 +1,616 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
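+// Test fixture that reduces a single node with a freshly constructed
+// MachineOperatorReducer wired to a new JSGraph.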
+class MachineOperatorReducerTest : public GraphTest {
+ public:
+  explicit MachineOperatorReducerTest(int num_parameters = 2)
+      : GraphTest(num_parameters) {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    JSOperatorBuilder javascript(zone());
+    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine_);
+    MachineOperatorReducer reducer(&jsgraph);
+    return reducer.Reduce(node);
+  }
+
+  MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+  MachineOperatorBuilder machine_;
+};
+
+
+template <typename T>
+class MachineOperatorReducerTestWithParam
+    : public MachineOperatorReducerTest,
+      public ::testing::WithParamInterface<T> {
+ public:
+  explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
+      : MachineOperatorReducerTest(num_parameters) {}
+  virtual ~MachineOperatorReducerTestWithParam() {}
+};
+
+
+namespace {
+
+static const float kFloat32Values[] = {
+    -std::numeric_limits<float>::infinity(), -2.70497e+38f, -1.4698e+37f,
+    -1.22813e+35f,                           -1.20555e+35f, -1.34584e+34f,
+    -1.0079e+32f,                            -6.49364e+26f, -3.06077e+25f,
+    -1.46821e+25f,                           -1.17658e+23f, -1.9617e+22f,
+    -2.7357e+20f,                            -1.48708e+13f, -1.89633e+12f,
+    -4.66622e+11f,                           -2.22581e+11f, -1.45381e+10f,
+    -1.3956e+09f,                            -1.32951e+09f, -1.30721e+09f,
+    -1.19756e+09f,                           -9.26822e+08f, -6.35647e+08f,
+    -4.00037e+08f,                           -1.81227e+08f, -5.09256e+07f,
+    -964300.0f,                              -192446.0f,    -28455.0f,
+    -27194.0f,                               -26401.0f,     -20575.0f,
+    -17069.0f,                               -9167.0f,      -960.178f,
+    -113.0f,                                 -62.0f,        -15.0f,
+    -7.0f,                                   -0.0256635f,   -4.60374e-07f,
+    -3.63759e-10f,                           -4.30175e-14f, -5.27385e-15f,
+    -1.48084e-15f,                           -1.05755e-19f, -3.2995e-21f,
+    -1.67354e-23f,                           -1.11885e-23f, -1.78506e-30f,
+    -5.07594e-31f,                           -3.65799e-31f, -1.43718e-34f,
+    -1.27126e-38f,                           -0.0f,         0.0f,
+    1.17549e-38f,                            1.56657e-37f,  4.08512e-29f,
+    3.31357e-28f,                            6.25073e-22f,  4.1723e-13f,
+    1.44343e-09f,                            5.27004e-08f,  9.48298e-08f,
+    5.57888e-07f,                            4.89988e-05f,  0.244326f,
+    12.4895f,                                19.0f,         47.0f,
+    106.0f,                                  538.324f,      564.536f,
+    819.124f,                                7048.0f,       12611.0f,
+    19878.0f,                                20309.0f,      797056.0f,
+    1.77219e+09f,                            1.51116e+11f,  4.18193e+13f,
+    3.59167e+16f,                            3.38211e+19f,  2.67488e+20f,
+    1.78831e+21f,                            9.20914e+21f,  8.35654e+23f,
+    1.4495e+24f,                             5.94015e+25f,  4.43608e+30f,
+    2.44502e+33f,                            2.61152e+33f,  1.38178e+37f,
+    1.71306e+37f,                            3.31899e+38f,  3.40282e+38f,
+    std::numeric_limits<float>::infinity()};
+
+
+static const double kFloat64Values[] = {
+    -V8_INFINITY,  -4.23878e+275, -5.82632e+265, -6.60355e+220, -6.26172e+212,
+    -2.56222e+211, -4.82408e+201, -1.84106e+157, -1.63662e+127, -1.55772e+100,
+    -1.67813e+72,  -2.3382e+55,   -3.179e+30,    -1.441e+09,    -1.0647e+09,
+    -7.99361e+08,  -5.77375e+08,  -2.20984e+08,  -32757,        -13171,
+    -9970,         -3984,         -107,          -105,          -92,
+    -77,           -61,           -0.000208163,  -1.86685e-06,  -1.17296e-10,
+    -9.26358e-11,  -5.08004e-60,  -1.74753e-65,  -1.06561e-71,  -5.67879e-79,
+    -5.78459e-130, -2.90989e-171, -7.15489e-243, -3.76242e-252, -1.05639e-263,
+    -4.40497e-267, -2.19666e-273, -4.9998e-276,  -5.59821e-278, -2.03855e-282,
+    -5.99335e-283, -7.17554e-284, -3.11744e-309, -0.0,          0.0,
+    2.22507e-308,  1.30127e-270,  7.62898e-260,  4.00313e-249,  3.16829e-233,
+    1.85244e-228,  2.03544e-129,  1.35126e-110,  1.01182e-106,  5.26333e-94,
+    1.35292e-90,   2.85394e-83,   1.78323e-77,   5.4967e-57,    1.03207e-25,
+    4.57401e-25,   1.58738e-05,   2,             125,           2310,
+    9636,          14802,         17168,         28945,         29305,
+    4.81336e+07,   1.41207e+08,   4.65962e+08,   1.40499e+09,   2.12648e+09,
+    8.80006e+30,   1.4446e+45,    1.12164e+54,   2.48188e+89,   6.71121e+102,
+    3.074e+112,    4.9699e+152,   5.58383e+166,  4.30654e+172,  7.08824e+185,
+    9.6586e+214,   2.028e+223,    6.63277e+243,  1.56192e+261,  1.23202e+269,
+    5.72883e+289,  8.5798e+290,   1.40256e+294,  1.79769e+308,  V8_INFINITY};
+
+
+static const int32_t kInt32Values[] = {
+    -2147483647 - 1, -1914954528, -1698749618, -1578693386, -1577976073,
+    -1573998034,     -1529085059, -1499540537, -1299205097, -1090814845,
+    -938186388,      -806828902,  -750927650,  -520676892,  -513661538,
+    -453036354,      -433622833,  -282638793,  -28375,      -27788,
+    -22770,          -18806,      -14173,      -11956,      -11200,
+    -10212,          -8160,       -3751,       -2758,       -1522,
+    -121,            -120,        -118,        -117,        -106,
+    -84,             -80,         -74,         -59,         -52,
+    -48,             -39,         -35,         -17,         -11,
+    -10,             -9,          -7,          -5,          0,
+    9,               12,          17,          23,          29,
+    31,              33,          35,          40,          47,
+    55,              56,          62,          64,          67,
+    68,              69,          74,          79,          84,
+    89,              90,          97,          104,         118,
+    124,             126,         127,         7278,        17787,
+    24136,           24202,       25570,       26680,       30242,
+    32399,           420886487,   642166225,   821912648,   822577803,
+    851385718,       1212241078,  1411419304,  1589626102,  1596437184,
+    1876245816,      1954730266,  2008792749,  2045320228,  2147483647};
+
+
+static const int64_t kInt64Values[] = {
+    V8_INT64_C(-9223372036854775807) - 1, V8_INT64_C(-8974392461363618006),
+    V8_INT64_C(-8874367046689588135),     V8_INT64_C(-8269197512118230839),
+    V8_INT64_C(-8146091527100606733),     V8_INT64_C(-7550917981466150848),
+    V8_INT64_C(-7216590251577894337),     V8_INT64_C(-6464086891160048440),
+    V8_INT64_C(-6365616494908257190),     V8_INT64_C(-6305630541365849726),
+    V8_INT64_C(-5982222642272245453),     V8_INT64_C(-5510103099058504169),
+    V8_INT64_C(-5496838675802432701),     V8_INT64_C(-4047626578868642657),
+    V8_INT64_C(-4033755046900164544),     V8_INT64_C(-3554299241457877041),
+    V8_INT64_C(-2482258764588614470),     V8_INT64_C(-1688515425526875335),
+    V8_INT64_C(-924784137176548532),      V8_INT64_C(-725316567157391307),
+    V8_INT64_C(-439022654781092241),      V8_INT64_C(-105545757668917080),
+    V8_INT64_C(-2088319373),              V8_INT64_C(-2073699916),
+    V8_INT64_C(-1844949911),              V8_INT64_C(-1831090548),
+    V8_INT64_C(-1756711933),              V8_INT64_C(-1559409497),
+    V8_INT64_C(-1281179700),              V8_INT64_C(-1211513985),
+    V8_INT64_C(-1182371520),              V8_INT64_C(-785934753),
+    V8_INT64_C(-767480697),               V8_INT64_C(-705745662),
+    V8_INT64_C(-514362436),               V8_INT64_C(-459916580),
+    V8_INT64_C(-312328082),               V8_INT64_C(-302949707),
+    V8_INT64_C(-285499304),               V8_INT64_C(-125701262),
+    V8_INT64_C(-95139843),                V8_INT64_C(-32768),
+    V8_INT64_C(-27542),                   V8_INT64_C(-23600),
+    V8_INT64_C(-18582),                   V8_INT64_C(-17770),
+    V8_INT64_C(-9086),                    V8_INT64_C(-9010),
+    V8_INT64_C(-8244),                    V8_INT64_C(-2890),
+    V8_INT64_C(-103),                     V8_INT64_C(-34),
+    V8_INT64_C(-27),                      V8_INT64_C(-25),
+    V8_INT64_C(-9),                       V8_INT64_C(-7),
+    V8_INT64_C(0),                        V8_INT64_C(2),
+    V8_INT64_C(38),                       V8_INT64_C(58),
+    V8_INT64_C(65),                       V8_INT64_C(93),
+    V8_INT64_C(111),                      V8_INT64_C(1003),
+    V8_INT64_C(1267),                     V8_INT64_C(12797),
+    V8_INT64_C(23122),                    V8_INT64_C(28200),
+    V8_INT64_C(30888),                    V8_INT64_C(42648848),
+    V8_INT64_C(116836693),                V8_INT64_C(263003643),
+    V8_INT64_C(571039860),                V8_INT64_C(1079398689),
+    V8_INT64_C(1145196402),               V8_INT64_C(1184846321),
+    V8_INT64_C(1758281648),               V8_INT64_C(1859991374),
+    V8_INT64_C(1960251588),               V8_INT64_C(2042443199),
+    V8_INT64_C(296220586027987448),       V8_INT64_C(1015494173071134726),
+    V8_INT64_C(1151237951914455318),      V8_INT64_C(1331941174616854174),
+    V8_INT64_C(2022020418667972654),      V8_INT64_C(2450251424374977035),
+    V8_INT64_C(3668393562685561486),      V8_INT64_C(4858229301215502171),
+    V8_INT64_C(4919426235170669383),      V8_INT64_C(5034286595330341762),
+    V8_INT64_C(5055797915536941182),      V8_INT64_C(6072389716149252074),
+    V8_INT64_C(6185309910199801210),      V8_INT64_C(6297328311011094138),
+    V8_INT64_C(6932372858072165827),      V8_INT64_C(8483640924987737210),
+    V8_INT64_C(8663764179455849203),      V8_INT64_C(8877197042645298254),
+    V8_INT64_C(8901543506779157333),      V8_INT64_C(9223372036854775807)};
+
+
+static const uint32_t kUint32Values[] = {
+    0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+    0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+    0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+    0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+    0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+    0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+    0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+    0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Unary operators
+
+
+namespace {
+
+struct UnaryOperator {
+  const Operator* (MachineOperatorBuilder::*constructor)();
+  const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
+  return os << unop.constructor_name;
+}
+
+
+static const UnaryOperator kUnaryOperators[] = {
+    {&MachineOperatorBuilder::ChangeInt32ToFloat64, "ChangeInt32ToFloat64"},
+    {&MachineOperatorBuilder::ChangeUint32ToFloat64, "ChangeUint32ToFloat64"},
+    {&MachineOperatorBuilder::ChangeFloat64ToInt32, "ChangeFloat64ToInt32"},
+    {&MachineOperatorBuilder::ChangeFloat64ToUint32, "ChangeFloat64ToUint32"},
+    {&MachineOperatorBuilder::ChangeInt32ToInt64, "ChangeInt32ToInt64"},
+    {&MachineOperatorBuilder::ChangeUint32ToUint64, "ChangeUint32ToUint64"},
+    {&MachineOperatorBuilder::TruncateFloat64ToInt32, "TruncateFloat64ToInt32"},
+    {&MachineOperatorBuilder::TruncateInt64ToInt32, "TruncateInt64ToInt32"}};
+
+}  // namespace
+
+
+typedef MachineOperatorReducerTestWithParam<UnaryOperator>
+    MachineUnaryOperatorReducerTest;
+
+
+TEST_P(MachineUnaryOperatorReducerTest, Parameter) {
+  const UnaryOperator unop = GetParam();
+  Reduction reduction =
+      Reduce(graph()->NewNode((machine()->*unop.constructor)(), Parameter(0)));
+  EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorReducerTest,
+                        MachineUnaryOperatorReducerTest,
+                        ::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToFloat32WithConstant) {
+  TRACED_FOREACH(float, x, kFloat32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->ChangeFloat32ToFloat64(), Float32Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(x));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest,
+       ChangeFloat64ToInt32WithChangeInt32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->ChangeFloat64ToInt32(),
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToInt32WithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->ChangeFloat64ToInt32(), Float64Constant(FastI2D(x))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(x));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToUint32
+
+
+TEST_F(MachineOperatorReducerTest,
+       ChangeFloat64ToUint32WithChangeUint32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->ChangeFloat64ToUint32(),
+      graph()->NewNode(machine()->ChangeUint32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, ChangeFloat64ToUint32WithConstant) {
+  TRACED_FOREACH(uint32_t, x, kUint32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->ChangeFloat64ToUint32(), Float64Constant(FastUI2D(x))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(bit_cast<int32_t>(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeInt32ToFloat64WithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    Reduction reduction = Reduce(
+        graph()->NewNode(machine()->ChangeInt32ToFloat64(), Int32Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastI2D(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToInt64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeInt32ToInt64WithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    Reduction reduction = Reduce(
+        graph()->NewNode(machine()->ChangeInt32ToInt64(), Int32Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt64Constant(x));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToFloat64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeUint32ToFloat64WithConstant) {
+  TRACED_FOREACH(uint32_t, x, kUint32Values) {
+    Reduction reduction =
+        Reduce(graph()->NewNode(machine()->ChangeUint32ToFloat64(),
+                                Int32Constant(bit_cast<int32_t>(x))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(FastUI2D(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToUint64
+
+
+TEST_F(MachineOperatorReducerTest, ChangeUint32ToUint64WithConstant) {
+  TRACED_FOREACH(uint32_t, x, kUint32Values) {
+    Reduction reduction =
+        Reduce(graph()->NewNode(machine()->ChangeUint32ToUint64(),
+                                Int32Constant(bit_cast<int32_t>(x))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(),
+                IsInt64Constant(bit_cast<int64_t>(static_cast<uint64_t>(x))));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToFloat32
+
+
+TEST_F(MachineOperatorReducerTest,
+       TruncateFloat64ToFloat32WithChangeFloat32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->TruncateFloat64ToFloat32(),
+      graph()->NewNode(machine()->ChangeFloat32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToFloat32WithConstant) {
+  TRACED_FOREACH(double, x, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->TruncateFloat64ToFloat32(), Float64Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat32Constant(DoubleToFloat32(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateFloat64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest,
+       TruncateFloat64ToInt32WithChangeInt32ToFloat64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->TruncateFloat64ToInt32(),
+      graph()->NewNode(machine()->ChangeInt32ToFloat64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateFloat64ToInt32WithConstant) {
+  TRACED_FOREACH(double, x, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        machine()->TruncateFloat64ToInt32(), Float64Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(x)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// TruncateInt64ToInt32
+
+
+TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithChangeInt32ToInt64) {
+  Node* value = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      machine()->TruncateInt64ToInt32(),
+      graph()->NewNode(machine()->ChangeInt32ToInt64(), value)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(value, reduction.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, TruncateInt64ToInt32WithConstant) {
+  TRACED_FOREACH(int64_t, x, kInt64Values) {
+    Reduction reduction = Reduce(
+        graph()->NewNode(machine()->TruncateInt64ToInt32(), Int64Constant(x)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(),
+                IsInt32Constant(bit_cast<int32_t>(
+                    static_cast<uint32_t>(bit_cast<uint64_t>(x)))));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Word32Ror
+
+
+TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
+  Node* value = Parameter(0);
+  Node* shift = Parameter(1);
+  Node* shl = graph()->NewNode(machine()->Word32Shl(), value, shift);
+  Node* shr = graph()->NewNode(
+      machine()->Word32Shr(), value,
+      graph()->NewNode(machine()->Int32Sub(), Int32Constant(32), shift));
+
+  // (x << y) | (x >> (32 - y)) => x ror y
+  Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+  Reduction reduction1 = Reduce(node1);
+  EXPECT_TRUE(reduction1.Changed());
+  EXPECT_EQ(reduction1.replacement(), node1);
+  EXPECT_THAT(reduction1.replacement(), IsWord32Ror(value, shift));
+
+  // (x >> (32 - y)) | (x << y) => x ror y
+  Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+  Reduction reduction2 = Reduce(node2);
+  EXPECT_TRUE(reduction2.Changed());
+  EXPECT_EQ(reduction2.replacement(), node2);
+  EXPECT_THAT(reduction2.replacement(), IsWord32Ror(value, shift));
+}
+
+
+TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithConstant) {
+  Node* value = Parameter(0);
+  TRACED_FORRANGE(int32_t, k, 0, 31) {
+    Node* shl =
+        graph()->NewNode(machine()->Word32Shl(), value, Int32Constant(k));
+    Node* shr =
+        graph()->NewNode(machine()->Word32Shr(), value, Int32Constant(32 - k));
+
+    // (x << K) | (x >> (32 - K)) => x ror K
+    Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+    Reduction reduction1 = Reduce(node1);
+    EXPECT_TRUE(reduction1.Changed());
+    EXPECT_EQ(reduction1.replacement(), node1);
+    EXPECT_THAT(reduction1.replacement(),
+                IsWord32Ror(value, IsInt32Constant(k)));
+
+    // (x >> (32 - K)) | (x << K) => x ror K
+    Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+    Reduction reduction2 = Reduce(node2);
+    EXPECT_TRUE(reduction2.Changed());
+    EXPECT_EQ(reduction2.replacement(), node2);
+    EXPECT_THAT(reduction2.replacement(),
+                IsWord32Ror(value, IsInt32Constant(k)));
+  }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32RorWithZeroShift) {
+  Node* value = Parameter(0);
+  Node* node =
+      graph()->NewNode(machine()->Word32Ror(), value, Int32Constant(0));
+  Reduction reduction = Reduce(node);
+  EXPECT_TRUE(reduction.Changed());
+  EXPECT_EQ(reduction.replacement(), value);
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32RorWithConstants) {
+  TRACED_FOREACH(int32_t, x, kUint32Values) {
+    TRACED_FORRANGE(int32_t, y, 0, 31) {
+      Node* node = graph()->NewNode(machine()->Word32Ror(), Int32Constant(x),
+                                    Int32Constant(y));
+      Reduction reduction = Reduce(node);
+      EXPECT_TRUE(reduction.Changed());
+      EXPECT_THAT(reduction.replacement(),
+                  IsInt32Constant(base::bits::RotateRight32(x, y)));
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32AddWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithZero) {
+  Node* p0 = Parameter(0);
+  {
+    Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+                                 Int32Constant(0), p0);
+
+    Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+    r = Reduce(graph()->NewNode(common()->Projection(0), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(p0, r.replacement());
+  }
+  {
+    Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), p0,
+                                 Int32Constant(0));
+
+    Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+    r = Reduce(graph()->NewNode(common()->Projection(0), add));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(p0, r.replacement());
+  }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32AddWithOverflowWithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    TRACED_FOREACH(int32_t, y, kInt32Values) {
+      int32_t z;
+      Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(),
+                                   Int32Constant(x), Int32Constant(y));
+
+      Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(),
+                  IsInt32Constant(base::bits::SignedAddOverflow32(x, y, &z)));
+
+      r = Reduce(graph()->NewNode(common()->Projection(0), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Int32SubWithOverflow
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithZero) {
+  Node* p0 = Parameter(0);
+  Node* add =
+      graph()->NewNode(machine()->Int32SubWithOverflow(), p0, Int32Constant(0));
+
+  Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsInt32Constant(0));
+
+  r = Reduce(graph()->NewNode(common()->Projection(0), add));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_EQ(p0, r.replacement());
+}
+
+
+TEST_F(MachineOperatorReducerTest, Int32SubWithOverflowWithConstant) {
+  TRACED_FOREACH(int32_t, x, kInt32Values) {
+    TRACED_FOREACH(int32_t, y, kInt32Values) {
+      int32_t z;
+      Node* add = graph()->NewNode(machine()->Int32SubWithOverflow(),
+                                   Int32Constant(x), Int32Constant(y));
+
+      Reduction r = Reduce(graph()->NewNode(common()->Projection(1), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(),
+                  IsInt32Constant(base::bits::SignedSubOverflow32(x, y, &z)));
+
+      r = Reduce(graph()->NewNode(common()->Projection(0), add));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(), IsInt32Constant(z));
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-operator-reducer.cc b/src/compiler/machine-operator-reducer.cc
new file mode 100644
index 0000000..9328547
--- /dev/null
+++ b/src/compiler/machine-operator-reducer.cc
@@ -0,0 +1,504 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator-reducer.h"
+
+#include "src/base/bits.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MachineOperatorReducer::MachineOperatorReducer(JSGraph* jsgraph)
+    : jsgraph_(jsgraph) {}
+
+
+MachineOperatorReducer::~MachineOperatorReducer() {}
+
+
+Node* MachineOperatorReducer::Float32Constant(volatile float value) {
+  return graph()->NewNode(common()->Float32Constant(value));
+}
+
+
+Node* MachineOperatorReducer::Float64Constant(volatile double value) {
+  return jsgraph()->Float64Constant(value);
+}
+
+
+Node* MachineOperatorReducer::Int32Constant(int32_t value) {
+  return jsgraph()->Int32Constant(value);
+}
+
+
+Node* MachineOperatorReducer::Int64Constant(int64_t value) {
+  return graph()->NewNode(common()->Int64Constant(value));
+}
+
+
+// Perform constant folding and strength reduction on machine operators.
+Reduction MachineOperatorReducer::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kProjection:
+      return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
+    case IrOpcode::kWord32And: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.right().node());  // x & 0  => 0
+      if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
+      if (m.IsFoldable()) {                                   // K & K  => K
+        return ReplaceInt32(m.left().Value() & m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
+      break;
+    }
+    case IrOpcode::kWord32Or: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
+      if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
+      if (m.IsFoldable()) {                                    // K | K  => K
+        return ReplaceInt32(m.left().Value() | m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
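+      // Recognize rotations assembled from a pair of opposite shifts, e.g.
+      // (x << 4) | (x >> 28) is x rotated right by 28 bits.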
+      if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
+        Int32BinopMatcher mleft(m.left().node());
+        Int32BinopMatcher mright(m.right().node());
+        if (mleft.left().node() == mright.left().node()) {
+          // (x << y) | (x >> (32 - y)) => x ror (32 - y)
+          if (mright.right().IsInt32Sub()) {
+            Int32BinopMatcher mrightright(mright.right().node());
+            if (mrightright.left().Is(32) &&
+                mrightright.right().node() == mleft.right().node()) {
+              node->set_op(machine()->Word32Ror());
+              node->ReplaceInput(0, mleft.left().node());
+              node->ReplaceInput(1, mright.right().node());
+              return Changed(node);
+            }
+          }
+          // (x << K) | (x >> (32 - K)) => x ror (32 - K)
+          if (mleft.right().IsInRange(0, 31) &&
+              mright.right().Is(32 - mleft.right().Value())) {
+            node->set_op(machine()->Word32Ror());
+            node->ReplaceInput(0, mleft.left().node());
+            node->ReplaceInput(1, mright.right().node());
+            return Changed(node);
+          }
+        }
+      }
+      if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
+        // (x >> (32 - y)) | (x << y) => x ror (32 - y)
+        Int32BinopMatcher mleft(m.left().node());
+        Int32BinopMatcher mright(m.right().node());
+        if (mleft.left().node() == mright.left().node()) {
+          if (mleft.right().IsInt32Sub()) {
+            Int32BinopMatcher mleftright(mleft.right().node());
+            if (mleftright.left().Is(32) &&
+                mleftright.right().node() == mright.right().node()) {
+              node->set_op(machine()->Word32Ror());
+              node->ReplaceInput(0, mright.left().node());
+              node->ReplaceInput(1, mleft.right().node());
+              return Changed(node);
+            }
+          }
+          // (x >> (32 - K)) | (x << K) => x ror (32 - K)
+          if (mright.right().IsInRange(0, 31) &&
+              mleft.right().Is(32 - mright.right().Value())) {
+            node->set_op(machine()->Word32Ror());
+            node->ReplaceInput(0, mright.left().node());
+            node->ReplaceInput(1, mleft.right().node());
+            return Changed(node);
+          }
+        }
+      }
+      break;
+    }
+    case IrOpcode::kWord32Xor: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x ^ 0 => x
+      if (m.IsFoldable()) {                                  // K ^ K => K
+        return ReplaceInt32(m.left().Value() ^ m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x ^ x => 0
+      break;
+    }
+    case IrOpcode::kWord32Shl: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
+      if (m.IsFoldable()) {                                  // K << K => K
+        return ReplaceInt32(m.left().Value() << m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Shr: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
+      if (m.IsFoldable()) {                                  // K >>> K => K
+        return ReplaceInt32(m.left().Value() >> m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Sar: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x >> 0 => x
+      if (m.IsFoldable()) {                                  // K >> K => K
+        return ReplaceInt32(m.left().Value() >> m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kWord32Ror: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x ror 0 => x
+      if (m.IsFoldable()) {                                  // K ror K => K
+        return ReplaceInt32(
+            base::bits::RotateRight32(m.left().Value(), m.right().Value()));
+      }
+      break;
+    }
+    case IrOpcode::kWord32Equal: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K == K => K
+        return ReplaceBool(m.left().Value() == m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y == 0 => x == y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      // TODO(turbofan): fold HeapConstant, ExternalReference, pointer compares
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
+      break;
+    }
+    case IrOpcode::kInt32Add: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+      if (m.IsFoldable()) {                                  // K + K => K
+        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) +
+                            static_cast<uint32_t>(m.right().Value()));
+      }
+      break;
+    }
+    case IrOpcode::kInt32Sub: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.left().node());  // x - 0 => x
+      if (m.IsFoldable()) {                                  // K - K => K
+        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) -
+                            static_cast<uint32_t>(m.right().Value()));
+      }
+      if (m.LeftEqualsRight()) return ReplaceInt32(0);  // x - x => 0
+      break;
+    }
+    case IrOpcode::kInt32Mul: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(0)) return Replace(m.right().node());  // x * 0 => 0
+      if (m.right().Is(1)) return Replace(m.left().node());   // x * 1 => x
+      if (m.IsFoldable()) {                                   // K * K => K
+        return ReplaceInt32(m.left().Value() * m.right().Value());
+      }
+      if (m.right().Is(-1)) {  // x * -1 => 0 - x
+        node->set_op(machine()->Int32Sub());
+        node->ReplaceInput(0, Int32Constant(0));
+        node->ReplaceInput(1, m.left().node());
+        return Changed(node);
+      }
+      if (m.right().IsPowerOf2()) {  // x * 2^n => x << n
+        node->set_op(machine()->Word32Shl());
+        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32Div: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().IsPowerOf2())
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
+        if (m.right().Is(-1)) return ReplaceInt32(-m.left().Value());
+        return ReplaceInt32(m.left().Value() / m.right().Value());
+      }
+      if (m.right().Is(-1)) {  // x / -1 => 0 - x
+        node->set_op(machine()->Int32Sub());
+        node->ReplaceInput(0, Int32Constant(0));
+        node->ReplaceInput(1, m.left().node());
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32UDiv: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1 => x
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K / K => K
+        return ReplaceInt32(m.left().Value() / m.right().Value());
+      }
+      if (m.right().IsPowerOf2()) {  // x / 2^n => x >> n
+        node->set_op(machine()->Word32Shr());
+        node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32Mod: {
+      Int32BinopMatcher m(node);
+      if (m.right().Is(1)) return ReplaceInt32(0);   // x % 1  => 0
+      if (m.right().Is(-1)) return ReplaceInt32(0);  // x % -1 => 0
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().IsPowerOf2())
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
+        return ReplaceInt32(m.left().Value() % m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kInt32UMod: {
+      Uint32BinopMatcher m(node);
+      if (m.right().Is(1)) return ReplaceInt32(0);  // x % 1 => 0
+      // TODO(turbofan): if (m.left().Is(0))
+      // TODO(turbofan): if (m.right().Is(0))
+      // TODO(turbofan): if (m.LeftEqualsRight())
+      if (m.IsFoldable() && !m.right().Is(0)) {  // K % K => K
+        return ReplaceInt32(m.left().Value() % m.right().Value());
+      }
+      if (m.right().IsPowerOf2()) {  // x % 2^n => x & 2^n-1
+        node->set_op(machine()->Word32And());
+        node->ReplaceInput(1, Int32Constant(m.right().Value() - 1));
+        return Changed(node);
+      }
+      break;
+    }
+    case IrOpcode::kInt32LessThan: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K < K => K
+        return ReplaceBool(m.left().Value() < m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y < 0 => x < y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 < x - y => y < x
+        Int32BinopMatcher msub(m.right().node());
+        node->ReplaceInput(0, msub.right().node());
+        node->ReplaceInput(1, msub.left().node());
+        return Changed(node);
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
+      break;
+    }
+    case IrOpcode::kInt32LessThanOrEqual: {
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K <= K => K
+        return ReplaceBool(m.left().Value() <= m.right().Value());
+      }
+      if (m.left().IsInt32Sub() && m.right().Is(0)) {  // x - y <= 0 => x <= y
+        Int32BinopMatcher msub(m.left().node());
+        node->ReplaceInput(0, msub.left().node());
+        node->ReplaceInput(1, msub.right().node());
+        return Changed(node);
+      }
+      if (m.left().Is(0) && m.right().IsInt32Sub()) {  // 0 <= x - y => y <= x
+        Int32BinopMatcher msub(m.right().node());
+        node->ReplaceInput(0, msub.right().node());
+        node->ReplaceInput(1, msub.left().node());
+        return Changed(node);
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
+      break;
+    }
+    case IrOpcode::kUint32LessThan: {
+      Uint32BinopMatcher m(node);
+      if (m.left().Is(kMaxUInt32)) return ReplaceBool(false);  // M < x => false
+      if (m.right().Is(0)) return ReplaceBool(false);          // x < 0 => false
+      if (m.IsFoldable()) {                                    // K < K => K
+        return ReplaceBool(m.left().Value() < m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(false);  // x < x => false
+      break;
+    }
+    case IrOpcode::kUint32LessThanOrEqual: {
+      Uint32BinopMatcher m(node);
+      if (m.left().Is(0)) return ReplaceBool(true);            // 0 <= x => true
+      if (m.right().Is(kMaxUInt32)) return ReplaceBool(true);  // x <= M => true
+      if (m.IsFoldable()) {                                    // K <= K => K
+        return ReplaceBool(m.left().Value() <= m.right().Value());
+      }
+      if (m.LeftEqualsRight()) return ReplaceBool(true);  // x <= x => true
+      break;
+    }
+    case IrOpcode::kFloat64Add: {
+      Float64BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K + K => K
+        return ReplaceFloat64(m.left().Value() + m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Sub: {
+      Float64BinopMatcher m(node);
+      if (m.IsFoldable()) {  // K - K => K
+        return ReplaceFloat64(m.left().Value() - m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Mul: {
+      Float64BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x * 1.0 => x
+      if (m.right().IsNaN()) {                               // x * NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.IsFoldable()) {  // K * K => K
+        return ReplaceFloat64(m.left().Value() * m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Div: {
+      Float64BinopMatcher m(node);
+      if (m.right().Is(1)) return Replace(m.left().node());  // x / 1.0 => x
+      if (m.right().IsNaN()) {                               // x / NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {  // NaN / x => NaN
+        return Replace(m.left().node());
+      }
+      if (m.IsFoldable()) {  // K / K => K
+        return ReplaceFloat64(m.left().Value() / m.right().Value());
+      }
+      break;
+    }
+    case IrOpcode::kFloat64Mod: {
+      Float64BinopMatcher m(node);
+      if (m.right().IsNaN()) {  // x % NaN => NaN
+        return Replace(m.right().node());
+      }
+      if (m.left().IsNaN()) {  // NaN % x => NaN
+        return Replace(m.left().node());
+      }
+      if (m.IsFoldable()) {  // K % K => K
+        return ReplaceFloat64(modulo(m.left().Value(), m.right().Value()));
+      }
+      break;
+    }
+    case IrOpcode::kChangeFloat32ToFloat64: {
+      Float32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(m.Value());
+      break;
+    }
+    case IrOpcode::kChangeFloat64ToInt32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(FastD2I(m.Value()));
+      if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeFloat64ToUint32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(FastD2UI(m.Value()));
+      if (m.IsChangeUint32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeInt32ToFloat64: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(FastI2D(m.Value()));
+      break;
+    }
+    case IrOpcode::kChangeInt32ToInt64: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt64(m.Value());
+      break;
+    }
+    case IrOpcode::kChangeUint32ToFloat64: {
+      Uint32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(FastUI2D(m.Value()));
+      break;
+    }
+    case IrOpcode::kChangeUint32ToUint64: {
+      Uint32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt64(static_cast<uint64_t>(m.Value()));
+      break;
+    }
+    case IrOpcode::kTruncateFloat64ToInt32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+      if (m.IsChangeInt32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kTruncateInt64ToInt32: {
+      Int64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(static_cast<int32_t>(m.Value()));
+      if (m.IsChangeInt32ToInt64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kTruncateFloat64ToFloat32: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat32(DoubleToFloat32(m.Value()));
+      if (m.IsChangeFloat32ToFloat64()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
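+// Reduces Projection(index) over the two-output *WithOverflow operators:
+// index 0 selects the arithmetic result, index 1 the overflow bit.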
+Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32AddWithOverflow: {
+      DCHECK(index == 0 || index == 1);
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {
+        int32_t val;
+        bool ovf = base::bits::SignedAddOverflow32(m.left().Value(),
+                                                   m.right().Value(), &val);
+        return ReplaceInt32((index == 0) ? val : ovf);
+      }
+      if (m.right().Is(0)) {
+        return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+      }
+      break;
+    }
+    case IrOpcode::kInt32SubWithOverflow: {
+      DCHECK(index == 0 || index == 1);
+      Int32BinopMatcher m(node);
+      if (m.IsFoldable()) {
+        int32_t val;
+        bool ovf = base::bits::SignedSubOverflow32(m.left().Value(),
+                                                   m.right().Value(), &val);
+        return ReplaceInt32((index == 0) ? val : ovf);
+      }
+      if (m.right().Is(0)) {
+        return (index == 0) ? Replace(m.left().node()) : ReplaceInt32(0);
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
+CommonOperatorBuilder* MachineOperatorReducer::common() const {
+  return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* MachineOperatorReducer::machine() const {
+  return jsgraph()->machine();
+}
+
+
+Graph* MachineOperatorReducer::graph() const { return jsgraph()->graph(); }
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-operator-reducer.h b/src/compiler/machine-operator-reducer.h
new file mode 100644
index 0000000..c79ceae
--- /dev/null
+++ b/src/compiler/machine-operator-reducer.h
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+#define V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+#include "src/compiler/machine-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+
+
+// Performs constant folding and strength reduction on nodes that have
+// machine operators.
+class MachineOperatorReducer FINAL : public Reducer {
+ public:
+  explicit MachineOperatorReducer(JSGraph* jsgraph);
+  ~MachineOperatorReducer();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  Node* Float32Constant(volatile float value);
+  Node* Float64Constant(volatile double value);
+  Node* Int32Constant(int32_t value);
+  Node* Int64Constant(int64_t value);
+
+  Reduction ReplaceBool(bool value) { return ReplaceInt32(value ? 1 : 0); }
+  Reduction ReplaceFloat32(volatile float value) {
+    return Replace(Float32Constant(value));
+  }
+  Reduction ReplaceFloat64(volatile double value) {
+    return Replace(Float64Constant(value));
+  }
+  Reduction ReplaceInt32(int32_t value) {
+    return Replace(Int32Constant(value));
+  }
+  Reduction ReplaceInt64(int64_t value) {
+    return Replace(Int64Constant(value));
+  }
+
+  Reduction ReduceProjection(size_t index, Node* node);
+
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  CommonOperatorBuilder* common() const;
+  MachineOperatorBuilder* machine() const;
+
+  JSGraph* jsgraph_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MACHINE_OPERATOR_REDUCER_H_
diff --git a/src/compiler/machine-operator-unittest.cc b/src/compiler/machine-operator-unittest.cc
new file mode 100644
index 0000000..cb93ce7
--- /dev/null
+++ b/src/compiler/machine-operator-unittest.cc
@@ -0,0 +1,325 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#if GTEST_HAS_COMBINE
+
+// TODO(bmeurer): Find a new home for these.
+inline std::ostream& operator<<(std::ostream& os, const MachineType& type) {
+  OStringStream ost;
+  ost << type;
+  return os << ost.c_str();
+}
+inline std::ostream& operator<<(std::ostream& os,
+                                const WriteBarrierKind& write_barrier_kind) {
+  OStringStream ost;
+  ost << write_barrier_kind;
+  return os << ost.c_str();
+}
+
+
+template <typename T>
+class MachineOperatorTestWithParam
+    : public ::testing::TestWithParam< ::testing::tuple<MachineType, T> > {
+ protected:
+  MachineType type() const { return ::testing::get<0>(B::GetParam()); }
+  const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
+
+ private:
+  typedef ::testing::TestWithParam< ::testing::tuple<MachineType, T> > B;
+};
+
+
+namespace {
+
+const MachineType kMachineReps[] = {kRepWord32, kRepWord64};
+
+
+const MachineType kMachineTypes[] = {
+    kMachFloat32, kMachFloat64,   kMachInt8,   kMachUint8,  kMachInt16,
+    kMachUint16,  kMachInt32,     kMachUint32, kMachInt64,  kMachUint64,
+    kMachPtr,     kMachAnyTagged, kRepBit,     kRepWord8,   kRepWord16,
+    kRepWord32,   kRepWord64,     kRepFloat32, kRepFloat64, kRepTagged};
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Load operator.
+
+
+typedef MachineOperatorTestWithParam<LoadRepresentation>
+    MachineLoadOperatorTest;
+
+
+TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
+  MachineOperatorBuilder machine1(type());
+  MachineOperatorBuilder machine2(type());
+  EXPECT_EQ(machine1.Load(GetParam()), machine2.Load(GetParam()));
+}
+
+
+TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
+  MachineOperatorBuilder machine(type());
+  const Operator* op = machine.Load(GetParam());
+
+  EXPECT_EQ(2, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(3, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachineLoadOperatorTest, OpcodeIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  EXPECT_EQ(IrOpcode::kLoad, machine.Load(GetParam())->opcode());
+}
+
+
+TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  EXPECT_EQ(GetParam(),
+            OpParameter<LoadRepresentation>(machine.Load(GetParam())));
+}
+
+
+INSTANTIATE_TEST_CASE_P(MachineOperatorTest, MachineLoadOperatorTest,
+                        ::testing::Combine(::testing::ValuesIn(kMachineReps),
+                                           ::testing::ValuesIn(kMachineTypes)));
+
+
+// -----------------------------------------------------------------------------
+// Store operator.
+
+
+class MachineStoreOperatorTest
+    : public MachineOperatorTestWithParam<
+          ::testing::tuple<MachineType, WriteBarrierKind> > {
+ protected:
+  StoreRepresentation GetParam() const {
+    return StoreRepresentation(
+        ::testing::get<0>(MachineOperatorTestWithParam<
+            ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()),
+        ::testing::get<1>(MachineOperatorTestWithParam<
+            ::testing::tuple<MachineType, WriteBarrierKind> >::GetParam()));
+  }
+};
+
+
+TEST_P(MachineStoreOperatorTest, InstancesAreGloballyShared) {
+  MachineOperatorBuilder machine1(type());
+  MachineOperatorBuilder machine2(type());
+  EXPECT_EQ(machine1.Store(GetParam()), machine2.Store(GetParam()));
+}
+
+
+TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
+  MachineOperatorBuilder machine(type());
+  const Operator* op = machine.Store(GetParam());
+
+  EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachineStoreOperatorTest, OpcodeIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  EXPECT_EQ(IrOpcode::kStore, machine.Store(GetParam())->opcode());
+}
+
+
+TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  EXPECT_EQ(GetParam(),
+            OpParameter<StoreRepresentation>(machine.Store(GetParam())));
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+    MachineOperatorTest, MachineStoreOperatorTest,
+    ::testing::Combine(
+        ::testing::ValuesIn(kMachineReps),
+        ::testing::Combine(::testing::ValuesIn(kMachineTypes),
+                           ::testing::Values(kNoWriteBarrier,
+                                             kFullWriteBarrier))));
+
+
+// -----------------------------------------------------------------------------
+// Pure operators.
+
+
+namespace {
+
+struct PureOperator {
+  const Operator* (MachineOperatorBuilder::*constructor)();
+  IrOpcode::Value opcode;
+  int value_input_count;
+  int value_output_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
+  return os << IrOpcode::Mnemonic(pop.opcode);
+}
+
+
+const PureOperator kPureOperators[] = {
+#define PURE(Name, input_count, output_count)                      \
+  {                                                                \
+    &MachineOperatorBuilder::Name, IrOpcode::k##Name, input_count, \
+        output_count                                               \
+  }
+    PURE(Word32And, 2, 1),                PURE(Word32Or, 2, 1),
+    PURE(Word32Xor, 2, 1),                PURE(Word32Shl, 2, 1),
+    PURE(Word32Shr, 2, 1),                PURE(Word32Sar, 2, 1),
+    PURE(Word32Ror, 2, 1),                PURE(Word32Equal, 2, 1),
+    PURE(Word64And, 2, 1),                PURE(Word64Or, 2, 1),
+    PURE(Word64Xor, 2, 1),                PURE(Word64Shl, 2, 1),
+    PURE(Word64Shr, 2, 1),                PURE(Word64Sar, 2, 1),
+    PURE(Word64Ror, 2, 1),                PURE(Word64Equal, 2, 1),
+    PURE(Int32Add, 2, 1),                 PURE(Int32AddWithOverflow, 2, 2),
+    PURE(Int32Sub, 2, 1),                 PURE(Int32SubWithOverflow, 2, 2),
+    PURE(Int32Mul, 2, 1),                 PURE(Int32Div, 2, 1),
+    PURE(Int32UDiv, 2, 1),                PURE(Int32Mod, 2, 1),
+    PURE(Int32UMod, 2, 1),                PURE(Int32LessThan, 2, 1),
+    PURE(Int32LessThanOrEqual, 2, 1),     PURE(Uint32LessThan, 2, 1),
+    PURE(Uint32LessThanOrEqual, 2, 1),    PURE(Int64Add, 2, 1),
+    PURE(Int64Sub, 2, 1),                 PURE(Int64Mul, 2, 1),
+    PURE(Int64Div, 2, 1),                 PURE(Int64UDiv, 2, 1),
+    PURE(Int64Mod, 2, 1),                 PURE(Int64UMod, 2, 1),
+    PURE(Int64LessThan, 2, 1),            PURE(Int64LessThanOrEqual, 2, 1),
+    PURE(ChangeFloat32ToFloat64, 1, 1),   PURE(ChangeFloat64ToInt32, 1, 1),
+    PURE(ChangeFloat64ToUint32, 1, 1),    PURE(ChangeInt32ToInt64, 1, 1),
+    PURE(ChangeUint32ToFloat64, 1, 1),    PURE(ChangeUint32ToUint64, 1, 1),
+    PURE(TruncateFloat64ToFloat32, 1, 1), PURE(TruncateFloat64ToInt32, 1, 1),
+    PURE(TruncateInt64ToInt32, 1, 1),     PURE(Float64Add, 2, 1),
+    PURE(Float64Sub, 2, 1),               PURE(Float64Mul, 2, 1),
+    PURE(Float64Div, 2, 1),               PURE(Float64Mod, 2, 1),
+    PURE(Float64Sqrt, 1, 1),              PURE(Float64Equal, 2, 1),
+    PURE(Float64LessThan, 2, 1),          PURE(Float64LessThanOrEqual, 2, 1)
+#undef PURE
+};
+
+
+typedef MachineOperatorTestWithParam<PureOperator> MachinePureOperatorTest;
+
+}  // namespace
+
+
+TEST_P(MachinePureOperatorTest, InstancesAreGloballyShared) {
+  const PureOperator& pop = GetParam();
+  MachineOperatorBuilder machine1(type());
+  MachineOperatorBuilder machine2(type());
+  EXPECT_EQ((machine1.*pop.constructor)(), (machine2.*pop.constructor)());
+}
+
+
+TEST_P(MachinePureOperatorTest, NumberOfInputsAndOutputs) {
+  MachineOperatorBuilder machine(type());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (machine.*pop.constructor)();
+
+  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(pop.value_output_count,
+            OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(MachinePureOperatorTest, MarkedAsPure) {
+  MachineOperatorBuilder machine(type());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (machine.*pop.constructor)();
+  EXPECT_TRUE(op->HasProperty(Operator::kPure));
+}
+
+
+TEST_P(MachinePureOperatorTest, OpcodeIsCorrect) {
+  MachineOperatorBuilder machine(type());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (machine.*pop.constructor)();
+  EXPECT_EQ(pop.opcode, op->opcode());
+}
+
+
+INSTANTIATE_TEST_CASE_P(
+    MachineOperatorTest, MachinePureOperatorTest,
+    ::testing::Combine(::testing::ValuesIn(kMachineReps),
+                       ::testing::ValuesIn(kPureOperators)));
+
+#endif  // GTEST_HAS_COMBINE
+
+
+// -----------------------------------------------------------------------------
+// Pseudo operators.
+
+
+TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
+  MachineOperatorBuilder machine(kRepWord32);
+  EXPECT_EQ(machine.Word32And(), machine.WordAnd());
+  EXPECT_EQ(machine.Word32Or(), machine.WordOr());
+  EXPECT_EQ(machine.Word32Xor(), machine.WordXor());
+  EXPECT_EQ(machine.Word32Shl(), machine.WordShl());
+  EXPECT_EQ(machine.Word32Shr(), machine.WordShr());
+  EXPECT_EQ(machine.Word32Sar(), machine.WordSar());
+  EXPECT_EQ(machine.Word32Ror(), machine.WordRor());
+  EXPECT_EQ(machine.Word32Equal(), machine.WordEqual());
+  EXPECT_EQ(machine.Int32Add(), machine.IntAdd());
+  EXPECT_EQ(machine.Int32Sub(), machine.IntSub());
+  EXPECT_EQ(machine.Int32Mul(), machine.IntMul());
+  EXPECT_EQ(machine.Int32Div(), machine.IntDiv());
+  EXPECT_EQ(machine.Int32UDiv(), machine.IntUDiv());
+  EXPECT_EQ(machine.Int32Mod(), machine.IntMod());
+  EXPECT_EQ(machine.Int32UMod(), machine.IntUMod());
+  EXPECT_EQ(machine.Int32LessThan(), machine.IntLessThan());
+  EXPECT_EQ(machine.Int32LessThanOrEqual(), machine.IntLessThanOrEqual());
+}
+
+
+TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
+  MachineOperatorBuilder machine(kRepWord64);
+  EXPECT_EQ(machine.Word64And(), machine.WordAnd());
+  EXPECT_EQ(machine.Word64Or(), machine.WordOr());
+  EXPECT_EQ(machine.Word64Xor(), machine.WordXor());
+  EXPECT_EQ(machine.Word64Shl(), machine.WordShl());
+  EXPECT_EQ(machine.Word64Shr(), machine.WordShr());
+  EXPECT_EQ(machine.Word64Sar(), machine.WordSar());
+  EXPECT_EQ(machine.Word64Ror(), machine.WordRor());
+  EXPECT_EQ(machine.Word64Equal(), machine.WordEqual());
+  EXPECT_EQ(machine.Int64Add(), machine.IntAdd());
+  EXPECT_EQ(machine.Int64Sub(), machine.IntSub());
+  EXPECT_EQ(machine.Int64Mul(), machine.IntMul());
+  EXPECT_EQ(machine.Int64Div(), machine.IntDiv());
+  EXPECT_EQ(machine.Int64UDiv(), machine.IntUDiv());
+  EXPECT_EQ(machine.Int64Mod(), machine.IntMod());
+  EXPECT_EQ(machine.Int64UMod(), machine.IntUMod());
+  EXPECT_EQ(machine.Int64LessThan(), machine.IntLessThan());
+  EXPECT_EQ(machine.Int64LessThanOrEqual(), machine.IntLessThanOrEqual());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-operator.cc b/src/compiler/machine-operator.cc
new file mode 100644
index 0000000..2f30bd2
--- /dev/null
+++ b/src/compiler/machine-operator.cc
@@ -0,0 +1,244 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-operator.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const WriteBarrierKind& write_barrier_kind) {
+  switch (write_barrier_kind) {
+    case kNoWriteBarrier:
+      return os << "NoWriteBarrier";
+    case kFullWriteBarrier:
+      return os << "FullWriteBarrier";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const StoreRepresentation& rep) {
+  return os << "(" << rep.machine_type() << " : " << rep.write_barrier_kind()
+            << ")";
+}
+
+
+template <>
+struct StaticParameterTraits<StoreRepresentation> {
+  static OStream& PrintTo(OStream& os, const StoreRepresentation& rep) {
+    return os << rep;
+  }
+  static int HashCode(const StoreRepresentation& rep) {
+    return rep.machine_type() + rep.write_barrier_kind();
+  }
+  static bool Equals(const StoreRepresentation& rep1,
+                     const StoreRepresentation& rep2) {
+    return rep1 == rep2;
+  }
+};
+
+
+template <>
+struct StaticParameterTraits<LoadRepresentation> {
+  static OStream& PrintTo(OStream& os, LoadRepresentation type) {  // NOLINT
+    return os << type;
+  }
+  static int HashCode(LoadRepresentation type) { return type; }
+  static bool Equals(LoadRepresentation lhs, LoadRepresentation rhs) {
+    return lhs == rhs;
+  }
+};
+
+
+#define PURE_OP_LIST(V)                                                       \
+  V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
+  V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Word32Xor, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
+  V(Word32Shl, Operator::kNoProperties, 2, 1)                                 \
+  V(Word32Shr, Operator::kNoProperties, 2, 1)                                 \
+  V(Word32Sar, Operator::kNoProperties, 2, 1)                                 \
+  V(Word32Ror, Operator::kNoProperties, 2, 1)                                 \
+  V(Word32Equal, Operator::kCommutative, 2, 1)                                \
+  V(Word64And, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
+  V(Word64Or, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Word64Xor, Operator::kAssociative | Operator::kCommutative, 2, 1)         \
+  V(Word64Shl, Operator::kNoProperties, 2, 1)                                 \
+  V(Word64Shr, Operator::kNoProperties, 2, 1)                                 \
+  V(Word64Sar, Operator::kNoProperties, 2, 1)                                 \
+  V(Word64Ror, Operator::kNoProperties, 2, 1)                                 \
+  V(Word64Equal, Operator::kCommutative, 2, 1)                                \
+  V(Int32Add, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Int32AddWithOverflow, Operator::kAssociative | Operator::kCommutative, 2, \
+    2)                                                                        \
+  V(Int32Sub, Operator::kNoProperties, 2, 1)                                  \
+  V(Int32SubWithOverflow, Operator::kNoProperties, 2, 2)                      \
+  V(Int32Mul, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Int32Div, Operator::kNoProperties, 2, 1)                                  \
+  V(Int32UDiv, Operator::kNoProperties, 2, 1)                                 \
+  V(Int32Mod, Operator::kNoProperties, 2, 1)                                  \
+  V(Int32UMod, Operator::kNoProperties, 2, 1)                                 \
+  V(Int32LessThan, Operator::kNoProperties, 2, 1)                             \
+  V(Int32LessThanOrEqual, Operator::kNoProperties, 2, 1)                      \
+  V(Uint32LessThan, Operator::kNoProperties, 2, 1)                            \
+  V(Uint32LessThanOrEqual, Operator::kNoProperties, 2, 1)                     \
+  V(Int64Add, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Int64Sub, Operator::kNoProperties, 2, 1)                                  \
+  V(Int64Mul, Operator::kAssociative | Operator::kCommutative, 2, 1)          \
+  V(Int64Div, Operator::kNoProperties, 2, 1)                                  \
+  V(Int64UDiv, Operator::kNoProperties, 2, 1)                                 \
+  V(Int64Mod, Operator::kNoProperties, 2, 1)                                  \
+  V(Int64UMod, Operator::kNoProperties, 2, 1)                                 \
+  V(Int64LessThan, Operator::kNoProperties, 2, 1)                             \
+  V(Int64LessThanOrEqual, Operator::kNoProperties, 2, 1)                      \
+  V(ChangeFloat32ToFloat64, Operator::kNoProperties, 1, 1)                    \
+  V(ChangeFloat64ToInt32, Operator::kNoProperties, 1, 1)                      \
+  V(ChangeFloat64ToUint32, Operator::kNoProperties, 1, 1)                     \
+  V(ChangeInt32ToFloat64, Operator::kNoProperties, 1, 1)                      \
+  V(ChangeInt32ToInt64, Operator::kNoProperties, 1, 1)                        \
+  V(ChangeUint32ToFloat64, Operator::kNoProperties, 1, 1)                     \
+  V(ChangeUint32ToUint64, Operator::kNoProperties, 1, 1)                      \
+  V(TruncateFloat64ToFloat32, Operator::kNoProperties, 1, 1)                  \
+  V(TruncateFloat64ToInt32, Operator::kNoProperties, 1, 1)                    \
+  V(TruncateInt64ToInt32, Operator::kNoProperties, 1, 1)                      \
+  V(Float64Add, Operator::kCommutative, 2, 1)                                 \
+  V(Float64Sub, Operator::kNoProperties, 2, 1)                                \
+  V(Float64Mul, Operator::kCommutative, 2, 1)                                 \
+  V(Float64Div, Operator::kNoProperties, 2, 1)                                \
+  V(Float64Mod, Operator::kNoProperties, 2, 1)                                \
+  V(Float64Sqrt, Operator::kNoProperties, 1, 1)                               \
+  V(Float64Equal, Operator::kCommutative, 2, 1)                               \
+  V(Float64LessThan, Operator::kNoProperties, 2, 1)                           \
+  V(Float64LessThanOrEqual, Operator::kNoProperties, 2, 1)
+
+
+#define MACHINE_TYPE_LIST(V) \
+  V(MachFloat32)             \
+  V(MachFloat64)             \
+  V(MachInt8)                \
+  V(MachUint8)               \
+  V(MachInt16)               \
+  V(MachUint16)              \
+  V(MachInt32)               \
+  V(MachUint32)              \
+  V(MachInt64)               \
+  V(MachUint64)              \
+  V(MachAnyTagged)           \
+  V(RepBit)                  \
+  V(RepWord8)                \
+  V(RepWord16)               \
+  V(RepWord32)               \
+  V(RepWord64)               \
+  V(RepFloat32)              \
+  V(RepFloat64)              \
+  V(RepTagged)
+
+
+struct MachineOperatorBuilderImpl {
+#define PURE(Name, properties, input_count, output_count)                 \
+  struct Name##Operator FINAL : public SimpleOperator {                   \
+    Name##Operator()                                                      \
+        : SimpleOperator(IrOpcode::k##Name, Operator::kPure | properties, \
+                         input_count, output_count, #Name) {}             \
+  };                                                                      \
+  Name##Operator k##Name;
+  PURE_OP_LIST(PURE)
+#undef PURE
+
+#define LOAD(Type)                                                            \
+  struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> {  \
+    Load##Type##Operator()                                                    \
+        : Operator1<LoadRepresentation>(                                      \
+              IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, 2, 1, \
+              "Load", k##Type) {}                                             \
+  };                                                                          \
+  Load##Type##Operator k##Load##Type;
+  MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+
+#define STORE(Type)                                                           \
+  struct Store##Type##Operator : public Operator1<StoreRepresentation> {      \
+    explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)       \
+        : Operator1<StoreRepresentation>(                                     \
+              IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, 3, 0, \
+              "Store", StoreRepresentation(k##Type, write_barrier_kind)) {}   \
+  };                                                                          \
+  struct Store##Type##NoWriteBarrier##Operator FINAL                          \
+      : public Store##Type##Operator {                                        \
+    Store##Type##NoWriteBarrier##Operator()                                   \
+        : Store##Type##Operator(kNoWriteBarrier) {}                           \
+  };                                                                          \
+  struct Store##Type##FullWriteBarrier##Operator FINAL                        \
+      : public Store##Type##Operator {                                        \
+    Store##Type##FullWriteBarrier##Operator()                                 \
+        : Store##Type##Operator(kFullWriteBarrier) {}                         \
+  };                                                                          \
+  Store##Type##NoWriteBarrier##Operator k##Store##Type##NoWriteBarrier;       \
+  Store##Type##FullWriteBarrier##Operator k##Store##Type##FullWriteBarrier;
+  MACHINE_TYPE_LIST(STORE)
+#undef STORE
+};
+
+
+static base::LazyInstance<MachineOperatorBuilderImpl>::type kImpl =
+    LAZY_INSTANCE_INITIALIZER;
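+// Every MachineOperatorBuilder hands out pointers into this single lazily
+// created table, which is why requesting the same operator from two builders
+// yields the same instance (see the InstancesAreGloballyShared unittests).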
+
+
+MachineOperatorBuilder::MachineOperatorBuilder(MachineType word)
+    : impl_(kImpl.Get()), word_(word) {
+  DCHECK(word == kRepWord32 || word == kRepWord64);
+}
+
+
+#define PURE(Name, properties, input_count, output_count) \
+  const Operator* MachineOperatorBuilder::Name() { return &impl_.k##Name; }
+PURE_OP_LIST(PURE)
+#undef PURE
+
+
+const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
+  switch (rep) {
+#define LOAD(Type) \
+  case k##Type:    \
+    return &impl_.k##Load##Type;
+    MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
+  switch (rep.machine_type()) {
+#define STORE(Type)                                     \
+  case k##Type:                                         \
+    switch (rep.write_barrier_kind()) {                 \
+      case kNoWriteBarrier:                             \
+        return &impl_.k##Store##Type##NoWriteBarrier;   \
+      case kFullWriteBarrier:                           \
+        return &impl_.k##Store##Type##FullWriteBarrier; \
+    }                                                   \
+    break;
+    MACHINE_TYPE_LIST(STORE)
+#undef STORE
+
+    default:
+      break;
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-operator.h b/src/compiler/machine-operator.h
new file mode 100644
index 0000000..92c8ac4
--- /dev/null
+++ b/src/compiler/machine-operator.h
@@ -0,0 +1,187 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_OPERATOR_H_
+#define V8_COMPILER_MACHINE_OPERATOR_H_
+
+#include "src/compiler/machine-type.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+struct MachineOperatorBuilderImpl;
+class Operator;
+
+
+// Supported write barrier modes.
+enum WriteBarrierKind { kNoWriteBarrier, kFullWriteBarrier };
+
+OStream& operator<<(OStream& os, const WriteBarrierKind& write_barrier_kind);
+
+
+typedef MachineType LoadRepresentation;
+
+
+// A Store needs a MachineType and a WriteBarrierKind
+// in order to emit the correct write barrier.
+class StoreRepresentation FINAL {
+ public:
+  StoreRepresentation(MachineType machine_type,
+                      WriteBarrierKind write_barrier_kind)
+      : machine_type_(machine_type), write_barrier_kind_(write_barrier_kind) {}
+
+  MachineType machine_type() const { return machine_type_; }
+  WriteBarrierKind write_barrier_kind() const { return write_barrier_kind_; }
+
+ private:
+  MachineType machine_type_;
+  WriteBarrierKind write_barrier_kind_;
+};
+
+inline bool operator==(const StoreRepresentation& rep1,
+                       const StoreRepresentation& rep2) {
+  return rep1.machine_type() == rep2.machine_type() &&
+         rep1.write_barrier_kind() == rep2.write_barrier_kind();
+}
+
+inline bool operator!=(const StoreRepresentation& rep1,
+                       const StoreRepresentation& rep2) {
+  return !(rep1 == rep2);
+}
+
+OStream& operator<<(OStream& os, const StoreRepresentation& rep);
+
+
+// Interface for building machine-level operators. These operators are
+// machine-level but machine-independent and thus define a language suitable
+// for generating code to run on architectures such as ia32, x64, arm, etc.
+class MachineOperatorBuilder FINAL {
+ public:
+  explicit MachineOperatorBuilder(MachineType word = kMachPtr);
+
+  const Operator* Word32And();
+  const Operator* Word32Or();
+  const Operator* Word32Xor();
+  const Operator* Word32Shl();
+  const Operator* Word32Shr();
+  const Operator* Word32Sar();
+  const Operator* Word32Ror();
+  const Operator* Word32Equal();
+
+  const Operator* Word64And();
+  const Operator* Word64Or();
+  const Operator* Word64Xor();
+  const Operator* Word64Shl();
+  const Operator* Word64Shr();
+  const Operator* Word64Sar();
+  const Operator* Word64Ror();
+  const Operator* Word64Equal();
+
+  const Operator* Int32Add();
+  const Operator* Int32AddWithOverflow();
+  const Operator* Int32Sub();
+  const Operator* Int32SubWithOverflow();
+  const Operator* Int32Mul();
+  const Operator* Int32Div();
+  const Operator* Int32UDiv();
+  const Operator* Int32Mod();
+  const Operator* Int32UMod();
+  const Operator* Int32LessThan();
+  const Operator* Int32LessThanOrEqual();
+  const Operator* Uint32LessThan();
+  const Operator* Uint32LessThanOrEqual();
+
+  const Operator* Int64Add();
+  const Operator* Int64Sub();
+  const Operator* Int64Mul();
+  const Operator* Int64Div();
+  const Operator* Int64UDiv();
+  const Operator* Int64Mod();
+  const Operator* Int64UMod();
+  const Operator* Int64LessThan();
+  const Operator* Int64LessThanOrEqual();
+
+  // These operators change the representation of numbers while preserving the
+  // value of the number. Narrowing operators assume the input is representable
+  // in the target type and are *not* defined for other inputs.
+  // Use narrowing change operators only when there is a static guarantee that
+  // the input value is representable in the target type.
+  const Operator* ChangeFloat32ToFloat64();
+  const Operator* ChangeFloat64ToInt32();   // narrowing
+  const Operator* ChangeFloat64ToUint32();  // narrowing
+  const Operator* ChangeInt32ToFloat64();
+  const Operator* ChangeInt32ToInt64();
+  const Operator* ChangeUint32ToFloat64();
+  const Operator* ChangeUint32ToUint64();
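+  // For example, ChangeFloat64ToInt32 assumes its input is an integral value
+  // in int32 range, such as 42.0; its result for an input like 1.5 is
+  // unspecified.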
+
+  // These operators truncate numbers, both changing the representation of
+  // the number and mapping multiple input values onto the same output value.
+  const Operator* TruncateFloat64ToFloat32();
+  const Operator* TruncateFloat64ToInt32();  // JavaScript semantics.
+  const Operator* TruncateInt64ToInt32();
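+  // For example, TruncateFloat64ToInt32 follows JavaScript's ToInt32, so both
+  // 1.5 and 4294967297.5 map to 1.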
+
+  // Floating point operators always operate with IEEE 754 round-to-nearest.
+  const Operator* Float64Add();
+  const Operator* Float64Sub();
+  const Operator* Float64Mul();
+  const Operator* Float64Div();
+  const Operator* Float64Mod();
+  const Operator* Float64Sqrt();
+
+  // Floating point comparisons complying to IEEE 754.
+  const Operator* Float64Equal();
+  const Operator* Float64LessThan();
+  const Operator* Float64LessThanOrEqual();
+
+  // load [base + index]
+  const Operator* Load(LoadRepresentation rep);
+
+  // store [base + index], value
+  const Operator* Store(StoreRepresentation rep);
+
+  // Target machine word-size assumed by this builder.
+  bool Is32() const { return word() == kRepWord32; }
+  bool Is64() const { return word() == kRepWord64; }
+  MachineType word() const { return word_; }
+
+// Pseudo operators that translate to 32/64-bit operators depending on the
+// word-size of the target machine assumed by this builder.
+#define PSEUDO_OP_LIST(V) \
+  V(Word, And)            \
+  V(Word, Or)             \
+  V(Word, Xor)            \
+  V(Word, Shl)            \
+  V(Word, Shr)            \
+  V(Word, Sar)            \
+  V(Word, Ror)            \
+  V(Word, Equal)          \
+  V(Int, Add)             \
+  V(Int, Sub)             \
+  V(Int, Mul)             \
+  V(Int, Div)             \
+  V(Int, UDiv)            \
+  V(Int, Mod)             \
+  V(Int, UMod)            \
+  V(Int, LessThan)        \
+  V(Int, LessThanOrEqual)
+#define PSEUDO_OP(Prefix, Suffix)                                \
+  const Operator* Prefix##Suffix() {                             \
+    return Is32() ? Prefix##32##Suffix() : Prefix##64##Suffix(); \
+  }
+  PSEUDO_OP_LIST(PSEUDO_OP)
+#undef PSEUDO_OP
+#undef PSEUDO_OP_LIST
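+
+  // For example, on a 32-bit target IntAdd() returns the same shared operator
+  // instance as Int32Add(); on a 64-bit target it returns Int64Add().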
+
+ private:
+  const MachineOperatorBuilderImpl& impl_;
+  const MachineType word_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MACHINE_OPERATOR_H_
diff --git a/src/compiler/machine-type.cc b/src/compiler/machine-type.cc
new file mode 100644
index 0000000..94aa124
--- /dev/null
+++ b/src/compiler/machine-type.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/machine-type.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define PRINT(bit)         \
+  if (type & bit) {        \
+    if (before) os << "|"; \
+    os << #bit;            \
+    before = true;         \
+  }
+
+
+OStream& operator<<(OStream& os, const MachineType& type) {
+  bool before = false;
+  PRINT(kRepBit);
+  PRINT(kRepWord8);
+  PRINT(kRepWord16);
+  PRINT(kRepWord32);
+  PRINT(kRepWord64);
+  PRINT(kRepFloat32);
+  PRINT(kRepFloat64);
+  PRINT(kRepTagged);
+
+  PRINT(kTypeBool);
+  PRINT(kTypeInt32);
+  PRINT(kTypeUint32);
+  PRINT(kTypeInt64);
+  PRINT(kTypeUint64);
+  PRINT(kTypeNumber);
+  PRINT(kTypeAny);
+  return os;
+}
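+
+// For example, kMachInt32 (== kRepWord32 | kTypeInt32) prints as
+// "kRepWord32|kTypeInt32".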
+
+
+#undef PRINT
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/machine-type.h b/src/compiler/machine-type.h
new file mode 100644
index 0000000..88b482c
--- /dev/null
+++ b/src/compiler/machine-type.h
@@ -0,0 +1,173 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MACHINE_TYPE_H_
+#define V8_COMPILER_MACHINE_TYPE_H_
+
+#include "src/base/bits.h"
+#include "src/globals.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+class OStream;
+
+namespace compiler {
+
+// Machine-level types and representations.
+// TODO(titzer): Use the real type system instead of MachineType.
+enum MachineType {
+  // Representations.
+  kRepBit = 1 << 0,
+  kRepWord8 = 1 << 1,
+  kRepWord16 = 1 << 2,
+  kRepWord32 = 1 << 3,
+  kRepWord64 = 1 << 4,
+  kRepFloat32 = 1 << 5,
+  kRepFloat64 = 1 << 6,
+  kRepTagged = 1 << 7,
+
+  // Types.
+  kTypeBool = 1 << 8,
+  kTypeInt32 = 1 << 9,
+  kTypeUint32 = 1 << 10,
+  kTypeInt64 = 1 << 11,
+  kTypeUint64 = 1 << 12,
+  kTypeNumber = 1 << 13,
+  kTypeAny = 1 << 14,
+
+  // Machine types.
+  kMachNone = 0,
+  kMachFloat32 = kRepFloat32 | kTypeNumber,
+  kMachFloat64 = kRepFloat64 | kTypeNumber,
+  kMachInt8 = kRepWord8 | kTypeInt32,
+  kMachUint8 = kRepWord8 | kTypeUint32,
+  kMachInt16 = kRepWord16 | kTypeInt32,
+  kMachUint16 = kRepWord16 | kTypeUint32,
+  kMachInt32 = kRepWord32 | kTypeInt32,
+  kMachUint32 = kRepWord32 | kTypeUint32,
+  kMachInt64 = kRepWord64 | kTypeInt64,
+  kMachUint64 = kRepWord64 | kTypeUint64,
+  kMachPtr = (kPointerSize == 4) ? kRepWord32 : kRepWord64,
+  kMachAnyTagged = kRepTagged | kTypeAny
+};
+
+OStream& operator<<(OStream& os, const MachineType& type);
+
+typedef uint16_t MachineTypeUnion;
+
+// Globally useful machine types and constants.
+const MachineTypeUnion kRepMask = kRepBit | kRepWord8 | kRepWord16 |
+                                  kRepWord32 | kRepWord64 | kRepFloat32 |
+                                  kRepFloat64 | kRepTagged;
+const MachineTypeUnion kTypeMask = kTypeBool | kTypeInt32 | kTypeUint32 |
+                                   kTypeInt64 | kTypeUint64 | kTypeNumber |
+                                   kTypeAny;
+
+// Gets only the type of the given type.
+inline MachineType TypeOf(MachineType machine_type) {
+  int result = machine_type & kTypeMask;
+  return static_cast<MachineType>(result);
+}
+
+// Gets only the representation of the given type.
+inline MachineType RepresentationOf(MachineType machine_type) {
+  int result = machine_type & kRepMask;
+  CHECK(base::bits::IsPowerOfTwo32(result));
+  return static_cast<MachineType>(result);
+}
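+
+// For example, TypeOf(kMachInt32) is kTypeInt32 and
+// RepresentationOf(kMachInt32) is kRepWord32.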
+
+// Gets the element size in bytes of the machine type.
+inline int ElementSizeOf(MachineType machine_type) {
+  switch (RepresentationOf(machine_type)) {
+    case kRepBit:
+    case kRepWord8:
+      return 1;
+    case kRepWord16:
+      return 2;
+    case kRepWord32:
+    case kRepFloat32:
+      return 4;
+    case kRepWord64:
+    case kRepFloat64:
+      return 8;
+    case kRepTagged:
+      return kPointerSize;
+    default:
+      UNREACHABLE();
+      return kPointerSize;
+  }
+}
+
+// Describes the inputs and outputs of a function or call.
+template <typename T>
+class Signature : public ZoneObject {
+ public:
+  Signature(size_t return_count, size_t parameter_count, T* reps)
+      : return_count_(return_count),
+        parameter_count_(parameter_count),
+        reps_(reps) {}
+
+  size_t return_count() const { return return_count_; }
+  size_t parameter_count() const { return parameter_count_; }
+
+  T GetParam(size_t index) const {
+    DCHECK(index < parameter_count_);
+    return reps_[return_count_ + index];
+  }
+
+  T GetReturn(size_t index = 0) const {
+    DCHECK(index < return_count_);
+    return reps_[index];
+  }
+
+  // For incrementally building signatures.
+  class Builder {
+   public:
+    Builder(Zone* zone, size_t return_count, size_t parameter_count)
+        : return_count_(return_count),
+          parameter_count_(parameter_count),
+          zone_(zone),
+          rcursor_(0),
+          pcursor_(0),
+          buffer_(zone->NewArray<T>(
+              static_cast<int>(return_count + parameter_count))) {}
+
+    const size_t return_count_;
+    const size_t parameter_count_;
+
+    void AddReturn(T val) {
+      DCHECK(rcursor_ < return_count_);
+      buffer_[rcursor_++] = val;
+    }
+    void AddParam(T val) {
+      DCHECK(pcursor_ < parameter_count_);
+      buffer_[return_count_ + pcursor_++] = val;
+    }
+    Signature<T>* Build() {
+      DCHECK(rcursor_ == return_count_);
+      DCHECK(pcursor_ == parameter_count_);
+      return new (zone_) Signature<T>(return_count_, parameter_count_, buffer_);
+    }
+
+   private:
+    Zone* zone_;
+    size_t rcursor_;
+    size_t pcursor_;
+    T* buffer_;
+  };
+
+ protected:
+  size_t return_count_;
+  size_t parameter_count_;
+  T* reps_;
+};
+
+typedef Signature<MachineType> MachineSignature;
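+
+// Example (editor's sketch of typical Builder use; {zone} is assumed to be a
+// live Zone*):
+//
+//   MachineSignature::Builder builder(zone, 1, 2);  // 1 return, 2 params.
+//   builder.AddReturn(kMachInt32);
+//   builder.AddParam(kMachFloat64);
+//   builder.AddParam(kMachAnyTagged);
+//   MachineSignature* sig = builder.Build();
+//   // sig->GetReturn() == kMachInt32, sig->GetParam(1) == kMachAnyTagged.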
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MACHINE_TYPE_H_
diff --git a/src/compiler/node-aux-data-inl.h b/src/compiler/node-aux-data-inl.h
new file mode 100644
index 0000000..79f1abf
--- /dev/null
+++ b/src/compiler/node-aux-data-inl.h
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_AUX_DATA_INL_H_
+#define V8_COMPILER_NODE_AUX_DATA_INL_H_
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-aux-data.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <class T>
+NodeAuxData<T>::NodeAuxData(Zone* zone)
+    : aux_data_(zone) {}
+
+
+template <class T>
+void NodeAuxData<T>::Set(Node* node, const T& data) {
+  int id = node->id();
+  if (id >= static_cast<int>(aux_data_.size())) {
+    aux_data_.resize(id + 1);
+  }
+  aux_data_[id] = data;
+}
+
+
+template <class T>
+T NodeAuxData<T>::Get(Node* node) {
+  int id = node->id();
+  if (id >= static_cast<int>(aux_data_.size())) {
+    return T();
+  }
+  return aux_data_[id];
+}
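+
+// Example (editor's illustration): a side table of per-node data, indexed by
+// node id and grown on demand.
+//
+//   NodeAuxData<int> counts(zone);
+//   counts.Set(node, 42);
+//   counts.Get(node);   // 42
+//   counts.Get(other);  // never Set: default-constructed, i.e. 0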
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_AUX_DATA_INL_H_
diff --git a/src/compiler/node-aux-data.h b/src/compiler/node-aux-data.h
new file mode 100644
index 0000000..7acce33
--- /dev/null
+++ b/src/compiler/node-aux-data.h
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_AUX_DATA_H_
+#define V8_COMPILER_NODE_AUX_DATA_H_
+
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forward declarations.
+class Graph;
+class Node;
+
+template <class T>
+class NodeAuxData {
+ public:
+  inline explicit NodeAuxData(Zone* zone);
+
+  inline void Set(Node* node, const T& data);
+  inline T Get(Node* node);
+
+ private:
+  ZoneVector<T> aux_data_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_AUX_DATA_H_
diff --git a/src/compiler/node-cache.cc b/src/compiler/node-cache.cc
new file mode 100644
index 0000000..7cda167
--- /dev/null
+++ b/src/compiler/node-cache.cc
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node-cache.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define INITIAL_SIZE 16
+#define LINEAR_PROBE 5
+
+template <typename Key>
+int32_t NodeCacheHash(Key key) {
+  UNIMPLEMENTED();
+  return 0;
+}
+
+template <>
+inline int32_t NodeCacheHash(int32_t key) {
+  return ComputeIntegerHash(key, 0);
+}
+
+
+template <>
+inline int32_t NodeCacheHash(int64_t key) {
+  return ComputeLongHash(key);
+}
+
+
+template <>
+inline int32_t NodeCacheHash(double key) {
+  return ComputeLongHash(bit_cast<int64_t>(key));
+}
+
+
+template <>
+inline int32_t NodeCacheHash(void* key) {
+  return ComputePointerHash(key);
+}
+
+
+template <typename Key>
+bool NodeCache<Key>::Resize(Zone* zone) {
+  if (size_ >= max_) return false;  // Don't grow past the maximum size.
+
+  // Allocate a new block of entries 4x the size.
+  Entry* old_entries = entries_;
+  int old_size = size_ + LINEAR_PROBE;
+  size_ = size_ * 4;
+  int num_entries = size_ + LINEAR_PROBE;
+  entries_ = zone->NewArray<Entry>(num_entries);
+  memset(entries_, 0, sizeof(Entry) * num_entries);
+
+  // Insert the old entries into the new block.
+  for (int i = 0; i < old_size; i++) {
+    Entry* old = &old_entries[i];
+    if (old->value_ != NULL) {
+      int hash = NodeCacheHash(old->key_);
+      int start = hash & (size_ - 1);
+      int end = start + LINEAR_PROBE;
+      for (int j = start; j < end; j++) {
+        Entry* entry = &entries_[j];
+        if (entry->value_ == NULL) {
+          entry->key_ = old->key_;
+          entry->value_ = old->value_;
+          break;
+        }
+      }
+    }
+  }
+  return true;
+}
+
+
+template <typename Key>
+Node** NodeCache<Key>::Find(Zone* zone, Key key) {
+  int32_t hash = NodeCacheHash(key);
+  if (entries_ == NULL) {
+    // Allocate the initial entries and insert the first entry.
+    int num_entries = INITIAL_SIZE + LINEAR_PROBE;
+    entries_ = zone->NewArray<Entry>(num_entries);
+    size_ = INITIAL_SIZE;
+    memset(entries_, 0, sizeof(Entry) * num_entries);
+    Entry* entry = &entries_[hash & (INITIAL_SIZE - 1)];
+    entry->key_ = key;
+    return &entry->value_;
+  }
+
+  while (true) {
+    // Search up to N entries after (linear probing).
+    int start = hash & (size_ - 1);
+    int end = start + LINEAR_PROBE;
+    for (int i = start; i < end; i++) {
+      Entry* entry = &entries_[i];
+      if (entry->key_ == key) return &entry->value_;
+      if (entry->value_ == NULL) {
+        entry->key_ = key;
+        return &entry->value_;
+      }
+    }
+
+    if (!Resize(zone)) break;  // Don't grow past the maximum size.
+  }
+
+  // If resized to maximum and still didn't find space, overwrite an entry.
+  Entry* entry = &entries_[hash & (size_ - 1)];
+  entry->key_ = key;
+  entry->value_ = NULL;
+  return &entry->value_;
+}
+
+
+template class NodeCache<int64_t>;
+template class NodeCache<int32_t>;
+template class NodeCache<void*>;
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/node-cache.h b/src/compiler/node-cache.h
new file mode 100644
index 0000000..35352ea
--- /dev/null
+++ b/src/compiler/node-cache.h
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_CACHE_H_
+#define V8_COMPILER_NODE_CACHE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A cache for nodes based on a key. Useful for implementing canonicalization of
+// nodes such as constants, parameters, etc.
+template <typename Key>
+class NodeCache {
+ public:
+  explicit NodeCache(int max = 256) : entries_(NULL), size_(0), max_(max) {}
+
+  // Search for node associated with {key} and return a pointer to a memory
+  // location in this cache that stores an entry for the key. If the location
+  // returned by this method contains a non-NULL node, the caller can use that
+  // node. Otherwise it is the responsibility of the caller to fill the entry
+  // with a new node.
+  // Note that a previous cache entry may be overwritten if the cache becomes
+  // too full or encounters too many hash collisions.
+  Node** Find(Zone* zone, Key key);
+
+ private:
+  struct Entry {
+    Key key_;
+    Node* value_;
+  };
+
+  Entry* entries_;  // lazily-allocated hash entries.
+  int32_t size_;
+  int32_t max_;
+
+  bool Resize(Zone* zone);
+};
+
+// Various default cache types.
+typedef NodeCache<int64_t> Int64NodeCache;
+typedef NodeCache<int32_t> Int32NodeCache;
+typedef NodeCache<void*> PtrNodeCache;
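+
+// Example (editor's sketch): canonicalizing Int32 constants. The caller owns
+// node creation; {graph} and {common} below are assumed builder objects.
+//
+//   Int32NodeCache cache;
+//   Node** loc = cache.Find(zone, value);
+//   if (*loc == NULL) {
+//     *loc = graph->NewNode(common->Int32Constant(value));
+//   }
+//   return *loc;  // The canonical node for {value}.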
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_CACHE_H_
diff --git a/src/compiler/node-matchers.h b/src/compiler/node-matchers.h
new file mode 100644
index 0000000..e62eaee
--- /dev/null
+++ b/src/compiler/node-matchers.h
@@ -0,0 +1,146 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_MATCHERS_H_
+#define V8_COMPILER_NODE_MATCHERS_H_
+
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A pattern matcher for nodes.
+struct NodeMatcher {
+  explicit NodeMatcher(Node* node) : node_(node) {}
+
+  Node* node() const { return node_; }
+  const Operator* op() const { return node()->op(); }
+  IrOpcode::Value opcode() const { return node()->opcode(); }
+
+  bool HasProperty(Operator::Property property) const {
+    return op()->HasProperty(property);
+  }
+  Node* InputAt(int index) const { return node()->InputAt(index); }
+
+#define DEFINE_IS_OPCODE(Opcode) \
+  bool Is##Opcode() const { return opcode() == IrOpcode::k##Opcode; }
+  ALL_OP_LIST(DEFINE_IS_OPCODE)
+#undef DEFINE_IS_OPCODE
+
+ private:
+  Node* node_;
+};
+
+
+// A pattern matcher for arbitrary value constants.
+template <typename T, IrOpcode::Value kOpcode>
+struct ValueMatcher : public NodeMatcher {
+  explicit ValueMatcher(Node* node)
+      : NodeMatcher(node), value_(), has_value_(opcode() == kOpcode) {
+    if (has_value_) {
+      value_ = OpParameter<T>(node);
+    }
+  }
+
+  bool HasValue() const { return has_value_; }
+  const T& Value() const {
+    DCHECK(HasValue());
+    return value_;
+  }
+
+  bool Is(const T& value) const {
+    return this->HasValue() && this->Value() == value;
+  }
+
+  bool IsInRange(const T& low, const T& high) const {
+    return this->HasValue() && low <= this->Value() && this->Value() <= high;
+  }
+
+ private:
+  T value_;
+  bool has_value_;
+};
+
+
+// A pattern matcher for integer constants.
+template <typename T, IrOpcode::Value kOpcode>
+struct IntMatcher FINAL : public ValueMatcher<T, kOpcode> {
+  explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
+
+  bool IsPowerOf2() const {
+    return this->HasValue() && this->Value() > 0 &&
+           (this->Value() & (this->Value() - 1)) == 0;
+  }
+};
+
+typedef IntMatcher<int32_t, IrOpcode::kInt32Constant> Int32Matcher;
+typedef IntMatcher<uint32_t, IrOpcode::kInt32Constant> Uint32Matcher;
+typedef IntMatcher<int64_t, IrOpcode::kInt64Constant> Int64Matcher;
+typedef IntMatcher<uint64_t, IrOpcode::kInt64Constant> Uint64Matcher;
+
+
+// A pattern matcher for floating point constants.
+template <typename T, IrOpcode::Value kOpcode>
+struct FloatMatcher FINAL : public ValueMatcher<T, kOpcode> {
+  explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
+
+  bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
+};
+
+typedef FloatMatcher<float, IrOpcode::kFloat32Constant> Float32Matcher;
+typedef FloatMatcher<double, IrOpcode::kFloat64Constant> Float64Matcher;
+typedef FloatMatcher<double, IrOpcode::kNumberConstant> NumberMatcher;
+
+
+// A pattern matcher for heap object constants.
+template <typename T>
+struct HeapObjectMatcher FINAL
+    : public ValueMatcher<Unique<T>, IrOpcode::kHeapConstant> {
+  explicit HeapObjectMatcher(Node* node)
+      : ValueMatcher<Unique<T>, IrOpcode::kHeapConstant>(node) {}
+};
+
+
+// For shorter pattern matching code, this struct matches both the left and
+// right hand sides of a binary operation and can put constants on the right
+// if they appear on the left hand side of a commutative operation.
+template <typename Left, typename Right>
+struct BinopMatcher FINAL : public NodeMatcher {
+  explicit BinopMatcher(Node* node)
+      : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
+    if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
+  }
+
+  const Left& left() const { return left_; }
+  const Right& right() const { return right_; }
+
+  bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
+  bool LeftEqualsRight() const { return left().node() == right().node(); }
+
+ private:
+  void PutConstantOnRight() {
+    if (left().HasValue() && !right().HasValue()) {
+      std::swap(left_, right_);
+      node()->ReplaceInput(0, left().node());
+      node()->ReplaceInput(1, right().node());
+    }
+  }
+
+  Left left_;
+  Right right_;
+};
+
+typedef BinopMatcher<Int32Matcher, Int32Matcher> Int32BinopMatcher;
+typedef BinopMatcher<Uint32Matcher, Uint32Matcher> Uint32BinopMatcher;
+typedef BinopMatcher<Int64Matcher, Int64Matcher> Int64BinopMatcher;
+typedef BinopMatcher<Uint64Matcher, Uint64Matcher> Uint64BinopMatcher;
+typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
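+
+// Example (editor's illustration): matching x * 2^k for strength reduction.
+// Because Int32Mul is commutative, the matcher normalizes a constant on the
+// left over to the right.
+//
+//   Int32BinopMatcher m(node);  // {node} is assumed to be an Int32Mul.
+//   if (m.right().IsPowerOf2()) {
+//     // Replace the multiply with a left shift of m.left().node().
+//   } else if (m.IsFoldable()) {
+//     // Both inputs constant: fold to m.left().Value() * m.right().Value().
+//   }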
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_MATCHERS_H_
diff --git a/src/compiler/node-properties-inl.h b/src/compiler/node-properties-inl.h
new file mode 100644
index 0000000..3f6d531
--- /dev/null
+++ b/src/compiler/node-properties-inl.h
@@ -0,0 +1,212 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_PROPERTIES_INL_H_
+#define V8_COMPILER_NODE_PROPERTIES_INL_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Input layout.
+// Inputs are always arranged in order as follows:
+//     0 [ values, context, frame state, effects, control ] node->InputCount()
+
+inline int NodeProperties::FirstValueIndex(Node* node) { return 0; }
+
+inline int NodeProperties::FirstContextIndex(Node* node) {
+  return PastValueIndex(node);
+}
+
+inline int NodeProperties::FirstFrameStateIndex(Node* node) {
+  return PastContextIndex(node);
+}
+
+inline int NodeProperties::FirstEffectIndex(Node* node) {
+  return PastFrameStateIndex(node);
+}
+
+inline int NodeProperties::FirstControlIndex(Node* node) {
+  return PastEffectIndex(node);
+}
+
+
+inline int NodeProperties::PastValueIndex(Node* node) {
+  return FirstValueIndex(node) +
+         OperatorProperties::GetValueInputCount(node->op());
+}
+
+inline int NodeProperties::PastContextIndex(Node* node) {
+  return FirstContextIndex(node) +
+         OperatorProperties::GetContextInputCount(node->op());
+}
+
+inline int NodeProperties::PastFrameStateIndex(Node* node) {
+  return FirstFrameStateIndex(node) +
+         OperatorProperties::GetFrameStateInputCount(node->op());
+}
+
+inline int NodeProperties::PastEffectIndex(Node* node) {
+  return FirstEffectIndex(node) +
+         OperatorProperties::GetEffectInputCount(node->op());
+}
+
+inline int NodeProperties::PastControlIndex(Node* node) {
+  return FirstControlIndex(node) +
+         OperatorProperties::GetControlInputCount(node->op());
+}
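+
+// Worked example (editor's note): for an operator with 2 value inputs, a
+// context input, no frame state, 1 effect input and 1 control input, the
+// input array is [v0, v1, context, effect, control], so FirstValueIndex == 0,
+// FirstContextIndex == 2, FirstFrameStateIndex == FirstEffectIndex == 3 (the
+// frame state range is empty) and FirstControlIndex == 4.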
+
+
+// -----------------------------------------------------------------------------
+// Input accessors.
+
+inline Node* NodeProperties::GetValueInput(Node* node, int index) {
+  DCHECK(0 <= index &&
+         index < OperatorProperties::GetValueInputCount(node->op()));
+  return node->InputAt(FirstValueIndex(node) + index);
+}
+
+inline Node* NodeProperties::GetContextInput(Node* node) {
+  DCHECK(OperatorProperties::HasContextInput(node->op()));
+  return node->InputAt(FirstContextIndex(node));
+}
+
+inline Node* NodeProperties::GetFrameStateInput(Node* node) {
+  DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+  return node->InputAt(FirstFrameStateIndex(node));
+}
+
+inline Node* NodeProperties::GetEffectInput(Node* node, int index) {
+  DCHECK(0 <= index &&
+         index < OperatorProperties::GetEffectInputCount(node->op()));
+  return node->InputAt(FirstEffectIndex(node) + index);
+}
+
+inline Node* NodeProperties::GetControlInput(Node* node, int index) {
+  DCHECK(0 <= index &&
+         index < OperatorProperties::GetControlInputCount(node->op()));
+  return node->InputAt(FirstControlIndex(node) + index);
+}
+
+inline int NodeProperties::GetFrameStateIndex(Node* node) {
+  DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+  return FirstFrameStateIndex(node);
+}
+
+// -----------------------------------------------------------------------------
+// Edge kinds.
+
+inline bool NodeProperties::IsInputRange(Node::Edge edge, int first, int num) {
+  // TODO(titzer): edge.index() is linear time;
+  // edges maybe need to be marked as value/effect/control.
+  if (num == 0) return false;
+  int index = edge.index();
+  return first <= index && index < first + num;
+}
+
+inline bool NodeProperties::IsValueEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstValueIndex(node),
+                      OperatorProperties::GetValueInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsContextEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstContextIndex(node),
+                      OperatorProperties::GetContextInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsEffectEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstEffectIndex(node),
+                      OperatorProperties::GetEffectInputCount(node->op()));
+}
+
+inline bool NodeProperties::IsControlEdge(Node::Edge edge) {
+  Node* node = edge.from();
+  return IsInputRange(edge, FirstControlIndex(node),
+                      OperatorProperties::GetControlInputCount(node->op()));
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous predicates.
+
+inline bool NodeProperties::IsControl(Node* node) {
+  return IrOpcode::IsControlOpcode(node->opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Miscellaneous mutators.
+
+inline void NodeProperties::ReplaceControlInput(Node* node, Node* control) {
+  node->ReplaceInput(FirstControlIndex(node), control);
+}
+
+inline void NodeProperties::ReplaceEffectInput(Node* node, Node* effect,
+                                               int index) {
+  DCHECK(index < OperatorProperties::GetEffectInputCount(node->op()));
+  return node->ReplaceInput(FirstEffectIndex(node) + index, effect);
+}
+
+inline void NodeProperties::ReplaceFrameStateInput(Node* node,
+                                                   Node* frame_state) {
+  DCHECK(OperatorProperties::HasFrameStateInput(node->op()));
+  node->ReplaceInput(FirstFrameStateIndex(node), frame_state);
+}
+
+inline void NodeProperties::RemoveNonValueInputs(Node* node) {
+  node->TrimInputCount(OperatorProperties::GetValueInputCount(node->op()));
+}
+
+
+// Replace value uses of {node} with {value} and effect uses of {node} with
+// {effect}. If {effect == NULL}, then use the effect input to {node}.
+inline void NodeProperties::ReplaceWithValue(Node* node, Node* value,
+                                             Node* effect) {
+  DCHECK(!OperatorProperties::HasControlOutput(node->op()));
+  if (effect == NULL && OperatorProperties::HasEffectInput(node->op())) {
+    effect = NodeProperties::GetEffectInput(node);
+  }
+
+  // Requires distinguishing between value and effect edges.
+  UseIter iter = node->uses().begin();
+  while (iter != node->uses().end()) {
+    if (NodeProperties::IsEffectEdge(iter.edge())) {
+      DCHECK_NE(NULL, effect);
+      iter = iter.UpdateToAndIncrement(effect);
+    } else {
+      iter = iter.UpdateToAndIncrement(value);
+    }
+  }
+}
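+
+// Example (editor's note): when purely lowering a call node {c} to a value
+// {r}, ReplaceWithValue(c, r) reroutes all value uses of {c} to {r} and all
+// effect uses of {c} to {c}'s own effect input, splicing {c} out of the
+// effect chain.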
+
+
+// -----------------------------------------------------------------------------
+// Type Bounds.
+
+inline Bounds NodeProperties::GetBounds(Node* node) { return node->bounds(); }
+
+inline void NodeProperties::SetBounds(Node* node, Bounds b) {
+  node->set_bounds(b);
+}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_PROPERTIES_INL_H_
diff --git a/src/compiler/node-properties.h b/src/compiler/node-properties.h
new file mode 100644
index 0000000..94bd731
--- /dev/null
+++ b/src/compiler/node-properties.h
@@ -0,0 +1,64 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_PROPERTIES_H_
+#define V8_COMPILER_NODE_PROPERTIES_H_
+
+#include "src/compiler/node.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+
+// A facade that simplifies access to the different kinds of inputs to a node.
+class NodeProperties {
+ public:
+  static inline Node* GetValueInput(Node* node, int index);
+  static inline Node* GetContextInput(Node* node);
+  static inline Node* GetFrameStateInput(Node* node);
+  static inline Node* GetEffectInput(Node* node, int index = 0);
+  static inline Node* GetControlInput(Node* node, int index = 0);
+
+  static inline int GetFrameStateIndex(Node* node);
+
+  static inline bool IsValueEdge(Node::Edge edge);
+  static inline bool IsContextEdge(Node::Edge edge);
+  static inline bool IsEffectEdge(Node::Edge edge);
+  static inline bool IsControlEdge(Node::Edge edge);
+
+  static inline bool IsControl(Node* node);
+
+  static inline void ReplaceControlInput(Node* node, Node* control);
+  static inline void ReplaceEffectInput(Node* node, Node* effect,
+                                        int index = 0);
+  static inline void ReplaceFrameStateInput(Node* node, Node* frame_state);
+  static inline void RemoveNonValueInputs(Node* node);
+  static inline void ReplaceWithValue(Node* node, Node* value,
+                                      Node* effect = NULL);
+
+  static inline Bounds GetBounds(Node* node);
+  static inline void SetBounds(Node* node, Bounds bounds);
+
+  static inline int FirstValueIndex(Node* node);
+  static inline int FirstContextIndex(Node* node);
+  static inline int FirstFrameStateIndex(Node* node);
+  static inline int FirstEffectIndex(Node* node);
+  static inline int FirstControlIndex(Node* node);
+  static inline int PastValueIndex(Node* node);
+  static inline int PastContextIndex(Node* node);
+  static inline int PastFrameStateIndex(Node* node);
+  static inline int PastEffectIndex(Node* node);
+  static inline int PastControlIndex(Node* node);
+
+  static inline bool IsInputRange(Node::Edge edge, int first, int count);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_PROPERTIES_H_
diff --git a/src/compiler/node.cc b/src/compiler/node.cc
new file mode 100644
index 0000000..7df736e
--- /dev/null
+++ b/src/compiler/node.cc
@@ -0,0 +1,63 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+
+#include "src/compiler/generic-node-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+void Node::Kill() {
+  DCHECK_NOT_NULL(op());
+  RemoveAllInputs();
+  DCHECK(uses().empty());
+}
+
+
+void Node::CollectProjections(NodeVector* projections) {
+  for (size_t i = 0; i < projections->size(); i++) {
+    (*projections)[i] = NULL;
+  }
+  for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+    if ((*i)->opcode() != IrOpcode::kProjection) continue;
+    size_t index = OpParameter<size_t>(*i);
+    DCHECK_LT(index, projections->size());
+    DCHECK_EQ(NULL, (*projections)[index]);
+    (*projections)[index] = *i;
+  }
+}
+
+
+Node* Node::FindProjection(size_t projection_index) {
+  for (UseIter i = uses().begin(); i != uses().end(); ++i) {
+    if ((*i)->opcode() == IrOpcode::kProjection &&
+        OpParameter<size_t>(*i) == projection_index) {
+      return *i;
+    }
+  }
+  return NULL;
+}
+
+
+OStream& operator<<(OStream& os, const Operator& op) { return op.PrintTo(os); }
+
+
+OStream& operator<<(OStream& os, const Node& n) {
+  os << n.id() << ": " << *n.op();
+  if (n.op()->InputCount() != 0) {
+    os << "(";
+    for (int i = 0; i < n.op()->InputCount(); ++i) {
+      if (i != 0) os << ", ";
+      os << n.InputAt(i)->id();
+    }
+    os << ")";
+  }
+  return os;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/node.h b/src/compiler/node.h
new file mode 100644
index 0000000..c3f5a53
--- /dev/null
+++ b/src/compiler/node.h
@@ -0,0 +1,94 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_NODE_H_
+#define V8_COMPILER_NODE_H_
+
+#include <deque>
+#include <set>
+#include <vector>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/types.h"
+#include "src/zone.h"
+#include "src/zone-allocator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodeData {
+ public:
+  const Operator* op() const { return op_; }
+  void set_op(const Operator* op) { op_ = op; }
+
+  IrOpcode::Value opcode() const {
+    DCHECK(op_->opcode() <= IrOpcode::kLast);
+    return static_cast<IrOpcode::Value>(op_->opcode());
+  }
+
+  Bounds bounds() { return bounds_; }
+
+ protected:
+  const Operator* op_;
+  Bounds bounds_;
+  explicit NodeData(Zone* zone) : bounds_(Bounds(Type::None(zone))) {}
+
+  friend class NodeProperties;
+  void set_bounds(Bounds b) { bounds_ = b; }
+};
+
+// A Node is the basic primitive of an IR graph. In addition to the members
+// inherited from GenericNode, Nodes only contain a mutable Operator that may
+// change during compilation, e.g. during lowering passes. Other information
+// that needs to be associated with Nodes during compilation must be stored
+// out-of-line, indexed by the Node's id.
+class Node FINAL : public GenericNode<NodeData, Node> {
+ public:
+  Node(GenericGraphBase* graph, int input_count)
+      : GenericNode<NodeData, Node>(graph, input_count) {}
+
+  void Initialize(const Operator* op) { set_op(op); }
+
+  bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
+  void Kill();
+
+  void CollectProjections(ZoneVector<Node*>* projections);
+  Node* FindProjection(size_t projection_index);
+};
+
+OStream& operator<<(OStream& os, const Node& n);
+
+typedef GenericGraphVisit::NullNodeVisitor<NodeData, Node> NullNodeVisitor;
+
+typedef std::set<Node*, std::less<Node*>, zone_allocator<Node*> > NodeSet;
+typedef NodeSet::iterator NodeSetIter;
+typedef NodeSet::reverse_iterator NodeSetRIter;
+
+typedef ZoneVector<Node*> NodeVector;
+typedef NodeVector::iterator NodeVectorIter;
+typedef NodeVector::const_iterator NodeVectorConstIter;
+typedef NodeVector::reverse_iterator NodeVectorRIter;
+
+typedef ZoneVector<NodeVector> NodeVectorVector;
+typedef NodeVectorVector::iterator NodeVectorVectorIter;
+typedef NodeVectorVector::reverse_iterator NodeVectorVectorRIter;
+
+typedef Node::Uses::iterator UseIter;
+typedef Node::Inputs::iterator InputIter;
+
+// Helper to extract parameters from Operator1<*> nodes.
+template <typename T>
+static inline const T& OpParameter(const Node* node) {
+  return OpParameter<T>(node->op());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_NODE_H_
diff --git a/src/compiler/opcodes.h b/src/compiler/opcodes.h
new file mode 100644
index 0000000..e210abd
--- /dev/null
+++ b/src/compiler/opcodes.h
@@ -0,0 +1,310 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPCODES_H_
+#define V8_COMPILER_OPCODES_H_
+
+// Opcodes for control operators.
+#define INNER_CONTROL_OP_LIST(V) \
+  V(Dead)                        \
+  V(Loop)                        \
+  V(Branch)                      \
+  V(IfTrue)                      \
+  V(IfFalse)                     \
+  V(Merge)                       \
+  V(Return)                      \
+  V(Throw)
+
+#define CONTROL_OP_LIST(V) \
+  INNER_CONTROL_OP_LIST(V) \
+  V(Start)                 \
+  V(End)
+
+// Opcodes for common operators.
+#define LEAF_OP_LIST(V) \
+  V(Int32Constant)      \
+  V(Int64Constant)      \
+  V(Float32Constant)    \
+  V(Float64Constant)    \
+  V(ExternalConstant)   \
+  V(NumberConstant)     \
+  V(HeapConstant)
+
+#define INNER_OP_LIST(V) \
+  V(Phi)                 \
+  V(EffectPhi)           \
+  V(ControlEffect)       \
+  V(ValueEffect)         \
+  V(Finish)              \
+  V(FrameState)          \
+  V(StateValues)         \
+  V(Call)                \
+  V(Parameter)           \
+  V(Projection)
+
+#define COMMON_OP_LIST(V) \
+  LEAF_OP_LIST(V)         \
+  INNER_OP_LIST(V)
+
+// Opcodes for JavaScript operators.
+#define JS_COMPARE_BINOP_LIST(V) \
+  V(JSEqual)                     \
+  V(JSNotEqual)                  \
+  V(JSStrictEqual)               \
+  V(JSStrictNotEqual)            \
+  V(JSLessThan)                  \
+  V(JSGreaterThan)               \
+  V(JSLessThanOrEqual)           \
+  V(JSGreaterThanOrEqual)
+
+#define JS_BITWISE_BINOP_LIST(V) \
+  V(JSBitwiseOr)                 \
+  V(JSBitwiseXor)                \
+  V(JSBitwiseAnd)                \
+  V(JSShiftLeft)                 \
+  V(JSShiftRight)                \
+  V(JSShiftRightLogical)
+
+#define JS_ARITH_BINOP_LIST(V) \
+  V(JSAdd)                     \
+  V(JSSubtract)                \
+  V(JSMultiply)                \
+  V(JSDivide)                  \
+  V(JSModulus)
+
+#define JS_SIMPLE_BINOP_LIST(V) \
+  JS_COMPARE_BINOP_LIST(V)      \
+  JS_BITWISE_BINOP_LIST(V)      \
+  JS_ARITH_BINOP_LIST(V)
+
+#define JS_LOGIC_UNOP_LIST(V) V(JSUnaryNot)
+
+#define JS_CONVERSION_UNOP_LIST(V) \
+  V(JSToBoolean)                   \
+  V(JSToNumber)                    \
+  V(JSToString)                    \
+  V(JSToName)                      \
+  V(JSToObject)
+
+#define JS_OTHER_UNOP_LIST(V) \
+  V(JSTypeOf)
+
+#define JS_SIMPLE_UNOP_LIST(V) \
+  JS_LOGIC_UNOP_LIST(V)        \
+  JS_CONVERSION_UNOP_LIST(V)   \
+  JS_OTHER_UNOP_LIST(V)
+
+#define JS_OBJECT_OP_LIST(V) \
+  V(JSCreate)                \
+  V(JSLoadProperty)          \
+  V(JSLoadNamed)             \
+  V(JSStoreProperty)         \
+  V(JSStoreNamed)            \
+  V(JSDeleteProperty)        \
+  V(JSHasProperty)           \
+  V(JSInstanceOf)
+
+#define JS_CONTEXT_OP_LIST(V) \
+  V(JSLoadContext)            \
+  V(JSStoreContext)           \
+  V(JSCreateFunctionContext)  \
+  V(JSCreateCatchContext)     \
+  V(JSCreateWithContext)      \
+  V(JSCreateBlockContext)     \
+  V(JSCreateModuleContext)    \
+  V(JSCreateGlobalContext)
+
+#define JS_OTHER_OP_LIST(V) \
+  V(JSCallConstruct)        \
+  V(JSCallFunction)         \
+  V(JSCallRuntime)          \
+  V(JSYield)                \
+  V(JSDebugger)
+
+#define JS_OP_LIST(V)     \
+  JS_SIMPLE_BINOP_LIST(V) \
+  JS_SIMPLE_UNOP_LIST(V)  \
+  JS_OBJECT_OP_LIST(V)    \
+  JS_CONTEXT_OP_LIST(V)   \
+  JS_OTHER_OP_LIST(V)
+
+// Opcodes for VirtualMachine-level operators.
+#define SIMPLIFIED_OP_LIST(V) \
+  V(BooleanNot)               \
+  V(BooleanToNumber)          \
+  V(NumberEqual)              \
+  V(NumberLessThan)           \
+  V(NumberLessThanOrEqual)    \
+  V(NumberAdd)                \
+  V(NumberSubtract)           \
+  V(NumberMultiply)           \
+  V(NumberDivide)             \
+  V(NumberModulus)            \
+  V(NumberToInt32)            \
+  V(NumberToUint32)           \
+  V(ReferenceEqual)           \
+  V(StringEqual)              \
+  V(StringLessThan)           \
+  V(StringLessThanOrEqual)    \
+  V(StringAdd)                \
+  V(ChangeTaggedToInt32)      \
+  V(ChangeTaggedToUint32)     \
+  V(ChangeTaggedToFloat64)    \
+  V(ChangeInt32ToTagged)      \
+  V(ChangeUint32ToTagged)     \
+  V(ChangeFloat64ToTagged)    \
+  V(ChangeBoolToBit)          \
+  V(ChangeBitToBool)          \
+  V(LoadField)                \
+  V(LoadElement)              \
+  V(StoreField)               \
+  V(StoreElement)
+
+// Opcodes for Machine-level operators.
+#define MACHINE_OP_LIST(V)    \
+  V(Load)                     \
+  V(Store)                    \
+  V(Word32And)                \
+  V(Word32Or)                 \
+  V(Word32Xor)                \
+  V(Word32Shl)                \
+  V(Word32Shr)                \
+  V(Word32Sar)                \
+  V(Word32Ror)                \
+  V(Word32Equal)              \
+  V(Word64And)                \
+  V(Word64Or)                 \
+  V(Word64Xor)                \
+  V(Word64Shl)                \
+  V(Word64Shr)                \
+  V(Word64Sar)                \
+  V(Word64Ror)                \
+  V(Word64Equal)              \
+  V(Int32Add)                 \
+  V(Int32AddWithOverflow)     \
+  V(Int32Sub)                 \
+  V(Int32SubWithOverflow)     \
+  V(Int32Mul)                 \
+  V(Int32Div)                 \
+  V(Int32UDiv)                \
+  V(Int32Mod)                 \
+  V(Int32UMod)                \
+  V(Int32LessThan)            \
+  V(Int32LessThanOrEqual)     \
+  V(Uint32LessThan)           \
+  V(Uint32LessThanOrEqual)    \
+  V(Int64Add)                 \
+  V(Int64Sub)                 \
+  V(Int64Mul)                 \
+  V(Int64Div)                 \
+  V(Int64UDiv)                \
+  V(Int64Mod)                 \
+  V(Int64UMod)                \
+  V(Int64LessThan)            \
+  V(Int64LessThanOrEqual)     \
+  V(ChangeFloat32ToFloat64)   \
+  V(ChangeFloat64ToInt32)     \
+  V(ChangeFloat64ToUint32)    \
+  V(ChangeInt32ToFloat64)     \
+  V(ChangeInt32ToInt64)       \
+  V(ChangeUint32ToFloat64)    \
+  V(ChangeUint32ToUint64)     \
+  V(TruncateFloat64ToFloat32) \
+  V(TruncateFloat64ToInt32)   \
+  V(TruncateInt64ToInt32)     \
+  V(Float64Add)               \
+  V(Float64Sub)               \
+  V(Float64Mul)               \
+  V(Float64Div)               \
+  V(Float64Mod)               \
+  V(Float64Sqrt)              \
+  V(Float64Equal)             \
+  V(Float64LessThan)          \
+  V(Float64LessThanOrEqual)
+
+#define VALUE_OP_LIST(V) \
+  COMMON_OP_LIST(V)      \
+  SIMPLIFIED_OP_LIST(V)  \
+  MACHINE_OP_LIST(V)     \
+  JS_OP_LIST(V)
+
+// The combination of all operators at all levels and the common operators.
+#define ALL_OP_LIST(V) \
+  CONTROL_OP_LIST(V)   \
+  VALUE_OP_LIST(V)
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Declare an enumeration with all the opcodes at all levels so that they
+// can be globally, uniquely numbered.
+class IrOpcode {
+ public:
+  enum Value {
+#define DECLARE_OPCODE(x) k##x,
+    ALL_OP_LIST(DECLARE_OPCODE)
+#undef DECLARE_OPCODE
+    kLast = -1
+#define COUNT_OPCODE(x) +1
+            ALL_OP_LIST(COUNT_OPCODE)
+#undef COUNT_OPCODE
+  };
+
+  // Returns the mnemonic name of an opcode.
+  static const char* Mnemonic(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return #x;
+      ALL_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return "UnknownOpcode";
+    }
+  }
+
+  static bool IsJsOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      JS_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+
+  static bool IsControlOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      CONTROL_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+
+  static bool IsCommonOpcode(Value val) {
+    switch (val) {
+#define RETURN_NAME(x) \
+  case k##x:           \
+    return true;
+      CONTROL_OP_LIST(RETURN_NAME)
+      COMMON_OP_LIST(RETURN_NAME)
+#undef RETURN_NAME
+      default:
+        return false;
+    }
+  }
+};
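+
+// Example (editor's illustration):
+//
+//   IrOpcode::Mnemonic(IrOpcode::kInt32Add);       // "Int32Add"
+//   IrOpcode::IsJsOpcode(IrOpcode::kJSAdd);        // true
+//   IrOpcode::IsControlOpcode(IrOpcode::kBranch);  // true
+//   IrOpcode::IsCommonOpcode(IrOpcode::kPhi);      // true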
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPCODES_H_
diff --git a/src/compiler/operator-properties-inl.h b/src/compiler/operator-properties-inl.h
new file mode 100644
index 0000000..9dae106
--- /dev/null
+++ b/src/compiler/operator-properties-inl.h
@@ -0,0 +1,183 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator-properties.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+inline bool OperatorProperties::HasValueInput(const Operator* op) {
+  return OperatorProperties::GetValueInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasContextInput(const Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  return IrOpcode::IsJsOpcode(opcode);
+}
+
+inline bool OperatorProperties::HasEffectInput(const Operator* op) {
+  return OperatorProperties::GetEffectInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasControlInput(const Operator* op) {
+  return OperatorProperties::GetControlInputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasFrameStateInput(const Operator* op) {
+  if (!FLAG_turbo_deoptimization) {
+    return false;
+  }
+
+  switch (op->opcode()) {
+    case IrOpcode::kFrameState:
+      return true;
+    case IrOpcode::kJSCallRuntime: {
+      Runtime::FunctionId function = OpParameter<Runtime::FunctionId>(op);
+      return Linkage::NeedsFrameState(function);
+    }
+
+    // Strict equality cannot lazily deoptimize.
+    case IrOpcode::kJSStrictEqual:
+    case IrOpcode::kJSStrictNotEqual:
+      return false;
+
+    // Calls
+    case IrOpcode::kJSCallFunction:
+    case IrOpcode::kJSCallConstruct:
+
+    // Compare operations
+    case IrOpcode::kJSEqual:
+    case IrOpcode::kJSNotEqual:
+    case IrOpcode::kJSLessThan:
+    case IrOpcode::kJSGreaterThan:
+    case IrOpcode::kJSLessThanOrEqual:
+    case IrOpcode::kJSGreaterThanOrEqual:
+
+    // Binary operations
+    case IrOpcode::kJSBitwiseOr:
+    case IrOpcode::kJSBitwiseXor:
+    case IrOpcode::kJSBitwiseAnd:
+    case IrOpcode::kJSShiftLeft:
+    case IrOpcode::kJSShiftRight:
+    case IrOpcode::kJSShiftRightLogical:
+    case IrOpcode::kJSAdd:
+    case IrOpcode::kJSSubtract:
+    case IrOpcode::kJSMultiply:
+    case IrOpcode::kJSDivide:
+    case IrOpcode::kJSModulus:
+    case IrOpcode::kJSLoadProperty:
+    case IrOpcode::kJSStoreProperty:
+    case IrOpcode::kJSLoadNamed:
+    case IrOpcode::kJSStoreNamed:
+      return true;
+
+    default:
+      return false;
+  }
+}
+
+inline int OperatorProperties::GetValueInputCount(const Operator* op) {
+  return op->InputCount();
+}
+
+inline int OperatorProperties::GetContextInputCount(const Operator* op) {
+  return OperatorProperties::HasContextInput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
+  return OperatorProperties::HasFrameStateInput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetEffectInputCount(const Operator* op) {
+  if (op->opcode() == IrOpcode::kEffectPhi ||
+      op->opcode() == IrOpcode::kFinish) {
+    return OpParameter<int>(op);
+  }
+  if (op->HasProperty(Operator::kNoRead) && op->HasProperty(Operator::kNoWrite))
+    return 0;  // no effects.
+  return 1;
+}
+
+inline int OperatorProperties::GetControlInputCount(const Operator* op) {
+  switch (op->opcode()) {
+    case IrOpcode::kPhi:
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kControlEffect:
+      return 1;
+#define OPCODE_CASE(x) case IrOpcode::k##x:
+      CONTROL_OP_LIST(OPCODE_CASE)
+#undef OPCODE_CASE
+      // Control operators are Operator1<int>.
+      return OpParameter<int>(op);
+    default:
+      // Operators that have write effects must have a control
+      // dependency. Effect dependencies only ensure the correct order of
+      // write/read operations without consideration of control flow.
+      // Without an explicit control dependency, writes could float too early
+      // in the schedule, along a path that should not produce a side effect.
+      return op->HasProperty(Operator::kNoWrite) ? 0 : 1;
+  }
+  return 0;
+}
+
+inline int OperatorProperties::GetTotalInputCount(const Operator* op) {
+  return GetValueInputCount(op) + GetContextInputCount(op) +
+         GetFrameStateInputCount(op) + GetEffectInputCount(op) +
+         GetControlInputCount(op);
+}
+
+// -----------------------------------------------------------------------------
+// Output properties.
+
+inline bool OperatorProperties::HasValueOutput(const Operator* op) {
+  return GetValueOutputCount(op) > 0;
+}
+
+inline bool OperatorProperties::HasEffectOutput(const Operator* op) {
+  return op->opcode() == IrOpcode::kStart ||
+         op->opcode() == IrOpcode::kControlEffect ||
+         op->opcode() == IrOpcode::kValueEffect ||
+         (op->opcode() != IrOpcode::kFinish && GetEffectInputCount(op) > 0);
+}
+
+inline bool OperatorProperties::HasControlOutput(const Operator* op) {
+  IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
+  return (opcode != IrOpcode::kEnd && IrOpcode::IsControlOpcode(opcode));
+}
+
+
+inline int OperatorProperties::GetValueOutputCount(const Operator* op) {
+  return op->OutputCount();
+}
+
+inline int OperatorProperties::GetEffectOutputCount(const Operator* op) {
+  return HasEffectOutput(op) ? 1 : 0;
+}
+
+inline int OperatorProperties::GetControlOutputCount(const Operator* node) {
+  return node->opcode() == IrOpcode::kBranch ? 2 : HasControlOutput(node) ? 1
+                                                                          : 0;
+}
+
+
+inline bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
+  uint8_t opcode = op->opcode();
+  return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
+         opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
+         opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
+         opcode == IrOpcode::kIfFalse;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
diff --git a/src/compiler/operator-properties.h b/src/compiler/operator-properties.h
new file mode 100644
index 0000000..718eea0
--- /dev/null
+++ b/src/compiler/operator-properties.h
@@ -0,0 +1,44 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_PROPERTIES_H_
+#define V8_COMPILER_OPERATOR_PROPERTIES_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Operator;
+
+class OperatorProperties {
+ public:
+  static inline bool HasValueInput(const Operator* op);
+  static inline bool HasContextInput(const Operator* op);
+  static inline bool HasEffectInput(const Operator* op);
+  static inline bool HasControlInput(const Operator* op);
+  static inline bool HasFrameStateInput(const Operator* op);
+
+  static inline int GetValueInputCount(const Operator* op);
+  static inline int GetContextInputCount(const Operator* op);
+  static inline int GetEffectInputCount(const Operator* op);
+  static inline int GetControlInputCount(const Operator* op);
+  static inline int GetFrameStateInputCount(const Operator* op);
+  static inline int GetTotalInputCount(const Operator* op);
+
+  static inline bool HasValueOutput(const Operator* op);
+  static inline bool HasEffectOutput(const Operator* op);
+  static inline bool HasControlOutput(const Operator* op);
+
+  static inline int GetValueOutputCount(const Operator* op);
+  static inline int GetEffectOutputCount(const Operator* op);
+  static inline int GetControlOutputCount(const Operator* op);
+
+  static inline bool IsBasicBlockBegin(const Operator* op);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPERATOR_PROPERTIES_H_
diff --git a/src/compiler/operator.cc b/src/compiler/operator.cc
new file mode 100644
index 0000000..35f9c88
--- /dev/null
+++ b/src/compiler/operator.cc
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Operator::~Operator() {}
+
+
+SimpleOperator::SimpleOperator(Opcode opcode, Properties properties,
+                               int input_count, int output_count,
+                               const char* mnemonic)
+    : Operator(opcode, properties, mnemonic),
+      input_count_(input_count),
+      output_count_(output_count) {}
+
+
+SimpleOperator::~SimpleOperator() {}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/operator.h b/src/compiler/operator.h
new file mode 100644
index 0000000..5137806
--- /dev/null
+++ b/src/compiler/operator.h
@@ -0,0 +1,262 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_OPERATOR_H_
+#define V8_COMPILER_OPERATOR_H_
+
+#include "src/base/flags.h"
+#include "src/ostreams.h"
+#include "src/unique.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// An operator represents a description of the "computation" of a node in the
+// compiler IR. A computation takes values (i.e. data) as input and produces
+// zero or more values as output. The side-effects of a computation must be
+// captured by additional control and data dependencies which are part of the
+// IR graph.
+// Operators are immutable and describe the statically-known parts of a
+// computation. Thus they can be safely shared by many different nodes in the
+// IR graph, or even globally between graphs. Operators can have "static
+// parameters" which are compile-time constant parameters to the operator, such
+// as the name for a named field access, the ID of a runtime function, etc.
+// Static parameters are private to the operator and only semantically
+// meaningful to the operator itself.
+class Operator : public ZoneObject {
+ public:
+  typedef uint8_t Opcode;
+
+  // Properties inform the operator-independent optimizer about legal
+  // transformations for nodes that have this operator.
+  enum Property {
+    kNoProperties = 0,
+    kReducible = 1 << 0,    // Participates in strength reduction.
+    kCommutative = 1 << 1,  // OP(a, b) == OP(b, a) for all inputs.
+    kAssociative = 1 << 2,  // OP(a, OP(b,c)) == OP(OP(a,b), c) for all inputs.
+    kIdempotent = 1 << 3,   // OP(a); OP(a) == OP(a).
+    kNoRead = 1 << 4,       // Has no scheduling dependency on Effects.
+    kNoWrite = 1 << 5,      // Does not modify any Effects and thereby
+                            // create new scheduling dependencies.
+    kNoThrow = 1 << 6,      // Can never generate an exception.
+    kFoldable = kNoRead | kNoWrite,
+    kEliminatable = kNoWrite | kNoThrow,
+    kPure = kNoRead | kNoWrite | kNoThrow | kIdempotent
+  };
+  typedef base::Flags<Property, uint8_t> Properties;
+
+  Operator(Opcode opcode, Properties properties, const char* mnemonic)
+      : opcode_(opcode), properties_(properties), mnemonic_(mnemonic) {}
+  virtual ~Operator();
+
+  // A small integer unique to all instances of a particular kind of operator,
+  // useful for quick matching for specific kinds of operators. For fast access
+  // the opcode is stored directly in the operator object.
+  Opcode opcode() const { return opcode_; }
+
+  // Returns a constant string representing the mnemonic of the operator,
+  // without the static parameters. Useful for debugging.
+  const char* mnemonic() const { return mnemonic_; }
+
+  // Check if this operator equals another operator. Equivalent operators can
+  // be merged, and nodes with equivalent operators and equivalent inputs
+  // can be merged.
+  virtual bool Equals(const Operator* other) const = 0;
+
+  // Compute a hashcode to speed up equivalence-set checking.
+  // Equal operators should always have equal hashcodes, and unequal operators
+  // should have unequal hashcodes with high probability.
+  virtual int HashCode() const = 0;
+
+  // Check whether this operator has the given property.
+  bool HasProperty(Property property) const {
+    return (properties() & property) == property;
+  }
+
+  // Number of data inputs to the operator, for verifying graph structure.
+  virtual int InputCount() const = 0;
+
+  // Number of data outputs from the operator, for verifying graph structure.
+  virtual int OutputCount() const = 0;
+
+  Properties properties() const { return properties_; }
+
+  // TODO(titzer): API for input and output types, for typechecking graph.
+ protected:
+  // Print the full operator into the given stream, including any
+  // static parameters. Useful for debugging and visualizing the IR.
+  virtual OStream& PrintTo(OStream& os) const = 0;  // NOLINT
+  friend OStream& operator<<(OStream& os, const Operator& op);
+
+ private:
+  Opcode opcode_;
+  Properties properties_;
+  const char* mnemonic_;
+
+  DISALLOW_COPY_AND_ASSIGN(Operator);
+};
+
+DEFINE_OPERATORS_FOR_FLAGS(Operator::Properties)
+
+OStream& operator<<(OStream& os, const Operator& op);
+
+// An implementation of Operator that has no static parameters. Such operators
+// have just a name, an opcode, and a fixed number of inputs and outputs.
+// They can be represented by singletons and shared globally.
+class SimpleOperator : public Operator {
+ public:
+  SimpleOperator(Opcode opcode, Properties properties, int input_count,
+                 int output_count, const char* mnemonic);
+  ~SimpleOperator();
+
+  virtual bool Equals(const Operator* that) const FINAL {
+    return opcode() == that->opcode();
+  }
+  virtual int HashCode() const FINAL { return opcode(); }
+  virtual int InputCount() const FINAL { return input_count_; }
+  virtual int OutputCount() const FINAL { return output_count_; }
+
+ private:
+  virtual OStream& PrintTo(OStream& os) const FINAL {  // NOLINT
+    return os << mnemonic();
+  }
+
+  int input_count_;
+  int output_count_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimpleOperator);
+};
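+
+// Example (editor's sketch): a parameterless operator that can be a shared
+// singleton, since equality is determined by the opcode alone.
+//
+//   SimpleOperator add(IrOpcode::kInt32Add,
+//                      Operator::kPure | Operator::kCommutative |
+//                          Operator::kAssociative,
+//                      2, 1, "Int32Add");
+//   add.HasProperty(Operator::kCommutative);  // true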
+
+// Template specialization implements a kind of type class for dealing with the
+// static parameters of Operator1 automatically.
+template <typename T>
+struct StaticParameterTraits {
+  static OStream& PrintTo(OStream& os, T val) {  // NOLINT
+    return os << "??";
+  }
+  static int HashCode(T a) { return 0; }
+  static bool Equals(T a, T b) {
+    return false;  // Not every T has a ==. By default, be conservative.
+  }
+};
+
+// Specialization for static parameters of type {int}.
+template <>
+struct StaticParameterTraits<int> {
+  static OStream& PrintTo(OStream& os, int val) {  // NOLINT
+    return os << val;
+  }
+  static int HashCode(int a) { return a; }
+  static bool Equals(int a, int b) { return a == b; }
+};
+
+// Specialization for static parameters of type {double}.
+template <>
+struct StaticParameterTraits<double> {
+  static OStream& PrintTo(OStream& os, double val) {  // NOLINT
+    return os << val;
+  }
+  static int HashCode(double a) {
+    return static_cast<int>(bit_cast<int64_t>(a));
+  }
+  static bool Equals(double a, double b) {
+    return bit_cast<int64_t>(a) == bit_cast<int64_t>(b);
+  }
+};
+
+// Specialization for static parameters of type {Unique<Object>}.
+template <>
+struct StaticParameterTraits<Unique<Object> > {
+  static OStream& PrintTo(OStream& os, Unique<Object> val) {  // NOLINT
+    return os << Brief(*val.handle());
+  }
+  static int HashCode(Unique<Object> a) {
+    return static_cast<int>(a.Hashcode());
+  }
+  static bool Equals(Unique<Object> a, Unique<Object> b) { return a == b; }
+};
+
+// Specialization for static parameters of type {Unique<Name>}.
+template <>
+struct StaticParameterTraits<Unique<Name> > {
+  static OStream& PrintTo(OStream& os, Unique<Name> val) {  // NOLINT
+    return os << Brief(*val.handle());
+  }
+  static int HashCode(Unique<Name> a) { return static_cast<int>(a.Hashcode()); }
+  static bool Equals(Unique<Name> a, Unique<Name> b) { return a == b; }
+};
+
+#if DEBUG
+// Specialization for static parameters of type {Handle<Object>} to prevent any
+// direct usage of Handles in constants.
+template <>
+struct StaticParameterTraits<Handle<Object> > {
+  static OStream& PrintTo(OStream& os, Handle<Object> val) {  // NOLINT
+    UNREACHABLE();  // Should use Unique<Object> instead
+    return os;
+  }
+  static int HashCode(Handle<Object> a) {
+    UNREACHABLE();  // Should use Unique<Object> instead
+    return 0;
+  }
+  static bool Equals(Handle<Object> a, Handle<Object> b) {
+    UNREACHABLE();  // Should use Unique<Object> instead
+    return false;
+  }
+};
+#endif
+
+// A templatized implementation of Operator that has one static parameter of
+// type {T}. If a specialization of StaticParameterTraits<{T}> exists, then
+// operators of this kind can automatically be hashed, compared, and printed.
+template <typename T>
+class Operator1 : public Operator {
+ public:
+  Operator1(Opcode opcode, Properties properties, int input_count,
+            int output_count, const char* mnemonic, T parameter)
+      : Operator(opcode, properties, mnemonic),
+        input_count_(input_count),
+        output_count_(output_count),
+        parameter_(parameter) {}
+
+  const T& parameter() const { return parameter_; }
+
+  virtual bool Equals(const Operator* other) const OVERRIDE {
+    if (opcode() != other->opcode()) return false;
+    const Operator1<T>* that = static_cast<const Operator1<T>*>(other);
+    return StaticParameterTraits<T>::Equals(this->parameter_, that->parameter_);
+  }
+  virtual int HashCode() const OVERRIDE {
+    return opcode() + 33 * StaticParameterTraits<T>::HashCode(this->parameter_);
+  }
+  virtual int InputCount() const OVERRIDE { return input_count_; }
+  virtual int OutputCount() const OVERRIDE { return output_count_; }
+  virtual OStream& PrintParameter(OStream& os) const {  // NOLINT
+    return StaticParameterTraits<T>::PrintTo(os << "[", parameter_) << "]";
+  }
+
+ protected:
+  virtual OStream& PrintTo(OStream& os) const FINAL {  // NOLINT
+    return PrintParameter(os << mnemonic());
+  }
+
+ private:
+  int input_count_;
+  int output_count_;
+  T parameter_;
+};
+
+
+// Helper to extract the static parameter from an Operator1<*> operator.
+template <typename T>
+static inline const T& OpParameter(const Operator* op) {
+  return reinterpret_cast<const Operator1<T>*>(op)->parameter();
+}
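+
+// Example (editor's illustration): an operator carrying one static parameter.
+//
+//   Operator1<int> param(IrOpcode::kParameter, Operator::kPure, 1, 1,
+//                        "Parameter", 3);
+//   OpParameter<int>(&param);  // 3
+//   // Printed via operator<< as "Parameter[3]", using
+//   // StaticParameterTraits<int>::PrintTo.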
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_OPERATOR_H_
diff --git a/src/compiler/phi-reducer.h b/src/compiler/phi-reducer.h
new file mode 100644
index 0000000..5870d04
--- /dev/null
+++ b/src/compiler/phi-reducer.h
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PHI_REDUCER_H_
+#define V8_COMPILER_PHI_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Replaces redundant phis if all the inputs are the same or the phi itself.
+class PhiReducer FINAL : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) OVERRIDE {
+    if (node->opcode() != IrOpcode::kPhi &&
+        node->opcode() != IrOpcode::kEffectPhi)
+      return NoChange();
+
+    int n = node->op()->InputCount();
+    if (n == 1) return Replace(node->InputAt(0));
+
+    Node* replacement = NULL;
+    Node::Inputs inputs = node->inputs();
+    for (InputIter it = inputs.begin(); n > 0; --n, ++it) {
+      Node* input = *it;
+      if (input != node && input != replacement) {
+        if (replacement != NULL) return NoChange();
+        replacement = input;
+      }
+    }
+    DCHECK_NE(node, replacement);
+    return Replace(replacement);
+  }
+};
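+
+// Examples (editor's note): Phi(v, v) reduces to v. Phi(v, phi), where phi is
+// the node itself (a self-reference through a loop back edge), also reduces
+// to v. Phi(v, w) with two distinct inputs is left unchanged.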
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_PHI_REDUCER_H_
diff --git a/src/compiler/pipeline.cc b/src/compiler/pipeline.cc
new file mode 100644
index 0000000..9889b6a
--- /dev/null
+++ b/src/compiler/pipeline.cc
@@ -0,0 +1,422 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/pipeline.h"
+
+#include "src/base/platform/elapsed-timer.h"
+#include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/code-generator.h"
+#include "src/compiler/graph-replay.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-generic-lowering.h"
+#include "src/compiler/js-inlining.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/phi-reducer.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/value-numbering-reducer.h"
+#include "src/compiler/verifier.h"
+#include "src/hydrogen.h"
+#include "src/ostreams.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class PhaseStats {
+ public:
+  enum PhaseKind { CREATE_GRAPH, OPTIMIZATION, CODEGEN };
+
+  PhaseStats(CompilationInfo* info, PhaseKind kind, const char* name)
+      : info_(info),
+        kind_(kind),
+        name_(name),
+        size_(info->zone()->allocation_size()) {
+    if (FLAG_turbo_stats) {
+      timer_.Start();
+    }
+  }
+
+  ~PhaseStats() {
+    if (FLAG_turbo_stats) {
+      base::TimeDelta delta = timer_.Elapsed();
+      size_t bytes = info_->zone()->allocation_size() - size_;
+      HStatistics* stats = info_->isolate()->GetTStatistics();
+      stats->SaveTiming(name_, delta, static_cast<int>(bytes));
+
+      switch (kind_) {
+        case CREATE_GRAPH:
+          stats->IncrementCreateGraph(delta);
+          break;
+        case OPTIMIZATION:
+          stats->IncrementOptimizeGraph(delta);
+          break;
+        case CODEGEN:
+          stats->IncrementGenerateCode(delta);
+          break;
+      }
+    }
+  }
+
+ private:
+  CompilationInfo* info_;
+  PhaseKind kind_;
+  const char* name_;
+  size_t size_;
+  base::ElapsedTimer timer_;
+};
+
+
+static inline bool VerifyGraphs() {
+#ifdef DEBUG
+  return true;
+#else
+  return FLAG_turbo_verify;
+#endif
+}
+
+
+void Pipeline::VerifyAndPrintGraph(Graph* graph, const char* phase) {
+  if (FLAG_trace_turbo) {
+    char buffer[256];
+    Vector<char> filename(buffer, sizeof(buffer));
+    if (!info_->shared_info().is_null()) {
+      SmartArrayPointer<char> functionname =
+          info_->shared_info()->DebugName()->ToCString();
+      if (strlen(functionname.get()) > 0) {
+        SNPrintF(filename, "turbo-%s-%s.dot", functionname.get(), phase);
+      } else {
+        SNPrintF(filename, "turbo-%p-%s.dot", static_cast<void*>(info_), phase);
+      }
+    } else {
+      SNPrintF(filename, "turbo-none-%s.dot", phase);
+    }
+    std::replace(filename.start(), filename.start() + filename.length(), ' ',
+                 '_');
+    FILE* file = base::OS::FOpen(filename.start(), "w+");
+    OFStream of(file);
+    of << AsDOT(*graph);
+    fclose(file);
+
+    OFStream os(stdout);
+    os << "-- " << phase << " graph printed to file " << filename.start()
+       << "\n";
+  }
+  if (VerifyGraphs()) Verifier::Run(graph);
+}
+
+
+class AstGraphBuilderWithPositions : public AstGraphBuilder {
+ public:
+  explicit AstGraphBuilderWithPositions(CompilationInfo* info, JSGraph* jsgraph,
+                                        SourcePositionTable* source_positions)
+      : AstGraphBuilder(info, jsgraph), source_positions_(source_positions) {}
+
+  bool CreateGraph() {
+    SourcePositionTable::Scope pos(source_positions_,
+                                   SourcePosition::Unknown());
+    return AstGraphBuilder::CreateGraph();
+  }
+
+#define DEF_VISIT(type)                                               \
+  virtual void Visit##type(type* node) OVERRIDE {                  \
+    SourcePositionTable::Scope pos(source_positions_,                 \
+                                   SourcePosition(node->position())); \
+    AstGraphBuilder::Visit##type(node);                               \
+  }
+  AST_NODE_LIST(DEF_VISIT)
+#undef DEF_VISIT
+
+ private:
+  SourcePositionTable* source_positions_;
+};
+
+
+static void TraceSchedule(Schedule* schedule) {
+  if (!FLAG_trace_turbo) return;
+  OFStream os(stdout);
+  os << "-- Schedule --------------------------------------\n" << *schedule;
+}
+
+
+Handle<Code> Pipeline::GenerateCode() {
+  if (info()->function()->dont_optimize_reason() == kTryCatchStatement ||
+      info()->function()->dont_optimize_reason() == kTryFinallyStatement ||
+      // TODO(turbofan): Make ES6 for-of work and remove this bailout.
+      info()->function()->dont_optimize_reason() == kForOfStatement ||
+      // TODO(turbofan): Make super work and remove this bailout.
+      info()->function()->dont_optimize_reason() == kSuperReference ||
+      // TODO(turbofan): Make OSR work and remove this bailout.
+      info()->is_osr()) {
+    return Handle<Code>::null();
+  }
+
+  if (FLAG_turbo_stats) isolate()->GetTStatistics()->Initialize(info_);
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "---------------------------------------------------\n"
+       << "Begin compiling method "
+       << info()->function()->debug_name()->ToCString().get()
+       << " using Turbofan" << endl;
+  }
+
+  // Build the graph.
+  Graph graph(zone());
+  SourcePositionTable source_positions(&graph);
+  source_positions.AddDecorator();
+  // TODO(turbofan): there is no need to type anything during initial graph
+  // construction.  This is currently only needed for the node cache, which the
+  // typer could sweep over later.
+  Typer typer(zone());
+  MachineOperatorBuilder machine;
+  CommonOperatorBuilder common(zone());
+  JSOperatorBuilder javascript(zone());
+  JSGraph jsgraph(&graph, &common, &javascript, &typer, &machine);
+  Node* context_node;
+  {
+    PhaseStats graph_builder_stats(info(), PhaseStats::CREATE_GRAPH,
+                                   "graph builder");
+    AstGraphBuilderWithPositions graph_builder(info(), &jsgraph,
+                                               &source_positions);
+    graph_builder.CreateGraph();
+    context_node = graph_builder.GetFunctionContext();
+  }
+  {
+    PhaseStats phi_reducer_stats(info(), PhaseStats::CREATE_GRAPH,
+                                 "phi reduction");
+    PhiReducer phi_reducer;
+    GraphReducer graph_reducer(&graph);
+    graph_reducer.AddReducer(&phi_reducer);
+    graph_reducer.ReduceGraph();
+    // TODO(mstarzinger): Running reducer once ought to be enough for everyone.
+    graph_reducer.ReduceGraph();
+    graph_reducer.ReduceGraph();
+  }
+
+  VerifyAndPrintGraph(&graph, "Initial untyped");
+
+  if (info()->is_context_specializing()) {
+    SourcePositionTable::Scope pos(&source_positions,
+                                   SourcePosition::Unknown());
+    // Specialize the code to the context as aggressively as possible.
+    JSContextSpecializer spec(info(), &jsgraph, context_node);
+    spec.SpecializeToContext();
+    VerifyAndPrintGraph(&graph, "Context specialized");
+  }
+
+  if (info()->is_inlining_enabled()) {
+    SourcePositionTable::Scope pos(&source_positions,
+                                   SourcePosition::Unknown());
+    JSInliner inliner(info(), &jsgraph);
+    inliner.Inline();
+    VerifyAndPrintGraph(&graph, "Inlined");
+  }
+
+  // Print a replay of the initial graph.
+  if (FLAG_print_turbo_replay) {
+    GraphReplayPrinter::PrintReplay(&graph);
+  }
+
+  if (info()->is_typing_enabled()) {
+    {
+      // Type the graph.
+      PhaseStats typer_stats(info(), PhaseStats::CREATE_GRAPH, "typer");
+      typer.Run(&graph, info()->context());
+      VerifyAndPrintGraph(&graph, "Typed");
+    }
+    // All new nodes must be typed.
+    typer.DecorateGraph(&graph);
+    {
+      // Lower JSOperators where we can determine types.
+      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+                                "typed lowering");
+      SourcePositionTable::Scope pos(&source_positions,
+                                     SourcePosition::Unknown());
+      JSTypedLowering lowering(&jsgraph);
+      GraphReducer graph_reducer(&graph);
+      graph_reducer.AddReducer(&lowering);
+      graph_reducer.ReduceGraph();
+
+      VerifyAndPrintGraph(&graph, "Lowered typed");
+    }
+    {
+      // Lower simplified operators and insert changes.
+      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+                                "simplified lowering");
+      SourcePositionTable::Scope pos(&source_positions,
+                                     SourcePosition::Unknown());
+      SimplifiedLowering lowering(&jsgraph);
+      lowering.LowerAllNodes();
+
+      VerifyAndPrintGraph(&graph, "Lowered simplified");
+    }
+    {
+      // Lower changes that have been inserted before.
+      PhaseStats lowering_stats(info(), PhaseStats::OPTIMIZATION,
+                                "change lowering");
+      SourcePositionTable::Scope pos(&source_positions,
+                                     SourcePosition::Unknown());
+      Linkage linkage(info());
+      // TODO(turbofan): Value numbering disabled for now.
+      // ValueNumberingReducer vn_reducer(zone());
+      SimplifiedOperatorReducer simple_reducer(&jsgraph);
+      ChangeLowering lowering(&jsgraph, &linkage);
+      MachineOperatorReducer mach_reducer(&jsgraph);
+      GraphReducer graph_reducer(&graph);
+      // TODO(titzer): Figure out if we should run all reducers at once here.
+      // graph_reducer.AddReducer(&vn_reducer);
+      graph_reducer.AddReducer(&simple_reducer);
+      graph_reducer.AddReducer(&lowering);
+      graph_reducer.AddReducer(&mach_reducer);
+      graph_reducer.ReduceGraph();
+
+      VerifyAndPrintGraph(&graph, "Lowered changes");
+    }
+  }
+
+  Handle<Code> code = Handle<Code>::null();
+  if (SupportedTarget()) {
+    {
+      // Lower any remaining generic JSOperators.
+      PhaseStats lowering_stats(info(), PhaseStats::CREATE_GRAPH,
+                                "generic lowering");
+      SourcePositionTable::Scope pos(&source_positions,
+                                     SourcePosition::Unknown());
+      JSGenericLowering lowering(info(), &jsgraph);
+      GraphReducer graph_reducer(&graph);
+      graph_reducer.AddReducer(&lowering);
+      graph_reducer.ReduceGraph();
+
+      VerifyAndPrintGraph(&graph, "Lowered generic");
+    }
+
+    {
+      // Compute a schedule.
+      Schedule* schedule = ComputeSchedule(&graph);
+      // Generate optimized code.
+      PhaseStats codegen_stats(info(), PhaseStats::CODEGEN, "codegen");
+      Linkage linkage(info());
+      code = GenerateCode(&linkage, &graph, schedule, &source_positions);
+      info()->SetCode(code);
+    }
+
+    // Print optimized code.
+    v8::internal::CodeGenerator::PrintCode(code, info());
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "--------------------------------------------------\n"
+       << "Finished compiling method "
+       << info()->function()->debug_name()->ToCString().get()
+       << " using Turbofan" << endl;
+  }
+
+  return code;
+}
+
+
+Schedule* Pipeline::ComputeSchedule(Graph* graph) {
+  PhaseStats schedule_stats(info(), PhaseStats::CODEGEN, "scheduling");
+  Schedule* schedule = Scheduler::ComputeSchedule(graph);
+  TraceSchedule(schedule);
+  if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
+  return schedule;
+}
+
+
+Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage,
+                                                   Graph* graph,
+                                                   Schedule* schedule) {
+  CHECK(SupportedBackend());
+  if (schedule == NULL) {
+    VerifyAndPrintGraph(graph, "Machine");
+    schedule = ComputeSchedule(graph);
+  }
+  TraceSchedule(schedule);
+
+  SourcePositionTable source_positions(graph);
+  Handle<Code> code = GenerateCode(linkage, graph, schedule, &source_positions);
+#if ENABLE_DISASSEMBLER
+  if (!code.is_null() && FLAG_print_opt_code) {
+    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    OFStream os(tracing_scope.file());
+    code->Disassemble("test code", os);
+  }
+#endif
+  return code;
+}
+
+
+Handle<Code> Pipeline::GenerateCode(Linkage* linkage, Graph* graph,
+                                    Schedule* schedule,
+                                    SourcePositionTable* source_positions) {
+  DCHECK_NOT_NULL(graph);
+  DCHECK_NOT_NULL(linkage);
+  DCHECK_NOT_NULL(schedule);
+  CHECK(SupportedBackend());
+
+  InstructionSequence sequence(linkage, graph, schedule);
+
+  // Select and schedule instructions covering the scheduled graph.
+  {
+    InstructionSelector selector(&sequence, source_positions);
+    selector.SelectInstructions();
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "----- Instruction sequence before register allocation -----\n"
+       << sequence;
+  }
+
+  // Allocate registers.
+  {
+    int node_count = graph->NodeCount();
+    if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
+      linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersForValues);
+      return Handle<Code>::null();
+    }
+    RegisterAllocator allocator(&sequence);
+    if (!allocator.Allocate()) {
+      linkage->info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+      return Handle<Code>::null();
+    }
+  }
+
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << "----- Instruction sequence after register allocation -----\n"
+       << sequence;
+  }
+
+  // Generate native sequence.
+  CodeGenerator generator(&sequence);
+  return generator.GenerateCode();
+}
+
+
+void Pipeline::SetUp() {
+  InstructionOperand::SetUpCaches();
+}
+
+
+void Pipeline::TearDown() {
+  InstructionOperand::TearDownCaches();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/pipeline.h b/src/compiler/pipeline.h
new file mode 100644
index 0000000..9f8241a
--- /dev/null
+++ b/src/compiler/pipeline.h
@@ -0,0 +1,59 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_PIPELINE_H_
+#define V8_COMPILER_PIPELINE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+
+// Note: TODO(turbofan) implies a performance improvement opportunity,
+//   and TODO(name) implies an incomplete implementation
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Clients of this interface shouldn't depend on lots of compiler internals.
+class Graph;
+class Schedule;
+class SourcePositionTable;
+class Linkage;
+
+class Pipeline {
+ public:
+  explicit Pipeline(CompilationInfo* info) : info_(info) {}
+
+  // Run the entire pipeline and generate a handle to a code object.
+  Handle<Code> GenerateCode();
+
+  // Run the pipeline on a machine graph and generate code. If {schedule}
+  // is {NULL}, then compute a new schedule for code generation.
+  Handle<Code> GenerateCodeForMachineGraph(Linkage* linkage, Graph* graph,
+                                           Schedule* schedule = NULL);
+
+  static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
+  static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
+
+  static void SetUp();
+  static void TearDown();
+
+ private:
+  CompilationInfo* info_;
+
+  CompilationInfo* info() const { return info_; }
+  Isolate* isolate() { return info_->isolate(); }
+  Zone* zone() { return info_->zone(); }
+
+  Schedule* ComputeSchedule(Graph* graph);
+  void VerifyAndPrintGraph(Graph* graph, const char* phase);
+  Handle<Code> GenerateCode(Linkage* linkage, Graph* graph, Schedule* schedule,
+                            SourcePositionTable* source_positions);
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_PIPELINE_H_
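
(Illustration, not part of the diff.) A client drives this interface in a single call; the sketch below assumes a fully initialized CompilationInfo* named info, whose setup is outside this diff:

    // Sketch only: `info` is assumed to be set up elsewhere.
    Pipeline pipeline(info);
    Handle<Code> code = pipeline.GenerateCode();
    if (code.is_null()) {
      // TurboFan bailed out (see the bailout checks at the top of
      // Pipeline::GenerateCode() in pipeline.cc); the caller falls back
      // to another compiler tier.
    }
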
diff --git a/src/compiler/raw-machine-assembler.cc b/src/compiler/raw-machine-assembler.cc
new file mode 100644
index 0000000..7f45eb9
--- /dev/null
+++ b/src/compiler/raw-machine-assembler.cc
@@ -0,0 +1,165 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/code-factory.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/scheduler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+RawMachineAssembler::RawMachineAssembler(Graph* graph,
+                                         MachineSignature* machine_sig,
+                                         MachineType word)
+    : GraphBuilder(graph),
+      schedule_(new (zone()) Schedule(zone())),
+      machine_(word),
+      common_(zone()),
+      machine_sig_(machine_sig),
+      call_descriptor_(
+          Linkage::GetSimplifiedCDescriptor(graph->zone(), machine_sig)),
+      parameters_(NULL),
+      exit_label_(schedule()->end()),
+      current_block_(schedule()->start()) {
+  int param_count = static_cast<int>(parameter_count());
+  Node* s = graph->NewNode(common_.Start(param_count));
+  graph->SetStart(s);
+  if (parameter_count() == 0) return;
+  parameters_ = zone()->NewArray<Node*>(param_count);
+  for (size_t i = 0; i < parameter_count(); ++i) {
+    parameters_[i] =
+        NewNode(common()->Parameter(static_cast<int>(i)), graph->start());
+  }
+}
+
+
+Schedule* RawMachineAssembler::Export() {
+  // Compute the correct codegen order.
+  DCHECK(schedule_->rpo_order()->empty());
+  Scheduler::ComputeSpecialRPO(schedule_);
+  // Invalidate the RawMachineAssembler.
+  Schedule* schedule = schedule_;
+  schedule_ = NULL;
+  return schedule;
+}
+
+
+Node* RawMachineAssembler::Parameter(size_t index) {
+  DCHECK(index < parameter_count());
+  return parameters_[index];
+}
+
+
+RawMachineAssembler::Label* RawMachineAssembler::Exit() {
+  exit_label_.used_ = true;
+  return &exit_label_;
+}
+
+
+void RawMachineAssembler::Goto(Label* label) {
+  DCHECK(current_block_ != schedule()->end());
+  schedule()->AddGoto(CurrentBlock(), Use(label));
+  current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Branch(Node* condition, Label* true_val,
+                                 Label* false_val) {
+  DCHECK(current_block_ != schedule()->end());
+  Node* branch = NewNode(common()->Branch(), condition);
+  schedule()->AddBranch(CurrentBlock(), branch, Use(true_val), Use(false_val));
+  current_block_ = NULL;
+}
+
+
+void RawMachineAssembler::Return(Node* value) {
+  schedule()->AddReturn(CurrentBlock(), value);
+  current_block_ = NULL;
+}
+
+
+Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
+                                             Node* context, Node* frame_state,
+                                             CallFunctionFlags flags) {
+  Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      callable.descriptor(), 1, CallDescriptor::kNeedsFrameState, zone());
+  Node* stub_code = HeapConstant(callable.code());
+  Node* call = graph()->NewNode(common()->Call(desc), stub_code, function,
+                                receiver, context, frame_state);
+  schedule()->AddNode(CurrentBlock(), call);
+  return call;
+}
+
+
+Node* RawMachineAssembler::CallJS0(Node* function, Node* receiver,
+                                   Node* context, Node* frame_state) {
+  CallDescriptor* descriptor = Linkage::GetJSCallDescriptor(1, zone());
+  Node* call = graph()->NewNode(common()->Call(descriptor), function, receiver,
+                                context, frame_state);
+  schedule()->AddNode(CurrentBlock(), call);
+  return call;
+}
+
+
+Node* RawMachineAssembler::CallRuntime1(Runtime::FunctionId function,
+                                        Node* arg0, Node* context,
+                                        Node* frame_state) {
+  CallDescriptor* descriptor = Linkage::GetRuntimeCallDescriptor(
+      function, 1, Operator::kNoProperties, zone());
+
+  Node* centry = HeapConstant(CEntryStub(isolate(), 1).GetCode());
+  Node* ref = NewNode(
+      common()->ExternalConstant(ExternalReference(function, isolate())));
+  Node* arity = Int32Constant(1);
+
+  Node* call = graph()->NewNode(common()->Call(descriptor), centry, arg0, ref,
+                                arity, context, frame_state);
+  schedule()->AddNode(CurrentBlock(), call);
+  return call;
+}
+
+
+void RawMachineAssembler::Bind(Label* label) {
+  DCHECK(current_block_ == NULL);
+  DCHECK(!label->bound_);
+  label->bound_ = true;
+  current_block_ = EnsureBlock(label);
+}
+
+
+BasicBlock* RawMachineAssembler::Use(Label* label) {
+  label->used_ = true;
+  return EnsureBlock(label);
+}
+
+
+BasicBlock* RawMachineAssembler::EnsureBlock(Label* label) {
+  if (label->block_ == NULL) label->block_ = schedule()->NewBasicBlock();
+  return label->block_;
+}
+
+
+BasicBlock* RawMachineAssembler::CurrentBlock() {
+  DCHECK(current_block_);
+  return current_block_;
+}
+
+
+Node* RawMachineAssembler::MakeNode(const Operator* op, int input_count,
+                                    Node** inputs) {
+  DCHECK(ScheduleValid());
+  DCHECK(current_block_ != NULL);
+  Node* node = graph()->NewNode(op, input_count, inputs);
+  BasicBlock* block = op->opcode() == IrOpcode::kParameter ? schedule()->start()
+                                                           : CurrentBlock();
+  schedule()->AddNode(block, node);
+  return node;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/raw-machine-assembler.h b/src/compiler/raw-machine-assembler.h
new file mode 100644
index 0000000..a4af55a
--- /dev/null
+++ b/src/compiler/raw-machine-assembler.h
@@ -0,0 +1,438 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+#define V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Schedule;
+
+
+class RawMachineAssembler : public GraphBuilder {
+ public:
+  class Label {
+   public:
+    Label() : block_(NULL), used_(false), bound_(false) {}
+    ~Label() { DCHECK(bound_ || !used_); }
+
+    BasicBlock* block() { return block_; }
+
+   private:
+    // Private constructor for exit label.
+    explicit Label(BasicBlock* block)
+        : block_(block), used_(false), bound_(false) {}
+
+    BasicBlock* block_;
+    bool used_;
+    bool bound_;
+    friend class RawMachineAssembler;
+    DISALLOW_COPY_AND_ASSIGN(Label);
+  };
+
+  RawMachineAssembler(Graph* graph, MachineSignature* machine_sig,
+                      MachineType word = kMachPtr);
+  virtual ~RawMachineAssembler() {}
+
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return graph()->zone(); }
+  MachineOperatorBuilder* machine() { return &machine_; }
+  CommonOperatorBuilder* common() { return &common_; }
+  CallDescriptor* call_descriptor() const { return call_descriptor_; }
+  size_t parameter_count() const { return machine_sig_->parameter_count(); }
+  MachineSignature* machine_sig() const { return machine_sig_; }
+
+  Node* UndefinedConstant() {
+    Unique<Object> unique = Unique<Object>::CreateImmovable(
+        isolate()->factory()->undefined_value());
+    return NewNode(common()->HeapConstant(unique));
+  }
+
+  // Constants.
+  Node* PointerConstant(void* value) {
+    return IntPtrConstant(reinterpret_cast<intptr_t>(value));
+  }
+  Node* IntPtrConstant(intptr_t value) {
+    // TODO(dcarney): mark generated code as unserializable if value != 0.
+    return kPointerSize == 8 ? Int64Constant(value)
+                             : Int32Constant(static_cast<int>(value));
+  }
+  Node* Int32Constant(int32_t value) {
+    return NewNode(common()->Int32Constant(value));
+  }
+  Node* Int64Constant(int64_t value) {
+    return NewNode(common()->Int64Constant(value));
+  }
+  Node* NumberConstant(double value) {
+    return NewNode(common()->NumberConstant(value));
+  }
+  Node* Float64Constant(double value) {
+    return NewNode(common()->Float64Constant(value));
+  }
+  Node* HeapConstant(Handle<Object> object) {
+    Unique<Object> val = Unique<Object>::CreateUninitialized(object);
+    return NewNode(common()->HeapConstant(val));
+  }
+
+  Node* Projection(int index, Node* a) {
+    return NewNode(common()->Projection(index), a);
+  }
+
+  // Memory Operations.
+  Node* Load(MachineType rep, Node* base) {
+    return Load(rep, base, Int32Constant(0));
+  }
+  Node* Load(MachineType rep, Node* base, Node* index) {
+    return NewNode(machine()->Load(rep), base, index);
+  }
+  void Store(MachineType rep, Node* base, Node* value) {
+    Store(rep, base, Int32Constant(0), value);
+  }
+  void Store(MachineType rep, Node* base, Node* index, Node* value) {
+    NewNode(machine()->Store(StoreRepresentation(rep, kNoWriteBarrier)), base,
+            index, value);
+  }
+  // Arithmetic Operations.
+  Node* WordAnd(Node* a, Node* b) {
+    return NewNode(machine()->WordAnd(), a, b);
+  }
+  Node* WordOr(Node* a, Node* b) { return NewNode(machine()->WordOr(), a, b); }
+  Node* WordXor(Node* a, Node* b) {
+    return NewNode(machine()->WordXor(), a, b);
+  }
+  Node* WordShl(Node* a, Node* b) {
+    return NewNode(machine()->WordShl(), a, b);
+  }
+  Node* WordShr(Node* a, Node* b) {
+    return NewNode(machine()->WordShr(), a, b);
+  }
+  Node* WordSar(Node* a, Node* b) {
+    return NewNode(machine()->WordSar(), a, b);
+  }
+  Node* WordRor(Node* a, Node* b) {
+    return NewNode(machine()->WordRor(), a, b);
+  }
+  Node* WordEqual(Node* a, Node* b) {
+    return NewNode(machine()->WordEqual(), a, b);
+  }
+  Node* WordNotEqual(Node* a, Node* b) {
+    return WordBinaryNot(WordEqual(a, b));
+  }
+  Node* WordNot(Node* a) {
+    if (machine()->Is32()) {
+      return Word32Not(a);
+    } else {
+      return Word64Not(a);
+    }
+  }
+  Node* WordBinaryNot(Node* a) {
+    if (machine()->Is32()) {
+      return Word32BinaryNot(a);
+    } else {
+      return Word64BinaryNot(a);
+    }
+  }
+
+  Node* Word32And(Node* a, Node* b) {
+    return NewNode(machine()->Word32And(), a, b);
+  }
+  Node* Word32Or(Node* a, Node* b) {
+    return NewNode(machine()->Word32Or(), a, b);
+  }
+  Node* Word32Xor(Node* a, Node* b) {
+    return NewNode(machine()->Word32Xor(), a, b);
+  }
+  Node* Word32Shl(Node* a, Node* b) {
+    return NewNode(machine()->Word32Shl(), a, b);
+  }
+  Node* Word32Shr(Node* a, Node* b) {
+    return NewNode(machine()->Word32Shr(), a, b);
+  }
+  Node* Word32Sar(Node* a, Node* b) {
+    return NewNode(machine()->Word32Sar(), a, b);
+  }
+  Node* Word32Ror(Node* a, Node* b) {
+    return NewNode(machine()->Word32Ror(), a, b);
+  }
+  Node* Word32Equal(Node* a, Node* b) {
+    return NewNode(machine()->Word32Equal(), a, b);
+  }
+  Node* Word32NotEqual(Node* a, Node* b) {
+    return Word32BinaryNot(Word32Equal(a, b));
+  }
+  Node* Word32Not(Node* a) { return Word32Xor(a, Int32Constant(-1)); }
+  Node* Word32BinaryNot(Node* a) { return Word32Equal(a, Int32Constant(0)); }
+
+  Node* Word64And(Node* a, Node* b) {
+    return NewNode(machine()->Word64And(), a, b);
+  }
+  Node* Word64Or(Node* a, Node* b) {
+    return NewNode(machine()->Word64Or(), a, b);
+  }
+  Node* Word64Xor(Node* a, Node* b) {
+    return NewNode(machine()->Word64Xor(), a, b);
+  }
+  Node* Word64Shl(Node* a, Node* b) {
+    return NewNode(machine()->Word64Shl(), a, b);
+  }
+  Node* Word64Shr(Node* a, Node* b) {
+    return NewNode(machine()->Word64Shr(), a, b);
+  }
+  Node* Word64Sar(Node* a, Node* b) {
+    return NewNode(machine()->Word64Sar(), a, b);
+  }
+  Node* Word64Ror(Node* a, Node* b) {
+    return NewNode(machine()->Word64Ror(), a, b);
+  }
+  Node* Word64Equal(Node* a, Node* b) {
+    return NewNode(machine()->Word64Equal(), a, b);
+  }
+  Node* Word64NotEqual(Node* a, Node* b) {
+    return Word64BinaryNot(Word64Equal(a, b));
+  }
+  Node* Word64Not(Node* a) { return Word64Xor(a, Int64Constant(-1)); }
+  Node* Word64BinaryNot(Node* a) { return Word64Equal(a, Int64Constant(0)); }
+
+  Node* Int32Add(Node* a, Node* b) {
+    return NewNode(machine()->Int32Add(), a, b);
+  }
+  Node* Int32AddWithOverflow(Node* a, Node* b) {
+    return NewNode(machine()->Int32AddWithOverflow(), a, b);
+  }
+  Node* Int32Sub(Node* a, Node* b) {
+    return NewNode(machine()->Int32Sub(), a, b);
+  }
+  Node* Int32SubWithOverflow(Node* a, Node* b) {
+    return NewNode(machine()->Int32SubWithOverflow(), a, b);
+  }
+  Node* Int32Mul(Node* a, Node* b) {
+    return NewNode(machine()->Int32Mul(), a, b);
+  }
+  Node* Int32Div(Node* a, Node* b) {
+    return NewNode(machine()->Int32Div(), a, b);
+  }
+  Node* Int32UDiv(Node* a, Node* b) {
+    return NewNode(machine()->Int32UDiv(), a, b);
+  }
+  Node* Int32Mod(Node* a, Node* b) {
+    return NewNode(machine()->Int32Mod(), a, b);
+  }
+  Node* Int32UMod(Node* a, Node* b) {
+    return NewNode(machine()->Int32UMod(), a, b);
+  }
+  Node* Int32LessThan(Node* a, Node* b) {
+    return NewNode(machine()->Int32LessThan(), a, b);
+  }
+  Node* Int32LessThanOrEqual(Node* a, Node* b) {
+    return NewNode(machine()->Int32LessThanOrEqual(), a, b);
+  }
+  Node* Uint32LessThan(Node* a, Node* b) {
+    return NewNode(machine()->Uint32LessThan(), a, b);
+  }
+  Node* Uint32LessThanOrEqual(Node* a, Node* b) {
+    return NewNode(machine()->Uint32LessThanOrEqual(), a, b);
+  }
+  Node* Int32GreaterThan(Node* a, Node* b) { return Int32LessThan(b, a); }
+  Node* Int32GreaterThanOrEqual(Node* a, Node* b) {
+    return Int32LessThanOrEqual(b, a);
+  }
+  Node* Int32Neg(Node* a) { return Int32Sub(Int32Constant(0), a); }
+
+  Node* Int64Add(Node* a, Node* b) {
+    return NewNode(machine()->Int64Add(), a, b);
+  }
+  Node* Int64Sub(Node* a, Node* b) {
+    return NewNode(machine()->Int64Sub(), a, b);
+  }
+  Node* Int64Mul(Node* a, Node* b) {
+    return NewNode(machine()->Int64Mul(), a, b);
+  }
+  Node* Int64Div(Node* a, Node* b) {
+    return NewNode(machine()->Int64Div(), a, b);
+  }
+  Node* Int64UDiv(Node* a, Node* b) {
+    return NewNode(machine()->Int64UDiv(), a, b);
+  }
+  Node* Int64Mod(Node* a, Node* b) {
+    return NewNode(machine()->Int64Mod(), a, b);
+  }
+  Node* Int64UMod(Node* a, Node* b) {
+    return NewNode(machine()->Int64UMod(), a, b);
+  }
+  Node* Int64Neg(Node* a) { return Int64Sub(Int64Constant(0), a); }
+  Node* Int64LessThan(Node* a, Node* b) {
+    return NewNode(machine()->Int64LessThan(), a, b);
+  }
+  Node* Int64LessThanOrEqual(Node* a, Node* b) {
+    return NewNode(machine()->Int64LessThanOrEqual(), a, b);
+  }
+  Node* Int64GreaterThan(Node* a, Node* b) { return Int64LessThan(b, a); }
+  Node* Int64GreaterThanOrEqual(Node* a, Node* b) {
+    return Int64LessThanOrEqual(b, a);
+  }
+
+  // TODO(turbofan): What is this used for?
+  Node* ConvertIntPtrToInt32(Node* a) {
+    return kPointerSize == 8 ? NewNode(machine()->TruncateInt64ToInt32(), a)
+                             : a;
+  }
+  Node* ConvertInt32ToIntPtr(Node* a) {
+    return kPointerSize == 8 ? NewNode(machine()->ChangeInt32ToInt64(), a) : a;
+  }
+
+#define INTPTR_BINOP(prefix, name)                     \
+  Node* IntPtr##name(Node* a, Node* b) {               \
+    return kPointerSize == 8 ? prefix##64##name(a, b)  \
+                             : prefix##32##name(a, b); \
+  }
+
+  INTPTR_BINOP(Int, Add);
+  INTPTR_BINOP(Int, Sub);
+  INTPTR_BINOP(Int, LessThan);
+  INTPTR_BINOP(Int, LessThanOrEqual);
+  INTPTR_BINOP(Word, Equal);
+  INTPTR_BINOP(Word, NotEqual);
+  INTPTR_BINOP(Int, GreaterThanOrEqual);
+  INTPTR_BINOP(Int, GreaterThan);
+
+#undef INTPTR_BINOP
+
+  Node* Float64Add(Node* a, Node* b) {
+    return NewNode(machine()->Float64Add(), a, b);
+  }
+  Node* Float64Sub(Node* a, Node* b) {
+    return NewNode(machine()->Float64Sub(), a, b);
+  }
+  Node* Float64Mul(Node* a, Node* b) {
+    return NewNode(machine()->Float64Mul(), a, b);
+  }
+  Node* Float64Div(Node* a, Node* b) {
+    return NewNode(machine()->Float64Div(), a, b);
+  }
+  Node* Float64Mod(Node* a, Node* b) {
+    return NewNode(machine()->Float64Mod(), a, b);
+  }
+  Node* Float64Equal(Node* a, Node* b) {
+    return NewNode(machine()->Float64Equal(), a, b);
+  }
+  Node* Float64NotEqual(Node* a, Node* b) {
+    return WordBinaryNot(Float64Equal(a, b));
+  }
+  Node* Float64LessThan(Node* a, Node* b) {
+    return NewNode(machine()->Float64LessThan(), a, b);
+  }
+  Node* Float64LessThanOrEqual(Node* a, Node* b) {
+    return NewNode(machine()->Float64LessThanOrEqual(), a, b);
+  }
+  Node* Float64GreaterThan(Node* a, Node* b) { return Float64LessThan(b, a); }
+  Node* Float64GreaterThanOrEqual(Node* a, Node* b) {
+    return Float64LessThanOrEqual(b, a);
+  }
+
+  // Conversions.
+  Node* ChangeInt32ToFloat64(Node* a) {
+    return NewNode(machine()->ChangeInt32ToFloat64(), a);
+  }
+  Node* ChangeUint32ToFloat64(Node* a) {
+    return NewNode(machine()->ChangeUint32ToFloat64(), a);
+  }
+  Node* ChangeFloat64ToInt32(Node* a) {
+    return NewNode(machine()->ChangeFloat64ToInt32(), a);
+  }
+  Node* ChangeFloat64ToUint32(Node* a) {
+    return NewNode(machine()->ChangeFloat64ToUint32(), a);
+  }
+  Node* ChangeInt32ToInt64(Node* a) {
+    return NewNode(machine()->ChangeInt32ToInt64(), a);
+  }
+  Node* ChangeUint32ToUint64(Node* a) {
+    return NewNode(machine()->ChangeUint32ToUint64(), a);
+  }
+  Node* TruncateFloat64ToInt32(Node* a) {
+    return NewNode(machine()->TruncateFloat64ToInt32(), a);
+  }
+  Node* TruncateInt64ToInt32(Node* a) {
+    return NewNode(machine()->TruncateInt64ToInt32(), a);
+  }
+
+  // Parameters.
+  Node* Parameter(size_t index);
+
+  // Control flow.
+  Label* Exit();
+  void Goto(Label* label);
+  void Branch(Node* condition, Label* true_val, Label* false_val);
+  // Call through CallFunctionStub with lazy deopt and frame-state.
+  Node* CallFunctionStub0(Node* function, Node* receiver, Node* context,
+                          Node* frame_state, CallFunctionFlags flags);
+  // Call to a JS function with zero parameters.
+  Node* CallJS0(Node* function, Node* receiver, Node* context,
+                Node* frame_state);
+  // Call to a runtime function with zero parameters.
+  Node* CallRuntime1(Runtime::FunctionId function, Node* arg0, Node* context,
+                     Node* frame_state);
+  void Return(Node* value);
+  void Bind(Label* label);
+  void Deoptimize(Node* state);
+
+  // Variables.
+  Node* Phi(MachineType type, Node* n1, Node* n2) {
+    return NewNode(common()->Phi(type, 2), n1, n2);
+  }
+  Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3) {
+    return NewNode(common()->Phi(type, 3), n1, n2, n3);
+  }
+  Node* Phi(MachineType type, Node* n1, Node* n2, Node* n3, Node* n4) {
+    return NewNode(common()->Phi(type, 4), n1, n2, n3, n4);
+  }
+
+  // The RawMachineAssembler is invalid after export.
+  Schedule* Export();
+
+ protected:
+  virtual Node* MakeNode(const Operator* op, int input_count,
+                         Node** inputs) FINAL;
+
+  bool ScheduleValid() { return schedule_ != NULL; }
+
+  Schedule* schedule() {
+    DCHECK(ScheduleValid());
+    return schedule_;
+  }
+
+ private:
+  BasicBlock* Use(Label* label);
+  BasicBlock* EnsureBlock(Label* label);
+  BasicBlock* CurrentBlock();
+
+  Schedule* schedule_;
+  MachineOperatorBuilder machine_;
+  CommonOperatorBuilder common_;
+  MachineSignature* machine_sig_;
+  CallDescriptor* call_descriptor_;
+  Node** parameters_;
+  Label exit_label_;
+  BasicBlock* current_block_;
+
+  DISALLOW_COPY_AND_ASSIGN(RawMachineAssembler);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_RAW_MACHINE_ASSEMBLER_H_
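
(Illustration, not part of the diff.) Using only methods declared above, a graph computing p0 + p1 with an overflow check can be assembled as follows; the sketch assumes a Graph* and a MachineSignature* for (int32, int32) -> int32 were constructed elsewhere, since those setup APIs are not part of this diff:

    // Sketch only: `graph` and `machine_sig` are assumed to exist.
    RawMachineAssembler m(graph, machine_sig);
    Node* sum = m.Int32Add(m.Parameter(0), m.Parameter(1));
    // Overflow arithmetic yields a (value, flag) pair that is unpacked
    // with Projection: index 0 is the value, index 1 the overflow bit.
    Node* pair = m.Int32AddWithOverflow(sum, m.Int32Constant(1));
    Node* overflow = m.Projection(1, pair);  // Consumed by a branch in real code.
    m.Return(m.Projection(0, pair));
    Schedule* schedule = m.Export();  // `m` must not be used after this.
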
diff --git a/src/compiler/register-allocator.cc b/src/compiler/register-allocator.cc
new file mode 100644
index 0000000..972a904
--- /dev/null
+++ b/src/compiler/register-allocator.cc
@@ -0,0 +1,2232 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/register-allocator.h"
+
+#include "src/compiler/linkage.h"
+#include "src/hydrogen.h"
+#include "src/string-stream.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static inline LifetimePosition Min(LifetimePosition a, LifetimePosition b) {
+  return a.Value() < b.Value() ? a : b;
+}
+
+
+static inline LifetimePosition Max(LifetimePosition a, LifetimePosition b) {
+  return a.Value() > b.Value() ? a : b;
+}
+
+
+UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
+                         InstructionOperand* hint)
+    : operand_(operand),
+      hint_(hint),
+      pos_(pos),
+      next_(NULL),
+      requires_reg_(false),
+      register_beneficial_(true) {
+  if (operand_ != NULL && operand_->IsUnallocated()) {
+    const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
+    requires_reg_ = unalloc->HasRegisterPolicy();
+    register_beneficial_ = !unalloc->HasAnyPolicy();
+  }
+  DCHECK(pos_.IsValid());
+}
+
+
+bool UsePosition::HasHint() const {
+  return hint_ != NULL && !hint_->IsUnallocated();
+}
+
+
+bool UsePosition::RequiresRegister() const { return requires_reg_; }
+
+
+bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
+
+
+void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
+  DCHECK(Contains(pos) && pos.Value() != start().Value());
+  UseInterval* after = new (zone) UseInterval(pos, end_);
+  after->next_ = next_;
+  next_ = after;
+  end_ = pos;
+}
+
+
+#ifdef DEBUG
+
+
+void LiveRange::Verify() const {
+  UsePosition* cur = first_pos_;
+  while (cur != NULL) {
+    DCHECK(Start().Value() <= cur->pos().Value() &&
+           cur->pos().Value() <= End().Value());
+    cur = cur->next();
+  }
+}
+
+
+bool LiveRange::HasOverlap(UseInterval* target) const {
+  UseInterval* current_interval = first_interval_;
+  while (current_interval != NULL) {
+    // Intervals overlap if the start of one is contained in the other.
+    if (current_interval->Contains(target->start()) ||
+        target->Contains(current_interval->start())) {
+      return true;
+    }
+    current_interval = current_interval->next();
+  }
+  return false;
+}
+
+
+#endif
+
+
+LiveRange::LiveRange(int id, Zone* zone)
+    : id_(id),
+      spilled_(false),
+      is_phi_(false),
+      is_non_loop_phi_(false),
+      kind_(UNALLOCATED_REGISTERS),
+      assigned_register_(kInvalidAssignment),
+      last_interval_(NULL),
+      first_interval_(NULL),
+      first_pos_(NULL),
+      parent_(NULL),
+      next_(NULL),
+      current_interval_(NULL),
+      last_processed_use_(NULL),
+      current_hint_operand_(NULL),
+      spill_operand_(new (zone) InstructionOperand()),
+      spill_start_index_(kMaxInt) {}
+
+
+void LiveRange::set_assigned_register(int reg, Zone* zone) {
+  DCHECK(!HasRegisterAssigned() && !IsSpilled());
+  assigned_register_ = reg;
+  ConvertOperands(zone);
+}
+
+
+void LiveRange::MakeSpilled(Zone* zone) {
+  DCHECK(!IsSpilled());
+  DCHECK(TopLevel()->HasAllocatedSpillOperand());
+  spilled_ = true;
+  assigned_register_ = kInvalidAssignment;
+  ConvertOperands(zone);
+}
+
+
+bool LiveRange::HasAllocatedSpillOperand() const {
+  DCHECK(spill_operand_ != NULL);
+  return !spill_operand_->IsIgnored();
+}
+
+
+void LiveRange::SetSpillOperand(InstructionOperand* operand) {
+  DCHECK(!operand->IsUnallocated());
+  DCHECK(spill_operand_ != NULL);
+  DCHECK(spill_operand_->IsIgnored());
+  spill_operand_->ConvertTo(operand->kind(), operand->index());
+}
+
+
+UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
+  UsePosition* use_pos = last_processed_use_;
+  if (use_pos == NULL) use_pos = first_pos();
+  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+    use_pos = use_pos->next();
+  }
+  last_processed_use_ = use_pos;
+  return use_pos;
+}
+
+
+UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RegisterIsBeneficial()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
+    LifetimePosition start) {
+  UsePosition* pos = first_pos();
+  UsePosition* prev = NULL;
+  while (pos != NULL && pos->pos().Value() < start.Value()) {
+    if (pos->RegisterIsBeneficial()) prev = pos;
+    pos = pos->next();
+  }
+  return prev;
+}
+
+
+UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
+  UsePosition* pos = NextUsePosition(start);
+  while (pos != NULL && !pos->RequiresRegister()) {
+    pos = pos->next();
+  }
+  return pos;
+}
+
+
+bool LiveRange::CanBeSpilled(LifetimePosition pos) {
+  // We cannot spill a live range that has a use requiring a register
+  // at the current or the immediate next position.
+  UsePosition* use_pos = NextRegisterPosition(pos);
+  if (use_pos == NULL) return true;
+  return use_pos->pos().Value() >
+         pos.NextInstruction().InstructionEnd().Value();
+}
+
+
+InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
+  InstructionOperand* op = NULL;
+  if (HasRegisterAssigned()) {
+    DCHECK(!IsSpilled());
+    switch (Kind()) {
+      case GENERAL_REGISTERS:
+        op = RegisterOperand::Create(assigned_register(), zone);
+        break;
+      case DOUBLE_REGISTERS:
+        op = DoubleRegisterOperand::Create(assigned_register(), zone);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (IsSpilled()) {
+    DCHECK(!HasRegisterAssigned());
+    op = TopLevel()->GetSpillOperand();
+    DCHECK(!op->IsUnallocated());
+  } else {
+    UnallocatedOperand* unalloc =
+        new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
+    unalloc->set_virtual_register(id_);
+    op = unalloc;
+  }
+  return op;
+}
+
+
+UseInterval* LiveRange::FirstSearchIntervalForPosition(
+    LifetimePosition position) const {
+  if (current_interval_ == NULL) return first_interval_;
+  if (current_interval_->start().Value() > position.Value()) {
+    current_interval_ = NULL;
+    return first_interval_;
+  }
+  return current_interval_;
+}
+
+
+void LiveRange::AdvanceLastProcessedMarker(
+    UseInterval* to_start_of, LifetimePosition but_not_past) const {
+  if (to_start_of == NULL) return;
+  if (to_start_of->start().Value() > but_not_past.Value()) return;
+  LifetimePosition start = current_interval_ == NULL
+                               ? LifetimePosition::Invalid()
+                               : current_interval_->start();
+  if (to_start_of->start().Value() > start.Value()) {
+    current_interval_ = to_start_of;
+  }
+}
+
+
+void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
+                        Zone* zone) {
+  DCHECK(Start().Value() < position.Value());
+  DCHECK(result->IsEmpty());
+  // Find the last interval that ends before the position. If the
+  // position is contained in one of the intervals in the chain, we
+  // split that interval and use the first part.
+  UseInterval* current = FirstSearchIntervalForPosition(position);
+
+  // If the split position coincides with the beginning of a use interval,
+  // we need to split use positions in a special way.
+  bool split_at_start = false;
+
+  if (current->start().Value() == position.Value()) {
+    // When splitting at start we need to locate the previous use interval.
+    current = first_interval_;
+  }
+
+  while (current != NULL) {
+    if (current->Contains(position)) {
+      current->SplitAt(position, zone);
+      break;
+    }
+    UseInterval* next = current->next();
+    if (next->start().Value() >= position.Value()) {
+      split_at_start = (next->start().Value() == position.Value());
+      break;
+    }
+    current = next;
+  }
+
+  // Partition original use intervals to the two live ranges.
+  UseInterval* before = current;
+  UseInterval* after = before->next();
+  result->last_interval_ =
+      (last_interval_ == before)
+          ? after            // Only interval in the range after split.
+          : last_interval_;  // Last interval of the original range.
+  result->first_interval_ = after;
+  last_interval_ = before;
+
+  // Find the last use position before the split and the first use
+  // position after it.
+  UsePosition* use_after = first_pos_;
+  UsePosition* use_before = NULL;
+  if (split_at_start) {
+    // The split position coincides with the beginning of a use interval (the
+    // end of a lifetime hole). A use at this position should be attributed
+    // to the split child, which owns the use interval covering it.
+    while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  } else {
+    while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+      use_before = use_after;
+      use_after = use_after->next();
+    }
+  }
+
+  // Partition original use positions to the two live ranges.
+  if (use_before != NULL) {
+    use_before->next_ = NULL;
+  } else {
+    first_pos_ = NULL;
+  }
+  result->first_pos_ = use_after;
+
+  // Discard cached iteration state. It might be pointing
+  // to a use that no longer belongs to this live range.
+  last_processed_use_ = NULL;
+  current_interval_ = NULL;
+
+  // Link the new live range in the chain before any of the other
+  // ranges linked from the range before the split.
+  result->parent_ = (parent_ == NULL) ? this : parent_;
+  result->kind_ = result->parent_->kind_;
+  result->next_ = next_;
+  next_ = result;
+
+#ifdef DEBUG
+  Verify();
+  result->Verify();
+#endif
+}
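
(Illustration, not part of the diff.) Concretely: splitting a range whose intervals are [0,10) and [14,20) at position 16 cuts the straddling interval, leaving [0,10) and [14,16) on the parent and [16,20) on the new child; use positions move with whichever side covers them. A standalone sketch of just the interval partitioning, ignoring use positions and the linked-list representation used above:

    #include <cstdio>
    #include <utility>
    #include <vector>

    using Interval = std::pair<int, int>;  // Half-open [start, end).

    // Keeps everything below `pos` in `*intervals` and returns the rest;
    // an interval straddling `pos` is cut in two.
    std::vector<Interval> SplitAt(std::vector<Interval>* intervals, int pos) {
      std::vector<Interval> parent, child;
      for (Interval i : *intervals) {
        if (i.second <= pos) {
          parent.push_back(i);               // Entirely before the split.
        } else if (i.first >= pos) {
          child.push_back(i);                // Entirely after the split.
        } else {
          parent.push_back({i.first, pos});  // Straddles: cut in two.
          child.push_back({pos, i.second});
        }
      }
      *intervals = parent;
      return child;
    }

    int main() {
      std::vector<Interval> range = {{0, 10}, {14, 20}};
      std::vector<Interval> child = SplitAt(&range, 16);
      // The parent keeps [0,10) and [14,16); the child gets [16,20).
      std::printf("parent ends at %d, child starts at %d\n",
                  range.back().second, child.front().first);
      return 0;
    }
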
+
+
+// This implements an ordering on live ranges so that they are ordered by their
+// start positions.  This is needed for the correctness of the register
+// allocation algorithm.  If two live ranges start at the same offset then there
+// is a tie breaker based on where the value is first used.  This part of the
+// ordering is merely a heuristic.
+bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
+  LifetimePosition start = Start();
+  LifetimePosition other_start = other->Start();
+  if (start.Value() == other_start.Value()) {
+    UsePosition* pos = first_pos();
+    if (pos == NULL) return false;
+    UsePosition* other_pos = other->first_pos();
+    if (other_pos == NULL) return true;
+    return pos->pos().Value() < other_pos->pos().Value();
+  }
+  return start.Value() < other_start.Value();
+}
+
+
+void LiveRange::ShortenTo(LifetimePosition start) {
+  RegisterAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_,
+                                start.Value());
+  DCHECK(first_interval_ != NULL);
+  DCHECK(first_interval_->start().Value() <= start.Value());
+  DCHECK(start.Value() < first_interval_->end().Value());
+  first_interval_->set_start(start);
+}
+
+
+void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
+                               Zone* zone) {
+  RegisterAllocator::TraceAlloc("Ensure live range %d in interval [%d %d[\n",
+                                id_, start.Value(), end.Value());
+  LifetimePosition new_end = end;
+  while (first_interval_ != NULL &&
+         first_interval_->start().Value() <= end.Value()) {
+    if (first_interval_->end().Value() > end.Value()) {
+      new_end = first_interval_->end();
+    }
+    first_interval_ = first_interval_->next();
+  }
+
+  UseInterval* new_interval = new (zone) UseInterval(start, new_end);
+  new_interval->next_ = first_interval_;
+  first_interval_ = new_interval;
+  if (new_interval->next() == NULL) {
+    last_interval_ = new_interval;
+  }
+}
+
+
+void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
+                               Zone* zone) {
+  RegisterAllocator::TraceAlloc("Add to live range %d interval [%d %d[\n", id_,
+                                start.Value(), end.Value());
+  if (first_interval_ == NULL) {
+    UseInterval* interval = new (zone) UseInterval(start, end);
+    first_interval_ = interval;
+    last_interval_ = interval;
+  } else {
+    if (end.Value() == first_interval_->start().Value()) {
+      first_interval_->set_start(start);
+    } else if (end.Value() < first_interval_->start().Value()) {
+      UseInterval* interval = new (zone) UseInterval(start, end);
+      interval->set_next(first_interval_);
+      first_interval_ = interval;
+    } else {
+      // The order of instruction processing (see ProcessInstructions)
+      // guarantees that each new use interval either precedes or intersects
+      // with the last added interval.
+      DCHECK(start.Value() < first_interval_->end().Value());
+      first_interval_->start_ = Min(start, first_interval_->start_);
+      first_interval_->end_ = Max(end, first_interval_->end_);
+    }
+  }
+}
+
+
+void LiveRange::AddUsePosition(LifetimePosition pos,
+                               InstructionOperand* operand,
+                               InstructionOperand* hint, Zone* zone) {
+  RegisterAllocator::TraceAlloc("Add to live range %d use position %d\n", id_,
+                                pos.Value());
+  UsePosition* use_pos = new (zone) UsePosition(pos, operand, hint);
+  UsePosition* prev_hint = NULL;
+  UsePosition* prev = NULL;
+  UsePosition* current = first_pos_;
+  while (current != NULL && current->pos().Value() < pos.Value()) {
+    prev_hint = current->HasHint() ? current : prev_hint;
+    prev = current;
+    current = current->next();
+  }
+
+  if (prev == NULL) {
+    use_pos->set_next(first_pos_);
+    first_pos_ = use_pos;
+  } else {
+    use_pos->next_ = prev->next_;
+    prev->next_ = use_pos;
+  }
+
+  if (prev_hint == NULL && use_pos->HasHint()) {
+    current_hint_operand_ = hint;
+  }
+}
+
+
+void LiveRange::ConvertOperands(Zone* zone) {
+  InstructionOperand* op = CreateAssignedOperand(zone);
+  UsePosition* use_pos = first_pos();
+  while (use_pos != NULL) {
+    DCHECK(Start().Value() <= use_pos->pos().Value() &&
+           use_pos->pos().Value() <= End().Value());
+
+    if (use_pos->HasOperand()) {
+      DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
+             !use_pos->RequiresRegister());
+      use_pos->operand()->ConvertTo(op->kind(), op->index());
+    }
+    use_pos = use_pos->next();
+  }
+}
+
+
+bool LiveRange::CanCover(LifetimePosition position) const {
+  if (IsEmpty()) return false;
+  return Start().Value() <= position.Value() &&
+         position.Value() < End().Value();
+}
+
+
+bool LiveRange::Covers(LifetimePosition position) {
+  if (!CanCover(position)) return false;
+  UseInterval* start_search = FirstSearchIntervalForPosition(position);
+  for (UseInterval* interval = start_search; interval != NULL;
+       interval = interval->next()) {
+    DCHECK(interval->next() == NULL ||
+           interval->next()->start().Value() >= interval->start().Value());
+    AdvanceLastProcessedMarker(interval, position);
+    if (interval->Contains(position)) return true;
+    if (interval->start().Value() > position.Value()) return false;
+  }
+  return false;
+}
+
+
+LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
+  UseInterval* b = other->first_interval();
+  if (b == NULL) return LifetimePosition::Invalid();
+  LifetimePosition advance_last_processed_up_to = b->start();
+  UseInterval* a = FirstSearchIntervalForPosition(b->start());
+  while (a != NULL && b != NULL) {
+    if (a->start().Value() > other->End().Value()) break;
+    if (b->start().Value() > End().Value()) break;
+    LifetimePosition cur_intersection = a->Intersect(b);
+    if (cur_intersection.IsValid()) {
+      return cur_intersection;
+    }
+    if (a->start().Value() < b->start().Value()) {
+      a = a->next();
+      if (a == NULL || a->start().Value() > other->End().Value()) break;
+      AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
+    } else {
+      b = b->next();
+    }
+  }
+  return LifetimePosition::Invalid();
+}
+
+
+RegisterAllocator::RegisterAllocator(InstructionSequence* code)
+    : zone_(code->isolate()),
+      code_(code),
+      live_in_sets_(code->BasicBlockCount(), zone()),
+      live_ranges_(code->VirtualRegisterCount() * 2, zone()),
+      fixed_live_ranges_(NULL),
+      fixed_double_live_ranges_(NULL),
+      unhandled_live_ranges_(code->VirtualRegisterCount() * 2, zone()),
+      active_live_ranges_(8, zone()),
+      inactive_live_ranges_(8, zone()),
+      reusable_slots_(8, zone()),
+      mode_(UNALLOCATED_REGISTERS),
+      num_registers_(-1),
+      allocation_ok_(true) {}
+
+
+void RegisterAllocator::InitializeLivenessAnalysis() {
+  // Initialize the live_in sets for each block to NULL.
+  int block_count = code()->BasicBlockCount();
+  live_in_sets_.Initialize(block_count, zone());
+  live_in_sets_.AddBlock(NULL, block_count, zone());
+}
+
+
+BitVector* RegisterAllocator::ComputeLiveOut(BasicBlock* block) {
+  // Compute live out for the given block, except not including backward
+  // successor edges.
+  BitVector* live_out =
+      new (zone()) BitVector(code()->VirtualRegisterCount(), zone());
+
+  // Process all successor blocks.
+  BasicBlock::Successors successors = block->successors();
+  for (BasicBlock::Successors::iterator i = successors.begin();
+       i != successors.end(); ++i) {
+    // Add values live on entry to the successor. Note the successor's
+    // live_in will not be computed yet for backwards edges.
+    BasicBlock* successor = *i;
+    BitVector* live_in = live_in_sets_[successor->rpo_number_];
+    if (live_in != NULL) live_out->Union(*live_in);
+
+    // All phi input operands corresponding to this successor edge are live
+    // out from this block.
+    int index = successor->PredecessorIndexOf(block);
+    DCHECK(index >= 0);
+    DCHECK(index < static_cast<int>(successor->PredecessorCount()));
+    for (BasicBlock::const_iterator j = successor->begin();
+         j != successor->end(); ++j) {
+      Node* phi = *j;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+      Node* input = phi->InputAt(index);
+      live_out->Add(input->id());
+    }
+  }
+
+  return live_out;
+}
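
(Illustration, not part of the diff.) The phi handling above is the subtle part of live-out computation: a phi's inputs are not all live out of every predecessor; only the input flowing along this particular edge is. The per-edge rule, as a standalone sketch with virtual registers as plain ints:

    #include <cstdio>
    #include <set>
    #include <vector>

    // Adds everything live across one B -> S edge into live_out: S's
    // live_in plus, for each phi in S, only the input sitting in B's
    // predecessor slot.
    void AddEdgeLiveness(const std::set<int>& succ_live_in,
                         const std::vector<std::vector<int>>& succ_phis,
                         int pred_index, std::set<int>* live_out) {
      live_out->insert(succ_live_in.begin(), succ_live_in.end());
      for (const std::vector<int>& phi_inputs : succ_phis)
        live_out->insert(phi_inputs[pred_index]);
    }

    int main() {
      // Successor live_in = {1, 2}; one phi with inputs {3, 4}. As
      // predecessor 0, this block makes v3 live out, but not v4.
      std::set<int> live_out;
      AddEdgeLiveness({1, 2}, {{3, 4}}, 0, &live_out);
      for (int v : live_out) std::printf("v%d ", v);  // v1 v2 v3
      return 0;
    }
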
+
+
+void RegisterAllocator::AddInitialIntervals(BasicBlock* block,
+                                            BitVector* live_out) {
+  // Add an interval that includes the entire block to the live range for
+  // each live_out value.
+  LifetimePosition start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LifetimePosition end = LifetimePosition::FromInstructionIndex(
+                             block->last_instruction_index()).NextInstruction();
+  BitVector::Iterator iterator(live_out);
+  while (!iterator.Done()) {
+    int operand_index = iterator.Current();
+    LiveRange* range = LiveRangeFor(operand_index);
+    range->AddUseInterval(start, end, zone());
+    iterator.Advance();
+  }
+}
+
+
+int RegisterAllocator::FixedDoubleLiveRangeID(int index) {
+  return -index - 1 - Register::kMaxNumAllocatableRegisters;
+}
+
+
+InstructionOperand* RegisterAllocator::AllocateFixed(
+    UnallocatedOperand* operand, int pos, bool is_tagged) {
+  TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
+  DCHECK(operand->HasFixedPolicy());
+  if (operand->HasFixedSlotPolicy()) {
+    operand->ConvertTo(InstructionOperand::STACK_SLOT,
+                       operand->fixed_slot_index());
+  } else if (operand->HasFixedRegisterPolicy()) {
+    int reg_index = operand->fixed_register_index();
+    operand->ConvertTo(InstructionOperand::REGISTER, reg_index);
+  } else if (operand->HasFixedDoubleRegisterPolicy()) {
+    int reg_index = operand->fixed_register_index();
+    operand->ConvertTo(InstructionOperand::DOUBLE_REGISTER, reg_index);
+  } else {
+    UNREACHABLE();
+  }
+  if (is_tagged) {
+    TraceAlloc("Fixed reg is tagged at %d\n", pos);
+    Instruction* instr = InstructionAt(pos);
+    if (instr->HasPointerMap()) {
+      instr->pointer_map()->RecordPointer(operand, code_zone());
+    }
+  }
+  return operand;
+}
+
+
+LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
+  DCHECK(index < Register::kMaxNumAllocatableRegisters);
+  LiveRange* result = fixed_live_ranges_[index];
+  if (result == NULL) {
+    // TODO(titzer): add a utility method to allocate a new LiveRange:
+    // The LiveRange object itself can go in this zone, but the
+    // InstructionOperand needs to go in the code zone, since it may
+    // survive register allocation.
+    result = new (zone()) LiveRange(FixedLiveRangeID(index), code_zone());
+    DCHECK(result->IsFixed());
+    result->kind_ = GENERAL_REGISTERS;
+    SetLiveRangeAssignedRegister(result, index);
+    fixed_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
+  DCHECK(index < DoubleRegister::NumAllocatableRegisters());
+  LiveRange* result = fixed_double_live_ranges_[index];
+  if (result == NULL) {
+    result = new (zone()) LiveRange(FixedDoubleLiveRangeID(index), code_zone());
+    DCHECK(result->IsFixed());
+    result->kind_ = DOUBLE_REGISTERS;
+    SetLiveRangeAssignedRegister(result, index);
+    fixed_double_live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::LiveRangeFor(int index) {
+  if (index >= live_ranges_.length()) {
+    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1, zone());
+  }
+  LiveRange* result = live_ranges_[index];
+  if (result == NULL) {
+    result = new (zone()) LiveRange(index, code_zone());
+    live_ranges_[index] = result;
+  }
+  return result;
+}
+
+
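+// A sketch of the instruction layout assumed here: every instruction is
+// preceded by a gap instruction holding parallel moves, so the last gap of a
+// block sits immediately before its final instruction.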
+GapInstruction* RegisterAllocator::GetLastGap(BasicBlock* block) {
+  int last_instruction = block->last_instruction_index();
+  return code()->GapAt(last_instruction - 1);
+}
+
+
+LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
+  if (operand->IsUnallocated()) {
+    return LiveRangeFor(UnallocatedOperand::cast(operand)->virtual_register());
+  } else if (operand->IsRegister()) {
+    return FixedLiveRangeFor(operand->index());
+  } else if (operand->IsDoubleRegister()) {
+    return FixedDoubleLiveRangeFor(operand->index());
+  } else {
+    return NULL;
+  }
+}
+
+
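+// Define and Use (below) together build live intervals during the backward
+// walk over instructions: Use conservatively extends a range from the block
+// start to the use position, and Define later shortens it to the actual
+// definition point (or creates a tiny interval for a definition without use).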
+void RegisterAllocator::Define(LifetimePosition position,
+                               InstructionOperand* operand,
+                               InstructionOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+
+  if (range->IsEmpty() || range->Start().Value() > position.Value()) {
+    // This can happen if there is a definition without a use.
+    range->AddUseInterval(position, position.NextInstruction(), zone());
+    range->AddUsePosition(position.NextInstruction(), NULL, NULL, zone());
+  } else {
+    range->ShortenTo(position);
+  }
+
+  if (operand->IsUnallocated()) {
+    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
+  }
+}
+
+
+void RegisterAllocator::Use(LifetimePosition block_start,
+                            LifetimePosition position,
+                            InstructionOperand* operand,
+                            InstructionOperand* hint) {
+  LiveRange* range = LiveRangeFor(operand);
+  if (range == NULL) return;
+  if (operand->IsUnallocated()) {
+    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+    range->AddUsePosition(position, unalloc_operand, hint, zone());
+  }
+  range->AddUseInterval(block_start, position, zone());
+}
+
+
+void RegisterAllocator::AddConstraintsGapMove(int index,
+                                              InstructionOperand* from,
+                                              InstructionOperand* to) {
+  GapInstruction* gap = code()->GapAt(index);
+  ParallelMove* move =
+      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+  if (from->IsUnallocated()) {
+    const ZoneList<MoveOperands>* move_operands = move->move_operands();
+    for (int i = 0; i < move_operands->length(); ++i) {
+      MoveOperands cur = move_operands->at(i);
+      InstructionOperand* cur_to = cur.destination();
+      if (cur_to->IsUnallocated()) {
+        if (UnallocatedOperand::cast(cur_to)->virtual_register() ==
+            UnallocatedOperand::cast(from)->virtual_register()) {
+          move->AddMove(cur.source(), to, code_zone());
+          return;
+        }
+      }
+    }
+  }
+  move->AddMove(from, to, code_zone());
+}
+
+
+void RegisterAllocator::MeetRegisterConstraints(BasicBlock* block) {
+  int start = block->first_instruction_index();
+  int end = block->last_instruction_index();
+  DCHECK_NE(-1, start);
+  for (int i = start; i <= end; ++i) {
+    if (code()->IsGapAt(i)) {
+      Instruction* instr = NULL;
+      Instruction* prev_instr = NULL;
+      if (i < end) instr = InstructionAt(i + 1);
+      if (i > start) prev_instr = InstructionAt(i - 1);
+      MeetConstraintsBetween(prev_instr, instr, i);
+      if (!AllocationOk()) return;
+    }
+  }
+
+  // Meet register constraints for the instruction at the end of the block.
+  if (!code()->IsGapAt(end)) {
+    MeetRegisterConstraintsForLastInstructionInBlock(block);
+  }
+}
+
+
+void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
+    BasicBlock* block) {
+  int end = block->last_instruction_index();
+  Instruction* last_instruction = InstructionAt(end);
+  for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
+    InstructionOperand* output_operand = last_instruction->OutputAt(i);
+    DCHECK(!output_operand->IsConstant());
+    UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
+    int output_vreg = output->virtual_register();
+    LiveRange* range = LiveRangeFor(output_vreg);
+    bool assigned = false;
+    if (output->HasFixedPolicy()) {
+      AllocateFixed(output, -1, false);
+      // This value is produced on the stack; we never need to spill it.
+      if (output->IsStackSlot()) {
+        range->SetSpillOperand(output);
+        range->SetSpillStartIndex(end);
+        assigned = true;
+      }
+
+      BasicBlock::Successors successors = block->successors();
+      for (BasicBlock::Successors::iterator succ = successors.begin();
+           succ != successors.end(); ++succ) {
+        DCHECK((*succ)->PredecessorCount() == 1);
+        int gap_index = (*succ)->first_instruction_index() + 1;
+        DCHECK(code()->IsGapAt(gap_index));
+
+        // Create an unconstrained operand for the same virtual register
+        // and insert a gap move from the fixed output to the operand.
+        UnallocatedOperand* output_copy =
+            new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+        output_copy->set_virtual_register(output_vreg);
+
+        code()->AddGapMove(gap_index, output, output_copy);
+      }
+    }
+
+    if (!assigned) {
+      BasicBlock::Successors successors = block->successors();
+      for (BasicBlock::Successors::iterator succ = successors.begin();
+           succ != successors.end(); ++succ) {
+        DCHECK((*succ)->PredecessorCount() == 1);
+        int gap_index = (*succ)->first_instruction_index() + 1;
+        range->SetSpillStartIndex(gap_index);
+
+        // This move to the spill operand is not a real use. Liveness analysis
+        // and splitting of live ranges do not account for it.
+        // Thus it should be inserted at a lifetime position corresponding to
+        // the instruction end.
+        GapInstruction* gap = code()->GapAt(gap_index);
+        ParallelMove* move =
+            gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
+        move->AddMove(output, range->GetSpillOperand(), code_zone());
+      }
+    }
+  }
+}
+
+
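+// Indexing convention assumed below: the first instruction sits at
+// gap_index - 1, the connecting gap at gap_index, and the second instruction
+// at gap_index + 1. Fixed operands are rewritten in place by AllocateFixed,
+// and unconstrained copies are connected to them by moves in this gap.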
+void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
+                                               Instruction* second,
+                                               int gap_index) {
+  if (first != NULL) {
+    // Handle fixed temporaries.
+    for (size_t i = 0; i < first->TempCount(); i++) {
+      UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
+      if (temp->HasFixedPolicy()) {
+        AllocateFixed(temp, gap_index - 1, false);
+      }
+    }
+
+    // Handle constant/fixed output operands.
+    for (size_t i = 0; i < first->OutputCount(); i++) {
+      InstructionOperand* output = first->OutputAt(i);
+      if (output->IsConstant()) {
+        int output_vreg = output->index();
+        LiveRange* range = LiveRangeFor(output_vreg);
+        range->SetSpillStartIndex(gap_index - 1);
+        range->SetSpillOperand(output);
+      } else {
+        UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
+        LiveRange* range = LiveRangeFor(first_output->virtual_register());
+        bool assigned = false;
+        if (first_output->HasFixedPolicy()) {
+          UnallocatedOperand* output_copy =
+              first_output->CopyUnconstrained(code_zone());
+          bool is_tagged = HasTaggedValue(first_output->virtual_register());
+          AllocateFixed(first_output, gap_index, is_tagged);
+
+          // This value is produced on the stack; we never need to spill it.
+          if (first_output->IsStackSlot()) {
+            range->SetSpillOperand(first_output);
+            range->SetSpillStartIndex(gap_index - 1);
+            assigned = true;
+          }
+          code()->AddGapMove(gap_index, first_output, output_copy);
+        }
+
+        // Make sure we add a gap move for spilling (if we have not done
+        // so already).
+        if (!assigned) {
+          range->SetSpillStartIndex(gap_index);
+
+          // This move to the spill operand is not a real use. Liveness
+          // analysis and splitting of live ranges do not account for it.
+          // Thus it should be inserted at a lifetime position corresponding
+          // to the instruction end.
+          GapInstruction* gap = code()->GapAt(gap_index);
+          ParallelMove* move =
+              gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
+          move->AddMove(first_output, range->GetSpillOperand(), code_zone());
+        }
+      }
+    }
+  }
+
+  if (second != NULL) {
+    // Handle fixed input operands of second instruction.
+    for (size_t i = 0; i < second->InputCount(); i++) {
+      InstructionOperand* input = second->InputAt(i);
+      if (input->IsImmediate()) continue;  // Ignore immediates.
+      UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
+      if (cur_input->HasFixedPolicy()) {
+        UnallocatedOperand* input_copy =
+            cur_input->CopyUnconstrained(code_zone());
+        bool is_tagged = HasTaggedValue(cur_input->virtual_register());
+        AllocateFixed(cur_input, gap_index + 1, is_tagged);
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+      }
+    }
+
+    // Handle "output same as input" for second instruction.
+    for (size_t i = 0; i < second->OutputCount(); i++) {
+      InstructionOperand* output = second->OutputAt(i);
+      if (!output->IsUnallocated()) continue;
+      UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
+      if (second_output->HasSameAsInputPolicy()) {
+        DCHECK(i == 0);  // Only valid for first output.
+        UnallocatedOperand* cur_input =
+            UnallocatedOperand::cast(second->InputAt(0));
+        int output_vreg = second_output->virtual_register();
+        int input_vreg = cur_input->virtual_register();
+
+        UnallocatedOperand* input_copy =
+            cur_input->CopyUnconstrained(code_zone());
+        cur_input->set_virtual_register(second_output->virtual_register());
+        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+
+        if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
+          int index = gap_index + 1;
+          Instruction* instr = InstructionAt(index);
+          if (instr->HasPointerMap()) {
+            instr->pointer_map()->RecordPointer(input_copy, code_zone());
+          }
+        } else if (!HasTaggedValue(input_vreg) && HasTaggedValue(output_vreg)) {
+          // The input is assumed to immediately have a tagged representation
+          // before the pointer map can be used, i.e. the pointer map at the
+          // instruction will include the output operand (whose value at the
+          // beginning of the instruction is equal to the input operand). If
+          // this is not desired, then the pointer map at this instruction
+          // needs to be adjusted manually.
+        }
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
+  for (size_t i = 0; i < instr->OutputCount(); i++) {
+    InstructionOperand* output = instr->OutputAt(i);
+    if (output->IsRegister() && output->index() == index) return true;
+  }
+  return false;
+}
+
+
+bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
+                                                 int index) {
+  for (size_t i = 0; i < instr->OutputCount(); i++) {
+    InstructionOperand* output = instr->OutputAt(i);
+    if (output->IsDoubleRegister() && output->index() == index) return true;
+  }
+  return false;
+}
+
+
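+// Backward dataflow over one block: outputs kill liveness, inputs and temps
+// generate it. Instructions that clobber the full register file additionally
+// pin a one-instruction interval on every fixed range they do not explicitly
+// write, so nothing else may live across them in those registers.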
+void RegisterAllocator::ProcessInstructions(BasicBlock* block,
+                                            BitVector* live) {
+  int block_start = block->first_instruction_index();
+
+  LifetimePosition block_start_position =
+      LifetimePosition::FromInstructionIndex(block_start);
+
+  for (int index = block->last_instruction_index(); index >= block_start;
+       index--) {
+    LifetimePosition curr_position =
+        LifetimePosition::FromInstructionIndex(index);
+
+    Instruction* instr = InstructionAt(index);
+    DCHECK(instr != NULL);
+    if (instr->IsGapMoves()) {
+      // Process the moves of the gap instruction, making their sources live.
+      GapInstruction* gap = code()->GapAt(index);
+
+      // TODO(titzer): no need to create the parallel move if it doesn't exist.
+      ParallelMove* move =
+          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+      const ZoneList<MoveOperands>* move_operands = move->move_operands();
+      for (int i = 0; i < move_operands->length(); ++i) {
+        MoveOperands* cur = &move_operands->at(i);
+        if (cur->IsIgnored()) continue;
+        InstructionOperand* from = cur->source();
+        InstructionOperand* to = cur->destination();
+        InstructionOperand* hint = to;
+        if (to->IsUnallocated()) {
+          int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
+          LiveRange* to_range = LiveRangeFor(to_vreg);
+          if (to_range->is_phi()) {
+            if (to_range->is_non_loop_phi()) {
+              hint = to_range->current_hint_operand();
+            }
+          } else {
+            if (live->Contains(to_vreg)) {
+              Define(curr_position, to, from);
+              live->Remove(to_vreg);
+            } else {
+              cur->Eliminate();
+              continue;
+            }
+          }
+        } else {
+          Define(curr_position, to, from);
+        }
+        Use(block_start_position, curr_position, from, hint);
+        if (from->IsUnallocated()) {
+          live->Add(UnallocatedOperand::cast(from)->virtual_register());
+        }
+      }
+    } else {
+      // Process output, inputs, and temps of this non-gap instruction.
+      for (size_t i = 0; i < instr->OutputCount(); i++) {
+        InstructionOperand* output = instr->OutputAt(i);
+        if (output->IsUnallocated()) {
+          int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
+          live->Remove(out_vreg);
+        } else if (output->IsConstant()) {
+          int out_vreg = output->index();
+          live->Remove(out_vreg);
+        }
+        Define(curr_position, output, NULL);
+      }
+
+      if (instr->ClobbersRegisters()) {
+        for (int i = 0; i < Register::kMaxNumAllocatableRegisters; ++i) {
+          if (!IsOutputRegisterOf(instr, i)) {
+            LiveRange* range = FixedLiveRangeFor(i);
+            range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
+                                  zone());
+          }
+        }
+      }
+
+      if (instr->ClobbersDoubleRegisters()) {
+        for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+          if (!IsOutputDoubleRegisterOf(instr, i)) {
+            LiveRange* range = FixedDoubleLiveRangeFor(i);
+            range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
+                                  zone());
+          }
+        }
+      }
+
+      for (size_t i = 0; i < instr->InputCount(); i++) {
+        InstructionOperand* input = instr->InputAt(i);
+        if (input->IsImmediate()) continue;  // Ignore immediates.
+        LifetimePosition use_pos;
+        if (input->IsUnallocated() &&
+            UnallocatedOperand::cast(input)->IsUsedAtStart()) {
+          use_pos = curr_position;
+        } else {
+          use_pos = curr_position.InstructionEnd();
+        }
+
+        Use(block_start_position, use_pos, input, NULL);
+        if (input->IsUnallocated()) {
+          live->Add(UnallocatedOperand::cast(input)->virtual_register());
+        }
+      }
+
+      for (size_t i = 0; i < instr->TempCount(); i++) {
+        InstructionOperand* temp = instr->TempAt(i);
+        if (instr->ClobbersTemps()) {
+          if (temp->IsRegister()) continue;
+          if (temp->IsUnallocated()) {
+            UnallocatedOperand* temp_unalloc = UnallocatedOperand::cast(temp);
+            if (temp_unalloc->HasFixedPolicy()) {
+              continue;
+            }
+          }
+        }
+        Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
+        Define(curr_position, temp, NULL);
+      }
+    }
+  }
+}
+
+
+void RegisterAllocator::ResolvePhis(BasicBlock* block) {
+  for (BasicBlock::const_iterator i = block->begin(); i != block->end(); ++i) {
+    Node* phi = *i;
+    if (phi->opcode() != IrOpcode::kPhi) continue;
+
+    UnallocatedOperand* phi_operand =
+        new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
+    phi_operand->set_virtual_register(phi->id());
+
+    int j = 0;
+    Node::Inputs inputs = phi->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter, ++j) {
+      Node* op = *iter;
+      // TODO(mstarzinger): Use a ValueInputIterator instead.
+      if (j >= block->PredecessorCount()) continue;
+      UnallocatedOperand* operand =
+          new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+      operand->set_virtual_register(op->id());
+      BasicBlock* cur_block = block->PredecessorAt(j);
+      // The gap move must be added without the special processing done in
+      // AddConstraintsGapMove.
+      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
+                         phi_operand);
+
+      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
+      DCHECK(!branch->HasPointerMap());
+      USE(branch);
+    }
+
+    LiveRange* live_range = LiveRangeFor(phi->id());
+    BlockStartInstruction* block_start = code()->GetBlockStart(block);
+    block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+        ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
+    live_range->SetSpillStartIndex(block->first_instruction_index());
+
+    // We use the phi-ness of some nodes in some later heuristics.
+    live_range->set_is_phi(true);
+    if (!block->IsLoopHeader()) {
+      live_range->set_is_non_loop_phi(true);
+    }
+  }
+}
+
+
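+// Top-level driver. The phase order matters: constraint handling and phi
+// resolution insert the gap moves that liveness analysis then observes, and
+// the two linear-scan passes (general, then double) must both succeed before
+// pointer maps are populated and split ranges are reconnected.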
+bool RegisterAllocator::Allocate() {
+  assigned_registers_ = new (code_zone())
+      BitVector(Register::NumAllocatableRegisters(), code_zone());
+  assigned_double_registers_ = new (code_zone())
+      BitVector(DoubleRegister::NumAllocatableRegisters(), code_zone());
+  MeetRegisterConstraints();
+  if (!AllocationOk()) return false;
+  ResolvePhis();
+  BuildLiveRanges();
+  AllocateGeneralRegisters();
+  if (!AllocationOk()) return false;
+  AllocateDoubleRegisters();
+  if (!AllocationOk()) return false;
+  PopulatePointerMaps();
+  ConnectRanges();
+  ResolveControlFlow();
+  code()->frame()->SetAllocatedRegisters(assigned_registers_);
+  code()->frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
+  return true;
+}
+
+
+void RegisterAllocator::MeetRegisterConstraints() {
+  RegisterAllocatorPhase phase("L_Register constraints", this);
+  for (int i = 0; i < code()->BasicBlockCount(); ++i) {
+    MeetRegisterConstraints(code()->BlockAt(i));
+    if (!AllocationOk()) return;
+  }
+}
+
+
+void RegisterAllocator::ResolvePhis() {
+  RegisterAllocatorPhase phase("L_Resolve phis", this);
+
+  // Process the blocks in reverse order.
+  for (int i = code()->BasicBlockCount() - 1; i >= 0; --i) {
+    ResolvePhis(code()->BlockAt(i));
+  }
+}
+
+
+void RegisterAllocator::ResolveControlFlow(LiveRange* range, BasicBlock* block,
+                                           BasicBlock* pred) {
+  LifetimePosition pred_end =
+      LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+  LifetimePosition cur_start =
+      LifetimePosition::FromInstructionIndex(block->first_instruction_index());
+  LiveRange* pred_cover = NULL;
+  LiveRange* cur_cover = NULL;
+  LiveRange* cur_range = range;
+  while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
+    if (cur_range->CanCover(cur_start)) {
+      DCHECK(cur_cover == NULL);
+      cur_cover = cur_range;
+    }
+    if (cur_range->CanCover(pred_end)) {
+      DCHECK(pred_cover == NULL);
+      pred_cover = cur_range;
+    }
+    cur_range = cur_range->next();
+  }
+
+  if (cur_cover->IsSpilled()) return;
+  DCHECK(pred_cover != NULL && cur_cover != NULL);
+  if (pred_cover != cur_cover) {
+    InstructionOperand* pred_op =
+        pred_cover->CreateAssignedOperand(code_zone());
+    InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
+    if (!pred_op->Equals(cur_op)) {
+      GapInstruction* gap = NULL;
+      if (block->PredecessorCount() == 1) {
+        gap = code()->GapAt(block->first_instruction_index());
+      } else {
+        DCHECK(pred->SuccessorCount() == 1);
+        gap = GetLastGap(pred);
+
+        Instruction* branch = InstructionAt(pred->last_instruction_index());
+        DCHECK(!branch->HasPointerMap());
+        USE(branch);
+      }
+      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
+          ->AddMove(pred_op, cur_op, code_zone());
+    }
+  }
+}
+
+
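+// Picks the parallel move that sits exactly at a split position: either a
+// sub-position of the gap at that index, or the BEFORE/AFTER slot of the
+// neighboring gap when the position lands on a non-gap instruction.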
+ParallelMove* RegisterAllocator::GetConnectingParallelMove(
+    LifetimePosition pos) {
+  int index = pos.InstructionIndex();
+  if (code()->IsGapAt(index)) {
+    GapInstruction* gap = code()->GapAt(index);
+    return gap->GetOrCreateParallelMove(
+        pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
+        code_zone());
+  }
+  int gap_pos = pos.IsInstructionStart() ? (index - 1) : (index + 1);
+  return code()->GapAt(gap_pos)->GetOrCreateParallelMove(
+      (gap_pos < index) ? GapInstruction::AFTER : GapInstruction::BEFORE,
+      code_zone());
+}
+
+
+BasicBlock* RegisterAllocator::GetBlock(LifetimePosition pos) {
+  return code()->GetBasicBlock(pos.InstructionIndex());
+}
+
+
+void RegisterAllocator::ConnectRanges() {
+  RegisterAllocatorPhase phase("L_Connect ranges", this);
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* first_range = live_ranges()->at(i);
+    if (first_range == NULL || first_range->parent() != NULL) continue;
+
+    LiveRange* second_range = first_range->next();
+    while (second_range != NULL) {
+      LifetimePosition pos = second_range->Start();
+
+      if (!second_range->IsSpilled()) {
+        // Add gap move if the two live ranges touch and there is no block
+        // boundary.
+        if (first_range->End().Value() == pos.Value()) {
+          bool should_insert = true;
+          if (IsBlockBoundary(pos)) {
+            should_insert = CanEagerlyResolveControlFlow(GetBlock(pos));
+          }
+          if (should_insert) {
+            ParallelMove* move = GetConnectingParallelMove(pos);
+            InstructionOperand* prev_operand =
+                first_range->CreateAssignedOperand(code_zone());
+            InstructionOperand* cur_operand =
+                second_range->CreateAssignedOperand(code_zone());
+            move->AddMove(prev_operand, cur_operand, code_zone());
+          }
+        }
+      }
+
+      first_range = second_range;
+      second_range = second_range->next();
+    }
+  }
+}
+
+
+bool RegisterAllocator::CanEagerlyResolveControlFlow(BasicBlock* block) const {
+  if (block->PredecessorCount() != 1) return false;
+  return block->PredecessorAt(0)->rpo_number_ == block->rpo_number_ - 1;
+}
+
+
+void RegisterAllocator::ResolveControlFlow() {
+  RegisterAllocatorPhase phase("L_Resolve control flow", this);
+  for (int block_id = 1; block_id < code()->BasicBlockCount(); ++block_id) {
+    BasicBlock* block = code()->BlockAt(block_id);
+    if (CanEagerlyResolveControlFlow(block)) continue;
+    BitVector* live = live_in_sets_[block->rpo_number_];
+    BitVector::Iterator iterator(live);
+    while (!iterator.Done()) {
+      int operand_index = iterator.Current();
+      BasicBlock::Predecessors predecessors = block->predecessors();
+      for (BasicBlock::Predecessors::iterator i = predecessors.begin();
+           i != predecessors.end(); ++i) {
+        BasicBlock* cur = *i;
+        LiveRange* cur_range = LiveRangeFor(operand_index);
+        ResolveControlFlow(cur_range, block, cur);
+      }
+      iterator.Advance();
+    }
+  }
+}
+
+
+void RegisterAllocator::BuildLiveRanges() {
+  RegisterAllocatorPhase phase("L_Build live ranges", this);
+  InitializeLivenessAnalysis();
+  // Process the blocks in reverse order.
+  for (int block_id = code()->BasicBlockCount() - 1; block_id >= 0;
+       --block_id) {
+    BasicBlock* block = code()->BlockAt(block_id);
+    BitVector* live = ComputeLiveOut(block);
+    // Initially consider all live_out values live for the entire block. We
+    // will shorten these intervals if necessary.
+    AddInitialIntervals(block, live);
+
+    // Process the instructions in reverse order, generating and killing
+    // live values.
+    ProcessInstructions(block, live);
+    // All phi output operands are killed by this block.
+    for (BasicBlock::const_iterator i = block->begin(); i != block->end();
+         ++i) {
+      Node* phi = *i;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+
+      // The live range interval already ends at the first instruction of the
+      // block.
+      live->Remove(phi->id());
+
+      InstructionOperand* hint = NULL;
+      InstructionOperand* phi_operand = NULL;
+      GapInstruction* gap = GetLastGap(block->PredecessorAt(0));
+
+      // TODO(titzer): no need to create the parallel move if it doesn't exist.
+      ParallelMove* move =
+          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
+      for (int j = 0; j < move->move_operands()->length(); ++j) {
+        InstructionOperand* to = move->move_operands()->at(j).destination();
+        if (to->IsUnallocated() &&
+            UnallocatedOperand::cast(to)->virtual_register() == phi->id()) {
+          hint = move->move_operands()->at(j).source();
+          phi_operand = to;
+          break;
+        }
+      }
+      DCHECK(hint != NULL);
+
+      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      Define(block_start, phi_operand, hint);
+    }
+
+    // Now live is the live_in set for this block, except that it does not
+    // include values that are live only on backward successor edges.
+    live_in_sets_[block_id] = live;
+
+    if (block->IsLoopHeader()) {
+      // Add a live range stretching from the first loop instruction to the
+      // last for each value live on entry to the header.
+      BitVector::Iterator iterator(live);
+      LifetimePosition start = LifetimePosition::FromInstructionIndex(
+          block->first_instruction_index());
+      int end_index =
+          code()->BlockAt(block->loop_end_)->last_instruction_index();
+      LifetimePosition end =
+          LifetimePosition::FromInstructionIndex(end_index).NextInstruction();
+      while (!iterator.Done()) {
+        int operand_index = iterator.Current();
+        LiveRange* range = LiveRangeFor(operand_index);
+        range->EnsureInterval(start, end, zone());
+        iterator.Advance();
+      }
+
+      // Insert all values into the live in sets of all blocks in the loop.
+      for (int i = block->rpo_number_ + 1; i < block->loop_end_; ++i) {
+        live_in_sets_[i]->Union(*live);
+      }
+    }
+
+#ifdef DEBUG
+    if (block_id == 0) {
+      BitVector::Iterator iterator(live);
+      bool found = false;
+      while (!iterator.Done()) {
+        found = true;
+        int operand_index = iterator.Current();
+        PrintF("Register allocator error: live v%d reached first block.\n",
+               operand_index);
+        LiveRange* range = LiveRangeFor(operand_index);
+        PrintF("  (first use is at %d)\n", range->first_pos()->pos().Value());
+        CompilationInfo* info = code()->linkage()->info();
+        if (info->IsStub()) {
+          if (info->code_stub() == NULL) {
+            PrintF("\n");
+          } else {
+            CodeStub::Major major_key = info->code_stub()->MajorKey();
+            PrintF("  (function: %s)\n", CodeStub::MajorName(major_key, false));
+          }
+        } else {
+          DCHECK(info->IsOptimizing());
+          AllowHandleDereference allow_deref;
+          PrintF("  (function: %s)\n",
+                 info->function()->debug_name()->ToCString().get());
+        }
+        iterator.Advance();
+      }
+      DCHECK(!found);
+    }
+#endif
+  }
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
+
+      // TODO(bmeurer): This is a horrible hack to make sure that for constant
+      // live ranges, every use requires the constant to be in a register.
+      // Without this hack, all uses with "any" policy would get the constant
+      // operand assigned.
+      LiveRange* range = live_ranges_[i];
+      if (range->HasAllocatedSpillOperand() &&
+          range->GetSpillOperand()->IsConstant()) {
+        for (UsePosition* pos = range->first_pos(); pos != NULL;
+             pos = pos->next_) {
+          pos->register_beneficial_ = true;
+          pos->requires_reg_ = true;
+        }
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::SafePointsAreInOrder() const {
+  int safe_point = 0;
+  const PointerMapDeque* pointer_maps = code()->pointer_maps();
+  for (PointerMapDeque::const_iterator it = pointer_maps->begin();
+       it != pointer_maps->end(); ++it) {
+    PointerMap* map = *it;
+    if (safe_point > map->instruction_position()) return false;
+    safe_point = map->instruction_position();
+  }
+  return true;
+}
+
+
+void RegisterAllocator::PopulatePointerMaps() {
+  RegisterAllocatorPhase phase("L_Populate pointer maps", this);
+
+  DCHECK(SafePointsAreInOrder());
+
+  // Iterate over all safe point positions and record a pointer
+  // for all spilled live ranges at this point.
+  int last_range_start = 0;
+  const PointerMapDeque* pointer_maps = code()->pointer_maps();
+  PointerMapDeque::const_iterator first_it = pointer_maps->begin();
+  for (int range_idx = 0; range_idx < live_ranges()->length(); ++range_idx) {
+    LiveRange* range = live_ranges()->at(range_idx);
+    if (range == NULL) continue;
+    // Iterate over the first parts of multi-part live ranges.
+    if (range->parent() != NULL) continue;
+    // Skip non-reference values.
+    if (!HasTaggedValue(range->id())) continue;
+    // Skip empty live ranges.
+    if (range->IsEmpty()) continue;
+
+    // Find the extent of the range and its children.
+    int start = range->Start().InstructionIndex();
+    int end = 0;
+    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
+      LifetimePosition this_end = cur->End();
+      if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
+      DCHECK(cur->Start().InstructionIndex() >= start);
+    }
+
+    // Most of the ranges are in order, but not all.  Keep an eye on when they
+    // step backwards and reset the first_it so we don't miss any safe points.
+    if (start < last_range_start) first_it = pointer_maps->begin();
+    last_range_start = start;
+
+    // Step across all the safe points that are before the start of this range,
+    // recording how far we step so that the next range can continue from here.
+    for (; first_it != pointer_maps->end(); ++first_it) {
+      PointerMap* map = *first_it;
+      if (map->instruction_position() >= start) break;
+    }
+
+    // Step through the safe points to see whether they are in the range.
+    for (PointerMapDeque::const_iterator it = first_it;
+         it != pointer_maps->end(); ++it) {
+      PointerMap* map = *it;
+      int safe_point = map->instruction_position();
+
+      // The safe points are sorted so we can stop searching here.
+      if (safe_point - 1 > end) break;
+
+      // Advance to the next active range that covers the current
+      // safe point position.
+      LifetimePosition safe_point_pos =
+          LifetimePosition::FromInstructionIndex(safe_point);
+      LiveRange* cur = range;
+      while (cur != NULL && !cur->Covers(safe_point_pos)) {
+        cur = cur->next();
+      }
+      if (cur == NULL) continue;
+
+      // Check if the live range is spilled and the safe point is after
+      // the spill position.
+      if (range->HasAllocatedSpillOperand() &&
+          safe_point >= range->spill_start_index() &&
+          !range->GetSpillOperand()->IsConstant()) {
+        TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
+                   range->id(), range->spill_start_index(), safe_point);
+        map->RecordPointer(range->GetSpillOperand(), code_zone());
+      }
+
+      if (!cur->IsSpilled()) {
+        TraceAlloc(
+            "Pointer in register for range %d (start at %d) "
+            "at safe point %d\n",
+            cur->id(), cur->Start().Value(), safe_point);
+        InstructionOperand* operand = cur->CreateAssignedOperand(code_zone());
+        DCHECK(!operand->IsStackSlot());
+        map->RecordPointer(operand, code_zone());
+      }
+    }
+  }
+}
+
+
+void RegisterAllocator::AllocateGeneralRegisters() {
+  RegisterAllocatorPhase phase("L_Allocate general registers", this);
+  num_registers_ = Register::NumAllocatableRegisters();
+  mode_ = GENERAL_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void RegisterAllocator::AllocateDoubleRegisters() {
+  RegisterAllocatorPhase phase("L_Allocate double registers", this);
+  num_registers_ = DoubleRegister::NumAllocatableRegisters();
+  mode_ = DOUBLE_REGISTERS;
+  AllocateRegisters();
+}
+
+
+void RegisterAllocator::AllocateRegisters() {
+  DCHECK(unhandled_live_ranges_.is_empty());
+
+  for (int i = 0; i < live_ranges_.length(); ++i) {
+    if (live_ranges_[i] != NULL) {
+      if (live_ranges_[i]->Kind() == mode_) {
+        AddToUnhandledUnsorted(live_ranges_[i]);
+      }
+    }
+  }
+  SortUnhandled();
+  DCHECK(UnhandledIsSorted());
+
+  DCHECK(reusable_slots_.is_empty());
+  DCHECK(active_live_ranges_.is_empty());
+  DCHECK(inactive_live_ranges_.is_empty());
+
+  if (mode_ == DOUBLE_REGISTERS) {
+    for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+      LiveRange* current = fixed_double_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  } else {
+    DCHECK(mode_ == GENERAL_REGISTERS);
+    for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
+      LiveRange* current = fixed_live_ranges_.at(i);
+      if (current != NULL) {
+        AddToInactive(current);
+      }
+    }
+  }
+
+  while (!unhandled_live_ranges_.is_empty()) {
+    DCHECK(UnhandledIsSorted());
+    LiveRange* current = unhandled_live_ranges_.RemoveLast();
+    DCHECK(UnhandledIsSorted());
+    LifetimePosition position = current->Start();
+#ifdef DEBUG
+    allocation_finger_ = position;
+#endif
+    TraceAlloc("Processing interval %d start=%d\n", current->id(),
+               position.Value());
+
+    if (current->HasAllocatedSpillOperand()) {
+      TraceAlloc("Live range %d already has a spill operand\n", current->id());
+      LifetimePosition next_pos = position;
+      if (code()->IsGapAt(next_pos.InstructionIndex())) {
+        next_pos = next_pos.NextInstruction();
+      }
+      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+      // If the range already has a spill operand and it doesn't need a
+      // register immediately, split it and spill the first part of the range.
+      if (pos == NULL) {
+        Spill(current);
+        continue;
+      } else if (pos->pos().Value() >
+                 current->Start().NextInstruction().Value()) {
+        // Do not spill the live range eagerly if the use position that can
+        // benefit from the register is too close to the start of the live
+        // range.
+        SpillBetween(current, current->Start(), pos->pos());
+        if (!AllocationOk()) return;
+        DCHECK(UnhandledIsSorted());
+        continue;
+      }
+    }
+
+    for (int i = 0; i < active_live_ranges_.length(); ++i) {
+      LiveRange* cur_active = active_live_ranges_.at(i);
+      if (cur_active->End().Value() <= position.Value()) {
+        ActiveToHandled(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      } else if (!cur_active->Covers(position)) {
+        ActiveToInactive(cur_active);
+        --i;  // The live range was removed from the list of active live ranges.
+      }
+    }
+
+    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+      if (cur_inactive->End().Value() <= position.Value()) {
+        InactiveToHandled(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      } else if (cur_inactive->Covers(position)) {
+        InactiveToActive(cur_inactive);
+        --i;  // Live range was removed from the list of inactive live ranges.
+      }
+    }
+
+    DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
+
+    bool result = TryAllocateFreeReg(current);
+    if (!AllocationOk()) return;
+
+    if (!result) AllocateBlockedReg(current);
+    if (!AllocationOk()) return;
+
+    if (current->HasRegisterAssigned()) {
+      AddToActive(current);
+    }
+  }
+
+  reusable_slots_.Rewind(0);
+  active_live_ranges_.Rewind(0);
+  inactive_live_ranges_.Rewind(0);
+}
+
+
+const char* RegisterAllocator::RegisterName(int allocation_index) {
+  if (mode_ == GENERAL_REGISTERS) {
+    return Register::AllocationIndexToString(allocation_index);
+  } else {
+    return DoubleRegister::AllocationIndexToString(allocation_index);
+  }
+}
+
+
+void RegisterAllocator::TraceAlloc(const char* msg, ...) {
+  if (FLAG_trace_alloc) {
+    va_list arguments;
+    va_start(arguments, msg);
+    base::OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+bool RegisterAllocator::HasTaggedValue(int virtual_register) const {
+  return code()->IsReference(virtual_register);
+}
+
+
+RegisterKind RegisterAllocator::RequiredRegisterKind(
+    int virtual_register) const {
+  return (code()->IsDouble(virtual_register)) ? DOUBLE_REGISTERS
+                                              : GENERAL_REGISTERS;
+}
+
+
+void RegisterAllocator::AddToActive(LiveRange* range) {
+  TraceAlloc("Add live range %d to active\n", range->id());
+  active_live_ranges_.Add(range, zone());
+}
+
+
+void RegisterAllocator::AddToInactive(LiveRange* range) {
+  TraceAlloc("Add live range %d to inactive\n", range->id());
+  inactive_live_ranges_.Add(range, zone());
+}
+
+
+void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+  DCHECK(allocation_finger_.Value() <= range->Start().Value());
+  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
+    LiveRange* cur_range = unhandled_live_ranges_.at(i);
+    if (range->ShouldBeAllocatedBefore(cur_range)) {
+      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+      unhandled_live_ranges_.InsertAt(i + 1, range, zone());
+      DCHECK(UnhandledIsSorted());
+      return;
+    }
+  }
+  TraceAlloc("Add live range %d to unhandled at start\n", range->id());
+  unhandled_live_ranges_.InsertAt(0, range, zone());
+  DCHECK(UnhandledIsSorted());
+}
+
+
+void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
+  if (range == NULL || range->IsEmpty()) return;
+  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+  TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
+  unhandled_live_ranges_.Add(range, zone());
+}
+
+
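+// Comparator for SortUnhandled below: a range that should be allocated
+// earlier sorts toward the end of the array, so that RemoveLast() in
+// AllocateRegisters always pops the next range to process.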
+static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
+  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
+         !(*b)->ShouldBeAllocatedBefore(*a));
+  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
+  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
+  return (*a)->id() - (*b)->id();
+}
+
+
+// Sort the unhandled live ranges so that the ranges to be processed first are
+// at the end of the array list.  This is convenient for the register allocation
+// algorithm because it is efficient to remove elements from the end.
+void RegisterAllocator::SortUnhandled() {
+  TraceAlloc("Sort unhandled\n");
+  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+}
+
+
+bool RegisterAllocator::UnhandledIsSorted() {
+  int len = unhandled_live_ranges_.length();
+  for (int i = 1; i < len; i++) {
+    LiveRange* a = unhandled_live_ranges_.at(i - 1);
+    LiveRange* b = unhandled_live_ranges_.at(i);
+    if (a->Start().Value() < b->Start().Value()) return false;
+  }
+  return true;
+}
+
+
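+// Spill-slot recycling: once the last part of a range is retired, its slot
+// can be handed to any range that starts after this one ends (checked in
+// TryReuseSpillSlot below), keeping the frame small.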
+void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
+  // Check that we are the last range.
+  if (range->next() != NULL) return;
+
+  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
+
+  InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand();
+  if (spill_operand->IsConstant()) return;
+  if (spill_operand->index() >= 0) {
+    reusable_slots_.Add(range, zone());
+  }
+}
+
+
+InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
+  if (reusable_slots_.is_empty()) return NULL;
+  if (reusable_slots_.first()->End().Value() >
+      range->TopLevel()->Start().Value()) {
+    return NULL;
+  }
+  InstructionOperand* result =
+      reusable_slots_.first()->TopLevel()->GetSpillOperand();
+  reusable_slots_.Remove(0);
+  return result;
+}
+
+
+void RegisterAllocator::ActiveToHandled(LiveRange* range) {
+  DCHECK(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from active to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void RegisterAllocator::ActiveToInactive(LiveRange* range) {
+  DCHECK(active_live_ranges_.Contains(range));
+  active_live_ranges_.RemoveElement(range);
+  inactive_live_ranges_.Add(range, zone());
+  TraceAlloc("Moving live range %d from active to inactive\n", range->id());
+}
+
+
+void RegisterAllocator::InactiveToHandled(LiveRange* range) {
+  DCHECK(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
+  FreeSpillSlot(range);
+}
+
+
+void RegisterAllocator::InactiveToActive(LiveRange* range) {
+  DCHECK(inactive_live_ranges_.Contains(range));
+  inactive_live_ranges_.RemoveElement(range);
+  active_live_ranges_.Add(range, zone());
+  TraceAlloc("Moving live range %d from inactive to active\n", range->id());
+}
+
+
+// TryAllocateFreeReg and AllocateBlockedReg assume this
+// when allocating local arrays.
+STATIC_ASSERT(DoubleRegister::kMaxNumAllocatableRegisters >=
+              Register::kMaxNumAllocatableRegisters);
+
+
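+// Classic linear-scan "free until" computation: an active range blocks its
+// register outright, an inactive one only from its first intersection with
+// current. The register that stays free longest wins; if it is not free for
+// the whole of current, current is split and the tail is retried later.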
+bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
+  LifetimePosition free_until_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+
+  for (int i = 0; i < num_registers_; i++) {
+    free_until_pos[i] = LifetimePosition::MaxPosition();
+  }
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* cur_active = active_live_ranges_.at(i);
+    free_until_pos[cur_active->assigned_register()] =
+        LifetimePosition::FromInstructionIndex(0);
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+    DCHECK(cur_inactive->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection =
+        cur_inactive->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = cur_inactive->assigned_register();
+    free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
+  }
+
+  InstructionOperand* hint = current->FirstHint();
+  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+    int register_index = hint->index();
+    TraceAlloc(
+        "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
+        RegisterName(register_index), free_until_pos[register_index].Value(),
+        current->id(), current->End().Value());
+
+    // The desired register is free until the end of the current live range.
+    if (free_until_pos[register_index].Value() >= current->End().Value()) {
+      TraceAlloc("Assigning preferred reg %s to live range %d\n",
+                 RegisterName(register_index), current->id());
+      SetLiveRangeAssignedRegister(current, register_index);
+      return true;
+    }
+  }
+
+  // Find the register which stays free for the longest time.
+  int reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (free_until_pos[i].Value() > free_until_pos[reg].Value()) {
+      reg = i;
+    }
+  }
+
+  LifetimePosition pos = free_until_pos[reg];
+
+  if (pos.Value() <= current->Start().Value()) {
+    // All registers are blocked.
+    return false;
+  }
+
+  if (pos.Value() < current->End().Value()) {
+    // Register reg is available at the range start but becomes blocked before
+    // the range end. Split current at position where it becomes blocked.
+    LiveRange* tail = SplitRangeAt(current, pos);
+    if (!AllocationOk()) return false;
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is available at the range start and is free until
+  // the range end.
+  DCHECK(pos.Value() >= current->End().Value());
+  TraceAlloc("Assigning free reg %s to live range %d\n", RegisterName(reg),
+             current->id());
+  SetLiveRangeAssignedRegister(current, reg);
+
+  return true;
+}
+
+
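+// When no register is free, each candidate is scored by use_pos (the next
+// position where evicting its current holder would hurt) and capped by
+// block_pos (where a fixed range makes eviction impossible). We take the
+// register whose next use is furthest away; if even that is before current's
+// first register-requiring use, current itself is spilled instead.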
+void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
+  UsePosition* register_use = current->NextRegisterPosition(current->Start());
+  if (register_use == NULL) {
+    // There is no use in the current live range that requires a register.
+    // We can just spill it.
+    Spill(current);
+    return;
+  }
+
+  LifetimePosition use_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+  LifetimePosition block_pos[DoubleRegister::kMaxNumAllocatableRegisters];
+
+  for (int i = 0; i < num_registers_; i++) {
+    use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
+  }
+
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
+      block_pos[cur_reg] = use_pos[cur_reg] =
+          LifetimePosition::FromInstructionIndex(0);
+    } else {
+      UsePosition* next_use =
+          range->NextUsePositionRegisterIsBeneficial(current->Start());
+      if (next_use == NULL) {
+        use_pos[cur_reg] = range->End();
+      } else {
+        use_pos[cur_reg] = next_use->pos();
+      }
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_.at(i);
+    DCHECK(range->End().Value() > current->Start().Value());
+    LifetimePosition next_intersection = range->FirstIntersection(current);
+    if (!next_intersection.IsValid()) continue;
+    int cur_reg = range->assigned_register();
+    if (range->IsFixed()) {
+      block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
+      use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
+    } else {
+      use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
+    }
+  }
+
+  int reg = 0;
+  for (int i = 1; i < RegisterCount(); ++i) {
+    if (use_pos[i].Value() > use_pos[reg].Value()) {
+      reg = i;
+    }
+  }
+
+  LifetimePosition pos = use_pos[reg];
+
+  if (pos.Value() < register_use->pos().Value()) {
+    // All registers are blocked before the first use that requires a register.
+    // Spill starting part of live range up to that use.
+    SpillBetween(current, current->Start(), register_use->pos());
+    return;
+  }
+
+  if (block_pos[reg].Value() < current->End().Value()) {
+    // Register becomes blocked before the current range end. Split before that
+    // position.
+    LiveRange* tail = SplitBetween(current, current->Start(),
+                                   block_pos[reg].InstructionStart());
+    if (!AllocationOk()) return;
+    AddToUnhandledSorted(tail);
+  }
+
+  // Register reg is not blocked for the whole range.
+  DCHECK(block_pos[reg].Value() >= current->End().Value());
+  TraceAlloc("Assigning blocked reg %s to live range %d\n", RegisterName(reg),
+             current->id());
+  SetLiveRangeAssignedRegister(current, reg);
+
+  // This register was not free. Thus we need to find and spill
+  // parts of active and inactive live ranges that use the same register
+  // at the same lifetime positions as current.
+  SplitAndSpillIntersecting(current);
+}
+
+
+LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
+    LiveRange* range, LifetimePosition pos) {
+  BasicBlock* block = GetBlock(pos.InstructionStart());
+  BasicBlock* loop_header =
+      block->IsLoopHeader() ? block : code()->GetContainingLoop(block);
+
+  if (loop_header == NULL) return pos;
+
+  UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
+
+  while (loop_header != NULL) {
+    // We are going to spill the live range inside the loop.
+    // If possible, try to move the spilling position backwards to the loop
+    // header, as this reduces the number of memory moves on the back edge.
+    LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
+        loop_header->first_instruction_index());
+
+    if (range->Covers(loop_start)) {
+      if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
+        // No register beneficial use inside the loop before the pos.
+        pos = loop_start;
+      }
+    }
+
+    // Try hoisting out to an outer loop.
+    loop_header = code()->GetContainingLoop(loop_header);
+  }
+
+  return pos;
+}
+
+
+void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
+  DCHECK(current->HasRegisterAssigned());
+  int reg = current->assigned_register();
+  LifetimePosition split_pos = current->Start();
+  for (int i = 0; i < active_live_ranges_.length(); ++i) {
+    LiveRange* range = active_live_ranges_[i];
+    if (range->assigned_register() == reg) {
+      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
+      if (next_pos == NULL) {
+        SpillAfter(range, spill_pos);
+      } else {
+        // When spilling between spill_pos and next_pos, ensure that the range
+        // remains spilled at least until the start of the current live range.
+        // This guarantees that we will not introduce new unhandled ranges that
+        // start before the current range, as this would violate the allocation
+        // invariants and lead to an inconsistent state of the active and
+        // inactive live ranges: ranges are allocated in order of their start
+        // positions, and ranges are retired from active/inactive when the
+        // start of the current live range is larger than their end.
+        SpillBetweenUntil(range, spill_pos, current->Start(), next_pos->pos());
+      }
+      if (!AllocationOk()) return;
+      ActiveToHandled(range);
+      --i;
+    }
+  }
+
+  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
+    LiveRange* range = inactive_live_ranges_[i];
+    DCHECK(range->End().Value() > current->Start().Value());
+    if (range->assigned_register() == reg && !range->IsFixed()) {
+      LifetimePosition next_intersection = range->FirstIntersection(current);
+      if (next_intersection.IsValid()) {
+        UsePosition* next_pos = range->NextRegisterPosition(current->Start());
+        if (next_pos == NULL) {
+          SpillAfter(range, split_pos);
+        } else {
+          next_intersection = Min(next_intersection, next_pos->pos());
+          SpillBetween(range, split_pos, next_intersection);
+        }
+        if (!AllocationOk()) return;
+        InactiveToHandled(range);
+        --i;
+      }
+    }
+  }
+}
+
+
+bool RegisterAllocator::IsBlockBoundary(LifetimePosition pos) {
+  return pos.IsInstructionStart() &&
+         InstructionAt(pos.InstructionIndex())->IsBlockStart();
+}
+
+
+LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
+                                           LifetimePosition pos) {
+  DCHECK(!range->IsFixed());
+  TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
+
+  if (pos.Value() <= range->Start().Value()) return range;
+
+  // We can't properly connect live ranges if the split occurred at the end
+  // of a control instruction.
+  DCHECK(pos.IsInstructionStart() ||
+         !InstructionAt(pos.InstructionIndex())->IsControl());
+
+  int vreg = GetVirtualRegister();
+  if (!AllocationOk()) return NULL;
+  LiveRange* result = LiveRangeFor(vreg);
+  range->SplitAt(pos, result, zone());
+  return result;
+}
+
+
+LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
+                                           LifetimePosition start,
+                                           LifetimePosition end) {
+  DCHECK(!range->IsFixed());
+  TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
+             range->id(), start.Value(), end.Value());
+
+  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+  DCHECK(split_pos.Value() >= start.Value());
+  return SplitRangeAt(range, split_pos);
+}
+
+
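+// Heuristic for where to split inside [start, end]: prefer the boundary of
+// the outermost loop that lies strictly after start, so that the spill or
+// reload introduced by the split does not execute on every loop iteration.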
+LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
+                                                        LifetimePosition end) {
+  int start_instr = start.InstructionIndex();
+  int end_instr = end.InstructionIndex();
+  DCHECK(start_instr <= end_instr);
+
+  // We have no choice if start and end are in the same instruction.
+  if (start_instr == end_instr) return end;
+
+  BasicBlock* start_block = GetBlock(start);
+  BasicBlock* end_block = GetBlock(end);
+
+  if (end_block == start_block) {
+    // The interval is split in the same basic block. Split at the latest
+    // possible position.
+    return end;
+  }
+
+  BasicBlock* block = end_block;
+  // Find header of outermost loop.
+  // TODO(titzer): fix redundancy below.
+  while (code()->GetContainingLoop(block) != NULL &&
+         code()->GetContainingLoop(block)->rpo_number_ >
+             start_block->rpo_number_) {
+    block = code()->GetContainingLoop(block);
+  }
+
+  // We did not find any suitable outer loop. Split at the latest possible
+  // position unless end_block is a loop header itself.
+  if (block == end_block && !end_block->IsLoopHeader()) return end;
+
+  return LifetimePosition::FromInstructionIndex(
+      block->first_instruction_index());
+}
+
+
+void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
+  LiveRange* second_part = SplitRangeAt(range, pos);
+  if (!AllocationOk()) return;
+  Spill(second_part);
+}
+
+
+void RegisterAllocator::SpillBetween(LiveRange* range, LifetimePosition start,
+                                     LifetimePosition end) {
+  SpillBetweenUntil(range, start, start, end);
+}
+
+
+void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
+                                          LifetimePosition start,
+                                          LifetimePosition until,
+                                          LifetimePosition end) {
+  CHECK(start.Value() < end.Value());
+  LiveRange* second_part = SplitRangeAt(range, start);
+  if (!AllocationOk()) return;
+
+  if (second_part->Start().Value() < end.Value()) {
+    // The split result intersects with [start, end[.
+    // Split it at a position between ]start+1, end[, spill the middle part,
+    // and add the rest to the unhandled set.
+    LiveRange* third_part = SplitBetween(
+        second_part, Max(second_part->Start().InstructionEnd(), until),
+        end.PrevInstruction().InstructionEnd());
+    if (!AllocationOk()) return;
+
+    DCHECK(third_part != second_part);
+
+    Spill(second_part);
+    AddToUnhandledSorted(third_part);
+  } else {
+    // The split result does not intersect with [start, end[.
+    // Nothing to spill. Add it to the unhandled list as a whole.
+    AddToUnhandledSorted(second_part);
+  }
+}
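+// Illustrative example (not part of the original source): for a range split
+// at start == 4 with end == 12, the part from 4 onward intersects [4, 12[, so
+// it is split once more inside ]4, 12[; the piece covering the middle is
+// spilled and the tail after the second split is re-queued as unhandled.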
+
+
+void RegisterAllocator::Spill(LiveRange* range) {
+  DCHECK(!range->IsSpilled());
+  TraceAlloc("Spilling live range %d\n", range->id());
+  LiveRange* first = range->TopLevel();
+
+  if (!first->HasAllocatedSpillOperand()) {
+    InstructionOperand* op = TryReuseSpillSlot(range);
+    if (op == NULL) {
+      // Allocate a new operand referring to the spill slot.
+      RegisterKind kind = range->Kind();
+      int index = code()->frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+      if (kind == DOUBLE_REGISTERS) {
+        op = DoubleStackSlotOperand::Create(index, zone());
+      } else {
+        DCHECK(kind == GENERAL_REGISTERS);
+        op = StackSlotOperand::Create(index, zone());
+      }
+    }
+    first->SetSpillOperand(op);
+  }
+  range->MakeSpilled(code_zone());
+}
+
+
+int RegisterAllocator::RegisterCount() const { return num_registers_; }
+
+
+#ifdef DEBUG
+
+
+void RegisterAllocator::Verify() const {
+  for (int i = 0; i < live_ranges()->length(); ++i) {
+    LiveRange* current = live_ranges()->at(i);
+    if (current != NULL) current->Verify();
+  }
+}
+
+
+#endif
+
+
+void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
+                                                     int reg) {
+  if (range->Kind() == DOUBLE_REGISTERS) {
+    assigned_double_registers_->Add(reg);
+  } else {
+    DCHECK(range->Kind() == GENERAL_REGISTERS);
+    assigned_registers_->Add(reg);
+  }
+  range->set_assigned_register(reg, code_zone());
+}
+
+
+RegisterAllocatorPhase::RegisterAllocatorPhase(const char* name,
+                                               RegisterAllocator* allocator)
+    : CompilationPhase(name, allocator->code()->linkage()->info()),
+      allocator_(allocator) {
+  if (FLAG_turbo_stats) {
+    allocator_zone_start_allocation_size_ =
+        allocator->zone()->allocation_size();
+  }
+}
+
+
+RegisterAllocatorPhase::~RegisterAllocatorPhase() {
+  if (FLAG_turbo_stats) {
+    unsigned size = allocator_->zone()->allocation_size() -
+                    allocator_zone_start_allocation_size_;
+    isolate()->GetTStatistics()->SaveTiming(name(), base::TimeDelta(), size);
+  }
+#ifdef DEBUG
+  if (allocator_ != NULL) allocator_->Verify();
+#endif
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/register-allocator.h b/src/compiler/register-allocator.h
new file mode 100644
index 0000000..881ce37
--- /dev/null
+++ b/src/compiler/register-allocator.h
@@ -0,0 +1,548 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_ALLOCATOR_H_
+#define V8_REGISTER_ALLOCATOR_H_
+
+#include "src/allocation.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/node.h"
+#include "src/compiler/schedule.h"
+#include "src/macro-assembler.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class BitVector;
+class InstructionOperand;
+class UnallocatedOperand;
+class ParallelMove;
+class PointerMap;
+
+namespace compiler {
+
+enum RegisterKind {
+  UNALLOCATED_REGISTERS,
+  GENERAL_REGISTERS,
+  DOUBLE_REGISTERS
+};
+
+
+// This class represents a single point of an InstructionOperand's lifetime. For
+// each instruction there are exactly two lifetime positions: the beginning and
+// the end of the instruction. Lifetime positions for different instructions are
+// disjoint.
+class LifetimePosition {
+ public:
+  // Return the lifetime position that corresponds to the beginning of
+  // the instruction with the given index.
+  static LifetimePosition FromInstructionIndex(int index) {
+    return LifetimePosition(index * kStep);
+  }
+
+  // Returns a numeric representation of this lifetime position.
+  int Value() const { return value_; }
+
+  // Returns the index of the instruction to which this lifetime position
+  // corresponds.
+  int InstructionIndex() const {
+    DCHECK(IsValid());
+    return value_ / kStep;
+  }
+
+  // Returns true if this lifetime position corresponds to the instruction
+  // start.
+  bool IsInstructionStart() const { return (value_ & (kStep - 1)) == 0; }
+
+  // Returns the lifetime position for the start of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionStart() const {
+    DCHECK(IsValid());
+    return LifetimePosition(value_ & ~(kStep - 1));
+  }
+
+  // Returns the lifetime position for the end of the instruction which
+  // corresponds to this lifetime position.
+  LifetimePosition InstructionEnd() const {
+    DCHECK(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep / 2);
+  }
+
+  // Returns the lifetime position for the beginning of the next instruction.
+  LifetimePosition NextInstruction() const {
+    DCHECK(IsValid());
+    return LifetimePosition(InstructionStart().Value() + kStep);
+  }
+
+  // Returns the lifetime position for the beginning of the previous
+  // instruction.
+  LifetimePosition PrevInstruction() const {
+    DCHECK(IsValid());
+    DCHECK(value_ > 1);
+    return LifetimePosition(InstructionStart().Value() - kStep);
+  }
+
+  // Constructs the lifetime position which does not correspond to any
+  // instruction.
+  LifetimePosition() : value_(-1) {}
+
+  // Returns true if this lifetime position corresponds to some
+  // instruction.
+  bool IsValid() const { return value_ != -1; }
+
+  static inline LifetimePosition Invalid() { return LifetimePosition(); }
+
+  static inline LifetimePosition MaxPosition() {
+    // We have to use this kind of getter instead of a static member due to
+    // a crash bug in GDB.
+    return LifetimePosition(kMaxInt);
+  }
+
+ private:
+  static const int kStep = 2;
+
+  // Code relies on kStep being a power of two.
+  STATIC_ASSERT(IS_POWER_OF_TWO(kStep));
+
+  explicit LifetimePosition(int value) : value_(value) {}
+
+  int value_;
+};
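+// Illustrative example (not part of the original source): with kStep == 2,
+// LifetimePosition::FromInstructionIndex(3) has Value() == 6 (the start of
+// instruction 3), its InstructionEnd() has Value() == 7, and
+// NextInstruction() starts at Value() == 8, i.e. instruction index 4.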
+
+
+// Representation of the non-empty interval [start,end[.
+class UseInterval : public ZoneObject {
+ public:
+  UseInterval(LifetimePosition start, LifetimePosition end)
+      : start_(start), end_(end), next_(NULL) {
+    DCHECK(start.Value() < end.Value());
+  }
+
+  LifetimePosition start() const { return start_; }
+  LifetimePosition end() const { return end_; }
+  UseInterval* next() const { return next_; }
+
+  // Split this interval at the given position without affecting the
+  // live range that owns it. The interval must contain the position.
+  void SplitAt(LifetimePosition pos, Zone* zone);
+
+  // If this interval intersects with {other}, return the smallest position
+  // that belongs to both of them.
+  LifetimePosition Intersect(const UseInterval* other) const {
+    if (other->start().Value() < start_.Value()) return other->Intersect(this);
+    if (other->start().Value() < end_.Value()) return other->start();
+    return LifetimePosition::Invalid();
+  }
+
+  bool Contains(LifetimePosition point) const {
+    return start_.Value() <= point.Value() && point.Value() < end_.Value();
+  }
+
+  void set_start(LifetimePosition start) { start_ = start; }
+  void set_next(UseInterval* next) { next_ = next; }
+
+  LifetimePosition start_;
+  LifetimePosition end_;
+  UseInterval* next_;
+};
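+// Illustrative example (not part of the original source): the intervals
+// [2, 6[ and [4, 10[ overlap, so Intersect() returns position 4, the smallest
+// position contained in both; Contains(6) is false for [2, 6[ because the
+// interval is half-open.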
+
+// Representation of a use position.
+class UsePosition : public ZoneObject {
+ public:
+  UsePosition(LifetimePosition pos, InstructionOperand* operand,
+              InstructionOperand* hint);
+
+  InstructionOperand* operand() const { return operand_; }
+  bool HasOperand() const { return operand_ != NULL; }
+
+  InstructionOperand* hint() const { return hint_; }
+  bool HasHint() const;
+  bool RequiresRegister() const;
+  bool RegisterIsBeneficial() const;
+
+  LifetimePosition pos() const { return pos_; }
+  UsePosition* next() const { return next_; }
+
+  void set_next(UsePosition* next) { next_ = next; }
+
+  InstructionOperand* const operand_;
+  InstructionOperand* const hint_;
+  LifetimePosition const pos_;
+  UsePosition* next_;
+  bool requires_reg_;
+  bool register_beneficial_;
+};
+
+// Representation of SSA values' live ranges as a collection of (continuous)
+// intervals over the instruction ordering.
+class LiveRange : public ZoneObject {
+ public:
+  static const int kInvalidAssignment = 0x7fffffff;
+
+  LiveRange(int id, Zone* zone);
+
+  UseInterval* first_interval() const { return first_interval_; }
+  UsePosition* first_pos() const { return first_pos_; }
+  LiveRange* parent() const { return parent_; }
+  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+  LiveRange* next() const { return next_; }
+  bool IsChild() const { return parent() != NULL; }
+  int id() const { return id_; }
+  bool IsFixed() const { return id_ < 0; }
+  bool IsEmpty() const { return first_interval() == NULL; }
+  InstructionOperand* CreateAssignedOperand(Zone* zone);
+  int assigned_register() const { return assigned_register_; }
+  int spill_start_index() const { return spill_start_index_; }
+  void set_assigned_register(int reg, Zone* zone);
+  void MakeSpilled(Zone* zone);
+  bool is_phi() const { return is_phi_; }
+  void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
+  bool is_non_loop_phi() const { return is_non_loop_phi_; }
+  void set_is_non_loop_phi(bool is_non_loop_phi) {
+    is_non_loop_phi_ = is_non_loop_phi;
+  }
+
+  // Returns the use position in this live range that follows both {start}
+  // and the last processed use position.
+  // Modifies the internal state of the live range!
+  UsePosition* NextUsePosition(LifetimePosition start);
+
+  // Returns the use position for which a register is required in this live
+  // range and which follows both {start} and the last processed use position.
+  // Modifies the internal state of the live range!
+  UsePosition* NextRegisterPosition(LifetimePosition start);
+
+  // Returns the use position for which a register is beneficial in this live
+  // range and which follows both {start} and the last processed use position.
+  // Modifies the internal state of the live range!
+  UsePosition* NextUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Returns the use position for which a register is beneficial in this live
+  // range and which precedes {start}.
+  UsePosition* PreviousUsePositionRegisterIsBeneficial(LifetimePosition start);
+
+  // Can this live range be spilled at the given position?
+  bool CanBeSpilled(LifetimePosition pos);
+
+  // Split this live range at the given position which must follow the start of
+  // the range.
+  // All uses following the given position will be moved from this
+  // live range to the result live range.
+  void SplitAt(LifetimePosition position, LiveRange* result, Zone* zone);
+
+  RegisterKind Kind() const { return kind_; }
+  bool HasRegisterAssigned() const {
+    return assigned_register_ != kInvalidAssignment;
+  }
+  bool IsSpilled() const { return spilled_; }
+
+  InstructionOperand* current_hint_operand() const {
+    DCHECK(current_hint_operand_ == FirstHint());
+    return current_hint_operand_;
+  }
+  InstructionOperand* FirstHint() const {
+    UsePosition* pos = first_pos_;
+    while (pos != NULL && !pos->HasHint()) pos = pos->next();
+    if (pos != NULL) return pos->hint();
+    return NULL;
+  }
+
+  LifetimePosition Start() const {
+    DCHECK(!IsEmpty());
+    return first_interval()->start();
+  }
+
+  LifetimePosition End() const {
+    DCHECK(!IsEmpty());
+    return last_interval_->end();
+  }
+
+  bool HasAllocatedSpillOperand() const;
+  InstructionOperand* GetSpillOperand() const { return spill_operand_; }
+  void SetSpillOperand(InstructionOperand* operand);
+
+  void SetSpillStartIndex(int start) {
+    spill_start_index_ = Min(start, spill_start_index_);
+  }
+
+  bool ShouldBeAllocatedBefore(const LiveRange* other) const;
+  bool CanCover(LifetimePosition position) const;
+  bool Covers(LifetimePosition position);
+  LifetimePosition FirstIntersection(LiveRange* other);
+
+  // Add a new interval or a new use position to this live range.
+  void EnsureInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+  void AddUseInterval(LifetimePosition start, LifetimePosition end, Zone* zone);
+  void AddUsePosition(LifetimePosition pos, InstructionOperand* operand,
+                      InstructionOperand* hint, Zone* zone);
+
+  // Shorten the most recently added interval by setting a new start.
+  void ShortenTo(LifetimePosition start);
+
+#ifdef DEBUG
+  // True if target overlaps an existing interval.
+  bool HasOverlap(UseInterval* target) const;
+  void Verify() const;
+#endif
+
+ private:
+  void ConvertOperands(Zone* zone);
+  UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
+  void AdvanceLastProcessedMarker(UseInterval* to_start_of,
+                                  LifetimePosition but_not_past) const;
+
+  int id_;
+  bool spilled_;
+  bool is_phi_;
+  bool is_non_loop_phi_;
+  RegisterKind kind_;
+  int assigned_register_;
+  UseInterval* last_interval_;
+  UseInterval* first_interval_;
+  UsePosition* first_pos_;
+  LiveRange* parent_;
+  LiveRange* next_;
+  // This is used as a cache; it doesn't affect correctness.
+  mutable UseInterval* current_interval_;
+  UsePosition* last_processed_use_;
+  // This is used as a cache; it's invalid outside of BuildLiveRanges.
+  InstructionOperand* current_hint_operand_;
+  InstructionOperand* spill_operand_;
+  int spill_start_index_;
+
+  friend class RegisterAllocator;  // Assigns to kind_.
+};
+
+
+class RegisterAllocator BASE_EMBEDDED {
+ public:
+  explicit RegisterAllocator(InstructionSequence* code);
+
+  static void TraceAlloc(const char* msg, ...);
+
+  // Checks whether the value of a given virtual register is a reference.
+  // TODO(titzer): rename this to IsReference.
+  bool HasTaggedValue(int virtual_register) const;
+
+  // Returns the register kind required by the given virtual register.
+  RegisterKind RequiredRegisterKind(int virtual_register) const;
+
+  bool Allocate();
+
+  const ZoneList<LiveRange*>* live_ranges() const { return &live_ranges_; }
+  const Vector<LiveRange*>* fixed_live_ranges() const {
+    return &fixed_live_ranges_;
+  }
+  const Vector<LiveRange*>* fixed_double_live_ranges() const {
+    return &fixed_double_live_ranges_;
+  }
+
+  inline InstructionSequence* code() const { return code_; }
+
+  // This zone is for data structures only needed during register allocation.
+  inline Zone* zone() { return &zone_; }
+
+  // This zone is for InstructionOperands and moves that live beyond register
+  // allocation.
+  inline Zone* code_zone() { return code()->zone(); }
+
+  int GetVirtualRegister() {
+    int vreg = code()->NextVirtualRegister();
+    if (vreg >= UnallocatedOperand::kMaxVirtualRegisters) {
+      allocation_ok_ = false;
+      // Maintain the invariant that we return something below the maximum.
+      return 0;
+    }
+    return vreg;
+  }
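+  // Note (added for clarity, not in the original source): when the virtual
+  // register space overflows, the allocator records the failure in
+  // allocation_ok_ and returns 0, so callers must check AllocationOk()
+  // before trusting the returned register.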
+
+  bool AllocationOk() { return allocation_ok_; }
+
+#ifdef DEBUG
+  void Verify() const;
+#endif
+
+  BitVector* assigned_registers() { return assigned_registers_; }
+  BitVector* assigned_double_registers() { return assigned_double_registers_; }
+
+ private:
+  void MeetRegisterConstraints();
+  void ResolvePhis();
+  void BuildLiveRanges();
+  void AllocateGeneralRegisters();
+  void AllocateDoubleRegisters();
+  void ConnectRanges();
+  void ResolveControlFlow();
+  void PopulatePointerMaps();  // TODO(titzer): rename to PopulateReferenceMaps.
+  void AllocateRegisters();
+  bool CanEagerlyResolveControlFlow(BasicBlock* block) const;
+  inline bool SafePointsAreInOrder() const;
+
+  // Liveness analysis support.
+  void InitializeLivenessAnalysis();
+  BitVector* ComputeLiveOut(BasicBlock* block);
+  void AddInitialIntervals(BasicBlock* block, BitVector* live_out);
+  bool IsOutputRegisterOf(Instruction* instr, int index);
+  bool IsOutputDoubleRegisterOf(Instruction* instr, int index);
+  void ProcessInstructions(BasicBlock* block, BitVector* live);
+  void MeetRegisterConstraints(BasicBlock* block);
+  void MeetConstraintsBetween(Instruction* first, Instruction* second,
+                              int gap_index);
+  void MeetRegisterConstraintsForLastInstructionInBlock(BasicBlock* block);
+  void ResolvePhis(BasicBlock* block);
+
+  // Helper methods for building intervals.
+  InstructionOperand* AllocateFixed(UnallocatedOperand* operand, int pos,
+                                    bool is_tagged);
+  LiveRange* LiveRangeFor(InstructionOperand* operand);
+  void Define(LifetimePosition position, InstructionOperand* operand,
+              InstructionOperand* hint);
+  void Use(LifetimePosition block_start, LifetimePosition position,
+           InstructionOperand* operand, InstructionOperand* hint);
+  void AddConstraintsGapMove(int index, InstructionOperand* from,
+                             InstructionOperand* to);
+
+  // Helper methods for updating the life range lists.
+  void AddToActive(LiveRange* range);
+  void AddToInactive(LiveRange* range);
+  void AddToUnhandledSorted(LiveRange* range);
+  void AddToUnhandledUnsorted(LiveRange* range);
+  void SortUnhandled();
+  bool UnhandledIsSorted();
+  void ActiveToHandled(LiveRange* range);
+  void ActiveToInactive(LiveRange* range);
+  void InactiveToHandled(LiveRange* range);
+  void InactiveToActive(LiveRange* range);
+  void FreeSpillSlot(LiveRange* range);
+  InstructionOperand* TryReuseSpillSlot(LiveRange* range);
+
+  // Helper methods for allocating registers.
+  bool TryAllocateFreeReg(LiveRange* range);
+  void AllocateBlockedReg(LiveRange* range);
+
+  // Live range splitting helpers.
+
+  // Split the given range at the given position.
+  // If the range starts at or after the given position, then the
+  // original range is returned.
+  // Otherwise returns the live range that starts at pos and contains
+  // all uses from the original range that follow pos. Uses at pos will
+  // still be owned by the original range after splitting.
+  LiveRange* SplitRangeAt(LiveRange* range, LifetimePosition pos);
+
+  // Split the given range at a position from the interval [start, end].
+  LiveRange* SplitBetween(LiveRange* range, LifetimePosition start,
+                          LifetimePosition end);
+
+  // Find a lifetime position in the interval [start, end] which
+  // is optimal for splitting: it is either the header of the outermost
+  // loop covered by this interval or the latest possible position.
+  LifetimePosition FindOptimalSplitPos(LifetimePosition start,
+                                       LifetimePosition end);
+
+  // Spill the given live range after position pos.
+  void SpillAfter(LiveRange* range, LifetimePosition pos);
+
+  // Spill the given live range after position [start] and up to position [end].
+  void SpillBetween(LiveRange* range, LifetimePosition start,
+                    LifetimePosition end);
+
+  // Spill the given live range after position [start] and up to position [end].
+  // The range is guaranteed to be spilled at least until position [until].
+  void SpillBetweenUntil(LiveRange* range, LifetimePosition start,
+                         LifetimePosition until, LifetimePosition end);
+
+  void SplitAndSpillIntersecting(LiveRange* range);
+
+  // If we are trying to spill a range inside a loop, try to hoist the
+  // spill position out to the point just before the loop.
+  LifetimePosition FindOptimalSpillingPos(LiveRange* range,
+                                          LifetimePosition pos);
+
+  void Spill(LiveRange* range);
+  bool IsBlockBoundary(LifetimePosition pos);
+
+  // Helper methods for resolving control flow.
+  void ResolveControlFlow(LiveRange* range, BasicBlock* block,
+                          BasicBlock* pred);
+
+  inline void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
+
+  // Return the parallel move that should be used to connect ranges split at
+  // the given position.
+  ParallelMove* GetConnectingParallelMove(LifetimePosition pos);
+
+  // Return the block which contains the given lifetime position.
+  BasicBlock* GetBlock(LifetimePosition pos);
+
+  // Helper methods for the fixed registers.
+  int RegisterCount() const;
+  static int FixedLiveRangeID(int index) { return -index - 1; }
+  static int FixedDoubleLiveRangeID(int index);
+  LiveRange* FixedLiveRangeFor(int index);
+  LiveRange* FixedDoubleLiveRangeFor(int index);
+  LiveRange* LiveRangeFor(int index);
+  GapInstruction* GetLastGap(BasicBlock* block);
+
+  const char* RegisterName(int allocation_index);
+
+  inline Instruction* InstructionAt(int index) {
+    return code()->InstructionAt(index);
+  }
+
+  Zone zone_;
+  InstructionSequence* code_;
+
+  // During liveness analysis, keep a mapping from block id to live_in sets
+  // for blocks already analyzed.
+  ZoneList<BitVector*> live_in_sets_;
+
+  // Liveness analysis results.
+  ZoneList<LiveRange*> live_ranges_;
+
+  // Lists of live ranges.
+  EmbeddedVector<LiveRange*, Register::kMaxNumAllocatableRegisters>
+      fixed_live_ranges_;
+  EmbeddedVector<LiveRange*, DoubleRegister::kMaxNumAllocatableRegisters>
+      fixed_double_live_ranges_;
+  ZoneList<LiveRange*> unhandled_live_ranges_;
+  ZoneList<LiveRange*> active_live_ranges_;
+  ZoneList<LiveRange*> inactive_live_ranges_;
+  ZoneList<LiveRange*> reusable_slots_;
+
+  RegisterKind mode_;
+  int num_registers_;
+
+  BitVector* assigned_registers_;
+  BitVector* assigned_double_registers_;
+
+  // Indicates success or failure during register allocation.
+  bool allocation_ok_;
+
+#ifdef DEBUG
+  LifetimePosition allocation_finger_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
+};
+
+
+class RegisterAllocatorPhase : public CompilationPhase {
+ public:
+  RegisterAllocatorPhase(const char* name, RegisterAllocator* allocator);
+  ~RegisterAllocatorPhase();
+
+ private:
+  RegisterAllocator* allocator_;
+  unsigned allocator_zone_start_allocation_size_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorPhase);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_REGISTER_ALLOCATOR_H_
diff --git a/src/compiler/representation-change.h b/src/compiler/representation-change.h
new file mode 100644
index 0000000..aaa248e
--- /dev/null
+++ b/src/compiler/representation-change.h
@@ -0,0 +1,360 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_REPRESENTATION_CHANGE_H_
+#define V8_COMPILER_REPRESENTATION_CHANGE_H_
+
+#include "src/base/bits.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Contains logic related to changing the representation of values for constants
+// and other nodes, as well as lowering Simplified->Machine operators.
+// Eagerly folds any representation changes for constants.
+class RepresentationChanger {
+ public:
+  RepresentationChanger(JSGraph* jsgraph, SimplifiedOperatorBuilder* simplified,
+                        Isolate* isolate)
+      : jsgraph_(jsgraph),
+        simplified_(simplified),
+        isolate_(isolate),
+        testing_type_errors_(false),
+        type_error_(false) {}
+
+  // TODO(titzer): should Word64 also be implicitly convertible to others?
+  static const MachineTypeUnion rWord =
+      kRepBit | kRepWord8 | kRepWord16 | kRepWord32;
+
+  Node* GetRepresentationFor(Node* node, MachineTypeUnion output_type,
+                             MachineTypeUnion use_type) {
+    if (!base::bits::IsPowerOfTwo32(output_type & kRepMask)) {
+      // There should be only one output representation.
+      return TypeError(node, output_type, use_type);
+    }
+    if ((use_type & kRepMask) == (output_type & kRepMask)) {
+      // Representations are the same. That's a no-op.
+      return node;
+    }
+    if ((use_type & rWord) && (output_type & rWord)) {
+      // Both are words less than or equal to 32-bits.
+      // Since loads of integers from memory implicitly sign or zero extend the
+      // value to the full machine word size and stores implicitly truncate,
+      // no representation change is necessary.
+      return node;
+    }
+    if (use_type & kRepTagged) {
+      return GetTaggedRepresentationFor(node, output_type);
+    } else if (use_type & kRepFloat64) {
+      return GetFloat64RepresentationFor(node, output_type);
+    } else if (use_type & kRepFloat32) {
+      return TypeError(node, output_type, use_type);  // TODO(titzer): handle
+    } else if (use_type & kRepBit) {
+      return GetBitRepresentationFor(node, output_type);
+    } else if (use_type & rWord) {
+      return GetWord32RepresentationFor(node, output_type,
+                                        use_type & kTypeUint32);
+    } else if (use_type & kRepWord64) {
+      return GetWord64RepresentationFor(node, output_type);
+    } else {
+      return node;
+    }
+  }
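+  // Illustrative example (not part of the original source): an output of
+  // kRepWord32 | kTypeInt32 used as kRepTagged dispatches to
+  // GetTaggedRepresentationFor, which emits a ChangeInt32ToTagged node, or
+  // folds the change away entirely when the input is an Int32Constant.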
+
+  Node* GetTaggedRepresentationFor(Node* node, MachineTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kHeapConstant:
+        return node;  // No change necessary.
+      case IrOpcode::kInt32Constant:
+        if (output_type & kTypeUint32) {
+          uint32_t value = OpParameter<uint32_t>(node);
+          return jsgraph()->Constant(static_cast<double>(value));
+        } else if (output_type & kTypeInt32) {
+          int32_t value = OpParameter<int32_t>(node);
+          return jsgraph()->Constant(value);
+        } else if (output_type & kRepBit) {
+          return OpParameter<int32_t>(node) == 0 ? jsgraph()->FalseConstant()
+                                                 : jsgraph()->TrueConstant();
+        } else {
+          return TypeError(node, output_type, kRepTagged);
+        }
+      case IrOpcode::kFloat64Constant:
+        return jsgraph()->Constant(OpParameter<double>(node));
+      default:
+        break;
+    }
+    // Select the correct X -> Tagged operator.
+    const Operator* op;
+    if (output_type & kRepBit) {
+      op = simplified()->ChangeBitToBool();
+    } else if (output_type & rWord) {
+      if (output_type & kTypeUint32) {
+        op = simplified()->ChangeUint32ToTagged();
+      } else if (output_type & kTypeInt32) {
+        op = simplified()->ChangeInt32ToTagged();
+      } else {
+        return TypeError(node, output_type, kRepTagged);
+      }
+    } else if (output_type & kRepFloat64) {
+      op = simplified()->ChangeFloat64ToTagged();
+    } else {
+      return TypeError(node, output_type, kRepTagged);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetFloat64RepresentationFor(Node* node, MachineTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kNumberConstant:
+        return jsgraph()->Float64Constant(OpParameter<double>(node));
+      case IrOpcode::kInt32Constant:
+        if (output_type & kTypeUint32) {
+          uint32_t value = OpParameter<uint32_t>(node);
+          return jsgraph()->Float64Constant(static_cast<double>(value));
+        } else {
+          int32_t value = OpParameter<int32_t>(node);
+          return jsgraph()->Float64Constant(value);
+        }
+      case IrOpcode::kFloat64Constant:
+        return node;  // No change necessary.
+      default:
+        break;
+    }
+    // Select the correct X -> Float64 operator.
+    const Operator* op;
+    if (output_type & kRepBit) {
+      return TypeError(node, output_type, kRepFloat64);
+    } else if (output_type & rWord) {
+      if (output_type & kTypeUint32) {
+        op = machine()->ChangeUint32ToFloat64();
+      } else {
+        op = machine()->ChangeInt32ToFloat64();
+      }
+    } else if (output_type & kRepTagged) {
+      op = simplified()->ChangeTaggedToFloat64();
+    } else {
+      return TypeError(node, output_type, kRepFloat64);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetWord32RepresentationFor(Node* node, MachineTypeUnion output_type,
+                                   bool use_unsigned) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return node;  // No change necessary.
+      case IrOpcode::kNumberConstant:
+      case IrOpcode::kFloat64Constant: {
+        double value = OpParameter<double>(node);
+        if (value < 0) {
+          DCHECK(IsInt32Double(value));
+          int32_t iv = static_cast<int32_t>(value);
+          return jsgraph()->Int32Constant(iv);
+        } else {
+          DCHECK(IsUint32Double(value));
+          int32_t iv = static_cast<int32_t>(static_cast<uint32_t>(value));
+          return jsgraph()->Int32Constant(iv);
+        }
+      }
+      default:
+        break;
+    }
+    // Select the correct X -> Word32 operator.
+    const Operator* op = NULL;
+    if (output_type & kRepFloat64) {
+      if (output_type & kTypeUint32 || use_unsigned) {
+        op = machine()->ChangeFloat64ToUint32();
+      } else {
+        op = machine()->ChangeFloat64ToInt32();
+      }
+    } else if (output_type & kRepTagged) {
+      if (output_type & kTypeUint32 || use_unsigned) {
+        op = simplified()->ChangeTaggedToUint32();
+      } else {
+        op = simplified()->ChangeTaggedToInt32();
+      }
+    } else {
+      return TypeError(node, output_type, kRepWord32);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetBitRepresentationFor(Node* node, MachineTypeUnion output_type) {
+    // Eagerly fold representation changes for constants.
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant: {
+        int32_t value = OpParameter<int32_t>(node);
+        if (value == 0 || value == 1) return node;
+        return jsgraph()->OneConstant();  // value != 0
+      }
+      case IrOpcode::kHeapConstant: {
+        Handle<Object> handle = OpParameter<Unique<Object> >(node).handle();
+        DCHECK(*handle == isolate()->heap()->true_value() ||
+               *handle == isolate()->heap()->false_value());
+        return jsgraph()->Int32Constant(
+            *handle == isolate()->heap()->true_value() ? 1 : 0);
+      }
+      default:
+        break;
+    }
+    // Select the correct X -> Bit operator.
+    const Operator* op;
+    if (output_type & rWord) {
+      return node;  // No change necessary.
+    } else if (output_type & kRepWord64) {
+      return node;  // TODO(titzer): No change necessary, on 64-bit.
+    } else if (output_type & kRepTagged) {
+      op = simplified()->ChangeBoolToBit();
+    } else {
+      return TypeError(node, output_type, kRepBit);
+    }
+    return jsgraph()->graph()->NewNode(op, node);
+  }
+
+  Node* GetWord64RepresentationFor(Node* node, MachineTypeUnion output_type) {
+    if (output_type & kRepBit) {
+      return node;  // Sloppy comparison -> word64
+    }
+    // Can't really convert Word64 to anything else. Purported to be internal.
+    return TypeError(node, output_type, kRepWord64);
+  }
+
+  const Operator* Int32OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Int32Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Int32Sub();
+      case IrOpcode::kNumberMultiply:
+        return machine()->Int32Mul();
+      case IrOpcode::kNumberDivide:
+        return machine()->Int32Div();
+      case IrOpcode::kNumberModulus:
+        return machine()->Int32Mod();
+      case IrOpcode::kNumberEqual:
+        return machine()->Word32Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Int32LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Int32LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  const Operator* Uint32OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Int32Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Int32Sub();
+      case IrOpcode::kNumberMultiply:
+        return machine()->Int32Mul();
+      case IrOpcode::kNumberDivide:
+        return machine()->Int32UDiv();
+      case IrOpcode::kNumberModulus:
+        return machine()->Int32UMod();
+      case IrOpcode::kNumberEqual:
+        return machine()->Word32Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Uint32LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Uint32LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  const Operator* Float64OperatorFor(IrOpcode::Value opcode) {
+    switch (opcode) {
+      case IrOpcode::kNumberAdd:
+        return machine()->Float64Add();
+      case IrOpcode::kNumberSubtract:
+        return machine()->Float64Sub();
+      case IrOpcode::kNumberMultiply:
+        return machine()->Float64Mul();
+      case IrOpcode::kNumberDivide:
+        return machine()->Float64Div();
+      case IrOpcode::kNumberModulus:
+        return machine()->Float64Mod();
+      case IrOpcode::kNumberEqual:
+        return machine()->Float64Equal();
+      case IrOpcode::kNumberLessThan:
+        return machine()->Float64LessThan();
+      case IrOpcode::kNumberLessThanOrEqual:
+        return machine()->Float64LessThanOrEqual();
+      default:
+        UNREACHABLE();
+        return NULL;
+    }
+  }
+
+  MachineType TypeForBasePointer(const FieldAccess& access) {
+    return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
+  }
+
+  MachineType TypeForBasePointer(const ElementAccess& access) {
+    return access.tag() != 0 ? kMachAnyTagged : kMachPtr;
+  }
+
+  MachineType TypeFromUpperBound(Type* type) {
+    if (type->Is(Type::None()))
+      return kTypeAny;  // TODO(titzer): should be an error
+    if (type->Is(Type::Signed32())) return kTypeInt32;
+    if (type->Is(Type::Unsigned32())) return kTypeUint32;
+    if (type->Is(Type::Number())) return kTypeNumber;
+    if (type->Is(Type::Boolean())) return kTypeBool;
+    return kTypeAny;
+  }
+
+ private:
+  JSGraph* jsgraph_;
+  SimplifiedOperatorBuilder* simplified_;
+  Isolate* isolate_;
+
+  friend class RepresentationChangerTester;  // accesses the below fields.
+
+  bool testing_type_errors_;  // If {true}, don't abort on a type error.
+  bool type_error_;           // Set when a type error is detected.
+
+  Node* TypeError(Node* node, MachineTypeUnion output_type,
+                  MachineTypeUnion use) {
+    type_error_ = true;
+    if (!testing_type_errors_) {
+      OStringStream out_str;
+      out_str << static_cast<MachineType>(output_type);
+
+      OStringStream use_str;
+      use_str << static_cast<MachineType>(use);
+
+      V8_Fatal(__FILE__, __LINE__,
+               "RepresentationChangerError: node #%d:%s of "
+               "%s cannot be changed to %s",
+               node->id(), node->op()->mnemonic(), out_str.c_str(),
+               use_str.c_str());
+    }
+    return node;
+  }
+
+  JSGraph* jsgraph() { return jsgraph_; }
+  Isolate* isolate() { return isolate_; }
+  SimplifiedOperatorBuilder* simplified() { return simplified_; }
+  MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_REPRESENTATION_CHANGE_H_
diff --git a/src/compiler/schedule.cc b/src/compiler/schedule.cc
new file mode 100644
index 0000000..a3b5ed3
--- /dev/null
+++ b/src/compiler/schedule.cc
@@ -0,0 +1,88 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/schedule.h"
+#include "src/ostreams.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, const BasicBlockData::Control& c) {
+  switch (c) {
+    case BasicBlockData::kNone:
+      return os << "none";
+    case BasicBlockData::kGoto:
+      return os << "goto";
+    case BasicBlockData::kBranch:
+      return os << "branch";
+    case BasicBlockData::kReturn:
+      return os << "return";
+    case BasicBlockData::kThrow:
+      return os << "throw";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+OStream& operator<<(OStream& os, const Schedule& s) {
+  // TODO(svenpanne) Const-correct the RPO stuff/iterators.
+  BasicBlockVector* rpo = const_cast<Schedule*>(&s)->rpo_order();
+  for (BasicBlockVectorIter i = rpo->begin(); i != rpo->end(); ++i) {
+    BasicBlock* block = *i;
+    os << "--- BLOCK B" << block->id();
+    if (block->PredecessorCount() != 0) os << " <- ";
+    BasicBlock::Predecessors predecessors = block->predecessors();
+    bool comma = false;
+    for (BasicBlock::Predecessors::iterator j = predecessors.begin();
+         j != predecessors.end(); ++j) {
+      if (comma) os << ", ";
+      comma = true;
+      os << "B" << (*j)->id();
+    }
+    os << " ---\n";
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      Node* node = *j;
+      os << "  " << *node;
+      if (!NodeProperties::IsControl(node)) {
+        Bounds bounds = NodeProperties::GetBounds(node);
+        os << " : ";
+        bounds.lower->PrintTo(os);
+        if (!bounds.upper->Is(bounds.lower)) {
+          os << "..";
+          bounds.upper->PrintTo(os);
+        }
+      }
+      os << "\n";
+    }
+    BasicBlock::Control control = block->control_;
+    if (control != BasicBlock::kNone) {
+      os << "  ";
+      if (block->control_input_ != NULL) {
+        os << *block->control_input_;
+      } else {
+        os << "Goto";
+      }
+      os << " -> ";
+      BasicBlock::Successors successors = block->successors();
+      comma = false;
+      for (BasicBlock::Successors::iterator j = successors.begin();
+           j != successors.end(); ++j) {
+        if (comma) os << ", ";
+        comma = true;
+        os << "B" << (*j)->id();
+      }
+      os << "\n";
+    }
+  }
+  return os;
+}
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/schedule.h b/src/compiler/schedule.h
new file mode 100644
index 0000000..070691e
--- /dev/null
+++ b/src/compiler/schedule.h
@@ -0,0 +1,306 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SCHEDULE_H_
+#define V8_COMPILER_SCHEDULE_H_
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-graph.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class BasicBlock;
+class Graph;
+class ConstructScheduleData;
+class CodeGenerator;  // Because of a namespace bug in clang.
+
+class BasicBlockData {
+ public:
+  // Possible control nodes that can end a block.
+  enum Control {
+    kNone,    // Control not initialized yet.
+    kGoto,    // Goto a single successor block.
+    kBranch,  // Branch if true to first successor, otherwise second.
+    kReturn,  // Return a value from this method.
+    kThrow    // Throw an exception.
+  };
+
+  int32_t rpo_number_;       // Special RPO number of the block.
+  BasicBlock* dominator_;    // Immediate dominator of the block.
+  BasicBlock* loop_header_;  // Pointer to dominating loop header basic block,
+                             // NULL if none. For loop headers, this points to
+                             // the enclosing loop header.
+  int32_t loop_depth_;       // Loop nesting depth; 0 is top-level.
+  int32_t loop_end_;         // End of the loop, if this block is a loop header.
+  int32_t code_start_;       // Start index of arch-specific code.
+  int32_t code_end_;         // End index of arch-specific code.
+  bool deferred_;            // {true} if this block is considered the slow
+                             // path.
+  Control control_;          // Control at the end of the block.
+  Node* control_input_;      // Input value for control.
+  NodeVector nodes_;         // Nodes of this block in forward order.
+
+  explicit BasicBlockData(Zone* zone)
+      : rpo_number_(-1),
+        dominator_(NULL),
+        loop_header_(NULL),
+        loop_depth_(0),
+        loop_end_(-1),
+        code_start_(-1),
+        code_end_(-1),
+        deferred_(false),
+        control_(kNone),
+        control_input_(NULL),
+        nodes_(zone) {}
+
+  inline bool IsLoopHeader() const { return loop_end_ >= 0; }
+  inline bool LoopContains(BasicBlockData* block) const {
+    // RPO numbers must be initialized.
+    DCHECK(rpo_number_ >= 0);
+    DCHECK(block->rpo_number_ >= 0);
+    if (loop_end_ < 0) return false;  // This is not a loop.
+    return block->rpo_number_ >= rpo_number_ && block->rpo_number_ < loop_end_;
+  }
+  int first_instruction_index() {
+    DCHECK(code_start_ >= 0);
+    DCHECK(code_end_ > 0);
+    DCHECK(code_end_ >= code_start_);
+    return code_start_;
+  }
+  int last_instruction_index() {
+    DCHECK(code_start_ >= 0);
+    DCHECK(code_end_ > 0);
+    DCHECK(code_end_ >= code_start_);
+    return code_end_ - 1;
+  }
+};
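+// Illustrative note (not part of the original source): instruction indices
+// cover the half-open range [code_start_, code_end_[; a block with
+// code_start_ == 10 and code_end_ == 14 has first_instruction_index() == 10
+// and last_instruction_index() == 13.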
+
+OStream& operator<<(OStream& os, const BasicBlockData::Control& c);
+
+// A basic block contains an ordered list of nodes and ends with a control
+// node. Note that if a basic block has phis, then all phis must appear as the
+// first nodes in the block.
+class BasicBlock FINAL : public GenericNode<BasicBlockData, BasicBlock> {
+ public:
+  BasicBlock(GenericGraphBase* graph, int input_count)
+      : GenericNode<BasicBlockData, BasicBlock>(graph, input_count) {}
+
+  typedef Uses Successors;
+  typedef Inputs Predecessors;
+
+  Successors successors() { return static_cast<Successors>(uses()); }
+  Predecessors predecessors() { return static_cast<Predecessors>(inputs()); }
+
+  int PredecessorCount() { return InputCount(); }
+  BasicBlock* PredecessorAt(int index) { return InputAt(index); }
+
+  int SuccessorCount() { return UseCount(); }
+  BasicBlock* SuccessorAt(int index) { return UseAt(index); }
+
+  int PredecessorIndexOf(BasicBlock* predecessor) {
+    BasicBlock::Predecessors predecessors = this->predecessors();
+    for (BasicBlock::Predecessors::iterator i = predecessors.begin();
+         i != predecessors.end(); ++i) {
+      if (*i == predecessor) return i.index();
+    }
+    return -1;
+  }
+
+  inline BasicBlock* loop_header() {
+    return static_cast<BasicBlock*>(loop_header_);
+  }
+  inline BasicBlock* ContainingLoop() {
+    if (IsLoopHeader()) return this;
+    return static_cast<BasicBlock*>(loop_header_);
+  }
+
+  typedef NodeVector::iterator iterator;
+  iterator begin() { return nodes_.begin(); }
+  iterator end() { return nodes_.end(); }
+
+  typedef NodeVector::const_iterator const_iterator;
+  const_iterator begin() const { return nodes_.begin(); }
+  const_iterator end() const { return nodes_.end(); }
+
+  typedef NodeVector::reverse_iterator reverse_iterator;
+  reverse_iterator rbegin() { return nodes_.rbegin(); }
+  reverse_iterator rend() { return nodes_.rend(); }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(BasicBlock);
+};
+
+typedef GenericGraphVisit::NullNodeVisitor<BasicBlockData, BasicBlock>
+    NullBasicBlockVisitor;
+
+typedef ZoneVector<BasicBlock*> BasicBlockVector;
+typedef BasicBlockVector::iterator BasicBlockVectorIter;
+typedef BasicBlockVector::reverse_iterator BasicBlockVectorRIter;
+
+// A schedule represents the result of assigning nodes to basic blocks
+// and ordering them within basic blocks. Prior to computing a schedule,
+// a graph has no notion of control flow ordering other than that induced
+// by the graph's dependencies. A schedule is required to generate code.
+class Schedule : public GenericGraph<BasicBlock> {
+ public:
+  explicit Schedule(Zone* zone)
+      : GenericGraph<BasicBlock>(zone),
+        zone_(zone),
+        all_blocks_(zone),
+        nodeid_to_block_(zone),
+        rpo_order_(zone) {
+    SetStart(NewBasicBlock());  // entry.
+    SetEnd(NewBasicBlock());    // exit.
+  }
+
+  // Return the block which contains {node}, if any.
+  BasicBlock* block(Node* node) const {
+    if (node->id() < static_cast<NodeId>(nodeid_to_block_.size())) {
+      return nodeid_to_block_[node->id()];
+    }
+    return NULL;
+  }
+
+  bool IsScheduled(Node* node) {
+    int length = static_cast<int>(nodeid_to_block_.size());
+    if (node->id() >= length) return false;
+    return nodeid_to_block_[node->id()] != NULL;
+  }
+
+  BasicBlock* GetBlockById(int block_id) { return all_blocks_[block_id]; }
+
+  int BasicBlockCount() const { return NodeCount(); }
+  int RpoBlockCount() const { return static_cast<int>(rpo_order_.size()); }
+
+  typedef ContainerPointerWrapper<BasicBlockVector> BasicBlocks;
+
+  // Return a list of all the blocks in the schedule, in arbitrary order.
+  BasicBlocks all_blocks() { return BasicBlocks(&all_blocks_); }
+
+  // Check if nodes {a} and {b} are in the same block.
+  inline bool SameBasicBlock(Node* a, Node* b) const {
+    BasicBlock* block = this->block(a);
+    return block != NULL && block == this->block(b);
+  }
+
+  // BasicBlock building: create a new block.
+  inline BasicBlock* NewBasicBlock() {
+    BasicBlock* block =
+        BasicBlock::New(this, 0, static_cast<BasicBlock**>(NULL));
+    all_blocks_.push_back(block);
+    return block;
+  }
+
+  // BasicBlock building: records that a node will later be added to a block but
+  // doesn't actually add the node to the block.
+  inline void PlanNode(BasicBlock* block, Node* node) {
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF("Planning #%d:%s for future add to B%d\n", node->id(),
+             node->op()->mnemonic(), block->id());
+    }
+    DCHECK(this->block(node) == NULL);
+    SetBlockForNode(block, node);
+  }
+
+  // BasicBlock building: add a node to the end of the block.
+  inline void AddNode(BasicBlock* block, Node* node) {
+    if (FLAG_trace_turbo_scheduler) {
+      PrintF("Adding #%d:%s to B%d\n", node->id(), node->op()->mnemonic(),
+             block->id());
+    }
+    DCHECK(this->block(node) == NULL || this->block(node) == block);
+    block->nodes_.push_back(node);
+    SetBlockForNode(block, node);
+  }
+
+  // BasicBlock building: add a goto to the end of {block}.
+  void AddGoto(BasicBlock* block, BasicBlock* succ) {
+    DCHECK(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kGoto;
+    AddSuccessor(block, succ);
+  }
+
+  // BasicBlock building: add a branch at the end of {block}.
+  void AddBranch(BasicBlock* block, Node* branch, BasicBlock* tblock,
+                 BasicBlock* fblock) {
+    DCHECK(block->control_ == BasicBlock::kNone);
+    DCHECK(branch->opcode() == IrOpcode::kBranch);
+    block->control_ = BasicBlock::kBranch;
+    AddSuccessor(block, tblock);
+    AddSuccessor(block, fblock);
+    SetControlInput(block, branch);
+    if (branch->opcode() == IrOpcode::kBranch) {
+      // TODO(titzer): require a Branch node here. (sloppy tests).
+      SetBlockForNode(block, branch);
+    }
+  }
+
+  // BasicBlock building: add a return at the end of {block}.
+  void AddReturn(BasicBlock* block, Node* input) {
+    DCHECK(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kReturn;
+    SetControlInput(block, input);
+    if (block != end()) AddSuccessor(block, end());
+    if (input->opcode() == IrOpcode::kReturn) {
+      // TODO(titzer): require a Return node here. (sloppy tests).
+      SetBlockForNode(block, input);
+    }
+  }
+
+  // BasicBlock building: add a throw at the end of {block}.
+  void AddThrow(BasicBlock* block, Node* input) {
+    DCHECK(block->control_ == BasicBlock::kNone);
+    block->control_ = BasicBlock::kThrow;
+    SetControlInput(block, input);
+    if (block != end()) AddSuccessor(block, end());
+  }
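+  // Usage sketch (illustrative only, not part of the original source):
+  // building a diamond-shaped CFG, assuming branch_node is a kBranch node
+  // and return_node is a kReturn node:
+  //
+  //   Schedule schedule(zone);
+  //   BasicBlock* tblock = schedule.NewBasicBlock();
+  //   BasicBlock* fblock = schedule.NewBasicBlock();
+  //   BasicBlock* merge = schedule.NewBasicBlock();
+  //   schedule.AddBranch(schedule.start(), branch_node, tblock, fblock);
+  //   schedule.AddGoto(tblock, merge);
+  //   schedule.AddGoto(fblock, merge);
+  //   schedule.AddReturn(merge, return_node);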
+
+  friend class Scheduler;
+  friend class CodeGenerator;
+
+  void AddSuccessor(BasicBlock* block, BasicBlock* succ) {
+    succ->AppendInput(zone_, block);
+  }
+
+  BasicBlockVector* rpo_order() { return &rpo_order_; }
+
+ private:
+  friend class ScheduleVisualizer;
+
+  void SetControlInput(BasicBlock* block, Node* node) {
+    block->control_input_ = node;
+    SetBlockForNode(block, node);
+  }
+
+  void SetBlockForNode(BasicBlock* block, Node* node) {
+    int length = static_cast<int>(nodeid_to_block_.size());
+    if (node->id() >= length) {
+      nodeid_to_block_.resize(node->id() + 1);
+    }
+    nodeid_to_block_[node->id()] = block;
+  }
+
+  Zone* zone_;
+  BasicBlockVector all_blocks_;           // All basic blocks in the schedule.
+  BasicBlockVector nodeid_to_block_;      // Map from node to containing block.
+  BasicBlockVector rpo_order_;            // Reverse-post-order block list.
+};
+
+OStream& operator<<(OStream& os, const Schedule& s);
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_SCHEDULE_H_
diff --git a/src/compiler/scheduler.cc b/src/compiler/scheduler.cc
new file mode 100644
index 0000000..4029950
--- /dev/null
+++ b/src/compiler/scheduler.cc
@@ -0,0 +1,1125 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <deque>
+#include <queue>
+
+#include "src/compiler/scheduler.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static inline void Trace(const char* msg, ...) {
+  if (FLAG_trace_turbo_scheduler) {
+    va_list arguments;
+    va_start(arguments, msg);
+    base::OS::VPrint(msg, arguments);
+    va_end(arguments);
+  }
+}
+
+
+// Internal class to build a control flow graph (i.e. the basic blocks and
+// edges between them within a Schedule) from the node graph.
+// Visits the control edges of the graph backwards from end in order to find
+// the connected control subgraph, needed for scheduling.
+class CFGBuilder {
+ public:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+  ZoneQueue<Node*> queue_;
+  NodeVector control_;
+
+  CFGBuilder(Zone* zone, Scheduler* scheduler)
+      : scheduler_(scheduler),
+        schedule_(scheduler->schedule_),
+        queue_(zone),
+        control_(zone) {}
+
+  // Run the control flow graph construction algorithm by walking the graph
+  // backwards from end through control edges, building and connecting the
+  // basic blocks for control nodes.
+  void Run() {
+    Graph* graph = scheduler_->graph_;
+    FixNode(schedule_->start(), graph->start());
+    Queue(graph->end());
+
+    while (!queue_.empty()) {  // Breadth-first backwards traversal.
+      Node* node = queue_.front();
+      queue_.pop();
+      int max = NodeProperties::PastControlIndex(node);
+      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+        Queue(node->InputAt(i));
+      }
+    }
+
+    for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
+      ConnectBlocks(*i);  // Connect block to its predecessor/successors.
+    }
+
+    FixNode(schedule_->end(), graph->end());
+  }
+
+  void FixNode(BasicBlock* block, Node* node) {
+    schedule_->AddNode(block, node);
+    scheduler_->GetData(node)->is_connected_control_ = true;
+    scheduler_->GetData(node)->placement_ = Scheduler::kFixed;
+  }
+
+  void Queue(Node* node) {
+    // Mark the connected control nodes as they are queued.
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+    if (!data->is_connected_control_) {
+      BuildBlocks(node);
+      queue_.push(node);
+      control_.push_back(node);
+      data->is_connected_control_ = true;
+    }
+  }
+
+  void BuildBlocks(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kLoop:
+      case IrOpcode::kMerge:
+        BuildBlockForNode(node);
+        break;
+      case IrOpcode::kBranch:
+        BuildBlocksForSuccessors(node, IrOpcode::kIfTrue, IrOpcode::kIfFalse);
+        break;
+      default:
+        break;
+    }
+  }
+
+  void ConnectBlocks(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kLoop:
+      case IrOpcode::kMerge:
+        ConnectMerge(node);
+        break;
+      case IrOpcode::kBranch:
+        scheduler_->schedule_root_nodes_.push_back(node);
+        ConnectBranch(node);
+        break;
+      case IrOpcode::kReturn:
+        scheduler_->schedule_root_nodes_.push_back(node);
+        ConnectReturn(node);
+        break;
+      default:
+        break;
+    }
+  }
+
+  void BuildBlockForNode(Node* node) {
+    if (schedule_->block(node) == NULL) {
+      BasicBlock* block = schedule_->NewBasicBlock();
+      Trace("Create block B%d for #%d:%s\n", block->id(), node->id(),
+            node->op()->mnemonic());
+      FixNode(block, node);
+    }
+  }
+
+  void BuildBlocksForSuccessors(Node* node, IrOpcode::Value a,
+                                IrOpcode::Value b) {
+    Node* successors[2];
+    CollectSuccessorProjections(node, successors, a, b);
+    BuildBlockForNode(successors[0]);
+    BuildBlockForNode(successors[1]);
+  }
+
+  // Collect the branch-related projections from a node, such as IfTrue,
+  // IfFalse.
+  // TODO(titzer): consider moving this to node.h
+  void CollectSuccessorProjections(Node* node, Node** buffer,
+                                   IrOpcode::Value true_opcode,
+                                   IrOpcode::Value false_opcode) {
+    buffer[0] = NULL;
+    buffer[1] = NULL;
+    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
+      if ((*i)->opcode() == true_opcode) {
+        DCHECK_EQ(NULL, buffer[0]);
+        buffer[0] = *i;
+      }
+      if ((*i)->opcode() == false_opcode) {
+        DCHECK_EQ(NULL, buffer[1]);
+        buffer[1] = *i;
+      }
+    }
+    DCHECK_NE(NULL, buffer[0]);
+    DCHECK_NE(NULL, buffer[1]);
+  }
+
+  void CollectSuccessorBlocks(Node* node, BasicBlock** buffer,
+                              IrOpcode::Value true_opcode,
+                              IrOpcode::Value false_opcode) {
+    Node* successors[2];
+    CollectSuccessorProjections(node, successors, true_opcode, false_opcode);
+    buffer[0] = schedule_->block(successors[0]);
+    buffer[1] = schedule_->block(successors[1]);
+  }
+
+  void ConnectBranch(Node* branch) {
+    Node* branch_block_node = NodeProperties::GetControlInput(branch);
+    BasicBlock* branch_block = schedule_->block(branch_block_node);
+    DCHECK(branch_block != NULL);
+
+    BasicBlock* successor_blocks[2];
+    CollectSuccessorBlocks(branch, successor_blocks, IrOpcode::kIfTrue,
+                           IrOpcode::kIfFalse);
+
+    TraceConnect(branch, branch_block, successor_blocks[0]);
+    TraceConnect(branch, branch_block, successor_blocks[1]);
+
+    schedule_->AddBranch(branch_block, branch, successor_blocks[0],
+                         successor_blocks[1]);
+  }
+
+  void ConnectMerge(Node* merge) {
+    BasicBlock* block = schedule_->block(merge);
+    DCHECK(block != NULL);
+    // For all of the merge's control inputs, add a goto at the end to the
+    // merge's basic block.
+    for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
+         ++j) {
+      BasicBlock* predecessor_block = schedule_->block(*j);
+      if ((*j)->opcode() != IrOpcode::kReturn) {
+        TraceConnect(merge, predecessor_block, block);
+        schedule_->AddGoto(predecessor_block, block);
+      }
+    }
+  }
+
+  void ConnectReturn(Node* ret) {
+    Node* return_block_node = NodeProperties::GetControlInput(ret);
+    BasicBlock* return_block = schedule_->block(return_block_node);
+    TraceConnect(ret, return_block, NULL);
+    schedule_->AddReturn(return_block, ret);
+  }
+
+  void TraceConnect(Node* node, BasicBlock* block, BasicBlock* succ) {
+    DCHECK_NE(NULL, block);
+    if (succ == NULL) {
+      Trace("Connect #%d:%s, B%d -> end\n", node->id(), node->op()->mnemonic(),
+            block->id());
+    } else {
+      Trace("Connect #%d:%s, B%d -> B%d\n", node->id(), node->op()->mnemonic(),
+            block->id(), succ->id());
+    }
+  }
+};
+
+
+Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
+  SchedulerData def = {0, 0, false, false, kUnknown};
+  return def;
+}
+
+
+Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
+    : zone_(zone),
+      graph_(graph),
+      schedule_(schedule),
+      scheduled_nodes_(zone),
+      schedule_root_nodes_(zone),
+      node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone),
+      has_floating_control_(false) {}
+
+
+Schedule* Scheduler::ComputeSchedule(Graph* graph) {
+  Schedule* schedule;
+  bool had_floating_control = false;
+  do {
+    Zone tmp_zone(graph->zone()->isolate());
+    schedule = new (graph->zone()) Schedule(graph->zone());
+    Scheduler scheduler(&tmp_zone, graph, schedule);
+
+    scheduler.BuildCFG();
+
+    Scheduler::ComputeSpecialRPO(schedule);
+    scheduler.GenerateImmediateDominatorTree();
+
+    scheduler.PrepareUses();
+    scheduler.ScheduleEarly();
+    scheduler.ScheduleLate();
+
+    had_floating_control = scheduler.ConnectFloatingControl();
+  } while (had_floating_control);
+
+  return schedule;
+}
+
+
+Scheduler::Placement Scheduler::GetPlacement(Node* node) {
+  SchedulerData* data = GetData(node);
+  if (data->placement_ == kUnknown) {  // Compute placement, once, on demand.
+    switch (node->opcode()) {
+      case IrOpcode::kParameter:
+        // Parameters are always fixed to the start node.
+        data->placement_ = kFixed;
+        break;
+      case IrOpcode::kPhi:
+      case IrOpcode::kEffectPhi: {
+        // Phis and effect phis are fixed if their control inputs are.
+        data->placement_ = GetPlacement(NodeProperties::GetControlInput(node));
+        break;
+      }
+#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
+        CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
+#undef DEFINE_FLOATING_CONTROL_CASE
+        {
+          // Control nodes that were not control-reachable from end may float.
+          data->placement_ = kSchedulable;
+          if (!data->is_connected_control_) {
+            data->is_floating_control_ = true;
+            has_floating_control_ = true;
+            Trace("Floating control found: #%d:%s\n", node->id(),
+                  node->op()->mnemonic());
+          }
+          break;
+        }
+      default:
+        data->placement_ = kSchedulable;
+        break;
+    }
+  }
+  return data->placement_;
+}
+
+
+void Scheduler::BuildCFG() {
+  Trace("---------------- CREATING CFG ------------------\n");
+  CFGBuilder cfg_builder(zone_, this);
+  cfg_builder.Run();
+  // Initialize per-block data.
+  scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
+}
+
+
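+// Walk both blocks up the dominator tree until they meet; at each step the
+// block with the higher RPO number is replaced by its immediate dominator,
+// since dominators always appear earlier in the RPO. E.g. in a diamond
+// B0 -> {B1, B2} -> B3 with idom(B1) == idom(B2) == B0, intersecting B1 and
+// B2 first replaces B2 by B0 and then B1 by B0, where the walk terminates.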
+BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
+  while (b1 != b2) {
+    int b1_rpo = GetRPONumber(b1);
+    int b2_rpo = GetRPONumber(b2);
+    DCHECK(b1_rpo != b2_rpo);
+    if (b1_rpo < b2_rpo) {
+      b2 = b2->dominator_;
+    } else {
+      b1 = b1->dominator_;
+    }
+  }
+  return b1;
+}
+
+
+void Scheduler::GenerateImmediateDominatorTree() {
+  // Build the dominator graph. TODO(danno): consider using Lengauer &
+  // Tarjan's algorithm if this becomes really slow.
+  Trace("------------ IMMEDIATE BLOCK DOMINATORS -----------\n");
+  for (size_t i = 0; i < schedule_->rpo_order_.size(); i++) {
+    BasicBlock* current_rpo = schedule_->rpo_order_[i];
+    if (current_rpo != schedule_->start()) {
+      BasicBlock::Predecessors::iterator current_pred =
+          current_rpo->predecessors().begin();
+      BasicBlock::Predecessors::iterator end =
+          current_rpo->predecessors().end();
+      DCHECK(current_pred != end);
+      BasicBlock* dominator = *current_pred;
+      ++current_pred;
+      // For multiple predecessors, walk up the rpo ordering until a common
+      // dominator is found.
+      int current_rpo_pos = GetRPONumber(current_rpo);
+      while (current_pred != end) {
+        // Don't examine backward edges.
+        BasicBlock* pred = *current_pred;
+        if (GetRPONumber(pred) < current_rpo_pos) {
+          dominator = GetCommonDominator(dominator, *current_pred);
+        }
+        ++current_pred;
+      }
+      current_rpo->dominator_ = dominator;
+      Trace("Block %d's idom is %d\n", current_rpo->id(), dominator->id());
+    }
+  }
+}
+
+
+class ScheduleEarlyNodeVisitor : public NullNodeVisitor {
+ public:
+  explicit ScheduleEarlyNodeVisitor(Scheduler* scheduler)
+      : has_changed_rpo_constraints_(true),
+        scheduler_(scheduler),
+        schedule_(scheduler->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    int max_rpo = 0;
+    // Fixed nodes already know their schedule early position.
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      BasicBlock* block = schedule_->block(node);
+      DCHECK(block != NULL);
+      max_rpo = block->rpo_number_;
+      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
+        has_changed_rpo_constraints_ = true;
+      }
+      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
+      Trace("Preschedule #%d:%s minimum_rpo = %d\n", node->id(),
+            node->op()->mnemonic(), max_rpo);
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    int max_rpo = 0;
+    // For non-fixed nodes, the minimum rpo is the max over all of the inputs.
+    if (scheduler_->GetPlacement(node) != Scheduler::kFixed) {
+      for (InputIter i = node->inputs().begin(); i != node->inputs().end();
+           ++i) {
+        int control_rpo = scheduler_->GetData(*i)->minimum_rpo_;
+        if (control_rpo > max_rpo) {
+          max_rpo = control_rpo;
+        }
+      }
+      if (scheduler_->GetData(node)->minimum_rpo_ != max_rpo) {
+        has_changed_rpo_constraints_ = true;
+      }
+      scheduler_->GetData(node)->minimum_rpo_ = max_rpo;
+      Trace("Postschedule #%d:%s minimum_rpo = %d\n", node->id(),
+            node->op()->mnemonic(), max_rpo);
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  // TODO(mstarzinger): Dirty hack to unblock others; schedule early should be
+  // rewritten to use a pre-order traversal from the start instead.
+  bool has_changed_rpo_constraints_;
+
+ private:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::ScheduleEarly() {
+  Trace("------------------- SCHEDULE EARLY ----------------\n");
+
+  int fixpoint_count = 0;
+  ScheduleEarlyNodeVisitor visitor(this);
+  while (visitor.has_changed_rpo_constraints_) {
+    visitor.has_changed_rpo_constraints_ = false;
+    graph_->VisitNodeInputsFromEnd(&visitor);
+    fixpoint_count++;
+  }
+
+  Trace("It took %d iterations to determine fixpoint\n", fixpoint_count);
+}
+
+
+class PrepareUsesVisitor : public NullNodeVisitor {
+ public:
+  explicit PrepareUsesVisitor(Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
+      // Fixed nodes are always roots for schedule late.
+      scheduler_->schedule_root_nodes_.push_back(node);
+      if (!schedule_->IsScheduled(node)) {
+        // Make sure root nodes are scheduled in their respective blocks.
+        Trace("  Scheduling fixed position node #%d:%s\n", node->id(),
+              node->op()->mnemonic());
+        IrOpcode::Value opcode = node->opcode();
+        BasicBlock* block =
+            opcode == IrOpcode::kParameter
+                ? schedule_->start()
+                : schedule_->block(NodeProperties::GetControlInput(node));
+        DCHECK(block != NULL);
+        schedule_->AddNode(block, node);
+      }
+    }
+
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  void PostEdge(Node* from, int index, Node* to) {
+    // If the edge is from an unscheduled node, then tally it in the use count
+    // for all of its inputs. The same criterion will be used in ScheduleLate
+    // for decrementing use counts.
+    if (!schedule_->IsScheduled(from)) {
+      DCHECK_NE(Scheduler::kFixed, scheduler_->GetPlacement(from));
+      ++(scheduler_->GetData(to)->unscheduled_count_);
+      Trace("  Use count of #%d:%s (used by #%d:%s)++ = %d\n", to->id(),
+            to->op()->mnemonic(), from->id(), from->op()->mnemonic(),
+            scheduler_->GetData(to)->unscheduled_count_);
+    }
+  }
+
+ private:
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::PrepareUses() {
+  Trace("------------------- PREPARE USES ------------------\n");
+  // Count the uses of every node; this count is used to ensure that all of a
+  // node's uses are scheduled before the node itself.
+  PrepareUsesVisitor prepare_uses(this);
+  graph_->VisitNodeInputsFromEnd(&prepare_uses);
+}
+
+
+class ScheduleLateNodeVisitor : public NullNodeVisitor {
+ public:
+  explicit ScheduleLateNodeVisitor(Scheduler* scheduler)
+      : scheduler_(scheduler), schedule_(scheduler_->schedule_) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    // Don't schedule nodes that are already scheduled.
+    if (schedule_->IsScheduled(node)) {
+      return GenericGraphVisit::CONTINUE;
+    }
+    Scheduler::SchedulerData* data = scheduler_->GetData(node);
+    DCHECK_EQ(Scheduler::kSchedulable, data->placement_);
+
+    // If all the uses of a node have been scheduled, then the node itself can
+    // be scheduled.
+    bool eligible = data->unscheduled_count_ == 0;
+    Trace("Testing for schedule eligibility for #%d:%s = %s\n", node->id(),
+          node->op()->mnemonic(), eligible ? "true" : "false");
+    if (!eligible) return GenericGraphVisit::DEFER;
+
+    // Determine the dominating block for all of the uses of this node. It is
+    // the latest block that this node can be scheduled in.
+    BasicBlock* block = NULL;
+    for (Node::Uses::iterator i = node->uses().begin(); i != node->uses().end();
+         ++i) {
+      BasicBlock* use_block = GetBlockForUse(i.edge());
+      block = block == NULL ? use_block : use_block == NULL
+                                              ? block
+                                              : scheduler_->GetCommonDominator(
+                                                    block, use_block);
+    }
+    DCHECK(block != NULL);
+
+    int min_rpo = data->minimum_rpo_;
+    Trace(
+        "Schedule late conservative for #%d:%s is B%d at loop depth %d, "
+        "minimum_rpo = %d\n",
+        node->id(), node->op()->mnemonic(), block->id(), block->loop_depth_,
+        min_rpo);
+    // Hoist nodes out of loops if possible. Nodes can be hoisted iteratively
+    // into enclosing loop pre-headers until they would precede their
+    // ScheduleEarly position.
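+    // E.g. a node whose uses are all inside a loop but whose inputs are
+    // available before the loop (minimum_rpo_ at or before the pre-header)
+    // is moved to the pre-header and thus executes once rather than on
+    // every iteration.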
+    BasicBlock* hoist_block = block;
+    while (hoist_block != NULL && hoist_block->rpo_number_ >= min_rpo) {
+      if (hoist_block->loop_depth_ < block->loop_depth_) {
+        block = hoist_block;
+        Trace("  hoisting #%d:%s to block %d\n", node->id(),
+              node->op()->mnemonic(), block->id());
+      }
+      // Try to hoist to the pre-header of the loop header.
+      hoist_block = hoist_block->loop_header();
+      if (hoist_block != NULL) {
+        BasicBlock* pre_header = hoist_block->dominator_;
+        DCHECK(pre_header == NULL ||
+               *hoist_block->predecessors().begin() == pre_header);
+        Trace(
+            "  hoist to pre-header B%d of loop header B%d, depth would be %d\n",
+            pre_header->id(), hoist_block->id(), pre_header->loop_depth_);
+        hoist_block = pre_header;
+      }
+    }
+
+    ScheduleNode(block, node);
+
+    return GenericGraphVisit::CONTINUE;
+  }
+
+ private:
+  BasicBlock* GetBlockForUse(Node::Edge edge) {
+    Node* use = edge.from();
+    IrOpcode::Value opcode = use->opcode();
+    if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
+      // If the use is from a fixed (i.e. non-floating) phi, use the block
+      // of the corresponding control input to the merge.
+      int index = edge.index();
+      if (scheduler_->GetPlacement(use) == Scheduler::kFixed) {
+        Trace("  input@%d into a fixed phi #%d:%s\n", index, use->id(),
+              use->op()->mnemonic());
+        Node* merge = NodeProperties::GetControlInput(use, 0);
+        opcode = merge->opcode();
+        DCHECK(opcode == IrOpcode::kMerge || opcode == IrOpcode::kLoop);
+        use = NodeProperties::GetControlInput(merge, index);
+      }
+    }
+    BasicBlock* result = schedule_->block(use);
+    if (result == NULL) return NULL;
+    Trace("  must dominate use #%d:%s in B%d\n", use->id(),
+          use->op()->mnemonic(), result->id());
+    return result;
+  }
+
+  void ScheduleNode(BasicBlock* block, Node* node) {
+    schedule_->PlanNode(block, node);
+    scheduler_->scheduled_nodes_[block->id()].push_back(node);
+
+    // Reduce the use count of the node's inputs to potentially make them
+    // schedulable.
+    for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
+      Scheduler::SchedulerData* data = scheduler_->GetData(*i);
+      DCHECK(data->unscheduled_count_ > 0);
+      --data->unscheduled_count_;
+      if (FLAG_trace_turbo_scheduler) {
+        Trace("  Use count for #%d:%s (used by #%d:%s)-- = %d\n", (*i)->id(),
+              (*i)->op()->mnemonic(), i.edge().from()->id(),
+              i.edge().from()->op()->mnemonic(), data->unscheduled_count_);
+        if (data->unscheduled_count_ == 0) {
+          Trace("  newly eligible #%d:%s\n", (*i)->id(),
+                (*i)->op()->mnemonic());
+        }
+      }
+    }
+  }
+
+  Scheduler* scheduler_;
+  Schedule* schedule_;
+};
+
+
+void Scheduler::ScheduleLate() {
+  Trace("------------------- SCHEDULE LATE -----------------\n");
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("roots: ");
+    for (NodeVectorIter i = schedule_root_nodes_.begin();
+         i != schedule_root_nodes_.end(); ++i) {
+      Trace("#%d:%s ", (*i)->id(), (*i)->op()->mnemonic());
+    }
+    Trace("\n");
+  }
+
+  // Schedule late: place each node in the common dominator block of all
+  // its uses.
+  ScheduleLateNodeVisitor schedule_late_visitor(this);
+
+  {
+    Zone zone(zone_->isolate());
+    GenericGraphVisit::Visit<ScheduleLateNodeVisitor,
+                             NodeInputIterationTraits<Node> >(
+        graph_, &zone, schedule_root_nodes_.begin(), schedule_root_nodes_.end(),
+        &schedule_late_visitor);
+  }
+
+  // Add the collected nodes to their basic blocks in the right order.
+  int block_num = 0;
+  for (NodeVectorVectorIter i = scheduled_nodes_.begin();
+       i != scheduled_nodes_.end(); ++i) {
+    for (NodeVectorRIter j = i->rbegin(); j != i->rend(); ++j) {
+      schedule_->AddNode(schedule_->all_blocks_.at(block_num), *j);
+    }
+    block_num++;
+  }
+}
+
+
+bool Scheduler::ConnectFloatingControl() {
+  if (!has_floating_control_) return false;
+
+  Trace("Connecting floating control...\n");
+
+  // Process blocks and instructions backwards to find and connect floating
+  // control nodes into the control graph according to the block they were
+  // scheduled into.
+  int max = static_cast<int>(schedule_->rpo_order()->size());
+  for (int i = max - 1; i >= 0; i--) {
+    BasicBlock* block = schedule_->rpo_order()->at(i);
+    // TODO(titzer): we place at most one floating control structure per
+    // basic block because scheduling currently can interleave phis from
+    // one subgraph with the merges from another subgraph.
+    bool one_placed = false;
+    for (int j = static_cast<int>(block->nodes_.size()) - 1; j >= 0; j--) {
+      Node* node = block->nodes_[j];
+      SchedulerData* data = GetData(node);
+      if (data->is_floating_control_ && !data->is_connected_control_ &&
+          !one_placed) {
+        Trace("  Floating control #%d:%s was scheduled in B%d\n", node->id(),
+              node->op()->mnemonic(), block->id());
+        ConnectFloatingControlSubgraph(block, node);
+        one_placed = true;
+      }
+    }
+  }
+
+  return true;
+}
+
+
+void Scheduler::ConnectFloatingControlSubgraph(BasicBlock* block, Node* end) {
+  Node* block_start = block->nodes_[0];
+  DCHECK(IrOpcode::IsControlOpcode(block_start->opcode()));
+  // Find the current "control successor" of the node that starts the block
+  // by searching the control uses for a control input edge from a connected
+  // control node.
+  Node* control_succ = NULL;
+  for (UseIter i = block_start->uses().begin(); i != block_start->uses().end();
+       ++i) {
+    Node::Edge edge = i.edge();
+    if (NodeProperties::IsControlEdge(edge) &&
+        GetData(edge.from())->is_connected_control_) {
+      DCHECK_EQ(NULL, control_succ);
+      control_succ = edge.from();
+      control_succ->ReplaceInput(edge.index(), end);
+    }
+  }
+  DCHECK_NE(NULL, control_succ);
+  Trace("  Inserting floating control end %d:%s between %d:%s -> %d:%s\n",
+        end->id(), end->op()->mnemonic(), control_succ->id(),
+        control_succ->op()->mnemonic(), block_start->id(),
+        block_start->op()->mnemonic());
+
+  // Find the "start" node of the control subgraph, which should be the
+  // unique node that is itself floating control but has a control input that
+  // is not floating.
+  Node* start = NULL;
+  ZoneQueue<Node*> queue(zone_);
+  queue.push(end);
+  GetData(end)->is_connected_control_ = true;
+  while (!queue.empty()) {
+    Node* node = queue.front();
+    queue.pop();
+    Trace("  Search #%d:%s for control subgraph start\n", node->id(),
+          node->op()->mnemonic());
+    int max = NodeProperties::PastControlIndex(node);
+    for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+      Node* input = node->InputAt(i);
+      SchedulerData* data = GetData(input);
+      if (data->is_floating_control_) {
+        // {input} is floating control.
+        if (!data->is_connected_control_) {
+          // First time seeing {input} during this traversal, queue it.
+          queue.push(input);
+          data->is_connected_control_ = true;
+        }
+      } else {
+        // Otherwise, {node} is the start node, because it is floating control
+        // but is connected to an {input} that is not floating control.
+        DCHECK_EQ(NULL, start);  // There can be only one.
+        start = node;
+      }
+    }
+  }
+
+  DCHECK_NE(NULL, start);
+  start->ReplaceInput(NodeProperties::FirstControlIndex(start), block_start);
+
+  Trace("  Connecting floating control start %d:%s to %d:%s\n", start->id(),
+        start->op()->mnemonic(), block_start->id(),
+        block_start->op()->mnemonic());
+}
+
+
+// Numbering for BasicBlockData.rpo_number_ for this block traversal:
+static const int kBlockOnStack = -2;
+static const int kBlockVisited1 = -3;
+static const int kBlockVisited2 = -4;
+static const int kBlockUnvisited1 = -1;
+static const int kBlockUnvisited2 = kBlockVisited1;
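+//
+// The traversal runs in up to two passes: blocks start the first pass as
+// kBlockUnvisited1 (-1) and finish it as kBlockVisited1 (-3), which doubles
+// as kBlockUnvisited2 for the second pass (run only when loops are present);
+// the second pass then marks them kBlockVisited2 (-4). kBlockOnStack (-2)
+// marks blocks currently on the DFS stack in either pass.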
+
+struct SpecialRPOStackFrame {
+  BasicBlock* block;
+  int index;
+};
+
+struct BlockList {
+  BasicBlock* block;
+  BlockList* next;
+
+  BlockList* Add(Zone* zone, BasicBlock* b) {
+    BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
+    list->block = b;
+    list->next = this;
+    return list;
+  }
+
+  void Serialize(BasicBlockVector* final_order) {
+    for (BlockList* l = this; l != NULL; l = l->next) {
+      l->block->rpo_number_ = static_cast<int>(final_order->size());
+      final_order->push_back(l->block);
+    }
+  }
+};
+
+struct LoopInfo {
+  BasicBlock* header;
+  ZoneList<BasicBlock*>* outgoing;
+  BitVector* members;
+  LoopInfo* prev;
+  BlockList* end;
+  BlockList* start;
+
+  void AddOutgoing(Zone* zone, BasicBlock* block) {
+    if (outgoing == NULL) outgoing = new (zone) ZoneList<BasicBlock*>(2, zone);
+    outgoing->Add(block, zone);
+  }
+};
+
+
+static int Push(SpecialRPOStackFrame* stack, int depth, BasicBlock* child,
+                int unvisited) {
+  if (child->rpo_number_ == unvisited) {
+    stack[depth].block = child;
+    stack[depth].index = 0;
+    child->rpo_number_ = kBlockOnStack;
+    return depth + 1;
+  }
+  return depth;
+}
+
+
+// Computes loop membership from the backedges of the control flow graph.
+static LoopInfo* ComputeLoopInfo(
+    Zone* zone, SpecialRPOStackFrame* queue, int num_loops, int num_blocks,
+    ZoneList<std::pair<BasicBlock*, int> >* backedges) {
+  LoopInfo* loops = zone->NewArray<LoopInfo>(num_loops);
+  memset(loops, 0, num_loops * sizeof(LoopInfo));
+
+  // Compute loop membership starting from backedges.
+  // O(max(loop_depth) * max(|loop|))
+  for (int i = 0; i < backedges->length(); i++) {
+    BasicBlock* member = backedges->at(i).first;
+    BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
+    int loop_num = header->loop_end_;
+    if (loops[loop_num].header == NULL) {
+      loops[loop_num].header = header;
+      loops[loop_num].members = new (zone) BitVector(num_blocks, zone);
+    }
+
+    int queue_length = 0;
+    if (member != header) {
+      // As long as the header doesn't have a backedge to itself, push the
+      // member onto the queue and process its predecessors.
+      if (!loops[loop_num].members->Contains(member->id())) {
+        loops[loop_num].members->Add(member->id());
+      }
+      queue[queue_length++].block = member;
+    }
+
+    // Propagate loop membership backwards. All predecessors of M up to the
+    // loop header H are members of the loop too. O(|blocks between M and H|).
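+    // E.g. for a backedge B3 -> B1 with predecessor chain B1 -> B2 -> B3,
+    // B3 is recorded as a member and enqueued, then its predecessor B2 is
+    // added and enqueued; B2's only predecessor is the header B1 itself, so
+    // propagation stops with members {B2, B3}.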
+    while (queue_length > 0) {
+      BasicBlock* block = queue[--queue_length].block;
+      for (int i = 0; i < block->PredecessorCount(); i++) {
+        BasicBlock* pred = block->PredecessorAt(i);
+        if (pred != header) {
+          if (!loops[loop_num].members->Contains(pred->id())) {
+            loops[loop_num].members->Add(pred->id());
+            queue[queue_length++].block = pred;
+          }
+        }
+      }
+    }
+  }
+  return loops;
+}
+
+
+#if DEBUG
+static void PrintRPO(int num_loops, LoopInfo* loops, BasicBlockVector* order) {
+  PrintF("-- RPO with %d loops ", num_loops);
+  if (num_loops > 0) {
+    PrintF("(");
+    for (int i = 0; i < num_loops; i++) {
+      if (i > 0) PrintF(" ");
+      PrintF("B%d", loops[i].header->id());
+    }
+    PrintF(") ");
+  }
+  PrintF("-- \n");
+
+  for (int i = 0; i < static_cast<int>(order->size()); i++) {
+    BasicBlock* block = (*order)[i];
+    int bid = block->id();
+    PrintF("%5d:", i);
+    for (int i = 0; i < num_loops; i++) {
+      bool membership = loops[i].members->Contains(bid);
+      bool range = loops[i].header->LoopContains(block);
+      PrintF(membership ? " |" : "  ");
+      PrintF(range ? "x" : " ");
+    }
+    PrintF("  B%d: ", bid);
+    if (block->loop_end_ >= 0) {
+      PrintF(" range: [%d, %d)", block->rpo_number_, block->loop_end_);
+    }
+    PrintF("\n");
+  }
+}
+
+
+static void VerifySpecialRPO(int num_loops, LoopInfo* loops,
+                             BasicBlockVector* order) {
+  DCHECK(order->size() > 0);
+  DCHECK((*order)[0]->id() == 0);  // entry should be first.
+
+  for (int i = 0; i < num_loops; i++) {
+    LoopInfo* loop = &loops[i];
+    BasicBlock* header = loop->header;
+
+    DCHECK(header != NULL);
+    DCHECK(header->rpo_number_ >= 0);
+    DCHECK(header->rpo_number_ < static_cast<int>(order->size()));
+    DCHECK(header->loop_end_ >= 0);
+    DCHECK(header->loop_end_ <= static_cast<int>(order->size()));
+    DCHECK(header->loop_end_ > header->rpo_number_);
+
+    // Verify the start ... end list relationship.
+    int links = 0;
+    BlockList* l = loop->start;
+    DCHECK(l != NULL && l->block == header);
+    bool end_found;
+    while (true) {
+      if (l == NULL || l == loop->end) {
+        end_found = (loop->end == l);
+        break;
+      }
+      // The list should be in the same order as the final result.
+      DCHECK(l->block->rpo_number_ == links + loop->header->rpo_number_);
+      links++;
+      l = l->next;
+      DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
+    }
+    DCHECK(links > 0);
+    DCHECK(links == (header->loop_end_ - header->rpo_number_));
+    DCHECK(end_found);
+
+    // Check the contiguousness of loops.
+    int count = 0;
+    for (int j = 0; j < static_cast<int>(order->size()); j++) {
+      BasicBlock* block = order->at(j);
+      DCHECK(block->rpo_number_ == j);
+      if (j < header->rpo_number_ || j >= header->loop_end_) {
+        DCHECK(!loop->members->Contains(block->id()));
+      } else {
+        if (block == header) {
+          DCHECK(!loop->members->Contains(block->id()));
+        } else {
+          DCHECK(loop->members->Contains(block->id()));
+        }
+        count++;
+      }
+    }
+    DCHECK(links == count);
+  }
+}
+#endif  // DEBUG
+
+
+// Compute the special reverse-post-order block ordering, which is essentially
+// an RPO of the graph where loop bodies are contiguous. Properties:
+// 1. If block A is a predecessor of B, then A appears before B in the order,
+//    unless B is a loop header and A is in the loop headed at B
+//    (i.e. A -> B is a backedge).
+// => If block A dominates block B, then A appears before B in the order.
+// => If block A is a loop header, A appears before all blocks in the loop
+//    headed at A.
+// 2. All loops are contiguous in the order (i.e. no intervening blocks that
+//    do not belong to the loop.)
+// Note that a simple RPO traversal satisfies (1) but not (2).
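+// Example: for B0 -> B1, B1 -> {B2, B4}, B2 -> B3, B3 -> B1 (a loop
+// {B1, B2, B3} with exit edge B1 -> B4), a plain RPO may produce
+// B0, B1, B4, B2, B3, interleaving B4 with the loop body, whereas the
+// special RPO produces B0, B1, B2, B3, B4, keeping the loop contiguous.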
+BasicBlockVector* Scheduler::ComputeSpecialRPO(Schedule* schedule) {
+  Zone tmp_zone(schedule->zone()->isolate());
+  Zone* zone = &tmp_zone;
+  Trace("------------- COMPUTING SPECIAL RPO ---------------\n");
+  // RPO should not have been computed for this schedule yet.
+  CHECK_EQ(kBlockUnvisited1, schedule->start()->rpo_number_);
+  CHECK_EQ(0, static_cast<int>(schedule->rpo_order_.size()));
+
+  // Perform an iterative RPO traversal using an explicit stack,
+  // recording backedges that form cycles. O(|B|).
+  ZoneList<std::pair<BasicBlock*, int> > backedges(1, zone);
+  SpecialRPOStackFrame* stack =
+      zone->NewArray<SpecialRPOStackFrame>(schedule->BasicBlockCount());
+  BasicBlock* entry = schedule->start();
+  BlockList* order = NULL;
+  int stack_depth = Push(stack, 0, entry, kBlockUnvisited1);
+  int num_loops = 0;
+
+  while (stack_depth > 0) {
+    int current = stack_depth - 1;
+    SpecialRPOStackFrame* frame = stack + current;
+
+    if (frame->index < frame->block->SuccessorCount()) {
+      // Process the next successor.
+      BasicBlock* succ = frame->block->SuccessorAt(frame->index++);
+      if (succ->rpo_number_ == kBlockVisited1) continue;
+      if (succ->rpo_number_ == kBlockOnStack) {
+        // The successor is on the stack, so this is a backedge (cycle).
+        backedges.Add(
+            std::pair<BasicBlock*, int>(frame->block, frame->index - 1), zone);
+        if (succ->loop_end_ < 0) {
+          // Assign a new loop number to the header if it doesn't have one.
+          succ->loop_end_ = num_loops++;
+        }
+      } else {
+        // Push the successor onto the stack.
+        DCHECK(succ->rpo_number_ == kBlockUnvisited1);
+        stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited1);
+      }
+    } else {
+      // Finished with all successors; pop the stack and add the block.
+      order = order->Add(zone, frame->block);
+      frame->block->rpo_number_ = kBlockVisited1;
+      stack_depth--;
+    }
+  }
+
+  // If no loops were encountered, then the order we computed was correct.
+  LoopInfo* loops = NULL;
+  if (num_loops != 0) {
+    // Otherwise, compute the loop information from the backedges in order
+    // to perform a traversal that groups loop bodies together.
+    loops = ComputeLoopInfo(zone, stack, num_loops, schedule->BasicBlockCount(),
+                            &backedges);
+
+    // Initialize the "loop stack". Note the entry could be a loop header.
+    LoopInfo* loop = entry->IsLoopHeader() ? &loops[entry->loop_end_] : NULL;
+    order = NULL;
+
+    // Perform an iterative post-order traversal, visiting loop bodies before
+    // edges that lead out of loops. Visits each block once, but linking loop
+    // sections together is linear in the loop size, so overall is
+    // O(|B| + max(loop_depth) * max(|loop|))
+    stack_depth = Push(stack, 0, entry, kBlockUnvisited2);
+    while (stack_depth > 0) {
+      SpecialRPOStackFrame* frame = stack + (stack_depth - 1);
+      BasicBlock* block = frame->block;
+      BasicBlock* succ = NULL;
+
+      if (frame->index < block->SuccessorCount()) {
+        // Process the next normal successor.
+        succ = block->SuccessorAt(frame->index++);
+      } else if (block->IsLoopHeader()) {
+        // Process additional outgoing edges from the loop header.
+        if (block->rpo_number_ == kBlockOnStack) {
+          // Finish the loop body the first time the header is left on the
+          // stack.
+          DCHECK(loop != NULL && loop->header == block);
+          loop->start = order->Add(zone, block);
+          order = loop->end;
+          block->rpo_number_ = kBlockVisited2;
+          // Pop the loop stack and continue visiting outgoing edges within
+          // the context of the outer loop, if any.
+          loop = loop->prev;
+          // We leave the loop header on the stack; the rest of this iteration
+          // and later iterations will go through its outgoing edges list.
+        }
+
+        // Use the next outgoing edge if there are any.
+        int outgoing_index = frame->index - block->SuccessorCount();
+        LoopInfo* info = &loops[block->loop_end_];
+        DCHECK(loop != info);
+        if (info->outgoing != NULL &&
+            outgoing_index < info->outgoing->length()) {
+          succ = info->outgoing->at(outgoing_index);
+          frame->index++;
+        }
+      }
+
+      if (succ != NULL) {
+        // Process the next successor.
+        if (succ->rpo_number_ == kBlockOnStack) continue;
+        if (succ->rpo_number_ == kBlockVisited2) continue;
+        DCHECK(succ->rpo_number_ == kBlockUnvisited2);
+        if (loop != NULL && !loop->members->Contains(succ->id())) {
+          // The successor is not in the current loop or any nested loop.
+          // Add it to the outgoing edges of this loop and visit it later.
+          loop->AddOutgoing(zone, succ);
+        } else {
+          // Push the successor onto the stack.
+          stack_depth = Push(stack, stack_depth, succ, kBlockUnvisited2);
+          if (succ->IsLoopHeader()) {
+            // Push the inner loop onto the loop stack.
+            DCHECK(succ->loop_end_ >= 0 && succ->loop_end_ < num_loops);
+            LoopInfo* next = &loops[succ->loop_end_];
+            next->end = order;
+            next->prev = loop;
+            loop = next;
+          }
+        }
+      } else {
+        // Finished with all successors of the current block.
+        if (block->IsLoopHeader()) {
+          // If we are going to pop a loop header, then add its entire body.
+          LoopInfo* info = &loops[block->loop_end_];
+          for (BlockList* l = info->start; true; l = l->next) {
+            if (l->next == info->end) {
+              l->next = order;
+              info->end = order;
+              break;
+            }
+          }
+          order = info->start;
+        } else {
+          // Pop a single node off the stack and add it to the order.
+          order = order->Add(zone, block);
+          block->rpo_number_ = kBlockVisited2;
+        }
+        stack_depth--;
+      }
+    }
+  }
+
+  // Construct the final order from the list.
+  BasicBlockVector* final_order = &schedule->rpo_order_;
+  order->Serialize(final_order);
+
+  // Compute the correct loop header for every block and set the correct loop
+  // ends.
+  LoopInfo* current_loop = NULL;
+  BasicBlock* current_header = NULL;
+  int loop_depth = 0;
+  for (BasicBlockVectorIter i = final_order->begin(); i != final_order->end();
+       ++i) {
+    BasicBlock* current = *i;
+    current->loop_header_ = current_header;
+    if (current->IsLoopHeader()) {
+      loop_depth++;
+      current_loop = &loops[current->loop_end_];
+      BlockList* end = current_loop->end;
+      current->loop_end_ = end == NULL ? static_cast<int>(final_order->size())
+                                       : end->block->rpo_number_;
+      current_header = current_loop->header;
+      Trace("B%d is a loop header, increment loop depth to %d\n", current->id(),
+            loop_depth);
+    } else {
+      while (current_header != NULL &&
+             current->rpo_number_ >= current_header->loop_end_) {
+        DCHECK(current_header->IsLoopHeader());
+        DCHECK(current_loop != NULL);
+        current_loop = current_loop->prev;
+        current_header = current_loop == NULL ? NULL : current_loop->header;
+        --loop_depth;
+      }
+    }
+    current->loop_depth_ = loop_depth;
+    if (current->loop_header_ == NULL) {
+      Trace("B%d is not in a loop (depth == %d)\n", current->id(),
+            current->loop_depth_);
+    } else {
+      Trace("B%d has loop header B%d, (depth == %d)\n", current->id(),
+            current->loop_header_->id(), current->loop_depth_);
+    }
+  }
+
+#if DEBUG
+  if (FLAG_trace_turbo_scheduler) PrintRPO(num_loops, loops, final_order);
+  VerifySpecialRPO(num_loops, loops, final_order);
+#endif
+  return final_order;
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/scheduler.h b/src/compiler/scheduler.h
new file mode 100644
index 0000000..b21662f
--- /dev/null
+++ b/src/compiler/scheduler.h
@@ -0,0 +1,97 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SCHEDULER_H_
+#define V8_COMPILER_SCHEDULER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/opcodes.h"
+#include "src/compiler/schedule.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Computes a schedule from a graph, placing nodes into basic blocks and
+// ordering the basic blocks in the special RPO order.
+class Scheduler {
+ public:
+  // The complete scheduling algorithm.
+  // Create a new schedule and place all nodes from the graph into it.
+  static Schedule* ComputeSchedule(Graph* graph);
+
+  // Compute the RPO of blocks in an existing schedule.
+  static BasicBlockVector* ComputeSpecialRPO(Schedule* schedule);
+
+  // (Exposed for testing only)
+  // Build and connect the CFG for a node graph, but don't schedule nodes.
+  static void ComputeCFG(Graph* graph, Schedule* schedule);
+
+ private:
+  enum Placement { kUnknown, kSchedulable, kFixed };
+
+  // Per-node data tracked during scheduling.
+  struct SchedulerData {
+    int unscheduled_count_;      // Number of unscheduled uses of this node.
+    int minimum_rpo_;            // Minimum legal RPO placement.
+    bool is_connected_control_;  // {true} if control-connected to the end node.
+    bool is_floating_control_;   // {true} if control, but not control-connected
+                                 // to the end node.
+    Placement placement_ : 3;    // Whether the node is fixed, schedulable,
+                                 // or not yet known.
+  };
+
+  Zone* zone_;
+  Graph* graph_;
+  Schedule* schedule_;
+  NodeVectorVector scheduled_nodes_;
+  NodeVector schedule_root_nodes_;
+  ZoneVector<SchedulerData> node_data_;
+  bool has_floating_control_;
+
+  Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
+
+  SchedulerData DefaultSchedulerData();
+
+  SchedulerData* GetData(Node* node) {
+    DCHECK(node->id() < static_cast<int>(node_data_.size()));
+    return &node_data_[node->id()];
+  }
+
+  void BuildCFG();
+
+  Placement GetPlacement(Node* node);
+
+  int GetRPONumber(BasicBlock* block) {
+    DCHECK(block->rpo_number_ >= 0 &&
+           block->rpo_number_ < static_cast<int>(schedule_->rpo_order_.size()));
+    DCHECK(schedule_->rpo_order_[block->rpo_number_] == block);
+    return block->rpo_number_;
+  }
+
+  void GenerateImmediateDominatorTree();
+  BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+
+  friend class CFGBuilder;
+
+  friend class ScheduleEarlyNodeVisitor;
+  void ScheduleEarly();
+
+  friend class PrepareUsesVisitor;
+  void PrepareUses();
+
+  friend class ScheduleLateNodeVisitor;
+  void ScheduleLate();
+
+  bool ConnectFloatingControl();
+
+  void ConnectFloatingControlSubgraph(BasicBlock* block, Node* node);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_SCHEDULER_H_
diff --git a/src/compiler/simplified-lowering.cc b/src/compiler/simplified-lowering.cc
new file mode 100644
index 0000000..f794525
--- /dev/null
+++ b/src/compiler/simplified-lowering.cc
@@ -0,0 +1,945 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-lowering.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/representation-change.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Macro for outputting trace information from representation inference.
+#define TRACE(x) \
+  if (FLAG_trace_representation) PrintF x
+
+// Representation selection and lowering of {Simplified} operators to machine
+// operators are intertwined. We use a fixpoint calculation to compute both the
+// output representation and the best possible lowering for {Simplified} nodes.
+// Representation change insertion ensures that all values are in the correct
+// machine representation after this phase, as dictated by the machine
+// operators themselves.
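+//
+// For example, if the propagation phase determines that a NumberAdd of two
+// int32-typed inputs is only ever used as an int32 value, the lowering phase
+// can rewrite it to a machine-level Int32Add; otherwise it becomes a
+// Float64Add, with representation changes inserted around it as needed.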
+enum Phase {
+  // 1.) PROPAGATE: Traverse the graph from the end, pushing usage information
+  //     backwards from uses to definitions, around cycles in phis, according
+  //     to local rules for each operator.
+  //     During this phase, the usage information for a node determines the best
+  //     possible lowering for each operator so far, and that in turn determines
+  //     the output representation.
+  //     Therefore, to be correct, this phase must iterate to a fixpoint before
+  //     the next phase can begin.
+  PROPAGATE,
+
+  // 2.) LOWER: perform lowering for all {Simplified} nodes by replacing some
+  //     operators for some nodes, expanding some nodes to multiple nodes, or
+  //     removing some (redundant) nodes.
+  //     During this phase, use the {RepresentationChanger} to insert
+  //     representation changes between uses that demand a particular
+  //     representation and nodes that produce a different representation.
+  LOWER
+};
+
+
+class RepresentationSelector {
+ public:
+  // Information for each node tracked during the fixpoint.
+  struct NodeInfo {
+    MachineTypeUnion use : 15;     // Union of all usages for the node.
+    bool queued : 1;               // Bookkeeping for the traversal.
+    bool visited : 1;              // Bookkeeping for the traversal.
+    MachineTypeUnion output : 15;  // Output type of the node.
+  };
+
+  RepresentationSelector(JSGraph* jsgraph, Zone* zone,
+                         RepresentationChanger* changer)
+      : jsgraph_(jsgraph),
+        count_(jsgraph->graph()->NodeCount()),
+        info_(zone->NewArray<NodeInfo>(count_)),
+        nodes_(zone),
+        replacements_(zone),
+        contains_js_nodes_(false),
+        phase_(PROPAGATE),
+        changer_(changer),
+        queue_(zone) {
+    memset(info_, 0, sizeof(NodeInfo) * count_);
+  }
+
+  void Run(SimplifiedLowering* lowering) {
+    // Run propagation phase to a fixpoint.
+    TRACE(("--{Propagation phase}--\n"));
+    phase_ = PROPAGATE;
+    Enqueue(jsgraph_->graph()->end());
+    // Process nodes from the queue until it is empty.
+    while (!queue_.empty()) {
+      Node* node = queue_.front();
+      NodeInfo* info = GetInfo(node);
+      queue_.pop();
+      info->queued = false;
+      TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+      VisitNode(node, info->use, NULL);
+      TRACE(("  ==> output "));
+      PrintInfo(info->output);
+      TRACE(("\n"));
+    }
+
+    // Run lowering and change insertion phase.
+    TRACE(("--{Simplified lowering phase}--\n"));
+    phase_ = LOWER;
+    // Process nodes from the collected {nodes_} vector.
+    for (NodeVector::iterator i = nodes_.begin(); i != nodes_.end(); ++i) {
+      Node* node = *i;
+      TRACE((" visit #%d: %s\n", node->id(), node->op()->mnemonic()));
+      // Reuse {VisitNode()} so the representation rules are in one place.
+      VisitNode(node, GetUseInfo(node), lowering);
+    }
+
+    // Perform the final replacements.
+    for (NodeVector::iterator i = replacements_.begin();
+         i != replacements_.end(); ++i) {
+      Node* node = *i;
+      Node* replacement = *(++i);
+      node->ReplaceUses(replacement);
+    }
+  }
+
+  // Enqueue {node} if the {use} contains new information for that node.
+  // Add {node} to {nodes_} if this is the first time it's been visited.
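+  // Since {info->use} only ever accumulates bits, a node is re-enqueued at
+  // most once per new use bit, which guarantees that the propagation phase
+  // reaches a fixpoint.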
+  void Enqueue(Node* node, MachineTypeUnion use = 0) {
+    if (phase_ != PROPAGATE) return;
+    NodeInfo* info = GetInfo(node);
+    if (!info->visited) {
+      // First visit of this node.
+      info->visited = true;
+      info->queued = true;
+      nodes_.push_back(node);
+      queue_.push(node);
+      TRACE(("  initial: "));
+      info->use |= use;
+      PrintUseInfo(node);
+      return;
+    }
+    TRACE(("   queue?: "));
+    PrintUseInfo(node);
+    if ((info->use & use) != use) {
+      // New usage information for the node is available.
+      if (!info->queued) {
+        queue_.push(node);
+        info->queued = true;
+        TRACE(("   added: "));
+      } else {
+        TRACE((" inqueue: "));
+      }
+      info->use |= use;
+      PrintUseInfo(node);
+    }
+  }
+
+  bool lower() { return phase_ == LOWER; }
+
+  void Enqueue(Node* node, MachineType use) {
+    Enqueue(node, static_cast<MachineTypeUnion>(use));
+  }
+
+  void SetOutput(Node* node, MachineTypeUnion output) {
+    // Every node should have at most one output representation. Note that
+    // phis can have none, if they have not been used in a representation-inducing
+    // instruction.
+    DCHECK((output & kRepMask) == 0 ||
+           base::bits::IsPowerOfTwo32(output & kRepMask));
+    GetInfo(node)->output = output;
+  }
+
+  bool BothInputsAre(Node* node, Type* type) {
+    DCHECK_EQ(2, node->InputCount());
+    return NodeProperties::GetBounds(node->InputAt(0)).upper->Is(type) &&
+           NodeProperties::GetBounds(node->InputAt(1)).upper->Is(type);
+  }
+
+  void ProcessInput(Node* node, int index, MachineTypeUnion use) {
+    Node* input = node->InputAt(index);
+    if (phase_ == PROPAGATE) {
+      // In the propagate phase, propagate the usage information backward.
+      Enqueue(input, use);
+    } else {
+      // In the change phase, insert a change before the use if necessary.
+      if ((use & kRepMask) == 0) return;  // No input requirement on the use.
+      MachineTypeUnion output = GetInfo(input)->output;
+      if ((output & kRepMask & use) == 0) {
+        // Output representation doesn't match usage.
+        TRACE(("  change: #%d:%s(@%d #%d:%s) ", node->id(),
+               node->op()->mnemonic(), index, input->id(),
+               input->op()->mnemonic()));
+        TRACE((" from "));
+        PrintInfo(output);
+        TRACE((" to "));
+        PrintInfo(use);
+        TRACE(("\n"));
+        Node* n = changer_->GetRepresentationFor(input, output, use);
+        node->ReplaceInput(index, n);
+      }
+    }
+  }
+
+  void ProcessRemainingInputs(Node* node, int index) {
+    DCHECK_GE(index, NodeProperties::PastValueIndex(node));
+    DCHECK_GE(index, NodeProperties::PastContextIndex(node));
+    for (int i = std::max(index, NodeProperties::FirstEffectIndex(node));
+         i < NodeProperties::PastEffectIndex(node); ++i) {
+      Enqueue(node->InputAt(i));  // Effect inputs: just visit
+    }
+    for (int i = std::max(index, NodeProperties::FirstControlIndex(node));
+         i < NodeProperties::PastControlIndex(node); ++i) {
+      Enqueue(node->InputAt(i));  // Control inputs: just visit
+    }
+  }
+
+  // The default, most general visitation case. For {node}, process all value,
+  // context, effect, and control inputs, assuming that value inputs should have
+  // {kRepTagged} representation and can observe all output values {kTypeAny}.
+  void VisitInputs(Node* node) {
+    InputIter i = node->inputs().begin();
+    for (int j = OperatorProperties::GetValueInputCount(node->op()); j > 0;
+         ++i, j--) {
+      ProcessInput(node, i.index(), kMachAnyTagged);  // Value inputs
+    }
+    for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
+         ++i, j--) {
+      ProcessInput(node, i.index(), kMachAnyTagged);  // Context inputs
+    }
+    for (int j = OperatorProperties::GetEffectInputCount(node->op()); j > 0;
+         ++i, j--) {
+      Enqueue(*i);  // Effect inputs: just visit
+    }
+    for (int j = OperatorProperties::GetControlInputCount(node->op()); j > 0;
+         ++i, j--) {
+      Enqueue(*i);  // Control inputs: just visit
+    }
+    SetOutput(node, kMachAnyTagged);
+  }
+
+  // Helper for binops of the I x I -> O variety.
+  void VisitBinop(Node* node, MachineTypeUnion input_use,
+                  MachineTypeUnion output) {
+    DCHECK_EQ(2, node->InputCount());
+    ProcessInput(node, 0, input_use);
+    ProcessInput(node, 1, input_use);
+    SetOutput(node, output);
+  }
+
+  // Helper for unops of the I -> O variety.
+  void VisitUnop(Node* node, MachineTypeUnion input_use,
+                 MachineTypeUnion output) {
+    DCHECK_EQ(1, node->InputCount());
+    ProcessInput(node, 0, input_use);
+    SetOutput(node, output);
+  }
+
+  // Helper for leaf nodes.
+  void VisitLeaf(Node* node, MachineTypeUnion output) {
+    DCHECK_EQ(0, node->InputCount());
+    SetOutput(node, output);
+  }
+
+  // Helpers for specific types of binops.
+  void VisitFloat64Binop(Node* node) {
+    VisitBinop(node, kMachFloat64, kMachFloat64);
+  }
+  void VisitInt32Binop(Node* node) { VisitBinop(node, kMachInt32, kMachInt32); }
+  void VisitUint32Binop(Node* node) {
+    VisitBinop(node, kMachUint32, kMachUint32);
+  }
+  void VisitInt64Binop(Node* node) { VisitBinop(node, kMachInt64, kMachInt64); }
+  void VisitUint64Binop(Node* node) {
+    VisitBinop(node, kMachUint64, kMachUint64);
+  }
+  void VisitFloat64Cmp(Node* node) { VisitBinop(node, kMachFloat64, kRepBit); }
+  void VisitInt32Cmp(Node* node) { VisitBinop(node, kMachInt32, kRepBit); }
+  void VisitUint32Cmp(Node* node) { VisitBinop(node, kMachUint32, kRepBit); }
+  void VisitInt64Cmp(Node* node) { VisitBinop(node, kMachInt64, kRepBit); }
+  void VisitUint64Cmp(Node* node) { VisitBinop(node, kMachUint64, kRepBit); }
+
+  // Helper for handling phis.
+  void VisitPhi(Node* node, MachineTypeUnion use,
+                SimplifiedLowering* lowering) {
+    // First, propagate the usage information to inputs of the phi.
+    if (!lower()) {
+      int values = OperatorProperties::GetValueInputCount(node->op());
+      // Propagate {use} of the phi to value inputs, and 0 to control.
+      Node::Inputs inputs = node->inputs();
+      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+           ++iter, --values) {
+        // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+        ProcessInput(node, iter.index(), values > 0 ? use : 0);
+      }
+    }
+    // Phis adapt to whatever output representation their uses demand,
+    // pushing representation changes to their inputs.
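+    // E.g. if one use demands kRepFloat64 and another kRepWord32, the phi
+    // outputs kRepFloat64 and a representation change to word32 is inserted
+    // at the word32 use.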
+    MachineTypeUnion use_rep = GetUseInfo(node) & kRepMask;
+    MachineTypeUnion use_type = GetUseInfo(node) & kTypeMask;
+    MachineTypeUnion rep = 0;
+    if (use_rep & kRepTagged) {
+      rep = kRepTagged;  // Tagged overrides everything.
+    } else if (use_rep & kRepFloat64) {
+      rep = kRepFloat64;
+    } else if (use_rep & kRepWord64) {
+      rep = kRepWord64;
+    } else if (use_rep & kRepWord32) {
+      rep = kRepWord32;
+    } else if (use_rep & kRepBit) {
+      rep = kRepBit;
+    } else {
+      // There was no representation associated with any of the uses.
+      // TODO(titzer): Select the best rep using phi's type, not the usage type?
+      if (use_type & kTypeAny) {
+        rep = kRepTagged;
+      } else if (use_type & kTypeNumber) {
+        rep = kRepFloat64;
+      } else if (use_type & kTypeInt64 || use_type & kTypeUint64) {
+        rep = kRepWord64;
+      } else if (use_type & kTypeInt32 || use_type & kTypeUint32) {
+        rep = kRepWord32;
+      } else if (use_type & kTypeBool) {
+        rep = kRepBit;
+      } else {
+        UNREACHABLE();  // should have at least a usage type!
+      }
+    }
+    // Preserve the usage type, but set the representation.
+    Type* upper = NodeProperties::GetBounds(node).upper;
+    MachineTypeUnion output_type = rep | changer_->TypeFromUpperBound(upper);
+    SetOutput(node, output_type);
+
+    if (lower()) {
+      int values = OperatorProperties::GetValueInputCount(node->op());
+
+      // Update the phi operator.
+      MachineType type = static_cast<MachineType>(output_type);
+      if (type != OpParameter<MachineType>(node)) {
+        node->set_op(lowering->common()->Phi(type, values));
+      }
+
+      // Convert inputs to the output representation of this phi.
+      Node::Inputs inputs = node->inputs();
+      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+           ++iter, --values) {
+        // TODO(titzer): it'd be nice to have distinguished edge kinds here.
+        ProcessInput(node, iter.index(), values > 0 ? output_type : 0);
+      }
+    }
+  }
+
+  const Operator* Int32Op(Node* node) {
+    return changer_->Int32OperatorFor(node->opcode());
+  }
+
+  const Operator* Uint32Op(Node* node) {
+    return changer_->Uint32OperatorFor(node->opcode());
+  }
+
+  const Operator* Float64Op(Node* node) {
+    return changer_->Float64OperatorFor(node->opcode());
+  }
+
+  static MachineType AssumeImplicitFloat32Change(MachineType type) {
+    // TODO(titzer): Assume loads of float32 change representation to float64.
+    // Fix this with full support for float32 representations.
+    if (type & kRepFloat32) {
+      return static_cast<MachineType>((type & ~kRepFloat32) | kRepFloat64);
+    }
+    return type;
+  }
+
+  // Dispatching routine for visiting the node {node} with the usage {use}.
+  // Depending on the operator, propagate new usage info to the inputs.
+  void VisitNode(Node* node, MachineTypeUnion use,
+                 SimplifiedLowering* lowering) {
+    switch (node->opcode()) {
+      //------------------------------------------------------------------
+      // Common operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kStart:
+      case IrOpcode::kDead:
+        return VisitLeaf(node, 0);
+      case IrOpcode::kParameter: {
+        // TODO(titzer): use representation from linkage.
+        Type* upper = NodeProperties::GetBounds(node).upper;
+        ProcessInput(node, 0, 0);
+        SetOutput(node, kRepTagged | changer_->TypeFromUpperBound(upper));
+        return;
+      }
+      case IrOpcode::kInt32Constant:
+        return VisitLeaf(node, kRepWord32);
+      case IrOpcode::kInt64Constant:
+        return VisitLeaf(node, kRepWord64);
+      case IrOpcode::kFloat64Constant:
+        return VisitLeaf(node, kRepFloat64);
+      case IrOpcode::kExternalConstant:
+        return VisitLeaf(node, kMachPtr);
+      case IrOpcode::kNumberConstant:
+        return VisitLeaf(node, kRepTagged);
+      case IrOpcode::kHeapConstant:
+        return VisitLeaf(node, kRepTagged);
+
+      case IrOpcode::kEnd:
+      case IrOpcode::kIfTrue:
+      case IrOpcode::kIfFalse:
+      case IrOpcode::kReturn:
+      case IrOpcode::kMerge:
+      case IrOpcode::kThrow:
+        return VisitInputs(node);  // default visit for all node inputs.
+
+      case IrOpcode::kBranch:
+        ProcessInput(node, 0, kRepBit);
+        Enqueue(NodeProperties::GetControlInput(node, 0));
+        break;
+      case IrOpcode::kPhi:
+        return VisitPhi(node, use, lowering);
+
+//------------------------------------------------------------------
+// JavaScript operators.
+//------------------------------------------------------------------
+// For now, we assume that all JS operators were too complex to lower
+// to Simplified and that they will always require tagged value inputs
+// and produce tagged value outputs.
+// TODO(turbofan): it might be possible to lower some JSOperators here,
+// but that responsibility really lies in the typed lowering phase.
+#define DEFINE_JS_CASE(x) case IrOpcode::k##x:
+        JS_OP_LIST(DEFINE_JS_CASE)
+#undef DEFINE_JS_CASE
+        contains_js_nodes_ = true;
+        VisitInputs(node);
+        return SetOutput(node, kRepTagged);
+
+      //------------------------------------------------------------------
+      // Simplified operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kBooleanNot: {
+        if (lower()) {
+          MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
+          if (input & kRepBit) {
+            // BooleanNot(x: kRepBit) => WordEqual(x, #0)
+            node->set_op(lowering->machine()->WordEqual());
+            node->AppendInput(jsgraph_->zone(), jsgraph_->Int32Constant(0));
+          } else {
+            // BooleanNot(x: kRepTagged) => WordEqual(x, #false)
+            node->set_op(lowering->machine()->WordEqual());
+            node->AppendInput(jsgraph_->zone(), jsgraph_->FalseConstant());
+          }
+        } else {
+          // No input representation requirement; adapt during lowering.
+          ProcessInput(node, 0, kTypeBool);
+          SetOutput(node, kRepBit);
+        }
+        break;
+      }
+      case IrOpcode::kBooleanToNumber: {
+        if (lower()) {
+          MachineTypeUnion input = GetInfo(node->InputAt(0))->output;
+          if (input & kRepBit) {
+            // BooleanToNumber(x: kRepBit) => x
+            DeferReplacement(node, node->InputAt(0));
+          } else {
+            // BooleanToNumber(x: kRepTagged) => WordEqual(x, #true)
+            node->set_op(lowering->machine()->WordEqual());
+            node->AppendInput(jsgraph_->zone(), jsgraph_->TrueConstant());
+          }
+        } else {
+          // No input representation requirement; adapt during lowering.
+          ProcessInput(node, 0, kTypeBool);
+          SetOutput(node, kMachInt32);
+        }
+        break;
+      }
+      case IrOpcode::kNumberEqual:
+      case IrOpcode::kNumberLessThan:
+      case IrOpcode::kNumberLessThanOrEqual: {
+        // Number comparisons reduce to integer comparisons for integer inputs.
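+        // E.g. NumberLessThan(a, b) with both inputs typed Signed32 is
+        // lowered to a signed word32 comparison instead of a float64 one.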
+        if (BothInputsAre(node, Type::Signed32())) {
+          // => signed Int32Cmp
+          VisitInt32Cmp(node);
+          if (lower()) node->set_op(Int32Op(node));
+        } else if (BothInputsAre(node, Type::Unsigned32())) {
+          // => unsigned Int32Cmp
+          VisitUint32Cmp(node);
+          if (lower()) node->set_op(Uint32Op(node));
+        } else {
+          // => Float64Cmp
+          VisitFloat64Cmp(node);
+          if (lower()) node->set_op(Float64Op(node));
+        }
+        break;
+      }
+      case IrOpcode::kNumberAdd:
+      case IrOpcode::kNumberSubtract: {
+        // Add and subtract reduce to Int32Add/Sub if the inputs
+        // are already integers and all uses are truncating.
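+        // E.g. in (a + b) | 0 with int32 {a} and {b}, every use observes
+        // only the low 32 bits, so the addition can be performed as a word32
+        // add and overflow beyond 32 bits is irrelevant.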
+        if (BothInputsAre(node, Type::Signed32()) &&
+            (use & (kTypeUint32 | kTypeNumber | kTypeAny)) == 0) {
+          // => signed Int32Add/Sub
+          VisitInt32Binop(node);
+          if (lower()) node->set_op(Int32Op(node));
+        } else if (BothInputsAre(node, Type::Unsigned32()) &&
+                   (use & (kTypeInt32 | kTypeNumber | kTypeAny)) == 0) {
+          // => unsigned Int32Add/Sub
+          VisitUint32Binop(node);
+          if (lower()) node->set_op(Uint32Op(node));
+        } else {
+          // => Float64Add/Sub
+          VisitFloat64Binop(node);
+          if (lower()) node->set_op(Float64Op(node));
+        }
+        break;
+      }
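// [Editor's sketch] Why truncating uses make Int32Add safe here: two signed
// 32-bit inputs sum to at most 2^32 in magnitude, which float64 represents
// exactly, so truncating the float64 sum to 32 bits (what every use does, by
// assumption) equals two's-complement wraparound addition:
#include <cassert>
#include <cstdint>
int main() {
  int32_t a = 0x7fffffff, b = 1;  // overflows as int32
  double exact = static_cast<double>(a) + static_cast<double>(b);
  uint32_t low32 = static_cast<uint32_t>(static_cast<int64_t>(exact));
  uint32_t wrap = static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
  assert(low32 == wrap);  // both are 0x80000000
  return 0;
}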
+      case IrOpcode::kNumberMultiply:
+      case IrOpcode::kNumberDivide:
+      case IrOpcode::kNumberModulus: {
+        // Float64Mul/Div/Mod
+        VisitFloat64Binop(node);
+        if (lower()) node->set_op(Float64Op(node));
+        break;
+      }
+      case IrOpcode::kNumberToInt32: {
+        MachineTypeUnion use_rep = use & kRepMask;
+        if (lower()) {
+          MachineTypeUnion in = GetInfo(node->InputAt(0))->output;
+          if ((in & kTypeMask) == kTypeInt32 || (in & kRepMask) == kRepWord32) {
+            // If the input has type int32, or is already a word32, just change
+            // representation if necessary.
+            VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
+            DeferReplacement(node, node->InputAt(0));
+          } else {
+            // Require the input in float64 format and perform truncation.
+            // TODO(turbofan): avoid a truncation with a smi check.
+            VisitUnop(node, kTypeInt32 | kRepFloat64, kTypeInt32 | kRepWord32);
+            node->set_op(lowering->machine()->TruncateFloat64ToInt32());
+          }
+        } else {
+          // Propagate a type to the input, but pass through representation.
+          VisitUnop(node, kTypeInt32, kTypeInt32 | use_rep);
+        }
+        break;
+      }
+      case IrOpcode::kNumberToUint32: {
+        MachineTypeUnion use_rep = use & kRepMask;
+        if (lower()) {
+          MachineTypeUnion in = GetInfo(node->InputAt(0))->output;
+          if ((in & kTypeMask) == kTypeUint32 ||
+              (in & kRepMask) == kRepWord32) {
+            // If the input has type uint32, or is already a word32, just
+            // change representation if necessary.
+            VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
+            DeferReplacement(node, node->InputAt(0));
+          } else {
+            // Require the input in float64 format to perform truncation.
+            // TODO(turbofan): avoid the truncation with a smi check.
+            VisitUnop(node, kTypeUint32 | kRepFloat64,
+                      kTypeUint32 | kRepWord32);
+            node->set_op(lowering->machine()->TruncateFloat64ToInt32());
+          }
+        } else {
+          // Propagate a type to the input, but pass through representation.
+          VisitUnop(node, kTypeUint32, kTypeUint32 | use_rep);
+        }
+        break;
+      }
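// [Editor's sketch] The float64 path in both cases implements ECMAScript
// ToInt32/ToUint32: truncate toward zero, reduce modulo 2^32, and map NaN and
// the infinities to 0. A reference version without V8 helpers:
#include <cmath>
#include <cstdint>
uint32_t ToUint32(double x) {
  if (!std::isfinite(x)) return 0;                    // NaN, +/-Inf => 0
  double m = std::fmod(std::trunc(x), 4294967296.0);  // mod 2^32
  if (m < 0) m += 4294967296.0;
  return static_cast<uint32_t>(m);
}
int32_t ToInt32(double x) {
  return static_cast<int32_t>(ToUint32(x));  // same 32 bits, signed view
}
// e.g. ToInt32(-1.5) == -1 and ToInt32(2147483648.0) == INT32_MIN.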
+      case IrOpcode::kReferenceEqual: {
+        VisitBinop(node, kMachAnyTagged, kRepBit);
+        if (lower()) node->set_op(lowering->machine()->WordEqual());
+        break;
+      }
+      case IrOpcode::kStringEqual: {
+        VisitBinop(node, kMachAnyTagged, kRepBit);
+        if (lower()) lowering->DoStringEqual(node);
+        break;
+      }
+      case IrOpcode::kStringLessThan: {
+        VisitBinop(node, kMachAnyTagged, kRepBit);
+        if (lower()) lowering->DoStringLessThan(node);
+        break;
+      }
+      case IrOpcode::kStringLessThanOrEqual: {
+        VisitBinop(node, kMachAnyTagged, kRepBit);
+        if (lower()) lowering->DoStringLessThanOrEqual(node);
+        break;
+      }
+      case IrOpcode::kStringAdd: {
+        VisitBinop(node, kMachAnyTagged, kMachAnyTagged);
+        if (lower()) lowering->DoStringAdd(node);
+        break;
+      }
+      case IrOpcode::kLoadField: {
+        FieldAccess access = FieldAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessRemainingInputs(node, 1);
+        SetOutput(node, AssumeImplicitFloat32Change(access.machine_type));
+        if (lower()) lowering->DoLoadField(node);
+        break;
+      }
+      case IrOpcode::kStoreField: {
+        FieldAccess access = FieldAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, AssumeImplicitFloat32Change(access.machine_type));
+        ProcessRemainingInputs(node, 2);
+        SetOutput(node, 0);
+        if (lower()) lowering->DoStoreField(node);
+        break;
+      }
+      case IrOpcode::kLoadElement: {
+        ElementAccess access = ElementAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, kMachInt32);  // element index
+        ProcessInput(node, 2, kMachInt32);  // length
+        ProcessRemainingInputs(node, 3);
+        SetOutput(node, AssumeImplicitFloat32Change(access.machine_type));
+        if (lower()) lowering->DoLoadElement(node);
+        break;
+      }
+      case IrOpcode::kStoreElement: {
+        ElementAccess access = ElementAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
+        ProcessInput(node, 1, kMachInt32);  // element index
+        ProcessInput(node, 2, kMachInt32);  // length
+        ProcessInput(node, 3, AssumeImplicitFloat32Change(access.machine_type));
+        ProcessRemainingInputs(node, 4);
+        SetOutput(node, 0);
+        if (lower()) lowering->DoStoreElement(node);
+        break;
+      }
+
+      //------------------------------------------------------------------
+      // Machine-level operators.
+      //------------------------------------------------------------------
+      case IrOpcode::kLoad: {
+        // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
+        MachineType tBase = kRepTagged;
+        LoadRepresentation rep = OpParameter<LoadRepresentation>(node);
+        ProcessInput(node, 0, tBase);   // pointer or object
+        ProcessInput(node, 1, kMachInt32);  // index
+        ProcessRemainingInputs(node, 2);
+        SetOutput(node, rep);
+        break;
+      }
+      case IrOpcode::kStore: {
+        // TODO(titzer): machine loads/stores need to know BaseTaggedness!?
+        MachineType tBase = kRepTagged;
+        StoreRepresentation rep = OpParameter<StoreRepresentation>(node);
+        ProcessInput(node, 0, tBase);   // pointer or object
+        ProcessInput(node, 1, kMachInt32);  // index
+        ProcessInput(node, 2, rep.machine_type());
+        ProcessRemainingInputs(node, 3);
+        SetOutput(node, 0);
+        break;
+      }
+      case IrOpcode::kWord32Shr:
+        // We output unsigned int32 for shift right because JavaScript's
+        // unsigned shift right (>>>) produces an unsigned 32-bit result.
+        return VisitBinop(node, kRepWord32, kRepWord32 | kTypeUint32);
+      case IrOpcode::kWord32And:
+      case IrOpcode::kWord32Or:
+      case IrOpcode::kWord32Xor:
+      case IrOpcode::kWord32Shl:
+      case IrOpcode::kWord32Sar:
+        // We use signed int32 as the output type for these word32 operations,
+        // though the machine bits are the same for either signed or unsigned,
+        // because JavaScript considers the result from these operations signed.
+        return VisitBinop(node, kRepWord32, kRepWord32 | kTypeInt32);
+      case IrOpcode::kWord32Equal:
+        return VisitBinop(node, kRepWord32, kRepBit);
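// [Editor's note] The signedness split above mirrors JavaScript, where >>> is
// the only bitwise operator with an unsigned result; &, |, ^, <<, >> all view
// their 32-bit result as signed. In plain C++ terms:
#include <cassert>
#include <cstdint>
int main() {
  uint32_t bits = 0x80000000u;
  assert((bits >> 1) == 0x40000000u);  // Word32Shr: logical, kTypeUint32
  // Word32Sar: arithmetic, kTypeInt32 (assumes the usual arithmetic shift
  // for negative signed values, guaranteed from C++20 onward).
  assert((static_cast<int32_t>(bits) >> 1) == -0x40000000);
  return 0;
}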
+
+      case IrOpcode::kInt32Add:
+      case IrOpcode::kInt32Sub:
+      case IrOpcode::kInt32Mul:
+      case IrOpcode::kInt32Div:
+      case IrOpcode::kInt32Mod:
+        return VisitInt32Binop(node);
+      case IrOpcode::kInt32UDiv:
+      case IrOpcode::kInt32UMod:
+        return VisitUint32Binop(node);
+      case IrOpcode::kInt32LessThan:
+      case IrOpcode::kInt32LessThanOrEqual:
+        return VisitInt32Cmp(node);
+
+      case IrOpcode::kUint32LessThan:
+      case IrOpcode::kUint32LessThanOrEqual:
+        return VisitUint32Cmp(node);
+
+      case IrOpcode::kInt64Add:
+      case IrOpcode::kInt64Sub:
+      case IrOpcode::kInt64Mul:
+      case IrOpcode::kInt64Div:
+      case IrOpcode::kInt64Mod:
+        return VisitInt64Binop(node);
+      case IrOpcode::kInt64LessThan:
+      case IrOpcode::kInt64LessThanOrEqual:
+        return VisitInt64Cmp(node);
+
+      case IrOpcode::kInt64UDiv:
+      case IrOpcode::kInt64UMod:
+        return VisitUint64Binop(node);
+
+      case IrOpcode::kWord64And:
+      case IrOpcode::kWord64Or:
+      case IrOpcode::kWord64Xor:
+      case IrOpcode::kWord64Shl:
+      case IrOpcode::kWord64Shr:
+      case IrOpcode::kWord64Sar:
+        return VisitBinop(node, kRepWord64, kRepWord64);
+      case IrOpcode::kWord64Equal:
+        return VisitBinop(node, kRepWord64, kRepBit);
+
+      case IrOpcode::kChangeInt32ToInt64:
+        return VisitUnop(node, kTypeInt32 | kRepWord32,
+                         kTypeInt32 | kRepWord64);
+      case IrOpcode::kChangeUint32ToUint64:
+        return VisitUnop(node, kTypeUint32 | kRepWord32,
+                         kTypeUint32 | kRepWord64);
+      case IrOpcode::kTruncateInt64ToInt32:
+        // TODO(titzer): Is kTypeInt32 correct here?
+        return VisitUnop(node, kTypeInt32 | kRepWord64,
+                         kTypeInt32 | kRepWord32);
+
+      case IrOpcode::kChangeInt32ToFloat64:
+        return VisitUnop(node, kTypeInt32 | kRepWord32,
+                         kTypeInt32 | kRepFloat64);
+      case IrOpcode::kChangeUint32ToFloat64:
+        return VisitUnop(node, kTypeUint32 | kRepWord32,
+                         kTypeUint32 | kRepFloat64);
+      case IrOpcode::kChangeFloat64ToInt32:
+        return VisitUnop(node, kTypeInt32 | kRepFloat64,
+                         kTypeInt32 | kRepWord32);
+      case IrOpcode::kChangeFloat64ToUint32:
+        return VisitUnop(node, kTypeUint32 | kRepFloat64,
+                         kTypeUint32 | kRepWord32);
+
+      case IrOpcode::kFloat64Add:
+      case IrOpcode::kFloat64Sub:
+      case IrOpcode::kFloat64Mul:
+      case IrOpcode::kFloat64Div:
+      case IrOpcode::kFloat64Mod:
+        return VisitFloat64Binop(node);
+      case IrOpcode::kFloat64Sqrt:
+        return VisitUnop(node, kMachFloat64, kMachFloat64);
+      case IrOpcode::kFloat64Equal:
+      case IrOpcode::kFloat64LessThan:
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return VisitFloat64Cmp(node);
+      default:
+        VisitInputs(node);
+        break;
+    }
+  }
+
+  void DeferReplacement(Node* node, Node* replacement) {
+    if (replacement->id() < count_) {
+      // Replace with a previously existing node eagerly.
+      node->ReplaceUses(replacement);
+    } else {
+      // Otherwise, we are replacing a node with a representation change.
+      // Such a substitution must be deferred until all lowering is done,
+      // because newly created nodes have no {NodeInfo} entries, which would
+      // confuse the insertion of representation changes for their uses.
+      replacements_.push_back(node);
+      replacements_.push_back(replacement);
+    }
+    // TODO(titzer): node->RemoveAllInputs();  // Node is now dead.
+  }
+
+  void PrintUseInfo(Node* node) {
+    TRACE(("#%d:%-20s ", node->id(), node->op()->mnemonic()));
+    PrintInfo(GetUseInfo(node));
+    TRACE(("\n"));
+  }
+
+  void PrintInfo(MachineTypeUnion info) {
+    if (FLAG_trace_representation) {
+      OFStream os(stdout);
+      os << static_cast<MachineType>(info);
+    }
+  }
+
+ private:
+  JSGraph* jsgraph_;
+  int count_;                       // number of nodes in the graph
+  NodeInfo* info_;                  // node id -> usage information
+  NodeVector nodes_;                // collected nodes
+  NodeVector replacements_;         // replacements to be done after lowering
+  bool contains_js_nodes_;          // {true} if a JS operator was seen
+  Phase phase_;                     // current phase of algorithm
+  RepresentationChanger* changer_;  // for inserting representation changes
+  ZoneQueue<Node*> queue_;          // queue for traversing the graph
+
+  NodeInfo* GetInfo(Node* node) {
+    DCHECK(node->id() >= 0);
+    DCHECK(node->id() < count_);
+    return &info_[node->id()];
+  }
+
+  MachineTypeUnion GetUseInfo(Node* node) { return GetInfo(node)->use; }
+};
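// [Editor's note] For orientation: RepresentationSelector::Run() (defined
// earlier in this file, outside this excerpt) drives two passes over the
// graph. In the PROPAGATE phase each node accumulates, via the {use} masks
// seen above, the union of representations its uses demand; in the LOWER
// phase VisitNode runs again with lower() true, rewrites operators in place,
// and queues substitutions through DeferReplacement() so that freshly created
// nodes, which have no {NodeInfo}, are never revisited.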
+
+
+Node* SimplifiedLowering::IsTagged(Node* node) {
+  // TODO(titzer): factor this out to a TaggingScheme abstraction.
+  STATIC_ASSERT(kSmiTagMask == 1);  // Only works if tag is the low bit.
+  return graph()->NewNode(machine()->WordAnd(), node,
+                          jsgraph()->Int32Constant(kSmiTagMask));
+}
+
+
+void SimplifiedLowering::LowerAllNodes() {
+  SimplifiedOperatorBuilder simplified(graph()->zone());
+  RepresentationChanger changer(jsgraph(), &simplified,
+                                graph()->zone()->isolate());
+  RepresentationSelector selector(jsgraph(), zone(), &changer);
+  selector.Run(this);
+}
+
+
+Node* SimplifiedLowering::Untag(Node* node) {
+  // TODO(titzer): factor this out to a TaggingScheme abstraction.
+  Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
+  return graph()->NewNode(machine()->WordSar(), node, shift_amount);
+}
+
+
+Node* SimplifiedLowering::SmiTag(Node* node) {
+  // TODO(titzer): factor this out to a TaggingScheme abstraction.
+  Node* shift_amount = jsgraph()->Int32Constant(kSmiTagSize + kSmiShiftSize);
+  return graph()->NewNode(machine()->WordShl(), node, shift_amount);
+}
+
+
+Node* SimplifiedLowering::OffsetMinusTagConstant(int32_t offset) {
+  return jsgraph()->Int32Constant(offset - kHeapObjectTag);
+}
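// [Editor's sketch] Together these helpers encode V8's 32-bit tagging scheme:
// Smis hold their value shifted up by one with a 0 tag bit, while heap
// pointers carry kHeapObjectTag (1) in the low bit, so field addressing
// subtracts the tag rather than masking it. Assuming kSmiTagSize == 1,
// kSmiShiftSize == 0 and kHeapObjectTag == 1 (the 32-bit configuration), and
// the usual two's-complement, arithmetic-shift behavior:
#include <cassert>
#include <cstdint>
int main() {
  int32_t value = -42;
  int32_t smi = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  assert((smi & 1) == 0);       // IsTagged: low bit clear => Smi
  assert((smi >> 1) == value);  // Untag: arithmetic shift right by 1
  int32_t field_offset = 8;     // hypothetical field offset, for illustration
  int32_t displacement = field_offset - 1;  // OffsetMinusTagConstant(8)
  assert(displacement == 7);    // tag folded into the constant displacement
  return 0;
}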
+
+
+static WriteBarrierKind ComputeWriteBarrierKind(BaseTaggedness base_is_tagged,
+                                                MachineType representation,
+                                                Type* type) {
+  // TODO(turbofan): skip write barriers for Smis, etc.
+  if (base_is_tagged == kTaggedBase &&
+      RepresentationOf(representation) == kRepTagged) {
+    // Write barriers are only for writes into heap objects (i.e. tagged base).
+    return kFullWriteBarrier;
+  }
+  return kNoWriteBarrier;
+}
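// [Editor's note] The decision above, as a table:
//   base pointer   stored representation   barrier
//   tagged         kRepTagged              kFullWriteBarrier
//   tagged         word/float (untagged)   kNoWriteBarrier
//   untagged       anything                kNoWriteBarrier
// Only a tagged value written into a heap object can create a pointer the
// garbage collector must track; raw words and floats never can, and untagged
// bases address off-heap memory.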
+
+
+void SimplifiedLowering::DoLoadField(Node* node) {
+  const FieldAccess& access = FieldAccessOf(node->op());
+  node->set_op(machine()->Load(access.machine_type));
+  Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
+  node->InsertInput(zone(), 1, offset);
+}
+
+
+void SimplifiedLowering::DoStoreField(Node* node) {
+  const FieldAccess& access = FieldAccessOf(node->op());
+  WriteBarrierKind kind = ComputeWriteBarrierKind(
+      access.base_is_tagged, access.machine_type, access.type);
+  node->set_op(
+      machine()->Store(StoreRepresentation(access.machine_type, kind)));
+  Node* offset = jsgraph()->Int32Constant(access.offset - access.tag());
+  node->InsertInput(zone(), 1, offset);
+}
+
+
+Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
+                                       Node* index) {
+  int element_size = ElementSizeOf(access.machine_type);
+  if (element_size != 1) {
+    index = graph()->NewNode(machine()->Int32Mul(),
+                             jsgraph()->Int32Constant(element_size), index);
+  }
+  int fixed_offset = access.header_size - access.tag();
+  if (fixed_offset == 0) return index;
+  return graph()->NewNode(machine()->Int32Add(), index,
+                          jsgraph()->Int32Constant(fixed_offset));
+}
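// [Editor's sketch] ComputeIndex folds scaling and the header into a single
// index expression: byte_offset = index * ElementSizeOf(machine_type) +
// header_size - tag. The same arithmetic, standalone (sizes are examples):
#include <cassert>
int ElementByteOffset(int index, int element_size, int header_size, int tag) {
  int scaled = (element_size == 1) ? index : element_size * index;
  int fixed = header_size - tag;
  return (fixed == 0) ? scaled : scaled + fixed;
}
int main() {
  // E.g. 4-byte elements behind an 8-byte header on a tagged base (tag 1):
  // element 3 lives at base + 4 * 3 + 8 - 1 = base + 19.
  assert(ElementByteOffset(3, 4, 8, 1) == 19);
  return 0;
}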
+
+
+void SimplifiedLowering::DoLoadElement(Node* node) {
+  const ElementAccess& access = ElementAccessOf(node->op());
+  node->set_op(machine()->Load(access.machine_type));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+  node->RemoveInput(2);
+}
+
+
+void SimplifiedLowering::DoStoreElement(Node* node) {
+  const ElementAccess& access = ElementAccessOf(node->op());
+  WriteBarrierKind kind = ComputeWriteBarrierKind(
+      access.base_is_tagged, access.machine_type, access.type);
+  node->set_op(
+      machine()->Store(StoreRepresentation(access.machine_type, kind)));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
+  node->RemoveInput(2);
+}
+
+
+void SimplifiedLowering::DoStringAdd(Node* node) {
+  Callable callable = CodeFactory::StringAdd(
+      zone()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
+  CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
+  CallDescriptor* desc =
+      Linkage::GetStubCallDescriptor(callable.descriptor(), 0, flags, zone());
+  node->set_op(common()->Call(desc));
+  node->InsertInput(zone(), 0, jsgraph()->HeapConstant(callable.code()));
+  node->AppendInput(zone(), jsgraph()->UndefinedConstant());
+  node->AppendInput(zone(), graph()->start());
+  node->AppendInput(zone(), graph()->start());
+}
+
+
+Node* SimplifiedLowering::StringComparison(Node* node, bool requires_ordering) {
+  CEntryStub stub(zone()->isolate(), 1);
+  Runtime::FunctionId f =
+      requires_ordering ? Runtime::kStringCompare : Runtime::kStringEquals;
+  ExternalReference ref(f, zone()->isolate());
+  Operator::Properties props = node->op()->properties();
+  // TODO(mstarzinger): We should call StringCompareStub here instead, once an
+  // interface descriptor is available for it.
+  CallDescriptor* desc = Linkage::GetRuntimeCallDescriptor(f, 2, props, zone());
+  return graph()->NewNode(common()->Call(desc),
+                          jsgraph()->HeapConstant(stub.GetCode()),
+                          NodeProperties::GetValueInput(node, 0),
+                          NodeProperties::GetValueInput(node, 1),
+                          jsgraph()->ExternalConstant(ref),
+                          jsgraph()->Int32Constant(2),
+                          jsgraph()->UndefinedConstant());
+}
+
+
+void SimplifiedLowering::DoStringEqual(Node* node) {
+  node->set_op(machine()->WordEqual());
+  node->ReplaceInput(0, StringComparison(node, false));
+  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
+
+
+void SimplifiedLowering::DoStringLessThan(Node* node) {
+  node->set_op(machine()->IntLessThan());
+  node->ReplaceInput(0, StringComparison(node, true));
+  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
+
+
+void SimplifiedLowering::DoStringLessThanOrEqual(Node* node) {
+  node->set_op(machine()->IntLessThanOrEqual());
+  node->ReplaceInput(0, StringComparison(node, true));
+  node->ReplaceInput(1, jsgraph()->SmiConstant(EQUAL));
+}
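// [Editor's note] All three string lowerings share one shape: call into the
// runtime for an equality or three-way result, then compare that Smi against
// EQUAL (0) with the matching machine operator:
//   a == b  =>  WordEqual(StringEquals(a, b), 0)
//   a <  b  =>  IntLessThan(StringCompare(a, b), 0)
//   a <= b  =>  IntLessThanOrEqual(StringCompare(a, b), 0)
// The same pattern with C's strcmp standing in for the runtime call:
#include <cassert>
#include <cstring>
int main() {
  assert(std::strcmp("abc", "abc") == 0);  // DoStringEqual
  assert(std::strcmp("abc", "abd") < 0);   // DoStringLessThan
  assert(std::strcmp("abc", "abc") <= 0);  // DoStringLessThanOrEqual
  return 0;
}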
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-lowering.h b/src/compiler/simplified-lowering.h
new file mode 100644
index 0000000..2ba7e3b
--- /dev/null
+++ b/src/compiler/simplified-lowering.h
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_LOWERING_H_
+#define V8_COMPILER_SIMPLIFIED_LOWERING_H_
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedLowering {
+ public:
+  explicit SimplifiedLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+  virtual ~SimplifiedLowering() {}
+
+  void LowerAllNodes();
+
+  // TODO(titzer): These are exposed for direct testing. Use a friend class.
+  void DoLoadField(Node* node);
+  void DoStoreField(Node* node);
+  void DoLoadElement(Node* node);
+  void DoStoreElement(Node* node);
+  void DoStringAdd(Node* node);
+  void DoStringEqual(Node* node);
+  void DoStringLessThan(Node* node);
+  void DoStringLessThanOrEqual(Node* node);
+
+ private:
+  JSGraph* jsgraph_;
+
+  Node* SmiTag(Node* node);
+  Node* IsTagged(Node* node);
+  Node* Untag(Node* node);
+  Node* OffsetMinusTagConstant(int32_t offset);
+  Node* ComputeIndex(const ElementAccess& access, Node* index);
+  Node* StringComparison(Node* node, bool requires_ordering);
+
+  friend class RepresentationSelector;
+
+  Zone* zone() { return jsgraph_->zone(); }
+  JSGraph* jsgraph() { return jsgraph_; }
+  Graph* graph() { return jsgraph()->graph(); }
+  CommonOperatorBuilder* common() { return jsgraph()->common(); }
+  MachineOperatorBuilder* machine() { return jsgraph()->machine(); }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMPLIFIED_LOWERING_H_
diff --git a/src/compiler/simplified-operator-reducer-unittest.cc b/src/compiler/simplified-operator-reducer-unittest.cc
new file mode 100644
index 0000000..739264e
--- /dev/null
+++ b/src/compiler/simplified-operator-reducer-unittest.cc
@@ -0,0 +1,483 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-unittest.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/simplified-operator-reducer.h"
+#include "src/compiler/typer.h"
+#include "src/conversions.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedOperatorReducerTest : public GraphTest {
+ public:
+  explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
+      : GraphTest(num_parameters), simplified_(zone()) {}
+  virtual ~SimplifiedOperatorReducerTest() {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    Typer typer(zone());
+    MachineOperatorBuilder machine;
+    JSOperatorBuilder javascript(zone());
+    JSGraph jsgraph(graph(), common(), &javascript, &typer, &machine);
+    SimplifiedOperatorReducer reducer(&jsgraph);
+    return reducer.Reduce(node);
+  }
+
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+  SimplifiedOperatorBuilder simplified_;
+};
+
+
+template <typename T>
+class SimplifiedOperatorReducerTestWithParam
+    : public SimplifiedOperatorReducerTest,
+      public ::testing::WithParamInterface<T> {
+ public:
+  explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
+      : SimplifiedOperatorReducerTest(num_parameters) {}
+  virtual ~SimplifiedOperatorReducerTestWithParam() {}
+};
+
+
+namespace {
+
+static const double kFloat64Values[] = {
+    -V8_INFINITY,  -6.52696e+290, -1.05768e+290, -5.34203e+268, -1.01997e+268,
+    -8.22758e+266, -1.58402e+261, -5.15246e+241, -5.92107e+226, -1.21477e+226,
+    -1.67913e+188, -1.6257e+184,  -2.60043e+170, -2.52941e+168, -3.06033e+116,
+    -4.56201e+52,  -3.56788e+50,  -9.9066e+38,   -3.07261e+31,  -2.1271e+09,
+    -1.91489e+09,  -1.73053e+09,  -9.30675e+08,  -26030,        -20453,
+    -15790,        -11699,        -111,          -97,           -78,
+    -63,           -58,           -1.53858e-06,  -2.98914e-12,  -1.14741e-39,
+    -8.20347e-57,  -1.48932e-59,  -3.17692e-66,  -8.93103e-81,  -3.91337e-83,
+    -6.0489e-92,   -8.83291e-113, -4.28266e-117, -1.92058e-178, -2.0567e-192,
+    -1.68167e-194, -1.51841e-214, -3.98738e-234, -7.31851e-242, -2.21875e-253,
+    -1.11612e-293, -0.0,          0.0,           2.22507e-308,  1.06526e-307,
+    4.16643e-227,  6.76624e-223,  2.0432e-197,   3.16254e-184,  1.37315e-173,
+    2.88603e-172,  1.54155e-99,   4.42923e-81,   1.40539e-73,   5.4462e-73,
+    1.24064e-58,   3.11167e-58,   2.75826e-39,   0.143815,      58,
+    67,            601,           7941,          11644,         13697,
+    25680,         29882,         1.32165e+08,   1.62439e+08,   4.16837e+08,
+    9.59097e+08,   1.32491e+09,   1.8728e+09,    1.0672e+17,    2.69606e+46,
+    1.98285e+79,   1.0098e+82,    7.93064e+88,   3.67444e+121,  9.36506e+123,
+    7.27954e+162,  3.05316e+168,  1.16171e+175,  1.64771e+189,  1.1622e+202,
+    2.00748e+239,  2.51778e+244,  3.90282e+306,  1.79769e+308,  V8_INFINITY};
+
+
+static const int32_t kInt32Values[] = {
+    -2147483647 - 1, -2104508227, -2103151830, -1435284490, -1378926425,
+    -1318814539,     -1289388009, -1287537572, -1279026536, -1241605942,
+    -1226046939,     -941837148,  -779818051,  -413830641,  -245798087,
+    -184657557,      -127145950,  -105483328,  -32325,      -26653,
+    -23858,          -23834,      -22363,      -19858,      -19044,
+    -18744,          -15528,      -5309,       -3372,       -2093,
+    -104,            -98,         -97,         -93,         -84,
+    -80,             -78,         -76,         -72,         -58,
+    -57,             -56,         -55,         -45,         -40,
+    -34,             -32,         -25,         -24,         -5,
+    -2,              0,           3,           10,          24,
+    34,              42,          46,          47,          48,
+    52,              56,          64,          65,          71,
+    76,              79,          81,          82,          97,
+    102,             103,         104,         106,         107,
+    109,             116,         122,         3653,        4485,
+    12405,           16504,       26262,       28704,       29755,
+    30554,           16476817,    605431957,   832401070,   873617242,
+    914205764,       1062628108,  1087581664,  1488498068,  1534668023,
+    1661587028,      1696896187,  1866841746,  2032089723,  2147483647};
+
+
+static const uint32_t kUint32Values[] = {
+    0x0,        0x5,        0x8,        0xc,        0xd,        0x26,
+    0x28,       0x29,       0x30,       0x34,       0x3e,       0x42,
+    0x50,       0x5b,       0x63,       0x71,       0x77,       0x7c,
+    0x83,       0x88,       0x96,       0x9c,       0xa3,       0xfa,
+    0x7a7,      0x165d,     0x234d,     0x3acb,     0x43a5,     0x4573,
+    0x5b4f,     0x5f14,     0x6996,     0x6c6e,     0x7289,     0x7b9a,
+    0x7bc9,     0x86bb,     0xa839,     0xaa41,     0xb03b,     0xc942,
+    0xce68,     0xcf4c,     0xd3ad,     0xdea3,     0xe90c,     0xed86,
+    0xfba5,     0x172dcc6,  0x114d8fc1, 0x182d6c9d, 0x1b1e3fad, 0x1db033bf,
+    0x1e1de755, 0x1f625c80, 0x28f6cf00, 0x2acb6a94, 0x2c20240e, 0x2f0fe54e,
+    0x31863a7c, 0x33325474, 0x3532fae3, 0x3bab82ea, 0x4c4b83a2, 0x4cd93d1e,
+    0x4f7331d4, 0x5491b09b, 0x57cc6ff9, 0x60d3b4dc, 0x653f5904, 0x690ae256,
+    0x69fe3276, 0x6bebf0ba, 0x6e2c69a3, 0x73b84ff7, 0x7b3a1924, 0x7ed032d9,
+    0x84dd734b, 0x8552ea53, 0x8680754f, 0x8e9660eb, 0x94fe2b9c, 0x972d30cf,
+    0x9b98c482, 0xb158667e, 0xb432932c, 0xb5b70989, 0xb669971a, 0xb7c359d1,
+    0xbeb15c0d, 0xc171c53d, 0xc743dd38, 0xc8e2af50, 0xc98e2df0, 0xd9d1cdf9,
+    0xdcc91049, 0xe46f396d, 0xee991950, 0xef64e521, 0xf7aeefc9, 0xffffffff};
+
+
+MATCHER(IsNaN, std::string(negation ? "isn't" : "is") + " NaN") {
+  return std::isnan(arg);
+}
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// Unary operators
+
+
+namespace {
+
+struct UnaryOperator {
+  const Operator* (SimplifiedOperatorBuilder::*constructor)();
+  const char* constructor_name;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const UnaryOperator& unop) {
+  return os << unop.constructor_name;
+}
+
+
+static const UnaryOperator kUnaryOperators[] = {
+    {&SimplifiedOperatorBuilder::BooleanNot, "BooleanNot"},
+    {&SimplifiedOperatorBuilder::ChangeBitToBool, "ChangeBitToBool"},
+    {&SimplifiedOperatorBuilder::ChangeBoolToBit, "ChangeBoolToBit"},
+    {&SimplifiedOperatorBuilder::ChangeFloat64ToTagged,
+     "ChangeFloat64ToTagged"},
+    {&SimplifiedOperatorBuilder::ChangeInt32ToTagged, "ChangeInt32ToTagged"},
+    {&SimplifiedOperatorBuilder::ChangeTaggedToFloat64,
+     "ChangeTaggedToFloat64"},
+    {&SimplifiedOperatorBuilder::ChangeTaggedToInt32, "ChangeTaggedToInt32"},
+    {&SimplifiedOperatorBuilder::ChangeTaggedToUint32, "ChangeTaggedToUint32"},
+    {&SimplifiedOperatorBuilder::ChangeUint32ToTagged, "ChangeUint32ToTagged"}};
+
+}  // namespace
+
+
+typedef SimplifiedOperatorReducerTestWithParam<UnaryOperator>
+    SimplifiedUnaryOperatorTest;
+
+
+TEST_P(SimplifiedUnaryOperatorTest, Parameter) {
+  const UnaryOperator& unop = GetParam();
+  Reduction reduction = Reduce(
+      graph()->NewNode((simplified()->*unop.constructor)(), Parameter(0)));
+  EXPECT_FALSE(reduction.Changed());
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorReducerTest,
+                        SimplifiedUnaryOperatorTest,
+                        ::testing::ValuesIn(kUnaryOperators));
+
+
+// -----------------------------------------------------------------------------
+// BooleanNot
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithBooleanNot) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(
+      graph()->NewNode(simplified()->BooleanNot(),
+                       graph()->NewNode(simplified()->BooleanNot(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithFalseConstant) {
+  Reduction reduction0 =
+      Reduce(graph()->NewNode(simplified()->BooleanNot(), FalseConstant()));
+  ASSERT_TRUE(reduction0.Changed());
+  EXPECT_THAT(reduction0.replacement(), IsTrueConstant());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, BooleanNotWithTrueConstant) {
+  Reduction reduction1 =
+      Reduce(graph()->NewNode(simplified()->BooleanNot(), TrueConstant()));
+  ASSERT_TRUE(reduction1.Changed());
+  EXPECT_THAT(reduction1.replacement(), IsFalseConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeBoolToBit
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithChangeBoolToBit) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeBitToBool(),
+      graph()->NewNode(simplified()->ChangeBoolToBit(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithZeroConstant) {
+  Reduction reduction = Reduce(
+      graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsFalseConstant());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBitToBoolWithOneConstant) {
+  Reduction reduction = Reduce(
+      graph()->NewNode(simplified()->ChangeBitToBool(), Int32Constant(1)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsTrueConstant());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeBoolToBit
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithFalseConstant) {
+  Reduction reduction = Reduce(
+      graph()->NewNode(simplified()->ChangeBoolToBit(), FalseConstant()));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithTrueConstant) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeBoolToBit(), TrueConstant()));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(1));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeBoolToBitWithChangeBitToBool) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeBoolToBit(),
+      graph()->NewNode(simplified()->ChangeBitToBool(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeFloat64ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeFloat64ToTaggedWithConstant) {
+  TRACED_FOREACH(double, n, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeFloat64ToTagged(), Float64Constant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsNumberConstant(n));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeInt32ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeInt32ToTaggedWithConstant) {
+  TRACED_FOREACH(int32_t, n, kInt32Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeInt32ToTagged(), Int32Constant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastI2D(n)));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToFloat64
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToFloat64WithChangeFloat64ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToFloat64(),
+      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToFloat64WithChangeInt32ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToFloat64(),
+      graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsChangeInt32ToFloat64(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToFloat64WithChangeUint32ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToFloat64(),
+      graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsChangeUint32ToFloat64(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithConstant) {
+  TRACED_FOREACH(double, n, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeTaggedToFloat64(), NumberConstant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsFloat64Constant(n));
+  }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant1) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+                              NumberConstant(-base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToFloat64WithNaNConstant2) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToFloat64(),
+                              NumberConstant(base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsFloat64Constant(IsNaN()));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToInt32
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToInt32WithChangeFloat64ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToInt32(),
+      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToInt32(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToInt32WithChangeInt32ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToInt32(),
+      graph()->NewNode(simplified()->ChangeInt32ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithConstant) {
+  TRACED_FOREACH(double, n, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeTaggedToInt32(), NumberConstant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsInt32Constant(DoubleToInt32(n)));
+  }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant1) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
+                              NumberConstant(-base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToInt32WithNaNConstant2) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToInt32(),
+                              NumberConstant(base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeTaggedToUint32
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToUint32WithChangeFloat64ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToUint32(),
+      graph()->NewNode(simplified()->ChangeFloat64ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsChangeFloat64ToUint32(param0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest,
+       ChangeTaggedToUint32WithChangeUint32ToTagged) {
+  Node* param0 = Parameter(0);
+  Reduction reduction = Reduce(graph()->NewNode(
+      simplified()->ChangeTaggedToUint32(),
+      graph()->NewNode(simplified()->ChangeUint32ToTagged(), param0)));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_EQ(param0, reduction.replacement());
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithConstant) {
+  TRACED_FOREACH(double, n, kFloat64Values) {
+    Reduction reduction = Reduce(graph()->NewNode(
+        simplified()->ChangeTaggedToUint32(), NumberConstant(n)));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(),
+                IsInt32Constant(bit_cast<int32_t>(DoubleToUint32(n))));
+  }
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant1) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
+                              NumberConstant(-base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeTaggedToUint32WithNaNConstant2) {
+  Reduction reduction =
+      Reduce(graph()->NewNode(simplified()->ChangeTaggedToUint32(),
+                              NumberConstant(base::OS::nan_value())));
+  ASSERT_TRUE(reduction.Changed());
+  EXPECT_THAT(reduction.replacement(), IsInt32Constant(0));
+}
+
+
+// -----------------------------------------------------------------------------
+// ChangeUint32ToTagged
+
+
+TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
+  TRACED_FOREACH(uint32_t, n, kUint32Values) {
+    Reduction reduction =
+        Reduce(graph()->NewNode(simplified()->ChangeUint32ToTagged(),
+                                Int32Constant(bit_cast<int32_t>(n))));
+    ASSERT_TRUE(reduction.Changed());
+    EXPECT_THAT(reduction.replacement(), IsNumberConstant(FastUI2D(n)));
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator-reducer.cc b/src/compiler/simplified-operator-reducer.cc
new file mode 100644
index 0000000..f6181ea
--- /dev/null
+++ b/src/compiler/simplified-operator-reducer.cc
@@ -0,0 +1,147 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/simplified-operator-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SimplifiedOperatorReducer::~SimplifiedOperatorReducer() {}
+
+
+Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kBooleanNot: {
+      HeapObjectMatcher<HeapObject> m(node->InputAt(0));
+      if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
+        return Replace(jsgraph()->TrueConstant());
+      }
+      if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
+        return Replace(jsgraph()->FalseConstant());
+      }
+      if (m.IsBooleanNot()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeBitToBool: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.Is(0)) return Replace(jsgraph()->FalseConstant());
+      if (m.Is(1)) return Replace(jsgraph()->TrueConstant());
+      if (m.IsChangeBoolToBit()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeBoolToBit: {
+      HeapObjectMatcher<HeapObject> m(node->InputAt(0));
+      if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->false_value()))) {
+        return ReplaceInt32(0);
+      }
+      if (m.Is(Unique<HeapObject>::CreateImmovable(factory()->true_value()))) {
+        return ReplaceInt32(1);
+      }
+      if (m.IsChangeBitToBool()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeFloat64ToTagged: {
+      Float64Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceNumber(m.Value());
+      break;
+    }
+    case IrOpcode::kChangeInt32ToTagged: {
+      Int32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceNumber(m.Value());
+      break;
+    }
+    case IrOpcode::kChangeTaggedToFloat64: {
+      NumberMatcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceFloat64(m.Value());
+      if (m.IsChangeFloat64ToTagged()) return Replace(m.node()->InputAt(0));
+      if (m.IsChangeInt32ToTagged()) {
+        return Change(node, machine()->ChangeInt32ToFloat64(),
+                      m.node()->InputAt(0));
+      }
+      if (m.IsChangeUint32ToTagged()) {
+        return Change(node, machine()->ChangeUint32ToFloat64(),
+                      m.node()->InputAt(0));
+      }
+      break;
+    }
+    case IrOpcode::kChangeTaggedToInt32: {
+      NumberMatcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceInt32(DoubleToInt32(m.Value()));
+      if (m.IsChangeFloat64ToTagged()) {
+        return Change(node, machine()->ChangeFloat64ToInt32(),
+                      m.node()->InputAt(0));
+      }
+      if (m.IsChangeInt32ToTagged()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeTaggedToUint32: {
+      NumberMatcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceUint32(DoubleToUint32(m.Value()));
+      if (m.IsChangeFloat64ToTagged()) {
+        return Change(node, machine()->ChangeFloat64ToUint32(),
+                      m.node()->InputAt(0));
+      }
+      if (m.IsChangeUint32ToTagged()) return Replace(m.node()->InputAt(0));
+      break;
+    }
+    case IrOpcode::kChangeUint32ToTagged: {
+      Uint32Matcher m(node->InputAt(0));
+      if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
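// [Editor's sketch] Reduce() performs two kinds of strength reduction:
// constant folding (e.g. ChangeFloat64ToTagged(#3.0) => NumberConstant(3.0))
// and inverse-pair elimination (e.g. ChangeTaggedToInt32(ChangeInt32ToTagged(
// x)) => x). The latter is sound because every int32 is exactly representable
// as a float64 number, so the pair is a round trip (tagged number modeled as
// a plain double here):
#include <cassert>
#include <cstdint>
int main() {
  int32_t x = -7;
  double as_number = static_cast<double>(x);     // ChangeInt32ToTagged
  assert(static_cast<int32_t>(as_number) == x);  // ChangeTaggedToInt32
  return 0;
}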
+
+
+Reduction SimplifiedOperatorReducer::Change(Node* node, const Operator* op,
+                                            Node* a) {
+  node->set_op(op);
+  node->ReplaceInput(0, a);
+  return Changed(node);
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceFloat64(double value) {
+  return Replace(jsgraph()->Float64Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceInt32(int32_t value) {
+  return Replace(jsgraph()->Int32Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceNumber(double value) {
+  return Replace(jsgraph()->Constant(value));
+}
+
+
+Reduction SimplifiedOperatorReducer::ReplaceNumber(int32_t value) {
+  return Replace(jsgraph()->Constant(value));
+}
+
+
+Graph* SimplifiedOperatorReducer::graph() const { return jsgraph()->graph(); }
+
+
+Factory* SimplifiedOperatorReducer::factory() const {
+  return jsgraph()->isolate()->factory();
+}
+
+
+MachineOperatorBuilder* SimplifiedOperatorReducer::machine() const {
+  return jsgraph()->machine();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator-reducer.h b/src/compiler/simplified-operator-reducer.h
new file mode 100644
index 0000000..32f49ad
--- /dev/null
+++ b/src/compiler/simplified-operator-reducer.h
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
+#define V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class Heap;
+
+namespace compiler {
+
+// Forward declarations.
+class JSGraph;
+class MachineOperatorBuilder;
+
+class SimplifiedOperatorReducer FINAL : public Reducer {
+ public:
+  explicit SimplifiedOperatorReducer(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
+  virtual ~SimplifiedOperatorReducer();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  Reduction Change(Node* node, const Operator* op, Node* a);
+  Reduction ReplaceFloat64(double value);
+  Reduction ReplaceInt32(int32_t value);
+  Reduction ReplaceUint32(uint32_t value) {
+    return ReplaceInt32(bit_cast<int32_t>(value));
+  }
+  Reduction ReplaceNumber(double value);
+  Reduction ReplaceNumber(int32_t value);
+
+  Graph* graph() const;
+  Factory* factory() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  MachineOperatorBuilder* machine() const;
+
+  JSGraph* jsgraph_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorReducer);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMPLIFIED_OPERATOR_REDUCER_H_
diff --git a/src/compiler/simplified-operator-unittest.cc b/src/compiler/simplified-operator-unittest.cc
new file mode 100644
index 0000000..4014f24
--- /dev/null
+++ b/src/compiler/simplified-operator-unittest.cc
@@ -0,0 +1,222 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-operator.h"
+
+#include "src/compiler/operator-properties-inl.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(bmeurer): Drop once we use std::ostream instead of our OStream.
+inline std::ostream& operator<<(std::ostream& os, const ElementAccess& access) {
+  OStringStream ost;
+  ost << access;
+  return os << ost.c_str();
+}
+
+
+// -----------------------------------------------------------------------------
+// Pure operators.
+
+
+namespace {
+
+struct PureOperator {
+  const Operator* (SimplifiedOperatorBuilder::*constructor)();
+  IrOpcode::Value opcode;
+  Operator::Properties properties;
+  int value_input_count;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const PureOperator& pop) {
+  return os << IrOpcode::Mnemonic(pop.opcode);
+}
+
+
+const PureOperator kPureOperators[] = {
+#define PURE(Name, properties, input_count)              \
+  {                                                      \
+    &SimplifiedOperatorBuilder::Name, IrOpcode::k##Name, \
+        Operator::kPure | properties, input_count        \
+  }
+    PURE(BooleanNot, Operator::kNoProperties, 1),
+    PURE(NumberEqual, Operator::kCommutative, 2),
+    PURE(NumberLessThan, Operator::kNoProperties, 2),
+    PURE(NumberLessThanOrEqual, Operator::kNoProperties, 2),
+    PURE(NumberAdd, Operator::kCommutative, 2),
+    PURE(NumberSubtract, Operator::kNoProperties, 2),
+    PURE(NumberMultiply, Operator::kCommutative, 2),
+    PURE(NumberDivide, Operator::kNoProperties, 2),
+    PURE(NumberModulus, Operator::kNoProperties, 2),
+    PURE(NumberToInt32, Operator::kNoProperties, 1),
+    PURE(NumberToUint32, Operator::kNoProperties, 1),
+    PURE(StringEqual, Operator::kCommutative, 2),
+    PURE(StringLessThan, Operator::kNoProperties, 2),
+    PURE(StringLessThanOrEqual, Operator::kNoProperties, 2),
+    PURE(StringAdd, Operator::kNoProperties, 2),
+    PURE(ChangeTaggedToInt32, Operator::kNoProperties, 1),
+    PURE(ChangeTaggedToUint32, Operator::kNoProperties, 1),
+    PURE(ChangeTaggedToFloat64, Operator::kNoProperties, 1),
+    PURE(ChangeInt32ToTagged, Operator::kNoProperties, 1),
+    PURE(ChangeUint32ToTagged, Operator::kNoProperties, 1),
+    PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
+    PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
+    PURE(ChangeBitToBool, Operator::kNoProperties, 1)
+#undef PURE
+};
+
+}  // namespace
+
+
+class SimplifiedPureOperatorTest
+    : public TestWithZone,
+      public ::testing::WithParamInterface<PureOperator> {};
+
+
+TEST_P(SimplifiedPureOperatorTest, InstancesAreGloballyShared) {
+  const PureOperator& pop = GetParam();
+  SimplifiedOperatorBuilder simplified1(zone());
+  SimplifiedOperatorBuilder simplified2(zone());
+  EXPECT_EQ((simplified1.*pop.constructor)(), (simplified2.*pop.constructor)());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, NumberOfInputsAndOutputs) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (simplified.*pop.constructor)();
+
+  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(pop.value_input_count, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, OpcodeIsCorrect) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (simplified.*pop.constructor)();
+  EXPECT_EQ(pop.opcode, op->opcode());
+}
+
+
+TEST_P(SimplifiedPureOperatorTest, Properties) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const PureOperator& pop = GetParam();
+  const Operator* op = (simplified.*pop.constructor)();
+  EXPECT_EQ(pop.properties, op->properties() & pop.properties);
+}
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
+                        ::testing::ValuesIn(kPureOperators));
+
+
+// -----------------------------------------------------------------------------
+// Element access operators.
+
+namespace {
+
+const ElementAccess kElementAccesses[] = {
+    {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachInt8},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachInt16},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachInt32},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachUint8},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachUint16},
+    {kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag, Type::Any(),
+     kMachUint32},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
+    {kUntaggedBase, 0, Type::Number(), kRepFloat32},
+    {kUntaggedBase, 0, Type::Number(), kRepFloat64},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt8},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint8},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt16},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint16},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+     kRepFloat32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+     kRepFloat64}};
+
+}  // namespace
+
+
+class SimplifiedElementAccessOperatorTest
+    : public TestWithZone,
+      public ::testing::WithParamInterface<ElementAccess> {};
+
+
+TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const ElementAccess& access = GetParam();
+  const Operator* op = simplified.LoadElement(access);
+
+  EXPECT_EQ(IrOpcode::kLoadElement, op->opcode());
+  EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+  EXPECT_EQ(access, ElementAccessOf(op));
+
+  EXPECT_EQ(3, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(1, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
+  SimplifiedOperatorBuilder simplified(zone());
+  const ElementAccess& access = GetParam();
+  const Operator* op = simplified.StoreElement(access);
+
+  EXPECT_EQ(IrOpcode::kStoreElement, op->opcode());
+  EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+  EXPECT_EQ(access, ElementAccessOf(op));
+
+  EXPECT_EQ(4, OperatorProperties::GetValueInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectInputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetControlInputCount(op));
+  EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(0, OperatorProperties::GetValueOutputCount(op));
+  EXPECT_EQ(1, OperatorProperties::GetEffectOutputCount(op));
+  EXPECT_EQ(0, OperatorProperties::GetControlOutputCount(op));
+}
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
+                        SimplifiedElementAccessOperatorTest,
+                        ::testing::ValuesIn(kElementAccesses));
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator.cc b/src/compiler/simplified-operator.cc
new file mode 100644
index 0000000..642ffc7
--- /dev/null
+++ b/src/compiler/simplified-operator.cc
@@ -0,0 +1,178 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/simplified-operator.h"
+
+#include "src/base/lazy-instance.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/types-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+OStream& operator<<(OStream& os, BaseTaggedness base_taggedness) {
+  switch (base_taggedness) {
+    case kUntaggedBase:
+      return os << "untagged base";
+    case kTaggedBase:
+      return os << "tagged base";
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
+  return lhs.base_is_tagged == rhs.base_is_tagged &&
+         lhs.header_size == rhs.header_size && lhs.type == rhs.type &&
+         lhs.machine_type == rhs.machine_type;
+}
+
+
+bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs) {
+  return !(lhs == rhs);
+}
+
+
+OStream& operator<<(OStream& os, ElementAccess const& access) {
+  os << "[" << access.base_is_tagged << ", " << access.header_size << ", ";
+  access.type->PrintTo(os);
+  os << ", " << access.machine_type << "]";
+  return os;
+}
+
+
+const FieldAccess& FieldAccessOf(const Operator* op) {
+  DCHECK_NOT_NULL(op);
+  DCHECK(op->opcode() == IrOpcode::kLoadField ||
+         op->opcode() == IrOpcode::kStoreField);
+  return OpParameter<FieldAccess>(op);
+}
+
+
+const ElementAccess& ElementAccessOf(const Operator* op) {
+  DCHECK_NOT_NULL(op);
+  DCHECK(op->opcode() == IrOpcode::kLoadElement ||
+         op->opcode() == IrOpcode::kStoreElement);
+  return OpParameter<ElementAccess>(op);
+}
+
+
+// Specialization for static parameters of type {FieldAccess}.
+template <>
+struct StaticParameterTraits<FieldAccess> {
+  static OStream& PrintTo(OStream& os, const FieldAccess& val) {
+    return os << val.offset;
+  }
+  static int HashCode(const FieldAccess& val) {
+    // Shift the offset into the high half; the low half holds the type bits.
+    return (val.offset << 16) | (val.machine_type & 0xffff);
+  }
+  static bool Equals(const FieldAccess& lhs, const FieldAccess& rhs) {
+    return lhs.base_is_tagged == rhs.base_is_tagged &&
+           lhs.offset == rhs.offset && lhs.machine_type == rhs.machine_type &&
+           lhs.type->Is(rhs.type);
+  }
+};
+
+
+// Specialization for static parameters of type {ElementAccess}.
+template <>
+struct StaticParameterTraits<ElementAccess> {
+  static OStream& PrintTo(OStream& os, const ElementAccess& access) {
+    return os << access;
+  }
+  static int HashCode(const ElementAccess& access) {
+    return (access.header_size << 16) | (access.machine_type & 0xffff);
+  }
+  static bool Equals(const ElementAccess& lhs, const ElementAccess& rhs) {
+    return lhs.base_is_tagged == rhs.base_is_tagged &&
+           lhs.header_size == rhs.header_size &&
+           lhs.machine_type == rhs.machine_type && lhs.type->Is(rhs.type);
+  }
+};
+
+
+#define PURE_OP_LIST(V)                                \
+  V(BooleanNot, Operator::kNoProperties, 1)            \
+  V(BooleanToNumber, Operator::kNoProperties, 1)       \
+  V(NumberEqual, Operator::kCommutative, 2)            \
+  V(NumberLessThan, Operator::kNoProperties, 2)        \
+  V(NumberLessThanOrEqual, Operator::kNoProperties, 2) \
+  V(NumberAdd, Operator::kCommutative, 2)              \
+  V(NumberSubtract, Operator::kNoProperties, 2)        \
+  V(NumberMultiply, Operator::kCommutative, 2)         \
+  V(NumberDivide, Operator::kNoProperties, 2)          \
+  V(NumberModulus, Operator::kNoProperties, 2)         \
+  V(NumberToInt32, Operator::kNoProperties, 1)         \
+  V(NumberToUint32, Operator::kNoProperties, 1)        \
+  V(StringEqual, Operator::kCommutative, 2)            \
+  V(StringLessThan, Operator::kNoProperties, 2)        \
+  V(StringLessThanOrEqual, Operator::kNoProperties, 2) \
+  V(StringAdd, Operator::kNoProperties, 2)             \
+  V(ChangeTaggedToInt32, Operator::kNoProperties, 1)   \
+  V(ChangeTaggedToUint32, Operator::kNoProperties, 1)  \
+  V(ChangeTaggedToFloat64, Operator::kNoProperties, 1) \
+  V(ChangeInt32ToTagged, Operator::kNoProperties, 1)   \
+  V(ChangeUint32ToTagged, Operator::kNoProperties, 1)  \
+  V(ChangeFloat64ToTagged, Operator::kNoProperties, 1) \
+  V(ChangeBoolToBit, Operator::kNoProperties, 1)       \
+  V(ChangeBitToBool, Operator::kNoProperties, 1)
+
+
+#define ACCESS_OP_LIST(V)                                 \
+  V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1)     \
+  V(StoreField, FieldAccess, Operator::kNoRead, 2, 0)     \
+  V(LoadElement, ElementAccess, Operator::kNoWrite, 3, 1) \
+  V(StoreElement, ElementAccess, Operator::kNoRead, 4, 0)
+
+
+struct SimplifiedOperatorBuilderImpl FINAL {
+#define PURE(Name, properties, input_count)                               \
+  struct Name##Operator FINAL : public SimpleOperator {                   \
+    Name##Operator()                                                      \
+        : SimpleOperator(IrOpcode::k##Name, Operator::kPure | properties, \
+                         input_count, 1, #Name) {}                        \
+  };                                                                      \
+  Name##Operator k##Name;
+  PURE_OP_LIST(PURE)
+#undef PURE
+};
+
+
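+// Pure operators carry no parameters, so a single statically initialized
+// instance of each can be shared by every SimplifiedOperatorBuilder; the
+// lazy instance below holds those singletons.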
+static base::LazyInstance<SimplifiedOperatorBuilderImpl>::type kImpl =
+    LAZY_INSTANCE_INITIALIZER;
+
+
+SimplifiedOperatorBuilder::SimplifiedOperatorBuilder(Zone* zone)
+    : impl_(kImpl.Get()), zone_(zone) {}
+
+
+#define PURE(Name, properties, input_count) \
+  const Operator* SimplifiedOperatorBuilder::Name() { return &impl_.k##Name; }
+PURE_OP_LIST(PURE)
+#undef PURE
+
+
+const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
+  // TODO(titzer): What about the type parameter?
+  return new (zone()) SimpleOperator(IrOpcode::kReferenceEqual,
+                                     Operator::kCommutative | Operator::kPure,
+                                     2, 1, "ReferenceEqual");
+}
+
+
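+// Access operators carry a per-use FieldAccess or ElementAccess descriptor,
+// so unlike the shared pure operators above they are allocated fresh in the
+// builder's zone.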
+#define ACCESS(Name, Type, properties, input_count, output_count)           \
+  const Operator* SimplifiedOperatorBuilder::Name(const Type& access) {     \
+    return new (zone())                                                     \
+        Operator1<Type>(IrOpcode::k##Name, Operator::kNoThrow | properties, \
+                        input_count, output_count, #Name, access);          \
+  }
+ACCESS_OP_LIST(ACCESS)
+#undef ACCESS
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/simplified-operator.h b/src/compiler/simplified-operator.h
new file mode 100644
index 0000000..32f0e8b
--- /dev/null
+++ b/src/compiler/simplified-operator.h
@@ -0,0 +1,152 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SIMPLIFIED_OPERATOR_H_
+#define V8_COMPILER_SIMPLIFIED_OPERATOR_H_
+
+#include "src/compiler/machine-type.h"
+#include "src/handles.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+template <class>
+class TypeImpl;
+struct ZoneTypeConfig;
+typedef TypeImpl<ZoneTypeConfig> Type;
+class Zone;
+
+
+namespace compiler {
+
+// Forward declarations.
+class Operator;
+struct SimplifiedOperatorBuilderImpl;
+
+
+enum BaseTaggedness { kUntaggedBase, kTaggedBase };
+
+OStream& operator<<(OStream&, BaseTaggedness);
+
+// An access descriptor for loads/stores of fixed structures like field
+// accesses of heap objects. Accesses from either tagged or untagged base
+// pointers are supported; untagging is done automatically during lowering.
+struct FieldAccess {
+  BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
+  int offset;                     // offset of the field, without tag.
+  Handle<Name> name;              // debugging only.
+  Type* type;                     // type of the field.
+  MachineType machine_type;       // machine type of the field.
+
+  int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
+};
+
+
+// An access descriptor for loads/stores of indexed structures like characters
+// in strings or off-heap backing stores. Accesses from either tagged or
+// untagged base pointers are supported; untagging is done automatically during
+// lowering.
+struct ElementAccess {
+  BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
+  int header_size;                // size of the header, without tag.
+  Type* type;                     // type of the element.
+  MachineType machine_type;       // machine type of the element.
+
+  int tag() const { return base_is_tagged == kTaggedBase ? kHeapObjectTag : 0; }
+};
+
+bool operator==(ElementAccess const& lhs, ElementAccess const& rhs);
+bool operator!=(ElementAccess const& lhs, ElementAccess const& rhs);
+
+OStream& operator<<(OStream&, ElementAccess const&);
+
+
+// If the accessed object is not a heap object, add this to the header_size.
+static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
+
+
+const FieldAccess& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+const ElementAccess& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+
+
+// Interface for building simplified operators, which represent the
+// medium-level operations of V8, including adding numbers, allocating objects,
+// indexing into objects and arrays, etc.
+// All operators are typed but many are representation independent.
+
+// Number values from JS can be in one of these representations:
+//   - Tagged: word-sized integer that is either
+//     - a signed small integer (31 or 32 bits plus a tag)
+//     - a tagged pointer to a HeapNumber object that has a float64 field
+//   - Int32: an untagged signed 32-bit integer
+//   - Uint32: an untagged unsigned 32-bit integer
+//   - Float64: an untagged float64
+
+// Additional representations for intermediate code or non-JS code:
+//   - Int64: an untagged signed 64-bit integer
+//   - Uint64: an untagged unsigned 64-bit integer
+//   - Float32: an untagged float32
+
+// Boolean values can be:
+//   - Bool: a tagged pointer to either the canonical JS #false or
+//           the canonical JS #true object
+//   - Bit: an untagged integer 0 or 1, but word-sized
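+//
+// A minimal usage sketch (illustrative only; assumes a Zone* zone and a
+// graph holding two value nodes a and b):
+//
+//   SimplifiedOperatorBuilder simplified(zone);
+//   const Operator* add = simplified.NumberAdd();
+//   Node* sum = graph->NewNode(add, a, b);  // a Number-typed value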
+class SimplifiedOperatorBuilder FINAL {
+ public:
+  explicit SimplifiedOperatorBuilder(Zone* zone);
+
+  const Operator* BooleanNot();
+  const Operator* BooleanToNumber();
+
+  const Operator* NumberEqual();
+  const Operator* NumberLessThan();
+  const Operator* NumberLessThanOrEqual();
+  const Operator* NumberAdd();
+  const Operator* NumberSubtract();
+  const Operator* NumberMultiply();
+  const Operator* NumberDivide();
+  const Operator* NumberModulus();
+  const Operator* NumberToInt32();
+  const Operator* NumberToUint32();
+
+  const Operator* ReferenceEqual(Type* type);
+
+  const Operator* StringEqual();
+  const Operator* StringLessThan();
+  const Operator* StringLessThanOrEqual();
+  const Operator* StringAdd();
+
+  const Operator* ChangeTaggedToInt32();
+  const Operator* ChangeTaggedToUint32();
+  const Operator* ChangeTaggedToFloat64();
+  const Operator* ChangeInt32ToTagged();
+  const Operator* ChangeUint32ToTagged();
+  const Operator* ChangeFloat64ToTagged();
+  const Operator* ChangeBoolToBit();
+  const Operator* ChangeBitToBool();
+
+  const Operator* LoadField(const FieldAccess&);
+  const Operator* StoreField(const FieldAccess&);
+
+  // load-element [base + index], length
+  const Operator* LoadElement(ElementAccess const&);
+
+  // store-element [base + index], length, value
+  const Operator* StoreElement(ElementAccess const&);
+
+ private:
+  Zone* zone() const { return zone_; }
+
+  const SimplifiedOperatorBuilderImpl& impl_;
+  Zone* const zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(SimplifiedOperatorBuilder);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SIMPLIFIED_OPERATOR_H_
diff --git a/src/compiler/source-position.cc b/src/compiler/source-position.cc
new file mode 100644
index 0000000..1178390
--- /dev/null
+++ b/src/compiler/source-position.cc
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/source-position.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node-aux-data-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SourcePositionTable::Decorator : public GraphDecorator {
+ public:
+  explicit Decorator(SourcePositionTable* source_positions)
+      : source_positions_(source_positions) {}
+
+  virtual void Decorate(Node* node) {
+    DCHECK(!source_positions_->current_position_.IsInvalid());
+    source_positions_->table_.Set(node, source_positions_->current_position_);
+  }
+
+ private:
+  SourcePositionTable* source_positions_;
+};
+
+
+SourcePositionTable::SourcePositionTable(Graph* graph)
+    : graph_(graph),
+      decorator_(NULL),
+      current_position_(SourcePosition::Invalid()),
+      table_(graph->zone()) {}
+
+
+void SourcePositionTable::AddDecorator() {
+  DCHECK(decorator_ == NULL);
+  decorator_ = new (graph_->zone()) Decorator(this);
+  graph_->AddDecorator(decorator_);
+}
+
+
+void SourcePositionTable::RemoveDecorator() {
+  DCHECK(decorator_ != NULL);
+  graph_->RemoveDecorator(decorator_);
+  decorator_ = NULL;
+}
+
+
+SourcePosition SourcePositionTable::GetSourcePosition(Node* node) {
+  return table_.Get(node);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/source-position.h b/src/compiler/source-position.h
new file mode 100644
index 0000000..778f067
--- /dev/null
+++ b/src/compiler/source-position.h
@@ -0,0 +1,99 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_SOURCE_POSITION_H_
+#define V8_COMPILER_SOURCE_POSITION_H_
+
+#include "src/assembler.h"
+#include "src/compiler/node-aux-data.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Encapsulates encoding and decoding of source positions from which Nodes
+// originated.
+class SourcePosition FINAL {
+ public:
+  explicit SourcePosition(int raw = kUnknownPosition) : raw_(raw) {}
+
+  static SourcePosition Unknown() { return SourcePosition(kUnknownPosition); }
+  bool IsUnknown() const { return raw() == kUnknownPosition; }
+
+  static SourcePosition Invalid() { return SourcePosition(kInvalidPosition); }
+  bool IsInvalid() const { return raw() == kInvalidPosition; }
+
+  int raw() const { return raw_; }
+
+ private:
+  static const int kInvalidPosition = -2;
+  static const int kUnknownPosition = RelocInfo::kNoPosition;
+  STATIC_ASSERT(kInvalidPosition != kUnknownPosition);
+  int raw_;
+};
+
+
+inline bool operator==(const SourcePosition& lhs, const SourcePosition& rhs) {
+  return lhs.raw() == rhs.raw();
+}
+
+inline bool operator!=(const SourcePosition& lhs, const SourcePosition& rhs) {
+  return !(lhs == rhs);
+}
+
+
+class SourcePositionTable FINAL {
+ public:
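+  // While a Scope is live, nodes created on the graph are tagged with the
+  // given source position (via the table's decorator); the previous position
+  // is restored on destruction, so scopes nest naturally.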
+  class Scope {
+   public:
+    Scope(SourcePositionTable* source_positions, SourcePosition position)
+        : source_positions_(source_positions),
+          prev_position_(source_positions->current_position_) {
+      Init(position);
+    }
+    Scope(SourcePositionTable* source_positions, Node* node)
+        : source_positions_(source_positions),
+          prev_position_(source_positions->current_position_) {
+      Init(source_positions_->GetSourcePosition(node));
+    }
+    ~Scope() { source_positions_->current_position_ = prev_position_; }
+
+   private:
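+    // A known position always takes effect; an unknown one is adopted only
+    // when no valid position has been established yet.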
+    void Init(SourcePosition position) {
+      if (!position.IsUnknown() || prev_position_.IsInvalid()) {
+        source_positions_->current_position_ = position;
+      }
+    }
+
+    SourcePositionTable* source_positions_;
+    SourcePosition prev_position_;
+    DISALLOW_COPY_AND_ASSIGN(Scope);
+  };
+
+  explicit SourcePositionTable(Graph* graph);
+  ~SourcePositionTable() {
+    if (decorator_ != NULL) RemoveDecorator();
+  }
+
+  void AddDecorator();
+  void RemoveDecorator();
+
+  SourcePosition GetSourcePosition(Node* node);
+
+ private:
+  class Decorator;
+
+  Graph* graph_;
+  Decorator* decorator_;
+  SourcePosition current_position_;
+  NodeAuxData<SourcePosition> table_;
+
+  DISALLOW_COPY_AND_ASSIGN(SourcePositionTable);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_SOURCE_POSITION_H_
diff --git a/src/compiler/typer.cc b/src/compiler/typer.cc
new file mode 100644
index 0000000..bfecdef
--- /dev/null
+++ b/src/compiler/typer.cc
@@ -0,0 +1,904 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/typer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Typer::Typer(Zone* zone) : zone_(zone) {
+  Type* number = Type::Number(zone);
+  Type* signed32 = Type::Signed32(zone);
+  Type* unsigned32 = Type::Unsigned32(zone);
+  Type* integral32 = Type::Integral32(zone);
+  Type* object = Type::Object(zone);
+  Type* undefined = Type::Undefined(zone);
+  number_fun0_ = Type::Function(number, zone);
+  number_fun1_ = Type::Function(number, number, zone);
+  number_fun2_ = Type::Function(number, number, number, zone);
+  imul_fun_ = Type::Function(signed32, integral32, integral32, zone);
+
+#define NATIVE_TYPE(sem, rep) \
+  Type::Intersect(Type::sem(zone), Type::rep(zone), zone)
+  // TODO(rossberg): Use range types for more precision, once we have them.
+  Type* int8 = NATIVE_TYPE(SignedSmall, UntaggedInt8);
+  Type* int16 = NATIVE_TYPE(SignedSmall, UntaggedInt16);
+  Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32);
+  Type* uint8 = NATIVE_TYPE(UnsignedSmall, UntaggedInt8);
+  Type* uint16 = NATIVE_TYPE(UnsignedSmall, UntaggedInt16);
+  Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32);
+  Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32);
+  Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64);
+#undef NATIVE_TYPE
+  Type* buffer = Type::Buffer(zone);
+  Type* int8_array = Type::Array(int8, zone);
+  Type* int16_array = Type::Array(int16, zone);
+  Type* int32_array = Type::Array(int32, zone);
+  Type* uint8_array = Type::Array(uint8, zone);
+  Type* uint16_array = Type::Array(uint16, zone);
+  Type* uint32_array = Type::Array(uint32, zone);
+  Type* float32_array = Type::Array(float32, zone);
+  Type* float64_array = Type::Array(float64, zone);
+  Type* arg1 = Type::Union(unsigned32, object, zone);
+  Type* arg2 = Type::Union(unsigned32, undefined, zone);
+  Type* arg3 = arg2;
+  array_buffer_fun_ = Type::Function(buffer, unsigned32, zone);
+  int8_array_fun_ = Type::Function(int8_array, arg1, arg2, arg3, zone);
+  int16_array_fun_ = Type::Function(int16_array, arg1, arg2, arg3, zone);
+  int32_array_fun_ = Type::Function(int32_array, arg1, arg2, arg3, zone);
+  uint8_array_fun_ = Type::Function(uint8_array, arg1, arg2, arg3, zone);
+  uint16_array_fun_ = Type::Function(uint16_array, arg1, arg2, arg3, zone);
+  uint32_array_fun_ = Type::Function(uint32_array, arg1, arg2, arg3, zone);
+  float32_array_fun_ = Type::Function(float32_array, arg1, arg2, arg3, zone);
+  float64_array_fun_ = Type::Function(float64_array, arg1, arg2, arg3, zone);
+}
+
+
+class Typer::Visitor : public NullNodeVisitor {
+ public:
+  Visitor(Typer* typer, MaybeHandle<Context> context)
+      : typer_(typer), context_(context) {}
+
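+  // Dispatches on the opcode. Only value-producing operators are handled;
+  // pure control operators must never reach the typer.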
+  Bounds TypeNode(Node* node) {
+    switch (node->opcode()) {
+#define DECLARE_CASE(x) case IrOpcode::k##x: return Type##x(node);
+      DECLARE_CASE(Start)
+      VALUE_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) case IrOpcode::k##x:
+      DECLARE_CASE(End)
+      INNER_CONTROL_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+      break;
+    }
+    UNREACHABLE();
+    return Bounds();
+  }
+
+  Type* TypeConstant(Handle<Object> value);
+
+ protected:
+#define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
+  DECLARE_METHOD(Start)
+  VALUE_OP_LIST(DECLARE_METHOD)
+#undef DECLARE_METHOD
+
+  Bounds OperandType(Node* node, int i) {
+    return NodeProperties::GetBounds(NodeProperties::GetValueInput(node, i));
+  }
+
+  Type* ContextType(Node* node) {
+    Bounds result =
+        NodeProperties::GetBounds(NodeProperties::GetContextInput(node));
+    DCHECK(result.upper->Maybe(Type::Internal()));
+    // TODO(rossberg): More precisely, instead of the above assertion, we should
+    // back-propagate the constraint that it has to be a subtype of Internal.
+    return result.upper;
+  }
+
+  Zone* zone() { return typer_->zone(); }
+  Isolate* isolate() { return typer_->isolate(); }
+  MaybeHandle<Context> context() { return context_; }
+
+ private:
+  Typer* typer_;
+  MaybeHandle<Context> context_;
+};
+
+
+class Typer::RunVisitor : public Typer::Visitor {
+ public:
+  RunVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context),
+        redo(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {}
+
+  GenericGraphVisit::Control Post(Node* node) {
+    if (OperatorProperties::HasValueOutput(node->op())) {
+      Bounds bounds = TypeNode(node);
+      NodeProperties::SetBounds(node, bounds);
+      // Remember incompletely typed nodes for least fixpoint iteration.
+      int arity = OperatorProperties::GetValueInputCount(node->op());
+      for (int i = 0; i < arity; ++i) {
+        // TODO(rossberg): change once IsTyped is available.
+        // if (!NodeProperties::IsTyped(NodeProperties::GetValueInput(node, i)))
+        if (OperandType(node, i).upper->Is(Type::None())) {
+          redo.insert(node);
+          break;
+        }
+      }
+    }
+    return GenericGraphVisit::CONTINUE;
+  }
+
+  NodeSet redo;
+};
+
+
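+// Narrowing re-types nodes forward from a change; bounds may only shrink
+// (see the DCHECK below), and the traversal defers once a node's bounds
+// stop changing.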
+class Typer::NarrowVisitor : public Typer::Visitor {
+ public:
+  NarrowVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    if (OperatorProperties::HasValueOutput(node->op())) {
+      Bounds previous = NodeProperties::GetBounds(node);
+      Bounds bounds = TypeNode(node);
+      NodeProperties::SetBounds(node, Bounds::Both(bounds, previous, zone()));
+      DCHECK(bounds.Narrows(previous));
+      // Stop when nothing changed (but allow re-entry in case it does later).
+      return previous.Narrows(bounds)
+          ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+    } else {
+      return GenericGraphVisit::SKIP;
+    }
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    return GenericGraphVisit::REENTER;
+  }
+};
+
+
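+// Widening re-visits nodes starting from those that saw still-untyped
+// inputs; bounds may only grow (see the DCHECKs below), so the fixpoint
+// iteration makes monotone progress.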
+class Typer::WidenVisitor : public Typer::Visitor {
+ public:
+  WidenVisitor(Typer* typer, MaybeHandle<Context> context)
+      : Visitor(typer, context) {}
+
+  GenericGraphVisit::Control Pre(Node* node) {
+    if (OperatorProperties::HasValueOutput(node->op())) {
+      Bounds previous = NodeProperties::GetBounds(node);
+      Bounds bounds = TypeNode(node);
+      DCHECK(previous.lower->Is(bounds.lower));
+      DCHECK(previous.upper->Is(bounds.upper));
+      NodeProperties::SetBounds(node, bounds);  // TODO(rossberg): Either?
+      // Stop when nothing changed (but allow re-entry in case it does later).
+      return bounds.Narrows(previous)
+          ? GenericGraphVisit::DEFER : GenericGraphVisit::REENTER;
+    } else {
+      return GenericGraphVisit::SKIP;
+    }
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    return GenericGraphVisit::REENTER;
+  }
+};
+
+
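+// Typing runs in two phases: a single post-order pass assigns initial
+// bounds, then every node that consumed a still-untyped (None) input is
+// reprocessed by the widening fixpoint below.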
+void Typer::Run(Graph* graph, MaybeHandle<Context> context) {
+  RunVisitor typing(this, context);
+  graph->VisitNodeInputsFromEnd(&typing);
+  // Find least fixpoint.
+  for (NodeSetIter i = typing.redo.begin(); i != typing.redo.end(); ++i) {
+    Widen(graph, *i, context);
+  }
+}
+
+
+void Typer::Narrow(Graph* graph, Node* start, MaybeHandle<Context> context) {
+  NarrowVisitor typing(this, context);
+  graph->VisitNodeUsesFrom(start, &typing);
+}
+
+
+void Typer::Widen(Graph* graph, Node* start, MaybeHandle<Context> context) {
+  WidenVisitor typing(this, context);
+  graph->VisitNodeUsesFrom(start, &typing);
+}
+
+
+void Typer::Init(Node* node) {
+  if (OperatorProperties::HasValueOutput(node->op())) {
+    Visitor typing(this, MaybeHandle<Context>());
+    Bounds bounds = typing.TypeNode(node);
+    NodeProperties::SetBounds(node, bounds);
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+
+
+// Control operators.
+
+Bounds Typer::Visitor::TypeStart(Node* node) {
+  return Bounds(Type::Internal(zone()));
+}
+
+
+// Common operators.
+
+Bounds Typer::Visitor::TypeParameter(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<int32_t>(node), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(
+      Type::Of(static_cast<double>(OpParameter<int64_t>(node)), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFloat32Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<float>(node), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeFloat64Constant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<double>(node), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
+  // TODO(titzer): only call Type::Of() if the type is not already known.
+  return Bounds(Type::Of(OpParameter<double>(node), zone()));
+}
+
+
+Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
+  return Bounds(TypeConstant(OpParameter<Unique<Object> >(node).handle()));
+}
+
+
+Bounds Typer::Visitor::TypeExternalConstant(Node* node) {
+  return Bounds(Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypePhi(Node* node) {
+  int arity = OperatorProperties::GetValueInputCount(node->op());
+  Bounds bounds = OperandType(node, 0);
+  for (int i = 1; i < arity; ++i) {
+    bounds = Bounds::Either(bounds, OperandType(node, i), zone());
+  }
+  return bounds;
+}
+
+
+Bounds Typer::Visitor::TypeEffectPhi(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeControlEffect(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeValueEffect(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeFinish(Node* node) {
+  return OperandType(node, 0);
+}
+
+
+Bounds Typer::Visitor::TypeFrameState(Node* node) {
+  // TODO(rossberg): Ideally FrameState wouldn't have a value output.
+  return Bounds(Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStateValues(Node* node) {
+  return Bounds(Type::Internal(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeCall(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeProjection(Node* node) {
+  // TODO(titzer): use the output type of the input to determine the bounds.
+  return Bounds::Unbounded(zone());
+}
+
+
+// JS comparison operators.
+
+#define DEFINE_METHOD(x)                       \
+  Bounds Typer::Visitor::Type##x(Node* node) { \
+    return Bounds(Type::Boolean(zone()));      \
+  }
+JS_COMPARE_BINOP_LIST(DEFINE_METHOD)
+#undef DEFINE_METHOD
+
+
+// JS bitwise operators.
+
+Bounds Typer::Visitor::TypeJSBitwiseOr(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* upper = Type::Union(left.upper, right.upper, zone());
+  if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+  Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSBitwiseAnd(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* upper = Type::Union(left.upper, right.upper, zone());
+  if (!upper->Is(Type::Signed32())) upper = Type::Signed32(zone());
+  Type* lower = Type::Intersect(Type::SignedSmall(zone()), upper, zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSBitwiseXor(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftLeft(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftRight(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Signed32(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSShiftRightLogical(Node* node) {
+  return Bounds(Type::UnsignedSmall(zone()), Type::Unsigned32(zone()));
+}
+
+
+// JS arithmetic operators.
+
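+// JS addition is numeric when both operands are numbers and string
+// concatenation when either side is a string; otherwise the result can be
+// either, hence the NumberOrString upper bound below.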
+Bounds Typer::Visitor::TypeJSAdd(Node* node) {
+  Bounds left = OperandType(node, 0);
+  Bounds right = OperandType(node, 1);
+  Type* lower =
+      left.lower->Is(Type::None()) || right.lower->Is(Type::None()) ?
+          Type::None(zone()) :
+      left.lower->Is(Type::Number()) && right.lower->Is(Type::Number()) ?
+          Type::SignedSmall(zone()) :
+      left.lower->Is(Type::String()) || right.lower->Is(Type::String()) ?
+          Type::String(zone()) : Type::None(zone());
+  Type* upper =
+      left.upper->Is(Type::None()) && right.upper->Is(Type::None()) ?
+          Type::None(zone()) :
+      left.upper->Is(Type::Number()) && right.upper->Is(Type::Number()) ?
+          Type::Number(zone()) :
+      left.upper->Is(Type::String()) || right.upper->Is(Type::String()) ?
+          Type::String(zone()) : Type::NumberOrString(zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSSubtract(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSMultiply(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSDivide(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSModulus(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+// JS unary operators.
+
+Bounds Typer::Visitor::TypeJSUnaryNot(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
+  return Bounds(Type::InternalizedString(zone()));
+}
+
+
+// JS conversion operators.
+
+Bounds Typer::Visitor::TypeJSToBoolean(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToNumber(Node* node) {
+  return Bounds(Type::SignedSmall(zone()), Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToString(Node* node) {
+  return Bounds(Type::None(zone()), Type::String(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToName(Node* node) {
+  return Bounds(Type::None(zone()), Type::Name(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSToObject(Node* node) {
+  return Bounds(Type::None(zone()), Type::Receiver(zone()));
+}
+
+
+// JS object operators.
+
+Bounds Typer::Visitor::TypeJSCreate(Node* node) {
+  return Bounds(Type::None(zone()), Type::Object(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadProperty(Node* node) {
+  Bounds object = OperandType(node, 0);
+  Bounds name = OperandType(node, 1);
+  Bounds result = Bounds::Unbounded(zone());
+  // TODO(rossberg): Use range types and sized array types to filter undefined.
+  if (object.lower->IsArray() && name.lower->Is(Type::Integral32())) {
+    result.lower = Type::Union(
+        object.lower->AsArray()->Element(), Type::Undefined(zone()), zone());
+  }
+  if (object.upper->IsArray() && name.upper->Is(Type::Integral32())) {
+    result.upper = Type::Union(
+        object.upper->AsArray()->Element(), Type::Undefined(zone()), zone());
+  }
+  return result;
+}
+
+
+Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreProperty(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreNamed(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeJSDeleteProperty(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSHasProperty(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+// JS context operators.
+
+Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
+  Bounds outer = OperandType(node, 0);
+  DCHECK(outer.upper->Maybe(Type::Internal()));
+  // TODO(rossberg): More precisely, instead of the above assertion, we should
+  // back-propagate the constraint that it has to be a subtype of Internal.
+
+  ContextAccess access = OpParameter<ContextAccess>(node);
+  Type* context_type = outer.upper;
+  MaybeHandle<Context> context;
+  if (context_type->IsConstant()) {
+    context = Handle<Context>::cast(context_type->AsConstant()->Value());
+  }
+  // Walk context chain (as far as known), mirroring dynamic lookup.
+  // Since contexts are mutable, the information is only useful as a lower
+  // bound.
+  // TODO(rossberg): Could use scope info to fix upper bounds for constant
+  // bindings if we know that this code is never shared.
+  for (int i = access.depth(); i > 0; --i) {
+    if (context_type->IsContext()) {
+      context_type = context_type->AsContext()->Outer();
+      if (context_type->IsConstant()) {
+        context = Handle<Context>::cast(context_type->AsConstant()->Value());
+      }
+    } else if (!context.is_null()) {
+      context = handle(context.ToHandleChecked()->previous(), isolate());
+    }
+  }
+  if (context.is_null()) {
+    return Bounds::Unbounded(zone());
+  } else {
+    Handle<Object> value =
+        handle(context.ToHandleChecked()->get(access.index()), isolate());
+    Type* lower = TypeConstant(value);
+    return Bounds(lower, Type::Any(zone()));
+  }
+}
+
+
+Bounds Typer::Visitor::TypeJSStoreContext(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateFunctionContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateCatchContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateWithContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateBlockContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
+  // TODO(rossberg): this is probably incorrect
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCreateGlobalContext(Node* node) {
+  Type* outer = ContextType(node);
+  return Bounds(Type::Context(outer, zone()));
+}
+
+
+// JS other operators.
+
+Bounds Typer::Visitor::TypeJSYield(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSCallConstruct(Node* node) {
+  return Bounds(Type::None(zone()), Type::Receiver(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeJSCallFunction(Node* node) {
+  Bounds fun = OperandType(node, 0);
+  Type* lower = fun.lower->IsFunction()
+      ? fun.lower->AsFunction()->Result() : Type::None(zone());
+  Type* upper = fun.upper->IsFunction()
+      ? fun.upper->AsFunction()->Result() : Type::Any(zone());
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeJSCallRuntime(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeJSDebugger(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+// Simplified operators.
+
+Bounds Typer::Visitor::TypeBooleanNot(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeBooleanToNumber(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberLessThan(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberLessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberAdd(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberSubtract(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberMultiply(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberDivide(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberModulus(Node* node) {
+  return Bounds(Type::Number(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeNumberToInt32(Node* node) {
+  Bounds arg = OperandType(node, 0);
+  Type* s32 = Type::Signed32(zone());
+  Type* lower = arg.lower->Is(s32) ? arg.lower : s32;
+  Type* upper = arg.upper->Is(s32) ? arg.upper : s32;
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeNumberToUint32(Node* node) {
+  Bounds arg = OperandType(node, 0);
+  Type* u32 = Type::Unsigned32(zone());
+  Type* lower = arg.lower->Is(u32) ? arg.lower : u32;
+  Type* upper = arg.upper->Is(u32) ? arg.upper : u32;
+  return Bounds(lower, upper);
+}
+
+
+Bounds Typer::Visitor::TypeReferenceEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringLessThan(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringLessThanOrEqual(Node* node) {
+  return Bounds(Type::Boolean(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeStringAdd(Node* node) {
+  return Bounds(Type::String(zone()));
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
+  // TODO(titzer): type is type of input, representation is Word32.
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
+  return Bounds(Type::Integral32());  // TODO(titzer): add appropriate rep
+}
+
+
+Bounds Typer::Visitor::TypeChangeTaggedToFloat64(Node* node) {
+  // TODO(titzer): type is type of input, representation is Float64.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeChangeInt32ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Integral32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeUint32ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Unsigned32());
+}
+
+
+Bounds Typer::Visitor::TypeChangeFloat64ToTagged(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Number());
+}
+
+
+Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
+  // TODO(titzer): type is type of input, representation is Bit.
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
+  // TODO(titzer): type is type of input, representation is Tagged.
+  return Bounds(Type::Boolean());
+}
+
+
+Bounds Typer::Visitor::TypeLoadField(Node* node) {
+  return Bounds(FieldAccessOf(node->op()).type);
+}
+
+
+Bounds Typer::Visitor::TypeLoadElement(Node* node) {
+  return Bounds(ElementAccessOf(node->op()).type);
+}
+
+
+Bounds Typer::Visitor::TypeStoreField(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+Bounds Typer::Visitor::TypeStoreElement(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
+// Machine operators.
+
+// TODO(rossberg): implement
+#define DEFINE_METHOD(x) \
+    Bounds Typer::Visitor::Type##x(Node* node) { return Bounds(Type::None()); }
+MACHINE_OP_LIST(DEFINE_METHOD)
+#undef DEFINE_METHOD
+
+
+// Heap constants.
+
+Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
+  if (value->IsJSFunction() && JSFunction::cast(*value)->IsBuiltin() &&
+      !context().is_null()) {
+    Handle<Context> native =
+        handle(context().ToHandleChecked()->native_context(), isolate());
+    if (*value == native->math_abs_fun()) {
+      return typer_->number_fun1_;  // TODO(rossberg): can't express overloading
+    } else if (*value == native->math_acos_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_asin_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_atan_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_atan2_fun()) {
+      return typer_->number_fun2_;
+    } else if (*value == native->math_ceil_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_cos_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_exp_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_floor_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_imul_fun()) {
+      return typer_->imul_fun_;
+    } else if (*value == native->math_log_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_pow_fun()) {
+      return typer_->number_fun2_;
+    } else if (*value == native->math_random_fun()) {
+      return typer_->number_fun0_;
+    } else if (*value == native->math_round_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_sin_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_sqrt_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->math_tan_fun()) {
+      return typer_->number_fun1_;
+    } else if (*value == native->array_buffer_fun()) {
+      return typer_->array_buffer_fun_;
+    } else if (*value == native->int8_array_fun()) {
+      return typer_->int8_array_fun_;
+    } else if (*value == native->int16_array_fun()) {
+      return typer_->int16_array_fun_;
+    } else if (*value == native->int32_array_fun()) {
+      return typer_->int32_array_fun_;
+    } else if (*value == native->uint8_array_fun()) {
+      return typer_->uint8_array_fun_;
+    } else if (*value == native->uint16_array_fun()) {
+      return typer_->uint16_array_fun_;
+    } else if (*value == native->uint32_array_fun()) {
+      return typer_->uint32_array_fun_;
+    } else if (*value == native->float32_array_fun()) {
+      return typer_->float32_array_fun_;
+    } else if (*value == native->float64_array_fun()) {
+      return typer_->float64_array_fun_;
+    }
+  }
+  return Type::Constant(value, zone());
+}
+
+
+namespace {
+
+class TyperDecorator : public GraphDecorator {
+ public:
+  explicit TyperDecorator(Typer* typer) : typer_(typer) {}
+  virtual void Decorate(Node* node) { typer_->Init(node); }
+
+ private:
+  Typer* typer_;
+};
+
+}  // namespace
+
+
+void Typer::DecorateGraph(Graph* graph) {
+  graph->AddDecorator(new (zone()) TyperDecorator(this));
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/typer.h b/src/compiler/typer.h
new file mode 100644
index 0000000..2957e4b
--- /dev/null
+++ b/src/compiler/typer.h
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_TYPER_H_
+#define V8_COMPILER_TYPER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/opcodes.h"
+#include "src/types.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Typer {
+ public:
+  explicit Typer(Zone* zone);
+
+  void Init(Node* node);
+  void Run(Graph* graph, MaybeHandle<Context> context);
+  void Narrow(Graph* graph, Node* node, MaybeHandle<Context> context);
+  void Widen(Graph* graph, Node* node, MaybeHandle<Context> context);
+
+  void DecorateGraph(Graph* graph);
+
+  Zone* zone() { return zone_; }
+  Isolate* isolate() { return zone_->isolate(); }
+
+ private:
+  class Visitor;
+  class RunVisitor;
+  class NarrowVisitor;
+  class WidenVisitor;
+
+  Zone* zone_;
+  Type* number_fun0_;
+  Type* number_fun1_;
+  Type* number_fun2_;
+  Type* imul_fun_;
+  Type* array_buffer_fun_;
+  Type* int8_array_fun_;
+  Type* int16_array_fun_;
+  Type* int32_array_fun_;
+  Type* uint8_array_fun_;
+  Type* uint16_array_fun_;
+  Type* uint32_array_fun_;
+  Type* float32_array_fun_;
+  Type* float64_array_fun_;
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_TYPER_H_
diff --git a/src/compiler/value-numbering-reducer-unittest.cc b/src/compiler/value-numbering-reducer-unittest.cc
new file mode 100644
index 0000000..8db6458
--- /dev/null
+++ b/src/compiler/value-numbering-reducer-unittest.cc
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/graph.h"
+#include "src/compiler/value-numbering-reducer.h"
+#include "src/test/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+const SimpleOperator kOp0(0, Operator::kNoProperties, 0, 1, "op0");
+const SimpleOperator kOp1(1, Operator::kNoProperties, 1, 1, "op1");
+
+}  // namespace
+
+
+class ValueNumberingReducerTest : public TestWithZone {
+ public:
+  ValueNumberingReducerTest() : graph_(zone()), reducer_(zone()) {}
+
+ protected:
+  Reduction Reduce(Node* node) { return reducer_.Reduce(node); }
+
+  Graph* graph() { return &graph_; }
+
+ private:
+  Graph graph_;
+  ValueNumberingReducer reducer_;
+};
+
+
+TEST_F(ValueNumberingReducerTest, AllInputsAreChecked) {
+  Node* na = graph()->NewNode(&kOp0);
+  Node* nb = graph()->NewNode(&kOp0);
+  Node* n1 = graph()->NewNode(&kOp0, na);
+  Node* n2 = graph()->NewNode(&kOp0, nb);
+  EXPECT_FALSE(Reduce(n1).Changed());
+  EXPECT_FALSE(Reduce(n2).Changed());
+}
+
+
+TEST_F(ValueNumberingReducerTest, DeadNodesAreNeverReturned) {
+  Node* n0 = graph()->NewNode(&kOp0);
+  Node* n1 = graph()->NewNode(&kOp1, n0);
+  EXPECT_FALSE(Reduce(n1).Changed());
+  n1->Kill();
+  EXPECT_FALSE(Reduce(graph()->NewNode(&kOp1, n0)).Changed());
+}
+
+
+TEST_F(ValueNumberingReducerTest, OperatorEqualityNotIdentity) {
+  static const size_t kMaxInputCount = 16;
+  Node* inputs[kMaxInputCount];
+  for (size_t i = 0; i < arraysize(inputs); ++i) {
+    Operator::Opcode opcode = static_cast<Operator::Opcode>(
+        std::numeric_limits<Operator::Opcode>::max() - i);
+    inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
+        opcode, Operator::kNoProperties, 0, 1, "Operator"));
+  }
+  TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
+    const SimpleOperator op1(static_cast<Operator::Opcode>(input_count),
+                             Operator::kNoProperties,
+                             static_cast<int>(input_count), 1, "op");
+    Node* n1 = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
+    Reduction r1 = Reduce(n1);
+    EXPECT_FALSE(r1.Changed());
+
+    const SimpleOperator op2(static_cast<Operator::Opcode>(input_count),
+                             Operator::kNoProperties,
+                             static_cast<int>(input_count), 1, "op");
+    Node* n2 = graph()->NewNode(&op2, static_cast<int>(input_count), inputs);
+    Reduction r2 = Reduce(n2);
+    EXPECT_TRUE(r2.Changed());
+    EXPECT_EQ(n1, r2.replacement());
+  }
+}
+
+
+TEST_F(ValueNumberingReducerTest, SubsequentReductionsYieldTheSameNode) {
+  static const size_t kMaxInputCount = 16;
+  Node* inputs[kMaxInputCount];
+  for (size_t i = 0; i < arraysize(inputs); ++i) {
+    Operator::Opcode opcode = static_cast<Operator::Opcode>(
+        std::numeric_limits<Operator::Opcode>::max() - i);
+    inputs[i] = graph()->NewNode(new (zone()) SimpleOperator(
+        opcode, Operator::kNoProperties, 0, 1, "Operator"));
+  }
+  TRACED_FORRANGE(size_t, input_count, 0, arraysize(inputs)) {
+    const SimpleOperator op1(1, Operator::kNoProperties,
+                             static_cast<int>(input_count), 1, "op1");
+    Node* n = graph()->NewNode(&op1, static_cast<int>(input_count), inputs);
+    Reduction r = Reduce(n);
+    EXPECT_FALSE(r.Changed());
+
+    r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(n, r.replacement());
+
+    r = Reduce(graph()->NewNode(&op1, static_cast<int>(input_count), inputs));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(n, r.replacement());
+  }
+}
+
+
+TEST_F(ValueNumberingReducerTest, WontReplaceNodeWithItself) {
+  Node* n = graph()->NewNode(&kOp0);
+  EXPECT_FALSE(Reduce(n).Changed());
+  EXPECT_FALSE(Reduce(n).Changed());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/value-numbering-reducer.cc b/src/compiler/value-numbering-reducer.cc
new file mode 100644
index 0000000..595a4f3
--- /dev/null
+++ b/src/compiler/value-numbering-reducer.cc
@@ -0,0 +1,74 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/value-numbering-reducer.h"
+
+#include "src/compiler/node.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
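+// Only the operator contributes to the hash; inputs are compared in Equals,
+// so all candidates for a node live in its operator's bucket.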
+size_t HashCode(Node* node) { return node->op()->HashCode(); }
+
+
+bool Equals(Node* a, Node* b) {
+  DCHECK_NOT_NULL(a);
+  DCHECK_NOT_NULL(b);
+  DCHECK_NOT_NULL(a->op());
+  DCHECK_NOT_NULL(b->op());
+  if (!a->op()->Equals(b->op())) return false;
+  if (a->InputCount() != b->InputCount()) return false;
+  for (int j = 0; j < a->InputCount(); ++j) {
+    DCHECK_NOT_NULL(a->InputAt(j));
+    DCHECK_NOT_NULL(b->InputAt(j));
+    if (a->InputAt(j)->id() != b->InputAt(j)->id()) return false;
+  }
+  return true;
+}
+
+}  // namespace
+
+
+class ValueNumberingReducer::Entry FINAL : public ZoneObject {
+ public:
+  Entry(Node* node, Entry* next) : node_(node), next_(next) {}
+
+  Node* node() const { return node_; }
+  Entry* next() const { return next_; }
+
+ private:
+  Node* node_;
+  Entry* next_;
+};
+
+
+ValueNumberingReducer::ValueNumberingReducer(Zone* zone) : zone_(zone) {
+  for (size_t i = 0; i < arraysize(buckets_); ++i) {
+    buckets_[i] = NULL;
+  }
+}
+
+
+ValueNumberingReducer::~ValueNumberingReducer() {}
+
+
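+// Scans the node's hash chain for a live, structurally equal node; if one
+// exists the incoming node is replaced by it, otherwise the node is
+// recorded so later duplicates can find it.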
+Reduction ValueNumberingReducer::Reduce(Node* node) {
+  Entry** head = &buckets_[HashCode(node) % arraysize(buckets_)];
+  for (Entry* entry = *head; entry; entry = entry->next()) {
+    if (entry->node()->IsDead()) continue;
+    if (entry->node() == node) return NoChange();
+    if (Equals(node, entry->node())) {
+      return Replace(entry->node());
+    }
+  }
+  *head = new (zone()) Entry(node, *head);
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/value-numbering-reducer.h b/src/compiler/value-numbering-reducer.h
new file mode 100644
index 0000000..0d67e5d
--- /dev/null
+++ b/src/compiler/value-numbering-reducer.h
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
+#define V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class ValueNumberingReducer FINAL : public Reducer {
+ public:
+  explicit ValueNumberingReducer(Zone* zone);
+  ~ValueNumberingReducer();
+
+  virtual Reduction Reduce(Node* node) OVERRIDE;
+
+ private:
+  Zone* zone() const { return zone_; }
+
+  // TODO(turbofan): We currently use separate chaining with linked lists here,
+  // we may want to replace that with a more sophisticated data structure at
+  // some point in the future.
+  class Entry;
+  Entry* buckets_[117u];
+  Zone* zone_;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_VALUE_NUMBERING_REDUCER_H_
diff --git a/src/compiler/verifier.cc b/src/compiler/verifier.cc
new file mode 100644
index 0000000..23cec7a
--- /dev/null
+++ b/src/compiler/verifier.cc
@@ -0,0 +1,455 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/verifier.h"
+
+#include <deque>
+#include <queue>
+
+#include "src/compiler/generic-algorithm.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/node-properties.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/data-flow.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+
+static bool IsDefUseChainLinkPresent(Node* def, Node* use) {
+  Node::Uses uses = def->uses();
+  for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+    if (*it == use) return true;
+  }
+  return false;
+}
+
+
+static bool IsUseDefChainLinkPresent(Node* def, Node* use) {
+  Node::Inputs inputs = use->inputs();
+  for (Node::Inputs::iterator it = inputs.begin(); it != inputs.end(); ++it) {
+    if (*it == def) return true;
+  }
+  return false;
+}
+
+
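+// Checks per-node structural invariants and records which nodes are
+// reachable from start (via uses) and from end (via inputs), so that Run
+// can cross-check the two sets afterwards.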
+class Verifier::Visitor : public NullNodeVisitor {
+ public:
+  explicit Visitor(Zone* zone)
+      : reached_from_start(NodeSet::key_compare(),
+                           NodeSet::allocator_type(zone)),
+        reached_from_end(NodeSet::key_compare(),
+                         NodeSet::allocator_type(zone)) {}
+
+  // Fulfills the PreNodeCallback interface.
+  GenericGraphVisit::Control Pre(Node* node);
+
+  bool from_start;
+  NodeSet reached_from_start;
+  NodeSet reached_from_end;
+};
+
+
+GenericGraphVisit::Control Verifier::Visitor::Pre(Node* node) {
+  int value_count = OperatorProperties::GetValueInputCount(node->op());
+  int context_count = OperatorProperties::GetContextInputCount(node->op());
+  int frame_state_count =
+      OperatorProperties::GetFrameStateInputCount(node->op());
+  int effect_count = OperatorProperties::GetEffectInputCount(node->op());
+  int control_count = OperatorProperties::GetControlInputCount(node->op());
+
+  // Verify number of inputs matches up.
+  int input_count = value_count + context_count + frame_state_count +
+                    effect_count + control_count;
+  CHECK_EQ(input_count, node->InputCount());
+
+  // Verify that frame state has been inserted for the nodes that need it.
+  if (OperatorProperties::HasFrameStateInput(node->op())) {
+    Node* frame_state = NodeProperties::GetFrameStateInput(node);
+    CHECK(frame_state->opcode() == IrOpcode::kFrameState ||
+          // kFrameState uses undefined as a sentinel.
+          (node->opcode() == IrOpcode::kFrameState &&
+           frame_state->opcode() == IrOpcode::kHeapConstant));
+    CHECK(IsDefUseChainLinkPresent(frame_state, node));
+    CHECK(IsUseDefChainLinkPresent(frame_state, node));
+  }
+
+  // Verify all value inputs actually produce a value.
+  for (int i = 0; i < value_count; ++i) {
+    Node* value = NodeProperties::GetValueInput(node, i);
+    CHECK(OperatorProperties::HasValueOutput(value->op()));
+    CHECK(IsDefUseChainLinkPresent(value, node));
+    CHECK(IsUseDefChainLinkPresent(value, node));
+  }
+
+  // Verify all context inputs are value nodes.
+  for (int i = 0; i < context_count; ++i) {
+    Node* context = NodeProperties::GetContextInput(node);
+    CHECK(OperatorProperties::HasValueOutput(context->op()));
+    CHECK(IsDefUseChainLinkPresent(context, node));
+    CHECK(IsUseDefChainLinkPresent(context, node));
+  }
+
+  // Verify all effect inputs actually have an effect.
+  for (int i = 0; i < effect_count; ++i) {
+    Node* effect = NodeProperties::GetEffectInput(node);
+    CHECK(OperatorProperties::HasEffectOutput(effect->op()));
+    CHECK(IsDefUseChainLinkPresent(effect, node));
+    CHECK(IsUseDefChainLinkPresent(effect, node));
+  }
+
+  // Verify all control inputs are control nodes.
+  for (int i = 0; i < control_count; ++i) {
+    Node* control = NodeProperties::GetControlInput(node, i);
+    CHECK(OperatorProperties::HasControlOutput(control->op()));
+    CHECK(IsDefUseChainLinkPresent(control, node));
+    CHECK(IsUseDefChainLinkPresent(control, node));
+  }
+
+  // Verify all successors are projections if multiple value outputs exist.
+  if (OperatorProperties::GetValueOutputCount(node->op()) > 1) {
+    Node::Uses uses = node->uses();
+    for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+      CHECK(!NodeProperties::IsValueEdge(it.edge()) ||
+            (*it)->opcode() == IrOpcode::kProjection ||
+            (*it)->opcode() == IrOpcode::kParameter);
+    }
+  }
+
+  switch (node->opcode()) {
+    case IrOpcode::kStart:
+      // Start has no inputs.
+      CHECK_EQ(0, input_count);
+      break;
+    case IrOpcode::kEnd:
+      // End has no outputs.
+      CHECK(!OperatorProperties::HasValueOutput(node->op()));
+      CHECK(!OperatorProperties::HasEffectOutput(node->op()));
+      CHECK(!OperatorProperties::HasControlOutput(node->op()));
+      break;
+    case IrOpcode::kDead:
+      // Dead is never connected to the graph.
+      UNREACHABLE();
+    case IrOpcode::kBranch: {
+      // Branch uses are IfTrue and IfFalse.
+      Node::Uses uses = node->uses();
+      bool got_true = false, got_false = false;
+      for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
+        CHECK(((*it)->opcode() == IrOpcode::kIfTrue && !got_true) ||
+              ((*it)->opcode() == IrOpcode::kIfFalse && !got_false));
+        if ((*it)->opcode() == IrOpcode::kIfTrue) got_true = true;
+        if ((*it)->opcode() == IrOpcode::kIfFalse) got_false = true;
+      }
+      // TODO(rossberg): Currently fails for various tests.
+      // CHECK(got_true && got_false);
+      break;
+    }
+    case IrOpcode::kIfTrue:
+    case IrOpcode::kIfFalse:
+      CHECK_EQ(IrOpcode::kBranch,
+               NodeProperties::GetControlInput(node, 0)->opcode());
+      break;
+    case IrOpcode::kLoop:
+    case IrOpcode::kMerge:
+      break;
+    case IrOpcode::kReturn:
+      // TODO(rossberg): check successor is End
+      break;
+    case IrOpcode::kThrow:
+      // TODO(rossberg): what are the constraints on these?
+      break;
+    case IrOpcode::kParameter: {
+      // Parameters have the start node as inputs.
+      CHECK_EQ(1, input_count);
+      CHECK_EQ(IrOpcode::kStart,
+               NodeProperties::GetValueInput(node, 0)->opcode());
+      // Parameter has an input that produces enough values.
+      int index = OpParameter<int>(node);
+      Node* input = NodeProperties::GetValueInput(node, 0);
+      // Currently, parameter indices start at -1 instead of 0.
+      CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()), index + 1);
+      break;
+    }
+    case IrOpcode::kInt32Constant:
+    case IrOpcode::kInt64Constant:
+    case IrOpcode::kFloat64Constant:
+    case IrOpcode::kExternalConstant:
+    case IrOpcode::kNumberConstant:
+    case IrOpcode::kHeapConstant:
+      // Constants have no inputs.
+      CHECK_EQ(0, input_count);
+      break;
+    case IrOpcode::kPhi: {
+      // Phi input count matches parent control node.
+      CHECK_EQ(1, control_count);
+      Node* control = NodeProperties::GetControlInput(node, 0);
+      CHECK_EQ(value_count,
+               OperatorProperties::GetControlInputCount(control->op()));
+      break;
+    }
+    case IrOpcode::kEffectPhi: {
+      // EffectPhi input count matches parent control node.
+      CHECK_EQ(1, control_count);
+      Node* control = NodeProperties::GetControlInput(node, 0);
+      CHECK_EQ(effect_count,
+               OperatorProperties::GetControlInputCount(control->op()));
+      break;
+    }
+    case IrOpcode::kFrameState:
+      // TODO(jarin): what are the constraints on these?
+      break;
+    case IrOpcode::kCall:
+      // TODO(rossberg): what are the constraints on these?
+      break;
+    case IrOpcode::kProjection: {
+      // Projection has an input that produces enough values.
+      size_t index = OpParameter<size_t>(node);
+      Node* input = NodeProperties::GetValueInput(node, 0);
+      CHECK_GT(OperatorProperties::GetValueOutputCount(input->op()),
+               static_cast<int>(index));
+      break;
+    }
+    default:
+      // TODO(rossberg): Check other node kinds.
+      break;
+  }
+
+  if (from_start) {
+    reached_from_start.insert(node);
+  } else {
+    reached_from_end.insert(node);
+  }
+
+  return GenericGraphVisit::CONTINUE;
+}
+
+
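+// Verifies the graph by visiting all nodes reachable from start (following
+// use edges) and from end (following input edges), then checks that every
+// control node reachable from end is also reachable from start.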
+void Verifier::Run(Graph* graph) {
+  Visitor visitor(graph->zone());
+
+  CHECK_NE(NULL, graph->start());
+  visitor.from_start = true;
+  graph->VisitNodeUsesFromStart(&visitor);
+  CHECK_NE(NULL, graph->end());
+  visitor.from_start = false;
+  graph->VisitNodeInputsFromEnd(&visitor);
+
+  // All control nodes reachable from end are reachable from start.
+  for (NodeSet::iterator it = visitor.reached_from_end.begin();
+       it != visitor.reached_from_end.end(); ++it) {
+    CHECK(!NodeProperties::IsControl(*it) ||
+          visitor.reached_from_start.count(*it));
+  }
+}
+
+
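+// Returns true if a definition of |node| appears at or before position
+// |use_pos| in |use_block|, or anywhere in a dominating block (including as
+// that block's control input).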
+static bool HasDominatingDef(Schedule* schedule, Node* node,
+                             BasicBlock* container, BasicBlock* use_block,
+                             int use_pos) {
+  BasicBlock* block = use_block;
+  while (true) {
+    while (use_pos >= 0) {
+      if (block->nodes_[use_pos] == node) return true;
+      use_pos--;
+    }
+    block = block->dominator_;
+    if (block == NULL) break;
+    use_pos = static_cast<int>(block->nodes_.size()) - 1;
+    if (node == block->control_input_) return true;
+  }
+  return false;
+}
+
+
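+// Checks that every value input of |node| has a dominating definition; for
+// phis, each input is checked against the corresponding predecessor block.
+// Reports a fatal error on violation.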
+static void CheckInputsDominate(Schedule* schedule, BasicBlock* block,
+                                Node* node, int use_pos) {
+  for (int j = OperatorProperties::GetValueInputCount(node->op()) - 1; j >= 0;
+       j--) {
+    BasicBlock* use_block = block;
+    if (node->opcode() == IrOpcode::kPhi) {
+      use_block = use_block->PredecessorAt(j);
+      use_pos = static_cast<int>(use_block->nodes_.size()) - 1;
+    }
+    Node* input = node->InputAt(j);
+    if (!HasDominatingDef(schedule, input, block, use_block, use_pos)) {
+      V8_Fatal(__FILE__, __LINE__,
+               "Node #%d:%s in B%d is not dominated by input@%d #%d:%s",
+               node->id(), node->op()->mnemonic(), block->id(), j, input->id(),
+               input->op()->mnemonic());
+    }
+  }
+}
+
+
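+// Verifies structural invariants of a schedule: the RPO order and numbering,
+// the dominator tree, phi placement, and that all uses are dominated by
+// their definitions.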
+void ScheduleVerifier::Run(Schedule* schedule) {
+  const int count = schedule->BasicBlockCount();
+  Zone tmp_zone(schedule->zone()->isolate());
+  Zone* zone = &tmp_zone;
+  BasicBlock* start = schedule->start();
+  BasicBlockVector* rpo_order = schedule->rpo_order();
+
+  // Verify the RPO order contains only blocks from this schedule.
+  CHECK_GE(count, static_cast<int>(rpo_order->size()));
+  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+       ++b) {
+    CHECK_EQ((*b), schedule->GetBlockById((*b)->id()));
+  }
+
+  // Verify RPO numbers of blocks.
+  CHECK_EQ(start, rpo_order->at(0));  // Start should be first.
+  for (size_t b = 0; b < rpo_order->size(); b++) {
+    BasicBlock* block = rpo_order->at(b);
+    CHECK_EQ(static_cast<int>(b), block->rpo_number_);
+    BasicBlock* dom = block->dominator_;
+    if (b == 0) {
+      // The start block has no dominator.
+      CHECK_EQ(NULL, dom);
+    } else {
+      // Check that the immediate dominator appears somewhere before the block.
+      CHECK_NE(NULL, dom);
+      CHECK_LT(dom->rpo_number_, block->rpo_number_);
+    }
+  }
+
+  // Verify that all blocks reachable from start are in the RPO.
+  BoolVector marked(count, false, zone);
+  {
+    ZoneQueue<BasicBlock*> queue(zone);
+    queue.push(start);
+    marked[start->id()] = true;
+    while (!queue.empty()) {
+      BasicBlock* block = queue.front();
+      queue.pop();
+      for (int s = 0; s < block->SuccessorCount(); s++) {
+        BasicBlock* succ = block->SuccessorAt(s);
+        if (!marked[succ->id()]) {
+          marked[succ->id()] = true;
+          queue.push(succ);
+        }
+      }
+    }
+  }
+  // Verify marked blocks are in the RPO.
+  for (int i = 0; i < count; i++) {
+    BasicBlock* block = schedule->GetBlockById(i);
+    if (marked[i]) {
+      CHECK_GE(block->rpo_number_, 0);
+      CHECK_EQ(block, rpo_order->at(block->rpo_number_));
+    }
+  }
+  // Verify RPO blocks are marked.
+  for (size_t b = 0; b < rpo_order->size(); b++) {
+    CHECK(marked[rpo_order->at(b)->id()]);
+  }
+
+  {
+    // Verify the dominance relation.
+    ZoneList<BitVector*> dominators(count, zone);
+    dominators.Initialize(count, zone);
+    dominators.AddBlock(NULL, count, zone);
+
+    // Compute, for each block, the set of blocks that dominate it, using a
+    // forward fixpoint. O(n^2).
+    ZoneQueue<BasicBlock*> queue(zone);
+    queue.push(start);
+    dominators[start->id()] = new (zone) BitVector(count, zone);
+    while (!queue.empty()) {
+      BasicBlock* block = queue.front();
+      queue.pop();
+      BitVector* block_doms = dominators[block->id()];
+      BasicBlock* idom = block->dominator_;
+      if (idom != NULL && !block_doms->Contains(idom->id())) {
+        V8_Fatal(__FILE__, __LINE__, "Block B%d is not dominated by B%d",
+                 block->id(), idom->id());
+      }
+      for (int s = 0; s < block->SuccessorCount(); s++) {
+        BasicBlock* succ = block->SuccessorAt(s);
+        BitVector* succ_doms = dominators[succ->id()];
+
+        if (succ_doms == NULL) {
+          // First time visiting the node. S.doms = B U B.doms
+          succ_doms = new (zone) BitVector(count, zone);
+          succ_doms->CopyFrom(*block_doms);
+          succ_doms->Add(block->id());
+          dominators[succ->id()] = succ_doms;
+          queue.push(succ);
+        } else {
+          // Nth time visiting the successor. S.doms = S.doms ^ (B U B.doms)
+          bool had = succ_doms->Contains(block->id());
+          if (had) succ_doms->Remove(block->id());
+          if (succ_doms->IntersectIsChanged(*block_doms)) queue.push(succ);
+          if (had) succ_doms->Add(block->id());
+        }
+      }
+    }
+
+    // Verify that dominators are immediate: every other dominator of a
+    // block must also dominate the block's immediate dominator.
+    for (BasicBlockVector::iterator b = rpo_order->begin();
+         b != rpo_order->end(); ++b) {
+      BasicBlock* block = *b;
+      BasicBlock* idom = block->dominator_;
+      if (idom == NULL) continue;
+      BitVector* block_doms = dominators[block->id()];
+
+      for (BitVector::Iterator it(block_doms); !it.Done(); it.Advance()) {
+        BasicBlock* dom = schedule->GetBlockById(it.Current());
+        if (dom != idom && !dominators[idom->id()]->Contains(dom->id())) {
+          V8_Fatal(__FILE__, __LINE__,
+                   "Block B%d is not immediately dominated by B%d", block->id(),
+                   idom->id());
+        }
+      }
+    }
+  }
+
+  // Verify phis are placed in the block of their control input.
+  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+       ++b) {
+    for (BasicBlock::const_iterator i = (*b)->begin(); i != (*b)->end(); ++i) {
+      Node* phi = *i;
+      if (phi->opcode() != IrOpcode::kPhi) continue;
+      // TODO(titzer): Nasty special case. Phis from RawMachineAssembler
+      // schedules don't have control inputs.
+      if (phi->InputCount() >
+          OperatorProperties::GetValueInputCount(phi->op())) {
+        Node* control = NodeProperties::GetControlInput(phi);
+        CHECK(control->opcode() == IrOpcode::kMerge ||
+              control->opcode() == IrOpcode::kLoop);
+        CHECK_EQ((*b), schedule->block(control));
+      }
+    }
+  }
+
+  // Verify that all uses are dominated by their definitions.
+  for (BasicBlockVector::iterator b = rpo_order->begin(); b != rpo_order->end();
+       ++b) {
+    BasicBlock* block = *b;
+
+    // Check inputs to control for this block.
+    Node* control = block->control_input_;
+    if (control != NULL) {
+      CHECK_EQ(block, schedule->block(control));
+      CheckInputsDominate(schedule, block, control,
+                          static_cast<int>(block->nodes_.size()) - 1);
+    }
+    // Check inputs for all nodes in the block.
+    for (size_t i = 0; i < block->nodes_.size(); i++) {
+      Node* node = block->nodes_[i];
+      CheckInputsDominate(schedule, block, node, static_cast<int>(i) - 1);
+    }
+  }
+}
+}
+}
+}  // namespace v8::internal::compiler
diff --git a/src/compiler/verifier.h b/src/compiler/verifier.h
new file mode 100644
index 0000000..b5c028e
--- /dev/null
+++ b/src/compiler/verifier.h
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_VERIFIER_H_
+#define V8_COMPILER_VERIFIER_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class Graph;
+class Schedule;
+
+// Verifies properties of a graph, such as the well-formedness of inputs to
+// each node, etc.
+class Verifier {
+ public:
+  static void Run(Graph* graph);
+
+ private:
+  class Visitor;
+  DISALLOW_COPY_AND_ASSIGN(Verifier);
+};
+
+// Verifies properties of a schedule, such as dominance, phi placement, etc.
+class ScheduleVerifier {
+ public:
+  static void Run(Schedule* schedule);
+};
+}
+}
+}  // namespace v8::internal::compiler
+
+#endif  // V8_COMPILER_VERIFIER_H_
diff --git a/src/compiler/x64/code-generator-x64.cc b/src/compiler/x64/code-generator-x64.cc
new file mode 100644
index 0000000..f71d3bf
--- /dev/null
+++ b/src/compiler/x64/code-generator-x64.cc
@@ -0,0 +1,1024 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/scopes.h"
+#include "src/x64/assembler-x64.h"
+#include "src/x64/macro-assembler-x64.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(turbofan): Clean up these hacks.
+enum Immediate64Type { kImm64Value, kImm64Handle, kImm64Reference };
+
+
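+// A 64-bit immediate, which is either a raw value, a handle to a heap
+// object, or an external reference; |type| selects the active field.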
+struct Immediate64 {
+  uint64_t value;
+  Handle<Object> handle;
+  ExternalReference reference;
+  Immediate64Type type;
+};
+
+
+enum RegisterOrOperandType { kRegister, kDoubleRegister, kOperand };
+
+
+struct RegisterOrOperand {
+  RegisterOrOperand() : operand(no_reg, 0) {}
+  Register reg;
+  DoubleRegister double_reg;
+  Operand operand;
+  RegisterOrOperandType type;
+};
+
+
+// Adds X64-specific methods for decoding operands.
+class X64OperandConverter : public InstructionOperandConverter {
+ public:
+  X64OperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  RegisterOrOperand InputRegisterOrOperand(int index) {
+    return ToRegisterOrOperand(instr_->InputAt(index));
+  }
+
+  Immediate InputImmediate(int index) {
+    return ToImmediate(instr_->InputAt(index));
+  }
+
+  RegisterOrOperand OutputRegisterOrOperand() {
+    return ToRegisterOrOperand(instr_->Output());
+  }
+
+  Immediate64 InputImmediate64(int index) {
+    return ToImmediate64(instr_->InputAt(index));
+  }
+
+  Immediate64 ToImmediate64(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    Immediate64 immediate;
+    immediate.value = 0xbeefdeaddeefbeed;
+    immediate.type = kImm64Value;
+    switch (constant.type()) {
+      case Constant::kInt32:
+      case Constant::kInt64:
+        immediate.value = constant.ToInt64();
+        return immediate;
+      case Constant::kFloat64:
+        immediate.type = kImm64Handle;
+        immediate.handle =
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED);
+        return immediate;
+      case Constant::kExternalReference:
+        immediate.type = kImm64Reference;
+        immediate.reference = constant.ToExternalReference();
+        return immediate;
+      case Constant::kHeapObject:
+        immediate.type = kImm64Handle;
+        immediate.handle = constant.ToHeapObject();
+        return immediate;
+    }
+    UNREACHABLE();
+    return immediate;
+  }
+
+  Immediate ToImmediate(InstructionOperand* operand) {
+    Constant constant = ToConstant(operand);
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Immediate(constant.ToInt32());
+      case Constant::kInt64:
+      case Constant::kFloat64:
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        break;
+    }
+    UNREACHABLE();
+    return Immediate(-1);
+  }
+
+  Operand ToOperand(InstructionOperand* op, int extra = 0) {
+    RegisterOrOperand result = ToRegisterOrOperand(op, extra);
+    DCHECK_EQ(kOperand, result.type);
+    return result.operand;
+  }
+
+  RegisterOrOperand ToRegisterOrOperand(InstructionOperand* op, int extra = 0) {
+    RegisterOrOperand result;
+    if (op->IsRegister()) {
+      DCHECK(extra == 0);
+      result.type = kRegister;
+      result.reg = ToRegister(op);
+      return result;
+    } else if (op->IsDoubleRegister()) {
+      DCHECK(extra == 0);
+      result.type = kDoubleRegister;
+      result.double_reg = ToDoubleRegister(op);
+      return result;
+    }
+
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+
+    result.type = kOperand;
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), extra);
+    result.operand =
+        Operand(offset.from_stack_pointer() ? rsp : rbp, offset.offset());
+    return result;
+  }
+
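+  // Decodes a memory operand from the instruction's inputs, starting at
+  // index |*first_input| and advancing it past the inputs consumed.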
+  Operand MemoryOperand(int* first_input) {
+    const int offset = *first_input;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_MR1I: {
+        *first_input += 2;
+        Register index = InputRegister(offset + 1);
+        return Operand(InputRegister(offset + 0), index, times_1,
+                       0);  // TODO(dcarney): K != 0
+      }
+      case kMode_MRI:
+        *first_input += 2;
+        return Operand(InputRegister(offset + 0), InputInt32(offset + 1));
+      default:
+        UNREACHABLE();
+        return Operand(no_reg, 0);
+    }
+  }
+
+  Operand MemoryOperand() {
+    int first_input = 0;
+    return MemoryOperand(&first_input);
+  }
+};
+
+
+static bool HasImmediateInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsImmediate();
+}
+
+
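+// Emits a binary instruction, selecting the immediate, register, or memory
+// form of the second operand based on the instruction's inputs.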
+#define ASSEMBLE_BINOP(asm_instr)                            \
+  do {                                                       \
+    if (HasImmediateInput(instr, 1)) {                       \
+      RegisterOrOperand input = i.InputRegisterOrOperand(0); \
+      if (input.type == kRegister) {                         \
+        __ asm_instr(input.reg, i.InputImmediate(1));        \
+      } else {                                               \
+        __ asm_instr(input.operand, i.InputImmediate(1));    \
+      }                                                      \
+    } else {                                                 \
+      RegisterOrOperand input = i.InputRegisterOrOperand(1); \
+      if (input.type == kRegister) {                         \
+        __ asm_instr(i.InputRegister(0), input.reg);         \
+      } else {                                               \
+        __ asm_instr(i.InputRegister(0), input.operand);     \
+      }                                                      \
+    }                                                        \
+  } while (0)
+
+
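+// Emits a shift instruction. The shift count is either an immediate (read
+// as a |width|-bit value: 5 bits for 32-bit shifts, 6 bits for 64-bit ones)
+// or implicitly taken from the cl register.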
+#define ASSEMBLE_SHIFT(asm_instr, width)                                 \
+  do {                                                                   \
+    if (HasImmediateInput(instr, 1)) {                                   \
+      __ asm_instr(i.OutputRegister(), Immediate(i.InputInt##width(1))); \
+    } else {                                                             \
+      __ asm_instr##_cl(i.OutputRegister());                             \
+    }                                                                    \
+  } while (0)
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  X64OperandConverter i(this, instr);
+
+  switch (ArchOpcodeField::decode(instr->opcode())) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (HasImmediateInput(instr, 0)) {
+        Handle<Code> code = Handle<Code>::cast(i.InputHeapObject(0));
+        __ Call(code, RelocInfo::CODE_TARGET);
+      } else {
+        Register reg = i.InputRegister(0);
+        int entry = Code::kHeaderSize - kHeapObjectTag;
+        __ Call(Operand(reg, entry));
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ cmpp(rsi, FieldOperand(func, JSFunction::kContextOffset));
+        __ Assert(equal, kWrongFunctionContext);
+      }
+      __ Call(FieldOperand(func, JSFunction::kCodeEntryOffset));
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      __ jmp(code_->GetLabel(i.InputBlock(0)));
+      break;
+    case kArchNop:
+      // Don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kX64Add32:
+      ASSEMBLE_BINOP(addl);
+      break;
+    case kX64Add:
+      ASSEMBLE_BINOP(addq);
+      break;
+    case kX64Sub32:
+      ASSEMBLE_BINOP(subl);
+      break;
+    case kX64Sub:
+      ASSEMBLE_BINOP(subq);
+      break;
+    case kX64And32:
+      ASSEMBLE_BINOP(andl);
+      break;
+    case kX64And:
+      ASSEMBLE_BINOP(andq);
+      break;
+    case kX64Cmp32:
+      ASSEMBLE_BINOP(cmpl);
+      break;
+    case kX64Cmp:
+      ASSEMBLE_BINOP(cmpq);
+      break;
+    case kX64Test32:
+      ASSEMBLE_BINOP(testl);
+      break;
+    case kX64Test:
+      ASSEMBLE_BINOP(testq);
+      break;
+    case kX64Imul32:
+      if (HasImmediateInput(instr, 1)) {
+        RegisterOrOperand input = i.InputRegisterOrOperand(0);
+        if (input.type == kRegister) {
+          __ imull(i.OutputRegister(), input.reg, i.InputImmediate(1));
+        } else {
+          __ movq(kScratchRegister, input.operand);
+          __ imull(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+        }
+      } else {
+        RegisterOrOperand input = i.InputRegisterOrOperand(1);
+        if (input.type == kRegister) {
+          __ imull(i.OutputRegister(), input.reg);
+        } else {
+          __ imull(i.OutputRegister(), input.operand);
+        }
+      }
+      break;
+    case kX64Imul:
+      if (HasImmediateInput(instr, 1)) {
+        RegisterOrOperand input = i.InputRegisterOrOperand(0);
+        if (input.type == kRegister) {
+          __ imulq(i.OutputRegister(), input.reg, i.InputImmediate(1));
+        } else {
+          __ movq(kScratchRegister, input.operand);
+          __ imulq(i.OutputRegister(), kScratchRegister, i.InputImmediate(1));
+        }
+      } else {
+        RegisterOrOperand input = i.InputRegisterOrOperand(1);
+        if (input.type == kRegister) {
+          __ imulq(i.OutputRegister(), input.reg);
+        } else {
+          __ imulq(i.OutputRegister(), input.operand);
+        }
+      }
+      break;
+    case kX64Idiv32:
+      __ cdq();
+      __ idivl(i.InputRegister(1));
+      break;
+    case kX64Idiv:
+      __ cqo();
+      __ idivq(i.InputRegister(1));
+      break;
+    case kX64Udiv32:
+      __ xorl(rdx, rdx);
+      __ divl(i.InputRegister(1));
+      break;
+    case kX64Udiv:
+      __ xorq(rdx, rdx);
+      __ divq(i.InputRegister(1));
+      break;
+    case kX64Not: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ notq(output.reg);
+      } else {
+        __ notq(output.operand);
+      }
+      break;
+    }
+    case kX64Not32: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ notl(output.reg);
+      } else {
+        __ notl(output.operand);
+      }
+      break;
+    }
+    case kX64Neg: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ negq(output.reg);
+      } else {
+        __ negq(output.operand);
+      }
+      break;
+    }
+    case kX64Neg32: {
+      RegisterOrOperand output = i.OutputRegisterOrOperand();
+      if (output.type == kRegister) {
+        __ negl(output.reg);
+      } else {
+        __ negl(output.operand);
+      }
+      break;
+    }
+    case kX64Or32:
+      ASSEMBLE_BINOP(orl);
+      break;
+    case kX64Or:
+      ASSEMBLE_BINOP(orq);
+      break;
+    case kX64Xor32:
+      ASSEMBLE_BINOP(xorl);
+      break;
+    case kX64Xor:
+      ASSEMBLE_BINOP(xorq);
+      break;
+    case kX64Shl32:
+      ASSEMBLE_SHIFT(shll, 5);
+      break;
+    case kX64Shl:
+      ASSEMBLE_SHIFT(shlq, 6);
+      break;
+    case kX64Shr32:
+      ASSEMBLE_SHIFT(shrl, 5);
+      break;
+    case kX64Shr:
+      ASSEMBLE_SHIFT(shrq, 6);
+      break;
+    case kX64Sar32:
+      ASSEMBLE_SHIFT(sarl, 5);
+      break;
+    case kX64Sar:
+      ASSEMBLE_SHIFT(sarq, 6);
+      break;
+    case kX64Ror32:
+      ASSEMBLE_SHIFT(rorl, 5);
+      break;
+    case kX64Ror:
+      ASSEMBLE_SHIFT(rorq, 6);
+      break;
+    case kSSEFloat64Cmp: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(1);
+      if (input.type == kDoubleRegister) {
+        __ ucomisd(i.InputDoubleRegister(0), input.double_reg);
+      } else {
+        __ ucomisd(i.InputDoubleRegister(0), input.operand);
+      }
+      break;
+    }
+    case kSSEFloat64Add:
+      __ addsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Sub:
+      __ subsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mul:
+      __ mulsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Div:
+      __ divsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
+      break;
+    case kSSEFloat64Mod: {
+      __ subq(rsp, Immediate(kDoubleSize));
+      // Move values to st(0) and st(1).
+      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(1));
+      __ fld_d(Operand(rsp, 0));
+      __ movsd(Operand(rsp, 0), i.InputDoubleRegister(0));
+      __ fld_d(Operand(rsp, 0));
+      // Loop while fprem isn't done.
+      Label mod_loop;
+      __ bind(&mod_loop);
+      // This instruction traps on all kinds of inputs, but we are assuming
+      // the floating point control word is set to ignore them all.
+      __ fprem();
+      // The following 2 instructions implicitly use rax.
+      __ fnstsw_ax();
+      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
+        __ sahf();
+      } else {
+        __ shrl(rax, Immediate(8));
+        __ andl(rax, Immediate(0xFF));
+        __ pushq(rax);
+        __ popfq();
+      }
+      __ j(parity_even, &mod_loop);
+      // Move output to stack and clean up.
+      __ fstp(1);
+      __ fstp_d(Operand(rsp, 0));
+      __ movsd(i.OutputDoubleRegister(), Operand(rsp, 0));
+      __ addq(rsp, Immediate(kDoubleSize));
+      break;
+    }
+    case kSSEFloat64Sqrt: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kDoubleRegister) {
+        __ sqrtsd(i.OutputDoubleRegister(), input.double_reg);
+      } else {
+        __ sqrtsd(i.OutputDoubleRegister(), input.operand);
+      }
+      break;
+    }
+    case kSSEFloat64ToInt32: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kDoubleRegister) {
+        __ cvttsd2si(i.OutputRegister(), input.double_reg);
+      } else {
+        __ cvttsd2si(i.OutputRegister(), input.operand);
+      }
+      break;
+    }
+    case kSSEFloat64ToUint32: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kDoubleRegister) {
+        __ cvttsd2siq(i.OutputRegister(), input.double_reg);
+      } else {
+        __ cvttsd2siq(i.OutputRegister(), input.operand);
+      }
+      __ andl(i.OutputRegister(), i.OutputRegister());  // clear upper bits.
+      // TODO(turbofan): generated code should not look at the upper 32 bits
+      // of the result, but those bits could escape to the outside world.
+      break;
+    }
+    case kSSEInt32ToFloat64: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kRegister) {
+        __ cvtlsi2sd(i.OutputDoubleRegister(), input.reg);
+      } else {
+        __ cvtlsi2sd(i.OutputDoubleRegister(), input.operand);
+      }
+      break;
+    }
+    case kSSEUint32ToFloat64: {
+      // TODO(turbofan): X64 SSE cvtqsi2sd should support operands.
+      __ cvtqsi2sd(i.OutputDoubleRegister(), i.InputRegister(0));
+      break;
+    }
+    case kX64Movsxbl:
+      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Movzxbl:
+      __ movzxbl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Movb: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      if (HasImmediateInput(instr, index)) {
+        __ movb(operand, Immediate(i.InputInt8(index)));
+      } else {
+        __ movb(operand, i.InputRegister(index));
+      }
+      break;
+    }
+    case kX64Movsxwl:
+      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Movzxwl:
+      __ movzxwl(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kX64Movw: {
+      int index = 0;
+      Operand operand = i.MemoryOperand(&index);
+      if (HasImmediateInput(instr, index)) {
+        __ movw(operand, Immediate(i.InputInt16(index)));
+      } else {
+        __ movw(operand, i.InputRegister(index));
+      }
+      break;
+    }
+    case kX64Movl:
+      if (instr->HasOutput()) {
+        if (instr->addressing_mode() == kMode_None) {
+          RegisterOrOperand input = i.InputRegisterOrOperand(0);
+          if (input.type == kRegister) {
+            __ movl(i.OutputRegister(), input.reg);
+          } else {
+            __ movl(i.OutputRegister(), input.operand);
+          }
+        } else {
+          __ movl(i.OutputRegister(), i.MemoryOperand());
+        }
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ movl(operand, i.InputImmediate(index));
+        } else {
+          __ movl(operand, i.InputRegister(index));
+        }
+      }
+      break;
+    case kX64Movsxlq: {
+      RegisterOrOperand input = i.InputRegisterOrOperand(0);
+      if (input.type == kRegister) {
+        __ movsxlq(i.OutputRegister(), input.reg);
+      } else {
+        __ movsxlq(i.OutputRegister(), input.operand);
+      }
+      break;
+    }
+    case kX64Movq:
+      if (instr->HasOutput()) {
+        __ movq(i.OutputRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        if (HasImmediateInput(instr, index)) {
+          __ movq(operand, i.InputImmediate(index));
+        } else {
+          __ movq(operand, i.InputRegister(index));
+        }
+      }
+      break;
+    case kX64Movss:
+      if (instr->HasOutput()) {
+        __ movss(i.OutputDoubleRegister(), i.MemoryOperand());
+        __ cvtss2sd(i.OutputDoubleRegister(), i.OutputDoubleRegister());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ cvtsd2ss(xmm0, i.InputDoubleRegister(index));
+        __ movss(operand, xmm0);
+      }
+      break;
+    case kX64Movsd:
+      if (instr->HasOutput()) {
+        __ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
+      } else {
+        int index = 0;
+        Operand operand = i.MemoryOperand(&index);
+        __ movsd(operand, i.InputDoubleRegister(index));
+      }
+      break;
+    case kX64Push:
+      if (HasImmediateInput(instr, 0)) {
+        __ pushq(i.InputImmediate(0));
+      } else {
+        RegisterOrOperand input = i.InputRegisterOrOperand(0);
+        if (input.type == kRegister) {
+          __ pushq(input.reg);
+        } else {
+          __ pushq(input.operand);
+        }
+      }
+      break;
+    case kX64StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ movsxlq(index, index);
+      __ movq(Operand(object, index, times_1, 0), value);
+      __ leaq(index, Operand(object, index, times_1, 0));
+      SaveFPRegsMode mode = code_->frame()->DidAllocateDoubleRegisters()
+                                ? kSaveFPRegs
+                                : kDontSaveFPRegs;
+      __ RecordWrite(object, index, value, mode);
+      break;
+    }
+  }
+}
+
+
+// Assembles branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr,
+                                       FlagsCondition condition) {
+  X64OperandConverter i(this, instr);
+  Label done;
+
+  // Emit a branch. The true and false targets are always the last two inputs
+  // to the instruction.
+  BasicBlock* tblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 2);
+  BasicBlock* fblock = i.InputBlock(static_cast<int>(instr->InputCount()) - 1);
+  bool fallthru = IsNextInAssemblyOrder(fblock);
+  Label* tlabel = code()->GetLabel(tblock);
+  Label* flabel = fallthru ? &done : code()->GetLabel(fblock);
+  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kEqual:
+      __ j(equal, tlabel);
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kNotEqual:
+      __ j(not_equal, tlabel);
+      break;
+    case kSignedLessThan:
+      __ j(less, tlabel);
+      break;
+    case kSignedGreaterThanOrEqual:
+      __ j(greater_equal, tlabel);
+      break;
+    case kSignedLessThanOrEqual:
+      __ j(less_equal, tlabel);
+      break;
+    case kSignedGreaterThan:
+      __ j(greater, tlabel);
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThan:
+      __ j(below, tlabel);
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      __ j(above_equal, tlabel);
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_even, flabel, flabel_distance);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      __ j(below_equal, tlabel);
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_even, tlabel);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      __ j(above, tlabel);
+      break;
+    case kOverflow:
+      __ j(overflow, tlabel);
+      break;
+    case kNotOverflow:
+      __ j(no_overflow, tlabel);
+      break;
+  }
+  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
+  __ bind(&done);
+}
+
+
+// Assembles boolean materializations after this instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  X64OperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 64-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label check;
+  DCHECK_NE(0, instr->OutputCount());
+  Register reg = i.OutputRegister(static_cast<int>(instr->OutputCount() - 1));
+  Condition cc = no_condition;
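+  // For the kUnordered* conditions, the parity flag is set when an operand
+  // was NaN; in that case the result is materialized directly and the setcc
+  // sequence below is skipped.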
+  switch (condition) {
+    case kUnorderedEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kEqual:
+      cc = equal;
+      break;
+    case kUnorderedNotEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kNotEqual:
+      cc = not_equal;
+      break;
+    case kSignedLessThan:
+      cc = less;
+      break;
+    case kSignedGreaterThanOrEqual:
+      cc = greater_equal;
+      break;
+    case kSignedLessThanOrEqual:
+      cc = less_equal;
+      break;
+    case kSignedGreaterThan:
+      cc = greater;
+      break;
+    case kUnorderedLessThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThan:
+      cc = below;
+      break;
+    case kUnorderedGreaterThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThanOrEqual:
+      cc = above_equal;
+      break;
+    case kUnorderedLessThanOrEqual:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(0));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedLessThanOrEqual:
+      cc = below_equal;
+      break;
+    case kUnorderedGreaterThan:
+      __ j(parity_odd, &check, Label::kNear);
+      __ movl(reg, Immediate(1));
+      __ jmp(&done, Label::kNear);
+    // Fall through.
+    case kUnsignedGreaterThan:
+      cc = above;
+      break;
+    case kOverflow:
+      cc = overflow;
+      break;
+    case kNotOverflow:
+      cc = no_overflow;
+      break;
+  }
+  __ bind(&check);
+  __ setcc(cc, reg);
+  __ movzxbl(reg, reg);
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ pushq(rbp);
+    __ movq(rbp, rsp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        __ pushq(Register::from_code(i));
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = linkage()->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
+      __ movp(rcx, args.GetReceiverOperand());
+      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
+      __ j(not_equal, &ok, Label::kNear);
+      __ movp(rcx, GlobalObjectOperand());
+      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
+      __ movp(args.GetReceiverOperand(), rcx);
+      __ bind(&ok);
+    }
+
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  if (stack_slots > 0) {
+    __ subq(rsp, Immediate(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ addq(rsp, Immediate(stack_slots * kPointerSize));
+      }
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      // Restore registers.
+      if (saves != 0) {
+        for (int i = 0; i < Register::kNumRegisters; i++) {
+          if (!((1 << i) & saves)) continue;
+          __ popq(Register::from_code(i));
+        }
+      }
+      __ popq(rbp);  // Pop caller's frame pointer.
+      __ ret(0);
+    } else {
+      // No saved registers.
+      __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
+      __ popq(rbp);       // Pop caller's frame pointer.
+      __ ret(0);
+    }
+  } else {
+    __ movq(rsp, rbp);  // Move stack pointer back to frame pointer.
+    __ popq(rbp);       // Pop caller's frame pointer.
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ ret(pop_count * kPointerSize);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  X64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ movq(g.ToRegister(destination), src);
+    } else {
+      __ movq(g.ToOperand(destination), src);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ movq(dst, src);
+    } else {
+      // Spill on demand to use a temporary register for memory-to-memory
+      // moves.
+      Register tmp = kScratchRegister;
+      Operand dst = g.ToOperand(destination);
+      __ movq(tmp, src);
+      __ movq(dst, tmp);
+    }
+  } else if (source->IsConstant()) {
+    ConstantOperand* constant_source = ConstantOperand::cast(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst = destination->IsRegister() ? g.ToRegister(destination)
+                                               : kScratchRegister;
+      Immediate64 imm = g.ToImmediate64(constant_source);
+      switch (imm.type) {
+        case kImm64Value:
+          __ Set(dst, imm.value);
+          break;
+        case kImm64Reference:
+          __ Move(dst, imm.reference);
+          break;
+        case kImm64Handle:
+          __ Move(dst, imm.handle);
+          break;
+      }
+      if (destination->IsStackSlot()) {
+        __ movq(g.ToOperand(destination), kScratchRegister);
+      }
+    } else {
+      __ movq(kScratchRegister,
+              bit_cast<uint64_t, double>(g.ToDouble(constant_source)));
+      if (destination->IsDoubleRegister()) {
+        __ movq(g.ToDoubleRegister(destination), kScratchRegister);
+      } else {
+        DCHECK(destination->IsDoubleStackSlot());
+        __ movq(g.ToOperand(destination), kScratchRegister);
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    XMMRegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      Operand dst = g.ToOperand(destination);
+      __ movsd(dst, src);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    Operand src = g.ToOperand(source);
+    if (destination->IsDoubleRegister()) {
+      XMMRegister dst = g.ToDoubleRegister(destination);
+      __ movsd(dst, src);
+    } else {
+      // We rely on having xmm0 available as a fixed scratch register.
+      Operand dst = g.ToOperand(destination);
+      __ movsd(xmm0, src);
+      __ movsd(dst, xmm0);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  X64OperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister() && destination->IsRegister()) {
+    // Register-register.
+    __ xchgq(g.ToRegister(source), g.ToRegister(destination));
+  } else if (source->IsRegister() && destination->IsStackSlot()) {
+    Register src = g.ToRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ xchgq(src, dst);
+  } else if ((source->IsStackSlot() && destination->IsStackSlot()) ||
+             (source->IsDoubleStackSlot() &&
+              destination->IsDoubleStackSlot())) {
+    // Memory-memory.
+    Register tmp = kScratchRegister;
+    Operand src = g.ToOperand(source);
+    Operand dst = g.ToOperand(destination);
+    __ movq(tmp, dst);
+    __ xchgq(tmp, src);
+    __ movq(dst, tmp);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+    // XMM register-register swap. We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    XMMRegister dst = g.ToDoubleRegister(destination);
+    __ movsd(xmm0, src);
+    __ movsd(src, dst);
+    __ movsd(dst, xmm0);
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
+    // XMM register-memory swap.  We rely on having xmm0
+    // available as a fixed scratch register.
+    XMMRegister src = g.ToDoubleRegister(source);
+    Operand dst = g.ToOperand(destination);
+    __ movsd(xmm0, src);
+    __ movsd(src, dst);
+    __ movsd(dst, xmm0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() { __ nop(); }
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!linkage()->info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      __ Nop(padding_size);
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/x64/instruction-codes-x64.h b/src/compiler/x64/instruction-codes-x64.h
new file mode 100644
index 0000000..dfad203
--- /dev/null
+++ b/src/compiler/x64/instruction-codes-x64.h
@@ -0,0 +1,101 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+#define V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// X64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(X64Add)                        \
+  V(X64Add32)                      \
+  V(X64And)                        \
+  V(X64And32)                      \
+  V(X64Cmp)                        \
+  V(X64Cmp32)                      \
+  V(X64Test)                       \
+  V(X64Test32)                     \
+  V(X64Or)                         \
+  V(X64Or32)                       \
+  V(X64Xor)                        \
+  V(X64Xor32)                      \
+  V(X64Sub)                        \
+  V(X64Sub32)                      \
+  V(X64Imul)                       \
+  V(X64Imul32)                     \
+  V(X64Idiv)                       \
+  V(X64Idiv32)                     \
+  V(X64Udiv)                       \
+  V(X64Udiv32)                     \
+  V(X64Not)                        \
+  V(X64Not32)                      \
+  V(X64Neg)                        \
+  V(X64Neg32)                      \
+  V(X64Shl)                        \
+  V(X64Shl32)                      \
+  V(X64Shr)                        \
+  V(X64Shr32)                      \
+  V(X64Sar)                        \
+  V(X64Sar32)                      \
+  V(X64Ror)                        \
+  V(X64Ror32)                      \
+  V(SSEFloat64Cmp)                 \
+  V(SSEFloat64Add)                 \
+  V(SSEFloat64Sub)                 \
+  V(SSEFloat64Mul)                 \
+  V(SSEFloat64Div)                 \
+  V(SSEFloat64Mod)                 \
+  V(SSEFloat64Sqrt)                \
+  V(SSEFloat64ToInt32)             \
+  V(SSEFloat64ToUint32)            \
+  V(SSEInt32ToFloat64)             \
+  V(SSEUint32ToFloat64)            \
+  V(X64Movsxbl)                    \
+  V(X64Movzxbl)                    \
+  V(X64Movb)                       \
+  V(X64Movsxwl)                    \
+  V(X64Movzxwl)                    \
+  V(X64Movw)                       \
+  V(X64Movl)                       \
+  V(X64Movsxlq)                    \
+  V(X64Movq)                       \
+  V(X64Movsd)                      \
+  V(X64Movss)                      \
+  V(X64Push)                       \
+  V(X64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MR = [register]
+// MI = [immediate]
+// MRN = [register + register * N in {1, 2, 4, 8}]
+// MRI = [register + immediate]
+// MRNI = [register + register * N in {1, 2, 4, 8} + immediate]
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MR)   /* [%r1] */                  \
+  V(MRI)  /* [%r1 + K] */              \
+  V(MR1I) /* [%r1 + %r2 + K] */        \
+  V(MR2I) /* [%r1 + %r2*2 + K] */      \
+  V(MR4I) /* [%r1 + %r2*4 + K] */      \
+  V(MR8I) /* [%r1 + %r2*8 + K] */
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_X64_INSTRUCTION_CODES_X64_H_
diff --git a/src/compiler/x64/instruction-selector-x64-unittest.cc b/src/compiler/x64/instruction-selector-x64-unittest.cc
new file mode 100644
index 0000000..22f0bce
--- /dev/null
+++ b/src/compiler/x64/instruction-selector-x64-unittest.cc
@@ -0,0 +1,111 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// -----------------------------------------------------------------------------
+// Conversions.
+
+
+TEST_F(InstructionSelectorTest, ChangeInt32ToInt64WithParameter) {
+  StreamBuilder m(this, kMachInt64, kMachInt32);
+  m.Return(m.ChangeInt32ToInt64(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Movsxlq, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, ChangeUint32ToUint64WithParameter) {
+  StreamBuilder m(this, kMachUint64, kMachUint32);
+  m.Return(m.ChangeUint32ToUint64(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithParameter) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  m.Return(m.TruncateInt64ToInt32(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Movl, s[0]->arch_opcode());
+}
+
+
+// -----------------------------------------------------------------------------
+// Loads and stores.
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccess& memacc) {
+  OStringStream ost;
+  ost << memacc.type;
+  return os << ost.c_str();
+}
+
+
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kX64Movsxbl, kX64Movb},
+    {kMachUint8, kX64Movzxbl, kX64Movb},
+    {kMachInt16, kX64Movsxwl, kX64Movw},
+    {kMachUint16, kX64Movzxwl, kX64Movw},
+    {kMachInt32, kX64Movl, kX64Movl},
+    {kMachUint32, kX64Movl, kX64Movl},
+    {kMachInt64, kX64Movq, kX64Movq},
+    {kMachUint64, kX64Movq, kX64Movq},
+    {kMachFloat32, kX64Movss, kX64Movss},
+    {kMachFloat64, kX64Movsd, kX64Movsd}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1), m.Parameter(2));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(0U, s[0]->OutputCount());
+}
+
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/x64/instruction-selector-x64.cc b/src/compiler/x64/instruction-selector-x64.cc
new file mode 100644
index 0000000..5fe7bad
--- /dev/null
+++ b/src/compiler/x64/instruction-selector-x64.cc
@@ -0,0 +1,723 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Adds X64-specific methods for generating operands.
+class X64OperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit X64OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
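+  // Creates a temp operand that is fixed to the given register.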
+  InstructionOperand* TempRegister(Register reg) {
+    return new (zone()) UnallocatedOperand(UnallocatedOperand::FIXED_REGISTER,
+                                           Register::ToAllocationIndex(reg));
+  }
+
+  InstructionOperand* UseByteRegister(Node* node) {
+    // TODO(dcarney): relax constraint.
+    return UseFixed(node, rdx);
+  }
+
+  InstructionOperand* UseImmediate64(Node* node) { return UseImmediate(node); }
+
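+  // Returns true if the node is a constant that can be encoded as a 32-bit
+  // immediate operand.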
+  bool CanBeImmediate(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return true;
+      default:
+        return false;
+    }
+  }
+
+  bool CanBeImmediate64(Node* node) {
+    switch (node->opcode()) {
+      case IrOpcode::kInt32Constant:
+        return true;
+      case IrOpcode::kNumberConstant:
+        return true;
+      case IrOpcode::kHeapConstant: {
+        // Constants in new space cannot be used as immediates in V8 because
+        // the GC does not scan code objects when collecting the new generation.
+        Unique<HeapObject> value = OpParameter<Unique<HeapObject> >(node);
+        return !isolate()->heap()->InNewSpace(*value.handle());
+      }
+      default:
+        return false;
+    }
+  }
+};
+
+
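+// Selects a load instruction based on the load representation and chooses
+// an addressing mode depending on which of base/index is an immediate.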
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  X64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  // TODO(titzer): signed/unsigned small loads
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kX64Movss;
+      break;
+    case kRepFloat64:
+      opcode = kX64Movsd;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kX64Movsxbl : kX64Movzxbl;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kX64Movsxwl : kX64Movzxwl;
+      break;
+    case kRepWord32:
+      opcode = kX64Movl;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kX64Movq;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    // load [#base + %index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
+  } else if (g.CanBeImmediate(index)) {  // load [%base + #index]
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {  // load [%base + %index + K]
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  X64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(rcx), g.TempRegister(rdx)};
+    Emit(kX64StoreWriteBarrier, NULL, g.UseFixed(base, rbx),
+         g.UseFixed(index, rcx), g.UseFixed(value, rdx), arraysize(temps),
+         temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+  InstructionOperand* val;
+  if (g.CanBeImmediate(value)) {
+    val = g.UseImmediate(value);
+  } else if (rep == kRepWord8 || rep == kRepBit) {
+    val = g.UseByteRegister(value);
+  } else {
+    val = g.UseRegister(value);
+  }
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kX64Movss;
+      break;
+    case kRepFloat64:
+      opcode = kX64Movsd;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kX64Movb;
+      break;
+    case kRepWord16:
+      opcode = kX64Movw;
+      break;
+    case kRepWord32:
+      opcode = kX64Movl;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kX64Movq;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (g.CanBeImmediate(base)) {
+    // store [#base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(index), g.UseImmediate(base), val);
+  } else if (g.CanBeImmediate(index)) {  // store [%base + #index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), val);
+  } else {  // store [%base + %index], %|#value
+    Emit(opcode | AddressingModeField::encode(kMode_MR1I), NULL,
+         g.UseRegister(base), g.UseRegister(index), val);
+  }
+  // TODO(turbofan): addressing modes [r+r*{2,4,8}+K]
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  X64OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
+  // TODO(turbofan): match complex addressing modes.
+  // TODO(turbofan): if commutative, pick the non-live-in operand as the left
+  // operand, since this might be its last use and its register can then be
+  // reused.
+  if (g.CanBeImmediate(m.right().node())) {
+    inputs[input_count++] = g.Use(m.left().node());
+    inputs[input_count++] = g.UseImmediate(m.right().node());
+  } else {
+    inputs[input_count++] = g.UseRegister(m.left().node());
+    inputs[input_count++] = g.Use(m.right().node());
+  }
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineSameAsFirst(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+// Shared routine for multiple binary operations.
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kX64And32);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+  VisitBinop(this, node, kX64And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kX64Or32);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+  VisitBinop(this, node, kX64Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  X64OperandGenerator g(this);
+  Uint32BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kX64Not32, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+  } else {
+    VisitBinop(this, node, kX64Xor32);
+  }
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  X64OperandGenerator g(this);
+  Uint64BinopMatcher m(node);
+  if (m.right().Is(-1)) {
+    Emit(kX64Not, g.DefineSameAsFirst(node), g.Use(m.left().node()));
+  } else {
+    VisitBinop(this, node, kX64Xor);
+  }
+}
+
+
+// Shared routine for multiple 32-bit shift operations.
+// TODO(bmeurer): Merge this with VisitWord64Shift using template magic?
+static void VisitWord32Shift(InstructionSelector* selector, Node* node,
+                             ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int32BinopMatcher m(node);
+    if (m.right().IsWord32And()) {
+      Int32BinopMatcher mright(right);
+      if (mright.right().Is(0x1F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, rcx));
+  }
+}
+
+
+// Shared routine for multiple 64-bit shift operations.
+// TODO(bmeurer): Merge this with VisitWord32Shift using template magic?
+static void VisitWord64Shift(InstructionSelector* selector, Node* node,
+                             ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // TODO(turbofan): assembler only supports some addressing modes for shifts.
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseImmediate(right));
+  } else {
+    Int64BinopMatcher m(node);
+    if (m.right().IsWord64And()) {
+      Int64BinopMatcher mright(right);
+      if (mright.right().Is(0x3F)) {
+        right = mright.left().node();
+      }
+    }
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.UseFixed(right, rcx));
+  }
+}
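+
+// Why the masking `and` is stripped above (illustration): x64 variable shifts
+// already reduce the count in CL modulo the operand width (count & 0x1F for
+// 32-bit, count & 0x3F for 64-bit), so an IR-level mask by the same constant
+// is redundant. In C++ the masked form is also the only well-defined one:
+static uint32_t ShlModuloWidthSketch(uint32_t value, uint32_t count) {
+  return value << (count & 0x1F);  // mirrors what `shl r/m32, cl` does
+}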
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitWord32Shift(this, node, kX64Shl32);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  VisitWord64Shift(this, node, kX64Shl);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitWord32Shift(this, node, kX64Shr32);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  VisitWord64Shift(this, node, kX64Shr);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitWord32Shift(this, node, kX64Sar32);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitWord64Shift(this, node, kX64Sar);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitWord32Shift(this, node, kX64Ror32);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitWord64Shift(this, node, kX64Ror);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  VisitBinop(this, node, kX64Add32);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+  VisitBinop(this, node, kX64Add);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kX64Neg32, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+  } else {
+    VisitBinop(this, node, kX64Sub32);
+  }
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if (m.left().Is(0)) {
+    Emit(kX64Neg, g.DefineSameAsFirst(node), g.Use(m.right().node()));
+  } else {
+    VisitBinop(this, node, kX64Sub);
+  }
+}
+
+
+static void VisitMul(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  if (g.CanBeImmediate(right)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(left),
+                   g.UseImmediate(right));
+  } else if (g.CanBeImmediate(left)) {
+    selector->Emit(opcode, g.DefineAsRegister(node), g.Use(right),
+                   g.UseImmediate(left));
+  } else {
+    // TODO(turbofan): select better left operand.
+    selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
+                   g.Use(right));
+  }
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  VisitMul(this, node, kX64Imul32);
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  VisitMul(this, node, kX64Imul);
+}
+
+
+static void VisitDiv(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(rdx)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rax), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
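+
+// Register pinning above (illustration): x64 `idiv` implicitly divides
+// rdx:rax, leaving the quotient in rax and the remainder in rdx. That is why
+// the dividend and result are fixed to rax here, with rdx reserved as a temp;
+// the mod visitors below define their result as rdx instead. One `idiv`
+// computes both of these:
+static int32_t QuotRemSketch(int32_t a, int32_t b, int32_t* rem) {
+  *rem = a % b;   // remainder -> rdx
+  return a / b;   // quotient  -> rax
+}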
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  VisitDiv(this, node, kX64Idiv32);
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+  VisitDiv(this, node, kX64Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UDiv(Node* node) {
+  VisitDiv(this, node, kX64Udiv32);
+}
+
+
+void InstructionSelector::VisitInt64UDiv(Node* node) {
+  VisitDiv(this, node, kX64Udiv);
+}
+
+
+static void VisitMod(InstructionSelector* selector, Node* node,
+                     ArchOpcode opcode) {
+  X64OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(rax), g.TempRegister(rdx)};
+  selector->Emit(
+      opcode, g.DefineAsFixed(node, rdx), g.UseFixed(node->InputAt(0), rax),
+      g.UseUniqueRegister(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  VisitMod(this, node, kX64Idiv32);
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  VisitMod(this, node, kX64Idiv);
+}
+
+
+void InstructionSelector::VisitInt32UMod(Node* node) {
+  VisitMod(this, node, kX64Udiv32);
+}
+
+
+void InstructionSelector::VisitInt64UMod(Node* node) {
+  VisitMod(this, node, kX64Udiv);
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEInt32ToFloat64, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  X64OperandGenerator g(this);
+  // TODO(turbofan): X64 SSE cvtqsi2sd should support memory operands.
+  Emit(kSSEUint32ToFloat64, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64ToInt32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64ToUint32, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Movsxlq, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kX64Movl, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  X64OperandGenerator g(this);
+  InstructionOperand* temps[] = {g.TempRegister(rax)};
+  Emit(kSSEFloat64Mod, g.DefineSameAsFirst(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)), 1,
+       temps);
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  X64OperandGenerator g(this);
+  Emit(kSSEFloat64Sqrt, g.DefineAsRegister(node), g.Use(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kX64Add32, cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node,
+                                                    FlagsContinuation* cont) {
+  VisitBinop(this, node, kX64Sub32, cont);
+}
+
+
+// Shared routine for multiple compare operations.
+static void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                         InstructionOperand* left, InstructionOperand* right,
+                         FlagsContinuation* cont) {
+  X64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple word compare operations.
+static void VisitWordCompare(InstructionSelector* selector, Node* node,
+                             InstructionCode opcode, FlagsContinuation* cont,
+                             bool commutative) {
+  X64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right)) {
+    VisitCompare(selector, opcode, g.Use(left), g.UseImmediate(right), cont);
+  } else if (g.CanBeImmediate(left)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.Use(right), g.UseImmediate(left), cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.Use(right), cont);
+  }
+}
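+
+// What cont->Commute() compensates for (illustration): swapping comparison
+// operands must also flip the condition -- `5 < x` commutes to `x > 5`, not
+// `x < 5`. A sketch of the mapping for the ordered conditions:
+static int CommuteConditionSketch(int cond) {
+  enum { kLt, kGt, kLe, kGe };  // hypothetical condition codes
+  switch (cond) {
+    case kLt: return kGt;
+    case kGt: return kLt;
+    case kLe: return kGe;
+    case kGe: return kLe;
+    default:  return cond;  // equality and inequality are symmetric
+  }
+}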
+
+
+void InstructionSelector::VisitWord32Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt32Sub:
+      return VisitWordCompare(this, node, kX64Cmp32, cont, false);
+    case IrOpcode::kWord32And:
+      return VisitWordCompare(this, node, kX64Test32, cont, true);
+    default:
+      break;
+  }
+
+  X64OperandGenerator g(this);
+  VisitCompare(this, kX64Test32, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord64Test(Node* node, FlagsContinuation* cont) {
+  switch (node->opcode()) {
+    case IrOpcode::kInt64Sub:
+      return VisitWordCompare(this, node, kX64Cmp, cont, false);
+    case IrOpcode::kWord64And:
+      return VisitWordCompare(this, node, kX64Test, cont, true);
+    default:
+      break;
+  }
+
+  X64OperandGenerator g(this);
+  VisitCompare(this, kX64Test, g.Use(node), g.TempImmediate(-1), cont);
+}
+
+
+void InstructionSelector::VisitWord32Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kX64Cmp32, cont, false);
+}
+
+
+void InstructionSelector::VisitWord64Compare(Node* node,
+                                             FlagsContinuation* cont) {
+  VisitWordCompare(this, node, kX64Cmp, cont, false);
+}
+
+
+void InstructionSelector::VisitFloat64Compare(Node* node,
+                                              FlagsContinuation* cont) {
+  X64OperandGenerator g(this);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(this, kSSEFloat64Cmp, g.UseRegister(left), g.Use(right), cont);
+}
+
+
+void InstructionSelector::VisitCall(Node* call, BasicBlock* continuation,
+                                    BasicBlock* deoptimization) {
+  X64OperandGenerator g(this);
+  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(call);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor = GetFrameStateDescriptor(
+        call->InputAt(static_cast<int>(descriptor->InputCount())));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(call, &buffer, true, true);
+
+  // TODO(dcarney): stack alignment for C calls.
+  // TODO(dcarney): shadow space on Windows for C calls.
+  // Push any stack arguments.
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    // TODO(titzer): handle pushing double parameters.
+    Emit(kX64Push, NULL,
+         g.CanBeImmediate(*input) ? g.UseImmediate(*input) : g.Use(*input));
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+  if (deoptimization != NULL) {
+    DCHECK(continuation != NULL);
+    call_instr->MarkAsControl();
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/compiler/x64/linkage-x64.cc b/src/compiler/x64/linkage-x64.cc
new file mode 100644
index 0000000..8175bc6
--- /dev/null
+++ b/src/compiler/x64/linkage-x64.cc
@@ -0,0 +1,80 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#ifdef _WIN64
+const bool kWin64 = true;
+#else
+const bool kWin64 = false;
+#endif
+
+struct X64LinkageHelperTraits {
+  static Register ReturnValueReg() { return rax; }
+  static Register ReturnValue2Reg() { return rdx; }
+  static Register JSCallFunctionReg() { return rdi; }
+  static Register ContextReg() { return rsi; }
+  static Register RuntimeCallFunctionReg() { return rbx; }
+  static Register RuntimeCallArgCountReg() { return rax; }
+  static RegList CCalleeSaveRegisters() {
+    if (kWin64) {
+      return rbx.bit() | rdi.bit() | rsi.bit() | r12.bit() | r13.bit() |
+             r14.bit() | r15.bit();
+    } else {
+      return rbx.bit() | r12.bit() | r13.bit() | r14.bit() | r15.bit();
+    }
+  }
+  static Register CRegisterParameter(int i) {
+    if (kWin64) {
+      static Register register_parameters[] = {rcx, rdx, r8, r9};
+      return register_parameters[i];
+    } else {
+      static Register register_parameters[] = {rdi, rsi, rdx, rcx, r8, r9};
+      return register_parameters[i];
+    }
+  }
+  static int CRegisterParametersLength() { return kWin64 ? 4 : 6; }
+};
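+
+// Illustration (not part of the patch): the traits above partition C call
+// arguments -- the first CRegisterParametersLength() integer arguments travel
+// in registers, the rest on the stack. Win64 additionally reserves 32 bytes
+// of shadow space for the four register parameters.
+static bool CParameterPassesOnStackSketch(int index) {
+  return index >= (kWin64 ? 4 : 6);
+}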
+
+typedef LinkageHelper<X64LinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone) {
+  return LH::GetJSCallDescriptor(zone, parameter_count);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    CallInterfaceDescriptor descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/src/contexts.cc b/src/contexts.cc
index cb5e852..30c474d 100644
--- a/src/contexts.cc
+++ b/src/contexts.cc
@@ -15,7 +15,7 @@
   Context* current = this;
   while (!current->IsFunctionContext() && !current->IsNativeContext()) {
     current = current->previous();
-    ASSERT(current->closure() == closure());
+    DCHECK(current->closure() == closure());
   }
   return current;
 }
@@ -26,7 +26,7 @@
   if (object->IsJSGlobalObject()) {
     return JSGlobalObject::cast(object)->builtins();
   } else {
-    ASSERT(object->IsJSBuiltinsObject());
+    DCHECK(object->IsJSBuiltinsObject());
     return JSBuiltinsObject::cast(object);
   }
 }
@@ -51,7 +51,7 @@
 
   // During bootstrapping, the global object might not be set and we
   // have to search the context chain to find the native context.
-  ASSERT(this->GetIsolate()->bootstrapper()->IsActive());
+  DCHECK(this->GetIsolate()->bootstrapper()->IsActive());
   Context* current = this;
   while (!current->IsNativeContext()) {
     JSFunction* closure = JSFunction::cast(current->closure());
@@ -71,6 +71,38 @@
 }
 
 
+/**
+ * Looks up a property in an object environment, taking unscopables into
+ * account. This is used for the HasBinding spec algorithm for object
+ * environments.
+ */
+static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) {
+  Isolate* isolate = it->isolate();
+
+  Maybe<PropertyAttributes> attrs = JSReceiver::GetPropertyAttributes(it);
+  DCHECK(attrs.has_value || isolate->has_pending_exception());
+  if (!attrs.has_value || attrs.value == ABSENT) return attrs;
+
+  Handle<Symbol> unscopables_symbol(
+      isolate->native_context()->unscopables_symbol(), isolate);
+  Handle<Object> receiver = it->GetReceiver();
+  Handle<Object> unscopables;
+  MaybeHandle<Object> maybe_unscopables =
+      Object::GetProperty(receiver, unscopables_symbol);
+  if (!maybe_unscopables.ToHandle(&unscopables)) {
+    return Maybe<PropertyAttributes>();
+  }
+  if (!unscopables->IsSpecObject()) return attrs;
+  Maybe<bool> blacklist = JSReceiver::HasProperty(
+      Handle<JSReceiver>::cast(unscopables), it->name());
+  if (!blacklist.has_value) {
+    DCHECK(isolate->has_pending_exception());
+    return Maybe<PropertyAttributes>();
+  }
+  if (blacklist.value) return maybe(ABSENT);
+  return attrs;
+}
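+
+// Decision logic of UnscopableLookup in miniature (illustration only, with
+// plain booleans standing in for the V8 lookups): a property that exists on
+// the with-target but appears in its @@unscopables blacklist reports ABSENT.
+static bool HasBindingSketch(bool is_property, bool is_unscopable) {
+  if (!is_property) return false;  // not a property at all
+  return !is_unscopable;           // blacklisted names behave as absent
+}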
+
+
 Handle<Object> Context::Lookup(Handle<String> name,
                                ContextLookupFlags flags,
                                int* index,
@@ -106,15 +138,22 @@
       // Context extension objects need to behave as if they have no
       // prototype.  So even if we want to follow prototype chains, we need
       // to only do a local lookup for context extension objects.
+      Maybe<PropertyAttributes> maybe;
       if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
           object->IsJSContextExtensionObject()) {
-        *attributes = JSReceiver::GetOwnPropertyAttributes(object, name);
+        maybe = JSReceiver::GetOwnPropertyAttributes(object, name);
+      } else if (context->IsWithContext()) {
+        LookupIterator it(object, name);
+        maybe = UnscopableLookup(&it);
       } else {
-        *attributes = JSReceiver::GetPropertyAttributes(object, name);
+        maybe = JSReceiver::GetPropertyAttributes(object, name);
       }
-      if (isolate->has_pending_exception()) return Handle<Object>();
 
-      if (*attributes != ABSENT) {
+      if (!maybe.has_value) return Handle<Object>();
+      DCHECK(!isolate->has_pending_exception());
+      *attributes = maybe.value;
+
+      if (maybe.value != ABSENT) {
         if (FLAG_trace_contexts) {
           PrintF("=> found property in context object %p\n",
                  reinterpret_cast<void*>(*object));
@@ -137,9 +176,12 @@
       }
       VariableMode mode;
       InitializationFlag init_flag;
-      int slot_index =
-          ScopeInfo::ContextSlotIndex(scope_info, name, &mode, &init_flag);
-      ASSERT(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
+      // TODO(sigurds): Figure out whether maybe_assigned_flag should
+      // be used to compute binding_flags.
+      MaybeAssignedFlag maybe_assigned_flag;
+      int slot_index = ScopeInfo::ContextSlotIndex(
+          scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
+      DCHECK(slot_index < 0 || slot_index >= MIN_CONTEXT_SLOTS);
       if (slot_index >= 0) {
         if (FLAG_trace_contexts) {
           PrintF("=> found local in context slot %d (mode = %d)\n",
@@ -200,7 +242,7 @@
           }
           *index = function_index;
           *attributes = READ_ONLY;
-          ASSERT(mode == CONST_LEGACY || mode == CONST);
+          DCHECK(mode == CONST_LEGACY || mode == CONST);
           *binding_flags = (mode == CONST_LEGACY)
               ? IMMUTABLE_IS_INITIALIZED : IMMUTABLE_IS_INITIALIZED_HARMONY;
           return context;
@@ -236,8 +278,8 @@
 
 
 void Context::AddOptimizedFunction(JSFunction* function) {
-  ASSERT(IsNativeContext());
-#ifdef ENABLE_SLOW_ASSERTS
+  DCHECK(IsNativeContext());
+#ifdef ENABLE_SLOW_DCHECKS
   if (FLAG_enable_slow_asserts) {
     Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
     while (!element->IsUndefined()) {
@@ -266,7 +308,7 @@
     flusher->EvictCandidate(function);
   }
 
-  ASSERT(function->next_function_link()->IsUndefined());
+  DCHECK(function->next_function_link()->IsUndefined());
 
   function->set_next_function_link(get(OPTIMIZED_FUNCTIONS_LIST));
   set(OPTIMIZED_FUNCTIONS_LIST, function);
@@ -274,12 +316,12 @@
 
 
 void Context::RemoveOptimizedFunction(JSFunction* function) {
-  ASSERT(IsNativeContext());
+  DCHECK(IsNativeContext());
   Object* element = get(OPTIMIZED_FUNCTIONS_LIST);
   JSFunction* prev = NULL;
   while (!element->IsUndefined()) {
     JSFunction* element_function = JSFunction::cast(element);
-    ASSERT(element_function->next_function_link()->IsUndefined() ||
+    DCHECK(element_function->next_function_link()->IsUndefined() ||
            element_function->next_function_link()->IsJSFunction());
     if (element_function == function) {
       if (prev == NULL) {
@@ -298,46 +340,46 @@
 
 
 void Context::SetOptimizedFunctionsListHead(Object* head) {
-  ASSERT(IsNativeContext());
+  DCHECK(IsNativeContext());
   set(OPTIMIZED_FUNCTIONS_LIST, head);
 }
 
 
 Object* Context::OptimizedFunctionsListHead() {
-  ASSERT(IsNativeContext());
+  DCHECK(IsNativeContext());
   return get(OPTIMIZED_FUNCTIONS_LIST);
 }
 
 
 void Context::AddOptimizedCode(Code* code) {
-  ASSERT(IsNativeContext());
-  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
-  ASSERT(code->next_code_link()->IsUndefined());
+  DCHECK(IsNativeContext());
+  DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
+  DCHECK(code->next_code_link()->IsUndefined());
   code->set_next_code_link(get(OPTIMIZED_CODE_LIST));
   set(OPTIMIZED_CODE_LIST, code);
 }
 
 
 void Context::SetOptimizedCodeListHead(Object* head) {
-  ASSERT(IsNativeContext());
+  DCHECK(IsNativeContext());
   set(OPTIMIZED_CODE_LIST, head);
 }
 
 
 Object* Context::OptimizedCodeListHead() {
-  ASSERT(IsNativeContext());
+  DCHECK(IsNativeContext());
   return get(OPTIMIZED_CODE_LIST);
 }
 
 
 void Context::SetDeoptimizedCodeListHead(Object* head) {
-  ASSERT(IsNativeContext());
+  DCHECK(IsNativeContext());
   set(DEOPTIMIZED_CODE_LIST, head);
 }
 
 
 Object* Context::DeoptimizedCodeListHead() {
-  ASSERT(IsNativeContext());
+  DCHECK(IsNativeContext());
   return get(DEOPTIMIZED_CODE_LIST);
 }
 
@@ -346,7 +388,7 @@
   Isolate* isolate = GetIsolate();
   Handle<Object> result(error_message_for_code_gen_from_strings(), isolate);
   if (!result->IsUndefined()) return result;
-  return isolate->factory()->NewStringFromStaticAscii(
+  return isolate->factory()->NewStringFromStaticChars(
       "Code generation from strings disallowed for this context");
 }
 
diff --git a/src/contexts.h b/src/contexts.h
index 1ee5a6f..ac25e48 100644
--- a/src/contexts.h
+++ b/src/contexts.h
@@ -5,7 +5,7 @@
 #ifndef V8_CONTEXTS_H_
 #define V8_CONTEXTS_H_
 
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/objects.h"
 
 namespace v8 {
@@ -73,123 +73,136 @@
 // must always be allocated via Heap::AllocateContext() or
 // Factory::NewContext.
 
-#define NATIVE_CONTEXT_FIELDS(V) \
-  V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object) \
-  V(SECURITY_TOKEN_INDEX, Object, security_token) \
-  V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function) \
-  V(NUMBER_FUNCTION_INDEX, JSFunction, number_function) \
-  V(STRING_FUNCTION_INDEX, JSFunction, string_function) \
-  V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map) \
-  V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function) \
-  V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
-  V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function) \
-  V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
-  V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps) \
-  V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
-  V(JSON_OBJECT_INDEX, JSObject, json_object) \
-  V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
-  V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
-  V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype) \
-  V(CREATE_DATE_FUN_INDEX, JSFunction,  create_date_fun) \
-  V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun) \
-  V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun) \
-  V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun) \
-  V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun) \
-  V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun) \
-  V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun) \
-  V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun) \
-  V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun) \
-  V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
-  V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
-  V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun) \
-  V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun) \
-  V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun) \
-  V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun) \
-  V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun) \
-  V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun) \
-  V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun) \
-  V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun) \
-  V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun) \
-  V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun) \
-  V(INT8_ARRAY_EXTERNAL_MAP_INDEX, Map, int8_array_external_map) \
-  V(UINT8_ARRAY_EXTERNAL_MAP_INDEX, Map, uint8_array_external_map) \
-  V(INT16_ARRAY_EXTERNAL_MAP_INDEX, Map, int16_array_external_map) \
-  V(UINT16_ARRAY_EXTERNAL_MAP_INDEX, Map, uint16_array_external_map) \
-  V(INT32_ARRAY_EXTERNAL_MAP_INDEX, Map, int32_array_external_map) \
-  V(UINT32_ARRAY_EXTERNAL_MAP_INDEX, Map, uint32_array_external_map) \
-  V(FLOAT32_ARRAY_EXTERNAL_MAP_INDEX, Map, float32_array_external_map) \
-  V(FLOAT64_ARRAY_EXTERNAL_MAP_INDEX, Map, float64_array_external_map) \
-  V(UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX, Map, \
-      uint8_clamped_array_external_map) \
-  V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun) \
-  V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map) \
-  V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map, \
-    sloppy_function_with_readonly_prototype_map) \
-  V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map) \
-  V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
-    sloppy_function_without_prototype_map) \
-  V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
-    strict_function_without_prototype_map) \
-  V(BOUND_FUNCTION_MAP_INDEX, Map, bound_function_map) \
-  V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
-  V(SLOPPY_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
-    sloppy_arguments_boilerplate) \
-  V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
-    aliased_arguments_boilerplate) \
-  V(STRICT_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
-    strict_arguments_boilerplate) \
-  V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
-  V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
-  V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
-  V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun) \
-  V(FUNCTION_CACHE_INDEX, JSObject, function_cache) \
-  V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches) \
-  V(NORMALIZED_MAP_CACHE_INDEX, NormalizedMapCache, normalized_map_cache) \
-  V(RUNTIME_CONTEXT_INDEX, Context, runtime_context) \
-  V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate) \
-  V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction, \
-    call_as_constructor_delegate) \
-  V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
-  V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function) \
-  V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
-  V(MAP_CACHE_INDEX, Object, map_cache) \
-  V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data) \
-  V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
-  V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object, \
-    error_message_for_code_gen_from_strings) \
-  V(IS_PROMISE_INDEX, JSFunction, is_promise) \
-  V(PROMISE_CREATE_INDEX, JSFunction, promise_create) \
-  V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve) \
-  V(PROMISE_REJECT_INDEX, JSFunction, promise_reject) \
-  V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain) \
-  V(PROMISE_CATCH_INDEX, JSFunction, promise_catch) \
-  V(PROMISE_THEN_INDEX, JSFunction, promise_then) \
-  V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction, \
-    to_complete_property_descriptor) \
-  V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap) \
-  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
-  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap) \
-  V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
-  V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
-  V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice) \
-  V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, \
-    observers_begin_perform_splice) \
-  V(OBSERVERS_END_SPLICE_INDEX, JSFunction, \
-    observers_end_perform_splice) \
-  V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, \
-    native_object_observe) \
-  V(NATIVE_OBJECT_GET_NOTIFIER_INDEX, JSFunction, \
-    native_object_get_notifier) \
-  V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction, \
-    native_object_notifier_perform_change) \
-  V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map) \
-  V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map) \
-  V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
-    generator_object_prototype_map) \
-  V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map) \
-  V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map) \
-  V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map) \
-  V(ITERATOR_SYMBOL_INDEX, Symbol, iterator_symbol)
+#define NATIVE_CONTEXT_FIELDS(V)                                               \
+  V(GLOBAL_PROXY_INDEX, JSObject, global_proxy_object)                         \
+  V(SECURITY_TOKEN_INDEX, Object, security_token)                              \
+  V(BOOLEAN_FUNCTION_INDEX, JSFunction, boolean_function)                      \
+  V(NUMBER_FUNCTION_INDEX, JSFunction, number_function)                        \
+  V(STRING_FUNCTION_INDEX, JSFunction, string_function)                        \
+  V(STRING_FUNCTION_PROTOTYPE_MAP_INDEX, Map, string_function_prototype_map)   \
+  V(SYMBOL_FUNCTION_INDEX, JSFunction, symbol_function)                        \
+  V(OBJECT_FUNCTION_INDEX, JSFunction, object_function)                        \
+  V(INTERNAL_ARRAY_FUNCTION_INDEX, JSFunction, internal_array_function)        \
+  V(ARRAY_FUNCTION_INDEX, JSFunction, array_function)                          \
+  V(JS_ARRAY_MAPS_INDEX, Object, js_array_maps)                                \
+  V(DATE_FUNCTION_INDEX, JSFunction, date_function)                            \
+  V(JSON_OBJECT_INDEX, JSObject, json_object)                                  \
+  V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function)                        \
+  V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype)        \
+  V(INITIAL_ARRAY_PROTOTYPE_INDEX, JSObject, initial_array_prototype)          \
+  V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun)                        \
+  V(TO_NUMBER_FUN_INDEX, JSFunction, to_number_fun)                            \
+  V(TO_STRING_FUN_INDEX, JSFunction, to_string_fun)                            \
+  V(TO_DETAIL_STRING_FUN_INDEX, JSFunction, to_detail_string_fun)              \
+  V(TO_OBJECT_FUN_INDEX, JSFunction, to_object_fun)                            \
+  V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun)                          \
+  V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun)                            \
+  V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun)                              \
+  V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                        \
+  V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun)                        \
+  V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun)          \
+  V(MATH_ABS_FUN_INDEX, JSFunction, math_abs_fun)                              \
+  V(MATH_ACOS_FUN_INDEX, JSFunction, math_acos_fun)                            \
+  V(MATH_ASIN_FUN_INDEX, JSFunction, math_asin_fun)                            \
+  V(MATH_ATAN_FUN_INDEX, JSFunction, math_atan_fun)                            \
+  V(MATH_ATAN2_FUN_INDEX, JSFunction, math_atan2_fun)                          \
+  V(MATH_CEIL_FUN_INDEX, JSFunction, math_ceil_fun)                            \
+  V(MATH_COS_FUN_INDEX, JSFunction, math_cos_fun)                              \
+  V(MATH_EXP_FUN_INDEX, JSFunction, math_exp_fun)                              \
+  V(MATH_FLOOR_FUN_INDEX, JSFunction, math_floor_fun)                          \
+  V(MATH_IMUL_FUN_INDEX, JSFunction, math_imul_fun)                            \
+  V(MATH_LOG_FUN_INDEX, JSFunction, math_log_fun)                              \
+  V(MATH_MAX_FUN_INDEX, JSFunction, math_max_fun)                              \
+  V(MATH_MIN_FUN_INDEX, JSFunction, math_min_fun)                              \
+  V(MATH_POW_FUN_INDEX, JSFunction, math_pow_fun)                              \
+  V(MATH_RANDOM_FUN_INDEX, JSFunction, math_random_fun)                        \
+  V(MATH_ROUND_FUN_INDEX, JSFunction, math_round_fun)                          \
+  V(MATH_SIN_FUN_INDEX, JSFunction, math_sin_fun)                              \
+  V(MATH_SQRT_FUN_INDEX, JSFunction, math_sqrt_fun)                            \
+  V(MATH_TAN_FUN_INDEX, JSFunction, math_tan_fun)                              \
+  V(ARRAY_BUFFER_FUN_INDEX, JSFunction, array_buffer_fun)                      \
+  V(UINT8_ARRAY_FUN_INDEX, JSFunction, uint8_array_fun)                        \
+  V(INT8_ARRAY_FUN_INDEX, JSFunction, int8_array_fun)                          \
+  V(UINT16_ARRAY_FUN_INDEX, JSFunction, uint16_array_fun)                      \
+  V(INT16_ARRAY_FUN_INDEX, JSFunction, int16_array_fun)                        \
+  V(UINT32_ARRAY_FUN_INDEX, JSFunction, uint32_array_fun)                      \
+  V(INT32_ARRAY_FUN_INDEX, JSFunction, int32_array_fun)                        \
+  V(FLOAT32_ARRAY_FUN_INDEX, JSFunction, float32_array_fun)                    \
+  V(FLOAT64_ARRAY_FUN_INDEX, JSFunction, float64_array_fun)                    \
+  V(UINT8_CLAMPED_ARRAY_FUN_INDEX, JSFunction, uint8_clamped_array_fun)        \
+  V(INT8_ARRAY_EXTERNAL_MAP_INDEX, Map, int8_array_external_map)               \
+  V(UINT8_ARRAY_EXTERNAL_MAP_INDEX, Map, uint8_array_external_map)             \
+  V(INT16_ARRAY_EXTERNAL_MAP_INDEX, Map, int16_array_external_map)             \
+  V(UINT16_ARRAY_EXTERNAL_MAP_INDEX, Map, uint16_array_external_map)           \
+  V(INT32_ARRAY_EXTERNAL_MAP_INDEX, Map, int32_array_external_map)             \
+  V(UINT32_ARRAY_EXTERNAL_MAP_INDEX, Map, uint32_array_external_map)           \
+  V(FLOAT32_ARRAY_EXTERNAL_MAP_INDEX, Map, float32_array_external_map)         \
+  V(FLOAT64_ARRAY_EXTERNAL_MAP_INDEX, Map, float64_array_external_map)         \
+  V(UINT8_CLAMPED_ARRAY_EXTERNAL_MAP_INDEX, Map,                               \
+    uint8_clamped_array_external_map)                                          \
+  V(DATA_VIEW_FUN_INDEX, JSFunction, data_view_fun)                            \
+  V(SLOPPY_FUNCTION_MAP_INDEX, Map, sloppy_function_map)                       \
+  V(SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX, Map,                    \
+    sloppy_function_with_readonly_prototype_map)                               \
+  V(STRICT_FUNCTION_MAP_INDEX, Map, strict_function_map)                       \
+  V(SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map,                          \
+    sloppy_function_without_prototype_map)                                     \
+  V(STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map,                          \
+    strict_function_without_prototype_map)                                     \
+  V(BOUND_FUNCTION_MAP_INDEX, Map, bound_function_map)                         \
+  V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)                           \
+  V(SLOPPY_ARGUMENTS_MAP_INDEX, Map, sloppy_arguments_map)                     \
+  V(ALIASED_ARGUMENTS_MAP_INDEX, Map, aliased_arguments_map)                   \
+  V(STRICT_ARGUMENTS_MAP_INDEX, Map, strict_arguments_map)                     \
+  V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners)                      \
+  V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun)                      \
+  V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun)          \
+  V(CONFIGURE_GLOBAL_INDEX, JSFunction, configure_global_fun)                  \
+  V(FUNCTION_CACHE_INDEX, JSObject, function_cache)                            \
+  V(JSFUNCTION_RESULT_CACHES_INDEX, FixedArray, jsfunction_result_caches)      \
+  V(NORMALIZED_MAP_CACHE_INDEX, Object, normalized_map_cache)                  \
+  V(RUNTIME_CONTEXT_INDEX, Context, runtime_context)                           \
+  V(CALL_AS_FUNCTION_DELEGATE_INDEX, JSFunction, call_as_function_delegate)    \
+  V(CALL_AS_CONSTRUCTOR_DELEGATE_INDEX, JSFunction,                            \
+    call_as_constructor_delegate)                                              \
+  V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function)                        \
+  V(OPAQUE_REFERENCE_FUNCTION_INDEX, JSFunction, opaque_reference_function)    \
+  V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function)  \
+  V(MAP_CACHE_INDEX, Object, map_cache)                                        \
+  V(EMBEDDER_DATA_INDEX, FixedArray, embedder_data)                            \
+  V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings)    \
+  V(ERROR_MESSAGE_FOR_CODE_GEN_FROM_STRINGS_INDEX, Object,                     \
+    error_message_for_code_gen_from_strings)                                   \
+  V(IS_PROMISE_INDEX, JSFunction, is_promise)                                  \
+  V(PROMISE_CREATE_INDEX, JSFunction, promise_create)                          \
+  V(PROMISE_RESOLVE_INDEX, JSFunction, promise_resolve)                        \
+  V(PROMISE_REJECT_INDEX, JSFunction, promise_reject)                          \
+  V(PROMISE_CHAIN_INDEX, JSFunction, promise_chain)                            \
+  V(PROMISE_CATCH_INDEX, JSFunction, promise_catch)                            \
+  V(PROMISE_THEN_INDEX, JSFunction, promise_then)                              \
+  V(TO_COMPLETE_PROPERTY_DESCRIPTOR_INDEX, JSFunction,                         \
+    to_complete_property_descriptor)                                           \
+  V(DERIVED_HAS_TRAP_INDEX, JSFunction, derived_has_trap)                      \
+  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap)                      \
+  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)                      \
+  V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate)                        \
+  V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change)        \
+  V(OBSERVERS_ENQUEUE_SPLICE_INDEX, JSFunction, observers_enqueue_splice)      \
+  V(OBSERVERS_BEGIN_SPLICE_INDEX, JSFunction, observers_begin_perform_splice)  \
+  V(OBSERVERS_END_SPLICE_INDEX, JSFunction, observers_end_perform_splice)      \
+  V(NATIVE_OBJECT_OBSERVE_INDEX, JSFunction, native_object_observe)            \
+  V(NATIVE_OBJECT_GET_NOTIFIER_INDEX, JSFunction, native_object_get_notifier)  \
+  V(NATIVE_OBJECT_NOTIFIER_PERFORM_CHANGE, JSFunction,                         \
+    native_object_notifier_perform_change)                                     \
+  V(SLOPPY_GENERATOR_FUNCTION_MAP_INDEX, Map, sloppy_generator_function_map)   \
+  V(STRICT_GENERATOR_FUNCTION_MAP_INDEX, Map, strict_generator_function_map)   \
+  V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, generator_object_prototype_map) \
+  V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map)                       \
+  V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map)                             \
+  V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map)                             \
+  V(ITERATOR_SYMBOL_INDEX, Symbol, iterator_symbol)                            \
+  V(UNSCOPABLES_SYMBOL_INDEX, Symbol, unscopables_symbol)                      \
+  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -240,7 +253,7 @@
  public:
   // Conversions.
   static Context* cast(Object* context) {
-    ASSERT(context->IsContext());
+    DCHECK(context->IsContext());
     return reinterpret_cast<Context*>(context);
   }
 
@@ -263,9 +276,9 @@
     // These slots are only in native contexts.
     GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
     SECURITY_TOKEN_INDEX,
-    SLOPPY_ARGUMENTS_BOILERPLATE_INDEX,
-    ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
-    STRICT_ARGUMENTS_BOILERPLATE_INDEX,
+    SLOPPY_ARGUMENTS_MAP_INDEX,
+    ALIASED_ARGUMENTS_MAP_INDEX,
+    STRICT_ARGUMENTS_MAP_INDEX,
     REGEXP_RESULT_MAP_INDEX,
     SLOPPY_FUNCTION_MAP_INDEX,
     SLOPPY_FUNCTION_WITH_READONLY_PROTOTYPE_MAP_INDEX,
@@ -299,6 +312,25 @@
     GLOBAL_EVAL_FUN_INDEX,
     INSTANTIATE_FUN_INDEX,
     CONFIGURE_INSTANCE_FUN_INDEX,
+    MATH_ABS_FUN_INDEX,
+    MATH_ACOS_FUN_INDEX,
+    MATH_ASIN_FUN_INDEX,
+    MATH_ATAN_FUN_INDEX,
+    MATH_ATAN2_FUN_INDEX,
+    MATH_CEIL_FUN_INDEX,
+    MATH_COS_FUN_INDEX,
+    MATH_EXP_FUN_INDEX,
+    MATH_FLOOR_FUN_INDEX,
+    MATH_IMUL_FUN_INDEX,
+    MATH_LOG_FUN_INDEX,
+    MATH_MAX_FUN_INDEX,
+    MATH_MIN_FUN_INDEX,
+    MATH_POW_FUN_INDEX,
+    MATH_RANDOM_FUN_INDEX,
+    MATH_ROUND_FUN_INDEX,
+    MATH_SIN_FUN_INDEX,
+    MATH_SQRT_FUN_INDEX,
+    MATH_TAN_FUN_INDEX,
     ARRAY_BUFFER_FUN_INDEX,
     UINT8_ARRAY_FUN_INDEX,
     INT8_ARRAY_FUN_INDEX,
@@ -364,6 +396,8 @@
     MAP_ITERATOR_MAP_INDEX,
     SET_ITERATOR_MAP_INDEX,
     ITERATOR_SYMBOL_INDEX,
+    UNSCOPABLES_SYMBOL_INDEX,
+    ARRAY_VALUES_ITERATOR_INDEX,
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
@@ -375,7 +409,6 @@
 
     // Total number of slots.
     NATIVE_CONTEXT_SLOTS,
-
     FIRST_WEAK_SLOT = OPTIMIZED_FUNCTIONS_LIST
   };
 
@@ -385,7 +418,7 @@
 
   Context* previous() {
     Object* result = unchecked_previous();
-    ASSERT(IsBootstrappingOrValidParentContext(result, this));
+    DCHECK(IsBootstrappingOrValidParentContext(result, this));
     return reinterpret_cast<Context*>(result);
   }
   void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
@@ -403,7 +436,7 @@
 
   GlobalObject* global_object() {
     Object* result = get(GLOBAL_OBJECT_INDEX);
-    ASSERT(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
+    DCHECK(IsBootstrappingOrGlobalObject(this->GetIsolate(), result));
     return reinterpret_cast<GlobalObject*>(result);
   }
   void set_global_object(GlobalObject* object) {
@@ -478,15 +511,15 @@
 
 #define NATIVE_CONTEXT_FIELD_ACCESSORS(index, type, name) \
   void  set_##name(type* value) {                         \
-    ASSERT(IsNativeContext());                            \
+    DCHECK(IsNativeContext());                            \
     set(index, value);                                    \
   }                                                       \
   bool is_##name(type* value) {                           \
-    ASSERT(IsNativeContext());                            \
+    DCHECK(IsNativeContext());                            \
     return type::cast(get(index)) == value;               \
   }                                                       \
   type* name() {                                          \
-    ASSERT(IsNativeContext());                            \
+    DCHECK(IsNativeContext());                            \
     return type::cast(get(index));                        \
   }
   NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSORS)
@@ -520,14 +553,20 @@
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
   }
 
-  static int FunctionMapIndex(StrictMode strict_mode, bool is_generator) {
-    return is_generator
-      ? (strict_mode == SLOPPY
-         ? SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
-         : STRICT_GENERATOR_FUNCTION_MAP_INDEX)
-      : (strict_mode == SLOPPY
-         ? SLOPPY_FUNCTION_MAP_INDEX
-         : STRICT_FUNCTION_MAP_INDEX);
+  static int FunctionMapIndex(StrictMode strict_mode, FunctionKind kind) {
+    if (IsGeneratorFunction(kind)) {
+      return strict_mode == SLOPPY ? SLOPPY_GENERATOR_FUNCTION_MAP_INDEX
+                                   : STRICT_GENERATOR_FUNCTION_MAP_INDEX;
+    }
+
+    if (IsArrowFunction(kind) || IsConciseMethod(kind)) {
+      return strict_mode == SLOPPY
+                 ? SLOPPY_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX
+                 : STRICT_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX;
+    }
+
+    return strict_mode == SLOPPY ? SLOPPY_FUNCTION_MAP_INDEX
+                                 : STRICT_FUNCTION_MAP_INDEX;
   }
 
   static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
diff --git a/src/conversions-inl.h b/src/conversions-inl.h
index 27fed95..ae87dc4 100644
--- a/src/conversions-inl.h
+++ b/src/conversions-inl.h
@@ -5,8 +5,8 @@
 #ifndef V8_CONVERSIONS_INL_H_
 #define V8_CONVERSIONS_INL_H_
 
-#include <limits.h>        // Required for INT_MAX etc.
 #include <float.h>         // Required for DBL_MAX and on Win32 for finite()
+#include <limits.h>        // Required for INT_MAX etc.
 #include <stdarg.h>
 #include <cmath>
 #include "src/globals.h"       // Required for V8_INFINITY
@@ -14,9 +14,10 @@
 // ----------------------------------------------------------------------------
 // Extra POSIX/ANSI functions for Win32/MSVC.
 
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
 #include "src/conversions.h"
 #include "src/double.h"
-#include "src/platform.h"
 #include "src/scanner.h"
 #include "src/strtod.h"
 
@@ -24,7 +25,7 @@
 namespace internal {
 
 inline double JunkStringValue() {
-  return BitCast<double, uint64_t>(kQuietNaNMask);
+  return bit_cast<double, uint64_t>(kQuietNaNMask);
 }
 
 
@@ -66,6 +67,14 @@
 }
 
 
+inline float DoubleToFloat32(double x) {
+  // TODO(yanggou): This static_cast is implementation-defined behaviour in C++,
+  // so we may need to do the conversion manually instead to match the spec.
+  volatile float f = static_cast<float>(x);
+  return f;
+}
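+
+// Worked example (not from the patch): the cast rounds to the nearest
+// representable float, so DoubleToFloat32(0.1) promoted back to double reads
+// 0.10000000149011612 rather than 0.1. The `volatile` presumably forces an
+// actual float-width store so the compiler cannot keep the value at a wider
+// precision (e.g. x87) or fold the conversion away.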
+
+
 inline double DoubleToInteger(double x) {
   if (std::isnan(x)) return 0;
   if (!std::isfinite(x) || x == 0) return x;
@@ -92,7 +101,7 @@
 bool SubStringEquals(Iterator* current,
                      EndMark end,
                      const char* substring) {
-  ASSERT(**current == *substring);
+  DCHECK(**current == *substring);
   for (substring++; *substring != '\0'; substring++) {
     ++*current;
     if (*current == end || **current != *substring) return false;
@@ -123,7 +132,7 @@
                                  EndMark end,
                                  bool negative,
                                  bool allow_trailing_junk) {
-  ASSERT(current != end);
+  DCHECK(current != end);
 
   // Skip leading 0s.
   while (*current == '0') {
@@ -202,8 +211,8 @@
     ++current;
   } while (current != end);
 
-  ASSERT(number < ((int64_t)1 << 53));
-  ASSERT(static_cast<int64_t>(static_cast<double>(number)) == number);
+  DCHECK(number < ((int64_t)1 << 53));
+  DCHECK(static_cast<int64_t>(static_cast<double>(number)) == number);
 
   if (exponent == 0) {
     if (negative) {
@@ -213,7 +222,7 @@
     return static_cast<double>(number);
   }
 
-  ASSERT(number != 0);
+  DCHECK(number != 0);
   return std::ldexp(static_cast<double>(negative ? -number : number), exponent);
 }
 
@@ -288,7 +297,7 @@
     return JunkStringValue();
   }
 
-  if (IsPowerOf2(radix)) {
+  if (base::bits::IsPowerOfTwo32(radix)) {
     switch (radix) {
       case 2:
         return InternalStringToIntDouble<1>(
@@ -324,7 +333,7 @@
       if (buffer_pos <= kMaxSignificantDigits) {
         // If the number has more than kMaxSignificantDigits it will be parsed
         // as infinity.
-        ASSERT(buffer_pos < kBufferSize);
+        DCHECK(buffer_pos < kBufferSize);
         buffer[buffer_pos++] = static_cast<char>(*current);
       }
       ++current;
@@ -336,7 +345,7 @@
       return JunkStringValue();
     }
 
-    SLOW_ASSERT(buffer_pos < kBufferSize);
+    SLOW_DCHECK(buffer_pos < kBufferSize);
     buffer[buffer_pos] = '\0';
     Vector<const char> buffer_vector(buffer, buffer_pos);
     return negative ? -Strtod(buffer_vector, 0) : Strtod(buffer_vector, 0);
@@ -384,7 +393,7 @@
       if (m > kMaximumMultiplier) break;
       part = part * radix + d;
       multiplier = m;
-      ASSERT(multiplier > part);
+      DCHECK(multiplier > part);
 
       ++current;
       if (current == end) {
@@ -473,7 +482,7 @@
       return JunkStringValue();
     }
 
-    ASSERT(buffer_pos == 0);
+    DCHECK(buffer_pos == 0);
     return (sign == NEGATIVE) ? -V8_INFINITY : V8_INFINITY;
   }
 
@@ -536,7 +545,7 @@
   // Copy significant digits of the integer part (if any) to the buffer.
   while (*current >= '0' && *current <= '9') {
     if (significant_digits < kMaxSignificantDigits) {
-      ASSERT(buffer_pos < kBufferSize);
+      DCHECK(buffer_pos < kBufferSize);
       buffer[buffer_pos++] = static_cast<char>(*current);
       significant_digits++;
       // Will later check if it's an octal in the buffer.
@@ -581,7 +590,7 @@
     // instead.
     while (*current >= '0' && *current <= '9') {
       if (significant_digits < kMaxSignificantDigits) {
-        ASSERT(buffer_pos < kBufferSize);
+        DCHECK(buffer_pos < kBufferSize);
         buffer[buffer_pos++] = static_cast<char>(*current);
         significant_digits++;
         exponent--;
@@ -635,7 +644,7 @@
     }
 
     const int max_exponent = INT_MAX / 2;
-    ASSERT(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
+    DCHECK(-max_exponent / 2 <= exponent && exponent <= max_exponent / 2);
     int num = 0;
     do {
       // Check overflow.
@@ -673,7 +682,7 @@
     exponent--;
   }
 
-  SLOW_ASSERT(buffer_pos < kBufferSize);
+  SLOW_DCHECK(buffer_pos < kBufferSize);
   buffer[buffer_pos] = '\0';
 
   double converted = Strtod(Vector<const char>(buffer, buffer_pos), exponent);
diff --git a/src/conversions.cc b/src/conversions.cc
index 4efe903..8b77623 100644
--- a/src/conversions.cc
+++ b/src/conversions.cc
@@ -2,15 +2,15 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include <stdarg.h>
 #include <limits.h>
+#include <stdarg.h>
 #include <cmath>
 
 #include "src/v8.h"
 
 #include "src/assert-scope.h"
-#include "src/conversions.h"
 #include "src/conversions-inl.h"
+#include "src/conversions.h"
 #include "src/dtoa.h"
 #include "src/factory.h"
 #include "src/list-inl.h"
@@ -197,8 +197,8 @@
   const int kMaxDigitsBeforePoint = 21;
   const double kFirstNonFixed = 1e21;
   const int kMaxDigitsAfterPoint = 20;
-  ASSERT(f >= 0);
-  ASSERT(f <= kMaxDigitsAfterPoint);
+  DCHECK(f >= 0);
+  DCHECK(f <= kMaxDigitsAfterPoint);
 
   bool negative = false;
   double abs_value = value;
@@ -211,7 +211,7 @@
   // use the non-fixed conversion routine.
   if (abs_value >= kFirstNonFixed) {
     char arr[100];
-    Vector<char> buffer(arr, ARRAY_SIZE(arr));
+    Vector<char> buffer(arr, arraysize(arr));
     return StrDup(DoubleToCString(value, buffer));
   }
 
@@ -299,7 +299,7 @@
 char* DoubleToExponentialCString(double value, int f) {
   const int kMaxDigitsAfterPoint = 20;
   // f might be -1 to signal that f was undefined in JavaScript.
-  ASSERT(f >= -1 && f <= kMaxDigitsAfterPoint);
+  DCHECK(f >= -1 && f <= kMaxDigitsAfterPoint);
 
   bool negative = false;
   if (value < 0) {
@@ -316,7 +316,7 @@
   const int kV8DtoaBufferCapacity = kMaxDigitsAfterPoint + 1 + 1;
   // Make sure that the buffer is big enough, even if we fall back to the
   // shortest representation (which happens when f equals -1).
-  ASSERT(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
+  DCHECK(kBase10MaximalLength <= kMaxDigitsAfterPoint + 1);
   char decimal_rep[kV8DtoaBufferCapacity];
   int decimal_rep_length;
 
@@ -330,8 +330,8 @@
                   Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
                   &sign, &decimal_rep_length, &decimal_point);
   }
-  ASSERT(decimal_rep_length > 0);
-  ASSERT(decimal_rep_length <= f + 1);
+  DCHECK(decimal_rep_length > 0);
+  DCHECK(decimal_rep_length <= f + 1);
 
   int exponent = decimal_point - 1;
   char* result =
@@ -344,7 +344,7 @@
 char* DoubleToPrecisionCString(double value, int p) {
   const int kMinimalDigits = 1;
   const int kMaximalDigits = 21;
-  ASSERT(p >= kMinimalDigits && p <= kMaximalDigits);
+  DCHECK(p >= kMinimalDigits && p <= kMaximalDigits);
   USE(kMinimalDigits);
 
   bool negative = false;
@@ -364,7 +364,7 @@
   DoubleToAscii(value, DTOA_PRECISION, p,
                 Vector<char>(decimal_rep, kV8DtoaBufferCapacity),
                 &sign, &decimal_rep_length, &decimal_point);
-  ASSERT(decimal_rep_length <= p);
+  DCHECK(decimal_rep_length <= p);
 
   int exponent = decimal_point - 1;
 
@@ -412,7 +412,7 @@
 
 
 char* DoubleToRadixCString(double value, int radix) {
-  ASSERT(radix >= 2 && radix <= 36);
+  DCHECK(radix >= 2 && radix <= 36);
 
   // Character array used for conversion.
   static const char chars[] = "0123456789abcdefghijklmnopqrstuvwxyz";
@@ -446,7 +446,7 @@
     integer_part /= radix;
   } while (integer_part >= 1.0);
   // Sanity check.
-  ASSERT(integer_pos > 0);
+  DCHECK(integer_pos > 0);
   // Add sign if needed.
   if (is_negative) integer_buffer[integer_pos--] = '-';
 
@@ -490,7 +490,7 @@
   DisallowHeapAllocation no_gc;
   String::FlatContent flat = string->GetFlatContent();
   // ECMA-262 section 15.1.2.3, empty string is NaN
-  if (flat.IsAscii()) {
+  if (flat.IsOneByte()) {
     return StringToDouble(
         unicode_cache, flat.ToOneByteVector(), flags, empty_string_val);
   } else {
diff --git a/src/conversions.h b/src/conversions.h
index a23ea90..6a28b5f 100644
--- a/src/conversions.h
+++ b/src/conversions.h
@@ -7,7 +7,7 @@
 
 #include <limits>
 
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/handles.h"
 #include "src/objects.h"
 #include "src/utils.h"
@@ -41,7 +41,8 @@
 
 // The fast double-to-(unsigned-)int conversion routine does not guarantee
 // rounding towards zero.
-// For NaN and values outside the int range, return INT_MIN or INT_MAX.
+// If x is NaN, the result is INT_MIN.  Otherwise the result is the argument x,
+// clamped to [INT_MIN, INT_MAX] and then rounded to an integer.
 inline int FastD2IChecked(double x) {
   if (!(x >= INT_MIN)) return INT_MIN;  // Negation to catch NaNs.
   if (x > INT_MAX) return INT_MAX;
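
The rewritten comment deserves one extra note: the guard is written as !(x >= INT_MIN) rather than x < INT_MIN because every ordered comparison involving NaN is false, so NaN also takes the first branch and yields INT_MIN. A quick demonstration sketch:

#include <cassert>
#include <climits>
#include <cmath>

int FastD2ICheckedSketch(double x) {
  if (!(x >= INT_MIN)) return INT_MIN;  // catches NaN: NaN >= INT_MIN is false
  if (x > INT_MAX) return INT_MAX;
  return static_cast<int>(x);  // truncates toward zero in this sketch
}

int main() {
  assert(FastD2ICheckedSketch(std::nan("")) == INT_MIN);
  assert(FastD2ICheckedSketch(1e99) == INT_MAX);
  assert(FastD2ICheckedSketch(-3.7) == -3);
  return 0;
}
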
@@ -76,6 +77,10 @@
 }
 
 
+// This function should match the exact semantics of ECMA-262 20.2.2.17.
+inline float DoubleToFloat32(double x);
+
+
 // This function should match the exact semantics of ECMA-262 9.4.
 inline double DoubleToInteger(double x);
 
@@ -152,6 +157,12 @@
 }
 
 
+static inline bool IsSmiDouble(double value) {
+  return !IsMinusZero(value) && value >= Smi::kMinValue &&
+         value <= Smi::kMaxValue && value == FastI2D(FastD2I(value));
+}
+
+
 // Integer32 is an integer that can be represented as a signed 32-bit
 // integer. It has to be in the range [-2^31, 2^31 - 1].
 // We also have to check for negative 0 as it is not an Integer32.
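
The new IsSmiDouble predicate above accepts exactly the doubles that can live in a Smi: integral values inside Smi range, with -0.0 excluded because its sign would be lost. A self-contained sketch, assuming a 31-bit Smi payload (the hypothetical kSmiMin/kSmiMax stand in for Smi::kMinValue/kMaxValue):

#include <cassert>
#include <cmath>

const double kSmiMin = -(1 << 30);  // assumed 31-bit Smi range
const double kSmiMax = (1 << 30) - 1;

bool IsSmiDoubleSketch(double value) {
  return !(value == 0 && std::signbit(value)) &&  // reject -0.0
         value >= kSmiMin && value <= kSmiMax &&
         value == static_cast<double>(static_cast<int>(value));  // integral
}

int main() {
  assert(IsSmiDoubleSketch(42.0));
  assert(!IsSmiDoubleSketch(-0.0));  // must stay a HeapNumber
  assert(!IsSmiDoubleSketch(1.5));   // fractional part
  return 0;
}
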
@@ -198,7 +209,7 @@
   SealHandleScope shs(isolate);
   if (number->IsSmi()) {
     int value = Smi::cast(number)->value();
-    ASSERT(static_cast<unsigned>(Smi::kMaxValue)
+    DCHECK(static_cast<unsigned>(Smi::kMaxValue)
            <= std::numeric_limits<size_t>::max());
     if (value >= 0) {
       *result = static_cast<size_t>(value);
@@ -206,7 +217,7 @@
     }
     return false;
   } else {
-    ASSERT(number->IsHeapNumber());
+    DCHECK(number->IsHeapNumber());
     double value = HeapNumber::cast(number)->value();
     if (value >= 0 &&
         value <= std::numeric_limits<size_t>::max()) {
diff --git a/src/counters.cc b/src/counters.cc
index cdff887..a8dcc0b 100644
--- a/src/counters.cc
+++ b/src/counters.cc
@@ -4,9 +4,9 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/counters.h"
 #include "src/isolate.h"
-#include "src/platform.h"
 
 namespace v8 {
 namespace internal {
@@ -55,6 +55,11 @@
 
 
 Counters::Counters(Isolate* isolate) {
+#define HR(name, caption, min, max, num_buckets) \
+  name##_ = Histogram(#caption, min, max, num_buckets, isolate);
+  HISTOGRAM_RANGE_LIST(HR)
+#undef HR
+
 #define HT(name, caption) \
     name##_ = HistogramTimer(#caption, 0, 10000, 50, isolate);
     HISTOGRAM_TIMER_LIST(HT)
@@ -142,6 +147,10 @@
 
 
 void Counters::ResetHistograms() {
+#define HR(name, caption, min, max, num_buckets) name##_.Reset();
+  HISTOGRAM_RANGE_LIST(HR)
+#undef HR
+
 #define HT(name, caption) name##_.Reset();
     HISTOGRAM_TIMER_LIST(HT)
 #undef HT
diff --git a/src/counters.h b/src/counters.h
index a7d00dc..651cf54 100644
--- a/src/counters.h
+++ b/src/counters.h
@@ -7,9 +7,9 @@
 
 #include "include/v8.h"
 #include "src/allocation.h"
+#include "src/base/platform/elapsed-timer.h"
 #include "src/globals.h"
 #include "src/objects.h"
-#include "src/platform/elapsed-timer.h"
 
 namespace v8 {
 namespace internal {
@@ -139,7 +139,7 @@
   // given counter without calling the runtime system.
   int* GetInternalPointer() {
     int* loc = GetPtr();
-    ASSERT(loc != NULL);
+    DCHECK(loc != NULL);
     return loc;
   }
 
@@ -244,11 +244,11 @@
 
   // TODO(bmeurer): Remove this when HistogramTimerScope is fixed.
 #ifdef DEBUG
-  ElapsedTimer* timer() { return &timer_; }
+  base::ElapsedTimer* timer() { return &timer_; }
 #endif
 
  private:
-  ElapsedTimer timer_;
+  base::ElapsedTimer timer_;
 };
 
 // Helper class for scoping a HistogramTimer.
@@ -291,20 +291,31 @@
 #endif
 };
 
+#define HISTOGRAM_RANGE_LIST(HR)                                              \
+  /* Generic range histograms */                                              \
+  HR(gc_idle_time_allotted_in_ms, V8.GCIdleTimeAllottedInMS, 0, 10000, 101)   \
+  HR(gc_idle_time_limit_overshot, V8.GCIdleTimeLimit.Overshot, 0, 10000, 101) \
+  HR(gc_idle_time_limit_undershot, V8.GCIdleTimeLimit.Undershot, 0, 10000, 101)
 
-#define HISTOGRAM_TIMER_LIST(HT)                                      \
-  /* Garbage collection timers. */                                    \
-  HT(gc_compactor, V8.GCCompactor)                                    \
-  HT(gc_scavenger, V8.GCScavenger)                                    \
-  HT(gc_context, V8.GCContext) /* GC context cleanup time */          \
-  /* Parsing timers. */                                               \
-  HT(parse, V8.Parse)                                                 \
-  HT(parse_lazy, V8.ParseLazy)                                        \
-  HT(pre_parse, V8.PreParse)                                          \
-  /* Total compilation times. */                                      \
-  HT(compile, V8.Compile)                                             \
-  HT(compile_eval, V8.CompileEval)                                    \
-  HT(compile_lazy, V8.CompileLazy)
+#define HISTOGRAM_TIMER_LIST(HT)                             \
+  /* Garbage collection timers. */                           \
+  HT(gc_compactor, V8.GCCompactor)                           \
+  HT(gc_scavenger, V8.GCScavenger)                           \
+  HT(gc_context, V8.GCContext) /* GC context cleanup time */ \
+  HT(gc_idle_notification, V8.GCIdleNotification)            \
+  HT(gc_incremental_marking, V8.GCIncrementalMarking)        \
+  HT(gc_low_memory_notification, V8.GCLowMemoryNotification) \
+  /* Parsing timers. */                                      \
+  HT(parse, V8.Parse)                                        \
+  HT(parse_lazy, V8.ParseLazy)                               \
+  HT(pre_parse, V8.PreParse)                                 \
+  /* Total compilation times. */                             \
+  HT(compile, V8.Compile)                                    \
+  HT(compile_eval, V8.CompileEval)                           \
+  /* Serialization as part of compilation (code caching) */  \
+  HT(compile_serialize, V8.CompileSerialize)                 \
+  HT(compile_deserialize, V8.CompileDeserialize)
+
 
 #define HISTOGRAM_PERCENTAGE_LIST(HP)                                 \
   /* Heap fragmentation. */                                           \
@@ -419,136 +430,139 @@
   SC(store_buffer_overflows, V8.StoreBufferOverflows)
 
 
-#define STATS_COUNTER_LIST_2(SC)                                      \
-  /* Number of code stubs. */                                         \
-  SC(code_stubs, V8.CodeStubs)                                        \
-  /* Amount of stub code. */                                          \
-  SC(total_stubs_code_size, V8.TotalStubsCodeSize)                    \
-  /* Amount of (JS) compiled code. */                                 \
-  SC(total_compiled_code_size, V8.TotalCompiledCodeSize)              \
-  SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest)   \
-  SC(gc_compactor_caused_by_promoted_data,                            \
-     V8.GCCompactorCausedByPromotedData)                              \
-  SC(gc_compactor_caused_by_oldspace_exhaustion,                      \
-     V8.GCCompactorCausedByOldspaceExhaustion)                        \
-  SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                   \
-  SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)         \
-  /* How is the generic keyed-load stub used? */                      \
-  SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                  \
-  SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)            \
-  SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache) \
-  SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)                \
-  SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs)      \
-  SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow)   \
-  /* How is the generic keyed-call stub used? */                      \
-  SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast)         \
-  SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict)         \
-  SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache) \
-  SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict)   \
-  SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow)                \
-  SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad)       \
-  SC(named_load_global_stub, V8.NamedLoadGlobalStub)                  \
-  SC(named_store_global_inline, V8.NamedStoreGlobalInline)            \
-  SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)   \
-  SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs)    \
-  SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow) \
-  SC(store_normal_miss, V8.StoreNormalMiss)                           \
-  SC(store_normal_hit, V8.StoreNormalHit)                             \
-  SC(cow_arrays_created_stub, V8.COWArraysCreatedStub)                \
-  SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime)          \
-  SC(cow_arrays_converted, V8.COWArraysConverted)                     \
-  SC(call_miss, V8.CallMiss)                                          \
-  SC(keyed_call_miss, V8.KeyedCallMiss)                               \
-  SC(load_miss, V8.LoadMiss)                                          \
-  SC(keyed_load_miss, V8.KeyedLoadMiss)                               \
-  SC(call_const, V8.CallConst)                                        \
-  SC(call_const_fast_api, V8.CallConstFastApi)                        \
-  SC(call_const_interceptor, V8.CallConstInterceptor)                 \
-  SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi) \
-  SC(call_global_inline, V8.CallGlobalInline)                         \
-  SC(call_global_inline_miss, V8.CallGlobalInlineMiss)                \
-  SC(constructed_objects, V8.ConstructedObjects)                      \
-  SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)       \
-  SC(negative_lookups, V8.NegativeLookups)                            \
-  SC(negative_lookups_miss, V8.NegativeLookupsMiss)                   \
-  SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes)    \
-  SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses)    \
-  SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates)  \
-  SC(array_function_runtime, V8.ArrayFunctionRuntime)                 \
-  SC(array_function_native, V8.ArrayFunctionNative)                   \
-  SC(for_in, V8.ForIn)                                                \
-  SC(enum_cache_hits, V8.EnumCacheHits)                               \
-  SC(enum_cache_misses, V8.EnumCacheMisses)                           \
-  SC(zone_segment_bytes, V8.ZoneSegmentBytes)                         \
-  SC(fast_new_closure_total, V8.FastNewClosureTotal)                  \
-  SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized)   \
-  SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized) \
-  SC(string_add_runtime, V8.StringAddRuntime)                         \
-  SC(string_add_native, V8.StringAddNative)                           \
-  SC(string_add_runtime_ext_to_ascii, V8.StringAddRuntimeExtToAscii)  \
-  SC(sub_string_runtime, V8.SubStringRuntime)                         \
-  SC(sub_string_native, V8.SubStringNative)                           \
-  SC(string_add_make_two_char, V8.StringAddMakeTwoChar)               \
-  SC(string_compare_native, V8.StringCompareNative)                   \
-  SC(string_compare_runtime, V8.StringCompareRuntime)                 \
-  SC(regexp_entry_runtime, V8.RegExpEntryRuntime)                     \
-  SC(regexp_entry_native, V8.RegExpEntryNative)                       \
-  SC(number_to_string_native, V8.NumberToStringNative)                \
-  SC(number_to_string_runtime, V8.NumberToStringRuntime)              \
-  SC(math_acos, V8.MathAcos)                                          \
-  SC(math_asin, V8.MathAsin)                                          \
-  SC(math_atan, V8.MathAtan)                                          \
-  SC(math_atan2, V8.MathAtan2)                                        \
-  SC(math_exp, V8.MathExp)                                            \
-  SC(math_floor, V8.MathFloor)                                        \
-  SC(math_log, V8.MathLog)                                            \
-  SC(math_pow, V8.MathPow)                                            \
-  SC(math_round, V8.MathRound)                                        \
-  SC(math_sqrt, V8.MathSqrt)                                          \
-  SC(stack_interrupts, V8.StackInterrupts)                            \
-  SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                 \
-  SC(bounds_checks_eliminated, V8.BoundsChecksEliminated)             \
-  SC(bounds_checks_hoisted, V8.BoundsChecksHoisted)                   \
-  SC(soft_deopts_requested, V8.SoftDeoptsRequested)                   \
-  SC(soft_deopts_inserted, V8.SoftDeoptsInserted)                     \
-  SC(soft_deopts_executed, V8.SoftDeoptsExecuted)                     \
-  /* Number of write barriers in generated code. */                   \
-  SC(write_barriers_dynamic, V8.WriteBarriersDynamic)                 \
-  SC(write_barriers_static, V8.WriteBarriersStatic)                   \
-  SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable)      \
-  SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted)      \
-  SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed)                \
-  SC(old_pointer_space_bytes_available,                               \
-     V8.MemoryOldPointerSpaceBytesAvailable)                          \
-  SC(old_pointer_space_bytes_committed,                               \
-     V8.MemoryOldPointerSpaceBytesCommitted)                          \
-  SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed) \
-  SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable) \
-  SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted) \
-  SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed)       \
-  SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable)    \
-  SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted)    \
-  SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed)              \
-  SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable)      \
-  SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted)      \
-  SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed)                \
-  SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable)    \
-  SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted)    \
-  SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed)              \
-  SC(property_cell_space_bytes_available,                             \
-     V8.MemoryPropertyCellSpaceBytesAvailable)                        \
-  SC(property_cell_space_bytes_committed,                             \
-     V8.MemoryPropertyCellSpaceBytesCommitted)                        \
-  SC(property_cell_space_bytes_used,                                  \
-     V8.MemoryPropertyCellSpaceBytesUsed)                             \
-  SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable)        \
-  SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted)        \
+#define STATS_COUNTER_LIST_2(SC)                                               \
+  /* Number of code stubs. */                                                  \
+  SC(code_stubs, V8.CodeStubs)                                                 \
+  /* Amount of stub code. */                                                   \
+  SC(total_stubs_code_size, V8.TotalStubsCodeSize)                             \
+  /* Amount of (JS) compiled code. */                                          \
+  SC(total_compiled_code_size, V8.TotalCompiledCodeSize)                       \
+  SC(gc_compactor_caused_by_request, V8.GCCompactorCausedByRequest)            \
+  SC(gc_compactor_caused_by_promoted_data, V8.GCCompactorCausedByPromotedData) \
+  SC(gc_compactor_caused_by_oldspace_exhaustion,                               \
+     V8.GCCompactorCausedByOldspaceExhaustion)                                 \
+  SC(gc_last_resort_from_js, V8.GCLastResortFromJS)                            \
+  SC(gc_last_resort_from_handles, V8.GCLastResortFromHandles)                  \
+  /* How is the generic keyed-load stub used? */                               \
+  SC(keyed_load_generic_smi, V8.KeyedLoadGenericSmi)                           \
+  SC(keyed_load_generic_symbol, V8.KeyedLoadGenericSymbol)                     \
+  SC(keyed_load_generic_lookup_cache, V8.KeyedLoadGenericLookupCache)          \
+  SC(keyed_load_generic_slow, V8.KeyedLoadGenericSlow)                         \
+  SC(keyed_load_polymorphic_stubs, V8.KeyedLoadPolymorphicStubs)               \
+  SC(keyed_load_external_array_slow, V8.KeyedLoadExternalArraySlow)            \
+  /* How is the generic keyed-call stub used? */                               \
+  SC(keyed_call_generic_smi_fast, V8.KeyedCallGenericSmiFast)                  \
+  SC(keyed_call_generic_smi_dict, V8.KeyedCallGenericSmiDict)                  \
+  SC(keyed_call_generic_lookup_cache, V8.KeyedCallGenericLookupCache)          \
+  SC(keyed_call_generic_lookup_dict, V8.KeyedCallGenericLookupDict)            \
+  SC(keyed_call_generic_slow, V8.KeyedCallGenericSlow)                         \
+  SC(keyed_call_generic_slow_load, V8.KeyedCallGenericSlowLoad)                \
+  SC(named_load_global_stub, V8.NamedLoadGlobalStub)                           \
+  SC(named_store_global_inline, V8.NamedStoreGlobalInline)                     \
+  SC(named_store_global_inline_miss, V8.NamedStoreGlobalInlineMiss)            \
+  SC(keyed_store_polymorphic_stubs, V8.KeyedStorePolymorphicStubs)             \
+  SC(keyed_store_external_array_slow, V8.KeyedStoreExternalArraySlow)          \
+  SC(store_normal_miss, V8.StoreNormalMiss)                                    \
+  SC(store_normal_hit, V8.StoreNormalHit)                                      \
+  SC(cow_arrays_created_stub, V8.COWArraysCreatedStub)                         \
+  SC(cow_arrays_created_runtime, V8.COWArraysCreatedRuntime)                   \
+  SC(cow_arrays_converted, V8.COWArraysConverted)                              \
+  SC(call_miss, V8.CallMiss)                                                   \
+  SC(keyed_call_miss, V8.KeyedCallMiss)                                        \
+  SC(load_miss, V8.LoadMiss)                                                   \
+  SC(keyed_load_miss, V8.KeyedLoadMiss)                                        \
+  SC(call_const, V8.CallConst)                                                 \
+  SC(call_const_fast_api, V8.CallConstFastApi)                                 \
+  SC(call_const_interceptor, V8.CallConstInterceptor)                          \
+  SC(call_const_interceptor_fast_api, V8.CallConstInterceptorFastApi)          \
+  SC(call_global_inline, V8.CallGlobalInline)                                  \
+  SC(call_global_inline_miss, V8.CallGlobalInlineMiss)                         \
+  SC(constructed_objects, V8.ConstructedObjects)                               \
+  SC(constructed_objects_runtime, V8.ConstructedObjectsRuntime)                \
+  SC(negative_lookups, V8.NegativeLookups)                                     \
+  SC(negative_lookups_miss, V8.NegativeLookupsMiss)                            \
+  SC(megamorphic_stub_cache_probes, V8.MegamorphicStubCacheProbes)             \
+  SC(megamorphic_stub_cache_misses, V8.MegamorphicStubCacheMisses)             \
+  SC(megamorphic_stub_cache_updates, V8.MegamorphicStubCacheUpdates)           \
+  SC(array_function_runtime, V8.ArrayFunctionRuntime)                          \
+  SC(array_function_native, V8.ArrayFunctionNative)                            \
+  SC(for_in, V8.ForIn)                                                         \
+  SC(enum_cache_hits, V8.EnumCacheHits)                                        \
+  SC(enum_cache_misses, V8.EnumCacheMisses)                                    \
+  SC(zone_segment_bytes, V8.ZoneSegmentBytes)                                  \
+  SC(fast_new_closure_total, V8.FastNewClosureTotal)                           \
+  SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized)            \
+  SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized)    \
+  SC(string_add_runtime, V8.StringAddRuntime)                                  \
+  SC(string_add_native, V8.StringAddNative)                                    \
+  SC(string_add_runtime_ext_to_one_byte, V8.StringAddRuntimeExtToOneByte)      \
+  SC(sub_string_runtime, V8.SubStringRuntime)                                  \
+  SC(sub_string_native, V8.SubStringNative)                                    \
+  SC(string_add_make_two_char, V8.StringAddMakeTwoChar)                        \
+  SC(string_compare_native, V8.StringCompareNative)                            \
+  SC(string_compare_runtime, V8.StringCompareRuntime)                          \
+  SC(regexp_entry_runtime, V8.RegExpEntryRuntime)                              \
+  SC(regexp_entry_native, V8.RegExpEntryNative)                                \
+  SC(number_to_string_native, V8.NumberToStringNative)                         \
+  SC(number_to_string_runtime, V8.NumberToStringRuntime)                       \
+  SC(math_acos, V8.MathAcos)                                                   \
+  SC(math_asin, V8.MathAsin)                                                   \
+  SC(math_atan, V8.MathAtan)                                                   \
+  SC(math_atan2, V8.MathAtan2)                                                 \
+  SC(math_exp, V8.MathExp)                                                     \
+  SC(math_floor, V8.MathFloor)                                                 \
+  SC(math_log, V8.MathLog)                                                     \
+  SC(math_pow, V8.MathPow)                                                     \
+  SC(math_round, V8.MathRound)                                                 \
+  SC(math_sqrt, V8.MathSqrt)                                                   \
+  SC(stack_interrupts, V8.StackInterrupts)                                     \
+  SC(runtime_profiler_ticks, V8.RuntimeProfilerTicks)                          \
+  SC(bounds_checks_eliminated, V8.BoundsChecksEliminated)                      \
+  SC(bounds_checks_hoisted, V8.BoundsChecksHoisted)                            \
+  SC(soft_deopts_requested, V8.SoftDeoptsRequested)                            \
+  SC(soft_deopts_inserted, V8.SoftDeoptsInserted)                              \
+  SC(soft_deopts_executed, V8.SoftDeoptsExecuted)                              \
+  /* Number of write barriers in generated code. */                            \
+  SC(write_barriers_dynamic, V8.WriteBarriersDynamic)                          \
+  SC(write_barriers_static, V8.WriteBarriersStatic)                            \
+  SC(new_space_bytes_available, V8.MemoryNewSpaceBytesAvailable)               \
+  SC(new_space_bytes_committed, V8.MemoryNewSpaceBytesCommitted)               \
+  SC(new_space_bytes_used, V8.MemoryNewSpaceBytesUsed)                         \
+  SC(old_pointer_space_bytes_available,                                        \
+     V8.MemoryOldPointerSpaceBytesAvailable)                                   \
+  SC(old_pointer_space_bytes_committed,                                        \
+     V8.MemoryOldPointerSpaceBytesCommitted)                                   \
+  SC(old_pointer_space_bytes_used, V8.MemoryOldPointerSpaceBytesUsed)          \
+  SC(old_data_space_bytes_available, V8.MemoryOldDataSpaceBytesAvailable)      \
+  SC(old_data_space_bytes_committed, V8.MemoryOldDataSpaceBytesCommitted)      \
+  SC(old_data_space_bytes_used, V8.MemoryOldDataSpaceBytesUsed)                \
+  SC(code_space_bytes_available, V8.MemoryCodeSpaceBytesAvailable)             \
+  SC(code_space_bytes_committed, V8.MemoryCodeSpaceBytesCommitted)             \
+  SC(code_space_bytes_used, V8.MemoryCodeSpaceBytesUsed)                       \
+  SC(map_space_bytes_available, V8.MemoryMapSpaceBytesAvailable)               \
+  SC(map_space_bytes_committed, V8.MemoryMapSpaceBytesCommitted)               \
+  SC(map_space_bytes_used, V8.MemoryMapSpaceBytesUsed)                         \
+  SC(cell_space_bytes_available, V8.MemoryCellSpaceBytesAvailable)             \
+  SC(cell_space_bytes_committed, V8.MemoryCellSpaceBytesCommitted)             \
+  SC(cell_space_bytes_used, V8.MemoryCellSpaceBytesUsed)                       \
+  SC(property_cell_space_bytes_available,                                      \
+     V8.MemoryPropertyCellSpaceBytesAvailable)                                 \
+  SC(property_cell_space_bytes_committed,                                      \
+     V8.MemoryPropertyCellSpaceBytesCommitted)                                 \
+  SC(property_cell_space_bytes_used, V8.MemoryPropertyCellSpaceBytesUsed)      \
+  SC(lo_space_bytes_available, V8.MemoryLoSpaceBytesAvailable)                 \
+  SC(lo_space_bytes_committed, V8.MemoryLoSpaceBytesCommitted)                 \
   SC(lo_space_bytes_used, V8.MemoryLoSpaceBytesUsed)
 
 
 // This file contains all the v8 counters that are in use.
 class Counters {
  public:
+#define HR(name, caption, min, max, num_buckets) \
+  Histogram* name() { return &name##_; }
+  HISTOGRAM_RANGE_LIST(HR)
+#undef HR
+
 #define HT(name, caption) \
   HistogramTimer* name() { return &name##_; }
   HISTOGRAM_TIMER_LIST(HT)
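
The HR/HT/SC lists used throughout counters.h are X-macros: each call site defines the macro to emit whatever per-entry code it needs (an accessor here, a data member below, initialization in the constructor), invokes the list, and undefines it. A minimal sketch of the pattern with hypothetical names:

#include <cstdio>

#define COUNTER_LIST(V) \
  V(foo, "Foo")         \
  V(bar, "Bar")

struct Counters {
  // One accessor per list entry.
#define DECL(name, caption) int name() const { return name##_; }
  COUNTER_LIST(DECL)
#undef DECL

  // One data member per list entry.
#define FIELD(name, caption) int name##_ = 0;
  COUNTER_LIST(FIELD)
#undef FIELD
};

int main() {
  Counters c;
  std::printf("%d %d\n", c.foo(), c.bar());  // prints "0 0"
  return 0;
}

Adding a histogram is then a one-line change to the list; every accessor, member, and reset loop picks it up automatically.
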
@@ -636,6 +650,10 @@
   void ResetHistograms();
 
  private:
+#define HR(name, caption, min, max, num_buckets) Histogram name##_;
+  HISTOGRAM_RANGE_LIST(HR)
+#undef HR
+
 #define HT(name, caption) \
   HistogramTimer name##_;
   HISTOGRAM_TIMER_LIST(HT)
diff --git a/src/cpu-profiler.cc b/src/cpu-profiler.cc
index 49a1d7a..68a565c 100644
--- a/src/cpu-profiler.cc
+++ b/src/cpu-profiler.cc
@@ -23,7 +23,7 @@
 ProfilerEventsProcessor::ProfilerEventsProcessor(
     ProfileGenerator* generator,
     Sampler* sampler,
-    TimeDelta period)
+    base::TimeDelta period)
     : Thread(Thread::Options("v8:ProfEvntProc", kProfilerStackSize)),
       generator_(generator),
       sampler_(sampler),
@@ -108,7 +108,7 @@
 
 void ProfilerEventsProcessor::Run() {
   while (running_) {
-    ElapsedTimer timer;
+    base::ElapsedTimer timer;
     timer.Start();
     // Keep processing existing events until we need to do next sample.
     do {
@@ -222,21 +222,21 @@
 }
 
 
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Code* code,
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
                                   SharedFunctionInfo* shared,
-                                  CompilationInfo* info,
-                                  Name* name) {
+                                  CompilationInfo* info, Name* script_name) {
   if (FilterOutCodeCreateEvent(tag)) return;
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->start = code->address();
-  rec->entry = profiles_->NewCodeEntry(tag, profiles_->GetFunctionName(name));
+  rec->entry = profiles_->NewCodeEntry(
+      tag, profiles_->GetFunctionName(shared->DebugName()),
+      CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name));
   if (info) {
     rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
   }
   if (shared->script()->IsScript()) {
-    ASSERT(Script::cast(shared->script()));
+    DCHECK(Script::cast(shared->script()));
     Script* script = Script::cast(shared->script());
     rec->entry->set_script_id(script->id()->value());
     rec->entry->set_bailout_reason(
@@ -248,26 +248,22 @@
 }
 
 
-void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
-                                  Code* code,
+void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
                                   SharedFunctionInfo* shared,
-                                  CompilationInfo* info,
-                                  Name* source, int line, int column) {
+                                  CompilationInfo* info, Name* script_name,
+                                  int line, int column) {
   if (FilterOutCodeCreateEvent(tag)) return;
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_CREATION);
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
   rec->start = code->address();
   rec->entry = profiles_->NewCodeEntry(
-      tag,
-      profiles_->GetFunctionName(shared->DebugName()),
-      CodeEntry::kEmptyNamePrefix,
-      profiles_->GetName(source),
-      line,
+      tag, profiles_->GetFunctionName(shared->DebugName()),
+      CodeEntry::kEmptyNamePrefix, profiles_->GetName(script_name), line,
       column);
   if (info) {
     rec->entry->set_no_frame_ranges(info->ReleaseNoFrameRanges());
   }
-  ASSERT(Script::cast(shared->script()));
+  DCHECK(Script::cast(shared->script()));
   Script* script = Script::cast(shared->script());
   rec->entry->set_script_id(script->id()->value());
   rec->size = code->ExecutableSize();
@@ -373,7 +369,7 @@
 
 CpuProfiler::CpuProfiler(Isolate* isolate)
     : isolate_(isolate),
-      sampling_interval_(TimeDelta::FromMicroseconds(
+      sampling_interval_(base::TimeDelta::FromMicroseconds(
           FLAG_cpu_profiler_sampling_interval)),
       profiles_(new CpuProfilesCollection(isolate->heap())),
       generator_(NULL),
@@ -387,7 +383,7 @@
                          ProfileGenerator* test_generator,
                          ProfilerEventsProcessor* test_processor)
     : isolate_(isolate),
-      sampling_interval_(TimeDelta::FromMicroseconds(
+      sampling_interval_(base::TimeDelta::FromMicroseconds(
           FLAG_cpu_profiler_sampling_interval)),
       profiles_(test_profiles),
       generator_(test_generator),
@@ -397,13 +393,13 @@
 
 
 CpuProfiler::~CpuProfiler() {
-  ASSERT(!is_profiling_);
+  DCHECK(!is_profiling_);
   delete profiles_;
 }
 
 
-void CpuProfiler::set_sampling_interval(TimeDelta value) {
-  ASSERT(!is_profiling_);
+void CpuProfiler::set_sampling_interval(base::TimeDelta value) {
+  DCHECK(!is_profiling_);
   sampling_interval_ = value;
 }
 
@@ -441,7 +437,7 @@
       generator_, sampler, sampling_interval_);
   is_profiling_ = true;
   // Enumerate stuff we already have in the heap.
-  ASSERT(isolate_->heap()->HasBeenSetUp());
+  DCHECK(isolate_->heap()->HasBeenSetUp());
   if (!FLAG_prof_browser_mode) {
     logger->LogCodeObjects();
   }
@@ -497,7 +493,7 @@
 
 void CpuProfiler::LogBuiltins() {
   Builtins* builtins = isolate_->builtins();
-  ASSERT(builtins->is_initialized());
+  DCHECK(builtins->is_initialized());
   for (int i = 0; i < Builtins::builtin_count; i++) {
     CodeEventsContainer evt_rec(CodeEventRecord::REPORT_BUILTIN);
     ReportBuiltinEventRecord* rec = &evt_rec.ReportBuiltinEventRecord_;
diff --git a/src/cpu-profiler.h b/src/cpu-profiler.h
index f5f2014..c1e75a1 100644
--- a/src/cpu-profiler.h
+++ b/src/cpu-profiler.h
@@ -7,8 +7,8 @@
 
 #include "src/allocation.h"
 #include "src/base/atomicops.h"
+#include "src/base/platform/time.h"
 #include "src/circular-queue.h"
-#include "src/platform/time.h"
 #include "src/sampler.h"
 #include "src/unbound-queue.h"
 
@@ -122,11 +122,11 @@
 
 // This class implements both the profile events processor thread and
 // methods called by event producers: VM and stack sampler threads.
-class ProfilerEventsProcessor : public Thread {
+class ProfilerEventsProcessor : public base::Thread {
  public:
   ProfilerEventsProcessor(ProfileGenerator* generator,
                           Sampler* sampler,
-                          TimeDelta period);
+                          base::TimeDelta period);
   virtual ~ProfilerEventsProcessor() {}
 
   // Thread control.
@@ -165,7 +165,7 @@
   Sampler* sampler_;
   bool running_;
   // Sampling period in microseconds.
-  const TimeDelta period_;
+  const base::TimeDelta period_;
   UnboundQueue<CodeEventsContainer> events_buffer_;
   static const size_t kTickSampleBufferSize = 1 * MB;
   static const size_t kTickSampleQueueLength =
@@ -200,7 +200,7 @@
 
   virtual ~CpuProfiler();
 
-  void set_sampling_interval(TimeDelta value);
+  void set_sampling_interval(base::TimeDelta value);
   void StartProfiling(const char* title, bool record_samples = false);
   void StartProfiling(String* title, bool record_samples);
   CpuProfile* StopProfiling(const char* title);
@@ -221,16 +221,13 @@
                                Code* code, const char* comment);
   virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                                Code* code, Name* name);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
+  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
                                SharedFunctionInfo* shared,
-                               CompilationInfo* info,
-                               Name* name);
-  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
-                               Code* code,
+                               CompilationInfo* info, Name* script_name);
+  virtual void CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
                                SharedFunctionInfo* shared,
-                               CompilationInfo* info,
-                               Name* source, int line, int column);
+                               CompilationInfo* info, Name* script_name,
+                               int line, int column);
   virtual void CodeCreateEvent(Logger::LogEventsAndTags tag,
                                Code* code, int args_count);
   virtual void CodeMovingGCEvent() {}
@@ -259,7 +256,7 @@
   void LogBuiltins();
 
   Isolate* isolate_;
-  TimeDelta sampling_interval_;
+  base::TimeDelta sampling_interval_;
   CpuProfilesCollection* profiles_;
   ProfileGenerator* generator_;
   ProfilerEventsProcessor* processor_;
diff --git a/src/cpu.cc b/src/cpu.cc
deleted file mode 100644
index 01c1036..0000000
--- a/src/cpu.cc
+++ /dev/null
@@ -1,498 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/cpu.h"
-
-#if V8_LIBC_MSVCRT
-#include <intrin.h>  // __cpuid()
-#endif
-#if V8_OS_POSIX
-#include <unistd.h>  // sysconf()
-#endif
-#if V8_OS_QNX
-#include <sys/syspage.h>  // cpuinfo
-#endif
-
-#include <ctype.h>
-#include <limits.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <algorithm>
-
-#include "src/checks.h"
-#if V8_OS_WIN
-#include "src/base/win32-headers.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-
-// Define __cpuid() for non-MSVC libraries.
-#if !V8_LIBC_MSVCRT
-
-static V8_INLINE void __cpuid(int cpu_info[4], int info_type) {
-#if defined(__i386__) && defined(__pic__)
-  // Make sure to preserve ebx, which contains the pointer
-  // to the GOT in case we're generating PIC.
-  __asm__ volatile (
-    "mov %%ebx, %%edi\n\t"
-    "cpuid\n\t"
-    "xchg %%edi, %%ebx\n\t"
-    : "=a"(cpu_info[0]), "=D"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
-    : "a"(info_type)
-  );
-#else
-  __asm__ volatile (
-    "cpuid \n\t"
-    : "=a"(cpu_info[0]), "=b"(cpu_info[1]), "=c"(cpu_info[2]), "=d"(cpu_info[3])
-    : "a"(info_type)
-  );
-#endif  // defined(__i386__) && defined(__pic__)
-}
-
-#endif  // !V8_LIBC_MSVCRT
-
-#elif V8_HOST_ARCH_ARM || V8_HOST_ARCH_ARM64 || V8_HOST_ARCH_MIPS
-
-#if V8_OS_LINUX
-
-#if V8_HOST_ARCH_ARM
-
-// See <uapi/asm/hwcap.h> kernel header.
-/*
- * HWCAP flags - for elf_hwcap (in kernel) and AT_HWCAP
- */
-#define HWCAP_SWP (1 << 0)
-#define HWCAP_HALF  (1 << 1)
-#define HWCAP_THUMB (1 << 2)
-#define HWCAP_26BIT (1 << 3)  /* Play it safe */
-#define HWCAP_FAST_MULT (1 << 4)
-#define HWCAP_FPA (1 << 5)
-#define HWCAP_VFP (1 << 6)
-#define HWCAP_EDSP  (1 << 7)
-#define HWCAP_JAVA  (1 << 8)
-#define HWCAP_IWMMXT  (1 << 9)
-#define HWCAP_CRUNCH  (1 << 10)
-#define HWCAP_THUMBEE (1 << 11)
-#define HWCAP_NEON  (1 << 12)
-#define HWCAP_VFPv3 (1 << 13)
-#define HWCAP_VFPv3D16  (1 << 14) /* also set for VFPv4-D16 */
-#define HWCAP_TLS (1 << 15)
-#define HWCAP_VFPv4 (1 << 16)
-#define HWCAP_IDIVA (1 << 17)
-#define HWCAP_IDIVT (1 << 18)
-#define HWCAP_VFPD32  (1 << 19) /* set if VFP has 32 regs (not 16) */
-#define HWCAP_IDIV  (HWCAP_IDIVA | HWCAP_IDIVT)
-#define HWCAP_LPAE  (1 << 20)
-
-#define AT_HWCAP 16
-
-// Read the ELF HWCAP flags by parsing /proc/self/auxv.
-static uint32_t ReadELFHWCaps() {
-  uint32_t result = 0;
-  FILE* fp = fopen("/proc/self/auxv", "r");
-  if (fp != NULL) {
-    struct { uint32_t tag; uint32_t value; } entry;
-    for (;;) {
-      size_t n = fread(&entry, sizeof(entry), 1, fp);
-      if (n == 0 || (entry.tag == 0 && entry.value == 0)) {
-        break;
-      }
-      if (entry.tag == AT_HWCAP) {
-        result = entry.value;
-        break;
-      }
-    }
-    fclose(fp);
-  }
-  return result;
-}
-
-#endif  // V8_HOST_ARCH_ARM
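
Aside: the deleted ReadELFHWCaps parses /proc/self/auxv by hand because it predates wide availability of getauxval; on Linux with glibc 2.16+ the same flags are one call away (a sketch of the alternative, not what this code used):

#include <sys/auxv.h>  // getauxval, AT_HWCAP (glibc >= 2.16)
#include <cstdio>

int main() {
  unsigned long hwcaps = getauxval(AT_HWCAP);  // returns 0 if absent
  std::printf("HWCAP = 0x%lx\n", hwcaps);
  return 0;
}
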
-
-// Extract the information exposed by the kernel via /proc/cpuinfo.
-class CPUInfo V8_FINAL BASE_EMBEDDED {
- public:
-  CPUInfo() : datalen_(0) {
-    // Get the size of the cpuinfo file by reading it until the end. This is
-    // required because files under /proc do not always return a valid size
-    // when using fseek(0, SEEK_END) + ftell(). Nor can they be mmap()-ed.
-    static const char PATHNAME[] = "/proc/cpuinfo";
-    FILE* fp = fopen(PATHNAME, "r");
-    if (fp != NULL) {
-      for (;;) {
-        char buffer[256];
-        size_t n = fread(buffer, 1, sizeof(buffer), fp);
-        if (n == 0) {
-          break;
-        }
-        datalen_ += n;
-      }
-      fclose(fp);
-    }
-
-    // Read the contents of the cpuinfo file.
-    data_ = new char[datalen_ + 1];
-    fp = fopen(PATHNAME, "r");
-    if (fp != NULL) {
-      for (size_t offset = 0; offset < datalen_; ) {
-        size_t n = fread(data_ + offset, 1, datalen_ - offset, fp);
-        if (n == 0) {
-          break;
-        }
-        offset += n;
-      }
-      fclose(fp);
-    }
-
-    // Zero-terminate the data.
-    data_[datalen_] = '\0';
-  }
-
-  ~CPUInfo() {
-    delete[] data_;
-  }
-
-  // Extract the content of the first occurrence of a given field in
-  // the content of the cpuinfo file and return it as a heap-allocated
-  // string that must be freed by the caller using delete[].
-  // Return NULL if not found.
-  char* ExtractField(const char* field) const {
-    ASSERT(field != NULL);
-
-    // Look for the first field occurrence, and ensure it starts the line.
-    size_t fieldlen = strlen(field);
-    char* p = data_;
-    for (;;) {
-      p = strstr(p, field);
-      if (p == NULL) {
-        return NULL;
-      }
-      if (p == data_ || p[-1] == '\n') {
-        break;
-      }
-      p += fieldlen;
-    }
-
-    // Skip to the first colon followed by a space.
-    p = strchr(p + fieldlen, ':');
-    if (p == NULL || !isspace(p[1])) {
-      return NULL;
-    }
-    p += 2;
-
-    // Find the end of the line.
-    char* q = strchr(p, '\n');
-    if (q == NULL) {
-      q = data_ + datalen_;
-    }
-
-    // Copy the line into a heap-allocated buffer.
-    size_t len = q - p;
-    char* result = new char[len + 1];
-    if (result != NULL) {
-      memcpy(result, p, len);
-      result[len] = '\0';
-    }
-    return result;
-  }
-
- private:
-  char* data_;
-  size_t datalen_;
-};
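
For comparison, the field lookup this class implements over a slurped buffer maps naturally onto std::getline (a sketch, not the original implementation):

#include <fstream>
#include <string>

// Returns the value after "field ... :" on the first matching line of
// /proc/cpuinfo, or an empty string if the field is absent.
std::string ExtractCpuinfoField(const std::string& field) {
  std::ifstream in("/proc/cpuinfo");
  std::string line;
  while (std::getline(in, line)) {
    if (line.compare(0, field.size(), field) != 0) continue;  // must start line
    std::size_t colon = line.find(':');
    if (colon == std::string::npos) continue;
    std::size_t start = line.find_first_not_of(" \t", colon + 1);
    return start == std::string::npos ? std::string() : line.substr(start);
  }
  return std::string();
}
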
-
-#if V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
-
-// Checks that a space-separated list of items contains one given 'item'.
-static bool HasListItem(const char* list, const char* item) {
-  ssize_t item_len = strlen(item);
-  const char* p = list;
-  if (p != NULL) {
-    while (*p != '\0') {
-      // Skip whitespace.
-      while (isspace(*p)) ++p;
-
-      // Find end of current list item.
-      const char* q = p;
-      while (*q != '\0' && !isspace(*q)) ++q;
-
-      if (item_len == q - p && memcmp(p, item, item_len) == 0) {
-        return true;
-      }
-
-      // Skip to next item.
-      p = q;
-    }
-  }
-  return false;
-}
-
-#endif  // V8_HOST_ARCH_ARM || V8_HOST_ARCH_MIPS
-
-#endif  // V8_OS_LINUX
-
-#endif  // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-
-CPU::CPU() : stepping_(0),
-             model_(0),
-             ext_model_(0),
-             family_(0),
-             ext_family_(0),
-             type_(0),
-             implementer_(0),
-             architecture_(0),
-             part_(0),
-             has_fpu_(false),
-             has_cmov_(false),
-             has_sahf_(false),
-             has_mmx_(false),
-             has_sse_(false),
-             has_sse2_(false),
-             has_sse3_(false),
-             has_ssse3_(false),
-             has_sse41_(false),
-             has_sse42_(false),
-             has_idiva_(false),
-             has_neon_(false),
-             has_thumb2_(false),
-             has_vfp_(false),
-             has_vfp3_(false),
-             has_vfp3_d32_(false) {
-  memcpy(vendor_, "Unknown", 8);
-#if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
-  int cpu_info[4];
-
-  // __cpuid with an InfoType argument of 0 returns the number of
-  // valid Ids in CPUInfo[0] and the CPU identification string in
-  // the other three array elements. The CPU identification string is
-  // not in linear order. The code below arranges the information
-  // in a human readable form. The human readable order is CPUInfo[1] |
-  // CPUInfo[3] | CPUInfo[2]. CPUInfo[2] and CPUInfo[3] are swapped
-  // before using memcpy to copy these three array elements to cpu_string.
-  __cpuid(cpu_info, 0);
-  unsigned num_ids = cpu_info[0];
-  std::swap(cpu_info[2], cpu_info[3]);
-  memcpy(vendor_, cpu_info + 1, 12);
-  vendor_[12] = '\0';
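
The swap above exists because CPUID leaf 0 hands the vendor string back in EBX, EDX, ECX order ("Genu", "ineI", "ntel" on Intel parts), while __cpuid stores the registers as EAX, EBX, ECX, EDX; swapping elements 2 and 3 makes the three string registers contiguous for the memcpy. The same decode with GCC/Clang's <cpuid.h>, as a sketch:

#include <cpuid.h>  // __get_cpuid (GCC/Clang)
#include <cstdio>
#include <cstring>

int main() {
  unsigned eax, ebx, ecx, edx;
  if (__get_cpuid(0, &eax, &ebx, &ecx, &edx)) {
    char vendor[13];
    std::memcpy(vendor + 0, &ebx, 4);  // "Genu"
    std::memcpy(vendor + 4, &edx, 4);  // "ineI"
    std::memcpy(vendor + 8, &ecx, 4);  // "ntel"
    vendor[12] = '\0';
    std::printf("%s\n", vendor);  // e.g. "GenuineIntel"
  }
  return 0;
}
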
-
-  // Interpret CPU feature information.
-  if (num_ids > 0) {
-    __cpuid(cpu_info, 1);
-    stepping_ = cpu_info[0] & 0xf;
-    model_ = ((cpu_info[0] >> 4) & 0xf) + ((cpu_info[0] >> 12) & 0xf0);
-    family_ = (cpu_info[0] >> 8) & 0xf;
-    type_ = (cpu_info[0] >> 12) & 0x3;
-    ext_model_ = (cpu_info[0] >> 16) & 0xf;
-    ext_family_ = (cpu_info[0] >> 20) & 0xff;
-    has_fpu_ = (cpu_info[3] & 0x00000001) != 0;
-    has_cmov_ = (cpu_info[3] & 0x00008000) != 0;
-    has_mmx_ = (cpu_info[3] & 0x00800000) != 0;
-    has_sse_ = (cpu_info[3] & 0x02000000) != 0;
-    has_sse2_ = (cpu_info[3] & 0x04000000) != 0;
-    has_sse3_ = (cpu_info[2] & 0x00000001) != 0;
-    has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
-    has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
-    has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
-  }
-
-#if V8_HOST_ARCH_IA32
-  // SAHF is always available in compat/legacy mode.
-  has_sahf_ = true;
-#else
-  // Query extended IDs.
-  __cpuid(cpu_info, 0x80000000);
-  unsigned num_ext_ids = cpu_info[0];
-
-  // Interpret extended CPU feature information.
-  if (num_ext_ids > 0x80000000) {
-    __cpuid(cpu_info, 0x80000001);
-    // SAHF must be probed in long mode.
-    has_sahf_ = (cpu_info[2] & 0x00000001) != 0;
-  }
-#endif
-
-#elif V8_HOST_ARCH_ARM
-
-#if V8_OS_LINUX
-
-  CPUInfo cpu_info;
-
-  // Extract implementor from the "CPU implementer" field.
-  char* implementer = cpu_info.ExtractField("CPU implementer");
-  if (implementer != NULL) {
-    char* end;
-    implementer_ = strtol(implementer, &end, 0);
-    if (end == implementer) {
-      implementer_ = 0;
-    }
-    delete[] implementer;
-  }
-
-  // Extract part number from the "CPU part" field.
-  char* part = cpu_info.ExtractField("CPU part");
-  if (part != NULL) {
-    char* end;
-    part_ = strtol(part, &end, 0);
-    if (end == part) {
-      part_ = 0;
-    }
-    delete[] part;
-  }
-
-  // Extract architecture from the "CPU Architecture" field.
-  // The list is well-known, unlike the output of
-  // the 'Processor' field, which can vary greatly.
-  // See the definition of the 'proc_arch' array in
-  // $KERNEL/arch/arm/kernel/setup.c and the 'c_show' function in
-  // same file.
-  char* architecture = cpu_info.ExtractField("CPU architecture");
-  if (architecture != NULL) {
-    char* end;
-    architecture_ = strtol(architecture, &end, 10);
-    if (end == architecture) {
-      architecture_ = 0;
-    }
-    delete[] architecture;
-
-    // Unfortunately, it seems that certain ARMv6-based CPUs
-    // report an incorrect architecture number of 7!
-    //
-    // See http://code.google.com/p/android/issues/detail?id=10812
-    //
-    // We try to correct this by looking at the 'elf_format'
-    // field reported by the 'Processor' field, which is of the
-    // form of "(v7l)" for an ARMv7-based CPU, and "(v6l)" for
-    // an ARMv6 one. For example, the Raspberry Pi is one popular
-    // ARMv6 device that reports architecture 7.
-    if (architecture_ == 7) {
-      char* processor = cpu_info.ExtractField("Processor");
-      if (HasListItem(processor, "(v6l)")) {
-        architecture_ = 6;
-      }
-      delete[] processor;
-    }
-  }
-
-  // Try to extract the list of CPU features from ELF hwcaps.
-  uint32_t hwcaps = ReadELFHWCaps();
-  if (hwcaps != 0) {
-    has_idiva_ = (hwcaps & HWCAP_IDIVA) != 0;
-    has_neon_ = (hwcaps & HWCAP_NEON) != 0;
-    has_vfp_ = (hwcaps & HWCAP_VFP) != 0;
-    has_vfp3_ = (hwcaps & (HWCAP_VFPv3 | HWCAP_VFPv3D16 | HWCAP_VFPv4)) != 0;
-    has_vfp3_d32_ = (has_vfp3_ && ((hwcaps & HWCAP_VFPv3D16) == 0 ||
-                                   (hwcaps & HWCAP_VFPD32) != 0));
-  } else {
-    // Try to fallback to "Features" CPUInfo field.
-    char* features = cpu_info.ExtractField("Features");
-    has_idiva_ = HasListItem(features, "idiva");
-    has_neon_ = HasListItem(features, "neon");
-    has_thumb2_ = HasListItem(features, "thumb2");
-    has_vfp_ = HasListItem(features, "vfp");
-    if (HasListItem(features, "vfpv3d16")) {
-      has_vfp3_ = true;
-    } else if (HasListItem(features, "vfpv3")) {
-      has_vfp3_ = true;
-      has_vfp3_d32_ = true;
-    }
-    delete[] features;
-  }
-
-  // Some old kernels will report vfp not vfpv3. Here we make an attempt
-  // to detect vfpv3 by checking for vfp *and* neon, since neon is only
-  // available on architectures with vfpv3. Checking neon on its own is
-  // not enough as it is possible to have neon without vfp.
-  if (has_vfp_ && has_neon_) {
-    has_vfp3_ = true;
-  }
-
-  // VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
-  if (architecture_ < 7 && has_vfp3_) {
-    architecture_ = 7;
-  }
-
-  // ARMv7 implies Thumb2.
-  if (architecture_ >= 7) {
-    has_thumb2_ = true;
-  }
-
-  // The earliest architecture with Thumb2 is ARMv6T2.
-  if (has_thumb2_ && architecture_ < 6) {
-    architecture_ = 6;
-  }
-
-  // We don't support any FPUs other than VFP.
-  has_fpu_ = has_vfp_;
-
-#elif V8_OS_QNX
-
-  uint32_t cpu_flags = SYSPAGE_ENTRY(cpuinfo)->flags;
-  if (cpu_flags & ARM_CPU_FLAG_V7) {
-    architecture_ = 7;
-    has_thumb2_ = true;
-  } else if (cpu_flags & ARM_CPU_FLAG_V6) {
-    architecture_ = 6;
-    // QNX doesn't say if Thumb2 is available.
-    // Assume false for architectures older than ARMv7.
-  }
-  ASSERT(architecture_ >= 6);
-  has_fpu_ = (cpu_flags & CPU_FLAG_FPU) != 0;
-  has_vfp_ = has_fpu_;
-  if (cpu_flags & ARM_CPU_FLAG_NEON) {
-    has_neon_ = true;
-    has_vfp3_ = has_vfp_;
-#ifdef ARM_CPU_FLAG_VFP_D32
-    has_vfp3_d32_ = (cpu_flags & ARM_CPU_FLAG_VFP_D32) != 0;
-#endif
-  }
-  has_idiva_ = (cpu_flags & ARM_CPU_FLAG_IDIV) != 0;
-
-#endif  // V8_OS_LINUX
-
-#elif V8_HOST_ARCH_MIPS
-
-  // Simple detection of FPU at runtime for Linux.
-  // It is based on /proc/cpuinfo, which reveals hardware configuration
-  // to user-space applications.  According to MIPS (early 2010), no similar
-  // facility is universally available on the MIPS architectures,
-  // so it's up to individual OSes to provide such.
-  CPUInfo cpu_info;
-  char* cpu_model = cpu_info.ExtractField("cpu model");
-  has_fpu_ = HasListItem(cpu_model, "FPU");
-  delete[] cpu_model;
-
-#elif V8_HOST_ARCH_ARM64
-
-  CPUInfo cpu_info;
-
-  // Extract implementor from the "CPU implementer" field.
-  char* implementer = cpu_info.ExtractField("CPU implementer");
-  if (implementer != NULL) {
-    char* end;
-    implementer_ = strtol(implementer, &end, 0);
-    if (end == implementer) {
-      implementer_ = 0;
-    }
-    delete[] implementer;
-  }
-
-  // Extract part number from the "CPU part" field.
-  char* part = cpu_info.ExtractField("CPU part");
-  if (part != NULL) {
-    char* end;
-    part_ = strtol(part, &end, 0);
-    if (end == part) {
-      part_ = 0;
-    }
-    delete[] part;
-  }
-
-#endif
-}
-
-} }  // namespace v8::internal
diff --git a/src/cpu.h b/src/cpu.h
deleted file mode 100644
index ac8ee98..0000000
--- a/src/cpu.h
+++ /dev/null
@@ -1,114 +0,0 @@
-// Copyright 2006-2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This module contains the architecture-specific code. This makes the rest of
-// the code less dependent on differences between different processor
-// architectures.
-// The classes have the same definition for all architectures. The
-// implementation for a particular architecture is put in cpu_<arch>.cc.
-// The build system then uses the implementation for the target architecture.
-//
-
-#ifndef V8_CPU_H_
-#define V8_CPU_H_
-
-#include "src/allocation.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// CPU
-//
-// Query information about the processor.
-//
-// This class also has static methods for the architecture specific functions.
-// Add methods here to cope with differences between the supported
-// architectures. For each architecture the file cpu_<arch>.cc contains the
-// implementation of these static functions.
-
-class CPU V8_FINAL BASE_EMBEDDED {
- public:
-  CPU();
-
-  // x86 CPUID information
-  const char* vendor() const { return vendor_; }
-  int stepping() const { return stepping_; }
-  int model() const { return model_; }
-  int ext_model() const { return ext_model_; }
-  int family() const { return family_; }
-  int ext_family() const { return ext_family_; }
-  int type() const { return type_; }
-
-  // arm implementer/part information
-  int implementer() const { return implementer_; }
-  static const int ARM = 0x41;
-  static const int NVIDIA = 0x4e;
-  static const int QUALCOMM = 0x51;
-  int architecture() const { return architecture_; }
-  int part() const { return part_; }
-  static const int ARM_CORTEX_A5 = 0xc05;
-  static const int ARM_CORTEX_A7 = 0xc07;
-  static const int ARM_CORTEX_A8 = 0xc08;
-  static const int ARM_CORTEX_A9 = 0xc09;
-  static const int ARM_CORTEX_A12 = 0xc0c;
-  static const int ARM_CORTEX_A15 = 0xc0f;
-
-  // General features
-  bool has_fpu() const { return has_fpu_; }
-
-  // x86 features
-  bool has_cmov() const { return has_cmov_; }
-  bool has_sahf() const { return has_sahf_; }
-  bool has_mmx() const { return has_mmx_; }
-  bool has_sse() const { return has_sse_; }
-  bool has_sse2() const { return has_sse2_; }
-  bool has_sse3() const { return has_sse3_; }
-  bool has_ssse3() const { return has_ssse3_; }
-  bool has_sse41() const { return has_sse41_; }
-  bool has_sse42() const { return has_sse42_; }
-
-  // arm features
-  bool has_idiva() const { return has_idiva_; }
-  bool has_neon() const { return has_neon_; }
-  bool has_thumb2() const { return has_thumb2_; }
-  bool has_vfp() const { return has_vfp_; }
-  bool has_vfp3() const { return has_vfp3_; }
-  bool has_vfp3_d32() const { return has_vfp3_d32_; }
-
-  // Flush instruction cache.
-  static void FlushICache(void* start, size_t size);
-
- private:
-  char vendor_[13];
-  int stepping_;
-  int model_;
-  int ext_model_;
-  int family_;
-  int ext_family_;
-  int type_;
-  int implementer_;
-  int architecture_;
-  int part_;
-  bool has_fpu_;
-  bool has_cmov_;
-  bool has_sahf_;
-  bool has_mmx_;
-  bool has_sse_;
-  bool has_sse2_;
-  bool has_sse3_;
-  bool has_ssse3_;
-  bool has_sse41_;
-  bool has_sse42_;
-  bool has_idiva_;
-  bool has_neon_;
-  bool has_thumb2_;
-  bool has_vfp_;
-  bool has_vfp3_;
-  bool has_vfp3_d32_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_CPU_H_
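
The deleted class is not gone: the d8.cc hunk further below swaps `#include "src/cpu.h"` for `#include "src/base/cpu.h"`, so the CPU query class moves into src/base. A minimal sketch of feature-gating against the interface shown above, assuming the relocated class keeps these accessors and now lives in the v8::base namespace:

    #include "src/base/cpu.h"

    // Probe the host CPU once (via CPUID on x86; on ARM typically by
    // parsing /proc/cpuinfo, as in the deleted cpu.cc above) and gate
    // optimized paths on its reported features.
    bool CanUseFastPaths() {
      v8::base::CPU cpu;
      // x86: require SSE2 before emitting SSE2 instructions.
      if (!cpu.has_sse2()) return false;
      // ARM: implementer/part checks use the constants listed above.
      if (cpu.implementer() == v8::base::CPU::QUALCOMM) {
        return cpu.has_idiva();  // hardware integer divide
      }
      return true;
    }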
diff --git a/src/d8-posix.cc b/src/d8-posix.cc
index 8851ce8..9a20b06 100644
--- a/src/d8-posix.cc
+++ b/src/d8-posix.cc
@@ -2,23 +2,22 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-
-#include <stdlib.h>
 #include <errno.h>
-#include <sys/types.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
 #include <sys/stat.h>
 #include <sys/time.h>
-#include <time.h>
-#include <unistd.h>
-#include <fcntl.h>
+#include <sys/types.h>
 #include <sys/wait.h>
-#include <signal.h>
-
+#include <unistd.h>
 
 #include "src/d8.h"
-#include "src/d8-debug.h"
-#include "src/debug.h"
 
+#if !V8_OS_NACL
+#include <sys/select.h>
+#endif
 
 namespace v8 {
 
@@ -105,11 +104,16 @@
   }
   timeout.tv_usec = (read_timeout % 1000) * 1000;
   timeout.tv_sec = read_timeout / 1000;
+#if V8_OS_NACL
+  // PNaCL has no support for select.
+  int number_of_fds_ready = -1;
+#else
   int number_of_fds_ready = select(fd + 1,
                                    &readfds,
                                    &writefds,
                                    &exceptfds,
                                    read_timeout != -1 ? &timeout : NULL);
+#endif
   return number_of_fds_ready == 1;
 }
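
The surviving branch is the standard select()-with-timeout idiom. Isolated from d8 (a sketch; WaitForRead is a made-up name):

    #include <stddef.h>
    #include <sys/select.h>
    #include <sys/time.h>

    // Wait up to timeout_ms for fd to become readable; a negative timeout
    // blocks indefinitely, mirroring read_timeout == -1 above.
    static bool WaitForRead(int fd, int timeout_ms) {
      fd_set readfds;
      FD_ZERO(&readfds);
      FD_SET(fd, &readfds);
      struct timeval timeout;
      timeout.tv_sec = timeout_ms / 1000;
      timeout.tv_usec = (timeout_ms % 1000) * 1000;
      // select() returns the number of ready descriptors, 0 on timeout and
      // -1 on error, which is also what the PNaCl stub above reports.
      int ready = select(fd + 1, &readfds, NULL, NULL,
                         timeout_ms >= 0 ? &timeout : NULL);
      return ready == 1;
    }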
 
@@ -550,8 +554,12 @@
     return;
   }
   if (args[0]->IsNumber()) {
-    mode_t mask = args[0]->Int32Value();
-    int previous = umask(mask);
+#if V8_OS_NACL
+    // PNaCL has no support for umask.
+    int previous = 0;
+#else
+    int previous = umask(args[0]->Int32Value());
+#endif
     args.GetReturnValue().Set(previous);
     return;
   } else {
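
umask(2) offers no read-only query: it always installs a new mask and returns the previous one, which is why the patched branch can hand `previous` straight back to script. The usual probe-and-restore idiom, as a sketch:

    #include <sys/stat.h>

    // Read the current umask without permanently changing it.
    static mode_t CurrentUmask() {
      mode_t previous = umask(022);  // install a throwaway mask
      umask(previous);               // immediately restore the old one
      return previous;
    }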
diff --git a/src/d8-windows.cc b/src/d8-windows.cc
index b519407..06c0a4e 100644
--- a/src/d8-windows.cc
+++ b/src/d8-windows.cc
@@ -2,11 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-
 #include "src/d8.h"
-#include "src/d8-debug.h"
-#include "src/debug.h"
-#include "src/api.h"
 
 
 namespace v8 {
diff --git a/src/d8.cc b/src/d8.cc
index 661307f..9db7744 100644
--- a/src/d8.cc
+++ b/src/d8.cc
@@ -29,29 +29,40 @@
 #include "include/v8-testing.h"
 #endif  // V8_SHARED
 
+#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
+#include "src/gdb-jit.h"
+#endif
+
 #ifdef ENABLE_VTUNE_JIT_INTERFACE
 #include "src/third_party/vtune/v8-vtune.h"
 #endif
 
 #include "src/d8.h"
 
+#include "include/libplatform/libplatform.h"
 #ifndef V8_SHARED
 #include "src/api.h"
-#include "src/checks.h"
-#include "src/cpu.h"
+#include "src/base/cpu.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
+#include "src/base/sys-info.h"
 #include "src/d8-debug.h"
 #include "src/debug.h"
 #include "src/natives.h"
-#include "src/platform.h"
 #include "src/v8.h"
 #endif  // !V8_SHARED
 
 #if !defined(_WIN32) && !defined(_WIN64)
 #include <unistd.h>  // NOLINT
-#endif
+#else
+#include <windows.h>  // NOLINT
+#if defined(_MSC_VER)
+#include <crtdbg.h>  // NOLINT
+#endif               // defined(_MSC_VER)
+#endif               // !defined(_WIN32) && !defined(_WIN64)
 
-#ifndef ASSERT
-#define ASSERT(condition) assert(condition)
+#ifndef DCHECK
+#define DCHECK(condition) assert(condition)
 #endif
 
 namespace v8 {
@@ -134,11 +145,12 @@
 
 #ifndef V8_SHARED
 CounterMap* Shell::counter_map_;
-i::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
+base::OS::MemoryMappedFile* Shell::counters_file_ = NULL;
 CounterCollection Shell::local_counters_;
 CounterCollection* Shell::counters_ = &local_counters_;
-i::Mutex Shell::context_mutex_;
-const i::TimeTicks Shell::kInitialTicks = i::TimeTicks::HighResolutionNow();
+base::Mutex Shell::context_mutex_;
+const base::TimeTicks Shell::kInitialTicks =
+    base::TimeTicks::HighResolutionNow();
 Persistent<Context> Shell::utility_context_;
 #endif  // !V8_SHARED
 
@@ -164,6 +176,36 @@
 }
 
 
+// Compile a string within the current v8 context.
+Local<UnboundScript> Shell::CompileString(
+    Isolate* isolate, Local<String> source, Local<Value> name,
+    v8::ScriptCompiler::CompileOptions compile_options) {
+  ScriptOrigin origin(name);
+  ScriptCompiler::Source script_source(source, origin);
+  Local<UnboundScript> script =
+      ScriptCompiler::CompileUnbound(isolate, &script_source, compile_options);
+
+  // Was caching requested & successful? Then compile again, now with cache.
+  if (script_source.GetCachedData()) {
+    if (compile_options == ScriptCompiler::kProduceCodeCache) {
+      compile_options = ScriptCompiler::kConsumeCodeCache;
+    } else if (compile_options == ScriptCompiler::kProduceParserCache) {
+      compile_options = ScriptCompiler::kConsumeParserCache;
+    } else {
+      DCHECK(false);  // A new compile option?
+    }
+    ScriptCompiler::Source cached_source(
+        source, origin, new v8::ScriptCompiler::CachedData(
+                            script_source.GetCachedData()->data,
+                            script_source.GetCachedData()->length,
+                            v8::ScriptCompiler::CachedData::BufferNotOwned));
+    script = ScriptCompiler::CompileUnbound(isolate, &cached_source,
+                                            compile_options);
+  }
+  return script;
+}
+
+
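
A usage sketch for the new helper: requesting kProduceCodeCache makes CompileString compile twice, the second time through the kConsumeCodeCache path with a BufferNotOwned view of the cache the first pass produced. Assuming an isolate with an entered context, and the unqualified v8 names used throughout d8.cc:

    static void CompileWithCache(Isolate* isolate) {
      HandleScope handle_scope(isolate);
      Local<String> source = String::NewFromUtf8(isolate, "6 * 7");
      Local<String> name = String::NewFromUtf8(isolate, "answer.js");
      Local<UnboundScript> script = Shell::CompileString(
          isolate, source, name, ScriptCompiler::kProduceCodeCache);
      if (!script.IsEmpty()) {
        script->BindToCurrentContext()->Run();  // evaluates to 42
      }
    }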
 // Executes a string within the current v8 context.
 bool Shell::ExecuteString(Isolate* isolate,
                           Handle<String> source,
@@ -182,10 +224,9 @@
     // When debugging make exceptions appear to be uncaught.
     try_catch.SetVerbose(true);
   }
-  ScriptOrigin origin(name);
-  ScriptCompiler::Source script_source(source, origin);
+
   Handle<UnboundScript> script =
-      ScriptCompiler::CompileUnbound(isolate, &script_source);
+      Shell::CompileString(isolate, source, name, options.compile_options);
   if (script.IsEmpty()) {
     // Print errors that happened during compilation.
     if (report_exceptions && !FLAG_debugger)
@@ -200,13 +241,13 @@
     realm->Exit();
     data->realm_current_ = data->realm_switch_;
     if (result.IsEmpty()) {
-      ASSERT(try_catch.HasCaught());
+      DCHECK(try_catch.HasCaught());
       // Print errors that happened during execution.
       if (report_exceptions && !FLAG_debugger)
         ReportException(isolate, &try_catch);
       return false;
     } else {
-      ASSERT(!try_catch.HasCaught());
+      DCHECK(!try_catch.HasCaught());
       if (print_result) {
 #if !defined(V8_SHARED)
         if (options.test_shell) {
@@ -290,9 +331,18 @@
 
 #ifndef V8_SHARED
 // performance.now() returns a time stamp as double, measured in milliseconds.
+// When FLAG_verify_predictable mode is enabled it returns the current value
+// of Heap::allocations_count() instead.
 void Shell::PerformanceNow(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  i::TimeDelta delta = i::TimeTicks::HighResolutionNow() - kInitialTicks;
-  args.GetReturnValue().Set(delta.InMillisecondsF());
+  if (i::FLAG_verify_predictable) {
+    Isolate* v8_isolate = args.GetIsolate();
+    i::Heap* heap = reinterpret_cast<i::Isolate*>(v8_isolate)->heap();
+    args.GetReturnValue().Set(heap->synthetic_time());
+  } else {
+    base::TimeDelta delta =
+        base::TimeTicks::HighResolutionNow() - kInitialTicks;
+    args.GetReturnValue().Set(delta.InMillisecondsF());
+  }
 }
 #endif  // !V8_SHARED
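
The non-predictable branch is the usual monotonic-clock pattern from src/base; distilled (a sketch, assuming base::TimeTicks lives in src/base/platform/time.h next to the platform header included above):

    #include "src/base/platform/time.h"

    // Milliseconds elapsed since `start`, as a fractional double, which is
    // exactly what performance.now() hands back to script.
    static double ElapsedMs(const v8::base::TimeTicks& start) {
      v8::base::TimeDelta delta =
          v8::base::TimeTicks::HighResolutionNow() - start;
      return delta.InMillisecondsF();
    }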
 
@@ -552,7 +602,7 @@
     printf("%s\n", exception_string);
   } else {
     // Print (filename):(line number): (message).
-    v8::String::Utf8Value filename(message->GetScriptResourceName());
+    v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
     const char* filename_string = ToCString(filename);
     int linenum = message->GetLineNumber();
     printf("%s:%i: %s\n", filename_string, linenum, exception_string);
@@ -666,7 +716,7 @@
 
 
 void Shell::MapCounters(v8::Isolate* isolate, const char* name) {
-  counters_file_ = i::OS::MemoryMappedFile::create(
+  counters_file_ = base::OS::MemoryMappedFile::create(
       name, sizeof(CounterCollection), &local_counters_);
   void* memory = (counters_file_ == NULL) ?
       NULL : counters_file_->memory();
@@ -702,7 +752,7 @@
       counter->Bind(name, is_histogram);
     }
   } else {
-    ASSERT(counter->is_histogram() == is_histogram);
+    DCHECK(counter->is_histogram() == is_histogram);
   }
   return counter;
 }
@@ -798,7 +848,7 @@
                              int* raw_data_size,
                              const char* compressed_data,
                              int compressed_data_size) {
-    ASSERT_EQ(v8::StartupData::kBZip2,
+    DCHECK_EQ(v8::StartupData::kBZip2,
               v8::V8::GetCompressedStartupDataAlgorithm());
     unsigned int decompressed_size = *raw_data_size;
     int result =
@@ -863,11 +913,9 @@
                        performance_template);
 #endif  // !V8_SHARED
 
-#if !defined(V8_SHARED) && !defined(_WIN32) && !defined(_WIN64)
   Handle<ObjectTemplate> os_templ = ObjectTemplate::New(isolate);
   AddOSMethods(isolate, os_templ);
   global_template->Set(String::NewFromUtf8(isolate, "os"), os_templ);
-#endif  // !V8_SHARED && !_WIN32 && !_WIN64
 
   return global_template;
 }
@@ -889,9 +937,9 @@
   if (i::StrLength(i::FLAG_map_counters) != 0)
     MapCounters(isolate, i::FLAG_map_counters);
   if (i::FLAG_dump_counters || i::FLAG_track_gc_object_stats) {
-    V8::SetCounterFunction(LookupCounter);
-    V8::SetCreateHistogramFunction(CreateHistogram);
-    V8::SetAddHistogramSampleFunction(AddHistogramSample);
+    isolate->SetCounterFunction(LookupCounter);
+    isolate->SetCreateHistogramFunction(CreateHistogram);
+    isolate->SetAddHistogramSampleFunction(AddHistogramSample);
   }
 #endif  // !V8_SHARED
 }
@@ -911,13 +959,13 @@
 Local<Context> Shell::CreateEvaluationContext(Isolate* isolate) {
 #ifndef V8_SHARED
   // This needs to be a critical section since this is not thread-safe
-  i::LockGuard<i::Mutex> lock_guard(&context_mutex_);
+  base::LockGuard<base::Mutex> lock_guard(&context_mutex_);
 #endif  // !V8_SHARED
   // Initialize the global objects
   Handle<ObjectTemplate> global_template = CreateGlobalTemplate(isolate);
   EscapableHandleScope handle_scope(isolate);
   Local<Context> context = Context::New(isolate, NULL, global_template);
-  ASSERT(!context.IsEmpty());
+  DCHECK(!context.IsEmpty());
   Context::Scope scope(context);
 
 #ifndef V8_SHARED
@@ -1064,7 +1112,7 @@
 
 
 void Shell::ReadBuffer(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  ASSERT(sizeof(char) == sizeof(uint8_t));  // NOLINT
+  DCHECK(sizeof(char) == sizeof(uint8_t));  // NOLINT
   String::Utf8Value filename(args[0]);
   int length;
   if (*filename == NULL) {
@@ -1181,12 +1229,12 @@
 
 
 #ifndef V8_SHARED
-i::Thread::Options SourceGroup::GetThreadOptions() {
+base::Thread::Options SourceGroup::GetThreadOptions() {
   // On some systems (OSX 10.6) the stack size default is 0.5Mb or less
   // which is not enough to parse the big literal expressions used in tests.
   // The stack size should be at least StackGuard::kLimitSize + some
   // OS-specific padding for thread startup code.  2Mbytes seems to be enough.
-  return i::Thread::Options("IsolateThread", 2 * MB);
+  return base::Thread::Options("IsolateThread", 2 * MB);
 }
 
 
@@ -1208,14 +1256,14 @@
       }
       if (Shell::options.send_idle_notification) {
         const int kLongIdlePauseInMs = 1000;
-        V8::ContextDisposedNotification();
-        V8::IdleNotification(kLongIdlePauseInMs);
+        isolate->ContextDisposedNotification();
+        isolate->IdleNotification(kLongIdlePauseInMs);
       }
       if (Shell::options.invoke_weak_callbacks) {
         // By sending low memory notifications, we will try hard to collect
         // all garbage and will therefore also invoke all weak callbacks of
         // actually unreachable persistent handles.
-        V8::LowMemoryNotification();
+        isolate->LowMemoryNotification();
       }
     }
     done_semaphore_.Signal();
@@ -1318,6 +1366,28 @@
       printf("Javascript debugger not included\n");
       return false;
 #endif  // V8_SHARED
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+    } else if (strncmp(argv[i], "--natives_blob=", 15) == 0) {
+      options.natives_blob = argv[i] + 15;
+      argv[i] = NULL;
+    } else if (strncmp(argv[i], "--snapshot_blob=", 16) == 0) {
+      options.snapshot_blob = argv[i] + 16;
+      argv[i] = NULL;
+#endif  // V8_USE_EXTERNAL_STARTUP_DATA
+    } else if (strcmp(argv[i], "--cache") == 0 ||
+               strncmp(argv[i], "--cache=", 8) == 0) {
+      const char* value = argv[i] + 7;
+      if (!*value || strncmp(value, "=code", 6) == 0) {
+        options.compile_options = v8::ScriptCompiler::kProduceCodeCache;
+      } else if (strncmp(value, "=parse", 7) == 0) {
+        options.compile_options = v8::ScriptCompiler::kProduceParserCache;
+      } else if (strncmp(value, "=none", 6) == 0) {
+        options.compile_options = v8::ScriptCompiler::kNoCompileOptions;
+      } else {
+        printf("Unknown option to --cache.\n");
+        return false;
+      }
+      argv[i] = NULL;
     }
   }
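
The --cache parsing leans on two C-string subtleties: for a bare `--cache`, `argv[i] + 7` points at the terminating NUL, so `!*value` selects the default, and each strncmp length includes the terminator, turning the comparison into an exact match rather than a prefix test. The same trick standalone (IsExactSuffix is a made-up name):

    #include <string.h>

    // Compare the tail of arg (after "--cache") against suffix including
    // the NUL terminator, so "=codex" does not pass as "=code".
    static bool IsExactSuffix(const char* arg, const char* suffix) {
      const char* value = arg + strlen("--cache");
      return strncmp(value, suffix, strlen(suffix) + 1) == 0;
    }
    // IsExactSuffix("--cache=code", "=code")  -> true
    // IsExactSuffix("--cache=codex", "=code") -> false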
 
@@ -1375,14 +1445,14 @@
   }
   if (options.send_idle_notification) {
     const int kLongIdlePauseInMs = 1000;
-    V8::ContextDisposedNotification();
-    V8::IdleNotification(kLongIdlePauseInMs);
+    isolate->ContextDisposedNotification();
+    isolate->IdleNotification(kLongIdlePauseInMs);
   }
   if (options.invoke_weak_callbacks) {
     // By sending low memory notifications, we will try hard to collect all
     // garbage and will therefore also invoke all weak callbacks of actually
     // unreachable persistent handles.
-    V8::LowMemoryNotification();
+    isolate->LowMemoryNotification();
   }
 
 #ifndef V8_SHARED
@@ -1464,21 +1534,95 @@
 
 class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
  public:
-  virtual void* Allocate(size_t) V8_OVERRIDE {
+  virtual void* Allocate(size_t) OVERRIDE {
     return malloc(0);
   }
-  virtual void* AllocateUninitialized(size_t length) V8_OVERRIDE {
+  virtual void* AllocateUninitialized(size_t length) OVERRIDE {
     return malloc(0);
   }
-  virtual void Free(void* p, size_t) V8_OVERRIDE {
+  virtual void Free(void* p, size_t) OVERRIDE {
     free(p);
   }
 };
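
The mock deliberately returns zero-byte allocations whatever the requested length, so tests can create huge ArrayBuffers without committing memory. A functional allocator must honor the API contract instead; in particular, Allocate() is required to return zero-initialized memory. A sketch (not the actual ShellArrayBufferAllocator referenced further down):

    #include <stdlib.h>
    #include "include/v8.h"

    class SimpleArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
     public:
      virtual void* Allocate(size_t length) OVERRIDE {
        return calloc(length, 1);  // Allocate() must return zeroed memory
      }
      virtual void* AllocateUninitialized(size_t length) OVERRIDE {
        return malloc(length);
      }
      virtual void Free(void* data, size_t) OVERRIDE {
        free(data);
      }
    };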
 
 
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+class StartupDataHandler {
+ public:
+  StartupDataHandler(const char* natives_blob,
+                     const char* snapshot_blob) {
+    Load(natives_blob, &natives_, v8::V8::SetNativesDataBlob);
+    Load(snapshot_blob, &snapshot_, v8::V8::SetSnapshotDataBlob);
+  }
+
+  ~StartupDataHandler() {
+    delete[] natives_.data;
+    delete[] snapshot_.data;
+  }
+
+ private:
+  void Load(const char* blob_file,
+            v8::StartupData* startup_data,
+            void (*setter_fn)(v8::StartupData*)) {
+    startup_data->data = NULL;
+    startup_data->compressed_size = 0;
+    startup_data->raw_size = 0;
+
+    if (!blob_file)
+      return;
+
+    FILE* file = fopen(blob_file, "rb");
+    if (!file)
+      return;
+
+    fseek(file, 0, SEEK_END);
+    startup_data->raw_size = ftell(file);
+    rewind(file);
+
+    startup_data->data = new char[startup_data->raw_size];
+    startup_data->compressed_size = fread(
+        const_cast<char*>(startup_data->data), 1, startup_data->raw_size,
+        file);
+    fclose(file);
+
+    if (startup_data->raw_size == startup_data->compressed_size)
+      (*setter_fn)(startup_data);
+  }
+
+  v8::StartupData natives_;
+  v8::StartupData snapshot_;
+
+  // Disallow copy & assign.
+  StartupDataHandler(const StartupDataHandler& other);
+  void operator=(const StartupDataHandler& other);
+};
+#endif  // V8_USE_EXTERNAL_STARTUP_DATA
+
+
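
Combined with the --natives_blob/--snapshot_blob options added earlier, this lets a d8 built with V8_USE_EXTERNAL_STARTUP_DATA boot from files, e.g. `d8 --natives_blob=natives.bin --snapshot_blob=snapshot.bin script.js` (the blob file names are illustrative). Note that Load() stores the fread() result in compressed_size and only installs a blob when it equals raw_size, i.e. when the whole file was read. The same defensive whole-file read in isolation (a sketch):

    #include <stdio.h>

    // Read a whole file into a new[]-allocated buffer; returns NULL on any
    // failure, including a short read.
    static char* ReadWholeFile(const char* path, long* size_out) {
      FILE* file = fopen(path, "rb");
      if (!file) return NULL;
      fseek(file, 0, SEEK_END);
      long size = ftell(file);
      rewind(file);
      char* data = new char[size];
      long read = static_cast<long>(fread(data, 1, size, file));
      fclose(file);
      if (read != size) {  // short read: do not hand back garbage
        delete[] data;
        return NULL;
      }
      *size_out = size;
      return data;
    }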
 int Shell::Main(int argc, char* argv[]) {
+#if (defined(_WIN32) || defined(_WIN64))
+  UINT new_flags =
+      SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
+  UINT existing_flags = SetErrorMode(new_flags);
+  SetErrorMode(existing_flags | new_flags);
+#if defined(_MSC_VER)
+  _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+  _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
+  _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+  _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
+  _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+  _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
+  _set_error_mode(_OUT_TO_STDERR);
+#endif  // defined(_MSC_VER)
+#endif  // defined(_WIN32) || defined(_WIN64)
   if (!SetOptions(argc, argv)) return 1;
   v8::V8::InitializeICU(options.icu_data_file);
+  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+  v8::V8::InitializePlatform(platform);
+  v8::V8::Initialize();
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+  StartupDataHandler startup_data(options.natives_blob, options.snapshot_blob);
+#endif
   SetFlagsFromString("--trace-hydrogen-file=hydrogen.cfg");
   SetFlagsFromString("--redirect-code-traces-to=code.asm");
   ShellArrayBufferAllocator array_buffer_allocator;
@@ -1489,21 +1633,26 @@
     v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
   }
   int result = 0;
-  Isolate* isolate = Isolate::New();
-#ifndef V8_SHARED
-  v8::ResourceConstraints constraints;
-  constraints.ConfigureDefaults(i::OS::TotalPhysicalMemory(),
-                                i::OS::MaxVirtualMemory(),
-                                i::OS::NumberOfProcessorsOnline());
-  v8::SetResourceConstraints(isolate, &constraints);
+  Isolate::CreateParams create_params;
+#if !defined(V8_SHARED) && defined(ENABLE_GDB_JIT_INTERFACE)
+  if (i::FLAG_gdbjit) {
+    create_params.code_event_handler = i::GDBJITInterface::EventHandler;
+  }
 #endif
+#ifdef ENABLE_VTUNE_JIT_INTERFACE
+  vTune::InitializeVtuneForV8(create_params);
+#endif
+#ifndef V8_SHARED
+  create_params.constraints.ConfigureDefaults(
+      base::SysInfo::AmountOfPhysicalMemory(),
+      base::SysInfo::AmountOfVirtualMemory(),
+      base::SysInfo::NumberOfProcessors());
+#endif
+  Isolate* isolate = Isolate::New(create_params);
   DumbLineEditor dumb_line_editor(isolate);
   {
     Isolate::Scope scope(isolate);
     Initialize(isolate);
-#ifdef ENABLE_VTUNE_JIT_INTERFACE
-    vTune::InitializeVtuneForV8();
-#endif
     PerIsolateData data(isolate);
     InitializeDebugger(isolate);
 
@@ -1553,6 +1702,8 @@
   }
   isolate->Dispose();
   V8::Dispose();
+  V8::ShutdownPlatform();
+  delete platform;
 
   OnExit();
 
diff --git a/src/d8.gyp b/src/d8.gyp
index b353eb0..a084979 100644
--- a/src/d8.gyp
+++ b/src/d8.gyp
@@ -41,6 +41,7 @@
       'type': 'executable',
       'dependencies': [
         '../tools/gyp/v8.gyp:v8',
+        '../tools/gyp/v8.gyp:v8_libplatform',
       ],
       # Generated source files need this explicitly:
       'include_dirs+': [
@@ -57,6 +58,14 @@
           'libraries': [ '-lreadline', ],
           'sources': [ 'd8-readline.cc' ],
         }],
+        ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
+           or OS=="openbsd" or OS=="solaris" or OS=="android" \
+           or OS=="qnx")', {
+             'sources': [ 'd8-posix.cc', ]
+           }],
+        [ 'OS=="win"', {
+          'sources': [ 'd8-windows.cc', ]
+        }],
         [ 'component!="shared_library"', {
           'sources': [ 'd8-debug.cc', '<(SHARED_INTERMEDIATE_DIR)/d8-js.cc', ],
           'conditions': [
@@ -69,14 +78,6 @@
                 'd8_js2c',
               ],
             }],
-            ['(OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="netbsd" \
-               or OS=="openbsd" or OS=="solaris" or OS=="android" \
-               or OS=="qnx")', {
-              'sources': [ 'd8-posix.cc', ]
-            }],
-            [ 'OS=="win"', {
-              'sources': [ 'd8-windows.cc', ]
-            }],
           ],
         }],
         ['v8_enable_vtunejit==1', {
diff --git a/src/d8.h b/src/d8.h
index 143eabb..44ee09a 100644
--- a/src/d8.h
+++ b/src/d8.h
@@ -12,6 +12,7 @@
 #include "src/v8.h"
 #else
 #include "include/v8.h"
+#include "src/base/compiler-specific.h"
 #endif  // !V8_SHARED
 
 namespace v8 {
@@ -69,7 +70,7 @@
         const_cast<char*>(name),
         Hash(name),
         true);
-    ASSERT(answer != NULL);
+    DCHECK(answer != NULL);
     answer->value = value;
   }
   class Iterator {
@@ -141,10 +142,10 @@
   void WaitForThread();
 
  private:
-  class IsolateThread : public i::Thread {
+  class IsolateThread : public base::Thread {
    public:
     explicit IsolateThread(SourceGroup* group)
-        : i::Thread(GetThreadOptions()), group_(group) {}
+        : base::Thread(GetThreadOptions()), group_(group) {}
 
     virtual void Run() {
       group_->ExecuteInThread();
@@ -154,12 +155,12 @@
     SourceGroup* group_;
   };
 
-  static i::Thread::Options GetThreadOptions();
+  static base::Thread::Options GetThreadOptions();
   void ExecuteInThread();
 
-  i::Semaphore next_semaphore_;
-  i::Semaphore done_semaphore_;
-  i::Thread* thread_;
+  base::Semaphore next_semaphore_;
+  base::Semaphore done_semaphore_;
+  base::Thread* thread_;
 #endif  // !V8_SHARED
 
   void ExitShell(int exit_code);
@@ -171,7 +172,7 @@
 };
 
 
-class BinaryResource : public v8::String::ExternalAsciiStringResource {
+class BinaryResource : public v8::String::ExternalOneByteStringResource {
  public:
   BinaryResource(const char* string, int length)
       : data_(string),
@@ -194,21 +195,24 @@
 
 class ShellOptions {
  public:
-  ShellOptions() :
-     script_executed(false),
-     last_run(true),
-     send_idle_notification(false),
-     invoke_weak_callbacks(false),
-     stress_opt(false),
-     stress_deopt(false),
-     interactive_shell(false),
-     test_shell(false),
-     dump_heap_constants(false),
-     expected_to_throw(false),
-     mock_arraybuffer_allocator(false),
-     num_isolates(1),
-     isolate_sources(NULL),
-     icu_data_file(NULL) { }
+  ShellOptions()
+      : script_executed(false),
+        last_run(true),
+        send_idle_notification(false),
+        invoke_weak_callbacks(false),
+        stress_opt(false),
+        stress_deopt(false),
+        interactive_shell(false),
+        test_shell(false),
+        dump_heap_constants(false),
+        expected_to_throw(false),
+        mock_arraybuffer_allocator(false),
+        num_isolates(1),
+        compile_options(v8::ScriptCompiler::kNoCompileOptions),
+        isolate_sources(NULL),
+        icu_data_file(NULL),
+        natives_blob(NULL),
+        snapshot_blob(NULL) {}
 
   ~ShellOptions() {
     delete[] isolate_sources;
@@ -230,8 +234,11 @@
   bool expected_to_throw;
   bool mock_arraybuffer_allocator;
   int num_isolates;
+  v8::ScriptCompiler::CompileOptions compile_options;
   SourceGroup* isolate_sources;
   const char* icu_data_file;
+  const char* natives_blob;
+  const char* snapshot_blob;
 };
 
 #ifdef V8_SHARED
@@ -241,6 +248,9 @@
 #endif  // V8_SHARED
 
  public:
+  static Local<UnboundScript> CompileString(
+      Isolate* isolate, Local<String> source, Local<Value> name,
+      v8::ScriptCompiler::CompileOptions compile_options);
   static bool ExecuteString(Isolate* isolate,
                             Handle<String> source,
                             Handle<Value> name,
@@ -363,9 +373,9 @@
   // don't want to store the stats in a memory-mapped file
   static CounterCollection local_counters_;
   static CounterCollection* counters_;
-  static i::OS::MemoryMappedFile* counters_file_;
-  static i::Mutex context_mutex_;
-  static const i::TimeTicks kInitialTicks;
+  static base::OS::MemoryMappedFile* counters_file_;
+  static base::Mutex context_mutex_;
+  static const base::TimeTicks kInitialTicks;
 
   static Counter* GetCounter(const char* name, bool is_histogram);
   static void InstallUtilityScript(Isolate* isolate);
diff --git a/src/d8.js b/src/d8.js
index 3f7832d..2b927af 100644
--- a/src/d8.js
+++ b/src/d8.js
@@ -208,10 +208,6 @@
       details.text = result;
       break;
 
-    case 'scriptCollected':
-      details.text = result;
-      break;
-
     default:
       details.text = 'Unknown debug event ' + response.event();
   }
@@ -1984,7 +1980,7 @@
     case "string":
       return "\"" + x.toString() + "\"";
     case "symbol":
-      return "Symbol(" + (x.name ? Stringify(x.name, depth) : "") + ")"
+      return x.toString();
     case "object":
       if (IS_NULL(x)) return "null";
       if (x.constructor && x.constructor.name === "Array") {
@@ -2000,18 +1996,22 @@
         if (string && string !== "[object Object]") return string;
       } catch(e) {}
       var props = [];
-      for (var name in x) {
+      var names = Object.getOwnPropertyNames(x);
+      names = names.concat(Object.getOwnPropertySymbols(x));
+      for (var i in names) {
+        var name = names[i];
         var desc = Object.getOwnPropertyDescriptor(x, name);
         if (IS_UNDEFINED(desc)) continue;
+        if (IS_SYMBOL(name)) name = "[" + Stringify(name) + "]";
         if ("value" in desc) {
           props.push(name + ": " + Stringify(desc.value, depth - 1));
         }
-        if ("get" in desc) {
-          var getter = desc.get.toString();
+        if (desc.get) {
+          var getter = Stringify(desc.get);
           props.push("get " + name + getter.slice(getter.indexOf('(')));
         }
-        if ("set" in desc) {
-          var setter = desc.set.toString();
+        if (desc.set) {
+          var setter = Stringify(desc.set);
           props.push("set " + name + setter.slice(setter.indexOf('(')));
         }
       }
diff --git a/src/data-flow.cc b/src/data-flow.cc
index e591778..bd92ea0 100644
--- a/src/data-flow.cc
+++ b/src/data-flow.cc
@@ -2,9 +2,9 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-
 #include "src/data-flow.h"
+
+#include "src/base/bits.h"
 #include "src/scopes.h"
 
 namespace v8 {
@@ -40,4 +40,15 @@
   current_value_ = val >> 1;
 }
 
-} }  // namespace v8::internal
+
+int BitVector::Count() const {
+  int count = 0;
+  for (int i = 0; i < data_length_; i++) {
+    int data = data_[i];
+    if (data != 0) count += base::bits::CountPopulation32(data);
+  }
+  return count;
+}
+
+}  // namespace internal
+}  // namespace v8
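
The new Count() sums per-word popcounts via base::bits::CountPopulation32 from the src/base/bits.h include added above. An equivalent standalone popcount, the classic SWAR reduction (the base implementation may also defer to a compiler builtin):

    #include <stdint.h>

    // Count set bits in a 32-bit word without branches or tables.
    static int PopCount32(uint32_t value) {
      value = value - ((value >> 1) & 0x55555555u);                  // 2-bit sums
      value = (value & 0x33333333u) + ((value >> 2) & 0x33333333u);  // 4-bit sums
      value = (value + (value >> 4)) & 0x0f0f0f0fu;                  // 8-bit sums
      return static_cast<int>((value * 0x01010101u) >> 24);          // total
    }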
diff --git a/src/data-flow.h b/src/data-flow.h
index 5c214ae..bfd238d 100644
--- a/src/data-flow.h
+++ b/src/data-flow.h
@@ -25,7 +25,7 @@
           current_index_(0),
           current_value_(target->data_[0]),
           current_(-1) {
-      ASSERT(target->data_length_ > 0);
+      DCHECK(target->data_length_ > 0);
       Advance();
     }
     ~Iterator() { }
@@ -34,7 +34,7 @@
     void Advance();
 
     int Current() const {
-      ASSERT(!Done());
+      DCHECK(!Done());
       return current_;
     }
 
@@ -66,7 +66,7 @@
       : length_(length),
         data_length_(SizeFor(length)),
         data_(zone->NewArray<uint32_t>(data_length_)) {
-    ASSERT(length > 0);
+    DCHECK(length > 0);
     Clear();
   }
 
@@ -87,7 +87,7 @@
   }
 
   void CopyFrom(const BitVector& other) {
-    ASSERT(other.length() <= length());
+    DCHECK(other.length() <= length());
     for (int i = 0; i < other.data_length_; i++) {
       data_[i] = other.data_[i];
     }
@@ -97,30 +97,30 @@
   }
 
   bool Contains(int i) const {
-    ASSERT(i >= 0 && i < length());
+    DCHECK(i >= 0 && i < length());
     uint32_t block = data_[i / 32];
     return (block & (1U << (i % 32))) != 0;
   }
 
   void Add(int i) {
-    ASSERT(i >= 0 && i < length());
+    DCHECK(i >= 0 && i < length());
     data_[i / 32] |= (1U << (i % 32));
   }
 
   void Remove(int i) {
-    ASSERT(i >= 0 && i < length());
+    DCHECK(i >= 0 && i < length());
     data_[i / 32] &= ~(1U << (i % 32));
   }
 
   void Union(const BitVector& other) {
-    ASSERT(other.length() == length());
+    DCHECK(other.length() == length());
     for (int i = 0; i < data_length_; i++) {
       data_[i] |= other.data_[i];
     }
   }
 
   bool UnionIsChanged(const BitVector& other) {
-    ASSERT(other.length() == length());
+    DCHECK(other.length() == length());
     bool changed = false;
     for (int i = 0; i < data_length_; i++) {
       uint32_t old_data = data_[i];
@@ -131,14 +131,25 @@
   }
 
   void Intersect(const BitVector& other) {
-    ASSERT(other.length() == length());
+    DCHECK(other.length() == length());
     for (int i = 0; i < data_length_; i++) {
       data_[i] &= other.data_[i];
     }
   }
 
+  bool IntersectIsChanged(const BitVector& other) {
+    DCHECK(other.length() == length());
+    bool changed = false;
+    for (int i = 0; i < data_length_; i++) {
+      uint32_t old_data = data_[i];
+      data_[i] &= other.data_[i];
+      if (data_[i] != old_data) changed = true;
+    }
+    return changed;
+  }
+
   void Subtract(const BitVector& other) {
-    ASSERT(other.length() == length());
+    DCHECK(other.length() == length());
     for (int i = 0; i < data_length_; i++) {
       data_[i] &= ~other.data_[i];
     }
@@ -164,6 +175,8 @@
     return true;
   }
 
+  int Count() const;
+
   int length() const { return length_; }
 
 #ifdef DEBUG
@@ -176,6 +189,7 @@
   uint32_t* data_;
 };
 
+
 class GrowableBitVector BASE_EMBEDDED {
  public:
   class Iterator BASE_EMBEDDED {
@@ -232,8 +246,7 @@
   BitVector* bits_;
 };
 
-
-} }  // namespace v8::internal
-
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_DATAFLOW_H_
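
Throughout this header, bit i of the set lives in word i / 32 at position i % 32: Add() ORs the mask in, Remove() ANDs in its complement, Contains() tests it, and UnionIsChanged() plus the new IntersectIsChanged() report whether the set actually changed, which is exactly the shape an iterative dataflow solver needs to detect its fixed point. The addressing in isolation:

    #include <stdint.h>

    // Bit i lives in word i / 32 at position i % 32; for i = 70 that is
    // word 2, mask 1u << 6 == 0x40.
    static bool TestBit(const uint32_t* data, int i) {
      return (data[i / 32] & (1u << (i % 32))) != 0;
    }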
diff --git a/src/date.cc b/src/date.cc
index 3425ce2..6b95cb7 100644
--- a/src/date.cc
+++ b/src/date.cc
@@ -31,7 +31,7 @@
   } else {
     stamp_ = Smi::FromInt(stamp_->value() + 1);
   }
-  ASSERT(stamp_ != Smi::FromInt(kInvalidStamp));
+  DCHECK(stamp_ != Smi::FromInt(kInvalidStamp));
   for (int i = 0; i < kDSTSize; ++i) {
     ClearSegment(&dst_[i]);
   }
@@ -40,7 +40,7 @@
   after_ = &dst_[1];
   local_offset_ms_ = kInvalidLocalOffsetInMs;
   ymd_valid_ = false;
-  OS::ClearTimezoneCache(tz_cache_);
+  base::OS::ClearTimezoneCache(tz_cache_);
 }
 
 
@@ -73,7 +73,7 @@
   *year = 400 * (days / kDaysIn400Years) - kYearsOffset;
   days %= kDaysIn400Years;
 
-  ASSERT(DaysFromYearMonth(*year, 0) + days == save_days);
+  DCHECK(DaysFromYearMonth(*year, 0) + days == save_days);
 
   days--;
   int yd1 = days / kDaysIn100Years;
@@ -93,12 +93,12 @@
 
   bool is_leap = (!yd1 || yd2) && !yd3;
 
-  ASSERT(days >= -1);
-  ASSERT(is_leap || (days >= 0));
-  ASSERT((days < 365) || (is_leap && (days < 366)));
-  ASSERT(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0))));
-  ASSERT(is_leap || ((DaysFromYearMonth(*year, 0) + days) == save_days));
-  ASSERT(!is_leap || ((DaysFromYearMonth(*year, 0) + days + 1) == save_days));
+  DCHECK(days >= -1);
+  DCHECK(is_leap || (days >= 0));
+  DCHECK((days < 365) || (is_leap && (days < 366)));
+  DCHECK(is_leap == ((*year % 4 == 0) && (*year % 100 || (*year % 400 == 0))));
+  DCHECK(is_leap || ((DaysFromYearMonth(*year, 0) + days) == save_days));
+  DCHECK(!is_leap || ((DaysFromYearMonth(*year, 0) + days + 1) == save_days));
 
   days += is_leap;
 
@@ -124,7 +124,7 @@
       *day = days - 31 + 1;
     }
   }
-  ASSERT(DaysFromYearMonth(*year, *month) + *day - 1 == save_days);
+  DCHECK(DaysFromYearMonth(*year, *month) + *day - 1 == save_days);
   ymd_valid_ = true;
   ymd_year_ = *year;
   ymd_month_ = *month;
@@ -146,8 +146,8 @@
     month += 12;
   }
 
-  ASSERT(month >= 0);
-  ASSERT(month < 12);
+  DCHECK(month >= 0);
+  DCHECK(month < 12);
 
   // year_delta is an arbitrary number such that:
   // a) year_delta = -1 (mod 400)
@@ -222,8 +222,8 @@
 
   ProbeDST(time_sec);
 
-  ASSERT(InvalidSegment(before_) || before_->start_sec <= time_sec);
-  ASSERT(InvalidSegment(after_) || time_sec < after_->start_sec);
+  DCHECK(InvalidSegment(before_) || before_->start_sec <= time_sec);
+  DCHECK(InvalidSegment(after_) || time_sec < after_->start_sec);
 
   if (InvalidSegment(before_)) {
     // Cache miss.
@@ -264,7 +264,7 @@
     int new_offset_ms = GetDaylightSavingsOffsetFromOS(new_after_start_sec);
     ExtendTheAfterSegment(new_after_start_sec, new_offset_ms);
   } else {
-    ASSERT(!InvalidSegment(after_));
+    DCHECK(!InvalidSegment(after_));
     // Update the usage counter of after_ since it is going to be used.
     after_->last_used = ++dst_usage_counter_;
   }
@@ -291,7 +291,7 @@
         return offset_ms;
       }
     } else {
-      ASSERT(after_->offset_ms == offset_ms);
+      DCHECK(after_->offset_ms == offset_ms);
       after_->start_sec = middle_sec;
       if (time_sec >= after_->start_sec) {
         // This swap helps the optimistic fast check in subsequent invocations.
@@ -310,7 +310,7 @@
 void DateCache::ProbeDST(int time_sec) {
   DST* before = NULL;
   DST* after = NULL;
-  ASSERT(before_ != after_);
+  DCHECK(before_ != after_);
 
   for (int i = 0; i < kDSTSize; ++i) {
     if (dst_[i].start_sec <= time_sec) {
@@ -334,12 +334,12 @@
             ? after_ : LeastRecentlyUsedDST(before);
   }
 
-  ASSERT(before != NULL);
-  ASSERT(after != NULL);
-  ASSERT(before != after);
-  ASSERT(InvalidSegment(before) || before->start_sec <= time_sec);
-  ASSERT(InvalidSegment(after) || time_sec < after->start_sec);
-  ASSERT(InvalidSegment(before) || InvalidSegment(after) ||
+  DCHECK(before != NULL);
+  DCHECK(after != NULL);
+  DCHECK(before != after);
+  DCHECK(InvalidSegment(before) || before->start_sec <= time_sec);
+  DCHECK(InvalidSegment(after) || time_sec < after->start_sec);
+  DCHECK(InvalidSegment(before) || InvalidSegment(after) ||
          before->end_sec < after->start_sec);
 
   before_ = before;
diff --git a/src/date.h b/src/date.h
index 89ae641..813d312 100644
--- a/src/date.h
+++ b/src/date.h
@@ -6,8 +6,8 @@
 #define V8_DATE_H_
 
 #include "src/allocation.h"
+#include "src/base/platform/platform.h"
 #include "src/globals.h"
-#include "src/platform.h"
 
 
 namespace v8 {
@@ -39,12 +39,12 @@
   // It is an invariant of DateCache that cache stamp is non-negative.
   static const int kInvalidStamp = -1;
 
-  DateCache() : stamp_(0), tz_cache_(OS::CreateTimezoneCache()) {
+  DateCache() : stamp_(0), tz_cache_(base::OS::CreateTimezoneCache()) {
     ResetDateCache();
   }
 
   virtual ~DateCache() {
-    OS::DisposeTimezoneCache(tz_cache_);
+    base::OS::DisposeTimezoneCache(tz_cache_);
     tz_cache_ = NULL;
   }
 
@@ -93,7 +93,7 @@
     if (time_ms < 0 || time_ms > kMaxEpochTimeInMs) {
       time_ms = EquivalentTime(time_ms);
     }
-    return OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_);
+    return base::OS::LocalTimezone(static_cast<double>(time_ms), tz_cache_);
   }
 
   // ECMA 262 - 15.9.5.26
@@ -103,14 +103,52 @@
   }
 
   // ECMA 262 - 15.9.1.9
+  // LocalTime(t) = t + LocalTZA + DaylightSavingTA(t)
   int64_t ToLocal(int64_t time_ms) {
     return time_ms + LocalOffsetInMs() + DaylightSavingsOffsetInMs(time_ms);
   }
 
   // ECMA 262 - 15.9.1.9
+  // UTC(t) = t - LocalTZA - DaylightSavingTA(t - LocalTZA)
   int64_t ToUTC(int64_t time_ms) {
+    // We need to compute the UTC time that corresponds to the given local
+    // time. Following the spec literally leads to incorrect time computation
+    // at the points where we transition to and from DST.
+    //
+    // The following shows that using DST for (t - LocalTZA - hour) produces
+    // correct conversion.
+    //
+    // Consider transition to DST at local time L1.
+    // Let L0 = L1 - hour, L2 = L1 + hour,
+    //     U1 = UTC time that corresponds to L1,
+    //     U0 = U1 - hour.
+    // Transitioning to DST moves local clock one hour forward L1 => L2, so
+    // U0 = UTC time that corresponds to L0 = L0 - LocalTZA,
+    // U1 = UTC time that corresponds to L1 = L1 - LocalTZA,
+    // U1 = UTC time that corresponds to L2 = L2 - LocalTZA - hour.
+    // Note that DST(U0 - hour) = 0, DST(U0) = 0, DST(U1) = 1.
+    // U0 = L0 - LocalTZA - DST(L0 - LocalTZA - hour),
+    // U1 = L1 - LocalTZA - DST(L1 - LocalTZA - hour),
+    // U1 = L2 - LocalTZA - DST(L2 - LocalTZA - hour).
+    //
+    // Consider transition from DST at local time L1.
+    // Let L0 = L1 - hour,
+    //     U1 = UTC time that corresponds to L1,
+    //     U0 = U1 - hour, U2 = U1 + hour.
+    // Transitioning from DST moves local clock one hour back L1 => L0, so
+    // U0 = UTC time that corresponds to L0 (before transition)
+    //    = L0 - LocalTZA - hour.
+    // U1 = UTC time that corresponds to L0 (after transition)
+    //    = L0 - LocalTZA = L1 - LocalTZA - hour
+    // U2 = UTC time that corresponds to L1 = L1 - LocalTZA.
+    // Note that DST(U0) = 1, DST(U1) = 0, DST(U2) = 0.
+    // U0 = L0 - LocalTZA - DST(L0 - LocalTZA - hour) = L0 - LocalTZA - DST(U0).
+    // U2 = L1 - LocalTZA - DST(L1 - LocalTZA - hour) = L1 - LocalTZA - DST(U1).
+    // It is impossible to get U1 from local time.
+
+    const int kMsPerHour = 3600 * 1000;
     time_ms -= LocalOffsetInMs();
-    return time_ms - DaylightSavingsOffsetInMs(time_ms);
+    return time_ms - DaylightSavingsOffsetInMs(time_ms - kMsPerHour);
   }
 
 
@@ -162,12 +200,13 @@
   // These functions are virtual so that we can override them when testing.
   virtual int GetDaylightSavingsOffsetFromOS(int64_t time_sec) {
     double time_ms = static_cast<double>(time_sec * 1000);
-    return static_cast<int>(OS::DaylightSavingsOffset(time_ms, tz_cache_));
+    return static_cast<int>(
+        base::OS::DaylightSavingsOffset(time_ms, tz_cache_));
   }
 
   virtual int GetLocalOffsetFromOS() {
-    double offset = OS::LocalTimeOffset(tz_cache_);
-    ASSERT(offset < kInvalidLocalOffsetInMs);
+    double offset = base::OS::LocalTimeOffset(tz_cache_);
+    DCHECK(offset < kInvalidLocalOffsetInMs);
     return static_cast<int>(offset);
   }
 
@@ -234,7 +273,7 @@
   int ymd_month_;
   int ymd_day_;
 
-  TimezoneCache* tz_cache_;
+  base::TimezoneCache* tz_cache_;
 };
 
 } }   // namespace v8::internal
diff --git a/src/date.js b/src/date.js
index c58903c..87c87bf 100644
--- a/src/date.js
+++ b/src/date.js
@@ -763,7 +763,7 @@
   ));
 
   // Set up non-enumerable constructor property of the Date prototype object.
-  %SetProperty($Date.prototype, "constructor", $Date, DONT_ENUM);
+  %AddNamedProperty($Date.prototype, "constructor", $Date, DONT_ENUM);
 
   // Set up non-enumerable functions of the Date prototype object and
   // set their names.
diff --git a/src/dateparser-inl.h b/src/dateparser-inl.h
index c16812b..f7360f8 100644
--- a/src/dateparser-inl.h
+++ b/src/dateparser-inl.h
@@ -14,7 +14,7 @@
 bool DateParser::Parse(Vector<Char> str,
                        FixedArray* out,
                        UnicodeCache* unicode_cache) {
-  ASSERT(out->length() >= OUTPUT_SIZE);
+  DCHECK(out->length() >= OUTPUT_SIZE);
   InputReader<Char> in(unicode_cache, str);
   DateStringTokenizer<Char> scanner(&in);
   TimeZoneComposer tz;
@@ -175,7 +175,7 @@
   if (in_->Skip('.')) return DateToken::Symbol('.');
   if (in_->Skip(')')) return DateToken::Symbol(')');
   if (in_->IsAsciiAlphaOrAbove()) {
-    ASSERT(KeywordTable::kPrefixLength == 3);
+    DCHECK(KeywordTable::kPrefixLength == 3);
     uint32_t buffer[3] = {0, 0, 0};
     int length = in_->ReadWord(buffer, 3);
     int index = KeywordTable::Lookup(buffer, length);
@@ -200,9 +200,9 @@
     DayComposer* day,
     TimeComposer* time,
     TimeZoneComposer* tz) {
-  ASSERT(day->IsEmpty());
-  ASSERT(time->IsEmpty());
-  ASSERT(tz->IsEmpty());
+  DCHECK(day->IsEmpty());
+  DCHECK(time->IsEmpty());
+  DCHECK(tz->IsEmpty());
 
   // Parse mandatory date string: [('-'|'+')yy]yyyy[':'MM[':'DD]]
   if (scanner->Peek().IsAsciiSign()) {
diff --git a/src/dateparser.cc b/src/dateparser.cc
index 7180911..5db0391 100644
--- a/src/dateparser.cc
+++ b/src/dateparser.cc
@@ -177,7 +177,7 @@
     // most significant digits.
     int factor = 1;
     do {
-      ASSERT(factor <= 100000000);  // factor won't overflow.
+      DCHECK(factor <= 100000000);  // factor won't overflow.
       factor *= 10;
       length--;
     } while (length > 3);
diff --git a/src/dateparser.h b/src/dateparser.h
index 0a0a6f0..f284590 100644
--- a/src/dateparser.h
+++ b/src/dateparser.h
@@ -151,19 +151,19 @@
     int length() { return length_; }
 
     int number() {
-      ASSERT(IsNumber());
+      DCHECK(IsNumber());
       return value_;
     }
     KeywordType keyword_type() {
-      ASSERT(IsKeyword());
+      DCHECK(IsKeyword());
       return static_cast<KeywordType>(tag_);
     }
     int keyword_value() {
-      ASSERT(IsKeyword());
+      DCHECK(IsKeyword());
       return value_;
     }
     char symbol() {
-      ASSERT(IsSymbol());
+      DCHECK(IsSymbol());
       return static_cast<char>(value_);
     }
     bool IsSymbol(char symbol) {
@@ -179,7 +179,7 @@
       return tag_ == kSymbolTag && (value_ == '-' || value_ == '+');
     }
     int ascii_sign() {
-      ASSERT(IsAsciiSign());
+      DCHECK(IsAsciiSign());
       return 44 - value_;
     }
     bool IsKeywordZ() {
diff --git a/src/debug-debugger.js b/src/debug-debugger.js
index 660ea79..a1468a0 100644
--- a/src/debug-debugger.js
+++ b/src/debug-debugger.js
@@ -19,7 +19,10 @@
                      NewFunction: 3,
                      BeforeCompile: 4,
                      AfterCompile: 5,
-                     ScriptCollected: 6 };
+                     CompileError: 6,
+                     PromiseEvent: 7,
+                     AsyncTaskEvent: 8,
+                     BreakForCommand: 9 };
 
 // Types of exceptions that can be broken upon.
 Debug.ExceptionBreak = { Caught : 0,
@@ -986,44 +989,39 @@
 };
 
 
-function MakeBreakEvent(exec_state, break_points_hit) {
-  return new BreakEvent(exec_state, break_points_hit);
+function MakeBreakEvent(break_id, break_points_hit) {
+  return new BreakEvent(break_id, break_points_hit);
 }
 
 
-function BreakEvent(exec_state, break_points_hit) {
-  this.exec_state_ = exec_state;
+function BreakEvent(break_id, break_points_hit) {
+  this.frame_ = new FrameMirror(break_id, 0);
   this.break_points_hit_ = break_points_hit;
 }
 
 
-BreakEvent.prototype.executionState = function() {
-  return this.exec_state_;
-};
-
-
 BreakEvent.prototype.eventType = function() {
   return Debug.DebugEvent.Break;
 };
 
 
 BreakEvent.prototype.func = function() {
-  return this.exec_state_.frame(0).func();
+  return this.frame_.func();
 };
 
 
 BreakEvent.prototype.sourceLine = function() {
-  return this.exec_state_.frame(0).sourceLine();
+  return this.frame_.sourceLine();
 };
 
 
 BreakEvent.prototype.sourceColumn = function() {
-  return this.exec_state_.frame(0).sourceColumn();
+  return this.frame_.sourceColumn();
 };
 
 
 BreakEvent.prototype.sourceLineText = function() {
-  return this.exec_state_.frame(0).sourceLineText();
+  return this.frame_.sourceLineText();
 };
 
 
@@ -1036,8 +1034,7 @@
   var o = { seq: next_response_seq++,
             type: "event",
             event: "break",
-            body: { invocationText: this.exec_state_.frame(0).invocationText(),
-                  }
+            body: { invocationText: this.frame_.invocationText() }
           };
 
   // Add script related information to the event if available.
@@ -1070,24 +1067,19 @@
 };
 
 
-function MakeExceptionEvent(exec_state, exception, uncaught, promise) {
-  return new ExceptionEvent(exec_state, exception, uncaught, promise);
+function MakeExceptionEvent(break_id, exception, uncaught, promise) {
+  return new ExceptionEvent(break_id, exception, uncaught, promise);
 }
 
 
-function ExceptionEvent(exec_state, exception, uncaught, promise) {
-  this.exec_state_ = exec_state;
+function ExceptionEvent(break_id, exception, uncaught, promise) {
+  this.exec_state_ = new ExecutionState(break_id);
   this.exception_ = exception;
   this.uncaught_ = uncaught;
   this.promise_ = promise;
 }
 
 
-ExceptionEvent.prototype.executionState = function() {
-  return this.exec_state_;
-};
-
-
 ExceptionEvent.prototype.eventType = function() {
   return Debug.DebugEvent.Exception;
 };
@@ -1154,29 +1146,19 @@
 };
 
 
-function MakeCompileEvent(exec_state, script, before) {
-  return new CompileEvent(exec_state, script, before);
+function MakeCompileEvent(script, type) {
+  return new CompileEvent(script, type);
 }
 
 
-function CompileEvent(exec_state, script, before) {
-  this.exec_state_ = exec_state;
+function CompileEvent(script, type) {
   this.script_ = MakeMirror(script);
-  this.before_ = before;
+  this.type_ = type;
 }
 
 
-CompileEvent.prototype.executionState = function() {
-  return this.exec_state_;
-};
-
-
 CompileEvent.prototype.eventType = function() {
-  if (this.before_) {
-    return Debug.DebugEvent.BeforeCompile;
-  } else {
-    return Debug.DebugEvent.AfterCompile;
-  }
+  return this.type_;
 };
 
 
@@ -1188,10 +1170,16 @@
 CompileEvent.prototype.toJSONProtocol = function() {
   var o = new ProtocolMessage();
   o.running = true;
-  if (this.before_) {
-    o.event = "beforeCompile";
-  } else {
-    o.event = "afterCompile";
+  switch (this.type_) {
+    case Debug.DebugEvent.BeforeCompile:
+      o.event = "beforeCompile";
+      break;
+    case Debug.DebugEvent.AfterCompile:
+      o.event = "afterCompile";
+      break;
+    case Debug.DebugEvent.CompileError:
+      o.event = "compileError";
+      break;
   }
   o.body = {};
   o.body.script = this.script_;
@@ -1200,37 +1188,6 @@
 };
 
 
-function MakeScriptCollectedEvent(exec_state, id) {
-  return new ScriptCollectedEvent(exec_state, id);
-}
-
-
-function ScriptCollectedEvent(exec_state, id) {
-  this.exec_state_ = exec_state;
-  this.id_ = id;
-}
-
-
-ScriptCollectedEvent.prototype.id = function() {
-  return this.id_;
-};
-
-
-ScriptCollectedEvent.prototype.executionState = function() {
-  return this.exec_state_;
-};
-
-
-ScriptCollectedEvent.prototype.toJSONProtocol = function() {
-  var o = new ProtocolMessage();
-  o.running = true;
-  o.event = "scriptCollected";
-  o.body = {};
-  o.body.script = { id: this.id() };
-  return o.toJSONProtocol();
-};
-
-
 function MakeScriptObject_(script, include_source) {
   var o = { id: script.id(),
             name: script.name(),
@@ -1248,6 +1205,66 @@
 }
 
 
+function MakePromiseEvent(event_data) {
+  return new PromiseEvent(event_data);
+}
+
+
+function PromiseEvent(event_data) {
+  this.promise_ = event_data.promise;
+  this.parentPromise_ = event_data.parentPromise;
+  this.status_ = event_data.status;
+  this.value_ = event_data.value;
+}
+
+
+PromiseEvent.prototype.promise = function() {
+  return MakeMirror(this.promise_);
+};
+
+
+PromiseEvent.prototype.parentPromise = function() {
+  return MakeMirror(this.parentPromise_);
+};
+
+
+PromiseEvent.prototype.status = function() {
+  return this.status_;
+};
+
+
+PromiseEvent.prototype.value = function() {
+  return MakeMirror(this.value_);
+};
+
+
+function MakeAsyncTaskEvent(event_data) {
+  return new AsyncTaskEvent(event_data);
+}
+
+
+function AsyncTaskEvent(event_data) {
+  this.type_ = event_data.type;
+  this.name_ = event_data.name;
+  this.id_ = event_data.id;
+}
+
+
+AsyncTaskEvent.prototype.type = function() {
+  return this.type_;
+};
+
+
+AsyncTaskEvent.prototype.name = function() {
+  return this.name_;
+};
+
+
+AsyncTaskEvent.prototype.id = function() {
+  return this.id_;
+};
+
+
 function DebugCommandProcessor(exec_state, opt_is_running) {
   this.exec_state_ = exec_state;
   this.running_ = opt_is_running || false;
diff --git a/src/debug.cc b/src/debug.cc
index a710f0b..f0e7796 100644
--- a/src/debug.cc
+++ b/src/debug.cc
@@ -16,14 +16,11 @@
 #include "src/execution.h"
 #include "src/full-codegen.h"
 #include "src/global-handles.h"
-#include "src/ic.h"
-#include "src/ic-inl.h"
 #include "src/isolate-inl.h"
 #include "src/list.h"
+#include "src/log.h"
 #include "src/messages.h"
 #include "src/natives.h"
-#include "src/stub-cache.h"
-#include "src/log.h"
 
 #include "include/v8-debug.h"
 
@@ -73,8 +70,8 @@
 
 
 BreakLocationIterator::~BreakLocationIterator() {
-  ASSERT(reloc_iterator_ != NULL);
-  ASSERT(reloc_iterator_original_ != NULL);
+  DCHECK(reloc_iterator_ != NULL);
+  DCHECK(reloc_iterator_original_ != NULL);
   delete reloc_iterator_;
   delete reloc_iterator_original_;
 }
@@ -98,7 +95,7 @@
 
 void BreakLocationIterator::Next() {
   DisallowHeapAllocation no_gc;
-  ASSERT(!RinfoDone());
+  DCHECK(!RinfoDone());
 
   // Iterate through reloc info for code and original code stopping at each
   // breakable code target.
@@ -119,8 +116,8 @@
       // statement position.
       position_ = static_cast<int>(
           rinfo()->data() - debug_info_->shared()->start_position());
-      ASSERT(position_ >= 0);
-      ASSERT(statement_position_ >= 0);
+      DCHECK(position_ >= 0);
+      DCHECK(statement_position_ >= 0);
     }
 
     if (IsDebugBreakSlot()) {
@@ -151,7 +148,7 @@
             return;
           }
         } else {
-          ASSERT(type_ == SOURCE_BREAK_LOCATIONS);
+          DCHECK(type_ == SOURCE_BREAK_LOCATIONS);
           if (IsSourceBreakStub(code)) {
             break_point_++;
             return;
@@ -273,7 +270,7 @@
   // If there is not already a real break point here patch code with debug
   // break.
   if (!HasBreakPoint()) SetDebugBreak();
-  ASSERT(IsDebugBreak() || IsDebuggerStatement());
+  DCHECK(IsDebugBreak() || IsDebuggerStatement());
   // Set the break point information.
   DebugInfo::SetBreakPoint(debug_info_, code_position(),
                            position(), statement_position(),
@@ -287,7 +284,7 @@
   // If there are no more break points here remove the debug break.
   if (!HasBreakPoint()) {
     ClearDebugBreak();
-    ASSERT(!IsDebugBreak());
+    DCHECK(!IsDebugBreak());
   }
 }
 
@@ -298,7 +295,7 @@
 
   // If there is a real break point here no more to do.
   if (HasBreakPoint()) {
-    ASSERT(IsDebugBreak());
+    DCHECK(IsDebugBreak());
     return;
   }
 
@@ -313,13 +310,13 @@
 
   // If there is a real break point here no more to do.
   if (HasBreakPoint()) {
-    ASSERT(IsDebugBreak());
+    DCHECK(IsDebugBreak());
     return;
   }
 
   // Patch code removing debug break.
   ClearDebugBreak();
-  ASSERT(!IsDebugBreak());
+  DCHECK(!IsDebugBreak());
 }
 
 
@@ -343,7 +340,7 @@
     // Patch the IC call.
     SetDebugBreakAtIC();
   }
-  ASSERT(IsDebugBreak());
+  DCHECK(IsDebugBreak());
 }
 
 
@@ -361,7 +358,7 @@
     // Patch the IC call.
     ClearDebugBreakAtIC();
   }
-  ASSERT(!IsDebugBreak());
+  DCHECK(!IsDebugBreak());
 }
 
 
@@ -373,7 +370,7 @@
     Address target = original_rinfo()->target_address();
     Handle<Code> target_code(Code::GetCodeFromTargetAddress(target));
     if (target_code->kind() == Code::STUB) {
-      return target_code->major_key() == CodeStub::CallFunction;
+      return CodeStub::GetMajorKey(*target_code) == CodeStub::CallFunction;
     }
     return target_code->is_call_stub();
   }
@@ -398,7 +395,8 @@
   }
   bool is_call_function_stub =
       (maybe_call_function_stub->kind() == Code::STUB &&
-       maybe_call_function_stub->major_key() == CodeStub::CallFunction);
+       CodeStub::GetMajorKey(*maybe_call_function_stub) ==
+           CodeStub::CallFunction);
 
   // Step in through construct call requires no changes to the running code.
   // Step in through getters/setters should already be prepared as well
@@ -407,7 +405,7 @@
   // Step in through CallFunction stub should also be prepared by caller of
   // this function (Debug::PrepareStep) which should flood target function
   // with breakpoints.
-  ASSERT(RelocInfo::IsConstructCall(rmode()) ||
+  DCHECK(RelocInfo::IsConstructCall(rmode()) ||
          target_code->is_inline_cache_stub() ||
          is_call_function_stub);
 #endif
@@ -475,7 +473,7 @@
     }
   }
   if (code->kind() == Code::STUB) {
-    ASSERT(code->major_key() == CodeStub::CallFunction);
+    DCHECK(CodeStub::GetMajorKey(*code) == CodeStub::CallFunction);
     return isolate->builtins()->CallFunctionStub_DebugBreak();
   }
 
@@ -535,7 +533,7 @@
 
 
 bool BreakLocationIterator::RinfoDone() const {
-  ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
+  DCHECK(reloc_iterator_->done() == reloc_iterator_original_->done());
   return reloc_iterator_->done();
 }
 
@@ -544,9 +542,9 @@
   reloc_iterator_->next();
   reloc_iterator_original_->next();
 #ifdef DEBUG
-  ASSERT(reloc_iterator_->done() == reloc_iterator_original_->done());
+  DCHECK(reloc_iterator_->done() == reloc_iterator_original_->done());
   if (!reloc_iterator_->done()) {
-    ASSERT(rmode() == original_rmode());
+    DCHECK(rmode() == original_rmode());
   }
 #endif
 }
@@ -567,7 +565,6 @@
   // TODO(isolates): frames_are_dropped_?
   thread_local_.current_debug_scope_ = NULL;
   thread_local_.restarter_frame_function_pointer_ = NULL;
-  thread_local_.promise_on_stack_ = NULL;
 }
 
 
@@ -592,8 +589,7 @@
 
 
 ScriptCache::ScriptCache(Isolate* isolate) : HashMap(HashMap::PointersMatch),
-                                             isolate_(isolate),
-                                             collected_scripts_(10) {
+                                             isolate_(isolate) {
   Heap* heap = isolate_->heap();
   HandleScope scope(isolate_);
 
@@ -622,7 +618,14 @@
   HashMap::Entry* entry =
       HashMap::Lookup(reinterpret_cast<void*>(id), Hash(id), true);
   if (entry->value != NULL) {
-    ASSERT(*script == *reinterpret_cast<Script**>(entry->value));
+#ifdef DEBUG
+    // The code deserializer may introduce duplicate Script objects.
+    // Assert that the Script objects with the same id have the same name.
+    Handle<Script> found(reinterpret_cast<Script**>(entry->value));
+    DCHECK(script->id() == found->id());
+    DCHECK(!script->name()->IsString() ||
+           String::cast(script->name())->Equals(String::cast(found->name())));
+#endif
     return;
   }
   // Globalize the script object, make it weak and use the location of the
@@ -641,7 +644,7 @@
   Handle<FixedArray> instances = factory->NewFixedArray(occupancy());
   int count = 0;
   for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
-    ASSERT(entry->value != NULL);
+    DCHECK(entry->value != NULL);
     if (entry->value != NULL) {
       instances->set(count, *reinterpret_cast<Script**>(entry->value));
       count++;
@@ -651,21 +654,12 @@
 }
 
 
-void ScriptCache::ProcessCollectedScripts() {
-  Debug* debug = isolate_->debug();
-  for (int i = 0; i < collected_scripts_.length(); i++) {
-    debug->OnScriptCollected(collected_scripts_[i]);
-  }
-  collected_scripts_.Clear();
-}
-
-
 void ScriptCache::Clear() {
   // Iterate the script cache to get rid of all the weak handles.
   for (HashMap::Entry* entry = Start(); entry != NULL; entry = Next(entry)) {
-    ASSERT(entry != NULL);
+    DCHECK(entry != NULL);
     Object** location = reinterpret_cast<Object**>(entry->value);
-    ASSERT((*location)->IsScript());
+    DCHECK((*location)->IsScript());
     GlobalHandles::ClearWeakness(location);
     GlobalHandles::Destroy(location);
   }
@@ -688,7 +682,6 @@
   HashMap::Entry* entry = script_cache->Lookup(key, hash, false);
   Object** location = reinterpret_cast<Object**>(entry->value);
   script_cache->Remove(key, hash);
-  script_cache->collected_scripts_.Add(id);
 
   // Clear the weak handle.
   GlobalHandles::Destroy(location);
@@ -711,7 +704,7 @@
   for (DebugInfoListNode* n = debug->debug_info_list_;
        n != NULL;
        n = n->next()) {
-    ASSERT(n != node);
+    DCHECK(n != node);
   }
 #endif
 }
@@ -749,16 +742,13 @@
 
   // Compile the script.
   Handle<SharedFunctionInfo> function_info;
-  function_info = Compiler::CompileScript(source_code,
-                                          script_name, 0, 0,
-                                          false,
-                                          context,
-                                          NULL, NULL, NO_CACHED_DATA,
-                                          NATIVES_CODE);
+  function_info = Compiler::CompileScript(
+      source_code, script_name, 0, 0, false, context, NULL, NULL,
+      ScriptCompiler::kNoCompileOptions, NATIVES_CODE);
 
   // Silently ignore stack overflows during compilation.
   if (function_info.is_null()) {
-    ASSERT(isolate->has_pending_exception());
+    DCHECK(isolate->has_pending_exception());
     isolate->clear_pending_exception();
     return false;
   }
@@ -767,24 +757,21 @@
   Handle<JSFunction> function =
       factory->NewFunctionFromSharedFunctionInfo(function_info, context);
 
-  Handle<Object> exception;
-  MaybeHandle<Object> result =
-      Execution::TryCall(function,
-                         Handle<Object>(context->global_object(), isolate),
-                         0,
-                         NULL,
-                         &exception);
+  MaybeHandle<Object> maybe_exception;
+  MaybeHandle<Object> result = Execution::TryCall(
+      function, handle(context->global_proxy()), 0, NULL, &maybe_exception);
 
   // Check for caught exceptions.
   if (result.is_null()) {
-    ASSERT(!isolate->has_pending_exception());
+    DCHECK(!isolate->has_pending_exception());
     MessageLocation computed_location;
     isolate->ComputeLocation(&computed_location);
     Handle<Object> message = MessageHandler::MakeMessageObject(
         isolate, "error_loading_debugger", &computed_location,
         Vector<Handle<Object> >::empty(), Handle<JSArray>());
-    ASSERT(!isolate->has_pending_exception());
-    if (!exception.is_null()) {
+    DCHECK(!isolate->has_pending_exception());
+    Handle<Object> exception;
+    if (maybe_exception.ToHandle(&exception)) {
       isolate->set_pending_exception(*exception);
       MessageHandler::ReportMessage(isolate, NULL, message);
       isolate->clear_pending_exception();
@@ -818,7 +805,7 @@
   ExtensionConfiguration no_extensions;
   Handle<Context> context =
       isolate_->bootstrapper()->CreateEnvironment(
-          Handle<Object>::null(),
+          MaybeHandle<JSGlobalProxy>(),
           v8::Handle<ObjectTemplate>(),
           &no_extensions);
 
@@ -831,15 +818,13 @@
 
   // Expose the builtins object in the debugger context.
   Handle<String> key = isolate_->factory()->InternalizeOneByteString(
-      STATIC_ASCII_VECTOR("builtins"));
+      STATIC_CHAR_VECTOR("builtins"));
   Handle<GlobalObject> global =
       Handle<GlobalObject>(context->global_object(), isolate_);
   Handle<JSBuiltinsObject> builtin =
       Handle<JSBuiltinsObject>(global->builtins(), isolate_);
   RETURN_ON_EXCEPTION_VALUE(
-      isolate_,
-      JSReceiver::SetProperty(global, key, builtin, NONE, SLOPPY),
-      false);
+      isolate_, Object::SetProperty(global, key, builtin, SLOPPY), false);
 
   // Compile the JavaScript for the debugger in the debugger context.
   bool caught_exception =
@@ -863,9 +848,6 @@
   ClearAllBreakPoints();
   ClearStepping();
 
-  // Match unmatched PromiseHandlePrologue calls.
-  while (thread_local_.promise_on_stack_) PromiseHandleEpilogue();
-
   // Return debugger is not loaded.
   if (!is_loaded()) return;
 
@@ -884,7 +866,7 @@
 void Debug::Break(Arguments args, JavaScriptFrame* frame) {
   Heap* heap = isolate_->heap();
   HandleScope scope(isolate_);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 
   // Initialize LiveEdit.
   LiveEdit::InitializeThreadLocal(this);
@@ -934,7 +916,7 @@
       frame->fp() != thread_local_.step_out_fp_ &&
       break_points_hit->IsUndefined() ) {
       // Step count should always be 0 for StepOut.
-      ASSERT(thread_local_.step_count_ == 0);
+      DCHECK(thread_local_.step_count_ == 0);
   } else if (!break_points_hit->IsUndefined() ||
              (thread_local_.last_step_action_ != StepNone &&
               thread_local_.step_count_ == 0)) {
@@ -1013,7 +995,7 @@
   // they are in a FixedArray.
   Handle<FixedArray> break_points_hit;
   int break_points_hit_count = 0;
-  ASSERT(!break_point_objects->IsUndefined());
+  DCHECK(!break_point_objects->IsUndefined());
   if (break_point_objects->IsFixedArray()) {
     Handle<FixedArray> array(FixedArray::cast(*break_point_objects));
     break_points_hit = factory->NewFixedArray(array->length());
@@ -1052,7 +1034,7 @@
   // Get the function IsBreakPointTriggered (defined in debug-debugger.js).
   Handle<String> is_break_point_triggered_string =
       factory->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("IsBreakPointTriggered"));
+          STATIC_CHAR_VECTOR("IsBreakPointTriggered"));
   Handle<GlobalObject> debug_global(debug_context()->global_object());
   Handle<JSFunction> check_break_point =
     Handle<JSFunction>::cast(Object::GetProperty(
@@ -1066,7 +1048,7 @@
   Handle<Object> result;
   if (!Execution::TryCall(check_break_point,
                           isolate_->js_builtins_object(),
-                          ARRAY_SIZE(argv),
+                          arraysize(argv),
                           argv).ToHandle(&result)) {
     return false;
   }
@@ -1085,7 +1067,7 @@
 // Return the debug info for this function. EnsureDebugInfo must be called
 // prior to ensure the debug info has been generated for shared.
 Handle<DebugInfo> Debug::GetDebugInfo(Handle<SharedFunctionInfo> shared) {
-  ASSERT(HasDebugInfo(shared));
+  DCHECK(HasDebugInfo(shared));
   return Handle<DebugInfo>(DebugInfo::cast(shared->debug_info()));
 }
 
@@ -1106,7 +1088,7 @@
 
   Handle<DebugInfo> debug_info = GetDebugInfo(shared);
   // Source positions start at zero.
-  ASSERT(*source_position >= 0);
+  DCHECK(*source_position >= 0);
 
   // Find the break point and change it.
   BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
@@ -1150,7 +1132,7 @@
 
   Handle<DebugInfo> debug_info = GetDebugInfo(shared);
   // Source positions start at zero.
-  ASSERT(position >= 0);
+  DCHECK(position >= 0);
 
   // Find the break point and change it.
   BreakLocationIterator it(debug_info, SOURCE_BREAK_LOCATIONS);
@@ -1160,7 +1142,7 @@
   *source_position = it.position() + shared->start_position();
 
   // At least one active break point now.
-  ASSERT(debug_info->GetBreakPointCount() > 0);
+  DCHECK(debug_info->GetBreakPointCount() > 0);
   return true;
 }
 
@@ -1237,7 +1219,7 @@
                         isolate_);
 
   if (!bindee.is_null() && bindee->IsJSFunction() &&
-      !JSFunction::cast(*bindee)->IsNative()) {
+      !JSFunction::cast(*bindee)->IsFromNativeScript()) {
     Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
     Debug::FloodWithOneShot(bindee_function);
   }
@@ -1280,63 +1262,14 @@
 }
 
 
-PromiseOnStack::PromiseOnStack(Isolate* isolate,
-                                      PromiseOnStack* prev,
-                                      Handle<JSFunction> getter)
-    : isolate_(isolate), prev_(prev) {
-  handler_ = StackHandler::FromAddress(
-      Isolate::handler(isolate->thread_local_top()));
-  getter_ = Handle<JSFunction>::cast(
-      isolate->global_handles()->Create(*getter));
-}
-
-
-PromiseOnStack::~PromiseOnStack() {
-  isolate_->global_handles()->Destroy(Handle<Object>::cast(getter_).location());
-}
-
-
-void Debug::PromiseHandlePrologue(Handle<JSFunction> promise_getter) {
-  PromiseOnStack* prev = thread_local_.promise_on_stack_;
-  thread_local_.promise_on_stack_ =
-      new PromiseOnStack(isolate_, prev, promise_getter);
-}
-
-
-void Debug::PromiseHandleEpilogue() {
-  if (thread_local_.promise_on_stack_ == NULL) return;
-  PromiseOnStack* prev = thread_local_.promise_on_stack_->prev();
-  delete thread_local_.promise_on_stack_;
-  thread_local_.promise_on_stack_ = prev;
-}
-
-
-Handle<Object> Debug::GetPromiseForUncaughtException() {
-  Handle<Object> undefined = isolate_->factory()->undefined_value();
-  if (thread_local_.promise_on_stack_ == NULL) return undefined;
-  Handle<JSFunction> promise_getter = thread_local_.promise_on_stack_->getter();
-  StackHandler* promise_catch = thread_local_.promise_on_stack_->handler();
-  // Find the top-most try-catch handler.
-  StackHandler* handler = StackHandler::FromAddress(
-      Isolate::handler(isolate_->thread_local_top()));
-  while (handler != NULL && !handler->is_catch()) {
-    handler = handler->next();
-  }
-#ifdef DEBUG
-  // Make sure that our promise catch handler is in the list of handlers,
-  // even if it's not the top-most try-catch handler.
-  StackHandler* temp = handler;
-  while (temp != promise_catch && !temp->is_catch()) {
-    temp = temp->next();
-    CHECK(temp != NULL);
-  }
-#endif  // DEBUG
-
-  if (handler == promise_catch) {
-    return Execution::Call(
-        isolate_, promise_getter, undefined, 0, NULL).ToHandleChecked();
-  }
-  return undefined;
+bool Debug::PromiseHasRejectHandler(Handle<JSObject> promise) {
+  Handle<JSFunction> fun = Handle<JSFunction>::cast(
+      JSObject::GetDataProperty(isolate_->js_builtins_object(),
+                                isolate_->factory()->NewStringFromStaticChars(
+                                    "PromiseHasRejectHandler")));
+  Handle<Object> result =
+      Execution::Call(isolate_, fun, promise, 0, NULL).ToHandleChecked();
+  return result->IsTrue();
 }
 
 
@@ -1347,7 +1280,7 @@
 
   PrepareForBreakPoints();
 
-  ASSERT(in_debug_scope());
+  DCHECK(in_debug_scope());
 
   // Remember this step action and count.
   thread_local_.last_step_action_ = step_action;
@@ -1435,7 +1368,8 @@
             Code::GetCodeFromTargetAddress(original_target);
       }
       if ((maybe_call_function_stub->kind() == Code::STUB &&
-           maybe_call_function_stub->major_key() == CodeStub::CallFunction) ||
+           CodeStub::GetMajorKey(maybe_call_function_stub) ==
+               CodeStub::CallFunction) ||
           maybe_call_function_stub->kind() == Code::CALL_IC) {
         // Save reference to the code as we may need it to find out arguments
         // count for 'step in' later.
@@ -1454,11 +1388,12 @@
         frames_it.Advance();
       }
     } else {
-      ASSERT(it.IsExit());
+      DCHECK(it.IsExit());
       frames_it.Advance();
     }
     // Skip builtin functions on the stack.
-    while (!frames_it.done() && frames_it.frame()->function()->IsNative()) {
+    while (!frames_it.done() &&
+           frames_it.frame()->function()->IsFromNativeScript()) {
       frames_it.Advance();
     }
     // Step out: If there is a JavaScript caller frame, we need to
@@ -1495,17 +1430,7 @@
       bool is_call_ic = call_function_stub->kind() == Code::CALL_IC;
 
       // Find out number of arguments from the stub minor key.
-      // Reverse lookup required as the minor key cannot be retrieved
-      // from the code object.
-      Handle<Object> obj(
-          isolate_->heap()->code_stubs()->SlowReverseLookup(
-              *call_function_stub),
-          isolate_);
-      ASSERT(!obj.is_null());
-      ASSERT(!(*obj)->IsUndefined());
-      ASSERT(obj->IsSmi());
-      // Get the STUB key and extract major and minor key.
-      uint32_t key = Smi::cast(*obj)->value();
+      uint32_t key = call_function_stub->stub_key();
       // Argc in the stub is the number of arguments passed - not the
       // expected arguments of the called function.
       int call_function_arg_count = is_call_ic
@@ -1513,8 +1438,9 @@
           : CallFunctionStub::ExtractArgcFromMinorKey(
               CodeStub::MinorKeyFromKey(key));
 
-      ASSERT(is_call_ic ||
-             call_function_stub->major_key() == CodeStub::MajorKeyFromKey(key));
+      DCHECK(is_call_ic ||
+             CodeStub::GetMajorKey(*call_function_stub) ==
+                 CodeStub::MajorKeyFromKey(key));
 
       // Find target function on the expression stack.
       // Expression stack looks like this (top to bottom):
@@ -1524,7 +1450,7 @@
       // Receiver
       // Function to call
       int expressions_count = frame->ComputeExpressionsCount();
-      ASSERT(expressions_count - 2 - call_function_arg_count >= 0);
+      DCHECK(expressions_count - 2 - call_function_arg_count >= 0);
       Object* fun = frame->GetExpression(
           expressions_count - 2 - call_function_arg_count);
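
The index arithmetic in this hunk is easier to verify with concrete numbers. Reading the comments above with GetExpression(expressions_count - 1) as the top of the expression stack, the arguments and the receiver sit above the callee, so the callee lives at expressions_count - 2 - argc. A standalone sketch with hypothetical values:

    #include <cassert>

    // Mirrors only the layout documented in the comments above
    // (top to bottom: arg n .. arg 1, receiver, function to call).
    int FunctionSlot(int expressions_count, int call_function_arg_count) {
      return expressions_count - 2 - call_function_arg_count;
    }

    int main() {
      // expressions_count == 10, argc == 2:
      // index 9 = arg 2 (top), 8 = arg 1, 7 = receiver, 6 = callee.
      assert(FunctionSlot(10, 2) == 6);
    }
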
 
@@ -1545,7 +1471,7 @@
         Handle<JSFunction> js_function(JSFunction::cast(fun));
         if (js_function->shared()->bound()) {
           Debug::FloodBoundFunctionWithOneShot(js_function);
-        } else if (!js_function->IsNative()) {
+        } else if (!js_function->IsFromNativeScript()) {
           // Don't step into builtins.
           // It will also compile target function if it's not compiled yet.
           FloodWithOneShot(js_function);
@@ -1562,7 +1488,7 @@
     if (is_load_or_store) {
       // Remember source position and frame to handle step in getter/setter. If
       // there is a custom getter/setter it will be handled in
-      // Object::Get/SetPropertyWithCallback, otherwise the step action will be
+      // Object::Get/SetPropertyWithAccessor, otherwise the step action will be
       // propagated on the next Debug::Break.
       thread_local_.last_statement_position_ =
           debug_info->code()->SourceStatementPosition(frame->pc());
@@ -1675,7 +1601,7 @@
     it.Advance();
     // For constructor functions skip another frame.
     if (is_constructor) {
-      ASSERT(it.frame()->is_construct());
+      DCHECK(it.frame()->is_construct());
       it.Advance();
     }
     fp = it.frame()->fp();
@@ -1687,7 +1613,7 @@
     if (function->shared()->bound()) {
       // Handle Function.prototype.bind
       Debug::FloodBoundFunctionWithOneShot(function);
-    } else if (!function->IsNative()) {
+    } else if (!function->IsFromNativeScript()) {
       // Don't allow step into functions in the native context.
       if (function->shared()->code() ==
           isolate->builtins()->builtin(Builtins::kFunctionApply) ||
@@ -1699,7 +1625,7 @@
         // function.
         if (!holder.is_null() && holder->IsJSFunction()) {
           Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
-          if (!js_function->IsNative()) {
+          if (!js_function->IsFromNativeScript()) {
             Debug::FloodWithOneShot(js_function);
           } else if (js_function->shared()->bound()) {
             // Handle Function.prototype.bind
@@ -1747,7 +1673,7 @@
 
 
 void Debug::ActivateStepIn(StackFrame* frame) {
-  ASSERT(!StepOutActive());
+  DCHECK(!StepOutActive());
   thread_local_.step_into_fp_ = frame->UnpaddedFP();
 }
 
@@ -1758,7 +1684,7 @@
 
 
 void Debug::ActivateStepOut(StackFrame* frame) {
-  ASSERT(!StepInActive());
+  DCHECK(!StepInActive());
   thread_local_.step_out_fp_ = frame->UnpaddedFP();
 }
 
@@ -1796,7 +1722,7 @@
       }
     } else if (frame->function()->IsJSFunction()) {
       JSFunction* function = frame->function();
-      ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+      DCHECK(frame->LookupCode()->kind() == Code::FUNCTION);
       active_functions->Add(Handle<JSFunction>(function));
       function->shared()->code()->set_gc_metadata(active_code_marker);
     }
@@ -1809,10 +1735,10 @@
 // Assembler::CheckConstPool() and Assembler::CheckVeneerPool(). Note that this
 // is only useful for architectures using constant pools or veneer pools.
 static int ComputeCodeOffsetFromPcOffset(Code *code, int pc_offset) {
-  ASSERT_EQ(code->kind(), Code::FUNCTION);
-  ASSERT(!code->has_debug_break_slots());
-  ASSERT_LE(0, pc_offset);
-  ASSERT_LT(pc_offset, code->instruction_end() - code->instruction_start());
+  DCHECK_EQ(code->kind(), Code::FUNCTION);
+  DCHECK(!code->has_debug_break_slots());
+  DCHECK_LE(0, pc_offset);
+  DCHECK_LT(pc_offset, code->instruction_end() - code->instruction_start());
 
   int mask = RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
              RelocInfo::ModeMask(RelocInfo::VENEER_POOL);
@@ -1821,9 +1747,9 @@
   for (RelocIterator it(code, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     if (info->pc() >= pc) break;
-    ASSERT(RelocInfo::IsConstPool(info->rmode()));
+    DCHECK(RelocInfo::IsConstPool(info->rmode()));
     code_offset -= static_cast<int>(info->data());
-    ASSERT_LE(0, code_offset);
+    DCHECK_LE(0, code_offset);
   }
 
   return code_offset;
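
The subtraction above works because an inline constant pool occupies pc range without contributing logical code bytes: walking the pools in pc order and deducting their sizes turns a pc offset into a code offset, and the inverse function in the next hunk adds those bytes (plus debug break slot lengths) back. A minimal standalone sketch of the same arithmetic, using a hypothetical pool table instead of V8's RelocIterator:

    #include <cassert>

    // Hypothetical pool descriptor: byte offset of the pool in the
    // instruction stream and its size in bytes; the table is sorted by pc.
    struct Pool { int pc; int size; };

    int CodeOffsetFromPcOffset(int pc_offset, const Pool* pools, int n) {
      int code_offset = pc_offset;
      for (int i = 0; i < n; i++) {
        if (pools[i].pc >= pc_offset) break;  // pools at or past pc don't count
        code_offset -= pools[i].size;         // pool bytes are not code
      }
      return code_offset;
    }

    int main() {
      Pool pools[] = {{16, 8}};  // one 8-byte pool emitted at pc offset 16
      assert(CodeOffsetFromPcOffset(32, pools, 1) == 24);
    }
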
@@ -1832,7 +1758,7 @@
 
 // The inverse of ComputeCodeOffsetFromPcOffset.
 static int ComputePcOffsetFromCodeOffset(Code *code, int code_offset) {
-  ASSERT_EQ(code->kind(), Code::FUNCTION);
+  DCHECK_EQ(code->kind(), Code::FUNCTION);
 
   int mask = RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
              RelocInfo::ModeMask(RelocInfo::CONST_POOL) |
@@ -1844,14 +1770,14 @@
     if (RelocInfo::IsDebugBreakSlot(info->rmode())) {
       reloc += Assembler::kDebugBreakSlotLength;
     } else {
-      ASSERT(RelocInfo::IsConstPool(info->rmode()));
+      DCHECK(RelocInfo::IsConstPool(info->rmode()));
       reloc += static_cast<int>(info->data());
     }
   }
 
   int pc_offset = code_offset + reloc;
 
-  ASSERT_LT(code->instruction_start() + pc_offset, code->instruction_end());
+  DCHECK_LT(code->instruction_start() + pc_offset, code->instruction_end());
 
   return pc_offset;
 }
@@ -1867,7 +1793,7 @@
 
     JSFunction* function = frame->function();
 
-    ASSERT(frame->LookupCode()->kind() == Code::FUNCTION);
+    DCHECK(frame->LookupCode()->kind() == Code::FUNCTION);
 
     Handle<Code> frame_code(frame->LookupCode());
     if (frame_code->has_debug_break_slots()) continue;
@@ -1905,6 +1831,11 @@
              reinterpret_cast<intptr_t>(new_pc));
     }
 
+    if (FLAG_enable_ool_constant_pool) {
+      // Update constant pool pointer for new code.
+      frame->set_constant_pool(new_code->constant_pool());
+    }
+
     // Patch the return address to return into the code with
     // debug break slots.
     frame->set_pc(new_pc);
@@ -1949,7 +1880,7 @@
   // Make sure that the shared full code is compiled with debug
   // break slots.
   if (!function->shared()->code()->has_debug_break_slots()) {
-    MaybeHandle<Code> code = Compiler::GetCodeForDebugging(function);
+    MaybeHandle<Code> code = Compiler::GetDebugCode(function);
     // Recompilation can fail.  In that case leave the code as it was.
     if (!code.is_null()) function->ReplaceCode(*code.ToHandleChecked());
   } else {
@@ -1983,7 +1914,7 @@
 
     Deoptimizer::DeoptimizeAll(isolate_);
 
-    Handle<Code> lazy_compile = isolate_->builtins()->CompileUnoptimized();
+    Handle<Code> lazy_compile = isolate_->builtins()->CompileLazy();
 
     // There will be at least one break point when we are done.
     has_break_points_ = true;
@@ -2041,7 +1972,7 @@
 
           if (!shared->allows_lazy_compilation()) continue;
           if (!shared->script()->IsScript()) continue;
-          if (function->IsNative()) continue;
+          if (function->IsFromNativeScript()) continue;
           if (shared->code()->gc_metadata() == active_code_marker) continue;
 
           if (shared->is_generator()) {
@@ -2073,11 +2004,11 @@
           if (!gen->is_suspended()) continue;
 
           JSFunction* fun = gen->function();
-          ASSERT_EQ(fun->code()->kind(), Code::FUNCTION);
+          DCHECK_EQ(fun->code()->kind(), Code::FUNCTION);
           if (fun->code()->has_debug_break_slots()) continue;
 
           int pc_offset = gen->continuation();
-          ASSERT_LT(0, pc_offset);
+          DCHECK_LT(0, pc_offset);
 
           int code_offset =
               ComputeCodeOffsetFromPcOffset(fun->code(), pc_offset);
@@ -2167,7 +2098,7 @@
         if (obj->IsJSFunction()) {
           function = Handle<JSFunction>(JSFunction::cast(obj));
           shared = Handle<SharedFunctionInfo>(function->shared());
-          ASSERT(shared->allows_lazy_compilation() || shared->is_compiled());
+          DCHECK(shared->allows_lazy_compilation() || shared->is_compiled());
           found_next_candidate = true;
         } else if (obj->IsSharedFunctionInfo()) {
           shared = Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(obj));
@@ -2249,7 +2180,7 @@
 
   // Return if we already have the debug info for shared.
   if (HasDebugInfo(shared)) {
-    ASSERT(shared->is_compiled());
+    DCHECK(shared->is_compiled());
     return true;
   }
 
@@ -2275,7 +2206,7 @@
 
 
 void Debug::RemoveDebugInfo(Handle<DebugInfo> debug_info) {
-  ASSERT(debug_info_list_ != NULL);
+  DCHECK(debug_info_list_ != NULL);
   // Run through the debug info objects to find this one and remove it.
   DebugInfoListNode* prev = NULL;
   DebugInfoListNode* current = debug_info_list_;
@@ -2326,13 +2257,13 @@
 #ifdef DEBUG
   // Get the code which is actually executing.
   Handle<Code> frame_code(frame->LookupCode());
-  ASSERT(frame_code.is_identical_to(code));
+  DCHECK(frame_code.is_identical_to(code));
 #endif
 
   // Find the call address in the running code. This address holds the call to
   // either a DebugBreakXXX or to the debug break return entry code if the
   // break point is still active after processing the break point.
-  Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
+  Address addr = Assembler::break_address_from_return_address(frame->pc());
 
   // Check if the location is at JS exit or debug break slot.
   bool at_js_return = false;
@@ -2370,24 +2301,27 @@
 
     // Continue just after the slot.
     after_break_target_ = addr + Assembler::kDebugBreakSlotLength;
-  } else if (IsDebugBreak(Assembler::target_address_at(addr, *code))) {
-    // We now know that there is still a debug break call at the target address,
-    // so the break point is still there and the original code will hold the
-    // address to jump to in order to complete the call which is replaced by a
-    // call to DebugBreakXXX.
-
-    // Find the corresponding address in the original code.
-    addr += original_code->instruction_start() - code->instruction_start();
-
-    // Install jump to the call address in the original code. This will be the
-    // call which was overwritten by the call to DebugBreakXXX.
-    after_break_target_ = Assembler::target_address_at(addr, *original_code);
   } else {
-    // There is no longer a break point present. Don't try to look in the
-    // original code as the running code will have the right address. This takes
-    // care of the case where the last break point is removed from the function
-    // and therefore no "original code" is available.
-    after_break_target_ = Assembler::target_address_at(addr, *code);
+    addr = Assembler::target_address_from_return_address(frame->pc());
+    if (IsDebugBreak(Assembler::target_address_at(addr, *code))) {
+      // We now know that there is still a debug break call at the target
+      // address, so the break point is still there and the original code will
+      // hold the address to jump to in order to complete the call which is
+      // replaced by a call to DebugBreakXXX.
+
+      // Find the corresponding address in the original code.
+      addr += original_code->instruction_start() - code->instruction_start();
+
+      // Install jump to the call address in the original code. This will be the
+      // call which was overwritten by the call to DebugBreakXXX.
+      after_break_target_ = Assembler::target_address_at(addr, *original_code);
+    } else {
+      // There is no longer a break point present. Don't try to look in the
+      // original code as the running code will have the right address. This
+      // takes care of the case where the last break point is removed from the
+      // function and therefore no "original code" is available.
+      after_break_target_ = Assembler::target_address_at(addr, *code);
+    }
   }
 }
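
For the portion visible in this hunk, the resume address is resolved in three tiers (the JS-return case is handled earlier, outside the hunk). A condensed, hypothetical restatement of that control flow, not V8 API:

    // Which address execution should continue at after a debug break.
    enum class Resume { kPastSlot, kOriginalCodeTarget, kRunningCodeTarget };

    Resume ResolveAfterBreak(bool at_debug_break_slot, bool still_debug_break) {
      if (at_debug_break_slot) return Resume::kPastSlot;
      if (still_debug_break) return Resume::kOriginalCodeTarget;
      return Resume::kRunningCodeTarget;  // break point already removed
    }
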
 
@@ -2416,11 +2350,11 @@
 #ifdef DEBUG
   // Get the code which is actually executing.
   Handle<Code> frame_code(frame->LookupCode());
-  ASSERT(frame_code.is_identical_to(code));
+  DCHECK(frame_code.is_identical_to(code));
 #endif
 
   // Find the call address in the running code.
-  Address addr = frame->pc() - Assembler::kPatchDebugBreakSlotReturnOffset;
+  Address addr = Assembler::break_address_from_return_address(frame->pc());
 
   // Check if the location is at JS return.
   RelocIterator it(debug_info->code());
@@ -2457,16 +2391,13 @@
   HandleScope scope(isolate_);
   AssertDebugContext();
   Factory* factory = isolate_->factory();
-  JSObject::SetProperty(isolate_->global_object(),
-      factory->NewStringFromAsciiChecked("next_handle_"),
-      handle(Smi::FromInt(0), isolate_),
-      NONE,
-      SLOPPY).Check();
-  JSObject::SetProperty(isolate_->global_object(),
-      factory->NewStringFromAsciiChecked("mirror_cache_"),
-      factory->NewJSArray(0, FAST_ELEMENTS),
-      NONE,
-      SLOPPY).Check();
+  Handle<GlobalObject> global(isolate_->global_object());
+  JSObject::SetProperty(global,
+                        factory->NewStringFromAsciiChecked("next_handle_"),
+                        handle(Smi::FromInt(0), isolate_), SLOPPY).Check();
+  JSObject::SetProperty(global,
+                        factory->NewStringFromAsciiChecked("mirror_cache_"),
+                        factory->NewJSArray(0, FAST_ELEMENTS), SLOPPY).Check();
 }
 
 
@@ -2500,27 +2431,20 @@
 }
 
 
-void Debug::AfterGarbageCollection() {
-  // Generate events for collected scripts.
-  if (script_cache_ != NULL) {
-    script_cache_->ProcessCollectedScripts();
-  }
-}
-
-
 MaybeHandle<Object> Debug::MakeJSObject(const char* constructor_name,
                                         int argc,
                                         Handle<Object> argv[]) {
   AssertDebugContext();
   // Create the execution state object.
+  Handle<GlobalObject> global(isolate_->global_object());
   Handle<Object> constructor = Object::GetProperty(
-      isolate_, isolate_->global_object(), constructor_name).ToHandleChecked();
-  ASSERT(constructor->IsJSFunction());
+      isolate_, global, constructor_name).ToHandleChecked();
+  DCHECK(constructor->IsJSFunction());
   if (!constructor->IsJSFunction()) return MaybeHandle<Object>();
   // We do not handle interrupts here.  In particular, termination interrupts.
   PostponeInterruptsScope no_interrupts(isolate_);
   return Execution::TryCall(Handle<JSFunction>::cast(constructor),
-                            Handle<JSObject>(debug_context()->global_object()),
+                            handle(debug_context()->global_proxy()),
                             argc,
                             argv);
 }
@@ -2529,63 +2453,83 @@
 MaybeHandle<Object> Debug::MakeExecutionState() {
   // Create the execution state object.
   Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()) };
-  return MakeJSObject("MakeExecutionState", ARRAY_SIZE(argv), argv);
+  return MakeJSObject("MakeExecutionState", arraysize(argv), argv);
 }
 
 
 MaybeHandle<Object> Debug::MakeBreakEvent(Handle<Object> break_points_hit) {
-  Handle<Object> exec_state;
-  if (!MakeExecutionState().ToHandle(&exec_state)) return MaybeHandle<Object>();
   // Create the new break event object.
-  Handle<Object> argv[] = { exec_state, break_points_hit };
-  return MakeJSObject("MakeBreakEvent", ARRAY_SIZE(argv), argv);
+  Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()),
+                            break_points_hit };
+  return MakeJSObject("MakeBreakEvent", arraysize(argv), argv);
 }
 
 
 MaybeHandle<Object> Debug::MakeExceptionEvent(Handle<Object> exception,
                                               bool uncaught,
                                               Handle<Object> promise) {
-  Handle<Object> exec_state;
-  if (!MakeExecutionState().ToHandle(&exec_state)) return MaybeHandle<Object>();
   // Create the new exception event object.
-  Handle<Object> argv[] = { exec_state,
+  Handle<Object> argv[] = { isolate_->factory()->NewNumberFromInt(break_id()),
                             exception,
                             isolate_->factory()->ToBoolean(uncaught),
                             promise };
-  return MakeJSObject("MakeExceptionEvent", ARRAY_SIZE(argv), argv);
+  return MakeJSObject("MakeExceptionEvent", arraysize(argv), argv);
 }
 
 
 MaybeHandle<Object> Debug::MakeCompileEvent(Handle<Script> script,
-                                            bool before) {
-  Handle<Object> exec_state;
-  if (!MakeExecutionState().ToHandle(&exec_state)) return MaybeHandle<Object>();
+                                            v8::DebugEvent type) {
   // Create the compile event object.
   Handle<Object> script_wrapper = Script::GetWrapper(script);
-  Handle<Object> argv[] = { exec_state,
-                            script_wrapper,
-                            isolate_->factory()->ToBoolean(before) };
-  return MakeJSObject("MakeCompileEvent", ARRAY_SIZE(argv), argv);
+  Handle<Object> argv[] = { script_wrapper,
+                            isolate_->factory()->NewNumberFromInt(type) };
+  return MakeJSObject("MakeCompileEvent", arraysize(argv), argv);
 }
 
 
-MaybeHandle<Object> Debug::MakeScriptCollectedEvent(int id) {
-  Handle<Object> exec_state;
-  if (!MakeExecutionState().ToHandle(&exec_state)) return MaybeHandle<Object>();
-  // Create the script collected event object.
-  Handle<Object> id_object = Handle<Smi>(Smi::FromInt(id), isolate_);
-  Handle<Object> argv[] = { exec_state, id_object };
-  return MakeJSObject("MakeScriptCollectedEvent", ARRAY_SIZE(argv), argv);
+MaybeHandle<Object> Debug::MakePromiseEvent(Handle<JSObject> event_data) {
+  // Create the promise event object.
+  Handle<Object> argv[] = { event_data };
+  return MakeJSObject("MakePromiseEvent", arraysize(argv), argv);
 }
 
 
-void Debug::OnException(Handle<Object> exception, bool uncaught) {
+MaybeHandle<Object> Debug::MakeAsyncTaskEvent(Handle<JSObject> task_event) {
+  // Create the async task event object.
+  Handle<Object> argv[] = { task_event };
+  return MakeJSObject("MakeAsyncTaskEvent", arraysize(argv), argv);
+}
+
+
+void Debug::OnThrow(Handle<Object> exception, bool uncaught) {
   if (in_debug_scope() || ignore_events()) return;
-
+  // Temporarily clear any scheduled_exception to allow evaluating
+  // JavaScript from the debug event handler.
   HandleScope scope(isolate_);
-  Handle<Object> promise = GetPromiseForUncaughtException();
-  uncaught |= !promise->IsUndefined();
+  Handle<Object> scheduled_exception;
+  if (isolate_->has_scheduled_exception()) {
+    scheduled_exception = handle(isolate_->scheduled_exception(), isolate_);
+    isolate_->clear_scheduled_exception();
+  }
+  OnException(exception, uncaught, isolate_->GetPromiseOnStackOnThrow());
+  if (!scheduled_exception.is_null()) {
+    isolate_->thread_local_top()->scheduled_exception_ = *scheduled_exception;
+  }
+}
 
+
+void Debug::OnPromiseReject(Handle<JSObject> promise, Handle<Object> value) {
+  if (in_debug_scope() || ignore_events()) return;
+  HandleScope scope(isolate_);
+  OnException(value, false, promise);
+}
+
+
+void Debug::OnException(Handle<Object> exception, bool uncaught,
+                        Handle<Object> promise) {
+  if (promise->IsJSObject()) {
+    uncaught |= !PromiseHasRejectHandler(Handle<JSObject>::cast(promise));
+  }
   // Bail out if exception breaks are not active
   if (uncaught) {
     // Uncaught exceptions are reported by either flags.
@@ -2615,6 +2559,24 @@
 }
 
 
+void Debug::OnCompileError(Handle<Script> script) {
+  // No more to do if not debugging.
+  if (in_debug_scope() || ignore_events()) return;
+
+  HandleScope scope(isolate_);
+  DebugScope debug_scope(this);
+  if (debug_scope.failed()) return;
+
+  // Create the compile state object.
+  Handle<Object> event_data;
+  // Bail out and don't call debugger if exception.
+  if (!MakeCompileEvent(script, v8::CompileError).ToHandle(&event_data)) return;
+
+  // Process debug event.
+  ProcessDebugEvent(v8::CompileError, Handle<JSObject>::cast(event_data), true);
+}
+
+
 void Debug::OnDebugBreak(Handle<Object> break_points_hit,
                             bool auto_continue) {
   // The caller provided for DebugScope.
@@ -2645,7 +2607,8 @@
   // Create the event data object.
   Handle<Object> event_data;
   // Bail out and don't call debugger if exception.
-  if (!MakeCompileEvent(script, true).ToHandle(&event_data)) return;
+  if (!MakeCompileEvent(script, v8::BeforeCompile).ToHandle(&event_data))
+    return;
 
   // Process debug event.
   ProcessDebugEvent(v8::BeforeCompile,
@@ -2655,8 +2618,7 @@
 
 
 // Handle debugger actions when a new script is compiled.
-void Debug::OnAfterCompile(Handle<Script> script,
-                           AfterCompileFlags after_compile_flags) {
+void Debug::OnAfterCompile(Handle<Script> script) {
   // Add the newly compiled script to the script cache.
   if (script_cache_ != NULL) script_cache_->Add(script);
 
@@ -2664,9 +2626,6 @@
   if (in_debug_scope() || ignore_events()) return;
 
   HandleScope scope(isolate_);
-  // Store whether in debugger before entering debugger.
-  bool was_in_scope = in_debug_scope();
-
   DebugScope debug_scope(this);
   if (debug_scope.failed()) return;
 
@@ -2676,7 +2635,7 @@
   // Get the function UpdateScriptBreakPoints (defined in debug-debugger.js).
   Handle<String> update_script_break_points_string =
       isolate_->factory()->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("UpdateScriptBreakPoints"));
+          STATIC_CHAR_VECTOR("UpdateScriptBreakPoints"));
   Handle<GlobalObject> debug_global(debug_context()->global_object());
   Handle<Object> update_script_break_points =
       Object::GetProperty(
@@ -2684,7 +2643,7 @@
   if (!update_script_break_points->IsJSFunction()) {
     return;
   }
-  ASSERT(update_script_break_points->IsJSFunction());
+  DCHECK(update_script_break_points->IsJSFunction());
 
   // Wrap the script object in a proper JS object before passing it
   // to JavaScript.
@@ -2694,24 +2653,22 @@
   Handle<Object> argv[] = { wrapper };
   if (Execution::TryCall(Handle<JSFunction>::cast(update_script_break_points),
                          isolate_->js_builtins_object(),
-                         ARRAY_SIZE(argv),
+                         arraysize(argv),
                          argv).is_null()) {
     return;
   }
-  // Bail out based on state or if there is no listener for this event
-  if (was_in_scope && (after_compile_flags & SEND_WHEN_DEBUGGING) == 0) return;
 
   // Create the compile state object.
   Handle<Object> event_data;
   // Bail out and don't call debugger if exception.
-  if (!MakeCompileEvent(script, false).ToHandle(&event_data)) return;
+  if (!MakeCompileEvent(script, v8::AfterCompile).ToHandle(&event_data)) return;
 
   // Process debug event.
   ProcessDebugEvent(v8::AfterCompile, Handle<JSObject>::cast(event_data), true);
 }
 
 
-void Debug::OnScriptCollected(int id) {
+void Debug::OnPromiseEvent(Handle<JSObject> data) {
   if (in_debug_scope() || ignore_events()) return;
 
   HandleScope scope(isolate_);
@@ -2721,10 +2678,29 @@
   // Create the script collected state object.
   Handle<Object> event_data;
   // Bail out and don't call debugger if exception.
-  if (!MakeScriptCollectedEvent(id).ToHandle(&event_data)) return;
+  if (!MakePromiseEvent(data).ToHandle(&event_data)) return;
 
   // Process debug event.
-  ProcessDebugEvent(v8::ScriptCollected,
+  ProcessDebugEvent(v8::PromiseEvent,
+                    Handle<JSObject>::cast(event_data),
+                    true);
+}
+
+
+void Debug::OnAsyncTaskEvent(Handle<JSObject> data) {
+  if (in_debug_scope() || ignore_events()) return;
+
+  HandleScope scope(isolate_);
+  DebugScope debug_scope(this);
+  if (debug_scope.failed()) return;
+
+  // Create the async task event object.
+  Handle<Object> event_data;
+  // Bail out and don't call debugger if exception.
+  if (!MakeAsyncTaskEvent(data).ToHandle(&event_data)) return;
+
+  // Process debug event.
+  ProcessDebugEvent(v8::AsyncTaskEvent,
                     Handle<JSObject>::cast(event_data),
                     true);
 }
@@ -2784,18 +2760,17 @@
                                    event_listener_data_,
                                    client_data);
     callback(event_details);
-    ASSERT(!isolate_->has_scheduled_exception());
+    DCHECK(!isolate_->has_scheduled_exception());
   } else {
     // Invoke the JavaScript debug event listener.
-    ASSERT(event_listener_->IsJSFunction());
+    DCHECK(event_listener_->IsJSFunction());
     Handle<Object> argv[] = { Handle<Object>(Smi::FromInt(event), isolate_),
                               exec_state,
                               event_data,
                               event_listener_data_ };
+    Handle<JSReceiver> global(isolate_->global_proxy());
     Execution::TryCall(Handle<JSFunction>::cast(event_listener_),
-                       isolate_->global_object(),
-                       ARRAY_SIZE(argv),
-                       argv);
+                       global, arraysize(argv), argv);
   }
 }
 
@@ -2814,7 +2789,7 @@
   // Prevent other interrupts from triggering, for example API callbacks,
   // while dispatching message handler callbacks.
   PostponeInterruptsScope no_interrupts(isolate_);
-  ASSERT(is_active_);
+  DCHECK(is_active_);
   HandleScope scope(isolate_);
   // Process the individual events.
   bool sendEventMessage = false;
@@ -2831,9 +2806,6 @@
     case v8::AfterCompile:
       sendEventMessage = true;
       break;
-    case v8::ScriptCollected:
-      sendEventMessage = true;
-      break;
     case v8::NewFunction:
       break;
     default:
@@ -2843,7 +2815,7 @@
   // The debug command interrupt flag might have been set when the command was
   // added. It should be enough to clear the flag only once while we are in the
   // debugger.
-  ASSERT(in_debug_scope());
+  DCHECK(in_debug_scope());
   isolate_->stack_guard()->ClearDebugCommand();
 
   // Notify the debugger that a debug event has occurred unless auto continue is
@@ -2861,9 +2833,7 @@
   // in the queue if any. For script collected events don't even process
   // messages in the queue as the execution state might not be what is expected
   // by the client.
-  if ((auto_continue && !has_commands()) || event == v8::ScriptCollected) {
-    return;
-  }
+  if (auto_continue && !has_commands()) return;
 
   // DebugCommandProcessor goes here.
   bool running = auto_continue;
@@ -2900,11 +2870,12 @@
     Handle<String> request_text = isolate_->factory()->NewStringFromTwoByte(
         command_text).ToHandleChecked();
     Handle<Object> request_args[] = { request_text };
-    Handle<Object> exception;
     Handle<Object> answer_value;
     Handle<String> answer;
-    MaybeHandle<Object> maybe_result = Execution::TryCall(
-        process_debug_request, cmd_processor, 1, request_args, &exception);
+    MaybeHandle<Object> maybe_exception;
+    MaybeHandle<Object> maybe_result =
+        Execution::TryCall(process_debug_request, cmd_processor, 1,
+                           request_args, &maybe_exception);
 
     if (maybe_result.ToHandle(&answer_value)) {
       if (answer_value->IsUndefined()) {
@@ -2922,10 +2893,15 @@
       Handle<Object> is_running_args[] = { answer };
       maybe_result = Execution::Call(
           isolate_, is_running, cmd_processor, 1, is_running_args);
-      running = maybe_result.ToHandleChecked()->IsTrue();
+      Handle<Object> result;
+      if (!maybe_result.ToHandle(&result)) break;
+      running = result->IsTrue();
     } else {
-      answer = Handle<String>::cast(
-          Execution::ToString(isolate_, exception).ToHandleChecked());
+      Handle<Object> exception;
+      if (!maybe_exception.ToHandle(&exception)) break;
+      Handle<Object> result;
+      if (!Execution::ToString(isolate_, exception).ToHandle(&result)) break;
+      answer = Handle<String>::cast(result);
     }
 
     // Return the result.
@@ -2938,6 +2914,7 @@
     // running state (through a continue command) or auto continue is active
     // and there are no more commands queued.
   } while (!running || has_commands());
+  command_queue_.Clear();
 }
 
 
@@ -3039,7 +3016,7 @@
       isolate_,
       fun,
       Handle<Object>(debug_context()->global_proxy(), isolate_),
-      ARRAY_SIZE(argv),
+      arraysize(argv),
       argv);
 }
 
@@ -3056,7 +3033,7 @@
   if (check.HasOverflowed()) return;
 
   { JavaScriptFrameIterator it(isolate_);
-    ASSERT(!it.done());
+    DCHECK(!it.done());
     Object* fun = it.frame()->function();
     if (fun && fun->IsJSFunction()) {
       // Don't stop in builtin functions.
@@ -3093,9 +3070,12 @@
 }
 
 
-DebugScope::DebugScope(Debug* debug) : debug_(debug),
-                                       prev_(debug->debugger_entry()),
-                                       save_(debug_->isolate_) {
+DebugScope::DebugScope(Debug* debug)
+    : debug_(debug),
+      prev_(debug->debugger_entry()),
+      save_(debug_->isolate_),
+      no_termination_exceptions_(debug_->isolate_,
+                                StackGuard::TERMINATE_EXECUTION) {
   // Link recursive debugger entry.
   debug_->thread_local_.current_debug_scope_ = this;
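
The added initializer relies on the nesting behavior of PostponeInterruptsScope: while any such scope is alive, TERMINATE_EXECUTION requests are held back, and they are honored again once the outermost scope unwinds. A generic sketch of that counting idiom, with hypothetical names rather than V8's StackGuard interface:

    // Hypothetical illustration of the postpone-interrupts idiom: a guard
    // that consults *count_ before delivering an interrupt will hold it
    // back for as long as at least one scope is alive.
    class PostponeScope {
     public:
      explicit PostponeScope(int* postpone_count) : count_(postpone_count) {
        ++*count_;
      }
      ~PostponeScope() { --*count_; }  // re-enable when the last scope exits
     private:
      int* count_;
    };
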
 
@@ -3246,7 +3226,7 @@
   Isolate* isolate = event_data_->GetIsolate();
   v8::Handle<v8::Context> context = GetDebugEventContext(isolate);
   // Isolate::context() may be NULL when the "script collected" event occurs.
-  ASSERT(!context.IsEmpty() || event_ == v8::ScriptCollected);
+  DCHECK(!context.IsEmpty());
   return context;
 }
 
@@ -3336,7 +3316,7 @@
 
 
 CommandMessage CommandMessageQueue::Get() {
-  ASSERT(!IsEmpty());
+  DCHECK(!IsEmpty());
   int result = start_;
   start_ = (start_ + 1) % size_;
   return messages_[result];
@@ -3371,13 +3351,13 @@
 
 
 bool LockingCommandMessageQueue::IsEmpty() const {
-  LockGuard<Mutex> lock_guard(&mutex_);
+  base::LockGuard<base::Mutex> lock_guard(&mutex_);
   return queue_.IsEmpty();
 }
 
 
 CommandMessage LockingCommandMessageQueue::Get() {
-  LockGuard<Mutex> lock_guard(&mutex_);
+  base::LockGuard<base::Mutex> lock_guard(&mutex_);
   CommandMessage result = queue_.Get();
   logger_->DebugEvent("Get", result.text());
   return result;
@@ -3385,14 +3365,14 @@
 
 
 void LockingCommandMessageQueue::Put(const CommandMessage& message) {
-  LockGuard<Mutex> lock_guard(&mutex_);
+  base::LockGuard<base::Mutex> lock_guard(&mutex_);
   queue_.Put(message);
   logger_->DebugEvent("Put", message.text());
 }
 
 
 void LockingCommandMessageQueue::Clear() {
-  LockGuard<Mutex> lock_guard(&mutex_);
+  base::LockGuard<base::Mutex> lock_guard(&mutex_);
   queue_.Clear();
 }
 
diff --git a/src/debug.h b/src/debug.h
index 7f9b1a2..a5119d0 100644
--- a/src/debug.h
+++ b/src/debug.h
@@ -8,13 +8,13 @@
 #include "src/allocation.h"
 #include "src/arguments.h"
 #include "src/assembler.h"
+#include "src/base/platform/platform.h"
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/flags.h"
 #include "src/frames-inl.h"
 #include "src/hashmap.h"
 #include "src/liveedit.h"
-#include "src/platform.h"
 #include "src/string-stream.h"
 #include "src/v8threads.h"
 
@@ -159,9 +159,6 @@
   // Return the scripts in the cache.
   Handle<FixedArray> GetScripts();
 
-  // Generate debugger events for collected scripts.
-  void ProcessCollectedScripts();
-
  private:
   // Calculate the hash value from the key (script id).
   static uint32_t Hash(int key) {
@@ -176,8 +173,6 @@
       const v8::WeakCallbackData<v8::Value, void>& data);
 
   Isolate* isolate_;
-  // List used during GC to temporarily store id's of collected scripts.
-  List<int> collected_scripts_;
 };
 
 
@@ -333,28 +328,11 @@
  private:
   Logger* logger_;
   CommandMessageQueue queue_;
-  mutable Mutex mutex_;
+  mutable base::Mutex mutex_;
   DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
 };
 
 
-class PromiseOnStack {
- public:
-  PromiseOnStack(Isolate* isolate,
-                 PromiseOnStack* prev,
-                 Handle<JSFunction> getter);
-  ~PromiseOnStack();
-  StackHandler* handler() { return handler_; }
-  Handle<JSFunction> getter() { return getter_; }
-  PromiseOnStack* prev() { return prev_; }
- private:
-  Isolate* isolate_;
-  StackHandler* handler_;
-  Handle<JSFunction> getter_;
-  PromiseOnStack* prev_;
-};
-
-
 // This class contains the debugger support. The main purpose is to handle
 // setting break points in the code.
 //
@@ -364,18 +342,16 @@
 // DebugInfo.
 class Debug {
  public:
-  enum AfterCompileFlags {
-    NO_AFTER_COMPILE_FLAGS,
-    SEND_WHEN_DEBUGGING
-  };
-
   // Debug event triggers.
   void OnDebugBreak(Handle<Object> break_points_hit, bool auto_continue);
-  void OnException(Handle<Object> exception, bool uncaught);
+
+  void OnThrow(Handle<Object> exception, bool uncaught);
+  void OnPromiseReject(Handle<JSObject> promise, Handle<Object> value);
+  void OnCompileError(Handle<Script> script);
   void OnBeforeCompile(Handle<Script> script);
-  void OnAfterCompile(Handle<Script> script,
-                      AfterCompileFlags after_compile_flags);
-  void OnScriptCollected(int id);
+  void OnAfterCompile(Handle<Script> script);
+  void OnPromiseEvent(Handle<JSObject> data);
+  void OnAsyncTaskEvent(Handle<JSObject> data);
 
   // API facing.
   void SetEventListener(Handle<Object> callback, Handle<Object> data);
@@ -460,10 +436,6 @@
   // Check whether this frame is just about to return.
   bool IsBreakAtReturn(JavaScriptFrame* frame);
 
-  // Promise handling.
-  void PromiseHandlePrologue(Handle<JSFunction> promise_getter);
-  void PromiseHandleEpilogue();
-
   // Support for LiveEdit
   void FramesHaveBeenDropped(StackFrame::Id new_break_frame_id,
                              LiveEdit::FrameDropMode mode,
@@ -482,9 +454,6 @@
   // Record function from which eval was called.
   static void RecordEvalCaller(Handle<Script> script);
 
-  // Garbage collection notifications.
-  void AfterGarbageCollection();
-
   // Flags and states.
   DebugScope* debugger_entry() { return thread_local_.current_debug_scope_; }
   inline Handle<Context> debug_context() { return debug_context_; }
@@ -505,6 +474,10 @@
   int break_id() { return thread_local_.break_id_; }
 
   // Support for embedding into generated code.
+  Address is_active_address() {
+    return reinterpret_cast<Address>(&is_active_);
+  }
+
   Address after_break_target_address() {
     return reinterpret_cast<Address>(&after_break_target_);
   }
@@ -531,6 +504,9 @@
   inline bool has_commands() const { return !command_queue_.IsEmpty(); }
   inline bool ignore_events() const { return is_suppressed_ || !is_active_; }
 
+  void OnException(Handle<Object> exception, bool uncaught,
+                   Handle<Object> promise);
+
   // Constructors for debug event objects.
   MUST_USE_RESULT MaybeHandle<Object> MakeJSObject(
       const char* constructor_name,
@@ -544,14 +520,17 @@
       bool uncaught,
       Handle<Object> promise);
   MUST_USE_RESULT MaybeHandle<Object> MakeCompileEvent(
-      Handle<Script> script, bool before);
-  MUST_USE_RESULT MaybeHandle<Object> MakeScriptCollectedEvent(int id);
+      Handle<Script> script, v8::DebugEvent type);
+  MUST_USE_RESULT MaybeHandle<Object> MakePromiseEvent(
+      Handle<JSObject> promise_event);
+  MUST_USE_RESULT MaybeHandle<Object> MakeAsyncTaskEvent(
+      Handle<JSObject> task_event);
 
   // Mirror cache handling.
   void ClearMirrorCache();
 
-  // Returns a promise if it does not have a reject handler.
-  Handle<Object> GetPromiseForUncaughtException();
+  // Returns a promise if the pushed try-catch handler matches the current one.
+  bool PromiseHasRejectHandler(Handle<JSObject> promise);
 
   void CallEventCallback(v8::DebugEvent event,
                          Handle<Object> exec_state,
@@ -578,8 +557,8 @@
   bool CheckBreakPoint(Handle<Object> break_point_object);
 
   inline void AssertDebugContext() {
-    ASSERT(isolate_->context() == *debug_context());
-    ASSERT(in_debug_scope());
+    DCHECK(isolate_->context() == *debug_context());
+    DCHECK(in_debug_scope());
   }
 
   void ThreadInit();
@@ -592,7 +571,7 @@
   v8::Debug::MessageHandler message_handler_;
 
   static const int kQueueInitialSize = 4;
-  Semaphore command_received_;  // Signaled for each command received.
+  base::Semaphore command_received_;  // Signaled for each command received.
   LockingCommandMessageQueue command_queue_;
   LockingCommandMessageQueue event_command_queue_;
 
@@ -657,13 +636,6 @@
     // of the pointer to function being restarted. Otherwise (most of the time)
     // stores NULL. This pointer is used with 'step in' implementation.
     Object** restarter_frame_function_pointer_;
-
-    // When a promise is being resolved, we may want to trigger a debug event
-    // if we catch a throw.  For this purpose we remember the try-catch
-    // handler address that would catch the exception.  We also hold onto a
-    // closure that returns a promise if the exception is considered uncaught.
-    // Due to the possibility of reentry we use a linked list.
-    PromiseOnStack* promise_on_stack_;
   };
 
   // Storage location for registers when handling debug break calls
@@ -710,6 +682,7 @@
   int break_id_;                   // Previous break id.
   bool failed_;                    // Did the debug context fail to load?
   SaveContext save_;               // Saves previous context.
+  PostponeInterruptsScope no_termination_exceptions_;
 };
 
 
diff --git a/src/deoptimizer.cc b/src/deoptimizer.cc
index 2b39ff6..dd274ed 100644
--- a/src/deoptimizer.cc
+++ b/src/deoptimizer.cc
@@ -19,7 +19,7 @@
 
 static MemoryChunk* AllocateCodeChunk(MemoryAllocator* allocator) {
   return allocator->AllocateChunk(Deoptimizer::GetMaxDeoptTableSize(),
-                                  OS::CommitPageSize(),
+                                  base::OS::CommitPageSize(),
 #if defined(__native_client__)
   // The Native Client port of V8 uses an interpreter,
   // so code pages don't need PROT_EXEC.
@@ -101,7 +101,7 @@
 size_t Deoptimizer::GetMaxDeoptTableSize() {
   int entries_size =
       Deoptimizer::kMaxNumberOfEntries * Deoptimizer::table_entry_size_;
-  int commit_page_size = static_cast<int>(OS::CommitPageSize());
+  int commit_page_size = static_cast<int>(base::OS::CommitPageSize());
   int page_count = ((kDeoptTableMaxEpilogueCodeSize + entries_size - 1) /
                     commit_page_size) + 1;
   return static_cast<size_t>(commit_page_size * page_count);
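
GetMaxDeoptTableSize rounds the combined entry and epilogue bytes up to whole commit pages: for positive integers, (a - 1) / b + 1 equals ceil(a / b), which is the expression used above with a as the byte total and b as the commit page size. A quick check with made-up numbers, not V8's actual constants:

    #include <cassert>

    // (a - 1) / b + 1 == ceil(a / b) for integers a >= 1, b >= 1.
    int CeilDiv(int a, int b) { return (a - 1) / b + 1; }

    int main() {
      assert(CeilDiv(10240, 4096) == 3);  // 10 KB needs three 4 KB pages
      assert(CeilDiv(8192, 4096) == 2);   // exact multiples do not round up
    }
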
@@ -352,8 +352,11 @@
       }
       SafepointEntry safepoint = code->GetSafepointEntry(it.frame()->pc());
       int deopt_index = safepoint.deoptimization_index();
-      bool safe_to_deopt = deopt_index != Safepoint::kNoDeoptimizationIndex;
-      CHECK(topmost_optimized_code == NULL || safe_to_deopt);
+      // TurboFan deopt is checked when we are patching addresses on the stack.
+      bool turbofanned = code->is_turbofanned() && !FLAG_turbo_deoptimization;
+      bool safe_to_deopt =
+          deopt_index != Safepoint::kNoDeoptimizationIndex || turbofanned;
+      CHECK(topmost_optimized_code == NULL || safe_to_deopt || turbofanned);
       if (topmost_optimized_code == NULL) {
         topmost_optimized_code = code;
         safe_to_deopt_topmost_optimized_code = safe_to_deopt;
@@ -374,7 +377,9 @@
     Code* code = Code::cast(element);
     CHECK_EQ(code->kind(), Code::OPTIMIZED_FUNCTION);
     Object* next = code->next_code_link();
-    if (code->marked_for_deoptimization()) {
+
+    if (code->marked_for_deoptimization() &&
+        (!code->is_turbofanned() || FLAG_turbo_deoptimization)) {
       // Put the code into the list for later patching.
       codes.Add(code, &zone);
 
@@ -404,17 +409,27 @@
   for (int i = 0; i < codes.length(); i++) {
 #ifdef DEBUG
     if (codes[i] == topmost_optimized_code) {
-      ASSERT(safe_to_deopt_topmost_optimized_code);
+      DCHECK(safe_to_deopt_topmost_optimized_code);
     }
 #endif
     // It is finally time to die, code object.
-    // Do platform-specific patching to force any activations to lazy deopt.
-    PatchCodeForDeoptimization(isolate, codes[i]);
 
-    // We might be in the middle of incremental marking with compaction.
-    // Tell collector to treat this code object in a special way and
-    // ignore all slots that might have been recorded on it.
-    isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+    // Remove the code from optimized code map.
+    DeoptimizationInputData* deopt_data =
+        DeoptimizationInputData::cast(codes[i]->deoptimization_data());
+    SharedFunctionInfo* shared =
+        SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
+    shared->EvictFromOptimizedCodeMap(codes[i], "deoptimized code");
+
+    // Do platform-specific patching to force any activations to lazy deopt.
+    if (!codes[i]->is_turbofanned() || FLAG_turbo_deoptimization) {
+      PatchCodeForDeoptimization(isolate, codes[i]);
+
+      // We might be in the middle of incremental marking with compaction.
+      // Tell collector to treat this code object in a special way and
+      // ignore all slots that might have been recorded on it.
+      isolate->heap()->mark_compact_collector()->InvalidateCode(codes[i]);
+    }
   }
 }
 
@@ -459,9 +474,11 @@
         reinterpret_cast<intptr_t>(object));
   }
   if (object->IsJSGlobalProxy()) {
-    Object* proto = object->GetPrototype();
-    CHECK(proto->IsJSGlobalObject());
-    Context* native_context = GlobalObject::cast(proto)->native_context();
+    PrototypeIterator iter(object->GetIsolate(), object);
+    // TODO(verwaest): This CHECK will be hit if the global proxy is detached.
+    CHECK(iter.GetCurrent()->IsJSGlobalObject());
+    Context* native_context =
+        GlobalObject::cast(iter.GetCurrent())->native_context();
     MarkAllCodeForContext(native_context);
     DeoptimizeMarkedCodeForContext(native_context);
   } else if (object->IsGlobalObject()) {
@@ -562,7 +579,7 @@
   if (function->IsSmi()) {
     function = NULL;
   }
-  ASSERT(from != NULL);
+  DCHECK(from != NULL);
   if (function != NULL && function->IsOptimized()) {
     function->shared()->increment_deopt_count();
     if (bailout_type_ == Deoptimizer::SOFT) {
@@ -577,9 +594,9 @@
   compiled_code_ = FindOptimizedCode(function, optimized_code);
 
 #if DEBUG
-  ASSERT(compiled_code_ != NULL);
+  DCHECK(compiled_code_ != NULL);
   if (type == EAGER || type == SOFT || type == LAZY) {
-    ASSERT(compiled_code_->kind() != Code::FUNCTION);
+    DCHECK(compiled_code_->kind() != Code::FUNCTION);
   }
 #endif
 
@@ -610,7 +627,7 @@
           : compiled_code;
     }
     case Deoptimizer::DEBUGGER:
-      ASSERT(optimized_code->contains(from_));
+      DCHECK(optimized_code->contains(from_));
       return optimized_code;
   }
   FATAL("Could not find code for optimized function");
@@ -629,8 +646,8 @@
 
 
 Deoptimizer::~Deoptimizer() {
-  ASSERT(input_ == NULL && output_ == NULL);
-  ASSERT(disallow_heap_allocation_ == NULL);
+  DCHECK(input_ == NULL && output_ == NULL);
+  DCHECK(disallow_heap_allocation_ == NULL);
   delete trace_scope_;
 }
 
@@ -681,7 +698,7 @@
       addr >= start + (kMaxNumberOfEntries * table_entry_size_)) {
     return kNotDeoptimizationEntry;
   }
-  ASSERT_EQ(0,
+  DCHECK_EQ(0,
             static_cast<int>(addr - start) % table_entry_size_);
   return static_cast<int>(addr - start) / table_entry_size_;
 }
@@ -699,13 +716,10 @@
       return data->PcAndState(i)->value();
     }
   }
-  PrintF(stderr, "[couldn't find pc offset for node=%d]\n", id.ToInt());
-  PrintF(stderr, "[method: %s]\n", shared->DebugName()->ToCString().get());
-  // Print the source code if available.
-  HeapStringAllocator string_allocator;
-  StringStream stream(&string_allocator);
-  shared->SourceCodePrint(&stream, -1);
-  PrintF(stderr, "[source:\n%s\n]", stream.ToCString().get());
+  OFStream os(stderr);
+  os << "[couldn't find pc offset for node=" << id.ToInt() << "]\n"
+     << "[method: " << shared->DebugName()->ToCString().get() << "]\n"
+     << "[source:\n" << SourceCodeOf(shared) << "\n]" << endl;
 
   FATAL("unable to find pc offset during deoptimization");
   return -1;
@@ -721,7 +735,7 @@
     Object* element = native_context->DeoptimizedCodeListHead();
     while (!element->IsUndefined()) {
       Code* code = Code::cast(element);
-      ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+      DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
       length++;
       element = code->next_code_link();
     }
@@ -739,7 +753,7 @@
       compiled_code_->kind() == Code::OPTIMIZED_FUNCTION) {
     LOG(isolate(), CodeDeoptEvent(compiled_code_));
   }
-  ElapsedTimer timer;
+  base::ElapsedTimer timer;
 
   // Determine basic deoptimization information.  The optimized frame is
   // described by the input data.
@@ -758,7 +772,8 @@
            input_data->OptimizationId()->value(),
            bailout_id_,
            fp_to_sp_delta_);
-    if (bailout_type_ == EAGER || bailout_type_ == SOFT) {
+    if (bailout_type_ == EAGER || bailout_type_ == SOFT ||
+        (compiled_code_->is_hydrogen_stub())) {
       compiled_code_->PrintDeoptLocation(trace_scope_->file(), bailout_id_);
     }
   }
@@ -772,13 +787,13 @@
   TranslationIterator iterator(translations, translation_index);
   Translation::Opcode opcode =
       static_cast<Translation::Opcode>(iterator.Next());
-  ASSERT(Translation::BEGIN == opcode);
+  DCHECK(Translation::BEGIN == opcode);
   USE(opcode);
   // Read the number of output frames and allocate an array for their
   // descriptions.
   int count = iterator.Next();
   iterator.Next();  // Drop JS frames count.
-  ASSERT(output_ == NULL);
+  DCHECK(output_ == NULL);
   output_ = new FrameDescription*[count];
   for (int i = 0; i < count; ++i) {
     output_[i] = NULL;
@@ -869,7 +884,7 @@
     CHECK_EQ(Translation::kSelfLiteralId, closure_id);
     function = function_;
   }
-  unsigned height = iterator->Next();
+  unsigned height = iterator->Next() - 1;  // Do not count the context.
   unsigned height_in_bytes = height * kPointerSize;
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(), "  translating ");
@@ -903,7 +918,10 @@
   intptr_t top_address;
   if (is_bottommost) {
     // Determine whether the input frame contains alignment padding.
-    has_alignment_padding_ = HasAlignmentPadding(function) ? 1 : 0;
+    has_alignment_padding_ =
+        (!compiled_code_->is_turbofanned() && HasAlignmentPadding(function))
+            ? 1
+            : 0;
     // 2 = context and function in the frame.
     // If the optimized frame had alignment padding, adjust the frame pointer
     // to point to the new position of the old frame pointer after padding
@@ -963,7 +981,7 @@
   }
   output_frame->SetCallerFp(output_offset, value);
   intptr_t fp_value = top_address + output_offset;
-  ASSERT(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
+  DCHECK(!is_bottommost || (input_->GetRegister(fp_reg.code()) +
       has_alignment_padding_ * kPointerSize) == fp_value);
   output_frame->SetFp(fp_value);
   if (is_topmost) output_frame->SetRegister(fp_reg.code(), fp_value);
@@ -973,7 +991,7 @@
            V8PRIxPTR " ; caller's fp\n",
            fp_value, output_offset, value);
   }
-  ASSERT(!is_bottommost || !has_alignment_padding_ ||
+  DCHECK(!is_bottommost || !has_alignment_padding_ ||
          (fp_value & kPointerSize) != 0);
 
   if (FLAG_enable_ool_constant_pool) {
@@ -1001,12 +1019,24 @@
   Register context_reg = JavaScriptFrame::context_register();
   output_offset -= kPointerSize;
   input_offset -= kPointerSize;
-  if (is_bottommost) {
-    value = input_->GetFrameSlot(input_offset);
-  } else {
-    value = reinterpret_cast<intptr_t>(function->context());
+  // Read the context from the translations.
+  DoTranslateCommand(iterator, frame_index, output_offset);
+  value = output_frame->GetFrameSlot(output_offset);
+  // The context should not be a placeholder for a materialized object.
+  CHECK(value !=
+        reinterpret_cast<intptr_t>(isolate_->heap()->arguments_marker()));
+  if (value ==
+      reinterpret_cast<intptr_t>(isolate_->heap()->undefined_value())) {
+    // If the context was optimized away, just use the context from
+    // the activation. This should only apply to Crankshaft code.
+    CHECK(!compiled_code_->is_turbofanned());
+    if (is_bottommost) {
+      value = input_->GetFrameSlot(input_offset);
+    } else {
+      value = reinterpret_cast<intptr_t>(function->context());
+    }
+    output_frame->SetFrameSlot(output_offset, value);
   }
-  output_frame->SetFrameSlot(output_offset, value);
   output_frame->SetContext(value);
   if (is_topmost) output_frame->SetRegister(context_reg.code(), value);
   if (trace_scope_ != NULL) {
@@ -1022,7 +1052,7 @@
   value = reinterpret_cast<intptr_t>(function);
   // The function for the bottommost output frame should also agree with the
   // input frame.
-  ASSERT(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
+  DCHECK(!is_bottommost || input_->GetFrameSlot(input_offset) == value);
   output_frame->SetFrameSlot(output_offset, value);
   if (trace_scope_ != NULL) {
     PrintF(trace_scope_->file(),
@@ -1188,7 +1218,7 @@
            top_address + output_offset, output_offset, value, height - 1);
   }
 
-  ASSERT(0 == output_offset);
+  DCHECK(0 == output_offset);
 
   Builtins* builtins = isolate_->builtins();
   Code* adaptor_trampoline =
@@ -1226,8 +1256,8 @@
   output_frame->SetFrameType(StackFrame::CONSTRUCT);
 
   // Construct stub cannot be topmost or bottommost.
-  ASSERT(frame_index > 0 && frame_index < output_count_ - 1);
-  ASSERT(output_[frame_index] == NULL);
+  DCHECK(frame_index > 0 && frame_index < output_count_ - 1);
+  DCHECK(output_[frame_index] == NULL);
   output_[frame_index] = output_frame;
 
   // The top address of the frame is computed from the previous
@@ -1548,19 +1578,19 @@
   //                                         reg = JSFunction context
   //
 
-  CHECK(compiled_code_->is_crankshafted() &&
-        compiled_code_->kind() != Code::OPTIMIZED_FUNCTION);
-  int major_key = compiled_code_->major_key();
-  CodeStubInterfaceDescriptor* descriptor =
-      isolate_->code_stub_interface_descriptor(major_key);
+  CHECK(compiled_code_->is_hydrogen_stub());
+  int major_key = CodeStub::GetMajorKey(compiled_code_);
+  CodeStubDescriptor descriptor(isolate_, compiled_code_->stub_key());
 
   // The output frame must have room for all pushed register parameters
   // and the standard stack frame slots.  Include space for an argument
   // object to the callee and optionally the space to pass the argument
   // object to the stub failure handler.
-  CHECK_GE(descriptor->register_param_count_, 0);
-  int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
-      sizeof(Arguments) + kPointerSize;
+  int param_count = descriptor.GetEnvironmentParameterCount();
+  CHECK_GE(param_count, 0);
+
+  int height_in_bytes = kPointerSize * param_count + sizeof(Arguments) +
+      kPointerSize;
   int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
   int input_frame_size = input_->GetFrameSize();
   int output_frame_size = height_in_bytes + fixed_frame_size;
@@ -1654,7 +1684,7 @@
   }
 
   intptr_t caller_arg_count = 0;
-  bool arg_count_known = !descriptor->stack_parameter_count_.is_valid();
+  bool arg_count_known = !descriptor.stack_parameter_count().is_valid();
 
   // Build the Arguments object for the caller's parameters and a pointer to it.
   output_frame_offset -= kPointerSize;
@@ -1702,11 +1732,11 @@
 
   // Copy the register parameters to the failure frame.
   int arguments_length_offset = -1;
-  for (int i = 0; i < descriptor->register_param_count_; ++i) {
+  for (int i = 0; i < param_count; ++i) {
     output_frame_offset -= kPointerSize;
     DoTranslateCommand(iterator, 0, output_frame_offset);
 
-    if (!arg_count_known && descriptor->IsParameterCountRegister(i)) {
+    if (!arg_count_known && descriptor.IsEnvironmentParameterCountRegister(i)) {
       arguments_length_offset = output_frame_offset;
     }
   }
@@ -1745,14 +1775,14 @@
   CopyDoubleRegisters(output_frame);
 
   // Fill registers containing handler and number of parameters.
-  SetPlatformCompiledStubRegisters(output_frame, descriptor);
+  SetPlatformCompiledStubRegisters(output_frame, &descriptor);
 
   // Compute this frame's PC, state, and continuation.
   Code* trampoline = NULL;
-  StubFunctionMode function_mode = descriptor->function_mode_;
+  StubFunctionMode function_mode = descriptor.function_mode();
   StubFailureTrampolineStub(isolate_,
                             function_mode).FindCodeInCache(&trampoline);
-  ASSERT(trampoline != NULL);
+  DCHECK(trampoline != NULL);
   output_frame->SetPc(reinterpret_cast<intptr_t>(
       trampoline->instruction_start()));
   if (FLAG_enable_ool_constant_pool) {
@@ -1799,7 +1829,7 @@
     Handle<JSObject> arguments =
         isolate_->factory()->NewArgumentsObject(function, length);
     Handle<FixedArray> array = isolate_->factory()->NewFixedArray(length);
-    ASSERT_EQ(array->length(), length);
+    DCHECK_EQ(array->length(), length);
     arguments->set_elements(*array);
     materialized_objects_->Add(arguments);
     for (int i = 0; i < length; ++i) {
@@ -1813,9 +1843,11 @@
     Handle<Map> map = Map::GeneralizeAllFieldRepresentations(
         Handle<Map>::cast(MaterializeNextValue()));
     switch (map->instance_type()) {
+      case MUTABLE_HEAP_NUMBER_TYPE:
       case HEAP_NUMBER_TYPE: {
         // Reuse the HeapNumber value directly as it is already properly
-        // tagged and skip materializing the HeapNumber explicitly.
+        // tagged and skip materializing the HeapNumber explicitly. Turn
+        // mutable heap numbers into immutable ones.
         Handle<Object> object = MaterializeNextValue();
         if (object_index < prev_materialized_count_) {
           materialized_objects_->Add(Handle<Object>(
@@ -1877,6 +1909,9 @@
 Handle<Object> Deoptimizer::MaterializeNextValue() {
   int value_index = materialization_value_index_++;
   Handle<Object> value = materialized_values_->at(value_index);
+  if (value->IsMutableHeapNumber()) {
+    HeapNumber::cast(*value)->set_map(isolate_->heap()->heap_number_map());
+  }
   if (*value == isolate_->heap()->arguments_marker()) {
     value = MaterializeNextHeapObject();
   }
@@ -1885,7 +1920,7 @@
 
 
 void Deoptimizer::MaterializeHeapObjects(JavaScriptFrameIterator* it) {
-  ASSERT_NE(DEBUGGER, bailout_type_);
+  DCHECK_NE(DEBUGGER, bailout_type_);
 
   MaterializedObjectStore* materialized_store =
       isolate_->materialized_object_store();
@@ -1938,7 +1973,7 @@
              d.value(),
              d.destination());
     }
-    ASSERT(values.at(d.destination())->IsTheHole());
+    DCHECK(values.at(d.destination())->IsTheHole());
     values.Set(d.destination(), num);
   }
 
@@ -2772,7 +2807,7 @@
   GenerateDeoptimizationEntries(&masm, entry_count, type);
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
   MemoryChunk* chunk = data->deopt_entry_code_[type];
   CHECK(static_cast<int>(Deoptimizer::GetMaxDeoptTableSize()) >=
@@ -2780,7 +2815,7 @@
   chunk->CommitArea(desc.instr_size);
   CopyBytes(chunk->area_start(), desc.buffer,
       static_cast<size_t>(desc.instr_size));
-  CPU::FlushICache(chunk->area_start(), desc.instr_size);
+  CpuFeatures::FlushICache(chunk->area_start(), desc.instr_size);
 
   data->deopt_entry_code_entries_[type] = entry_count;
 }
@@ -2866,7 +2901,7 @@
 
 
 Object* FrameDescription::GetExpression(int index) {
-  ASSERT_EQ(StackFrame::JAVA_SCRIPT, type_);
+  DCHECK_EQ(StackFrame::JAVA_SCRIPT, type_);
   unsigned offset = GetOffsetFromSlotIndex(index);
   return reinterpret_cast<Object*>(*GetFrameSlotPointer(offset));
 }
@@ -2892,7 +2927,7 @@
   // bit of zero (marks the end).
   uint32_t bits = 0;
   for (int i = 0; true; i += 7) {
-    ASSERT(HasNext());
+    DCHECK(HasNext());
     uint8_t next = buffer_->get(index_++);
     bits |= (next >> 1) << i;
     if ((next & 1) == 0) break;
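
The decode loop above reads a little-endian variable-length integer: each
byte carries 7 payload bits in bits 1..7 (lowest group first) and a
continuation flag in bit 0, where 0 marks the final byte. For reference, a
matching encoder sketch (EncodeVLQ is an illustrative name, not a V8 API):

#include <cstdint>
#include <vector>

// Emits the format consumed by the loop above: payload in bits 1..7,
// continuation flag in bit 0 (1 = more bytes follow, 0 = last byte).
std::vector<uint8_t> EncodeVLQ(uint32_t bits) {
  std::vector<uint8_t> out;
  do {
    uint8_t group = static_cast<uint8_t>((bits & 0x7F) << 1);
    bits >>= 7;
    if (bits != 0) group |= 1;  // more groups remain
    out.push_back(group);
  } while (bits != 0);
  return out;
}
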
@@ -3261,7 +3296,11 @@
       return Handle<Object>(Memory::Object_at(addr_), isolate);
 
     case INT32: {
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+      int value = Memory::int32_at(addr_ + kIntSize);
+#else
       int value = Memory::int32_at(addr_);
+#endif
       if (Smi::IsValid(value)) {
         return Handle<Object>(Smi::FromInt(value), isolate);
       } else {
@@ -3270,7 +3309,11 @@
     }
 
     case UINT32: {
+#if V8_TARGET_BIG_ENDIAN && V8_HOST_ARCH_64_BIT
+      uint32_t value = Memory::uint32_at(addr_ + kIntSize);
+#else
       uint32_t value = Memory::uint32_at(addr_);
+#endif
       if (value <= static_cast<uint32_t>(Smi::kMaxValue)) {
         return Handle<Object>(Smi::FromInt(static_cast<int>(value)), isolate);
       } else {
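
The new #if branches handle word-sized slots on 64-bit big-endian targets:
when a 32-bit payload lives in an 8-byte slot, its bytes sit in the upper
(higher-address) half, so the read is offset by kIntSize. The same layout
fact in isolation (illustrative helper, assuming an 8-byte slot):

#include <cstdint>
#include <cstring>

// Extracts the int32 payload from an 8-byte slot written as a 64-bit word.
// Little-endian hosts keep the low 4 bytes at the base address; big-endian
// hosts keep them 4 bytes in, hence the addr_ + kIntSize adjustment above.
int32_t ReadInt32FromWordSlot(const unsigned char* slot, bool big_endian) {
  int32_t value;
  std::memcpy(&value, big_endian ? slot + 4 : slot, sizeof(value));
  return value;
}
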
@@ -3383,6 +3426,7 @@
       // TODO(jarin) this should be unified with the code in
       // Deoptimizer::MaterializeNextHeapObject()
       switch (map->instance_type()) {
+        case MUTABLE_HEAP_NUMBER_TYPE:
         case HEAP_NUMBER_TYPE: {
           // Reuse the HeapNumber value directly as it is already properly
           // tagged and skip materializing the HeapNumber explicitly.
@@ -3551,6 +3595,7 @@
                                            bool has_construct_stub) {
   FrameDescription* output_frame = deoptimizer->output_[frame_index];
   function_ = output_frame->GetFunction();
+  context_ = reinterpret_cast<Object*>(output_frame->GetContext());
   has_construct_stub_ = has_construct_stub;
   expression_count_ = output_frame->GetExpressionCount();
   expression_stack_ = new Object*[expression_count_];
@@ -3583,7 +3628,8 @@
 
 
 void DeoptimizedFrameInfo::Iterate(ObjectVisitor* v) {
-  v->VisitPointer(BitCast<Object**>(&function_));
+  v->VisitPointer(bit_cast<Object**>(&function_));
+  v->VisitPointer(&context_);
   v->VisitPointers(parameters_, parameters_ + parameters_count_);
   v->VisitPointers(expression_stack_, expression_stack_ + expression_count_);
 }
diff --git a/src/deoptimizer.h b/src/deoptimizer.h
index 1a6f668..612d5f6 100644
--- a/src/deoptimizer.h
+++ b/src/deoptimizer.h
@@ -17,19 +17,9 @@
 
 
 static inline double read_double_value(Address p) {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
-  return Memory::double_at(p);
-#else  // V8_HOST_CAN_READ_UNALIGNED
-  // Prevent gcc from using load-double (mips ldc1) on (possibly)
-  // non-64-bit aligned address.
-  union conversion {
-    double d;
-    uint32_t u[2];
-  } c;
-  c.u[0] = *reinterpret_cast<uint32_t*>(p);
-  c.u[1] = *reinterpret_cast<uint32_t*>(p + 4);
-  return c.d;
-#endif  // V8_HOST_CAN_READ_UNALIGNED
+  double d;
+  memcpy(&d, p, sizeof(d));
+  return d;
 }
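
The rewritten read_double_value drops the V8_HOST_CAN_READ_UNALIGNED split
entirely: a memcpy from a possibly unaligned address is well defined, avoids
strict-aliasing issues, and compilers lower it to a plain load where the
target permits. The idiom in isolation:

#include <cstring>

// Well-defined unaligned read; optimizes to a single load on targets that
// allow unaligned access, and to safe byte copies elsewhere.
double ReadUnalignedDouble(const void* p) {
  double d;
  std::memcpy(&d, p, sizeof(d));
  return d;
}
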
 
 
@@ -111,16 +101,41 @@
 
   static const int kBailoutTypesWithCodeEntry = SOFT + 1;
 
+  struct Reason {
+    Reason(int r, const char* m, const char* d)
+        : raw_position(r), mnemonic(m), detail(d) {}
+
+    bool operator==(const Reason& other) const {
+      return raw_position == other.raw_position &&
+             CStringEquals(mnemonic, other.mnemonic) &&
+             CStringEquals(detail, other.detail);
+    }
+
+    bool operator!=(const Reason& other) const { return !(*this == other); }
+
+    int raw_position;
+    const char* mnemonic;
+    const char* detail;
+  };
+
   struct JumpTableEntry : public ZoneObject {
-    inline JumpTableEntry(Address entry,
-                          Deoptimizer::BailoutType type,
-                          bool frame)
+    inline JumpTableEntry(Address entry, const Reason& the_reason,
+                          Deoptimizer::BailoutType type, bool frame)
         : label(),
           address(entry),
+          reason(the_reason),
           bailout_type(type),
-          needs_frame(frame) { }
+          needs_frame(frame) {}
+
+    bool IsEquivalentTo(const JumpTableEntry& other) const {
+      return address == other.address && bailout_type == other.bailout_type &&
+             needs_frame == other.needs_frame &&
+             (!FLAG_trace_deopt || reason == other.reason);
+    }
+
     Label label;
     Address address;
+    Reason reason;
     Deoptimizer::BailoutType bailout_type;
     bool needs_frame;
   };
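
Reason compares its mnemonic and detail fields with CStringEquals rather
than raw pointer equality. A sketch of what such a helper plausibly looks
like (an assumption about its shape; the real helper lives in V8's
utilities):

#include <cstring>

// NULL-tolerant C-string equality: two NULLs compare equal, NULL never
// equals a non-NULL string, otherwise compare contents with strcmp.
inline bool CStringEquals(const char* a, const char* b) {
  if (a == b) return true;  // covers both-NULL and identical pointers
  if (a == NULL || b == NULL) return false;
  return std::strcmp(a, b) == 0;
}
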
@@ -377,7 +392,7 @@
   // Fill the given output frame's registers to contain the failure handler
   // address and the number of parameters for a stub failure trampoline.
   void SetPlatformCompiledStubRegisters(FrameDescription* output_frame,
-                                        CodeStubInterfaceDescriptor* desc);
+                                        CodeStubDescriptor* desc);
 
   // Fill the given output frame's double registers with the original values
   // from the input frame's double registers.
@@ -460,7 +475,7 @@
   }
 
   uint32_t GetFrameSize() const {
-    ASSERT(static_cast<uint32_t>(frame_size_) == frame_size_);
+    DCHECK(static_cast<uint32_t>(frame_size_) == frame_size_);
     return static_cast<uint32_t>(frame_size_);
   }
 
@@ -489,11 +504,11 @@
 
   intptr_t GetRegister(unsigned n) const {
 #if DEBUG
-    // This convoluted ASSERT is needed to work around a gcc problem that
+    // This convoluted DCHECK is needed to work around a gcc problem that
     // improperly detects an array bounds overflow in optimized debug builds
-    // when using a plain ASSERT.
-    if (n >= ARRAY_SIZE(registers_)) {
-      ASSERT(false);
+    // when using a plain DCHECK.
+    if (n >= arraysize(registers_)) {
+      DCHECK(false);
       return 0;
     }
 #endif
@@ -501,17 +516,17 @@
   }
 
   double GetDoubleRegister(unsigned n) const {
-    ASSERT(n < ARRAY_SIZE(double_registers_));
+    DCHECK(n < arraysize(double_registers_));
     return double_registers_[n];
   }
 
   void SetRegister(unsigned n, intptr_t value) {
-    ASSERT(n < ARRAY_SIZE(registers_));
+    DCHECK(n < arraysize(registers_));
     registers_[n] = value;
   }
 
   void SetDoubleRegister(unsigned n, double value) {
-    ASSERT(n < ARRAY_SIZE(double_registers_));
+    DCHECK(n < arraysize(double_registers_));
     double_registers_[n] = value;
   }
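
ARRAY_SIZE (a plain sizeof division) gives way to arraysize, the template
idiom that rejects pointers at compile time. A minimal sketch of the idiom
(simplified relative to V8's version):

#include <cstddef>

// The reference-to-array parameter only binds to real arrays, so
// arraysize(ptr) fails to compile instead of silently returning
// sizeof(ptr) / sizeof(*ptr). Only the declaration is needed: the function
// is never called; sizeof measures its array-reference return type.
template <typename T, std::size_t N>
char (&ArraySizeHelper(T (&array)[N]))[N];

#define arraysize(array) (sizeof(ArraySizeHelper(array)))
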
 
@@ -607,7 +622,7 @@
   intptr_t frame_content_[1];
 
   intptr_t* GetFrameSlotPointer(unsigned offset) {
-    ASSERT(offset < frame_size_);
+    DCHECK(offset < frame_size_);
     return reinterpret_cast<intptr_t*>(
         reinterpret_cast<Address>(this) + frame_content_offset() + offset);
   }
@@ -656,7 +671,7 @@
  public:
   TranslationIterator(ByteArray* buffer, int index)
       : buffer_(buffer), index_(index) {
-    ASSERT(index >= 0 && index < buffer->length());
+    DCHECK(index >= 0 && index < buffer->length());
   }
 
   int32_t Next();
@@ -920,6 +935,9 @@
     return function_;
   }
 
+  // Get the frame context.
+  Object* GetContext() { return context_; }
+
   // Check if this frame is preceded by a construct stub frame.  The
   // bottommost inlined frame might still be called by an uninlined
   // construct stub.
   bool HasConstructStub() {
@@ -928,13 +946,13 @@
 
   // Get an incoming argument.
   Object* GetParameter(int index) {
-    ASSERT(0 <= index && index < parameters_count());
+    DCHECK(0 <= index && index < parameters_count());
     return parameters_[index];
   }
 
   // Get an expression from the expression stack.
   Object* GetExpression(int index) {
-    ASSERT(0 <= index && index < expression_count());
+    DCHECK(0 <= index && index < expression_count());
     return expression_stack_[index];
   }
 
@@ -945,17 +963,18 @@
  private:
   // Set an incoming argument.
   void SetParameter(int index, Object* obj) {
-    ASSERT(0 <= index && index < parameters_count());
+    DCHECK(0 <= index && index < parameters_count());
     parameters_[index] = obj;
   }
 
   // Set an expression on the expression stack.
   void SetExpression(int index, Object* obj) {
-    ASSERT(0 <= index && index < expression_count());
+    DCHECK(0 <= index && index < expression_count());
     expression_stack_[index] = obj;
   }
 
   JSFunction* function_;
+  Object* context_;
   bool has_construct_stub_;
   int parameters_count_;
   int expression_count_;
diff --git a/src/disassembler.cc b/src/disassembler.cc
index f1c28e8..d9448ce 100644
--- a/src/disassembler.cc
+++ b/src/disassembler.cc
@@ -95,7 +95,6 @@
   SealHandleScope shs(isolate);
   DisallowHeapAllocation no_alloc;
   ExternalReferenceEncoder ref_encoder(isolate);
-  Heap* heap = isolate->heap();
 
   v8::internal::EmbeddedVector<char, 128> decode_buffer;
   v8::internal::EmbeddedVector<char, kOutBufferSize> out_buffer;
@@ -215,7 +214,8 @@
         Code::Kind kind = code->kind();
         if (code->is_inline_cache_stub()) {
           if (kind == Code::LOAD_IC &&
-              LoadIC::GetContextualMode(code->extra_ic_state()) == CONTEXTUAL) {
+              LoadICState::GetContextualMode(code->extra_ic_state()) ==
+                  CONTEXTUAL) {
             out.AddFormatted(" contextual,");
           }
           InlineCacheState ic_state = code->ic_state();
@@ -226,29 +226,21 @@
             out.AddFormatted(", %s", Code::StubType2String(type));
           }
         } else if (kind == Code::STUB || kind == Code::HANDLER) {
-          // Reverse lookup required as the minor key cannot be retrieved
-          // from the code object.
-          Object* obj = heap->code_stubs()->SlowReverseLookup(code);
-          if (obj != heap->undefined_value()) {
-            ASSERT(obj->IsSmi());
-            // Get the STUB key and extract major and minor key.
-            uint32_t key = Smi::cast(obj)->value();
-            uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
-            CodeStub::Major major_key = CodeStub::GetMajorKey(code);
-            ASSERT(major_key == CodeStub::MajorKeyFromKey(key));
-            out.AddFormatted(" %s, %s, ",
-                             Code::Kind2String(kind),
-                             CodeStub::MajorName(major_key, false));
-            switch (major_key) {
-              case CodeStub::CallFunction: {
-                int argc =
-                    CallFunctionStub::ExtractArgcFromMinorKey(minor_key);
-                out.AddFormatted("argc = %d", argc);
-                break;
-              }
-              default:
-                out.AddFormatted("minor: %d", minor_key);
+          // Get the STUB key and extract major and minor key.
+          uint32_t key = code->stub_key();
+          uint32_t minor_key = CodeStub::MinorKeyFromKey(key);
+          CodeStub::Major major_key = CodeStub::GetMajorKey(code);
+          DCHECK(major_key == CodeStub::MajorKeyFromKey(key));
+          out.AddFormatted(" %s, %s, ", Code::Kind2String(kind),
+                           CodeStub::MajorName(major_key, false));
+          switch (major_key) {
+            case CodeStub::CallFunction: {
+              int argc = CallFunctionStub::ExtractArgcFromMinorKey(minor_key);
+              out.AddFormatted("argc = %d", argc);
+              break;
             }
+            default:
+              out.AddFormatted("minor: %d", minor_key);
           }
         } else {
           out.AddFormatted(" %s", Code::Kind2String(kind));
diff --git a/src/diy-fp.cc b/src/diy-fp.cc
index 3abf14d..cdad2a8 100644
--- a/src/diy-fp.cc
+++ b/src/diy-fp.cc
@@ -3,9 +3,9 @@
 // found in the LICENSE file.
 
 #include "include/v8stdint.h"
-#include "src/globals.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/diy-fp.h"
+#include "src/globals.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/diy-fp.h b/src/diy-fp.h
index f8f2673..31f7872 100644
--- a/src/diy-fp.h
+++ b/src/diy-fp.h
@@ -25,8 +25,8 @@
   // must be bigger than the significand of other.
   // The result will not be normalized.
   void Subtract(const DiyFp& other) {
-    ASSERT(e_ == other.e_);
-    ASSERT(f_ >= other.f_);
+    DCHECK(e_ == other.e_);
+    DCHECK(f_ >= other.f_);
     f_ -= other.f_;
   }
 
@@ -51,7 +51,7 @@
   }
 
   void Normalize() {
-    ASSERT(f_ != 0);
+    DCHECK(f_ != 0);
     uint64_t f = f_;
     int e = e_;
 
diff --git a/src/double.h b/src/double.h
index 947d259..cb12628 100644
--- a/src/double.h
+++ b/src/double.h
@@ -11,8 +11,8 @@
 namespace internal {
 
 // We assume that doubles and uint64_t have the same endianness.
-inline uint64_t double_to_uint64(double d) { return BitCast<uint64_t>(d); }
-inline double uint64_to_double(uint64_t d64) { return BitCast<double>(d64); }
+inline uint64_t double_to_uint64(double d) { return bit_cast<uint64_t>(d); }
+inline double uint64_to_double(uint64_t d64) { return bit_cast<double>(d64); }
 
 // Helper functions for doubles.
 class Double {
@@ -34,14 +34,14 @@
   // The value encoded by this Double must be greater than or equal to +0.0.
   // It must not be special (infinity, or NaN).
   DiyFp AsDiyFp() const {
-    ASSERT(Sign() > 0);
-    ASSERT(!IsSpecial());
+    DCHECK(Sign() > 0);
+    DCHECK(!IsSpecial());
     return DiyFp(Significand(), Exponent());
   }
 
   // The value encoded by this Double must be strictly greater than 0.
   DiyFp AsNormalizedDiyFp() const {
-    ASSERT(value() > 0.0);
+    DCHECK(value() > 0.0);
     uint64_t f = Significand();
     int e = Exponent();
 
@@ -121,7 +121,7 @@
   // Precondition: the value encoded by this Double must be greater than or
   // equal to +0.0.
   DiyFp UpperBoundary() const {
-    ASSERT(Sign() > 0);
+    DCHECK(Sign() > 0);
     return DiyFp(Significand() * 2 + 1, Exponent() - 1);
   }
 
@@ -130,7 +130,7 @@
   // exponent as m_plus.
   // Precondition: the value encoded by this Double must be greater than 0.
   void NormalizedBoundaries(DiyFp* out_m_minus, DiyFp* out_m_plus) const {
-    ASSERT(value() > 0.0);
+    DCHECK(value() > 0.0);
     DiyFp v = this->AsDiyFp();
     bool significand_is_zero = (v.f() == kHiddenBit);
     DiyFp m_plus = DiyFp::Normalize(DiyFp((v.f() << 1) + 1, v.e() - 1));
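
double_to_uint64 and uint64_to_double now route through the lower-case
bit_cast. Before C++20's std::bit_cast, the standard-conforming way to write
such a cast is a memcpy between equally sized trivially copyable types; a
sketch of that shape (V8's version is close to this):

#include <cstdint>
#include <cstring>

// Reinterprets the object representation of `source` as a Dest without
// violating strict aliasing; the memcpy is optimized away in practice.
template <class Dest, class Source>
Dest bit_cast(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}

// e.g. bit_cast<uint64_t>(1.0) == 0x3FF0000000000000ULL
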
diff --git a/src/dtoa.cc b/src/dtoa.cc
index 949e76b..f39b0b0 100644
--- a/src/dtoa.cc
+++ b/src/dtoa.cc
@@ -5,7 +5,7 @@
 #include <cmath>
 
 #include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/utils.h"
 
 #include "src/dtoa.h"
@@ -32,8 +32,8 @@
 
 void DoubleToAscii(double v, DtoaMode mode, int requested_digits,
                    Vector<char> buffer, int* sign, int* length, int* point) {
-  ASSERT(!Double(v).IsSpecial());
-  ASSERT(mode == DTOA_SHORTEST || requested_digits >= 0);
+  DCHECK(!Double(v).IsSpecial());
+  DCHECK(mode == DTOA_SHORTEST || requested_digits >= 0);
 
   if (Double(v).Sign() < 0) {
     *sign = 1;
diff --git a/src/effects.h b/src/effects.h
index 8cf5a88..9481bb8 100644
--- a/src/effects.h
+++ b/src/effects.h
@@ -33,7 +33,7 @@
   Bounds bounds;
 
   Effect() : modality(DEFINITE) {}
-  Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
+  explicit Effect(Bounds b, Modality m = DEFINITE) : modality(m), bounds(b) {}
 
   // The unknown effect.
   static Effect Unknown(Zone* zone) {
@@ -195,15 +195,15 @@
   typedef typename Mapping::Locator Locator;
 
   bool Contains(Var var) {
-    ASSERT(var != kNoVar);
+    DCHECK(var != kNoVar);
     return map_->Contains(var);
   }
   bool Find(Var var, Locator* locator) {
-    ASSERT(var != kNoVar);
+    DCHECK(var != kNoVar);
     return map_->Find(var, locator);
   }
   bool Insert(Var var, Locator* locator) {
-    ASSERT(var != kNoVar);
+    DCHECK(var != kNoVar);
     return map_->Insert(var, locator);
   }
 
@@ -259,7 +259,7 @@
   bool is_empty() { return node_ == NULL; }
 
   bool Contains(Var var) {
-    ASSERT(var != kNoVar);
+    DCHECK(var != kNoVar);
     for (Node* node = node_; node != NULL; node = node->previous) {
       if (node->effects.Contains(var)) return true;
     }
@@ -267,7 +267,7 @@
   }
 
   bool Find(Var var, Locator* locator) {
-    ASSERT(var != kNoVar);
+    DCHECK(var != kNoVar);
     for (Node* node = node_; node != NULL; node = node->previous) {
       if (node->effects.Find(var, locator)) return true;
     }
@@ -293,7 +293,7 @@
 
 template<class Var, Var kNoVar>
 bool NestedEffectsBase<Var, kNoVar>::Insert(Var var, Locator* locator) {
-  ASSERT(var != kNoVar);
+  DCHECK(var != kNoVar);
   if (!node_->effects.Insert(var, locator)) return false;
   Locator shadowed;
   for (Node* node = node_->previous; node != NULL; node = node->previous) {
@@ -326,7 +326,7 @@
   NestedEffects Pop() {
     NestedEffects result = *this;
     result.pop();
-    ASSERT(!this->is_empty());
+    DCHECK(!this->is_empty());
     return result;
   }
 };
diff --git a/src/elements-kind.cc b/src/elements-kind.cc
index cd946e8..0ebc6dc 100644
--- a/src/elements-kind.cc
+++ b/src/elements-kind.cc
@@ -65,20 +65,6 @@
 }
 
 
-void PrintElementsKind(FILE* out, ElementsKind kind) {
-  PrintF(out, "%s", ElementsKindToString(kind));
-}
-
-
-ElementsKind GetInitialFastElementsKind() {
-  if (FLAG_packed_arrays) {
-    return FAST_SMI_ELEMENTS;
-  } else {
-    return FAST_HOLEY_SMI_ELEMENTS;
-  }
-}
-
-
 struct InitializeFastElementsKindSequence {
   static void Construct(
       ElementsKind** fast_elements_kind_sequence_ptr) {
@@ -110,7 +96,7 @@
 
 
 ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number) {
-  ASSERT(sequence_number >= 0 &&
+  DCHECK(sequence_number >= 0 &&
          sequence_number < kFastElementsKindCount);
   return fast_elements_kind_sequence.Get()[sequence_number];
 }
@@ -144,8 +130,8 @@
 
 ElementsKind GetNextMoreGeneralFastElementsKind(ElementsKind elements_kind,
                                                 bool allow_only_packed) {
-  ASSERT(IsFastElementsKind(elements_kind));
-  ASSERT(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
+  DCHECK(IsFastElementsKind(elements_kind));
+  DCHECK(elements_kind != TERMINAL_FAST_ELEMENTS_KIND);
   while (true) {
     elements_kind = GetNextTransitionElementsKind(elements_kind);
     if (!IsFastHoleyElementsKind(elements_kind) || !allow_only_packed) {
diff --git a/src/elements-kind.h b/src/elements-kind.h
index cdd928b..fb97341 100644
--- a/src/elements-kind.h
+++ b/src/elements-kind.h
@@ -5,7 +5,7 @@
 #ifndef V8_ELEMENTS_KIND_H_
 #define V8_ELEMENTS_KIND_H_
 
-#include "src/v8checks.h"
+#include "src/checks.h"
 
 namespace v8 {
 namespace internal {
@@ -74,9 +74,8 @@
 int ElementsKindToShiftSize(ElementsKind elements_kind);
 int GetDefaultHeaderSizeForElementsKind(ElementsKind elements_kind);
 const char* ElementsKindToString(ElementsKind kind);
-void PrintElementsKind(FILE* out, ElementsKind kind);
 
-ElementsKind GetInitialFastElementsKind();
+inline ElementsKind GetInitialFastElementsKind() { return FAST_SMI_ELEMENTS; }
 
 ElementsKind GetFastElementsKindFromSequenceIndex(int sequence_number);
 int GetSequenceIndexFromFastElementsKind(ElementsKind elements_kind);
@@ -88,6 +87,11 @@
 }
 
 
+inline bool IsSloppyArgumentsElements(ElementsKind kind) {
+  return kind == SLOPPY_ARGUMENTS_ELEMENTS;
+}
+
+
 inline bool IsExternalArrayElementsKind(ElementsKind kind) {
   return kind >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
       kind <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND;
@@ -107,7 +111,7 @@
 
 
 inline bool IsFastElementsKind(ElementsKind kind) {
-  ASSERT(FIRST_FAST_ELEMENTS_KIND == 0);
+  DCHECK(FIRST_FAST_ELEMENTS_KIND == 0);
   return kind <= FAST_HOLEY_DOUBLE_ELEMENTS;
 }
 
@@ -210,7 +214,7 @@
 
 
 inline ElementsKind FastSmiToObjectElementsKind(ElementsKind from_kind) {
-  ASSERT(IsFastSmiElementsKind(from_kind));
+  DCHECK(IsFastSmiElementsKind(from_kind));
   return (from_kind == FAST_SMI_ELEMENTS)
       ? FAST_ELEMENTS
       : FAST_HOLEY_ELEMENTS;
diff --git a/src/elements.cc b/src/elements.cc
index a642688..e2127c4 100644
--- a/src/elements.cc
+++ b/src/elements.cc
@@ -120,7 +120,7 @@
 #undef ELEMENTS_TRAITS
 
 
-ElementsAccessor** ElementsAccessor::elements_accessors_;
+ElementsAccessor** ElementsAccessor::elements_accessors_ = NULL;
 
 
 static bool HasKey(Handle<FixedArray> array, Handle<Object> key_handle) {
@@ -141,25 +141,24 @@
 
 MUST_USE_RESULT
 static MaybeHandle<Object> ThrowArrayLengthRangeError(Isolate* isolate) {
-  return isolate->Throw<Object>(
-      isolate->factory()->NewRangeError("invalid_array_length",
-                                        HandleVector<Object>(NULL, 0)));
+  THROW_NEW_ERROR(isolate, NewRangeError("invalid_array_length",
+                                         HandleVector<Object>(NULL, 0)),
+                  Object);
 }
 
 
-static void CopyObjectToObjectElements(Handle<FixedArrayBase> from_base,
+static void CopyObjectToObjectElements(FixedArrayBase* from_base,
                                        ElementsKind from_kind,
                                        uint32_t from_start,
-                                       Handle<FixedArrayBase> to_base,
-                                       ElementsKind to_kind,
-                                       uint32_t to_start,
+                                       FixedArrayBase* to_base,
+                                       ElementsKind to_kind, uint32_t to_start,
                                        int raw_copy_size) {
-  ASSERT(to_base->map() !=
+  DCHECK(to_base->map() !=
       from_base->GetIsolate()->heap()->fixed_cow_array_map());
   DisallowHeapAllocation no_allocation;
   int copy_size = raw_copy_size;
   if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
            raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
     copy_size = Min(from_base->length() - from_start,
                     to_base->length() - to_start);
@@ -168,18 +167,18 @@
       int length = to_base->length() - start;
       if (length > 0) {
         Heap* heap = from_base->GetHeap();
-        MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
+        MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
                       heap->the_hole_value(), length);
       }
     }
   }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
          (copy_size + static_cast<int>(from_start)) <= from_base->length());
   if (copy_size == 0) return;
-  Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
-  Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
-  ASSERT(IsFastSmiOrObjectElementsKind(from_kind));
-  ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
+  FixedArray* from = FixedArray::cast(from_base);
+  FixedArray* to = FixedArray::cast(to_base);
+  DCHECK(IsFastSmiOrObjectElementsKind(from_kind));
+  DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
   Address to_address = to->address() + FixedArray::kHeaderSize;
   Address from_address = from->address() + FixedArray::kHeaderSize;
   CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
@@ -188,29 +187,25 @@
   if (IsFastObjectElementsKind(from_kind) &&
       IsFastObjectElementsKind(to_kind)) {
     Heap* heap = from->GetHeap();
-    if (!heap->InNewSpace(*to)) {
+    if (!heap->InNewSpace(to)) {
       heap->RecordWrites(to->address(),
                          to->OffsetOfElementAt(to_start),
                          copy_size);
     }
-    heap->incremental_marking()->RecordWrites(*to);
+    heap->incremental_marking()->RecordWrites(to);
   }
 }
 
 
-static void CopyDictionaryToObjectElements(Handle<FixedArrayBase> from_base,
-                                           uint32_t from_start,
-                                           Handle<FixedArrayBase> to_base,
-                                           ElementsKind to_kind,
-                                           uint32_t to_start,
-                                           int raw_copy_size) {
-  Handle<SeededNumberDictionary> from =
-      Handle<SeededNumberDictionary>::cast(from_base);
+static void CopyDictionaryToObjectElements(
+    FixedArrayBase* from_base, uint32_t from_start, FixedArrayBase* to_base,
+    ElementsKind to_kind, uint32_t to_start, int raw_copy_size) {
   DisallowHeapAllocation no_allocation;
+  SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
   int copy_size = raw_copy_size;
   Heap* heap = from->GetHeap();
   if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
            raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
     copy_size = from->max_number_key() + 1 - from_start;
     if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
@@ -218,15 +213,15 @@
       int length = to_base->length() - start;
       if (length > 0) {
         Heap* heap = from->GetHeap();
-        MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
+        MemsetPointer(FixedArray::cast(to_base)->data_start() + start,
                       heap->the_hole_value(), length);
       }
     }
   }
-  ASSERT(*to_base != *from_base);
-  ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
+  DCHECK(to_base != from_base);
+  DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
   if (copy_size == 0) return;
-  Handle<FixedArray> to = Handle<FixedArray>::cast(to_base);
+  FixedArray* to = FixedArray::cast(to_base);
   uint32_t to_length = to->length();
   if (to_start + copy_size > to_length) {
     copy_size = to_length - to_start;
@@ -235,19 +230,19 @@
     int entry = from->FindEntry(i + from_start);
     if (entry != SeededNumberDictionary::kNotFound) {
       Object* value = from->ValueAt(entry);
-      ASSERT(!value->IsTheHole());
+      DCHECK(!value->IsTheHole());
       to->set(i + to_start, value, SKIP_WRITE_BARRIER);
     } else {
       to->set_the_hole(i + to_start);
     }
   }
   if (IsFastObjectElementsKind(to_kind)) {
-    if (!heap->InNewSpace(*to)) {
+    if (!heap->InNewSpace(to)) {
       heap->RecordWrites(to->address(),
                          to->OffsetOfElementAt(to_start),
                          copy_size);
     }
-    heap->incremental_marking()->RecordWrites(*to);
+    heap->incremental_marking()->RecordWrites(to);
   }
 }
 
@@ -258,10 +253,10 @@
                                        ElementsKind to_kind,
                                        uint32_t to_start,
                                        int raw_copy_size) {
-  ASSERT(IsFastSmiOrObjectElementsKind(to_kind));
+  DCHECK(IsFastSmiOrObjectElementsKind(to_kind));
   int copy_size = raw_copy_size;
   if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
            raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
     copy_size = Min(from_base->length() - from_start,
                     to_base->length() - to_start);
@@ -273,12 +268,12 @@
       int length = to_base->length() - start;
       if (length > 0) {
         Heap* heap = from_base->GetHeap();
-        MemsetPointer(Handle<FixedArray>::cast(to_base)->data_start() + start,
+        MemsetPointer(FixedArray::cast(*to_base)->data_start() + start,
                       heap->the_hole_value(), length);
       }
     }
   }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
          (copy_size + static_cast<int>(from_start)) <= from_base->length());
   if (copy_size == 0) return;
   Isolate* isolate = from_base->GetIsolate();
@@ -289,7 +284,7 @@
     if (IsFastSmiElementsKind(to_kind)) {
       UNIMPLEMENTED();
     } else {
-      ASSERT(IsFastObjectElementsKind(to_kind));
+      DCHECK(IsFastObjectElementsKind(to_kind));
       Handle<Object> value = FixedDoubleArray::get(from, i + from_start);
       to->set(i + to_start, *value, UPDATE_WRITE_BARRIER);
     }
@@ -297,29 +292,28 @@
 }
 
 
-static void CopyDoubleToDoubleElements(Handle<FixedArrayBase> from_base,
+static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
                                        uint32_t from_start,
-                                       Handle<FixedArrayBase> to_base,
-                                       uint32_t to_start,
-                                       int raw_copy_size) {
+                                       FixedArrayBase* to_base,
+                                       uint32_t to_start, int raw_copy_size) {
   DisallowHeapAllocation no_allocation;
   int copy_size = raw_copy_size;
   if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
            raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
     copy_size = Min(from_base->length() - from_start,
                     to_base->length() - to_start);
     if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
       for (int i = to_start + copy_size; i < to_base->length(); ++i) {
-        Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
+        FixedDoubleArray::cast(to_base)->set_the_hole(i);
       }
     }
   }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
          (copy_size + static_cast<int>(from_start)) <= from_base->length());
   if (copy_size == 0) return;
-  Handle<FixedDoubleArray> from = Handle<FixedDoubleArray>::cast(from_base);
-  Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+  FixedDoubleArray* from = FixedDoubleArray::cast(from_base);
+  FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
   Address to_address = to->address() + FixedDoubleArray::kHeaderSize;
   Address from_address = from->address() + FixedDoubleArray::kHeaderSize;
   to_address += kDoubleSize * to_start;
@@ -331,33 +325,32 @@
 }
 
 
-static void CopySmiToDoubleElements(Handle<FixedArrayBase> from_base,
+static void CopySmiToDoubleElements(FixedArrayBase* from_base,
                                     uint32_t from_start,
-                                    Handle<FixedArrayBase> to_base,
-                                    uint32_t to_start,
+                                    FixedArrayBase* to_base, uint32_t to_start,
                                     int raw_copy_size) {
   DisallowHeapAllocation no_allocation;
   int copy_size = raw_copy_size;
   if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
            raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
     copy_size = from_base->length() - from_start;
     if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
       for (int i = to_start + copy_size; i < to_base->length(); ++i) {
-        Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
+        FixedDoubleArray::cast(to_base)->set_the_hole(i);
       }
     }
   }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
          (copy_size + static_cast<int>(from_start)) <= from_base->length());
   if (copy_size == 0) return;
-  Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
-  Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
-  Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value();
+  FixedArray* from = FixedArray::cast(from_base);
+  FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+  Object* the_hole = from->GetHeap()->the_hole_value();
   for (uint32_t from_end = from_start + static_cast<uint32_t>(copy_size);
        from_start < from_end; from_start++, to_start++) {
     Object* hole_or_smi = from->get(from_start);
-    if (hole_or_smi == *the_hole) {
+    if (hole_or_smi == the_hole) {
       to->set_the_hole(to_start);
     } else {
       to->set(to_start, Smi::cast(hole_or_smi)->value());
@@ -366,23 +359,22 @@
 }
 
 
-static void CopyPackedSmiToDoubleElements(Handle<FixedArrayBase> from_base,
+static void CopyPackedSmiToDoubleElements(FixedArrayBase* from_base,
                                           uint32_t from_start,
-                                          Handle<FixedArrayBase> to_base,
-                                          uint32_t to_start,
-                                          int packed_size,
+                                          FixedArrayBase* to_base,
+                                          uint32_t to_start, int packed_size,
                                           int raw_copy_size) {
   DisallowHeapAllocation no_allocation;
   int copy_size = raw_copy_size;
   uint32_t to_end;
   if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
            raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
     copy_size = packed_size - from_start;
     if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
       to_end = to_base->length();
       for (uint32_t i = to_start + copy_size; i < to_end; ++i) {
-        Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
+        FixedDoubleArray::cast(to_base)->set_the_hole(i);
       }
     } else {
       to_end = to_start + static_cast<uint32_t>(copy_size);
@@ -390,49 +382,48 @@
   } else {
     to_end = to_start + static_cast<uint32_t>(copy_size);
   }
-  ASSERT(static_cast<int>(to_end) <= to_base->length());
-  ASSERT(packed_size >= 0 && packed_size <= copy_size);
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+  DCHECK(static_cast<int>(to_end) <= to_base->length());
+  DCHECK(packed_size >= 0 && packed_size <= copy_size);
+  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
          (copy_size + static_cast<int>(from_start)) <= from_base->length());
   if (copy_size == 0) return;
-  Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
-  Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+  FixedArray* from = FixedArray::cast(from_base);
+  FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
   for (uint32_t from_end = from_start + static_cast<uint32_t>(packed_size);
        from_start < from_end; from_start++, to_start++) {
     Object* smi = from->get(from_start);
-    ASSERT(!smi->IsTheHole());
+    DCHECK(!smi->IsTheHole());
     to->set(to_start, Smi::cast(smi)->value());
   }
 }
 
 
-static void CopyObjectToDoubleElements(Handle<FixedArrayBase> from_base,
+static void CopyObjectToDoubleElements(FixedArrayBase* from_base,
                                        uint32_t from_start,
-                                       Handle<FixedArrayBase> to_base,
-                                       uint32_t to_start,
-                                       int raw_copy_size) {
+                                       FixedArrayBase* to_base,
+                                       uint32_t to_start, int raw_copy_size) {
   DisallowHeapAllocation no_allocation;
   int copy_size = raw_copy_size;
   if (raw_copy_size < 0) {
-    ASSERT(raw_copy_size == ElementsAccessor::kCopyToEnd ||
+    DCHECK(raw_copy_size == ElementsAccessor::kCopyToEnd ||
            raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
     copy_size = from_base->length() - from_start;
     if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
       for (int i = to_start + copy_size; i < to_base->length(); ++i) {
-        Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
+        FixedDoubleArray::cast(to_base)->set_the_hole(i);
       }
     }
   }
-  ASSERT((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
+  DCHECK((copy_size + static_cast<int>(to_start)) <= to_base->length() &&
          (copy_size + static_cast<int>(from_start)) <= from_base->length());
   if (copy_size == 0) return;
-  Handle<FixedArray> from = Handle<FixedArray>::cast(from_base);
-  Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
-  Handle<Object> the_hole = from->GetIsolate()->factory()->the_hole_value();
+  FixedArray* from = FixedArray::cast(from_base);
+  FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
+  Object* the_hole = from->GetHeap()->the_hole_value();
   for (uint32_t from_end = from_start + copy_size;
        from_start < from_end; from_start++, to_start++) {
     Object* hole_or_object = from->get(from_start);
-    if (hole_or_object == *the_hole) {
+    if (hole_or_object == the_hole) {
       to->set_the_hole(to_start);
     } else {
       to->set(to_start, hole_or_object->Number());
@@ -441,27 +432,26 @@
 }
 
 
-static void CopyDictionaryToDoubleElements(Handle<FixedArrayBase> from_base,
+static void CopyDictionaryToDoubleElements(FixedArrayBase* from_base,
                                            uint32_t from_start,
-                                           Handle<FixedArrayBase> to_base,
+                                           FixedArrayBase* to_base,
                                            uint32_t to_start,
                                            int raw_copy_size) {
-  Handle<SeededNumberDictionary> from =
-      Handle<SeededNumberDictionary>::cast(from_base);
   DisallowHeapAllocation no_allocation;
+  SeededNumberDictionary* from = SeededNumberDictionary::cast(from_base);
   int copy_size = raw_copy_size;
   if (copy_size < 0) {
-    ASSERT(copy_size == ElementsAccessor::kCopyToEnd ||
+    DCHECK(copy_size == ElementsAccessor::kCopyToEnd ||
            copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole);
     copy_size = from->max_number_key() + 1 - from_start;
     if (raw_copy_size == ElementsAccessor::kCopyToEndAndInitializeToHole) {
       for (int i = to_start + copy_size; i < to_base->length(); ++i) {
-        Handle<FixedDoubleArray>::cast(to_base)->set_the_hole(i);
+        FixedDoubleArray::cast(to_base)->set_the_hole(i);
       }
     }
   }
   if (copy_size == 0) return;
-  Handle<FixedDoubleArray> to = Handle<FixedDoubleArray>::cast(to_base);
+  FixedDoubleArray* to = FixedDoubleArray::cast(to_base);
   uint32_t to_length = to->length();
   if (to_start + copy_size > to_length) {
     copy_size = to_length - to_start;
@@ -564,7 +554,7 @@
   typedef ElementsTraitsParam ElementsTraits;
   typedef typename ElementsTraitsParam::BackingStore BackingStore;
 
-  virtual ElementsKind kind() const V8_FINAL V8_OVERRIDE {
+  virtual ElementsKind kind() const FINAL OVERRIDE {
     return ElementsTraits::Kind;
   }
 
@@ -588,7 +578,7 @@
     ElementsAccessorSubclass::ValidateContents(holder, length);
   }
 
-  virtual void Validate(Handle<JSObject> holder) V8_FINAL V8_OVERRIDE {
+  virtual void Validate(Handle<JSObject> holder) FINAL OVERRIDE {
     DisallowHeapAllocation no_gc;
     ElementsAccessorSubclass::ValidateImpl(holder);
   }
@@ -605,7 +595,7 @@
       Handle<Object> receiver,
       Handle<JSObject> holder,
       uint32_t key,
-      Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+      Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
     return ElementsAccessorSubclass::HasElementImpl(
         receiver, holder, key, backing_store);
   }
@@ -614,7 +604,7 @@
       Handle<Object> receiver,
       Handle<JSObject> holder,
       uint32_t key,
-      Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+      Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
     if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
         FLAG_trace_js_array_abuse) {
       CheckArrayAbuse(holder, "elements read", key);
@@ -645,7 +635,7 @@
       Handle<Object> receiver,
       Handle<JSObject> holder,
       uint32_t key,
-      Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+      Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
     return ElementsAccessorSubclass::GetAttributesImpl(
         receiver, holder, key, backing_store);
   }
@@ -663,33 +653,11 @@
           ? ABSENT : NONE;
   }
 
-  MUST_USE_RESULT virtual PropertyType GetType(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      uint32_t key,
-      Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
-    return ElementsAccessorSubclass::GetTypeImpl(
-        receiver, holder, key, backing_store);
-  }
-
-  MUST_USE_RESULT static PropertyType GetTypeImpl(
-      Handle<Object> receiver,
-      Handle<JSObject> obj,
-      uint32_t key,
-      Handle<FixedArrayBase> backing_store) {
-    if (key >= ElementsAccessorSubclass::GetCapacityImpl(backing_store)) {
-      return NONEXISTENT;
-    }
-    return
-        Handle<BackingStore>::cast(backing_store)->is_the_hole(key)
-          ? NONEXISTENT : FIELD;
-  }
-
   MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
       Handle<Object> receiver,
       Handle<JSObject> holder,
       uint32_t key,
-      Handle<FixedArrayBase> backing_store) V8_FINAL V8_OVERRIDE {
+      Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
     return ElementsAccessorSubclass::GetAccessorPairImpl(
         receiver, holder, key, backing_store);
   }
@@ -704,7 +672,7 @@
 
   MUST_USE_RESULT virtual MaybeHandle<Object> SetLength(
       Handle<JSArray> array,
-      Handle<Object> length) V8_FINAL V8_OVERRIDE {
+      Handle<Object> length) FINAL OVERRIDE {
     return ElementsAccessorSubclass::SetLengthImpl(
         array, length, handle(array->elements()));
   }
@@ -717,7 +685,7 @@
   virtual void SetCapacityAndLength(
       Handle<JSArray> array,
       int capacity,
-      int length) V8_FINAL V8_OVERRIDE {
+      int length) FINAL OVERRIDE {
     ElementsAccessorSubclass::
         SetFastElementsCapacityAndLength(array, capacity, length);
   }
@@ -732,7 +700,7 @@
   MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
       Handle<JSObject> obj,
       uint32_t key,
-      JSReceiver::DeleteMode mode) V8_OVERRIDE = 0;
+      JSReceiver::DeleteMode mode) OVERRIDE = 0;
 
   static void CopyElementsImpl(Handle<FixedArrayBase> from,
                                uint32_t from_start,
@@ -750,8 +718,8 @@
       ElementsKind from_kind,
       Handle<FixedArrayBase> to,
       uint32_t to_start,
-      int copy_size) V8_FINAL V8_OVERRIDE {
-    ASSERT(!from.is_null());
+      int copy_size) FINAL OVERRIDE {
+    DCHECK(!from.is_null());
     ElementsAccessorSubclass::CopyElementsImpl(
         from, from_start, to, from_kind, to_start, kPackedSizeNotKnown,
         copy_size);
@@ -763,7 +731,7 @@
       ElementsKind from_kind,
       Handle<FixedArrayBase> to,
       uint32_t to_start,
-      int copy_size) V8_FINAL V8_OVERRIDE {
+      int copy_size) FINAL OVERRIDE {
     int packed_size = kPackedSizeNotKnown;
     bool is_packed = IsFastPackedElementsKind(from_kind) &&
         from_holder->IsJSArray();
@@ -783,12 +751,12 @@
       Handle<Object> receiver,
       Handle<JSObject> holder,
       Handle<FixedArray> to,
-      Handle<FixedArrayBase> from) V8_FINAL V8_OVERRIDE {
+      Handle<FixedArrayBase> from) FINAL OVERRIDE {
     int len0 = to->length();
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
     if (FLAG_enable_slow_asserts) {
       for (int i = 0; i < len0; i++) {
-        ASSERT(!to->get(i)->IsTheHole());
+        DCHECK(!to->get(i)->IsTheHole());
       }
     }
 #endif
@@ -812,7 +780,7 @@
             ElementsAccessorSubclass::GetImpl(receiver, holder, key, from),
             FixedArray);
 
-        ASSERT(!value->IsTheHole());
+        DCHECK(!value->IsTheHole());
         if (!HasKey(to, value)) {
           extra++;
         }
@@ -830,7 +798,7 @@
       WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
       for (int i = 0; i < len0; i++) {
         Object* e = to->get(i);
-        ASSERT(e->IsString() || e->IsNumber());
+        DCHECK(e->IsString() || e->IsNumber());
         result->set(i, e, mode);
       }
     }
@@ -852,7 +820,7 @@
         }
       }
     }
-    ASSERT(extra == index);
+    DCHECK(extra == index);
     return result;
   }
 
@@ -862,7 +830,7 @@
   }
 
   virtual uint32_t GetCapacity(Handle<FixedArrayBase> backing_store)
-      V8_FINAL V8_OVERRIDE {
+      FINAL OVERRIDE {
     return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
   }
 
@@ -872,7 +840,7 @@
   }
 
   virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store,
-                                  uint32_t index) V8_FINAL V8_OVERRIDE {
+                                  uint32_t index) FINAL OVERRIDE {
     return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
   }
 
@@ -883,8 +851,7 @@
 
 // Super class for all fast element arrays.
 template<typename FastElementsAccessorSubclass,
-         typename KindTraits,
-         int ElementSize>
+         typename KindTraits>
 class FastElementsAccessor
     : public ElementsAccessorBase<FastElementsAccessorSubclass, KindTraits> {
  public:
@@ -897,8 +864,7 @@
 
   typedef typename KindTraits::BackingStore BackingStore;
 
-  // Adjusts the length of the fast backing store or returns the new length or
-  // undefined in case conversion to a slow backing store should be performed.
+  // Adjusts the length of the fast backing store.
   static Handle<Object> SetLengthWithoutNormalize(
       Handle<FixedArrayBase> backing_store,
       Handle<JSArray> array,
@@ -927,15 +893,8 @@
         if (length == 0) {
           array->initialize_elements();
         } else {
-          int filler_size = (old_capacity - length) * ElementSize;
-          Address filler_start = backing_store->address() +
-              BackingStore::OffsetOfElementAt(length);
-          array->GetHeap()->CreateFillerObjectAt(filler_start, filler_size);
-
-          // We are storing the new length using release store after creating a
-          // filler for the left-over space to avoid races with the sweeper
-          // thread.
-          backing_store->synchronized_set_length(length);
+          isolate->heap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(
+              *backing_store, old_capacity - length);
         }
       } else {
         // Otherwise, fill the unused tail with holes.
@@ -950,21 +909,16 @@
     // Check whether the backing store should be expanded.
     uint32_t min = JSObject::NewElementsCapacity(old_capacity);
     uint32_t new_capacity = length > min ? length : min;
-    if (!array->ShouldConvertToSlowElements(new_capacity)) {
-      FastElementsAccessorSubclass::
-          SetFastElementsCapacityAndLength(array, new_capacity, length);
-      JSObject::ValidateElements(array);
-      return length_object;
-    }
-
-    // Request conversion to slow elements.
-    return isolate->factory()->undefined_value();
+    FastElementsAccessorSubclass::SetFastElementsCapacityAndLength(
+        array, new_capacity, length);
+    JSObject::ValidateElements(array);
+    return length_object;
   }
 
   static Handle<Object> DeleteCommon(Handle<JSObject> obj,
                                      uint32_t key,
                                      JSReceiver::DeleteMode mode) {
-    ASSERT(obj->HasFastSmiOrObjectElements() ||
+    DCHECK(obj->HasFastSmiOrObjectElements() ||
            obj->HasFastDoubleElements() ||
            obj->HasFastArgumentsElements());
     Isolate* isolate = obj->GetIsolate();
@@ -1023,7 +977,7 @@
   virtual MaybeHandle<Object> Delete(
       Handle<JSObject> obj,
       uint32_t key,
-      JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+      JSReceiver::DeleteMode mode) FINAL OVERRIDE {
     return DeleteCommon(obj, key, mode);
   }
 
@@ -1044,7 +998,7 @@
     HandleScope scope(isolate);
     Handle<FixedArrayBase> elements(holder->elements(), isolate);
     Map* map = elements->map();
-    ASSERT((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
+    DCHECK((IsFastSmiOrObjectElementsKind(KindTraits::Kind) &&
             (map == isolate->heap()->fixed_array_map() ||
              map == isolate->heap()->fixed_cow_array_map())) ||
            (IsFastDoubleElementsKind(KindTraits::Kind) ==
@@ -1054,7 +1008,7 @@
     for (int i = 0; i < length; i++) {
       HandleScope scope(isolate);
       Handle<BackingStore> backing_store = Handle<BackingStore>::cast(elements);
-      ASSERT((!IsFastSmiElementsKind(KindTraits::Kind) ||
+      DCHECK((!IsFastSmiElementsKind(KindTraits::Kind) ||
               BackingStore::get(backing_store, i)->IsSmi()) ||
              (IsFastHoleyElementsKind(KindTraits::Kind) ==
               backing_store->is_the_hole(i)));
@@ -1094,14 +1048,11 @@
 template<typename FastElementsAccessorSubclass,
          typename KindTraits>
 class FastSmiOrObjectElementsAccessor
-    : public FastElementsAccessor<FastElementsAccessorSubclass,
-                                  KindTraits,
-                                  kPointerSize> {
+    : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> {
  public:
   explicit FastSmiOrObjectElementsAccessor(const char* name)
       : FastElementsAccessor<FastElementsAccessorSubclass,
-                             KindTraits,
-                             kPointerSize>(name) {}
+                             KindTraits>(name) {}
 
   static void CopyElementsImpl(Handle<FixedArrayBase> from,
                                uint32_t from_start,
@@ -1116,8 +1067,8 @@
       case FAST_HOLEY_SMI_ELEMENTS:
       case FAST_ELEMENTS:
       case FAST_HOLEY_ELEMENTS:
-        CopyObjectToObjectElements(
-            from, from_kind, from_start, to, to_kind, to_start, copy_size);
+        CopyObjectToObjectElements(*from, from_kind, from_start, *to, to_kind,
+                                   to_start, copy_size);
         break;
       case FAST_DOUBLE_ELEMENTS:
       case FAST_HOLEY_DOUBLE_ELEMENTS:
@@ -1125,8 +1076,8 @@
             from, from_start, to, to_kind, to_start, copy_size);
         break;
       case DICTIONARY_ELEMENTS:
-        CopyDictionaryToObjectElements(
-            from, from_start, to, to_kind, to_start, copy_size);
+        CopyDictionaryToObjectElements(*from, from_start, *to, to_kind,
+                                       to_start, copy_size);
         break;
       case SLOPPY_ARGUMENTS_ELEMENTS: {
         // TODO(verwaest): This is a temporary hack to support extending
@@ -1215,14 +1166,11 @@
 template<typename FastElementsAccessorSubclass,
          typename KindTraits>
 class FastDoubleElementsAccessor
-    : public FastElementsAccessor<FastElementsAccessorSubclass,
-                                  KindTraits,
-                                  kDoubleSize> {
+    : public FastElementsAccessor<FastElementsAccessorSubclass, KindTraits> {
  public:
   explicit FastDoubleElementsAccessor(const char* name)
       : FastElementsAccessor<FastElementsAccessorSubclass,
-                             KindTraits,
-                             kDoubleSize>(name) {}
+                             KindTraits>(name) {}
 
   static void SetFastElementsCapacityAndLength(Handle<JSObject> obj,
                                                uint32_t capacity,
@@ -1240,23 +1188,23 @@
                                int copy_size) {
     switch (from_kind) {
       case FAST_SMI_ELEMENTS:
-        CopyPackedSmiToDoubleElements(
-            from, from_start, to, to_start, packed_size, copy_size);
+        CopyPackedSmiToDoubleElements(*from, from_start, *to, to_start,
+                                      packed_size, copy_size);
         break;
       case FAST_HOLEY_SMI_ELEMENTS:
-        CopySmiToDoubleElements(from, from_start, to, to_start, copy_size);
+        CopySmiToDoubleElements(*from, from_start, *to, to_start, copy_size);
         break;
       case FAST_DOUBLE_ELEMENTS:
       case FAST_HOLEY_DOUBLE_ELEMENTS:
-        CopyDoubleToDoubleElements(from, from_start, to, to_start, copy_size);
+        CopyDoubleToDoubleElements(*from, from_start, *to, to_start, copy_size);
         break;
       case FAST_ELEMENTS:
       case FAST_HOLEY_ELEMENTS:
-        CopyObjectToDoubleElements(from, from_start, to, to_start, copy_size);
+        CopyObjectToDoubleElements(*from, from_start, *to, to_start, copy_size);
         break;
       case DICTIONARY_ELEMENTS:
-        CopyDictionaryToDoubleElements(
-            from, from_start, to, to_start, copy_size);
+        CopyDictionaryToDoubleElements(*from, from_start, *to, to_start,
+                                       copy_size);
         break;
       case SLOPPY_ARGUMENTS_ELEMENTS:
         UNREACHABLE();
@@ -1340,16 +1288,6 @@
           ? NONE : ABSENT;
   }
 
-  MUST_USE_RESULT static PropertyType GetTypeImpl(
-      Handle<Object> receiver,
-      Handle<JSObject> obj,
-      uint32_t key,
-      Handle<FixedArrayBase> backing_store) {
-    return
-        key < AccessorClass::GetCapacityImpl(backing_store)
-          ? FIELD : NONEXISTENT;
-  }
-
   MUST_USE_RESULT static MaybeHandle<Object> SetLengthImpl(
       Handle<JSObject> obj,
       Handle<Object> length,
@@ -1362,7 +1300,7 @@
   MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
       Handle<JSObject> obj,
       uint32_t key,
-      JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+      JSReceiver::DeleteMode mode) FINAL OVERRIDE {
     // External arrays always ignore deletes.
     return obj->GetIsolate()->factory()->true_value();
   }
@@ -1426,7 +1364,7 @@
           uint32_t number = static_cast<uint32_t>(key->Number());
           if (new_length <= number && number < old_length) {
             PropertyDetails details = dict->DetailsAt(i);
-            if (details.IsDontDelete()) new_length = number + 1;
+            if (!details.IsConfigurable()) new_length = number + 1;
           }
         }
       }
@@ -1483,10 +1421,9 @@
           // Deleting a non-configurable property in strict mode.
           Handle<Object> name = isolate->factory()->NewNumberFromUint(key);
           Handle<Object> args[2] = { name, obj };
-          Handle<Object> error =
-              isolate->factory()->NewTypeError("strict_delete_property",
-                                               HandleVector(args, 2));
-          return isolate->Throw<Object>(error);
+          THROW_NEW_ERROR(isolate, NewTypeError("strict_delete_property",
+                                                HandleVector(args, 2)),
+                          Object);
         }
         return isolate->factory()->false_value();
       }
@@ -1520,7 +1457,7 @@
   MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
       Handle<JSObject> obj,
       uint32_t key,
-      JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+      JSReceiver::DeleteMode mode) FINAL OVERRIDE {
     return DeleteCommon(obj, key, mode);
   }
 
@@ -1560,20 +1497,6 @@
     return ABSENT;
   }
 
-  MUST_USE_RESULT static PropertyType GetTypeImpl(
-      Handle<Object> receiver,
-      Handle<JSObject> obj,
-      uint32_t key,
-      Handle<FixedArrayBase> store) {
-    Handle<SeededNumberDictionary> backing_store =
-        Handle<SeededNumberDictionary>::cast(store);
-    int entry = backing_store->FindEntry(key);
-    if (entry != SeededNumberDictionary::kNotFound) {
-      return backing_store->DetailsAt(entry).type();
-    }
-    return NONEXISTENT;
-  }
-
   MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
       Handle<Object> receiver,
       Handle<JSObject> obj,
@@ -1635,7 +1558,7 @@
       DisallowHeapAllocation no_gc;
       Context* context = Context::cast(parameter_map->get(0));
       int context_index = Handle<Smi>::cast(probe)->value();
-      ASSERT(!context->get(context_index)->IsTheHole());
+      DCHECK(!context->get(context_index)->IsTheHole());
       return handle(context->get(context_index), isolate);
     } else {
       // Object is not mapped, defer to the arguments.
@@ -1653,7 +1576,7 @@
         AliasedArgumentsEntry* entry = AliasedArgumentsEntry::cast(*result);
         Context* context = Context::cast(parameter_map->get(0));
         int context_index = entry->aliased_context_slot();
-        ASSERT(!context->get(context_index)->IsTheHole());
+        DCHECK(!context->get(context_index)->IsTheHole());
         return handle(context->get(context_index), isolate);
       } else {
         return result;
@@ -1678,23 +1601,6 @@
     }
   }
 
-  MUST_USE_RESULT static PropertyType GetTypeImpl(
-      Handle<Object> receiver,
-      Handle<JSObject> obj,
-      uint32_t key,
-      Handle<FixedArrayBase> parameters) {
-    Handle<FixedArray> parameter_map = Handle<FixedArray>::cast(parameters);
-    Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
-    if (!probe->IsTheHole()) {
-      return FIELD;
-    } else {
-      // If not aliased, check the arguments.
-      Handle<FixedArray> arguments(FixedArray::cast(parameter_map->get(1)));
-      return ElementsAccessor::ForArray(arguments)->GetType(
-          receiver, obj, key, arguments);
-    }
-  }
-
   MUST_USE_RESULT static MaybeHandle<AccessorPair> GetAccessorPairImpl(
       Handle<Object> receiver,
       Handle<JSObject> obj,
@@ -1725,7 +1631,7 @@
   MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
       Handle<JSObject> obj,
       uint32_t key,
-      JSReceiver::DeleteMode mode) V8_FINAL V8_OVERRIDE {
+      JSReceiver::DeleteMode mode) FINAL OVERRIDE {
     Isolate* isolate = obj->GetIsolate();
     Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
     Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
@@ -1828,6 +1734,7 @@
 
 
 void ElementsAccessor::TearDown() {
+  if (elements_accessors_ == NULL) return;
 #define ACCESSOR_DELETE(Class, Kind, Store) delete elements_accessors_[Kind];
   ELEMENTS_LIST(ACCESSOR_DELETE)
 #undef ACCESSOR_DELETE
@@ -1854,13 +1761,13 @@
     if (value >= 0) {
       Handle<Object> new_length = ElementsAccessorSubclass::
           SetLengthWithoutNormalize(backing_store, array, smi_length, value);
-      ASSERT(!new_length.is_null());
+      DCHECK(!new_length.is_null());
 
       // even though the proposed length was a smi, new_length could
       // still be a heap number because SetLengthWithoutNormalize doesn't
       // allow the array length property to drop below the index of
       // non-deletable elements.
-      ASSERT(new_length->IsSmi() || new_length->IsHeapNumber() ||
+      DCHECK(new_length->IsSmi() || new_length->IsHeapNumber() ||
              new_length->IsUndefined());
       if (new_length->IsSmi()) {
         array->set_length(*Handle<Smi>::cast(new_length));
@@ -1881,13 +1788,13 @@
     if (length->ToArrayIndex(&value)) {
       Handle<SeededNumberDictionary> dictionary =
           JSObject::NormalizeElements(array);
-      ASSERT(!dictionary.is_null());
+      DCHECK(!dictionary.is_null());
 
       Handle<Object> new_length = DictionaryElementsAccessor::
           SetLengthWithoutNormalize(dictionary, array, length, value);
-      ASSERT(!new_length.is_null());
+      DCHECK(!new_length.is_null());
 
-      ASSERT(new_length->IsNumber());
+      DCHECK(new_length->IsNumber());
       array->set_length(*new_length);
       return array;
     } else {
diff --git a/src/elements.h b/src/elements.h
index aa0159e..d0bddf9 100644
--- a/src/elements.h
+++ b/src/elements.h
@@ -6,9 +6,9 @@
 #define V8_ELEMENTS_H_
 
 #include "src/elements-kind.h"
-#include "src/objects.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/isolate.h"
+#include "src/objects.h"
 
 namespace v8 {
 namespace internal {
@@ -81,24 +81,6 @@
     return GetAttributes(receiver, holder, key, handle(holder->elements()));
   }
 
-  // Returns an element's type, or NONEXISTENT if there is no such
-  // element. This method doesn't iterate up the prototype chain.  The caller
-  // can optionally pass in the backing store to use for the check, which must
-  // be compatible with the ElementsKind of the ElementsAccessor. If
-  // backing_store is NULL, the holder->elements() is used as the backing store.
-  MUST_USE_RESULT virtual PropertyType GetType(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      uint32_t key,
-      Handle<FixedArrayBase> backing_store) = 0;
-
-  MUST_USE_RESULT inline PropertyType GetType(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      uint32_t key) {
-    return GetType(receiver, holder, key, handle(holder->elements()));
-  }
-
   // Returns an element's accessors, or NULL if the element does not exist or
   // is plain. This method doesn't iterate up the prototype chain.  The caller
   // can optionally pass in the backing store to use for the check, which must
@@ -199,7 +181,7 @@
 
   // Returns a shared ElementsAccessor for the specified ElementsKind.
   static ElementsAccessor* ForKind(ElementsKind elements_kind) {
-    ASSERT(elements_kind < kElementsKindCount);
+    DCHECK(elements_kind < kElementsKindCount);
     return elements_accessors_[elements_kind];
   }
 
diff --git a/src/execution.cc b/src/execution.cc
index 2766e76..7aa4f33 100644
--- a/src/execution.cc
+++ b/src/execution.cc
@@ -19,9 +19,7 @@
 
 
 void StackGuard::set_interrupt_limits(const ExecutionAccess& lock) {
-  ASSERT(isolate_ != NULL);
-  // Ignore attempts to interrupt when interrupts are postponed.
-  if (should_postpone_interrupts(lock)) return;
+  DCHECK(isolate_ != NULL);
   thread_local_.jslimit_ = kInterruptLimit;
   thread_local_.climit_ = kInterruptLimit;
   isolate_->heap()->SetStackLimits();
@@ -29,7 +27,7 @@
 
 
 void StackGuard::reset_limits(const ExecutionAccess& lock) {
-  ASSERT(isolate_ != NULL);
+  DCHECK(isolate_ != NULL);
   thread_local_.jslimit_ = thread_local_.real_jslimit_;
   thread_local_.climit_ = thread_local_.real_climit_;
   isolate_->heap()->SetStackLimits();
@@ -70,13 +68,12 @@
   // receiver instead to avoid having a 'this' pointer which refers
   // directly to a global object.
   if (receiver->IsGlobalObject()) {
-    Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
-    receiver = Handle<JSObject>(global->global_receiver());
+    receiver = handle(Handle<GlobalObject>::cast(receiver)->global_proxy());
   }
 
   // Make sure that the global object of the context we're about to
   // make the current one is indeed a global object.
-  ASSERT(function->context()->global_object()->IsGlobalObject());
+  DCHECK(function->context()->global_object()->IsGlobalObject());
 
   {
     // Save and restore context around invocation and block the
@@ -100,7 +97,7 @@
 
   // Update the pending exception flag and return the value.
   bool has_exception = value->IsException();
-  ASSERT(has_exception == isolate->has_pending_exception());
+  DCHECK(has_exception == isolate->has_pending_exception());
   if (has_exception) {
     isolate->ReportPendingMessages();
     // Reset stepping state when script exits with uncaught exception.
@@ -133,13 +130,8 @@
       !func->shared()->native() &&
       func->shared()->strict_mode() == SLOPPY) {
     if (receiver->IsUndefined() || receiver->IsNull()) {
-      Object* global = func->context()->global_object()->global_receiver();
-      // Under some circumstances, 'global' can be the JSBuiltinsObject
-      // In that case, don't rewrite.  (FWIW, the same holds for
-      // GetIsolate()->global_object()->global_receiver().)
-      if (!global->IsJSBuiltinsObject()) {
-        receiver = Handle<Object>(global, func->GetIsolate());
-      }
+      receiver = handle(func->global_proxy());
+      DCHECK(!receiver->IsJSBuiltinsObject());
     } else {
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, receiver, ToObject(isolate, receiver), Object);
@@ -153,52 +145,55 @@
 MaybeHandle<Object> Execution::New(Handle<JSFunction> func,
                                    int argc,
                                    Handle<Object> argv[]) {
-  return Invoke(true, func, func->GetIsolate()->global_object(), argc, argv);
+  return Invoke(true, func, handle(func->global_proxy()), argc, argv);
 }
 
 
 MaybeHandle<Object> Execution::TryCall(Handle<JSFunction> func,
-                                       Handle<Object> receiver,
-                                       int argc,
+                                       Handle<Object> receiver, int argc,
                                        Handle<Object> args[],
-                                       Handle<Object>* exception_out) {
+                                       MaybeHandle<Object>* exception_out) {
+  bool is_termination = false;
+  Isolate* isolate = func->GetIsolate();
+  MaybeHandle<Object> maybe_result;
+  if (exception_out != NULL) *exception_out = MaybeHandle<Object>();
   // Enter a try-block while executing the JavaScript code. To avoid
   // duplicate error printing it must be non-verbose.  Also, to avoid
   // creating message objects during stack overflow we shouldn't
   // capture messages.
-  v8::TryCatch catcher;
-  catcher.SetVerbose(false);
-  catcher.SetCaptureMessage(false);
+  {
+    v8::TryCatch catcher;
+    catcher.SetVerbose(false);
+    catcher.SetCaptureMessage(false);
 
-  // Get isolate now, because handle might be persistent
-  // and get destroyed in the next call.
-  Isolate* isolate = func->GetIsolate();
-  MaybeHandle<Object> maybe_result = Invoke(false, func, receiver, argc, args);
+    maybe_result = Invoke(false, func, receiver, argc, args);
 
-  if (maybe_result.is_null()) {
-    ASSERT(catcher.HasCaught());
-    ASSERT(isolate->has_pending_exception());
-    ASSERT(isolate->external_caught_exception());
-    if (exception_out != NULL) {
-      if (isolate->pending_exception() ==
-          isolate->heap()->termination_exception()) {
-        *exception_out = isolate->factory()->termination_exception();
-      } else {
-        *exception_out = v8::Utils::OpenHandle(*catcher.Exception());
+    if (maybe_result.is_null()) {
+      DCHECK(catcher.HasCaught());
+      DCHECK(isolate->has_pending_exception());
+      DCHECK(isolate->external_caught_exception());
+      if (exception_out != NULL) {
+        if (isolate->pending_exception() ==
+            isolate->heap()->termination_exception()) {
+          is_termination = true;
+        } else {
+          *exception_out = v8::Utils::OpenHandle(*catcher.Exception());
+        }
       }
+      isolate->OptionalRescheduleException(true);
     }
-    isolate->OptionalRescheduleException(true);
-  }
 
-  ASSERT(!isolate->has_pending_exception());
-  ASSERT(!isolate->external_caught_exception());
+    DCHECK(!isolate->has_pending_exception());
+    DCHECK(!isolate->external_caught_exception());
+  }
+  if (is_termination) isolate->TerminateExecution();
   return maybe_result;
 }
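
The restructured TryCall above scopes the v8::TryCatch to an inner block and calls TerminateExecution() only after that block exits, so the catcher that absorbed the termination exception is already destroyed when the interrupt is re-requested. A minimal sketch of the scoping pattern, with ScopedCatcher as a hypothetical stand-in for v8::TryCatch:

    #include <functional>

    // Hypothetical stand-in for v8::TryCatch: absorbs exceptions while alive.
    struct ScopedCatcher {
      bool saw_termination = false;
    };

    // Decide what happened while the catcher is alive, but act on a
    // termination only after the catcher has been destroyed, so it cannot
    // swallow the re-raised termination request.
    bool RunGuarded(const std::function<bool(ScopedCatcher*)>& body,
                    bool* terminate_after) {
      bool is_termination = false;
      bool ok = false;
      {
        ScopedCatcher catcher;
        ok = body(&catcher);
        if (!ok) is_termination = catcher.saw_termination;
      }  // catcher destroyed here
      *terminate_after = is_termination;  // caller may now re-terminate
      return ok;
    }
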
 
 
 Handle<Object> Execution::GetFunctionDelegate(Isolate* isolate,
                                               Handle<Object> object) {
-  ASSERT(!object->IsJSFunction());
+  DCHECK(!object->IsJSFunction());
   Factory* factory = isolate->factory();
 
   // If you return a function from here, it will be called when an
@@ -225,7 +220,7 @@
 
 MaybeHandle<Object> Execution::TryGetFunctionDelegate(Isolate* isolate,
                                                       Handle<Object> object) {
-  ASSERT(!object->IsJSFunction());
+  DCHECK(!object->IsJSFunction());
 
   // If object is a function proxy, get its handler. Iterate if necessary.
   Object* fun = *object;
@@ -244,16 +239,15 @@
 
   // If the Object doesn't have an instance-call handler we should
   // throw a non-callable exception.
-  i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
-      "called_non_callable", i::HandleVector<i::Object>(&object, 1));
-
-  return isolate->Throw<Object>(error_obj);
+  THROW_NEW_ERROR(isolate, NewTypeError("called_non_callable",
+                                        i::HandleVector<i::Object>(&object, 1)),
+                  Object);
 }
 
 
 Handle<Object> Execution::GetConstructorDelegate(Isolate* isolate,
                                                  Handle<Object> object) {
-  ASSERT(!object->IsJSFunction());
+  DCHECK(!object->IsJSFunction());
 
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a constructor.
@@ -279,7 +273,7 @@
 
 MaybeHandle<Object> Execution::TryGetConstructorDelegate(
     Isolate* isolate, Handle<Object> object) {
-  ASSERT(!object->IsJSFunction());
+  DCHECK(!object->IsJSFunction());
 
   // If you return a function from here, it will be called when an
   // attempt is made to call the given object as a constructor.
@@ -301,9 +295,9 @@
 
   // If the Object doesn't have an instance-call handler we should
   // throw a non-callable exception.
-  i::Handle<i::Object> error_obj = isolate->factory()->NewTypeError(
-      "called_non_callable", i::HandleVector<i::Object>(&object, 1));
-  return isolate->Throw<Object>(error_obj);
+  THROW_NEW_ERROR(isolate, NewTypeError("called_non_callable",
+                                        i::HandleVector<i::Object>(&object, 1)),
+                  Object);
 }
 
 
@@ -337,36 +331,71 @@
 }
 
 
-bool StackGuard::CheckInterrupt(int flagbit) {
+void StackGuard::PushPostponeInterruptsScope(PostponeInterruptsScope* scope) {
   ExecutionAccess access(isolate_);
-  return thread_local_.interrupt_flags_ & flagbit;
+  // Intercept already requested interrupts.
+  int intercepted = thread_local_.interrupt_flags_ & scope->intercept_mask_;
+  scope->intercepted_flags_ = intercepted;
+  thread_local_.interrupt_flags_ &= ~intercepted;
+  if (!has_pending_interrupts(access)) reset_limits(access);
+  // Add scope to the chain.
+  scope->prev_ = thread_local_.postpone_interrupts_;
+  thread_local_.postpone_interrupts_ = scope;
 }
 
 
-void StackGuard::RequestInterrupt(int flagbit) {
+void StackGuard::PopPostponeInterruptsScope() {
   ExecutionAccess access(isolate_);
-  thread_local_.interrupt_flags_ |= flagbit;
+  PostponeInterruptsScope* top = thread_local_.postpone_interrupts_;
+  // Make intercepted interrupts active.
+  DCHECK((thread_local_.interrupt_flags_ & top->intercept_mask_) == 0);
+  thread_local_.interrupt_flags_ |= top->intercepted_flags_;
+  if (has_pending_interrupts(access)) set_interrupt_limits(access);
+  // Remove scope from chain.
+  thread_local_.postpone_interrupts_ = top->prev_;
+}
+
+
+bool StackGuard::CheckInterrupt(InterruptFlag flag) {
+  ExecutionAccess access(isolate_);
+  return thread_local_.interrupt_flags_ & flag;
+}
+
+
+void StackGuard::RequestInterrupt(InterruptFlag flag) {
+  ExecutionAccess access(isolate_);
+  // Check the chain of PostponeInterruptsScopes for interception.
+  if (thread_local_.postpone_interrupts_ &&
+      thread_local_.postpone_interrupts_->Intercept(flag)) {
+    return;
+  }
+
+  // Not intercepted.  Set as active interrupt flag.
+  thread_local_.interrupt_flags_ |= flag;
   set_interrupt_limits(access);
 }
 
 
-void StackGuard::ClearInterrupt(int flagbit) {
+void StackGuard::ClearInterrupt(InterruptFlag flag) {
   ExecutionAccess access(isolate_);
-  thread_local_.interrupt_flags_ &= ~flagbit;
-  if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
-    reset_limits(access);
+  // Clear the interrupt flag from the chain of PostponeInterruptsScopes.
+  for (PostponeInterruptsScope* current = thread_local_.postpone_interrupts_;
+       current != NULL;
+       current = current->prev_) {
+    current->intercepted_flags_ &= ~flag;
   }
+
+  // Clear the interrupt flag from the active interrupt flags.
+  thread_local_.interrupt_flags_ &= ~flag;
+  if (!has_pending_interrupts(access)) reset_limits(access);
 }
 
 
 bool StackGuard::CheckAndClearInterrupt(InterruptFlag flag) {
   ExecutionAccess access(isolate_);
-  int flagbit = 1 << flag;
-  bool result = (thread_local_.interrupt_flags_ & flagbit);
-  thread_local_.interrupt_flags_ &= ~flagbit;
-  if (!should_postpone_interrupts(access) && !has_pending_interrupts(access)) {
-    reset_limits(access);
-  }
+  bool result = (thread_local_.interrupt_flags_ & flag);
+  thread_local_.interrupt_flags_ &= ~flag;
+  if (!has_pending_interrupts(access)) reset_limits(access);
   return result;
 }
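
Two related changes run through the interrupt code above: InterruptFlag values are now the bit masks themselves, so check/clear is plain bitwise arithmetic, and postponement is a chain of PostponeInterruptsScopes that intercept matching requests until they are popped. A compilable sketch of both ideas; unlike the real code it consults only the innermost scope, and every name is illustrative:

    #include <cassert>

    enum Flag { GC = 1 << 0, TERMINATE = 1 << 1 };

    struct PostponeScope {
      int intercept_mask_ = 0;  // interrupts this scope swallows
      int intercepted_ = 0;     // interrupts caught while active
      PostponeScope* prev_ = nullptr;
    };

    struct Guard {
      int flags_ = 0;                      // active interrupt bits
      PostponeScope* postpone_ = nullptr;  // innermost scope

      void Push(PostponeScope* s) {
        s->intercepted_ |= flags_ & s->intercept_mask_;  // grab pending ones
        flags_ &= ~s->intercept_mask_;
        s->prev_ = postpone_;
        postpone_ = s;
      }
      void Pop() {
        PostponeScope* top = postpone_;
        flags_ |= top->intercepted_;  // re-activate what was postponed
        postpone_ = top->prev_;
      }
      void Request(Flag f) {
        if (postpone_ != nullptr && (postpone_->intercept_mask_ & f) != 0) {
          postpone_->intercepted_ |= f;  // intercepted, not made active
          return;
        }
        flags_ |= f;  // flags are masks now: no "1 << flag" needed
      }
      bool CheckAndClear(Flag f) {
        bool result = (flags_ & f) != 0;
        flags_ &= ~f;
        return result;
      }
    };

    int main() {
      Guard g;
      PostponeScope scope;
      scope.intercept_mask_ = GC;
      g.Push(&scope);
      g.Request(GC);
      assert(!g.CheckAndClear(GC));  // postponed while the scope is active
      g.Pop();
      assert(g.CheckAndClear(GC));   // becomes active once the scope exits
      return 0;
    }
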
 
@@ -408,8 +437,7 @@
   jslimit_ = kIllegalLimit;
   real_climit_ = kIllegalLimit;
   climit_ = kIllegalLimit;
-  nesting_ = 0;
-  postpone_interrupts_nesting_ = 0;
+  postpone_interrupts_ = NULL;
   interrupt_flags_ = 0;
 }
 
@@ -417,19 +445,16 @@
 bool StackGuard::ThreadLocal::Initialize(Isolate* isolate) {
   bool should_set_stack_limits = false;
   if (real_climit_ == kIllegalLimit) {
-    // Takes the address of the limit variable in order to find out where
-    // the top of stack is right now.
     const uintptr_t kLimitSize = FLAG_stack_size * KB;
-    uintptr_t limit = reinterpret_cast<uintptr_t>(&limit) - kLimitSize;
-    ASSERT(reinterpret_cast<uintptr_t>(&limit) > kLimitSize);
+    DCHECK(GetCurrentStackPosition() > kLimitSize);
+    uintptr_t limit = GetCurrentStackPosition() - kLimitSize;
     real_jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
     jslimit_ = SimulatorStack::JsLimitFromCLimit(isolate, limit);
     real_climit_ = limit;
     climit_ = limit;
     should_set_stack_limits = true;
   }
-  nesting_ = 0;
-  postpone_interrupts_nesting_ = 0;
+  postpone_interrupts_ = NULL;
   interrupt_flags_ = 0;
   return should_set_stack_limits;
 }
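
Initialize now reads the stack top via GetCurrentStackPosition() instead of taking the address of a local variable, but both rest on the same observation. A sketch of the address-of-a-local trick the deleted lines used, assuming a downward-growing stack and an illustrative budget:

    #include <cstdint>
    #include <cstdio>

    // The address of a local approximates the current stack position on a
    // downward-growing stack; this is the trick the deleted lines relied on
    // before GetCurrentStackPosition() took over.
    uintptr_t ApproxStackPosition() {
      int marker = 0;
      return reinterpret_cast<uintptr_t>(&marker);
    }

    int main() {
      const uintptr_t kLimitSize = 512 * 1024;  // illustrative stack budget
      uintptr_t limit = ApproxStackPosition() - kLimitSize;
      std::printf("stack limit ~ %#llx\n",
                  static_cast<unsigned long long>(limit));
      return 0;
    }
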
@@ -461,7 +486,7 @@
     return Call(isolate,                                                \
                 isolate->name##_fun(),                                  \
                 isolate->js_builtins_object(),                          \
-                ARRAY_SIZE(argv), argv);                                \
+                arraysize(argv), argv);                                 \
   } while (false)
 
 
@@ -552,7 +577,7 @@
   Handle<Object> result;
   if (!TryCall(Handle<JSFunction>::cast(char_at),
                string,
-               ARRAY_SIZE(index_arg),
+               arraysize(index_arg),
                index_arg).ToHandle(&result)) {
     return factory->undefined_value();
   }
@@ -579,7 +604,7 @@
       Call(isolate,
            isolate->instantiate_fun(),
            isolate->js_builtins_object(),
-           ARRAY_SIZE(args),
+           arraysize(args),
            args),
       JSFunction);
   return Handle<JSFunction>::cast(result);
@@ -606,7 +631,7 @@
         Call(isolate,
              isolate->instantiate_fun(),
              isolate->js_builtins_object(),
-             ARRAY_SIZE(args),
+             arraysize(args),
              args),
         JSObject);
   }
@@ -622,7 +647,7 @@
   return Execution::Call(isolate,
                          isolate->configure_instance_fun(),
                          isolate->js_builtins_object(),
-                         ARRAY_SIZE(args),
+                         arraysize(args),
                          args);
 }
 
@@ -636,7 +661,7 @@
   MaybeHandle<Object> maybe_result =
       TryCall(isolate->get_stack_trace_line_fun(),
               isolate->js_builtins_object(),
-              ARRAY_SIZE(args),
+              arraysize(args),
               args);
   Handle<Object> result;
   if (!maybe_result.ToHandle(&result) || !result->IsString()) {
@@ -648,13 +673,6 @@
 
 
 Object* StackGuard::HandleInterrupts() {
-  {
-    ExecutionAccess access(isolate_);
-    if (should_postpone_interrupts(access)) {
-      return isolate_->heap()->undefined_value();
-    }
-  }
-
   if (CheckAndClearInterrupt(GC_REQUEST)) {
     isolate_->heap()->CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
   }
@@ -672,7 +690,7 @@
   }
 
   if (CheckAndClearInterrupt(INSTALL_CODE)) {
-    ASSERT(isolate_->concurrent_recompilation_enabled());
+    DCHECK(isolate_->concurrent_recompilation_enabled());
     isolate_->optimizing_compiler_thread()->InstallOptimizedFunctions();
   }
 
diff --git a/src/execution.h b/src/execution.h
index 74d0feb..89175cd 100644
--- a/src/execution.h
+++ b/src/execution.h
@@ -10,7 +10,7 @@
 namespace v8 {
 namespace internal {
 
-class Execution V8_FINAL : public AllStatic {
+class Execution FINAL : public AllStatic {
  public:
   // Call a function, the caller supplies a receiver and an array
   // of arguments. Arguments are Object* type. After function returns,
@@ -46,12 +46,12 @@
   // any thrown exceptions. The return value is either the result of
   // calling the function (if caught exception is false) or the exception
   // that occurred (if caught exception is true).
-  static MaybeHandle<Object> TryCall(
-      Handle<JSFunction> func,
-      Handle<Object> receiver,
-      int argc,
-      Handle<Object> argv[],
-      Handle<Object>* exception_out = NULL);
+  // In the exception case, exception_out holds the caught exception, unless
+  // it is a termination exception.
+  static MaybeHandle<Object> TryCall(Handle<JSFunction> func,
+                                     Handle<Object> receiver, int argc,
+                                     Handle<Object> argv[],
+                                     MaybeHandle<Object>* exception_out = NULL);
 
   // ECMA-262 9.3
   MUST_USE_RESULT static MaybeHandle<Object> ToNumber(
@@ -122,12 +122,13 @@
 
 
 class ExecutionAccess;
+class PostponeInterruptsScope;
 
 
 // StackGuard contains the handling of the limits that are used to limit the
 // number of nested invocations of JavaScript and the stack size used in each
 // invocation.
-class StackGuard V8_FINAL {
+class StackGuard FINAL {
  public:
   // Pass the address beyond which the stack should not grow.  The stack
   // is assumed to grow downwards.
@@ -145,22 +146,32 @@
   // it has been set up.
   void ClearThread(const ExecutionAccess& lock);
 
-#define INTERRUPT_LIST(V)                                       \
-  V(DEBUGBREAK, DebugBreak)                                     \
-  V(DEBUGCOMMAND, DebugCommand)                                 \
-  V(TERMINATE_EXECUTION, TerminateExecution)                    \
-  V(GC_REQUEST, GC)                                             \
-  V(INSTALL_CODE, InstallCode)                                  \
-  V(API_INTERRUPT, ApiInterrupt)                                \
-  V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites)
+#define INTERRUPT_LIST(V)                                          \
+  V(DEBUGBREAK, DebugBreak, 0)                                     \
+  V(DEBUGCOMMAND, DebugCommand, 1)                                 \
+  V(TERMINATE_EXECUTION, TerminateExecution, 2)                    \
+  V(GC_REQUEST, GC, 3)                                             \
+  V(INSTALL_CODE, InstallCode, 4)                                  \
+  V(API_INTERRUPT, ApiInterrupt, 5)                                \
+  V(DEOPT_MARKED_ALLOCATION_SITES, DeoptMarkedAllocationSites, 6)
 
-#define V(NAME, Name)                                              \
-  inline bool Check##Name() { return CheckInterrupt(1 << NAME); }  \
-  inline void Request##Name() { RequestInterrupt(1 << NAME); }     \
-  inline void Clear##Name() { ClearInterrupt(1 << NAME); }
+#define V(NAME, Name, id)                                     \
+  inline bool Check##Name() { return CheckInterrupt(NAME); }  \
+  inline void Request##Name() { RequestInterrupt(NAME); }     \
+  inline void Clear##Name() { ClearInterrupt(NAME); }
   INTERRUPT_LIST(V)
 #undef V
 
+  // Flag used to set the interrupt causes.
+  enum InterruptFlag {
+  #define V(NAME, Name, id) NAME = (1 << id),
+    INTERRUPT_LIST(V)
+  #undef V
+  #define V(NAME, Name, id) NAME |
+    ALL_INTERRUPTS = INTERRUPT_LIST(V) 0
+  #undef V
+  };
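
INTERRUPT_LIST is an X-macro that now carries an explicit bit id, and the header expands the same list three times: once for the Check/Request/Clear accessors, once for the enum values, and once to OR everything into ALL_INTERRUPTS, where the trailing 0 closes the dangling "NAME |". A toy, compilable version of the same trick:

    // One list, several expansions: the same shape as INTERRUPT_LIST above.
    #define COLOR_LIST(V) \
      V(RED, Red, 0)      \
      V(GREEN, Green, 1)  \
      V(BLUE, Blue, 2)

    enum Color {
    #define V(NAME, Name, id) NAME = (1 << id),
      COLOR_LIST(V)
    #undef V
    // OR-fold every enumerator; the trailing 0 ends the chain of "NAME |".
    #define V(NAME, Name, id) NAME |
      ALL_COLORS = COLOR_LIST(V) 0
    #undef V
    };

    static_assert(ALL_COLORS == 7, "all three bits set");

    int main() { return 0; }
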
+
   // This provides an asynchronous read of the stack limits for the current
   // thread.  There are no locks protecting this, but it is assumed that you
   // have the global V8 lock if you are using multiple V8 threads.
@@ -190,33 +201,17 @@
  private:
   StackGuard();
 
-// Flag used to set the interrupt causes.
-enum InterruptFlag {
-#define V(NAME, Name) NAME,
-  INTERRUPT_LIST(V)
-#undef V
-  NUMBER_OF_INTERRUPTS
-};
-
-  bool CheckInterrupt(int flagbit);
-  void RequestInterrupt(int flagbit);
-  void ClearInterrupt(int flagbit);
+  bool CheckInterrupt(InterruptFlag flag);
+  void RequestInterrupt(InterruptFlag flag);
+  void ClearInterrupt(InterruptFlag flag);
   bool CheckAndClearInterrupt(InterruptFlag flag);
 
   // You should hold the ExecutionAccess lock when calling this method.
   bool has_pending_interrupts(const ExecutionAccess& lock) {
-    // Sanity check: We shouldn't be asking about pending interrupts
-    // unless we're not postponing them anymore.
-    ASSERT(!should_postpone_interrupts(lock));
     return thread_local_.interrupt_flags_ != 0;
   }
 
   // You should hold the ExecutionAccess lock when calling this method.
-  bool should_postpone_interrupts(const ExecutionAccess& lock) {
-    return thread_local_.postpone_interrupts_nesting_ > 0;
-  }
-
-  // You should hold the ExecutionAccess lock when calling this method.
   inline void set_interrupt_limits(const ExecutionAccess& lock);
 
   // Reset limits to actual values. For example after handling interrupt.
@@ -227,7 +222,7 @@
   void EnableInterrupts();
   void DisableInterrupts();
 
-#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64
+#if V8_TARGET_ARCH_64_BIT
   static const uintptr_t kInterruptLimit = V8_UINT64_C(0xfffffffffffffffe);
   static const uintptr_t kIllegalLimit = V8_UINT64_C(0xfffffffffffffff8);
 #else
@@ -235,7 +230,10 @@
   static const uintptr_t kIllegalLimit = 0xfffffff8;
 #endif
 
-  class ThreadLocal V8_FINAL {
+  void PushPostponeInterruptsScope(PostponeInterruptsScope* scope);
+  void PopPostponeInterruptsScope();
+
+  class ThreadLocal FINAL {
    public:
     ThreadLocal() { Clear(); }
     // You should hold the ExecutionAccess lock when you call Initialize or
@@ -259,16 +257,10 @@
     uintptr_t real_climit_;  // Actual C++ stack limit set for the VM.
     uintptr_t climit_;
 
-    int nesting_;
-    int postpone_interrupts_nesting_;
+    PostponeInterruptsScope* postpone_interrupts_;
     int interrupt_flags_;
   };
 
-  class StackPointer {
-   public:
-    inline uintptr_t address() { return reinterpret_cast<uintptr_t>(this); }
-  };
-
   // TODO(isolates): Technically this could be calculated directly from a
   //                 pointer to StackGuard.
   Isolate* isolate_;
diff --git a/src/extensions/externalize-string-extension.cc b/src/extensions/externalize-string-extension.cc
index c3b1ec7..8d38dfa 100644
--- a/src/extensions/externalize-string-extension.cc
+++ b/src/extensions/externalize-string-extension.cc
@@ -27,15 +27,15 @@
 };
 
 
-typedef SimpleStringResource<char, v8::String::ExternalAsciiStringResource>
-    SimpleAsciiStringResource;
+typedef SimpleStringResource<char, v8::String::ExternalOneByteStringResource>
+    SimpleOneByteStringResource;
 typedef SimpleStringResource<uc16, v8::String::ExternalStringResource>
     SimpleTwoByteStringResource;
 
 
 const char* const ExternalizeStringExtension::kSource =
     "native function externalizeString();"
-    "native function isAsciiString();";
+    "native function isOneByteString();";
 
 v8::Handle<v8::FunctionTemplate>
 ExternalizeStringExtension::GetNativeFunctionTemplate(
@@ -44,9 +44,9 @@
     return v8::FunctionTemplate::New(isolate,
                                      ExternalizeStringExtension::Externalize);
   } else {
-    ASSERT(strcmp(*v8::String::Utf8Value(str), "isAsciiString") == 0);
+    DCHECK(strcmp(*v8::String::Utf8Value(str), "isOneByteString") == 0);
     return v8::FunctionTemplate::New(isolate,
-                                     ExternalizeStringExtension::IsAscii);
+                                     ExternalizeStringExtension::IsOneByte);
   }
 }
 
@@ -81,7 +81,7 @@
   if (string->IsOneByteRepresentation() && !force_two_byte) {
     uint8_t* data = new uint8_t[string->length()];
     String::WriteToFlat(*string, data, 0, string->length());
-    SimpleAsciiStringResource* resource = new SimpleAsciiStringResource(
+    SimpleOneByteStringResource* resource = new SimpleOneByteStringResource(
         reinterpret_cast<char*>(data), string->length());
     result = string->MakeExternal(resource);
     if (result) {
@@ -109,12 +109,12 @@
 }
 
 
-void ExternalizeStringExtension::IsAscii(
+void ExternalizeStringExtension::IsOneByte(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
   if (args.Length() != 1 || !args[0]->IsString()) {
     args.GetIsolate()->ThrowException(v8::String::NewFromUtf8(
         args.GetIsolate(),
-        "isAsciiString() requires a single string argument."));
+        "isOneByteString() requires a single string argument."));
     return;
   }
   bool is_one_byte =
diff --git a/src/extensions/externalize-string-extension.h b/src/extensions/externalize-string-extension.h
index 74b5665..f8c54f8 100644
--- a/src/extensions/externalize-string-extension.h
+++ b/src/extensions/externalize-string-extension.h
@@ -17,7 +17,7 @@
       v8::Isolate* isolate,
       v8::Handle<v8::String> name);
   static void Externalize(const v8::FunctionCallbackInfo<v8::Value>& args);
-  static void IsAscii(const v8::FunctionCallbackInfo<v8::Value>& args);
+  static void IsOneByte(const v8::FunctionCallbackInfo<v8::Value>& args);
 
  private:
   static const char* const kSource;
diff --git a/src/extensions/free-buffer-extension.cc b/src/extensions/free-buffer-extension.cc
index ffba655..e8c7732 100644
--- a/src/extensions/free-buffer-extension.cc
+++ b/src/extensions/free-buffer-extension.cc
@@ -3,7 +3,8 @@
 // found in the LICENSE file.
 
 #include "src/extensions/free-buffer-extension.h"
-#include "src/platform.h"
+
+#include "src/base/platform/platform.h"
 #include "src/v8.h"
 
 namespace v8 {
diff --git a/src/extensions/gc-extension.cc b/src/extensions/gc-extension.cc
index e3c2b1d..74b7481 100644
--- a/src/extensions/gc-extension.cc
+++ b/src/extensions/gc-extension.cc
@@ -3,7 +3,8 @@
 // found in the LICENSE file.
 
 #include "src/extensions/gc-extension.h"
-#include "src/platform.h"
+
+#include "src/base/platform/platform.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/extensions/statistics-extension.cc b/src/extensions/statistics-extension.cc
index fe34c9a..6f63245 100644
--- a/src/extensions/statistics-extension.cc
+++ b/src/extensions/statistics-extension.cc
@@ -14,7 +14,7 @@
 v8::Handle<v8::FunctionTemplate> StatisticsExtension::GetNativeFunctionTemplate(
     v8::Isolate* isolate,
     v8::Handle<v8::String> str) {
-  ASSERT(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
+  DCHECK(strcmp(*v8::String::Utf8Value(str), "getV8Statistics") == 0);
   return v8::FunctionTemplate::New(isolate, StatisticsExtension::GetCounters);
 }
 
diff --git a/src/extensions/trigger-failure-extension.cc b/src/extensions/trigger-failure-extension.cc
index 30cd9c2..b0aacb4 100644
--- a/src/extensions/trigger-failure-extension.cc
+++ b/src/extensions/trigger-failure-extension.cc
@@ -44,13 +44,13 @@
 
 void TriggerFailureExtension::TriggerAssertFalse(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
-  ASSERT(false);
+  DCHECK(false);
 }
 
 
 void TriggerFailureExtension::TriggerSlowAssertFalse(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
-  SLOW_ASSERT(false);
+  SLOW_DCHECK(false);
 }
 
 } }  // namespace v8::internal
diff --git a/src/factory.cc b/src/factory.cc
index 3d373fb..45a79c1 100644
--- a/src/factory.cc
+++ b/src/factory.cc
@@ -4,6 +4,8 @@
 
 #include "src/factory.h"
 
+#include "src/allocation-site-scopes.h"
+#include "src/base/bits.h"
 #include "src/conversions.h"
 #include "src/isolate-inl.h"
 #include "src/macro-assembler.h"
@@ -60,7 +62,7 @@
 
 
 Handle<FixedArray> Factory::NewFixedArray(int size, PretenureFlag pretenure) {
-  ASSERT(0 <= size);
+  DCHECK(0 <= size);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateFixedArray(size, pretenure),
@@ -70,7 +72,7 @@
 
 Handle<FixedArray> Factory::NewFixedArrayWithHoles(int size,
                                                    PretenureFlag pretenure) {
-  ASSERT(0 <= size);
+  DCHECK(0 <= size);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateFixedArrayWithFiller(size,
@@ -90,7 +92,7 @@
 
 Handle<FixedArrayBase> Factory::NewFixedDoubleArray(int size,
                                                     PretenureFlag pretenure) {
-  ASSERT(0 <= size);
+  DCHECK(0 <= size);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateUninitializedFixedDoubleArray(size, pretenure),
@@ -101,7 +103,7 @@
 Handle<FixedArrayBase> Factory::NewFixedDoubleArrayWithHoles(
     int size,
     PretenureFlag pretenure) {
-  ASSERT(0 <= size);
+  DCHECK(0 <= size);
   Handle<FixedArrayBase> array = NewFixedDoubleArray(size, pretenure);
   if (size > 0) {
     Handle<FixedDoubleArray> double_array =
@@ -116,7 +118,7 @@
 
 Handle<ConstantPoolArray> Factory::NewConstantPoolArray(
     const ConstantPoolArray::NumberOfEntries& small) {
-  ASSERT(small.total_count() > 0);
+  DCHECK(small.total_count() > 0);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateConstantPoolArray(small),
@@ -127,8 +129,8 @@
 Handle<ConstantPoolArray> Factory::NewExtendedConstantPoolArray(
     const ConstantPoolArray::NumberOfEntries& small,
     const ConstantPoolArray::NumberOfEntries& extended) {
-  ASSERT(small.total_count() > 0);
-  ASSERT(extended.total_count() > 0);
+  DCHECK(small.total_count() > 0);
+  DCHECK(extended.total_count() > 0);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExtendedConstantPoolArray(small, extended),
@@ -151,7 +153,6 @@
       Handle<AccessorPair>::cast(NewStruct(ACCESSOR_PAIR_TYPE));
   accessors->set_getter(*the_hole_value(), SKIP_WRITE_BARRIER);
   accessors->set_setter(*the_hole_value(), SKIP_WRITE_BARRIER);
-  accessors->set_access_flags(Smi::FromInt(0), SKIP_WRITE_BARRIER);
   return accessors;
 }
 
@@ -186,7 +187,7 @@
 
 Handle<String> Factory::InternalizeOneByteString(
     Handle<SeqOneByteString> string, int from, int length) {
-  SubStringKey<uint8_t> key(string, from, length);
+  SeqOneByteSubStringKey key(string, from, length);
   return InternalizeStringWithKey(&key);
 }
 
@@ -203,12 +204,6 @@
 }
 
 
-template Handle<String> Factory::InternalizeStringWithKey<
-    SubStringKey<uint8_t> > (SubStringKey<uint8_t>* key);
-template Handle<String> Factory::InternalizeStringWithKey<
-    SubStringKey<uint16_t> > (SubStringKey<uint16_t>* key);
-
-
 MaybeHandle<String> Factory::NewStringFromOneByte(Vector<const uint8_t> string,
                                                   PretenureFlag pretenure) {
   int length = string.length();
@@ -246,14 +241,14 @@
   decoder->Reset(string.start() + non_ascii_start,
                  length - non_ascii_start);
   int utf16_length = decoder->Utf16Length();
-  ASSERT(utf16_length > 0);
+  DCHECK(utf16_length > 0);
   // Allocate string.
   Handle<SeqTwoByteString> result;
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate(), result,
       NewRawTwoByteString(non_ascii_start + utf16_length, pretenure),
       String);
-  // Copy ascii portion.
+  // Copy ASCII portion.
   uint16_t* data = result->GetChars();
   const char* ascii_data = string.start();
   for (int i = 0; i < non_ascii_start; i++) {
@@ -270,6 +265,7 @@
   int length = string.length();
   const uc16* start = string.start();
   if (String::IsOneByte(start, length)) {
+    if (length == 1) return LookupSingleCharacterStringFromCode(string[0]);
     Handle<SeqOneByteString> result;
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate(),
@@ -312,6 +308,17 @@
 }
 
 
+MUST_USE_RESULT Handle<String> Factory::NewOneByteInternalizedSubString(
+    Handle<SeqOneByteString> string, int offset, int length,
+    uint32_t hash_field) {
+  CALL_HEAP_FUNCTION(
+      isolate(), isolate()->heap()->AllocateOneByteInternalizedString(
+                     Vector<const uint8_t>(string->GetChars() + offset, length),
+                     hash_field),
+      String);
+}
+
+
 MUST_USE_RESULT Handle<String> Factory::NewTwoByteInternalizedString(
       Vector<const uc16> str,
       uint32_t hash_field) {
@@ -340,16 +347,17 @@
   // Find the corresponding internalized string map for strings.
   switch (string->map()->instance_type()) {
     case STRING_TYPE: return internalized_string_map();
-    case ASCII_STRING_TYPE: return ascii_internalized_string_map();
+    case ONE_BYTE_STRING_TYPE:
+      return one_byte_internalized_string_map();
     case EXTERNAL_STRING_TYPE: return external_internalized_string_map();
-    case EXTERNAL_ASCII_STRING_TYPE:
-      return external_ascii_internalized_string_map();
+    case EXTERNAL_ONE_BYTE_STRING_TYPE:
+      return external_one_byte_internalized_string_map();
     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
       return external_internalized_string_with_one_byte_data_map();
     case SHORT_EXTERNAL_STRING_TYPE:
       return short_external_internalized_string_map();
-    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
-      return short_external_ascii_internalized_string_map();
+    case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
+      return short_external_one_byte_internalized_string_map();
     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
       return short_external_internalized_string_with_one_byte_data_map();
     default: return MaybeHandle<Map>();  // No match found.
@@ -360,7 +368,7 @@
 MaybeHandle<SeqOneByteString> Factory::NewRawOneByteString(
     int length, PretenureFlag pretenure) {
   if (length > String::kMaxLength || length < 0) {
-    return isolate()->Throw<SeqOneByteString>(NewInvalidStringLengthError());
+    THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqOneByteString);
   }
   CALL_HEAP_FUNCTION(
       isolate(),
@@ -372,7 +380,7 @@
 MaybeHandle<SeqTwoByteString> Factory::NewRawTwoByteString(
     int length, PretenureFlag pretenure) {
   if (length > String::kMaxLength || length < 0) {
-    return isolate()->Throw<SeqTwoByteString>(NewInvalidStringLengthError());
+    THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), SeqTwoByteString);
   }
   CALL_HEAP_FUNCTION(
       isolate(),
@@ -397,7 +405,7 @@
     single_character_string_cache()->set(code, *result);
     return result;
   }
-  ASSERT(code <= String::kMaxUtf16CodeUnitU);
+  DCHECK(code <= String::kMaxUtf16CodeUnitU);
 
   Handle<SeqTwoByteString> result = NewRawTwoByteString(1).ToHandleChecked();
   result->SeqTwoByteStringSet(0, static_cast<uint16_t>(code));
@@ -429,7 +437,8 @@
   // when building the new string.
   if (static_cast<unsigned>(c1 | c2) <= String::kMaxOneByteCharCodeU) {
     // We can do this.
-    ASSERT(IsPowerOf2(String::kMaxOneByteCharCodeU + 1));  // because of this.
+    DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU +
+                                      1));  // because of this.
     Handle<SeqOneByteString> str =
         isolate->factory()->NewRawOneByteString(2).ToHandleChecked();
     uint8_t* dest = str->GetChars();
@@ -477,7 +486,7 @@
   // Make sure that an out of memory exception is thrown if the length
   // of the new cons string is too large.
   if (length > String::kMaxLength || length < 0) {
-    return isolate()->Throw<String>(NewInvalidStringLengthError());
+    THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
   }
 
   bool left_is_one_byte = left->IsOneByteRepresentation();
@@ -486,12 +495,12 @@
   bool is_one_byte_data_in_two_byte_string = false;
   if (!is_one_byte) {
     // At least one of the strings uses two-byte representation so we
-    // can't use the fast case code for short ASCII strings below, but
-    // we can try to save memory if all chars actually fit in ASCII.
+    // can't use the fast case code for short one-byte strings below, but
+    // we can try to save memory if all chars actually fit in one byte.
     is_one_byte_data_in_two_byte_string =
         left->HasOnlyOneByteChars() && right->HasOnlyOneByteChars();
     if (is_one_byte_data_in_two_byte_string) {
-      isolate()->counters()->string_add_runtime_ext_to_ascii()->Increment();
+      isolate()->counters()->string_add_runtime_ext_to_one_byte()->Increment();
     }
   }
 
@@ -499,8 +508,8 @@
   if (length < ConsString::kMinLength) {
     // Note that neither of the two inputs can be a slice because:
     STATIC_ASSERT(ConsString::kMinLength <= SlicedString::kMinLength);
-    ASSERT(left->IsFlat());
-    ASSERT(right->IsFlat());
+    DCHECK(left->IsFlat());
+    DCHECK(right->IsFlat());
 
     STATIC_ASSERT(ConsString::kMinLength <= String::kMaxLength);
     if (is_one_byte) {
@@ -509,14 +518,15 @@
       DisallowHeapAllocation no_gc;
       uint8_t* dest = result->GetChars();
       // Copy left part.
-      const uint8_t* src = left->IsExternalString()
-          ? Handle<ExternalAsciiString>::cast(left)->GetChars()
-          : Handle<SeqOneByteString>::cast(left)->GetChars();
+      const uint8_t* src =
+          left->IsExternalString()
+              ? Handle<ExternalOneByteString>::cast(left)->GetChars()
+              : Handle<SeqOneByteString>::cast(left)->GetChars();
       for (int i = 0; i < left_length; i++) *dest++ = src[i];
       // Copy right part.
       src = right->IsExternalString()
-          ? Handle<ExternalAsciiString>::cast(right)->GetChars()
-          : Handle<SeqOneByteString>::cast(right)->GetChars();
+                ? Handle<ExternalOneByteString>::cast(right)->GetChars()
+                : Handle<SeqOneByteString>::cast(right)->GetChars();
       for (int i = 0; i < right_length; i++) *dest++ = src[i];
       return result;
     }
@@ -529,7 +539,8 @@
   }
 
   Handle<Map> map = (is_one_byte || is_one_byte_data_in_two_byte_string)
-      ? cons_ascii_string_map()  : cons_string_map();
+                        ? cons_one_byte_string_map()
+                        : cons_string_map();
   Handle<ConsString> result =  New<ConsString>(map, NEW_SPACE);
 
   DisallowHeapAllocation no_gc;
@@ -543,26 +554,13 @@
 }
 
 
-Handle<String> Factory::NewFlatConcatString(Handle<String> first,
-                                            Handle<String> second) {
-  int total_length = first->length() + second->length();
-  if (first->IsOneByteRepresentation() && second->IsOneByteRepresentation()) {
-    return ConcatStringContent<uint8_t>(
-        NewRawOneByteString(total_length).ToHandleChecked(), first, second);
-  } else {
-    return ConcatStringContent<uc16>(
-        NewRawTwoByteString(total_length).ToHandleChecked(), first, second);
-  }
-}
-
-
 Handle<String> Factory::NewProperSubString(Handle<String> str,
                                            int begin,
                                            int end) {
 #if VERIFY_HEAP
   if (FLAG_verify_heap) str->StringVerify();
 #endif
-  ASSERT(begin > 0 || end < str->length());
+  DCHECK(begin > 0 || end < str->length());
 
   str = String::Flatten(str);
 
@@ -606,9 +604,10 @@
     offset += slice->offset();
   }
 
-  ASSERT(str->IsSeqString() || str->IsExternalString());
-  Handle<Map> map = str->IsOneByteRepresentation() ? sliced_ascii_string_map()
-                                                   : sliced_string_map();
+  DCHECK(str->IsSeqString() || str->IsExternalString());
+  Handle<Map> map = str->IsOneByteRepresentation()
+                        ? sliced_one_byte_string_map()
+                        : sliced_string_map();
   Handle<SlicedString> slice = New<SlicedString>(map, NEW_SPACE);
 
   slice->set_hash_field(String::kEmptyHashField);
@@ -619,16 +618,16 @@
 }
 
 
-MaybeHandle<String> Factory::NewExternalStringFromAscii(
-    const ExternalAsciiString::Resource* resource) {
+MaybeHandle<String> Factory::NewExternalStringFromOneByte(
+    const ExternalOneByteString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
-    return isolate()->Throw<String>(NewInvalidStringLengthError());
+    THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
   }
 
-  Handle<Map> map = external_ascii_string_map();
-  Handle<ExternalAsciiString> external_string =
-      New<ExternalAsciiString>(map, NEW_SPACE);
+  Handle<Map> map = external_one_byte_string_map();
+  Handle<ExternalOneByteString> external_string =
+      New<ExternalOneByteString>(map, NEW_SPACE);
   external_string->set_length(static_cast<int>(length));
   external_string->set_hash_field(String::kEmptyHashField);
   external_string->set_resource(resource);
@@ -641,7 +640,7 @@
     const ExternalTwoByteString::Resource* resource) {
   size_t length = resource->length();
   if (length > static_cast<size_t>(String::kMaxLength)) {
-    return isolate()->Throw<String>(NewInvalidStringLengthError());
+    THROW_NEW_ERROR(isolate(), NewInvalidStringLengthError(), String);
   }
 
   // For small strings we check whether the resource contains only
@@ -676,12 +675,20 @@
 }
 
 
+Handle<Symbol> Factory::NewPrivateOwnSymbol() {
+  Handle<Symbol> symbol = NewSymbol();
+  symbol->set_is_private(true);
+  symbol->set_is_own(true);
+  return symbol;
+}
+
+
 Handle<Context> Factory::NewNativeContext() {
   Handle<FixedArray> array = NewFixedArray(Context::NATIVE_CONTEXT_SLOTS);
   array->set_map_no_write_barrier(*native_context_map());
   Handle<Context> context = Handle<Context>::cast(array);
   context->set_js_array_maps(*undefined_value());
-  ASSERT(context->IsNativeContext());
+  DCHECK(context->IsNativeContext());
   return context;
 }
 
@@ -696,7 +703,7 @@
   context->set_previous(function->context());
   context->set_extension(*scope_info);
   context->set_global_object(function->context()->global_object());
-  ASSERT(context->IsGlobalContext());
+  DCHECK(context->IsGlobalContext());
   return context;
 }
 
@@ -714,7 +721,7 @@
 
 Handle<Context> Factory::NewFunctionContext(int length,
                                             Handle<JSFunction> function) {
-  ASSERT(length >= Context::MIN_CONTEXT_SLOTS);
+  DCHECK(length >= Context::MIN_CONTEXT_SLOTS);
   Handle<FixedArray> array = NewFixedArray(length);
   array->set_map_no_write_barrier(*function_context_map());
   Handle<Context> context = Handle<Context>::cast(array);
@@ -862,7 +869,7 @@
 
 
 Handle<ByteArray> Factory::NewByteArray(int length, PretenureFlag pretenure) {
-  ASSERT(0 <= length);
+  DCHECK(0 <= length);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateByteArray(length, pretenure),
@@ -874,7 +881,7 @@
                                                 ExternalArrayType array_type,
                                                 void* external_pointer,
                                                 PretenureFlag pretenure) {
-  ASSERT(0 <= length && length <= Smi::kMaxValue);
+  DCHECK(0 <= length && length <= Smi::kMaxValue);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateExternalArray(length,
@@ -889,7 +896,7 @@
     int length,
     ExternalArrayType array_type,
     PretenureFlag pretenure) {
-  ASSERT(0 <= length && length <= Smi::kMaxValue);
+  DCHECK(0 <= length && length <= Smi::kMaxValue);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateFixedTypedArray(length,
@@ -981,7 +988,7 @@
 
 Handle<FixedArray> Factory::CopyAndTenureFixedCOWArray(
     Handle<FixedArray> array) {
-  ASSERT(isolate()->heap()->InNewSpace(*array));
+  DCHECK(isolate()->heap()->InNewSpace(*array));
   CALL_HEAP_FUNCTION(isolate(),
                      isolate()->heap()->CopyAndTenureFixedCOWArray(*array),
                      FixedArray);
@@ -1009,7 +1016,7 @@
   // We need to distinguish the minus zero value and this cannot be
   // done after conversion to int. Doing this by comparing bit
   // patterns is faster than using fpclassify() et al.
-  if (IsMinusZero(value)) return NewHeapNumber(-0.0, pretenure);
+  if (IsMinusZero(value)) return NewHeapNumber(-0.0, IMMUTABLE, pretenure);
 
   int int_value = FastD2I(value);
   if (value == int_value && Smi::IsValid(int_value)) {
@@ -1017,15 +1024,15 @@
   }
 
   // Materialize the value in the heap.
-  return NewHeapNumber(value, pretenure);
+  return NewHeapNumber(value, IMMUTABLE, pretenure);
 }
 
 
 Handle<Object> Factory::NewNumberFromInt(int32_t value,
                                          PretenureFlag pretenure) {
   if (Smi::IsValid(value)) return handle(Smi::FromInt(value), isolate());
-  // Bypass NumberFromDouble to avoid various redundant checks.
-  return NewHeapNumber(FastI2D(value), pretenure);
+  // Bypass NewNumber to avoid various redundant checks.
+  return NewHeapNumber(FastI2D(value), IMMUTABLE, pretenure);
 }
 
 
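The comment above notes that minus zero has to be detected by comparing bit patterns, because -0.0 == 0.0 under ordinary floating-point comparison and fpclassify() is slower. A standalone sketch of that test (an illustrative helper, not V8's exact IsMinusZero):

#include <cstdint>
#include <cstring>

static inline bool IsMinusZeroSketch(double value) {
  // Only -0.0 has the IEEE-754 sign bit set with all other bits clear,
  // so a raw bit comparison separates it from +0.0.
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits == (uint64_t{1} << 63);
}
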
@@ -1035,71 +1042,72 @@
   if (int32v >= 0 && Smi::IsValid(int32v)) {
     return handle(Smi::FromInt(int32v), isolate());
   }
-  return NewHeapNumber(FastUI2D(value), pretenure);
+  return NewHeapNumber(FastUI2D(value), IMMUTABLE, pretenure);
 }
 
 
 Handle<HeapNumber> Factory::NewHeapNumber(double value,
+                                          MutableMode mode,
                                           PretenureFlag pretenure) {
   CALL_HEAP_FUNCTION(
       isolate(),
-      isolate()->heap()->AllocateHeapNumber(value, pretenure), HeapNumber);
+      isolate()->heap()->AllocateHeapNumber(value, mode, pretenure),
+      HeapNumber);
 }
 
 
-Handle<Object> Factory::NewTypeError(const char* message,
-                                     Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewTypeError(const char* message,
+                                          Vector<Handle<Object> > args) {
   return NewError("MakeTypeError", message, args);
 }
 
 
-Handle<Object> Factory::NewTypeError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewTypeError(Handle<String> message) {
   return NewError("$TypeError", message);
 }
 
 
-Handle<Object> Factory::NewRangeError(const char* message,
-                                      Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewRangeError(const char* message,
+                                           Vector<Handle<Object> > args) {
   return NewError("MakeRangeError", message, args);
 }
 
 
-Handle<Object> Factory::NewRangeError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewRangeError(Handle<String> message) {
   return NewError("$RangeError", message);
 }
 
 
-Handle<Object> Factory::NewSyntaxError(const char* message,
-                                       Handle<JSArray> args) {
+MaybeHandle<Object> Factory::NewSyntaxError(const char* message,
+                                            Handle<JSArray> args) {
   return NewError("MakeSyntaxError", message, args);
 }
 
 
-Handle<Object> Factory::NewSyntaxError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewSyntaxError(Handle<String> message) {
   return NewError("$SyntaxError", message);
 }
 
 
-Handle<Object> Factory::NewReferenceError(const char* message,
-                                          Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewReferenceError(const char* message,
+                                               Vector<Handle<Object> > args) {
   return NewError("MakeReferenceError", message, args);
 }
 
 
-Handle<Object> Factory::NewReferenceError(const char* message,
-                                          Handle<JSArray> args) {
+MaybeHandle<Object> Factory::NewReferenceError(const char* message,
+                                               Handle<JSArray> args) {
   return NewError("MakeReferenceError", message, args);
 }
 
 
-Handle<Object> Factory::NewReferenceError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewReferenceError(Handle<String> message) {
   return NewError("$ReferenceError", message);
 }
 
 
-Handle<Object> Factory::NewError(const char* maker,
-                                 const char* message,
-                                 Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewError(const char* maker, const char* message,
+                                      Vector<Handle<Object> > args) {
   // Instantiate a closeable HandleScope for EscapeFrom.
   v8::EscapableHandleScope scope(reinterpret_cast<v8::Isolate*>(isolate()));
   Handle<FixedArray> array = NewFixedArray(args.length());
@@ -1107,19 +1115,21 @@
     array->set(i, *args[i]);
   }
   Handle<JSArray> object = NewJSArrayWithElements(array);
-  Handle<Object> result = NewError(maker, message, object);
+  Handle<Object> result;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+                             NewError(maker, message, object), Object);
   return result.EscapeFrom(&scope);
 }
 
 
-Handle<Object> Factory::NewEvalError(const char* message,
-                                     Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewEvalError(const char* message,
+                                          Vector<Handle<Object> > args) {
   return NewError("MakeEvalError", message, args);
 }
 
 
-Handle<Object> Factory::NewError(const char* message,
-                                 Vector< Handle<Object> > args) {
+MaybeHandle<Object> Factory::NewError(const char* message,
+                                      Vector<Handle<Object> > args) {
   return NewError("MakeError", message, args);
 }
 
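With the error factories now returning MaybeHandle<Object>, a caller can no longer silently drop a failure from the JS maker function: the result has to be unwrapped, as ASSIGN_RETURN_ON_EXCEPTION does above. A hedged sketch of the caller-side pattern against the new signatures (Factory* factory and Isolate* isolate assumed in scope; the message key is illustrative):

MaybeHandle<Object> maybe_error = factory->NewTypeError(
    "illustrative_message_key", HandleVector<Object>(NULL, 0));
Handle<Object> error;
if (!maybe_error.ToHandle(&error)) {
  // The maker threw; the pending exception is already set on the
  // isolate, so propagate the empty handle.
  return MaybeHandle<Object>();
}
isolate->Throw(*error);
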
@@ -1136,7 +1146,7 @@
   space -= Min(space, strlen(message));
   p = &buffer[kBufferSize] - space;
 
-  for (unsigned i = 0; i < ARRAY_SIZE(args); i++) {
+  for (int i = 0; i < Smi::cast(args->length())->value(); i++) {
     if (space > 0) {
       *p++ = ' ';
       space--;
@@ -1160,9 +1170,8 @@
 }
 
 
-Handle<Object> Factory::NewError(const char* maker,
-                                 const char* message,
-                                 Handle<JSArray> args) {
+MaybeHandle<Object> Factory::NewError(const char* maker, const char* message,
+                                      Handle<JSArray> args) {
   Handle<String> make_str = InternalizeUtf8String(maker);
   Handle<Object> fun_obj = Object::GetProperty(
       isolate()->js_builtins_object(), make_str).ToHandleChecked();
@@ -1178,10 +1187,10 @@
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   Handle<Object> result;
-  Handle<Object> exception;
+  MaybeHandle<Object> exception;
   if (!Execution::TryCall(fun,
                           isolate()->js_builtins_object(),
-                          ARRAY_SIZE(argv),
+                          arraysize(argv),
                           argv,
                           &exception).ToHandle(&result)) {
     return exception;
@@ -1190,13 +1199,13 @@
 }
 
 
-Handle<Object> Factory::NewError(Handle<String> message) {
+MaybeHandle<Object> Factory::NewError(Handle<String> message) {
   return NewError("$Error", message);
 }
 
 
-Handle<Object> Factory::NewError(const char* constructor,
-                                 Handle<String> message) {
+MaybeHandle<Object> Factory::NewError(const char* constructor,
+                                      Handle<String> message) {
   Handle<String> constr = InternalizeUtf8String(constructor);
   Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
       isolate()->js_builtins_object(), constr).ToHandleChecked());
@@ -1205,10 +1214,10 @@
   // Invoke the JavaScript factory method. If an exception is thrown while
   // running the factory method, use the exception as the result.
   Handle<Object> result;
-  Handle<Object> exception;
+  MaybeHandle<Object> exception;
   if (!Execution::TryCall(fun,
                           isolate()->js_builtins_object(),
-                          ARRAY_SIZE(argv),
+                          arraysize(argv),
                           argv,
                           &exception).ToHandle(&result)) {
     return exception;
@@ -1245,9 +1254,9 @@
 Handle<JSFunction> Factory::NewFunction(Handle<Map> map,
                                         Handle<String> name,
                                         MaybeHandle<Code> code) {
-  Handle<Context> context(isolate()->context()->native_context());
+  Handle<Context> context(isolate()->native_context());
   Handle<SharedFunctionInfo> info = NewSharedFunctionInfo(name, code);
-  ASSERT((info->strict_mode() == SLOPPY) &&
+  DCHECK((info->strict_mode() == SLOPPY) &&
          (map.is_identical_to(isolate()->sloppy_function_map()) ||
           map.is_identical_to(
               isolate()->sloppy_function_without_prototype_map()) ||
@@ -1298,9 +1307,9 @@
   if (prototype->IsTheHole() && !function->shared()->is_generator()) {
     prototype = NewFunctionPrototype(function);
   }
-  initial_map->set_prototype(*prototype);
-  function->set_initial_map(*initial_map);
-  initial_map->set_constructor(*function);
+
+  JSFunction::SetInitialMap(function, initial_map,
+                            Handle<JSReceiver>::cast(prototype));
 
   return function;
 }
@@ -1327,17 +1336,15 @@
     // Each function prototype gets a fresh map to avoid unwanted sharing of
     // maps between prototypes of different constructors.
     Handle<JSFunction> object_function(native_context->object_function());
-    ASSERT(object_function->has_initial_map());
-    new_map = Map::Copy(handle(object_function->initial_map()));
+    DCHECK(object_function->has_initial_map());
+    new_map = handle(object_function->initial_map());
   }
 
+  DCHECK(!new_map->is_prototype_map());
   Handle<JSObject> prototype = NewJSObjectFromMap(new_map);
 
   if (!function->shared()->is_generator()) {
-    JSObject::SetOwnPropertyIgnoreAttributes(prototype,
-                                             constructor_string(),
-                                             function,
-                                             DONT_ENUM).Assert();
+    JSObject::AddProperty(prototype, constructor_string(), function, DONT_ENUM);
   }
 
   return prototype;
@@ -1348,8 +1355,7 @@
     Handle<SharedFunctionInfo> info,
     Handle<Context> context,
     PretenureFlag pretenure) {
-  int map_index = Context::FunctionMapIndex(info->strict_mode(),
-                                            info->is_generator());
+  int map_index = Context::FunctionMapIndex(info->strict_mode(), info->kind());
   Handle<Map> map(Map::cast(context->native_context()->get(map_index)));
   Handle<JSFunction> result = NewFunction(map, info, context, pretenure);
 
@@ -1377,7 +1383,7 @@
     FixedArray* literals = info->GetLiteralsFromOptimizedCodeMap(index);
     if (literals != NULL) result->set_literals(literals);
     Code* code = info->GetCodeFromOptimizedCodeMap(index);
-    ASSERT(!code->marked_for_deoptimization());
+    DCHECK(!code->marked_for_deoptimization());
     result->ReplaceCode(code);
     return result;
   }
@@ -1395,18 +1401,6 @@
 }
 
 
-Handle<JSObject> Factory::NewIteratorResultObject(Handle<Object> value,
-                                                     bool done) {
-  Handle<Map> map(isolate()->native_context()->iterator_result_map());
-  Handle<JSObject> result = NewJSObjectFromMap(map, NOT_TENURED, false);
-  result->InObjectPropertyAtPut(
-      JSGeneratorObject::kResultValuePropertyIndex, *value);
-  result->InObjectPropertyAtPut(
-      JSGeneratorObject::kResultDonePropertyIndex, *ToBoolean(done));
-  return result;
-}
-
-
 Handle<ScopeInfo> Factory::NewScopeInfo(int length) {
   Handle<FixedArray> array = NewFixedArray(length, TENURED);
   array->set_map_no_write_barrier(*scope_info_map());
@@ -1446,7 +1440,7 @@
   int obj_size = Code::SizeFor(body_size);
 
   Handle<Code> code = NewCodeRaw(obj_size, immovable);
-  ASSERT(isolate()->code_range() == NULL ||
+  DCHECK(isolate()->code_range() == NULL ||
          !isolate()->code_range()->valid() ||
          isolate()->code_range()->contains(code->address()));
 
@@ -1462,7 +1456,7 @@
   code->set_raw_kind_specific_flags2(0);
   code->set_is_crankshafted(crankshafted);
   code->set_deoptimization_data(*empty_fixed_array(), SKIP_WRITE_BARRIER);
-  code->set_raw_type_feedback_info(*undefined_value());
+  code->set_raw_type_feedback_info(Smi::FromInt(0));
   code->set_next_code_link(*undefined_value());
   code->set_handler_table(*empty_fixed_array(), SKIP_WRITE_BARRIER);
   code->set_prologue_offset(prologue_offset);
@@ -1471,7 +1465,7 @@
   }
 
   if (is_debug) {
-    ASSERT(code->kind() == Code::FUNCTION);
+    DCHECK(code->kind() == Code::FUNCTION);
     code->set_has_debug_break_slots(true);
   }
 
@@ -1544,19 +1538,19 @@
 
 
 Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
-  ASSERT(constructor->has_initial_map());
+  DCHECK(constructor->has_initial_map());
   Handle<Map> map(constructor->initial_map());
-  ASSERT(map->is_dictionary_map());
+  DCHECK(map->is_dictionary_map());
 
   // Make sure no field properties are described in the initial map.
   // This guarantees us that normalizing the properties does not
   // require us to change property values to PropertyCells.
-  ASSERT(map->NextFreePropertyIndex() == 0);
+  DCHECK(map->NextFreePropertyIndex() == 0);
 
   // Make sure we don't have a ton of pre-allocated slots in the
   // global objects. They will be unused once we normalize the object.
-  ASSERT(map->unused_property_fields() == 0);
-  ASSERT(map->inobject_properties() == 0);
+  DCHECK(map->unused_property_fields() == 0);
+  DCHECK(map->inobject_properties() == 0);
 
   // Initial size of the backing store to avoid resizing the storage during
   // bootstrapping. The size differs between the JS global object and the
@@ -1573,7 +1567,7 @@
   Handle<DescriptorArray> descs(map->instance_descriptors());
   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
     PropertyDetails details = descs->GetDetails(i);
-    ASSERT(details.type() == CALLBACKS);  // Only accessors are expected.
+    DCHECK(details.type() == CALLBACKS);  // Only accessors are expected.
     PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
     Handle<Name> name(descs->GetKey(i));
     Handle<Object> value(descs->GetCallbacksObject(i), isolate());
@@ -1595,7 +1589,7 @@
   global->set_properties(*dictionary);
 
   // Make sure result is a global object with properties in dictionary.
-  ASSERT(global->IsGlobalObject() && !global->HasFastProperties());
+  DCHECK(global->IsGlobalObject() && !global->HasFastProperties());
   return global;
 }
 
@@ -1642,7 +1636,7 @@
                                                 ElementsKind elements_kind,
                                                 int length,
                                                 PretenureFlag pretenure) {
-  ASSERT(length <= elements->length());
+  DCHECK(length <= elements->length());
   Handle<JSArray> array = NewJSArray(elements_kind, pretenure);
 
   array->set_elements(*elements);
@@ -1656,7 +1650,7 @@
                                 int length,
                                 int capacity,
                                 ArrayStorageAllocationMode mode) {
-  ASSERT(capacity >= length);
+  DCHECK(capacity >= length);
 
   if (capacity == 0) {
     array->set_length(Smi::FromInt(0));
@@ -1670,15 +1664,15 @@
     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
       elms = NewFixedDoubleArray(capacity);
     } else {
-      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+      DCHECK(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
       elms = NewFixedDoubleArrayWithHoles(capacity);
     }
   } else {
-    ASSERT(IsFastSmiOrObjectElementsKind(elements_kind));
+    DCHECK(IsFastSmiOrObjectElementsKind(elements_kind));
     if (mode == DONT_INITIALIZE_ARRAY_ELEMENTS) {
       elms = NewUninitializedFixedArray(capacity);
     } else {
-      ASSERT(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
+      DCHECK(mode == INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
       elms = NewFixedArrayWithHoles(capacity);
     }
   }
@@ -1690,10 +1684,10 @@
 
 Handle<JSGeneratorObject> Factory::NewJSGeneratorObject(
     Handle<JSFunction> function) {
-  ASSERT(function->shared()->is_generator());
+  DCHECK(function->shared()->is_generator());
   JSFunction::EnsureHasInitialMap(function);
   Handle<Map> map(function->initial_map());
-  ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
+  DCHECK(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateJSObjectFromMap(*map),
@@ -1703,7 +1697,7 @@
 
 Handle<JSArrayBuffer> Factory::NewJSArrayBuffer() {
   Handle<JSFunction> array_buffer_fun(
-      isolate()->context()->native_context()->array_buffer_fun());
+      isolate()->native_context()->array_buffer_fun());
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateJSObject(*array_buffer_fun),
@@ -1713,7 +1707,7 @@
 
 Handle<JSDataView> Factory::NewJSDataView() {
   Handle<JSFunction> data_view_fun(
-      isolate()->context()->native_context()->data_view_fun());
+      isolate()->native_context()->data_view_fun());
   CALL_HEAP_FUNCTION(
       isolate(),
       isolate()->heap()->AllocateJSObject(*data_view_fun),
@@ -1787,20 +1781,19 @@
 }
 
 
-void Factory::ReinitializeJSReceiver(Handle<JSReceiver> object,
-                                     InstanceType type,
-                                     int size) {
-  ASSERT(type >= FIRST_JS_OBJECT_TYPE);
+void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
+                                  int size) {
+  DCHECK(type == JS_OBJECT_TYPE || type == JS_FUNCTION_TYPE);
 
   // Allocate fresh map.
   // TODO(rossberg): Once we optimize proxies, cache these maps.
   Handle<Map> map = NewMap(type, size);
 
   // Check that the receiver has at least the size of the fresh object.
-  int size_difference = object->map()->instance_size() - map->instance_size();
-  ASSERT(size_difference >= 0);
+  int size_difference = proxy->map()->instance_size() - map->instance_size();
+  DCHECK(size_difference >= 0);
 
-  map->set_prototype(object->map()->prototype());
+  map->set_prototype(proxy->map()->prototype());
 
   // Allocate the backing storage for the properties.
   int prop_size = map->InitialPropertiesLength();
@@ -1809,7 +1802,7 @@
   Heap* heap = isolate()->heap();
   MaybeHandle<SharedFunctionInfo> shared;
   if (type == JS_FUNCTION_TYPE) {
-    OneByteStringKey key(STATIC_ASCII_VECTOR("<freezing call trap>"),
+    OneByteStringKey key(STATIC_CHAR_VECTOR("<freezing call trap>"),
                          heap->HashSeed());
     Handle<String> name = InternalizeStringWithKey(&key);
     shared = NewSharedFunctionInfo(name, MaybeHandle<Code>());
@@ -1819,32 +1812,40 @@
   // before object re-initialization is finished and filler object is installed.
   DisallowHeapAllocation no_allocation;
 
+  // Put in filler if the new object is smaller than the old.
+  if (size_difference > 0) {
+    Address address = proxy->address();
+    heap->CreateFillerObjectAt(address + map->instance_size(), size_difference);
+    heap->AdjustLiveBytes(address, -size_difference, Heap::FROM_MUTATOR);
+  }
+
   // Reset the map for the object.
-  object->set_map(*map);
-  Handle<JSObject> jsobj = Handle<JSObject>::cast(object);
+  proxy->synchronized_set_map(*map);
+  Handle<JSObject> jsobj = Handle<JSObject>::cast(proxy);
 
   // Reinitialize the object from the constructor map.
   heap->InitializeJSObjectFromMap(*jsobj, *properties, *map);
 
+  // The current native context is used to set up certain bits.
+  // TODO(adamk): Using the current context seems wrong, it should be whatever
+  // context the JSProxy originated in. But that context isn't stored anywhere.
+  Handle<Context> context(isolate()->native_context());
+
   // Functions require some minimal initialization.
   if (type == JS_FUNCTION_TYPE) {
     map->set_function_with_prototype(true);
-    Handle<JSFunction> js_function = Handle<JSFunction>::cast(object);
-    Handle<Context> context(isolate()->context()->native_context());
+    Handle<JSFunction> js_function = Handle<JSFunction>::cast(proxy);
     InitializeFunction(js_function, shared.ToHandleChecked(), context);
-  }
-
-  // Put in filler if the new object is smaller than the old.
-  if (size_difference > 0) {
-    heap->CreateFillerObjectAt(
-        object->address() + map->instance_size(), size_difference);
+  } else {
+    // Provide JSObjects with a constructor.
+    map->set_constructor(context->object_function());
   }
 }
 
 
 void Factory::ReinitializeJSGlobalProxy(Handle<JSGlobalProxy> object,
                                         Handle<JSFunction> constructor) {
-  ASSERT(constructor->has_initial_map());
+  DCHECK(constructor->has_initial_map());
   Handle<Map> map(constructor->initial_map(), isolate());
 
   // The proxy's hash should be retained across reinitialization.
@@ -1852,8 +1853,8 @@
 
   // Check that the already allocated object has the same size and type as
   // objects allocated using the constructor.
-  ASSERT(map->instance_size() == object->map()->instance_size());
-  ASSERT(map->instance_type() == object->map()->instance_type());
+  DCHECK(map->instance_size() == object->map()->instance_size());
+  DCHECK(map->instance_type() == object->map()->instance_type());
 
   // Allocate the backing storage for the properties.
   int prop_size = map->InitialPropertiesLength();
@@ -1864,7 +1865,7 @@
   DisallowHeapAllocation no_allocation;
 
   // Reset the map for the object.
-  object->set_map(constructor->initial_map());
+  object->synchronized_set_map(*map);
 
   Heap* heap = isolate()->heap();
   // Reinitialize the object from the constructor map.
@@ -1875,41 +1876,42 @@
 }
 
 
-void Factory::BecomeJSObject(Handle<JSReceiver> object) {
-  ReinitializeJSReceiver(object, JS_OBJECT_TYPE, JSObject::kHeaderSize);
+void Factory::BecomeJSObject(Handle<JSProxy> proxy) {
+  ReinitializeJSProxy(proxy, JS_OBJECT_TYPE, JSObject::kHeaderSize);
 }
 
 
-void Factory::BecomeJSFunction(Handle<JSReceiver> object) {
-  ReinitializeJSReceiver(object, JS_FUNCTION_TYPE, JSFunction::kSize);
+void Factory::BecomeJSFunction(Handle<JSProxy> proxy) {
+  ReinitializeJSProxy(proxy, JS_FUNCTION_TYPE, JSFunction::kSize);
 }
 
 
-Handle<FixedArray> Factory::NewTypeFeedbackVector(int slot_count) {
+Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(int slot_count) {
   // Ensure we can skip the write barrier
-  ASSERT_EQ(isolate()->heap()->uninitialized_symbol(),
-            *TypeFeedbackInfo::UninitializedSentinel(isolate()));
+  DCHECK_EQ(isolate()->heap()->uninitialized_symbol(),
+            *TypeFeedbackVector::UninitializedSentinel(isolate()));
 
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      isolate()->heap()->AllocateFixedArrayWithFiller(
-          slot_count,
-          TENURED,
-          *TypeFeedbackInfo::UninitializedSentinel(isolate())),
-      FixedArray);
+  if (slot_count == 0) {
+    return Handle<TypeFeedbackVector>::cast(empty_fixed_array());
+  }
+
+  CALL_HEAP_FUNCTION(isolate(),
+                     isolate()->heap()->AllocateFixedArrayWithFiller(
+                         slot_count, TENURED,
+                         *TypeFeedbackVector::UninitializedSentinel(isolate())),
+                     TypeFeedbackVector);
 }
 
 
 Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
-    Handle<String> name,
-    int number_of_literals,
-    bool is_generator,
-    Handle<Code> code,
-    Handle<ScopeInfo> scope_info,
-    Handle<FixedArray> feedback_vector) {
+    Handle<String> name, int number_of_literals, FunctionKind kind,
+    Handle<Code> code, Handle<ScopeInfo> scope_info,
+    Handle<TypeFeedbackVector> feedback_vector) {
+  DCHECK(IsValidFunctionKind(kind));
   Handle<SharedFunctionInfo> shared = NewSharedFunctionInfo(name, code);
   shared->set_scope_info(*scope_info);
   shared->set_feedback_vector(*feedback_vector);
+  shared->set_kind(kind);
   int literals_array_size = number_of_literals;
   // If the function contains object, regexp or array literals,
   // allocate extra space for a literals array prefix containing the
@@ -1918,7 +1920,7 @@
     literals_array_size += JSFunction::kLiteralsPrefixSize;
   }
   shared->set_num_literals(literals_array_size);
-  if (is_generator) {
+  if (IsGeneratorFunction(kind)) {
     shared->set_instance_class_name(isolate()->heap()->Generator_string());
     shared->DisableOptimization(kGenerator);
   }
@@ -1972,7 +1974,8 @@
   share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
   share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
   share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
-  share->set_feedback_vector(*empty_fixed_array(), SKIP_WRITE_BARRIER);
+  Handle<TypeFeedbackVector> feedback_vector = NewTypeFeedbackVector(0);
+  share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
   share->set_profiler_ticks(0);
   share->set_ast_node_count(0);
   share->set_counters(0);
@@ -2030,7 +2033,7 @@
       // cache in the snapshot to keep boot-time memory usage down.
       // If we already have to expand the number string cache while creating
       // the snapshot, then that didn't work out.
-      ASSERT(!isolate()->serializer_enabled() || FLAG_extra_code != NULL);
+      DCHECK(!isolate()->serializer_enabled() || FLAG_extra_code != NULL);
       Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
       isolate()->heap()->set_number_string_cache(*new_cache);
       return;
@@ -2050,7 +2053,7 @@
   }
 
   char arr[100];
-  Vector<char> buffer(arr, ARRAY_SIZE(arr));
+  Vector<char> buffer(arr, arraysize(arr));
   const char* str;
   if (number->IsSmi()) {
     int num = Handle<Smi>::cast(number)->value();
@@ -2099,11 +2102,22 @@
 }
 
 
-Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,
+Handle<JSObject> Factory::NewArgumentsObject(Handle<JSFunction> callee,
                                              int length) {
-  CALL_HEAP_FUNCTION(
-      isolate(),
-      isolate()->heap()->AllocateArgumentsObject(*callee, length), JSObject);
+  bool strict_mode_callee = callee->shared()->strict_mode() == STRICT;
+  Handle<Map> map = strict_mode_callee ? isolate()->strict_arguments_map()
+                                       : isolate()->sloppy_arguments_map();
+
+  AllocationSiteUsageContext context(isolate(), Handle<AllocationSite>(),
+                                     false);
+  DCHECK(!isolate()->has_pending_exception());
+  Handle<JSObject> result = NewJSObjectFromMap(map);
+  Handle<Smi> value(Smi::FromInt(length), isolate());
+  Object::SetProperty(result, length_string(), value, STRICT).Assert();
+  if (!strict_mode_callee) {
+    Object::SetProperty(result, callee_string(), callee, STRICT).Assert();
+  }
+  return result;
 }
 
 
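NewArgumentsObject now takes the callee as a Handle<JSFunction> so it can choose the map from the callee's strictness itself: both flavors get "length", but only sloppy-mode arguments carry "callee". A short usage sketch against the new signature (the callee handle is assumed to be in scope):

Handle<JSObject> arguments_object =
    isolate->factory()->NewArgumentsObject(callee, 2 /* length */);
// Strict-mode callee: only "length" is installed.
// Sloppy-mode callee: "length" plus a "callee" back-reference.
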
@@ -2132,15 +2146,15 @@
     int instance_size = kPointerSize * internal_field_count;
     InstanceType type;
     switch (instance_type) {
-      case JavaScriptObject:
+      case JavaScriptObjectType:
         type = JS_OBJECT_TYPE;
         instance_size += JSObject::kHeaderSize;
         break;
-      case InnerGlobalObject:
+      case GlobalObjectType:
         type = JS_GLOBAL_OBJECT_TYPE;
         instance_size += JSGlobalObject::kSize;
         break;
-      case OuterGlobalObject:
+      case GlobalProxyType:
         type = JS_GLOBAL_PROXY_TYPE;
         instance_size += JSGlobalProxy::kSize;
         break;
@@ -2165,17 +2179,25 @@
   result->shared()->DontAdaptArguments();
 
   if (obj->remove_prototype()) {
-    ASSERT(result->shared()->IsApiFunction());
-    ASSERT(!result->has_initial_map());
-    ASSERT(!result->has_prototype());
+    DCHECK(result->shared()->IsApiFunction());
+    DCHECK(!result->has_initial_map());
+    DCHECK(!result->has_prototype());
     return result;
   }
 
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      handle(JSObject::cast(result->prototype())),
-      constructor_string(),
-      result,
-      DONT_ENUM).Assert();
+  if (prototype->IsTheHole()) {
+#ifdef DEBUG
+    LookupIterator it(handle(JSObject::cast(result->prototype())),
+                      constructor_string(),
+                      LookupIterator::OWN_SKIP_INTERCEPTOR);
+    MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
+    DCHECK(it.IsFound());
+    DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
+#endif
+  } else {
+    JSObject::AddProperty(handle(JSObject::cast(result->prototype())),
+                          constructor_string(), result, DONT_ENUM);
+  }
 
   // Down from here is only valid for API functions that can be used as a
   // constructor (don't set the "remove prototype" flag).
@@ -2279,7 +2301,7 @@
     JSObject::SetAccessor(result, accessor).Assert();
   }
 
-  ASSERT(result->shared()->IsApiFunction());
+  DCHECK(result->shared()->IsApiFunction());
   return result;
 }
 
@@ -2306,9 +2328,12 @@
       Handle<MapCache>(MapCache::cast(context->map_cache()));
   Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
   if (result->IsMap()) return Handle<Map>::cast(result);
-  // Create a new map and add it to the cache.
-  Handle<Map> map = Map::Create(
-      handle(context->object_function()), keys->length());
+  int length = keys->length();
+  // Create a new map and add it to the cache. Reuse the initial map of the
+  // Object function if the literal has no predeclared properties.
+  Handle<Map> map = length == 0
+                        ? handle(context->object_function()->initial_map())
+                        : Map::Create(isolate(), length);
   AddToMapCache(context, keys, map);
   return map;
 }
@@ -2338,9 +2363,9 @@
   store->set(JSRegExp::kTagIndex, Smi::FromInt(type));
   store->set(JSRegExp::kSourceIndex, *source);
   store->set(JSRegExp::kFlagsIndex, Smi::FromInt(flags.value()));
-  store->set(JSRegExp::kIrregexpASCIICodeIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpLatin1CodeIndex, uninitialized);
   store->set(JSRegExp::kIrregexpUC16CodeIndex, uninitialized);
-  store->set(JSRegExp::kIrregexpASCIICodeSavedIndex, uninitialized);
+  store->set(JSRegExp::kIrregexpLatin1CodeSavedIndex, uninitialized);
   store->set(JSRegExp::kIrregexpUC16CodeSavedIndex, uninitialized);
   store->set(JSRegExp::kIrregexpMaxRegisterCountIndex, Smi::FromInt(0));
   store->set(JSRegExp::kIrregexpCaptureCountIndex,
diff --git a/src/factory.h b/src/factory.h
index e22ea8d..24b490c 100644
--- a/src/factory.h
+++ b/src/factory.h
@@ -12,7 +12,7 @@
 
 // Interface for handle based allocation.
 
-class Factory V8_FINAL {
+class Factory FINAL {
  public:
   Handle<Oddball> NewOddball(Handle<Map> map,
                              const char* to_string,
@@ -85,34 +85,31 @@
   // allocated in the old generation.  The pretenure flag defaults to
   // DONT_TENURE.
   //
-  // Creates a new String object.  There are two String encodings: ASCII and
-  // two byte.  One should choose between the three string factory functions
+  // Creates a new String object.  There are two String encodings: one-byte and
+  // two-byte.  One should choose between the three string factory functions
   // based on the encoding of the string buffer that the string is
   // initialized from.
-  //   - ...FromAscii initializes the string from a buffer that is ASCII
-  //     encoded (it does not check that the buffer is ASCII encoded) and
-  //     the result will be ASCII encoded.
+  //   - ...FromOneByte initializes the string from a buffer that is Latin1
+  //     encoded (it does not check that the buffer is Latin1 encoded) and
+  //     the result will be Latin1 encoded.
   //   - ...FromUtf8 initializes the string from a buffer that is UTF-8
-  //     encoded.  If the characters are all single-byte characters, the
-  //     result will be ASCII encoded, otherwise it will converted to two
-  //     byte.
-  //   - ...FromTwoByte initializes the string from a buffer that is two
-  //     byte encoded.  If the characters are all single-byte characters,
-  //     the result will be converted to ASCII, otherwise it will be left as
-  //     two byte.
+  //     encoded.  If the characters are all ASCII characters, the result
+  //     will be Latin1 encoded, otherwise it will be converted to two-byte.
+  //   - ...FromTwoByte initializes the string from a buffer that is two-byte
+  //     encoded.  If the characters are all Latin1 characters, the result
+  //     will be converted to Latin1, otherwise it will be left as two-byte.
   //
-  // ASCII strings are pretenured when used as keys in the SourceCodeCache.
+  // One-byte strings are pretenured when used as keys in the SourceCodeCache.
   MUST_USE_RESULT MaybeHandle<String> NewStringFromOneByte(
       Vector<const uint8_t> str,
       PretenureFlag pretenure = NOT_TENURED);
 
-  template<size_t N>
-  inline Handle<String> NewStringFromStaticAscii(
-      const char (&str)[N],
-      PretenureFlag pretenure = NOT_TENURED) {
-    ASSERT(N == StrLength(str) + 1);
-    return NewStringFromOneByte(
-        STATIC_ASCII_VECTOR(str), pretenure).ToHandleChecked();
+  template <size_t N>
+  inline Handle<String> NewStringFromStaticChars(
+      const char (&str)[N], PretenureFlag pretenure = NOT_TENURED) {
+    DCHECK(N == StrLength(str) + 1);
+    return NewStringFromOneByte(STATIC_CHAR_VECTOR(str), pretenure)
+        .ToHandleChecked();
   }
 
   inline Handle<String> NewStringFromAsciiChecked(
@@ -123,20 +120,19 @@
   }
 
 
-  // Allocates and fully initializes a String.  There are two String
-  // encodings: ASCII and two byte. One should choose between the three string
+  // Allocates and fully initializes a String.  There are two String encodings:
+  // one-byte and two-byte. One should choose between the three string
   // allocation functions based on the encoding of the string buffer used to
   // initialize the string.
-  //   - ...FromAscii initializes the string from a buffer that is ASCII
-  //     encoded (it does not check that the buffer is ASCII encoded) and the
-  //     result will be ASCII encoded.
+  //   - ...FromOneByte initializes the string from a buffer that is Latin1
+  //     encoded (it does not check that the buffer is Latin1 encoded) and the
+  //     result will be Latin1 encoded.
   //   - ...FromUTF8 initializes the string from a buffer that is UTF-8
-  //     encoded.  If the characters are all single-byte characters, the
-  //     result will be ASCII encoded, otherwise it will converted to two
-  //     byte.
+  //     encoded.  If the characters are all ASCII characters, the result
+  //     will be Latin1 encoded, otherwise it will be converted to two-byte.
   //   - ...FromTwoByte initializes the string from a buffer that is two-byte
-  //     encoded.  If the characters are all single-byte characters, the
-  //     result will be converted to ASCII, otherwise it will be left as
+  //     encoded.  If the characters are all Latin1 characters, the
+  //     result will be converted to Latin1, otherwise it will be left as
   //     two-byte.
 
   // TODO(dcarney): remove this function.
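
A hedged usage sketch of the renamed string factories documented above (the factory comes from an Isolate; the literals and the NewStringFromUtf8 overload taking a Vector<const char> are assumptions based on this header):

Factory* factory = isolate->factory();
// Latin1 in, Latin1 out; NewStringFromStaticChars replaces the old
// NewStringFromStaticAscii:
Handle<String> latin1 = factory->NewStringFromStaticChars("hello");
// UTF-8 in; the result stays Latin1 while every character is ASCII and
// is widened to two-byte otherwise:
Handle<String> utf8 =
    factory->NewStringFromUtf8(CStrVector("caf\xC3\xA9")).ToHandleChecked();
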
@@ -164,8 +160,11 @@
       uint32_t hash_field);
 
   MUST_USE_RESULT Handle<String> NewOneByteInternalizedString(
-        Vector<const uint8_t> str,
-        uint32_t hash_field);
+      Vector<const uint8_t> str, uint32_t hash_field);
+
+  MUST_USE_RESULT Handle<String> NewOneByteInternalizedSubString(
+      Handle<SeqOneByteString> string, int offset, int length,
+      uint32_t hash_field);
 
   MUST_USE_RESULT Handle<String> NewTwoByteInternalizedString(
         Vector<const uc16> str,
@@ -179,7 +178,7 @@
   MUST_USE_RESULT MaybeHandle<Map> InternalizedStringMapForString(
       Handle<String> string);
 
-  // Allocates and partially initializes an ASCII or TwoByte String. The
+  // Allocates and partially initializes a one-byte or two-byte String. The
   // characters of the string are uninitialized. Currently used in regexp code
   // only, where they are pretenured.
   MUST_USE_RESULT MaybeHandle<SeqOneByteString> NewRawOneByteString(
@@ -190,17 +189,13 @@
       PretenureFlag pretenure = NOT_TENURED);
 
   // Creates a single character string where the character has the given code.
-  // A cache is used for ASCII codes.
+  // A cache is used for Latin1 codes.
   Handle<String> LookupSingleCharacterStringFromCode(uint32_t code);
 
   // Create a new cons string object which consists of a pair of strings.
   MUST_USE_RESULT MaybeHandle<String> NewConsString(Handle<String> left,
                                                     Handle<String> right);
 
-  // Create a new sequential string containing the concatenation of the inputs.
-  Handle<String> NewFlatConcatString(Handle<String> first,
-                                     Handle<String> second);
-
   // Create a new string object which holds a proper substring of a string.
   Handle<String> NewProperSubString(Handle<String> str,
                                     int begin,
@@ -213,18 +208,19 @@
   }
 
   // Creates a new external String object.  There are two String encodings
-  // in the system: ASCII and two byte.  Unlike other String types, it does
+  // in the system: one-byte and two-byte.  Unlike other String types, it does
   // not make sense to have a UTF-8 factory function for external strings,
   // because we cannot change the underlying buffer.  Note that these strings
   // are backed by a string resource that resides outside the V8 heap.
-  MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromAscii(
-      const ExternalAsciiString::Resource* resource);
+  MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromOneByte(
+      const ExternalOneByteString::Resource* resource);
   MUST_USE_RESULT MaybeHandle<String> NewExternalStringFromTwoByte(
       const ExternalTwoByteString::Resource* resource);
 
   // Create a symbol.
   Handle<Symbol> NewSymbol();
   Handle<Symbol> NewPrivateSymbol();
+  Handle<Symbol> NewPrivateOwnSymbol();
 
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewNativeContext();
@@ -352,16 +348,16 @@
     return NewNumber(static_cast<double>(value), pretenure);
   }
   Handle<HeapNumber> NewHeapNumber(double value,
+                                   MutableMode mode = IMMUTABLE,
                                    PretenureFlag pretenure = NOT_TENURED);
 
-
   // These objects are used by the api to create env-independent data
   // structures in the heap.
   inline Handle<JSObject> NewNeanderObject() {
     return NewJSObjectFromMap(neander_map());
   }
 
-  Handle<JSObject> NewArgumentsObject(Handle<Object> callee, int length);
+  Handle<JSObject> NewArgumentsObject(Handle<JSFunction> callee, int length);
 
   // JS objects are pretenured when allocated by the bootstrapper and
   // runtime.
@@ -396,10 +392,8 @@
   // Create a JSArray with a specified length and elements initialized
   // according to the specified mode.
   Handle<JSArray> NewJSArray(
-      ElementsKind elements_kind,
-      int length,
-      int capacity,
-      ArrayStorageAllocationMode mode = INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE,
+      ElementsKind elements_kind, int length, int capacity,
+      ArrayStorageAllocationMode mode = DONT_INITIALIZE_ARRAY_ELEMENTS,
       PretenureFlag pretenure = NOT_TENURED);
 
   Handle<JSArray> NewJSArray(
@@ -451,13 +445,6 @@
                                      Handle<Object> construct_trap,
                                      Handle<Object> prototype);
 
-  // Reinitialize a JSReceiver into an (empty) JS object of respective type and
-  // size, but keeping the original prototype.  The receiver must have at least
-  // the size of the new object.  The object is reinitialized and behaves as an
-  // object that has been freshly allocated.
-  void ReinitializeJSReceiver(
-      Handle<JSReceiver> object, InstanceType type, int size);
-
   // Reinitialize a JSGlobalProxy based on a constructor.  The object
   // must have the same size as objects allocated using the
   // constructor.  The object is reinitialized and behaves as an
@@ -466,8 +453,8 @@
                                  Handle<JSFunction> constructor);
 
   // Change the type of the argument into a JS object/function and reinitialize.
-  void BecomeJSObject(Handle<JSReceiver> object);
-  void BecomeJSFunction(Handle<JSReceiver> object);
+  void BecomeJSObject(Handle<JSProxy> object);
+  void BecomeJSFunction(Handle<JSProxy> object);
 
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Code> code,
@@ -516,42 +503,40 @@
 
   // Interface for creating error objects.
 
-  Handle<Object> NewError(const char* maker, const char* message,
-                          Handle<JSArray> args);
+  MaybeHandle<Object> NewError(const char* maker, const char* message,
+                               Handle<JSArray> args);
   Handle<String> EmergencyNewError(const char* message, Handle<JSArray> args);
-  Handle<Object> NewError(const char* maker, const char* message,
-                          Vector< Handle<Object> > args);
-  Handle<Object> NewError(const char* message,
-                          Vector< Handle<Object> > args);
-  Handle<Object> NewError(Handle<String> message);
-  Handle<Object> NewError(const char* constructor,
-                          Handle<String> message);
+  MaybeHandle<Object> NewError(const char* maker, const char* message,
+                               Vector<Handle<Object> > args);
+  MaybeHandle<Object> NewError(const char* message,
+                               Vector<Handle<Object> > args);
+  MaybeHandle<Object> NewError(Handle<String> message);
+  MaybeHandle<Object> NewError(const char* constructor, Handle<String> message);
 
-  Handle<Object> NewTypeError(const char* message,
-                              Vector< Handle<Object> > args);
-  Handle<Object> NewTypeError(Handle<String> message);
+  MaybeHandle<Object> NewTypeError(const char* message,
+                                   Vector<Handle<Object> > args);
+  MaybeHandle<Object> NewTypeError(Handle<String> message);
 
-  Handle<Object> NewRangeError(const char* message,
-                               Vector< Handle<Object> > args);
-  Handle<Object> NewRangeError(Handle<String> message);
+  MaybeHandle<Object> NewRangeError(const char* message,
+                                    Vector<Handle<Object> > args);
+  MaybeHandle<Object> NewRangeError(Handle<String> message);
 
-  Handle<Object> NewInvalidStringLengthError() {
+  MaybeHandle<Object> NewInvalidStringLengthError() {
     return NewRangeError("invalid_string_length",
                          HandleVector<Object>(NULL, 0));
   }
 
-  Handle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
-  Handle<Object> NewSyntaxError(Handle<String> message);
+  MaybeHandle<Object> NewSyntaxError(const char* message, Handle<JSArray> args);
+  MaybeHandle<Object> NewSyntaxError(Handle<String> message);
 
-  Handle<Object> NewReferenceError(const char* message,
-                                   Vector< Handle<Object> > args);
-  Handle<Object> NewReferenceError(const char* message, Handle<JSArray> args);
-  Handle<Object> NewReferenceError(Handle<String> message);
+  MaybeHandle<Object> NewReferenceError(const char* message,
+                                        Vector<Handle<Object> > args);
+  MaybeHandle<Object> NewReferenceError(const char* message,
+                                        Handle<JSArray> args);
+  MaybeHandle<Object> NewReferenceError(Handle<String> message);
 
-  Handle<Object> NewEvalError(const char* message,
-                              Vector< Handle<Object> > args);
-
-  Handle<JSObject> NewIteratorResultObject(Handle<Object> value, bool done);
+  MaybeHandle<Object> NewEvalError(const char* message,
+                                   Vector<Handle<Object> > args);
 
   Handle<String> NumberToString(Handle<Object> number,
                                 bool check_number_string_cache = true);
@@ -561,15 +546,15 @@
   }
 
   enum ApiInstanceType {
-    JavaScriptObject,
-    InnerGlobalObject,
-    OuterGlobalObject
+    JavaScriptObjectType,
+    GlobalObjectType,
+    GlobalProxyType
   };
 
   Handle<JSFunction> CreateApiFunction(
       Handle<FunctionTemplateInfo> data,
       Handle<Object> prototype,
-      ApiInstanceType type = JavaScriptObject);
+      ApiInstanceType type = JavaScriptObjectType);
 
   Handle<JSFunction> InstallMembers(Handle<JSFunction> function);
 
@@ -579,26 +564,26 @@
   MUST_USE_RESULT MaybeHandle<FunctionTemplateInfo> ConfigureInstance(
       Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance);
 
-#define ROOT_ACCESSOR(type, name, camel_name)                                  \
-  inline Handle<type> name() {                                                 \
-    return Handle<type>(BitCast<type**>(                                       \
-        &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex]));          \
+#define ROOT_ACCESSOR(type, name, camel_name)                         \
+  inline Handle<type> name() {                                        \
+    return Handle<type>(bit_cast<type**>(                             \
+        &isolate()->heap()->roots_[Heap::k##camel_name##RootIndex])); \
   }
   ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name)                                  \
-  inline Handle<Map> name##_map() {                                            \
-    return Handle<Map>(BitCast<Map**>(                                         \
-        &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex]));             \
-    }
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name)                      \
+  inline Handle<Map> name##_map() {                                \
+    return Handle<Map>(bit_cast<Map**>(                            \
+        &isolate()->heap()->roots_[Heap::k##Name##MapRootIndex])); \
+  }
   STRUCT_LIST(STRUCT_MAP_ACCESSOR)
 #undef STRUCT_MAP_ACCESSOR
 
-#define STRING_ACCESSOR(name, str)                                             \
-  inline Handle<String> name() {                                               \
-    return Handle<String>(BitCast<String**>(                                   \
-        &isolate()->heap()->roots_[Heap::k##name##RootIndex]));                \
+#define STRING_ACCESSOR(name, str)                              \
+  inline Handle<String> name() {                                \
+    return Handle<String>(bit_cast<String**>(                   \
+        &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
   }
   INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
 #undef STRING_ACCESSOR
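
The BitCast -> bit_cast rename in these accessor macros points at the Chromium-style memcpy cast, which avoids the undefined behavior of type-punning through reinterpret_cast. A sketch of the idiom (illustrative; V8's version lives in src/base/macros.h):

#include <cstring>

template <class Dest, class Source>
inline Dest bit_cast(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source),
                "bit_cast requires equally sized types");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}
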
@@ -613,17 +598,14 @@
 
   // Allocates a new SharedFunctionInfo object.
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(
-      Handle<String> name,
-      int number_of_literals,
-      bool is_generator,
-      Handle<Code> code,
-      Handle<ScopeInfo> scope_info,
-      Handle<FixedArray> feedback_vector);
+      Handle<String> name, int number_of_literals, FunctionKind kind,
+      Handle<Code> code, Handle<ScopeInfo> scope_info,
+      Handle<TypeFeedbackVector> feedback_vector);
   Handle<SharedFunctionInfo> NewSharedFunctionInfo(Handle<String> name,
                                                    MaybeHandle<Code> code);
 
   // Allocate a new type feedback vector
-  Handle<FixedArray> NewTypeFeedbackVector(int slot_count);
+  Handle<TypeFeedbackVector> NewTypeFeedbackVector(int slot_count);
 
   // Allocates a new JSMessageObject object.
   Handle<JSMessageObject> NewJSMessageObject(
@@ -714,6 +696,12 @@
   Handle<JSFunction> NewFunction(Handle<Map> map,
                                  Handle<String> name,
                                  MaybeHandle<Code> maybe_code);
+
+  // Reinitialize a JSProxy into an (empty) JS object of respective type and
+  // size, but keeping the original prototype.  The receiver must have at least
+  // the size of the new object.  The object is reinitialized and behaves as an
+  // object that has been freshly allocated.
+  void ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type, int size);
 };
 
 } }  // namespace v8::internal
diff --git a/src/fast-dtoa.cc b/src/fast-dtoa.cc
index 919023c..13b0463 100644
--- a/src/fast-dtoa.cc
+++ b/src/fast-dtoa.cc
@@ -3,7 +3,7 @@
 // found in the LICENSE file.
 
 #include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/utils.h"
 
 #include "src/fast-dtoa.h"
@@ -120,7 +120,7 @@
   // Conceptually rest ~= too_high - buffer
   // We need to do the following tests in this order to avoid over- and
   // underflows.
-  ASSERT(rest <= unsafe_interval);
+  DCHECK(rest <= unsafe_interval);
   while (rest < small_distance &&  // Negated condition 1
          unsafe_interval - rest >= ten_kappa &&  // Negated condition 2
          (rest + ten_kappa < small_distance ||  // buffer{-1} > w_high
@@ -166,7 +166,7 @@
                              uint64_t ten_kappa,
                              uint64_t unit,
                              int* kappa) {
-  ASSERT(rest < ten_kappa);
+  DCHECK(rest < ten_kappa);
   // The following tests are done in a specific order to avoid overflows. They
   // will work correctly with any uint64 values of rest < ten_kappa and unit.
   //
@@ -365,9 +365,9 @@
                      Vector<char> buffer,
                      int* length,
                      int* kappa) {
-  ASSERT(low.e() == w.e() && w.e() == high.e());
-  ASSERT(low.f() + 1 <= high.f() - 1);
-  ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+  DCHECK(low.e() == w.e() && w.e() == high.e());
+  DCHECK(low.f() + 1 <= high.f() - 1);
+  DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
   // low, w and high are imprecise, but by less than one ulp (unit in the last
   // place).
   // If we remove (resp. add) 1 ulp from low (resp. high) we are certain that
@@ -435,9 +435,9 @@
   // data (like the interval or 'unit'), too.
   // Note that the multiplication by 10 does not overflow, because w.e >= -60
   // and thus one.e >= -60.
-  ASSERT(one.e() >= -60);
-  ASSERT(fractionals < one.f());
-  ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+  DCHECK(one.e() >= -60);
+  DCHECK(fractionals < one.f());
+  DCHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
   while (true) {
     fractionals *= 10;
     unit *= 10;
@@ -490,9 +490,9 @@
                             Vector<char> buffer,
                             int* length,
                             int* kappa) {
-  ASSERT(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
-  ASSERT(kMinimalTargetExponent >= -60);
-  ASSERT(kMaximalTargetExponent <= -32);
+  DCHECK(kMinimalTargetExponent <= w.e() && w.e() <= kMaximalTargetExponent);
+  DCHECK(kMinimalTargetExponent >= -60);
+  DCHECK(kMaximalTargetExponent <= -32);
   // w is assumed to have an error less than 1 unit. Whenever w is scaled we
   // also scale its error.
   uint64_t w_error = 1;
@@ -543,9 +543,9 @@
   // data (the 'unit'), too.
   // Note that the multiplication by 10 does not overflow, because w.e >= -60
   // and thus one.e >= -60.
-  ASSERT(one.e() >= -60);
-  ASSERT(fractionals < one.f());
-  ASSERT(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
+  DCHECK(one.e() >= -60);
+  DCHECK(fractionals < one.f());
+  DCHECK(V8_2PART_UINT64_C(0xFFFFFFFF, FFFFFFFF) / 10 >= one.f());
   while (requested_digits > 0 && fractionals > w_error) {
     fractionals *= 10;
     w_error *= 10;
@@ -585,7 +585,7 @@
   // Grisu3 will never output representations that lie exactly on a boundary.
   DiyFp boundary_minus, boundary_plus;
   Double(v).NormalizedBoundaries(&boundary_minus, &boundary_plus);
-  ASSERT(boundary_plus.e() == w.e());
+  DCHECK(boundary_plus.e() == w.e());
   DiyFp ten_mk;  // Cached power of ten: 10^-k
   int mk;        // -k
   int ten_mk_minimal_binary_exponent =
@@ -596,7 +596,7 @@
       ten_mk_minimal_binary_exponent,
       ten_mk_maximal_binary_exponent,
       &ten_mk, &mk);
-  ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+  DCHECK((kMinimalTargetExponent <= w.e() + ten_mk.e() +
           DiyFp::kSignificandSize) &&
          (kMaximalTargetExponent >= w.e() + ten_mk.e() +
           DiyFp::kSignificandSize));
@@ -610,7 +610,7 @@
   // In other words: let f = scaled_w.f() and e = scaled_w.e(), then
   //           (f-1) * 2^e < w*10^k < (f+1) * 2^e
   DiyFp scaled_w = DiyFp::Times(w, ten_mk);
-  ASSERT(scaled_w.e() ==
+  DCHECK(scaled_w.e() ==
          boundary_plus.e() + ten_mk.e() + DiyFp::kSignificandSize);
   // In theory it would be possible to avoid some recomputations by computing
   // the difference between w and boundary_minus/plus (a power of 2) and to
@@ -655,7 +655,7 @@
       ten_mk_minimal_binary_exponent,
       ten_mk_maximal_binary_exponent,
       &ten_mk, &mk);
-  ASSERT((kMinimalTargetExponent <= w.e() + ten_mk.e() +
+  DCHECK((kMinimalTargetExponent <= w.e() + ten_mk.e() +
           DiyFp::kSignificandSize) &&
          (kMaximalTargetExponent >= w.e() + ten_mk.e() +
           DiyFp::kSignificandSize));
@@ -689,8 +689,8 @@
               Vector<char> buffer,
               int* length,
               int* decimal_point) {
-  ASSERT(v > 0);
-  ASSERT(!Double(v).IsSpecial());
+  DCHECK(v > 0);
+  DCHECK(!Double(v).IsSpecial());
 
   bool result = false;
   int decimal_exponent = 0;
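
For orientation, the Grisu3 entry point whose assertions are converted above is driven roughly like this; a sketch assuming the signature and constants declared in src/fast-dtoa.h (FastDtoa, FAST_DTOA_SHORTEST, kFastDtoaMaximalLength):

char digits[kFastDtoaMaximalLength + 1];
Vector<char> buffer(digits, arraysize(digits));
int length, decimal_point;
if (FastDtoa(0.1, FAST_DTOA_SHORTEST, 0, buffer, &length, &decimal_point)) {
  // digits holds "1", length == 1, decimal_point == 0; the value is
  // buffer * 10^(decimal_point - length) == 0.1.
} else {
  // Grisu3 bails out on the rare inputs it cannot round correctly, and
  // the caller falls back to a slower, exact printer.
}
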
diff --git a/src/field-index-inl.h b/src/field-index-inl.h
index d3bf94a..198422f 100644
--- a/src/field-index-inl.h
+++ b/src/field-index-inl.h
@@ -12,7 +12,7 @@
 
 
 inline FieldIndex FieldIndex::ForInObjectOffset(int offset, Map* map) {
-  ASSERT((offset % kPointerSize) == 0);
+  DCHECK((offset % kPointerSize) == 0);
   int index = offset / kPointerSize;
   if (map == NULL) {
     return FieldIndex(true, index, false, index + 1, 0, true);
@@ -29,7 +29,7 @@
 inline FieldIndex FieldIndex::ForPropertyIndex(Map* map,
                                                int property_index,
                                                bool is_double) {
-  ASSERT(map->instance_type() >= FIRST_NONSTRING_TYPE);
+  DCHECK(map->instance_type() >= FIRST_NONSTRING_TYPE);
   int inobject_properties = map->inobject_properties();
   bool is_inobject = property_index < inobject_properties;
   int first_inobject_offset;
@@ -45,6 +45,8 @@
 }
 
 
+// Takes an index as computed by GetLoadByFieldIndex and reconstructs a
+// FieldIndex object from it.
 inline FieldIndex FieldIndex::ForLoadByFieldIndex(Map* map, int orig_index) {
   int field_index = orig_index;
   int is_inobject = true;
@@ -60,8 +62,32 @@
     first_inobject_offset = map->GetInObjectPropertyOffset(0);
     field_index += JSObject::kHeaderSize / kPointerSize;
   }
-  return FieldIndex(is_inobject, field_index, is_double,
+  FieldIndex result(is_inobject, field_index, is_double,
                     map->inobject_properties(), first_inobject_offset);
+  DCHECK(result.GetLoadByFieldIndex() == orig_index);
+  return result;
+}
+
+
+// Returns the index format accepted by the HLoadFieldByIndex instruction.
+// (In-object: zero-based from (object start + JSObject::kHeaderSize),
+// out-of-object: zero-based from FixedArray::kHeaderSize.)
+inline int FieldIndex::GetLoadByFieldIndex() const {
+  // For efficiency, the LoadByFieldIndex instruction takes an index that is
+  // optimized for quick access. If the property is inline, the index is
+  // positive. If it's out-of-line, the encoded index is -raw_index - 1 to
+  // disambiguate the zero out-of-line index from the zero inobject case.
+  // The index itself is shifted up by one bit, the lower-most bit
+  // signifying if the field is a mutable double box (1) or not (0).
+  int result = index();
+  if (is_inobject()) {
+    result -= JSObject::kHeaderSize / kPointerSize;
+  } else {
+    result -= FixedArray::kHeaderSize / kPointerSize;
+    result = -result - 1;
+  }
+  result <<= 1;
+  return is_double() ? (result | 1) : result;
 }
 
 
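The encoding contract spelled out in GetLoadByFieldIndex above, and re-checked by the DCHECK in ForLoadByFieldIndex, round-trips as follows. A standalone sketch, not V8 code; shifts on negative values are assumed to behave as two's-complement arithmetic shifts, as V8 relies on:

#include <cassert>

// In-object fields encode as non-negative indices, out-of-object fields
// as -raw_index - 1; the low bit flags a mutable double box.
int EncodeLoadByFieldIndex(int raw_index, bool is_inobject, bool is_double) {
  int result = is_inobject ? raw_index : -raw_index - 1;
  result <<= 1;
  return is_double ? (result | 1) : result;
}

void DecodeLoadByFieldIndex(int encoded, int* raw_index, bool* is_inobject,
                            bool* is_double) {
  *is_double = (encoded & 1) != 0;
  encoded >>= 1;
  *is_inobject = encoded >= 0;
  *raw_index = *is_inobject ? encoded : -encoded - 1;
}

int main() {
  int raw;
  bool inobject, is_double;
  DecodeLoadByFieldIndex(EncodeLoadByFieldIndex(3, false, true), &raw,
                         &inobject, &is_double);
  assert(raw == 3 && !inobject && is_double);
  return 0;
}
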
@@ -84,6 +110,11 @@
 }
 
 
+inline FieldIndex FieldIndex::FromFieldAccessStubKey(int key) {
+  return FieldIndex(key);
+}
+
+
 inline int FieldIndex::GetKeyedLookupCacheIndex() const {
   if (FLAG_compiled_keyed_generic_loads) {
     return GetLoadByFieldIndex();
diff --git a/src/field-index.cc b/src/field-index.cc
deleted file mode 100644
index 5392afc..0000000
--- a/src/field-index.cc
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/field-index.h"
-#include "src/objects.h"
-#include "src/objects-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-FieldIndex FieldIndex::ForLookupResult(const LookupResult* lookup_result) {
-  Map* map = lookup_result->holder()->map();
-  return ForPropertyIndex(map,
-                          lookup_result->GetFieldIndexFromMap(map),
-                          lookup_result->representation().IsDouble());
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/field-index.h b/src/field-index.h
index 0f77c8c..2558529 100644
--- a/src/field-index.h
+++ b/src/field-index.h
@@ -5,8 +5,8 @@
 #ifndef V8_FIELD_INDEX_H_
 #define V8_FIELD_INDEX_H_
 
-#include "src/utils.h"
 #include "src/property-details.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -17,16 +17,18 @@
 // from a property index. When available, the wrapper class captures additional
 // information to allow the field index to be translated back into the property
 // index it was originally generated from.
-class FieldIndex V8_FINAL {
+class FieldIndex FINAL {
  public:
   static FieldIndex ForPropertyIndex(Map* map,
                                      int index,
                                      bool is_double = false);
   static FieldIndex ForInObjectOffset(int offset, Map* map = NULL);
-  static FieldIndex ForLookupResult(const LookupResult* result);
   static FieldIndex ForDescriptor(Map* map, int descriptor_index);
   static FieldIndex ForLoadByFieldIndex(Map* map, int index);
   static FieldIndex ForKeyedLookupCacheIndex(Map* map, int index);
+  static FieldIndex FromFieldAccessStubKey(int key);
+
+  int GetLoadByFieldIndex() const;
 
   bool is_inobject() const {
     return IsInObjectBits::decode(bit_field_);
@@ -40,17 +42,20 @@
     return index() * kPointerSize;
   }
 
+  // Zero-indexed from beginning of the object.
   int index() const {
     return IndexBits::decode(bit_field_);
   }
 
   int outobject_array_index() const {
-    ASSERT(!is_inobject());
+    DCHECK(!is_inobject());
     return index() - first_inobject_property_offset() / kPointerSize;
   }
 
+  // Zero-based from the first inobject property. Overflows to out-of-object
+  // properties.
   int property_index() const {
-    ASSERT(!IsHiddenField::decode(bit_field_));
+    DCHECK(!IsHiddenField::decode(bit_field_));
     int result = index() - first_inobject_property_offset() / kPointerSize;
     if (!is_inobject()) {
       result += InObjectPropertyBits::decode(bit_field_);
@@ -58,24 +63,9 @@
     return result;
   }
 
-  int GetLoadByFieldIndex() const {
-    // For efficiency, the LoadByFieldIndex instruction takes an index that is
-    // optimized for quick access. If the property is inline, the index is
-    // positive. If it's out-of-line, the encoded index is -raw_index - 1 to
-    // disambiguate the zero out-of-line index from the zero inobject case.
-    // The index itself is shifted up by one bit, the lower-most bit
-    // signifying if the field is a mutable double box (1) or not (0).
-    int result = index() - first_inobject_property_offset() / kPointerSize;
-    if (!is_inobject()) {
-      result = -result - 1;
-    }
-    result <<= 1;
-    return is_double() ? (result | 1) : result;
-  }
-
   int GetKeyedLookupCacheIndex() const;
 
-  int GetLoadFieldStubKey() const {
+  int GetFieldAccessStubKey() const {
     return bit_field_ &
         (IsInObjectBits::kMask | IsDoubleBits::kMask | IndexBits::kMask);
   }
@@ -84,7 +74,7 @@
   FieldIndex(bool is_inobject, int local_index, bool is_double,
              int inobject_properties, int first_inobject_property_offset,
              bool is_hidden = false) {
-    ASSERT((first_inobject_property_offset & (kPointerSize - 1)) == 0);
+    DCHECK((first_inobject_property_offset & (kPointerSize - 1)) == 0);
     bit_field_ = IsInObjectBits::encode(is_inobject) |
       IsDoubleBits::encode(is_double) |
       FirstInobjectPropertyOffsetBits::encode(first_inobject_property_offset) |
@@ -93,22 +83,27 @@
       InObjectPropertyBits::encode(inobject_properties);
   }
 
+  explicit FieldIndex(int bit_field) : bit_field_(bit_field) {}
+
   int first_inobject_property_offset() const {
-    ASSERT(!IsHiddenField::decode(bit_field_));
+    DCHECK(!IsHiddenField::decode(bit_field_));
     return FirstInobjectPropertyOffsetBits::decode(bit_field_);
   }
 
   static const int kIndexBitsSize = kDescriptorIndexBitCount + 1;
 
+  // Index from beginning of object.
   class IndexBits: public BitField<int, 0, kIndexBitsSize> {};
   class IsInObjectBits: public BitField<bool, IndexBits::kNext, 1> {};
   class IsDoubleBits: public BitField<bool, IsInObjectBits::kNext, 1> {};
-  class InObjectPropertyBits: public BitField<int, IsDoubleBits::kNext,
-                                              kDescriptorIndexBitCount> {};
-  class FirstInobjectPropertyOffsetBits:
-      public BitField<int, InObjectPropertyBits::kNext, 7> {};
-  class IsHiddenField:
-      public BitField<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {};
+  // Number of inobject properties.
+  class InObjectPropertyBits
+      : public BitField<int, IsDoubleBits::kNext, kDescriptorIndexBitCount> {};
+  // Offset of first inobject property from beginning of object.
+  class FirstInobjectPropertyOffsetBits
+      : public BitField<int, InObjectPropertyBits::kNext, 7> {};
+  class IsHiddenField
+      : public BitField<bool, FirstInobjectPropertyOffsetBits::kNext, 1> {};
   STATIC_ASSERT(IsHiddenField::kNext <= 32);
 
   int bit_field_;
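
An editorial aside: the BitField helpers chained through kNext pack all of
FieldIndex's state into the single 32-bit bit_field_ word, and
GetFieldAccessStubKey simply masks that word down to the three fields a stub
cares about, which is why FromFieldAccessStubKey can rebuild a FieldIndex from
the key alone. A minimal sketch of the pattern follows; the template is
illustrative, not V8's actual BitField.

    #include <cassert>
    #include <cstdint>

    // Each field occupies `size` bits starting at `shift`; kNext lets the
    // next field start right after this one without overlap.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static constexpr int kNext = shift + size;
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << shift;
      }
      static T decode(uint32_t bits) {
        return static_cast<T>((bits & kMask) >> shift);
      }
    };

    // Chained the same way as FieldIndex's layout: an index, then two flags.
    typedef BitField<int, 0, 10> IndexBits;
    typedef BitField<bool, IndexBits::kNext, 1> IsInObjectBits;
    typedef BitField<bool, IsInObjectBits::kNext, 1> IsDoubleBits;

    int main() {
      uint32_t bits = IndexBits::encode(42) | IsInObjectBits::encode(true) |
                      IsDoubleBits::encode(false);
      assert(IndexBits::decode(bits) == 42);
      assert(IsInObjectBits::decode(bits));
      assert(!IsDoubleBits::decode(bits));
      // A stub-key-style mask keeps only the fields of interest.
      uint32_t key = bits & (IndexBits::kMask | IsDoubleBits::kMask);
      assert(IndexBits::decode(key) == 42);
      return 0;
    }
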
diff --git a/src/fixed-dtoa.cc b/src/fixed-dtoa.cc
index 4541e85..56fe9ab 100644
--- a/src/fixed-dtoa.cc
+++ b/src/fixed-dtoa.cc
@@ -5,7 +5,7 @@
 #include <cmath>
 
 #include "include/v8stdint.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/utils.h"
 
 #include "src/double.h"
@@ -35,11 +35,11 @@
     accumulator >>= 32;
     accumulator = accumulator + (high_bits_ >> 32) * multiplicand;
     high_bits_ = (accumulator << 32) + part;
-    ASSERT((accumulator >> 32) == 0);
+    DCHECK((accumulator >> 32) == 0);
   }
 
   void Shift(int shift_amount) {
-    ASSERT(-64 <= shift_amount && shift_amount <= 64);
+    DCHECK(-64 <= shift_amount && shift_amount <= 64);
     if (shift_amount == 0) {
       return;
     } else if (shift_amount == -64) {
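
An editorial aside: Multiply, shown at the top of this hunk, propagates
carries through 32-bit limbs so that a 128-bit value held in two 64-bit
halves can be scaled by a 32-bit multiplicand without a 128-bit integer type.
A simplified sketch of the same carry scheme (illustrative, not the full
class):

    #include <cassert>
    #include <cstdint>

    struct UInt128Sketch {
      uint64_t high_bits_;
      uint64_t low_bits_;

      // Multiply 32 bits at a time; 'accumulator' carries into each limb.
      void Multiply(uint32_t multiplicand) {
        uint64_t accumulator = (low_bits_ & 0xFFFFFFFF) * multiplicand;
        uint32_t part = static_cast<uint32_t>(accumulator & 0xFFFFFFFF);
        accumulator >>= 32;
        accumulator += (low_bits_ >> 32) * multiplicand;
        low_bits_ = (accumulator << 32) + part;
        accumulator >>= 32;
        accumulator += (high_bits_ & 0xFFFFFFFF) * multiplicand;
        part = static_cast<uint32_t>(accumulator & 0xFFFFFFFF);
        accumulator >>= 32;
        accumulator += (high_bits_ >> 32) * multiplicand;
        high_bits_ = (accumulator << 32) + part;
        assert((accumulator >> 32) == 0);  // must not overflow 128 bits
      }
    };

    int main() {
      UInt128Sketch x = {0, 0x8000000000000000ULL};  // 2^63
      x.Multiply(10);
      // 10 * 2^63 == 5 * 2^64.
      assert(x.high_bits_ == 5 && x.low_bits_ == 0);
      return 0;
    }
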
@@ -212,13 +212,13 @@
 static void FillFractionals(uint64_t fractionals, int exponent,
                             int fractional_count, Vector<char> buffer,
                             int* length, int* decimal_point) {
-  ASSERT(-128 <= exponent && exponent <= 0);
+  DCHECK(-128 <= exponent && exponent <= 0);
   // 'fractionals' is a fixed-point number, with binary point at bit
   // (-exponent). Inside the function the non-converted remainder of fractionals
   // is a fixed-point number, with binary point at bit 'point'.
   if (-exponent <= 64) {
     // One 64 bit number is sufficient.
-    ASSERT(fractionals >> 56 == 0);
+    DCHECK(fractionals >> 56 == 0);
     int point = -exponent;
     for (int i = 0; i < fractional_count; ++i) {
       if (fractionals == 0) break;
@@ -244,7 +244,7 @@
       RoundUp(buffer, length, decimal_point);
     }
   } else {  // We need 128 bits.
-    ASSERT(64 < -exponent && -exponent <= 128);
+    DCHECK(64 < -exponent && -exponent <= 128);
     UInt128 fractionals128 = UInt128(fractionals, 0);
     fractionals128.Shift(-exponent - 64);
     int point = 128;
@@ -362,7 +362,7 @@
   } else if (exponent < -128) {
     // This configuration (with at most 20 digits) means that all digits must be
     // 0.
-    ASSERT(fractional_count <= 20);
+    DCHECK(fractional_count <= 20);
     buffer[0] = '\0';
     *length = 0;
     *decimal_point = -fractional_count;
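
An editorial aside: the digit loops in FillFractionals all rely on the same
idea, that a fixed-point fraction with its binary point at bit 'point' can be
scaled by ten by multiplying by 5 and moving the point down one bit, after
which the bits above the point fall out as the next decimal digit. A small
self-contained sketch of that idea (not the exact V8 routine):

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 0.8125 == 13/16: binary point at bit 4, stored value 0b1101.
      uint64_t fractionals = 13;
      int point = 4;
      char buffer[8];
      int length = 0;
      while (fractionals != 0 && length < 4) {
        fractionals *= 5;  // x10 overall, because the point also moves down
        point--;
        int digit = static_cast<int>(fractionals >> point);
        buffer[length++] = static_cast<char>('0' + digit);
        fractionals -= static_cast<uint64_t>(digit) << point;
      }
      buffer[length] = '\0';
      std::printf("0.%s\n", buffer);  // prints 0.8125
      return 0;
    }
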
diff --git a/src/flag-definitions.h b/src/flag-definitions.h
index 1d83481..49f0714 100644
--- a/src/flag-definitions.h
+++ b/src/flag-definitions.h
@@ -10,20 +10,24 @@
 // which can be included multiple times in different modes.  It expects to have
 // a mode defined before it's included.  The modes are FLAG_MODE_... below:
 
+#define DEFINE_IMPLICATION(whenflag, thenflag)              \
+  DEFINE_VALUE_IMPLICATION(whenflag, thenflag, true)
+
+#define DEFINE_NEG_IMPLICATION(whenflag, thenflag)          \
+  DEFINE_VALUE_IMPLICATION(whenflag, thenflag, false)
+
 // We want to declare the names of the variables for the header file.  Normally
 // this will just be an extern declaration, but for a readonly flag we let the
 // compiler make better optimizations by giving it the value.
 #if defined(FLAG_MODE_DECLARE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
-  extern ctype FLAG_##nam;
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) extern ctype FLAG_##nam;
 #define FLAG_READONLY(ftype, ctype, nam, def, cmt) \
   static ctype const FLAG_##nam = def;
 
 // We want to supply the actual storage and value for the flag variable in the
 // .cc file.  We only do this for writable flags.
 #elif defined(FLAG_MODE_DEFINE)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
-  ctype FLAG_##nam = def;
+#define FLAG_FULL(ftype, ctype, nam, def, cmt) ctype FLAG_##nam = def;
 
 // We need to define all of our default values so that the Flag structure can
 // access them by pointer.  These are just used internally inside of one .cc,
@@ -35,19 +39,20 @@
 // We want to write entries into our meta data table, for internal parsing and
 // printing / etc in the flag parser code.  We only do this for writable flags.
 #elif defined(FLAG_MODE_META)
-#define FLAG_FULL(ftype, ctype, nam, def, cmt) \
-  { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false },
-#define FLAG_ALIAS(ftype, ctype, alias, nam) \
-  { Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \
-    "alias for --"#nam, false },
+#define FLAG_FULL(ftype, ctype, nam, def, cmt)                              \
+  { Flag::TYPE_##ftype, #nam, &FLAG_##nam, &FLAGDEFAULT_##nam, cmt, false } \
+  ,
+#define FLAG_ALIAS(ftype, ctype, alias, nam)                     \
+  {                                                              \
+    Flag::TYPE_##ftype, #alias, &FLAG_##nam, &FLAGDEFAULT_##nam, \
+        "alias for --" #nam, false                               \
+  }                                                              \
+  ,
 
 // We produce the code to set flags when it is implied by another flag.
 #elif defined(FLAG_MODE_DEFINE_IMPLICATIONS)
-#define DEFINE_implication(whenflag, thenflag) \
-  if (FLAG_##whenflag) FLAG_##thenflag = true;
-
-#define DEFINE_neg_implication(whenflag, thenflag) \
-  if (FLAG_##whenflag) FLAG_##thenflag = false;
+#define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value) \
+  if (FLAG_##whenflag) FLAG_##thenflag = value;
 
 #else
 #error No mode supplied when including flags.defs
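
An editorial aside for readers unfamiliar with the trick: this header is an
X-macro list. It is included once per mode, with FLAG_FULL (and now
DEFINE_VALUE_IMPLICATION) bound differently each time, so the single flag
list expands into declarations, storage, a metadata table, or implication
code. A toy version of the same multi-mode expansion (hypothetical names, not
the real flags.defs):

    #include <cstdio>

    #define MY_FLAG_LIST(V)      \
      V(bool, always_opt, false) \
      V(int, max_opt_count, 10)

    // Mode 1: extern declarations (the FLAG_MODE_DECLARE analogue).
    #define DECLARE_FLAG(ctype, nam, def) extern ctype FLAG_##nam;
    MY_FLAG_LIST(DECLARE_FLAG)
    #undef DECLARE_FLAG

    // Mode 2: storage with defaults (the FLAG_MODE_DEFINE analogue).
    #define DEFINE_FLAG(ctype, nam, def) ctype FLAG_##nam = def;
    MY_FLAG_LIST(DEFINE_FLAG)
    #undef DEFINE_FLAG

    // Mode 3: a metadata table (the FLAG_MODE_META analogue).
    struct FlagMeta { const char* name; };
    static const FlagMeta kFlags[] = {
    #define META_FLAG(ctype, nam, def) {#nam},
      MY_FLAG_LIST(META_FLAG)
    #undef META_FLAG
    };

    int main() {
      // An implication is just a conditional assignment, exactly as
      // DEFINE_VALUE_IMPLICATION expands under FLAG_MODE_DEFINE_IMPLICATIONS.
      if (FLAG_always_opt) FLAG_max_opt_count = 1;
      for (const FlagMeta& f : kFlags) std::printf("--%s\n", f.name);
      return 0;
    }
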
@@ -66,12 +71,8 @@
 #define FLAG_ALIAS(ftype, ctype, alias, nam)
 #endif
 
-#ifndef DEFINE_implication
-#define DEFINE_implication(whenflag, thenflag)
-#endif
-
-#ifndef DEFINE_neg_implication
-#define DEFINE_neg_implication(whenflag, thenflag)
+#ifndef DEFINE_VALUE_IMPLICATION
+#define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value)
 #endif
 
 #define COMMA ,
@@ -79,10 +80,8 @@
 #ifdef FLAG_MODE_DECLARE
 // Structure used to hold a collection of arguments to the JavaScript code.
 struct JSArguments {
-public:
-  inline const char*& operator[] (int idx) const {
-    return argv[idx];
-  }
+ public:
+  inline const char*& operator[](int idx) const { return argv[idx]; }
   static JSArguments Create(int argc, const char** argv) {
     JSArguments args;
     args.argc = argc;
@@ -105,42 +104,41 @@
 };
 #endif
 
-#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST)
-# define ENABLE_VFP3_DEFAULT true
+#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
+#define ENABLE_VFP3_DEFAULT true
 #else
-# define ENABLE_VFP3_DEFAULT false
+#define ENABLE_VFP3_DEFAULT false
 #endif
-#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST)
-# define ENABLE_ARMV7_DEFAULT true
+#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
+#define ENABLE_ARMV7_DEFAULT true
 #else
-# define ENABLE_ARMV7_DEFAULT false
+#define ENABLE_ARMV7_DEFAULT false
 #endif
-#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST)
-# define ENABLE_32DREGS_DEFAULT true
+#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST_NO_FEATURE_PROBE)
+#define ENABLE_32DREGS_DEFAULT true
 #else
-# define ENABLE_32DREGS_DEFAULT false
+#define ENABLE_32DREGS_DEFAULT false
 #endif
-#if (defined CAN_USE_NEON) || !(defined ARM_TEST)
+#if (defined CAN_USE_NEON) || !(defined ARM_TEST_NO_FEATURE_PROBE)
 # define ENABLE_NEON_DEFAULT true
 #else
 # define ENABLE_NEON_DEFAULT false
 #endif
 
-#define DEFINE_bool(nam, def, cmt)   FLAG(BOOL, bool, nam, def, cmt)
-#define DEFINE_maybe_bool(nam, cmt)  FLAG(MAYBE_BOOL, MaybeBoolFlag, nam,  \
-                                          { false COMMA false }, cmt)
-#define DEFINE_int(nam, def, cmt)    FLAG(INT, int, nam, def, cmt)
-#define DEFINE_float(nam, def, cmt)  FLAG(FLOAT, double, nam, def, cmt)
-#define DEFINE_string(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
-#define DEFINE_args(nam, cmt)        FLAG(ARGS, JSArguments, nam, \
-                                          { 0 COMMA NULL }, cmt)
+#define DEFINE_BOOL(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
+#define DEFINE_MAYBE_BOOL(nam, cmt) \
+  FLAG(MAYBE_BOOL, MaybeBoolFlag, nam, {false COMMA false}, cmt)
+#define DEFINE_INT(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
+#define DEFINE_FLOAT(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
+#define DEFINE_STRING(nam, def, cmt) FLAG(STRING, const char*, nam, def, cmt)
+#define DEFINE_ARGS(nam, cmt) FLAG(ARGS, JSArguments, nam, {0 COMMA NULL}, cmt)
 
-#define DEFINE_ALIAS_bool(alias, nam)  FLAG_ALIAS(BOOL, bool, alias, nam)
-#define DEFINE_ALIAS_int(alias, nam)   FLAG_ALIAS(INT, int, alias, nam)
-#define DEFINE_ALIAS_float(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam)
-#define DEFINE_ALIAS_string(alias, nam) \
+#define DEFINE_ALIAS_BOOL(alias, nam) FLAG_ALIAS(BOOL, bool, alias, nam)
+#define DEFINE_ALIAS_INT(alias, nam) FLAG_ALIAS(INT, int, alias, nam)
+#define DEFINE_ALIAS_FLOAT(alias, nam) FLAG_ALIAS(FLOAT, double, alias, nam)
+#define DEFINE_ALIAS_STRING(alias, nam) \
   FLAG_ALIAS(STRING, const char*, alias, nam)
-#define DEFINE_ALIAS_args(alias, nam)  FLAG_ALIAS(ARGS, JSArguments, alias, nam)
+#define DEFINE_ALIAS_ARGS(alias, nam) FLAG_ALIAS(ARGS, JSArguments, alias, nam)
 
 //
 // Flags in all modes.
@@ -148,542 +146,560 @@
 #define FLAG FLAG_FULL
 
 // Flags for language modes and experimental language features.
-DEFINE_bool(use_strict, false, "enforce strict mode")
-DEFINE_bool(es_staging, false, "enable upcoming ES6+ features")
+DEFINE_BOOL(use_strict, false, "enforce strict mode")
+DEFINE_BOOL(es_staging, false, "enable upcoming ES6+ features")
 
-DEFINE_bool(harmony_typeof, false, "enable harmony semantics for typeof")
-DEFINE_bool(harmony_scoping, false, "enable harmony block scoping")
-DEFINE_bool(harmony_modules, false,
+DEFINE_BOOL(harmony_scoping, false, "enable harmony block scoping")
+DEFINE_BOOL(harmony_modules, false,
             "enable harmony modules (implies block scoping)")
-DEFINE_bool(harmony_symbols, false, "enable harmony symbols")
-DEFINE_bool(harmony_proxies, false, "enable harmony proxies")
-DEFINE_bool(harmony_collections, false,
-            "enable harmony collections (sets, maps)")
-DEFINE_bool(harmony_generators, false, "enable harmony generators")
-DEFINE_bool(harmony_iteration, false, "enable harmony iteration (for-of)")
-DEFINE_bool(harmony_numeric_literals, false,
+DEFINE_BOOL(harmony_proxies, false, "enable harmony proxies")
+DEFINE_BOOL(harmony_numeric_literals, false,
             "enable harmony numeric literals (0o77, 0b11)")
-DEFINE_bool(harmony_strings, false, "enable harmony string")
-DEFINE_bool(harmony_arrays, false, "enable harmony arrays")
-DEFINE_bool(harmony_maths, false, "enable harmony math functions")
-DEFINE_bool(harmony, false, "enable all harmony features (except typeof)")
+DEFINE_BOOL(harmony_strings, false, "enable harmony strings")
+DEFINE_BOOL(harmony_arrays, false, "enable harmony arrays")
+DEFINE_BOOL(harmony_arrow_functions, false, "enable harmony arrow functions")
+DEFINE_BOOL(harmony_classes, false, "enable harmony classes")
+DEFINE_BOOL(harmony_object_literals, false,
+            "enable harmony object literal extensions")
+DEFINE_BOOL(harmony_regexps, false, "enable regexp-related harmony features")
+DEFINE_BOOL(harmony, false, "enable all harmony features (except proxies)")
 
-DEFINE_implication(harmony, harmony_scoping)
-DEFINE_implication(harmony, harmony_modules)
-DEFINE_implication(harmony, harmony_proxies)
-DEFINE_implication(harmony, harmony_collections)
-DEFINE_implication(harmony, harmony_generators)
-DEFINE_implication(harmony, harmony_iteration)
-DEFINE_implication(harmony, harmony_numeric_literals)
-DEFINE_implication(harmony, harmony_strings)
-DEFINE_implication(harmony, harmony_arrays)
-DEFINE_implication(harmony_modules, harmony_scoping)
-DEFINE_implication(harmony_collections, harmony_symbols)
-DEFINE_implication(harmony_generators, harmony_symbols)
-DEFINE_implication(harmony_iteration, harmony_symbols)
+DEFINE_IMPLICATION(harmony, harmony_scoping)
+DEFINE_IMPLICATION(harmony, harmony_modules)
+// TODO(rossberg): Reenable when problems are sorted out.
+// DEFINE_IMPLICATION(harmony, harmony_proxies)
+DEFINE_IMPLICATION(harmony, harmony_numeric_literals)
+DEFINE_IMPLICATION(harmony, harmony_strings)
+DEFINE_IMPLICATION(harmony, harmony_arrays)
+DEFINE_IMPLICATION(harmony, harmony_arrow_functions)
+DEFINE_IMPLICATION(harmony, harmony_classes)
+DEFINE_IMPLICATION(harmony, harmony_object_literals)
+DEFINE_IMPLICATION(harmony, harmony_regexps)
+DEFINE_IMPLICATION(harmony_modules, harmony_scoping)
+DEFINE_IMPLICATION(harmony_classes, harmony_scoping)
+DEFINE_IMPLICATION(harmony_classes, harmony_object_literals)
 
-DEFINE_implication(harmony, es_staging)
-DEFINE_implication(es_staging, harmony_maths)
-DEFINE_implication(es_staging, harmony_symbols)
-DEFINE_implication(es_staging, harmony_collections)
+DEFINE_IMPLICATION(harmony, es_staging)
 
 // Flags for experimental implementation features.
-DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
-DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
-DEFINE_bool(compiled_keyed_dictionary_loads, true,
-            "use optimizing compiler to generate keyed dictionary load stubs")
-DEFINE_bool(compiled_keyed_generic_loads, false,
+DEFINE_BOOL(compiled_keyed_generic_loads, false,
             "use optimizing compiler to generate keyed generic load stubs")
-DEFINE_bool(clever_optimizations, true,
+DEFINE_BOOL(clever_optimizations, true,
             "Optimize object size, Array shift, DOM strings and string +")
 // TODO(hpayer): We will remove this flag as soon as we have pretenuring
 // support for specific allocation sites.
-DEFINE_bool(pretenuring_call_new, false, "pretenure call new")
-DEFINE_bool(allocation_site_pretenuring, true,
+DEFINE_BOOL(pretenuring_call_new, false, "pretenure call new")
+DEFINE_BOOL(allocation_site_pretenuring, true,
             "pretenure with allocation sites")
-DEFINE_bool(trace_pretenuring, false,
+DEFINE_BOOL(trace_pretenuring, false,
             "trace pretenuring decisions of HAllocate instructions")
-DEFINE_bool(trace_pretenuring_statistics, false,
+DEFINE_BOOL(trace_pretenuring_statistics, false,
             "trace allocation site pretenuring statistics")
-DEFINE_bool(track_fields, true, "track fields with only smi values")
-DEFINE_bool(track_double_fields, true, "track fields with double values")
-DEFINE_bool(track_heap_object_fields, true, "track fields with heap values")
-DEFINE_bool(track_computed_fields, true, "track computed boilerplate fields")
-DEFINE_implication(track_double_fields, track_fields)
-DEFINE_implication(track_heap_object_fields, track_fields)
-DEFINE_implication(track_computed_fields, track_fields)
-DEFINE_bool(track_field_types, true, "track field types")
-DEFINE_implication(track_field_types, track_fields)
-DEFINE_implication(track_field_types, track_heap_object_fields)
-DEFINE_bool(smi_binop, true, "support smi representation in binary operations")
+DEFINE_BOOL(track_fields, true, "track fields with only smi values")
+DEFINE_BOOL(track_double_fields, true, "track fields with double values")
+DEFINE_BOOL(track_heap_object_fields, true, "track fields with heap values")
+DEFINE_BOOL(track_computed_fields, true, "track computed boilerplate fields")
+DEFINE_IMPLICATION(track_double_fields, track_fields)
+DEFINE_IMPLICATION(track_heap_object_fields, track_fields)
+DEFINE_IMPLICATION(track_computed_fields, track_fields)
+DEFINE_BOOL(track_field_types, true, "track field types")
+DEFINE_IMPLICATION(track_field_types, track_fields)
+DEFINE_IMPLICATION(track_field_types, track_heap_object_fields)
+DEFINE_BOOL(smi_binop, true, "support smi representation in binary operations")
+DEFINE_BOOL(vector_ics, false, "support vector-based ics")
 
 // Flags for optimization types.
-DEFINE_bool(optimize_for_size, false,
+DEFINE_BOOL(optimize_for_size, false,
             "Enables optimizations which favor memory size over execution "
             "speed.")
 
+DEFINE_VALUE_IMPLICATION(optimize_for_size, max_semi_space_size, 1)
+
 // Flags for data representation optimizations
-DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
-DEFINE_bool(string_slices, true, "use string slices")
+DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
+DEFINE_BOOL(string_slices, true, "use string slices")
 
 // Flags for Crankshaft.
-DEFINE_bool(crankshaft, true, "use crankshaft")
-DEFINE_string(hydrogen_filter, "*", "optimization filter")
-DEFINE_bool(use_gvn, true, "use hydrogen global value numbering")
-DEFINE_int(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
-DEFINE_bool(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
-DEFINE_bool(use_inlining, true, "use function inlining")
-DEFINE_bool(use_escape_analysis, true, "use hydrogen escape analysis")
-DEFINE_bool(use_allocation_folding, true, "use allocation folding")
-DEFINE_bool(use_local_allocation_folding, false, "only fold in basic blocks")
-DEFINE_bool(use_write_barrier_elimination, true,
+DEFINE_BOOL(crankshaft, true, "use crankshaft")
+DEFINE_STRING(hydrogen_filter, "*", "optimization filter")
+DEFINE_BOOL(use_gvn, true, "use hydrogen global value numbering")
+DEFINE_INT(gvn_iterations, 3, "maximum number of GVN fix-point iterations")
+DEFINE_BOOL(use_canonicalizing, true, "use hydrogen instruction canonicalizing")
+DEFINE_BOOL(use_inlining, true, "use function inlining")
+DEFINE_BOOL(use_escape_analysis, true, "use hydrogen escape analysis")
+DEFINE_BOOL(use_allocation_folding, true, "use allocation folding")
+DEFINE_BOOL(use_local_allocation_folding, false, "only fold in basic blocks")
+DEFINE_BOOL(use_write_barrier_elimination, true,
             "eliminate write barriers targeting allocations in optimized code")
-DEFINE_int(max_inlining_levels, 5, "maximum number of inlining levels")
-DEFINE_int(max_inlined_source_size, 600,
+DEFINE_INT(max_inlining_levels, 5, "maximum number of inlining levels")
+DEFINE_INT(max_inlined_source_size, 600,
            "maximum source size in bytes considered for a single inlining")
-DEFINE_int(max_inlined_nodes, 196,
+DEFINE_INT(max_inlined_nodes, 196,
            "maximum number of AST nodes considered for a single inlining")
-DEFINE_int(max_inlined_nodes_cumulative, 400,
+DEFINE_INT(max_inlined_nodes_cumulative, 400,
            "maximum cumulative number of AST nodes considered for inlining")
-DEFINE_bool(loop_invariant_code_motion, true, "loop invariant code motion")
-DEFINE_bool(fast_math, true, "faster (but maybe less accurate) math functions")
-DEFINE_bool(collect_megamorphic_maps_from_stub_cache, true,
+DEFINE_BOOL(loop_invariant_code_motion, true, "loop invariant code motion")
+DEFINE_BOOL(fast_math, true, "faster (but maybe less accurate) math functions")
+DEFINE_BOOL(collect_megamorphic_maps_from_stub_cache, true,
             "crankshaft harvests type feedback from stub cache")
-DEFINE_bool(hydrogen_stats, false, "print statistics for hydrogen")
-DEFINE_bool(trace_check_elimination, false, "trace check elimination phase")
-DEFINE_bool(trace_hydrogen, false, "trace generated hydrogen to file")
-DEFINE_string(trace_hydrogen_filter, "*", "hydrogen tracing filter")
-DEFINE_bool(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
-DEFINE_string(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
-DEFINE_string(trace_phase, "HLZ", "trace generated IR for specified phases")
-DEFINE_bool(trace_inlining, false, "trace inlining decisions")
-DEFINE_bool(trace_load_elimination, false, "trace load elimination")
-DEFINE_bool(trace_store_elimination, false, "trace store elimination")
-DEFINE_bool(trace_alloc, false, "trace register allocator")
-DEFINE_bool(trace_all_uses, false, "trace all use positions")
-DEFINE_bool(trace_range, false, "trace range analysis")
-DEFINE_bool(trace_gvn, false, "trace global value numbering")
-DEFINE_bool(trace_representation, false, "trace representation types")
-DEFINE_bool(trace_removable_simulates, false, "trace removable simulates")
-DEFINE_bool(trace_escape_analysis, false, "trace hydrogen escape analysis")
-DEFINE_bool(trace_allocation_folding, false, "trace allocation folding")
-DEFINE_bool(trace_track_allocation_sites, false,
+DEFINE_BOOL(hydrogen_stats, false, "print statistics for hydrogen")
+DEFINE_BOOL(trace_check_elimination, false, "trace check elimination phase")
+DEFINE_BOOL(trace_hydrogen, false, "trace generated hydrogen to file")
+DEFINE_STRING(trace_hydrogen_filter, "*", "hydrogen tracing filter")
+DEFINE_BOOL(trace_hydrogen_stubs, false, "trace generated hydrogen for stubs")
+DEFINE_STRING(trace_hydrogen_file, NULL, "trace hydrogen to given file name")
+DEFINE_STRING(trace_phase, "HLZ", "trace generated IR for specified phases")
+DEFINE_BOOL(trace_inlining, false, "trace inlining decisions")
+DEFINE_BOOL(trace_load_elimination, false, "trace load elimination")
+DEFINE_BOOL(trace_store_elimination, false, "trace store elimination")
+DEFINE_BOOL(trace_alloc, false, "trace register allocator")
+DEFINE_BOOL(trace_all_uses, false, "trace all use positions")
+DEFINE_BOOL(trace_range, false, "trace range analysis")
+DEFINE_BOOL(trace_gvn, false, "trace global value numbering")
+DEFINE_BOOL(trace_representation, false, "trace representation types")
+DEFINE_BOOL(trace_removable_simulates, false, "trace removable simulates")
+DEFINE_BOOL(trace_escape_analysis, false, "trace hydrogen escape analysis")
+DEFINE_BOOL(trace_allocation_folding, false, "trace allocation folding")
+DEFINE_BOOL(trace_track_allocation_sites, false,
             "trace the tracking of allocation sites")
-DEFINE_bool(trace_migration, false, "trace object migration")
-DEFINE_bool(trace_generalization, false, "trace map generalization")
-DEFINE_bool(stress_pointer_maps, false, "pointer map for every instruction")
-DEFINE_bool(stress_environments, false, "environment for every instruction")
-DEFINE_int(deopt_every_n_times, 0,
+DEFINE_BOOL(trace_migration, false, "trace object migration")
+DEFINE_BOOL(trace_generalization, false, "trace map generalization")
+DEFINE_BOOL(stress_pointer_maps, false, "pointer map for every instruction")
+DEFINE_BOOL(stress_environments, false, "environment for every instruction")
+DEFINE_INT(deopt_every_n_times, 0,
            "deoptimize every n times a deopt point is passed")
-DEFINE_int(deopt_every_n_garbage_collections, 0,
+DEFINE_INT(deopt_every_n_garbage_collections, 0,
            "deoptimize every n garbage collections")
-DEFINE_bool(print_deopt_stress, false, "print number of possible deopt points")
-DEFINE_bool(trap_on_deopt, false, "put a break point before deoptimizing")
-DEFINE_bool(trap_on_stub_deopt, false,
+DEFINE_BOOL(print_deopt_stress, false, "print number of possible deopt points")
+DEFINE_BOOL(trap_on_deopt, false, "put a break point before deoptimizing")
+DEFINE_BOOL(trap_on_stub_deopt, false,
             "put a break point before deoptimizing a stub")
-DEFINE_bool(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
-DEFINE_bool(polymorphic_inlining, true, "polymorphic inlining")
-DEFINE_bool(use_osr, true, "use on-stack replacement")
-DEFINE_bool(array_bounds_checks_elimination, true,
+DEFINE_BOOL(deoptimize_uncommon_cases, true, "deoptimize uncommon cases")
+DEFINE_BOOL(polymorphic_inlining, true, "polymorphic inlining")
+DEFINE_BOOL(use_osr, true, "use on-stack replacement")
+DEFINE_BOOL(array_bounds_checks_elimination, true,
             "perform array bounds checks elimination")
-DEFINE_bool(trace_bce, false, "trace array bounds check elimination")
-DEFINE_bool(array_bounds_checks_hoisting, false,
+DEFINE_BOOL(trace_bce, false, "trace array bounds check elimination")
+DEFINE_BOOL(array_bounds_checks_hoisting, false,
             "perform array bounds checks hoisting")
-DEFINE_bool(array_index_dehoisting, true,
-            "perform array index dehoisting")
-DEFINE_bool(analyze_environment_liveness, true,
+DEFINE_BOOL(array_index_dehoisting, true, "perform array index dehoisting")
+DEFINE_BOOL(analyze_environment_liveness, true,
             "analyze liveness of environment slots and zap dead values")
-DEFINE_bool(load_elimination, true, "use load elimination")
-DEFINE_bool(check_elimination, true, "use check elimination")
-DEFINE_bool(store_elimination, false, "use store elimination")
-DEFINE_bool(dead_code_elimination, true, "use dead code elimination")
-DEFINE_bool(fold_constants, true, "use constant folding")
-DEFINE_bool(trace_dead_code_elimination, false, "trace dead code elimination")
-DEFINE_bool(unreachable_code_elimination, true, "eliminate unreachable code")
-DEFINE_bool(trace_osr, false, "trace on-stack replacement")
-DEFINE_int(stress_runs, 0, "number of stress runs")
-DEFINE_bool(optimize_closures, true, "optimize closures")
-DEFINE_bool(lookup_sample_by_shared, true,
+DEFINE_BOOL(load_elimination, true, "use load elimination")
+DEFINE_BOOL(check_elimination, true, "use check elimination")
+DEFINE_BOOL(store_elimination, false, "use store elimination")
+DEFINE_BOOL(dead_code_elimination, true, "use dead code elimination")
+DEFINE_BOOL(fold_constants, true, "use constant folding")
+DEFINE_BOOL(trace_dead_code_elimination, false, "trace dead code elimination")
+DEFINE_BOOL(unreachable_code_elimination, true, "eliminate unreachable code")
+DEFINE_BOOL(trace_osr, false, "trace on-stack replacement")
+DEFINE_INT(stress_runs, 0, "number of stress runs")
+DEFINE_BOOL(lookup_sample_by_shared, true,
             "when picking a function to optimize, watch for shared function "
             "info, not JSFunction itself")
-DEFINE_bool(cache_optimized_code, true,
-            "cache optimized code for closures")
-DEFINE_bool(flush_optimized_code_cache, true,
+DEFINE_BOOL(cache_optimized_code, true, "cache optimized code for closures")
+DEFINE_BOOL(flush_optimized_code_cache, true,
             "flushes the cache of optimized code for closures on every GC")
-DEFINE_bool(inline_construct, true, "inline constructor calls")
-DEFINE_bool(inline_arguments, true, "inline functions with arguments object")
-DEFINE_bool(inline_accessors, true, "inline JavaScript accessors")
-DEFINE_int(escape_analysis_iterations, 2,
+DEFINE_BOOL(inline_construct, true, "inline constructor calls")
+DEFINE_BOOL(inline_arguments, true, "inline functions with arguments object")
+DEFINE_BOOL(inline_accessors, true, "inline JavaScript accessors")
+DEFINE_INT(escape_analysis_iterations, 2,
            "maximum number of escape analysis fix-point iterations")
 
-DEFINE_bool(optimize_for_in, true,
-            "optimize functions containing for-in loops")
-DEFINE_bool(opt_safe_uint32_operations, true,
+DEFINE_BOOL(optimize_for_in, true, "optimize functions containing for-in loops")
+DEFINE_BOOL(opt_safe_uint32_operations, true,
             "allow uint32 values on optimize frames if they are used only in "
             "safe operations")
 
-DEFINE_bool(concurrent_recompilation, true,
+DEFINE_BOOL(concurrent_recompilation, true,
             "optimizing hot functions asynchronously on a separate thread")
-DEFINE_bool(trace_concurrent_recompilation, false,
+DEFINE_BOOL(trace_concurrent_recompilation, false,
             "track concurrent recompilation")
-DEFINE_int(concurrent_recompilation_queue_length, 8,
+DEFINE_INT(concurrent_recompilation_queue_length, 8,
            "the length of the concurrent compilation queue")
-DEFINE_int(concurrent_recompilation_delay, 0,
+DEFINE_INT(concurrent_recompilation_delay, 0,
            "artificial compilation delay in ms")
-DEFINE_bool(block_concurrent_recompilation, false,
+DEFINE_BOOL(block_concurrent_recompilation, false,
             "block queued jobs until released")
-DEFINE_bool(concurrent_osr, true,
-            "concurrent on-stack replacement")
-DEFINE_implication(concurrent_osr, concurrent_recompilation)
+DEFINE_BOOL(concurrent_osr, true, "concurrent on-stack replacement")
+DEFINE_IMPLICATION(concurrent_osr, concurrent_recompilation)
 
-DEFINE_bool(omit_map_checks_for_leaf_maps, true,
+DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
             "do not emit check maps for constant values that have a leaf map, "
             "deoptimize the optimized code if the layout of the maps changes.")
 
-DEFINE_int(typed_array_max_size_in_heap, 64,
-    "threshold for in-heap typed array")
+// Flags for TurboFan.
+DEFINE_STRING(turbo_filter, "~", "optimization filter for TurboFan compiler")
+DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
+DEFINE_BOOL(trace_turbo_types, true, "trace generated TurboFan types")
+DEFINE_BOOL(trace_turbo_scheduler, false, "trace generated TurboFan scheduler")
+DEFINE_BOOL(turbo_asm, false, "enable TurboFan for asm.js code")
+DEFINE_BOOL(turbo_verify, false, "verify TurboFan graphs at each phase")
+DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
+#if V8_TURBOFAN_BACKEND
+DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
+#else
+DEFINE_BOOL(turbo_types, false, "use typed lowering in TurboFan")
+#endif
+DEFINE_BOOL(turbo_source_positions, false,
+            "track source code positions when building TurboFan IR")
+DEFINE_BOOL(context_specialization, false,
+            "enable context specialization in TurboFan")
+DEFINE_BOOL(turbo_deoptimization, false, "enable deoptimization in TurboFan")
+DEFINE_BOOL(turbo_inlining, false, "enable inlining in TurboFan")
+DEFINE_BOOL(trace_turbo_inlining, false, "trace TurboFan inlining")
+DEFINE_IMPLICATION(turbo_inlining, turbo_types)
+
+DEFINE_INT(typed_array_max_size_in_heap, 64,
+           "threshold for in-heap typed array")
 
 // Profiler flags.
-DEFINE_int(frame_count, 1, "number of stack frames inspected by the profiler")
-           // 0x1800 fits in the immediate field of an ARM instruction.
-DEFINE_int(interrupt_budget, 0x1800,
+DEFINE_INT(frame_count, 1, "number of stack frames inspected by the profiler")
+// 0x1800 fits in the immediate field of an ARM instruction.
+DEFINE_INT(interrupt_budget, 0x1800,
            "execution budget before interrupt is triggered")
-DEFINE_int(type_info_threshold, 25,
+DEFINE_INT(type_info_threshold, 25,
            "percentage of ICs that must have type info to allow optimization")
-DEFINE_int(self_opt_count, 130, "call count before self-optimization")
+DEFINE_INT(generic_ic_threshold, 30,
+           "max percentage of megamorphic/generic ICs to allow optimization")
+DEFINE_INT(self_opt_count, 130, "call count before self-optimization")
 
-DEFINE_bool(trace_opt_verbose, false, "extra verbose compilation tracing")
-DEFINE_implication(trace_opt_verbose, trace_opt)
+DEFINE_BOOL(trace_opt_verbose, false, "extra verbose compilation tracing")
+DEFINE_IMPLICATION(trace_opt_verbose, trace_opt)
 
 // assembler-ia32.cc / assembler-arm.cc / assembler-x64.cc
-DEFINE_bool(debug_code, false,
-            "generate extra code (assertions) for debugging")
-DEFINE_bool(code_comments, false, "emit comments in code disassembly")
-DEFINE_bool(enable_sse3, true,
-            "enable use of SSE3 instructions if available")
-DEFINE_bool(enable_sse4_1, true,
+DEFINE_BOOL(debug_code, false, "generate extra code (assertions) for debugging")
+DEFINE_BOOL(code_comments, false, "emit comments in code disassembly")
+DEFINE_BOOL(enable_sse3, true, "enable use of SSE3 instructions if available")
+DEFINE_BOOL(enable_sse4_1, true,
             "enable use of SSE4.1 instructions if available")
-DEFINE_bool(enable_sahf, true,
+DEFINE_BOOL(enable_sahf, true,
             "enable use of SAHF instruction if available (X64 only)")
-DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
+DEFINE_BOOL(enable_vfp3, ENABLE_VFP3_DEFAULT,
             "enable use of VFP3 instructions if available")
-DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT,
+DEFINE_BOOL(enable_armv7, ENABLE_ARMV7_DEFAULT,
             "enable use of ARMv7 instructions if available (ARM only)")
-DEFINE_bool(enable_neon, ENABLE_NEON_DEFAULT,
+DEFINE_BOOL(enable_neon, ENABLE_NEON_DEFAULT,
             "enable use of NEON instructions if available (ARM only)")
-DEFINE_bool(enable_sudiv, true,
+DEFINE_BOOL(enable_sudiv, true,
             "enable use of SDIV and UDIV instructions if available (ARM only)")
-DEFINE_bool(enable_mls, true,
+DEFINE_BOOL(enable_mls, true,
             "enable use of MLS instructions if available (ARM only)")
-DEFINE_bool(enable_movw_movt, false,
+DEFINE_BOOL(enable_movw_movt, false,
             "enable loading 32-bit constant by means of movw/movt "
             "instruction pairs (ARM only)")
-DEFINE_bool(enable_unaligned_accesses, true,
+DEFINE_BOOL(enable_unaligned_accesses, true,
             "enable unaligned accesses for ARMv7 (ARM only)")
-DEFINE_bool(enable_32dregs, ENABLE_32DREGS_DEFAULT,
+DEFINE_BOOL(enable_32dregs, ENABLE_32DREGS_DEFAULT,
             "enable use of d16-d31 registers on ARM - this requires VFP3")
-DEFINE_bool(enable_vldr_imm, false,
+DEFINE_BOOL(enable_vldr_imm, false,
             "enable use of constant pools for double immediate (ARM only)")
-DEFINE_bool(force_long_branches, false,
+DEFINE_BOOL(force_long_branches, false,
             "force all emitted branches to be in long mode (MIPS only)")
 
 // cpu-arm64.cc
-DEFINE_bool(enable_always_align_csp, true,
+DEFINE_BOOL(enable_always_align_csp, true,
             "enable alignment of csp to 16 bytes on platforms which prefer "
             "the register to always be aligned (ARM64 only)")
 
 // bootstrapper.cc
-DEFINE_string(expose_natives_as, NULL, "expose natives in global object")
-DEFINE_string(expose_debug_as, NULL, "expose debug in global object")
-DEFINE_bool(expose_free_buffer, false, "expose freeBuffer extension")
-DEFINE_bool(expose_gc, false, "expose gc extension")
-DEFINE_string(expose_gc_as, NULL,
+DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
+DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
+DEFINE_BOOL(expose_free_buffer, false, "expose freeBuffer extension")
+DEFINE_BOOL(expose_gc, false, "expose gc extension")
+DEFINE_STRING(expose_gc_as, NULL,
               "expose gc extension under the specified name")
-DEFINE_implication(expose_gc_as, expose_gc)
-DEFINE_bool(expose_externalize_string, false,
+DEFINE_IMPLICATION(expose_gc_as, expose_gc)
+DEFINE_BOOL(expose_externalize_string, false,
             "expose externalize string extension")
-DEFINE_bool(expose_trigger_failure, false, "expose trigger-failure extension")
-DEFINE_int(stack_trace_limit, 10, "number of stack frames to capture")
-DEFINE_bool(builtins_in_stack_traces, false,
+DEFINE_BOOL(expose_trigger_failure, false, "expose trigger-failure extension")
+DEFINE_INT(stack_trace_limit, 10, "number of stack frames to capture")
+DEFINE_BOOL(builtins_in_stack_traces, false,
             "show built-in functions in stack traces")
-DEFINE_bool(disable_native_files, false, "disable builtin natives files")
+DEFINE_BOOL(disable_native_files, false, "disable builtin natives files")
 
 // builtins-ia32.cc
-DEFINE_bool(inline_new, true, "use fast inline allocation")
+DEFINE_BOOL(inline_new, true, "use fast inline allocation")
 
 // codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(trace_codegen, false,
+DEFINE_BOOL(trace_codegen, false,
             "print name of functions for which code is generated")
-DEFINE_bool(trace, false, "trace function calls")
-DEFINE_bool(mask_constants_with_cookie, true,
+DEFINE_BOOL(trace, false, "trace function calls")
+DEFINE_BOOL(mask_constants_with_cookie, true,
             "use random jit cookie to mask large constants")
 
 // codegen.cc
-DEFINE_bool(lazy, true, "use lazy compilation")
-DEFINE_bool(trace_opt, false, "trace lazy optimization")
-DEFINE_bool(trace_opt_stats, false, "trace lazy optimization statistics")
-DEFINE_bool(opt, true, "use adaptive optimizations")
-DEFINE_bool(always_opt, false, "always try to optimize functions")
-DEFINE_bool(always_osr, false, "always try to OSR functions")
-DEFINE_bool(prepare_always_opt, false, "prepare for turning on always opt")
-DEFINE_bool(trace_deopt, false, "trace optimize function deoptimization")
-DEFINE_bool(trace_stub_failures, false,
+DEFINE_BOOL(lazy, true, "use lazy compilation")
+DEFINE_BOOL(trace_opt, false, "trace lazy optimization")
+DEFINE_BOOL(trace_opt_stats, false, "trace lazy optimization statistics")
+DEFINE_BOOL(opt, true, "use adaptive optimizations")
+DEFINE_BOOL(always_opt, false, "always try to optimize functions")
+DEFINE_BOOL(always_osr, false, "always try to OSR functions")
+DEFINE_BOOL(prepare_always_opt, false, "prepare for turning on always opt")
+DEFINE_BOOL(trace_deopt, false, "trace optimize function deoptimization")
+DEFINE_BOOL(trace_stub_failures, false,
             "trace deoptimization of generated code stubs")
 
+DEFINE_BOOL(serialize_toplevel, false, "enable caching of toplevel scripts")
+DEFINE_BOOL(trace_code_serializer, false, "trace code serializer")
+
 // compiler.cc
-DEFINE_int(min_preparse_length, 1024,
+DEFINE_INT(min_preparse_length, 1024,
            "minimum length for automatic enable preparsing")
-DEFINE_bool(always_full_compiler, false,
-            "try to use the dedicated run-once backend for all code")
-DEFINE_int(max_opt_count, 10,
+DEFINE_INT(max_opt_count, 10,
            "maximum number of optimization attempts before giving up.")
 
 // compilation-cache.cc
-DEFINE_bool(compilation_cache, true, "enable compilation cache")
+DEFINE_BOOL(compilation_cache, true, "enable compilation cache")
 
-DEFINE_bool(cache_prototype_transitions, true, "cache prototype transitions")
+DEFINE_BOOL(cache_prototype_transitions, true, "cache prototype transitions")
 
 // cpu-profiler.cc
-DEFINE_int(cpu_profiler_sampling_interval, 1000,
+DEFINE_INT(cpu_profiler_sampling_interval, 1000,
            "CPU profiler sampling interval in microseconds")
 
 // debug.cc
-DEFINE_bool(trace_debug_json, false, "trace debugging JSON request/response")
-DEFINE_bool(trace_js_array_abuse, false,
+DEFINE_BOOL(trace_debug_json, false, "trace debugging JSON request/response")
+DEFINE_BOOL(trace_js_array_abuse, false,
             "trace out-of-bounds accesses to JS arrays")
-DEFINE_bool(trace_external_array_abuse, false,
+DEFINE_BOOL(trace_external_array_abuse, false,
             "trace out-of-bounds-accesses to external arrays")
-DEFINE_bool(trace_array_abuse, false,
+DEFINE_BOOL(trace_array_abuse, false,
             "trace out-of-bounds accesses to all arrays")
-DEFINE_implication(trace_array_abuse, trace_js_array_abuse)
-DEFINE_implication(trace_array_abuse, trace_external_array_abuse)
-DEFINE_bool(enable_liveedit, true, "enable liveedit experimental feature")
-DEFINE_bool(hard_abort, true, "abort by crashing")
+DEFINE_IMPLICATION(trace_array_abuse, trace_js_array_abuse)
+DEFINE_IMPLICATION(trace_array_abuse, trace_external_array_abuse)
+DEFINE_BOOL(enable_liveedit, true, "enable liveedit experimental feature")
+DEFINE_BOOL(hard_abort, true, "abort by crashing")
 
 // execution.cc
-// Slightly less than 1MB on 64-bit, since Windows' default stack size for
-// the main execution thread is 1MB for both 32 and 64-bit.
-DEFINE_int(stack_size, kPointerSize * 123,
+DEFINE_INT(stack_size, V8_DEFAULT_STACK_SIZE_KB,
            "default size of stack region v8 is allowed to use (in kBytes)")
 
 // frames.cc
-DEFINE_int(max_stack_trace_source_length, 300,
+DEFINE_INT(max_stack_trace_source_length, 300,
            "maximum length of function source code printed in a stack trace.")
 
 // full-codegen.cc
-DEFINE_bool(always_inline_smi_code, false,
+DEFINE_BOOL(always_inline_smi_code, false,
             "always inline smi code in non-opt code")
 
 // heap.cc
-DEFINE_int(min_semi_space_size, 0,
-    "min size of a semi-space (in MBytes), the new space consists of two"
-    "semi-spaces")
-DEFINE_int(max_semi_space_size, 0,
-    "max size of a semi-space (in MBytes), the new space consists of two"
-    "semi-spaces")
-DEFINE_int(max_old_space_size, 0, "max size of the old space (in Mbytes)")
-DEFINE_int(max_executable_size, 0, "max size of executable memory (in Mbytes)")
-DEFINE_bool(gc_global, false, "always perform global GCs")
-DEFINE_int(gc_interval, -1, "garbage collect after <n> allocations")
-DEFINE_bool(trace_gc, false,
+DEFINE_INT(min_semi_space_size, 0,
+           "min size of a semi-space (in MBytes), the new space consists of two"
+           "semi-spaces")
+DEFINE_INT(max_semi_space_size, 0,
+           "max size of a semi-space (in MBytes), the new space consists of two"
+           "semi-spaces")
+DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
+DEFINE_BOOL(gc_global, false, "always perform global GCs")
+DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
+DEFINE_BOOL(trace_gc, false,
             "print one trace line following each garbage collection")
-DEFINE_bool(trace_gc_nvp, false,
+DEFINE_BOOL(trace_gc_nvp, false,
             "print one detailed trace line in name=value format "
             "after each garbage collection")
-DEFINE_bool(trace_gc_ignore_scavenger, false,
+DEFINE_BOOL(trace_gc_ignore_scavenger, false,
             "do not print trace line after scavenger collection")
-DEFINE_bool(print_cumulative_gc_stat, false,
+DEFINE_BOOL(trace_idle_notification, false,
+            "print one trace line following each idle notification")
+DEFINE_BOOL(print_cumulative_gc_stat, false,
             "print cumulative GC statistics in name=value format on exit")
-DEFINE_bool(print_max_heap_committed, false,
+DEFINE_BOOL(print_max_heap_committed, false,
             "print statistics of the maximum memory committed for the heap "
             "in name=value format on exit")
-DEFINE_bool(trace_gc_verbose, false,
+DEFINE_BOOL(trace_gc_verbose, false,
             "print more details following each garbage collection")
-DEFINE_bool(trace_fragmentation, false,
+DEFINE_BOOL(trace_fragmentation, false,
             "report fragmentation for old pointer and data pages")
-DEFINE_bool(collect_maps, true,
+DEFINE_BOOL(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
-DEFINE_bool(weak_embedded_maps_in_ic, true,
+DEFINE_BOOL(weak_embedded_maps_in_ic, true,
             "make maps embedded in inline cache stubs")
-DEFINE_bool(weak_embedded_maps_in_optimized_code, true,
+DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
             "make maps embedded in optimized code weak")
-DEFINE_bool(weak_embedded_objects_in_optimized_code, true,
+DEFINE_BOOL(weak_embedded_objects_in_optimized_code, true,
             "make objects embedded in optimized code weak")
-DEFINE_bool(flush_code, true,
+DEFINE_BOOL(flush_code, true,
             "flush code that we expect not to use again (during full gc)")
-DEFINE_bool(flush_code_incrementally, true,
+DEFINE_BOOL(flush_code_incrementally, true,
             "flush code that we expect not to use again (incrementally)")
-DEFINE_bool(trace_code_flushing, false, "trace code flushing progress")
-DEFINE_bool(age_code, true,
+DEFINE_BOOL(trace_code_flushing, false, "trace code flushing progress")
+DEFINE_BOOL(age_code, true,
             "track un-executed functions to age code and flush only "
             "old code (required for code flushing)")
-DEFINE_bool(incremental_marking, true, "use incremental marking")
-DEFINE_bool(incremental_marking_steps, true, "do incremental marking steps")
-DEFINE_bool(trace_incremental_marking, false,
+DEFINE_BOOL(incremental_marking, true, "use incremental marking")
+DEFINE_BOOL(incremental_marking_steps, true, "do incremental marking steps")
+DEFINE_BOOL(trace_incremental_marking, false,
             "trace progress of the incremental marking")
-DEFINE_bool(track_gc_object_stats, false,
+DEFINE_BOOL(track_gc_object_stats, false,
             "track object counts and memory usage")
-DEFINE_bool(parallel_sweeping, false, "enable parallel sweeping")
-DEFINE_bool(concurrent_sweeping, true, "enable concurrent sweeping")
-DEFINE_int(sweeper_threads, 0,
+DEFINE_BOOL(parallel_sweeping, false, "enable parallel sweeping")
+DEFINE_BOOL(concurrent_sweeping, true, "enable concurrent sweeping")
+DEFINE_INT(sweeper_threads, 0,
            "number of parallel and concurrent sweeping threads")
-DEFINE_bool(job_based_sweeping, false, "enable job based sweeping")
+DEFINE_BOOL(job_based_sweeping, true, "enable job based sweeping")
 #ifdef VERIFY_HEAP
-DEFINE_bool(verify_heap, false, "verify heap pointers before and after GC")
+DEFINE_BOOL(verify_heap, false, "verify heap pointers before and after GC")
 #endif
 
 
 // heap-snapshot-generator.cc
-DEFINE_bool(heap_profiler_trace_objects, false,
+DEFINE_BOOL(heap_profiler_trace_objects, false,
             "Dump heap object allocations/movements/size_updates")
 
 
 // v8.cc
-DEFINE_bool(use_idle_notification, true,
+DEFINE_BOOL(use_idle_notification, true,
             "Use idle notification to reduce memory footprint.")
 // ic.cc
-DEFINE_bool(use_ic, true, "use inline caching")
+DEFINE_BOOL(use_ic, true, "use inline caching")
+DEFINE_BOOL(trace_ic, false, "trace inline cache state transitions")
 
 // macro-assembler-ia32.cc
-DEFINE_bool(native_code_counters, false,
+DEFINE_BOOL(native_code_counters, false,
             "generate extra code for manipulating stats counters")
 
 // mark-compact.cc
-DEFINE_bool(always_compact, false, "Perform compaction on every full GC")
-DEFINE_bool(never_compact, false,
+DEFINE_BOOL(always_compact, false, "Perform compaction on every full GC")
+DEFINE_BOOL(never_compact, false,
             "Never perform compaction on full GC - testing only")
-DEFINE_bool(compact_code_space, true,
+DEFINE_BOOL(compact_code_space, true,
             "Compact code space on full non-incremental collections")
-DEFINE_bool(incremental_code_compaction, true,
+DEFINE_BOOL(incremental_code_compaction, true,
             "Compact code space on full incremental collections")
-DEFINE_bool(cleanup_code_caches_at_gc, true,
+DEFINE_BOOL(cleanup_code_caches_at_gc, true,
             "Flush inline caches prior to mark compact collection and "
             "flush code caches in maps during mark compact cycle.")
-DEFINE_bool(use_marking_progress_bar, true,
+DEFINE_BOOL(use_marking_progress_bar, true,
             "Use a progress bar to scan large objects in increments when "
             "incremental marking is active.")
-DEFINE_bool(zap_code_space, true,
+DEFINE_BOOL(zap_code_space, true,
             "Zap free memory in code space with 0xCC while sweeping.")
-DEFINE_int(random_seed, 0,
+DEFINE_INT(random_seed, 0,
            "Default seed for initializing random generator "
            "(0, the default, means to use system random).")
 
 // objects.cc
-DEFINE_bool(use_verbose_printer, true, "allows verbose printing")
+DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
 
 // parser.cc
-DEFINE_bool(allow_natives_syntax, false, "allow natives syntax")
-DEFINE_bool(trace_parse, false, "trace parsing and preparsing")
+DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
+DEFINE_BOOL(trace_parse, false, "trace parsing and preparsing")
 
 // simulator-arm.cc, simulator-arm64.cc and simulator-mips.cc
-DEFINE_bool(trace_sim, false, "Trace simulator execution")
-DEFINE_bool(debug_sim, false, "Enable debugging the simulator")
-DEFINE_bool(check_icache, false,
+DEFINE_BOOL(trace_sim, false, "Trace simulator execution")
+DEFINE_BOOL(debug_sim, false, "Enable debugging the simulator")
+DEFINE_BOOL(check_icache, false,
             "Check icache flushes in ARM and MIPS simulator")
-DEFINE_int(stop_sim_at, 0, "Simulator stop after x number of instructions")
-#ifdef V8_TARGET_ARCH_ARM64
-DEFINE_int(sim_stack_alignment, 16,
+DEFINE_INT(stop_sim_at, 0, "Simulator stop after x number of instructions")
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_MIPS64)
+DEFINE_INT(sim_stack_alignment, 16,
            "Stack alignment in bytes in simulator. This must be a power of two "
            "and it must be at least 16. 16 is default.")
 #else
-DEFINE_int(sim_stack_alignment, 8,
+DEFINE_INT(sim_stack_alignment, 8,
            "Stack alingment in bytes in simulator (4 or 8, 8 is default)")
 #endif
-DEFINE_int(sim_stack_size, 2 * MB / KB,
-           "Stack size of the ARM64 simulator in kBytes (default is 2 MB)")
-DEFINE_bool(log_regs_modified, true,
+DEFINE_INT(sim_stack_size, 2 * MB / KB,
+           "Stack size of the ARM64 and MIPS64 simulator "
+           "in kBytes (default is 2 MB)")
+DEFINE_BOOL(log_regs_modified, true,
             "When logging register values, only print modified registers.")
-DEFINE_bool(log_colour, true,
-            "When logging, try to use coloured output.")
-DEFINE_bool(ignore_asm_unimplemented_break, false,
+DEFINE_BOOL(log_colour, true, "When logging, try to use coloured output.")
+DEFINE_BOOL(ignore_asm_unimplemented_break, false,
             "Don't break for ASM_UNIMPLEMENTED_BREAK macros.")
-DEFINE_bool(trace_sim_messages, false,
+DEFINE_BOOL(trace_sim_messages, false,
             "Trace simulator debug messages. Implied by --trace-sim.")
 
 // isolate.cc
-DEFINE_bool(stack_trace_on_illegal, false,
+DEFINE_BOOL(stack_trace_on_illegal, false,
             "print stack trace when an illegal exception is thrown")
-DEFINE_bool(abort_on_uncaught_exception, false,
+DEFINE_BOOL(abort_on_uncaught_exception, false,
             "abort program (dump core) when an uncaught exception is thrown")
-DEFINE_bool(randomize_hashes, true,
+DEFINE_BOOL(randomize_hashes, true,
             "randomize hashes to avoid predictable hash collisions "
             "(with snapshots this option cannot override the baked-in seed)")
-DEFINE_int(hash_seed, 0,
+DEFINE_INT(hash_seed, 0,
            "Fixed seed to use to hash property keys (0 means random)"
            "(with snapshots this option cannot override the baked-in seed)")
 
 // snapshot-common.cc
-DEFINE_bool(profile_deserialization, false,
+DEFINE_BOOL(profile_deserialization, false,
             "Print the time it takes to deserialize the snapshot.")
 
 // Regexp
-DEFINE_bool(regexp_optimization, true, "generate optimized regexp code")
+DEFINE_BOOL(regexp_optimization, true, "generate optimized regexp code")
 
 // Testing flags test/cctest/test-{flags,api,serialization}.cc
-DEFINE_bool(testing_bool_flag, true, "testing_bool_flag")
-DEFINE_maybe_bool(testing_maybe_bool_flag, "testing_maybe_bool_flag")
-DEFINE_int(testing_int_flag, 13, "testing_int_flag")
-DEFINE_float(testing_float_flag, 2.5, "float-flag")
-DEFINE_string(testing_string_flag, "Hello, world!", "string-flag")
-DEFINE_int(testing_prng_seed, 42, "Seed used for threading test randomness")
+DEFINE_BOOL(testing_bool_flag, true, "testing_bool_flag")
+DEFINE_MAYBE_BOOL(testing_maybe_bool_flag, "testing_maybe_bool_flag")
+DEFINE_INT(testing_int_flag, 13, "testing_int_flag")
+DEFINE_FLOAT(testing_float_flag, 2.5, "float-flag")
+DEFINE_STRING(testing_string_flag, "Hello, world!", "string-flag")
+DEFINE_INT(testing_prng_seed, 42, "Seed used for threading test randomness")
 #ifdef _WIN32
-DEFINE_string(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
+DEFINE_STRING(testing_serialization_file, "C:\\Windows\\Temp\\serdes",
               "file in which to testing_serialize heap")
 #else
-DEFINE_string(testing_serialization_file, "/tmp/serdes",
+DEFINE_STRING(testing_serialization_file, "/tmp/serdes",
               "file in which to serialize heap")
 #endif
 
 // mksnapshot.cc
-DEFINE_string(extra_code, NULL, "A filename with extra code to be included in"
-                                " the snapshot (mksnapshot only)")
-DEFINE_string(raw_file, NULL, "A file to write the raw snapshot bytes to. "
-                              "(mksnapshot only)")
-DEFINE_string(raw_context_file, NULL, "A file to write the raw context "
-                                      "snapshot bytes to. (mksnapshot only)")
-DEFINE_bool(omit, false, "Omit raw snapshot bytes in generated code. "
-                         "(mksnapshot only)")
+DEFINE_STRING(extra_code, NULL,
+              "A filename with extra code to be included in"
+              " the snapshot (mksnapshot only)")
+DEFINE_STRING(raw_file, NULL,
+              "A file to write the raw snapshot bytes to. "
+              "(mksnapshot only)")
+DEFINE_STRING(raw_context_file, NULL,
+              "A file to write the raw context "
+              "snapshot bytes to. (mksnapshot only)")
+DEFINE_STRING(startup_blob, NULL,
+              "Write V8 startup blob file. "
+              "(mksnapshot only)")
 
 // code-stubs-hydrogen.cc
-DEFINE_bool(profile_hydrogen_code_stub_compilation, false,
+DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
             "Print the time it takes to lazily compile hydrogen code stubs.")
 
-DEFINE_bool(predictable, false, "enable predictable mode")
-DEFINE_neg_implication(predictable, concurrent_recompilation)
-DEFINE_neg_implication(predictable, concurrent_osr)
-DEFINE_neg_implication(predictable, concurrent_sweeping)
-DEFINE_neg_implication(predictable, parallel_sweeping)
+DEFINE_BOOL(predictable, false, "enable predictable mode")
+DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
+DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
+DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
+DEFINE_NEG_IMPLICATION(predictable, parallel_sweeping)
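Context for the renamed macros: when flag-definitions.h is re-included in its implication pass, each DEFINE_NEG_IMPLICATION forces the target flag off whenever the source flag is set. A minimal sketch of that expansion (the exact macro bodies in this tree are an assumption):

    #define DEFINE_IMPLICATION(whenflag, thenflag) \
      if (FLAG_##whenflag) FLAG_##thenflag = true;
    #define DEFINE_NEG_IMPLICATION(whenflag, thenflag) \
      if (FLAG_##whenflag) FLAG_##thenflag = false;
    // Under these bodies, --predictable turns off concurrent recompilation,
    // concurrent OSR and both sweeping modes listed above.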
 
 
 //
 // Dev shell flags
 //
 
-DEFINE_bool(help, false, "Print usage message, including flags, on console")
-DEFINE_bool(dump_counters, false, "Dump counters on exit")
+DEFINE_BOOL(help, false, "Print usage message, including flags, on console")
+DEFINE_BOOL(dump_counters, false, "Dump counters on exit")
 
-DEFINE_bool(debugger, false, "Enable JavaScript debugger")
+DEFINE_BOOL(debugger, false, "Enable JavaScript debugger")
 
-DEFINE_string(map_counters, "", "Map counters to a file")
-DEFINE_args(js_arguments,
+DEFINE_STRING(map_counters, "", "Map counters to a file")
+DEFINE_ARGS(js_arguments,
             "Pass all remaining arguments to the script. Alias for \"--\".")
 
 //
 // GDB JIT integration flags.
 //
 
-DEFINE_bool(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
-DEFINE_bool(gdbjit_full, false, "enable GDBJIT interface for all code objects")
-DEFINE_bool(gdbjit_dump, false, "dump elf objects with debug info to disk")
-DEFINE_string(gdbjit_dump_filter, "",
+DEFINE_BOOL(gdbjit, false, "enable GDBJIT interface (disables compacting GC)")
+DEFINE_BOOL(gdbjit_full, false, "enable GDBJIT interface for all code objects")
+DEFINE_BOOL(gdbjit_dump, false, "dump elf objects with debug info to disk")
+DEFINE_STRING(gdbjit_dump_filter, "",
               "dump only objects containing this substring")
 
 // mark-compact.cc
-DEFINE_bool(force_marking_deque_overflows, false,
+DEFINE_BOOL(force_marking_deque_overflows, false,
             "force overflows of marking deque by reducing it's size "
             "to 64 words")
 
-DEFINE_bool(stress_compaction, false,
+DEFINE_BOOL(stress_compaction, false,
             "stress the GC compactor to flush out bugs (implies "
             "--force_marking_deque_overflows)")
 
@@ -698,63 +714,64 @@
 #endif
 
 // checks.cc
-#ifdef ENABLE_SLOW_ASSERTS
-DEFINE_bool(enable_slow_asserts, false,
+#ifdef ENABLE_SLOW_DCHECKS
+DEFINE_BOOL(enable_slow_asserts, false,
             "enable asserts that are slow to execute")
 #endif
 
 // codegen-ia32.cc / codegen-arm.cc / macro-assembler-*.cc
-DEFINE_bool(print_source, false, "pretty print source code")
-DEFINE_bool(print_builtin_source, false,
+DEFINE_BOOL(print_source, false, "pretty print source code")
+DEFINE_BOOL(print_builtin_source, false,
             "pretty print source code for builtins")
-DEFINE_bool(print_ast, false, "print source AST")
-DEFINE_bool(print_builtin_ast, false, "print source AST for builtins")
-DEFINE_string(stop_at, "", "function name where to insert a breakpoint")
-DEFINE_bool(trap_on_abort, false, "replace aborts by breakpoints")
+DEFINE_BOOL(print_ast, false, "print source AST")
+DEFINE_BOOL(print_builtin_ast, false, "print source AST for builtins")
+DEFINE_STRING(stop_at, "", "function name where to insert a breakpoint")
+DEFINE_BOOL(trap_on_abort, false, "replace aborts by breakpoints")
 
 // compiler.cc
-DEFINE_bool(print_builtin_scopes, false, "print scopes for builtins")
-DEFINE_bool(print_scopes, false, "print scopes")
+DEFINE_BOOL(print_builtin_scopes, false, "print scopes for builtins")
+DEFINE_BOOL(print_scopes, false, "print scopes")
 
 // contexts.cc
-DEFINE_bool(trace_contexts, false, "trace contexts operations")
+DEFINE_BOOL(trace_contexts, false, "trace contexts operations")
 
 // heap.cc
-DEFINE_bool(gc_verbose, false, "print stuff during garbage collection")
-DEFINE_bool(heap_stats, false, "report heap statistics before and after GC")
-DEFINE_bool(code_stats, false, "report code statistics after GC")
-DEFINE_bool(verify_native_context_separation, false,
+DEFINE_BOOL(gc_verbose, false, "print stuff during garbage collection")
+DEFINE_BOOL(heap_stats, false, "report heap statistics before and after GC")
+DEFINE_BOOL(code_stats, false, "report code statistics after GC")
+DEFINE_BOOL(verify_native_context_separation, false,
             "verify that code holds on to at most one native context after GC")
-DEFINE_bool(print_handles, false, "report handles after GC")
-DEFINE_bool(print_global_handles, false, "report global handles after GC")
+DEFINE_BOOL(print_handles, false, "report handles after GC")
+DEFINE_BOOL(print_global_handles, false, "report global handles after GC")
 
-// ic.cc
-DEFINE_bool(trace_ic, false, "trace inline cache state transitions")
+// TurboFan debug-only flags.
+DEFINE_BOOL(print_turbo_replay, false,
+            "print C++ code to recreate TurboFan graphs")
 
 // interface.cc
-DEFINE_bool(print_interfaces, false, "print interfaces")
-DEFINE_bool(print_interface_details, false, "print interface inference details")
-DEFINE_int(print_interface_depth, 5, "depth for printing interfaces")
+DEFINE_BOOL(print_interfaces, false, "print interfaces")
+DEFINE_BOOL(print_interface_details, false, "print interface inference details")
+DEFINE_INT(print_interface_depth, 5, "depth for printing interfaces")
 
 // objects.cc
-DEFINE_bool(trace_normalization, false,
+DEFINE_BOOL(trace_normalization, false,
             "prints when objects are turned into dictionaries.")
 
 // runtime.cc
-DEFINE_bool(trace_lazy, false, "trace lazy compilation")
+DEFINE_BOOL(trace_lazy, false, "trace lazy compilation")
 
 // spaces.cc
-DEFINE_bool(collect_heap_spill_statistics, false,
+DEFINE_BOOL(collect_heap_spill_statistics, false,
             "report heap spill statistics along with heap_stats "
             "(requires heap_stats)")
 
-DEFINE_bool(trace_isolates, false, "trace isolate state changes")
+DEFINE_BOOL(trace_isolates, false, "trace isolate state changes")
 
 // Regexp
-DEFINE_bool(regexp_possessive_quantifier, false,
+DEFINE_BOOL(regexp_possessive_quantifier, false,
             "enable possessive quantifier syntax for testing")
-DEFINE_bool(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
-DEFINE_bool(trace_regexp_assembler, false,
+DEFINE_BOOL(trace_regexp_bytecodes, false, "trace regexp bytecode execution")
+DEFINE_BOOL(trace_regexp_assembler, false,
             "trace regexp macro assembler calls.")
 
 //
@@ -764,50 +781,52 @@
 #define FLAG FLAG_FULL
 
 // log.cc
-DEFINE_bool(log, false,
+DEFINE_BOOL(log, false,
             "Minimal logging (no API, code, GC, suspect, or handles samples).")
-DEFINE_bool(log_all, false, "Log all events to the log file.")
-DEFINE_bool(log_api, false, "Log API events to the log file.")
-DEFINE_bool(log_code, false,
+DEFINE_BOOL(log_all, false, "Log all events to the log file.")
+DEFINE_BOOL(log_api, false, "Log API events to the log file.")
+DEFINE_BOOL(log_code, false,
             "Log code events to the log file without profiling.")
-DEFINE_bool(log_gc, false,
+DEFINE_BOOL(log_gc, false,
             "Log heap samples on garbage collection for the hp2ps tool.")
-DEFINE_bool(log_handles, false, "Log global handle events.")
-DEFINE_bool(log_snapshot_positions, false,
+DEFINE_BOOL(log_handles, false, "Log global handle events.")
+DEFINE_BOOL(log_snapshot_positions, false,
             "log positions of (de)serialized objects in the snapshot.")
-DEFINE_bool(log_suspect, false, "Log suspect operations.")
-DEFINE_bool(prof, false,
+DEFINE_BOOL(log_suspect, false, "Log suspect operations.")
+DEFINE_BOOL(prof, false,
             "Log statistical profiling information (implies --log-code).")
-DEFINE_bool(prof_browser_mode, true,
+DEFINE_BOOL(prof_browser_mode, true,
             "Used with --prof, turns on browser-compatible mode for profiling.")
-DEFINE_bool(log_regexp, false, "Log regular expression execution.")
-DEFINE_string(logfile, "v8.log", "Specify the name of the log file.")
-DEFINE_bool(logfile_per_isolate, true, "Separate log files for each isolate.")
-DEFINE_bool(ll_prof, false, "Enable low-level linux profiler.")
-DEFINE_bool(perf_basic_prof, false,
+DEFINE_BOOL(log_regexp, false, "Log regular expression execution.")
+DEFINE_STRING(logfile, "v8.log", "Specify the name of the log file.")
+DEFINE_BOOL(logfile_per_isolate, true, "Separate log files for each isolate.")
+DEFINE_BOOL(ll_prof, false, "Enable low-level linux profiler.")
+DEFINE_BOOL(perf_basic_prof, false,
             "Enable perf linux profiler (basic support).")
-DEFINE_bool(perf_jit_prof, false,
+DEFINE_NEG_IMPLICATION(perf_basic_prof, compact_code_space)
+DEFINE_BOOL(perf_jit_prof, false,
             "Enable perf linux profiler (experimental annotate support).")
-DEFINE_string(gc_fake_mmap, "/tmp/__v8_gc__",
+DEFINE_NEG_IMPLICATION(perf_jit_prof, compact_code_space)
+DEFINE_STRING(gc_fake_mmap, "/tmp/__v8_gc__",
               "Specify the name of the file for fake gc mmap used in ll_prof")
-DEFINE_bool(log_internal_timer_events, false, "Time internal events.")
-DEFINE_bool(log_timer_events, false,
+DEFINE_BOOL(log_internal_timer_events, false, "Time internal events.")
+DEFINE_BOOL(log_timer_events, false,
             "Time events including external callbacks.")
-DEFINE_implication(log_timer_events, log_internal_timer_events)
-DEFINE_implication(log_internal_timer_events, prof)
-DEFINE_bool(log_instruction_stats, false, "Log AArch64 instruction statistics.")
-DEFINE_string(log_instruction_file, "arm64_inst.csv",
+DEFINE_IMPLICATION(log_timer_events, log_internal_timer_events)
+DEFINE_IMPLICATION(log_internal_timer_events, prof)
+DEFINE_BOOL(log_instruction_stats, false, "Log AArch64 instruction statistics.")
+DEFINE_STRING(log_instruction_file, "arm64_inst.csv",
               "AArch64 instruction statistics log file.")
-DEFINE_int(log_instruction_period, 1 << 22,
+DEFINE_INT(log_instruction_period, 1 << 22,
            "AArch64 instruction statistics logging period.")
 
-DEFINE_bool(redirect_code_traces, false,
+DEFINE_BOOL(redirect_code_traces, false,
             "output deopt information and disassembly into file "
             "code-<pid>-<isolate id>.asm")
-DEFINE_string(redirect_code_traces_to, NULL,
-            "output deopt information and disassembly into the given file")
+DEFINE_STRING(redirect_code_traces_to, NULL,
+              "output deopt information and disassembly into the given file")
 
-DEFINE_bool(hydrogen_track_positions, false,
+DEFINE_BOOL(hydrogen_track_positions, false,
             "track source code positions when building IR")
 
 //
@@ -821,51 +840,71 @@
 #endif
 
 // elements.cc
-DEFINE_bool(trace_elements_transitions, false, "trace elements transitions")
+DEFINE_BOOL(trace_elements_transitions, false, "trace elements transitions")
 
-DEFINE_bool(trace_creation_allocation_sites, false,
+DEFINE_BOOL(trace_creation_allocation_sites, false,
             "trace the creation of allocation sites")
 
 // code-stubs.cc
-DEFINE_bool(print_code_stubs, false, "print code stubs")
-DEFINE_bool(test_secondary_stub_cache, false,
+DEFINE_BOOL(print_code_stubs, false, "print code stubs")
+DEFINE_BOOL(test_secondary_stub_cache, false,
             "test secondary stub cache by disabling the primary one")
 
-DEFINE_bool(test_primary_stub_cache, false,
+DEFINE_BOOL(test_primary_stub_cache, false,
             "test primary stub cache by disabling the secondary one")
 
 
 // codegen-ia32.cc / codegen-arm.cc
-DEFINE_bool(print_code, false, "print generated code")
-DEFINE_bool(print_opt_code, false, "print optimized code")
-DEFINE_bool(print_unopt_code, false, "print unoptimized code before "
+DEFINE_BOOL(print_code, false, "print generated code")
+DEFINE_BOOL(print_opt_code, false, "print optimized code")
+DEFINE_BOOL(print_unopt_code, false,
+            "print unoptimized code before "
             "printing optimized code based on it")
-DEFINE_bool(print_code_verbose, false, "print more information for code")
-DEFINE_bool(print_builtin_code, false, "print generated code for builtins")
+DEFINE_BOOL(print_code_verbose, false, "print more information for code")
+DEFINE_BOOL(print_builtin_code, false, "print generated code for builtins")
 
 #ifdef ENABLE_DISASSEMBLER
-DEFINE_bool(sodium, false, "print generated code output suitable for use with "
+DEFINE_BOOL(sodium, false,
+            "print generated code output suitable for use with "
             "the Sodium code viewer")
 
-DEFINE_implication(sodium, print_code_stubs)
-DEFINE_implication(sodium, print_code)
-DEFINE_implication(sodium, print_opt_code)
-DEFINE_implication(sodium, hydrogen_track_positions)
-DEFINE_implication(sodium, code_comments)
+DEFINE_IMPLICATION(sodium, print_code_stubs)
+DEFINE_IMPLICATION(sodium, print_code)
+DEFINE_IMPLICATION(sodium, print_opt_code)
+DEFINE_IMPLICATION(sodium, hydrogen_track_positions)
+DEFINE_IMPLICATION(sodium, code_comments)
 
-DEFINE_bool(print_all_code, false, "enable all flags related to printing code")
-DEFINE_implication(print_all_code, print_code)
-DEFINE_implication(print_all_code, print_opt_code)
-DEFINE_implication(print_all_code, print_unopt_code)
-DEFINE_implication(print_all_code, print_code_verbose)
-DEFINE_implication(print_all_code, print_builtin_code)
-DEFINE_implication(print_all_code, print_code_stubs)
-DEFINE_implication(print_all_code, code_comments)
+DEFINE_BOOL(print_all_code, false, "enable all flags related to printing code")
+DEFINE_IMPLICATION(print_all_code, print_code)
+DEFINE_IMPLICATION(print_all_code, print_opt_code)
+DEFINE_IMPLICATION(print_all_code, print_unopt_code)
+DEFINE_IMPLICATION(print_all_code, print_code_verbose)
+DEFINE_IMPLICATION(print_all_code, print_builtin_code)
+DEFINE_IMPLICATION(print_all_code, print_code_stubs)
+DEFINE_IMPLICATION(print_all_code, code_comments)
 #ifdef DEBUG
-DEFINE_implication(print_all_code, trace_codegen)
+DEFINE_IMPLICATION(print_all_code, trace_codegen)
 #endif
 #endif
 
+
+//
+// VERIFY_PREDICTABLE related flags
+//
+#undef FLAG
+
+#ifdef VERIFY_PREDICTABLE
+#define FLAG FLAG_FULL
+#else
+#define FLAG FLAG_READONLY
+#endif
+
+DEFINE_BOOL(verify_predictable, false,
+            "this mode is used for checking that V8 behaves predictably")
+DEFINE_INT(dump_allocations_digest_at_alloc, 0,
+           "dump allocations digest each n-th allocation")
+
+
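With FLAG set to FLAG_READONLY, these verification flags typically compile down to constants, so builds without VERIFY_PREDICTABLE pay nothing for them. A hedged sketch of the two declaration modes (exact parameter lists assumed):

    #define FLAG_FULL(ctype, name, def, cmt)     ctype FLAG_##name = def;
    #define FLAG_READONLY(ctype, name, def, cmt) static const ctype FLAG_##name = def;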
 //
 // Read-only flags
 //
@@ -873,7 +912,7 @@
 #define FLAG FLAG_READONLY
 
 // assembler-arm.h
-DEFINE_bool(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
+DEFINE_BOOL(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
             "enable use of out-of-line constant pools (ARM only)")
 
 // Cleanup...
@@ -882,19 +921,20 @@
 #undef FLAG
 #undef FLAG_ALIAS
 
-#undef DEFINE_bool
-#undef DEFINE_maybe_bool
-#undef DEFINE_int
-#undef DEFINE_string
-#undef DEFINE_float
-#undef DEFINE_args
-#undef DEFINE_implication
-#undef DEFINE_neg_implication
-#undef DEFINE_ALIAS_bool
-#undef DEFINE_ALIAS_int
-#undef DEFINE_ALIAS_string
-#undef DEFINE_ALIAS_float
-#undef DEFINE_ALIAS_args
+#undef DEFINE_BOOL
+#undef DEFINE_MAYBE_BOOL
+#undef DEFINE_INT
+#undef DEFINE_STRING
+#undef DEFINE_FLOAT
+#undef DEFINE_ARGS
+#undef DEFINE_IMPLICATION
+#undef DEFINE_NEG_IMPLICATION
+#undef DEFINE_VALUE_IMPLICATION
+#undef DEFINE_ALIAS_BOOL
+#undef DEFINE_ALIAS_INT
+#undef DEFINE_ALIAS_STRING
+#undef DEFINE_ALIAS_FLOAT
+#undef DEFINE_ALIAS_ARGS
 
 #undef FLAG_MODE_DECLARE
 #undef FLAG_MODE_DEFINE
diff --git a/src/flags.cc b/src/flags.cc
index 265eb8f..98f21ef 100644
--- a/src/flags.cc
+++ b/src/flags.cc
@@ -8,20 +8,19 @@
 #include "src/v8.h"
 
 #include "src/assembler.h"
-#include "src/platform.h"
-#include "src/smart-pointers.h"
-#include "src/string-stream.h"
+#include "src/base/platform/platform.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
 
 // Define all of our flags.
 #define FLAG_MODE_DEFINE
-#include "src/flag-definitions.h"
+#include "src/flag-definitions.h"  // NOLINT
 
 // Define all of our flags default values.
 #define FLAG_MODE_DEFINE_DEFAULTS
-#include "src/flag-definitions.h"
+#include "src/flag-definitions.h"  // NOLINT
 
 namespace {
 
@@ -46,32 +45,32 @@
   const char* comment() const { return cmt_; }
 
   bool* bool_variable() const {
-    ASSERT(type_ == TYPE_BOOL);
+    DCHECK(type_ == TYPE_BOOL);
     return reinterpret_cast<bool*>(valptr_);
   }
 
   MaybeBoolFlag* maybe_bool_variable() const {
-    ASSERT(type_ == TYPE_MAYBE_BOOL);
+    DCHECK(type_ == TYPE_MAYBE_BOOL);
     return reinterpret_cast<MaybeBoolFlag*>(valptr_);
   }
 
   int* int_variable() const {
-    ASSERT(type_ == TYPE_INT);
+    DCHECK(type_ == TYPE_INT);
     return reinterpret_cast<int*>(valptr_);
   }
 
   double* float_variable() const {
-    ASSERT(type_ == TYPE_FLOAT);
+    DCHECK(type_ == TYPE_FLOAT);
     return reinterpret_cast<double*>(valptr_);
   }
 
   const char* string_value() const {
-    ASSERT(type_ == TYPE_STRING);
+    DCHECK(type_ == TYPE_STRING);
     return *reinterpret_cast<const char**>(valptr_);
   }
 
   void set_string_value(const char* value, bool owns_ptr) {
-    ASSERT(type_ == TYPE_STRING);
+    DCHECK(type_ == TYPE_STRING);
     const char** ptr = reinterpret_cast<const char**>(valptr_);
     if (owns_ptr_ && *ptr != NULL) DeleteArray(*ptr);
     *ptr = value;
@@ -79,32 +78,32 @@
   }
 
   JSArguments* args_variable() const {
-    ASSERT(type_ == TYPE_ARGS);
+    DCHECK(type_ == TYPE_ARGS);
     return reinterpret_cast<JSArguments*>(valptr_);
   }
 
   bool bool_default() const {
-    ASSERT(type_ == TYPE_BOOL);
+    DCHECK(type_ == TYPE_BOOL);
     return *reinterpret_cast<const bool*>(defptr_);
   }
 
   int int_default() const {
-    ASSERT(type_ == TYPE_INT);
+    DCHECK(type_ == TYPE_INT);
     return *reinterpret_cast<const int*>(defptr_);
   }
 
   double float_default() const {
-    ASSERT(type_ == TYPE_FLOAT);
+    DCHECK(type_ == TYPE_FLOAT);
     return *reinterpret_cast<const double*>(defptr_);
   }
 
   const char* string_default() const {
-    ASSERT(type_ == TYPE_STRING);
+    DCHECK(type_ == TYPE_STRING);
     return *reinterpret_cast<const char* const *>(defptr_);
   }
 
   JSArguments args_default() const {
-    ASSERT(type_ == TYPE_ARGS);
+    DCHECK(type_ == TYPE_ARGS);
     return *reinterpret_cast<const JSArguments*>(defptr_);
   }
 
@@ -182,41 +181,39 @@
 }
 
 
-static SmartArrayPointer<const char> ToString(Flag* flag) {
-  HeapStringAllocator string_allocator;
-  StringStream buffer(&string_allocator);
-  switch (flag->type()) {
+OStream& operator<<(OStream& os, const Flag& flag) {  // NOLINT
+  switch (flag.type()) {
     case Flag::TYPE_BOOL:
-      buffer.Add("%s", (*flag->bool_variable() ? "true" : "false"));
+      os << (*flag.bool_variable() ? "true" : "false");
       break;
     case Flag::TYPE_MAYBE_BOOL:
-      buffer.Add("%s", flag->maybe_bool_variable()->has_value
-                       ? (flag->maybe_bool_variable()->value ? "true" : "false")
-                       : "unset");
+      os << (flag.maybe_bool_variable()->has_value
+                 ? (flag.maybe_bool_variable()->value ? "true" : "false")
+                 : "unset");
       break;
     case Flag::TYPE_INT:
-      buffer.Add("%d", *flag->int_variable());
+      os << *flag.int_variable();
       break;
     case Flag::TYPE_FLOAT:
-      buffer.Add("%f", FmtElm(*flag->float_variable()));
+      os << *flag.float_variable();
       break;
     case Flag::TYPE_STRING: {
-      const char* str = flag->string_value();
-      buffer.Add("%s", str ? str : "NULL");
+      const char* str = flag.string_value();
+      os << (str ? str : "NULL");
       break;
     }
     case Flag::TYPE_ARGS: {
-      JSArguments args = *flag->args_variable();
+      JSArguments args = *flag.args_variable();
       if (args.argc > 0) {
-        buffer.Add("%s",  args[0]);
+        os << args[0];
         for (int i = 1; i < args.argc; i++) {
-          buffer.Add(" %s", args[i]);
+          os << args[i];
         }
       }
       break;
     }
   }
-  return buffer.ToCString();
+  return os;
 }
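With the stream inserter replacing the old ToString helper, a flag's current value composes directly into any output stream; this is the usage shape the argv and help-text code below relies on:

    OStringStream os;
    os << "--" << f->name() << " = " << *f;  // flag name plus current value
    args->Add(StrDup(os.c_str()));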
 
 
@@ -228,28 +225,27 @@
     Flag* f = &flags[i];
     if (!f->IsDefault()) {
       if (f->type() == Flag::TYPE_ARGS) {
-        ASSERT(args_flag == NULL);
+        DCHECK(args_flag == NULL);
         args_flag = f;  // Must be last in arguments.
         continue;
       }
-      HeapStringAllocator string_allocator;
-      StringStream buffer(&string_allocator);
-      if (f->type() != Flag::TYPE_BOOL || *(f->bool_variable())) {
-        buffer.Add("--%s", f->name());
-      } else {
-        buffer.Add("--no%s", f->name());
+      {
+        bool disabled = f->type() == Flag::TYPE_BOOL && !*f->bool_variable();
+        OStringStream os;
+        os << (disabled ? "--no" : "--") << f->name();
+        args->Add(StrDup(os.c_str()));
       }
-      args->Add(buffer.ToCString().Detach());
       if (f->type() != Flag::TYPE_BOOL) {
-        args->Add(ToString(f).Detach());
+        OStringStream os;
+        os << *f;
+        args->Add(StrDup(os.c_str()));
       }
     }
   }
   if (args_flag != NULL) {
-    HeapStringAllocator string_allocator;
-    StringStream buffer(&string_allocator);
-    buffer.Add("--%s", args_flag->name());
-    args->Add(buffer.ToCString().Detach());
+    OStringStream os;
+    os << "--" << args_flag->name();
+    args->Add(StrDup(os.c_str()));
     JSArguments jsargs = *args_flag->args_variable();
     for (int j = 0; j < jsargs.argc; j++) {
       args->Add(StrDup(jsargs[j]));
@@ -376,7 +372,8 @@
           value == NULL) {
         if (i < *argc) {
           value = argv[i++];
-        } else {
+        }
+        if (!value) {
           PrintF(stderr, "Error: missing value for flag %s of type %s\n"
                  "Try --help for options\n",
                  arg, Type2String(flag->type()));
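The replaced else branch only fired when the argument list was exhausted; re-testing value after the optional consume also catches a NULL entry inside argv itself. In miniature (ReportMissingValue is a hypothetical stand-in for the PrintF-and-fail sequence above):

    if (i < *argc) value = argv[i++];  // may still leave value == NULL
    if (value == NULL) return ReportMissingValue(arg, flag);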
@@ -517,27 +514,29 @@
 
 // static
 void FlagList::PrintHelp() {
+  CpuFeatures::Probe(false);
   CpuFeatures::PrintTarget();
   CpuFeatures::PrintFeatures();
 
-  printf("Usage:\n");
-  printf("  shell [options] -e string\n");
-  printf("    execute string in V8\n");
-  printf("  shell [options] file1 file2 ... filek\n");
-  printf("    run JavaScript scripts in file1, file2, ..., filek\n");
-  printf("  shell [options]\n");
-  printf("  shell [options] --shell [file1 file2 ... filek]\n");
-  printf("    run an interactive JavaScript shell\n");
-  printf("  d8 [options] file1 file2 ... filek\n");
-  printf("  d8 [options]\n");
-  printf("  d8 [options] --shell [file1 file2 ... filek]\n");
-  printf("    run the new debugging shell\n\n");
-  printf("Options:\n");
+  OFStream os(stdout);
+  os << "Usage:\n"
+     << "  shell [options] -e string\n"
+     << "    execute string in V8\n"
+     << "  shell [options] file1 file2 ... filek\n"
+     << "    run JavaScript scripts in file1, file2, ..., filek\n"
+     << "  shell [options]\n"
+     << "  shell [options] --shell [file1 file2 ... filek]\n"
+     << "    run an interactive JavaScript shell\n"
+     << "  d8 [options] file1 file2 ... filek\n"
+     << "  d8 [options]\n"
+     << "  d8 [options] --shell [file1 file2 ... filek]\n"
+     << "    run the new debugging shell\n\n"
+     << "Options:\n";
   for (size_t i = 0; i < num_flags; ++i) {
     Flag* f = &flags[i];
-    SmartArrayPointer<const char> value = ToString(f);
-    printf("  --%s (%s)\n        type: %s  default: %s\n",
-           f->name(), f->comment(), Type2String(f->type()), value.get());
+    os << "  --" << f->name() << " (" << f->comment() << ")\n"
+       << "        type: " << Type2String(f->type()) << "  default: " << *f
+       << "\n";
   }
 }
 
diff --git a/src/flags.h b/src/flags.h
index 092de21..78522ff 100644
--- a/src/flags.h
+++ b/src/flags.h
@@ -12,7 +12,7 @@
 
 // Declare all of our flags.
 #define FLAG_MODE_DECLARE
-#include "src/flag-definitions.h"
+#include "src/flag-definitions.h"  // NOLINT
 
 // The global list of all flags.
 class FlagList {
diff --git a/src/frames-inl.h b/src/frames-inl.h
index 02e7fb4..9241a44 100644
--- a/src/frames-inl.h
+++ b/src/frames-inl.h
@@ -10,17 +10,19 @@
 #include "src/v8memory.h"
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/frames-ia32.h"
+#include "src/ia32/frames-ia32.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/frames-x64.h"
+#include "src/x64/frames-x64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/frames-arm64.h"
+#include "src/arm64/frames-arm64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/frames-arm.h"
+#include "src/arm/frames-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/frames-mips.h"
+#include "src/mips/frames-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/frames-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/frames-x87.h"
+#include "src/x87/frames-x87.h"  // NOLINT
 #else
 #error Unsupported target architecture.
 #endif
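Each port contributes exactly one frames-<arch>.h, selected by this #elif chain (now including MIPS64). A new port would slot in the same way (architecture name hypothetical):

    #elif V8_TARGET_ARCH_FOO
    #include "src/foo/frames-foo.h"  // NOLINT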
@@ -206,7 +208,7 @@
 
 Address JavaScriptFrame::GetParameterSlot(int index) const {
   int param_count = ComputeParametersCount();
-  ASSERT(-1 <= index && index < param_count);
+  DCHECK(-1 <= index && index < param_count);
   int parameter_offset = (param_count - index - 1) * kPointerSize;
   return caller_sp() + parameter_offset;
 }
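GetParameterSlot maps a zero-based parameter index (index -1 conventionally addresses the receiver) to an address above the caller's stack pointer. A worked example assuming kPointerSize == 8 and param_count == 2:

    // index -1 (receiver): (2 - (-1) - 1) * 8 = 16 bytes above caller_sp()
    // index  0           : (2 -   0  - 1) * 8 =  8 bytes above caller_sp()
    // index  1           : (2 -   1  - 1) * 8 =  0 bytes above caller_sp()
    // The last parameter therefore sits closest to caller_sp().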
@@ -219,10 +221,10 @@
 
 inline Address JavaScriptFrame::GetOperandSlot(int index) const {
   Address base = fp() + JavaScriptFrameConstants::kLocal0Offset;
-  ASSERT(IsAddressAligned(base, kPointerSize));
-  ASSERT_EQ(type(), JAVA_SCRIPT);
-  ASSERT_LT(index, ComputeOperandsCount());
-  ASSERT_LE(0, index);
+  DCHECK(IsAddressAligned(base, kPointerSize));
+  DCHECK_EQ(type(), JAVA_SCRIPT);
+  DCHECK_LT(index, ComputeOperandsCount());
+  DCHECK_LE(0, index);
   // Operand stack grows down.
   return base - index * kPointerSize;
 }
@@ -238,9 +240,9 @@
   // Base points to low address of first operand and stack grows down, so add
   // kPointerSize to get the actual stack size.
   intptr_t stack_size_in_bytes = (base + kPointerSize) - sp();
-  ASSERT(IsAligned(stack_size_in_bytes, kPointerSize));
-  ASSERT(type() == JAVA_SCRIPT);
-  ASSERT(stack_size_in_bytes >= 0);
+  DCHECK(IsAligned(stack_size_in_bytes, kPointerSize));
+  DCHECK(type() == JAVA_SCRIPT);
+  DCHECK(stack_size_in_bytes >= 0);
   return static_cast<int>(stack_size_in_bytes >> kPointerSizeLog2);
 }
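Operand slots grow downward from base, so the span between sp() and the word at base, divided by the pointer size, is the operand count. A worked example with kPointerSize == 8:

    // base == 0x1000, sp == 0x0ff0:
    //   stack_size_in_bytes  = (0x1000 + 8) - 0x0ff0 = 24
    //   ComputeOperandsCount = 24 >> 3 = 3
    // Operands sit at 0x1000 (index 0), 0x0ff8 (index 1), 0x0ff0 (index 2).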
 
@@ -315,14 +317,14 @@
   // the JavaScript frame type, because we may encounter arguments
   // adaptor frames.
   StackFrame* frame = iterator_.frame();
-  ASSERT(frame->is_java_script() || frame->is_arguments_adaptor());
+  DCHECK(frame->is_java_script() || frame->is_arguments_adaptor());
   return static_cast<JavaScriptFrame*>(frame);
 }
 
 
 inline StackFrame* SafeStackFrameIterator::frame() const {
-  ASSERT(!done());
-  ASSERT(frame_->is_java_script() || frame_->is_exit());
+  DCHECK(!done());
+  DCHECK(frame_->is_java_script() || frame_->is_exit());
   return frame_;
 }
 
diff --git a/src/frames.cc b/src/frames.cc
index 7e0079b..f116fd2 100644
--- a/src/frames.cc
+++ b/src/frames.cc
@@ -5,10 +5,11 @@
 #include "src/v8.h"
 
 #include "src/ast.h"
+#include "src/base/bits.h"
 #include "src/deoptimizer.h"
 #include "src/frames-inl.h"
 #include "src/full-codegen.h"
-#include "src/mark-compact.h"
+#include "src/heap/mark-compact.h"
 #include "src/safepoint-table.h"
 #include "src/scopeinfo.h"
 #include "src/string-stream.h"
@@ -29,7 +30,7 @@
   StackHandlerIterator(const StackFrame* frame, StackHandler* handler)
       : limit_(frame->fp()), handler_(handler) {
     // Make sure the handler has already been unwound to this frame.
-    ASSERT(frame->sp() <= handler->address());
+    DCHECK(frame->sp() <= handler->address());
   }
 
   StackHandler* handler() const { return handler_; }
@@ -38,7 +39,7 @@
     return handler_ == NULL || handler_->address() > limit_;
   }
   void Advance() {
-    ASSERT(!done());
+    DCHECK(!done());
     handler_ = handler_->next();
   }
 
@@ -75,7 +76,7 @@
 
 
 void StackFrameIterator::Advance() {
-  ASSERT(!done());
+  DCHECK(!done());
   // Compute the state of the calling frame before restoring
   // callee-saved registers and unwinding handlers. This allows the
   // frame code that computes the caller state to access the top
@@ -93,7 +94,7 @@
 
   // When we're done iterating over the stack frames, the handler
   // chain must have been completely unwound.
-  ASSERT(!done() || handler_ == NULL);
+  DCHECK(!done() || handler_ == NULL);
 }
 
 
@@ -111,7 +112,7 @@
                                              StackFrame::State* state) {
   if (type == StackFrame::NONE) return NULL;
   StackFrame* result = SingletonFor(type);
-  ASSERT(result != NULL);
+  DCHECK(result != NULL);
   result->state_ = *state;
   return result;
 }
@@ -156,7 +157,7 @@
 void JavaScriptFrameIterator::AdvanceToArgumentsFrame() {
   if (!frame()->has_adapted_arguments()) return;
   iterator_.Advance();
-  ASSERT(iterator_.frame()->is_arguments_adaptor());
+  DCHECK(iterator_.frame()->is_arguments_adaptor());
 }
 
 
@@ -205,7 +206,7 @@
     type = ExitFrame::GetStateForFramePointer(Isolate::c_entry_fp(top), &state);
     top_frame_type_ = type;
   } else if (IsValidStackAddress(fp)) {
-    ASSERT(fp != NULL);
+    DCHECK(fp != NULL);
     state.fp = fp;
     state.sp = sp;
     state.pc_address = StackFrame::ResolveReturnAddressLocation(
@@ -258,7 +259,7 @@
 
 
 void SafeStackFrameIterator::AdvanceOneFrame() {
-  ASSERT(!done());
+  DCHECK(!done());
   StackFrame* last_frame = frame_;
   Address last_sp = last_frame->sp(), last_fp = last_frame->fp();
   // Before advancing to the next stack frame, perform pointer validity tests.
@@ -341,7 +342,7 @@
           frame_->state_.pc_address = callback_address;
         }
         external_callback_scope_ = external_callback_scope_->previous();
-        ASSERT(external_callback_scope_ == NULL ||
+        DCHECK(external_callback_scope_ == NULL ||
                external_callback_scope_->scope_address() > frame_->fp());
         return;
       }
@@ -361,9 +362,9 @@
       isolate->inner_pointer_to_code_cache()->GetCacheEntry(inner_pointer);
   if (!entry->safepoint_entry.is_valid()) {
     entry->safepoint_entry = entry->code->GetSafepointEntry(inner_pointer);
-    ASSERT(entry->safepoint_entry.is_valid());
+    DCHECK(entry->safepoint_entry.is_valid());
   } else {
-    ASSERT(entry->safepoint_entry.Equals(
+    DCHECK(entry->safepoint_entry.Equals(
         entry->code->GetSafepointEntry(inner_pointer)));
   }
 
@@ -390,7 +391,7 @@
                            Address* pc_address,
                            Code* holder) {
   Address pc = *pc_address;
-  ASSERT(GcSafeCodeContains(holder, pc));
+  DCHECK(GcSafeCodeContains(holder, pc));
   unsigned pc_offset = static_cast<unsigned>(pc - holder->instruction_start());
   Object* code = holder;
   v->VisitPointer(&code);
@@ -404,14 +405,14 @@
 
 void StackFrame::SetReturnAddressLocationResolver(
     ReturnAddressLocationResolver resolver) {
-  ASSERT(return_address_location_resolver_ == NULL);
+  DCHECK(return_address_location_resolver_ == NULL);
   return_address_location_resolver_ = resolver;
 }
 
 
 StackFrame::Type StackFrame::ComputeType(const StackFrameIteratorBase* iterator,
                                          State* state) {
-  ASSERT(state->fp != NULL);
+  DCHECK(state->fp != NULL);
   if (StandardFrame::IsArgumentsAdaptorFrame(state->fp)) {
     return ARGUMENTS_ADAPTOR;
   }
@@ -428,7 +429,7 @@
     if (!iterator->can_access_heap_objects_) return JAVA_SCRIPT;
     Code::Kind kind = GetContainingCode(iterator->isolate(),
                                         *(state->pc_address))->kind();
-    ASSERT(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
+    DCHECK(kind == Code::FUNCTION || kind == Code::OPTIMIZED_FUNCTION);
     return (kind == Code::OPTIMIZED_FUNCTION) ? OPTIMIZED : JAVA_SCRIPT;
   }
   return static_cast<StackFrame::Type>(Smi::cast(marker)->value());
@@ -539,7 +540,7 @@
   if (fp == 0) return NONE;
   Address sp = ComputeStackPointer(fp);
   FillState(fp, sp, state);
-  ASSERT(*state->pc_address != NULL);
+  DCHECK(*state->pc_address != NULL);
   return EXIT;
 }
 
@@ -581,7 +582,7 @@
       StandardFrameConstants::kExpressionsOffset + kPointerSize;
   Address base = fp() + offset;
   Address limit = sp();
-  ASSERT(base >= limit);  // stack grows downwards
+  DCHECK(base >= limit);  // stack grows downwards
   // Include register-allocated locals in number of expressions.
   return static_cast<int>((base - limit) / kPointerSize);
 }
@@ -615,7 +616,7 @@
 void StandardFrame::IterateCompiledFrame(ObjectVisitor* v) const {
   // Make sure that we're not doing "safe" stack frame iteration. We cannot
   // possibly find pointers in optimized frames in that state.
-  ASSERT(can_access_heap_objects());
+  DCHECK(can_access_heap_objects());
 
   // Compute the safepoint information.
   unsigned stack_slots = 0;
@@ -639,7 +640,7 @@
   // Skip saved double registers.
   if (safepoint_entry.has_doubles()) {
     // Number of doubles not known at snapshot time.
-    ASSERT(!isolate()->serializer_enabled());
+    DCHECK(!isolate()->serializer_enabled());
     parameters_base += DoubleRegister::NumAllocatableRegisters() *
         kDoubleSize / kPointerSize;
   }
@@ -708,7 +709,7 @@
 #ifdef DEBUG
   // Make sure that optimized frames do not contain any stack handlers.
   StackHandlerIterator it(this, top_handler());
-  ASSERT(it.done());
+  DCHECK(it.done());
 #endif
 
   IterateCompiledFrame(v);
@@ -746,7 +747,7 @@
 
 
 int JavaScriptFrame::GetNumberOfIncomingArguments() const {
-  ASSERT(can_access_heap_objects() &&
+  DCHECK(can_access_heap_objects() &&
          isolate()->heap()->gc_state() == Heap::NOT_IN_GC);
 
   return function()->shared()->formal_parameter_count();
@@ -759,13 +760,13 @@
 
 
 void JavaScriptFrame::GetFunctions(List<JSFunction*>* functions) {
-  ASSERT(functions->length() == 0);
+  DCHECK(functions->length() == 0);
   functions->Add(function());
 }
 
 
 void JavaScriptFrame::Summarize(List<FrameSummary>* functions) {
-  ASSERT(functions->length() == 0);
+  DCHECK(functions->length() == 0);
   Code* code_pointer = LookupCode();
   int offset = static_cast<int>(pc() - code_pointer->address());
   FrameSummary summary(receiver(),
@@ -777,9 +778,37 @@
 }
 
 
-void JavaScriptFrame::PrintTop(Isolate* isolate,
-                               FILE* file,
-                               bool print_args,
+void JavaScriptFrame::PrintFunctionAndOffset(JSFunction* function, Code* code,
+                                             Address pc, FILE* file,
+                                             bool print_line_number) {
+  PrintF(file, "%s", function->IsOptimized() ? "*" : "~");
+  function->PrintName(file);
+  int code_offset = static_cast<int>(pc - code->instruction_start());
+  PrintF(file, "+%d", code_offset);
+  if (print_line_number) {
+    SharedFunctionInfo* shared = function->shared();
+    int source_pos = code->SourcePosition(pc);
+    Object* maybe_script = shared->script();
+    if (maybe_script->IsScript()) {
+      Script* script = Script::cast(maybe_script);
+      int line = script->GetLineNumber(source_pos) + 1;
+      Object* script_name_raw = script->name();
+      if (script_name_raw->IsString()) {
+        String* script_name = String::cast(script->name());
+        SmartArrayPointer<char> c_script_name =
+            script_name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
+        PrintF(file, " at %s:%d", c_script_name.get(), line);
+      } else {
+        PrintF(file, " at <unknown>:%d", line);
+      }
+    } else {
+      PrintF(file, " at <unknown>:<unknown>");
+    }
+  }
+}
+
+
+void JavaScriptFrame::PrintTop(Isolate* isolate, FILE* file, bool print_args,
                                bool print_line_number) {
   // constructor calls
   DisallowHeapAllocation no_allocation;
@@ -788,37 +817,8 @@
     if (it.frame()->is_java_script()) {
       JavaScriptFrame* frame = it.frame();
       if (frame->IsConstructor()) PrintF(file, "new ");
-      // function name
-      JSFunction* fun = frame->function();
-      fun->PrintName();
-      Code* js_code = frame->unchecked_code();
-      Address pc = frame->pc();
-      int code_offset =
-          static_cast<int>(pc - js_code->instruction_start());
-      PrintF("+%d", code_offset);
-      SharedFunctionInfo* shared = fun->shared();
-      if (print_line_number) {
-        Code* code = Code::cast(isolate->FindCodeObject(pc));
-        int source_pos = code->SourcePosition(pc);
-        Object* maybe_script = shared->script();
-        if (maybe_script->IsScript()) {
-          Script* script = Script::cast(maybe_script);
-          int line = script->GetLineNumber(source_pos) + 1;
-          Object* script_name_raw = script->name();
-          if (script_name_raw->IsString()) {
-            String* script_name = String::cast(script->name());
-            SmartArrayPointer<char> c_script_name =
-                script_name->ToCString(DISALLOW_NULLS,
-                                       ROBUST_STRING_TRAVERSAL);
-            PrintF(file, " at %s:%d", c_script_name.get(), line);
-          } else {
-            PrintF(file, " at <unknown>:%d", line);
-          }
-        } else {
-          PrintF(file, " at <unknown>:<unknown>");
-        }
-      }
-
+      PrintFunctionAndOffset(frame->function(), frame->unchecked_code(),
+                             frame->pc(), file, print_line_number);
       if (print_args) {
         // function arguments
         // (we are intentionally only printing the actually
@@ -842,7 +842,7 @@
 void JavaScriptFrame::SaveOperandStack(FixedArray* store,
                                        int* stack_handler_index) const {
   int operands_count = store->length();
-  ASSERT_LE(operands_count, ComputeOperandsCount());
+  DCHECK_LE(operands_count, ComputeOperandsCount());
 
   // Visit the stack in LIFO order, saving operands and stack handlers into the
   // array.  The saved stack handlers store a link to the next stack handler,
@@ -856,8 +856,8 @@
     for (; GetOperandSlot(i) < handler->address(); i--) {
       store->set(i, GetOperand(i));
     }
-    ASSERT_GE(i + 1, StackHandlerConstants::kSlotCount);
-    ASSERT_EQ(handler->address(), GetOperandSlot(i));
+    DCHECK_GE(i + 1, StackHandlerConstants::kSlotCount);
+    DCHECK_EQ(handler->address(), GetOperandSlot(i));
     int next_stack_handler_index = i + 1 - StackHandlerConstants::kSlotCount;
     handler->Unwind(isolate(), store, next_stack_handler_index,
                     *stack_handler_index);
@@ -875,17 +875,17 @@
 void JavaScriptFrame::RestoreOperandStack(FixedArray* store,
                                           int stack_handler_index) {
   int operands_count = store->length();
-  ASSERT_LE(operands_count, ComputeOperandsCount());
+  DCHECK_LE(operands_count, ComputeOperandsCount());
   int i = 0;
   while (i <= stack_handler_index) {
     if (i < stack_handler_index) {
       // An operand.
-      ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
+      DCHECK_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
       Memory::Object_at(GetOperandSlot(i)) = store->get(i);
       i++;
     } else {
       // A stack handler.
-      ASSERT_EQ(i, stack_handler_index);
+      DCHECK_EQ(i, stack_handler_index);
       // The FixedArray store grows up.  The stack grows down.  So the operand
       // slot for i actually points to the bottom of the top word in the
       // handler.  The base of the StackHandler* is the address of the bottom
@@ -899,7 +899,7 @@
   }
 
   for (; i < operands_count; i++) {
-    ASSERT_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
+    DCHECK_EQ(GetOperand(i), isolate()->heap()->the_hole_value());
     Memory::Object_at(GetOperandSlot(i)) = store->get(i);
   }
 }
@@ -929,8 +929,14 @@
 
 
 void OptimizedFrame::Summarize(List<FrameSummary>* frames) {
-  ASSERT(frames->length() == 0);
-  ASSERT(is_optimized());
+  DCHECK(frames->length() == 0);
+  DCHECK(is_optimized());
+
+  // Delegate to JS frame in absence of turbofan deoptimization.
+  // TODO(turbofan): Revisit once we support deoptimization across the board.
+  if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+    return JavaScriptFrame::Summarize(frames);
+  }
 
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
@@ -941,15 +947,12 @@
   // throw. An entry with no deoptimization index indicates a call-site
   // without a lazy-deopt. As a consequence we are not allowed to inline
   // functions containing throw.
-  if (deopt_index == Safepoint::kNoDeoptimizationIndex) {
-    JavaScriptFrame::Summarize(frames);
-    return;
-  }
+  DCHECK(deopt_index != Safepoint::kNoDeoptimizationIndex);
 
   TranslationIterator it(data->TranslationByteArray(),
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-  ASSERT(opcode == Translation::BEGIN);
+  DCHECK(opcode == Translation::BEGIN);
   it.Next();  // Drop frame count.
   int jsframe_count = it.Next();
 
@@ -1009,7 +1012,7 @@
                                                   function->shared());
       unsigned pc_offset =
           FullCodeGenerator::PcField::decode(entry) + Code::kHeaderSize;
-      ASSERT(pc_offset > 0);
+      DCHECK(pc_offset > 0);
 
       FrameSummary summary(receiver, function, code, pc_offset, is_constructor);
       frames->Add(summary);
@@ -1017,20 +1020,20 @@
     } else if (opcode == Translation::CONSTRUCT_STUB_FRAME) {
       // The next encountered JS_FRAME will be marked as a constructor call.
       it.Skip(Translation::NumberOfOperandsFor(opcode));
-      ASSERT(!is_constructor);
+      DCHECK(!is_constructor);
       is_constructor = true;
     } else {
       // Skip over operands to advance to the next opcode.
       it.Skip(Translation::NumberOfOperandsFor(opcode));
     }
   }
-  ASSERT(!is_constructor);
+  DCHECK(!is_constructor);
 }
 
 
 DeoptimizationInputData* OptimizedFrame::GetDeoptimizationData(
     int* deopt_index) {
-  ASSERT(is_optimized());
+  DCHECK(is_optimized());
 
   JSFunction* opt_function = function();
   Code* code = opt_function->code();
@@ -1042,19 +1045,25 @@
     code = isolate()->inner_pointer_to_code_cache()->
         GcSafeFindCodeForInnerPointer(pc());
   }
-  ASSERT(code != NULL);
-  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
+  DCHECK(code != NULL);
+  DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
 
   SafepointEntry safepoint_entry = code->GetSafepointEntry(pc());
   *deopt_index = safepoint_entry.deoptimization_index();
-  ASSERT(*deopt_index != Safepoint::kNoDeoptimizationIndex);
+  DCHECK(*deopt_index != Safepoint::kNoDeoptimizationIndex);
 
   return DeoptimizationInputData::cast(code->deoptimization_data());
 }
 
 
 int OptimizedFrame::GetInlineCount() {
-  ASSERT(is_optimized());
+  DCHECK(is_optimized());
+
+  // Delegate to JS frame in absence of turbofan deoptimization.
+  // TODO(turbofan): Revisit once we support deoptimization across the board.
+  if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+    return JavaScriptFrame::GetInlineCount();
+  }
 
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
@@ -1062,7 +1071,7 @@
   TranslationIterator it(data->TranslationByteArray(),
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-  ASSERT(opcode == Translation::BEGIN);
+  DCHECK(opcode == Translation::BEGIN);
   USE(opcode);
   it.Next();  // Drop frame count.
   int jsframe_count = it.Next();
@@ -1071,8 +1080,14 @@
 
 
 void OptimizedFrame::GetFunctions(List<JSFunction*>* functions) {
-  ASSERT(functions->length() == 0);
-  ASSERT(is_optimized());
+  DCHECK(functions->length() == 0);
+  DCHECK(is_optimized());
+
+  // Delegate to JS frame in absence of turbofan deoptimization.
+  // TODO(turbofan): Revisit once we support deoptimization across the board.
+  if (LookupCode()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+    return JavaScriptFrame::GetFunctions(functions);
+  }
 
   int deopt_index = Safepoint::kNoDeoptimizationIndex;
   DeoptimizationInputData* data = GetDeoptimizationData(&deopt_index);
@@ -1081,7 +1096,7 @@
   TranslationIterator it(data->TranslationByteArray(),
                          data->TranslationIndex(deopt_index)->value());
   Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
-  ASSERT(opcode == Translation::BEGIN);
+  DCHECK(opcode == Translation::BEGIN);
   it.Next();  // Drop frame count.
   int jsframe_count = it.Next();
 
@@ -1129,7 +1144,7 @@
 Code* InternalFrame::unchecked_code() const {
   const int offset = InternalFrameConstants::kCodeOffset;
   Object* code = Memory::Object_at(fp() + offset);
-  ASSERT(code != NULL);
+  DCHECK(code != NULL);
   return reinterpret_cast<Code*>(code);
 }
 
@@ -1236,7 +1251,7 @@
   }
   while (context->IsWithContext()) {
     context = context->previous();
-    ASSERT(context != NULL);
+    DCHECK(context != NULL);
   }
 
   // Print heap-allocated local variables.
@@ -1273,10 +1288,12 @@
 
   // Print details about the function.
   if (FLAG_max_stack_trace_source_length != 0 && code != NULL) {
+    OStringStream os;
     SharedFunctionInfo* shared = function->shared();
-    accumulator->Add("--------- s o u r c e   c o d e ---------\n");
-    shared->SourceCodePrint(accumulator, FLAG_max_stack_trace_source_length);
-    accumulator->Add("\n-----------------------------------------\n");
+    os << "--------- s o u r c e   c o d e ---------\n"
+       << SourceCodeOf(shared, FLAG_max_stack_trace_source_length)
+       << "\n-----------------------------------------\n";
+    accumulator->Add(os.c_str());
   }
 
   accumulator->Add("}\n\n");
@@ -1315,15 +1332,15 @@
 
 void EntryFrame::Iterate(ObjectVisitor* v) const {
   StackHandlerIterator it(this, top_handler());
-  ASSERT(!it.done());
+  DCHECK(!it.done());
   StackHandler* handler = it.handler();
-  ASSERT(handler->is_js_entry());
+  DCHECK(handler->is_js_entry());
   handler->Iterate(v, LookupCode());
 #ifdef DEBUG
   // Make sure that the entry frame does not contain more than one
   // stack handler.
   it.Advance();
-  ASSERT(it.done());
+  DCHECK(it.done());
 #endif
   IteratePc(v, pc_address(), LookupCode());
 }
@@ -1403,7 +1420,7 @@
 
 
 JavaScriptFrame* StackFrameLocator::FindJavaScriptFrame(int n) {
-  ASSERT(n >= 0);
+  DCHECK(n >= 0);
   for (int i = 0; i <= n; i++) {
     while (!iterator_.frame()->is_java_script()) iterator_.Advance();
     if (i == n) return JavaScriptFrame::cast(iterator_.frame());
@@ -1432,7 +1449,7 @@
 #ifdef DEBUG
 static bool GcSafeCodeContains(HeapObject* code, Address addr) {
   Map* map = GcSafeMapOfCodeSpaceObject(code);
-  ASSERT(map == code->GetHeap()->code_map());
+  DCHECK(map == code->GetHeap()->code_map());
   Address start = code->address();
   Address end = code->address() + code->SizeFromMap(map);
   return start <= addr && addr < end;
@@ -1443,7 +1460,7 @@
 Code* InnerPointerToCodeCache::GcSafeCastToCode(HeapObject* object,
                                                 Address inner_pointer) {
   Code* code = reinterpret_cast<Code*>(object);
-  ASSERT(code != NULL && GcSafeCodeContains(code, inner_pointer));
+  DCHECK(code != NULL && GcSafeCodeContains(code, inner_pointer));
   return code;
 }
 
@@ -1484,7 +1501,7 @@
 InnerPointerToCodeCache::InnerPointerToCodeCacheEntry*
     InnerPointerToCodeCache::GetCacheEntry(Address inner_pointer) {
   isolate_->counters()->pc_to_code()->Increment();
-  ASSERT(IsPowerOf2(kInnerPointerToCodeCacheSize));
+  DCHECK(base::bits::IsPowerOfTwo32(kInnerPointerToCodeCacheSize));
   uint32_t hash = ComputeIntegerHash(
       static_cast<uint32_t>(reinterpret_cast<uintptr_t>(inner_pointer)),
       v8::internal::kZeroHashSeed);
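The IsPowerOfTwo32 DCHECK above exists so the hash can be reduced to a cache index with a mask instead of a modulo. The indexing line itself falls between these hunks; its typical shape (an assumption, not the verbatim elided line) is:

    uint32_t index = hash & (kInnerPointerToCodeCacheSize - 1);  // == hash % size for power-of-two sizes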
@@ -1492,7 +1509,7 @@
   InnerPointerToCodeCacheEntry* entry = cache(index);
   if (entry->inner_pointer == inner_pointer) {
     isolate_->counters()->pc_to_code_cached()->Increment();
-    ASSERT(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
+    DCHECK(entry->code == GcSafeFindCodeForInnerPointer(inner_pointer));
   } else {
     // Because this code may be interrupted by a profiling signal that
     // also queries the cache, we cannot update inner_pointer before the code
@@ -1514,8 +1531,8 @@
                           int offset,
                           int previous_handler_offset) const {
   STATIC_ASSERT(StackHandlerConstants::kSlotCount >= 5);
-  ASSERT_LE(0, offset);
-  ASSERT_GE(array->length(), offset + StackHandlerConstants::kSlotCount);
+  DCHECK_LE(0, offset);
+  DCHECK_GE(array->length(), offset + StackHandlerConstants::kSlotCount);
   // Unwinding a stack handler into an array chains it in the opposite
   // direction, re-using the "next" slot as a "previous" link, so that stack
   // handlers can be later re-wound in the correct order.  Decode the "state"
@@ -1535,8 +1552,8 @@
                          int offset,
                          Address fp) {
   STATIC_ASSERT(StackHandlerConstants::kSlotCount >= 5);
-  ASSERT_LE(0, offset);
-  ASSERT_GE(array->length(), offset + StackHandlerConstants::kSlotCount);
+  DCHECK_LE(0, offset);
+  DCHECK_GE(array->length(), offset + StackHandlerConstants::kSlotCount);
   Smi* prev_handler_offset = Smi::cast(array->get(offset));
   Code* code = Code::cast(array->get(offset + 1));
   Smi* smi_index = Smi::cast(array->get(offset + 2));
@@ -1562,9 +1579,7 @@
 
 // -------------------------------------------------------------------------
 
-int NumRegs(RegList reglist) {
-  return CompilerIntrinsics::CountSetBits(reglist);
-}
+int NumRegs(RegList reglist) { return base::bits::CountPopulation32(reglist); }
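CountPopulation32 counts the set bits of the 32-bit register mask, which is exactly the number of registers in the list:

    // reglist == 0b1011 -> NumRegs(reglist) == 3 (bits 0, 1 and 3 set).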
 
 
 struct JSCallerSavedCodeData {
@@ -1579,12 +1594,12 @@
     if ((kJSCallerSaved & (1 << r)) != 0)
       caller_saved_code_data.reg_code[i++] = r;
 
-  ASSERT(i == kNumJSCallerSaved);
+  DCHECK(i == kNumJSCallerSaved);
 }
 
 
 int JSCallerSavedCode(int n) {
-  ASSERT(0 <= n && n < kNumJSCallerSaved);
+  DCHECK(0 <= n && n < kNumJSCallerSaved);
   return caller_saved_code_data.reg_code[n];
 }
 
diff --git a/src/frames.h b/src/frames.h
index e80e339..f7e60ae 100644
--- a/src/frames.h
+++ b/src/frames.h
@@ -371,7 +371,7 @@
   virtual void Iterate(ObjectVisitor* v) const;
 
   static EntryFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_entry());
+    DCHECK(frame->is_entry());
     return static_cast<EntryFrame*>(frame);
   }
   virtual void SetCallerFp(Address caller_fp);
@@ -399,7 +399,7 @@
   virtual Code* unchecked_code() const;
 
   static EntryConstructFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_entry_construct());
+    DCHECK(frame->is_entry_construct());
     return static_cast<EntryConstructFrame*>(frame);
   }
 
@@ -427,7 +427,7 @@
   virtual void SetCallerFp(Address caller_fp);
 
   static ExitFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_exit());
+    DCHECK(frame->is_exit());
     return static_cast<ExitFrame*>(frame);
   }
 
@@ -467,7 +467,7 @@
   virtual void SetCallerFp(Address caller_fp);
 
   static StandardFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_standard());
+    DCHECK(frame->is_standard());
     return static_cast<StandardFrame*>(frame);
   }
 
@@ -610,13 +610,15 @@
   static Register constant_pool_pointer_register();
 
   static JavaScriptFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_java_script());
+    DCHECK(frame->is_java_script());
     return static_cast<JavaScriptFrame*>(frame);
   }
 
-  static void PrintTop(Isolate* isolate,
-                       FILE* file,
-                       bool print_args,
+  static void PrintFunctionAndOffset(JSFunction* function, Code* code,
+                                     Address pc, FILE* file,
+                                     bool print_line_number);
+
+  static void PrintTop(Isolate* isolate, FILE* file, bool print_args,
                        bool print_line_number);
 
  protected:
@@ -697,7 +699,7 @@
   virtual Code* unchecked_code() const;
 
   static ArgumentsAdaptorFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_arguments_adaptor());
+    DCHECK(frame->is_arguments_adaptor());
     return static_cast<ArgumentsAdaptorFrame*>(frame);
   }
 
@@ -729,7 +731,7 @@
   virtual Code* unchecked_code() const;
 
   static InternalFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_internal());
+    DCHECK(frame->is_internal());
     return static_cast<InternalFrame*>(frame);
   }
 
@@ -784,7 +786,7 @@
   virtual Type type() const { return CONSTRUCT; }
 
   static ConstructFrame* cast(StackFrame* frame) {
-    ASSERT(frame->is_construct());
+    DCHECK(frame->is_construct());
     return static_cast<ConstructFrame*>(frame);
   }
 
@@ -815,7 +817,7 @@
   const bool can_access_heap_objects_;
 
   StackHandler* handler() const {
-    ASSERT(!done());
+    DCHECK(!done());
     return handler_;
   }
 
@@ -838,7 +840,7 @@
   StackFrameIterator(Isolate* isolate, ThreadLocalTop* t);
 
   StackFrame* frame() const {
-    ASSERT(!done());
+    DCHECK(!done());
     return frame_;
   }
   void Advance();
diff --git a/src/full-codegen.cc b/src/full-codegen.cc
index 0c82eb3..35d51d9 100644
--- a/src/full-codegen.cc
+++ b/src/full-codegen.cc
@@ -4,6 +4,7 @@
 
 #include "src/v8.h"
 
+#include "src/code-factory.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
 #include "src/debug.h"
@@ -11,10 +12,9 @@
 #include "src/liveedit.h"
 #include "src/macro-assembler.h"
 #include "src/prettyprinter.h"
-#include "src/scopes.h"
 #include "src/scopeinfo.h"
+#include "src/scopes.h"
 #include "src/snapshot.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -33,18 +33,22 @@
     VariableDeclaration* decl) {
 }
 
+
 void BreakableStatementChecker::VisitFunctionDeclaration(
     FunctionDeclaration* decl) {
 }
 
+
 void BreakableStatementChecker::VisitModuleDeclaration(
     ModuleDeclaration* decl) {
 }
 
+
 void BreakableStatementChecker::VisitImportDeclaration(
     ImportDeclaration* decl) {
 }
 
+
 void BreakableStatementChecker::VisitExportDeclaration(
     ExportDeclaration* decl) {
 }
@@ -178,6 +182,13 @@
 }
 
 
+void BreakableStatementChecker::VisitClassLiteral(ClassLiteral* expr) {
+  if (expr->extends() != NULL) {
+    Visit(expr->extends());
+  }
+}
+
+
 void BreakableStatementChecker::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
 }
@@ -285,13 +296,15 @@
 }
 
 
+void BreakableStatementChecker::VisitSuperReference(SuperReference* expr) {}
+
+
 #define __ ACCESS_MASM(masm())
 
 bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   Isolate* isolate = info->isolate();
 
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_compile_full_code);
+  TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
 
   Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
@@ -301,16 +314,15 @@
   CodeGenerator::MakeCodePrologue(info, "full");
   const int kInitialBufferSize = 4 * KB;
   MacroAssembler masm(info->isolate(), NULL, kInitialBufferSize);
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  masm.positions_recorder()->StartGDBJITLineInfoRecording();
-#endif
+  if (info->will_serialize()) masm.enable_serializer();
+
   LOG_CODE_EVENT(isolate,
                  CodeStartLinePosInfoRecordEvent(masm.positions_recorder()));
 
   FullCodeGenerator cgen(&masm, info);
   cgen.Generate();
   if (cgen.HasStackOverflow()) {
-    ASSERT(!isolate->has_pending_exception());
+    DCHECK(!isolate->has_pending_exception());
     return false;
   }
   unsigned table_offset = cgen.EmitBackEdgeTable();
@@ -328,16 +340,8 @@
   code->set_allow_osr_at_loop_nesting_level(0);
   code->set_profiler_ticks(0);
   code->set_back_edge_table_offset(table_offset);
-  code->set_back_edges_patched_for_osr(false);
   CodeGenerator::PrintCode(code, info);
   info->SetCode(code);
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  if (FLAG_gdbjit) {
-    GDBJITLineInfo* lineinfo =
-        masm.positions_recorder()->DetachGDBJITLineInfo();
-    GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
-  }
-#endif
   void* line_info = masm.positions_recorder()->DetachJITHandlerData();
   LOG_CODE_EVENT(isolate, CodeEndLinePosInfoRecordEvent(*code, line_info));
   return true;
@@ -348,7 +352,7 @@
   // The back edge table consists of a length (in number of entries)
   // field, and then a sequence of entries.  Each entry is a pair of AST id
   // and code-relative pc offset.
-  masm()->Align(kIntSize);
+  masm()->Align(kPointerSize);
   unsigned offset = masm()->pc_offset();
   unsigned length = back_edges_.length();
   __ dd(length);
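
The comment in this hunk pins down the emitted layout: a length word
followed by fixed-size entries, each pairing an AST id with a code-relative
pc offset (the BackEdgeEntry record later in this file adds a loop depth,
which back_edges.loop_depth(i) reads during OSR patching). A hypothetical
reader for that layout; the field widths and names here are assumptions, not
V8's actual BackEdgeTable accessors:

#include <cstdint>

// Assumed entry shape: three 32-bit fields per entry, after a 32-bit length
// word. The table start is pointer-aligned, per the Align(kPointerSize)
// change above.
struct BackEdgeEntrySketch {
  uint32_t ast_id;      // AST id of the loop's back edge
  uint32_t pc_offset;   // code-relative offset of the interrupt check
  uint32_t loop_depth;  // nesting level, capped at kMaxLoopNestingMarker
};

inline uint32_t TableLength(const uint8_t* table) {
  return *reinterpret_cast<const uint32_t*>(table);
}

inline const BackEdgeEntrySketch* EntryAt(const uint8_t* table, uint32_t i) {
  const uint8_t* entries = table + sizeof(uint32_t);
  return reinterpret_cast<const BackEdgeEntrySketch*>(entries) + i;
}
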
@@ -373,7 +377,7 @@
 
 void FullCodeGenerator::PopulateDeoptimizationData(Handle<Code> code) {
   // Fill in the deoptimization information.
-  ASSERT(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
+  DCHECK(info_->HasDeoptimizationSupport() || bailout_entries_.is_empty());
   if (!info_->HasDeoptimizationSupport()) return;
   int length = bailout_entries_.length();
   Handle<DeoptimizationOutputData> data =
@@ -389,7 +393,7 @@
 void FullCodeGenerator::PopulateTypeFeedbackInfo(Handle<Code> code) {
   Handle<TypeFeedbackInfo> info = isolate()->factory()->NewTypeFeedbackInfo();
   info->set_ic_total_count(ic_total_count_);
-  ASSERT(!isolate()->heap()->InNewSpace(*info));
+  DCHECK(!isolate()->heap()->InNewSpace(*info));
   code->set_type_feedback_info(*info);
 }
 
@@ -416,14 +420,13 @@
 
 void FullCodeGenerator::CallLoadIC(ContextualMode contextual_mode,
                                    TypeFeedbackId id) {
-  ExtraICState extra_state = LoadIC::ComputeExtraICState(contextual_mode);
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), extra_state);
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), contextual_mode).code();
   CallIC(ic, id);
 }
 
 
 void FullCodeGenerator::CallStoreIC(TypeFeedbackId id) {
-  Handle<Code> ic = StoreIC::initialize_stub(isolate(), strict_mode());
+  Handle<Code> ic = CodeFactory::StoreIC(isolate(), strict_mode()).code();
   CallIC(ic, id);
 }
 
@@ -439,7 +442,7 @@
 #ifdef DEBUG
   // In debug builds, mark the return so we can verify that this function
   // was called.
-  ASSERT(!call->return_is_recorded_);
+  DCHECK(!call->return_is_recorded_);
   call->return_is_recorded_ = true;
 #endif
 }
@@ -451,10 +454,10 @@
   if (!info_->HasDeoptimizationSupport()) return;
   unsigned pc_and_state =
       StateField::encode(state) | PcField::encode(masm_->pc_offset());
-  ASSERT(Smi::IsValid(pc_and_state));
+  DCHECK(Smi::IsValid(pc_and_state));
 #ifdef DEBUG
   for (int i = 0; i < bailout_entries_.length(); ++i) {
-    ASSERT(bailout_entries_[i].id != id);
+    DCHECK(bailout_entries_[i].id != id);
   }
 #endif
   BailoutEntry entry = { id, pc_and_state };
@@ -464,8 +467,8 @@
 
 void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
   // The pc offset does not need to be encoded and packed together with a state.
-  ASSERT(masm_->pc_offset() > 0);
-  ASSERT(loop_depth() > 0);
+  DCHECK(masm_->pc_offset() > 0);
+  DCHECK(loop_depth() > 0);
   uint8_t depth = Min(loop_depth(), Code::kMaxLoopNestingMarker);
   BackEdgeEntry entry =
       { ast_id, static_cast<unsigned>(masm_->pc_offset()), depth };
@@ -581,7 +584,7 @@
 
 
 void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
-  ASSERT(scope_->is_global_scope());
+  DCHECK(scope_->is_global_scope());
 
   for (int i = 0; i < declarations->length(); i++) {
     ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration();
@@ -591,15 +594,15 @@
         Comment cmnt(masm_, "[ Link nested modules");
         Scope* scope = module->body()->scope();
         Interface* interface = scope->interface();
-        ASSERT(interface->IsModule() && interface->IsFrozen());
+        DCHECK(interface->IsModule() && interface->IsFrozen());
 
         interface->Allocate(scope->module_var()->index());
 
         // Set up module context.
-        ASSERT(scope->interface()->Index() >= 0);
+        DCHECK(scope->interface()->Index() >= 0);
         __ Push(Smi::FromInt(scope->interface()->Index()));
         __ Push(scope->GetScopeInfo());
-        __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
+        __ CallRuntime(Runtime::kPushModuleContext, 2);
         StoreToFrameField(StandardFrameConstants::kContextOffset,
                           context_register());
 
@@ -688,7 +691,7 @@
     // This is a scope hosting modules. Allocate a descriptor array to pass
     // to the runtime for initialization.
     Comment cmnt(masm_, "[ Allocate modules");
-    ASSERT(scope_->is_global_scope());
+    DCHECK(scope_->is_global_scope());
     modules_ =
         isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED);
     module_index_ = 0;
@@ -702,7 +705,7 @@
 
   if (scope_->num_modules() != 0) {
     // Initialize modules from descriptor array.
-    ASSERT(module_index_ == modules_->length());
+    DCHECK(module_index_ == modules_->length());
     DeclareModules(modules_);
     modules_ = saved_modules;
     module_index_ = saved_module_index;
@@ -731,15 +734,15 @@
   Comment cmnt(masm_, "[ ModuleLiteral");
   SetStatementPosition(block);
 
-  ASSERT(!modules_.is_null());
-  ASSERT(module_index_ < modules_->length());
+  DCHECK(!modules_.is_null());
+  DCHECK(module_index_ < modules_->length());
   int index = module_index_++;
 
   // Set up module context.
-  ASSERT(interface->Index() >= 0);
+  DCHECK(interface->Index() >= 0);
   __ Push(Smi::FromInt(interface->Index()));
   __ Push(Smi::FromInt(0));
-  __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
+  __ CallRuntime(Runtime::kPushModuleContext, 2);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 
   {
@@ -777,9 +780,9 @@
   Scope* scope = module->body()->scope();
   Interface* interface = scope_->interface();
 
-  ASSERT(interface->IsModule() && interface->IsFrozen());
-  ASSERT(!modules_.is_null());
-  ASSERT(module_index_ < modules_->length());
+  DCHECK(interface->IsModule() && interface->IsFrozen());
+  DCHECK(!modules_.is_null());
+  DCHECK(module_index_ < modules_->length());
   interface->Allocate(scope->module_var()->index());
   int index = module_index_++;
 
@@ -790,7 +793,7 @@
 
 
 int FullCodeGenerator::DeclareGlobalsFlags() {
-  ASSERT(DeclareGlobalsStrictMode::is_valid(strict_mode()));
+  DCHECK(DeclareGlobalsStrictMode::is_valid(strict_mode()));
   return DeclareGlobalsEvalFlag::encode(is_eval()) |
       DeclareGlobalsNativeFlag::encode(is_native()) |
       DeclareGlobalsStrictMode::encode(strict_mode());
@@ -829,6 +832,11 @@
 }
 
 
+void FullCodeGenerator::VisitSuperReference(SuperReference* super) {
+  __ CallRuntime(Runtime::kThrowUnsupportedSuperError, 0);
+}
+
+
 void FullCodeGenerator::SetExpressionPosition(Expression* expr) {
   if (!info_->is_debug()) {
     CodeGenerator::RecordPositions(masm_, expr->position());
@@ -855,11 +863,6 @@
 }
 
 
-void FullCodeGenerator::SetStatementPosition(int pos) {
-  CodeGenerator::RecordPositions(masm_, pos);
-}
-
-
 void FullCodeGenerator::SetSourcePosition(int pos) {
   if (pos != RelocInfo::kNoPosition) {
     masm_->positions_recorder()->RecordPosition(pos);
@@ -883,17 +886,17 @@
   FullCodeGenerator::FindInlineFunctionGenerator(Runtime::FunctionId id) {
     int lookup_index =
         static_cast<int>(id) - static_cast<int>(Runtime::kFirstInlineFunction);
-    ASSERT(lookup_index >= 0);
-    ASSERT(static_cast<size_t>(lookup_index) <
-           ARRAY_SIZE(kInlineFunctionGenerators));
+    DCHECK(lookup_index >= 0);
+    DCHECK(static_cast<size_t>(lookup_index) <
+           arraysize(kInlineFunctionGenerators));
     return kInlineFunctionGenerators[lookup_index];
 }
 
 
 void FullCodeGenerator::EmitInlineRuntimeCall(CallRuntime* expr) {
   const Runtime::Function* function = expr->function();
-  ASSERT(function != NULL);
-  ASSERT(function->intrinsic_type == Runtime::INLINE);
+  DCHECK(function != NULL);
+  DCHECK(function->intrinsic_type == Runtime::INLINE);
   InlineFunctionGenerator generator =
       FindInlineFunctionGenerator(function->function_id);
   ((*this).*(generator))(expr);
@@ -902,14 +905,14 @@
 
 void FullCodeGenerator::EmitGeneratorNext(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::NEXT);
 }
 
 
 void FullCodeGenerator::EmitGeneratorThrow(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   EmitGeneratorResume(args->at(0), args->at(1), JSGeneratorObject::THROW);
 }
 
@@ -1007,7 +1010,7 @@
     PrepareForBailoutForId(right_id, NO_REGISTERS);
 
   } else {
-    ASSERT(context()->IsEffect());
+    DCHECK(context()->IsEffect());
     Label eval_right;
     if (is_logical_and) {
       VisitForControl(left, &eval_right, &done, &eval_right);
@@ -1056,11 +1059,11 @@
     PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
   } else {
     scope_ = stmt->scope();
-    ASSERT(!scope_->is_module_scope());
+    DCHECK(!scope_->is_module_scope());
     { Comment cmnt(masm_, "[ Extend block context");
       __ Push(scope_->GetScopeInfo());
       PushFunctionArgumentForContextAllocation();
-      __ CallRuntime(Runtime::kHiddenPushBlockContext, 2);
+      __ CallRuntime(Runtime::kPushBlockContext, 2);
 
       // Replace the context stored in the frame.
       StoreToFrameField(StandardFrameConstants::kContextOffset,
@@ -1093,7 +1096,7 @@
 
   __ Push(Smi::FromInt(stmt->proxy()->interface()->Index()));
   __ Push(Smi::FromInt(0));
-  __ CallRuntime(Runtime::kHiddenPushModuleContext, 2);
+  __ CallRuntime(Runtime::kPushModuleContext, 2);
   StoreToFrameField(
       StandardFrameConstants::kContextOffset, context_register());
 
@@ -1232,7 +1235,7 @@
 
   VisitForStackValue(stmt->expression());
   PushFunctionArgumentForContextAllocation();
-  __ CallRuntime(Runtime::kHiddenPushWithContext, 2);
+  __ CallRuntime(Runtime::kPushWithContext, 2);
   StoreToFrameField(StandardFrameConstants::kContextOffset, context_register());
 
   Scope* saved_scope = scope();
@@ -1284,31 +1287,28 @@
 
 void FullCodeGenerator::VisitWhileStatement(WhileStatement* stmt) {
   Comment cmnt(masm_, "[ WhileStatement");
-  Label test, body;
+  Label loop, body;
 
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
-  // Emit the test at the bottom of the loop.
-  __ jmp(&test);
+  __ bind(&loop);
+
+  SetExpressionPosition(stmt->cond());
+  VisitForControl(stmt->cond(),
+                  &body,
+                  loop_statement.break_label(),
+                  &body);
 
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&body);
   Visit(stmt->body());
 
-  // Emit the statement position here as this is where the while
-  // statement code starts.
   __ bind(loop_statement.continue_label());
-  SetStatementPosition(stmt);
 
   // Check stack before looping.
-  EmitBackEdgeBookkeeping(stmt, &body);
-
-  __ bind(&test);
-  VisitForControl(stmt->cond(),
-                  &body,
-                  loop_statement.break_label(),
-                  loop_statement.break_label());
+  EmitBackEdgeBookkeeping(stmt, &loop);
+  __ jmp(&loop);
 
   PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
   __ bind(loop_statement.break_label());
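
The hunk above flips the while-statement lowering: the old code jumped to a
bottom-of-loop test and fell from the test back into the body, while the new
code binds the loop label first, tests the condition at the top, and closes
the loop with an unconditional back edge. A runnable goto sketch of the two
shapes (illustrative control flow only; the real emitter works through
labels and VisitForControl):

#include <cstdio>

int main() {
  // Old shape: enter by jumping to the bottom-of-loop test.
  int i = 0;
  goto test;
body:
  std::printf("%d\n", i);
  ++i;
test:
  if (i < 3) goto body;

  // New shape: test at the top, unconditional jump as the back edge.
  int j = 0;
loop:
  if (j >= 3) goto done;
  std::printf("%d\n", j);
  ++j;
  goto loop;
done:
  return 0;
}
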
@@ -1385,14 +1385,14 @@
     __ Push(stmt->variable()->name());
     __ Push(result_register());
     PushFunctionArgumentForContextAllocation();
-    __ CallRuntime(Runtime::kHiddenPushCatchContext, 3);
+    __ CallRuntime(Runtime::kPushCatchContext, 3);
     StoreToFrameField(StandardFrameConstants::kContextOffset,
                       context_register());
   }
 
   Scope* saved_scope = scope();
   scope_ = stmt->scope();
-  ASSERT(scope_->declarations()->is_empty());
+  DCHECK(scope_->declarations()->is_empty());
   { WithOrCatch catch_body(this);
     Visit(stmt->catch_block());
   }
@@ -1449,7 +1449,7 @@
   // rethrow the exception if it returns.
   __ Call(&finally_entry);
   __ Push(result_register());
-  __ CallRuntime(Runtime::kHiddenReThrow, 1);
+  __ CallRuntime(Runtime::kReThrow, 1);
 
   // Finally block implementation.
   __ bind(&finally_entry);
@@ -1481,6 +1481,8 @@
 
   __ DebugBreak();
   // Ignore the return value.
+
+  PrepareForBailoutForId(stmt->DebugBreakId(), NO_REGISTERS);
 }
 
 
@@ -1530,7 +1532,7 @@
 
   // Build the function boilerplate and instantiate it.
   Handle<SharedFunctionInfo> function_info =
-      Compiler::BuildFunctionInfo(expr, script());
+      Compiler::BuildFunctionInfo(expr, script(), info_);
   if (function_info.is_null()) {
     SetStackOverflow();
     return;
@@ -1539,6 +1541,16 @@
 }
 
 
+void FullCodeGenerator::VisitClassLiteral(ClassLiteral* expr) {
+  // TODO(arv): Implement
+  Comment cmnt(masm_, "[ ClassLiteral");
+  if (expr->extends() != NULL) {
+    VisitForEffect(expr->extends());
+  }
+  context()->Plug(isolate()->factory()->undefined_value());
+}
+
+
 void FullCodeGenerator::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
   Comment cmnt(masm_, "[ NativeFunctionLiteral");
@@ -1548,19 +1560,18 @@
   v8::Handle<v8::FunctionTemplate> fun_template =
       expr->extension()->GetNativeFunctionTemplate(
           reinterpret_cast<v8::Isolate*>(isolate()), v8::Utils::ToLocal(name));
-  ASSERT(!fun_template.IsEmpty());
+  DCHECK(!fun_template.IsEmpty());
 
   // Instantiate the function and create a shared function info from it.
   Handle<JSFunction> fun = Utils::OpenHandle(*fun_template->GetFunction());
   const int literals = fun->NumberOfLiterals();
   Handle<Code> code = Handle<Code>(fun->shared()->code());
   Handle<Code> construct_stub = Handle<Code>(fun->shared()->construct_stub());
-  bool is_generator = false;
   Handle<SharedFunctionInfo> shared =
       isolate()->factory()->NewSharedFunctionInfo(
-          name, literals, is_generator,
-          code, Handle<ScopeInfo>(fun->shared()->scope_info()),
-          Handle<FixedArray>(fun->shared()->feedback_vector()));
+          name, literals, FunctionKind::kNormalFunction, code,
+          Handle<ScopeInfo>(fun->shared()->scope_info()),
+          Handle<TypeFeedbackVector>(fun->shared()->feedback_vector()));
   shared->set_construct_stub(*construct_stub);
 
   // Copy the function data to the shared function info.
@@ -1575,7 +1586,7 @@
 void FullCodeGenerator::VisitThrow(Throw* expr) {
   Comment cmnt(masm_, "[ Throw");
   VisitForStackValue(expr->exception());
-  __ CallRuntime(Runtime::kHiddenThrow, 1);
+  __ CallRuntime(Runtime::kThrow, 1);
   // Never returns here.
 }
 
@@ -1617,22 +1628,24 @@
   DisallowHeapAllocation no_gc;
   Code* patch = isolate->builtins()->builtin(Builtins::kOnStackReplacement);
 
-  // Iterate over the back edge table and patch every interrupt
+  // Increment the loop nesting level by one and iterate over the back edge
+  // table to find the matching loops, patching each interrupt
   // call to an unconditional call to the replacement code.
-  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
+  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level() + 1;
+  if (loop_nesting_level > Code::kMaxLoopNestingMarker) return;
 
   BackEdgeTable back_edges(unoptimized, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
     if (static_cast<int>(back_edges.loop_depth(i)) == loop_nesting_level) {
-      ASSERT_EQ(INTERRUPT, GetBackEdgeState(isolate,
+      DCHECK_EQ(INTERRUPT, GetBackEdgeState(isolate,
                                             unoptimized,
                                             back_edges.pc(i)));
       PatchAt(unoptimized, back_edges.pc(i), ON_STACK_REPLACEMENT, patch);
     }
   }
 
-  unoptimized->set_back_edges_patched_for_osr(true);
-  ASSERT(Verify(isolate, unoptimized, loop_nesting_level));
+  unoptimized->set_allow_osr_at_loop_nesting_level(loop_nesting_level);
+  DCHECK(Verify(isolate, unoptimized));
 }
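
The rewritten Patch makes OSR arming incremental: rather than patching every
back edge at a precomputed level, each call raises the allowed nesting level
by one and patches only the edges at exactly that depth, bailing out once
the cap is reached. A small model of that policy over a hypothetical edge
list (the cap value is assumed for illustration):

#include <vector>

constexpr int kMaxLoopNestingMarkerSketch = 6;  // assumed cap

struct EdgeSketch {
  int loop_depth;
  bool armed;  // true once the interrupt check is patched to the OSR call
};

void PatchOneLevel(std::vector<EdgeSketch>* edges, int* allowed_level) {
  int next = *allowed_level + 1;
  if (next > kMaxLoopNestingMarkerSketch) return;  // nothing left to arm
  for (EdgeSketch& e : *edges) {
    if (e.loop_depth == next) e.armed = true;
  }
  *allowed_level = next;  // mirrors set_allow_osr_at_loop_nesting_level
}
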
 
 
@@ -1641,23 +1654,21 @@
   Code* patch = isolate->builtins()->builtin(Builtins::kInterruptCheck);
 
   // Iterate over the back edge table and revert the patched interrupt calls.
-  ASSERT(unoptimized->back_edges_patched_for_osr());
   int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
 
   BackEdgeTable back_edges(unoptimized, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
     if (static_cast<int>(back_edges.loop_depth(i)) <= loop_nesting_level) {
-      ASSERT_NE(INTERRUPT, GetBackEdgeState(isolate,
+      DCHECK_NE(INTERRUPT, GetBackEdgeState(isolate,
                                             unoptimized,
                                             back_edges.pc(i)));
       PatchAt(unoptimized, back_edges.pc(i), INTERRUPT, patch);
     }
   }
 
-  unoptimized->set_back_edges_patched_for_osr(false);
   unoptimized->set_allow_osr_at_loop_nesting_level(0);
   // Assert that none of the back edges are patched anymore.
-  ASSERT(Verify(isolate, unoptimized, -1));
+  DCHECK(Verify(isolate, unoptimized));
 }
 
 
@@ -1683,10 +1694,9 @@
 
 
 #ifdef DEBUG
-bool BackEdgeTable::Verify(Isolate* isolate,
-                           Code* unoptimized,
-                           int loop_nesting_level) {
+bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
   DisallowHeapAllocation no_gc;
+  int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level();
   BackEdgeTable back_edges(unoptimized, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
     uint32_t loop_depth = back_edges.loop_depth(i);
diff --git a/src/full-codegen.h b/src/full-codegen.h
index bd6aa13..71e1b60 100644
--- a/src/full-codegen.h
+++ b/src/full-codegen.h
@@ -74,7 +74,7 @@
                          info->zone()),
         back_edges_(2, info->zone()),
         ic_total_count_(0) {
-    ASSERT(!info->IsStub());
+    DCHECK(!info->IsStub());
     Initialize();
   }
 
@@ -115,6 +115,9 @@
 #elif V8_TARGET_ARCH_MIPS
   static const int kCodeSizeMultiplier = 149;
   static const int kBootCodeSizeMultiplier = 120;
+#elif V8_TARGET_ARCH_MIPS64
+  static const int kCodeSizeMultiplier = 149;
+  static const int kBootCodeSizeMultiplier = 120;
 #else
 #error Unsupported target architecture.
 #endif
@@ -134,7 +137,7 @@
     }
     virtual ~NestedStatement() {
       // Unlink from codegen's nesting stack.
-      ASSERT_EQ(this, codegen_->nesting_stack_);
+      DCHECK_EQ(this, codegen_->nesting_stack_);
       codegen_->nesting_stack_ = previous_;
     }
 
@@ -320,6 +323,13 @@
              Label* if_true,
              Label* if_false,
              Label* fall_through);
+#elif V8_TARGET_ARCH_MIPS64
+  void Split(Condition cc,
+             Register lhs,
+             const Operand&  rhs,
+             Label* if_true,
+             Label* if_false,
+             Label* fall_through);
 #else  // All non-mips arch.
   void Split(Condition cc,
              Label* if_true,
@@ -465,8 +475,9 @@
   void EmitReturnSequence();
 
   // Platform-specific code sequences for calls
-  void EmitCall(Call* expr, CallIC::CallType = CallIC::FUNCTION);
+  void EmitCall(Call* expr, CallICState::CallType = CallICState::FUNCTION);
   void EmitCallWithLoadIC(Call* expr);
+  void EmitSuperCallWithLoadIC(Call* expr);
   void EmitKeyedCallWithLoadIC(Call* expr, Expression* key);
 
   // Platform-specific code for inline runtime calls.
@@ -485,11 +496,11 @@
                            JSGeneratorObject::ResumeMode resume_mode);
 
   // Platform-specific code for loading variables.
-  void EmitLoadGlobalCheckExtensions(Variable* var,
+  void EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                      TypeofState typeof_state,
                                      Label* slow);
   MemOperand ContextSlotOperandCheckExtensions(Variable* var, Label* slow);
-  void EmitDynamicLookupFastCase(Variable* var,
+  void EmitDynamicLookupFastCase(VariableProxy* proxy,
                                  TypeofState typeof_state,
                                  Label* slow,
                                  Label* done);
@@ -510,6 +521,8 @@
   // The receiver is left on the stack by the IC.
   void EmitNamedPropertyLoad(Property* expr);
 
+  void EmitNamedSuperPropertyLoad(Property* expr);
+
   // Load a value from a keyed property.
   // The receiver and the key is left on the stack by the IC.
   void EmitKeyedPropertyLoad(Property* expr);
@@ -540,7 +553,6 @@
   // Helper functions to EmitVariableAssignment
   void EmitStoreToStackLocalOrContextSlot(Variable* var,
                                           MemOperand location);
-  void EmitCallStoreContextSlot(Handle<String> name, StrictMode strict_mode);
 
   // Complete a named property assignment.  The receiver is expected on top
   // of the stack and the right-hand-side value in the accumulator.
@@ -551,6 +563,8 @@
   // accumulator.
   void EmitKeyedPropertyAssignment(Assignment* expr);
 
+  void EmitLoadHomeObject(SuperReference* expr);
+
   void CallIC(Handle<Code> code,
               TypeFeedbackId id = TypeFeedbackId::None());
 
@@ -562,7 +576,6 @@
   void SetReturnPosition(FunctionLiteral* fun);
   void SetStatementPosition(Statement* stmt);
   void SetExpressionPosition(Expression* expr);
-  void SetStatementPosition(int pos);
   void SetSourcePosition(int pos);
 
   // Non-local control flow support.
@@ -573,7 +586,7 @@
   int loop_depth() { return loop_depth_; }
   void increment_loop_depth() { loop_depth_++; }
   void decrement_loop_depth() {
-    ASSERT(loop_depth_ > 0);
+    DCHECK(loop_depth_ > 0);
     loop_depth_--;
   }
 
@@ -757,7 +770,7 @@
           fall_through_(fall_through) { }
 
     static const TestContext* cast(const ExpressionContext* context) {
-      ASSERT(context->IsTest());
+      DCHECK(context->IsTest());
       return reinterpret_cast<const TestContext*>(context);
     }
 
@@ -858,7 +871,7 @@
 class BackEdgeTable {
  public:
   BackEdgeTable(Code* code, DisallowHeapAllocation* required) {
-    ASSERT(code->kind() == Code::FUNCTION);
+    DCHECK(code->kind() == Code::FUNCTION);
     instruction_start_ = code->instruction_start();
     Address table_address = instruction_start_ + code->back_edge_table_offset();
     length_ = Memory::uint32_at(table_address);
@@ -890,10 +903,8 @@
     OSR_AFTER_STACK_CHECK
   };
 
-  // Patch all interrupts with allowed loop depth in the unoptimized code to
-  // unconditionally call replacement_code.
-  static void Patch(Isolate* isolate,
-                    Code* unoptimized_code);
+  // Increase the allowed loop nesting level by one and patch matching loops.
+  static void Patch(Isolate* isolate, Code* unoptimized_code);
 
   // Patch the back edge to the target state, provided the correct callee.
   static void PatchAt(Code* unoptimized_code,
@@ -919,14 +930,12 @@
 
 #ifdef DEBUG
   // Verify that all back edges of a certain loop depth are patched.
-  static bool Verify(Isolate* isolate,
-                     Code* unoptimized_code,
-                     int loop_nesting_level);
+  static bool Verify(Isolate* isolate, Code* unoptimized_code);
 #endif  // DEBUG
 
  private:
   Address entry_at(uint32_t index) {
-    ASSERT(index < length_);
+    DCHECK(index < length_);
     return start_ + index * kEntrySize;
   }
 
diff --git a/src/func-name-inferrer.cc b/src/func-name-inferrer.cc
index a3c2f08..b3a64b2 100644
--- a/src/func-name-inferrer.cc
+++ b/src/func-name-inferrer.cc
@@ -5,14 +5,16 @@
 #include "src/v8.h"
 
 #include "src/ast.h"
+#include "src/ast-value-factory.h"
 #include "src/func-name-inferrer.h"
 #include "src/list-inl.h"
 
 namespace v8 {
 namespace internal {
 
-FuncNameInferrer::FuncNameInferrer(Isolate* isolate, Zone* zone)
-    : isolate_(isolate),
+FuncNameInferrer::FuncNameInferrer(AstValueFactory* ast_value_factory,
+                                   Zone* zone)
+    : ast_value_factory_(ast_value_factory),
       entries_stack_(10, zone),
       names_stack_(5, zone),
       funcs_to_infer_(4, zone),
@@ -20,40 +22,36 @@
 }
 
 
-void FuncNameInferrer::PushEnclosingName(Handle<String> name) {
+void FuncNameInferrer::PushEnclosingName(const AstRawString* name) {
   // Enclosing name is a name of a constructor function. To check
   // that it is really a constructor, we check that it is not empty
   // and starts with a capital letter.
-  if (name->length() > 0 && Runtime::IsUpperCaseChar(
-          isolate()->runtime_state(), name->Get(0))) {
+  if (!name->IsEmpty() && unibrow::Uppercase::Is(name->FirstCharacter())) {
     names_stack_.Add(Name(name, kEnclosingConstructorName), zone());
   }
 }
 
 
-void FuncNameInferrer::PushLiteralName(Handle<String> name) {
-  if (IsOpen() &&
-      !String::Equals(isolate()->factory()->prototype_string(), name)) {
+void FuncNameInferrer::PushLiteralName(const AstRawString* name) {
+  if (IsOpen() && name != ast_value_factory_->prototype_string()) {
     names_stack_.Add(Name(name, kLiteralName), zone());
   }
 }
 
 
-void FuncNameInferrer::PushVariableName(Handle<String> name) {
-  if (IsOpen() &&
-      !String::Equals(isolate()->factory()->dot_result_string(), name)) {
+void FuncNameInferrer::PushVariableName(const AstRawString* name) {
+  if (IsOpen() && name != ast_value_factory_->dot_result_string()) {
     names_stack_.Add(Name(name, kVariableName), zone());
   }
 }
 
 
-Handle<String> FuncNameInferrer::MakeNameFromStack() {
-  return MakeNameFromStackHelper(0, isolate()->factory()->empty_string());
+const AstString* FuncNameInferrer::MakeNameFromStack() {
+  return MakeNameFromStackHelper(0, ast_value_factory_->empty_string());
 }
 
-
-Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
-                                                         Handle<String> prev) {
+const AstString* FuncNameInferrer::MakeNameFromStackHelper(
+    int pos, const AstString* prev) {
   if (pos >= names_stack_.length()) return prev;
   if (pos < names_stack_.length() - 1 &&
       names_stack_.at(pos).type == kVariableName &&
@@ -62,12 +60,11 @@
     return MakeNameFromStackHelper(pos + 1, prev);
   } else {
     if (prev->length() > 0) {
-      Handle<String> name = names_stack_.at(pos).name;
+      const AstRawString* name = names_stack_.at(pos).name;
       if (prev->length() + name->length() + 1 > String::kMaxLength) return prev;
-      Factory* factory = isolate()->factory();
-      Handle<String> curr =
-          factory->NewConsString(factory->dot_string(), name).ToHandleChecked();
-      curr = factory->NewConsString(prev, curr).ToHandleChecked();
+      const AstConsString* curr = ast_value_factory_->NewConsString(
+          ast_value_factory_->dot_string(), name);
+      curr = ast_value_factory_->NewConsString(prev, curr);
       return MakeNameFromStackHelper(pos + 1, curr);
     } else {
       return MakeNameFromStackHelper(pos + 1, names_stack_.at(pos).name);
@@ -77,9 +74,9 @@
 
 
 void FuncNameInferrer::InferFunctionsNames() {
-  Handle<String> func_name = MakeNameFromStack();
+  const AstString* func_name = MakeNameFromStack();
   for (int i = 0; i < funcs_to_infer_.length(); ++i) {
-    funcs_to_infer_[i]->set_inferred_name(func_name);
+    funcs_to_infer_[i]->set_raw_inferred_name(func_name);
   }
   funcs_to_infer_.Rewind(0);
 }
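
MakeNameFromStackHelper, now operating on AstString values, still builds the
inferred name by joining the collected names stack with dots, dropping any
variable name that is immediately followed by another variable name (so for
`var a = b = function(){}` only the innermost "b" survives). A standalone
sketch of that joining rule; the types and the helper name are illustrative,
not V8's, and the String::kMaxLength cap is omitted:

#include <string>
#include <vector>

enum class NameTypeSketch { kEnclosingConstructor, kLiteral, kVariable };

struct NameEntry {
  std::string name;
  NameTypeSketch type;
};

std::string MakeDottedName(const std::vector<NameEntry>& stack) {
  std::string result;
  for (size_t i = 0; i < stack.size(); ++i) {
    bool shadowed = i + 1 < stack.size() &&
                    stack[i].type == NameTypeSketch::kVariable &&
                    stack[i + 1].type == NameTypeSketch::kVariable;
    if (shadowed) continue;  // keep only the innermost variable name
    if (!result.empty()) result += '.';
    result += stack[i].name;
  }
  return result;
}
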
diff --git a/src/func-name-inferrer.h b/src/func-name-inferrer.h
index 0c5399c..8b077f9 100644
--- a/src/func-name-inferrer.h
+++ b/src/func-name-inferrer.h
@@ -11,8 +11,10 @@
 namespace v8 {
 namespace internal {
 
+class AstRawString;
+class AstString;
+class AstValueFactory;
 class FunctionLiteral;
-class Isolate;
 
 // FuncNameInferrer is a stateful class that is used to perform name
 // inference for anonymous functions during static analysis of source code.
@@ -26,13 +28,13 @@
 // a name.
 class FuncNameInferrer : public ZoneObject {
  public:
-  FuncNameInferrer(Isolate* isolate, Zone* zone);
+  FuncNameInferrer(AstValueFactory* ast_value_factory, Zone* zone);
 
   // Returns whether we have entered name collection state.
   bool IsOpen() const { return !entries_stack_.is_empty(); }
 
   // Pushes the name of the enclosing function onto the names stack.
-  void PushEnclosingName(Handle<String> name);
+  void PushEnclosingName(const AstRawString* name);
 
   // Enters name collection state.
   void Enter() {
@@ -40,9 +42,9 @@
   }
 
   // Pushes an encountered name onto names stack when in collection state.
-  void PushLiteralName(Handle<String> name);
+  void PushLiteralName(const AstRawString* name);
 
-  void PushVariableName(Handle<String> name);
+  void PushVariableName(const AstRawString* name);
 
   // Adds a function to infer name for.
   void AddFunction(FunctionLiteral* func_to_infer) {
@@ -59,7 +61,7 @@
 
   // Infers a function name and leaves names collection state.
   void Infer() {
-    ASSERT(IsOpen());
+    DCHECK(IsOpen());
     if (!funcs_to_infer_.is_empty()) {
       InferFunctionsNames();
     }
@@ -67,7 +69,7 @@
 
   // Leaves names collection state.
   void Leave() {
-    ASSERT(IsOpen());
+    DCHECK(IsOpen());
     names_stack_.Rewind(entries_stack_.RemoveLast());
     if (entries_stack_.is_empty())
       funcs_to_infer_.Clear();
@@ -80,24 +82,24 @@
     kVariableName
   };
   struct Name {
-    Name(Handle<String> name, NameType type) : name(name), type(type) { }
-    Handle<String> name;
+    Name(const AstRawString* name, NameType type) : name(name), type(type) {}
+    const AstRawString* name;
     NameType type;
   };
 
-  Isolate* isolate() { return isolate_; }
   Zone* zone() const { return zone_; }
 
   // Constructs a full name in dotted notation from gathered names.
-  Handle<String> MakeNameFromStack();
+  const AstString* MakeNameFromStack();
 
   // A helper function for MakeNameFromStack.
-  Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev);
+  const AstString* MakeNameFromStackHelper(int pos,
+                                           const AstString* prev);
 
   // Performs name inferring for added functions.
   void InferFunctionsNames();
 
-  Isolate* isolate_;
+  AstValueFactory* ast_value_factory_;
   ZoneList<int> entries_stack_;
   ZoneList<Name> names_stack_;
   ZoneList<FunctionLiteral*> funcs_to_infer_;
diff --git a/src/gdb-jit.cc b/src/gdb-jit.cc
index 789a0fd..776c662 100644
--- a/src/gdb-jit.cc
+++ b/src/gdb-jit.cc
@@ -4,16 +4,18 @@
 
 #ifdef ENABLE_GDB_JIT_INTERFACE
 #include "src/v8.h"
-#include "src/gdb-jit.h"
 
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
 #include "src/compiler.h"
-#include "src/frames.h"
 #include "src/frames-inl.h"
+#include "src/frames.h"
+#include "src/gdb-jit.h"
 #include "src/global-handles.h"
 #include "src/messages.h"
 #include "src/natives.h"
-#include "src/platform.h"
+#include "src/ostreams.h"
 #include "src/scopes.h"
 
 namespace v8 {
@@ -114,7 +116,7 @@
     if (delta == 0) return;
     uintptr_t padding = align - delta;
     Ensure(position_ += padding);
-    ASSERT((position_ % align) == 0);
+    DCHECK((position_ % align) == 0);
   }
 
   void WriteULEB128(uintptr_t value) {
@@ -154,7 +156,7 @@
 
   template<typename T>
   T* RawSlotAt(uintptr_t offset) {
-    ASSERT(offset < capacity_ && offset + sizeof(T) <= capacity_);
+    DCHECK(offset < capacity_ && offset + sizeof(T) <= capacity_);
     return reinterpret_cast<T*>(&buffer_[offset]);
   }
 
@@ -221,16 +223,11 @@
     S_ATTR_PURE_INSTRUCTIONS = 0x80000000u
   };
 
-  MachOSection(const char* name,
-               const char* segment,
-               uintptr_t align,
+  MachOSection(const char* name, const char* segment, uint32_t align,
                uint32_t flags)
-    : name_(name),
-      segment_(segment),
-      align_(align),
-      flags_(flags) {
+      : name_(name), segment_(segment), align_(align), flags_(flags) {
     if (align_ != 0) {
-      ASSERT(IsPowerOf2(align));
+      DCHECK(base::bits::IsPowerOfTwo32(align));
       align_ = WhichPowerOf2(align_);
     }
   }
@@ -249,8 +246,8 @@
     header->reserved2 = 0;
     memset(header->sectname, 0, sizeof(header->sectname));
     memset(header->segname, 0, sizeof(header->segname));
-    ASSERT(strlen(name_) < sizeof(header->sectname));
-    ASSERT(strlen(segment_) < sizeof(header->segname));
+    DCHECK(strlen(name_) < sizeof(header->sectname));
+    DCHECK(strlen(segment_) < sizeof(header->segname));
     strncpy(header->sectname, name_, sizeof(header->sectname));
     strncpy(header->segname, segment_, sizeof(header->segname));
   }
@@ -258,7 +255,7 @@
  private:
   const char* name_;
   const char* segment_;
-  uintptr_t align_;
+  uint32_t align_;
   uint32_t flags_;
 };
 
@@ -442,7 +439,7 @@
   }
 
   virtual void WriteBody(Writer::Slot<Header> header, Writer* w) {
-    ASSERT(writer_ == NULL);
+    DCHECK(writer_ == NULL);
     header->offset = offset_;
     header->size = size_;
   }
@@ -535,7 +532,7 @@
 
 
   Writer::Slot<MachOHeader> WriteHeader(Writer* w) {
-    ASSERT(w->position() == 0);
+    DCHECK(w->position() == 0);
     Writer::Slot<MachOHeader> header = w->CreateSlotHere<MachOHeader>();
 #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
     header->magic = 0xFEEDFACEu;
@@ -647,12 +644,13 @@
 
 
   void WriteHeader(Writer* w) {
-    ASSERT(w->position() == 0);
+    DCHECK(w->position() == 0);
     Writer::Slot<ELFHeader> header = w->CreateSlotHere<ELFHeader>();
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
+     (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
     const uint8_t ident[16] =
         { 0x7f, 'E', 'L', 'F', 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
-#elif V8_TARGET_ARCH_X64
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
     const uint8_t ident[16] =
         { 0x7f, 'E', 'L', 'F', 2, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0};
 #else
@@ -689,7 +687,7 @@
 
   void WriteSectionTable(Writer* w) {
     // Section headers table immediately follows file header.
-    ASSERT(w->position() == sizeof(ELFHeader));
+    DCHECK(w->position() == sizeof(ELFHeader));
 
     Writer::Slot<ELFSection::Header> headers =
         w->CreateSlotsHere<ELFSection::Header>(sections_.length());
@@ -762,7 +760,8 @@
   Binding binding() const {
     return static_cast<Binding>(info >> 4);
   }
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87
+#if (V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_X87 || \
+     (V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT))
   struct SerializedLayout {
     SerializedLayout(uint32_t name,
                      uintptr_t value,
@@ -785,7 +784,7 @@
     uint8_t other;
     uint16_t section;
   };
-#elif V8_TARGET_ARCH_X64
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
   struct SerializedLayout {
     SerializedLayout(uint32_t name,
                      uintptr_t value,
@@ -897,6 +896,32 @@
 #endif  // defined(__ELF)
 
 
+class LineInfo : public Malloced {
+ public:
+  LineInfo() : pc_info_(10) {}
+
+  void SetPosition(intptr_t pc, int pos, bool is_statement) {
+    AddPCInfo(PCInfo(pc, pos, is_statement));
+  }
+
+  struct PCInfo {
+    PCInfo(intptr_t pc, int pos, bool is_statement)
+        : pc_(pc), pos_(pos), is_statement_(is_statement) {}
+
+    intptr_t pc_;
+    int pos_;
+    bool is_statement_;
+  };
+
+  List<PCInfo>* pc_info() { return &pc_info_; }
+
+ private:
+  void AddPCInfo(const PCInfo& pc_info) { pc_info_.Add(pc_info); }
+
+  List<PCInfo> pc_info_;
+};
+
+
 class CodeDescription BASE_EMBEDDED {
  public:
 #if V8_TARGET_ARCH_X64
@@ -908,27 +933,21 @@
   };
 #endif
 
-  CodeDescription(const char* name,
-                  Code* code,
-                  Handle<Script> script,
-                  GDBJITLineInfo* lineinfo,
-                  GDBJITInterface::CodeTag tag,
+  CodeDescription(const char* name, Code* code, Handle<Script> script,
+                  LineInfo* lineinfo, GDBJITInterface::CodeTag tag,
                   CompilationInfo* info)
       : name_(name),
         code_(code),
         script_(script),
         lineinfo_(lineinfo),
         tag_(tag),
-        info_(info) {
-  }
+        info_(info) {}
 
   const char* name() const {
     return name_;
   }
 
-  GDBJITLineInfo* lineinfo() const {
-    return lineinfo_;
-  }
+  LineInfo* lineinfo() const { return lineinfo_; }
 
   GDBJITInterface::CodeTag tag() const {
     return tag_;
@@ -964,12 +983,12 @@
 
 #if V8_TARGET_ARCH_X64
   uintptr_t GetStackStateStartAddress(StackState state) const {
-    ASSERT(state < STACK_STATE_MAX);
+    DCHECK(state < STACK_STATE_MAX);
     return stack_state_start_addresses_[state];
   }
 
   void SetStackStateStartAddress(StackState state, uintptr_t addr) {
-    ASSERT(state < STACK_STATE_MAX);
+    DCHECK(state < STACK_STATE_MAX);
     stack_state_start_addresses_[state] = addr;
   }
 #endif
@@ -987,7 +1006,7 @@
   const char* name_;
   Code* code_;
   Handle<Script> script_;
-  GDBJITLineInfo* lineinfo_;
+  LineInfo* lineinfo_;
   GDBJITInterface::CodeTag tag_;
   CompilationInfo* info_;
 #if V8_TARGET_ARCH_X64
@@ -1092,6 +1111,8 @@
       UNIMPLEMENTED();
 #elif V8_TARGET_ARCH_MIPS
       UNIMPLEMENTED();
+#elif V8_TARGET_ARCH_MIPS64
+      UNIMPLEMENTED();
 #else
 #error Unsupported target architecture.
 #endif
@@ -1130,11 +1151,11 @@
       }
 
       // See contexts.h for more information.
-      ASSERT(Context::MIN_CONTEXT_SLOTS == 4);
-      ASSERT(Context::CLOSURE_INDEX == 0);
-      ASSERT(Context::PREVIOUS_INDEX == 1);
-      ASSERT(Context::EXTENSION_INDEX == 2);
-      ASSERT(Context::GLOBAL_OBJECT_INDEX == 3);
+      DCHECK(Context::MIN_CONTEXT_SLOTS == 4);
+      DCHECK(Context::CLOSURE_INDEX == 0);
+      DCHECK(Context::PREVIOUS_INDEX == 1);
+      DCHECK(Context::EXTENSION_INDEX == 2);
+      DCHECK(Context::GLOBAL_OBJECT_INDEX == 3);
       w->WriteULEB128(current_abbreviation++);
       w->WriteString(".closure");
       w->WriteULEB128(current_abbreviation++);
@@ -1282,7 +1303,7 @@
   bool WriteBodyInternal(Writer* w) {
     int current_abbreviation = 1;
     bool extra_info = desc_->IsInfoAvailable();
-    ASSERT(desc_->IsLineInfoAvailable());
+    DCHECK(desc_->IsLineInfoAvailable());
     w->WriteULEB128(current_abbreviation++);
     w->WriteULEB128(DW_TAG_COMPILE_UNIT);
     w->Write<uint8_t>(extra_info ? DW_CHILDREN_YES : DW_CHILDREN_NO);
@@ -1447,13 +1468,13 @@
     intptr_t line = 1;
     bool is_statement = true;
 
-    List<GDBJITLineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
+    List<LineInfo::PCInfo>* pc_info = desc_->lineinfo()->pc_info();
     pc_info->Sort(&ComparePCInfo);
 
     int pc_info_length = pc_info->length();
     for (int i = 0; i < pc_info_length; i++) {
-      GDBJITLineInfo::PCInfo* info = &pc_info->at(i);
-      ASSERT(info->pc_ >= pc);
+      LineInfo::PCInfo* info = &pc_info->at(i);
+      DCHECK(info->pc_ >= pc);
 
       // Reduce bloating in the debug line table by removing duplicate line
       // entries (per DWARF2 standard).
@@ -1523,8 +1544,8 @@
     w->Write<uint8_t>(op);
   }
 
-  static int ComparePCInfo(const GDBJITLineInfo::PCInfo* a,
-                           const GDBJITLineInfo::PCInfo* b) {
+  static int ComparePCInfo(const LineInfo::PCInfo* a,
+                           const LineInfo::PCInfo* b) {
     if (a->pc_ == b->pc_) {
       if (a->is_statement_ != b->is_statement_) {
         return b->is_statement_ ? +1 : -1;
@@ -1623,7 +1644,7 @@
     }
   }
 
-  ASSERT((w->position() - initial_position) % kPointerSize == 0);
+  DCHECK((w->position() - initial_position) % kPointerSize == 0);
   length_slot->set(w->position() - initial_position);
 }
 
@@ -1819,8 +1840,9 @@
 
 #ifdef OBJECT_PRINT
   void __gdb_print_v8_object(Object* object) {
-    object->Print();
-    PrintF(stdout, "\n");
+    OFStream os(stdout);
+    object->Print(os);
+    os << flush;
   }
 #endif
 }
@@ -1962,15 +1984,15 @@
 }
 
 
-static void* TagLineInfo(GDBJITLineInfo* ptr) {
+static void* TagLineInfo(LineInfo* ptr) {
   return reinterpret_cast<void*>(
       reinterpret_cast<intptr_t>(ptr) | kLineInfoTag);
 }
 
 
-static GDBJITLineInfo* UntagLineInfo(void* ptr) {
-  return reinterpret_cast<GDBJITLineInfo*>(
-      reinterpret_cast<intptr_t>(ptr) & ~kLineInfoTag);
+static LineInfo* UntagLineInfo(void* ptr) {
+  return reinterpret_cast<LineInfo*>(reinterpret_cast<intptr_t>(ptr) &
+                                     ~kLineInfoTag);
 }
 
 
@@ -2030,7 +2052,7 @@
 }
 
 
-static LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
+static base::LazyMutex mutex = LAZY_MUTEX_INITIALIZER;
 
 
 void GDBJITInterface::AddCode(const char* name,
@@ -2038,15 +2060,13 @@
                               GDBJITInterface::CodeTag tag,
                               Script* script,
                               CompilationInfo* info) {
-  if (!FLAG_gdbjit) return;
-
-  LockGuard<Mutex> lock_guard(mutex.Pointer());
+  base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
   DisallowHeapAllocation no_gc;
 
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
   if (e->value != NULL && !IsLineInfoTagged(e->value)) return;
 
-  GDBJITLineInfo* lineinfo = UntagLineInfo(e->value);
+  LineInfo* lineinfo = UntagLineInfo(e->value);
   CodeDescription code_desc(name,
                             code,
                             script != NULL ? Handle<Script>(script)
@@ -2064,7 +2084,7 @@
   AddUnwindInfo(&code_desc);
   Isolate* isolate = code->GetIsolate();
   JITCodeEntry* entry = CreateELFObject(&code_desc, isolate);
-  ASSERT(!IsLineInfoTagged(entry));
+  DCHECK(!IsLineInfoTagged(entry));
 
   delete lineinfo;
   e->value = entry;
@@ -2084,49 +2104,10 @@
 }
 
 
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
-                              const char* name,
-                              Code* code) {
-  if (!FLAG_gdbjit) return;
-
-  EmbeddedVector<char, 256> buffer;
-  StringBuilder builder(buffer.start(), buffer.length());
-
-  builder.AddString(Tag2String(tag));
-  if ((name != NULL) && (*name != '\0')) {
-    builder.AddString(": ");
-    builder.AddString(name);
-  } else {
-    builder.AddFormatted(": code object %p", static_cast<void*>(code));
-  }
-
-  AddCode(builder.Finalize(), code, tag, NULL, NULL);
-}
-
-
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag,
-                              Name* name,
-                              Code* code) {
-  if (!FLAG_gdbjit) return;
-  if (name != NULL && name->IsString()) {
-    AddCode(tag, String::cast(name)->ToCString(DISALLOW_NULLS).get(), code);
-  } else {
-    AddCode(tag, "", code);
-  }
-}
-
-
-void GDBJITInterface::AddCode(GDBJITInterface::CodeTag tag, Code* code) {
-  if (!FLAG_gdbjit) return;
-
-  AddCode(tag, "", code);
-}
-
-
 void GDBJITInterface::RemoveCode(Code* code) {
   if (!FLAG_gdbjit) return;
 
-  LockGuard<Mutex> lock_guard(mutex.Pointer());
+  base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
   HashMap::Entry* e = GetEntries()->Lookup(code,
                                            HashForCodeObject(code),
                                            false);
@@ -2162,15 +2143,62 @@
 }
 
 
-void GDBJITInterface::RegisterDetailedLineInfo(Code* code,
-                                               GDBJITLineInfo* line_info) {
-  LockGuard<Mutex> lock_guard(mutex.Pointer());
-  ASSERT(!IsLineInfoTagged(line_info));
+static void RegisterDetailedLineInfo(Code* code, LineInfo* line_info) {
+  base::LockGuard<base::Mutex> lock_guard(mutex.Pointer());
+  DCHECK(!IsLineInfoTagged(line_info));
   HashMap::Entry* e = GetEntries()->Lookup(code, HashForCodeObject(code), true);
-  ASSERT(e->value == NULL);
+  DCHECK(e->value == NULL);
   e->value = TagLineInfo(line_info);
 }
 
 
+void GDBJITInterface::EventHandler(const v8::JitCodeEvent* event) {
+  if (!FLAG_gdbjit) return;
+  switch (event->type) {
+    case v8::JitCodeEvent::CODE_ADDED: {
+      Code* code = Code::GetCodeFromTargetAddress(
+          reinterpret_cast<Address>(event->code_start));
+      if (code->kind() == Code::OPTIMIZED_FUNCTION ||
+          code->kind() == Code::FUNCTION) {
+        break;
+      }
+      EmbeddedVector<char, 256> buffer;
+      StringBuilder builder(buffer.start(), buffer.length());
+      builder.AddSubstring(event->name.str, static_cast<int>(event->name.len));
+      AddCode(builder.Finalize(), code, NON_FUNCTION, NULL, NULL);
+      break;
+    }
+    case v8::JitCodeEvent::CODE_MOVED:
+      break;
+    case v8::JitCodeEvent::CODE_REMOVED: {
+      Code* code = Code::GetCodeFromTargetAddress(
+          reinterpret_cast<Address>(event->code_start));
+      RemoveCode(code);
+      break;
+    }
+    case v8::JitCodeEvent::CODE_ADD_LINE_POS_INFO: {
+      LineInfo* line_info = reinterpret_cast<LineInfo*>(event->user_data);
+      line_info->SetPosition(static_cast<intptr_t>(event->line_info.offset),
+                             static_cast<int>(event->line_info.pos),
+                             event->line_info.position_type ==
+                                 v8::JitCodeEvent::STATEMENT_POSITION);
+      break;
+    }
+    case v8::JitCodeEvent::CODE_START_LINE_INFO_RECORDING: {
+      v8::JitCodeEvent* mutable_event = const_cast<v8::JitCodeEvent*>(event);
+      mutable_event->user_data = new LineInfo();
+      break;
+    }
+    case v8::JitCodeEvent::CODE_END_LINE_INFO_RECORDING: {
+      LineInfo* line_info = reinterpret_cast<LineInfo*>(event->user_data);
+      Code* code = Code::GetCodeFromTargetAddress(
+          reinterpret_cast<Address>(event->code_start));
+      RegisterDetailedLineInfo(code, line_info);
+      break;
+    }
+  }
+}
+
+
 } }  // namespace v8::internal
 #endif
diff --git a/src/gdb-jit.h b/src/gdb-jit.h
index d882856..14536cf 100644
--- a/src/gdb-jit.h
+++ b/src/gdb-jit.h
@@ -15,6 +15,7 @@
 
 #ifdef ENABLE_GDB_JIT_INTERFACE
 #include "src/v8.h"
+
 #include "src/factory.h"
 
 namespace v8 {
@@ -22,89 +23,25 @@
 
 class CompilationInfo;
 
-#define CODE_TAGS_LIST(V)                       \
-  V(LOAD_IC)                                    \
-  V(KEYED_LOAD_IC)                              \
-  V(STORE_IC)                                   \
-  V(KEYED_STORE_IC)                             \
-  V(STUB)                                       \
-  V(BUILTIN)                                    \
-  V(SCRIPT)                                     \
-  V(EVAL)                                       \
-  V(FUNCTION)
-
-class GDBJITLineInfo : public Malloced {
- public:
-  GDBJITLineInfo()
-      : pc_info_(10) { }
-
-  void SetPosition(intptr_t pc, int pos, bool is_statement) {
-    AddPCInfo(PCInfo(pc, pos, is_statement));
-  }
-
-  struct PCInfo {
-    PCInfo(intptr_t pc, int pos, bool is_statement)
-        : pc_(pc), pos_(pos), is_statement_(is_statement) { }
-
-    intptr_t pc_;
-    int pos_;
-    bool is_statement_;
-  };
-
-  List<PCInfo>* pc_info() {
-    return &pc_info_;
-  }
-
- private:
-  void AddPCInfo(const PCInfo& pc_info) {
-    pc_info_.Add(pc_info);
-  }
-
-  List<PCInfo> pc_info_;
-};
-
-
 class GDBJITInterface: public AllStatic {
  public:
-  enum CodeTag {
-#define V(x) x,
-    CODE_TAGS_LIST(V)
-#undef V
-    TAG_COUNT
-  };
+  enum CodeTag { NON_FUNCTION, FUNCTION };
 
-  static const char* Tag2String(CodeTag tag) {
-    switch (tag) {
-#define V(x) case x: return #x;
-      CODE_TAGS_LIST(V)
-#undef V
-      default:
-        return NULL;
-    }
-  }
-
-  static void AddCode(const char* name,
-                      Code* code,
-                      CodeTag tag,
-                      Script* script,
-                      CompilationInfo* info);
+  // Main entry point into GDB JIT, realized as a JitCodeEventHandler.
+  static void EventHandler(const v8::JitCodeEvent* event);
 
   static void AddCode(Handle<Name> name,
                       Handle<Script> script,
                       Handle<Code> code,
                       CompilationInfo* info);
 
-  static void AddCode(CodeTag tag, Name* name, Code* code);
-
-  static void AddCode(CodeTag tag, const char* name, Code* code);
-
-  static void AddCode(CodeTag tag, Code* code);
-
-  static void RemoveCode(Code* code);
-
   static void RemoveCodeRange(Address start, Address end);
 
-  static void RegisterDetailedLineInfo(Code* code, GDBJITLineInfo* line_info);
+ private:
+  static void AddCode(const char* name, Code* code, CodeTag tag, Script* script,
+                      CompilationInfo* info);
+
+  static void RemoveCode(Code* code);
 };
 
 #define GDBJIT(action) GDBJITInterface::action
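
With the AddCode(tag, ...) overloads gone, the interface is driven entirely
by the v8::JitCodeEvent stream. A sketch of an embedder-style handler that
consumes the same events (the <v8.h> include path is an assumption, and
registration through the JIT code event API is omitted):

#include <cstdio>

#include <v8.h>

static void HandleJitEvent(const v8::JitCodeEvent* event) {
  switch (event->type) {
    case v8::JitCodeEvent::CODE_ADDED:
      std::printf("added %.*s at %p (%zu bytes)\n",
                  static_cast<int>(event->name.len), event->name.str,
                  event->code_start, event->code_len);
      break;
    case v8::JitCodeEvent::CODE_REMOVED:
      std::printf("removed code at %p\n", event->code_start);
      break;
    default:
      break;  // moves and line-info bookkeeping ignored in this sketch
  }
}
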
diff --git a/src/generator.js b/src/generator.js
index a0c2aff..72e64dc 100644
--- a/src/generator.js
+++ b/src/generator.js
@@ -20,6 +20,7 @@
                         ['[Generator].prototype.next', this]);
   }
 
+  if (DEBUG_IS_ACTIVE) %DebugPrepareStepInIfStepping(this);
   return %_GeneratorNext(this, value);
 }
 
@@ -44,12 +45,10 @@
 
 function GeneratorFunctionConstructor(arg1) {  // length == 1
   var source = NewFunctionString(arguments, 'function*');
-  var global_receiver = %GlobalReceiver(global);
+  var global_proxy = %GlobalProxy(global);
   // Compile the string in the constructor and not a helper so that errors
   // appear to come from here.
-  var f = %CompileString(source, true);
-  if (!IS_FUNCTION(f)) return f;
-  f = %_CallFunction(global_receiver, f);
+  var f = %_CallFunction(global_proxy, %CompileString(source, true));
   %FunctionMarkNameShouldPrintAsAnonymous(f);
   return f;
 }
@@ -57,21 +56,29 @@
 
 function SetUpGenerators() {
   %CheckIsBootstrapping();
+
+  // Neither Runtime_GeneratorNext nor Runtime_GeneratorThrow is supported by
+  // Crankshaft or TurboFan, so disable optimization of the wrappers here.
+  %NeverOptimizeFunction(GeneratorObjectNext);
+  %NeverOptimizeFunction(GeneratorObjectThrow);
+
+  // Set up non-enumerable functions on the generator prototype object.
   var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
   InstallFunctions(GeneratorObjectPrototype,
                    DONT_ENUM | DONT_DELETE | READ_ONLY,
                    ["next", GeneratorObjectNext,
                     "throw", GeneratorObjectThrow]);
+
   %FunctionSetName(GeneratorObjectIterator, '[Symbol.iterator]');
-  %SetProperty(GeneratorObjectPrototype, symbolIterator, GeneratorObjectIterator,
-      DONT_ENUM | DONT_DELETE | READ_ONLY);
-  %SetProperty(GeneratorObjectPrototype, "constructor",
-               GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
-  %SetPrototype(GeneratorFunctionPrototype, $Function.prototype);
+  %AddNamedProperty(GeneratorObjectPrototype, symbolIterator,
+      GeneratorObjectIterator, DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %AddNamedProperty(GeneratorObjectPrototype, "constructor",
+      GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %InternalSetPrototype(GeneratorFunctionPrototype, $Function.prototype);
   %SetCode(GeneratorFunctionPrototype, GeneratorFunctionPrototypeConstructor);
-  %SetProperty(GeneratorFunctionPrototype, "constructor",
-               GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY);
-  %SetPrototype(GeneratorFunction, $Function);
+  %AddNamedProperty(GeneratorFunctionPrototype, "constructor",
+      GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY);
+  %InternalSetPrototype(GeneratorFunction, $Function);
   %SetCode(GeneratorFunction, GeneratorFunctionConstructor);
 }
 
diff --git a/src/global-handles.cc b/src/global-handles.cc
index a5ae2d5..282ca2d 100644
--- a/src/global-handles.cc
+++ b/src/global-handles.cc
@@ -38,13 +38,13 @@
 
   // Maps handle location (slot) to the containing node.
   static Node* FromLocation(Object** location) {
-    ASSERT(OFFSET_OF(Node, object_) == 0);
+    DCHECK(OFFSET_OF(Node, object_) == 0);
     return reinterpret_cast<Node*>(location);
   }
 
   Node() {
-    ASSERT(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset);
-    ASSERT(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset);
+    DCHECK(OFFSET_OF(Node, class_id_) == Internals::kNodeClassIdOffset);
+    DCHECK(OFFSET_OF(Node, flags_) == Internals::kNodeFlagsOffset);
     STATIC_ASSERT(static_cast<int>(NodeState::kMask) ==
                   Internals::kNodeStateMask);
     STATIC_ASSERT(WEAK == Internals::kNodeStateIsWeakValue);
@@ -73,7 +73,7 @@
 
   void Initialize(int index, Node** first_free) {
     index_ = static_cast<uint8_t>(index);
-    ASSERT(static_cast<int>(index_) == index);
+    DCHECK(static_cast<int>(index_) == index);
     set_state(FREE);
     set_in_new_space_list(false);
     parameter_or_next_free_.next_free = *first_free;
@@ -81,7 +81,7 @@
   }
 
   void Acquire(Object* object) {
-    ASSERT(state() == FREE);
+    DCHECK(state() == FREE);
     object_ = object;
     class_id_ = v8::HeapProfiler::kPersistentHandleNoClassId;
     set_independent(false);
@@ -93,7 +93,7 @@
   }
 
   void Release() {
-    ASSERT(state() != FREE);
+    DCHECK(state() != FREE);
     set_state(FREE);
     // Zap the values for eager trapping.
     object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
@@ -162,18 +162,18 @@
   }
 
   void MarkPending() {
-    ASSERT(state() == WEAK);
+    DCHECK(state() == WEAK);
     set_state(PENDING);
   }
 
   // Independent flag accessors.
   void MarkIndependent() {
-    ASSERT(state() != FREE);
+    DCHECK(state() != FREE);
     set_independent(true);
   }
 
   void MarkPartiallyDependent() {
-    ASSERT(state() != FREE);
+    DCHECK(state() != FREE);
     if (GetGlobalHandles()->isolate()->heap()->InNewSpace(object_)) {
       set_partially_dependent(true);
     }
@@ -186,27 +186,27 @@
 
   // Callback parameter accessors.
   void set_parameter(void* parameter) {
-    ASSERT(state() != FREE);
+    DCHECK(state() != FREE);
     parameter_or_next_free_.parameter = parameter;
   }
   void* parameter() const {
-    ASSERT(state() != FREE);
+    DCHECK(state() != FREE);
     return parameter_or_next_free_.parameter;
   }
 
   // Accessors for next free node in the free list.
   Node* next_free() {
-    ASSERT(state() == FREE);
+    DCHECK(state() == FREE);
     return parameter_or_next_free_.next_free;
   }
   void set_next_free(Node* value) {
-    ASSERT(state() == FREE);
+    DCHECK(state() == FREE);
     parameter_or_next_free_.next_free = value;
   }
 
   void MakeWeak(void* parameter, WeakCallback weak_callback) {
-    ASSERT(weak_callback != NULL);
-    ASSERT(state() != FREE);
+    DCHECK(weak_callback != NULL);
+    DCHECK(state() != FREE);
     CHECK(object_ != NULL);
     set_state(WEAK);
     set_parameter(parameter);
@@ -214,7 +214,7 @@
   }
 
   void* ClearWeakness() {
-    ASSERT(state() != FREE);
+    DCHECK(state() != FREE);
     void* p = parameter();
     set_state(NORMAL);
     set_parameter(NULL);
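
The DCHECKs in these Node accessors encode a small state machine: Acquire
only from FREE, MakeWeak and Release never on FREE, MarkPending only from
WEAK, and ClearWeakness back to NORMAL. A compact model of just those
transitions (illustrative; the real node also tracks flags, parameters, and
weak callbacks):

#include <cassert>

enum class NodeStateSketch { FREE, NORMAL, WEAK, PENDING };

struct NodeSketch {
  NodeStateSketch state = NodeStateSketch::FREE;

  void Acquire() {
    assert(state == NodeStateSketch::FREE);
    state = NodeStateSketch::NORMAL;
  }
  void MakeWeak() {
    assert(state != NodeStateSketch::FREE);
    state = NodeStateSketch::WEAK;
  }
  void MarkPending() {
    assert(state == NodeStateSketch::WEAK);
    state = NodeStateSketch::PENDING;
  }
  void ClearWeakness() {
    assert(state != NodeStateSketch::FREE);
    state = NodeStateSketch::NORMAL;
  }
  void Release() {
    assert(state != NodeStateSketch::FREE);
    state = NodeStateSketch::FREE;
  }
};
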
@@ -235,9 +235,9 @@
     {
       // Check that we are not passing a finalized external string to
       // the callback.
-      ASSERT(!object_->IsExternalAsciiString() ||
-             ExternalAsciiString::cast(object_)->resource() != NULL);
-      ASSERT(!object_->IsExternalTwoByteString() ||
+      DCHECK(!object_->IsExternalOneByteString() ||
+             ExternalOneByteString::cast(object_)->resource() != NULL);
+      DCHECK(!object_->IsExternalTwoByteString() ||
              ExternalTwoByteString::cast(object_)->resource() != NULL);
       // Leaving V8.
       VMState<EXTERNAL> state(isolate);
@@ -316,12 +316,12 @@
   }
 
   Node* node_at(int index) {
-    ASSERT(0 <= index && index < kSize);
+    DCHECK(0 <= index && index < kSize);
     return &nodes_[index];
   }
 
   void IncreaseUses() {
-    ASSERT(used_nodes_ < kSize);
+    DCHECK(used_nodes_ < kSize);
     if (used_nodes_++ == 0) {
       NodeBlock* old_first = global_handles_->first_used_block_;
       global_handles_->first_used_block_ = this;
@@ -333,7 +333,7 @@
   }
 
   void DecreaseUses() {
-    ASSERT(used_nodes_ > 0);
+    DCHECK(used_nodes_ > 0);
     if (--used_nodes_ == 0) {
       if (next_used_ != NULL) next_used_->prev_used_ = prev_used_;
       if (prev_used_ != NULL) prev_used_->next_used_ = next_used_;
@@ -371,7 +371,7 @@
   intptr_t ptr = reinterpret_cast<intptr_t>(this);
   ptr = ptr - index_ * sizeof(Node);
   NodeBlock* block = reinterpret_cast<NodeBlock*>(ptr);
-  ASSERT(block->node_at(index_) == this);
+  DCHECK(block->node_at(index_) == this);
   return block;
 }
 
@@ -405,12 +405,12 @@
   bool done() const { return block_ == NULL; }
 
   Node* node() const {
-    ASSERT(!done());
+    DCHECK(!done());
     return block_->node_at(index_);
   }
 
   void Advance() {
-    ASSERT(!done());
+    DCHECK(!done());
     if (++index_ < NodeBlock::kSize) return;
     index_ = 0;
     block_ = block_->next_used();
@@ -450,7 +450,7 @@
     first_block_ = new NodeBlock(this, first_block_);
     first_block_->PutNodesOnFreeList(&first_free_);
   }
-  ASSERT(first_free_ != NULL);
+  DCHECK(first_free_ != NULL);
   // Take the first node in the free list.
   Node* result = first_free_;
   first_free_ = result->next_free();
@@ -465,7 +465,7 @@
 
 
 Handle<Object> GlobalHandles::CopyGlobal(Object** location) {
-  ASSERT(location != NULL);
+  DCHECK(location != NULL);
   return Node::FromLocation(location)->GetGlobalHandles()->Create(*location);
 }
 
@@ -544,7 +544,7 @@
     WeakSlotCallbackWithHeap f) {
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
-    ASSERT(node->is_in_new_space_list());
+    DCHECK(node->is_in_new_space_list());
     if ((node->is_independent() || node->is_partially_dependent()) &&
         node->IsWeak() && f(isolate_->heap(), node->location())) {
       node->MarkPending();
@@ -556,7 +556,7 @@
 void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
-    ASSERT(node->is_in_new_space_list());
+    DCHECK(node->is_in_new_space_list());
     if ((node->is_independent() || node->is_partially_dependent()) &&
         node->IsWeakRetainer()) {
       v->VisitPointer(node->location());
@@ -572,7 +572,7 @@
   bool any_group_was_visited = false;
   for (int i = 0; i < object_groups_.length(); i++) {
     ObjectGroup* entry = object_groups_.at(i);
-    ASSERT(entry != NULL);
+    DCHECK(entry != NULL);
 
     Object*** objects = entry->objects;
     bool group_should_be_visited = false;
@@ -612,17 +612,17 @@
 
 
 int GlobalHandles::PostGarbageCollectionProcessing(
-    GarbageCollector collector, GCTracer* tracer) {
+    GarbageCollector collector) {
   // Process weak global handle callbacks. This must be done after the
   // GC is completely done, because the callbacks may invoke arbitrary
   // API functions.
-  ASSERT(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
+  DCHECK(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
   const int initial_post_gc_processing_count = ++post_gc_processing_count_;
   int freed_nodes = 0;
   if (collector == SCAVENGER) {
     for (int i = 0; i < new_space_nodes_.length(); ++i) {
       Node* node = new_space_nodes_[i];
-      ASSERT(node->is_in_new_space_list());
+      DCHECK(node->is_in_new_space_list());
       if (!node->IsRetainer()) {
         // Free nodes do not have weak callbacks. Do not use them to compute
         // the freed_nodes.
@@ -671,18 +671,18 @@
   int last = 0;
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
-    ASSERT(node->is_in_new_space_list());
+    DCHECK(node->is_in_new_space_list());
     if (node->IsRetainer()) {
       if (isolate_->heap()->InNewSpace(node->object())) {
         new_space_nodes_[last++] = node;
-        tracer->increment_nodes_copied_in_new_space();
+        isolate_->heap()->IncrementNodesCopiedInNewSpace();
       } else {
         node->set_in_new_space_list(false);
-        tracer->increment_nodes_promoted();
+        isolate_->heap()->IncrementNodesPromoted();
       }
     } else {
       node->set_in_new_space_list(false);
-      tracer->increment_nodes_died_in_new_space();
+      isolate_->heap()->IncrementNodesDiedInNewSpace();
     }
   }
   new_space_nodes_.Rewind(last);
@@ -818,7 +818,7 @@
                                    v8::RetainedObjectInfo* info) {
 #ifdef DEBUG
   for (size_t i = 0; i < length; ++i) {
-    ASSERT(!Node::FromLocation(handles[i])->is_independent());
+    DCHECK(!Node::FromLocation(handles[i])->is_independent());
   }
 #endif
   if (length == 0) {
@@ -845,31 +845,14 @@
 }
 
 
-void GlobalHandles::AddImplicitReferences(HeapObject** parent,
-                                          Object*** children,
-                                          size_t length) {
-#ifdef DEBUG
-  ASSERT(!Node::FromLocation(BitCast<Object**>(parent))->is_independent());
-  for (size_t i = 0; i < length; ++i) {
-    ASSERT(!Node::FromLocation(children[i])->is_independent());
-  }
-#endif
-  if (length == 0) return;
-  ImplicitRefGroup* group = new ImplicitRefGroup(parent, length);
-  for (size_t i = 0; i < length; ++i)
-    group->children[i] = children[i];
-  implicit_ref_groups_.Add(group);
-}
-
-
 void GlobalHandles::SetReferenceFromGroup(UniqueId id, Object** child) {
-  ASSERT(!Node::FromLocation(child)->is_independent());
+  DCHECK(!Node::FromLocation(child)->is_independent());
   implicit_ref_connections_.Add(ObjectGroupConnection(id, child));
 }
 
 
 void GlobalHandles::SetReference(HeapObject** parent, Object** child) {
-  ASSERT(!Node::FromLocation(child)->is_independent());
+  DCHECK(!Node::FromLocation(child)->is_independent());
   ImplicitRefGroup* group = new ImplicitRefGroup(parent, 1);
   group->children[0] = child;
   implicit_ref_groups_.Add(group);
@@ -1007,7 +990,7 @@
 
 
 EternalHandles::EternalHandles() : size_(0) {
-  for (unsigned i = 0; i < ARRAY_SIZE(singleton_handles_); i++) {
+  for (unsigned i = 0; i < arraysize(singleton_handles_); i++) {
     singleton_handles_[i] = kInvalidIndex;
   }
 }
@@ -1021,7 +1004,7 @@
 void EternalHandles::IterateAllRoots(ObjectVisitor* visitor) {
   int limit = size_;
   for (int i = 0; i < blocks_.length(); i++) {
-    ASSERT(limit > 0);
+    DCHECK(limit > 0);
     Object** block = blocks_[i];
     visitor->VisitPointers(block, block + Min(limit, kSize));
     limit -= kSize;
@@ -1049,9 +1032,9 @@
 
 
 void EternalHandles::Create(Isolate* isolate, Object* object, int* index) {
-  ASSERT_EQ(kInvalidIndex, *index);
+  DCHECK_EQ(kInvalidIndex, *index);
   if (object == NULL) return;
-  ASSERT_NE(isolate->heap()->the_hole_value(), object);
+  DCHECK_NE(isolate->heap()->the_hole_value(), object);
   int block = size_ >> kShift;
   int offset = size_ & kMask;
   // need to resize
@@ -1061,7 +1044,7 @@
     MemsetPointer(next_block, the_hole, kSize);
     blocks_.Add(next_block);
   }
-  ASSERT_EQ(isolate->heap()->the_hole_value(), blocks_[block][offset]);
+  DCHECK_EQ(isolate->heap()->the_hole_value(), blocks_[block][offset]);
   blocks_[block][offset] = object;
   if (isolate->heap()->InNewSpace(object)) {
     new_space_indices_.Add(size_);
diff --git a/src/global-handles.h b/src/global-handles.h
index 2f5afc9..a06cba0 100644
--- a/src/global-handles.h
+++ b/src/global-handles.h
@@ -15,7 +15,6 @@
 namespace v8 {
 namespace internal {
 
-class GCTracer;
 class HeapStats;
 class ObjectVisitor;
 
@@ -38,7 +37,7 @@
 struct ObjectGroup {
   explicit ObjectGroup(size_t length)
       : info(NULL), length(length) {
-    ASSERT(length > 0);
+    DCHECK(length > 0);
     objects = new Object**[length];
   }
   ~ObjectGroup();
@@ -52,7 +51,7 @@
 struct ImplicitRefGroup {
   ImplicitRefGroup(HeapObject** parent, size_t length)
       : parent(parent), length(length) {
-    ASSERT(length > 0);
+    DCHECK(length > 0);
     children = new Object**[length];
   }
   ~ImplicitRefGroup();
@@ -156,8 +155,7 @@
 
   // Process pending weak handles.
   // Returns the number of freed nodes.
-  int PostGarbageCollectionProcessing(GarbageCollector collector,
-                                      GCTracer* tracer);
+  int PostGarbageCollectionProcessing(GarbageCollector collector);
 
   // Iterates over all strong handles.
   void IterateStrongRoots(ObjectVisitor* v);
@@ -217,13 +215,6 @@
   // handles.
   void SetRetainedObjectInfo(UniqueId id, RetainedObjectInfo* info);
 
-  // Add an implicit references' group.
-  // Should be only used in GC callback function before a collection.
-  // All groups are destroyed after a mark-compact collection.
-  void AddImplicitReferences(HeapObject** parent,
-                             Object*** children,
-                             size_t length);
-
   // Adds an implicit reference from a group to an object. Should be only used
   // in GC callback function before a collection. All implicit references are
   // destroyed after a mark-compact collection.
@@ -337,7 +328,7 @@
 
   // Grab the handle for an existing SingletonHandle.
   inline Handle<Object> GetSingleton(SingletonHandle singleton) {
-    ASSERT(Exists(singleton));
+    DCHECK(Exists(singleton));
     return Get(singleton_handles_[singleton]);
   }
 
@@ -369,7 +360,7 @@
 
   // Gets the slot for an index
   inline Object** GetLocation(int index) {
-    ASSERT(index >= 0 && index < size_);
+    DCHECK(index >= 0 && index < size_);
     return &blocks_[index >> kShift][index & kMask];
   }
 
diff --git a/src/globals.h b/src/globals.h
index 595ecc3..609ab88 100644
--- a/src/globals.h
+++ b/src/globals.h
@@ -8,8 +8,8 @@
 #include "include/v8stdint.h"
 
 #include "src/base/build_config.h"
+#include "src/base/logging.h"
 #include "src/base/macros.h"
-#include "src/checks.h"
 
 // Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
 // warning flag and certain versions of GCC due to a bug:
@@ -25,7 +25,26 @@
 # define V8_INFINITY INFINITY
 #endif
 
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM || \
+    V8_TARGET_ARCH_ARM64
+#define V8_TURBOFAN_BACKEND 1
+#else
+#define V8_TURBOFAN_BACKEND 0
+#endif
+#if V8_TURBOFAN_BACKEND && !(V8_OS_WIN && V8_TARGET_ARCH_X64)
+#define V8_TURBOFAN_TARGET 1
+#else
+#define V8_TURBOFAN_TARGET 0
+#endif
+
 namespace v8 {
+
+namespace base {
+class Mutex;
+class RecursiveMutex;
+class VirtualMemory;
+}
+
 namespace internal {
 
 // Determine whether we are running in a simulated environment.
@@ -41,11 +60,26 @@
 #if (V8_TARGET_ARCH_MIPS && !V8_HOST_ARCH_MIPS)
 #define USE_SIMULATOR 1
 #endif
+#if (V8_TARGET_ARCH_MIPS64 && !V8_HOST_ARCH_MIPS64)
+#define USE_SIMULATOR 1
+#endif
 #endif
 
 // Determine whether the architecture uses an out-of-line constant pool.
 #define V8_OOL_CONSTANT_POOL 0
 
+#ifdef V8_TARGET_ARCH_ARM
+// Set stack limit lower for ARM than for other architectures because
+// stack-allocating a MacroAssembler takes 120K bytes.
+// See issue crbug.com/405338
+#define V8_DEFAULT_STACK_SIZE_KB 864
+#else
+// Slightly less than 1MB, since Windows' default stack size for
+// the main execution thread is 1MB for both 32 and 64-bit.
+#define V8_DEFAULT_STACK_SIZE_KB 984
+#endif
+
+
 // Support for alternative bool type. This is only enabled if the code is
 // compiled with USE_MYBOOL defined. This catches some nasty type bugs.
 // For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -66,51 +100,6 @@
 typedef uint8_t byte;
 typedef byte* Address;
 
-// Define our own macros for writing 64-bit constants.  This is less fragile
-// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
-// works on compilers that don't have it (like MSVC).
-#if V8_CC_MSVC
-# define V8_UINT64_C(x)   (x ## UI64)
-# define V8_INT64_C(x)    (x ## I64)
-# if V8_HOST_ARCH_64_BIT
-#  define V8_INTPTR_C(x)  (x ## I64)
-#  define V8_PTR_PREFIX   "ll"
-# else
-#  define V8_INTPTR_C(x)  (x)
-#  define V8_PTR_PREFIX   ""
-# endif  // V8_HOST_ARCH_64_BIT
-#elif V8_CC_MINGW64
-# define V8_UINT64_C(x)   (x ## ULL)
-# define V8_INT64_C(x)    (x ## LL)
-# define V8_INTPTR_C(x)   (x ## LL)
-# define V8_PTR_PREFIX    "I64"
-#elif V8_HOST_ARCH_64_BIT
-# if V8_OS_MACOSX
-#  define V8_UINT64_C(x)   (x ## ULL)
-#  define V8_INT64_C(x)    (x ## LL)
-# else
-#  define V8_UINT64_C(x)   (x ## UL)
-#  define V8_INT64_C(x)    (x ## L)
-# endif
-# define V8_INTPTR_C(x)   (x ## L)
-# define V8_PTR_PREFIX    "l"
-#else
-# define V8_UINT64_C(x)   (x ## ULL)
-# define V8_INT64_C(x)    (x ## LL)
-# define V8_INTPTR_C(x)   (x)
-# define V8_PTR_PREFIX    ""
-#endif
-
-#define V8PRIxPTR V8_PTR_PREFIX "x"
-#define V8PRIdPTR V8_PTR_PREFIX "d"
-#define V8PRIuPTR V8_PTR_PREFIX "u"
-
-// Fix for Mac OS X defining uintptr_t as "unsigned long":
-#if V8_OS_MACOSX
-#undef V8PRIxPTR
-#define V8PRIxPTR "lx"
-#endif
-
 // -----------------------------------------------------------------------------
 // Constants
 
@@ -138,7 +127,11 @@
 const int kDoubleSize    = sizeof(double);    // NOLINT
 const int kIntptrSize    = sizeof(intptr_t);  // NOLINT
 const int kPointerSize   = sizeof(void*);     // NOLINT
+#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
+const int kRegisterSize  = kPointerSize + kPointerSize;
+#else
 const int kRegisterSize  = kPointerSize;
+#endif
 const int kPCOnStackSize = kRegisterSize;
 const int kFPOnStackSize = kRegisterSize;
 
@@ -154,9 +147,17 @@
 const int kPointerSizeLog2 = 2;
 const intptr_t kIntptrSignBit = 0x80000000;
 const uintptr_t kUintptrAllBitsSet = 0xFFFFFFFFu;
+#if V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
+// x32 port also requires code range.
+const bool kRequiresCodeRange = true;
+const size_t kMaximalCodeRangeSize = 256 * MB;
+#else
 const bool kRequiresCodeRange = false;
 const size_t kMaximalCodeRangeSize = 0 * MB;
 #endif
+#endif
+
+STATIC_ASSERT(kPointerSize == (1 << kPointerSizeLog2));
 
 const int kBitsPerByte = 8;
 const int kBitsPerByteLog2 = 3;
@@ -239,11 +240,15 @@
 const intptr_t kCodeAlignment = 1 << kCodeAlignmentBits;
 const intptr_t kCodeAlignmentMask = kCodeAlignment - 1;
 
-// Tag information for Failure.
-// TODO(yangguo): remove this from space owner calculation.
-const int kFailureTag = 3;
-const int kFailureTagSize = 2;
-const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
+// The owner field of a page is tagged with the page header tag. We need that
+// to find out if a slot is part of a large object. If we mask out the lower
+// 0xfffff bits (1M pages), go to the owner offset, and see that this field
+// is tagged with the page header tag, we can just look up the owner.
+// Otherwise, we know that we are somewhere (not within the first 1M) in a
+// large object.
+const int kPageHeaderTag = 3;
+const int kPageHeaderTagSize = 2;
+const intptr_t kPageHeaderTagMask = (1 << kPageHeaderTagSize) - 1;
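
Editor's note: a sketch of the check this comment describes, with illustrative names (the real constants and owner offset live elsewhere in the heap code; 20 bits matches the 1MB pages mentioned above):

    #include <cstdint>

    const int kPageSizeBitsSketch = 20;  // 1MB pages, per the comment above
    const intptr_t kPageAlignmentMaskSketch =
        (static_cast<intptr_t>(1) << kPageSizeBitsSketch) - 1;
    const intptr_t kHeaderTagSketch = 3;      // mirrors kPageHeaderTag
    const intptr_t kHeaderTagMaskSketch = 3;  // mirrors kPageHeaderTagMask

    // True if the owner word of the enclosing 1MB page carries the header
    // tag, i.e. the slot can be attributed to that page's owner directly;
    // false means we are somewhere past the first 1MB of a large object.
    bool OwnerIsTaggedSketch(intptr_t slot_address, intptr_t owner_offset) {
      intptr_t page_start = slot_address & ~kPageAlignmentMaskSketch;
      intptr_t owner_word =
          *reinterpret_cast<intptr_t*>(page_start + owner_offset);
      return (owner_word & kHeaderTagMaskSketch) == kHeaderTagSketch;
    }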
 
 
 // Zap-value: The value used for zapping dead objects.
@@ -272,10 +277,6 @@
 
 const int kCodeZapValue = 0xbadc0de;
 
-// Number of bits to represent the page size for paged spaces. The value of 20
-// gives 1Mb bytes per page.
-const int kPageSizeBits = 20;
-
 // On Intel architecture, cache line size is 64 bytes.
 // On ARM it may be less (32 bytes), but as far as this constant is
 // used for aligning data, it doesn't hurt to align on a greater value.
@@ -344,9 +345,6 @@
 class RelocInfo;
 class Deserializer;
 class MessageLocation;
-class VirtualMemory;
-class Mutex;
-class RecursiveMutex;
 
 typedef bool (*WeakSlotCallback)(Object** pointer);
 
@@ -450,8 +448,8 @@
   PREMONOMORPHIC,
   // Has been executed and only one receiver type has been seen.
   MONOMORPHIC,
-  // Like MONOMORPHIC but check failed due to prototype.
-  MONOMORPHIC_PROTOTYPE_FAILURE,
+  // Check failed due to prototype (or map deprecation).
+  PROTOTYPE_FAILURE,
   // Multiple receiver types have been seen.
   POLYMORPHIC,
   // Many receiver types have been seen.
@@ -459,7 +457,11 @@
   // A generic handler is installed and no extra typefeedback is recorded.
   GENERIC,
   // Special state for debug break or step in prepare stubs.
-  DEBUG_STUB
+  DEBUG_STUB,
+  // Type-vector-based ICs have a default state; the full IC state is only
+  // determined by looking at the IC and the typevector together.
+  DEFAULT
 };
 
 
@@ -479,9 +481,11 @@
 };
 
 
-enum InlineCacheHolderFlag {
-  OWN_MAP,  // For fast properties objects.
-  PROTOTYPE_MAP  // For slow properties objects (except GlobalObjects).
+enum CacheHolderFlag {
+  kCacheOnPrototype,
+  kCacheOnPrototypeReceiverIsDictionary,
+  kCacheOnPrototypeReceiverIsPrimitive,
+  kCacheOnReceiver
 };
 
 
@@ -618,8 +622,12 @@
     MOVW_MOVT_IMMEDIATE_LOADS,
     VFP32DREGS,
     NEON,
-    // MIPS
+    // MIPS, MIPS64
     FPU,
+    FP64FPU,
+    MIPSr1,
+    MIPSr2,
+    MIPSr6,
     // ARM64
     ALWAYS_ALIGN_CSP,
     NUMBER_OF_CPU_FEATURES
@@ -746,6 +754,9 @@
 };
 
 
+enum MaybeAssignedFlag { kNotAssigned, kMaybeAssigned };
+
+
 enum ClearExceptionFlag {
   KEEP_EXCEPTION,
   CLEAR_EXCEPTION
@@ -757,6 +768,44 @@
   FAIL_ON_MINUS_ZERO
 };
 
+
+enum Signedness { kSigned, kUnsigned };
+
+
+enum FunctionKind {
+  kNormalFunction = 0,
+  kArrowFunction = 1,
+  kGeneratorFunction = 2,
+  kConciseMethod = 4,
+  kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod
+};
+
+
+inline bool IsValidFunctionKind(FunctionKind kind) {
+  return kind == FunctionKind::kNormalFunction ||
+         kind == FunctionKind::kArrowFunction ||
+         kind == FunctionKind::kGeneratorFunction ||
+         kind == FunctionKind::kConciseMethod ||
+         kind == FunctionKind::kConciseGeneratorMethod;
+}
+
+
+inline bool IsArrowFunction(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kArrowFunction;
+}
+
+
+inline bool IsGeneratorFunction(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kGeneratorFunction;
+}
+
+
+inline bool IsConciseMethod(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kConciseMethod;
+}
 } }  // namespace v8::internal
 
 namespace i = v8::internal;
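
Editor's note: the FunctionKind enum introduced above is a small bit set rather than a flat enumeration: composite kinds OR flags together, and the Is* predicates test individual bits (after DCHECKing validity). A minimal usage sketch with a standalone re-declaration, not V8's header:

    #include <cassert>

    enum FunctionKindSketch {
      kNormalFn = 0,
      kArrowFn = 1,
      kGeneratorFn = 2,
      kConciseFn = 4,
      kConciseGeneratorFn = kGeneratorFn | kConciseFn  // composite kind
    };

    int main() {
      FunctionKindSketch kind = kConciseGeneratorFn;
      assert((kind & kGeneratorFn) != 0);  // IsGeneratorFunction -> true
      assert((kind & kConciseFn) != 0);    // IsConciseMethod -> true
      assert((kind & kArrowFn) == 0);      // IsArrowFunction -> false
      return 0;
    }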
diff --git a/src/handles-inl.h b/src/handles-inl.h
index 833f9dd..34b3f32 100644
--- a/src/handles-inl.h
+++ b/src/handles-inl.h
@@ -8,7 +8,7 @@
 
 #include "src/api.h"
 #include "src/handles.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/isolate.h"
 
 namespace v8 {
@@ -29,7 +29,7 @@
 template <typename T>
 inline bool Handle<T>::is_identical_to(const Handle<T> o) const {
   // Dereferencing deferred handles to check object equality is safe.
-  SLOW_ASSERT(
+  SLOW_DCHECK(
       (location_ == NULL || IsDereferenceAllowed(NO_DEFERRED_CHECK)) &&
       (o.location_ == NULL || o.IsDereferenceAllowed(NO_DEFERRED_CHECK)));
   if (location_ == o.location_) return true;
@@ -40,13 +40,13 @@
 
 template <typename T>
 inline T* Handle<T>::operator*() const {
-  SLOW_ASSERT(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
-  return *BitCast<T**>(location_);
+  SLOW_DCHECK(IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
+  return *bit_cast<T**>(location_);
 }
 
 template <typename T>
 inline T** Handle<T>::location() const {
-  SLOW_ASSERT(location_ == NULL ||
+  SLOW_DCHECK(location_ == NULL ||
               IsDereferenceAllowed(INCLUDE_DEFERRED_CHECK));
   return location_;
 }
@@ -54,8 +54,8 @@
 #ifdef DEBUG
 template <typename T>
 bool Handle<T>::IsDereferenceAllowed(DereferenceCheckMode mode) const {
-  ASSERT(location_ != NULL);
-  Object* object = *BitCast<T**>(location_);
+  DCHECK(location_ != NULL);
+  Object* object = *bit_cast<T**>(location_);
   if (object->IsSmi()) return true;
   HeapObject* heap_object = HeapObject::cast(object);
   Heap* heap = heap_object->GetHeap();
@@ -123,7 +123,7 @@
   // Throw away all handles in the current scope.
   CloseScope(isolate_, prev_next_, prev_limit_);
   // Allocate one handle in the parent scope.
-  ASSERT(current->level > 0);
+  DCHECK(current->level > 0);
   Handle<T> result(CreateHandle<T>(isolate_, value));
   // Reinitialize the current scope (so that it's ready
   // to be used or closed again).
@@ -136,14 +136,14 @@
 
 template <typename T>
 T** HandleScope::CreateHandle(Isolate* isolate, T* value) {
-  ASSERT(AllowHandleAllocation::IsAllowed());
+  DCHECK(AllowHandleAllocation::IsAllowed());
   HandleScopeData* current = isolate->handle_scope_data();
 
   internal::Object** cur = current->next;
   if (cur == current->limit) cur = Extend(isolate);
   // Update the current next field, set the value in the created
   // handle, and return the result.
-  ASSERT(cur < current->limit);
+  DCHECK(cur < current->limit);
   current->next = cur + 1;
 
   T** result = reinterpret_cast<T**>(cur);
@@ -170,9 +170,9 @@
   // Restore state in current handle scope to re-enable handle
   // allocations.
   HandleScopeData* current = isolate_->handle_scope_data();
-  ASSERT_EQ(0, current->level);
+  DCHECK_EQ(0, current->level);
   current->level = level_;
-  ASSERT_EQ(current->next, current->limit);
+  DCHECK_EQ(current->next, current->limit);
   current->limit = limit_;
 }
 
diff --git a/src/handles.cc b/src/handles.cc
index f701d26..d9b130f 100644
--- a/src/handles.cc
+++ b/src/handles.cc
@@ -24,7 +24,7 @@
 
   Object** result = current->next;
 
-  ASSERT(result == current->limit);
+  DCHECK(result == current->limit);
   // Make sure there's at least one scope on the stack and that the
   // top of the scope stack isn't a barrier.
   if (!Utils::ApiCheck(current->level != 0,
@@ -39,7 +39,7 @@
     Object** limit = &impl->blocks()->last()[kHandleBlockSize];
     if (current->limit != limit) {
       current->limit = limit;
-      ASSERT(limit - current->next < kHandleBlockSize);
+      DCHECK(limit - current->next < kHandleBlockSize);
     }
   }
 
@@ -66,7 +66,7 @@
 
 #ifdef ENABLE_HANDLE_ZAPPING
 void HandleScope::ZapRange(Object** start, Object** end) {
-  ASSERT(end - start <= kHandleBlockSize);
+  DCHECK(end - start <= kHandleBlockSize);
   for (Object** p = start; p != end; p++) {
     *reinterpret_cast<Address*>(p) = v8::internal::kHandleZapValue;
   }
@@ -95,7 +95,7 @@
   HandleScopeData* data = impl_->isolate()->handle_scope_data();
   Object** new_next = impl_->GetSpareOrNewBlock();
   Object** new_limit = &new_next[kHandleBlockSize];
-  ASSERT(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
+  DCHECK(data->limit == &impl_->blocks()->last()[kHandleBlockSize]);
   impl_->blocks()->Add(new_next);
 
 #ifdef DEBUG
@@ -111,8 +111,8 @@
 
 DeferredHandleScope::~DeferredHandleScope() {
   impl_->isolate()->handle_scope_data()->level--;
-  ASSERT(handles_detached_);
-  ASSERT(impl_->isolate()->handle_scope_data()->level == prev_level_);
+  DCHECK(handles_detached_);
+  DCHECK(impl_->isolate()->handle_scope_data()->level == prev_level_);
 }
 
 
diff --git a/src/handles.h b/src/handles.h
index 3bd82e5..577e83a 100644
--- a/src/handles.h
+++ b/src/handles.h
@@ -44,7 +44,7 @@
     location_ = reinterpret_cast<T**>(maybe_handle.location_);
   }
 
-  INLINE(void Assert() const) { ASSERT(location_ != NULL); }
+  INLINE(void Assert() const) { DCHECK(location_ != NULL); }
   INLINE(void Check() const) { CHECK(location_ != NULL); }
 
   INLINE(Handle<T> ToHandleChecked()) const {
diff --git a/src/harmony-array.js b/src/harmony-array.js
index dbcb292..88b878f 100644
--- a/src/harmony-array.js
+++ b/src/harmony-array.js
@@ -123,11 +123,29 @@
   return array;
 }
 
+// ES6, draft 05-22-14, section 22.1.2.3
+function ArrayOf() {
+  var length = %_ArgumentsLength();
+  var constructor = this;
+  // TODO: Implement IsConstructor (ES6 section 7.2.5)
+  var array = IS_SPEC_FUNCTION(constructor) ? new constructor(length) : [];
+  for (var i = 0; i < length; i++) {
+    %AddElement(array, i, %_Arguments(i), NONE);
+  }
+  array.length = length;
+  return array;
+}
+
 // -------------------------------------------------------------------
 
 function HarmonyArrayExtendArrayPrototype() {
   %CheckIsBootstrapping();
 
+  // Set up non-enumerable functions on the Array object.
+  InstallFunctions($Array, DONT_ENUM, $Array(
+    "of", ArrayOf
+  ));
+
   // Set up the non-enumerable functions on the Array prototype object.
   InstallFunctions($Array.prototype, DONT_ENUM, $Array(
     "find", ArrayFind,
diff --git a/src/harmony-classes.js b/src/harmony-classes.js
new file mode 100644
index 0000000..b6605a9
--- /dev/null
+++ b/src/harmony-classes.js
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// This file relies on the fact that the following declarations have been made
+// in runtime.js:
+// var $Function = global.Function;
+// var $Array = global.Array;
+
+
+(function() {
+  function FunctionToMethod(homeObject) {
+    if (!IS_SPEC_FUNCTION(this)) {
+      throw MakeTypeError('toMethod_non_function',
+                          [%ToString(this), typeof this]);
+
+    }
+
+    if (!IS_SPEC_OBJECT(homeObject)) {
+      throw MakeTypeError('toMethod_non_object',
+                          [%ToString(homeObject)]);
+    }
+
+    return %ToMethod(this, homeObject);
+  }
+
+  %CheckIsBootstrapping();
+
+  InstallFunctions($Function.prototype, DONT_ENUM, $Array(
+      "toMethod", FunctionToMethod
+  ));
+}());
diff --git a/src/harmony-math.js b/src/harmony-math.js
deleted file mode 100644
index 4a8d95b..0000000
--- a/src/harmony-math.js
+++ /dev/null
@@ -1,246 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-'use strict';
-
-// ES6 draft 09-27-13, section 20.2.2.28.
-function MathSign(x) {
-  x = TO_NUMBER_INLINE(x);
-  if (x > 0) return 1;
-  if (x < 0) return -1;
-  if (x === 0) return x;
-  return NAN;
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.34.
-function MathTrunc(x) {
-  x = TO_NUMBER_INLINE(x);
-  if (x > 0) return MathFloor(x);
-  if (x < 0) return MathCeil(x);
-  if (x === 0) return x;
-  return NAN;
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.30.
-function MathSinh(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  // Idempotent for NaN, +/-0 and +/-Infinity.
-  if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
-  return (MathExp(x) - MathExp(-x)) / 2;
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.12.
-function MathCosh(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  if (!NUMBER_IS_FINITE(x)) return MathAbs(x);
-  return (MathExp(x) + MathExp(-x)) / 2;
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.33.
-function MathTanh(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  // Idempotent for +/-0.
-  if (x === 0) return x;
-  // Returns +/-1 for +/-Infinity.
-  if (!NUMBER_IS_FINITE(x)) return MathSign(x);
-  var exp1 = MathExp(x);
-  var exp2 = MathExp(-x);
-  return (exp1 - exp2) / (exp1 + exp2);
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.5.
-function MathAsinh(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  // Idempotent for NaN, +/-0 and +/-Infinity.
-  if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
-  if (x > 0) return MathLog(x + MathSqrt(x * x + 1));
-  // This is to prevent numerical errors caused by large negative x.
-  return -MathLog(-x + MathSqrt(x * x + 1));
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.3.
-function MathAcosh(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  if (x < 1) return NAN;
-  // Idempotent for NaN and +Infinity.
-  if (!NUMBER_IS_FINITE(x)) return x;
-  return MathLog(x + MathSqrt(x + 1) * MathSqrt(x - 1));
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.7.
-function MathAtanh(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  // Idempotent for +/-0.
-  if (x === 0) return x;
-  // Returns NaN for NaN and +/- Infinity.
-  if (!NUMBER_IS_FINITE(x)) return NAN;
-  return 0.5 * MathLog((1 + x) / (1 - x));
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.21.
-function MathLog10(x) {
-  return MathLog(x) * 0.434294481903251828;  // log10(x) = log(x)/log(10).
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.22.
-function MathLog2(x) {
-  return MathLog(x) * 1.442695040888963407;  // log2(x) = log(x)/log(2).
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.17.
-function MathHypot(x, y) {  // Function length is 2.
-  // We may want to introduce fast paths for two arguments and when
-  // normalization to avoid overflow is not necessary.  For now, we
-  // simply assume the general case.
-  var length = %_ArgumentsLength();
-  var args = new InternalArray(length);
-  var max = 0;
-  for (var i = 0; i < length; i++) {
-    var n = %_Arguments(i);
-    if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
-    if (n === INFINITY || n === -INFINITY) return INFINITY;
-    n = MathAbs(n);
-    if (n > max) max = n;
-    args[i] = n;
-  }
-
-  // Kahan summation to avoid rounding errors.
-  // Normalize the numbers to the largest one to avoid overflow.
-  if (max === 0) max = 1;
-  var sum = 0;
-  var compensation = 0;
-  for (var i = 0; i < length; i++) {
-    var n = args[i] / max;
-    var summand = n * n - compensation;
-    var preliminary = sum + summand;
-    compensation = (preliminary - sum) - summand;
-    sum = preliminary;
-  }
-  return MathSqrt(sum) * max;
-}
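
Editor's note: MathHypot above combines two numeric defenses: normalizing by the largest magnitude so the squares cannot overflow, and Kahan (compensated) summation so rounding error from each addition is carried forward in a correction term. The core in C++ (a sketch; the early return for infinite inputs in the JS version is omitted):

    #include <algorithm>
    #include <cmath>
    #include <vector>

    double HypotSketch(const std::vector<double>& values) {
      double max = 0;
      for (double v : values) max = std::max(max, std::fabs(v));
      if (max == 0) max = 1;  // all inputs are zero; the sum below is 0

      double sum = 0;
      double compensation = 0;  // low-order bits lost by `sum` so far
      for (double v : values) {
        double n = std::fabs(v) / max;          // normalized to <= 1
        double summand = n * n - compensation;  // re-apply the lost bits
        double preliminary = sum + summand;
        compensation = (preliminary - sum) - summand;  // what rounding dropped
        sum = preliminary;
      }
      return std::sqrt(sum) * max;  // undo the normalization
    }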
-
-
-// ES6 draft 09-27-13, section 20.2.2.16.
-function MathFroundJS(x) {
-  return %MathFround(TO_NUMBER_INLINE(x));
-}
-
-
-function MathClz32(x) {
-  x = ToUint32(TO_NUMBER_INLINE(x));
-  if (x == 0) return 32;
-  var result = 0;
-  // Binary search.
-  if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
-  if ((x & 0xFF000000) === 0) { x <<=  8; result +=  8; };
-  if ((x & 0xF0000000) === 0) { x <<=  4; result +=  4; };
-  if ((x & 0xC0000000) === 0) { x <<=  2; result +=  2; };
-  if ((x & 0x80000000) === 0) { x <<=  1; result +=  1; };
-  return result;
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.9.
-// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm
-// Using initial approximation adapted from Kahan's cbrt and 4 iterations
-// of Newton's method.
-function MathCbrt(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
-  return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
-}
-
-macro NEWTON_ITERATION_CBRT(x, approx)
-  (1.0 / 3.0) * (x / (approx * approx) + 2 * approx);
-endmacro
-
-function CubeRoot(x) {
-  var approx_hi = MathFloor(%_DoubleHi(x) / 3) + 0x2A9F7893;
-  var approx = %_ConstructDouble(approx_hi, 0);
-  approx = NEWTON_ITERATION_CBRT(x, approx);
-  approx = NEWTON_ITERATION_CBRT(x, approx);
-  approx = NEWTON_ITERATION_CBRT(x, approx);
-  return NEWTON_ITERATION_CBRT(x, approx);
-}
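
Editor's note: the macro above is the plain Newton step for f(y) = y^3 - x: y_{n+1} = y_n - (y_n^3 - x) / (3 y_n^2) = (1/3) * (x / y_n^2 + 2 y_n), which matches NEWTON_ITERATION_CBRT term for term. The initial guess manipulates the double's bit pattern directly (dividing the high word by 3 roughly divides the exponent by 3), which is why four iterations suffice.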
-
-
-
-// ES6 draft 09-27-13, section 20.2.2.14.
-// Use Taylor series to approximate.
-// exp(x) - 1 at 0 == -1 + exp(0) + exp'(0)*x/1! + exp''(0)*x^2/2! + ...
-//                 == x/1! + x^2/2! + x^3/3! + ...
-// The closer x is to 0, the fewer terms are required.
-function MathExpm1(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  var xabs = MathAbs(x);
-  if (xabs < 2E-7) {
-    return x * (1 + x * (1/2));
-  } else if (xabs < 6E-5) {
-    return x * (1 + x * (1/2 + x * (1/6)));
-  } else if (xabs < 2E-2) {
-    return x * (1 + x * (1/2 + x * (1/6 +
-           x * (1/24 + x * (1/120 + x * (1/720))))));
-  } else {  // Use regular exp if not close enough to 0.
-    return MathExp(x) - 1;
-  }
-}
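
Editor's note: the point of the series is that, for tiny x, computing exp(x) - 1 directly cancels nearly all significant bits, while the polynomial form keeps full precision. A small demonstration against the C library's expm1 (a sketch; exact digits vary by platform):

    #include <cmath>
    #include <cstdio>

    int main() {
      double x = 1e-12;
      // exp(x) rounds to a double near 1.0, so subtracting 1 keeps only the
      // few bits that survived; expect agreement to only ~4-5 digits here.
      std::printf("naive exp(x)-1: %.17g\n", std::exp(x) - 1.0);
      std::printf("std::expm1(x) : %.17g\n", std::expm1(x));
      return 0;
    }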
-
-
-// ES6 draft 09-27-13, section 20.2.2.20.
-// Use Taylor series to approximate. With y = x + 1;
-// log(y) at 1 == log(1) + log'(1)(y-1)/1! + log''(1)(y-1)^2/2! + ...
-//             == 0 + x - x^2/2 + x^3/3 ...
-// The closer x is to 0, the fewer terms are required.
-function MathLog1p(x) {
-  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
-  var xabs = MathAbs(x);
-  if (xabs < 1E-7) {
-    return x * (1 - x * (1/2));
-  } else if (xabs < 3E-5) {
-    return x * (1 - x * (1/2 - x * (1/3)));
-  } else if (xabs < 7E-3) {
-    return x * (1 - x * (1/2 - x * (1/3 - x * (1/4 -
-           x * (1/5 - x * (1/6 - x * (1/7)))))));
-  } else {  // Use regular log if not close enough to 0.
-    return MathLog(1 + x);
-  }
-}
-
-
-function ExtendMath() {
-  %CheckIsBootstrapping();
-
-  // Set up the non-enumerable functions on the Math object.
-  InstallFunctions($Math, DONT_ENUM, $Array(
-    "sign", MathSign,
-    "trunc", MathTrunc,
-    "sinh", MathSinh,
-    "cosh", MathCosh,
-    "tanh", MathTanh,
-    "asinh", MathAsinh,
-    "acosh", MathAcosh,
-    "atanh", MathAtanh,
-    "log10", MathLog10,
-    "log2", MathLog2,
-    "hypot", MathHypot,
-    "fround", MathFroundJS,
-    "clz32", MathClz32,
-    "cbrt", MathCbrt,
-    "log1p", MathLog1p,
-    "expm1", MathExpm1
-  ));
-}
-
-
-ExtendMath();
diff --git a/src/harmony-string.js b/src/harmony-string.js
index 4cd8e66..ae13745 100644
--- a/src/harmony-string.js
+++ b/src/harmony-string.js
@@ -120,17 +120,71 @@
 }
 
 
+// ES6 Draft 05-22-2014, section 21.1.3.3
+function StringCodePointAt(pos) {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.codePointAt");
+
+  var string = TO_STRING_INLINE(this);
+  var size = string.length;
+  pos = TO_INTEGER(pos);
+  if (pos < 0 || pos >= size) {
+    return UNDEFINED;
+  }
+  var first = %_StringCharCodeAt(string, pos);
+  if (first < 0xD800 || first > 0xDBFF || pos + 1 == size) {
+    return first;
+  }
+  var second = %_StringCharCodeAt(string, pos + 1);
+  if (second < 0xDC00 || second > 0xDFFF) {
+    return first;
+  }
+  return (first - 0xD800) * 0x400 + second + 0x2400;
+}
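
Editor's note: the return expression folds the textbook surrogate decoding (first - 0xD800) * 0x400 + (second - 0xDC00) + 0x10000 into second + 0x2400, since 0x10000 - 0xDC00 == 0x2400. The same logic ported to C++ over a raw UTF-16 buffer (a sketch, not V8 code):

    #include <cstdint>

    uint32_t CodePointAtSketch(const uint16_t* s, int size, int pos) {
      uint16_t first = s[pos];
      // Not a high surrogate, or nothing follows: the unit is the code point.
      if (first < 0xD800 || first > 0xDBFF || pos + 1 == size) return first;
      uint16_t second = s[pos + 1];
      // High surrogate not followed by a low surrogate: return it unpaired.
      if (second < 0xDC00 || second > 0xDFFF) return first;
      return (first - 0xD800) * 0x400 + second + 0x2400;
    }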
+
+
+// ES6 Draft 05-22-2014, section 21.1.2.2
+function StringFromCodePoint(_) {  // length = 1
+  var code;
+  var length = %_ArgumentsLength();
+  var index;
+  var result = "";
+  for (index = 0; index < length; index++) {
+    code = %_Arguments(index);
+    if (!%_IsSmi(code)) {
+      code = ToNumber(code);
+    }
+    if (code < 0 || code > 0x10FFFF || code !== TO_INTEGER(code)) {
+      throw MakeRangeError("invalid_code_point", [code]);
+    }
+    if (code <= 0xFFFF) {
+      result += %_StringCharFromCode(code);
+    } else {
+      code -= 0x10000;
+      result += %_StringCharFromCode((code >>> 10) & 0x3FF | 0xD800);
+      result += %_StringCharFromCode(code & 0x3FF | 0xDC00);
+    }
+  }
+  return result;
+}
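
Editor's note: StringFromCodePoint performs the inverse split: subtract 0x10000 from a supplementary code point and spread the remaining 20 bits over a high and a low surrogate. The encoding step in C++ (a sketch; range validation is assumed to have happened, as in the JS above):

    #include <cstdint>
    #include <string>

    std::u16string EncodeCodePointSketch(uint32_t code) {
      if (code <= 0xFFFF) {
        return std::u16string(1, static_cast<char16_t>(code));  // one unit
      }
      code -= 0x10000;  // 20 bits remain: 10 for each surrogate
      char16_t hi = static_cast<char16_t>(((code >> 10) & 0x3FF) | 0xD800);
      char16_t lo = static_cast<char16_t>((code & 0x3FF) | 0xDC00);
      return std::u16string({hi, lo});
    }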
+
+
 // -------------------------------------------------------------------
 
 function ExtendStringPrototype() {
   %CheckIsBootstrapping();
 
+  // Set up the non-enumerable functions on the String object.
+  InstallFunctions($String, DONT_ENUM, $Array(
+    "fromCodePoint", StringFromCodePoint
+  ));
+
   // Set up the non-enumerable functions on the String prototype object.
   InstallFunctions($String.prototype, DONT_ENUM, $Array(
-    "repeat", StringRepeat,
-    "startsWith", StringStartsWith,
+    "codePointAt", StringCodePointAt,
+    "contains", StringContains,
     "endsWith", StringEndsWith,
-    "contains", StringContains
+    "repeat", StringRepeat,
+    "startsWith", StringStartsWith
   ));
 }
 
diff --git a/src/hashmap.h b/src/hashmap.h
index d800f2f..33eb115 100644
--- a/src/hashmap.h
+++ b/src/hashmap.h
@@ -6,7 +6,8 @@
 #define V8_HASHMAP_H_
 
 #include "src/allocation.h"
-#include "src/checks.h"
+#include "src/base/bits.h"
+#include "src/base/logging.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -164,7 +165,7 @@
 
   // This guarantees loop termination as there is at least one empty entry, so
   // eventually the removed entry will have an empty entry after it.
-  ASSERT(occupancy_ < capacity_);
+  DCHECK(occupancy_ < capacity_);
 
   // p is the candidate entry to clear. q is used to scan forwards.
   Entry* q = p;  // Start at the entry to remove.
@@ -224,7 +225,7 @@
 typename TemplateHashMapImpl<AllocationPolicy>::Entry*
     TemplateHashMapImpl<AllocationPolicy>::Next(Entry* p) const {
   const Entry* end = map_end();
-  ASSERT(map_ - 1 <= p && p < end);
+  DCHECK(map_ - 1 <= p && p < end);
   for (p++; p < end; p++) {
     if (p->key != NULL) {
       return p;
@@ -237,14 +238,14 @@
 template<class AllocationPolicy>
 typename TemplateHashMapImpl<AllocationPolicy>::Entry*
     TemplateHashMapImpl<AllocationPolicy>::Probe(void* key, uint32_t hash) {
-  ASSERT(key != NULL);
+  DCHECK(key != NULL);
 
-  ASSERT(IsPowerOf2(capacity_));
+  DCHECK(base::bits::IsPowerOfTwo32(capacity_));
   Entry* p = map_ + (hash & (capacity_ - 1));
   const Entry* end = map_end();
-  ASSERT(map_ <= p && p < end);
+  DCHECK(map_ <= p && p < end);
 
-  ASSERT(occupancy_ < capacity_);  // Guarantees loop termination.
+  DCHECK(occupancy_ < capacity_);  // Guarantees loop termination.
   while (p->key != NULL && (hash != p->hash || !match_(key, p->key))) {
     p++;
     if (p >= end) {
@@ -259,7 +260,7 @@
 template<class AllocationPolicy>
 void TemplateHashMapImpl<AllocationPolicy>::Initialize(
     uint32_t capacity, AllocationPolicy allocator) {
-  ASSERT(IsPowerOf2(capacity));
+  DCHECK(base::bits::IsPowerOfTwo32(capacity));
   map_ = reinterpret_cast<Entry*>(allocator.New(capacity * sizeof(Entry)));
   if (map_ == NULL) {
     v8::internal::FatalProcessOutOfMemory("HashMap::Initialize");
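
Editor's note: the DCHECKs above encode the two invariants the probe loop relies on: capacity is a power of two (so `hash & (capacity - 1)` stands in for a modulo) and occupancy stays below capacity (so an empty slot always stops the walk). A standalone sketch of the probe, with raw pointer equality in place of the match_ predicate:

    #include <cstdint>

    struct EntrySketch {
      void* key = nullptr;  // nullptr marks an empty slot
      uint32_t hash = 0;
    };

    EntrySketch* ProbeSketch(EntrySketch* map, uint32_t capacity, void* key,
                             uint32_t hash) {
      EntrySketch* p = map + (hash & (capacity - 1));  // capacity is 2^k
      EntrySketch* const end = map + capacity;
      // Walk forward, wrapping at the end, until the key or an empty slot.
      while (p->key != nullptr && (p->hash != hash || p->key != key)) {
        if (++p >= end) p = map;
      }
      return p;  // matching entry, or the empty slot where `key` would go
    }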
diff --git a/src/heap-inl.h b/src/heap-inl.h
deleted file mode 100644
index 2e80452..0000000
--- a/src/heap-inl.h
+++ /dev/null
@@ -1,729 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_INL_H_
-#define V8_HEAP_INL_H_
-
-#include <cmath>
-
-#include "src/heap.h"
-#include "src/heap-profiler.h"
-#include "src/isolate.h"
-#include "src/list-inl.h"
-#include "src/objects.h"
-#include "src/platform.h"
-#include "src/store-buffer.h"
-#include "src/store-buffer-inl.h"
-
-namespace v8 {
-namespace internal {
-
-void PromotionQueue::insert(HeapObject* target, int size) {
-  if (emergency_stack_ != NULL) {
-    emergency_stack_->Add(Entry(target, size));
-    return;
-  }
-
-  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
-    NewSpacePage* rear_page =
-        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
-    ASSERT(!rear_page->prev_page()->is_anchor());
-    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
-    ActivateGuardIfOnTheSamePage();
-  }
-
-  if (guard_) {
-    ASSERT(GetHeadPage() ==
-           Page::FromAllocationTop(reinterpret_cast<Address>(limit_)));
-
-    if ((rear_ - 2) < limit_) {
-      RelocateQueueHead();
-      emergency_stack_->Add(Entry(target, size));
-      return;
-    }
-  }
-
-  *(--rear_) = reinterpret_cast<intptr_t>(target);
-  *(--rear_) = size;
-  // Assert no overflow into live objects.
-#ifdef DEBUG
-  SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
-                              reinterpret_cast<Address>(rear_));
-#endif
-}
-
-
-void PromotionQueue::ActivateGuardIfOnTheSamePage() {
-  guard_ = guard_ ||
-      heap_->new_space()->active_space()->current_page()->address() ==
-      GetHeadPage()->address();
-}
-
-
-template<>
-bool inline Heap::IsOneByte(Vector<const char> str, int chars) {
-  // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
-  // ASCII only check.
-  return chars == str.length();
-}
-
-
-template<>
-bool inline Heap::IsOneByte(String* str, int chars) {
-  return str->IsOneByteRepresentation();
-}
-
-
-AllocationResult Heap::AllocateInternalizedStringFromUtf8(
-    Vector<const char> str, int chars, uint32_t hash_field) {
-  if (IsOneByte(str, chars)) {
-    return AllocateOneByteInternalizedString(
-        Vector<const uint8_t>::cast(str), hash_field);
-  }
-  return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
-}
-
-
-template<typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(
-    T t, int chars, uint32_t hash_field) {
-  if (IsOneByte(t, chars)) {
-    return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
-  }
-  return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
-}
-
-
-AllocationResult Heap::AllocateOneByteInternalizedString(
-    Vector<const uint8_t> str,
-    uint32_t hash_field) {
-  CHECK_GE(String::kMaxLength, str.length());
-  // Compute map and object size.
-  Map* map = ascii_internalized_string_map();
-  int size = SeqOneByteString::SizeFor(str.length());
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
-
-  // Allocate string.
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  // String maps are all immortal immovable objects.
-  result->set_map_no_write_barrier(map);
-  // Set length and hash fields of the allocated string.
-  String* answer = String::cast(result);
-  answer->set_length(str.length());
-  answer->set_hash_field(hash_field);
-
-  ASSERT_EQ(size, answer->Size());
-
-  // Fill in the characters.
-  MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
-          str.length());
-
-  return answer;
-}
-
-
-AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
-                                                         uint32_t hash_field) {
-  CHECK_GE(String::kMaxLength, str.length());
-  // Compute map and object size.
-  Map* map = internalized_string_map();
-  int size = SeqTwoByteString::SizeFor(str.length());
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
-
-  // Allocate string.
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  result->set_map(map);
-  // Set length and hash fields of the allocated string.
-  String* answer = String::cast(result);
-  answer->set_length(str.length());
-  answer->set_hash_field(hash_field);
-
-  ASSERT_EQ(size, answer->Size());
-
-  // Fill in the characters.
-  MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
-          str.length() * kUC16Size);
-
-  return answer;
-}
-
-AllocationResult Heap::CopyFixedArray(FixedArray* src) {
-  if (src->length() == 0) return src;
-  return CopyFixedArrayWithMap(src, src->map());
-}
-
-
-AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
-  if (src->length() == 0) return src;
-  return CopyFixedDoubleArrayWithMap(src, src->map());
-}
-
-
-AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
-  if (src->length() == 0) return src;
-  return CopyConstantPoolArrayWithMap(src, src->map());
-}
-
-
-AllocationResult Heap::AllocateRaw(int size_in_bytes,
-                                   AllocationSpace space,
-                                   AllocationSpace retry_space) {
-  ASSERT(AllowHandleAllocation::IsAllowed());
-  ASSERT(AllowHeapAllocation::IsAllowed());
-  ASSERT(gc_state_ == NOT_IN_GC);
-  HeapProfiler* profiler = isolate_->heap_profiler();
-#ifdef DEBUG
-  if (FLAG_gc_interval >= 0 &&
-      AllowAllocationFailure::IsAllowed(isolate_) &&
-      Heap::allocation_timeout_-- <= 0) {
-    return AllocationResult::Retry(space);
-  }
-  isolate_->counters()->objs_since_last_full()->Increment();
-  isolate_->counters()->objs_since_last_young()->Increment();
-#endif
-
-  HeapObject* object;
-  AllocationResult allocation;
-  if (NEW_SPACE == space) {
-    allocation = new_space_.AllocateRaw(size_in_bytes);
-    if (always_allocate() &&
-        allocation.IsRetry() &&
-        retry_space != NEW_SPACE) {
-      space = retry_space;
-    } else {
-      if (profiler->is_tracking_allocations() && allocation.To(&object)) {
-        profiler->AllocationEvent(object->address(), size_in_bytes);
-      }
-      return allocation;
-    }
-  }
-
-  if (OLD_POINTER_SPACE == space) {
-    allocation = old_pointer_space_->AllocateRaw(size_in_bytes);
-  } else if (OLD_DATA_SPACE == space) {
-    allocation = old_data_space_->AllocateRaw(size_in_bytes);
-  } else if (CODE_SPACE == space) {
-    allocation = code_space_->AllocateRaw(size_in_bytes);
-  } else if (LO_SPACE == space) {
-    allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
-  } else if (CELL_SPACE == space) {
-    allocation = cell_space_->AllocateRaw(size_in_bytes);
-  } else if (PROPERTY_CELL_SPACE == space) {
-    allocation = property_cell_space_->AllocateRaw(size_in_bytes);
-  } else {
-    ASSERT(MAP_SPACE == space);
-    allocation = map_space_->AllocateRaw(size_in_bytes);
-  }
-  if (allocation.IsRetry()) old_gen_exhausted_ = true;
-  if (profiler->is_tracking_allocations() && allocation.To(&object)) {
-    profiler->AllocationEvent(object->address(), size_in_bytes);
-  }
-  return allocation;
-}
-
-
-void Heap::FinalizeExternalString(String* string) {
-  ASSERT(string->IsExternalString());
-  v8::String::ExternalStringResourceBase** resource_addr =
-      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
-          reinterpret_cast<byte*>(string) +
-          ExternalString::kResourceOffset -
-          kHeapObjectTag);
-
-  // Dispose of the C++ object if it has not already been disposed.
-  if (*resource_addr != NULL) {
-    (*resource_addr)->Dispose();
-    *resource_addr = NULL;
-  }
-}
-
-
-bool Heap::InNewSpace(Object* object) {
-  bool result = new_space_.Contains(object);
-  ASSERT(!result ||                  // Either not in new space
-         gc_state_ != NOT_IN_GC ||   // ... or in the middle of GC
-         InToSpace(object));         // ... or in to-space (where we allocate).
-  return result;
-}
-
-
-bool Heap::InNewSpace(Address address) {
-  return new_space_.Contains(address);
-}
-
-
-bool Heap::InFromSpace(Object* object) {
-  return new_space_.FromSpaceContains(object);
-}
-
-
-bool Heap::InToSpace(Object* object) {
-  return new_space_.ToSpaceContains(object);
-}
-
-
-bool Heap::InOldPointerSpace(Address address) {
-  return old_pointer_space_->Contains(address);
-}
-
-
-bool Heap::InOldPointerSpace(Object* object) {
-  return InOldPointerSpace(reinterpret_cast<Address>(object));
-}
-
-
-bool Heap::InOldDataSpace(Address address) {
-  return old_data_space_->Contains(address);
-}
-
-
-bool Heap::InOldDataSpace(Object* object) {
-  return InOldDataSpace(reinterpret_cast<Address>(object));
-}
-
-
-bool Heap::OldGenerationAllocationLimitReached() {
-  if (!incremental_marking()->IsStopped()) return false;
-  return OldGenerationSpaceAvailable() < 0;
-}
-
-
-bool Heap::ShouldBePromoted(Address old_address, int object_size) {
-  // An object should be promoted if:
-  // - the object has survived a scavenge operation or
-  // - to space is already 25% full.
-  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
-  Address age_mark = new_space_.age_mark();
-  bool below_mark = page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
-      (!page->ContainsLimit(age_mark) || old_address < age_mark);
-  return below_mark || (new_space_.Size() + object_size) >=
-                        (new_space_.EffectiveCapacity() >> 2);
-}
-
-
-void Heap::RecordWrite(Address address, int offset) {
-  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
-}
-
-
-void Heap::RecordWrites(Address address, int start, int len) {
-  if (!InNewSpace(address)) {
-    for (int i = 0; i < len; i++) {
-      store_buffer_.Mark(address + start + i * kPointerSize);
-    }
-  }
-}
-
-
-OldSpace* Heap::TargetSpace(HeapObject* object) {
-  InstanceType type = object->map()->instance_type();
-  AllocationSpace space = TargetSpaceId(type);
-  return (space == OLD_POINTER_SPACE)
-      ? old_pointer_space_
-      : old_data_space_;
-}
-
-
-AllocationSpace Heap::TargetSpaceId(InstanceType type) {
-  // Heap numbers and sequential strings are promoted to old data space; all
-  // other object types are promoted to old pointer space.  We do not use
-  // object->IsHeapNumber() and object->IsSeqString() because we already
-  // know that object has the heap object tag.
-
-  // These objects are never allocated in new space.
-  ASSERT(type != MAP_TYPE);
-  ASSERT(type != CODE_TYPE);
-  ASSERT(type != ODDBALL_TYPE);
-  ASSERT(type != CELL_TYPE);
-  ASSERT(type != PROPERTY_CELL_TYPE);
-
-  if (type <= LAST_NAME_TYPE) {
-    if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
-    ASSERT(type < FIRST_NONSTRING_TYPE);
-    // There are four string representations: sequential strings, external
-    // strings, cons strings, and sliced strings.
-    // Only the latter two contain non-map-word pointers to heap objects.
-    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
-        ? OLD_POINTER_SPACE
-        : OLD_DATA_SPACE;
-  } else {
-    return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
-  }
-}
-
-
-bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
-  // Object migration is governed by the following rules:
-  //
-  // 1) Objects in new-space can be migrated to one of the old spaces
-  //    that matches their target space or they stay in new-space.
-  // 2) Objects in old-space stay in the same space when migrating.
-  // 3) Fillers (two or more words) can migrate due to left-trimming of
-  //    fixed arrays in new-space, old-data-space and old-pointer-space.
-  // 4) Fillers (one word) can never migrate, they are skipped by
-  //    incremental marking explicitly to prevent invalid pattern.
-  // 5) Short external strings can end up in old pointer space when a cons
-  //    string in old pointer space is made external (String::MakeExternal).
-  //
-  // Since this function is used for debugging only, we do not place
-  // asserts here, but check everything explicitly.
-  if (obj->map() == one_pointer_filler_map()) return false;
-  InstanceType type = obj->map()->instance_type();
-  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-  AllocationSpace src = chunk->owner()->identity();
-  switch (src) {
-    case NEW_SPACE:
-      return dst == src || dst == TargetSpaceId(type);
-    case OLD_POINTER_SPACE:
-      return dst == src &&
-          (dst == TargetSpaceId(type) || obj->IsFiller() ||
-          (obj->IsExternalString() && ExternalString::cast(obj)->is_short()));
-    case OLD_DATA_SPACE:
-      return dst == src && dst == TargetSpaceId(type);
-    case CODE_SPACE:
-      return dst == src && type == CODE_TYPE;
-    case MAP_SPACE:
-    case CELL_SPACE:
-    case PROPERTY_CELL_SPACE:
-    case LO_SPACE:
-      return false;
-    case INVALID_SPACE:
-      break;
-  }
-  UNREACHABLE();
-  return false;
-}
-
-
-void Heap::CopyBlock(Address dst, Address src, int byte_size) {
-  CopyWords(reinterpret_cast<Object**>(dst),
-            reinterpret_cast<Object**>(src),
-            static_cast<size_t>(byte_size / kPointerSize));
-}
-
-
-void Heap::MoveBlock(Address dst, Address src, int byte_size) {
-  ASSERT(IsAligned(byte_size, kPointerSize));
-
-  int size_in_words = byte_size / kPointerSize;
-
-  if ((dst < src) || (dst >= (src + byte_size))) {
-    Object** src_slot = reinterpret_cast<Object**>(src);
-    Object** dst_slot = reinterpret_cast<Object**>(dst);
-    Object** end_slot = src_slot + size_in_words;
-
-    while (src_slot != end_slot) {
-      *dst_slot++ = *src_slot++;
-    }
-  } else {
-    MemMove(dst, src, static_cast<size_t>(byte_size));
-  }
-}
-
-
-void Heap::ScavengePointer(HeapObject** p) {
-  ScavengeObject(p, *p);
-}
-
-
-AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
-  // Check if there is potentially a memento behind the object. If
-  // the last word of the memento is on another page, we return
-  // immediately.
-  Address object_address = object->address();
-  Address memento_address = object_address + object->Size();
-  Address last_memento_word_address = memento_address + kPointerSize;
-  if (!NewSpacePage::OnSamePage(object_address,
-                                last_memento_word_address)) {
-    return NULL;
-  }
-
-  HeapObject* candidate = HeapObject::FromAddress(memento_address);
-  if (candidate->map() != allocation_memento_map()) return NULL;
-
-  // Either the object is the last object in the new space, or there is another
-  // object of at least word size (the header map word) following it, so it
-  // suffices to compare ptr and top here. Note that technically we do not have
-  // to compare with the current top pointer of the from space page during GC,
-  // since we always install filler objects above the top pointer of a from
-  // space page when performing a garbage collection. However, always performing
-  // the test makes it possible to have a single, unified version of
-  // FindAllocationMemento that is used both by the GC and the mutator.
-  Address top = NewSpaceTop();
-  ASSERT(memento_address == top ||
-         memento_address + HeapObject::kHeaderSize <= top ||
-         !NewSpacePage::OnSamePage(memento_address, top));
-  if (memento_address == top) return NULL;
-
-  AllocationMemento* memento = AllocationMemento::cast(candidate);
-  if (!memento->IsValid()) return NULL;
-  return memento;
-}
-
-
-void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
-                                        ScratchpadSlotMode mode) {
-  Heap* heap = object->GetHeap();
-  ASSERT(heap->InFromSpace(object));
-
-  if (!FLAG_allocation_site_pretenuring ||
-      !AllocationSite::CanTrack(object->map()->instance_type())) return;
-
-  AllocationMemento* memento = heap->FindAllocationMemento(object);
-  if (memento == NULL) return;
-
-  if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
-    heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
-  }
-}
-
-
-void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
-  ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
-
-  // We use the first word (where the map pointer usually is) of a heap
-  // object to record the forwarding pointer.  A forwarding pointer can
-  // point to an old space, the code space, or the to space of the new
-  // generation.
-  MapWord first_word = object->map_word();
-
-  // If the first word is a forwarding address, the object has already been
-  // copied.
-  if (first_word.IsForwardingAddress()) {
-    HeapObject* dest = first_word.ToForwardingAddress();
-    ASSERT(object->GetIsolate()->heap()->InFromSpace(*p));
-    *p = dest;
-    return;
-  }
-
-  UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
-
-  // AllocationMementos are unrooted and shouldn't survive a scavenge
-  ASSERT(object->map() != object->GetHeap()->allocation_memento_map());
-  // Call the slow part of scavenge object.
-  return ScavengeObjectSlow(p, object);
-}
-
-
-bool Heap::CollectGarbage(AllocationSpace space,
-                          const char* gc_reason,
-                          const v8::GCCallbackFlags callbackFlags) {
-  const char* collector_reason = NULL;
-  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
-  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
-}
-
-
-Isolate* Heap::isolate() {
-  return reinterpret_cast<Isolate*>(reinterpret_cast<intptr_t>(this) -
-      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
-}
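
Editor's note: Heap::isolate() recovers the enclosing Isolate from a Heap* by subtracting the heap member's offset from `this`; the odd-looking arithmetic with the constant 4 computes that offset via a fake Isolate pointer. With a standard-layout type the same idea reads as offsetof (hypothetical types below, not V8's):

    #include <cstddef>
    #include <cstdint>

    struct IsolateSketch;

    struct HeapSketch {
      IsolateSketch* isolate();
      int heap_state;
    };

    struct IsolateSketch {
      int isolate_state;
      HeapSketch heap;  // the member Heap::isolate() starts from
    };

    IsolateSketch* HeapSketch::isolate() {
      return reinterpret_cast<IsolateSketch*>(
          reinterpret_cast<intptr_t>(this) -
          static_cast<intptr_t>(offsetof(IsolateSketch, heap)));
    }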
-
-
-// Calls the FUNCTION_CALL function and retries it up to three times
-// to guarantee that any allocations performed during the call will
-// succeed if there's enough memory.
-
-// Warning: Do not use the identifiers __object__, __maybe_object__ or
-// __scope__ in a call to this macro.
-
-#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                      \
-  if (__allocation__.To(&__object__)) {                                        \
-    ASSERT(__object__ != (ISOLATE)->heap()->exception());                      \
-    RETURN_VALUE;                                                              \
-  }
-
-#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)     \
-  do {                                                                         \
-    AllocationResult __allocation__ = FUNCTION_CALL;                           \
-    Object* __object__ = NULL;                                                 \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                          \
-    (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),             \
-                                      "allocation failure");                   \
-    __allocation__ = FUNCTION_CALL;                                            \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                          \
-    (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();         \
-    (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");           \
-    {                                                                          \
-      AlwaysAllocateScope __scope__(ISOLATE);                                  \
-      __allocation__ = FUNCTION_CALL;                                          \
-    }                                                                          \
-    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                          \
-      /* TODO(1181417): Fix this. */                                           \
-    v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true);  \
-    RETURN_EMPTY;                                                              \
-  } while (false)
-
-#define CALL_AND_RETRY_OR_DIE(                                             \
-     ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)                   \
-  CALL_AND_RETRY(                                                          \
-      ISOLATE,                                                             \
-      FUNCTION_CALL,                                                       \
-      RETURN_VALUE,                                                        \
-      RETURN_EMPTY)
-
-#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
-  CALL_AND_RETRY_OR_DIE(ISOLATE,                                              \
-                        FUNCTION_CALL,                                        \
-                        return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
-                        return Handle<TYPE>())                                \
-
-
-#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL)  \
-  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
-
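For illustration, a hypothetical wrapper showing how CALL_HEAP_FUNCTION is meant to be invoked (the allocator name is an assumption; real callers live in Factory):

// Sketch only: retries the raw allocation via the macros above, triggering
// GC on failure, and wraps the surviving object in a Handle<String>.
Handle<String> NewRawOneByteString(Isolate* isolate, int length) {
  CALL_HEAP_FUNCTION(
      isolate,
      isolate->heap()->AllocateRawOneByteString(length),
      String);
}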
-
-void ExternalStringTable::AddString(String* string) {
-  ASSERT(string->IsExternalString());
-  if (heap_->InNewSpace(string)) {
-    new_space_strings_.Add(string);
-  } else {
-    old_space_strings_.Add(string);
-  }
-}
-
-
-void ExternalStringTable::Iterate(ObjectVisitor* v) {
-  if (!new_space_strings_.is_empty()) {
-    Object** start = &new_space_strings_[0];
-    v->VisitPointers(start, start + new_space_strings_.length());
-  }
-  if (!old_space_strings_.is_empty()) {
-    Object** start = &old_space_strings_[0];
-    v->VisitPointers(start, start + old_space_strings_.length());
-  }
-}
-
-
-// Verify() is inline to avoid ifdef-s around its calls in release
-// mode.
-void ExternalStringTable::Verify() {
-#ifdef DEBUG
-  for (int i = 0; i < new_space_strings_.length(); ++i) {
-    Object* obj = Object::cast(new_space_strings_[i]);
-    ASSERT(heap_->InNewSpace(obj));
-    ASSERT(obj != heap_->the_hole_value());
-  }
-  for (int i = 0; i < old_space_strings_.length(); ++i) {
-    Object* obj = Object::cast(old_space_strings_[i]);
-    ASSERT(!heap_->InNewSpace(obj));
-    ASSERT(obj != heap_->the_hole_value());
-  }
-#endif
-}
-
-
-void ExternalStringTable::AddOldString(String* string) {
-  ASSERT(string->IsExternalString());
-  ASSERT(!heap_->InNewSpace(string));
-  old_space_strings_.Add(string);
-}
-
-
-void ExternalStringTable::ShrinkNewStrings(int position) {
-  new_space_strings_.Rewind(position);
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    Verify();
-  }
-#endif
-}
-
-
-void Heap::ClearInstanceofCache() {
-  set_instanceof_cache_function(the_hole_value());
-}
-
-
-Object* Heap::ToBoolean(bool condition) {
-  return condition ? true_value() : false_value();
-}
-
-
-void Heap::CompletelyClearInstanceofCache() {
-  set_instanceof_cache_map(the_hole_value());
-  set_instanceof_cache_function(the_hole_value());
-}
-
-
-AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
-    : heap_(isolate->heap()), daf_(isolate) {
-  // We shouldn't hit any nested scopes, because that requires
-  // non-handle code to call handle code. The code still works but
-  // performance will degrade, so we want to catch this situation
-  // in debug mode.
-  ASSERT(heap_->always_allocate_scope_depth_ == 0);
-  heap_->always_allocate_scope_depth_++;
-}
-
-
-AlwaysAllocateScope::~AlwaysAllocateScope() {
-  heap_->always_allocate_scope_depth_--;
-  ASSERT(heap_->always_allocate_scope_depth_ == 0);
-}
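A minimal usage sketch, mirroring the last-resort step of CALL_AND_RETRY above: within the scope, allocation may dip into reserved memory instead of failing, and nesting would trip the ASSERT in the constructor.

{
  AlwaysAllocateScope scope(isolate);
  // Allocations here are expected to succeed without triggering GC.
  AllocationResult result = AllocateSomething(isolate->heap());  // hypothetical
}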
-
-
-#ifdef VERIFY_HEAP
-NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
-  Isolate* isolate = Isolate::Current();
-  isolate->heap()->no_weak_object_verification_scope_depth_++;
-}
-
-
-NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
-  Isolate* isolate = Isolate::Current();
-  isolate->heap()->no_weak_object_verification_scope_depth_--;
-}
-#endif
-
-
-GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
-  heap_->gc_callbacks_depth_++;
-}
-
-
-GCCallbacksScope::~GCCallbacksScope() {
-  heap_->gc_callbacks_depth_--;
-}
-
-
-bool GCCallbacksScope::CheckReenter() {
-  return heap_->gc_callbacks_depth_ == 1;
-}
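The intended call-site shape, as a sketch (the callback helper name is an assumption): CheckReenter() is true only in the outermost scope, so embedder GC callbacks are not re-entered from a GC that starts inside a callback.

{
  GCCallbacksScope scope(heap);
  if (scope.CheckReenter()) {
    // Outermost GC only: safe to run the embedder's prologue callbacks.
    heap->CallGCPrologueCallbacks(gc_type, flags);  // hypothetical helper
  }
}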
-
-
-void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
-  for (Object** current = start; current < end; current++) {
-    if ((*current)->IsHeapObject()) {
-      HeapObject* object = HeapObject::cast(*current);
-      CHECK(object->GetIsolate()->heap()->Contains(object));
-      CHECK(object->map()->IsMap());
-    }
-  }
-}
-
-
-void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
-  for (Object** current = start; current < end; current++) {
-     CHECK((*current)->IsSmi());
-  }
-}
-
-
-double GCTracer::SizeOfHeapObjects() {
-  return (static_cast<double>(heap_->SizeOfObjects())) / MB;
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_HEAP_INL_H_
diff --git a/src/heap-profiler.cc b/src/heap-profiler.cc
index e576d3b..d86ce5e 100644
--- a/src/heap-profiler.cc
+++ b/src/heap-profiler.cc
@@ -45,7 +45,7 @@
 
 void HeapProfiler::DefineWrapperClass(
     uint16_t class_id, v8::HeapProfiler::WrapperInfoCallback callback) {
-  ASSERT(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
+  DCHECK(class_id != v8::HeapProfiler::kPersistentHandleNoClassId);
   if (wrapper_callbacks_.length() <= class_id) {
     wrapper_callbacks_.AddBlock(
         NULL, class_id - wrapper_callbacks_.length() + 1);
@@ -93,7 +93,7 @@
 void HeapProfiler::StartHeapObjectsTracking(bool track_allocations) {
   ids_->UpdateHeapObjectsMap();
   is_tracking_object_moves_ = true;
-  ASSERT(!is_tracking_allocations());
+  DCHECK(!is_tracking_allocations());
   if (track_allocations) {
     allocation_tracker_.Reset(new AllocationTracker(ids_.get(), names_.get()));
     heap()->DisableInlineAllocation();
@@ -180,7 +180,7 @@
        obj != NULL;
        obj = iterator.next()) {
     if (ids_->FindEntry(obj->address()) == id) {
-      ASSERT(object == NULL);
+      DCHECK(object == NULL);
       object = obj;
       // Can't break -- kFilterUnreachable requires full heap traversal.
     }
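The ASSERT-to-DCHECK renames in this file and the files below track V8's move to Chromium-style check macros; the assumed semantics are unchanged, roughly:

#ifdef DEBUG
#define DCHECK(condition) CHECK(condition)  // fatal on failure in debug builds
#else
#define DCHECK(condition) ((void) 0)        // compiled out of release builds
#endif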
diff --git a/src/heap-snapshot-generator-inl.h b/src/heap-snapshot-generator-inl.h
index b4021ec..3f7e622 100644
--- a/src/heap-snapshot-generator-inl.h
+++ b/src/heap-snapshot-generator-inl.h
@@ -35,33 +35,14 @@
 
 
 HeapGraphEdge** HeapEntry::children_arr() {
-  ASSERT(children_index_ >= 0);
-  SLOW_ASSERT(children_index_ < snapshot_->children().length() ||
+  DCHECK(children_index_ >= 0);
+  SLOW_DCHECK(children_index_ < snapshot_->children().length() ||
       (children_index_ == snapshot_->children().length() &&
        children_count_ == 0));
   return &snapshot_->children().first() + children_index_;
 }
 
 
-SnapshotObjectId HeapObjectsMap::GetNthGcSubrootId(int delta) {
-  return kGcRootsFirstSubrootId + delta * kObjectIdStep;
-}
-
-
-HeapObject* V8HeapExplorer::GetNthGcSubrootObject(int delta) {
-  return reinterpret_cast<HeapObject*>(
-      reinterpret_cast<char*>(kFirstGcSubrootObject) +
-      delta * HeapObjectsMap::kObjectIdStep);
-}
-
-
-int V8HeapExplorer::GetGcSubrootOrder(HeapObject* subroot) {
-  return static_cast<int>(
-      (reinterpret_cast<char*>(subroot) -
-       reinterpret_cast<char*>(kFirstGcSubrootObject)) /
-      HeapObjectsMap::kObjectIdStep);
-}
-
 } }  // namespace v8::internal
 
 #endif  // V8_HEAP_SNAPSHOT_GENERATOR_INL_H_
diff --git a/src/heap-snapshot-generator.cc b/src/heap-snapshot-generator.cc
index be970ee..4a4c914 100644
--- a/src/heap-snapshot-generator.cc
+++ b/src/heap-snapshot-generator.cc
@@ -22,7 +22,7 @@
       from_index_(from),
       to_index_(to),
       name_(name) {
-  ASSERT(type == kContextVariable
+  DCHECK(type == kContextVariable
       || type == kProperty
       || type == kInternal
       || type == kShortcut
@@ -35,7 +35,7 @@
       from_index_(from),
       to_index_(to),
       index_(index) {
-  ASSERT(type == kElement || type == kHidden);
+  DCHECK(type == kElement || type == kHidden);
 }
 
 
@@ -83,21 +83,21 @@
 void HeapEntry::Print(
     const char* prefix, const char* edge_name, int max_depth, int indent) {
   STATIC_ASSERT(sizeof(unsigned) == sizeof(id()));
-  OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ",
-            self_size(), id(), indent, ' ', prefix, edge_name);
+  base::OS::Print("%6" V8PRIuPTR " @%6u %*c %s%s: ", self_size(), id(), indent,
+                  ' ', prefix, edge_name);
   if (type() != kString) {
-    OS::Print("%s %.40s\n", TypeAsString(), name_);
+    base::OS::Print("%s %.40s\n", TypeAsString(), name_);
   } else {
-    OS::Print("\"");
+    base::OS::Print("\"");
     const char* c = name_;
     while (*c && (c - name_) <= 40) {
       if (*c != '\n')
-        OS::Print("%c", *c);
+        base::OS::Print("%c", *c);
       else
-        OS::Print("\\n");
+        base::OS::Print("\\n");
       ++c;
     }
-    OS::Print("\"\n");
+    base::OS::Print("\"\n");
   }
   if (--max_depth == 0) return;
   Vector<HeapGraphEdge*> ch = children();
@@ -188,7 +188,6 @@
       uid_(uid),
       root_index_(HeapEntry::kNoEntry),
       gc_roots_index_(HeapEntry::kNoEntry),
-      natives_root_index_(HeapEntry::kNoEntry),
       max_snapshot_js_object_id_(0) {
   STATIC_ASSERT(
       sizeof(HeapGraphEdge) ==
@@ -217,22 +216,34 @@
 }
 
 
+void HeapSnapshot::AddSyntheticRootEntries() {
+  AddRootEntry();
+  AddGcRootsEntry();
+  SnapshotObjectId id = HeapObjectsMap::kGcRootsFirstSubrootId;
+  for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
+    AddGcSubrootEntry(tag, id);
+    id += HeapObjectsMap::kObjectIdStep;
+  }
+  DCHECK(HeapObjectsMap::kFirstAvailableObjectId == id);
+}
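The final DCHECK can be verified against the id constants defined elsewhere in this file (values assumed from those definitions):

// kObjectIdStep           = 2
// kInternalRootObjectId   = 1
// kGcRootsObjectId        = 1 + kObjectIdStep = 3
// kGcRootsFirstSubrootId  = 3 + kObjectIdStep = 5
// After the loop: id = 5 + kNumberOfSyncTags * kObjectIdStep
//                    = kFirstAvailableObjectId, exactly what the DCHECK requires.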
+
+
 HeapEntry* HeapSnapshot::AddRootEntry() {
-  ASSERT(root_index_ == HeapEntry::kNoEntry);
-  ASSERT(entries_.is_empty());  // Root entry must be the first one.
+  DCHECK(root_index_ == HeapEntry::kNoEntry);
+  DCHECK(entries_.is_empty());  // Root entry must be the first one.
   HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
                               "",
                               HeapObjectsMap::kInternalRootObjectId,
                               0,
                               0);
   root_index_ = entry->index();
-  ASSERT(root_index_ == 0);
+  DCHECK(root_index_ == 0);
   return entry;
 }
 
 
 HeapEntry* HeapSnapshot::AddGcRootsEntry() {
-  ASSERT(gc_roots_index_ == HeapEntry::kNoEntry);
+  DCHECK(gc_roots_index_ == HeapEntry::kNoEntry);
   HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
                               "(GC roots)",
                               HeapObjectsMap::kGcRootsObjectId,
@@ -243,15 +254,11 @@
 }
 
 
-HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag) {
-  ASSERT(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
-  ASSERT(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
-  HeapEntry* entry = AddEntry(
-      HeapEntry::kSynthetic,
-      VisitorSynchronization::kTagNames[tag],
-      HeapObjectsMap::GetNthGcSubrootId(tag),
-      0,
-      0);
+HeapEntry* HeapSnapshot::AddGcSubrootEntry(int tag, SnapshotObjectId id) {
+  DCHECK(gc_subroot_indexes_[tag] == HeapEntry::kNoEntry);
+  DCHECK(0 <= tag && tag < VisitorSynchronization::kNumberOfSyncTags);
+  HeapEntry* entry = AddEntry(HeapEntry::kSynthetic,
+                              VisitorSynchronization::kTagNames[tag], id, 0, 0);
   gc_subroot_indexes_[tag] = entry->index();
   return entry;
 }
@@ -269,14 +276,14 @@
 
 
 void HeapSnapshot::FillChildren() {
-  ASSERT(children().is_empty());
+  DCHECK(children().is_empty());
   children().Allocate(edges().length());
   int children_index = 0;
   for (int i = 0; i < entries().length(); ++i) {
     HeapEntry* entry = &entries()[i];
     children_index = entry->set_children_index(children_index);
   }
-  ASSERT(edges().length() == children_index);
+  DCHECK(edges().length() == children_index);
   for (int i = 0; i < edges().length(); ++i) {
     HeapGraphEdge* edge = &edges()[i];
     edge->ReplaceToIndexWithEntry(this);
@@ -375,8 +382,8 @@
 
 
 bool HeapObjectsMap::MoveObject(Address from, Address to, int object_size) {
-  ASSERT(to != NULL);
-  ASSERT(from != NULL);
+  DCHECK(to != NULL);
+  DCHECK(from != NULL);
   if (from == to) return false;
   void* from_value = entries_map_.Remove(from, ComputePointerHash(from));
   if (from_value == NULL) {
@@ -433,7 +440,7 @@
   if (entry == NULL) return 0;
   int entry_index = static_cast<int>(reinterpret_cast<intptr_t>(entry->value));
   EntryInfo& entry_info = entries_.at(entry_index);
-  ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+  DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
   return entry_info.id;
 }
 
@@ -441,7 +448,7 @@
 SnapshotObjectId HeapObjectsMap::FindOrAddEntry(Address addr,
                                                 unsigned int size,
                                                 bool accessed) {
-  ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+  DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
   HashMap::Entry* entry = entries_map_.Lookup(addr, ComputePointerHash(addr),
                                               true);
   if (entry->value != NULL) {
@@ -462,7 +469,7 @@
   SnapshotObjectId id = next_id_;
   next_id_ += kObjectIdStep;
   entries_.Add(EntryInfo(id, addr, size, accessed));
-  ASSERT(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
+  DCHECK(static_cast<uint32_t>(entries_.length()) > entries_map_.occupancy());
   return id;
 }
 
@@ -615,7 +622,7 @@
   time_intervals_.Add(TimeInterval(next_id_));
   int prefered_chunk_size = stream->GetChunkSize();
   List<v8::HeapStatsUpdate> stats_buffer;
-  ASSERT(!entries_.is_empty());
+  DCHECK(!entries_.is_empty());
   EntryInfo* entry_info = &entries_.first();
   EntryInfo* end_entry_info = &entries_.last() + 1;
   for (int time_interval_index = 0;
@@ -645,7 +652,7 @@
       }
     }
   }
-  ASSERT(entry_info == end_entry_info);
+  DCHECK(entry_info == end_entry_info);
   if (!stats_buffer.is_empty()) {
     OutputStream::WriteResult result = stream->WriteHeapStatsChunk(
         &stats_buffer.first(), stats_buffer.length());
@@ -657,7 +664,7 @@
 
 
 void HeapObjectsMap::RemoveDeadEntries() {
-  ASSERT(entries_.length() > 0 &&
+  DCHECK(entries_.length() > 0 &&
          entries_.at(0).id == 0 &&
          entries_.at(0).addr == NULL);
   int first_free_entry = 1;
@@ -670,7 +677,7 @@
       entries_.at(first_free_entry).accessed = false;
       HashMap::Entry* entry = entries_map_.Lookup(
           entry_info.addr, ComputePointerHash(entry_info.addr), false);
-      ASSERT(entry);
+      DCHECK(entry);
       entry->value = reinterpret_cast<void*>(first_free_entry);
       ++first_free_entry;
     } else {
@@ -681,7 +688,7 @@
     }
   }
   entries_.Rewind(first_free_entry);
-  ASSERT(static_cast<uint32_t>(entries_.length()) - 1 ==
+  DCHECK(static_cast<uint32_t>(entries_.length()) - 1 ==
          entries_map_.occupancy());
 }
 
@@ -723,7 +730,7 @@
 
 void HeapEntriesMap::Pair(HeapThing thing, int entry) {
   HashMap::Entry* cache_entry = entries_.Lookup(thing, Hash(thing), true);
-  ASSERT(cache_entry->value == NULL);
+  DCHECK(cache_entry->value == NULL);
   cache_entry->value = reinterpret_cast<void*>(static_cast<intptr_t>(entry));
 }
 
@@ -771,20 +778,6 @@
 }
 
 
-HeapObject* const V8HeapExplorer::kInternalRootObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kInternalRootObjectId));
-HeapObject* const V8HeapExplorer::kGcRootsObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kGcRootsObjectId));
-HeapObject* const V8HeapExplorer::kFirstGcSubrootObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kGcRootsFirstSubrootId));
-HeapObject* const V8HeapExplorer::kLastGcSubrootObject =
-    reinterpret_cast<HeapObject*>(
-        static_cast<intptr_t>(HeapObjectsMap::kFirstAvailableObjectId));
-
-
 V8HeapExplorer::V8HeapExplorer(
     HeapSnapshot* snapshot,
     SnapshottingProgressReportingInterface* progress,
@@ -809,16 +802,7 @@
 
 
 HeapEntry* V8HeapExplorer::AddEntry(HeapObject* object) {
-  if (object == kInternalRootObject) {
-    snapshot_->AddRootEntry();
-    return snapshot_->root();
-  } else if (object == kGcRootsObject) {
-    HeapEntry* entry = snapshot_->AddGcRootsEntry();
-    return entry;
-  } else if (object >= kFirstGcSubrootObject && object < kLastGcSubrootObject) {
-    HeapEntry* entry = snapshot_->AddGcSubrootEntry(GetGcSubrootOrder(object));
-    return entry;
-  } else if (object->IsJSFunction()) {
+  if (object->IsJSFunction()) {
     JSFunction* func = JSFunction::cast(object);
     SharedFunctionInfo* shared = func->shared();
     const char* name = shared->bound() ? "native_bind" :
@@ -965,41 +949,6 @@
 };
 
 
-class GcSubrootsEnumerator : public ObjectVisitor {
- public:
-  GcSubrootsEnumerator(
-      SnapshotFiller* filler, V8HeapExplorer* explorer)
-      : filler_(filler),
-        explorer_(explorer),
-        previous_object_count_(0),
-        object_count_(0) {
-  }
-  void VisitPointers(Object** start, Object** end) {
-    object_count_ += end - start;
-  }
-  void Synchronize(VisitorSynchronization::SyncTag tag) {
-    // Skip empty subroots.
-    if (previous_object_count_ != object_count_) {
-      previous_object_count_ = object_count_;
-      filler_->AddEntry(V8HeapExplorer::GetNthGcSubrootObject(tag), explorer_);
-    }
-  }
- private:
-  SnapshotFiller* filler_;
-  V8HeapExplorer* explorer_;
-  intptr_t previous_object_count_;
-  intptr_t object_count_;
-};
-
-
-void V8HeapExplorer::AddRootEntries(SnapshotFiller* filler) {
-  filler->AddEntry(kInternalRootObject, this);
-  filler->AddEntry(kGcRootsObject, this);
-  GcSubrootsEnumerator enumerator(filler, this);
-  heap_->IterateRoots(&enumerator, VISIT_ALL);
-}
-
-
 const char* V8HeapExplorer::GetSystemEntryName(HeapObject* object) {
   switch (object->map()->instance_type()) {
     case MAP_TYPE:
@@ -1059,9 +1008,9 @@
   static void MarkVisitedField(HeapObject* obj, int offset) {
     if (offset < 0) return;
     Address field = obj->address() + offset;
-    ASSERT(Memory::Object_at(field)->IsHeapObject());
+    DCHECK(Memory::Object_at(field)->IsHeapObject());
     intptr_t p = reinterpret_cast<intptr_t>(Memory::Object_at(field));
-    ASSERT(!IsMarked(p));
+    DCHECK(!IsMarked(p));
     intptr_t p_tagged = p | kTag;
     Memory::Object_at(field) = reinterpret_cast<Object*>(p_tagged);
   }
@@ -1072,7 +1021,7 @@
     if (IsMarked(p)) {
       intptr_t p_untagged = (p & ~kTaggingMask) | kHeapObjectTag;
       *field = reinterpret_cast<Object*>(p_untagged);
-      ASSERT((*field)->IsHeapObject());
+      DCHECK((*field)->IsHeapObject());
       return true;
     }
     return false;
@@ -1097,11 +1046,16 @@
     ExtractJSGlobalProxyReferences(entry, JSGlobalProxy::cast(obj));
   } else if (obj->IsJSArrayBuffer()) {
     ExtractJSArrayBufferReferences(entry, JSArrayBuffer::cast(obj));
-  } else if (obj->IsJSWeakSet()) {
-    ExtractJSWeakCollectionReferences(entry, JSWeakSet::cast(obj));
-  } else if (obj->IsJSWeakMap()) {
-    ExtractJSWeakCollectionReferences(entry, JSWeakMap::cast(obj));
   } else if (obj->IsJSObject()) {
+    if (obj->IsJSWeakSet()) {
+      ExtractJSWeakCollectionReferences(entry, JSWeakSet::cast(obj));
+    } else if (obj->IsJSWeakMap()) {
+      ExtractJSWeakCollectionReferences(entry, JSWeakMap::cast(obj));
+    } else if (obj->IsJSSet()) {
+      ExtractJSCollectionReferences(entry, JSSet::cast(obj));
+    } else if (obj->IsJSMap()) {
+      ExtractJSCollectionReferences(entry, JSMap::cast(obj));
+    }
     ExtractJSObjectReferences(entry, JSObject::cast(obj));
   } else if (obj->IsString()) {
     ExtractStringReferences(entry, String::cast(obj));
@@ -1113,6 +1067,8 @@
     ExtractSharedFunctionInfoReferences(entry, SharedFunctionInfo::cast(obj));
   } else if (obj->IsScript()) {
     ExtractScriptReferences(entry, Script::cast(obj));
+  } else if (obj->IsAccessorInfo()) {
+    ExtractAccessorInfoReferences(entry, AccessorInfo::cast(obj));
   } else if (obj->IsAccessorPair()) {
     ExtractAccessorPairReferences(entry, AccessorPair::cast(obj));
   } else if (obj->IsCodeCache()) {
@@ -1159,8 +1115,8 @@
   ExtractPropertyReferences(js_obj, entry);
   ExtractElementReferences(js_obj, entry);
   ExtractInternalReferences(js_obj, entry);
-  SetPropertyReference(
-      obj, entry, heap_->proto_string(), js_obj->GetPrototype());
+  PrototypeIterator iter(heap_->isolate(), js_obj);
+  SetPropertyReference(obj, entry, heap_->proto_string(), iter.GetCurrent());
   if (obj->IsJSFunction()) {
     JSFunction* js_fun = JSFunction::cast(js_obj);
     Object* proto_or_map = js_fun->prototype_or_initial_map();
@@ -1216,8 +1172,8 @@
                          "global_context", global_obj->global_context(),
                          GlobalObject::kGlobalContextOffset);
     SetInternalReference(global_obj, entry,
-                         "global_receiver", global_obj->global_receiver(),
-                         GlobalObject::kGlobalReceiverOffset);
+                         "global_proxy", global_obj->global_proxy(),
+                         GlobalObject::kGlobalProxyOffset);
     STATIC_ASSERT(GlobalObject::kHeaderSize - JSObject::kHeaderSize ==
                  4 * kPointerSize);
   } else if (obj->IsJSArrayBufferView()) {
@@ -1260,6 +1216,13 @@
 }
 
 
+void V8HeapExplorer::ExtractJSCollectionReferences(int entry,
+                                                   JSCollection* collection) {
+  SetInternalReference(collection, entry, "table", collection->table(),
+                       JSCollection::kTableOffset);
+}
+
+
 void V8HeapExplorer::ExtractJSWeakCollectionReferences(
     int entry, JSWeakCollection* collection) {
   MarkAsWeakContainer(collection->table());
@@ -1457,6 +1420,35 @@
 }
 
 
+void V8HeapExplorer::ExtractAccessorInfoReferences(
+    int entry, AccessorInfo* accessor_info) {
+  SetInternalReference(accessor_info, entry, "name", accessor_info->name(),
+                       AccessorInfo::kNameOffset);
+  SetInternalReference(accessor_info, entry, "expected_receiver_type",
+                       accessor_info->expected_receiver_type(),
+                       AccessorInfo::kExpectedReceiverTypeOffset);
+  if (accessor_info->IsDeclaredAccessorInfo()) {
+    DeclaredAccessorInfo* declared_accessor_info =
+        DeclaredAccessorInfo::cast(accessor_info);
+    SetInternalReference(declared_accessor_info, entry, "descriptor",
+                         declared_accessor_info->descriptor(),
+                         DeclaredAccessorInfo::kDescriptorOffset);
+  } else if (accessor_info->IsExecutableAccessorInfo()) {
+    ExecutableAccessorInfo* executable_accessor_info =
+        ExecutableAccessorInfo::cast(accessor_info);
+    SetInternalReference(executable_accessor_info, entry, "getter",
+                         executable_accessor_info->getter(),
+                         ExecutableAccessorInfo::kGetterOffset);
+    SetInternalReference(executable_accessor_info, entry, "setter",
+                         executable_accessor_info->setter(),
+                         ExecutableAccessorInfo::kSetterOffset);
+    SetInternalReference(executable_accessor_info, entry, "data",
+                         executable_accessor_info->data(),
+                         ExecutableAccessorInfo::kDataOffset);
+  }
+}
+
+
 void V8HeapExplorer::ExtractAccessorPairReferences(
     int entry, AccessorPair* accessors) {
   SetInternalReference(accessors, entry, "getter", accessors->getter(),
@@ -1487,8 +1479,8 @@
 void V8HeapExplorer::TagCodeObject(Code* code) {
   if (code->kind() == Code::STUB) {
     TagObject(code, names_->GetFormatted(
-        "(%s code)", CodeStub::MajorName(
-            static_cast<CodeStub::Major>(code->major_key()), true)));
+                        "(%s code)", CodeStub::MajorName(
+                                         CodeStub::GetMajorKey(code), true)));
   }
 }
 
@@ -1641,6 +1633,8 @@
     for (int i = 0; i < real_size; i++) {
       switch (descs->GetType(i)) {
         case FIELD: {
+          Representation r = descs->GetDetails(i).representation();
+          if (r.IsSmi() || r.IsDouble()) break;
           int index = descs->GetFieldIndex(i);
 
           Name* k = descs->GetKey(i);
@@ -1683,10 +1677,6 @@
               descs->GetKey(i), descs->GetValue(i));
           break;
         case NORMAL:  // only in slow mode
-        case HANDLER:  // only in lookup results, not in descriptors
-        case INTERCEPTOR:  // only in lookup results, not in descriptors
-          break;
-        case NONEXISTENT:
           UNREACHABLE();
           break;
       }
@@ -1748,7 +1738,7 @@
     for (int i = 0; i < length; ++i) {
       Object* k = dictionary->KeyAt(i);
       if (dictionary->IsKey(k)) {
-        ASSERT(k->IsNumber());
+        DCHECK(k->IsNumber());
         uint32_t index = static_cast<uint32_t>(k->Number());
         SetElementReference(js_obj, entry, index, dictionary->ValueAt(i));
       }
@@ -1772,25 +1762,8 @@
   if (object->IsJSFunction()) return heap->closure_string();
   String* constructor_name = object->constructor_name();
   if (constructor_name == heap->Object_string()) {
-    // Look up an immediate "constructor" property, if it is a function,
-    // return its name. This is for instances of binding objects, which
-    // have prototype constructor type "Object".
-    Object* constructor_prop = NULL;
-    Isolate* isolate = heap->isolate();
-    LookupResult result(isolate);
-    object->LookupOwnRealNamedProperty(
-        isolate->factory()->constructor_string(), &result);
-    if (!result.IsFound()) return object->constructor_name();
-
-    constructor_prop = result.GetLazyValue();
-    if (constructor_prop->IsJSFunction()) {
-      Object* maybe_name =
-          JSFunction::cast(constructor_prop)->shared()->name();
-      if (maybe_name->IsString()) {
-        String* name = String::cast(maybe_name);
-        if (name->length() > 0) return name;
-      }
-    }
+    // TODO(verwaest): Try to get object.constructor.name in this case.
+    // This requires handlification of the V8HeapExplorer.
   }
   return object->constructor_name();
 }
@@ -1829,11 +1802,8 @@
   void SetCollectingAllReferences() { collecting_all_references_ = true; }
 
   void FillReferences(V8HeapExplorer* explorer) {
-    ASSERT(strong_references_.length() <= all_references_.length());
+    DCHECK(strong_references_.length() <= all_references_.length());
     Builtins* builtins = heap_->isolate()->builtins();
-    for (int i = 0; i < reference_tags_.length(); ++i) {
-      explorer->SetGcRootsReference(reference_tags_[i].tag);
-    }
     int strong_index = 0, all_index = 0, tags_index = 0, builtin_index = 0;
     while (all_index < all_references_.length()) {
       bool is_strong = strong_index < strong_references_.length()
@@ -1843,7 +1813,7 @@
                                       all_references_[all_index]);
       if (reference_tags_[tags_index].tag ==
           VisitorSynchronization::kBuiltins) {
-        ASSERT(all_references_[all_index]->IsCode());
+        DCHECK(all_references_[all_index]->IsCode());
         explorer->TagBuiltinCodeObject(
             Code::cast(all_references_[all_index]),
             builtins->name(builtin_index++));
@@ -1876,10 +1846,15 @@
     SnapshotFiller* filler) {
   filler_ = filler;
 
+  // Create references to the synthetic roots.
+  SetRootGcRootsReference();
+  for (int tag = 0; tag < VisitorSynchronization::kNumberOfSyncTags; tag++) {
+    SetGcRootsReference(static_cast<VisitorSynchronization::SyncTag>(tag));
+  }
+
   // Make sure builtin code objects get their builtin tags
   // first. Otherwise a particular JSFunction object could set
   // its custom name to a generic builtin.
-  SetRootGcRootsReference();
   RootsReferencesExtractor extractor(heap_);
   heap_->IterateRoots(&extractor, VISIT_ONLY_STRONG);
   extractor.SetCollectingAllReferences();
@@ -1952,7 +1927,7 @@
                                          String* reference_name,
                                          Object* child_obj,
                                          int field_offset) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetNamedReference(HeapGraphEdge::kContextVariable,
@@ -1968,7 +1943,7 @@
                                             int parent_entry,
                                             const char* reference_name,
                                             Object* child_obj) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetNamedReference(HeapGraphEdge::kShortcut,
@@ -1983,7 +1958,7 @@
                                          int parent_entry,
                                          int index,
                                          Object* child_obj) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     filler_->SetIndexedReference(HeapGraphEdge::kElement,
@@ -1999,7 +1974,7 @@
                                           const char* reference_name,
                                           Object* child_obj,
                                           int field_offset) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry == NULL) return;
   if (IsEssentialObject(child_obj)) {
@@ -2017,7 +1992,7 @@
                                           int index,
                                           Object* child_obj,
                                           int field_offset) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry == NULL) return;
   if (IsEssentialObject(child_obj)) {
@@ -2034,7 +2009,7 @@
                                         int parent_entry,
                                         int index,
                                         Object* child_obj) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL && IsEssentialObject(child_obj)) {
     filler_->SetIndexedReference(HeapGraphEdge::kHidden,
@@ -2050,7 +2025,7 @@
                                       const char* reference_name,
                                       Object* child_obj,
                                       int field_offset) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry == NULL) return;
   if (IsEssentialObject(child_obj)) {
@@ -2068,7 +2043,7 @@
                                       int index,
                                       Object* child_obj,
                                       int field_offset) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry == NULL) return;
   if (IsEssentialObject(child_obj)) {
@@ -2087,7 +2062,7 @@
                                           Object* child_obj,
                                           const char* name_format_string,
                                           int field_offset) {
-  ASSERT(parent_entry == GetEntry(parent_obj)->index());
+  DCHECK(parent_entry == GetEntry(parent_obj)->index());
   HeapEntry* child_entry = GetEntry(child_obj);
   if (child_entry != NULL) {
     HeapGraphEdge::Type type =
@@ -2119,7 +2094,7 @@
 
 void V8HeapExplorer::SetUserGlobalReference(Object* child_obj) {
   HeapEntry* child_entry = GetEntry(child_obj);
-  ASSERT(child_entry != NULL);
+  DCHECK(child_entry != NULL);
   filler_->SetNamedAutoIndexReference(
       HeapGraphEdge::kShortcut,
       snapshot_->root()->index(),
@@ -2400,7 +2375,7 @@
     HeapObject* parent = *group->parent;
     int parent_entry =
         filler_->FindOrAddEntry(parent, native_entries_allocator_)->index();
-    ASSERT(parent_entry != HeapEntry::kNoEntry);
+    DCHECK(parent_entry != HeapEntry::kNoEntry);
     Object*** children = group->children;
     for (size_t j = 0; j < group->length; ++j) {
       Object* child = *children[j];
@@ -2501,7 +2476,7 @@
     v8::RetainedObjectInfo* info) {
   HeapEntry* child_entry =
       filler_->FindOrAddEntry(info, native_entries_allocator_);
-  ASSERT(child_entry != NULL);
+  DCHECK(child_entry != NULL);
   NativeGroupRetainedObjectInfo* group_info =
       FindOrAddGroupInfo(info->GetGroupLabel());
   HeapEntry* group_entry =
@@ -2516,10 +2491,10 @@
 void NativeObjectsExplorer::SetWrapperNativeReferences(
     HeapObject* wrapper, v8::RetainedObjectInfo* info) {
   HeapEntry* wrapper_entry = filler_->FindEntry(wrapper);
-  ASSERT(wrapper_entry != NULL);
+  DCHECK(wrapper_entry != NULL);
   HeapEntry* info_entry =
       filler_->FindOrAddEntry(info, native_entries_allocator_);
-  ASSERT(info_entry != NULL);
+  DCHECK(info_entry != NULL);
   filler_->SetNamedReference(HeapGraphEdge::kInternal,
                              wrapper_entry->index(),
                              "native",
@@ -2538,7 +2513,7 @@
         static_cast<NativeGroupRetainedObjectInfo*>(entry->value);
     HeapEntry* group_entry =
         filler_->FindOrAddEntry(group_info, native_entries_allocator_);
-    ASSERT(group_entry != NULL);
+    DCHECK(group_entry != NULL);
     filler_->SetIndexedAutoIndexReference(
         HeapGraphEdge::kElement,
         snapshot_->root()->index(),
@@ -2586,16 +2561,6 @@
 
 #ifdef VERIFY_HEAP
   Heap* debug_heap = heap_;
-  CHECK(!debug_heap->old_data_space()->was_swept_conservatively());
-  CHECK(!debug_heap->old_pointer_space()->was_swept_conservatively());
-  CHECK(!debug_heap->code_space()->was_swept_conservatively());
-  CHECK(!debug_heap->cell_space()->was_swept_conservatively());
-  CHECK(!debug_heap->property_cell_space()->
-        was_swept_conservatively());
-  CHECK(!debug_heap->map_space()->was_swept_conservatively());
-#endif
-
-#ifdef VERIFY_HEAP
   debug_heap->Verify();
 #endif
 
@@ -2605,6 +2570,8 @@
   debug_heap->Verify();
 #endif
 
+  snapshot_->AddSyntheticRootEntries();
+
   if (!FillReferences()) return false;
 
   snapshot_->FillChildren();
@@ -2645,7 +2612,6 @@
 
 bool HeapSnapshotGenerator::FillReferences() {
   SnapshotFiller filler(snapshot_, &entries_);
-  v8_heap_explorer_.AddRootEntries(&filler);
   return v8_heap_explorer_.IterateAndExtractReferences(&filler)
       && dom_explorer_.IterateAndExtractReferences(&filler);
 }
@@ -2670,12 +2636,12 @@
         chunk_(chunk_size_),
         chunk_pos_(0),
         aborted_(false) {
-    ASSERT(chunk_size_ > 0);
+    DCHECK(chunk_size_ > 0);
   }
   bool aborted() { return aborted_; }
   void AddCharacter(char c) {
-    ASSERT(c != '\0');
-    ASSERT(chunk_pos_ < chunk_size_);
+    DCHECK(c != '\0');
+    DCHECK(chunk_pos_ < chunk_size_);
     chunk_[chunk_pos_++] = c;
     MaybeWriteChunk();
   }
@@ -2684,12 +2650,12 @@
   }
   void AddSubstring(const char* s, int n) {
     if (n <= 0) return;
-    ASSERT(static_cast<size_t>(n) <= strlen(s));
+    DCHECK(static_cast<size_t>(n) <= strlen(s));
     const char* s_end = s + n;
     while (s < s_end) {
       int s_chunk_size =
           Min(chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
-      ASSERT(s_chunk_size > 0);
+      DCHECK(s_chunk_size > 0);
       MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
       s += s_chunk_size;
       chunk_pos_ += s_chunk_size;
@@ -2699,7 +2665,7 @@
   void AddNumber(unsigned n) { AddNumberImpl<unsigned>(n, "%u"); }
   void Finalize() {
     if (aborted_) return;
-    ASSERT(chunk_pos_ < chunk_size_);
+    DCHECK(chunk_pos_ < chunk_size_);
     if (chunk_pos_ != 0) {
       WriteChunk();
     }
@@ -2715,19 +2681,19 @@
     if (chunk_size_ - chunk_pos_ >= kMaxNumberSize) {
       int result = SNPrintF(
           chunk_.SubVector(chunk_pos_, chunk_size_), format, n);
-      ASSERT(result != -1);
+      DCHECK(result != -1);
       chunk_pos_ += result;
       MaybeWriteChunk();
     } else {
       EmbeddedVector<char, kMaxNumberSize> buffer;
       int result = SNPrintF(buffer, format, n);
       USE(result);
-      ASSERT(result != -1);
+      DCHECK(result != -1);
       AddString(buffer.start());
     }
   }
   void MaybeWriteChunk() {
-    ASSERT(chunk_pos_ <= chunk_size_);
+    DCHECK(chunk_pos_ <= chunk_size_);
     if (chunk_pos_ == chunk_size_) {
       WriteChunk();
     }
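A usage sketch for the chunked writer above (the stream is assumed to implement v8::OutputStream): output accumulates in a fixed-size chunk and is pushed to the stream whenever the chunk fills or Finalize() runs.

OutputStreamWriter writer(stream);
writer.AddString("\"snapshot\":{");
writer.AddNumber(3u);      // formatted through SNPrintF into the chunk
writer.AddCharacter('}');
writer.Finalize();         // flushes the final, possibly partial chunk
if (writer.aborted()) {
  // The embedder's stream rejected a chunk during writing.
}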
@@ -2757,7 +2723,7 @@
       snapshot_->profiler()->allocation_tracker()) {
     allocation_tracker->PrepareForSerialization();
   }
-  ASSERT(writer_ == NULL);
+  DCHECK(writer_ == NULL);
   writer_ = new OutputStreamWriter(stream);
   SerializeImpl();
   delete writer_;
@@ -2766,7 +2732,7 @@
 
 
 void HeapSnapshotJSONSerializer::SerializeImpl() {
-  ASSERT(0 == snapshot_->root()->index());
+  DCHECK(0 == snapshot_->root()->index());
   writer_->AddCharacter('{');
   writer_->AddString("\"snapshot\":{");
   SerializeSnapshot();
@@ -2879,7 +2845,7 @@
 void HeapSnapshotJSONSerializer::SerializeEdges() {
   List<HeapGraphEdge*>& edges = snapshot_->children();
   for (int i = 0; i < edges.length(); ++i) {
-    ASSERT(i == 0 ||
+    DCHECK(i == 0 ||
            edges[i - 1]->from()->index() <= edges[i]->from()->index());
     SerializeEdge(edges[i], i == 0);
     if (writer_->aborted()) return;
@@ -3063,7 +3029,7 @@
   if (position == -1) {
     buffer[buffer_pos++] = '0';
   } else {
-    ASSERT(position >= 0);
+    DCHECK(position >= 0);
     buffer_pos = utoa(static_cast<unsigned>(position + 1), buffer, buffer_pos);
   }
   return buffer_pos;
@@ -3147,7 +3113,7 @@
           unibrow::uchar c = unibrow::Utf8::CalculateValue(s, length, &cursor);
           if (c != unibrow::Utf8::kBadChar) {
             WriteUChar(writer_, c);
-            ASSERT(cursor != 0);
+            DCHECK(cursor != 0);
             s += cursor - 1;
           } else {
             writer_->AddCharacter('?');
diff --git a/src/heap-snapshot-generator.h b/src/heap-snapshot-generator.h
index e18d70a..3e4ce71 100644
--- a/src/heap-snapshot-generator.h
+++ b/src/heap-snapshot-generator.h
@@ -35,11 +35,11 @@
 
   Type type() const { return static_cast<Type>(type_); }
   int index() const {
-    ASSERT(type_ == kElement || type_ == kHidden);
+    DCHECK(type_ == kElement || type_ == kHidden);
     return index_;
   }
   const char* name() const {
-    ASSERT(type_ == kContextVariable
+    DCHECK(type_ == kContextVariable
         || type_ == kProperty
         || type_ == kInternal
         || type_ == kShortcut
@@ -100,7 +100,7 @@
   Type type() { return static_cast<Type>(type_); }
   const char* name() { return name_; }
   void set_name(const char* name) { name_ = name; }
-  inline SnapshotObjectId id() { return id_; }
+  SnapshotObjectId id() { return id_; }
   size_t self_size() { return self_size_; }
   unsigned trace_node_id() const { return trace_node_id_; }
   INLINE(int index() const);
@@ -154,7 +154,6 @@
   size_t RawSnapshotSize() const;
   HeapEntry* root() { return &entries_[root_index_]; }
   HeapEntry* gc_roots() { return &entries_[gc_roots_index_]; }
-  HeapEntry* natives_root() { return &entries_[natives_root_index_]; }
   HeapEntry* gc_subroot(int index) {
     return &entries_[gc_subroot_indexes_[index]];
   }
@@ -171,10 +170,7 @@
                       SnapshotObjectId id,
                       size_t size,
                       unsigned trace_node_id);
-  HeapEntry* AddRootEntry();
-  HeapEntry* AddGcRootsEntry();
-  HeapEntry* AddGcSubrootEntry(int tag);
-  HeapEntry* AddNativesRootEntry();
+  void AddSyntheticRootEntries();
   HeapEntry* GetEntryById(SnapshotObjectId id);
   List<HeapEntry*>* GetSortedEntriesList();
   void FillChildren();
@@ -183,12 +179,15 @@
   void PrintEntriesSize();
 
  private:
+  HeapEntry* AddRootEntry();
+  HeapEntry* AddGcRootsEntry();
+  HeapEntry* AddGcSubrootEntry(int tag, SnapshotObjectId id);
+
   HeapProfiler* profiler_;
   const char* title_;
   unsigned uid_;
   int root_index_;
   int gc_roots_index_;
-  int natives_root_index_;
   int gc_subroot_indexes_[VisitorSynchronization::kNumberOfSyncTags];
   List<HeapEntry> entries_;
   List<HeapGraphEdge> edges_;
@@ -223,12 +222,10 @@
   size_t GetUsedMemorySize() const;
 
   SnapshotObjectId GenerateId(v8::RetainedObjectInfo* info);
-  static inline SnapshotObjectId GetNthGcSubrootId(int delta);
 
   static const int kObjectIdStep = 2;
   static const SnapshotObjectId kInternalRootObjectId;
   static const SnapshotObjectId kGcRootsObjectId;
-  static const SnapshotObjectId kNativesRootObjectId;
   static const SnapshotObjectId kGcRootsFirstSubrootId;
   static const SnapshotObjectId kFirstAvailableObjectId;
 
@@ -348,8 +345,6 @@
 
   static String* GetConstructorName(JSObject* object);
 
-  static HeapObject* const kInternalRootObject;
-
  private:
   typedef bool (V8HeapExplorer::*ExtractReferencesMethod)(int entry,
                                                           HeapObject* object);
@@ -370,6 +365,7 @@
   void ExtractJSObjectReferences(int entry, JSObject* js_obj);
   void ExtractStringReferences(int entry, String* obj);
   void ExtractSymbolReferences(int entry, Symbol* symbol);
+  void ExtractJSCollectionReferences(int entry, JSCollection* collection);
   void ExtractJSWeakCollectionReferences(int entry,
                                          JSWeakCollection* collection);
   void ExtractContextReferences(int entry, Context* context);
@@ -377,6 +373,7 @@
   void ExtractSharedFunctionInfoReferences(int entry,
                                            SharedFunctionInfo* shared);
   void ExtractScriptReferences(int entry, Script* script);
+  void ExtractAccessorInfoReferences(int entry, AccessorInfo* accessor_info);
   void ExtractAccessorPairReferences(int entry, AccessorPair* accessors);
   void ExtractCodeCacheReferences(int entry, CodeCache* code_cache);
   void ExtractCodeReferences(int entry, Code* code);
@@ -448,9 +445,6 @@
 
   HeapEntry* GetEntry(Object* obj);
 
-  static inline HeapObject* GetNthGcSubrootObject(int delta);
-  static inline int GetGcSubrootOrder(HeapObject* subroot);
-
   Heap* heap_;
   HeapSnapshot* snapshot_;
   StringsStorage* names_;
@@ -463,12 +457,7 @@
   HeapObjectsSet weak_containers_;
   v8::HeapProfiler::ObjectNameResolver* global_object_name_resolver_;
 
-  static HeapObject* const kGcRootsObject;
-  static HeapObject* const kFirstGcSubrootObject;
-  static HeapObject* const kLastGcSubrootObject;
-
   friend class IndexedReferencesExtractor;
-  friend class GcSubrootsEnumerator;
   friend class RootsReferencesExtractor;
 
   DISALLOW_COPY_AND_ASSIGN(V8HeapExplorer);
diff --git a/src/heap.cc b/src/heap.cc
deleted file mode 100644
index dd3946f..0000000
--- a/src/heap.cc
+++ /dev/null
@@ -1,6417 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/accessors.h"
-#include "src/api.h"
-#include "src/base/once.h"
-#include "src/bootstrapper.h"
-#include "src/codegen.h"
-#include "src/compilation-cache.h"
-#include "src/conversions.h"
-#include "src/cpu-profiler.h"
-#include "src/debug.h"
-#include "src/deoptimizer.h"
-#include "src/global-handles.h"
-#include "src/heap-profiler.h"
-#include "src/incremental-marking.h"
-#include "src/isolate-inl.h"
-#include "src/mark-compact.h"
-#include "src/natives.h"
-#include "src/objects-visiting.h"
-#include "src/objects-visiting-inl.h"
-#include "src/runtime-profiler.h"
-#include "src/scopeinfo.h"
-#include "src/snapshot.h"
-#include "src/store-buffer.h"
-#include "src/utils/random-number-generator.h"
-#include "src/utils.h"
-#include "src/v8threads.h"
-#include "src/vm-state-inl.h"
-#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h"
-#include "src/arm/regexp-macro-assembler-arm.h"
-#endif
-#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
-#include "src/regexp-macro-assembler.h"
-#include "src/mips/regexp-macro-assembler-mips.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-
-Heap::Heap()
-    : amount_of_external_allocated_memory_(0),
-      amount_of_external_allocated_memory_at_last_global_gc_(0),
-      isolate_(NULL),
-      code_range_size_(0),
-// semispace_size_ should be a power of 2 and old_generation_size_ should be
-// a multiple of Page::kPageSize.
-      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
-      max_semi_space_size_(8 * (kPointerSize / 4)  * MB),
-      initial_semispace_size_(Page::kPageSize),
-      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
-      max_executable_size_(256ul * (kPointerSize / 4) * MB),
-// Variables set based on semispace_size_ and old_generation_size_ in
-// ConfigureHeap.
-// Will be 4 * reserved_semispace_size_ to ensure that young
-// generation can be aligned to its size.
-      maximum_committed_(0),
-      survived_since_last_expansion_(0),
-      sweep_generation_(0),
-      always_allocate_scope_depth_(0),
-      linear_allocation_scope_depth_(0),
-      contexts_disposed_(0),
-      global_ic_age_(0),
-      flush_monomorphic_ics_(false),
-      scan_on_scavenge_pages_(0),
-      new_space_(this),
-      old_pointer_space_(NULL),
-      old_data_space_(NULL),
-      code_space_(NULL),
-      map_space_(NULL),
-      cell_space_(NULL),
-      property_cell_space_(NULL),
-      lo_space_(NULL),
-      gc_state_(NOT_IN_GC),
-      gc_post_processing_depth_(0),
-      ms_count_(0),
-      gc_count_(0),
-      remembered_unmapped_pages_index_(0),
-      unflattened_strings_length_(0),
-#ifdef DEBUG
-      allocation_timeout_(0),
-#endif  // DEBUG
-      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
-      old_gen_exhausted_(false),
-      inline_allocation_disabled_(false),
-      store_buffer_rebuilder_(store_buffer()),
-      hidden_string_(NULL),
-      gc_safe_size_of_old_object_(NULL),
-      total_regexp_code_generated_(0),
-      tracer_(NULL),
-      high_survival_rate_period_length_(0),
-      promoted_objects_size_(0),
-      promotion_rate_(0),
-      semi_space_copied_object_size_(0),
-      semi_space_copied_rate_(0),
-      maximum_size_scavenges_(0),
-      max_gc_pause_(0.0),
-      total_gc_time_ms_(0.0),
-      max_alive_after_gc_(0),
-      min_in_mutator_(kMaxInt),
-      alive_after_last_gc_(0),
-      last_gc_end_timestamp_(0.0),
-      marking_time_(0.0),
-      sweeping_time_(0.0),
-      mark_compact_collector_(this),
-      store_buffer_(this),
-      marking_(this),
-      incremental_marking_(this),
-      number_idle_notifications_(0),
-      last_idle_notification_gc_count_(0),
-      last_idle_notification_gc_count_init_(false),
-      mark_sweeps_since_idle_round_started_(0),
-      gc_count_at_last_idle_gc_(0),
-      scavenges_since_last_idle_round_(kIdleScavengeThreshold),
-      full_codegen_bytes_generated_(0),
-      crankshaft_codegen_bytes_generated_(0),
-      gcs_since_last_deopt_(0),
-#ifdef VERIFY_HEAP
-      no_weak_object_verification_scope_depth_(0),
-#endif
-      allocation_sites_scratchpad_length_(0),
-      promotion_queue_(this),
-      configured_(false),
-      external_string_table_(this),
-      chunks_queued_for_free_(NULL),
-      gc_callbacks_depth_(0) {
-  // Allow build-time customization of the max semispace size. Building
-  // V8 with snapshots and a non-default max semispace size is much
-  // easier if you can define it as part of the build environment.
-#if defined(V8_MAX_SEMISPACE_SIZE)
-  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
-#endif
-
-  // Ensure old_generation_size_ is a multiple of kPageSize.
-  ASSERT(MB >= Page::kPageSize);
-
-  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
-  set_native_contexts_list(NULL);
-  set_array_buffers_list(Smi::FromInt(0));
-  set_allocation_sites_list(Smi::FromInt(0));
-  set_encountered_weak_collections(Smi::FromInt(0));
-  // Put a dummy entry in the remembered pages so we can find the list in
-  // the minidump even if there are no real unmapped pages.
-  RememberUnmappedPage(NULL, false);
-
-  ClearObjectStats(true);
-}
-
-
-intptr_t Heap::Capacity() {
-  if (!HasBeenSetUp()) return 0;
-
-  return new_space_.Capacity() +
-      old_pointer_space_->Capacity() +
-      old_data_space_->Capacity() +
-      code_space_->Capacity() +
-      map_space_->Capacity() +
-      cell_space_->Capacity() +
-      property_cell_space_->Capacity();
-}
-
-
-intptr_t Heap::CommittedMemory() {
-  if (!HasBeenSetUp()) return 0;
-
-  return new_space_.CommittedMemory() +
-      old_pointer_space_->CommittedMemory() +
-      old_data_space_->CommittedMemory() +
-      code_space_->CommittedMemory() +
-      map_space_->CommittedMemory() +
-      cell_space_->CommittedMemory() +
-      property_cell_space_->CommittedMemory() +
-      lo_space_->Size();
-}
-
-
-size_t Heap::CommittedPhysicalMemory() {
-  if (!HasBeenSetUp()) return 0;
-
-  return new_space_.CommittedPhysicalMemory() +
-      old_pointer_space_->CommittedPhysicalMemory() +
-      old_data_space_->CommittedPhysicalMemory() +
-      code_space_->CommittedPhysicalMemory() +
-      map_space_->CommittedPhysicalMemory() +
-      cell_space_->CommittedPhysicalMemory() +
-      property_cell_space_->CommittedPhysicalMemory() +
-      lo_space_->CommittedPhysicalMemory();
-}
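Rough relations assumed from the definitions above (a sketch, not enforced invariants):

// Per paged space: SizeOfObjects() + Available() <= Capacity() <= CommittedMemory()
// Heap-wide:       CommittedPhysicalMemory()     <= CommittedMemory()
// Heap::Capacity() and Heap::Available() skip lo_space_; the committed
// totals above include it.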
-
-
-intptr_t Heap::CommittedMemoryExecutable() {
-  if (!HasBeenSetUp()) return 0;
-
-  return isolate()->memory_allocator()->SizeExecutable();
-}
-
-
-void Heap::UpdateMaximumCommitted() {
-  if (!HasBeenSetUp()) return;
-
-  intptr_t current_committed_memory = CommittedMemory();
-  if (current_committed_memory > maximum_committed_) {
-    maximum_committed_ = current_committed_memory;
-  }
-}
-
-
-intptr_t Heap::Available() {
-  if (!HasBeenSetUp()) return 0;
-
-  return new_space_.Available() +
-      old_pointer_space_->Available() +
-      old_data_space_->Available() +
-      code_space_->Available() +
-      map_space_->Available() +
-      cell_space_->Available() +
-      property_cell_space_->Available();
-}
-
-
-bool Heap::HasBeenSetUp() {
-  return old_pointer_space_ != NULL &&
-         old_data_space_ != NULL &&
-         code_space_ != NULL &&
-         map_space_ != NULL &&
-         cell_space_ != NULL &&
-         property_cell_space_ != NULL &&
-         lo_space_ != NULL;
-}
-
-
-int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
-  if (IntrusiveMarking::IsMarked(object)) {
-    return IntrusiveMarking::SizeOfMarkedObject(object);
-  }
-  return object->SizeFromMap(object->map());
-}
-
-
-GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
-                                              const char** reason) {
-  // Is global GC requested?
-  if (space != NEW_SPACE) {
-    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
-    *reason = "GC in old space requested";
-    return MARK_COMPACTOR;
-  }
-
-  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
-    *reason = "GC in old space forced by flags";
-    return MARK_COMPACTOR;
-  }
-
-  // Is enough data promoted to justify a global GC?
-  if (OldGenerationAllocationLimitReached()) {
-    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
-    *reason = "promotion limit reached";
-    return MARK_COMPACTOR;
-  }
-
-  // Have allocation in OLD and LO failed?
-  if (old_gen_exhausted_) {
-    isolate_->counters()->
-        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
-    *reason = "old generations exhausted";
-    return MARK_COMPACTOR;
-  }
-
-  // Is there enough space left in OLD to guarantee that a scavenge can
-  // succeed?
-  //
-  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
-  // for object promotion. It counts only the bytes that the memory
-  // allocator has not yet allocated from the OS and assigned to any space,
-  // and does not count available bytes already in the old space or code
-  // space.  Undercounting is safe---we may get an unrequested full GC when
-  // a scavenge would have succeeded.
-  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
-    isolate_->counters()->
-        gc_compactor_caused_by_oldspace_exhaustion()->Increment();
-    *reason = "scavenge might not succeed";
-    return MARK_COMPACTOR;
-  }
-
-  // Default
-  *reason = NULL;
-  return SCAVENGER;
-}
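The caller shape matches the Heap::CollectGarbage overload deleted from heap-inl.h near the top of this change; as a sketch:

const char* collector_reason = NULL;
GarbageCollector collector =
    heap->SelectGarbageCollector(OLD_POINTER_SPACE, &collector_reason);
// Any non-NEW_SPACE request forces a full collection, so here
// collector == MARK_COMPACTOR and
// collector_reason == "GC in old space requested".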
-
-
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
-void Heap::ReportStatisticsBeforeGC() {
-  // Heap::ReportHeapStatistics will also log NewSpace statistics when
-  // --log-gc is set.  The following logic is used to avoid
-  // double logging.
-#ifdef DEBUG
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
-  if (FLAG_heap_stats) {
-    ReportHeapStatistics("Before GC");
-  } else if (FLAG_log_gc) {
-    new_space_.ReportStatistics();
-  }
-  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
-#else
-  if (FLAG_log_gc) {
-    new_space_.CollectStatistics();
-    new_space_.ReportStatistics();
-    new_space_.ClearHistograms();
-  }
-#endif  // DEBUG
-}
-
-
-void Heap::PrintShortHeapStatistics() {
-  if (!FLAG_trace_gc_verbose) return;
-  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB\n",
-           isolate_->memory_allocator()->Size() / KB,
-           isolate_->memory_allocator()->Available() / KB);
-  PrintPID("New space,          used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           new_space_.Size() / KB,
-           new_space_.Available() / KB,
-           new_space_.CommittedMemory() / KB);
-  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           old_pointer_space_->SizeOfObjects() / KB,
-           old_pointer_space_->Available() / KB,
-           old_pointer_space_->CommittedMemory() / KB);
-  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           old_data_space_->SizeOfObjects() / KB,
-           old_data_space_->Available() / KB,
-           old_data_space_->CommittedMemory() / KB);
-  PrintPID("Code space,         used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           code_space_->SizeOfObjects() / KB,
-           code_space_->Available() / KB,
-           code_space_->CommittedMemory() / KB);
-  PrintPID("Map space,          used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           map_space_->SizeOfObjects() / KB,
-           map_space_->Available() / KB,
-           map_space_->CommittedMemory() / KB);
-  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           cell_space_->SizeOfObjects() / KB,
-           cell_space_->Available() / KB,
-           cell_space_->CommittedMemory() / KB);
-  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           property_cell_space_->SizeOfObjects() / KB,
-           property_cell_space_->Available() / KB,
-           property_cell_space_->CommittedMemory() / KB);
-  PrintPID("Large object space, used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           lo_space_->SizeOfObjects() / KB,
-           lo_space_->Available() / KB,
-           lo_space_->CommittedMemory() / KB);
-  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX "d KB"
-               ", available: %6" V8_PTR_PREFIX "d KB"
-               ", committed: %6" V8_PTR_PREFIX "d KB\n",
-           this->SizeOfObjects() / KB,
-           this->Available() / KB,
-           this->CommittedMemory() / KB);
-  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
-           static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
-  PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
-}
-
-
-// TODO(1238405): Combine the infrastructure for --heap-stats and
-// --log-gc to avoid the complicated preprocessor and flag testing.
-void Heap::ReportStatisticsAfterGC() {
-  // As with the reporting before GC, we use some complicated logic to
-  // ensure that NewSpace statistics are logged exactly once when --log-gc
-  // is turned on.
-#if defined(DEBUG)
-  if (FLAG_heap_stats) {
-    new_space_.CollectStatistics();
-    ReportHeapStatistics("After GC");
-  } else if (FLAG_log_gc) {
-    new_space_.ReportStatistics();
-  }
-#else
-  if (FLAG_log_gc) new_space_.ReportStatistics();
-#endif  // DEBUG
-}
-
-
-void Heap::GarbageCollectionPrologue() {
-  {  AllowHeapAllocation for_the_first_part_of_prologue;
-    ClearJSFunctionResultCaches();
-    gc_count_++;
-    unflattened_strings_length_ = 0;
-
-    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
-      mark_compact_collector()->EnableCodeFlushing(true);
-    }
-
-#ifdef VERIFY_HEAP
-    if (FLAG_verify_heap) {
-      Verify();
-    }
-#endif
-  }
-
-  // Reset GC statistics.
-  promoted_objects_size_ = 0;
-  semi_space_copied_object_size_ = 0;
-
-  UpdateMaximumCommitted();
-
-#ifdef DEBUG
-  ASSERT(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
-
-  if (FLAG_gc_verbose) Print();
-
-  ReportStatisticsBeforeGC();
-#endif  // DEBUG
-
-  store_buffer()->GCPrologue();
-
-  if (isolate()->concurrent_osr_enabled()) {
-    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
-  }
-
-  if (new_space_.IsAtMaximumCapacity()) {
-    maximum_size_scavenges_++;
-  } else {
-    maximum_size_scavenges_ = 0;
-  }
-  CheckNewSpaceExpansionCriteria();
-}
-
-
-intptr_t Heap::SizeOfObjects() {
-  intptr_t total = 0;
-  AllSpaces spaces(this);
-  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
-    total += space->SizeOfObjects();
-  }
-  return total;
-}
-
-
-void Heap::ClearAllICsByKind(Code::Kind kind) {
-  HeapObjectIterator it(code_space());
-
-  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
-    Code* code = Code::cast(object);
-    Code::Kind current_kind = code->kind();
-    if (current_kind == Code::FUNCTION ||
-        current_kind == Code::OPTIMIZED_FUNCTION) {
-      code->ClearInlineCaches(kind);
-    }
-  }
-}
-
-
-void Heap::RepairFreeListsAfterBoot() {
-  PagedSpaces spaces(this);
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->RepairFreeListsAfterBoot();
-  }
-}
-
-
-void Heap::ProcessPretenuringFeedback() {
-  if (FLAG_allocation_site_pretenuring) {
-    int tenure_decisions = 0;
-    int dont_tenure_decisions = 0;
-    int allocation_mementos_found = 0;
-    int allocation_sites = 0;
-    int active_allocation_sites = 0;
-
-    // If the scratchpad overflowed, we have to iterate over the allocation
-    // sites list.
-    // TODO(hpayer): We iterate over the whole list of allocation sites once
-    // we have grown to the maximum semi-space size, to deopt maybe-tenured
-    // allocation sites. We could hold the maybe-tenured allocation sites
-    // in a separate data structure if this is a performance problem.
-    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
-    bool use_scratchpad =
-         allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
-         !deopt_maybe_tenured;
-
-    int i = 0;
-    Object* list_element = allocation_sites_list();
-    bool trigger_deoptimization = false;
-    bool maximum_size_scavenge = MaximumSizeScavenge();
-    while (use_scratchpad ?
-              i < allocation_sites_scratchpad_length_ :
-              list_element->IsAllocationSite()) {
-      AllocationSite* site = use_scratchpad ?
-          AllocationSite::cast(allocation_sites_scratchpad()->get(i)) :
-          AllocationSite::cast(list_element);
-      allocation_mementos_found += site->memento_found_count();
-      if (site->memento_found_count() > 0) {
-        active_allocation_sites++;
-        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
-          trigger_deoptimization = true;
-        }
-        if (site->GetPretenureMode() == TENURED) {
-          tenure_decisions++;
-        } else {
-          dont_tenure_decisions++;
-        }
-        allocation_sites++;
-      }
-
-      if (deopt_maybe_tenured && site->IsMaybeTenure()) {
-        site->set_deopt_dependent_code(true);
-        trigger_deoptimization = true;
-      }
-
-      if (use_scratchpad) {
-        i++;
-      } else {
-        list_element = site->weak_next();
-      }
-    }
-
-    if (trigger_deoptimization) {
-      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
-    }
-
-    FlushAllocationSitesScratchpad();
-
-    if (FLAG_trace_pretenuring_statistics &&
-        (allocation_mementos_found > 0 ||
-         tenure_decisions > 0 ||
-         dont_tenure_decisions > 0)) {
-      PrintF("GC: (mode, #visited allocation sites, #active allocation sites, "
-             "#mementos, #tenure decisions, #donttenure decisions) "
-             "(%s, %d, %d, %d, %d, %d)\n",
-             use_scratchpad ? "use scratchpad" : "use list",
-             allocation_sites,
-             active_allocation_sites,
-             allocation_mementos_found,
-             tenure_decisions,
-             dont_tenure_decisions);
-    }
-  }
-}
-
-
-void Heap::DeoptMarkedAllocationSites() {
-  // TODO(hpayer): If iterating over the allocation sites list becomes a
-  // performance issue, use a cache heap data structure instead (similar to the
-  // allocation sites scratchpad).
-  Object* list_element = allocation_sites_list();
-  while (list_element->IsAllocationSite()) {
-    AllocationSite* site = AllocationSite::cast(list_element);
-    if (site->deopt_dependent_code()) {
-      site->dependent_code()->MarkCodeForDeoptimization(
-          isolate_,
-          DependentCode::kAllocationSiteTenuringChangedGroup);
-      site->set_deopt_dependent_code(false);
-    }
-    list_element = site->weak_next();
-  }
-  Deoptimizer::DeoptimizeMarkedCode(isolate_);
-}
-
-
-void Heap::GarbageCollectionEpilogue() {
-  store_buffer()->GCEpilogue();
-
-  // In release mode, we only zap the from space under heap verification.
-  if (Heap::ShouldZapGarbage()) {
-    ZapFromSpace();
-  }
-
-  // Process pretenuring feedback and update allocation sites.
-  ProcessPretenuringFeedback();
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    Verify();
-  }
-#endif
-
-  AllowHeapAllocation for_the_rest_of_the_epilogue;
-
-#ifdef DEBUG
-  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
-  if (FLAG_print_handles) PrintHandles();
-  if (FLAG_gc_verbose) Print();
-  if (FLAG_code_stats) ReportCodeStatistics("After GC");
-#endif
-  if (FLAG_deopt_every_n_garbage_collections > 0) {
-    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
-    // the topmost optimized frame can be deoptimized safely, because it
-    // might not have a lazy bailout point right after its current PC.
-    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
-      Deoptimizer::DeoptimizeAll(isolate());
-      gcs_since_last_deopt_ = 0;
-    }
-  }
-
-  UpdateMaximumCommitted();
-
-  isolate_->counters()->alive_after_last_gc()->Set(
-      static_cast<int>(SizeOfObjects()));
-
-  isolate_->counters()->string_table_capacity()->Set(
-      string_table()->Capacity());
-  isolate_->counters()->number_of_symbols()->Set(
-      string_table()->NumberOfElements());
-
-  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
-    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
-        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
-            (crankshaft_codegen_bytes_generated_
-            + full_codegen_bytes_generated_)));
-  }
-
-  if (CommittedMemory() > 0) {
-    isolate_->counters()->external_fragmentation_total()->AddSample(
-        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
-
-    isolate_->counters()->heap_fraction_new_space()->
-        AddSample(static_cast<int>(
-            (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-    isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
-        static_cast<int>(
-            (old_pointer_space()->CommittedMemory() * 100.0) /
-            CommittedMemory()));
-    isolate_->counters()->heap_fraction_old_data_space()->AddSample(
-        static_cast<int>(
-            (old_data_space()->CommittedMemory() * 100.0) /
-            CommittedMemory()));
-    isolate_->counters()->heap_fraction_code_space()->
-        AddSample(static_cast<int>(
-            (code_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-    isolate_->counters()->heap_fraction_map_space()->AddSample(
-        static_cast<int>(
-            (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-    isolate_->counters()->heap_fraction_cell_space()->AddSample(
-        static_cast<int>(
-            (cell_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-    isolate_->counters()->heap_fraction_property_cell_space()->
-        AddSample(static_cast<int>(
-            (property_cell_space()->CommittedMemory() * 100.0) /
-            CommittedMemory()));
-    isolate_->counters()->heap_fraction_lo_space()->
-        AddSample(static_cast<int>(
-            (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
-
-    isolate_->counters()->heap_sample_total_committed()->AddSample(
-        static_cast<int>(CommittedMemory() / KB));
-    isolate_->counters()->heap_sample_total_used()->AddSample(
-        static_cast<int>(SizeOfObjects() / KB));
-    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
-        static_cast<int>(map_space()->CommittedMemory() / KB));
-    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
-        static_cast<int>(cell_space()->CommittedMemory() / KB));
-    isolate_->counters()->
-        heap_sample_property_cell_space_committed()->
-            AddSample(static_cast<int>(
-                property_cell_space()->CommittedMemory() / KB));
-    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
-        static_cast<int>(code_space()->CommittedMemory() / KB));
-
-    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
-        static_cast<int>(MaximumCommittedMemory() / KB));
-  }
-
-#define UPDATE_COUNTERS_FOR_SPACE(space)                                       \
-  isolate_->counters()->space##_bytes_available()->Set(                        \
-      static_cast<int>(space()->Available()));                                 \
-  isolate_->counters()->space##_bytes_committed()->Set(                        \
-      static_cast<int>(space()->CommittedMemory()));                           \
-  isolate_->counters()->space##_bytes_used()->Set(                             \
-      static_cast<int>(space()->SizeOfObjects()));
-#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                                  \
-  if (space()->CommittedMemory() > 0) {                                        \
-    isolate_->counters()->external_fragmentation_##space()->AddSample(         \
-        static_cast<int>(100 -                                                 \
-            (space()->SizeOfObjects() * 100.0) / space()->CommittedMemory())); \
-  }
-#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space)                     \
-  UPDATE_COUNTERS_FOR_SPACE(space)                                             \
-  UPDATE_FRAGMENTATION_FOR_SPACE(space)
-
-  UPDATE_COUNTERS_FOR_SPACE(new_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
-  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
-#undef UPDATE_COUNTERS_FOR_SPACE
-#undef UPDATE_FRAGMENTATION_FOR_SPACE
-#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
-
-#ifdef DEBUG
-  ReportStatisticsAfterGC();
-#endif  // DEBUG
-  isolate_->debug()->AfterGarbageCollection();
-
-  // Remember the last top pointer so that we can later find out
-  // whether we allocated in new space since the last GC.
-  new_space_top_after_last_gc_ = new_space()->top();
-}
-
-
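-// Illustrative call site (hypothetical; the flag and reason string below
-// are examples only):
-//   heap->CollectAllGarbage(Heap::kReduceMemoryFootprintMask,
-//                           "low memory notification");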
-void Heap::CollectAllGarbage(int flags,
-                             const char* gc_reason,
-                             const v8::GCCallbackFlags gc_callback_flags) {
-  // Since we are ignoring the return value, the exact choice of space does
-  // not matter, so long as we do not specify NEW_SPACE, which would not
-  // cause a full GC.
-  mark_compact_collector_.SetFlags(flags);
-  CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
-  mark_compact_collector_.SetFlags(kNoGCFlags);
-}
-
-
-void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
-  // Since we are ignoring the return value, the exact choice of space does
-  // not matter, so long as we do not specify NEW_SPACE, which would not
-  // cause a full GC.
-  // Major GC would invoke weak handle callbacks on weakly reachable
-  // handles, but won't collect weakly reachable objects until next
-  // major GC.  Therefore if we collect aggressively and weak handle callback
-  // has been invoked, we rerun major GC to release objects which become
-  // garbage.
-  // Note: as weak callbacks can execute arbitrary code, we cannot
-  // hope that eventually there will be no weak callback invocations.
-  // Therefore we stop recollecting after several attempts.
-  if (isolate()->concurrent_recompilation_enabled()) {
-    // The optimizing compiler may be unnecessarily holding on to memory.
-    DisallowHeapAllocation no_recursive_gc;
-    isolate()->optimizing_compiler_thread()->Flush();
-  }
-  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
-                                     kReduceMemoryFootprintMask);
-  isolate_->compilation_cache()->Clear();
-  const int kMaxNumberOfAttempts = 7;
-  const int kMinNumberOfAttempts = 2;
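-  // In other words: always run at least two mark-compacts, and keep going
-  // (up to seven) only while the previous collection reports that the next
-  // GC is likely to free more memory.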
-  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
-    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
-        attempt + 1 >= kMinNumberOfAttempts) {
-      break;
-    }
-  }
-  mark_compact_collector()->SetFlags(kNoGCFlags);
-  new_space_.Shrink();
-  UncommitFromSpace();
-  incremental_marking()->UncommitMarkingDeque();
-}
-
-
-void Heap::EnsureFillerObjectAtTop() {
-  // There may be an allocation memento behind every object in new space.
-  // If we evacuate a new space that is not full, or if we are on the last
-  // page of the new space, then there may be uninitialized memory behind
-  // the top pointer of the new space page. We store a filler object there
-  // to identify the unused space.
-  Address from_top = new_space_.top();
-  Address from_limit = new_space_.limit();
-  if (from_top < from_limit) {
-    int remaining_in_page = static_cast<int>(from_limit - from_top);
-    CreateFillerObjectAt(from_top, remaining_in_page);
-  }
-}
-
-
-bool Heap::CollectGarbage(GarbageCollector collector,
-                          const char* gc_reason,
-                          const char* collector_reason,
-                          const v8::GCCallbackFlags gc_callback_flags) {
-  // The VM is in the GC state until exiting this function.
-  VMState<GC> state(isolate_);
-
-#ifdef DEBUG
-  // Reset the allocation timeout to the GC interval, but make sure to
-  // allow at least a few allocations after a collection. The reason
-  // for this is that we have a lot of allocation sequences and we
-  // assume that a garbage collection will allow the subsequent
-  // allocation attempts to go through.
-  allocation_timeout_ = Max(6, FLAG_gc_interval);
-#endif
-
-  EnsureFillerObjectAtTop();
-
-  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Scavenge during marking.\n");
-    }
-  }
-
-  if (collector == MARK_COMPACTOR &&
-      !mark_compact_collector()->abort_incremental_marking() &&
-      !incremental_marking()->IsStopped() &&
-      !incremental_marking()->should_hurry() &&
-      FLAG_incremental_marking_steps) {
-    // Make progress in incremental marking.
-    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
-    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
-                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
-    if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
-      if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
-      }
-      collector = SCAVENGER;
-      collector_reason = "incremental marking delaying mark-sweep";
-    }
-  }
-
-  bool next_gc_likely_to_collect_more = false;
-
-  { GCTracer tracer(this, gc_reason, collector_reason);
-    ASSERT(AllowHeapAllocation::IsAllowed());
-    DisallowHeapAllocation no_allocation_during_gc;
-    GarbageCollectionPrologue();
-    // The GC count was incremented in the prologue.  Tell the tracer about
-    // it.
-    tracer.set_gc_count(gc_count_);
-
-    // Tell the tracer which collector we've selected.
-    tracer.set_collector(collector);
-
-    {
-      HistogramTimerScope histogram_timer_scope(
-          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
-                                   : isolate_->counters()->gc_compactor());
-      next_gc_likely_to_collect_more =
-          PerformGarbageCollection(collector, &tracer, gc_callback_flags);
-    }
-
-    GarbageCollectionEpilogue();
-  }
-
-  // Start incremental marking for the next cycle. The heap snapshot
-  // generator needs incremental marking to stay off after it aborted.
-  if (!mark_compact_collector()->abort_incremental_marking() &&
-      incremental_marking()->IsStopped() &&
-      incremental_marking()->WorthActivating() &&
-      NextGCIsLikelyToBeFull()) {
-    incremental_marking()->Start();
-  }
-
-  return next_gc_likely_to_collect_more;
-}
-
-
-int Heap::NotifyContextDisposed() {
-  if (isolate()->concurrent_recompilation_enabled()) {
-    // Flush the queued recompilation tasks.
-    isolate()->optimizing_compiler_thread()->Flush();
-  }
-  flush_monomorphic_ics_ = true;
-  AgeInlineCaches();
-  return ++contexts_disposed_;
-}
-
-
-void Heap::MoveElements(FixedArray* array,
-                        int dst_index,
-                        int src_index,
-                        int len) {
-  if (len == 0) return;
-
-  ASSERT(array->map() != fixed_cow_array_map());
-  Object** dst_objects = array->data_start() + dst_index;
-  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
-  if (!InNewSpace(array)) {
-    for (int i = 0; i < len; i++) {
-      // TODO(hpayer): check store buffer for entries
-      if (InNewSpace(dst_objects[i])) {
-        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
-      }
-    }
-  }
-  incremental_marking()->RecordWrites(array);
-}
-
-
-#ifdef VERIFY_HEAP
-// Helper class for verifying the string table.
-class StringTableVerifier : public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    // Visit all HeapObject pointers in [start, end).
-    for (Object** p = start; p < end; p++) {
-      if ((*p)->IsHeapObject()) {
-        // Check that the string is actually internalized.
-        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
-              (*p)->IsInternalizedString());
-      }
-    }
-  }
-};
-
-
-static void VerifyStringTable(Heap* heap) {
-  StringTableVerifier verifier;
-  heap->string_table()->IterateElements(&verifier);
-}
-#endif  // VERIFY_HEAP
-
-
-static bool AbortIncrementalMarkingAndCollectGarbage(
-    Heap* heap,
-    AllocationSpace space,
-    const char* gc_reason = NULL) {
-  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
-  bool result = heap->CollectGarbage(space, gc_reason);
-  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
-  return result;
-}
-
-
-void Heap::ReserveSpace(int* sizes, Address* locations_out) {
-  bool gc_performed = true;
-  int counter = 0;
-  static const int kThreshold = 20;
-  while (gc_performed && counter++ < kThreshold) {
-    gc_performed = false;
-    ASSERT(NEW_SPACE == FIRST_PAGED_SPACE - 1);
-    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
-      if (sizes[space] != 0) {
-        AllocationResult allocation;
-        if (space == NEW_SPACE) {
-          allocation = new_space()->AllocateRaw(sizes[space]);
-        } else {
-          allocation = paged_space(space)->AllocateRaw(sizes[space]);
-        }
-        FreeListNode* node;
-        if (!allocation.To(&node)) {
-          if (space == NEW_SPACE) {
-            Heap::CollectGarbage(NEW_SPACE,
-                                 "failed to reserve space in the new space");
-          } else {
-            AbortIncrementalMarkingAndCollectGarbage(
-                this,
-                static_cast<AllocationSpace>(space),
-                "failed to reserve space in paged space");
-          }
-          gc_performed = true;
-          break;
-        } else {
-          // Mark with a free list node, in case we have a GC before
-          // deserializing.
-          node->set_size(this, sizes[space]);
-          locations_out[space] = node->address();
-        }
-      }
-    }
-  }
-
-  if (gc_performed) {
-    // Failed to reserve the space after several attempts.
-    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
-  }
-}
-
-
-void Heap::EnsureFromSpaceIsCommitted() {
-  if (new_space_.CommitFromSpaceIfNeeded()) return;
-
-  // Committing memory to from space failed.
-  // Memory is exhausted and we will die.
-  V8::FatalProcessOutOfMemory("Committing semi space failed.");
-}
-
-
-void Heap::ClearJSFunctionResultCaches() {
-  if (isolate_->bootstrapper()->IsActive()) return;
-
-  Object* context = native_contexts_list();
-  while (!context->IsUndefined()) {
-    // Get the caches for this context. GC can happen when the context
-    // is not fully initialized, so the caches can be undefined.
-    Object* caches_or_undefined =
-        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
-    if (!caches_or_undefined->IsUndefined()) {
-      FixedArray* caches = FixedArray::cast(caches_or_undefined);
-      // Clear the caches:
-      int length = caches->length();
-      for (int i = 0; i < length; i++) {
-        JSFunctionResultCache::cast(caches->get(i))->Clear();
-      }
-    }
-    // Get the next context:
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
-  }
-}
-
-
-void Heap::ClearNormalizedMapCaches() {
-  if (isolate_->bootstrapper()->IsActive() &&
-      !incremental_marking()->IsMarking()) {
-    return;
-  }
-
-  Object* context = native_contexts_list();
-  while (!context->IsUndefined()) {
-    // GC can happen when the context is not fully initialized,
-    // so the cache can be undefined.
-    Object* cache =
-        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
-    if (!cache->IsUndefined()) {
-      NormalizedMapCache::cast(cache)->Clear();
-    }
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
-  }
-}
-
-
-void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
-  if (start_new_space_size == 0) return;
-
-  promotion_rate_ =
-        (static_cast<double>(promoted_objects_size_) /
-            static_cast<double>(start_new_space_size) * 100);
-
-  semi_space_copied_rate_ =
-        (static_cast<double>(semi_space_copied_object_size_) /
-            static_cast<double>(start_new_space_size) * 100);
-
-  double survival_rate = promotion_rate_ + semi_space_copied_rate_;
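-  // Worked example (illustrative numbers only): with a 1024 KB new space
-  // at scavenge start, 64 KB promoted and 128 KB copied within the
-  // semispaces give rates of 6.25 and 12.5, i.e. a survival rate of 18.75.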
-
-  if (survival_rate > kYoungSurvivalRateHighThreshold) {
-    high_survival_rate_period_length_++;
-  } else {
-    high_survival_rate_period_length_ = 0;
-  }
-}
-
-bool Heap::PerformGarbageCollection(
-    GarbageCollector collector,
-    GCTracer* tracer,
-    const v8::GCCallbackFlags gc_callback_flags) {
-  int freed_global_handles = 0;
-
-  if (collector != SCAVENGER) {
-    PROFILE(isolate_, CodeMovingGCEvent());
-  }
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    VerifyStringTable(this);
-  }
-#endif
-
-  GCType gc_type =
-      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
-
-  { GCCallbacksScope scope(this);
-    if (scope.CheckReenter()) {
-      AllowHeapAllocation allow_allocation;
-      GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-      VMState<EXTERNAL> state(isolate_);
-      HandleScope handle_scope(isolate_);
-      CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
-    }
-  }
-
-  EnsureFromSpaceIsCommitted();
-
-  int start_new_space_size = Heap::new_space()->SizeAsInt();
-
-  if (IsHighSurvivalRate()) {
-    // We speed up the incremental marker if it is running so that it
-    // does not fall behind the rate of promotion, which would cause a
-    // constantly growing old space.
-    incremental_marking()->NotifyOfHighPromotionRate();
-  }
-
-  if (collector == MARK_COMPACTOR) {
-    // Perform mark-sweep with optional compaction.
-    MarkCompact(tracer);
-    sweep_generation_++;
-    // Temporarily set the limit for the case when
-    // PostGarbageCollectionProcessing allocates and triggers GC. The real
-    // limit is set after PostGarbageCollectionProcessing.
-    old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
-    old_gen_exhausted_ = false;
-  } else {
-    tracer_ = tracer;
-    Scavenge();
-    tracer_ = NULL;
-  }
-
-  UpdateSurvivalStatistics(start_new_space_size);
-
-  isolate_->counters()->objs_since_last_young()->Set(0);
-
-  // Callbacks that fire after this point might trigger nested GCs and
-  // restart incremental marking, so the assertion can't be moved down.
-  ASSERT(collector == SCAVENGER || incremental_marking()->IsStopped());
-
-  gc_post_processing_depth_++;
-  { AllowHeapAllocation allow_allocation;
-    GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-    freed_global_handles =
-        isolate_->global_handles()->PostGarbageCollectionProcessing(
-            collector, tracer);
-  }
-  gc_post_processing_depth_--;
-
-  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
-
-  // Update relocatables.
-  Relocatable::PostGarbageCollectionProcessing(isolate_);
-
-  if (collector == MARK_COMPACTOR) {
-    // Register the amount of external allocated memory.
-    amount_of_external_allocated_memory_at_last_global_gc_ =
-        amount_of_external_allocated_memory_;
-    old_generation_allocation_limit_ =
-        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(),
-                                     freed_global_handles);
-  }
-
-  { GCCallbacksScope scope(this);
-    if (scope.CheckReenter()) {
-      AllowHeapAllocation allow_allocation;
-      GCTracer::Scope scope(tracer, GCTracer::Scope::EXTERNAL);
-      VMState<EXTERNAL> state(isolate_);
-      HandleScope handle_scope(isolate_);
-      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
-    }
-  }
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    VerifyStringTable(this);
-  }
-#endif
-
-  return freed_global_handles > 0;
-}
-
-
-void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
-  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
-    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
-      if (!gc_prologue_callbacks_[i].pass_isolate_) {
-        v8::GCPrologueCallback callback =
-            reinterpret_cast<v8::GCPrologueCallback>(
-                gc_prologue_callbacks_[i].callback);
-        callback(gc_type, flags);
-      } else {
-        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
-        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
-      }
-    }
-  }
-}
-
-
-void Heap::CallGCEpilogueCallbacks(GCType gc_type,
-                                   GCCallbackFlags gc_callback_flags) {
-  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
-    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
-      if (!gc_epilogue_callbacks_[i].pass_isolate_) {
-        v8::GCPrologueCallback callback =
-            reinterpret_cast<v8::GCPrologueCallback>(
-                gc_epilogue_callbacks_[i].callback);
-        callback(gc_type, gc_callback_flags);
-      } else {
-        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
-        gc_epilogue_callbacks_[i].callback(
-            isolate, gc_type, gc_callback_flags);
-      }
-    }
-  }
-}
-
-
-void Heap::MarkCompact(GCTracer* tracer) {
-  gc_state_ = MARK_COMPACT;
-  LOG(isolate_, ResourceEvent("markcompact", "begin"));
-
-  uint64_t size_of_objects_before_gc = SizeOfObjects();
-
-  mark_compact_collector_.Prepare(tracer);
-
-  ms_count_++;
-  tracer->set_full_gc_count(ms_count_);
-
-  MarkCompactPrologue();
-
-  mark_compact_collector_.CollectGarbage();
-
-  LOG(isolate_, ResourceEvent("markcompact", "end"));
-
-  gc_state_ = NOT_IN_GC;
-
-  isolate_->counters()->objs_since_last_full()->Set(0);
-
-  flush_monomorphic_ics_ = false;
-
-  if (FLAG_allocation_site_pretenuring) {
-    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
-  }
-}
-
-
-void Heap::MarkCompactPrologue() {
-  // At any old-generation GC, clear the keyed lookup cache to enable
-  // collection of unused maps.
-  isolate_->keyed_lookup_cache()->Clear();
-  isolate_->context_slot_cache()->Clear();
-  isolate_->descriptor_lookup_cache()->Clear();
-  RegExpResultsCache::Clear(string_split_cache());
-  RegExpResultsCache::Clear(regexp_multiple_cache());
-
-  isolate_->compilation_cache()->MarkCompactPrologue();
-
-  CompletelyClearInstanceofCache();
-
-  FlushNumberStringCache();
-  if (FLAG_cleanup_code_caches_at_gc) {
-    polymorphic_code_cache()->set_cache(undefined_value());
-  }
-
-  ClearNormalizedMapCaches();
-}
-
-
-// Helper class for copying HeapObjects
-class ScavengeVisitor: public ObjectVisitor {
- public:
-  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointer(Object** p) { ScavengePointer(p); }
-
-  void VisitPointers(Object** start, Object** end) {
-    // Copy all HeapObject pointers in [start, end)
-    for (Object** p = start; p < end; p++) ScavengePointer(p);
-  }
-
- private:
-  void ScavengePointer(Object** p) {
-    Object* object = *p;
-    if (!heap_->InNewSpace(object)) return;
-    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
-                         reinterpret_cast<HeapObject*>(object));
-  }
-
-  Heap* heap_;
-};
-
-
-#ifdef VERIFY_HEAP
-// Visitor class to verify pointers in code or data space do not point into
-// new space.
-class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
- public:
-  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
-      }
-    }
-  }
-
- private:
-  Heap* heap_;
-};
-
-
-static void VerifyNonPointerSpacePointers(Heap* heap) {
-  // Verify that there are no pointers to new space in spaces where we
-  // do not expect them.
-  VerifyNonPointerSpacePointersVisitor v(heap);
-  HeapObjectIterator code_it(heap->code_space());
-  for (HeapObject* object = code_it.Next();
-       object != NULL; object = code_it.Next())
-    object->Iterate(&v);
-
-  // The old data space is normally swept conservatively, so the iterator
-  // does not work there and we normally skip the next bit.
-  if (!heap->old_data_space()->was_swept_conservatively()) {
-    HeapObjectIterator data_it(heap->old_data_space());
-    for (HeapObject* object = data_it.Next();
-         object != NULL; object = data_it.Next())
-      object->Iterate(&v);
-  }
-}
-#endif  // VERIFY_HEAP
-
-
-void Heap::CheckNewSpaceExpansionCriteria() {
-  if (new_space_.Capacity() < new_space_.MaximumCapacity() &&
-      survived_since_last_expansion_ > new_space_.Capacity()) {
-    // Grow the size of new space if there is room to grow, enough data
-    // has survived scavenge since the last expansion and we are not in
-    // high promotion mode.
-    new_space_.Grow();
-    survived_since_last_expansion_ = 0;
-  }
-}
-
-
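-// During a scavenge a live new-space object's map word is overwritten with
-// a forwarding address, so an object whose map word is not a forwarding
-// address has not (yet) been reached by the copying collector.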
-static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
-  return heap->InNewSpace(*p) &&
-      !HeapObject::cast(*p)->map_word().IsForwardingAddress();
-}
-
-
-void Heap::ScavengeStoreBufferCallback(
-    Heap* heap,
-    MemoryChunk* page,
-    StoreBufferEvent event) {
-  heap->store_buffer_rebuilder_.Callback(page, event);
-}
-
-
-void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
-  if (event == kStoreBufferStartScanningPagesEvent) {
-    start_of_current_page_ = NULL;
-    current_page_ = NULL;
-  } else if (event == kStoreBufferScanningPageEvent) {
-    if (current_page_ != NULL) {
-      // If this page has already overflowed the store buffer during this
-      // iteration, we wipe out the entries that have been added for it.
-      if (current_page_->scan_on_scavenge()) {
-        store_buffer_->SetTop(start_of_current_page_);
-      } else if (store_buffer_->Top() - start_of_current_page_ >=
-                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
-        // Did we find too many pointers in the previous page?  The heuristic
-        // is that no page may take more than 1/5 of the remaining slots in
-        // the store buffer.
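-        // (With R slots free before this page was scanned and X entries
-        // added for it, the test X >= (R - X) / 4 is equivalent to
-        // X >= R / 5.)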
-        current_page_->set_scan_on_scavenge(true);
-        store_buffer_->SetTop(start_of_current_page_);
-      } else {
-        // In this case the page we scanned took a reasonable number of slots in
-        // the store buffer.  It has now been rehabilitated and is no longer
-        // marked scan_on_scavenge.
-        ASSERT(!current_page_->scan_on_scavenge());
-      }
-    }
-    start_of_current_page_ = store_buffer_->Top();
-    current_page_ = page;
-  } else if (event == kStoreBufferFullEvent) {
-    // The current page overflowed the store buffer again.  Wipe out its entries
-    // in the store buffer and mark it scan-on-scavenge again.  This may happen
-    // several times while scanning.
-    if (current_page_ == NULL) {
-      // Store Buffer overflowed while scanning promoted objects.  These are not
-      // in any particular page, though they are likely to be clustered by the
-      // allocation routines.
-      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
-    } else {
-      // Store Buffer overflowed while scanning a particular old space page for
-      // pointers to new space.
-      ASSERT(current_page_ == page);
-      ASSERT(page != NULL);
-      current_page_->set_scan_on_scavenge(true);
-      ASSERT(start_of_current_page_ != store_buffer_->Top());
-      store_buffer_->SetTop(start_of_current_page_);
-    }
-  } else {
-    UNREACHABLE();
-  }
-}
-
-
-void PromotionQueue::Initialize() {
-  // Assumes that a NewSpacePage exactly fits a whole number of promotion
-  // queue entries (where each is a pair of intptr_t). This allows us to
-  // simplify the test for when to switch pages.
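-  // (For example, with 4-byte pointers each entry occupies 8 bytes, so the
-  // usable page body must be a multiple of 8 for the ASSERT below to hold.)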
-  ASSERT((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize)
-         == 0);
-  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
-  front_ = rear_ =
-      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
-  emergency_stack_ = NULL;
-  guard_ = false;
-}
-
-
-void PromotionQueue::RelocateQueueHead() {
-  ASSERT(emergency_stack_ == NULL);
-
-  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
-  intptr_t* head_start = rear_;
-  intptr_t* head_end =
-      Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
-
-  int entries_count =
-      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
-
-  emergency_stack_ = new List<Entry>(2 * entries_count);
-
-  while (head_start != head_end) {
-    int size = static_cast<int>(*(head_start++));
-    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
-    emergency_stack_->Add(Entry(obj, size));
-  }
-  rear_ = head_end;
-}
-
-
-class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
- public:
-  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) { }
-
-  virtual Object* RetainAs(Object* object) {
-    if (!heap_->InFromSpace(object)) {
-      return object;
-    }
-
-    MapWord map_word = HeapObject::cast(object)->map_word();
-    if (map_word.IsForwardingAddress()) {
-      return map_word.ToForwardingAddress();
-    }
-    return NULL;
-  }
-
- private:
-  Heap* heap_;
-};
-
-
-void Heap::Scavenge() {
-  RelocationLock relocation_lock(this);
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
-#endif
-
-  gc_state_ = SCAVENGE;
-
-  // Implements Cheney's copying algorithm
-  LOG(isolate_, ResourceEvent("scavenge", "begin"));
-
-  // Clear descriptor cache.
-  isolate_->descriptor_lookup_cache()->Clear();
-
-  // Used for updating survived_since_last_expansion_ at function end.
-  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
-
-  SelectScavengingVisitorsTable();
-
-  incremental_marking()->PrepareForScavenge();
-
-  // Flip the semispaces.  After flipping, to space is empty, from space has
-  // live objects.
-  new_space_.Flip();
-  new_space_.ResetAllocationInfo();
-
-  // We need to sweep newly copied objects which can be either in the
-  // to space or promoted to the old generation.  For to-space
-  // objects, we treat the bottom of the to space as a queue.  Newly
-  // copied and unswept objects lie between a 'front' mark and the
-  // allocation pointer.
-  //
-  // Promoted objects can go into various old-generation spaces, and
-  // can be allocated internally in the spaces (from the free list).
-  // We treat the top of the to space as a queue of addresses of
-  // promoted objects.  The addresses of newly promoted and unswept
-  // objects lie between a 'front' mark and a 'rear' mark that is
-  // updated as a side effect of promoting an object.
-  //
-  // There is guaranteed to be enough room at the top of the to space
-  // for the addresses of promoted objects: every object promoted
-  // frees up its size in bytes from the top of the new space, and
-  // objects are at least one pointer in size.
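-  //
-  // A rough mid-scavenge picture of the to space (illustrative only):
-  //
-  //   | swept copies | unswept copies |  free  | promoted addresses |
-  //   ^ToSpaceStart  ^new_space_front ^top     ^rear_        ^front_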
-  Address new_space_front = new_space_.ToSpaceStart();
-  promotion_queue_.Initialize();
-
-#ifdef DEBUG
-  store_buffer()->Clean();
-#endif
-
-  ScavengeVisitor scavenge_visitor(this);
-  // Copy roots.
-  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
-
-  // Copy objects reachable from the old generation.
-  {
-    StoreBufferRebuildScope scope(this,
-                                  store_buffer(),
-                                  &ScavengeStoreBufferCallback);
-    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
-  }
-
-  // Copy objects reachable from simple cells by scavenging cell values
-  // directly.
-  HeapObjectIterator cell_iterator(cell_space_);
-  for (HeapObject* heap_object = cell_iterator.Next();
-       heap_object != NULL;
-       heap_object = cell_iterator.Next()) {
-    if (heap_object->IsCell()) {
-      Cell* cell = Cell::cast(heap_object);
-      Address value_address = cell->ValueAddress();
-      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
-    }
-  }
-
-  // Copy objects reachable from global property cells by scavenging global
-  // property cell values directly.
-  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
-  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
-       heap_object != NULL;
-       heap_object = js_global_property_cell_iterator.Next()) {
-    if (heap_object->IsPropertyCell()) {
-      PropertyCell* cell = PropertyCell::cast(heap_object);
-      Address value_address = cell->ValueAddress();
-      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
-      Address type_address = cell->TypeAddress();
-      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
-    }
-  }
-
-  // Copy objects reachable from the encountered weak collections list.
-  scavenge_visitor.VisitPointer(&encountered_weak_collections_);
-
-  // Copy objects reachable from the code flushing candidates list.
-  MarkCompactCollector* collector = mark_compact_collector();
-  if (collector->is_code_flushing_enabled()) {
-    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
-  }
-
-  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
-  while (isolate()->global_handles()->IterateObjectGroups(
-      &scavenge_visitor, &IsUnscavengedHeapObject)) {
-    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-  }
-  isolate()->global_handles()->RemoveObjectGroups();
-  isolate()->global_handles()->RemoveImplicitRefGroups();
-
-  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
-      &IsUnscavengedHeapObject);
-  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
-      &scavenge_visitor);
-  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
-
-  UpdateNewSpaceReferencesInExternalStringTable(
-      &UpdateNewSpaceReferenceInExternalStringTableEntry);
-
-  promotion_queue_.Destroy();
-
-  incremental_marking()->UpdateMarkingDequeAfterScavenge();
-
-  ScavengeWeakObjectRetainer weak_object_retainer(this);
-  ProcessWeakReferences(&weak_object_retainer);
-
-  ASSERT(new_space_front == new_space_.top());
-
-  // Set age mark.
-  new_space_.set_age_mark(new_space_.top());
-
-  new_space_.LowerInlineAllocationLimit(
-      new_space_.inline_allocation_limit_step());
-
-  // Update how much has survived scavenge.
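-  // This is the number of bytes promoted during this scavenge (the growth
-  // of the promoted spaces since the watermark taken above) plus everything
-  // now live in the new space.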
-  IncrementYoungSurvivorsCounter(static_cast<int>(
-      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
-
-  LOG(isolate_, ResourceEvent("scavenge", "end"));
-
-  gc_state_ = NOT_IN_GC;
-
-  scavenges_since_last_idle_round_++;
-}
-
-
-String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
-                                                                Object** p) {
-  MapWord first_word = HeapObject::cast(*p)->map_word();
-
-  if (!first_word.IsForwardingAddress()) {
-    // Unreachable external string can be finalized.
-    heap->FinalizeExternalString(String::cast(*p));
-    return NULL;
-  }
-
-  // String is still reachable.
-  return String::cast(first_word.ToForwardingAddress());
-}
-
-
-void Heap::UpdateNewSpaceReferencesInExternalStringTable(
-    ExternalStringTableUpdaterCallback updater_func) {
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    external_string_table_.Verify();
-  }
-#endif
-
-  if (external_string_table_.new_space_strings_.is_empty()) return;
-
-  Object** start = &external_string_table_.new_space_strings_[0];
-  Object** end = start + external_string_table_.new_space_strings_.length();
-  Object** last = start;
-
-  for (Object** p = start; p < end; ++p) {
-    ASSERT(InFromSpace(*p));
-    String* target = updater_func(this, p);
-
-    if (target == NULL) continue;
-
-    ASSERT(target->IsExternalString());
-
-    if (InNewSpace(target)) {
-      // String is still in new space.  Update the table entry.
-      *last = target;
-      ++last;
-    } else {
-      // String got promoted.  Move it to the old string list.
-      external_string_table_.AddOldString(target);
-    }
-  }
-
-  ASSERT(last <= end);
-  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
-}
-
-
-void Heap::UpdateReferencesInExternalStringTable(
-    ExternalStringTableUpdaterCallback updater_func) {
-
-  // Update old space string references.
-  if (external_string_table_.old_space_strings_.length() > 0) {
-    Object** start = &external_string_table_.old_space_strings_[0];
-    Object** end = start + external_string_table_.old_space_strings_.length();
-    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
-  }
-
-  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
-}
-
-
-void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
-  ProcessArrayBuffers(retainer);
-  ProcessNativeContexts(retainer);
-  // TODO(mvstanton): AllocationSites only need to be processed during
-  // MARK_COMPACT, as they live in old space. Verify and address.
-  ProcessAllocationSites(retainer);
-}
-
-
-void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
-  Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
-  // Update the head of the list of contexts.
-  set_native_contexts_list(head);
-}
-
-
-void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
-  Object* array_buffer_obj =
-      VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
-  set_array_buffers_list(array_buffer_obj);
-}
-
-
-void Heap::TearDownArrayBuffers() {
-  Object* undefined = undefined_value();
-  for (Object* o = array_buffers_list(); o != undefined;) {
-    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
-    Runtime::FreeArrayBuffer(isolate(), buffer);
-    o = buffer->weak_next();
-  }
-  set_array_buffers_list(undefined);
-}
-
-
-void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
-  Object* allocation_site_obj =
-      VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
-  set_allocation_sites_list(allocation_site_obj);
-}
-
-
-void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
-  DisallowHeapAllocation no_allocation_scope;
-  Object* cur = allocation_sites_list();
-  bool marked = false;
-  while (cur->IsAllocationSite()) {
-    AllocationSite* casted = AllocationSite::cast(cur);
-    if (casted->GetPretenureMode() == flag) {
-      casted->ResetPretenureDecision();
-      casted->set_deopt_dependent_code(true);
-      marked = true;
-    }
-    cur = casted->weak_next();
-  }
-  if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
-}
-
-
-void Heap::EvaluateOldSpaceLocalPretenuring(
-    uint64_t size_of_objects_before_gc) {
-  uint64_t size_of_objects_after_gc = SizeOfObjects();
-  double old_generation_survival_rate =
-      (static_cast<double>(size_of_objects_after_gc) * 100) /
-          static_cast<double>(size_of_objects_before_gc);
-
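-  // Worked example (illustrative numbers only): if the old generation
-  // shrinks from 200 MB to 40 MB across this mark-compact, the survival
-  // rate is (40 * 100) / 200 = 20.0.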
-  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
-    // Too many objects died in the old generation; pretenuring of the
-    // wrong allocation sites may be the cause. We have to deopt all
-    // dependent code registered in the allocation sites to re-evaluate
-    // our pretenuring decisions.
-    ResetAllAllocationSitesDependentCode(TENURED);
-    if (FLAG_trace_pretenuring) {
-      PrintF("Deopt all allocation sites dependent code due to low survival "
-             "rate in the old generation %f\n", old_generation_survival_rate);
-    }
-  }
-}
-
-
-void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
-  DisallowHeapAllocation no_allocation;
-  // All external strings are listed in the external string table.
-
-  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
-   public:
-    explicit ExternalStringTableVisitorAdapter(
-        v8::ExternalResourceVisitor* visitor) : visitor_(visitor) {}
-    virtual void VisitPointers(Object** start, Object** end) {
-      for (Object** p = start; p < end; p++) {
-        ASSERT((*p)->IsExternalString());
-        visitor_->VisitExternalString(Utils::ToLocal(
-            Handle<String>(String::cast(*p))));
-      }
-    }
-   private:
-    v8::ExternalResourceVisitor* visitor_;
-  } external_string_table_visitor(visitor);
-
-  external_string_table_.Iterate(&external_string_table_visitor);
-}
-
-
-class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
- public:
-  static inline void VisitPointer(Heap* heap, Object** p) {
-    Object* object = *p;
-    if (!heap->InNewSpace(object)) return;
-    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
-                         reinterpret_cast<HeapObject*>(object));
-  }
-};
-
-
-Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
-                         Address new_space_front) {
-  do {
-    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
-    // The addresses new_space_front and new_space_.top() define a
-    // queue of unprocessed copied objects.  Process them until the
-    // queue is empty.
-    while (new_space_front != new_space_.top()) {
-      if (!NewSpacePage::IsAtEnd(new_space_front)) {
-        HeapObject* object = HeapObject::FromAddress(new_space_front);
-        new_space_front +=
-          NewSpaceScavenger::IterateBody(object->map(), object);
-      } else {
-        new_space_front =
-            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
-      }
-    }
-
-    // Promote and process all the to-be-promoted objects.
-    {
-      StoreBufferRebuildScope scope(this,
-                                    store_buffer(),
-                                    &ScavengeStoreBufferCallback);
-      while (!promotion_queue()->is_empty()) {
-        HeapObject* target;
-        int size;
-        promotion_queue()->remove(&target, &size);
-
-        // The promoted object might already have been partially visited
-        // during old space pointer iteration. Thus we search specifically
-        // for pointers into the from semispace instead of looking for
-        // pointers into new space.
-        ASSERT(!target->IsMap());
-        IterateAndMarkPointersToFromSpace(target->address(),
-                                          target->address() + size,
-                                          &ScavengeObject);
-      }
-    }
-
-    // Take another spin if there are now unswept objects in new space
-    // (there are currently no more unswept promoted objects).
-  } while (new_space_front != new_space_.top());
-
-  return new_space_front;
-}
-
-
-STATIC_ASSERT((FixedDoubleArray::kHeaderSize &
-               kDoubleAlignmentMask) == 0);  // NOLINT
-STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset &
-               kDoubleAlignmentMask) == 0);  // NOLINT
-STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
-               kDoubleAlignmentMask) == 0);  // NOLINT
-
-
-INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap,
-                                              HeapObject* object,
-                                              int size));
-
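-// EvacuateObject (below) reserves one extra pointer-sized word when double
-// alignment is required, so a filler can always be placed. Example with
-// 4-byte pointers (illustrative addresses): an object at ...04 is
-// misaligned, so a 4-byte filler is written at ...04 and the object is
-// shifted to ...08; an object already at ...08 keeps its address and the
-// filler goes into the unused trailing word.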
-static HeapObject* EnsureDoubleAligned(Heap* heap,
-                                       HeapObject* object,
-                                       int size) {
-  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
-    heap->CreateFillerObjectAt(object->address(), kPointerSize);
-    return HeapObject::FromAddress(object->address() + kPointerSize);
-  } else {
-    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
-                               kPointerSize);
-    return object;
-  }
-}
-
-
-enum LoggingAndProfiling {
-  LOGGING_AND_PROFILING_ENABLED,
-  LOGGING_AND_PROFILING_DISABLED
-};
-
-
-enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
-
-
-template<MarksHandling marks_handling,
-         LoggingAndProfiling logging_and_profiling_mode>
-class ScavengingVisitor : public StaticVisitorBase {
- public:
-  static void Initialize() {
-    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
-    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
-    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
-    table_.Register(kVisitByteArray, &EvacuateByteArray);
-    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
-    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
-    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
-    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
-
-    table_.Register(kVisitNativeContext,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<Context::kSize>);
-
-    table_.Register(kVisitConsString,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<ConsString::kSize>);
-
-    table_.Register(kVisitSlicedString,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<SlicedString::kSize>);
-
-    table_.Register(kVisitSymbol,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<Symbol::kSize>);
-
-    table_.Register(kVisitSharedFunctionInfo,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                        template VisitSpecialized<SharedFunctionInfo::kSize>);
-
-    table_.Register(kVisitJSWeakCollection,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
-
-    table_.Register(kVisitJSArrayBuffer,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
-
-    table_.Register(kVisitJSTypedArray,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
-
-    table_.Register(kVisitJSDataView,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
-
-    table_.Register(kVisitJSRegExp,
-                    &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                    Visit);
-
-    if (marks_handling == IGNORE_MARKS) {
-      table_.Register(kVisitJSFunction,
-                      &ObjectEvacuationStrategy<POINTER_OBJECT>::
-                          template VisitSpecialized<JSFunction::kSize>);
-    } else {
-      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
-    }
-
-    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
-                                   kVisitDataObject,
-                                   kVisitDataObjectGeneric>();
-
-    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
-                                   kVisitJSObject,
-                                   kVisitJSObjectGeneric>();
-
-    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
-                                   kVisitStruct,
-                                   kVisitStructGeneric>();
-  }
-
-  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
-    return &table_;
-  }
-
- private:
-  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
-
-  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
-    bool should_record = false;
-#ifdef DEBUG
-    should_record = FLAG_heap_stats;
-#endif
-    should_record = should_record || FLAG_log_gc;
-    if (should_record) {
-      if (heap->new_space()->Contains(obj)) {
-        heap->new_space()->RecordAllocation(obj);
-      } else {
-        heap->new_space()->RecordPromotion(obj);
-      }
-    }
-  }
-
-  // Helper function used by CopyObject to copy a source object to an
-  // allocated target object and update the forwarding pointer in the source
-  // object.  Returns the target object.
-  INLINE(static void MigrateObject(Heap* heap,
-                                   HeapObject* source,
-                                   HeapObject* target,
-                                   int size)) {
-    // Copy the content of source to target.
-    heap->CopyBlock(target->address(), source->address(), size);
-
-    // Set the forwarding address.
-    source->set_map_word(MapWord::FromForwardingAddress(target));
-
-    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
-      // Update NewSpace stats if necessary.
-      RecordCopiedObject(heap, target);
-      Isolate* isolate = heap->isolate();
-      HeapProfiler* heap_profiler = isolate->heap_profiler();
-      if (heap_profiler->is_tracking_object_moves()) {
-        heap_profiler->ObjectMoveEvent(source->address(), target->address(),
-                                       size);
-      }
-      if (isolate->logger()->is_logging_code_events() ||
-          isolate->cpu_profiler()->is_profiling()) {
-        if (target->IsSharedFunctionInfo()) {
-          PROFILE(isolate, SharedFunctionInfoMoveEvent(
-              source->address(), target->address()));
-        }
-      }
-    }
-
-    if (marks_handling == TRANSFER_MARKS) {
-      if (Marking::TransferColor(source, target)) {
-        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
-      }
-    }
-  }
-
-
-  template<ObjectContents object_contents, int alignment>
-  static inline void EvacuateObject(Map* map,
-                                    HeapObject** slot,
-                                    HeapObject* object,
-                                    int object_size) {
-    SLOW_ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
-    SLOW_ASSERT(object->Size() == object_size);
-
-    int allocation_size = object_size;
-    if (alignment != kObjectAlignment) {
-      ASSERT(alignment == kDoubleAlignment);
-      allocation_size += kPointerSize;
-    }
-
-    Heap* heap = map->GetHeap();
-    if (heap->ShouldBePromoted(object->address(), object_size)) {
-      AllocationResult allocation;
-
-      if (object_contents == DATA_OBJECT) {
-        ASSERT(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
-        allocation = heap->old_data_space()->AllocateRaw(allocation_size);
-      } else {
-        ASSERT(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
-        allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
-      }
-
-      HeapObject* target = NULL;  // Initialization to please compiler.
-      if (allocation.To(&target)) {
-        if (alignment != kObjectAlignment) {
-          target = EnsureDoubleAligned(heap, target, allocation_size);
-        }
-
-        // Order is important: slot might be inside of the target if target
-        // was allocated over a dead object and slot comes from the store
-        // buffer.
-        *slot = target;
-        MigrateObject(heap, object, target, object_size);
-
-        if (object_contents == POINTER_OBJECT) {
-          if (map->instance_type() == JS_FUNCTION_TYPE) {
-            heap->promotion_queue()->insert(
-                target, JSFunction::kNonWeakFieldsEndOffset);
-          } else {
-            heap->promotion_queue()->insert(target, object_size);
-          }
-        }
-
-        heap->IncrementPromotedObjectsSize(object_size);
-        return;
-      }
-    }
-    ASSERT(heap->AllowedToBeMigrated(object, NEW_SPACE));
-    AllocationResult allocation =
-        heap->new_space()->AllocateRaw(allocation_size);
-    heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
-    HeapObject* target = HeapObject::cast(allocation.ToObjectChecked());
-
-    if (alignment != kObjectAlignment) {
-      target = EnsureDoubleAligned(heap, target, allocation_size);
-    }
-
-    // Order is important: slot might be inside of the target if target
-    // was allocated over a dead object and slot comes from the store
-    // buffer.
-    *slot = target;
-    MigrateObject(heap, object, target, object_size);
-    heap->IncrementSemiSpaceCopiedObjectSize(object_size);
-    return;
-  }
-
-
-  static inline void EvacuateJSFunction(Map* map,
-                                        HeapObject** slot,
-                                        HeapObject* object) {
-    ObjectEvacuationStrategy<POINTER_OBJECT>::
-        template VisitSpecialized<JSFunction::kSize>(map, slot, object);
-
-    HeapObject* target = *slot;
-    MarkBit mark_bit = Marking::MarkBitFrom(target);
-    if (Marking::IsBlack(mark_bit)) {
-      // This object is black and it might not be rescanned by the marker.
-      // We should explicitly record the code entry slot for compaction
-      // because promotion queue processing (IterateAndMarkPointersToFromSpace)
-      // will miss it, as it is not HeapObject-tagged.
-      Address code_entry_slot =
-          target->address() + JSFunction::kCodeEntryOffset;
-      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
-      map->GetHeap()->mark_compact_collector()->
-          RecordCodeEntrySlot(code_entry_slot, code);
-    }
-  }
-
-
-  static inline void EvacuateFixedArray(Map* map,
-                                        HeapObject** slot,
-                                        HeapObject* object) {
-    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
-    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
-  }
-
-
-  static inline void EvacuateFixedDoubleArray(Map* map,
-                                              HeapObject** slot,
-                                              HeapObject* object) {
-    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
-    int object_size = FixedDoubleArray::SizeFor(length);
-    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
-        map, slot, object, object_size);
-  }
-
-
-  static inline void EvacuateFixedTypedArray(Map* map,
-                                             HeapObject** slot,
-                                             HeapObject* object) {
-    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
-  }
-
-
-  static inline void EvacuateFixedFloat64Array(Map* map,
-                                               HeapObject** slot,
-                                               HeapObject* object) {
-    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
-    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(
-        map, slot, object, object_size);
-  }
-
-
-  static inline void EvacuateByteArray(Map* map,
-                                       HeapObject** slot,
-                                       HeapObject* object) {
-    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
-  }
-
-
-  static inline void EvacuateSeqOneByteString(Map* map,
-                                              HeapObject** slot,
-                                              HeapObject* object) {
-    int object_size = SeqOneByteString::cast(object)->
-        SeqOneByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
-  }
-
-
-  static inline void EvacuateSeqTwoByteString(Map* map,
-                                              HeapObject** slot,
-                                              HeapObject* object) {
-    int object_size = SeqTwoByteString::cast(object)->
-        SeqTwoByteStringSize(map->instance_type());
-    EvacuateObject<DATA_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
-  }
-
-
-  static inline bool IsShortcutCandidate(int type) {
-    return ((type & kShortcutTypeMask) == kShortcutTypeTag);
-  }
-
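-  // A cons string whose second component is the empty string can be
-  // short-circuited to its first component instead of being copied, but
-  // only while marks are ignored (no incremental marking in progress).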
-  static inline void EvacuateShortcutCandidate(Map* map,
-                                               HeapObject** slot,
-                                               HeapObject* object) {
-    ASSERT(IsShortcutCandidate(map->instance_type()));
-
-    Heap* heap = map->GetHeap();
-
-    if (marks_handling == IGNORE_MARKS &&
-        ConsString::cast(object)->unchecked_second() ==
-        heap->empty_string()) {
-      HeapObject* first =
-          HeapObject::cast(ConsString::cast(object)->unchecked_first());
-
-      *slot = first;
-
-      if (!heap->InNewSpace(first)) {
-        object->set_map_word(MapWord::FromForwardingAddress(first));
-        return;
-      }
-
-      MapWord first_word = first->map_word();
-      if (first_word.IsForwardingAddress()) {
-        HeapObject* target = first_word.ToForwardingAddress();
-
-        *slot = target;
-        object->set_map_word(MapWord::FromForwardingAddress(target));
-        return;
-      }
-
-      heap->DoScavengeObject(first->map(), slot, first);
-      object->set_map_word(MapWord::FromForwardingAddress(*slot));
-      return;
-    }
-
-    int object_size = ConsString::kSize;
-    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(
-        map, slot, object, object_size);
-  }
-
-  template<ObjectContents object_contents>
-  class ObjectEvacuationStrategy {
-   public:
-    template<int object_size>
-    static inline void VisitSpecialized(Map* map,
-                                        HeapObject** slot,
-                                        HeapObject* object) {
-      EvacuateObject<object_contents, kObjectAlignment>(
-          map, slot, object, object_size);
-    }
-
-    static inline void Visit(Map* map,
-                             HeapObject** slot,
-                             HeapObject* object) {
-      int object_size = map->instance_size();
-      EvacuateObject<object_contents, kObjectAlignment>(
-          map, slot, object, object_size);
-    }
-  };
-
-  static VisitorDispatchTable<ScavengingCallback> table_;
-};
-
-
-template<MarksHandling marks_handling,
-         LoggingAndProfiling logging_and_profiling_mode>
-VisitorDispatchTable<ScavengingCallback>
-    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
-
-
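-// Eagerly instantiates the dispatch tables for all four combinations of
-// marks handling and logging/profiling mode, so that
-// Heap::SelectScavengingVisitorsTable can simply copy the appropriate one.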
-static void InitializeScavengingVisitorsTables() {
-  ScavengingVisitor<TRANSFER_MARKS,
-                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
-  ScavengingVisitor<TRANSFER_MARKS,
-                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
-  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
-}
-
-
-void Heap::SelectScavengingVisitorsTable() {
-  bool logging_and_profiling =
-      isolate()->logger()->is_logging() ||
-      isolate()->cpu_profiler()->is_profiling() ||
-      (isolate()->heap_profiler() != NULL &&
-       isolate()->heap_profiler()->is_tracking_object_moves());
-
-  if (!incremental_marking()->IsMarking()) {
-    if (!logging_and_profiling) {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
-                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
-    } else {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<IGNORE_MARKS,
-                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
-    }
-  } else {
-    if (!logging_and_profiling) {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
-                            LOGGING_AND_PROFILING_DISABLED>::GetTable());
-    } else {
-      scavenging_visitors_table_.CopyFrom(
-          ScavengingVisitor<TRANSFER_MARKS,
-                            LOGGING_AND_PROFILING_ENABLED>::GetTable());
-    }
-
-    if (incremental_marking()->IsCompacting()) {
-      // When compacting, forbid short-circuiting of cons strings.
-      // The scavenging code relies on the fact that a new-space object
-      // cannot be evacuated into an evacuation candidate, but
-      // short-circuiting violates this assumption.
-      scavenging_visitors_table_.Register(
-          StaticVisitorBase::kVisitShortcutCandidate,
-          scavenging_visitors_table_.GetVisitorById(
-              StaticVisitorBase::kVisitConsString));
-    }
-  }
-}
-
-
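-// Slow path of scavenging: the object has not been forwarded yet, so
-// dispatch to the map-specific scavenging callback.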
-void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
-  SLOW_ASSERT(object->GetIsolate()->heap()->InFromSpace(object));
-  MapWord first_word = object->map_word();
-  SLOW_ASSERT(!first_word.IsForwardingAddress());
-  Map* map = first_word.ToMap();
-  map->GetHeap()->DoScavengeObject(map, p, object);
-}
-
-
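-// Allocates a map with only the fields required during bootstrapping; the
-// remaining fields are filled in later by CreateInitialMaps.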
-AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
-                                          int instance_size) {
-  Object* result;
-  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
-  if (!allocation.To(&result)) return allocation;
-
-  // Map::cast cannot be used due to uninitialized map field.
-  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
-  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
-  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
-  reinterpret_cast<Map*>(result)->set_visitor_id(
-        StaticVisitorBase::GetVisitorId(instance_type, instance_size));
-  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
-  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
-  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
-  reinterpret_cast<Map*>(result)->set_bit_field(0);
-  reinterpret_cast<Map*>(result)->set_bit_field2(0);
-  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
-                   Map::OwnsDescriptors::encode(true);
-  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
-  return result;
-}
-
-
-AllocationResult Heap::AllocateMap(InstanceType instance_type,
-                                   int instance_size,
-                                   ElementsKind elements_kind) {
-  HeapObject* result;
-  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
-  if (!allocation.To(&result)) return allocation;
-
-  result->set_map_no_write_barrier(meta_map());
-  Map* map = Map::cast(result);
-  map->set_instance_type(instance_type);
-  map->set_visitor_id(
-      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
-  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
-  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
-  map->set_instance_size(instance_size);
-  map->set_inobject_properties(0);
-  map->set_pre_allocated_property_fields(0);
-  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
-  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
-                          SKIP_WRITE_BARRIER);
-  map->init_back_pointer(undefined_value());
-  map->set_unused_property_fields(0);
-  map->set_instance_descriptors(empty_descriptor_array());
-  map->set_bit_field(0);
-  map->set_bit_field2(1 << Map::kIsExtensible);
-  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
-                   Map::OwnsDescriptors::encode(true);
-  map->set_bit_field3(bit_field3);
-  map->set_elements_kind(elements_kind);
-
-  return map;
-}
-
-
-AllocationResult Heap::AllocateFillerObject(int size,
-                                            bool double_align,
-                                            AllocationSpace space) {
-  HeapObject* obj;
-  { AllocationResult allocation = AllocateRaw(size, space, space);
-    if (!allocation.To(&obj)) return allocation;
-  }
-#ifdef DEBUG
-  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-  ASSERT(chunk->owner()->identity() == space);
-#endif
-  CreateFillerObjectAt(obj->address(), size);
-  return obj;
-}
-
-
-const Heap::StringTypeTable Heap::string_type_table[] = {
-#define STRING_TYPE_ELEMENT(type, size, name, camel_name)                      \
-  {type, size, k##camel_name##MapRootIndex},
-  STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
-#undef STRING_TYPE_ELEMENT
-};
-
-
-const Heap::ConstantStringTable Heap::constant_string_table[] = {
-#define CONSTANT_STRING_ELEMENT(name, contents)                                \
-  {contents, k##name##RootIndex},
-  INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
-#undef CONSTANT_STRING_ELEMENT
-};
-
-
-const Heap::StructTable Heap::struct_table[] = {
-#define STRUCT_TABLE_ELEMENT(NAME, Name, name)                                 \
-  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex },
-  STRUCT_LIST(STRUCT_TABLE_ELEMENT)
-#undef STRUCT_TABLE_ELEMENT
-};
-
-
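-// Bootstraps the map hierarchy: the self-referential meta map is created
-// first, followed by partial maps that are backpatched once the empty fixed
-// array and empty descriptor array exist.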
-bool Heap::CreateInitialMaps() {
-  HeapObject* obj;
-  { AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
-    if (!allocation.To(&obj)) return false;
-  }
-  // Map::cast cannot be used due to uninitialized map field.
-  Map* new_meta_map = reinterpret_cast<Map*>(obj);
-  set_meta_map(new_meta_map);
-  new_meta_map->set_map(new_meta_map);
-
-  {  // Partial map allocation
-#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                  \
-    { Map* map;                                                                \
-      if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
-      set_##field_name##_map(map);                                             \
-    }
-
-    ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
-    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
-    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
-    ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
-                         constant_pool_array);
-
-#undef ALLOCATE_PARTIAL_MAP
-  }
-
-  // Allocate the empty array.
-  { AllocationResult allocation = AllocateEmptyFixedArray();
-    if (!allocation.To(&obj)) return false;
-  }
-  set_empty_fixed_array(FixedArray::cast(obj));
-
-  { AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
-    if (!allocation.To(&obj)) return false;
-  }
-  set_null_value(Oddball::cast(obj));
-  Oddball::cast(obj)->set_kind(Oddball::kNull);
-
-  { AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
-    if (!allocation.To(&obj)) return false;
-  }
-  set_undefined_value(Oddball::cast(obj));
-  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
-  ASSERT(!InNewSpace(undefined_value()));
-
-  // Set preliminary exception sentinel value before actually initializing it.
-  set_exception(null_value());
-
-  // Allocate the empty descriptor array.
-  { AllocationResult allocation = AllocateEmptyFixedArray();
-    if (!allocation.To(&obj)) return false;
-  }
-  set_empty_descriptor_array(DescriptorArray::cast(obj));
-
-  // Allocate the constant pool array.
-  { AllocationResult allocation = AllocateEmptyConstantPoolArray();
-    if (!allocation.To(&obj)) return false;
-  }
-  set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
-
-  // Fix the instance_descriptors for the existing maps.
-  meta_map()->set_code_cache(empty_fixed_array());
-  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  meta_map()->init_back_pointer(undefined_value());
-  meta_map()->set_instance_descriptors(empty_descriptor_array());
-
-  fixed_array_map()->set_code_cache(empty_fixed_array());
-  fixed_array_map()->set_dependent_code(
-      DependentCode::cast(empty_fixed_array()));
-  fixed_array_map()->init_back_pointer(undefined_value());
-  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
-
-  undefined_map()->set_code_cache(empty_fixed_array());
-  undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  undefined_map()->init_back_pointer(undefined_value());
-  undefined_map()->set_instance_descriptors(empty_descriptor_array());
-
-  null_map()->set_code_cache(empty_fixed_array());
-  null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
-  null_map()->init_back_pointer(undefined_value());
-  null_map()->set_instance_descriptors(empty_descriptor_array());
-
-  constant_pool_array_map()->set_code_cache(empty_fixed_array());
-  constant_pool_array_map()->set_dependent_code(
-      DependentCode::cast(empty_fixed_array()));
-  constant_pool_array_map()->init_back_pointer(undefined_value());
-  constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
-
-  // Fix prototype object for existing maps.
-  meta_map()->set_prototype(null_value());
-  meta_map()->set_constructor(null_value());
-
-  fixed_array_map()->set_prototype(null_value());
-  fixed_array_map()->set_constructor(null_value());
-
-  undefined_map()->set_prototype(null_value());
-  undefined_map()->set_constructor(null_value());
-
-  null_map()->set_prototype(null_value());
-  null_map()->set_constructor(null_value());
-
-  constant_pool_array_map()->set_prototype(null_value());
-  constant_pool_array_map()->set_constructor(null_value());
-
-  {  // Map allocation
-#define ALLOCATE_MAP(instance_type, size, field_name)                          \
-    { Map* map;                                                                \
-      if (!AllocateMap((instance_type), size).To(&map)) return false;          \
-      set_##field_name##_map(map);                                             \
-    }
-
-#define ALLOCATE_VARSIZE_MAP(instance_type, field_name)                        \
-    ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
-
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
-    ASSERT(fixed_array_map() != fixed_cow_array_map());
-
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
-    ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
-    ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
-    ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
-
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
-    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
-
-    for (unsigned i = 0; i < ARRAY_SIZE(string_type_table); i++) {
-      const StringTypeTable& entry = string_type_table[i];
-      { AllocationResult allocation = AllocateMap(entry.type, entry.size);
-        if (!allocation.To(&obj)) return false;
-      }
-      // Mark cons string maps as unstable, because their objects can change
-      // maps during GC.
-      Map* map = Map::cast(obj);
-      if (StringShape(entry.type).IsCons()) map->mark_unstable();
-      roots_[entry.index] = map;
-    }
-
-    ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
-    undetectable_string_map()->set_is_undetectable();
-
-    ALLOCATE_VARSIZE_MAP(ASCII_STRING_TYPE, undetectable_ascii_string);
-    undetectable_ascii_string_map()->set_is_undetectable();
-
-    ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
-    ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
-    ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
-
-#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)            \
-    ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize,   \
-        external_##type##_array)
-
-     TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
-#undef ALLOCATE_EXTERNAL_ARRAY_MAP
-
-#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size)         \
-    ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE,                           \
-        fixed_##type##_array)
-
-     TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
-#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
-
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
-
-    ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
-
-    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
-    ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
-    ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
-    ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
-
-    for (unsigned i = 0; i < ARRAY_SIZE(struct_table); i++) {
-      const StructTable& entry = struct_table[i];
-      Map* map;
-      if (!AllocateMap(entry.type, entry.size).To(&map))
-        return false;
-      roots_[entry.index] = map;
-    }
-
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
-
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
-
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
-    native_context_map()->set_dictionary_map(true);
-    native_context_map()->set_visitor_id(
-        StaticVisitorBase::kVisitNativeContext);
-
-    ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
-        shared_function_info)
-
-    ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize,
-        message_object)
-    ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize,
-        external)
-    external_map()->set_is_extensible(false);
-#undef ALLOCATE_VARSIZE_MAP
-#undef ALLOCATE_MAP
-  }
-
-  {  // Empty arrays
-    { ByteArray* byte_array;
-      if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
-      set_empty_byte_array(byte_array);
-    }
-
-#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)           \
-    { ExternalArray* obj;                                                      \
-      if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj))        \
-          return false;                                                        \
-      set_empty_external_##type##_array(obj);                                  \
-    }
-
-    TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
-#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
-
-#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size)        \
-    { FixedTypedArrayBase* obj;                                                \
-      if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj))      \
-          return false;                                                        \
-      set_empty_fixed_##type##_array(obj);                                     \
-    }
-
-    TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
-#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
-  }
-  ASSERT(!InNewSpace(empty_fixed_array()));
-  return true;
-}
-
-
-AllocationResult Heap::AllocateHeapNumber(double value,
-                                          PretenureFlag pretenure) {
-  // Statically ensure that it is safe to allocate heap numbers in paged
-  // spaces.
-  int size = HeapNumber::kSize;
-  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
-
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  result->set_map_no_write_barrier(heap_number_map());
-  HeapNumber::cast(result)->set_value(value);
-  return result;
-}
-
-
-AllocationResult Heap::AllocateCell(Object* value) {
-  int size = Cell::kSize;
-  STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
-
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-  result->set_map_no_write_barrier(cell_map());
-  Cell::cast(result)->set_value(value);
-  return result;
-}
-
-
-AllocationResult Heap::AllocatePropertyCell() {
-  int size = PropertyCell::kSize;
-  STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
-
-  HeapObject* result;
-  AllocationResult allocation =
-      AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
-  if (!allocation.To(&result)) return allocation;
-
-  result->set_map_no_write_barrier(global_property_cell_map());
-  PropertyCell* cell = PropertyCell::cast(result);
-  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
-                           SKIP_WRITE_BARRIER);
-  cell->set_value(the_hole_value());
-  cell->set_type(HeapType::None());
-  return result;
-}
-
-
-void Heap::CreateApiObjects() {
-  HandleScope scope(isolate());
-  Factory* factory = isolate()->factory();
-  Handle<Map> new_neander_map =
-      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-
-  // Don't use Smi-only elements optimizations for objects with the neander
-  // map. There are too many cases where element values are set directly,
-  // with no bottleneck to trap the Smi-only -> fast elements transition,
-  // and there appears to be no benefit in optimizing this case.
-  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
-  set_neander_map(*new_neander_map);
-
-  Handle<JSObject> listeners = factory->NewNeanderObject();
-  Handle<FixedArray> elements = factory->NewFixedArray(2);
-  elements->set(0, Smi::FromInt(0));
-  listeners->set_elements(*elements);
-  set_message_listeners(*listeners);
-}
-
-
-void Heap::CreateJSEntryStub() {
-  JSEntryStub stub(isolate());
-  set_js_entry_code(*stub.GetCode());
-}
-
-
-void Heap::CreateJSConstructEntryStub() {
-  JSConstructEntryStub stub(isolate());
-  set_js_construct_entry_code(*stub.GetCode());
-}
-
-
-void Heap::CreateFixedStubs() {
-  // Here we create roots for fixed stubs. They are needed at GC
-  // for cooking and uncooking (check out frames.cc).
-  // This eliminates the need for a dictionary lookup in the
-  // stub cache for these stubs.
-  HandleScope scope(isolate());
-
-  // Create stubs that should be there, so we don't unexpectedly have to
-  // create them if we need them during the creation of another stub.
-  // Stub creation mixes raw pointers and handles in an unsafe manner so
-  // we cannot create stubs while we are creating stubs.
-  CodeStub::GenerateStubsAheadOfTime(isolate());
-
-  // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
-  // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
-  // is created.
-
-  // gcc-4.4 has problem generating correct code of following snippet:
-  // {  JSEntryStub stub;
-  //    js_entry_code_ = *stub.GetCode();
-  // }
-  // {  JSConstructEntryStub stub;
-  //    js_construct_entry_code_ = *stub.GetCode();
-  // }
-  // To work around the problem, use separate functions that are not inlined.
-  Heap::CreateJSEntryStub();
-  Heap::CreateJSConstructEntryStub();
-}
-
-
-void Heap::CreateInitialObjects() {
-  HandleScope scope(isolate());
-  Factory* factory = isolate()->factory();
-
-  // The -0 value must be set before NumberFromDouble works.
-  set_minus_zero_value(*factory->NewHeapNumber(-0.0, TENURED));
-  ASSERT(std::signbit(minus_zero_value()->Number()) != 0);
-
-  set_nan_value(*factory->NewHeapNumber(OS::nan_value(), TENURED));
-  set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, TENURED));
-
-  // The hole has not been created yet, but we want to put something
-  // predictable in the gaps in the string table, so let's make that Smi zero.
-  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
-
-  // Allocate initial string table.
-  set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
-
-  // Finish initializing oddballs after creating the string table.
-  Oddball::Initialize(isolate(),
-                      factory->undefined_value(),
-                      "undefined",
-                      factory->nan_value(),
-                      Oddball::kUndefined);
-
-  // Initialize the null_value.
-  Oddball::Initialize(isolate(),
-                      factory->null_value(),
-                      "null",
-                      handle(Smi::FromInt(0), isolate()),
-                      Oddball::kNull);
-
-  set_true_value(*factory->NewOddball(factory->boolean_map(),
-                                      "true",
-                                      handle(Smi::FromInt(1), isolate()),
-                                      Oddball::kTrue));
-
-  set_false_value(*factory->NewOddball(factory->boolean_map(),
-                                       "false",
-                                       handle(Smi::FromInt(0), isolate()),
-                                       Oddball::kFalse));
-
-  set_the_hole_value(*factory->NewOddball(factory->the_hole_map(),
-                                          "hole",
-                                          handle(Smi::FromInt(-1), isolate()),
-                                          Oddball::kTheHole));
-
-  set_uninitialized_value(
-      *factory->NewOddball(factory->uninitialized_map(),
-                           "uninitialized",
-                           handle(Smi::FromInt(-1), isolate()),
-                           Oddball::kUninitialized));
-
-  set_arguments_marker(*factory->NewOddball(factory->arguments_marker_map(),
-                                            "arguments_marker",
-                                            handle(Smi::FromInt(-4), isolate()),
-                                            Oddball::kArgumentMarker));
-
-  set_no_interceptor_result_sentinel(
-      *factory->NewOddball(factory->no_interceptor_result_sentinel_map(),
-                           "no_interceptor_result_sentinel",
-                           handle(Smi::FromInt(-2), isolate()),
-                           Oddball::kOther));
-
-  set_termination_exception(
-      *factory->NewOddball(factory->termination_exception_map(),
-                           "termination_exception",
-                           handle(Smi::FromInt(-3), isolate()),
-                           Oddball::kOther));
-
-  set_exception(
-      *factory->NewOddball(factory->exception_map(),
-                           "exception",
-                           handle(Smi::FromInt(-5), isolate()),
-                           Oddball::kException));
-
-  for (unsigned i = 0; i < ARRAY_SIZE(constant_string_table); i++) {
-    Handle<String> str =
-        factory->InternalizeUtf8String(constant_string_table[i].contents);
-    roots_[constant_string_table[i].index] = *str;
-  }
-
-  // Allocate the hidden string which is used to identify the hidden properties
-  // in JSObjects. The hash code has a special value so that it will not match
-  // the empty string when searching for the property. It cannot be part of the
-  // loop above because it needs to be allocated manually with the special
-  // hash code in place. The hash code for the hidden_string is zero to ensure
-  // that it will always be at the first entry in property descriptors.
-  hidden_string_ = *factory->NewOneByteInternalizedString(
-      OneByteVector("", 0), String::kEmptyStringHash);
-
-  // Create the code_stubs dictionary. The initial size is set to avoid
-  // expanding the dictionary during bootstrapping.
-  set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
-
-  // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
-  // is set to avoid expanding the dictionary during bootstrapping.
-  set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
-
-  set_polymorphic_code_cache(PolymorphicCodeCache::cast(
-      *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
-
-  set_instanceof_cache_function(Smi::FromInt(0));
-  set_instanceof_cache_map(Smi::FromInt(0));
-  set_instanceof_cache_answer(Smi::FromInt(0));
-
-  CreateFixedStubs();
-
-  // Allocate the dictionary of intrinsic function names.
-  Handle<NameDictionary> intrinsic_names =
-      NameDictionary::New(isolate(), Runtime::kNumFunctions);
-  Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
-  set_intrinsic_function_names(*intrinsic_names);
-
-  set_number_string_cache(*factory->NewFixedArray(
-      kInitialNumberStringCacheSize * 2, TENURED));
-
-  // Allocate cache for single character one byte strings.
-  set_single_character_string_cache(*factory->NewFixedArray(
-      String::kMaxOneByteCharCode + 1, TENURED));
-
-  // Allocate cache for string split and regexp-multiple.
-  set_string_split_cache(*factory->NewFixedArray(
-      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
-  set_regexp_multiple_cache(*factory->NewFixedArray(
-      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
-
-  // Allocate cache for external strings pointing to native source code.
-  set_natives_source_cache(*factory->NewFixedArray(
-      Natives::GetBuiltinsCount()));
-
-  set_undefined_cell(*factory->NewCell(factory->undefined_value()));
-
-  // The symbol registry is initialized lazily.
-  set_symbol_registry(undefined_value());
-
-  // Allocate object to hold object observation state.
-  set_observation_state(*factory->NewJSObjectFromMap(
-      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
-
-  // Microtask queue uses the empty fixed array as a sentinel for "empty".
-  // Number of queued microtasks stored in Isolate::pending_microtask_count().
-  set_microtask_queue(empty_fixed_array());
-
-  set_frozen_symbol(*factory->NewPrivateSymbol());
-  set_nonexistent_symbol(*factory->NewPrivateSymbol());
-  set_elements_transition_symbol(*factory->NewPrivateSymbol());
-  set_uninitialized_symbol(*factory->NewPrivateSymbol());
-  set_megamorphic_symbol(*factory->NewPrivateSymbol());
-  set_observed_symbol(*factory->NewPrivateSymbol());
-
-  Handle<SeededNumberDictionary> slow_element_dictionary =
-      SeededNumberDictionary::New(isolate(), 0, TENURED);
-  slow_element_dictionary->set_requires_slow_elements();
-  set_empty_slow_element_dictionary(*slow_element_dictionary);
-
-  set_materialized_objects(*factory->NewFixedArray(0, TENURED));
-
-  // Handling of script id generation is in Factory::NewScript.
-  set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
-
-  set_allocation_sites_scratchpad(*factory->NewFixedArray(
-      kAllocationSiteScratchpadSize, TENURED));
-  InitializeAllocationSitesScratchpad();
-
-  // Initialize keyed lookup cache.
-  isolate_->keyed_lookup_cache()->Clear();
-
-  // Initialize context slot cache.
-  isolate_->context_slot_cache()->Clear();
-
-  // Initialize descriptor cache.
-  isolate_->descriptor_lookup_cache()->Clear();
-
-  // Initialize compilation cache.
-  isolate_->compilation_cache()->Clear();
-}
-
-
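-// Returns true for roots that may legitimately change after the heap has
-// been set up (caches, limits, the string table); all other roots are
-// treated as immutable once initialization completes.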
-bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
-  RootListIndex writable_roots[] = {
-    kStoreBufferTopRootIndex,
-    kStackLimitRootIndex,
-    kNumberStringCacheRootIndex,
-    kInstanceofCacheFunctionRootIndex,
-    kInstanceofCacheMapRootIndex,
-    kInstanceofCacheAnswerRootIndex,
-    kCodeStubsRootIndex,
-    kNonMonomorphicCacheRootIndex,
-    kPolymorphicCodeCacheRootIndex,
-    kLastScriptIdRootIndex,
-    kEmptyScriptRootIndex,
-    kRealStackLimitRootIndex,
-    kArgumentsAdaptorDeoptPCOffsetRootIndex,
-    kConstructStubDeoptPCOffsetRootIndex,
-    kGetterStubDeoptPCOffsetRootIndex,
-    kSetterStubDeoptPCOffsetRootIndex,
-    kStringTableRootIndex,
-  };
-
-  for (unsigned int i = 0; i < ARRAY_SIZE(writable_roots); i++) {
-    if (root_index == writable_roots[i])
-      return true;
-  }
-  return false;
-}
-
-
-bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
-  return !RootCanBeWrittenAfterInitialization(root_index) &&
-      !InNewSpace(roots_array_start()[root_index]);
-}
-
-
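-// The results cache is a flat FixedArray of (string, pattern, results)
-// entries. A lookup probes the entry derived from the string's hash and, on
-// a miss, the immediately following entry.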
-Object* RegExpResultsCache::Lookup(Heap* heap,
-                                   String* key_string,
-                                   Object* key_pattern,
-                                   ResultsCacheType type) {
-  FixedArray* cache;
-  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
-  if (type == STRING_SPLIT_SUBSTRINGS) {
-    ASSERT(key_pattern->IsString());
-    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
-    cache = heap->string_split_cache();
-  } else {
-    ASSERT(type == REGEXP_MULTIPLE_INDICES);
-    ASSERT(key_pattern->IsFixedArray());
-    cache = heap->regexp_multiple_cache();
-  }
-
-  uint32_t hash = key_string->Hash();
-  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
-      ~(kArrayEntriesPerCacheEntry - 1));
-  if (cache->get(index + kStringOffset) == key_string &&
-      cache->get(index + kPatternOffset) == key_pattern) {
-    return cache->get(index + kArrayOffset);
-  }
-  index =
-      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
-  if (cache->get(index + kStringOffset) == key_string &&
-      cache->get(index + kPatternOffset) == key_pattern) {
-    return cache->get(index + kArrayOffset);
-  }
-  return Smi::FromInt(0);
-}
-
-
-void RegExpResultsCache::Enter(Isolate* isolate,
-                               Handle<String> key_string,
-                               Handle<Object> key_pattern,
-                               Handle<FixedArray> value_array,
-                               ResultsCacheType type) {
-  Factory* factory = isolate->factory();
-  Handle<FixedArray> cache;
-  if (!key_string->IsInternalizedString()) return;
-  if (type == STRING_SPLIT_SUBSTRINGS) {
-    ASSERT(key_pattern->IsString());
-    if (!key_pattern->IsInternalizedString()) return;
-    cache = factory->string_split_cache();
-  } else {
-    ASSERT(type == REGEXP_MULTIPLE_INDICES);
-    ASSERT(key_pattern->IsFixedArray());
-    cache = factory->regexp_multiple_cache();
-  }
-
-  uint32_t hash = key_string->Hash();
-  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
-      ~(kArrayEntriesPerCacheEntry - 1));
-  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
-    cache->set(index + kStringOffset, *key_string);
-    cache->set(index + kPatternOffset, *key_pattern);
-    cache->set(index + kArrayOffset, *value_array);
-  } else {
-    uint32_t index2 =
-        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
-    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
-      cache->set(index2 + kStringOffset, *key_string);
-      cache->set(index2 + kPatternOffset, *key_pattern);
-      cache->set(index2 + kArrayOffset, *value_array);
-    } else {
-      cache->set(index2 + kStringOffset, Smi::FromInt(0));
-      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
-      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
-      cache->set(index + kStringOffset, *key_string);
-      cache->set(index + kPatternOffset, *key_pattern);
-      cache->set(index + kArrayOffset, *value_array);
-    }
-  }
-  // If the array is a reasonably short list of substrings, convert it into a
-  // list of internalized strings.
-  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
-    for (int i = 0; i < value_array->length(); i++) {
-      Handle<String> str(String::cast(value_array->get(i)), isolate);
-      Handle<String> internalized_str = factory->InternalizeString(str);
-      value_array->set(i, *internalized_str);
-    }
-  }
-  // Convert backing store to a copy-on-write array.
-  value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
-}
-
-
-void RegExpResultsCache::Clear(FixedArray* cache) {
-  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
-    cache->set(i, Smi::FromInt(0));
-  }
-}
-
-
-int Heap::FullSizeNumberStringCacheLength() {
-  // Compute the size of the number string cache based on the max new-space
-  // size. The cache has a minimum size of twice the initial cache size to
-  // ensure that it is bigger after being made 'full size'.
-  int number_string_cache_size = max_semi_space_size_ / 512;
-  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
-                                 Min(0x4000, number_string_cache_size));
-  // There is a string and a number per entry so the length is twice the number
-  // of entries.
-  return number_string_cache_size * 2;
-}
-
-
-void Heap::FlushNumberStringCache() {
-  // Flush the number to string cache.
-  int len = number_string_cache()->length();
-  for (int i = 0; i < len; i++) {
-    number_string_cache()->set_undefined(i);
-  }
-}
-
-
-void Heap::FlushAllocationSitesScratchpad() {
-  for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
-    allocation_sites_scratchpad()->set_undefined(i);
-  }
-  allocation_sites_scratchpad_length_ = 0;
-}
-
-
-void Heap::InitializeAllocationSitesScratchpad() {
-  ASSERT(allocation_sites_scratchpad()->length() ==
-         kAllocationSiteScratchpadSize);
-  for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
-    allocation_sites_scratchpad()->set_undefined(i);
-  }
-}
-
-
-void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
-                                         ScratchpadSlotMode mode) {
-  if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
-    // We cannot use the normal write-barrier because slots need to be
-    // recorded with non-incremental marking as well. We have to explicitly
-    // record the slot to take evacuation candidates into account.
-    allocation_sites_scratchpad()->set(
-        allocation_sites_scratchpad_length_, site, SKIP_WRITE_BARRIER);
-    Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
-        allocation_sites_scratchpad_length_);
-
-    if (mode == RECORD_SCRATCHPAD_SLOT) {
-      // We need to allow slots buffer overflow here since the evacuation
-      // candidates are not part of the global list of old space pages and
-      // releasing an evacuation candidate due to a slots buffer overflow
-      // results in lost pages.
-      mark_compact_collector()->RecordSlot(
-          slot, slot, *slot, SlotsBuffer::IGNORE_OVERFLOW);
-    }
-    allocation_sites_scratchpad_length_++;
-  }
-}
-
-
-Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
-  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
-}
-
-
-Heap::RootListIndex Heap::RootIndexForExternalArrayType(
-    ExternalArrayType array_type) {
-  switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
-    case kExternal##Type##Array:                                              \
-      return kExternal##Type##ArrayMapRootIndex;
-
-    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
-#undef ARRAY_TYPE_TO_ROOT_INDEX
-
-    default:
-      UNREACHABLE();
-      return kUndefinedValueRootIndex;
-  }
-}
-
-
-Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
-  return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
-}
-
-
-Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
-    ExternalArrayType array_type) {
-  switch (array_type) {
-#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)               \
-    case kExternal##Type##Array:                                              \
-      return kFixed##Type##ArrayMapRootIndex;
-
-    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
-#undef ARRAY_TYPE_TO_ROOT_INDEX
-
-    default:
-      UNREACHABLE();
-      return kUndefinedValueRootIndex;
-  }
-}
-
-
-Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
-    ElementsKind elementsKind) {
-  switch (elementsKind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)             \
-    case EXTERNAL_##TYPE##_ELEMENTS:                                          \
-      return kEmptyExternal##Type##ArrayRootIndex;
-
-    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
-#undef ELEMENT_KIND_TO_ROOT_INDEX
-
-    default:
-      UNREACHABLE();
-      return kUndefinedValueRootIndex;
-  }
-}
-
-
-Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
-    ElementsKind elementsKind) {
-  switch (elementsKind) {
-#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size)             \
-    case TYPE##_ELEMENTS:                                                     \
-      return kEmptyFixed##Type##ArrayRootIndex;
-
-    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
-#undef ELEMENT_KIND_TO_ROOT_INDEX
-    default:
-      UNREACHABLE();
-      return kUndefinedValueRootIndex;
-  }
-}
-
-
-ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
-  return ExternalArray::cast(
-      roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
-}
-
-
-FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
-  return FixedTypedArrayBase::cast(
-      roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
-}
-
-
-AllocationResult Heap::AllocateForeign(Address address,
-                                       PretenureFlag pretenure) {
-  // Statically ensure that it is safe to allocate foreigns in paged spaces.
-  STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
-  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
-  Foreign* result;
-  AllocationResult allocation = Allocate(foreign_map(), space);
-  if (!allocation.To(&result)) return allocation;
-  result->set_foreign_address(address);
-  return result;
-}
-
-
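-// Allocates an uninitialized ByteArray of the given length; negative or
-// overly long lengths abort with a fatal out-of-memory error.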
-AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
-  if (length < 0 || length > ByteArray::kMaxLength) {
-    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
-  }
-  int size = ByteArray::SizeFor(length);
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  result->set_map_no_write_barrier(byte_array_map());
-  ByteArray::cast(result)->set_length(length);
-  return result;
-}
-
-
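-// Overwrites the given address range with a filler so the heap stays
-// iterable: one- and two-pointer fillers use dedicated maps, anything
-// larger becomes a FreeSpace object.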
-void Heap::CreateFillerObjectAt(Address addr, int size) {
-  if (size == 0) return;
-  HeapObject* filler = HeapObject::FromAddress(addr);
-  if (size == kPointerSize) {
-    filler->set_map_no_write_barrier(one_pointer_filler_map());
-  } else if (size == 2 * kPointerSize) {
-    filler->set_map_no_write_barrier(two_pointer_filler_map());
-  } else {
-    filler->set_map_no_write_barrier(free_space_map());
-    FreeSpace::cast(filler)->set_size(size);
-  }
-}
-
-
-bool Heap::CanMoveObjectStart(HeapObject* object) {
-  Address address = object->address();
-  bool is_in_old_pointer_space = InOldPointerSpace(address);
-  bool is_in_old_data_space = InOldDataSpace(address);
-
-  if (lo_space()->Contains(object)) return false;
-
-  Page* page = Page::FromAddress(address);
-  // We can move the object start if:
-  // (1) the object is not in old pointer or old data space,
-  // (2) the page of the object was already swept,
-  // (3) the page was already concurrently swept. This case is an optimization
-  // for concurrent sweeping. The WasSwept predicate for concurrently swept
-  // pages is set after sweeping all pages.
-  return (!is_in_old_pointer_space && !is_in_old_data_space) ||
-         page->WasSwept() ||
-         (mark_compact_collector()->AreSweeperThreadsActivated() &&
-              page->parallel_sweeping() <=
-                  MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
-}
-
-
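-// Keeps incremental marking's live-byte accounting in sync when the size of
-// a black (already marked) object changes.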
-void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
-  if (incremental_marking()->IsMarking() &&
-      Marking::IsBlack(Marking::MarkBitFrom(address))) {
-    if (mode == FROM_GC) {
-      MemoryChunk::IncrementLiveBytesFromGC(address, by);
-    } else {
-      MemoryChunk::IncrementLiveBytesFromMutator(address, by);
-    }
-  }
-}
-
-
-AllocationResult Heap::AllocateExternalArray(int length,
-                                             ExternalArrayType array_type,
-                                             void* external_pointer,
-                                             PretenureFlag pretenure) {
-  int size = ExternalArray::kAlignedSize;
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  result->set_map_no_write_barrier(
-      MapForExternalArrayType(array_type));
-  ExternalArray::cast(result)->set_length(length);
-  ExternalArray::cast(result)->set_external_pointer(external_pointer);
-  return result;
-}
-
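-
-// Translates an external array type into the element size and elements kind
-// used by the corresponding fixed typed array.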
-static void ForFixedTypedArray(ExternalArrayType array_type,
-                               int* element_size,
-                               ElementsKind* element_kind) {
-  switch (array_type) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                       \
-    case kExternal##Type##Array:                                              \
-      *element_size = size;                                                   \
-      *element_kind = TYPE##_ELEMENTS;                                        \
-      return;
-
-    TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-
-    default:
-      *element_size = 0;  // Bogus
-      *element_kind = UINT8_ELEMENTS;  // Bogus
-      UNREACHABLE();
-  }
-}
-
-
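-// Allocates a fixed typed array whose elements are zero-initialized; on
-// 32-bit targets, Float64 arrays reserve extra space for double alignment.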
-AllocationResult Heap::AllocateFixedTypedArray(int length,
-                                               ExternalArrayType array_type,
-                                               PretenureFlag pretenure) {
-  int element_size;
-  ElementsKind elements_kind;
-  ForFixedTypedArray(array_type, &element_size, &elements_kind);
-  int size = OBJECT_POINTER_ALIGN(
-      length * element_size + FixedTypedArrayBase::kDataOffset);
-#ifndef V8_HOST_ARCH_64_BIT
-  if (array_type == kExternalFloat64Array) {
-    size += kPointerSize;
-  }
-#endif
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-
-  HeapObject* object;
-  AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-  if (!allocation.To(&object)) return allocation;
-
-  if (array_type == kExternalFloat64Array) {
-    object = EnsureDoubleAligned(this, object, size);
-  }
-
-  object->set_map(MapForFixedTypedArray(array_type));
-  FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
-  elements->set_length(length);
-  memset(elements->DataPtr(), 0, elements->DataSize());
-  return elements;
-}
-
-
-AllocationResult Heap::AllocateCode(int object_size,
-                                    bool immovable) {
-  ASSERT(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
-  AllocationResult allocation;
-  // Large code objects and code objects which should stay at a fixed address
-  // are allocated in large object space.
-  HeapObject* result;
-  bool force_lo_space = object_size > code_space()->AreaSize();
-  if (force_lo_space) {
-    allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
-  } else {
-    allocation = AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
-  }
-  if (!allocation.To(&result)) return allocation;
-
-  if (immovable && !force_lo_space &&
-      // Objects on the first page of each space are never moved.
-      !code_space_->FirstPage()->Contains(result->address())) {
-    // Discard the first code allocation, which was on a page where it could be
-    // moved.
-    CreateFillerObjectAt(result->address(), object_size);
-    allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  result->set_map_no_write_barrier(code_map());
-  Code* code = Code::cast(result);
-  ASSERT(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()));
-  code->set_gc_metadata(Smi::FromInt(0));
-  code->set_ic_age(global_ic_age_);
-  return code;
-}
-
-
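-// Clones a code object. If the code uses an out-of-line constant pool, the
-// pool is copied first so that edits to the clone cannot affect the
-// original, and the clone is then relocated to its new address.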
-AllocationResult Heap::CopyCode(Code* code) {
-  AllocationResult allocation;
-  HeapObject* new_constant_pool;
-  if (FLAG_enable_ool_constant_pool &&
-      code->constant_pool() != empty_constant_pool_array()) {
-    // Copy the constant pool, since edits to the copied code may modify
-    // the constant pool.
-    allocation = CopyConstantPoolArray(code->constant_pool());
-    if (!allocation.To(&new_constant_pool)) return allocation;
-  } else {
-    new_constant_pool = empty_constant_pool_array();
-  }
-
-  // Allocate an object the same size as the code object.
-  int obj_size = code->Size();
-  if (obj_size > code_space()->AreaSize()) {
-    allocation = lo_space_->AllocateRaw(obj_size, EXECUTABLE);
-  } else {
-    allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
-  }
-
-  HeapObject* result;
-  if (!allocation.To(&result)) return allocation;
-
-  // Copy code object.
-  Address old_addr = code->address();
-  Address new_addr = result->address();
-  CopyBlock(new_addr, old_addr, obj_size);
-  Code* new_code = Code::cast(result);
-
-  // Update the constant pool.
-  new_code->set_constant_pool(new_constant_pool);
-
-  // Relocate the copy.
-  ASSERT(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()));
-  new_code->Relocate(new_addr - old_addr);
-  return new_code;
-}
-
-
-AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
-  // Allocate the ByteArray and ConstantPoolArray before the Code object, so
-  // that we do not risk leaving an uninitialized Code object behind (and
-  // breaking the heap).
-  ByteArray* reloc_info_array;
-  { AllocationResult allocation =
-        AllocateByteArray(reloc_info.length(), TENURED);
-    if (!allocation.To(&reloc_info_array)) return allocation;
-  }
-  HeapObject* new_constant_pool;
-  if (FLAG_enable_ool_constant_pool &&
-      code->constant_pool() != empty_constant_pool_array()) {
-    // Copy the constant pool, since edits to the copied code may modify
-    // the constant pool.
-    AllocationResult allocation =
-        CopyConstantPoolArray(code->constant_pool());
-    if (!allocation.To(&new_constant_pool)) return allocation;
-  } else {
-    new_constant_pool = empty_constant_pool_array();
-  }
-
-  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
-
-  int new_obj_size = Code::SizeFor(new_body_size);
-
-  Address old_addr = code->address();
-
-  size_t relocation_offset =
-      static_cast<size_t>(code->instruction_end() - old_addr);
-
-  AllocationResult allocation;
-  if (new_obj_size > code_space()->AreaSize()) {
-    allocation = lo_space_->AllocateRaw(new_obj_size, EXECUTABLE);
-  } else {
-    allocation = AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
-  }
-
-  HeapObject* result;
-  if (!allocation.To(&result)) return allocation;
-
-  // Copy code object.
-  Address new_addr = result->address();
-
-  // Copy header and instructions.
-  CopyBytes(new_addr, old_addr, relocation_offset);
-
-  Code* new_code = Code::cast(result);
-  new_code->set_relocation_info(reloc_info_array);
-
-  // Update constant pool.
-  new_code->set_constant_pool(new_constant_pool);
-
-  // Copy patched rinfo.
-  CopyBytes(new_code->relocation_start(),
-            reloc_info.start(),
-            static_cast<size_t>(reloc_info.length()));
-
-  // Relocate the copy.
-  ASSERT(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid() ||
-         isolate_->code_range()->contains(code->address()));
-  new_code->Relocate(new_addr - old_addr);
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) new_code->ObjectVerify();
-#endif
-  return new_code;
-}
-
-
-void Heap::InitializeAllocationMemento(AllocationMemento* memento,
-                                       AllocationSite* allocation_site) {
-  memento->set_map_no_write_barrier(allocation_memento_map());
-  ASSERT(allocation_site->map() == allocation_site_map());
-  memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
-  if (FLAG_allocation_site_pretenuring) {
-    allocation_site->IncrementMementoCreateCount();
-  }
-}
-
-
-AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
-                                AllocationSite* allocation_site) {
-  ASSERT(gc_state_ == NOT_IN_GC);
-  ASSERT(map->instance_type() != MAP_TYPE);
-  // If allocation failures are disallowed, we may allocate in a different
-  // space when new space is full and the object is not a large object.
-  AllocationSpace retry_space =
-      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
-  int size = map->instance_size();
-  if (allocation_site != NULL) {
-    size += AllocationMemento::kSize;
-  }
-  HeapObject* result;
-  AllocationResult allocation = AllocateRaw(size, space, retry_space);
-  if (!allocation.To(&result)) return allocation;
-  // No need for write barrier since object is white and map is in old space.
-  result->set_map_no_write_barrier(map);
-  if (allocation_site != NULL) {
-    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
-        reinterpret_cast<Address>(result) + map->instance_size());
-    InitializeAllocationMemento(alloc_memento, allocation_site);
-  }
-  return result;
-}
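// Illustrative sketch of the layout produced above when an AllocationSite
// is supplied (sizes chosen purely for illustration; assume a map with
// instance_size == 24 bytes):
//
//   result->address() + 0   : object body (map word + fields, 24 bytes)
//   result->address() + 24  : AllocationMemento (map word + site pointer)
//
// The memento is later located by scanning instance_size bytes past the
// object start, which is why both must come from a single raw allocation.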
-
-
-AllocationResult Heap::AllocateArgumentsObject(Object* callee, int length) {
-  // To get fast allocation and map sharing for arguments objects we
-  // allocate them based on an arguments boilerplate.
-
-  JSObject* boilerplate;
-  int arguments_object_size;
-  bool strict_mode_callee = callee->IsJSFunction() &&
-      JSFunction::cast(callee)->shared()->strict_mode() == STRICT;
-  if (strict_mode_callee) {
-    boilerplate =
-        isolate()->context()->native_context()->strict_arguments_boilerplate();
-    arguments_object_size = kStrictArgumentsObjectSize;
-  } else {
-    boilerplate =
-        isolate()->context()->native_context()->sloppy_arguments_boilerplate();
-    arguments_object_size = kSloppyArgumentsObjectSize;
-  }
-
-  // Check that the size of the boilerplate matches our
-  // expectations. ArgumentsAccessStub::GenerateNewObject relies
-  // on the size being a known constant.
-  ASSERT(arguments_object_size == boilerplate->map()->instance_size());
-
-  // Do the allocation.
-  HeapObject* result;
-  { AllocationResult allocation =
-        AllocateRaw(arguments_object_size, NEW_SPACE, OLD_POINTER_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  // Copy the content. The arguments boilerplate doesn't have any
-  // fields that point to new space so it's safe to skip the write
-  // barrier here.
-  CopyBlock(result->address(), boilerplate->address(), JSObject::kHeaderSize);
-
-  // Set the length property.
-  JSObject* js_obj = JSObject::cast(result);
-  js_obj->InObjectPropertyAtPut(
-      kArgumentsLengthIndex, Smi::FromInt(length), SKIP_WRITE_BARRIER);
-  // Set the callee property for sloppy mode arguments object only.
-  if (!strict_mode_callee) {
-    js_obj->InObjectPropertyAtPut(kArgumentsCalleeIndex, callee);
-  }
-
-  // Check the state of the object.
-  ASSERT(js_obj->HasFastProperties());
-  ASSERT(js_obj->HasFastObjectElements());
-
-  return js_obj;
-}
-
-
-void Heap::InitializeJSObjectFromMap(JSObject* obj,
-                                     FixedArray* properties,
-                                     Map* map) {
-  obj->set_properties(properties);
-  obj->initialize_elements();
-  // TODO(1240798): Initialize the object's body using valid initial values
-  // according to the object's initial map.  For example, if the map's
-  // instance type is JS_ARRAY_TYPE, the length field should be initialized
-  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
-  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
-  // verification code has to cope with (temporarily) invalid objects. See,
-  // for example, JSArray::JSArrayVerify.
-  Object* filler;
-  // We cannot always fill with one_pointer_filler_map because objects
-  // created from API functions expect their internal fields to be initialized
-  // with undefined_value.
-  // Pre-allocated fields need to be initialized with undefined_value as well
-  // so that object accesses before the constructor completes (e.g. in the
-  // debugger) will not cause a crash.
-  if (map->constructor()->IsJSFunction() &&
-      JSFunction::cast(map->constructor())->
-          IsInobjectSlackTrackingInProgress()) {
-    // We might want to shrink the object later.
-    ASSERT(obj->GetInternalFieldCount() == 0);
-    filler = Heap::one_pointer_filler_map();
-  } else {
-    filler = Heap::undefined_value();
-  }
-  obj->InitializeBody(map, Heap::undefined_value(), filler);
-}
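// Summary of the filler choice above (a paraphrase of the code, not new
// behaviour):
//
//   slack tracking in progress -> one_pointer_filler_map()
//       the object's tail may later be trimmed away, so a filler map marks
//       the slack words as ignorable;
//   otherwise                  -> undefined_value()
//       API internal fields and pre-allocated property slots must read as
//       undefined if touched before the constructor completes.
//
// In both cases the pre-allocated property fields themselves are seeded
// with undefined_value() via InitializeBody().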
-
-
-AllocationResult Heap::AllocateJSObjectFromMap(
-    Map* map,
-    PretenureFlag pretenure,
-    bool allocate_properties,
-    AllocationSite* allocation_site) {
-  // JSFunctions should be allocated using AllocateFunction to be
-  // properly initialized.
-  ASSERT(map->instance_type() != JS_FUNCTION_TYPE);
-
-  // Both types of global objects should be allocated using
-  // AllocateGlobalObject to be properly initialized.
-  ASSERT(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
-  ASSERT(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
-
-  // Allocate the backing storage for the properties.
-  FixedArray* properties;
-  if (allocate_properties) {
-    int prop_size = map->InitialPropertiesLength();
-    ASSERT(prop_size >= 0);
-    { AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
-      if (!allocation.To(&properties)) return allocation;
-    }
-  } else {
-    properties = empty_fixed_array();
-  }
-
-  // Allocate the JSObject.
-  int size = map->instance_size();
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
-  JSObject* js_obj;
-  AllocationResult allocation = Allocate(map, space, allocation_site);
-  if (!allocation.To(&js_obj)) return allocation;
-
-  // Initialize the JSObject.
-  InitializeJSObjectFromMap(js_obj, properties, map);
-  ASSERT(js_obj->HasFastElements() ||
-         js_obj->HasExternalArrayElements() ||
-         js_obj->HasFixedTypedArrayElements());
-  return js_obj;
-}
-
-
-AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
-                                        PretenureFlag pretenure,
-                                        AllocationSite* allocation_site) {
-  ASSERT(constructor->has_initial_map());
-
-  // Allocate the object based on the constructor's initial map.
-  AllocationResult allocation = AllocateJSObjectFromMap(
-      constructor->initial_map(), pretenure, true, allocation_site);
-#ifdef DEBUG
-  // Make sure result is NOT a global object if valid.
-  HeapObject* obj;
-  ASSERT(!allocation.To(&obj) || !obj->IsGlobalObject());
-#endif
-  return allocation;
-}
-
-
-AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
-  // Never used to copy functions.  If functions need to be copied we
-  // have to be careful to clear the literals array.
-  SLOW_ASSERT(!source->IsJSFunction());
-
-  // Make the clone.
-  Map* map = source->map();
-  int object_size = map->instance_size();
-  HeapObject* clone;
-
-  ASSERT(site == NULL || AllocationSite::CanTrack(map->instance_type()));
-
-  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
-
-  // If we're forced to always allocate, we use the general allocation
-  // functions which may leave us with an object in old space.
-  if (always_allocate()) {
-    { AllocationResult allocation =
-          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
-      if (!allocation.To(&clone)) return allocation;
-    }
-    Address clone_address = clone->address();
-    CopyBlock(clone_address,
-              source->address(),
-              object_size);
-    // Update write barrier for all fields that lie beyond the header.
-    RecordWrites(clone_address,
-                 JSObject::kHeaderSize,
-                 (object_size - JSObject::kHeaderSize) / kPointerSize);
-  } else {
-    wb_mode = SKIP_WRITE_BARRIER;
-
-    { int adjusted_object_size = site != NULL
-          ? object_size + AllocationMemento::kSize
-          : object_size;
-      AllocationResult allocation =
-          AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
-      if (!allocation.To(&clone)) return allocation;
-    }
-    SLOW_ASSERT(InNewSpace(clone));
-    // Since we know the clone is allocated in new space, we can copy
-    // the contents without worrying about updating the write barrier.
-    CopyBlock(clone->address(),
-              source->address(),
-              object_size);
-
-    if (site != NULL) {
-      AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
-          reinterpret_cast<Address>(clone) + object_size);
-      InitializeAllocationMemento(alloc_memento, site);
-    }
-  }
-
-  SLOW_ASSERT(
-      JSObject::cast(clone)->GetElementsKind() == source->GetElementsKind());
-  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
-  FixedArray* properties = FixedArray::cast(source->properties());
-  // Update elements if necessary.
-  if (elements->length() > 0) {
-    FixedArrayBase* elem;
-    { AllocationResult allocation;
-      if (elements->map() == fixed_cow_array_map()) {
-        allocation = FixedArray::cast(elements);
-      } else if (source->HasFastDoubleElements()) {
-        allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
-      } else {
-        allocation = CopyFixedArray(FixedArray::cast(elements));
-      }
-      if (!allocation.To(&elem)) return allocation;
-    }
-    JSObject::cast(clone)->set_elements(elem, wb_mode);
-  }
-  // Update properties if necessary.
-  if (properties->length() > 0) {
-    FixedArray* prop;
-    { AllocationResult allocation = CopyFixedArray(properties);
-      if (!allocation.To(&prop)) return allocation;
-    }
-    JSObject::cast(clone)->set_properties(prop, wb_mode);
-  }
-  // Return the new clone.
-  return clone;
-}
-
-
-static inline void WriteOneByteData(Vector<const char> vector,
-                                    uint8_t* chars,
-                                    int len) {
-  // Only works for ASCII.
-  ASSERT(vector.length() == len);
-  MemCopy(chars, vector.start(), len);
-}
-
-static inline void WriteTwoByteData(Vector<const char> vector,
-                                    uint16_t* chars,
-                                    int len) {
-  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
-  unsigned stream_length = vector.length();
-  while (stream_length != 0) {
-    unsigned consumed = 0;
-    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
-    ASSERT(c != unibrow::Utf8::kBadChar);
-    ASSERT(consumed <= stream_length);
-    stream_length -= consumed;
-    stream += consumed;
-    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
-      len -= 2;
-      if (len < 0) break;
-      *chars++ = unibrow::Utf16::LeadSurrogate(c);
-      *chars++ = unibrow::Utf16::TrailSurrogate(c);
-    } else {
-      len -= 1;
-      if (len < 0) break;
-      *chars++ = c;
-    }
-  }
-  ASSERT(stream_length == 0);
-  ASSERT(len == 0);
-}
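// Worked example of the surrogate-pair branch above (the value is chosen
// purely for illustration). For c = 0x1F600, which exceeds
// kMaxNonSurrogateCharCode (0xFFFF), the standard UTF-16 computation gives:
//
//   lead  = 0xD800 + ((c - 0x10000) >> 10)   = 0xD800 + 0x3D  = 0xD83D
//   trail = 0xDC00 + ((c - 0x10000) & 0x3FF) = 0xDC00 + 0x200 = 0xDE00
//
// The character occupies two uint16_t slots, which is why len is
// decremented by 2 in that branch and by 1 otherwise.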
-
-
-static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
-  ASSERT(s->length() == len);
-  String::WriteToFlat(s, chars, 0, len);
-}
-
-
-static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
-  ASSERT(s->length() == len);
-  String::WriteToFlat(s, chars, 0, len);
-}
-
-
-template<bool is_one_byte, typename T>
-AllocationResult Heap::AllocateInternalizedStringImpl(
-    T t, int chars, uint32_t hash_field) {
-  ASSERT(chars >= 0);
-  // Compute map and object size.
-  int size;
-  Map* map;
-
-  ASSERT_LE(0, chars);
-  ASSERT_GE(String::kMaxLength, chars);
-  if (is_one_byte) {
-    map = ascii_internalized_string_map();
-    size = SeqOneByteString::SizeFor(chars);
-  } else {
-    map = internalized_string_map();
-    size = SeqTwoByteString::SizeFor(chars);
-  }
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
-
-  // Allocate string.
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  result->set_map_no_write_barrier(map);
-  // Set length and hash fields of the allocated string.
-  String* answer = String::cast(result);
-  answer->set_length(chars);
-  answer->set_hash_field(hash_field);
-
-  ASSERT_EQ(size, answer->Size());
-
-  if (is_one_byte) {
-    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
-  } else {
-    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
-  }
-  return answer;
-}
-
-
-// Need explicit instantiations.
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<true>(
-    String*, int, uint32_t);
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<false>(
-    String*, int, uint32_t);
-template
-AllocationResult Heap::AllocateInternalizedStringImpl<false>(
-    Vector<const char>, int, uint32_t);
-
-
-AllocationResult Heap::AllocateRawOneByteString(int length,
-                                                PretenureFlag pretenure) {
-  ASSERT_LE(0, length);
-  ASSERT_GE(String::kMaxLength, length);
-  int size = SeqOneByteString::SizeFor(length);
-  ASSERT(size <= SeqOneByteString::kMaxSize);
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  // Partially initialize the object.
-  result->set_map_no_write_barrier(ascii_string_map());
-  String::cast(result)->set_length(length);
-  String::cast(result)->set_hash_field(String::kEmptyHashField);
-  ASSERT_EQ(size, HeapObject::cast(result)->Size());
-
-  return result;
-}
-
-
-AllocationResult Heap::AllocateRawTwoByteString(int length,
-                                                PretenureFlag pretenure) {
-  ASSERT_LE(0, length);
-  ASSERT_GE(String::kMaxLength, length);
-  int size = SeqTwoByteString::SizeFor(length);
-  ASSERT(size <= SeqTwoByteString::kMaxSize);
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  // Partially initialize the object.
-  result->set_map_no_write_barrier(string_map());
-  String::cast(result)->set_length(length);
-  String::cast(result)->set_hash_field(String::kEmptyHashField);
-  ASSERT_EQ(size, HeapObject::cast(result)->Size());
-  return result;
-}
-
-
-AllocationResult Heap::AllocateEmptyFixedArray() {
-  int size = FixedArray::SizeFor(0);
-  HeapObject* result;
-  { AllocationResult allocation =
-        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-  // Initialize the object.
-  result->set_map_no_write_barrier(fixed_array_map());
-  FixedArray::cast(result)->set_length(0);
-  return result;
-}
-
-
-AllocationResult Heap::AllocateEmptyExternalArray(
-    ExternalArrayType array_type) {
-  return AllocateExternalArray(0, array_type, NULL, TENURED);
-}
-
-
-AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
-  if (!InNewSpace(src)) {
-    return src;
-  }
-
-  int len = src->length();
-  HeapObject* obj;
-  { AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
-    if (!allocation.To(&obj)) return allocation;
-  }
-  obj->set_map_no_write_barrier(fixed_array_map());
-  FixedArray* result = FixedArray::cast(obj);
-  result->set_length(len);
-
-  // Copy the content
-  DisallowHeapAllocation no_gc;
-  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
-
-  // TODO(mvstanton): The map is set twice because of protection against
-  // calling set() on a COW FixedArray. Issue v8:3221 was created to track
-  // this; once it is resolved we might be able to remove this whole method.
-  HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
-  return result;
-}
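// Why the map is written twice above (restating the TODO): FixedArray::set()
// asserts that it is not used on an array that already carries the COW map,
// so the copy is first built as an ordinary fixed array, filled, and only
// then re-tagged:
//
//   obj->set_map_no_write_barrier(fixed_array_map());      // writable copy
//   for (...) result->set(i, src->get(i), mode);           // fill contents
//   obj->set_map_no_write_barrier(fixed_cow_array_map());  // freeze as COW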
-
-
-AllocationResult Heap::AllocateEmptyFixedTypedArray(
-    ExternalArrayType array_type) {
-  return AllocateFixedTypedArray(0, array_type, TENURED);
-}
-
-
-AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
-  int len = src->length();
-  HeapObject* obj;
-  { AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
-    if (!allocation.To(&obj)) return allocation;
-  }
-  if (InNewSpace(obj)) {
-    obj->set_map_no_write_barrier(map);
-    CopyBlock(obj->address() + kPointerSize,
-              src->address() + kPointerSize,
-              FixedArray::SizeFor(len) - kPointerSize);
-    return obj;
-  }
-  obj->set_map_no_write_barrier(map);
-  FixedArray* result = FixedArray::cast(obj);
-  result->set_length(len);
-
-  // Copy the content
-  DisallowHeapAllocation no_gc;
-  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
-  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
-  return result;
-}
-
-
-AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
-                                                   Map* map) {
-  int len = src->length();
-  HeapObject* obj;
-  { AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
-    if (!allocation.To(&obj)) return allocation;
-  }
-  obj->set_map_no_write_barrier(map);
-  CopyBlock(
-      obj->address() + FixedDoubleArray::kLengthOffset,
-      src->address() + FixedDoubleArray::kLengthOffset,
-      FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
-  return obj;
-}
-
-
-AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
-                                                    Map* map) {
-  HeapObject* obj;
-  if (src->is_extended_layout()) {
-    ConstantPoolArray::NumberOfEntries small(src,
-        ConstantPoolArray::SMALL_SECTION);
-    ConstantPoolArray::NumberOfEntries extended(src,
-        ConstantPoolArray::EXTENDED_SECTION);
-    AllocationResult allocation =
-        AllocateExtendedConstantPoolArray(small, extended);
-    if (!allocation.To(&obj)) return allocation;
-  } else {
-    ConstantPoolArray::NumberOfEntries small(src,
-        ConstantPoolArray::SMALL_SECTION);
-    AllocationResult allocation = AllocateConstantPoolArray(small);
-    if (!allocation.To(&obj)) return allocation;
-  }
-  obj->set_map_no_write_barrier(map);
-  CopyBlock(
-      obj->address() + ConstantPoolArray::kFirstEntryOffset,
-      src->address() + ConstantPoolArray::kFirstEntryOffset,
-      src->size() - ConstantPoolArray::kFirstEntryOffset);
-  return obj;
-}
-
-
-AllocationResult Heap::AllocateRawFixedArray(int length,
-                                             PretenureFlag pretenure) {
-  if (length < 0 || length > FixedArray::kMaxLength) {
-    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
-  }
-  int size = FixedArray::SizeFor(length);
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
-
-  return AllocateRaw(size, space, OLD_POINTER_SPACE);
-}
-
-
-AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
-                                                    PretenureFlag pretenure,
-                                                    Object* filler) {
-  ASSERT(length >= 0);
-  ASSERT(empty_fixed_array()->IsFixedArray());
-  if (length == 0) return empty_fixed_array();
-
-  ASSERT(!InNewSpace(filler));
-  HeapObject* result;
-  { AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
-    if (!allocation.To(&result)) return allocation;
-  }
-
-  result->set_map_no_write_barrier(fixed_array_map());
-  FixedArray* array = FixedArray::cast(result);
-  array->set_length(length);
-  MemsetPointer(array->data_start(), filler, length);
-  return array;
-}
-
-
-AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
-  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
-}
-
-
-AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
-  if (length == 0) return empty_fixed_array();
-
-  HeapObject* obj;
-  { AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
-    if (!allocation.To(&obj)) return allocation;
-  }
-
-  obj->set_map_no_write_barrier(fixed_array_map());
-  FixedArray::cast(obj)->set_length(length);
-  return obj;
-}
-
-
-AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
-    int length,
-    PretenureFlag pretenure) {
-  if (length == 0) return empty_fixed_array();
-
-  HeapObject* elements;
-  AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
-  if (!allocation.To(&elements)) return allocation;
-
-  elements->set_map_no_write_barrier(fixed_double_array_map());
-  FixedDoubleArray::cast(elements)->set_length(length);
-  return elements;
-}
-
-
-AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
-                                                   PretenureFlag pretenure) {
-  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
-    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
-  }
-  int size = FixedDoubleArray::SizeFor(length);
-#ifndef V8_HOST_ARCH_64_BIT
-  size += kPointerSize;
-#endif
-  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
-
-  HeapObject* object;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
-    if (!allocation.To(&object)) return allocation;
-  }
-
-  return EnsureDoubleAligned(this, object, size);
-}
-
-
-AllocationResult Heap::AllocateConstantPoolArray(
-      const ConstantPoolArray::NumberOfEntries& small) {
-  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
-  int size = ConstantPoolArray::SizeFor(small);
-#ifndef V8_HOST_ARCH_64_BIT
-  size += kPointerSize;
-#endif
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
-
-  HeapObject* object;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
-    if (!allocation.To(&object)) return allocation;
-  }
-  object = EnsureDoubleAligned(this, object, size);
-  object->set_map_no_write_barrier(constant_pool_array_map());
-
-  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
-  constant_pool->Init(small);
-  constant_pool->ClearPtrEntries(isolate());
-  return constant_pool;
-}
-
-
-AllocationResult Heap::AllocateExtendedConstantPoolArray(
-    const ConstantPoolArray::NumberOfEntries& small,
-    const ConstantPoolArray::NumberOfEntries& extended) {
-  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
-  CHECK(extended.are_in_range(0, kMaxInt));
-  int size = ConstantPoolArray::SizeForExtended(small, extended);
-#ifndef V8_HOST_ARCH_64_BIT
-  size += kPointerSize;
-#endif
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
-
-  HeapObject* object;
-  { AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
-    if (!allocation.To(&object)) return allocation;
-  }
-  object = EnsureDoubleAligned(this, object, size);
-  object->set_map_no_write_barrier(constant_pool_array_map());
-
-  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
-  constant_pool->InitExtended(small, extended);
-  constant_pool->ClearPtrEntries(isolate());
-  return constant_pool;
-}
-
-
-AllocationResult Heap::AllocateEmptyConstantPoolArray() {
-  ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
-  int size = ConstantPoolArray::SizeFor(small);
-  HeapObject* result;
-  { AllocationResult allocation =
-        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
-    if (!allocation.To(&result)) return allocation;
-  }
-  result->set_map_no_write_barrier(constant_pool_array_map());
-  ConstantPoolArray::cast(result)->Init(small);
-  return result;
-}
-
-
-AllocationResult Heap::AllocateSymbol() {
-  // Statically ensure that it is safe to allocate symbols in paged spaces.
-  STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
-
-  HeapObject* result;
-  AllocationResult allocation =
-      AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
-  if (!allocation.To(&result)) return allocation;
-
-  result->set_map_no_write_barrier(symbol_map());
-
-  // Generate a random hash value.
-  int hash;
-  int attempts = 0;
-  do {
-    hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
-    attempts++;
-  } while (hash == 0 && attempts < 30);
-  if (hash == 0) hash = 1;  // never return 0
-
-  Symbol::cast(result)->set_hash_field(
-      Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
-  Symbol::cast(result)->set_name(undefined_value());
-  Symbol::cast(result)->set_flags(Smi::FromInt(0));
-
-  ASSERT(!Symbol::cast(result)->is_private());
-  return result;
-}
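// Shape of the symbol hash installed above (the exact bit positions come
// from the Name layout constants and are not spelled out here):
//
//   hash_field = Name::kIsNotArrayIndexMask | (hash << Name::kHashShift)
//
// The loop retries while hash == 0 (up to 30 attempts, then forces 1) so a
// symbol never ends up with a zero hash, and kIsNotArrayIndexMask keeps
// symbol hashes off the array-index fast path.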
-
-
-AllocationResult Heap::AllocateStruct(InstanceType type) {
-  Map* map;
-  switch (type) {
-#define MAKE_CASE(NAME, Name, name) \
-    case NAME##_TYPE: map = name##_map(); break;
-STRUCT_LIST(MAKE_CASE)
-#undef MAKE_CASE
-    default:
-      UNREACHABLE();
-      return exception();
-  }
-  int size = map->instance_size();
-  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
-  Struct* result;
-  { AllocationResult allocation = Allocate(map, space);
-    if (!allocation.To(&result)) return allocation;
-  }
-  result->InitializeBody(size);
-  return result;
-}
-
-
-bool Heap::IsHeapIterable() {
-  return (!old_pointer_space()->was_swept_conservatively() &&
-          !old_data_space()->was_swept_conservatively() &&
-          new_space_top_after_last_gc_ == new_space()->top());
-}
-
-
-void Heap::MakeHeapIterable() {
-  ASSERT(AllowHeapAllocation::IsAllowed());
-  if (!IsHeapIterable()) {
-    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
-  }
-  ASSERT(IsHeapIterable());
-}
-
-
-void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
-  incremental_marking()->Step(step_size,
-                              IncrementalMarking::NO_GC_VIA_STACK_GUARD);
-
-  if (incremental_marking()->IsComplete()) {
-    bool uncommit = false;
-    if (gc_count_at_last_idle_gc_ == gc_count_) {
-      // No GC since the last full GC, the mutator is probably not active.
-      isolate_->compilation_cache()->Clear();
-      uncommit = true;
-    }
-    CollectAllGarbage(kReduceMemoryFootprintMask,
-                      "idle notification: finalize incremental");
-    mark_sweeps_since_idle_round_started_++;
-    gc_count_at_last_idle_gc_ = gc_count_;
-    if (uncommit) {
-      new_space_.Shrink();
-      UncommitFromSpace();
-    }
-  }
-}
-
-
-bool Heap::IdleNotification(int hint) {
-  // Hints greater than this value indicate that
-  // the embedder is requesting a lot of GC work.
-  const int kMaxHint = 1000;
-  const int kMinHintForIncrementalMarking = 10;
-  // Minimal hint that allows to do full GC.
-  const int kMinHintForFullGC = 100;
-  intptr_t size_factor = Min(Max(hint, 20), kMaxHint) / 4;
-  // The size factor is in the range [5..250]. The numbers here are chosen
-  // from experiments. If you change them, make sure to test with
-  // chrome/performance_ui_tests --gtest_filter="GeneralMixMemoryTest.*".
-  intptr_t step_size =
-      size_factor * IncrementalMarking::kAllocatedThreshold;
-
-  if (contexts_disposed_ > 0) {
-    contexts_disposed_ = 0;
-    int mark_sweep_time = Min(TimeMarkSweepWouldTakeInMs(), 1000);
-    if (hint >= mark_sweep_time && !FLAG_expose_gc &&
-        incremental_marking()->IsStopped()) {
-      HistogramTimerScope scope(isolate_->counters()->gc_context());
-      CollectAllGarbage(kReduceMemoryFootprintMask,
-                        "idle notification: contexts disposed");
-    } else {
-      AdvanceIdleIncrementalMarking(step_size);
-    }
-
-    // After context disposal there is likely a lot of garbage remaining, so
-    // reset the idle notification counters in order to trigger more
-    // incremental GCs on subsequent idle notifications.
-    StartIdleRound();
-    return false;
-  }
-
-  if (!FLAG_incremental_marking || isolate_->serializer_enabled()) {
-    return IdleGlobalGC();
-  }
-
-  // By doing small chunks of GC work in each IdleNotification, we perform
-  // a round of incremental GCs and after that wait until the mutator
-  // creates enough garbage to justify a new round.
-  // An incremental GC progresses as follows:
-  // 1. many incremental marking steps,
-  // 2. one old space mark-sweep-compact.
-  // We use mark-sweep-compact events to count incremental GCs in a round.
-
-  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
-    if (EnoughGarbageSinceLastIdleRound()) {
-      StartIdleRound();
-    } else {
-      return true;
-    }
-  }
-
-  int remaining_mark_sweeps = kMaxMarkSweepsInIdleRound -
-                              mark_sweeps_since_idle_round_started_;
-
-  if (incremental_marking()->IsStopped()) {
-    // If there are no more than two GCs left in this idle round and we are
-    // allowed to do a full GC, then make those GCs full in order to compact
-    // the code space.
-    // TODO(ulan): Once we enable code compaction for incremental marking,
-    // we can get rid of this special case and always start incremental marking.
-    if (remaining_mark_sweeps <= 2 && hint >= kMinHintForFullGC) {
-      CollectAllGarbage(kReduceMemoryFootprintMask,
-                        "idle notification: finalize idle round");
-      mark_sweeps_since_idle_round_started_++;
-    } else if (hint > kMinHintForIncrementalMarking) {
-      incremental_marking()->Start();
-    }
-  }
-  if (!incremental_marking()->IsStopped() &&
-      hint > kMinHintForIncrementalMarking) {
-    AdvanceIdleIncrementalMarking(step_size);
-  }
-
-  if (mark_sweeps_since_idle_round_started_ >= kMaxMarkSweepsInIdleRound) {
-    FinishIdleRound();
-    return true;
-  }
-
-  // If IdleNotification is called with a large hint we will wait for
-  // the sweeper threads here.
-  if (hint >= kMinHintForFullGC &&
-      mark_compact_collector()->IsConcurrentSweepingInProgress()) {
-    mark_compact_collector()->WaitUntilSweepingCompleted();
-  }
-
-  return false;
-}
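// A minimal sketch of how an embedder might drive this path, assuming the
// v8::V8::IdleNotification(int hint) entry point available around this
// version; PumpIdleWork and kIdleHint are illustrative names, not taken
// from this diff:
//
//   void PumpIdleWork() {
//     const int kIdleHint = 100;  // >= kMinHintForFullGC, allows full GCs
//     while (!v8::V8::IdleNotification(kIdleHint)) {
//       // Each call performs a slice of incremental marking or a
//       // finalizing collection; true ends the current idle round.
//     }
//   }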
-
-
-bool Heap::IdleGlobalGC() {
-  static const int kIdlesBeforeScavenge = 4;
-  static const int kIdlesBeforeMarkSweep = 7;
-  static const int kIdlesBeforeMarkCompact = 8;
-  static const int kMaxIdleCount = kIdlesBeforeMarkCompact + 1;
-  static const unsigned int kGCsBetweenCleanup = 4;
-
-  if (!last_idle_notification_gc_count_init_) {
-    last_idle_notification_gc_count_ = gc_count_;
-    last_idle_notification_gc_count_init_ = true;
-  }
-
-  bool uncommit = true;
-  bool finished = false;
-
-  // Reset the number of idle notifications received when a number of
-  // GCs have taken place. This allows another round of cleanup based
-  // on idle notifications if enough work has been carried out to
-  // provoke a number of garbage collections.
-  if (gc_count_ - last_idle_notification_gc_count_ < kGCsBetweenCleanup) {
-    number_idle_notifications_ =
-        Min(number_idle_notifications_ + 1, kMaxIdleCount);
-  } else {
-    number_idle_notifications_ = 0;
-    last_idle_notification_gc_count_ = gc_count_;
-  }
-
-  if (number_idle_notifications_ == kIdlesBeforeScavenge) {
-    CollectGarbage(NEW_SPACE, "idle notification");
-    new_space_.Shrink();
-    last_idle_notification_gc_count_ = gc_count_;
-  } else if (number_idle_notifications_ == kIdlesBeforeMarkSweep) {
-    // Before doing the mark-sweep collections we clear the
-    // compilation cache to avoid hanging on to source code and
-    // generated code for cached functions.
-    isolate_->compilation_cache()->Clear();
-
-    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
-    new_space_.Shrink();
-    last_idle_notification_gc_count_ = gc_count_;
-
-  } else if (number_idle_notifications_ == kIdlesBeforeMarkCompact) {
-    CollectAllGarbage(kReduceMemoryFootprintMask, "idle notification");
-    new_space_.Shrink();
-    last_idle_notification_gc_count_ = gc_count_;
-    number_idle_notifications_ = 0;
-    finished = true;
-  } else if (number_idle_notifications_ > kIdlesBeforeMarkCompact) {
-    // If we have received more than kIdlesBeforeMarkCompact idle
-    // notifications we do not perform any cleanup because we don't
-    // expect to gain much by doing so.
-    finished = true;
-  }
-
-  if (uncommit) UncommitFromSpace();
-
-  return finished;
-}
-
-
-#ifdef DEBUG
-
-void Heap::Print() {
-  if (!HasBeenSetUp()) return;
-  isolate()->PrintStack(stdout);
-  AllSpaces spaces(this);
-  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
-    space->Print();
-  }
-}
-
-
-void Heap::ReportCodeStatistics(const char* title) {
-  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
-  PagedSpace::ResetCodeStatistics(isolate());
-  // We do not look for code in new space, map space, or old space.  If code
-  // somehow ends up in those spaces, we would miss it here.
-  code_space_->CollectCodeStatistics();
-  lo_space_->CollectCodeStatistics();
-  PagedSpace::ReportCodeStatistics(isolate());
-}
-
-
-// This function expects that NewSpace's allocated objects histogram is
-// populated (via a call to CollectStatistics or else as a side effect of a
-// just-completed scavenge collection).
-void Heap::ReportHeapStatistics(const char* title) {
-  USE(title);
-  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n",
-         title, gc_count_);
-  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
-         old_generation_allocation_limit_);
-
-  PrintF("\n");
-  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
-  isolate_->global_handles()->PrintStats();
-  PrintF("\n");
-
-  PrintF("Heap statistics : ");
-  isolate_->memory_allocator()->ReportStatistics();
-  PrintF("To space : ");
-  new_space_.ReportStatistics();
-  PrintF("Old pointer space : ");
-  old_pointer_space_->ReportStatistics();
-  PrintF("Old data space : ");
-  old_data_space_->ReportStatistics();
-  PrintF("Code space : ");
-  code_space_->ReportStatistics();
-  PrintF("Map space : ");
-  map_space_->ReportStatistics();
-  PrintF("Cell space : ");
-  cell_space_->ReportStatistics();
-  PrintF("PropertyCell space : ");
-  property_cell_space_->ReportStatistics();
-  PrintF("Large object space : ");
-  lo_space_->ReportStatistics();
-  PrintF(">>>>>> ========================================= >>>>>>\n");
-}
-
-#endif  // DEBUG
-
-bool Heap::Contains(HeapObject* value) {
-  return Contains(value->address());
-}
-
-
-bool Heap::Contains(Address addr) {
-  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
-  return HasBeenSetUp() &&
-    (new_space_.ToSpaceContains(addr) ||
-     old_pointer_space_->Contains(addr) ||
-     old_data_space_->Contains(addr) ||
-     code_space_->Contains(addr) ||
-     map_space_->Contains(addr) ||
-     cell_space_->Contains(addr) ||
-     property_cell_space_->Contains(addr) ||
-     lo_space_->SlowContains(addr));
-}
-
-
-bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
-  return InSpace(value->address(), space);
-}
-
-
-bool Heap::InSpace(Address addr, AllocationSpace space) {
-  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
-  if (!HasBeenSetUp()) return false;
-
-  switch (space) {
-    case NEW_SPACE:
-      return new_space_.ToSpaceContains(addr);
-    case OLD_POINTER_SPACE:
-      return old_pointer_space_->Contains(addr);
-    case OLD_DATA_SPACE:
-      return old_data_space_->Contains(addr);
-    case CODE_SPACE:
-      return code_space_->Contains(addr);
-    case MAP_SPACE:
-      return map_space_->Contains(addr);
-    case CELL_SPACE:
-      return cell_space_->Contains(addr);
-    case PROPERTY_CELL_SPACE:
-      return property_cell_space_->Contains(addr);
-    case LO_SPACE:
-      return lo_space_->SlowContains(addr);
-    case INVALID_SPACE:
-      break;
-  }
-  UNREACHABLE();
-  return false;
-}
-
-
-#ifdef VERIFY_HEAP
-void Heap::Verify() {
-  CHECK(HasBeenSetUp());
-  HandleScope scope(isolate());
-
-  store_buffer()->Verify();
-
-  VerifyPointersVisitor visitor;
-  IterateRoots(&visitor, VISIT_ONLY_STRONG);
-
-  VerifySmisVisitor smis_visitor;
-  IterateSmiRoots(&smis_visitor);
-
-  new_space_.Verify();
-
-  old_pointer_space_->Verify(&visitor);
-  map_space_->Verify(&visitor);
-
-  VerifyPointersVisitor no_dirty_regions_visitor;
-  old_data_space_->Verify(&no_dirty_regions_visitor);
-  code_space_->Verify(&no_dirty_regions_visitor);
-  cell_space_->Verify(&no_dirty_regions_visitor);
-  property_cell_space_->Verify(&no_dirty_regions_visitor);
-
-  lo_space_->Verify();
-}
-#endif
-
-
-void Heap::ZapFromSpace() {
-  NewSpacePageIterator it(new_space_.FromSpaceStart(),
-                          new_space_.FromSpaceEnd());
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
-    for (Address cursor = page->area_start(), limit = page->area_end();
-         cursor < limit;
-         cursor += kPointerSize) {
-      Memory::Address_at(cursor) = kFromSpaceZapValue;
-    }
-  }
-}
-
-
-void Heap::IterateAndMarkPointersToFromSpace(Address start,
-                                             Address end,
-                                             ObjectSlotCallback callback) {
-  Address slot_address = start;
-
-  // We are not collecting slots on new space objects during mutation, so we
-  // have to scan for pointers to evacuation candidates when we promote
-  // objects. But we should not record any slots in non-black objects. A grey
-  // object's slots would be rescanned, and a white object might not survive
-  // until the end of the collection, so recording its slots would violate
-  // the invariant.
-  bool record_slots = false;
-  if (incremental_marking()->IsCompacting()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
-    record_slots = Marking::IsBlack(mark_bit);
-  }
-
-  while (slot_address < end) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* object = *slot;
-    // If the store buffer becomes overfull we mark pages as being exempt from
-    // the store buffer.  These pages are scanned to find pointers that point
-    // to the new space.  In that case we may hit newly promoted objects and
-    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
-    if (object->IsHeapObject()) {
-      if (Heap::InFromSpace(object)) {
-        callback(reinterpret_cast<HeapObject**>(slot),
-                 HeapObject::cast(object));
-        Object* new_object = *slot;
-        if (InNewSpace(new_object)) {
-          SLOW_ASSERT(Heap::InToSpace(new_object));
-          SLOW_ASSERT(new_object->IsHeapObject());
-          store_buffer_.EnterDirectlyIntoStoreBuffer(
-              reinterpret_cast<Address>(slot));
-        }
-        SLOW_ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
-      } else if (record_slots &&
-                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
-        mark_compact_collector()->RecordSlot(slot, slot, object);
-      }
-    }
-    slot_address += kPointerSize;
-  }
-}
-
-
-#ifdef DEBUG
-typedef bool (*CheckStoreBufferFilter)(Object** addr);
-
-
-bool IsAMapPointerAddress(Object** addr) {
-  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
-  int mod = a % Map::kSize;
-  return mod >= Map::kPointerFieldsBeginOffset &&
-         mod < Map::kPointerFieldsEndOffset;
-}
-
-
-bool EverythingsAPointer(Object** addr) {
-  return true;
-}
-
-
-static void CheckStoreBuffer(Heap* heap,
-                             Object** current,
-                             Object** limit,
-                             Object**** store_buffer_position,
-                             Object*** store_buffer_top,
-                             CheckStoreBufferFilter filter,
-                             Address special_garbage_start,
-                             Address special_garbage_end) {
-  Map* free_space_map = heap->free_space_map();
-  for ( ; current < limit; current++) {
-    Object* o = *current;
-    Address current_address = reinterpret_cast<Address>(current);
-    // Skip free space.
-    if (o == free_space_map) {
-      FreeSpace* free_space =
-          FreeSpace::cast(HeapObject::FromAddress(current_address));
-      int skip = free_space->Size();
-      ASSERT(current_address + skip <= reinterpret_cast<Address>(limit));
-      ASSERT(skip > 0);
-      current_address += skip - kPointerSize;
-      current = reinterpret_cast<Object**>(current_address);
-      continue;
-    }
-    // Skip the current linear allocation space between top and limit which is
-    // unmarked with the free space map, but can contain junk.
-    if (current_address == special_garbage_start &&
-        special_garbage_end != special_garbage_start) {
-      current_address = special_garbage_end - kPointerSize;
-      current = reinterpret_cast<Object**>(current_address);
-      continue;
-    }
-    if (!(*filter)(current)) continue;
-    ASSERT(current_address < special_garbage_start ||
-           current_address >= special_garbage_end);
-    ASSERT(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
-    // We have to check that the pointer does not point into new space
-    // without trying to cast it to a heap object since the hash field of
-    // a string can contain values like 1 and 3 which are tagged null
-    // pointers.
-    if (!heap->InNewSpace(o)) continue;
-    while (**store_buffer_position < current &&
-           *store_buffer_position < store_buffer_top) {
-      (*store_buffer_position)++;
-    }
-    if (**store_buffer_position != current ||
-        *store_buffer_position == store_buffer_top) {
-      Object** obj_start = current;
-      while (!(*obj_start)->IsMap()) obj_start--;
-      UNREACHABLE();
-    }
-  }
-}
-
-
-// Check that the store buffer contains all intergenerational pointers by
-// scanning a page and ensuring that all pointers to young space are in the
-// store buffer.
-void Heap::OldPointerSpaceCheckStoreBuffer() {
-  OldSpace* space = old_pointer_space();
-  PageIterator pages(space);
-
-  store_buffer()->SortUniq();
-
-  while (pages.has_next()) {
-    Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->area_start());
-
-    Address end = page->area_end();
-
-    Object*** store_buffer_position = store_buffer()->Start();
-    Object*** store_buffer_top = store_buffer()->Top();
-
-    Object** limit = reinterpret_cast<Object**>(end);
-    CheckStoreBuffer(this,
-                     current,
-                     limit,
-                     &store_buffer_position,
-                     store_buffer_top,
-                     &EverythingsAPointer,
-                     space->top(),
-                     space->limit());
-  }
-}
-
-
-void Heap::MapSpaceCheckStoreBuffer() {
-  MapSpace* space = map_space();
-  PageIterator pages(space);
-
-  store_buffer()->SortUniq();
-
-  while (pages.has_next()) {
-    Page* page = pages.next();
-    Object** current = reinterpret_cast<Object**>(page->area_start());
-
-    Address end = page->area_end();
-
-    Object*** store_buffer_position = store_buffer()->Start();
-    Object*** store_buffer_top = store_buffer()->Top();
-
-    Object** limit = reinterpret_cast<Object**>(end);
-    CheckStoreBuffer(this,
-                     current,
-                     limit,
-                     &store_buffer_position,
-                     store_buffer_top,
-                     &IsAMapPointerAddress,
-                     space->top(),
-                     space->limit());
-  }
-}
-
-
-void Heap::LargeObjectSpaceCheckStoreBuffer() {
-  LargeObjectIterator it(lo_space());
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    // We only have code, sequential strings, or fixed arrays in large
-    // object space, and only fixed arrays can possibly contain pointers to
-    // the young generation.
-    if (object->IsFixedArray()) {
-      Object*** store_buffer_position = store_buffer()->Start();
-      Object*** store_buffer_top = store_buffer()->Top();
-      Object** current = reinterpret_cast<Object**>(object->address());
-      Object** limit =
-          reinterpret_cast<Object**>(object->address() + object->Size());
-      CheckStoreBuffer(this,
-                       current,
-                       limit,
-                       &store_buffer_position,
-                       store_buffer_top,
-                       &EverythingsAPointer,
-                       NULL,
-                       NULL);
-    }
-  }
-}
-#endif
-
-
-void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
-  IterateStrongRoots(v, mode);
-  IterateWeakRoots(v, mode);
-}
-
-
-void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
-  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
-  v->Synchronize(VisitorSynchronization::kStringTable);
-  if (mode != VISIT_ALL_IN_SCAVENGE &&
-      mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
-    // Scavenge collections have special processing for this.
-    external_string_table_.Iterate(v);
-  }
-  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
-}
-
-
-void Heap::IterateSmiRoots(ObjectVisitor* v) {
-  // Acquire execution access since we are going to read stack limit values.
-  ExecutionAccess access(isolate());
-  v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
-  v->Synchronize(VisitorSynchronization::kSmiRootList);
-}
-
-
-void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
-  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
-  v->Synchronize(VisitorSynchronization::kStrongRootList);
-
-  v->VisitPointer(BitCast<Object**>(&hidden_string_));
-  v->Synchronize(VisitorSynchronization::kInternalizedString);
-
-  isolate_->bootstrapper()->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kBootstrapper);
-  isolate_->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kTop);
-  Relocatable::Iterate(isolate_, v);
-  v->Synchronize(VisitorSynchronization::kRelocatable);
-
-  if (isolate_->deoptimizer_data() != NULL) {
-    isolate_->deoptimizer_data()->Iterate(v);
-  }
-  v->Synchronize(VisitorSynchronization::kDebug);
-  isolate_->compilation_cache()->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kCompilationCache);
-
-  // Iterate over local handles in handle scopes.
-  isolate_->handle_scope_implementer()->Iterate(v);
-  isolate_->IterateDeferredHandles(v);
-  v->Synchronize(VisitorSynchronization::kHandleScope);
-
-  // Iterate over the builtin code objects and code stubs in the
-  // heap. Note that it is not necessary to iterate over code objects
-  // on scavenge collections.
-  if (mode != VISIT_ALL_IN_SCAVENGE) {
-    isolate_->builtins()->IterateBuiltins(v);
-  }
-  v->Synchronize(VisitorSynchronization::kBuiltins);
-
-  // Iterate over global handles.
-  switch (mode) {
-    case VISIT_ONLY_STRONG:
-      isolate_->global_handles()->IterateStrongRoots(v);
-      break;
-    case VISIT_ALL_IN_SCAVENGE:
-      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
-      break;
-    case VISIT_ALL_IN_SWEEP_NEWSPACE:
-    case VISIT_ALL:
-      isolate_->global_handles()->IterateAllRoots(v);
-      break;
-  }
-  v->Synchronize(VisitorSynchronization::kGlobalHandles);
-
-  // Iterate over eternal handles.
-  if (mode == VISIT_ALL_IN_SCAVENGE) {
-    isolate_->eternal_handles()->IterateNewSpaceRoots(v);
-  } else {
-    isolate_->eternal_handles()->IterateAllRoots(v);
-  }
-  v->Synchronize(VisitorSynchronization::kEternalHandles);
-
-  // Iterate over pointers being held by inactive threads.
-  isolate_->thread_manager()->Iterate(v);
-  v->Synchronize(VisitorSynchronization::kThreadManager);
-
-  // Iterate over the pointers the Serialization/Deserialization code is
-  // holding.
-  // During garbage collection this keeps the partial snapshot cache alive.
-  // During deserialization of the startup snapshot this creates the partial
-  // snapshot cache and deserializes the objects it refers to.  During
-  // serialization this does nothing, since the partial snapshot cache is
-  // empty.  However the next thing we do is create the partial snapshot,
-  // filling up the partial snapshot cache with objects it needs as we go.
-  SerializerDeserializer::Iterate(isolate_, v);
-  // We don't do a v->Synchronize call here, because in debug mode that will
-  // output a flag to the snapshot.  However at this point the serializer and
-  // deserializer are deliberately a little unsynchronized (see above) so the
-  // checking of the sync flag in the snapshot would fail.
-}
-
-
-// TODO(1236194): Since the heap size is configurable on the command line
-// and through the API, we should gracefully handle the case that the heap
-// size is not big enough to fit all the initial objects.
-bool Heap::ConfigureHeap(int max_semi_space_size,
-                         int max_old_space_size,
-                         int max_executable_size,
-                         size_t code_range_size) {
-  if (HasBeenSetUp()) return false;
-
-  // Overwrite default configuration.
-  if (max_semi_space_size > 0) {
-    max_semi_space_size_ = max_semi_space_size * MB;
-  }
-  if (max_old_space_size > 0) {
-    max_old_generation_size_ = max_old_space_size * MB;
-  }
-  if (max_executable_size > 0) {
-    max_executable_size_ = max_executable_size * MB;
-  }
-
-  // If max space size flags are specified overwrite the configuration.
-  if (FLAG_max_semi_space_size > 0) {
-    max_semi_space_size_ = FLAG_max_semi_space_size * MB;
-  }
-  if (FLAG_max_old_space_size > 0) {
-    max_old_generation_size_ = FLAG_max_old_space_size * MB;
-  }
-  if (FLAG_max_executable_size > 0) {
-    max_executable_size_ = FLAG_max_executable_size * MB;
-  }
-
-  if (FLAG_stress_compaction) {
-    // This will cause more frequent GCs when stressing.
-    max_semi_space_size_ = Page::kPageSize;
-  }
-
-  if (Snapshot::IsEnabled()) {
-    // If we are using a snapshot we always reserve the default amount
-    // of memory for each semispace because code in the snapshot has
-    // write-barrier code that relies on the size and alignment of new
-    // space.  We therefore cannot use a larger max semispace size
-    // than the default reserved semispace size.
-    if (max_semi_space_size_ > reserved_semispace_size_) {
-      max_semi_space_size_ = reserved_semispace_size_;
-      if (FLAG_trace_gc) {
-        PrintPID("Max semi-space size cannot be more than %d kbytes\n",
-                 reserved_semispace_size_ >> 10);
-      }
-    }
-  } else {
-    // If we are not using snapshots we reserve space for the actual
-    // max semispace size.
-    reserved_semispace_size_ = max_semi_space_size_;
-  }
-
-  // The max executable size must be less than or equal to the max old
-  // generation size.
-  if (max_executable_size_ > max_old_generation_size_) {
-    max_executable_size_ = max_old_generation_size_;
-  }
-
-  // The new space size must be a power of two to support single-bit testing
-  // for containment.
-  max_semi_space_size_ = RoundUpToPowerOf2(max_semi_space_size_);
-  reserved_semispace_size_ = RoundUpToPowerOf2(reserved_semispace_size_);
-
-  if (FLAG_min_semi_space_size > 0) {
-    int initial_semispace_size = FLAG_min_semi_space_size * MB;
-    if (initial_semispace_size > max_semi_space_size_) {
-      initial_semispace_size_ = max_semi_space_size_;
-      if (FLAG_trace_gc) {
-        PrintPID("Min semi-space size cannot be more than the maximum"
-                 "semi-space size of %d MB\n", max_semi_space_size_);
-      }
-    } else {
-      initial_semispace_size_ = initial_semispace_size;
-    }
-  }
-
-  initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
-
-  // The old generation is paged and needs at least one page for each space.
-  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
-  max_old_generation_size_ =
-      Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
-          max_old_generation_size_);
-
-  // We rely on being able to allocate new arrays in paged spaces.
-  ASSERT(Page::kMaxRegularHeapObjectSize >=
-         (JSArray::kSize +
-          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
-          AllocationMemento::kSize));
-
-  code_range_size_ = code_range_size * MB;
-
-  configured_ = true;
-  return true;
-}
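// Worked example of the configuration above (numbers chosen only for
// illustration): ConfigureHeap(3, 700, 0, 0) on a build without a snapshot
// proceeds roughly as follows:
//
//   max_semi_space_size_     = 3 MB, rounded up to 4 MB by
//                              RoundUpToPowerOf2 (single-bit containment
//                              tests require a power-of-two new space);
//   reserved_semispace_size_ = max_semi_space_size_ (no snapshot in use);
//   max_old_generation_size_ = 700 MB, and max_executable_size_ is
//                              clamped to at most that value.
//
// Any FLAG_max_* values that are set override the arguments.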
-
-
-bool Heap::ConfigureHeapDefault() {
-  return ConfigureHeap(0, 0, 0, 0);
-}
-
-
-void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
-  *stats->start_marker = HeapStats::kStartMarker;
-  *stats->end_marker = HeapStats::kEndMarker;
-  *stats->new_space_size = new_space_.SizeAsInt();
-  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
-  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
-  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
-  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
-  *stats->old_data_space_capacity = old_data_space_->Capacity();
-  *stats->code_space_size = code_space_->SizeOfObjects();
-  *stats->code_space_capacity = code_space_->Capacity();
-  *stats->map_space_size = map_space_->SizeOfObjects();
-  *stats->map_space_capacity = map_space_->Capacity();
-  *stats->cell_space_size = cell_space_->SizeOfObjects();
-  *stats->cell_space_capacity = cell_space_->Capacity();
-  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
-  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
-  *stats->lo_space_size = lo_space_->Size();
-  isolate_->global_handles()->RecordStats(stats);
-  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
-  *stats->memory_allocator_capacity =
-      isolate()->memory_allocator()->Size() +
-      isolate()->memory_allocator()->Available();
-  *stats->os_error = OS::GetLastError();
-  if (take_snapshot) {
-    HeapIterator iterator(this);
-    for (HeapObject* obj = iterator.next();
-         obj != NULL;
-         obj = iterator.next()) {
-      InstanceType type = obj->map()->instance_type();
-      ASSERT(0 <= type && type <= LAST_TYPE);
-      stats->objects_per_type[type]++;
-      stats->size_per_type[type] += obj->Size();
-    }
-  }
-}
-
-
-intptr_t Heap::PromotedSpaceSizeOfObjects() {
-  return old_pointer_space_->SizeOfObjects()
-      + old_data_space_->SizeOfObjects()
-      + code_space_->SizeOfObjects()
-      + map_space_->SizeOfObjects()
-      + cell_space_->SizeOfObjects()
-      + property_cell_space_->SizeOfObjects()
-      + lo_space_->SizeOfObjects();
-}
-
-
-int64_t Heap::PromotedExternalMemorySize() {
-  if (amount_of_external_allocated_memory_
-      <= amount_of_external_allocated_memory_at_last_global_gc_) return 0;
-  return amount_of_external_allocated_memory_
-      - amount_of_external_allocated_memory_at_last_global_gc_;
-}
-
-
-intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                            int freed_global_handles) {
-  const int kMaxHandles = 1000;
-  const int kMinHandles = 100;
-  double min_factor = 1.1;
-  double max_factor = 4;
-  // We set the old generation growing factor to 2 to grow the heap more
-  // slowly on memory-constrained devices.
-  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
-    max_factor = 2;
-  }
-  // If there are many freed global handles, then the next full GC will
-  // likely collect a lot of garbage. Choose the heap growing factor
-  // depending on freed global handles.
-  // TODO(ulan, hpayer): Take into account mutator utilization.
-  double factor;
-  if (freed_global_handles <= kMinHandles) {
-    factor = max_factor;
-  } else if (freed_global_handles >= kMaxHandles) {
-    factor = min_factor;
-  } else {
-    // Compute factor using linear interpolation between points
-    // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
-    factor = max_factor -
-             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
-             (kMaxHandles - kMinHandles);
-  }
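-  // Worked example of the interpolation above (illustrative values): with
-  // freed_global_handles == 550, factor == 4 - (550 - 100) * (4 - 1.1) / 900
-  // == 4 - 1.45 == 2.55.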
-
-  if (FLAG_stress_compaction ||
-      mark_compact_collector()->reduce_memory_footprint_) {
-    factor = min_factor;
-  }
-
-  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
-  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
-  limit += new_space_.Capacity();
-  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
-  return Min(limit, halfway_to_the_max);
-}
-
-
-void Heap::EnableInlineAllocation() {
-  if (!inline_allocation_disabled_) return;
-  inline_allocation_disabled_ = false;
-
-  // Update inline allocation limit for new space.
-  new_space()->UpdateInlineAllocationLimit(0);
-}
-
-
-void Heap::DisableInlineAllocation() {
-  if (inline_allocation_disabled_) return;
-  inline_allocation_disabled_ = true;
-
-  // Update inline allocation limit for new space.
-  new_space()->UpdateInlineAllocationLimit(0);
-
-  // Update inline allocation limit for old spaces.
-  PagedSpaces spaces(this);
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->EmptyAllocationInfo();
-  }
-}
-
-
-V8_DECLARE_ONCE(initialize_gc_once);
-
-static void InitializeGCOnce() {
-  InitializeScavengingVisitorsTables();
-  NewSpaceScavenger::Initialize();
-  MarkCompactCollector::Initialize();
-}
-
-
-bool Heap::SetUp() {
-#ifdef DEBUG
-  allocation_timeout_ = FLAG_gc_interval;
-#endif
-
-  // Initialize heap spaces and initial maps and objects. Whenever something
-  // goes wrong, just return false. The caller should check the results and
-  // call Heap::TearDown() to release allocated memory.
-  //
-  // If the heap is not yet configured (e.g. through the API), configure it.
-  // Configuration is based on the flags new-space-size (really the semispace
-  // size) and old-space-size if set, or on the initial values of
-  // semispace_size_ and old_generation_size_ otherwise.
-  if (!configured_) {
-    if (!ConfigureHeapDefault()) return false;
-  }
-
-  base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
-
-  MarkMapPointersAsEncoded(false);
-
-  // Set up memory allocator.
-  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
-      return false;
-
-  // Set up new space.
-  if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
-    return false;
-  }
-  new_space_top_after_last_gc_ = new_space()->top();
-
-  // Initialize old pointer space.
-  old_pointer_space_ =
-      new OldSpace(this,
-                   max_old_generation_size_,
-                   OLD_POINTER_SPACE,
-                   NOT_EXECUTABLE);
-  if (old_pointer_space_ == NULL) return false;
-  if (!old_pointer_space_->SetUp()) return false;
-
-  // Initialize old data space.
-  old_data_space_ =
-      new OldSpace(this,
-                   max_old_generation_size_,
-                   OLD_DATA_SPACE,
-                   NOT_EXECUTABLE);
-  if (old_data_space_ == NULL) return false;
-  if (!old_data_space_->SetUp()) return false;
-
-  if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
-
-  // Initialize the code space, set its maximum capacity to the old
-  // generation size. It needs executable memory.
-  code_space_ =
-      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
-  if (code_space_ == NULL) return false;
-  if (!code_space_->SetUp()) return false;
-
-  // Initialize map space.
-  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
-  if (map_space_ == NULL) return false;
-  if (!map_space_->SetUp()) return false;
-
-  // Initialize simple cell space.
-  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
-  if (cell_space_ == NULL) return false;
-  if (!cell_space_->SetUp()) return false;
-
-  // Initialize global property cell space.
-  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
-                                               PROPERTY_CELL_SPACE);
-  if (property_cell_space_ == NULL) return false;
-  if (!property_cell_space_->SetUp()) return false;
-
-  // The large object code space may contain code or data.  We set the memory
-  // to be non-executable here for safety, but this means we need to enable it
-  // explicitly when allocating large code objects.
-  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
-  if (lo_space_ == NULL) return false;
-  if (!lo_space_->SetUp()) return false;
-
-  // Set up the seed that is used to randomize the string hash function.
-  ASSERT(hash_seed() == 0);
-  if (FLAG_randomize_hashes) {
-    if (FLAG_hash_seed == 0) {
-      int rnd = isolate()->random_number_generator()->NextInt();
-      set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
-    } else {
-      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
-    }
-  }
-
-  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
-  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
-
-  store_buffer()->SetUp();
-
-  mark_compact_collector()->SetUp();
-
-  return true;
-}
-
-
-bool Heap::CreateHeapObjects() {
-  // Create initial maps.
-  if (!CreateInitialMaps()) return false;
-  CreateApiObjects();
-
-  // Create initial objects.
-  CreateInitialObjects();
-  CHECK_EQ(0, gc_count_);
-
-  set_native_contexts_list(undefined_value());
-  set_array_buffers_list(undefined_value());
-  set_allocation_sites_list(undefined_value());
-  weak_object_to_code_table_ = undefined_value();
-  return true;
-}
-
-
-void Heap::SetStackLimits() {
-  ASSERT(isolate_ != NULL);
-  ASSERT(isolate_ == isolate());
-  // On 64 bit machines, pointers are generally out of range of Smis.  We write
-  // something that looks like an out of range Smi to the GC.
-
-  // Set up the special root array entries containing the stack limits.
-  // These are actually addresses, but the tag makes the GC ignore it.
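-  // Illustrative (assuming the usual kSmiTag == 0 and kSmiTagMask == 1): an
-  // odd limit address ending in ...07 is masked to ...06; the cleared low bit
-  // is the Smi tag, so the GC treats the entry as a Smi and never follows it.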
-  roots_[kStackLimitRootIndex] =
-      reinterpret_cast<Object*>(
-          (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
-  roots_[kRealStackLimitRootIndex] =
-      reinterpret_cast<Object*>(
-          (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
-}
-
-
-void Heap::TearDown() {
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    Verify();
-  }
-#endif
-
-  UpdateMaximumCommitted();
-
-  if (FLAG_print_cumulative_gc_stat) {
-    PrintF("\n");
-    PrintF("gc_count=%d ", gc_count_);
-    PrintF("mark_sweep_count=%d ", ms_count_);
-    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
-    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
-    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
-    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ",
-           get_max_alive_after_gc());
-    PrintF("total_marking_time=%.1f ", marking_time());
-    PrintF("total_sweeping_time=%.1f ", sweeping_time());
-    PrintF("\n\n");
-  }
-
-  if (FLAG_print_max_heap_committed) {
-    PrintF("\n");
-    PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
-      MaximumCommittedMemory());
-    PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
-      new_space_.MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
-      old_data_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
-      old_pointer_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
-      old_pointer_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
-      code_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
-      map_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
-      cell_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
-      property_cell_space_->MaximumCommittedMemory());
-    PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
-      lo_space_->MaximumCommittedMemory());
-    PrintF("\n\n");
-  }
-
-  TearDownArrayBuffers();
-
-  isolate_->global_handles()->TearDown();
-
-  external_string_table_.TearDown();
-
-  mark_compact_collector()->TearDown();
-
-  new_space_.TearDown();
-
-  if (old_pointer_space_ != NULL) {
-    old_pointer_space_->TearDown();
-    delete old_pointer_space_;
-    old_pointer_space_ = NULL;
-  }
-
-  if (old_data_space_ != NULL) {
-    old_data_space_->TearDown();
-    delete old_data_space_;
-    old_data_space_ = NULL;
-  }
-
-  if (code_space_ != NULL) {
-    code_space_->TearDown();
-    delete code_space_;
-    code_space_ = NULL;
-  }
-
-  if (map_space_ != NULL) {
-    map_space_->TearDown();
-    delete map_space_;
-    map_space_ = NULL;
-  }
-
-  if (cell_space_ != NULL) {
-    cell_space_->TearDown();
-    delete cell_space_;
-    cell_space_ = NULL;
-  }
-
-  if (property_cell_space_ != NULL) {
-    property_cell_space_->TearDown();
-    delete property_cell_space_;
-    property_cell_space_ = NULL;
-  }
-
-  if (lo_space_ != NULL) {
-    lo_space_->TearDown();
-    delete lo_space_;
-    lo_space_ = NULL;
-  }
-
-  store_buffer()->TearDown();
-  incremental_marking()->TearDown();
-
-  isolate_->memory_allocator()->TearDown();
-}
-
-
-void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
-                                 GCType gc_type,
-                                 bool pass_isolate) {
-  ASSERT(callback != NULL);
-  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
-  ASSERT(!gc_prologue_callbacks_.Contains(pair));
-  return gc_prologue_callbacks_.Add(pair);
-}
-
-
-void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
-  ASSERT(callback != NULL);
-  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
-    if (gc_prologue_callbacks_[i].callback == callback) {
-      gc_prologue_callbacks_.Remove(i);
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
-                                 GCType gc_type,
-                                 bool pass_isolate) {
-  ASSERT(callback != NULL);
-  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
-  ASSERT(!gc_epilogue_callbacks_.Contains(pair));
-  return gc_epilogue_callbacks_.Add(pair);
-}
-
-
-void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
-  ASSERT(callback != NULL);
-  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
-    if (gc_epilogue_callbacks_[i].callback == callback) {
-      gc_epilogue_callbacks_.Remove(i);
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-// TODO(ishell): Find a better place for this.
-void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
-                                         Handle<DependentCode> dep) {
-  ASSERT(!InNewSpace(*obj));
-  ASSERT(!InNewSpace(*dep));
-  // This handle scope keeps the table handle local to this function, which
-  // allows us to safely skip write barriers in table update operations.
-  HandleScope scope(isolate());
-  Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
-                              isolate());
-  table = WeakHashTable::Put(table, obj, dep);
-
-  if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
-    WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
-  }
-  set_weak_object_to_code_table(*table);
-  ASSERT_EQ(*dep, table->Lookup(obj));
-}
-
-
-DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
-  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
-  if (dep->IsDependentCode()) return DependentCode::cast(dep);
-  return DependentCode::cast(empty_fixed_array());
-}
-
-
-void Heap::EnsureWeakObjectToCodeTable() {
-  if (!weak_object_to_code_table()->IsHashTable()) {
-    set_weak_object_to_code_table(*WeakHashTable::New(
-        isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY, TENURED));
-  }
-}
-
-
-void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
-  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
-}
-
-#ifdef DEBUG
-
-class PrintHandleVisitor : public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++)
-      PrintF("  handle %p to %p\n",
-             reinterpret_cast<void*>(p),
-             reinterpret_cast<void*>(*p));
-  }
-};
-
-
-void Heap::PrintHandles() {
-  PrintF("Handles:\n");
-  PrintHandleVisitor v;
-  isolate_->handle_scope_implementer()->Iterate(&v);
-}
-
-#endif
-
-
-Space* AllSpaces::next() {
-  switch (counter_++) {
-    case NEW_SPACE:
-      return heap_->new_space();
-    case OLD_POINTER_SPACE:
-      return heap_->old_pointer_space();
-    case OLD_DATA_SPACE:
-      return heap_->old_data_space();
-    case CODE_SPACE:
-      return heap_->code_space();
-    case MAP_SPACE:
-      return heap_->map_space();
-    case CELL_SPACE:
-      return heap_->cell_space();
-    case PROPERTY_CELL_SPACE:
-      return heap_->property_cell_space();
-    case LO_SPACE:
-      return heap_->lo_space();
-    default:
-      return NULL;
-  }
-}
-
-
-PagedSpace* PagedSpaces::next() {
-  switch (counter_++) {
-    case OLD_POINTER_SPACE:
-      return heap_->old_pointer_space();
-    case OLD_DATA_SPACE:
-      return heap_->old_data_space();
-    case CODE_SPACE:
-      return heap_->code_space();
-    case MAP_SPACE:
-      return heap_->map_space();
-    case CELL_SPACE:
-      return heap_->cell_space();
-    case PROPERTY_CELL_SPACE:
-      return heap_->property_cell_space();
-    default:
-      return NULL;
-  }
-}
-
-
-OldSpace* OldSpaces::next() {
-  switch (counter_++) {
-    case OLD_POINTER_SPACE:
-      return heap_->old_pointer_space();
-    case OLD_DATA_SPACE:
-      return heap_->old_data_space();
-    case CODE_SPACE:
-      return heap_->code_space();
-    default:
-      return NULL;
-  }
-}
-
-
-SpaceIterator::SpaceIterator(Heap* heap)
-    : heap_(heap),
-      current_space_(FIRST_SPACE),
-      iterator_(NULL),
-      size_func_(NULL) {
-}
-
-
-SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
-    : heap_(heap),
-      current_space_(FIRST_SPACE),
-      iterator_(NULL),
-      size_func_(size_func) {
-}
-
-
-SpaceIterator::~SpaceIterator() {
-  // Delete active iterator if any.
-  delete iterator_;
-}
-
-
-bool SpaceIterator::has_next() {
-  // Iterate until no more spaces.
-  return current_space_ != LAST_SPACE;
-}
-
-
-ObjectIterator* SpaceIterator::next() {
-  if (iterator_ != NULL) {
-    delete iterator_;
-    iterator_ = NULL;
-    // Move to the next space
-    current_space_++;
-    if (current_space_ > LAST_SPACE) {
-      return NULL;
-    }
-  }
-
-  // Return iterator for the new current space.
-  return CreateIterator();
-}
-
-
-// Create an iterator for the current space.
-ObjectIterator* SpaceIterator::CreateIterator() {
-  ASSERT(iterator_ == NULL);
-
-  switch (current_space_) {
-    case NEW_SPACE:
-      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
-      break;
-    case OLD_POINTER_SPACE:
-      iterator_ =
-          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
-      break;
-    case OLD_DATA_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
-      break;
-    case CODE_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
-      break;
-    case MAP_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
-      break;
-    case CELL_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
-      break;
-    case PROPERTY_CELL_SPACE:
-      iterator_ = new HeapObjectIterator(heap_->property_cell_space(),
-                                         size_func_);
-      break;
-    case LO_SPACE:
-      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
-      break;
-  }
-
-  // Return the newly allocated iterator.
-  ASSERT(iterator_ != NULL);
-  return iterator_;
-}
-
-
-class HeapObjectsFilter {
- public:
-  virtual ~HeapObjectsFilter() {}
-  virtual bool SkipObject(HeapObject* object) = 0;
-};
-
-
-class UnreachableObjectsFilter : public HeapObjectsFilter {
- public:
-  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
-    MarkReachableObjects();
-  }
-
-  ~UnreachableObjectsFilter() {
-    heap_->mark_compact_collector()->ClearMarkbits();
-  }
-
-  bool SkipObject(HeapObject* object) {
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    return !mark_bit.Get();
-  }
-
- private:
-  class MarkingVisitor : public ObjectVisitor {
-   public:
-    MarkingVisitor() : marking_stack_(10) {}
-
-    void VisitPointers(Object** start, Object** end) {
-      for (Object** p = start; p < end; p++) {
-        if (!(*p)->IsHeapObject()) continue;
-        HeapObject* obj = HeapObject::cast(*p);
-        MarkBit mark_bit = Marking::MarkBitFrom(obj);
-        if (!mark_bit.Get()) {
-          mark_bit.Set();
-          marking_stack_.Add(obj);
-        }
-      }
-    }
-
-    void TransitiveClosure() {
-      while (!marking_stack_.is_empty()) {
-        HeapObject* obj = marking_stack_.RemoveLast();
-        obj->Iterate(this);
-      }
-    }
-
-   private:
-    List<HeapObject*> marking_stack_;
-  };
-
-  void MarkReachableObjects() {
-    MarkingVisitor visitor;
-    heap_->IterateRoots(&visitor, VISIT_ALL);
-    visitor.TransitiveClosure();
-  }
-
-  Heap* heap_;
-  DisallowHeapAllocation no_allocation_;
-};
-
-
-HeapIterator::HeapIterator(Heap* heap)
-    : make_heap_iterable_helper_(heap),
-      no_heap_allocation_(),
-      heap_(heap),
-      filtering_(HeapIterator::kNoFiltering),
-      filter_(NULL) {
-  Init();
-}
-
-
-HeapIterator::HeapIterator(Heap* heap,
-                           HeapIterator::HeapObjectsFiltering filtering)
-    : make_heap_iterable_helper_(heap),
-      no_heap_allocation_(),
-      heap_(heap),
-      filtering_(filtering),
-      filter_(NULL) {
-  Init();
-}
-
-
-HeapIterator::~HeapIterator() {
-  Shutdown();
-}
-
-
-void HeapIterator::Init() {
-  // Start the iteration.
-  space_iterator_ = new SpaceIterator(heap_);
-  switch (filtering_) {
-    case kFilterUnreachable:
-      filter_ = new UnreachableObjectsFilter(heap_);
-      break;
-    default:
-      break;
-  }
-  object_iterator_ = space_iterator_->next();
-}
-
-
-void HeapIterator::Shutdown() {
-#ifdef DEBUG
-  // Assert that in filtering mode we have iterated through all
-  // objects. Otherwise, heap will be left in an inconsistent state.
-  if (filtering_ != kNoFiltering) {
-    ASSERT(object_iterator_ == NULL);
-  }
-#endif
-  // Make sure the last iterator is deallocated.
-  delete space_iterator_;
-  space_iterator_ = NULL;
-  object_iterator_ = NULL;
-  delete filter_;
-  filter_ = NULL;
-}
-
-
-HeapObject* HeapIterator::next() {
-  if (filter_ == NULL) return NextObject();
-
-  HeapObject* obj = NextObject();
-  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
-  return obj;
-}
-
-
-HeapObject* HeapIterator::NextObject() {
-  // No iterator means we are done.
-  if (object_iterator_ == NULL) return NULL;
-
-  if (HeapObject* obj = object_iterator_->next_object()) {
-    // If the current iterator has more objects we are fine.
-    return obj;
-  } else {
-    // Go through the spaces looking for one that has objects.
-    while (space_iterator_->has_next()) {
-      object_iterator_ = space_iterator_->next();
-      if (HeapObject* obj = object_iterator_->next_object()) {
-        return obj;
-      }
-    }
-  }
-  // Done with the last space.
-  object_iterator_ = NULL;
-  return NULL;
-}
-
-
-void HeapIterator::reset() {
-  // Restart the iterator.
-  Shutdown();
-  Init();
-}
-
-
-#ifdef DEBUG
-
-Object* const PathTracer::kAnyGlobalObject = NULL;
-
-class PathTracer::MarkVisitor : public ObjectVisitor {
- public:
-  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-  void VisitPointers(Object** start, Object** end) {
-    // Scan all HeapObject pointers in [start, end)
-    for (Object** p = start; !tracer_->found() && (p < end); p++) {
-      if ((*p)->IsHeapObject())
-        tracer_->MarkRecursively(p, this);
-    }
-  }
-
- private:
-  PathTracer* tracer_;
-};
-
-
-class PathTracer::UnmarkVisitor : public ObjectVisitor {
- public:
-  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
-  void VisitPointers(Object** start, Object** end) {
-    // Scan all HeapObject pointers in [start, end)
-    for (Object** p = start; p < end; p++) {
-      if ((*p)->IsHeapObject())
-        tracer_->UnmarkRecursively(p, this);
-    }
-  }
-
- private:
-  PathTracer* tracer_;
-};
-
-
-void PathTracer::VisitPointers(Object** start, Object** end) {
-  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
-  // Visit all HeapObject pointers in [start, end)
-  for (Object** p = start; !done && (p < end); p++) {
-    if ((*p)->IsHeapObject()) {
-      TracePathFrom(p);
-      done = ((what_to_find_ == FIND_FIRST) && found_target_);
-    }
-  }
-}
-
-
-void PathTracer::Reset() {
-  found_target_ = false;
-  object_stack_.Clear();
-}
-
-
-void PathTracer::TracePathFrom(Object** root) {
-  ASSERT((search_target_ == kAnyGlobalObject) ||
-         search_target_->IsHeapObject());
-  found_target_in_trace_ = false;
-  Reset();
-
-  MarkVisitor mark_visitor(this);
-  MarkRecursively(root, &mark_visitor);
-
-  UnmarkVisitor unmark_visitor(this);
-  UnmarkRecursively(root, &unmark_visitor);
-
-  ProcessResults();
-}
-
-
-static bool SafeIsNativeContext(HeapObject* obj) {
-  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
-}
-
-
-void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
-  if (!(*p)->IsHeapObject()) return;
-
-  HeapObject* obj = HeapObject::cast(*p);
-
-  MapWord map_word = obj->map_word();
-  if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
-
-  if (found_target_in_trace_) return;  // stop if target found
-  object_stack_.Add(obj);
-  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
-      (obj == search_target_)) {
-    found_target_in_trace_ = true;
-    found_target_ = true;
-    return;
-  }
-
-  bool is_native_context = SafeIsNativeContext(obj);
-
-  // not visited yet
-  Map* map = Map::cast(map_word.ToMap());
-
-  MapWord marked_map_word =
-      MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
-  obj->set_map_word(marked_map_word);
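-  // Adding kMarkTag makes the map word stop looking like a heap pointer,
-  // which is what the "visited before" check above and UnmarkRecursively
-  // (which subtracts kMarkTag again) rely on.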
-
-  // Scan the object body.
-  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
-    // This is specialized to scan Context's properly.
-    Object** start = reinterpret_cast<Object**>(obj->address() +
-                                                Context::kHeaderSize);
-    Object** end = reinterpret_cast<Object**>(obj->address() +
-        Context::kHeaderSize + Context::FIRST_WEAK_SLOT * kPointerSize);
-    mark_visitor->VisitPointers(start, end);
-  } else {
-    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
-  }
-
-  // Scan the map after the body because the body is a lot more interesting
-  // when doing leak detection.
-  MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
-
-  if (!found_target_in_trace_) {  // don't pop if found the target
-    object_stack_.RemoveLast();
-  }
-}
-
-
-void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
-  if (!(*p)->IsHeapObject()) return;
-
-  HeapObject* obj = HeapObject::cast(*p);
-
-  MapWord map_word = obj->map_word();
-  if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
-
-  MapWord unmarked_map_word =
-      MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
-  obj->set_map_word(unmarked_map_word);
-
-  Map* map = Map::cast(unmarked_map_word.ToMap());
-
-  UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
-
-  obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
-}
-
-
-void PathTracer::ProcessResults() {
-  if (found_target_) {
-    PrintF("=====================================\n");
-    PrintF("====        Path to object       ====\n");
-    PrintF("=====================================\n\n");
-
-    ASSERT(!object_stack_.is_empty());
-    for (int i = 0; i < object_stack_.length(); i++) {
-      if (i > 0) PrintF("\n     |\n     |\n     V\n\n");
-      Object* obj = object_stack_[i];
-      obj->Print();
-    }
-    PrintF("=====================================\n");
-  }
-}
-
-
-// Triggers a depth-first traversal of reachable objects from one
-// given root object and finds a path to a specific heap object and
-// prints it.
-void Heap::TracePathToObjectFrom(Object* target, Object* root) {
-  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
-  tracer.VisitPointer(&root);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to a specific heap object and prints it.
-void Heap::TracePathToObject(Object* target) {
-  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
-  IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-
-
-// Triggers a depth-first traversal of reachable objects from roots
-// and finds a path to any global object and prints it. Useful for
-// determining the source for leaks of global objects.
-void Heap::TracePathToGlobal() {
-  PathTracer tracer(PathTracer::kAnyGlobalObject,
-                    PathTracer::FIND_ALL,
-                    VISIT_ALL);
-  IterateRoots(&tracer, VISIT_ONLY_STRONG);
-}
-#endif
-
-
-static intptr_t CountTotalHolesSize(Heap* heap) {
-  intptr_t holes_size = 0;
-  OldSpaces spaces(heap);
-  for (OldSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    holes_size += space->Waste() + space->Available();
-  }
-  return holes_size;
-}
-
-
-GCTracer::GCTracer(Heap* heap,
-                   const char* gc_reason,
-                   const char* collector_reason)
-    : start_time_(0.0),
-      start_object_size_(0),
-      start_memory_size_(0),
-      gc_count_(0),
-      full_gc_count_(0),
-      allocated_since_last_gc_(0),
-      spent_in_mutator_(0),
-      nodes_died_in_new_space_(0),
-      nodes_copied_in_new_space_(0),
-      nodes_promoted_(0),
-      heap_(heap),
-      gc_reason_(gc_reason),
-      collector_reason_(collector_reason) {
-  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
-  start_time_ = OS::TimeCurrentMillis();
-  start_object_size_ = heap_->SizeOfObjects();
-  start_memory_size_ = heap_->isolate()->memory_allocator()->Size();
-
-  for (int i = 0; i < Scope::kNumberOfScopes; i++) {
-    scopes_[i] = 0;
-  }
-
-  in_free_list_or_wasted_before_gc_ = CountTotalHolesSize(heap);
-
-  allocated_since_last_gc_ =
-      heap_->SizeOfObjects() - heap_->alive_after_last_gc_;
-
-  if (heap_->last_gc_end_timestamp_ > 0) {
-    spent_in_mutator_ = Max(start_time_ - heap_->last_gc_end_timestamp_, 0.0);
-  }
-
-  steps_count_ = heap_->incremental_marking()->steps_count();
-  steps_took_ = heap_->incremental_marking()->steps_took();
-  longest_step_ = heap_->incremental_marking()->longest_step();
-  steps_count_since_last_gc_ =
-      heap_->incremental_marking()->steps_count_since_last_gc();
-  steps_took_since_last_gc_ =
-      heap_->incremental_marking()->steps_took_since_last_gc();
-}
-
-
-GCTracer::~GCTracer() {
-  // Printf ONE line iff flag is set.
-  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
-
-  bool first_gc = (heap_->last_gc_end_timestamp_ == 0);
-
-  heap_->alive_after_last_gc_ = heap_->SizeOfObjects();
-  heap_->last_gc_end_timestamp_ = OS::TimeCurrentMillis();
-
-  double time = heap_->last_gc_end_timestamp_ - start_time_;
-
-  // Update cumulative GC statistics if required.
-  if (FLAG_print_cumulative_gc_stat) {
-    heap_->total_gc_time_ms_ += time;
-    heap_->max_gc_pause_ = Max(heap_->max_gc_pause_, time);
-    heap_->max_alive_after_gc_ = Max(heap_->max_alive_after_gc_,
-                                     heap_->alive_after_last_gc_);
-    if (!first_gc) {
-      heap_->min_in_mutator_ = Min(heap_->min_in_mutator_,
-                                   spent_in_mutator_);
-    }
-  } else if (FLAG_trace_gc_verbose) {
-    heap_->total_gc_time_ms_ += time;
-  }
-
-  if (collector_ == SCAVENGER && FLAG_trace_gc_ignore_scavenger) return;
-
-  heap_->AddMarkingTime(scopes_[Scope::MC_MARK]);
-
-  if (FLAG_print_cumulative_gc_stat && !FLAG_trace_gc) return;
-  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
-
-  if (!FLAG_trace_gc_nvp) {
-    int external_time = static_cast<int>(scopes_[Scope::EXTERNAL]);
-
-    double end_memory_size_mb =
-        static_cast<double>(heap_->isolate()->memory_allocator()->Size()) / MB;
-
-    PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ",
-           CollectorString(),
-           static_cast<double>(start_object_size_) / MB,
-           static_cast<double>(start_memory_size_) / MB,
-           SizeOfHeapObjects(),
-           end_memory_size_mb);
-
-    if (external_time > 0) PrintF("%d / ", external_time);
-    PrintF("%.1f ms", time);
-    if (steps_count_ > 0) {
-      if (collector_ == SCAVENGER) {
-        PrintF(" (+ %.1f ms in %d steps since last GC)",
-               steps_took_since_last_gc_,
-               steps_count_since_last_gc_);
-      } else {
-        PrintF(" (+ %.1f ms in %d steps since start of marking, "
-                   "biggest step %.1f ms)",
-               steps_took_,
-               steps_count_,
-               longest_step_);
-      }
-    }
-
-    if (gc_reason_ != NULL) {
-      PrintF(" [%s]", gc_reason_);
-    }
-
-    if (collector_reason_ != NULL) {
-      PrintF(" [%s]", collector_reason_);
-    }
-
-    PrintF(".\n");
-  } else {
-    PrintF("pause=%.1f ", time);
-    PrintF("mutator=%.1f ", spent_in_mutator_);
-    PrintF("gc=");
-    switch (collector_) {
-      case SCAVENGER:
-        PrintF("s");
-        break;
-      case MARK_COMPACTOR:
-        PrintF("ms");
-        break;
-      default:
-        UNREACHABLE();
-    }
-    PrintF(" ");
-
-    PrintF("external=%.1f ", scopes_[Scope::EXTERNAL]);
-    PrintF("mark=%.1f ", scopes_[Scope::MC_MARK]);
-    PrintF("sweep=%.2f ", scopes_[Scope::MC_SWEEP]);
-    PrintF("sweepns=%.2f ", scopes_[Scope::MC_SWEEP_NEWSPACE]);
-    PrintF("sweepos=%.2f ", scopes_[Scope::MC_SWEEP_OLDSPACE]);
-    PrintF("evacuate=%.1f ", scopes_[Scope::MC_EVACUATE_PAGES]);
-    PrintF("new_new=%.1f ", scopes_[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
-    PrintF("root_new=%.1f ", scopes_[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
-    PrintF("old_new=%.1f ", scopes_[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
-    PrintF("compaction_ptrs=%.1f ",
-        scopes_[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
-    PrintF("intracompaction_ptrs=%.1f ",
-        scopes_[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
-    PrintF("misc_compaction=%.1f ", scopes_[Scope::MC_UPDATE_MISC_POINTERS]);
-    PrintF("weakcollection_process=%.1f ",
-        scopes_[Scope::MC_WEAKCOLLECTION_PROCESS]);
-    PrintF("weakcollection_clear=%.1f ",
-        scopes_[Scope::MC_WEAKCOLLECTION_CLEAR]);
-
-    PrintF("total_size_before=%" V8_PTR_PREFIX "d ", start_object_size_);
-    PrintF("total_size_after=%" V8_PTR_PREFIX "d ", heap_->SizeOfObjects());
-    PrintF("holes_size_before=%" V8_PTR_PREFIX "d ",
-           in_free_list_or_wasted_before_gc_);
-    PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", CountTotalHolesSize(heap_));
-
-    PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc_);
-    PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
-    PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
-        heap_->semi_space_copied_object_size_);
-    PrintF("nodes_died_in_new=%d ", nodes_died_in_new_space_);
-    PrintF("nodes_copied_in_new=%d ", nodes_copied_in_new_space_);
-    PrintF("nodes_promoted=%d ", nodes_promoted_);
-    PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
-    PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
-
-    if (collector_ == SCAVENGER) {
-      PrintF("stepscount=%d ", steps_count_since_last_gc_);
-      PrintF("stepstook=%.1f ", steps_took_since_last_gc_);
-    } else {
-      PrintF("stepscount=%d ", steps_count_);
-      PrintF("stepstook=%.1f ", steps_took_);
-      PrintF("longeststep=%.1f ", longest_step_);
-    }
-
-    PrintF("\n");
-  }
-
-  heap_->PrintShortHeapStatistics();
-}
-
-
-const char* GCTracer::CollectorString() {
-  switch (collector_) {
-    case SCAVENGER:
-      return "Scavenge";
-    case MARK_COMPACTOR:
-      return "Mark-sweep";
-  }
-  return "Unknown GC";
-}
-
-
-int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
-  DisallowHeapAllocation no_gc;
-  // Uses only lower 32 bits if pointers are larger.
-  uintptr_t addr_hash =
-      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
-  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
-}
-
-
-int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
-  DisallowHeapAllocation no_gc;
-  int index = (Hash(map, name) & kHashMask);
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index + i];
-    if ((key.map == *map) && key.name->Equals(*name)) {
-      return field_offsets_[index + i];
-    }
-  }
-  return kNotFound;
-}
-
-
-void KeyedLookupCache::Update(Handle<Map> map,
-                              Handle<Name> name,
-                              int field_offset) {
-  DisallowHeapAllocation no_gc;
-  if (!name->IsUniqueName()) {
-    if (!StringTable::InternalizeStringIfExists(name->GetIsolate(),
-                                                Handle<String>::cast(name)).
-        ToHandle(&name)) {
-      return;
-    }
-  }
-  // This cache is cleared only between mark compact passes, so we expect the
-  // cache to only contain old space names.
-  ASSERT(!map->GetIsolate()->heap()->InNewSpace(*name));
-
-  int index = (Hash(map, name) & kHashMask);
-  // After a GC there will be free slots, so we use them in order (this may
-  // help to get the most frequently used one in position 0).
-  for (int i = 0; i < kEntriesPerBucket; i++) {
-    Key& key = keys_[index + i];
-    Object* free_entry_indicator = NULL;
-    if (key.map == free_entry_indicator) {
-      key.map = *map;
-      key.name = *name;
-      field_offsets_[index + i] = field_offset;
-      return;
-    }
-  }
-  // No free entry found in this bucket, so we move them all down one and
-  // put the new entry at position zero.
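-  // Illustrative (assuming kEntriesPerBucket == 4): a full bucket
-  // [A, B, C, D] becomes [new, A, B, C], evicting the oldest entry D.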
-  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
-    Key& key = keys_[index + i];
-    Key& key2 = keys_[index + i - 1];
-    key = key2;
-    field_offsets_[index + i] = field_offsets_[index + i - 1];
-  }
-
-  // Write the new first entry.
-  Key& key = keys_[index];
-  key.map = *map;
-  key.name = *name;
-  field_offsets_[index] = field_offset;
-}
-
-
-void KeyedLookupCache::Clear() {
-  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
-}
-
-
-void DescriptorLookupCache::Clear() {
-  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
-}
-
-
-void ExternalStringTable::CleanUp() {
-  int last = 0;
-  for (int i = 0; i < new_space_strings_.length(); ++i) {
-    if (new_space_strings_[i] == heap_->the_hole_value()) {
-      continue;
-    }
-    ASSERT(new_space_strings_[i]->IsExternalString());
-    if (heap_->InNewSpace(new_space_strings_[i])) {
-      new_space_strings_[last++] = new_space_strings_[i];
-    } else {
-      old_space_strings_.Add(new_space_strings_[i]);
-    }
-  }
-  new_space_strings_.Rewind(last);
-  new_space_strings_.Trim();
-
-  last = 0;
-  for (int i = 0; i < old_space_strings_.length(); ++i) {
-    if (old_space_strings_[i] == heap_->the_hole_value()) {
-      continue;
-    }
-    ASSERT(old_space_strings_[i]->IsExternalString());
-    ASSERT(!heap_->InNewSpace(old_space_strings_[i]));
-    old_space_strings_[last++] = old_space_strings_[i];
-  }
-  old_space_strings_.Rewind(last);
-  old_space_strings_.Trim();
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    Verify();
-  }
-#endif
-}
-
-
-void ExternalStringTable::TearDown() {
-  for (int i = 0; i < new_space_strings_.length(); ++i) {
-    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
-  }
-  new_space_strings_.Free();
-  for (int i = 0; i < old_space_strings_.length(); ++i) {
-    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
-  }
-  old_space_strings_.Free();
-}
-
-
-void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
-  chunk->set_next_chunk(chunks_queued_for_free_);
-  chunks_queued_for_free_ = chunk;
-}
-
-
-void Heap::FreeQueuedChunks() {
-  if (chunks_queued_for_free_ == NULL) return;
-  MemoryChunk* next;
-  MemoryChunk* chunk;
-  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
-    next = chunk->next_chunk();
-    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-
-    if (chunk->owner()->identity() == LO_SPACE) {
-      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
-      // If FromAnyPointerAddress encounters a slot that belongs to a large
-      // chunk queued for deletion, it will fail to find the chunk because it
-      // searches the list of pages owned by the large object space, and
-      // queued chunks were detached from that list.
-      // To work around this we split the large chunk into normal, kPageSize
-      // aligned pieces and initialize the size, owner and flags field of
-      // every piece.
-      // If FromAnyPointerAddress encounters a slot that belongs to one of
-      // these smaller pieces, it will treat it as a slot on a normal Page.
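-      // Illustrative (assuming a 1 MB Page::kPageSize and a 3.5 MB chunk):
-      // fake headers are written at offsets 1 MB, 2 MB and 3 MB, and the last
-      // piece's area end is clamped to chunk_end, covering the final 0.5 MB.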
-      Address chunk_end = chunk->address() + chunk->size();
-      MemoryChunk* inner = MemoryChunk::FromAddress(
-          chunk->address() + Page::kPageSize);
-      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
-      while (inner <= inner_last) {
-        // Size of a large chunk is always a multiple of
-        // OS::AllocateAlignment() so there is always
-        // enough space for a fake MemoryChunk header.
-        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
-        // Guard against overflow.
-        if (area_end < inner->address()) area_end = chunk_end;
-        inner->SetArea(inner->address(), area_end);
-        inner->set_size(Page::kPageSize);
-        inner->set_owner(lo_space());
-        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
-        inner = MemoryChunk::FromAddress(
-            inner->address() + Page::kPageSize);
-      }
-    }
-  }
-  isolate_->heap()->store_buffer()->Compact();
-  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
-  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
-    next = chunk->next_chunk();
-    isolate_->memory_allocator()->Free(chunk);
-  }
-  chunks_queued_for_free_ = NULL;
-}
-
-
-void Heap::RememberUnmappedPage(Address page, bool compacted) {
-  uintptr_t p = reinterpret_cast<uintptr_t>(page);
-  // Tag the page pointer to make it findable in the dump file.
-  if (compacted) {
-    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
-  } else {
-    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
-  }
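-  // Illustrative (assuming a 1 MB, i.e. 20-bit, page size): page addresses
-  // are page-aligned, so their low 20 bits are zero and the XOR simply
-  // writes the 0xc1ead / 0x1d1ed marker into them while keeping the page
-  // base recoverable from the dump.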
-  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
-      reinterpret_cast<Address>(p);
-  remembered_unmapped_pages_index_++;
-  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
-}
-
-
-void Heap::ClearObjectStats(bool clear_last_time_stats) {
-  memset(object_counts_, 0, sizeof(object_counts_));
-  memset(object_sizes_, 0, sizeof(object_sizes_));
-  if (clear_last_time_stats) {
-    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
-    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
-  }
-}
-
-
-static LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
-
-
-void Heap::CheckpointObjectStats() {
-  LockGuard<Mutex> lock_guard(checkpoint_object_stats_mutex.Pointer());
-  Counters* counters = isolate()->counters();
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                    \
-  counters->count_of_##name()->Increment(                                      \
-      static_cast<int>(object_counts_[name]));                                 \
-  counters->count_of_##name()->Decrement(                                      \
-      static_cast<int>(object_counts_last_time_[name]));                       \
-  counters->size_of_##name()->Increment(                                       \
-      static_cast<int>(object_sizes_[name]));                                  \
-  counters->size_of_##name()->Decrement(                                       \
-      static_cast<int>(object_sizes_last_time_[name]));
-  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
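-  // Net effect of each expansion above: every counter moves by
-  // (current - last_time), i.e. it accumulates the delta since the previous
-  // checkpoint rather than the absolute count.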
-  int index;
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
-  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
-  counters->count_of_CODE_TYPE_##name()->Increment(       \
-      static_cast<int>(object_counts_[index]));           \
-  counters->count_of_CODE_TYPE_##name()->Decrement(       \
-      static_cast<int>(object_counts_last_time_[index])); \
-  counters->size_of_CODE_TYPE_##name()->Increment(        \
-      static_cast<int>(object_sizes_[index]));            \
-  counters->size_of_CODE_TYPE_##name()->Decrement(        \
-      static_cast<int>(object_sizes_last_time_[index]));
-  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
-  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
-  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
-      static_cast<int>(object_counts_[index]));           \
-  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
-      static_cast<int>(object_counts_last_time_[index])); \
-  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
-      static_cast<int>(object_sizes_[index]));            \
-  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
-      static_cast<int>(object_sizes_last_time_[index]));
-  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
-  index =                                                                     \
-      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
-  counters->count_of_CODE_AGE_##name()->Increment(                            \
-      static_cast<int>(object_counts_[index]));                               \
-  counters->count_of_CODE_AGE_##name()->Decrement(                            \
-      static_cast<int>(object_counts_last_time_[index]));                     \
-  counters->size_of_CODE_AGE_##name()->Increment(                             \
-      static_cast<int>(object_sizes_[index]));                                \
-  counters->size_of_CODE_AGE_##name()->Decrement(                             \
-      static_cast<int>(object_sizes_last_time_[index]));
-  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
-#undef ADJUST_LAST_TIME_OBJECT_COUNT
-
-  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
-  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
-  ClearObjectStats();
-}
-
-} }  // namespace v8::internal
diff --git a/src/heap.h b/src/heap.h
deleted file mode 100644
index d05e350..0000000
--- a/src/heap.h
+++ /dev/null
@@ -1,2754 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_HEAP_H_
-#define V8_HEAP_H_
-
-#include <cmath>
-
-#include "src/allocation.h"
-#include "src/assert-scope.h"
-#include "src/counters.h"
-#include "src/globals.h"
-#include "src/incremental-marking.h"
-#include "src/list.h"
-#include "src/mark-compact.h"
-#include "src/objects-visiting.h"
-#include "src/spaces.h"
-#include "src/splay-tree-inl.h"
-#include "src/store-buffer.h"
-
-namespace v8 {
-namespace internal {
-
-// Defines all the roots in Heap.
-#define STRONG_ROOT_LIST(V)                                                    \
-  V(Map, byte_array_map, ByteArrayMap)                                         \
-  V(Map, free_space_map, FreeSpaceMap)                                         \
-  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
-  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
-  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
-  V(Smi, store_buffer_top, StoreBufferTop)                                     \
-  V(Oddball, undefined_value, UndefinedValue)                                  \
-  V(Oddball, the_hole_value, TheHoleValue)                                     \
-  V(Oddball, null_value, NullValue)                                            \
-  V(Oddball, true_value, TrueValue)                                            \
-  V(Oddball, false_value, FalseValue)                                          \
-  V(Oddball, uninitialized_value, UninitializedValue)                          \
-  V(Oddball, exception, Exception)                                             \
-  V(Map, cell_map, CellMap)                                                    \
-  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
-  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
-  V(Map, meta_map, MetaMap)                                                    \
-  V(Map, heap_number_map, HeapNumberMap)                                       \
-  V(Map, native_context_map, NativeContextMap)                                 \
-  V(Map, fixed_array_map, FixedArrayMap)                                       \
-  V(Map, code_map, CodeMap)                                                    \
-  V(Map, scope_info_map, ScopeInfoMap)                                         \
-  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
-  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
-  V(Map, constant_pool_array_map, ConstantPoolArrayMap)                        \
-  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
-  V(Map, hash_table_map, HashTableMap)                                         \
-  V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
-  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
-  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
-  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
-  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray)      \
-  V(Oddball, arguments_marker, ArgumentsMarker)                                \
-  /* The roots above this line should be boring from a GC point of view.    */ \
-  /* This means they are never in new space and never on a page that is     */ \
-  /* being compacted.                                                       */ \
-  V(FixedArray, number_string_cache, NumberStringCache)                        \
-  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
-  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
-  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
-  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
-  V(FixedArray, string_split_cache, StringSplitCache)                          \
-  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
-  V(Oddball, termination_exception, TerminationException)                      \
-  V(Smi, hash_seed, HashSeed)                                                  \
-  V(Map, symbol_map, SymbolMap)                                                \
-  V(Map, string_map, StringMap)                                                \
-  V(Map, ascii_string_map, AsciiStringMap)                                     \
-  V(Map, cons_string_map, ConsStringMap)                                       \
-  V(Map, cons_ascii_string_map, ConsAsciiStringMap)                            \
-  V(Map, sliced_string_map, SlicedStringMap)                                   \
-  V(Map, sliced_ascii_string_map, SlicedAsciiStringMap)                        \
-  V(Map, external_string_map, ExternalStringMap)                               \
-  V(Map,                                                                       \
-    external_string_with_one_byte_data_map,                                    \
-    ExternalStringWithOneByteDataMap)                                          \
-  V(Map, external_ascii_string_map, ExternalAsciiStringMap)                    \
-  V(Map, short_external_string_map, ShortExternalStringMap)                    \
-  V(Map,                                                                       \
-    short_external_string_with_one_byte_data_map,                              \
-    ShortExternalStringWithOneByteDataMap)                                     \
-  V(Map, internalized_string_map, InternalizedStringMap)                       \
-  V(Map, ascii_internalized_string_map, AsciiInternalizedStringMap)            \
-  V(Map,                                                                       \
-    external_internalized_string_map,                                          \
-    ExternalInternalizedStringMap)                                             \
-  V(Map,                                                                       \
-    external_internalized_string_with_one_byte_data_map,                       \
-    ExternalInternalizedStringWithOneByteDataMap)                              \
-  V(Map,                                                                       \
-    external_ascii_internalized_string_map,                                    \
-    ExternalAsciiInternalizedStringMap)                                        \
-  V(Map,                                                                       \
-    short_external_internalized_string_map,                                    \
-    ShortExternalInternalizedStringMap)                                        \
-  V(Map,                                                                       \
-    short_external_internalized_string_with_one_byte_data_map,                 \
-    ShortExternalInternalizedStringWithOneByteDataMap)                         \
-  V(Map,                                                                       \
-    short_external_ascii_internalized_string_map,                              \
-    ShortExternalAsciiInternalizedStringMap)                                   \
-  V(Map, short_external_ascii_string_map, ShortExternalAsciiStringMap)         \
-  V(Map, undetectable_string_map, UndetectableStringMap)                       \
-  V(Map, undetectable_ascii_string_map, UndetectableAsciiStringMap)            \
-  V(Map, external_int8_array_map, ExternalInt8ArrayMap)                        \
-  V(Map, external_uint8_array_map, ExternalUint8ArrayMap)                      \
-  V(Map, external_int16_array_map, ExternalInt16ArrayMap)                      \
-  V(Map, external_uint16_array_map, ExternalUint16ArrayMap)                    \
-  V(Map, external_int32_array_map, ExternalInt32ArrayMap)                      \
-  V(Map, external_uint32_array_map, ExternalUint32ArrayMap)                    \
-  V(Map, external_float32_array_map, ExternalFloat32ArrayMap)                  \
-  V(Map, external_float64_array_map, ExternalFloat64ArrayMap)                  \
-  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap)       \
-  V(ExternalArray, empty_external_int8_array,                                  \
-      EmptyExternalInt8Array)                                                  \
-  V(ExternalArray, empty_external_uint8_array,                                 \
-      EmptyExternalUint8Array)                                                 \
-  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array)        \
-  V(ExternalArray, empty_external_uint16_array,                                \
-      EmptyExternalUint16Array)                                                \
-  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array)        \
-  V(ExternalArray, empty_external_uint32_array,                                \
-      EmptyExternalUint32Array)                                                \
-  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array)    \
-  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array)    \
-  V(ExternalArray, empty_external_uint8_clamped_array,                         \
-      EmptyExternalUint8ClampedArray)                                          \
-  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
-  V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
-  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
-  V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                            \
-  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                          \
-  V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                            \
-  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
-  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
-  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
-  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
-  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
-  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
-  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)        \
-  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)      \
-  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)        \
-  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)    \
-  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
-  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
-      EmptyFixedUint8ClampedArray)                                             \
-  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
-  V(Map, function_context_map, FunctionContextMap)                             \
-  V(Map, catch_context_map, CatchContextMap)                                   \
-  V(Map, with_context_map, WithContextMap)                                     \
-  V(Map, block_context_map, BlockContextMap)                                   \
-  V(Map, module_context_map, ModuleContextMap)                                 \
-  V(Map, global_context_map, GlobalContextMap)                                 \
-  V(Map, undefined_map, UndefinedMap)                                          \
-  V(Map, the_hole_map, TheHoleMap)                                             \
-  V(Map, null_map, NullMap)                                                    \
-  V(Map, boolean_map, BooleanMap)                                              \
-  V(Map, uninitialized_map, UninitializedMap)                                  \
-  V(Map, arguments_marker_map, ArgumentsMarkerMap)                             \
-  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap)   \
-  V(Map, exception_map, ExceptionMap)                                          \
-  V(Map, termination_exception_map, TerminationExceptionMap)                   \
-  V(Map, message_object_map, JSMessageObjectMap)                               \
-  V(Map, foreign_map, ForeignMap)                                              \
-  V(HeapNumber, nan_value, NanValue)                                           \
-  V(HeapNumber, infinity_value, InfinityValue)                                 \
-  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
-  V(Map, neander_map, NeanderMap)                                              \
-  V(JSObject, message_listeners, MessageListeners)                             \
-  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
-  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)      \
-  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache)        \
-  V(Code, js_entry_code, JsEntryCode)                                          \
-  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
-  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
-  V(Script, empty_script, EmptyScript)                                         \
-  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
-  V(Cell, undefined_cell, UndefinedCell)                                      \
-  V(JSObject, observation_state, ObservationState)                             \
-  V(Map, external_map, ExternalMap)                                            \
-  V(Object, symbol_registry, SymbolRegistry)                                   \
-  V(Symbol, frozen_symbol, FrozenSymbol)                                       \
-  V(Symbol, nonexistent_symbol, NonExistentSymbol)                             \
-  V(Symbol, elements_transition_symbol, ElementsTransitionSymbol)              \
-  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
-      EmptySlowElementDictionary)                                              \
-  V(Symbol, observed_symbol, ObservedSymbol)                                   \
-  V(Symbol, uninitialized_symbol, UninitializedSymbol)                         \
-  V(Symbol, megamorphic_symbol, MegamorphicSymbol)                             \
-  V(FixedArray, materialized_objects, MaterializedObjects)                     \
-  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)        \
-  V(FixedArray, microtask_queue, MicrotaskQueue)
-
-// Entries in this list are limited to Smis and are not visited during GC.
-#define SMI_ROOT_LIST(V)                                                       \
-  V(Smi, stack_limit, StackLimit)                                              \
-  V(Smi, real_stack_limit, RealStackLimit)                                     \
-  V(Smi, last_script_id, LastScriptId)                                         \
-  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset)     \
-  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)           \
-  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)                 \
-  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
-
-#define ROOT_LIST(V)                                  \
-  STRONG_ROOT_LIST(V)                                 \
-  SMI_ROOT_LIST(V)                                    \
-  V(StringTable, string_table, StringTable)
-
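-// A minimal sketch of how these X-macro lists are consumed: a client
-// supplies the V macro, instantiates the list, and undefines the macro
-// again. COUNT_ROOT and kStrongRootCount below are illustrative names only,
-// not part of the original header. Counting the strong roots, for example:
-#define COUNT_ROOT(type, name, camel_name) +1
-static const int kStrongRootCount = 0 STRONG_ROOT_LIST(COUNT_ROOT);
-#undef COUNT_ROOT
-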
-// Heap roots that are known to be immortal immovable, for which we can safely
-// skip write barriers.
-#define IMMORTAL_IMMOVABLE_ROOT_LIST(V)   \
-  V(byte_array_map)                       \
-  V(free_space_map)                       \
-  V(one_pointer_filler_map)               \
-  V(two_pointer_filler_map)               \
-  V(undefined_value)                      \
-  V(the_hole_value)                       \
-  V(null_value)                           \
-  V(true_value)                           \
-  V(false_value)                          \
-  V(uninitialized_value)                  \
-  V(cell_map)                             \
-  V(global_property_cell_map)             \
-  V(shared_function_info_map)             \
-  V(meta_map)                             \
-  V(heap_number_map)                      \
-  V(native_context_map)                   \
-  V(fixed_array_map)                      \
-  V(code_map)                             \
-  V(scope_info_map)                       \
-  V(fixed_cow_array_map)                  \
-  V(fixed_double_array_map)               \
-  V(constant_pool_array_map)              \
-  V(no_interceptor_result_sentinel)       \
-  V(hash_table_map)                       \
-  V(ordered_hash_table_map)               \
-  V(empty_fixed_array)                    \
-  V(empty_byte_array)                     \
-  V(empty_descriptor_array)               \
-  V(empty_constant_pool_array)            \
-  V(arguments_marker)                     \
-  V(symbol_map)                           \
-  V(sloppy_arguments_elements_map)        \
-  V(function_context_map)                 \
-  V(catch_context_map)                    \
-  V(with_context_map)                     \
-  V(block_context_map)                    \
-  V(module_context_map)                   \
-  V(global_context_map)                   \
-  V(undefined_map)                        \
-  V(the_hole_map)                         \
-  V(null_map)                             \
-  V(boolean_map)                          \
-  V(uninitialized_map)                    \
-  V(message_object_map)                   \
-  V(foreign_map)                          \
-  V(neander_map)
-
-#define INTERNALIZED_STRING_LIST(V)                                      \
-  V(Array_string, "Array")                                               \
-  V(Object_string, "Object")                                             \
-  V(proto_string, "__proto__")                                           \
-  V(arguments_string, "arguments")                                       \
-  V(Arguments_string, "Arguments")                                       \
-  V(call_string, "call")                                                 \
-  V(apply_string, "apply")                                               \
-  V(caller_string, "caller")                                             \
-  V(boolean_string, "boolean")                                           \
-  V(Boolean_string, "Boolean")                                           \
-  V(callee_string, "callee")                                             \
-  V(constructor_string, "constructor")                                   \
-  V(dot_result_string, ".result")                                        \
-  V(dot_for_string, ".for.")                                             \
-  V(dot_iterable_string, ".iterable")                                    \
-  V(dot_iterator_string, ".iterator")                                    \
-  V(dot_generator_object_string, ".generator_object")                    \
-  V(eval_string, "eval")                                                 \
-  V(empty_string, "")                                                    \
-  V(function_string, "function")                                         \
-  V(length_string, "length")                                             \
-  V(module_string, "module")                                             \
-  V(name_string, "name")                                                 \
-  V(native_string, "native")                                             \
-  V(null_string, "null")                                                 \
-  V(number_string, "number")                                             \
-  V(Number_string, "Number")                                             \
-  V(nan_string, "NaN")                                                   \
-  V(RegExp_string, "RegExp")                                             \
-  V(source_string, "source")                                             \
-  V(global_string, "global")                                             \
-  V(ignore_case_string, "ignoreCase")                                    \
-  V(multiline_string, "multiline")                                       \
-  V(input_string, "input")                                               \
-  V(index_string, "index")                                               \
-  V(last_index_string, "lastIndex")                                      \
-  V(object_string, "object")                                             \
-  V(literals_string, "literals")                                         \
-  V(prototype_string, "prototype")                                       \
-  V(string_string, "string")                                             \
-  V(String_string, "String")                                             \
-  V(symbol_string, "symbol")                                             \
-  V(Symbol_string, "Symbol")                                             \
-  V(for_string, "for")                                                   \
-  V(for_api_string, "for_api")                                           \
-  V(for_intern_string, "for_intern")                                     \
-  V(private_api_string, "private_api")                                   \
-  V(private_intern_string, "private_intern")                             \
-  V(Date_string, "Date")                                                 \
-  V(this_string, "this")                                                 \
-  V(to_string_string, "toString")                                        \
-  V(char_at_string, "CharAt")                                            \
-  V(undefined_string, "undefined")                                       \
-  V(value_of_string, "valueOf")                                          \
-  V(stack_string, "stack")                                               \
-  V(toJSON_string, "toJSON")                                             \
-  V(InitializeVarGlobal_string, "InitializeVarGlobal")                   \
-  V(InitializeConstGlobal_string, "InitializeConstGlobal")               \
-  V(KeyedLoadElementMonomorphic_string,                                  \
-    "KeyedLoadElementMonomorphic")                                       \
-  V(KeyedStoreElementMonomorphic_string,                                 \
-    "KeyedStoreElementMonomorphic")                                      \
-  V(stack_overflow_string, "kStackOverflowBoilerplate")                  \
-  V(illegal_access_string, "illegal access")                             \
-  V(get_string, "get")                                                   \
-  V(set_string, "set")                                                   \
-  V(map_field_string, "%map")                                            \
-  V(elements_field_string, "%elements")                                  \
-  V(length_field_string, "%length")                                      \
-  V(cell_value_string, "%cell_value")                                    \
-  V(function_class_string, "Function")                                   \
-  V(illegal_argument_string, "illegal argument")                         \
-  V(MakeReferenceError_string, "MakeReferenceError")                     \
-  V(MakeSyntaxError_string, "MakeSyntaxError")                           \
-  V(MakeTypeError_string, "MakeTypeError")                               \
-  V(unknown_label_string, "unknown_label")                               \
-  V(space_string, " ")                                                   \
-  V(exec_string, "exec")                                                 \
-  V(zero_string, "0")                                                    \
-  V(global_eval_string, "GlobalEval")                                    \
-  V(identity_hash_string, "v8::IdentityHash")                            \
-  V(closure_string, "(closure)")                                         \
-  V(use_strict_string, "use strict")                                     \
-  V(dot_string, ".")                                                     \
-  V(anonymous_function_string, "(anonymous function)")                   \
-  V(compare_ic_string, "==")                                             \
-  V(strict_compare_ic_string, "===")                                     \
-  V(infinity_string, "Infinity")                                         \
-  V(minus_infinity_string, "-Infinity")                                  \
-  V(hidden_stack_trace_string, "v8::hidden_stack_trace")                 \
-  V(query_colon_string, "(?:)")                                          \
-  V(Generator_string, "Generator")                                       \
-  V(throw_string, "throw")                                               \
-  V(done_string, "done")                                                 \
-  V(value_string, "value")                                               \
-  V(next_string, "next")                                                 \
-  V(byte_length_string, "byteLength")                                    \
-  V(byte_offset_string, "byteOffset")                                    \
-  V(buffer_string, "buffer")                                             \
-  V(intl_initialized_marker_string, "v8::intl_initialized_marker")       \
-  V(intl_impl_object_string, "v8::intl_object")
-
-// Forward declarations.
-class GCTracer;
-class HeapStats;
-class Isolate;
-class WeakObjectRetainer;
-
-
-typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
-                                                      Object** pointer);
-
-class StoreBufferRebuilder {
- public:
-  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
-      : store_buffer_(store_buffer) {
-  }
-
-  void Callback(MemoryChunk* page, StoreBufferEvent event);
-
- private:
-  StoreBuffer* store_buffer_;
-
-  // We record in this variable how full the store buffer was when we started
-  // iterating over the current page, finding pointers to new space.  If the
-  // store buffer overflows again we can exempt the page from the store buffer
-  // by rewinding to this point instead of having to search the store buffer.
-  Object*** start_of_current_page_;
-  // The current page we are scanning in the store buffer iterator.
-  MemoryChunk* current_page_;
-};
-
-
-
-// A queue of objects promoted during scavenge. Each object is accompanied
-// by its size to avoid dereferencing a map pointer for scanning.
-class PromotionQueue {
- public:
-  explicit PromotionQueue(Heap* heap)
-      : front_(NULL),
-        rear_(NULL),
-        limit_(NULL),
-        emergency_stack_(0),
-        heap_(heap) { }
-
-  void Initialize();
-
-  void Destroy() {
-    ASSERT(is_empty());
-    delete emergency_stack_;
-    emergency_stack_ = NULL;
-  }
-
-  inline void ActivateGuardIfOnTheSamePage();
-
-  Page* GetHeadPage() {
-    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
-  }
-
-  void SetNewLimit(Address limit) {
-    if (!guard_) {
-      return;
-    }
-
-    ASSERT(GetHeadPage() == Page::FromAllocationTop(limit));
-    limit_ = reinterpret_cast<intptr_t*>(limit);
-
-    if (limit_ <= rear_) {
-      return;
-    }
-
-    RelocateQueueHead();
-  }
-
-  bool is_empty() {
-    return (front_ == rear_) &&
-        (emergency_stack_ == NULL || emergency_stack_->length() == 0);
-  }
-
-  inline void insert(HeapObject* target, int size);
-
-  void remove(HeapObject** target, int* size) {
-    ASSERT(!is_empty());
-    if (front_ == rear_) {
-      Entry e = emergency_stack_->RemoveLast();
-      *target = e.obj_;
-      *size = e.size_;
-      return;
-    }
-
-    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
-      NewSpacePage* front_page =
-          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
-      ASSERT(!front_page->prev_page()->is_anchor());
-      front_ =
-          reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
-    }
-    *target = reinterpret_cast<HeapObject*>(*(--front_));
-    *size = static_cast<int>(*(--front_));
-    // Assert no underflow.
-    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
-                                reinterpret_cast<Address>(front_));
-  }
-
- private:
-  // The front of the queue is higher in the memory page chain than the rear.
-  intptr_t* front_;
-  intptr_t* rear_;
-  intptr_t* limit_;
-
-  bool guard_;
-
-  static const int kEntrySizeInWords = 2;
-
-  struct Entry {
-    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) { }
-
-    HeapObject* obj_;
-    int size_;
-  };
-  List<Entry>* emergency_stack_;
-
-  Heap* heap_;
-
-  void RelocateQueueHead();
-
-  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
-};
-
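-// A minimal sketch of the entry layout implied by remove() above (the
-// matching inline insert() is defined outside this excerpt): each entry is
-// two raw words, the object pointer and its size, pushed rear-first:
-//
-//   *(--rear_) = reinterpret_cast<intptr_t>(target);   // object word
-//   *(--rear_) = static_cast<intptr_t>(size);          // size word
-//
-// remove() then pops the words in the opposite order, so the scavenger
-// learns an object's size without dereferencing its map.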
-
-typedef void (*ScavengingCallback)(Map* map,
-                                   HeapObject** slot,
-                                   HeapObject* object);
-
-
-// External strings table is a place where all external strings are
-// registered.  We need to keep track of such strings to properly
-// finalize them.
-class ExternalStringTable {
- public:
-  // Registers an external string.
-  inline void AddString(String* string);
-
-  inline void Iterate(ObjectVisitor* v);
-
-  // Restores internal invariant and gets rid of collected strings.
-  // Must be called after each Iterate() that modified the strings.
-  void CleanUp();
-
-  // Destroys all allocated memory.
-  void TearDown();
-
- private:
-  explicit ExternalStringTable(Heap* heap) : heap_(heap) { }
-
-  friend class Heap;
-
-  inline void Verify();
-
-  inline void AddOldString(String* string);
-
-  // Notifies the table that only a prefix of the new list is valid.
-  inline void ShrinkNewStrings(int position);
-
-  // To speed up scavenge collections, new space strings are kept
-  // separate from old space strings.
-  List<Object*> new_space_strings_;
-  List<Object*> old_space_strings_;
-
-  Heap* heap_;
-
-  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
-};
-
-
-enum ArrayStorageAllocationMode {
-  DONT_INITIALIZE_ARRAY_ELEMENTS,
-  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
-};
-
-
-class Heap {
- public:
-  // Configure heap size in MB before setup. Return false if the heap has been
-  // set up already.
-  bool ConfigureHeap(int max_semi_space_size,
-                     int max_old_space_size,
-                     int max_executable_size,
-                     size_t code_range_size);
-  bool ConfigureHeapDefault();
-
-  // Prepares the heap, setting up memory areas that are needed in the isolate
-  // without actually creating any objects.
-  bool SetUp();
-
-  // Bootstraps the object heap with the core set of objects required to run.
-  // Returns whether it succeeded.
-  bool CreateHeapObjects();
-
-  // Destroys all memory allocated by the heap.
-  void TearDown();
-
-  // Set the stack limit in the roots_ array.  Some architectures generate
-  // code that looks here, because it is faster than loading from the static
-  // jslimit_/real_jslimit_ variable in the StackGuard.
-  void SetStackLimits();
-
-  // Returns whether SetUp has been called.
-  bool HasBeenSetUp();
-
-  // Returns the maximum amount of memory reserved for the heap.  For
-  // the young generation, we reserve 4 times the amount needed for a
-  // semi space.  The young generation consists of two semi spaces and
-  // we reserve twice the amount needed for those in order to ensure
-  // that new space can be aligned to its size.
-  intptr_t MaxReserved() {
-    return 4 * reserved_semispace_size_ + max_old_generation_size_;
-  }
-  int MaxSemiSpaceSize() { return max_semi_space_size_; }
-  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
-  int InitialSemiSpaceSize() { return initial_semispace_size_; }
-  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
-  intptr_t MaxExecutableSize() { return max_executable_size_; }
-
-  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
-  // more spaces are needed until it reaches the limit.
-  intptr_t Capacity();
-
-  // Returns the amount of memory currently committed for the heap.
-  intptr_t CommittedMemory();
-
-  // Returns the amount of executable memory currently committed for the heap.
-  intptr_t CommittedMemoryExecutable();
-
-  // Returns the amount of physical memory currently committed for the heap.
-  size_t CommittedPhysicalMemory();
-
-  // Returns the maximum amount of memory ever committed for the heap.
-  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
-
-  // Updates the maximum committed memory for the heap. Should be called
-  // whenever a space grows.
-  void UpdateMaximumCommitted();
-
-  // Returns the available bytes in space w/o growing.
-  // Heap doesn't guarantee that it can allocate an object that requires
-  // all available bytes. Check MaxHeapObjectSize() instead.
-  intptr_t Available();
-
-  // Returns the size of all objects residing in the heap.
-  intptr_t SizeOfObjects();
-
-  // Return the starting address and a mask for the new space.  And-masking an
-  // address with the mask will result in the start address of the new space
-  // for all addresses in either semispace.
-  Address NewSpaceStart() { return new_space_.start(); }
-  uintptr_t NewSpaceMask() { return new_space_.mask(); }
-  Address NewSpaceTop() { return new_space_.top(); }
-
-  NewSpace* new_space() { return &new_space_; }
-  OldSpace* old_pointer_space() { return old_pointer_space_; }
-  OldSpace* old_data_space() { return old_data_space_; }
-  OldSpace* code_space() { return code_space_; }
-  MapSpace* map_space() { return map_space_; }
-  CellSpace* cell_space() { return cell_space_; }
-  PropertyCellSpace* property_cell_space() {
-    return property_cell_space_;
-  }
-  LargeObjectSpace* lo_space() { return lo_space_; }
-  PagedSpace* paged_space(int idx) {
-    switch (idx) {
-      case OLD_POINTER_SPACE:
-        return old_pointer_space();
-      case OLD_DATA_SPACE:
-        return old_data_space();
-      case MAP_SPACE:
-        return map_space();
-      case CELL_SPACE:
-        return cell_space();
-      case PROPERTY_CELL_SPACE:
-        return property_cell_space();
-      case CODE_SPACE:
-        return code_space();
-      case NEW_SPACE:
-      case LO_SPACE:
-        UNREACHABLE();
-    }
-    return NULL;
-  }
-
-  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
-  Address always_allocate_scope_depth_address() {
-    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
-  }
-  bool linear_allocation() {
-    return linear_allocation_scope_depth_ != 0;
-  }
-
-  Address* NewSpaceAllocationTopAddress() {
-    return new_space_.allocation_top_address();
-  }
-  Address* NewSpaceAllocationLimitAddress() {
-    return new_space_.allocation_limit_address();
-  }
-
-  Address* OldPointerSpaceAllocationTopAddress() {
-    return old_pointer_space_->allocation_top_address();
-  }
-  Address* OldPointerSpaceAllocationLimitAddress() {
-    return old_pointer_space_->allocation_limit_address();
-  }
-
-  Address* OldDataSpaceAllocationTopAddress() {
-    return old_data_space_->allocation_top_address();
-  }
-  Address* OldDataSpaceAllocationLimitAddress() {
-    return old_data_space_->allocation_limit_address();
-  }
-
-  // Returns a deep copy of the JavaScript object.
-  // Properties and elements are copied too.
-  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
-  MUST_USE_RESULT AllocationResult CopyJSObject(JSObject* source,
-                                                AllocationSite* site = NULL);
-
-  // Clear the Instanceof cache (used when a prototype changes).
-  inline void ClearInstanceofCache();
-
-  // Iterates the whole code space to clear all ICs of the given kind.
-  void ClearAllICsByKind(Code::Kind kind);
-
-  // For use during bootup.
-  void RepairFreeListsAfterBoot();
-
-  template<typename T>
-  static inline bool IsOneByte(T t, int chars);
-
-  // Move len elements within a given array from src_index index to dst_index
-  // index.
-  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
-
-  // Sloppy mode arguments object size.
-  static const int kSloppyArgumentsObjectSize =
-      JSObject::kHeaderSize + 2 * kPointerSize;
-  // Strict mode arguments has no callee so it is smaller.
-  static const int kStrictArgumentsObjectSize =
-      JSObject::kHeaderSize + 1 * kPointerSize;
-  // Indices for direct access into argument objects.
-  static const int kArgumentsLengthIndex = 0;
-  // callee is only valid in sloppy mode.
-  static const int kArgumentsCalleeIndex = 1;
-
-  // Finalizes an external string by deleting the associated external
-  // data and clearing the resource pointer.
-  inline void FinalizeExternalString(String* string);
-
-  // Initialize a filler object to keep the ability to iterate over the heap
-  // when shortening objects.
-  void CreateFillerObjectAt(Address addr, int size);
-
-  bool CanMoveObjectStart(HeapObject* object);
-
-  enum InvocationMode { FROM_GC, FROM_MUTATOR };
-
-  // Maintain marking consistency for IncrementalMarking.
-  void AdjustLiveBytes(Address address, int by, InvocationMode mode);
-
-  // Converts the given boolean condition to JavaScript boolean value.
-  inline Object* ToBoolean(bool condition);
-
-  // Performs garbage collection operation.
-  // Returns whether there is a chance that another major GC could
-  // collect more garbage.
-  inline bool CollectGarbage(
-      AllocationSpace space,
-      const char* gc_reason = NULL,
-      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
-  static const int kNoGCFlags = 0;
-  static const int kSweepPreciselyMask = 1;
-  static const int kReduceMemoryFootprintMask = 2;
-  static const int kAbortIncrementalMarkingMask = 4;
-
-  // Making the heap iterable requires us to sweep precisely and abort any
-  // incremental marking as well.
-  static const int kMakeHeapIterableMask =
-      kSweepPreciselyMask | kAbortIncrementalMarkingMask;
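-  // (Numerically, that is 1 | 4 == 5.)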
-
-  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
-  // non-zero, then the slower precise sweeper is used, which leaves the heap
-  // in a state where we can iterate over the heap visiting all objects.
-  void CollectAllGarbage(
-      int flags,
-      const char* gc_reason = NULL,
-      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
-  // Last hope GC, should try to squeeze as much as possible.
-  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
-
-  // Check whether the heap is currently iterable.
-  bool IsHeapIterable();
-
-  // Notify the heap that a context has been disposed.
-  int NotifyContextDisposed();
-
-  inline void increment_scan_on_scavenge_pages() {
-    scan_on_scavenge_pages_++;
-    if (FLAG_gc_verbose) {
-      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
-    }
-  }
-
-  inline void decrement_scan_on_scavenge_pages() {
-    scan_on_scavenge_pages_--;
-    if (FLAG_gc_verbose) {
-      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
-    }
-  }
-
-  PromotionQueue* promotion_queue() { return &promotion_queue_; }
-
-  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
-                             GCType gc_type_filter,
-                             bool pass_isolate = true);
-  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
-
-  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
-                             GCType gc_type_filter,
-                             bool pass_isolate = true);
-  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
-
-  // Heap root getters.  We have versions with and without type::cast() here.
-  // You can't use type::cast during GC because the assert fails.
-  // TODO(1490): Try removing the unchecked accessors, now that GC marking does
-  // not corrupt the map.
-#define ROOT_ACCESSOR(type, name, camel_name)                                  \
-  type* name() {                                                               \
-    return type::cast(roots_[k##camel_name##RootIndex]);                       \
-  }                                                                            \
-  type* raw_unchecked_##name() {                                               \
-    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);          \
-  }
-  ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
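-
-  // For illustration, applying ROOT_LIST to ROOT_ACCESSOR expands the entry
-  // V(HeapNumber, nan_value, NanValue) into roughly:
-  //
-  //   HeapNumber* nan_value() {
-  //     return HeapNumber::cast(roots_[kNanValueRootIndex]);
-  //   }
-  //   HeapNumber* raw_unchecked_nan_value() {
-  //     return reinterpret_cast<HeapNumber*>(roots_[kNanValueRootIndex]);
-  //   }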
-
-// Utility type maps
-#define STRUCT_MAP_ACCESSOR(NAME, Name, name)                                  \
-    Map* name##_map() {                                                        \
-      return Map::cast(roots_[k##Name##MapRootIndex]);                         \
-    }
-  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
-#undef STRUCT_MAP_ACCESSOR
-
-#define STRING_ACCESSOR(name, str) String* name() {                            \
-    return String::cast(roots_[k##name##RootIndex]);                           \
-  }
-  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
-#undef STRING_ACCESSOR
-
-  // The hidden_string is special because it is the empty string, but does
-  // not match the empty string.
-  String* hidden_string() { return hidden_string_; }
-
-  void set_native_contexts_list(Object* object) {
-    native_contexts_list_ = object;
-  }
-  Object* native_contexts_list() const { return native_contexts_list_; }
-
-  void set_array_buffers_list(Object* object) {
-    array_buffers_list_ = object;
-  }
-  Object* array_buffers_list() const { return array_buffers_list_; }
-
-  void set_allocation_sites_list(Object* object) {
-    allocation_sites_list_ = object;
-  }
-  Object* allocation_sites_list() { return allocation_sites_list_; }
-
-  // Used in CreateAllocationSiteStub and the (de)serializer.
-  Object** allocation_sites_list_address() { return &allocation_sites_list_; }
-
-  Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
-
-  void set_encountered_weak_collections(Object* weak_collection) {
-    encountered_weak_collections_ = weak_collection;
-  }
-  Object* encountered_weak_collections() const {
-    return encountered_weak_collections_;
-  }
-
-  // Number of mark-sweeps.
-  unsigned int ms_count() { return ms_count_; }
-
-  // Iterates over all roots in the heap.
-  void IterateRoots(ObjectVisitor* v, VisitMode mode);
-  // Iterates over all strong roots in the heap.
-  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
-  // Iterates over entries in the smi roots list.  Only interesting to the
-  // serializer/deserializer, since GC does not care about smis.
-  void IterateSmiRoots(ObjectVisitor* v);
-  // Iterates over all the other roots in the heap.
-  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
-
-  // Iterate pointers to from semispace of new space found in memory interval
-  // from start to end.
-  void IterateAndMarkPointersToFromSpace(Address start,
-                                         Address end,
-                                         ObjectSlotCallback callback);
-
-  // Returns whether the object resides in new space.
-  inline bool InNewSpace(Object* object);
-  inline bool InNewSpace(Address address);
-  inline bool InNewSpacePage(Address address);
-  inline bool InFromSpace(Object* object);
-  inline bool InToSpace(Object* object);
-
-  // Returns whether the object resides in old pointer space.
-  inline bool InOldPointerSpace(Address address);
-  inline bool InOldPointerSpace(Object* object);
-
-  // Returns whether the object resides in old data space.
-  inline bool InOldDataSpace(Address address);
-  inline bool InOldDataSpace(Object* object);
-
-  // Checks whether an address/object is in the heap (including the
-  // auxiliary area and unused area).
-  bool Contains(Address addr);
-  bool Contains(HeapObject* value);
-
-  // Checks whether an address/object is in a space.
-  // Currently used by tests, serialization and heap verification only.
-  bool InSpace(Address addr, AllocationSpace space);
-  bool InSpace(HeapObject* value, AllocationSpace space);
-
-  // Finds out which space an object should get promoted to based on its type.
-  inline OldSpace* TargetSpace(HeapObject* object);
-  static inline AllocationSpace TargetSpaceId(InstanceType type);
-
-  // Checks whether the given object is allowed to be migrated from its
-  // current space into the given destination space. Used for debugging.
-  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
-
-  // Sets the stub_cache_ (only used when expanding the dictionary).
-  void public_set_code_stubs(UnseededNumberDictionary* value) {
-    roots_[kCodeStubsRootIndex] = value;
-  }
-
-  // Support for computing object sizes for old objects during GCs. Returns
-  // a function that is guaranteed to be safe for computing object sizes in
-  // the current GC phase.
-  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
-    return gc_safe_size_of_old_object_;
-  }
-
-  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
-  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
-    roots_[kNonMonomorphicCacheRootIndex] = value;
-  }
-
-  void public_set_empty_script(Script* script) {
-    roots_[kEmptyScriptRootIndex] = script;
-  }
-
-  void public_set_store_buffer_top(Address* top) {
-    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
-  }
-
-  void public_set_materialized_objects(FixedArray* objects) {
-    roots_[kMaterializedObjectsRootIndex] = objects;
-  }
-
-  // Generated code can embed this address to get access to the roots.
-  Object** roots_array_start() { return roots_; }
-
-  Address* store_buffer_top_address() {
-    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
-  }
-
-#ifdef VERIFY_HEAP
-  // Verify the heap is in its normal state before or after a GC.
-  void Verify();
-
-  bool weak_embedded_objects_verification_enabled() {
-    return no_weak_object_verification_scope_depth_ == 0;
-  }
-#endif
-
-#ifdef DEBUG
-  void Print();
-  void PrintHandles();
-
-  void OldPointerSpaceCheckStoreBuffer();
-  void MapSpaceCheckStoreBuffer();
-  void LargeObjectSpaceCheckStoreBuffer();
-
-  // Report heap statistics.
-  void ReportHeapStatistics(const char* title);
-  void ReportCodeStatistics(const char* title);
-#endif
-
-  // Zapping is needed for verify heap, and always done in debug builds.
-  static inline bool ShouldZapGarbage() {
-#ifdef DEBUG
-    return true;
-#else
-#ifdef VERIFY_HEAP
-    return FLAG_verify_heap;
-#else
-    return false;
-#endif
-#endif
-  }
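-
-  // Put differently: debug builds always zap; builds compiled with
-  // VERIFY_HEAP zap iff FLAG_verify_heap is set; all other builds never zap.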
-
-  // Print short heap statistics.
-  void PrintShortHeapStatistics();
-
-  // Write barrier support for address[offset] = o.
-  INLINE(void RecordWrite(Address address, int offset));
-
-  // Write barrier support for address[start : start + len[ = o.
-  INLINE(void RecordWrites(Address address, int start, int len));
-
-  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
-  inline HeapState gc_state() { return gc_state_; }
-
-  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
-
-#ifdef DEBUG
-  void set_allocation_timeout(int timeout) {
-    allocation_timeout_ = timeout;
-  }
-
-  void TracePathToObjectFrom(Object* target, Object* root);
-  void TracePathToObject(Object* target);
-  void TracePathToGlobal();
-#endif
-
-  // Callback function passed to Heap::Iterate etc.  Copies an object if
-  // necessary, the object might be promoted to an old space.  The caller must
-  // ensure the precondition that the object is (a) a heap object and (b) in
-  // the heap's from space.
-  static inline void ScavengePointer(HeapObject** p);
-  static inline void ScavengeObject(HeapObject** p, HeapObject* object);
-
-  enum ScratchpadSlotMode {
-    IGNORE_SCRATCHPAD_SLOT,
-    RECORD_SCRATCHPAD_SLOT
-  };
-
-  // If an object has an AllocationMemento trailing it, return it, otherwise
-  // return NULL.
-  inline AllocationMemento* FindAllocationMemento(HeapObject* object);
-
-  // An object may have an AllocationSite associated with it through a trailing
-  // AllocationMemento. Its feedback should be updated when objects are found
-  // in the heap.
-  static inline void UpdateAllocationSiteFeedback(
-      HeapObject* object, ScratchpadSlotMode mode);
-
-  // Support for partial snapshots.  After calling this we have a linear
-  // space to write objects in each space.
-  void ReserveSpace(int *sizes, Address* addresses);
-
-  //
-  // Support for the API.
-  //
-
-  void CreateApiObjects();
-
-  inline intptr_t PromotedTotalSize() {
-    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
-    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
-    if (total < 0) return 0;
-    return static_cast<intptr_t>(total);
-  }
-
-  inline intptr_t OldGenerationSpaceAvailable() {
-    return old_generation_allocation_limit_ - PromotedTotalSize();
-  }
-
-  inline intptr_t OldGenerationCapacityAvailable() {
-    return max_old_generation_size_ - PromotedTotalSize();
-  }
-
-  static const intptr_t kMinimumOldGenerationAllocationLimit =
-      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
-
-  static const int kPointerMultiplier = i::kPointerSize / 4;
-
-  // The new space size has to be a power of 2. Sizes are in MB.
-  static const int kMaxSemiSpaceSizeLowMemoryDevice =
-      1 * kPointerMultiplier;
-  static const int kMaxSemiSpaceSizeMediumMemoryDevice =
-      4 * kPointerMultiplier;
-  static const int kMaxSemiSpaceSizeHighMemoryDevice =
-      8 * kPointerMultiplier;
-  static const int kMaxSemiSpaceSizeHugeMemoryDevice =
-      8 * kPointerMultiplier;
-
-  // The old space size has to be a multiple of Page::kPageSize.
-  // Sizes are in MB.
-  static const int kMaxOldSpaceSizeLowMemoryDevice =
-      128 * kPointerMultiplier;
-  static const int kMaxOldSpaceSizeMediumMemoryDevice =
-      256 * kPointerMultiplier;
-  static const int kMaxOldSpaceSizeHighMemoryDevice =
-      512 * kPointerMultiplier;
-  static const int kMaxOldSpaceSizeHugeMemoryDevice =
-      700 * kPointerMultiplier;
-
-  // The executable size has to be a multiple of Page::kPageSize.
-  // Sizes are in MB.
-  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
-  static const int kMaxExecutableSizeMediumMemoryDevice =
-      192 * kPointerMultiplier;
-  static const int kMaxExecutableSizeHighMemoryDevice =
-      256 * kPointerMultiplier;
-  static const int kMaxExecutableSizeHugeMemoryDevice =
-      256 * kPointerMultiplier;
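-  // For example, on a 64-bit target kPointerSize == 8, so kPointerMultiplier
-  // == 2 and kMaxOldSpaceSizeHugeMemoryDevice == 1400 MB; on a 32-bit target
-  // the multiplier is 1 and the limit stays at 700 MB.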
-
-  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
-                                        int freed_global_handles);
-
-  // Indicates whether inline bump-pointer allocation has been disabled.
-  bool inline_allocation_disabled() { return inline_allocation_disabled_; }
-
-  // Switch whether inline bump-pointer allocation should be used.
-  void EnableInlineAllocation();
-  void DisableInlineAllocation();
-
-  // Implements the corresponding V8 API function.
-  bool IdleNotification(int hint);
-
-  // Declare all the root indices.  This defines the root list order.
-  enum RootListIndex {
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
-    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
-    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
-#undef STRING_INDEX_DECLARATION
-
-    // Utility type maps
-#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
-    STRUCT_LIST(DECLARE_STRUCT_MAP)
-#undef DECLARE_STRUCT_MAP
-
-    kStringTableRootIndex,
-
-#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
-    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
-#undef ROOT_INDEX_DECLARATION
-
-    kRootListLength,
-    kStrongRootListLength = kStringTableRootIndex,
-    kSmiRootsStart = kStringTableRootIndex + 1
-  };
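-
-  // The ordering above is load-bearing: strong roots occupy indices
-  // [0, kStringTableRootIndex), the string table sits at that index, and the
-  // Smi roots follow, which is exactly what kStrongRootListLength and
-  // kSmiRootsStart encode.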
-
-  STATIC_ASSERT(kUndefinedValueRootIndex ==
-                Internals::kUndefinedValueRootIndex);
-  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
-  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
-  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
-  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
-
-  // Generated code can embed direct references to non-writable roots if
-  // they are in new space.
-  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
-  // Generated code can treat direct references to this root as constant.
-  bool RootCanBeTreatedAsConstant(RootListIndex root_index);
-
-  Map* MapForFixedTypedArray(ExternalArrayType array_type);
-  RootListIndex RootIndexForFixedTypedArray(
-      ExternalArrayType array_type);
-
-  Map* MapForExternalArrayType(ExternalArrayType array_type);
-  RootListIndex RootIndexForExternalArrayType(
-      ExternalArrayType array_type);
-
-  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
-  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
-  ExternalArray* EmptyExternalArrayForMap(Map* map);
-  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
-
-  void RecordStats(HeapStats* stats, bool take_snapshot = false);
-
-  // Copy a block of memory from src to dst. The block size must be a
-  // multiple of the pointer size.
-  static inline void CopyBlock(Address dst, Address src, int byte_size);
-
-  // Optimized version of memmove for blocks with pointer-size-aligned sizes
-  // and pointer-size-aligned addresses.
-  static inline void MoveBlock(Address dst, Address src, int byte_size);
-
-  // Check new space expansion criteria and expand semispaces if it was hit.
-  void CheckNewSpaceExpansionCriteria();
-
-  inline void IncrementPromotedObjectsSize(int object_size) {
-    ASSERT(object_size > 0);
-    promoted_objects_size_ += object_size;
-  }
-
-  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
-    ASSERT(object_size > 0);
-    semi_space_copied_object_size_ += object_size;
-  }
-
-  inline void IncrementYoungSurvivorsCounter(int survived) {
-    ASSERT(survived >= 0);
-    survived_since_last_expansion_ += survived;
-  }
-
-  inline bool NextGCIsLikelyToBeFull() {
-    if (FLAG_gc_global) return true;
-
-    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
-
-    intptr_t adjusted_allocation_limit =
-        old_generation_allocation_limit_ - new_space_.Capacity();
-
-    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
-
-    return false;
-  }
-
-  void UpdateNewSpaceReferencesInExternalStringTable(
-      ExternalStringTableUpdaterCallback updater_func);
-
-  void UpdateReferencesInExternalStringTable(
-      ExternalStringTableUpdaterCallback updater_func);
-
-  void ProcessWeakReferences(WeakObjectRetainer* retainer);
-
-  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
-
-  // Helper function that governs the promotion policy from new space to
-  // old.  If the object's old address lies below the new space's age
-  // mark or if we've already filled the bottom 1/16th of the to space,
-  // we try to promote this object.
-  inline bool ShouldBePromoted(Address old_address, int object_size);
-
-  void ClearJSFunctionResultCaches();
-
-  void ClearNormalizedMapCaches();
-
-  GCTracer* tracer() { return tracer_; }
-
-  // Returns the size of objects residing in non new spaces.
-  intptr_t PromotedSpaceSizeOfObjects();
-
-  double total_regexp_code_generated() { return total_regexp_code_generated_; }
-  void IncreaseTotalRegexpCodeGenerated(int size) {
-    total_regexp_code_generated_ += size;
-  }
-
-  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
-    if (is_crankshafted) {
-      crankshaft_codegen_bytes_generated_ += size;
-    } else {
-      full_codegen_bytes_generated_ += size;
-    }
-  }
-
-  // Returns maximum GC pause.
-  double get_max_gc_pause() { return max_gc_pause_; }
-
-  // Returns maximum size of objects alive after GC.
-  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
-
-  // Returns minimal interval between two subsequent collections.
-  double get_min_in_mutator() { return min_in_mutator_; }
-
-  // TODO(hpayer): remove, should be handled by GCTracer
-  void AddMarkingTime(double marking_time) {
-    marking_time_ += marking_time;
-  }
-
-  double marking_time() const {
-    return marking_time_;
-  }
-
-  // TODO(hpayer): remove, should be handled by GCTracer
-  void AddSweepingTime(double sweeping_time) {
-    sweeping_time_ += sweeping_time;
-  }
-
-  double sweeping_time() const {
-    return sweeping_time_;
-  }
-
-  MarkCompactCollector* mark_compact_collector() {
-    return &mark_compact_collector_;
-  }
-
-  StoreBuffer* store_buffer() {
-    return &store_buffer_;
-  }
-
-  Marking* marking() {
-    return &marking_;
-  }
-
-  IncrementalMarking* incremental_marking() {
-    return &incremental_marking_;
-  }
-
-  ExternalStringTable* external_string_table() {
-    return &external_string_table_;
-  }
-
-  // Returns the current sweep generation.
-  int sweep_generation() {
-    return sweep_generation_;
-  }
-
-  inline Isolate* isolate();
-
-  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
-  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
-
-  inline bool OldGenerationAllocationLimitReached();
-
-  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
-    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
-  }
-
-  void QueueMemoryChunkForFree(MemoryChunk* chunk);
-  void FreeQueuedChunks();
-
-  int gc_count() const { return gc_count_; }
-
-  // Completely clear the Instanceof cache (to stop it keeping objects alive
-  // around a GC).
-  inline void CompletelyClearInstanceofCache();
-
-  // The roots that have an index less than this are always in old space.
-  static const int kOldSpaceRoots = 0x20;
-
-  uint32_t HashSeed() {
-    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
-    ASSERT(FLAG_randomize_hashes || seed == 0);
-    return seed;
-  }
-
-  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
-    ASSERT(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
-    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetConstructStubDeoptPCOffset(int pc_offset) {
-    ASSERT(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetGetterStubDeoptPCOffset(int pc_offset) {
-    ASSERT(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  void SetSetterStubDeoptPCOffset(int pc_offset) {
-    ASSERT(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
-    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
-  }
-
-  // For post mortem debugging.
-  void RememberUnmappedPage(Address page, bool compacted);
-
-  // Global inline caching age: it is incremented on some GCs after context
-  // disposal. We use it to flush inline caches.
-  int global_ic_age() {
-    return global_ic_age_;
-  }
-
-  void AgeInlineCaches() {
-    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
-  }
-
-  bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
-
-  int64_t amount_of_external_allocated_memory() {
-    return amount_of_external_allocated_memory_;
-  }
-
-  void DeoptMarkedAllocationSites();
-
-  bool MaximumSizeScavenge() {
-    return maximum_size_scavenges_ > 0;
-  }
-
-  bool DeoptMaybeTenuredAllocationSites() {
-    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
-  }
-
-  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
-  // stored in a contiguous linear buffer. Stats groups are stored one after
-  // another.
-  enum {
-    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
-    FIRST_FIXED_ARRAY_SUB_TYPE =
-        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
-    FIRST_CODE_AGE_SUB_TYPE =
-        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
-    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
-  };
-
-  void RecordObjectStats(InstanceType type, size_t size) {
-    ASSERT(type <= LAST_TYPE);
-    object_counts_[type]++;
-    object_sizes_[type] += size;
-  }
-
-  void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
-    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
-    int code_age_index =
-        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
-    ASSERT(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
-           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
-    ASSERT(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
-           code_age_index < OBJECT_STATS_COUNT);
-    object_counts_[code_sub_type_index]++;
-    object_sizes_[code_sub_type_index] += size;
-    object_counts_[code_age_index]++;
-    object_sizes_[code_age_index] += size;
-  }
-
-  void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
-    ASSERT(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
-    object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
-    object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
-  }
-
-  void CheckpointObjectStats();
-
-  // We don't use a LockGuard here since we want to lock the heap
-  // only when FLAG_concurrent_recompilation is true.
-  class RelocationLock {
-   public:
-    explicit RelocationLock(Heap* heap) : heap_(heap) {
-      heap_->relocation_mutex_.Lock();
-    }
-
-    ~RelocationLock() {
-      heap_->relocation_mutex_.Unlock();
-    }
-
-   private:
-    Heap* heap_;
-  };
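-
-  // Illustrative use, scoping the lock RAII-style (heap is any Heap*):
-  //
-  //   {
-  //     Heap::RelocationLock relocation_lock(heap);
-  //     // ... relocate objects while other threads are excluded ...
-  //   }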
-
-  void AddWeakObjectToCodeDependency(Handle<Object> obj,
-                                     Handle<DependentCode> dep);
-
-  DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj);
-
-  void InitializeWeakObjectToCodeTable() {
-    set_weak_object_to_code_table(undefined_value());
-  }
-
-  void EnsureWeakObjectToCodeTable();
-
-  static void FatalProcessOutOfMemory(const char* location,
-                                      bool take_snapshot = false);
-
- protected:
-  // Methods made available to tests.
-
-  // Allocates a JS Map in the heap.
-  MUST_USE_RESULT AllocationResult AllocateMap(
-      InstanceType instance_type,
-      int instance_size,
-      ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
-
-  // Allocates and initializes a new JavaScript object based on a
-  // constructor.
-  // If allocation_site is non-null, then a memento is emitted after the object
-  // that points to the site.
-  MUST_USE_RESULT AllocationResult AllocateJSObject(
-      JSFunction* constructor,
-      PretenureFlag pretenure = NOT_TENURED,
-      AllocationSite* allocation_site = NULL);
-
-  // Allocates and initializes a new JavaScript object based on a map.
-  // Passing an allocation site means that a memento will be created that
-  // points to the site.
-  MUST_USE_RESULT AllocationResult AllocateJSObjectFromMap(
-      Map* map,
-      PretenureFlag pretenure = NOT_TENURED,
-      bool alloc_props = true,
-      AllocationSite* allocation_site = NULL);
-
-  // Allocates a HeapNumber from value.
-  MUST_USE_RESULT AllocationResult AllocateHeapNumber(
-      double value, PretenureFlag pretenure = NOT_TENURED);
-
-  // Allocates a byte array of the specified length.
-  MUST_USE_RESULT AllocationResult AllocateByteArray(
-      int length,
-      PretenureFlag pretenure = NOT_TENURED);
-
-  // Allocates an arguments object - optionally with an elements array.
-  MUST_USE_RESULT AllocationResult AllocateArgumentsObject(
-      Object* callee, int length);
-
-  // Copy the code and scope info part of the code object, but insert
-  // the provided data as the relocation information.
-  MUST_USE_RESULT AllocationResult CopyCode(Code* code,
-                                            Vector<byte> reloc_info);
-
-  MUST_USE_RESULT AllocationResult CopyCode(Code* code);
-
-  // Allocates a fixed array initialized with undefined values.
-  MUST_USE_RESULT AllocationResult AllocateFixedArray(
-      int length,
-      PretenureFlag pretenure = NOT_TENURED);
-
- private:
-  Heap();
-
-  // The amount of external memory registered through the API kept alive
-  // by global handles.
-  int64_t amount_of_external_allocated_memory_;
-
-  // Caches the amount of external memory registered at the last global gc.
-  int64_t amount_of_external_allocated_memory_at_last_global_gc_;
-
-  // This can be calculated directly from a pointer to the heap; however, it is
-  // more expedient to get at the isolate directly from within Heap methods.
-  Isolate* isolate_;
-
-  Object* roots_[kRootListLength];
-
-  size_t code_range_size_;
-  int reserved_semispace_size_;
-  int max_semi_space_size_;
-  int initial_semispace_size_;
-  intptr_t max_old_generation_size_;
-  intptr_t max_executable_size_;
-  intptr_t maximum_committed_;
-
-  // For keeping track of how much data has survived
-  // scavenge since last new space expansion.
-  int survived_since_last_expansion_;
-
-  // For keeping track of when to flush RegExp code.
-  int sweep_generation_;
-
-  int always_allocate_scope_depth_;
-  int linear_allocation_scope_depth_;
-
-  // For keeping track of context disposals.
-  int contexts_disposed_;
-
-  int global_ic_age_;
-
-  bool flush_monomorphic_ics_;
-
-  int scan_on_scavenge_pages_;
-
-  NewSpace new_space_;
-  OldSpace* old_pointer_space_;
-  OldSpace* old_data_space_;
-  OldSpace* code_space_;
-  MapSpace* map_space_;
-  CellSpace* cell_space_;
-  PropertyCellSpace* property_cell_space_;
-  LargeObjectSpace* lo_space_;
-  HeapState gc_state_;
-  int gc_post_processing_depth_;
-  Address new_space_top_after_last_gc_;
-
-  // Returns the amount of external memory registered since last global gc.
-  int64_t PromotedExternalMemorySize();
-
-  unsigned int ms_count_;  // how many mark-sweep collections happened
-  unsigned int gc_count_;  // how many GCs have happened
-
-  // For post mortem debugging.
-  static const int kRememberedUnmappedPages = 128;
-  int remembered_unmapped_pages_index_;
-  Address remembered_unmapped_pages_[kRememberedUnmappedPages];
-
-  // Total length of the strings we failed to flatten since the last GC.
-  int unflattened_strings_length_;
-
-#define ROOT_ACCESSOR(type, name, camel_name)                                  \
-  inline void set_##name(type* value) {                                        \
-    /* The deserializer makes use of the fact that these common roots are */   \
-    /* never in new space and never on a page that is being compacted.    */   \
-    ASSERT(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value));  \
-    roots_[k##camel_name##RootIndex] = value;                                  \
-  }
-  ROOT_LIST(ROOT_ACCESSOR)
-#undef ROOT_ACCESSOR
-
-#ifdef DEBUG
-  // If the --gc-interval flag is set to a positive value, this
-  // variable holds the number of allocations remaining until the
-  // next failure and garbage collection.
-  int allocation_timeout_;
-#endif  // DEBUG
-
-  // Limit that triggers a global GC on the next (normally caused) GC.  This
-  // is checked when we have already decided to do a GC to help determine
-  // which collector to invoke, before expanding a paged space in the old
-  // generation and on every allocation in large object space.
-  intptr_t old_generation_allocation_limit_;
-
-  // Indicates that an allocation has failed in the old generation since the
-  // last GC.
-  bool old_gen_exhausted_;
-
-  // Indicates that inline bump-pointer allocation has been globally disabled
-  // for all spaces. This is used to disable allocations in generated code.
-  bool inline_allocation_disabled_;
-
-  // Weak list heads, threaded through the objects.
-  // List heads are initialized lazily and contain the undefined_value at start.
-  Object* native_contexts_list_;
-  Object* array_buffers_list_;
-  Object* allocation_sites_list_;
-
-  // WeakHashTable that maps objects embedded in optimized code to dependent
-  // code list. It is initialized lazily and contains the undefined_value at
-  // start.
-  Object* weak_object_to_code_table_;
-
-  // List of encountered weak collections (JSWeakMap and JSWeakSet) during
-  // marking. It is initialized during marking, destroyed after marking and
-  // contains Smi(0) while marking is not active.
-  Object* encountered_weak_collections_;
-
-  StoreBufferRebuilder store_buffer_rebuilder_;
-
-  struct StringTypeTable {
-    InstanceType type;
-    int size;
-    RootListIndex index;
-  };
-
-  struct ConstantStringTable {
-    const char* contents;
-    RootListIndex index;
-  };
-
-  struct StructTable {
-    InstanceType type;
-    int size;
-    RootListIndex index;
-  };
-
-  static const StringTypeTable string_type_table[];
-  static const ConstantStringTable constant_string_table[];
-  static const StructTable struct_table[];
-
-  // The special hidden string which is an empty string, but does not match
-  // any string when looked up in properties.
-  String* hidden_string_;
-
-  // GC callback function, called before and after mark-compact GC.
-  // Allocations in the callback function are disallowed.
-  struct GCPrologueCallbackPair {
-    GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
-                           GCType gc_type,
-                           bool pass_isolate)
-        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
-    }
-    bool operator==(const GCPrologueCallbackPair& pair) const {
-      return pair.callback == callback;
-    }
-    v8::Isolate::GCPrologueCallback callback;
-    GCType gc_type;
-    // TODO(dcarney): remove variable
-    bool pass_isolate_;
-  };
-  List<GCPrologueCallbackPair> gc_prologue_callbacks_;
-
-  struct GCEpilogueCallbackPair {
-    GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
-                           GCType gc_type,
-                           bool pass_isolate)
-        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {
-    }
-    bool operator==(const GCEpilogueCallbackPair& pair) const {
-      return pair.callback == callback;
-    }
-    v8::Isolate::GCPrologueCallback callback;
-    GCType gc_type;
-    // TODO(dcarney): remove variable
-    bool pass_isolate_;
-  };
-  List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
-
-  // Support for computing object sizes during GC.
-  HeapObjectCallback gc_safe_size_of_old_object_;
-  static int GcSafeSizeOfOldObject(HeapObject* object);
-
-  // Update the GC state. Called from the mark-compact collector.
-  void MarkMapPointersAsEncoded(bool encoded) {
-    ASSERT(!encoded);
-    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
-  }
-
-  // Code that should be run before and after each GC.  Includes some
-  // reporting/verification activities when compiled with DEBUG set.
-  void GarbageCollectionPrologue();
-  void GarbageCollectionEpilogue();
-
-  // Pretenuring decisions are made based on feedback collected during new
-  // space evacuation. Note that between feedback collection and calling this
-  // method, objects in old space must not move.
-  // Right now we only process pretenuring feedback in high promotion mode.
-  void ProcessPretenuringFeedback();
-
-  // Checks whether a global GC is necessary
-  GarbageCollector SelectGarbageCollector(AllocationSpace space,
-                                          const char** reason);
-
-  // Make sure there is a filler value behind the top of the new space
-  // so that the GC does not confuse some uninitialized/stale memory
-  // with the allocation memento of the object at the top.
-  void EnsureFillerObjectAtTop();
-
-  // Ensure that we have swept all spaces in such a way that we can iterate
-  // over all objects.  May cause a GC.
-  void MakeHeapIterable();
-
-  // Performs a garbage collection operation.
-  // Returns whether there is a chance that another major GC could
-  // collect more garbage.
-  bool CollectGarbage(
-      GarbageCollector collector,
-      const char* gc_reason,
-      const char* collector_reason,
-      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
-  // Performs a garbage collection.
-  // Returns whether there is a chance that another major GC could
-  // collect more garbage.
-  bool PerformGarbageCollection(
-      GarbageCollector collector,
-      GCTracer* tracer,
-      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
-
-  inline void UpdateOldSpaceLimits();
-
-  // Selects the proper allocation space depending on the given object
-  // size, pretenuring decision, and preferred old-space.
-  static AllocationSpace SelectSpace(int object_size,
-                                     AllocationSpace preferred_old_space,
-                                     PretenureFlag pretenure) {
-    ASSERT(preferred_old_space == OLD_POINTER_SPACE ||
-           preferred_old_space == OLD_DATA_SPACE);
-    if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
-    return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
-  }
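-
-  // Illustrative outcomes (sizes assumed, not normative): a 4 KB object
-  // allocated TENURED with preferred_old_space == OLD_DATA_SPACE lands in
-  // OLD_DATA_SPACE, the same object NOT_TENURED lands in NEW_SPACE, and any
-  // object larger than Page::kMaxRegularHeapObjectSize lands in LO_SPACE
-  // regardless of pretenuring.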
-
-  // Allocate an uninitialized object.  The memory is non-executable if the
-  // hardware and OS allow.  This is the single choke-point for allocations
-  // performed by the runtime and should not be bypassed (to extend this to
-  // inlined allocations, use the Heap::DisableInlineAllocation() support).
-  MUST_USE_RESULT inline AllocationResult AllocateRaw(
-      int size_in_bytes,
-      AllocationSpace space,
-      AllocationSpace retry_space);
-
-  // Allocates a heap object based on the map.
-  MUST_USE_RESULT AllocationResult Allocate(
-      Map* map,
-      AllocationSpace space,
-      AllocationSite* allocation_site = NULL);
-
-  // Allocates a partial map for bootstrapping.
-  MUST_USE_RESULT AllocationResult AllocatePartialMap(
-      InstanceType instance_type,
-      int instance_size);
-
-  // Initializes a JSObject based on its map.
-  void InitializeJSObjectFromMap(JSObject* obj,
-                                 FixedArray* properties,
-                                 Map* map);
-  void InitializeAllocationMemento(AllocationMemento* memento,
-                                   AllocationSite* allocation_site);
-
-  // Allocate a block of memory in the given space (filled with a filler).
-  // Used as a fall-back for generated code when the space is full.
-  MUST_USE_RESULT AllocationResult AllocateFillerObject(int size,
-                                                        bool double_align,
-                                                        AllocationSpace space);
-
-  // Allocate an uninitialized fixed array.
-  MUST_USE_RESULT AllocationResult AllocateRawFixedArray(
-      int length, PretenureFlag pretenure);
-
-  // Allocate an uninitialized fixed double array.
-  MUST_USE_RESULT AllocationResult AllocateRawFixedDoubleArray(
-      int length, PretenureFlag pretenure);
-
-  // Allocate an initialized fixed array with the given filler value.
-  MUST_USE_RESULT AllocationResult AllocateFixedArrayWithFiller(
-      int length, PretenureFlag pretenure, Object* filler);
-
-  // Allocates and partially initializes a String.  There are two String
-  // encodings: ASCII and two byte.  These functions allocate a string of the
-  // given length and set its map and length fields.  The characters of the
-  // string are uninitialized.
-  MUST_USE_RESULT AllocationResult AllocateRawOneByteString(
-      int length, PretenureFlag pretenure);
-  MUST_USE_RESULT AllocationResult AllocateRawTwoByteString(
-      int length, PretenureFlag pretenure);
-
-  bool CreateInitialMaps();
-  void CreateInitialObjects();
-
-  // Allocates an internalized string in old space based on the character
-  // stream.
-  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
-      Vector<const char> str,
-      int chars,
-      uint32_t hash_field);
-
-  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
-        Vector<const uint8_t> str,
-        uint32_t hash_field);
-
-  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
-        Vector<const uc16> str,
-        uint32_t hash_field);
-
-  template<bool is_one_byte, typename T>
-  MUST_USE_RESULT AllocationResult AllocateInternalizedStringImpl(
-      T t, int chars, uint32_t hash_field);
-
-  template<typename T>
-  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
-      T t, int chars, uint32_t hash_field);
-
-  // Allocates an uninitialized fixed array. It must be filled by the caller.
-  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
-
-  // Make a copy of src and return it. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
-
-  // Make a copy of src, set the map, and return the copy. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT AllocationResult CopyFixedArrayWithMap(FixedArray* src,
-                                                         Map* map);
-
-  // Make a copy of src and return it. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
-      FixedDoubleArray* src);
-
-  // Make a copy of src and return it. Returns
-  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
-  MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
-      ConstantPoolArray* src);
-
-  // Computes a single-character string where the character has the given
-  // code.  A cache is used for ASCII codes.
-  MUST_USE_RESULT AllocationResult LookupSingleCharacterStringFromCode(
-      uint16_t code);
-
-  // Allocate a symbol in old space.
-  MUST_USE_RESULT AllocationResult AllocateSymbol();
-
-  // Make a copy of src, set the map, and return the copy.
-  MUST_USE_RESULT AllocationResult CopyConstantPoolArrayWithMap(
-      ConstantPoolArray* src, Map* map);
-
-  MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
-      const ConstantPoolArray::NumberOfEntries& small);
-
-  MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
-      const ConstantPoolArray::NumberOfEntries& small,
-      const ConstantPoolArray::NumberOfEntries& extended);
-
-  // Allocates an external array of the specified length and type.
-  MUST_USE_RESULT AllocationResult AllocateExternalArray(
-      int length,
-      ExternalArrayType array_type,
-      void* external_pointer,
-      PretenureFlag pretenure);
-
-  // Allocates a fixed typed array of the specified length and type.
-  MUST_USE_RESULT AllocationResult AllocateFixedTypedArray(
-      int length,
-      ExternalArrayType array_type,
-      PretenureFlag pretenure);
-
-  // Make a copy of src and return it.
-  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
-
-  // Make a copy of src, set the map, and return the copy.
-  MUST_USE_RESULT AllocationResult CopyFixedDoubleArrayWithMap(
-      FixedDoubleArray* src, Map* map);
-
-  // Allocates a fixed double array with uninitialized values.
-  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
-      int length,
-      PretenureFlag pretenure = NOT_TENURED);
-
-  // These two Create*EntryStub functions are here and forced to not be
-  // inlined because of a gcc-4.4 bug that assigns wrong vtable entries.
-  NO_INLINE(void CreateJSEntryStub());
-  NO_INLINE(void CreateJSConstructEntryStub());
-
-  void CreateFixedStubs();
-
-  // Allocate empty fixed array.
-  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
-
-  // Allocate empty external array of given type.
-  MUST_USE_RESULT AllocationResult AllocateEmptyExternalArray(
-      ExternalArrayType array_type);
-
-  // Allocate empty fixed typed array of given type.
-  MUST_USE_RESULT AllocationResult AllocateEmptyFixedTypedArray(
-      ExternalArrayType array_type);
-
-  // Allocate empty constant pool array.
-  MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();
-
-  // Allocate a tenured simple cell.
-  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
-
-  // Allocate a tenured JS global property cell initialized with the hole.
-  MUST_USE_RESULT AllocationResult AllocatePropertyCell();
-
-  // Allocates a new utility object in the old generation.
-  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
-
-  // Allocates a new foreign object.
-  MUST_USE_RESULT AllocationResult AllocateForeign(
-      Address address, PretenureFlag pretenure = NOT_TENURED);
-
-  MUST_USE_RESULT AllocationResult AllocateCode(int object_size,
-                                                bool immovable);
-
-  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
-
-  MUST_USE_RESULT AllocationResult InternalizeString(String* str);
-
-  // Performs a minor collection in new generation.
-  void Scavenge();
-
-  // Commits from space if it is uncommitted.
-  void EnsureFromSpaceIsCommitted();
-
-  // Uncommit unused semi space.
-  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
-
-  // Fill in bogus values in from space
-  void ZapFromSpace();
-
-  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
-      Heap* heap,
-      Object** pointer);
-
-  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
-  static void ScavengeStoreBufferCallback(Heap* heap,
-                                          MemoryChunk* page,
-                                          StoreBufferEvent event);
-
-  // Performs a major collection in the whole heap.
-  void MarkCompact(GCTracer* tracer);
-
-  // Code to be run before and after mark-compact.
-  void MarkCompactPrologue();
-
-  void ProcessNativeContexts(WeakObjectRetainer* retainer);
-  void ProcessArrayBuffers(WeakObjectRetainer* retainer);
-  void ProcessAllocationSites(WeakObjectRetainer* retainer);
-
-  // Deopts all code that contains allocation instructions which are tenured
-  // or not tenured. Moreover, it clears the pretenuring allocation site
-  // statistics.
-  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
-
-  // Evaluates local pretenuring for the old space and calls
-  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
-  // the old space.
-  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
-
-  // Called on heap tear-down.
-  void TearDownArrayBuffers();
-
-  // Record statistics before and after garbage collection.
-  void ReportStatisticsBeforeGC();
-  void ReportStatisticsAfterGC();
-
-  // Slow part of scavenge object.
-  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
-
-  // Total RegExp code ever generated
-  double total_regexp_code_generated_;
-
-  GCTracer* tracer_;
-
-  // Creates and installs the full-sized number string cache.
-  int FullSizeNumberStringCacheLength();
-  // Flush the number to string cache.
-  void FlushNumberStringCache();
-
-  // Sets used allocation sites entries to undefined.
-  void FlushAllocationSitesScratchpad();
-
-  // Initializes the allocation sites scratchpad with undefined values.
-  void InitializeAllocationSitesScratchpad();
-
-  // Adds an allocation site to the scratchpad if there is space left.
-  void AddAllocationSiteToScratchpad(AllocationSite* site,
-                                     ScratchpadSlotMode mode);
-
-  void UpdateSurvivalStatistics(int start_new_space_size);
-
-  static const int kYoungSurvivalRateHighThreshold = 90;
-  static const int kYoungSurvivalRateAllowedDeviation = 15;
-
-  static const int kOldSurvivalRateLowThreshold = 10;
-
-  int high_survival_rate_period_length_;
-  intptr_t promoted_objects_size_;
-  double promotion_rate_;
-  intptr_t semi_space_copied_object_size_;
-  double semi_space_copied_rate_;
-
-  // This is the pretenuring trigger for allocation sites that are in the
-  // maybe-tenure state. When we switch to the maximum new space size, we
-  // deoptimize the code that belongs to the allocation site and derive the
-  // lifetime of the allocation site.
-  unsigned int maximum_size_scavenges_;
-
-  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
-  // Re-visit incremental marking heuristics.
-  bool IsHighSurvivalRate() {
-    return high_survival_rate_period_length_ > 0;
-  }
-
-  void SelectScavengingVisitorsTable();
-
-  void StartIdleRound() {
-    mark_sweeps_since_idle_round_started_ = 0;
-  }
-
-  void FinishIdleRound() {
-    mark_sweeps_since_idle_round_started_ = kMaxMarkSweepsInIdleRound;
-    scavenges_since_last_idle_round_ = 0;
-  }
-
-  bool EnoughGarbageSinceLastIdleRound() {
-    return (scavenges_since_last_idle_round_ >= kIdleScavengeThreshold);
-  }
-
-  // Estimates how many milliseconds a Mark-Sweep would take to complete.
-  // In the idle notification handler we assume that this function will
-  // return:
-  // - a number less than 10 for small heaps, which are less than 8 MB;
-  // - a number greater than 10 for large heaps, which are greater than 32 MB.
-  int TimeMarkSweepWouldTakeInMs() {
-    // Rough estimate of how many megabytes of heap can be processed in 1 ms.
-    static const int kMbPerMs = 2;
-
-    int heap_size_mb = static_cast<int>(SizeOfObjects() / MB);
-    return heap_size_mb / kMbPerMs;
-  }
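-
-  // Worked example: a 4 MB heap yields 4 / 2 = 2 ms (a "small" heap per the
-  // comment above), while a 64 MB heap yields 64 / 2 = 32 ms (a "large" one).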
-
-  // Returns true if no more GC work is left.
-  bool IdleGlobalGC();
-
-  void AdvanceIdleIncrementalMarking(intptr_t step_size);
-
-  void ClearObjectStats(bool clear_last_time_stats = false);
-
-  void set_weak_object_to_code_table(Object* value) {
-    ASSERT(!InNewSpace(value));
-    weak_object_to_code_table_ = value;
-  }
-
-  Object** weak_object_to_code_table_address() {
-    return &weak_object_to_code_table_;
-  }
-
-  static const int kInitialStringTableSize = 2048;
-  static const int kInitialEvalCacheSize = 64;
-  static const int kInitialNumberStringCacheSize = 256;
-
-  // Object counts and used memory by InstanceType
-  size_t object_counts_[OBJECT_STATS_COUNT];
-  size_t object_counts_last_time_[OBJECT_STATS_COUNT];
-  size_t object_sizes_[OBJECT_STATS_COUNT];
-  size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
-
-  // Maximum GC pause.
-  double max_gc_pause_;
-
-  // Total time spent in GC.
-  double total_gc_time_ms_;
-
-  // Maximum size of objects alive after GC.
-  intptr_t max_alive_after_gc_;
-
-  // Minimal interval between two subsequent collections.
-  double min_in_mutator_;
-
-  // Size of objects alive after last GC.
-  intptr_t alive_after_last_gc_;
-
-  double last_gc_end_timestamp_;
-
-  // Cumulative GC time spent in marking
-  double marking_time_;
-
-  // Cumulative GC time spent in sweeping
-  double sweeping_time_;
-
-  MarkCompactCollector mark_compact_collector_;
-
-  StoreBuffer store_buffer_;
-
-  Marking marking_;
-
-  IncrementalMarking incremental_marking_;
-
-  int number_idle_notifications_;
-  unsigned int last_idle_notification_gc_count_;
-  bool last_idle_notification_gc_count_init_;
-
-  int mark_sweeps_since_idle_round_started_;
-  unsigned int gc_count_at_last_idle_gc_;
-  int scavenges_since_last_idle_round_;
-
-  // These two counters are monotonically increasing and never reset.
-  size_t full_codegen_bytes_generated_;
-  size_t crankshaft_codegen_bytes_generated_;
-
-  // If the --deopt_every_n_garbage_collections flag is set to a positive value,
-  // this variable holds the number of garbage collections since the last
-  // deoptimization triggered by garbage collection.
-  int gcs_since_last_deopt_;
-
-#ifdef VERIFY_HEAP
-  int no_weak_object_verification_scope_depth_;
-#endif
-
-  static const int kAllocationSiteScratchpadSize = 256;
-  int allocation_sites_scratchpad_length_;
-
-  static const int kMaxMarkSweepsInIdleRound = 7;
-  static const int kIdleScavengeThreshold = 5;
-
-  // Shared state read by the scavenge collector and set by ScavengeObject.
-  PromotionQueue promotion_queue_;
-
-  // Flag is set when the heap has been configured.  The heap can be repeatedly
-  // configured through the API until it is set up.
-  bool configured_;
-
-  ExternalStringTable external_string_table_;
-
-  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
-
-  MemoryChunk* chunks_queued_for_free_;
-
-  Mutex relocation_mutex_;
-
-  int gc_callbacks_depth_;
-
-  friend class AlwaysAllocateScope;
-  friend class Factory;
-  friend class GCCallbacksScope;
-  friend class GCTracer;
-  friend class HeapIterator;
-  friend class Isolate;
-  friend class MarkCompactCollector;
-  friend class MarkCompactMarkingVisitor;
-  friend class MapCompact;
-#ifdef VERIFY_HEAP
-  friend class NoWeakObjectVerificationScope;
-#endif
-  friend class Page;
-
-  DISALLOW_COPY_AND_ASSIGN(Heap);
-};
-
-
-class HeapStats {
- public:
-  static const int kStartMarker = 0xDECADE00;
-  static const int kEndMarker = 0xDECADE01;
-
-  int* start_marker;                          //  0
-  int* new_space_size;                        //  1
-  int* new_space_capacity;                    //  2
-  intptr_t* old_pointer_space_size;           //  3
-  intptr_t* old_pointer_space_capacity;       //  4
-  intptr_t* old_data_space_size;              //  5
-  intptr_t* old_data_space_capacity;          //  6
-  intptr_t* code_space_size;                  //  7
-  intptr_t* code_space_capacity;              //  8
-  intptr_t* map_space_size;                   //  9
-  intptr_t* map_space_capacity;               // 10
-  intptr_t* cell_space_size;                  // 11
-  intptr_t* cell_space_capacity;              // 12
-  intptr_t* lo_space_size;                    // 13
-  int* global_handle_count;                   // 14
-  int* weak_global_handle_count;              // 15
-  int* pending_global_handle_count;           // 16
-  int* near_death_global_handle_count;        // 17
-  int* free_global_handle_count;              // 18
-  intptr_t* memory_allocator_size;            // 19
-  intptr_t* memory_allocator_capacity;        // 20
-  int* objects_per_type;                      // 21
-  int* size_per_type;                         // 22
-  int* os_error;                              // 23
-  int* end_marker;                            // 24
-  intptr_t* property_cell_space_size;         // 25
-  intptr_t* property_cell_space_capacity;     // 26
-};
-
-
-class AlwaysAllocateScope {
- public:
-  explicit inline AlwaysAllocateScope(Isolate* isolate);
-  inline ~AlwaysAllocateScope();
-
- private:
-  // Implicitly disable artificial allocation failures.
-  Heap* heap_;
-  DisallowAllocationFailure daf_;
-};
-
-
-#ifdef VERIFY_HEAP
-class NoWeakObjectVerificationScope {
- public:
-  inline NoWeakObjectVerificationScope();
-  inline ~NoWeakObjectVerificationScope();
-};
-#endif
-
-
-class GCCallbacksScope {
- public:
-  explicit inline GCCallbacksScope(Heap* heap);
-  inline ~GCCallbacksScope();
-
-  inline bool CheckReenter();
-
- private:
-  Heap* heap_;
-};
-
-
-// Visitor class to verify interior pointers in spaces that do not contain
-// or care about intergenerational references. All heap object pointers have to
-// point into the heap to a location that has a map pointer at its first word.
-// Caveat: Heap::Contains is an approximation because it can return true for
-// objects in a heap space but above the allocation pointer.
-class VerifyPointersVisitor: public ObjectVisitor {
- public:
-  inline void VisitPointers(Object** start, Object** end);
-};
-
-
-// Verify that all objects are Smis.
-class VerifySmisVisitor: public ObjectVisitor {
- public:
-  inline void VisitPointers(Object** start, Object** end);
-};
-
-
-// Space iterator for iterating over all spaces of the heap.  Returns each space
-// in turn, and null when it is done.
-class AllSpaces BASE_EMBEDDED {
- public:
-  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
-  Space* next();
- private:
-  Heap* heap_;
-  int counter_;
-};
-
-
-// Space iterator for iterating over all old spaces of the heap: Old pointer
-// space, old data space and code space.  Returns each space in turn, and null
-// when it is done.
-class OldSpaces BASE_EMBEDDED {
- public:
-  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
-  OldSpace* next();
- private:
-  Heap* heap_;
-  int counter_;
-};
-
-
-// Space iterator for iterating over all the paged spaces of the heap: Map
-// space, old pointer space, old data space, code space and cell space.  Returns
-// each space in turn, and null when it is done.
-class PagedSpaces BASE_EMBEDDED {
- public:
-  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
-  PagedSpace* next();
- private:
-  Heap* heap_;
-  int counter_;
-};
-
-
-// Space iterator for iterating over all spaces of the heap.
-// For each space an object iterator is provided. The deallocation of the
-// returned object iterators is handled by the space iterator.
-class SpaceIterator : public Malloced {
- public:
-  explicit SpaceIterator(Heap* heap);
-  SpaceIterator(Heap* heap, HeapObjectCallback size_func);
-  virtual ~SpaceIterator();
-
-  bool has_next();
-  ObjectIterator* next();
-
- private:
-  ObjectIterator* CreateIterator();
-
-  Heap* heap_;
-  int current_space_;  // from enum AllocationSpace.
-  ObjectIterator* iterator_;  // object iterator for the current space.
-  HeapObjectCallback size_func_;
-};
-
-
-// A HeapIterator provides iteration over the whole heap. It aggregates the
-// specific iterators for the different spaces, as each of these can iterate
-// over only one space.
-//
-// HeapIterator ensures there is no allocation during its lifetime
-// (using an embedded DisallowHeapAllocation instance).
-//
-// HeapIterator can skip free list nodes (that is, de-allocated heap
-// objects that still remain in the heap). As the implementation of free
-// node filtering uses GC marks, it can't be used during MS/MC GC
-// phases. Also, it is forbidden to interrupt iteration in this mode,
-// as this will leave heap objects marked (and thus, unusable).
-class HeapObjectsFilter;
-
-class HeapIterator BASE_EMBEDDED {
- public:
-  enum HeapObjectsFiltering {
-    kNoFiltering,
-    kFilterUnreachable
-  };
-
-  explicit HeapIterator(Heap* heap);
-  HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
-  ~HeapIterator();
-
-  HeapObject* next();
-  void reset();
-
- private:
-  struct MakeHeapIterableHelper {
-    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
-  };
-
-  // Perform the initialization.
-  void Init();
-  // Perform all necessary shutdown (destruction) work.
-  void Shutdown();
-  HeapObject* NextObject();
-
-  MakeHeapIterableHelper make_heap_iterable_helper_;
-  DisallowHeapAllocation no_heap_allocation_;
-  Heap* heap_;
-  HeapObjectsFiltering filtering_;
-  HeapObjectsFilter* filter_;
-  // Space iterator for iterating all the spaces.
-  SpaceIterator* space_iterator_;
-  // Object iterator for the space currently being iterated.
-  ObjectIterator* object_iterator_;
-};
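-
-// A typical traversal looks like the following sketch ('heap' is assumed to
-// be a fully set-up Heap*):
-//
-//   HeapIterator iterator(heap);
-//   for (HeapObject* obj = iterator.next(); obj != NULL;
-//        obj = iterator.next()) {
-//     // Inspect obj; allocation is forbidden while the iterator is alive.
-//   }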
-
-
-// Cache for mapping (map, property name) into field offset.
-// Cleared at startup and prior to mark sweep collection.
-class KeyedLookupCache {
- public:
-  // Lookup field offset for (map, name). If absent, -1 is returned.
-  int Lookup(Handle<Map> map, Handle<Name> name);
-
-  // Update an element in the cache.
-  void Update(Handle<Map> map, Handle<Name> name, int field_offset);
-
-  // Clear the cache.
-  void Clear();
-
-  static const int kLength = 256;
-  static const int kCapacityMask = kLength - 1;
-  static const int kMapHashShift = 5;
-  static const int kHashMask = -4;  // Zero the last two bits.
-  static const int kEntriesPerBucket = 4;
-  static const int kEntryLength = 2;
-  static const int kMapIndex = 0;
-  static const int kKeyIndex = 1;
-  static const int kNotFound = -1;
-
-  // kEntriesPerBucket should be a power of 2.
-  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
-  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
-
- private:
-  KeyedLookupCache() {
-    for (int i = 0; i < kLength; ++i) {
-      keys_[i].map = NULL;
-      keys_[i].name = NULL;
-      field_offsets_[i] = kNotFound;
-    }
-  }
-
-  static inline int Hash(Handle<Map> map, Handle<Name> name);
-
-  // Get the address of the keys and field_offsets arrays.  Used in
-  // generated code to perform cache lookups.
-  Address keys_address() {
-    return reinterpret_cast<Address>(&keys_);
-  }
-
-  Address field_offsets_address() {
-    return reinterpret_cast<Address>(&field_offsets_);
-  }
-
-  struct Key {
-    Map* map;
-    Name* name;
-  };
-
-  Key keys_[kLength];
-  int field_offsets_[kLength];
-
-  friend class ExternalReference;
-  friend class Isolate;
-  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
-};
-
-
-// Cache for mapping (map, property name) into descriptor index.
-// The cache contains both positive and negative results.
-// Descriptor index equals kNotFound means the property is absent.
-// Cleared at startup and prior to any GC.
-class DescriptorLookupCache {
- public:
-  // Lookup descriptor index for (map, name).
-  // If absent, kAbsent is returned.
-  int Lookup(Map* source, Name* name) {
-    if (!name->IsUniqueName()) return kAbsent;
-    int index = Hash(source, name);
-    Key& key = keys_[index];
-    if ((key.source == source) && (key.name == name)) return results_[index];
-    return kAbsent;
-  }
-
-  // Update an element in the cache.
-  void Update(Map* source, Name* name, int result) {
-    ASSERT(result != kAbsent);
-    if (name->IsUniqueName()) {
-      int index = Hash(source, name);
-      Key& key = keys_[index];
-      key.source = source;
-      key.name = name;
-      results_[index] = result;
-    }
-  }
-
-  // Clear the cache.
-  void Clear();
-
-  static const int kAbsent = -2;
-
- private:
-  DescriptorLookupCache() {
-    for (int i = 0; i < kLength; ++i) {
-      keys_[i].source = NULL;
-      keys_[i].name = NULL;
-      results_[i] = kAbsent;
-    }
-  }
-
-  static int Hash(Object* source, Name* name) {
-    // Uses only lower 32 bits if pointers are larger.
-    uint32_t source_hash =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source))
-            >> kPointerSizeLog2;
-    uint32_t name_hash =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name))
-            >> kPointerSizeLog2;
-    return (source_hash ^ name_hash) % kLength;
-  }
-
-  static const int kLength = 64;
-  struct Key {
-    Map* source;
-    Name* name;
-  };
-
-  Key keys_[kLength];
-  int results_[kLength];
-
-  friend class Isolate;
-  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
-};
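-
-// Both lookup caches follow a probe-then-fill protocol, sketched below
-// ('cache', 'map', 'name' and the fallback search are assumed to be in
-// scope):
-//
-//   int number = cache->Lookup(map, name);
-//   if (number == DescriptorLookupCache::kAbsent) {
-//     number = ...;  // full search in the map's descriptor array
-//     cache->Update(map, name, number);
-//   }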
-
-
-// GCTracer collects and prints ONE line after each garbage collector
-// invocation IFF --trace_gc is used.
-
-class GCTracer BASE_EMBEDDED {
- public:
-  class Scope BASE_EMBEDDED {
-   public:
-    enum ScopeId {
-      EXTERNAL,
-      MC_MARK,
-      MC_SWEEP,
-      MC_SWEEP_NEWSPACE,
-      MC_SWEEP_OLDSPACE,
-      MC_EVACUATE_PAGES,
-      MC_UPDATE_NEW_TO_NEW_POINTERS,
-      MC_UPDATE_ROOT_TO_NEW_POINTERS,
-      MC_UPDATE_OLD_TO_NEW_POINTERS,
-      MC_UPDATE_POINTERS_TO_EVACUATED,
-      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
-      MC_UPDATE_MISC_POINTERS,
-      MC_WEAKCOLLECTION_PROCESS,
-      MC_WEAKCOLLECTION_CLEAR,
-      MC_FLUSH_CODE,
-      kNumberOfScopes
-    };
-
-    Scope(GCTracer* tracer, ScopeId scope)
-        : tracer_(tracer),
-        scope_(scope) {
-      start_time_ = OS::TimeCurrentMillis();
-    }
-
-    ~Scope() {
-      ASSERT(scope_ < kNumberOfScopes);  // scope_ is unsigned.
-      tracer_->scopes_[scope_] += OS::TimeCurrentMillis() - start_time_;
-    }
-
-   private:
-    GCTracer* tracer_;
-    ScopeId scope_;
-    double start_time_;
-  };
-
-  explicit GCTracer(Heap* heap,
-                    const char* gc_reason,
-                    const char* collector_reason);
-  ~GCTracer();
-
-  // Sets the collector.
-  void set_collector(GarbageCollector collector) { collector_ = collector; }
-
-  // Sets the GC count.
-  void set_gc_count(unsigned int count) { gc_count_ = count; }
-
-  // Sets the full GC count.
-  void set_full_gc_count(int count) { full_gc_count_ = count; }
-
-  void increment_nodes_died_in_new_space() {
-    nodes_died_in_new_space_++;
-  }
-
-  void increment_nodes_copied_in_new_space() {
-    nodes_copied_in_new_space_++;
-  }
-
-  void increment_nodes_promoted() {
-    nodes_promoted_++;
-  }
-
- private:
-  // Returns a string matching the collector.
-  const char* CollectorString();
-
-  // Returns the size of objects in the heap (in MB).
-  inline double SizeOfHeapObjects();
-
-  // Timestamp set in the constructor.
-  double start_time_;
-
-  // Size of objects in heap set in constructor.
-  intptr_t start_object_size_;
-
-  // Size of memory allocated from OS set in constructor.
-  intptr_t start_memory_size_;
-
-  // Type of collector.
-  GarbageCollector collector_;
-
-  // A count (including this one, e.g. the first collection is 1) of the
-  // number of garbage collections.
-  unsigned int gc_count_;
-
-  // A count (including this one) of the number of full garbage collections.
-  int full_gc_count_;
-
-  // Amounts of time spent in different scopes during GC.
-  double scopes_[Scope::kNumberOfScopes];
-
-  // Total amount of space either wasted or contained in one of free lists
-  // before the current GC.
-  intptr_t in_free_list_or_wasted_before_gc_;
-
-  // Difference between space used in the heap at the beginning of the current
-  // collection and the end of the previous collection.
-  intptr_t allocated_since_last_gc_;
-
-  // Amount of time spent in the mutator, i.e. the time elapsed between the
-  // end of the previous collection and the beginning of the current one.
-  double spent_in_mutator_;
-
-  // Number of nodes that died in new space.
-  int nodes_died_in_new_space_;
-
-  // Number of nodes copied to new space.
-  int nodes_copied_in_new_space_;
-
-  // Number of nodes promoted to old space.
-  int nodes_promoted_;
-
-  // Incremental marking steps counters.
-  int steps_count_;
-  double steps_took_;
-  double longest_step_;
-  int steps_count_since_last_gc_;
-  double steps_took_since_last_gc_;
-
-  Heap* heap_;
-
-  const char* gc_reason_;
-  const char* collector_reason_;
-};
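-
-// Inside a collector, individual phases are timed with a stack-allocated
-// Scope; a minimal sketch (the tracer pointer is assumed to be in scope):
-//
-//   {
-//     GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
-//     // ... marking work; its duration is added to scopes_[MC_MARK] ...
-//   }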
-
-
-class RegExpResultsCache {
- public:
-  enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
-
-  // Attempt to retrieve a cached result.  On failure, 0 is returned as a Smi.
-  // On success, the returned result is guaranteed to be a COW-array.
-  static Object* Lookup(Heap* heap,
-                        String* key_string,
-                        Object* key_pattern,
-                        ResultsCacheType type);
-  // Attempt to add value_array to the cache specified by type.  On success,
-  // value_array is turned into a COW-array.
-  static void Enter(Isolate* isolate,
-                    Handle<String> key_string,
-                    Handle<Object> key_pattern,
-                    Handle<FixedArray> value_array,
-                    ResultsCacheType type);
-  static void Clear(FixedArray* cache);
-  static const int kRegExpResultsCacheSize = 0x100;
-
- private:
-  static const int kArrayEntriesPerCacheEntry = 4;
-  static const int kStringOffset = 0;
-  static const int kPatternOffset = 1;
-  static const int kArrayOffset = 2;
-};
-
-
-// Abstract base class for checking whether a weak object should be retained.
-class WeakObjectRetainer {
- public:
-  virtual ~WeakObjectRetainer() {}
-
-  // Return whether this object should be retained. If NULL is returned, the
-  // object has no references. Otherwise the address of the retained object
-  // should be returned, as in some GC situations the object may have moved.
-  virtual Object* RetainAs(Object* object) = 0;
-};
-
-
-// Intrusive object marking uses the least significant bit of a heap
-// object's map word to mark objects. Normally all map words have the
-// least significant bit set because they contain a tagged map pointer.
-// If the bit is not set, the object is marked. All objects should be
-// unmarked before resuming JavaScript execution.
-class IntrusiveMarking {
- public:
-  static bool IsMarked(HeapObject* object) {
-    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
-  }
-
-  static void ClearMark(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
-    ASSERT(!IsMarked(object));
-  }
-
-  static void SetMark(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
-    ASSERT(IsMarked(object));
-  }
-
-  static Map* MapOfMarkedObject(HeapObject* object) {
-    uintptr_t map_word = object->map_word().ToRawValue();
-    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
-  }
-
-  static int SizeOfMarkedObject(HeapObject* object) {
-    return object->SizeFromMap(MapOfMarkedObject(object));
-  }
-
- private:
-  static const uintptr_t kNotMarkedBit = 0x1;
-  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);  // NOLINT
-};
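-
-// Example of the encoding above: an unmarked object's map word ends in a set
-// tag bit (e.g. ...1101); SetMark clears that bit (...1100), IsMarked then
-// reads the cleared bit as "marked", and MapOfMarkedObject ors the bit back
-// in to recover the original tagged map pointer.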
-
-
-#ifdef DEBUG
-// Helper class for tracing paths to a search target Object from all roots.
-// The TracePathFrom() method can be used to trace paths from a specific
-// object to the search target object.
-class PathTracer : public ObjectVisitor {
- public:
-  enum WhatToFind {
-    FIND_ALL,   // Will find all matches.
-    FIND_FIRST  // Will stop the search after first match.
-  };
-
-  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObject.
-  static const int kMarkTag = 2;
-
-  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
-  // after the first match.  If FIND_ALL is specified, then tracing will be
-  // done for all matches.
-  PathTracer(Object* search_target,
-             WhatToFind what_to_find,
-             VisitMode visit_mode)
-      : search_target_(search_target),
-        found_target_(false),
-        found_target_in_trace_(false),
-        what_to_find_(what_to_find),
-        visit_mode_(visit_mode),
-        object_stack_(20),
-        no_allocation() {}
-
-  virtual void VisitPointers(Object** start, Object** end);
-
-  void Reset();
-  void TracePathFrom(Object** root);
-
-  bool found() const { return found_target_; }
-
-  static Object* const kAnyGlobalObject;
-
- protected:
-  class MarkVisitor;
-  class UnmarkVisitor;
-
-  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
-  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
-  virtual void ProcessResults();
-
-  Object* search_target_;
-  bool found_target_;
-  bool found_target_in_trace_;
-  WhatToFind what_to_find_;
-  VisitMode visit_mode_;
-  List<Object*> object_stack_;
-
-  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
-};
-#endif  // DEBUG
-
-} }  // namespace v8::internal
-
-#endif  // V8_HEAP_H_
diff --git a/src/heap/gc-idle-time-handler-unittest.cc b/src/heap/gc-idle-time-handler-unittest.cc
new file mode 100644
index 0000000..b4f2f74
--- /dev/null
+++ b/src/heap/gc-idle-time-handler-unittest.cc
@@ -0,0 +1,348 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/heap/gc-idle-time-handler.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace v8 {
+namespace internal {
+
+namespace {
+
+class GCIdleTimeHandlerTest : public ::testing::Test {
+ public:
+  GCIdleTimeHandlerTest() {}
+  virtual ~GCIdleTimeHandlerTest() {}
+
+  GCIdleTimeHandler* handler() { return &handler_; }
+
+  GCIdleTimeHandler::HeapState DefaultHeapState() {
+    GCIdleTimeHandler::HeapState result;
+    result.contexts_disposed = 0;
+    result.size_of_objects = kSizeOfObjects;
+    result.incremental_marking_stopped = false;
+    result.can_start_incremental_marking = true;
+    result.sweeping_in_progress = false;
+    result.mark_compact_speed_in_bytes_per_ms = kMarkCompactSpeed;
+    result.incremental_marking_speed_in_bytes_per_ms = kMarkingSpeed;
+    result.scavenge_speed_in_bytes_per_ms = kScavengeSpeed;
+    result.available_new_space_memory = kNewSpaceCapacity;
+    result.new_space_capacity = kNewSpaceCapacity;
+    result.new_space_allocation_throughput_in_bytes_per_ms =
+        kNewSpaceAllocationThroughput;
+    return result;
+  }
+
+  static const size_t kSizeOfObjects = 100 * MB;
+  static const size_t kMarkCompactSpeed = 200 * KB;
+  static const size_t kMarkingSpeed = 200 * KB;
+  static const size_t kScavengeSpeed = 100 * KB;
+  static const size_t kNewSpaceCapacity = 1 * MB;
+  static const size_t kNewSpaceAllocationThroughput = 10 * KB;
+
+ private:
+  GCIdleTimeHandler handler_;
+};
+
+}  // namespace
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeInitial) {
+  size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(1, 0);
+  EXPECT_EQ(
+      static_cast<size_t>(GCIdleTimeHandler::kInitialConservativeMarkingSpeed *
+                          GCIdleTimeHandler::kConservativeTimeRatio),
+      step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeNonZero) {
+  size_t marking_speed_in_bytes_per_millisecond = 100;
+  size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+      1, marking_speed_in_bytes_per_millisecond);
+  EXPECT_EQ(static_cast<size_t>(marking_speed_in_bytes_per_millisecond *
+                                GCIdleTimeHandler::kConservativeTimeRatio),
+            step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow1) {
+  size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+      10, std::numeric_limits<size_t>::max());
+  EXPECT_EQ(static_cast<size_t>(GCIdleTimeHandler::kMaximumMarkingStepSize),
+            step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkingStepSizeOverflow2) {
+  size_t step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+      std::numeric_limits<size_t>::max(), 10);
+  EXPECT_EQ(static_cast<size_t>(GCIdleTimeHandler::kMaximumMarkingStepSize),
+            step_size);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeInitial) {
+  size_t size = 100 * MB;
+  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, 0);
+  EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeMarkCompactSpeed,
+            time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeNonZero) {
+  size_t size = 100 * MB;
+  size_t speed = 1 * MB;
+  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
+  EXPECT_EQ(size / speed, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateMarkCompactTimeMax) {
+  size_t size = std::numeric_limits<size_t>::max();
+  size_t speed = 1;
+  size_t time = GCIdleTimeHandler::EstimateMarkCompactTime(size, speed);
+  EXPECT_EQ(GCIdleTimeHandler::kMaxMarkCompactTimeInMs, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateScavengeTimeInitial) {
+  size_t size = 1 * MB;
+  size_t time = GCIdleTimeHandler::EstimateScavengeTime(size, 0);
+  EXPECT_EQ(size / GCIdleTimeHandler::kInitialConservativeScavengeSpeed, time);
+}
+
+
+TEST(GCIdleTimeHandler, EstimateScavengeTimeNonZero) {
+  size_t size = 1 * MB;
+  size_t speed = 1 * MB;
+  size_t time = GCIdleTimeHandler::EstimateScavengeTime(size, speed);
+  EXPECT_EQ(size / speed, time);
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonInitial) {
+  size_t available = 100 * KB;
+  EXPECT_FALSE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, 0));
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonNonZeroFalse) {
+  size_t available = (GCIdleTimeHandler::kMaxFrameRenderingIdleTime + 1) * KB;
+  size_t speed = 1 * KB;
+  EXPECT_FALSE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, speed));
+}
+
+
+TEST(GCIdleTimeHandler, ScavangeMayHappenSoonNonZeroTrue) {
+  size_t available = GCIdleTimeHandler::kMaxFrameRenderingIdleTime * KB;
+  size_t speed = 1 * KB;
+  EXPECT_TRUE(GCIdleTimeHandler::ScavangeMayHappenSoon(available, speed));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  heap_state.incremental_marking_stopped = true;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms =
+      static_cast<int>((heap_state.size_of_objects + speed - 1) / speed);
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  heap_state.incremental_marking_stopped = true;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+  int idle_time_ms = 10;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+  EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
+            static_cast<size_t>(action.parameter));
+  EXPECT_LT(0, action.parameter);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, IncrementalMarking2) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.incremental_marking_stopped = true;
+  size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
+  int idle_time_ms = 10;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+  EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
+            static_cast<size_t>(action.parameter));
+  EXPECT_LT(0, action.parameter);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, NotEnoughTime) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.incremental_marking_stopped = true;
+  heap_state.can_start_incremental_marking = false;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, StopEventually1) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.incremental_marking_stopped = true;
+  heap_state.can_start_incremental_marking = false;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    EXPECT_EQ(DO_FULL_GC, action.type);
+    handler()->NotifyIdleMarkCompact();
+  }
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, StopEventually2) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we emulate incremental marking steps that finish with a
+    // full GC.
+    handler()->NotifyIdleMarkCompact();
+  }
+  heap_state.can_start_incremental_marking = false;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DONE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop1) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.incremental_marking_stopped = true;
+  heap_state.can_start_incremental_marking = false;
+  size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
+  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    EXPECT_EQ(DO_FULL_GC, action.type);
+    handler()->NotifyIdleMarkCompact();
+  }
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DONE, action.type);
+  // Emulate mutator work.
+  for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+    handler()->NotifyScavenge();
+  }
+  action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop2) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    if (action.type == DONE) break;
+    EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we try to emulate incremental marking steps that finish
+    // with a full GC.
+    handler()->NotifyIdleMarkCompact();
+  }
+  heap_state.can_start_incremental_marking = false;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DONE, action.type);
+  // Emulate mutator work.
+  for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+    handler()->NotifyScavenge();
+  }
+  heap_state.can_start_incremental_marking = true;
+  action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, Scavenge) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  heap_state.available_new_space_memory =
+      kNewSpaceAllocationThroughput * idle_time_ms;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_SCAVENGE, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ScavengeAndDone) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  heap_state.can_start_incremental_marking = false;
+  heap_state.incremental_marking_stopped = true;
+  heap_state.available_new_space_memory =
+      kNewSpaceAllocationThroughput * idle_time_ms;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_SCAVENGE, action.type);
+  heap_state.available_new_space_memory = kNewSpaceCapacity;
+  action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 0;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeDoNothingButStartIdleRound) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  int idle_time_ms = 10;
+  for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
+    GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+    if (action.type == DONE) break;
+    EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
+    // In this case we try to emulate incremental marking steps that finish
+    // with a full GC.
+    handler()->NotifyIdleMarkCompact();
+  }
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  // Emulate mutator work.
+  for (int i = 0; i < GCIdleTimeHandler::kIdleScavengeThreshold; i++) {
+    handler()->NotifyScavenge();
+  }
+  action = handler()->Compute(0, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/gc-idle-time-handler.cc b/src/heap/gc-idle-time-handler.cc
new file mode 100644
index 0000000..b9a99b2
--- /dev/null
+++ b/src/heap/gc-idle-time-handler.cc
@@ -0,0 +1,174 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
+const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
+const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
+const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
+const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
+
+
+void GCIdleTimeAction::Print() {
+  switch (type) {
+    case DONE:
+      PrintF("done");
+      break;
+    case DO_NOTHING:
+      PrintF("no action");
+      break;
+    case DO_INCREMENTAL_MARKING:
+      PrintF("incremental marking with step %" V8_PTR_PREFIX "d", parameter);
+      break;
+    case DO_SCAVENGE:
+      PrintF("scavenge");
+      break;
+    case DO_FULL_GC:
+      PrintF("full GC");
+      break;
+    case DO_FINALIZE_SWEEPING:
+      PrintF("finalize sweeping");
+      break;
+  }
+}
+
+
+size_t GCIdleTimeHandler::EstimateMarkingStepSize(
+    size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
+  DCHECK(idle_time_in_ms > 0);
+
+  if (marking_speed_in_bytes_per_ms == 0) {
+    marking_speed_in_bytes_per_ms = kInitialConservativeMarkingSpeed;
+  }
+
+  size_t marking_step_size = marking_speed_in_bytes_per_ms * idle_time_in_ms;
+  if (marking_step_size / marking_speed_in_bytes_per_ms != idle_time_in_ms) {
+    // In the case of an overflow we return maximum marking step size.
+    return kMaximumMarkingStepSize;
+  }
+
+  if (marking_step_size > kMaximumMarkingStepSize)
+    return kMaximumMarkingStepSize;
+
+  return static_cast<size_t>(marking_step_size * kConservativeTimeRatio);
+}
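+
+// Worked example: with a measured speed of 100 bytes/ms and 1 ms of idle
+// time, the raw step is 100 bytes and the returned, conservatively scaled
+// step is 100 * 0.9 = 90 bytes (cf. EstimateMarkingStepSizeNonZero in the
+// unit tests above).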
+
+
+size_t GCIdleTimeHandler::EstimateMarkCompactTime(
+    size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
+  if (mark_compact_speed_in_bytes_per_ms == 0) {
+    mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
+  }
+  size_t result = size_of_objects / mark_compact_speed_in_bytes_per_ms;
+  return Min(result, kMaxMarkCompactTimeInMs);
+}
+
+
+size_t GCIdleTimeHandler::EstimateScavengeTime(
+    size_t new_space_size, size_t scavenge_speed_in_bytes_per_ms) {
+  if (scavenge_speed_in_bytes_per_ms == 0) {
+    scavenge_speed_in_bytes_per_ms = kInitialConservativeScavengeSpeed;
+  }
+  return new_space_size / scavenge_speed_in_bytes_per_ms;
+}
+
+
+bool GCIdleTimeHandler::ScavangeMayHappenSoon(
+    size_t available_new_space_memory,
+    size_t new_space_allocation_throughput_in_bytes_per_ms) {
+  if (available_new_space_memory <=
+      new_space_allocation_throughput_in_bytes_per_ms *
+          kMaxFrameRenderingIdleTime) {
+    return true;
+  }
+  return false;
+}
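+
+// Worked example: with an allocation throughput of 1 KB/ms, the predicate
+// becomes true once the remaining new-space memory is within
+// kMaxFrameRenderingIdleTime KB of exhaustion, and stays false above that
+// (cf. the ScavangeMayHappenSoon unit tests above).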
+
+
+// The following logic is implemented by the controller:
+// (1) If the new space is almost full and we can afford a Scavenge, then a
+// Scavenge is performed.
+// (2) If there is currently no MarkCompact idle round going on, we start a
+// new idle round if enough garbage was created or we received a context
+// disposal event. Otherwise we do not perform garbage collection to keep
+// system utilization low.
+// (3) If incremental marking is done, we perform a full garbage collection
+// if context was disposed or if we are allowed to still do full garbage
+// collections during this idle round or if we are not allowed to start
+// incremental marking. Otherwise we do not perform garbage collection to
+// keep system utilization low.
+// (4) If sweeping is in progress and we received a large enough idle time
+// request, we finalize sweeping here.
+// (5) If incremental marking is in progress, we perform a marking step. Note,
+// that this currently may trigger a full garbage collection.
+GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
+                                            HeapState heap_state) {
+  if (idle_time_in_ms <= kMaxFrameRenderingIdleTime &&
+      ScavangeMayHappenSoon(
+          heap_state.available_new_space_memory,
+          heap_state.new_space_allocation_throughput_in_bytes_per_ms) &&
+      idle_time_in_ms >=
+          EstimateScavengeTime(heap_state.new_space_capacity,
+                               heap_state.scavenge_speed_in_bytes_per_ms)) {
+    return GCIdleTimeAction::Scavenge();
+  }
+  if (IsMarkCompactIdleRoundFinished()) {
+    if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
+      StartIdleRound();
+    } else {
+      return GCIdleTimeAction::Done();
+    }
+  }
+
+  if (idle_time_in_ms == 0) {
+    return GCIdleTimeAction::Nothing();
+  }
+
+  if (heap_state.incremental_marking_stopped) {
+    size_t estimated_time_in_ms =
+        EstimateMarkCompactTime(heap_state.size_of_objects,
+                                heap_state.mark_compact_speed_in_bytes_per_ms);
+    if (idle_time_in_ms >= estimated_time_in_ms ||
+        (heap_state.size_of_objects < kSmallHeapSize &&
+         heap_state.contexts_disposed > 0)) {
+      // If there are no more than two GCs left in this idle round and we are
+      // allowed to do a full GC, then make those GCs full in order to compact
+      // the code space.
+      // TODO(ulan): Once we enable code compaction for incremental marking, we
+      // can get rid of this special case and always start incremental marking.
+      int remaining_mark_sweeps =
+          kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
+      if (heap_state.contexts_disposed > 0 ||
+          (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
+           (remaining_mark_sweeps <= 2 ||
+            !heap_state.can_start_incremental_marking))) {
+        return GCIdleTimeAction::FullGC();
+      }
+    }
+    if (!heap_state.can_start_incremental_marking) {
+      return GCIdleTimeAction::Nothing();
+    }
+  }
+  // TODO(hpayer): Estimate finalize sweeping time.
+  if (heap_state.sweeping_in_progress &&
+      idle_time_in_ms >= kMinTimeForFinalizeSweeping) {
+    return GCIdleTimeAction::FinalizeSweeping();
+  }
+
+  if (heap_state.incremental_marking_stopped &&
+      !heap_state.can_start_incremental_marking) {
+    return GCIdleTimeAction::Nothing();
+  }
+  size_t step_size = EstimateMarkingStepSize(
+      idle_time_in_ms, heap_state.incremental_marking_speed_in_bytes_per_ms);
+  return GCIdleTimeAction::IncrementalMarking(step_size);
+}
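+
+// Illustrative sketch of a caller (hypothetical; the production caller is the
+// idle notification path in Heap, outside this file): a HeapState snapshot is
+// filled from heap counters and the returned action is dispatched on its type.
+//
+//   GCIdleTimeHandler handler;
+//   GCIdleTimeHandler::HeapState state;
+//   state.contexts_disposed = 0;
+//   state.size_of_objects = heap->SizeOfObjects();
+//   state.incremental_marking_stopped =
+//       heap->incremental_marking()->IsStopped();
+//   // ... the remaining counters come from Heap and GCTracer ...
+//   GCIdleTimeAction action = handler.Compute(idle_time_in_ms, state);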
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/gc-idle-time-handler.h b/src/heap/gc-idle-time-handler.h
new file mode 100644
index 0000000..daab616
--- /dev/null
+++ b/src/heap/gc-idle-time-handler.h
@@ -0,0 +1,188 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_GC_IDLE_TIME_HANDLER_H_
+#define V8_HEAP_GC_IDLE_TIME_HANDLER_H_
+
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+enum GCIdleTimeActionType {
+  DONE,
+  DO_NOTHING,
+  DO_INCREMENTAL_MARKING,
+  DO_SCAVENGE,
+  DO_FULL_GC,
+  DO_FINALIZE_SWEEPING
+};
+
+
+class GCIdleTimeAction {
+ public:
+  static GCIdleTimeAction Done() {
+    GCIdleTimeAction result;
+    result.type = DONE;
+    result.parameter = 0;
+    return result;
+  }
+
+  static GCIdleTimeAction Nothing() {
+    GCIdleTimeAction result;
+    result.type = DO_NOTHING;
+    result.parameter = 0;
+    return result;
+  }
+
+  static GCIdleTimeAction IncrementalMarking(intptr_t step_size) {
+    GCIdleTimeAction result;
+    result.type = DO_INCREMENTAL_MARKING;
+    result.parameter = step_size;
+    return result;
+  }
+
+  static GCIdleTimeAction Scavenge() {
+    GCIdleTimeAction result;
+    result.type = DO_SCAVENGE;
+    result.parameter = 0;
+    return result;
+  }
+
+  static GCIdleTimeAction FullGC() {
+    GCIdleTimeAction result;
+    result.type = DO_FULL_GC;
+    result.parameter = 0;
+    return result;
+  }
+
+  static GCIdleTimeAction FinalizeSweeping() {
+    GCIdleTimeAction result;
+    result.type = DO_FINALIZE_SWEEPING;
+    result.parameter = 0;
+    return result;
+  }
+
+  void Print();
+
+  GCIdleTimeActionType type;
+  intptr_t parameter;
+};
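+
+// Illustrative dispatch sketch (hypothetical consumer, not part of this
+// header): callers switch on |type|; only DO_INCREMENTAL_MARKING uses
+// |parameter|, which carries the marking step size in bytes.
+//
+//   switch (action.type) {
+//     case DO_INCREMENTAL_MARKING:
+//       StepMarking(action.parameter);  // hypothetical helper
+//       break;
+//     case DO_SCAVENGE: /* collect the new space */ break;
+//     // DONE / DO_NOTHING / DO_FULL_GC / DO_FINALIZE_SWEEPING analogous.
+//   }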
+
+
+class GCTracer;
+
+// The idle time handler decides which garbage collection operations to
+// execute during an IdleNotification.
+class GCIdleTimeHandler {
+ public:
+  // If we haven't recorded any incremental marking events yet, we use a
+  // conservative lower bound for the marking speed.
+  static const size_t kInitialConservativeMarkingSpeed = 100 * KB;
+
+  // Maximum marking step size returned by EstimateMarkingStepSize.
+  static const size_t kMaximumMarkingStepSize = 700 * MB;
+
+  // We have to make sure that we finish the IdleNotification before
+  // idle_time_in_ms. Hence, we conservatively prune our workload estimate.
+  static const double kConservativeTimeRatio;
+
+  // If we haven't recorded any mark-compact events yet, we use a
+  // conservative lower bound for the mark-compact speed.
+  static const size_t kInitialConservativeMarkCompactSpeed = 2 * MB;
+
+  // Maximum mark-compact time returned by EstimateMarkCompactTime.
+  static const size_t kMaxMarkCompactTimeInMs;
+
+  // Minimum time to finalize sweeping phase. The main thread may wait for
+  // sweeper threads.
+  static const size_t kMinTimeForFinalizeSweeping;
+
+  // Number of idle mark-compact events, after which the idle handler will
+  // finish the idle round.
+  static const int kMaxMarkCompactsInIdleRound;
+
+  // Number of scavenges that will trigger the start of a new idle round.
+  static const int kIdleScavengeThreshold;
+
+  // Heap size threshold below which we prefer mark-compact over incremental
+  // step.
+  static const size_t kSmallHeapSize = 4 * kPointerSize * MB;
+
+  // This is the maximum idle time we will have during frame rendering.
+  static const size_t kMaxFrameRenderingIdleTime = 16;
+
+  // If less than this much memory is left in the new space, we consider it
+  // almost full and force a new space collection earlier in the idle time.
+  static const size_t kNewSpaceAlmostFullThreshold = 100 * KB;
+
+  // If we haven't recorded any scavenger events yet, we use a conservative
+  // lower bound for the scavenger speed.
+  static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
+
+  struct HeapState {
+    int contexts_disposed;
+    size_t size_of_objects;
+    bool incremental_marking_stopped;
+    bool can_start_incremental_marking;
+    bool sweeping_in_progress;
+    size_t mark_compact_speed_in_bytes_per_ms;
+    size_t incremental_marking_speed_in_bytes_per_ms;
+    size_t scavenge_speed_in_bytes_per_ms;
+    size_t available_new_space_memory;
+    size_t new_space_capacity;
+    size_t new_space_allocation_throughput_in_bytes_per_ms;
+  };
+
+  GCIdleTimeHandler()
+      : mark_compacts_since_idle_round_started_(0),
+        scavenges_since_last_idle_round_(0) {}
+
+  GCIdleTimeAction Compute(size_t idle_time_in_ms, HeapState heap_state);
+
+  void NotifyIdleMarkCompact() {
+    if (mark_compacts_since_idle_round_started_ < kMaxMarkCompactsInIdleRound) {
+      ++mark_compacts_since_idle_round_started_;
+      if (mark_compacts_since_idle_round_started_ ==
+          kMaxMarkCompactsInIdleRound) {
+        scavenges_since_last_idle_round_ = 0;
+      }
+    }
+  }
+
+  void NotifyScavenge() { ++scavenges_since_last_idle_round_; }
+
+  static size_t EstimateMarkingStepSize(size_t idle_time_in_ms,
+                                        size_t marking_speed_in_bytes_per_ms);
+
+  static size_t EstimateMarkCompactTime(
+      size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
+
+  static size_t EstimateScavengeTime(size_t new_space_size,
+                                     size_t scavenger_speed_in_bytes_per_ms);
+
+  static bool ScavengeMayHappenSoon(
+      size_t available_new_space_memory,
+      size_t new_space_allocation_throughput_in_bytes_per_ms);
+
+ private:
+  void StartIdleRound() { mark_compacts_since_idle_round_started_ = 0; }
+  bool IsMarkCompactIdleRoundFinished() {
+    return mark_compacts_since_idle_round_started_ ==
+           kMaxMarkCompactsInIdleRound;
+  }
+  bool EnoughGarbageSinceLastIdleRound() {
+    return scavenges_since_last_idle_round_ >= kIdleScavengeThreshold;
+  }
+
+  int mark_compacts_since_idle_round_started_;
+  int scavenges_since_last_idle_round_;
+
+  DISALLOW_COPY_AND_ASSIGN(GCIdleTimeHandler);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_GC_IDLE_TIME_HANDLER_H_
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
new file mode 100644
index 0000000..8a40b53
--- /dev/null
+++ b/src/heap/gc-tracer.cc
@@ -0,0 +1,480 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/heap/gc-tracer.h"
+
+namespace v8 {
+namespace internal {
+
+static intptr_t CountTotalHolesSize(Heap* heap) {
+  intptr_t holes_size = 0;
+  OldSpaces spaces(heap);
+  for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) {
+    holes_size += space->Waste() + space->Available();
+  }
+  return holes_size;
+}
+
+
+GCTracer::AllocationEvent::AllocationEvent(double duration,
+                                           intptr_t allocation_in_bytes) {
+  duration_ = duration;
+  allocation_in_bytes_ = allocation_in_bytes;
+}
+
+
+GCTracer::Event::Event(Type type, const char* gc_reason,
+                       const char* collector_reason)
+    : type(type),
+      gc_reason(gc_reason),
+      collector_reason(collector_reason),
+      start_time(0.0),
+      end_time(0.0),
+      start_object_size(0),
+      end_object_size(0),
+      start_memory_size(0),
+      end_memory_size(0),
+      start_holes_size(0),
+      end_holes_size(0),
+      cumulative_incremental_marking_steps(0),
+      incremental_marking_steps(0),
+      cumulative_incremental_marking_bytes(0),
+      incremental_marking_bytes(0),
+      cumulative_incremental_marking_duration(0.0),
+      incremental_marking_duration(0.0),
+      cumulative_pure_incremental_marking_duration(0.0),
+      pure_incremental_marking_duration(0.0),
+      longest_incremental_marking_step(0.0) {
+  for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
+    scopes[i] = 0;
+  }
+}
+
+
+const char* GCTracer::Event::TypeName(bool short_name) const {
+  switch (type) {
+    case SCAVENGER:
+      if (short_name) {
+        return "s";
+      } else {
+        return "Scavenge";
+      }
+    case MARK_COMPACTOR:
+      if (short_name) {
+        return "ms";
+      } else {
+        return "Mark-sweep";
+      }
+    case START:
+      if (short_name) {
+        return "st";
+      } else {
+        return "Start";
+      }
+  }
+  return "Unknown Event Type";
+}
+
+
+GCTracer::GCTracer(Heap* heap)
+    : heap_(heap),
+      cumulative_incremental_marking_steps_(0),
+      cumulative_incremental_marking_bytes_(0),
+      cumulative_incremental_marking_duration_(0.0),
+      cumulative_pure_incremental_marking_duration_(0.0),
+      longest_incremental_marking_step_(0.0),
+      cumulative_marking_duration_(0.0),
+      cumulative_sweeping_duration_(0.0),
+      new_space_top_after_gc_(0) {
+  current_ = Event(Event::START, NULL, NULL);
+  current_.end_time = base::OS::TimeCurrentMillis();
+  previous_ = previous_mark_compactor_event_ = current_;
+}
+
+
+void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
+                     const char* collector_reason) {
+  previous_ = current_;
+  double start_time = base::OS::TimeCurrentMillis();
+  if (new_space_top_after_gc_ != 0) {
+    AddNewSpaceAllocationTime(
+        start_time - previous_.end_time,
+        reinterpret_cast<intptr_t>((heap_->new_space()->top()) -
+                                   new_space_top_after_gc_));
+  }
+  if (current_.type == Event::MARK_COMPACTOR)
+    previous_mark_compactor_event_ = current_;
+
+  if (collector == SCAVENGER) {
+    current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
+  } else {
+    current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+  }
+
+  current_.start_time = start_time;
+  current_.start_object_size = heap_->SizeOfObjects();
+  current_.start_memory_size = heap_->isolate()->memory_allocator()->Size();
+  current_.start_holes_size = CountTotalHolesSize(heap_);
+  current_.new_space_object_size =
+      heap_->new_space()->top() - heap_->new_space()->bottom();
+
+  current_.cumulative_incremental_marking_steps =
+      cumulative_incremental_marking_steps_;
+  current_.cumulative_incremental_marking_bytes =
+      cumulative_incremental_marking_bytes_;
+  current_.cumulative_incremental_marking_duration =
+      cumulative_incremental_marking_duration_;
+  current_.cumulative_pure_incremental_marking_duration =
+      cumulative_pure_incremental_marking_duration_;
+  current_.longest_incremental_marking_step = longest_incremental_marking_step_;
+
+  for (int i = 0; i < Scope::NUMBER_OF_SCOPES; i++) {
+    current_.scopes[i] = 0;
+  }
+}
+
+
+void GCTracer::Stop() {
+  current_.end_time = base::OS::TimeCurrentMillis();
+  current_.end_object_size = heap_->SizeOfObjects();
+  current_.end_memory_size = heap_->isolate()->memory_allocator()->Size();
+  current_.end_holes_size = CountTotalHolesSize(heap_);
+  new_space_top_after_gc_ =
+      reinterpret_cast<intptr_t>(heap_->new_space()->top());
+
+  if (current_.type == Event::SCAVENGER) {
+    current_.incremental_marking_steps =
+        current_.cumulative_incremental_marking_steps -
+        previous_.cumulative_incremental_marking_steps;
+    current_.incremental_marking_bytes =
+        current_.cumulative_incremental_marking_bytes -
+        previous_.cumulative_incremental_marking_bytes;
+    current_.incremental_marking_duration =
+        current_.cumulative_incremental_marking_duration -
+        previous_.cumulative_incremental_marking_duration;
+    current_.pure_incremental_marking_duration =
+        current_.cumulative_pure_incremental_marking_duration -
+        previous_.cumulative_pure_incremental_marking_duration;
+    scavenger_events_.push_front(current_);
+  } else {
+    current_.incremental_marking_steps =
+        current_.cumulative_incremental_marking_steps -
+        previous_mark_compactor_event_.cumulative_incremental_marking_steps;
+    current_.incremental_marking_bytes =
+        current_.cumulative_incremental_marking_bytes -
+        previous_mark_compactor_event_.cumulative_incremental_marking_bytes;
+    current_.incremental_marking_duration =
+        current_.cumulative_incremental_marking_duration -
+        previous_mark_compactor_event_.cumulative_incremental_marking_duration;
+    current_.pure_incremental_marking_duration =
+        current_.cumulative_pure_incremental_marking_duration -
+        previous_mark_compactor_event_
+            .cumulative_pure_incremental_marking_duration;
+    longest_incremental_marking_step_ = 0.0;
+    mark_compactor_events_.push_front(current_);
+  }
+
+  // TODO(ernstm): move the code below out of GCTracer.
+
+  if (!FLAG_trace_gc && !FLAG_print_cumulative_gc_stat) return;
+
+  double duration = current_.end_time - current_.start_time;
+  double spent_in_mutator = Max(current_.start_time - previous_.end_time, 0.0);
+
+  heap_->UpdateCumulativeGCStatistics(duration, spent_in_mutator,
+                                      current_.scopes[Scope::MC_MARK]);
+
+  if (current_.type == Event::SCAVENGER && FLAG_trace_gc_ignore_scavenger)
+    return;
+
+  if (FLAG_trace_gc) {
+    if (FLAG_trace_gc_nvp)
+      PrintNVP();
+    else
+      Print();
+
+    heap_->PrintShortHeapStatistics();
+  }
+}
+
+
+void GCTracer::AddNewSpaceAllocationTime(double duration,
+                                         intptr_t allocation_in_bytes) {
+  allocation_events_.push_front(AllocationEvent(duration, allocation_in_bytes));
+}
+
+
+void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
+  cumulative_incremental_marking_steps_++;
+  cumulative_incremental_marking_bytes_ += bytes;
+  cumulative_incremental_marking_duration_ += duration;
+  longest_incremental_marking_step_ =
+      Max(longest_incremental_marking_step_, duration);
+  cumulative_marking_duration_ += duration;
+  if (bytes > 0) {
+    cumulative_pure_incremental_marking_duration_ += duration;
+  }
+}
+
+
+void GCTracer::Print() const {
+  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+
+  PrintF("%s %.1f (%.1f) -> %.1f (%.1f) MB, ", current_.TypeName(false),
+         static_cast<double>(current_.start_object_size) / MB,
+         static_cast<double>(current_.start_memory_size) / MB,
+         static_cast<double>(current_.end_object_size) / MB,
+         static_cast<double>(current_.end_memory_size) / MB);
+
+  int external_time = static_cast<int>(current_.scopes[Scope::EXTERNAL]);
+  if (external_time > 0) PrintF("%d / ", external_time);
+
+  double duration = current_.end_time - current_.start_time;
+  PrintF("%.1f ms", duration);
+  if (current_.type == Event::SCAVENGER) {
+    if (current_.incremental_marking_steps > 0) {
+      PrintF(" (+ %.1f ms in %d steps since last GC)",
+             current_.incremental_marking_duration,
+             current_.incremental_marking_steps);
+    }
+  } else {
+    if (current_.incremental_marking_steps > 0) {
+      PrintF(
+          " (+ %.1f ms in %d steps since start of marking, "
+          "biggest step %.1f ms)",
+          current_.incremental_marking_duration,
+          current_.incremental_marking_steps,
+          current_.longest_incremental_marking_step);
+    }
+  }
+
+  if (current_.gc_reason != NULL) {
+    PrintF(" [%s]", current_.gc_reason);
+  }
+
+  if (current_.collector_reason != NULL) {
+    PrintF(" [%s]", current_.collector_reason);
+  }
+
+  PrintF(".\n");
+}
+
+
+void GCTracer::PrintNVP() const {
+  PrintPID("%8.0f ms: ", heap_->isolate()->time_millis_since_init());
+
+  double duration = current_.end_time - current_.start_time;
+  double spent_in_mutator = current_.start_time - previous_.end_time;
+
+  PrintF("pause=%.1f ", duration);
+  PrintF("mutator=%.1f ", spent_in_mutator);
+  PrintF("gc=%s ", current_.TypeName(true));
+
+  PrintF("external=%.1f ", current_.scopes[Scope::EXTERNAL]);
+  PrintF("mark=%.1f ", current_.scopes[Scope::MC_MARK]);
+  PrintF("sweep=%.2f ", current_.scopes[Scope::MC_SWEEP]);
+  PrintF("sweepns=%.2f ", current_.scopes[Scope::MC_SWEEP_NEWSPACE]);
+  PrintF("sweepos=%.2f ", current_.scopes[Scope::MC_SWEEP_OLDSPACE]);
+  PrintF("sweepcode=%.2f ", current_.scopes[Scope::MC_SWEEP_CODE]);
+  PrintF("sweepcell=%.2f ", current_.scopes[Scope::MC_SWEEP_CELL]);
+  PrintF("sweepmap=%.2f ", current_.scopes[Scope::MC_SWEEP_MAP]);
+  PrintF("evacuate=%.1f ", current_.scopes[Scope::MC_EVACUATE_PAGES]);
+  PrintF("new_new=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_NEW_TO_NEW_POINTERS]);
+  PrintF("root_new=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS]);
+  PrintF("old_new=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_OLD_TO_NEW_POINTERS]);
+  PrintF("compaction_ptrs=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_POINTERS_TO_EVACUATED]);
+  PrintF("intracompaction_ptrs=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
+  PrintF("misc_compaction=%.1f ",
+         current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
+  PrintF("weakcollection_process=%.1f ",
+         current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
+  PrintF("weakcollection_clear=%.1f ",
+         current_.scopes[Scope::MC_WEAKCOLLECTION_CLEAR]);
+  PrintF("weakcollection_abort=%.1f ",
+         current_.scopes[Scope::MC_WEAKCOLLECTION_ABORT]);
+
+  PrintF("total_size_before=%" V8_PTR_PREFIX "d ", current_.start_object_size);
+  PrintF("total_size_after=%" V8_PTR_PREFIX "d ", current_.end_object_size);
+  PrintF("holes_size_before=%" V8_PTR_PREFIX "d ", current_.start_holes_size);
+  PrintF("holes_size_after=%" V8_PTR_PREFIX "d ", current_.end_holes_size);
+
+  intptr_t allocated_since_last_gc =
+      current_.start_object_size - previous_.end_object_size;
+  PrintF("allocated=%" V8_PTR_PREFIX "d ", allocated_since_last_gc);
+  PrintF("promoted=%" V8_PTR_PREFIX "d ", heap_->promoted_objects_size_);
+  PrintF("semi_space_copied=%" V8_PTR_PREFIX "d ",
+         heap_->semi_space_copied_object_size_);
+  PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
+  PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
+  PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
+  PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
+  PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
+  PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
+         NewSpaceAllocationThroughputInBytesPerMillisecond());
+
+  if (current_.type == Event::SCAVENGER) {
+    PrintF("steps_count=%d ", current_.incremental_marking_steps);
+    PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
+    PrintF("scavenge_throughput=%" V8_PTR_PREFIX "d ",
+           ScavengeSpeedInBytesPerMillisecond());
+  } else {
+    PrintF("steps_count=%d ", current_.incremental_marking_steps);
+    PrintF("steps_took=%.1f ", current_.incremental_marking_duration);
+    PrintF("longest_step=%.1f ", current_.longest_incremental_marking_step);
+    PrintF("incremental_marking_throughput=%" V8_PTR_PREFIX "d ",
+           IncrementalMarkingSpeedInBytesPerMillisecond());
+  }
+
+  PrintF("\n");
+}
+
+
+double GCTracer::MeanDuration(const EventBuffer& events) const {
+  if (events.empty()) return 0.0;
+
+  double mean = 0.0;
+  EventBuffer::const_iterator iter = events.begin();
+  while (iter != events.end()) {
+    mean += iter->end_time - iter->start_time;
+    ++iter;
+  }
+
+  return mean / events.size();
+}
+
+
+double GCTracer::MaxDuration(const EventBuffer& events) const {
+  if (events.empty()) return 0.0;
+
+  double maximum = 0.0;
+  EventBuffer::const_iterator iter = events.begin();
+  while (iter != events.end()) {
+    maximum = Max(iter->end_time - iter->start_time, maximum);
+    ++iter;
+  }
+
+  return maximum;
+}
+
+
+double GCTracer::MeanIncrementalMarkingDuration() const {
+  if (cumulative_incremental_marking_steps_ == 0) return 0.0;
+
+  // We haven't completed an entire round of incremental marking yet.
+  // Use data from GCTracer instead of data from event buffers.
+  if (mark_compactor_events_.empty()) {
+    return cumulative_incremental_marking_duration_ /
+           cumulative_incremental_marking_steps_;
+  }
+
+  int steps = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+  while (iter != mark_compactor_events_.end()) {
+    steps += iter->incremental_marking_steps;
+    durations += iter->incremental_marking_duration;
+    ++iter;
+  }
+
+  if (steps == 0) return 0.0;
+
+  return durations / steps;
+}
+
+
+double GCTracer::MaxIncrementalMarkingDuration() const {
+  // We haven't completed an entire round of incremental marking yet.
+  // Use data from GCTracer instead of data from event buffers.
+  if (mark_compactor_events_.empty()) return longest_incremental_marking_step_;
+
+  double max_duration = 0.0;
+  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+  while (iter != mark_compactor_events_.end()) {
+    max_duration = Max(iter->longest_incremental_marking_step, max_duration);
+    ++iter;
+  }
+
+  return max_duration;
+}
+
+
+intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
+  // The pure duration may still be zero even when marking time has been
+  // recorded; bail out early to avoid a division by zero below.
+  if (cumulative_incremental_marking_duration_ == 0.0 ||
+      cumulative_pure_incremental_marking_duration_ == 0.0) {
+    return 0;
+  }
+
+  // We haven't completed an entire round of incremental marking yet.
+  // Use data from GCTracer instead of data from event buffers.
+  if (mark_compactor_events_.empty()) {
+    return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
+                                 cumulative_pure_incremental_marking_duration_);
+  }
+
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+  while (iter != mark_compactor_events_.end()) {
+    bytes += iter->incremental_marking_bytes;
+    durations += iter->pure_incremental_marking_duration;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::ScavengeSpeedInBytesPerMillisecond() const {
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = scavenger_events_.begin();
+  while (iter != scavenger_events_.end()) {
+    bytes += iter->new_space_object_size;
+    durations += iter->end_time - iter->start_time;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
+  while (iter != mark_compactor_events_.end()) {
+    bytes += iter->start_object_size;
+    durations += iter->end_time - iter->start_time +
+                 iter->pure_incremental_marking_duration;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const {
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  AllocationEventBuffer::const_iterator iter = allocation_events_.begin();
+  while (iter != allocation_events_.end()) {
+    bytes += iter->allocation_in_bytes_;
+    durations += iter->duration_;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
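+
+// Note (added for clarity): the four *PerMillisecond() accessors above share
+// one scheme: sum a byte counter and a duration over the relevant ring buffer
+// and return bytes/duration, or 0 when no time has been recorded, so that
+// callers can fall back to conservative defaults (see
+// GCIdleTimeHandler::EstimateMarkCompactTime).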
+}  // namespace internal
+}  // namespace v8
diff --git a/src/heap/gc-tracer.h b/src/heap/gc-tracer.h
new file mode 100644
index 0000000..4e70f07
--- /dev/null
+++ b/src/heap/gc-tracer.h
@@ -0,0 +1,401 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_GC_TRACER_H_
+#define V8_HEAP_GC_TRACER_H_
+
+#include "src/base/platform/platform.h"
+
+namespace v8 {
+namespace internal {
+
+// A simple ring buffer class with maximum size known at compile time.
+// The class only implements the functionality required in GCTracer.
+template <typename T, size_t MAX_SIZE>
+class RingBuffer {
+ public:
+  class const_iterator {
+   public:
+    const_iterator() : index_(0), elements_(NULL) {}
+
+    const_iterator(size_t index, const T* elements)
+        : index_(index), elements_(elements) {}
+
+    bool operator==(const const_iterator& rhs) const {
+      return elements_ == rhs.elements_ && index_ == rhs.index_;
+    }
+
+    bool operator!=(const const_iterator& rhs) const {
+      return elements_ != rhs.elements_ || index_ != rhs.index_;
+    }
+
+    operator const T*() const { return elements_ + index_; }
+
+    const T* operator->() const { return elements_ + index_; }
+
+    const T& operator*() const { return elements_[index_]; }
+
+    const_iterator& operator++() {
+      index_ = (index_ + 1) % (MAX_SIZE + 1);
+      return *this;
+    }
+
+    const_iterator& operator--() {
+      index_ = (index_ + MAX_SIZE) % (MAX_SIZE + 1);
+      return *this;
+    }
+
+   private:
+    size_t index_;
+    const T* elements_;
+  };
+
+  RingBuffer() : begin_(0), end_(0) {}
+
+  bool empty() const { return begin_ == end_; }
+  size_t size() const {
+    return (end_ - begin_ + MAX_SIZE + 1) % (MAX_SIZE + 1);
+  }
+  const_iterator begin() const { return const_iterator(begin_, elements_); }
+  const_iterator end() const { return const_iterator(end_, elements_); }
+  const_iterator back() const { return --end(); }
+  void push_back(const T& element) {
+    elements_[end_] = element;
+    end_ = (end_ + 1) % (MAX_SIZE + 1);
+    if (end_ == begin_) begin_ = (begin_ + 1) % (MAX_SIZE + 1);
+  }
+  void push_front(const T& element) {
+    begin_ = (begin_ + MAX_SIZE) % (MAX_SIZE + 1);
+    if (begin_ == end_) end_ = (end_ + MAX_SIZE) % (MAX_SIZE + 1);
+    elements_[begin_] = element;
+  }
+
+ private:
+  T elements_[MAX_SIZE + 1];
+  size_t begin_;
+  size_t end_;
+
+  DISALLOW_COPY_AND_ASSIGN(RingBuffer);
+};
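+
+// Illustrative usage (hypothetical values): the buffer retains the most
+// recent MAX_SIZE elements and silently evicts the oldest once full.
+//
+//   RingBuffer<int, 3> buffer;
+//   buffer.push_back(1);   // 1
+//   buffer.push_back(2);   // 1 2
+//   buffer.push_front(0);  // 0 1 2
+//   buffer.push_back(3);   // 1 2 3 -- 0 evicted, size() stays at 3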
+
+
+// GCTracer collects and prints ONE line after each garbage collector
+// invocation IFF --trace_gc is used.
+// TODO(ernstm): Unit tests.
+class GCTracer {
+ public:
+  class Scope {
+   public:
+    enum ScopeId {
+      EXTERNAL,
+      MC_MARK,
+      MC_SWEEP,
+      MC_SWEEP_NEWSPACE,
+      MC_SWEEP_OLDSPACE,
+      MC_SWEEP_CODE,
+      MC_SWEEP_CELL,
+      MC_SWEEP_MAP,
+      MC_EVACUATE_PAGES,
+      MC_UPDATE_NEW_TO_NEW_POINTERS,
+      MC_UPDATE_ROOT_TO_NEW_POINTERS,
+      MC_UPDATE_OLD_TO_NEW_POINTERS,
+      MC_UPDATE_POINTERS_TO_EVACUATED,
+      MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
+      MC_UPDATE_MISC_POINTERS,
+      MC_WEAKCOLLECTION_PROCESS,
+      MC_WEAKCOLLECTION_CLEAR,
+      MC_WEAKCOLLECTION_ABORT,
+      MC_FLUSH_CODE,
+      NUMBER_OF_SCOPES
+    };
+
+    Scope(GCTracer* tracer, ScopeId scope) : tracer_(tracer), scope_(scope) {
+      start_time_ = base::OS::TimeCurrentMillis();
+    }
+
+    ~Scope() {
+      DCHECK(scope_ < NUMBER_OF_SCOPES);  // scope_ is unsigned.
+      tracer_->current_.scopes[scope_] +=
+          base::OS::TimeCurrentMillis() - start_time_;
+    }
+
+   private:
+    GCTracer* tracer_;
+    ScopeId scope_;
+    double start_time_;
+
+    DISALLOW_COPY_AND_ASSIGN(Scope);
+  };
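+
+  // Illustrative usage of Scope (hypothetical phase body): construct it on
+  // the stack around a single GC phase; the destructor adds the elapsed
+  // wall-clock time to current_.scopes[scope_]:
+  //
+  //   {
+  //     GCTracer::Scope scope(tracer, GCTracer::Scope::MC_MARK);
+  //     // ... marking work ...
+  //   }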
+
+
+  class AllocationEvent {
+   public:
+    // Default constructor leaves the event uninitialized.
+    AllocationEvent() {}
+
+    AllocationEvent(double duration, intptr_t allocation_in_bytes);
+
+    // Time spent in the mutator from the end of the last garbage collection
+    // to the beginning of the next garbage collection.
+    double duration_;
+
+    // Memory allocated in the new space from the end of the last garbage
+    // collection to the beginning of the next garbage collection.
+    intptr_t allocation_in_bytes_;
+  };
+
+  class Event {
+   public:
+    enum Type { SCAVENGER = 0, MARK_COMPACTOR = 1, START = 2 };
+
+    // Default constructor leaves the event uninitialized.
+    Event() {}
+
+    Event(Type type, const char* gc_reason, const char* collector_reason);
+
+    // Returns a string describing the event type.
+    const char* TypeName(bool short_name) const;
+
+    // Type of event
+    Type type;
+
+    const char* gc_reason;
+    const char* collector_reason;
+
+    // Timestamp set in the constructor.
+    double start_time;
+
+    // Timestamp set in the destructor.
+    double end_time;
+
+    // Size of objects in heap set in constructor.
+    intptr_t start_object_size;
+
+    // Size of objects in heap set in destructor.
+    intptr_t end_object_size;
+
+    // Size of memory allocated from OS set in constructor.
+    intptr_t start_memory_size;
+
+    // Size of memory allocated from OS set in destructor.
+    intptr_t end_memory_size;
+
+    // Total amount of space either wasted or contained in one of free lists
+    // before the current GC.
+    intptr_t start_holes_size;
+
+    // Total amount of space either wasted or contained in one of free lists
+    // after the current GC.
+    intptr_t end_holes_size;
+
+    // Size of new space objects in constructor.
+    intptr_t new_space_object_size;
+
+    // Number of incremental marking steps since creation of tracer.
+    // (value at start of event)
+    int cumulative_incremental_marking_steps;
+
+    // Incremental marking steps since
+    // - last event for SCAVENGER events
+    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    int incremental_marking_steps;
+
+    // Bytes marked since creation of tracer (value at start of event).
+    intptr_t cumulative_incremental_marking_bytes;
+
+    // Bytes marked since
+    // - last event for SCAVENGER events
+    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    intptr_t incremental_marking_bytes;
+
+    // Cumulative duration of incremental marking steps since creation of
+    // tracer. (value at start of event)
+    double cumulative_incremental_marking_duration;
+
+    // Duration of incremental marking steps since
+    // - last event for SCAVENGER events
+    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    double incremental_marking_duration;
+
+    // Cumulative pure duration of incremental marking steps since creation of
+    // tracer. (value at start of event)
+    double cumulative_pure_incremental_marking_duration;
+
+    // Duration of pure incremental marking steps since
+    // - last event for SCAVENGER events
+    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    double pure_incremental_marking_duration;
+
+    // Longest incremental marking step since start of marking.
+    // (value at start of event)
+    double longest_incremental_marking_step;
+
+    // Amounts of time spent in different scopes during GC.
+    double scopes[Scope::NUMBER_OF_SCOPES];
+  };
+
+  static const int kRingBufferMaxSize = 10;
+
+  typedef RingBuffer<Event, kRingBufferMaxSize> EventBuffer;
+
+  typedef RingBuffer<AllocationEvent, kRingBufferMaxSize> AllocationEventBuffer;
+
+  explicit GCTracer(Heap* heap);
+
+  // Start collecting data.
+  void Start(GarbageCollector collector, const char* gc_reason,
+             const char* collector_reason);
+
+  // Stop collecting data and print results.
+  void Stop();
+
+  // Log an allocation throughput event.
+  void AddNewSpaceAllocationTime(double duration, intptr_t allocation_in_bytes);
+
+  // Log an incremental marking step.
+  void AddIncrementalMarkingStep(double duration, intptr_t bytes);
+
+  // Log time spent in marking.
+  void AddMarkingTime(double duration) {
+    cumulative_marking_duration_ += duration;
+  }
+
+  // Time spent in marking.
+  double cumulative_marking_duration() const {
+    return cumulative_marking_duration_;
+  }
+
+  // Log time spent in sweeping on main thread.
+  void AddSweepingTime(double duration) {
+    cumulative_sweeping_duration_ += duration;
+  }
+
+  // Time spent in sweeping on main thread.
+  double cumulative_sweeping_duration() const {
+    return cumulative_sweeping_duration_;
+  }
+
+  // Compute the mean duration of the last scavenger events. Returns 0 if no
+  // events have been recorded.
+  double MeanScavengerDuration() const {
+    return MeanDuration(scavenger_events_);
+  }
+
+  // Compute the max duration of the last scavenger events. Returns 0 if no
+  // events have been recorded.
+  double MaxScavengerDuration() const { return MaxDuration(scavenger_events_); }
+
+  // Compute the mean duration of the last mark compactor events. Returns 0 if
+  // no events have been recorded.
+  double MeanMarkCompactorDuration() const {
+    return MeanDuration(mark_compactor_events_);
+  }
+
+  // Compute the max duration of the last mark compactor events. Returns 0 if
+  // no events have been recorded.
+  double MaxMarkCompactorDuration() const {
+    return MaxDuration(mark_compactor_events_);
+  }
+
+  // Compute the mean step duration of the last incremental marking round.
+  // Returns 0 if no incremental marking round has been completed.
+  double MeanIncrementalMarkingDuration() const;
+
+  // Compute the max step duration of the last incremental marking round.
+  // Returns 0 if no incremental marking round has been completed.
+  double MaxIncrementalMarkingDuration() const;
+
+  // Compute the average incremental marking speed in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t IncrementalMarkingSpeedInBytesPerMillisecond() const;
+
+  // Compute the average scavenge speed in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t ScavengeSpeedInBytesPerMillisecond() const;
+
+  // Compute the max mark-sweep speed in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
+
+  // Allocation throughput in the new space in bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
+
+ private:
+  // Print one detailed trace line in name=value format.
+  // TODO(ernstm): Move to Heap.
+  void PrintNVP() const;
+
+  // Print one trace line.
+  // TODO(ernstm): Move to Heap.
+  void Print() const;
+
+  // Compute the mean duration of the events in the given ring buffer.
+  double MeanDuration(const EventBuffer& events) const;
+
+  // Compute the max duration of the events in the given ring buffer.
+  double MaxDuration(const EventBuffer& events) const;
+
+  // Pointer to the heap that owns this tracer.
+  Heap* heap_;
+
+  // Current tracer event. Populated during Start/Stop cycle. Valid after Stop()
+  // has returned.
+  Event current_;
+
+  // Previous tracer event.
+  Event previous_;
+
+  // Previous MARK_COMPACTOR event.
+  Event previous_mark_compactor_event_;
+
+  // RingBuffers for SCAVENGER events.
+  EventBuffer scavenger_events_;
+
+  // RingBuffers for MARK_COMPACTOR events.
+  EventBuffer mark_compactor_events_;
+
+  // RingBuffer for allocation events.
+  AllocationEventBuffer allocation_events_;
+
+  // Cumulative number of incremental marking steps since creation of tracer.
+  int cumulative_incremental_marking_steps_;
+
+  // Cumulative size of incremental marking steps (in bytes) since creation of
+  // tracer.
+  intptr_t cumulative_incremental_marking_bytes_;
+
+  // Cumulative duration of incremental marking steps since creation of tracer.
+  double cumulative_incremental_marking_duration_;
+
+  // Cumulative duration of pure incremental marking steps since creation of
+  // tracer.
+  double cumulative_pure_incremental_marking_duration_;
+
+  // Longest incremental marking step since start of marking.
+  double longest_incremental_marking_step_;
+
+  // Total marking time.
+  // This timer is precise when run with --print-cumulative-gc-stat
+  double cumulative_marking_duration_;
+
+  // Total sweeping time on the main thread.
+  // This timer is precise when run with --print-cumulative-gc-stat
+  // TODO(hpayer): Account for sweeping time on sweeper threads. Add a
+  // different field for that.
+  // TODO(hpayer): This timer right now just holds the sweeping time
+  // of the initial atomic sweeping pause. Make sure that it accumulates
+  // all sweeping operations performed on the main thread.
+  double cumulative_sweeping_duration_;
+
+  // Holds the new space top pointer recorded at the end of the last garbage
+  // collection.
+  intptr_t new_space_top_after_gc_;
+
+  DISALLOW_COPY_AND_ASSIGN(GCTracer);
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_HEAP_GC_TRACER_H_
diff --git a/src/heap/heap-inl.h b/src/heap/heap-inl.h
new file mode 100644
index 0000000..e658224
--- /dev/null
+++ b/src/heap/heap-inl.h
@@ -0,0 +1,780 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_INL_H_
+#define V8_HEAP_HEAP_INL_H_
+
+#include <cmath>
+
+#include "src/base/platform/platform.h"
+#include "src/cpu-profiler.h"
+#include "src/heap/heap.h"
+#include "src/heap/store-buffer.h"
+#include "src/heap/store-buffer-inl.h"
+#include "src/heap-profiler.h"
+#include "src/isolate.h"
+#include "src/list-inl.h"
+#include "src/msan.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
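+// Entries are written downwards from |rear_|, two words per entry
+// (target, size). Once an emergency stack exists, or fewer than two words
+// would remain before |limit_|, new entries are diverted to the emergency
+// stack instead.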
+void PromotionQueue::insert(HeapObject* target, int size) {
+  if (emergency_stack_ != NULL) {
+    emergency_stack_->Add(Entry(target, size));
+    return;
+  }
+
+  if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(rear_))) {
+    NewSpacePage* rear_page =
+        NewSpacePage::FromAddress(reinterpret_cast<Address>(rear_));
+    DCHECK(!rear_page->prev_page()->is_anchor());
+    rear_ = reinterpret_cast<intptr_t*>(rear_page->prev_page()->area_end());
+  }
+
+  if ((rear_ - 2) < limit_) {
+    RelocateQueueHead();
+    emergency_stack_->Add(Entry(target, size));
+    return;
+  }
+
+  *(--rear_) = reinterpret_cast<intptr_t>(target);
+  *(--rear_) = size;
+// Assert no overflow into live objects.
+#ifdef DEBUG
+  SemiSpace::AssertValidRange(target->GetIsolate()->heap()->new_space()->top(),
+                              reinterpret_cast<Address>(rear_));
+#endif
+}
+
+
+template <>
+inline bool Heap::IsOneByte(Vector<const char> str, int chars) {
+  // TODO(dcarney): incorporate Latin-1 check when Latin-1 is supported?
+  return chars == str.length();
+}
+
+
+template <>
+inline bool Heap::IsOneByte(String* str, int chars) {
+  return str->IsOneByteRepresentation();
+}
+
+
+AllocationResult Heap::AllocateInternalizedStringFromUtf8(
+    Vector<const char> str, int chars, uint32_t hash_field) {
+  if (IsOneByte(str, chars)) {
+    return AllocateOneByteInternalizedString(Vector<const uint8_t>::cast(str),
+                                             hash_field);
+  }
+  return AllocateInternalizedStringImpl<false>(str, chars, hash_field);
+}
+
+
+template <typename T>
+AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
+                                                      uint32_t hash_field) {
+  if (IsOneByte(t, chars)) {
+    return AllocateInternalizedStringImpl<true>(t, chars, hash_field);
+  }
+  return AllocateInternalizedStringImpl<false>(t, chars, hash_field);
+}
+
+
+AllocationResult Heap::AllocateOneByteInternalizedString(
+    Vector<const uint8_t> str, uint32_t hash_field) {
+  CHECK_GE(String::kMaxLength, str.length());
+  // Compute map and object size.
+  Map* map = one_byte_internalized_string_map();
+  int size = SeqOneByteString::SizeFor(str.length());
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
+
+  // Allocate string.
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  // String maps are all immortal immovable objects.
+  result->set_map_no_write_barrier(map);
+  // Set length and hash fields of the allocated string.
+  String* answer = String::cast(result);
+  answer->set_length(str.length());
+  answer->set_hash_field(hash_field);
+
+  DCHECK_EQ(size, answer->Size());
+
+  // Fill in the characters.
+  MemCopy(answer->address() + SeqOneByteString::kHeaderSize, str.start(),
+          str.length());
+
+  return answer;
+}
+
+
+AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
+                                                         uint32_t hash_field) {
+  CHECK_GE(String::kMaxLength, str.length());
+  // Compute map and object size.
+  Map* map = internalized_string_map();
+  int size = SeqTwoByteString::SizeFor(str.length());
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
+
+  // Allocate string.
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map(map);
+  // Set length and hash fields of the allocated string.
+  String* answer = String::cast(result);
+  answer->set_length(str.length());
+  answer->set_hash_field(hash_field);
+
+  DCHECK_EQ(size, answer->Size());
+
+  // Fill in the characters.
+  MemCopy(answer->address() + SeqTwoByteString::kHeaderSize, str.start(),
+          str.length() * kUC16Size);
+
+  return answer;
+}
+
+AllocationResult Heap::CopyFixedArray(FixedArray* src) {
+  if (src->length() == 0) return src;
+  return CopyFixedArrayWithMap(src, src->map());
+}
+
+
+AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
+  if (src->length() == 0) return src;
+  return CopyFixedDoubleArrayWithMap(src, src->map());
+}
+
+
+AllocationResult Heap::CopyConstantPoolArray(ConstantPoolArray* src) {
+  if (src->length() == 0) return src;
+  return CopyConstantPoolArrayWithMap(src, src->map());
+}
+
+
+AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
+                                   AllocationSpace retry_space) {
+  DCHECK(AllowHandleAllocation::IsAllowed());
+  DCHECK(AllowHeapAllocation::IsAllowed());
+  DCHECK(gc_state_ == NOT_IN_GC);
+#ifdef DEBUG
+  if (FLAG_gc_interval >= 0 && AllowAllocationFailure::IsAllowed(isolate_) &&
+      Heap::allocation_timeout_-- <= 0) {
+    return AllocationResult::Retry(space);
+  }
+  isolate_->counters()->objs_since_last_full()->Increment();
+  isolate_->counters()->objs_since_last_young()->Increment();
+#endif
+
+  HeapObject* object;
+  AllocationResult allocation;
+  if (NEW_SPACE == space) {
+    allocation = new_space_.AllocateRaw(size_in_bytes);
+    if (always_allocate() && allocation.IsRetry() && retry_space != NEW_SPACE) {
+      space = retry_space;
+    } else {
+      if (allocation.To(&object)) {
+        OnAllocationEvent(object, size_in_bytes);
+      }
+      return allocation;
+    }
+  }
+
+  if (OLD_POINTER_SPACE == space) {
+    allocation = old_pointer_space_->AllocateRaw(size_in_bytes);
+  } else if (OLD_DATA_SPACE == space) {
+    allocation = old_data_space_->AllocateRaw(size_in_bytes);
+  } else if (CODE_SPACE == space) {
+    if (size_in_bytes <= code_space()->AreaSize()) {
+      allocation = code_space_->AllocateRaw(size_in_bytes);
+    } else {
+      // Large code objects are allocated in large object space.
+      allocation = lo_space_->AllocateRaw(size_in_bytes, EXECUTABLE);
+    }
+  } else if (LO_SPACE == space) {
+    allocation = lo_space_->AllocateRaw(size_in_bytes, NOT_EXECUTABLE);
+  } else if (CELL_SPACE == space) {
+    allocation = cell_space_->AllocateRaw(size_in_bytes);
+  } else if (PROPERTY_CELL_SPACE == space) {
+    allocation = property_cell_space_->AllocateRaw(size_in_bytes);
+  } else {
+    DCHECK(MAP_SPACE == space);
+    allocation = map_space_->AllocateRaw(size_in_bytes);
+  }
+  if (allocation.To(&object)) {
+    OnAllocationEvent(object, size_in_bytes);
+  } else {
+    old_gen_exhausted_ = true;
+  }
+  return allocation;
+}
+
+
+void Heap::OnAllocationEvent(HeapObject* object, int size_in_bytes) {
+  HeapProfiler* profiler = isolate_->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->AllocationEvent(object->address(), size_in_bytes);
+  }
+
+  if (FLAG_verify_predictable) {
+    ++allocations_count_;
+
+    UpdateAllocationsHash(object);
+    UpdateAllocationsHash(size_in_bytes);
+
+    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+        (--dump_allocations_hash_countdown_ == 0)) {
+      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+      PrintAllocationsHash();
+    }
+  }
+}
+
+
+void Heap::OnMoveEvent(HeapObject* target, HeapObject* source,
+                       int size_in_bytes) {
+  HeapProfiler* heap_profiler = isolate_->heap_profiler();
+  if (heap_profiler->is_tracking_object_moves()) {
+    heap_profiler->ObjectMoveEvent(source->address(), target->address(),
+                                   size_in_bytes);
+  }
+
+  if (isolate_->logger()->is_logging_code_events() ||
+      isolate_->cpu_profiler()->is_profiling()) {
+    if (target->IsSharedFunctionInfo()) {
+      PROFILE(isolate_, SharedFunctionInfoMoveEvent(source->address(),
+                                                    target->address()));
+    }
+  }
+
+  if (FLAG_verify_predictable) {
+    ++allocations_count_;
+
+    UpdateAllocationsHash(source);
+    UpdateAllocationsHash(target);
+    UpdateAllocationsHash(size_in_bytes);
+
+    if ((FLAG_dump_allocations_digest_at_alloc > 0) &&
+        (--dump_allocations_hash_countdown_ == 0)) {
+      dump_allocations_hash_countdown_ = FLAG_dump_allocations_digest_at_alloc;
+      PrintAllocationsHash();
+    }
+  }
+}
+
+
+void Heap::UpdateAllocationsHash(HeapObject* object) {
+  Address object_address = object->address();
+  MemoryChunk* memory_chunk = MemoryChunk::FromAddress(object_address);
+  AllocationSpace allocation_space = memory_chunk->owner()->identity();
+
+  STATIC_ASSERT(kSpaceTagSize + kPageSizeBits <= 32);
+  uint32_t value =
+      static_cast<uint32_t>(object_address - memory_chunk->address()) |
+      (static_cast<uint32_t>(allocation_space) << kPageSizeBits);
+
+  UpdateAllocationsHash(value);
+}
+
+
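+// Feed a 32-bit value into the running allocations hash as two 16-bit
+// characters; the resulting hash is order-sensitive.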
+void Heap::UpdateAllocationsHash(uint32_t value) {
+  uint16_t c1 = static_cast<uint16_t>(value);
+  uint16_t c2 = static_cast<uint16_t>(value >> 16);
+  raw_allocations_hash_ =
+      StringHasher::AddCharacterCore(raw_allocations_hash_, c1);
+  raw_allocations_hash_ =
+      StringHasher::AddCharacterCore(raw_allocations_hash_, c2);
+}
+
+
+void Heap::PrintAllocationsHash() {
+  uint32_t hash = StringHasher::GetHashCore(raw_allocations_hash_);
+  PrintF("\n### Allocations = %u, hash = 0x%08x\n", allocations_count_, hash);
+}
+
+
+void Heap::FinalizeExternalString(String* string) {
+  DCHECK(string->IsExternalString());
+  v8::String::ExternalStringResourceBase** resource_addr =
+      reinterpret_cast<v8::String::ExternalStringResourceBase**>(
+          reinterpret_cast<byte*>(string) + ExternalString::kResourceOffset -
+          kHeapObjectTag);
+
+  // Dispose of the C++ object if it has not already been disposed.
+  if (*resource_addr != NULL) {
+    (*resource_addr)->Dispose();
+    *resource_addr = NULL;
+  }
+}
+
+
+bool Heap::InNewSpace(Object* object) {
+  bool result = new_space_.Contains(object);
+  DCHECK(!result ||                 // Either not in new space
+         gc_state_ != NOT_IN_GC ||  // ... or in the middle of GC
+         InToSpace(object));        // ... or in to-space (where we allocate).
+  return result;
+}
+
+
+bool Heap::InNewSpace(Address address) { return new_space_.Contains(address); }
+
+
+bool Heap::InFromSpace(Object* object) {
+  return new_space_.FromSpaceContains(object);
+}
+
+
+bool Heap::InToSpace(Object* object) {
+  return new_space_.ToSpaceContains(object);
+}
+
+
+bool Heap::InOldPointerSpace(Address address) {
+  return old_pointer_space_->Contains(address);
+}
+
+
+bool Heap::InOldPointerSpace(Object* object) {
+  return InOldPointerSpace(reinterpret_cast<Address>(object));
+}
+
+
+bool Heap::InOldDataSpace(Address address) {
+  return old_data_space_->Contains(address);
+}
+
+
+bool Heap::InOldDataSpace(Object* object) {
+  return InOldDataSpace(reinterpret_cast<Address>(object));
+}
+
+
+bool Heap::OldGenerationAllocationLimitReached() {
+  if (!incremental_marking()->IsStopped()) return false;
+  return OldGenerationSpaceAvailable() < 0;
+}
+
+
+bool Heap::ShouldBePromoted(Address old_address, int object_size) {
+  NewSpacePage* page = NewSpacePage::FromAddress(old_address);
+  Address age_mark = new_space_.age_mark();
+  return page->IsFlagSet(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK) &&
+         (!page->ContainsLimit(age_mark) || old_address < age_mark);
+}
+
+
+void Heap::RecordWrite(Address address, int offset) {
+  if (!InNewSpace(address)) store_buffer_.Mark(address + offset);
+}
+
+
+void Heap::RecordWrites(Address address, int start, int len) {
+  if (!InNewSpace(address)) {
+    for (int i = 0; i < len; i++) {
+      store_buffer_.Mark(address + start + i * kPointerSize);
+    }
+  }
+}
+
+
+OldSpace* Heap::TargetSpace(HeapObject* object) {
+  InstanceType type = object->map()->instance_type();
+  AllocationSpace space = TargetSpaceId(type);
+  return (space == OLD_POINTER_SPACE) ? old_pointer_space_ : old_data_space_;
+}
+
+
+AllocationSpace Heap::TargetSpaceId(InstanceType type) {
+  // Heap numbers and sequential strings are promoted to old data space, all
+  // other object types are promoted to old pointer space.  We do not use
+  // object->IsHeapNumber() and object->IsSeqString() because we already
+  // know that object has the heap object tag.
+
+  // These objects are never allocated in new space.
+  DCHECK(type != MAP_TYPE);
+  DCHECK(type != CODE_TYPE);
+  DCHECK(type != ODDBALL_TYPE);
+  DCHECK(type != CELL_TYPE);
+  DCHECK(type != PROPERTY_CELL_TYPE);
+
+  if (type <= LAST_NAME_TYPE) {
+    if (type == SYMBOL_TYPE) return OLD_POINTER_SPACE;
+    DCHECK(type < FIRST_NONSTRING_TYPE);
+    // There are four string representations: sequential strings, external
+    // strings, cons strings, and sliced strings.
+    // Only the latter two contain non-map-word pointers to heap objects.
+    return ((type & kIsIndirectStringMask) == kIsIndirectStringTag)
+               ? OLD_POINTER_SPACE
+               : OLD_DATA_SPACE;
+  } else {
+    return (type <= LAST_DATA_TYPE) ? OLD_DATA_SPACE : OLD_POINTER_SPACE;
+  }
+}
+
+
+bool Heap::AllowedToBeMigrated(HeapObject* obj, AllocationSpace dst) {
+  // Object migration is governed by the following rules:
+  //
+  // 1) Objects in new-space can be migrated to one of the old spaces
+  //    that matches their target space or they stay in new-space.
+  // 2) Objects in old-space stay in the same space when migrating.
+  // 3) Fillers (two or more words) can migrate due to left-trimming of
+  //    fixed arrays in new-space, old-data-space and old-pointer-space.
+  // 4) Fillers (one word) can never migrate, they are skipped by
+  //    incremental marking explicitly to prevent invalid pattern.
+  // 5) Short external strings can end up in old pointer space when a cons
+  //    string in old pointer space is made external (String::MakeExternal).
+  //
+  // Since this function is used for debugging only, we do not place
+  // asserts here, but check everything explicitly.
+  if (obj->map() == one_pointer_filler_map()) return false;
+  InstanceType type = obj->map()->instance_type();
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  AllocationSpace src = chunk->owner()->identity();
+  switch (src) {
+    case NEW_SPACE:
+      return dst == src || dst == TargetSpaceId(type);
+    case OLD_POINTER_SPACE:
+      return dst == src && (dst == TargetSpaceId(type) || obj->IsFiller() ||
+                            obj->IsExternalString());
+    case OLD_DATA_SPACE:
+      return dst == src && dst == TargetSpaceId(type);
+    case CODE_SPACE:
+      return dst == src && type == CODE_TYPE;
+    case MAP_SPACE:
+    case CELL_SPACE:
+    case PROPERTY_CELL_SPACE:
+    case LO_SPACE:
+      return false;
+    case INVALID_SPACE:
+      break;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+void Heap::CopyBlock(Address dst, Address src, int byte_size) {
+  CopyWords(reinterpret_cast<Object**>(dst), reinterpret_cast<Object**>(src),
+            static_cast<size_t>(byte_size / kPointerSize));
+}
+
+
+void Heap::MoveBlock(Address dst, Address src, int byte_size) {
+  DCHECK(IsAligned(byte_size, kPointerSize));
+
+  int size_in_words = byte_size / kPointerSize;
+
+  if ((dst < src) || (dst >= (src + byte_size))) {
+    Object** src_slot = reinterpret_cast<Object**>(src);
+    Object** dst_slot = reinterpret_cast<Object**>(dst);
+    Object** end_slot = src_slot + size_in_words;
+
+    while (src_slot != end_slot) {
+      *dst_slot++ = *src_slot++;
+    }
+  } else {
+    MemMove(dst, src, static_cast<size_t>(byte_size));
+  }
+}
+
+
+void Heap::ScavengePointer(HeapObject** p) { ScavengeObject(p, *p); }
+
+
+AllocationMemento* Heap::FindAllocationMemento(HeapObject* object) {
+  // Check if there is potentially a memento behind the object. If
+  // the last word of the memento is on another page we return
+  // immediately.
+  Address object_address = object->address();
+  Address memento_address = object_address + object->Size();
+  Address last_memento_word_address = memento_address + kPointerSize;
+  if (!NewSpacePage::OnSamePage(object_address, last_memento_word_address)) {
+    return NULL;
+  }
+
+  HeapObject* candidate = HeapObject::FromAddress(memento_address);
+  Map* candidate_map = candidate->map();
+  // This fast check may peek at an uninitialized word. However, the slow check
+  // below (memento_address == top) ensures that this is safe. Mark the word as
+  // initialized to silence MemorySanitizer warnings.
+  MSAN_MEMORY_IS_INITIALIZED(&candidate_map, sizeof(candidate_map));
+  if (candidate_map != allocation_memento_map()) return NULL;
+
+  // Either the object is the last object in the new space, or there is another
+  // object of at least word size (the header map word) following it, so it
+  // suffices to compare memento_address and top here. Note that technically
+  // we do not have to compare with the current top pointer of the from space
+  // page during GC,
+  // since we always install filler objects above the top pointer of a from
+  // space page when performing a garbage collection. However, always performing
+  // the test makes it possible to have a single, unified version of
+  // FindAllocationMemento that is used both by the GC and the mutator.
+  Address top = NewSpaceTop();
+  DCHECK(memento_address == top ||
+         memento_address + HeapObject::kHeaderSize <= top ||
+         !NewSpacePage::OnSamePage(memento_address, top));
+  if (memento_address == top) return NULL;
+
+  AllocationMemento* memento = AllocationMemento::cast(candidate);
+  if (!memento->IsValid()) return NULL;
+  return memento;
+}
+
+
+void Heap::UpdateAllocationSiteFeedback(HeapObject* object,
+                                        ScratchpadSlotMode mode) {
+  Heap* heap = object->GetHeap();
+  DCHECK(heap->InFromSpace(object));
+
+  if (!FLAG_allocation_site_pretenuring ||
+      !AllocationSite::CanTrack(object->map()->instance_type()))
+    return;
+
+  AllocationMemento* memento = heap->FindAllocationMemento(object);
+  if (memento == NULL) return;
+
+  if (memento->GetAllocationSite()->IncrementMementoFoundCount()) {
+    heap->AddAllocationSiteToScratchpad(memento->GetAllocationSite(), mode);
+  }
+}
+
+
+void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
+  DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+
+  // We use the first word (where the map pointer usually is) of a heap
+  // object to record the forwarding pointer.  A forwarding pointer can
+  // point to an old space, the code space, or the to space of the new
+  // generation.
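+  // (A forwarding address is stored with the heap-object tag cleared, so it
+  // looks like a Smi and is distinguishable from a real, tagged Map pointer;
+  // see MapWord::IsForwardingAddress.)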
+  MapWord first_word = object->map_word();
+
+  // If the first word is a forwarding address, the object has already been
+  // copied.
+  if (first_word.IsForwardingAddress()) {
+    HeapObject* dest = first_word.ToForwardingAddress();
+    DCHECK(object->GetIsolate()->heap()->InFromSpace(*p));
+    *p = dest;
+    return;
+  }
+
+  UpdateAllocationSiteFeedback(object, IGNORE_SCRATCHPAD_SLOT);
+
+  // AllocationMementos are unrooted and shouldn't survive a scavenge
+  DCHECK(object->map() != object->GetHeap()->allocation_memento_map());
+  // Call the slow part of scavenge object.
+  return ScavengeObjectSlow(p, object);
+}
+
+
+bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
+                          const v8::GCCallbackFlags callbackFlags) {
+  const char* collector_reason = NULL;
+  GarbageCollector collector = SelectGarbageCollector(space, &collector_reason);
+  return CollectGarbage(collector, gc_reason, collector_reason, callbackFlags);
+}
+
+
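+// Recovers the owning Isolate without storing a back pointer: evaluating
+// heap() on a dummy Isolate* of 4 yields 4 + offsetof(Isolate, heap_), so
+// subtracting that from |this| and adding the 4 back gives the Isolate's
+// base address.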
+Isolate* Heap::isolate() {
+  return reinterpret_cast<Isolate*>(
+      reinterpret_cast<intptr_t>(this) -
+      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
+}
+
+
+// Calls the FUNCTION_CALL function and retries it up to two more times
+// (three attempts in total) to guarantee that any allocations performed
+// during the call will succeed if there's enough memory.
+
+// Warning: Do not use the identifiers __object__, __maybe_object__ or
+// __scope__ in a call to this macro.
+
+#define RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE) \
+  if (__allocation__.To(&__object__)) {                   \
+    DCHECK(__object__ != (ISOLATE)->heap()->exception()); \
+    RETURN_VALUE;                                         \
+  }
+
+#define CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)    \
+  do {                                                                        \
+    AllocationResult __allocation__ = FUNCTION_CALL;                          \
+    Object* __object__ = NULL;                                                \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    (ISOLATE)->heap()->CollectGarbage(__allocation__.RetrySpace(),            \
+                                      "allocation failure");                  \
+    __allocation__ = FUNCTION_CALL;                                           \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    (ISOLATE)->counters()->gc_last_resort_from_handles()->Increment();        \
+    (ISOLATE)->heap()->CollectAllAvailableGarbage("last resort gc");          \
+    {                                                                         \
+      AlwaysAllocateScope __scope__(ISOLATE);                                 \
+      __allocation__ = FUNCTION_CALL;                                         \
+    }                                                                         \
+    RETURN_OBJECT_UNLESS_RETRY(ISOLATE, RETURN_VALUE)                         \
+    /* TODO(1181417): Fix this. */                                            \
+    v8::internal::Heap::FatalProcessOutOfMemory("CALL_AND_RETRY_LAST", true); \
+    RETURN_EMPTY;                                                             \
+  } while (false)
+
+#define CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, RETURN_VALUE, \
+                              RETURN_EMPTY)                         \
+  CALL_AND_RETRY(ISOLATE, FUNCTION_CALL, RETURN_VALUE, RETURN_EMPTY)
+
+#define CALL_HEAP_FUNCTION(ISOLATE, FUNCTION_CALL, TYPE)                      \
+  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL,                               \
+                        return Handle<TYPE>(TYPE::cast(__object__), ISOLATE), \
+                        return Handle<TYPE>())
+
+
+#define CALL_HEAP_FUNCTION_VOID(ISOLATE, FUNCTION_CALL) \
+  CALL_AND_RETRY_OR_DIE(ISOLATE, FUNCTION_CALL, return, return)
+
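+// A representative use of CALL_HEAP_FUNCTION, sketched as a hypothetical
+// wrapper (assuming a raw Heap::AllocateFixedArray(int) allocator, the
+// pattern Factory methods follow):
+//
+//   Handle<FixedArray> NewFixedArray(Isolate* isolate, int len) {
+//     CALL_HEAP_FUNCTION(isolate,
+//                        isolate->heap()->AllocateFixedArray(len),
+//                        FixedArray);
+//   }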
+
+void ExternalStringTable::AddString(String* string) {
+  DCHECK(string->IsExternalString());
+  if (heap_->InNewSpace(string)) {
+    new_space_strings_.Add(string);
+  } else {
+    old_space_strings_.Add(string);
+  }
+}
+
+
+void ExternalStringTable::Iterate(ObjectVisitor* v) {
+  if (!new_space_strings_.is_empty()) {
+    Object** start = &new_space_strings_[0];
+    v->VisitPointers(start, start + new_space_strings_.length());
+  }
+  if (!old_space_strings_.is_empty()) {
+    Object** start = &old_space_strings_[0];
+    v->VisitPointers(start, start + old_space_strings_.length());
+  }
+}
+
+
+// Verify() is inline to avoid ifdef-s around its calls in release
+// mode.
+void ExternalStringTable::Verify() {
+#ifdef DEBUG
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    Object* obj = Object::cast(new_space_strings_[i]);
+    DCHECK(heap_->InNewSpace(obj));
+    DCHECK(obj != heap_->the_hole_value());
+  }
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    Object* obj = Object::cast(old_space_strings_[i]);
+    DCHECK(!heap_->InNewSpace(obj));
+    DCHECK(obj != heap_->the_hole_value());
+  }
+#endif
+}
+
+
+void ExternalStringTable::AddOldString(String* string) {
+  DCHECK(string->IsExternalString());
+  DCHECK(!heap_->InNewSpace(string));
+  old_space_strings_.Add(string);
+}
+
+
+void ExternalStringTable::ShrinkNewStrings(int position) {
+  new_space_strings_.Rewind(position);
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+}
+
+
+void Heap::ClearInstanceofCache() {
+  set_instanceof_cache_function(the_hole_value());
+}
+
+
+Object* Heap::ToBoolean(bool condition) {
+  return condition ? true_value() : false_value();
+}
+
+
+void Heap::CompletelyClearInstanceofCache() {
+  set_instanceof_cache_map(the_hole_value());
+  set_instanceof_cache_function(the_hole_value());
+}
+
+
+AlwaysAllocateScope::AlwaysAllocateScope(Isolate* isolate)
+    : heap_(isolate->heap()), daf_(isolate) {
+  // We shouldn't hit any nested scopes, because that requires
+  // non-handle code to call handle code. The code still works but
+  // performance will degrade, so we want to catch this situation
+  // in debug mode.
+  DCHECK(heap_->always_allocate_scope_depth_ == 0);
+  heap_->always_allocate_scope_depth_++;
+}
+
+
+AlwaysAllocateScope::~AlwaysAllocateScope() {
+  heap_->always_allocate_scope_depth_--;
+  DCHECK(heap_->always_allocate_scope_depth_ == 0);
+}
+
+
+#ifdef VERIFY_HEAP
+NoWeakObjectVerificationScope::NoWeakObjectVerificationScope() {
+  Isolate* isolate = Isolate::Current();
+  isolate->heap()->no_weak_object_verification_scope_depth_++;
+}
+
+
+NoWeakObjectVerificationScope::~NoWeakObjectVerificationScope() {
+  Isolate* isolate = Isolate::Current();
+  isolate->heap()->no_weak_object_verification_scope_depth_--;
+}
+#endif
+
+
+GCCallbacksScope::GCCallbacksScope(Heap* heap) : heap_(heap) {
+  heap_->gc_callbacks_depth_++;
+}
+
+
+GCCallbacksScope::~GCCallbacksScope() { heap_->gc_callbacks_depth_--; }
+
+
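+// True only in the outermost GCCallbacksScope; PerformGarbageCollection
+// uses this to avoid re-invoking GC callbacks from a nested collection.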
+bool GCCallbacksScope::CheckReenter() {
+  return heap_->gc_callbacks_depth_ == 1;
+}
+
+
+void VerifyPointersVisitor::VisitPointers(Object** start, Object** end) {
+  for (Object** current = start; current < end; current++) {
+    if ((*current)->IsHeapObject()) {
+      HeapObject* object = HeapObject::cast(*current);
+      CHECK(object->GetIsolate()->heap()->Contains(object));
+      CHECK(object->map()->IsMap());
+    }
+  }
+}
+
+
+void VerifySmisVisitor::VisitPointers(Object** start, Object** end) {
+  for (Object** current = start; current < end; current++) {
+    CHECK((*current)->IsSmi());
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_HEAP_INL_H_
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
new file mode 100644
index 0000000..dfe60fe
--- /dev/null
+++ b/src/heap/heap.cc
@@ -0,0 +1,6159 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/api.h"
+#include "src/base/bits.h"
+#include "src/base/once.h"
+#include "src/base/utils/random-number-generator.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/compilation-cache.h"
+#include "src/conversions.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/global-handles.h"
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/store-buffer.h"
+#include "src/heap-profiler.h"
+#include "src/isolate-inl.h"
+#include "src/natives.h"
+#include "src/runtime-profiler.h"
+#include "src/scopeinfo.h"
+#include "src/snapshot.h"
+#include "src/utils.h"
+#include "src/v8threads.h"
+#include "src/vm-state-inl.h"
+
+#if V8_TARGET_ARCH_ARM && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h"          // NOLINT
+#include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
+#endif
+#if V8_TARGET_ARCH_MIPS && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h"            // NOLINT
+#include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
+#endif
+#if V8_TARGET_ARCH_MIPS64 && !V8_INTERPRETED_REGEXP
+#include "src/regexp-macro-assembler.h"
+#include "src/mips64/regexp-macro-assembler-mips64.h"
+#endif
+
+namespace v8 {
+namespace internal {
+
+
+Heap::Heap()
+    : amount_of_external_allocated_memory_(0),
+      amount_of_external_allocated_memory_at_last_global_gc_(0),
+      isolate_(NULL),
+      code_range_size_(0),
+      // semispace_size_ should be a power of 2 and old_generation_size_ should
+      // be a multiple of Page::kPageSize.
+      reserved_semispace_size_(8 * (kPointerSize / 4) * MB),
+      max_semi_space_size_(8 * (kPointerSize / 4) * MB),
+      initial_semispace_size_(Page::kPageSize),
+      max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+      max_executable_size_(256ul * (kPointerSize / 4) * MB),
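+      // E.g. on a 32-bit target (kPointerSize == 4) the factors above give
+      // 8 MB semispaces, a 700 MB old generation and 256 MB of executable
+      // memory; on a 64-bit target each of these limits doubles.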
+      // Variables set based on semispace_size_ and old_generation_size_ in
+      // ConfigureHeap.
+      // Will be 4 * reserved_semispace_size_ to ensure that the young
+      // generation can be aligned to its size.
+      maximum_committed_(0),
+      survived_since_last_expansion_(0),
+      sweep_generation_(0),
+      always_allocate_scope_depth_(0),
+      contexts_disposed_(0),
+      global_ic_age_(0),
+      flush_monomorphic_ics_(false),
+      scan_on_scavenge_pages_(0),
+      new_space_(this),
+      old_pointer_space_(NULL),
+      old_data_space_(NULL),
+      code_space_(NULL),
+      map_space_(NULL),
+      cell_space_(NULL),
+      property_cell_space_(NULL),
+      lo_space_(NULL),
+      gc_state_(NOT_IN_GC),
+      gc_post_processing_depth_(0),
+      allocations_count_(0),
+      raw_allocations_hash_(0),
+      dump_allocations_hash_countdown_(FLAG_dump_allocations_digest_at_alloc),
+      ms_count_(0),
+      gc_count_(0),
+      remembered_unmapped_pages_index_(0),
+      unflattened_strings_length_(0),
+#ifdef DEBUG
+      allocation_timeout_(0),
+#endif  // DEBUG
+      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
+      old_gen_exhausted_(false),
+      inline_allocation_disabled_(false),
+      store_buffer_rebuilder_(store_buffer()),
+      hidden_string_(NULL),
+      gc_safe_size_of_old_object_(NULL),
+      total_regexp_code_generated_(0),
+      tracer_(this),
+      high_survival_rate_period_length_(0),
+      promoted_objects_size_(0),
+      promotion_rate_(0),
+      semi_space_copied_object_size_(0),
+      semi_space_copied_rate_(0),
+      nodes_died_in_new_space_(0),
+      nodes_copied_in_new_space_(0),
+      nodes_promoted_(0),
+      maximum_size_scavenges_(0),
+      max_gc_pause_(0.0),
+      total_gc_time_ms_(0.0),
+      max_alive_after_gc_(0),
+      min_in_mutator_(kMaxInt),
+      marking_time_(0.0),
+      sweeping_time_(0.0),
+      mark_compact_collector_(this),
+      store_buffer_(this),
+      marking_(this),
+      incremental_marking_(this),
+      gc_count_at_last_idle_gc_(0),
+      full_codegen_bytes_generated_(0),
+      crankshaft_codegen_bytes_generated_(0),
+      gcs_since_last_deopt_(0),
+#ifdef VERIFY_HEAP
+      no_weak_object_verification_scope_depth_(0),
+#endif
+      allocation_sites_scratchpad_length_(0),
+      promotion_queue_(this),
+      configured_(false),
+      external_string_table_(this),
+      chunks_queued_for_free_(NULL),
+      gc_callbacks_depth_(0) {
+// Allow build-time customization of the max semispace size. Building
+// V8 with snapshots and a non-default max semispace size is much
+// easier if you can define it as part of the build environment.
+#if defined(V8_MAX_SEMISPACE_SIZE)
+  max_semi_space_size_ = reserved_semispace_size_ = V8_MAX_SEMISPACE_SIZE;
+#endif
+
+  // Ensure old_generation_size_ is a multiple of kPageSize.
+  DCHECK(MB >= Page::kPageSize);
+
+  memset(roots_, 0, sizeof(roots_[0]) * kRootListLength);
+  set_native_contexts_list(NULL);
+  set_array_buffers_list(Smi::FromInt(0));
+  set_allocation_sites_list(Smi::FromInt(0));
+  set_encountered_weak_collections(Smi::FromInt(0));
+  // Put a dummy entry in the remembered pages so we can find the list in the
+  // minidump even if there are no real unmapped pages.
+  RememberUnmappedPage(NULL, false);
+
+  ClearObjectStats(true);
+}
+
+
+intptr_t Heap::Capacity() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.Capacity() + old_pointer_space_->Capacity() +
+         old_data_space_->Capacity() + code_space_->Capacity() +
+         map_space_->Capacity() + cell_space_->Capacity() +
+         property_cell_space_->Capacity();
+}
+
+
+intptr_t Heap::CommittedMemory() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.CommittedMemory() + old_pointer_space_->CommittedMemory() +
+         old_data_space_->CommittedMemory() + code_space_->CommittedMemory() +
+         map_space_->CommittedMemory() + cell_space_->CommittedMemory() +
+         property_cell_space_->CommittedMemory() + lo_space_->Size();
+}
+
+
+size_t Heap::CommittedPhysicalMemory() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.CommittedPhysicalMemory() +
+         old_pointer_space_->CommittedPhysicalMemory() +
+         old_data_space_->CommittedPhysicalMemory() +
+         code_space_->CommittedPhysicalMemory() +
+         map_space_->CommittedPhysicalMemory() +
+         cell_space_->CommittedPhysicalMemory() +
+         property_cell_space_->CommittedPhysicalMemory() +
+         lo_space_->CommittedPhysicalMemory();
+}
+
+
+intptr_t Heap::CommittedMemoryExecutable() {
+  if (!HasBeenSetUp()) return 0;
+
+  return isolate()->memory_allocator()->SizeExecutable();
+}
+
+
+void Heap::UpdateMaximumCommitted() {
+  if (!HasBeenSetUp()) return;
+
+  intptr_t current_committed_memory = CommittedMemory();
+  if (current_committed_memory > maximum_committed_) {
+    maximum_committed_ = current_committed_memory;
+  }
+}
+
+
+intptr_t Heap::Available() {
+  if (!HasBeenSetUp()) return 0;
+
+  return new_space_.Available() + old_pointer_space_->Available() +
+         old_data_space_->Available() + code_space_->Available() +
+         map_space_->Available() + cell_space_->Available() +
+         property_cell_space_->Available();
+}
+
+
+bool Heap::HasBeenSetUp() {
+  return old_pointer_space_ != NULL && old_data_space_ != NULL &&
+         code_space_ != NULL && map_space_ != NULL && cell_space_ != NULL &&
+         property_cell_space_ != NULL && lo_space_ != NULL;
+}
+
+
+int Heap::GcSafeSizeOfOldObject(HeapObject* object) {
+  if (IntrusiveMarking::IsMarked(object)) {
+    return IntrusiveMarking::SizeOfMarkedObject(object);
+  }
+  return object->SizeFromMap(object->map());
+}
+
+
+GarbageCollector Heap::SelectGarbageCollector(AllocationSpace space,
+                                              const char** reason) {
+  // Is global GC requested?
+  if (space != NEW_SPACE) {
+    isolate_->counters()->gc_compactor_caused_by_request()->Increment();
+    *reason = "GC in old space requested";
+    return MARK_COMPACTOR;
+  }
+
+  if (FLAG_gc_global || (FLAG_stress_compaction && (gc_count_ & 1) != 0)) {
+    *reason = "GC in old space forced by flags";
+    return MARK_COMPACTOR;
+  }
+
+  // Is enough data promoted to justify a global GC?
+  if (OldGenerationAllocationLimitReached()) {
+    isolate_->counters()->gc_compactor_caused_by_promoted_data()->Increment();
+    *reason = "promotion limit reached";
+    return MARK_COMPACTOR;
+  }
+
+  // Have allocation in OLD and LO failed?
+  if (old_gen_exhausted_) {
+    isolate_->counters()
+        ->gc_compactor_caused_by_oldspace_exhaustion()
+        ->Increment();
+    *reason = "old generations exhausted";
+    return MARK_COMPACTOR;
+  }
+
+  // Is there enough space left in OLD to guarantee that a scavenge can
+  // succeed?
+  //
+  // Note that MemoryAllocator->MaxAvailable() undercounts the memory available
+  // for object promotion. It counts only the bytes that the memory
+  // allocator has not yet allocated from the OS and assigned to any space,
+  // and does not count available bytes already in the old space or code
+  // space.  Undercounting is safe---we may get an unrequested full GC when
+  // a scavenge would have succeeded.
+  if (isolate_->memory_allocator()->MaxAvailable() <= new_space_.Size()) {
+    isolate_->counters()
+        ->gc_compactor_caused_by_oldspace_exhaustion()
+        ->Increment();
+    *reason = "scavenge might not succeed";
+    return MARK_COMPACTOR;
+  }
+
+  // Default
+  *reason = NULL;
+  return SCAVENGER;
+}
+
+
+// TODO(1238405): Combine the infrastructure for --heap-stats and
+// --log-gc to avoid the complicated preprocessor and flag testing.
+void Heap::ReportStatisticsBeforeGC() {
+// Heap::ReportHeapStatistics will also log NewSpace statistics when
+// --log-gc is set.  The following logic is used to avoid double logging.
+#ifdef DEBUG
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.CollectStatistics();
+  if (FLAG_heap_stats) {
+    ReportHeapStatistics("Before GC");
+  } else if (FLAG_log_gc) {
+    new_space_.ReportStatistics();
+  }
+  if (FLAG_heap_stats || FLAG_log_gc) new_space_.ClearHistograms();
+#else
+  if (FLAG_log_gc) {
+    new_space_.CollectStatistics();
+    new_space_.ReportStatistics();
+    new_space_.ClearHistograms();
+  }
+#endif  // DEBUG
+}
+
+
+void Heap::PrintShortHeapStatistics() {
+  if (!FLAG_trace_gc_verbose) return;
+  PrintPID("Memory allocator,   used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX "d KB\n",
+           isolate_->memory_allocator()->Size() / KB,
+           isolate_->memory_allocator()->Available() / KB);
+  PrintPID("New space,          used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           new_space_.Size() / KB, new_space_.Available() / KB,
+           new_space_.CommittedMemory() / KB);
+  PrintPID("Old pointers,       used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           old_pointer_space_->SizeOfObjects() / KB,
+           old_pointer_space_->Available() / KB,
+           old_pointer_space_->CommittedMemory() / KB);
+  PrintPID("Old data space,     used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           old_data_space_->SizeOfObjects() / KB,
+           old_data_space_->Available() / KB,
+           old_data_space_->CommittedMemory() / KB);
+  PrintPID("Code space,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           code_space_->SizeOfObjects() / KB, code_space_->Available() / KB,
+           code_space_->CommittedMemory() / KB);
+  PrintPID("Map space,          used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           map_space_->SizeOfObjects() / KB, map_space_->Available() / KB,
+           map_space_->CommittedMemory() / KB);
+  PrintPID("Cell space,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           cell_space_->SizeOfObjects() / KB, cell_space_->Available() / KB,
+           cell_space_->CommittedMemory() / KB);
+  PrintPID("PropertyCell space, used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           property_cell_space_->SizeOfObjects() / KB,
+           property_cell_space_->Available() / KB,
+           property_cell_space_->CommittedMemory() / KB);
+  PrintPID("Large object space, used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           lo_space_->SizeOfObjects() / KB, lo_space_->Available() / KB,
+           lo_space_->CommittedMemory() / KB);
+  PrintPID("All spaces,         used: %6" V8_PTR_PREFIX
+           "d KB"
+           ", available: %6" V8_PTR_PREFIX
+           "d KB"
+           ", committed: %6" V8_PTR_PREFIX "d KB\n",
+           this->SizeOfObjects() / KB, this->Available() / KB,
+           this->CommittedMemory() / KB);
+  PrintPID("External memory reported: %6" V8_PTR_PREFIX "d KB\n",
+           static_cast<intptr_t>(amount_of_external_allocated_memory_ / KB));
+  PrintPID("Total time spent in GC  : %.1f ms\n", total_gc_time_ms_);
+}
+
+
+// TODO(1238405): Combine the infrastructure for --heap-stats and
+// --log-gc to avoid the complicated preprocessor and flag testing.
+void Heap::ReportStatisticsAfterGC() {
+// As with the pre-GC report, we use some care to ensure that NewSpace
+// statistics are logged exactly once when --log-gc is turned on.
+#if defined(DEBUG)
+  if (FLAG_heap_stats) {
+    new_space_.CollectStatistics();
+    ReportHeapStatistics("After GC");
+  } else if (FLAG_log_gc) {
+    new_space_.ReportStatistics();
+  }
+#else
+  if (FLAG_log_gc) new_space_.ReportStatistics();
+#endif  // DEBUG
+}
+
+
+void Heap::GarbageCollectionPrologue() {
+  {
+    AllowHeapAllocation for_the_first_part_of_prologue;
+    ClearJSFunctionResultCaches();
+    gc_count_++;
+    unflattened_strings_length_ = 0;
+
+    if (FLAG_flush_code && FLAG_flush_code_incrementally) {
+      mark_compact_collector()->EnableCodeFlushing(true);
+    }
+
+#ifdef VERIFY_HEAP
+    if (FLAG_verify_heap) {
+      Verify();
+    }
+#endif
+  }
+
+  // Reset GC statistics.
+  promoted_objects_size_ = 0;
+  semi_space_copied_object_size_ = 0;
+  nodes_died_in_new_space_ = 0;
+  nodes_copied_in_new_space_ = 0;
+  nodes_promoted_ = 0;
+
+  UpdateMaximumCommitted();
+
+#ifdef DEBUG
+  DCHECK(!AllowHeapAllocation::IsAllowed() && gc_state_ == NOT_IN_GC);
+
+  if (FLAG_gc_verbose) Print();
+
+  ReportStatisticsBeforeGC();
+#endif  // DEBUG
+
+  store_buffer()->GCPrologue();
+
+  if (isolate()->concurrent_osr_enabled()) {
+    isolate()->optimizing_compiler_thread()->AgeBufferedOsrJobs();
+  }
+
+  if (new_space_.IsAtMaximumCapacity()) {
+    maximum_size_scavenges_++;
+  } else {
+    maximum_size_scavenges_ = 0;
+  }
+  CheckNewSpaceExpansionCriteria();
+}
+
+
+intptr_t Heap::SizeOfObjects() {
+  intptr_t total = 0;
+  AllSpaces spaces(this);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    total += space->SizeOfObjects();
+  }
+  return total;
+}
+
+
+void Heap::ClearAllICsByKind(Code::Kind kind) {
+  HeapObjectIterator it(code_space());
+
+  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
+    Code* code = Code::cast(object);
+    Code::Kind current_kind = code->kind();
+    if (current_kind == Code::FUNCTION ||
+        current_kind == Code::OPTIMIZED_FUNCTION) {
+      code->ClearInlineCaches(kind);
+    }
+  }
+}
+
+
+void Heap::RepairFreeListsAfterBoot() {
+  PagedSpaces spaces(this);
+  for (PagedSpace* space = spaces.next(); space != NULL;
+       space = spaces.next()) {
+    space->RepairFreeListsAfterBoot();
+  }
+}
+
+
+void Heap::ProcessPretenuringFeedback() {
+  if (FLAG_allocation_site_pretenuring) {
+    int tenure_decisions = 0;
+    int dont_tenure_decisions = 0;
+    int allocation_mementos_found = 0;
+    int allocation_sites = 0;
+    int active_allocation_sites = 0;
+
+    // If the scratchpad overflowed, we have to iterate over the allocation
+    // sites list.
+    // TODO(hpayer): We iterate over the whole list of allocation sites once
+    // we have grown to the maximum semi-space size, in order to deopt
+    // maybe-tenured allocation sites. We could hold the maybe-tenured
+    // allocation sites in a separate data structure if this becomes a
+    // performance problem.
+    bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
+    bool use_scratchpad =
+        allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize &&
+        !deopt_maybe_tenured;
+
+    int i = 0;
+    Object* list_element = allocation_sites_list();
+    bool trigger_deoptimization = false;
+    bool maximum_size_scavenge = MaximumSizeScavenge();
+    while (use_scratchpad ? i < allocation_sites_scratchpad_length_
+                          : list_element->IsAllocationSite()) {
+      AllocationSite* site =
+          use_scratchpad
+              ? AllocationSite::cast(allocation_sites_scratchpad()->get(i))
+              : AllocationSite::cast(list_element);
+      allocation_mementos_found += site->memento_found_count();
+      if (site->memento_found_count() > 0) {
+        active_allocation_sites++;
+        if (site->DigestPretenuringFeedback(maximum_size_scavenge)) {
+          trigger_deoptimization = true;
+        }
+        if (site->GetPretenureMode() == TENURED) {
+          tenure_decisions++;
+        } else {
+          dont_tenure_decisions++;
+        }
+        allocation_sites++;
+      }
+
+      if (deopt_maybe_tenured && site->IsMaybeTenure()) {
+        site->set_deopt_dependent_code(true);
+        trigger_deoptimization = true;
+      }
+
+      if (use_scratchpad) {
+        i++;
+      } else {
+        list_element = site->weak_next();
+      }
+    }
+
+    if (trigger_deoptimization) {
+      isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
+    }
+
+    FlushAllocationSitesScratchpad();
+
+    if (FLAG_trace_pretenuring_statistics &&
+        (allocation_mementos_found > 0 || tenure_decisions > 0 ||
+         dont_tenure_decisions > 0)) {
+      PrintF(
+          "GC: (mode, #visited allocation sites, #active allocation sites, "
+          "#mementos, #tenure decisions, #donttenure decisions) "
+          "(%s, %d, %d, %d, %d, %d)\n",
+          use_scratchpad ? "use scratchpad" : "use list", allocation_sites,
+          active_allocation_sites, allocation_mementos_found, tenure_decisions,
+          dont_tenure_decisions);
+    }
+  }
+}
+
+
+void Heap::DeoptMarkedAllocationSites() {
+  // TODO(hpayer): If iterating over the allocation sites list becomes a
+  // performance issue, use a cache heap data structure instead (similar to the
+  // allocation sites scratchpad).
+  Object* list_element = allocation_sites_list();
+  while (list_element->IsAllocationSite()) {
+    AllocationSite* site = AllocationSite::cast(list_element);
+    if (site->deopt_dependent_code()) {
+      site->dependent_code()->MarkCodeForDeoptimization(
+          isolate_, DependentCode::kAllocationSiteTenuringChangedGroup);
+      site->set_deopt_dependent_code(false);
+    }
+    list_element = site->weak_next();
+  }
+  Deoptimizer::DeoptimizeMarkedCode(isolate_);
+}
+
+
+void Heap::GarbageCollectionEpilogue() {
+  store_buffer()->GCEpilogue();
+
+  // In release mode, we only zap the from space under heap verification.
+  if (Heap::ShouldZapGarbage()) {
+    ZapFromSpace();
+  }
+
+  // Process pretenuring feedback and update allocation sites.
+  ProcessPretenuringFeedback();
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+
+  AllowHeapAllocation for_the_rest_of_the_epilogue;
+
+#ifdef DEBUG
+  if (FLAG_print_global_handles) isolate_->global_handles()->Print();
+  if (FLAG_print_handles) PrintHandles();
+  if (FLAG_gc_verbose) Print();
+  if (FLAG_code_stats) ReportCodeStatistics("After GC");
+#endif
+  if (FLAG_deopt_every_n_garbage_collections > 0) {
+    // TODO(jkummerow/ulan/jarin): This is not safe! We can't assume that
+    // the topmost optimized frame can be deoptimized safely, because it
+    // might not have a lazy bailout point right after its current PC.
+    if (++gcs_since_last_deopt_ == FLAG_deopt_every_n_garbage_collections) {
+      Deoptimizer::DeoptimizeAll(isolate());
+      gcs_since_last_deopt_ = 0;
+    }
+  }
+
+  UpdateMaximumCommitted();
+
+  isolate_->counters()->alive_after_last_gc()->Set(
+      static_cast<int>(SizeOfObjects()));
+
+  isolate_->counters()->string_table_capacity()->Set(
+      string_table()->Capacity());
+  isolate_->counters()->number_of_symbols()->Set(
+      string_table()->NumberOfElements());
+
+  if (full_codegen_bytes_generated_ + crankshaft_codegen_bytes_generated_ > 0) {
+    isolate_->counters()->codegen_fraction_crankshaft()->AddSample(
+        static_cast<int>((crankshaft_codegen_bytes_generated_ * 100.0) /
+                         (crankshaft_codegen_bytes_generated_ +
+                          full_codegen_bytes_generated_)));
+  }
+
+  if (CommittedMemory() > 0) {
+    isolate_->counters()->external_fragmentation_total()->AddSample(
+        static_cast<int>(100 - (SizeOfObjects() * 100.0) / CommittedMemory()));
+
+    isolate_->counters()->heap_fraction_new_space()->AddSample(static_cast<int>(
+        (new_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+    isolate_->counters()->heap_fraction_old_pointer_space()->AddSample(
+        static_cast<int>((old_pointer_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_old_data_space()->AddSample(
+        static_cast<int>((old_data_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_code_space()->AddSample(
+        static_cast<int>((code_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_map_space()->AddSample(static_cast<int>(
+        (map_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+    isolate_->counters()->heap_fraction_cell_space()->AddSample(
+        static_cast<int>((cell_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_property_cell_space()->AddSample(
+        static_cast<int>((property_cell_space()->CommittedMemory() * 100.0) /
+                         CommittedMemory()));
+    isolate_->counters()->heap_fraction_lo_space()->AddSample(static_cast<int>(
+        (lo_space()->CommittedMemory() * 100.0) / CommittedMemory()));
+
+    isolate_->counters()->heap_sample_total_committed()->AddSample(
+        static_cast<int>(CommittedMemory() / KB));
+    isolate_->counters()->heap_sample_total_used()->AddSample(
+        static_cast<int>(SizeOfObjects() / KB));
+    isolate_->counters()->heap_sample_map_space_committed()->AddSample(
+        static_cast<int>(map_space()->CommittedMemory() / KB));
+    isolate_->counters()->heap_sample_cell_space_committed()->AddSample(
+        static_cast<int>(cell_space()->CommittedMemory() / KB));
+    isolate_->counters()
+        ->heap_sample_property_cell_space_committed()
+        ->AddSample(
+            static_cast<int>(property_cell_space()->CommittedMemory() / KB));
+    isolate_->counters()->heap_sample_code_space_committed()->AddSample(
+        static_cast<int>(code_space()->CommittedMemory() / KB));
+
+    isolate_->counters()->heap_sample_maximum_committed()->AddSample(
+        static_cast<int>(MaximumCommittedMemory() / KB));
+  }
+
+#define UPDATE_COUNTERS_FOR_SPACE(space)                \
+  isolate_->counters()->space##_bytes_available()->Set( \
+      static_cast<int>(space()->Available()));          \
+  isolate_->counters()->space##_bytes_committed()->Set( \
+      static_cast<int>(space()->CommittedMemory()));    \
+  isolate_->counters()->space##_bytes_used()->Set(      \
+      static_cast<int>(space()->SizeOfObjects()));
+#define UPDATE_FRAGMENTATION_FOR_SPACE(space)                          \
+  if (space()->CommittedMemory() > 0) {                                \
+    isolate_->counters()->external_fragmentation_##space()->AddSample( \
+        static_cast<int>(100 -                                         \
+                         (space()->SizeOfObjects() * 100.0) /          \
+                             space()->CommittedMemory()));             \
+  }
+#define UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(space) \
+  UPDATE_COUNTERS_FOR_SPACE(space)                         \
+  UPDATE_FRAGMENTATION_FOR_SPACE(space)
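+  // E.g. UPDATE_COUNTERS_FOR_SPACE(new_space) expands, via token pasting,
+  // into updates of the new_space_bytes_available, new_space_bytes_committed
+  // and new_space_bytes_used counters.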
+
+  UPDATE_COUNTERS_FOR_SPACE(new_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_pointer_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(old_data_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(code_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(map_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(cell_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(property_cell_space)
+  UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE(lo_space)
+#undef UPDATE_COUNTERS_FOR_SPACE
+#undef UPDATE_FRAGMENTATION_FOR_SPACE
+#undef UPDATE_COUNTERS_AND_FRAGMENTATION_FOR_SPACE
+
+#ifdef DEBUG
+  ReportStatisticsAfterGC();
+#endif  // DEBUG
+
+  // Remember the last top pointer so that we can later find out
+  // whether we allocated in new space since the last GC.
+  new_space_top_after_last_gc_ = new_space()->top();
+}
+
+
+void Heap::CollectAllGarbage(int flags, const char* gc_reason,
+                             const v8::GCCallbackFlags gc_callback_flags) {
+  // Since we are ignoring the return value, the exact choice of space does
+  // not matter, so long as we do not specify NEW_SPACE, which would not
+  // cause a full GC.
+  mark_compact_collector_.SetFlags(flags);
+  CollectGarbage(OLD_POINTER_SPACE, gc_reason, gc_callback_flags);
+  mark_compact_collector_.SetFlags(kNoGCFlags);
+}
+
+
+void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
+  // Since we are ignoring the return value, the exact choice of space does
+  // not matter, so long as we do not specify NEW_SPACE, which would not
+  // cause a full GC.
+  // A major GC would invoke weak handle callbacks on weakly reachable
+  // handles, but won't collect weakly reachable objects until the next
+  // major GC.  Therefore, if we collect aggressively and a weak handle
+  // callback has been invoked, we rerun a major GC to release objects which
+  // have become garbage.
+  // Note: as weak callbacks can execute arbitrary code, we cannot hope that
+  // eventually there will be no weak callback invocations.  Therefore we
+  // stop recollecting after several attempts.
+  if (isolate()->concurrent_recompilation_enabled()) {
+    // The optimizing compiler may be unnecessarily holding on to memory.
+    DisallowHeapAllocation no_recursive_gc;
+    isolate()->optimizing_compiler_thread()->Flush();
+  }
+  mark_compact_collector()->SetFlags(kMakeHeapIterableMask |
+                                     kReduceMemoryFootprintMask);
+  isolate_->compilation_cache()->Clear();
+  const int kMaxNumberOfAttempts = 7;
+  const int kMinNumberOfAttempts = 2;
+  for (int attempt = 0; attempt < kMaxNumberOfAttempts; attempt++) {
+    if (!CollectGarbage(MARK_COMPACTOR, gc_reason, NULL) &&
+        attempt + 1 >= kMinNumberOfAttempts) {
+      break;
+    }
+  }
+  mark_compact_collector()->SetFlags(kNoGCFlags);
+  new_space_.Shrink();
+  UncommitFromSpace();
+  incremental_marking()->UncommitMarkingDeque();
+}
+
+
+void Heap::EnsureFillerObjectAtTop() {
+  // There may be an allocation memento behind every object in new space.
+  // If we evacuate a non-full new space, or if we are on the last page of
+  // the new space, then there may be uninitialized memory behind the top
+  // pointer of the new space page. We store a filler object there to
+  // identify the unused space.
+  Address from_top = new_space_.top();
+  Address from_limit = new_space_.limit();
+  if (from_top < from_limit) {
+    int remaining_in_page = static_cast<int>(from_limit - from_top);
+    CreateFillerObjectAt(from_top, remaining_in_page);
+  }
+}
+
+
+bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
+                          const char* collector_reason,
+                          const v8::GCCallbackFlags gc_callback_flags) {
+  // The VM is in the GC state until exiting this function.
+  VMState<GC> state(isolate_);
+
+#ifdef DEBUG
+  // Reset the allocation timeout to the GC interval, but make sure to
+  // allow at least a few allocations after a collection. The reason
+  // for this is that we have a lot of allocation sequences and we
+  // assume that a garbage collection will allow the subsequent
+  // allocation attempts to go through.
+  allocation_timeout_ = Max(6, FLAG_gc_interval);
+#endif
+
+  EnsureFillerObjectAtTop();
+
+  if (collector == SCAVENGER && !incremental_marking()->IsStopped()) {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Scavenge during marking.\n");
+    }
+  }
+
+  if (collector == MARK_COMPACTOR &&
+      !mark_compact_collector()->abort_incremental_marking() &&
+      !incremental_marking()->IsStopped() &&
+      !incremental_marking()->should_hurry() &&
+      FLAG_incremental_marking_steps) {
+    // Make progress in incremental marking.
+    const intptr_t kStepSizeWhenDelayedByScavenge = 1 * MB;
+    incremental_marking()->Step(kStepSizeWhenDelayedByScavenge,
+                                IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+    if (!incremental_marking()->IsComplete() && !FLAG_gc_global) {
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Delaying MarkSweep.\n");
+      }
+      collector = SCAVENGER;
+      collector_reason = "incremental marking delaying mark-sweep";
+    }
+  }
+
+  bool next_gc_likely_to_collect_more = false;
+
+  {
+    tracer()->Start(collector, gc_reason, collector_reason);
+    DCHECK(AllowHeapAllocation::IsAllowed());
+    DisallowHeapAllocation no_allocation_during_gc;
+    GarbageCollectionPrologue();
+
+    {
+      HistogramTimerScope histogram_timer_scope(
+          (collector == SCAVENGER) ? isolate_->counters()->gc_scavenger()
+                                   : isolate_->counters()->gc_compactor());
+      next_gc_likely_to_collect_more =
+          PerformGarbageCollection(collector, gc_callback_flags);
+    }
+
+    GarbageCollectionEpilogue();
+    tracer()->Stop();
+  }
+
+  // Start incremental marking for the next cycle. The heap snapshot
+  // generator needs incremental marking to stay off after it aborted.
+  if (!mark_compact_collector()->abort_incremental_marking() &&
+      WorthActivatingIncrementalMarking()) {
+    incremental_marking()->Start();
+  }
+
+  return next_gc_likely_to_collect_more;
+}
+
+
+int Heap::NotifyContextDisposed() {
+  if (isolate()->concurrent_recompilation_enabled()) {
+    // Flush the queued recompilation tasks.
+    isolate()->optimizing_compiler_thread()->Flush();
+  }
+  flush_monomorphic_ics_ = true;
+  AgeInlineCaches();
+  return ++contexts_disposed_;
+}
+
+
+void Heap::MoveElements(FixedArray* array, int dst_index, int src_index,
+                        int len) {
+  if (len == 0) return;
+
+  DCHECK(array->map() != fixed_cow_array_map());
+  Object** dst_objects = array->data_start() + dst_index;
+  MemMove(dst_objects, array->data_start() + src_index, len * kPointerSize);
+  if (!InNewSpace(array)) {
+    for (int i = 0; i < len; i++) {
+      // TODO(hpayer): check store buffer for entries
+      if (InNewSpace(dst_objects[i])) {
+        RecordWrite(array->address(), array->OffsetOfElementAt(dst_index + i));
+      }
+    }
+  }
+  incremental_marking()->RecordWrites(array);
+}
+
+
+#ifdef VERIFY_HEAP
+// Helper class for verifying the string table.
+class StringTableVerifier : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    // Visit all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject()) {
+        // Check that the string is actually internalized.
+        CHECK((*p)->IsTheHole() || (*p)->IsUndefined() ||
+              (*p)->IsInternalizedString());
+      }
+    }
+  }
+};
+
+
+static void VerifyStringTable(Heap* heap) {
+  StringTableVerifier verifier;
+  heap->string_table()->IterateElements(&verifier);
+}
+#endif  // VERIFY_HEAP
+
+
+static bool AbortIncrementalMarkingAndCollectGarbage(
+    Heap* heap, AllocationSpace space, const char* gc_reason = NULL) {
+  heap->mark_compact_collector()->SetFlags(Heap::kAbortIncrementalMarkingMask);
+  bool result = heap->CollectGarbage(space, gc_reason);
+  heap->mark_compact_collector()->SetFlags(Heap::kNoGCFlags);
+  return result;
+}
+
+
+void Heap::ReserveSpace(int* sizes, Address* locations_out) {
+  bool gc_performed = true;
+  int counter = 0;
+  static const int kThreshold = 20;
+  while (gc_performed && counter++ < kThreshold) {
+    gc_performed = false;
+    DCHECK(NEW_SPACE == FIRST_PAGED_SPACE - 1);
+    for (int space = NEW_SPACE; space <= LAST_PAGED_SPACE; space++) {
+      if (sizes[space] != 0) {
+        AllocationResult allocation;
+        if (space == NEW_SPACE) {
+          allocation = new_space()->AllocateRaw(sizes[space]);
+        } else {
+          allocation = paged_space(space)->AllocateRaw(sizes[space]);
+        }
+        FreeListNode* node;
+        if (!allocation.To(&node)) {
+          if (space == NEW_SPACE) {
+            Heap::CollectGarbage(NEW_SPACE,
+                                 "failed to reserve space in the new space");
+          } else {
+            AbortIncrementalMarkingAndCollectGarbage(
+                this, static_cast<AllocationSpace>(space),
+                "failed to reserve space in paged space");
+          }
+          gc_performed = true;
+          break;
+        } else {
+          // Mark with a free list node, in case we have a GC before
+          // deserializing.
+          node->set_size(this, sizes[space]);
+          locations_out[space] = node->address();
+        }
+      }
+    }
+  }
+
+  if (gc_performed) {
+    // Failed to reserve the space after several attempts.
+    V8::FatalProcessOutOfMemory("Heap::ReserveSpace");
+  }
+}
+
+
+void Heap::EnsureFromSpaceIsCommitted() {
+  if (new_space_.CommitFromSpaceIfNeeded()) return;
+
+  // Committing memory to from space failed.
+  // Memory is exhausted and we will die.
+  V8::FatalProcessOutOfMemory("Committing semi space failed.");
+}
+
+
+void Heap::ClearJSFunctionResultCaches() {
+  if (isolate_->bootstrapper()->IsActive()) return;
+
+  Object* context = native_contexts_list();
+  while (!context->IsUndefined()) {
+    // Get the caches for this context. GC can happen when the context
+    // is not fully initialized, so the caches can be undefined.
+    Object* caches_or_undefined =
+        Context::cast(context)->get(Context::JSFUNCTION_RESULT_CACHES_INDEX);
+    if (!caches_or_undefined->IsUndefined()) {
+      FixedArray* caches = FixedArray::cast(caches_or_undefined);
+      // Clear the caches:
+      int length = caches->length();
+      for (int i = 0; i < length; i++) {
+        JSFunctionResultCache::cast(caches->get(i))->Clear();
+      }
+    }
+    // Get the next context:
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void Heap::ClearNormalizedMapCaches() {
+  if (isolate_->bootstrapper()->IsActive() &&
+      !incremental_marking()->IsMarking()) {
+    return;
+  }
+
+  Object* context = native_contexts_list();
+  while (!context->IsUndefined()) {
+    // GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    Object* cache =
+        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX);
+    if (!cache->IsUndefined()) {
+      NormalizedMapCache::cast(cache)->Clear();
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
+  if (start_new_space_size == 0) return;
+
+  promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
+                     static_cast<double>(start_new_space_size) * 100);
+
+  semi_space_copied_rate_ =
+      (static_cast<double>(semi_space_copied_object_size_) /
+       static_cast<double>(start_new_space_size) * 100);
+
+  double survival_rate = promotion_rate_ + semi_space_copied_rate_;
+
+  if (survival_rate > kYoungSurvivalRateHighThreshold) {
+    high_survival_rate_period_length_++;
+  } else {
+    high_survival_rate_period_length_ = 0;
+  }
+}
+
+
+bool Heap::PerformGarbageCollection(
+    GarbageCollector collector, const v8::GCCallbackFlags gc_callback_flags) {
+  int freed_global_handles = 0;
+
+  if (collector != SCAVENGER) {
+    PROFILE(isolate_, CodeMovingGCEvent());
+  }
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyStringTable(this);
+  }
+#endif
+
+  GCType gc_type =
+      collector == MARK_COMPACTOR ? kGCTypeMarkSweepCompact : kGCTypeScavenge;
+
+  {
+    GCCallbacksScope scope(this);
+    if (scope.CheckReenter()) {
+      AllowHeapAllocation allow_allocation;
+      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      VMState<EXTERNAL> state(isolate_);
+      HandleScope handle_scope(isolate_);
+      CallGCPrologueCallbacks(gc_type, kNoGCCallbackFlags);
+    }
+  }
+
+  EnsureFromSpaceIsCommitted();
+
+  int start_new_space_size = Heap::new_space()->SizeAsInt();
+
+  if (IsHighSurvivalRate()) {
+    // We speed up the incremental marker if it is running so that it
+    // does not fall behind the rate of promotion, which would cause a
+    // constantly growing old space.
+    incremental_marking()->NotifyOfHighPromotionRate();
+  }
+
+  if (collector == MARK_COMPACTOR) {
+    // Perform mark-sweep with optional compaction.
+    MarkCompact();
+    sweep_generation_++;
+    // Temporarily set the limit for the case when
+    // PostGarbageCollectionProcessing allocates and triggers GC. The real
+    // limit is set after PostGarbageCollectionProcessing.
+    old_generation_allocation_limit_ =
+        OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
+    old_gen_exhausted_ = false;
+  } else {
+    Scavenge();
+  }
+
+  UpdateSurvivalStatistics(start_new_space_size);
+
+  isolate_->counters()->objs_since_last_young()->Set(0);
+
+  // Callbacks that fire after this point might trigger nested GCs and
+  // restart incremental marking, so the assertion can't be moved down.
+  DCHECK(collector == SCAVENGER || incremental_marking()->IsStopped());
+
+  gc_post_processing_depth_++;
+  {
+    AllowHeapAllocation allow_allocation;
+    GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+    freed_global_handles =
+        isolate_->global_handles()->PostGarbageCollectionProcessing(collector);
+  }
+  gc_post_processing_depth_--;
+
+  isolate_->eternal_handles()->PostGarbageCollectionProcessing(this);
+
+  // Update relocatables.
+  Relocatable::PostGarbageCollectionProcessing(isolate_);
+
+  if (collector == MARK_COMPACTOR) {
+    // Register the amount of external allocated memory.
+    amount_of_external_allocated_memory_at_last_global_gc_ =
+        amount_of_external_allocated_memory_;
+    old_generation_allocation_limit_ = OldGenerationAllocationLimit(
+        PromotedSpaceSizeOfObjects(), freed_global_handles);
+  }
+
+  {
+    GCCallbacksScope scope(this);
+    if (scope.CheckReenter()) {
+      AllowHeapAllocation allow_allocation;
+      GCTracer::Scope scope(tracer(), GCTracer::Scope::EXTERNAL);
+      VMState<EXTERNAL> state(isolate_);
+      HandleScope handle_scope(isolate_);
+      CallGCEpilogueCallbacks(gc_type, gc_callback_flags);
+    }
+  }
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyStringTable(this);
+  }
+#endif
+
+  return freed_global_handles > 0;
+}
+
+
+void Heap::CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags) {
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_type & gc_prologue_callbacks_[i].gc_type) {
+      if (!gc_prologue_callbacks_[i].pass_isolate_) {
+        v8::GCPrologueCallback callback =
+            reinterpret_cast<v8::GCPrologueCallback>(
+                gc_prologue_callbacks_[i].callback);
+        callback(gc_type, flags);
+      } else {
+        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+        gc_prologue_callbacks_[i].callback(isolate, gc_type, flags);
+      }
+    }
+  }
+}
+
+
+void Heap::CallGCEpilogueCallbacks(GCType gc_type,
+                                   GCCallbackFlags gc_callback_flags) {
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_type & gc_epilogue_callbacks_[i].gc_type) {
+      if (!gc_epilogue_callbacks_[i].pass_isolate_) {
+        v8::GCPrologueCallback callback =
+            reinterpret_cast<v8::GCPrologueCallback>(
+                gc_epilogue_callbacks_[i].callback);
+        callback(gc_type, gc_callback_flags);
+      } else {
+        v8::Isolate* isolate = reinterpret_cast<v8::Isolate*>(this->isolate());
+        gc_epilogue_callbacks_[i].callback(isolate, gc_type, gc_callback_flags);
+      }
+    }
+  }
+}
+
+
+void Heap::MarkCompact() {
+  gc_state_ = MARK_COMPACT;
+  LOG(isolate_, ResourceEvent("markcompact", "begin"));
+
+  uint64_t size_of_objects_before_gc = SizeOfObjects();
+
+  mark_compact_collector_.Prepare();
+
+  ms_count_++;
+
+  MarkCompactPrologue();
+
+  mark_compact_collector_.CollectGarbage();
+
+  LOG(isolate_, ResourceEvent("markcompact", "end"));
+
+  gc_state_ = NOT_IN_GC;
+
+  isolate_->counters()->objs_since_last_full()->Set(0);
+
+  flush_monomorphic_ics_ = false;
+
+  if (FLAG_allocation_site_pretenuring) {
+    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
+  }
+}
+
+
+void Heap::MarkCompactPrologue() {
+  // At any old GC clear the keyed lookup cache to enable collection of unused
+  // maps.
+  isolate_->keyed_lookup_cache()->Clear();
+  isolate_->context_slot_cache()->Clear();
+  isolate_->descriptor_lookup_cache()->Clear();
+  RegExpResultsCache::Clear(string_split_cache());
+  RegExpResultsCache::Clear(regexp_multiple_cache());
+
+  isolate_->compilation_cache()->MarkCompactPrologue();
+
+  CompletelyClearInstanceofCache();
+
+  FlushNumberStringCache();
+  if (FLAG_cleanup_code_caches_at_gc) {
+    polymorphic_code_cache()->set_cache(undefined_value());
+  }
+
+  ClearNormalizedMapCaches();
+}
+
+
+// Helper class for copying HeapObjects
+class ScavengeVisitor : public ObjectVisitor {
+ public:
+  explicit ScavengeVisitor(Heap* heap) : heap_(heap) {}
+
+  void VisitPointer(Object** p) { ScavengePointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    // Copy all HeapObject pointers in [start, end)
+    for (Object** p = start; p < end; p++) ScavengePointer(p);
+  }
+
+ private:
+  void ScavengePointer(Object** p) {
+    Object* object = *p;
+    if (!heap_->InNewSpace(object)) return;
+    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                         reinterpret_cast<HeapObject*>(object));
+  }
+
+  Heap* heap_;
+};
+
+
+#ifdef VERIFY_HEAP
+// Visitor class to verify pointers in code or data space do not point into
+// new space.
+class VerifyNonPointerSpacePointersVisitor : public ObjectVisitor {
+ public:
+  explicit VerifyNonPointerSpacePointersVisitor(Heap* heap) : heap_(heap) {}
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        CHECK(!heap_->InNewSpace(HeapObject::cast(*current)));
+      }
+    }
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+static void VerifyNonPointerSpacePointers(Heap* heap) {
+  // Verify that there are no pointers to new space in spaces where we
+  // do not expect them.
+  VerifyNonPointerSpacePointersVisitor v(heap);
+  HeapObjectIterator code_it(heap->code_space());
+  for (HeapObject* object = code_it.Next(); object != NULL;
+       object = code_it.Next())
+    object->Iterate(&v);
+
+  HeapObjectIterator data_it(heap->old_data_space());
+  for (HeapObject* object = data_it.Next(); object != NULL;
+       object = data_it.Next())
+    object->Iterate(&v);
+}
+#endif  // VERIFY_HEAP
+
+
+void Heap::CheckNewSpaceExpansionCriteria() {
+  if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
+      survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+    // Grow the size of new space if there is room to grow, enough data
+    // has survived scavenge since the last expansion and we are not in
+    // high promotion mode.
+    new_space_.Grow();
+    survived_since_last_expansion_ = 0;
+  }
+}
+
+
+static bool IsUnscavengedHeapObject(Heap* heap, Object** p) {
+  return heap->InNewSpace(*p) &&
+         !HeapObject::cast(*p)->map_word().IsForwardingAddress();
+}
+
+
+void Heap::ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+                                       StoreBufferEvent event) {
+  heap->store_buffer_rebuilder_.Callback(page, event);
+}
+
+
+void StoreBufferRebuilder::Callback(MemoryChunk* page, StoreBufferEvent event) {
+  if (event == kStoreBufferStartScanningPagesEvent) {
+    start_of_current_page_ = NULL;
+    current_page_ = NULL;
+  } else if (event == kStoreBufferScanningPageEvent) {
+    if (current_page_ != NULL) {
+      // If this page already overflowed the store buffer during this iteration.
+      if (current_page_->scan_on_scavenge()) {
+        // Then we should wipe out the entries that have been added for it.
+        store_buffer_->SetTop(start_of_current_page_);
+      } else if (store_buffer_->Top() - start_of_current_page_ >=
+                 (store_buffer_->Limit() - store_buffer_->Top()) >> 2) {
+        // Did we find too many pointers in the previous page?  The heuristic
+        // is that no page can take more than 1/5 of the remaining slots in
+        // the store buffer.
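+        // With E entries added for this page and R slots still free, this
+        // branch fires when E >= R / 4, i.e. E >= (E + R) / 5.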
+        current_page_->set_scan_on_scavenge(true);
+        store_buffer_->SetTop(start_of_current_page_);
+      } else {
+        // In this case the page we scanned took a reasonable number of slots in
+        // the store buffer.  It has now been rehabilitated and is no longer
+        // marked scan_on_scavenge.
+        DCHECK(!current_page_->scan_on_scavenge());
+      }
+    }
+    start_of_current_page_ = store_buffer_->Top();
+    current_page_ = page;
+  } else if (event == kStoreBufferFullEvent) {
+    // The current page overflowed the store buffer again.  Wipe out its entries
+    // in the store buffer and mark it scan-on-scavenge again.  This may happen
+    // several times while scanning.
+    if (current_page_ == NULL) {
+      // Store Buffer overflowed while scanning promoted objects.  These are not
+      // in any particular page, though they are likely to be clustered by the
+      // allocation routines.
+      store_buffer_->EnsureSpace(StoreBuffer::kStoreBufferSize / 2);
+    } else {
+      // Store Buffer overflowed while scanning a particular old space page for
+      // pointers to new space.
+      DCHECK(current_page_ == page);
+      DCHECK(page != NULL);
+      current_page_->set_scan_on_scavenge(true);
+      DCHECK(start_of_current_page_ != store_buffer_->Top());
+      store_buffer_->SetTop(start_of_current_page_);
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void PromotionQueue::Initialize() {
+  // Assumes that a NewSpacePage holds a whole number of promotion queue
+  // entries (each a pair of intptr_t).  This allows us to simplify the test
+  // for when to switch pages.
+  DCHECK((Page::kPageSize - MemoryChunk::kBodyOffset) % (2 * kPointerSize) ==
+         0);
+  limit_ = reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceStart());
+  front_ = rear_ =
+      reinterpret_cast<intptr_t*>(heap_->new_space()->ToSpaceEnd());
+  emergency_stack_ = NULL;
+}
+
+
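+// Copies the queue entries between rear_ and the end of the current page
+// onto a heap-allocated emergency stack, so that new-space allocation can
+// proceed past the old queue head without clobbering it.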
+void PromotionQueue::RelocateQueueHead() {
+  DCHECK(emergency_stack_ == NULL);
+
+  Page* p = Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  intptr_t* head_start = rear_;
+  intptr_t* head_end = Min(front_, reinterpret_cast<intptr_t*>(p->area_end()));
+
+  int entries_count =
+      static_cast<int>(head_end - head_start) / kEntrySizeInWords;
+
+  emergency_stack_ = new List<Entry>(2 * entries_count);
+
+  while (head_start != head_end) {
+    int size = static_cast<int>(*(head_start++));
+    HeapObject* obj = reinterpret_cast<HeapObject*>(*(head_start++));
+    emergency_stack_->Add(Entry(obj, size));
+  }
+  rear_ = head_end;
+}
+
+
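+// Decides which elements of the weak object lists survive a scavenge:
+// objects outside from-space are retained as-is, evacuated objects are
+// replaced by their forwarding address, and unreached objects are dropped.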
+class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  explicit ScavengeWeakObjectRetainer(Heap* heap) : heap_(heap) {}
+
+  virtual Object* RetainAs(Object* object) {
+    if (!heap_->InFromSpace(object)) {
+      return object;
+    }
+
+    MapWord map_word = HeapObject::cast(object)->map_word();
+    if (map_word.IsForwardingAddress()) {
+      return map_word.ToForwardingAddress();
+    }
+    return NULL;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+void Heap::Scavenge() {
+  RelocationLock relocation_lock(this);
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) VerifyNonPointerSpacePointers(this);
+#endif
+
+  gc_state_ = SCAVENGE;
+
+  // Implements Cheney's copying algorithm.
+  LOG(isolate_, ResourceEvent("scavenge", "begin"));
+
+  // Clear descriptor cache.
+  isolate_->descriptor_lookup_cache()->Clear();
+
+  // Used for updating survived_since_last_expansion_ at function end.
+  intptr_t survived_watermark = PromotedSpaceSizeOfObjects();
+
+  SelectScavengingVisitorsTable();
+
+  incremental_marking()->PrepareForScavenge();
+
+  // Flip the semispaces.  After flipping, to space is empty, from space has
+  // live objects.
+  new_space_.Flip();
+  new_space_.ResetAllocationInfo();
+
+  // We need to sweep newly copied objects which can be either in the
+  // to space or promoted to the old generation.  For to-space
+  // objects, we treat the bottom of the to space as a queue.  Newly
+  // copied and unswept objects lie between a 'front' mark and the
+  // allocation pointer.
+  //
+  // Promoted objects can go into various old-generation spaces, and
+  // can be allocated internally in the spaces (from the free list).
+  // We treat the top of the to space as a queue of addresses of
+  // promoted objects.  The addresses of newly promoted and unswept
+  // objects lie between a 'front' mark and a 'rear' mark that is
+  // updated as a side effect of promoting an object.
+  //
+  // There is guaranteed to be enough room at the top of the to space
+  // for the addresses of promoted objects: every object promoted
+  // frees up its size in bytes from the top of the new space, and
+  // objects are at least one pointer in size.
+  Address new_space_front = new_space_.ToSpaceStart();
+  promotion_queue_.Initialize();
+
+#ifdef DEBUG
+  store_buffer()->Clean();
+#endif
+
+  ScavengeVisitor scavenge_visitor(this);
+  // Copy roots.
+  IterateRoots(&scavenge_visitor, VISIT_ALL_IN_SCAVENGE);
+
+  // Copy objects reachable from the old generation.
+  {
+    StoreBufferRebuildScope scope(this, store_buffer(),
+                                  &ScavengeStoreBufferCallback);
+    store_buffer()->IteratePointersToNewSpace(&ScavengeObject);
+  }
+
+  // Copy objects reachable from simple cells by scavenging cell values
+  // directly.
+  HeapObjectIterator cell_iterator(cell_space_);
+  for (HeapObject* heap_object = cell_iterator.Next(); heap_object != NULL;
+       heap_object = cell_iterator.Next()) {
+    if (heap_object->IsCell()) {
+      Cell* cell = Cell::cast(heap_object);
+      Address value_address = cell->ValueAddress();
+      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+    }
+  }
+
+  // Copy objects reachable from global property cells by scavenging global
+  // property cell values directly.
+  HeapObjectIterator js_global_property_cell_iterator(property_cell_space_);
+  for (HeapObject* heap_object = js_global_property_cell_iterator.Next();
+       heap_object != NULL;
+       heap_object = js_global_property_cell_iterator.Next()) {
+    if (heap_object->IsPropertyCell()) {
+      PropertyCell* cell = PropertyCell::cast(heap_object);
+      Address value_address = cell->ValueAddress();
+      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(value_address));
+      Address type_address = cell->TypeAddress();
+      scavenge_visitor.VisitPointer(reinterpret_cast<Object**>(type_address));
+    }
+  }
+
+  // Copy objects reachable from the encountered weak collections list.
+  scavenge_visitor.VisitPointer(&encountered_weak_collections_);
+
+  // Copy objects reachable from the code flushing candidates list.
+  MarkCompactCollector* collector = mark_compact_collector();
+  if (collector->is_code_flushing_enabled()) {
+    collector->code_flusher()->IteratePointersToFromSpace(&scavenge_visitor);
+  }
+
+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+  while (isolate()->global_handles()->IterateObjectGroups(
+      &scavenge_visitor, &IsUnscavengedHeapObject)) {
+    new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+  }
+  isolate()->global_handles()->RemoveObjectGroups();
+  isolate()->global_handles()->RemoveImplicitRefGroups();
+
+  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
+      &IsUnscavengedHeapObject);
+  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
+      &scavenge_visitor);
+  new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
+
+  UpdateNewSpaceReferencesInExternalStringTable(
+      &UpdateNewSpaceReferenceInExternalStringTableEntry);
+
+  promotion_queue_.Destroy();
+
+  incremental_marking()->UpdateMarkingDequeAfterScavenge();
+
+  ScavengeWeakObjectRetainer weak_object_retainer(this);
+  ProcessWeakReferences(&weak_object_retainer);
+
+  DCHECK(new_space_front == new_space_.top());
+
+  // Set age mark.
+  new_space_.set_age_mark(new_space_.top());
+
+  new_space_.LowerInlineAllocationLimit(
+      new_space_.inline_allocation_limit_step());
+
+  // Update how much has survived scavenge.
+  IncrementYoungSurvivorsCounter(static_cast<int>(
+      (PromotedSpaceSizeOfObjects() - survived_watermark) + new_space_.Size()));
+
+  LOG(isolate_, ResourceEvent("scavenge", "end"));
+
+  gc_state_ = NOT_IN_GC;
+
+  gc_idle_time_handler_.NotifyScavenge();
+}
+
+
+String* Heap::UpdateNewSpaceReferenceInExternalStringTableEntry(Heap* heap,
+                                                                Object** p) {
+  MapWord first_word = HeapObject::cast(*p)->map_word();
+
+  if (!first_word.IsForwardingAddress()) {
+    // Unreachable external string can be finalized.
+    heap->FinalizeExternalString(String::cast(*p));
+    return NULL;
+  }
+
+  // String is still reachable.
+  return String::cast(first_word.ToForwardingAddress());
+}
+
+
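+// Compacts the new-space external string list in place: strings that stay
+// in new space are kept, promoted strings move to the old-space list, and
+// entries for dead strings are dropped.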
+void Heap::UpdateNewSpaceReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    external_string_table_.Verify();
+  }
+#endif
+
+  if (external_string_table_.new_space_strings_.is_empty()) return;
+
+  Object** start = &external_string_table_.new_space_strings_[0];
+  Object** end = start + external_string_table_.new_space_strings_.length();
+  Object** last = start;
+
+  for (Object** p = start; p < end; ++p) {
+    DCHECK(InFromSpace(*p));
+    String* target = updater_func(this, p);
+
+    if (target == NULL) continue;
+
+    DCHECK(target->IsExternalString());
+
+    if (InNewSpace(target)) {
+      // String is still in new space.  Update the table entry.
+      *last = target;
+      ++last;
+    } else {
+      // String got promoted.  Move it to the old string list.
+      external_string_table_.AddOldString(target);
+    }
+  }
+
+  DCHECK(last <= end);
+  external_string_table_.ShrinkNewStrings(static_cast<int>(last - start));
+}
+
+
+void Heap::UpdateReferencesInExternalStringTable(
+    ExternalStringTableUpdaterCallback updater_func) {
+  // Update old space string references.
+  if (external_string_table_.old_space_strings_.length() > 0) {
+    Object** start = &external_string_table_.old_space_strings_[0];
+    Object** end = start + external_string_table_.old_space_strings_.length();
+    for (Object** p = start; p < end; ++p) *p = updater_func(this, p);
+  }
+
+  UpdateNewSpaceReferencesInExternalStringTable(updater_func);
+}
+
+
+void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
+  ProcessArrayBuffers(retainer);
+  ProcessNativeContexts(retainer);
+  // TODO(mvstanton): AllocationSites only need to be processed during
+  // MARK_COMPACT, as they live in old space. Verify and address.
+  ProcessAllocationSites(retainer);
+}
+
+
+void Heap::ProcessNativeContexts(WeakObjectRetainer* retainer) {
+  Object* head = VisitWeakList<Context>(this, native_contexts_list(), retainer);
+  // Update the head of the list of contexts.
+  set_native_contexts_list(head);
+}
+
+
+void Heap::ProcessArrayBuffers(WeakObjectRetainer* retainer) {
+  Object* array_buffer_obj =
+      VisitWeakList<JSArrayBuffer>(this, array_buffers_list(), retainer);
+  set_array_buffers_list(array_buffer_obj);
+}
+
+
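+// Frees the backing store of every remaining array buffer at heap teardown,
+// walking the weak list manually via the weak_next fields.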
+void Heap::TearDownArrayBuffers() {
+  Object* undefined = undefined_value();
+  for (Object* o = array_buffers_list(); o != undefined;) {
+    JSArrayBuffer* buffer = JSArrayBuffer::cast(o);
+    Runtime::FreeArrayBuffer(isolate(), buffer);
+    o = buffer->weak_next();
+  }
+  set_array_buffers_list(undefined);
+}
+
+
+void Heap::ProcessAllocationSites(WeakObjectRetainer* retainer) {
+  Object* allocation_site_obj =
+      VisitWeakList<AllocationSite>(this, allocation_sites_list(), retainer);
+  set_allocation_sites_list(allocation_site_obj);
+}
+
+
+void Heap::ResetAllAllocationSitesDependentCode(PretenureFlag flag) {
+  DisallowHeapAllocation no_allocation_scope;
+  Object* cur = allocation_sites_list();
+  bool marked = false;
+  while (cur->IsAllocationSite()) {
+    AllocationSite* casted = AllocationSite::cast(cur);
+    if (casted->GetPretenureMode() == flag) {
+      casted->ResetPretenureDecision();
+      casted->set_deopt_dependent_code(true);
+      marked = true;
+    }
+    cur = casted->weak_next();
+  }
+  if (marked) isolate_->stack_guard()->RequestDeoptMarkedAllocationSites();
+}
+
+
+void Heap::EvaluateOldSpaceLocalPretenuring(
+    uint64_t size_of_objects_before_gc) {
+  uint64_t size_of_objects_after_gc = SizeOfObjects();
+  double old_generation_survival_rate =
+      (static_cast<double>(size_of_objects_after_gc) * 100) /
+      static_cast<double>(size_of_objects_before_gc);
+
+  if (old_generation_survival_rate < kOldSurvivalRateLowThreshold) {
+    // Too many objects died in the old generation, pretenuring of wrong
+    // allocation sites may be the cause for that. We have to deopt all
+    // dependent code registered in the allocation sites to re-evaluate
+    // our pretenuring decisions.
+    ResetAllAllocationSitesDependentCode(TENURED);
+    if (FLAG_trace_pretenuring) {
+      PrintF(
+          "Deopt all allocation sites dependent code due to low survival "
+          "rate in the old generation %f\n",
+          old_generation_survival_rate);
+    }
+  }
+}
+
+
+void Heap::VisitExternalResources(v8::ExternalResourceVisitor* visitor) {
+  DisallowHeapAllocation no_allocation;
+  // All external strings are listed in the external string table.
+
+  class ExternalStringTableVisitorAdapter : public ObjectVisitor {
+   public:
+    explicit ExternalStringTableVisitorAdapter(
+        v8::ExternalResourceVisitor* visitor)
+        : visitor_(visitor) {}
+    virtual void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) {
+        DCHECK((*p)->IsExternalString());
+        visitor_->VisitExternalString(
+            Utils::ToLocal(Handle<String>(String::cast(*p))));
+      }
+    }
+
+   private:
+    v8::ExternalResourceVisitor* visitor_;
+  } external_string_table_visitor(visitor);
+
+  external_string_table_.Iterate(&external_string_table_visitor);
+}
+
+
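+// Static visitor used while draining the to-space queue: each slot that
+// still points into new space is forwarded via Heap::ScavengeObject.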
+class NewSpaceScavenger : public StaticNewSpaceVisitor<NewSpaceScavenger> {
+ public:
+  static inline void VisitPointer(Heap* heap, Object** p) {
+    Object* object = *p;
+    if (!heap->InNewSpace(object)) return;
+    Heap::ScavengeObject(reinterpret_cast<HeapObject**>(p),
+                         reinterpret_cast<HeapObject*>(object));
+  }
+};
+
+
+Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
+                         Address new_space_front) {
+  do {
+    SemiSpace::AssertValidRange(new_space_front, new_space_.top());
+    // The addresses new_space_front and new_space_.top() define a
+    // queue of unprocessed copied objects.  Process them until the
+    // queue is empty.
+    while (new_space_front != new_space_.top()) {
+      if (!NewSpacePage::IsAtEnd(new_space_front)) {
+        HeapObject* object = HeapObject::FromAddress(new_space_front);
+        new_space_front +=
+            NewSpaceScavenger::IterateBody(object->map(), object);
+      } else {
+        new_space_front =
+            NewSpacePage::FromLimit(new_space_front)->next_page()->area_start();
+      }
+    }
+
+    // Promote and process all the to-be-promoted objects.
+    {
+      StoreBufferRebuildScope scope(this, store_buffer(),
+                                    &ScavengeStoreBufferCallback);
+      while (!promotion_queue()->is_empty()) {
+        HeapObject* target;
+        int size;
+        promotion_queue()->remove(&target, &size);
+
+        // A promoted object might already have been partially visited
+        // during old space pointer iteration.  Thus we search specifically
+        // for pointers to the from semispace instead of looking for
+        // pointers to new space.
+        DCHECK(!target->IsMap());
+        IterateAndMarkPointersToFromSpace(
+            target->address(), target->address() + size, &ScavengeObject);
+      }
+    }
+
+    // Take another spin if there are now unswept objects in new space
+    // (there are currently no more unswept promoted objects).
+  } while (new_space_front != new_space_.top());
+
+  return new_space_front;
+}
+
+
+STATIC_ASSERT((FixedDoubleArray::kHeaderSize & kDoubleAlignmentMask) ==
+              0);  // NOLINT
+STATIC_ASSERT((ConstantPoolArray::kFirstEntryOffset & kDoubleAlignmentMask) ==
+              0);  // NOLINT
+STATIC_ASSERT((ConstantPoolArray::kExtendedFirstOffset &
+               kDoubleAlignmentMask) == 0);  // NOLINT
+
+
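+// The caller over-allocates by kPointerSize; depending on the raw address,
+// either the leading or the trailing word becomes a one-pointer filler so
+// that the object body ends up on a double-aligned address.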
+INLINE(static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
+                                              int size));
+
+static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
+                                       int size) {
+  if ((OffsetFrom(object->address()) & kDoubleAlignmentMask) != 0) {
+    heap->CreateFillerObjectAt(object->address(), kPointerSize);
+    return HeapObject::FromAddress(object->address() + kPointerSize);
+  } else {
+    heap->CreateFillerObjectAt(object->address() + size - kPointerSize,
+                               kPointerSize);
+    return object;
+  }
+}
+
+
+enum LoggingAndProfiling {
+  LOGGING_AND_PROFILING_ENABLED,
+  LOGGING_AND_PROFILING_DISABLED
+};
+
+
+enum MarksHandling { TRANSFER_MARKS, IGNORE_MARKS };
+
+
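+// Evacuation dispatch table, instantiated four ways: with or without mark
+// transfer (depending on whether incremental marking is active) and with or
+// without the logging and profiling hooks.  See
+// InitializeScavengingVisitorsTables() and SelectScavengingVisitorsTable().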
+template <MarksHandling marks_handling,
+          LoggingAndProfiling logging_and_profiling_mode>
+class ScavengingVisitor : public StaticVisitorBase {
+ public:
+  static void Initialize() {
+    table_.Register(kVisitSeqOneByteString, &EvacuateSeqOneByteString);
+    table_.Register(kVisitSeqTwoByteString, &EvacuateSeqTwoByteString);
+    table_.Register(kVisitShortcutCandidate, &EvacuateShortcutCandidate);
+    table_.Register(kVisitByteArray, &EvacuateByteArray);
+    table_.Register(kVisitFixedArray, &EvacuateFixedArray);
+    table_.Register(kVisitFixedDoubleArray, &EvacuateFixedDoubleArray);
+    table_.Register(kVisitFixedTypedArray, &EvacuateFixedTypedArray);
+    table_.Register(kVisitFixedFloat64Array, &EvacuateFixedFloat64Array);
+
+    table_.Register(
+        kVisitNativeContext,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            Context::kSize>);
+
+    table_.Register(
+        kVisitConsString,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            ConsString::kSize>);
+
+    table_.Register(
+        kVisitSlicedString,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            SlicedString::kSize>);
+
+    table_.Register(
+        kVisitSymbol,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            Symbol::kSize>);
+
+    table_.Register(
+        kVisitSharedFunctionInfo,
+        &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+            SharedFunctionInfo::kSize>);
+
+    table_.Register(kVisitJSWeakCollection,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSArrayBuffer,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSTypedArray,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSDataView,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    table_.Register(kVisitJSRegExp,
+                    &ObjectEvacuationStrategy<POINTER_OBJECT>::Visit);
+
+    if (marks_handling == IGNORE_MARKS) {
+      table_.Register(
+          kVisitJSFunction,
+          &ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+              JSFunction::kSize>);
+    } else {
+      table_.Register(kVisitJSFunction, &EvacuateJSFunction);
+    }
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<DATA_OBJECT>,
+                                   kVisitDataObject, kVisitDataObjectGeneric>();
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+                                   kVisitJSObject, kVisitJSObjectGeneric>();
+
+    table_.RegisterSpecializations<ObjectEvacuationStrategy<POINTER_OBJECT>,
+                                   kVisitStruct, kVisitStructGeneric>();
+  }
+
+  static VisitorDispatchTable<ScavengingCallback>* GetTable() {
+    return &table_;
+  }
+
+ private:
+  enum ObjectContents { DATA_OBJECT, POINTER_OBJECT };
+
+  static void RecordCopiedObject(Heap* heap, HeapObject* obj) {
+    bool should_record = false;
+#ifdef DEBUG
+    should_record = FLAG_heap_stats;
+#endif
+    should_record = should_record || FLAG_log_gc;
+    if (should_record) {
+      if (heap->new_space()->Contains(obj)) {
+        heap->new_space()->RecordAllocation(obj);
+      } else {
+        heap->new_space()->RecordPromotion(obj);
+      }
+    }
+  }
+
+  // Helper function used by the evacuation routines to copy a source object
+  // to an allocated target object and update the forwarding pointer in the
+  // source object.
+  INLINE(static void MigrateObject(Heap* heap, HeapObject* source,
+                                   HeapObject* target, int size)) {
+    // If we migrate into to-space, then the to-space top pointer should be
+    // right after the target object. Incorporate double alignment
+    // over-allocation.
+    DCHECK(!heap->InToSpace(target) ||
+           target->address() + size == heap->new_space()->top() ||
+           target->address() + size + kPointerSize == heap->new_space()->top());
+
+    // Make sure that we do not overwrite the promotion queue which is at
+    // the end of to-space.
+    DCHECK(!heap->InToSpace(target) ||
+           heap->promotion_queue()->IsBelowPromotionQueue(
+               heap->new_space()->top()));
+
+    // Copy the content of source to target.
+    heap->CopyBlock(target->address(), source->address(), size);
+
+    // Set the forwarding address.
+    source->set_map_word(MapWord::FromForwardingAddress(target));
+
+    if (logging_and_profiling_mode == LOGGING_AND_PROFILING_ENABLED) {
+      // Update NewSpace stats if necessary.
+      RecordCopiedObject(heap, target);
+      heap->OnMoveEvent(target, source, size);
+    }
+
+    if (marks_handling == TRANSFER_MARKS) {
+      if (Marking::TransferColor(source, target)) {
+        MemoryChunk::IncrementLiveBytesFromGC(target->address(), size);
+      }
+    }
+  }
+
+  template <int alignment>
+  static inline bool SemiSpaceCopyObject(Map* map, HeapObject** slot,
+                                         HeapObject* object, int object_size) {
+    Heap* heap = map->GetHeap();
+
+    int allocation_size = object_size;
+    if (alignment != kObjectAlignment) {
+      DCHECK(alignment == kDoubleAlignment);
+      allocation_size += kPointerSize;
+    }
+
+    DCHECK(heap->AllowedToBeMigrated(object, NEW_SPACE));
+    AllocationResult allocation =
+        heap->new_space()->AllocateRaw(allocation_size);
+
+    HeapObject* target = NULL;  // Initialization to please compiler.
+    if (allocation.To(&target)) {
+      // Order is important here: Set the promotion limit before storing a
+      // filler for double alignment or migrating the object. Otherwise we
+      // may end up overwriting promotion queue entries when we migrate the
+      // object.
+      heap->promotion_queue()->SetNewLimit(heap->new_space()->top());
+
+      if (alignment != kObjectAlignment) {
+        target = EnsureDoubleAligned(heap, target, allocation_size);
+      }
+
+      // Order is important: slot might be inside of the target if target
+      // was allocated over a dead object and slot comes from the store
+      // buffer.
+      *slot = target;
+      MigrateObject(heap, object, target, object_size);
+
+      heap->IncrementSemiSpaceCopiedObjectSize(object_size);
+      return true;
+    }
+    return false;
+  }
+
+
+  template <ObjectContents object_contents, int alignment>
+  static inline bool PromoteObject(Map* map, HeapObject** slot,
+                                   HeapObject* object, int object_size) {
+    Heap* heap = map->GetHeap();
+
+    int allocation_size = object_size;
+    if (alignment != kObjectAlignment) {
+      DCHECK(alignment == kDoubleAlignment);
+      allocation_size += kPointerSize;
+    }
+
+    AllocationResult allocation;
+    if (object_contents == DATA_OBJECT) {
+      DCHECK(heap->AllowedToBeMigrated(object, OLD_DATA_SPACE));
+      allocation = heap->old_data_space()->AllocateRaw(allocation_size);
+    } else {
+      DCHECK(heap->AllowedToBeMigrated(object, OLD_POINTER_SPACE));
+      allocation = heap->old_pointer_space()->AllocateRaw(allocation_size);
+    }
+
+    HeapObject* target = NULL;  // Initialization to please compiler.
+    if (allocation.To(&target)) {
+      if (alignment != kObjectAlignment) {
+        target = EnsureDoubleAligned(heap, target, allocation_size);
+      }
+
+      // Order is important: slot might be inside of the target if target
+      // was allocated over a dead object and slot comes from the store
+      // buffer.
+      *slot = target;
+      MigrateObject(heap, object, target, object_size);
+
+      if (object_contents == POINTER_OBJECT) {
+        if (map->instance_type() == JS_FUNCTION_TYPE) {
+          heap->promotion_queue()->insert(target,
+                                          JSFunction::kNonWeakFieldsEndOffset);
+        } else {
+          heap->promotion_queue()->insert(target, object_size);
+        }
+      }
+      heap->IncrementPromotedObjectsSize(object_size);
+      return true;
+    }
+    return false;
+  }
+
+
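+  // Central evacuation routine: objects that are not yet old enough are
+  // copied within the semispaces; otherwise (or if the copy fails due to
+  // fragmentation) they are promoted to old space, with a semispace copy as
+  // the last resort.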
+  template <ObjectContents object_contents, int alignment>
+  static inline void EvacuateObject(Map* map, HeapObject** slot,
+                                    HeapObject* object, int object_size) {
+    SLOW_DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+    SLOW_DCHECK(object->Size() == object_size);
+    Heap* heap = map->GetHeap();
+
+    if (!heap->ShouldBePromoted(object->address(), object_size)) {
+      // A semi-space copy may fail due to fragmentation. In that case, we
+      // try to promote the object.
+      if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) {
+        return;
+      }
+    }
+
+    if (PromoteObject<object_contents, alignment>(map, slot, object,
+                                                  object_size)) {
+      return;
+    }
+
+    // If promotion failed, we try to copy the object to the other semi-space.
+    if (SemiSpaceCopyObject<alignment>(map, slot, object, object_size)) return;
+
+    UNREACHABLE();
+  }
+
+
+  static inline void EvacuateJSFunction(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+    ObjectEvacuationStrategy<POINTER_OBJECT>::template VisitSpecialized<
+        JSFunction::kSize>(map, slot, object);
+
+    MapWord map_word = object->map_word();
+    DCHECK(map_word.IsForwardingAddress());
+    HeapObject* target = map_word.ToForwardingAddress();
+
+    MarkBit mark_bit = Marking::MarkBitFrom(target);
+    if (Marking::IsBlack(mark_bit)) {
+      // This object is black and it might not be rescanned by the marker.
+      // We should explicitly record the code entry slot for compaction
+      // because promotion queue processing (IterateAndMarkPointersToFromSpace)
+      // will miss it, as it is not HeapObject-tagged.
+      Address code_entry_slot =
+          target->address() + JSFunction::kCodeEntryOffset;
+      Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot));
+      map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot(
+          code_entry_slot, code);
+    }
+  }
+
+
+  static inline void EvacuateFixedArray(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+    int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
+                                                     object_size);
+  }
+
+
+  static inline void EvacuateFixedDoubleArray(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+    int object_size = FixedDoubleArray::SizeFor(length);
+    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateFixedTypedArray(Map* map, HeapObject** slot,
+                                             HeapObject* object) {
+    int object_size = reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateFixedFloat64Array(Map* map, HeapObject** slot,
+                                               HeapObject* object) {
+    int object_size = reinterpret_cast<FixedFloat64Array*>(object)->size();
+    EvacuateObject<DATA_OBJECT, kDoubleAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateByteArray(Map* map, HeapObject** slot,
+                                       HeapObject* object) {
+    int object_size = reinterpret_cast<ByteArray*>(object)->ByteArraySize();
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateSeqOneByteString(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int object_size = SeqOneByteString::cast(object)
+                          ->SeqOneByteStringSize(map->instance_type());
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
+  static inline void EvacuateSeqTwoByteString(Map* map, HeapObject** slot,
+                                              HeapObject* object) {
+    int object_size = SeqTwoByteString::cast(object)
+                          ->SeqTwoByteStringSize(map->instance_type());
+    EvacuateObject<DATA_OBJECT, kObjectAlignment>(map, slot, object,
+                                                  object_size);
+  }
+
+
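+  // A cons string whose second part is the empty string is short-circuited:
+  // the slot is redirected to the first part instead of copying the
+  // wrapper.  This applies only when marks need not be transferred.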
+  static inline void EvacuateShortcutCandidate(Map* map, HeapObject** slot,
+                                               HeapObject* object) {
+    DCHECK(IsShortcutCandidate(map->instance_type()));
+
+    Heap* heap = map->GetHeap();
+
+    if (marks_handling == IGNORE_MARKS &&
+        ConsString::cast(object)->unchecked_second() == heap->empty_string()) {
+      HeapObject* first =
+          HeapObject::cast(ConsString::cast(object)->unchecked_first());
+
+      *slot = first;
+
+      if (!heap->InNewSpace(first)) {
+        object->set_map_word(MapWord::FromForwardingAddress(first));
+        return;
+      }
+
+      MapWord first_word = first->map_word();
+      if (first_word.IsForwardingAddress()) {
+        HeapObject* target = first_word.ToForwardingAddress();
+
+        *slot = target;
+        object->set_map_word(MapWord::FromForwardingAddress(target));
+        return;
+      }
+
+      heap->DoScavengeObject(first->map(), slot, first);
+      object->set_map_word(MapWord::FromForwardingAddress(*slot));
+      return;
+    }
+
+    int object_size = ConsString::kSize;
+    EvacuateObject<POINTER_OBJECT, kObjectAlignment>(map, slot, object,
+                                                     object_size);
+  }
+
+  template <ObjectContents object_contents>
+  class ObjectEvacuationStrategy {
+   public:
+    template <int object_size>
+    static inline void VisitSpecialized(Map* map, HeapObject** slot,
+                                        HeapObject* object) {
+      EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
+                                                        object_size);
+    }
+
+    static inline void Visit(Map* map, HeapObject** slot, HeapObject* object) {
+      int object_size = map->instance_size();
+      EvacuateObject<object_contents, kObjectAlignment>(map, slot, object,
+                                                        object_size);
+    }
+  };
+
+  static VisitorDispatchTable<ScavengingCallback> table_;
+};
+
+
+template <MarksHandling marks_handling,
+          LoggingAndProfiling logging_and_profiling_mode>
+VisitorDispatchTable<ScavengingCallback>
+    ScavengingVisitor<marks_handling, logging_and_profiling_mode>::table_;
+
+
+static void InitializeScavengingVisitorsTables() {
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::Initialize();
+  ScavengingVisitor<TRANSFER_MARKS,
+                    LOGGING_AND_PROFILING_ENABLED>::Initialize();
+  ScavengingVisitor<IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::Initialize();
+}
+
+
+void Heap::SelectScavengingVisitorsTable() {
+  bool logging_and_profiling =
+      FLAG_verify_predictable || isolate()->logger()->is_logging() ||
+      isolate()->cpu_profiler()->is_profiling() ||
+      (isolate()->heap_profiler() != NULL &&
+       isolate()->heap_profiler()->is_tracking_object_moves());
+
+  if (!incremental_marking()->IsMarking()) {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          IGNORE_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          IGNORE_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+  } else {
+    if (!logging_and_profiling) {
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          TRANSFER_MARKS, LOGGING_AND_PROFILING_DISABLED>::GetTable());
+    } else {
+      scavenging_visitors_table_.CopyFrom(ScavengingVisitor<
+          TRANSFER_MARKS, LOGGING_AND_PROFILING_ENABLED>::GetTable());
+    }
+
+    if (incremental_marking()->IsCompacting()) {
+      // When compacting, forbid short-circuiting of cons strings: the
+      // scavenging code relies on the fact that a new space object cannot
+      // be evacuated into an evacuation candidate, and short-circuiting
+      // would violate this assumption.
+      scavenging_visitors_table_.Register(
+          StaticVisitorBase::kVisitShortcutCandidate,
+          scavenging_visitors_table_.GetVisitorById(
+              StaticVisitorBase::kVisitConsString));
+    }
+  }
+}
+
+
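+// Slow path of ScavengeObject: the object has no forwarding address yet,
+// so dispatch on its map to the currently selected evacuation visitor.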
+void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
+  SLOW_DCHECK(object->GetIsolate()->heap()->InFromSpace(object));
+  MapWord first_word = object->map_word();
+  SLOW_DCHECK(!first_word.IsForwardingAddress());
+  Map* map = first_word.ToMap();
+  map->GetHeap()->DoScavengeObject(map, p, object);
+}
+
+
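+// Allocates a map while the root maps themselves are still being created;
+// fields that would point at other roots (prototype, constructor,
+// descriptors) are left unset and patched up later in CreateInitialMaps().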
+AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
+                                          int instance_size) {
+  Object* result;
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  // Map::cast cannot be used due to uninitialized map field.
+  reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
+  reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
+  reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+  reinterpret_cast<Map*>(result)->set_visitor_id(
+      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+  reinterpret_cast<Map*>(result)->set_inobject_properties(0);
+  reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
+  reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
+  reinterpret_cast<Map*>(result)->set_bit_field(0);
+  reinterpret_cast<Map*>(result)->set_bit_field2(0);
+  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+                   Map::OwnsDescriptors::encode(true);
+  reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateMap(InstanceType instance_type,
+                                   int instance_size,
+                                   ElementsKind elements_kind) {
+  HeapObject* result;
+  AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  result->set_map_no_write_barrier(meta_map());
+  Map* map = Map::cast(result);
+  map->set_instance_type(instance_type);
+  map->set_visitor_id(
+      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+  map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
+  map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
+  map->set_instance_size(instance_size);
+  map->set_inobject_properties(0);
+  map->set_pre_allocated_property_fields(0);
+  map->set_code_cache(empty_fixed_array(), SKIP_WRITE_BARRIER);
+  map->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+                          SKIP_WRITE_BARRIER);
+  map->init_back_pointer(undefined_value());
+  map->set_unused_property_fields(0);
+  map->set_instance_descriptors(empty_descriptor_array());
+  map->set_bit_field(0);
+  map->set_bit_field2(1 << Map::kIsExtensible);
+  int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
+                   Map::OwnsDescriptors::encode(true);
+  map->set_bit_field3(bit_field3);
+  map->set_elements_kind(elements_kind);
+
+  return map;
+}
+
+
+AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
+                                            AllocationSpace space) {
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, space);
+    if (!allocation.To(&obj)) return allocation;
+  }
+#ifdef DEBUG
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  DCHECK(chunk->owner()->identity() == space);
+#endif
+  CreateFillerObjectAt(obj->address(), size);
+  return obj;
+}
+
+
+const Heap::StringTypeTable Heap::string_type_table[] = {
+#define STRING_TYPE_ELEMENT(type, size, name, camel_name) \
+  { type, size, k##camel_name##MapRootIndex }             \
+  ,
+    STRING_TYPE_LIST(STRING_TYPE_ELEMENT)
+#undef STRING_TYPE_ELEMENT
+};
+
+
+const Heap::ConstantStringTable Heap::constant_string_table[] = {
+#define CONSTANT_STRING_ELEMENT(name, contents) \
+  { contents, k##name##RootIndex }              \
+  ,
+    INTERNALIZED_STRING_LIST(CONSTANT_STRING_ELEMENT)
+#undef CONSTANT_STRING_ELEMENT
+};
+
+
+const Heap::StructTable Heap::struct_table[] = {
+#define STRUCT_TABLE_ELEMENT(NAME, Name, name)        \
+  { NAME##_TYPE, Name::kSize, k##Name##MapRootIndex } \
+  ,
+    STRUCT_LIST(STRUCT_TABLE_ELEMENT)
+#undef STRUCT_TABLE_ELEMENT
+};
+
+
+bool Heap::CreateInitialMaps() {
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocatePartialMap(MAP_TYPE, Map::kSize);
+    if (!allocation.To(&obj)) return false;
+  }
+  // Map::cast cannot be used due to uninitialized map field.
+  Map* new_meta_map = reinterpret_cast<Map*>(obj);
+  set_meta_map(new_meta_map);
+  new_meta_map->set_map(new_meta_map);
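+  // The meta map is its own map, which terminates the map-of-maps chain.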
+
+  {  // Partial map allocation
+#define ALLOCATE_PARTIAL_MAP(instance_type, size, field_name)                \
+  {                                                                          \
+    Map* map;                                                                \
+    if (!AllocatePartialMap((instance_type), (size)).To(&map)) return false; \
+    set_##field_name##_map(map);                                             \
+  }
+
+    ALLOCATE_PARTIAL_MAP(FIXED_ARRAY_TYPE, kVariableSizeSentinel, fixed_array);
+    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, undefined);
+    ALLOCATE_PARTIAL_MAP(ODDBALL_TYPE, Oddball::kSize, null);
+    ALLOCATE_PARTIAL_MAP(CONSTANT_POOL_ARRAY_TYPE, kVariableSizeSentinel,
+                         constant_pool_array);
+
+#undef ALLOCATE_PARTIAL_MAP
+  }
+
+  // Allocate the empty array.
+  {
+    AllocationResult allocation = AllocateEmptyFixedArray();
+    if (!allocation.To(&obj)) return false;
+  }
+  set_empty_fixed_array(FixedArray::cast(obj));
+
+  {
+    AllocationResult allocation = Allocate(null_map(), OLD_POINTER_SPACE);
+    if (!allocation.To(&obj)) return false;
+  }
+  set_null_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kNull);
+
+  {
+    AllocationResult allocation = Allocate(undefined_map(), OLD_POINTER_SPACE);
+    if (!allocation.To(&obj)) return false;
+  }
+  set_undefined_value(Oddball::cast(obj));
+  Oddball::cast(obj)->set_kind(Oddball::kUndefined);
+  DCHECK(!InNewSpace(undefined_value()));
+
+  // Set preliminary exception sentinel value before actually initializing it.
+  set_exception(null_value());
+
+  // Allocate the empty descriptor array.
+  {
+    AllocationResult allocation = AllocateEmptyFixedArray();
+    if (!allocation.To(&obj)) return false;
+  }
+  set_empty_descriptor_array(DescriptorArray::cast(obj));
+
+  // Allocate the constant pool array.
+  {
+    AllocationResult allocation = AllocateEmptyConstantPoolArray();
+    if (!allocation.To(&obj)) return false;
+  }
+  set_empty_constant_pool_array(ConstantPoolArray::cast(obj));
+
+  // Fix the instance_descriptors for the existing maps.
+  meta_map()->set_code_cache(empty_fixed_array());
+  meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+  meta_map()->init_back_pointer(undefined_value());
+  meta_map()->set_instance_descriptors(empty_descriptor_array());
+
+  fixed_array_map()->set_code_cache(empty_fixed_array());
+  fixed_array_map()->set_dependent_code(
+      DependentCode::cast(empty_fixed_array()));
+  fixed_array_map()->init_back_pointer(undefined_value());
+  fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
+
+  undefined_map()->set_code_cache(empty_fixed_array());
+  undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+  undefined_map()->init_back_pointer(undefined_value());
+  undefined_map()->set_instance_descriptors(empty_descriptor_array());
+
+  null_map()->set_code_cache(empty_fixed_array());
+  null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
+  null_map()->init_back_pointer(undefined_value());
+  null_map()->set_instance_descriptors(empty_descriptor_array());
+
+  constant_pool_array_map()->set_code_cache(empty_fixed_array());
+  constant_pool_array_map()->set_dependent_code(
+      DependentCode::cast(empty_fixed_array()));
+  constant_pool_array_map()->init_back_pointer(undefined_value());
+  constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
+
+  // Fix prototype object for existing maps.
+  meta_map()->set_prototype(null_value());
+  meta_map()->set_constructor(null_value());
+
+  fixed_array_map()->set_prototype(null_value());
+  fixed_array_map()->set_constructor(null_value());
+
+  undefined_map()->set_prototype(null_value());
+  undefined_map()->set_constructor(null_value());
+
+  null_map()->set_prototype(null_value());
+  null_map()->set_constructor(null_value());
+
+  constant_pool_array_map()->set_prototype(null_value());
+  constant_pool_array_map()->set_constructor(null_value());
+
+  {  // Map allocation
+#define ALLOCATE_MAP(instance_type, size, field_name)               \
+  {                                                                 \
+    Map* map;                                                       \
+    if (!AllocateMap((instance_type), size).To(&map)) return false; \
+    set_##field_name##_map(map);                                    \
+  }
+
+#define ALLOCATE_VARSIZE_MAP(instance_type, field_name) \
+  ALLOCATE_MAP(instance_type, kVariableSizeSentinel, field_name)
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, fixed_cow_array)
+    DCHECK(fixed_array_map() != fixed_cow_array_map());
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, scope_info)
+    ALLOCATE_MAP(HEAP_NUMBER_TYPE, HeapNumber::kSize, heap_number)
+    ALLOCATE_MAP(MUTABLE_HEAP_NUMBER_TYPE, HeapNumber::kSize,
+                 mutable_heap_number)
+    ALLOCATE_MAP(SYMBOL_TYPE, Symbol::kSize, symbol)
+    ALLOCATE_MAP(FOREIGN_TYPE, Foreign::kSize, foreign)
+
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, the_hole);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, boolean);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, uninitialized);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, arguments_marker);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, no_interceptor_result_sentinel);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, exception);
+    ALLOCATE_MAP(ODDBALL_TYPE, Oddball::kSize, termination_exception);
+
+    for (unsigned i = 0; i < arraysize(string_type_table); i++) {
+      const StringTypeTable& entry = string_type_table[i];
+      {
+        AllocationResult allocation = AllocateMap(entry.type, entry.size);
+        if (!allocation.To(&obj)) return false;
+      }
+      // Mark cons string maps as unstable, because their objects can change
+      // maps during GC.
+      Map* map = Map::cast(obj);
+      if (StringShape(entry.type).IsCons()) map->mark_unstable();
+      roots_[entry.index] = map;
+    }
+
+    ALLOCATE_VARSIZE_MAP(STRING_TYPE, undetectable_string)
+    undetectable_string_map()->set_is_undetectable();
+
+    ALLOCATE_VARSIZE_MAP(ONE_BYTE_STRING_TYPE, undetectable_one_byte_string);
+    undetectable_one_byte_string_map()->set_is_undetectable();
+
+    ALLOCATE_VARSIZE_MAP(FIXED_DOUBLE_ARRAY_TYPE, fixed_double_array)
+    ALLOCATE_VARSIZE_MAP(BYTE_ARRAY_TYPE, byte_array)
+    ALLOCATE_VARSIZE_MAP(FREE_SPACE_TYPE, free_space)
+
+#define ALLOCATE_EXTERNAL_ARRAY_MAP(Type, type, TYPE, ctype, size)        \
+  ALLOCATE_MAP(EXTERNAL_##TYPE##_ARRAY_TYPE, ExternalArray::kAlignedSize, \
+               external_##type##_array)
+
+    TYPED_ARRAYS(ALLOCATE_EXTERNAL_ARRAY_MAP)
+#undef ALLOCATE_EXTERNAL_ARRAY_MAP
+
+#define ALLOCATE_FIXED_TYPED_ARRAY_MAP(Type, type, TYPE, ctype, size) \
+  ALLOCATE_VARSIZE_MAP(FIXED_##TYPE##_ARRAY_TYPE, fixed_##type##_array)
+
+    TYPED_ARRAYS(ALLOCATE_FIXED_TYPED_ARRAY_MAP)
+#undef ALLOCATE_FIXED_TYPED_ARRAY_MAP
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, sloppy_arguments_elements)
+
+    ALLOCATE_VARSIZE_MAP(CODE_TYPE, code)
+
+    ALLOCATE_MAP(CELL_TYPE, Cell::kSize, cell)
+    ALLOCATE_MAP(PROPERTY_CELL_TYPE, PropertyCell::kSize, global_property_cell)
+    ALLOCATE_MAP(FILLER_TYPE, kPointerSize, one_pointer_filler)
+    ALLOCATE_MAP(FILLER_TYPE, 2 * kPointerSize, two_pointer_filler)
+
+
+    for (unsigned i = 0; i < arraysize(struct_table); i++) {
+      const StructTable& entry = struct_table[i];
+      Map* map;
+      if (!AllocateMap(entry.type, entry.size).To(&map)) return false;
+      roots_[entry.index] = map;
+    }
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, hash_table)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, ordered_hash_table)
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, function_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, catch_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
+
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
+    native_context_map()->set_dictionary_map(true);
+    native_context_map()->set_visitor_id(
+        StaticVisitorBase::kVisitNativeContext);
+
+    ALLOCATE_MAP(SHARED_FUNCTION_INFO_TYPE, SharedFunctionInfo::kAlignedSize,
+                 shared_function_info)
+
+    ALLOCATE_MAP(JS_MESSAGE_OBJECT_TYPE, JSMessageObject::kSize, message_object)
+    ALLOCATE_MAP(JS_OBJECT_TYPE, JSObject::kHeaderSize + kPointerSize, external)
+    external_map()->set_is_extensible(false);
+#undef ALLOCATE_VARSIZE_MAP
+#undef ALLOCATE_MAP
+  }
+
+  {  // Empty arrays
+    {
+      ByteArray* byte_array;
+      if (!AllocateByteArray(0, TENURED).To(&byte_array)) return false;
+      set_empty_byte_array(byte_array);
+    }
+
+#define ALLOCATE_EMPTY_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)  \
+  {                                                                   \
+    ExternalArray* obj;                                               \
+    if (!AllocateEmptyExternalArray(kExternal##Type##Array).To(&obj)) \
+      return false;                                                   \
+    set_empty_external_##type##_array(obj);                           \
+  }
+
+    TYPED_ARRAYS(ALLOCATE_EMPTY_EXTERNAL_ARRAY)
+#undef ALLOCATE_EMPTY_EXTERNAL_ARRAY
+
+#define ALLOCATE_EMPTY_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+  {                                                                     \
+    FixedTypedArrayBase* obj;                                           \
+    if (!AllocateEmptyFixedTypedArray(kExternal##Type##Array).To(&obj)) \
+      return false;                                                     \
+    set_empty_fixed_##type##_array(obj);                                \
+  }
+
+    TYPED_ARRAYS(ALLOCATE_EMPTY_FIXED_TYPED_ARRAY)
+#undef ALLOCATE_EMPTY_FIXED_TYPED_ARRAY
+  }
+  DCHECK(!InNewSpace(empty_fixed_array()));
+  return true;
+}
+
+
+AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
+                                          PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate heap numbers in paged
+  // spaces.
+  int size = HeapNumber::kSize;
+  STATIC_ASSERT(HeapNumber::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  Map* map = mode == MUTABLE ? mutable_heap_number_map() : heap_number_map();
+  HeapObject::cast(result)->set_map_no_write_barrier(map);
+  HeapNumber::cast(result)->set_value(value);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateCell(Object* value) {
+  int size = Cell::kSize;
+  STATIC_ASSERT(Cell::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, CELL_SPACE, CELL_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+  result->set_map_no_write_barrier(cell_map());
+  Cell::cast(result)->set_value(value);
+  return result;
+}
+
+
+AllocationResult Heap::AllocatePropertyCell() {
+  int size = PropertyCell::kSize;
+  STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  HeapObject* result;
+  AllocationResult allocation =
+      AllocateRaw(size, PROPERTY_CELL_SPACE, PROPERTY_CELL_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  result->set_map_no_write_barrier(global_property_cell_map());
+  PropertyCell* cell = PropertyCell::cast(result);
+  cell->set_dependent_code(DependentCode::cast(empty_fixed_array()),
+                           SKIP_WRITE_BARRIER);
+  cell->set_value(the_hole_value());
+  cell->set_type(HeapType::None());
+  return result;
+}
+
+
+void Heap::CreateApiObjects() {
+  HandleScope scope(isolate());
+  Factory* factory = isolate()->factory();
+  Handle<Map> new_neander_map =
+      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+
+  // Don't use Smi-only elements optimizations for objects with the neander
+  // map.  There are too many cases where element values are set directly,
+  // with no bottleneck to trap the Smi-only -> fast elements transition,
+  // and there appears to be no benefit in optimizing this case.
+  new_neander_map->set_elements_kind(TERMINAL_FAST_ELEMENTS_KIND);
+  set_neander_map(*new_neander_map);
+
+  Handle<JSObject> listeners = factory->NewNeanderObject();
+  Handle<FixedArray> elements = factory->NewFixedArray(2);
+  elements->set(0, Smi::FromInt(0));
+  listeners->set_elements(*elements);
+  set_message_listeners(*listeners);
+}
+
+
+void Heap::CreateJSEntryStub() {
+  JSEntryStub stub(isolate(), StackFrame::ENTRY);
+  set_js_entry_code(*stub.GetCode());
+}
+
+
+void Heap::CreateJSConstructEntryStub() {
+  JSEntryStub stub(isolate(), StackFrame::ENTRY_CONSTRUCT);
+  set_js_construct_entry_code(*stub.GetCode());
+}
+
+
+void Heap::CreateFixedStubs() {
+  // Here we create roots for fixed stubs.  They are needed at GC
+  // for cooking and uncooking (check out frames.cc).
+  // This eliminates the need for doing dictionary lookups in the
+  // stub cache for these stubs.
+  HandleScope scope(isolate());
+
+  // Create stubs that should be there, so we don't unexpectedly have to
+  // create them if we need them during the creation of another stub.
+  // Stub creation mixes raw pointers and handles in an unsafe manner so
+  // we cannot create stubs while we are creating stubs.
+  CodeStub::GenerateStubsAheadOfTime(isolate());
+
+  // MacroAssembler::Abort calls (usually enabled with --debug-code) depend on
+  // CEntryStub, so we need to call GenerateStubsAheadOfTime before JSEntryStub
+  // is created.
+
+  // gcc-4.4 has a problem generating correct code for the following snippet:
+  // {  JSEntryStub stub;
+  //    js_entry_code_ = *stub.GetCode();
+  // }
+  // {  JSConstructEntryStub stub;
+  //    js_construct_entry_code_ = *stub.GetCode();
+  // }
+  // To work around the problem, make separate functions without inlining.
+  Heap::CreateJSEntryStub();
+  Heap::CreateJSConstructEntryStub();
+}
+
+
+void Heap::CreateInitialObjects() {
+  HandleScope scope(isolate());
+  Factory* factory = isolate()->factory();
+
+  // The -0 value must be set before NewNumber works.
+  set_minus_zero_value(*factory->NewHeapNumber(-0.0, IMMUTABLE, TENURED));
+  DCHECK(std::signbit(minus_zero_value()->Number()) != 0);
+
+  set_nan_value(
+      *factory->NewHeapNumber(base::OS::nan_value(), IMMUTABLE, TENURED));
+  set_infinity_value(*factory->NewHeapNumber(V8_INFINITY, IMMUTABLE, TENURED));
+
+  // The hole has not been created yet, but we want to put something
+  // predictable in the gaps in the string table, so let's make that Smi zero.
+  set_the_hole_value(reinterpret_cast<Oddball*>(Smi::FromInt(0)));
+
+  // Allocate initial string table.
+  set_string_table(*StringTable::New(isolate(), kInitialStringTableSize));
+
+  // Finish initializing oddballs after creating the string table.
+  Oddball::Initialize(isolate(), factory->undefined_value(), "undefined",
+                      factory->nan_value(), Oddball::kUndefined);
+
+  // Initialize the null_value.
+  Oddball::Initialize(isolate(), factory->null_value(), "null",
+                      handle(Smi::FromInt(0), isolate()), Oddball::kNull);
+
+  set_true_value(*factory->NewOddball(factory->boolean_map(), "true",
+                                      handle(Smi::FromInt(1), isolate()),
+                                      Oddball::kTrue));
+
+  set_false_value(*factory->NewOddball(factory->boolean_map(), "false",
+                                       handle(Smi::FromInt(0), isolate()),
+                                       Oddball::kFalse));
+
+  set_the_hole_value(*factory->NewOddball(factory->the_hole_map(), "hole",
+                                          handle(Smi::FromInt(-1), isolate()),
+                                          Oddball::kTheHole));
+
+  set_uninitialized_value(*factory->NewOddball(
+      factory->uninitialized_map(), "uninitialized",
+      handle(Smi::FromInt(-1), isolate()), Oddball::kUninitialized));
+
+  set_arguments_marker(*factory->NewOddball(
+      factory->arguments_marker_map(), "arguments_marker",
+      handle(Smi::FromInt(-4), isolate()), Oddball::kArgumentMarker));
+
+  set_no_interceptor_result_sentinel(*factory->NewOddball(
+      factory->no_interceptor_result_sentinel_map(),
+      "no_interceptor_result_sentinel", handle(Smi::FromInt(-2), isolate()),
+      Oddball::kOther));
+
+  set_termination_exception(*factory->NewOddball(
+      factory->termination_exception_map(), "termination_exception",
+      handle(Smi::FromInt(-3), isolate()), Oddball::kOther));
+
+  set_exception(*factory->NewOddball(factory->exception_map(), "exception",
+                                     handle(Smi::FromInt(-5), isolate()),
+                                     Oddball::kException));
+
+  for (unsigned i = 0; i < arraysize(constant_string_table); i++) {
+    Handle<String> str =
+        factory->InternalizeUtf8String(constant_string_table[i].contents);
+    roots_[constant_string_table[i].index] = *str;
+  }
+
+  // Allocate the hidden string which is used to identify the hidden properties
+  // in JSObjects. The hash code has a special value so that it will not match
+  // the empty string when searching for the property. It cannot be part of the
+  // loop above because it needs to be allocated manually with the special
+  // hash code in place. The hash code for the hidden_string is zero to ensure
+  // that it will always be at the first entry in property descriptors.
+  hidden_string_ = *factory->NewOneByteInternalizedString(
+      OneByteVector("", 0), String::kEmptyStringHash);
+
+  // Create the code_stubs dictionary. The initial size is set to avoid
+  // expanding the dictionary during bootstrapping.
+  set_code_stubs(*UnseededNumberDictionary::New(isolate(), 128));
+
+  // Create the non_monomorphic_cache used in stub-cache.cc. The initial size
+  // is set to avoid expanding the dictionary during bootstrapping.
+  set_non_monomorphic_cache(*UnseededNumberDictionary::New(isolate(), 64));
+
+  set_polymorphic_code_cache(PolymorphicCodeCache::cast(
+      *factory->NewStruct(POLYMORPHIC_CODE_CACHE_TYPE)));
+
+  set_instanceof_cache_function(Smi::FromInt(0));
+  set_instanceof_cache_map(Smi::FromInt(0));
+  set_instanceof_cache_answer(Smi::FromInt(0));
+
+  CreateFixedStubs();
+
+  // Allocate the dictionary of intrinsic function names.
+  Handle<NameDictionary> intrinsic_names =
+      NameDictionary::New(isolate(), Runtime::kNumFunctions, TENURED);
+  Runtime::InitializeIntrinsicFunctionNames(isolate(), intrinsic_names);
+  set_intrinsic_function_names(*intrinsic_names);
+
+  set_number_string_cache(
+      *factory->NewFixedArray(kInitialNumberStringCacheSize * 2, TENURED));
+
+  // Allocate cache for single character one byte strings.
+  set_single_character_string_cache(
+      *factory->NewFixedArray(String::kMaxOneByteCharCode + 1, TENURED));
+
+  // Allocate cache for string split and regexp-multiple.
+  set_string_split_cache(*factory->NewFixedArray(
+      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+  set_regexp_multiple_cache(*factory->NewFixedArray(
+      RegExpResultsCache::kRegExpResultsCacheSize, TENURED));
+
+  // Allocate cache for external strings pointing to native source code.
+  set_natives_source_cache(
+      *factory->NewFixedArray(Natives::GetBuiltinsCount()));
+
+  set_undefined_cell(*factory->NewCell(factory->undefined_value()));
+
+  // The symbol registry is initialized lazily.
+  set_symbol_registry(undefined_value());
+
+  // Allocate object to hold object observation state.
+  set_observation_state(*factory->NewJSObjectFromMap(
+      factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize)));
+
+  // Microtask queue uses the empty fixed array as a sentinel for "empty".
+  // The number of queued microtasks is stored in
+  // Isolate::pending_microtask_count().
+  set_microtask_queue(empty_fixed_array());
+
+  set_detailed_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
+  set_elements_transition_symbol(*factory->NewPrivateOwnSymbol());
+  set_frozen_symbol(*factory->NewPrivateOwnSymbol());
+  set_megamorphic_symbol(*factory->NewPrivateOwnSymbol());
+  set_premonomorphic_symbol(*factory->NewPrivateOwnSymbol());
+  set_generic_symbol(*factory->NewPrivateOwnSymbol());
+  set_nonexistent_symbol(*factory->NewPrivateOwnSymbol());
+  set_normal_ic_symbol(*factory->NewPrivateOwnSymbol());
+  set_observed_symbol(*factory->NewPrivateOwnSymbol());
+  set_stack_trace_symbol(*factory->NewPrivateOwnSymbol());
+  set_uninitialized_symbol(*factory->NewPrivateOwnSymbol());
+  set_home_object_symbol(*factory->NewPrivateOwnSymbol());
+
+  Handle<SeededNumberDictionary> slow_element_dictionary =
+      SeededNumberDictionary::New(isolate(), 0, TENURED);
+  slow_element_dictionary->set_requires_slow_elements();
+  set_empty_slow_element_dictionary(*slow_element_dictionary);
+
+  set_materialized_objects(*factory->NewFixedArray(0, TENURED));
+
+  // Handling of script id generation is in Factory::NewScript.
+  set_last_script_id(Smi::FromInt(v8::UnboundScript::kNoScriptId));
+
+  set_allocation_sites_scratchpad(
+      *factory->NewFixedArray(kAllocationSiteScratchpadSize, TENURED));
+  InitializeAllocationSitesScratchpad();
+
+  // Initialize keyed lookup cache.
+  isolate_->keyed_lookup_cache()->Clear();
+
+  // Initialize context slot cache.
+  isolate_->context_slot_cache()->Clear();
+
+  // Initialize descriptor cache.
+  isolate_->descriptor_lookup_cache()->Clear();
+
+  // Initialize compilation cache.
+  isolate_->compilation_cache()->Clear();
+}
+
+
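+// Most roots are set once during bootstrapping and never rewritten; the
+// handful below (caches, stack limits, deopt PC offsets, the string table)
+// may change later, which is why RootCanBeTreatedAsConstant below excludes
+// them.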
+bool Heap::RootCanBeWrittenAfterInitialization(Heap::RootListIndex root_index) {
+  RootListIndex writable_roots[] = {
+      kStoreBufferTopRootIndex,
+      kStackLimitRootIndex,
+      kNumberStringCacheRootIndex,
+      kInstanceofCacheFunctionRootIndex,
+      kInstanceofCacheMapRootIndex,
+      kInstanceofCacheAnswerRootIndex,
+      kCodeStubsRootIndex,
+      kNonMonomorphicCacheRootIndex,
+      kPolymorphicCodeCacheRootIndex,
+      kLastScriptIdRootIndex,
+      kEmptyScriptRootIndex,
+      kRealStackLimitRootIndex,
+      kArgumentsAdaptorDeoptPCOffsetRootIndex,
+      kConstructStubDeoptPCOffsetRootIndex,
+      kGetterStubDeoptPCOffsetRootIndex,
+      kSetterStubDeoptPCOffsetRootIndex,
+      kStringTableRootIndex,
+  };
+
+  for (unsigned int i = 0; i < arraysize(writable_roots); i++) {
+    if (root_index == writable_roots[i]) return true;
+  }
+  return false;
+}
+
+
+bool Heap::RootCanBeTreatedAsConstant(RootListIndex root_index) {
+  return !RootCanBeWrittenAfterInitialization(root_index) &&
+         !InNewSpace(roots_array_start()[root_index]);
+}
+
+
+Object* RegExpResultsCache::Lookup(Heap* heap, String* key_string,
+                                   Object* key_pattern, ResultsCacheType type) {
+  FixedArray* cache;
+  if (!key_string->IsInternalizedString()) return Smi::FromInt(0);
+  if (type == STRING_SPLIT_SUBSTRINGS) {
+    DCHECK(key_pattern->IsString());
+    if (!key_pattern->IsInternalizedString()) return Smi::FromInt(0);
+    cache = heap->string_split_cache();
+  } else {
+    DCHECK(type == REGEXP_MULTIPLE_INDICES);
+    DCHECK(key_pattern->IsFixedArray());
+    cache = heap->regexp_multiple_cache();
+  }
+
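+  // The cache acts like a two-way set-associative table: the hash picks an
+  // aligned group of kArrayEntriesPerCacheEntry slots, and on a miss one more
+  // group (wrapping around the cache) is probed before giving up. As an
+  // illustration, if kRegExpResultsCacheSize were 0x100 with four slots per
+  // group, a hash of 0x2B3 would map to index 0xB0 and the second probe to
+  // 0xB4.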
+  uint32_t hash = key_string->Hash();
+  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+                    ~(kArrayEntriesPerCacheEntry - 1));
+  if (cache->get(index + kStringOffset) == key_string &&
+      cache->get(index + kPatternOffset) == key_pattern) {
+    return cache->get(index + kArrayOffset);
+  }
+  index =
+      ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+  if (cache->get(index + kStringOffset) == key_string &&
+      cache->get(index + kPatternOffset) == key_pattern) {
+    return cache->get(index + kArrayOffset);
+  }
+  return Smi::FromInt(0);
+}
+
+
+void RegExpResultsCache::Enter(Isolate* isolate, Handle<String> key_string,
+                               Handle<Object> key_pattern,
+                               Handle<FixedArray> value_array,
+                               ResultsCacheType type) {
+  Factory* factory = isolate->factory();
+  Handle<FixedArray> cache;
+  if (!key_string->IsInternalizedString()) return;
+  if (type == STRING_SPLIT_SUBSTRINGS) {
+    DCHECK(key_pattern->IsString());
+    if (!key_pattern->IsInternalizedString()) return;
+    cache = factory->string_split_cache();
+  } else {
+    DCHECK(type == REGEXP_MULTIPLE_INDICES);
+    DCHECK(key_pattern->IsFixedArray());
+    cache = factory->regexp_multiple_cache();
+  }
+
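+  // Same two-way probing as in Lookup. Insertion prefers an empty primary
+  // slot, then an empty secondary slot; if both are taken, the secondary
+  // entry is cleared and the new entry overwrites the primary one.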
+  uint32_t hash = key_string->Hash();
+  uint32_t index = ((hash & (kRegExpResultsCacheSize - 1)) &
+                    ~(kArrayEntriesPerCacheEntry - 1));
+  if (cache->get(index + kStringOffset) == Smi::FromInt(0)) {
+    cache->set(index + kStringOffset, *key_string);
+    cache->set(index + kPatternOffset, *key_pattern);
+    cache->set(index + kArrayOffset, *value_array);
+  } else {
+    uint32_t index2 =
+        ((index + kArrayEntriesPerCacheEntry) & (kRegExpResultsCacheSize - 1));
+    if (cache->get(index2 + kStringOffset) == Smi::FromInt(0)) {
+      cache->set(index2 + kStringOffset, *key_string);
+      cache->set(index2 + kPatternOffset, *key_pattern);
+      cache->set(index2 + kArrayOffset, *value_array);
+    } else {
+      cache->set(index2 + kStringOffset, Smi::FromInt(0));
+      cache->set(index2 + kPatternOffset, Smi::FromInt(0));
+      cache->set(index2 + kArrayOffset, Smi::FromInt(0));
+      cache->set(index + kStringOffset, *key_string);
+      cache->set(index + kPatternOffset, *key_pattern);
+      cache->set(index + kArrayOffset, *value_array);
+    }
+  }
+  // If the array is a reasonably short list of substrings, convert it into a
+  // list of internalized strings.
+  if (type == STRING_SPLIT_SUBSTRINGS && value_array->length() < 100) {
+    for (int i = 0; i < value_array->length(); i++) {
+      Handle<String> str(String::cast(value_array->get(i)), isolate);
+      Handle<String> internalized_str = factory->InternalizeString(str);
+      value_array->set(i, *internalized_str);
+    }
+  }
+  // Convert backing store to a copy-on-write array.
+  value_array->set_map_no_write_barrier(*factory->fixed_cow_array_map());
+}
+
+
+void RegExpResultsCache::Clear(FixedArray* cache) {
+  for (int i = 0; i < kRegExpResultsCacheSize; i++) {
+    cache->set(i, Smi::FromInt(0));
+  }
+}
+
+
+int Heap::FullSizeNumberStringCacheLength() {
+  // Compute the size of the number string cache based on the max new-space
+  // size. The number string cache has a minimum size of twice the initial
+  // cache size to ensure that it is bigger after being made 'full size'.
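+  // For example, an 8 MB max semi-space yields 8 MB / 512 = 0x4000, which the
+  // Min() leaves unchanged and (assuming the initial cache size is small) the
+  // Max() does too, for a returned length of 0x8000.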
+  int number_string_cache_size = max_semi_space_size_ / 512;
+  number_string_cache_size = Max(kInitialNumberStringCacheSize * 2,
+                                 Min(0x4000, number_string_cache_size));
+  // There is a string and a number per entry so the length is twice the number
+  // of entries.
+  return number_string_cache_size * 2;
+}
+
+
+void Heap::FlushNumberStringCache() {
+  // Flush the number to string cache.
+  int len = number_string_cache()->length();
+  for (int i = 0; i < len; i++) {
+    number_string_cache()->set_undefined(i);
+  }
+}
+
+
+void Heap::FlushAllocationSitesScratchpad() {
+  for (int i = 0; i < allocation_sites_scratchpad_length_; i++) {
+    allocation_sites_scratchpad()->set_undefined(i);
+  }
+  allocation_sites_scratchpad_length_ = 0;
+}
+
+
+void Heap::InitializeAllocationSitesScratchpad() {
+  DCHECK(allocation_sites_scratchpad()->length() ==
+         kAllocationSiteScratchpadSize);
+  for (int i = 0; i < kAllocationSiteScratchpadSize; i++) {
+    allocation_sites_scratchpad()->set_undefined(i);
+  }
+}
+
+
+void Heap::AddAllocationSiteToScratchpad(AllocationSite* site,
+                                         ScratchpadSlotMode mode) {
+  if (allocation_sites_scratchpad_length_ < kAllocationSiteScratchpadSize) {
+    // We cannot use the normal write-barrier because slots need to be
+    // recorded with non-incremental marking as well. We have to explicitly
+    // record the slot to take evacuation candidates into account.
+    allocation_sites_scratchpad()->set(allocation_sites_scratchpad_length_,
+                                       site, SKIP_WRITE_BARRIER);
+    Object** slot = allocation_sites_scratchpad()->RawFieldOfElementAt(
+        allocation_sites_scratchpad_length_);
+
+    if (mode == RECORD_SCRATCHPAD_SLOT) {
+      // We need to allow slots buffer overflow here: the evacuation
+      // candidates are not part of the global list of old space pages, so
+      // releasing an evacuation candidate due to a slots buffer overflow
+      // would result in lost pages.
+      mark_compact_collector()->RecordSlot(slot, slot, *slot,
+                                           SlotsBuffer::IGNORE_OVERFLOW);
+    }
+    allocation_sites_scratchpad_length_++;
+  }
+}
+
+
+Map* Heap::MapForExternalArrayType(ExternalArrayType array_type) {
+  return Map::cast(roots_[RootIndexForExternalArrayType(array_type)]);
+}
+
+
+Heap::RootListIndex Heap::RootIndexForExternalArrayType(
+    ExternalArrayType array_type) {
+  switch (array_type) {
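+    // TYPED_ARRAYS expands ARRAY_TYPE_TO_ROOT_INDEX once per typed-array
+    // type; e.g. the Uint8 instantiation becomes
+    // "case kExternalUint8Array: return kExternalUint8ArrayMapRootIndex;".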
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                                  \
+    return kExternal##Type##ArrayMapRootIndex;
+
+    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
+
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
+Map* Heap::MapForFixedTypedArray(ExternalArrayType array_type) {
+  return Map::cast(roots_[RootIndexForFixedTypedArray(array_type)]);
+}
+
+
+Heap::RootListIndex Heap::RootIndexForFixedTypedArray(
+    ExternalArrayType array_type) {
+  switch (array_type) {
+#define ARRAY_TYPE_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                                  \
+    return kFixed##Type##ArrayMapRootIndex;
+
+    TYPED_ARRAYS(ARRAY_TYPE_TO_ROOT_INDEX)
+#undef ARRAY_TYPE_TO_ROOT_INDEX
+
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
+Heap::RootListIndex Heap::RootIndexForEmptyExternalArray(
+    ElementsKind elementsKind) {
+  switch (elementsKind) {
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case EXTERNAL_##TYPE##_ELEMENTS:                                \
+    return kEmptyExternal##Type##ArrayRootIndex;
+
+    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
+Heap::RootListIndex Heap::RootIndexForEmptyFixedTypedArray(
+    ElementsKind elementsKind) {
+  switch (elementsKind) {
+#define ELEMENT_KIND_TO_ROOT_INDEX(Type, type, TYPE, ctype, size) \
+  case TYPE##_ELEMENTS:                                           \
+    return kEmptyFixed##Type##ArrayRootIndex;
+
+    TYPED_ARRAYS(ELEMENT_KIND_TO_ROOT_INDEX)
+#undef ELEMENT_KIND_TO_ROOT_INDEX
+    default:
+      UNREACHABLE();
+      return kUndefinedValueRootIndex;
+  }
+}
+
+
+ExternalArray* Heap::EmptyExternalArrayForMap(Map* map) {
+  return ExternalArray::cast(
+      roots_[RootIndexForEmptyExternalArray(map->elements_kind())]);
+}
+
+
+FixedTypedArrayBase* Heap::EmptyFixedTypedArrayForMap(Map* map) {
+  return FixedTypedArrayBase::cast(
+      roots_[RootIndexForEmptyFixedTypedArray(map->elements_kind())]);
+}
+
+
+AllocationResult Heap::AllocateForeign(Address address,
+                                       PretenureFlag pretenure) {
+  // Statically ensure that it is safe to allocate foreigns in paged spaces.
+  STATIC_ASSERT(Foreign::kSize <= Page::kMaxRegularHeapObjectSize);
+  AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
+  Foreign* result;
+  AllocationResult allocation = Allocate(foreign_map(), space);
+  if (!allocation.To(&result)) return allocation;
+  result->set_foreign_address(address);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
+  if (length < 0 || length > ByteArray::kMaxLength) {
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+  }
+  int size = ByteArray::SizeFor(length);
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(byte_array_map());
+  ByteArray::cast(result)->set_length(length);
+  return result;
+}
+
+
+void Heap::CreateFillerObjectAt(Address addr, int size) {
+  if (size == 0) return;
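+  // Fillers keep the heap iterable and parsable: one- and two-word gaps get
+  // dedicated filler maps, while larger gaps become FreeSpace objects that
+  // record their own size.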
+  HeapObject* filler = HeapObject::FromAddress(addr);
+  if (size == kPointerSize) {
+    filler->set_map_no_write_barrier(one_pointer_filler_map());
+  } else if (size == 2 * kPointerSize) {
+    filler->set_map_no_write_barrier(two_pointer_filler_map());
+  } else {
+    filler->set_map_no_write_barrier(free_space_map());
+    FreeSpace::cast(filler)->set_size(size);
+  }
+}
+
+
+bool Heap::CanMoveObjectStart(HeapObject* object) {
+  Address address = object->address();
+  bool is_in_old_pointer_space = InOldPointerSpace(address);
+  bool is_in_old_data_space = InOldDataSpace(address);
+
+  if (lo_space()->Contains(object)) return false;
+
+  Page* page = Page::FromAddress(address);
+  // We can move the object start if any of the following holds:
+  // (1) the object is not in old pointer or old data space,
+  // (2) the page of the object was already swept, or
+  // (3) the page was already concurrently swept. This case is an optimization
+  //     for concurrent sweeping, since the WasSwept predicate for
+  //     concurrently swept pages is only set after all pages have been swept.
+  return (!is_in_old_pointer_space && !is_in_old_data_space) ||
+         page->WasSwept() || page->SweepingCompleted();
+}
+
+
+void Heap::AdjustLiveBytes(Address address, int by, InvocationMode mode) {
+  if (incremental_marking()->IsMarking() &&
+      Marking::IsBlack(Marking::MarkBitFrom(address))) {
+    if (mode == FROM_GC) {
+      MemoryChunk::IncrementLiveBytesFromGC(address, by);
+    } else {
+      MemoryChunk::IncrementLiveBytesFromMutator(address, by);
+    }
+  }
+}
+
+
+FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
+                                         int elements_to_trim) {
+  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
+  const int bytes_to_trim = elements_to_trim * element_size;
+  Map* map = object->map();
+
+  // For now this trick is only applied to objects in new and paged space.
+  // In large object space the object's start must coincide with chunk
+  // and thus the trick is just not applicable.
+  DCHECK(!lo_space()->Contains(object));
+  DCHECK(object->map() != fixed_cow_array_map());
+
+  STATIC_ASSERT(FixedArrayBase::kMapOffset == 0);
+  STATIC_ASSERT(FixedArrayBase::kLengthOffset == kPointerSize);
+  STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kPointerSize);
+
+  const int len = object->length();
+  DCHECK(elements_to_trim <= len);
+
+  // Calculate location of new array start.
+  Address new_start = object->address() + bytes_to_trim;
+
+  // Technically in new space this write might be omitted (except for debug
+  // mode, which iterates through the heap), but to play it safe we still do
+  // it.
+  CreateFillerObjectAt(object->address(), bytes_to_trim);
+
+  // Initialize header of the trimmed array. Since left trimming is only
+  // performed on pages which are not concurrently swept creating a filler
+  // object does not require synchronization.
+  DCHECK(CanMoveObjectStart(object));
+  Object** former_start = HeapObject::RawField(object, 0);
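+  // RawField indexes in pointer-sized words, so the element count is scaled
+  // by element_size / kPointerSize (a FixedDoubleArray element spans two
+  // words on 32-bit targets). Storing the map and the new length at the new
+  // start lays down a valid array header over the old payload.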
+  int new_start_index = elements_to_trim * (element_size / kPointerSize);
+  former_start[new_start_index] = map;
+  former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
+  FixedArrayBase* new_object =
+      FixedArrayBase::cast(HeapObject::FromAddress(new_start));
+
+  // Maintain consistency of live bytes during incremental marking
+  marking()->TransferMark(object->address(), new_start);
+  AdjustLiveBytes(new_start, -bytes_to_trim, Heap::FROM_MUTATOR);
+
+  // Notify the heap profiler of change in object layout.
+  OnMoveEvent(new_object, object, new_object->Size());
+  return new_object;
+}
+
+
+// Force instantiation of templatized method.
+template
+void Heap::RightTrimFixedArray<Heap::FROM_GC>(FixedArrayBase*, int);
+template
+void Heap::RightTrimFixedArray<Heap::FROM_MUTATOR>(FixedArrayBase*, int);
+
+
+template<Heap::InvocationMode mode>
+void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
+  const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize;
+  const int bytes_to_trim = elements_to_trim * element_size;
+
+  // For now this trick is only applied to objects in new and paged space.
+  DCHECK(object->map() != fixed_cow_array_map());
+
+  const int len = object->length();
+  DCHECK(elements_to_trim < len);
+
+  // Calculate location of new array end.
+  Address new_end = object->address() + object->Size() - bytes_to_trim;
+
+  // Technically in new space this write might be omitted (except for debug
+  // mode, which iterates through the heap), but to play it safe we still do
+  // it.
+  // We do not create a filler for objects in large object space.
+  // TODO(hpayer): We should shrink the large object page if the size
+  // of the object changed significantly.
+  if (!lo_space()->Contains(object)) {
+    CreateFillerObjectAt(new_end, bytes_to_trim);
+  }
+
+  // Initialize header of the trimmed array. We are storing the new length
+  // using release store after creating a filler for the left-over space to
+  // avoid races with the sweeper thread.
+  object->synchronized_set_length(len - elements_to_trim);
+
+  // Maintain consistency of live bytes during incremental marking
+  AdjustLiveBytes(object->address(), -bytes_to_trim, mode);
+
+  // Notify the heap profiler of change in object layout. The array may not be
+  // moved during GC, but its size has to be adjusted nevertheless.
+  HeapProfiler* profiler = isolate()->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->UpdateObjectSizeEvent(object->address(), object->Size());
+  }
+}
+
+
+AllocationResult Heap::AllocateExternalArray(int length,
+                                             ExternalArrayType array_type,
+                                             void* external_pointer,
+                                             PretenureFlag pretenure) {
+  int size = ExternalArray::kAlignedSize;
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(MapForExternalArrayType(array_type));
+  ExternalArray::cast(result)->set_length(length);
+  ExternalArray::cast(result)->set_external_pointer(external_pointer);
+  return result;
+}
+
+static void ForFixedTypedArray(ExternalArrayType array_type, int* element_size,
+                               ElementsKind* element_kind) {
+  switch (array_type) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    *element_size = size;                               \
+    *element_kind = TYPE##_ELEMENTS;                    \
+    return;
+
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+
+    default:
+      *element_size = 0;               // Bogus
+      *element_kind = UINT8_ELEMENTS;  // Bogus
+      UNREACHABLE();
+  }
+}
+
+
+AllocationResult Heap::AllocateFixedTypedArray(int length,
+                                               ExternalArrayType array_type,
+                                               PretenureFlag pretenure) {
+  int element_size;
+  ElementsKind elements_kind;
+  ForFixedTypedArray(array_type, &element_size, &elements_kind);
+  int size = OBJECT_POINTER_ALIGN(length * element_size +
+                                  FixedTypedArrayBase::kDataOffset);
+#ifndef V8_HOST_ARCH_64_BIT
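+  // Over-allocate by one word so that EnsureDoubleAligned below can shift the
+  // object to an 8-byte boundary without overrunning the allocation.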
+  if (array_type == kExternalFloat64Array) {
+    size += kPointerSize;
+  }
+#endif
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* object;
+  AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+  if (!allocation.To(&object)) return allocation;
+
+  if (array_type == kExternalFloat64Array) {
+    object = EnsureDoubleAligned(this, object, size);
+  }
+
+  object->set_map(MapForFixedTypedArray(array_type));
+  FixedTypedArrayBase* elements = FixedTypedArrayBase::cast(object);
+  elements->set_length(length);
+  memset(elements->DataPtr(), 0, elements->DataSize());
+  return elements;
+}
+
+
+AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
+  DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
+  AllocationResult allocation =
+      AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
+
+  HeapObject* result;
+  if (!allocation.To(&result)) return allocation;
+
+  if (immovable) {
+    Address address = result->address();
+    // Code objects which should stay at a fixed address are allocated either
+    // in the first page of code space (objects on the first page of each space
+    // are never moved) or in large object space.
+    if (!code_space_->FirstPage()->Contains(address) &&
+        MemoryChunk::FromAddress(address)->owner()->identity() != LO_SPACE) {
+      // Discard the first code allocation, which was on a page where it could
+      // be moved.
+      CreateFillerObjectAt(result->address(), object_size);
+      allocation = lo_space_->AllocateRaw(object_size, EXECUTABLE);
+      if (!allocation.To(&result)) return allocation;
+      OnAllocationEvent(result, object_size);
+    }
+  }
+
+  result->set_map_no_write_barrier(code_map());
+  Code* code = Code::cast(result);
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
+         isolate_->code_range()->contains(code->address()));
+  code->set_gc_metadata(Smi::FromInt(0));
+  code->set_ic_age(global_ic_age_);
+  return code;
+}
+
+
+AllocationResult Heap::CopyCode(Code* code) {
+  AllocationResult allocation;
+  HeapObject* new_constant_pool;
+  if (FLAG_enable_ool_constant_pool &&
+      code->constant_pool() != empty_constant_pool_array()) {
+    // Copy the constant pool, since edits to the copied code may modify
+    // the constant pool.
+    allocation = CopyConstantPoolArray(code->constant_pool());
+    if (!allocation.To(&new_constant_pool)) return allocation;
+  } else {
+    new_constant_pool = empty_constant_pool_array();
+  }
+
+  HeapObject* result;
+  // Allocate an object the same size as the code object.
+  int obj_size = code->Size();
+  allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  // Copy code object.
+  Address old_addr = code->address();
+  Address new_addr = result->address();
+  CopyBlock(new_addr, old_addr, obj_size);
+  Code* new_code = Code::cast(result);
+
+  // Update the constant pool.
+  new_code->set_constant_pool(new_constant_pool);
+
+  // Relocate the copy.
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
+         isolate_->code_range()->contains(code->address()));
+  new_code->Relocate(new_addr - old_addr);
+  return new_code;
+}
+
+
+AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
+  // Allocate ByteArray and ConstantPoolArray before the Code object, so that
+  // we do not risk leaving an uninitialized Code object (and breaking the
+  // heap).
+  ByteArray* reloc_info_array;
+  {
+    AllocationResult allocation =
+        AllocateByteArray(reloc_info.length(), TENURED);
+    if (!allocation.To(&reloc_info_array)) return allocation;
+  }
+  HeapObject* new_constant_pool;
+  if (FLAG_enable_ool_constant_pool &&
+      code->constant_pool() != empty_constant_pool_array()) {
+    // Copy the constant pool, since edits to the copied code may modify
+    // the constant pool.
+    AllocationResult allocation = CopyConstantPoolArray(code->constant_pool());
+    if (!allocation.To(&new_constant_pool)) return allocation;
+  } else {
+    new_constant_pool = empty_constant_pool_array();
+  }
+
+  int new_body_size = RoundUp(code->instruction_size(), kObjectAlignment);
+
+  int new_obj_size = Code::SizeFor(new_body_size);
+
+  Address old_addr = code->address();
+
+  size_t relocation_offset =
+      static_cast<size_t>(code->instruction_end() - old_addr);
+
+  HeapObject* result;
+  AllocationResult allocation =
+      AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  // Copy code object.
+  Address new_addr = result->address();
+
+  // Copy header and instructions.
+  CopyBytes(new_addr, old_addr, relocation_offset);
+
+  Code* new_code = Code::cast(result);
+  new_code->set_relocation_info(reloc_info_array);
+
+  // Update constant pool.
+  new_code->set_constant_pool(new_constant_pool);
+
+  // Copy patched rinfo.
+  CopyBytes(new_code->relocation_start(), reloc_info.start(),
+            static_cast<size_t>(reloc_info.length()));
+
+  // Relocate the copy.
+  DCHECK(isolate_->code_range() == NULL || !isolate_->code_range()->valid() ||
+         isolate_->code_range()->contains(code->address()));
+  new_code->Relocate(new_addr - old_addr);
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) new_code->ObjectVerify();
+#endif
+  return new_code;
+}
+
+
+void Heap::InitializeAllocationMemento(AllocationMemento* memento,
+                                       AllocationSite* allocation_site) {
+  memento->set_map_no_write_barrier(allocation_memento_map());
+  DCHECK(allocation_site->map() == allocation_site_map());
+  memento->set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
+  if (FLAG_allocation_site_pretenuring) {
+    allocation_site->IncrementMementoCreateCount();
+  }
+}
+
+
+AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
+                                AllocationSite* allocation_site) {
+  DCHECK(gc_state_ == NOT_IN_GC);
+  DCHECK(map->instance_type() != MAP_TYPE);
+  // If allocation failures are disallowed, we may allocate in a different
+  // space when new space is full and the object is not a large object.
+  AllocationSpace retry_space =
+      (space != NEW_SPACE) ? space : TargetSpaceId(map->instance_type());
+  int size = map->instance_size();
+  if (allocation_site != NULL) {
+    size += AllocationMemento::kSize;
+  }
+  HeapObject* result;
+  AllocationResult allocation = AllocateRaw(size, space, retry_space);
+  if (!allocation.To(&result)) return allocation;
+  // No need for write barrier since object is white and map is in old space.
+  result->set_map_no_write_barrier(map);
+  if (allocation_site != NULL) {
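+    // Place the memento right after the object proper, in the extra
+    // AllocationMemento::kSize bytes reserved above.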
+    AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+        reinterpret_cast<Address>(result) + map->instance_size());
+    InitializeAllocationMemento(alloc_memento, allocation_site);
+  }
+  return result;
+}
+
+
+void Heap::InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+                                     Map* map) {
+  obj->set_properties(properties);
+  obj->initialize_elements();
+  // TODO(1240798): Initialize the object's body using valid initial values
+  // according to the object's initial map.  For example, if the map's
+  // instance type is JS_ARRAY_TYPE, the length field should be initialized
+  // to a number (e.g. Smi::FromInt(0)) and the elements initialized to a
+  // fixed array (e.g. Heap::empty_fixed_array()).  Currently, the object
+  // verification code has to cope with (temporarily) invalid objects.  See,
+  // for example, JSArray::JSArrayVerify.
+  Object* filler;
+  // We cannot always fill with one_pointer_filler_map because objects
+  // created from API functions expect their internal fields to be initialized
+  // with undefined_value.
+  // Pre-allocated fields need to be initialized with undefined_value as well
+  // so that object accesses before the constructor completes (e.g. in the
+  // debugger) will not cause a crash.
+  if (map->constructor()->IsJSFunction() &&
+      JSFunction::cast(map->constructor())
+          ->IsInobjectSlackTrackingInProgress()) {
+    // We might want to shrink the object later.
+    DCHECK(obj->GetInternalFieldCount() == 0);
+    filler = Heap::one_pointer_filler_map();
+  } else {
+    filler = Heap::undefined_value();
+  }
+  obj->InitializeBody(map, Heap::undefined_value(), filler);
+}
+
+
+AllocationResult Heap::AllocateJSObjectFromMap(
+    Map* map, PretenureFlag pretenure, bool allocate_properties,
+    AllocationSite* allocation_site) {
+  // JSFunctions should be allocated using AllocateFunction to be
+  // properly initialized.
+  DCHECK(map->instance_type() != JS_FUNCTION_TYPE);
+
+  // Both types of global objects should be allocated using
+  // AllocateGlobalObject to be properly initialized.
+  DCHECK(map->instance_type() != JS_GLOBAL_OBJECT_TYPE);
+  DCHECK(map->instance_type() != JS_BUILTINS_OBJECT_TYPE);
+
+  // Allocate the backing storage for the properties.
+  FixedArray* properties;
+  if (allocate_properties) {
+    int prop_size = map->InitialPropertiesLength();
+    DCHECK(prop_size >= 0);
+    {
+      AllocationResult allocation = AllocateFixedArray(prop_size, pretenure);
+      if (!allocation.To(&properties)) return allocation;
+    }
+  } else {
+    properties = empty_fixed_array();
+  }
+
+  // Allocate the JSObject.
+  int size = map->instance_size();
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
+  JSObject* js_obj;
+  AllocationResult allocation = Allocate(map, space, allocation_site);
+  if (!allocation.To(&js_obj)) return allocation;
+
+  // Initialize the JSObject.
+  InitializeJSObjectFromMap(js_obj, properties, map);
+  DCHECK(js_obj->HasFastElements() || js_obj->HasExternalArrayElements() ||
+         js_obj->HasFixedTypedArrayElements());
+  return js_obj;
+}
+
+
+AllocationResult Heap::AllocateJSObject(JSFunction* constructor,
+                                        PretenureFlag pretenure,
+                                        AllocationSite* allocation_site) {
+  DCHECK(constructor->has_initial_map());
+
+  // Allocate the object based on the constructor's initial map.
+  AllocationResult allocation = AllocateJSObjectFromMap(
+      constructor->initial_map(), pretenure, true, allocation_site);
+#ifdef DEBUG
+  // Make sure result is NOT a global object if valid.
+  HeapObject* obj;
+  DCHECK(!allocation.To(&obj) || !obj->IsGlobalObject());
+#endif
+  return allocation;
+}
+
+
+AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
+  // Never used to copy functions.  If functions need to be copied we
+  // have to be careful to clear the literals array.
+  SLOW_DCHECK(!source->IsJSFunction());
+
+  // Make the clone.
+  Map* map = source->map();
+  int object_size = map->instance_size();
+  HeapObject* clone;
+
+  DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
+
+  WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+
+  // If we're forced to always allocate, we use the general allocation
+  // functions which may leave us with an object in old space.
+  if (always_allocate()) {
+    {
+      AllocationResult allocation =
+          AllocateRaw(object_size, NEW_SPACE, OLD_POINTER_SPACE);
+      if (!allocation.To(&clone)) return allocation;
+    }
+    Address clone_address = clone->address();
+    CopyBlock(clone_address, source->address(), object_size);
+    // Update write barrier for all fields that lie beyond the header.
+    RecordWrites(clone_address, JSObject::kHeaderSize,
+                 (object_size - JSObject::kHeaderSize) / kPointerSize);
+  } else {
+    wb_mode = SKIP_WRITE_BARRIER;
+
+    {
+      int adjusted_object_size =
+          site != NULL ? object_size + AllocationMemento::kSize : object_size;
+      AllocationResult allocation =
+          AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
+      if (!allocation.To(&clone)) return allocation;
+    }
+    SLOW_DCHECK(InNewSpace(clone));
+    // Since we know the clone is allocated in new space, we can copy
+    // the contents without worrying about updating the write barrier.
+    CopyBlock(clone->address(), source->address(), object_size);
+
+    if (site != NULL) {
+      AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+          reinterpret_cast<Address>(clone) + object_size);
+      InitializeAllocationMemento(alloc_memento, site);
+    }
+  }
+
+  SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
+              source->GetElementsKind());
+  FixedArrayBase* elements = FixedArrayBase::cast(source->elements());
+  FixedArray* properties = FixedArray::cast(source->properties());
+  // Update elements if necessary.
+  if (elements->length() > 0) {
+    FixedArrayBase* elem;
+    {
+      AllocationResult allocation;
+      if (elements->map() == fixed_cow_array_map()) {
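+        // Copy-on-write arrays may be shared between the clone and the
+        // source as-is; no copy is needed.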
+        allocation = FixedArray::cast(elements);
+      } else if (source->HasFastDoubleElements()) {
+        allocation = CopyFixedDoubleArray(FixedDoubleArray::cast(elements));
+      } else {
+        allocation = CopyFixedArray(FixedArray::cast(elements));
+      }
+      if (!allocation.To(&elem)) return allocation;
+    }
+    JSObject::cast(clone)->set_elements(elem, wb_mode);
+  }
+  // Update properties if necessary.
+  if (properties->length() > 0) {
+    FixedArray* prop;
+    {
+      AllocationResult allocation = CopyFixedArray(properties);
+      if (!allocation.To(&prop)) return allocation;
+    }
+    JSObject::cast(clone)->set_properties(prop, wb_mode);
+  }
+  // Return the new clone.
+  return clone;
+}
+
+
+static inline void WriteOneByteData(Vector<const char> vector, uint8_t* chars,
+                                    int len) {
+  // Only works for one-byte strings.
+  DCHECK(vector.length() == len);
+  MemCopy(chars, vector.start(), len);
+}
+
+static inline void WriteTwoByteData(Vector<const char> vector, uint16_t* chars,
+                                    int len) {
+  const uint8_t* stream = reinterpret_cast<const uint8_t*>(vector.start());
+  unsigned stream_length = vector.length();
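+  // Decode one UTF-8 sequence per iteration; characters outside the BMP are
+  // written as a UTF-16 surrogate pair and consume two output slots.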
+  while (stream_length != 0) {
+    unsigned consumed = 0;
+    uint32_t c = unibrow::Utf8::ValueOf(stream, stream_length, &consumed);
+    DCHECK(c != unibrow::Utf8::kBadChar);
+    DCHECK(consumed <= stream_length);
+    stream_length -= consumed;
+    stream += consumed;
+    if (c > unibrow::Utf16::kMaxNonSurrogateCharCode) {
+      len -= 2;
+      if (len < 0) break;
+      *chars++ = unibrow::Utf16::LeadSurrogate(c);
+      *chars++ = unibrow::Utf16::TrailSurrogate(c);
+    } else {
+      len -= 1;
+      if (len < 0) break;
+      *chars++ = c;
+    }
+  }
+  DCHECK(stream_length == 0);
+  DCHECK(len == 0);
+}
+
+
+static inline void WriteOneByteData(String* s, uint8_t* chars, int len) {
+  DCHECK(s->length() == len);
+  String::WriteToFlat(s, chars, 0, len);
+}
+
+
+static inline void WriteTwoByteData(String* s, uint16_t* chars, int len) {
+  DCHECK(s->length() == len);
+  String::WriteToFlat(s, chars, 0, len);
+}
+
+
+template <bool is_one_byte, typename T>
+AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
+                                                      uint32_t hash_field) {
+  DCHECK(chars >= 0);
+  // Compute map and object size.
+  int size;
+  Map* map;
+
+  DCHECK_LE(0, chars);
+  DCHECK_GE(String::kMaxLength, chars);
+  if (is_one_byte) {
+    map = one_byte_internalized_string_map();
+    size = SeqOneByteString::SizeFor(chars);
+  } else {
+    map = internalized_string_map();
+    size = SeqTwoByteString::SizeFor(chars);
+  }
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, TENURED);
+
+  // Allocate string.
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(map);
+  // Set length and hash fields of the allocated string.
+  String* answer = String::cast(result);
+  answer->set_length(chars);
+  answer->set_hash_field(hash_field);
+
+  DCHECK_EQ(size, answer->Size());
+
+  if (is_one_byte) {
+    WriteOneByteData(t, SeqOneByteString::cast(answer)->GetChars(), chars);
+  } else {
+    WriteTwoByteData(t, SeqTwoByteString::cast(answer)->GetChars(), chars);
+  }
+  return answer;
+}
+
+
+// Need explicit instantiations.
+template AllocationResult Heap::AllocateInternalizedStringImpl<true>(String*,
+                                                                     int,
+                                                                     uint32_t);
+template AllocationResult Heap::AllocateInternalizedStringImpl<false>(String*,
+                                                                      int,
+                                                                      uint32_t);
+template AllocationResult Heap::AllocateInternalizedStringImpl<false>(
+    Vector<const char>, int, uint32_t);
+
+
+AllocationResult Heap::AllocateRawOneByteString(int length,
+                                                PretenureFlag pretenure) {
+  DCHECK_LE(0, length);
+  DCHECK_GE(String::kMaxLength, length);
+  int size = SeqOneByteString::SizeFor(length);
+  DCHECK(size <= SeqOneByteString::kMaxSize);
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  // Partially initialize the object.
+  result->set_map_no_write_barrier(one_byte_string_map());
+  String::cast(result)->set_length(length);
+  String::cast(result)->set_hash_field(String::kEmptyHashField);
+  DCHECK_EQ(size, HeapObject::cast(result)->Size());
+
+  return result;
+}
+
+
+AllocationResult Heap::AllocateRawTwoByteString(int length,
+                                                PretenureFlag pretenure) {
+  DCHECK_LE(0, length);
+  DCHECK_GE(String::kMaxLength, length);
+  int size = SeqTwoByteString::SizeFor(length);
+  DCHECK(size <= SeqTwoByteString::kMaxSize);
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  // Partially initialize the object.
+  result->set_map_no_write_barrier(string_map());
+  String::cast(result)->set_length(length);
+  String::cast(result)->set_hash_field(String::kEmptyHashField);
+  DCHECK_EQ(size, HeapObject::cast(result)->Size());
+  return result;
+}
+
+
+AllocationResult Heap::AllocateEmptyFixedArray() {
+  int size = FixedArray::SizeFor(0);
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+  // Initialize the object.
+  result->set_map_no_write_barrier(fixed_array_map());
+  FixedArray::cast(result)->set_length(0);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateEmptyExternalArray(
+    ExternalArrayType array_type) {
+  return AllocateExternalArray(0, array_type, NULL, TENURED);
+}
+
+
+AllocationResult Heap::CopyAndTenureFixedCOWArray(FixedArray* src) {
+  if (!InNewSpace(src)) {
+    return src;
+  }
+
+  int len = src->length();
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(len, TENURED);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  obj->set_map_no_write_barrier(fixed_array_map());
+  FixedArray* result = FixedArray::cast(obj);
+  result->set_length(len);
+
+  // Copy the content
+  DisallowHeapAllocation no_gc;
+  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+
+  // TODO(mvstanton): The map is set twice because of protection against
+  // calling set() on a COW FixedArray. Issue v8:3221 was created to track
+  // this; once it is resolved we might be able to remove this whole method.
+  HeapObject::cast(obj)->set_map_no_write_barrier(fixed_cow_array_map());
+  return result;
+}
+
+
+AllocationResult Heap::AllocateEmptyFixedTypedArray(
+    ExternalArrayType array_type) {
+  return AllocateFixedTypedArray(0, array_type, TENURED);
+}
+
+
+AllocationResult Heap::CopyFixedArrayWithMap(FixedArray* src, Map* map) {
+  int len = src->length();
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(len, NOT_TENURED);
+    if (!allocation.To(&obj)) return allocation;
+  }
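+  // A copy living in new space needs no write barriers, so everything past
+  // the map word (length included) can be block-copied; otherwise fall
+  // through to an element-wise copy with the proper barrier mode.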
+  if (InNewSpace(obj)) {
+    obj->set_map_no_write_barrier(map);
+    CopyBlock(obj->address() + kPointerSize, src->address() + kPointerSize,
+              FixedArray::SizeFor(len) - kPointerSize);
+    return obj;
+  }
+  obj->set_map_no_write_barrier(map);
+  FixedArray* result = FixedArray::cast(obj);
+  result->set_length(len);
+
+  // Copy the content
+  DisallowHeapAllocation no_gc;
+  WriteBarrierMode mode = result->GetWriteBarrierMode(no_gc);
+  for (int i = 0; i < len; i++) result->set(i, src->get(i), mode);
+  return result;
+}
+
+
+AllocationResult Heap::CopyFixedDoubleArrayWithMap(FixedDoubleArray* src,
+                                                   Map* map) {
+  int len = src->length();
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRawFixedDoubleArray(len, NOT_TENURED);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  obj->set_map_no_write_barrier(map);
+  CopyBlock(obj->address() + FixedDoubleArray::kLengthOffset,
+            src->address() + FixedDoubleArray::kLengthOffset,
+            FixedDoubleArray::SizeFor(len) - FixedDoubleArray::kLengthOffset);
+  return obj;
+}
+
+
+AllocationResult Heap::CopyConstantPoolArrayWithMap(ConstantPoolArray* src,
+                                                    Map* map) {
+  HeapObject* obj;
+  if (src->is_extended_layout()) {
+    ConstantPoolArray::NumberOfEntries small(src,
+                                             ConstantPoolArray::SMALL_SECTION);
+    ConstantPoolArray::NumberOfEntries extended(
+        src, ConstantPoolArray::EXTENDED_SECTION);
+    AllocationResult allocation =
+        AllocateExtendedConstantPoolArray(small, extended);
+    if (!allocation.To(&obj)) return allocation;
+  } else {
+    ConstantPoolArray::NumberOfEntries small(src,
+                                             ConstantPoolArray::SMALL_SECTION);
+    AllocationResult allocation = AllocateConstantPoolArray(small);
+    if (!allocation.To(&obj)) return allocation;
+  }
+  obj->set_map_no_write_barrier(map);
+  CopyBlock(obj->address() + ConstantPoolArray::kFirstEntryOffset,
+            src->address() + ConstantPoolArray::kFirstEntryOffset,
+            src->size() - ConstantPoolArray::kFirstEntryOffset);
+  return obj;
+}
+
+
+AllocationResult Heap::AllocateRawFixedArray(int length,
+                                             PretenureFlag pretenure) {
+  if (length < 0 || length > FixedArray::kMaxLength) {
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+  }
+  int size = FixedArray::SizeFor(length);
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, pretenure);
+
+  return AllocateRaw(size, space, OLD_POINTER_SPACE);
+}
+
+
+AllocationResult Heap::AllocateFixedArrayWithFiller(int length,
+                                                    PretenureFlag pretenure,
+                                                    Object* filler) {
+  DCHECK(length >= 0);
+  DCHECK(empty_fixed_array()->IsFixedArray());
+  if (length == 0) return empty_fixed_array();
+
+  DCHECK(!InNewSpace(filler));
+  HeapObject* result;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(length, pretenure);
+    if (!allocation.To(&result)) return allocation;
+  }
+
+  result->set_map_no_write_barrier(fixed_array_map());
+  FixedArray* array = FixedArray::cast(result);
+  array->set_length(length);
+  MemsetPointer(array->data_start(), filler, length);
+  return array;
+}
+
+
+AllocationResult Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
+  return AllocateFixedArrayWithFiller(length, pretenure, undefined_value());
+}
+
+
+AllocationResult Heap::AllocateUninitializedFixedArray(int length) {
+  if (length == 0) return empty_fixed_array();
+
+  HeapObject* obj;
+  {
+    AllocationResult allocation = AllocateRawFixedArray(length, NOT_TENURED);
+    if (!allocation.To(&obj)) return allocation;
+  }
+
+  obj->set_map_no_write_barrier(fixed_array_map());
+  FixedArray::cast(obj)->set_length(length);
+  return obj;
+}
+
+
+AllocationResult Heap::AllocateUninitializedFixedDoubleArray(
+    int length, PretenureFlag pretenure) {
+  if (length == 0) return empty_fixed_array();
+
+  HeapObject* elements;
+  AllocationResult allocation = AllocateRawFixedDoubleArray(length, pretenure);
+  if (!allocation.To(&elements)) return allocation;
+
+  elements->set_map_no_write_barrier(fixed_double_array_map());
+  FixedDoubleArray::cast(elements)->set_length(length);
+  return elements;
+}
+
+
+AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
+                                                   PretenureFlag pretenure) {
+  if (length < 0 || length > FixedDoubleArray::kMaxLength) {
+    v8::internal::Heap::FatalProcessOutOfMemory("invalid array length", true);
+  }
+  int size = FixedDoubleArray::SizeFor(length);
+#ifndef V8_HOST_ARCH_64_BIT
+  size += kPointerSize;
+#endif
+  AllocationSpace space = SelectSpace(size, OLD_DATA_SPACE, pretenure);
+
+  HeapObject* object;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_DATA_SPACE);
+    if (!allocation.To(&object)) return allocation;
+  }
+
+  return EnsureDoubleAligned(this, object, size);
+}
+
+
+AllocationResult Heap::AllocateConstantPoolArray(
+    const ConstantPoolArray::NumberOfEntries& small) {
+  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
+  int size = ConstantPoolArray::SizeFor(small);
+#ifndef V8_HOST_ARCH_64_BIT
+  size += kPointerSize;
+#endif
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+
+  HeapObject* object;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+    if (!allocation.To(&object)) return allocation;
+  }
+  object = EnsureDoubleAligned(this, object, size);
+  object->set_map_no_write_barrier(constant_pool_array_map());
+
+  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+  constant_pool->Init(small);
+  constant_pool->ClearPtrEntries(isolate());
+  return constant_pool;
+}
+
+
+AllocationResult Heap::AllocateExtendedConstantPoolArray(
+    const ConstantPoolArray::NumberOfEntries& small,
+    const ConstantPoolArray::NumberOfEntries& extended) {
+  CHECK(small.are_in_range(0, ConstantPoolArray::kMaxSmallEntriesPerType));
+  CHECK(extended.are_in_range(0, kMaxInt));
+  int size = ConstantPoolArray::SizeForExtended(small, extended);
+#ifndef V8_HOST_ARCH_64_BIT
+  size += kPointerSize;
+#endif
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+
+  HeapObject* object;
+  {
+    AllocationResult allocation = AllocateRaw(size, space, OLD_POINTER_SPACE);
+    if (!allocation.To(&object)) return allocation;
+  }
+  object = EnsureDoubleAligned(this, object, size);
+  object->set_map_no_write_barrier(constant_pool_array_map());
+
+  ConstantPoolArray* constant_pool = ConstantPoolArray::cast(object);
+  constant_pool->InitExtended(small, extended);
+  constant_pool->ClearPtrEntries(isolate());
+  return constant_pool;
+}
+
+
+AllocationResult Heap::AllocateEmptyConstantPoolArray() {
+  ConstantPoolArray::NumberOfEntries small(0, 0, 0, 0);
+  int size = ConstantPoolArray::SizeFor(small);
+  HeapObject* result;
+  {
+    AllocationResult allocation =
+        AllocateRaw(size, OLD_DATA_SPACE, OLD_DATA_SPACE);
+    if (!allocation.To(&result)) return allocation;
+  }
+  result->set_map_no_write_barrier(constant_pool_array_map());
+  ConstantPoolArray::cast(result)->Init(small);
+  return result;
+}
+
+
+AllocationResult Heap::AllocateSymbol() {
+  // Statically ensure that it is safe to allocate symbols in paged spaces.
+  STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
+
+  HeapObject* result;
+  AllocationResult allocation =
+      AllocateRaw(Symbol::kSize, OLD_POINTER_SPACE, OLD_POINTER_SPACE);
+  if (!allocation.To(&result)) return allocation;
+
+  result->set_map_no_write_barrier(symbol_map());
+
+  // Generate a random hash value.
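+  // Zero is avoided (retry a few times, then fall back to 1), presumably so
+  // a symbol's hash can never be mistaken for an uncomputed hash field.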
+  int hash;
+  int attempts = 0;
+  do {
+    hash = isolate()->random_number_generator()->NextInt() & Name::kHashBitMask;
+    attempts++;
+  } while (hash == 0 && attempts < 30);
+  if (hash == 0) hash = 1;  // never return 0
+
+  Symbol::cast(result)
+      ->set_hash_field(Name::kIsNotArrayIndexMask | (hash << Name::kHashShift));
+  Symbol::cast(result)->set_name(undefined_value());
+  Symbol::cast(result)->set_flags(Smi::FromInt(0));
+
+  DCHECK(!Symbol::cast(result)->is_private());
+  return result;
+}
+
+
+AllocationResult Heap::AllocateStruct(InstanceType type) {
+  Map* map;
+  switch (type) {
+#define MAKE_CASE(NAME, Name, name) \
+  case NAME##_TYPE:                 \
+    map = name##_map();             \
+    break;
+    STRUCT_LIST(MAKE_CASE)
+#undef MAKE_CASE
+    default:
+      UNREACHABLE();
+      return exception();
+  }
+  int size = map->instance_size();
+  AllocationSpace space = SelectSpace(size, OLD_POINTER_SPACE, TENURED);
+  Struct* result;
+  {
+    AllocationResult allocation = Allocate(map, space);
+    if (!allocation.To(&result)) return allocation;
+  }
+  result->InitializeBody(size);
+  return result;
+}
+
+
+bool Heap::IsHeapIterable() {
+  // TODO(hpayer): This function is not correct. Allocation folding in old
+  // space breaks the iterability.
+  return new_space_top_after_last_gc_ == new_space()->top();
+}
+
+
+void Heap::MakeHeapIterable() {
+  DCHECK(AllowHeapAllocation::IsAllowed());
+  if (!IsHeapIterable()) {
+    CollectAllGarbage(kMakeHeapIterableMask, "Heap::MakeHeapIterable");
+  }
+  if (mark_compact_collector()->sweeping_in_progress()) {
+    mark_compact_collector()->EnsureSweepingCompleted();
+  }
+  DCHECK(IsHeapIterable());
+}
+
+
+void Heap::IdleMarkCompact(const char* message) {
+  bool uncommit = false;
+  if (gc_count_at_last_idle_gc_ == gc_count_) {
+    // No GC since the last full GC, the mutator is probably not active.
+    isolate_->compilation_cache()->Clear();
+    uncommit = true;
+  }
+  CollectAllGarbage(kReduceMemoryFootprintMask, message);
+  gc_idle_time_handler_.NotifyIdleMarkCompact();
+  gc_count_at_last_idle_gc_ = gc_count_;
+  if (uncommit) {
+    new_space_.Shrink();
+    UncommitFromSpace();
+  }
+}
+
+
+void Heap::AdvanceIdleIncrementalMarking(intptr_t step_size) {
+  incremental_marking()->Step(step_size,
+                              IncrementalMarking::NO_GC_VIA_STACK_GUARD, true);
+
+  if (incremental_marking()->IsComplete()) {
+    IdleMarkCompact("idle notification: finalize incremental");
+  }
+}
+
+
+bool Heap::WorthActivatingIncrementalMarking() {
+  return incremental_marking()->IsStopped() &&
+         incremental_marking()->WorthActivating() && NextGCIsLikelyToBeFull();
+}
+
+
+bool Heap::IdleNotification(int idle_time_in_ms) {
+  // If incremental marking is off, we do not perform idle notification.
+  if (!FLAG_incremental_marking) return true;
+  base::ElapsedTimer timer;
+  timer.Start();
+  isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
+      idle_time_in_ms);
+  HistogramTimerScope idle_notification_scope(
+      isolate_->counters()->gc_idle_notification());
+
+  GCIdleTimeHandler::HeapState heap_state;
+  heap_state.contexts_disposed = contexts_disposed_;
+  heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
+  heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
+  // TODO(ulan): Start incremental marking only for large heaps.
+  heap_state.can_start_incremental_marking =
+      incremental_marking()->ShouldActivate();
+  heap_state.sweeping_in_progress =
+      mark_compact_collector()->sweeping_in_progress();
+  heap_state.mark_compact_speed_in_bytes_per_ms =
+      static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
+  heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
+      tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+  heap_state.scavenge_speed_in_bytes_per_ms =
+      static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
+  heap_state.available_new_space_memory = new_space_.Available();
+  heap_state.new_space_capacity = new_space_.Capacity();
+  heap_state.new_space_allocation_throughput_in_bytes_per_ms =
+      static_cast<size_t>(
+          tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
+
+  GCIdleTimeAction action =
+      gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
+
+  bool result = false;
+  switch (action.type) {
+    case DONE:
+      result = true;
+      break;
+    case DO_INCREMENTAL_MARKING:
+      if (incremental_marking()->IsStopped()) {
+        incremental_marking()->Start();
+      }
+      AdvanceIdleIncrementalMarking(action.parameter);
+      break;
+    case DO_FULL_GC: {
+      HistogramTimerScope scope(isolate_->counters()->gc_context());
+      if (contexts_disposed_) {
+        CollectAllGarbage(kReduceMemoryFootprintMask,
+                          "idle notification: contexts disposed");
+        gc_idle_time_handler_.NotifyIdleMarkCompact();
+        gc_count_at_last_idle_gc_ = gc_count_;
+      } else {
+        IdleMarkCompact("idle notification: finalize idle round");
+      }
+      break;
+    }
+    case DO_SCAVENGE:
+      CollectGarbage(NEW_SPACE, "idle notification: scavenge");
+      break;
+    case DO_FINALIZE_SWEEPING:
+      mark_compact_collector()->EnsureSweepingCompleted();
+      break;
+    case DO_NOTHING:
+      break;
+  }
+
+  int actual_time_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
+  if (actual_time_ms <= idle_time_in_ms) {
+    isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
+        idle_time_in_ms - actual_time_ms);
+  } else {
+    isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
+        actual_time_ms - idle_time_in_ms);
+  }
+
+  if (FLAG_trace_idle_notification) {
+    PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
+           idle_time_in_ms, actual_time_ms);
+    action.Print();
+    PrintF("]\n");
+  }
+
+  contexts_disposed_ = 0;
+  return result;
+}
+
+
+#ifdef DEBUG
+
+void Heap::Print() {
+  if (!HasBeenSetUp()) return;
+  isolate()->PrintStack(stdout);
+  AllSpaces spaces(this);
+  for (Space* space = spaces.next(); space != NULL; space = spaces.next()) {
+    space->Print();
+  }
+}
+
+
+void Heap::ReportCodeStatistics(const char* title) {
+  PrintF(">>>>>> Code Stats (%s) >>>>>>\n", title);
+  PagedSpace::ResetCodeStatistics(isolate());
+  // We do not look for code in new space, map space, or old space.  If code
+  // somehow ends up in those spaces, we would miss it here.
+  code_space_->CollectCodeStatistics();
+  lo_space_->CollectCodeStatistics();
+  PagedSpace::ReportCodeStatistics(isolate());
+}
+
+
+// This function expects that NewSpace's allocated objects histogram is
+// populated (via a call to CollectStatistics or else as a side effect of a
+// just-completed scavenge collection).
+void Heap::ReportHeapStatistics(const char* title) {
+  USE(title);
+  PrintF(">>>>>> =============== %s (%d) =============== >>>>>>\n", title,
+         gc_count_);
+  PrintF("old_generation_allocation_limit_ %" V8_PTR_PREFIX "d\n",
+         old_generation_allocation_limit_);
+
+  PrintF("\n");
+  PrintF("Number of handles : %d\n", HandleScope::NumberOfHandles(isolate_));
+  isolate_->global_handles()->PrintStats();
+  PrintF("\n");
+
+  PrintF("Heap statistics : ");
+  isolate_->memory_allocator()->ReportStatistics();
+  PrintF("To space : ");
+  new_space_.ReportStatistics();
+  PrintF("Old pointer space : ");
+  old_pointer_space_->ReportStatistics();
+  PrintF("Old data space : ");
+  old_data_space_->ReportStatistics();
+  PrintF("Code space : ");
+  code_space_->ReportStatistics();
+  PrintF("Map space : ");
+  map_space_->ReportStatistics();
+  PrintF("Cell space : ");
+  cell_space_->ReportStatistics();
+  PrintF("PropertyCell space : ");
+  property_cell_space_->ReportStatistics();
+  PrintF("Large object space : ");
+  lo_space_->ReportStatistics();
+  PrintF(">>>>>> ========================================= >>>>>>\n");
+}
+
+#endif  // DEBUG
+
+bool Heap::Contains(HeapObject* value) { return Contains(value->address()); }
+
+
+bool Heap::Contains(Address addr) {
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
+  return HasBeenSetUp() &&
+         (new_space_.ToSpaceContains(addr) ||
+          old_pointer_space_->Contains(addr) ||
+          old_data_space_->Contains(addr) || code_space_->Contains(addr) ||
+          map_space_->Contains(addr) || cell_space_->Contains(addr) ||
+          property_cell_space_->Contains(addr) ||
+          lo_space_->SlowContains(addr));
+}
+
+
+bool Heap::InSpace(HeapObject* value, AllocationSpace space) {
+  return InSpace(value->address(), space);
+}
+
+
+bool Heap::InSpace(Address addr, AllocationSpace space) {
+  if (isolate_->memory_allocator()->IsOutsideAllocatedSpace(addr)) return false;
+  if (!HasBeenSetUp()) return false;
+
+  switch (space) {
+    case NEW_SPACE:
+      return new_space_.ToSpaceContains(addr);
+    case OLD_POINTER_SPACE:
+      return old_pointer_space_->Contains(addr);
+    case OLD_DATA_SPACE:
+      return old_data_space_->Contains(addr);
+    case CODE_SPACE:
+      return code_space_->Contains(addr);
+    case MAP_SPACE:
+      return map_space_->Contains(addr);
+    case CELL_SPACE:
+      return cell_space_->Contains(addr);
+    case PROPERTY_CELL_SPACE:
+      return property_cell_space_->Contains(addr);
+    case LO_SPACE:
+      return lo_space_->SlowContains(addr);
+    case INVALID_SPACE:
+      break;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+#ifdef VERIFY_HEAP
+void Heap::Verify() {
+  CHECK(HasBeenSetUp());
+  HandleScope scope(isolate());
+
+  store_buffer()->Verify();
+
+  if (mark_compact_collector()->sweeping_in_progress()) {
+    // We have to wait here for the sweeper threads to have an iterable heap.
+    mark_compact_collector()->EnsureSweepingCompleted();
+  }
+
+  VerifyPointersVisitor visitor;
+  IterateRoots(&visitor, VISIT_ONLY_STRONG);
+
+  VerifySmisVisitor smis_visitor;
+  IterateSmiRoots(&smis_visitor);
+
+  new_space_.Verify();
+
+  old_pointer_space_->Verify(&visitor);
+  map_space_->Verify(&visitor);
+
+  VerifyPointersVisitor no_dirty_regions_visitor;
+  old_data_space_->Verify(&no_dirty_regions_visitor);
+  code_space_->Verify(&no_dirty_regions_visitor);
+  cell_space_->Verify(&no_dirty_regions_visitor);
+  property_cell_space_->Verify(&no_dirty_regions_visitor);
+
+  lo_space_->Verify();
+}
+#endif
+
+
+void Heap::ZapFromSpace() {
+  NewSpacePageIterator it(new_space_.FromSpaceStart(),
+                          new_space_.FromSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    for (Address cursor = page->area_start(), limit = page->area_end();
+         cursor < limit; cursor += kPointerSize) {
+      Memory::Address_at(cursor) = kFromSpaceZapValue;
+    }
+  }
+}
+
+
+void Heap::IterateAndMarkPointersToFromSpace(Address start, Address end,
+                                             ObjectSlotCallback callback) {
+  Address slot_address = start;
+
+  // We are not collecting slots on new space objects during mutation,
+  // thus we have to scan for pointers to evacuation candidates when we
+  // promote objects. But we should not record any slots in non-black
+  // objects. Grey objects' slots would be rescanned.
+  // White objects might not survive until the end of the collection;
+  // recording their slots would violate the invariant.
+  bool record_slots = false;
+  if (incremental_marking()->IsCompacting()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::FromAddress(start));
+    record_slots = Marking::IsBlack(mark_bit);
+  }
+
+  while (slot_address < end) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    Object* object = *slot;
+    // If the store buffer becomes overfull we mark pages as being exempt from
+    // the store buffer.  These pages are scanned to find pointers that point
+    // to the new space.  In that case we may hit newly promoted objects and
+    // fix the pointers before the promotion queue gets to them.  Thus the 'if'.
+    if (object->IsHeapObject()) {
+      if (Heap::InFromSpace(object)) {
+        callback(reinterpret_cast<HeapObject**>(slot),
+                 HeapObject::cast(object));
+        Object* new_object = *slot;
+        if (InNewSpace(new_object)) {
+          SLOW_DCHECK(Heap::InToSpace(new_object));
+          SLOW_DCHECK(new_object->IsHeapObject());
+          store_buffer_.EnterDirectlyIntoStoreBuffer(
+              reinterpret_cast<Address>(slot));
+        }
+        SLOW_DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_object));
+      } else if (record_slots &&
+                 MarkCompactCollector::IsOnEvacuationCandidate(object)) {
+        mark_compact_collector()->RecordSlot(slot, slot, object);
+      }
+    }
+    slot_address += kPointerSize;
+  }
+}
+
+
+#ifdef DEBUG
+typedef bool (*CheckStoreBufferFilter)(Object** addr);
+
+
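+// Illustrative check (offsets hypothetical; the real constants live in
+// objects.h): if Map::kSize were 88 with pointer fields spanning [8, 40),
+// an address at offset 16 into a map-aligned cell gives mod == 16 and is
+// accepted, while offset 0 falls outside the pointer-fields range and is
+// rejected.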
+bool IsAMapPointerAddress(Object** addr) {
+  uintptr_t a = reinterpret_cast<uintptr_t>(addr);
+  int mod = a % Map::kSize;
+  return mod >= Map::kPointerFieldsBeginOffset &&
+         mod < Map::kPointerFieldsEndOffset;
+}
+
+
+bool EverythingsAPointer(Object** addr) { return true; }
+
+
+static void CheckStoreBuffer(Heap* heap, Object** current, Object** limit,
+                             Object**** store_buffer_position,
+                             Object*** store_buffer_top,
+                             CheckStoreBufferFilter filter,
+                             Address special_garbage_start,
+                             Address special_garbage_end) {
+  Map* free_space_map = heap->free_space_map();
+  for (; current < limit; current++) {
+    Object* o = *current;
+    Address current_address = reinterpret_cast<Address>(current);
+    // Skip free space.
+    if (o == free_space_map) {
+      FreeSpace* free_space =
+          FreeSpace::cast(HeapObject::FromAddress(current_address));
+      int skip = free_space->Size();
+      DCHECK(current_address + skip <= reinterpret_cast<Address>(limit));
+      DCHECK(skip > 0);
+      current_address += skip - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
+    }
+    // Skip the current linear allocation space between top and limit which is
+    // unmarked with the free space map, but can contain junk.
+    if (current_address == special_garbage_start &&
+        special_garbage_end != special_garbage_start) {
+      current_address = special_garbage_end - kPointerSize;
+      current = reinterpret_cast<Object**>(current_address);
+      continue;
+    }
+    if (!(*filter)(current)) continue;
+    DCHECK(current_address < special_garbage_start ||
+           current_address >= special_garbage_end);
+    DCHECK(reinterpret_cast<uintptr_t>(o) != kFreeListZapValue);
+    // We have to check that the pointer does not point into new space
+    // without trying to cast it to a heap object since the hash field of
+    // a string can contain values like 1 and 3 which are tagged null
+    // pointers.
+    if (!heap->InNewSpace(o)) continue;
+    while (**store_buffer_position < current &&
+           *store_buffer_position < store_buffer_top) {
+      (*store_buffer_position)++;
+    }
+    if (**store_buffer_position != current ||
+        *store_buffer_position == store_buffer_top) {
+      Object** obj_start = current;
+      while (!(*obj_start)->IsMap()) obj_start--;
+      UNREACHABLE();
+    }
+  }
+}
+
+
+// Check that the store buffer contains all intergenerational pointers by
+// scanning a page and ensuring that all pointers to young space are in the
+// store buffer.
+void Heap::OldPointerSpaceCheckStoreBuffer() {
+  OldSpace* space = old_pointer_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->area_start());
+
+    Address end = page->area_end();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                     store_buffer_top, &EverythingsAPointer, space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::MapSpaceCheckStoreBuffer() {
+  MapSpace* space = map_space();
+  PageIterator pages(space);
+
+  store_buffer()->SortUniq();
+
+  while (pages.has_next()) {
+    Page* page = pages.next();
+    Object** current = reinterpret_cast<Object**>(page->area_start());
+
+    Address end = page->area_end();
+
+    Object*** store_buffer_position = store_buffer()->Start();
+    Object*** store_buffer_top = store_buffer()->Top();
+
+    Object** limit = reinterpret_cast<Object**>(end);
+    CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                     store_buffer_top, &IsAMapPointerAddress, space->top(),
+                     space->limit());
+  }
+}
+
+
+void Heap::LargeObjectSpaceCheckStoreBuffer() {
+  LargeObjectIterator it(lo_space());
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    // We only have code, sequential strings, or fixed arrays in large
+    // object space, and only fixed arrays can possibly contain pointers to
+    // the young generation.
+    if (object->IsFixedArray()) {
+      Object*** store_buffer_position = store_buffer()->Start();
+      Object*** store_buffer_top = store_buffer()->Top();
+      Object** current = reinterpret_cast<Object**>(object->address());
+      Object** limit =
+          reinterpret_cast<Object**>(object->address() + object->Size());
+      CheckStoreBuffer(this, current, limit, &store_buffer_position,
+                       store_buffer_top, &EverythingsAPointer, NULL, NULL);
+    }
+  }
+}
+#endif
+
+
+void Heap::IterateRoots(ObjectVisitor* v, VisitMode mode) {
+  IterateStrongRoots(v, mode);
+  IterateWeakRoots(v, mode);
+}
+
+
+void Heap::IterateWeakRoots(ObjectVisitor* v, VisitMode mode) {
+  v->VisitPointer(reinterpret_cast<Object**>(&roots_[kStringTableRootIndex]));
+  v->Synchronize(VisitorSynchronization::kStringTable);
+  if (mode != VISIT_ALL_IN_SCAVENGE && mode != VISIT_ALL_IN_SWEEP_NEWSPACE) {
+    // Scavenge collections have special processing for this.
+    external_string_table_.Iterate(v);
+  }
+  v->Synchronize(VisitorSynchronization::kExternalStringsTable);
+}
+
+
+void Heap::IterateSmiRoots(ObjectVisitor* v) {
+  // Acquire execution access since we are going to read stack limit values.
+  ExecutionAccess access(isolate());
+  v->VisitPointers(&roots_[kSmiRootsStart], &roots_[kRootListLength]);
+  v->Synchronize(VisitorSynchronization::kSmiRootList);
+}
+
+
+void Heap::IterateStrongRoots(ObjectVisitor* v, VisitMode mode) {
+  v->VisitPointers(&roots_[0], &roots_[kStrongRootListLength]);
+  v->Synchronize(VisitorSynchronization::kStrongRootList);
+
+  v->VisitPointer(bit_cast<Object**>(&hidden_string_));
+  v->Synchronize(VisitorSynchronization::kInternalizedString);
+
+  isolate_->bootstrapper()->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kBootstrapper);
+  isolate_->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kTop);
+  Relocatable::Iterate(isolate_, v);
+  v->Synchronize(VisitorSynchronization::kRelocatable);
+
+  if (isolate_->deoptimizer_data() != NULL) {
+    isolate_->deoptimizer_data()->Iterate(v);
+  }
+  v->Synchronize(VisitorSynchronization::kDebug);
+  isolate_->compilation_cache()->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kCompilationCache);
+
+  // Iterate over local handles in handle scopes.
+  isolate_->handle_scope_implementer()->Iterate(v);
+  isolate_->IterateDeferredHandles(v);
+  v->Synchronize(VisitorSynchronization::kHandleScope);
+
+  // Iterate over the builtin code objects and code stubs in the
+  // heap. Note that it is not necessary to iterate over code objects
+  // on scavenge collections.
+  if (mode != VISIT_ALL_IN_SCAVENGE) {
+    isolate_->builtins()->IterateBuiltins(v);
+  }
+  v->Synchronize(VisitorSynchronization::kBuiltins);
+
+  // Iterate over global handles.
+  switch (mode) {
+    case VISIT_ONLY_STRONG:
+      isolate_->global_handles()->IterateStrongRoots(v);
+      break;
+    case VISIT_ALL_IN_SCAVENGE:
+      isolate_->global_handles()->IterateNewSpaceStrongAndDependentRoots(v);
+      break;
+    case VISIT_ALL_IN_SWEEP_NEWSPACE:
+    case VISIT_ALL:
+      isolate_->global_handles()->IterateAllRoots(v);
+      break;
+  }
+  v->Synchronize(VisitorSynchronization::kGlobalHandles);
+
+  // Iterate over eternal handles.
+  if (mode == VISIT_ALL_IN_SCAVENGE) {
+    isolate_->eternal_handles()->IterateNewSpaceRoots(v);
+  } else {
+    isolate_->eternal_handles()->IterateAllRoots(v);
+  }
+  v->Synchronize(VisitorSynchronization::kEternalHandles);
+
+  // Iterate over pointers being held by inactive threads.
+  isolate_->thread_manager()->Iterate(v);
+  v->Synchronize(VisitorSynchronization::kThreadManager);
+
+  // Iterate over the pointers the Serialization/Deserialization code is
+  // holding.
+  // During garbage collection this keeps the partial snapshot cache alive.
+  // During deserialization of the startup snapshot this creates the partial
+  // snapshot cache and deserializes the objects it refers to.  During
+  // serialization this does nothing, since the partial snapshot cache is
+  // empty.  However the next thing we do is create the partial snapshot,
+  // filling up the partial snapshot cache with objects it needs as we go.
+  SerializerDeserializer::Iterate(isolate_, v);
+  // We don't do a v->Synchronize call here, because in debug mode that will
+  // output a flag to the snapshot.  However at this point the serializer and
+  // deserializer are deliberately a little unsynchronized (see above) so the
+  // checking of the sync flag in the snapshot would fail.
+}
+
+
+// TODO(1236194): Since the heap size is configurable on the command line
+// and through the API, we should gracefully handle the case that the heap
+// size is not big enough to fit all the initial objects.
+bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+                         int max_executable_size, size_t code_range_size) {
+  if (HasBeenSetUp()) return false;
+
+  // Overwrite default configuration.
+  if (max_semi_space_size > 0) {
+    max_semi_space_size_ = max_semi_space_size * MB;
+  }
+  if (max_old_space_size > 0) {
+    max_old_generation_size_ = max_old_space_size * MB;
+  }
+  if (max_executable_size > 0) {
+    max_executable_size_ = max_executable_size * MB;
+  }
+
+  // If max space size flags are specified overwrite the configuration.
+  if (FLAG_max_semi_space_size > 0) {
+    max_semi_space_size_ = FLAG_max_semi_space_size * MB;
+  }
+  if (FLAG_max_old_space_size > 0) {
+    max_old_generation_size_ = FLAG_max_old_space_size * MB;
+  }
+  if (FLAG_max_executable_size > 0) {
+    max_executable_size_ = FLAG_max_executable_size * MB;
+  }
+
+  if (FLAG_stress_compaction) {
+    // This will cause more frequent GCs when stressing.
+    max_semi_space_size_ = Page::kPageSize;
+  }
+
+  if (Snapshot::HaveASnapshotToStartFrom()) {
+    // If we are using a snapshot we always reserve the default amount
+    // of memory for each semispace because code in the snapshot has
+    // write-barrier code that relies on the size and alignment of new
+    // space.  We therefore cannot use a larger max semispace size
+    // than the default reserved semispace size.
+    if (max_semi_space_size_ > reserved_semispace_size_) {
+      max_semi_space_size_ = reserved_semispace_size_;
+      if (FLAG_trace_gc) {
+        PrintPID("Max semi-space size cannot be more than %d kbytes\n",
+                 reserved_semispace_size_ >> 10);
+      }
+    }
+  } else {
+    // If we are not using snapshots we reserve space for the actual
+    // max semispace size.
+    reserved_semispace_size_ = max_semi_space_size_;
+  }
+
+  // The max executable size must be less than or equal to the max old
+  // generation size.
+  if (max_executable_size_ > max_old_generation_size_) {
+    max_executable_size_ = max_old_generation_size_;
+  }
+
+  // The new space size must be a power of two to support single-bit testing
+  // for containment.
+  max_semi_space_size_ =
+      base::bits::RoundUpToPowerOfTwo32(max_semi_space_size_);
+  reserved_semispace_size_ =
+      base::bits::RoundUpToPowerOfTwo32(reserved_semispace_size_);
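+  // For illustration: a requested 3 MB semispace (3145728 bytes) is rounded
+  // up here to the next power of two, 4 MB (4194304 bytes).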
+
+  if (FLAG_min_semi_space_size > 0) {
+    int initial_semispace_size = FLAG_min_semi_space_size * MB;
+    if (initial_semispace_size > max_semi_space_size_) {
+      initial_semispace_size_ = max_semi_space_size_;
+      if (FLAG_trace_gc) {
+        PrintPID(
+            "Min semi-space size cannot be more than the maximum "
+            "semi-space size of %d MB\n",
+            max_semi_space_size_ / MB);
+      }
+    } else {
+      initial_semispace_size_ = initial_semispace_size;
+    }
+  }
+
+  initial_semispace_size_ = Min(initial_semispace_size_, max_semi_space_size_);
+
+  // The old generation is paged and needs at least one page for each space.
+  int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
+  max_old_generation_size_ =
+      Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
+          max_old_generation_size_);
+
+  // We rely on being able to allocate new arrays in paged spaces.
+  DCHECK(Page::kMaxRegularHeapObjectSize >=
+         (JSArray::kSize +
+          FixedArray::SizeFor(JSObject::kInitialMaxFastElementArray) +
+          AllocationMemento::kSize));
+
+  code_range_size_ = code_range_size * MB;
+
+  configured_ = true;
+  return true;
+}
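+// Illustrative call (values hypothetical): ConfigureHeap(16, 512, 256, 0)
+// requests a 16 MB max semispace, a 512 MB old generation and a 256 MB
+// executable cap (all multiplied by MB internally), with no dedicated code
+// range; passing zeros keeps the defaults, as ConfigureHeapDefault() below
+// does.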
+
+
+bool Heap::ConfigureHeapDefault() { return ConfigureHeap(0, 0, 0, 0); }
+
+
+void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
+  *stats->start_marker = HeapStats::kStartMarker;
+  *stats->end_marker = HeapStats::kEndMarker;
+  *stats->new_space_size = new_space_.SizeAsInt();
+  *stats->new_space_capacity = static_cast<int>(new_space_.Capacity());
+  *stats->old_pointer_space_size = old_pointer_space_->SizeOfObjects();
+  *stats->old_pointer_space_capacity = old_pointer_space_->Capacity();
+  *stats->old_data_space_size = old_data_space_->SizeOfObjects();
+  *stats->old_data_space_capacity = old_data_space_->Capacity();
+  *stats->code_space_size = code_space_->SizeOfObjects();
+  *stats->code_space_capacity = code_space_->Capacity();
+  *stats->map_space_size = map_space_->SizeOfObjects();
+  *stats->map_space_capacity = map_space_->Capacity();
+  *stats->cell_space_size = cell_space_->SizeOfObjects();
+  *stats->cell_space_capacity = cell_space_->Capacity();
+  *stats->property_cell_space_size = property_cell_space_->SizeOfObjects();
+  *stats->property_cell_space_capacity = property_cell_space_->Capacity();
+  *stats->lo_space_size = lo_space_->Size();
+  isolate_->global_handles()->RecordStats(stats);
+  *stats->memory_allocator_size = isolate()->memory_allocator()->Size();
+  *stats->memory_allocator_capacity =
+      isolate()->memory_allocator()->Size() +
+      isolate()->memory_allocator()->Available();
+  *stats->os_error = base::OS::GetLastError();
+  if (take_snapshot) {
+    HeapIterator iterator(this);
+    for (HeapObject* obj = iterator.next(); obj != NULL;
+         obj = iterator.next()) {
+      InstanceType type = obj->map()->instance_type();
+      DCHECK(0 <= type && type <= LAST_TYPE);
+      stats->objects_per_type[type]++;
+      stats->size_per_type[type] += obj->Size();
+    }
+  }
+}
+
+
+intptr_t Heap::PromotedSpaceSizeOfObjects() {
+  return old_pointer_space_->SizeOfObjects() +
+         old_data_space_->SizeOfObjects() + code_space_->SizeOfObjects() +
+         map_space_->SizeOfObjects() + cell_space_->SizeOfObjects() +
+         property_cell_space_->SizeOfObjects() + lo_space_->SizeOfObjects();
+}
+
+
+int64_t Heap::PromotedExternalMemorySize() {
+  if (amount_of_external_allocated_memory_ <=
+      amount_of_external_allocated_memory_at_last_global_gc_)
+    return 0;
+  return amount_of_external_allocated_memory_ -
+         amount_of_external_allocated_memory_at_last_global_gc_;
+}
+
+
+intptr_t Heap::OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                            int freed_global_handles) {
+  const int kMaxHandles = 1000;
+  const int kMinHandles = 100;
+  double min_factor = 1.1;
+  double max_factor = 4;
+  // We set the old generation growing factor to 2 to grow the heap slower on
+  // memory-constrained devices.
+  if (max_old_generation_size_ <= kMaxOldSpaceSizeMediumMemoryDevice) {
+    max_factor = 2;
+  }
+  // If there are many freed global handles, then the next full GC will
+  // likely collect a lot of garbage. Choose the heap growing factor
+  // depending on freed global handles.
+  // TODO(ulan, hpayer): Take into account mutator utilization.
+  double factor;
+  if (freed_global_handles <= kMinHandles) {
+    factor = max_factor;
+  } else if (freed_global_handles >= kMaxHandles) {
+    factor = min_factor;
+  } else {
+    // Compute factor using linear interpolation between points
+    // (kMinHandles, max_factor) and (kMaxHandles, min_factor).
+    factor = max_factor -
+             (freed_global_handles - kMinHandles) * (max_factor - min_factor) /
+                 (kMaxHandles - kMinHandles);
+  }
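+  // Worked example: freed_global_handles == 550 gives
+  // factor = 4 - (550 - 100) * (4 - 1.1) / (1000 - 100) = 4 - 1.45 = 2.55.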
+
+  if (FLAG_stress_compaction ||
+      mark_compact_collector()->reduce_memory_footprint_) {
+    factor = min_factor;
+  }
+
+  intptr_t limit = static_cast<intptr_t>(old_gen_size * factor);
+  limit = Max(limit, kMinimumOldGenerationAllocationLimit);
+  limit += new_space_.Capacity();
+  intptr_t halfway_to_the_max = (old_gen_size + max_old_generation_size_) / 2;
+  return Min(limit, halfway_to_the_max);
+}
+
+
+void Heap::EnableInlineAllocation() {
+  if (!inline_allocation_disabled_) return;
+  inline_allocation_disabled_ = false;
+
+  // Update inline allocation limit for new space.
+  new_space()->UpdateInlineAllocationLimit(0);
+}
+
+
+void Heap::DisableInlineAllocation() {
+  if (inline_allocation_disabled_) return;
+  inline_allocation_disabled_ = true;
+
+  // Update inline allocation limit for new space.
+  new_space()->UpdateInlineAllocationLimit(0);
+
+  // Update inline allocation limit for old spaces.
+  PagedSpaces spaces(this);
+  for (PagedSpace* space = spaces.next(); space != NULL;
+       space = spaces.next()) {
+    space->EmptyAllocationInfo();
+  }
+}
+
+
+V8_DECLARE_ONCE(initialize_gc_once);
+
+static void InitializeGCOnce() {
+  InitializeScavengingVisitorsTables();
+  NewSpaceScavenger::Initialize();
+  MarkCompactCollector::Initialize();
+}
+
+
+bool Heap::SetUp() {
+#ifdef DEBUG
+  allocation_timeout_ = FLAG_gc_interval;
+#endif
+
+  // Initialize heap spaces and initial maps and objects. Whenever something
+  // goes wrong, just return false. The caller should check the results and
+  // call Heap::TearDown() to release allocated memory.
+  //
+  // If the heap is not yet configured (e.g. through the API), configure it.
+  // Configuration is based on the flags new-space-size (really the semispace
+  // size) and old-space-size if set or the initial values of semispace_size_
+  // and old_generation_size_ otherwise.
+  if (!configured_) {
+    if (!ConfigureHeapDefault()) return false;
+  }
+
+  base::CallOnce(&initialize_gc_once, &InitializeGCOnce);
+
+  MarkMapPointersAsEncoded(false);
+
+  // Set up memory allocator.
+  if (!isolate_->memory_allocator()->SetUp(MaxReserved(), MaxExecutableSize()))
+    return false;
+
+  // Set up new space.
+  if (!new_space_.SetUp(reserved_semispace_size_, max_semi_space_size_)) {
+    return false;
+  }
+  new_space_top_after_last_gc_ = new_space()->top();
+
+  // Initialize old pointer space.
+  old_pointer_space_ = new OldSpace(this, max_old_generation_size_,
+                                    OLD_POINTER_SPACE, NOT_EXECUTABLE);
+  if (old_pointer_space_ == NULL) return false;
+  if (!old_pointer_space_->SetUp()) return false;
+
+  // Initialize old data space.
+  old_data_space_ = new OldSpace(this, max_old_generation_size_, OLD_DATA_SPACE,
+                                 NOT_EXECUTABLE);
+  if (old_data_space_ == NULL) return false;
+  if (!old_data_space_->SetUp()) return false;
+
+  if (!isolate_->code_range()->SetUp(code_range_size_)) return false;
+
+  // Initialize the code space, set its maximum capacity to the old
+  // generation size. It needs executable memory.
+  code_space_ =
+      new OldSpace(this, max_old_generation_size_, CODE_SPACE, EXECUTABLE);
+  if (code_space_ == NULL) return false;
+  if (!code_space_->SetUp()) return false;
+
+  // Initialize map space.
+  map_space_ = new MapSpace(this, max_old_generation_size_, MAP_SPACE);
+  if (map_space_ == NULL) return false;
+  if (!map_space_->SetUp()) return false;
+
+  // Initialize simple cell space.
+  cell_space_ = new CellSpace(this, max_old_generation_size_, CELL_SPACE);
+  if (cell_space_ == NULL) return false;
+  if (!cell_space_->SetUp()) return false;
+
+  // Initialize global property cell space.
+  property_cell_space_ = new PropertyCellSpace(this, max_old_generation_size_,
+                                               PROPERTY_CELL_SPACE);
+  if (property_cell_space_ == NULL) return false;
+  if (!property_cell_space_->SetUp()) return false;
+
+  // The large object space may contain code or data.  We set the memory
+  // to be non-executable here for safety, but this means we need to enable it
+  // explicitly when allocating large code objects.
+  lo_space_ = new LargeObjectSpace(this, max_old_generation_size_, LO_SPACE);
+  if (lo_space_ == NULL) return false;
+  if (!lo_space_->SetUp()) return false;
+
+  // Set up the seed that is used to randomize the string hash function.
+  DCHECK(hash_seed() == 0);
+  if (FLAG_randomize_hashes) {
+    if (FLAG_hash_seed == 0) {
+      int rnd = isolate()->random_number_generator()->NextInt();
+      set_hash_seed(Smi::FromInt(rnd & Name::kHashBitMask));
+    } else {
+      set_hash_seed(Smi::FromInt(FLAG_hash_seed));
+    }
+  }
+
+  LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
+  LOG(isolate_, IntPtrTEvent("heap-available", Available()));
+
+  store_buffer()->SetUp();
+
+  mark_compact_collector()->SetUp();
+
+  return true;
+}
+
+
+bool Heap::CreateHeapObjects() {
+  // Create initial maps.
+  if (!CreateInitialMaps()) return false;
+  CreateApiObjects();
+
+  // Create initial objects
+  CreateInitialObjects();
+  CHECK_EQ(0, gc_count_);
+
+  set_native_contexts_list(undefined_value());
+  set_array_buffers_list(undefined_value());
+  set_allocation_sites_list(undefined_value());
+  weak_object_to_code_table_ = undefined_value();
+  return true;
+}
+
+
+void Heap::SetStackLimits() {
+  DCHECK(isolate_ != NULL);
+  DCHECK(isolate_ == isolate());
+  // On 64 bit machines, pointers are generally out of range of Smis.  We write
+  // something that looks like an out of range Smi to the GC.
+
+  // Set up the special root array entries containing the stack limits.
+  // These are actually addresses, but the tag makes the GC ignore it.
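+  // With the usual kSmiTag == 0 and kSmiTagMask == 1, the masking below
+  // simply clears the least significant bit, so the stored value looks like
+  // a Smi to the GC while remaining recoverable as an address.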
+  roots_[kStackLimitRootIndex] = reinterpret_cast<Object*>(
+      (isolate_->stack_guard()->jslimit() & ~kSmiTagMask) | kSmiTag);
+  roots_[kRealStackLimitRootIndex] = reinterpret_cast<Object*>(
+      (isolate_->stack_guard()->real_jslimit() & ~kSmiTagMask) | kSmiTag);
+}
+
+
+void Heap::TearDown() {
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+
+  UpdateMaximumCommitted();
+
+  if (FLAG_print_cumulative_gc_stat) {
+    PrintF("\n");
+    PrintF("gc_count=%d ", gc_count_);
+    PrintF("mark_sweep_count=%d ", ms_count_);
+    PrintF("max_gc_pause=%.1f ", get_max_gc_pause());
+    PrintF("total_gc_time=%.1f ", total_gc_time_ms_);
+    PrintF("min_in_mutator=%.1f ", get_min_in_mutator());
+    PrintF("max_alive_after_gc=%" V8_PTR_PREFIX "d ", get_max_alive_after_gc());
+    PrintF("total_marking_time=%.1f ", tracer_.cumulative_sweeping_duration());
+    PrintF("total_sweeping_time=%.1f ", tracer_.cumulative_sweeping_duration());
+    PrintF("\n\n");
+  }
+
+  if (FLAG_print_max_heap_committed) {
+    PrintF("\n");
+    PrintF("maximum_committed_by_heap=%" V8_PTR_PREFIX "d ",
+           MaximumCommittedMemory());
+    PrintF("maximum_committed_by_new_space=%" V8_PTR_PREFIX "d ",
+           new_space_.MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_pointer_space=%" V8_PTR_PREFIX "d ",
+           old_data_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+           old_pointer_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_old_data_space=%" V8_PTR_PREFIX "d ",
+           old_pointer_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_code_space=%" V8_PTR_PREFIX "d ",
+           code_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_map_space=%" V8_PTR_PREFIX "d ",
+           map_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_cell_space=%" V8_PTR_PREFIX "d ",
+           cell_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_property_space=%" V8_PTR_PREFIX "d ",
+           property_cell_space_->MaximumCommittedMemory());
+    PrintF("maximum_committed_by_lo_space=%" V8_PTR_PREFIX "d ",
+           lo_space_->MaximumCommittedMemory());
+    PrintF("\n\n");
+  }
+
+  if (FLAG_verify_predictable) {
+    PrintAlloctionsHash();
+  }
+
+  TearDownArrayBuffers();
+
+  isolate_->global_handles()->TearDown();
+
+  external_string_table_.TearDown();
+
+  mark_compact_collector()->TearDown();
+
+  new_space_.TearDown();
+
+  if (old_pointer_space_ != NULL) {
+    old_pointer_space_->TearDown();
+    delete old_pointer_space_;
+    old_pointer_space_ = NULL;
+  }
+
+  if (old_data_space_ != NULL) {
+    old_data_space_->TearDown();
+    delete old_data_space_;
+    old_data_space_ = NULL;
+  }
+
+  if (code_space_ != NULL) {
+    code_space_->TearDown();
+    delete code_space_;
+    code_space_ = NULL;
+  }
+
+  if (map_space_ != NULL) {
+    map_space_->TearDown();
+    delete map_space_;
+    map_space_ = NULL;
+  }
+
+  if (cell_space_ != NULL) {
+    cell_space_->TearDown();
+    delete cell_space_;
+    cell_space_ = NULL;
+  }
+
+  if (property_cell_space_ != NULL) {
+    property_cell_space_->TearDown();
+    delete property_cell_space_;
+    property_cell_space_ = NULL;
+  }
+
+  if (lo_space_ != NULL) {
+    lo_space_->TearDown();
+    delete lo_space_;
+    lo_space_ = NULL;
+  }
+
+  store_buffer()->TearDown();
+  incremental_marking()->TearDown();
+
+  isolate_->memory_allocator()->TearDown();
+}
+
+
+void Heap::AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+                                 GCType gc_type, bool pass_isolate) {
+  DCHECK(callback != NULL);
+  GCPrologueCallbackPair pair(callback, gc_type, pass_isolate);
+  DCHECK(!gc_prologue_callbacks_.Contains(pair));
+  return gc_prologue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback) {
+  DCHECK(callback != NULL);
+  for (int i = 0; i < gc_prologue_callbacks_.length(); ++i) {
+    if (gc_prologue_callbacks_[i].callback == callback) {
+      gc_prologue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+void Heap::AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+                                 GCType gc_type, bool pass_isolate) {
+  DCHECK(callback != NULL);
+  GCEpilogueCallbackPair pair(callback, gc_type, pass_isolate);
+  DCHECK(!gc_epilogue_callbacks_.Contains(pair));
+  return gc_epilogue_callbacks_.Add(pair);
+}
+
+
+void Heap::RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback) {
+  DCHECK(callback != NULL);
+  for (int i = 0; i < gc_epilogue_callbacks_.length(); ++i) {
+    if (gc_epilogue_callbacks_[i].callback == callback) {
+      gc_epilogue_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+// TODO(ishell): Find a better place for this.
+void Heap::AddWeakObjectToCodeDependency(Handle<Object> obj,
+                                         Handle<DependentCode> dep) {
+  DCHECK(!InNewSpace(*obj));
+  DCHECK(!InNewSpace(*dep));
+  // This handle scope keeps the table handle local to this function, which
+  // allows us to safely skip write barriers in table update operations.
+  HandleScope scope(isolate());
+  Handle<WeakHashTable> table(WeakHashTable::cast(weak_object_to_code_table_),
+                              isolate());
+  table = WeakHashTable::Put(table, obj, dep);
+
+  if (ShouldZapGarbage() && weak_object_to_code_table_ != *table) {
+    WeakHashTable::cast(weak_object_to_code_table_)->Zap(the_hole_value());
+  }
+  set_weak_object_to_code_table(*table);
+  DCHECK_EQ(*dep, table->Lookup(obj));
+}
+
+
+DependentCode* Heap::LookupWeakObjectToCodeDependency(Handle<Object> obj) {
+  Object* dep = WeakHashTable::cast(weak_object_to_code_table_)->Lookup(obj);
+  if (dep->IsDependentCode()) return DependentCode::cast(dep);
+  return DependentCode::cast(empty_fixed_array());
+}
+
+
+void Heap::EnsureWeakObjectToCodeTable() {
+  if (!weak_object_to_code_table()->IsHashTable()) {
+    set_weak_object_to_code_table(
+        *WeakHashTable::New(isolate(), 16, USE_DEFAULT_MINIMUM_CAPACITY,
+                            TENURED));
+  }
+}
+
+
+void Heap::FatalProcessOutOfMemory(const char* location, bool take_snapshot) {
+  v8::internal::V8::FatalProcessOutOfMemory(location, take_snapshot);
+}
+
+#ifdef DEBUG
+
+class PrintHandleVisitor : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++)
+      PrintF("  handle %p to %p\n", reinterpret_cast<void*>(p),
+             reinterpret_cast<void*>(*p));
+  }
+};
+
+
+void Heap::PrintHandles() {
+  PrintF("Handles:\n");
+  PrintHandleVisitor v;
+  isolate_->handle_scope_implementer()->Iterate(&v);
+}
+
+#endif
+
+
+Space* AllSpaces::next() {
+  switch (counter_++) {
+    case NEW_SPACE:
+      return heap_->new_space();
+    case OLD_POINTER_SPACE:
+      return heap_->old_pointer_space();
+    case OLD_DATA_SPACE:
+      return heap_->old_data_space();
+    case CODE_SPACE:
+      return heap_->code_space();
+    case MAP_SPACE:
+      return heap_->map_space();
+    case CELL_SPACE:
+      return heap_->cell_space();
+    case PROPERTY_CELL_SPACE:
+      return heap_->property_cell_space();
+    case LO_SPACE:
+      return heap_->lo_space();
+    default:
+      return NULL;
+  }
+}
+
+
+PagedSpace* PagedSpaces::next() {
+  switch (counter_++) {
+    case OLD_POINTER_SPACE:
+      return heap_->old_pointer_space();
+    case OLD_DATA_SPACE:
+      return heap_->old_data_space();
+    case CODE_SPACE:
+      return heap_->code_space();
+    case MAP_SPACE:
+      return heap_->map_space();
+    case CELL_SPACE:
+      return heap_->cell_space();
+    case PROPERTY_CELL_SPACE:
+      return heap_->property_cell_space();
+    default:
+      return NULL;
+  }
+}
+
+
+OldSpace* OldSpaces::next() {
+  switch (counter_++) {
+    case OLD_POINTER_SPACE:
+      return heap_->old_pointer_space();
+    case OLD_DATA_SPACE:
+      return heap_->old_data_space();
+    case CODE_SPACE:
+      return heap_->code_space();
+    default:
+      return NULL;
+  }
+}
+
+
+SpaceIterator::SpaceIterator(Heap* heap)
+    : heap_(heap),
+      current_space_(FIRST_SPACE),
+      iterator_(NULL),
+      size_func_(NULL) {}
+
+
+SpaceIterator::SpaceIterator(Heap* heap, HeapObjectCallback size_func)
+    : heap_(heap),
+      current_space_(FIRST_SPACE),
+      iterator_(NULL),
+      size_func_(size_func) {}
+
+
+SpaceIterator::~SpaceIterator() {
+  // Delete active iterator if any.
+  delete iterator_;
+}
+
+
+bool SpaceIterator::has_next() {
+  // Iterate until no more spaces.
+  return current_space_ != LAST_SPACE;
+}
+
+
+ObjectIterator* SpaceIterator::next() {
+  if (iterator_ != NULL) {
+    delete iterator_;
+    iterator_ = NULL;
+    // Move to the next space
+    current_space_++;
+    if (current_space_ > LAST_SPACE) {
+      return NULL;
+    }
+  }
+
+  // Return iterator for the new current space.
+  return CreateIterator();
+}
+
+
+// Create an iterator for the space to iterate.
+ObjectIterator* SpaceIterator::CreateIterator() {
+  DCHECK(iterator_ == NULL);
+
+  switch (current_space_) {
+    case NEW_SPACE:
+      iterator_ = new SemiSpaceIterator(heap_->new_space(), size_func_);
+      break;
+    case OLD_POINTER_SPACE:
+      iterator_ =
+          new HeapObjectIterator(heap_->old_pointer_space(), size_func_);
+      break;
+    case OLD_DATA_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->old_data_space(), size_func_);
+      break;
+    case CODE_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->code_space(), size_func_);
+      break;
+    case MAP_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->map_space(), size_func_);
+      break;
+    case CELL_SPACE:
+      iterator_ = new HeapObjectIterator(heap_->cell_space(), size_func_);
+      break;
+    case PROPERTY_CELL_SPACE:
+      iterator_ =
+          new HeapObjectIterator(heap_->property_cell_space(), size_func_);
+      break;
+    case LO_SPACE:
+      iterator_ = new LargeObjectIterator(heap_->lo_space(), size_func_);
+      break;
+  }
+
+  // Return the newly allocated iterator.
+  DCHECK(iterator_ != NULL);
+  return iterator_;
+}
+
+
+class HeapObjectsFilter {
+ public:
+  virtual ~HeapObjectsFilter() {}
+  virtual bool SkipObject(HeapObject* object) = 0;
+};
+
+
+class UnreachableObjectsFilter : public HeapObjectsFilter {
+ public:
+  explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) {
+    MarkReachableObjects();
+  }
+
+  ~UnreachableObjectsFilter() {
+    heap_->mark_compact_collector()->ClearMarkbits();
+  }
+
+  bool SkipObject(HeapObject* object) {
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    return !mark_bit.Get();
+  }
+
+ private:
+  class MarkingVisitor : public ObjectVisitor {
+   public:
+    MarkingVisitor() : marking_stack_(10) {}
+
+    void VisitPointers(Object** start, Object** end) {
+      for (Object** p = start; p < end; p++) {
+        if (!(*p)->IsHeapObject()) continue;
+        HeapObject* obj = HeapObject::cast(*p);
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        if (!mark_bit.Get()) {
+          mark_bit.Set();
+          marking_stack_.Add(obj);
+        }
+      }
+    }
+
+    void TransitiveClosure() {
+      while (!marking_stack_.is_empty()) {
+        HeapObject* obj = marking_stack_.RemoveLast();
+        obj->Iterate(this);
+      }
+    }
+
+   private:
+    List<HeapObject*> marking_stack_;
+  };
+
+  void MarkReachableObjects() {
+    MarkingVisitor visitor;
+    heap_->IterateRoots(&visitor, VISIT_ALL);
+    visitor.TransitiveClosure();
+  }
+
+  Heap* heap_;
+  DisallowHeapAllocation no_allocation_;
+};
+
+
+HeapIterator::HeapIterator(Heap* heap)
+    : make_heap_iterable_helper_(heap),
+      no_heap_allocation_(),
+      heap_(heap),
+      filtering_(HeapIterator::kNoFiltering),
+      filter_(NULL) {
+  Init();
+}
+
+
+HeapIterator::HeapIterator(Heap* heap,
+                           HeapIterator::HeapObjectsFiltering filtering)
+    : make_heap_iterable_helper_(heap),
+      no_heap_allocation_(),
+      heap_(heap),
+      filtering_(filtering),
+      filter_(NULL) {
+  Init();
+}
+
+
+HeapIterator::~HeapIterator() { Shutdown(); }
+
+
+void HeapIterator::Init() {
+  // Start the iteration.
+  space_iterator_ = new SpaceIterator(heap_);
+  switch (filtering_) {
+    case kFilterUnreachable:
+      filter_ = new UnreachableObjectsFilter(heap_);
+      break;
+    default:
+      break;
+  }
+  object_iterator_ = space_iterator_->next();
+}
+
+
+void HeapIterator::Shutdown() {
+#ifdef DEBUG
+  // Assert that in filtering mode we have iterated through all
+  // objects. Otherwise, heap will be left in an inconsistent state.
+  if (filtering_ != kNoFiltering) {
+    DCHECK(object_iterator_ == NULL);
+  }
+#endif
+  // Make sure the last iterator is deallocated.
+  delete space_iterator_;
+  space_iterator_ = NULL;
+  object_iterator_ = NULL;
+  delete filter_;
+  filter_ = NULL;
+}
+
+
+HeapObject* HeapIterator::next() {
+  if (filter_ == NULL) return NextObject();
+
+  HeapObject* obj = NextObject();
+  while (obj != NULL && filter_->SkipObject(obj)) obj = NextObject();
+  return obj;
+}
+
+
+HeapObject* HeapIterator::NextObject() {
+  // No iterator means we are done.
+  if (object_iterator_ == NULL) return NULL;
+
+  if (HeapObject* obj = object_iterator_->next_object()) {
+    // If the current iterator has more objects we are fine.
+    return obj;
+  } else {
+    // Go through the spaces looking for one that has objects.
+    while (space_iterator_->has_next()) {
+      object_iterator_ = space_iterator_->next();
+      if (HeapObject* obj = object_iterator_->next_object()) {
+        return obj;
+      }
+    }
+  }
+  // Done with the last space.
+  object_iterator_ = NULL;
+  return NULL;
+}
+
+
+void HeapIterator::reset() {
+  // Restart the iterator.
+  Shutdown();
+  Init();
+}
+
+
+#ifdef DEBUG
+
+Object* const PathTracer::kAnyGlobalObject = NULL;
+
+class PathTracer::MarkVisitor : public ObjectVisitor {
+ public:
+  explicit MarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
+  void VisitPointers(Object** start, Object** end) {
+    // Scan all HeapObject pointers in [start, end)
+    for (Object** p = start; !tracer_->found() && (p < end); p++) {
+      if ((*p)->IsHeapObject()) tracer_->MarkRecursively(p, this);
+    }
+  }
+
+ private:
+  PathTracer* tracer_;
+};
+
+
+class PathTracer::UnmarkVisitor : public ObjectVisitor {
+ public:
+  explicit UnmarkVisitor(PathTracer* tracer) : tracer_(tracer) {}
+  void VisitPointers(Object** start, Object** end) {
+    // Scan all HeapObject pointers in [start, end)
+    for (Object** p = start; p < end; p++) {
+      if ((*p)->IsHeapObject()) tracer_->UnmarkRecursively(p, this);
+    }
+  }
+
+ private:
+  PathTracer* tracer_;
+};
+
+
+void PathTracer::VisitPointers(Object** start, Object** end) {
+  bool done = ((what_to_find_ == FIND_FIRST) && found_target_);
+  // Visit all HeapObject pointers in [start, end)
+  for (Object** p = start; !done && (p < end); p++) {
+    if ((*p)->IsHeapObject()) {
+      TracePathFrom(p);
+      done = ((what_to_find_ == FIND_FIRST) && found_target_);
+    }
+  }
+}
+
+
+void PathTracer::Reset() {
+  found_target_ = false;
+  object_stack_.Clear();
+}
+
+
+void PathTracer::TracePathFrom(Object** root) {
+  DCHECK((search_target_ == kAnyGlobalObject) ||
+         search_target_->IsHeapObject());
+  found_target_in_trace_ = false;
+  Reset();
+
+  MarkVisitor mark_visitor(this);
+  MarkRecursively(root, &mark_visitor);
+
+  UnmarkVisitor unmark_visitor(this);
+  UnmarkRecursively(root, &unmark_visitor);
+
+  ProcessResults();
+}
+
+
+static bool SafeIsNativeContext(HeapObject* obj) {
+  return obj->map() == obj->GetHeap()->raw_unchecked_native_context_map();
+}
+
+
+void PathTracer::MarkRecursively(Object** p, MarkVisitor* mark_visitor) {
+  if (!(*p)->IsHeapObject()) return;
+
+  HeapObject* obj = HeapObject::cast(*p);
+
+  MapWord map_word = obj->map_word();
+  if (!map_word.ToMap()->IsHeapObject()) return;  // visited before
+
+  if (found_target_in_trace_) return;  // stop if target found
+  object_stack_.Add(obj);
+  if (((search_target_ == kAnyGlobalObject) && obj->IsJSGlobalObject()) ||
+      (obj == search_target_)) {
+    found_target_in_trace_ = true;
+    found_target_ = true;
+    return;
+  }
+
+  bool is_native_context = SafeIsNativeContext(obj);
+
+  // not visited yet
+  Map* map = Map::cast(map_word.ToMap());
+
+  MapWord marked_map_word =
+      MapWord::FromRawValue(obj->map_word().ToRawValue() + kMarkTag);
+  obj->set_map_word(marked_map_word);
+
+  // Scan the object body.
+  if (is_native_context && (visit_mode_ == VISIT_ONLY_STRONG)) {
+    // This is specialized to scan Context's properly.
+    Object** start =
+        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize);
+    Object** end =
+        reinterpret_cast<Object**>(obj->address() + Context::kHeaderSize +
+                                   Context::FIRST_WEAK_SLOT * kPointerSize);
+    mark_visitor->VisitPointers(start, end);
+  } else {
+    obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), mark_visitor);
+  }
+
+  // Scan the map after the body because the body is a lot more interesting
+  // when doing leak detection.
+  MarkRecursively(reinterpret_cast<Object**>(&map), mark_visitor);
+
+  if (!found_target_in_trace_) {  // don't pop if found the target
+    object_stack_.RemoveLast();
+  }
+}
+
+
+void PathTracer::UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor) {
+  if (!(*p)->IsHeapObject()) return;
+
+  HeapObject* obj = HeapObject::cast(*p);
+
+  MapWord map_word = obj->map_word();
+  if (map_word.ToMap()->IsHeapObject()) return;  // unmarked already
+
+  MapWord unmarked_map_word =
+      MapWord::FromRawValue(map_word.ToRawValue() - kMarkTag);
+  obj->set_map_word(unmarked_map_word);
+
+  Map* map = Map::cast(unmarked_map_word.ToMap());
+
+  UnmarkRecursively(reinterpret_cast<Object**>(&map), unmark_visitor);
+
+  obj->IterateBody(map->instance_type(), obj->SizeFromMap(map), unmark_visitor);
+}
+
+
+void PathTracer::ProcessResults() {
+  if (found_target_) {
+    OFStream os(stdout);
+    os << "=====================================\n"
+       << "====        Path to object       ====\n"
+       << "=====================================\n\n";
+
+    DCHECK(!object_stack_.is_empty());
+    for (int i = 0; i < object_stack_.length(); i++) {
+      if (i > 0) os << "\n     |\n     |\n     V\n\n";
+      object_stack_[i]->Print(os);
+    }
+    os << "=====================================\n";
+  }
+}
+
+
+// Triggers a depth-first traversal of reachable objects from one
+// given root object and finds a path to a specific heap object and
+// prints it.
+void Heap::TracePathToObjectFrom(Object* target, Object* root) {
+  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
+  tracer.VisitPointer(&root);
+}
+
+
+// Triggers a depth-first traversal of reachable objects from roots
+// and finds a path to a specific heap object and prints it.
+void Heap::TracePathToObject(Object* target) {
+  PathTracer tracer(target, PathTracer::FIND_ALL, VISIT_ALL);
+  IterateRoots(&tracer, VISIT_ONLY_STRONG);
+}
+
+
+// Triggers a depth-first traversal of reachable objects from roots
+// and finds a path to any global object and prints it. Useful for
+// determining the source for leaks of global objects.
+void Heap::TracePathToGlobal() {
+  PathTracer tracer(PathTracer::kAnyGlobalObject, PathTracer::FIND_ALL,
+                    VISIT_ALL);
+  IterateRoots(&tracer, VISIT_ONLY_STRONG);
+}
+#endif
+
+
+void Heap::UpdateCumulativeGCStatistics(double duration,
+                                        double spent_in_mutator,
+                                        double marking_time) {
+  if (FLAG_print_cumulative_gc_stat) {
+    total_gc_time_ms_ += duration;
+    max_gc_pause_ = Max(max_gc_pause_, duration);
+    max_alive_after_gc_ = Max(max_alive_after_gc_, SizeOfObjects());
+    min_in_mutator_ = Min(min_in_mutator_, spent_in_mutator);
+  } else if (FLAG_trace_gc_verbose) {
+    total_gc_time_ms_ += duration;
+  }
+
+  marking_time_ += marking_time;
+}
+
+
+int KeyedLookupCache::Hash(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
+  // Uses only lower 32 bits if pointers are larger.
+  uintptr_t addr_hash =
+      static_cast<uint32_t>(reinterpret_cast<uintptr_t>(*map)) >> kMapHashShift;
+  return static_cast<uint32_t>((addr_hash ^ name->Hash()) & kCapacityMask);
+}
+
+
+int KeyedLookupCache::Lookup(Handle<Map> map, Handle<Name> name) {
+  DisallowHeapAllocation no_gc;
+  int index = (Hash(map, name) & kHashMask);
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+    if ((key.map == *map) && key.name->Equals(*name)) {
+      return field_offsets_[index + i];
+    }
+  }
+  return kNotFound;
+}
+
+
+void KeyedLookupCache::Update(Handle<Map> map, Handle<Name> name,
+                              int field_offset) {
+  DisallowHeapAllocation no_gc;
+  if (!name->IsUniqueName()) {
+    if (!StringTable::InternalizeStringIfExists(
+             name->GetIsolate(), Handle<String>::cast(name)).ToHandle(&name)) {
+      return;
+    }
+  }
+  // This cache is cleared only between mark compact passes, so we expect the
+  // cache to only contain old space names.
+  DCHECK(!map->GetIsolate()->heap()->InNewSpace(*name));
+
+  int index = (Hash(map, name) & kHashMask);
+  // After a GC there will be free slots, so we use them in order (this may
+  // help to get the most frequently used one in position 0).
+  for (int i = 0; i < kEntriesPerBucket; i++) {
+    Key& key = keys_[index + i];
+    Object* free_entry_indicator = NULL;
+    if (key.map == free_entry_indicator) {
+      key.map = *map;
+      key.name = *name;
+      field_offsets_[index + i] = field_offset;
+      return;
+    }
+  }
+  // No free entry found in this bucket, so we move them all down one and
+  // put the new entry at position zero.
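+  // For example, assuming kEntriesPerBucket == 4: if all four slots are
+  // occupied, the entries at index..index+2 shift to index+1..index+3
+  // (evicting the old last entry) and the new key lands at position index.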
+  for (int i = kEntriesPerBucket - 1; i > 0; i--) {
+    Key& key = keys_[index + i];
+    Key& key2 = keys_[index + i - 1];
+    key = key2;
+    field_offsets_[index + i] = field_offsets_[index + i - 1];
+  }
+
+  // Write the new first entry.
+  Key& key = keys_[index];
+  key.map = *map;
+  key.name = *name;
+  field_offsets_[index] = field_offset;
+}
+
+
+void KeyedLookupCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
+}
+
+
+void DescriptorLookupCache::Clear() {
+  for (int index = 0; index < kLength; index++) keys_[index].source = NULL;
+}
+
+
+void ExternalStringTable::CleanUp() {
+  int last = 0;
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    if (new_space_strings_[i] == heap_->the_hole_value()) {
+      continue;
+    }
+    DCHECK(new_space_strings_[i]->IsExternalString());
+    if (heap_->InNewSpace(new_space_strings_[i])) {
+      new_space_strings_[last++] = new_space_strings_[i];
+    } else {
+      old_space_strings_.Add(new_space_strings_[i]);
+    }
+  }
+  new_space_strings_.Rewind(last);
+  new_space_strings_.Trim();
+
+  last = 0;
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    if (old_space_strings_[i] == heap_->the_hole_value()) {
+      continue;
+    }
+    DCHECK(old_space_strings_[i]->IsExternalString());
+    DCHECK(!heap_->InNewSpace(old_space_strings_[i]));
+    old_space_strings_[last++] = old_space_strings_[i];
+  }
+  old_space_strings_.Rewind(last);
+  old_space_strings_.Trim();
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+}
+
+
+void ExternalStringTable::TearDown() {
+  for (int i = 0; i < new_space_strings_.length(); ++i) {
+    heap_->FinalizeExternalString(ExternalString::cast(new_space_strings_[i]));
+  }
+  new_space_strings_.Free();
+  for (int i = 0; i < old_space_strings_.length(); ++i) {
+    heap_->FinalizeExternalString(ExternalString::cast(old_space_strings_[i]));
+  }
+  old_space_strings_.Free();
+}
+
+
+void Heap::QueueMemoryChunkForFree(MemoryChunk* chunk) {
+  chunk->set_next_chunk(chunks_queued_for_free_);
+  chunks_queued_for_free_ = chunk;
+}
+
+
+void Heap::FreeQueuedChunks() {
+  if (chunks_queued_for_free_ == NULL) return;
+  MemoryChunk* next;
+  MemoryChunk* chunk;
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    chunk->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+
+    if (chunk->owner()->identity() == LO_SPACE) {
+      // StoreBuffer::Filter relies on MemoryChunk::FromAnyPointerAddress.
+      // If FromAnyPointerAddress encounters a slot that belongs to a large
+      // chunk queued for deletion, it will fail to find the chunk, because
+      // it tries to search the list of pages owned by the large object
+      // space, and the queued chunks were detached from that list.
+      // To work around this we split the large chunk into normal kPageSize
+      // aligned pieces and initialize the size, owner and flags fields of
+      // every piece.
+      // If FromAnyPointerAddress encounters a slot that belongs to one of
+      // these smaller pieces it will treat it as a slot on a normal Page.
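+      // For instance, a large chunk spanning three pages gets fake headers
+      // at chunk->address() + 1 * kPageSize and + 2 * kPageSize, each
+      // claiming Page::kPageSize bytes and lo_space() ownership, so any
+      // interior slot resolves to one of these pieces.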
+      Address chunk_end = chunk->address() + chunk->size();
+      MemoryChunk* inner =
+          MemoryChunk::FromAddress(chunk->address() + Page::kPageSize);
+      MemoryChunk* inner_last = MemoryChunk::FromAddress(chunk_end - 1);
+      while (inner <= inner_last) {
+        // Size of a large chunk is always a multiple of
+        // OS::AllocateAlignment() so there is always
+        // enough space for a fake MemoryChunk header.
+        Address area_end = Min(inner->address() + Page::kPageSize, chunk_end);
+        // Guard against overflow.
+        if (area_end < inner->address()) area_end = chunk_end;
+        inner->SetArea(inner->address(), area_end);
+        inner->set_size(Page::kPageSize);
+        inner->set_owner(lo_space());
+        inner->SetFlag(MemoryChunk::ABOUT_TO_BE_FREED);
+        inner = MemoryChunk::FromAddress(inner->address() + Page::kPageSize);
+      }
+    }
+  }
+  isolate_->heap()->store_buffer()->Compact();
+  isolate_->heap()->store_buffer()->Filter(MemoryChunk::ABOUT_TO_BE_FREED);
+  for (chunk = chunks_queued_for_free_; chunk != NULL; chunk = next) {
+    next = chunk->next_chunk();
+    isolate_->memory_allocator()->Free(chunk);
+  }
+  chunks_queued_for_free_ = NULL;
+}
+
+
+void Heap::RememberUnmappedPage(Address page, bool compacted) {
+  uintptr_t p = reinterpret_cast<uintptr_t>(page);
+  // Tag the page pointer to make it findable in the dump file.
+  if (compacted) {
+    p ^= 0xc1ead & (Page::kPageSize - 1);  // Cleared.
+  } else {
+    p ^= 0x1d1ed & (Page::kPageSize - 1);  // I died.
+  }
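+  // Example, assuming a 1 MB page size: a compacted page at 0x12300000 is
+  // stored as 0x123c1ead, making the "c1ead" (cleared) tag readable in a
+  // minidump; uncompacted pages carry the "1d1ed" (I died) tag instead.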
+  remembered_unmapped_pages_[remembered_unmapped_pages_index_] =
+      reinterpret_cast<Address>(p);
+  remembered_unmapped_pages_index_++;
+  remembered_unmapped_pages_index_ %= kRememberedUnmappedPages;
+}
+
+
+void Heap::ClearObjectStats(bool clear_last_time_stats) {
+  memset(object_counts_, 0, sizeof(object_counts_));
+  memset(object_sizes_, 0, sizeof(object_sizes_));
+  if (clear_last_time_stats) {
+    memset(object_counts_last_time_, 0, sizeof(object_counts_last_time_));
+    memset(object_sizes_last_time_, 0, sizeof(object_sizes_last_time_));
+  }
+}
+
+
+static base::LazyMutex checkpoint_object_stats_mutex = LAZY_MUTEX_INITIALIZER;
+
+
+void Heap::CheckpointObjectStats() {
+  base::LockGuard<base::Mutex> lock_guard(
+      checkpoint_object_stats_mutex.Pointer());
+  Counters* counters = isolate()->counters();
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)              \
+  counters->count_of_##name()->Increment(                \
+      static_cast<int>(object_counts_[name]));           \
+  counters->count_of_##name()->Decrement(                \
+      static_cast<int>(object_counts_last_time_[name])); \
+  counters->size_of_##name()->Increment(                 \
+      static_cast<int>(object_sizes_[name]));            \
+  counters->size_of_##name()->Decrement(                 \
+      static_cast<int>(object_sizes_last_time_[name]));
+  INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+  int index;
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  index = FIRST_CODE_KIND_SUB_TYPE + Code::name;          \
+  counters->count_of_CODE_TYPE_##name()->Increment(       \
+      static_cast<int>(object_counts_[index]));           \
+  counters->count_of_CODE_TYPE_##name()->Decrement(       \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_CODE_TYPE_##name()->Increment(        \
+      static_cast<int>(object_sizes_[index]));            \
+  counters->size_of_CODE_TYPE_##name()->Decrement(        \
+      static_cast<int>(object_sizes_last_time_[index]));
+  CODE_KIND_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)               \
+  index = FIRST_FIXED_ARRAY_SUB_TYPE + name;              \
+  counters->count_of_FIXED_ARRAY_##name()->Increment(     \
+      static_cast<int>(object_counts_[index]));           \
+  counters->count_of_FIXED_ARRAY_##name()->Decrement(     \
+      static_cast<int>(object_counts_last_time_[index])); \
+  counters->size_of_FIXED_ARRAY_##name()->Increment(      \
+      static_cast<int>(object_sizes_[index]));            \
+  counters->size_of_FIXED_ARRAY_##name()->Decrement(      \
+      static_cast<int>(object_sizes_last_time_[index]));
+  FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+#define ADJUST_LAST_TIME_OBJECT_COUNT(name)                                   \
+  index =                                                                     \
+      FIRST_CODE_AGE_SUB_TYPE + Code::k##name##CodeAge - Code::kFirstCodeAge; \
+  counters->count_of_CODE_AGE_##name()->Increment(                            \
+      static_cast<int>(object_counts_[index]));                               \
+  counters->count_of_CODE_AGE_##name()->Decrement(                            \
+      static_cast<int>(object_counts_last_time_[index]));                     \
+  counters->size_of_CODE_AGE_##name()->Increment(                             \
+      static_cast<int>(object_sizes_[index]));                                \
+  counters->size_of_CODE_AGE_##name()->Decrement(                             \
+      static_cast<int>(object_sizes_last_time_[index]));
+  CODE_AGE_LIST_COMPLETE(ADJUST_LAST_TIME_OBJECT_COUNT)
+#undef ADJUST_LAST_TIME_OBJECT_COUNT
+
+  MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
+  MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
+  ClearObjectStats();
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/heap.gyp b/src/heap/heap.gyp
new file mode 100644
index 0000000..2970eb8
--- /dev/null
+++ b/src/heap/heap.gyp
@@ -0,0 +1,52 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'v8_code': 1,
+  },
+  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'heap-unittests',
+      'type': 'executable',
+      'dependencies': [
+        '../../testing/gtest.gyp:gtest',
+        '../../testing/gtest.gyp:gtest_main',
+        '../../tools/gyp/v8.gyp:v8_libplatform',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        'gc-idle-time-handler-unittest.cc',
+      ],
+      'conditions': [
+        ['component=="shared_library"', {
+          # heap-unittests can't be built against a shared library, so we
+          # need to depend on the underlying static target in that case.
+          'conditions': [
+            ['v8_use_snapshot=="true"', {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
+            },
+            {
+              'dependencies': [
+                '../../tools/gyp/v8.gyp:v8_nosnapshot',
+              ],
+            }],
+          ],
+        }, {
+          'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+        }],
+        ['os_posix == 1', {
+          # TODO(svenpanne): This is a temporary work-around to fix the warnings
+          # that show up because we use -std=gnu++0x instead of -std=c++11.
+          'cflags!': [
+            '-pedantic',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/src/heap/heap.h b/src/heap/heap.h
new file mode 100644
index 0000000..c9d0f31
--- /dev/null
+++ b/src/heap/heap.h
@@ -0,0 +1,2503 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_HEAP_H_
+#define V8_HEAP_HEAP_H_
+
+#include <cmath>
+
+#include "src/allocation.h"
+#include "src/assert-scope.h"
+#include "src/counters.h"
+#include "src/globals.h"
+#include "src/heap/gc-idle-time-handler.h"
+#include "src/heap/gc-tracer.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/spaces.h"
+#include "src/heap/store-buffer.h"
+#include "src/list.h"
+#include "src/splay-tree-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// Defines all the roots in Heap.
+#define STRONG_ROOT_LIST(V)                                                    \
+  V(Map, byte_array_map, ByteArrayMap)                                         \
+  V(Map, free_space_map, FreeSpaceMap)                                         \
+  V(Map, one_pointer_filler_map, OnePointerFillerMap)                          \
+  V(Map, two_pointer_filler_map, TwoPointerFillerMap)                          \
+  /* Cluster the most popular ones in a few cache lines here at the top.    */ \
+  V(Smi, store_buffer_top, StoreBufferTop)                                     \
+  V(Oddball, undefined_value, UndefinedValue)                                  \
+  V(Oddball, the_hole_value, TheHoleValue)                                     \
+  V(Oddball, null_value, NullValue)                                            \
+  V(Oddball, true_value, TrueValue)                                            \
+  V(Oddball, false_value, FalseValue)                                          \
+  V(Oddball, uninitialized_value, UninitializedValue)                          \
+  V(Oddball, exception, Exception)                                             \
+  V(Map, cell_map, CellMap)                                                    \
+  V(Map, global_property_cell_map, GlobalPropertyCellMap)                      \
+  V(Map, shared_function_info_map, SharedFunctionInfoMap)                      \
+  V(Map, meta_map, MetaMap)                                                    \
+  V(Map, heap_number_map, HeapNumberMap)                                       \
+  V(Map, mutable_heap_number_map, MutableHeapNumberMap)                        \
+  V(Map, native_context_map, NativeContextMap)                                 \
+  V(Map, fixed_array_map, FixedArrayMap)                                       \
+  V(Map, code_map, CodeMap)                                                    \
+  V(Map, scope_info_map, ScopeInfoMap)                                         \
+  V(Map, fixed_cow_array_map, FixedCOWArrayMap)                                \
+  V(Map, fixed_double_array_map, FixedDoubleArrayMap)                          \
+  V(Map, constant_pool_array_map, ConstantPoolArrayMap)                        \
+  V(Oddball, no_interceptor_result_sentinel, NoInterceptorResultSentinel)      \
+  V(Map, hash_table_map, HashTableMap)                                         \
+  V(Map, ordered_hash_table_map, OrderedHashTableMap)                          \
+  V(FixedArray, empty_fixed_array, EmptyFixedArray)                            \
+  V(ByteArray, empty_byte_array, EmptyByteArray)                               \
+  V(DescriptorArray, empty_descriptor_array, EmptyDescriptorArray)             \
+  V(ConstantPoolArray, empty_constant_pool_array, EmptyConstantPoolArray)      \
+  V(Oddball, arguments_marker, ArgumentsMarker)                                \
+  /* The roots above this line should be boring from a GC point of view.    */ \
+  /* This means they are never in new space and never on a page that is     */ \
+  /* being compacted.                                                       */ \
+  V(FixedArray, number_string_cache, NumberStringCache)                        \
+  V(Object, instanceof_cache_function, InstanceofCacheFunction)                \
+  V(Object, instanceof_cache_map, InstanceofCacheMap)                          \
+  V(Object, instanceof_cache_answer, InstanceofCacheAnswer)                    \
+  V(FixedArray, single_character_string_cache, SingleCharacterStringCache)     \
+  V(FixedArray, string_split_cache, StringSplitCache)                          \
+  V(FixedArray, regexp_multiple_cache, RegExpMultipleCache)                    \
+  V(Oddball, termination_exception, TerminationException)                      \
+  V(Smi, hash_seed, HashSeed)                                                  \
+  V(Map, symbol_map, SymbolMap)                                                \
+  V(Map, string_map, StringMap)                                                \
+  V(Map, one_byte_string_map, OneByteStringMap)                                \
+  V(Map, cons_string_map, ConsStringMap)                                       \
+  V(Map, cons_one_byte_string_map, ConsOneByteStringMap)                       \
+  V(Map, sliced_string_map, SlicedStringMap)                                   \
+  V(Map, sliced_one_byte_string_map, SlicedOneByteStringMap)                   \
+  V(Map, external_string_map, ExternalStringMap)                               \
+  V(Map, external_string_with_one_byte_data_map,                               \
+    ExternalStringWithOneByteDataMap)                                          \
+  V(Map, external_one_byte_string_map, ExternalOneByteStringMap)               \
+  V(Map, short_external_string_map, ShortExternalStringMap)                    \
+  V(Map, short_external_string_with_one_byte_data_map,                         \
+    ShortExternalStringWithOneByteDataMap)                                     \
+  V(Map, internalized_string_map, InternalizedStringMap)                       \
+  V(Map, one_byte_internalized_string_map, OneByteInternalizedStringMap)       \
+  V(Map, external_internalized_string_map, ExternalInternalizedStringMap)      \
+  V(Map, external_internalized_string_with_one_byte_data_map,                  \
+    ExternalInternalizedStringWithOneByteDataMap)                              \
+  V(Map, external_one_byte_internalized_string_map,                            \
+    ExternalOneByteInternalizedStringMap)                                      \
+  V(Map, short_external_internalized_string_map,                               \
+    ShortExternalInternalizedStringMap)                                        \
+  V(Map, short_external_internalized_string_with_one_byte_data_map,            \
+    ShortExternalInternalizedStringWithOneByteDataMap)                         \
+  V(Map, short_external_one_byte_internalized_string_map,                      \
+    ShortExternalOneByteInternalizedStringMap)                                 \
+  V(Map, short_external_one_byte_string_map, ShortExternalOneByteStringMap)    \
+  V(Map, undetectable_string_map, UndetectableStringMap)                       \
+  V(Map, undetectable_one_byte_string_map, UndetectableOneByteStringMap)       \
+  V(Map, external_int8_array_map, ExternalInt8ArrayMap)                        \
+  V(Map, external_uint8_array_map, ExternalUint8ArrayMap)                      \
+  V(Map, external_int16_array_map, ExternalInt16ArrayMap)                      \
+  V(Map, external_uint16_array_map, ExternalUint16ArrayMap)                    \
+  V(Map, external_int32_array_map, ExternalInt32ArrayMap)                      \
+  V(Map, external_uint32_array_map, ExternalUint32ArrayMap)                    \
+  V(Map, external_float32_array_map, ExternalFloat32ArrayMap)                  \
+  V(Map, external_float64_array_map, ExternalFloat64ArrayMap)                  \
+  V(Map, external_uint8_clamped_array_map, ExternalUint8ClampedArrayMap)       \
+  V(ExternalArray, empty_external_int8_array, EmptyExternalInt8Array)          \
+  V(ExternalArray, empty_external_uint8_array, EmptyExternalUint8Array)        \
+  V(ExternalArray, empty_external_int16_array, EmptyExternalInt16Array)        \
+  V(ExternalArray, empty_external_uint16_array, EmptyExternalUint16Array)      \
+  V(ExternalArray, empty_external_int32_array, EmptyExternalInt32Array)        \
+  V(ExternalArray, empty_external_uint32_array, EmptyExternalUint32Array)      \
+  V(ExternalArray, empty_external_float32_array, EmptyExternalFloat32Array)    \
+  V(ExternalArray, empty_external_float64_array, EmptyExternalFloat64Array)    \
+  V(ExternalArray, empty_external_uint8_clamped_array,                         \
+    EmptyExternalUint8ClampedArray)                                            \
+  V(Map, fixed_uint8_array_map, FixedUint8ArrayMap)                            \
+  V(Map, fixed_int8_array_map, FixedInt8ArrayMap)                              \
+  V(Map, fixed_uint16_array_map, FixedUint16ArrayMap)                          \
+  V(Map, fixed_int16_array_map, FixedInt16ArrayMap)                            \
+  V(Map, fixed_uint32_array_map, FixedUint32ArrayMap)                          \
+  V(Map, fixed_int32_array_map, FixedInt32ArrayMap)                            \
+  V(Map, fixed_float32_array_map, FixedFloat32ArrayMap)                        \
+  V(Map, fixed_float64_array_map, FixedFloat64ArrayMap)                        \
+  V(Map, fixed_uint8_clamped_array_map, FixedUint8ClampedArrayMap)             \
+  V(FixedTypedArrayBase, empty_fixed_uint8_array, EmptyFixedUint8Array)        \
+  V(FixedTypedArrayBase, empty_fixed_int8_array, EmptyFixedInt8Array)          \
+  V(FixedTypedArrayBase, empty_fixed_uint16_array, EmptyFixedUint16Array)      \
+  V(FixedTypedArrayBase, empty_fixed_int16_array, EmptyFixedInt16Array)        \
+  V(FixedTypedArrayBase, empty_fixed_uint32_array, EmptyFixedUint32Array)      \
+  V(FixedTypedArrayBase, empty_fixed_int32_array, EmptyFixedInt32Array)        \
+  V(FixedTypedArrayBase, empty_fixed_float32_array, EmptyFixedFloat32Array)    \
+  V(FixedTypedArrayBase, empty_fixed_float64_array, EmptyFixedFloat64Array)    \
+  V(FixedTypedArrayBase, empty_fixed_uint8_clamped_array,                      \
+    EmptyFixedUint8ClampedArray)                                               \
+  V(Map, sloppy_arguments_elements_map, SloppyArgumentsElementsMap)            \
+  V(Map, function_context_map, FunctionContextMap)                             \
+  V(Map, catch_context_map, CatchContextMap)                                   \
+  V(Map, with_context_map, WithContextMap)                                     \
+  V(Map, block_context_map, BlockContextMap)                                   \
+  V(Map, module_context_map, ModuleContextMap)                                 \
+  V(Map, global_context_map, GlobalContextMap)                                 \
+  V(Map, undefined_map, UndefinedMap)                                          \
+  V(Map, the_hole_map, TheHoleMap)                                             \
+  V(Map, null_map, NullMap)                                                    \
+  V(Map, boolean_map, BooleanMap)                                              \
+  V(Map, uninitialized_map, UninitializedMap)                                  \
+  V(Map, arguments_marker_map, ArgumentsMarkerMap)                             \
+  V(Map, no_interceptor_result_sentinel_map, NoInterceptorResultSentinelMap)   \
+  V(Map, exception_map, ExceptionMap)                                          \
+  V(Map, termination_exception_map, TerminationExceptionMap)                   \
+  V(Map, message_object_map, JSMessageObjectMap)                               \
+  V(Map, foreign_map, ForeignMap)                                              \
+  V(HeapNumber, nan_value, NanValue)                                           \
+  V(HeapNumber, infinity_value, InfinityValue)                                 \
+  V(HeapNumber, minus_zero_value, MinusZeroValue)                              \
+  V(Map, neander_map, NeanderMap)                                              \
+  V(JSObject, message_listeners, MessageListeners)                             \
+  V(UnseededNumberDictionary, code_stubs, CodeStubs)                           \
+  V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache)      \
+  V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache)        \
+  V(Code, js_entry_code, JsEntryCode)                                          \
+  V(Code, js_construct_entry_code, JsConstructEntryCode)                       \
+  V(FixedArray, natives_source_cache, NativesSourceCache)                      \
+  V(Script, empty_script, EmptyScript)                                         \
+  V(NameDictionary, intrinsic_function_names, IntrinsicFunctionNames)          \
+  V(Cell, undefined_cell, UndefineCell)                                        \
+  V(JSObject, observation_state, ObservationState)                             \
+  V(Map, external_map, ExternalMap)                                            \
+  V(Object, symbol_registry, SymbolRegistry)                                   \
+  V(Symbol, frozen_symbol, FrozenSymbol)                                       \
+  V(Symbol, nonexistent_symbol, NonExistentSymbol)                             \
+  V(Symbol, elements_transition_symbol, ElementsTransitionSymbol)              \
+  V(SeededNumberDictionary, empty_slow_element_dictionary,                     \
+    EmptySlowElementDictionary)                                                \
+  V(Symbol, observed_symbol, ObservedSymbol)                                   \
+  V(Symbol, uninitialized_symbol, UninitializedSymbol)                         \
+  V(Symbol, megamorphic_symbol, MegamorphicSymbol)                             \
+  V(Symbol, premonomorphic_symbol, PremonomorphicSymbol)                       \
+  V(Symbol, generic_symbol, GenericSymbol)                                     \
+  V(Symbol, stack_trace_symbol, StackTraceSymbol)                              \
+  V(Symbol, detailed_stack_trace_symbol, DetailedStackTraceSymbol)             \
+  V(Symbol, normal_ic_symbol, NormalICSymbol)                                  \
+  V(Symbol, home_object_symbol, HomeObjectSymbol)                              \
+  V(FixedArray, materialized_objects, MaterializedObjects)                     \
+  V(FixedArray, allocation_sites_scratchpad, AllocationSitesScratchpad)        \
+  V(FixedArray, microtask_queue, MicrotaskQueue)
+
+// Entries in this list are limited to Smis and are not visited during GC.
+#define SMI_ROOT_LIST(V)                                                   \
+  V(Smi, stack_limit, StackLimit)                                          \
+  V(Smi, real_stack_limit, RealStackLimit)                                 \
+  V(Smi, last_script_id, LastScriptId)                                     \
+  V(Smi, arguments_adaptor_deopt_pc_offset, ArgumentsAdaptorDeoptPCOffset) \
+  V(Smi, construct_stub_deopt_pc_offset, ConstructStubDeoptPCOffset)       \
+  V(Smi, getter_stub_deopt_pc_offset, GetterStubDeoptPCOffset)             \
+  V(Smi, setter_stub_deopt_pc_offset, SetterStubDeoptPCOffset)
+
+#define ROOT_LIST(V)  \
+  STRONG_ROOT_LIST(V) \
+  SMI_ROOT_LIST(V)    \
+  V(StringTable, string_table, StringTable)
+
+// Heap roots that are known to be immortal immovable, for which we can safely
+// skip write barriers.
+#define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
+  V(byte_array_map)                     \
+  V(free_space_map)                     \
+  V(one_pointer_filler_map)             \
+  V(two_pointer_filler_map)             \
+  V(undefined_value)                    \
+  V(the_hole_value)                     \
+  V(null_value)                         \
+  V(true_value)                         \
+  V(false_value)                        \
+  V(uninitialized_value)                \
+  V(cell_map)                           \
+  V(global_property_cell_map)           \
+  V(shared_function_info_map)           \
+  V(meta_map)                           \
+  V(heap_number_map)                    \
+  V(mutable_heap_number_map)            \
+  V(native_context_map)                 \
+  V(fixed_array_map)                    \
+  V(code_map)                           \
+  V(scope_info_map)                     \
+  V(fixed_cow_array_map)                \
+  V(fixed_double_array_map)             \
+  V(constant_pool_array_map)            \
+  V(no_interceptor_result_sentinel)     \
+  V(hash_table_map)                     \
+  V(ordered_hash_table_map)             \
+  V(empty_fixed_array)                  \
+  V(empty_byte_array)                   \
+  V(empty_descriptor_array)             \
+  V(empty_constant_pool_array)          \
+  V(arguments_marker)                   \
+  V(symbol_map)                         \
+  V(sloppy_arguments_elements_map)      \
+  V(function_context_map)               \
+  V(catch_context_map)                  \
+  V(with_context_map)                   \
+  V(block_context_map)                  \
+  V(module_context_map)                 \
+  V(global_context_map)                 \
+  V(undefined_map)                      \
+  V(the_hole_map)                       \
+  V(null_map)                           \
+  V(boolean_map)                        \
+  V(uninitialized_map)                  \
+  V(message_object_map)                 \
+  V(foreign_map)                        \
+  V(neander_map)
+
+#define INTERNALIZED_STRING_LIST(V)                                \
+  V(Object_string, "Object")                                       \
+  V(proto_string, "__proto__")                                     \
+  V(arguments_string, "arguments")                                 \
+  V(Arguments_string, "Arguments")                                 \
+  V(caller_string, "caller")                                       \
+  V(boolean_string, "boolean")                                     \
+  V(Boolean_string, "Boolean")                                     \
+  V(callee_string, "callee")                                       \
+  V(constructor_string, "constructor")                             \
+  V(dot_result_string, ".result")                                  \
+  V(dot_for_string, ".for.")                                       \
+  V(eval_string, "eval")                                           \
+  V(empty_string, "")                                              \
+  V(function_string, "function")                                   \
+  V(Function_string, "Function")                                   \
+  V(length_string, "length")                                       \
+  V(name_string, "name")                                           \
+  V(null_string, "null")                                           \
+  V(number_string, "number")                                       \
+  V(Number_string, "Number")                                       \
+  V(nan_string, "NaN")                                             \
+  V(source_string, "source")                                       \
+  V(source_url_string, "source_url")                               \
+  V(source_mapping_url_string, "source_mapping_url")               \
+  V(global_string, "global")                                       \
+  V(ignore_case_string, "ignoreCase")                              \
+  V(multiline_string, "multiline")                                 \
+  V(sticky_string, "sticky")                                       \
+  V(harmony_regexps_string, "harmony_regexps")                     \
+  V(input_string, "input")                                         \
+  V(index_string, "index")                                         \
+  V(last_index_string, "lastIndex")                                \
+  V(object_string, "object")                                       \
+  V(prototype_string, "prototype")                                 \
+  V(string_string, "string")                                       \
+  V(String_string, "String")                                       \
+  V(symbol_string, "symbol")                                       \
+  V(Symbol_string, "Symbol")                                       \
+  V(Map_string, "Map")                                             \
+  V(Set_string, "Set")                                             \
+  V(WeakMap_string, "WeakMap")                                     \
+  V(WeakSet_string, "WeakSet")                                     \
+  V(for_string, "for")                                             \
+  V(for_api_string, "for_api")                                     \
+  V(for_intern_string, "for_intern")                               \
+  V(private_api_string, "private_api")                             \
+  V(private_intern_string, "private_intern")                       \
+  V(Date_string, "Date")                                           \
+  V(char_at_string, "CharAt")                                      \
+  V(undefined_string, "undefined")                                 \
+  V(value_of_string, "valueOf")                                    \
+  V(stack_string, "stack")                                         \
+  V(toJSON_string, "toJSON")                                       \
+  V(KeyedLoadMonomorphic_string, "KeyedLoadMonomorphic")           \
+  V(KeyedStoreMonomorphic_string, "KeyedStoreMonomorphic")         \
+  V(stack_overflow_string, "kStackOverflowBoilerplate")            \
+  V(illegal_access_string, "illegal access")                       \
+  V(cell_value_string, "%cell_value")                              \
+  V(illegal_argument_string, "illegal argument")                   \
+  V(identity_hash_string, "v8::IdentityHash")                      \
+  V(closure_string, "(closure)")                                   \
+  V(dot_string, ".")                                               \
+  V(compare_ic_string, "==")                                       \
+  V(strict_compare_ic_string, "===")                               \
+  V(infinity_string, "Infinity")                                   \
+  V(minus_infinity_string, "-Infinity")                            \
+  V(query_colon_string, "(?:)")                                    \
+  V(Generator_string, "Generator")                                 \
+  V(throw_string, "throw")                                         \
+  V(done_string, "done")                                           \
+  V(value_string, "value")                                         \
+  V(next_string, "next")                                           \
+  V(byte_length_string, "byteLength")                              \
+  V(byte_offset_string, "byteOffset")                              \
+  V(intl_initialized_marker_string, "v8::intl_initialized_marker") \
+  V(intl_impl_object_string, "v8::intl_object")
+
+// Forward declarations.
+class HeapStats;
+class Isolate;
+class WeakObjectRetainer;
+
+
+typedef String* (*ExternalStringTableUpdaterCallback)(Heap* heap,
+                                                      Object** pointer);
+
+class StoreBufferRebuilder {
+ public:
+  explicit StoreBufferRebuilder(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer) {}
+
+  void Callback(MemoryChunk* page, StoreBufferEvent event);
+
+ private:
+  StoreBuffer* store_buffer_;
+
+  // We record in this variable how full the store buffer was when we started
+  // iterating over the current page, finding pointers to new space.  If the
+  // store buffer overflows again we can exempt the page from the store buffer
+  // by rewinding to this point instead of having to search the store buffer.
+  Object*** start_of_current_page_;
+  // The current page we are scanning in the store buffer iterator.
+  MemoryChunk* current_page_;
+};
+
+
+// A queue of objects promoted during scavenge. Each object is accompanied
+// by its size to avoid dereferencing a map pointer for scanning.
+class PromotionQueue {
+ public:
+  explicit PromotionQueue(Heap* heap)
+      : front_(NULL),
+        rear_(NULL),
+        limit_(NULL),
+        emergency_stack_(0),
+        heap_(heap) {}
+
+  void Initialize();
+
+  void Destroy() {
+    DCHECK(is_empty());
+    delete emergency_stack_;
+    emergency_stack_ = NULL;
+  }
+
+  Page* GetHeadPage() {
+    return Page::FromAllocationTop(reinterpret_cast<Address>(rear_));
+  }
+
+  void SetNewLimit(Address limit) {
+    limit_ = reinterpret_cast<intptr_t*>(limit);
+
+    if (limit_ <= rear_) {
+      return;
+    }
+
+    RelocateQueueHead();
+  }
+
+  bool IsBelowPromotionQueue(Address to_space_top) {
+    // If the given to-space top pointer and the head of the promotion queue
+    // are not on the same page, then the to-space objects are below the
+    // promotion queue.
+    if (GetHeadPage() != Page::FromAddress(to_space_top)) {
+      return true;
+    }
+    // If the to-space top pointer is smaller than or equal to the promotion
+    // queue head, then the to-space objects are below the promotion queue.
+    return reinterpret_cast<intptr_t*>(to_space_top) <= rear_;
+  }
+
+  bool is_empty() {
+    return (front_ == rear_) &&
+           (emergency_stack_ == NULL || emergency_stack_->length() == 0);
+  }
+
+  inline void insert(HeapObject* target, int size);
+
+  void remove(HeapObject** target, int* size) {
+    DCHECK(!is_empty());
+    if (front_ == rear_) {
+      Entry e = emergency_stack_->RemoveLast();
+      *target = e.obj_;
+      *size = e.size_;
+      return;
+    }
+
+    if (NewSpacePage::IsAtStart(reinterpret_cast<Address>(front_))) {
+      NewSpacePage* front_page =
+          NewSpacePage::FromAddress(reinterpret_cast<Address>(front_));
+      DCHECK(!front_page->prev_page()->is_anchor());
+      front_ = reinterpret_cast<intptr_t*>(front_page->prev_page()->area_end());
+    }
+    *target = reinterpret_cast<HeapObject*>(*(--front_));
+    *size = static_cast<int>(*(--front_));
+    // Assert no underflow.
+    SemiSpace::AssertValidRange(reinterpret_cast<Address>(rear_),
+                                reinterpret_cast<Address>(front_));
+  }
+
+ private:
+  // The front of the queue is higher in the memory page chain than the rear.
+  intptr_t* front_;
+  intptr_t* rear_;
+  intptr_t* limit_;
+
+  static const int kEntrySizeInWords = 2;
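+  // An entry occupies two queue words: remove() pops the object pointer
+  // first and then its size (the Entry struct below backs the overflow
+  // emergency stack).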
+
+  struct Entry {
+    Entry(HeapObject* obj, int size) : obj_(obj), size_(size) {}
+
+    HeapObject* obj_;
+    int size_;
+  };
+  List<Entry>* emergency_stack_;
+
+  Heap* heap_;
+
+  void RelocateQueueHead();
+
+  DISALLOW_COPY_AND_ASSIGN(PromotionQueue);
+};
+
+
+typedef void (*ScavengingCallback)(Map* map, HeapObject** slot,
+                                   HeapObject* object);
+
+
+// The external string table is the place where all external strings are
+// registered.  We need to keep track of such strings to finalize them
+// properly.
+class ExternalStringTable {
+ public:
+  // Registers an external string.
+  inline void AddString(String* string);
+
+  inline void Iterate(ObjectVisitor* v);
+
+  // Restores internal invariant and gets rid of collected strings.
+  // Must be called after each Iterate() that modified the strings.
+  void CleanUp();
+
+  // Destroys all allocated memory.
+  void TearDown();
+
+ private:
+  explicit ExternalStringTable(Heap* heap) : heap_(heap) {}
+
+  friend class Heap;
+
+  inline void Verify();
+
+  inline void AddOldString(String* string);
+
+  // Notifies the table that only a prefix of the new list is valid.
+  inline void ShrinkNewStrings(int position);
+
+  // To speed up scavenge collections, new space strings are kept
+  // separate from old space strings.
+  List<Object*> new_space_strings_;
+  List<Object*> old_space_strings_;
+
+  Heap* heap_;
+
+  DISALLOW_COPY_AND_ASSIGN(ExternalStringTable);
+};
+
+
+enum ArrayStorageAllocationMode {
+  DONT_INITIALIZE_ARRAY_ELEMENTS,
+  INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE
+};
+
+
+class Heap {
+ public:
+  // Configure heap size in MB before setup. Return false if the heap has been
+  // set up already.
+  bool ConfigureHeap(int max_semi_space_size, int max_old_space_size,
+                     int max_executable_size, size_t code_range_size);
+  bool ConfigureHeapDefault();
+
+  // Prepares the heap, setting up memory areas that are needed in the isolate
+  // without actually creating any objects.
+  bool SetUp();
+
+  // Bootstraps the object heap with the core set of objects required to run.
+  // Returns whether it succeeded.
+  bool CreateHeapObjects();
+
+  // Destroys all memory allocated by the heap.
+  void TearDown();
+
+  // Set the stack limit in the roots_ array.  Some architectures generate
+  // code that looks here, because it is faster than loading from the static
+  // jslimit_/real_jslimit_ variable in the StackGuard.
+  void SetStackLimits();
+
+  // Returns whether SetUp has been called.
+  bool HasBeenSetUp();
+
+  // Returns the maximum amount of memory reserved for the heap.  For
+  // the young generation, we reserve 4 times the amount needed for a
+  // semi space.  The young generation consists of two semi spaces and
+  // we reserve twice the amount needed for those in order to ensure
+  // that new space can be aligned to its size.
+  intptr_t MaxReserved() {
+    return 4 * reserved_semispace_size_ + max_old_generation_size_;
+  }
+  int MaxSemiSpaceSize() { return max_semi_space_size_; }
+  int ReservedSemiSpaceSize() { return reserved_semispace_size_; }
+  int InitialSemiSpaceSize() { return initial_semispace_size_; }
+  intptr_t MaxOldGenerationSize() { return max_old_generation_size_; }
+  intptr_t MaxExecutableSize() { return max_executable_size_; }
+
+  // Returns the capacity of the heap in bytes w/o growing. Heap grows when
+  // more spaces are needed until it reaches the limit.
+  intptr_t Capacity();
+
+  // Returns the amount of memory currently committed for the heap.
+  intptr_t CommittedMemory();
+
+  // Returns the amount of executable memory currently committed for the heap.
+  intptr_t CommittedMemoryExecutable();
+
+  // Returns the amount of physical memory currently committed for the heap.
+  size_t CommittedPhysicalMemory();
+
+  // Returns the maximum amount of memory ever committed for the heap.
+  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+
+  // Updates the maximum committed memory for the heap. Should be called
+  // whenever a space grows.
+  void UpdateMaximumCommitted();
+
+  // Returns the available bytes in space w/o growing.
+  // Heap doesn't guarantee that it can allocate an object that requires
+  // all available bytes. Check MaxHeapObjectSize() instead.
+  intptr_t Available();
+
+  // Returns the size of all objects residing in the heap.
+  intptr_t SizeOfObjects();
+
+  // Return the starting address and a mask for the new space.  And-masking an
+  // address with the mask will result in the start address of the new space
+  // for all addresses in either semispace.
+  Address NewSpaceStart() { return new_space_.start(); }
+  uintptr_t NewSpaceMask() { return new_space_.mask(); }
+  Address NewSpaceTop() { return new_space_.top(); }
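+  // Illustrative check (a sketch of how the mask is meant to be used): an
+  // address a lies in either semispace iff
+  //   (reinterpret_cast<uintptr_t>(a) & NewSpaceMask()) ==
+  //       reinterpret_cast<uintptr_t>(NewSpaceStart())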
+
+  NewSpace* new_space() { return &new_space_; }
+  OldSpace* old_pointer_space() { return old_pointer_space_; }
+  OldSpace* old_data_space() { return old_data_space_; }
+  OldSpace* code_space() { return code_space_; }
+  MapSpace* map_space() { return map_space_; }
+  CellSpace* cell_space() { return cell_space_; }
+  PropertyCellSpace* property_cell_space() { return property_cell_space_; }
+  LargeObjectSpace* lo_space() { return lo_space_; }
+  PagedSpace* paged_space(int idx) {
+    switch (idx) {
+      case OLD_POINTER_SPACE:
+        return old_pointer_space();
+      case OLD_DATA_SPACE:
+        return old_data_space();
+      case MAP_SPACE:
+        return map_space();
+      case CELL_SPACE:
+        return cell_space();
+      case PROPERTY_CELL_SPACE:
+        return property_cell_space();
+      case CODE_SPACE:
+        return code_space();
+      case NEW_SPACE:
+      case LO_SPACE:
+        UNREACHABLE();
+    }
+    return NULL;
+  }
+
+  bool always_allocate() { return always_allocate_scope_depth_ != 0; }
+  Address always_allocate_scope_depth_address() {
+    return reinterpret_cast<Address>(&always_allocate_scope_depth_);
+  }
+
+  Address* NewSpaceAllocationTopAddress() {
+    return new_space_.allocation_top_address();
+  }
+  Address* NewSpaceAllocationLimitAddress() {
+    return new_space_.allocation_limit_address();
+  }
+
+  Address* OldPointerSpaceAllocationTopAddress() {
+    return old_pointer_space_->allocation_top_address();
+  }
+  Address* OldPointerSpaceAllocationLimitAddress() {
+    return old_pointer_space_->allocation_limit_address();
+  }
+
+  Address* OldDataSpaceAllocationTopAddress() {
+    return old_data_space_->allocation_top_address();
+  }
+  Address* OldDataSpaceAllocationLimitAddress() {
+    return old_data_space_->allocation_limit_address();
+  }
+
+  // Returns a deep copy of the JavaScript object.
+  // Properties and elements are copied too.
+  // Optionally takes an AllocationSite to be appended in an AllocationMemento.
+  MUST_USE_RESULT AllocationResult
+      CopyJSObject(JSObject* source, AllocationSite* site = NULL);
+
+  // Clear the Instanceof cache (used when a prototype changes).
+  inline void ClearInstanceofCache();
+
+  // Iterates the whole code space to clear all ICs of the given kind.
+  void ClearAllICsByKind(Code::Kind kind);
+
+  // For use during bootup.
+  void RepairFreeListsAfterBoot();
+
+  template <typename T>
+  static inline bool IsOneByte(T t, int chars);
+
+  // Moves len elements within a given array from src_index to dst_index.
+  void MoveElements(FixedArray* array, int dst_index, int src_index, int len);
+
+  // Sloppy mode arguments object size.
+  static const int kSloppyArgumentsObjectSize =
+      JSObject::kHeaderSize + 2 * kPointerSize;
+  // Strict mode arguments has no callee so it is smaller.
+  static const int kStrictArgumentsObjectSize =
+      JSObject::kHeaderSize + 1 * kPointerSize;
+  // Indices for direct access into argument objects.
+  static const int kArgumentsLengthIndex = 0;
+  // callee is only valid in sloppy mode.
+  static const int kArgumentsCalleeIndex = 1;
+
+  // Finalizes an external string by deleting the associated external
+  // data and clearing the resource pointer.
+  inline void FinalizeExternalString(String* string);
+
+  // Initialize a filler object to keep the ability to iterate over the heap
+  // when introducing gaps within pages.
+  void CreateFillerObjectAt(Address addr, int size);
+
+  bool CanMoveObjectStart(HeapObject* object);
+
+  // Indicates whether live bytes adjustment is triggered from within the GC
+  // code or from mutator code.
+  enum InvocationMode { FROM_GC, FROM_MUTATOR };
+
+  // Maintain consistency of live bytes during incremental marking.
+  void AdjustLiveBytes(Address address, int by, InvocationMode mode);
+
+  // Trim the given array from the left. Note that this relocates the object
+  // start and hence is only valid if there is only a single reference to it.
+  FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+
+  // Trim the given array from the right.
+  template<Heap::InvocationMode mode>
+  void RightTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
+
+  // Converts the given boolean condition to JavaScript boolean value.
+  inline Object* ToBoolean(bool condition);
+
+  // Performs a garbage collection operation.
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+  inline bool CollectGarbage(
+      AllocationSpace space, const char* gc_reason = NULL,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  static const int kNoGCFlags = 0;
+  static const int kReduceMemoryFootprintMask = 1;
+  static const int kAbortIncrementalMarkingMask = 2;
+
+  // Making the heap iterable requires us to abort incremental marking.
+  static const int kMakeHeapIterableMask = kAbortIncrementalMarkingMask;
+
+  // Performs a full garbage collection.  If (flags & kMakeHeapIterableMask) is
+  // non-zero, then the slower precise sweeper is used, which leaves the heap
+  // in a state where we can iterate over the heap visiting all objects.
+  void CollectAllGarbage(
+      int flags, const char* gc_reason = NULL,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
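+  // Illustrative call: CollectAllGarbage(kMakeHeapIterableMask, "reason")
+  // forces the precise sweeper so the heap can be iterated afterwards.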
+
+  // Last hope GC, should try to squeeze as much as possible.
+  void CollectAllAvailableGarbage(const char* gc_reason = NULL);
+
+  // Check whether the heap is currently iterable.
+  bool IsHeapIterable();
+
+  // Notify the heap that a context has been disposed.
+  int NotifyContextDisposed();
+
+  inline void increment_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_++;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  inline void decrement_scan_on_scavenge_pages() {
+    scan_on_scavenge_pages_--;
+    if (FLAG_gc_verbose) {
+      PrintF("Scan-on-scavenge pages: %d\n", scan_on_scavenge_pages_);
+    }
+  }
+
+  PromotionQueue* promotion_queue() { return &promotion_queue_; }
+
+  void AddGCPrologueCallback(v8::Isolate::GCPrologueCallback callback,
+                             GCType gc_type_filter, bool pass_isolate = true);
+  void RemoveGCPrologueCallback(v8::Isolate::GCPrologueCallback callback);
+
+  void AddGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback,
+                             GCType gc_type_filter, bool pass_isolate = true);
+  void RemoveGCEpilogueCallback(v8::Isolate::GCEpilogueCallback callback);
+
+// Heap root getters.  We have versions with and without type::cast() here.
+// You can't use type::cast during GC because the assert fails.
+// TODO(1490): Try removing the unchecked accessors, now that GC marking does
+// not corrupt the map.
+#define ROOT_ACCESSOR(type, name, camel_name)                           \
+  type* name() { return type::cast(roots_[k##camel_name##RootIndex]); } \
+  type* raw_unchecked_##name() {                                        \
+    return reinterpret_cast<type*>(roots_[k##camel_name##RootIndex]);   \
+  }
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
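+  // For example, the entry V(Oddball, undefined_value, UndefinedValue)
+  // expands to:
+  //   Oddball* undefined_value() {
+  //     return Oddball::cast(roots_[kUndefinedValueRootIndex]);
+  //   }
+  // plus a raw_unchecked_undefined_value() variant that skips the cast
+  // check.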
+
+// Utility type maps
+#define STRUCT_MAP_ACCESSOR(NAME, Name, name) \
+  Map* name##_map() { return Map::cast(roots_[k##Name##MapRootIndex]); }
+  STRUCT_LIST(STRUCT_MAP_ACCESSOR)
+#undef STRUCT_MAP_ACCESSOR
+
+#define STRING_ACCESSOR(name, str) \
+  String* name() { return String::cast(roots_[k##name##RootIndex]); }
+  INTERNALIZED_STRING_LIST(STRING_ACCESSOR)
+#undef STRING_ACCESSOR
+
+  // The hidden_string is special: its contents equal the empty string, but
+  // it deliberately does not match the empty string.
+  String* hidden_string() { return hidden_string_; }
+
+  void set_native_contexts_list(Object* object) {
+    native_contexts_list_ = object;
+  }
+  Object* native_contexts_list() const { return native_contexts_list_; }
+
+  void set_array_buffers_list(Object* object) { array_buffers_list_ = object; }
+  Object* array_buffers_list() const { return array_buffers_list_; }
+
+  void set_allocation_sites_list(Object* object) {
+    allocation_sites_list_ = object;
+  }
+  Object* allocation_sites_list() { return allocation_sites_list_; }
+
+  // Used in CreateAllocationSiteStub and the (de)serializer.
+  Object** allocation_sites_list_address() { return &allocation_sites_list_; }
+
+  Object* weak_object_to_code_table() { return weak_object_to_code_table_; }
+
+  void set_encountered_weak_collections(Object* weak_collection) {
+    encountered_weak_collections_ = weak_collection;
+  }
+  Object* encountered_weak_collections() const {
+    return encountered_weak_collections_;
+  }
+
+  // Number of mark-sweeps.
+  unsigned int ms_count() { return ms_count_; }
+
+  // Iterates over all roots in the heap.
+  void IterateRoots(ObjectVisitor* v, VisitMode mode);
+  // Iterates over all strong roots in the heap.
+  void IterateStrongRoots(ObjectVisitor* v, VisitMode mode);
+  // Iterates over entries in the smi roots list.  Only interesting to the
+  // serializer/deserializer, since GC does not care about smis.
+  void IterateSmiRoots(ObjectVisitor* v);
+  // Iterates over all the other roots in the heap.
+  void IterateWeakRoots(ObjectVisitor* v, VisitMode mode);
+
+  // Iterates over pointers into the from-semispace of new space found in
+  // the memory interval from start to end.
+  void IterateAndMarkPointersToFromSpace(Address start, Address end,
+                                         ObjectSlotCallback callback);
+
+  // Returns whether the object resides in new space.
+  inline bool InNewSpace(Object* object);
+  inline bool InNewSpace(Address address);
+  inline bool InNewSpacePage(Address address);
+  inline bool InFromSpace(Object* object);
+  inline bool InToSpace(Object* object);
+
+  // Returns whether the object resides in old pointer space.
+  inline bool InOldPointerSpace(Address address);
+  inline bool InOldPointerSpace(Object* object);
+
+  // Returns whether the object resides in old data space.
+  inline bool InOldDataSpace(Address address);
+  inline bool InOldDataSpace(Object* object);
+
+  // Checks whether an address/object is in the heap (including the
+  // auxiliary area and unused area).
+  bool Contains(Address addr);
+  bool Contains(HeapObject* value);
+
+  // Checks whether an address/object is in a given space.
+  // Currently used by tests, serialization and heap verification only.
+  bool InSpace(Address addr, AllocationSpace space);
+  bool InSpace(HeapObject* value, AllocationSpace space);
+
+  // Finds out which space an object should get promoted to based on its type.
+  inline OldSpace* TargetSpace(HeapObject* object);
+  static inline AllocationSpace TargetSpaceId(InstanceType type);
+
+  // Checks whether the given object is allowed to be migrated from its
+  // current space into the given destination space. Used for debugging.
+  inline bool AllowedToBeMigrated(HeapObject* object, AllocationSpace dest);
+
+  // Sets the stub_cache_ (only used when expanding the dictionary).
+  void public_set_code_stubs(UnseededNumberDictionary* value) {
+    roots_[kCodeStubsRootIndex] = value;
+  }
+
+  // Support for computing object sizes for old objects during GCs. Returns
+  // a function that is guaranteed to be safe for computing object sizes in
+  // the current GC phase.
+  HeapObjectCallback GcSafeSizeOfOldObjectFunction() {
+    return gc_safe_size_of_old_object_;
+  }
+
+  // Sets the non_monomorphic_cache_ (only used when expanding the dictionary).
+  void public_set_non_monomorphic_cache(UnseededNumberDictionary* value) {
+    roots_[kNonMonomorphicCacheRootIndex] = value;
+  }
+
+  void public_set_empty_script(Script* script) {
+    roots_[kEmptyScriptRootIndex] = script;
+  }
+
+  void public_set_store_buffer_top(Address* top) {
+    roots_[kStoreBufferTopRootIndex] = reinterpret_cast<Smi*>(top);
+  }
+
+  void public_set_materialized_objects(FixedArray* objects) {
+    roots_[kMaterializedObjectsRootIndex] = objects;
+  }
+
+  // Generated code can embed this address to get access to the roots.
+  Object** roots_array_start() { return roots_; }
+
+  Address* store_buffer_top_address() {
+    return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
+  }
+
+#ifdef VERIFY_HEAP
+  // Verify the heap is in its normal state before or after a GC.
+  void Verify();
+
+
+  bool weak_embedded_objects_verification_enabled() {
+    return no_weak_object_verification_scope_depth_ == 0;
+  }
+#endif
+
+#ifdef DEBUG
+  void Print();
+  void PrintHandles();
+
+  void OldPointerSpaceCheckStoreBuffer();
+  void MapSpaceCheckStoreBuffer();
+  void LargeObjectSpaceCheckStoreBuffer();
+
+  // Report heap statistics.
+  void ReportHeapStatistics(const char* title);
+  void ReportCodeStatistics(const char* title);
+#endif
+
+  // Zapping is needed for heap verification, and is always done in debug
+  // builds.
+  static inline bool ShouldZapGarbage() {
+#ifdef DEBUG
+    return true;
+#else
+#ifdef VERIFY_HEAP
+    return FLAG_verify_heap;
+#else
+    return false;
+#endif
+#endif
+  }
+
+  // Number of "runtime allocations" done so far.
+  uint32_t allocations_count() { return allocations_count_; }
+
+  // Returns a deterministic "time" value in ms.  Works only with
+  // FLAG_verify_predictable.
+  double synthetic_time() { return allocations_count_ / 2.0; }
+
+  // Print short heap statistics.
+  void PrintShortHeapStatistics();
+
+  // Write barrier support for address[offset] = o.
+  INLINE(void RecordWrite(Address address, int offset));
+
+  // Write barrier support for address[start : start + len) = o.
+  INLINE(void RecordWrites(Address address, int start, int len));
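+  // Illustrative use: after storing a new space object into a field at
+  // address + offset of an old space holder, RecordWrite(address, offset)
+  // registers the slot so the old-to-new pointer is tracked.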
+
+  enum HeapState { NOT_IN_GC, SCAVENGE, MARK_COMPACT };
+  inline HeapState gc_state() { return gc_state_; }
+
+  inline bool IsInGCPostProcessing() { return gc_post_processing_depth_ > 0; }
+
+#ifdef DEBUG
+  void set_allocation_timeout(int timeout) { allocation_timeout_ = timeout; }
+
+  void TracePathToObjectFrom(Object* target, Object* root);
+  void TracePathToObject(Object* target);
+  void TracePathToGlobal();
+#endif
+
+  // Callback function passed to Heap::Iterate etc.  Copies an object if
+  // necessary; the object might be promoted to an old space.  The caller must
+  // ensure the precondition that the object is (a) a heap object and (b) in
+  // the heap's from space.
+  static inline void ScavengePointer(HeapObject** p);
+  static inline void ScavengeObject(HeapObject** p, HeapObject* object);
+
+  enum ScratchpadSlotMode { IGNORE_SCRATCHPAD_SLOT, RECORD_SCRATCHPAD_SLOT };
+
+  // If an object has an AllocationMemento trailing it, return it; otherwise
+  // return NULL.
+  inline AllocationMemento* FindAllocationMemento(HeapObject* object);
+
+  // An object may have an AllocationSite associated with it through a trailing
+  // AllocationMemento. Its feedback should be updated when objects are found
+  // in the heap.
+  static inline void UpdateAllocationSiteFeedback(HeapObject* object,
+                                                  ScratchpadSlotMode mode);
+
+  // Support for partial snapshots.  After calling this we have a linear
+  // space to write objects in each space.
+  void ReserveSpace(int* sizes, Address* addresses);
+
+  //
+  // Support for the API.
+  //
+
+  void CreateApiObjects();
+
+  inline intptr_t PromotedTotalSize() {
+    int64_t total = PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
+    if (total > kMaxInt) return static_cast<intptr_t>(kMaxInt);
+    if (total < 0) return 0;
+    return static_cast<intptr_t>(total);
+  }
+
+  inline intptr_t OldGenerationSpaceAvailable() {
+    return old_generation_allocation_limit_ - PromotedTotalSize();
+  }
+
+  inline intptr_t OldGenerationCapacityAvailable() {
+    return max_old_generation_size_ - PromotedTotalSize();
+  }
+
+  static const intptr_t kMinimumOldGenerationAllocationLimit =
+      8 * (Page::kPageSize > MB ? Page::kPageSize : MB);
+
+  static const int kPointerMultiplier = i::kPointerSize / 4;
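+  // kPointerMultiplier is 1 on 32-bit targets (kPointerSize == 4) and 2 on
+  // 64-bit targets (kPointerSize == 8), scaling the limits below.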
+
+  // The new space size has to be a power of 2. Sizes are in MB.
+  static const int kMaxSemiSpaceSizeLowMemoryDevice = 1 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeMediumMemoryDevice = 4 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeHighMemoryDevice = 8 * kPointerMultiplier;
+  static const int kMaxSemiSpaceSizeHugeMemoryDevice = 8 * kPointerMultiplier;
+
+  // The old space size has to be a multiple of Page::kPageSize.
+  // Sizes are in MB.
+  static const int kMaxOldSpaceSizeLowMemoryDevice = 128 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeMediumMemoryDevice =
+      256 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeHighMemoryDevice = 512 * kPointerMultiplier;
+  static const int kMaxOldSpaceSizeHugeMemoryDevice = 700 * kPointerMultiplier;
+
+  // The executable size has to be a multiple of Page::kPageSize.
+  // Sizes are in MB.
+  static const int kMaxExecutableSizeLowMemoryDevice = 96 * kPointerMultiplier;
+  static const int kMaxExecutableSizeMediumMemoryDevice =
+      192 * kPointerMultiplier;
+  static const int kMaxExecutableSizeHighMemoryDevice =
+      256 * kPointerMultiplier;
+  static const int kMaxExecutableSizeHugeMemoryDevice =
+      256 * kPointerMultiplier;
+
+  intptr_t OldGenerationAllocationLimit(intptr_t old_gen_size,
+                                        int freed_global_handles);
+
+  // Indicates whether inline bump-pointer allocation has been disabled.
+  bool inline_allocation_disabled() { return inline_allocation_disabled_; }
+
+  // Switch whether inline bump-pointer allocation should be used.
+  void EnableInlineAllocation();
+  void DisableInlineAllocation();
+
+  // Implements the corresponding V8 API function.
+  bool IdleNotification(int idle_time_in_ms);
+
+  // Declare all the root indices.  This defines the root list order.
+  enum RootListIndex {
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+    STRONG_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+
+#define STRING_INDEX_DECLARATION(name, str) k##name##RootIndex,
+    INTERNALIZED_STRING_LIST(STRING_INDEX_DECLARATION)
+#undef STRING_INDEX_DECLARATION
+
+// Utility type maps
+#define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
+    STRUCT_LIST(DECLARE_STRUCT_MAP)
+#undef DECLARE_STRUCT_MAP
+    kStringTableRootIndex,
+
+#define ROOT_INDEX_DECLARATION(type, name, camel_name) k##camel_name##RootIndex,
+    SMI_ROOT_LIST(ROOT_INDEX_DECLARATION)
+#undef ROOT_INDEX_DECLARATION
+    kRootListLength,
+    kStrongRootListLength = kStringTableRootIndex,
+    kSmiRootsStart = kStringTableRootIndex + 1
+  };
+
+  STATIC_ASSERT(kUndefinedValueRootIndex ==
+                Internals::kUndefinedValueRootIndex);
+  STATIC_ASSERT(kNullValueRootIndex == Internals::kNullValueRootIndex);
+  STATIC_ASSERT(kTrueValueRootIndex == Internals::kTrueValueRootIndex);
+  STATIC_ASSERT(kFalseValueRootIndex == Internals::kFalseValueRootIndex);
+  STATIC_ASSERT(kempty_stringRootIndex == Internals::kEmptyStringRootIndex);
+
+  // Generated code can embed direct references to non-writable roots if
+  // they are in new space.
+  static bool RootCanBeWrittenAfterInitialization(RootListIndex root_index);
+  // Generated code can treat direct references to this root as constant.
+  bool RootCanBeTreatedAsConstant(RootListIndex root_index);
+
+  Map* MapForFixedTypedArray(ExternalArrayType array_type);
+  RootListIndex RootIndexForFixedTypedArray(ExternalArrayType array_type);
+
+  Map* MapForExternalArrayType(ExternalArrayType array_type);
+  RootListIndex RootIndexForExternalArrayType(ExternalArrayType array_type);
+
+  RootListIndex RootIndexForEmptyExternalArray(ElementsKind kind);
+  RootListIndex RootIndexForEmptyFixedTypedArray(ElementsKind kind);
+  ExternalArray* EmptyExternalArrayForMap(Map* map);
+  FixedTypedArrayBase* EmptyFixedTypedArrayForMap(Map* map);
+
+  void RecordStats(HeapStats* stats, bool take_snapshot = false);
+
+  // Copies a block of memory from src to dst.  The block size must be
+  // aligned to the pointer size.
+  static inline void CopyBlock(Address dst, Address src, int byte_size);
+
+  // Optimized version of memmove for blocks with pointer size aligned sizes and
+  // pointer size aligned addresses.
+  static inline void MoveBlock(Address dst, Address src, int byte_size);
+
+  // Checks the new space expansion criteria and expands semispaces if they
+  // were hit.
+  void CheckNewSpaceExpansionCriteria();
+
+  inline void IncrementPromotedObjectsSize(int object_size) {
+    DCHECK(object_size > 0);
+    promoted_objects_size_ += object_size;
+  }
+
+  inline void IncrementSemiSpaceCopiedObjectSize(int object_size) {
+    DCHECK(object_size > 0);
+    semi_space_copied_object_size_ += object_size;
+  }
+
+  inline void IncrementNodesDiedInNewSpace() { nodes_died_in_new_space_++; }
+
+  inline void IncrementNodesCopiedInNewSpace() { nodes_copied_in_new_space_++; }
+
+  inline void IncrementNodesPromoted() { nodes_promoted_++; }
+
+  inline void IncrementYoungSurvivorsCounter(int survived) {
+    DCHECK(survived >= 0);
+    survived_since_last_expansion_ += survived;
+  }
+
+  inline bool NextGCIsLikelyToBeFull() {
+    if (FLAG_gc_global) return true;
+
+    if (FLAG_stress_compaction && (gc_count_ & 1) != 0) return true;
+
+    intptr_t adjusted_allocation_limit =
+        old_generation_allocation_limit_ - new_space_.Capacity();
+
+    if (PromotedTotalSize() >= adjusted_allocation_limit) return true;
+
+    return false;
+  }
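+  // Worked example for the heuristic above (hypothetical numbers): with an
+  // old generation allocation limit of 64 MB and a new space capacity of
+  // 8 MB, the adjusted limit is 56 MB.  Once the promoted total size reaches
+  // 56 MB the next GC is predicted to be full, since even a scavenge that
+  // promoted the whole new space would push the old generation past its
+  // limit.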
+
+  void UpdateNewSpaceReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
+  void UpdateReferencesInExternalStringTable(
+      ExternalStringTableUpdaterCallback updater_func);
+
+  void ProcessWeakReferences(WeakObjectRetainer* retainer);
+
+  void VisitExternalResources(v8::ExternalResourceVisitor* visitor);
+
+  // An object should be promoted if the object has survived a
+  // scavenge operation.
+  inline bool ShouldBePromoted(Address old_address, int object_size);
+
+  void ClearJSFunctionResultCaches();
+
+  void ClearNormalizedMapCaches();
+
+  GCTracer* tracer() { return &tracer_; }
+
+  // Returns the size of objects residing in non-new spaces.
+  intptr_t PromotedSpaceSizeOfObjects();
+
+  double total_regexp_code_generated() { return total_regexp_code_generated_; }
+  void IncreaseTotalRegexpCodeGenerated(int size) {
+    total_regexp_code_generated_ += size;
+  }
+
+  void IncrementCodeGeneratedBytes(bool is_crankshafted, int size) {
+    if (is_crankshafted) {
+      crankshaft_codegen_bytes_generated_ += size;
+    } else {
+      full_codegen_bytes_generated_ += size;
+    }
+  }
+
+  // Update GC statistics that are tracked on the Heap.
+  void UpdateCumulativeGCStatistics(double duration, double spent_in_mutator,
+                                    double marking_time);
+
+  // Returns maximum GC pause.
+  double get_max_gc_pause() { return max_gc_pause_; }
+
+  // Returns maximum size of objects alive after GC.
+  intptr_t get_max_alive_after_gc() { return max_alive_after_gc_; }
+
+  // Returns minimal interval between two subsequent collections.
+  double get_min_in_mutator() { return min_in_mutator_; }
+
+  MarkCompactCollector* mark_compact_collector() {
+    return &mark_compact_collector_;
+  }
+
+  StoreBuffer* store_buffer() { return &store_buffer_; }
+
+  Marking* marking() { return &marking_; }
+
+  IncrementalMarking* incremental_marking() { return &incremental_marking_; }
+
+  ExternalStringTable* external_string_table() {
+    return &external_string_table_;
+  }
+
+  // Returns the current sweep generation.
+  int sweep_generation() { return sweep_generation_; }
+
+  inline Isolate* isolate();
+
+  void CallGCPrologueCallbacks(GCType gc_type, GCCallbackFlags flags);
+  void CallGCEpilogueCallbacks(GCType gc_type, GCCallbackFlags flags);
+
+  inline bool OldGenerationAllocationLimitReached();
+
+  inline void DoScavengeObject(Map* map, HeapObject** slot, HeapObject* obj) {
+    scavenging_visitors_table_.GetVisitor(map)(map, slot, obj);
+  }
+
+  void QueueMemoryChunkForFree(MemoryChunk* chunk);
+  void FreeQueuedChunks();
+
+  int gc_count() const { return gc_count_; }
+
+  // Completely clear the Instanceof cache (to stop it keeping objects alive
+  // around a GC).
+  inline void CompletelyClearInstanceofCache();
+
+  // The roots that have an index less than this are always in old space.
+  static const int kOldSpaceRoots = 0x20;
+
+  uint32_t HashSeed() {
+    uint32_t seed = static_cast<uint32_t>(hash_seed()->value());
+    DCHECK(FLAG_randomize_hashes || seed == 0);
+    return seed;
+  }
+
+  void SetArgumentsAdaptorDeoptPCOffset(int pc_offset) {
+    DCHECK(arguments_adaptor_deopt_pc_offset() == Smi::FromInt(0));
+    set_arguments_adaptor_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  void SetConstructStubDeoptPCOffset(int pc_offset) {
+    DCHECK(construct_stub_deopt_pc_offset() == Smi::FromInt(0));
+    set_construct_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  void SetGetterStubDeoptPCOffset(int pc_offset) {
+    DCHECK(getter_stub_deopt_pc_offset() == Smi::FromInt(0));
+    set_getter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  void SetSetterStubDeoptPCOffset(int pc_offset) {
+    DCHECK(setter_stub_deopt_pc_offset() == Smi::FromInt(0));
+    set_setter_stub_deopt_pc_offset(Smi::FromInt(pc_offset));
+  }
+
+  // For post mortem debugging.
+  void RememberUnmappedPage(Address page, bool compacted);
+
+  // Global inline caching age: it is incremented on some GCs after context
+  // disposal. We use it to flush inline caches.
+  int global_ic_age() { return global_ic_age_; }
+
+  void AgeInlineCaches() {
+    global_ic_age_ = (global_ic_age_ + 1) & SharedFunctionInfo::ICAgeBits::kMax;
+  }
+
+  bool flush_monomorphic_ics() { return flush_monomorphic_ics_; }
+
+  int64_t amount_of_external_allocated_memory() {
+    return amount_of_external_allocated_memory_;
+  }
+
+  void DeoptMarkedAllocationSites();
+
+  bool MaximumSizeScavenge() { return maximum_size_scavenges_ > 0; }
+
+  bool DeoptMaybeTenuredAllocationSites() {
+    return new_space_.IsAtMaximumCapacity() && maximum_size_scavenges_ == 0;
+  }
+
+  // ObjectStats are kept in two arrays, counts and sizes. Related stats are
+  // stored in a contiguous linear buffer. Stats groups are stored one after
+  // another.
+  enum {
+    FIRST_CODE_KIND_SUB_TYPE = LAST_TYPE + 1,
+    FIRST_FIXED_ARRAY_SUB_TYPE =
+        FIRST_CODE_KIND_SUB_TYPE + Code::NUMBER_OF_KINDS,
+    FIRST_CODE_AGE_SUB_TYPE =
+        FIRST_FIXED_ARRAY_SUB_TYPE + LAST_FIXED_ARRAY_SUB_TYPE + 1,
+    OBJECT_STATS_COUNT = FIRST_CODE_AGE_SUB_TYPE + Code::kCodeAgeCount + 1
+  };
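+  // Index layout of the counts/sizes arrays implied by the enum above:
+  //
+  //   [0 .. LAST_TYPE]                 one slot per InstanceType
+  //   [FIRST_CODE_KIND_SUB_TYPE ..)    one slot per Code kind
+  //   [FIRST_FIXED_ARRAY_SUB_TYPE ..)  one slot per fixed array sub type
+  //   [FIRST_CODE_AGE_SUB_TYPE ..)     one slot per code age
+  //
+  // RecordObjectStats() below fills the first group, RecordCodeSubTypeStats()
+  // the second and fourth, and RecordFixedArraySubTypeStats() the third.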
+
+  void RecordObjectStats(InstanceType type, size_t size) {
+    DCHECK(type <= LAST_TYPE);
+    object_counts_[type]++;
+    object_sizes_[type] += size;
+  }
+
+  void RecordCodeSubTypeStats(int code_sub_type, int code_age, size_t size) {
+    int code_sub_type_index = FIRST_CODE_KIND_SUB_TYPE + code_sub_type;
+    int code_age_index =
+        FIRST_CODE_AGE_SUB_TYPE + code_age - Code::kFirstCodeAge;
+    DCHECK(code_sub_type_index >= FIRST_CODE_KIND_SUB_TYPE &&
+           code_sub_type_index < FIRST_CODE_AGE_SUB_TYPE);
+    DCHECK(code_age_index >= FIRST_CODE_AGE_SUB_TYPE &&
+           code_age_index < OBJECT_STATS_COUNT);
+    object_counts_[code_sub_type_index]++;
+    object_sizes_[code_sub_type_index] += size;
+    object_counts_[code_age_index]++;
+    object_sizes_[code_age_index] += size;
+  }
+
+  void RecordFixedArraySubTypeStats(int array_sub_type, size_t size) {
+    DCHECK(array_sub_type <= LAST_FIXED_ARRAY_SUB_TYPE);
+    object_counts_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type]++;
+    object_sizes_[FIRST_FIXED_ARRAY_SUB_TYPE + array_sub_type] += size;
+  }
+
+  void CheckpointObjectStats();
+
+  // We don't use a LockGuard here since we want to lock the heap
+  // only when FLAG_concurrent_recompilation is true.
+  class RelocationLock {
+   public:
+    explicit RelocationLock(Heap* heap) : heap_(heap) {
+      heap_->relocation_mutex_.Lock();
+    }
+
+    ~RelocationLock() { heap_->relocation_mutex_.Unlock(); }
+
+   private:
+    Heap* heap_;
+  };
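+  // Usage sketch for RelocationLock (hypothetical caller; `heap` is assumed
+  // to be a valid Heap*):
+  //
+  //   {
+  //     Heap::RelocationLock relocation_lock(heap);
+  //     // Object relocation is mutually excluded while the lock is held.
+  //   }  // The mutex is released when the lock leaves scope (RAII).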
+
+  void AddWeakObjectToCodeDependency(Handle<Object> obj,
+                                     Handle<DependentCode> dep);
+
+  DependentCode* LookupWeakObjectToCodeDependency(Handle<Object> obj);
+
+  void InitializeWeakObjectToCodeTable() {
+    set_weak_object_to_code_table(undefined_value());
+  }
+
+  void EnsureWeakObjectToCodeTable();
+
+  static void FatalProcessOutOfMemory(const char* location,
+                                      bool take_snapshot = false);
+
+  // This event is triggered after successful allocation of a new object made
+  // by the runtime. Allocations of target space for object evacuation do not
+  // trigger the event. In order to track ALL allocations, one must turn off
+  // FLAG_inline_new and FLAG_use_allocation_folding.
+  inline void OnAllocationEvent(HeapObject* object, int size_in_bytes);
+
+  // This event is triggered after an object is moved to a new place.
+  inline void OnMoveEvent(HeapObject* target, HeapObject* source,
+                          int size_in_bytes);
+
+ protected:
+  // Methods made available to tests.
+
+  // Allocates a JS Map in the heap.
+  MUST_USE_RESULT AllocationResult
+      AllocateMap(InstanceType instance_type, int instance_size,
+                  ElementsKind elements_kind = TERMINAL_FAST_ELEMENTS_KIND);
+
+  // Allocates and initializes a new JavaScript object based on a
+  // constructor.
+  // If allocation_site is non-null, a memento pointing to the site is
+  // emitted after the object.
+  MUST_USE_RESULT AllocationResult
+      AllocateJSObject(JSFunction* constructor,
+                       PretenureFlag pretenure = NOT_TENURED,
+                       AllocationSite* allocation_site = NULL);
+
+  // Allocates and initializes a new JavaScript object based on a map.
+  // Passing an allocation site means that a memento will be created that
+  // points to the site.
+  MUST_USE_RESULT AllocationResult
+      AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure = NOT_TENURED,
+                              bool alloc_props = true,
+                              AllocationSite* allocation_site = NULL);
+
+  // Allocates a HeapNumber from value.
+  MUST_USE_RESULT AllocationResult
+      AllocateHeapNumber(double value, MutableMode mode = IMMUTABLE,
+                         PretenureFlag pretenure = NOT_TENURED);
+
+  // Allocate a byte array of the specified length
+  MUST_USE_RESULT AllocationResult
+      AllocateByteArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
+  // Copy the code and scope info part of the code object, but insert
+  // the provided data as the relocation information.
+  MUST_USE_RESULT AllocationResult
+      CopyCode(Code* code, Vector<byte> reloc_info);
+
+  MUST_USE_RESULT AllocationResult CopyCode(Code* code);
+
+  // Allocates a fixed array initialized with undefined values
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedArray(int length, PretenureFlag pretenure = NOT_TENURED);
+
+ private:
+  Heap();
+
+  // The amount of external memory registered through the API and kept alive
+  // by global handles.
+  int64_t amount_of_external_allocated_memory_;
+
+  // Caches the amount of external memory registered at the last global gc.
+  int64_t amount_of_external_allocated_memory_at_last_global_gc_;
+
+  // This can be calculated directly from a pointer to the heap; however, it is
+  // more expedient to get at the isolate directly from within Heap methods.
+  Isolate* isolate_;
+
+  Object* roots_[kRootListLength];
+
+  size_t code_range_size_;
+  int reserved_semispace_size_;
+  int max_semi_space_size_;
+  int initial_semispace_size_;
+  intptr_t max_old_generation_size_;
+  intptr_t max_executable_size_;
+  intptr_t maximum_committed_;
+
+  // For keeping track of how much data has survived
+  // scavenge since last new space expansion.
+  int survived_since_last_expansion_;
+
+  // For keeping track of when to flush RegExp code.
+  int sweep_generation_;
+
+  int always_allocate_scope_depth_;
+
+  // For keeping track of context disposals.
+  int contexts_disposed_;
+
+  int global_ic_age_;
+
+  bool flush_monomorphic_ics_;
+
+  int scan_on_scavenge_pages_;
+
+  NewSpace new_space_;
+  OldSpace* old_pointer_space_;
+  OldSpace* old_data_space_;
+  OldSpace* code_space_;
+  MapSpace* map_space_;
+  CellSpace* cell_space_;
+  PropertyCellSpace* property_cell_space_;
+  LargeObjectSpace* lo_space_;
+  HeapState gc_state_;
+  int gc_post_processing_depth_;
+  Address new_space_top_after_last_gc_;
+
+  // Returns the amount of external memory registered since last global gc.
+  int64_t PromotedExternalMemorySize();
+
+  // How many "runtime allocations" happened.
+  uint32_t allocations_count_;
+
+  // Running hash over allocations performed.
+  uint32_t raw_allocations_hash_;
+
+  // Countdown counter, dumps allocation hash when 0.
+  uint32_t dump_allocations_hash_countdown_;
+
+  // How many mark-sweep collections happened.
+  unsigned int ms_count_;
+
+  // How many GCs have happened.
+  unsigned int gc_count_;
+
+  // For post mortem debugging.
+  static const int kRememberedUnmappedPages = 128;
+  int remembered_unmapped_pages_index_;
+  Address remembered_unmapped_pages_[kRememberedUnmappedPages];
+
+  // Total length of the strings we failed to flatten since the last GC.
+  int unflattened_strings_length_;
+
+#define ROOT_ACCESSOR(type, name, camel_name)                                 \
+  inline void set_##name(type* value) {                                       \
+    /* The deserializer makes use of the fact that these common roots are */  \
+    /* never in new space and never on a page that is being compacted.    */  \
+    DCHECK(k##camel_name##RootIndex >= kOldSpaceRoots || !InNewSpace(value)); \
+    roots_[k##camel_name##RootIndex] = value;                                 \
+  }
+  ROOT_LIST(ROOT_ACCESSOR)
+#undef ROOT_ACCESSOR
+
+#ifdef DEBUG
+  // If the --gc-interval flag is set to a positive value, this
+  // variable holds the number of allocations that remain until the
+  // next failure and garbage collection.
+  int allocation_timeout_;
+#endif  // DEBUG
+
+  // Limit that triggers a global GC on the next (normally caused) GC.  This
+  // is checked when we have already decided to do a GC to help determine
+  // which collector to invoke, before expanding a paged space in the old
+  // generation and on every allocation in large object space.
+  intptr_t old_generation_allocation_limit_;
+
+  // Indicates that an allocation has failed in the old generation since the
+  // last GC.
+  bool old_gen_exhausted_;
+
+  // Indicates that inline bump-pointer allocation has been globally disabled
+  // for all spaces. This is used to disable allocations in generated code.
+  bool inline_allocation_disabled_;
+
+  // Weak list heads, threaded through the objects.
+  // List heads are initialized lazily and contain the undefined_value at start.
+  Object* native_contexts_list_;
+  Object* array_buffers_list_;
+  Object* allocation_sites_list_;
+
+  // WeakHashTable that maps objects embedded in optimized code to dependent
+  // code list. It is initialized lazily and contains the undefined_value at
+  // start.
+  Object* weak_object_to_code_table_;
+
+  // List of encountered weak collections (JSWeakMap and JSWeakSet) during
+  // marking. It is initialized during marking, destroyed after marking and
+  // contains Smi(0) while marking is not active.
+  Object* encountered_weak_collections_;
+
+  StoreBufferRebuilder store_buffer_rebuilder_;
+
+  struct StringTypeTable {
+    InstanceType type;
+    int size;
+    RootListIndex index;
+  };
+
+  struct ConstantStringTable {
+    const char* contents;
+    RootListIndex index;
+  };
+
+  struct StructTable {
+    InstanceType type;
+    int size;
+    RootListIndex index;
+  };
+
+  static const StringTypeTable string_type_table[];
+  static const ConstantStringTable constant_string_table[];
+  static const StructTable struct_table[];
+
+  // The special hidden string which is an empty string, but does not match
+  // any string when looked up in properties.
+  String* hidden_string_;
+
+  // GC callback function, called before and after mark-compact GC.
+  // Allocations in the callback function are disallowed.
+  struct GCPrologueCallbackPair {
+    GCPrologueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+                           GCType gc_type, bool pass_isolate)
+        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
+    bool operator==(const GCPrologueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    v8::Isolate::GCPrologueCallback callback;
+    GCType gc_type;
+    // TODO(dcarney): remove variable
+    bool pass_isolate_;
+  };
+  List<GCPrologueCallbackPair> gc_prologue_callbacks_;
+
+  struct GCEpilogueCallbackPair {
+    GCEpilogueCallbackPair(v8::Isolate::GCPrologueCallback callback,
+                           GCType gc_type, bool pass_isolate)
+        : callback(callback), gc_type(gc_type), pass_isolate_(pass_isolate) {}
+    bool operator==(const GCEpilogueCallbackPair& pair) const {
+      return pair.callback == callback;
+    }
+    v8::Isolate::GCPrologueCallback callback;
+    GCType gc_type;
+    // TODO(dcarney): remove variable
+    bool pass_isolate_;
+  };
+  List<GCEpilogueCallbackPair> gc_epilogue_callbacks_;
+
+  // Support for computing object sizes during GC.
+  HeapObjectCallback gc_safe_size_of_old_object_;
+  static int GcSafeSizeOfOldObject(HeapObject* object);
+
+  // Update the GC state. Called from the mark-compact collector.
+  void MarkMapPointersAsEncoded(bool encoded) {
+    DCHECK(!encoded);
+    gc_safe_size_of_old_object_ = &GcSafeSizeOfOldObject;
+  }
+
+  // Code that should be run before and after each GC.  Includes some
+  // reporting/verification activities when compiled with DEBUG set.
+  void GarbageCollectionPrologue();
+  void GarbageCollectionEpilogue();
+
+  // Pretenuring decisions are made based on feedback collected during new
+  // space evacuation. Note that between feedback collection and calling this
+  // method, objects in old space must not move.
+  // Right now we only process pretenuring feedback in high promotion mode.
+  void ProcessPretenuringFeedback();
+
+  // Checks whether a global GC is necessary
+  GarbageCollector SelectGarbageCollector(AllocationSpace space,
+                                          const char** reason);
+
+  // Make sure there is a filler value behind the top of the new space
+  // so that the GC does not confuse some uninitialized/stale memory
+  // with the allocation memento of the object at the top.
+  void EnsureFillerObjectAtTop();
+
+  // Ensure that we have swept all spaces in such a way that we can iterate
+  // over all objects.  May cause a GC.
+  void MakeHeapIterable();
+
+  // Performs a garbage collection operation.
+  // Returns whether there is a chance that another major GC could
+  // collect more garbage.
+  bool CollectGarbage(
+      GarbageCollector collector, const char* gc_reason,
+      const char* collector_reason,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  // Performs garbage collection
+  // Returns whether there is a chance another major GC could
+  // collect more garbage.
+  bool PerformGarbageCollection(
+      GarbageCollector collector,
+      const GCCallbackFlags gc_callback_flags = kNoGCCallbackFlags);
+
+  inline void UpdateOldSpaceLimits();
+
+  // Selects the proper allocation space depending on the given object
+  // size, pretenuring decision, and preferred old-space.
+  static AllocationSpace SelectSpace(int object_size,
+                                     AllocationSpace preferred_old_space,
+                                     PretenureFlag pretenure) {
+    DCHECK(preferred_old_space == OLD_POINTER_SPACE ||
+           preferred_old_space == OLD_DATA_SPACE);
+    if (object_size > Page::kMaxRegularHeapObjectSize) return LO_SPACE;
+    return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
+  }
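+  // Decision sketch for SelectSpace, restating the code above: objects larger
+  // than Page::kMaxRegularHeapObjectSize always go to LO_SPACE regardless of
+  // pretenuring; TENURED allocations go to the preferred old space; all other
+  // allocations start in NEW_SPACE and may be promoted by a later scavenge.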
+
+  // Allocate an uninitialized object.  The memory is non-executable if the
+  // hardware and OS allow.  This is the single choke-point for allocations
+  // performed by the runtime and should not be bypassed (to extend this to
+  // inlined allocations, use the Heap::DisableInlineAllocation() support).
+  MUST_USE_RESULT inline AllocationResult AllocateRaw(
+      int size_in_bytes, AllocationSpace space, AllocationSpace retry_space);
+
+  // Allocates a heap object based on the map.
+  MUST_USE_RESULT AllocationResult
+      Allocate(Map* map, AllocationSpace space,
+               AllocationSite* allocation_site = NULL);
+
+  // Allocates a partial map for bootstrapping.
+  MUST_USE_RESULT AllocationResult
+      AllocatePartialMap(InstanceType instance_type, int instance_size);
+
+  // Initializes a JSObject based on its map.
+  void InitializeJSObjectFromMap(JSObject* obj, FixedArray* properties,
+                                 Map* map);
+  void InitializeAllocationMemento(AllocationMemento* memento,
+                                   AllocationSite* allocation_site);
+
+  // Allocate a block of memory in the given space (filled with a filler).
+  // Used as a fall-back for generated code when the space is full.
+  MUST_USE_RESULT AllocationResult
+      AllocateFillerObject(int size, bool double_align, AllocationSpace space);
+
+  // Allocate an uninitialized fixed array.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawFixedArray(int length, PretenureFlag pretenure);
+
+  // Allocate an uninitialized fixed double array.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawFixedDoubleArray(int length, PretenureFlag pretenure);
+
+  // Allocate an initialized fixed array with the given filler value.
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedArrayWithFiller(int length, PretenureFlag pretenure,
+                                   Object* filler);
+
+  // Allocates and partially initializes a String.  There are two String
+  // encodings: one-byte and two-byte.  These functions allocate a string of
+  // the given length and set its map and length fields.  The characters of
+  // the string are uninitialized.
+  MUST_USE_RESULT AllocationResult
+      AllocateRawOneByteString(int length, PretenureFlag pretenure);
+  MUST_USE_RESULT AllocationResult
+      AllocateRawTwoByteString(int length, PretenureFlag pretenure);
+
+  bool CreateInitialMaps();
+  void CreateInitialObjects();
+
+  // Allocates an internalized string in old space based on the character
+  // stream.
+  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringFromUtf8(
+      Vector<const char> str, int chars, uint32_t hash_field);
+
+  MUST_USE_RESULT inline AllocationResult AllocateOneByteInternalizedString(
+      Vector<const uint8_t> str, uint32_t hash_field);
+
+  MUST_USE_RESULT inline AllocationResult AllocateTwoByteInternalizedString(
+      Vector<const uc16> str, uint32_t hash_field);
+
+  template <bool is_one_byte, typename T>
+  MUST_USE_RESULT AllocationResult
+      AllocateInternalizedStringImpl(T t, int chars, uint32_t hash_field);
+
+  template <typename T>
+  MUST_USE_RESULT inline AllocationResult AllocateInternalizedStringImpl(
+      T t, int chars, uint32_t hash_field);
+
+  // Allocates an uninitialized fixed array. It must be filled by the caller.
+  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedArray(int length);
+
+  // Make a copy of src and return it. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT inline AllocationResult CopyFixedArray(FixedArray* src);
+
+  // Make a copy of src, set the map, and return the copy. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT AllocationResult
+      CopyFixedArrayWithMap(FixedArray* src, Map* map);
+
+  // Make a copy of src and return it. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT inline AllocationResult CopyFixedDoubleArray(
+      FixedDoubleArray* src);
+
+  // Make a copy of src and return it. Returns
+  // Failure::RetryAfterGC(requested_bytes, space) if the allocation failed.
+  MUST_USE_RESULT inline AllocationResult CopyConstantPoolArray(
+      ConstantPoolArray* src);
+
+  // Computes a single character string where the character has the given
+  // code.  A cache is used for one-byte (Latin1) codes.
+  MUST_USE_RESULT AllocationResult
+      LookupSingleCharacterStringFromCode(uint16_t code);
+
+  // Allocate a symbol in old space.
+  MUST_USE_RESULT AllocationResult AllocateSymbol();
+
+  // Make a copy of src, set the map, and return the copy.
+  MUST_USE_RESULT AllocationResult
+      CopyConstantPoolArrayWithMap(ConstantPoolArray* src, Map* map);
+
+  MUST_USE_RESULT AllocationResult AllocateConstantPoolArray(
+      const ConstantPoolArray::NumberOfEntries& small);
+
+  MUST_USE_RESULT AllocationResult AllocateExtendedConstantPoolArray(
+      const ConstantPoolArray::NumberOfEntries& small,
+      const ConstantPoolArray::NumberOfEntries& extended);
+
+  // Allocates an external array of the specified length and type.
+  MUST_USE_RESULT AllocationResult
+      AllocateExternalArray(int length, ExternalArrayType array_type,
+                            void* external_pointer, PretenureFlag pretenure);
+
+  // Allocates a fixed typed array of the specified length and type.
+  MUST_USE_RESULT AllocationResult
+      AllocateFixedTypedArray(int length, ExternalArrayType array_type,
+                              PretenureFlag pretenure);
+
+  // Make a copy of src and return it.
+  MUST_USE_RESULT AllocationResult CopyAndTenureFixedCOWArray(FixedArray* src);
+
+  // Make a copy of src, set the map, and return the copy.
+  MUST_USE_RESULT AllocationResult
+      CopyFixedDoubleArrayWithMap(FixedDoubleArray* src, Map* map);
+
+  // Allocates a fixed double array with uninitialized values.
+  MUST_USE_RESULT AllocationResult AllocateUninitializedFixedDoubleArray(
+      int length, PretenureFlag pretenure = NOT_TENURED);
+
+  // These two Create*EntryStub functions are here and forced to not be
+  // inlined because of a gcc-4.4 bug that assigns wrong vtable entries.
+  NO_INLINE(void CreateJSEntryStub());
+  NO_INLINE(void CreateJSConstructEntryStub());
+
+  void CreateFixedStubs();
+
+  // Allocate empty fixed array.
+  MUST_USE_RESULT AllocationResult AllocateEmptyFixedArray();
+
+  // Allocate empty external array of given type.
+  MUST_USE_RESULT AllocationResult
+      AllocateEmptyExternalArray(ExternalArrayType array_type);
+
+  // Allocate empty fixed typed array of given type.
+  MUST_USE_RESULT AllocationResult
+      AllocateEmptyFixedTypedArray(ExternalArrayType array_type);
+
+  // Allocate empty constant pool array.
+  MUST_USE_RESULT AllocationResult AllocateEmptyConstantPoolArray();
+
+  // Allocate a tenured simple cell.
+  MUST_USE_RESULT AllocationResult AllocateCell(Object* value);
+
+  // Allocate a tenured JS global property cell initialized with the hole.
+  MUST_USE_RESULT AllocationResult AllocatePropertyCell();
+
+  // Allocates a new utility object in the old generation.
+  MUST_USE_RESULT AllocationResult AllocateStruct(InstanceType type);
+
+  // Allocates a new foreign object.
+  MUST_USE_RESULT AllocationResult
+      AllocateForeign(Address address, PretenureFlag pretenure = NOT_TENURED);
+
+  MUST_USE_RESULT AllocationResult
+      AllocateCode(int object_size, bool immovable);
+
+  MUST_USE_RESULT AllocationResult InternalizeStringWithKey(HashTableKey* key);
+
+  MUST_USE_RESULT AllocationResult InternalizeString(String* str);
+
+  // Performs a minor collection in the new generation.
+  void Scavenge();
+
+  // Commits from space if it is uncommitted.
+  void EnsureFromSpaceIsCommitted();
+
+  // Uncommit unused semi space.
+  bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
+
+  // Fill in bogus values in from space
+  void ZapFromSpace();
+
+  static String* UpdateNewSpaceReferenceInExternalStringTableEntry(
+      Heap* heap, Object** pointer);
+
+  Address DoScavenge(ObjectVisitor* scavenge_visitor, Address new_space_front);
+  static void ScavengeStoreBufferCallback(Heap* heap, MemoryChunk* page,
+                                          StoreBufferEvent event);
+
+  // Performs a major collection in the whole heap.
+  void MarkCompact();
+
+  // Code to be run before and after mark-compact.
+  void MarkCompactPrologue();
+
+  void ProcessNativeContexts(WeakObjectRetainer* retainer);
+  void ProcessArrayBuffers(WeakObjectRetainer* retainer);
+  void ProcessAllocationSites(WeakObjectRetainer* retainer);
+
+  // Deoptimizes all code that contains allocation instructions which are
+  // tenured or not tenured. Moreover, it clears the pretenuring allocation
+  // site statistics.
+  void ResetAllAllocationSitesDependentCode(PretenureFlag flag);
+
+  // Evaluates local pretenuring for the old space and calls
+  // ResetAllTenuredAllocationSitesDependentCode if too many objects died in
+  // the old space.
+  void EvaluateOldSpaceLocalPretenuring(uint64_t size_of_objects_before_gc);
+
+  // Called on heap tear-down.
+  void TearDownArrayBuffers();
+
+  // Record statistics before and after garbage collection.
+  void ReportStatisticsBeforeGC();
+  void ReportStatisticsAfterGC();
+
+  // Slow part of scavenge object.
+  static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
+
+  // Total RegExp code ever generated
+  double total_regexp_code_generated_;
+
+  GCTracer tracer_;
+
+  // Creates and installs the full-sized number string cache.
+  int FullSizeNumberStringCacheLength();
+  // Flush the number to string cache.
+  void FlushNumberStringCache();
+
+  // Sets used allocation site entries to undefined.
+  void FlushAllocationSitesScratchpad();
+
+  // Initializes the allocation sites scratchpad with undefined values.
+  void InitializeAllocationSitesScratchpad();
+
+  // Adds an allocation site to the scratchpad if there is space left.
+  void AddAllocationSiteToScratchpad(AllocationSite* site,
+                                     ScratchpadSlotMode mode);
+
+  void UpdateSurvivalStatistics(int start_new_space_size);
+
+  static const int kYoungSurvivalRateHighThreshold = 90;
+  static const int kYoungSurvivalRateAllowedDeviation = 15;
+
+  static const int kOldSurvivalRateLowThreshold = 10;
+
+  int high_survival_rate_period_length_;
+  intptr_t promoted_objects_size_;
+  double promotion_rate_;
+  intptr_t semi_space_copied_object_size_;
+  double semi_space_copied_rate_;
+  int nodes_died_in_new_space_;
+  int nodes_copied_in_new_space_;
+  int nodes_promoted_;
+
+  // This is the pretenuring trigger for allocation sites that are in maybe
+  // tenure state. When we switch to the maximum new space size we deoptimize
+  // the code that belongs to the allocation site and derive the lifetime
+  // of the allocation site.
+  unsigned int maximum_size_scavenges_;
+
+  // TODO(hpayer): Allocation site pretenuring may make this method obsolete.
+  // Re-visit incremental marking heuristics.
+  bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
+
+  void SelectScavengingVisitorsTable();
+
+  void IdleMarkCompact(const char* message);
+
+  void AdvanceIdleIncrementalMarking(intptr_t step_size);
+
+  bool WorthActivatingIncrementalMarking();
+
+  void ClearObjectStats(bool clear_last_time_stats = false);
+
+  void set_weak_object_to_code_table(Object* value) {
+    DCHECK(!InNewSpace(value));
+    weak_object_to_code_table_ = value;
+  }
+
+  Object** weak_object_to_code_table_address() {
+    return &weak_object_to_code_table_;
+  }
+
+  inline void UpdateAllocationsHash(HeapObject* object);
+  inline void UpdateAllocationsHash(uint32_t value);
+  inline void PrintAlloctionsHash();
+
+  static const int kInitialStringTableSize = 2048;
+  static const int kInitialEvalCacheSize = 64;
+  static const int kInitialNumberStringCacheSize = 256;
+
+  // Object counts and used memory by InstanceType
+  size_t object_counts_[OBJECT_STATS_COUNT];
+  size_t object_counts_last_time_[OBJECT_STATS_COUNT];
+  size_t object_sizes_[OBJECT_STATS_COUNT];
+  size_t object_sizes_last_time_[OBJECT_STATS_COUNT];
+
+  // Maximum GC pause.
+  double max_gc_pause_;
+
+  // Total time spent in GC.
+  double total_gc_time_ms_;
+
+  // Maximum size of objects alive after GC.
+  intptr_t max_alive_after_gc_;
+
+  // Minimal interval between two subsequent collections.
+  double min_in_mutator_;
+
+  // Cumulative GC time spent in marking
+  double marking_time_;
+
+  // Cumulative GC time spent in sweeping
+  double sweeping_time_;
+
+  MarkCompactCollector mark_compact_collector_;
+
+  StoreBuffer store_buffer_;
+
+  Marking marking_;
+
+  IncrementalMarking incremental_marking_;
+
+  GCIdleTimeHandler gc_idle_time_handler_;
+  unsigned int gc_count_at_last_idle_gc_;
+
+  // These two counters are monotonically increasing and never reset.
+  size_t full_codegen_bytes_generated_;
+  size_t crankshaft_codegen_bytes_generated_;
+
+  // If the --deopt_every_n_garbage_collections flag is set to a positive value,
+  // this variable holds the number of garbage collections since the last
+  // deoptimization triggered by garbage collection.
+  int gcs_since_last_deopt_;
+
+#ifdef VERIFY_HEAP
+  int no_weak_object_verification_scope_depth_;
+#endif
+
+  static const int kAllocationSiteScratchpadSize = 256;
+  int allocation_sites_scratchpad_length_;
+
+  static const int kMaxMarkCompactsInIdleRound = 7;
+  static const int kIdleScavengeThreshold = 5;
+
+  // Shared state read by the scavenge collector and set by ScavengeObject.
+  PromotionQueue promotion_queue_;
+
+  // Flag is set when the heap has been configured.  The heap can be repeatedly
+  // configured through the API until it is set up.
+  bool configured_;
+
+  ExternalStringTable external_string_table_;
+
+  VisitorDispatchTable<ScavengingCallback> scavenging_visitors_table_;
+
+  MemoryChunk* chunks_queued_for_free_;
+
+  base::Mutex relocation_mutex_;
+
+  int gc_callbacks_depth_;
+
+  friend class AlwaysAllocateScope;
+  friend class Factory;
+  friend class GCCallbacksScope;
+  friend class GCTracer;
+  friend class HeapIterator;
+  friend class Isolate;
+  friend class MarkCompactCollector;
+  friend class MarkCompactMarkingVisitor;
+  friend class MapCompact;
+#ifdef VERIFY_HEAP
+  friend class NoWeakObjectVerificationScope;
+#endif
+  friend class Page;
+
+  DISALLOW_COPY_AND_ASSIGN(Heap);
+};
+
+
+class HeapStats {
+ public:
+  static const int kStartMarker = 0xDECADE00;
+  static const int kEndMarker = 0xDECADE01;
+
+  int* start_marker;                       //  0
+  int* new_space_size;                     //  1
+  int* new_space_capacity;                 //  2
+  intptr_t* old_pointer_space_size;        //  3
+  intptr_t* old_pointer_space_capacity;    //  4
+  intptr_t* old_data_space_size;           //  5
+  intptr_t* old_data_space_capacity;       //  6
+  intptr_t* code_space_size;               //  7
+  intptr_t* code_space_capacity;           //  8
+  intptr_t* map_space_size;                //  9
+  intptr_t* map_space_capacity;            // 10
+  intptr_t* cell_space_size;               // 11
+  intptr_t* cell_space_capacity;           // 12
+  intptr_t* lo_space_size;                 // 13
+  int* global_handle_count;                // 14
+  int* weak_global_handle_count;           // 15
+  int* pending_global_handle_count;        // 16
+  int* near_death_global_handle_count;     // 17
+  int* free_global_handle_count;           // 18
+  intptr_t* memory_allocator_size;         // 19
+  intptr_t* memory_allocator_capacity;     // 20
+  int* objects_per_type;                   // 21
+  int* size_per_type;                      // 22
+  int* os_error;                           // 23
+  int* end_marker;                         // 24
+  intptr_t* property_cell_space_size;      // 25
+  intptr_t* property_cell_space_capacity;  // 26
+};
+
+
+class AlwaysAllocateScope {
+ public:
+  explicit inline AlwaysAllocateScope(Isolate* isolate);
+  inline ~AlwaysAllocateScope();
+
+ private:
+  // Implicitly disable artificial allocation failures.
+  Heap* heap_;
+  DisallowAllocationFailure daf_;
+};
+
+
+#ifdef VERIFY_HEAP
+class NoWeakObjectVerificationScope {
+ public:
+  inline NoWeakObjectVerificationScope();
+  inline ~NoWeakObjectVerificationScope();
+};
+#endif
+
+
+class GCCallbacksScope {
+ public:
+  explicit inline GCCallbacksScope(Heap* heap);
+  inline ~GCCallbacksScope();
+
+  inline bool CheckReenter();
+
+ private:
+  Heap* heap_;
+};
+
+
+// Visitor class to verify interior pointers in spaces that do not contain
+// or care about intergenerational references. All heap object pointers have to
+// point into the heap to a location that has a map pointer at its first word.
+// Caveat: Heap::Contains is an approximation because it can return true for
+// objects in a heap space but above the allocation pointer.
+class VerifyPointersVisitor : public ObjectVisitor {
+ public:
+  inline void VisitPointers(Object** start, Object** end);
+};
+
+
+// Verify that all objects are Smis.
+class VerifySmisVisitor : public ObjectVisitor {
+ public:
+  inline void VisitPointers(Object** start, Object** end);
+};
+
+
+// Space iterator for iterating over all spaces of the heap.  Returns each space
+// in turn, and null when it is done.
+class AllSpaces BASE_EMBEDDED {
+ public:
+  explicit AllSpaces(Heap* heap) : heap_(heap), counter_(FIRST_SPACE) {}
+  Space* next();
+
+ private:
+  Heap* heap_;
+  int counter_;
+};
+
+
+// Space iterator for iterating over all old spaces of the heap: Old pointer
+// space, old data space and code space.  Returns each space in turn, and null
+// when it is done.
+class OldSpaces BASE_EMBEDDED {
+ public:
+  explicit OldSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
+  OldSpace* next();
+
+ private:
+  Heap* heap_;
+  int counter_;
+};
+
+
+// Space iterator for iterating over all the paged spaces of the heap: Map
+// space, old pointer space, old data space, code space and cell space.  Returns
+// each space in turn, and null when it is done.
+class PagedSpaces BASE_EMBEDDED {
+ public:
+  explicit PagedSpaces(Heap* heap) : heap_(heap), counter_(OLD_POINTER_SPACE) {}
+  PagedSpace* next();
+
+ private:
+  Heap* heap_;
+  int counter_;
+};
+
+
+// Space iterator for iterating over all spaces of the heap.
+// For each space an object iterator is provided. The deallocation of the
+// returned object iterators is handled by the space iterator.
+class SpaceIterator : public Malloced {
+ public:
+  explicit SpaceIterator(Heap* heap);
+  SpaceIterator(Heap* heap, HeapObjectCallback size_func);
+  virtual ~SpaceIterator();
+
+  bool has_next();
+  ObjectIterator* next();
+
+ private:
+  ObjectIterator* CreateIterator();
+
+  Heap* heap_;
+  int current_space_;         // from enum AllocationSpace.
+  ObjectIterator* iterator_;  // object iterator for the current space.
+  HeapObjectCallback size_func_;
+};
+
+
+// A HeapIterator provides iteration over the whole heap. It
+// aggregates the specific iterators for the different spaces, as
+// each of these can only iterate over a single space.
+//
+// HeapIterator ensures there is no allocation during its lifetime
+// (using an embedded DisallowHeapAllocation instance).
+//
+// HeapIterator can skip free list nodes (that is, de-allocated heap
+// objects that still remain in the heap). As the implementation of free
+// node filtering uses GC marks, it can't be used during MS/MC GC
+// phases. Also, it is forbidden to interrupt iteration in this mode,
+// as this will leave heap objects marked (and thus, unusable).
+class HeapObjectsFilter;
+
+class HeapIterator BASE_EMBEDDED {
+ public:
+  enum HeapObjectsFiltering { kNoFiltering, kFilterUnreachable };
+
+  explicit HeapIterator(Heap* heap);
+  HeapIterator(Heap* heap, HeapObjectsFiltering filtering);
+  ~HeapIterator();
+
+  HeapObject* next();
+  void reset();
+
+ private:
+  struct MakeHeapIterableHelper {
+    explicit MakeHeapIterableHelper(Heap* heap) { heap->MakeHeapIterable(); }
+  };
+
+  // Perform the initialization.
+  void Init();
+  // Perform all necessary shutdown (destruction) work.
+  void Shutdown();
+  HeapObject* NextObject();
+
+  MakeHeapIterableHelper make_heap_iterable_helper_;
+  DisallowHeapAllocation no_heap_allocation_;
+  Heap* heap_;
+  HeapObjectsFiltering filtering_;
+  HeapObjectsFilter* filter_;
+  // Space iterator for iterating all the spaces.
+  SpaceIterator* space_iterator_;
+  // Object iterator for the space currently being iterated.
+  ObjectIterator* object_iterator_;
+};
+
+
+// Cache for mapping (map, property name) into field offset.
+// Cleared at startup and prior to mark sweep collection.
+class KeyedLookupCache {
+ public:
+  // Lookup field offset for (map, name). If absent, -1 is returned.
+  int Lookup(Handle<Map> map, Handle<Name> name);
+
+  // Update an element in the cache.
+  void Update(Handle<Map> map, Handle<Name> name, int field_offset);
+
+  // Clear the cache.
+  void Clear();
+
+  static const int kLength = 256;
+  static const int kCapacityMask = kLength - 1;
+  static const int kMapHashShift = 5;
+  static const int kHashMask = -4;  // Zero the last two bits.
+  static const int kEntriesPerBucket = 4;
+  static const int kEntryLength = 2;
+  static const int kMapIndex = 0;
+  static const int kKeyIndex = 1;
+  static const int kNotFound = -1;
+
+  // kEntriesPerBucket should be a power of 2.
+  STATIC_ASSERT((kEntriesPerBucket & (kEntriesPerBucket - 1)) == 0);
+  STATIC_ASSERT(kEntriesPerBucket == -kHashMask);
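+  // Bucket math implied by the constants above: kLength / kEntriesPerBucket
+  // = 256 / 4 = 64 buckets.  kHashMask (-4, i.e. ~3) zeroes the two low bits
+  // of a hash so that a bucket's four entries sit at consecutive indices,
+  // which is exactly why kEntriesPerBucket must equal -kHashMask.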
+
+ private:
+  KeyedLookupCache() {
+    for (int i = 0; i < kLength; ++i) {
+      keys_[i].map = NULL;
+      keys_[i].name = NULL;
+      field_offsets_[i] = kNotFound;
+    }
+  }
+
+  static inline int Hash(Handle<Map> map, Handle<Name> name);
+
+  // Get the address of the keys and field_offsets arrays.  Used in
+  // generated code to perform cache lookups.
+  Address keys_address() { return reinterpret_cast<Address>(&keys_); }
+
+  Address field_offsets_address() {
+    return reinterpret_cast<Address>(&field_offsets_);
+  }
+
+  struct Key {
+    Map* map;
+    Name* name;
+  };
+
+  Key keys_[kLength];
+  int field_offsets_[kLength];
+
+  friend class ExternalReference;
+  friend class Isolate;
+  DISALLOW_COPY_AND_ASSIGN(KeyedLookupCache);
+};
+
+
+// Cache for mapping (map, property name) into descriptor index.
+// The cache contains both positive and negative results.
+// A descriptor index equal to kAbsent means the property is absent.
+// Cleared at startup and prior to any GC.
+class DescriptorLookupCache {
+ public:
+  // Lookup descriptor index for (map, name).
+  // If absent, kAbsent is returned.
+  int Lookup(Map* source, Name* name) {
+    if (!name->IsUniqueName()) return kAbsent;
+    int index = Hash(source, name);
+    Key& key = keys_[index];
+    if ((key.source == source) && (key.name == name)) return results_[index];
+    return kAbsent;
+  }
+
+  // Update an element in the cache.
+  void Update(Map* source, Name* name, int result) {
+    DCHECK(result != kAbsent);
+    if (name->IsUniqueName()) {
+      int index = Hash(source, name);
+      Key& key = keys_[index];
+      key.source = source;
+      key.name = name;
+      results_[index] = result;
+    }
+  }
+
+  // Clear the cache.
+  void Clear();
+
+  static const int kAbsent = -2;
+
+ private:
+  DescriptorLookupCache() {
+    for (int i = 0; i < kLength; ++i) {
+      keys_[i].source = NULL;
+      keys_[i].name = NULL;
+      results_[i] = kAbsent;
+    }
+  }
+
+  static int Hash(Object* source, Name* name) {
+    // Uses only lower 32 bits if pointers are larger.
+    uint32_t source_hash =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(source)) >>
+        kPointerSizeLog2;
+    uint32_t name_hash =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >>
+        kPointerSizeLog2;
+    return (source_hash ^ name_hash) % kLength;
+  }
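+  // Note on the shifts above: heap pointers are at least pointer-size
+  // aligned, so their low kPointerSizeLog2 bits are always zero and carry no
+  // entropy; shifting them out before XOR-ing spreads entries more evenly
+  // across the kLength slots.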
+
+  static const int kLength = 64;
+  struct Key {
+    Map* source;
+    Name* name;
+  };
+
+  Key keys_[kLength];
+  int results_[kLength];
+
+  friend class Isolate;
+  DISALLOW_COPY_AND_ASSIGN(DescriptorLookupCache);
+};
+
+
+class RegExpResultsCache {
+ public:
+  enum ResultsCacheType { REGEXP_MULTIPLE_INDICES, STRING_SPLIT_SUBSTRINGS };
+
+  // Attempt to retrieve a cached result.  On failure, 0 is returned as a Smi.
+  // On success, the returned result is guaranteed to be a COW-array.
+  static Object* Lookup(Heap* heap, String* key_string, Object* key_pattern,
+                        ResultsCacheType type);
+  // Attempt to add value_array to the cache specified by type.  On success,
+  // value_array is turned into a COW-array.
+  static void Enter(Isolate* isolate, Handle<String> key_string,
+                    Handle<Object> key_pattern, Handle<FixedArray> value_array,
+                    ResultsCacheType type);
+  static void Clear(FixedArray* cache);
+  static const int kRegExpResultsCacheSize = 0x100;
+
+ private:
+  static const int kArrayEntriesPerCacheEntry = 4;
+  static const int kStringOffset = 0;
+  static const int kPatternOffset = 1;
+  static const int kArrayOffset = 2;
+};
+
+
+// Abstract base class for checking whether a weak object should be retained.
+class WeakObjectRetainer {
+ public:
+  virtual ~WeakObjectRetainer() {}
+
+  // Return whether this object should be retained. If NULL is returned the
+  // object has no references. Otherwise the address of the retained object
+  // should be returned, as in some GC situations the object may have been
+  // moved.
+  virtual Object* RetainAs(Object* object) = 0;
+};
+
+
+// Intrusive object marking uses the least significant bit of a
+// heap object's map word to mark objects.
+// Normally all map words have the least significant bit set
+// because they contain a tagged map pointer.
+// If the bit is not set, the object is marked.
+// All objects should be unmarked before resuming
+// JavaScript execution.
+class IntrusiveMarking {
+ public:
+  static bool IsMarked(HeapObject* object) {
+    return (object->map_word().ToRawValue() & kNotMarkedBit) == 0;
+  }
+
+  static void ClearMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word | kNotMarkedBit));
+    DCHECK(!IsMarked(object));
+  }
+
+  static void SetMark(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    object->set_map_word(MapWord::FromRawValue(map_word & ~kNotMarkedBit));
+    DCHECK(IsMarked(object));
+  }
+
+  static Map* MapOfMarkedObject(HeapObject* object) {
+    uintptr_t map_word = object->map_word().ToRawValue();
+    return MapWord::FromRawValue(map_word | kNotMarkedBit).ToMap();
+  }
+
+  static int SizeOfMarkedObject(HeapObject* object) {
+    return object->SizeFromMap(MapOfMarkedObject(object));
+  }
+
+ private:
+  static const uintptr_t kNotMarkedBit = 0x1;
+  STATIC_ASSERT((kHeapObjectTag & kNotMarkedBit) != 0);  // NOLINT
+};
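+// Bit-trick sketch for IntrusiveMarking (hypothetical 32-bit values): an
+// unmarked object's map word holds a tagged map pointer such as 0x2a30c005,
+// whose low bit is set (kHeapObjectTag).  SetMark() clears that bit, giving
+// 0x2a30c004; IsMarked() tests for the cleared bit; MapOfMarkedObject() ORs
+// kNotMarkedBit back in before converting the word to a Map*, so the
+// original map is recoverable from a marked object.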
+
+
+#ifdef DEBUG
+// Helper class for tracing paths to a search target Object from all roots.
+// The TracePathFrom() method can be used to trace paths from a specific
+// object to the search target object.
+class PathTracer : public ObjectVisitor {
+ public:
+  enum WhatToFind {
+    FIND_ALL,   // Will find all matches.
+    FIND_FIRST  // Will stop the search after first match.
+  };
+
+  // Tags 0, 1, and 3 are used. Use 2 for marking visited HeapObjects.
+  static const int kMarkTag = 2;
+
+  // For the WhatToFind arg, if FIND_FIRST is specified, tracing will stop
+  // after the first match.  If FIND_ALL is specified, then tracing will be
+  // done for all matches.
+  PathTracer(Object* search_target, WhatToFind what_to_find,
+             VisitMode visit_mode)
+      : search_target_(search_target),
+        found_target_(false),
+        found_target_in_trace_(false),
+        what_to_find_(what_to_find),
+        visit_mode_(visit_mode),
+        object_stack_(20),
+        no_allocation() {}
+
+  virtual void VisitPointers(Object** start, Object** end);
+
+  void Reset();
+  void TracePathFrom(Object** root);
+
+  bool found() const { return found_target_; }
+
+  static Object* const kAnyGlobalObject;
+
+ protected:
+  class MarkVisitor;
+  class UnmarkVisitor;
+
+  void MarkRecursively(Object** p, MarkVisitor* mark_visitor);
+  void UnmarkRecursively(Object** p, UnmarkVisitor* unmark_visitor);
+  virtual void ProcessResults();
+
+  Object* search_target_;
+  bool found_target_;
+  bool found_target_in_trace_;
+  WhatToFind what_to_find_;
+  VisitMode visit_mode_;
+  List<Object*> object_stack_;
+
+  DisallowHeapAllocation no_allocation;  // i.e. no gc allowed.
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(PathTracer);
+};
+#endif  // DEBUG
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_HEAP_H_
diff --git a/src/heap/incremental-marking-inl.h b/src/heap/incremental-marking-inl.h
new file mode 100644
index 0000000..5258c5c
--- /dev/null
+++ b/src/heap/incremental-marking-inl.h
@@ -0,0 +1,117 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_INCREMENTAL_MARKING_INL_H_
+#define V8_HEAP_INCREMENTAL_MARKING_INL_H_
+
+#include "src/heap/incremental-marking.h"
+
+namespace v8 {
+namespace internal {
+
+
+bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object** slot,
+                                         Object* value) {
+  HeapObject* value_heap_obj = HeapObject::cast(value);
+  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+        if (chunk->IsLeftOfProgressBar(slot)) {
+          WhiteToGreyAndPush(value_heap_obj, value_bit);
+          RestartIfNotMarking();
+        } else {
+          return false;
+        }
+      } else {
+        BlackToGreyAndUnshift(obj, obj_bit);
+        RestartIfNotMarking();
+        return false;
+      }
+    } else {
+      return false;
+    }
+  }
+  if (!is_compacting_) return false;
+  MarkBit obj_bit = Marking::MarkBitFrom(obj);
+  return Marking::IsBlack(obj_bit);
+}
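+// Tri-colour recap for the barrier above (standard incremental-marking
+// terminology): white objects are unvisited, grey objects are queued for
+// scanning, black objects are fully scanned.  The invariant defended here is
+// that a black object must not point at a white one: the barrier either
+// greys the white value (on progress-bar pages) or re-greys the black holder
+// so it is rescanned.  The return value tells the caller whether the slot
+// still needs to be recorded for compaction.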
+
+
+void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
+                                     Object* value) {
+  if (IsMarking() && value->IsHeapObject()) {
+    RecordWriteSlow(obj, slot, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
+                                                Code* value) {
+  if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
+}
+
+
+void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
+                                             Object* value) {
+  if (IsMarking() && value->IsHeapObject()) {
+    RecordWriteIntoCodeSlow(obj, rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWrites(HeapObject* obj) {
+  if (IsMarking()) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+        chunk->set_progress_bar(0);
+      }
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+  }
+}
+
+
+void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
+                                               MarkBit mark_bit) {
+  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+  DCHECK(obj->Size() >= 2 * kPointerSize);
+  DCHECK(IsMarking());
+  Marking::BlackToGrey(mark_bit);
+  int obj_size = obj->Size();
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
+  bytes_scanned_ -= obj_size;
+  int64_t old_bytes_rescanned = bytes_rescanned_;
+  bytes_rescanned_ = old_bytes_rescanned + obj_size;
+  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
+    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
+      // If we have queued twice the heap size for rescanning then we are
+      // going around in circles, scanning the same objects again and again
+      // as the program mutates the heap faster than we can incrementally
+      // trace it.  In this case we switch to non-incremental marking in
+      // order to finish off this marking phase.
+      if (FLAG_trace_gc) {
+        PrintPID("Hurrying incremental marking because of lack of progress\n");
+      }
+      marking_speed_ = kMaxMarkingSpeed;
+    }
+  }
+
+  marking_deque_.UnshiftGrey(obj);
+}
+
+
+void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
+  Marking::WhiteToGrey(mark_bit);
+  marking_deque_.PushGrey(obj);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_INCREMENTAL_MARKING_INL_H_
diff --git a/src/heap/incremental-marking.cc b/src/heap/incremental-marking.cc
new file mode 100644
index 0000000..d72423a
--- /dev/null
+++ b/src/heap/incremental-marking.cc
@@ -0,0 +1,982 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/heap/incremental-marking.h"
+
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/conversions.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/objects-visiting-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+IncrementalMarking::IncrementalMarking(Heap* heap)
+    : heap_(heap),
+      state_(STOPPED),
+      marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
+      steps_count_(0),
+      old_generation_space_available_at_start_of_incremental_(0),
+      old_generation_space_used_at_start_of_incremental_(0),
+      should_hurry_(false),
+      marking_speed_(0),
+      allocated_(0),
+      no_marking_scope_depth_(0),
+      unscanned_bytes_of_large_object_(0) {}
+
+
+void IncrementalMarking::TearDown() { delete marking_deque_memory_; }
+
+
+void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
+                                         Object* value) {
+  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned, so we need to record the slot.
+      heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0),
+                                                  slot, value);
+    }
+  }
+}
+
+
+void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
+                                             Isolate* isolate) {
+  DCHECK(obj->IsHeapObject());
+  IncrementalMarking* marking = isolate->heap()->incremental_marking();
+
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  int counter = chunk->write_barrier_counter();
+  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
+    marking->write_barriers_invoked_since_last_step_ +=
+        MemoryChunk::kWriteBarrierCounterGranularity -
+        chunk->write_barrier_counter();
+    chunk->set_write_barrier_counter(
+        MemoryChunk::kWriteBarrierCounterGranularity);
+  }
+
+  marking->RecordWrite(obj, slot, *slot);
+}
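+// Accounting sketch for the slow path above (assuming the counter is
+// decremented by the generated write barrier code): each chunk's counter
+// starts at kWriteBarrierCounterGranularity.  Once it falls below half the
+// granularity, the consumed amount is added to
+// write_barriers_invoked_since_last_step_ and the counter is reset, letting
+// marking steps scale their work to how write-barrier-heavy the mutator has
+// recently been.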
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc,
+                                               HeapObject* value) {
+  if (IsMarking()) {
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
+  if (IsMarking()) {
+    Code* host = heap_->isolate()
+                     ->inner_pointer_to_code_cache()
+                     ->GcSafeFindCodeForInnerPointer(pc);
+    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+    RecordWriteIntoCode(host, &rinfo, value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
+                                                    Object** slot,
+                                                    Code* value) {
+  if (BaseRecordWrite(host, slot, value)) {
+    DCHECK(slot != NULL);
+    heap_->mark_compact_collector()->RecordCodeEntrySlot(
+        reinterpret_cast<Address>(slot), value);
+  }
+}
+
+
+void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
+                                                 RelocInfo* rinfo,
+                                                 Object* value) {
+  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
+  if (Marking::IsWhite(value_bit)) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      BlackToGreyAndUnshift(obj, obj_bit);
+      RestartIfNotMarking();
+    }
+    // Object is either grey or white.  It will be scanned if it survives.
+    return;
+  }
+
+  if (is_compacting_) {
+    MarkBit obj_bit = Marking::MarkBitFrom(obj);
+    if (Marking::IsBlack(obj_bit)) {
+      // Object is not going to be rescanned.  We need to record the slot.
+      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
+                                                       Code::cast(value));
+    }
+  }
+}
+
+
+static void MarkObjectGreyDoNotEnqueue(Object* obj) {
+  if (obj->IsHeapObject()) {
+    HeapObject* heap_obj = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_obj);
+    if (Marking::IsBlack(mark_bit)) {
+      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
+                                            -heap_obj->Size());
+    }
+    Marking::AnyToGrey(mark_bit);
+  }
+}
+
+
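+// Marks a white object black and accounts for its size in the live bytes of
+// its page; grey and black objects keep their color. Used for data-only
+// objects, which never need to be pushed on the marking deque.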
+static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
+                                       MarkBit mark_bit, int size) {
+  DCHECK(!Marking::IsImpossible(mark_bit));
+  if (mark_bit.Get()) return;
+  mark_bit.Set();
+  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+  DCHECK(Marking::IsBlack(mark_bit));
+}
+
+
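+// Marks a white or grey object black and accounts for its size in the live
+// bytes of its page; objects that are already black are left untouched.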
+static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
+                                        MarkBit mark_bit, int size) {
+  DCHECK(!Marking::IsImpossible(mark_bit));
+  if (Marking::IsBlack(mark_bit)) return;
+  Marking::MarkBlack(mark_bit);
+  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
+  DCHECK(Marking::IsBlack(mark_bit));
+}
+
+
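+// Visitor used while incremental marking is in progress: visited pointers
+// are recorded as slots for the compactor, and the objects they point to are
+// marked (data-only objects black, everything else grey and pushed on the
+// marking deque).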
+class IncrementalMarkingMarkingVisitor
+    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
+ public:
+  static void Initialize() {
+    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
+    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
+    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
+    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
+  }
+
+  static const int kProgressBarScanningChunk = 32 * 1024;
+
+  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
+    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
+    // TODO(mstarzinger): Move setting of the flag to the allocation site of
+    // the array. The visitor should just check the flag.
+    if (FLAG_use_marking_progress_bar &&
+        chunk->owner()->identity() == LO_SPACE) {
+      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
+    }
+    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+      Heap* heap = map->GetHeap();
+      // When using a progress bar for large fixed arrays, scan only a chunk of
+      // the array and try to push it onto the marking deque again until it is
+      // fully scanned. Fall back to scanning it through to the end in case this
+      // fails because of a full deque.
+      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
+      int start_offset =
+          Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar());
+      int end_offset =
+          Min(object_size, start_offset + kProgressBarScanningChunk);
+      int already_scanned_offset = start_offset;
+      bool scan_until_end = false;
+      do {
+        VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0),
+                                HeapObject::RawField(object, start_offset),
+                                HeapObject::RawField(object, end_offset));
+        start_offset = end_offset;
+        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
+        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
+      } while (scan_until_end && start_offset < object_size);
+      chunk->set_progress_bar(start_offset);
+      if (start_offset < object_size) {
+        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
+        heap->incremental_marking()->NotifyIncompleteScanOfObject(
+            object_size - (start_offset - already_scanned_offset));
+      }
+    } else {
+      FixedArrayVisitor::Visit(map, object);
+    }
+  }
+
+  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
+    Context* context = Context::cast(object);
+
+    // We will mark the cache black with a separate pass when we finish marking.
+    // Note that GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
+    if (!cache->IsUndefined()) {
+      MarkObjectGreyDoNotEnqueue(cache);
+    }
+    VisitNativeContext(map, context);
+  }
+
+  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
+    Object* obj = *p;
+    if (obj->IsHeapObject()) {
+      heap->mark_compact_collector()->RecordSlot(p, p, obj);
+      MarkObject(heap, obj);
+    }
+  }
+
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+    for (Object** p = start; p < end; p++) {
+      Object* obj = *p;
+      if (obj->IsHeapObject()) {
+        heap->mark_compact_collector()->RecordSlot(start, p, obj);
+        MarkObject(heap, obj);
+      }
+    }
+  }
+
+  INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor,
+                                             Object** start, Object** end)) {
+    for (Object** p = start; p < end; p++) {
+      Object* obj = *p;
+      if (obj->IsHeapObject()) {
+        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
+        MarkObject(heap, obj);
+      }
+    }
+  }
+
+  // Marks the object grey and pushes it on the marking stack.
+  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
+    } else if (Marking::IsWhite(mark_bit)) {
+      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
+    }
+  }
+
+  // Marks the object black without pushing it on the marking stack.
+  // Returns true if object needed marking and false otherwise.
+  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (Marking::IsWhite(mark_bit)) {
+      mark_bit.Set();
+      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
+                                            heap_object->Size());
+      return true;
+    }
+    return false;
+  }
+};
+
+
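+// Visitor used to mark the strong roots grey when incremental marking starts.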
+class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
+ public:
+  explicit IncrementalMarkingRootMarkingVisitor(
+      IncrementalMarking* incremental_marking)
+      : incremental_marking_(incremental_marking) {}
+
+  void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+ private:
+  void MarkObjectByPointer(Object** p) {
+    Object* obj = *p;
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_object = HeapObject::cast(obj);
+    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
+    if (mark_bit.data_only()) {
+      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
+    } else {
+      if (Marking::IsWhite(mark_bit)) {
+        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
+      }
+    }
+  }
+
+  IncrementalMarking* incremental_marking_;
+};
+
+
+void IncrementalMarking::Initialize() {
+  IncrementalMarkingMarkingVisitor::Initialize();
+}
+
+
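+// Sets the page flags that drive the write barrier. While marking is active,
+// both barrier directions are interesting; otherwise only pointers from the
+// page into new space are recorded, and cell-space pages as well as pages
+// that are scanned on scavenge anyway need no barrier at all.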
+void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
+                                              bool is_marking,
+                                              bool is_compacting) {
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+
+    // It's difficult to filter out slots recorded for large objects.
+    if (chunk->owner()->identity() == LO_SPACE &&
+        chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) {
+      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+  } else if (chunk->owner()->identity() == CELL_SPACE ||
+             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
+             chunk->scan_on_scavenge()) {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+}
+
+
+void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
+                                              bool is_marking) {
+  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
+  if (is_marking) {
+    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  } else {
+    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
+  }
+  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, false, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
+    NewSpace* space) {
+  NewSpacePageIterator it(space);
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, false);
+  }
+}
+
+
+void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
+  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, false, false);
+    lop = lop->next_page();
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    SetOldSpacePageFlags(p, true, is_compacting_);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    SetNewSpacePageFlags(p, true);
+  }
+}
+
+
+void IncrementalMarking::ActivateIncrementalWriteBarrier() {
+  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
+  ActivateIncrementalWriteBarrier(heap_->old_data_space());
+  ActivateIncrementalWriteBarrier(heap_->cell_space());
+  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
+  ActivateIncrementalWriteBarrier(heap_->map_space());
+  ActivateIncrementalWriteBarrier(heap_->code_space());
+  ActivateIncrementalWriteBarrier(heap_->new_space());
+
+  LargePage* lop = heap_->lo_space()->first_page();
+  while (lop->is_valid()) {
+    SetOldSpacePageFlags(lop, true, is_compacting_);
+    lop = lop->next_page();
+  }
+}
+
+
+bool IncrementalMarking::ShouldActivate() {
+  return WorthActivating() && heap_->NextGCIsLikelyToBeFull();
+}
+
+
+bool IncrementalMarking::WorthActivating() {
+#ifndef DEBUG
+  static const intptr_t kActivationThreshold = 8 * MB;
+#else
+  // TODO(gc) consider setting this to some low level so that some
+  // debug tests run with incremental marking and some without.
+  static const intptr_t kActivationThreshold = 0;
+#endif
+  // Only start incremental marking in a safe state: 1) when incremental
+  // marking is turned on, 2) when we are currently not in a GC, 3) when the
+  // isolate is initialized and not serializing or deserializing the heap,
+  // and 4) when enough data has been promoted to make marking worthwhile.
+  return FLAG_incremental_marking && FLAG_incremental_marking_steps &&
+         heap_->gc_state() == Heap::NOT_IN_GC &&
+         !heap_->isolate()->serializer_enabled() &&
+         heap_->isolate()->IsInitialized() &&
+         heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
+}
+
+
+void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
+  DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY);
+
+  if (!IsMarking()) {
+    // Initially stub is generated in STORE_BUFFER_ONLY mode thus
+    // we don't need to do anything if incremental marking is
+    // not active.
+  } else if (IsCompacting()) {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
+  } else {
+    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
+  }
+}
+
+
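+// Walks the code stub table and switches every RecordWrite stub to the given
+// mode so that generated write barriers match the collector state.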
+static void PatchIncrementalMarkingRecordWriteStubs(
+    Heap* heap, RecordWriteStub::Mode mode) {
+  UnseededNumberDictionary* stubs = heap->code_stubs();
+
+  int capacity = stubs->Capacity();
+  for (int i = 0; i < capacity; i++) {
+    Object* k = stubs->KeyAt(i);
+    if (stubs->IsKey(k)) {
+      uint32_t key = NumberToUint32(k);
+
+      if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) {
+        Object* e = stubs->ValueAt(i);
+        if (e->IsCode()) {
+          RecordWriteStub::Patch(Code::cast(e), mode);
+        }
+      }
+    }
+  }
+}
+
+
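+// The marking deque is backed by a lazily committed 4 MB virtual memory
+// reservation, which UncommitMarkingDeque releases again once marking has
+// stopped.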
+void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+  }
+}
+
+
+void IncrementalMarking::UncommitMarkingDeque() {
+  if (state_ == STOPPED && marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
+  }
+}
+
+
+void IncrementalMarking::Start(CompactionFlag flag) {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start\n");
+  }
+  DCHECK(FLAG_incremental_marking);
+  DCHECK(FLAG_incremental_marking_steps);
+  DCHECK(state_ == STOPPED);
+  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
+  DCHECK(!heap_->isolate()->serializer_enabled());
+  DCHECK(heap_->isolate()->IsInitialized());
+
+  ResetStepCounters();
+
+  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+    StartMarking(flag);
+  } else {
+    if (FLAG_trace_incremental_marking) {
+      PrintF("[IncrementalMarking] Start sweeping.\n");
+    }
+    state_ = SWEEPING;
+  }
+
+  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
+}
+
+
+void IncrementalMarking::StartMarking(CompactionFlag flag) {
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Start marking\n");
+  }
+
+  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
+                   heap_->mark_compact_collector()->StartCompaction(
+                       MarkCompactCollector::INCREMENTAL_COMPACTION);
+
+  state_ = MARKING;
+
+  RecordWriteStub::Mode mode = is_compacting_
+                                   ? RecordWriteStub::INCREMENTAL_COMPACTION
+                                   : RecordWriteStub::INCREMENTAL;
+
+  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
+
+  EnsureMarkingDequeIsCommitted();
+
+  // Initialize the marking deque.
+  Address addr = static_cast<Address>(marking_deque_memory_->address());
+  size_t size = marking_deque_memory_->size();
+  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+  marking_deque_.Initialize(addr, addr + size);
+
+  ActivateIncrementalWriteBarrier();
+
+// Marking bits are cleared by the sweeper.
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
+  }
+#endif
+
+  heap_->CompletelyClearInstanceofCache();
+  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    // We will mark the cache black with a separate pass
+    // when we finish marking.
+    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
+  }
+
+  // Mark strong roots grey.
+  IncrementalMarkingRootMarkingVisitor visitor(this);
+  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+
+  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
+
+  // Ready to start incremental marking.
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Running\n");
+  }
+}
+
+
+void IncrementalMarking::PrepareForScavenge() {
+  if (!IsMarking()) return;
+  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
+                          heap_->new_space()->FromSpaceEnd());
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
+  if (!IsMarking()) return;
+
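+  // The scavenge may have moved or freed objects that are on the marking
+  // deque. Compact the deque in place: rewrite entries that point into new
+  // space to their forwarding addresses and drop entries for objects that
+  // did not survive.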
+  int current = marking_deque_.bottom();
+  int mask = marking_deque_.mask();
+  int limit = marking_deque_.top();
+  HeapObject** array = marking_deque_.array();
+  int new_top = current;
+
+  Map* filler_map = heap_->one_pointer_filler_map();
+
+  while (current != limit) {
+    HeapObject* obj = array[current];
+    DCHECK(obj->IsHeapObject());
+    current = ((current + 1) & mask);
+    if (heap_->InNewSpace(obj)) {
+      MapWord map_word = obj->map_word();
+      if (map_word.IsForwardingAddress()) {
+        HeapObject* dest = map_word.ToForwardingAddress();
+        array[new_top] = dest;
+        new_top = ((new_top + 1) & mask);
+        DCHECK(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+        MarkBit mark_bit = Marking::MarkBitFrom(obj);
+        DCHECK(Marking::IsGrey(mark_bit) ||
+               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
+#endif
+      }
+    } else if (obj->map() != filler_map) {
+      // Skip one-word filler objects that appear on the
+      // stack when we perform in-place array shifts.
+      array[new_top] = obj;
+      new_top = ((new_top + 1) & mask);
+      DCHECK(new_top != marking_deque_.bottom());
+#ifdef DEBUG
+      MarkBit mark_bit = Marking::MarkBitFrom(obj);
+      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+      DCHECK(Marking::IsGrey(mark_bit) ||
+             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+              Marking::IsBlack(mark_bit)));
+#endif
+    }
+  }
+  marking_deque_.set_top(new_top);
+}
+
+
+void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
+  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
+  if (Marking::IsWhite(map_mark_bit)) {
+    WhiteToGreyAndPush(map, map_mark_bit);
+  }
+
+  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
+
+  MarkBit mark_bit = Marking::MarkBitFrom(obj);
+#if ENABLE_SLOW_DCHECKS
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  SLOW_DCHECK(Marking::IsGrey(mark_bit) ||
+              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
+              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
+               Marking::IsBlack(mark_bit)));
+#endif
+  MarkBlackOrKeepBlack(obj, mark_bit, size);
+}
+
+
+intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
+  intptr_t bytes_processed = 0;
+  Map* filler_map = heap_->one_pointer_filler_map();
+  while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
+    HeapObject* obj = marking_deque_.Pop();
+
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = obj->map();
+    if (map == filler_map) continue;
+
+    int size = obj->SizeFromMap(map);
+    unscanned_bytes_of_large_object_ = 0;
+    VisitObject(map, obj, size);
+    int delta = (size - unscanned_bytes_of_large_object_);
+    // TODO(jochen): remove after http://crbug.com/381820 is resolved.
+    CHECK_LT(0, delta);
+    bytes_processed += delta;
+  }
+  return bytes_processed;
+}
+
+
+void IncrementalMarking::ProcessMarkingDeque() {
+  Map* filler_map = heap_->one_pointer_filler_map();
+  while (!marking_deque_.IsEmpty()) {
+    HeapObject* obj = marking_deque_.Pop();
+
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = obj->map();
+    if (map == filler_map) continue;
+
+    VisitObject(map, obj, obj->SizeFromMap(map));
+  }
+}
+
+
+void IncrementalMarking::Hurry() {
+  if (state() == MARKING) {
+    double start = 0.0;
+    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+      start = base::OS::TimeCurrentMillis();
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Hurry\n");
+      }
+    }
+    // TODO(gc) Hurry could mark the objects it encounters black, since the
+    // mutator is stopped.
+    ProcessMarkingDeque();
+    state_ = COMPLETE;
+    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
+      double end = base::OS::TimeCurrentMillis();
+      double delta = end - start;
+      heap_->tracer()->AddMarkingTime(delta);
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
+               static_cast<int>(delta));
+      }
+    }
+  }
+
+  if (FLAG_cleanup_code_caches_at_gc) {
+    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
+    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
+    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
+                                          PolymorphicCodeCache::kSize);
+  }
+
+  Object* context = heap_->native_contexts_list();
+  while (!context->IsUndefined()) {
+    // GC can happen when the context is not fully initialized,
+    // so the cache can be undefined.
+    HeapObject* cache = HeapObject::cast(
+        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
+    if (!cache->IsUndefined()) {
+      MarkBit mark_bit = Marking::MarkBitFrom(cache);
+      if (Marking::IsGrey(mark_bit)) {
+        Marking::GreyToBlack(mark_bit);
+        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
+      }
+    }
+    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
+  }
+}
+
+
+void IncrementalMarking::Abort() {
+  if (IsStopped()) return;
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Aborting.\n");
+  }
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  if (IsMarking()) {
+    PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                            RecordWriteStub::STORE_BUFFER_ONLY);
+    DeactivateIncrementalWriteBarrier();
+
+    if (is_compacting_) {
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+  heap_->isolate()->stack_guard()->ClearGC();
+  state_ = STOPPED;
+  is_compacting_ = false;
+}
+
+
+void IncrementalMarking::Finalize() {
+  Hurry();
+  state_ = STOPPED;
+  is_compacting_ = false;
+  heap_->new_space()->LowerInlineAllocationLimit(0);
+  IncrementalMarking::set_should_hurry(false);
+  ResetStepCounters();
+  PatchIncrementalMarkingRecordWriteStubs(heap_,
+                                          RecordWriteStub::STORE_BUFFER_ONLY);
+  DeactivateIncrementalWriteBarrier();
+  DCHECK(marking_deque_.IsEmpty());
+  heap_->isolate()->stack_guard()->ClearGC();
+}
+
+
+void IncrementalMarking::MarkingComplete(CompletionAction action) {
+  state_ = COMPLETE;
+  // We will set the stack guard to request a GC now.  This will mean the rest
+  // of the GC gets performed as soon as possible (we can't do a GC here in a
+  // record-write context).  If a few things get allocated between now and
+  // then, that shouldn't make us do a scavenge and keep being incremental; so
+  // we set the should-hurry flag to indicate that there can't be much work
+  // left to do.
+  set_should_hurry(true);
+  if (FLAG_trace_incremental_marking) {
+    PrintF("[IncrementalMarking] Complete (normal).\n");
+  }
+  if (action == GC_VIA_STACK_GUARD) {
+    heap_->isolate()->stack_guard()->RequestGC();
+  }
+}
+
+
+void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
+  if (IsStopped() && ShouldActivate()) {
+    // TODO(hpayer): Let's play safe for now, but compaction should be
+    // in principle possible.
+    Start(PREVENT_COMPACTION);
+  } else {
+    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
+  }
+}
+
+
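+// Heuristically increases marking_speed_ when the marker risks falling
+// behind: periodically after a fixed number of steps, when little old space
+// is left, when the heap has grown substantially since marking started, or
+// when promotion is outpacing scanning. The increase is only applied once
+// marking has actually started.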
+void IncrementalMarking::SpeedUp() {
+  bool speed_up = false;
+
+  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking after %d steps\n",
+               static_cast<int>(kMarkingSpeedAccellerationInterval));
+    }
+    speed_up = true;
+  }
+
+  bool space_left_is_very_small =
+      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
+
+  bool only_1_nth_of_space_that_was_available_still_left =
+      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
+       old_generation_space_available_at_start_of_incremental_);
+
+  if (space_left_is_very_small ||
+      only_1_nth_of_space_that_was_available_still_left) {
+    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
+    speed_up = true;
+  }
+
+  bool size_of_old_space_multiplied_by_n_during_marking =
+      (heap_->PromotedTotalSize() >
+       (marking_speed_ + 1) *
+           old_generation_space_used_at_start_of_incremental_);
+  if (size_of_old_space_multiplied_by_n_during_marking) {
+    speed_up = true;
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking because of heap size increase\n");
+    }
+  }
+
+  int64_t promoted_during_marking =
+      heap_->PromotedTotalSize() -
+      old_generation_space_used_at_start_of_incremental_;
+  intptr_t delay = marking_speed_ * MB;
+  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
+
+  // We try to scan at least twice as fast as we are allocating.
+  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
+    if (FLAG_trace_gc) {
+      PrintPID("Speed up marking because marker was not keeping up\n");
+    }
+    speed_up = true;
+  }
+
+  if (speed_up) {
+    if (state_ != MARKING) {
+      if (FLAG_trace_gc) {
+        PrintPID("Postponing speeding up marking until marking starts\n");
+      }
+    } else {
+      marking_speed_ += kMarkingSpeedAccelleration;
+      marking_speed_ = static_cast<int>(
+          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
+      if (FLAG_trace_gc) {
+        PrintPID("Marking speed increased to %d\n", marking_speed_);
+      }
+    }
+  }
+}
+
+
+void IncrementalMarking::Step(intptr_t allocated_bytes, CompletionAction action,
+                              bool force_marking) {
+  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
+      !FLAG_incremental_marking_steps ||
+      (state_ != SWEEPING && state_ != MARKING)) {
+    return;
+  }
+
+  allocated_ += allocated_bytes;
+
+  if (!force_marking && allocated_ < kAllocatedThreshold &&
+      write_barriers_invoked_since_last_step_ <
+          kWriteBarriersInvokedThreshold) {
+    return;
+  }
+
+  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
+
+  {
+    HistogramTimerScope incremental_marking_scope(
+        heap_->isolate()->counters()->gc_incremental_marking());
+    double start = base::OS::TimeCurrentMillis();
+
+    // The marking speed is driven either by the allocation rate or by the
+    // rate at which we are having to check the color of objects in the write
+    // barrier. It is possible for a tight non-allocating loop to run a lot
+    // of write barriers before we get here and check them (marking can only
+    // take place on allocation), so to reduce the lumpiness we don't use the
+    // write barriers invoked since last step directly to determine the
+    // amount of work to do.
+    intptr_t bytes_to_process =
+        marking_speed_ *
+        Max(allocated_, write_barriers_invoked_since_last_step_);
+    allocated_ = 0;
+    write_barriers_invoked_since_last_step_ = 0;
+
+    bytes_scanned_ += bytes_to_process;
+    intptr_t bytes_processed = 0;
+
+    if (state_ == SWEEPING) {
+      if (heap_->mark_compact_collector()->sweeping_in_progress() &&
+          heap_->mark_compact_collector()->IsSweepingCompleted()) {
+        heap_->mark_compact_collector()->EnsureSweepingCompleted();
+      }
+      if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
+        bytes_scanned_ = 0;
+        StartMarking(PREVENT_COMPACTION);
+      }
+    } else if (state_ == MARKING) {
+      bytes_processed = ProcessMarkingDeque(bytes_to_process);
+      if (marking_deque_.IsEmpty()) MarkingComplete(action);
+    }
+
+    steps_count_++;
+
+    // Speed up marking if we are marking too slowly or if we are almost
+    // done with marking.
+    SpeedUp();
+
+    double end = base::OS::TimeCurrentMillis();
+    double duration = (end - start);
+    // Note that we report zero bytes here when sweeping was in progress or
+    // when we just started incremental marking. In these cases we did not
+    // process the marking deque.
+    heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
+  }
+}
+
+
+void IncrementalMarking::ResetStepCounters() {
+  steps_count_ = 0;
+  old_generation_space_available_at_start_of_incremental_ =
+      SpaceLeftInOldSpace();
+  old_generation_space_used_at_start_of_incremental_ =
+      heap_->PromotedTotalSize();
+  bytes_rescanned_ = 0;
+  marking_speed_ = kInitialMarkingSpeed;
+  bytes_scanned_ = 0;
+  write_barriers_invoked_since_last_step_ = 0;
+}
+
+
+int64_t IncrementalMarking::SpaceLeftInOldSpace() {
+  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/incremental-marking.h b/src/heap/incremental-marking.h
new file mode 100644
index 0000000..e4a8e97
--- /dev/null
+++ b/src/heap/incremental-marking.h
@@ -0,0 +1,226 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_INCREMENTAL_MARKING_H_
+#define V8_HEAP_INCREMENTAL_MARKING_H_
+
+
+#include "src/execution.h"
+#include "src/heap/mark-compact.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+
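+// Drives incremental marking for the mark-compact collector: marking work is
+// performed in small steps, interleaved with the mutator and triggered by
+// allocation and write barrier activity, until the marking deque drains.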
+class IncrementalMarking {
+ public:
+  enum State { STOPPED, SWEEPING, MARKING, COMPLETE };
+
+  enum CompletionAction { GC_VIA_STACK_GUARD, NO_GC_VIA_STACK_GUARD };
+
+  explicit IncrementalMarking(Heap* heap);
+
+  static void Initialize();
+
+  void TearDown();
+
+  State state() {
+    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
+    return state_;
+  }
+
+  bool should_hurry() { return should_hurry_; }
+  void set_should_hurry(bool val) { should_hurry_ = val; }
+
+  inline bool IsStopped() { return state() == STOPPED; }
+
+  INLINE(bool IsMarking()) { return state() >= MARKING; }
+
+  inline bool IsMarkingIncomplete() { return state() == MARKING; }
+
+  inline bool IsComplete() { return state() == COMPLETE; }
+
+  bool WorthActivating();
+
+  bool ShouldActivate();
+
+  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
+
+  void Start(CompactionFlag flag = ALLOW_COMPACTION);
+
+  void Stop();
+
+  void PrepareForScavenge();
+
+  void UpdateMarkingDequeAfterScavenge();
+
+  void Hurry();
+
+  void Finalize();
+
+  void Abort();
+
+  void MarkingComplete(CompletionAction action);
+
+  // It's hard to know how much work the incremental marker should do to make
+  // progress in the face of the mutator creating new work for it.  We start
+  // off at a moderate rate of work and gradually increase the speed of the
+  // incremental marker until it completes.
+  // Do some marking every time this much memory has been allocated or that many
+  // heavy (color-checking) write barriers have been invoked.
+  static const intptr_t kAllocatedThreshold = 65536;
+  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
+  // Start off by marking this many times more memory than has been allocated.
+  static const intptr_t kInitialMarkingSpeed = 1;
+  // But if we are promoting a lot of data we need to mark faster to keep up
+  // with the data that is entering the old space through promotion.
+  static const intptr_t kFastMarking = 3;
+  // After this many steps we increase the marking/allocating factor.
+  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
+  // This is how much we increase the marking/allocating factor by.
+  static const intptr_t kMarkingSpeedAccelleration = 2;
+  static const intptr_t kMaxMarkingSpeed = 1000;
+
+  void OldSpaceStep(intptr_t allocated);
+
+  void Step(intptr_t allocated, CompletionAction action,
+            bool force_marking = false);
+
+  inline void RestartIfNotMarking() {
+    if (state_ == COMPLETE) {
+      state_ = MARKING;
+      if (FLAG_trace_incremental_marking) {
+        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
+      }
+    }
+  }
+
+  static void RecordWriteFromCode(HeapObject* obj, Object** slot,
+                                  Isolate* isolate);
+
+  // Record a slot for compaction.  Returns false for objects that are
+  // guaranteed to be rescanned or not guaranteed to survive.
+  //
+  // No slots in white objects should be recorded, as some slots are typed and
+  // cannot be interpreted correctly if the underlying object does not survive
+  // the incremental cycle (stays white).
+  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
+  INLINE(void RecordWriteIntoCode(HeapObject* obj, RelocInfo* rinfo,
+                                  Object* value));
+  INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
+                                     Code* value));
+
+
+  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
+  void RecordWriteIntoCodeSlow(HeapObject* obj, RelocInfo* rinfo,
+                               Object* value);
+  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
+  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
+  void RecordCodeTargetPatch(Address pc, HeapObject* value);
+
+  inline void RecordWrites(HeapObject* obj);
+
+  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
+
+  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
+
+  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
+    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
+  }
+
+  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
+    SetNewSpacePageFlags(chunk, IsMarking());
+  }
+
+  MarkingDeque* marking_deque() { return &marking_deque_; }
+
+  bool IsCompacting() { return IsMarking() && is_compacting_; }
+
+  void ActivateGeneratedStub(Code* stub);
+
+  void NotifyOfHighPromotionRate() {
+    if (IsMarking()) {
+      if (marking_speed_ < kFastMarking) {
+        if (FLAG_trace_gc) {
+          PrintPID(
+              "Increasing marking speed to %d "
+              "due to high promotion rate\n",
+              static_cast<int>(kFastMarking));
+        }
+        marking_speed_ = kFastMarking;
+      }
+    }
+  }
+
+  void EnterNoMarkingScope() { no_marking_scope_depth_++; }
+
+  void LeaveNoMarkingScope() { no_marking_scope_depth_--; }
+
+  void UncommitMarkingDeque();
+
+  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
+    unscanned_bytes_of_large_object_ = unscanned_bytes;
+  }
+
+ private:
+  int64_t SpaceLeftInOldSpace();
+
+  void SpeedUp();
+
+  void ResetStepCounters();
+
+  void StartMarking(CompactionFlag flag);
+
+  void ActivateIncrementalWriteBarrier(PagedSpace* space);
+  static void ActivateIncrementalWriteBarrier(NewSpace* space);
+  void ActivateIncrementalWriteBarrier();
+
+  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
+  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
+  void DeactivateIncrementalWriteBarrier();
+
+  static void SetOldSpacePageFlags(MemoryChunk* chunk, bool is_marking,
+                                   bool is_compacting);
+
+  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
+
+  void EnsureMarkingDequeIsCommitted();
+
+  INLINE(void ProcessMarkingDeque());
+
+  INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));
+
+  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
+
+  Heap* heap_;
+
+  State state_;
+  bool is_compacting_;
+
+  base::VirtualMemory* marking_deque_memory_;
+  bool marking_deque_memory_committed_;
+  MarkingDeque marking_deque_;
+
+  int steps_count_;
+  int64_t old_generation_space_available_at_start_of_incremental_;
+  int64_t old_generation_space_used_at_start_of_incremental_;
+  int64_t bytes_rescanned_;
+  bool should_hurry_;
+  int marking_speed_;
+  intptr_t bytes_scanned_;
+  intptr_t allocated_;
+  intptr_t write_barriers_invoked_since_last_step_;
+
+  int no_marking_scope_depth_;
+
+  int unscanned_bytes_of_large_object_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_INCREMENTAL_MARKING_H_
diff --git a/src/heap/mark-compact-inl.h b/src/heap/mark-compact-inl.h
new file mode 100644
index 0000000..66b0a59
--- /dev/null
+++ b/src/heap/mark-compact-inl.h
@@ -0,0 +1,72 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MARK_COMPACT_INL_H_
+#define V8_HEAP_MARK_COMPACT_INL_H_
+
+#include "src/heap/mark-compact.h"
+#include "src/isolate.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+MarkBit Marking::MarkBitFrom(Address addr) {
+  MemoryChunk* p = MemoryChunk::FromAddress(addr);
+  return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
+                                         p->ContainsOnlyData());
+}
+
+
+void MarkCompactCollector::SetFlags(int flags) {
+  reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
+  abort_incremental_marking_ =
+      ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
+}
+
+
+void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
+  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+  if (!mark_bit.Get()) {
+    mark_bit.Set();
+    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+    DCHECK(IsMarked(obj));
+    DCHECK(obj->GetIsolate()->heap()->Contains(obj));
+    marking_deque_.PushBlack(obj);
+  }
+}
+
+
+void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
+  DCHECK(!mark_bit.Get());
+  DCHECK(Marking::MarkBitFrom(obj) == mark_bit);
+  mark_bit.Set();
+  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
+}
+
+
+bool MarkCompactCollector::IsMarked(Object* obj) {
+  DCHECK(obj->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(obj);
+  return Marking::MarkBitFrom(heap_object).Get();
+}
+
+
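+// Records a slot that points into an evacuation candidate so the slot can be
+// updated after evacuation. If the candidate page's slots buffer cannot take
+// another entry, the page is evicted from the set of evacuation candidates
+// instead.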
+void MarkCompactCollector::RecordSlot(Object** anchor_slot, Object** slot,
+                                      Object* object,
+                                      SlotsBuffer::AdditionMode mode) {
+  Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
+  if (object_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            object_page->slots_buffer_address(), slot, mode)) {
+      EvictEvacuationCandidate(object_page);
+    }
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_MARK_COMPACT_INL_H_
diff --git a/src/heap/mark-compact.cc b/src/heap/mark-compact.cc
new file mode 100644
index 0000000..9f9a658
--- /dev/null
+++ b/src/heap/mark-compact.cc
@@ -0,0 +1,4562 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/atomicops.h"
+#include "src/base/bits.h"
+#include "src/code-stubs.h"
+#include "src/compilation-cache.h"
+#include "src/cpu-profiler.h"
+#include "src/deoptimizer.h"
+#include "src/execution.h"
+#include "src/gdb-jit.h"
+#include "src/global-handles.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/objects-visiting-inl.h"
+#include "src/heap/spaces-inl.h"
+#include "src/heap/sweeper-thread.h"
+#include "src/heap-profiler.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
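+// Marking uses two bits per object: white ("00") objects are unmarked, grey
+// ("11") objects are marked but not yet fully scanned, and black ("10")
+// objects are marked and fully scanned. The "01" pattern must never occur.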
+const char* Marking::kWhiteBitPattern = "00";
+const char* Marking::kBlackBitPattern = "10";
+const char* Marking::kGreyBitPattern = "11";
+const char* Marking::kImpossibleBitPattern = "01";
+
+
+// -------------------------------------------------------------------------
+// MarkCompactCollector
+
+MarkCompactCollector::MarkCompactCollector(Heap* heap)
+    :  // NOLINT
+#ifdef DEBUG
+      state_(IDLE),
+#endif
+      reduce_memory_footprint_(false),
+      abort_incremental_marking_(false),
+      marking_parity_(ODD_MARKING_PARITY),
+      compacting_(false),
+      was_marked_incrementally_(false),
+      sweeping_in_progress_(false),
+      pending_sweeper_jobs_semaphore_(0),
+      sequential_sweeping_(false),
+      migration_slots_buffer_(NULL),
+      heap_(heap),
+      code_flusher_(NULL),
+      have_code_to_deoptimize_(false) {
+}
+
+#ifdef VERIFY_HEAP
+class VerifyMarkingVisitor : public ObjectVisitor {
+ public:
+  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        CHECK(heap_->mark_compact_collector()->IsMarked(object));
+      }
+    }
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
+      Object* p = rinfo->target_object();
+      VisitPointer(&p);
+    }
+  }
+
+  void VisitCell(RelocInfo* rinfo) {
+    Code* code = rinfo->host();
+    DCHECK(rinfo->rmode() == RelocInfo::CELL);
+    if (!code->IsWeakObject(rinfo->target_cell())) {
+      ObjectVisitor::VisitCell(rinfo);
+    }
+  }
+
+ private:
+  Heap* heap_;
+};
+
+
+static void VerifyMarking(Heap* heap, Address bottom, Address top) {
+  VerifyMarkingVisitor visitor(heap);
+  HeapObject* object;
+  Address next_object_must_be_here_or_later = bottom;
+
+  for (Address current = bottom; current < top; current += kPointerSize) {
+    object = HeapObject::FromAddress(current);
+    if (MarkCompactCollector::IsMarked(object)) {
+      CHECK(current >= next_object_must_be_here_or_later);
+      object->Iterate(&visitor);
+      next_object_must_be_here_or_later = current + object->Size();
+    }
+  }
+}
+
+
+static void VerifyMarking(NewSpace* space) {
+  Address end = space->top();
+  NewSpacePageIterator it(space->bottom(), end);
+  // The bottom position is at the start of its page, which allows us to use
+  // page->area_start() as the start of the range on all pages.
+  CHECK_EQ(space->bottom(),
+           NewSpacePage::FromAddress(space->bottom())->area_start());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address limit = it.has_next() ? page->area_end() : end;
+    CHECK(limit == end || !page->Contains(end));
+    VerifyMarking(space->heap(), page->area_start(), limit);
+  }
+}
+
+
+static void VerifyMarking(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    VerifyMarking(space->heap(), p->area_start(), p->area_end());
+  }
+}
+
+
+static void VerifyMarking(Heap* heap) {
+  VerifyMarking(heap->old_pointer_space());
+  VerifyMarking(heap->old_data_space());
+  VerifyMarking(heap->code_space());
+  VerifyMarking(heap->cell_space());
+  VerifyMarking(heap->property_cell_space());
+  VerifyMarking(heap->map_space());
+  VerifyMarking(heap->new_space());
+
+  VerifyMarkingVisitor visitor(heap);
+
+  LargeObjectIterator it(heap->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    if (MarkCompactCollector::IsMarked(obj)) {
+      obj->Iterate(&visitor);
+    }
+  }
+
+  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
+}
+
+
+class VerifyEvacuationVisitor : public ObjectVisitor {
+ public:
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
+      }
+    }
+  }
+};
+
+
+static void VerifyEvacuation(Page* page) {
+  VerifyEvacuationVisitor visitor;
+  HeapObjectIterator iterator(page, NULL);
+  for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+       heap_object = iterator.Next()) {
+    // We skip free space objects.
+    if (!heap_object->IsFiller()) {
+      heap_object->Iterate(&visitor);
+    }
+  }
+}
+
+
+static void VerifyEvacuation(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+  VerifyEvacuationVisitor visitor;
+
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    Address current = page->area_start();
+    Address limit = it.has_next() ? page->area_end() : space->top();
+    CHECK(limit == space->top() || !page->Contains(space->top()));
+    while (current < limit) {
+      HeapObject* object = HeapObject::FromAddress(current);
+      object->Iterate(&visitor);
+      current += object->Size();
+    }
+  }
+}
+
+
+static void VerifyEvacuation(Heap* heap, PagedSpace* space) {
+  if (FLAG_use_allocation_folding &&
+      (space == heap->old_pointer_space() || space == heap->old_data_space())) {
+    return;
+  }
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (p->IsEvacuationCandidate()) continue;
+    VerifyEvacuation(p);
+  }
+}
+
+
+static void VerifyEvacuation(Heap* heap) {
+  VerifyEvacuation(heap, heap->old_pointer_space());
+  VerifyEvacuation(heap, heap->old_data_space());
+  VerifyEvacuation(heap, heap->code_space());
+  VerifyEvacuation(heap, heap->cell_space());
+  VerifyEvacuation(heap, heap->property_cell_space());
+  VerifyEvacuation(heap, heap->map_space());
+  VerifyEvacuation(heap->new_space());
+
+  VerifyEvacuationVisitor visitor;
+  heap->IterateStrongRoots(&visitor, VISIT_ALL);
+}
+#endif  // VERIFY_HEAP
+
+
+#ifdef DEBUG
+class VerifyNativeContextSeparationVisitor : public ObjectVisitor {
+ public:
+  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** current = start; current < end; current++) {
+      if ((*current)->IsHeapObject()) {
+        HeapObject* object = HeapObject::cast(*current);
+        if (object->IsString()) continue;
+        switch (object->map()->instance_type()) {
+          case JS_FUNCTION_TYPE:
+            CheckContext(JSFunction::cast(object)->context());
+            break;
+          case JS_GLOBAL_PROXY_TYPE:
+            CheckContext(JSGlobalProxy::cast(object)->native_context());
+            break;
+          case JS_GLOBAL_OBJECT_TYPE:
+          case JS_BUILTINS_OBJECT_TYPE:
+            CheckContext(GlobalObject::cast(object)->native_context());
+            break;
+          case JS_ARRAY_TYPE:
+          case JS_DATE_TYPE:
+          case JS_OBJECT_TYPE:
+          case JS_REGEXP_TYPE:
+            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
+            break;
+          case MAP_TYPE:
+            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
+            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
+            break;
+          case FIXED_ARRAY_TYPE:
+            if (object->IsContext()) {
+              CheckContext(object);
+            } else {
+              FixedArray* array = FixedArray::cast(object);
+              int length = array->length();
+              // Set array length to zero to prevent cycles while iterating
+              // over array bodies; this is easier than intrusive marking.
+              array->set_length(0);
+              array->IterateBody(FIXED_ARRAY_TYPE, FixedArray::SizeFor(length),
+                                 this);
+              array->set_length(length);
+            }
+            break;
+          case CELL_TYPE:
+          case JS_PROXY_TYPE:
+          case JS_VALUE_TYPE:
+          case TYPE_FEEDBACK_INFO_TYPE:
+            object->Iterate(this);
+            break;
+          case DECLARED_ACCESSOR_INFO_TYPE:
+          case EXECUTABLE_ACCESSOR_INFO_TYPE:
+          case BYTE_ARRAY_TYPE:
+          case CALL_HANDLER_INFO_TYPE:
+          case CODE_TYPE:
+          case FIXED_DOUBLE_ARRAY_TYPE:
+          case HEAP_NUMBER_TYPE:
+          case MUTABLE_HEAP_NUMBER_TYPE:
+          case INTERCEPTOR_INFO_TYPE:
+          case ODDBALL_TYPE:
+          case SCRIPT_TYPE:
+          case SHARED_FUNCTION_INFO_TYPE:
+            break;
+          default:
+            UNREACHABLE();
+        }
+      }
+    }
+  }
+
+ private:
+  void CheckContext(Object* context) {
+    if (!context->IsContext()) return;
+    Context* native_context = Context::cast(context)->native_context();
+    if (current_native_context_ == NULL) {
+      current_native_context_ = native_context;
+    } else {
+      CHECK_EQ(current_native_context_, native_context);
+    }
+  }
+
+  Context* current_native_context_;
+};
+
+
+static void VerifyNativeContextSeparation(Heap* heap) {
+  HeapObjectIterator it(heap->code_space());
+
+  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
+    VerifyNativeContextSeparationVisitor visitor;
+    Code::cast(object)->CodeIterateBody(&visitor);
+  }
+}
+#endif
+
+
+void MarkCompactCollector::SetUp() {
+  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
+  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
+}
+
+
+void MarkCompactCollector::TearDown() { AbortCompaction(); }
+
+
+void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
+  p->MarkEvacuationCandidate();
+  evacuation_candidates_.Add(p);
+}
+
+
+static void TraceFragmentation(PagedSpace* space) {
+  int number_of_pages = space->CountTotalPages();
+  intptr_t reserved = (number_of_pages * space->AreaSize());
+  intptr_t free = reserved - space->SizeOfObjects();
+  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
+         AllocationSpaceName(space->identity()), number_of_pages,
+         static_cast<int>(free), static_cast<double>(free) * 100 / reserved);
+}
+
+
+bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
+  if (!compacting_) {
+    DCHECK(evacuation_candidates_.length() == 0);
+
+#ifdef ENABLE_GDB_JIT_INTERFACE
+    // If the GDBJIT interface is active, disable compaction.
+    if (FLAG_gdbjit) return false;
+#endif
+
+    CollectEvacuationCandidates(heap()->old_pointer_space());
+    CollectEvacuationCandidates(heap()->old_data_space());
+
+    if (FLAG_compact_code_space && (mode == NON_INCREMENTAL_COMPACTION ||
+                                    FLAG_incremental_code_compaction)) {
+      CollectEvacuationCandidates(heap()->code_space());
+    } else if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->code_space());
+    }
+
+    if (FLAG_trace_fragmentation) {
+      TraceFragmentation(heap()->map_space());
+      TraceFragmentation(heap()->cell_space());
+      TraceFragmentation(heap()->property_cell_space());
+    }
+
+    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
+    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
+
+    compacting_ = evacuation_candidates_.length() > 0;
+  }
+
+  return compacting_;
+}
+
+
+void MarkCompactCollector::CollectGarbage() {
+  // Make sure that Prepare() has been called. The individual steps below will
+  // update the state as they proceed.
+  DCHECK(state_ == PREPARE_GC);
+
+  MarkLiveObjects();
+  DCHECK(heap_->incremental_marking()->IsStopped());
+
+  if (FLAG_collect_maps) ClearNonLiveReferences();
+
+  ClearWeakCollections();
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyMarking(heap_);
+  }
+#endif
+
+  SweepSpaces();
+
+#ifdef DEBUG
+  if (FLAG_verify_native_context_separation) {
+    VerifyNativeContextSeparation(heap_);
+  }
+#endif
+
+#ifdef VERIFY_HEAP
+  if (heap()->weak_embedded_objects_verification_enabled()) {
+    VerifyWeakEmbeddedObjectsInCode();
+  }
+  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
+    VerifyOmittedMapChecks();
+  }
+#endif
+
+  Finish();
+
+  if (marking_parity_ == EVEN_MARKING_PARITY) {
+    marking_parity_ = ODD_MARKING_PARITY;
+  } else {
+    DCHECK(marking_parity_ == ODD_MARKING_PARITY);
+    marking_parity_ = EVEN_MARKING_PARITY;
+  }
+}
+
+
+#ifdef VERIFY_HEAP
+void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+
+void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
+  NewSpacePageIterator it(space->bottom(), space->top());
+
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    CHECK(p->markbits()->IsClean());
+    CHECK_EQ(0, p->LiveBytes());
+  }
+}
+
+
+void MarkCompactCollector::VerifyMarkbitsAreClean() {
+  VerifyMarkbitsAreClean(heap_->old_pointer_space());
+  VerifyMarkbitsAreClean(heap_->old_data_space());
+  VerifyMarkbitsAreClean(heap_->code_space());
+  VerifyMarkbitsAreClean(heap_->cell_space());
+  VerifyMarkbitsAreClean(heap_->property_cell_space());
+  VerifyMarkbitsAreClean(heap_->map_space());
+  VerifyMarkbitsAreClean(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    CHECK(Marking::IsWhite(mark_bit));
+    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
+  }
+}
+
+
+void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
+  HeapObjectIterator code_iterator(heap()->code_space());
+  for (HeapObject* obj = code_iterator.Next(); obj != NULL;
+       obj = code_iterator.Next()) {
+    Code* code = Code::cast(obj);
+    if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
+    if (WillBeDeoptimized(code)) continue;
+    code->VerifyEmbeddedObjectsDependency();
+  }
+}
+
+
+void MarkCompactCollector::VerifyOmittedMapChecks() {
+  HeapObjectIterator iterator(heap()->map_space());
+  for (HeapObject* obj = iterator.Next(); obj != NULL; obj = iterator.Next()) {
+    Map* map = Map::cast(obj);
+    map->VerifyOmittedMapChecks();
+  }
+}
+#endif  // VERIFY_HEAP
+
+
+static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
+  PageIterator it(space);
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+static void ClearMarkbitsInNewSpace(NewSpace* space) {
+  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
+
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void MarkCompactCollector::ClearMarkbits() {
+  ClearMarkbitsInPagedSpace(heap_->code_space());
+  ClearMarkbitsInPagedSpace(heap_->map_space());
+  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
+  ClearMarkbitsInPagedSpace(heap_->old_data_space());
+  ClearMarkbitsInPagedSpace(heap_->cell_space());
+  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
+  ClearMarkbitsInNewSpace(heap_->new_space());
+
+  LargeObjectIterator it(heap_->lo_space());
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
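+    // For large objects, clear both cells of the two-bit mark and reset the
+    // page's progress bar and live-bytes count.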
+    MarkBit mark_bit = Marking::MarkBitFrom(obj);
+    mark_bit.Clear();
+    mark_bit.Next().Clear();
+    Page::FromAddress(obj->address())->ResetProgressBar();
+    Page::FromAddress(obj->address())->ResetLiveBytes();
+  }
+}
+
+
+class MarkCompactCollector::SweeperTask : public v8::Task {
+ public:
+  SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
+
+  virtual ~SweeperTask() {}
+
+ private:
+  // v8::Task overrides.
+  virtual void Run() OVERRIDE {
+    heap_->mark_compact_collector()->SweepInParallel(space_, 0);
+    heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
+  }
+
+  Heap* heap_;
+  PagedSpace* space_;
+
+  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
+};
+
+
+void MarkCompactCollector::StartSweeperThreads() {
+  DCHECK(free_list_old_pointer_space_.get()->IsEmpty());
+  DCHECK(free_list_old_data_space_.get()->IsEmpty());
+  sweeping_in_progress_ = true;
+  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
+    isolate()->sweeper_threads()[i]->StartSweeping();
+  }
+  if (FLAG_job_based_sweeping) {
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new SweeperTask(heap(), heap()->old_data_space()),
+        v8::Platform::kShortRunningTask);
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new SweeperTask(heap(), heap()->old_pointer_space()),
+        v8::Platform::kShortRunningTask);
+  }
+}
+
+
+void MarkCompactCollector::EnsureSweepingCompleted() {
+  DCHECK(sweeping_in_progress_ == true);
+
+  // If sweeping is not completed, we try to complete it here.  If we do not
+  // have sweeper threads, we have to complete sweeping here, because in that
+  // case there is no good indicator that a space has already been swept.
+  if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
+    SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
+    SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
+  }
+
+  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
+    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
+  }
+  if (FLAG_job_based_sweeping) {
+    // Wait twice for both jobs.
+    pending_sweeper_jobs_semaphore_.Wait();
+    pending_sweeper_jobs_semaphore_.Wait();
+  }
+  ParallelSweepSpacesComplete();
+  sweeping_in_progress_ = false;
+  RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
+  RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
+  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
+  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
+
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    VerifyEvacuation(heap_);
+  }
+#endif
+}
+
+
+bool MarkCompactCollector::IsSweepingCompleted() {
+  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
+    if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
+      return false;
+    }
+  }
+
+  if (FLAG_job_based_sweeping) {
+    if (!pending_sweeper_jobs_semaphore_.WaitFor(
+            base::TimeDelta::FromSeconds(0))) {
+      return false;
+    }
+    pending_sweeper_jobs_semaphore_.Signal();
+  }
+
+  return true;
+}
+
+
+void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
+  FreeList* free_list;
+
+  if (space == heap()->old_pointer_space()) {
+    free_list = free_list_old_pointer_space_.get();
+  } else if (space == heap()->old_data_space()) {
+    free_list = free_list_old_data_space_.get();
+  } else {
+    // Any PagedSpace might invoke RefillFreeList, so we need to make sure
+    // that we only refill the free lists of the old data and pointer spaces.
+    return;
+  }
+
+  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
+  space->AddToAccountingStats(freed_bytes);
+  space->DecrementUnsweptFreeBytes(freed_bytes);
+}
+
+
+bool MarkCompactCollector::AreSweeperThreadsActivated() {
+  return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
+}
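+
+// A minimal standalone sketch (illustrative, not part of this change) of the
+// semaphore "peek" used by IsSweepingCompleted() above: probe with a zero
+// timeout, then immediately re-signal so the blocking Wait() calls in
+// EnsureSweepingCompleted() still observe both job completions.  The sketch
+// assumes C++20 std::counting_semaphore rather than V8's base::Semaphore.
+//
+//   #include <semaphore>
+//
+//   std::counting_semaphore<2> jobs_done{0};  // signaled once per sweep job
+//
+//   bool SweepJobsFinished() {
+//     if (!jobs_done.try_acquire()) return false;  // no signal yet
+//     jobs_done.release();  // put the token back for the real Wait()
+//     return true;
+//   }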
+
+
+void Marking::TransferMark(Address old_start, Address new_start) {
+  // This is only used when resizing an object.
+  DCHECK(MemoryChunk::FromAddress(old_start) ==
+         MemoryChunk::FromAddress(new_start));
+
+  if (!heap_->incremental_marking()->IsMarking()) return;
+
+  // If the mark doesn't move, we don't check the color of the object.
+  // It doesn't matter whether the object is black, since it hasn't changed
+  // size, so the adjustment to the live data count will be zero anyway.
+  if (old_start == new_start) return;
+
+  MarkBit new_mark_bit = MarkBitFrom(new_start);
+  MarkBit old_mark_bit = MarkBitFrom(old_start);
+
+#ifdef DEBUG
+  ObjectColor old_color = Color(old_mark_bit);
+#endif
+
+  if (Marking::IsBlack(old_mark_bit)) {
+    old_mark_bit.Clear();
+    DCHECK(IsWhite(old_mark_bit));
+    Marking::MarkBlack(new_mark_bit);
+    return;
+  } else if (Marking::IsGrey(old_mark_bit)) {
+    old_mark_bit.Clear();
+    old_mark_bit.Next().Clear();
+    DCHECK(IsWhite(old_mark_bit));
+    heap_->incremental_marking()->WhiteToGreyAndPush(
+        HeapObject::FromAddress(new_start), new_mark_bit);
+    heap_->incremental_marking()->RestartIfNotMarking();
+  }
+
+#ifdef DEBUG
+  ObjectColor new_color = Color(new_mark_bit);
+  DCHECK(new_color == old_color);
+#endif
+}
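+
+// Illustrative note (not part of this change): with the mark bit encodings
+// asserted later in this file (white "00", black "10", grey "11"),
+// transferring a black mark clears the old bit and sets the new pair to
+// "10", while transferring a grey mark clears both old bits and re-pushes
+// the object so the incremental marker revisits it at its new address.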
+
+
+const char* AllocationSpaceName(AllocationSpace space) {
+  switch (space) {
+    case NEW_SPACE:
+      return "NEW_SPACE";
+    case OLD_POINTER_SPACE:
+      return "OLD_POINTER_SPACE";
+    case OLD_DATA_SPACE:
+      return "OLD_DATA_SPACE";
+    case CODE_SPACE:
+      return "CODE_SPACE";
+    case MAP_SPACE:
+      return "MAP_SPACE";
+    case CELL_SPACE:
+      return "CELL_SPACE";
+    case PROPERTY_CELL_SPACE:
+      return "PROPERTY_CELL_SPACE";
+    case LO_SPACE:
+      return "LO_SPACE";
+    default:
+      UNREACHABLE();
+  }
+
+  return NULL;
+}
+
+
+// Returns zero for pages that have so little fragmentation that it is not
+// worth defragmenting them.  Otherwise it returns a positive integer that
+// gives an estimate of fragmentation on an arbitrary scale.
+static int FreeListFragmentation(PagedSpace* space, Page* p) {
+  // If page was not swept then there are no free list items on it.
+  if (!p->WasSwept()) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("%p [%s]: %d bytes live (unswept)\n", reinterpret_cast<void*>(p),
+             AllocationSpaceName(space->identity()), p->LiveBytes());
+    }
+    return 0;
+  }
+
+  PagedSpace::SizeStats sizes;
+  space->ObtainFreeListStatistics(p, &sizes);
+
+  intptr_t ratio;
+  intptr_t ratio_threshold;
+  intptr_t area_size = space->AreaSize();
+  if (space->identity() == CODE_SPACE) {
+    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 / area_size;
+    ratio_threshold = 10;
+  } else {
+    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 / area_size;
+    ratio_threshold = 15;
+  }
+
+  if (FLAG_trace_fragmentation) {
+    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
+           reinterpret_cast<void*>(p), AllocationSpaceName(space->identity()),
+           static_cast<int>(sizes.small_size_),
+           static_cast<double>(sizes.small_size_ * 100) / area_size,
+           static_cast<int>(sizes.medium_size_),
+           static_cast<double>(sizes.medium_size_ * 100) / area_size,
+           static_cast<int>(sizes.large_size_),
+           static_cast<double>(sizes.large_size_ * 100) / area_size,
+           static_cast<int>(sizes.huge_size_),
+           static_cast<double>(sizes.huge_size_ * 100) / area_size,
+           (ratio > ratio_threshold) ? "[fragmented]" : "");
+  }
+
+  if (FLAG_always_compact && sizes.Total() != area_size) {
+    return 1;
+  }
+
+  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
+
+  return static_cast<int>(ratio - ratio_threshold);
+}
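+
+// Worked example (illustrative numbers, not from this change): for a
+// non-code space with a usable page area of 1,000,000 bytes, 100,000 bytes
+// of small free blocks and 50,000 bytes of medium ones give
+//   ratio = (100000 * 5 + 50000) * 100 / 1000000 = 55,
+// which exceeds the threshold of 15, so the page scores 55 - 15 = 40.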
+
+
+void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
+  DCHECK(space->identity() == OLD_POINTER_SPACE ||
+         space->identity() == OLD_DATA_SPACE ||
+         space->identity() == CODE_SPACE);
+
+  static const int kMaxMaxEvacuationCandidates = 1000;
+  int number_of_pages = space->CountTotalPages();
+  int max_evacuation_candidates =
+      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
+
+  if (FLAG_stress_compaction || FLAG_always_compact) {
+    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
+  }
+
+  class Candidate {
+   public:
+    Candidate() : fragmentation_(0), page_(NULL) {}
+    Candidate(int f, Page* p) : fragmentation_(f), page_(p) {}
+
+    int fragmentation() { return fragmentation_; }
+    Page* page() { return page_; }
+
+   private:
+    int fragmentation_;
+    Page* page_;
+  };
+
+  enum CompactionMode { COMPACT_FREE_LISTS, REDUCE_MEMORY_FOOTPRINT };
+
+  CompactionMode mode = COMPACT_FREE_LISTS;
+
+  intptr_t reserved = number_of_pages * space->AreaSize();
+  intptr_t over_reserved = reserved - space->SizeOfObjects();
+  static const intptr_t kFreenessThreshold = 50;
+
+  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
+    // If reduction of memory footprint was requested, we are aggressive
+    // about choosing pages to free.  We expect that half-empty pages
+    // are easier to compact so slightly bump the limit.
+    mode = REDUCE_MEMORY_FOOTPRINT;
+    max_evacuation_candidates += 2;
+  }
+
+  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
+    // If over-usage is very high (more than a third of the space), we
+    // try to free all mostly empty pages.  We expect that almost empty
+    // pages are even easier to compact so bump the limit even more.
+    mode = REDUCE_MEMORY_FOOTPRINT;
+    max_evacuation_candidates *= 2;
+  }
+
+  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
+    PrintF(
+        "Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
+        "evacuation candidate limit: %d\n",
+        static_cast<double>(over_reserved) / MB,
+        static_cast<double>(reserved) / MB,
+        static_cast<int>(kFreenessThreshold), max_evacuation_candidates);
+  }
+
+  intptr_t estimated_release = 0;
+
+  Candidate candidates[kMaxMaxEvacuationCandidates];
+
+  max_evacuation_candidates =
+      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
+
+  int count = 0;
+  int fragmentation = 0;
+  Candidate* least = NULL;
+
+  PageIterator it(space);
+  if (it.has_next()) it.next();  // Never compact the first page.
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    p->ClearEvacuationCandidate();
+
+    if (FLAG_stress_compaction) {
+      unsigned int counter = space->heap()->ms_count();
+      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
+      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
+    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
+      // Don't try to release too many pages.
+      if (estimated_release >= over_reserved) {
+        continue;
+      }
+
+      intptr_t free_bytes = 0;
+
+      if (!p->WasSwept()) {
+        free_bytes = (p->area_size() - p->LiveBytes());
+      } else {
+        PagedSpace::SizeStats sizes;
+        space->ObtainFreeListStatistics(p, &sizes);
+        free_bytes = sizes.Total();
+      }
+
+      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
+
+      if (free_pct >= kFreenessThreshold) {
+        estimated_release += free_bytes;
+        fragmentation = free_pct;
+      } else {
+        fragmentation = 0;
+      }
+
+      if (FLAG_trace_fragmentation) {
+        PrintF("%p [%s]: %d (%.2f%%) free %s\n", reinterpret_cast<void*>(p),
+               AllocationSpaceName(space->identity()),
+               static_cast<int>(free_bytes),
+               static_cast<double>(free_bytes * 100) / p->area_size(),
+               (fragmentation > 0) ? "[fragmented]" : "");
+      }
+    } else {
+      fragmentation = FreeListFragmentation(space, p);
+    }
+
+    if (fragmentation != 0) {
+      if (count < max_evacuation_candidates) {
+        candidates[count++] = Candidate(fragmentation, p);
+      } else {
+        if (least == NULL) {
+          for (int i = 0; i < max_evacuation_candidates; i++) {
+            if (least == NULL ||
+                candidates[i].fragmentation() < least->fragmentation()) {
+              least = candidates + i;
+            }
+          }
+        }
+        if (least->fragmentation() < fragmentation) {
+          *least = Candidate(fragmentation, p);
+          least = NULL;
+        }
+      }
+    }
+  }
+
+  for (int i = 0; i < count; i++) {
+    AddEvacuationCandidate(candidates[i].page());
+  }
+
+  if (count > 0 && FLAG_trace_fragmentation) {
+    PrintF("Collected %d evacuation candidates for space %s\n", count,
+           AllocationSpaceName(space->identity()));
+  }
+}
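+
+// The candidate selection above is a bounded "keep the k most fragmented
+// pages" filter: fill the fixed array first, then evict the weakest entry
+// whenever a better page shows up.  A minimal standalone sketch of the same
+// idea (illustrative names; it recomputes the minimum instead of caching
+// `least` as the code above does):
+//
+//   #include <algorithm>
+//   #include <vector>
+//
+//   void KeepTopK(std::vector<int>* top, size_t k, int score) {
+//     if (top->size() < k) { top->push_back(score); return; }
+//     auto least = std::min_element(top->begin(), top->end());
+//     if (*least < score) *least = score;  // evict the weakest candidate
+//   }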
+
+
+void MarkCompactCollector::AbortCompaction() {
+  if (compacting_) {
+    int npages = evacuation_candidates_.length();
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+      p->ClearEvacuationCandidate();
+      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+    }
+    compacting_ = false;
+    evacuation_candidates_.Rewind(0);
+    invalidated_code_.Rewind(0);
+  }
+  DCHECK_EQ(0, evacuation_candidates_.length());
+}
+
+
+void MarkCompactCollector::Prepare() {
+  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
+
+#ifdef DEBUG
+  DCHECK(state_ == IDLE);
+  state_ = PREPARE_GC;
+#endif
+
+  DCHECK(!FLAG_never_compact || !FLAG_always_compact);
+
+  if (sweeping_in_progress()) {
+    // Instead of waiting we could also abort the sweeper threads here.
+    EnsureSweepingCompleted();
+  }
+
+  // Clear marking bits if incremental marking is aborted.
+  if (was_marked_incrementally_ && abort_incremental_marking_) {
+    heap()->incremental_marking()->Abort();
+    ClearMarkbits();
+    AbortWeakCollections();
+    AbortCompaction();
+    was_marked_incrementally_ = false;
+  }
+
+  // Don't start compaction if we are in the middle of an incremental
+  // marking cycle, since we did not collect any slots.
+  if (!FLAG_never_compact && !was_marked_incrementally_) {
+    StartCompaction(NON_INCREMENTAL_COMPACTION);
+  }
+
+  PagedSpaces spaces(heap());
+  for (PagedSpace* space = spaces.next(); space != NULL;
+       space = spaces.next()) {
+    space->PrepareForMarkCompact();
+  }
+
+#ifdef VERIFY_HEAP
+  if (!was_marked_incrementally_ && FLAG_verify_heap) {
+    VerifyMarkbitsAreClean();
+  }
+#endif
+}
+
+
+void MarkCompactCollector::Finish() {
+#ifdef DEBUG
+  DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
+  state_ = IDLE;
+#endif
+  // The stub cache is not traversed during GC; clear the cache to
+  // force lazy re-initialization of it. This must be done after the
+  // GC, because it relies on the new address of certain old space
+  // objects (empty string, illegal builtin).
+  isolate()->stub_cache()->Clear();
+
+  if (have_code_to_deoptimize_) {
+    // Some code objects were marked for deoptimization during the GC.
+    Deoptimizer::DeoptimizeMarkedCode(isolate());
+    have_code_to_deoptimize_ = false;
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// Phase 1: tracing and marking live objects.
+//   before: all objects are in normal state.
+//   after: live objects are marked black ('10') in the mark bitmap.
+
+// Marking all live objects in the heap as part of a mark-sweep or
+// mark-compact collection.  Before marking, all objects are in their normal
+// state.  After marking, live objects are marked, indicating that they have
+// been found reachable.
+//
+// The marking algorithm is a mostly depth-first traversal (only "mostly"
+// because the marking stack can overflow) of the graph of objects reachable
+// from the roots.  It uses an explicit stack of pointers rather than
+// recursion.  The young generation's inactive ('from') space is used as the
+// marking stack.  The objects on the marking stack are the ones that have
+// been reached and marked but whose children have not yet been visited.
+//
+// The marking stack can overflow during traversal.  In that case, we set an
+// overflow flag.  When the overflow flag is set, we continue marking objects
+// reachable from the objects on the marking stack, but no longer push them on
+// the marking stack.  Instead, we mark them as both marked and overflowed.
+// When the stack is in the overflowed state, objects marked as overflowed
+// have been reached and marked but their children have not been visited yet.
+// After emptying the marking stack, we clear the overflow flag and traverse
+// the heap looking for objects marked as overflowed, push them on the stack,
+// and continue with marking.  This process repeats until all reachable
+// objects have been marked.
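+
+// A condensed sketch of the protocol just described (illustrative; the real
+// implementation is ProcessMarkingDeque()/RefillMarkingDeque() below):
+//
+//   void ProcessAll() {
+//     EmptyMarkingDeque();              // drain; may set the overflow flag
+//     while (marking_deque_.overflowed()) {
+//       RefillMarkingDeque();           // heap scan re-pushes overflowed
+//                                       // objects, clears flag when all fit
+//       EmptyMarkingDeque();
+//     }
+//   }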
+
+void CodeFlusher::ProcessJSFunctionCandidates() {
+  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
+  Object* undefined = isolate_->heap()->undefined_value();
+
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    ClearNextCandidate(candidate, undefined);
+
+    SharedFunctionInfo* shared = candidate->shared();
+
+    Code* code = shared->code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    if (!code_mark.Get()) {
+      if (FLAG_trace_code_flushing && shared->is_compiled()) {
+        PrintF("[code-flushing clears: ");
+        shared->ShortPrint();
+        PrintF(" - age: %d]\n", code->GetAge());
+      }
+      shared->set_code(lazy_compile);
+      candidate->set_code(lazy_compile);
+    } else {
+      candidate->set_code(code);
+    }
+
+    // We are in the middle of a GC cycle, so the write barrier in the code
+    // setter did not record the slot update, and we have to do that manually.
+    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
+    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
+    isolate_->heap()->mark_compact_collector()->RecordCodeEntrySlot(slot,
+                                                                    target);
+
+    Object** shared_code_slot =
+        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
+    isolate_->heap()->mark_compact_collector()->RecordSlot(
+        shared_code_slot, shared_code_slot, *shared_code_slot);
+
+    candidate = next_candidate;
+  }
+
+  jsfunction_candidates_head_ = NULL;
+}
+
+
+void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
+  Code* lazy_compile = isolate_->builtins()->builtin(Builtins::kCompileLazy);
+
+  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+  SharedFunctionInfo* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    ClearNextCandidate(candidate);
+
+    Code* code = candidate->code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    if (!code_mark.Get()) {
+      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
+        PrintF("[code-flushing clears: ");
+        candidate->ShortPrint();
+        PrintF(" - age: %d]\n", code->GetAge());
+      }
+      candidate->set_code(lazy_compile);
+    }
+
+    Object** code_slot =
+        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
+    isolate_->heap()->mark_compact_collector()->RecordSlot(code_slot, code_slot,
+                                                           *code_slot);
+
+    candidate = next_candidate;
+  }
+
+  shared_function_info_candidates_head_ = NULL;
+}
+
+
+void CodeFlusher::ProcessOptimizedCodeMaps() {
+  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    ClearNextCodeMap(holder);
+
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    int new_length = SharedFunctionInfo::kEntriesStart;
+    int old_length = code_map->length();
+    for (int i = SharedFunctionInfo::kEntriesStart; i < old_length;
+         i += SharedFunctionInfo::kEntryLength) {
+      Code* code =
+          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
+      if (!Marking::MarkBitFrom(code).Get()) continue;
+
+      // Move every slot in the entry.
+      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
+        int dst_index = new_length++;
+        Object** slot = code_map->RawFieldOfElementAt(dst_index);
+        Object* object = code_map->get(i + j);
+        code_map->set(dst_index, object);
+        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
+          DCHECK(object->IsSmi());
+        } else {
+          DCHECK(
+              Marking::IsBlack(Marking::MarkBitFrom(HeapObject::cast(*slot))));
+          isolate_->heap()->mark_compact_collector()->RecordSlot(slot, slot,
+                                                                 *slot);
+        }
+      }
+    }
+
+    // Trim the optimized code map if entries have been removed.
+    if (new_length < old_length) {
+      holder->TrimOptimizedCodeMap(old_length - new_length);
+    }
+
+    holder = next_holder;
+  }
+
+  optimized_code_map_holder_head_ = NULL;
+}
+
+
+void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing abandons function-info: ");
+    shared_info->ShortPrint();
+    PrintF("]\n");
+  }
+
+  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+  SharedFunctionInfo* next_candidate;
+  if (candidate == shared_info) {
+    next_candidate = GetNextCandidate(shared_info);
+    shared_function_info_candidates_head_ = next_candidate;
+    ClearNextCandidate(shared_info);
+  } else {
+    while (candidate != NULL) {
+      next_candidate = GetNextCandidate(candidate);
+
+      if (next_candidate == shared_info) {
+        next_candidate = GetNextCandidate(shared_info);
+        SetNextCandidate(candidate, next_candidate);
+        ClearNextCandidate(shared_info);
+        break;
+      }
+
+      candidate = next_candidate;
+    }
+  }
+}
+
+
+void CodeFlusher::EvictCandidate(JSFunction* function) {
+  DCHECK(!function->next_function_link()->IsUndefined());
+  Object* undefined = isolate_->heap()->undefined_value();
+
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(function);
+  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing abandons closure: ");
+    function->shared()->ShortPrint();
+    PrintF("]\n");
+  }
+
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  if (candidate == function) {
+    next_candidate = GetNextCandidate(function);
+    jsfunction_candidates_head_ = next_candidate;
+    ClearNextCandidate(function, undefined);
+  } else {
+    while (candidate != NULL) {
+      next_candidate = GetNextCandidate(candidate);
+
+      if (next_candidate == function) {
+        next_candidate = GetNextCandidate(function);
+        SetNextCandidate(candidate, next_candidate);
+        ClearNextCandidate(function, undefined);
+        break;
+      }
+
+      candidate = next_candidate;
+    }
+  }
+}
+
+
+void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+  DCHECK(!FixedArray::cast(code_map_holder->optimized_code_map())
+              ->get(SharedFunctionInfo::kNextMapIndex)
+              ->IsUndefined());
+
+  // Make sure previous flushing decisions are revisited.
+  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing abandons code-map: ");
+    code_map_holder->ShortPrint();
+    PrintF("]\n");
+  }
+
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  if (holder == code_map_holder) {
+    next_holder = GetNextCodeMap(code_map_holder);
+    optimized_code_map_holder_head_ = next_holder;
+    ClearNextCodeMap(code_map_holder);
+  } else {
+    while (holder != NULL) {
+      next_holder = GetNextCodeMap(holder);
+
+      if (next_holder == code_map_holder) {
+        next_holder = GetNextCodeMap(code_map_holder);
+        SetNextCodeMap(holder, next_holder);
+        ClearNextCodeMap(code_map_holder);
+        break;
+      }
+
+      holder = next_holder;
+    }
+  }
+}
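+
+// All three Evict* routines above unlink a node from an intrusive singly
+// linked list threaded through the objects themselves: the head is handled
+// specially; otherwise we walk to the predecessor.  Generic sketch
+// (illustrative, not the collector's API):
+//
+//   template <class T>
+//   void Unlink(T** head, T* node) {
+//     if (*head == node) {          // unlink at the head
+//       *head = node->next;
+//     } else {
+//       for (T* p = *head; p != nullptr; p = p->next) {
+//         if (p->next == node) {    // found the predecessor
+//           p->next = node->next;
+//           break;
+//         }
+//       }
+//     }
+//     node->next = nullptr;         // mirrors ClearNextCandidate()
+//   }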
+
+
+void CodeFlusher::EvictJSFunctionCandidates() {
+  JSFunction* candidate = jsfunction_candidates_head_;
+  JSFunction* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    EvictCandidate(candidate);
+    candidate = next_candidate;
+  }
+  DCHECK(jsfunction_candidates_head_ == NULL);
+}
+
+
+void CodeFlusher::EvictSharedFunctionInfoCandidates() {
+  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
+  SharedFunctionInfo* next_candidate;
+  while (candidate != NULL) {
+    next_candidate = GetNextCandidate(candidate);
+    EvictCandidate(candidate);
+    candidate = next_candidate;
+  }
+  DCHECK(shared_function_info_candidates_head_ == NULL);
+}
+
+
+void CodeFlusher::EvictOptimizedCodeMaps() {
+  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
+  SharedFunctionInfo* next_holder;
+  while (holder != NULL) {
+    next_holder = GetNextCodeMap(holder);
+    EvictOptimizedCodeMap(holder);
+    holder = next_holder;
+  }
+  DCHECK(optimized_code_map_holder_head_ == NULL);
+}
+
+
+void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
+  Heap* heap = isolate_->heap();
+
+  JSFunction** slot = &jsfunction_candidates_head_;
+  JSFunction* candidate = jsfunction_candidates_head_;
+  while (candidate != NULL) {
+    if (heap->InFromSpace(candidate)) {
+      v->VisitPointer(reinterpret_cast<Object**>(slot));
+    }
+    candidate = GetNextCandidate(*slot);
+    slot = GetNextCandidateSlot(*slot);
+  }
+}
+
+
+MarkCompactCollector::~MarkCompactCollector() {
+  if (code_flusher_ != NULL) {
+    delete code_flusher_;
+    code_flusher_ = NULL;
+  }
+}
+
+
+static inline HeapObject* ShortCircuitConsString(Object** p) {
+  // Optimization: If the heap object pointed to by p is a non-internalized
+  // cons string whose right substring is HEAP->empty_string, update
+  // it in place to its left substring.  Return the updated value.
+  //
+  // Here we assume that if we change *p, we replace it with a heap object
+  // (i.e., the left substring of a cons string is always a heap object).
+  //
+  // The check performed is:
+  //   object->IsConsString() && !object->IsInternalizedString() &&
+  //   (ConsString::cast(object)->second() == HEAP->empty_string())
+  // except the maps for the object and its possible substrings might be
+  // marked.
+  HeapObject* object = HeapObject::cast(*p);
+  if (!FLAG_clever_optimizations) return object;
+  Map* map = object->map();
+  InstanceType type = map->instance_type();
+  if (!IsShortcutCandidate(type)) return object;
+
+  Object* second = reinterpret_cast<ConsString*>(object)->second();
+  Heap* heap = map->GetHeap();
+  if (second != heap->empty_string()) {
+    return object;
+  }
+
+  // Since we don't have the object's start, it is impossible to update the
+  // page dirty marks. Therefore, we only replace the string with its left
+  // substring when page dirty marks do not change.
+  Object* first = reinterpret_cast<ConsString*>(object)->first();
+  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
+
+  *p = first;
+  return HeapObject::cast(first);
+}
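+
+// Illustrative example (not from this change): for s = a + "" the runtime
+// may allocate ConsString(first: a, second: empty_string).  When marking
+// visits a slot holding such a cons string, the slot is rewritten to point
+// directly at `a`, so the now-unreachable wrapper is never marked or
+// evacuated.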
+
+
+class MarkCompactMarkingVisitor
+    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
+ public:
+  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id, Map* map,
+                                   HeapObject* obj);
+
+  static void ObjectStatsCountFixedArray(
+      FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
+      FixedArraySubInstanceType dictionary_type);
+
+  template <MarkCompactMarkingVisitor::VisitorId id>
+  class ObjectStatsTracker {
+   public:
+    static inline void Visit(Map* map, HeapObject* obj);
+  };
+
+  static void Initialize();
+
+  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
+    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
+  }
+
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+    // Mark all objects pointed to in [start, end).
+    const int kMinRangeForMarkingRecursion = 64;
+    if (end - start >= kMinRangeForMarkingRecursion) {
+      if (VisitUnmarkedObjects(heap, start, end)) return;
+      // We are close to a stack overflow, so just mark the objects.
+    }
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    for (Object** p = start; p < end; p++) {
+      MarkObjectByPointer(collector, start, p);
+    }
+  }
+
+  // Marks the object black and pushes it on the marking stack.
+  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
+    MarkBit mark = Marking::MarkBitFrom(object);
+    heap->mark_compact_collector()->MarkObject(object, mark);
+  }
+
+  // Marks the object black without pushing it on the marking stack.
+  // Returns true if object needed marking and false otherwise.
+  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (!mark_bit.Get()) {
+      heap->mark_compact_collector()->SetMark(object, mark_bit);
+      return true;
+    }
+    return false;
+  }
+
+  // Mark object pointed to by p.
+  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
+                                         Object** anchor_slot, Object** p)) {
+    if (!(*p)->IsHeapObject()) return;
+    HeapObject* object = ShortCircuitConsString(p);
+    collector->RecordSlot(anchor_slot, p, object);
+    MarkBit mark = Marking::MarkBitFrom(object);
+    collector->MarkObject(object, mark);
+  }
+
+  // Visit an unmarked object.
+  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
+                                         HeapObject* obj)) {
+#ifdef DEBUG
+    DCHECK(collector->heap()->Contains(obj));
+    DCHECK(!collector->heap()->mark_compact_collector()->IsMarked(obj));
+#endif
+    Map* map = obj->map();
+    Heap* heap = obj->GetHeap();
+    MarkBit mark = Marking::MarkBitFrom(obj);
+    heap->mark_compact_collector()->SetMark(obj, mark);
+    // Mark the map pointer and the body.
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    heap->mark_compact_collector()->MarkObject(map, map_mark);
+    IterateBody(map, obj);
+  }
+
+  // Visit all unmarked objects pointed to by [start, end).
+  // Returns false if the operation fails (lack of stack space).
+  INLINE(static bool VisitUnmarkedObjects(Heap* heap, Object** start,
+                                          Object** end)) {
+    // Return false if we are close to the stack limit.
+    StackLimitCheck check(heap->isolate());
+    if (check.HasOverflowed()) return false;
+
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    // Visit the unmarked objects.
+    for (Object** p = start; p < end; p++) {
+      Object* o = *p;
+      if (!o->IsHeapObject()) continue;
+      collector->RecordSlot(start, p, o);
+      HeapObject* obj = HeapObject::cast(o);
+      MarkBit mark = Marking::MarkBitFrom(obj);
+      if (mark.Get()) continue;
+      VisitUnmarkedObject(collector, obj);
+    }
+    return true;
+  }
+
+ private:
+  template <int id>
+  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
+
+  // Code flushing support.
+
+  static const int kRegExpCodeThreshold = 5;
+
+  static void UpdateRegExpCodeAgeAndFlush(Heap* heap, JSRegExp* re,
+                                          bool is_one_byte) {
+    // Make sure that the fixed array is in fact initialized on the RegExp.
+    // We could potentially trigger a GC when initializing the RegExp.
+    if (HeapObject::cast(re->data())->map()->instance_type() !=
+        FIXED_ARRAY_TYPE)
+      return;
+
+    // Make sure this is a RegExp that actually contains code.
+    if (re->TypeTag() != JSRegExp::IRREGEXP) return;
+
+    Object* code = re->DataAt(JSRegExp::code_index(is_one_byte));
+    if (!code->IsSmi() &&
+        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
+      // Save a copy that can be reinstated if we need the code again.
+      re->SetDataAt(JSRegExp::saved_code_index(is_one_byte), code);
+
+      // Saving a copy might create a pointer into compaction candidate
+      // that was not observed by marker.  This might happen if JSRegExp data
+      // was marked through the compilation cache before marker reached JSRegExp
+      // object.
+      FixedArray* data = FixedArray::cast(re->data());
+      Object** slot =
+          data->data_start() + JSRegExp::saved_code_index(is_one_byte);
+      heap->mark_compact_collector()->RecordSlot(slot, slot, code);
+
+      // Set a number in the 0-255 range to guarantee no smi overflow.
+      re->SetDataAt(JSRegExp::code_index(is_one_byte),
+                    Smi::FromInt(heap->sweep_generation() & 0xff));
+    } else if (code->IsSmi()) {
+      int value = Smi::cast(code)->value();
+      // The regexp has not been compiled yet or there was a compilation error.
+      if (value == JSRegExp::kUninitializedValue ||
+          value == JSRegExp::kCompilationErrorValue) {
+        return;
+      }
+
+      // Check if we should flush now.
+      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
+        re->SetDataAt(JSRegExp::code_index(is_one_byte),
+                      Smi::FromInt(JSRegExp::kUninitializedValue));
+        re->SetDataAt(JSRegExp::saved_code_index(is_one_byte),
+                      Smi::FromInt(JSRegExp::kUninitializedValue));
+      }
+    }
+  }
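+
+  // Worked example of the aging protocol above (illustrative numbers), with
+  // kRegExpCodeThreshold == 5: if the code was compiled during
+  // sweep_generation 42, the code slot is replaced by Smi(42 & 0xff).  If
+  // the RegExp is not used again, then at sweep_generation 47 the check
+  // `value == (47 - 5) & 0xff` fires and both slots are reset to
+  // kUninitializedValue, flushing the compiled code.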
+
+  // Works by setting the current sweep_generation (as a smi) in the
+  // code-object slot of the RegExp's data array, keeping a copy around that
+  // can be reinstated if the RegExp is reused before flushing.  If the code
+  // was not used for kRegExpCodeThreshold mark-sweep GCs, we flush it.
+  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
+    Heap* heap = map->GetHeap();
+    MarkCompactCollector* collector = heap->mark_compact_collector();
+    if (!collector->is_code_flushing_enabled()) {
+      VisitJSRegExp(map, object);
+      return;
+    }
+    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
+    // Flush code or set age on both one byte and two byte code.
+    UpdateRegExpCodeAgeAndFlush(heap, re, true);
+    UpdateRegExpCodeAgeAndFlush(heap, re, false);
+    // Visit the fields of the RegExp, including the updated FixedArray.
+    VisitJSRegExp(map, object);
+  }
+
+  static VisitorDispatchTable<Callback> non_count_table_;
+};
+
+
+void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
+    FixedArrayBase* fixed_array, FixedArraySubInstanceType fast_type,
+    FixedArraySubInstanceType dictionary_type) {
+  Heap* heap = fixed_array->map()->GetHeap();
+  if (fixed_array->map() != heap->fixed_cow_array_map() &&
+      fixed_array->map() != heap->fixed_double_array_map() &&
+      fixed_array != heap->empty_fixed_array()) {
+    if (fixed_array->IsDictionary()) {
+      heap->RecordFixedArraySubTypeStats(dictionary_type, fixed_array->Size());
+    } else {
+      heap->RecordFixedArraySubTypeStats(fast_type, fixed_array->Size());
+    }
+  }
+}
+
+
+void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
+    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
+  Heap* heap = map->GetHeap();
+  int object_size = obj->Size();
+  heap->RecordObjectStats(map->instance_type(), object_size);
+  non_count_table_.GetVisitorById(id)(map, obj);
+  if (obj->IsJSObject()) {
+    JSObject* object = JSObject::cast(obj);
+    ObjectStatsCountFixedArray(object->elements(), DICTIONARY_ELEMENTS_SUB_TYPE,
+                               FAST_ELEMENTS_SUB_TYPE);
+    ObjectStatsCountFixedArray(object->properties(),
+                               DICTIONARY_PROPERTIES_SUB_TYPE,
+                               FAST_PROPERTIES_SUB_TYPE);
+  }
+}
+
+
+template <MarkCompactMarkingVisitor::VisitorId id>
+void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(Map* map,
+                                                              HeapObject* obj) {
+  ObjectStatsVisitBase(id, map, obj);
+}
+
+
+template <>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+    MarkCompactMarkingVisitor::kVisitMap> {
+ public:
+  static inline void Visit(Map* map, HeapObject* obj) {
+    Heap* heap = map->GetHeap();
+    Map* map_obj = Map::cast(obj);
+    DCHECK(map->instance_type() == MAP_TYPE);
+    DescriptorArray* array = map_obj->instance_descriptors();
+    if (map_obj->owns_descriptors() &&
+        array != heap->empty_descriptor_array()) {
+      int fixed_array_size = array->Size();
+      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
+                                         fixed_array_size);
+    }
+    if (map_obj->HasTransitionArray()) {
+      int fixed_array_size = map_obj->transitions()->Size();
+      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
+                                         fixed_array_size);
+    }
+    if (map_obj->has_code_cache()) {
+      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
+      heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
+                                         cache->default_cache()->Size());
+      if (!cache->normal_type_cache()->IsUndefined()) {
+        heap->RecordFixedArraySubTypeStats(
+            MAP_CODE_CACHE_SUB_TYPE,
+            FixedArray::cast(cache->normal_type_cache())->Size());
+      }
+    }
+    ObjectStatsVisitBase(kVisitMap, map, obj);
+  }
+};
+
+
+template <>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+    MarkCompactMarkingVisitor::kVisitCode> {
+ public:
+  static inline void Visit(Map* map, HeapObject* obj) {
+    Heap* heap = map->GetHeap();
+    int object_size = obj->Size();
+    DCHECK(map->instance_type() == CODE_TYPE);
+    Code* code_obj = Code::cast(obj);
+    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
+                                 object_size);
+    ObjectStatsVisitBase(kVisitCode, map, obj);
+  }
+};
+
+
+template <>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
+ public:
+  static inline void Visit(Map* map, HeapObject* obj) {
+    Heap* heap = map->GetHeap();
+    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
+    if (sfi->scope_info() != heap->empty_fixed_array()) {
+      heap->RecordFixedArraySubTypeStats(
+          SCOPE_INFO_SUB_TYPE, FixedArray::cast(sfi->scope_info())->Size());
+    }
+    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
+  }
+};
+
+
+template <>
+class MarkCompactMarkingVisitor::ObjectStatsTracker<
+    MarkCompactMarkingVisitor::kVisitFixedArray> {
+ public:
+  static inline void Visit(Map* map, HeapObject* obj) {
+    Heap* heap = map->GetHeap();
+    FixedArray* fixed_array = FixedArray::cast(obj);
+    if (fixed_array == heap->string_table()) {
+      heap->RecordFixedArraySubTypeStats(STRING_TABLE_SUB_TYPE,
+                                         fixed_array->Size());
+    }
+    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
+  }
+};
+
+
+void MarkCompactMarkingVisitor::Initialize() {
+  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
+
+  table_.Register(kVisitJSRegExp, &VisitRegExpAndFlushCode);
+
+  if (FLAG_track_gc_object_stats) {
+    // Copy the visitor table to make call-through possible.
+    non_count_table_.CopyFrom(&table_);
+#define VISITOR_ID_COUNT_FUNCTION(id) \
+  table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
+    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
+#undef VISITOR_ID_COUNT_FUNCTION
+  }
+}
+
+
+VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
+    MarkCompactMarkingVisitor::non_count_table_;
+
+
+class CodeMarkingVisitor : public ThreadVisitor {
+ public:
+  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
+      : collector_(collector) {}
+
+  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
+    collector_->PrepareThreadForCodeFlushing(isolate, top);
+  }
+
+ private:
+  MarkCompactCollector* collector_;
+};
+
+
+class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
+ public:
+  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
+      : collector_(collector) {}
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) VisitPointer(p);
+  }
+
+  void VisitPointer(Object** slot) {
+    Object* obj = *slot;
+    if (obj->IsSharedFunctionInfo()) {
+      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
+      MarkBit shared_mark = Marking::MarkBitFrom(shared);
+      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
+      collector_->MarkObject(shared->code(), code_mark);
+      collector_->MarkObject(shared, shared_mark);
+    }
+  }
+
+ private:
+  MarkCompactCollector* collector_;
+};
+
+
+void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
+                                                        ThreadLocalTop* top) {
+  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
+    // Note: for a frame that has a pending lazy deoptimization,
+    // StackFrame::unchecked_code will return a non-optimized code object for
+    // the outermost function, while StackFrame::LookupCode will return the
+    // actual optimized code object.
+    StackFrame* frame = it.frame();
+    Code* code = frame->unchecked_code();
+    MarkBit code_mark = Marking::MarkBitFrom(code);
+    MarkObject(code, code_mark);
+    if (frame->is_optimized()) {
+      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
+                                                          frame->LookupCode());
+    }
+  }
+}
+
+
+void MarkCompactCollector::PrepareForCodeFlushing() {
+  // Enable code flushing for non-incremental cycles.
+  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+    EnableCodeFlushing(!was_marked_incrementally_);
+  }
+
+  // If code flushing is disabled, there is no need to prepare for it.
+  if (!is_code_flushing_enabled()) return;
+
+  // Ensure that the empty descriptor array is marked.  MarkDescriptorArray
+  // relies on it being marked before any other descriptor array.
+  HeapObject* descriptor_array = heap()->empty_descriptor_array();
+  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
+  MarkObject(descriptor_array, descriptor_array_mark);
+
+  // Make sure we are not referencing the code from the stack.
+  DCHECK(this == heap()->mark_compact_collector());
+  PrepareThreadForCodeFlushing(heap()->isolate(),
+                               heap()->isolate()->thread_local_top());
+
+  // Iterate the archived stacks in all threads to check if
+  // the code is referenced.
+  CodeMarkingVisitor code_marking_visitor(this);
+  heap()->isolate()->thread_manager()->IterateArchivedThreads(
+      &code_marking_visitor);
+
+  SharedFunctionInfoMarkingVisitor visitor(this);
+  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
+  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
+
+  ProcessMarkingDeque();
+}
+
+
+// Visitor class for marking heap roots.
+class RootMarkingVisitor : public ObjectVisitor {
+ public:
+  explicit RootMarkingVisitor(Heap* heap)
+      : collector_(heap->mark_compact_collector()) {}
+
+  void VisitPointer(Object** p) { MarkObjectByPointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
+  }
+
+  // Skip the weak next code link in a code object, which is visited in
+  // ProcessTopOptimizedFrame.
+  void VisitNextCodeLink(Object** p) {}
+
+ private:
+  void MarkObjectByPointer(Object** p) {
+    if (!(*p)->IsHeapObject()) return;
+
+    // Replace flat cons strings in place.
+    HeapObject* object = ShortCircuitConsString(p);
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) return;
+
+    Map* map = object->map();
+    // Mark the object.
+    collector_->SetMark(object, mark_bit);
+
+    // Mark the map pointer and body, and push them on the marking stack.
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    collector_->MarkObject(map, map_mark);
+    MarkCompactMarkingVisitor::IterateBody(map, object);
+
+    // Mark all the objects reachable from the map and body.  May leave
+    // overflowed objects in the heap.
+    collector_->EmptyMarkingDeque();
+  }
+
+  MarkCompactCollector* collector_;
+};
+
+
+// Helper class for pruning the string table.
+template <bool finalize_external_strings>
+class StringTableCleaner : public ObjectVisitor {
+ public:
+  explicit StringTableCleaner(Heap* heap) : heap_(heap), pointers_removed_(0) {}
+
+  virtual void VisitPointers(Object** start, Object** end) {
+    // Visit all HeapObject pointers in [start, end).
+    for (Object** p = start; p < end; p++) {
+      Object* o = *p;
+      if (o->IsHeapObject() &&
+          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
+        if (finalize_external_strings) {
+          DCHECK(o->IsExternalString());
+          heap_->FinalizeExternalString(String::cast(*p));
+        } else {
+          pointers_removed_++;
+        }
+        // Set the entry to the_hole_value (as deleted).
+        *p = heap_->the_hole_value();
+      }
+    }
+  }
+
+  int PointersRemoved() {
+    DCHECK(!finalize_external_strings);
+    return pointers_removed_;
+  }
+
+ private:
+  Heap* heap_;
+  int pointers_removed_;
+};
+
+
+typedef StringTableCleaner<false> InternalizedStringTableCleaner;
+typedef StringTableCleaner<true> ExternalStringTableCleaner;
+
+
+// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
+// are retained.
+class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
+      return object;
+    } else if (object->IsAllocationSite() &&
+               !(AllocationSite::cast(object)->IsZombie())) {
+      // "dead" AllocationSites need to live long enough for a traversal of new
+      // space. These sites get a one-time reprieve.
+      AllocationSite* site = AllocationSite::cast(object);
+      site->MarkZombie();
+      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
+      return object;
+    } else {
+      return NULL;
+    }
+  }
+};
+
+
+// Fill the marking stack with overflowed objects returned by the given
+// iterator.  Stop when the marking stack is filled or the end of the space
+// is reached, whichever comes first.
+template <class T>
+static void DiscoverGreyObjectsWithIterator(Heap* heap,
+                                            MarkingDeque* marking_deque,
+                                            T* it) {
+  // The caller should ensure that the marking stack is initially not full,
+  // so that we don't waste effort pointlessly scanning for objects.
+  DCHECK(!marking_deque->IsFull());
+
+  Map* filler_map = heap->one_pointer_filler_map();
+  for (HeapObject* object = it->Next(); object != NULL; object = it->Next()) {
+    MarkBit markbit = Marking::MarkBitFrom(object);
+    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
+      Marking::GreyToBlack(markbit);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+    }
+  }
+}
+
+
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
+
+
+static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
+                                      MemoryChunk* p) {
+  DCHECK(!marking_deque->IsFull());
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    const MarkBit::CellType current_cell = *cell;
+    if (current_cell == 0) continue;
+
+    MarkBit::CellType grey_objects;
+    if (it.HasNext()) {
+      const MarkBit::CellType next_cell = *(cell + 1);
+      grey_objects = current_cell & ((current_cell >> 1) |
+                                     (next_cell << (Bitmap::kBitsPerCell - 1)));
+    } else {
+      grey_objects = current_cell & (current_cell >> 1);
+    }
+
+    int offset = 0;
+    while (grey_objects != 0) {
+      int trailing_zeros = base::bits::CountTrailingZeros32(grey_objects);
+      grey_objects >>= trailing_zeros;
+      offset += trailing_zeros;
+      MarkBit markbit(cell, 1 << offset, false);
+      DCHECK(Marking::IsGrey(markbit));
+      Marking::GreyToBlack(markbit);
+      Address addr = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(addr);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
+      marking_deque->PushBlack(object);
+      if (marking_deque->IsFull()) return;
+      offset += 2;
+      grey_objects >>= 2;
+    }
+
+    grey_objects >>= (Bitmap::kBitsPerCell - 1);
+  }
+}
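+
+// Note on the bit trick above (illustrative walkthrough): a grey object has
+// the bit pattern "11" starting at its mark bit, so
+// `current_cell & (current_cell >> 1)` leaves a 1 exactly at the mark bit of
+// each grey object.  Example with an 8-bit cell and one grey object at word
+// offset 2:
+//   current_cell      = 0b00001100
+//   current_cell >> 1 = 0b00000110
+//   AND               = 0b00000100   // CountTrailingZeros32 -> offset 2
+// The `next_cell << (kBitsPerCell - 1)` term covers a pair that straddles
+// the cell boundary (low bit in this cell's top position, high bit in bit 0
+// of the next cell).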
+
+
+int MarkCompactCollector::DiscoverAndEvacuateBlackObjectsOnPage(
+    NewSpace* new_space, NewSpacePage* p) {
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  MarkBit::CellType* cells = p->markbits()->cells();
+  int survivors_size = 0;
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    MarkBit::CellType current_cell = *cell;
+    if (current_cell == 0) continue;
+
+    int offset = 0;
+    while (current_cell != 0) {
+      int trailing_zeros = base::bits::CountTrailingZeros32(current_cell);
+      current_cell >>= trailing_zeros;
+      offset += trailing_zeros;
+      Address address = cell_base + offset * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(address);
+
+      int size = object->Size();
+      survivors_size += size;
+
+      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
+
+      offset++;
+      current_cell >>= 1;
+
+      // TODO(hpayer): Refactor EvacuateObject and call this function instead.
+      if (heap()->ShouldBePromoted(object->address(), size) &&
+          TryPromoteObject(object, size)) {
+        continue;
+      }
+
+      AllocationResult allocation = new_space->AllocateRaw(size);
+      if (allocation.IsRetry()) {
+        if (!new_space->AddFreshPage()) {
+          // Shouldn't happen. We are sweeping linearly, and to-space
+          // has the same number of pages as from-space, so there is
+          // always room.
+          UNREACHABLE();
+        }
+        allocation = new_space->AllocateRaw(size);
+        DCHECK(!allocation.IsRetry());
+      }
+      Object* target = allocation.ToObjectChecked();
+
+      MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE);
+      heap()->IncrementSemiSpaceCopiedObjectSize(size);
+    }
+    *cells = 0;
+  }
+  return survivors_size;
+}
+
+
+static void DiscoverGreyObjectsInSpace(Heap* heap, MarkingDeque* marking_deque,
+                                       PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    DiscoverGreyObjectsOnPage(marking_deque, p);
+    if (marking_deque->IsFull()) return;
+  }
+}
+
+
+static void DiscoverGreyObjectsInNewSpace(Heap* heap,
+                                          MarkingDeque* marking_deque) {
+  NewSpace* space = heap->new_space();
+  NewSpacePageIterator it(space->bottom(), space->top());
+  while (it.has_next()) {
+    NewSpacePage* page = it.next();
+    DiscoverGreyObjectsOnPage(marking_deque, page);
+    if (marking_deque->IsFull()) return;
+  }
+}
+
+
+bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
+  Object* o = *p;
+  if (!o->IsHeapObject()) return false;
+  HeapObject* heap_object = HeapObject::cast(o);
+  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  return !mark.Get();
+}
+
+
+bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
+                                                        Object** p) {
+  Object* o = *p;
+  DCHECK(o->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(o);
+  MarkBit mark = Marking::MarkBitFrom(heap_object);
+  return !mark.Get();
+}
+
+
+void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
+  StringTable* string_table = heap()->string_table();
+  // Mark the string table itself.
+  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
+  if (!string_table_mark.Get()) {
+    // The string table could already have been marked by visiting the handles list.
+    SetMark(string_table, string_table_mark);
+  }
+  // Explicitly mark the prefix.
+  string_table->IteratePrefix(visitor);
+  ProcessMarkingDeque();
+}
+
+
+void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
+  MarkBit mark_bit = Marking::MarkBitFrom(site);
+  SetMark(site, mark_bit);
+}
+
+
+void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
+  // Mark the heap roots including global variables, stack variables,
+  // etc., and all objects reachable from them.
+  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
+
+  // Handle the string table specially.
+  MarkStringTable(visitor);
+
+  MarkWeakObjectToCodeTable();
+
+  // There may be overflowed objects in the heap.  Visit them now.
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
+  }
+}
+
+
+void MarkCompactCollector::MarkImplicitRefGroups() {
+  List<ImplicitRefGroup*>* ref_groups =
+      isolate()->global_handles()->implicit_ref_groups();
+
+  int last = 0;
+  for (int i = 0; i < ref_groups->length(); i++) {
+    ImplicitRefGroup* entry = ref_groups->at(i);
+    DCHECK(entry != NULL);
+
+    if (!IsMarked(*entry->parent)) {
+      (*ref_groups)[last++] = entry;
+      continue;
+    }
+
+    Object*** children = entry->children;
+    // A parent object is marked, so mark all child heap objects.
+    for (size_t j = 0; j < entry->length; ++j) {
+      if ((*children[j])->IsHeapObject()) {
+        HeapObject* child = HeapObject::cast(*children[j]);
+        MarkBit mark = Marking::MarkBitFrom(child);
+        MarkObject(child, mark);
+      }
+    }
+
+    // Once the entire group has been marked, dispose it because it's
+    // not needed anymore.
+    delete entry;
+  }
+  ref_groups->Rewind(last);
+}
+
+
+void MarkCompactCollector::MarkWeakObjectToCodeTable() {
+  HeapObject* weak_object_to_code_table =
+      HeapObject::cast(heap()->weak_object_to_code_table());
+  if (!IsMarked(weak_object_to_code_table)) {
+    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
+    SetMark(weak_object_to_code_table, mark);
+  }
+}
+
+
+// Mark all objects reachable from the objects on the marking stack.
+// Before: the marking stack contains zero or more heap object pointers.
+// After: the marking stack is empty, and all objects reachable from the
+// marking stack have been marked, or are overflowed in the heap.
+void MarkCompactCollector::EmptyMarkingDeque() {
+  while (!marking_deque_.IsEmpty()) {
+    HeapObject* object = marking_deque_.Pop();
+    DCHECK(object->IsHeapObject());
+    DCHECK(heap()->Contains(object));
+    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+    Map* map = object->map();
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    MarkObject(map, map_mark);
+
+    MarkCompactMarkingVisitor::IterateBody(map, object);
+  }
+}
+
+
+// Sweep the heap for overflowed objects, clear their overflow bits, and
+// push them on the marking stack.  Stop early if the marking stack fills
+// before sweeping completes.  If sweeping completes, there are no remaining
+// overflowed objects in the heap, so the overflow flag on the marking stack
+// is cleared.
+void MarkCompactCollector::RefillMarkingDeque() {
+  DCHECK(marking_deque_.overflowed());
+
+  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
+                             heap()->old_pointer_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->old_data_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->code_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->map_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_, heap()->cell_space());
+  if (marking_deque_.IsFull()) return;
+
+  DiscoverGreyObjectsInSpace(heap(), &marking_deque_,
+                             heap()->property_cell_space());
+  if (marking_deque_.IsFull()) return;
+
+  LargeObjectIterator lo_it(heap()->lo_space());
+  DiscoverGreyObjectsWithIterator(heap(), &marking_deque_, &lo_it);
+  if (marking_deque_.IsFull()) return;
+
+  marking_deque_.ClearOverflowed();
+}
+
+
+// Mark all objects reachable (transitively) from objects on the marking
+// stack.  Before: the marking stack contains zero or more heap object
+// pointers.  After: the marking stack is empty and there are no overflowed
+// objects in the heap.
+void MarkCompactCollector::ProcessMarkingDeque() {
+  EmptyMarkingDeque();
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
+  }
+}
+
+
+// Mark all objects reachable (transitively) from objects on the marking
+// stack including references only considered in the atomic marking pause.
+void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
+  bool work_to_do = true;
+  DCHECK(marking_deque_.IsEmpty());
+  while (work_to_do) {
+    isolate()->global_handles()->IterateObjectGroups(
+        visitor, &IsUnmarkedHeapObjectWithHeap);
+    MarkImplicitRefGroups();
+    ProcessWeakCollections();
+    work_to_do = !marking_deque_.IsEmpty();
+    ProcessMarkingDeque();
+  }
+}
+
+
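+// Visit the code of the topmost optimized frame, if any, unless a full
+// JavaScript frame is found first.  Optimized code that cannot deopt at the
+// current pc has to be treated as strongly reachable from the stack.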
+void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
+  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
+       !it.done(); it.Advance()) {
+    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
+      return;
+    }
+    if (it.frame()->type() == StackFrame::OPTIMIZED) {
+      Code* code = it.frame()->LookupCode();
+      if (!code->CanDeoptAt(it.frame()->pc())) {
+        code->CodeIterateBody(visitor);
+      }
+      ProcessMarkingDeque();
+      return;
+    }
+  }
+}
+
+
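+// Top-level driver for the marking phase: finalize or abort incremental
+// marking, mark the roots, and compute the transitive closure over strong,
+// ephemeral and weak references.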
+void MarkCompactCollector::MarkLiveObjects() {
+  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
+  double start_time = 0.0;
+  if (FLAG_print_cumulative_gc_stat) {
+    start_time = base::OS::TimeCurrentMillis();
+  }
+  // The recursive GC marker detects when it is nearing stack overflow,
+  // and switches to a different marking system.  JS interrupts interfere
+  // with the C stack limit check.
+  PostponeInterruptsScope postpone(isolate());
+
+  bool incremental_marking_overflowed = false;
+  IncrementalMarking* incremental_marking = heap_->incremental_marking();
+  if (was_marked_incrementally_) {
+    // Finalize the incremental marking and check whether we had an overflow.
+    // Both markers use grey color to mark overflowed objects so the
+    // non-incremental marker can deal with them as if the overflow had
+    // occurred during normal marking.
+    // But incremental marker uses a separate marking deque
+    // so we have to explicitly copy its overflow state.
+    incremental_marking->Finalize();
+    incremental_marking_overflowed =
+        incremental_marking->marking_deque()->overflowed();
+    incremental_marking->marking_deque()->ClearOverflowed();
+  } else {
+    // Abort any pending incremental activities e.g. incremental sweeping.
+    incremental_marking->Abort();
+  }
+
+#ifdef DEBUG
+  DCHECK(state_ == PREPARE_GC);
+  state_ = MARK_LIVE_OBJECTS;
+#endif
+  // The to space contains live objects; a page in from space is used as a
+  // marking stack.
+  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
+  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
+  if (FLAG_force_marking_deque_overflows) {
+    marking_deque_end = marking_deque_start + 64 * kPointerSize;
+  }
+  marking_deque_.Initialize(marking_deque_start, marking_deque_end);
+  DCHECK(!marking_deque_.overflowed());
+
+  if (incremental_marking_overflowed) {
+    // There are overflowed objects left in the heap after incremental marking.
+    marking_deque_.SetOverflowed();
+  }
+
+  PrepareForCodeFlushing();
+
+  if (was_marked_incrementally_) {
+    // There is no write barrier on cells so we have to scan them now at the end
+    // of the incremental marking.
+    {
+      HeapObjectIterator cell_iterator(heap()->cell_space());
+      HeapObject* cell;
+      while ((cell = cell_iterator.Next()) != NULL) {
+        DCHECK(cell->IsCell());
+        if (IsMarked(cell)) {
+          int offset = Cell::kValueOffset;
+          MarkCompactMarkingVisitor::VisitPointer(
+              heap(), reinterpret_cast<Object**>(cell->address() + offset));
+        }
+      }
+    }
+    {
+      HeapObjectIterator js_global_property_cell_iterator(
+          heap()->property_cell_space());
+      HeapObject* cell;
+      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
+        DCHECK(cell->IsPropertyCell());
+        if (IsMarked(cell)) {
+          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
+        }
+      }
+    }
+  }
+
+  RootMarkingVisitor root_visitor(heap());
+  MarkRoots(&root_visitor);
+
+  ProcessTopOptimizedFrame(&root_visitor);
+
+  // The objects reachable from the roots are marked, yet unreachable
+  // objects are unmarked.  Mark objects reachable due to host
+  // application specific logic or through Harmony weak maps.
+  ProcessEphemeralMarking(&root_visitor);
+
+  // The objects reachable from the roots, weak maps or object groups
+  // are marked, yet unreachable objects are unmarked.  Mark objects
+  // reachable only from weak global handles.
+  //
+  // First we identify nonlive weak handles and mark them as pending
+  // destruction.
+  heap()->isolate()->global_handles()->IdentifyWeakHandles(
+      &IsUnmarkedHeapObject);
+  // Then we mark the objects and process the transitive closure.
+  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+  while (marking_deque_.overflowed()) {
+    RefillMarkingDeque();
+    EmptyMarkingDeque();
+  }
+
+  // Repeat host application specific and Harmony weak maps marking to
+  // mark unmarked objects reachable from the weak roots.
+  ProcessEphemeralMarking(&root_visitor);
+
+  AfterMarking();
+
+  if (FLAG_print_cumulative_gc_stat) {
+    heap_->tracer()->AddMarkingTime(base::OS::TimeCurrentMillis() - start_time);
+  }
+}
+
+
+void MarkCompactCollector::AfterMarking() {
+  // Object literal map caches reference strings (cache keys) and maps
+  // (cache values). At this point still useful maps have already been
+  // marked. Mark the keys for the alive values before we process the
+  // string table.
+  ProcessMapCaches();
+
+  // Prune the string table removing all strings only pointed to by the
+  // string table.  Cannot use string_table() here because the string
+  // table is marked.
+  StringTable* string_table = heap()->string_table();
+  InternalizedStringTableCleaner internalized_visitor(heap());
+  string_table->IterateElements(&internalized_visitor);
+  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
+
+  ExternalStringTableCleaner external_visitor(heap());
+  heap()->external_string_table_.Iterate(&external_visitor);
+  heap()->external_string_table_.CleanUp();
+
+  // Process the weak references.
+  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
+  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
+
+  // Remove object groups after marking phase.
+  heap()->isolate()->global_handles()->RemoveObjectGroups();
+  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
+
+  // Flush code from collected candidates.
+  if (is_code_flushing_enabled()) {
+    code_flusher_->ProcessCandidates();
+    // If the incremental marker does not support code flushing, we need to
+    // disable it before the incremental marking steps of the next cycle.
+    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
+      EnableCodeFlushing(false);
+    }
+  }
+
+  if (FLAG_track_gc_object_stats) {
+    heap()->CheckpointObjectStats();
+  }
+}
+
+
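+// Walk the native contexts list and clean up unmarked map caches: entries
+// whose maps died are cleared, and caches that become empty are dropped
+// from their context.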
+void MarkCompactCollector::ProcessMapCaches() {
+  Object* raw_context = heap()->native_contexts_list();
+  while (raw_context != heap()->undefined_value()) {
+    Context* context = reinterpret_cast<Context*>(raw_context);
+    if (IsMarked(context)) {
+      HeapObject* raw_map_cache =
+          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
+      // A map cache may be reachable from the stack. In this case
+      // it's already transitively marked and it's too late to clean
+      // up its parts.
+      if (!IsMarked(raw_map_cache) &&
+          raw_map_cache != heap()->undefined_value()) {
+        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
+        int existing_elements = map_cache->NumberOfElements();
+        int used_elements = 0;
+        for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
+             i += MapCache::kEntrySize) {
+          Object* raw_key = map_cache->get(i);
+          if (raw_key == heap()->undefined_value() ||
+              raw_key == heap()->the_hole_value())
+            continue;
+          STATIC_ASSERT(MapCache::kEntrySize == 2);
+          Object* raw_map = map_cache->get(i + 1);
+          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
+            ++used_elements;
+          } else {
+            // Delete useless entries with unmarked maps.
+            DCHECK(raw_map->IsMap());
+            map_cache->set_the_hole(i);
+            map_cache->set_the_hole(i + 1);
+          }
+        }
+        if (used_elements == 0) {
+          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
+        } else {
+          // Note: we don't actually shrink the cache here to avoid
+          // extra complexity during GC. We rely on subsequent cache
+          // usages (EnsureCapacity) to do this.
+          map_cache->ElementsRemoved(existing_elements - used_elements);
+          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
+          MarkObject(map_cache, map_cache_markbit);
+        }
+      }
+    }
+    // Move to next element in the list.
+    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
+  }
+  ProcessMarkingDeque();
+}
+
+
+void MarkCompactCollector::ClearNonLiveReferences() {
+  // Iterate over the map space, setting map transitions that go from
+  // a marked map to an unmarked map to null transitions.  This action
+  // is carried out only on maps of JSObjects and related subtypes.
+  HeapObjectIterator map_iterator(heap()->map_space());
+  for (HeapObject* obj = map_iterator.Next(); obj != NULL;
+       obj = map_iterator.Next()) {
+    Map* map = Map::cast(obj);
+
+    if (!map->CanTransition()) continue;
+
+    MarkBit map_mark = Marking::MarkBitFrom(map);
+    ClearNonLivePrototypeTransitions(map);
+    ClearNonLiveMapTransitions(map, map_mark);
+
+    if (map_mark.Get()) {
+      ClearNonLiveDependentCode(map->dependent_code());
+    } else {
+      ClearDependentCode(map->dependent_code());
+      map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
+    }
+  }
+
+  // Iterate over property cell space, removing dependent code that is not
+  // otherwise kept alive by strong references.
+  HeapObjectIterator cell_iterator(heap_->property_cell_space());
+  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
+       cell = cell_iterator.Next()) {
+    if (IsMarked(cell)) {
+      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
+    }
+  }
+
+  // Iterate over allocation sites, removing dependent code that is not
+  // otherwise kept alive by strong references.
+  Object* undefined = heap()->undefined_value();
+  for (Object* site = heap()->allocation_sites_list(); site != undefined;
+       site = AllocationSite::cast(site)->weak_next()) {
+    if (IsMarked(site)) {
+      ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
+    }
+  }
+
+  if (heap_->weak_object_to_code_table()->IsHashTable()) {
+    WeakHashTable* table =
+        WeakHashTable::cast(heap_->weak_object_to_code_table());
+    uint32_t capacity = table->Capacity();
+    for (uint32_t i = 0; i < capacity; i++) {
+      uint32_t key_index = table->EntryToIndex(i);
+      Object* key = table->get(key_index);
+      if (!table->IsKey(key)) continue;
+      uint32_t value_index = table->EntryToValueIndex(i);
+      Object* value = table->get(value_index);
+      if (key->IsCell() && !IsMarked(key)) {
+        Cell* cell = Cell::cast(key);
+        Object* object = cell->value();
+        if (IsMarked(object)) {
+          MarkBit mark = Marking::MarkBitFrom(cell);
+          SetMark(cell, mark);
+          Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
+          RecordSlot(value_slot, value_slot, *value_slot);
+        }
+      }
+      if (IsMarked(key)) {
+        if (!IsMarked(value)) {
+          HeapObject* obj = HeapObject::cast(value);
+          MarkBit mark = Marking::MarkBitFrom(obj);
+          SetMark(obj, mark);
+        }
+        ClearNonLiveDependentCode(DependentCode::cast(value));
+      } else {
+        ClearDependentCode(DependentCode::cast(value));
+        table->set(key_index, heap_->the_hole_value());
+        table->set(value_index, heap_->the_hole_value());
+        table->ElementRemoved();
+      }
+    }
+  }
+}
+
+
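+// Compact the prototype transitions of the given map, retaining only
+// entries whose prototype and cached map both survived marking; freed tail
+// slots are reset to undefined.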
+void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
+  int number_of_transitions = map->NumberOfProtoTransitions();
+  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
+
+  int new_number_of_transitions = 0;
+  const int header = Map::kProtoTransitionHeaderSize;
+  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
+  const int map_offset = header + Map::kProtoTransitionMapOffset;
+  const int step = Map::kProtoTransitionElementsPerEntry;
+  for (int i = 0; i < number_of_transitions; i++) {
+    Object* prototype = prototype_transitions->get(proto_offset + i * step);
+    Object* cached_map = prototype_transitions->get(map_offset + i * step);
+    if (IsMarked(prototype) && IsMarked(cached_map)) {
+      DCHECK(!prototype->IsUndefined());
+      int proto_index = proto_offset + new_number_of_transitions * step;
+      int map_index = map_offset + new_number_of_transitions * step;
+      if (new_number_of_transitions != i) {
+        prototype_transitions->set(proto_index, prototype,
+                                   UPDATE_WRITE_BARRIER);
+        prototype_transitions->set(map_index, cached_map, SKIP_WRITE_BARRIER);
+      }
+      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
+      RecordSlot(slot, slot, prototype);
+      new_number_of_transitions++;
+    }
+  }
+
+  if (new_number_of_transitions != number_of_transitions) {
+    map->SetNumberOfProtoTransitions(new_number_of_transitions);
+  }
+
+  // Fill slots that became free with undefined value.
+  for (int i = new_number_of_transitions * step;
+       i < number_of_transitions * step; i++) {
+    prototype_transitions->set_undefined(header + i);
+  }
+}
+
+
+void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
+                                                      MarkBit map_mark) {
+  Object* potential_parent = map->GetBackPointer();
+  if (!potential_parent->IsMap()) return;
+  Map* parent = Map::cast(potential_parent);
+
+  // Follow the back pointer, check whether we are dealing with a map
+  // transition from a live map to a dead path and, if so, clear the
+  // transitions of the parent.
+  bool current_is_alive = map_mark.Get();
+  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
+  if (!current_is_alive && parent_is_alive) {
+    ClearMapTransitions(parent);
+  }
+}
+
+
+// Clear a possible back pointer in case the transition leads to a dead map.
+// Return true in case a back pointer has been cleared and false otherwise.
+bool MarkCompactCollector::ClearMapBackPointer(Map* target) {
+  if (Marking::MarkBitFrom(target).Get()) return false;
+  target->SetBackPointer(heap_->undefined_value(), SKIP_WRITE_BARRIER);
+  return true;
+}
+
+
+void MarkCompactCollector::ClearMapTransitions(Map* map) {
+  // If there are no transitions to be cleared, return.
+  // TODO(verwaest) Should be an assert, otherwise back pointers are not
+  // properly cleared.
+  if (!map->HasTransitionArray()) return;
+
+  TransitionArray* t = map->transitions();
+
+  int transition_index = 0;
+
+  DescriptorArray* descriptors = map->instance_descriptors();
+  bool descriptors_owner_died = false;
+
+  // Compact all live descriptors to the left.
+  for (int i = 0; i < t->number_of_transitions(); ++i) {
+    Map* target = t->GetTarget(i);
+    if (ClearMapBackPointer(target)) {
+      if (target->instance_descriptors() == descriptors) {
+        descriptors_owner_died = true;
+      }
+    } else {
+      if (i != transition_index) {
+        Name* key = t->GetKey(i);
+        t->SetKey(transition_index, key);
+        Object** key_slot = t->GetKeySlot(transition_index);
+        RecordSlot(key_slot, key_slot, key);
+        // Target slots do not need to be recorded since maps are not compacted.
+        t->SetTarget(transition_index, t->GetTarget(i));
+      }
+      transition_index++;
+    }
+  }
+
+  // If there are no transitions to be cleared, return.
+  // TODO(verwaest) Should be an assert, otherwise back pointers are not
+  // properly cleared.
+  if (transition_index == t->number_of_transitions()) return;
+
+  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+
+  if (descriptors_owner_died) {
+    if (number_of_own_descriptors > 0) {
+      TrimDescriptorArray(map, descriptors, number_of_own_descriptors);
+      DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
+      map->set_owns_descriptors(true);
+    } else {
+      DCHECK(descriptors == heap_->empty_descriptor_array());
+    }
+  }
+
+  // Note that we never eliminate a transition array, though we might right-trim
+  // such that number_of_transitions() == 0. If this assumption changes,
+  // TransitionArray::CopyInsert() will need to deal with the case that a
+  // transition array disappeared during GC.
+  int trim = t->number_of_transitions() - transition_index;
+  if (trim > 0) {
+    heap_->RightTrimFixedArray<Heap::FROM_GC>(
+        t, t->IsSimpleTransition() ? trim
+                                   : trim * TransitionArray::kTransitionSize);
+  }
+  DCHECK(map->HasTransitionArray());
+}
+
+
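+// Right-trim the descriptor array to the descriptors that the given map
+// actually owns, and trim the enum caches to match.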
+void MarkCompactCollector::TrimDescriptorArray(Map* map,
+                                               DescriptorArray* descriptors,
+                                               int number_of_own_descriptors) {
+  int number_of_descriptors = descriptors->number_of_descriptors_storage();
+  int to_trim = number_of_descriptors - number_of_own_descriptors;
+  if (to_trim == 0) return;
+
+  heap_->RightTrimFixedArray<Heap::FROM_GC>(
+      descriptors, to_trim * DescriptorArray::kDescriptorSize);
+  descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
+
+  if (descriptors->HasEnumCache()) TrimEnumCache(map, descriptors);
+  descriptors->Sort();
+}
+
+
+void MarkCompactCollector::TrimEnumCache(Map* map,
+                                         DescriptorArray* descriptors) {
+  int live_enum = map->EnumLength();
+  if (live_enum == kInvalidEnumCacheSentinel) {
+    live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
+  }
+  if (live_enum == 0) return descriptors->ClearEnumCache();
+
+  FixedArray* enum_cache = descriptors->GetEnumCache();
+
+  int to_trim = enum_cache->length() - live_enum;
+  if (to_trim <= 0) return;
+  heap_->RightTrimFixedArray<Heap::FROM_GC>(descriptors->GetEnumCache(),
+                                            to_trim);
+
+  if (!descriptors->HasEnumIndicesCache()) return;
+  FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
+  heap_->RightTrimFixedArray<Heap::FROM_GC>(enum_indices_cache, to_trim);
+}
+
+
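+// Walk a linked list of dependent weak IC stubs, invalidating the maps of
+// the stubs that survived marking and unlinking the list as we go.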
+void MarkCompactCollector::ClearDependentICList(Object* head) {
+  Object* current = head;
+  Object* undefined = heap()->undefined_value();
+  while (current != undefined) {
+    Code* code = Code::cast(current);
+    if (IsMarked(code)) {
+      DCHECK(code->is_weak_stub());
+      IC::InvalidateMaps(code);
+    }
+    current = code->next_code_link();
+    code->set_next_code_link(undefined);
+  }
+}
+
+
+void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
+  DisallowHeapAllocation no_allocation;
+  DependentCode::GroupStartIndexes starts(entries);
+  int number_of_entries = starts.number_of_entries();
+  if (number_of_entries == 0) return;
+  int g = DependentCode::kWeakICGroup;
+  if (starts.at(g) != starts.at(g + 1)) {
+    int i = starts.at(g);
+    DCHECK(i + 1 == starts.at(g + 1));
+    Object* head = entries->object_at(i);
+    ClearDependentICList(head);
+  }
+  g = DependentCode::kWeakCodeGroup;
+  for (int i = starts.at(g); i < starts.at(g + 1); i++) {
+    // If the entry is compilation info then the map must be alive,
+    // and ClearDependentCode shouldn't be called.
+    DCHECK(entries->is_code_at(i));
+    Code* code = entries->code_at(i);
+    if (IsMarked(code) && !code->marked_for_deoptimization()) {
+      DependentCode::SetMarkedForDeoptimization(
+          code, static_cast<DependentCode::DependencyGroup>(g));
+      code->InvalidateEmbeddedObjects();
+      have_code_to_deoptimize_ = true;
+    }
+  }
+  for (int i = 0; i < number_of_entries; i++) {
+    entries->clear_at(i);
+  }
+}
+
+
+int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
+    DependentCode* entries, int group, int start, int end, int new_start) {
+  int survived = 0;
+  if (group == DependentCode::kWeakICGroup) {
+    // Dependent weak IC stubs form a linked list and only the head is stored
+    // in the dependent code array.
+    if (start != end) {
+      DCHECK(start + 1 == end);
+      Object* old_head = entries->object_at(start);
+      MarkCompactWeakObjectRetainer retainer;
+      Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
+      entries->set_object_at(new_start, head);
+      Object** slot = entries->slot_at(new_start);
+      RecordSlot(slot, slot, head);
+      // We do not compact this group even if the head is undefined;
+      // more dependent ICs are likely to be added later.
+      survived = 1;
+    }
+  } else {
+    for (int i = start; i < end; i++) {
+      Object* obj = entries->object_at(i);
+      DCHECK(obj->IsCode() || IsMarked(obj));
+      if (IsMarked(obj) &&
+          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
+        if (new_start + survived != i) {
+          entries->set_object_at(new_start + survived, obj);
+        }
+        Object** slot = entries->slot_at(new_start + survived);
+        RecordSlot(slot, slot, obj);
+        survived++;
+      }
+    }
+  }
+  entries->set_number_of_entries(
+      static_cast<DependentCode::DependencyGroup>(group), survived);
+  return survived;
+}
+
+
+void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
+  DisallowHeapAllocation no_allocation;
+  DependentCode::GroupStartIndexes starts(entries);
+  int number_of_entries = starts.number_of_entries();
+  if (number_of_entries == 0) return;
+  int new_number_of_entries = 0;
+  // Go through all groups, remove dead codes and compact.
+  for (int g = 0; g < DependentCode::kGroupCount; g++) {
+    int survived = ClearNonLiveDependentCodeInGroup(
+        entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
+    new_number_of_entries += survived;
+  }
+  for (int i = new_number_of_entries; i < number_of_entries; i++) {
+    entries->clear_at(i);
+  }
+}
+
+
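+// For every encountered weak collection, mark the values of all entries
+// whose keys are marked.  This implements the ephemeron semantics of weak
+// maps and sets and is iterated to a fixed point by ProcessEphemeralMarking.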
+void MarkCompactCollector::ProcessWeakCollections() {
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
+  Object* weak_collection_obj = heap()->encountered_weak_collections();
+  while (weak_collection_obj != Smi::FromInt(0)) {
+    JSWeakCollection* weak_collection =
+        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+    DCHECK(MarkCompactCollector::IsMarked(weak_collection));
+    if (weak_collection->table()->IsHashTable()) {
+      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
+      Object** anchor = reinterpret_cast<Object**>(table->address());
+      for (int i = 0; i < table->Capacity(); i++) {
+        if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
+          Object** key_slot =
+              table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
+          RecordSlot(anchor, key_slot, *key_slot);
+          Object** value_slot =
+              table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
+          MarkCompactMarkingVisitor::MarkObjectByPointer(this, anchor,
+                                                         value_slot);
+        }
+      }
+    }
+    weak_collection_obj = weak_collection->next();
+  }
+}
+
+
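+// After marking has finished, remove all entries with unmarked keys from
+// the encountered weak collections and unlink the collection list.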
+void MarkCompactCollector::ClearWeakCollections() {
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
+  Object* weak_collection_obj = heap()->encountered_weak_collections();
+  while (weak_collection_obj != Smi::FromInt(0)) {
+    JSWeakCollection* weak_collection =
+        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+    DCHECK(MarkCompactCollector::IsMarked(weak_collection));
+    if (weak_collection->table()->IsHashTable()) {
+      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
+      for (int i = 0; i < table->Capacity(); i++) {
+        HeapObject* key = HeapObject::cast(table->KeyAt(i));
+        if (!MarkCompactCollector::IsMarked(key)) {
+          table->RemoveEntry(i);
+        }
+      }
+    }
+    weak_collection_obj = weak_collection->next();
+    weak_collection->set_next(heap()->undefined_value());
+  }
+  heap()->set_encountered_weak_collections(Smi::FromInt(0));
+}
+
+
+void MarkCompactCollector::AbortWeakCollections() {
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_WEAKCOLLECTION_ABORT);
+  Object* weak_collection_obj = heap()->encountered_weak_collections();
+  while (weak_collection_obj != Smi::FromInt(0)) {
+    JSWeakCollection* weak_collection =
+        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
+    weak_collection_obj = weak_collection->next();
+    weak_collection->set_next(heap()->undefined_value());
+  }
+  heap()->set_encountered_weak_collections(Smi::FromInt(0));
+}
+
+
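+// Record a slot of a freshly migrated object: pointers to new space go into
+// the store buffer, pointers into evacuation candidates into the migration
+// slots buffer.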
+void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) {
+  if (heap_->InNewSpace(value)) {
+    heap_->store_buffer()->Mark(slot);
+  } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
+    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                       reinterpret_cast<Object**>(slot),
+                       SlotsBuffer::IGNORE_OVERFLOW);
+  }
+}
+
+
+// We scavenge new space simultaneously with sweeping. This is done in two
+// passes.
+//
+// The first pass migrates all alive objects from one semispace to another or
+// promotes them to old space.  The forwarding address is written directly
+// into the first word of the object without any encoding.  If an object is
+// dead we write NULL as its forwarding address.
+//
+// The second pass updates pointers to new space in all spaces.  It is possible
+// to encounter pointers to dead new space objects during traversal of pointers
+// to new space.  We should clear them to avoid encountering them during next
+// pointer iteration.  This is an issue if the store buffer overflows and we
+// have to scan the entire old space, including dead objects, looking for
+// pointers to new space.
+void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
+                                         int size, AllocationSpace dest) {
+  Address dst_addr = dst->address();
+  Address src_addr = src->address();
+  DCHECK(heap()->AllowedToBeMigrated(src, dest));
+  DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
+  if (dest == OLD_POINTER_SPACE) {
+    Address src_slot = src_addr;
+    Address dst_slot = dst_addr;
+    DCHECK(IsAligned(size, kPointerSize));
+
+    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
+      Object* value = Memory::Object_at(src_slot);
+
+      Memory::Object_at(dst_slot) = value;
+
+      if (!src->MayContainRawValues()) {
+        RecordMigratedSlot(value, dst_slot);
+      }
+
+      src_slot += kPointerSize;
+      dst_slot += kPointerSize;
+    }
+
+    if (compacting_ && dst->IsJSFunction()) {
+      Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
+      Address code_entry = Memory::Address_at(code_entry_slot);
+
+      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+        SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                           SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+                           SlotsBuffer::IGNORE_OVERFLOW);
+      }
+    } else if (dst->IsConstantPoolArray()) {
+      // We special-case ConstantPoolArrays since they could contain integer
+      // value entries which look like tagged pointers.
+      // TODO(mstarzinger): restructure this code to avoid this special-casing.
+      ConstantPoolArray* array = ConstantPoolArray::cast(dst);
+      ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
+      while (!code_iter.is_finished()) {
+        Address code_entry_slot =
+            dst_addr + array->OffsetOfElementAt(code_iter.next_index());
+        Address code_entry = Memory::Address_at(code_entry_slot);
+
+        if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
+          SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                             SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
+                             SlotsBuffer::IGNORE_OVERFLOW);
+        }
+      }
+      ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
+      while (!heap_iter.is_finished()) {
+        Address heap_slot =
+            dst_addr + array->OffsetOfElementAt(heap_iter.next_index());
+        Object* value = Memory::Object_at(heap_slot);
+        RecordMigratedSlot(value, heap_slot);
+      }
+    }
+  } else if (dest == CODE_SPACE) {
+    PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
+    heap()->MoveBlock(dst_addr, src_addr, size);
+    SlotsBuffer::AddTo(&slots_buffer_allocator_, &migration_slots_buffer_,
+                       SlotsBuffer::RELOCATED_CODE_OBJECT, dst_addr,
+                       SlotsBuffer::IGNORE_OVERFLOW);
+    Code::cast(dst)->Relocate(dst_addr - src_addr);
+  } else {
+    DCHECK(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
+    heap()->MoveBlock(dst_addr, src_addr, size);
+  }
+  heap()->OnMoveEvent(dst, src, size);
+  Memory::Address_at(src_addr) = dst_addr;
+}
+
+
+// Visitor for updating pointers from live objects in old spaces to new space.
+// It does not expect to encounter pointers to dead objects.
+class PointersUpdatingVisitor : public ObjectVisitor {
+ public:
+  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) {}
+
+  void VisitPointer(Object** p) { UpdatePointer(p); }
+
+  void VisitPointers(Object** start, Object** end) {
+    for (Object** p = start; p < end; p++) UpdatePointer(p);
+  }
+
+  void VisitEmbeddedPointer(RelocInfo* rinfo) {
+    DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+    Object* target = rinfo->target_object();
+    Object* old_target = target;
+    VisitPointer(&target);
+    // Avoid unnecessary changes that might unnecessarily flush the
+    // instruction cache.
+    if (target != old_target) {
+      rinfo->set_target_object(target);
+    }
+  }
+
+  void VisitCodeTarget(RelocInfo* rinfo) {
+    DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+    Object* old_target = target;
+    VisitPointer(&target);
+    if (target != old_target) {
+      rinfo->set_target_address(Code::cast(target)->instruction_start());
+    }
+  }
+
+  void VisitCodeAgeSequence(RelocInfo* rinfo) {
+    DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+    Object* stub = rinfo->code_age_stub();
+    DCHECK(stub != NULL);
+    VisitPointer(&stub);
+    if (stub != rinfo->code_age_stub()) {
+      rinfo->set_code_age_stub(Code::cast(stub));
+    }
+  }
+
+  void VisitDebugTarget(RelocInfo* rinfo) {
+    DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+            rinfo->IsPatchedReturnSequence()) ||
+           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+            rinfo->IsPatchedDebugBreakSlotSequence()));
+    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+    VisitPointer(&target);
+    rinfo->set_call_address(Code::cast(target)->instruction_start());
+  }
+
+  static inline void UpdateSlot(Heap* heap, Object** slot) {
+    Object* obj = *slot;
+
+    if (!obj->IsHeapObject()) return;
+
+    HeapObject* heap_obj = HeapObject::cast(obj);
+
+    MapWord map_word = heap_obj->map_word();
+    if (map_word.IsForwardingAddress()) {
+      DCHECK(heap->InFromSpace(heap_obj) ||
+             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
+      HeapObject* target = map_word.ToForwardingAddress();
+      *slot = target;
+      DCHECK(!heap->InFromSpace(target) &&
+             !MarkCompactCollector::IsOnEvacuationCandidate(target));
+    }
+  }
+
+ private:
+  inline void UpdatePointer(Object** p) { UpdateSlot(heap_, p); }
+
+  Heap* heap_;
+};
+
+
+static void UpdatePointer(HeapObject** address, HeapObject* object) {
+  Address new_addr = Memory::Address_at(object->address());
+
+  // The new space sweep will overwrite the map word of dead objects
+  // with NULL. In this case we do not need to transfer this entry to
+  // the store buffer which we are rebuilding.
+  // We perform the pointer update with a no-barrier compare-and-swap. The
+  // compare-and-swap may fail in the case where the pointer update tries to
+  // update garbage memory which was concurrently accessed by the sweeper.
+  if (new_addr != NULL) {
+    base::NoBarrier_CompareAndSwap(
+        reinterpret_cast<base::AtomicWord*>(address),
+        reinterpret_cast<base::AtomicWord>(object),
+        reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
+  }
+}
+
+
+static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
+                                                         Object** p) {
+  MapWord map_word = HeapObject::cast(*p)->map_word();
+
+  if (map_word.IsForwardingAddress()) {
+    return String::cast(map_word.ToForwardingAddress());
+  }
+
+  return String::cast(*p);
+}
+
+
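+// Try to promote an object out of new space; returns false if the raw
+// allocation in the target old space fails.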
+bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
+                                            int object_size) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+
+  OldSpace* target_space = heap()->TargetSpace(object);
+
+  DCHECK(target_space == heap()->old_pointer_space() ||
+         target_space == heap()->old_data_space());
+  HeapObject* target;
+  AllocationResult allocation = target_space->AllocateRaw(object_size);
+  if (allocation.To(&target)) {
+    MigrateObject(target, object, object_size, target_space->identity());
+    heap()->IncrementPromotedObjectsSize(object_size);
+    return true;
+  }
+
+  return false;
+}
+
+
+void MarkCompactCollector::EvacuateNewSpace() {
+  // There are soft limits in the allocation code, designed to trigger a
+  // mark-sweep collection by failing allocations.  But since we are already
+  // in a mark-sweep allocation, there is no sense in trying to trigger one.
+  AlwaysAllocateScope scope(isolate());
+
+  NewSpace* new_space = heap()->new_space();
+
+  // Store allocation range before flipping semispaces.
+  Address from_bottom = new_space->bottom();
+  Address from_top = new_space->top();
+
+  // Flip the semispaces.  After flipping, to space is empty, from space has
+  // live objects.
+  new_space->Flip();
+  new_space->ResetAllocationInfo();
+
+  int survivors_size = 0;
+
+  // First pass: traverse all objects in the inactive semispace, remove marks,
+  // migrate live objects and write forwarding addresses.  This stage puts
+  // new entries in the store buffer and may cause some pages to be marked
+  // scan-on-scavenge.
+  NewSpacePageIterator it(from_bottom, from_top);
+  while (it.has_next()) {
+    NewSpacePage* p = it.next();
+    survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
+  }
+
+  heap_->IncrementYoungSurvivorsCounter(survivors_size);
+  new_space->set_age_mark(new_space->top());
+}
+
+
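+// Evacuate all live objects from an evacuation candidate page into the
+// owning space, falling back to the space's emergency memory if a regular
+// allocation fails.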
+void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
+  AlwaysAllocateScope always_allocate(isolate());
+  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+  DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
+  p->SetWasSwept();
+
+  int offsets[16];
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+
+    if (*cell == 0) continue;
+
+    int live_objects = MarkWordToObjectStarts(*cell, offsets);
+    for (int i = 0; i < live_objects; i++) {
+      Address object_addr = cell_base + offsets[i] * kPointerSize;
+      HeapObject* object = HeapObject::FromAddress(object_addr);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+
+      int size = object->Size();
+
+      HeapObject* target_object;
+      AllocationResult allocation = space->AllocateRaw(size);
+      if (!allocation.To(&target_object)) {
+        // If allocation failed, use emergency memory and re-try allocation.
+        CHECK(space->HasEmergencyMemory());
+        space->UseEmergencyMemory();
+        allocation = space->AllocateRaw(size);
+      }
+      if (!allocation.To(&target_object)) {
+        // OS refused to give us memory.
+        V8::FatalProcessOutOfMemory("Evacuation");
+        return;
+      }
+
+      MigrateObject(target_object, object, size, space->identity());
+      DCHECK(object->map_word().IsForwardingAddress());
+    }
+
+    // Clear marking bits for current cell.
+    *cell = 0;
+  }
+  p->ResetLiveBytes();
+}
+
+
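+// Evacuate all candidate pages.  If memory runs out mid-way, the remaining
+// candidates are abandoned and flagged for rescanning during evacuation
+// instead.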
+void MarkCompactCollector::EvacuatePages() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    DCHECK(p->IsEvacuationCandidate() ||
+           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+    DCHECK(static_cast<int>(p->parallel_sweeping()) ==
+           MemoryChunk::SWEEPING_DONE);
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    // Allocate emergency memory for the case when compaction fails due to out
+    // of memory.
+    if (!space->HasEmergencyMemory()) {
+      space->CreateEmergencyMemory();
+    }
+    if (p->IsEvacuationCandidate()) {
+      // During compaction we might have to request a new page. Check that we
+      // have an emergency page and the space still has room for that.
+      if (space->HasEmergencyMemory() && space->CanExpand()) {
+        EvacuateLiveObjectsFromPage(p);
+      } else {
+        // Without room for expansion evacuation is not guaranteed to succeed.
+        // Pessimistically abandon unevacuated pages.
+        for (int j = i; j < npages; j++) {
+          Page* page = evacuation_candidates_[j];
+          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
+          page->ClearEvacuationCandidate();
+          page->SetFlag(Page::RESCAN_ON_EVACUATION);
+        }
+        break;
+      }
+    }
+  }
+  if (npages > 0) {
+    // Release emergency memory.
+    PagedSpaces spaces(heap());
+    for (PagedSpace* space = spaces.next(); space != NULL;
+         space = spaces.next()) {
+      if (space->HasEmergencyMemory()) {
+        space->FreeEmergencyMemory();
+      }
+    }
+  }
+}
+
+
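+// Weak object retainer that follows forwarding addresses left behind by
+// evacuation, so that weak lists end up pointing at the new locations of
+// moved objects.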
+class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
+ public:
+  virtual Object* RetainAs(Object* object) {
+    if (object->IsHeapObject()) {
+      HeapObject* heap_object = HeapObject::cast(object);
+      MapWord map_word = heap_object->map_word();
+      if (map_word.IsForwardingAddress()) {
+        return map_word.ToForwardingAddress();
+      }
+    }
+    return object;
+  }
+};
+
+
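+// Dispatch a recorded slot to the appropriate visitor method based on the
+// slot type it was recorded with.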
+static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v,
+                              SlotsBuffer::SlotType slot_type, Address addr) {
+  switch (slot_type) {
+    case SlotsBuffer::CODE_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
+      rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::CODE_ENTRY_SLOT: {
+      v->VisitCodeEntry(addr);
+      break;
+    }
+    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
+      HeapObject* obj = HeapObject::FromAddress(addr);
+      Code::cast(obj)->CodeIterateBody(v);
+      break;
+    }
+    case SlotsBuffer::DEBUG_TARGET_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
+      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::JS_RETURN_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
+      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
+      break;
+    }
+    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
+      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
+      rinfo.Visit(isolate, v);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+enum SweepingMode { SWEEP_ONLY, SWEEP_AND_VISIT_LIVE_OBJECTS };
+
+
+enum SkipListRebuildingMode { REBUILD_SKIP_LIST, IGNORE_SKIP_LIST };
+
+
+enum FreeSpaceTreatmentMode { IGNORE_FREE_SPACE, ZAP_FREE_SPACE };
+
+
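+// Free a memory block either directly into the space (sweeping on the main
+// thread) or into the private free list used by a concurrent sweeper.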
+template <MarkCompactCollector::SweepingParallelism mode>
+static intptr_t Free(PagedSpace* space, FreeList* free_list, Address start,
+                     int size) {
+  if (mode == MarkCompactCollector::SWEEP_ON_MAIN_THREAD) {
+    DCHECK(free_list == NULL);
+    return space->Free(start, size);
+  } else {
+    // TODO(hpayer): account for wasted bytes in concurrent sweeping too.
+    return size - free_list->Free(start, size);
+  }
+}
+
+
+// Sweeps a page. After sweeping the page can be iterated.
+// Slots in live objects pointing into evacuation candidates are updated
+// if requested.
+// Returns the size of the biggest contiguous freed memory chunk in bytes.
+template <SweepingMode sweeping_mode,
+          MarkCompactCollector::SweepingParallelism parallelism,
+          SkipListRebuildingMode skip_list_mode,
+          FreeSpaceTreatmentMode free_space_mode>
+static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
+                 ObjectVisitor* v) {
+  DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept());
+  DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
+            space->identity() == CODE_SPACE);
+  DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
+  DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
+         sweeping_mode == SWEEP_ONLY);
+
+  Address free_start = p->area_start();
+  DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
+  int offsets[16];
+
+  SkipList* skip_list = p->skip_list();
+  int curr_region = -1;
+  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
+    skip_list->Clear();
+  }
+
+  intptr_t freed_bytes = 0;
+  intptr_t max_freed_bytes = 0;
+
+  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
+    Address cell_base = it.CurrentCellBase();
+    MarkBit::CellType* cell = it.CurrentCell();
+    int live_objects = MarkWordToObjectStarts(*cell, offsets);
+    int live_index = 0;
+    for (; live_objects != 0; live_objects--) {
+      Address free_end = cell_base + offsets[live_index++] * kPointerSize;
+      if (free_end != free_start) {
+        int size = static_cast<int>(free_end - free_start);
+        if (free_space_mode == ZAP_FREE_SPACE) {
+          memset(free_start, 0xcc, size);
+        }
+        freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+        max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+#ifdef ENABLE_GDB_JIT_INTERFACE
+        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
+          GDBJITInterface::RemoveCodeRange(free_start, free_end);
+        }
+#endif
+      }
+      HeapObject* live_object = HeapObject::FromAddress(free_end);
+      DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
+      Map* map = live_object->map();
+      int size = live_object->SizeFromMap(map);
+      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
+        live_object->IterateBody(map->instance_type(), size, v);
+      }
+      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
+        int new_region_start = SkipList::RegionNumber(free_end);
+        int new_region_end =
+            SkipList::RegionNumber(free_end + size - kPointerSize);
+        if (new_region_start != curr_region || new_region_end != curr_region) {
+          skip_list->AddObject(free_end, size);
+          curr_region = new_region_end;
+        }
+      }
+      free_start = free_end + size;
+    }
+    // Clear marking bits for current cell.
+    *cell = 0;
+  }
+  if (free_start != p->area_end()) {
+    int size = static_cast<int>(p->area_end() - free_start);
+    if (free_space_mode == ZAP_FREE_SPACE) {
+      memset(free_start, 0xcc, size);
+    }
+    freed_bytes = Free<parallelism>(space, free_list, free_start, size);
+    max_freed_bytes = Max(freed_bytes, max_freed_bytes);
+#ifdef ENABLE_GDB_JIT_INTERFACE
+    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
+      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
+    }
+#endif
+  }
+  p->ResetLiveBytes();
+
+  if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
+    // When concurrent sweeping is active, the page will be marked after
+    // sweeping by the main thread.
+    p->set_parallel_sweeping(MemoryChunk::SWEEPING_FINALIZE);
+  } else {
+    p->SetWasSwept();
+  }
+  return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
+}
+
+
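+// Set or clear all mark-bits in the address range covered by an invalidated
+// code object.  Returns false without touching the bits if the code lives
+// on a page that is evacuated or rescanned anyway.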
+static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
+  Page* p = Page::FromAddress(code->address());
+
+  if (p->IsEvacuationCandidate() || p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+    return false;
+  }
+
+  Address code_start = code->address();
+  Address code_end = code_start + code->Size();
+
+  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
+  uint32_t end_index =
+      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
+
+  Bitmap* b = p->markbits();
+
+  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
+  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
+
+  MarkBit::CellType* start_cell = start_mark_bit.cell();
+  MarkBit::CellType* end_cell = end_mark_bit.cell();
+
+  if (value) {
+    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
+    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
+
+    if (start_cell == end_cell) {
+      *start_cell |= start_mask & end_mask;
+    } else {
+      *start_cell |= start_mask;
+      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
+        *cell = ~0;
+      }
+      *end_cell |= end_mask;
+    }
+  } else {
+    for (MarkBit::CellType* cell = start_cell; cell <= end_cell; cell++) {
+      *cell = 0;
+    }
+  }
+
+  return true;
+}
+
+
+static bool IsOnInvalidatedCodeObject(Address addr) {
+  // We did not record any slots in large objects thus
+  // we can safely go to the page from the slot address.
+  Page* p = Page::FromAddress(addr);
+
+  // First check owner's identity because old pointer and old data spaces
+  // are swept lazily and might still have non-zero mark-bits on some
+  // pages.
+  if (p->owner()->identity() != CODE_SPACE) return false;
+
+  // In code space, the only non-zero mark-bits are on evacuation candidates
+  // (though we do not record any slots on them) and under invalidated code
+  // objects.
+  MarkBit mark_bit =
+      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
+
+  return mark_bit.Get();
+}
+
+
+void MarkCompactCollector::InvalidateCode(Code* code) {
+  if (heap_->incremental_marking()->IsCompacting() &&
+      !ShouldSkipEvacuationSlotRecording(code)) {
+    DCHECK(compacting_);
+
+    // If the object is white then no slots were recorded on it yet.
+    MarkBit mark_bit = Marking::MarkBitFrom(code);
+    if (Marking::IsWhite(mark_bit)) return;
+
+    invalidated_code_.Add(code);
+  }
+}
+
+
+// Return true if the given code is deoptimized or will be deoptimized.
+bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
+  return code->is_optimized_code() && code->marked_for_deoptimization();
+}
+
+
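+// Artificially set the mark-bits under all invalidated code objects so that
+// slots pointing into them can be recognized while updating pointers.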
+bool MarkCompactCollector::MarkInvalidatedCode() {
+  bool code_marked = false;
+
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+
+    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
+      code_marked = true;
+    }
+  }
+
+  return code_marked;
+}
+
+
+void MarkCompactCollector::RemoveDeadInvalidatedCode() {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
+  }
+}
+
+
+void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
+  int length = invalidated_code_.length();
+  for (int i = 0; i < length; i++) {
+    Code* code = invalidated_code_[i];
+    if (code != NULL) {
+      code->Iterate(visitor);
+      SetMarkBitsUnderInvalidatedCode(code, false);
+    }
+  }
+  invalidated_code_.Rewind(0);
+}
+
+
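+// Drive the evacuation phase: move live objects out of new space and off
+// the compaction candidates, then update every recorded slot, root, cell
+// and table entry to point at the new locations.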
+void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
+  Heap::RelocationLock relocation_lock(heap());
+
+  bool code_slots_filtering_required;
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_SWEEP_NEWSPACE);
+    code_slots_filtering_required = MarkInvalidatedCode();
+    EvacuateNewSpace();
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_EVACUATE_PAGES);
+    EvacuatePages();
+  }
+
+  // Second pass: find pointers to new space and update them.
+  PointersUpdatingVisitor updating_visitor(heap());
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
+    // Update pointers in to space.
+    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
+                            heap()->new_space()->top());
+    for (HeapObject* object = to_it.Next(); object != NULL;
+         object = to_it.Next()) {
+      Map* map = object->map();
+      object->IterateBody(map->instance_type(), object->SizeFromMap(map),
+                          &updating_visitor);
+    }
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
+    // Update roots.
+    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
+    StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
+                                  &Heap::ScavengeStoreBufferCallback);
+    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
+        &UpdatePointer);
+  }
+
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(),
+                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
+    SlotsBuffer::UpdateSlotsRecordedIn(heap_, migration_slots_buffer_,
+                                       code_slots_filtering_required);
+    if (FLAG_trace_fragmentation) {
+      PrintF("  migration slots buffer: %d\n",
+             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
+    }
+
+    if (compacting_ && was_marked_incrementally_) {
+      // It's difficult to filter out slots recorded for large objects.
+      LargeObjectIterator it(heap_->lo_space());
+      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+        // LargeObjectSpace is not swept yet thus we have to skip
+        // dead objects explicitly.
+        if (!IsMarked(obj)) continue;
+
+        Page* p = Page::FromAddress(obj->address());
+        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+          obj->Iterate(&updating_visitor);
+          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
+        }
+      }
+    }
+  }
+
+  int npages = evacuation_candidates_.length();
+  {
+    GCTracer::Scope gc_scope(
+        heap()->tracer(),
+        GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
+    for (int i = 0; i < npages; i++) {
+      Page* p = evacuation_candidates_[i];
+      DCHECK(p->IsEvacuationCandidate() ||
+             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+
+      if (p->IsEvacuationCandidate()) {
+        SlotsBuffer::UpdateSlotsRecordedIn(heap_, p->slots_buffer(),
+                                           code_slots_filtering_required);
+        if (FLAG_trace_fragmentation) {
+          PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
+                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
+        }
+
+        // Important: the skip list should be cleared only after the roots
+        // have been updated, because root iteration traverses the stack and
+        // might have to find code objects from a non-updated pc pointing
+        // into an evacuation candidate.
+        SkipList* list = p->skip_list();
+        if (list != NULL) list->Clear();
+      } else {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
+
+        switch (space->identity()) {
+          case OLD_DATA_SPACE:
+            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
+            break;
+          case OLD_POINTER_SPACE:
+            Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                  IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
+            break;
+          case CODE_SPACE:
+            if (FLAG_zap_code_space) {
+              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                    REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
+                                                       &updating_visitor);
+            } else {
+              Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
+                    REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
+                                                          &updating_visitor);
+            }
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+    }
+  }
+
+  GCTracer::Scope gc_scope(heap()->tracer(),
+                           GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
+
+  // Update pointers from cells.
+  HeapObjectIterator cell_iterator(heap_->cell_space());
+  for (HeapObject* cell = cell_iterator.Next(); cell != NULL;
+       cell = cell_iterator.Next()) {
+    if (cell->IsCell()) {
+      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
+    }
+  }
+
+  HeapObjectIterator js_global_property_cell_iterator(
+      heap_->property_cell_space());
+  for (HeapObject* cell = js_global_property_cell_iterator.Next(); cell != NULL;
+       cell = js_global_property_cell_iterator.Next()) {
+    if (cell->IsPropertyCell()) {
+      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
+    }
+  }
+
+  heap_->string_table()->Iterate(&updating_visitor);
+  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
+  if (heap_->weak_object_to_code_table()->IsHashTable()) {
+    WeakHashTable* table =
+        WeakHashTable::cast(heap_->weak_object_to_code_table());
+    table->Iterate(&updating_visitor);
+    table->Rehash(heap_->isolate()->factory()->undefined_value());
+  }
+
+  // Update pointers from external string table.
+  heap_->UpdateReferencesInExternalStringTable(
+      &UpdateReferenceInExternalStringTableEntry);
+
+  EvacuationWeakObjectRetainer evacuation_object_retainer;
+  heap()->ProcessWeakReferences(&evacuation_object_retainer);
+
+  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
+  // under it.
+  ProcessInvalidatedCode(&updating_visitor);
+
+  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
+
+  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
+  DCHECK(migration_slots_buffer_ == NULL);
+}
+
+
+void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    p->Unlink();
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    p->InsertAfter(space->LastPage());
+  }
+}
+
+
+void MarkCompactCollector::ReleaseEvacuationCandidates() {
+  int npages = evacuation_candidates_.length();
+  for (int i = 0; i < npages; i++) {
+    Page* p = evacuation_candidates_[i];
+    if (!p->IsEvacuationCandidate()) continue;
+    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
+    space->Free(p->area_start(), p->area_size());
+    p->set_scan_on_scavenge(false);
+    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
+    p->ResetLiveBytes();
+    space->ReleasePage(p);
+  }
+  evacuation_candidates_.Rewind(0);
+  compacting_ = false;
+  heap()->FreeQueuedChunks();
+}
+
+
+static const int kStartTableEntriesPerLine = 5;
+static const int kStartTableLines = 171;
+static const int kStartTableInvalidLine = 127;
+static const int kStartTableUnusedEntry = 126;
+
+#define _ kStartTableUnusedEntry
+#define X kStartTableInvalidLine
+// Mark-bit to object start offset table.
+//
+// A line is indexed by the mark bits in a byte.  The first number on the
+// line is the number of live object starts for that bit pattern, and the
+// remaining numbers are the offsets (in words) of those object starts.
+//
+// Since objects are at least two words long we have no entries for two
+// consecutive 1 bits.  All byte values above 170 contain at least two
+// consecutive 1 bits.
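+//
+// For example, the byte 0b00010101 (= 21) has live object starts at word
+// offsets 0, 2 and 4, so its line below reads: 3, 0, 2, 4, _.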
+char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
+    0, _, _, _, _,  // 0
+    1, 0, _, _, _,  // 1
+    1, 1, _, _, _,  // 2
+    X, _, _, _, _,  // 3
+    1, 2, _, _, _,  // 4
+    2, 0, 2, _, _,  // 5
+    X, _, _, _, _,  // 6
+    X, _, _, _, _,  // 7
+    1, 3, _, _, _,  // 8
+    2, 0, 3, _, _,  // 9
+    2, 1, 3, _, _,  // 10
+    X, _, _, _, _,  // 11
+    X, _, _, _, _,  // 12
+    X, _, _, _, _,  // 13
+    X, _, _, _, _,  // 14
+    X, _, _, _, _,  // 15
+    1, 4, _, _, _,  // 16
+    2, 0, 4, _, _,  // 17
+    2, 1, 4, _, _,  // 18
+    X, _, _, _, _,  // 19
+    2, 2, 4, _, _,  // 20
+    3, 0, 2, 4, _,  // 21
+    X, _, _, _, _,  // 22
+    X, _, _, _, _,  // 23
+    X, _, _, _, _,  // 24
+    X, _, _, _, _,  // 25
+    X, _, _, _, _,  // 26
+    X, _, _, _, _,  // 27
+    X, _, _, _, _,  // 28
+    X, _, _, _, _,  // 29
+    X, _, _, _, _,  // 30
+    X, _, _, _, _,  // 31
+    1, 5, _, _, _,  // 32
+    2, 0, 5, _, _,  // 33
+    2, 1, 5, _, _,  // 34
+    X, _, _, _, _,  // 35
+    2, 2, 5, _, _,  // 36
+    3, 0, 2, 5, _,  // 37
+    X, _, _, _, _,  // 38
+    X, _, _, _, _,  // 39
+    2, 3, 5, _, _,  // 40
+    3, 0, 3, 5, _,  // 41
+    3, 1, 3, 5, _,  // 42
+    X, _, _, _, _,  // 43
+    X, _, _, _, _,  // 44
+    X, _, _, _, _,  // 45
+    X, _, _, _, _,  // 46
+    X, _, _, _, _,  // 47
+    X, _, _, _, _,  // 48
+    X, _, _, _, _,  // 49
+    X, _, _, _, _,  // 50
+    X, _, _, _, _,  // 51
+    X, _, _, _, _,  // 52
+    X, _, _, _, _,  // 53
+    X, _, _, _, _,  // 54
+    X, _, _, _, _,  // 55
+    X, _, _, _, _,  // 56
+    X, _, _, _, _,  // 57
+    X, _, _, _, _,  // 58
+    X, _, _, _, _,  // 59
+    X, _, _, _, _,  // 60
+    X, _, _, _, _,  // 61
+    X, _, _, _, _,  // 62
+    X, _, _, _, _,  // 63
+    1, 6, _, _, _,  // 64
+    2, 0, 6, _, _,  // 65
+    2, 1, 6, _, _,  // 66
+    X, _, _, _, _,  // 67
+    2, 2, 6, _, _,  // 68
+    3, 0, 2, 6, _,  // 69
+    X, _, _, _, _,  // 70
+    X, _, _, _, _,  // 71
+    2, 3, 6, _, _,  // 72
+    3, 0, 3, 6, _,  // 73
+    3, 1, 3, 6, _,  // 74
+    X, _, _, _, _,  // 75
+    X, _, _, _, _,  // 76
+    X, _, _, _, _,  // 77
+    X, _, _, _, _,  // 78
+    X, _, _, _, _,  // 79
+    2, 4, 6, _, _,  // 80
+    3, 0, 4, 6, _,  // 81
+    3, 1, 4, 6, _,  // 82
+    X, _, _, _, _,  // 83
+    3, 2, 4, 6, _,  // 84
+    4, 0, 2, 4, 6,  // 85
+    X, _, _, _, _,  // 86
+    X, _, _, _, _,  // 87
+    X, _, _, _, _,  // 88
+    X, _, _, _, _,  // 89
+    X, _, _, _, _,  // 90
+    X, _, _, _, _,  // 91
+    X, _, _, _, _,  // 92
+    X, _, _, _, _,  // 93
+    X, _, _, _, _,  // 94
+    X, _, _, _, _,  // 95
+    X, _, _, _, _,  // 96
+    X, _, _, _, _,  // 97
+    X, _, _, _, _,  // 98
+    X, _, _, _, _,  // 99
+    X, _, _, _, _,  // 100
+    X, _, _, _, _,  // 101
+    X, _, _, _, _,  // 102
+    X, _, _, _, _,  // 103
+    X, _, _, _, _,  // 104
+    X, _, _, _, _,  // 105
+    X, _, _, _, _,  // 106
+    X, _, _, _, _,  // 107
+    X, _, _, _, _,  // 108
+    X, _, _, _, _,  // 109
+    X, _, _, _, _,  // 110
+    X, _, _, _, _,  // 111
+    X, _, _, _, _,  // 112
+    X, _, _, _, _,  // 113
+    X, _, _, _, _,  // 114
+    X, _, _, _, _,  // 115
+    X, _, _, _, _,  // 116
+    X, _, _, _, _,  // 117
+    X, _, _, _, _,  // 118
+    X, _, _, _, _,  // 119
+    X, _, _, _, _,  // 120
+    X, _, _, _, _,  // 121
+    X, _, _, _, _,  // 122
+    X, _, _, _, _,  // 123
+    X, _, _, _, _,  // 124
+    X, _, _, _, _,  // 125
+    X, _, _, _, _,  // 126
+    X, _, _, _, _,  // 127
+    1, 7, _, _, _,  // 128
+    2, 0, 7, _, _,  // 129
+    2, 1, 7, _, _,  // 130
+    X, _, _, _, _,  // 131
+    2, 2, 7, _, _,  // 132
+    3, 0, 2, 7, _,  // 133
+    X, _, _, _, _,  // 134
+    X, _, _, _, _,  // 135
+    2, 3, 7, _, _,  // 136
+    3, 0, 3, 7, _,  // 137
+    3, 1, 3, 7, _,  // 138
+    X, _, _, _, _,  // 139
+    X, _, _, _, _,  // 140
+    X, _, _, _, _,  // 141
+    X, _, _, _, _,  // 142
+    X, _, _, _, _,  // 143
+    2, 4, 7, _, _,  // 144
+    3, 0, 4, 7, _,  // 145
+    3, 1, 4, 7, _,  // 146
+    X, _, _, _, _,  // 147
+    3, 2, 4, 7, _,  // 148
+    4, 0, 2, 4, 7,  // 149
+    X, _, _, _, _,  // 150
+    X, _, _, _, _,  // 151
+    X, _, _, _, _,  // 152
+    X, _, _, _, _,  // 153
+    X, _, _, _, _,  // 154
+    X, _, _, _, _,  // 155
+    X, _, _, _, _,  // 156
+    X, _, _, _, _,  // 157
+    X, _, _, _, _,  // 158
+    X, _, _, _, _,  // 159
+    2, 5, 7, _, _,  // 160
+    3, 0, 5, 7, _,  // 161
+    3, 1, 5, 7, _,  // 162
+    X, _, _, _, _,  // 163
+    3, 2, 5, 7, _,  // 164
+    4, 0, 2, 5, 7,  // 165
+    X, _, _, _, _,  // 166
+    X, _, _, _, _,  // 167
+    3, 3, 5, 7, _,  // 168
+    4, 0, 3, 5, 7,  // 169
+    4, 1, 3, 5, 7  // 170
+};
+#undef _
+#undef X
+
+
+// Takes a word of mark bits and returns the number of objects that start in
+// the range.  Puts the word offsets of the object starts in the supplied
+// array.
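+//
+// For illustration, mark_bits == 0x150009 is processed one byte at a time:
+// byte 0 is 0x09 (table line 9: starts at word offsets 0 and 3), byte 1 is
+// 0x00 (no starts) and byte 2 is 0x15 (table line 21: starts at offsets 0, 2
+// and 4, shifted by 16 words), so starts is filled with {0, 3, 16, 18, 20}
+// and 5 is returned.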
+static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
+  int objects = 0;
+  int offset = 0;
+
+  // No consecutive 1 bits across byte boundaries.
+  DCHECK((mark_bits & 0x180) != 0x180);
+  DCHECK((mark_bits & 0x18000) != 0x18000);
+  DCHECK((mark_bits & 0x1800000) != 0x1800000);
+
+  while (mark_bits != 0) {
+    int byte = (mark_bits & 0xff);
+    mark_bits >>= 8;
+    if (byte != 0) {
+      DCHECK(byte < kStartTableLines);  // No consecutive 1 bits.
+      char* table = kStartTable + byte * kStartTableEntriesPerLine;
+      int objects_in_these_8_words = table[0];
+      DCHECK(objects_in_these_8_words != kStartTableInvalidLine);
+      DCHECK(objects_in_these_8_words < kStartTableEntriesPerLine);
+      for (int i = 0; i < objects_in_these_8_words; i++) {
+        starts[objects++] = offset + table[1 + i];
+      }
+    }
+    offset += 8;
+  }
+  return objects;
+}
+
+
+int MarkCompactCollector::SweepInParallel(PagedSpace* space,
+                                          int required_freed_bytes) {
+  int max_freed = 0;
+  int max_freed_overall = 0;
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    max_freed = SweepInParallel(p, space);
+    DCHECK(max_freed >= 0);
+    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
+      return max_freed;
+    }
+    max_freed_overall = Max(max_freed, max_freed_overall);
+    if (p == space->end_of_unswept_pages()) break;
+  }
+  return max_freed_overall;
+}
+
+
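+// Sweeps a single page if this thread can claim it via TryParallelSweeping().
+// Freed blocks are accumulated in a private free list that is merged into the
+// space's shared list in one Concatenate() call, presumably so that
+// concurrent sweepers do not contend on the shared list while sweeping.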
+int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
+  int max_freed = 0;
+  if (page->TryParallelSweeping()) {
+    FreeList* free_list = space == heap()->old_pointer_space()
+                              ? free_list_old_pointer_space_.get()
+                              : free_list_old_data_space_.get();
+    FreeList private_free_list(space);
+    max_freed = Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
+                      IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
+    free_list->Concatenate(&private_free_list);
+  }
+  return max_freed;
+}
+
+
+void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
+  space->ClearStats();
+
+  // We defensively initialize end_of_unswept_pages_ here with the first page
+  // of the pages list.
+  space->set_end_of_unswept_pages(space->FirstPage());
+
+  PageIterator it(space);
+
+  int pages_swept = 0;
+  bool unused_page_present = false;
+  bool parallel_sweeping_active = false;
+
+  while (it.has_next()) {
+    Page* p = it.next();
+    DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+
+    // Clear the sweeping flags to indicate that the marking bits are still
+    // intact.
+    p->ClearWasSwept();
+
+    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
+        p->IsEvacuationCandidate()) {
+      // Will be processed in EvacuateNewSpaceAndCandidates.
+      DCHECK(evacuation_candidates_.length() > 0);
+      continue;
+    }
+
+    // One unused page is kept; all further unused pages are released rather
+    // than swept.
+    if (p->LiveBytes() == 0) {
+      if (unused_page_present) {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
+                 reinterpret_cast<intptr_t>(p));
+        }
+        // Adjust the unswept free bytes counter, because releasing a page
+        // expects that counter to be accurate for unswept pages.
+        space->IncreaseUnsweptFreeBytes(p);
+        space->ReleasePage(p);
+        continue;
+      }
+      unused_page_present = true;
+    }
+
+    switch (sweeper) {
+      case CONCURRENT_SWEEPING:
+      case PARALLEL_SWEEPING:
+        if (!parallel_sweeping_active) {
+          if (FLAG_gc_verbose) {
+            PrintF("Sweeping 0x%" V8PRIxPTR ".\n",
+                   reinterpret_cast<intptr_t>(p));
+          }
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+          pages_swept++;
+          parallel_sweeping_active = true;
+        } else {
+          if (FLAG_gc_verbose) {
+            PrintF("Sweeping 0x%" V8PRIxPTR " in parallel.\n",
+                   reinterpret_cast<intptr_t>(p));
+          }
+          p->set_parallel_sweeping(MemoryChunk::SWEEPING_PENDING);
+          space->IncreaseUnsweptFreeBytes(p);
+        }
+        space->set_end_of_unswept_pages(p);
+        break;
+      case SEQUENTIAL_SWEEPING: {
+        if (FLAG_gc_verbose) {
+          PrintF("Sweeping 0x%" V8PRIxPTR ".\n", reinterpret_cast<intptr_t>(p));
+        }
+        if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                ZAP_FREE_SPACE>(space, NULL, p, NULL);
+        } else if (space->identity() == CODE_SPACE) {
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+        } else {
+          Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
+                IGNORE_FREE_SPACE>(space, NULL, p, NULL);
+        }
+        pages_swept++;
+        break;
+      }
+      default: { UNREACHABLE(); }
+    }
+  }
+
+  if (FLAG_gc_verbose) {
+    PrintF("SweepSpace: %s (%d pages swept)\n",
+           AllocationSpaceName(space->identity()), pages_swept);
+  }
+
+  // Give pages that are queued to be freed back to the OS.
+  heap()->FreeQueuedChunks();
+}
+
+
+static bool ShouldStartSweeperThreads(MarkCompactCollector::SweeperType type) {
+  return type == MarkCompactCollector::PARALLEL_SWEEPING ||
+         type == MarkCompactCollector::CONCURRENT_SWEEPING;
+}
+
+
+static bool ShouldWaitForSweeperThreads(
+    MarkCompactCollector::SweeperType type) {
+  return type == MarkCompactCollector::PARALLEL_SWEEPING;
+}
+
+
+void MarkCompactCollector::SweepSpaces() {
+  GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_SWEEP);
+  double start_time = 0.0;
+  if (FLAG_print_cumulative_gc_stat) {
+    start_time = base::OS::TimeCurrentMillis();
+  }
+
+#ifdef DEBUG
+  state_ = SWEEP_SPACES;
+#endif
+  SweeperType how_to_sweep = CONCURRENT_SWEEPING;
+  if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_SWEEPING;
+  if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_SWEEPING;
+
+  MoveEvacuationCandidatesToEndOfPagesList();
+
+  // Noncompacting collections simply sweep the spaces to clear the mark
+  // bits and free the nonlive blocks (for old and map spaces).  We sweep
+  // the map space last because freeing non-live maps overwrites them and
+  // the other spaces rely on possibly non-live maps to get the sizes for
+  // non-live objects.
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_OLDSPACE);
+    {
+      SequentialSweepingScope scope(this);
+      SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+      SweepSpace(heap()->old_data_space(), how_to_sweep);
+    }
+
+    if (ShouldStartSweeperThreads(how_to_sweep)) {
+      StartSweeperThreads();
+    }
+
+    if (ShouldWaitForSweeperThreads(how_to_sweep)) {
+      EnsureSweepingCompleted();
+    }
+  }
+  RemoveDeadInvalidatedCode();
+
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_CODE);
+    SweepSpace(heap()->code_space(), SEQUENTIAL_SWEEPING);
+  }
+
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_CELL);
+    SweepSpace(heap()->cell_space(), SEQUENTIAL_SWEEPING);
+    SweepSpace(heap()->property_cell_space(), SEQUENTIAL_SWEEPING);
+  }
+
+  EvacuateNewSpaceAndCandidates();
+
+  // ClearNonLiveTransitions depends on precise sweeping of map space to
+  // detect whether unmarked map became dead in this collection or in one
+  // of the previous ones.
+  {
+    GCTracer::Scope sweep_scope(heap()->tracer(),
+                                GCTracer::Scope::MC_SWEEP_MAP);
+    SweepSpace(heap()->map_space(), SEQUENTIAL_SWEEPING);
+  }
+
+  // Deallocate unmarked objects and clear marked bits for marked objects.
+  heap_->lo_space()->FreeUnmarkedObjects();
+
+  // Deallocate evacuated candidate pages.
+  ReleaseEvacuationCandidates();
+
+  if (FLAG_print_cumulative_gc_stat) {
+    heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
+                                     start_time);
+  }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
+  PageIterator it(space);
+  while (it.has_next()) {
+    Page* p = it.next();
+    if (p->parallel_sweeping() == MemoryChunk::SWEEPING_FINALIZE) {
+      p->set_parallel_sweeping(MemoryChunk::SWEEPING_DONE);
+      p->SetWasSwept();
+    }
+    DCHECK(p->parallel_sweeping() == MemoryChunk::SWEEPING_DONE);
+  }
+}
+
+
+void MarkCompactCollector::ParallelSweepSpacesComplete() {
+  ParallelSweepSpaceComplete(heap()->old_pointer_space());
+  ParallelSweepSpaceComplete(heap()->old_data_space());
+}
+
+
+void MarkCompactCollector::EnableCodeFlushing(bool enable) {
+  if (isolate()->debug()->is_loaded() ||
+      isolate()->debug()->has_break_points()) {
+    enable = false;
+  }
+
+  if (enable) {
+    if (code_flusher_ != NULL) return;
+    code_flusher_ = new CodeFlusher(isolate());
+  } else {
+    if (code_flusher_ == NULL) return;
+    code_flusher_->EvictAllCandidates();
+    delete code_flusher_;
+    code_flusher_ = NULL;
+  }
+
+  if (FLAG_trace_code_flushing) {
+    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
+  }
+}
+
+
+// TODO(1466): ReportDeleteIfNeeded is currently not called.
+// Our profiling tools do not expect intersections between
+// code objects. We should either re-enable it or change our tools.
+void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
+                                                Isolate* isolate) {
+  if (obj->IsCode()) {
+    PROFILE(isolate, CodeDeleteEvent(obj->address()));
+  }
+}
+
+
+Isolate* MarkCompactCollector::isolate() const { return heap_->isolate(); }
+
+
+void MarkCompactCollector::Initialize() {
+  MarkCompactMarkingVisitor::Initialize();
+  IncrementalMarking::Initialize();
+}
+
+
+bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
+  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
+}
+
+
+bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
+                        SlotsBuffer** buffer_address, SlotType type,
+                        Address addr, AdditionMode mode) {
+  SlotsBuffer* buffer = *buffer_address;
+  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
+    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+      allocator->DeallocateChain(buffer_address);
+      return false;
+    }
+    buffer = allocator->AllocateBuffer(buffer);
+    *buffer_address = buffer;
+  }
+  DCHECK(buffer->HasSpaceForTypedSlot());
+  buffer->Add(reinterpret_cast<ObjectSlot>(type));
+  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
+  return true;
+}
+
+
+static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
+  if (RelocInfo::IsCodeTarget(rmode)) {
+    return SlotsBuffer::CODE_TARGET_SLOT;
+  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
+    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
+  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
+    return SlotsBuffer::DEBUG_TARGET_SLOT;
+  } else if (RelocInfo::IsJSReturn(rmode)) {
+    return SlotsBuffer::JS_RETURN_SLOT;
+  }
+  UNREACHABLE();
+  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
+}
+
+
+void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  RelocInfo::Mode rmode = rinfo->rmode();
+  if (target_page->IsEvacuationCandidate() &&
+      (rinfo->host() == NULL ||
+       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
+    bool success;
+    if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
+      // This doesn't need to be typed since it is just a normal heap pointer.
+      Object** target_pointer =
+          reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          target_pointer, SlotsBuffer::FAIL_ON_OVERFLOW);
+    } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          SlotsBuffer::CODE_ENTRY_SLOT, rinfo->constant_pool_entry_address(),
+          SlotsBuffer::FAIL_ON_OVERFLOW);
+    } else {
+      success = SlotsBuffer::AddTo(
+          &slots_buffer_allocator_, target_page->slots_buffer_address(),
+          SlotTypeForRMode(rmode), rinfo->pc(), SlotsBuffer::FAIL_ON_OVERFLOW);
+    }
+    if (!success) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
+  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
+  if (target_page->IsEvacuationCandidate() &&
+      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
+    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
+                            target_page->slots_buffer_address(),
+                            SlotsBuffer::CODE_ENTRY_SLOT, slot,
+                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
+      EvictEvacuationCandidate(target_page);
+    }
+  }
+}
+
+
+void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
+  DCHECK(heap()->gc_state() == Heap::MARK_COMPACT);
+  if (is_compacting()) {
+    Code* host =
+        isolate()->inner_pointer_to_code_cache()->GcSafeFindCodeForInnerPointer(
+            pc);
+    MarkBit mark_bit = Marking::MarkBitFrom(host);
+    if (Marking::IsBlack(mark_bit)) {
+      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
+      RecordRelocSlot(&rinfo, target);
+    }
+  }
+}
+
+
+static inline SlotsBuffer::SlotType DecodeSlotType(
+    SlotsBuffer::ObjectSlot slot) {
+  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
+}
+
+
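+// Note that a typed slot occupies two consecutive buffer entries: the loop
+// below reads the slot type first and then advances slot_idx to read the
+// encoded address.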
+void SlotsBuffer::UpdateSlots(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      PointersUpdatingVisitor::UpdateSlot(heap, slot);
+    } else {
+      ++slot_idx;
+      DCHECK(slot_idx < idx_);
+      UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
+                 reinterpret_cast<Address>(slots_[slot_idx]));
+    }
+  }
+}
+
+
+void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
+  PointersUpdatingVisitor v(heap);
+
+  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
+    ObjectSlot slot = slots_[slot_idx];
+    if (!IsTypedSlot(slot)) {
+      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
+        PointersUpdatingVisitor::UpdateSlot(heap, slot);
+      }
+    } else {
+      ++slot_idx;
+      DCHECK(slot_idx < idx_);
+      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
+      if (!IsOnInvalidatedCodeObject(pc)) {
+        UpdateSlot(heap->isolate(), &v, DecodeSlotType(slot),
+                   reinterpret_cast<Address>(slots_[slot_idx]));
+      }
+    }
+  }
+}
+
+
+SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
+  return new SlotsBuffer(next_buffer);
+}
+
+
+void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
+  delete buffer;
+}
+
+
+void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
+  SlotsBuffer* buffer = *buffer_address;
+  while (buffer != NULL) {
+    SlotsBuffer* next_buffer = buffer->next();
+    DeallocateBuffer(buffer);
+    buffer = next_buffer;
+  }
+  *buffer_address = NULL;
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/mark-compact.h b/src/heap/mark-compact.h
new file mode 100644
index 0000000..c5087b4
--- /dev/null
+++ b/src/heap/mark-compact.h
@@ -0,0 +1,956 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_MARK_COMPACT_H_
+#define V8_HEAP_MARK_COMPACT_H_
+
+#include "src/base/bits.h"
+#include "src/heap/spaces.h"
+
+namespace v8 {
+namespace internal {
+
+// Callback function that returns whether an object is alive. The heap size
+// of the object is returned in *size. It optionally updates the offset
+// to the first live object on the page (only used for old and map objects).
+typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
+
+// Forward declarations.
+class CodeFlusher;
+class MarkCompactCollector;
+class MarkingVisitor;
+class RootMarkingVisitor;
+
+
+class Marking {
+ public:
+  explicit Marking(Heap* heap) : heap_(heap) {}
+
+  INLINE(static MarkBit MarkBitFrom(Address addr));
+
+  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
+    return MarkBitFrom(reinterpret_cast<Address>(obj));
+  }
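+
+  // Each object has two consecutive mark bits, (mark bit, next bit).  The
+  // four patterns used below are: 00 white, 10 black, 11 grey, and the
+  // impossible pattern 01.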
+
+  // Impossible markbits: 01
+  static const char* kImpossibleBitPattern;
+  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
+    return !mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  // Black markbits: 10 - this is required by the sweeper.
+  static const char* kBlackBitPattern;
+  INLINE(static bool IsBlack(MarkBit mark_bit)) {
+    return mark_bit.Get() && !mark_bit.Next().Get();
+  }
+
+  // White markbits: 00 - this is required by the mark bit clearer.
+  static const char* kWhiteBitPattern;
+  INLINE(static bool IsWhite(MarkBit mark_bit)) { return !mark_bit.Get(); }
+
+  // Grey markbits: 11
+  static const char* kGreyBitPattern;
+  INLINE(static bool IsGrey(MarkBit mark_bit)) {
+    return mark_bit.Get() && mark_bit.Next().Get();
+  }
+
+  INLINE(static void MarkBlack(MarkBit mark_bit)) {
+    mark_bit.Set();
+    mark_bit.Next().Clear();
+  }
+
+  INLINE(static void BlackToGrey(MarkBit markbit)) { markbit.Next().Set(); }
+
+  INLINE(static void WhiteToGrey(MarkBit markbit)) {
+    markbit.Set();
+    markbit.Next().Set();
+  }
+
+  INLINE(static void GreyToBlack(MarkBit markbit)) { markbit.Next().Clear(); }
+
+  INLINE(static void BlackToGrey(HeapObject* obj)) {
+    BlackToGrey(MarkBitFrom(obj));
+  }
+
+  INLINE(static void AnyToGrey(MarkBit markbit)) {
+    markbit.Set();
+    markbit.Next().Set();
+  }
+
+  void TransferMark(Address old_start, Address new_start);
+
+#ifdef DEBUG
+  enum ObjectColor {
+    BLACK_OBJECT,
+    WHITE_OBJECT,
+    GREY_OBJECT,
+    IMPOSSIBLE_COLOR
+  };
+
+  static const char* ColorName(ObjectColor color) {
+    switch (color) {
+      case BLACK_OBJECT:
+        return "black";
+      case WHITE_OBJECT:
+        return "white";
+      case GREY_OBJECT:
+        return "grey";
+      case IMPOSSIBLE_COLOR:
+        return "impossible";
+    }
+    return "error";
+  }
+
+  static ObjectColor Color(HeapObject* obj) {
+    return Color(Marking::MarkBitFrom(obj));
+  }
+
+  static ObjectColor Color(MarkBit mark_bit) {
+    if (IsBlack(mark_bit)) return BLACK_OBJECT;
+    if (IsWhite(mark_bit)) return WHITE_OBJECT;
+    if (IsGrey(mark_bit)) return GREY_OBJECT;
+    UNREACHABLE();
+    return IMPOSSIBLE_COLOR;
+  }
+#endif
+
+  // Returns true if the transferred color is black.
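+  // For example, transferring from a grey source (bits 11) first sets the
+  // destination's mark bit (it looks black so far), then sets its next bit
+  // as well, so the destination ends up grey and false is returned.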
+  INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
+    MarkBit from_mark_bit = MarkBitFrom(from);
+    MarkBit to_mark_bit = MarkBitFrom(to);
+    bool is_black = false;
+    if (from_mark_bit.Get()) {
+      to_mark_bit.Set();
+      is_black = true;  // Looks black so far.
+    }
+    if (from_mark_bit.Next().Get()) {
+      to_mark_bit.Next().Set();
+      is_black = false;  // Was actually grey.
+    }
+    return is_black;
+  }
+
+ private:
+  Heap* heap_;
+};
+
+// ----------------------------------------------------------------------------
+// Marking deque for tracing live objects.
+class MarkingDeque {
+ public:
+  MarkingDeque()
+      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) {}
+
+  void Initialize(Address low, Address high) {
+    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
+    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
+    array_ = obj_low;
+    mask_ = base::bits::RoundDownToPowerOfTwo32(
+                static_cast<uint32_t>(obj_high - obj_low)) -
+            1;
+    top_ = bottom_ = 0;
+    overflowed_ = false;
+  }
+
+  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
+
+  inline bool IsEmpty() { return top_ == bottom_; }
+
+  bool overflowed() const { return overflowed_; }
+
+  void ClearOverflowed() { overflowed_ = false; }
+
+  void SetOverflowed() { overflowed_ = true; }
+
+  // Push the (marked) object on the marking stack if there is room,
+  // otherwise mark the object as overflowed and wait for a rescan of the
+  // heap.
+  INLINE(void PushBlack(HeapObject* object)) {
+    DCHECK(object->IsHeapObject());
+    if (IsFull()) {
+      Marking::BlackToGrey(object);
+      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
+      SetOverflowed();
+    } else {
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
+    }
+  }
+
+  INLINE(void PushGrey(HeapObject* object)) {
+    DCHECK(object->IsHeapObject());
+    if (IsFull()) {
+      SetOverflowed();
+    } else {
+      array_[top_] = object;
+      top_ = ((top_ + 1) & mask_);
+    }
+  }
+
+  INLINE(HeapObject* Pop()) {
+    DCHECK(!IsEmpty());
+    top_ = ((top_ - 1) & mask_);
+    HeapObject* object = array_[top_];
+    DCHECK(object->IsHeapObject());
+    return object;
+  }
+
+  INLINE(void UnshiftGrey(HeapObject* object)) {
+    DCHECK(object->IsHeapObject());
+    if (IsFull()) {
+      SetOverflowed();
+    } else {
+      bottom_ = ((bottom_ - 1) & mask_);
+      array_[bottom_] = object;
+    }
+  }
+
+  HeapObject** array() { return array_; }
+  int bottom() { return bottom_; }
+  int top() { return top_; }
+  int mask() { return mask_; }
+  void set_top(int top) { top_ = top; }
+
+ private:
+  HeapObject** array_;
+  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
+  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
+  // (mod mask_ + 1).
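+  // For illustration: with mask_ == 7, a PushBlack() at top_ == 7 stores
+  // into array_[7] and wraps top_ to 0; IsFull() reports full while one
+  // slot is still free, which keeps top_ from ever catching up with bottom_.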
+  int top_;
+  int bottom_;
+  int mask_;
+  bool overflowed_;
+
+  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
+};
+
+
+class SlotsBufferAllocator {
+ public:
+  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
+  void DeallocateBuffer(SlotsBuffer* buffer);
+
+  void DeallocateChain(SlotsBuffer** buffer_address);
+};
+
+
+// SlotsBuffer records a sequence of slots that have to be updated
+// after live objects have been relocated from evacuation candidates.
+// All slots are either untyped or typed:
+//    - Untyped slots are expected to contain a tagged object pointer.
+//      They are recorded by their address.
+//    - Typed slots are expected to contain an encoded pointer to a heap
+//      object, where the encoding depends on the type of the slot.
+//      They are recorded as a pair (SlotType, slot address).
+// We assume that the zero page is never mapped; this allows us to distinguish
+// untyped slots from typed slots during iteration by a simple comparison:
+// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, then
+// it is the first element of a typed slot's pair.
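+//
+// For example (with hypothetical addresses), a buffer holding
+//   { 0x2f4a10e0, CODE_TARGET_SLOT, 0x2f4a2144 }
+// contains one untyped slot followed by one typed slot: the first element is
+// an ordinary slot address, while the small value CODE_TARGET_SLOT starts a
+// (SlotType, slot address) pair.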
+class SlotsBuffer {
+ public:
+  typedef Object** ObjectSlot;
+
+  explicit SlotsBuffer(SlotsBuffer* next_buffer)
+      : idx_(0), chain_length_(1), next_(next_buffer) {
+    if (next_ != NULL) {
+      chain_length_ = next_->chain_length_ + 1;
+    }
+  }
+
+  ~SlotsBuffer() {}
+
+  void Add(ObjectSlot slot) {
+    DCHECK(0 <= idx_ && idx_ < kNumberOfElements);
+    slots_[idx_++] = slot;
+  }
+
+  enum SlotType {
+    EMBEDDED_OBJECT_SLOT,
+    RELOCATED_CODE_OBJECT,
+    CODE_TARGET_SLOT,
+    CODE_ENTRY_SLOT,
+    DEBUG_TARGET_SLOT,
+    JS_RETURN_SLOT,
+    NUMBER_OF_SLOT_TYPES
+  };
+
+  static const char* SlotTypeToString(SlotType type) {
+    switch (type) {
+      case EMBEDDED_OBJECT_SLOT:
+        return "EMBEDDED_OBJECT_SLOT";
+      case RELOCATED_CODE_OBJECT:
+        return "RELOCATED_CODE_OBJECT";
+      case CODE_TARGET_SLOT:
+        return "CODE_TARGET_SLOT";
+      case CODE_ENTRY_SLOT:
+        return "CODE_ENTRY_SLOT";
+      case DEBUG_TARGET_SLOT:
+        return "DEBUG_TARGET_SLOT";
+      case JS_RETURN_SLOT:
+        return "JS_RETURN_SLOT";
+      case NUMBER_OF_SLOT_TYPES:
+        return "NUMBER_OF_SLOT_TYPES";
+    }
+    return "UNKNOWN SlotType";
+  }
+
+  void UpdateSlots(Heap* heap);
+
+  void UpdateSlotsWithFilter(Heap* heap);
+
+  SlotsBuffer* next() { return next_; }
+
+  static int SizeOfChain(SlotsBuffer* buffer) {
+    if (buffer == NULL) return 0;
+    return static_cast<int>(buffer->idx_ +
+                            (buffer->chain_length_ - 1) * kNumberOfElements);
+  }
+
+  inline bool IsFull() { return idx_ == kNumberOfElements; }
+
+  inline bool HasSpaceForTypedSlot() { return idx_ < kNumberOfElements - 1; }
+
+  static void UpdateSlotsRecordedIn(Heap* heap, SlotsBuffer* buffer,
+                                    bool code_slots_filtering_required) {
+    while (buffer != NULL) {
+      if (code_slots_filtering_required) {
+        buffer->UpdateSlotsWithFilter(heap);
+      } else {
+        buffer->UpdateSlots(heap);
+      }
+      buffer = buffer->next();
+    }
+  }
+
+  enum AdditionMode { FAIL_ON_OVERFLOW, IGNORE_OVERFLOW };
+
+  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
+    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
+  }
+
+  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
+                           SlotsBuffer** buffer_address, ObjectSlot slot,
+                           AdditionMode mode)) {
+    SlotsBuffer* buffer = *buffer_address;
+    if (buffer == NULL || buffer->IsFull()) {
+      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
+        allocator->DeallocateChain(buffer_address);
+        return false;
+      }
+      buffer = allocator->AllocateBuffer(buffer);
+      *buffer_address = buffer;
+    }
+    buffer->Add(slot);
+    return true;
+  }
+
+  static bool IsTypedSlot(ObjectSlot slot);
+
+  static bool AddTo(SlotsBufferAllocator* allocator,
+                    SlotsBuffer** buffer_address, SlotType type, Address addr,
+                    AdditionMode mode);
+
+  static const int kNumberOfElements = 1021;
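+  // 1021 slots plus the three header words (idx_, chain_length_, next_) make
+  // a SlotsBuffer exactly 1024 words, presumably to keep allocations
+  // allocator-friendly.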
+
+ private:
+  static const int kChainLengthThreshold = 15;
+
+  intptr_t idx_;
+  intptr_t chain_length_;
+  SlotsBuffer* next_;
+  ObjectSlot slots_[kNumberOfElements];
+};
+
+
+// CodeFlusher collects candidates for code flushing during marking and
+// processes those candidates after marking has completed in order to
+// reset those functions referencing code objects that would otherwise
+// be unreachable. Code objects can be referenced in three ways:
+//    - SharedFunctionInfo references unoptimized code.
+//    - JSFunction references either unoptimized or optimized code.
+//    - OptimizedCodeMap references optimized code.
+// We are not allowed to flush unoptimized code for functions that were
+// optimized or inlined into optimized code, because we might bail out
+// into the unoptimized code again during deoptimization.
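+//
+// Candidate lists are threaded through the candidates themselves (via
+// next_function_link for JSFunction and code()->gc_metadata() for
+// SharedFunctionInfo; see the accessors below), so collecting candidates
+// allocates no extra memory during marking.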
+class CodeFlusher {
+ public:
+  explicit CodeFlusher(Isolate* isolate)
+      : isolate_(isolate),
+        jsfunction_candidates_head_(NULL),
+        shared_function_info_candidates_head_(NULL),
+        optimized_code_map_holder_head_(NULL) {}
+
+  void AddCandidate(SharedFunctionInfo* shared_info) {
+    if (GetNextCandidate(shared_info) == NULL) {
+      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
+      shared_function_info_candidates_head_ = shared_info;
+    }
+  }
+
+  void AddCandidate(JSFunction* function) {
+    DCHECK(function->code() == function->shared()->code());
+    if (GetNextCandidate(function)->IsUndefined()) {
+      SetNextCandidate(function, jsfunction_candidates_head_);
+      jsfunction_candidates_head_ = function;
+    }
+  }
+
+  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
+    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
+      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
+      optimized_code_map_holder_head_ = code_map_holder;
+    }
+  }
+
+  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
+  void EvictCandidate(SharedFunctionInfo* shared_info);
+  void EvictCandidate(JSFunction* function);
+
+  void ProcessCandidates() {
+    ProcessOptimizedCodeMaps();
+    ProcessSharedFunctionInfoCandidates();
+    ProcessJSFunctionCandidates();
+  }
+
+  void EvictAllCandidates() {
+    EvictOptimizedCodeMaps();
+    EvictJSFunctionCandidates();
+    EvictSharedFunctionInfoCandidates();
+  }
+
+  void IteratePointersToFromSpace(ObjectVisitor* v);
+
+ private:
+  void ProcessOptimizedCodeMaps();
+  void ProcessJSFunctionCandidates();
+  void ProcessSharedFunctionInfoCandidates();
+  void EvictOptimizedCodeMaps();
+  void EvictJSFunctionCandidates();
+  void EvictSharedFunctionInfoCandidates();
+
+  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
+    return reinterpret_cast<JSFunction**>(
+        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
+  }
+
+  static JSFunction* GetNextCandidate(JSFunction* candidate) {
+    Object* next_candidate = candidate->next_function_link();
+    return reinterpret_cast<JSFunction*>(next_candidate);
+  }
+
+  static void SetNextCandidate(JSFunction* candidate,
+                               JSFunction* next_candidate) {
+    candidate->set_next_function_link(next_candidate);
+  }
+
+  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
+    DCHECK(undefined->IsUndefined());
+    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
+  }
+
+  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
+    Object* next_candidate = candidate->code()->gc_metadata();
+    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
+  }
+
+  static void SetNextCandidate(SharedFunctionInfo* candidate,
+                               SharedFunctionInfo* next_candidate) {
+    candidate->code()->set_gc_metadata(next_candidate);
+  }
+
+  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
+    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
+  }
+
+  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
+    return reinterpret_cast<SharedFunctionInfo*>(next_map);
+  }
+
+  static void SetNextCodeMap(SharedFunctionInfo* holder,
+                             SharedFunctionInfo* next_holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
+  }
+
+  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
+    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
+    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
+  }
+
+  Isolate* isolate_;
+  JSFunction* jsfunction_candidates_head_;
+  SharedFunctionInfo* shared_function_info_candidates_head_;
+  SharedFunctionInfo* optimized_code_map_holder_head_;
+
+  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
+};
+
+
+// Defined in isolate.h.
+class ThreadLocalTop;
+
+
+// -------------------------------------------------------------------------
+// Mark-Compact collector
+class MarkCompactCollector {
+ public:
+  // Set the global flags; this must be called before Prepare to take effect.
+  inline void SetFlags(int flags);
+
+  static void Initialize();
+
+  void SetUp();
+
+  void TearDown();
+
+  void CollectEvacuationCandidates(PagedSpace* space);
+
+  void AddEvacuationCandidate(Page* p);
+
+  // Prepares for GC by resetting relocation info in old and map spaces and
+  // choosing spaces to compact.
+  void Prepare();
+
+  // Performs a global garbage collection.
+  void CollectGarbage();
+
+  enum CompactionMode { INCREMENTAL_COMPACTION, NON_INCREMENTAL_COMPACTION };
+
+  bool StartCompaction(CompactionMode mode);
+
+  void AbortCompaction();
+
+#ifdef DEBUG
+  // Checks whether a mark-compact collection is being performed.
+  bool in_use() { return state_ > PREPARE_GC; }
+  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
+#endif
+
+  // Determine type of object and emit deletion log event.
+  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
+
+  // Distinguishable invalid map encodings (for single word and multiple words)
+  // that indicate free regions.
+  static const uint32_t kSingleFreeEncoding = 0;
+  static const uint32_t kMultiFreeEncoding = 1;
+
+  static inline bool IsMarked(Object* obj);
+
+  inline Heap* heap() const { return heap_; }
+  inline Isolate* isolate() const;
+
+  CodeFlusher* code_flusher() { return code_flusher_; }
+  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
+  void EnableCodeFlushing(bool enable);
+
+  enum SweeperType {
+    PARALLEL_SWEEPING,
+    CONCURRENT_SWEEPING,
+    SEQUENTIAL_SWEEPING
+  };
+
+  enum SweepingParallelism { SWEEP_ON_MAIN_THREAD, SWEEP_IN_PARALLEL };
+
+#ifdef VERIFY_HEAP
+  void VerifyMarkbitsAreClean();
+  static void VerifyMarkbitsAreClean(PagedSpace* space);
+  static void VerifyMarkbitsAreClean(NewSpace* space);
+  void VerifyWeakEmbeddedObjectsInCode();
+  void VerifyOmittedMapChecks();
+#endif
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
+    return Page::FromAddress(reinterpret_cast<Address>(anchor))
+        ->ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
+    return Page::FromAddress(reinterpret_cast<Address>(host))
+        ->ShouldSkipEvacuationSlotRecording();
+  }
+
+  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
+    return Page::FromAddress(reinterpret_cast<Address>(obj))
+        ->IsEvacuationCandidate();
+  }
+
+  INLINE(void EvictEvacuationCandidate(Page* page)) {
+    if (FLAG_trace_fragmentation) {
+      PrintF("Page %p is too popular. Disabling evacuation.\n",
+             reinterpret_cast<void*>(page));
+    }
+
+    // TODO(gc) If all evacuation candidates are too popular we
+    // should stop slots recording entirely.
+    page->ClearEvacuationCandidate();
+
+    // We were not collecting slots on this page that point
+    // to other evacuation candidates thus we have to
+    // rescan the page after evacuation to discover and update all
+    // pointers to evacuated objects.
+    if (page->owner()->identity() == OLD_DATA_SPACE) {
+      evacuation_candidates_.RemoveElement(page);
+    } else {
+      page->SetFlag(Page::RESCAN_ON_EVACUATION);
+    }
+  }
+
+  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
+  void RecordCodeEntrySlot(Address slot, Code* target);
+  void RecordCodeTargetPatch(Address pc, Code* target);
+
+  INLINE(void RecordSlot(
+      Object** anchor_slot, Object** slot, Object* object,
+      SlotsBuffer::AdditionMode mode = SlotsBuffer::FAIL_ON_OVERFLOW));
+
+  void MigrateObject(HeapObject* dst, HeapObject* src, int size,
+                     AllocationSpace to_old_space);
+
+  bool TryPromoteObject(HeapObject* object, int object_size);
+
+  void InvalidateCode(Code* code);
+
+  void ClearMarkbits();
+
+  bool abort_incremental_marking() const { return abort_incremental_marking_; }
+
+  bool is_compacting() const { return compacting_; }
+
+  MarkingParity marking_parity() { return marking_parity_; }
+
+  // Concurrent and parallel sweeping support. If required_freed_bytes is set
+  // to a value larger than 0, then sweeping returns after a block of at least
+  // required_freed_bytes has been freed. If required_freed_bytes is zero, the
+  // whole given space is swept. Returns the size of the largest contiguous
+  // chunk of freed memory.
+  int SweepInParallel(PagedSpace* space, int required_freed_bytes);
+
+  // Sweeps a given page concurrently with the sweeper threads. Returns the
+  // size of the largest contiguous chunk of freed memory.
+  int SweepInParallel(Page* page, PagedSpace* space);
+
+  void EnsureSweepingCompleted();
+
+  // Returns true if the sweeper threads are not active; otherwise, returns
+  // true once the sweeper threads are done processing the pages. If treating
+  // inactive threads as done is a latency issue, we should be smarter here.
+  bool IsSweepingCompleted();
+
+  void RefillFreeList(PagedSpace* space);
+
+  bool AreSweeperThreadsActivated();
+
+  // Checks if sweeping is in progress right now on any space.
+  bool sweeping_in_progress() { return sweeping_in_progress_; }
+
+  void set_sequential_sweeping(bool sequential_sweeping) {
+    sequential_sweeping_ = sequential_sweeping;
+  }
+
+  bool sequential_sweeping() const { return sequential_sweeping_; }
+
+  // Mark the global table which maps weak objects to dependent code without
+  // marking its contents.
+  void MarkWeakObjectToCodeTable();
+
+  // Special case for processing weak references in a full collection. We need
+  // to artificially keep AllocationSites alive for a time.
+  void MarkAllocationSite(AllocationSite* site);
+
+ private:
+  class SweeperTask;
+
+  explicit MarkCompactCollector(Heap* heap);
+  ~MarkCompactCollector();
+
+  bool MarkInvalidatedCode();
+  bool WillBeDeoptimized(Code* code);
+  void RemoveDeadInvalidatedCode();
+  void ProcessInvalidatedCode(ObjectVisitor* visitor);
+
+  void StartSweeperThreads();
+
+#ifdef DEBUG
+  enum CollectorState {
+    IDLE,
+    PREPARE_GC,
+    MARK_LIVE_OBJECTS,
+    SWEEP_SPACES,
+    ENCODE_FORWARDING_ADDRESSES,
+    UPDATE_POINTERS,
+    RELOCATE_OBJECTS
+  };
+
+  // The current stage of the collector.
+  CollectorState state_;
+#endif
+
+  bool reduce_memory_footprint_;
+
+  bool abort_incremental_marking_;
+
+  MarkingParity marking_parity_;
+
+  // True if we are collecting slots to perform evacuation from evacuation
+  // candidates.
+  bool compacting_;
+
+  bool was_marked_incrementally_;
+
+  // True if concurrent or parallel sweeping is currently in progress.
+  bool sweeping_in_progress_;
+
+  base::Semaphore pending_sweeper_jobs_semaphore_;
+
+  bool sequential_sweeping_;
+
+  SlotsBufferAllocator slots_buffer_allocator_;
+
+  SlotsBuffer* migration_slots_buffer_;
+
+  // Finishes GC, performs heap verification if enabled.
+  void Finish();
+
+  // -----------------------------------------------------------------------
+  // Phase 1: Marking live objects.
+  //
+  //  Before: The heap has been prepared for garbage collection by
+  //          MarkCompactCollector::Prepare() and is otherwise in its
+  //          normal state.
+  //
+  //   After: Live objects are marked and non-live objects are unmarked.
+
+  friend class RootMarkingVisitor;
+  friend class MarkingVisitor;
+  friend class MarkCompactMarkingVisitor;
+  friend class CodeMarkingVisitor;
+  friend class SharedFunctionInfoMarkingVisitor;
+
+  // Mark code objects that are active on the stack to prevent them
+  // from being flushed.
+  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
+
+  void PrepareForCodeFlushing();
+
+  // Marking operations for objects reachable from roots.
+  void MarkLiveObjects();
+
+  void AfterMarking();
+
+  // Marks the object black and pushes it on the marking stack.
+  // This is for non-incremental marking only.
+  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
+
+  // Marks the object black assuming that it is not yet marked.
+  // This is for non-incremental marking only.
+  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
+
+  // Mark the heap roots and all objects reachable from them.
+  void MarkRoots(RootMarkingVisitor* visitor);
+
+  // Mark the string table specially.  References to internalized strings from
+  // the string table are weak.
+  void MarkStringTable(RootMarkingVisitor* visitor);
+
+  // Mark objects in implicit reference groups if their parent object
+  // is marked.
+  void MarkImplicitRefGroups();
+
+  // Mark objects reachable (transitively) from objects in the marking stack
+  // or overflowed in the heap.
+  void ProcessMarkingDeque();
+
+  // Mark objects reachable (transitively) from objects in the marking stack
+  // or overflowed in the heap.  This respects references only considered in
+  // the final atomic marking pause including the following:
+  //    - Processing of objects reachable through Harmony WeakMaps.
+  //    - Objects reachable due to host application logic, like object groups
+  //      or implicit reference groups.
+  void ProcessEphemeralMarking(ObjectVisitor* visitor);
+
+  // If the call-site of the top optimized code was not prepared for
+  // deoptimization, then treat the maps in the code as strong pointers,
+  // otherwise a map can die and deoptimize the code.
+  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
+
+  // Mark objects reachable (transitively) from objects in the marking
+  // stack.  This function empties the marking stack, but may leave
+  // overflowed objects in the heap, in which case the marking stack's
+  // overflow flag will be set.
+  void EmptyMarkingDeque();
+
+  // Refill the marking stack with overflowed objects from the heap.  This
+  // function either leaves the marking stack full or clears the overflow
+  // flag on the marking stack.
+  void RefillMarkingDeque();
+
+  // After reachable maps have been marked, process the per-context object
+  // literal map caches, removing unmarked entries.
+  void ProcessMapCaches();
+
+  // Callback function for telling whether the object *p is an unmarked
+  // heap object.
+  static bool IsUnmarkedHeapObject(Object** p);
+  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
+
+  // Map transitions from a live map to a dead map must be killed.
+  // We replace them with a null descriptor, with the same key.
+  void ClearNonLiveReferences();
+  void ClearNonLivePrototypeTransitions(Map* map);
+  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
+  void ClearMapTransitions(Map* map);
+  bool ClearMapBackPointer(Map* map);
+  void TrimDescriptorArray(Map* map, DescriptorArray* descriptors,
+                           int number_of_own_descriptors);
+  void TrimEnumCache(Map* map, DescriptorArray* descriptors);
+
+  void ClearDependentCode(DependentCode* dependent_code);
+  void ClearDependentICList(Object* head);
+  void ClearNonLiveDependentCode(DependentCode* dependent_code);
+  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
+                                       int start, int end, int new_start);
+
+  // Mark all values associated with reachable keys in weak collections
+  // encountered so far.  This might push new object or even new weak maps onto
+  // the marking stack.
+  void ProcessWeakCollections();
+
+  // After all reachable objects have been marked those weak map entries
+  // with an unreachable key are removed from all encountered weak maps.
+  // The linked list of all encountered weak maps is destroyed.
+  void ClearWeakCollections();
+
+  // We have to remove all encountered weak maps from the list of weak
+  // collections when incremental marking is aborted.
+  void AbortWeakCollections();
+
+  // -----------------------------------------------------------------------
+  // Phase 2: Sweeping to clear mark bits and free non-live objects for
+  // a non-compacting collection.
+  //
+  //  Before: Live objects are marked and non-live objects are unmarked.
+  //
+  //   After: Live objects are unmarked, non-live regions have been added to
+  //          their space's free list. Active eden semispace is compacted by
+  //          evacuation.
+  //
+
+  // If we are not compacting the heap, we simply sweep the spaces except
+  // for the large object space, clearing mark bits and adding unmarked
+  // regions to each space's free list.
+  void SweepSpaces();
+
+  int DiscoverAndEvacuateBlackObjectsOnPage(NewSpace* new_space,
+                                            NewSpacePage* p);
+
+  void EvacuateNewSpace();
+
+  void EvacuateLiveObjectsFromPage(Page* p);
+
+  void EvacuatePages();
+
+  void EvacuateNewSpaceAndCandidates();
+
+  void ReleaseEvacuationCandidates();
+
+  // Moves the pages in the evacuation_candidates_ list to the end of their
+  // corresponding space's page list.
+  void MoveEvacuationCandidatesToEndOfPagesList();
+
+  void SweepSpace(PagedSpace* space, SweeperType sweeper);
+
+  // Finalizes the parallel sweeping phase. Marks all the pages that were
+  // swept in parallel.
+  void ParallelSweepSpacesComplete();
+
+  void ParallelSweepSpaceComplete(PagedSpace* space);
+
+  // Updates store buffer and slot buffer for a pointer in a migrating object.
+  void RecordMigratedSlot(Object* value, Address slot);
+
+#ifdef DEBUG
+  friend class MarkObjectVisitor;
+  static void VisitObject(HeapObject* obj);
+
+  friend class UnmarkObjectVisitor;
+  static void UnmarkObject(HeapObject* obj);
+#endif
+
+  Heap* heap_;
+  MarkingDeque marking_deque_;
+  CodeFlusher* code_flusher_;
+  bool have_code_to_deoptimize_;
+
+  List<Page*> evacuation_candidates_;
+  List<Code*> invalidated_code_;
+
+  SmartPointer<FreeList> free_list_old_data_space_;
+  SmartPointer<FreeList> free_list_old_pointer_space_;
+
+  friend class Heap;
+};
+
+
+class MarkBitCellIterator BASE_EMBEDDED {
+ public:
+  explicit MarkBitCellIterator(MemoryChunk* chunk) : chunk_(chunk) {
+    last_cell_index_ = Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+        chunk_->AddressToMarkbitIndex(chunk_->area_end())));
+    cell_base_ = chunk_->area_start();
+    cell_index_ = Bitmap::IndexToCell(
+        Bitmap::CellAlignIndex(chunk_->AddressToMarkbitIndex(cell_base_)));
+    cells_ = chunk_->markbits()->cells();
+  }
+
+  inline bool Done() { return cell_index_ == last_cell_index_; }
+
+  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
+
+  inline MarkBit::CellType* CurrentCell() {
+    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+                              chunk_->AddressToMarkbitIndex(cell_base_))));
+    return &cells_[cell_index_];
+  }
+
+  inline Address CurrentCellBase() {
+    DCHECK(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
+                              chunk_->AddressToMarkbitIndex(cell_base_))));
+    return cell_base_;
+  }
+
+  inline void Advance() {
+    cell_index_++;
+    cell_base_ += 32 * kPointerSize;
+  }
+
+ private:
+  MemoryChunk* chunk_;
+  MarkBit::CellType* cells_;
+  unsigned int last_cell_index_;
+  unsigned int cell_index_;
+  Address cell_base_;
+};
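+
+// Typical iteration over a chunk's mark-bit cells (a minimal sketch):
+//
+//   for (MarkBitCellIterator it(chunk); !it.Done(); it.Advance()) {
+//     MarkBit::CellType* cell = it.CurrentCell();
+//     Address base = it.CurrentCellBase();  // covers 32 * kPointerSize bytes
+//     // ... examine the 32 mark bits in *cell ...
+//   }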
+
+
+class SequentialSweepingScope BASE_EMBEDDED {
+ public:
+  explicit SequentialSweepingScope(MarkCompactCollector* collector)
+      : collector_(collector) {
+    collector_->set_sequential_sweeping(true);
+  }
+
+  ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); }
+
+ private:
+  MarkCompactCollector* collector_;
+};
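+
+// SequentialSweepingScope is an RAII guard; see its use in SweepSpaces():
+//
+//   {
+//     SequentialSweepingScope scope(this);
+//     SweepSpace(heap()->old_pointer_space(), how_to_sweep);
+//     SweepSpace(heap()->old_data_space(), how_to_sweep);
+//   }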
+
+
+const char* AllocationSpaceName(AllocationSpace space);
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_MARK_COMPACT_H_
diff --git a/src/heap/objects-visiting-inl.h b/src/heap/objects-visiting-inl.h
new file mode 100644
index 0000000..d220118
--- /dev/null
+++ b/src/heap/objects-visiting-inl.h
@@ -0,0 +1,934 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_VISITING_INL_H_
+#define V8_OBJECTS_VISITING_INL_H_
+
+
+namespace v8 {
+namespace internal {
+
+template <typename StaticVisitor>
+void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
+  table_.Register(
+      kVisitShortcutCandidate,
+      &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
+
+  table_.Register(
+      kVisitConsString,
+      &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor, int>::Visit);
+
+  table_.Register(kVisitSlicedString,
+                  &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
+                                    int>::Visit);
+
+  table_.Register(
+      kVisitSymbol,
+      &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, int>::Visit);
+
+  table_.Register(kVisitFixedArray,
+                  &FlexibleBodyVisitor<StaticVisitor,
+                                       FixedArray::BodyDescriptor, int>::Visit);
+
+  table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
+  table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray);
+  table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray);
+
+  table_.Register(
+      kVisitNativeContext,
+      &FixedBodyVisitor<StaticVisitor, Context::ScavengeBodyDescriptor,
+                        int>::Visit);
+
+  table_.Register(kVisitByteArray, &VisitByteArray);
+
+  table_.Register(
+      kVisitSharedFunctionInfo,
+      &FixedBodyVisitor<StaticVisitor, SharedFunctionInfo::BodyDescriptor,
+                        int>::Visit);
+
+  table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
+
+  table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
+
+  table_.Register(kVisitJSFunction, &VisitJSFunction);
+
+  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+  table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
+  table_.Register(kVisitJSDataView, &VisitJSDataView);
+
+  table_.Register(kVisitFreeSpace, &VisitFreeSpace);
+
+  table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
+
+  table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
+
+  table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject,
+                                          kVisitDataObjectGeneric>();
+
+  table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject,
+                                          kVisitJSObjectGeneric>();
+  table_.template RegisterSpecializations<StructVisitor, kVisitStruct,
+                                          kVisitStructGeneric>();
+}
+
+
+template <typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  STATIC_ASSERT(JSArrayBuffer::kWeakFirstViewOffset ==
+                JSArrayBuffer::kWeakNextOffset + kPointerSize);
+  VisitPointers(heap, HeapObject::RawField(
+                          object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+                HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+  VisitPointers(
+      heap, HeapObject::RawField(
+                object, JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+      HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+  return JSArrayBuffer::kSizeWithInternalFields;
+}
+
+
+template <typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
+    Map* map, HeapObject* object) {
+  VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+  VisitPointers(
+      map->GetHeap(), HeapObject::RawField(
+                          object, JSTypedArray::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
+  return JSTypedArray::kSizeWithInternalFields;
+}
+
+
+template <typename StaticVisitor>
+int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(Map* map,
+                                                          HeapObject* object) {
+  VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+  VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
+  return JSDataView::kSizeWithInternalFields;
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::Initialize() {
+  table_.Register(kVisitShortcutCandidate,
+                  &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
+                                    void>::Visit);
+
+  table_.Register(kVisitConsString,
+                  &FixedBodyVisitor<StaticVisitor, ConsString::BodyDescriptor,
+                                    void>::Visit);
+
+  table_.Register(kVisitSlicedString,
+                  &FixedBodyVisitor<StaticVisitor, SlicedString::BodyDescriptor,
+                                    void>::Visit);
+
+  table_.Register(
+      kVisitSymbol,
+      &FixedBodyVisitor<StaticVisitor, Symbol::BodyDescriptor, void>::Visit);
+
+  table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
+
+  table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
+
+  table_.Register(kVisitNativeContext, &VisitNativeContext);
+
+  table_.Register(kVisitAllocationSite, &VisitAllocationSite);
+
+  table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
+
+  table_.Register(kVisitJSWeakCollection, &VisitWeakCollection);
+
+  table_.Register(
+      kVisitOddball,
+      &FixedBodyVisitor<StaticVisitor, Oddball::BodyDescriptor, void>::Visit);
+
+  table_.Register(kVisitMap, &VisitMap);
+
+  table_.Register(kVisitCode, &VisitCode);
+
+  table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
+
+  table_.Register(kVisitJSFunction, &VisitJSFunction);
+
+  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
+
+  table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
+
+  table_.Register(kVisitJSDataView, &VisitJSDataView);
+
+  // Registration for kVisitJSRegExp is done by StaticVisitor.
+
+  table_.Register(
+      kVisitCell,
+      &FixedBodyVisitor<StaticVisitor, Cell::BodyDescriptor, void>::Visit);
+
+  table_.Register(kVisitPropertyCell, &VisitPropertyCell);
+
+  table_.template RegisterSpecializations<DataObjectVisitor, kVisitDataObject,
+                                          kVisitDataObjectGeneric>();
+
+  table_.template RegisterSpecializations<JSObjectVisitor, kVisitJSObject,
+                                          kVisitJSObjectGeneric>();
+
+  table_.template RegisterSpecializations<StructObjectVisitor, kVisitStruct,
+                                          kVisitStructGeneric>();
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
+    Heap* heap, Address entry_address) {
+  Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
+  heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
+  StaticVisitor::MarkObject(heap, code);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
+    Heap* heap, RelocInfo* rinfo) {
+  DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+  HeapObject* object = HeapObject::cast(rinfo->target_object());
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
+  // TODO(ulan): It could be better to record slots only for strongly embedded
+  // objects here, and to record slots for weakly embedded objects during the
+  // clearing of non-live references in mark-compact.
+  if (!rinfo->host()->IsWeakObject(object)) {
+    StaticVisitor::MarkObject(heap, object);
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCell(Heap* heap,
+                                                    RelocInfo* rinfo) {
+  DCHECK(rinfo->rmode() == RelocInfo::CELL);
+  Cell* cell = rinfo->target_cell();
+  // No need to record slots because the cell space is not compacted during GC.
+  if (!rinfo->host()->IsWeakObject(cell)) {
+    StaticVisitor::MarkObject(heap, cell);
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(Heap* heap,
+                                                           RelocInfo* rinfo) {
+  DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+          rinfo->IsPatchedReturnSequence()) ||
+         (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
+          rinfo->IsPatchedDebugBreakSlotSequence()));
+  Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  StaticVisitor::MarkObject(heap, target);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
+                                                          RelocInfo* rinfo) {
+  DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
+  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  // Monomorphic ICs are preserved when possible, but need to be flushed
+  // when they might be keeping a Context alive, or when the heap is about
+  // to be serialized.
+  if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
+      (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
+       target->ic_state() == POLYMORPHIC ||
+       (heap->flush_monomorphic_ics() && !target->is_weak_stub()) ||
+       heap->isolate()->serializer_enabled() ||
+       target->ic_age() != heap->global_ic_age() ||
+       target->is_invalidated_weak_stub())) {
+    ICUtility::Clear(heap->isolate(), rinfo->pc(),
+                     rinfo->host()->constant_pool());
+    target = Code::GetCodeFromTargetAddress(rinfo->target_address());
+  }
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  StaticVisitor::MarkObject(heap, target);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
+    Heap* heap, RelocInfo* rinfo) {
+  DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+  Code* target = rinfo->code_age_stub();
+  DCHECK(target != NULL);
+  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
+  StaticVisitor::MarkObject(heap, target);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
+    Map* map, HeapObject* object) {
+  FixedBodyVisitor<StaticVisitor, Context::MarkCompactBodyDescriptor,
+                   void>::Visit(map, object);
+
+  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
+  for (int idx = Context::FIRST_WEAK_SLOT; idx < Context::NATIVE_CONTEXT_SLOTS;
+       ++idx) {
+    Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
+    collector->RecordSlot(slot, slot, *slot);
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitMap(Map* map,
+                                                   HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  Map* map_object = Map::cast(object);
+
+  // Clears the cache of ICs related to this map.
+  if (FLAG_cleanup_code_caches_at_gc) {
+    map_object->ClearCodeCache(heap);
+  }
+
+  // When map collection is enabled we have to mark through the map's
+  // transitions and back pointers in a special way to make these links weak.
+  if (FLAG_collect_maps && map_object->CanTransition()) {
+    MarkMapContents(heap, map_object);
+  } else {
+    StaticVisitor::VisitPointers(
+        heap, HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
+        HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  Object** slot =
+      HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
+  if (FLAG_collect_maps) {
+    // Mark the property cell's dependent code array, but do not push it onto
+    // the marking stack; this makes references from it weak. Dead code is
+    // cleared when we iterate over property cells in ClearNonLiveReferences.
+    HeapObject* obj = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+    StaticVisitor::MarkObjectWithoutPush(heap, obj);
+  } else {
+    StaticVisitor::VisitPointer(heap, slot);
+  }
+
+  StaticVisitor::VisitPointers(
+      heap,
+      HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
+      HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  Object** slot =
+      HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
+  if (FLAG_collect_maps) {
+    // Mark the allocation site's dependent code array, but do not push it
+    // onto the marking stack; this makes references from it weak. Dead code
+    // is cleared when we iterate over allocation sites in
+    // ClearNonLiveReferences.
+    HeapObject* obj = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+    StaticVisitor::MarkObjectWithoutPush(heap, obj);
+  } else {
+    StaticVisitor::VisitPointer(heap, slot);
+  }
+
+  StaticVisitor::VisitPointers(
+      heap,
+      HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
+      HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  JSWeakCollection* weak_collection =
+      reinterpret_cast<JSWeakCollection*>(object);
+
+  // Enqueue weak collection in linked list of encountered weak collections.
+  if (weak_collection->next() == heap->undefined_value()) {
+    weak_collection->set_next(heap->encountered_weak_collections());
+    heap->set_encountered_weak_collections(weak_collection);
+  }
+
+  // Skip visiting the backing hash table containing the mappings and the
+  // pointer to the other enqueued weak collections; both are post-processed.
+  StaticVisitor::VisitPointers(
+      heap, HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
+      HeapObject::RawField(object, JSWeakCollection::kTableOffset));
+  STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize ==
+                JSWeakCollection::kNextOffset);
+  STATIC_ASSERT(JSWeakCollection::kNextOffset + kPointerSize ==
+                JSWeakCollection::kSize);
+
+  // A partially initialized weak collection is enqueued, but its table is
+  // ignored.
+  if (!weak_collection->table()->IsHashTable()) return;
+
+  // Mark the backing hash table without pushing it on the marking stack.
+  Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset);
+  HeapObject* obj = HeapObject::cast(*slot);
+  heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+  StaticVisitor::MarkObjectWithoutPush(heap, obj);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitCode(Map* map,
+                                                    HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  Code* code = Code::cast(object);
+  if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
+    code->MakeOlder(heap->mark_compact_collector()->marking_parity());
+  }
+  code->CodeIterateBody<StaticVisitor>(heap);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
+  if (shared->ic_age() != heap->global_ic_age()) {
+    shared->ResetForNewContext(heap->global_ic_age());
+  }
+  if (FLAG_cleanup_code_caches_at_gc) {
+    shared->ClearTypeFeedbackInfo();
+  }
+  if (FLAG_cache_optimized_code && FLAG_flush_optimized_code_cache &&
+      !shared->optimized_code_map()->IsSmi()) {
+    // Always flush the optimized code map if requested by flag.
+    shared->ClearOptimizedCodeMap();
+  }
+  MarkCompactCollector* collector = heap->mark_compact_collector();
+  if (collector->is_code_flushing_enabled()) {
+    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+      // Add the shared function info holding an optimized code map to
+      // the code flusher for processing of code maps after marking.
+      collector->code_flusher()->AddOptimizedCodeMap(shared);
+      // Treat all references within the code map weakly by marking the
+      // code map itself but not pushing it onto the marking deque.
+      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
+      StaticVisitor::MarkObjectWithoutPush(heap, code_map);
+    }
+    if (IsFlushable(heap, shared)) {
+      // This function's code looks flushable. But we have to postpone
+      // the decision until we see all functions that point to the same
+      // SharedFunctionInfo because some of them might be optimized.
+      // That would also make the non-optimized version of the code
+      // non-flushable, because it is required for bailing out from
+      // optimized code.
+      collector->code_flusher()->AddCandidate(shared);
+      // Treat the reference to the code object weakly.
+      VisitSharedFunctionInfoWeakCode(heap, object);
+      return;
+    }
+  } else {
+    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
+      // Flush the optimized code map on major GCs when code flushing is
+      // disabled; this is needed because cached code doesn't contain
+      // breakpoints.
+      shared->ClearOptimizedCodeMap();
+    }
+  }
+  VisitSharedFunctionInfoStrongCode(heap, object);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  ConstantPoolArray* array = ConstantPoolArray::cast(object);
+  ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
+  while (!code_iter.is_finished()) {
+    Address code_entry = reinterpret_cast<Address>(
+        array->RawFieldOfElementAt(code_iter.next_index()));
+    StaticVisitor::VisitCodeEntry(heap, code_entry);
+  }
+
+  ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
+  while (!heap_iter.is_finished()) {
+    Object** slot = array->RawFieldOfElementAt(heap_iter.next_index());
+    HeapObject* object = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, object);
+    bool is_weak_object =
+        (array->get_weak_object_state() ==
+             ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE &&
+         Code::IsWeakObjectInOptimizedCode(object)) ||
+        (array->get_weak_object_state() ==
+             ConstantPoolArray::WEAK_OBJECTS_IN_IC &&
+         Code::IsWeakObjectInIC(object));
+    if (!is_weak_object) {
+      StaticVisitor::MarkObject(heap, object);
+    }
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(Map* map,
+                                                          HeapObject* object) {
+  Heap* heap = map->GetHeap();
+  JSFunction* function = JSFunction::cast(object);
+  MarkCompactCollector* collector = heap->mark_compact_collector();
+  if (collector->is_code_flushing_enabled()) {
+    if (IsFlushable(heap, function)) {
+      // This function's code looks flushable. But we have to postpone
+      // the decision until we see all functions that point to the same
+      // SharedFunctionInfo because some of them might be optimized.
+      // That would also make the non-optimized version of the code
+      // non-flushable, because it is required for bailing out from
+      // optimized code.
+      collector->code_flusher()->AddCandidate(function);
+      // Visit shared function info immediately to avoid double checking
+      // of its flushability later. This is just an optimization because
+      // the shared function info would eventually be visited.
+      SharedFunctionInfo* shared = function->shared();
+      if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
+        StaticVisitor::MarkObject(heap, shared->map());
+        VisitSharedFunctionInfoWeakCode(heap, shared);
+      }
+      // Treat the reference to the code object weakly.
+      VisitJSFunctionWeakCode(heap, object);
+      return;
+    } else {
+      // Visit all unoptimized code objects to prevent flushing them.
+      StaticVisitor::MarkObject(heap, function->shared()->code());
+      if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
+        MarkInlinedFunctionsCode(heap, function->code());
+      }
+    }
+  }
+  VisitJSFunctionStrongCode(heap, object);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(Map* map,
+                                                        HeapObject* object) {
+  int last_property_offset =
+      JSRegExp::kSize + kPointerSize * map->inobject_properties();
+  StaticVisitor::VisitPointers(
+      map->GetHeap(), HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
+      HeapObject::RawField(object, last_property_offset));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
+    Map* map, HeapObject* object) {
+  Heap* heap = map->GetHeap();
+
+  STATIC_ASSERT(JSArrayBuffer::kWeakFirstViewOffset ==
+                JSArrayBuffer::kWeakNextOffset + kPointerSize);
+  StaticVisitor::VisitPointers(
+      heap,
+      HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
+  StaticVisitor::VisitPointers(
+      heap, HeapObject::RawField(
+                object, JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
+      HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
+    Map* map, HeapObject* object) {
+  StaticVisitor::VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
+  StaticVisitor::VisitPointers(
+      map->GetHeap(), HeapObject::RawField(
+                          object, JSTypedArray::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(Map* map,
+                                                          HeapObject* object) {
+  StaticVisitor::VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
+  StaticVisitor::VisitPointers(
+      map->GetHeap(),
+      HeapObject::RawField(object, JSDataView::kWeakNextOffset + kPointerSize),
+      HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(Heap* heap,
+                                                          Map* map) {
+  // Make sure that the back pointer stored either in the map itself or
+  // inside its transitions array is marked. Skip recording the back
+  // pointer slot since map space is not compacted.
+  StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer()));
+
+  // Treat pointers in the transitions array as weak and also mark that
+  // array to prevent visiting it later. Skip recording the transition
+  // array slot, since it will be implicitly recorded when the pointer
+  // fields of this map are visited.
+  if (map->HasTransitionArray()) {
+    TransitionArray* transitions = map->transitions();
+    MarkTransitionArray(heap, transitions);
+  }
+
+  // Since descriptor arrays are potentially shared, ensure that only the
+  // descriptors that belong to this map are marked. The first time a
+  // non-empty descriptor array is marked, its header is also visited. The slot
+  // holding the descriptor array will be implicitly recorded when the pointer
+  // fields of this map are visited.
+  DescriptorArray* descriptors = map->instance_descriptors();
+  if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
+      descriptors->length() > 0) {
+    StaticVisitor::VisitPointers(heap, descriptors->GetFirstElementAddress(),
+                                 descriptors->GetDescriptorEndSlot(0));
+  }
+  int start = 0;
+  int end = map->NumberOfOwnDescriptors();
+  if (start < end) {
+    StaticVisitor::VisitPointers(heap,
+                                 descriptors->GetDescriptorStartSlot(start),
+                                 descriptors->GetDescriptorEndSlot(end));
+  }
+
+  // Mark the map's dependent code array, but do not push it onto the marking
+  // stack; this makes references from it weak. Dead code is cleared when we
+  // iterate over maps in ClearNonLiveTransitions.
+  Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset);
+  HeapObject* obj = HeapObject::cast(*slot);
+  heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+  StaticVisitor::MarkObjectWithoutPush(heap, obj);
+
+  // Mark the pointer fields of the Map. Since the transitions array has
+  // been marked already, it is fine that one of these fields contains a
+  // pointer to it.
+  StaticVisitor::VisitPointers(
+      heap, HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
+      HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
+    Heap* heap, TransitionArray* transitions) {
+  if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
+
+  // Simple transitions have neither keys nor prototype transitions.
+  if (transitions->IsSimpleTransition()) return;
+
+  if (transitions->HasPrototypeTransitions()) {
+    // Mark the prototype transitions array, but do not push it onto the
+    // marking stack; this makes references from it weak. Dead prototype
+    // transitions are cleared in ClearNonLiveTransitions.
+    Object** slot = transitions->GetPrototypeTransitionsSlot();
+    HeapObject* obj = HeapObject::cast(*slot);
+    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
+    StaticVisitor::MarkObjectWithoutPush(heap, obj);
+  }
+
+  for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+    StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i));
+  }
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(Heap* heap,
+                                                                   Code* code) {
+  // Skip in the absence of inlining.
+  // TODO(turbofan): Revisit once we support inlining.
+  if (code->is_turbofanned()) return;
+  // For an optimized function we should retain both the non-optimized version
+  // of its code and the non-optimized versions of all inlined functions.
+  // This is required to support bailing out from inlined code.
+  DeoptimizationInputData* data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+  FixedArray* literals = data->LiteralArray();
+  for (int i = 0, count = data->InlinedFunctionCount()->value(); i < count;
+       i++) {
+    JSFunction* inlined = JSFunction::cast(literals->get(i));
+    StaticVisitor::MarkObject(heap, inlined->shared()->code());
+  }
+}
+
+
+inline static bool IsValidNonBuiltinContext(Object* context) {
+  return context->IsContext() &&
+         !Context::cast(context)->global_object()->IsJSBuiltinsObject();
+}
+
+
+inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
+  Object* undefined = heap->undefined_value();
+  return (info->script() != undefined) &&
+         (reinterpret_cast<Script*>(info->script())->source() != undefined);
+}
+
+
+template <typename StaticVisitor>
+bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(Heap* heap,
+                                                      JSFunction* function) {
+  SharedFunctionInfo* shared_info = function->shared();
+
+  // The code is either on the stack, in the compilation cache, or referenced
+  // by an optimized version of the function.
+  MarkBit code_mark = Marking::MarkBitFrom(function->code());
+  if (code_mark.Get()) {
+    return false;
+  }
+
+  // The function must have a valid context and not be a builtin.
+  if (!IsValidNonBuiltinContext(function->context())) {
+    return false;
+  }
+
+  // We do not (yet) flush code for optimized functions.
+  if (function->code() != shared_info->code()) {
+    return false;
+  }
+
+  // Check the age of the code; young code is never flushed.
+  if (FLAG_age_code && !function->code()->IsOld()) {
+    return false;
+  }
+
+  return IsFlushable(heap, shared_info);
+}
+
+
+template <typename StaticVisitor>
+bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
+    Heap* heap, SharedFunctionInfo* shared_info) {
+  // The code is either on the stack, in the compilation cache, or referenced
+  // by an optimized version of the function.
+  MarkBit code_mark = Marking::MarkBitFrom(shared_info->code());
+  if (code_mark.Get()) {
+    return false;
+  }
+
+  // The function must be compiled and have the source code available, so
+  // that it can be recompiled in case we need the function again.
+  if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
+    return false;
+  }
+
+  // We never flush code for API functions.
+  Object* function_data = shared_info->function_data();
+  if (function_data->IsFunctionTemplateInfo()) {
+    return false;
+  }
+
+  // Only flush code for functions.
+  if (shared_info->code()->kind() != Code::FUNCTION) {
+    return false;
+  }
+
+  // The function must be lazily compilable.
+  if (!shared_info->allows_lazy_compilation()) {
+    return false;
+  }
+
+  // We do not (yet?) flush code for generator functions, because we don't know
+  // if there are still live activations (generator objects) on the heap.
+  if (shared_info->is_generator()) {
+    return false;
+  }
+
+  // If this is a full script wrapped in a function, we do not flush the code.
+  if (shared_info->is_toplevel()) {
+    return false;
+  }
+
+  // If this is a function initialized with %SetCode then the one-to-one
+  // relation between SharedFunctionInfo and Code is broken.
+  if (shared_info->dont_flush()) {
+    return false;
+  }
+
+  // Check the age of the code. If code aging is disabled, we never flush.
+  if (!FLAG_age_code || !shared_info->code()->IsOld()) {
+    return false;
+  }
+
+  return true;
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
+    Heap* heap, HeapObject* object) {
+  Object** start_slot = HeapObject::RawField(
+      object, SharedFunctionInfo::BodyDescriptor::kStartOffset);
+  Object** end_slot = HeapObject::RawField(
+      object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
+    Heap* heap, HeapObject* object) {
+  Object** name_slot =
+      HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
+  StaticVisitor::VisitPointer(heap, name_slot);
+
+  // Skip visiting kCodeOffset as it is treated weakly here.
+  STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
+                SharedFunctionInfo::kCodeOffset);
+  STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize ==
+                SharedFunctionInfo::kOptimizedCodeMapOffset);
+
+  Object** start_slot =
+      HeapObject::RawField(object, SharedFunctionInfo::kOptimizedCodeMapOffset);
+  Object** end_slot = HeapObject::RawField(
+      object, SharedFunctionInfo::BodyDescriptor::kEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
+    Heap* heap, HeapObject* object) {
+  Object** start_slot =
+      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+  Object** end_slot =
+      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+
+  VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
+  STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+                JSFunction::kPrototypeOrInitialMapOffset);
+
+  start_slot =
+      HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
+  end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+template <typename StaticVisitor>
+void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
+    Heap* heap, HeapObject* object) {
+  Object** start_slot =
+      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
+  Object** end_slot =
+      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+
+  // Skip visiting kCodeEntryOffset as it is treated weakly here.
+  STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
+                JSFunction::kPrototypeOrInitialMapOffset);
+
+  start_slot =
+      HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
+  end_slot = HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
+  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+}
+
+
+void Code::CodeIterateBody(ObjectVisitor* v) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // There are two places where we iterate code bodies: here and the
+  // templated CodeIterateBody (below). They should be kept in sync.
+  IteratePointer(v, kRelocationInfoOffset);
+  IteratePointer(v, kHandlerTableOffset);
+  IteratePointer(v, kDeoptimizationDataOffset);
+  IteratePointer(v, kTypeFeedbackInfoOffset);
+  IterateNextCodeLink(v, kNextCodeLinkOffset);
+  IteratePointer(v, kConstantPoolOffset);
+
+  RelocIterator it(this, mode_mask);
+  Isolate* isolate = this->GetIsolate();
+  for (; !it.done(); it.next()) {
+    it.rinfo()->Visit(isolate, v);
+  }
+}
+
+
+template <typename StaticVisitor>
+void Code::CodeIterateBody(Heap* heap) {
+  int mode_mask = RelocInfo::kCodeTargetMask |
+                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
+                  RelocInfo::ModeMask(RelocInfo::CELL) |
+                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
+                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
+                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
+                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
+
+  // There are two places where we iterate code bodies: here and the non-
+  // templated CodeIterateBody (above). They should be kept in sync.
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
+  StaticVisitor::VisitPointer(
+      heap, reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
+  StaticVisitor::VisitPointer(
+      heap,
+      reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
+  StaticVisitor::VisitNextCodeLink(
+      heap, reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
+  StaticVisitor::VisitPointer(
+      heap, reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
+
+  RelocIterator it(this, mode_mask);
+  for (; !it.done(); it.next()) {
+    it.rinfo()->template Visit<StaticVisitor>(heap);
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_OBJECTS_VISITING_INL_H_
diff --git a/src/heap/objects-visiting.cc b/src/heap/objects-visiting.cc
new file mode 100644
index 0000000..a0fc231
--- /dev/null
+++ b/src/heap/objects-visiting.cc
@@ -0,0 +1,413 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/heap/objects-visiting.h"
+
+namespace v8 {
+namespace internal {
+
+
+StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
+    int instance_type, int instance_size) {
+  if (instance_type < FIRST_NONSTRING_TYPE) {
+    switch (instance_type & kStringRepresentationMask) {
+      case kSeqStringTag:
+        if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
+          return kVisitSeqOneByteString;
+        } else {
+          return kVisitSeqTwoByteString;
+        }
+
+      case kConsStringTag:
+        if (IsShortcutCandidate(instance_type)) {
+          return kVisitShortcutCandidate;
+        } else {
+          return kVisitConsString;
+        }
+
+      case kSlicedStringTag:
+        return kVisitSlicedString;
+
+      case kExternalStringTag:
+        return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
+                                   instance_size);
+    }
+    UNREACHABLE();
+  }
+
+  switch (instance_type) {
+    case BYTE_ARRAY_TYPE:
+      return kVisitByteArray;
+
+    case FREE_SPACE_TYPE:
+      return kVisitFreeSpace;
+
+    case FIXED_ARRAY_TYPE:
+      return kVisitFixedArray;
+
+    case FIXED_DOUBLE_ARRAY_TYPE:
+      return kVisitFixedDoubleArray;
+
+    case CONSTANT_POOL_ARRAY_TYPE:
+      return kVisitConstantPoolArray;
+
+    case ODDBALL_TYPE:
+      return kVisitOddball;
+
+    case MAP_TYPE:
+      return kVisitMap;
+
+    case CODE_TYPE:
+      return kVisitCode;
+
+    case CELL_TYPE:
+      return kVisitCell;
+
+    case PROPERTY_CELL_TYPE:
+      return kVisitPropertyCell;
+
+    case JS_SET_TYPE:
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 JSSet::kSize);
+
+    case JS_MAP_TYPE:
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 JSMap::kSize);
+
+    case JS_WEAK_MAP_TYPE:
+    case JS_WEAK_SET_TYPE:
+      return kVisitJSWeakCollection;
+
+    case JS_REGEXP_TYPE:
+      return kVisitJSRegExp;
+
+    case SHARED_FUNCTION_INFO_TYPE:
+      return kVisitSharedFunctionInfo;
+
+    case JS_PROXY_TYPE:
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 JSProxy::kSize);
+
+    case JS_FUNCTION_PROXY_TYPE:
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 JSFunctionProxy::kSize);
+
+    case FOREIGN_TYPE:
+      return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
+                                 Foreign::kSize);
+
+    case SYMBOL_TYPE:
+      return kVisitSymbol;
+
+    case FILLER_TYPE:
+      return kVisitDataObjectGeneric;
+
+    case JS_ARRAY_BUFFER_TYPE:
+      return kVisitJSArrayBuffer;
+
+    case JS_TYPED_ARRAY_TYPE:
+      return kVisitJSTypedArray;
+
+    case JS_DATA_VIEW_TYPE:
+      return kVisitJSDataView;
+
+    case JS_OBJECT_TYPE:
+    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
+    case JS_GENERATOR_OBJECT_TYPE:
+    case JS_MODULE_TYPE:
+    case JS_VALUE_TYPE:
+    case JS_DATE_TYPE:
+    case JS_ARRAY_TYPE:
+    case JS_GLOBAL_PROXY_TYPE:
+    case JS_GLOBAL_OBJECT_TYPE:
+    case JS_BUILTINS_OBJECT_TYPE:
+    case JS_MESSAGE_OBJECT_TYPE:
+    case JS_SET_ITERATOR_TYPE:
+    case JS_MAP_ITERATOR_TYPE:
+      return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
+                                 instance_size);
+
+    case JS_FUNCTION_TYPE:
+      return kVisitJSFunction;
+
+    case HEAP_NUMBER_TYPE:
+    case MUTABLE_HEAP_NUMBER_TYPE:
+#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case EXTERNAL_##TYPE##_ARRAY_TYPE:
+
+      TYPED_ARRAYS(EXTERNAL_ARRAY_CASE)
+      return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
+                                 instance_size);
+#undef EXTERNAL_ARRAY_CASE
+
+    case FIXED_UINT8_ARRAY_TYPE:
+    case FIXED_INT8_ARRAY_TYPE:
+    case FIXED_UINT16_ARRAY_TYPE:
+    case FIXED_INT16_ARRAY_TYPE:
+    case FIXED_UINT32_ARRAY_TYPE:
+    case FIXED_INT32_ARRAY_TYPE:
+    case FIXED_FLOAT32_ARRAY_TYPE:
+    case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
+      return kVisitFixedTypedArray;
+
+    case FIXED_FLOAT64_ARRAY_TYPE:
+      return kVisitFixedFloat64Array;
+
+#define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE:
+      STRUCT_LIST(MAKE_STRUCT_CASE)
+#undef MAKE_STRUCT_CASE
+      if (instance_type == ALLOCATION_SITE_TYPE) {
+        return kVisitAllocationSite;
+      }
+
+      return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
+                                 instance_size);
+
+    default:
+      UNREACHABLE();
+      return kVisitorIdCount;
+  }
+}
+
+
+// We don't record weak slots during marking or scavenges. Instead we do it
+// once when we complete the mark-compact cycle. Note that the write barrier
+// has no effect if we are already in the middle of a compacting mark-sweep
+// cycle, so we have to record slots manually.
+static bool MustRecordSlots(Heap* heap) {
+  return heap->gc_state() == Heap::MARK_COMPACT &&
+         heap->mark_compact_collector()->is_compacting();
+}
+
+
+template <class T>
+struct WeakListVisitor;
+
+
+template <class T>
+Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer) {
+  Object* undefined = heap->undefined_value();
+  Object* head = undefined;
+  T* tail = NULL;
+  MarkCompactCollector* collector = heap->mark_compact_collector();
+  bool record_slots = MustRecordSlots(heap);
+  while (list != undefined) {
+    // Check whether to keep the candidate in the list.
+    T* candidate = reinterpret_cast<T*>(list);
+    Object* retained = retainer->RetainAs(list);
+    if (retained != NULL) {
+      if (head == undefined) {
+        // First element in the list.
+        head = retained;
+      } else {
+        // Subsequent elements in the list.
+        DCHECK(tail != NULL);
+        WeakListVisitor<T>::SetWeakNext(tail, retained);
+        if (record_slots) {
+          Object** next_slot =
+              HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
+          collector->RecordSlot(next_slot, next_slot, retained);
+        }
+      }
+      // Retained object is new tail.
+      DCHECK(!retained->IsUndefined());
+      candidate = reinterpret_cast<T*>(retained);
+      tail = candidate;
+
+      // tail is a live object; visit it.
+      WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
+    } else {
+      WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
+    }
+
+    // Move to next element in the list.
+    list = WeakListVisitor<T>::WeakNext(candidate);
+  }
+
+  // Terminate the list if there are one or more elements.
+  if (tail != NULL) {
+    WeakListVisitor<T>::SetWeakNext(tail, undefined);
+  }
+  return head;
+}
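+
+// A sketch of a typical caller (based on how the heap is assumed to process
+// one of its weak lists): thread the list head through this function and
+// store the result back, e.g.
+//
+//   Object* head =
+//       VisitWeakList<Context>(heap, heap->native_contexts_list(), retainer);
+//   heap->set_native_contexts_list(head);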
+
+
+template <class T>
+static void ClearWeakList(Heap* heap, Object* list) {
+  Object* undefined = heap->undefined_value();
+  while (list != undefined) {
+    T* candidate = reinterpret_cast<T*>(list);
+    list = WeakListVisitor<T>::WeakNext(candidate);
+    WeakListVisitor<T>::SetWeakNext(candidate, undefined);
+  }
+}
+
+
+template <>
+struct WeakListVisitor<JSFunction> {
+  static void SetWeakNext(JSFunction* function, Object* next) {
+    function->set_next_function_link(next);
+  }
+
+  static Object* WeakNext(JSFunction* function) {
+    return function->next_function_link();
+  }
+
+  static int WeakNextOffset() { return JSFunction::kNextFunctionLinkOffset; }
+
+  static void VisitLiveObject(Heap*, JSFunction*, WeakObjectRetainer*) {}
+
+  static void VisitPhantomObject(Heap*, JSFunction*) {}
+};
+
+
+template <>
+struct WeakListVisitor<Code> {
+  static void SetWeakNext(Code* code, Object* next) {
+    code->set_next_code_link(next);
+  }
+
+  static Object* WeakNext(Code* code) { return code->next_code_link(); }
+
+  static int WeakNextOffset() { return Code::kNextCodeLinkOffset; }
+
+  static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {}
+
+  static void VisitPhantomObject(Heap*, Code*) {}
+};
+
+
+template <>
+struct WeakListVisitor<Context> {
+  static void SetWeakNext(Context* context, Object* next) {
+    context->set(Context::NEXT_CONTEXT_LINK, next, UPDATE_WRITE_BARRIER);
+  }
+
+  static Object* WeakNext(Context* context) {
+    return context->get(Context::NEXT_CONTEXT_LINK);
+  }
+
+  static int WeakNextOffset() {
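+    // FixedArray::SizeFor(i) is the byte offset of element i, so this is
+    // the offset of the NEXT_CONTEXT_LINK slot within the context.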
+    return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
+  }
+
+  static void VisitLiveObject(Heap* heap, Context* context,
+                              WeakObjectRetainer* retainer) {
+    // Process the three weak lists linked off the context.
+    DoWeakList<JSFunction>(heap, context, retainer,
+                           Context::OPTIMIZED_FUNCTIONS_LIST);
+    DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
+    DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
+  }
+
+  template <class T>
+  static void DoWeakList(Heap* heap, Context* context,
+                         WeakObjectRetainer* retainer, int index) {
+    // Visit the weak list, removing dead intermediate elements.
+    Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer);
+
+    // Update the list head.
+    context->set(index, list_head, UPDATE_WRITE_BARRIER);
+
+    if (MustRecordSlots(heap)) {
+      // Record the updated slot if necessary.
+      Object** head_slot =
+          HeapObject::RawField(context, FixedArray::SizeFor(index));
+      heap->mark_compact_collector()->RecordSlot(head_slot, head_slot,
+                                                 list_head);
+    }
+  }
+
+  static void VisitPhantomObject(Heap* heap, Context* context) {
+    ClearWeakList<JSFunction>(heap,
+                              context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
+    ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
+    ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
+  }
+};
+
+
+template <>
+struct WeakListVisitor<JSArrayBufferView> {
+  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
+    obj->set_weak_next(next);
+  }
+
+  static Object* WeakNext(JSArrayBufferView* obj) { return obj->weak_next(); }
+
+  static int WeakNextOffset() { return JSArrayBufferView::kWeakNextOffset; }
+
+  static void VisitLiveObject(Heap*, JSArrayBufferView*, WeakObjectRetainer*) {}
+
+  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
+};
+
+
+template <>
+struct WeakListVisitor<JSArrayBuffer> {
+  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
+    obj->set_weak_next(next);
+  }
+
+  static Object* WeakNext(JSArrayBuffer* obj) { return obj->weak_next(); }
+
+  static int WeakNextOffset() { return JSArrayBuffer::kWeakNextOffset; }
+
+  static void VisitLiveObject(Heap* heap, JSArrayBuffer* array_buffer,
+                              WeakObjectRetainer* retainer) {
+    Object* typed_array_obj = VisitWeakList<JSArrayBufferView>(
+        heap, array_buffer->weak_first_view(), retainer);
+    array_buffer->set_weak_first_view(typed_array_obj);
+    if (typed_array_obj != heap->undefined_value() && MustRecordSlots(heap)) {
+      Object** slot = HeapObject::RawField(array_buffer,
+                                           JSArrayBuffer::kWeakFirstViewOffset);
+      heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
+    }
+  }
+
+  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
+    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
+  }
+};
+
+
+template <>
+struct WeakListVisitor<AllocationSite> {
+  static void SetWeakNext(AllocationSite* obj, Object* next) {
+    obj->set_weak_next(next);
+  }
+
+  static Object* WeakNext(AllocationSite* obj) { return obj->weak_next(); }
+
+  static int WeakNextOffset() { return AllocationSite::kWeakNextOffset; }
+
+  static void VisitLiveObject(Heap*, AllocationSite*, WeakObjectRetainer*) {}
+
+  static void VisitPhantomObject(Heap*, AllocationSite*) {}
+};
+
+
+template Object* VisitWeakList<Code>(Heap* heap, Object* list,
+                                     WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<JSFunction>(Heap* heap, Object* list,
+                                           WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<Context>(Heap* heap, Object* list,
+                                        WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<JSArrayBuffer>(Heap* heap, Object* list,
+                                              WeakObjectRetainer* retainer);
+
+
+template Object* VisitWeakList<AllocationSite>(Heap* heap, Object* list,
+                                               WeakObjectRetainer* retainer);
+}
+}  // namespace v8::internal
diff --git a/src/heap/objects-visiting.h b/src/heap/objects-visiting.h
new file mode 100644
index 0000000..919a800
--- /dev/null
+++ b/src/heap/objects-visiting.h
@@ -0,0 +1,452 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OBJECTS_VISITING_H_
+#define V8_OBJECTS_VISITING_H_
+
+#include "src/allocation.h"
+
+// This file provides base classes and auxiliary methods for defining
+// static object visitors used during GC.
+// Visiting a HeapObject's body with a normal ObjectVisitor requires two
+// switches on the object's instance type to determine object size and
+// layout, plus one or more virtual method calls on the visitor itself.
+// A static visitor is different: it provides a dispatch table containing
+// pointers to specialized visit functions. Each map has a visitor_id field
+// holding the index of the specialized visitor to use.
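+//
+// With the table in place, dispatch reduces to a single indexed call,
+// roughly (see VisitorDispatchTable::GetVisitor and
+// StaticNewSpaceVisitor::IterateBody below):
+//
+//   Callback visit = table_.GetVisitor(map);  // callbacks_[map->visitor_id()]
+//   int size = visit(map, obj);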
+
+namespace v8 {
+namespace internal {
+
+
+// Base class for all static visitors.
+class StaticVisitorBase : public AllStatic {
+ public:
+#define VISITOR_ID_LIST(V) \
+  V(SeqOneByteString)      \
+  V(SeqTwoByteString)      \
+  V(ShortcutCandidate)     \
+  V(ByteArray)             \
+  V(FreeSpace)             \
+  V(FixedArray)            \
+  V(FixedDoubleArray)      \
+  V(FixedTypedArray)       \
+  V(FixedFloat64Array)     \
+  V(ConstantPoolArray)     \
+  V(NativeContext)         \
+  V(AllocationSite)        \
+  V(DataObject2)           \
+  V(DataObject3)           \
+  V(DataObject4)           \
+  V(DataObject5)           \
+  V(DataObject6)           \
+  V(DataObject7)           \
+  V(DataObject8)           \
+  V(DataObject9)           \
+  V(DataObjectGeneric)     \
+  V(JSObject2)             \
+  V(JSObject3)             \
+  V(JSObject4)             \
+  V(JSObject5)             \
+  V(JSObject6)             \
+  V(JSObject7)             \
+  V(JSObject8)             \
+  V(JSObject9)             \
+  V(JSObjectGeneric)       \
+  V(Struct2)               \
+  V(Struct3)               \
+  V(Struct4)               \
+  V(Struct5)               \
+  V(Struct6)               \
+  V(Struct7)               \
+  V(Struct8)               \
+  V(Struct9)               \
+  V(StructGeneric)         \
+  V(ConsString)            \
+  V(SlicedString)          \
+  V(Symbol)                \
+  V(Oddball)               \
+  V(Code)                  \
+  V(Map)                   \
+  V(Cell)                  \
+  V(PropertyCell)          \
+  V(SharedFunctionInfo)    \
+  V(JSFunction)            \
+  V(JSWeakCollection)      \
+  V(JSArrayBuffer)         \
+  V(JSTypedArray)          \
+  V(JSDataView)            \
+  V(JSRegExp)
+
+  // For data objects, JS objects and structs, along with the generic visitor
+  // that can visit an object of any size, we provide visitors specialized by
+  // object size in words.
+  // Ids of the specialized visitors are declared in a linear order (without
+  // holes), starting from the id of the visitor specialized for 2-word
+  // objects (the base visitor id) and ending with the id of the generic
+  // visitor.
+  // GetVisitorIdForSize depends on this ordering to calculate the visitor id
+  // of a specialized visitor from a given instance size, the base visitor id
+  // and the generic visitor's id.
+  enum VisitorId {
+#define VISITOR_ID_ENUM_DECL(id) kVisit##id,
+    VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
+#undef VISITOR_ID_ENUM_DECL
+    kVisitorIdCount,
+    kVisitDataObject = kVisitDataObject2,
+    kVisitJSObject = kVisitJSObject2,
+    kVisitStruct = kVisitStruct2,
+    kMinObjectSizeInWords = 2
+  };
+
+  // Visitor ID should fit in one byte.
+  STATIC_ASSERT(kVisitorIdCount <= 256);
+
+  // Determine which specialized visitor should be used for a given instance
+  // type and instance size.
+  static VisitorId GetVisitorId(int instance_type, int instance_size);
+
+  static VisitorId GetVisitorId(Map* map) {
+    return GetVisitorId(map->instance_type(), map->instance_size());
+  }
+
+  // For visitors that allow specialization by size, calculate the VisitorId
+  // based on the object size, the base visitor id and the generic visitor id.
+  static VisitorId GetVisitorIdForSize(VisitorId base, VisitorId generic,
+                                       int object_size) {
+    DCHECK((base == kVisitDataObject) || (base == kVisitStruct) ||
+           (base == kVisitJSObject));
+    DCHECK(IsAligned(object_size, kPointerSize));
+    DCHECK(kMinObjectSizeInWords * kPointerSize <= object_size);
+    DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+
+    const VisitorId specialization = static_cast<VisitorId>(
+        base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
+
+    return Min(specialization, generic);
+  }
+};
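+
+// Worked example: assuming 64-bit pointers (kPointerSizeLog2 == 3), a
+// 32-byte JS object is 4 words, so GetVisitorIdForSize(kVisitJSObject,
+// kVisitJSObjectGeneric, 32) yields kVisitJSObject + (4 - 2), i.e.
+// kVisitJSObject4; any size beyond the 9-word specialization is clamped to
+// kVisitJSObjectGeneric by the Min().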
+
+
+template <typename Callback>
+class VisitorDispatchTable {
+ public:
+  void CopyFrom(VisitorDispatchTable* other) {
+    // We deliberately avoid memcpy, to guarantee that during the update
+    // every element of the callbacks_ array remains a valid pointer
+    // (memcpy might be implemented as a byte-copying loop).
+    for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
+      base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
+    }
+  }
+
+  inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
+    return reinterpret_cast<Callback>(callbacks_[id]);
+  }
+
+  inline Callback GetVisitor(Map* map) {
+    return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
+  }
+
+  void Register(StaticVisitorBase::VisitorId id, Callback callback) {
+    DCHECK(id < StaticVisitorBase::kVisitorIdCount);  // id is unsigned.
+    callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
+  }
+
+  template <typename Visitor, StaticVisitorBase::VisitorId base,
+            StaticVisitorBase::VisitorId generic, int object_size_in_words>
+  void RegisterSpecialization() {
+    static const int size = object_size_in_words * kPointerSize;
+    Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
+             &Visitor::template VisitSpecialized<size>);
+  }
+
+
+  template <typename Visitor, StaticVisitorBase::VisitorId base,
+            StaticVisitorBase::VisitorId generic>
+  void RegisterSpecializations() {
+    STATIC_ASSERT((generic - base + StaticVisitorBase::kMinObjectSizeInWords) ==
+                  10);
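+    // The eight calls below cover 2..9-word objects; e.g. for base ==
+    // kVisitJSObject this fills kVisitJSObject2..kVisitJSObject9 with
+    // &Visitor::VisitSpecialized<16>..&Visitor::VisitSpecialized<72>
+    // (assuming 64-bit pointers), and the final Register maps the generic
+    // id to &Visitor::Visit.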
+    RegisterSpecialization<Visitor, base, generic, 2>();
+    RegisterSpecialization<Visitor, base, generic, 3>();
+    RegisterSpecialization<Visitor, base, generic, 4>();
+    RegisterSpecialization<Visitor, base, generic, 5>();
+    RegisterSpecialization<Visitor, base, generic, 6>();
+    RegisterSpecialization<Visitor, base, generic, 7>();
+    RegisterSpecialization<Visitor, base, generic, 8>();
+    RegisterSpecialization<Visitor, base, generic, 9>();
+    Register(generic, &Visitor::Visit);
+  }
+
+ private:
+  base::AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
+};
+
+
+template <typename StaticVisitor>
+class BodyVisitorBase : public AllStatic {
+ public:
+  INLINE(static void IteratePointers(Heap* heap, HeapObject* object,
+                                     int start_offset, int end_offset)) {
+    Object** start_slot =
+        reinterpret_cast<Object**>(object->address() + start_offset);
+    Object** end_slot =
+        reinterpret_cast<Object**>(object->address() + end_offset);
+    StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+  }
+};
+
+
+template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
+class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+ public:
+  INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
+    int object_size = BodyDescriptor::SizeOf(map, object);
+    BodyVisitorBase<StaticVisitor>::IteratePointers(
+        map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+    return static_cast<ReturnType>(object_size);
+  }
+
+  template <int object_size>
+  static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
+    DCHECK(BodyDescriptor::SizeOf(map, object) == object_size);
+    BodyVisitorBase<StaticVisitor>::IteratePointers(
+        map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
+    return static_cast<ReturnType>(object_size);
+  }
+};
+
+
+template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
+class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
+ public:
+  INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
+    BodyVisitorBase<StaticVisitor>::IteratePointers(
+        map->GetHeap(), object, BodyDescriptor::kStartOffset,
+        BodyDescriptor::kEndOffset);
+    return static_cast<ReturnType>(BodyDescriptor::kSize);
+  }
+};
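+
+
+// A hedged sketch contrasting the two body visitors above:
+// FlexibleBodyVisitor serves types whose size is only known at runtime, so
+// Visit() queries BodyDescriptor::SizeOf(map, object); FixedBodyVisitor
+// serves types with a statically known pointer-field range, for example
+// (descriptor name assumed for illustration):
+//
+//   typedef FixedBodyVisitor<SomeVisitor, Oddball::BodyDescriptor, void>
+//       OddballVisitor;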
+
+
+// Base class for visitors used for a linear new-space iteration.
+// IterateBody returns the size of the visited object.
+// Certain types of objects (e.g. Code objects) are not handled
+// by the dispatch table of this visitor because they cannot appear
+// in new space.
+//
+// This class is intended to be used in the following way:
+//
+//   class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
+//     ...
+//   }
+//
+// This is an example of the Curiously Recurring Template Pattern
+// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
+// We use CRTP to guarantee aggressive compile time optimizations (i.e.
+// inlining and specialization of StaticVisitor::VisitPointers methods).
+template <typename StaticVisitor>
+class StaticNewSpaceVisitor : public StaticVisitorBase {
+ public:
+  static void Initialize();
+
+  INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
+    return table_.GetVisitor(map)(map, obj);
+  }
+
+  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
+    for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
+  }
+
+ private:
+  INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
+    Heap* heap = map->GetHeap();
+    VisitPointers(heap,
+                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
+                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
+
+    // Don't visit the code entry; this visitor is used only during scavenges.
+
+    VisitPointers(
+        heap, HeapObject::RawField(object,
+                                   JSFunction::kCodeEntryOffset + kPointerSize),
+        HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset));
+    return JSFunction::kSize;
+  }
+
+  INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
+    return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
+  }
+
+  INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
+    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
+    return FixedDoubleArray::SizeFor(length);
+  }
+
+  INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
+    return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
+  }
+
+  INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
+    return JSObjectVisitor::Visit(map, object);
+  }
+
+  INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
+    return SeqOneByteString::cast(object)
+        ->SeqOneByteStringSize(map->instance_type());
+  }
+
+  INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
+    return SeqTwoByteString::cast(object)
+        ->SeqTwoByteStringSize(map->instance_type());
+  }
+
+  INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
+    return FreeSpace::cast(object)->Size();
+  }
+
+  INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
+  INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
+  INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
+
+  class DataObjectVisitor {
+   public:
+    template <int object_size>
+    static inline int VisitSpecialized(Map* map, HeapObject* object) {
+      return object_size;
+    }
+
+    INLINE(static int Visit(Map* map, HeapObject* object)) {
+      return map->instance_size();
+    }
+  };
+
+  typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, int>
+      StructVisitor;
+
+  typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, int>
+      JSObjectVisitor;
+
+  typedef int (*Callback)(Map* map, HeapObject* object);
+
+  static VisitorDispatchTable<Callback> table_;
+};
+
+
+template <typename StaticVisitor>
+VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
+    StaticNewSpaceVisitor<StaticVisitor>::table_;
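+
+
+// A hedged sketch of how such a visitor drives a linear new-space walk
+// (loop shape illustrative, not taken from this file):
+//
+//   Address cur = space->bottom();
+//   while (cur < space->top()) {
+//     HeapObject* obj = HeapObject::FromAddress(cur);
+//     cur += SomeVisitor::IterateBody(obj->map(), obj);  // visited size
+//   }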
+
+
+// Base class for visitors used to transitively mark the entire heap.
+// IterateBody returns nothing.
+// Certain types of objects might not be handled by this base class and
+// no visitor function is registered by the generic initialization. A
+// specialized visitor function needs to be provided by the inheriting
+// class itself for those cases.
+//
+// This class is intended to be used in the following way:
+//
+//   class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
+//     ...
+//   }
+//
+// This is another example of the Curiously Recurring Template Pattern.
+template <typename StaticVisitor>
+class StaticMarkingVisitor : public StaticVisitorBase {
+ public:
+  static void Initialize();
+
+  INLINE(static void IterateBody(Map* map, HeapObject* obj)) {
+    table_.GetVisitor(map)(map, obj);
+  }
+
+  INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
+  INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
+  INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
+  INLINE(static void VisitExternalReference(RelocInfo* rinfo)) {}
+  INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) {}
+  // Skip the weak next code link in a code object.
+  INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) {}
+
+  // TODO(mstarzinger): This should be made protected once refactoring is done.
+  // Mark non-optimized code for functions inlined into the given optimized
+  // code. This prevents it from being flushed.
+  static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
+
+ protected:
+  INLINE(static void VisitMap(Map* map, HeapObject* object));
+  INLINE(static void VisitCode(Map* map, HeapObject* object));
+  INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
+  INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
+  INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
+  INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
+  INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
+  INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
+  INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
+  INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
+  INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
+  INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
+
+  // Mark pointers in a Map and its TransitionArray together, possibly
+  // treating transitions or back pointers as weak.
+  static void MarkMapContents(Heap* heap, Map* map);
+  static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
+
+  // Code flushing support.
+  INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
+  INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
+
+  // Helpers used by code flushing support that visit pointer fields and treat
+  // references to code objects either strongly or weakly.
+  static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
+  static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
+  static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
+  static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
+
+  class DataObjectVisitor {
+   public:
+    template <int size>
+    static inline void VisitSpecialized(Map* map, HeapObject* object) {}
+
+    INLINE(static void Visit(Map* map, HeapObject* object)) {}
+  };
+
+  typedef FlexibleBodyVisitor<StaticVisitor, FixedArray::BodyDescriptor, void>
+      FixedArrayVisitor;
+
+  typedef FlexibleBodyVisitor<StaticVisitor, JSObject::BodyDescriptor, void>
+      JSObjectVisitor;
+
+  typedef FlexibleBodyVisitor<StaticVisitor, StructBodyDescriptor, void>
+      StructObjectVisitor;
+
+  typedef void (*Callback)(Map* map, HeapObject* object);
+
+  static VisitorDispatchTable<Callback> table_;
+};
+
+
+template <typename StaticVisitor>
+VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
+    StaticMarkingVisitor<StaticVisitor>::table_;
+
+
+class WeakObjectRetainer;
+
+
+// A weak list is a singly linked list in which each element has a weak
+// pointer to the next element. Given the head of the list, this function
+// removes dead elements and, if requested, records slots for next-element
+// pointers. The template parameter T is a WeakListVisitor that defines how to
+// access the next-element pointers.
+template <class T>
+Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
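+
+// A sketch of the WeakListVisitor contract assumed by VisitWeakList (method
+// set illustrative; the exact contract lives with the implementation):
+//
+//   struct SomeWeakListVisitor {
+//     static Object* WeakNext(SomeType* obj);                // read next link
+//     static void SetWeakNext(SomeType* obj, Object* next);  // write it
+//     // ... plus hooks for visiting live elements and recording slots.
+//   };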
+}
+}  // namespace v8::internal
+
+#endif  // V8_OBJECTS_VISITING_H_
diff --git a/src/heap/spaces-inl.h b/src/heap/spaces-inl.h
new file mode 100644
index 0000000..d81d253
--- /dev/null
+++ b/src/heap/spaces-inl.h
@@ -0,0 +1,313 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SPACES_INL_H_
+#define V8_HEAP_SPACES_INL_H_
+
+#include "src/heap/spaces.h"
+#include "src/heap-profiler.h"
+#include "src/isolate.h"
+#include "src/msan.h"
+#include "src/v8memory.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Bitmap
+
+void Bitmap::Clear(MemoryChunk* chunk) {
+  Bitmap* bitmap = chunk->markbits();
+  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
+  chunk->ResetLiveBytes();
+}
+
+
+// -----------------------------------------------------------------------------
+// PageIterator
+
+
+PageIterator::PageIterator(PagedSpace* space)
+    : space_(space),
+      prev_page_(&space->anchor_),
+      next_page_(prev_page_->next_page()) {}
+
+
+bool PageIterator::has_next() { return next_page_ != &space_->anchor_; }
+
+
+Page* PageIterator::next() {
+  DCHECK(has_next());
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
+  return prev_page_;
+}
+
+
+// -----------------------------------------------------------------------------
+// NewSpacePageIterator
+
+
+NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
+    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
+      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
+      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) {}
+
+NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
+    : prev_page_(space->anchor()),
+      next_page_(prev_page_->next_page()),
+      last_page_(prev_page_->prev_page()) {}
+
+NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
+    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
+      next_page_(NewSpacePage::FromAddress(start)),
+      last_page_(NewSpacePage::FromLimit(limit)) {
+  SemiSpace::AssertValidRange(start, limit);
+}
+
+
+bool NewSpacePageIterator::has_next() { return prev_page_ != last_page_; }
+
+
+NewSpacePage* NewSpacePageIterator::next() {
+  DCHECK(has_next());
+  prev_page_ = next_page_;
+  next_page_ = next_page_->next_page();
+  return prev_page_;
+}
+
+
+// -----------------------------------------------------------------------------
+// HeapObjectIterator
+HeapObject* HeapObjectIterator::FromCurrentPage() {
+  while (cur_addr_ != cur_end_) {
+    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
+      cur_addr_ = space_->limit();
+      continue;
+    }
+    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
+    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
+    cur_addr_ += obj_size;
+    DCHECK(cur_addr_ <= cur_end_);
+    if (!obj->IsFiller()) {
+      DCHECK_OBJECT_SIZE(obj_size);
+      return obj;
+    }
+  }
+  return NULL;
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+
+#ifdef ENABLE_HEAP_PROTECTION
+
+void MemoryAllocator::Protect(Address start, size_t size) {
+  base::OS::Protect(start, size);
+}
+
+
+void MemoryAllocator::Unprotect(Address start, size_t size,
+                                Executability executable) {
+  base::OS::Unprotect(start, size, executable);
+}
+
+
+void MemoryAllocator::ProtectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  base::OS::Protect(chunks_[id].address(), chunks_[id].size());
+}
+
+
+void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
+  int id = GetChunkId(page);
+  base::OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
+                      chunks_[id].owner()->executable() == EXECUTABLE);
+}
+
+#endif
+
+
+// --------------------------------------------------------------------------
+// PagedSpace
+Page* Page::Initialize(Heap* heap, MemoryChunk* chunk, Executability executable,
+                       PagedSpace* owner) {
+  Page* page = reinterpret_cast<Page*>(chunk);
+  DCHECK(page->area_size() <= kMaxRegularHeapObjectSize);
+  DCHECK(chunk->owner() == owner);
+  owner->IncreaseCapacity(page->area_size());
+  owner->Free(page->area_start(), page->area_size());
+
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+
+  return page;
+}
+
+
+bool PagedSpace::Contains(Address addr) {
+  Page* p = Page::FromAddress(addr);
+  if (!p->is_valid()) return false;
+  return p->owner() == this;
+}
+
+
+void MemoryChunk::set_scan_on_scavenge(bool scan) {
+  if (scan) {
+    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
+    SetFlag(SCAN_ON_SCAVENGE);
+  } else {
+    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
+    ClearFlag(SCAN_ON_SCAVENGE);
+  }
+  heap_->incremental_marking()->SetOldSpacePageFlags(this);
+}
+
+
+MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
+  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
+      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
+  if (maybe->owner() != NULL) return maybe;
+  LargeObjectIterator iterator(heap->lo_space());
+  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
+    // Fixed arrays are the only pointer-containing objects in large object
+    // space.
+    if (o->IsFixedArray()) {
+      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
+      if (chunk->Contains(addr)) {
+        return chunk;
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+void MemoryChunk::UpdateHighWaterMark(Address mark) {
+  if (mark == NULL) return;
+  // Need to subtract one from the mark because when a chunk is full the
+  // top points to the next address after the chunk, which effectively belongs
+  // to another chunk. See the comment to Page::FromAllocationTop.
+  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
+  int new_mark = static_cast<int>(mark - chunk->address());
+  if (new_mark > chunk->high_water_mark_) {
+    chunk->high_water_mark_ = new_mark;
+  }
+}
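+
+
+// Illustrative example (chunk geometry assumed): if a chunk occupies
+// [0x100000, 0x200000) and allocation filled it completely, the top equals
+// 0x200000, which FromAddress() would attribute to the *next* chunk;
+// FromAddress(mark - 1), i.e. FromAddress(0x1FFFFF), correctly recovers the
+// chunk that owns the allocated bytes.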
+
+
+PointerChunkIterator::PointerChunkIterator(Heap* heap)
+    : state_(kOldPointerState),
+      old_pointer_iterator_(heap->old_pointer_space()),
+      map_iterator_(heap->map_space()),
+      lo_iterator_(heap->lo_space()) {}
+
+
+Page* Page::next_page() {
+  DCHECK(next_chunk()->owner() == owner());
+  return static_cast<Page*>(next_chunk());
+}
+
+
+Page* Page::prev_page() {
+  DCHECK(prev_chunk()->owner() == owner());
+  return static_cast<Page*>(prev_chunk());
+}
+
+
+void Page::set_next_page(Page* page) {
+  DCHECK(page->owner() == owner());
+  set_next_chunk(page);
+}
+
+
+void Page::set_prev_page(Page* page) {
+  DCHECK(page->owner() == owner());
+  set_prev_chunk(page);
+}
+
+
+// Try linear allocation in the page containing allocation_info_'s top.  Does
+// not contain slow-case logic (e.g. moving to the next page or trying free
+// list allocation), so it can be used by all the allocation functions and for
+// all the paged spaces.
+HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
+  Address current_top = allocation_info_.top();
+  Address new_top = current_top + size_in_bytes;
+  if (new_top > allocation_info_.limit()) return NULL;
+
+  allocation_info_.set_top(new_top);
+  return HeapObject::FromAddress(current_top);
+}
+
+
+// Raw allocation.
+AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
+  HeapObject* object = AllocateLinearly(size_in_bytes);
+
+  if (object == NULL) {
+    object = free_list_.Allocate(size_in_bytes);
+    if (object == NULL) {
+      object = SlowAllocateRaw(size_in_bytes);
+    }
+  }
+
+  if (object != NULL) {
+    if (identity() == CODE_SPACE) {
+      SkipList::Update(object->address(), size_in_bytes);
+    }
+    MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), size_in_bytes);
+    return object;
+  }
+
+  return AllocationResult::Retry(identity());
+}
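+
+
+// The raw allocation above is three-tiered: bump-pointer allocation in the
+// current linear area, then the free list, then SlowAllocateRaw (which may
+// sweep or expand the space). A hedged caller sketch (control flow
+// illustrative):
+//
+//   AllocationResult result = space->AllocateRaw(size_in_bytes);
+//   HeapObject* obj;
+//   if (!result.To(&obj)) {
+//     // Retry was signalled; collect garbage in the indicated space and
+//     // allocate again.
+//   }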
+
+
+// -----------------------------------------------------------------------------
+// NewSpace
+
+
+AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
+  Address old_top = allocation_info_.top();
+
+  if (allocation_info_.limit() - old_top < size_in_bytes) {
+    return SlowAllocateRaw(size_in_bytes);
+  }
+
+  HeapObject* obj = HeapObject::FromAddress(old_top);
+  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  // The slow path above ultimately goes through AllocateRaw, so this suffices.
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(obj->address(), size_in_bytes);
+
+  return obj;
+}
+
+
+LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
+  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
+  return static_cast<LargePage*>(chunk);
+}
+
+
+intptr_t LargeObjectSpace::Available() {
+  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
+}
+
+
+bool FreeListNode::IsFreeListNode(HeapObject* object) {
+  Map* map = object->map();
+  Heap* heap = object->GetHeap();
+  return map == heap->raw_unchecked_free_space_map() ||
+         map == heap->raw_unchecked_one_pointer_filler_map() ||
+         map == heap->raw_unchecked_two_pointer_filler_map();
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_SPACES_INL_H_
diff --git a/src/heap/spaces.cc b/src/heap/spaces.cc
new file mode 100644
index 0000000..f8d340f
--- /dev/null
+++ b/src/heap/spaces.cc
@@ -0,0 +1,3107 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/bits.h"
+#include "src/base/platform/platform.h"
+#include "src/full-codegen.h"
+#include "src/heap/mark-compact.h"
+#include "src/macro-assembler.h"
+#include "src/msan.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// HeapObjectIterator
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize as if we have
+  // reached the end of the anchor page, so that the first iteration will move
+  // on to the first page.
+  Initialize(space, NULL, NULL, kAllPagesInSpace, NULL);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
+                                       HeapObjectCallback size_func) {
+  // You can't actually iterate over the anchor page.  It is not a real page,
+  // just an anchor for the doubly linked page list.  Initialize the current
+  // address and end as NULL, so that the first iteration will move on
+  // to the first page.
+  Initialize(space, NULL, NULL, kAllPagesInSpace, size_func);
+}
+
+
+HeapObjectIterator::HeapObjectIterator(Page* page,
+                                       HeapObjectCallback size_func) {
+  Space* owner = page->owner();
+  DCHECK(owner == page->heap()->old_pointer_space() ||
+         owner == page->heap()->old_data_space() ||
+         owner == page->heap()->map_space() ||
+         owner == page->heap()->cell_space() ||
+         owner == page->heap()->property_cell_space() ||
+         owner == page->heap()->code_space());
+  Initialize(reinterpret_cast<PagedSpace*>(owner), page->area_start(),
+             page->area_end(), kOnePageOnly, size_func);
+  DCHECK(page->WasSwept() || page->SweepingCompleted());
+}
+
+
+void HeapObjectIterator::Initialize(PagedSpace* space, Address cur, Address end,
+                                    HeapObjectIterator::PageMode mode,
+                                    HeapObjectCallback size_f) {
+  space_ = space;
+  cur_addr_ = cur;
+  cur_end_ = end;
+  page_mode_ = mode;
+  size_func_ = size_f;
+}
+
+
+// We have hit the end of the current page's object area and should advance
+// to the next page, if there is one.
+bool HeapObjectIterator::AdvanceToNextPage() {
+  DCHECK(cur_addr_ == cur_end_);
+  if (page_mode_ == kOnePageOnly) return false;
+  Page* cur_page;
+  if (cur_addr_ == NULL) {
+    cur_page = space_->anchor();
+  } else {
+    cur_page = Page::FromAddress(cur_addr_ - 1);
+    DCHECK(cur_addr_ == cur_page->area_end());
+  }
+  cur_page = cur_page->next_page();
+  if (cur_page == space_->anchor()) return false;
+  cur_addr_ = cur_page->area_start();
+  cur_end_ = cur_page->area_end();
+  DCHECK(cur_page->WasSwept() || cur_page->SweepingCompleted());
+  return true;
+}
+
+
+// -----------------------------------------------------------------------------
+// CodeRange
+
+
+CodeRange::CodeRange(Isolate* isolate)
+    : isolate_(isolate),
+      code_range_(NULL),
+      free_list_(0),
+      allocation_list_(0),
+      current_allocation_block_index_(0) {}
+
+
+bool CodeRange::SetUp(size_t requested) {
+  DCHECK(code_range_ == NULL);
+
+  if (requested == 0) {
+    // When a target requires the code range feature, we put all code objects
+    // in a kMaximalCodeRangeSize range of virtual address space, so that
+    // they can call each other with near calls.
+    if (kRequiresCodeRange) {
+      requested = kMaximalCodeRangeSize;
+    } else {
+      return true;
+    }
+  }
+
+  DCHECK(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
+  code_range_ = new base::VirtualMemory(requested);
+  CHECK(code_range_ != NULL);
+  if (!code_range_->IsReserved()) {
+    delete code_range_;
+    code_range_ = NULL;
+    return false;
+  }
+
+  // We are sure that we have mapped a block of requested addresses.
+  DCHECK(code_range_->size() == requested);
+  LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  Address base = reinterpret_cast<Address>(code_range_->address());
+  Address aligned_base =
+      RoundUp(reinterpret_cast<Address>(code_range_->address()),
+              MemoryChunk::kAlignment);
+  size_t size = code_range_->size() - (aligned_base - base);
+  allocation_list_.Add(FreeBlock(aligned_base, size));
+  current_allocation_block_index_ = 0;
+  return true;
+}
+
+
+int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
+                                       const FreeBlock* right) {
+  // The entire point of CodeRange is that the difference between two
+  // addresses in the range can be represented as a signed 32-bit int,
+  // so the cast is semantically correct.
+  return static_cast<int>(left->start - right->start);
+}
+
+
+bool CodeRange::GetNextAllocationBlock(size_t requested) {
+  for (current_allocation_block_index_++;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return true;  // Found a large enough allocation block.
+    }
+  }
+
+  // Sort and merge the free blocks on the free list and the allocation list.
+  free_list_.AddAll(allocation_list_);
+  allocation_list_.Clear();
+  free_list_.Sort(&CompareFreeBlockAddress);
+  for (int i = 0; i < free_list_.length();) {
+    FreeBlock merged = free_list_[i];
+    i++;
+    // Add adjacent free blocks to the current merged block.
+    while (i < free_list_.length() &&
+           free_list_[i].start == merged.start + merged.size) {
+      merged.size += free_list_[i].size;
+      i++;
+    }
+    if (merged.size > 0) {
+      allocation_list_.Add(merged);
+    }
+  }
+  free_list_.Clear();
+
+  for (current_allocation_block_index_ = 0;
+       current_allocation_block_index_ < allocation_list_.length();
+       current_allocation_block_index_++) {
+    if (requested <= allocation_list_[current_allocation_block_index_].size) {
+      return true;  // Found a large enough allocation block.
+    }
+  }
+  current_allocation_block_index_ = 0;
+  // Code range is full or too fragmented.
+  return false;
+}
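+
+
+// Illustrative merge example (addresses assumed): after sorting, free blocks
+// {start: 0x1000, size: 0x800} and {start: 0x1800, size: 0x800} are adjacent
+// (0x1000 + 0x800 == 0x1800) and coalesce into one allocation block
+// {start: 0x1000, size: 0x1000}.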
+
+
+Address CodeRange::AllocateRawMemory(const size_t requested_size,
+                                     const size_t commit_size,
+                                     size_t* allocated) {
+  DCHECK(commit_size <= requested_size);
+  DCHECK(allocation_list_.length() == 0 ||
+         current_allocation_block_index_ < allocation_list_.length());
+  if (allocation_list_.length() == 0 ||
+      requested_size > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.
+    if (!GetNextAllocationBlock(requested_size)) return NULL;
+  }
+  // Commit the requested memory at the start of the current allocation block.
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+  FreeBlock current = allocation_list_[current_allocation_block_index_];
+  if (aligned_requested >= (current.size - Page::kPageSize)) {
+    // Don't leave a small free block, useless for a large object or chunk.
+    *allocated = current.size;
+  } else {
+    *allocated = aligned_requested;
+  }
+  DCHECK(*allocated <= current.size);
+  DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
+  if (!isolate_->memory_allocator()->CommitExecutableMemory(
+          code_range_, current.start, commit_size, *allocated)) {
+    *allocated = 0;
+    return NULL;
+  }
+  allocation_list_[current_allocation_block_index_].start += *allocated;
+  allocation_list_[current_allocation_block_index_].size -= *allocated;
+  if (*allocated == current.size) {
+    // This block is used up, get the next one.
+    GetNextAllocationBlock(0);
+  }
+  return current.start;
+}
+
+
+bool CodeRange::CommitRawMemory(Address start, size_t length) {
+  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
+}
+
+
+bool CodeRange::UncommitRawMemory(Address start, size_t length) {
+  return code_range_->Uncommit(start, length);
+}
+
+
+void CodeRange::FreeRawMemory(Address address, size_t length) {
+  DCHECK(IsAddressAligned(address, MemoryChunk::kAlignment));
+  free_list_.Add(FreeBlock(address, length));
+  code_range_->Uncommit(address, length);
+}
+
+
+void CodeRange::TearDown() {
+  delete code_range_;  // Frees all memory in the virtual memory range.
+  code_range_ = NULL;
+  free_list_.Free();
+  allocation_list_.Free();
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryAllocator
+//
+
+MemoryAllocator::MemoryAllocator(Isolate* isolate)
+    : isolate_(isolate),
+      capacity_(0),
+      capacity_executable_(0),
+      size_(0),
+      size_executable_(0),
+      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+      highest_ever_allocated_(reinterpret_cast<void*>(0)) {}
+
+
+bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
+  capacity_ = RoundUp(capacity, Page::kPageSize);
+  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
+  DCHECK_GE(capacity_, capacity_executable_);
+
+  size_ = 0;
+  size_executable_ = 0;
+
+  return true;
+}
+
+
+void MemoryAllocator::TearDown() {
+  // Check that spaces were torn down before MemoryAllocator.
+  DCHECK(size_ == 0);
+  // TODO(gc) this will be true again when we fix FreeMemory.
+  // DCHECK(size_executable_ == 0);
+  capacity_ = 0;
+  capacity_executable_ = 0;
+}
+
+
+bool MemoryAllocator::CommitMemory(Address base, size_t size,
+                                   Executability executable) {
+  if (!base::VirtualMemory::CommitRegion(base, size,
+                                         executable == EXECUTABLE)) {
+    return false;
+  }
+  UpdateAllocatedSpaceLimits(base, base + size);
+  return true;
+}
+
+
+void MemoryAllocator::FreeMemory(base::VirtualMemory* reservation,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  DCHECK(reservation->IsReserved());
+  size_t size = reservation->size();
+  DCHECK(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    DCHECK(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  // Code which is part of the code-range does not have its own VirtualMemory.
+  DCHECK(isolate_->code_range() == NULL ||
+         !isolate_->code_range()->contains(
+             static_cast<Address>(reservation->address())));
+  DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
+         !isolate_->code_range()->valid());
+  reservation->Release();
+}
+
+
+void MemoryAllocator::FreeMemory(Address base, size_t size,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  DCHECK(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    DCHECK(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  if (isolate_->code_range() != NULL &&
+      isolate_->code_range()->contains(static_cast<Address>(base))) {
+    DCHECK(executable == EXECUTABLE);
+    isolate_->code_range()->FreeRawMemory(base, size);
+  } else {
+    DCHECK(executable == NOT_EXECUTABLE || isolate_->code_range() == NULL ||
+           !isolate_->code_range()->valid());
+    bool result = base::VirtualMemory::ReleaseRegion(base, size);
+    USE(result);
+    DCHECK(result);
+  }
+}
+
+
+Address MemoryAllocator::ReserveAlignedMemory(size_t size, size_t alignment,
+                                              base::VirtualMemory* controller) {
+  base::VirtualMemory reservation(size, alignment);
+
+  if (!reservation.IsReserved()) return NULL;
+  size_ += reservation.size();
+  Address base =
+      RoundUp(static_cast<Address>(reservation.address()), alignment);
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+Address MemoryAllocator::AllocateAlignedMemory(
+    size_t reserve_size, size_t commit_size, size_t alignment,
+    Executability executable, base::VirtualMemory* controller) {
+  DCHECK(commit_size <= reserve_size);
+  base::VirtualMemory reservation;
+  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
+  if (base == NULL) return NULL;
+
+  if (executable == EXECUTABLE) {
+    if (!CommitExecutableMemory(&reservation, base, commit_size,
+                                reserve_size)) {
+      base = NULL;
+    }
+  } else {
+    if (reservation.Commit(base, commit_size, false)) {
+      UpdateAllocatedSpaceLimits(base, base + commit_size);
+    } else {
+      base = NULL;
+    }
+  }
+
+  if (base == NULL) {
+    // Failed to commit the body. Release the mapping and any partially
+    // committed regions inside it.
+    reservation.Release();
+    return NULL;
+  }
+
+  controller->TakeControl(&reservation);
+  return base;
+}
+
+
+void Page::InitializeAsAnchor(PagedSpace* owner) {
+  set_owner(owner);
+  set_prev_page(this);
+  set_next_page(this);
+}
+
+
+NewSpacePage* NewSpacePage::Initialize(Heap* heap, Address start,
+                                       SemiSpace* semi_space) {
+  Address area_start = start + NewSpacePage::kObjectStartOffset;
+  Address area_end = start + Page::kPageSize;
+
+  MemoryChunk* chunk =
+      MemoryChunk::Initialize(heap, start, Page::kPageSize, area_start,
+                              area_end, NOT_EXECUTABLE, semi_space);
+  chunk->set_next_chunk(NULL);
+  chunk->set_prev_chunk(NULL);
+  chunk->initialize_scan_on_scavenge(true);
+  bool in_to_space = (semi_space->id() != kFromSpace);
+  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
+                             : MemoryChunk::IN_FROM_SPACE);
+  DCHECK(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
+                                       : MemoryChunk::IN_TO_SPACE));
+  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
+  heap->incremental_marking()->SetNewSpacePageFlags(page);
+  return page;
+}
+
+
+void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
+  set_owner(semi_space);
+  set_next_chunk(this);
+  set_prev_chunk(this);
+  // Clearing all flags marks this invalid anchor page as not being in
+  // new space; all real new-space pages have the new-space flags set.
+  SetFlags(0, ~0);
+}
+
+
+MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
+                                     Address area_start, Address area_end,
+                                     Executability executable, Space* owner) {
+  MemoryChunk* chunk = FromAddress(base);
+
+  DCHECK(base == chunk->address());
+
+  chunk->heap_ = heap;
+  chunk->size_ = size;
+  chunk->area_start_ = area_start;
+  chunk->area_end_ = area_end;
+  chunk->flags_ = 0;
+  chunk->set_owner(owner);
+  chunk->InitializeReservedMemory();
+  chunk->slots_buffer_ = NULL;
+  chunk->skip_list_ = NULL;
+  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
+  chunk->progress_bar_ = 0;
+  chunk->high_water_mark_ = static_cast<int>(area_start - base);
+  chunk->set_parallel_sweeping(SWEEPING_DONE);
+  chunk->available_in_small_free_list_ = 0;
+  chunk->available_in_medium_free_list_ = 0;
+  chunk->available_in_large_free_list_ = 0;
+  chunk->available_in_huge_free_list_ = 0;
+  chunk->non_available_small_blocks_ = 0;
+  chunk->ResetLiveBytes();
+  Bitmap::Clear(chunk);
+  chunk->initialize_scan_on_scavenge(false);
+  chunk->SetFlag(WAS_SWEPT);
+
+  DCHECK(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
+  DCHECK(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
+
+  if (executable == EXECUTABLE) {
+    chunk->SetFlag(IS_EXECUTABLE);
+  }
+
+  if (owner == heap->old_data_space()) {
+    chunk->SetFlag(CONTAINS_ONLY_DATA);
+  }
+
+  return chunk;
+}
+
+
+// Commit MemoryChunk area to the requested size.
+bool MemoryChunk::CommitArea(size_t requested) {
+  size_t guard_size =
+      IsFlagSet(IS_EXECUTABLE) ? MemoryAllocator::CodePageGuardSize() : 0;
+  size_t header_size = area_start() - address() - guard_size;
+  size_t commit_size =
+      RoundUp(header_size + requested, base::OS::CommitPageSize());
+  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
+                                  base::OS::CommitPageSize());
+
+  if (commit_size > committed_size) {
+    // The commit size must be less than or equal to the reserved size.
+    DCHECK(commit_size <= size() - 2 * guard_size);
+    // Append the committed area.
+    Address start = address() + committed_size + guard_size;
+    size_t length = commit_size - committed_size;
+    if (reservation_.IsReserved()) {
+      Executability executable =
+          IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+      if (!heap()->isolate()->memory_allocator()->CommitMemory(start, length,
+                                                               executable)) {
+        return false;
+      }
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      DCHECK(code_range != NULL && code_range->valid() &&
+             IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->CommitRawMemory(start, length)) return false;
+    }
+
+    if (Heap::ShouldZapGarbage()) {
+      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
+    }
+  } else if (commit_size < committed_size) {
+    DCHECK(commit_size > 0);
+    // Shrink the committed area.
+    size_t length = committed_size - commit_size;
+    Address start = address() + committed_size + guard_size - length;
+    if (reservation_.IsReserved()) {
+      if (!reservation_.Uncommit(start, length)) return false;
+    } else {
+      CodeRange* code_range = heap_->isolate()->code_range();
+      DCHECK(code_range != NULL && code_range->valid() &&
+             IsFlagSet(IS_EXECUTABLE));
+      if (!code_range->UncommitRawMemory(start, length)) return false;
+    }
+  }
+
+  area_end_ = area_start_ + requested;
+  return true;
+}
+
+
+void MemoryChunk::InsertAfter(MemoryChunk* other) {
+  MemoryChunk* other_next = other->next_chunk();
+
+  set_next_chunk(other_next);
+  set_prev_chunk(other);
+  other_next->set_prev_chunk(this);
+  other->set_next_chunk(this);
+}
+
+
+void MemoryChunk::Unlink() {
+  MemoryChunk* next_element = next_chunk();
+  MemoryChunk* prev_element = prev_chunk();
+  next_element->set_prev_chunk(prev_element);
+  prev_element->set_next_chunk(next_element);
+  set_prev_chunk(NULL);
+  set_next_chunk(NULL);
+}
+
+
+MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
+                                            intptr_t commit_area_size,
+                                            Executability executable,
+                                            Space* owner) {
+  DCHECK(commit_area_size <= reserve_area_size);
+
+  size_t chunk_size;
+  Heap* heap = isolate_->heap();
+  Address base = NULL;
+  base::VirtualMemory reservation;
+  Address area_start = NULL;
+  Address area_end = NULL;
+
+  //
+  // MemoryChunk layout:
+  //
+  //             Executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |           Header           |
+  // +----------------------------+<- base + CodePageGuardStartOffset
+  // |           Guard            |
+  // +----------------------------+<- area_start_
+  // |           Area             |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |   Committed but not used   |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- aligned at OS page boundary
+  // |           Guard            |
+  // +----------------------------+<- base + chunk_size
+  //
+  //           Non-executable
+  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
+  // |          Header            |
+  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
+  // |           Area             |
+  // +----------------------------+<- area_end_ (area_start + commit_area_size)
+  // |  Committed but not used    |
+  // +----------------------------+<- aligned at OS page boundary
+  // | Reserved but not committed |
+  // +----------------------------+<- base + chunk_size
+  //
+
+  if (executable == EXECUTABLE) {
+    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
+                         base::OS::CommitPageSize()) +
+                 CodePageGuardSize();
+
+    // Check executable memory limit.
+    if (size_executable_ + chunk_size > capacity_executable_) {
+      LOG(isolate_, StringEvent("MemoryAllocator::AllocateRawMemory",
+                                "V8 Executable Allocation capacity exceeded"));
+      return NULL;
+    }
+
+    // Size of header (not executable) plus area (executable).
+    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
+                                 base::OS::CommitPageSize());
+    // Allocate executable memory either from code range or from the
+    // OS.
+    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
+      base = isolate_->code_range()->AllocateRawMemory(chunk_size, commit_size,
+                                                       &chunk_size);
+      DCHECK(
+          IsAligned(reinterpret_cast<intptr_t>(base), MemoryChunk::kAlignment));
+      if (base == NULL) return NULL;
+      size_ += chunk_size;
+      // Update executable memory size.
+      size_executable_ += chunk_size;
+    } else {
+      base = AllocateAlignedMemory(chunk_size, commit_size,
+                                   MemoryChunk::kAlignment, executable,
+                                   &reservation);
+      if (base == NULL) return NULL;
+      // Update executable memory size.
+      size_executable_ += reservation.size();
+    }
+
+    if (Heap::ShouldZapGarbage()) {
+      ZapBlock(base, CodePageGuardStartOffset());
+      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
+    }
+
+    area_start = base + CodePageAreaStartOffset();
+    area_end = area_start + commit_area_size;
+  } else {
+    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
+                         base::OS::CommitPageSize());
+    size_t commit_size =
+        RoundUp(MemoryChunk::kObjectStartOffset + commit_area_size,
+                base::OS::CommitPageSize());
+    base =
+        AllocateAlignedMemory(chunk_size, commit_size, MemoryChunk::kAlignment,
+                              executable, &reservation);
+
+    if (base == NULL) return NULL;
+
+    if (Heap::ShouldZapGarbage()) {
+      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
+    }
+
+    area_start = base + Page::kObjectStartOffset;
+    area_end = area_start + commit_area_size;
+  }
+
+  // Use chunk_size for statistics and callbacks because we assume that they
+  // treat reserved but not-yet committed memory regions of chunks as allocated.
+  isolate_->counters()->memory_allocated()->Increment(
+      static_cast<int>(chunk_size));
+
+  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
+  if (owner != NULL) {
+    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
+    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
+  }
+
+  MemoryChunk* result = MemoryChunk::Initialize(
+      heap, base, chunk_size, area_start, area_end, executable, owner);
+  result->set_reserved_memory(&reservation);
+  return result;
+}
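+
+
+// A worked sizing example for the executable branch above (4 KB commit pages
+// assumed): for a 32 KB reserve_area_size,
+//
+//   chunk_size = RoundUp(CodePageAreaStartOffset() + 32 KB, 4 KB)
+//              + CodePageGuardSize(),
+//
+// i.e. header plus leading guard plus area, rounded up to an OS page, plus
+// one trailing guard page, matching the executable layout diagram above.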
+
+
+void Page::ResetFreeListStatistics() {
+  non_available_small_blocks_ = 0;
+  available_in_small_free_list_ = 0;
+  available_in_medium_free_list_ = 0;
+  available_in_large_free_list_ = 0;
+  available_in_huge_free_list_ = 0;
+}
+
+
+Page* MemoryAllocator::AllocatePage(intptr_t size, PagedSpace* owner,
+                                    Executability executable) {
+  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
+
+  if (chunk == NULL) return NULL;
+
+  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
+}
+
+
+LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
+                                              Space* owner,
+                                              Executability executable) {
+  MemoryChunk* chunk =
+      AllocateChunk(object_size, object_size, executable, owner);
+  if (chunk == NULL) return NULL;
+  return LargePage::Initialize(isolate_->heap(), chunk);
+}
+
+
+void MemoryAllocator::Free(MemoryChunk* chunk) {
+  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
+  if (chunk->owner() != NULL) {
+    ObjectSpace space =
+        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
+    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
+  }
+
+  isolate_->heap()->RememberUnmappedPage(reinterpret_cast<Address>(chunk),
+                                         chunk->IsEvacuationCandidate());
+
+  delete chunk->slots_buffer();
+  delete chunk->skip_list();
+
+  base::VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    FreeMemory(reservation, chunk->executable());
+  } else {
+    FreeMemory(chunk->address(), chunk->size(), chunk->executable());
+  }
+}
+
+
+bool MemoryAllocator::CommitBlock(Address start, size_t size,
+                                  Executability executable) {
+  if (!CommitMemory(start, size, executable)) return false;
+
+  if (Heap::ShouldZapGarbage()) {
+    ZapBlock(start, size);
+  }
+
+  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
+  return true;
+}
+
+
+bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
+  if (!base::VirtualMemory::UncommitRegion(start, size)) return false;
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+  return true;
+}
+
+
+void MemoryAllocator::ZapBlock(Address start, size_t size) {
+  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
+    Memory::Address_at(start + s) = kZapValue;
+  }
+}
+
+
+void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
+                                                AllocationAction action,
+                                                size_t size) {
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    MemoryAllocationCallbackRegistration registration =
+        memory_allocation_callbacks_[i];
+    if ((registration.space & space) == space &&
+        (registration.action & action) == action)
+      registration.callback(space, action, static_cast<int>(size));
+  }
+}
+
+
+bool MemoryAllocator::MemoryAllocationCallbackRegistered(
+    MemoryAllocationCallback callback) {
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    if (memory_allocation_callbacks_[i].callback == callback) return true;
+  }
+  return false;
+}
+
+
+void MemoryAllocator::AddMemoryAllocationCallback(
+    MemoryAllocationCallback callback, ObjectSpace space,
+    AllocationAction action) {
+  DCHECK(callback != NULL);
+  MemoryAllocationCallbackRegistration registration(callback, space, action);
+  DCHECK(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
+  return memory_allocation_callbacks_.Add(registration);
+}
+
+
+void MemoryAllocator::RemoveMemoryAllocationCallback(
+    MemoryAllocationCallback callback) {
+  DCHECK(callback != NULL);
+  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
+    if (memory_allocation_callbacks_[i].callback == callback) {
+      memory_allocation_callbacks_.Remove(i);
+      return;
+    }
+  }
+  UNREACHABLE();
+}
+
+
+#ifdef DEBUG
+void MemoryAllocator::ReportStatistics() {
+  float pct = static_cast<float>(capacity_ - size_) / capacity_;
+  PrintF("  capacity: %" V8_PTR_PREFIX
+         "d"
+         ", used: %" V8_PTR_PREFIX
+         "d"
+         ", available: %%%d\n\n",
+         capacity_, size_, static_cast<int>(pct * 100));
+}
+#endif
+
+
+int MemoryAllocator::CodePageGuardStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return RoundUp(Page::kObjectStartOffset, base::OS::CommitPageSize());
+}
+
+
+int MemoryAllocator::CodePageGuardSize() {
+  return static_cast<int>(base::OS::CommitPageSize());
+}
+
+
+int MemoryAllocator::CodePageAreaStartOffset() {
+  // We are guarding code pages: the first OS page after the header
+  // will be protected as non-writable.
+  return CodePageGuardStartOffset() + CodePageGuardSize();
+}
+
+
+int MemoryAllocator::CodePageAreaEndOffset() {
+  // We are guarding code pages: the last OS page will be protected as
+  // non-writable.
+  return Page::kPageSize - static_cast<int>(base::OS::CommitPageSize());
+}
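+
+
+// Taken together (assuming 4 KB commit pages and
+// Page::kObjectStartOffset <= 4 KB), a code page lays out as a 4 KB header
+// region, a 4 KB leading guard, the executable area, and a 4 KB trailing
+// guard, so the usable area per code page is
+//
+//   CodePageAreaEndOffset() - CodePageAreaStartOffset()
+//       == Page::kPageSize - 3 * 4 KB.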
+
+
+bool MemoryAllocator::CommitExecutableMemory(base::VirtualMemory* vm,
+                                             Address start, size_t commit_size,
+                                             size_t reserved_size) {
+  // Commit page header (not executable).
+  if (!vm->Commit(start, CodePageGuardStartOffset(), false)) {
+    return false;
+  }
+
+  // Create guard page after the header.
+  if (!vm->Guard(start + CodePageGuardStartOffset())) {
+    return false;
+  }
+
+  // Commit page body (executable).
+  if (!vm->Commit(start + CodePageAreaStartOffset(),
+                  commit_size - CodePageGuardStartOffset(), true)) {
+    return false;
+  }
+
+  // Create guard page before the end.
+  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
+    return false;
+  }
+
+  UpdateAllocatedSpaceLimits(start, start + CodePageAreaStartOffset() +
+                                        commit_size -
+                                        CodePageGuardStartOffset());
+  return true;
+}
+
+
+// -----------------------------------------------------------------------------
+// MemoryChunk implementation
+
+void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
+    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
+  }
+  chunk->IncrementLiveBytes(by);
+}
+
+
+// -----------------------------------------------------------------------------
+// PagedSpace implementation
+
+PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
+                       Executability executable)
+    : Space(heap, id, executable),
+      free_list_(this),
+      unswept_free_bytes_(0),
+      end_of_unswept_pages_(NULL),
+      emergency_memory_(NULL) {
+  if (id == CODE_SPACE) {
+    area_size_ = heap->isolate()->memory_allocator()->CodePageAreaSize();
+  } else {
+    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
+  }
+  max_capacity_ =
+      (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize) * AreaSize();
+  accounting_stats_.Clear();
+
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
+
+  anchor_.InitializeAsAnchor(this);
+}
+
+
+bool PagedSpace::SetUp() { return true; }
+
+
+bool PagedSpace::HasBeenSetUp() { return true; }
+
+
+void PagedSpace::TearDown() {
+  PageIterator iterator(this);
+  while (iterator.has_next()) {
+    heap()->isolate()->memory_allocator()->Free(iterator.next());
+  }
+  anchor_.set_next_page(&anchor_);
+  anchor_.set_prev_page(&anchor_);
+  accounting_stats_.Clear();
+}
+
+
+size_t PagedSpace::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  size_t size = 0;
+  PageIterator it(this);
+  while (it.has_next()) {
+    size += it.next()->CommittedPhysicalMemory();
+  }
+  return size;
+}
+
+
+Object* PagedSpace::FindObject(Address addr) {
+  // Note: this function can only be called on iterable spaces.
+  DCHECK(!heap()->mark_compact_collector()->in_use());
+
+  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
+
+  Page* p = Page::FromAddress(addr);
+  HeapObjectIterator it(p, NULL);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    Address cur = obj->address();
+    Address next = cur + obj->Size();
+    if ((cur <= addr) && (addr < next)) return obj;
+  }
+
+  UNREACHABLE();
+  return Smi::FromInt(0);
+}
+
+
+bool PagedSpace::CanExpand() {
+  DCHECK(max_capacity_ % AreaSize() == 0);
+
+  if (Capacity() == max_capacity_) return false;
+
+  DCHECK(Capacity() < max_capacity_);
+
+  // Are we going to exceed capacity for this space?
+  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
+
+  return true;
+}
+
+
+bool PagedSpace::Expand() {
+  if (!CanExpand()) return false;
+
+  intptr_t size = AreaSize();
+
+  if (anchor_.next_page() == &anchor_) {
+    size = SizeOfFirstPage();
+  }
+
+  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(size, this,
+                                                                executable());
+  if (p == NULL) return false;
+
+  DCHECK(Capacity() <= max_capacity_);
+
+  p->InsertAfter(anchor_.prev_page());
+
+  return true;
+}
+
+
+intptr_t PagedSpace::SizeOfFirstPage() {
+  // If using an out-of-line (ool) constant pool, transfer the constant pool
+  // allowance from the code space to the old pointer space.
+  static const int constant_pool_delta = FLAG_enable_ool_constant_pool ? 48 : 0;
+  int size = 0;
+  switch (identity()) {
+    case OLD_POINTER_SPACE:
+      size = (112 + constant_pool_delta) * kPointerSize * KB;
+      break;
+    case OLD_DATA_SPACE:
+      size = 192 * KB;
+      break;
+    case MAP_SPACE:
+      size = 16 * kPointerSize * KB;
+      break;
+    case CELL_SPACE:
+      size = 16 * kPointerSize * KB;
+      break;
+    case PROPERTY_CELL_SPACE:
+      size = 8 * kPointerSize * KB;
+      break;
+    case CODE_SPACE: {
+      CodeRange* code_range = heap()->isolate()->code_range();
+      if (code_range != NULL && code_range->valid()) {
+        // When a code range exists, code pages are allocated in a special way
+        // (from the reserved code range). That part of the code is not yet
+        // upgraded to handle small pages.
+        size = AreaSize();
+      } else {
+        size = RoundUp((480 - constant_pool_delta) * KB *
+                           FullCodeGenerator::kBootCodeSizeMultiplier / 100,
+                       kPointerSize);
+      }
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+  return Min(size, AreaSize());
+}
+
+
+int PagedSpace::CountTotalPages() {
+  PageIterator it(this);
+  int count = 0;
+  while (it.has_next()) {
+    it.next();
+    count++;
+  }
+  return count;
+}
+
+
+void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
+  sizes->huge_size_ = page->available_in_huge_free_list();
+  sizes->small_size_ = page->available_in_small_free_list();
+  sizes->medium_size_ = page->available_in_medium_free_list();
+  sizes->large_size_ = page->available_in_large_free_list();
+}
+
+
+void PagedSpace::ResetFreeListStatistics() {
+  PageIterator page_iterator(this);
+  while (page_iterator.has_next()) {
+    Page* page = page_iterator.next();
+    page->ResetFreeListStatistics();
+  }
+}
+
+
+void PagedSpace::IncreaseCapacity(int size) {
+  accounting_stats_.ExpandSpace(size);
+}
+
+
+void PagedSpace::ReleasePage(Page* page) {
+  DCHECK(page->LiveBytes() == 0);
+  DCHECK(AreaSize() == page->area_size());
+
+  if (page->WasSwept()) {
+    intptr_t size = free_list_.EvictFreeListItems(page);
+    accounting_stats_.AllocateBytes(size);
+    DCHECK_EQ(AreaSize(), static_cast<int>(size));
+  } else {
+    DecreaseUnsweptFreeBytes(page);
+  }
+
+  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
+    heap()->decrement_scan_on_scavenge_pages();
+    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  DCHECK(!free_list_.ContainsPageFreeListItems(page));
+
+  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
+  }
+
+  page->Unlink();
+  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
+    heap()->isolate()->memory_allocator()->Free(page);
+  } else {
+    heap()->QueueMemoryChunkForFree(page);
+  }
+
+  DCHECK(Capacity() > 0);
+  accounting_stats_.ShrinkSpace(AreaSize());
+}
+
+
+void PagedSpace::CreateEmergencyMemory() {
+  emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
+      AreaSize(), AreaSize(), executable(), this);
+}
+
+
+void PagedSpace::FreeEmergencyMemory() {
+  Page* page = static_cast<Page*>(emergency_memory_);
+  DCHECK(page->LiveBytes() == 0);
+  DCHECK(AreaSize() == page->area_size());
+  DCHECK(!free_list_.ContainsPageFreeListItems(page));
+  heap()->isolate()->memory_allocator()->Free(page);
+  emergency_memory_ = NULL;
+}
+
+
+void PagedSpace::UseEmergencyMemory() {
+  Page* page = Page::Initialize(heap(), emergency_memory_, executable(), this);
+  page->InsertAfter(anchor_.prev_page());
+  emergency_memory_ = NULL;
+}
+
+
+#ifdef DEBUG
+void PagedSpace::Print() {}
+#endif
+
+#ifdef VERIFY_HEAP
+void PagedSpace::Verify(ObjectVisitor* visitor) {
+  bool allocation_pointer_found_in_space =
+      (allocation_info_.top() == allocation_info_.limit());
+  PageIterator page_iterator(this);
+  while (page_iterator.has_next()) {
+    Page* page = page_iterator.next();
+    CHECK(page->owner() == this);
+    if (page == Page::FromAllocationTop(allocation_info_.top())) {
+      allocation_pointer_found_in_space = true;
+    }
+    CHECK(page->WasSwept());
+    HeapObjectIterator it(page, NULL);
+    Address end_of_previous_object = page->area_start();
+    Address top = page->area_end();
+    int black_size = 0;
+    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+      CHECK(end_of_previous_object <= object->address());
+
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      CHECK(map->IsMap());
+      CHECK(heap()->map_space()->Contains(map));
+
+      // Perform space-specific object verification.
+      VerifyObject(object);
+
+      // The object itself should look OK.
+      object->ObjectVerify();
+
+      // All the interior pointers should be contained in the heap.
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, visitor);
+      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
+        black_size += size;
+      }
+
+      CHECK(object->address() + size <= top);
+      end_of_previous_object = object->address() + size;
+    }
+    CHECK_LE(black_size, page->LiveBytes());
+  }
+  CHECK(allocation_pointer_found_in_space);
+}
+#endif  // VERIFY_HEAP
+
+// -----------------------------------------------------------------------------
+// NewSpace implementation
+
+
+bool NewSpace::SetUp(int reserved_semispace_capacity,
+                     int maximum_semispace_capacity) {
+  // Set up new space based on the preallocated memory block defined by
+  // start and size. The provided space is divided into two semi-spaces.
+  // To support fast containment testing in the new space, the size of
+  // this chunk must be a power of two and it must be aligned to its size.
+  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
+
+  size_t size = 2 * reserved_semispace_capacity;
+  Address base = heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
+      size, size, &reservation_);
+  if (base == NULL) return false;
+
+  chunk_base_ = base;
+  chunk_size_ = static_cast<uintptr_t>(size);
+  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
+
+  DCHECK(initial_semispace_capacity <= maximum_semispace_capacity);
+  DCHECK(base::bits::IsPowerOfTwo32(maximum_semispace_capacity));
+
+  // Allocate and set up the histogram arrays if necessary.
+  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
+  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
+
+#define SET_NAME(name)                        \
+  allocated_histogram_[name].set_name(#name); \
+  promoted_histogram_[name].set_name(#name);
+  INSTANCE_TYPE_LIST(SET_NAME)
+#undef SET_NAME
+
+  DCHECK(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
+  DCHECK(static_cast<intptr_t>(chunk_size_) >=
+         2 * heap()->ReservedSemiSpaceSize());
+  DCHECK(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
+
+  to_space_.SetUp(chunk_base_, initial_semispace_capacity,
+                  maximum_semispace_capacity);
+  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
+                    initial_semispace_capacity, maximum_semispace_capacity);
+  if (!to_space_.Commit()) {
+    return false;
+  }
+  DCHECK(!from_space_.is_committed());  // No need to use memory yet.
+
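+  // Because the reservation is aligned to twice the semispace capacity, a
+  // single mask-and-compare against start_ suffices as the fast containment
+  // test promised above.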
+  start_ = chunk_base_;
+  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
+  object_mask_ = address_mask_ | kHeapObjectTagMask;
+  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
+
+  ResetAllocationInfo();
+
+  return true;
+}
+
+
+void NewSpace::TearDown() {
+  if (allocated_histogram_) {
+    DeleteArray(allocated_histogram_);
+    allocated_histogram_ = NULL;
+  }
+  if (promoted_histogram_) {
+    DeleteArray(promoted_histogram_);
+    promoted_histogram_ = NULL;
+  }
+
+  start_ = NULL;
+  allocation_info_.set_top(NULL);
+  allocation_info_.set_limit(NULL);
+
+  to_space_.TearDown();
+  from_space_.TearDown();
+
+  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
+
+  DCHECK(reservation_.IsReserved());
+  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
+                                                    NOT_EXECUTABLE);
+  chunk_base_ = NULL;
+  chunk_size_ = 0;
+}
+
+
+void NewSpace::Flip() { SemiSpace::Swap(&from_space_, &to_space_); }
+
+
+void NewSpace::Grow() {
+  // Double the semispace size but only up to maximum capacity.
+  DCHECK(TotalCapacity() < MaximumCapacity());
+  int new_capacity =
+      Min(MaximumCapacity(), 2 * static_cast<int>(TotalCapacity()));
+  if (to_space_.GrowTo(new_capacity)) {
+    // Only grow from space if we managed to grow to-space.
+    if (!from_space_.GrowTo(new_capacity)) {
+      // If we managed to grow to-space but couldn't grow from-space,
+      // attempt to shrink to-space.
+      if (!to_space_.ShrinkTo(from_space_.TotalCapacity())) {
+        // We are in an inconsistent state because we could not
+        // commit/uncommit memory from new space.
+        V8::FatalProcessOutOfMemory("Failed to grow new space.");
+      }
+    }
+  }
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::Shrink() {
+  int new_capacity = Max(InitialTotalCapacity(), 2 * SizeAsInt());
+  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
+  if (rounded_new_capacity < TotalCapacity() &&
+      to_space_.ShrinkTo(rounded_new_capacity)) {
+    // Only shrink from-space if we managed to shrink to-space.
+    from_space_.Reset();
+    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
+      // If we managed to shrink to-space but couldn't shrink from
+      // space, attempt to grow to-space again.
+      if (!to_space_.GrowTo(from_space_.TotalCapacity())) {
+        // We are in an inconsistent state because we could not
+        // commit/uncommit memory from new space.
+        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
+      }
+    }
+  }
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::UpdateAllocationInfo() {
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  allocation_info_.set_top(to_space_.page_low());
+  allocation_info_.set_limit(to_space_.page_high());
+  UpdateInlineAllocationLimit(0);
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+void NewSpace::ResetAllocationInfo() {
+  to_space_.Reset();
+  UpdateAllocationInfo();
+  pages_used_ = 0;
+  // Clear all mark-bits in the to-space.
+  NewSpacePageIterator it(&to_space_);
+  while (it.has_next()) {
+    Bitmap::Clear(it.next());
+  }
+}
+
+
+void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
+  if (heap()->inline_allocation_disabled()) {
+    // Lowest limit when linear allocation was disabled.
+    Address high = to_space_.page_high();
+    Address new_top = allocation_info_.top() + size_in_bytes;
+    allocation_info_.set_limit(Min(new_top, high));
+  } else if (inline_allocation_limit_step() == 0) {
+    // Normal limit is the end of the current page.
+    allocation_info_.set_limit(to_space_.page_high());
+  } else {
+    // Lower limit during incremental marking.
+    Address high = to_space_.page_high();
+    Address new_top = allocation_info_.top() + size_in_bytes;
+    Address new_limit = new_top + inline_allocation_limit_step_;
+    allocation_info_.set_limit(Min(new_limit, high));
+  }
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+}
+
+
+bool NewSpace::AddFreshPage() {
+  Address top = allocation_info_.top();
+  if (NewSpacePage::IsAtStart(top)) {
+    // The current page is already empty. Don't try to make another.
+
+    // We should only get here if someone asks to allocate more
+    // than what can be stored in a single page.
+    // TODO(gc): Change the limit on new-space allocation to prevent this
+    // from happening (all such allocations should go directly to LOSpace).
+    return false;
+  }
+  if (!to_space_.AdvancePage()) {
+    // Failed to get a new page in to-space.
+    return false;
+  }
+
+  // Clear remainder of current page.
+  Address limit = NewSpacePage::FromLimit(top)->area_end();
+  if (heap()->gc_state() == Heap::SCAVENGE) {
+    heap()->promotion_queue()->SetNewLimit(limit);
+  }
+
+  int remaining_in_page = static_cast<int>(limit - top);
+  heap()->CreateFillerObjectAt(top, remaining_in_page);
+  pages_used_++;
+  UpdateAllocationInfo();
+
+  return true;
+}
+
+
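+// Slow path of allocation: either the limit was lowered artificially (for
+// incremental marking or because inline allocation is disabled) or the
+// current page is full. Both retry paths first credit the bytes allocated
+// since the last step to the incremental marker.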
+AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
+  Address old_top = allocation_info_.top();
+  Address high = to_space_.page_high();
+  if (allocation_info_.limit() < high) {
+    // Either the limit has been lowered because linear allocation was disabled
+    // or because incremental marking wants to get a chance to do a step. Set
+    // the new limit accordingly.
+    Address new_top = old_top + size_in_bytes;
+    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
+    UpdateInlineAllocationLimit(size_in_bytes);
+    top_on_previous_step_ = new_top;
+    return AllocateRaw(size_in_bytes);
+  } else if (AddFreshPage()) {
+    // Switched to new page. Try allocating again.
+    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
+    top_on_previous_step_ = to_space_.page_low();
+    return AllocateRaw(size_in_bytes);
+  } else {
+    return AllocationResult::Retry();
+  }
+}
+
+
+#ifdef VERIFY_HEAP
+// We do not use the SemiSpaceIterator because verification doesn't assume
+// that it works (it depends on the invariants we are checking).
+void NewSpace::Verify() {
+  // The allocation pointer should be in the space or at the very end.
+  DCHECK_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
+
+  // There should be objects packed in from the low address up to the
+  // allocation pointer.
+  Address current = to_space_.first_page()->area_start();
+  CHECK_EQ(current, to_space_.space_start());
+
+  while (current != top()) {
+    if (!NewSpacePage::IsAtEnd(current)) {
+      // The allocation pointer should not be in the middle of an object.
+      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
+            current < top());
+
+      HeapObject* object = HeapObject::FromAddress(current);
+
+      // The first word should be a map, and we expect all map pointers to
+      // be in map space.
+      Map* map = object->map();
+      CHECK(map->IsMap());
+      CHECK(heap()->map_space()->Contains(map));
+
+      // The object should not be code or a map.
+      CHECK(!object->IsMap());
+      CHECK(!object->IsCode());
+
+      // The object itself should look OK.
+      object->ObjectVerify();
+
+      // All the interior pointers should be contained in the heap.
+      VerifyPointersVisitor visitor;
+      int size = object->Size();
+      object->IterateBody(map->instance_type(), size, &visitor);
+
+      current += size;
+    } else {
+      // At end of page, switch to next page.
+      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
+      // Next page should be valid.
+      CHECK(!page->is_anchor());
+      current = page->area_start();
+    }
+  }
+
+  // Check semi-spaces.
+  CHECK_EQ(from_space_.id(), kFromSpace);
+  CHECK_EQ(to_space_.id(), kToSpace);
+  from_space_.Verify();
+  to_space_.Verify();
+}
+#endif
+
+// -----------------------------------------------------------------------------
+// SemiSpace implementation
+
+void SemiSpace::SetUp(Address start, int initial_capacity,
+                      int maximum_capacity) {
+  // Creates a space in the young generation. The constructor does not
+  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
+  // memory of size 'capacity' when set up, and does not grow or shrink
+  // otherwise.  In the mark-compact collector, the memory region of the from
+  // space is used as the marking stack. It requires contiguous memory
+  // addresses.
+  DCHECK(maximum_capacity >= Page::kPageSize);
+  initial_total_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
+  total_capacity_ = initial_capacity;
+  maximum_total_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
+  maximum_committed_ = 0;
+  committed_ = false;
+  start_ = start;
+  address_mask_ = ~(maximum_capacity - 1);
+  object_mask_ = address_mask_ | kHeapObjectTagMask;
+  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
+  age_mark_ = start_;
+}
+
+
+void SemiSpace::TearDown() {
+  start_ = NULL;
+  total_capacity_ = 0;
+}
+
+
+bool SemiSpace::Commit() {
+  DCHECK(!is_committed());
+  int pages = total_capacity_ / Page::kPageSize;
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(
+          start_, total_capacity_, executable())) {
+    return false;
+  }
+
+  NewSpacePage* current = anchor();
+  for (int i = 0; i < pages; i++) {
+    NewSpacePage* new_page =
+        NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
+    new_page->InsertAfter(current);
+    current = new_page;
+  }
+
+  SetCapacity(total_capacity_);
+  committed_ = true;
+  Reset();
+  return true;
+}
+
+
+bool SemiSpace::Uncommit() {
+  DCHECK(is_committed());
+  Address start = start_ + maximum_total_capacity_ - total_capacity_;
+  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start,
+                                                            total_capacity_)) {
+    return false;
+  }
+  anchor()->set_next_page(anchor());
+  anchor()->set_prev_page(anchor());
+
+  committed_ = false;
+  return true;
+}
+
+
+size_t SemiSpace::CommittedPhysicalMemory() {
+  if (!is_committed()) return 0;
+  size_t size = 0;
+  NewSpacePageIterator it(this);
+  while (it.has_next()) {
+    size += it.next()->CommittedPhysicalMemory();
+  }
+  return size;
+}
+
+
+bool SemiSpace::GrowTo(int new_capacity) {
+  if (!is_committed()) {
+    if (!Commit()) return false;
+  }
+  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
+  DCHECK(new_capacity <= maximum_total_capacity_);
+  DCHECK(new_capacity > total_capacity_);
+  int pages_before = total_capacity_ / Page::kPageSize;
+  int pages_after = new_capacity / Page::kPageSize;
+
+  size_t delta = new_capacity - total_capacity_;
+
+  DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
+  if (!heap()->isolate()->memory_allocator()->CommitBlock(
+          start_ + total_capacity_, delta, executable())) {
+    return false;
+  }
+  SetCapacity(new_capacity);
+  NewSpacePage* last_page = anchor()->prev_page();
+  DCHECK(last_page != anchor());
+  for (int i = pages_before; i < pages_after; i++) {
+    Address page_address = start_ + i * Page::kPageSize;
+    NewSpacePage* new_page =
+        NewSpacePage::Initialize(heap(), page_address, this);
+    new_page->InsertAfter(last_page);
+    Bitmap::Clear(new_page);
+    // Duplicate the flags that were set on the old page.
+    new_page->SetFlags(last_page->GetFlags(),
+                       NewSpacePage::kCopyOnFlipFlagsMask);
+    last_page = new_page;
+  }
+  return true;
+}
+
+
+bool SemiSpace::ShrinkTo(int new_capacity) {
+  DCHECK((new_capacity & Page::kPageAlignmentMask) == 0);
+  DCHECK(new_capacity >= initial_total_capacity_);
+  DCHECK(new_capacity < total_capacity_);
+  if (is_committed()) {
+    size_t delta = total_capacity_ - new_capacity;
+    DCHECK(IsAligned(delta, base::OS::AllocateAlignment()));
+
+    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
+    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
+      return false;
+    }
+
+    int pages_after = new_capacity / Page::kPageSize;
+    NewSpacePage* new_last_page =
+        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
+    new_last_page->set_next_page(anchor());
+    anchor()->set_prev_page(new_last_page);
+    DCHECK((current_page_ >= first_page()) && (current_page_ <= new_last_page));
+  }
+
+  SetCapacity(new_capacity);
+
+  return true;
+}
+
+
+void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
+  anchor_.set_owner(this);
+  // Fixup back-pointers to anchor. Address of anchor changes
+  // when we swap.
+  anchor_.prev_page()->set_next_page(&anchor_);
+  anchor_.next_page()->set_prev_page(&anchor_);
+
+  bool becomes_to_space = (id_ == kFromSpace);
+  id_ = becomes_to_space ? kToSpace : kFromSpace;
+  NewSpacePage* page = anchor_.next_page();
+  while (page != &anchor_) {
+    page->set_owner(this);
+    page->SetFlags(flags, mask);
+    if (becomes_to_space) {
+      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
+      page->SetFlag(MemoryChunk::IN_TO_SPACE);
+      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+      page->ResetLiveBytes();
+    } else {
+      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
+      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
+    }
+    DCHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    DCHECK(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
+           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
+    page = page->next_page();
+  }
+}
+
+
+void SemiSpace::Reset() {
+  DCHECK(anchor_.next_page() != &anchor_);
+  current_page_ = anchor_.next_page();
+}
+
+
+void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
+  // We won't be swapping semispaces without data in them.
+  DCHECK(from->anchor_.next_page() != &from->anchor_);
+  DCHECK(to->anchor_.next_page() != &to->anchor_);
+
+  // Swap bits.
+  SemiSpace tmp = *from;
+  *from = *to;
+  *to = tmp;
+
+  // Fixup back-pointers to the page list anchor now that its address
+  // has changed.
+  // Swap to/from-space bits on pages.
+  // Copy GC flags from old active space (from-space) to new (to-space).
+  intptr_t flags = from->current_page()->GetFlags();
+  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
+
+  from->FlipPages(0, 0);
+}
+
+
+void SemiSpace::SetCapacity(int new_capacity) {
+  total_capacity_ = new_capacity;
+  if (total_capacity_ > maximum_committed_) {
+    maximum_committed_ = total_capacity_;
+  }
+}
+
+
+void SemiSpace::set_age_mark(Address mark) {
+  DCHECK(NewSpacePage::FromLimit(mark)->semi_space() == this);
+  age_mark_ = mark;
+  // Mark all pages up to the one containing mark.
+  NewSpacePageIterator it(space_start(), mark);
+  while (it.has_next()) {
+    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
+  }
+}
+
+
+#ifdef DEBUG
+void SemiSpace::Print() {}
+#endif
+
+#ifdef VERIFY_HEAP
+void SemiSpace::Verify() {
+  bool is_from_space = (id_ == kFromSpace);
+  NewSpacePage* page = anchor_.next_page();
+  CHECK(anchor_.semi_space() == this);
+  while (page != &anchor_) {
+    CHECK(page->semi_space() == this);
+    CHECK(page->InNewSpace());
+    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
+                                        : MemoryChunk::IN_TO_SPACE));
+    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
+                                         : MemoryChunk::IN_FROM_SPACE));
+    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
+    if (!is_from_space) {
+      // The pointers-from-here-are-interesting flag isn't updated dynamically
+      // on from-space pages, so it might be out of sync with the marking state.
+      if (page->heap()->incremental_marking()->IsMarking()) {
+        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      } else {
+        CHECK(
+            !page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
+      }
+      // TODO(gc): Check that the live_bytes_count_ field matches the
+      // black marking on the page (if we make it match in new-space).
+    }
+    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
+    CHECK(page->prev_page()->next_page() == page);
+    page = page->next_page();
+  }
+}
+#endif
+
+#ifdef DEBUG
+void SemiSpace::AssertValidRange(Address start, Address end) {
+  // Addresses belong to same semi-space
+  NewSpacePage* page = NewSpacePage::FromLimit(start);
+  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
+  SemiSpace* space = page->semi_space();
+  CHECK_EQ(space, end_page->semi_space());
+  // Start address is before end address, either on same page,
+  // or end address is on a later page in the linked list of
+  // semi-space pages.
+  if (page == end_page) {
+    CHECK(start <= end);
+  } else {
+    while (page != end_page) {
+      page = page->next_page();
+      CHECK_NE(page, space->anchor());
+    }
+  }
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// SemiSpaceIterator implementation.
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
+  Initialize(space->bottom(), space->top(), NULL);
+}
+
+
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
+                                     HeapObjectCallback size_func) {
+  Initialize(space->bottom(), space->top(), size_func);
+}
+
+
+SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
+  Initialize(start, space->top(), NULL);
+}
+
+
+SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
+  Initialize(from, to, NULL);
+}
+
+
+void SemiSpaceIterator::Initialize(Address start, Address end,
+                                   HeapObjectCallback size_func) {
+  SemiSpace::AssertValidRange(start, end);
+  current_ = start;
+  limit_ = end;
+  size_func_ = size_func;
+}
+
+
+#ifdef DEBUG
+// heap_histograms is shared; always clear it before using it.
+static void ClearHistograms(Isolate* isolate) {
+// We reset the name each time, though it hasn't changed.
+#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
+  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
+#undef DEF_TYPE_NAME
+
+#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
+  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
+#undef CLEAR_HISTOGRAM
+
+  isolate->js_spill_information()->Clear();
+}
+
+
+static void ClearCodeKindStatistics(int* code_kind_statistics) {
+  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+    code_kind_statistics[i] = 0;
+  }
+}
+
+
+static void ReportCodeKindStatistics(int* code_kind_statistics) {
+  PrintF("\n   Code kind histograms: \n");
+  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
+    if (code_kind_statistics[i] > 0) {
+      PrintF("     %-20s: %10d bytes\n",
+             Code::Kind2String(static_cast<Code::Kind>(i)),
+             code_kind_statistics[i]);
+    }
+  }
+  PrintF("\n");
+}
+
+
+static int CollectHistogramInfo(HeapObject* obj) {
+  Isolate* isolate = obj->GetIsolate();
+  InstanceType type = obj->map()->instance_type();
+  DCHECK(0 <= type && type <= LAST_TYPE);
+  DCHECK(isolate->heap_histograms()[type].name() != NULL);
+  isolate->heap_histograms()[type].increment_number(1);
+  isolate->heap_histograms()[type].increment_bytes(obj->Size());
+
+  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
+    JSObject::cast(obj)
+        ->IncrementSpillStatistics(isolate->js_spill_information());
+  }
+
+  return obj->Size();
+}
+
+
+static void ReportHistogram(Isolate* isolate, bool print_spill) {
+  PrintF("\n  Object Histogram:\n");
+  for (int i = 0; i <= LAST_TYPE; i++) {
+    if (isolate->heap_histograms()[i].number() > 0) {
+      PrintF("    %-34s%10d (%10d bytes)\n",
+             isolate->heap_histograms()[i].name(),
+             isolate->heap_histograms()[i].number(),
+             isolate->heap_histograms()[i].bytes());
+    }
+  }
+  PrintF("\n");
+
+  // Summarize string types.
+  int string_number = 0;
+  int string_bytes = 0;
+#define INCREMENT(type, size, name, camel_name)               \
+  string_number += isolate->heap_histograms()[type].number(); \
+  string_bytes += isolate->heap_histograms()[type].bytes();
+  STRING_TYPE_LIST(INCREMENT)
+#undef INCREMENT
+  if (string_number > 0) {
+    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
+           string_bytes);
+  }
+
+  if (FLAG_collect_heap_spill_statistics && print_spill) {
+    isolate->js_spill_information()->Print();
+  }
+}
+#endif  // DEBUG
+
+
+// Support for statistics gathering for --heap-stats and --log-gc.
+void NewSpace::ClearHistograms() {
+  for (int i = 0; i <= LAST_TYPE; i++) {
+    allocated_histogram_[i].clear();
+    promoted_histogram_[i].clear();
+  }
+}
+
+
+// Because the copying collector does not touch garbage objects, we iterate
+// the new space before a collection to get a histogram of allocated objects.
+// This only happens when the --log-gc flag is set.
+void NewSpace::CollectStatistics() {
+  ClearHistograms();
+  SemiSpaceIterator it(this);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
+    RecordAllocation(obj);
+}
+
+
+static void DoReportStatistics(Isolate* isolate, HistogramInfo* info,
+                               const char* description) {
+  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
+  // Lump all the string types together.
+  int string_number = 0;
+  int string_bytes = 0;
+#define INCREMENT(type, size, name, camel_name) \
+  string_number += info[type].number();         \
+  string_bytes += info[type].bytes();
+  STRING_TYPE_LIST(INCREMENT)
+#undef INCREMENT
+  if (string_number > 0) {
+    LOG(isolate,
+        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
+  }
+
+  // Then do the other types.
+  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
+    if (info[i].number() > 0) {
+      LOG(isolate, HeapSampleItemEvent(info[i].name(), info[i].number(),
+                                       info[i].bytes()));
+    }
+  }
+  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
+}
+
+
+void NewSpace::ReportStatistics() {
+#ifdef DEBUG
+  if (FLAG_heap_stats) {
+    float pct = static_cast<float>(Available()) / TotalCapacity();
+    PrintF("  capacity: %" V8_PTR_PREFIX
+           "d"
+           ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+           TotalCapacity(), Available(), static_cast<int>(pct * 100));
+    PrintF("\n  Object Histogram:\n");
+    for (int i = 0; i <= LAST_TYPE; i++) {
+      if (allocated_histogram_[i].number() > 0) {
+        PrintF("    %-34s%10d (%10d bytes)\n", allocated_histogram_[i].name(),
+               allocated_histogram_[i].number(),
+               allocated_histogram_[i].bytes());
+      }
+    }
+    PrintF("\n");
+  }
+#endif  // DEBUG
+
+  if (FLAG_log_gc) {
+    Isolate* isolate = heap()->isolate();
+    DoReportStatistics(isolate, allocated_histogram_, "allocated");
+    DoReportStatistics(isolate, promoted_histogram_, "promoted");
+  }
+}
+
+
+void NewSpace::RecordAllocation(HeapObject* obj) {
+  InstanceType type = obj->map()->instance_type();
+  DCHECK(0 <= type && type <= LAST_TYPE);
+  allocated_histogram_[type].increment_number(1);
+  allocated_histogram_[type].increment_bytes(obj->Size());
+}
+
+
+void NewSpace::RecordPromotion(HeapObject* obj) {
+  InstanceType type = obj->map()->instance_type();
+  DCHECK(0 <= type && type <= LAST_TYPE);
+  promoted_histogram_[type].increment_number(1);
+  promoted_histogram_[type].increment_bytes(obj->Size());
+}
+
+
+size_t NewSpace::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+  size_t size = to_space_.CommittedPhysicalMemory();
+  if (from_space_.is_committed()) {
+    size += from_space_.CommittedPhysicalMemory();
+  }
+  return size;
+}
+
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces implementation
+
+void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
+  DCHECK(size_in_bytes > 0);
+  DCHECK(IsAligned(size_in_bytes, kPointerSize));
+
+  // We write a map and possibly size information to the block.  If the block
+  // is big enough to be a FreeSpace with at least one extra word (the next
+  // pointer), we set its map to be the free space map and its size to an
+  // appropriate array length for the desired size from HeapObject::Size().
+  // If the block is too small (e.g., one or two words) to hold both a size
+  // field and a next pointer, we give it a filler map that gives it the
+  // correct size.
+  if (size_in_bytes > FreeSpace::kHeaderSize) {
+    // Can't use FreeSpace::cast because it fails during deserialization.
+    // We have to set the size first with a release store before we store
+    // the map because a concurrent store buffer scan on scavenge must not
+    // observe a map with an invalid size.
+    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
+    this_as_free_space->nobarrier_set_size(size_in_bytes);
+    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
+  } else if (size_in_bytes == kPointerSize) {
+    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
+  } else if (size_in_bytes == 2 * kPointerSize) {
+    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
+  } else {
+    UNREACHABLE();
+  }
+  // We would like to DCHECK(Size() == size_in_bytes) but this would fail during
+  // deserialization because the free space map is not done yet.
+}
+
+
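+// A real FreeSpace block stores its next pointer after the map and size
+// words (kNextOffset); a two-pointer filler stores it directly after the
+// map word.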
+FreeListNode* FreeListNode::next() {
+  DCHECK(IsFreeListNode(this));
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kNextOffset));
+  } else {
+    return reinterpret_cast<FreeListNode*>(
+        Memory::Address_at(address() + kPointerSize));
+  }
+}
+
+
+FreeListNode** FreeListNode::next_address() {
+  DCHECK(IsFreeListNode(this));
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(Size() >= kNextOffset + kPointerSize);
+    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
+  } else {
+    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
+  }
+}
+
+
+void FreeListNode::set_next(FreeListNode* next) {
+  DCHECK(IsFreeListNode(this));
+  // While we are booting the VM the free space map will actually be null.  So
+  // we have to make sure that we don't try to use it for anything at that
+  // stage.
+  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
+    DCHECK(map() == NULL || Size() >= kNextOffset + kPointerSize);
+    base::NoBarrier_Store(
+        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
+        reinterpret_cast<base::AtomicWord>(next));
+  } else {
+    base::NoBarrier_Store(
+        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
+        reinterpret_cast<base::AtomicWord>(next));
+  }
+}
+
+
+intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
+  intptr_t free_bytes = 0;
+  if (category->top() != NULL) {
+    // This is safe (not going to deadlock) since Concatenate operations
+    // are never performed on the same free lists at the same time in
+    // reverse order.
+    base::LockGuard<base::Mutex> target_lock_guard(mutex());
+    base::LockGuard<base::Mutex> source_lock_guard(category->mutex());
+    DCHECK(category->end_ != NULL);
+    free_bytes = category->available();
+    if (end_ == NULL) {
+      end_ = category->end();
+    } else {
+      category->end()->set_next(top());
+    }
+    set_top(category->top());
+    base::NoBarrier_Store(&top_, category->top_);
+    available_ += category->available();
+    category->Reset();
+  }
+  return free_bytes;
+}
+
+
+void FreeListCategory::Reset() {
+  set_top(NULL);
+  set_end(NULL);
+  set_available(0);
+}
+
+
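+// Unlinks every node that lives on page p from this category and returns the
+// number of bytes removed.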
+intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
+  int sum = 0;
+  FreeListNode* t = top();
+  FreeListNode** n = &t;
+  while (*n != NULL) {
+    if (Page::FromAddress((*n)->address()) == p) {
+      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
+      sum += free_space->Size();
+      *n = (*n)->next();
+    } else {
+      n = (*n)->next_address();
+    }
+  }
+  set_top(t);
+  if (top() == NULL) {
+    set_end(NULL);
+  }
+  available_ -= sum;
+  return sum;
+}
+
+
+bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
+  FreeListNode* node = top();
+  while (node != NULL) {
+    if (Page::FromAddress(node->address()) == p) return true;
+    node = node->next();
+  }
+  return false;
+}
+
+
+FreeListNode* FreeListCategory::PickNodeFromList(int* node_size) {
+  FreeListNode* node = top();
+
+  if (node == NULL) return NULL;
+
+  while (node != NULL &&
+         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
+    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
+    node = node->next();
+  }
+
+  if (node != NULL) {
+    set_top(node->next());
+    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
+    available_ -= *node_size;
+  } else {
+    set_top(NULL);
+  }
+
+  if (top() == NULL) {
+    set_end(NULL);
+  }
+
+  return node;
+}
+
+
+FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
+                                                 int* node_size) {
+  FreeListNode* node = PickNodeFromList(node_size);
+  if (node != NULL && *node_size < size_in_bytes) {
+    Free(node, *node_size);
+    *node_size = 0;
+    return NULL;
+  }
+  return node;
+}
+
+
+void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
+  node->set_next(top());
+  set_top(node);
+  if (end_ == NULL) {
+    end_ = node;
+  }
+  available_ += size_in_bytes;
+}
+
+
+void FreeListCategory::RepairFreeList(Heap* heap) {
+  FreeListNode* n = top();
+  while (n != NULL) {
+    Map** map_location = reinterpret_cast<Map**>(n->address());
+    if (*map_location == NULL) {
+      *map_location = heap->free_space_map();
+    } else {
+      DCHECK(*map_location == heap->free_space_map());
+    }
+    n = n->next();
+  }
+}
+
+
+FreeList::FreeList(PagedSpace* owner) : owner_(owner), heap_(owner->heap()) {
+  Reset();
+}
+
+
+intptr_t FreeList::Concatenate(FreeList* free_list) {
+  intptr_t free_bytes = 0;
+  free_bytes += small_list_.Concatenate(free_list->small_list());
+  free_bytes += medium_list_.Concatenate(free_list->medium_list());
+  free_bytes += large_list_.Concatenate(free_list->large_list());
+  free_bytes += huge_list_.Concatenate(free_list->huge_list());
+  return free_bytes;
+}
+
+
+void FreeList::Reset() {
+  small_list_.Reset();
+  medium_list_.Reset();
+  large_list_.Reset();
+  huge_list_.Reset();
+}
+
+
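+// Returned blocks are binned by size: anything below kSmallListMin is too
+// small to track and is recorded as wasted on its page; everything else goes
+// onto the small, medium, large or huge list matching its size.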
+int FreeList::Free(Address start, int size_in_bytes) {
+  if (size_in_bytes == 0) return 0;
+
+  FreeListNode* node = FreeListNode::FromAddress(start);
+  node->set_size(heap_, size_in_bytes);
+  Page* page = Page::FromAddress(start);
+
+  // Early return to drop too-small blocks on the floor.
+  if (size_in_bytes < kSmallListMin) {
+    page->add_non_available_small_blocks(size_in_bytes);
+    return size_in_bytes;
+  }
+
+  // Insert other blocks at the head of a free list of the appropriate
+  // magnitude.
+  if (size_in_bytes <= kSmallListMax) {
+    small_list_.Free(node, size_in_bytes);
+    page->add_available_in_small_free_list(size_in_bytes);
+  } else if (size_in_bytes <= kMediumListMax) {
+    medium_list_.Free(node, size_in_bytes);
+    page->add_available_in_medium_free_list(size_in_bytes);
+  } else if (size_in_bytes <= kLargeListMax) {
+    large_list_.Free(node, size_in_bytes);
+    page->add_available_in_large_free_list(size_in_bytes);
+  } else {
+    huge_list_.Free(node, size_in_bytes);
+    page->add_available_in_huge_free_list(size_in_bytes);
+  }
+
+  DCHECK(IsVeryLong() || available() == SumFreeLists());
+  return 0;
+}
+
+
+FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
+  FreeListNode* node = NULL;
+  Page* page = NULL;
+
+  if (size_in_bytes <= kSmallAllocationMax) {
+    node = small_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_small_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
+      return node;
+    }
+  }
+
+  if (size_in_bytes <= kMediumAllocationMax) {
+    node = medium_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_medium_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
+      return node;
+    }
+  }
+
+  if (size_in_bytes <= kLargeAllocationMax) {
+    node = large_list_.PickNodeFromList(node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_large_free_list(-(*node_size));
+      DCHECK(IsVeryLong() || available() == SumFreeLists());
+      return node;
+    }
+  }
+
+  int huge_list_available = huge_list_.available();
+  FreeListNode* top_node = huge_list_.top();
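+  // The huge list is scanned linearly; nodes on evacuation candidate pages
+  // are unlinked along the way, and the first node big enough for the
+  // request is taken.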
+  for (FreeListNode** cur = &top_node; *cur != NULL;
+       cur = (*cur)->next_address()) {
+    FreeListNode* cur_node = *cur;
+    while (cur_node != NULL &&
+           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
+      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
+      huge_list_available -= size;
+      page = Page::FromAddress(cur_node->address());
+      page->add_available_in_huge_free_list(-size);
+      cur_node = cur_node->next();
+    }
+
+    *cur = cur_node;
+    if (cur_node == NULL) {
+      huge_list_.set_end(NULL);
+      break;
+    }
+
+    DCHECK((*cur)->map() == heap_->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
+    int size = cur_as_free_space->Size();
+    if (size >= size_in_bytes) {
+      // Large enough node found.  Unlink it from the list.
+      node = *cur;
+      *cur = node->next();
+      *node_size = size;
+      huge_list_available -= size;
+      page = Page::FromAddress(node->address());
+      page->add_available_in_huge_free_list(-size);
+      break;
+    }
+  }
+
+  huge_list_.set_top(top_node);
+  if (huge_list_.top() == NULL) {
+    huge_list_.set_end(NULL);
+  }
+  huge_list_.set_available(huge_list_available);
+
+  if (node != NULL) {
+    DCHECK(IsVeryLong() || available() == SumFreeLists());
+    return node;
+  }
+
+  if (size_in_bytes <= kSmallListMax) {
+    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_small_free_list(-(*node_size));
+    }
+  } else if (size_in_bytes <= kMediumListMax) {
+    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_medium_free_list(-(*node_size));
+    }
+  } else if (size_in_bytes <= kLargeListMax) {
+    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
+    if (node != NULL) {
+      DCHECK(size_in_bytes <= *node_size);
+      page = Page::FromAddress(node->address());
+      page->add_available_in_large_free_list(-(*node_size));
+    }
+  }
+
+  DCHECK(IsVeryLong() || available() == SumFreeLists());
+  return node;
+}
+
+
+// Allocation on the old space free list.  If it succeeds then a new linear
+// allocation space has been set up with the top and limit of the space.  If
+// the allocation fails then NULL is returned, and the caller can perform a GC
+// or allocate a new page before retrying.
+HeapObject* FreeList::Allocate(int size_in_bytes) {
+  DCHECK(0 < size_in_bytes);
+  DCHECK(size_in_bytes <= kMaxBlockSize);
+  DCHECK(IsAligned(size_in_bytes, kPointerSize));
+  // Don't free list allocate if there is linear space available.
+  DCHECK(owner_->limit() - owner_->top() < size_in_bytes);
+
+  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
+  // Mark the old linear allocation area with a free space map so it can be
+  // skipped when scanning the heap.  This also puts it back in the free list
+  // if it is big enough.
+  owner_->Free(owner_->top(), old_linear_size);
+
+  owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
+                                                      old_linear_size);
+
+  int new_node_size = 0;
+  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
+  if (new_node == NULL) {
+    owner_->SetTopAndLimit(NULL, NULL);
+    return NULL;
+  }
+
+  int bytes_left = new_node_size - size_in_bytes;
+  DCHECK(bytes_left >= 0);
+
+#ifdef DEBUG
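+  // Zap the block with a recognizable pattern so that reads of uninitialized
+  // allocation show up in debug builds.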
+  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
+    reinterpret_cast<Object**>(new_node->address())[i] =
+        Smi::FromInt(kCodeZapValue);
+  }
+#endif
+
+  // The old-space-step might have finished sweeping and restarted marking.
+  // Verify that it did not turn the page of the new node into an evacuation
+  // candidate.
+  DCHECK(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
+
+  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
+
+  // Memory in the linear allocation area is counted as allocated.  We may free
+  // a little of this again immediately; see below.
+  owner_->Allocate(new_node_size);
+
+  if (owner_->heap()->inline_allocation_disabled()) {
+    // Keep the linear allocation area empty if requested to do so, just
+    // return area back to the free list instead.
+    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
+    DCHECK(owner_->top() == NULL && owner_->limit() == NULL);
+  } else if (bytes_left > kThreshold &&
+             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
+             FLAG_incremental_marking_steps) {
+    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
+    // We don't want to give too large linear areas to the allocator while
+    // incremental marking is going on, because we won't check again whether
+    // we want to do another increment until the linear area is used up.
+    owner_->Free(new_node->address() + size_in_bytes + linear_size,
+                 new_node_size - size_in_bytes - linear_size);
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + size_in_bytes + linear_size);
+  } else if (bytes_left > 0) {
+    // Normally we give the rest of the node to the allocator as its new
+    // linear allocation area.
+    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
+                           new_node->address() + new_node_size);
+  } else {
+    // TODO(gc) Try not freeing linear allocation region when bytes_left
+    // are zero.
+    owner_->SetTopAndLimit(NULL, NULL);
+  }
+
+  return new_node;
+}
+
+
+intptr_t FreeList::EvictFreeListItems(Page* p) {
+  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
+  p->set_available_in_huge_free_list(0);
+
+  if (sum < p->area_size()) {
+    sum += small_list_.EvictFreeListItemsInList(p) +
+           medium_list_.EvictFreeListItemsInList(p) +
+           large_list_.EvictFreeListItemsInList(p);
+    p->set_available_in_small_free_list(0);
+    p->set_available_in_medium_free_list(0);
+    p->set_available_in_large_free_list(0);
+  }
+
+  return sum;
+}
+
+
+bool FreeList::ContainsPageFreeListItems(Page* p) {
+  return huge_list_.ContainsPageFreeListItemsInList(p) ||
+         small_list_.ContainsPageFreeListItemsInList(p) ||
+         medium_list_.ContainsPageFreeListItemsInList(p) ||
+         large_list_.ContainsPageFreeListItemsInList(p);
+}
+
+
+void FreeList::RepairLists(Heap* heap) {
+  small_list_.RepairFreeList(heap);
+  medium_list_.RepairFreeList(heap);
+  large_list_.RepairFreeList(heap);
+  huge_list_.RepairFreeList(heap);
+}
+
+
+#ifdef DEBUG
+intptr_t FreeListCategory::SumFreeList() {
+  intptr_t sum = 0;
+  FreeListNode* cur = top();
+  while (cur != NULL) {
+    DCHECK(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
+    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
+    sum += cur_as_free_space->nobarrier_size();
+    cur = cur->next();
+  }
+  return sum;
+}
+
+
+static const int kVeryLongFreeList = 500;
+
+
+int FreeListCategory::FreeListLength() {
+  int length = 0;
+  FreeListNode* cur = top();
+  while (cur != NULL) {
+    length++;
+    cur = cur->next();
+    if (length == kVeryLongFreeList) return length;
+  }
+  return length;
+}
+
+
+bool FreeList::IsVeryLong() {
+  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
+  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
+  return false;
+}
+
+
+// This can take a very long time because it is linear in the number of entries
+// on the free list, so it should not be called if FreeListLength returns
+// kVeryLongFreeList.
+intptr_t FreeList::SumFreeLists() {
+  intptr_t sum = small_list_.SumFreeList();
+  sum += medium_list_.SumFreeList();
+  sum += large_list_.SumFreeList();
+  sum += huge_list_.SumFreeList();
+  return sum;
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// OldSpace implementation
+
+void PagedSpace::PrepareForMarkCompact() {
+  // We don't have a linear allocation area while sweeping.  It will be restored
+  // on the first allocation after the sweep.
+  EmptyAllocationInfo();
+
+  // This counter will be increased for pages which will be swept by the
+  // sweeper threads.
+  unswept_free_bytes_ = 0;
+
+  // Clear the free list before a full GC; it will be rebuilt afterward.
+  free_list_.Reset();
+}
+
+
+intptr_t PagedSpace::SizeOfObjects() {
+  DCHECK(heap()->mark_compact_collector()->sweeping_in_progress() ||
+         (unswept_free_bytes_ == 0));
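+  // Committed size minus everything known to be free: bytes on unswept pages
+  // and the untouched tail of the current linear allocation area.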
+  return Size() - unswept_free_bytes_ - (limit() - top());
+}
+
+
+// After we have booted, we have created a map which represents free space
+// on the heap.  If there was already a free list then the elements on it
+// were created with the wrong FreeSpaceMap (normally NULL), so we need to
+// fix them.
+void PagedSpace::RepairFreeListsAfterBoot() { free_list_.RepairLists(heap()); }
+
+
+void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
+  if (allocation_info_.top() >= allocation_info_.limit()) return;
+
+  if (Page::FromAllocationTop(allocation_info_.top())
+          ->IsEvacuationCandidate()) {
+    // Create filler object to keep page iterable if it was iterable.
+    int remaining =
+        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
+    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
+
+    allocation_info_.set_top(NULL);
+    allocation_info_.set_limit(NULL);
+  }
+}
+
+
+HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
+    int size_in_bytes) {
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  if (collector->sweeping_in_progress()) {
+    // Wait for the sweeper threads here and complete the sweeping phase.
+    collector->EnsureSweepingCompleted();
+
+    // After waiting for the sweeper threads, there may be new free-list
+    // entries.
+    return free_list_.Allocate(size_in_bytes);
+  }
+  return NULL;
+}
+
+
+HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
+  // Allocation in this space has failed.
+
+  MarkCompactCollector* collector = heap()->mark_compact_collector();
+  // Sweeping is still in progress.
+  if (collector->sweeping_in_progress()) {
+    // First try to refill the free-list, concurrent sweeper threads
+    // may have freed some objects in the meantime.
+    collector->RefillFreeList(this);
+
+    // Retry the free list allocation.
+    HeapObject* object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) return object;
+
+    // If sweeping is still in progress try to sweep pages on the main thread.
+    int free_chunk = collector->SweepInParallel(this, size_in_bytes);
+    collector->RefillFreeList(this);
+    if (free_chunk >= size_in_bytes) {
+      HeapObject* object = free_list_.Allocate(size_in_bytes);
+      // We should be able to allocate an object here since we just freed that
+      // much memory.
+      DCHECK(object != NULL);
+      if (object != NULL) return object;
+    }
+  }
+
+  // Free list allocation failed and there is no next page.  Fail if we have
+  // hit the old generation size limit that should cause a garbage
+  // collection.
+  if (!heap()->always_allocate() &&
+      heap()->OldGenerationAllocationLimitReached()) {
+    // If sweeper threads are active, wait for them at that point and steal
+    // elements from their free-lists.
+    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+    if (object != NULL) return object;
+  }
+
+  // Try to expand the space and allocate in the new next page.
+  if (Expand()) {
+    DCHECK(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
+    return free_list_.Allocate(size_in_bytes);
+  }
+
+  // If sweeper threads are active, wait for them at that point and steal
+  // elements from their free-lists. Allocation may still fail there, which
+  // would indicate that there is not enough memory for the given allocation.
+  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
+}
+
+
+#ifdef DEBUG
+void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
+  CommentStatistic* comments_statistics =
+      isolate->paged_space_comments_statistics();
+  ReportCodeKindStatistics(isolate->code_kind_statistics());
+  PrintF(
+      "Code comment statistics (\"   [ comment-txt   :    size/   "
+      "count  (average)\"):\n");
+  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
+    const CommentStatistic& cs = comments_statistics[i];
+    if (cs.size > 0) {
+      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
+             cs.size / cs.count);
+    }
+  }
+  PrintF("\n");
+}
+
+
+void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
+  CommentStatistic* comments_statistics =
+      isolate->paged_space_comments_statistics();
+  ClearCodeKindStatistics(isolate->code_kind_statistics());
+  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
+    comments_statistics[i].Clear();
+  }
+  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
+  comments_statistics[CommentStatistic::kMaxComments].size = 0;
+  comments_statistics[CommentStatistic::kMaxComments].count = 0;
+}
+
+
+// Adds a comment to the 'comment_statistics' table. Performance is OK as
+// long as 'kMaxComments' is small.
+static void EnterComment(Isolate* isolate, const char* comment, int delta) {
+  CommentStatistic* comments_statistics =
+      isolate->paged_space_comments_statistics();
+  // Do not count empty comments
+  if (delta <= 0) return;
+  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
+  // Search for a free or matching entry in 'comments_statistics': 'cs'
+  // points to result.
+  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
+    if (comments_statistics[i].comment == NULL) {
+      cs = &comments_statistics[i];
+      cs->comment = comment;
+      break;
+    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
+      cs = &comments_statistics[i];
+      break;
+    }
+  }
+  // Update entry for 'comment'
+  cs->size += delta;
+  cs->count += 1;
+}
+
+
+// Called for each nested comment start (start marked with '[ xxx', end
+// marked with ']').  RelocIterator 'it' must point to a comment reloc info.
+static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
+  DCHECK(!it->done());
+  DCHECK(it->rinfo()->rmode() == RelocInfo::COMMENT);
+  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
+  if (tmp[0] != '[') {
+    // Not a nested comment; skip
+    return;
+  }
+
+  // Search for end of nested comment or a new nested comment
+  const char* const comment_txt =
+      reinterpret_cast<const char*>(it->rinfo()->data());
+  const byte* prev_pc = it->rinfo()->pc();
+  int flat_delta = 0;
+  it->next();
+  while (true) {
+    // All nested comments must be terminated properly, and therefore we will
+    // exit from the loop.
+    DCHECK(!it->done());
+    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
+      const char* const txt =
+          reinterpret_cast<const char*>(it->rinfo()->data());
+      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
+      if (txt[0] == ']') break;  // End of nested comment
+      // A new comment
+      CollectCommentStatistics(isolate, it);
+      // Skip code that was covered with previous comment
+      prev_pc = it->rinfo()->pc();
+    }
+    it->next();
+  }
+  EnterComment(isolate, comment_txt, flat_delta);
+}
+
+
+// Collects code size statistics:
+// - by code kind
+// - by code comment
+void PagedSpace::CollectCodeStatistics() {
+  Isolate* isolate = heap()->isolate();
+  HeapObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+    if (obj->IsCode()) {
+      Code* code = Code::cast(obj);
+      isolate->code_kind_statistics()[code->kind()] += code->Size();
+      RelocIterator it(code);
+      int delta = 0;
+      const byte* prev_pc = code->instruction_start();
+      while (!it.done()) {
+        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
+          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
+          CollectCommentStatistics(isolate, &it);
+          prev_pc = it.rinfo()->pc();
+        }
+        it.next();
+      }
+
+      DCHECK(code->instruction_start() <= prev_pc &&
+             prev_pc <= code->instruction_end());
+      delta += static_cast<int>(code->instruction_end() - prev_pc);
+      EnterComment(isolate, "NoComment", delta);
+    }
+  }
+}
+
+
+void PagedSpace::ReportStatistics() {
+  int pct = static_cast<int>(Available() * 100 / Capacity());
+  PrintF("  capacity: %" V8_PTR_PREFIX
+         "d"
+         ", waste: %" V8_PTR_PREFIX
+         "d"
+         ", available: %" V8_PTR_PREFIX "d, %%%d\n",
+         Capacity(), Waste(), Available(), pct);
+
+  if (heap()->mark_compact_collector()->sweeping_in_progress()) {
+    heap()->mark_compact_collector()->EnsureSweepingCompleted();
+  }
+  ClearHistograms(heap()->isolate());
+  HeapObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
+    CollectHistogramInfo(obj);
+  ReportHistogram(heap()->isolate(), true);
+}
+#endif
+
+
+// -----------------------------------------------------------------------------
+// MapSpace implementation
+// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
+
+void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
+
+
+// -----------------------------------------------------------------------------
+// CellSpace and PropertyCellSpace implementation
+// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
+// there is at least one non-inlined virtual function. I would prefer to hide
+// the VerifyObject definition behind VERIFY_HEAP.
+
+void CellSpace::VerifyObject(HeapObject* object) { CHECK(object->IsCell()); }
+
+
+void PropertyCellSpace::VerifyObject(HeapObject* object) {
+  CHECK(object->IsPropertyCell());
+}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectIterator
+
+LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
+  current_ = space->first_page_;
+  size_func_ = NULL;
+}
+
+
+LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
+                                         HeapObjectCallback size_func) {
+  current_ = space->first_page_;
+  size_func_ = size_func;
+}
+
+
+HeapObject* LargeObjectIterator::Next() {
+  if (current_ == NULL) return NULL;
+
+  HeapObject* object = current_->GetObject();
+  current_ = current_->next_page();
+  return object;
+}
+
+
+// -----------------------------------------------------------------------------
+// LargeObjectSpace
+static bool ComparePointers(void* key1, void* key2) { return key1 == key2; }
+
+
+LargeObjectSpace::LargeObjectSpace(Heap* heap, intptr_t max_capacity,
+                                   AllocationSpace id)
+    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
+      max_capacity_(max_capacity),
+      first_page_(NULL),
+      size_(0),
+      page_count_(0),
+      objects_size_(0),
+      chunk_map_(ComparePointers, 1024) {}
+
+
+bool LargeObjectSpace::SetUp() {
+  first_page_ = NULL;
+  size_ = 0;
+  maximum_committed_ = 0;
+  page_count_ = 0;
+  objects_size_ = 0;
+  chunk_map_.Clear();
+  return true;
+}
+
+
+void LargeObjectSpace::TearDown() {
+  while (first_page_ != NULL) {
+    LargePage* page = first_page_;
+    first_page_ = first_page_->next_page();
+    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
+
+    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
+    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
+        space, kAllocationActionFree, page->size());
+    heap()->isolate()->memory_allocator()->Free(page);
+  }
+  SetUp();
+}
+
+
+AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
+                                               Executability executable) {
+  // Check if we want to force a GC before growing the old space further.
+  // If so, fail the allocation.
+  if (!heap()->always_allocate() &&
+      heap()->OldGenerationAllocationLimitReached()) {
+    return AllocationResult::Retry(identity());
+  }
+
+  if (Size() + object_size > max_capacity_) {
+    return AllocationResult::Retry(identity());
+  }
+
+  LargePage* page = heap()->isolate()->memory_allocator()->AllocateLargePage(
+      object_size, this, executable);
+  if (page == NULL) return AllocationResult::Retry(identity());
+  DCHECK(page->area_size() >= object_size);
+
+  size_ += static_cast<int>(page->size());
+  objects_size_ += object_size;
+  page_count_++;
+  page->set_next_page(first_page_);
+  first_page_ = page;
+
+  if (size_ > maximum_committed_) {
+    maximum_committed_ = size_;
+  }
+
+  // Register all MemoryChunk::kAlignment-aligned chunks covered by
+  // this large page in the chunk map.
+  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
+  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
+  for (uintptr_t key = base; key <= limit; key++) {
+    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                              static_cast<uint32_t>(key), true);
+    DCHECK(entry != NULL);
+    entry->value = page;
+  }
+
+  HeapObject* object = page->GetObject();
+
+  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
+
+  if (Heap::ShouldZapGarbage()) {
+    // Make the object consistent so the heap can be verified in OldSpaceStep.
+    // We only need to do this in debug builds or if verify_heap is on.
+    reinterpret_cast<Object**>(object->address())[0] =
+        heap()->fixed_array_map();
+    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
+  }
+
+  heap()->incremental_marking()->OldSpaceStep(object_size);
+  return object;
+}
+
+
+size_t LargeObjectSpace::CommittedPhysicalMemory() {
+  if (!base::VirtualMemory::HasLazyCommits()) return CommittedMemory();
+  size_t size = 0;
+  LargePage* current = first_page_;
+  while (current != NULL) {
+    size += current->CommittedPhysicalMemory();
+    current = current->next_page();
+  }
+  return size;
+}
+
+
+// GC support
+Object* LargeObjectSpace::FindObject(Address a) {
+  LargePage* page = FindPage(a);
+  if (page != NULL) {
+    return page->GetObject();
+  }
+  return Smi::FromInt(0);  // Signaling not found.
+}
+
+
+LargePage* LargeObjectSpace::FindPage(Address a) {
+  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
+  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
+                                        static_cast<uint32_t>(key), false);
+  if (e != NULL) {
+    DCHECK(e->value != NULL);
+    LargePage* page = reinterpret_cast<LargePage*>(e->value);
+    DCHECK(page->is_valid());
+    if (page->Contains(a)) {
+      return page;
+    }
+  }
+  return NULL;
+}
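+
+// Worked example (illustrative addresses): a 3MB large page based at
+// 0x40000000 with 1MB-aligned chunks registers the keys 0x400..0x402 in
+// chunk_map_, so FindPage() resolves any interior address of the object.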
+
+
+void LargeObjectSpace::FreeUnmarkedObjects() {
+  LargePage* previous = NULL;
+  LargePage* current = first_page_;
+  while (current != NULL) {
+    HeapObject* object = current->GetObject();
+    // Can this large page contain pointers to non-trivial objects?  No other
+    // pointer object is this big.
+    bool is_pointer_object = object->IsFixedArray();
+    MarkBit mark_bit = Marking::MarkBitFrom(object);
+    if (mark_bit.Get()) {
+      mark_bit.Clear();
+      Page::FromAddress(object->address())->ResetProgressBar();
+      Page::FromAddress(object->address())->ResetLiveBytes();
+      previous = current;
+      current = current->next_page();
+    } else {
+      LargePage* page = current;
+      // Cut the chunk out from the chunk list.
+      current = current->next_page();
+      if (previous == NULL) {
+        first_page_ = current;
+      } else {
+        previous->set_next_page(current);
+      }
+
+      // Free the chunk.
+      heap()->mark_compact_collector()->ReportDeleteIfNeeded(object,
+                                                             heap()->isolate());
+      size_ -= static_cast<int>(page->size());
+      objects_size_ -= object->Size();
+      page_count_--;
+
+      // Remove entries belonging to this page.
+      // Use variable alignment to help pass length check (<= 80 characters)
+      // of single line in tools/presubmit.py.
+      const intptr_t alignment = MemoryChunk::kAlignment;
+      uintptr_t base = reinterpret_cast<uintptr_t>(page) / alignment;
+      uintptr_t limit = base + (page->size() - 1) / alignment;
+      for (uintptr_t key = base; key <= limit; key++) {
+        chunk_map_.Remove(reinterpret_cast<void*>(key),
+                          static_cast<uint32_t>(key));
+      }
+
+      if (is_pointer_object) {
+        heap()->QueueMemoryChunkForFree(page);
+      } else {
+        heap()->isolate()->memory_allocator()->Free(page);
+      }
+    }
+  }
+  heap()->FreeQueuedChunks();
+}
+
+
+bool LargeObjectSpace::Contains(HeapObject* object) {
+  Address address = object->address();
+  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
+
+  bool owned = (chunk->owner() == this);
+
+  SLOW_DCHECK(!owned || FindObject(address)->IsHeapObject());
+
+  return owned;
+}
+
+
+#ifdef VERIFY_HEAP
+// We do not assume that the large object iterator works, because it depends
+// on the invariants we are checking during verification.
+void LargeObjectSpace::Verify() {
+  for (LargePage* chunk = first_page_; chunk != NULL;
+       chunk = chunk->next_page()) {
+    // Each chunk contains an object that starts at the large object page's
+    // object area start.
+    HeapObject* object = chunk->GetObject();
+    Page* page = Page::FromAddress(object->address());
+    CHECK(object->address() == page->area_start());
+
+    // The first word should be a map, and we expect all map pointers to be
+    // in map space.
+    Map* map = object->map();
+    CHECK(map->IsMap());
+    CHECK(heap()->map_space()->Contains(map));
+
+    // We have only code, sequential strings, external strings
+    // (sequential strings that have been morphed into external
+    // strings), fixed arrays, byte arrays, and constant pool arrays in the
+    // large object space.
+    CHECK(object->IsCode() || object->IsSeqString() ||
+          object->IsExternalString() || object->IsFixedArray() ||
+          object->IsFixedDoubleArray() || object->IsByteArray() ||
+          object->IsConstantPoolArray());
+
+    // The object itself should look OK.
+    object->ObjectVerify();
+
+    // Byte arrays and strings don't have interior pointers.
+    if (object->IsCode()) {
+      VerifyPointersVisitor code_visitor;
+      object->IterateBody(map->instance_type(), object->Size(), &code_visitor);
+    } else if (object->IsFixedArray()) {
+      FixedArray* array = FixedArray::cast(object);
+      for (int j = 0; j < array->length(); j++) {
+        Object* element = array->get(j);
+        if (element->IsHeapObject()) {
+          HeapObject* element_object = HeapObject::cast(element);
+          CHECK(heap()->Contains(element_object));
+          CHECK(element_object->map()->IsMap());
+        }
+      }
+    }
+  }
+}
+#endif
+
+
+#ifdef DEBUG
+void LargeObjectSpace::Print() {
+  OFStream os(stdout);
+  LargeObjectIterator it(this);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    obj->Print(os);
+  }
+}
+
+
+void LargeObjectSpace::ReportStatistics() {
+  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
+  int num_objects = 0;
+  ClearHistograms(heap()->isolate());
+  LargeObjectIterator it(this);
+  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+    num_objects++;
+    CollectHistogramInfo(obj);
+  }
+
+  PrintF(
+      "  number of objects %d, "
+      "size of objects %" V8_PTR_PREFIX "d\n",
+      num_objects, objects_size_);
+  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
+}
+
+
+void LargeObjectSpace::CollectCodeStatistics() {
+  Isolate* isolate = heap()->isolate();
+  LargeObjectIterator obj_it(this);
+  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
+    if (obj->IsCode()) {
+      Code* code = Code::cast(obj);
+      isolate->code_kind_statistics()[code->kind()] += code->Size();
+    }
+  }
+}
+
+
+void Page::Print() {
+  // Make a best-effort to print the objects in the page.
+  PrintF("Page@%p in %s\n", this->address(),
+         AllocationSpaceName(this->owner()->identity()));
+  printf(" --------------------------------------\n");
+  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
+  unsigned mark_size = 0;
+  for (HeapObject* object = objects.Next(); object != NULL;
+       object = objects.Next()) {
+    bool is_marked = Marking::MarkBitFrom(object).Get();
+    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
+    if (is_marked) {
+      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
+    }
+    object->ShortPrint();
+    PrintF("\n");
+  }
+  printf(" --------------------------------------\n");
+  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
+}
+
+#endif  // DEBUG
+}
+}  // namespace v8::internal
diff --git a/src/heap/spaces.h b/src/heap/spaces.h
new file mode 100644
index 0000000..9ecb3c4
--- /dev/null
+++ b/src/heap/spaces.h
@@ -0,0 +1,2886 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SPACES_H_
+#define V8_HEAP_SPACES_H_
+
+#include "src/allocation.h"
+#include "src/base/atomicops.h"
+#include "src/base/bits.h"
+#include "src/base/platform/mutex.h"
+#include "src/hashmap.h"
+#include "src/list.h"
+#include "src/log.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+class Isolate;
+
+// -----------------------------------------------------------------------------
+// Heap structures:
+//
+// A JS heap consists of a young generation, an old generation, and a large
+// object space. The young generation is divided into two semispaces. A
+// scavenger implements Cheney's copying algorithm. The old generation is
+// separated into a map space and an old object space. The map space contains
+// all (and only) map objects, the rest of old objects go into the old space.
+// The old generation is collected by a mark-sweep-compact collector.
+//
+// The semispaces of the young generation are contiguous.  The old and map
+// spaces consist of a list of pages. A page has a page header and an object
+// area.
+//
+// There is a separate large object space for objects larger than
+// Page::kMaxHeapObjectSize, so that they do not have to move during
+// collection. The large object space is paged. Pages in large object space
+// may be larger than the page size.
+//
+// A store-buffer based write barrier is used to keep track of intergenerational
+// references.  See heap/store-buffer.h.
+//
+// During scavenges and mark-sweep collections we sometimes (after a store
+// buffer overflow) iterate intergenerational pointers without decoding heap
+// object maps so if the page belongs to old pointer space or large object
+// space it is essential to guarantee that the page does not contain any
+// garbage pointers to new space: every pointer aligned word which satisfies
+// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
+// new space. Thus objects in old pointer and large object spaces should have a
+// special layout (e.g. no bare integer fields). This requirement does not
+// apply to map space which is iterated in a special fashion. However we still
+// require pointer fields of dead maps to be cleaned.
+//
+// To enable lazy cleaning of old space pages we can mark chunks of the page
+// as being garbage.  Garbage sections are marked with a special map.  These
+// sections are skipped when scanning the page, even if we are otherwise
+// scanning without regard for object boundaries.  Garbage sections are chained
+// together to form a free list after a GC.  Garbage sections created outside
+// of GCs by object truncation etc. may not be in the free list chain.  Very
+// small free spaces are ignored, they need only be cleaned of bogus pointers
+// into new space.
+//
+// Each page may have up to one special garbage section.  The start of this
+// section is denoted by the top field in the space.  The end of the section
+// is denoted by the limit field in the space.  This special garbage section
+// is not marked with a free space map in the data.  The point of this section
+// is to enable linear allocation without having to constantly update the byte
+// array every time the top field is updated and a new object is created.  The
+// special garbage section is not in the chain of garbage sections.
+//
+// Since the top and limit fields are in the space, not the page, only one page
+// has a special garbage section, and if the top and limit are equal then there
+// is no special garbage section.
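+//
+// A sketch of the intergenerational check described above, in terms of the
+// page flags defined below (see MemoryChunk::InNewSpace()):
+//
+//   MemoryChunk* chunk = MemoryChunk::FromAddress(candidate_pointer);
+//   bool maybe_new = chunk->InNewSpace();  // IN_FROM_SPACE or IN_TO_SPACE.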
+
+// Some assertion macros used in the debugging mode.
+
+#define DCHECK_PAGE_ALIGNED(address) \
+  DCHECK((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
+
+#define DCHECK_OBJECT_ALIGNED(address) \
+  DCHECK((OffsetFrom(address) & kObjectAlignmentMask) == 0)
+
+#define DCHECK_OBJECT_SIZE(size) \
+  DCHECK((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
+
+#define DCHECK_PAGE_OFFSET(offset) \
+  DCHECK((Page::kObjectStartOffset <= offset) && (offset <= Page::kPageSize))
+
+#define DCHECK_MAP_PAGE_INDEX(index) \
+  DCHECK((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
+
+
+class PagedSpace;
+class MemoryAllocator;
+class AllocationInfo;
+class Space;
+class FreeList;
+class MemoryChunk;
+
+class MarkBit {
+ public:
+  typedef uint32_t CellType;
+
+  inline MarkBit(CellType* cell, CellType mask, bool data_only)
+      : cell_(cell), mask_(mask), data_only_(data_only) {}
+
+  inline CellType* cell() { return cell_; }
+  inline CellType mask() { return mask_; }
+
+#ifdef DEBUG
+  bool operator==(const MarkBit& other) {
+    return cell_ == other.cell_ && mask_ == other.mask_;
+  }
+#endif
+
+  inline void Set() { *cell_ |= mask_; }
+  inline bool Get() { return (*cell_ & mask_) != 0; }
+  inline void Clear() { *cell_ &= ~mask_; }
+
+  inline bool data_only() { return data_only_; }
+
+  inline MarkBit Next() {
+    CellType new_mask = mask_ << 1;
+    if (new_mask == 0) {
+      return MarkBit(cell_ + 1, 1, data_only_);
+    } else {
+      return MarkBit(cell_, new_mask, data_only_);
+    }
+  }
+
+ private:
+  CellType* cell_;
+  CellType mask_;
+  // This boolean indicates that the object is in a data-only space with no
+  // pointers.  This enables some optimizations when marking.
+  // It is expected that this field is inlined and turned into control flow
+  // at the place where the MarkBit object is created.
+  bool data_only_;
+};
+
+
+// Bitmap is a sequence of cells each containing fixed number of bits.
+class Bitmap {
+ public:
+  static const uint32_t kBitsPerCell = 32;
+  static const uint32_t kBitsPerCellLog2 = 5;
+  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
+  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
+  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
+
+  static const size_t kLength = (1 << kPageSizeBits) >> (kPointerSizeLog2);
+
+  static const size_t kSize =
+      (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
+
+
+  static int CellsForLength(int length) {
+    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
+  }
+
+  int CellsCount() { return CellsForLength(kLength); }
+
+  static int SizeFor(int cells_count) {
+    return sizeof(MarkBit::CellType) * cells_count;
+  }
+
+  INLINE(static uint32_t IndexToCell(uint32_t index)) {
+    return index >> kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellToIndex(uint32_t index)) {
+    return index << kBitsPerCellLog2;
+  }
+
+  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
+    return (index + kBitIndexMask) & ~kBitIndexMask;
+  }
+
+  INLINE(MarkBit::CellType* cells()) {
+    return reinterpret_cast<MarkBit::CellType*>(this);
+  }
+
+  INLINE(Address address()) { return reinterpret_cast<Address>(this); }
+
+  INLINE(static Bitmap* FromAddress(Address addr)) {
+    return reinterpret_cast<Bitmap*>(addr);
+  }
+
+  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
+    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
+    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
+    return MarkBit(cell, mask, data_only);
+  }
+
+  static inline void Clear(MemoryChunk* chunk);
+
+  static void PrintWord(uint32_t word, uint32_t himask = 0) {
+    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
+      if ((mask & himask) != 0) PrintF("[");
+      PrintF((mask & word) ? "1" : "0");
+      if ((mask & himask) != 0) PrintF("]");
+    }
+  }
+
+  class CellPrinter {
+   public:
+    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) {}
+
+    void Print(uint32_t pos, uint32_t cell) {
+      if (cell == seq_type) {
+        seq_length++;
+        return;
+      }
+
+      Flush();
+
+      if (IsSeq(cell)) {
+        seq_start = pos;
+        seq_length = 0;
+        seq_type = cell;
+        return;
+      }
+
+      PrintF("%d: ", pos);
+      PrintWord(cell);
+      PrintF("\n");
+    }
+
+    void Flush() {
+      if (seq_length > 0) {
+        PrintF("%d: %dx%d\n", seq_start, seq_type == 0 ? 0 : 1,
+               seq_length * kBitsPerCell);
+        seq_length = 0;
+      }
+    }
+
+    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
+
+   private:
+    uint32_t seq_start;
+    uint32_t seq_type;
+    uint32_t seq_length;
+  };
+
+  void Print() {
+    CellPrinter printer;
+    for (int i = 0; i < CellsCount(); i++) {
+      printer.Print(i, cells()[i]);
+    }
+    printer.Flush();
+    PrintF("\n");
+  }
+
+  bool IsClean() {
+    for (int i = 0; i < CellsCount(); i++) {
+      if (cells()[i] != 0) {
+        return false;
+      }
+    }
+    return true;
+  }
+};
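+
+// Worked example of the constants above on a 64-bit target (assuming
+// kPageSizeBits == 20, i.e. 1MB chunks, and kPointerSizeLog2 == 3):
+// kLength == 2^20 >> 3 == 131072 mark bits, one per pointer-aligned word,
+// stored in kSize == 2^20 >> 6 == 16384 bytes, i.e. 4096 32-bit cells.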
+
+
+class SkipList;
+class SlotsBuffer;
+
+// MemoryChunk represents a memory region owned by a specific space.
+// It is divided into the header and the body. Chunk start is always
+// 1MB aligned. Start of the body is aligned so it can accommodate
+// any heap object.
+class MemoryChunk {
+ public:
+  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
+  static MemoryChunk* FromAddress(Address a) {
+    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
+  }
+  static const MemoryChunk* FromAddress(const byte* a) {
+    return reinterpret_cast<const MemoryChunk*>(OffsetFrom(a) &
+                                                ~kAlignmentMask);
+  }
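+
+  // Worked example (illustrative address): with 1MB-aligned chunks,
+  // kAlignmentMask == 0xFFFFF, so an address such as 0x12345678 maps to
+  // the chunk header at 0x12300000.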
+
+  // Only works for addresses in pointer spaces, not data or code spaces.
+  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
+
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  bool is_valid() { return address() != NULL; }
+
+  MemoryChunk* next_chunk() const {
+    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
+  }
+
+  MemoryChunk* prev_chunk() const {
+    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
+  }
+
+  void set_next_chunk(MemoryChunk* next) {
+    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
+  }
+
+  void set_prev_chunk(MemoryChunk* prev) {
+    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
+  }
+
+  Space* owner() const {
+    if ((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+        kPageHeaderTag) {
+      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
+                                      kPageHeaderTag);
+    } else {
+      return NULL;
+    }
+  }
+
+  void set_owner(Space* space) {
+    DCHECK((reinterpret_cast<intptr_t>(space) & kPageHeaderTagMask) == 0);
+    owner_ = reinterpret_cast<Address>(space) + kPageHeaderTag;
+    DCHECK((reinterpret_cast<intptr_t>(owner_) & kPageHeaderTagMask) ==
+           kPageHeaderTag);
+  }
+
+  base::VirtualMemory* reserved_memory() { return &reservation_; }
+
+  void InitializeReservedMemory() { reservation_.Reset(); }
+
+  void set_reserved_memory(base::VirtualMemory* reservation) {
+    DCHECK_NOT_NULL(reservation);
+    reservation_.TakeControl(reservation);
+  }
+
+  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
+  void initialize_scan_on_scavenge(bool scan) {
+    if (scan) {
+      SetFlag(SCAN_ON_SCAVENGE);
+    } else {
+      ClearFlag(SCAN_ON_SCAVENGE);
+    }
+  }
+  inline void set_scan_on_scavenge(bool scan);
+
+  int store_buffer_counter() { return store_buffer_counter_; }
+  void set_store_buffer_counter(int counter) {
+    store_buffer_counter_ = counter;
+  }
+
+  bool Contains(Address addr) {
+    return addr >= area_start() && addr < area_end();
+  }
+
+  // Checks whether addr can be a limit of addresses in this page.
+  // It's a limit if it's in the page, or if it's just after the
+  // last byte of the page.
+  bool ContainsLimit(Address addr) {
+    return addr >= area_start() && addr <= area_end();
+  }
+
+  // Every n write barrier invocations we go to runtime even though
+  // we could have handled it in generated code.  This lets us check
+  // whether we have hit the limit and should do some more marking.
+  static const int kWriteBarrierCounterGranularity = 500;
+
+  enum MemoryChunkFlags {
+    IS_EXECUTABLE,
+    ABOUT_TO_BE_FREED,
+    POINTERS_TO_HERE_ARE_INTERESTING,
+    POINTERS_FROM_HERE_ARE_INTERESTING,
+    SCAN_ON_SCAVENGE,
+    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
+    IN_TO_SPACE,    // All pages in new space have one of these two set.
+    NEW_SPACE_BELOW_AGE_MARK,
+    CONTAINS_ONLY_DATA,
+    EVACUATION_CANDIDATE,
+    RESCAN_ON_EVACUATION,
+
+    // WAS_SWEPT indicates that marking bits have been cleared by the sweeper,
+    // otherwise marking bits are still intact.
+    WAS_SWEPT,
+
+    // Large objects can have a progress bar in their page header. These
+    // objects are scanned in increments and will be kept black while being
+    // scanned. Even if the mutator writes to them they will be kept black,
+    // and a white-to-grey transition is performed on the written value.
+    HAS_PROGRESS_BAR,
+
+    // Last flag, keep at bottom.
+    NUM_MEMORY_CHUNK_FLAGS
+  };
+
+
+  static const int kPointersToHereAreInterestingMask =
+      1 << POINTERS_TO_HERE_ARE_INTERESTING;
+
+  static const int kPointersFromHereAreInterestingMask =
+      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
+
+  static const int kEvacuationCandidateMask = 1 << EVACUATION_CANDIDATE;
+
+  static const int kSkipEvacuationSlotsRecordingMask =
+      (1 << EVACUATION_CANDIDATE) | (1 << RESCAN_ON_EVACUATION) |
+      (1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE);
+
+
+  void SetFlag(int flag) { flags_ |= static_cast<uintptr_t>(1) << flag; }
+
+  void ClearFlag(int flag) { flags_ &= ~(static_cast<uintptr_t>(1) << flag); }
+
+  void SetFlagTo(int flag, bool value) {
+    if (value) {
+      SetFlag(flag);
+    } else {
+      ClearFlag(flag);
+    }
+  }
+
+  bool IsFlagSet(int flag) {
+    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
+  }
+
+  // Set or clear multiple flags at a time. The flags in the mask
+  // are set to the value in "flags"; the rest retain the current value
+  // in flags_.
+  void SetFlags(intptr_t flags, intptr_t mask) {
+    flags_ = (flags_ & ~mask) | (flags & mask);
+  }
+
+  // Return all current flags.
+  intptr_t GetFlags() { return flags_; }
+
+
+  // SWEEPING_DONE - The page state when sweeping is complete or sweeping must
+  // not be performed on that page.
+  // SWEEPING_FINALIZE - A sweeper thread is done sweeping this page and will
+  // not touch the page memory anymore.
+  // SWEEPING_IN_PROGRESS - This page is currently swept by a sweeper thread.
+  // SWEEPING_PENDING - This page is ready for parallel sweeping.
+  enum ParallelSweepingState {
+    SWEEPING_DONE,
+    SWEEPING_FINALIZE,
+    SWEEPING_IN_PROGRESS,
+    SWEEPING_PENDING
+  };
+
+  ParallelSweepingState parallel_sweeping() {
+    return static_cast<ParallelSweepingState>(
+        base::Acquire_Load(&parallel_sweeping_));
+  }
+
+  void set_parallel_sweeping(ParallelSweepingState state) {
+    base::Release_Store(&parallel_sweeping_, state);
+  }
+
+  bool TryParallelSweeping() {
+    return base::Acquire_CompareAndSwap(&parallel_sweeping_, SWEEPING_PENDING,
+                                        SWEEPING_IN_PROGRESS) ==
+           SWEEPING_PENDING;
+  }
+
+  bool SweepingCompleted() { return parallel_sweeping() <= SWEEPING_FINALIZE; }
+
+  // Manage live byte count (count of bytes known to be live,
+  // because they are marked black).
+  void ResetLiveBytes() {
+    if (FLAG_gc_verbose) {
+      PrintF("ResetLiveBytes:%p:%x->0\n", static_cast<void*>(this),
+             live_byte_count_);
+    }
+    live_byte_count_ = 0;
+  }
+  void IncrementLiveBytes(int by) {
+    if (FLAG_gc_verbose) {
+      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n", static_cast<void*>(this),
+             live_byte_count_, ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
+             live_byte_count_ + by);
+    }
+    live_byte_count_ += by;
+    DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
+  }
+  int LiveBytes() {
+    DCHECK(static_cast<unsigned>(live_byte_count_) <= size_);
+    return live_byte_count_;
+  }
+
+  int write_barrier_counter() {
+    return static_cast<int>(write_barrier_counter_);
+  }
+
+  void set_write_barrier_counter(int counter) {
+    write_barrier_counter_ = counter;
+  }
+
+  int progress_bar() {
+    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
+    return progress_bar_;
+  }
+
+  void set_progress_bar(int progress_bar) {
+    DCHECK(IsFlagSet(HAS_PROGRESS_BAR));
+    progress_bar_ = progress_bar;
+  }
+
+  void ResetProgressBar() {
+    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
+      set_progress_bar(0);
+      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
+    }
+  }
+
+  bool IsLeftOfProgressBar(Object** slot) {
+    Address slot_address = reinterpret_cast<Address>(slot);
+    DCHECK(slot_address > this->address());
+    return (slot_address - (this->address() + kObjectStartOffset)) <
+           progress_bar();
+  }
+
+  static void IncrementLiveBytesFromGC(Address address, int by) {
+    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
+  }
+
+  static void IncrementLiveBytesFromMutator(Address address, int by);
+
+  static const intptr_t kAlignment =
+      (static_cast<uintptr_t>(1) << kPageSizeBits);
+
+  static const intptr_t kAlignmentMask = kAlignment - 1;
+
+  static const intptr_t kSizeOffset = 0;
+
+  static const intptr_t kLiveBytesOffset =
+      kSizeOffset + kPointerSize + kPointerSize + kPointerSize + kPointerSize +
+      kPointerSize + kPointerSize + kPointerSize + kPointerSize + kIntSize;
+
+  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
+
+  static const size_t kWriteBarrierCounterOffset =
+      kSlotsBufferOffset + kPointerSize + kPointerSize;
+
+  static const size_t kHeaderSize =
+      kWriteBarrierCounterOffset + kPointerSize + kIntSize + kIntSize +
+      kPointerSize + 5 * kPointerSize + kPointerSize + kPointerSize;
+
+  static const int kBodyOffset =
+      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
+
+  // The start offset of the object area in a page. Aligned to both maps and
+  // code alignment to be suitable for both.  Also aligned to 32 words because
+  // the marking bitmap is arranged in 32 bit chunks.
+  static const int kObjectStartAlignment = 32 * kPointerSize;
+  static const int kObjectStartOffset =
+      kBodyOffset - 1 +
+      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
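+  // I.e. kBodyOffset rounded up to the next multiple of
+  // kObjectStartAlignment (32 words).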
+
+  size_t size() const { return size_; }
+
+  void set_size(size_t size) { size_ = size; }
+
+  void SetArea(Address area_start, Address area_end) {
+    area_start_ = area_start;
+    area_end_ = area_end;
+  }
+
+  Executability executable() {
+    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
+  }
+
+  bool ContainsOnlyData() { return IsFlagSet(CONTAINS_ONLY_DATA); }
+
+  bool InNewSpace() {
+    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
+  }
+
+  bool InToSpace() { return IsFlagSet(IN_TO_SPACE); }
+
+  bool InFromSpace() { return IsFlagSet(IN_FROM_SPACE); }
+
+  // ---------------------------------------------------------------------
+  // Markbits support
+
+  inline Bitmap* markbits() {
+    return Bitmap::FromAddress(address() + kHeaderSize);
+  }
+
+  void PrintMarkbits() { markbits()->Print(); }
+
+  inline uint32_t AddressToMarkbitIndex(Address addr) {
+    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
+  }
+
+  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
+    const intptr_t offset = reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
+
+    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
+  }
+
+  inline Address MarkbitIndexToAddress(uint32_t index) {
+    return this->address() + (index << kPointerSizeLog2);
+  }
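+
+  // A sketch of a mark-bit lookup built from the helpers above (assuming
+  // |addr| is a pointer-aligned address inside this chunk):
+  //
+  //   uint32_t index = chunk->AddressToMarkbitIndex(addr);
+  //   MarkBit bit = chunk->markbits()->MarkBitFromIndex(index);
+  //   if (!bit.Get()) bit.Set();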
+
+  void InsertAfter(MemoryChunk* other);
+  void Unlink();
+
+  inline Heap* heap() const { return heap_; }
+
+  static const int kFlagsOffset = kPointerSize;
+
+  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
+
+  bool ShouldSkipEvacuationSlotRecording() {
+    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
+  }
+
+  inline SkipList* skip_list() { return skip_list_; }
+
+  inline void set_skip_list(SkipList* skip_list) { skip_list_ = skip_list; }
+
+  inline SlotsBuffer* slots_buffer() { return slots_buffer_; }
+
+  inline SlotsBuffer** slots_buffer_address() { return &slots_buffer_; }
+
+  void MarkEvacuationCandidate() {
+    DCHECK(slots_buffer_ == NULL);
+    SetFlag(EVACUATION_CANDIDATE);
+  }
+
+  void ClearEvacuationCandidate() {
+    DCHECK(slots_buffer_ == NULL);
+    ClearFlag(EVACUATION_CANDIDATE);
+  }
+
+  Address area_start() { return area_start_; }
+  Address area_end() { return area_end_; }
+  int area_size() { return static_cast<int>(area_end() - area_start()); }
+  bool CommitArea(size_t requested);
+
+  // Approximate amount of physical memory committed for this chunk.
+  size_t CommittedPhysicalMemory() { return high_water_mark_; }
+
+  static inline void UpdateHighWaterMark(Address mark);
+
+ protected:
+  size_t size_;
+  intptr_t flags_;
+
+  // Start and end of allocatable memory on this chunk.
+  Address area_start_;
+  Address area_end_;
+
+  // If the chunk needs to remember its memory reservation, it is stored here.
+  base::VirtualMemory reservation_;
+  // The identity of the owning space.  This is tagged as a failure pointer, but
+  // no failure can be in an object, so this can be distinguished from any entry
+  // in a fixed array.
+  Address owner_;
+  Heap* heap_;
+  // Used by the store buffer to keep track of which pages to mark scan-on-
+  // scavenge.
+  int store_buffer_counter_;
+  // Count of bytes marked black on page.
+  int live_byte_count_;
+  SlotsBuffer* slots_buffer_;
+  SkipList* skip_list_;
+  intptr_t write_barrier_counter_;
+  // Used by the incremental marker to keep track of the scanning progress in
+  // large objects that have a progress bar and are scanned in increments.
+  int progress_bar_;
+  // Assuming the initial allocation on a page is sequential,
+  // counts the highest number of bytes ever allocated on the page.
+  int high_water_mark_;
+
+  base::AtomicWord parallel_sweeping_;
+
+  // PagedSpace free-list statistics.
+  intptr_t available_in_small_free_list_;
+  intptr_t available_in_medium_free_list_;
+  intptr_t available_in_large_free_list_;
+  intptr_t available_in_huge_free_list_;
+  intptr_t non_available_small_blocks_;
+
+  static MemoryChunk* Initialize(Heap* heap, Address base, size_t size,
+                                 Address area_start, Address area_end,
+                                 Executability executable, Space* owner);
+
+ private:
+  // next_chunk_ holds a pointer of type MemoryChunk
+  base::AtomicWord next_chunk_;
+  // prev_chunk_ holds a pointer of type MemoryChunk
+  base::AtomicWord prev_chunk_;
+
+  friend class MemoryAllocator;
+};
+
+
+STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
+
+
+// -----------------------------------------------------------------------------
+// A page is a memory chunk of size 1MB. Large object pages may be larger.
+//
+// The only way to get a page pointer is by calling factory methods:
+//   Page* p = Page::FromAddress(addr); or
+//   Page* p = Page::FromAllocationTop(top);
+class Page : public MemoryChunk {
+ public:
+  // Returns the page containing a given address. The address ranges
+  // from [page_addr .. page_addr + kPageSize[
+  // This only works if the object is in fact in a page.  See also
+  // MemoryChunk::FromAddress() and MemoryChunk::FromAnyPointerAddress().
+  INLINE(static Page* FromAddress(Address a)) {
+    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
+  }
+
+  // Returns the page containing an allocation top. Because an allocation
+  // top address can be the upper bound of the page, we need to subtract
+  // kPointerSize from it first. The address ranges from
+  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
+  INLINE(static Page* FromAllocationTop(Address top)) {
+    Page* p = FromAddress(top - kPointerSize);
+    return p;
+  }
+
+  // Returns the next page in the chain of pages owned by a space.
+  inline Page* next_page();
+  inline Page* prev_page();
+  inline void set_next_page(Page* page);
+  inline void set_prev_page(Page* page);
+
+  // Checks whether an address is page aligned.
+  static bool IsAlignedToPageSize(Address a) {
+    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
+  }
+
+  // Returns the offset of a given address to this page.
+  INLINE(int Offset(Address a)) {
+    int offset = static_cast<int>(a - address());
+    return offset;
+  }
+
+  // Returns the address for a given offset in this page.
+  Address OffsetToAddress(int offset) {
+    DCHECK_PAGE_OFFSET(offset);
+    return address() + offset;
+  }
+
+  // ---------------------------------------------------------------------
+
+  // Page size in bytes.  This must be a multiple of the OS page size.
+  static const int kPageSize = 1 << kPageSizeBits;
+
+  // Maximum object size that fits in a page. Objects larger than that size
+  // are allocated in large object space and are never moved in memory. This
+  // also applies to new space allocation, since objects are never migrated
+  // from new space to large object space.  Takes double alignment into account.
+  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
+
+  // Page size mask.
+  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
+
+  inline void ClearGCFields();
+
+  static inline Page* Initialize(Heap* heap, MemoryChunk* chunk,
+                                 Executability executable, PagedSpace* owner);
+
+  void InitializeAsAnchor(PagedSpace* owner);
+
+  bool WasSwept() { return IsFlagSet(WAS_SWEPT); }
+  void SetWasSwept() { SetFlag(WAS_SWEPT); }
+  void ClearWasSwept() { ClearFlag(WAS_SWEPT); }
+
+  void ResetFreeListStatistics();
+
+#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
+  type name() { return name##_; }                 \
+  void set_##name(type name) { name##_ = name; }  \
+  void add_##name(type name) { name##_ += name; }
+
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
+  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
+
+#undef FRAGMENTATION_STATS_ACCESSORS
+
+#ifdef DEBUG
+  void Print();
+#endif  // DEBUG
+
+  friend class MemoryAllocator;
+};
+
+
+STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
+
+
+class LargePage : public MemoryChunk {
+ public:
+  HeapObject* GetObject() { return HeapObject::FromAddress(area_start()); }
+
+  inline LargePage* next_page() const {
+    return static_cast<LargePage*>(next_chunk());
+  }
+
+  inline void set_next_page(LargePage* page) { set_next_chunk(page); }
+
+ private:
+  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
+
+  friend class MemoryAllocator;
+};
+
+STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
+
+// ----------------------------------------------------------------------------
+// Space is the abstract superclass for all allocation spaces.
+class Space : public Malloced {
+ public:
+  Space(Heap* heap, AllocationSpace id, Executability executable)
+      : heap_(heap), id_(id), executable_(executable) {}
+
+  virtual ~Space() {}
+
+  Heap* heap() const { return heap_; }
+
+  // Does the space need executable memory?
+  Executability executable() { return executable_; }
+
+  // Identity used in error reporting.
+  AllocationSpace identity() { return id_; }
+
+  // Returns allocated size.
+  virtual intptr_t Size() = 0;
+
+  // Returns size of objects. Can differ from the allocated size
+  // (e.g. see LargeObjectSpace).
+  virtual intptr_t SizeOfObjects() { return Size(); }
+
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (id_ == CODE_SPACE) {
+      return RoundDown(size, kCodeAlignment);
+    } else {
+      return RoundDown(size, kPointerSize);
+    }
+  }
+
+#ifdef DEBUG
+  virtual void Print() = 0;
+#endif
+
+ private:
+  Heap* heap_;
+  AllocationSpace id_;
+  Executability executable_;
+};
+
+
+// ----------------------------------------------------------------------------
+// All heap objects containing executable code (code objects) must be allocated
+// from a 2 GB range of memory, so that they can call each other using 32-bit
+// displacements.  This happens automatically on 32-bit platforms, where 32-bit
+// displacements cover the entire 4GB virtual address space.  On 64-bit
+// platforms, we support this using the CodeRange object, which reserves and
+// manages a range of virtual memory.
+class CodeRange {
+ public:
+  explicit CodeRange(Isolate* isolate);
+  ~CodeRange() { TearDown(); }
+
+  // Reserves a range of virtual memory, but does not commit any of it.
+  // Can only be called once, at heap initialization time.
+  // Returns false on failure.
+  bool SetUp(size_t requested_size);
+
+  // Frees the range of virtual memory, and frees the data structures used to
+  // manage it.
+  void TearDown();
+
+  bool valid() { return code_range_ != NULL; }
+  Address start() {
+    DCHECK(valid());
+    return static_cast<Address>(code_range_->address());
+  }
+  bool contains(Address address) {
+    if (!valid()) return false;
+    Address start = static_cast<Address>(code_range_->address());
+    return start <= address && address < start + code_range_->size();
+  }
+
+  // Allocates a chunk of memory from the large-object portion of
+  // the code range.  On platforms with no separate code range, should
+  // not be called.
+  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
+                                            const size_t commit_size,
+                                            size_t* allocated);
+  bool CommitRawMemory(Address start, size_t length);
+  bool UncommitRawMemory(Address start, size_t length);
+  void FreeRawMemory(Address buf, size_t length);
+
+ private:
+  Isolate* isolate_;
+
+  // The reserved range of virtual memory that all code objects are put in.
+  base::VirtualMemory* code_range_;
+  // Plain old data class, just a struct plus a constructor.
+  class FreeBlock {
+   public:
+    FreeBlock(Address start_arg, size_t size_arg)
+        : start(start_arg), size(size_arg) {
+      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
+      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
+    }
+    FreeBlock(void* start_arg, size_t size_arg)
+        : start(static_cast<Address>(start_arg)), size(size_arg) {
+      DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
+      DCHECK(size >= static_cast<size_t>(Page::kPageSize));
+    }
+
+    Address start;
+    size_t size;
+  };
+
+  // Freed blocks of memory are added to the free list.  When the allocation
+  // list is exhausted, the free list is sorted and merged to make the new
+  // allocation list.
+  List<FreeBlock> free_list_;
+  // Memory is allocated from the free blocks on the allocation list.
+  // The block at current_allocation_block_index_ is the current block.
+  List<FreeBlock> allocation_list_;
+  int current_allocation_block_index_;
+
+  // Finds a block on the allocation list that contains at least the
+  // requested amount of memory.  If none is found, sorts and merges
+  // the existing free memory blocks, and searches again.
+  // If none can be found, returns false.
+  bool GetNextAllocationBlock(size_t requested);
+  // Compares the start addresses of two free blocks.
+  static int CompareFreeBlockAddress(const FreeBlock* left,
+                                     const FreeBlock* right);
+
+  DISALLOW_COPY_AND_ASSIGN(CodeRange);
+};
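+
+// A sketch of typical CodeRange use (sizes illustrative; assumes the MB
+// constant from globals.h):
+//
+//   CodeRange code_range(isolate);
+//   if (code_range.SetUp(512 * MB)) {
+//     size_t allocated = 0;
+//     Address base =
+//         code_range.AllocateRawMemory(size, size, &allocated);
+//     ...
+//     code_range.FreeRawMemory(base, allocated);
+//   }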
+
+
+class SkipList {
+ public:
+  SkipList() { Clear(); }
+
+  void Clear() {
+    for (int idx = 0; idx < kSize; idx++) {
+      starts_[idx] = reinterpret_cast<Address>(-1);
+    }
+  }
+
+  Address StartFor(Address addr) { return starts_[RegionNumber(addr)]; }
+
+  void AddObject(Address addr, int size) {
+    int start_region = RegionNumber(addr);
+    int end_region = RegionNumber(addr + size - kPointerSize);
+    for (int idx = start_region; idx <= end_region; idx++) {
+      if (starts_[idx] > addr) starts_[idx] = addr;
+    }
+  }
+
+  static inline int RegionNumber(Address addr) {
+    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
+  }
+
+  static void Update(Address addr, int size) {
+    Page* page = Page::FromAddress(addr);
+    SkipList* list = page->skip_list();
+    if (list == NULL) {
+      list = new SkipList();
+      page->set_skip_list(list);
+    }
+
+    list->AddObject(addr, size);
+  }
+
+ private:
+  static const int kRegionSizeLog2 = 13;
+  static const int kRegionSize = 1 << kRegionSizeLog2;
+  static const int kSize = Page::kPageSize / kRegionSize;
+
+  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
+
+  Address starts_[kSize];
+};
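+
+// With kRegionSizeLog2 == 13 each region covers 8KB, so a 1MB page has 128
+// entries. A sketch of the typical lookup (assuming |addr| lies in a page
+// whose skip list is up to date):
+//
+//   Address start = Page::FromAddress(addr)->skip_list()->StartFor(addr);
+//   // |start| is at or before the first object overlapping addr's region.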
+
+
+// ----------------------------------------------------------------------------
+// A space acquires chunks of memory from the operating system. The memory
+// allocator allocates and deallocates pages for the paged heap spaces and
+// large pages for large object space.
+//
+// Each space has to manage its own pages.
+//
+class MemoryAllocator {
+ public:
+  explicit MemoryAllocator(Isolate* isolate);
+
+  // Initializes the internal bookkeeping structures with the maximum capacity
+  // of the total space and the executable memory limit.
+  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
+
+  void TearDown();
+
+  Page* AllocatePage(intptr_t size, PagedSpace* owner,
+                     Executability executable);
+
+  LargePage* AllocateLargePage(intptr_t object_size, Space* owner,
+                               Executability executable);
+
+  void Free(MemoryChunk* chunk);
+
+  // Returns the maximum available bytes of heaps.
+  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
+
+  // Returns allocated spaces in bytes.
+  intptr_t Size() { return size_; }
+
+  // Returns the maximum available executable bytes of heaps.
+  intptr_t AvailableExecutable() {
+    if (capacity_executable_ < size_executable_) return 0;
+    return capacity_executable_ - size_executable_;
+  }
+
+  // Returns allocated executable spaces in bytes.
+  intptr_t SizeExecutable() { return size_executable_; }
+
+  // Returns maximum available bytes that the old space can have.
+  intptr_t MaxAvailable() {
+    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
+  }
+
+  // Returns an indication of whether a pointer is in a space that has
+  // been allocated by this MemoryAllocator.
+  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
+    return address < lowest_ever_allocated_ ||
+           address >= highest_ever_allocated_;
+  }
+
+#ifdef DEBUG
+  // Reports statistic info of the space.
+  void ReportStatistics();
+#endif
+
+  // Returns a MemoryChunk in which the memory region from commit_area_size to
+  // reserve_area_size of the chunk area is reserved but not committed; it
+  // can be committed later by calling MemoryChunk::CommitArea.
+  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
+                             intptr_t commit_area_size,
+                             Executability executable, Space* space);
+
+  Address ReserveAlignedMemory(size_t requested, size_t alignment,
+                               base::VirtualMemory* controller);
+  Address AllocateAlignedMemory(size_t reserve_size, size_t commit_size,
+                                size_t alignment, Executability executable,
+                                base::VirtualMemory* controller);
+
+  bool CommitMemory(Address addr, size_t size, Executability executable);
+
+  void FreeMemory(base::VirtualMemory* reservation, Executability executable);
+  void FreeMemory(Address addr, size_t size, Executability executable);
+
+  // Commit a contiguous block of memory from the initial chunk.  Assumes that
+  // the address is not NULL, the size is greater than zero, and that the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool CommitBlock(Address start, size_t size, Executability executable);
+
+  // Uncommit a contiguous block of memory [start..(start+size)[.
+  // start is not NULL, the size is greater than zero, and the
+  // block is contained in the initial chunk.  Returns true if it succeeded
+  // and false otherwise.
+  bool UncommitBlock(Address start, size_t size);
+
+  // Zaps a contiguous block of memory [start..(start+size)[, thus
+  // filling it up with a recognizable non-NULL bit pattern.
+  void ZapBlock(Address start, size_t size);
+
+  void PerformAllocationCallback(ObjectSpace space, AllocationAction action,
+                                 size_t size);
+
+  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
+                                   ObjectSpace space, AllocationAction action);
+
+  void RemoveMemoryAllocationCallback(MemoryAllocationCallback callback);
+
+  bool MemoryAllocationCallbackRegistered(MemoryAllocationCallback callback);
+
+  static int CodePageGuardStartOffset();
+
+  static int CodePageGuardSize();
+
+  static int CodePageAreaStartOffset();
+
+  static int CodePageAreaEndOffset();
+
+  static int CodePageAreaSize() {
+    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
+  }
+
+  MUST_USE_RESULT bool CommitExecutableMemory(base::VirtualMemory* vm,
+                                              Address start, size_t commit_size,
+                                              size_t reserved_size);
+
+ private:
+  Isolate* isolate_;
+
+  // Maximum space size in bytes.
+  size_t capacity_;
+  // Maximum subset of capacity_ that can be executable
+  size_t capacity_executable_;
+
+  // Allocated space size in bytes.
+  size_t size_;
+  // Allocated executable space size in bytes.
+  size_t size_executable_;
+
+  // We keep the lowest and highest addresses allocated as a quick way
+  // of determining that pointers are outside the heap. The estimate is
+  // conservative, i.e. not all addresses in 'allocated' space are allocated
+  // to our heap. The range is [lowest, highest[, inclusive on the low end
+  // and exclusive on the high end.
+  void* lowest_ever_allocated_;
+  void* highest_ever_allocated_;
+
+  struct MemoryAllocationCallbackRegistration {
+    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
+                                         ObjectSpace space,
+                                         AllocationAction action)
+        : callback(callback), space(space), action(action) {}
+    MemoryAllocationCallback callback;
+    ObjectSpace space;
+    AllocationAction action;
+  };
+
+  // A list of callbacks that are triggered when memory is allocated or freed.
+  List<MemoryAllocationCallbackRegistration> memory_allocation_callbacks_;
+
+  // Initializes pages in a chunk. Returns the first page address.
+  // This function and GetChunkId() are provided for the mark-compact
+  // collector to rebuild page headers in the from space, which is
+  // used as a marking stack and its page headers are destroyed.
+  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
+                               PagedSpace* owner);
+
+  void UpdateAllocatedSpaceLimits(void* low, void* high) {
+    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
+    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
+  }
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
+};
+
+
+// -----------------------------------------------------------------------------
+// Interface for heap object iterator to be implemented by all object space
+// object iterators.
+//
+// NOTE: The space-specific object iterators also implement their own next()
+//       method, which is used to avoid virtual function calls when
+//       iterating a specific space.
+
+class ObjectIterator : public Malloced {
+ public:
+  virtual ~ObjectIterator() {}
+
+  virtual HeapObject* next_object() = 0;
+};
+
+
+// -----------------------------------------------------------------------------
+// Heap object iterator in new/old/map spaces.
+//
+// A HeapObjectIterator iterates objects from the bottom of the given space
+// to its top or from the bottom of the given page to its top.
+//
+// If objects are allocated in the page during iteration the iterator may
+// or may not iterate over those objects.  The caller must create a new
+// iterator in order to be sure to visit these new objects.
+class HeapObjectIterator : public ObjectIterator {
+ public:
+  // Creates a new object iterator in a given space.
+  // If the size function is not given, the iterator calls the default
+  // Object::Size().
+  explicit HeapObjectIterator(PagedSpace* space);
+  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
+  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
+
+  // Advance to the next object, skipping free spaces and other fillers, and
+  // skipping the special garbage section, of which there is one per space.
+  // Returns NULL when the iteration has ended.
+  inline HeapObject* Next() {
+    do {
+      HeapObject* next_obj = FromCurrentPage();
+      if (next_obj != NULL) return next_obj;
+    } while (AdvanceToNextPage());
+    return NULL;
+  }
+
+  virtual HeapObject* next_object() { return Next(); }
+
+ private:
+  enum PageMode { kOnePageOnly, kAllPagesInSpace };
+
+  Address cur_addr_;              // Current iteration point.
+  Address cur_end_;               // End iteration point.
+  HeapObjectCallback size_func_;  // Size function or NULL.
+  PagedSpace* space_;
+  PageMode page_mode_;
+
+  // Fast (inlined) path of next().
+  inline HeapObject* FromCurrentPage();
+
+  // Slow path of next(), goes into the next page.  Returns false if the
+  // iteration has ended.
+  bool AdvanceToNextPage();
+
+  // Initializes fields.
+  inline void Initialize(PagedSpace* owner, Address start, Address end,
+                         PageMode mode, HeapObjectCallback size_func);
+};
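+
+// Typical use (this is the pattern used throughout spaces.cc):
+//
+//   HeapObjectIterator it(space);
+//   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
+//     // ... process |obj| ...
+//   }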
+
+
+// -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a paged space.
+
+class PageIterator BASE_EMBEDDED {
+ public:
+  explicit inline PageIterator(PagedSpace* space);
+
+  inline bool has_next();
+  inline Page* next();
+
+ private:
+  PagedSpace* space_;
+  Page* prev_page_;  // Previous page returned.
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  Page* next_page_;
+};
+
+
+// -----------------------------------------------------------------------------
+// A space has a circular list of pages. The next page can be accessed via
+// Page::next_page() call.
+
+// An abstraction of allocation and relocation pointers in a page-structured
+// space.
+class AllocationInfo {
+ public:
+  AllocationInfo() : top_(NULL), limit_(NULL) {}
+
+  INLINE(void set_top(Address top)) {
+    SLOW_DCHECK(top == NULL ||
+                (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
+    top_ = top;
+  }
+
+  INLINE(Address top()) const {
+    SLOW_DCHECK(top_ == NULL ||
+                (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
+    return top_;
+  }
+
+  Address* top_address() { return &top_; }
+
+  INLINE(void set_limit(Address limit)) {
+    SLOW_DCHECK(limit == NULL ||
+                (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
+    limit_ = limit;
+  }
+
+  INLINE(Address limit()) const {
+    SLOW_DCHECK(limit_ == NULL ||
+                (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) ==
+                    0);
+    return limit_;
+  }
+
+  Address* limit_address() { return &limit_; }
+
+#ifdef DEBUG
+  bool VerifyPagedAllocation() {
+    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_)) &&
+           (top_ <= limit_);
+  }
+#endif
+
+ private:
+  // Current allocation top.
+  Address top_;
+  // Current allocation limit.
+  Address limit_;
+};
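+
+// A sketch of the bump-pointer allocation this pair of pointers enables
+// (see the "special garbage section" discussion at the top of this file):
+//
+//   Address top = info->top();
+//   if (top != NULL && top + size_in_bytes <= info->limit()) {
+//     info->set_top(top + size_in_bytes);
+//     // HeapObject::FromAddress(top) is the newly allocated object.
+//   }  // Otherwise: allocate from the free list or collect garbage.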
+
+
+// An abstraction of the accounting statistics of a page-structured space.
+// The 'capacity' of a space is the number of object-area bytes (i.e., not
+// including page bookkeeping structures) currently in the space. The 'size'
+// of a space is the number of allocated bytes, the 'waste' in the space is
+// the number of bytes that are not allocated and not available to
+// allocation without reorganizing the space via a GC (e.g. small blocks due
+// to internal fragmentation, top of page areas in map space), and the
+// 'available' bytes are the unallocated bytes that are not waste.  The
+// capacity is the sum of size, waste, and available.
+//
+// The stats are only set by functions that ensure they stay balanced. These
+// functions increase or decrease one of the non-capacity stats in
+// conjunction with capacity, or else they always balance increases and
+// decreases to the non-capacity stats.
+class AllocationStats BASE_EMBEDDED {
+ public:
+  AllocationStats() { Clear(); }
+
+  // Zero out all the allocation statistics (i.e., no capacity).
+  void Clear() {
+    capacity_ = 0;
+    max_capacity_ = 0;
+    size_ = 0;
+    waste_ = 0;
+  }
+
+  void ClearSizeWaste() {
+    size_ = capacity_;
+    waste_ = 0;
+  }
+
+  // Reset the allocation statistics (i.e., available = capacity with no
+  // wasted or allocated bytes).
+  void Reset() {
+    size_ = 0;
+    waste_ = 0;
+  }
+
+  // Accessors for the allocation statistics.
+  intptr_t Capacity() { return capacity_; }
+  intptr_t MaxCapacity() { return max_capacity_; }
+  intptr_t Size() { return size_; }
+  intptr_t Waste() { return waste_; }
+
+  // Grow the space by adding available bytes.  They are initially marked as
+  // being in use (part of the size), but will normally be immediately freed,
+  // putting them on the free list and removing them from size_.
+  void ExpandSpace(int size_in_bytes) {
+    capacity_ += size_in_bytes;
+    size_ += size_in_bytes;
+    if (capacity_ > max_capacity_) {
+      max_capacity_ = capacity_;
+    }
+    DCHECK(size_ >= 0);
+  }
+
+  // Shrink the space by removing available bytes.  Since shrinking is done
+  // during sweeping, bytes have been marked as being in use (part of the size)
+  // and are hereby freed.
+  void ShrinkSpace(int size_in_bytes) {
+    capacity_ -= size_in_bytes;
+    size_ -= size_in_bytes;
+    DCHECK(size_ >= 0);
+  }
+
+  // Allocate from available bytes (available -> size).
+  void AllocateBytes(intptr_t size_in_bytes) {
+    size_ += size_in_bytes;
+    DCHECK(size_ >= 0);
+  }
+
+  // Free allocated bytes, making them available (size -> available).
+  void DeallocateBytes(intptr_t size_in_bytes) {
+    size_ -= size_in_bytes;
+    DCHECK(size_ >= 0);
+  }
+
+  // Waste free bytes (available -> waste).
+  void WasteBytes(int size_in_bytes) {
+    DCHECK(size_in_bytes >= 0);
+    waste_ += size_in_bytes;
+  }
+
+ private:
+  intptr_t capacity_;
+  intptr_t max_capacity_;
+  intptr_t size_;
+  intptr_t waste_;
+};
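+
+
+// A worked example of the balancing rule above: starting from cleared stats,
+// ExpandSpace(4096) gives capacity == size == 4096 and waste == 0, so
+// 'available' (capacity - size - waste) is 0.  DeallocateBytes(4096) then
+// moves all 4096 bytes to 'available', and a later WasteBytes(16) leaves
+// 4080 bytes available -- capacity stays the sum of the three throughout.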
+
+
+// -----------------------------------------------------------------------------
+// Free lists for old object spaces
+//
+// Free-list nodes are free blocks in the heap.  They look like heap objects
+// (free-list node pointers have the heap object tag, and they have a map like
+// a heap object).  They have a size and a next pointer.  The next pointer is
+// the raw address of the next free list node (or NULL).
+class FreeListNode : public HeapObject {
+ public:
+  // Obtain a free-list node from a raw address.  This is not a cast because
+  // it neither checks nor requires that the first word at the address is a
+  // map pointer.
+  static FreeListNode* FromAddress(Address address) {
+    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
+  }
+
+  static inline bool IsFreeListNode(HeapObject* object);
+
+  // Set the size in bytes, which can be read with HeapObject::Size().  This
+  // function also writes a map to the first word of the block so that it
+  // looks like a heap object to the garbage collector and heap iteration
+  // functions.
+  void set_size(Heap* heap, int size_in_bytes);
+
+  // Accessors for the next field.
+  inline FreeListNode* next();
+  inline FreeListNode** next_address();
+  inline void set_next(FreeListNode* next);
+
+  inline void Zap();
+
+  static inline FreeListNode* cast(Object* object) {
+    return reinterpret_cast<FreeListNode*>(object);
+  }
+
+ private:
+  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
+};
+
+
+// The free list category holds a pointer to the top element and a pointer to
+// the end element of the linked list of free memory blocks.
+class FreeListCategory {
+ public:
+  FreeListCategory() : top_(0), end_(NULL), available_(0) {}
+
+  intptr_t Concatenate(FreeListCategory* category);
+
+  void Reset();
+
+  void Free(FreeListNode* node, int size_in_bytes);
+
+  FreeListNode* PickNodeFromList(int* node_size);
+  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
+
+  intptr_t EvictFreeListItemsInList(Page* p);
+  bool ContainsPageFreeListItemsInList(Page* p);
+
+  void RepairFreeList(Heap* heap);
+
+  FreeListNode* top() const {
+    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
+  }
+
+  void set_top(FreeListNode* top) {
+    base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
+  }
+
+  FreeListNode** GetEndAddress() { return &end_; }
+  FreeListNode* end() const { return end_; }
+  void set_end(FreeListNode* end) { end_ = end; }
+
+  int* GetAvailableAddress() { return &available_; }
+  int available() const { return available_; }
+  void set_available(int available) { available_ = available; }
+
+  base::Mutex* mutex() { return &mutex_; }
+
+  bool IsEmpty() { return top() == 0; }
+
+#ifdef DEBUG
+  intptr_t SumFreeList();
+  int FreeListLength();
+#endif
+
+ private:
+  // top_ points to the top FreeListNode* in the free list category.
+  base::AtomicWord top_;
+  FreeListNode* end_;
+  base::Mutex mutex_;
+
+  // Total available bytes in all blocks of this free list category.
+  int available_;
+};
+
+
+// The free list for the old space.  The free list is organized in such a way
+// as to encourage objects allocated around the same time to be near each
+// other.  The normal way to allocate is intended to be by bumping a 'top'
+// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
+// find a new space to allocate from.  This is done with the free list, which
+// is divided up into rough categories to cut down on waste.  Having finer
+// categories would scatter allocation more.
+
+// The old space free list is organized in categories.
+// 1-31 words:  Such small free areas are discarded for efficiency reasons.
+//     They can be reclaimed by the compactor.  However the distance between top
+//     and limit may be this small.
+// 32-255 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 1-31 words in size.  These
+//     spaces are called small.
+// 256-2047 words: There is a list of spaces this large.  It is used for top and
+//     limit when the object we need to allocate is 32-255 words in size.  These
+//     spaces are called medium.
+// 2048-16383 words: There is a list of spaces this large.  It is used for top
+//     and limit when the object we need to allocate is 256-2047 words in size.
+//     These spaces are called large.
+// At least 16384 words.  This list is for objects of 2048 words or larger.
+//     Empty pages are added to this list.  These spaces are called huge.
+class FreeList {
+ public:
+  explicit FreeList(PagedSpace* owner);
+
+  intptr_t Concatenate(FreeList* free_list);
+
+  // Clear the free list.
+  void Reset();
+
+  // Return the number of bytes available on the free list.
+  intptr_t available() {
+    return small_list_.available() + medium_list_.available() +
+           large_list_.available() + huge_list_.available();
+  }
+
+  // Place a node on the free list.  The block of size 'size_in_bytes'
+  // starting at 'start' is placed on the free list.  The return value is the
+  // number of bytes that have been lost due to internal fragmentation by
+  // freeing the block.  Bookkeeping information will be written to the block,
+  // i.e., its contents will be destroyed.  The start address should be word
+  // aligned, and the size should be a non-zero multiple of the word size.
+  int Free(Address start, int size_in_bytes);
+
+  // This method returns how much memory can be allocated after freeing
+  // maximum_freed memory.
+  static inline int GuaranteedAllocatable(int maximum_freed) {
+    if (maximum_freed < kSmallListMin) {
+      return 0;
+    } else if (maximum_freed <= kSmallListMax) {
+      return kSmallAllocationMax;
+    } else if (maximum_freed <= kMediumListMax) {
+      return kMediumAllocationMax;
+    } else if (maximum_freed <= kLargeListMax) {
+      return kLargeAllocationMax;
+    }
+    return maximum_freed;
+  }
+
+  // Allocate a block of size 'size_in_bytes' from the free list.  The block
+  // is uninitialized.  A failure is returned if no block is available.  The
+  // number of bytes lost to fragmentation is returned in the output parameter
+  // 'wasted_bytes'.  The size should be a non-zero multiple of the word size.
+  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
+
+  bool IsEmpty() {
+    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
+           large_list_.IsEmpty() && huge_list_.IsEmpty();
+  }
+
+#ifdef DEBUG
+  void Zap();
+  intptr_t SumFreeLists();
+  bool IsVeryLong();
+#endif
+
+  // Used after booting the VM.
+  void RepairLists(Heap* heap);
+
+  intptr_t EvictFreeListItems(Page* p);
+  bool ContainsPageFreeListItems(Page* p);
+
+  FreeListCategory* small_list() { return &small_list_; }
+  FreeListCategory* medium_list() { return &medium_list_; }
+  FreeListCategory* large_list() { return &large_list_; }
+  FreeListCategory* huge_list() { return &huge_list_; }
+
+ private:
+  // The size range of blocks, in bytes.
+  static const int kMinBlockSize = 3 * kPointerSize;
+  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
+
+  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
+
+  PagedSpace* owner_;
+  Heap* heap_;
+
+  static const int kSmallListMin = 0x20 * kPointerSize;
+  static const int kSmallListMax = 0xff * kPointerSize;
+  static const int kMediumListMax = 0x7ff * kPointerSize;
+  static const int kLargeListMax = 0x3fff * kPointerSize;
+  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
+  static const int kMediumAllocationMax = kSmallListMax;
+  static const int kLargeAllocationMax = kMediumListMax;
+  FreeListCategory small_list_;
+  FreeListCategory medium_list_;
+  FreeListCategory large_list_;
+  FreeListCategory huge_list_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
+};
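+
+
+// Illustrative arithmetic for the bounds above, assuming a 64-bit build
+// (kPointerSize == 8): kSmallListMin is 256 bytes, kSmallListMax 2040,
+// kMediumListMax 16376 and kLargeListMax 131064.  GuaranteedAllocatable()
+// then behaves like this:
+//
+//   FreeList::GuaranteedAllocatable(200);     // < kSmallListMin  -> 0
+//   FreeList::GuaranteedAllocatable(1024);    // <= kSmallListMax -> 248
+//   FreeList::GuaranteedAllocatable(200000);  // > kLargeListMax  -> 200000
+//
+// where 248 is kSmallAllocationMax (kSmallListMin - kPointerSize).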
+
+
+class AllocationResult {
+ public:
+  // Implicit constructor from Object*.
+  AllocationResult(Object* object)  // NOLINT
+      : object_(object),
+        retry_space_(INVALID_SPACE) {}
+
+  AllocationResult() : object_(NULL), retry_space_(INVALID_SPACE) {}
+
+  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
+    return AllocationResult(space);
+  }
+
+  inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }
+
+  template <typename T>
+  bool To(T** obj) {
+    if (IsRetry()) return false;
+    *obj = T::cast(object_);
+    return true;
+  }
+
+  Object* ToObjectChecked() {
+    CHECK(!IsRetry());
+    return object_;
+  }
+
+  AllocationSpace RetrySpace() {
+    DCHECK(IsRetry());
+    return retry_space_;
+  }
+
+ private:
+  explicit AllocationResult(AllocationSpace space)
+      : object_(NULL), retry_space_(space) {}
+
+  Object* object_;
+  AllocationSpace retry_space_;
+};
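+
+
+// A typical caller pattern (sketch; 'space' stands for any allocator that
+// returns an AllocationResult):
+//
+//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
+//   HeapObject* obj;
+//   if (!allocation.To(&obj)) {
+//     // Retry case: allocation.RetrySpace() names the space that should be
+//     // collected before the allocation is attempted again.
+//   }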
+
+
+class PagedSpace : public Space {
+ public:
+  // Creates a space with a maximum capacity, and an id.
+  PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
+             Executability executable);
+
+  virtual ~PagedSpace() {}
+
+  // Set up the space using the given address range of virtual memory (from
+  // the memory allocator's initial chunk) if possible.  If the block of
+  // addresses is not big enough to contain a single page-aligned page, a
+  // fresh chunk will be allocated.
+  bool SetUp();
+
+  // Returns true if the space has been successfully set up and not
+  // subsequently torn down.
+  bool HasBeenSetUp();
+
+  // Cleans up the space, frees all pages in this space except those belonging
+  // to the initial chunk, uncommits addresses in the initial chunk.
+  void TearDown();
+
+  // Checks whether an object/address is in this space.
+  inline bool Contains(Address a);
+  bool Contains(HeapObject* o) { return Contains(o->address()); }
+
+  // Given an address occupied by a live object, return that object if it is
+  // in this space, or a Smi if it is not.  The implementation iterates over
+  // objects in the page containing the address; the cost is linear in the
+  // number of objects in the page.  It may be slow.
+  Object* FindObject(Address addr);
+
+  // During boot the free_space_map is created, and afterwards we may need
+  // to write it into the free list nodes that were already created.
+  void RepairFreeListsAfterBoot();
+
+  // Prepares for a mark-compact GC.
+  void PrepareForMarkCompact();
+
+  // Current capacity without growing (Size() + Available()).
+  intptr_t Capacity() { return accounting_stats_.Capacity(); }
+
+  // Total amount of memory committed for this space.  For paged
+  // spaces this equals the capacity.
+  intptr_t CommittedMemory() { return Capacity(); }
+
+  // The maximum amount of memory ever committed for this space.
+  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory();
+
+  struct SizeStats {
+    intptr_t Total() {
+      return small_size_ + medium_size_ + large_size_ + huge_size_;
+    }
+
+    intptr_t small_size_;
+    intptr_t medium_size_;
+    intptr_t large_size_;
+    intptr_t huge_size_;
+  };
+
+  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
+  void ResetFreeListStatistics();
+
+  // Sets the capacity, the available space and the wasted space to zero.
+  // The stats are rebuilt during sweeping by adding each page to the
+  // capacity and the size when it is encountered.  As free spaces are
+  // discovered during the sweeping they are subtracted from the size and added
+  // to the available and wasted totals.
+  void ClearStats() {
+    accounting_stats_.ClearSizeWaste();
+    ResetFreeListStatistics();
+  }
+
+  // Increases the number of available bytes of that space.
+  void AddToAccountingStats(intptr_t bytes) {
+    accounting_stats_.DeallocateBytes(bytes);
+  }
+
+  // Available bytes without growing.  These are the bytes on the free list.
+  // The bytes in the linear allocation area are not included in this total
+  // because updating the stats would slow down allocation.  New pages are
+  // immediately added to the free list so they show up here.
+  intptr_t Available() { return free_list_.available(); }
+
+  // Allocated bytes in this space.  Garbage bytes that were not found due to
+  // concurrent sweeping are counted as being allocated!  The bytes in the
+  // current linear allocation area (between top and limit) are also counted
+  // here.
+  virtual intptr_t Size() { return accounting_stats_.Size(); }
+
+  // As Size(), but the bytes in lazily swept pages are estimated and the bytes
+  // in the current linear allocation area are not included.
+  virtual intptr_t SizeOfObjects();
+
+  // Wasted bytes in this space.  These are just the bytes that were thrown away
+  // due to being too small to use for allocation.  They do not include the
+  // free bytes that were not found at all due to lazy sweeping.
+  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
+
+  // Returns the allocation pointer in this space.
+  Address top() { return allocation_info_.top(); }
+  Address limit() { return allocation_info_.limit(); }
+
+  // The allocation top address.
+  Address* allocation_top_address() { return allocation_info_.top_address(); }
+
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
+
+  // Allocate the requested number of bytes in the space if possible, return a
+  // failure object if not.
+  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
+
+  // Give a block of memory to the space's free list.  It might be added to
+  // the free list or accounted as waste.  Returns the number of bytes that
+  // were actually made available for reuse (size_in_bytes minus the waste).
+  int Free(Address start, int size_in_bytes) {
+    int wasted = free_list_.Free(start, size_in_bytes);
+    accounting_stats_.DeallocateBytes(size_in_bytes);
+    accounting_stats_.WasteBytes(wasted);
+    return size_in_bytes - wasted;
+  }
+
+  void ResetFreeList() { free_list_.Reset(); }
+
+  // Set space allocation info.
+  void SetTopAndLimit(Address top, Address limit) {
+    DCHECK(top == limit ||
+           Page::FromAddress(top) == Page::FromAddress(limit - 1));
+    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
+    allocation_info_.set_top(top);
+    allocation_info_.set_limit(limit);
+  }
+
+  // Empty space allocation info, returning unused area to free list.
+  void EmptyAllocationInfo() {
+    // Mark the old linear allocation area with a free space map so it can be
+    // skipped when scanning the heap.
+    int old_linear_size = static_cast<int>(limit() - top());
+    Free(top(), old_linear_size);
+    SetTopAndLimit(NULL, NULL);
+  }
+
+  void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
+
+  void IncreaseCapacity(int size);
+
+  // Releases an unused page and shrinks the space.
+  void ReleasePage(Page* page);
+
+  // The dummy page that anchors the linked list of pages.
+  Page* anchor() { return &anchor_; }
+
+#ifdef VERIFY_HEAP
+  // Verify integrity of this space.
+  virtual void Verify(ObjectVisitor* visitor);
+
+  // Overridden by subclasses to verify space-specific object
+  // properties (e.g., only maps or free-list nodes are in map space).
+  virtual void VerifyObject(HeapObject* obj) {}
+#endif
+
+#ifdef DEBUG
+  // Print meta info and objects in this space.
+  virtual void Print();
+
+  // Reports statistics for the space
+  void ReportStatistics();
+
+  // Report code object related statistics
+  void CollectCodeStatistics();
+  static void ReportCodeStatistics(Isolate* isolate);
+  static void ResetCodeStatistics(Isolate* isolate);
+#endif
+
+  // Evacuation candidates are swept by the evacuator.  Needs to return a
+  // valid result before _and_ after evacuation has finished.
+  static bool ShouldBeSweptBySweeperThreads(Page* p) {
+    return !p->IsEvacuationCandidate() &&
+           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) && !p->WasSwept();
+  }
+
+  void IncrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ += by; }
+
+  void IncreaseUnsweptFreeBytes(Page* p) {
+    DCHECK(ShouldBeSweptBySweeperThreads(p));
+    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
+  }
+
+  void DecrementUnsweptFreeBytes(intptr_t by) { unswept_free_bytes_ -= by; }
+
+  void DecreaseUnsweptFreeBytes(Page* p) {
+    DCHECK(ShouldBeSweptBySweeperThreads(p));
+    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
+  }
+
+  void ResetUnsweptFreeBytes() { unswept_free_bytes_ = 0; }
+
+  // This function tries to steal size_in_bytes memory from the sweeper threads
+  // free-lists. If it does not succeed stealing enough memory, it will wait
+  // for the sweeper threads to finish sweeping.
+  // It returns true when sweeping is completed and false otherwise.
+  bool EnsureSweeperProgress(intptr_t size_in_bytes);
+
+  void set_end_of_unswept_pages(Page* page) { end_of_unswept_pages_ = page; }
+
+  Page* end_of_unswept_pages() { return end_of_unswept_pages_; }
+
+  Page* FirstPage() { return anchor_.next_page(); }
+  Page* LastPage() { return anchor_.prev_page(); }
+
+  void EvictEvacuationCandidatesFromFreeLists();
+
+  bool CanExpand();
+
+  // Returns the number of total pages in this space.
+  int CountTotalPages();
+
+  // Return size of allocatable area on a page in this space.
+  inline int AreaSize() { return area_size_; }
+
+  void CreateEmergencyMemory();
+  void FreeEmergencyMemory();
+  void UseEmergencyMemory();
+
+  bool HasEmergencyMemory() { return emergency_memory_ != NULL; }
+
+ protected:
+  FreeList* free_list() { return &free_list_; }
+
+  int area_size_;
+
+  // Maximum capacity of this space.
+  intptr_t max_capacity_;
+
+  intptr_t SizeOfFirstPage();
+
+  // Accounting information for this space.
+  AllocationStats accounting_stats_;
+
+  // The dummy page that anchors the double linked list of pages.
+  Page anchor_;
+
+  // The space's free list.
+  FreeList free_list_;
+
+  // Normal allocation information.
+  AllocationInfo allocation_info_;
+
+  // The number of free bytes which could be reclaimed by advancing the
+  // concurrent sweeper threads.
+  intptr_t unswept_free_bytes_;
+
+  // The sweeper threads iterate over the list of pointer and data space pages
+  // and sweep these pages concurrently. They will stop sweeping after the
+  // end_of_unswept_pages_ page.
+  Page* end_of_unswept_pages_;
+
+  // Emergency memory is the memory of a full page for a given space, allocated
+  // conservatively before evacuating a page. If compaction fails due to out
+  // of memory error the emergency memory can be used to complete compaction.
+  // If not used, the emergency memory is released after compaction.
+  MemoryChunk* emergency_memory_;
+
+  // Expands the space by allocating a fixed number of pages. Returns false if
+  // it cannot allocate the requested number of pages from the OS, or if the
+  // hard heap size limit has been hit.
+  bool Expand();
+
+  // Generic fast case allocation function that tries linear allocation at the
+  // address denoted by top in allocation_info_.
+  inline HeapObject* AllocateLinearly(int size_in_bytes);
+
+  // If sweeping is still in progress try to sweep unswept pages. If that is
+  // not successful, wait for the sweeper threads and re-try free-list
+  // allocation.
+  MUST_USE_RESULT HeapObject* WaitForSweeperThreadsAndRetryAllocation(
+      int size_in_bytes);
+
+  // Slow path of AllocateRaw.  This function is space-dependent.
+  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
+
+  friend class PageIterator;
+  friend class MarkCompactCollector;
+};
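+
+
+// Illustrative flow (sketch) of the linear-allocation bookkeeping above: when
+// a linear area is retired, EmptyAllocationInfo() returns the unused tail
+// [top(), limit()) to the free list via Free(), which updates the accounting
+// stats; a later AllocateRaw() either bumps top (AllocateLinearly) or falls
+// back to SlowAllocateRaw() and the free list.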
+
+
+class NumberAndSizeInfo BASE_EMBEDDED {
+ public:
+  NumberAndSizeInfo() : number_(0), bytes_(0) {}
+
+  int number() const { return number_; }
+  void increment_number(int num) { number_ += num; }
+
+  int bytes() const { return bytes_; }
+  void increment_bytes(int size) { bytes_ += size; }
+
+  void clear() {
+    number_ = 0;
+    bytes_ = 0;
+  }
+
+ private:
+  int number_;
+  int bytes_;
+};
+
+
+// HistogramInfo class for recording a single "bar" of a histogram.  This
+// class is used for collecting statistics to print to the log file.
+class HistogramInfo : public NumberAndSizeInfo {
+ public:
+  HistogramInfo() : NumberAndSizeInfo() {}
+
+  const char* name() { return name_; }
+  void set_name(const char* name) { name_ = name; }
+
+ private:
+  const char* name_;
+};
+
+
+enum SemiSpaceId { kFromSpace = 0, kToSpace = 1 };
+
+
+class SemiSpace;
+
+
+class NewSpacePage : public MemoryChunk {
+ public:
+  // GC related flags copied from from-space to to-space when
+  // flipping semispaces.
+  static const intptr_t kCopyOnFlipFlagsMask =
+      (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
+      (1 << MemoryChunk::SCAN_ON_SCAVENGE);
+
+  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
+
+  inline NewSpacePage* next_page() const {
+    return static_cast<NewSpacePage*>(next_chunk());
+  }
+
+  inline void set_next_page(NewSpacePage* page) { set_next_chunk(page); }
+
+  inline NewSpacePage* prev_page() const {
+    return static_cast<NewSpacePage*>(prev_chunk());
+  }
+
+  inline void set_prev_page(NewSpacePage* page) { set_prev_chunk(page); }
+
+  SemiSpace* semi_space() { return reinterpret_cast<SemiSpace*>(owner()); }
+
+  bool is_anchor() { return !this->InNewSpace(); }
+
+  static bool IsAtStart(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) ==
+           kObjectStartOffset;
+  }
+
+  static bool IsAtEnd(Address addr) {
+    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
+  }
+
+  Address address() { return reinterpret_cast<Address>(this); }
+
+  // Finds the NewSpacePage containing the given address.
+  static inline NewSpacePage* FromAddress(Address address_in_page) {
+    Address page_start =
+        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
+                                  ~Page::kPageAlignmentMask);
+    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
+    return page;
+  }
+
+  // Find the page for a limit address. A limit address is either an address
+  // inside a page, or the address right after the last byte of a page.
+  static inline NewSpacePage* FromLimit(Address address_limit) {
+    return NewSpacePage::FromAddress(address_limit - 1);
+  }
+
+  // Checks if address1 and address2 are on the same new space page.
+  static inline bool OnSamePage(Address address1, Address address2) {
+    return NewSpacePage::FromAddress(address1) ==
+           NewSpacePage::FromAddress(address2);
+  }
+
+ private:
+  // Create a NewSpacePage object that is only used as anchor
+  // for the doubly-linked list of real pages.
+  explicit NewSpacePage(SemiSpace* owner) { InitializeAsAnchor(owner); }
+
+  static NewSpacePage* Initialize(Heap* heap, Address start,
+                                  SemiSpace* semi_space);
+
+  // Initialize a fake NewSpacePage used as sentinel at the ends
+  // of a doubly-linked list of real NewSpacePages.
+  // Only uses the prev/next links, and sets flags to not be in new-space.
+  void InitializeAsAnchor(SemiSpace* owner);
+
+  friend class SemiSpace;
+  friend class SemiSpaceIterator;
+};
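+
+
+// Illustrative mask arithmetic, assuming 1 MB pages (Page::kPageAlignmentMask
+// == 0xfffff): FromAddress(0x12345678) clears the low 20 bits and yields the
+// page at 0x12300000, while IsAtEnd() holds for an address like 0x12400000
+// whose low 20 bits are all zero, i.e. one past the last byte of that page.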
+
+
+// -----------------------------------------------------------------------------
+// SemiSpace in young generation
+//
+// A semispace is a contiguous chunk of memory holding page-like memory
+// chunks. The mark-compact collector uses the memory of the first page in
+// the from space as a marking stack when tracing live objects.
+
+class SemiSpace : public Space {
+ public:
+  // Constructor.
+  SemiSpace(Heap* heap, SemiSpaceId semispace)
+      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+        start_(NULL),
+        age_mark_(NULL),
+        id_(semispace),
+        anchor_(this),
+        current_page_(NULL) {}
+
+  // Sets up the semispace using the given chunk.
+  void SetUp(Address start, int initial_capacity, int maximum_capacity);
+
+  // Tear down the space.  Heap memory was not allocated by the space, so it
+  // is not deallocated here.
+  void TearDown();
+
+  // True if the space has been set up but not torn down.
+  bool HasBeenSetUp() { return start_ != NULL; }
+
+  // Grow the semispace to the new capacity.  The new capacity
+  // requested must be larger than the current capacity and less than
+  // the maximum capacity.
+  bool GrowTo(int new_capacity);
+
+  // Shrinks the semispace to the new capacity.  The new capacity
+  // requested must be more than the amount of used memory in the
+  // semispace and less than the current capacity.
+  bool ShrinkTo(int new_capacity);
+
+  // Returns the start address of the first page of the space.
+  Address space_start() {
+    DCHECK(anchor_.next_page() != &anchor_);
+    return anchor_.next_page()->area_start();
+  }
+
+  // Returns the start address of the current page of the space.
+  Address page_low() { return current_page_->area_start(); }
+
+  // Returns one past the end address of the space.
+  Address space_end() { return anchor_.prev_page()->area_end(); }
+
+  // Returns one past the end address of the current page of the space.
+  Address page_high() { return current_page_->area_end(); }
+
+  bool AdvancePage() {
+    NewSpacePage* next_page = current_page_->next_page();
+    if (next_page == anchor()) return false;
+    current_page_ = next_page;
+    return true;
+  }
+
+  // Resets the space to using the first page.
+  void Reset();
+
+  // Age mark accessors.
+  Address age_mark() { return age_mark_; }
+  void set_age_mark(Address mark);
+
+  // True if the address is in the address range of this semispace (not
+  // necessarily below the allocation pointer).
+  bool Contains(Address a) {
+    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
+           reinterpret_cast<uintptr_t>(start_);
+  }
+
+  // True if the object is a heap object in the address range of this
+  // semispace (not necessarily below the allocation pointer).
+  bool Contains(Object* o) {
+    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
+  }
+
+  // If we don't have these here then SemiSpace will be abstract.  However,
+  // they should never be called.
+  virtual intptr_t Size() {
+    UNREACHABLE();
+    return 0;
+  }
+
+  bool is_committed() { return committed_; }
+  bool Commit();
+  bool Uncommit();
+
+  NewSpacePage* first_page() { return anchor_.next_page(); }
+  NewSpacePage* current_page() { return current_page_; }
+
+#ifdef VERIFY_HEAP
+  virtual void Verify();
+#endif
+
+#ifdef DEBUG
+  virtual void Print();
+  // Validate a range of addresses in a SemiSpace.
+  // The "from" address must be on a page prior to the "to" address,
+  // in the linked page order, or it must be earlier on the same page.
+  static void AssertValidRange(Address from, Address to);
+#else
+  // Do nothing.
+  inline static void AssertValidRange(Address from, Address to) {}
+#endif
+
+  // Returns the current total capacity of the semispace.
+  int TotalCapacity() { return total_capacity_; }
+
+  // Returns the maximum total capacity of the semispace.
+  int MaximumTotalCapacity() { return maximum_total_capacity_; }
+
+  // Returns the initial capacity of the semispace.
+  int InitialTotalCapacity() { return initial_total_capacity_; }
+
+  SemiSpaceId id() { return id_; }
+
+  static void Swap(SemiSpace* from, SemiSpace* to);
+
+  // Returns the maximum amount of memory ever committed by the semi space.
+  size_t MaximumCommittedMemory() { return maximum_committed_; }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory();
+
+ private:
+  // Flips the semispace between being from-space and to-space.
+  // Copies the flags into the masked positions on all pages in the space.
+  void FlipPages(intptr_t flags, intptr_t flag_mask);
+
+  // Updates Capacity and MaximumCommitted based on new capacity.
+  void SetCapacity(int new_capacity);
+
+  NewSpacePage* anchor() { return &anchor_; }
+
+  // The current and maximum total capacity of the space.
+  int total_capacity_;
+  int maximum_total_capacity_;
+  int initial_total_capacity_;
+
+  intptr_t maximum_committed_;
+
+  // The start address of the space.
+  Address start_;
+  // Used to govern object promotion during mark-compact collection.
+  Address age_mark_;
+
+  // Masks and comparison values to test for containment in this semispace.
+  uintptr_t address_mask_;
+  uintptr_t object_mask_;
+  uintptr_t object_expected_;
+
+  bool committed_;
+  SemiSpaceId id_;
+
+  NewSpacePage anchor_;
+  NewSpacePage* current_page_;
+
+  friend class SemiSpaceIterator;
+  friend class NewSpacePageIterator;
+
+ public:
+  TRACK_MEMORY("SemiSpace")
+};
+
+
+// A SemiSpaceIterator is an ObjectIterator that iterates over the active
+// semispace of the heap's new space.  It iterates over the objects in the
+// semispace from a given start address (defaulting to the bottom of the
+// semispace) to the top of the semispace.  New objects allocated after the
+// iterator is created are not iterated.
+class SemiSpaceIterator : public ObjectIterator {
+ public:
+  // Create an iterator over the objects in the given space.  If no start
+  // address is given, the iterator starts from the bottom of the space.  If
+  // no size function is given, the iterator calls Object::Size().
+
+  // Iterate over all of allocated to-space.
+  explicit SemiSpaceIterator(NewSpace* space);
+  // Iterate over all of allocated to-space, with a custom size function.
+  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
+  // Iterate over part of allocated to-space, from start to the end
+  // of allocation.
+  SemiSpaceIterator(NewSpace* space, Address start);
+  // Iterate from one address to another in the same semi-space.
+  SemiSpaceIterator(Address from, Address to);
+
+  HeapObject* Next() {
+    if (current_ == limit_) return NULL;
+    if (NewSpacePage::IsAtEnd(current_)) {
+      NewSpacePage* page = NewSpacePage::FromLimit(current_);
+      page = page->next_page();
+      DCHECK(!page->is_anchor());
+      current_ = page->area_start();
+      if (current_ == limit_) return NULL;
+    }
+
+    HeapObject* object = HeapObject::FromAddress(current_);
+    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
+
+    current_ += size;
+    return object;
+  }
+
+  // Implementation of the ObjectIterator functions.
+  virtual HeapObject* next_object() { return Next(); }
+
+ private:
+  void Initialize(Address start, Address end, HeapObjectCallback size_func);
+
+  // The current iteration point.
+  Address current_;
+  // The end of iteration.
+  Address limit_;
+  // The callback function.
+  HeapObjectCallback size_func_;
+};
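+
+
+// Illustrative use (sketch): walking every object in the active semispace.
+//
+//   SemiSpaceIterator it(heap->new_space());
+//   for (HeapObject* object = it.Next(); object != NULL;
+//        object = it.Next()) {
+//     // ... inspect 'object' ...
+//   }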
+
+
+// -----------------------------------------------------------------------------
+// A PageIterator iterates the pages in a semi-space.
+class NewSpacePageIterator BASE_EMBEDDED {
+ public:
+  // Make an iterator that runs over all pages in to-space.
+  explicit inline NewSpacePageIterator(NewSpace* space);
+
+  // Make an iterator that runs over all pages in the given semispace,
+  // even those not used in allocation.
+  explicit inline NewSpacePageIterator(SemiSpace* space);
+
+  // Make iterator that iterates from the page containing start
+  // to the page that contains limit in the same semispace.
+  inline NewSpacePageIterator(Address start, Address limit);
+
+  inline bool has_next();
+  inline NewSpacePage* next();
+
+ private:
+  NewSpacePage* prev_page_;  // Previous page returned.
+  // Next page that will be returned.  Cached here so that we can use this
+  // iterator for operations that deallocate pages.
+  NewSpacePage* next_page_;
+  // Last page returned.
+  NewSpacePage* last_page_;
+};
+
+
+// -----------------------------------------------------------------------------
+// The young generation space.
+//
+// The new space consists of a contiguous pair of semispaces.  It simply
+// forwards most functions to the appropriate semispace.
+
+class NewSpace : public Space {
+ public:
+  // Constructor.
+  explicit NewSpace(Heap* heap)
+      : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
+        to_space_(heap, kToSpace),
+        from_space_(heap, kFromSpace),
+        reservation_(),
+        inline_allocation_limit_step_(0) {}
+
+  // Sets up the new space using the given chunk.
+  bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
+
+  // Tears down the space.  Heap memory was not allocated by the space, so it
+  // is not deallocated here.
+  void TearDown();
+
+  // True if the space has been set up but not torn down.
+  bool HasBeenSetUp() {
+    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
+  }
+
+  // Flip the pair of spaces.
+  void Flip();
+
+  // Grow the capacity of the semispaces.  Assumes that they are not at
+  // their maximum capacity.
+  void Grow();
+
+  // Shrink the capacity of the semispaces.
+  void Shrink();
+
+  // True if the address or object lies in the address range of either
+  // semispace (not necessarily below the allocation pointer).
+  bool Contains(Address a) {
+    return (reinterpret_cast<uintptr_t>(a) & address_mask_) ==
+           reinterpret_cast<uintptr_t>(start_);
+  }
+
+  bool Contains(Object* o) {
+    Address a = reinterpret_cast<Address>(o);
+    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
+  }
+
+  // Return the allocated bytes in the active semispace.
+  virtual intptr_t Size() {
+    return pages_used_ * NewSpacePage::kAreaSize +
+           static_cast<int>(top() - to_space_.page_low());
+  }
+
+  // The same, but returning an int.  The intptr_t version has to stay because
+  // it is inherited, but when we know we are dealing with the new space,
+  // which cannot grow as large as the other spaces, this variant is handy:
+  int SizeAsInt() { return static_cast<int>(Size()); }
+
+  // Return the allocatable capacity of a semispace.
+  intptr_t Capacity() {
+    SLOW_DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
+    return (to_space_.TotalCapacity() / Page::kPageSize) *
+           NewSpacePage::kAreaSize;
+  }
+
+  // Return the current size of a semispace, allocatable and non-allocatable
+  // memory.
+  intptr_t TotalCapacity() {
+    DCHECK(to_space_.TotalCapacity() == from_space_.TotalCapacity());
+    return to_space_.TotalCapacity();
+  }
+
+  // Return the total amount of memory committed for new space.
+  intptr_t CommittedMemory() {
+    if (from_space_.is_committed()) return 2 * Capacity();
+    return TotalCapacity();
+  }
+
+  // Return the total amount of memory committed for new space.
+  intptr_t MaximumCommittedMemory() {
+    return to_space_.MaximumCommittedMemory() +
+           from_space_.MaximumCommittedMemory();
+  }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory();
+
+  // Return the available bytes without growing.
+  intptr_t Available() { return Capacity() - Size(); }
+
+  // Return the maximum capacity of a semispace.
+  int MaximumCapacity() {
+    DCHECK(to_space_.MaximumTotalCapacity() ==
+           from_space_.MaximumTotalCapacity());
+    return to_space_.MaximumTotalCapacity();
+  }
+
+  bool IsAtMaximumCapacity() { return TotalCapacity() == MaximumCapacity(); }
+
+  // Returns the initial capacity of a semispace.
+  int InitialTotalCapacity() {
+    DCHECK(to_space_.InitialTotalCapacity() ==
+           from_space_.InitialTotalCapacity());
+    return to_space_.InitialTotalCapacity();
+  }
+
+  // Return the address of the allocation pointer in the active semispace.
+  Address top() {
+    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
+    return allocation_info_.top();
+  }
+
+  void set_top(Address top) {
+    DCHECK(to_space_.current_page()->ContainsLimit(top));
+    allocation_info_.set_top(top);
+  }
+
+  // Return the address of the allocation pointer limit in the active
+  // semispace.
+  Address limit() {
+    DCHECK(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
+    return allocation_info_.limit();
+  }
+
+  // Return the address of the first object in the active semispace.
+  Address bottom() { return to_space_.space_start(); }
+
+  // Get the age mark of the inactive semispace.
+  Address age_mark() { return from_space_.age_mark(); }
+  // Set the age mark in the active semispace.
+  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
+
+  // The start address of the space and a bit mask. ANDing an address in the
+  // new space with the mask will result in the start address.
+  Address start() { return start_; }
+  uintptr_t mask() { return address_mask_; }
+
+  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
+    DCHECK(Contains(addr));
+    DCHECK(IsAligned(OffsetFrom(addr), kPointerSize) ||
+           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
+    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
+  }
+
+  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
+    return reinterpret_cast<Address>(index << kPointerSizeLog2);
+  }
+
+  // The allocation top and limit address.
+  Address* allocation_top_address() { return allocation_info_.top_address(); }
+
+  // The allocation limit address.
+  Address* allocation_limit_address() {
+    return allocation_info_.limit_address();
+  }
+
+  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
+
+  // Reset the allocation pointer to the beginning of the active semispace.
+  void ResetAllocationInfo();
+
+  void UpdateInlineAllocationLimit(int size_in_bytes);
+  void LowerInlineAllocationLimit(intptr_t step) {
+    inline_allocation_limit_step_ = step;
+    UpdateInlineAllocationLimit(0);
+    top_on_previous_step_ = allocation_info_.top();
+  }
+
+  // Get the extent of the inactive semispace (for use as a marking stack,
+  // or to zap it). Notice: space-addresses are not necessarily on the
+  // same page, so FromSpaceStart() might be above FromSpaceEnd().
+  Address FromSpacePageLow() { return from_space_.page_low(); }
+  Address FromSpacePageHigh() { return from_space_.page_high(); }
+  Address FromSpaceStart() { return from_space_.space_start(); }
+  Address FromSpaceEnd() { return from_space_.space_end(); }
+
+  // Get the extent of the active semispace's pages' memory.
+  Address ToSpaceStart() { return to_space_.space_start(); }
+  Address ToSpaceEnd() { return to_space_.space_end(); }
+
+  inline bool ToSpaceContains(Address address) {
+    return to_space_.Contains(address);
+  }
+  inline bool FromSpaceContains(Address address) {
+    return from_space_.Contains(address);
+  }
+
+  // True if the object is a heap object in the address range of the
+  // respective semispace (not necessarily below the allocation pointer of the
+  // semispace).
+  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
+  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
+
+  // Try to switch the active semispace to a new, empty, page.
+  // Returns false if this isn't possible or reasonable (i.e., there
+  // are no pages, or the current page is already empty), or true
+  // if successful.
+  bool AddFreshPage();
+
+#ifdef VERIFY_HEAP
+  // Verify the active semispace.
+  virtual void Verify();
+#endif
+
+#ifdef DEBUG
+  // Print the active semispace.
+  virtual void Print() { to_space_.Print(); }
+#endif
+
+  // Iterates the active semispace to collect statistics.
+  void CollectStatistics();
+  // Reports previously collected statistics of the active semispace.
+  void ReportStatistics();
+  // Clears previously collected statistics.
+  void ClearHistograms();
+
+  // Record the allocation or promotion of a heap object.  Note that we don't
+  // record every single allocation, but only those that happen in the
+  // to space during a scavenge GC.
+  void RecordAllocation(HeapObject* obj);
+  void RecordPromotion(HeapObject* obj);
+
+  // Return whether the operation succeeded.
+  bool CommitFromSpaceIfNeeded() {
+    if (from_space_.is_committed()) return true;
+    return from_space_.Commit();
+  }
+
+  bool UncommitFromSpace() {
+    if (!from_space_.is_committed()) return true;
+    return from_space_.Uncommit();
+  }
+
+  inline intptr_t inline_allocation_limit_step() {
+    return inline_allocation_limit_step_;
+  }
+
+  SemiSpace* active_space() { return &to_space_; }
+
+ private:
+  // Update allocation info to match the current to-space page.
+  void UpdateAllocationInfo();
+
+  Address chunk_base_;
+  uintptr_t chunk_size_;
+
+  // The semispaces.
+  SemiSpace to_space_;
+  SemiSpace from_space_;
+  base::VirtualMemory reservation_;
+  int pages_used_;
+
+  // Start address and bit mask for containment testing.
+  Address start_;
+  uintptr_t address_mask_;
+  uintptr_t object_mask_;
+  uintptr_t object_expected_;
+
+  // Allocation pointer and limit for normal allocation and allocation during
+  // mark-compact collection.
+  AllocationInfo allocation_info_;
+
+  // When incremental marking is active we will set allocation_info_.limit
+  // to be lower than actual limit and then will gradually increase it
+  // in steps to guarantee that we do incremental marking steps even
+  // when all allocation is performed from inlined generated code.
+  intptr_t inline_allocation_limit_step_;
+
+  Address top_on_previous_step_;
+
+  HistogramInfo* allocated_histogram_;
+  HistogramInfo* promoted_histogram_;
+
+  MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
+
+  friend class SemiSpaceIterator;
+
+ public:
+  TRACK_MEMORY("NewSpace")
+};
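+
+
+// Illustrative arithmetic for Size() above (sketch): a to-space that has
+// completely filled two pages and is part-way through the current one
+// reports 2 * NewSpacePage::kAreaSize + (top() - to_space_.page_low())
+// allocated bytes, while Capacity() is the number of semispace pages times
+// NewSpacePage::kAreaSize.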
+
+
+// -----------------------------------------------------------------------------
+// Old object space (excluding map objects)
+
+class OldSpace : public PagedSpace {
+ public:
+  // Creates an old space object with a given maximum capacity.
+  // The constructor does not allocate pages from OS.
+  OldSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id,
+           Executability executable)
+      : PagedSpace(heap, max_capacity, id, executable) {}
+
+ public:
+  TRACK_MEMORY("OldSpace")
+};
+
+
+// For contiguous spaces, top should be in the space (or at the end) and limit
+// should be the end of the space.
+#define DCHECK_SEMISPACE_ALLOCATION_INFO(info, space) \
+  SLOW_DCHECK((space).page_low() <= (info).top() &&   \
+              (info).top() <= (space).page_high() &&  \
+              (info).limit() <= (space).page_high())
+
+
+// -----------------------------------------------------------------------------
+// Old space for all map objects
+
+class MapSpace : public PagedSpace {
+ public:
+  // Creates a map space object with a maximum capacity.
+  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
+        max_map_space_pages_(kMaxMapPageIndex - 1) {}
+
+  // Given an index, returns the page address.
+  // TODO(1600): this limit is artificial just to keep code compilable
+  static const int kMaxMapPageIndex = 1 << 16;
+
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (base::bits::IsPowerOfTwo32(Map::kSize)) {
+      return RoundDown(size, Map::kSize);
+    } else {
+      return (size / Map::kSize) * Map::kSize;
+    }
+  }
+
+ protected:
+  virtual void VerifyObject(HeapObject* obj);
+
+ private:
+  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
+
+  // Do map space compaction if there is a page gap.
+  int CompactionThreshold() {
+    return kMapsPerPage * (max_map_space_pages_ - 1);
+  }
+
+  const int max_map_space_pages_;
+
+ public:
+  TRACK_MEMORY("MapSpace")
+};
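+
+
+// A worked example for RoundSizeDownToObjectAlignment() above: if Map::kSize
+// were 88 (not a power of two), a request of 1000 bytes rounds down to
+// (1000 / 88) * 88 == 968; if it were 128 (a power of two), RoundDown masks
+// instead: 1000 & ~127 == 896.  Both branches compute the same rounding, the
+// power-of-two path is just cheaper.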
+
+
+// -----------------------------------------------------------------------------
+// Old space for simple property cell objects
+
+class CellSpace : public PagedSpace {
+ public:
+  // Creates a property cell space object with a maximum capacity.
+  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
+
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (base::bits::IsPowerOfTwo32(Cell::kSize)) {
+      return RoundDown(size, Cell::kSize);
+    } else {
+      return (size / Cell::kSize) * Cell::kSize;
+    }
+  }
+
+ protected:
+  virtual void VerifyObject(HeapObject* obj);
+
+ public:
+  TRACK_MEMORY("CellSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Old space for all global object property cell objects
+
+class PropertyCellSpace : public PagedSpace {
+ public:
+  // Creates a property cell space object with a maximum capacity.
+  PropertyCellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
+      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {}
+
+  virtual int RoundSizeDownToObjectAlignment(int size) {
+    if (base::bits::IsPowerOfTwo32(PropertyCell::kSize)) {
+      return RoundDown(size, PropertyCell::kSize);
+    } else {
+      return (size / PropertyCell::kSize) * PropertyCell::kSize;
+    }
+  }
+
+ protected:
+  virtual void VerifyObject(HeapObject* obj);
+
+ public:
+  TRACK_MEMORY("PropertyCellSpace")
+};
+
+
+// -----------------------------------------------------------------------------
+// Large objects ( > Page::kMaxHeapObjectSize ) are allocated and managed by
+// the large object space. A large object is allocated from OS heap with
+// extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
+// A large object always starts at Page::kObjectStartOffset within a page.
+// Large objects do not move during garbage collections.
+
+class LargeObjectSpace : public Space {
+ public:
+  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
+  virtual ~LargeObjectSpace() {}
+
+  // Initializes internal data structures.
+  bool SetUp();
+
+  // Releases internal resources, frees objects in this space.
+  void TearDown();
+
+  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
+    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
+    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
+  }
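+
+  // Worked example (with hypothetical sizes): assuming 1 MB pages and a 4 KB
+  // Page::kObjectStartOffset, ObjectSizeFor(2 * Page::kPageSize) is
+  // 2 MB - 1 MB - 4 KB; any chunk no larger than one page plus the start
+  // offset yields 0, since nothing would remain for the object itself.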
+
+  // Shared implementation of AllocateRaw, AllocateRawCode and
+  // AllocateRawFixedArray.
+  MUST_USE_RESULT AllocationResult
+      AllocateRaw(int object_size, Executability executable);
+
+  // Available bytes for objects in this space.
+  inline intptr_t Available();
+
+  virtual intptr_t Size() { return size_; }
+
+  virtual intptr_t SizeOfObjects() { return objects_size_; }
+
+  intptr_t MaximumCommittedMemory() { return maximum_committed_; }
+
+  intptr_t CommittedMemory() { return Size(); }
+
+  // Approximate amount of physical memory committed for this space.
+  size_t CommittedPhysicalMemory();
+
+  int PageCount() { return page_count_; }
+
+  // Finds an object for a given address, returns a Smi if it is not found.
+  // The function iterates through all objects in this space and may be slow.
+  Object* FindObject(Address a);
+
+  // Finds a large object page containing the given address, returns NULL
+  // if such a page doesn't exist.
+  LargePage* FindPage(Address a);
+
+  // Frees unmarked objects.
+  void FreeUnmarkedObjects();
+
+  // Checks whether a heap object is in this space; O(1).
+  bool Contains(HeapObject* obj);
+
+  // Checks whether the space is empty.
+  bool IsEmpty() { return first_page_ == NULL; }
+
+  LargePage* first_page() { return first_page_; }
+
+#ifdef VERIFY_HEAP
+  virtual void Verify();
+#endif
+
+#ifdef DEBUG
+  virtual void Print();
+  void ReportStatistics();
+  void CollectCodeStatistics();
+#endif
+  // Checks whether an address is in the object area in this space.  It
+  // iterates over all objects in the space and may be slow.
+  bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
+
+ private:
+  intptr_t max_capacity_;
+  intptr_t maximum_committed_;
+  // The head of the linked list of large object chunks.
+  LargePage* first_page_;
+  intptr_t size_;          // allocated bytes
+  int page_count_;         // number of chunks
+  intptr_t objects_size_;  // size of objects
+  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them.
+  HashMap chunk_map_;
+
+  friend class LargeObjectIterator;
+
+ public:
+  TRACK_MEMORY("LargeObjectSpace")
+};
+
+
+class LargeObjectIterator : public ObjectIterator {
+ public:
+  explicit LargeObjectIterator(LargeObjectSpace* space);
+  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
+
+  HeapObject* Next();
+
+  // Implementation of ObjectIterator.
+  virtual HeapObject* next_object() { return Next(); }
+
+ private:
+  LargePage* current_;
+  HeapObjectCallback size_func_;
+};
+
+
+// Iterates over the chunks (pages and large object pages) that can contain
+// pointers to new space.
+class PointerChunkIterator BASE_EMBEDDED {
+ public:
+  inline explicit PointerChunkIterator(Heap* heap);
+
+  // Return NULL when the iterator is done.
+  MemoryChunk* next() {
+    switch (state_) {
+      case kOldPointerState: {
+        if (old_pointer_iterator_.has_next()) {
+          return old_pointer_iterator_.next();
+        }
+        state_ = kMapState;
+        // Fall through.
+      }
+      case kMapState: {
+        if (map_iterator_.has_next()) {
+          return map_iterator_.next();
+        }
+        state_ = kLargeObjectState;
+        // Fall through.
+      }
+      case kLargeObjectState: {
+        HeapObject* heap_object;
+        do {
+          heap_object = lo_iterator_.Next();
+          if (heap_object == NULL) {
+            state_ = kFinishedState;
+            return NULL;
+          }
+          // Fixed arrays are the only pointer-containing objects in large
+          // object space.
+        } while (!heap_object->IsFixedArray());
+        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
+        return answer;
+      }
+      case kFinishedState:
+        return NULL;
+      default:
+        break;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+
+ private:
+  enum State { kOldPointerState, kMapState, kLargeObjectState, kFinishedState };
+  State state_;
+  PageIterator old_pointer_iterator_;
+  PageIterator map_iterator_;
+  LargeObjectIterator lo_iterator_;
+};
+
+
+#ifdef DEBUG
+struct CommentStatistic {
+  const char* comment;
+  int size;
+  int count;
+  void Clear() {
+    comment = NULL;
+    size = 0;
+    count = 0;
+  }
+  // Must be small, since an iteration is used for lookup.
+  static const int kMaxComments = 64;
+};
+#endif
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_SPACES_H_
diff --git a/src/heap/store-buffer-inl.h b/src/heap/store-buffer-inl.h
new file mode 100644
index 0000000..1606465
--- /dev/null
+++ b/src/heap/store-buffer-inl.h
@@ -0,0 +1,63 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STORE_BUFFER_INL_H_
+#define V8_STORE_BUFFER_INL_H_
+
+#include "src/heap/store-buffer.h"
+
+namespace v8 {
+namespace internal {
+
+Address StoreBuffer::TopAddress() {
+  return reinterpret_cast<Address>(heap_->store_buffer_top_address());
+}
+
+
+void StoreBuffer::Mark(Address addr) {
+  DCHECK(!heap_->cell_space()->Contains(addr));
+  DCHECK(!heap_->code_space()->Contains(addr));
+  DCHECK(!heap_->old_data_space()->Contains(addr));
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  *top++ = addr;
+  heap_->public_set_store_buffer_top(top);
+  if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
+    DCHECK(top == limit_);
+    Compact();
+  } else {
+    DCHECK(top < limit_);
+  }
+}
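+
+
+// A note on the bit test above (sketch): SetUp() commits the buffer at an
+// address aligned to twice its size, so every slot in [start_, limit_) has
+// kStoreBufferOverflowBit clear and limit_ is the first address where the
+// bit is set -- the DCHECKs in SetUp() verify exactly this.  Testing that
+// single bit of the bumped top is therefore a cheap "buffer full" check
+// that needs no comparison against limit_.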
+
+
+void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
+  if (store_buffer_rebuilding_enabled_) {
+    SLOW_DCHECK(!heap_->cell_space()->Contains(addr) &&
+                !heap_->code_space()->Contains(addr) &&
+                !heap_->old_data_space()->Contains(addr) &&
+                !heap_->new_space()->Contains(addr));
+    Address* top = old_top_;
+    *top++ = addr;
+    old_top_ = top;
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    if (top >= old_limit_) {
+      DCHECK(callback_ != NULL);
+      (*callback_)(heap_, MemoryChunk::FromAnyPointerAddress(heap_, addr),
+                   kStoreBufferFullEvent);
+    }
+  }
+}
+
+
+void StoreBuffer::ClearDeadObject(HeapObject* object) {
+  Address& map_field = Memory::Address_at(object->address());
+  if (heap_->map_space()->Contains(map_field)) {
+    map_field = NULL;
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_INL_H_
diff --git a/src/heap/store-buffer.cc b/src/heap/store-buffer.cc
new file mode 100644
index 0000000..278e9f2
--- /dev/null
+++ b/src/heap/store-buffer.cc
@@ -0,0 +1,581 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+
+#include "src/v8.h"
+
+#include "src/base/atomicops.h"
+#include "src/counters.h"
+#include "src/heap/store-buffer-inl.h"
+
+namespace v8 {
+namespace internal {
+
+StoreBuffer::StoreBuffer(Heap* heap)
+    : heap_(heap),
+      start_(NULL),
+      limit_(NULL),
+      old_start_(NULL),
+      old_limit_(NULL),
+      old_top_(NULL),
+      old_reserved_limit_(NULL),
+      old_buffer_is_sorted_(false),
+      old_buffer_is_filtered_(false),
+      during_gc_(false),
+      store_buffer_rebuilding_enabled_(false),
+      callback_(NULL),
+      may_move_store_buffer_entries_(true),
+      virtual_memory_(NULL),
+      hash_set_1_(NULL),
+      hash_set_2_(NULL),
+      hash_sets_are_empty_(true) {}
+
+
+void StoreBuffer::SetUp() {
+  virtual_memory_ = new base::VirtualMemory(kStoreBufferSize * 3);
+  uintptr_t start_as_int =
+      reinterpret_cast<uintptr_t>(virtual_memory_->address());
+  start_ =
+      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
+  limit_ = start_ + (kStoreBufferSize / kPointerSize);
+
+  old_virtual_memory_ =
+      new base::VirtualMemory(kOldStoreBufferLength * kPointerSize);
+  old_top_ = old_start_ =
+      reinterpret_cast<Address*>(old_virtual_memory_->address());
+  // We don't know the allocation alignment of the OS, but it is certainly
+  // at least 4K, so the low 12 bits of the start address must be zero.
+  DCHECK((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
+  int initial_length =
+      static_cast<int>(base::OS::CommitPageSize() / kPointerSize);
+  DCHECK(initial_length > 0);
+  DCHECK(initial_length <= kOldStoreBufferLength);
+  old_limit_ = old_start_ + initial_length;
+  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
+
+  CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_start_),
+                                    (old_limit_ - old_start_) * kPointerSize,
+                                    false));
+
+  DCHECK(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
+  DCHECK(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
+  Address* vm_limit = reinterpret_cast<Address*>(
+      reinterpret_cast<char*>(virtual_memory_->address()) +
+      virtual_memory_->size());
+  DCHECK(start_ <= vm_limit);
+  DCHECK(limit_ <= vm_limit);
+  USE(vm_limit);
+  DCHECK((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
+  DCHECK((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
+         0);
+
+  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
+                                kStoreBufferSize,
+                                false));  // Not executable.
+  heap_->public_set_store_buffer_top(start_);
+
+  hash_set_1_ = new uintptr_t[kHashSetLength];
+  hash_set_2_ = new uintptr_t[kHashSetLength];
+  hash_sets_are_empty_ = false;
+
+  ClearFilteringHashSets();
+}
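+
+// A note on the arithmetic in SetUp() above (illustrative figures, assuming a
+// 32-bit build where kPointerSizeLog2 == 2, so kStoreBufferSize == 1 << 16):
+// reserving 3 * kStoreBufferSize and rounding the start up to a
+// 2 * kStoreBufferSize boundary guarantees both that the buffer fits in the
+// reservation and that limit_ is the first address with the
+// kStoreBufferOverflowBit set.  For example, with start_ == 0x40000:
+//
+//   address of limit_ == 0x40000 + 0x10000 == 0x50000  (bit 0x10000 set)
+//   any interior top in [0x40000, 0x50000)             (bit 0x10000 clear)
+//
+// This is exactly what the mutator's overflow test in store-buffer-inl.h
+// relies on: a single mask of the top pointer detects a full buffer.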
+
+
+void StoreBuffer::TearDown() {
+  delete virtual_memory_;
+  delete old_virtual_memory_;
+  delete[] hash_set_1_;
+  delete[] hash_set_2_;
+  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
+  start_ = limit_ = NULL;
+  heap_->public_set_store_buffer_top(start_);
+}
+
+
+void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
+  isolate->heap()->store_buffer()->Compact();
+  isolate->counters()->store_buffer_overflows()->Increment();
+}
+
+
+void StoreBuffer::Uniq() {
+  // Remove adjacent duplicates and cells that do not point at new space.
+  Address previous = NULL;
+  Address* write = old_start_;
+  DCHECK(may_move_store_buffer_entries_);
+  for (Address* read = old_start_; read < old_top_; read++) {
+    Address current = *read;
+    if (current != previous) {
+      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
+        *write++ = current;
+      }
+    }
+    previous = current;
+  }
+  old_top_ = write;
+}
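+
+// An illustrative trace of Uniq() (hypothetical addresses A, B, C, where C no
+// longer points at new space):
+//
+//   input:  [A, A, B, C, B]
+//   output: [A, B, B]
+//
+// The second A is dropped because it is adjacent to the first; C is dropped
+// because its referent left new space; the trailing B survives because only
+// *adjacent* duplicates are removed -- the operation is deliberately cheap
+// rather than exact.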
+
+
+bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
+  return old_limit_ - old_top_ >= space_needed;
+}
+
+
+void StoreBuffer::EnsureSpace(intptr_t space_needed) {
+  while (old_limit_ - old_top_ < space_needed &&
+         old_limit_ < old_reserved_limit_) {
+    size_t grow = old_limit_ - old_start_;  // Double size.
+    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
+                                      grow * kPointerSize, false));
+    old_limit_ += grow;
+  }
+
+  if (SpaceAvailable(space_needed)) return;
+
+  if (old_buffer_is_filtered_) return;
+  DCHECK(may_move_store_buffer_entries_);
+  Compact();
+
+  old_buffer_is_filtered_ = true;
+  bool page_has_scan_on_scavenge_flag = false;
+
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) {
+      page_has_scan_on_scavenge_flag = true;
+      break;
+    }
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  if (SpaceAvailable(space_needed)) return;
+
+  // Sample 1 entry in 97 and filter out the pages where we estimate that more
+  // than 1 in 8 pointers are to new space.
+  static const int kSampleFinenesses = 5;
+  static const struct Samples {
+    int prime_sample_step;
+    int threshold;
+  } samples[kSampleFinenesses] = {
+        {97, ((Page::kPageSize / kPointerSize) / 97) / 8},
+        {23, ((Page::kPageSize / kPointerSize) / 23) / 16},
+        {7, ((Page::kPageSize / kPointerSize) / 7) / 32},
+        {3, ((Page::kPageSize / kPointerSize) / 3) / 256},
+        {1, 0}};
+  for (int i = 0; i < kSampleFinenesses; i++) {
+    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
+    // As a last resort we mark all pages as being exempt from the store buffer.
+    DCHECK(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
+    if (SpaceAvailable(space_needed)) return;
+  }
+  UNREACHABLE();
+}
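+
+// Worked example for the sampling table above (illustrative, assuming a 1 MB
+// page and 4-byte pointers, so Page::kPageSize / kPointerSize == 262144):
+// the first pass visits every 97th entry, so a page with N entries in the
+// buffer is sampled about N / 97 times.  The threshold (262144 / 97) / 8,
+// roughly 337, is therefore reached when N is about one eighth of the page's
+// slots -- matching the "more than 1 in 8 pointers" estimate above.  Later
+// passes sample more densely with stricter thresholds, and the final {1, 0}
+// row exempts every page that still has an entry in the buffer.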
+
+
+// Sample the store buffer to see if some pages are taking up a lot of space
+// in the store buffer.
+void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  while ((chunk = it.next()) != NULL) {
+    chunk->set_store_buffer_counter(0);
+  }
+  bool created_new_scan_on_scavenge_pages = false;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
+    }
+    int old_counter = containing_chunk->store_buffer_counter();
+    if (old_counter >= threshold) {
+      containing_chunk->set_scan_on_scavenge(true);
+      created_new_scan_on_scavenge_pages = true;
+    }
+    containing_chunk->set_store_buffer_counter(old_counter + 1);
+    previous_chunk = containing_chunk;
+  }
+  if (created_new_scan_on_scavenge_pages) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+  old_buffer_is_filtered_ = true;
+}
+
+
+void StoreBuffer::Filter(int flag) {
+  Address* new_top = old_start_;
+  MemoryChunk* previous_chunk = NULL;
+  for (Address* p = old_start_; p < old_top_; p++) {
+    Address addr = *p;
+    MemoryChunk* containing_chunk = NULL;
+    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
+      containing_chunk = previous_chunk;
+    } else {
+      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
+      previous_chunk = containing_chunk;
+    }
+    if (!containing_chunk->IsFlagSet(flag)) {
+      *new_top++ = addr;
+    }
+  }
+  old_top_ = new_top;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
+}
+
+
+void StoreBuffer::SortUniq() {
+  Compact();
+  if (old_buffer_is_sorted_) return;
+  std::sort(old_start_, old_top_);
+  Uniq();
+
+  old_buffer_is_sorted_ = true;
+
+  // Filtering hash sets are inconsistent with the store buffer after this
+  // operation.
+  ClearFilteringHashSets();
+}
+
+
+bool StoreBuffer::PrepareForIteration() {
+  Compact();
+  PointerChunkIterator it(heap_);
+  MemoryChunk* chunk;
+  bool page_has_scan_on_scavenge_flag = false;
+  while ((chunk = it.next()) != NULL) {
+    if (chunk->scan_on_scavenge()) {
+      page_has_scan_on_scavenge_flag = true;
+      break;
+    }
+  }
+
+  if (page_has_scan_on_scavenge_flag) {
+    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
+  }
+
+  // Filtering hash sets are inconsistent with the store buffer after
+  // iteration.
+  ClearFilteringHashSets();
+
+  return page_has_scan_on_scavenge_flag;
+}
+
+
+#ifdef DEBUG
+void StoreBuffer::Clean() {
+  ClearFilteringHashSets();
+  Uniq();  // Also removes things that no longer point to new space.
+  EnsureSpace(kStoreBufferSize / 2);
+}
+
+
+static Address* in_store_buffer_1_element_cache = NULL;
+
+
+bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
+  if (!FLAG_enable_slow_asserts) return true;
+  if (in_store_buffer_1_element_cache != NULL &&
+      *in_store_buffer_1_element_cache == cell_address) {
+    return true;
+  }
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+  for (Address* current = top - 1; current >= start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
+    if (*current == cell_address) {
+      in_store_buffer_1_element_cache = current;
+      return true;
+    }
+  }
+  return false;
+}
+#endif
+
+
+void StoreBuffer::ClearFilteringHashSets() {
+  if (!hash_sets_are_empty_) {
+    memset(reinterpret_cast<void*>(hash_set_1_), 0,
+           sizeof(uintptr_t) * kHashSetLength);
+    memset(reinterpret_cast<void*>(hash_set_2_), 0,
+           sizeof(uintptr_t) * kHashSetLength);
+    hash_sets_are_empty_ = true;
+  }
+}
+
+
+void StoreBuffer::GCPrologue() {
+  ClearFilteringHashSets();
+  during_gc_ = true;
+}
+
+
+#ifdef VERIFY_HEAP
+void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
+  LargeObjectIterator it(space);
+  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
+    if (object->IsFixedArray()) {
+      Address slot_address = object->address();
+      Address end = object->address() + object->Size();
+
+      while (slot_address < end) {
+        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
+        // When we are not in GC the Heap::InNewSpace() predicate
+        // checks that pointers which satisfy the predicate point into
+        // the active semispace.
+        Object* object = reinterpret_cast<Object*>(
+            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+        heap_->InNewSpace(object);
+        slot_address += kPointerSize;
+      }
+    }
+  }
+}
+#endif
+
+
+void StoreBuffer::Verify() {
+#ifdef VERIFY_HEAP
+  VerifyPointers(heap_->lo_space());
+#endif
+}
+
+
+void StoreBuffer::GCEpilogue() {
+  during_gc_ = false;
+#ifdef VERIFY_HEAP
+  if (FLAG_verify_heap) {
+    Verify();
+  }
+#endif
+}
+
+
+void StoreBuffer::FindPointersToNewSpaceInRegion(
+    Address start, Address end, ObjectSlotCallback slot_callback,
+    bool clear_maps) {
+  for (Address slot_address = start; slot_address < end;
+       slot_address += kPointerSize) {
+    Object** slot = reinterpret_cast<Object**>(slot_address);
+    Object* object = reinterpret_cast<Object*>(
+        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+    if (heap_->InNewSpace(object)) {
+      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+      DCHECK(heap_object->IsHeapObject());
+      // The new space object was not promoted if it still contains a map
+      // pointer. Clear the map field now lazily.
+      if (clear_maps) ClearDeadObject(heap_object);
+      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+      object = reinterpret_cast<Object*>(
+          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+      if (heap_->InNewSpace(object)) {
+        EnterDirectlyIntoStoreBuffer(slot_address);
+      }
+    }
+  }
+}
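+
+// The reload after slot_callback above is essential: the callback (typically
+// the scavenger) may have updated the slot to point at the object's new
+// location.  Only if the referent is *still* in new space (i.e. it survived
+// in the semispace rather than being promoted) does the slot need to stay in
+// the store buffer, so only then is it re-entered.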
+
+
+void StoreBuffer::IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
+                                               bool clear_maps) {
+  Address* limit = old_top_;
+  old_top_ = old_start_;
+  {
+    DontMoveStoreBufferEntriesScope scope(this);
+    for (Address* current = old_start_; current < limit; current++) {
+#ifdef DEBUG
+      Address* saved_top = old_top_;
+#endif
+      Object** slot = reinterpret_cast<Object**>(*current);
+      Object* object = reinterpret_cast<Object*>(
+          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+      if (heap_->InFromSpace(object)) {
+        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
+        // The new space object was not promoted if it still contains a map
+        // pointer. Clear the map field now lazily.
+        if (clear_maps) ClearDeadObject(heap_object);
+        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
+        object = reinterpret_cast<Object*>(
+            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
+        if (heap_->InNewSpace(object)) {
+          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
+        }
+      }
+      DCHECK(old_top_ == saved_top + 1 || old_top_ == saved_top);
+    }
+  }
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
+  IteratePointersToNewSpace(slot_callback, false);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
+    ObjectSlotCallback slot_callback) {
+  IteratePointersToNewSpace(slot_callback, true);
+}
+
+
+void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
+                                            bool clear_maps) {
+  // We do not sort or remove duplicated entries from the store buffer because
+  // we expect the callback to rebuild the store buffer, thus removing
+  // all duplicates and pointers to old space.
+  bool some_pages_to_scan = PrepareForIteration();
+
+  // TODO(gc): we want to skip slots on evacuation candidates
+  // but we can't simply figure that out from slot address
+  // because slot can belong to a large object.
+  IteratePointersInStoreBuffer(slot_callback, clear_maps);
+
+  // We are done scanning all the pointers that were in the store buffer, but
+  // there may be some pages marked scan_on_scavenge that have pointers to new
+  // space that are not in the store buffer.  We must scan them now.  As we
+  // scan, the surviving pointers to new space will be added to the store
+  // buffer.  If there are still a lot of pointers to new space then we will
+  // keep the scan_on_scavenge flag on the page and discard the pointers that
+  // were added to the store buffer.  If there are not many pointers to new
+  // space left on the page we will keep the pointers in the store buffer and
+  // remove the flag from the page.
+  if (some_pages_to_scan) {
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
+    }
+    PointerChunkIterator it(heap_);
+    MemoryChunk* chunk;
+    while ((chunk = it.next()) != NULL) {
+      if (chunk->scan_on_scavenge()) {
+        chunk->set_scan_on_scavenge(false);
+        if (callback_ != NULL) {
+          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
+        }
+        if (chunk->owner() == heap_->lo_space()) {
+          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
+          HeapObject* array = large_page->GetObject();
+          DCHECK(array->IsFixedArray());
+          Address start = array->address();
+          Address end = start + array->Size();
+          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
+        } else {
+          Page* page = reinterpret_cast<Page*>(chunk);
+          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
+          if (owner == heap_->map_space()) {
+            DCHECK(page->WasSwept());
+            HeapObjectIterator iterator(page, NULL);
+            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+                 heap_object = iterator.Next()) {
+              // We skip free space objects.
+              if (!heap_object->IsFiller()) {
+                DCHECK(heap_object->IsMap());
+                FindPointersToNewSpaceInRegion(
+                    heap_object->address() + Map::kPointerFieldsBeginOffset,
+                    heap_object->address() + Map::kPointerFieldsEndOffset,
+                    slot_callback, clear_maps);
+              }
+            }
+          } else {
+            if (!page->SweepingCompleted()) {
+              heap_->mark_compact_collector()->SweepInParallel(page, owner);
+              if (!page->SweepingCompleted()) {
+                // We were not able to sweep that page, i.e., a concurrent
+                // sweeper thread currently owns this page.
+                // TODO(hpayer): This may introduce a huge pause here. We
+                // only care about finishing the sweeping of the
+                // scan-on-scavenge page.
+                heap_->mark_compact_collector()->EnsureSweepingCompleted();
+              }
+            }
+            CHECK(page->owner() == heap_->old_pointer_space());
+            HeapObjectIterator iterator(page, NULL);
+            for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
+                 heap_object = iterator.Next()) {
+              // Iterate only over objects that may contain new space pointers.
+              if (!heap_object->MayContainRawValues()) {
+                FindPointersToNewSpaceInRegion(
+                    heap_object->address() + HeapObject::kHeaderSize,
+                    heap_object->address() + heap_object->Size(), slot_callback,
+                    clear_maps);
+              }
+            }
+          }
+        }
+      }
+    }
+    if (callback_ != NULL) {
+      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
+    }
+  }
+}
+
+
+void StoreBuffer::Compact() {
+  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
+
+  if (top == start_) return;
+
+  // There's no check of the limit in the loop below so we check here for
+  // the worst case (compaction doesn't eliminate any pointers).
+  DCHECK(top <= limit_);
+  heap_->public_set_store_buffer_top(start_);
+  EnsureSpace(top - start_);
+  DCHECK(may_move_store_buffer_entries_);
+  // Goes through the addresses in the store buffer attempting to remove
+  // duplicates.  In the interest of speed this is a lossy operation.  Some
+  // duplicates will remain.  We have two hash sets with different hash
+  // functions to reduce the number of unnecessary clashes.
+  hash_sets_are_empty_ = false;  // Hash sets are in use.
+  for (Address* current = start_; current < top; current++) {
+    DCHECK(!heap_->cell_space()->Contains(*current));
+    DCHECK(!heap_->code_space()->Contains(*current));
+    DCHECK(!heap_->old_data_space()->Contains(*current));
+    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
+    // Shift out the last bits including any tags.
+    int_addr >>= kPointerSizeLog2;
+    // The upper part of an address is basically random because of ASLR and OS
+    // non-determinism, so we use only the bits within a page for hashing to
+    // make v8's behavior (more) deterministic.
+    uintptr_t hash_addr =
+        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
+    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
+                 (kHashSetLength - 1));
+    if (hash_set_1_[hash1] == int_addr) continue;
+    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
+    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
+    hash2 &= (kHashSetLength - 1);
+    if (hash_set_2_[hash2] == int_addr) continue;
+    if (hash_set_1_[hash1] == 0) {
+      hash_set_1_[hash1] = int_addr;
+    } else if (hash_set_2_[hash2] == 0) {
+      hash_set_2_[hash2] = int_addr;
+    } else {
+      // Rather than slowing down we just throw away some entries.  This will
+      // cause some duplicates to remain undetected.
+      hash_set_1_[hash1] = int_addr;
+      hash_set_2_[hash2] = 0;
+    }
+    old_buffer_is_sorted_ = false;
+    old_buffer_is_filtered_ = false;
+    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
+    DCHECK(old_top_ <= old_limit_);
+  }
+  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
+}
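+
+// An illustrative run of the filtering above (hypothetical values, with
+// kHashSetLengthLog2 == 12 as defined in store-buffer.h):
+//
+//   1st occurrence of int_addr: hash_set_1_[hash1] is 0, so the address is
+//       recorded there and copied to the old buffer.
+//   2nd occurrence: hash_set_1_[hash1] == int_addr, so it is skipped.
+//   A different address that collides in both occupied slots evicts the
+//       hash_set_1_ entry and clears the hash_set_2_ slot, so later
+//       duplicates of the evicted address pass through undetected.
+//
+// Missing a duplicate only costs time later, never correctness, which is why
+// the lossy eviction is acceptable.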
+}
+}  // namespace v8::internal
diff --git a/src/heap/store-buffer.h b/src/heap/store-buffer.h
new file mode 100644
index 0000000..5efd692
--- /dev/null
+++ b/src/heap/store-buffer.h
@@ -0,0 +1,221 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STORE_BUFFER_H_
+#define V8_STORE_BUFFER_H_
+
+#include "src/allocation.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+class Page;
+class PagedSpace;
+class StoreBuffer;
+
+typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
+
+typedef void (StoreBuffer::*RegionCallback)(Address start, Address end,
+                                            ObjectSlotCallback slot_callback,
+                                            bool clear_maps);
+
+// Used to implement the write barrier by collecting addresses of pointers
+// between spaces.
+class StoreBuffer {
+ public:
+  explicit StoreBuffer(Heap* heap);
+
+  static void StoreBufferOverflow(Isolate* isolate);
+
+  inline Address TopAddress();
+
+  void SetUp();
+  void TearDown();
+
+  // This is used by the mutator to enter addresses into the store buffer.
+  inline void Mark(Address addr);
+
+  // This is used by the heap traversal to enter the addresses into the store
+  // buffer that should still be in the store buffer after GC.  It enters
+  // addresses directly into the old buffer because the GC starts by wiping the
+  // old buffer and thereafter only visits each cell once so there is no need
+  // to attempt to remove any dupes.  During the first part of a GC we
+  // are using the store buffer to access the old spaces and at the same time
+  // we are rebuilding the store buffer using this function.  There is, however,
+  // no issue of overwriting the buffer we are iterating over, because this
+  // stage of the scavenge can only reduce the number of addresses in the store
+  // buffer (some objects are promoted so pointers to them do not need to be in
+  // the store buffer).  The later parts of the GC scan the pages that are
+  // exempt from the store buffer and process the promotion queue.  These steps
+  // can overflow this buffer.  We check for this and on overflow we call the
+  // callback set up with the StoreBufferRebuildScope object.
+  inline void EnterDirectlyIntoStoreBuffer(Address addr);
+
+  // Iterates over all pointers that go from old space to new space.  It will
+  // empty the store buffer as it starts, so the callback should reenter
+  // surviving old-to-new pointers into the store buffer to rebuild it.
+  void IteratePointersToNewSpace(ObjectSlotCallback callback);
+
+  // Same as IteratePointersToNewSpace but additionally clears maps in objects
+  // referenced from the store buffer that do not contain a forwarding pointer.
+  void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);
+
+  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
+  static const int kStoreBufferSize = kStoreBufferOverflowBit;
+  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
+  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
+  static const int kHashSetLengthLog2 = 12;
+  static const int kHashSetLength = 1 << kHashSetLengthLog2;
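+
+  // Layout note (see SetUp() and store-buffer-inl.h): kStoreBufferSize equals
+  // kStoreBufferOverflowBit, and the buffer start is aligned to twice its
+  // size, so the one-past-the-end address limit_ is the only store-buffer
+  // address with that bit set.  The mutator's write-barrier fast path can
+  // therefore detect overflow with a single mask of the bumped top pointer
+  // instead of a compare against limit_.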
+
+  void Compact();
+
+  void GCPrologue();
+  void GCEpilogue();
+
+  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
+  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
+  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
+  void SetTop(Object*** top) {
+    DCHECK(top >= Start());
+    DCHECK(top <= Limit());
+    old_top_ = reinterpret_cast<Address*>(top);
+  }
+
+  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
+  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
+
+  // Goes through the store buffer removing pointers to things that have
+  // been promoted.  Rebuilds the store buffer completely if it overflowed.
+  void SortUniq();
+
+  void EnsureSpace(intptr_t space_needed);
+  void Verify();
+
+  bool PrepareForIteration();
+
+#ifdef DEBUG
+  void Clean();
+  // Slow, for asserts only.
+  bool CellIsInStoreBuffer(Address cell);
+#endif
+
+  void Filter(int flag);
+
+ private:
+  Heap* heap_;
+
+  // The store buffer is divided up into a new buffer that is constantly being
+  // filled by mutator activity and an old buffer that is filled with the data
+  // from the new buffer after compression.
+  Address* start_;
+  Address* limit_;
+
+  Address* old_start_;
+  Address* old_limit_;
+  Address* old_top_;
+  Address* old_reserved_limit_;
+  base::VirtualMemory* old_virtual_memory_;
+
+  bool old_buffer_is_sorted_;
+  bool old_buffer_is_filtered_;
+  bool during_gc_;
+  // The garbage collector iterates over many pointers to new space that are not
+  // handled by the store buffer.  This flag indicates whether the pointers
+  // found by the callbacks should be added to the store buffer or not.
+  bool store_buffer_rebuilding_enabled_;
+  StoreBufferCallback callback_;
+  bool may_move_store_buffer_entries_;
+
+  base::VirtualMemory* virtual_memory_;
+
+  // Two hash sets used for filtering.
+  // If an address is in the hash set then it is guaranteed to be in the
+  // old part of the store buffer.
+  uintptr_t* hash_set_1_;
+  uintptr_t* hash_set_2_;
+  bool hash_sets_are_empty_;
+
+  void ClearFilteringHashSets();
+
+  bool SpaceAvailable(intptr_t space_needed);
+  void Uniq();
+  void ExemptPopularPages(int prime_sample_step, int threshold);
+
+  // Set the map field of the object to NULL if it contains a map.
+  inline void ClearDeadObject(HeapObject* object);
+
+  void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);
+
+  void FindPointersToNewSpaceInRegion(Address start, Address end,
+                                      ObjectSlotCallback slot_callback,
+                                      bool clear_maps);
+
+  // For each region of pointers on a page in use from an old space, call the
+  // visit_pointer_region callback.
+  // If either visit_pointer_region or the slot callback can cause an
+  // allocation in old space (and hence a change in the allocation watermark),
+  // then can_preallocate_during_iteration should be set to true.
+  void IteratePointersOnPage(PagedSpace* space, Page* page,
+                             RegionCallback region_callback,
+                             ObjectSlotCallback slot_callback);
+
+  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
+                                    bool clear_maps);
+
+#ifdef VERIFY_HEAP
+  void VerifyPointers(LargeObjectSpace* space);
+#endif
+
+  friend class StoreBufferRebuildScope;
+  friend class DontMoveStoreBufferEntriesScope;
+};
+
+
+class StoreBufferRebuildScope {
+ public:
+  explicit StoreBufferRebuildScope(Heap* heap, StoreBuffer* store_buffer,
+                                   StoreBufferCallback callback)
+      : store_buffer_(store_buffer),
+        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
+        stored_callback_(store_buffer->callback_) {
+    store_buffer_->store_buffer_rebuilding_enabled_ = true;
+    store_buffer_->callback_ = callback;
+    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
+  }
+
+  ~StoreBufferRebuildScope() {
+    store_buffer_->callback_ = stored_callback_;
+    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
+  }
+
+ private:
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+  StoreBufferCallback stored_callback_;
+};
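+
+
+// Illustrative use of the scope above (the callback name is hypothetical):
+//
+//   {
+//     StoreBufferRebuildScope scope(heap, heap->store_buffer(),
+//                                   &MyStoreBufferCallback);
+//     // While the scope is live, pointers found during heap traversal are
+//     // re-entered via EnterDirectlyIntoStoreBuffer(); if the old buffer
+//     // fills up, the callback receives kStoreBufferFullEvent.
+//   }  // The previous callback and rebuilding flag are restored here.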
+
+
+class DontMoveStoreBufferEntriesScope {
+ public:
+  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
+      : store_buffer_(store_buffer),
+        stored_state_(store_buffer->may_move_store_buffer_entries_) {
+    store_buffer_->may_move_store_buffer_entries_ = false;
+  }
+
+  ~DontMoveStoreBufferEntriesScope() {
+    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
+  }
+
+ private:
+  StoreBuffer* store_buffer_;
+  bool stored_state_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_STORE_BUFFER_H_
diff --git a/src/heap/sweeper-thread.cc b/src/heap/sweeper-thread.cc
new file mode 100644
index 0000000..b0e8cea
--- /dev/null
+++ b/src/heap/sweeper-thread.cc
@@ -0,0 +1,82 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/heap/sweeper-thread.h"
+
+#include "src/v8.h"
+
+#include "src/isolate.h"
+#include "src/v8threads.h"
+
+namespace v8 {
+namespace internal {
+
+static const int kSweeperThreadStackSize = 64 * KB;
+
+SweeperThread::SweeperThread(Isolate* isolate)
+    : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
+      isolate_(isolate),
+      heap_(isolate->heap()),
+      collector_(heap_->mark_compact_collector()),
+      start_sweeping_semaphore_(0),
+      end_sweeping_semaphore_(0),
+      stop_semaphore_(0) {
+  DCHECK(!FLAG_job_based_sweeping);
+  base::NoBarrier_Store(&stop_thread_, static_cast<base::AtomicWord>(false));
+}
+
+
+void SweeperThread::Run() {
+  Isolate::SetIsolateThreadLocals(isolate_, NULL);
+  DisallowHeapAllocation no_allocation;
+  DisallowHandleAllocation no_handles;
+  DisallowHandleDereference no_deref;
+
+  while (true) {
+    start_sweeping_semaphore_.Wait();
+
+    if (base::Acquire_Load(&stop_thread_)) {
+      stop_semaphore_.Signal();
+      return;
+    }
+
+    collector_->SweepInParallel(heap_->old_data_space(), 0);
+    collector_->SweepInParallel(heap_->old_pointer_space(), 0);
+    end_sweeping_semaphore_.Signal();
+  }
+}
+
+
+void SweeperThread::Stop() {
+  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(true));
+  start_sweeping_semaphore_.Signal();
+  stop_semaphore_.Wait();
+  Join();
+}
+
+
+void SweeperThread::StartSweeping() { start_sweeping_semaphore_.Signal(); }
+
+
+void SweeperThread::WaitForSweeperThread() { end_sweeping_semaphore_.Wait(); }
+
+
+bool SweeperThread::SweepingCompleted() {
+  bool value = end_sweeping_semaphore_.WaitFor(base::TimeDelta::FromSeconds(0));
+  if (value) {
+    end_sweeping_semaphore_.Signal();
+  }
+  return value;
+}
+
+
+int SweeperThread::NumberOfThreads(int max_available) {
+  if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
+  if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;
+  if (FLAG_concurrent_sweeping) return max_available - 1;
+  DCHECK(FLAG_parallel_sweeping);
+  return max_available;
+}
+}
+}  // namespace v8::internal
diff --git a/src/heap/sweeper-thread.h b/src/heap/sweeper-thread.h
new file mode 100644
index 0000000..fc6bdda
--- /dev/null
+++ b/src/heap/sweeper-thread.h
@@ -0,0 +1,45 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_HEAP_SWEEPER_THREAD_H_
+#define V8_HEAP_SWEEPER_THREAD_H_
+
+#include "src/base/atomicops.h"
+#include "src/base/platform/platform.h"
+#include "src/flags.h"
+#include "src/utils.h"
+
+#include "src/heap/spaces.h"
+
+#include "src/heap/heap.h"
+
+namespace v8 {
+namespace internal {
+
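+// A sweeper thread runs a simple semaphore handshake with the main thread
+// (see sweeper-thread.cc): StartSweeping() signals the start semaphore; Run()
+// wakes, sweeps the old data and old pointer spaces, and signals the end
+// semaphore; WaitForSweeperThread() blocks on that semaphore, while
+// SweepingCompleted() polls it with a zero timeout and re-signals on success
+// so the check can be repeated.  Stop() sets the atomic stop flag, signals
+// the start semaphore to wake the thread, waits for acknowledgement on the
+// stop semaphore, and joins.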
+class SweeperThread : public base::Thread {
+ public:
+  explicit SweeperThread(Isolate* isolate);
+  ~SweeperThread() {}
+
+  void Run();
+  void Stop();
+  void StartSweeping();
+  void WaitForSweeperThread();
+  bool SweepingCompleted();
+
+  static int NumberOfThreads(int max_available);
+
+ private:
+  Isolate* isolate_;
+  Heap* heap_;
+  MarkCompactCollector* collector_;
+  base::Semaphore start_sweeping_semaphore_;
+  base::Semaphore end_sweeping_semaphore_;
+  base::Semaphore stop_semaphore_;
+  volatile base::AtomicWord stop_thread_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_HEAP_SWEEPER_THREAD_H_
diff --git a/src/hydrogen-bce.cc b/src/hydrogen-bce.cc
index 5b13429..18bd0af 100644
--- a/src/hydrogen-bce.cc
+++ b/src/hydrogen-bce.cc
@@ -113,7 +113,7 @@
   void UpdateUpperOffsets(HBoundsCheck* check, int32_t offset) {
     BoundsCheckBbData* data = FatherInDominatorTree();
     while (data != NULL && data->UpperCheck() == check) {
-      ASSERT(data->upper_offset_ < offset);
+      DCHECK(data->upper_offset_ < offset);
       data->upper_offset_ = offset;
       data = data->FatherInDominatorTree();
     }
@@ -122,7 +122,7 @@
   void UpdateLowerOffsets(HBoundsCheck* check, int32_t offset) {
     BoundsCheckBbData* data = FatherInDominatorTree();
     while (data != NULL && data->LowerCheck() == check) {
-      ASSERT(data->lower_offset_ > offset);
+      DCHECK(data->lower_offset_ > offset);
       data->lower_offset_ = offset;
       data = data->FatherInDominatorTree();
     }
@@ -142,7 +142,7 @@
   // new_offset, and new_check is removed.
   void CoverCheck(HBoundsCheck* new_check,
                   int32_t new_offset) {
-    ASSERT(new_check->index()->representation().IsSmiOrInteger32());
+    DCHECK(new_check->index()->representation().IsSmiOrInteger32());
     bool keep_new_check = false;
 
     if (new_offset > upper_offset_) {
@@ -170,8 +170,8 @@
 
     if (!keep_new_check) {
       if (FLAG_trace_bce) {
-        OS::Print("Eliminating check #%d after tightening\n",
-                  new_check->id());
+        base::OS::Print("Eliminating check #%d after tightening\n",
+                        new_check->id());
       }
       new_check->block()->graph()->isolate()->counters()->
           bounds_checks_eliminated()->Increment();
@@ -180,11 +180,11 @@
       HBoundsCheck* first_check = new_check == lower_check_ ? upper_check_
                                                             : lower_check_;
       if (FLAG_trace_bce) {
-        OS::Print("Moving second check #%d after first check #%d\n",
-                  new_check->id(), first_check->id());
+        base::OS::Print("Moving second check #%d after first check #%d\n",
+                        new_check->id(), first_check->id());
       }
       // The length is guaranteed to be live at first_check.
-      ASSERT(new_check->length() == first_check->length());
+      DCHECK(new_check->length() == first_check->length());
       HInstruction* old_position = new_check->next();
       new_check->Unlink();
       new_check->InsertAfter(first_check);
@@ -278,13 +278,13 @@
   void TightenCheck(HBoundsCheck* original_check,
                     HBoundsCheck* tighter_check,
                     int32_t new_offset) {
-    ASSERT(original_check->length() == tighter_check->length());
+    DCHECK(original_check->length() == tighter_check->length());
     MoveIndexIfNecessary(tighter_check->index(), original_check, tighter_check);
     original_check->ReplaceAllUsesWith(original_check->index());
     original_check->SetOperandAt(0, tighter_check->index());
     if (FLAG_trace_bce) {
-      OS::Print("Tightened check #%d with offset %d from #%d\n",
-                original_check->id(), new_offset, tighter_check->id());
+      base::OS::Print("Tightened check #%d with offset %d from #%d\n",
+                      original_check->id(), new_offset, tighter_check->id());
     }
   }
 
@@ -396,15 +396,15 @@
                                                    NULL);
       *data_p = bb_data_list;
       if (FLAG_trace_bce) {
-        OS::Print("Fresh bounds check data for block #%d: [%d]\n",
-                  bb->block_id(), offset);
+        base::OS::Print("Fresh bounds check data for block #%d: [%d]\n",
+                        bb->block_id(), offset);
       }
     } else if (data->OffsetIsCovered(offset)) {
       bb->graph()->isolate()->counters()->
           bounds_checks_eliminated()->Increment();
       if (FLAG_trace_bce) {
-        OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
-                  check->id(), offset);
+        base::OS::Print("Eliminating bounds check #%d, offset %d is covered\n",
+                        check->id(), offset);
       }
       check->DeleteAndReplaceWith(check->ActualValue());
     } else if (data->BasicBlock() == bb) {
@@ -439,8 +439,8 @@
                                                    bb_data_list,
                                                    data);
       if (FLAG_trace_bce) {
-        OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
-                  bb->block_id(), new_lower_offset, new_upper_offset);
+        base::OS::Print("Updated bounds check data for block #%d: [%d - %d]\n",
+                        bb->block_id(), new_lower_offset, new_upper_offset);
       }
       table_.Insert(key, bb_data_list, zone());
     }
diff --git a/src/hydrogen-bch.cc b/src/hydrogen-bch.cc
index 34216c6..5af6030 100644
--- a/src/hydrogen-bch.cc
+++ b/src/hydrogen-bch.cc
@@ -44,7 +44,7 @@
      * induction variable).
      */
     void InitializeLoop(InductionVariableData* data) {
-      ASSERT(data->limit() != NULL);
+      DCHECK(data->limit() != NULL);
       HLoopInformation* loop = data->phi()->block()->current_loop();
       is_start_ = (block() == loop->loop_header());
       is_proper_exit_ = (block() == data->induction_exit_target());
@@ -55,7 +55,7 @@
     // Utility methods to iterate over dominated blocks.
     void ResetCurrentDominatedBlock() { current_dominated_block_ = kNoBlock; }
     HBasicBlock* CurrentDominatedBlock() {
-      ASSERT(current_dominated_block_ != kNoBlock);
+      DCHECK(current_dominated_block_ != kNoBlock);
       return current_dominated_block_ < block()->dominated_blocks()->length() ?
           block()->dominated_blocks()->at(current_dominated_block_) : NULL;
     }
@@ -181,7 +181,7 @@
       Element element;
       element.set_block(graph->blocks()->at(i));
       elements_.Add(element, graph->zone());
-      ASSERT(at(i)->block()->block_id() == i);
+      DCHECK(at(i)->block()->block_id() == i);
     }
   }
 
diff --git a/src/hydrogen-check-elimination.cc b/src/hydrogen-check-elimination.cc
index 98e3d3d..1530fe1 100644
--- a/src/hydrogen-check-elimination.cc
+++ b/src/hydrogen-check-elimination.cc
@@ -57,7 +57,7 @@
         (state2 == CHECKED && state1 == CHECKED_STABLE)) {
       return CHECKED;
     }
-    ASSERT((state1 == CHECKED_STABLE && state2 == UNCHECKED_STABLE) ||
+    DCHECK((state1 == CHECKED_STABLE && state2 == UNCHECKED_STABLE) ||
            (state2 == CHECKED_STABLE && state1 == UNCHECKED_STABLE));
     return UNCHECKED_STABLE;
   }
@@ -177,7 +177,7 @@
     HCheckTable* copy = new(zone) HCheckTable(phase_);
     for (int i = 0; i < size_; i++) {
       HCheckTableEntry* old_entry = &entries_[i];
-      ASSERT(old_entry->maps_->size() > 0);
+      DCHECK(old_entry->maps_->size() > 0);
       HCheckTableEntry* new_entry = &copy->entries_[i];
       new_entry->object_ = old_entry->object_;
       new_entry->maps_ = old_entry->maps_;
@@ -241,7 +241,7 @@
             UniqueSet<Map>* maps = entry->maps_->Copy(zone);
             maps->Remove(cmp->map());
             entry->maps_ = maps;
-            ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+            DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
           }
         }
         learned = true;
@@ -265,8 +265,8 @@
           le->maps_ = re->maps_ = le->maps_->Intersect(re->maps_, zone);
           le->state_ = re->state_ = HCheckTableEntry::StateMerge(
               le->state_, re->state_);
-          ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, le->state_);
-          ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, re->state_);
+          DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, le->state_);
+          DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, re->state_);
         }
         learned = true;
       } else if (end->IsIsStringAndBranch()) {
@@ -281,14 +281,14 @@
           } else {
             EnsureChecked(entry, object, cmp);
             entry->maps_ = entry->maps_->Intersect(string_maps(), zone);
-            ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+            DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
           }
         } else {
           // Learn on the false branch of if(IsString(x)).
           if (entry != NULL) {
             EnsureChecked(entry, object, cmp);
             entry->maps_ = entry->maps_->Subtract(string_maps(), zone);
-            ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+            DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
           }
         }
       }
@@ -344,7 +344,7 @@
           if (this_entry->check_ != that_entry->check_) {
             this_entry->check_ = NULL;
           }
-          ASSERT(this_entry->maps_->size() > 0);
+          DCHECK(this_entry->maps_->size() > 0);
         }
       }
       if (compact) Compact();
@@ -367,13 +367,13 @@
       if (entry->maps_->IsSubset(instr->maps())) {
         // The first check is more strict; the second is redundant.
         if (entry->check_ != NULL) {
-          ASSERT_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
+          DCHECK_NE(HCheckTableEntry::UNCHECKED_STABLE, entry->state_);
           TRACE(("Replacing redundant CheckMaps #%d at B%d with #%d\n",
               instr->id(), instr->block()->block_id(), entry->check_->id()));
           instr->DeleteAndReplaceWith(entry->check_);
           INC_STAT(redundant_);
         } else if (entry->state_ == HCheckTableEntry::UNCHECKED_STABLE) {
-          ASSERT_EQ(NULL, entry->check_);
+          DCHECK_EQ(NULL, entry->check_);
           TRACE(("Marking redundant CheckMaps #%d at B%d as stability check\n",
                  instr->id(), instr->block()->block_id()));
           instr->set_maps(entry->maps_->Copy(graph->zone()));
@@ -413,7 +413,7 @@
             // There is a check in the same block so replace it with a more
             // strict check and eliminate the second check entirely.
             HCheckMaps* check = HCheckMaps::cast(entry->check_);
-            ASSERT(!check->IsStabilityCheck());
+            DCHECK(!check->IsStabilityCheck());
             TRACE(("CheckMaps #%d at B%d narrowed\n", check->id(),
                 check->block()->block_id()));
             // Update map set and ensure that the check is alive.
@@ -495,7 +495,7 @@
       // Check if we introduce field maps here.
       MapSet maps = instr->maps();
       if (maps != NULL) {
-        ASSERT_NE(0, maps->size());
+        DCHECK_NE(0, maps->size());
         Insert(instr, NULL, maps, HCheckTableEntry::UNCHECKED_STABLE);
       }
       return;
@@ -666,7 +666,7 @@
     bool compact = false;
     for (int i = 0; i < size_; ++i) {
       HCheckTableEntry* entry = &entries_[i];
-      ASSERT_NOT_NULL(entry->object_);
+      DCHECK_NOT_NULL(entry->object_);
       if (entry->state_ == HCheckTableEntry::CHECKED) {
         entry->object_ = NULL;
         compact = true;
@@ -684,14 +684,14 @@
     bool compact = false;
     for (int i = 0; i < size_; i++) {
       HCheckTableEntry* entry = &entries_[i];
-      ASSERT(entry->object_ != NULL);
+      DCHECK(entry->object_ != NULL);
       if (phase_->aliasing_->MayAlias(entry->object_, object)) {
         entry->object_ = NULL;
         compact = true;
       }
     }
     if (compact) Compact();
-    ASSERT(Find(object) == NULL);
+    DCHECK(Find(object) == NULL);
   }
 
   void Compact() {
@@ -706,8 +706,8 @@
         size_--;
       }
     }
-    ASSERT(size_ == dest);
-    ASSERT(cursor_ <= size_);
+    DCHECK(size_ == dest);
+    DCHECK(cursor_ <= size_);
 
     // Preserve the age of the entries by moving the older entries to the end.
     if (cursor_ == size_) return;  // Cursor already points at end.
@@ -734,7 +734,7 @@
 
     for (int i = 0; i < table->size_; i++) {
       HCheckTableEntry* entry = &table->entries_[i];
-      ASSERT(entry->object_ != NULL);
+      DCHECK(entry->object_ != NULL);
       PrintF("  checkmaps-table @%d: %s #%d ", i,
              entry->object_->IsPhi() ? "phi" : "object", entry->object_->id());
       if (entry->check_ != NULL) {
@@ -755,7 +755,7 @@
     for (int i = size_ - 1; i >= 0; i--) {
       // Search from most-recently-inserted to least-recently-inserted.
       HCheckTableEntry* entry = &entries_[i];
-      ASSERT(entry->object_ != NULL);
+      DCHECK(entry->object_ != NULL);
       if (phase_->aliasing_->MustAlias(entry->object_, object)) return entry;
     }
     return NULL;
@@ -772,7 +772,7 @@
               HInstruction* check,
               MapSet maps,
               HCheckTableEntry::State state) {
-    ASSERT(state != HCheckTableEntry::UNCHECKED_STABLE || check == NULL);
+    DCHECK(state != HCheckTableEntry::UNCHECKED_STABLE || check == NULL);
     HCheckTableEntry* entry = &entries_[cursor_++];
     entry->object_ = object;
     entry->check_ = check;
diff --git a/src/hydrogen-check-elimination.h b/src/hydrogen-check-elimination.h
index 16f758b..7102a43 100644
--- a/src/hydrogen-check-elimination.h
+++ b/src/hydrogen-check-elimination.h
@@ -24,7 +24,7 @@
               graph->isolate()->factory()->name##_map()), zone());
     STRING_TYPE_LIST(ADD_STRING_MAP)
     #undef ADD_STRING_MAP
-    ASSERT_EQ(kStringMapsSize, string_maps_.size());
+    DCHECK_EQ(kStringMapsSize, string_maps_.size());
 #ifdef DEBUG
     redundant_ = 0;
     removed_ = 0;
diff --git a/src/hydrogen-dce.cc b/src/hydrogen-dce.cc
index 96f088d..c55426d 100644
--- a/src/hydrogen-dce.cc
+++ b/src/hydrogen-dce.cc
@@ -32,16 +32,14 @@
 
 
 void HDeadCodeEliminationPhase::PrintLive(HValue* ref, HValue* instr) {
-  HeapStringAllocator allocator;
-  StringStream stream(&allocator);
+  OFStream os(stdout);
+  os << "[MarkLive ";
   if (ref != NULL) {
-    ref->PrintTo(&stream);
+    os << *ref;
   } else {
-    stream.Add("root ");
+    os << "root ";
   }
-  stream.Add(" -> ");
-  instr->PrintTo(&stream);
-  PrintF("[MarkLive %s]\n", stream.ToCString().get());
+  os << " -> " << *instr << "]" << endl;
 }
 
 
@@ -61,7 +59,7 @@
     }
   }
 
-  ASSERT(worklist.is_empty());  // Should have processed everything.
+  DCHECK(worklist.is_empty());  // Should have processed everything.
 }
 
 
diff --git a/src/hydrogen-dehoist.cc b/src/hydrogen-dehoist.cc
index fe0ae76..0c7a9b9 100644
--- a/src/hydrogen-dehoist.cc
+++ b/src/hydrogen-dehoist.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/hydrogen-dehoist.h"
+#include "src/base/safe_math.h"
 
 namespace v8 {
 namespace internal {
@@ -28,15 +29,25 @@
   if (!constant->HasInteger32Value()) return;
   int32_t sign = binary_operation->IsSub() ? -1 : 1;
   int32_t value = constant->Integer32Value() * sign;
-  // We limit offset values to 30 bits because we want to avoid the risk of
-  // overflows when the offset is added to the object header size.
-  if (value >= 1 << array_operation->MaxBaseOffsetBits() || value < 0) return;
+  if (value < 0) return;
+
+  // Multiply value by elements size, bailing out on overflow.
+  int32_t elements_kind_size =
+      1 << ElementsKindToShiftSize(array_operation->elements_kind());
+  v8::base::internal::CheckedNumeric<int32_t> multiply_result = value;
+  multiply_result = multiply_result * elements_kind_size;
+  if (!multiply_result.IsValid()) return;
+  value = multiply_result.ValueOrDie();
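+  // Illustrative overflow case (hypothetical numbers): with 8-byte double
+  // elements (elements_kind_size == 8) and value == 0x10000000, the product
+  // is 0x80000000, which does not fit in int32_t; IsValid() is then false
+  // and we bail out instead of silently wrapping as the old shift did.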
+
+  // Ensure that the array operation can add value to existing base offset
+  // without overflowing.
+  if (!array_operation->TryIncreaseBaseOffset(value)) return;
+
   array_operation->SetKey(subexpression);
   if (binary_operation->HasNoUses()) {
     binary_operation->DeleteAndReplaceWith(NULL);
   }
-  value <<= ElementsKindToShiftSize(array_operation->elements_kind());
-  array_operation->IncreaseBaseOffset(static_cast<uint32_t>(value));
+
   array_operation->SetDehoisted(true);
 }
 
diff --git a/src/hydrogen-environment-liveness.cc b/src/hydrogen-environment-liveness.cc
index a72dfda..8e9018f 100644
--- a/src/hydrogen-environment-liveness.cc
+++ b/src/hydrogen-environment-liveness.cc
@@ -22,7 +22,7 @@
       collect_markers_(true),
       last_simulate_(NULL),
       went_live_since_last_simulate_(maximum_environment_size_, zone()) {
-  ASSERT(maximum_environment_size_ > 0);
+  DCHECK(maximum_environment_size_ > 0);
   for (int i = 0; i < block_count_; ++i) {
     live_at_block_start_.Add(
         new(zone()) BitVector(maximum_environment_size_, zone()), zone());
@@ -61,7 +61,7 @@
       }
       HSimulate* simulate = first_simulate_.at(successor_id);
       if (simulate == NULL) continue;
-      ASSERT(VerifyClosures(simulate->closure(),
+      DCHECK(VerifyClosures(simulate->closure(),
           block->last_environment()->closure()));
       ZapEnvironmentSlot(i, simulate);
     }
@@ -74,7 +74,7 @@
   if (!marker->CheckFlag(HValue::kEndsLiveRange)) return;
   HSimulate* simulate = marker->next_simulate();
   if (simulate != NULL) {
-    ASSERT(VerifyClosures(simulate->closure(), marker->closure()));
+    DCHECK(VerifyClosures(simulate->closure(), marker->closure()));
     ZapEnvironmentSlot(marker->index(), simulate);
   }
 }
@@ -109,7 +109,7 @@
       if (marker->kind() == HEnvironmentMarker::LOOKUP) {
         live->Add(index);
       } else {
-        ASSERT(marker->kind() == HEnvironmentMarker::BIND);
+        DCHECK(marker->kind() == HEnvironmentMarker::BIND);
         live->Remove(index);
         went_live_since_last_simulate_.Add(index);
       }
@@ -124,10 +124,10 @@
       live->Clear();
       last_simulate_ = NULL;
 
-      // The following ASSERTs guard the assumption used in case
+      // The following DCHECKs guard the assumption used in case
       // kEnterInlined below:
-      ASSERT(instr->next()->IsSimulate());
-      ASSERT(instr->next()->next()->IsGoto());
+      DCHECK(instr->next()->IsSimulate());
+      DCHECK(instr->next()->next()->IsGoto());
 
       break;
     case HValue::kEnterInlined: {
@@ -135,7 +135,7 @@
       // target block. Here we make use of the fact that the end of an
       // inline sequence always looks like this: HLeaveInlined, HSimulate,
       // HGoto (to return_target block), with no environment lookups in
-      // between (see ASSERTs above).
+      // between (see DCHECKs above).
       HEnterInlined* enter = HEnterInlined::cast(instr);
       live->Clear();
       for (int i = 0; i < enter->return_targets()->length(); ++i) {
@@ -156,7 +156,7 @@
 
 
 void HEnvironmentLivenessAnalysisPhase::Run() {
-  ASSERT(maximum_environment_size_ > 0);
+  DCHECK(maximum_environment_size_ > 0);
 
   // Main iteration. Compute liveness of environment slots, and store it
   // for each block until it doesn't change any more. For efficiency, visit
diff --git a/src/hydrogen-escape-analysis.cc b/src/hydrogen-escape-analysis.cc
index 23ca468..3b0f158 100644
--- a/src/hydrogen-escape-analysis.cc
+++ b/src/hydrogen-escape-analysis.cc
@@ -189,7 +189,7 @@
           HLoadNamedField* load = HLoadNamedField::cast(instr);
           int index = load->access().offset() / kPointerSize;
           if (load->object() != allocate) continue;
-          ASSERT(load->access().IsInobject());
+          DCHECK(load->access().IsInobject());
           HValue* replacement =
             NewLoadReplacement(load, state->OperandAt(index));
           load->DeleteAndReplaceWith(replacement);
@@ -203,7 +203,7 @@
           HStoreNamedField* store = HStoreNamedField::cast(instr);
           int index = store->access().offset() / kPointerSize;
           if (store->object() != allocate) continue;
-          ASSERT(store->access().IsInobject());
+          DCHECK(store->access().IsInobject());
           state = NewStateCopy(store->previous(), state);
           state->SetOperandAt(index, store->value());
           if (store->has_transition()) {
@@ -286,7 +286,7 @@
   }
 
   // All uses have been handled.
-  ASSERT(allocate->HasNoUses());
+  DCHECK(allocate->HasNoUses());
   allocate->DeleteAndReplaceWith(NULL);
 }
 
@@ -305,8 +305,8 @@
     AnalyzeDataFlow(allocate);
 
     cumulative_values_ += number_of_values_;
-    ASSERT(allocate->HasNoUses());
-    ASSERT(!allocate->IsLinked());
+    DCHECK(allocate->HasNoUses());
+    DCHECK(!allocate->IsLinked());
   }
 }
 
diff --git a/src/hydrogen-flow-engine.h b/src/hydrogen-flow-engine.h
index 5ce320a..257ab46 100644
--- a/src/hydrogen-flow-engine.h
+++ b/src/hydrogen-flow-engine.h
@@ -102,7 +102,7 @@
       State* state = State::Finish(StateAt(block), block, zone_);
 
       if (block->IsReachable()) {
-        ASSERT(state != NULL);
+        DCHECK(state != NULL);
         if (block->IsLoopHeader()) {
           // Apply loop effects before analyzing loop body.
           ComputeLoopEffects(block)->Apply(state);
@@ -139,7 +139,7 @@
   // Computes and caches the loop effects for the loop which has the given
   // block as its loop header.
   Effects* ComputeLoopEffects(HBasicBlock* block) {
-    ASSERT(block->IsLoopHeader());
+    DCHECK(block->IsLoopHeader());
     Effects* effects = loop_effects_[block->block_id()];
     if (effects != NULL) return effects;  // Already analyzed this loop.
 
@@ -154,7 +154,7 @@
       HBasicBlock* member = graph_->blocks()->at(i);
       if (i != block->block_id() && member->IsLoopHeader()) {
         // Recursively compute and cache the effects of the nested loop.
-        ASSERT(member->loop_information()->parent_loop() == loop);
+        DCHECK(member->loop_information()->parent_loop() == loop);
         Effects* nested = ComputeLoopEffects(member);
         effects->Union(nested, zone_);
         // Skip the nested loop's blocks.
@@ -162,7 +162,7 @@
       } else {
         // Process all the effects of the block.
         if (member->IsUnreachable()) continue;
-        ASSERT(member->current_loop() == loop);
+        DCHECK(member->current_loop() == loop);
         for (HInstructionIterator it(member); !it.Done(); it.Advance()) {
           effects->Process(it.Current(), zone_);
         }
@@ -195,7 +195,7 @@
   }
 
   inline void CheckPredecessorCount(HBasicBlock* block) {
-    ASSERT(block->predecessors()->length() == pred_counts_[block->block_id()]);
+    DCHECK(block->predecessors()->length() == pred_counts_[block->block_id()]);
   }
 
   inline void IncrementPredecessorCount(HBasicBlock* block) {
diff --git a/src/hydrogen-gvn.cc b/src/hydrogen-gvn.cc
index e6f1ae9..be1e17b 100644
--- a/src/hydrogen-gvn.cc
+++ b/src/hydrogen-gvn.cc
@@ -9,7 +9,7 @@
 namespace v8 {
 namespace internal {
 
-class HInstructionMap V8_FINAL : public ZoneObject {
+class HInstructionMap FINAL : public ZoneObject {
  public:
   HInstructionMap(Zone* zone, SideEffectsTracker* side_effects_tracker)
       : array_size_(0),
@@ -70,7 +70,7 @@
 };
 
 
-class HSideEffectMap V8_FINAL BASE_EMBEDDED {
+class HSideEffectMap FINAL BASE_EMBEDDED {
  public:
   HSideEffectMap();
   explicit HSideEffectMap(HSideEffectMap* other);
@@ -83,8 +83,8 @@
   bool IsEmpty() const { return count_ == 0; }
 
   inline HInstruction* operator[](int i) const {
-    ASSERT(0 <= i);
-    ASSERT(i < kNumberOfTrackedSideEffects);
+    DCHECK(0 <= i);
+    DCHECK(i < kNumberOfTrackedSideEffects);
     return data_[i];
   }
   inline HInstruction* at(int i) const { return operator[](i); }
@@ -98,7 +98,7 @@
 void TraceGVN(const char* msg, ...) {
   va_list arguments;
   va_start(arguments, msg);
-  OS::VPrint(msg, arguments);
+  base::OS::VPrint(msg, arguments);
   va_end(arguments);
 }
 
@@ -212,7 +212,7 @@
 
 
 void HInstructionMap::Resize(int new_size, Zone* zone) {
-  ASSERT(new_size > count_);
+  DCHECK(new_size > count_);
   // Hashing the values into the new array has no more collisions than in the
   // old hash map, so we can use the existing lists_ array, if we are careful.
 
@@ -252,12 +252,12 @@
     }
   }
   USE(old_count);
-  ASSERT(count_ == old_count);
+  DCHECK(count_ == old_count);
 }
 
 
 void HInstructionMap::ResizeLists(int new_size, Zone* zone) {
-  ASSERT(new_size > lists_size_);
+  DCHECK(new_size > lists_size_);
 
   HInstructionMapListElement* new_lists =
       zone->NewArray<HInstructionMapListElement>(new_size);
@@ -280,10 +280,10 @@
 
 
 void HInstructionMap::Insert(HInstruction* instr, Zone* zone) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
   // Resizing when half of the hashtable is filled up.
   if (count_ >= array_size_ >> 1) Resize(array_size_ << 1, zone);
-  ASSERT(count_ < array_size_);
+  DCHECK(count_ < array_size_);
   count_++;
   uint32_t pos = Bound(static_cast<uint32_t>(instr->Hashcode()));
   if (array_[pos].instr == NULL) {
@@ -294,11 +294,11 @@
       ResizeLists(lists_size_ << 1, zone);
     }
     int new_element_pos = free_list_head_;
-    ASSERT(new_element_pos != kNil);
+    DCHECK(new_element_pos != kNil);
     free_list_head_ = lists_[free_list_head_].next;
     lists_[new_element_pos].instr = instr;
     lists_[new_element_pos].next = array_[pos].next;
-    ASSERT(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
+    DCHECK(array_[pos].next == kNil || lists_[array_[pos].next].instr != NULL);
     array_[pos].next = new_element_pos;
   }
 }
@@ -400,20 +400,20 @@
 }
 
 
-void SideEffectsTracker::PrintSideEffectsTo(StringStream* stream,
-                                          SideEffects side_effects) const {
+OStream& operator<<(OStream& os, const TrackedEffects& te) {
+  SideEffectsTracker* t = te.tracker;
   const char* separator = "";
-  stream->Add("[");
+  os << "[";
   for (int bit = 0; bit < kNumberOfFlags; ++bit) {
     GVNFlag flag = GVNFlagFromInt(bit);
-    if (side_effects.ContainsFlag(flag)) {
-      stream->Add(separator);
+    if (te.effects.ContainsFlag(flag)) {
+      os << separator;
       separator = ", ";
       switch (flag) {
-#define DECLARE_FLAG(Type)      \
-        case k##Type:           \
-          stream->Add(#Type);   \
-          break;
+#define DECLARE_FLAG(Type) \
+  case k##Type:            \
+    os << #Type;           \
+    break;
 GVN_TRACKED_FLAG_LIST(DECLARE_FLAG)
 GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
 #undef DECLARE_FLAG
@@ -422,21 +422,20 @@
       }
     }
   }
-  for (int index = 0; index < num_global_vars_; ++index) {
-    if (side_effects.ContainsSpecial(GlobalVar(index))) {
-      stream->Add(separator);
+  for (int index = 0; index < t->num_global_vars_; ++index) {
+    if (te.effects.ContainsSpecial(t->GlobalVar(index))) {
+      os << separator << "[" << *t->global_vars_[index].handle() << "]";
       separator = ", ";
-      stream->Add("[%p]", *global_vars_[index].handle());
     }
   }
-  for (int index = 0; index < num_inobject_fields_; ++index) {
-    if (side_effects.ContainsSpecial(InobjectField(index))) {
-      stream->Add(separator);
+  for (int index = 0; index < t->num_inobject_fields_; ++index) {
+    if (te.effects.ContainsSpecial(t->InobjectField(index))) {
+      os << separator << t->inobject_fields_[index];
       separator = ", ";
-      inobject_fields_[index].PrintTo(stream);
     }
   }
-  stream->Add("]");
+  os << "]";
+  return os;
 }
 
 
@@ -449,11 +448,9 @@
   }
   if (num_global_vars_ < kNumberOfGlobalVars) {
     if (FLAG_trace_gvn) {
-      HeapStringAllocator allocator;
-      StringStream stream(&allocator);
-      stream.Add("Tracking global var [%p] (mapped to index %d)\n",
-                 *cell.handle(), num_global_vars_);
-      stream.OutputToStdOut();
+      OFStream os(stdout);
+      os << "Tracking global var [" << *cell.handle() << "] "
+         << "(mapped to index " << num_global_vars_ << ")" << endl;
     }
     *index = num_global_vars_;
     global_vars_[num_global_vars_++] = cell;
@@ -473,12 +470,9 @@
   }
   if (num_inobject_fields_ < kNumberOfInobjectFields) {
     if (FLAG_trace_gvn) {
-      HeapStringAllocator allocator;
-      StringStream stream(&allocator);
-      stream.Add("Tracking inobject field access ");
-      access.PrintTo(&stream);
-      stream.Add(" (mapped to index %d)\n", num_inobject_fields_);
-      stream.OutputToStdOut();
+      OFStream os(stdout);
+      os << "Tracking inobject field access " << access << " (mapped to index "
+         << num_inobject_fields_ << ")" << endl;
     }
     *index = num_inobject_fields_;
     inobject_fields_[num_inobject_fields_++] = access;
@@ -494,7 +488,7 @@
       block_side_effects_(graph->blocks()->length(), zone()),
       loop_side_effects_(graph->blocks()->length(), zone()),
       visited_on_paths_(graph->blocks()->length(), zone()) {
-  ASSERT(!AllowHandleAllocation::IsAllowed());
+  DCHECK(!AllowHandleAllocation::IsAllowed());
   block_side_effects_.AddBlock(
       SideEffects(), graph->blocks()->length(), zone());
   loop_side_effects_.AddBlock(
@@ -503,7 +497,7 @@
 
 
 void HGlobalValueNumberingPhase::Run() {
-  ASSERT(!removed_side_effects_);
+  DCHECK(!removed_side_effects_);
   for (int i = FLAG_gvn_iterations; i > 0; --i) {
     // Compute the side effects.
     ComputeBlockSideEffects();
@@ -519,8 +513,8 @@
     removed_side_effects_ = false;
 
     // Clear all side effects.
-    ASSERT_EQ(block_side_effects_.length(), graph()->blocks()->length());
-    ASSERT_EQ(loop_side_effects_.length(), graph()->blocks()->length());
+    DCHECK_EQ(block_side_effects_.length(), graph()->blocks()->length());
+    DCHECK_EQ(loop_side_effects_.length(), graph()->blocks()->length());
     for (int i = 0; i < graph()->blocks()->length(); ++i) {
       block_side_effects_[i].RemoveAll();
       loop_side_effects_[i].RemoveAll();
@@ -571,13 +565,9 @@
     if (block->IsLoopHeader()) {
       SideEffects side_effects = loop_side_effects_[block->block_id()];
       if (FLAG_trace_gvn) {
-        HeapStringAllocator allocator;
-        StringStream stream(&allocator);
-        stream.Add("Try loop invariant motion for block B%d changes ",
-                   block->block_id());
-        side_effects_tracker_.PrintSideEffectsTo(&stream, side_effects);
-        stream.Add("\n");
-        stream.OutputToStdOut();
+        OFStream os(stdout);
+        os << "Try loop invariant motion for " << *block << " changes "
+           << Print(side_effects) << endl;
       }
       HBasicBlock* last = block->loop_information()->GetLastBackEdge();
       for (int j = block->block_id(); j <= last->block_id(); ++j) {
@@ -594,13 +584,9 @@
     SideEffects loop_kills) {
   HBasicBlock* pre_header = loop_header->predecessors()->at(0);
   if (FLAG_trace_gvn) {
-    HeapStringAllocator allocator;
-    StringStream stream(&allocator);
-    stream.Add("Loop invariant code motion for B%d depends on ",
-               block->block_id());
-    side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
-    stream.Add("\n");
-    stream.OutputToStdOut();
+    OFStream os(stdout);
+    os << "Loop invariant code motion for " << *block << " depends on "
+       << Print(loop_kills) << endl;
   }
   HInstruction* instr = block->first();
   while (instr != NULL) {
@@ -609,17 +595,11 @@
       SideEffects changes = side_effects_tracker_.ComputeChanges(instr);
       SideEffects depends_on = side_effects_tracker_.ComputeDependsOn(instr);
       if (FLAG_trace_gvn) {
-        HeapStringAllocator allocator;
-        StringStream stream(&allocator);
-        stream.Add("Checking instruction i%d (%s) changes ",
-                   instr->id(), instr->Mnemonic());
-        side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
-        stream.Add(", depends on ");
-        side_effects_tracker_.PrintSideEffectsTo(&stream, depends_on);
-        stream.Add(". Loop changes ");
-        side_effects_tracker_.PrintSideEffectsTo(&stream, loop_kills);
-        stream.Add("\n");
-        stream.OutputToStdOut();
+        OFStream os(stdout);
+        os << "Checking instruction i" << instr->id() << " ("
+           << instr->Mnemonic() << ") changes " << Print(changes)
+           << ", depends on " << Print(depends_on) << ". Loop changes "
+           << Print(loop_kills) << endl;
       }
       bool can_hoist = !depends_on.ContainsAnyOf(loop_kills);
       if (can_hoist && !graph()->use_optimistic_licm()) {
@@ -854,20 +834,17 @@
         map->Kill(changes);
         dominators->Store(changes, instr);
         if (FLAG_trace_gvn) {
-          HeapStringAllocator allocator;
-          StringStream stream(&allocator);
-          stream.Add("Instruction i%d changes ", instr->id());
-          side_effects_tracker_.PrintSideEffectsTo(&stream, changes);
-          stream.Add("\n");
-          stream.OutputToStdOut();
+          OFStream os(stdout);
+          os << "Instruction i" << instr->id() << " changes " << Print(changes)
+             << endl;
         }
       }
       if (instr->CheckFlag(HValue::kUseGVN) &&
           !instr->CheckFlag(HValue::kCantBeReplaced)) {
-        ASSERT(!instr->HasObservableSideEffects());
+        DCHECK(!instr->HasObservableSideEffects());
         HInstruction* other = map->Lookup(instr);
         if (other != NULL) {
-          ASSERT(instr->Equals(other) && other->Equals(instr));
+          DCHECK(instr->Equals(other) && other->Equals(instr));
           TRACE_GVN_4("Replacing instruction i%d (%s) with i%d (%s)\n",
                       instr->id(),
                       instr->Mnemonic(),
diff --git a/src/hydrogen-gvn.h b/src/hydrogen-gvn.h
index ad97c15..8cdeb99 100644
--- a/src/hydrogen-gvn.h
+++ b/src/hydrogen-gvn.h
@@ -5,24 +5,26 @@
 #ifndef V8_HYDROGEN_GVN_H_
 #define V8_HYDROGEN_GVN_H_
 
+#include "src/compiler.h"
 #include "src/hydrogen.h"
 #include "src/hydrogen-instructions.h"
-#include "src/compiler.h"
 #include "src/zone.h"
 
 namespace v8 {
 namespace internal {
 
+class OStream;
+
 // This class extends GVNFlagSet with additional "special" dynamic side effects,
 // which can be used to represent side effects that cannot be expressed using
 // the GVNFlags of an HInstruction. These special side effects are tracked by a
 // SideEffectsTracker (see below).
-class SideEffects V8_FINAL {
+class SideEffects FINAL {
  public:
   static const int kNumberOfSpecials = 64 - kNumberOfFlags;
 
   SideEffects() : bits_(0) {
-    ASSERT(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
+    DCHECK(kNumberOfFlags + kNumberOfSpecials == sizeof(bits_) * CHAR_BIT);
   }
   explicit SideEffects(GVNFlagSet flags) : bits_(flags.ToIntegral()) {}
   bool IsEmpty() const { return bits_ == 0; }
@@ -38,15 +40,14 @@
   void RemoveFlag(GVNFlag flag) { bits_ &= ~MaskFlag(flag); }
   void RemoveAll() { bits_ = 0; }
   uint64_t ToIntegral() const { return bits_; }
-  void PrintTo(StringStream* stream) const;
 
  private:
   uint64_t MaskFlag(GVNFlag flag) const {
     return static_cast<uint64_t>(1) << static_cast<unsigned>(flag);
   }
   uint64_t MaskSpecial(int special) const {
-    ASSERT(special >= 0);
-    ASSERT(special < kNumberOfSpecials);
+    DCHECK(special >= 0);
+    DCHECK(special < kNumberOfSpecials);
     return static_cast<uint64_t>(1) << static_cast<unsigned>(
         special + kNumberOfFlags);
   }
@@ -55,30 +56,32 @@
 };
 
 
+struct TrackedEffects;
+
 // Tracks global variable and inobject field loads/stores in a fine grained
 // fashion, and represents them using the "special" dynamic side effects of the
 // SideEffects class (see above). This way unrelated global variable/inobject
 // field stores don't prevent hoisting and merging of global variable/inobject
 // field loads.
-class SideEffectsTracker V8_FINAL BASE_EMBEDDED {
+class SideEffectsTracker FINAL BASE_EMBEDDED {
  public:
   SideEffectsTracker() : num_global_vars_(0), num_inobject_fields_(0) {}
   SideEffects ComputeChanges(HInstruction* instr);
   SideEffects ComputeDependsOn(HInstruction* instr);
-  void PrintSideEffectsTo(StringStream* stream, SideEffects side_effects) const;
 
  private:
+  friend OStream& operator<<(OStream& os, const TrackedEffects& f);
   bool ComputeGlobalVar(Unique<Cell> cell, int* index);
   bool ComputeInobjectField(HObjectAccess access, int* index);
 
   static int GlobalVar(int index) {
-    ASSERT(index >= 0);
-    ASSERT(index < kNumberOfGlobalVars);
+    DCHECK(index >= 0);
+    DCHECK(index < kNumberOfGlobalVars);
     return index;
   }
   static int InobjectField(int index) {
-    ASSERT(index >= 0);
-    ASSERT(index < kNumberOfInobjectFields);
+    DCHECK(index >= 0);
+    DCHECK(index < kNumberOfInobjectFields);
     return index + kNumberOfGlobalVars;
   }
 
@@ -95,8 +98,20 @@
 };
 
 
+// Helper class for printing, because the effects don't know their tracker.
+struct TrackedEffects {
+  TrackedEffects(SideEffectsTracker* t, SideEffects e)
+      : tracker(t), effects(e) {}
+  SideEffectsTracker* tracker;
+  SideEffects effects;
+};
+
+
+OStream& operator<<(OStream& os, const TrackedEffects& f);
+
+
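// Illustrative usage of the adapter (hypothetical values; the Print() helper
// in HGlobalValueNumberingPhase below builds exactly this pair):
//
//   SideEffectsTracker tracker;
//   SideEffects changes = tracker.ComputeChanges(instr);
//   OFStream os(stdout);
//   os << TrackedEffects(&tracker, changes);  // e.g. "[Maps, [0x...]]"
//
// Carrying the tracker along lets operator<< translate the "special" bits
// back into the tracked global variables and inobject fields.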
 // Perform common subexpression elimination and loop-invariant code motion.
-class HGlobalValueNumberingPhase V8_FINAL : public HPhase {
+class HGlobalValueNumberingPhase FINAL : public HPhase {
  public:
   explicit HGlobalValueNumberingPhase(HGraph* graph);
 
@@ -114,6 +129,9 @@
                         SideEffects loop_kills);
   bool AllowCodeMotion();
   bool ShouldMove(HInstruction* instr, HBasicBlock* loop_header);
+  TrackedEffects Print(SideEffects side_effects) {
+    return TrackedEffects(&side_effects_tracker_, side_effects);
+  }
 
   SideEffectsTracker side_effects_tracker_;
   bool removed_side_effects_;
diff --git a/src/hydrogen-infer-types.cc b/src/hydrogen-infer-types.cc
index 0b7c24b..e69b4fa 100644
--- a/src/hydrogen-infer-types.cc
+++ b/src/hydrogen-infer-types.cc
@@ -46,7 +46,7 @@
           }
         }
       }
-      ASSERT(in_worklist_.IsEmpty());
+      DCHECK(in_worklist_.IsEmpty());
     }
   }
 }
diff --git a/src/hydrogen-instructions.cc b/src/hydrogen-instructions.cc
index 8b40a24..a057217 100644
--- a/src/hydrogen-instructions.cc
+++ b/src/hydrogen-instructions.cc
@@ -4,27 +4,32 @@
 
 #include "src/v8.h"
 
+#include "src/base/bits.h"
 #include "src/double.h"
 #include "src/factory.h"
 #include "src/hydrogen-infer-representation.h"
 #include "src/property-details-inl.h"
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h"
+#include "src/ia32/lithium-ia32.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h"
+#include "src/x64/lithium-x64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h"
+#include "src/arm64/lithium-arm64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h"
+#include "src/arm/lithium-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h"
+#include "src/mips/lithium-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h"
+#include "src/x87/lithium-x87.h"  // NOLINT
 #else
 #error Unsupported target architecture.
 #endif
 
+#include "src/base/safe_math.h"
+
 namespace v8 {
 namespace internal {
 
@@ -37,7 +42,7 @@
 
 
 Isolate* HValue::isolate() const {
-  ASSERT(block() != NULL);
+  DCHECK(block() != NULL);
   return block()->isolate();
 }
 
@@ -53,7 +58,7 @@
 
 
 void HValue::InferRepresentation(HInferRepresentationPhase* h_infer) {
-  ASSERT(CheckFlag(kFlexibleRepresentation));
+  DCHECK(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
   new_rep = RepresentationFromUses();
@@ -288,7 +293,7 @@
 
 #ifdef DEBUG
 void Range::Verify() const {
-  ASSERT(lower_ <= upper_);
+  DCHECK(lower_ <= upper_);
 }
 #endif
 
@@ -417,7 +422,7 @@
     if (OperandAt(i)->id() != other->OperandAt(i)->id()) return false;
   }
   bool result = DataEquals(other);
-  ASSERT(!result || Hashcode() == other->Hashcode());
+  DCHECK(!result || Hashcode() == other->Hashcode());
   return result;
 }
 
@@ -489,7 +494,7 @@
   while (use_list_ != NULL) {
     HUseListNode* list_node = use_list_;
     HValue* value = list_node->value();
-    ASSERT(!value->block()->IsStartBlock());
+    DCHECK(!value->block()->IsStartBlock());
     value->InternalSetOperandAt(list_node->index(), other);
     use_list_ = list_node->tail();
     list_node->set_tail(other->use_list_);
@@ -515,7 +520,7 @@
 
 
 void HValue::SetBlock(HBasicBlock* block) {
-  ASSERT(block_ == NULL || block == NULL);
+  DCHECK(block_ == NULL || block == NULL);
   block_ = block;
   if (id_ == kNoNumber && block != NULL) {
     id_ = block->graph()->GetNextValueID(this);
@@ -523,36 +528,36 @@
 }
 
 
-void HValue::PrintTypeTo(StringStream* stream) {
-  if (!representation().IsTagged() || type().Equals(HType::Tagged())) return;
-  stream->Add(" type:%s", type().ToString());
+OStream& operator<<(OStream& os, const HValue& v) { return v.PrintTo(os); }
+
+
+OStream& operator<<(OStream& os, const TypeOf& t) {
+  if (!t.value->representation().IsTagged() ||
+      t.value->type().Equals(HType::Tagged()))
+    return os;
+  return os << " type:" << t.value->type();
 }
 
 
-void HValue::PrintChangesTo(StringStream* stream) {
-  GVNFlagSet changes_flags = ChangesFlags();
-  if (changes_flags.IsEmpty()) return;
-  stream->Add(" changes[");
-  if (changes_flags == AllSideEffectsFlagSet()) {
-    stream->Add("*");
+OStream& operator<<(OStream& os, const ChangesOf& c) {
+  GVNFlagSet changes_flags = c.value->ChangesFlags();
+  if (changes_flags.IsEmpty()) return os;
+  os << " changes[";
+  if (changes_flags == c.value->AllSideEffectsFlagSet()) {
+    os << "*";
   } else {
     bool add_comma = false;
-#define PRINT_DO(Type)                      \
-    if (changes_flags.Contains(k##Type)) {  \
-      if (add_comma) stream->Add(",");      \
-      add_comma = true;                     \
-      stream->Add(#Type);                   \
-    }
+#define PRINT_DO(Type)                   \
+  if (changes_flags.Contains(k##Type)) { \
+    if (add_comma) os << ",";            \
+    add_comma = true;                    \
+    os << #Type;                         \
+  }
     GVN_TRACKED_FLAG_LIST(PRINT_DO);
     GVN_UNTRACKED_FLAG_LIST(PRINT_DO);
 #undef PRINT_DO
   }
-  stream->Add("]");
-}
-
-
-void HValue::PrintNameTo(StringStream* stream) {
-  stream->Add("%s%d", representation_.Mnemonic(), id());
+  return os << "]";
 }
 
 
@@ -593,74 +598,63 @@
 void HValue::AddNewRange(Range* r, Zone* zone) {
   if (!HasRange()) ComputeInitialRange(zone);
   if (!HasRange()) range_ = new(zone) Range();
-  ASSERT(HasRange());
+  DCHECK(HasRange());
   r->StackUpon(range_);
   range_ = r;
 }
 
 
 void HValue::RemoveLastAddedRange() {
-  ASSERT(HasRange());
-  ASSERT(range_->next() != NULL);
+  DCHECK(HasRange());
+  DCHECK(range_->next() != NULL);
   range_ = range_->next();
 }
 
 
 void HValue::ComputeInitialRange(Zone* zone) {
-  ASSERT(!HasRange());
+  DCHECK(!HasRange());
   range_ = InferRange(zone);
-  ASSERT(HasRange());
+  DCHECK(HasRange());
 }
 
 
-void HSourcePosition::PrintTo(FILE* out) {
-  if (IsUnknown()) {
-    PrintF(out, "<?>");
+OStream& operator<<(OStream& os, const HSourcePosition& p) {
+  if (p.IsUnknown()) {
+    return os << "<?>";
+  } else if (FLAG_hydrogen_track_positions) {
+    return os << "<" << p.inlining_id() << ":" << p.position() << ">";
   } else {
-    if (FLAG_hydrogen_track_positions) {
-      PrintF(out, "<%d:%d>", inlining_id(), position());
-    } else {
-      PrintF(out, "<0:%d>", raw());
-    }
+    return os << "<0:" << p.raw() << ">";
   }
 }
 
 
-void HInstruction::PrintTo(StringStream* stream) {
-  PrintMnemonicTo(stream);
-  PrintDataTo(stream);
-  PrintChangesTo(stream);
-  PrintTypeTo(stream);
-  if (CheckFlag(HValue::kHasNoObservableSideEffects)) {
-    stream->Add(" [noOSE]");
-  }
-  if (CheckFlag(HValue::kIsDead)) {
-    stream->Add(" [dead]");
-  }
+OStream& HInstruction::PrintTo(OStream& os) const {  // NOLINT
+  os << Mnemonic() << " ";
+  PrintDataTo(os) << ChangesOf(this) << TypeOf(this);
+  if (CheckFlag(HValue::kHasNoObservableSideEffects)) os << " [noOSE]";
+  if (CheckFlag(HValue::kIsDead)) os << " [dead]";
+  return os;
 }
 
 
-void HInstruction::PrintDataTo(StringStream *stream) {
+OStream& HInstruction::PrintDataTo(OStream& os) const {  // NOLINT
   for (int i = 0; i < OperandCount(); ++i) {
-    if (i > 0) stream->Add(" ");
-    OperandAt(i)->PrintNameTo(stream);
+    if (i > 0) os << " ";
+    os << NameOf(OperandAt(i));
   }
-}
-
-
-void HInstruction::PrintMnemonicTo(StringStream* stream) {
-  stream->Add("%s ", Mnemonic());
+  return os;
 }
 
 
 void HInstruction::Unlink() {
-  ASSERT(IsLinked());
-  ASSERT(!IsControlInstruction());  // Must never move control instructions.
-  ASSERT(!IsBlockEntry());  // Doesn't make sense to delete these.
-  ASSERT(previous_ != NULL);
+  DCHECK(IsLinked());
+  DCHECK(!IsControlInstruction());  // Must never move control instructions.
+  DCHECK(!IsBlockEntry());  // Doesn't make sense to delete these.
+  DCHECK(previous_ != NULL);
   previous_->next_ = next_;
   if (next_ == NULL) {
-    ASSERT(block()->last() == this);
+    DCHECK(block()->last() == this);
     block()->set_last(previous_);
   } else {
     next_->previous_ = previous_;
@@ -670,11 +664,11 @@
 
 
 void HInstruction::InsertBefore(HInstruction* next) {
-  ASSERT(!IsLinked());
-  ASSERT(!next->IsBlockEntry());
-  ASSERT(!IsControlInstruction());
-  ASSERT(!next->block()->IsStartBlock());
-  ASSERT(next->previous_ != NULL);
+  DCHECK(!IsLinked());
+  DCHECK(!next->IsBlockEntry());
+  DCHECK(!IsControlInstruction());
+  DCHECK(!next->block()->IsStartBlock());
+  DCHECK(next->previous_ != NULL);
   HInstruction* prev = next->previous();
   prev->next_ = this;
   next->previous_ = this;
@@ -688,14 +682,14 @@
 
 
 void HInstruction::InsertAfter(HInstruction* previous) {
-  ASSERT(!IsLinked());
-  ASSERT(!previous->IsControlInstruction());
-  ASSERT(!IsControlInstruction() || previous->next_ == NULL);
+  DCHECK(!IsLinked());
+  DCHECK(!previous->IsControlInstruction());
+  DCHECK(!IsControlInstruction() || previous->next_ == NULL);
   HBasicBlock* block = previous->block();
   // Never insert anything except constants into the start block after finishing
   // it.
   if (block->IsStartBlock() && block->IsFinished() && !IsConstant()) {
-    ASSERT(block->end()->SecondSuccessor() == NULL);
+    DCHECK(block->end()->SecondSuccessor() == NULL);
     InsertAfter(block->end()->FirstSuccessor()->first());
     return;
   }
@@ -705,7 +699,7 @@
   // simulate instruction instead.
   HInstruction* next = previous->next_;
   if (previous->HasObservableSideEffects() && next != NULL) {
-    ASSERT(next->IsSimulate());
+    DCHECK(next->IsSimulate());
     previous = next;
     next = previous->next_;
   }
@@ -755,19 +749,19 @@
           cur = cur->previous();
         }
         // Must reach other operand in the same block!
-        ASSERT(cur == other_operand);
+        DCHECK(cur == other_operand);
       }
     } else {
       // If the following assert fires, you may have forgotten an
       // AddInstruction.
-      ASSERT(other_block->Dominates(cur_block));
+      DCHECK(other_block->Dominates(cur_block));
     }
   }
 
   // Verify that instructions that may have side-effects are followed
   // by a simulate instruction.
   if (HasObservableSideEffects() && !IsOsrEntry()) {
-    ASSERT(next()->IsSimulate());
+    DCHECK(next()->IsSimulate());
   }
 
   // Verify that instructions that can be eliminated by GVN have overridden
@@ -778,7 +772,7 @@
   // Verify that all uses are in the graph.
   for (HUseIterator use = uses(); !use.Done(); use.Advance()) {
     if (use.value()->IsInstruction()) {
-      ASSERT(HInstruction::cast(use.value())->IsLinked());
+      DCHECK(HInstruction::cast(use.value())->IsLinked());
     }
   }
 }
@@ -852,6 +846,7 @@
     case HValue::kStoreNamedGeneric:
     case HValue::kStringCharCodeAt:
     case HValue::kStringCharFromCode:
+    case HValue::kTailCallThroughMegamorphicCache:
     case HValue::kThisFunction:
     case HValue::kTypeofIsAndBranch:
     case HValue::kUnknownOSRValue:
@@ -917,27 +912,28 @@
 }
 
 
-void HDummyUse::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
+OStream& operator<<(OStream& os, const NameOf& v) {
+  return os << v.value->representation().Mnemonic() << v.value->id();
+}
+
+OStream& HDummyUse::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(value());
 }
 
 
-void HEnvironmentMarker::PrintDataTo(StringStream* stream) {
-  stream->Add("%s var[%d]", kind() == BIND ? "bind" : "lookup", index());
+OStream& HEnvironmentMarker::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << (kind() == BIND ? "bind" : "lookup") << " var[" << index()
+            << "]";
 }
 
 
-void HUnaryCall::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(" ");
-  stream->Add("#%d", argument_count());
+OStream& HUnaryCall::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(value()) << " #" << argument_count();
 }
 
 
-void HCallJSFunction::PrintDataTo(StringStream* stream) {
-  function()->PrintNameTo(stream);
-  stream->Add(" ");
-  stream->Add("#%d", argument_count());
+OStream& HCallJSFunction::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(function()) << " #" << argument_count();
 }
 
 
@@ -963,14 +959,9 @@
 }
 
 
-
-
-void HBinaryCall::PrintDataTo(StringStream* stream) {
-  first()->PrintNameTo(stream);
-  stream->Add(" ");
-  second()->PrintNameTo(stream);
-  stream->Add(" ");
-  stream->Add("#%d", argument_count());
+OStream& HBinaryCall::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(first()) << " " << NameOf(second()) << " #"
+            << argument_count();
 }
 
 
@@ -980,7 +971,7 @@
   DecompositionResult decomposition;
   bool index_is_decomposable = index()->TryDecompose(&decomposition);
   if (index_is_decomposable) {
-    ASSERT(decomposition.base() == base());
+    DCHECK(decomposition.base() == base());
     if (decomposition.offset() == offset() &&
         decomposition.scale() == scale()) return;
   } else {
@@ -1024,27 +1015,24 @@
 }
 
 
-void HBoundsCheck::PrintDataTo(StringStream* stream) {
-  index()->PrintNameTo(stream);
-  stream->Add(" ");
-  length()->PrintNameTo(stream);
+OStream& HBoundsCheck::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(index()) << " " << NameOf(length());
   if (base() != NULL && (offset() != 0 || scale() != 0)) {
-    stream->Add(" base: ((");
+    os << " base: ((";
     if (base() != index()) {
-      index()->PrintNameTo(stream);
+      os << NameOf(index());
     } else {
-      stream->Add("index");
+      os << "index";
     }
-    stream->Add(" + %d) >> %d)", offset(), scale());
+    os << " + " << offset() << ") >> " << scale() << ")";
   }
-  if (skip_check()) {
-    stream->Add(" [DISABLED]");
-  }
+  if (skip_check()) os << " [DISABLED]";
+  return os;
 }
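// Hypothetical output of the printer above, for an index that was decomposed
// into (base + offset) >> scale and a check that was later disabled:
//
//   i12 i7 base: ((i12 + 8) >> 2) [DISABLED]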
 
 
 void HBoundsCheck::InferRepresentation(HInferRepresentationPhase* h_infer) {
-  ASSERT(CheckFlag(kFlexibleRepresentation));
+  DCHECK(CheckFlag(kFlexibleRepresentation));
   HValue* actual_index = index()->ActualValue();
   HValue* actual_length = length()->ActualValue();
   Representation index_rep = actual_index->representation();
@@ -1082,91 +1070,78 @@
 }
 
 
-void HBoundsCheckBaseIndexInformation::PrintDataTo(StringStream* stream) {
-  stream->Add("base: ");
-  base_index()->PrintNameTo(stream);
-  stream->Add(", check: ");
-  base_index()->PrintNameTo(stream);
+OStream& HBoundsCheckBaseIndexInformation::PrintDataTo(
+    OStream& os) const {  // NOLINT
+  // TODO(svenpanne) This 2nd base_index() looks wrong...
+  return os << "base: " << NameOf(base_index())
+            << ", check: " << NameOf(base_index());
 }
 
 
-void HCallWithDescriptor::PrintDataTo(StringStream* stream) {
+OStream& HCallWithDescriptor::PrintDataTo(OStream& os) const {  // NOLINT
   for (int i = 0; i < OperandCount(); i++) {
-    OperandAt(i)->PrintNameTo(stream);
-    stream->Add(" ");
+    os << NameOf(OperandAt(i)) << " ";
   }
-  stream->Add("#%d", argument_count());
+  return os << "#" << argument_count();
 }
 
 
-void HCallNewArray::PrintDataTo(StringStream* stream) {
-  stream->Add(ElementsKindToString(elements_kind()));
-  stream->Add(" ");
-  HBinaryCall::PrintDataTo(stream);
+OStream& HCallNewArray::PrintDataTo(OStream& os) const {  // NOLINT
+  os << ElementsKindToString(elements_kind()) << " ";
+  return HBinaryCall::PrintDataTo(os);
 }
 
 
-void HCallRuntime::PrintDataTo(StringStream* stream) {
-  stream->Add("%o ", *name());
-  if (save_doubles() == kSaveFPRegs) {
-    stream->Add("[save doubles] ");
-  }
-  stream->Add("#%d", argument_count());
+OStream& HCallRuntime::PrintDataTo(OStream& os) const {  // NOLINT
+  os << name()->ToCString().get() << " ";
+  if (save_doubles() == kSaveFPRegs) os << "[save doubles] ";
+  return os << "#" << argument_count();
 }
 
 
-void HClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add("class_of_test(");
-  value()->PrintNameTo(stream);
-  stream->Add(", \"%o\")", *class_name());
+OStream& HClassOfTestAndBranch::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << "class_of_test(" << NameOf(value()) << ", \""
+            << class_name()->ToCString().get() << "\")";
 }
 
 
-void HWrapReceiver::PrintDataTo(StringStream* stream) {
-  receiver()->PrintNameTo(stream);
-  stream->Add(" ");
-  function()->PrintNameTo(stream);
+OStream& HWrapReceiver::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(receiver()) << " " << NameOf(function());
 }
 
 
-void HAccessArgumentsAt::PrintDataTo(StringStream* stream) {
-  arguments()->PrintNameTo(stream);
-  stream->Add("[");
-  index()->PrintNameTo(stream);
-  stream->Add("], length ");
-  length()->PrintNameTo(stream);
+OStream& HAccessArgumentsAt::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(arguments()) << "[" << NameOf(index()) << "], length "
+            << NameOf(length());
 }
 
 
-void HAllocateBlockContext::PrintDataTo(StringStream* stream) {
-  context()->PrintNameTo(stream);
-  stream->Add(" ");
-  function()->PrintNameTo(stream);
+OStream& HAllocateBlockContext::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(context()) << " " << NameOf(function());
 }
 
 
-void HControlInstruction::PrintDataTo(StringStream* stream) {
-  stream->Add(" goto (");
+OStream& HControlInstruction::PrintDataTo(OStream& os) const {  // NOLINT
+  os << " goto (";
   bool first_block = true;
   for (HSuccessorIterator it(this); !it.Done(); it.Advance()) {
-    stream->Add(first_block ? "B%d" : ", B%d", it.Current()->block_id());
+    if (!first_block) os << ", ";
+    os << *it.Current();
     first_block = false;
   }
-  stream->Add(")");
+  return os << ")";
 }
 
 
-void HUnaryControlInstruction::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  HControlInstruction::PrintDataTo(stream);
+OStream& HUnaryControlInstruction::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(value());
+  return HControlInstruction::PrintDataTo(os);
 }
 
 
-void HReturn::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(" (pop ");
-  parameter_count()->PrintNameTo(stream);
-  stream->Add(" values)");
+OStream& HReturn::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(value()) << " (pop " << NameOf(parameter_count())
+            << " values)";
 }
 
 
@@ -1198,8 +1173,8 @@
 bool HBranch::KnownSuccessorBlock(HBasicBlock** block) {
   HValue* value = this->value();
   if (value->EmitAtUses()) {
-    ASSERT(value->IsConstant());
-    ASSERT(!value->representation().IsDouble());
+    DCHECK(value->IsConstant());
+    DCHECK(!value->representation().IsDouble());
     *block = HConstant::cast(value)->BooleanValue()
         ? FirstSuccessor()
         : SecondSuccessor();
@@ -1210,35 +1185,44 @@
 }
 
 
-void HBranch::PrintDataTo(StringStream* stream) {
-  HUnaryControlInstruction::PrintDataTo(stream);
-  stream->Add(" ");
-  expected_input_types().Print(stream);
+OStream& HBranch::PrintDataTo(OStream& os) const {  // NOLINT
+  return HUnaryControlInstruction::PrintDataTo(os) << " "
+                                                   << expected_input_types();
 }
 
 
-void HCompareMap::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(" (%p)", *map().handle());
-  HControlInstruction::PrintDataTo(stream);
+OStream& HCompareMap::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(value()) << " (" << *map().handle() << ")";
+  HControlInstruction::PrintDataTo(os);
   if (known_successor_index() == 0) {
-    stream->Add(" [true]");
+    os << " [true]";
   } else if (known_successor_index() == 1) {
-    stream->Add(" [false]");
+    os << " [false]";
   }
+  return os;
 }
 
 
 const char* HUnaryMathOperation::OpName() const {
   switch (op()) {
-    case kMathFloor: return "floor";
-    case kMathRound: return "round";
-    case kMathAbs: return "abs";
-    case kMathLog: return "log";
-    case kMathExp: return "exp";
-    case kMathSqrt: return "sqrt";
-    case kMathPowHalf: return "pow-half";
-    case kMathClz32: return "clz32";
+    case kMathFloor:
+      return "floor";
+    case kMathFround:
+      return "fround";
+    case kMathRound:
+      return "round";
+    case kMathAbs:
+      return "abs";
+    case kMathLog:
+      return "log";
+    case kMathExp:
+      return "exp";
+    case kMathSqrt:
+      return "sqrt";
+    case kMathPowHalf:
+      return "pow-half";
+    case kMathClz32:
+      return "clz32";
     default:
       UNREACHABLE();
       return NULL;
@@ -1271,43 +1255,41 @@
 }
 
 
-void HUnaryMathOperation::PrintDataTo(StringStream* stream) {
-  const char* name = OpName();
-  stream->Add("%s ", name);
-  value()->PrintNameTo(stream);
+OStream& HUnaryMathOperation::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << OpName() << " " << NameOf(value());
 }
 
 
-void HUnaryOperation::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
+OStream& HUnaryOperation::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(value());
 }
 
 
-void HHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
+OStream& HHasInstanceTypeAndBranch::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(value());
   switch (from_) {
     case FIRST_JS_RECEIVER_TYPE:
-      if (to_ == LAST_TYPE) stream->Add(" spec_object");
+      if (to_ == LAST_TYPE) os << " spec_object";
       break;
     case JS_REGEXP_TYPE:
-      if (to_ == JS_REGEXP_TYPE) stream->Add(" reg_exp");
+      if (to_ == JS_REGEXP_TYPE) os << " reg_exp";
       break;
     case JS_ARRAY_TYPE:
-      if (to_ == JS_ARRAY_TYPE) stream->Add(" array");
+      if (to_ == JS_ARRAY_TYPE) os << " array";
       break;
     case JS_FUNCTION_TYPE:
-      if (to_ == JS_FUNCTION_TYPE) stream->Add(" function");
+      if (to_ == JS_FUNCTION_TYPE) os << " function";
       break;
     default:
       break;
   }
+  return os;
 }
 
 
-void HTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(" == %o", *type_literal_.handle());
-  HControlInstruction::PrintDataTo(stream);
+OStream& HTypeofIsAndBranch::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(value()) << " == " << type_literal()->ToCString().get();
+  return HControlInstruction::PrintDataTo(os);
 }
 
 
@@ -1324,10 +1306,9 @@
         return heap->boolean_string();
       }
       if (unique.IsKnownGlobal(heap->null_value())) {
-        return FLAG_harmony_typeof ? heap->null_string()
-                                   : heap->object_string();
+        return heap->object_string();
       }
-      ASSERT(unique.IsKnownGlobal(heap->undefined_value()));
+      DCHECK(unique.IsKnownGlobal(heap->undefined_value()));
       return heap->undefined_string();
     }
     case SYMBOL_TYPE:
@@ -1359,10 +1340,8 @@
 }
 
 
-void HCheckMapValue::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(" ");
-  map()->PrintNameTo(stream);
+OStream& HCheckMapValue::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(value()) << " " << NameOf(map());
 }
 
 
@@ -1377,23 +1356,19 @@
 }
 
 
-void HForInPrepareMap::PrintDataTo(StringStream* stream) {
-  enumerable()->PrintNameTo(stream);
+OStream& HForInPrepareMap::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(enumerable());
 }
 
 
-void HForInCacheArray::PrintDataTo(StringStream* stream) {
-  enumerable()->PrintNameTo(stream);
-  stream->Add(" ");
-  map()->PrintNameTo(stream);
-  stream->Add("[%d]", idx_);
+OStream& HForInCacheArray::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(enumerable()) << " " << NameOf(map()) << "[" << idx_
+            << "]";
 }
 
 
-void HLoadFieldByIndex::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  stream->Add(" ");
-  index()->PrintNameTo(stream);
+OStream& HLoadFieldByIndex::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(object()) << " " << NameOf(index());
 }
 
 
@@ -1529,8 +1504,8 @@
 }
 
 
-void HTypeof::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
+OStream& HTypeof::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(value());
 }
 
 
@@ -1538,36 +1513,27 @@
        HValue* value, Representation representation) {
   if (FLAG_fold_constants && value->IsConstant()) {
     HConstant* c = HConstant::cast(value);
-    if (c->HasNumberValue()) {
-      double double_res = c->DoubleValue();
-      if (representation.IsDouble()) {
-        return HConstant::New(zone, context, double_res);
-
-      } else if (representation.CanContainDouble(double_res)) {
-        return HConstant::New(zone, context,
-                              static_cast<int32_t>(double_res),
-                              representation);
-      }
-    }
+    c = c->CopyToRepresentation(representation, zone);
+    if (c != NULL) return c;
   }
   return new(zone) HForceRepresentation(value, representation);
 }
 
 
-void HForceRepresentation::PrintDataTo(StringStream* stream) {
-  stream->Add("%s ", representation().Mnemonic());
-  value()->PrintNameTo(stream);
+OStream& HForceRepresentation::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << representation().Mnemonic() << " " << NameOf(value());
 }
 
 
-void HChange::PrintDataTo(StringStream* stream) {
-  HUnaryOperation::PrintDataTo(stream);
-  stream->Add(" %s to %s", from().Mnemonic(), to().Mnemonic());
+OStream& HChange::PrintDataTo(OStream& os) const {  // NOLINT
+  HUnaryOperation::PrintDataTo(os);
+  os << " " << from().Mnemonic() << " to " << to().Mnemonic();
 
-  if (CanTruncateToSmi()) stream->Add(" truncating-smi");
-  if (CanTruncateToInt32()) stream->Add(" truncating-int32");
-  if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
-  if (CheckFlag(kAllowUndefinedAsNaN)) stream->Add(" allow-undefined-as-nan");
+  if (CanTruncateToSmi()) os << " truncating-smi";
+  if (CanTruncateToInt32()) os << " truncating-int32";
+  if (CheckFlag(kBailoutOnMinusZero)) os << " -0?";
+  if (CheckFlag(kAllowUndefinedAsNaN)) os << " allow-undefined-as-nan";
+  return os;
 }
 
 
@@ -1581,7 +1547,7 @@
           val, representation(), false, false));
     }
   }
-  if (op() == kMathFloor && value()->IsDiv() && value()->UseCount() == 1) {
+  if (op() == kMathFloor && value()->IsDiv() && value()->HasOneUse()) {
     HDiv* hdiv = HDiv::cast(value());
 
     HValue* left = hdiv->left();
@@ -1639,7 +1605,7 @@
 
 void HCheckInstanceType::GetCheckInterval(InstanceType* first,
                                           InstanceType* last) {
-  ASSERT(is_interval_check());
+  DCHECK(is_interval_check());
   switch (check_) {
     case IS_SPEC_OBJECT:
       *first = FIRST_SPEC_OBJECT_TYPE;
@@ -1655,7 +1621,7 @@
 
 
 void HCheckInstanceType::GetCheckMaskAndTag(uint8_t* mask, uint8_t* tag) {
-  ASSERT(!is_interval_check());
+  DCHECK(!is_interval_check());
   switch (check_) {
     case IS_STRING:
       *mask = kIsNotStringMask;
@@ -1671,13 +1637,14 @@
 }
 
 
-void HCheckMaps::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(" [%p", *maps()->at(0).handle());
+OStream& HCheckMaps::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(value()) << " [" << *maps()->at(0).handle();
   for (int i = 1; i < maps()->size(); ++i) {
-    stream->Add(",%p", *maps()->at(i).handle());
+    os << "," << *maps()->at(i).handle();
   }
-  stream->Add("]%s", IsStabilityCheck() ? "(stability-check)" : "");
+  os << "]";
+  if (IsStabilityCheck()) os << "(stability-check)";
+  return os;
 }
 
 
@@ -1701,10 +1668,8 @@
 }
 
 
-void HCheckValue::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add(" ");
-  object().handle()->ShortPrint(stream);
+OStream& HCheckValue::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(value()) << " " << Brief(*object().handle());
 }
 
 
@@ -1714,7 +1679,7 @@
 }
 
 
-const char* HCheckInstanceType::GetCheckName() {
+const char* HCheckInstanceType::GetCheckName() const {
   switch (check_) {
     case IS_SPEC_OBJECT: return "object";
     case IS_JS_ARRAY: return "array";
@@ -1726,34 +1691,39 @@
 }
 
 
-void HCheckInstanceType::PrintDataTo(StringStream* stream) {
-  stream->Add("%s ", GetCheckName());
-  HUnaryOperation::PrintDataTo(stream);
+OStream& HCheckInstanceType::PrintDataTo(OStream& os) const {  // NOLINT
+  os << GetCheckName() << " ";
+  return HUnaryOperation::PrintDataTo(os);
 }
 
 
-void HCallStub::PrintDataTo(StringStream* stream) {
-  stream->Add("%s ",
-              CodeStub::MajorName(major_key_, false));
-  HUnaryCall::PrintDataTo(stream);
+OStream& HCallStub::PrintDataTo(OStream& os) const {  // NOLINT
+  os << CodeStub::MajorName(major_key_, false) << " ";
+  return HUnaryCall::PrintDataTo(os);
 }
 
 
-void HUnknownOSRValue::PrintDataTo(StringStream *stream) {
+OStream& HTailCallThroughMegamorphicCache::PrintDataTo(
+    OStream& os) const {  // NOLINT
+  for (int i = 0; i < OperandCount(); i++) {
+    os << NameOf(OperandAt(i)) << " ";
+  }
+  return os << "flags: " << flags();
+}
+
+
+OStream& HUnknownOSRValue::PrintDataTo(OStream& os) const {  // NOLINT
   const char* type = "expression";
   if (environment_->is_local_index(index_)) type = "local";
   if (environment_->is_special_index(index_)) type = "special";
   if (environment_->is_parameter_index(index_)) type = "parameter";
-  stream->Add("%s @ %d", type, index_);
+  return os << type << " @ " << index_;
 }
 
 
-void HInstanceOf::PrintDataTo(StringStream* stream) {
-  left()->PrintNameTo(stream);
-  stream->Add(" ");
-  right()->PrintNameTo(stream);
-  stream->Add(" ");
-  context()->PrintNameTo(stream);
+OStream& HInstanceOf::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(left()) << " " << NameOf(right()) << " "
+            << NameOf(context());
 }
 
 
@@ -1963,15 +1933,18 @@
 }
 
 
+// Returns the absolute value of its argument minus one, avoiding undefined
+// behavior at kMinInt.
+static int32_t AbsMinus1(int32_t a) { return a < 0 ? -(a + 1) : (a - 1); }
+
+
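// Worked values for AbsMinus1 (illustrative, 32-bit two's complement):
//
//   AbsMinus1(5)       == 4        // 5 - 1
//   AbsMinus1(-5)      == 4        // -(-5 + 1)
//   AbsMinus1(kMinInt) == kMaxInt  // -(kMinInt + 1) never leaves int32 range
//
// A naive Abs(b) - 1 would evaluate -kMinInt for b == kMinInt, which is
// undefined behavior; hence positive_bound below is computed as
// Max(AbsMinus1(lower), AbsMinus1(upper)) rather than Max(Abs, Abs) - 1.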
 Range* HMod::InferRange(Zone* zone) {
   if (representation().IsInteger32()) {
     Range* a = left()->range();
     Range* b = right()->range();
 
-    // The magnitude of the modulus is bounded by the right operand. Note that
-    // apart for the cases involving kMinInt, the calculation below is the same
-    // as Max(Abs(b->lower()), Abs(b->upper())) - 1.
-    int32_t positive_bound = -(Min(NegAbs(b->lower()), NegAbs(b->upper())) + 1);
+    // The magnitude of the modulus is bounded by the right operand.
+    int32_t positive_bound = Max(AbsMinus1(b->lower()), AbsMinus1(b->upper()));
 
     // The result of the modulo operation has the sign of its left operand.
     bool left_can_be_negative = a->CanBeMinusZero() || a->CanBeNegative();
@@ -2084,7 +2057,7 @@
 
 void InductionVariableData::AddCheck(HBoundsCheck* check,
                                      int32_t upper_limit) {
-  ASSERT(limit_validity() != NULL);
+  DCHECK(limit_validity() != NULL);
   if (limit_validity() != check->block() &&
       !limit_validity()->Dominates(check->block())) return;
   if (!phi()->block()->current_loop()->IsNestedInThisLoop(
@@ -2122,9 +2095,9 @@
     int32_t mask,
     HValue* index_base,
     HValue* context) {
-  ASSERT(first_check_in_block() != NULL);
+  DCHECK(first_check_in_block() != NULL);
   HValue* previous_index = first_check_in_block()->index();
-  ASSERT(context != NULL);
+  DCHECK(context != NULL);
 
   Zone* zone = index_base->block()->graph()->zone();
   set_added_constant(HConstant::New(zone, context, mask));
@@ -2138,18 +2111,18 @@
     first_check_in_block()->ReplaceAllUsesWith(first_check_in_block()->index());
     HInstruction* new_index =  HBitwise::New(zone, context, token, index_base,
                                              added_constant());
-    ASSERT(new_index->IsBitwise());
+    DCHECK(new_index->IsBitwise());
     new_index->ClearAllSideEffects();
     new_index->AssumeRepresentation(Representation::Integer32());
     set_added_index(HBitwise::cast(new_index));
     added_index()->InsertBefore(first_check_in_block());
   }
-  ASSERT(added_index()->op() == token);
+  DCHECK(added_index()->op() == token);
 
   added_index()->SetOperandAt(1, index_base);
   added_index()->SetOperandAt(2, added_constant());
   first_check_in_block()->SetOperandAt(0, added_index());
-  if (previous_index->UseCount() == 0) {
+  if (previous_index->HasNoUses()) {
     previous_index->DeleteAndReplaceWith(NULL);
   }
 }
@@ -2254,7 +2227,7 @@
  */
 void InductionVariableData::UpdateAdditionalLimit(
     InductionVariableLimitUpdate* update) {
-  ASSERT(update->updated_variable == this);
+  DCHECK(update->updated_variable == this);
   if (update->limit_is_upper) {
     swap(&additional_upper_limit_, &update->limit);
     swap(&additional_upper_limit_is_included_, &update->limit_is_included);
@@ -2385,7 +2358,7 @@
   } else {
     other_target = branch->SuccessorAt(0);
     token = Token::NegateCompareOp(token);
-    ASSERT(block == branch->SuccessorAt(1));
+    DCHECK(block == branch->SuccessorAt(1));
   }
 
   InductionVariableData* data;
@@ -2450,7 +2423,7 @@
     if (operation_ == kMathMax) {
       res->CombinedMax(b);
     } else {
-      ASSERT(operation_ == kMathMin);
+      DCHECK(operation_ == kMathMin);
       res->CombinedMin(b);
     }
     return res;
@@ -2466,22 +2439,17 @@
 }
 
 
-void HPhi::PrintTo(StringStream* stream) {
-  stream->Add("[");
+OStream& HPhi::PrintTo(OStream& os) const {  // NOLINT
+  os << "[";
   for (int i = 0; i < OperandCount(); ++i) {
-    HValue* value = OperandAt(i);
-    stream->Add(" ");
-    value->PrintNameTo(stream);
-    stream->Add(" ");
+    os << " " << NameOf(OperandAt(i)) << " ";
   }
-  stream->Add(" uses:%d_%ds_%di_%dd_%dt",
-              UseCount(),
-              smi_non_phi_uses() + smi_indirect_uses(),
-              int32_non_phi_uses() + int32_indirect_uses(),
-              double_non_phi_uses() + double_indirect_uses(),
-              tagged_non_phi_uses() + tagged_indirect_uses());
-  PrintTypeTo(stream);
-  stream->Add("]");
+  return os << " uses:" << UseCount() << "_"
+            << smi_non_phi_uses() + smi_indirect_uses() << "s_"
+            << int32_non_phi_uses() + int32_indirect_uses() << "i_"
+            << double_non_phi_uses() + double_indirect_uses() << "d_"
+            << tagged_non_phi_uses() + tagged_indirect_uses() << "t"
+            << TypeOf(this) << "]";
 }
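// Hypothetical phi output from the printer above: operands first
// (representation mnemonic + id), then total uses split into
// smi/int32/double/tagged buckets, then the optional type annotation:
//
//   [ t41  i12  uses:3_0s_2i_0d_1t type:TaggedNumber]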
 
 
@@ -2515,15 +2483,15 @@
     HValue* current = OperandAt(position++);
     if (current != this && current != candidate) return NULL;
   }
-  ASSERT(candidate != this);
+  DCHECK(candidate != this);
   return candidate;
 }
 
 
 void HPhi::DeleteFromGraph() {
-  ASSERT(block() != NULL);
+  DCHECK(block() != NULL);
   block()->RemovePhi(this);
-  ASSERT(block() == NULL);
+  DCHECK(block() == NULL);
 }
 
 
@@ -2603,27 +2571,28 @@
 }
 
 
-void HSimulate::PrintDataTo(StringStream* stream) {
-  stream->Add("id=%d", ast_id().ToInt());
-  if (pop_count_ > 0) stream->Add(" pop %d", pop_count_);
+OStream& HSimulate::PrintDataTo(OStream& os) const {  // NOLINT
+  os << "id=" << ast_id().ToInt();
+  if (pop_count_ > 0) os << " pop " << pop_count_;
   if (values_.length() > 0) {
-    if (pop_count_ > 0) stream->Add(" /");
+    if (pop_count_ > 0) os << " /";
     for (int i = values_.length() - 1; i >= 0; --i) {
       if (HasAssignedIndexAt(i)) {
-        stream->Add(" var[%d] = ", GetAssignedIndexAt(i));
+        os << " var[" << GetAssignedIndexAt(i) << "] = ";
       } else {
-        stream->Add(" push ");
+        os << " push ";
       }
-      values_[i]->PrintNameTo(stream);
-      if (i > 0) stream->Add(",");
+      os << NameOf(values_[i]);
+      if (i > 0) os << ",";
     }
   }
+  return os;
 }
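// Hypothetical simulate output (values are printed from last to first):
//
//   id=104 pop 2 / push t9, var[3] = t2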
 
 
 void HSimulate::ReplayEnvironment(HEnvironment* env) {
   if (done_with_replay_) return;
-  ASSERT(env != NULL);
+  DCHECK(env != NULL);
   env->set_ast_id(ast_id());
   env->Drop(pop_count());
   for (int i = values()->length() - 1; i >= 0; --i) {
@@ -2656,7 +2625,7 @@
 // Replay captured objects by replacing all captured objects with the
 // same capture id in the current and all outer environments.
 void HCapturedObject::ReplayEnvironment(HEnvironment* env) {
-  ASSERT(env != NULL);
+  DCHECK(env != NULL);
   while (env != NULL) {
     ReplayEnvironmentNested(env->values(), this);
     env = env->outer();
@@ -2664,28 +2633,28 @@
 }
 
 
-void HCapturedObject::PrintDataTo(StringStream* stream) {
-  stream->Add("#%d ", capture_id());
-  HDematerializedObject::PrintDataTo(stream);
+OStream& HCapturedObject::PrintDataTo(OStream& os) const {  // NOLINT
+  os << "#" << capture_id() << " ";
+  return HDematerializedObject::PrintDataTo(os);
 }
 
 
 void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
                                          Zone* zone) {
-  ASSERT(return_target->IsInlineReturnTarget());
+  DCHECK(return_target->IsInlineReturnTarget());
   return_targets_.Add(return_target, zone);
 }
 
 
-void HEnterInlined::PrintDataTo(StringStream* stream) {
-  SmartArrayPointer<char> name = function()->debug_name()->ToCString();
-  stream->Add("%s, id=%d", name.get(), function()->id().ToInt());
+OStream& HEnterInlined::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << function()->debug_name()->ToCString().get()
+            << ", id=" << function()->id().ToInt();
 }
 
 
 static bool IsInteger32(double value) {
   double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
-  return BitCast<int64_t>(roundtrip_value) == BitCast<int64_t>(value);
+  return bit_cast<int64_t>(roundtrip_value) == bit_cast<int64_t>(value);
 }
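// The bit_cast comparison makes the round-trip check exact (illustrative,
// assuming IEEE-754 doubles):
//
//   IsInteger32(1.0)  -> true   // survives the cast bit-for-bit
//   IsInteger32(1.5)  -> false  // truncates to 1.0
//   IsInteger32(-0.0) -> false  // round-trips to +0.0, a different bit pattern
//
// A plain == comparison would wrongly accept -0.0, because -0.0 == 0.0.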
 
 
@@ -2748,8 +2717,8 @@
     boolean_value_(boolean_value),
     is_undetectable_(is_undetectable),
     instance_type_(instance_type) {
-  ASSERT(!object.handle().is_null());
-  ASSERT(!type.IsTaggedNumber() || type.IsNone());
+  DCHECK(!object.handle().is_null());
+  DCHECK(!type.IsTaggedNumber() || type.IsNone());
   Initialize(r);
 }
 
@@ -2846,6 +2815,13 @@
       r = Representation::Tagged();
     }
   }
+  if (r.IsSmi()) {
+    // If we have an existing handle, zap it, because it might be a heap
+    // number which we must not re-use when copying this HConstant to
+    // Tagged representation later: having Smi representation now could
+    // cause heap object checks not to get emitted.
+    object_ = Unique<Object>(Handle<Object>::null());
+  }
   set_representation(r);
   SetFlag(kUseGVN);
 }
@@ -2865,10 +2841,10 @@
     return false;
   }
 
-  ASSERT(!object_.handle().is_null());
+  DCHECK(!object_.handle().is_null());
   Heap* heap = isolate()->heap();
-  ASSERT(!object_.IsKnownGlobal(heap->minus_zero_value()));
-  ASSERT(!object_.IsKnownGlobal(heap->nan_value()));
+  DCHECK(!object_.IsKnownGlobal(heap->minus_zero_value()));
+  DCHECK(!object_.IsKnownGlobal(heap->nan_value()));
   return
 #define IMMORTAL_IMMOVABLE_ROOT(name) \
       object_.IsKnownGlobal(heap->name()) ||
@@ -2887,13 +2863,13 @@
 
 
 bool HConstant::EmitAtUses() {
-  ASSERT(IsLinked());
+  DCHECK(IsLinked());
   if (block()->graph()->has_osr() &&
       block()->graph()->IsStandardConstant(this)) {
     // TODO(titzer): this seems like a hack that should be fixed by custom OSR.
     return true;
   }
-  if (UseCount() == 0) return true;
+  if (HasNoUses()) return true;
   if (IsCell()) return false;
   if (representation().IsDouble()) return false;
   if (representation().IsExternal()) return false;
@@ -2915,7 +2891,7 @@
   if (has_external_reference_value_) {
     return new(zone) HConstant(external_reference_value_);
   }
-  ASSERT(!object_.handle().is_null());
+  DCHECK(!object_.handle().is_null());
   return new(zone) HConstant(object_,
                              object_map_,
                              has_stable_map_value_,
@@ -2952,7 +2928,7 @@
     res = handle->BooleanValue() ?
       new(zone) HConstant(1) : new(zone) HConstant(0);
   } else if (handle->IsUndefined()) {
-    res = new(zone) HConstant(OS::nan_value());
+    res = new(zone) HConstant(base::OS::nan_value());
   } else if (handle->IsNull()) {
     res = new(zone) HConstant(0);
   }
@@ -2960,41 +2936,35 @@
 }
 
 
-void HConstant::PrintDataTo(StringStream* stream) {
+OStream& HConstant::PrintDataTo(OStream& os) const {  // NOLINT
   if (has_int32_value_) {
-    stream->Add("%d ", int32_value_);
+    os << int32_value_ << " ";
   } else if (has_double_value_) {
-    stream->Add("%f ", FmtElm(double_value_));
+    os << double_value_ << " ";
   } else if (has_external_reference_value_) {
-    stream->Add("%p ", reinterpret_cast<void*>(
-            external_reference_value_.address()));
+    os << reinterpret_cast<void*>(external_reference_value_.address()) << " ";
   } else {
-    handle(Isolate::Current())->ShortPrint(stream);
-    stream->Add(" ");
-    if (HasStableMapValue()) {
-      stream->Add("[stable-map] ");
-    }
-    if (HasObjectMap()) {
-      stream->Add("[map %p] ", *ObjectMap().handle());
-    }
+    // The handle() method is silently and lazily mutating the object.
+    Handle<Object> h = const_cast<HConstant*>(this)->handle(Isolate::Current());
+    os << Brief(*h) << " ";
+    if (HasStableMapValue()) os << "[stable-map] ";
+    if (HasObjectMap()) os << "[map " << *ObjectMap().handle() << "] ";
   }
-  if (!is_not_in_new_space_) {
-    stream->Add("[new space] ");
-  }
+  if (!is_not_in_new_space_) os << "[new space] ";
+  return os;
 }
 
 
-void HBinaryOperation::PrintDataTo(StringStream* stream) {
-  left()->PrintNameTo(stream);
-  stream->Add(" ");
-  right()->PrintNameTo(stream);
-  if (CheckFlag(kCanOverflow)) stream->Add(" !");
-  if (CheckFlag(kBailoutOnMinusZero)) stream->Add(" -0?");
+OStream& HBinaryOperation::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(left()) << " " << NameOf(right());
+  if (CheckFlag(kCanOverflow)) os << " !";
+  if (CheckFlag(kBailoutOnMinusZero)) os << " -0?";
+  return os;
 }
 
 
 void HBinaryOperation::InferRepresentation(HInferRepresentationPhase* h_infer) {
-  ASSERT(CheckFlag(kFlexibleRepresentation));
+  DCHECK(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
 
@@ -3061,7 +3031,7 @@
 
 
 void HMathMinMax::InferRepresentation(HInferRepresentationPhase* h_infer) {
-  ASSERT(CheckFlag(kFlexibleRepresentation));
+  DCHECK(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
   // Do not care about uses.
@@ -3212,35 +3182,27 @@
 }
 
 
-void HCompareGeneric::PrintDataTo(StringStream* stream) {
-  stream->Add(Token::Name(token()));
-  stream->Add(" ");
-  HBinaryOperation::PrintDataTo(stream);
+OStream& HCompareGeneric::PrintDataTo(OStream& os) const {  // NOLINT
+  os << Token::Name(token()) << " ";
+  return HBinaryOperation::PrintDataTo(os);
 }
 
 
-void HStringCompareAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add(Token::Name(token()));
-  stream->Add(" ");
-  HControlInstruction::PrintDataTo(stream);
+OStream& HStringCompareAndBranch::PrintDataTo(OStream& os) const {  // NOLINT
+  os << Token::Name(token()) << " ";
+  return HControlInstruction::PrintDataTo(os);
 }
 
 
-void HCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
-  stream->Add(Token::Name(token()));
-  stream->Add(" ");
-  left()->PrintNameTo(stream);
-  stream->Add(" ");
-  right()->PrintNameTo(stream);
-  HControlInstruction::PrintDataTo(stream);
+OStream& HCompareNumericAndBranch::PrintDataTo(OStream& os) const {  // NOLINT
+  os << Token::Name(token()) << " " << NameOf(left()) << " " << NameOf(right());
+  return HControlInstruction::PrintDataTo(os);
 }
 
 
-void HCompareObjectEqAndBranch::PrintDataTo(StringStream* stream) {
-  left()->PrintNameTo(stream);
-  stream->Add(" ");
-  right()->PrintNameTo(stream);
-  HControlInstruction::PrintDataTo(stream);
+OStream& HCompareObjectEqAndBranch::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(left()) << " " << NameOf(right());
+  return HControlInstruction::PrintDataTo(os);
 }
 
 
@@ -3378,9 +3340,8 @@
 }
 
 
-
-void HGoto::PrintDataTo(StringStream* stream) {
-  stream->Add("B%d", SuccessorAt(0)->block_id());
+OStream& HGoto::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << *SuccessorAt(0);
 }
 
 
@@ -3423,64 +3384,65 @@
 }
 
 
-void HParameter::PrintDataTo(StringStream* stream) {
-  stream->Add("%u", index());
+OStream& HParameter::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << index();
 }
 
 
-void HLoadNamedField::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  access_.PrintTo(stream);
+OStream& HLoadNamedField::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(object()) << access_;
 
   if (maps() != NULL) {
-    stream->Add(" [%p", *maps()->at(0).handle());
+    os << " [" << *maps()->at(0).handle();
     for (int i = 1; i < maps()->size(); ++i) {
-      stream->Add(",%p", *maps()->at(i).handle());
+      os << "," << *maps()->at(i).handle();
     }
-    stream->Add("]");
+    os << "]";
   }
 
-  if (HasDependency()) {
-    stream->Add(" ");
-    dependency()->PrintNameTo(stream);
-  }
+  if (HasDependency()) os << " " << NameOf(dependency());
+  return os;
 }
 
 
-void HLoadNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  stream->Add(".");
-  stream->Add(String::cast(*name())->ToCString().get());
+OStream& HLoadNamedGeneric::PrintDataTo(OStream& os) const {  // NOLINT
+  Handle<String> n = Handle<String>::cast(name());
+  return os << NameOf(object()) << "." << n->ToCString().get();
 }
 
 
-void HLoadKeyed::PrintDataTo(StringStream* stream) {
+OStream& HLoadKeyed::PrintDataTo(OStream& os) const {  // NOLINT
   if (!is_external()) {
-    elements()->PrintNameTo(stream);
+    os << NameOf(elements());
   } else {
-    ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+    DCHECK(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
            elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
-    elements()->PrintNameTo(stream);
-    stream->Add(".");
-    stream->Add(ElementsKindToString(elements_kind()));
+    os << NameOf(elements()) << "." << ElementsKindToString(elements_kind());
   }
 
-  stream->Add("[");
-  key()->PrintNameTo(stream);
-  if (IsDehoisted()) {
-    stream->Add(" + %d]", base_offset());
-  } else {
-    stream->Add("]");
-  }
+  os << "[" << NameOf(key());
+  if (IsDehoisted()) os << " + " << base_offset();
+  os << "]";
 
-  if (HasDependency()) {
-    stream->Add(" ");
-    dependency()->PrintNameTo(stream);
-  }
+  if (HasDependency()) os << " " << NameOf(dependency());
+  if (RequiresHoleCheck()) os << " check_hole";
+  return os;
+}
 
-  if (RequiresHoleCheck()) {
-    stream->Add(" check_hole");
-  }
+
+bool HLoadKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) {
+  // The base offset is usually simply the size of the array header, except
+  // that dehoisting adds an additional offset due to an array index key
+  // manipulation, in which case it becomes (array header size +
+  // constant-offset-from-key * kPointerSize).
+  uint32_t base_offset = BaseOffsetField::decode(bit_field_);
+  v8::base::internal::CheckedNumeric<uint32_t> addition_result = base_offset;
+  addition_result += increase_by_value;
+  if (!addition_result.IsValid()) return false;
+  base_offset = addition_result.ValueOrDie();
+  if (!BaseOffsetField::is_valid(base_offset)) return false;
+  bit_field_ = BaseOffsetField::update(bit_field_, base_offset);
+  return true;
 }
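// Illustrative sketch of the checked addition above: CheckedNumeric refuses
// uint32_t wraparound instead of silently truncating. A minimal stand-in
// without v8's helper (TryAddOffset is a hypothetical name, not v8 API):

#include <cstdint>
#include <limits>

static bool TryAddOffset(uint32_t* base, uint32_t delta) {
  // base + delta overflows exactly when base > UINT32_MAX - delta.
  if (*base > std::numeric_limits<uint32_t>::max() - delta) return false;
  *base += delta;
  return true;
}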
 
 
@@ -3537,11 +3499,8 @@
 }
 
 
-void HLoadKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  stream->Add("[");
-  key()->PrintNameTo(stream);
-  stream->Add("]");
+OStream& HLoadKeyedGeneric::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(object()) << "[" << NameOf(key()) << "]";
 }
 
 
@@ -3582,84 +3541,65 @@
 }
 
 
-void HStoreNamedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  stream->Add(".");
-  ASSERT(name()->IsString());
-  stream->Add(String::cast(*name())->ToCString().get());
-  stream->Add(" = ");
-  value()->PrintNameTo(stream);
+OStream& HStoreNamedGeneric::PrintDataTo(OStream& os) const {  // NOLINT
+  Handle<String> n = Handle<String>::cast(name());
+  return os << NameOf(object()) << "." << n->ToCString().get() << " = "
+            << NameOf(value());
 }
 
 
-void HStoreNamedField::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  access_.PrintTo(stream);
-  stream->Add(" = ");
-  value()->PrintNameTo(stream);
-  if (NeedsWriteBarrier()) {
-    stream->Add(" (write-barrier)");
-  }
-  if (has_transition()) {
-    stream->Add(" (transition map %p)", *transition_map());
-  }
+OStream& HStoreNamedField::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(object()) << access_ << " = " << NameOf(value());
+  if (NeedsWriteBarrier()) os << " (write-barrier)";
+  if (has_transition()) os << " (transition map " << *transition_map() << ")";
+  return os;
 }
 
 
-void HStoreKeyed::PrintDataTo(StringStream* stream) {
+OStream& HStoreKeyed::PrintDataTo(OStream& os) const {  // NOLINT
   if (!is_external()) {
-    elements()->PrintNameTo(stream);
+    os << NameOf(elements());
   } else {
-    elements()->PrintNameTo(stream);
-    stream->Add(".");
-    stream->Add(ElementsKindToString(elements_kind()));
-    ASSERT(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
+    DCHECK(elements_kind() >= FIRST_EXTERNAL_ARRAY_ELEMENTS_KIND &&
            elements_kind() <= LAST_EXTERNAL_ARRAY_ELEMENTS_KIND);
+    os << NameOf(elements()) << "." << ElementsKindToString(elements_kind());
   }
 
-  stream->Add("[");
-  key()->PrintNameTo(stream);
-  if (IsDehoisted()) {
-    stream->Add(" + %d] = ", base_offset());
-  } else {
-    stream->Add("] = ");
-  }
-
-  value()->PrintNameTo(stream);
+  os << "[" << NameOf(key());
+  if (IsDehoisted()) os << " + " << base_offset();
+  return os << "] = " << NameOf(value());
 }
 
 
-void HStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
-  stream->Add("[");
-  key()->PrintNameTo(stream);
-  stream->Add("] = ");
-  value()->PrintNameTo(stream);
+OStream& HStoreKeyedGeneric::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(object()) << "[" << NameOf(key())
+            << "] = " << NameOf(value());
 }
 
 
-void HTransitionElementsKind::PrintDataTo(StringStream* stream) {
-  object()->PrintNameTo(stream);
+OStream& HTransitionElementsKind::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(object());
   ElementsKind from_kind = original_map().handle()->elements_kind();
   ElementsKind to_kind = transitioned_map().handle()->elements_kind();
-  stream->Add(" %p [%s] -> %p [%s]",
-              *original_map().handle(),
-              ElementsAccessor::ForKind(from_kind)->name(),
-              *transitioned_map().handle(),
-              ElementsAccessor::ForKind(to_kind)->name());
-  if (IsSimpleMapChangeTransition(from_kind, to_kind)) stream->Add(" (simple)");
+  os << " " << *original_map().handle() << " ["
+     << ElementsAccessor::ForKind(from_kind)->name() << "] -> "
+     << *transitioned_map().handle() << " ["
+     << ElementsAccessor::ForKind(to_kind)->name() << "]";
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) os << " (simple)";
+  return os;
 }
 
 
-void HLoadGlobalCell::PrintDataTo(StringStream* stream) {
-  stream->Add("[%p]", *cell().handle());
-  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
-  if (details_.IsReadOnly()) stream->Add(" (read-only)");
+OStream& HLoadGlobalCell::PrintDataTo(OStream& os) const {  // NOLINT
+  os << "[" << *cell().handle() << "]";
+  if (details_.IsConfigurable()) os << " (configurable)";
+  if (details_.IsReadOnly()) os << " (read-only)";
+  return os;
 }
 
 
 bool HLoadGlobalCell::RequiresHoleCheck() const {
-  if (details_.IsDontDelete() && !details_.IsReadOnly()) return false;
+  if (!details_.IsConfigurable()) return false;
   for (HUseIterator it(uses()); !it.Done(); it.Advance()) {
     HValue* use = it.value();
     if (!use->IsChange()) return true;
@@ -3668,36 +3608,33 @@
 }
 
 
-void HLoadGlobalGeneric::PrintDataTo(StringStream* stream) {
-  stream->Add("%o ", *name());
+OStream& HLoadGlobalGeneric::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << name()->ToCString().get() << " ";
 }
 
 
-void HInnerAllocatedObject::PrintDataTo(StringStream* stream) {
-  base_object()->PrintNameTo(stream);
-  stream->Add(" offset ");
-  offset()->PrintTo(stream);
+OStream& HInnerAllocatedObject::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(base_object()) << " offset ";
+  return offset()->PrintTo(os);
 }
 
 
-void HStoreGlobalCell::PrintDataTo(StringStream* stream) {
-  stream->Add("[%p] = ", *cell().handle());
-  value()->PrintNameTo(stream);
-  if (!details_.IsDontDelete()) stream->Add(" (deleteable)");
-  if (details_.IsReadOnly()) stream->Add(" (read-only)");
+OStream& HStoreGlobalCell::PrintDataTo(OStream& os) const {  // NOLINT
+  os << "[" << *cell().handle() << "] = " << NameOf(value());
+  if (details_.IsConfigurable()) os << " (configurable)";
+  if (details_.IsReadOnly()) os << " (read-only)";
+  return os;
 }
 
 
-void HLoadContextSlot::PrintDataTo(StringStream* stream) {
-  value()->PrintNameTo(stream);
-  stream->Add("[%d]", slot_index());
+OStream& HLoadContextSlot::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(value()) << "[" << slot_index() << "]";
 }
 
 
-void HStoreContextSlot::PrintDataTo(StringStream* stream) {
-  context()->PrintNameTo(stream);
-  stream->Add("[%d] = ", slot_index());
-  value()->PrintNameTo(stream);
+OStream& HStoreContextSlot::PrintDataTo(OStream& os) const {  // NOLINT
+  return os << NameOf(context()) << "[" << slot_index()
+            << "] = " << NameOf(value());
 }
 
 
@@ -3746,7 +3683,7 @@
 
 bool HAllocate::HandleSideEffectDominator(GVNFlag side_effect,
                                           HValue* dominator) {
-  ASSERT(side_effect == kNewSpacePromotion);
+  DCHECK(side_effect == kNewSpacePromotion);
   Zone* zone = block()->zone();
   if (!FLAG_use_allocation_folding) return false;
 
@@ -3799,7 +3736,7 @@
   if (!current_size->IsInteger32Constant()) {
     // If it's not constant then it is a size_in_bytes calculation graph
     // like this: (const_header_size + const_element_size * size).
-    ASSERT(current_size->IsInstruction());
+    DCHECK(current_size->IsInstruction());
 
     HInstruction* current_instr = HInstruction::cast(current_size);
     if (!current_instr->Dominates(dominator_allocate)) {
@@ -3813,7 +3750,7 @@
     }
   }
 
-  ASSERT((IsNewSpaceAllocation() &&
+  DCHECK((IsNewSpaceAllocation() &&
          dominator_allocate->IsNewSpaceAllocation()) ||
          (IsOldDataSpaceAllocation() &&
          dominator_allocate->IsOldDataSpaceAllocation()) ||
@@ -3961,7 +3898,7 @@
       return NULL;
     }
 
-    ASSERT((IsOldDataSpaceAllocation() &&
+    DCHECK((IsOldDataSpaceAllocation() &&
            dominator_dominator->IsOldDataSpaceAllocation()) ||
            (IsOldPointerSpaceAllocation() &&
            dominator_dominator->IsOldPointerSpaceAllocation()));
@@ -3987,7 +3924,7 @@
 
 
 void HAllocate::UpdateFreeSpaceFiller(int32_t free_space_size) {
-  ASSERT(filler_free_space_size_ != NULL);
+  DCHECK(filler_free_space_size_ != NULL);
   Zone* zone = block()->zone();
   // We must explicitly force Smi representation here because on x64 we
   // would otherwise automatically choose int32, but the actual store
@@ -4004,7 +3941,7 @@
 
 
 void HAllocate::CreateFreeSpaceFiller(int32_t free_space_size) {
-  ASSERT(filler_free_space_size_ == NULL);
+  DCHECK(filler_free_space_size_ == NULL);
   Zone* zone = block()->zone();
   HInstruction* free_space_instr =
       HInnerAllocatedObject::New(zone, context(), dominating_allocate_,
@@ -4050,15 +3987,27 @@
 }
 
 
-void HAllocate::PrintDataTo(StringStream* stream) {
-  size()->PrintNameTo(stream);
-  stream->Add(" (");
-  if (IsNewSpaceAllocation()) stream->Add("N");
-  if (IsOldPointerSpaceAllocation()) stream->Add("P");
-  if (IsOldDataSpaceAllocation()) stream->Add("D");
-  if (MustAllocateDoubleAligned()) stream->Add("A");
-  if (MustPrefillWithFiller()) stream->Add("F");
-  stream->Add(")");
+OStream& HAllocate::PrintDataTo(OStream& os) const {  // NOLINT
+  os << NameOf(size()) << " (";
+  if (IsNewSpaceAllocation()) os << "N";
+  if (IsOldPointerSpaceAllocation()) os << "P";
+  if (IsOldDataSpaceAllocation()) os << "D";
+  if (MustAllocateDoubleAligned()) os << "A";
+  if (MustPrefillWithFiller()) os << "F";
+  return os << ")";
+}
+
+
+bool HStoreKeyed::TryIncreaseBaseOffset(uint32_t increase_by_value) {
+  // The base offset is usually simply the size of the array header, except
+  // that dehoisting adds an additional offset due to an array index key
+  // manipulation, in which case it becomes (array header size +
+  // constant-offset-from-key * kPointerSize).
+  v8::base::internal::CheckedNumeric<uint32_t> addition_result = base_offset_;
+  addition_result += increase_by_value;
+  if (!addition_result.IsValid()) return false;
+  base_offset_ = addition_result.ValueOrDie();
+  return true;
 }
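// Illustrative sketch: unlike HStoreKeyed's plain base_offset_ member above,
// HLoadKeyed packs its base offset into bit_field_ through BaseOffsetField,
// in the style of v8's BitField<type, shift, size> template. A minimal
// version of that packing pattern (MiniBitField is a hypothetical name;
// assumes kShift + kSize < 32):

#include <cstdint>

template <class T, int kShift, int kSize>
struct MiniBitField {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  // True if the value fits into kSize bits.
  static bool is_valid(T value) {
    return (static_cast<uint32_t>(value) >> kSize) == 0;
  }
  static T decode(uint32_t bits) {
    return static_cast<T>((bits & kMask) >> kShift);
  }
  // Rewrites only this field's bits, leaving neighbouring fields intact.
  static uint32_t update(uint32_t bits, T value) {
    return (bits & ~kMask) | (static_cast<uint32_t>(value) << kShift);
  }
};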
 
 
@@ -4136,10 +4085,9 @@
       Handle<String> right_string = c_right->StringValue();
       // Prevent possible exception by invalid string length.
       if (left_string->length() + right_string->length() < String::kMaxLength) {
-        Handle<String> concat = zone->isolate()->factory()->NewFlatConcatString(
+        MaybeHandle<String> concat = zone->isolate()->factory()->NewConsString(
             c_left->StringValue(), c_right->StringValue());
-        ASSERT(!concat.is_null());
-        return HConstant::New(zone, context, concat);
+        return HConstant::New(zone, context, concat.ToHandleChecked());
       }
     }
   }
@@ -4148,19 +4096,21 @@
 }
 
 
-void HStringAdd::PrintDataTo(StringStream* stream) {
+OStream& HStringAdd::PrintDataTo(OStream& os) const {  // NOLINT
   if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_BOTH) {
-    stream->Add("_CheckBoth");
+    os << "_CheckBoth";
   } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_LEFT) {
-    stream->Add("_CheckLeft");
+    os << "_CheckLeft";
   } else if ((flags() & STRING_ADD_CHECK_BOTH) == STRING_ADD_CHECK_RIGHT) {
-    stream->Add("_CheckRight");
+    os << "_CheckRight";
   }
-  HBinaryOperation::PrintDataTo(stream);
-  stream->Add(" (");
-  if (pretenure_flag() == NOT_TENURED) stream->Add("N");
-  else if (pretenure_flag() == TENURED) stream->Add("D");
-  stream->Add(")");
+  HBinaryOperation::PrintDataTo(os);
+  os << " (";
+  if (pretenure_flag() == NOT_TENURED)
+    os << "N";
+  else if (pretenure_flag() == TENURED)
+    os << "D";
+  return os << ")";
 }
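// Illustrative sketch: the chained tests above classify a two-bit flag mask.
// Assuming STRING_ADD_CHECK_BOTH == CHECK_LEFT | CHECK_RIGHT (hypothetical
// minimal enum below), masking with the "both" value before comparing
// distinguishes left-only, right-only, and both:

enum StringAddCheck {
  kCheckNone = 0,
  kCheckLeft = 1 << 0,
  kCheckRight = 1 << 1,
  kCheckBoth = kCheckLeft | kCheckRight
};

static const char* CheckSuffix(int flags) {
  switch (flags & kCheckBoth) {
    case kCheckBoth:  return "_CheckBoth";
    case kCheckLeft:  return "_CheckLeft";
    case kCheckRight: return "_CheckRight";
    default:          return "";
  }
}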
 
 
@@ -4191,7 +4141,7 @@
     if (!constant->HasNumberValue()) break;
     double d = constant->DoubleValue();
     if (std::isnan(d)) {  // NaN poisons everything.
-      return H_CONSTANT_DOUBLE(OS::nan_value());
+      return H_CONSTANT_DOUBLE(base::OS::nan_value());
     }
     if (std::isinf(d)) {  // +Infinity and -Infinity.
       switch (op) {
@@ -4199,11 +4149,12 @@
           return H_CONSTANT_DOUBLE((d > 0.0) ? d : 0.0);
         case kMathLog:
         case kMathSqrt:
-          return H_CONSTANT_DOUBLE((d > 0.0) ? d : OS::nan_value());
+          return H_CONSTANT_DOUBLE((d > 0.0) ? d : base::OS::nan_value());
         case kMathPowHalf:
         case kMathAbs:
           return H_CONSTANT_DOUBLE((d > 0.0) ? d : -d);
         case kMathRound:
+        case kMathFround:
         case kMathFloor:
           return H_CONSTANT_DOUBLE(d);
         case kMathClz32:
@@ -4230,13 +4181,14 @@
        // Doubles are represented as Significand * 2 ^ Exponent. If the
         // Exponent is not negative, the double value is already an integer.
         if (Double(d).Exponent() >= 0) return H_CONSTANT_DOUBLE(d);
-        return H_CONSTANT_DOUBLE(std::floor(d + 0.5));
+        return H_CONSTANT_DOUBLE(Floor(d + 0.5));
+      case kMathFround:
+        return H_CONSTANT_DOUBLE(static_cast<double>(static_cast<float>(d)));
       case kMathFloor:
-        return H_CONSTANT_DOUBLE(std::floor(d));
+        return H_CONSTANT_DOUBLE(Floor(d));
       case kMathClz32: {
         uint32_t i = DoubleToUint32(d);
-        return H_CONSTANT_INT(
-            (i == 0) ? 32 : CompilerIntrinsics::CountLeadingZeros(i));
+        return H_CONSTANT_INT(base::bits::CountLeadingZeros32(i));
       }
       default:
         UNREACHABLE();
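// Illustrative sketch of the two foldings above (hypothetical helper names):
// Math.fround is exactly a round-trip through float, and the kMathRound path
// only reaches Floor(d + 0.5) for doubles that are not already integral
// (a double with a non-negative exponent is an integer and is returned
// unchanged, so adding 0.5 cannot lose precision here).

#include <cmath>

static double FoldFround(double d) {
  return static_cast<double>(static_cast<float>(d));
}

static double FoldRoundNonIntegral(double d) {
  return std::floor(d + 0.5);
}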
@@ -4294,7 +4246,8 @@
     if (c_left->HasNumberValue() && c_right->HasNumberValue()) {
       double result = power_helper(c_left->DoubleValue(),
                                    c_right->DoubleValue());
-      return H_CONSTANT_DOUBLE(std::isnan(result) ?  OS::nan_value() : result);
+      return H_CONSTANT_DOUBLE(std::isnan(result) ? base::OS::nan_value()
+                                                  : result);
     }
   }
   return new(zone) HPower(left, right);
@@ -4327,7 +4280,7 @@
         }
       }
       // All comparisons failed, must be NaN.
-      return H_CONSTANT_DOUBLE(OS::nan_value());
+      return H_CONSTANT_DOUBLE(base::OS::nan_value());
     }
   }
   return new(zone) HMathMinMax(context, left, right, op);
@@ -4465,8 +4418,8 @@
     if (c_string->HasStringValue() && c_index->HasInteger32Value()) {
       Handle<String> s = c_string->StringValue();
       int32_t i = c_index->Integer32Value();
-      ASSERT_LE(0, i);
-      ASSERT_LT(i, s->length());
+      DCHECK_LE(0, i);
+      DCHECK_LT(i, s->length());
       return H_CONSTANT_INT(s->Get(i));
     }
   }
@@ -4478,10 +4431,9 @@
 #undef H_CONSTANT_DOUBLE
 
 
-void HBitwise::PrintDataTo(StringStream* stream) {
-  stream->Add(Token::Name(op_));
-  stream->Add(" ");
-  HBitwiseBinaryOperation::PrintDataTo(stream);
+OStream& HBitwise::PrintDataTo(OStream& os) const {  // NOLINT
+  os << Token::Name(op_) << " ";
+  return HBitwiseBinaryOperation::PrintDataTo(os);
 }
 
 
@@ -4522,7 +4474,7 @@
 
 
 void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
-  ASSERT(CheckFlag(kFlexibleRepresentation));
+  DCHECK(CheckFlag(kFlexibleRepresentation));
   Representation new_rep = RepresentationFromInputs();
   UpdateRepresentation(new_rep, h_infer, "inputs");
   new_rep = RepresentationFromUses();
@@ -4586,12 +4538,12 @@
 #ifdef DEBUG
 
 void HPhi::Verify() {
-  ASSERT(OperandCount() == block()->predecessors()->length());
+  DCHECK(OperandCount() == block()->predecessors()->length());
   for (int i = 0; i < OperandCount(); ++i) {
     HValue* value = OperandAt(i);
     HBasicBlock* defining_block = value->block();
     HBasicBlock* predecessor_block = block()->predecessors()->at(i);
-    ASSERT(defining_block == predecessor_block ||
+    DCHECK(defining_block == predecessor_block ||
            defining_block->Dominates(predecessor_block));
   }
 }
@@ -4599,27 +4551,27 @@
 
 void HSimulate::Verify() {
   HInstruction::Verify();
-  ASSERT(HasAstId() || next()->IsEnterInlined());
+  DCHECK(HasAstId() || next()->IsEnterInlined());
 }
 
 
 void HCheckHeapObject::Verify() {
   HInstruction::Verify();
-  ASSERT(HasNoUses());
+  DCHECK(HasNoUses());
 }
 
 
 void HCheckValue::Verify() {
   HInstruction::Verify();
-  ASSERT(HasNoUses());
+  DCHECK(HasNoUses());
 }
 
 #endif
 
 
 HObjectAccess HObjectAccess::ForFixedArrayHeader(int offset) {
-  ASSERT(offset >= 0);
-  ASSERT(offset < FixedArray::kHeaderSize);
+  DCHECK(offset >= 0);
+  DCHECK(offset < FixedArray::kHeaderSize);
   if (offset == FixedArray::kLengthOffset) return ForFixedArrayLength();
   return HObjectAccess(kInobject, offset);
 }
@@ -4627,7 +4579,7 @@
 
 HObjectAccess HObjectAccess::ForMapAndOffset(Handle<Map> map, int offset,
     Representation representation) {
-  ASSERT(offset >= 0);
+  DCHECK(offset >= 0);
   Portion portion = kInobject;
 
   if (offset == JSObject::kElementsOffset) {
@@ -4667,16 +4619,16 @@
 
 
 HObjectAccess HObjectAccess::ForContextSlot(int index) {
-  ASSERT(index >= 0);
+  DCHECK(index >= 0);
   Portion portion = kInobject;
   int offset = Context::kHeaderSize + index * kPointerSize;
-  ASSERT_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
+  DCHECK_EQ(offset, Context::SlotOffset(index) + kHeapObjectTag);
   return HObjectAccess(portion, offset, Representation::Tagged());
 }
 
 
 HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
-  ASSERT(offset >= 0);
+  DCHECK(offset >= 0);
   Portion portion = kInobject;
 
   if (offset == JSObject::kElementsOffset) {
@@ -4692,30 +4644,15 @@
 
 HObjectAccess HObjectAccess::ForBackingStoreOffset(int offset,
     Representation representation) {
-  ASSERT(offset >= 0);
+  DCHECK(offset >= 0);
   return HObjectAccess(kBackingStore, offset, representation,
                        Handle<String>::null(), false, false);
 }
 
 
-HObjectAccess HObjectAccess::ForField(Handle<Map> map,
-                                      LookupResult* lookup,
+HObjectAccess HObjectAccess::ForField(Handle<Map> map, int index,
+                                      Representation representation,
                                       Handle<String> name) {
-  ASSERT(lookup->IsField() || lookup->IsTransitionToField());
-  int index;
-  Representation representation;
-  if (lookup->IsField()) {
-    index = lookup->GetLocalFieldIndexFromMap(*map);
-    representation = lookup->representation();
-  } else {
-    Map* transition = lookup->GetTransitionTarget();
-    int descriptor = transition->LastAdded();
-    index = transition->instance_descriptors()->GetFieldIndex(descriptor) -
-        map->inobject_properties();
-    PropertyDetails details =
-        transition->instance_descriptors()->GetDetails(descriptor);
-    representation = details.representation();
-  }
   if (index < 0) {
     // Negative property indices are in-object properties, indexed
     // from the end of the fixed part of the object.
@@ -4731,9 +4668,8 @@
 
 
 HObjectAccess HObjectAccess::ForCellPayload(Isolate* isolate) {
-  return HObjectAccess(
-      kInobject, Cell::kValueOffset, Representation::Tagged(),
-      Handle<String>(isolate->heap()->cell_value_string()));
+  return HObjectAccess(kInobject, Cell::kValueOffset, Representation::Tagged(),
+                       isolate->factory()->cell_value_string());
 }
 
 
@@ -4810,39 +4746,39 @@
 }
 
 
-void HObjectAccess::PrintTo(StringStream* stream) const {
-  stream->Add(".");
+OStream& operator<<(OStream& os, const HObjectAccess& access) {
+  os << ".";
 
-  switch (portion()) {
-    case kArrayLengths:
-    case kStringLengths:
-      stream->Add("%length");
+  switch (access.portion()) {
+    case HObjectAccess::kArrayLengths:
+    case HObjectAccess::kStringLengths:
+      os << "%length";
       break;
-    case kElementsPointer:
-      stream->Add("%elements");
+    case HObjectAccess::kElementsPointer:
+      os << "%elements";
       break;
-    case kMaps:
-      stream->Add("%map");
+    case HObjectAccess::kMaps:
+      os << "%map";
       break;
-    case kDouble:  // fall through
-    case kInobject:
-      if (!name_.is_null()) {
-        stream->Add(String::cast(*name_)->ToCString().get());
+    case HObjectAccess::kDouble:  // fall through
+    case HObjectAccess::kInobject:
+      if (!access.name().is_null()) {
+        os << Handle<String>::cast(access.name())->ToCString().get();
       }
-      stream->Add("[in-object]");
+      os << "[in-object]";
       break;
-    case kBackingStore:
-      if (!name_.is_null()) {
-        stream->Add(String::cast(*name_)->ToCString().get());
+    case HObjectAccess::kBackingStore:
+      if (!access.name().is_null()) {
+        os << Handle<String>::cast(access.name())->ToCString().get();
       }
-      stream->Add("[backing-store]");
+      os << "[backing-store]";
       break;
-    case kExternalMemory:
-      stream->Add("[external-memory]");
+    case HObjectAccess::kExternalMemory:
+      os << "[external-memory]";
       break;
   }
 
-  stream->Add("@%d", offset());
+  return os << "@" << access.offset();
 }
 
 } }  // namespace v8::internal
diff --git a/src/hydrogen-instructions.h b/src/hydrogen-instructions.h
index 47ce499..695c629 100644
--- a/src/hydrogen-instructions.h
+++ b/src/hydrogen-instructions.h
@@ -8,13 +8,14 @@
 #include "src/v8.h"
 
 #include "src/allocation.h"
+#include "src/base/bits.h"
 #include "src/code-stubs.h"
 #include "src/conversions.h"
 #include "src/data-flow.h"
 #include "src/deoptimizer.h"
+#include "src/feedback-slots.h"
 #include "src/hydrogen-types.h"
 #include "src/small-pointer-list.h"
-#include "src/string-stream.h"
 #include "src/unique.h"
 #include "src/utils.h"
 #include "src/zone.h"
@@ -23,6 +24,7 @@
 namespace internal {
 
 // Forward declarations.
+struct ChangesOf;
 class HBasicBlock;
 class HDiv;
 class HEnvironment;
@@ -33,134 +35,136 @@
 class HValue;
 class LInstruction;
 class LChunkBuilder;
+class OStream;
 
-#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V)  \
-  V(ArithmeticBinaryOperation)                 \
-  V(BinaryOperation)                           \
-  V(BitwiseBinaryOperation)                    \
-  V(ControlInstruction)                        \
-  V(Instruction)                               \
+#define HYDROGEN_ABSTRACT_INSTRUCTION_LIST(V) \
+  V(ArithmeticBinaryOperation)                \
+  V(BinaryOperation)                          \
+  V(BitwiseBinaryOperation)                   \
+  V(ControlInstruction)                       \
+  V(Instruction)
 
 
-#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V)  \
-  V(AbnormalExit)                              \
-  V(AccessArgumentsAt)                         \
-  V(Add)                                       \
-  V(AllocateBlockContext)                      \
-  V(Allocate)                                  \
-  V(ApplyArguments)                            \
-  V(ArgumentsElements)                         \
-  V(ArgumentsLength)                           \
-  V(ArgumentsObject)                           \
-  V(Bitwise)                                   \
-  V(BlockEntry)                                \
-  V(BoundsCheck)                               \
-  V(BoundsCheckBaseIndexInformation)           \
-  V(Branch)                                    \
-  V(CallWithDescriptor)                        \
-  V(CallJSFunction)                            \
-  V(CallFunction)                              \
-  V(CallNew)                                   \
-  V(CallNewArray)                              \
-  V(CallRuntime)                               \
-  V(CallStub)                                  \
-  V(CapturedObject)                            \
-  V(Change)                                    \
-  V(CheckHeapObject)                           \
-  V(CheckInstanceType)                         \
-  V(CheckMaps)                                 \
-  V(CheckMapValue)                             \
-  V(CheckSmi)                                  \
-  V(CheckValue)                                \
-  V(ClampToUint8)                              \
-  V(ClassOfTestAndBranch)                      \
-  V(CompareNumericAndBranch)                   \
-  V(CompareHoleAndBranch)                      \
-  V(CompareGeneric)                            \
-  V(CompareMinusZeroAndBranch)                 \
-  V(CompareObjectEqAndBranch)                  \
-  V(CompareMap)                                \
-  V(Constant)                                  \
-  V(ConstructDouble)                           \
-  V(Context)                                   \
-  V(DateField)                                 \
-  V(DebugBreak)                                \
-  V(DeclareGlobals)                            \
-  V(Deoptimize)                                \
-  V(Div)                                       \
-  V(DoubleBits)                                \
-  V(DummyUse)                                  \
-  V(EnterInlined)                              \
-  V(EnvironmentMarker)                         \
-  V(ForceRepresentation)                       \
-  V(ForInCacheArray)                           \
-  V(ForInPrepareMap)                           \
-  V(FunctionLiteral)                           \
-  V(GetCachedArrayIndex)                       \
-  V(Goto)                                      \
-  V(HasCachedArrayIndexAndBranch)              \
-  V(HasInstanceTypeAndBranch)                  \
-  V(InnerAllocatedObject)                      \
-  V(InstanceOf)                                \
-  V(InstanceOfKnownGlobal)                     \
-  V(InvokeFunction)                            \
-  V(IsConstructCallAndBranch)                  \
-  V(IsObjectAndBranch)                         \
-  V(IsStringAndBranch)                         \
-  V(IsSmiAndBranch)                            \
-  V(IsUndetectableAndBranch)                   \
-  V(LeaveInlined)                              \
-  V(LoadContextSlot)                           \
-  V(LoadFieldByIndex)                          \
-  V(LoadFunctionPrototype)                     \
-  V(LoadGlobalCell)                            \
-  V(LoadGlobalGeneric)                         \
-  V(LoadKeyed)                                 \
-  V(LoadKeyedGeneric)                          \
-  V(LoadNamedField)                            \
-  V(LoadNamedGeneric)                          \
-  V(LoadRoot)                                  \
-  V(MapEnumLength)                             \
-  V(MathFloorOfDiv)                            \
-  V(MathMinMax)                                \
-  V(Mod)                                       \
-  V(Mul)                                       \
-  V(OsrEntry)                                  \
-  V(Parameter)                                 \
-  V(Power)                                     \
-  V(PushArguments)                             \
-  V(RegExpLiteral)                             \
-  V(Return)                                    \
-  V(Ror)                                       \
-  V(Sar)                                       \
-  V(SeqStringGetChar)                          \
-  V(SeqStringSetChar)                          \
-  V(Shl)                                       \
-  V(Shr)                                       \
-  V(Simulate)                                  \
-  V(StackCheck)                                \
-  V(StoreCodeEntry)                            \
-  V(StoreContextSlot)                          \
-  V(StoreFrameContext)                         \
-  V(StoreGlobalCell)                           \
-  V(StoreKeyed)                                \
-  V(StoreKeyedGeneric)                         \
-  V(StoreNamedField)                           \
-  V(StoreNamedGeneric)                         \
-  V(StringAdd)                                 \
-  V(StringCharCodeAt)                          \
-  V(StringCharFromCode)                        \
-  V(StringCompareAndBranch)                    \
-  V(Sub)                                       \
-  V(ThisFunction)                              \
-  V(ToFastProperties)                          \
-  V(TransitionElementsKind)                    \
-  V(TrapAllocationMemento)                     \
-  V(Typeof)                                    \
-  V(TypeofIsAndBranch)                         \
-  V(UnaryMathOperation)                        \
-  V(UnknownOSRValue)                           \
-  V(UseConst)                                  \
+#define HYDROGEN_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AbnormalExit)                             \
+  V(AccessArgumentsAt)                        \
+  V(Add)                                      \
+  V(AllocateBlockContext)                     \
+  V(Allocate)                                 \
+  V(ApplyArguments)                           \
+  V(ArgumentsElements)                        \
+  V(ArgumentsLength)                          \
+  V(ArgumentsObject)                          \
+  V(Bitwise)                                  \
+  V(BlockEntry)                               \
+  V(BoundsCheck)                              \
+  V(BoundsCheckBaseIndexInformation)          \
+  V(Branch)                                   \
+  V(CallWithDescriptor)                       \
+  V(CallJSFunction)                           \
+  V(CallFunction)                             \
+  V(CallNew)                                  \
+  V(CallNewArray)                             \
+  V(CallRuntime)                              \
+  V(CallStub)                                 \
+  V(CapturedObject)                           \
+  V(Change)                                   \
+  V(CheckHeapObject)                          \
+  V(CheckInstanceType)                        \
+  V(CheckMaps)                                \
+  V(CheckMapValue)                            \
+  V(CheckSmi)                                 \
+  V(CheckValue)                               \
+  V(ClampToUint8)                             \
+  V(ClassOfTestAndBranch)                     \
+  V(CompareNumericAndBranch)                  \
+  V(CompareHoleAndBranch)                     \
+  V(CompareGeneric)                           \
+  V(CompareMinusZeroAndBranch)                \
+  V(CompareObjectEqAndBranch)                 \
+  V(CompareMap)                               \
+  V(Constant)                                 \
+  V(ConstructDouble)                          \
+  V(Context)                                  \
+  V(DateField)                                \
+  V(DebugBreak)                               \
+  V(DeclareGlobals)                           \
+  V(Deoptimize)                               \
+  V(Div)                                      \
+  V(DoubleBits)                               \
+  V(DummyUse)                                 \
+  V(EnterInlined)                             \
+  V(EnvironmentMarker)                        \
+  V(ForceRepresentation)                      \
+  V(ForInCacheArray)                          \
+  V(ForInPrepareMap)                          \
+  V(FunctionLiteral)                          \
+  V(GetCachedArrayIndex)                      \
+  V(Goto)                                     \
+  V(HasCachedArrayIndexAndBranch)             \
+  V(HasInstanceTypeAndBranch)                 \
+  V(InnerAllocatedObject)                     \
+  V(InstanceOf)                               \
+  V(InstanceOfKnownGlobal)                    \
+  V(InvokeFunction)                           \
+  V(IsConstructCallAndBranch)                 \
+  V(IsObjectAndBranch)                        \
+  V(IsStringAndBranch)                        \
+  V(IsSmiAndBranch)                           \
+  V(IsUndetectableAndBranch)                  \
+  V(LeaveInlined)                             \
+  V(LoadContextSlot)                          \
+  V(LoadFieldByIndex)                         \
+  V(LoadFunctionPrototype)                    \
+  V(LoadGlobalCell)                           \
+  V(LoadGlobalGeneric)                        \
+  V(LoadKeyed)                                \
+  V(LoadKeyedGeneric)                         \
+  V(LoadNamedField)                           \
+  V(LoadNamedGeneric)                         \
+  V(LoadRoot)                                 \
+  V(MapEnumLength)                            \
+  V(MathFloorOfDiv)                           \
+  V(MathMinMax)                               \
+  V(Mod)                                      \
+  V(Mul)                                      \
+  V(OsrEntry)                                 \
+  V(Parameter)                                \
+  V(Power)                                    \
+  V(PushArguments)                            \
+  V(RegExpLiteral)                            \
+  V(Return)                                   \
+  V(Ror)                                      \
+  V(Sar)                                      \
+  V(SeqStringGetChar)                         \
+  V(SeqStringSetChar)                         \
+  V(Shl)                                      \
+  V(Shr)                                      \
+  V(Simulate)                                 \
+  V(StackCheck)                               \
+  V(StoreCodeEntry)                           \
+  V(StoreContextSlot)                         \
+  V(StoreFrameContext)                        \
+  V(StoreGlobalCell)                          \
+  V(StoreKeyed)                               \
+  V(StoreKeyedGeneric)                        \
+  V(StoreNamedField)                          \
+  V(StoreNamedGeneric)                        \
+  V(StringAdd)                                \
+  V(StringCharCodeAt)                         \
+  V(StringCharFromCode)                       \
+  V(StringCompareAndBranch)                   \
+  V(Sub)                                      \
+  V(TailCallThroughMegamorphicCache)          \
+  V(ThisFunction)                             \
+  V(ToFastProperties)                         \
+  V(TransitionElementsKind)                   \
+  V(TrapAllocationMemento)                    \
+  V(Typeof)                                   \
+  V(TypeofIsAndBranch)                        \
+  V(UnaryMathOperation)                       \
+  V(UnknownOSRValue)                          \
+  V(UseConst)                                 \
   V(WrapReceiver)
 
 #define GVN_TRACKED_FLAG_LIST(V)               \
@@ -187,21 +191,21 @@
 
 
 #define DECLARE_ABSTRACT_INSTRUCTION(type)                              \
-  virtual bool Is##type() const V8_FINAL V8_OVERRIDE { return true; }   \
+  virtual bool Is##type() const FINAL OVERRIDE { return true; }         \
   static H##type* cast(HValue* value) {                                 \
-    ASSERT(value->Is##type());                                          \
+    DCHECK(value->Is##type());                                          \
     return reinterpret_cast<H##type*>(value);                           \
   }
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type)              \
   virtual LInstruction* CompileToLithium(               \
-     LChunkBuilder* builder) V8_FINAL V8_OVERRIDE;      \
+     LChunkBuilder* builder) FINAL OVERRIDE;            \
   static H##type* cast(HValue* value) {                 \
-    ASSERT(value->Is##type());                          \
+    DCHECK(value->Is##type());                          \
     return reinterpret_cast<H##type*>(value);           \
   }                                                     \
-  virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {  \
+  virtual Opcode opcode() const FINAL OVERRIDE {        \
     return HValue::k##type;                             \
   }
 
@@ -209,7 +213,7 @@
 enum PropertyAccessType { LOAD, STORE };
 
 
-class Range V8_FINAL : public ZoneObject {
+class Range FINAL : public ZoneObject {
  public:
   Range()
       : lower_(kMinInt),
@@ -313,18 +317,18 @@
 
 // We reuse use list nodes behind the scenes as uses are added and deleted.
 // This class is the safe way to iterate uses while deleting them.
-class HUseIterator V8_FINAL BASE_EMBEDDED {
+class HUseIterator FINAL BASE_EMBEDDED {
  public:
   bool Done() { return current_ == NULL; }
   void Advance();
 
   HValue* value() {
-    ASSERT(!Done());
+    DCHECK(!Done());
     return value_;
   }
 
   int index() {
-    ASSERT(!Done());
+    DCHECK(!Done());
     return index_;
   }
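// Illustrative sketch of why HUseIterator is "the safe way": the iterator
// must read the next node before visiting the current one, since visiting a
// use may delete the node the iterator sits on (hypothetical list type):

#include <cstddef>

struct UseNode {
  UseNode* tail;  // next node, NULL at the end
};

static void ForEachUse(UseNode* head, void (*visit)(UseNode*)) {
  for (UseNode* cur = head; cur != NULL;) {
    UseNode* next = cur->tail;  // grab next first; visit() may free cur
    visit(cur);
    cur = next;
  }
}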
 
@@ -356,13 +360,13 @@
 
 
 static inline GVNFlag GVNFlagFromInt(int i) {
-  ASSERT(i >= 0);
-  ASSERT(i < kNumberOfFlags);
+  DCHECK(i >= 0);
+  DCHECK(i < kNumberOfFlags);
   return static_cast<GVNFlag>(i);
 }
 
 
-class DecompositionResult V8_FINAL BASE_EMBEDDED {
+class DecompositionResult FINAL BASE_EMBEDDED {
  public:
   DecompositionResult() : base_(NULL), offset_(0), scale_(0) {}
 
@@ -445,18 +449,16 @@
 
   int raw() const { return value_; }
 
-  void PrintTo(FILE* f);
-
  private:
   typedef BitField<int, 0, 9> InliningIdField;
 
   // Offset from the start of the inlined function.
-  typedef BitField<int, 9, 22> PositionField;
+  typedef BitField<int, 9, 23> PositionField;
 
-  // Only HPositionInfo can use this constructor.
   explicit HSourcePosition(int value) : value_(value) { }
 
   friend class HPositionInfo;
+  friend class LCodeGenBase;
 
   // If FLAG_hydrogen_track_positions is set contains bitfields InliningIdField
   // and PositionField.
@@ -465,6 +467,9 @@
 };
 
 
+OStream& operator<<(OStream& os, const HSourcePosition& p);
+
+
 class HValue : public ZoneObject {
  public:
   static const int kNoNumber = -1;
@@ -548,7 +553,7 @@
     return IsShl() || IsShr() || IsSar();
   }
 
-  HValue(HType type = HType::Tagged())
+  explicit HValue(HType type = HType::Tagged())
       : block_(NULL),
         id_(kNoNumber),
         type_(type),
@@ -582,8 +587,8 @@
 
   Representation representation() const { return representation_; }
   void ChangeRepresentation(Representation r) {
-    ASSERT(CheckFlag(kFlexibleRepresentation));
-    ASSERT(!CheckFlag(kCannotBeTagged) || !r.IsTagged());
+    DCHECK(CheckFlag(kFlexibleRepresentation));
+    DCHECK(!CheckFlag(kCannotBeTagged) || !r.IsTagged());
     RepresentationChanged(r);
     representation_ = r;
     if (r.IsTagged()) {
@@ -607,7 +612,7 @@
 
   HType type() const { return type_; }
   void set_type(HType new_type) {
-    ASSERT(new_type.IsSubtypeOf(type_));
+    DCHECK(new_type.IsSubtypeOf(type_));
     type_ = new_type;
   }
 
@@ -656,13 +661,16 @@
   bool IsDefinedAfter(HBasicBlock* other) const;
 
   // Operands.
-  virtual int OperandCount() = 0;
+  virtual int OperandCount() const = 0;
   virtual HValue* OperandAt(int index) const = 0;
   void SetOperandAt(int index, HValue* value);
 
   void DeleteAndReplaceWith(HValue* other);
   void ReplaceAllUsesWith(HValue* other);
   bool HasNoUses() const { return use_list_ == NULL; }
+  bool HasOneUse() const {
+    return use_list_ != NULL && use_list_->tail() == NULL;
+  }
   bool HasMultipleUses() const {
     return use_list_ != NULL && use_list_->tail() != NULL;
   }
@@ -724,11 +732,11 @@
   }
 
   Range* range() const {
-    ASSERT(!range_poisoned_);
+    DCHECK(!range_poisoned_);
     return range_;
   }
   bool HasRange() const {
-    ASSERT(!range_poisoned_);
+    DCHECK(!range_poisoned_);
     return range_ != NULL;
   }
 #ifdef DEBUG
@@ -762,10 +770,7 @@
   virtual void FinalizeUniqueness() { }
 
   // Printing support.
-  virtual void PrintTo(StringStream* stream) = 0;
-  void PrintNameTo(StringStream* stream);
-  void PrintTypeTo(StringStream* stream);
-  void PrintChangesTo(StringStream* stream);
+  virtual OStream& PrintTo(OStream& os) const = 0;  // NOLINT
 
   const char* Mnemonic() const;
 
@@ -858,12 +863,12 @@
   virtual void DeleteFromGraph() = 0;
   virtual void InternalSetOperandAt(int index, HValue* value) = 0;
   void clear_block() {
-    ASSERT(block_ != NULL);
+    DCHECK(block_ != NULL);
     block_ = NULL;
   }
 
   void set_representation(Representation r) {
-    ASSERT(representation_.IsNone() && !r.IsNone());
+    DCHECK(representation_.IsNone() && !r.IsNone());
     representation_ = r;
   }
 
@@ -882,6 +887,7 @@
     result.Remove(kOsrEntries);
     return result;
   }
+  friend OStream& operator<<(OStream& os, const ChangesOf& v);
 
   // A flag mask of all side effects that can make observable changes in
   // an executing program (i.e. are not safe to repeat, move or remove);
@@ -923,6 +929,30 @@
   DISALLOW_COPY_AND_ASSIGN(HValue);
 };
 
+// Support for printing various aspects of an HValue.
+struct NameOf {
+  explicit NameOf(const HValue* const v) : value(v) {}
+  const HValue* value;
+};
+
+
+struct TypeOf {
+  explicit TypeOf(const HValue* const v) : value(v) {}
+  const HValue* value;
+};
+
+
+struct ChangesOf {
+  explicit ChangesOf(const HValue* const v) : value(v) {}
+  const HValue* value;
+};
+
+
+OStream& operator<<(OStream& os, const HValue& v);
+OStream& operator<<(OStream& os, const NameOf& v);
+OStream& operator<<(OStream& os, const TypeOf& v);
+OStream& operator<<(OStream& os, const ChangesOf& v);
+
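// Illustrative sketch of the NameOf/TypeOf/ChangesOf pattern declared above:
// each struct is a lightweight print-manipulator tag, so wrapping a value
// selects a dedicated operator<< overload and call sites can write
// os << NameOf(v). A standalone version over std::ostream (all names here
// hypothetical):

#include <iostream>

struct Thing { int id; };

struct IdOf {
  explicit IdOf(const Thing* const t) : thing(t) {}
  const Thing* thing;
};

inline std::ostream& operator<<(std::ostream& os, const IdOf& v) {
  return os << "t" << v.thing->id;  // prints e.g. "t42"
}

// Usage: Thing t = { 42 }; std::cout << IdOf(&t) << "\n";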
 
 #define DECLARE_INSTRUCTION_FACTORY_P0(I)                                      \
   static I* New(Zone* zone, HValue* context) {                                 \
@@ -1061,7 +1091,7 @@
     data_ = reinterpret_cast<intptr_t>(positions);
     set_position(pos);
 
-    ASSERT(has_operand_positions());
+    DCHECK(has_operand_positions());
   }
 
   HSourcePosition operand_position(int idx) const {
@@ -1080,7 +1110,7 @@
   static const intptr_t kFirstOperandPosIndex = 1;
 
   HSourcePosition* operand_position_slot(int idx) const {
-    ASSERT(has_operand_positions());
+    DCHECK(has_operand_positions());
     return &(operand_positions()[kFirstOperandPosIndex + idx]);
   }
 
@@ -1089,7 +1119,7 @@
   }
 
   HSourcePosition* operand_positions() const {
-    ASSERT(has_operand_positions());
+    DCHECK(has_operand_positions());
     return reinterpret_cast<HSourcePosition*>(data_);
   }
 
@@ -1099,12 +1129,12 @@
     return (val & kPositionTag) != 0;
   }
   static intptr_t UntagPosition(intptr_t val) {
-    ASSERT(IsTaggedPosition(val));
+    DCHECK(IsTaggedPosition(val));
     return val >> kPositionShift;
   }
   static intptr_t TagPosition(intptr_t val) {
     const intptr_t result = (val << kPositionShift) | kPositionTag;
-    ASSERT(UntagPosition(result) == val);
+    DCHECK(UntagPosition(result) == val);
     return result;
   }
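// Illustrative sketch of the tag-bit scheme above: the low bit of the word
// distinguishes an inline (tagged) position from a pointer to out-of-line
// operand-position storage; shifting makes room for the tag (hypothetical
// constants mirroring kPositionTag/kPositionShift):

#include <cstdint>

static const intptr_t kTag = 1;
static const int kShift = 1;

static intptr_t TagValue(intptr_t val) { return (val << kShift) | kTag; }
static bool IsTaggedValue(intptr_t v) { return (v & kTag) != 0; }
static intptr_t UntagValue(intptr_t v) { return v >> kShift; }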
 
@@ -1117,8 +1147,8 @@
   HInstruction* next() const { return next_; }
   HInstruction* previous() const { return previous_; }
 
-  virtual void PrintTo(StringStream* stream) V8_OVERRIDE;
-  virtual void PrintDataTo(StringStream* stream);
+  virtual OStream& PrintTo(OStream& os) const OVERRIDE;  // NOLINT
+  virtual OStream& PrintDataTo(OStream& os) const;       // NOLINT
 
   bool IsLinked() const { return block() != NULL; }
   void Unlink();
@@ -1138,24 +1168,24 @@
   }
 
   // The position is a write-once variable.
-  virtual HSourcePosition position() const V8_OVERRIDE {
+  virtual HSourcePosition position() const OVERRIDE {
     return HSourcePosition(position_.position());
   }
   bool has_position() const {
     return !position().IsUnknown();
   }
   void set_position(HSourcePosition position) {
-    ASSERT(!has_position());
-    ASSERT(!position.IsUnknown());
+    DCHECK(!has_position());
+    DCHECK(!position.IsUnknown());
     position_.set_position(position);
   }
 
-  virtual HSourcePosition operand_position(int index) const V8_OVERRIDE {
+  virtual HSourcePosition operand_position(int index) const OVERRIDE {
     const HSourcePosition pos = position_.operand_position(index);
     return pos.IsUnknown() ? position() : pos;
   }
   void set_operand_position(Zone* zone, int index, HSourcePosition pos) {
-    ASSERT(0 <= index && index < OperandCount());
+    DCHECK(0 <= index && index < OperandCount());
     position_.ensure_storage_for_operand_positions(zone, OperandCount());
     position_.set_operand_position(index, pos);
   }
@@ -1167,7 +1197,7 @@
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
 
 #ifdef DEBUG
-  virtual void Verify() V8_OVERRIDE;
+  virtual void Verify() OVERRIDE;
 #endif
 
   bool CanDeoptimize();
@@ -1177,7 +1207,7 @@
   DECLARE_ABSTRACT_INSTRUCTION(Instruction)
 
  protected:
-  HInstruction(HType type = HType::Tagged())
+  explicit HInstruction(HType type = HType::Tagged())
       : HValue(type),
         next_(NULL),
         previous_(NULL),
@@ -1185,16 +1215,14 @@
     SetDependsOnFlag(kOsrEntries);
   }
 
-  virtual void DeleteFromGraph() V8_OVERRIDE { Unlink(); }
+  virtual void DeleteFromGraph() OVERRIDE { Unlink(); }
 
  private:
   void InitializeAsFirst(HBasicBlock* block) {
-    ASSERT(!IsLinked());
+    DCHECK(!IsLinked());
     SetBlock(block);
   }
 
-  void PrintMnemonicTo(StringStream* stream);
-
   HInstruction* next_;
   HInstruction* previous_;
   HPositionInfo position_;
@@ -1206,15 +1234,16 @@
 template<int V>
 class HTemplateInstruction : public HInstruction {
  public:
-  virtual int OperandCount() V8_FINAL V8_OVERRIDE { return V; }
-  virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+  virtual int OperandCount() const FINAL OVERRIDE { return V; }
+  virtual HValue* OperandAt(int i) const FINAL OVERRIDE {
     return inputs_[i];
   }
 
  protected:
-  HTemplateInstruction(HType type = HType::Tagged()) : HInstruction(type) {}
+  explicit HTemplateInstruction(HType type = HType::Tagged())
+      : HInstruction(type) {}
 
-  virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+  virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
     inputs_[i] = value;
   }
 
@@ -1225,11 +1254,11 @@
 
 class HControlInstruction : public HInstruction {
  public:
-  virtual HBasicBlock* SuccessorAt(int i) = 0;
-  virtual int SuccessorCount() = 0;
+  virtual HBasicBlock* SuccessorAt(int i) const = 0;
+  virtual int SuccessorCount() const = 0;
   virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   virtual bool KnownSuccessorBlock(HBasicBlock** block) {
     *block = NULL;
@@ -1253,17 +1282,17 @@
 };
 
 
-class HSuccessorIterator V8_FINAL BASE_EMBEDDED {
+class HSuccessorIterator FINAL BASE_EMBEDDED {
  public:
-  explicit HSuccessorIterator(HControlInstruction* instr)
-      : instr_(instr), current_(0) { }
+  explicit HSuccessorIterator(const HControlInstruction* instr)
+      : instr_(instr), current_(0) {}
 
   bool Done() { return current_ >= instr_->SuccessorCount(); }
   HBasicBlock* Current() { return instr_->SuccessorAt(current_); }
   void Advance() { current_++; }
 
  private:
-  HControlInstruction* instr_;
+  const HControlInstruction* instr_;
   int current_;
 };
 
@@ -1271,18 +1300,18 @@
 template<int S, int V>
 class HTemplateControlInstruction : public HControlInstruction {
  public:
-  int SuccessorCount() V8_OVERRIDE { return S; }
-  HBasicBlock* SuccessorAt(int i) V8_OVERRIDE { return successors_[i]; }
-  void SetSuccessorAt(int i, HBasicBlock* block) V8_OVERRIDE {
+  int SuccessorCount() const OVERRIDE { return S; }
+  HBasicBlock* SuccessorAt(int i) const OVERRIDE { return successors_[i]; }
+  void SetSuccessorAt(int i, HBasicBlock* block) OVERRIDE {
     successors_[i] = block;
   }
 
-  int OperandCount() V8_OVERRIDE { return V; }
-  HValue* OperandAt(int i) const V8_OVERRIDE { return inputs_[i]; }
+  int OperandCount() const OVERRIDE { return V; }
+  HValue* OperandAt(int i) const OVERRIDE { return inputs_[i]; }
 
 
  protected:
-  void InternalSetOperandAt(int i, HValue* value) V8_OVERRIDE {
+  void InternalSetOperandAt(int i, HValue* value) OVERRIDE {
     inputs_[i] = value;
   }
 
@@ -1292,9 +1321,9 @@
 };
 
 
-class HBlockEntry V8_FINAL : public HTemplateInstruction<0> {
+class HBlockEntry FINAL : public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1302,7 +1331,7 @@
 };
 
 
-class HDummyUse V8_FINAL : public HTemplateInstruction<1> {
+class HDummyUse FINAL : public HTemplateInstruction<1> {
  public:
   explicit HDummyUse(HValue* value)
       : HTemplateInstruction<1>(HType::Smi()) {
@@ -1312,25 +1341,25 @@
     set_representation(Representation::Tagged());
   }
 
-  HValue* value() { return OperandAt(0); }
+  HValue* value() const { return OperandAt(0); }
 
-  virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(DummyUse);
 };
 
 
 // Inserts an int3/stop break instruction for debugging purposes.
-class HDebugBreak V8_FINAL : public HTemplateInstruction<0> {
+class HDebugBreak FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1338,28 +1367,28 @@
 };
 
 
-class HGoto V8_FINAL : public HTemplateControlInstruction<1, 0> {
+class HGoto FINAL : public HTemplateControlInstruction<1, 0> {
  public:
   explicit HGoto(HBasicBlock* target) {
     SetSuccessorAt(0, target);
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
     *block = FirstSuccessor();
     return true;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(Goto)
 };
 
 
-class HDeoptimize V8_FINAL : public HTemplateControlInstruction<1, 0> {
+class HDeoptimize FINAL : public HTemplateControlInstruction<1, 0> {
  public:
   static HDeoptimize* New(Zone* zone,
                           HValue* context,
@@ -1369,12 +1398,12 @@
     return new(zone) HDeoptimize(reason, type, unreachable_continuation);
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
     *block = NULL;
     return true;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1406,13 +1435,13 @@
     SetSuccessorAt(1, false_target);
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  HValue* value() { return OperandAt(0); }
+  HValue* value() const { return OperandAt(0); }
 };
 
 
-class HBranch V8_FINAL : public HUnaryControlInstruction {
+class HBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HBranch, HValue*);
   DECLARE_INSTRUCTION_FACTORY_P2(HBranch, HValue*,
@@ -1421,14 +1450,14 @@
                                  ToBooleanStub::Types,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE;
+  virtual Representation observed_input_representation(int index) OVERRIDE;
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   ToBooleanStub::Types expected_input_types() const {
     return expected_input_types_;
@@ -1450,13 +1479,13 @@
 };
 
 
-class HCompareMap V8_FINAL : public HUnaryControlInstruction {
+class HCompareMap FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HCompareMap, HValue*, Handle<Map>);
   DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE {
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
     if (known_successor_index() != kNoKnownSuccessorIndex) {
       *block = SuccessorAt(known_successor_index());
       return true;
@@ -1465,7 +1494,7 @@
     return false;
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   static const int kNoKnownSuccessorIndex = -1;
   int known_successor_index() const { return known_successor_index_; }
@@ -1476,7 +1505,7 @@
   Unique<Map> map() const { return map_; }
   bool map_is_stable() const { return map_is_stable_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -1503,20 +1532,20 @@
 };
 
 
-class HContext V8_FINAL : public HTemplateInstruction<0> {
+class HContext FINAL : public HTemplateInstruction<0> {
  public:
   static HContext* New(Zone* zone) {
     return new(zone) HContext();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Context)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HContext() {
@@ -1524,26 +1553,26 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HReturn V8_FINAL : public HTemplateControlInstruction<0, 3> {
+class HReturn FINAL : public HTemplateControlInstruction<0, 3> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     // TODO(titzer): require an Int32 input for faster returns.
     if (index == 2) return Representation::Smi();
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  HValue* value() { return OperandAt(0); }
-  HValue* context() { return OperandAt(1); }
-  HValue* parameter_count() { return OperandAt(2); }
+  HValue* value() const { return OperandAt(0); }
+  HValue* context() const { return OperandAt(1); }
+  HValue* parameter_count() const { return OperandAt(2); }
 
   DECLARE_CONCRETE_INSTRUCTION(Return)
 
@@ -1556,11 +1585,11 @@
 };
 
 
-class HAbnormalExit V8_FINAL : public HTemplateControlInstruction<0, 0> {
+class HAbnormalExit FINAL : public HTemplateControlInstruction<0, 0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1572,7 +1601,7 @@
 
 class HUnaryOperation : public HTemplateInstruction<1> {
  public:
-  HUnaryOperation(HValue* value, HType type = HType::Tagged())
+  explicit HUnaryOperation(HValue* value, HType type = HType::Tagged())
       : HTemplateInstruction<1>(type) {
     SetOperandAt(0, value);
   }
@@ -1582,15 +1611,15 @@
   }
 
   HValue* value() const { return OperandAt(0); }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 };
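 
 // The 'explicit' added to HUnaryOperation's constructor matters because the
 // HType parameter is defaulted, which otherwise makes the constructor an
 // implicit conversion from HValue*. A sketch of what it now rejects:
 //
 //   HValue* v = ...;
 //   HUnaryOperation op = v;  // ill-formed with 'explicit', as intended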
 
 
-class HUseConst V8_FINAL : public HUnaryOperation {
+class HUseConst FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1601,18 +1630,18 @@
 };
 
 
-class HForceRepresentation V8_FINAL : public HTemplateInstruction<1> {
+class HForceRepresentation FINAL : public HTemplateInstruction<1> {
  public:
   static HInstruction* New(Zone* zone, HValue* context, HValue* value,
                            Representation required_representation);
 
-  HValue* value() { return OperandAt(0); }
+  HValue* value() const { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();  // Same as the output representation.
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
 
@@ -1624,16 +1653,16 @@
 };
 
 
-class HChange V8_FINAL : public HUnaryOperation {
+class HChange FINAL : public HUnaryOperation {
  public:
   HChange(HValue* value,
           Representation to,
           bool is_truncating_to_smi,
           bool is_truncating_to_int32)
       : HUnaryOperation(value) {
-    ASSERT(!value->representation().IsNone());
-    ASSERT(!to.IsNone());
-    ASSERT(!value->representation().Equals(to));
+    DCHECK(!value->representation().IsNone());
+    DCHECK(!to.IsNone());
+    DCHECK(!value->representation().Equals(to));
     set_representation(to);
     SetFlag(kUseGVN);
     SetFlag(kCanOverflow);
@@ -1654,46 +1683,46 @@
     return CheckUsesForFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual HType CalculateInferredType() V8_OVERRIDE;
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HType CalculateInferredType() OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
   Representation from() const { return value()->representation(); }
   Representation to() const { return representation(); }
   bool deoptimize_on_minus_zero() const {
     return CheckFlag(kBailoutOnMinusZero);
   }
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return from();
   }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(Change)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
-  virtual bool IsDeletable() const V8_OVERRIDE {
+  virtual bool IsDeletable() const OVERRIDE {
     return !from().IsTagged() || value()->type().IsSmi();
   }
 };
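 
 // DCHECK is the new spelling of ASSERT (src/base/logging.h): a debug-only
 // check that compiles away in release builds, roughly:
 //
 //   #ifdef DEBUG
 //   #define DCHECK(condition) CHECK(condition)
 //   #else
 //   #define DCHECK(condition) ((void) 0)
 //   #endif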
 
 
-class HClampToUint8 V8_FINAL : public HUnaryOperation {
+class HClampToUint8 FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HClampToUint8(HValue* value)
@@ -1703,16 +1732,16 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HDoubleBits V8_FINAL : public HUnaryOperation {
+class HDoubleBits FINAL : public HUnaryOperation {
  public:
   enum Bits { HIGH, LOW };
   DECLARE_INSTRUCTION_FACTORY_P2(HDoubleBits, HValue*, Bits);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Double();
   }
 
@@ -1721,7 +1750,7 @@
   Bits bits() { return bits_; }
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     return other->IsDoubleBits() && HDoubleBits::cast(other)->bits() == bits();
   }
 
@@ -1732,17 +1761,17 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   Bits bits_;
 };
 
 
-class HConstructDouble V8_FINAL : public HTemplateInstruction<2> {
+class HConstructDouble FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Integer32();
   }
 
@@ -1752,7 +1781,7 @@
   HValue* lo() { return OperandAt(1); }
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HConstructDouble(HValue* hi, HValue* lo) {
@@ -1762,7 +1791,7 @@
     SetOperandAt(1, lo);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -1772,7 +1801,7 @@
 };
 
 
-class HSimulate V8_FINAL : public HInstruction {
+class HSimulate FINAL : public HInstruction {
  public:
   HSimulate(BailoutId ast_id,
             int pop_count,
@@ -1787,19 +1816,19 @@
         done_with_replay_(false) {}
   ~HSimulate() {}
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   bool HasAstId() const { return !ast_id_.IsNone(); }
   BailoutId ast_id() const { return ast_id_; }
   void set_ast_id(BailoutId id) {
-    ASSERT(!HasAstId());
+    DCHECK(!HasAstId());
     ast_id_ = id;
   }
 
   int pop_count() const { return pop_count_; }
   const ZoneList<HValue*>* values() const { return &values_; }
   int GetAssignedIndexAt(int index) const {
-    ASSERT(HasAssignedIndexAt(index));
+    DCHECK(HasAssignedIndexAt(index));
     return assigned_indexes_[index];
   }
   bool HasAssignedIndexAt(int index) const {
@@ -1817,13 +1846,13 @@
     }
     return -1;
   }
-  virtual int OperandCount() V8_OVERRIDE { return values_.length(); }
-  virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+  virtual int OperandCount() const OVERRIDE { return values_.length(); }
+  virtual HValue* OperandAt(int index) const OVERRIDE {
     return values_[index];
   }
 
-  virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1836,13 +1865,13 @@
   DECLARE_CONCRETE_INSTRUCTION(Simulate)
 
 #ifdef DEBUG
-  virtual void Verify() V8_OVERRIDE;
+  virtual void Verify() OVERRIDE;
   void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
   Handle<JSFunction> closure() const { return closure_; }
 #endif
 
  protected:
-  virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
+  virtual void InternalSetOperandAt(int index, HValue* value) OVERRIDE {
     values_[index] = value;
   }
 
@@ -1876,29 +1905,29 @@
 };
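 
 // An HSimulate captures the environment needed to deoptimize back to
 // unoptimized code at a given AST position. Illustrative use of the
 // accessors declared above (bailout_id is the caller's):
 //
 //   if (!simulate->HasAstId()) simulate->set_ast_id(bailout_id);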
 
 
-class HEnvironmentMarker V8_FINAL : public HTemplateInstruction<1> {
+class HEnvironmentMarker FINAL : public HTemplateInstruction<1> {
  public:
   enum Kind { BIND, LOOKUP };
 
   DECLARE_INSTRUCTION_FACTORY_P2(HEnvironmentMarker, Kind, int);
 
-  Kind kind() { return kind_; }
-  int index() { return index_; }
+  Kind kind() const { return kind_; }
+  int index() const { return index_; }
   HSimulate* next_simulate() { return next_simulate_; }
   void set_next_simulate(HSimulate* simulate) {
     next_simulate_ = simulate;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
 #ifdef DEBUG
   void set_closure(Handle<JSFunction> closure) {
-    ASSERT(closure_.is_null());
-    ASSERT(!closure.is_null());
+    DCHECK(closure_.is_null());
+    DCHECK(!closure.is_null());
     closure_ = closure;
   }
   Handle<JSFunction> closure() const { return closure_; }
@@ -1920,7 +1949,7 @@
 };
 
 
-class HStackCheck V8_FINAL : public HTemplateInstruction<1> {
+class HStackCheck FINAL : public HTemplateInstruction<1> {
  public:
   enum Type {
     kFunctionEntry,
@@ -1931,7 +1960,7 @@
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -1967,30 +1996,29 @@
 
 
 class HArgumentsObject;
+class HConstant;
 
 
-class HEnterInlined V8_FINAL : public HTemplateInstruction<0> {
+class HEnterInlined FINAL : public HTemplateInstruction<0> {
  public:
-  static HEnterInlined* New(Zone* zone,
-                            HValue* context,
-                            BailoutId return_id,
+  static HEnterInlined* New(Zone* zone, HValue* context, BailoutId return_id,
                             Handle<JSFunction> closure,
-                            int arguments_count,
+                            HConstant* closure_context, int arguments_count,
                             FunctionLiteral* function,
-                            InliningKind inlining_kind,
-                            Variable* arguments_var,
+                            InliningKind inlining_kind, Variable* arguments_var,
                             HArgumentsObject* arguments_object) {
-    return new(zone) HEnterInlined(return_id, closure, arguments_count,
-                                   function, inlining_kind, arguments_var,
-                                   arguments_object, zone);
+    return new (zone) HEnterInlined(return_id, closure, closure_context,
+                                    arguments_count, function, inlining_kind,
+                                    arguments_var, arguments_object, zone);
   }
 
   void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
   ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   Handle<JSFunction> closure() const { return closure_; }
+  HConstant* closure_context() const { return closure_context_; }
   int arguments_count() const { return arguments_count_; }
   bool arguments_pushed() const { return arguments_pushed_; }
   void set_arguments_pushed() { arguments_pushed_ = true; }
@@ -1998,7 +2026,7 @@
   InliningKind inlining_kind() const { return inlining_kind_; }
   BailoutId ReturnId() const { return return_id_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -2008,27 +2036,25 @@
   DECLARE_CONCRETE_INSTRUCTION(EnterInlined)
 
  private:
-  HEnterInlined(BailoutId return_id,
-                Handle<JSFunction> closure,
-                int arguments_count,
-                FunctionLiteral* function,
-                InliningKind inlining_kind,
-                Variable* arguments_var,
-                HArgumentsObject* arguments_object,
+  HEnterInlined(BailoutId return_id, Handle<JSFunction> closure,
+                HConstant* closure_context, int arguments_count,
+                FunctionLiteral* function, InliningKind inlining_kind,
+                Variable* arguments_var, HArgumentsObject* arguments_object,
                 Zone* zone)
       : return_id_(return_id),
         closure_(closure),
+        closure_context_(closure_context),
         arguments_count_(arguments_count),
         arguments_pushed_(false),
         function_(function),
         inlining_kind_(inlining_kind),
         arguments_var_(arguments_var),
         arguments_object_(arguments_object),
-        return_targets_(2, zone) {
-  }
+        return_targets_(2, zone) {}
 
   BailoutId return_id_;
   Handle<JSFunction> closure_;
+  HConstant* closure_context_;
   int arguments_count_;
   bool arguments_pushed_;
   FunctionLiteral* function_;
@@ -2039,18 +2065,18 @@
 };
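 
 // HEnterInlined now also records the inlined function's context as an
 // HConstant (closure_context), threaded through the factory. An
 // illustrative call matching the New() signature above; every argument
 // name here is the caller's:
 //
 //   HEnterInlined* enter = HEnterInlined::New(
 //       zone(), context, return_id, closure, closure_context,
 //       argument_count, function, inlining_kind, arguments_var,
 //       arguments_object);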
 
 
-class HLeaveInlined V8_FINAL : public HTemplateInstruction<0> {
+class HLeaveInlined FINAL : public HTemplateInstruction<0> {
  public:
   HLeaveInlined(HEnterInlined* entry,
                 int drop_count)
       : entry_(entry),
         drop_count_(drop_count) { }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual int argument_delta() const V8_OVERRIDE {
+  virtual int argument_delta() const OVERRIDE {
     return entry_->arguments_pushed() ? -drop_count_ : 0;
   }
 
@@ -2062,7 +2088,7 @@
 };
 
 
-class HPushArguments V8_FINAL : public HInstruction {
+class HPushArguments FINAL : public HInstruction {
  public:
   static HPushArguments* New(Zone* zone, HValue* context) {
     return new(zone) HPushArguments(zone);
@@ -2097,15 +2123,17 @@
     return instr;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual int argument_delta() const V8_OVERRIDE { return inputs_.length(); }
+  virtual int argument_delta() const OVERRIDE { return inputs_.length(); }
   HValue* argument(int i) { return OperandAt(i); }
 
-  virtual int OperandCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
-  virtual HValue* OperandAt(int i) const V8_FINAL V8_OVERRIDE {
+  virtual int OperandCount() const FINAL OVERRIDE {
+    return inputs_.length();
+  }
+  virtual HValue* OperandAt(int i) const FINAL OVERRIDE {
     return inputs_[i];
   }
 
@@ -2114,7 +2142,7 @@
   DECLARE_CONCRETE_INSTRUCTION(PushArguments)
 
  protected:
-  virtual void InternalSetOperandAt(int i, HValue* value) V8_FINAL V8_OVERRIDE {
+  virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
     inputs_[i] = value;
   }
 
@@ -2128,18 +2156,18 @@
 };
 
 
-class HThisFunction V8_FINAL : public HTemplateInstruction<0> {
+class HThisFunction FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HThisFunction() {
@@ -2147,11 +2175,11 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HDeclareGlobals V8_FINAL : public HUnaryOperation {
+class HDeclareGlobals FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HDeclareGlobals,
                                               Handle<FixedArray>,
@@ -2163,7 +2191,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -2192,7 +2220,7 @@
     this->SetAllSideEffects();
   }
 
-  virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+  virtual HType CalculateInferredType() FINAL OVERRIDE {
     return HType::Tagged();
   }
 
@@ -2200,7 +2228,7 @@
     return argument_count_;
   }
 
-  virtual int argument_delta() const V8_OVERRIDE {
+  virtual int argument_delta() const OVERRIDE {
     return -argument_count();
   }
 
@@ -2217,13 +2245,13 @@
   }
 
   virtual Representation RequiredInputRepresentation(
-      int index) V8_FINAL V8_OVERRIDE {
+      int index) FINAL OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  HValue* value() { return OperandAt(0); }
+  HValue* value() const { return OperandAt(0); }
 };
 
 
@@ -2235,19 +2263,19 @@
     SetOperandAt(1, second);
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   virtual Representation RequiredInputRepresentation(
-      int index) V8_FINAL V8_OVERRIDE {
+      int index) FINAL OVERRIDE {
     return Representation::Tagged();
   }
 
-  HValue* first() { return OperandAt(0); }
-  HValue* second() { return OperandAt(1); }
+  HValue* first() const { return OperandAt(0); }
+  HValue* second() const { return OperandAt(1); }
 };
 
 
-class HCallJSFunction V8_FINAL : public HCall<1> {
+class HCallJSFunction FINAL : public HCall<1> {
  public:
   static HCallJSFunction* New(Zone* zone,
                               HValue* context,
@@ -2255,19 +2283,19 @@
                               int argument_count,
                               bool pass_argument_count);
 
-  HValue* function() { return OperandAt(0); }
+  HValue* function() const { return OperandAt(0); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   virtual Representation RequiredInputRepresentation(
-      int index) V8_FINAL V8_OVERRIDE {
-    ASSERT(index == 0);
+      int index) FINAL OVERRIDE {
+    DCHECK(index == 0);
     return Representation::Tagged();
   }
 
   bool pass_argument_count() const { return pass_argument_count_; }
 
-  virtual bool HasStackCheck() V8_FINAL V8_OVERRIDE {
+  virtual bool HasStackCheck() FINAL OVERRIDE {
     return has_stack_check_;
   }
 
@@ -2290,39 +2318,39 @@
 };
 
 
-class HCallWithDescriptor V8_FINAL : public HInstruction {
+class HCallWithDescriptor FINAL : public HInstruction {
  public:
-  static HCallWithDescriptor* New(Zone* zone, HValue* context,
-      HValue* target,
-      int argument_count,
-      const CallInterfaceDescriptor* descriptor,
-      const Vector<HValue*>& operands) {
-    ASSERT(operands.length() == descriptor->environment_length());
-    HCallWithDescriptor* res =
-        new(zone) HCallWithDescriptor(target, argument_count,
-                                      descriptor, operands, zone);
+  static HCallWithDescriptor* New(Zone* zone, HValue* context, HValue* target,
+                                  int argument_count,
+                                  CallInterfaceDescriptor descriptor,
+                                  const Vector<HValue*>& operands) {
+    DCHECK(operands.length() == descriptor.GetEnvironmentLength());
+    HCallWithDescriptor* res = new (zone)
+        HCallWithDescriptor(target, argument_count, descriptor, operands, zone);
     return res;
   }
 
-  virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); }
-  virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+  virtual int OperandCount() const FINAL OVERRIDE {
+    return values_.length();
+  }
+  virtual HValue* OperandAt(int index) const FINAL OVERRIDE {
     return values_[index];
   }
 
   virtual Representation RequiredInputRepresentation(
-      int index) V8_FINAL V8_OVERRIDE {
+      int index) FINAL OVERRIDE {
     if (index == 0) {
       return Representation::Tagged();
     } else {
       int par_index = index - 1;
-      ASSERT(par_index < descriptor_->environment_length());
-      return descriptor_->GetParameterRepresentation(par_index);
+      DCHECK(par_index < descriptor_.GetEnvironmentLength());
+      return descriptor_.GetParameterRepresentation(par_index);
     }
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
 
-  virtual HType CalculateInferredType() V8_FINAL V8_OVERRIDE {
+  virtual HType CalculateInferredType() FINAL OVERRIDE {
     return HType::Tagged();
   }
 
@@ -2330,29 +2358,25 @@
     return argument_count_;
   }
 
-  virtual int argument_delta() const V8_OVERRIDE {
+  virtual int argument_delta() const OVERRIDE {
     return -argument_count_;
   }
 
-  const CallInterfaceDescriptor* descriptor() const {
-    return descriptor_;
-  }
+  CallInterfaceDescriptor descriptor() const { return descriptor_; }
 
   HValue* target() {
     return OperandAt(0);
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
  private:
   // The argument count includes the receiver.
-  HCallWithDescriptor(HValue* target,
-                      int argument_count,
-                      const CallInterfaceDescriptor* descriptor,
-                      const Vector<HValue*>& operands,
-                      Zone* zone)
-    : descriptor_(descriptor),
-      values_(descriptor->environment_length() + 1, zone) {
+  HCallWithDescriptor(HValue* target, int argument_count,
+                      CallInterfaceDescriptor descriptor,
+                      const Vector<HValue*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        values_(descriptor.GetEnvironmentLength() + 1, zone) {
     argument_count_ = argument_count;
     AddOperand(target, zone);
     for (int i = 0; i < operands.length(); i++) {
@@ -2368,17 +2392,17 @@
   }
 
   void InternalSetOperandAt(int index,
-                            HValue* value) V8_FINAL V8_OVERRIDE {
+                            HValue* value) FINAL OVERRIDE {
     values_[index] = value;
   }
 
-  const CallInterfaceDescriptor* descriptor_;
+  CallInterfaceDescriptor descriptor_;
   ZoneList<HValue*> values_;
   int argument_count_;
 };
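 
 // CallInterfaceDescriptor is now passed and stored by value instead of as a
 // const pointer, with the environment size behind GetEnvironmentLength().
 // Illustrative use of the New() signature above, assuming a descriptor d
 // and operand vector ops obtained by the caller:
 //
 //   DCHECK(ops.length() == d.GetEnvironmentLength());
 //   HCallWithDescriptor* call =
 //       HCallWithDescriptor::New(zone(), context, target, argc, d, ops);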
 
 
-class HInvokeFunction V8_FINAL : public HBinaryCall {
+class HInvokeFunction FINAL : public HBinaryCall {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInvokeFunction, HValue*, int);
 
@@ -2409,7 +2433,7 @@
   Handle<JSFunction> known_function() { return known_function_; }
   int formal_parameter_count() const { return formal_parameter_count_; }
 
-  virtual bool HasStackCheck() V8_FINAL V8_OVERRIDE {
+  virtual bool HasStackCheck() FINAL OVERRIDE {
     return has_stack_check_;
   }
 
@@ -2427,7 +2451,7 @@
 };
 
 
-class HCallFunction V8_FINAL : public HBinaryCall {
+class HCallFunction FINAL : public HBinaryCall {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallFunction, HValue*, int);
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(
@@ -2439,7 +2463,7 @@
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction)
 
-  virtual int argument_delta() const V8_OVERRIDE { return -argument_count(); }
+  virtual int argument_delta() const OVERRIDE { return -argument_count(); }
 
  private:
   HCallFunction(HValue* context,
@@ -2452,7 +2476,7 @@
 };
 
 
-class HCallNew V8_FINAL : public HBinaryCall {
+class HCallNew FINAL : public HBinaryCall {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallNew, HValue*, int);
 
@@ -2467,7 +2491,7 @@
 };
 
 
-class HCallNewArray V8_FINAL : public HBinaryCall {
+class HCallNewArray FINAL : public HBinaryCall {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallNewArray,
                                               HValue*,
@@ -2477,7 +2501,7 @@
   HValue* context() { return first(); }
   HValue* constructor() { return second(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   ElementsKind elements_kind() const { return elements_kind_; }
 
@@ -2493,14 +2517,14 @@
 };
 
 
-class HCallRuntime V8_FINAL : public HCall<1> {
+class HCallRuntime FINAL : public HCall<1> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCallRuntime,
                                               Handle<String>,
                                               const Runtime::Function*,
                                               int);
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   HValue* context() { return OperandAt(0); }
   const Runtime::Function* function() const { return c_function_; }
@@ -2510,7 +2534,7 @@
     save_doubles_ = save_doubles;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -2532,18 +2556,18 @@
 };
 
 
-class HMapEnumLength V8_FINAL : public HUnaryOperation {
+class HMapEnumLength FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HMapEnumLength(HValue* value)
@@ -2553,29 +2577,30 @@
     SetDependsOnFlag(kMaps);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HUnaryMathOperation V8_FINAL : public HTemplateInstruction<2> {
+class HUnaryMathOperation FINAL : public HTemplateInstruction<2> {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
                            HValue* value,
                            BuiltinFunctionId op);
 
-  HValue* context() { return OperandAt(0); }
-  HValue* value() { return OperandAt(1); }
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0) {
       return Representation::Tagged();
     } else {
       switch (op_) {
         case kMathFloor:
         case kMathRound:
+        case kMathFround:
         case kMathSqrt:
         case kMathPowHalf:
         case kMathLog:
@@ -2592,11 +2617,11 @@
     }
   }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
-  virtual Representation RepresentationFromUses() V8_OVERRIDE;
-  virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
+  virtual Representation RepresentationFromUses() OVERRIDE;
+  virtual Representation RepresentationFromInputs() OVERRIDE;
 
   BuiltinFunctionId op() const { return op_; }
   const char* OpName() const;
@@ -2604,7 +2629,7 @@
   DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
     return op_ == b->op();
   }
@@ -2642,6 +2667,7 @@
         // is tagged, and not when it is an unboxed double or unboxed integer.
         SetChangesFlag(kNewSpacePromotion);
         break;
+      case kMathFround:
       case kMathLog:
       case kMathExp:
       case kMathSqrt:
@@ -2655,7 +2681,7 @@
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv);
   HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv);
@@ -2664,12 +2690,12 @@
 };
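 
 // kMathFround is new in the switch above: Math.fround(x) rounds a double to
 // the nearest float32 value, which is why it takes the Double()
 // representation branch. Its semantics, as a sketch:
 //
 //   double Fround(double x) {
 //     return static_cast<double>(static_cast<float>(x));
 //   }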
 
 
-class HLoadRoot V8_FINAL : public HTemplateInstruction<0> {
+class HLoadRoot FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
   DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -2678,13 +2704,13 @@
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     HLoadRoot* b = HLoadRoot::cast(other);
     return index_ == b->index_;
   }
 
  private:
-  HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged())
+  explicit HLoadRoot(Heap::RootListIndex index, HType type = HType::Tagged())
       : HTemplateInstruction<0>(type), index_(index) {
     SetFlag(kUseGVN);
     // TODO(bmeurer): We'll need kDependsOnRoots once we add the
@@ -2692,13 +2718,13 @@
     SetDependsOnFlag(kCalls);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   const Heap::RootListIndex index_;
 };
 
 
-class HCheckMaps V8_FINAL : public HTemplateInstruction<2> {
+class HCheckMaps FINAL : public HTemplateInstruction<2> {
  public:
   static HCheckMaps* New(Zone* zone, HValue* context, HValue* value,
                          Handle<Map> map, HValue* typecheck = NULL) {
@@ -2725,17 +2751,17 @@
     ClearDependsOnFlag(kMaps);
   }
 
-  virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual HType CalculateInferredType() V8_OVERRIDE {
+  virtual HType CalculateInferredType() OVERRIDE {
     if (value()->type().IsHeapObject()) return value()->type();
     return HType::HeapObject();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   HValue* value() const { return OperandAt(0); }
   HValue* typecheck() const { return OperandAt(1); }
@@ -2747,7 +2773,7 @@
 
   bool HasMigrationTarget() const { return has_migration_target_; }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
   static HCheckMaps* CreateAndInsertAfter(Zone* zone,
                                           HValue* value,
@@ -2769,7 +2795,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     return this->maps()->Equals(HCheckMaps::cast(other)->maps());
   }
 
@@ -2780,7 +2806,7 @@
       : HTemplateInstruction<2>(HType::HeapObject()), maps_(maps),
         has_migration_target_(false), is_stability_check_(false),
         maps_are_stable_(maps_are_stable) {
-    ASSERT_NE(0, maps->size());
+    DCHECK_NE(0, maps->size());
     SetOperandAt(0, value);
     // Use the object value for the dependency.
     SetOperandAt(1, value);
@@ -2794,7 +2820,7 @@
       : HTemplateInstruction<2>(HType::HeapObject()), maps_(maps),
         has_migration_target_(false), is_stability_check_(false),
         maps_are_stable_(true) {
-    ASSERT_NE(0, maps->size());
+    DCHECK_NE(0, maps->size());
     SetOperandAt(0, value);
     // Use the object value for the dependency if NULL is passed.
     SetOperandAt(1, typecheck ? typecheck : value);
@@ -2817,7 +2843,7 @@
 };
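 
 // Illustrative use of the HCheckMaps factory above: guard that object still
 // has the expected hidden class (map) before a specialized access; object
 // and map are the caller's:
 //
 //   HCheckMaps* check = HCheckMaps::New(zone(), context, object, map);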
 
 
-class HCheckValue V8_FINAL : public HUnaryOperation {
+class HCheckValue FINAL : public HUnaryOperation {
  public:
   static HCheckValue* New(Zone* zone, HValue* context,
                           HValue* value, Handle<JSFunction> func) {
@@ -2836,19 +2862,19 @@
     return new(zone) HCheckValue(value, target, object_in_new_space);
   }
 
-  virtual void FinalizeUniqueness() V8_OVERRIDE {
+  virtual void FinalizeUniqueness() OVERRIDE {
     object_ = Unique<HeapObject>(object_.handle());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
 #ifdef DEBUG
-  virtual void Verify() V8_OVERRIDE;
+  virtual void Verify() OVERRIDE;
 #endif
 
   Unique<HeapObject> object() const { return object_; }
@@ -2857,7 +2883,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CheckValue)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     HCheckValue* b = HCheckValue::cast(other);
     return object_ == b->object_;
   }
@@ -2877,7 +2903,7 @@
 };
 
 
-class HCheckInstanceType V8_FINAL : public HUnaryOperation {
+class HCheckInstanceType FINAL : public HUnaryOperation {
  public:
   enum Check {
     IS_SPEC_OBJECT,
@@ -2889,13 +2915,13 @@
 
   DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual HType CalculateInferredType() V8_OVERRIDE {
+  virtual HType CalculateInferredType() OVERRIDE {
     switch (check_) {
       case IS_SPEC_OBJECT: return HType::JSObject();
       case IS_JS_ARRAY: return HType::JSArray();
@@ -2906,7 +2932,7 @@
     return HType::Tagged();
   }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
   bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
   void GetCheckInterval(InstanceType* first, InstanceType* last);
@@ -2920,7 +2946,7 @@
   // TODO(ager): It would be nice to allow the omission of instance
   // type checks if we have already performed an instance type check
   // with a larger range.
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     HCheckInstanceType* b = HCheckInstanceType::cast(other);
     return check_ == b->check_;
   }
@@ -2928,7 +2954,7 @@
   virtual int RedefinedOperandIndex() { return 0; }
 
  private:
-  const char* GetCheckName();
+  const char* GetCheckName() const;
 
   HCheckInstanceType(HValue* value, Check check)
       : HUnaryOperation(value, HType::HeapObject()), check_(check) {
@@ -2940,15 +2966,15 @@
 };
 
 
-class HCheckSmi V8_FINAL : public HUnaryOperation {
+class HCheckSmi FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE {
+  virtual HValue* Canonicalize() OVERRIDE {
     HType value_type = value()->type();
     if (value_type.IsSmi()) {
       return NULL;
@@ -2959,7 +2985,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) {
@@ -2969,32 +2995,32 @@
 };
 
 
-class HCheckHeapObject V8_FINAL : public HUnaryOperation {
+class HCheckHeapObject FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
 
-  virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual HType CalculateInferredType() V8_OVERRIDE {
+  virtual HType CalculateInferredType() OVERRIDE {
     if (value()->type().IsHeapObject()) return value()->type();
     return HType::HeapObject();
   }
 
 #ifdef DEBUG
-  virtual void Verify() V8_OVERRIDE;
+  virtual void Verify() OVERRIDE;
 #endif
 
-  virtual HValue* Canonicalize() V8_OVERRIDE {
+  virtual HValue* Canonicalize() OVERRIDE {
     return value()->type().IsHeapObject() ? NULL : this;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
@@ -3021,11 +3047,10 @@
 
 class HBoundsCheck;
 class HPhi;
-class HConstant;
 class HBitwise;
 
 
-class InductionVariableData V8_FINAL : public ZoneObject {
+class InductionVariableData FINAL : public ZoneObject {
  public:
   class InductionVariableCheck : public ZoneObject {
    public:
@@ -3033,7 +3058,7 @@
     InductionVariableCheck* next() { return next_; }
     bool HasUpperLimit() { return upper_limit_ >= 0; }
     int32_t upper_limit() {
-      ASSERT(HasUpperLimit());
+      DCHECK(HasUpperLimit());
       return upper_limit_;
     }
     void set_upper_limit(int32_t upper_limit) {
@@ -3225,7 +3250,7 @@
 };
 
 
-class HPhi V8_FINAL : public HValue {
+class HPhi FINAL : public HValue {
  public:
   HPhi(int merged_index, Zone* zone)
       : inputs_(2, zone),
@@ -3236,25 +3261,25 @@
       non_phi_uses_[i] = 0;
       indirect_uses_[i] = 0;
     }
-    ASSERT(merged_index >= 0 || merged_index == kInvalidMergedIndex);
+    DCHECK(merged_index >= 0 || merged_index == kInvalidMergedIndex);
     SetFlag(kFlexibleRepresentation);
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+  virtual Representation RepresentationFromInputs() OVERRIDE;
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
   virtual void InferRepresentation(
-      HInferRepresentationPhase* h_infer) V8_OVERRIDE;
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+      HInferRepresentationPhase* h_infer) OVERRIDE;
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
-  virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+  virtual Representation KnownOptimalRepresentation() OVERRIDE {
     return representation();
   }
-  virtual HType CalculateInferredType() V8_OVERRIDE;
-  virtual int OperandCount() V8_OVERRIDE { return inputs_.length(); }
-  virtual HValue* OperandAt(int index) const V8_OVERRIDE {
+  virtual HType CalculateInferredType() OVERRIDE;
+  virtual int OperandCount() const OVERRIDE { return inputs_.length(); }
+  virtual HValue* OperandAt(int index) const OVERRIDE {
     return inputs_[index];
   }
   HValue* GetRedundantReplacement();
@@ -3264,7 +3289,7 @@
   bool IsReceiver() const { return merged_index_ == 0; }
   bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
 
-  virtual HSourcePosition position() const V8_OVERRIDE;
+  virtual HSourcePosition position() const OVERRIDE;
 
   int merged_index() const { return merged_index_; }
 
@@ -3279,14 +3304,14 @@
         induction_variable_data_->limit() != NULL;
   }
   void DetectInductionVariable() {
-    ASSERT(induction_variable_data_ == NULL);
+    DCHECK(induction_variable_data_ == NULL);
     induction_variable_data_ = InductionVariableData::ExaminePhi(this);
   }
 
-  virtual void PrintTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintTo(OStream& os) const OVERRIDE;  // NOLINT
 
 #ifdef DEBUG
-  virtual void Verify() V8_OVERRIDE;
+  virtual void Verify() OVERRIDE;
 #endif
 
   void InitRealUses(int id);
@@ -3320,10 +3345,10 @@
   int phi_id() { return phi_id_; }
 
   static HPhi* cast(HValue* value) {
-    ASSERT(value->IsPhi());
+    DCHECK(value->IsPhi());
     return reinterpret_cast<HPhi*>(value);
   }
-  virtual Opcode opcode() const V8_OVERRIDE { return HValue::kPhi; }
+  virtual Opcode opcode() const OVERRIDE { return HValue::kPhi; }
 
   void SimplifyConstantInputs();
 
@@ -3331,8 +3356,8 @@
   static const int kInvalidMergedIndex = -1;
 
  protected:
-  virtual void DeleteFromGraph() V8_OVERRIDE;
-  virtual void InternalSetOperandAt(int index, HValue* value) V8_OVERRIDE {
+  virtual void DeleteFromGraph() OVERRIDE;
+  virtual void InternalSetOperandAt(int index, HValue* value) OVERRIDE {
     inputs_[index] = value;
   }
 
@@ -3346,7 +3371,7 @@
   InductionVariableData* induction_variable_data_;
 
   // TODO(titzer): we can't eliminate the receiver for generating backtraces
-  virtual bool IsDeletable() const V8_OVERRIDE { return !IsReceiver(); }
+  virtual bool IsDeletable() const OVERRIDE { return !IsReceiver(); }
 };
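 
 // An HPhi merges one SSA value per predecessor edge of its block. A sketch
 // of building x3 = phi(x1, x2) at a two-way join, assuming the usual
 // AddInput() helper:
 //
 //   HPhi* phi = new (zone()) HPhi(merged_index, zone());
 //   phi->AddInput(x1);  // value arriving from the first predecessor
 //   phi->AddInput(x2);  // value arriving from the second predecessor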
 
 
@@ -3355,22 +3380,24 @@
  public:
   HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
 
-  virtual int OperandCount() V8_FINAL V8_OVERRIDE { return values_.length(); }
-  virtual HValue* OperandAt(int index) const V8_FINAL V8_OVERRIDE {
+  virtual int OperandCount() const FINAL OVERRIDE {
+    return values_.length();
+  }
+  virtual HValue* OperandAt(int index) const FINAL OVERRIDE {
     return values_[index];
   }
 
-  virtual bool HasEscapingOperandAt(int index) V8_FINAL V8_OVERRIDE {
+  virtual bool HasEscapingOperandAt(int index) FINAL OVERRIDE {
     return false;
   }
   virtual Representation RequiredInputRepresentation(
-      int index) V8_FINAL V8_OVERRIDE {
+      int index) FINAL OVERRIDE {
     return Representation::None();
   }
 
  protected:
   virtual void InternalSetOperandAt(int index,
-                                    HValue* value) V8_FINAL V8_OVERRIDE {
+                                    HValue* value) FINAL OVERRIDE {
     values_[index] = value;
   }
 
@@ -3379,7 +3406,7 @@
 };
 
 
-class HArgumentsObject V8_FINAL : public HDematerializedObject {
+class HArgumentsObject FINAL : public HDematerializedObject {
  public:
   static HArgumentsObject* New(Zone* zone, HValue* context, int count) {
     return new(zone) HArgumentsObject(count, zone);
@@ -3406,7 +3433,7 @@
 };
 
 
-class HCapturedObject V8_FINAL : public HDematerializedObject {
+class HCapturedObject FINAL : public HDematerializedObject {
  public:
   HCapturedObject(int length, int id, Zone* zone)
       : HDematerializedObject(length, zone), capture_id_(id) {
@@ -3425,15 +3452,15 @@
   HValue* map_value() const { return values()->first(); }
 
   void ReuseSideEffectsFromStore(HInstruction* store) {
-    ASSERT(store->HasObservableSideEffects());
-    ASSERT(store->IsStoreNamedField());
+    DCHECK(store->HasObservableSideEffects());
+    DCHECK(store->IsStoreNamedField());
     changes_flags_.Add(store->ChangesFlags());
   }
 
   // Replay effects of this instruction on the given environment.
   void ReplayEnvironment(HEnvironment* env);
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
 
@@ -3443,11 +3470,11 @@
   // Note that we cannot DCE captured objects as they are used to replay
   // the environment. This method is here as an explicit reminder.
   // TODO(mstarzinger): Turn HSimulates into full snapshots maybe?
-  virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return false; }
+  virtual bool IsDeletable() const FINAL OVERRIDE { return false; }
 };
 
 
-class HConstant V8_FINAL : public HTemplateInstruction<0> {
+class HConstant FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HConstant, int32_t);
   DECLARE_INSTRUCTION_FACTORY_P2(HConstant, int32_t, Representation);
@@ -3464,6 +3491,14 @@
         zone, context, value, representation));
   }
 
+  virtual Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
+    Handle<Object> object = object_.handle();
+    if (!object.is_null() && object->IsHeapObject()) {
+      return v8::internal::handle(HeapObject::cast(*object)->map());
+    }
+    return Handle<Map>();
+  }
+
   static HConstant* CreateAndInsertBefore(Zone* zone,
                                           HValue* context,
                                           int32_t value,
@@ -3501,15 +3536,15 @@
           isolate->factory()->NewNumber(double_value_, TENURED));
     }
     AllowDeferredHandleDereference smi_check;
-    ASSERT(has_int32_value_ || !object_.handle()->IsSmi());
+    DCHECK(has_int32_value_ || !object_.handle()->IsSmi());
     return object_.handle();
   }
 
   bool IsSpecialDouble() const {
     return has_double_value_ &&
-        (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
-         FixedDoubleArray::is_the_hole_nan(double_value_) ||
-         std::isnan(double_value_));
+           (bit_cast<int64_t>(double_value_) == bit_cast<int64_t>(-0.0) ||
+            FixedDoubleArray::is_the_hole_nan(double_value_) ||
+            std::isnan(double_value_));
   }
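 
   // bit_cast, replacing BitCast above, is the memcpy-based type pun from
   // src/base/macros.h; comparing doubles through bit_cast<int64_t>
   // distinguishes -0.0 and NaN payloads, which double operator== cannot.
   // Roughly:
   //
   //   template <class Dest, class Source>
   //   inline Dest bit_cast(const Source& source) {
   //     // the real helper also asserts sizeof(Dest) == sizeof(Source)
   //     Dest dest;
   //     memcpy(&dest, &source, sizeof(dest));
   //     return dest;
   //   }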
 
   bool NotInNewSpace() const {
@@ -3526,11 +3561,11 @@
     return instance_type_ == MAP_TYPE;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+  virtual Representation KnownOptimalRepresentation() OVERRIDE {
     if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi();
     if (HasInteger32Value()) return Representation::Integer32();
     if (HasNumberValue()) return Representation::Double();
@@ -3538,31 +3573,32 @@
     return Representation::Tagged();
   }
 
-  virtual bool EmitAtUses() V8_OVERRIDE;
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual bool EmitAtUses() OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
   HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
   Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
   Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
   bool HasInteger32Value() const { return has_int32_value_; }
   int32_t Integer32Value() const {
-    ASSERT(HasInteger32Value());
+    DCHECK(HasInteger32Value());
     return int32_value_;
   }
   bool HasSmiValue() const { return has_smi_value_; }
   bool HasDoubleValue() const { return has_double_value_; }
   double DoubleValue() const {
-    ASSERT(HasDoubleValue());
+    DCHECK(HasDoubleValue());
     return double_value_;
   }
   bool IsTheHole() const {
     if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
       return true;
     }
-    return object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
+    return object_.IsInitialized() &&
+           object_.IsKnownGlobal(isolate()->heap()->the_hole_value());
   }
   bool HasNumberValue() const { return has_double_value_; }
   int32_t NumberValueAsInteger32() const {
-    ASSERT(HasNumberValue());
+    DCHECK(HasNumberValue());
     // Irrespective of whether a numeric HConstant can be safely
     // represented as an int32, we store the (in some cases lossy)
     // representation of the number in int32_value_.
@@ -3570,11 +3606,11 @@
   }
   bool HasStringValue() const {
     if (has_double_value_ || has_int32_value_) return false;
-    ASSERT(!object_.handle().is_null());
+    DCHECK(!object_.handle().is_null());
     return instance_type_ < FIRST_NONSTRING_TYPE;
   }
   Handle<String> StringValue() const {
-    ASSERT(HasStringValue());
+    DCHECK(HasStringValue());
     return Handle<String>::cast(object_.handle());
   }
   bool HasInternalizedStringValue() const {
@@ -3595,36 +3631,36 @@
 
   bool HasMapValue() const { return instance_type_ == MAP_TYPE; }
   Unique<Map> MapValue() const {
-    ASSERT(HasMapValue());
+    DCHECK(HasMapValue());
     return Unique<Map>::cast(GetUnique());
   }
   bool HasStableMapValue() const {
-    ASSERT(HasMapValue() || !has_stable_map_value_);
+    DCHECK(HasMapValue() || !has_stable_map_value_);
     return has_stable_map_value_;
   }
 
   bool HasObjectMap() const { return !object_map_.IsNull(); }
   Unique<Map> ObjectMap() const {
-    ASSERT(HasObjectMap());
+    DCHECK(HasObjectMap());
     return object_map_;
   }
 
-  virtual intptr_t Hashcode() V8_OVERRIDE {
+  virtual intptr_t Hashcode() OVERRIDE {
     if (has_int32_value_) {
       return static_cast<intptr_t>(int32_value_);
     } else if (has_double_value_) {
-      return static_cast<intptr_t>(BitCast<int64_t>(double_value_));
+      return static_cast<intptr_t>(bit_cast<int64_t>(double_value_));
     } else if (has_external_reference_value_) {
       return reinterpret_cast<intptr_t>(external_reference_value_.address());
     } else {
-      ASSERT(!object_.handle().is_null());
+      DCHECK(!object_.handle().is_null());
       return object_.Hashcode();
     }
   }
 
-  virtual void FinalizeUniqueness() V8_OVERRIDE {
+  virtual void FinalizeUniqueness() OVERRIDE {
     if (!has_double_value_ && !has_external_reference_value_) {
-      ASSERT(!object_.handle().is_null());
+      DCHECK(!object_.handle().is_null());
       object_ = Unique<Object>(object_.handle());
     }
   }
@@ -3637,15 +3673,15 @@
     return object_.IsInitialized() && object_ == other;
   }
 
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     HConstant* other_constant = HConstant::cast(other);
     if (has_int32_value_) {
       return other_constant->has_int32_value_ &&
           int32_value_ == other_constant->int32_value_;
     } else if (has_double_value_) {
       return other_constant->has_double_value_ &&
-          BitCast<int64_t>(double_value_) ==
-          BitCast<int64_t>(other_constant->double_value_);
+             bit_cast<int64_t>(double_value_) ==
+                 bit_cast<int64_t>(other_constant->double_value_);
     } else if (has_external_reference_value_) {
       return other_constant->has_external_reference_value_ &&
           external_reference_value_ ==
@@ -3656,23 +3692,24 @@
           other_constant->has_external_reference_value_) {
         return false;
       }
-      ASSERT(!object_.handle().is_null());
+      DCHECK(!object_.handle().is_null());
       return other_constant->object_ == object_;
     }
   }
 
 #ifdef DEBUG
-  virtual void Verify() V8_OVERRIDE { }
+  virtual void Verify() OVERRIDE { }
 #endif
 
   DECLARE_CONCRETE_INSTRUCTION(Constant)
 
  protected:
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   friend class HGraph;
-  HConstant(Handle<Object> handle, Representation r = Representation::None());
+  explicit HConstant(Handle<Object> handle,
+                     Representation r = Representation::None());
   HConstant(int32_t value,
             Representation r = Representation::None(),
             bool is_not_in_new_space = true,
@@ -3695,7 +3732,7 @@
 
   void Initialize(Representation r);
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   // If this is a numerical constant, object_ either points to the
   // HeapObject the constant originated from or is null.  If the
@@ -3736,7 +3773,7 @@
                    HType type = HType::Tagged())
       : HTemplateInstruction<3>(type),
         observed_output_representation_(Representation::None()) {
-    ASSERT(left != NULL && right != NULL);
+    DCHECK(left != NULL && right != NULL);
     SetOperandAt(0, context);
     SetOperandAt(1, left);
     SetOperandAt(2, right);
@@ -3760,7 +3797,7 @@
     // Otherwise, if there is only one use of the right operand, it would be
     // better off on the left for platforms that only have 2-arg arithmetic
     // ops (e.g. ia32, x64) that clobber the left operand.
-    return right()->UseCount() == 1;
+    return right()->HasOneUse();
   }
 
   HValue* BetterLeftOperand() {
@@ -3772,7 +3809,7 @@
   }
 
   void set_observed_input_representation(int index, Representation rep) {
-    ASSERT(index >= 1 && index <= 2);
+    DCHECK(index >= 1 && index <= 2);
     observed_input_representation_[index - 1] = rep;
   }
 
@@ -3780,30 +3817,30 @@
     observed_output_representation_ = observed;
   }
 
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+  virtual Representation observed_input_representation(int index) OVERRIDE {
     if (index == 0) return Representation::Tagged();
     return observed_input_representation_[index - 1];
   }
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     Representation rep = !FLAG_smi_binop && new_rep.IsSmi()
         ? Representation::Integer32() : new_rep;
     HValue::UpdateRepresentation(rep, h_infer, reason);
   }
 
   virtual void InferRepresentation(
-      HInferRepresentationPhase* h_infer) V8_OVERRIDE;
-  virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+      HInferRepresentationPhase* h_infer) OVERRIDE;
+  virtual Representation RepresentationFromInputs() OVERRIDE;
   Representation RepresentationFromOutput();
-  virtual void AssumeRepresentation(Representation r) V8_OVERRIDE;
+  virtual void AssumeRepresentation(Representation r) OVERRIDE;
 
   virtual bool IsCommutative() const { return false; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0) return Representation::Tagged();
     return representation();
   }
@@ -3818,7 +3855,10 @@
   bool RightIsPowerOf2() {
     if (!right()->IsInteger32Constant()) return false;
     int32_t value = right()->GetInteger32Constant();
-    return IsPowerOf2(value) || IsPowerOf2(-value);
+    if (value < 0) {
+      return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(-value));
+    }
+    return base::bits::IsPowerOfTwo32(static_cast<uint32_t>(value));
   }
 
   DECLARE_ABSTRACT_INSTRUCTION(BinaryOperation)
@@ -3831,22 +3871,22 @@
 };
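 
 // Two notes on the changes above: right()->HasOneUse() replaces
 // UseCount() == 1 with the same result but without walking the whole use
 // list, and RightIsPowerOf2 now uses base::bits::IsPowerOfTwo32, which for
 // an unsigned value is the classic bit trick, roughly:
 //
 //   inline bool IsPowerOfTwo32(uint32_t value) {
 //     return value && !(value & (value - 1));
 //   }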
 
 
-class HWrapReceiver V8_FINAL : public HTemplateInstruction<2> {
+class HWrapReceiver FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
 
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  HValue* receiver() { return OperandAt(0); }
-  HValue* function() { return OperandAt(1); }
+  HValue* receiver() const { return OperandAt(0); }
+  HValue* function() const { return OperandAt(1); }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
   bool known_function() const { return known_function_; }
 
   DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
@@ -3865,12 +3905,12 @@
 };
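
// [Editorial sketch, not part of the patch] The recurring change in these
// hunks swaps StringStream-based printing for a chainable OStream
// overload; the trailing NOLINT silences cpplint's non-const-reference
// warning. A standalone model of the pattern, using std::ostream in place
// of V8's OStream (FakeInstruction is hypothetical):

#include <iostream>

struct FakeInstruction {
  std::ostream& PrintDataTo(std::ostream& os) const {  // NOLINT
    return os << "t1 t2";  // e.g. names of the instruction's operands
  }
};

int main() {
  FakeInstruction instr;
  instr.PrintDataTo(std::cout) << "\n";  // returning the stream keeps it chainable
  return 0;
}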
 
 
-class HApplyArguments V8_FINAL : public HTemplateInstruction<4> {
+class HApplyArguments FINAL : public HTemplateInstruction<4> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
                                  HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     // The length is untagged; all other inputs are tagged.
     return (index == 2)
         ? Representation::Integer32()
@@ -3899,20 +3939,20 @@
 };
 
 
-class HArgumentsElements V8_FINAL : public HTemplateInstruction<0> {
+class HArgumentsElements FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsElements, bool);
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   bool from_inlined() const { return from_inlined_; }
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
@@ -3922,24 +3962,24 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   bool from_inlined_;
 };
 
 
-class HArgumentsLength V8_FINAL : public HUnaryOperation {
+class HArgumentsLength FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
@@ -3947,26 +3987,26 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HAccessArgumentsAt V8_FINAL : public HTemplateInstruction<3> {
+class HAccessArgumentsAt FINAL : public HTemplateInstruction<3> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*);
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     // The arguments elements array is considered tagged.
     return index == 0
         ? Representation::Tagged()
         : Representation::Integer32();
   }
 
-  HValue* arguments() { return OperandAt(0); }
-  HValue* length() { return OperandAt(1); }
-  HValue* index() { return OperandAt(2); }
+  HValue* arguments() const { return OperandAt(0); }
+  HValue* length() const { return OperandAt(1); }
+  HValue* index() const { return OperandAt(2); }
 
   DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt)
 
@@ -3979,27 +4019,27 @@
     SetOperandAt(2, index);
   }
 
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 };
 
 
 class HBoundsCheckBaseIndexInformation;
 
 
-class HBoundsCheck V8_FINAL : public HTemplateInstruction<2> {
+class HBoundsCheck FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HBoundsCheck, HValue*, HValue*);
 
   bool skip_check() const { return skip_check_; }
   void set_skip_check() { skip_check_ = true; }
 
-  HValue* base() { return base_; }
-  int offset() { return offset_; }
-  int scale() { return scale_; }
+  HValue* base() const { return base_; }
+  int offset() const { return offset_; }
+  int scale() const { return scale_; }
 
   void ApplyIndexChange();
   bool DetectCompoundIndex() {
-    ASSERT(base() == NULL);
+    DCHECK(base() == NULL);
 
     DecompositionResult decomposition;
     if (index()->TryDecompose(&decomposition)) {
@@ -4015,21 +4055,21 @@
     }
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
   virtual void InferRepresentation(
-      HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+      HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  HValue* index() { return OperandAt(0); }
-  HValue* length() { return OperandAt(1); }
-  bool allow_equality() { return allow_equality_; }
+  HValue* index() const { return OperandAt(0); }
+  HValue* length() const { return OperandAt(1); }
+  bool allow_equality() const { return allow_equality_; }
   void set_allow_equality(bool v) { allow_equality_ = v; }
 
-  virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
-  virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE {
+  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  virtual bool IsPurelyInformativeDefinition() OVERRIDE {
     return skip_check();
   }
 
@@ -4038,9 +4078,9 @@
  protected:
   friend class HBoundsCheckBaseIndexInformation;
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
   bool skip_check_;
   HValue* base_;
   int offset_;
@@ -4062,13 +4102,13 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE {
+  virtual bool IsDeletable() const OVERRIDE {
     return skip_check() && !FLAG_debug_code;
   }
 };
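
// [Editorial sketch, not part of the patch] DetectCompoundIndex() above
// asks the index to describe itself as a (base, offset, scale) triple via
// TryDecompose(); the HAdd/HSub/HShr/HSar hunks later in this file supply
// the implementations. A toy model of the protocol, loosely patterned on
// DecompositionResult (all names hypothetical):

#include <cassert>
#include <cstdint>

// Models index == (base >> scale) + offset; the base itself is omitted.
struct ToyDecomposition {
  int32_t offset = 0;
  int32_t scale = 0;
  void ApplyOffset(int32_t delta) { offset += delta; }
  bool ApplyScale(int32_t shift) {
    if (scale != 0) return false;  // at most one shift is folded
    scale = shift;
    return true;
  }
};

int main() {
  ToyDecomposition d1;  // models index = i + 4
  d1.ApplyOffset(4);
  assert(d1.offset == 4 && d1.scale == 0);

  ToyDecomposition d2;  // models index = i >> 2
  assert(d2.ApplyScale(2) && d2.scale == 2);
  return 0;
}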
 
 
-class HBoundsCheckBaseIndexInformation V8_FINAL
+class HBoundsCheckBaseIndexInformation FINAL
     : public HTemplateInstruction<2> {
  public:
   explicit HBoundsCheckBaseIndexInformation(HBoundsCheck* check) {
@@ -4081,19 +4121,19 @@
     }
   }
 
-  HValue* base_index() { return OperandAt(0); }
+  HValue* base_index() const { return OperandAt(0); }
   HBoundsCheck* bounds_check() { return HBoundsCheck::cast(OperandAt(1)); }
 
   DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual int RedefinedOperandIndex() V8_OVERRIDE { return 0; }
-  virtual bool IsPurelyInformativeDefinition() V8_OVERRIDE { return true; }
+  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  virtual bool IsPurelyInformativeDefinition() OVERRIDE { return true; }
 };
 
 
@@ -4108,7 +4148,7 @@
     SetAllSideEffects();
   }
 
-  virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+  virtual void RepresentationChanged(Representation to) OVERRIDE {
     if (to.IsTagged() &&
         (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
       SetAllSideEffects();
@@ -4122,13 +4162,13 @@
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     // We only generate either int32 or generic tagged bitwise operations.
     if (new_rep.IsDouble()) new_rep = Representation::Integer32();
     HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
 
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+  virtual Representation observed_input_representation(int index) OVERRIDE {
     Representation r = HBinaryOperation::observed_input_representation(index);
     if (r.IsDouble()) return Representation::Integer32();
     return r;
@@ -4142,11 +4182,11 @@
   DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
 
  private:
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HMathFloorOfDiv V8_FINAL : public HBinaryOperation {
+class HMathFloorOfDiv FINAL : public HBinaryOperation {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HMathFloorOfDiv,
                                               HValue*,
@@ -4155,7 +4195,7 @@
   DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
@@ -4170,9 +4210,9 @@
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -4185,7 +4225,7 @@
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+  virtual void RepresentationChanged(Representation to) OVERRIDE {
     if (to.IsTagged() &&
         (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
       SetAllSideEffects();
@@ -4200,23 +4240,23 @@
   DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
 
  private:
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HCompareGeneric V8_FINAL : public HBinaryOperation {
+class HCompareGeneric FINAL : public HBinaryOperation {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCompareGeneric, HValue*,
                                               HValue*, Token::Value);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return index == 0
         ? Representation::Tagged()
         : representation();
   }
 
   Token::Value token() const { return token_; }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
 
@@ -4227,7 +4267,7 @@
                   Token::Value token)
       : HBinaryOperation(context, left, right, HType::Boolean()),
         token_(token) {
-    ASSERT(Token::IsCompareOp(token));
+    DCHECK(Token::IsCompareOp(token));
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
@@ -4244,8 +4284,8 @@
                                  HValue*, HValue*, Token::Value,
                                  HBasicBlock*, HBasicBlock*);
 
-  HValue* left() { return OperandAt(0); }
-  HValue* right() { return OperandAt(1); }
+  HValue* left() const { return OperandAt(0); }
+  HValue* right() const { return OperandAt(1); }
   Token::Value token() const { return token_; }
 
   void set_observed_input_representation(Representation left,
@@ -4255,18 +4295,18 @@
   }
 
   virtual void InferRepresentation(
-      HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+      HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+  virtual Representation observed_input_representation(int index) OVERRIDE {
     return observed_input_representation_[index];
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   void SetOperandPositions(Zone* zone,
                            HSourcePosition left_pos,
@@ -4285,7 +4325,7 @@
                            HBasicBlock* false_target = NULL)
       : token_(token) {
     SetFlag(kFlexibleRepresentation);
-    ASSERT(Token::IsCompareOp(token));
+    DCHECK(Token::IsCompareOp(token));
     SetOperandAt(0, left);
     SetOperandAt(1, right);
     SetSuccessorAt(0, true_target);
@@ -4297,16 +4337,16 @@
 };
 
 
-class HCompareHoleAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HCompareHoleAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HCompareHoleAndBranch, HValue*);
   DECLARE_INSTRUCTION_FACTORY_P3(HCompareHoleAndBranch, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
   virtual void InferRepresentation(
-      HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+      HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
 
@@ -4323,18 +4363,18 @@
 };
 
 
-class HCompareMinusZeroAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HCompareMinusZeroAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HCompareMinusZeroAndBranch, HValue*);
 
   virtual void InferRepresentation(
-      HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+      HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch)
 
@@ -4351,7 +4391,7 @@
   DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   static const int kNoKnownSuccessorIndex = -1;
   int known_successor_index() const { return known_successor_index_; }
@@ -4359,16 +4399,16 @@
     known_successor_index_ = known_successor_index;
   }
 
-  HValue* left() { return OperandAt(0); }
-  HValue* right() { return OperandAt(1); }
+  HValue* left() const { return OperandAt(0); }
+  HValue* right() const { return OperandAt(1); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+  virtual Representation observed_input_representation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -4380,12 +4420,6 @@
                             HBasicBlock* true_target = NULL,
                             HBasicBlock* false_target = NULL)
       : known_successor_index_(kNoKnownSuccessorIndex) {
-    ASSERT(!left->IsConstant() ||
-           (!HConstant::cast(left)->HasInteger32Value() ||
-            HConstant::cast(left)->HasSmiValue()));
-    ASSERT(!right->IsConstant() ||
-           (!HConstant::cast(right)->HasInteger32Value() ||
-            HConstant::cast(right)->HasSmiValue()));
     SetOperandAt(0, left);
     SetOperandAt(1, right);
     SetSuccessorAt(0, true_target);
@@ -4396,17 +4430,17 @@
 };
 
 
-class HIsObjectAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HIsObjectAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HIsObjectAndBranch, HValue*);
   DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
 
@@ -4418,17 +4452,17 @@
 };
 
 
-class HIsStringAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HIsStringAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HIsStringAndBranch, HValue*);
   DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   static const int kNoKnownSuccessorIndex = -1;
   int known_successor_index() const { return known_successor_index_; }
@@ -4452,7 +4486,7 @@
 };
 
 
-class HIsSmiAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HIsSmiAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HIsSmiAndBranch, HValue*);
   DECLARE_INSTRUCTION_FACTORY_P3(HIsSmiAndBranch, HValue*,
@@ -4460,12 +4494,12 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
   virtual int RedefinedOperandIndex() { return 0; }
 
  private:
@@ -4478,17 +4512,17 @@
 };
 
 
-class HIsUndetectableAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HIsUndetectableAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HIsUndetectableAndBranch, HValue*);
   DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
 
@@ -4512,9 +4546,9 @@
   HValue* right() { return OperandAt(2); }
   Token::Value token() const { return token_; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -4530,7 +4564,7 @@
                           HValue* right,
                           Token::Value token)
       : token_(token) {
-    ASSERT(Token::IsCompareOp(token));
+    DCHECK(Token::IsCompareOp(token));
     SetOperandAt(0, context);
     SetOperandAt(1, left);
     SetOperandAt(2, right);
@@ -4546,7 +4580,7 @@
  public:
   DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -4556,7 +4590,7 @@
 };
 
 
-class HHasInstanceTypeAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HHasInstanceTypeAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(
       HHasInstanceTypeAndBranch, HValue*, InstanceType);
@@ -4566,13 +4600,13 @@
   InstanceType from() { return from_; }
   InstanceType to() { return to_; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
 
@@ -4581,7 +4615,7 @@
       : HUnaryControlInstruction(value, NULL, NULL), from_(type), to_(type) { }
   HHasInstanceTypeAndBranch(HValue* value, InstanceType from, InstanceType to)
       : HUnaryControlInstruction(value, NULL, NULL), from_(from), to_(to) {
-    ASSERT(to == LAST_TYPE);  // Others not implemented yet in backend.
+    DCHECK(to == LAST_TYPE);  // Others not implemented yet in backend.
   }
 
   InstanceType from_;
@@ -4589,11 +4623,11 @@
 };
 
 
-class HHasCachedArrayIndexAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HHasCachedArrayIndexAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -4604,18 +4638,18 @@
 };
 
 
-class HGetCachedArrayIndex V8_FINAL : public HUnaryOperation {
+class HGetCachedArrayIndex FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
@@ -4623,22 +4657,22 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HClassOfTestAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HClassOfTestAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HClassOfTestAndBranch, HValue*,
                                  Handle<String>);
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   Handle<String> class_name() const { return class_name_; }
 
@@ -4651,22 +4685,22 @@
 };
 
 
-class HTypeofIsAndBranch V8_FINAL : public HUnaryControlInstruction {
+class HTypeofIsAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
 
-  Handle<String> type_literal() { return type_literal_.handle(); }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  Handle<String> type_literal() const { return type_literal_.handle(); }
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) V8_OVERRIDE;
+  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
-  virtual void FinalizeUniqueness() V8_OVERRIDE {
+  virtual void FinalizeUniqueness() OVERRIDE {
     type_literal_ = Unique<String>(type_literal_.handle());
   }
 
@@ -4679,15 +4713,15 @@
 };
 
 
-class HInstanceOf V8_FINAL : public HBinaryOperation {
+class HInstanceOf FINAL : public HBinaryOperation {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
 
@@ -4700,7 +4734,7 @@
 };
 
 
-class HInstanceOfKnownGlobal V8_FINAL : public HTemplateInstruction<2> {
+class HInstanceOfKnownGlobal FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOfKnownGlobal,
                                               HValue*,
@@ -4710,7 +4744,7 @@
   HValue* left() { return OperandAt(1); }
   Handle<JSFunction> function() { return function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -4731,7 +4765,7 @@
 };
 
 
-class HPower V8_FINAL : public HTemplateInstruction<2> {
+class HPower FINAL : public HTemplateInstruction<2> {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
@@ -4741,19 +4775,19 @@
   HValue* left() { return OperandAt(0); }
   HValue* right() const { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return index == 0
       ? Representation::Double()
       : Representation::None();
   }
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+  virtual Representation observed_input_representation(int index) OVERRIDE {
     return RequiredInputRepresentation(index);
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Power)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HPower(HValue* left, HValue* right) {
@@ -4764,13 +4798,13 @@
     SetChangesFlag(kNewSpacePromotion);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE {
+  virtual bool IsDeletable() const OVERRIDE {
     return !right()->representation().IsTagged();
   }
 };
 
 
-class HAdd V8_FINAL : public HArithmeticBinaryOperation {
+class HAdd FINAL : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
@@ -4780,13 +4814,13 @@
   // Add is only commutative if two integer values are added and not if two
   // tagged values are added (because it might be a String concatenation).
   // We also do not commute (pointer + offset).
-  virtual bool IsCommutative() const V8_OVERRIDE {
+  virtual bool IsCommutative() const OVERRIDE {
     return !representation().IsTagged() && !representation().IsExternal();
   }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+  virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
     if (left()->IsInteger32Constant()) {
       decomposition->Apply(right(), left()->GetInteger32Constant());
       return true;
@@ -4798,7 +4832,7 @@
     }
   }
 
-  virtual void RepresentationChanged(Representation to) V8_OVERRIDE {
+  virtual void RepresentationChanged(Representation to) OVERRIDE {
     if (to.IsTagged() &&
         (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
          left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
@@ -4814,16 +4848,16 @@
     }
   }
 
-  virtual Representation RepresentationFromInputs() V8_OVERRIDE;
+  virtual Representation RepresentationFromInputs() OVERRIDE;
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE;
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(Add)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HAdd(HValue* context, HValue* left, HValue* right)
@@ -4833,16 +4867,16 @@
 };
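
// [Editorial note with worked examples, not part of the patch] Two HAdd
// properties interact in the hunks above. IsCommutative() must stay
// representation-dependent because a tagged add can be string
// concatenation ("a" + 1 is "a1" while 1 + "a" is "1a"), and external
// (pointer + offset) adds must not be swapped either. TryDecompose(), by
// contrast, runs on int32 constants and accepts the constant on either
// side, while HSub below folds only a right-hand constant with its sign
// flipped:
//
//   i + 4  ->  (base = i, offset = +4)
//   4 + i  ->  (base = i, offset = +4)
//   i - 4  ->  (base = i, offset = -4)
//   4 - i  ->  not decomposed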
 
 
-class HSub V8_FINAL : public HArithmeticBinaryOperation {
+class HSub FINAL : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
                            HValue* left,
                            HValue* right);
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+  virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
     if (right()->IsInteger32Constant()) {
       decomposition->Apply(left(), -right()->GetInteger32Constant());
       return true;
@@ -4854,9 +4888,9 @@
   DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HSub(HValue* context, HValue* left, HValue* right)
@@ -4866,7 +4900,7 @@
 };
 
 
-class HMul V8_FINAL : public HArithmeticBinaryOperation {
+class HMul FINAL : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
@@ -4886,16 +4920,16 @@
     return mul;
   }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
   // Only commutative if it is certain that no two objects are multiplied.
-  virtual bool IsCommutative() const V8_OVERRIDE {
+  virtual bool IsCommutative() const OVERRIDE {
     return !representation().IsTagged();
   }
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
 
@@ -4904,9 +4938,9 @@
   DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HMul(HValue* context, HValue* left, HValue* right)
@@ -4916,18 +4950,18 @@
 };
 
 
-class HMod V8_FINAL : public HArithmeticBinaryOperation {
+class HMod FINAL : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
                            HValue* left,
                            HValue* right);
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     if (new_rep.IsSmi()) new_rep = Representation::Integer32();
     HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
@@ -4935,9 +4969,9 @@
   DECLARE_CONCRETE_INSTRUCTION(Mod)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HMod(HValue* context,
@@ -4950,18 +4984,18 @@
 };
 
 
-class HDiv V8_FINAL : public HArithmeticBinaryOperation {
+class HDiv FINAL : public HArithmeticBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
                            HValue* left,
                            HValue* right);
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     if (new_rep.IsSmi()) new_rep = Representation::Integer32();
     HArithmeticBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
@@ -4969,9 +5003,9 @@
   DECLARE_CONCRETE_INSTRUCTION(Div)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HDiv(HValue* context, HValue* left, HValue* right)
@@ -4982,7 +5016,7 @@
 };
 
 
-class HMathMinMax V8_FINAL : public HArithmeticBinaryOperation {
+class HMathMinMax FINAL : public HArithmeticBinaryOperation {
  public:
   enum Operation { kMathMin, kMathMax };
 
@@ -4992,14 +5026,14 @@
                            HValue* right,
                            Operation op);
 
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+  virtual Representation observed_input_representation(int index) OVERRIDE {
     return RequiredInputRepresentation(index);
   }
 
   virtual void InferRepresentation(
-      HInferRepresentationPhase* h_infer) V8_OVERRIDE;
+      HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  virtual Representation RepresentationFromInputs() V8_OVERRIDE {
+  virtual Representation RepresentationFromInputs() OVERRIDE {
     Representation left_rep = left()->representation();
     Representation right_rep = right()->representation();
     Representation result = Representation::Smi();
@@ -5009,19 +5043,19 @@
     return result;
   }
 
-  virtual bool IsCommutative() const V8_OVERRIDE { return true; }
+  virtual bool IsCommutative() const OVERRIDE { return true; }
 
   Operation operation() { return operation_; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     return other->IsMathMinMax() &&
         HMathMinMax::cast(other)->operation_ == operation_;
   }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
@@ -5032,7 +5066,7 @@
 };
 
 
-class HBitwise V8_FINAL : public HBitwiseBinaryOperation {
+class HBitwise FINAL : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
@@ -5042,20 +5076,20 @@
 
   Token::Value op() const { return op_; }
 
-  virtual bool IsCommutative() const V8_OVERRIDE { return true; }
+  virtual bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(Bitwise)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     return op() == HBitwise::cast(other)->op();
   }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HBitwise(HValue* context,
@@ -5064,7 +5098,7 @@
            HValue* right)
       : HBitwiseBinaryOperation(context, left, right),
         op_(op) {
-    ASSERT(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
+    DCHECK(op == Token::BIT_AND || op == Token::BIT_OR || op == Token::BIT_XOR);
+    // BIT_AND with a positive smi-range value always clears the entire
+    // sign extension of the smi sign.
     if (op == Token::BIT_AND &&
@@ -5094,18 +5128,18 @@
 };
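
// [Editorial sketch, not part of the patch] The constructor comment in
// HBitwise above observes that x & k with a positive smi-range k can never
// leave the smi range, so the result representation can be narrowed. A
// standalone check (assumes 31-bit smis, as on 32-bit targets):

#include <cassert>
#include <cstdint>

int main() {
  const int32_t kSmiMax = (1 << 30) - 1;
  int32_t x = -123456789;  // arbitrary, possibly negative input
  int32_t k = 0xff;        // positive constant well inside the smi range
  int32_t r = x & k;
  assert(r >= 0 && r <= kSmiMax);  // the sign extension is cleared
  (void)r;
  return 0;
}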
 
 
-class HShl V8_FINAL : public HBitwiseBinaryOperation {
+class HShl FINAL : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
                            HValue* left,
                            HValue* right);
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     if (new_rep.IsSmi() &&
         !(right()->IsInteger32Constant() &&
           right()->GetInteger32Constant() >= 0)) {
@@ -5117,7 +5151,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Shl)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HShl(HValue* context, HValue* left, HValue* right)
@@ -5125,14 +5159,14 @@
 };
 
 
-class HShr V8_FINAL : public HBitwiseBinaryOperation {
+class HShr FINAL : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
                            HValue* left,
                            HValue* right);
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+  virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
     if (right()->IsInteger32Constant()) {
       if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
         // This is intended to look for HAdd and HSub, to handle compounds
@@ -5144,11 +5178,11 @@
     return false;
   }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     if (new_rep.IsSmi()) new_rep = Representation::Integer32();
     HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
@@ -5156,7 +5190,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Shr)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HShr(HValue* context, HValue* left, HValue* right)
@@ -5164,14 +5198,14 @@
 };
 
 
-class HSar V8_FINAL : public HBitwiseBinaryOperation {
+class HSar FINAL : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
                            HValue* left,
                            HValue* right);
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) V8_OVERRIDE {
+  virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
     if (right()->IsInteger32Constant()) {
       if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
         // This is intended to look for HAdd and HSub, to handle compounds
@@ -5183,11 +5217,11 @@
     return false;
   }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     if (new_rep.IsSmi()) new_rep = Representation::Integer32();
     HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
@@ -5195,7 +5229,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Sar)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HSar(HValue* context, HValue* left, HValue* right)
@@ -5203,7 +5237,7 @@
 };
 
 
-class HRor V8_FINAL : public HBitwiseBinaryOperation {
+class HRor FINAL : public HBitwiseBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
@@ -5214,7 +5248,7 @@
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
-                                    const char* reason) V8_OVERRIDE {
+                                    const char* reason) OVERRIDE {
     if (new_rep.IsSmi()) new_rep = Representation::Integer32();
     HBitwiseBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
@@ -5222,7 +5256,7 @@
   DECLARE_CONCRETE_INSTRUCTION(Ror)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HRor(HValue* context, HValue* left, HValue* right)
@@ -5232,13 +5266,13 @@
 };
 
 
-class HOsrEntry V8_FINAL : public HTemplateInstruction<0> {
+class HOsrEntry FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HOsrEntry, BailoutId);
 
   BailoutId ast_id() const { return ast_id_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -5254,7 +5288,7 @@
 };
 
 
-class HParameter V8_FINAL : public HTemplateInstruction<0> {
+class HParameter FINAL : public HTemplateInstruction<0> {
  public:
   enum ParameterKind {
     STACK_PARAMETER,
@@ -5269,9 +5303,9 @@
   unsigned index() const { return index_; }
   ParameterKind kind() const { return kind_; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -5298,14 +5332,14 @@
 };
 
 
-class HCallStub V8_FINAL : public HUnaryCall {
+class HCallStub FINAL : public HUnaryCall {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HCallStub, CodeStub::Major, int);
   CodeStub::Major major_key() { return major_key_; }
 
   HValue* context() { return value(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(CallStub)
 
@@ -5319,13 +5353,44 @@
 };
 
 
-class HUnknownOSRValue V8_FINAL : public HTemplateInstruction<0> {
+class HTailCallThroughMegamorphicCache FINAL : public HTemplateInstruction<3> {
+ public:
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HTailCallThroughMegamorphicCache,
+                                              HValue*, HValue*, Code::Flags);
+
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+    return Representation::Tagged();
+  }
+
+  HValue* context() const { return OperandAt(0); }
+  HValue* receiver() const { return OperandAt(1); }
+  HValue* name() const { return OperandAt(2); }
+  Code::Flags flags() const { return flags_; }
+
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache)
+
+ private:
+  HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
+                                   HValue* name, Code::Flags flags)
+      : flags_(flags) {
+    SetOperandAt(0, context);
+    SetOperandAt(1, receiver);
+    SetOperandAt(2, name);
+  }
+
+  Code::Flags flags_;
+};
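
// [Editorial sketch, not part of the patch] The new
// HTailCallThroughMegamorphicCache instruction carries (context, receiver,
// name) plus the Code::Flags used to probe the megamorphic stub cache, and
// presumably ends its code path by tail-calling whatever handler the probe
// finds. Assuming the factory macro expands to the usual
// New(zone, context, ...) helper, a builder would create it roughly as:
//
//   HTailCallThroughMegamorphicCache* call =
//       HTailCallThroughMegamorphicCache::New(zone, context, receiver,
//                                             name, flags);
//
// All three value inputs are tagged, per RequiredInputRepresentation().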
+
+
+class HUnknownOSRValue FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
 
-  virtual void PrintDataTo(StringStream* stream);
+  virtual OStream& PrintDataTo(OStream& os) const;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -5334,7 +5399,7 @@
   HEnvironment *environment() { return environment_; }
   int index() { return index_; }
 
-  virtual Representation KnownOptimalRepresentation() V8_OVERRIDE {
+  virtual Representation KnownOptimalRepresentation() OVERRIDE {
     if (incoming_value_ == NULL) return Representation::None();
     return incoming_value_->KnownOptimalRepresentation();
   }
@@ -5355,7 +5420,7 @@
 };
 
 
-class HLoadGlobalCell V8_FINAL : public HTemplateInstruction<0> {
+class HLoadGlobalCell FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HLoadGlobalCell, Handle<Cell>,
                                  PropertyDetails);
@@ -5363,24 +5428,24 @@
   Unique<Cell> cell() const { return cell_; }
   bool RequiresHoleCheck() const;
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual intptr_t Hashcode() V8_OVERRIDE {
+  virtual intptr_t Hashcode() OVERRIDE {
     return cell_.Hashcode();
   }
 
-  virtual void FinalizeUniqueness() V8_OVERRIDE {
+  virtual void FinalizeUniqueness() OVERRIDE {
     cell_ = Unique<Cell>(cell_.handle());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     return cell_ == HLoadGlobalCell::cast(other)->cell_;
   }
 
@@ -5392,50 +5457,61 @@
     SetDependsOnFlag(kGlobalVars);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
+  virtual bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
 
   Unique<Cell> cell_;
   PropertyDetails details_;
 };
 
 
-class HLoadGlobalGeneric V8_FINAL : public HTemplateInstruction<2> {
+class HLoadGlobalGeneric FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HLoadGlobalGeneric, HValue*,
-                                              Handle<Object>, bool);
+                                              Handle<String>, bool);
 
   HValue* context() { return OperandAt(0); }
   HValue* global_object() { return OperandAt(1); }
-  Handle<Object> name() const { return name_; }
+  Handle<String> name() const { return name_; }
   bool for_typeof() const { return for_typeof_; }
+  int slot() const {
+    DCHECK(FLAG_vector_ics &&
+           slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot);
+    return slot_;
+  }
+  Handle<FixedArray> feedback_vector() const { return feedback_vector_; }
+  void SetVectorAndSlot(Handle<FixedArray> vector, int slot) {
+    DCHECK(FLAG_vector_ics);
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric)
 
  private:
-  HLoadGlobalGeneric(HValue* context,
-                     HValue* global_object,
-                     Handle<Object> name,
-                     bool for_typeof)
-      : name_(name),
-        for_typeof_(for_typeof) {
+  HLoadGlobalGeneric(HValue* context, HValue* global_object,
+                     Handle<String> name, bool for_typeof)
+      : name_(name), for_typeof_(for_typeof),
+        slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) {
     SetOperandAt(0, context);
     SetOperandAt(1, global_object);
     set_representation(Representation::Tagged());
     SetAllSideEffects();
   }
 
-  Handle<Object> name_;
+  Handle<String> name_;
   bool for_typeof_;
+  Handle<FixedArray> feedback_vector_;
+  int slot_;
 };
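
// [Editorial sketch, not part of the patch] The new slot()/
// SetVectorAndSlot() pair threads type-feedback vector data through
// HLoadGlobalGeneric when --vector-ics is on. Assuming the factory macro
// expands to the usual New(zone, context, ...) helper, a builder would
// populate it roughly as (surrounding variable names hypothetical):
//
//   HLoadGlobalGeneric* load = HLoadGlobalGeneric::New(
//       zone, context, global_object, name, for_typeof);
//   if (FLAG_vector_ics) {
//     load->SetVectorAndSlot(feedback_vector, feedback_slot);
//   }
//
// slot() DCHECKs that a valid slot was recorded, so callers must only
// query it under the same flag.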
 
 
-class HAllocate V8_FINAL : public HTemplateInstruction<2> {
+class HAllocate FINAL : public HTemplateInstruction<2> {
  public:
   static bool CompatibleInstanceTypes(InstanceType type1,
                                       InstanceType type2) {
@@ -5458,17 +5534,17 @@
   // Maximum instance size for which allocations will be inlined.
   static const int kMaxInlineSize = 64 * kPointerSize;
 
-  HValue* context() { return OperandAt(0); }
-  HValue* size() { return OperandAt(1); }
+  HValue* context() const { return OperandAt(0); }
+  HValue* size() const { return OperandAt(1); }
 
   bool has_size_upper_bound() { return size_upper_bound_ != NULL; }
   HConstant* size_upper_bound() { return size_upper_bound_; }
   void set_size_upper_bound(HConstant* value) {
-    ASSERT(size_upper_bound_ == NULL);
+    DCHECK(size_upper_bound_ == NULL);
     size_upper_bound_ = value;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0) {
       return Representation::Tagged();
     } else {
@@ -5476,7 +5552,7 @@
     }
   }
 
-  virtual Handle<Map> GetMonomorphicJSObjectMap() {
+  virtual Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
     return known_initial_map_;
   }
 
@@ -5517,9 +5593,9 @@
   }
 
   virtual bool HandleSideEffectDominator(GVNFlag side_effect,
-                                         HValue* dominator) V8_OVERRIDE;
+                                         HValue* dominator) OVERRIDE;
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(Allocate)
 
@@ -5623,7 +5699,7 @@
 };
 
 
-class HStoreCodeEntry V8_FINAL: public HTemplateInstruction<2> {
+class HStoreCodeEntry FINAL: public HTemplateInstruction<2> {
  public:
   static HStoreCodeEntry* New(Zone* zone,
                               HValue* context,
@@ -5649,7 +5725,7 @@
 };
 
 
-class HInnerAllocatedObject V8_FINAL : public HTemplateInstruction<2> {
+class HInnerAllocatedObject FINAL : public HTemplateInstruction<2> {
  public:
   static HInnerAllocatedObject* New(Zone* zone,
                                     HValue* context,
@@ -5659,14 +5735,14 @@
     return new(zone) HInnerAllocatedObject(value, offset, type);
   }
 
-  HValue* base_object() { return OperandAt(0); }
-  HValue* offset() { return OperandAt(1); }
+  HValue* base_object() const { return OperandAt(0); }
+  HValue* offset() const { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return index == 0 ? Representation::Tagged() : Representation::Integer32();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
 
@@ -5674,8 +5750,8 @@
   HInnerAllocatedObject(HValue* value,
                         HValue* offset,
                         HType type) : HTemplateInstruction<2>(type) {
-    ASSERT(value->IsAllocate());
-    ASSERT(type.IsHeapObject());
+    DCHECK(value->IsAllocate());
+    DCHECK(type.IsHeapObject());
     SetOperandAt(0, value);
     SetOperandAt(1, offset);
     set_representation(Representation::Tagged());
@@ -5746,27 +5822,25 @@
 }
 
 
-class HStoreGlobalCell V8_FINAL : public HUnaryOperation {
+class HStoreGlobalCell FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HStoreGlobalCell, HValue*,
                                  Handle<PropertyCell>, PropertyDetails);
 
   Unique<PropertyCell> cell() const { return cell_; }
-  bool RequiresHoleCheck() {
-    return !details_.IsDontDelete() || details_.IsReadOnly();
-  }
+  bool RequiresHoleCheck() { return details_.IsConfigurable(); }
   bool NeedsWriteBarrier() {
     return StoringValueNeedsWriteBarrier(value());
   }
 
-  virtual void FinalizeUniqueness() V8_OVERRIDE {
+  virtual void FinalizeUniqueness() OVERRIDE {
     cell_ = Unique<PropertyCell>(cell_.handle());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
 
@@ -5785,7 +5859,7 @@
 };
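
// [Editorial sketch, not part of the patch] RequiresHoleCheck() above is
// rewritten in terms of PropertyDetails::IsConfigurable(). Assuming the
// usual mapping (configurable == !DONT_DELETE), the two predicates differ
// only for read-only, non-configurable cells, which no longer force a hole
// check. A standalone truth table:

#include <cstdio>

int main() {
  for (int dont_delete = 0; dont_delete <= 1; ++dont_delete) {
    for (int read_only = 0; read_only <= 1; ++read_only) {
      bool old_check = !dont_delete || read_only;  // removed predicate
      bool new_check = !dont_delete;               // IsConfigurable()
      std::printf("dont_delete=%d read_only=%d old=%d new=%d\n",
                  dont_delete, read_only, old_check, new_check);
    }
  }
  return 0;
}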
 
 
-class HLoadContextSlot V8_FINAL : public HUnaryOperation {
+class HLoadContextSlot FINAL : public HUnaryOperation {
  public:
   enum Mode {
     // Perform a normal load of the context slot without checking its value.
@@ -5818,29 +5892,29 @@
     return mode_ != kNoCheck;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     HLoadContextSlot* b = HLoadContextSlot::cast(other);
     return (slot_index() == b->slot_index());
   }
 
  private:
-  virtual bool IsDeletable() const V8_OVERRIDE { return !RequiresHoleCheck(); }
+  virtual bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
 
   int slot_index_;
   Mode mode_;
 };
 
 
-class HStoreContextSlot V8_FINAL : public HTemplateInstruction<2> {
+class HStoreContextSlot FINAL : public HTemplateInstruction<2> {
  public:
   enum Mode {
     // Perform a normal store to the context slot without checking its previous
@@ -5858,8 +5932,8 @@
   DECLARE_INSTRUCTION_FACTORY_P4(HStoreContextSlot, HValue*, int,
                                  Mode, HValue*);
 
-  HValue* context() { return OperandAt(0); }
-  HValue* value() { return OperandAt(1); }
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
   int slot_index() const { return slot_index_; }
   Mode mode() const { return mode_; }
 
@@ -5875,11 +5949,11 @@
     return mode_ != kNoCheck;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
 
@@ -5898,7 +5972,7 @@
 
 // Represents an access to a portion of an object, such as the map pointer,
 // array elements pointer, etc, but not accesses to array elements themselves.
-class HObjectAccess V8_FINAL {
+class HObjectAccess FINAL {
  public:
   inline bool IsInobject() const {
     return portion() != kBackingStore && portion() != kExternalMemory;
@@ -6091,10 +6165,12 @@
   }
 
   static HObjectAccess ForMapInstanceTypeAndBitField() {
-    STATIC_ASSERT((Map::kInstanceTypeOffset & 1) == 0);
-    STATIC_ASSERT(Map::kBitFieldOffset == Map::kInstanceTypeOffset + 1);
+    STATIC_ASSERT((Map::kInstanceTypeAndBitFieldOffset & 1) == 0);
+    // Ensure the two fields share one 16-bit word, endian-independent.
+    STATIC_ASSERT((Map::kBitFieldOffset & ~1) ==
+                  (Map::kInstanceTypeOffset & ~1));
     return HObjectAccess(kInobject,
-                         Map::kInstanceTypeOffset,
+                         Map::kInstanceTypeAndBitFieldOffset,
                          Representation::UInteger16());
   }
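
The old assertion demanded that the bit field byte sit exactly one past the instance type byte; the replacement only requires that both byte offsets land in the same aligned 16-bit word, which is all the UInteger16 access needs and which holds on either byte order. A worked instance of the mask check, with hypothetical offsets:

  // Hypothetical offsets 14 and 15: clearing the low bit maps both to the
  // same word-aligned offset, so one 16-bit load covers both bytes.
  STATIC_ASSERT((14 & ~1) == (15 & ~1));  // 14 == 14
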
 
@@ -6115,6 +6191,11 @@
                          Handle<String>::null(), false, false);
   }
 
+  static HObjectAccess ForExternalUInteger8() {
+    return HObjectAccess(kExternalMemory, 0, Representation::UInteger8(),
+                         Handle<String>::null(), false, false);
+  }
+
   // Create an access to an offset in a fixed array header.
   static HObjectAccess ForFixedArrayHeader(int offset);
 
@@ -6144,8 +6225,9 @@
       Representation representation = Representation::Tagged());
 
   // Create an access to a resolved field (in-object or backing store).
-  static HObjectAccess ForField(Handle<Map> map,
-      LookupResult *lookup, Handle<String> name = Handle<String>::null());
+  static HObjectAccess ForField(Handle<Map> map, int index,
+                                Representation representation,
+                                Handle<String> name);
 
   // Create an access for the payload of a Cell or JSGlobalPropertyCell.
   static HObjectAccess ForCellPayload(Isolate* isolate);
@@ -6199,8 +6281,6 @@
     return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
   }
 
-  void PrintTo(StringStream* stream) const;
-
   inline bool Equals(HObjectAccess that) const {
     return value_ == that.value_;  // portion and offset must match
   }
@@ -6236,12 +6316,12 @@
              OffsetField::encode(offset)),
       name_(name) {
     // assert that the fields decode correctly
-    ASSERT(this->offset() == offset);
-    ASSERT(this->portion() == portion);
-    ASSERT(this->immutable() == immutable);
-    ASSERT(this->existing_inobject_property() == existing_inobject_property);
-    ASSERT(RepresentationField::decode(value_) == representation.kind());
-    ASSERT(!this->existing_inobject_property() || IsInobject());
+    DCHECK(this->offset() == offset);
+    DCHECK(this->portion() == portion);
+    DCHECK(this->immutable() == immutable);
+    DCHECK(this->existing_inobject_property() == existing_inobject_property);
+    DCHECK(RepresentationField::decode(value_) == representation.kind());
+    DCHECK(!this->existing_inobject_property() || IsInobject());
   }
 
   class PortionField : public BitField<Portion, 0, 3> {};
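
The block of DCHECKs above verifies that every constructor argument survives the encode/decode round trip through the packed value_ word. A minimal sketch of that round trip using the BitField helpers declared here (OffsetField is assumed to be declared alongside PortionField, as its use in the initializer list suggests):

  // Sketch: pack two fields into one word, then recover them.
  uint32_t packed = PortionField::encode(kInobject) | OffsetField::encode(24);
  DCHECK(PortionField::decode(packed) == kInobject);
  DCHECK(OffsetField::decode(packed) == 24);
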
@@ -6256,6 +6336,7 @@
   friend class HLoadNamedField;
   friend class HStoreNamedField;
   friend class SideEffectsTracker;
+  friend OStream& operator<<(OStream& os, const HObjectAccess& access);
 
   inline Portion portion() const {
     return PortionField::decode(value_);
@@ -6263,16 +6344,19 @@
 };
 
 
-class HLoadNamedField V8_FINAL : public HTemplateInstruction<2> {
+OStream& operator<<(OStream& os, const HObjectAccess& access);
+
+
+class HLoadNamedField FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HLoadNamedField, HValue*,
                                  HValue*, HObjectAccess);
   DECLARE_INSTRUCTION_FACTORY_P5(HLoadNamedField, HValue*, HValue*,
                                  HObjectAccess, const UniqueSet<Map>*, HType);
 
-  HValue* object() { return OperandAt(0); }
-  HValue* dependency() {
-    ASSERT(HasDependency());
+  HValue* object() const { return OperandAt(0); }
+  HValue* dependency() const {
+    DCHECK(HasDependency());
     return OperandAt(1);
   }
   bool HasDependency() const { return OperandAt(0) != OperandAt(1); }
@@ -6283,19 +6367,19 @@
 
   const UniqueSet<Map>* maps() const { return maps_; }
 
-  virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE { return false; }
-  virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  virtual bool HasOutOfBoundsAccess(int size) OVERRIDE {
     return !access().IsInobject() || access().offset() >= size;
   }
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0 && access().IsExternalMemory()) {
       // object must be external in case of external memory access
       return Representation::External();
     }
     return Representation::Tagged();
   }
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   bool CanBeReplacedWith(HValue* other) const {
     if (!CheckFlag(HValue::kCantBeReplaced)) return false;
@@ -6311,7 +6395,7 @@
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     HLoadNamedField* that = HLoadNamedField::cast(other);
     if (!this->access_.Equals(that->access_)) return false;
     if (this->maps_ == that->maps_) return true;
@@ -6325,7 +6409,7 @@
                   HValue* dependency,
                   HObjectAccess access)
       : access_(access), maps_(NULL) {
-    ASSERT_NOT_NULL(object);
+    DCHECK_NOT_NULL(object);
     SetOperandAt(0, object);
     SetOperandAt(1, dependency ? dependency : object);
 
@@ -6361,47 +6445,60 @@
                   const UniqueSet<Map>* maps,
                   HType type)
       : HTemplateInstruction<2>(type), access_(access), maps_(maps) {
-    ASSERT_NOT_NULL(maps);
-    ASSERT_NE(0, maps->size());
+    DCHECK_NOT_NULL(maps);
+    DCHECK_NE(0, maps->size());
 
-    ASSERT_NOT_NULL(object);
+    DCHECK_NOT_NULL(object);
     SetOperandAt(0, object);
     SetOperandAt(1, dependency ? dependency : object);
 
-    ASSERT(access.representation().IsHeapObject());
-    ASSERT(type.IsHeapObject());
+    DCHECK(access.representation().IsHeapObject());
+    DCHECK(type.IsHeapObject());
     set_representation(Representation::Tagged());
 
     access.SetGVNFlags(this, LOAD);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   HObjectAccess access_;
   const UniqueSet<Map>* maps_;
 };
 
 
-class HLoadNamedGeneric V8_FINAL : public HTemplateInstruction<2> {
+class HLoadNamedGeneric FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadNamedGeneric, HValue*,
                                               Handle<Object>);
 
-  HValue* context() { return OperandAt(0); }
-  HValue* object() { return OperandAt(1); }
+  HValue* context() const { return OperandAt(0); }
+  HValue* object() const { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  int slot() const {
+    DCHECK(FLAG_vector_ics &&
+           slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot);
+    return slot_;
+  }
+  Handle<FixedArray> feedback_vector() const { return feedback_vector_; }
+  void SetVectorAndSlot(Handle<FixedArray> vector, int slot) {
+    DCHECK(FLAG_vector_ics);
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
+
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
 
  private:
   HLoadNamedGeneric(HValue* context, HValue* object, Handle<Object> name)
-      : name_(name) {
+      : name_(name),
+        slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) {
     SetOperandAt(0, context);
     SetOperandAt(1, object);
     set_representation(Representation::Tagged());
@@ -6409,23 +6506,25 @@
   }
 
   Handle<Object> name_;
+  Handle<FixedArray> feedback_vector_;
+  int slot_;
 };
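
The new slot()/SetVectorAndSlot() pair threads type-feedback data through the generic load when --vector-ics is enabled; slot_ stays kInvalidFeedbackSlot until the graph builder wires it up, and the same pattern is repeated for HLoadKeyedGeneric below. A hedged usage sketch (the factory call is the one assumed to be generated by DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2; vector and slot stand in for values taken from the function's feedback vector):

  // Sketch: attach a feedback vector and slot to a freshly built load.
  HLoadNamedGeneric* load =
      HLoadNamedGeneric::New(zone, context, object, name);
  if (FLAG_vector_ics) {
    load->SetVectorAndSlot(vector, slot);
    DCHECK_EQ(slot, load->slot());
  }
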
 
 
-class HLoadFunctionPrototype V8_FINAL : public HUnaryOperation {
+class HLoadFunctionPrototype FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HLoadFunctionPrototype, HValue*);
 
   HValue* function() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HLoadFunctionPrototype(HValue* function)
@@ -6441,9 +6540,9 @@
   virtual HValue* GetKey() = 0;
   virtual void SetKey(HValue* key) = 0;
   virtual ElementsKind elements_kind() const = 0;
-  virtual void IncreaseBaseOffset(uint32_t base_offset) = 0;
-  virtual int MaxBaseOffsetBits() = 0;
-  virtual bool IsDehoisted() = 0;
+  // TryIncreaseBaseOffset returns false if overflow would result.
+  virtual bool TryIncreaseBaseOffset(uint32_t increase_by_value) = 0;
+  virtual bool IsDehoisted() const = 0;
   virtual void SetDehoisted(bool is_dehoisted) = 0;
   virtual ~ArrayInstructionInterface() { }
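
IncreaseBaseOffset() and MaxBaseOffsetBits() collapse into one fallible operation: instead of callers budgeting bits up front, TryIncreaseBaseOffset() refuses an increase that would overflow and the caller keeps the old offset. A minimal sketch of the shape such an implementation takes (the real bodies for HLoadKeyed and HStoreKeyed live in hydrogen-instructions.cc; HLoadKeyed additionally re-encodes the result through its BaseOffsetField, so its limit is the field width rather than uint32_t):

  // Sketch: detect unsigned wraparound instead of silently truncating.
  bool TryIncreaseBaseOffset(uint32_t increase_by_value) {
    uint32_t new_offset = base_offset_ + increase_by_value;
    if (new_offset < base_offset_) return false;  // would overflow
    base_offset_ = new_offset;
    return true;
  }
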
 
@@ -6462,7 +6561,7 @@
 };
 
 
-class HLoadKeyed V8_FINAL
+class HLoadKeyed FINAL
     : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   DECLARE_INSTRUCTION_FACTORY_P4(HLoadKeyed, HValue*, HValue*, HValue*,
@@ -6481,39 +6580,29 @@
   bool is_typed_elements() const {
     return is_external() || is_fixed_typed_array();
   }
-  HValue* elements() { return OperandAt(0); }
-  HValue* key() { return OperandAt(1); }
-  HValue* dependency() {
-    ASSERT(HasDependency());
+  HValue* elements() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* dependency() const {
+    DCHECK(HasDependency());
     return OperandAt(2);
   }
   bool HasDependency() const { return OperandAt(0) != OperandAt(2); }
-  uint32_t base_offset() { return BaseOffsetField::decode(bit_field_); }
-  void IncreaseBaseOffset(uint32_t base_offset) {
-    // The base offset is usually simply the size of the array header, except
-    // that dehoisting adds an additional offset due to an array index key
-    // manipulation, in which case it becomes (array header size +
-    // constant-offset-from-key * kPointerSize)
-    base_offset += BaseOffsetField::decode(bit_field_);
-    bit_field_ = BaseOffsetField::update(bit_field_, base_offset);
-  }
-  virtual int MaxBaseOffsetBits() {
-    return kBitsForBaseOffset;
-  }
+  uint32_t base_offset() const { return BaseOffsetField::decode(bit_field_); }
+  bool TryIncreaseBaseOffset(uint32_t increase_by_value);
   HValue* GetKey() { return key(); }
   void SetKey(HValue* key) { SetOperandAt(1, key); }
-  bool IsDehoisted() { return IsDehoistedField::decode(bit_field_); }
+  bool IsDehoisted() const { return IsDehoistedField::decode(bit_field_); }
   void SetDehoisted(bool is_dehoisted) {
     bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
   }
-  virtual ElementsKind elements_kind() const V8_OVERRIDE {
+  virtual ElementsKind elements_kind() const OVERRIDE {
     return ElementsKindField::decode(bit_field_);
   }
   LoadKeyedHoleMode hole_mode() const {
     return HoleModeField::decode(bit_field_);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     // kind_fast:                 tagged[int32] (none)
     // kind_double:               tagged[int32] (none)
     // kind_fixed_typed_array:    tagged[int32] (none)
@@ -6529,22 +6618,22 @@
     return Representation::None();
   }
 
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+  virtual Representation observed_input_representation(int index) OVERRIDE {
     return RequiredInputRepresentation(index);
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   bool UsesMustHandleHole() const;
   bool AllUsesCanTreatHoleAsNaN() const;
   bool RequiresHoleCheck() const;
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE;
+  virtual Range* InferRange(Zone* zone) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     if (!other->IsLoadKeyed()) return false;
     HLoadKeyed* other_load = HLoadKeyed::cast(other);
 
@@ -6575,7 +6664,7 @@
     if (!is_typed_elements()) {
       // We can distinguish between storing double (holey and fast) and
       // smi/object by looking at elements_kind_.
-      ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
+      DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
              IsFastDoubleElementsKind(elements_kind));
 
       if (IsFastSmiOrObjectElementsKind(elements_kind)) {
@@ -6621,7 +6710,7 @@
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE {
+  virtual bool IsDeletable() const OVERRIDE {
     return !RequiresHoleCheck();
   }
 
@@ -6657,33 +6746,48 @@
 };
 
 
-class HLoadKeyedGeneric V8_FINAL : public HTemplateInstruction<3> {
+class HLoadKeyedGeneric FINAL : public HTemplateInstruction<3> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HLoadKeyedGeneric, HValue*,
                                               HValue*);
-  HValue* object() { return OperandAt(0); }
-  HValue* key() { return OperandAt(1); }
-  HValue* context() { return OperandAt(2); }
+  HValue* object() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* context() const { return OperandAt(2); }
+  int slot() const {
+    DCHECK(FLAG_vector_ics &&
+           slot_ != FeedbackSlotInterface::kInvalidFeedbackSlot);
+    return slot_;
+  }
+  Handle<FixedArray> feedback_vector() const { return feedback_vector_; }
+  void SetVectorAndSlot(Handle<FixedArray> vector, int slot) {
+    DCHECK(FLAG_vector_ics);
+    feedback_vector_ = vector;
+    slot_ = slot;
+  }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     // tagged[tagged]
     return Representation::Tagged();
   }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
 
  private:
-  HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key) {
+  HLoadKeyedGeneric(HValue* context, HValue* obj, HValue* key)
+      : slot_(FeedbackSlotInterface::kInvalidFeedbackSlot) {
     set_representation(Representation::Tagged());
     SetOperandAt(0, obj);
     SetOperandAt(1, key);
     SetOperandAt(2, context);
     SetAllSideEffects();
   }
+
+  Handle<FixedArray> feedback_vector_;
+  int slot_;
 };
 
 
@@ -6698,7 +6802,7 @@
 };
 
 
-class HStoreNamedField V8_FINAL : public HTemplateInstruction<3> {
+class HStoreNamedField FINAL : public HTemplateInstruction<3> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HStoreNamedField, HValue*,
                                  HObjectAccess, HValue*);
@@ -6707,13 +6811,13 @@
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
-  virtual bool HasEscapingOperandAt(int index) V8_OVERRIDE {
+  virtual bool HasEscapingOperandAt(int index) OVERRIDE {
     return index == 1;
   }
-  virtual bool HasOutOfBoundsAccess(int size) V8_OVERRIDE {
+  virtual bool HasOutOfBoundsAccess(int size) OVERRIDE {
     return !access().IsInobject() || access().offset() >= size;
   }
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0 && access().IsExternalMemory()) {
       // object must be external in case of external memory access
       return Representation::External();
@@ -6738,13 +6842,13 @@
     return Representation::Tagged();
   }
   virtual bool HandleSideEffectDominator(GVNFlag side_effect,
-                                         HValue* dominator) V8_OVERRIDE {
-    ASSERT(side_effect == kNewSpacePromotion);
+                                         HValue* dominator) OVERRIDE {
+    DCHECK(side_effect == kNewSpacePromotion);
     if (!FLAG_use_write_barrier_elimination) return false;
     dominator_ = dominator;
     return false;
   }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   HValue* object() const { return OperandAt(0); }
   HValue* value() const { return OperandAt(1); }
@@ -6765,14 +6869,14 @@
   }
 
   void SetTransition(HConstant* transition) {
-    ASSERT(!has_transition());  // Only set once.
+    DCHECK(!has_transition());  // Only set once.
     SetOperandAt(2, transition);
     has_transition_ = true;
     SetChangesFlag(kMaps);
   }
 
-  bool NeedsWriteBarrier() {
-    ASSERT(!field_representation().IsDouble() || !has_transition());
+  bool NeedsWriteBarrier() const {
+    DCHECK(!field_representation().IsDouble() || !has_transition());
     if (field_representation().IsDouble()) return false;
     if (field_representation().IsSmi()) return false;
     if (field_representation().IsInteger32()) return false;
@@ -6828,7 +6932,7 @@
         store_mode_(store_mode) {
     // Stores to a non-existing in-object property are allowed only on
     // newly allocated objects (via HAllocate or HInnerAllocatedObject).
-    ASSERT(!access.IsInobject() || access.existing_inobject_property() ||
+    DCHECK(!access.IsInobject() || access.existing_inobject_property() ||
            obj->IsAllocate() || obj->IsInnerAllocatedObject());
     SetOperandAt(0, obj);
     SetOperandAt(1, val);
@@ -6843,20 +6947,20 @@
 };
 
 
-class HStoreNamedGeneric V8_FINAL : public HTemplateInstruction<3> {
+class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreNamedGeneric, HValue*,
                                               Handle<String>, HValue*,
                                               StrictMode);
-  HValue* object() { return OperandAt(0); }
-  HValue* value() { return OperandAt(1); }
-  HValue* context() { return OperandAt(2); }
-  Handle<String> name() { return name_; }
-  StrictMode strict_mode() { return strict_mode_; }
+  HValue* object() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
+  HValue* context() const { return OperandAt(2); }
+  Handle<String> name() const { return name_; }
+  StrictMode strict_mode() const { return strict_mode_; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -6881,7 +6985,7 @@
 };
 
 
-class HStoreKeyed V8_FINAL
+class HStoreKeyed FINAL
     : public HTemplateInstruction<3>, public ArrayInstructionInterface {
  public:
   DECLARE_INSTRUCTION_FACTORY_P4(HStoreKeyed, HValue*, HValue*, HValue*,
@@ -6891,7 +6995,7 @@
   DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
                                  ElementsKind, StoreFieldOrKeyedMode, int);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     // kind_fast:               tagged[int32] = tagged
     // kind_double:             tagged[int32] = double
     // kind_smi   :             tagged[int32] = smi
@@ -6905,7 +7009,7 @@
           OperandAt(1)->representation());
     }
 
-    ASSERT_EQ(index, 2);
+    DCHECK_EQ(index, 2);
     return RequiredValueRepresentation(elements_kind_, store_mode_);
   }
 
@@ -6942,7 +7046,7 @@
     return is_external() || is_fixed_typed_array();
   }
 
-  virtual Representation observed_input_representation(int index) V8_OVERRIDE {
+  virtual Representation observed_input_representation(int index) OVERRIDE {
     if (index < 2) return RequiredInputRepresentation(index);
     if (IsUninitialized()) {
       return Representation::None();
@@ -6961,20 +7065,11 @@
   }
   StoreFieldOrKeyedMode store_mode() const { return store_mode_; }
   ElementsKind elements_kind() const { return elements_kind_; }
-  uint32_t base_offset() { return base_offset_; }
-  void IncreaseBaseOffset(uint32_t base_offset) {
-    // The base offset is usually simply the size of the array header, except
-    // that dehoisting adds an additional offset due to an array index key
-    // manipulation, in which case it becomes (array header size +
-    // constant-offset-from-key * kPointerSize)
-    base_offset_ += base_offset;
-  }
-  virtual int MaxBaseOffsetBits() {
-    return 31 - ElementsKindToShiftSize(elements_kind_);
-  }
+  uint32_t base_offset() const { return base_offset_; }
+  bool TryIncreaseBaseOffset(uint32_t increase_by_value);
   HValue* GetKey() { return key(); }
   void SetKey(HValue* key) { SetOperandAt(1, key); }
-  bool IsDehoisted() { return is_dehoisted_; }
+  bool IsDehoisted() const { return is_dehoisted_; }
   void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
   bool IsUninitialized() { return is_uninitialized_; }
   void SetUninitialized(bool is_uninitialized) {
@@ -6986,8 +7081,8 @@
   }
 
   virtual bool HandleSideEffectDominator(GVNFlag side_effect,
-                                         HValue* dominator) V8_OVERRIDE {
-    ASSERT(side_effect == kNewSpacePromotion);
+                                         HValue* dominator) OVERRIDE {
+    DCHECK(side_effect == kNewSpacePromotion);
     dominator_ = dominator;
     return false;
   }
@@ -7009,7 +7104,7 @@
 
   bool NeedsCanonicalization();
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
 
@@ -7066,23 +7161,23 @@
 };
 
 
-class HStoreKeyedGeneric V8_FINAL : public HTemplateInstruction<4> {
+class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HStoreKeyedGeneric, HValue*,
                                               HValue*, HValue*, StrictMode);
 
-  HValue* object() { return OperandAt(0); }
-  HValue* key() { return OperandAt(1); }
-  HValue* value() { return OperandAt(2); }
-  HValue* context() { return OperandAt(3); }
-  StrictMode strict_mode() { return strict_mode_; }
+  HValue* object() const { return OperandAt(0); }
+  HValue* key() const { return OperandAt(1); }
+  HValue* value() const { return OperandAt(2); }
+  HValue* context() const { return OperandAt(3); }
+  StrictMode strict_mode() const { return strict_mode_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     // tagged[tagged] = tagged
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
 
@@ -7104,7 +7199,7 @@
 };
 
 
-class HTransitionElementsKind V8_FINAL : public HTemplateInstruction<2> {
+class HTransitionElementsKind FINAL : public HTemplateInstruction<2> {
  public:
   inline static HTransitionElementsKind* New(Zone* zone,
                                              HValue* context,
@@ -7115,23 +7210,23 @@
                                              original_map, transitioned_map);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  HValue* object() { return OperandAt(0); }
-  HValue* context() { return OperandAt(1); }
-  Unique<Map> original_map() { return original_map_; }
-  Unique<Map> transitioned_map() { return transitioned_map_; }
-  ElementsKind from_kind() { return from_kind_; }
-  ElementsKind to_kind() { return to_kind_; }
+  HValue* object() const { return OperandAt(0); }
+  HValue* context() const { return OperandAt(1); }
+  Unique<Map> original_map() const { return original_map_; }
+  Unique<Map> transitioned_map() const { return transitioned_map_; }
+  ElementsKind from_kind() const { return from_kind_; }
+  ElementsKind to_kind() const { return to_kind_; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
     return original_map_ == instr->original_map_ &&
            transitioned_map_ == instr->transitioned_map_;
@@ -7166,7 +7261,7 @@
 };
 
 
-class HStringAdd V8_FINAL : public HBinaryOperation {
+class HStringAdd FINAL : public HBinaryOperation {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
@@ -7180,16 +7275,16 @@
   StringAddFlags flags() const { return flags_; }
   PretenureFlag pretenure_flag() const { return pretenure_flag_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StringAdd)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     return flags_ == HStringAdd::cast(other)->flags_ &&
         pretenure_flag_ == HStringAdd::cast(other)->pretenure_flag_;
   }
@@ -7217,14 +7312,14 @@
   }
 
   // No side-effects except possible allocation:
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   const StringAddFlags flags_;
   const PretenureFlag pretenure_flag_;
 };
 
 
-class HStringCharCodeAt V8_FINAL : public HTemplateInstruction<3> {
+class HStringCharCodeAt FINAL : public HTemplateInstruction<3> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HStringCharCodeAt,
                                               HValue*,
@@ -7244,9 +7339,9 @@
   DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
+  virtual Range* InferRange(Zone* zone) OVERRIDE {
     return new(zone) Range(0, String::kMaxUtf16CodeUnit);
   }
 
@@ -7263,17 +7358,17 @@
   }
 
   // No side effects: runtime function assumes string + number inputs.
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HStringCharFromCode V8_FINAL : public HTemplateInstruction<2> {
+class HStringCharFromCode FINAL : public HTemplateInstruction<2> {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
                            HValue* char_code);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return index == 0
         ? Representation::Tagged()
         : Representation::Integer32();
@@ -7282,7 +7377,7 @@
   HValue* context() const { return OperandAt(0); }
   HValue* value() const { return OperandAt(1); }
 
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE { return true; }
+  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
 
   DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
 
@@ -7296,7 +7391,7 @@
     SetChangesFlag(kNewSpacePromotion);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE {
+  virtual bool IsDeletable() const OVERRIDE {
     return !value()->ToNumberCanBeObserved();
   }
 };
@@ -7323,7 +7418,7 @@
   }
 
  private:
-  virtual bool IsDeletable() const V8_FINAL V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const FINAL OVERRIDE { return true; }
 
   int literal_index_;
   int depth_;
@@ -7331,7 +7426,7 @@
 };
 
 
-class HRegExpLiteral V8_FINAL : public HMaterializedLiteral<1> {
+class HRegExpLiteral FINAL : public HMaterializedLiteral<1> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(HRegExpLiteral,
                                               Handle<FixedArray>,
@@ -7344,7 +7439,7 @@
   Handle<String> pattern() { return pattern_; }
   Handle<String> flags() { return flags_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7371,14 +7466,14 @@
 };
 
 
-class HFunctionLiteral V8_FINAL : public HTemplateInstruction<1> {
+class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HFunctionLiteral,
                                               Handle<SharedFunctionInfo>,
                                               bool);
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7387,44 +7482,46 @@
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
   bool pretenure() const { return pretenure_; }
   bool has_no_literals() const { return has_no_literals_; }
-  bool is_generator() const { return is_generator_; }
+  bool is_arrow() const { return IsArrowFunction(kind_); }
+  bool is_generator() const { return IsGeneratorFunction(kind_); }
+  bool is_concise_method() const { return IsConciseMethod(kind_); }
+  FunctionKind kind() const { return kind_; }
   StrictMode strict_mode() const { return strict_mode_; }
 
  private:
-  HFunctionLiteral(HValue* context,
-                   Handle<SharedFunctionInfo> shared,
+  HFunctionLiteral(HValue* context, Handle<SharedFunctionInfo> shared,
                    bool pretenure)
       : HTemplateInstruction<1>(HType::JSObject()),
         shared_info_(shared),
+        kind_(shared->kind()),
         pretenure_(pretenure),
         has_no_literals_(shared->num_literals() == 0),
-        is_generator_(shared->is_generator()),
         strict_mode_(shared->strict_mode()) {
     SetOperandAt(0, context);
     set_representation(Representation::Tagged());
     SetChangesFlag(kNewSpacePromotion);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   Handle<SharedFunctionInfo> shared_info_;
+  FunctionKind kind_;
   bool pretenure_ : 1;
   bool has_no_literals_ : 1;
-  bool is_generator_ : 1;
   StrictMode strict_mode_;
 };
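
Collapsing the is_generator_ bit into a FunctionKind lets a single field, captured once from the SharedFunctionInfo, answer all three predicates above. A short sketch of the assumed helper style (IsArrowFunction and friends are the predicates referenced in the accessors; their definitions are assumed to live with the FunctionKind enum):

  // Sketch: several predicates derived from one captured kind.
  FunctionKind kind = shared->kind();
  bool is_generator = IsGeneratorFunction(kind);  // replaces is_generator_
  bool is_arrow     = IsArrowFunction(kind);
  bool is_concise   = IsConciseMethod(kind);
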
 
 
-class HTypeof V8_FINAL : public HTemplateInstruction<2> {
+class HTypeof FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HTypeof, HValue*);
 
-  HValue* context() { return OperandAt(0); }
-  HValue* value() { return OperandAt(1); }
+  HValue* context() const { return OperandAt(0); }
+  HValue* value() const { return OperandAt(1); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7437,15 +7534,15 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HTrapAllocationMemento V8_FINAL : public HTemplateInstruction<1> {
+class HTrapAllocationMemento FINAL : public HTemplateInstruction<1> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7460,11 +7557,11 @@
 };
 
 
-class HToFastProperties V8_FINAL : public HUnaryOperation {
+class HToFastProperties FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7478,24 +7575,24 @@
     // This instruction is not marked as kChangesMaps, but does
     // change the map of the input operand. Use it only when creating
     // object literals via a runtime call.
-    ASSERT(value->IsCallRuntime());
+    DCHECK(value->IsCallRuntime());
 #ifdef DEBUG
     const Runtime::Function* function = HCallRuntime::cast(value)->function();
-    ASSERT(function->function_id == Runtime::kHiddenCreateObjectLiteral);
+    DCHECK(function->function_id == Runtime::kCreateObjectLiteral);
 #endif
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
-class HDateField V8_FINAL : public HUnaryOperation {
+class HDateField FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HDateField, HValue*, Smi*);
 
   Smi* index() const { return index_; }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7511,7 +7608,7 @@
 };
 
 
-class HSeqStringGetChar V8_FINAL : public HTemplateInstruction<2> {
+class HSeqStringGetChar FINAL : public HTemplateInstruction<2> {
  public:
   static HInstruction* New(Zone* zone,
                            HValue* context,
@@ -7519,7 +7616,7 @@
                            HValue* string,
                            HValue* index);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return (index == 0) ? Representation::Tagged()
                         : Representation::Integer32();
   }
@@ -7531,15 +7628,15 @@
   DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar)
 
  protected:
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     return encoding() == HSeqStringGetChar::cast(other)->encoding();
   }
 
-  virtual Range* InferRange(Zone* zone) V8_OVERRIDE {
+  virtual Range* InferRange(Zone* zone) OVERRIDE {
     if (encoding() == String::ONE_BYTE_ENCODING) {
       return new(zone) Range(0, String::kMaxOneByteCharCode);
     } else {
-      ASSERT_EQ(String::TWO_BYTE_ENCODING, encoding());
+      DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding());
       return new(zone) Range(0, String::kMaxUtf16CodeUnit);
     }
   }
@@ -7555,13 +7652,13 @@
     SetDependsOnFlag(kStringChars);
   }
 
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 
   String::Encoding encoding_;
 };
 
 
-class HSeqStringSetChar V8_FINAL : public HTemplateInstruction<4> {
+class HSeqStringSetChar FINAL : public HTemplateInstruction<4> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P4(
       HSeqStringSetChar, String::Encoding,
@@ -7573,7 +7670,7 @@
   HValue* index() { return OperandAt(2); }
   HValue* value() { return OperandAt(3); }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return (index <= 1) ? Representation::Tagged()
                         : Representation::Integer32();
   }
@@ -7598,17 +7695,17 @@
 };
 
 
-class HCheckMapValue V8_FINAL : public HTemplateInstruction<2> {
+class HCheckMapValue FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual HType CalculateInferredType() V8_OVERRIDE {
+  virtual HType CalculateInferredType() OVERRIDE {
     if (value()->type().IsHeapObject()) return value()->type();
     return HType::HeapObject();
   }
@@ -7616,14 +7713,14 @@
   HValue* value() const { return OperandAt(0); }
   HValue* map() const { return OperandAt(1); }
 
-  virtual HValue* Canonicalize() V8_OVERRIDE;
+  virtual HValue* Canonicalize() OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
 
  protected:
   virtual int RedefinedOperandIndex() { return 0; }
 
-  virtual bool DataEquals(HValue* other) V8_OVERRIDE {
+  virtual bool DataEquals(HValue* other) OVERRIDE {
     return true;
   }
 
@@ -7640,20 +7737,20 @@
 };
 
 
-class HForInPrepareMap V8_FINAL : public HTemplateInstruction<2> {
+class HForInPrepareMap FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  HValue* context() { return OperandAt(0); }
-  HValue* enumerable() { return OperandAt(1); }
+  HValue* context() const { return OperandAt(0); }
+  HValue* enumerable() const { return OperandAt(1); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual HType CalculateInferredType() V8_OVERRIDE {
+  virtual HType CalculateInferredType() OVERRIDE {
     return HType::Tagged();
   }
 
@@ -7670,17 +7767,17 @@
 };
 
 
-class HForInCacheArray V8_FINAL : public HTemplateInstruction<2> {
+class HForInCacheArray FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int);
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  HValue* enumerable() { return OperandAt(0); }
-  HValue* map() { return OperandAt(1); }
-  int idx() { return idx_; }
+  HValue* enumerable() const { return OperandAt(0); }
+  HValue* map() const { return OperandAt(1); }
+  int idx() const { return idx_; }
 
   HForInCacheArray* index_cache() {
     return index_cache_;
@@ -7690,9 +7787,9 @@
     index_cache_ = index_cache;
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual HType CalculateInferredType() V8_OVERRIDE {
+  virtual HType CalculateInferredType() OVERRIDE {
     return HType::Tagged();
   }
 
@@ -7712,7 +7809,7 @@
 };
 
 
-class HLoadFieldByIndex V8_FINAL : public HTemplateInstruction<2> {
+class HLoadFieldByIndex FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HLoadFieldByIndex, HValue*, HValue*);
 
@@ -7724,7 +7821,7 @@
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) V8_OVERRIDE {
+  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 1) {
       return Representation::Smi();
     } else {
@@ -7732,19 +7829,19 @@
     }
   }
 
-  HValue* object() { return OperandAt(0); }
-  HValue* index() { return OperandAt(1); }
+  HValue* object() const { return OperandAt(0); }
+  HValue* index() const { return OperandAt(1); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual OStream& PrintDataTo(OStream& os) const OVERRIDE;  // NOLINT
 
-  virtual HType CalculateInferredType() V8_OVERRIDE {
+  virtual HType CalculateInferredType() OVERRIDE {
     return HType::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
 
  private:
-  virtual bool IsDeletable() const V8_OVERRIDE { return true; }
+  virtual bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -7772,15 +7869,15 @@
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HAllocateBlockContext, HValue*,
                                  HValue*, Handle<ScopeInfo>);
-  HValue* context() { return OperandAt(0); }
-  HValue* function() { return OperandAt(1); }
-  Handle<ScopeInfo> scope_info() { return scope_info_; }
+  HValue* context() const { return OperandAt(0); }
+  HValue* function() const { return OperandAt(1); }
+  Handle<ScopeInfo> scope_info() const { return scope_info_; }
 
   virtual Representation RequiredInputRepresentation(int index) {
     return Representation::Tagged();
   }
 
-  virtual void PrintDataTo(StringStream* stream);
+  virtual OStream& PrintDataTo(OStream& os) const;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext)
 
diff --git a/src/hydrogen-load-elimination.cc b/src/hydrogen-load-elimination.cc
index 5cefcf7..c5fd88b 100644
--- a/src/hydrogen-load-elimination.cc
+++ b/src/hydrogen-load-elimination.cc
@@ -3,9 +3,9 @@
 // found in the LICENSE file.
 
 #include "src/hydrogen-alias-analysis.h"
-#include "src/hydrogen-load-elimination.h"
-#include "src/hydrogen-instructions.h"
 #include "src/hydrogen-flow-engine.h"
+#include "src/hydrogen-instructions.h"
+#include "src/hydrogen-load-elimination.h"
 
 namespace v8 {
 namespace internal {
@@ -122,7 +122,7 @@
                                       HLoadEliminationTable* pred_state,
                                       HBasicBlock* pred_block,
                                       Zone* zone) {
-    ASSERT(pred_state != NULL);
+    DCHECK(pred_state != NULL);
     if (succ_state == NULL) {
       return pred_state->Copy(succ_block, pred_block, zone);
     } else {
@@ -135,7 +135,7 @@
   static HLoadEliminationTable* Finish(HLoadEliminationTable* state,
                                        HBasicBlock* block,
                                        Zone* zone) {
-    ASSERT(state != NULL);
+    DCHECK(state != NULL);
     return state;
   }
 
@@ -200,7 +200,7 @@
   // which the load should be replaced. Otherwise, return {instr}.
   HValue* load(HLoadNamedField* instr) {
     // There must be no loads from non observable in-object properties.
-    ASSERT(!instr->access().IsInobject() ||
+    DCHECK(!instr->access().IsInobject() ||
            instr->access().existing_inobject_property());
 
     int field = FieldOf(instr->access());
@@ -382,7 +382,7 @@
   // farthest away from the current instruction.
   HFieldApproximation* ReuseLastApproximation(int field) {
     HFieldApproximation* approx = fields_[field];
-    ASSERT(approx != NULL);
+    DCHECK(approx != NULL);
 
     HFieldApproximation* prev = NULL;
     while (approx->next_ != NULL) {
diff --git a/src/hydrogen-mark-deoptimize.cc b/src/hydrogen-mark-deoptimize.cc
index 998be07..47642e4 100644
--- a/src/hydrogen-mark-deoptimize.cc
+++ b/src/hydrogen-mark-deoptimize.cc
@@ -20,8 +20,8 @@
 
 
 void HMarkDeoptimizeOnUndefinedPhase::ProcessPhi(HPhi* phi) {
-  ASSERT(phi->CheckFlag(HValue::kAllowUndefinedAsNaN));
-  ASSERT(worklist_.is_empty());
+  DCHECK(phi->CheckFlag(HValue::kAllowUndefinedAsNaN));
+  DCHECK(worklist_.is_empty());
 
   // Push the phi onto the worklist
   phi->ClearFlag(HValue::kAllowUndefinedAsNaN);
diff --git a/src/hydrogen-osr.cc b/src/hydrogen-osr.cc
index b2b15f1..89c28ac 100644
--- a/src/hydrogen-osr.cc
+++ b/src/hydrogen-osr.cc
@@ -15,13 +15,13 @@
 
 
 HBasicBlock* HOsrBuilder::BuildOsrLoopEntry(IterationStatement* statement) {
-  ASSERT(HasOsrEntryAt(statement));
+  DCHECK(HasOsrEntryAt(statement));
 
   Zone* zone = builder_->zone();
   HGraph* graph = builder_->graph();
 
   // only one OSR point per compile is allowed.
-  ASSERT(graph->osr() == NULL);
+  DCHECK(graph->osr() == NULL);
 
   // remember this builder as the one OSR builder in the graph.
   graph->set_osr(this);
diff --git a/src/hydrogen-range-analysis.cc b/src/hydrogen-range-analysis.cc
index 64d1dc0..f5c5a9f 100644
--- a/src/hydrogen-range-analysis.cc
+++ b/src/hydrogen-range-analysis.cc
@@ -26,7 +26,7 @@
   if (FLAG_trace_range) {
     va_list arguments;
     va_start(arguments, msg);
-    OS::VPrint(msg, arguments);
+    base::OS::VPrint(msg, arguments);
     va_end(arguments);
   }
 }
@@ -64,9 +64,9 @@
         // Propagate flags for negative zero checks upwards from conversions
         // int32-to-tagged and int32-to-double.
         Representation from = instr->value()->representation();
-        ASSERT(from.Equals(instr->from()));
+        DCHECK(from.Equals(instr->from()));
         if (from.IsSmiOrInteger32()) {
-          ASSERT(instr->to().IsTagged() ||
+          DCHECK(instr->to().IsTagged() ||
                 instr->to().IsDouble() ||
                 instr->to().IsSmiOrInteger32());
           PropagateMinusZeroChecks(instr->value());
@@ -121,7 +121,7 @@
 
 void HRangeAnalysisPhase::InferControlFlowRange(HCompareNumericAndBranch* test,
                                                 HBasicBlock* dest) {
-  ASSERT((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
+  DCHECK((test->FirstSuccessor() == dest) == (test->SecondSuccessor() != dest));
   if (test->representation().IsSmiOrInteger32()) {
     Token::Value op = test->token();
     if (test->SecondSuccessor() == dest) {
@@ -170,7 +170,7 @@
 
 
 void HRangeAnalysisPhase::InferRange(HValue* value) {
-  ASSERT(!value->HasRange());
+  DCHECK(!value->HasRange());
   if (!value->representation().IsNone()) {
     value->ComputeInitialRange(graph()->zone());
     Range* range = value->range();
@@ -184,7 +184,7 @@
 
 
 void HRangeAnalysisPhase::RollBackTo(int index) {
-  ASSERT(index <= changed_ranges_.length());
+  DCHECK(index <= changed_ranges_.length());
   for (int i = index; i < changed_ranges_.length(); ++i) {
     changed_ranges_[i]->RemoveLastAddedRange();
   }
@@ -213,8 +213,8 @@
 
 
 void HRangeAnalysisPhase::PropagateMinusZeroChecks(HValue* value) {
-  ASSERT(worklist_.is_empty());
-  ASSERT(in_worklist_.IsEmpty());
+  DCHECK(worklist_.is_empty());
+  DCHECK(in_worklist_.IsEmpty());
 
   AddToWorklist(value);
   while (!worklist_.is_empty()) {
@@ -282,8 +282,8 @@
   }
 
   in_worklist_.Clear();
-  ASSERT(in_worklist_.IsEmpty());
-  ASSERT(worklist_.is_empty());
+  DCHECK(in_worklist_.IsEmpty());
+  DCHECK(worklist_.is_empty());
 }
 
 
diff --git a/src/hydrogen-redundant-phi.cc b/src/hydrogen-redundant-phi.cc
index 67d534e..0b9b0aa 100644
--- a/src/hydrogen-redundant-phi.cc
+++ b/src/hydrogen-redundant-phi.cc
@@ -25,7 +25,7 @@
   // Make sure that we *really* removed all redundant phis.
   for (int i = 0; i < blocks->length(); ++i) {
     for (int j = 0; j < blocks->at(i)->phis()->length(); j++) {
-      ASSERT(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL);
+      DCHECK(blocks->at(i)->phis()->at(j)->GetRedundantReplacement() == NULL);
     }
   }
 #endif
diff --git a/src/hydrogen-removable-simulates.cc b/src/hydrogen-removable-simulates.cc
index 43e9d18..a28021d 100644
--- a/src/hydrogen-removable-simulates.cc
+++ b/src/hydrogen-removable-simulates.cc
@@ -37,7 +37,7 @@
     }
     // Ensure there's a non-foldable HSimulate before an HEnterInlined to avoid
     // folding across HEnterInlined.
-    ASSERT(!(instr->IsEnterInlined() &&
+    DCHECK(!(instr->IsEnterInlined() &&
              HSimulate::cast(instr->previous())->is_candidate_for_removal()));
     if (instr->IsLeaveInlined() || instr->IsReturn()) {
       // Never fold simulates from inlined environments into simulates in the
@@ -64,7 +64,7 @@
       Remember(current_simulate);
       FlushSimulates();
     } else if (current_simulate->ast_id().IsNone()) {
-      ASSERT(current_simulate->next()->IsEnterInlined());
+      DCHECK(current_simulate->next()->IsEnterInlined());
       FlushSimulates();
     } else if (current_simulate->previous()->HasObservableSideEffects()) {
       Remember(current_simulate);
@@ -93,7 +93,7 @@
     }
     // For our current local analysis, we should not remember simulates across
     // block boundaries.
-    ASSERT(!state->HasRememberedSimulates());
+    DCHECK(!state->HasRememberedSimulates());
     // Nasty heuristic: Never remove the first simulate in a block. This
     // just so happens to have a beneficial effect on register allocation.
     state->first_ = true;
@@ -143,8 +143,8 @@
                Zone* zone) {
     // For our current local analysis, we should not remember simulates across
     // block boundaries.
-    ASSERT(!pred_state->HasRememberedSimulates());
-    ASSERT(!HasRememberedSimulates());
+    DCHECK(!pred_state->HasRememberedSimulates());
+    DCHECK(!HasRememberedSimulates());
     if (FLAG_trace_removable_simulates) {
       PrintF("[merge state %p from B%d into %p for B%d]\n",
              reinterpret_cast<void*>(pred_state), pred_block->block_id(),
diff --git a/src/hydrogen-representation-changes.cc b/src/hydrogen-representation-changes.cc
index 6cca536..ebb03b5 100644
--- a/src/hydrogen-representation-changes.cc
+++ b/src/hydrogen-representation-changes.cc
@@ -41,7 +41,7 @@
     if (!use_value->operand_position(use_index).IsUnknown()) {
       new_value->set_position(use_value->operand_position(use_index));
     } else {
-      ASSERT(!FLAG_hydrogen_track_positions ||
+      DCHECK(!FLAG_hydrogen_track_positions ||
              !graph()->info()->IsOptimizing());
     }
   }
@@ -55,7 +55,7 @@
   Representation from_rep = change->from();
   Representation to_rep = change->to();
   // Flags indicating Uint32 operations are set in a later Hydrogen phase.
-  ASSERT(!change->CheckFlag(HValue::kUint32));
+  DCHECK(!change->CheckFlag(HValue::kUint32));
   return from_rep.IsInteger32() && to_rep.IsSmi() && SmiValuesAre32Bits();
 }
 
@@ -93,7 +93,7 @@
     InsertRepresentationChangeForUse(value, use_value, use_index, req);
   }
   if (value->HasNoUses()) {
-    ASSERT(value->IsConstant() || value->IsForceRepresentation());
+    DCHECK(value->IsConstant() || value->IsForceRepresentation());
     value->DeleteAndReplaceWith(NULL);
   } else {
     // The only purpose of a HForceRepresentation is to represent the value
diff --git a/src/hydrogen-store-elimination.cc b/src/hydrogen-store-elimination.cc
index eb2bcf4..ee718e6 100644
--- a/src/hydrogen-store-elimination.cc
+++ b/src/hydrogen-store-elimination.cc
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/hydrogen-store-elimination.h"
 #include "src/hydrogen-instructions.h"
+#include "src/hydrogen-store-elimination.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/hydrogen-types.cc b/src/hydrogen-types.cc
index 2da8a59..87047a2 100644
--- a/src/hydrogen-types.cc
+++ b/src/hydrogen-types.cc
@@ -4,6 +4,7 @@
 
 #include "src/hydrogen-types.h"
 
+#include "src/ostreams.h"
 #include "src/types-inl.h"
 
 
@@ -41,27 +42,32 @@
 HType HType::FromValue(Handle<Object> value) {
   if (value->IsSmi()) return HType::Smi();
   if (value->IsNull()) return HType::Null();
-  if (value->IsHeapNumber()) return HType::HeapNumber();
+  if (value->IsHeapNumber()) {
+    double n = Handle<v8::internal::HeapNumber>::cast(value)->value();
+    return IsSmiDouble(n) ? HType::Smi() : HType::HeapNumber();
+  }
   if (value->IsString()) return HType::String();
   if (value->IsBoolean()) return HType::Boolean();
   if (value->IsUndefined()) return HType::Undefined();
   if (value->IsJSArray()) return HType::JSArray();
   if (value->IsJSObject()) return HType::JSObject();
-  ASSERT(value->IsHeapObject());
+  DCHECK(value->IsHeapObject());
   return HType::HeapObject();
 }
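
FromValue() now looks through HeapNumbers whose payload is Smi-representable, so such constants pick up the tighter Smi type. Illustrative classifications under the new rule (assuming 31-bit Smis; -0 is taken to fail IsSmiDouble, since a Smi cannot carry the sign of zero):

  // HeapNumber(3.0)        -> HType::Smi()         IsSmiDouble(3.0) holds
  // HeapNumber(1.5)        -> HType::HeapNumber()  fractional part
  // HeapNumber(2147483648) -> HType::HeapNumber()  outside Smi range
  // HeapNumber(-0.0)       -> HType::HeapNumber()  sign of zero lost in a Smi
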
 
 
-const char* HType::ToString() const {
+OStream& operator<<(OStream& os, const HType& t) {
   // Note: The c1visualizer syntax for locals allows only a sequence of the
   // following characters: A-Za-z0-9_-|:
-  switch (kind_) {
-    #define DEFINE_CASE(Name, mask) case k##Name: return #Name;
+  switch (t.kind_) {
+#define DEFINE_CASE(Name, mask) \
+  case HType::k##Name:          \
+    return os << #Name;
     HTYPE_LIST(DEFINE_CASE)
-    #undef DEFINE_CASE
+#undef DEFINE_CASE
   }
   UNREACHABLE();
-  return NULL;
+  return os;
 }
 
 } }  // namespace v8::internal
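
Dropping ToString() in favour of an operator<< matches the file-wide move from StringStream/char* printing to chainable OStream insertion (every PrintDataTo above changed the same way). A hedged usage sketch, assuming OStringStream is the concrete buffer-backed OStream declared in src/ostreams.h:

  // Sketch: HType values now print by insertion rather than ToString().
  OStringStream os;
  os << HType::Smi() << " vs " << HType::HeapObject();
  PrintF("%s\n", os.c_str());
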
diff --git a/src/hydrogen-types.h b/src/hydrogen-types.h
index e924a6b..a42cba5 100644
--- a/src/hydrogen-types.h
+++ b/src/hydrogen-types.h
@@ -15,6 +15,7 @@
 // Forward declarations.
 template <typename T> class Handle;
 class Object;
+class OStream;
 
 #define HTYPE_LIST(V)                                 \
   V(Any, 0x0)              /* 0000 0000 0000 0000 */  \
@@ -33,38 +34,38 @@
   V(JSArray, 0x621)        /* 0000 0110 0010 0001 */  \
   V(None, 0x7ff)           /* 0000 0111 1111 1111 */
 
-class HType V8_FINAL {
+class HType FINAL {
  public:
   #define DECLARE_CONSTRUCTOR(Name, mask) \
-    static HType Name() V8_WARN_UNUSED_RESULT { return HType(k##Name); }
+    static HType Name() WARN_UNUSED_RESULT { return HType(k##Name); }
   HTYPE_LIST(DECLARE_CONSTRUCTOR)
   #undef DECLARE_CONSTRUCTOR
 
   // Return the weakest (least precise) common type.
-  HType Combine(HType other) const V8_WARN_UNUSED_RESULT {
+  HType Combine(HType other) const WARN_UNUSED_RESULT {
     return HType(static_cast<Kind>(kind_ & other.kind_));
   }
 
-  bool Equals(HType other) const V8_WARN_UNUSED_RESULT {
+  bool Equals(HType other) const WARN_UNUSED_RESULT {
     return kind_ == other.kind_;
   }
 
-  bool IsSubtypeOf(HType other) const V8_WARN_UNUSED_RESULT {
+  bool IsSubtypeOf(HType other) const WARN_UNUSED_RESULT {
     return Combine(other).Equals(other);
   }
 
   #define DECLARE_IS_TYPE(Name, mask)               \
-    bool Is##Name() const V8_WARN_UNUSED_RESULT {   \
+    bool Is##Name() const WARN_UNUSED_RESULT {   \
       return IsSubtypeOf(HType::Name());            \
     }
   HTYPE_LIST(DECLARE_IS_TYPE)
   #undef DECLARE_IS_TYPE
 
   template <class T>
-  static HType FromType(typename T::TypeHandle type) V8_WARN_UNUSED_RESULT;
-  static HType FromValue(Handle<Object> value) V8_WARN_UNUSED_RESULT;
+  static HType FromType(typename T::TypeHandle type) WARN_UNUSED_RESULT;
+  static HType FromValue(Handle<Object> value) WARN_UNUSED_RESULT;
 
-  const char* ToString() const V8_WARN_UNUSED_RESULT;
+  friend OStream& operator<<(OStream& os, const HType& t);
 
  private:
   enum Kind {
@@ -82,6 +83,8 @@
   int16_t kind_;
 };
 
+
+OStream& operator<<(OStream& os, const HType& t);
 } }  // namespace v8::internal
 
 #endif  // HYDROGEN_TYPES_H_
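
    [Note on the HTYPE_LIST masks above: they encode a lattice in which each
    type's bits are a superset of its supertypes' bits, so Combine() (bitwise
    AND) yields the weakest common type and IsSubtypeOf() reduces to a mask
    test. A small self-contained illustration; JSObject's mask 0x221 is taken
    from the full list, just above the hunk shown.]

    // Self-contained illustration of the HType bitmask lattice.
    #include <cassert>
    #include <cstdint>

    const int16_t kAny = 0x0;        // top of the lattice: no bits set
    const int16_t kJSObject = 0x221;
    const int16_t kJSArray = 0x621;  // JSObject's bits plus one more
    const int16_t kNone = 0x7ff;     // bottom: all bits set

    int16_t Combine(int16_t a, int16_t b) { return a & b; }  // weakest common
    bool IsSubtypeOf(int16_t a, int16_t b) { return Combine(a, b) == b; }

    int main() {
      assert(IsSubtypeOf(kJSArray, kJSObject));      // every JSArray is a JSObject
      assert(!IsSubtypeOf(kJSObject, kJSArray));
      assert(Combine(kJSArray, kNone) == kJSArray);  // None is below everything
      assert(Combine(kJSArray, kAny) == kAny);       // Any absorbs everything
      return 0;
    }
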
diff --git a/src/hydrogen-uint32-analysis.cc b/src/hydrogen-uint32-analysis.cc
index 7616f3d..585a706 100644
--- a/src/hydrogen-uint32-analysis.cc
+++ b/src/hydrogen-uint32-analysis.cc
@@ -41,10 +41,10 @@
     return true;
   } else if (use->IsChange()) {
     // Conversions have special support for uint32.
-    // This ASSERT guards that the conversion in question is actually
+    // This DCHECK guards that the conversion in question is actually
     // implemented. Do not extend the whitelist without adding
     // support to LChunkBuilder::DoChange().
-    ASSERT(HChange::cast(use)->to().IsDouble() ||
+    DCHECK(HChange::cast(use)->to().IsDouble() ||
            HChange::cast(use)->to().IsSmi() ||
            HChange::cast(use)->to().IsTagged());
     return true;
@@ -55,9 +55,9 @@
       // operation.
       if (store->value() == val) {
         // Clamping or a conversion to double should have beed inserted.
-        ASSERT(store->elements_kind() != EXTERNAL_UINT8_CLAMPED_ELEMENTS);
-        ASSERT(store->elements_kind() != EXTERNAL_FLOAT32_ELEMENTS);
-        ASSERT(store->elements_kind() != EXTERNAL_FLOAT64_ELEMENTS);
+        DCHECK(store->elements_kind() != EXTERNAL_UINT8_CLAMPED_ELEMENTS);
+        DCHECK(store->elements_kind() != EXTERNAL_FLOAT32_ELEMENTS);
+        DCHECK(store->elements_kind() != EXTERNAL_FLOAT64_ELEMENTS);
         return true;
       }
     }
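
    [Note on the ASSERT-to-DCHECK rename that dominates the rest of this diff:
    it is mechanical. Both are debug-only checks that compile to nothing in
    release builds; the new name comes with V8's base library split. A rough
    sketch of the contract, assuming the usual DEBUG-gated definition; the
    real macro, with file/line reporting, lives under src/base/.]

    // Rough sketch of DCHECK's contract: active only in debug builds,
    // a no-op otherwise. Simplified relative to the real macro.
    #include <cstdio>
    #include <cstdlib>

    #ifdef DEBUG
    #define DCHECK(condition)                                           \
      do {                                                              \
        if (!(condition)) {                                             \
          std::fprintf(stderr, "Debug check failed: %s\n", #condition); \
          std::abort();                                                 \
        }                                                               \
      } while (false)
    #else
    #define DCHECK(condition) ((void)0)
    #endif
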
diff --git a/src/hydrogen.cc b/src/hydrogen.cc
index 8fff497..37ee2e4 100644
--- a/src/hydrogen.cc
+++ b/src/hydrogen.cc
@@ -7,6 +7,7 @@
 #include <algorithm>
 
 #include "src/v8.h"
+
 #include "src/allocation-site-scopes.h"
 #include "src/codegen.h"
 #include "src/full-codegen.h"
@@ -19,10 +20,10 @@
 #include "src/hydrogen-dehoist.h"
 #include "src/hydrogen-environment-liveness.h"
 #include "src/hydrogen-escape-analysis.h"
+#include "src/hydrogen-gvn.h"
 #include "src/hydrogen-infer-representation.h"
 #include "src/hydrogen-infer-types.h"
 #include "src/hydrogen-load-elimination.h"
-#include "src/hydrogen-gvn.h"
 #include "src/hydrogen-mark-deoptimize.h"
 #include "src/hydrogen-mark-unreachable.h"
 #include "src/hydrogen-osr.h"
@@ -33,26 +34,31 @@
 #include "src/hydrogen-sce.h"
 #include "src/hydrogen-store-elimination.h"
 #include "src/hydrogen-uint32-analysis.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/ic.h"
+// GetRootConstructor
+#include "src/ic/ic-inl.h"
 #include "src/lithium-allocator.h"
 #include "src/parser.h"
 #include "src/runtime.h"
 #include "src/scopeinfo.h"
 #include "src/scopes.h"
-#include "src/stub-cache.h"
 #include "src/typing.h"
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-codegen-ia32.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-codegen-x64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/arm64/lithium-codegen-arm64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-codegen-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-codegen-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-codegen-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-codegen-x87.h"
+#include "src/x87/lithium-codegen-x87.h"  // NOLINT
 #else
 #error Unsupported target architecture.
 #endif
@@ -96,27 +102,27 @@
 
 
 void HBasicBlock::AttachLoopInformation() {
-  ASSERT(!IsLoopHeader());
+  DCHECK(!IsLoopHeader());
   loop_information_ = new(zone()) HLoopInformation(this, zone());
 }
 
 
 void HBasicBlock::DetachLoopInformation() {
-  ASSERT(IsLoopHeader());
+  DCHECK(IsLoopHeader());
   loop_information_ = NULL;
 }
 
 
 void HBasicBlock::AddPhi(HPhi* phi) {
-  ASSERT(!IsStartBlock());
+  DCHECK(!IsStartBlock());
   phis_.Add(phi, zone());
   phi->SetBlock(this);
 }
 
 
 void HBasicBlock::RemovePhi(HPhi* phi) {
-  ASSERT(phi->block() == this);
-  ASSERT(phis_.Contains(phi));
+  DCHECK(phi->block() == this);
+  DCHECK(phis_.Contains(phi));
   phi->Kill();
   phis_.RemoveElement(phi);
   phi->SetBlock(NULL);
@@ -125,22 +131,22 @@
 
 void HBasicBlock::AddInstruction(HInstruction* instr,
                                  HSourcePosition position) {
-  ASSERT(!IsStartBlock() || !IsFinished());
-  ASSERT(!instr->IsLinked());
-  ASSERT(!IsFinished());
+  DCHECK(!IsStartBlock() || !IsFinished());
+  DCHECK(!instr->IsLinked());
+  DCHECK(!IsFinished());
 
   if (!position.IsUnknown()) {
     instr->set_position(position);
   }
   if (first_ == NULL) {
-    ASSERT(last_environment() != NULL);
-    ASSERT(!last_environment()->ast_id().IsNone());
+    DCHECK(last_environment() != NULL);
+    DCHECK(!last_environment()->ast_id().IsNone());
     HBlockEntry* entry = new(zone()) HBlockEntry();
     entry->InitializeAsFirst(this);
     if (!position.IsUnknown()) {
       entry->set_position(position);
     } else {
-      ASSERT(!FLAG_hydrogen_track_positions ||
+      DCHECK(!FLAG_hydrogen_track_positions ||
              !graph()->info()->IsOptimizing());
     }
     first_ = last_ = entry;
@@ -161,9 +167,9 @@
 
 HSimulate* HBasicBlock::CreateSimulate(BailoutId ast_id,
                                        RemovableSimulate removable) {
-  ASSERT(HasEnvironment());
+  DCHECK(HasEnvironment());
   HEnvironment* environment = last_environment();
-  ASSERT(ast_id.IsNone() ||
+  DCHECK(ast_id.IsNone() ||
          ast_id == BailoutId::StubEntry() ||
          environment->closure()->shared()->VerifyBailoutId(ast_id));
 
@@ -194,7 +200,7 @@
 
 
 void HBasicBlock::Finish(HControlInstruction* end, HSourcePosition position) {
-  ASSERT(!IsFinished());
+  DCHECK(!IsFinished());
   AddInstruction(end, position);
   end_ = end;
   for (HSuccessorIterator it(end); !it.Done(); it.Advance()) {
@@ -231,8 +237,8 @@
   HBasicBlock* target = state->function_return();
   bool drop_extra = state->inlining_kind() == NORMAL_RETURN;
 
-  ASSERT(target->IsInlineReturnTarget());
-  ASSERT(return_value != NULL);
+  DCHECK(target->IsInlineReturnTarget());
+  DCHECK(return_value != NULL);
   HEnvironment* env = last_environment();
   int argument_count = env->arguments_environment()->parameter_count();
   AddInstruction(new(zone()) HLeaveInlined(state->entry(), argument_count),
@@ -246,8 +252,8 @@
 
 
 void HBasicBlock::SetInitialEnvironment(HEnvironment* env) {
-  ASSERT(!HasEnvironment());
-  ASSERT(first() == NULL);
+  DCHECK(!HasEnvironment());
+  DCHECK(first() == NULL);
   UpdateEnvironment(env);
 }
 
@@ -260,12 +266,12 @@
 
 void HBasicBlock::SetJoinId(BailoutId ast_id) {
   int length = predecessors_.length();
-  ASSERT(length > 0);
+  DCHECK(length > 0);
   for (int i = 0; i < length; i++) {
     HBasicBlock* predecessor = predecessors_[i];
-    ASSERT(predecessor->end()->IsGoto());
+    DCHECK(predecessor->end()->IsGoto());
     HSimulate* simulate = HSimulate::cast(predecessor->end()->previous());
-    ASSERT(i != 0 ||
+    DCHECK(i != 0 ||
            (predecessor->last_environment()->closure().is_null() ||
             predecessor->last_environment()->closure()->shared()
               ->VerifyBailoutId(ast_id)));
@@ -303,7 +309,7 @@
 
 
 void HBasicBlock::PostProcessLoopHeader(IterationStatement* stmt) {
-  ASSERT(IsLoopHeader());
+  DCHECK(IsLoopHeader());
 
   SetJoinId(stmt->EntryId());
   if (predecessors()->length() == 1) {
@@ -321,10 +327,10 @@
 
 
 void HBasicBlock::MarkSuccEdgeUnreachable(int succ) {
-  ASSERT(IsFinished());
+  DCHECK(IsFinished());
   HBasicBlock* succ_block = end()->SuccessorAt(succ);
 
-  ASSERT(succ_block->predecessors()->length() == 1);
+  DCHECK(succ_block->predecessors()->length() == 1);
   succ_block->MarkUnreachable();
 }
 
@@ -334,10 +340,10 @@
     // Only loop header blocks can have a predecessor added after
     // instructions have been added to the block (they have phis for all
     // values in the environment, these phis may be eliminated later).
-    ASSERT(IsLoopHeader() || first_ == NULL);
+    DCHECK(IsLoopHeader() || first_ == NULL);
     HEnvironment* incoming_env = pred->last_environment();
     if (IsLoopHeader()) {
-      ASSERT(phis()->length() == incoming_env->length());
+      DCHECK(phis()->length() == incoming_env->length());
       for (int i = 0; i < phis_.length(); ++i) {
         phis_[i]->AddInput(incoming_env->values()->at(i));
       }
@@ -345,7 +351,7 @@
       last_environment()->AddIncomingEdge(this, pred->last_environment());
     }
   } else if (!HasEnvironment() && !IsFinished()) {
-    ASSERT(!IsLoopHeader());
+    DCHECK(!IsLoopHeader());
     SetInitialEnvironment(pred->last_environment()->Copy());
   }
 
@@ -354,7 +360,7 @@
 
 
 void HBasicBlock::AddDominatedBlock(HBasicBlock* block) {
-  ASSERT(!dominated_blocks_.Contains(block));
+  DCHECK(!dominated_blocks_.Contains(block));
   // Keep the list of dominated blocks sorted such that if there are two
   // succeeding blocks in this list, the predecessor is before the successor.
   int index = 0;
@@ -380,11 +386,11 @@
       } else {
         second = second->dominator();
       }
-      ASSERT(first != NULL && second != NULL);
+      DCHECK(first != NULL && second != NULL);
     }
 
     if (dominator_ != first) {
-      ASSERT(dominator_->dominated_blocks_.Contains(this));
+      DCHECK(dominator_->dominated_blocks_.Contains(this));
       dominator_->dominated_blocks_.RemoveElement(this);
       dominator_ = first;
       first->AddDominatedBlock(this);
@@ -426,7 +432,7 @@
     // dominator information about the current loop that's being processed,
     // and not nested loops, which will be processed when
     // AssignLoopSuccessorDominators gets called on their header.
-    ASSERT(outstanding_successors >= 0);
+    DCHECK(outstanding_successors >= 0);
     HBasicBlock* parent_loop_header = dominator_candidate->parent_loop_header();
     if (outstanding_successors == 0 &&
         (parent_loop_header == this && !dominator_candidate->IsLoopHeader())) {
@@ -440,7 +446,7 @@
       if (successor->block_id() > dominator_candidate->block_id() &&
           successor->block_id() <= last->block_id()) {
         // Backwards edges must land on loop headers.
-        ASSERT(successor->block_id() > dominator_candidate->block_id() ||
+        DCHECK(successor->block_id() > dominator_candidate->block_id() ||
                successor->IsLoopHeader());
         outstanding_successors++;
       }
@@ -461,13 +467,13 @@
 #ifdef DEBUG
 void HBasicBlock::Verify() {
   // Check that every block is finished.
-  ASSERT(IsFinished());
-  ASSERT(block_id() >= 0);
+  DCHECK(IsFinished());
+  DCHECK(block_id() >= 0);
 
   // Check that the incoming edges are in edge split form.
   if (predecessors_.length() > 1) {
     for (int i = 0; i < predecessors_.length(); ++i) {
-      ASSERT(predecessors_[i]->end()->SecondSuccessor() == NULL);
+      DCHECK(predecessors_[i]->end()->SecondSuccessor() == NULL);
     }
   }
 }
@@ -570,10 +576,10 @@
     // Check that every block contains at least one node and that only the last
     // node is a control instruction.
     HInstruction* current = block->first();
-    ASSERT(current != NULL && current->IsBlockEntry());
+    DCHECK(current != NULL && current->IsBlockEntry());
     while (current != NULL) {
-      ASSERT((current->next() == NULL) == current->IsControlInstruction());
-      ASSERT(current->block() == block);
+      DCHECK((current->next() == NULL) == current->IsControlInstruction());
+      DCHECK(current->block() == block);
       current->Verify();
       current = current->next();
     }
@@ -581,13 +587,13 @@
     // Check that successors are correctly set.
     HBasicBlock* first = block->end()->FirstSuccessor();
     HBasicBlock* second = block->end()->SecondSuccessor();
-    ASSERT(second == NULL || first != NULL);
+    DCHECK(second == NULL || first != NULL);
 
     // Check that the predecessor array is correct.
     if (first != NULL) {
-      ASSERT(first->predecessors()->Contains(block));
+      DCHECK(first->predecessors()->Contains(block));
       if (second != NULL) {
-        ASSERT(second->predecessors()->Contains(block));
+        DCHECK(second->predecessors()->Contains(block));
       }
     }
 
@@ -604,36 +610,36 @@
           block->predecessors()->first()->last_environment()->ast_id();
       for (int k = 0; k < block->predecessors()->length(); k++) {
         HBasicBlock* predecessor = block->predecessors()->at(k);
-        ASSERT(predecessor->end()->IsGoto() ||
+        DCHECK(predecessor->end()->IsGoto() ||
                predecessor->end()->IsDeoptimize());
-        ASSERT(predecessor->last_environment()->ast_id() == id);
+        DCHECK(predecessor->last_environment()->ast_id() == id);
       }
     }
   }
 
   // Check special property of first block to have no predecessors.
-  ASSERT(blocks_.at(0)->predecessors()->is_empty());
+  DCHECK(blocks_.at(0)->predecessors()->is_empty());
 
   if (do_full_verify) {
     // Check that the graph is fully connected.
     ReachabilityAnalyzer analyzer(entry_block_, blocks_.length(), NULL);
-    ASSERT(analyzer.visited_count() == blocks_.length());
+    DCHECK(analyzer.visited_count() == blocks_.length());
 
     // Check that entry block dominator is NULL.
-    ASSERT(entry_block_->dominator() == NULL);
+    DCHECK(entry_block_->dominator() == NULL);
 
     // Check dominators.
     for (int i = 0; i < blocks_.length(); ++i) {
       HBasicBlock* block = blocks_.at(i);
       if (block->dominator() == NULL) {
         // Only the start block may have no dominator assigned.
-        ASSERT(i == 0);
+        DCHECK(i == 0);
       } else {
         // Assert that block is unreachable if dominator must not be visited.
         ReachabilityAnalyzer dominator_analyzer(entry_block_,
                                                 blocks_.length(),
                                                 block->dominator());
-        ASSERT(!dominator_analyzer.reachable()->Contains(block->block_id()));
+        DCHECK(!dominator_analyzer.reachable()->Contains(block->block_id()));
       }
     }
   }
@@ -744,54 +750,52 @@
 }
 
 
+HGraphBuilder::IfBuilder::IfBuilder() : builder_(NULL), needs_compare_(true) {}
+
+
 HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder)
-    : builder_(builder),
-      finished_(false),
-      did_then_(false),
-      did_else_(false),
-      did_else_if_(false),
-      did_and_(false),
-      did_or_(false),
-      captured_(false),
-      needs_compare_(true),
-      pending_merge_block_(false),
-      split_edge_merge_block_(NULL),
-      merge_at_join_blocks_(NULL),
-      normal_merge_at_join_block_count_(0),
-      deopt_merge_at_join_block_count_(0) {
+    : needs_compare_(true) {
+  Initialize(builder);
+}
+
+
+HGraphBuilder::IfBuilder::IfBuilder(HGraphBuilder* builder,
+                                    HIfContinuation* continuation)
+    : needs_compare_(false), first_true_block_(NULL), first_false_block_(NULL) {
+  InitializeDontCreateBlocks(builder);
+  continuation->Continue(&first_true_block_, &first_false_block_);
+}
+
+
+void HGraphBuilder::IfBuilder::InitializeDontCreateBlocks(
+    HGraphBuilder* builder) {
+  builder_ = builder;
+  finished_ = false;
+  did_then_ = false;
+  did_else_ = false;
+  did_else_if_ = false;
+  did_and_ = false;
+  did_or_ = false;
+  captured_ = false;
+  pending_merge_block_ = false;
+  split_edge_merge_block_ = NULL;
+  merge_at_join_blocks_ = NULL;
+  normal_merge_at_join_block_count_ = 0;
+  deopt_merge_at_join_block_count_ = 0;
+}
+
+
+void HGraphBuilder::IfBuilder::Initialize(HGraphBuilder* builder) {
+  InitializeDontCreateBlocks(builder);
   HEnvironment* env = builder->environment();
   first_true_block_ = builder->CreateBasicBlock(env->Copy());
   first_false_block_ = builder->CreateBasicBlock(env->Copy());
 }
 
 
-HGraphBuilder::IfBuilder::IfBuilder(
-    HGraphBuilder* builder,
-    HIfContinuation* continuation)
-    : builder_(builder),
-      finished_(false),
-      did_then_(false),
-      did_else_(false),
-      did_else_if_(false),
-      did_and_(false),
-      did_or_(false),
-      captured_(false),
-      needs_compare_(false),
-      pending_merge_block_(false),
-      first_true_block_(NULL),
-      first_false_block_(NULL),
-      split_edge_merge_block_(NULL),
-      merge_at_join_blocks_(NULL),
-      normal_merge_at_join_block_count_(0),
-      deopt_merge_at_join_block_count_(0) {
-  continuation->Continue(&first_true_block_,
-                         &first_false_block_);
-}
-
-
 HControlInstruction* HGraphBuilder::IfBuilder::AddCompare(
     HControlInstruction* compare) {
-  ASSERT(did_then_ == did_else_);
+  DCHECK(did_then_ == did_else_);
   if (did_else_) {
     // Handle if-then-elseif
     did_else_if_ = true;
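
    [Note on the IfBuilder constructor rewrite above: it de-duplicates two
    near-identical member-initializer lists into Initialize() and
    InitializeDontCreateBlocks(), and adds a default constructor so an
    IfBuilder can be declared before its graph builder is known. With C++11
    delegating constructors the same de-duplication could be expressed
    directly; a hypothetical sketch follows, since V8 at this point still
    targeted pre-C++11 toolchains, which plausibly explains the helper
    pattern. Names here are illustrative, not V8 API.]

    // Hypothetical C++11 alternative to the Initialize() helpers: delegate
    // all constructors to one private workhorse constructor.
    class IfBuilderLike {
     public:
      IfBuilderLike() : IfBuilderLike(nullptr, true) {}  // deferred init
      explicit IfBuilderLike(void* builder) : IfBuilderLike(builder, true) {}
     private:
      IfBuilderLike(void* builder, bool needs_compare)
          : builder_(builder), needs_compare_(needs_compare) {}
      void* builder_;
      bool needs_compare_;
    };
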
@@ -801,14 +805,13 @@
     did_or_ = false;
     pending_merge_block_ = false;
     split_edge_merge_block_ = NULL;
-    HEnvironment* env = builder_->environment();
-    first_true_block_ = builder_->CreateBasicBlock(env->Copy());
-    first_false_block_ = builder_->CreateBasicBlock(env->Copy());
+    HEnvironment* env = builder()->environment();
+    first_true_block_ = builder()->CreateBasicBlock(env->Copy());
+    first_false_block_ = builder()->CreateBasicBlock(env->Copy());
   }
   if (split_edge_merge_block_ != NULL) {
     HEnvironment* env = first_false_block_->last_environment();
-    HBasicBlock* split_edge =
-        builder_->CreateBasicBlock(env->Copy());
+    HBasicBlock* split_edge = builder()->CreateBasicBlock(env->Copy());
     if (did_or_) {
       compare->SetSuccessorAt(0, split_edge);
       compare->SetSuccessorAt(1, first_false_block_);
@@ -816,81 +819,80 @@
       compare->SetSuccessorAt(0, first_true_block_);
       compare->SetSuccessorAt(1, split_edge);
     }
-    builder_->GotoNoSimulate(split_edge, split_edge_merge_block_);
+    builder()->GotoNoSimulate(split_edge, split_edge_merge_block_);
   } else {
     compare->SetSuccessorAt(0, first_true_block_);
     compare->SetSuccessorAt(1, first_false_block_);
   }
-  builder_->FinishCurrentBlock(compare);
+  builder()->FinishCurrentBlock(compare);
   needs_compare_ = false;
   return compare;
 }
 
 
 void HGraphBuilder::IfBuilder::Or() {
-  ASSERT(!needs_compare_);
-  ASSERT(!did_and_);
+  DCHECK(!needs_compare_);
+  DCHECK(!did_and_);
   did_or_ = true;
   HEnvironment* env = first_false_block_->last_environment();
   if (split_edge_merge_block_ == NULL) {
-    split_edge_merge_block_ =
-        builder_->CreateBasicBlock(env->Copy());
-    builder_->GotoNoSimulate(first_true_block_, split_edge_merge_block_);
+    split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy());
+    builder()->GotoNoSimulate(first_true_block_, split_edge_merge_block_);
     first_true_block_ = split_edge_merge_block_;
   }
-  builder_->set_current_block(first_false_block_);
-  first_false_block_ = builder_->CreateBasicBlock(env->Copy());
+  builder()->set_current_block(first_false_block_);
+  first_false_block_ = builder()->CreateBasicBlock(env->Copy());
 }
 
 
 void HGraphBuilder::IfBuilder::And() {
-  ASSERT(!needs_compare_);
-  ASSERT(!did_or_);
+  DCHECK(!needs_compare_);
+  DCHECK(!did_or_);
   did_and_ = true;
   HEnvironment* env = first_false_block_->last_environment();
   if (split_edge_merge_block_ == NULL) {
-    split_edge_merge_block_ = builder_->CreateBasicBlock(env->Copy());
-    builder_->GotoNoSimulate(first_false_block_, split_edge_merge_block_);
+    split_edge_merge_block_ = builder()->CreateBasicBlock(env->Copy());
+    builder()->GotoNoSimulate(first_false_block_, split_edge_merge_block_);
     first_false_block_ = split_edge_merge_block_;
   }
-  builder_->set_current_block(first_true_block_);
-  first_true_block_ = builder_->CreateBasicBlock(env->Copy());
+  builder()->set_current_block(first_true_block_);
+  first_true_block_ = builder()->CreateBasicBlock(env->Copy());
 }
 
 
 void HGraphBuilder::IfBuilder::CaptureContinuation(
     HIfContinuation* continuation) {
-  ASSERT(!did_else_if_);
-  ASSERT(!finished_);
-  ASSERT(!captured_);
+  DCHECK(!did_else_if_);
+  DCHECK(!finished_);
+  DCHECK(!captured_);
 
   HBasicBlock* true_block = NULL;
   HBasicBlock* false_block = NULL;
   Finish(&true_block, &false_block);
-  ASSERT(true_block != NULL);
-  ASSERT(false_block != NULL);
+  DCHECK(true_block != NULL);
+  DCHECK(false_block != NULL);
   continuation->Capture(true_block, false_block);
   captured_ = true;
-  builder_->set_current_block(NULL);
+  builder()->set_current_block(NULL);
   End();
 }
 
 
 void HGraphBuilder::IfBuilder::JoinContinuation(HIfContinuation* continuation) {
-  ASSERT(!did_else_if_);
-  ASSERT(!finished_);
-  ASSERT(!captured_);
+  DCHECK(!did_else_if_);
+  DCHECK(!finished_);
+  DCHECK(!captured_);
   HBasicBlock* true_block = NULL;
   HBasicBlock* false_block = NULL;
   Finish(&true_block, &false_block);
   merge_at_join_blocks_ = NULL;
   if (true_block != NULL && !true_block->IsFinished()) {
-    ASSERT(continuation->IsTrueReachable());
-    builder_->GotoNoSimulate(true_block, continuation->true_branch());
+    DCHECK(continuation->IsTrueReachable());
+    builder()->GotoNoSimulate(true_block, continuation->true_branch());
   }
   if (false_block != NULL && !false_block->IsFinished()) {
-    ASSERT(continuation->IsFalseReachable());
-    builder_->GotoNoSimulate(false_block, continuation->false_branch());
+    DCHECK(continuation->IsFalseReachable());
+    builder()->GotoNoSimulate(false_block, continuation->false_branch());
   }
   captured_ = true;
   End();
@@ -898,75 +900,74 @@
 
 
 void HGraphBuilder::IfBuilder::Then() {
-  ASSERT(!captured_);
-  ASSERT(!finished_);
+  DCHECK(!captured_);
+  DCHECK(!finished_);
   did_then_ = true;
   if (needs_compare_) {
     // Handle if's without any expressions; they jump directly to the "else"
     // branch. However, we must pretend that the "then" branch is reachable,
     // so that the graph builder visits it and sees any live range extending
     // constructs within it.
-    HConstant* constant_false = builder_->graph()->GetConstantFalse();
+    HConstant* constant_false = builder()->graph()->GetConstantFalse();
     ToBooleanStub::Types boolean_type = ToBooleanStub::Types();
     boolean_type.Add(ToBooleanStub::BOOLEAN);
     HBranch* branch = builder()->New<HBranch>(
         constant_false, boolean_type, first_true_block_, first_false_block_);
-    builder_->FinishCurrentBlock(branch);
+    builder()->FinishCurrentBlock(branch);
   }
-  builder_->set_current_block(first_true_block_);
+  builder()->set_current_block(first_true_block_);
   pending_merge_block_ = true;
 }
 
 
 void HGraphBuilder::IfBuilder::Else() {
-  ASSERT(did_then_);
-  ASSERT(!captured_);
-  ASSERT(!finished_);
+  DCHECK(did_then_);
+  DCHECK(!captured_);
+  DCHECK(!finished_);
   AddMergeAtJoinBlock(false);
-  builder_->set_current_block(first_false_block_);
+  builder()->set_current_block(first_false_block_);
   pending_merge_block_ = true;
   did_else_ = true;
 }
 
 
 void HGraphBuilder::IfBuilder::Deopt(const char* reason) {
-  ASSERT(did_then_);
-  builder_->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
+  DCHECK(did_then_);
+  builder()->Add<HDeoptimize>(reason, Deoptimizer::EAGER);
   AddMergeAtJoinBlock(true);
 }
 
 
 void HGraphBuilder::IfBuilder::Return(HValue* value) {
-  HValue* parameter_count = builder_->graph()->GetConstantMinus1();
-  builder_->FinishExitCurrentBlock(
-      builder_->New<HReturn>(value, parameter_count));
+  HValue* parameter_count = builder()->graph()->GetConstantMinus1();
+  builder()->FinishExitCurrentBlock(
+      builder()->New<HReturn>(value, parameter_count));
   AddMergeAtJoinBlock(false);
 }
 
 
 void HGraphBuilder::IfBuilder::AddMergeAtJoinBlock(bool deopt) {
   if (!pending_merge_block_) return;
-  HBasicBlock* block = builder_->current_block();
-  ASSERT(block == NULL || !block->IsFinished());
-  MergeAtJoinBlock* record =
-      new(builder_->zone()) MergeAtJoinBlock(block, deopt,
-                                             merge_at_join_blocks_);
+  HBasicBlock* block = builder()->current_block();
+  DCHECK(block == NULL || !block->IsFinished());
+  MergeAtJoinBlock* record = new (builder()->zone())
+      MergeAtJoinBlock(block, deopt, merge_at_join_blocks_);
   merge_at_join_blocks_ = record;
   if (block != NULL) {
-    ASSERT(block->end() == NULL);
+    DCHECK(block->end() == NULL);
     if (deopt) {
       normal_merge_at_join_block_count_++;
     } else {
       deopt_merge_at_join_block_count_++;
     }
   }
-  builder_->set_current_block(NULL);
+  builder()->set_current_block(NULL);
   pending_merge_block_ = false;
 }
 
 
 void HGraphBuilder::IfBuilder::Finish() {
-  ASSERT(!finished_);
+  DCHECK(!finished_);
   if (!did_then_) {
     Then();
   }
@@ -991,7 +992,7 @@
   if (then_continuation != NULL) {
     *then_continuation = then_record->block_;
   }
-  ASSERT(then_record->next_ == NULL);
+  DCHECK(then_record->next_ == NULL);
 }
 
 
@@ -1001,9 +1002,9 @@
 
   int total_merged_blocks = normal_merge_at_join_block_count_ +
     deopt_merge_at_join_block_count_;
-  ASSERT(total_merged_blocks >= 1);
-  HBasicBlock* merge_block = total_merged_blocks == 1
-      ? NULL : builder_->graph()->CreateBasicBlock();
+  DCHECK(total_merged_blocks >= 1);
+  HBasicBlock* merge_block =
+      total_merged_blocks == 1 ? NULL : builder()->graph()->CreateBasicBlock();
 
   // Merge non-deopt blocks first to ensure environment has right size for
   // padding.
@@ -1014,10 +1015,10 @@
       // if, then just set it as the current block and continue rather than
       // creating an unnecessary merge block.
       if (total_merged_blocks == 1) {
-        builder_->set_current_block(current->block_);
+        builder()->set_current_block(current->block_);
         return;
       }
-      builder_->GotoNoSimulate(current->block_, merge_block);
+      builder()->GotoNoSimulate(current->block_, merge_block);
     }
     current = current->next_;
   }
@@ -1026,44 +1027,48 @@
   current = merge_at_join_blocks_;
   while (current != NULL) {
     if (current->deopt_ && current->block_ != NULL) {
-      current->block_->FinishExit(
-          HAbnormalExit::New(builder_->zone(), NULL),
-          HSourcePosition::Unknown());
+      current->block_->FinishExit(HAbnormalExit::New(builder()->zone(), NULL),
+                                  HSourcePosition::Unknown());
     }
     current = current->next_;
   }
-  builder_->set_current_block(merge_block);
+  builder()->set_current_block(merge_block);
 }
 
 
-HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
-                                        HValue* context,
-                                        LoopBuilder::Direction direction)
-    : builder_(builder),
-      context_(context),
-      direction_(direction),
-      finished_(false) {
-  header_block_ = builder->CreateLoopHeaderBlock();
-  body_block_ = NULL;
-  exit_block_ = NULL;
-  exit_trampoline_block_ = NULL;
-  increment_amount_ = builder_->graph()->GetConstant1();
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder) {
+  Initialize(builder, NULL, kWhileTrue, NULL);
 }
 
 
-HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder,
-                                        HValue* context,
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context,
+                                        LoopBuilder::Direction direction) {
+  Initialize(builder, context, direction, builder->graph()->GetConstant1());
+}
+
+
+HGraphBuilder::LoopBuilder::LoopBuilder(HGraphBuilder* builder, HValue* context,
                                         LoopBuilder::Direction direction,
-                                        HValue* increment_amount)
-    : builder_(builder),
-      context_(context),
-      direction_(direction),
-      finished_(false) {
+                                        HValue* increment_amount) {
+  Initialize(builder, context, direction, increment_amount);
+  increment_amount_ = increment_amount;
+}
+
+
+void HGraphBuilder::LoopBuilder::Initialize(HGraphBuilder* builder,
+                                            HValue* context,
+                                            Direction direction,
+                                            HValue* increment_amount) {
+  builder_ = builder;
+  context_ = context;
+  direction_ = direction;
+  increment_amount_ = increment_amount;
+
+  finished_ = false;
   header_block_ = builder->CreateLoopHeaderBlock();
   body_block_ = NULL;
   exit_block_ = NULL;
   exit_trampoline_block_ = NULL;
-  increment_amount_ = increment_amount;
 }
 
 
@@ -1071,6 +1076,7 @@
     HValue* initial,
     HValue* terminating,
     Token::Value token) {
+  DCHECK(direction_ != kWhileTrue);
   HEnvironment* env = builder_->environment();
   phi_ = header_block_->AddNewPhi(env->values()->length());
   phi_->AddInput(initial);
@@ -1107,12 +1113,26 @@
 }
 
 
+void HGraphBuilder::LoopBuilder::BeginBody(int drop_count) {
+  DCHECK(direction_ == kWhileTrue);
+  HEnvironment* env = builder_->environment();
+  builder_->GotoNoSimulate(header_block_);
+  builder_->set_current_block(header_block_);
+  env->Drop(drop_count);
+}
+
+
 void HGraphBuilder::LoopBuilder::Break() {
   if (exit_trampoline_block_ == NULL) {
     // It's the first time we saw a break.
-    HEnvironment* env = exit_block_->last_environment()->Copy();
-    exit_trampoline_block_ = builder_->CreateBasicBlock(env);
-    builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_);
+    if (direction_ == kWhileTrue) {
+      HEnvironment* env = builder_->environment()->Copy();
+      exit_trampoline_block_ = builder_->CreateBasicBlock(env);
+    } else {
+      HEnvironment* env = exit_block_->last_environment()->Copy();
+      exit_trampoline_block_ = builder_->CreateBasicBlock(env);
+      builder_->GotoNoSimulate(exit_block_, exit_trampoline_block_);
+    }
   }
 
   builder_->GotoNoSimulate(exit_trampoline_block_);
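
    [Note on BeginBody(int) and the new kWhileTrue branch in Break() above:
    together they support loops with no induction variable. Loop-carried
    state travels on the expression stack, BeginBody(n) drops it from the
    header environment, and Break() builds the exit trampoline from the
    current environment. A condensed usage sketch, mirroring the dictionary
    probe loop added further below; not standalone, since it assumes the
    surrounding HGraphBuilder context, and "state" is illustrative.]

    // Condensed usage sketch for the kWhileTrue LoopBuilder.
    LoopBuilder loop(this);          // kWhileTrue: no phi, no condition
    Push(graph()->GetConstant0());   // loop-carried state rides the stack
    loop.BeginBody(1);               // enter the header; drop 1 pushed value
    HValue* state = Pop();           // body pops, recomputes, and re-pushes
    // ... decide whether to exit: loop.Break() jumps to the trampoline ...
    Push(AddUncasted<HAdd>(state, graph()->GetConstant1()));
    loop.EndBody();                  // back edge to the header
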
@@ -1121,7 +1141,7 @@
 
 
 void HGraphBuilder::LoopBuilder::EndBody() {
-  ASSERT(!finished_);
+  DCHECK(!finished_);
 
   if (direction_ == kPostIncrement || direction_ == kPostDecrement) {
     if (direction_ == kPostIncrement) {
@@ -1133,8 +1153,11 @@
     builder_->AddInstruction(increment_);
   }
 
-  // Push the new increment value on the expression stack to merge into the phi.
-  builder_->environment()->Push(increment_);
+  if (direction_ != kWhileTrue) {
+    // Push the new increment value on the expression stack to merge into
+    // the phi.
+    builder_->environment()->Push(increment_);
+  }
   HBasicBlock* last_block = builder_->current_block();
   builder_->GotoNoSimulate(last_block, header_block_);
   header_block_->loop_information()->RegisterBackEdge(last_block);
@@ -1160,8 +1183,8 @@
 
 
 HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
-  ASSERT(current_block() != NULL);
-  ASSERT(!FLAG_hydrogen_track_positions ||
+  DCHECK(current_block() != NULL);
+  DCHECK(!FLAG_hydrogen_track_positions ||
          !position_.IsUnknown() ||
          !info_->IsOptimizing());
   current_block()->AddInstruction(instr, source_position());
@@ -1173,7 +1196,7 @@
 
 
 void HGraphBuilder::FinishCurrentBlock(HControlInstruction* last) {
-  ASSERT(!FLAG_hydrogen_track_positions ||
+  DCHECK(!FLAG_hydrogen_track_positions ||
          !info_->IsOptimizing() ||
          !position_.IsUnknown());
   current_block()->Finish(last, source_position());
@@ -1184,7 +1207,7 @@
 
 
 void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
-  ASSERT(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
+  DCHECK(!FLAG_hydrogen_track_positions || !info_->IsOptimizing() ||
          !position_.IsUnknown());
   current_block()->FinishExit(instruction, source_position());
   if (instruction->IsReturn() || instruction->IsAbnormalExit()) {
@@ -1208,8 +1231,8 @@
 
 void HGraphBuilder::AddSimulate(BailoutId id,
                                 RemovableSimulate removable) {
-  ASSERT(current_block() != NULL);
-  ASSERT(!graph()->IsInsideNoSideEffectsScope());
+  DCHECK(current_block() != NULL);
+  DCHECK(!graph()->IsInsideNoSideEffectsScope());
   current_block()->AddNewSimulate(id, source_position(), removable);
 }
 
@@ -1254,7 +1277,7 @@
 
 HValue* HGraphBuilder::BuildCheckString(HValue* string) {
   if (!string->type().IsString()) {
-    ASSERT(!string->IsConstant() ||
+    DCHECK(!string->IsConstant() ||
            !HConstant::cast(string)->HasStringValue());
     BuildCheckHeapObject(string);
     return Add<HCheckInstanceType>(string, HCheckInstanceType::IS_STRING);
@@ -1373,7 +1396,7 @@
                                                 ElementsKind from_kind,
                                                 ElementsKind to_kind,
                                                 bool is_jsarray) {
-  ASSERT(!IsFastHoleyElementsKind(from_kind) ||
+  DCHECK(!IsFastHoleyElementsKind(from_kind) ||
          IsFastHoleyElementsKind(to_kind));
 
   if (AllocationSite::GetMode(from_kind, to_kind) == TRACK_ALLOCATION_SITE) {
@@ -1430,7 +1453,8 @@
                                              mask);
   HValue* sub_result = AddUncasted<HSub>(and_result,
                                          Add<HConstant>(JS_OBJECT_TYPE));
-  Add<HBoundsCheck>(sub_result, Add<HConstant>(0x100 - JS_OBJECT_TYPE));
+  Add<HBoundsCheck>(sub_result,
+                    Add<HConstant>(LAST_JS_OBJECT_TYPE + 1 - JS_OBJECT_TYPE));
 }
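
    [Note on the bounds-check change above: sub_result is
    instance_type - JS_OBJECT_TYPE, so checking it against
    LAST_JS_OBJECT_TYPE + 1 - JS_OBJECT_TYPE accepts exactly the instance
    types in [JS_OBJECT_TYPE, LAST_JS_OBJECT_TYPE], where the old 0x100 bound
    relied on instance types fitting in a byte. A standalone demonstration of
    the unsigned range trick; the bound values are stand-ins, not V8's actual
    instance-type numbers.]

    // Standalone check of the subtract-then-bounds-check range test.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t kFirst = 0xb0;  // stand-in for JS_OBJECT_TYPE
      const uint32_t kLast = 0xb8;   // stand-in for LAST_JS_OBJECT_TYPE
      for (uint32_t type = 0; type < 0x200; type++) {
        // Underflow wraps around, so one unsigned compare tests both bounds.
        bool in_range = (type - kFirst) < (kLast + 1 - kFirst);
        assert(in_range == (type >= kFirst && type <= kLast));
      }
      return 0;
    }
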
 
 
@@ -1506,19 +1530,29 @@
         }
         string_index_if.Else();
         {
-          // Key is a non-index String, check for uniqueness/internalization. If
-          // it's not, deopt.
+          // Key is a non-index String, check for uniqueness/internalization.
+          // If it's not internalized yet, internalize it now.
           HValue* not_internalized_bit = AddUncasted<HBitwise>(
               Token::BIT_AND,
               instance_type,
               Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
-          DeoptimizeIf<HCompareNumericAndBranch>(
-              not_internalized_bit,
-              graph()->GetConstant0(),
-              Token::NE,
-              "BuildKeyedIndexCheck: string isn't internalized");
-          // Key guaranteed to be a unqiue string
+
+          IfBuilder internalized(this);
+          internalized.If<HCompareNumericAndBranch>(not_internalized_bit,
+                                                    graph()->GetConstant0(),
+                                                    Token::EQ);
+          internalized.Then();
           Push(key);
+
+          internalized.Else();
+          Add<HPushArguments>(key);
+          HValue* intern_key = Add<HCallRuntime>(
+              isolate()->factory()->empty_string(),
+              Runtime::FunctionForId(Runtime::kInternalizeString), 1);
+          Push(intern_key);
+
+          internalized.End();
+          // Key guaranteed to be a unique string
         }
         string_index_if.JoinContinuation(join_continuation);
       }
@@ -1597,84 +1631,6 @@
 }
 
 
-HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoadHelper(
-    HValue* elements,
-    HValue* key,
-    HValue* hash,
-    HValue* mask,
-    int current_probe) {
-  if (current_probe == kNumberDictionaryProbes) {
-    return NULL;
-  }
-
-  int32_t offset = SeededNumberDictionary::GetProbeOffset(current_probe);
-  HValue* raw_index = (current_probe == 0)
-      ? hash
-      : AddUncasted<HAdd>(hash, Add<HConstant>(offset));
-  raw_index = AddUncasted<HBitwise>(Token::BIT_AND, raw_index, mask);
-  int32_t entry_size = SeededNumberDictionary::kEntrySize;
-  raw_index = AddUncasted<HMul>(raw_index, Add<HConstant>(entry_size));
-  raw_index->ClearFlag(HValue::kCanOverflow);
-
-  int32_t base_offset = SeededNumberDictionary::kElementsStartIndex;
-  HValue* key_index = AddUncasted<HAdd>(raw_index, Add<HConstant>(base_offset));
-  key_index->ClearFlag(HValue::kCanOverflow);
-
-  HValue* candidate_key = Add<HLoadKeyed>(elements, key_index,
-                                          static_cast<HValue*>(NULL),
-                                          FAST_ELEMENTS);
-
-  IfBuilder key_compare(this);
-  key_compare.IfNot<HCompareObjectEqAndBranch>(key, candidate_key);
-  key_compare.Then();
-  {
-    // Key at the current probe doesn't match, try at the next probe.
-    HValue* result = BuildUncheckedDictionaryElementLoadHelper(
-        elements, key, hash, mask, current_probe + 1);
-    if (result == NULL) {
-      key_compare.Deopt("probes exhausted in keyed load dictionary lookup");
-      result = graph()->GetConstantUndefined();
-    } else {
-      Push(result);
-    }
-  }
-  key_compare.Else();
-  {
-    // Key at current probe matches. Details must be zero, otherwise the
-    // dictionary element requires special handling.
-    HValue* details_index = AddUncasted<HAdd>(
-        raw_index, Add<HConstant>(base_offset + 2));
-    details_index->ClearFlag(HValue::kCanOverflow);
-
-    HValue* details = Add<HLoadKeyed>(elements, details_index,
-                                      static_cast<HValue*>(NULL),
-                                      FAST_ELEMENTS);
-    IfBuilder details_compare(this);
-    details_compare.If<HCompareNumericAndBranch>(details,
-                                                 graph()->GetConstant0(),
-                                                 Token::NE);
-    details_compare.ThenDeopt("keyed load dictionary element not fast case");
-
-    details_compare.Else();
-    {
-      // Key matches and details are zero --> fast case. Load and return the
-      // value.
-      HValue* result_index = AddUncasted<HAdd>(
-          raw_index, Add<HConstant>(base_offset + 1));
-      result_index->ClearFlag(HValue::kCanOverflow);
-
-      Push(Add<HLoadKeyed>(elements, result_index,
-                           static_cast<HValue*>(NULL),
-                           FAST_ELEMENTS));
-    }
-    details_compare.End();
-  }
-  key_compare.End();
-
-  return Pop();
-}
-
-
 HValue* HGraphBuilder::BuildElementIndexHash(HValue* index) {
   int32_t seed_value = static_cast<uint32_t>(isolate()->heap()->HashSeed());
   HValue* seed = Add<HConstant>(seed_value);
@@ -1722,8 +1678,129 @@
   mask->ChangeRepresentation(Representation::Integer32());
   mask->ClearFlag(HValue::kCanOverflow);
 
-  return BuildUncheckedDictionaryElementLoadHelper(elements, key,
-                                                   hash, mask, 0);
+  HValue* entry = hash;
+  HValue* count = graph()->GetConstant1();
+  Push(entry);
+  Push(count);
+
+  HIfContinuation return_or_loop_continuation(graph()->CreateBasicBlock(),
+                                              graph()->CreateBasicBlock());
+  HIfContinuation found_key_match_continuation(graph()->CreateBasicBlock(),
+                                               graph()->CreateBasicBlock());
+  LoopBuilder probe_loop(this);
+  probe_loop.BeginBody(2);  // Drop entry, count from last environment to
+                            // appease live range building without simulates.
+
+  count = Pop();
+  entry = Pop();
+  entry = AddUncasted<HBitwise>(Token::BIT_AND, entry, mask);
+  int entry_size = SeededNumberDictionary::kEntrySize;
+  HValue* base_index = AddUncasted<HMul>(entry, Add<HConstant>(entry_size));
+  base_index->ClearFlag(HValue::kCanOverflow);
+  int start_offset = SeededNumberDictionary::kElementsStartIndex;
+  HValue* key_index =
+      AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset));
+  key_index->ClearFlag(HValue::kCanOverflow);
+
+  HValue* candidate_key = Add<HLoadKeyed>(
+      elements, key_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+  IfBuilder if_undefined(this);
+  if_undefined.If<HCompareObjectEqAndBranch>(candidate_key,
+                                             graph()->GetConstantUndefined());
+  if_undefined.Then();
+  {
+    // element == undefined means "not found". Call the runtime.
+    // TODO(jkummerow): walk the prototype chain instead.
+    Add<HPushArguments>(receiver, key);
+    Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
+                           Runtime::FunctionForId(Runtime::kKeyedGetProperty),
+                           2));
+  }
+  if_undefined.Else();
+  {
+    IfBuilder if_match(this);
+    if_match.If<HCompareObjectEqAndBranch>(candidate_key, key);
+    if_match.Then();
+    if_match.Else();
+
+    // Update non-internalized string in the dictionary with internalized key?
+    IfBuilder if_update_with_internalized(this);
+    HValue* smi_check =
+        if_update_with_internalized.IfNot<HIsSmiAndBranch>(candidate_key);
+    if_update_with_internalized.And();
+    HValue* map = AddLoadMap(candidate_key, smi_check);
+    HValue* instance_type = Add<HLoadNamedField>(
+        map, static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
+    HValue* not_internalized_bit = AddUncasted<HBitwise>(
+        Token::BIT_AND, instance_type,
+        Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
+    if_update_with_internalized.If<HCompareNumericAndBranch>(
+        not_internalized_bit, graph()->GetConstant0(), Token::NE);
+    if_update_with_internalized.And();
+    if_update_with_internalized.IfNot<HCompareObjectEqAndBranch>(
+        candidate_key, graph()->GetConstantHole());
+    if_update_with_internalized.AndIf<HStringCompareAndBranch>(candidate_key,
+                                                               key, Token::EQ);
+    if_update_with_internalized.Then();
+    // Replace a key that is a non-internalized string by the equivalent
+    // internalized string for faster further lookups.
+    Add<HStoreKeyed>(elements, key_index, key, FAST_ELEMENTS);
+    if_update_with_internalized.Else();
+
+    if_update_with_internalized.JoinContinuation(&found_key_match_continuation);
+    if_match.JoinContinuation(&found_key_match_continuation);
+
+    IfBuilder found_key_match(this, &found_key_match_continuation);
+    found_key_match.Then();
+    // Key at current probe matches. Relevant bits in the |details| field must
+    // be zero, otherwise the dictionary element requires special handling.
+    HValue* details_index =
+        AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 2));
+    details_index->ClearFlag(HValue::kCanOverflow);
+    HValue* details = Add<HLoadKeyed>(
+        elements, details_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+    int details_mask = PropertyDetails::TypeField::kMask |
+                       PropertyDetails::DeletedField::kMask;
+    details = AddUncasted<HBitwise>(Token::BIT_AND, details,
+                                    Add<HConstant>(details_mask));
+    IfBuilder details_compare(this);
+    details_compare.If<HCompareNumericAndBranch>(
+        details, graph()->GetConstant0(), Token::EQ);
+    details_compare.Then();
+    HValue* result_index =
+        AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1));
+    result_index->ClearFlag(HValue::kCanOverflow);
+    Push(Add<HLoadKeyed>(elements, result_index, static_cast<HValue*>(NULL),
+                         FAST_ELEMENTS));
+    details_compare.Else();
+    Add<HPushArguments>(receiver, key);
+    Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
+                           Runtime::FunctionForId(Runtime::kKeyedGetProperty),
+                           2));
+    details_compare.End();
+
+    found_key_match.Else();
+    found_key_match.JoinContinuation(&return_or_loop_continuation);
+  }
+  if_undefined.JoinContinuation(&return_or_loop_continuation);
+
+  IfBuilder return_or_loop(this, &return_or_loop_continuation);
+  return_or_loop.Then();
+  probe_loop.Break();
+
+  return_or_loop.Else();
+  entry = AddUncasted<HAdd>(entry, count);
+  entry->ClearFlag(HValue::kCanOverflow);
+  count = AddUncasted<HAdd>(count, graph()->GetConstant1());
+  count->ClearFlag(HValue::kCanOverflow);
+  Push(entry);
+  Push(count);
+
+  probe_loop.EndBody();
+
+  return_or_loop.End();
+
+  return Pop();
 }
 
 
@@ -1779,9 +1856,11 @@
   HAllocate* elements = BuildAllocateElements(elements_kind, size);
   BuildInitializeElementsHeader(elements, elements_kind, length);
 
-  HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
-      elements_kind, max_length->Integer32Value());
-  elements->set_size_upper_bound(size_in_bytes_upper_bound);
+  if (!elements->has_size_upper_bound()) {
+    HConstant* size_in_bytes_upper_bound = EstablishElementsAllocationSize(
+        elements_kind, max_length->Integer32Value());
+    elements->set_size_upper_bound(size_in_bytes_upper_bound);
+  }
 
   Add<HStoreNamedField>(
       result, HObjectAccess::ForJSArrayOffset(JSArray::kElementsOffset),
@@ -1932,7 +2011,7 @@
     Add<HPushArguments>(object);
     Push(Add<HCallRuntime>(
             isolate()->factory()->empty_string(),
-            Runtime::FunctionForId(Runtime::kHiddenNumberToStringSkipCache),
+            Runtime::FunctionForId(Runtime::kNumberToStringSkipCache),
             1));
   }
   if_found.End();
@@ -1989,11 +2068,11 @@
   HInstruction* right_instance_type = AddLoadStringInstanceType(right);
 
   // Allocate the cons string object. HAllocate does not care whether we
-  // pass CONS_STRING_TYPE or CONS_ASCII_STRING_TYPE here, so we just use
+  // pass CONS_STRING_TYPE or CONS_ONE_BYTE_STRING_TYPE here, so we just use
   // CONS_STRING_TYPE here. Below we decide whether the cons string is
   // one-byte or two-byte and set the appropriate map.
-  ASSERT(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE,
-                                            CONS_ASCII_STRING_TYPE));
+  DCHECK(HAllocate::CompatibleInstanceTypes(CONS_STRING_TYPE,
+                                            CONS_ONE_BYTE_STRING_TYPE));
   HAllocate* result = BuildAllocate(Add<HConstant>(ConsString::kSize),
                                     HType::String(), CONS_STRING_TYPE,
                                     allocation_mode);
@@ -2038,7 +2117,7 @@
     // We can safely skip the write barrier for storing the map here.
     Add<HStoreNamedField>(
         result, HObjectAccess::ForMap(),
-        Add<HConstant>(isolate()->factory()->cons_ascii_string_map()));
+        Add<HConstant>(isolate()->factory()->cons_one_byte_string_map()));
   }
   if_onebyte.Else();
   {
@@ -2070,7 +2149,7 @@
                                             HValue* dst_offset,
                                             String::Encoding dst_encoding,
                                             HValue* length) {
-  ASSERT(dst_encoding != String::ONE_BYTE_ENCODING ||
+  DCHECK(dst_encoding != String::ONE_BYTE_ENCODING ||
          src_encoding == String::ONE_BYTE_ENCODING);
   LoopBuilder loop(this, context(), LoopBuilder::kPostIncrement);
   HValue* index = loop.BeginBody(graph()->GetConstant0(), length, Token::LT);
@@ -2087,7 +2166,7 @@
 
 HValue* HGraphBuilder::BuildObjectSizeAlignment(
     HValue* unaligned_size, int header_size) {
-  ASSERT((header_size & kObjectAlignmentMask) == 0);
+  DCHECK((header_size & kObjectAlignmentMask) == 0);
   HValue* size = AddUncasted<HAdd>(
       unaligned_size, Add<HConstant>(static_cast<int32_t>(
           header_size + kObjectAlignmentMask)));
@@ -2112,14 +2191,14 @@
   // Do some manual constant folding here.
   if (left_length->IsConstant()) {
     HConstant* c_left_length = HConstant::cast(left_length);
-    ASSERT_NE(0, c_left_length->Integer32Value());
+    DCHECK_NE(0, c_left_length->Integer32Value());
     if (c_left_length->Integer32Value() + 1 >= ConsString::kMinLength) {
       // The right string contains at least one character.
       return BuildCreateConsString(length, left, right, allocation_mode);
     }
   } else if (right_length->IsConstant()) {
     HConstant* c_right_length = HConstant::cast(right_length);
-    ASSERT_NE(0, c_right_length->Integer32Value());
+    DCHECK_NE(0, c_right_length->Integer32Value());
     if (c_right_length->Integer32Value() + 1 >= ConsString::kMinLength) {
       // The left string contains at least one character.
       return BuildCreateConsString(length, left, right, allocation_mode);
@@ -2166,8 +2245,8 @@
     {
       HConstant* string_map =
           Add<HConstant>(isolate()->factory()->string_map());
-      HConstant* ascii_string_map =
-          Add<HConstant>(isolate()->factory()->ascii_string_map());
+      HConstant* one_byte_string_map =
+          Add<HConstant>(isolate()->factory()->one_byte_string_map());
 
       // Determine map and size depending on whether result is one-byte string.
       IfBuilder if_onebyte(this);
@@ -2181,7 +2260,7 @@
       {
         // Allocate sequential one-byte string object.
         Push(length);
-        Push(ascii_string_map);
+        Push(one_byte_string_map);
       }
       if_onebyte.Else();
       {
@@ -2201,7 +2280,7 @@
       HValue* size = BuildObjectSizeAlignment(Pop(), SeqString::kHeaderSize);
 
       // Allocate the string object. HAllocate does not care whether we pass
-      // STRING_TYPE or ASCII_STRING_TYPE here, so we just use STRING_TYPE here.
+      // STRING_TYPE or ONE_BYTE_STRING_TYPE here, so we just use STRING_TYPE.
       HAllocate* result = BuildAllocate(
           size, HType::String(), STRING_TYPE, allocation_mode);
       Add<HStoreNamedField>(result, HObjectAccess::ForMap(), map);
@@ -2256,7 +2335,7 @@
       Add<HPushArguments>(left, right);
       Push(Add<HCallRuntime>(
             isolate()->factory()->empty_string(),
-            Runtime::FunctionForId(Runtime::kHiddenStringAdd),
+            Runtime::FunctionForId(Runtime::kStringAdd),
             2));
     }
     if_sameencodingandsequential.End();
@@ -2325,7 +2404,7 @@
     PropertyAccessType access_type,
     LoadKeyedHoleMode load_mode,
     KeyedAccessStoreMode store_mode) {
-  ASSERT((!IsExternalArrayElementsKind(elements_kind) &&
+  DCHECK((!IsExternalArrayElementsKind(elements_kind) &&
               !IsFixedTypedArrayElementsKind(elements_kind)) ||
          !is_js_array);
   // No GVNFlag is necessary for ElementsKind if there is an explicit dependency
@@ -2384,14 +2463,14 @@
       length_checker.End();
       return result;
     } else {
-      ASSERT(store_mode == STANDARD_STORE);
+      DCHECK(store_mode == STANDARD_STORE);
       checked_key = Add<HBoundsCheck>(key, length);
       return AddElementAccess(
           backing_store, checked_key, val,
           checked_object, elements_kind, access_type);
     }
   }
-  ASSERT(fast_smi_only_elements ||
+  DCHECK(fast_smi_only_elements ||
          fast_elements ||
          IsFastDoubleElementsKind(elements_kind));
 
@@ -2598,7 +2677,7 @@
     PropertyAccessType access_type,
     LoadKeyedHoleMode load_mode) {
   if (access_type == STORE) {
-    ASSERT(val != NULL);
+    DCHECK(val != NULL);
     if (elements_kind == EXTERNAL_UINT8_CLAMPED_ELEMENTS ||
         elements_kind == UINT8_CLAMPED_ELEMENTS) {
       val = Add<HClampToUint8>(val);
@@ -2607,8 +2686,8 @@
                             STORE_TO_INITIALIZED_ENTRY);
   }
 
-  ASSERT(access_type == LOAD);
-  ASSERT(val == NULL);
+  DCHECK(access_type == LOAD);
+  DCHECK(val == NULL);
   HLoadKeyed* load = Add<HLoadKeyed>(
       elements, checked_key, dependency, elements_kind, load_mode);
   if (FLAG_opt_safe_uint32_operations &&
@@ -2673,7 +2752,7 @@
                                                  HValue* new_capacity) {
   Add<HBoundsCheck>(new_capacity, Add<HConstant>(
           (Page::kMaxRegularHeapObjectSize - FixedArray::kHeaderSize) >>
-          ElementsKindToShiftSize(kind)));
+          ElementsKindToShiftSize(new_kind)));
 
   HValue* new_elements = BuildAllocateElementsAndInitializeElementsHeader(
       new_kind, new_capacity);
@@ -2997,7 +3076,7 @@
     HValue* previous_object,
     HValue* previous_object_size,
     HValue* allocation_site) {
-  ASSERT(allocation_site != NULL);
+  DCHECK(allocation_site != NULL);
   HInnerAllocatedObject* allocation_memento = Add<HInnerAllocatedObject>(
       previous_object, previous_object_size, HType::HeapObject());
   AddStoreMapConstant(
@@ -3068,7 +3147,7 @@
         kind_(kind),
         allocation_site_payload_(allocation_site_payload),
         constructor_function_(constructor_function) {
-  ASSERT(!allocation_site_payload->IsConstant() ||
+  DCHECK(!allocation_site_payload->IsConstant() ||
          HConstant::cast(allocation_site_payload)->handle(
              builder_->isolate())->IsAllocationSite());
   mode_ = override_mode == DISABLE_ALLOCATION_SITES
@@ -3247,7 +3326,6 @@
       break_scope_(NULL),
       inlined_count_(0),
       globals_(10, info->zone()),
-      inline_bailout_(false),
       osr_(new(info->zone()) HOsrBuilder(this)) {
   // This is not initialized in the initializer list because the
   // constructor for the initial state relies on function_state_ == NULL
@@ -3330,6 +3408,11 @@
 }
 
 
+OStream& operator<<(OStream& os, const HBasicBlock& b) {
+  return os << "B" << b.block_id();
+}
+
+
 HGraph::HGraph(CompilationInfo* info)
     : isolate_(info->isolate()),
       next_block_id_(0),
@@ -3351,10 +3434,10 @@
       next_inline_id_(0),
       inlined_functions_(5, info->zone()) {
   if (info->IsStub()) {
-    HydrogenCodeStub* stub = info->code_stub();
-    CodeStubInterfaceDescriptor* descriptor = stub->GetInterfaceDescriptor();
-    start_environment_ =
-        new(zone_) HEnvironment(zone_, descriptor->environment_length());
+    CallInterfaceDescriptor descriptor =
+        info->code_stub()->GetCallInterfaceDescriptor();
+    start_environment_ = new (zone_)
+        HEnvironment(zone_, descriptor.GetEnvironmentParameterCount());
   } else {
     TraceInlinedFunction(info->shared_info(), HSourcePosition::Unknown());
     start_environment_ =
@@ -3375,7 +3458,7 @@
 
 void HGraph::FinalizeUniqueness() {
   DisallowHeapAllocation no_gc;
-  ASSERT(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
+  DCHECK(!OptimizingCompilerThread::IsOptimizerThread(isolate()));
   for (int i = 0; i < blocks()->length(); ++i) {
     for (HInstructionIterator it(blocks()->at(i)); !it.Done(); it.Advance()) {
       it.Current()->FinalizeUniqueness();
@@ -3404,13 +3487,10 @@
     if (!shared->script()->IsUndefined()) {
       Handle<Script> script(Script::cast(shared->script()));
       if (!script->source()->IsUndefined()) {
-        CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
-        PrintF(tracing_scope.file(),
-               "--- FUNCTION SOURCE (%s) id{%d,%d} ---\n",
-               shared->DebugName()->ToCString().get(),
-               info()->optimization_id(),
-               id);
-
+        CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+        OFStream os(tracing_scope.file());
+        os << "--- FUNCTION SOURCE (" << shared->DebugName()->ToCString().get()
+           << ") id{" << info()->optimization_id() << "," << id << "} ---\n";
         {
           ConsStringIteratorOp op;
           StringCharacterStream stream(String::cast(script->source()),
@@ -3422,12 +3502,12 @@
               shared->end_position() - shared->start_position() + 1;
           for (int i = 0; i < source_len; i++) {
             if (stream.HasMore()) {
-              PrintF(tracing_scope.file(), "%c", stream.GetNext());
+              os << AsReversiblyEscapedUC16(stream.GetNext());
             }
           }
         }
 
-        PrintF(tracing_scope.file(), "\n--- END ---\n");
+        os << "\n--- END ---\n";
       }
     }
   }
@@ -3436,13 +3516,10 @@
 
   if (inline_id != 0) {
     CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
-    PrintF(tracing_scope.file(), "INLINE (%s) id{%d,%d} AS %d AT ",
-           shared->DebugName()->ToCString().get(),
-           info()->optimization_id(),
-           id,
-           inline_id);
-    position.PrintTo(tracing_scope.file());
-    PrintF(tracing_scope.file(), "\n");
+    OFStream os(tracing_scope.file());
+    os << "INLINE (" << shared->DebugName()->ToCString().get() << ") id{"
+       << info()->optimization_id() << "," << id << "} AS " << inline_id
+       << " AT " << position << endl;
   }
 
   return inline_id;
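
    [Note on the PrintF-to-OFStream conversion above: it is the consumer side
    of the operator<< overloads introduced for HType and HBasicBlock earlier
    in this diff; tracing now composes via stream insertion instead of format
    strings. A generic, self-contained sketch of the pattern, with
    std::ostream standing in for V8's OStream and a toy block type.]

    // Generic sketch of the stream-insertion pattern adopted above.
    #include <iostream>

    struct BlockLike { int id; };

    std::ostream& operator<<(std::ostream& os, const BlockLike& b) {
      return os << "B" << b.id;  // mirrors the "B" << block_id() overload
    }

    int main() {
      BlockLike target = {42};
      std::cout << "INLINE (fn) id{1,2} AS " << target << " AT " << 7
                << std::endl;  // prints: INLINE (fn) id{1,2} AS B42 AT 7
    }
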
@@ -3568,7 +3645,7 @@
         return result->SetupLoopMembers(zone, block, block->loop_information(),
                                         loop_header);
       } else {
-        ASSERT(block->IsFinished());
+        DCHECK(block->IsFinished());
         kind_ = SUCCESSORS;
         loop_header_ = loop_header;
         InitializeSuccessors();
@@ -3610,10 +3687,10 @@
   }
 
   void ClosePostorder(ZoneList<HBasicBlock*>* order, Zone* zone) {
-    ASSERT(block_->end()->FirstSuccessor() == NULL ||
+    DCHECK(block_->end()->FirstSuccessor() == NULL ||
            order->Contains(block_->end()->FirstSuccessor()) ||
            block_->end()->FirstSuccessor()->IsLoopHeader());
-    ASSERT(block_->end()->SecondSuccessor() == NULL ||
+    DCHECK(block_->end()->SecondSuccessor() == NULL ||
            order->Contains(block_->end()->SecondSuccessor()) ||
            block_->end()->SecondSuccessor()->IsLoopHeader());
     order->Add(block_, zone);
@@ -3751,7 +3828,7 @@
 #ifdef DEBUG
   // Initially the blocks must not be ordered.
   for (int i = 0; i < blocks_.length(); ++i) {
-    ASSERT(!blocks_[i]->IsOrdered());
+    DCHECK(!blocks_[i]->IsOrdered());
   }
 #endif
 
@@ -3765,7 +3842,7 @@
 #ifdef DEBUG
   // Now all blocks must be marked as ordered.
   for (int i = 0; i < blocks_.length(); ++i) {
-    ASSERT(blocks_[i]->IsOrdered());
+    DCHECK(blocks_[i]->IsOrdered());
   }
 #endif
 
@@ -3912,7 +3989,7 @@
       for_typeof_(false) {
   owner->set_ast_context(this);  // Push.
 #ifdef DEBUG
-  ASSERT(owner->environment()->frame_type() == JS_FUNCTION);
+  DCHECK(owner->environment()->frame_type() == JS_FUNCTION);
   original_length_ = owner->environment()->length();
 #endif
 }
@@ -3924,7 +4001,7 @@
 
 
 EffectContext::~EffectContext() {
-  ASSERT(owner()->HasStackOverflow() ||
+  DCHECK(owner()->HasStackOverflow() ||
          owner()->current_block() == NULL ||
          (owner()->environment()->length() == original_length_ &&
           owner()->environment()->frame_type() == JS_FUNCTION));
@@ -3932,7 +4009,7 @@
 
 
 ValueContext::~ValueContext() {
-  ASSERT(owner()->HasStackOverflow() ||
+  DCHECK(owner()->HasStackOverflow() ||
          owner()->current_block() == NULL ||
          (owner()->environment()->length() == original_length_ + 1 &&
           owner()->environment()->frame_type() == JS_FUNCTION));
@@ -3960,7 +4037,7 @@
 
 
 void EffectContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
-  ASSERT(!instr->IsControlInstruction());
+  DCHECK(!instr->IsControlInstruction());
   owner()->AddInstruction(instr);
   if (instr->HasObservableSideEffects()) {
     owner()->Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -3970,7 +4047,7 @@
 
 void EffectContext::ReturnControl(HControlInstruction* instr,
                                   BailoutId ast_id) {
-  ASSERT(!instr->HasObservableSideEffects());
+  DCHECK(!instr->HasObservableSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
@@ -3998,7 +4075,7 @@
 
 
 void ValueContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
-  ASSERT(!instr->IsControlInstruction());
+  DCHECK(!instr->IsControlInstruction());
   if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
     return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
   }
@@ -4011,7 +4088,7 @@
 
 
 void ValueContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
-  ASSERT(!instr->HasObservableSideEffects());
+  DCHECK(!instr->HasObservableSideEffects());
   if (!arguments_allowed() && instr->CheckFlag(HValue::kIsArguments)) {
     return owner()->Bailout(kBadValueContextForArgumentsObjectValue);
   }
@@ -4054,7 +4131,7 @@
 
 
 void TestContext::ReturnInstruction(HInstruction* instr, BailoutId ast_id) {
-  ASSERT(!instr->IsControlInstruction());
+  DCHECK(!instr->IsControlInstruction());
   HOptimizedGraphBuilder* builder = owner();
   builder->AddInstruction(instr);
   // We expect a simulate after every expression with side effects, though
@@ -4069,7 +4146,7 @@
 
 
 void TestContext::ReturnControl(HControlInstruction* instr, BailoutId ast_id) {
-  ASSERT(!instr->HasObservableSideEffects());
+  DCHECK(!instr->HasObservableSideEffects());
   HBasicBlock* empty_true = owner()->graph()->CreateBasicBlock();
   HBasicBlock* empty_false = owner()->graph()->CreateBasicBlock();
   instr->SetSuccessorAt(0, empty_true);
@@ -4133,7 +4210,7 @@
 
 
 void HOptimizedGraphBuilder::Bailout(BailoutReason reason) {
-  current_info()->set_bailout_reason(reason);
+  current_info()->AbortOptimization(reason);
   SetStackOverflow();
 }
 
@@ -4234,7 +4311,7 @@
   // due to missing/inadequate type feedback, but rather too aggressive
   // optimization. Disable optimistic LICM in that case.
   Handle<Code> unoptimized_code(current_info()->shared_info()->code());
-  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+  DCHECK(unoptimized_code->kind() == Code::FUNCTION);
   Handle<TypeFeedbackInfo> type_info(
       TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
   int checksum = type_info->own_type_change_checksum();
@@ -4348,7 +4425,7 @@
 #ifdef DEBUG
     for (int i = 0; i < block->phis()->length(); i++) {
       HPhi* phi = block->phis()->at(i);
-      ASSERT(phi->ActualValue() == phi);
+      DCHECK(phi->ActualValue() == phi);
     }
 #endif
 
@@ -4361,7 +4438,7 @@
         // instructions.
         instruction->DeleteAndReplaceWith(instruction->ActualValue());
       } else {
-        ASSERT(instruction->IsInformativeDefinition());
+        DCHECK(instruction->IsInformativeDefinition());
         if (instruction->IsPurelyInformativeDefinition()) {
           instruction->DeleteAndReplaceWith(instruction->RedefinedOperand());
         } else {
@@ -4401,7 +4478,7 @@
 
   // Create an arguments object containing the initial parameters.  Set the
   // initial values of parameters including "this" having parameter index 0.
-  ASSERT_EQ(scope->num_parameters() + 1, environment()->parameter_count());
+  DCHECK_EQ(scope->num_parameters() + 1, environment()->parameter_count());
   HArgumentsObject* arguments_object =
       New<HArgumentsObject>(environment()->parameter_count());
   for (int i = 0; i < environment()->parameter_count(); ++i) {
@@ -4433,6 +4510,11 @@
 }
 
 
+Type* HOptimizedGraphBuilder::ToType(Handle<Map> map) {
+  return IC::MapToType<Type>(map, zone());
+}
+
+
 void HOptimizedGraphBuilder::VisitStatements(ZoneList<Statement*>* statements) {
   for (int i = 0; i < statements->length(); i++) {
     Statement* stmt = statements->at(i);
@@ -4443,9 +4525,9 @@
 
 
 void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
 
   Scope* outer_scope = scope();
   Scope* scope = stmt->scope();
@@ -4503,24 +4585,24 @@
 
 void HOptimizedGraphBuilder::VisitExpressionStatement(
     ExpressionStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   VisitForEffect(stmt->expression());
 }
 
 
 void HOptimizedGraphBuilder::VisitEmptyStatement(EmptyStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
 }
 
 
 void HOptimizedGraphBuilder::VisitIfStatement(IfStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   if (stmt->condition()->ToBooleanIsTrue()) {
     Add<HSimulate>(stmt->ThenId());
     Visit(stmt->then_statement());
@@ -4567,7 +4649,7 @@
     *drop_extra += current->info()->drop_extra();
     current = current->next();
   }
-  ASSERT(current != NULL);  // Always found (unless stack is malformed).
+  DCHECK(current != NULL);  // Always found (unless stack is malformed).
   *scope = current->info()->scope();
 
   if (type == BREAK) {
@@ -4599,9 +4681,9 @@
 
 void HOptimizedGraphBuilder::VisitContinueStatement(
     ContinueStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   Scope* outer_scope = NULL;
   Scope* inner_scope = scope();
   int drop_extra = 0;
@@ -4631,9 +4713,9 @@
 
 
 void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   Scope* outer_scope = NULL;
   Scope* inner_scope = scope();
   int drop_extra = 0;
@@ -4662,9 +4744,9 @@
 
 
 void HOptimizedGraphBuilder::VisitReturnStatement(ReturnStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   FunctionState* state = function_state();
   AstContext* context = call_context();
   if (context == NULL) {
@@ -4684,7 +4766,7 @@
       CHECK_ALIVE(VisitForEffect(stmt->expression()));
       Goto(function_return(), state);
     } else {
-      ASSERT(context->IsValue());
+      DCHECK(context->IsValue());
       CHECK_ALIVE(VisitForValue(stmt->expression()));
       HValue* return_value = Pop();
       HValue* receiver = environment()->arguments_environment()->Lookup(0);
@@ -4710,7 +4792,7 @@
     } else if (context->IsEffect()) {
       Goto(function_return(), state);
     } else {
-      ASSERT(context->IsValue());
+      DCHECK(context->IsValue());
       HValue* rhs = environment()->arguments_environment()->Lookup(1);
       AddLeaveInlined(rhs, state);
     }
@@ -4729,7 +4811,7 @@
       Pop();
       Goto(function_return(), state);
     } else {
-      ASSERT(context->IsValue());
+      DCHECK(context->IsValue());
       CHECK_ALIVE(VisitForValue(stmt->expression()));
       AddLeaveInlined(Pop(), state);
     }
@@ -4739,26 +4821,21 @@
 
 
 void HOptimizedGraphBuilder::VisitWithStatement(WithStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   return Bailout(kWithStatement);
 }
 
 
 void HOptimizedGraphBuilder::VisitSwitchStatement(SwitchStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
 
-  // We only optimize switch statements with a bounded number of clauses.
-  const int kCaseClauseLimit = 128;
   ZoneList<CaseClause*>* clauses = stmt->cases();
   int clause_count = clauses->length();
   ZoneList<HBasicBlock*> body_blocks(clause_count, zone());
-  if (clause_count > kCaseClauseLimit) {
-    return Bailout(kSwitchStatementTooManyClauses);
-  }
 
   CHECK_ALIVE(VisitForValue(stmt->tag()));
   Add<HSimulate>(stmt->EntryId());
@@ -4861,17 +4938,17 @@
   Add<HSimulate>(stmt->StackCheckId());
   HStackCheck* stack_check =
       HStackCheck::cast(Add<HStackCheck>(HStackCheck::kBackwardsBranch));
-  ASSERT(loop_entry->IsLoopHeader());
+  DCHECK(loop_entry->IsLoopHeader());
   loop_entry->loop_information()->set_stack_check(stack_check);
   CHECK_BAILOUT(Visit(stmt->body()));
 }
 
 
 void HOptimizedGraphBuilder::VisitDoWhileStatement(DoWhileStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
-  ASSERT(current_block() != NULL);
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  DCHECK(current_block() != NULL);
   HBasicBlock* loop_entry = BuildLoopEntry(stmt);
 
   BreakAndContinueInfo break_info(stmt, scope());
@@ -4916,10 +4993,10 @@
 
 
 void HOptimizedGraphBuilder::VisitWhileStatement(WhileStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
-  ASSERT(current_block() != NULL);
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  DCHECK(current_block() != NULL);
   HBasicBlock* loop_entry = BuildLoopEntry(stmt);
 
   // If the condition is constant true, do not generate a branch.
@@ -4956,13 +5033,13 @@
 
 
 void HOptimizedGraphBuilder::VisitForStatement(ForStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   if (stmt->init() != NULL) {
     CHECK_ALIVE(Visit(stmt->init()));
   }
-  ASSERT(current_block() != NULL);
+  DCHECK(current_block() != NULL);
   HBasicBlock* loop_entry = BuildLoopEntry(stmt);
 
   HBasicBlock* loop_successor = NULL;
@@ -5005,9 +5082,9 @@
 
 
 void HOptimizedGraphBuilder::VisitForInStatement(ForInStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
 
   if (!FLAG_optimize_for_in) {
     return Bailout(kForInStatementOptimizationIsDisabled);
@@ -5111,34 +5188,34 @@
 
 
 void HOptimizedGraphBuilder::VisitForOfStatement(ForOfStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   return Bailout(kForOfStatement);
 }
 
 
 void HOptimizedGraphBuilder::VisitTryCatchStatement(TryCatchStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   return Bailout(kTryCatchStatement);
 }
 
 
 void HOptimizedGraphBuilder::VisitTryFinallyStatement(
     TryFinallyStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   return Bailout(kTryFinallyStatement);
 }
 
 
 void HOptimizedGraphBuilder::VisitDebuggerStatement(DebuggerStatement* stmt) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   return Bailout(kDebuggerStatement);
 }
 
@@ -5149,12 +5226,13 @@
 
 
 void HOptimizedGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   Handle<SharedFunctionInfo> shared_info = expr->shared_info();
   if (shared_info.is_null()) {
-    shared_info = Compiler::BuildFunctionInfo(expr, current_info()->script());
+    shared_info =
+        Compiler::BuildFunctionInfo(expr, current_info()->script(), top_info());
   }
   // We also have a stack overflow if the recursive compilation did.
   if (HasStackOverflow()) return;
@@ -5164,19 +5242,27 @@
 }
 
 
+void HOptimizedGraphBuilder::VisitClassLiteral(ClassLiteral* lit) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kClassLiteral);
+}
+
+
 void HOptimizedGraphBuilder::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   return Bailout(kNativeFunctionLiteral);
 }
 
 
 void HOptimizedGraphBuilder::VisitConditional(Conditional* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   HBasicBlock* cond_true = graph()->CreateBasicBlock();
   HBasicBlock* cond_false = graph()->CreateBasicBlock();
   CHECK_BAILOUT(VisitForControl(expr->condition(), cond_true, cond_false));
@@ -5212,25 +5298,32 @@
 
 
 HOptimizedGraphBuilder::GlobalPropertyAccess
-    HOptimizedGraphBuilder::LookupGlobalProperty(
-        Variable* var, LookupResult* lookup, PropertyAccessType access_type) {
+HOptimizedGraphBuilder::LookupGlobalProperty(Variable* var, LookupIterator* it,
+                                             PropertyAccessType access_type) {
   if (var->is_this() || !current_info()->has_global_object()) {
     return kUseGeneric;
   }
-  Handle<GlobalObject> global(current_info()->global_object());
-  global->Lookup(var->name(), lookup);
-  if (!lookup->IsNormal() ||
-      (access_type == STORE && lookup->IsReadOnly()) ||
-      lookup->holder() != *global) {
-    return kUseGeneric;
-  }
 
-  return kUseCell;
+  switch (it->state()) {
+    case LookupIterator::ACCESSOR:
+    case LookupIterator::ACCESS_CHECK:
+    case LookupIterator::INTERCEPTOR:
+    case LookupIterator::NOT_FOUND:
+      return kUseGeneric;
+    case LookupIterator::DATA:
+      if (access_type == STORE && it->IsReadOnly()) return kUseGeneric;
+      return kUseCell;
+    case LookupIterator::JSPROXY:
+    case LookupIterator::TRANSITION:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return kUseGeneric;
 }
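LookupGlobalProperty now classifies the lookup by LookupIterator state instead of a LookupResult; the exhaustive switch ending in UNREACHABLE() lets the compiler flag any state left unhandled. A hedged sketch of that pattern, with a hypothetical State enum and std::abort() standing in for UNREACHABLE():

    #include <cstdlib>

    // Hypothetical subset of LookupIterator::State, for illustration only.
    enum class State { NOT_FOUND, DATA, ACCESSOR, TRANSITION };
    enum Access { kUseCell, kUseGeneric };

    Access Classify(State state, bool read_only, bool is_store) {
      switch (state) {
        case State::ACCESSOR:
        case State::NOT_FOUND:
          return kUseGeneric;  // too dynamic to go through a property cell
        case State::DATA:
          // Stores to read-only data properties take the generic path.
          if (is_store && read_only) return kUseGeneric;
          return kUseCell;
        case State::TRANSITION:
          std::abort();  // stands in for UNREACHABLE()
      }
      std::abort();  // unreached; silences fall-off-the-end warnings
    }
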
 
 
 HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
-  ASSERT(var->IsContextSlot());
+  DCHECK(var->IsContextSlot());
   HValue* context = environment()->context();
   int length = scope()->ContextChainLength(var->scope());
   while (length-- > 0) {
@@ -5247,14 +5340,14 @@
     current_info()->set_this_has_uses(true);
   }
 
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   Variable* variable = expr->var();
   switch (variable->location()) {
     case Variable::UNALLOCATED: {
       if (IsLexicalVariableMode(variable->mode())) {
-        // TODO(rossberg): should this be an ASSERT?
+        // TODO(rossberg): should this be a DCHECK?
         return Bailout(kReferenceToGlobalLexicalVariable);
       }
       // Handle known global constants like 'undefined' specially to avoid a
@@ -5266,17 +5359,13 @@
         return ast_context()->ReturnInstruction(instr, expr->id());
       }
 
-      LookupResult lookup(isolate());
-      GlobalPropertyAccess type = LookupGlobalProperty(variable, &lookup, LOAD);
-
-      if (type == kUseCell &&
-          current_info()->global_object()->IsAccessCheckNeeded()) {
-        type = kUseGeneric;
-      }
+      Handle<GlobalObject> global(current_info()->global_object());
+      LookupIterator it(global, variable->name(),
+                        LookupIterator::OWN_SKIP_INTERCEPTOR);
+      GlobalPropertyAccess type = LookupGlobalProperty(variable, &it, LOAD);
 
       if (type == kUseCell) {
-        Handle<GlobalObject> global(current_info()->global_object());
-        Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
+        Handle<PropertyCell> cell = it.GetPropertyCell();
         if (cell->type()->IsConstant()) {
           PropertyCell::AddDependentCompilationInfo(cell, top_info());
           Handle<Object> constant_object = cell->type()->AsConstant()->Value();
@@ -5288,7 +5377,7 @@
           return ast_context()->ReturnInstruction(constant, expr->id());
         } else {
           HLoadGlobalCell* instr =
-              New<HLoadGlobalCell>(cell, lookup.GetPropertyDetails());
+              New<HLoadGlobalCell>(cell, it.property_details());
           return ast_context()->ReturnInstruction(instr, expr->id());
         }
       } else {
@@ -5299,6 +5388,13 @@
             New<HLoadGlobalGeneric>(global_object,
                                     variable->name(),
                                     ast_context()->is_for_typeof());
+        if (FLAG_vector_ics) {
+          Handle<SharedFunctionInfo> current_shared =
+              function_state()->compilation_info()->shared_info();
+          instr->SetVectorAndSlot(
+              handle(current_shared->feedback_vector(), isolate()),
+              expr->VariableFeedbackSlot());
+        }
         return ast_context()->ReturnInstruction(instr, expr->id());
       }
     }
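The FLAG_vector_ics block added above — repeated later in this diff for generic named and keyed loads — ties each load site to the function's feedback vector plus a per-site slot. A hedged sketch of that pairing with hypothetical types (V8's real TypeFeedbackVector is considerably more involved):

    #include <cstddef>
    #include <vector>

    // Hypothetical stand-ins: one feedback vector per function, one slot
    // per syntactic load site.
    struct FeedbackVector {
      std::vector<int> slots;  // recorded IC state, one entry per site
    };

    struct LoadSite {
      FeedbackVector* vector = nullptr;
      std::size_t slot = 0;

      // Mirrors SetVectorAndSlot(...) above: each site reads and writes
      // only its own slot, so feedback from different sites never mixes.
      void SetVectorAndSlot(FeedbackVector* v, std::size_t s) {
        vector = v;
        slot = s;
      }
    };
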
@@ -5307,7 +5403,7 @@
     case Variable::LOCAL: {
       HValue* value = LookupAndMakeLive(variable);
       if (value == graph()->GetConstantHole()) {
-        ASSERT(IsDeclaredVariableMode(variable->mode()) &&
+        DCHECK(IsDeclaredVariableMode(variable->mode()) &&
                variable->mode() != VAR);
         return Bailout(kReferenceToUninitializedVariable);
       }
@@ -5341,18 +5437,18 @@
 
 
 void HOptimizedGraphBuilder::VisitLiteral(Literal* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   HConstant* instr = New<HConstant>(expr->value());
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
 void HOptimizedGraphBuilder::VisitRegExpLiteral(RegExpLiteral* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   Handle<JSFunction> closure = function_state()->compilation_info()->closure();
   Handle<FixedArray> literals(closure->literals());
   HRegExpLiteral* instr = New<HRegExpLiteral>(literals,
@@ -5384,7 +5480,7 @@
     return false;
   }
 
-  ASSERT(max_depth >= 0 && *max_properties >= 0);
+  DCHECK(max_depth >= 0 && *max_properties >= 0);
   if (max_depth == 0) return false;
 
   Isolate* isolate = boilerplate->GetIsolate();
@@ -5439,9 +5535,9 @@
 
 
 void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   expr->BuildConstantProperties(isolate());
   Handle<JSFunction> closure = function_state()->compilation_info()->closure();
   HInstruction* literal;
@@ -5483,7 +5579,7 @@
     // TODO(mvstanton): Add a flag to turn off creation of any
     // AllocationMementos for this call: we are in crankshaft and should have
     // learned enough about transition behavior to stop emitting mementos.
-    Runtime::FunctionId function_id = Runtime::kHiddenCreateObjectLiteral;
+    Runtime::FunctionId function_id = Runtime::kCreateObjectLiteral;
     literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
                                 Runtime::FunctionForId(function_id),
                                 4);
@@ -5504,7 +5600,7 @@
 
     switch (property->kind()) {
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->value()->IsInternalizedString()) {
@@ -5517,18 +5613,18 @@
             if (map.is_null()) {
               // If we don't know the monomorphic type, do a generic store.
               CHECK_ALIVE(store = BuildNamedGeneric(
-                  STORE, literal, name, value));
+                  STORE, NULL, literal, name, value));
             } else {
               PropertyAccessInfo info(this, STORE, ToType(map), name);
               if (info.CanAccessMonomorphic()) {
                 HValue* checked_literal = Add<HCheckMaps>(literal, map);
-                ASSERT(!info.lookup()->IsPropertyCallbacks());
+                DCHECK(!info.IsAccessor());
                 store = BuildMonomorphicAccess(
                     &info, literal, checked_literal, value,
                     BailoutId::None(), BailoutId::None());
               } else {
                 CHECK_ALIVE(store = BuildNamedGeneric(
-                    STORE, literal, name, value));
+                    STORE, NULL, literal, name, value));
               }
             }
             AddInstruction(store);
@@ -5564,9 +5660,9 @@
 
 
 void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   expr->BuildConstantElements(isolate());
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
@@ -5601,14 +5697,14 @@
       isolate()->counters()->cow_arrays_created_runtime()->Increment();
     }
   } else {
-    ASSERT(literals_cell->IsAllocationSite());
+    DCHECK(literals_cell->IsAllocationSite());
     site = Handle<AllocationSite>::cast(literals_cell);
     boilerplate_object = Handle<JSObject>(
         JSObject::cast(site->transition_info()), isolate());
   }
 
-  ASSERT(!boilerplate_object.is_null());
-  ASSERT(site->SitePointsToLiteral());
+  DCHECK(!boilerplate_object.is_null());
+  DCHECK(site->SitePointsToLiteral());
 
   ElementsKind boilerplate_elements_kind =
       boilerplate_object->GetElementsKind();
@@ -5641,7 +5737,7 @@
     // TODO(mvstanton): Consider a flag to turn off creation of any
     // AllocationMementos for this call: we are in crankshaft and should have
     // learned enough about transition behavior to stop emitting mementos.
-    Runtime::FunctionId function_id = Runtime::kHiddenCreateArrayLiteral;
+    Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
     literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
                                 Runtime::FunctionForId(function_id),
                                 4);
@@ -5709,18 +5805,15 @@
     PropertyAccessInfo* info,
     HValue* checked_object) {
   // See if this is a load for an immutable property
-  if (checked_object->ActualValue()->IsConstant() &&
-      info->lookup()->IsCacheable() &&
-      info->lookup()->IsReadOnly() && info->lookup()->IsDontDelete()) {
+  if (checked_object->ActualValue()->IsConstant()) {
     Handle<Object> object(
         HConstant::cast(checked_object->ActualValue())->handle(isolate()));
 
     if (object->IsJSObject()) {
-      LookupResult lookup(isolate());
-      Handle<JSObject>::cast(object)->Lookup(info->name(), &lookup);
-      Handle<Object> value(lookup.GetLazyValue(), isolate());
-
-      if (!value->IsTheHole()) {
+      LookupIterator it(object, info->name(),
+                        LookupIterator::OWN_SKIP_INTERCEPTOR);
+      Handle<Object> value = JSObject::GetDataProperty(&it);
+      if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
         return New<HConstant>(value);
       }
     }
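The rewritten guard above folds a constant load only for a property that is found, read-only, and non-configurable, since only then can no later code change or redefine its value. A hedged sketch of the same guard order, with a hypothetical mini-iterator in place of LookupIterator:

    #include <string>

    // Hypothetical mini-iterator; V8's LookupIterator exposes the same
    // three predicates used above.
    struct MiniLookup {
      bool found = false;
      bool read_only = false;
      bool configurable = true;
      std::string value;

      bool IsFound() const { return found; }
      bool IsReadOnly() const { return read_only; }
      bool IsConfigurable() const { return configurable; }
    };

    // Only a found, read-only, non-configurable data property is safe to
    // embed as a compile-time constant; anything else needs a real load.
    const std::string* TryFoldConstant(const MiniLookup& it) {
      if (it.IsFound() && it.IsReadOnly() && !it.IsConfigurable()) {
        return &it.value;
      }
      return nullptr;
    }
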
@@ -5754,7 +5847,7 @@
     PropertyAccessInfo* info,
     HValue* checked_object,
     HValue* value) {
-  bool transition_to_field = info->lookup()->IsTransition();
+  bool transition_to_field = info->IsTransition();
   // TODO(verwaest): Move this logic into PropertyAccessInfo.
   HObjectAccess field_access = info->access();
 
@@ -5771,8 +5864,9 @@
       HInstruction* heap_number = Add<HAllocate>(heap_number_size,
           HType::HeapObject(),
           NOT_TENURED,
-          HEAP_NUMBER_TYPE);
-      AddStoreMapConstant(heap_number, isolate()->factory()->heap_number_map());
+          MUTABLE_HEAP_NUMBER_TYPE);
+      AddStoreMapConstant(
+          heap_number, isolate()->factory()->mutable_heap_number_map());
       Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
                             value);
       instr = New<HStoreNamedField>(checked_object->ActualValue(),
@@ -5792,7 +5886,7 @@
     }
 
     if (!info->field_maps()->is_empty()) {
-      ASSERT(field_access.representation().IsHeapObject());
+      DCHECK(field_access.representation().IsHeapObject());
       value = Add<HCheckMaps>(value, info->field_maps());
     }
 
@@ -5804,7 +5898,7 @@
 
   if (transition_to_field) {
     Handle<Map> transition(info->transition());
-    ASSERT(!transition->is_deprecated());
+    DCHECK(!transition->is_deprecated());
     instr->SetTransition(Add<HConstant>(transition));
   }
   return instr;
@@ -5830,26 +5924,26 @@
 
   if (!LookupDescriptor()) return false;
 
-  if (!lookup_.IsFound()) {
-    return (!info->lookup_.IsFound() || info->has_holder()) &&
-        map()->prototype() == info->map()->prototype();
+  if (!IsFound()) {
+    return (!info->IsFound() || info->has_holder()) &&
+           map()->prototype() == info->map()->prototype();
   }
 
   // Mismatch if the other access info found the property in the prototype
   // chain.
   if (info->has_holder()) return false;
 
-  if (lookup_.IsPropertyCallbacks()) {
+  if (IsAccessor()) {
     return accessor_.is_identical_to(info->accessor_) &&
         api_holder_.is_identical_to(info->api_holder_);
   }
 
-  if (lookup_.IsConstant()) {
+  if (IsConstant()) {
     return constant_.is_identical_to(info->constant_);
   }
 
-  ASSERT(lookup_.IsField());
-  if (!info->lookup_.IsField()) return false;
+  DCHECK(IsField());
+  if (!info->IsField()) return false;
 
   Representation r = access_.representation();
   if (IsLoad()) {
@@ -5892,23 +5986,23 @@
 
 
 bool HOptimizedGraphBuilder::PropertyAccessInfo::LoadResult(Handle<Map> map) {
-  if (!IsLoad() && lookup_.IsProperty() &&
-      (lookup_.IsReadOnly() || !lookup_.IsCacheable())) {
+  if (!IsLoad() && IsProperty() && IsReadOnly()) {
     return false;
   }
 
-  if (lookup_.IsField()) {
+  if (IsField()) {
     // Construct the object field access.
-    access_ = HObjectAccess::ForField(map, &lookup_, name_);
+    int index = GetLocalFieldIndexFromMap(map);
+    access_ = HObjectAccess::ForField(map, index, representation(), name_);
 
     // Load field map for heap objects.
     LoadFieldMaps(map);
-  } else if (lookup_.IsPropertyCallbacks()) {
-    Handle<Object> callback(lookup_.GetValueFromMap(*map), isolate());
-    if (!callback->IsAccessorPair()) return false;
-    Object* raw_accessor = IsLoad()
-        ? Handle<AccessorPair>::cast(callback)->getter()
-        : Handle<AccessorPair>::cast(callback)->setter();
+  } else if (IsAccessor()) {
+    Handle<Object> accessors = GetAccessorsFromMap(map);
+    if (!accessors->IsAccessorPair()) return false;
+    Object* raw_accessor =
+        IsLoad() ? Handle<AccessorPair>::cast(accessors)->getter()
+                 : Handle<AccessorPair>::cast(accessors)->setter();
     if (!raw_accessor->IsJSFunction()) return false;
     Handle<JSFunction> accessor = handle(JSFunction::cast(raw_accessor));
     if (accessor->shared()->IsApiFunction()) {
@@ -5921,8 +6015,8 @@
       }
     }
     accessor_ = accessor;
-  } else if (lookup_.IsConstant()) {
-    constant_ = handle(lookup_.GetConstantFromMap(*map), isolate());
+  } else if (IsConstant()) {
+    constant_ = GetConstantFromMap(map);
   }
 
   return true;
@@ -5936,12 +6030,12 @@
   field_type_ = HType::Tagged();
 
   // Figure out the field type from the accessor map.
-  Handle<HeapType> field_type(lookup_.GetFieldTypeFromMap(*map), isolate());
+  Handle<HeapType> field_type = GetFieldTypeFromMap(map);
 
   // Collect the (stable) maps from the field type.
   int num_field_maps = field_type->NumClasses();
   if (num_field_maps == 0) return;
-  ASSERT(access_.representation().IsHeapObject());
+  DCHECK(access_.representation().IsHeapObject());
   field_maps_.Reserve(num_field_maps, zone());
   HeapType::Iterator<Map> it = field_type->Classes();
   while (!it.Done()) {
@@ -5954,16 +6048,15 @@
     it.Advance();
   }
   field_maps_.Sort();
-  ASSERT_EQ(num_field_maps, field_maps_.length());
+  DCHECK_EQ(num_field_maps, field_maps_.length());
 
   // Determine field HType from field HeapType.
   field_type_ = HType::FromType<HeapType>(field_type);
-  ASSERT(field_type_.IsHeapObject());
+  DCHECK(field_type_.IsHeapObject());
 
   // Add dependency on the map that introduced the field.
-  Map::AddDependentCompilationInfo(
-      handle(lookup_.GetFieldOwnerFromMap(*map), isolate()),
-      DependentCode::kFieldTypeGroup, top_info());
+  Map::AddDependentCompilationInfo(GetFieldOwnerFromMap(map),
+                                   DependentCode::kFieldTypeGroup, top_info());
 }
 
 
@@ -5981,7 +6074,7 @@
       return false;
     }
     map->LookupDescriptor(*holder_, *name_, &lookup_);
-    if (lookup_.IsFound()) return LoadResult(map);
+    if (IsFound()) return LoadResult(map);
   }
   lookup_.NotFound();
   return true;
@@ -5991,20 +6084,29 @@
 bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessMonomorphic() {
   if (!CanInlinePropertyAccess(type_)) return false;
   if (IsJSObjectFieldAccessor()) return IsLoad();
-  if (!LookupDescriptor()) return false;
-  if (lookup_.IsFound()) {
-    if (IsLoad()) return true;
-    return !lookup_.IsReadOnly() && lookup_.IsCacheable();
+  if (this->map()->function_with_prototype() &&
+      !this->map()->has_non_instance_prototype() &&
+      name_.is_identical_to(isolate()->factory()->prototype_string())) {
+    return IsLoad();
   }
+  if (!LookupDescriptor()) return false;
+  if (IsFound()) return IsLoad() || !IsReadOnly();
   if (!LookupInPrototypes()) return false;
   if (IsLoad()) return true;
 
-  if (lookup_.IsPropertyCallbacks()) return true;
+  if (IsAccessor()) return true;
   Handle<Map> map = this->map();
   map->LookupTransition(NULL, *name_, &lookup_);
   if (lookup_.IsTransitionToField() && map->unused_property_fields() > 0) {
     // Construct the object field access.
-    access_ = HObjectAccess::ForField(map, &lookup_, name_);
+    int descriptor = transition()->LastAdded();
+    int index =
+        transition()->instance_descriptors()->GetFieldIndex(descriptor) -
+        map->inobject_properties();
+    PropertyDetails details =
+        transition()->instance_descriptors()->GetDetails(descriptor);
+    Representation representation = details.representation();
+    access_ = HObjectAccess::ForField(map, index, representation, name_);
 
     // Load field map for heap objects.
     LoadFieldMaps(transition());
@@ -6016,7 +6118,7 @@
 
 bool HOptimizedGraphBuilder::PropertyAccessInfo::CanAccessAsMonomorphic(
     SmallMapList* types) {
-  ASSERT(type_->Is(ToType(types->first())));
+  DCHECK(type_->Is(ToType(types->first())));
   if (!CanAccessMonomorphic()) return false;
   STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
   if (types->length() > kMaxLoadPolymorphism) return false;
@@ -6039,8 +6141,8 @@
   if (type_->Is(Type::Number())) return false;
 
   // Multiple maps cannot transition to the same target map.
-  ASSERT(!IsLoad() || !lookup_.IsTransition());
-  if (lookup_.IsTransition() && types->length() > 1) return false;
+  DCHECK(!IsLoad() || !IsTransition());
+  if (IsTransition() && types->length() > 1) return false;
 
   for (int i = 1; i < types->length(); ++i) {
     PropertyAccessInfo test_info(
@@ -6052,6 +6154,14 @@
 }
 
 
+Handle<Map> HOptimizedGraphBuilder::PropertyAccessInfo::map() {
+  JSFunction* ctor = IC::GetRootConstructor(
+      type_, current_info()->closure()->context()->native_context());
+  if (ctor != NULL) return handle(ctor->initial_map());
+  return type_->AsClass()->Map();
+}
+
+
 static bool NeedsWrappingFor(Type* type, Handle<JSFunction> target) {
   return type->Is(Type::NumberOrString()) &&
       target->shared()->strict_mode() == SLOPPY &&
@@ -6070,22 +6180,28 @@
 
   HObjectAccess access = HObjectAccess::ForMap();  // bogus default
   if (info->GetJSObjectFieldAccess(&access)) {
-    ASSERT(info->IsLoad());
+    DCHECK(info->IsLoad());
     return New<HLoadNamedField>(object, checked_object, access);
   }
 
+  if (info->name().is_identical_to(isolate()->factory()->prototype_string()) &&
+      info->map()->function_with_prototype()) {
+    DCHECK(!info->map()->has_non_instance_prototype());
+    return New<HLoadFunctionPrototype>(checked_object);
+  }
+
   HValue* checked_holder = checked_object;
   if (info->has_holder()) {
     Handle<JSObject> prototype(JSObject::cast(info->map()->prototype()));
     checked_holder = BuildCheckPrototypeMaps(prototype, info->holder());
   }
 
-  if (!info->lookup()->IsFound()) {
-    ASSERT(info->IsLoad());
+  if (!info->IsFound()) {
+    DCHECK(info->IsLoad());
     return graph()->GetConstantUndefined();
   }
 
-  if (info->lookup()->IsField()) {
+  if (info->IsField()) {
     if (info->IsLoad()) {
       return BuildLoadNamedField(info, checked_holder);
     } else {
@@ -6093,12 +6209,12 @@
     }
   }
 
-  if (info->lookup()->IsTransition()) {
-    ASSERT(!info->IsLoad());
+  if (info->IsTransition()) {
+    DCHECK(!info->IsLoad());
     return BuildStoreNamedField(info, checked_object, value);
   }
 
-  if (info->lookup()->IsPropertyCallbacks()) {
+  if (info->IsAccessor()) {
     Push(checked_object);
     int argument_count = 1;
     if (!info->IsLoad()) {
@@ -6122,7 +6238,7 @@
     return BuildCallConstantFunction(info->accessor(), argument_count);
   }
 
-  ASSERT(info->lookup()->IsConstant());
+  DCHECK(info->IsConstant());
   if (info->IsLoad()) {
     return New<HConstant>(info->constant());
   } else {
@@ -6133,6 +6249,7 @@
 
 void HOptimizedGraphBuilder::HandlePolymorphicNamedFieldAccess(
     PropertyAccessType access_type,
+    Expression* expr,
     BailoutId ast_id,
     BailoutId return_id,
     HValue* object,
@@ -6147,7 +6264,8 @@
 
   bool handle_smi = false;
   STATIC_ASSERT(kMaxLoadPolymorphism == kMaxStorePolymorphism);
-  for (int i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
+  int i;
+  for (i = 0; i < types->length() && count < kMaxLoadPolymorphism; ++i) {
     PropertyAccessInfo info(this, access_type, ToType(types->at(i)), name);
     if (info.type()->Is(Type::String())) {
       if (handled_string) continue;
@@ -6162,7 +6280,12 @@
     }
   }
 
-  count = 0;
+  if (i < types->length()) {
+    count = -1;
+    types->Clear();
+  } else {
+    count = 0;
+  }
   HControlInstruction* smi_check = NULL;
   handled_string = false;
 
@@ -6246,7 +6369,8 @@
   if (count == types->length() && FLAG_deoptimize_uncommon_cases) {
     FinishExitWithHardDeoptimization("Unknown map in polymorphic access");
   } else {
-    HInstruction* instr = BuildNamedGeneric(access_type, object, name, value);
+    HInstruction* instr = BuildNamedGeneric(access_type, expr, object, name,
+                                            value);
     AddInstruction(instr);
     if (!ast_context()->IsEffect()) Push(access_type == LOAD ? instr : value);
 
@@ -6259,7 +6383,7 @@
     }
   }
 
-  ASSERT(join != NULL);
+  DCHECK(join != NULL);
   if (join->HasPredecessor()) {
     join->SetJoinId(ast_id);
     set_current_block(join);
@@ -6282,8 +6406,8 @@
     types->FilterForPossibleTransitions(root_map);
     monomorphic = types->length() == 1;
   }
-  return monomorphic && CanInlinePropertyAccess(
-      IC::MapToType<Type>(types->first(), zone));
+  return monomorphic &&
+         CanInlinePropertyAccess(IC::MapToType<Type>(types->first(), zone));
 }
 
 
@@ -6302,16 +6426,19 @@
                                         bool is_uninitialized) {
   if (!prop->key()->IsPropertyName()) {
     // Keyed store.
-    HValue* value = environment()->ExpressionStackAt(0);
-    HValue* key = environment()->ExpressionStackAt(1);
-    HValue* object = environment()->ExpressionStackAt(2);
+    HValue* value = Pop();
+    HValue* key = Pop();
+    HValue* object = Pop();
     bool has_side_effects = false;
-    HandleKeyedElementAccess(object, key, value, expr,
-                             STORE, &has_side_effects);
-    Drop(3);
-    Push(value);
-    Add<HSimulate>(return_id, REMOVABLE_SIMULATE);
-    return ast_context()->ReturnValue(Pop());
+    HValue* result = HandleKeyedElementAccess(
+        object, key, value, expr, ast_id, return_id, STORE, &has_side_effects);
+    if (has_side_effects) {
+      if (!ast_context()->IsEffect()) Push(value);
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+      if (!ast_context()->IsEffect()) Drop(1);
+    }
+    if (result == NULL) return;
+    return ast_context()->ReturnValue(value);
   }
 
   // Named store.
@@ -6320,7 +6447,7 @@
 
   Literal* key = prop->key()->AsLiteral();
   Handle<String> name = Handle<String>::cast(key->value());
-  ASSERT(!name.is_null());
+  DCHECK(!name.is_null());
 
   HInstruction* instr = BuildNamedAccess(STORE, ast_id, return_id, expr,
                                          object, name, value, is_uninitialized);
@@ -6338,7 +6465,7 @@
 
 void HOptimizedGraphBuilder::HandlePropertyAssignment(Assignment* expr) {
   Property* prop = expr->target()->AsProperty();
-  ASSERT(prop != NULL);
+  DCHECK(prop != NULL);
   CHECK_ALIVE(VisitForValue(prop->obj()));
   if (!prop->key()->IsPropertyName()) {
     CHECK_ALIVE(VisitForValue(prop->key()));
@@ -6356,11 +6483,11 @@
     Variable* var,
     HValue* value,
     BailoutId ast_id) {
-  LookupResult lookup(isolate());
-  GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, STORE);
+  Handle<GlobalObject> global(current_info()->global_object());
+  LookupIterator it(global, var->name(), LookupIterator::OWN_SKIP_INTERCEPTOR);
+  GlobalPropertyAccess type = LookupGlobalProperty(var, &it, STORE);
   if (type == kUseCell) {
-    Handle<GlobalObject> global(current_info()->global_object());
-    Handle<PropertyCell> cell(global->GetPropertyCell(&lookup));
+    Handle<PropertyCell> cell = it.GetPropertyCell();
     if (cell->type()->IsConstant()) {
       Handle<Object> constant = cell->type()->AsConstant()->Value();
       if (value->IsConstant()) {
@@ -6385,7 +6512,7 @@
       }
     }
     HInstruction* instr =
-        Add<HStoreGlobalCell>(value, cell, lookup.GetPropertyDetails());
+        Add<HStoreGlobalCell>(value, cell, it.property_details());
     if (instr->HasObservableSideEffects()) {
       Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
     }
@@ -6397,7 +6524,7 @@
         Add<HStoreNamedGeneric>(global_object, var->name(),
                                  value, function_strict_mode());
     USE(instr);
-    ASSERT(instr->HasObservableSideEffects());
+    DCHECK(instr->HasObservableSideEffects());
     Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
   }
 }
@@ -6407,7 +6534,7 @@
   Expression* target = expr->target();
   VariableProxy* proxy = target->AsVariableProxy();
   Property* prop = target->AsProperty();
-  ASSERT(proxy == NULL || prop == NULL);
+  DCHECK(proxy == NULL || prop == NULL);
 
   // We have a second position recorded in the FullCodeGenerator to have
   // type feedback for the binary operation.
@@ -6486,8 +6613,7 @@
     CHECK_ALIVE(VisitForValue(prop->obj()));
     HValue* object = Top();
     HValue* key = NULL;
-    if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
-        prop->IsStringAccess()) {
+    if (!prop->key()->IsPropertyName() || prop->IsStringAccess()) {
       CHECK_ALIVE(VisitForValue(prop->key()));
       key = Top();
     }
@@ -6509,12 +6635,12 @@
 
 
 void HOptimizedGraphBuilder::VisitAssignment(Assignment* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   VariableProxy* proxy = expr->target()->AsVariableProxy();
   Property* prop = expr->target()->AsProperty();
-  ASSERT(proxy == NULL || prop == NULL);
+  DCHECK(proxy == NULL || prop == NULL);
 
   if (expr->is_compound()) {
     HandleCompoundAssignment(expr);
@@ -6610,7 +6736,7 @@
                    expr->op() == Token::INIT_CONST) {
           mode = HStoreContextSlot::kNoCheck;
         } else {
-          ASSERT(expr->op() == Token::INIT_CONST_LEGACY);
+          DCHECK(expr->op() == Token::INIT_CONST_LEGACY);
 
           mode = HStoreContextSlot::kCheckIgnoreAssignment;
         }
@@ -6640,20 +6766,22 @@
 
 
 void HOptimizedGraphBuilder::VisitThrow(Throw* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
-  // We don't optimize functions with invalid left-hand sides in
-  // assignments, count operations, or for-in.  Consequently throw can
-  // currently only occur in an effect context.
-  ASSERT(ast_context()->IsEffect());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  if (!ast_context()->IsEffect()) {
+    // The parser turns invalid left-hand sides in assignments into throw
+    // statements, which may not be in effect contexts. We might still try
+    // to optimize such functions; bail out now if we do.
+    return Bailout(kInvalidLeftHandSideInAssignment);
+  }
   CHECK_ALIVE(VisitForValue(expr->exception()));
 
   HValue* value = environment()->Pop();
   if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
   Add<HPushArguments>(value);
   Add<HCallRuntime>(isolate()->factory()->empty_string(),
-                    Runtime::FunctionForId(Runtime::kHiddenThrow), 1);
+                    Runtime::FunctionForId(Runtime::kThrow), 1);
   Add<HSimulate>(expr->id());
 
   // If the throw definitely exits the function, we can finish with a dummy
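The guard added above replaces an assertion that throw only occurs in effect contexts; the comment's reasoning is easiest to see with a concrete input. A hedged sketch of the guard, with the illustrative (hypothetical) JavaScript shown in comments:

    // Illustrative input: an invalid assignment target such as
    //
    //   var x = (1 = 2);   // invalid left-hand side
    //
    // is rewritten by the parser into a throw *expression*, so the throw
    // can appear in a value context rather than an effect context.
    //
    // Sketch of the guard with a hypothetical context flag:
    bool VisitThrowPrecheck(bool context_is_effect) {
      if (!context_is_effect) {
        // corresponds to Bailout(kInvalidLeftHandSideInAssignment) above
        return false;
      }
      return true;  // safe to build the push-arguments/call-runtime sequence
    }
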
@@ -6693,6 +6821,7 @@
 
 HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
     PropertyAccessType access_type,
+    Expression* expr,
     HValue* object,
     Handle<String> name,
     HValue* value,
@@ -6702,7 +6831,15 @@
                      Deoptimizer::SOFT);
   }
   if (access_type == LOAD) {
-    return New<HLoadNamedGeneric>(object, name);
+    HLoadNamedGeneric* result = New<HLoadNamedGeneric>(object, name);
+    if (FLAG_vector_ics) {
+      Handle<SharedFunctionInfo> current_shared =
+          function_state()->compilation_info()->shared_info();
+      result->SetVectorAndSlot(
+          handle(current_shared->feedback_vector(), isolate()),
+          expr->AsProperty()->PropertyFeedbackSlot());
+    }
+    return result;
   } else {
     return New<HStoreNamedGeneric>(object, name, value, function_strict_mode());
   }
@@ -6712,11 +6849,20 @@
 
 HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
     PropertyAccessType access_type,
+    Expression* expr,
     HValue* object,
     HValue* key,
     HValue* value) {
   if (access_type == LOAD) {
-    return New<HLoadKeyedGeneric>(object, key);
+    HLoadKeyedGeneric* result = New<HLoadKeyedGeneric>(object, key);
+    if (FLAG_vector_ics) {
+      Handle<SharedFunctionInfo> current_shared =
+          function_state()->compilation_info()->shared_info();
+      result->SetVectorAndSlot(
+          handle(current_shared->feedback_vector(), isolate()),
+          expr->AsProperty()->PropertyFeedbackSlot());
+    }
+    return result;
   } else {
     return New<HStoreKeyedGeneric>(object, key, value, function_strict_mode());
   }
@@ -6756,14 +6902,16 @@
     // monomorphic stores need a prototype chain check because shape
     // changes could allow callbacks on elements in the chain that
     // aren't compatible with monomorphic keyed stores.
-    Handle<JSObject> prototype(JSObject::cast(map->prototype()));
-    JSObject* holder = JSObject::cast(map->prototype());
-    while (!holder->GetPrototype()->IsNull()) {
-      holder = JSObject::cast(holder->GetPrototype());
+    PrototypeIterator iter(map);
+    JSObject* holder = NULL;
+    while (!iter.IsAtEnd()) {
+      holder = JSObject::cast(*PrototypeIterator::GetCurrent(iter));
+      iter.Advance();
     }
+    DCHECK(holder && holder->IsJSObject());
 
-    BuildCheckPrototypeMaps(prototype,
-                            Handle<JSObject>(JSObject::cast(holder)));
+    BuildCheckPrototypeMaps(handle(JSObject::cast(map->prototype())),
+                            Handle<JSObject>(holder));
   }
 
   LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
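The PrototypeIterator loop above advances holder to the last object on the receiver's prototype chain before emitting the chain check. A hedged sketch of that walk over a plain linked chain, with nullptr playing the role of iter.IsAtEnd():

    // Hypothetical chain node; next == nullptr means a null prototype.
    struct Proto {
      const Proto* next;
    };

    // Mirrors the loop above: keep advancing until the iterator is at the
    // end, remembering the last object visited.
    const Proto* LastOnChain(const Proto* start) {
      const Proto* holder = nullptr;
      for (const Proto* p = start; p != nullptr; p = p->next) {
        holder = p;
      }
      return holder;  // the DCHECK above guards the non-null case
    }
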
@@ -6842,6 +6990,7 @@
 
 
 HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
+    Expression* expr,
     HValue* object,
     HValue* key,
     HValue* val,
@@ -6873,7 +7022,8 @@
       possible_transitioned_maps.Add(map);
     }
     if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
-      HInstruction* result = BuildKeyedGeneric(access_type, object, key, val);
+      HInstruction* result = BuildKeyedGeneric(access_type, expr, object, key,
+                                               val);
       *has_side_effects = result->HasObservableSideEffects();
       return AddInstruction(result);
     }
@@ -6890,9 +7040,9 @@
   HTransitionElementsKind* transition = NULL;
   for (int i = 0; i < maps->length(); ++i) {
     Handle<Map> map = maps->at(i);
-    ASSERT(map->IsMap());
+    DCHECK(map->IsMap());
     if (!transition_target.at(i).is_null()) {
-      ASSERT(Map::IsValidElementsTransition(
+      DCHECK(Map::IsValidElementsTransition(
           map->elements_kind(),
           transition_target.at(i)->elements_kind()));
       transition = Add<HTransitionElementsKind>(object, map,
@@ -6904,20 +7054,21 @@
 
   // If only one map is left after transitioning, handle this case
   // monomorphically.
-  ASSERT(untransitionable_maps.length() >= 1);
+  DCHECK(untransitionable_maps.length() >= 1);
   if (untransitionable_maps.length() == 1) {
     Handle<Map> untransitionable_map = untransitionable_maps[0];
     HInstruction* instr = NULL;
     if (untransitionable_map->has_slow_elements_kind() ||
         !untransitionable_map->IsJSObjectMap()) {
-      instr = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
+      instr = AddInstruction(BuildKeyedGeneric(access_type, expr, object, key,
+                                               val));
     } else {
       instr = BuildMonomorphicElementAccess(
           object, key, val, transition, untransitionable_map, access_type,
           store_mode);
     }
     *has_side_effects |= instr->HasObservableSideEffects();
-    return access_type == STORE ? NULL : instr;
+    return access_type == STORE ? val : instr;
   }
 
   HBasicBlock* join = graph()->CreateBasicBlock();
@@ -6935,9 +7086,10 @@
     set_current_block(this_map);
     HInstruction* access = NULL;
     if (IsDictionaryElementsKind(elements_kind)) {
-      access = AddInstruction(BuildKeyedGeneric(access_type, object, key, val));
+      access = AddInstruction(BuildKeyedGeneric(access_type, expr, object, key,
+                                                val));
     } else {
-      ASSERT(IsFastElementsKind(elements_kind) ||
+      DCHECK(IsFastElementsKind(elements_kind) ||
              IsExternalArrayElementsKind(elements_kind) ||
              IsFixedTypedArrayElementsKind(elements_kind));
       LoadKeyedHoleMode load_mode = BuildKeyedHoleMode(map);
@@ -6964,23 +7116,43 @@
   // necessary because FinishExitWithHardDeoptimization does an AbnormalExit
   // rather than joining the join block. If this becomes an issue, insert a
   // generic access in the case length() == 0.
-  ASSERT(join->predecessors()->length() > 0);
+  DCHECK(join->predecessors()->length() > 0);
   // Deopt if none of the cases matched.
   NoObservableSideEffectsScope scope(this);
   FinishExitWithHardDeoptimization("Unknown map in polymorphic element access");
   set_current_block(join);
-  return access_type == STORE ? NULL : Pop();
+  return access_type == STORE ? val : Pop();
 }
 
 
 HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
-    HValue* obj,
-    HValue* key,
-    HValue* val,
-    Expression* expr,
-    PropertyAccessType access_type,
+    HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId ast_id,
+    BailoutId return_id, PropertyAccessType access_type,
     bool* has_side_effects) {
-  ASSERT(!expr->IsPropertyName());
+  if (key->ActualValue()->IsConstant()) {
+    Handle<Object> constant =
+        HConstant::cast(key->ActualValue())->handle(isolate());
+    uint32_t array_index;
+    if (constant->IsString() &&
+        !Handle<String>::cast(constant)->AsArrayIndex(&array_index)) {
+      if (!constant->IsUniqueName()) {
+        constant = isolate()->factory()->InternalizeString(
+            Handle<String>::cast(constant));
+      }
+      HInstruction* instr =
+          BuildNamedAccess(access_type, ast_id, return_id, expr, obj,
+                           Handle<String>::cast(constant), val, false);
+      if (instr == NULL || instr->IsLinked()) {
+        *has_side_effects = false;
+      } else {
+        AddInstruction(instr);
+        *has_side_effects = instr->HasObservableSideEffects();
+      }
+      return instr;
+    }
+  }
+
+  DCHECK(!expr->IsPropertyName());
   HInstruction* instr = NULL;
 
   SmallMapList* types;
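The block added at the top of HandleKeyedElementAccess reroutes a keyed access whose key is a constant string — but not an array index like "0" — through the named-property machinery after internalizing the string. A hedged sketch of the array-index test, with std::string standing in for V8 handles (the real AsArrayIndex also bounds the value below 2^32-1):

    #include <cctype>
    #include <string>

    // True when s is a canonical array index: digits only, no leading
    // zeros (except "0" itself). Range checking is omitted in this sketch.
    bool IsArrayIndex(const std::string& s) {
      if (s.empty()) return false;
      if (s.size() > 1 && s[0] == '0') return false;
      for (char c : s) {
        if (!std::isdigit(static_cast<unsigned char>(c))) return false;
      }
      return true;
    }

    // Mirrors the dispatch above: constant non-index string keys behave
    // like named property accesses, e.g. o["foo"] is treated as o.foo.
    bool ShouldUseNamedAccess(const std::string& constant_key) {
      return !IsArrayIndex(constant_key);
    }
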
@@ -7006,7 +7178,8 @@
   if (monomorphic) {
     Handle<Map> map = types->first();
     if (map->has_slow_elements_kind() || !map->IsJSObjectMap()) {
-      instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
+      instr = AddInstruction(BuildKeyedGeneric(access_type, expr, obj, key,
+                                               val));
     } else {
       BuildCheckHeapObject(obj);
       instr = BuildMonomorphicElementAccess(
@@ -7014,7 +7187,7 @@
     }
   } else if (!force_generic && (types != NULL && !types->is_empty())) {
     return HandlePolymorphicElementAccess(
-        obj, key, val, types, access_type,
+        expr, obj, key, val, types, access_type,
         expr->GetStoreMode(), has_side_effects);
   } else {
     if (access_type == STORE) {
@@ -7029,7 +7202,7 @@
                          Deoptimizer::SOFT);
       }
     }
-    instr = AddInstruction(BuildKeyedGeneric(access_type, obj, key, val));
+    instr = AddInstruction(BuildKeyedGeneric(access_type, expr, obj, key, val));
   }
   *has_side_effects = instr->HasObservableSideEffects();
   return instr;
@@ -7075,7 +7248,9 @@
   HInstruction* result = NULL;
   if (expr->key()->IsPropertyName()) {
     Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
-    if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("length"))) return false;
+    if (!String::Equals(name, isolate()->factory()->length_string())) {
+      return false;
+    }
 
     if (function_state()->outer() == NULL) {
       HInstruction* elements = Add<HArgumentsElements>(false);
@@ -7124,19 +7299,19 @@
     bool is_uninitialized) {
   SmallMapList* types;
   ComputeReceiverTypes(expr, object, &types, zone());
-  ASSERT(types != NULL);
+  DCHECK(types != NULL);
 
   if (types->length() > 0) {
     PropertyAccessInfo info(this, access, ToType(types->first()), name);
     if (!info.CanAccessAsMonomorphic(types)) {
       HandlePolymorphicNamedFieldAccess(
-          access, ast_id, return_id, object, value, types, name);
+          access, expr, ast_id, return_id, object, value, types, name);
       return NULL;
     }
 
     HValue* checked_object;
     // Type::Number() is only supported by polymorphic load/call handling.
-    ASSERT(!info.type()->Is(Type::Number()));
+    DCHECK(!info.type()->Is(Type::Number()));
     BuildCheckHeapObject(object);
     if (AreStringTypes(types)) {
       checked_object =
@@ -7148,7 +7323,7 @@
         &info, object, checked_object, value, ast_id, return_id);
   }
 
-  return BuildNamedGeneric(access, object, name, value, is_uninitialized);
+  return BuildNamedGeneric(access, expr, object, name, value, is_uninitialized);
 }
 
 
@@ -7172,11 +7347,6 @@
     AddInstruction(char_code);
     instr = NewUncasted<HStringCharFromCode>(char_code);
 
-  } else if (expr->IsFunctionPrototype()) {
-    HValue* function = Pop();
-    BuildCheckHeapObject(function);
-    instr = New<HLoadFunctionPrototype>(function);
-
   } else if (expr->key()->IsPropertyName()) {
     Handle<String> name = expr->key()->AsLiteral()->AsPropertyName();
     HValue* object = Pop();
@@ -7192,7 +7362,7 @@
 
     bool has_side_effects = false;
     HValue* load = HandleKeyedElementAccess(
-        obj, key, NULL, expr, LOAD, &has_side_effects);
+        obj, key, NULL, expr, ast_id, expr->LoadId(), LOAD, &has_side_effects);
     if (has_side_effects) {
       if (ast_context()->IsEffect()) {
         Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
@@ -7202,6 +7372,7 @@
         Drop(1);
       }
     }
+    if (load == NULL) return;
     return ast_context()->ReturnValue(load);
   }
   return ast_context()->ReturnInstruction(instr, ast_id);
@@ -7209,15 +7380,14 @@
 
 
 void HOptimizedGraphBuilder::VisitProperty(Property* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
 
   if (TryArgumentsAccess(expr)) return;
 
   CHECK_ALIVE(VisitForValue(expr->obj()));
-  if ((!expr->IsFunctionPrototype() && !expr->key()->IsPropertyName()) ||
-      expr->IsStringAccess()) {
+  if (!expr->key()->IsPropertyName() || expr->IsStringAccess()) {
     CHECK_ALIVE(VisitForValue(expr->key()));
   }
 
@@ -7235,14 +7405,19 @@
 
 HInstruction* HGraphBuilder::BuildCheckPrototypeMaps(Handle<JSObject> prototype,
                                                      Handle<JSObject> holder) {
-  while (holder.is_null() || !prototype.is_identical_to(holder)) {
-    BuildConstantMapCheck(prototype);
-    Object* next_prototype = prototype->GetPrototype();
-    if (next_prototype->IsNull()) return NULL;
-    CHECK(next_prototype->IsJSObject());
-    prototype = handle(JSObject::cast(next_prototype));
+  PrototypeIterator iter(isolate(), prototype,
+                         PrototypeIterator::START_AT_RECEIVER);
+  while (holder.is_null() ||
+         !PrototypeIterator::GetCurrent(iter).is_identical_to(holder)) {
+    BuildConstantMapCheck(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
+    iter.Advance();
+    if (iter.IsAtEnd()) {
+      return NULL;
+    }
   }
-  return BuildConstantMapCheck(prototype);
+  return BuildConstantMapCheck(
+      Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
 }
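
The hunk above replaces a hand-rolled GetPrototype() loop with the PrototypeIterator abstraction. A standalone sketch of the same Advance()/IsAtEnd()/GetCurrent() walk is below; the types are simplified stand-ins, not V8's API:

    struct Obj { const Obj* prototype; };  // stand-in for a JSObject

    class ChainIterator {  // stand-in for PrototypeIterator
     public:
      explicit ChainIterator(const Obj* start) : current_(start) {}
      const Obj* GetCurrent() const { return current_; }
      void Advance() { current_ = current_->prototype; }
      bool IsAtEnd() const { return current_ == nullptr; }
     private:
      const Obj* current_;
    };

    // Visit every object from |start| up to and including |holder|; return
    // false if the chain ends first (the NULL return in the hunk above).
    bool CheckUpToHolder(const Obj* start, const Obj* holder) {
      ChainIterator iter(start);
      while (holder == nullptr || iter.GetCurrent() != holder) {
        // BuildConstantMapCheck(iter.GetCurrent()) would go here.
        iter.Advance();
        if (iter.IsAtEnd()) return false;
      }
      return true;  // final check lands on the holder itself
    }
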
 
 
@@ -7265,12 +7440,10 @@
 HInstruction* HOptimizedGraphBuilder::NewArgumentAdaptorCall(
     HValue* fun, HValue* context,
     int argument_count, HValue* expected_param_count) {
-  CallInterfaceDescriptor* descriptor =
-      isolate()->call_descriptor(Isolate::ArgumentAdaptorCall);
-
+  ArgumentAdaptorDescriptor descriptor(isolate());
   HValue* arity = Add<HConstant>(argument_count - 1);
 
-  HValue* op_vals[] = { fun, context, arity, expected_param_count };
+  HValue* op_vals[] = { context, fun, arity, expected_param_count };
 
   Handle<Code> adaptor =
       isolate()->builtins()->ArgumentsAdaptorTrampoline();
@@ -7278,7 +7451,7 @@
 
   return New<HCallWithDescriptor>(
       adaptor_value, argument_count, descriptor,
-      Vector<HValue*>(op_vals, descriptor->environment_length()));
+      Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
 }
 
 
@@ -7314,8 +7487,8 @@
 
 class FunctionSorter {
  public:
-  FunctionSorter(int index = 0, int ticks = 0, int size = 0)
-      : index_(index), ticks_(ticks), size_(size) { }
+  explicit FunctionSorter(int index = 0, int ticks = 0, int size = 0)
+      : index_(index), ticks_(ticks), size_(size) {}
 
   int index() const { return index_; }
   int ticks() const { return ticks_; }
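
FunctionSorter's constructor gains explicit above because every parameter has a default, which makes it callable with a single int and therefore an implicit-conversion candidate. A minimal illustration of what the keyword prevents, assuming nothing about the surrounding class:

    struct Sorter {
      explicit Sorter(int index = 0, int ticks = 0)
          : index_(index), ticks_(ticks) {}
      int index_;
      int ticks_;
    };

    void Consume(const Sorter&) {}

    int main() {
      Consume(Sorter(3));  // fine: direct construction
      // Consume(3);       // compiled before 'explicit'; now rejected
      return 0;
    }
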
@@ -7347,12 +7520,11 @@
   bool handled_string = false;
   int ordered_functions = 0;
 
-  for (int i = 0;
-       i < types->length() && ordered_functions < kMaxCallPolymorphism;
+  int i;
+  for (i = 0; i < types->length() && ordered_functions < kMaxCallPolymorphism;
        ++i) {
     PropertyAccessInfo info(this, LOAD, ToType(types->at(i)), name);
-    if (info.CanAccessMonomorphic() &&
-        info.lookup()->IsConstant() &&
+    if (info.CanAccessMonomorphic() && info.IsConstant() &&
         info.constant()->IsJSFunction()) {
       if (info.type()->Is(Type::String())) {
         if (handled_string) continue;
@@ -7370,6 +7542,11 @@
 
   std::sort(order, order + ordered_functions);
 
+  if (i < types->length()) {
+    types->Clear();
+    ordered_functions = -1;
+  }
+
   HBasicBlock* number_block = NULL;
   HBasicBlock* join = NULL;
   handled_string = false;
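
The block added above handles the case where the loop exits with maps left over: i < types->length() means more receiver types were recorded than kMaxCallPolymorphism slots allow, so the partial list is discarded and the call site falls back to the generic call rather than dispatching on a subset. A standalone sketch of that policy, with an illustrative constant in place of V8's:

    #include <cstddef>
    #include <vector>

    const int kMaxCallPolymorphism = 4;  // illustrative stand-in

    // Returns the number of targets to specialize for, or -1 to force the
    // generic (megamorphic) path when the recorded types do not all fit.
    int SelectTargets(std::vector<int>* types) {
      int ordered = 0;
      std::size_t i = 0;
      for (; i < types->size() && ordered < kMaxCallPolymorphism; ++i) {
        ++ordered;  // stand-in for "type i has a constant function target"
      }
      if (i < types->size()) {  // maps left over: drop the partial list
        types->clear();
        return -1;
      }
      return ordered;
    }
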
@@ -7472,7 +7649,7 @@
   } else {
     Property* prop = expr->expression()->AsProperty();
     HInstruction* function = BuildNamedGeneric(
-        LOAD, receiver, name, NULL, prop->IsUninitialized());
+        LOAD, prop, receiver, name, NULL, prop->IsUninitialized());
     AddInstruction(function);
     Push(function);
     AddSimulate(prop->LoadId(), REMOVABLE_SIMULATE);
@@ -7502,7 +7679,7 @@
   // We assume that control flow is always live after an expression.  So
   // even without predecessors to the join block, we set it as the exit
   // block and continue by adding instructions there.
-  ASSERT(join != NULL);
+  DCHECK(join != NULL);
   if (join->HasPredecessor()) {
     set_current_block(join);
     join->SetJoinId(expr->id());
@@ -7566,7 +7743,7 @@
     TraceInline(target, caller, "target not inlineable");
     return kNotInlinable;
   }
-  if (target_shared->dont_inline() || target_shared->dont_optimize()) {
+  if (target_shared->DisableOptimizationReason() != kNoReason) {
     TraceInline(target, caller, "target contains unsupported syntax [early]");
     return kNotInlinable;
   }
@@ -7626,6 +7803,9 @@
 
   // Parse and allocate variables.
   CompilationInfo target_info(target, zone());
+  // Use the same AstValueFactory for creating strings in the sub-compilation
+  // step, but don't transfer ownership to target_info.
+  target_info.SetAstValueFactory(top_info()->ast_value_factory(), false);
   Handle<SharedFunctionInfo> target_shared(target->shared());
   if (!Parser::Parse(&target_info) || !Scope::Analyze(&target_info)) {
     if (target_info.isolate()->has_pending_exception()) {
@@ -7650,8 +7830,7 @@
     TraceInline(target, caller, "target AST is too large [late]");
     return false;
   }
-  AstProperties::Flags* flags(function->flags());
-  if (flags->Contains(kDontInline) || function->dont_optimize()) {
+  if (function->dont_optimize()) {
     TraceInline(target, caller, "target contains unsupported syntax [late]");
     return false;
   }
@@ -7685,26 +7864,9 @@
 
   // Generate the deoptimization data for the unoptimized version of
   // the target function if we don't already have it.
-  if (!target_shared->has_deoptimization_support()) {
-    // Note that we compile here using the same AST that we will use for
-    // generating the optimized inline code.
-    target_info.EnableDeoptimizationSupport();
-    if (!FullCodeGenerator::MakeCode(&target_info)) {
-      TraceInline(target, caller, "could not generate deoptimization info");
-      return false;
-    }
-    if (target_shared->scope_info() == ScopeInfo::Empty(isolate())) {
-      // The scope info might not have been set if a lazily compiled
-      // function is inlined before being called for the first time.
-      Handle<ScopeInfo> target_scope_info =
-          ScopeInfo::Create(target_info.scope(), zone());
-      target_shared->set_scope_info(*target_scope_info);
-    }
-    target_shared->EnableDeoptimizationSupport(*target_info.code());
-    target_shared->set_feedback_vector(*target_info.feedback_vector());
-    Compiler::RecordFunctionCompilation(Logger::FUNCTION_TAG,
-                                        &target_info,
-                                        target_shared);
+  if (!Compiler::EnsureDeoptimizationSupport(&target_info)) {
+    TraceInline(target, caller, "could not generate deoptimization info");
+    return false;
   }
 
   // ----------------------------------------------------------------
@@ -7712,7 +7874,7 @@
   // TryInline should always return true).
 
   // Type-check the inlined function.
-  ASSERT(target_shared->has_deoptimization_support());
+  DCHECK(target_shared->has_deoptimization_support());
   AstTyper::Run(&target_info);
 
   int function_id = graph()->TraceInlinedFunction(target_shared, position);
@@ -7735,19 +7897,19 @@
   HConstant* context = Add<HConstant>(Handle<Context>(target->context()));
   inner_env->BindContext(context);
 
-  HArgumentsObject* arguments_object = NULL;
-
-  // If the function uses arguments object create and bind one, also copy
+  // Create a dematerialized arguments object for the function, also copy the
   // current arguments values to use them for materialization.
+  HEnvironment* arguments_env = inner_env->arguments_environment();
+  int parameter_count = arguments_env->parameter_count();
+  HArgumentsObject* arguments_object = Add<HArgumentsObject>(parameter_count);
+  for (int i = 0; i < parameter_count; i++) {
+    arguments_object->AddArgument(arguments_env->Lookup(i), zone());
+  }
+
+  // If the function uses the arguments object, then bind one.
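+  // (The arguments object is now created unconditionally so deopt can
+  // materialize it, and bound only when the function actually uses it.)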
   if (function->scope()->arguments() != NULL) {
-    ASSERT(function->scope()->arguments()->IsStackAllocated());
-    HEnvironment* arguments_env = inner_env->arguments_environment();
-    int arguments_count = arguments_env->parameter_count();
-    arguments_object = Add<HArgumentsObject>(arguments_count);
+    DCHECK(function->scope()->arguments()->IsStackAllocated());
     inner_env->Bind(function->scope()->arguments(), arguments_object);
-    for (int i = 0; i < arguments_count; i++) {
-      arguments_object->AddArgument(arguments_env->Lookup(i), zone());
-    }
   }
 
   // Capture the state before invoking the inlined function for deopt in the
@@ -7761,10 +7923,9 @@
   Scope* saved_scope = scope();
   set_scope(target_info.scope());
   HEnterInlined* enter_inlined =
-      Add<HEnterInlined>(return_id, target, arguments_count, function,
+      Add<HEnterInlined>(return_id, target, context, arguments_count, function,
                          function_state()->inlining_kind(),
-                         function->scope()->arguments(),
-                         arguments_object);
+                         function->scope()->arguments(), arguments_object);
   function_state()->set_entry(enter_inlined);
 
   VisitDeclarations(target_info.scope()->declarations());
@@ -7772,10 +7933,10 @@
   set_scope(saved_scope);
   if (HasStackOverflow()) {
     // Bail out if the inline function did, as we cannot residualize a call
-    // instead.
+    // instead, but do not disable optimization for the outer function.
     TraceInline(target, caller, "inline graph construction failed");
     target_shared->DisableOptimization(kInliningBailedOut);
-    inline_bailout_ = true;
+    current_info()->RetryOptimization(kInliningBailedOut);
     delete target_state;
     return true;
   }
@@ -7784,7 +7945,7 @@
   inlined_count_ += nodes_added;
 
   Handle<Code> unoptimized_code(target_shared->code());
-  ASSERT(unoptimized_code->kind() == Code::FUNCTION);
+  DCHECK(unoptimized_code->kind() == Code::FUNCTION);
   Handle<TypeFeedbackInfo> type_info(
       TypeFeedbackInfo::cast(unoptimized_code->type_feedback_info()));
   graph()->update_type_change_checksum(type_info->own_type_change_checksum());
@@ -7802,7 +7963,7 @@
       } else if (call_context()->IsEffect()) {
         Goto(function_return(), state);
       } else {
-        ASSERT(call_context()->IsValue());
+        DCHECK(call_context()->IsValue());
         AddLeaveInlined(implicit_return_value, state);
       }
     } else if (state->inlining_kind() == SETTER_CALL_RETURN) {
@@ -7814,7 +7975,7 @@
       } else if (call_context()->IsEffect()) {
         Goto(function_return(), state);
       } else {
-        ASSERT(call_context()->IsValue());
+        DCHECK(call_context()->IsValue());
         AddLeaveInlined(implicit_return_value, state);
       }
     } else {
@@ -7825,7 +7986,7 @@
       } else if (call_context()->IsEffect()) {
         Goto(function_return(), state);
       } else {
-        ASSERT(call_context()->IsValue());
+        DCHECK(call_context()->IsValue());
         AddLeaveInlined(undefined, state);
       }
     }
@@ -7839,7 +8000,7 @@
     HEnterInlined* entry = function_state()->entry();
 
     // Pop the return test context from the expression context stack.
-    ASSERT(ast_context() == inlined_test_context());
+    DCHECK(ast_context() == inlined_test_context());
     ClearInlinedTestContext();
     delete target_state;
 
@@ -7945,6 +8106,7 @@
       if (!FLAG_fast_math) break;
       // Fall through if FLAG_fast_math.
     case kMathRound:
+    case kMathFround:
     case kMathFloor:
     case kMathAbs:
     case kMathSqrt:
@@ -8016,6 +8178,7 @@
       if (!FLAG_fast_math) break;
       // Fall through if FLAG_fast_math.
     case kMathRound:
+    case kMathFround:
     case kMathFloor:
     case kMathAbs:
     case kMathSqrt:
@@ -8046,7 +8209,7 @@
                 left, kMathPowHalf);
             // MathPowHalf doesn't have side effects so there's no need for
             // an environment simulation here.
-            ASSERT(!sqrt->HasObservableSideEffects());
+            DCHECK(!sqrt->HasObservableSideEffects());
             result = NewUncasted<HDiv>(one, sqrt);
           } else if (exponent == 2.0) {
             result = NewUncasted<HMul>(left, left);
@@ -8089,7 +8252,7 @@
       ElementsKind elements_kind = receiver_map->elements_kind();
       if (!IsFastElementsKind(elements_kind)) return false;
       if (receiver_map->is_observed()) return false;
-      ASSERT(receiver_map->is_extensible());
+      if (!receiver_map->is_extensible()) return false;
 
       Drop(expr->arguments()->length());
       HValue* result;
@@ -8154,7 +8317,7 @@
       if (!IsFastElementsKind(elements_kind)) return false;
       if (receiver_map->is_observed()) return false;
       if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
-      ASSERT(receiver_map->is_extensible());
+      if (!receiver_map->is_extensible()) return false;
 
       // If there may be elements accessors in the prototype chain, the fast
       // inlined version can't be used.
@@ -8206,7 +8369,7 @@
       ElementsKind kind = receiver_map->elements_kind();
       if (!IsFastElementsKind(kind)) return false;
       if (receiver_map->is_observed()) return false;
-      ASSERT(receiver_map->is_extensible());
+      if (!receiver_map->is_extensible()) return false;
 
       // If there may be elements accessors in the prototype chain, the fast
       // inlined version can't be used.
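
Several hunks in this region (the Array.prototype push/pop/shift fast paths) change DCHECK(receiver_map->is_extensible()) into a runtime if (!...) return false;. The pattern downgrades a debug-only invariant into a bail-out, so release builds take the generic path when the condition actually fails instead of miscompiling. A schematic sketch:

    // Before: extensibility was assumed and checked only in debug builds.
    // After: it gates the fast path at runtime.
    bool TryInlineArrayBuiltin(bool is_fast_kind, bool is_observed,
                               bool is_extensible) {
      if (!is_fast_kind) return false;
      if (is_observed) return false;
      if (!is_extensible) return false;  // was: DCHECK(is_extensible)
      // ... emit the inlined fast version here ...
      return true;
    }
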
@@ -8321,7 +8484,7 @@
       if (!IsFastElementsKind(kind)) return false;
       if (receiver_map->is_observed()) return false;
       if (argument_count != 2) return false;
-      ASSERT(receiver_map->is_extensible());
+      if (!receiver_map->is_extensible()) return false;
 
       // If there may be elements accessors in the prototype chain, the fast
       // inlined version can't be used.
@@ -8428,10 +8591,8 @@
     // Cannot embed a direct reference to the global proxy map
     // as it may be dropped on deserialization.
     CHECK(!isolate()->serializer_enabled());
-    ASSERT_EQ(0, receiver_maps->length());
-    receiver_maps->Add(handle(
-        function->context()->global_object()->global_receiver()->map()),
-        zone());
+    DCHECK_EQ(0, receiver_maps->length());
+    receiver_maps->Add(handle(function->global_proxy()->map()), zone());
   }
   CallOptimization::HolderLookup holder_lookup =
       CallOptimization::kHolderNotFound;
@@ -8456,7 +8617,7 @@
       if (holder_lookup == CallOptimization::kHolderFound) {
         AddCheckPrototypeMaps(api_holder, receiver_maps->first());
       } else {
-        ASSERT_EQ(holder_lookup, CallOptimization::kHolderIsReceiver);
+        DCHECK_EQ(holder_lookup, CallOptimization::kHolderIsReceiver);
       }
       // Includes receiver.
       PushArgumentsFromEnvironment(argc + 1);
@@ -8465,8 +8626,8 @@
       break;
     case kCallApiGetter:
       // Receiver and prototype chain cannot have changed.
-      ASSERT_EQ(0, argc);
-      ASSERT_EQ(NULL, receiver);
+      DCHECK_EQ(0, argc);
+      DCHECK_EQ(NULL, receiver);
       // Receiver is on expression stack.
       receiver = Pop();
       Add<HPushArguments>(receiver);
@@ -8475,8 +8636,8 @@
       {
         is_store = true;
         // Receiver and prototype chain cannot have changed.
-        ASSERT_EQ(1, argc);
-        ASSERT_EQ(NULL, receiver);
+        DCHECK_EQ(1, argc);
+        DCHECK_EQ(NULL, receiver);
         // Receiver and value are on expression stack.
         HValue* value = Pop();
         receiver = Pop();
@@ -8508,26 +8669,23 @@
   HValue* api_function_address = Add<HConstant>(ExternalReference(ref));
 
   HValue* op_vals[] = {
+    context(),
     Add<HConstant>(function),
     call_data,
     holder,
-    api_function_address,
-    context()
+    api_function_address
   };
 
-  CallInterfaceDescriptor* descriptor =
-      isolate()->call_descriptor(Isolate::ApiFunctionCall);
-
+  ApiFunctionDescriptor descriptor(isolate());
   CallApiFunctionStub stub(isolate(), is_store, call_data_is_undefined, argc);
   Handle<Code> code = stub.GetCode();
   HConstant* code_value = Add<HConstant>(code);
 
-  ASSERT((sizeof(op_vals) / kPointerSize) ==
-         descriptor->environment_length());
+  DCHECK((sizeof(op_vals) / kPointerSize) == descriptor.GetEnvironmentLength());
 
   HInstruction* call = New<HCallWithDescriptor>(
       code_value, argc + 1, descriptor,
-      Vector<HValue*>(op_vals, descriptor->environment_length()));
+      Vector<HValue*>(op_vals, descriptor.GetEnvironmentLength()));
 
   if (drop_extra) Drop(1);  // Drop function.
   ast_context()->ReturnInstruction(call, ast_id);
@@ -8536,7 +8694,7 @@
 
 
 bool HOptimizedGraphBuilder::TryCallApply(Call* expr) {
-  ASSERT(expr->expression()->IsProperty());
+  DCHECK(expr->expression()->IsProperty());
 
   if (!expr->IsMonomorphic()) {
     return false;
@@ -8579,7 +8737,7 @@
   } else {
     // We are inside inlined function and we know exactly what is inside
     // arguments object. But we need to be able to materialize at deopt.
-    ASSERT_EQ(environment()->arguments_environment()->parameter_count(),
+    DCHECK_EQ(environment()->arguments_environment()->parameter_count(),
               function_state()->entry()->arguments_object()->arguments_count());
     HArgumentsObject* args = function_state()->entry()->arguments_object();
     const ZoneList<HValue*>* arguments_values = args->arguments_values();
@@ -8616,9 +8774,8 @@
     // Cannot embed a direct reference to the global proxy
     // as it is dropped on deserialization.
     CHECK(!isolate()->serializer_enabled());
-    Handle<JSObject> global_receiver(
-        target->context()->global_object()->global_receiver());
-    return Add<HConstant>(global_receiver);
+    Handle<JSObject> global_proxy(target->context()->global_proxy());
+    return Add<HConstant>(global_proxy);
   }
   return graph()->GetConstantUndefined();
 }
@@ -8648,7 +8805,7 @@
                                                   HValue* search_element,
                                                   ElementsKind kind,
                                                   ArrayIndexOfMode mode) {
-  ASSERT(IsFastElementsKind(kind));
+  DCHECK(IsFastElementsKind(kind));
 
   NoObservableSideEffectsScope no_effects(this);
 
@@ -8665,7 +8822,7 @@
     token = Token::LT;
     direction = LoopBuilder::kPostIncrement;
   } else {
-    ASSERT_EQ(kLastIndexOf, mode);
+    DCHECK_EQ(kLastIndexOf, mode);
     initial = length;
     terminating = graph()->GetConstant0();
     token = Token::GT;
@@ -8674,6 +8831,12 @@
 
   Push(graph()->GetConstantMinus1());
   if (IsFastDoubleElementsKind(kind) || IsFastSmiElementsKind(kind)) {
+    // Make sure that we can actually compare numbers correctly below, see
+    // https://code.google.com/p/chromium/issues/detail?id=407946 for details.
+    search_element = AddUncasted<HForceRepresentation>(
+        search_element, IsFastSmiElementsKind(kind) ? Representation::Smi()
+                                                    : Representation::Double());
+
     LoopBuilder loop(this, context(), direction);
     {
       HValue* index = loop.BeginBody(initial, terminating, token);
@@ -8681,12 +8844,8 @@
           elements, index, static_cast<HValue*>(NULL),
           kind, ALLOW_RETURN_HOLE);
       IfBuilder if_issame(this);
-      if (IsFastDoubleElementsKind(kind)) {
-        if_issame.If<HCompareNumericAndBranch>(
-            element, search_element, Token::EQ_STRICT);
-      } else {
-        if_issame.If<HCompareObjectEqAndBranch>(element, search_element);
-      }
+      if_issame.If<HCompareNumericAndBranch>(element, search_element,
+                                             Token::EQ_STRICT);
       if_issame.Then();
       {
         Drop(1);
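
The HForceRepresentation added above (see the linked Chromium issue 407946) coerces the search element into the same representation as the array elements before the strict-equality compare, and the branch below drops the old object-identity compare in favor of a numeric one. A standalone sketch of the failure mode being avoided, with simplified stand-in types:

    #include <cassert>

    struct Value {
      bool is_smi;    // small-integer form
      int smi;        // valid when is_smi
      double number;  // valid when !is_smi

      double AsDouble() const {
        return is_smi ? static_cast<double>(smi) : number;
      }
    };

    bool IdentityEquals(const Value& a, const Value& b) {
      // Naive: equal only with the same representation and payload.
      if (a.is_smi != b.is_smi) return false;
      return a.is_smi ? a.smi == b.smi : a.number == b.number;
    }

    bool NumericEquals(const Value& a, const Value& b) {
      return a.AsDouble() == b.AsDouble();  // normalize, then compare
    }

    int main() {
      Value two_smi{true, 2, 0.0};
      Value two_dbl{false, 0, 2.0};
      assert(!IdentityEquals(two_smi, two_dbl));  // representation mismatch
      assert(NumericEquals(two_smi, two_dbl));    // numeric equality holds
      return 0;
    }
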
@@ -8824,9 +8983,9 @@
 
 
 void HOptimizedGraphBuilder::VisitCall(Call* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   Expression* callee = expr->expression();
   int argument_count = expr->arguments()->length() + 1;  // Plus receiver.
   HInstruction* call = NULL;
@@ -8923,12 +9082,13 @@
       // If there is a global property cell for the name at compile time and
       // access check is not enabled we assume that the function will not change
       // and generate optimized code for calling the function.
-      LookupResult lookup(isolate());
-      GlobalPropertyAccess type = LookupGlobalProperty(var, &lookup, LOAD);
-      if (type == kUseCell &&
-          !current_info()->global_object()->IsAccessCheckNeeded()) {
+      Handle<GlobalObject> global(current_info()->global_object());
+      LookupIterator it(global, var->name(),
+                        LookupIterator::OWN_SKIP_INTERCEPTOR);
+      GlobalPropertyAccess type = LookupGlobalProperty(var, &it, LOAD);
+      if (type == kUseCell) {
         Handle<GlobalObject> global(current_info()->global_object());
-        known_global_function = expr->ComputeGlobalTarget(global, &lookup);
+        known_global_function = expr->ComputeGlobalTarget(global, &it);
       }
       if (known_global_function) {
         Add<HCheckValue>(function, expr->target());
@@ -9005,8 +9165,8 @@
     Expression* expression,
     int argument_count,
     Handle<AllocationSite> site) {
-  ASSERT(!site.is_null());
-  ASSERT(argument_count >= 0 && argument_count <= 1);
+  DCHECK(!site.is_null());
+  DCHECK(argument_count >= 0 && argument_count <= 1);
   NoObservableSideEffectsScope no_effects(this);
 
   // We should at least have the constructor on the expression stack.
@@ -9024,7 +9184,7 @@
     HValue* argument = environment()->Top();
     if (argument->IsConstant()) {
       HConstant* constant_argument = HConstant::cast(argument);
-      ASSERT(constant_argument->HasSmiValue());
+      DCHECK(constant_argument->HasSmiValue());
       int constant_array_size = constant_argument->Integer32Value();
       if (constant_array_size != 0) {
         kind = GetHoleyElementsKind(kind);
@@ -9063,8 +9223,8 @@
   Handle<JSFunction> caller = current_info()->closure();
   Handle<JSFunction> target = array_function();
   // We should have the function plus array arguments on the environment stack.
-  ASSERT(environment()->length() >= (argument_count + 1));
-  ASSERT(!site.is_null());
+  DCHECK(environment()->length() >= (argument_count + 1));
+  DCHECK(!site.is_null());
 
   bool inline_ok = false;
   if (site->CanInlineCall()) {
@@ -9104,9 +9264,9 @@
 
 
 void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
   int argument_count = expr->arguments()->length() + 1;  // Plus constructor.
   Factory* factory = isolate()->factory();
@@ -9130,10 +9290,10 @@
     }
 
     // Calculate instance size from initial map of constructor.
-    ASSERT(constructor->has_initial_map());
+    DCHECK(constructor->has_initial_map());
     Handle<Map> initial_map(constructor->initial_map());
     int instance_size = initial_map->instance_size();
-    ASSERT(initial_map->InitialPropertiesLength() == 0);
+    DCHECK(initial_map->InitialPropertiesLength() == 0);
 
     // Allocate an instance of the implicit receiver object.
     HValue* size_in_bytes = Add<HConstant>(instance_size);
@@ -9156,7 +9316,7 @@
 
     // Initialize map and fields of the newly allocated object.
     { NoObservableSideEffectsScope no_effects(this);
-      ASSERT(initial_map->instance_type() == JS_OBJECT_TYPE);
+      DCHECK(initial_map->instance_type() == JS_OBJECT_TYPE);
       Add<HStoreNamedField>(receiver,
           HObjectAccess::ForMapAndOffset(initial_map, JSObject::kMapOffset),
           Add<HConstant>(initial_map));
@@ -9183,7 +9343,7 @@
     // Replace the constructor function with a newly allocated receiver using
     // the index of the receiver from the top of the expression stack.
     const int receiver_index = argument_count - 1;
-    ASSERT(environment()->ExpressionStackAt(receiver_index) == function);
+    DCHECK(environment()->ExpressionStackAt(receiver_index) == function);
     environment()->SetExpressionStackAt(receiver_index, receiver);
 
     if (TryInlineConstruct(expr, receiver)) {
@@ -9289,7 +9449,7 @@
     CallRuntime* expr) {
   ZoneList<Expression*>* arguments = expr->arguments();
 
-  ASSERT(arguments->length()== 4);
+  DCHECK(arguments->length() == 4);
   CHECK_ALIVE(VisitForValue(arguments->at(0)));
   HValue* obj = Pop();
 
@@ -9433,7 +9593,7 @@
   static const int kByteOffsetArg = 3;
   static const int kByteLengthArg = 4;
   static const int kArgsLength = 5;
-  ASSERT(arguments->length() == kArgsLength);
+  DCHECK(arguments->length() == kArgsLength);
 
 
   CHECK_ALIVE(VisitForValue(arguments->at(kObjectArg)));
@@ -9475,7 +9635,7 @@
     CHECK_ALIVE(VisitForValue(arguments->at(kByteOffsetArg)));
     byte_offset = Pop();
     is_zero_byte_offset = false;
-    ASSERT(buffer != NULL);
+    DCHECK(buffer != NULL);
   }
 
   CHECK_ALIVE(VisitForValue(arguments->at(kByteLengthArg)));
@@ -9523,7 +9683,7 @@
           isolate(), array_type, external_elements_kind);
       AddStoreMapConstant(obj, obj_map);
     } else {
-      ASSERT(is_zero_byte_offset);
+      DCHECK(is_zero_byte_offset);
       elements = BuildAllocateFixedTypedArray(
           array_type, element_size, fixed_elements_kind,
           byte_length, length);
@@ -9549,7 +9709,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateMaxSmi(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
   HConstant* max_smi = New<HConstant>(static_cast<int32_t>(Smi::kMaxValue));
   return ast_context()->ReturnInstruction(max_smi, expr->id());
 }
@@ -9557,7 +9717,7 @@
 
 void HOptimizedGraphBuilder::GenerateTypedArrayMaxSizeInHeap(
     CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
   HConstant* result = New<HConstant>(static_cast<int32_t>(
         FLAG_typed_array_max_size_in_heap));
   return ast_context()->ReturnInstruction(result, expr->id());
@@ -9566,7 +9726,7 @@
 
 void HOptimizedGraphBuilder::GenerateArrayBufferGetByteLength(
     CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 1);
+  DCHECK(expr->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
   HValue* buffer = Pop();
   HInstruction* result = New<HLoadNamedField>(
@@ -9579,7 +9739,7 @@
 
 void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteLength(
     CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 1);
+  DCHECK(expr->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
   HValue* buffer = Pop();
   HInstruction* result = New<HLoadNamedField>(
@@ -9592,7 +9752,7 @@
 
 void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteOffset(
     CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 1);
+  DCHECK(expr->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
   HValue* buffer = Pop();
   HInstruction* result = New<HLoadNamedField>(
@@ -9605,7 +9765,7 @@
 
 void HOptimizedGraphBuilder::GenerateTypedArrayGetLength(
     CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 1);
+  DCHECK(expr->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
   HValue* buffer = Pop();
   HInstruction* result = New<HLoadNamedField>(
@@ -9617,32 +9777,32 @@
 
 
 void HOptimizedGraphBuilder::VisitCallRuntime(CallRuntime* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   if (expr->is_jsruntime()) {
     return Bailout(kCallToAJavaScriptRuntimeFunction);
   }
 
   const Runtime::Function* function = expr->function();
-  ASSERT(function != NULL);
+  DCHECK(function != NULL);
 
   if (function->intrinsic_type == Runtime::INLINE ||
       function->intrinsic_type == Runtime::INLINE_OPTIMIZED) {
-    ASSERT(expr->name()->length() > 0);
-    ASSERT(expr->name()->Get(0) == '_');
+    DCHECK(expr->name()->length() > 0);
+    DCHECK(expr->name()->Get(0) == '_');
     // Call to an inline function.
     int lookup_index = static_cast<int>(function->function_id) -
         static_cast<int>(Runtime::kFirstInlineFunction);
-    ASSERT(lookup_index >= 0);
-    ASSERT(static_cast<size_t>(lookup_index) <
-           ARRAY_SIZE(kInlineFunctionGenerators));
+    DCHECK(lookup_index >= 0);
+    DCHECK(static_cast<size_t>(lookup_index) <
+           arraysize(kInlineFunctionGenerators));
     InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
 
     // Call the inline code generator using the pointer-to-member.
     (this->*generator)(expr);
   } else {
-    ASSERT(function->intrinsic_type == Runtime::RUNTIME);
+    DCHECK(function->intrinsic_type == Runtime::RUNTIME);
     Handle<String> name = expr->name();
     int argument_count = expr->arguments()->length();
     CHECK_ALIVE(VisitExpressions(expr->arguments()));
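
ARRAY_SIZE gives way to arraysize above. The usual type-safe formulation, reproduced here as a sketch of the idiom rather than V8's exact source, uses a function template that binds only to real arrays, so a pointer argument becomes a compile error instead of a silently wrong sizeof division:

    #include <cstddef>

    template <typename T, std::size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    static const int kGenerators[] = {1, 2, 3};
    static_assert(arraysize(kGenerators) == 3, "three entries");

    int main() { return 0; }
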
@@ -9655,9 +9815,9 @@
 
 
 void HOptimizedGraphBuilder::VisitUnaryOperation(UnaryOperation* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   switch (expr->op()) {
     case Token::DELETE: return VisitDelete(expr);
     case Token::VOID: return VisitVoid(expr);
@@ -9734,7 +9894,7 @@
     return;
   }
 
-  ASSERT(ast_context()->IsValue());
+  DCHECK(ast_context()->IsValue());
   HBasicBlock* materialize_false = graph()->CreateBasicBlock();
   HBasicBlock* materialize_true = graph()->CreateBasicBlock();
   CHECK_BAILOUT(VisitForControl(expr->expression(),
@@ -9820,9 +9980,9 @@
 
 
 void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
   Expression* target = expr->expression();
   VariableProxy* proxy = target->AsVariableProxy();
@@ -9845,7 +10005,7 @@
       return Bailout(kUnsupportedCountOperationWithConst);
     }
     // Argument of the count operation is a variable, not a property.
-    ASSERT(prop == NULL);
+    DCHECK(prop == NULL);
     CHECK_ALIVE(VisitForValue(target));
 
     after = BuildIncrement(returns_original_input, expr);
@@ -9900,15 +10060,14 @@
   }
 
   // Argument of the count operation is a property.
-  ASSERT(prop != NULL);
+  DCHECK(prop != NULL);
   if (returns_original_input) Push(graph()->GetConstantUndefined());
 
   CHECK_ALIVE(VisitForValue(prop->obj()));
   HValue* object = Top();
 
   HValue* key = NULL;
-  if ((!prop->IsFunctionPrototype() && !prop->key()->IsPropertyName()) ||
-      prop->IsStringAccess()) {
+  if (!prop->key()->IsPropertyName() || prop->IsStringAccess()) {
     CHECK_ALIVE(VisitForValue(prop->key()));
     key = Top();
   }
@@ -9942,7 +10101,7 @@
       int32_t i = c_index->NumberValueAsInteger32();
       Handle<String> s = c_string->StringValue();
       if (i < 0 || i >= s->length()) {
-        return New<HConstant>(OS::nan_value());
+        return New<HConstant>(base::OS::nan_value());
       }
       return New<HConstant>(s->Get(i));
     }
@@ -10051,7 +10210,7 @@
   // We expect to get a number.
   // (We need to check first, since Type::None->Is(Type::Any()) == true.
   if (expected_obj->Is(Type::None())) {
-    ASSERT(!expected_number->Is(Type::None(zone())));
+    DCHECK(!expected_number->Is(Type::None(zone())));
     return value;
   }
 
@@ -10115,9 +10274,11 @@
 
   bool maybe_string_add = op == Token::ADD &&
                           (left_type->Maybe(Type::String()) ||
-                           right_type->Maybe(Type::String()));
+                           left_type->Maybe(Type::Receiver()) ||
+                           right_type->Maybe(Type::String()) ||
+                           right_type->Maybe(Type::Receiver()));
 
-  if (left_type->Is(Type::None())) {
+  if (!left_type->IsInhabited()) {
     Add<HDeoptimize>("Insufficient type feedback for LHS of binary operation",
                      Deoptimizer::SOFT);
     // TODO(rossberg): we should be able to get rid of non-continuous
@@ -10128,7 +10289,7 @@
     left_rep = Representation::FromType(left_type);
   }
 
-  if (right_type->Is(Type::None())) {
+  if (!right_type->IsInhabited()) {
     Add<HDeoptimize>("Insufficient type feedback for RHS of binary operation",
                      Deoptimizer::SOFT);
     right_type = Type::Any(zone());
@@ -10152,10 +10313,10 @@
 
     // Convert left argument as necessary.
     if (left_type->Is(Type::Number())) {
-      ASSERT(right_type->Is(Type::String()));
+      DCHECK(right_type->Is(Type::String()));
       left = BuildNumberToString(left, left_type);
     } else if (!left_type->Is(Type::String())) {
-      ASSERT(right_type->Is(Type::String()));
+      DCHECK(right_type->Is(Type::String()));
       HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_RIGHT);
       Add<HPushArguments>(left, right);
       return AddUncasted<HInvokeFunction>(function, 2);
@@ -10163,10 +10324,10 @@
 
     // Convert right argument as necessary.
     if (right_type->Is(Type::Number())) {
-      ASSERT(left_type->Is(Type::String()));
+      DCHECK(left_type->Is(Type::String()));
       right = BuildNumberToString(right, right_type);
     } else if (!right_type->Is(Type::String())) {
-      ASSERT(left_type->Is(Type::String()));
+      DCHECK(left_type->Is(Type::String()));
       HValue* function = AddLoadJSBuiltin(Builtins::STRING_ADD_LEFT);
       Add<HPushArguments>(left, right);
       return AddUncasted<HInvokeFunction>(function, 2);
@@ -10186,7 +10347,7 @@
 
     // Register the dependent code with the allocation site.
     if (!allocation_mode.feedback_site().is_null()) {
-      ASSERT(!graph()->info()->IsStub());
+      DCHECK(!graph()->info()->IsStub());
       Handle<AllocationSite> site(allocation_mode.feedback_site());
       AllocationSite::AddDependentCompilationInfo(
           site, AllocationSite::TENURING, top_info());
@@ -10324,18 +10485,18 @@
   Literal* literal = expr->right()->AsLiteral();
   if (literal == NULL) return false;
   if (!literal->value()->IsString()) return false;
-  if (!call->name()->IsOneByteEqualTo(STATIC_ASCII_VECTOR("_ClassOf"))) {
+  if (!call->name()->IsOneByteEqualTo(STATIC_CHAR_VECTOR("_ClassOf"))) {
     return false;
   }
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   return true;
 }
 
 
 void HOptimizedGraphBuilder::VisitBinaryOperation(BinaryOperation* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   switch (expr->op()) {
     case Token::COMMA:
       return VisitComma(expr);
@@ -10382,7 +10543,7 @@
 
   } else if (ast_context()->IsValue()) {
     CHECK_ALIVE(VisitForValue(expr->left()));
-    ASSERT(current_block() != NULL);
+    DCHECK(current_block() != NULL);
     HValue* left_value = Top();
 
     // Short-circuit left values that always evaluate to the same boolean value.
@@ -10417,7 +10578,7 @@
     return ast_context()->ReturnValue(Pop());
 
   } else {
-    ASSERT(ast_context()->IsEffect());
+    DCHECK(ast_context()->IsEffect());
     // In an effect context, we don't need the value of the left subexpression,
     // only its control flow and side effects.  We need an extra block to
     // maintain edge-split form.
@@ -10503,9 +10664,9 @@
 
 
 void HOptimizedGraphBuilder::VisitCompareOperation(CompareOperation* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
 
   if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
 
@@ -10526,7 +10687,7 @@
 
   if (IsClassOfTest(expr)) {
     CallRuntime* call = expr->left()->AsCallRuntime();
-    ASSERT(call->arguments()->length() == 1);
+    DCHECK(call->arguments()->length() == 1);
     CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
     HValue* value = Pop();
     Literal* literal = expr->right()->AsLiteral();
@@ -10561,15 +10722,13 @@
     Handle<JSFunction> target = Handle<JSFunction>::null();
     VariableProxy* proxy = expr->right()->AsVariableProxy();
     bool global_function = (proxy != NULL) && proxy->var()->IsUnallocated();
-    if (global_function &&
-        current_info()->has_global_object() &&
-        !current_info()->global_object()->IsAccessCheckNeeded()) {
+    if (global_function && current_info()->has_global_object()) {
       Handle<String> name = proxy->name();
       Handle<GlobalObject> global(current_info()->global_object());
-      LookupResult lookup(isolate());
-      global->Lookup(name, &lookup);
-      if (lookup.IsNormal() && lookup.GetValue()->IsJSFunction()) {
-        Handle<JSFunction> candidate(JSFunction::cast(lookup.GetValue()));
+      LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+      Handle<Object> value = JSObject::GetDataProperty(&it);
+      if (it.IsFound() && value->IsJSFunction()) {
+        Handle<JSFunction> candidate = Handle<JSFunction>::cast(value);
         // If the function is in new space we assume it's more likely to
         // change and thus prefer the general IC code.
         if (!isolate()->heap()->InNewSpace(*candidate)) {
@@ -10627,7 +10786,7 @@
     BailoutId bailout_id) {
   // Cases handled below depend on collected type feedback. They should
   // soft deoptimize when there is no type feedback.
-  if (combined_type->Is(Type::None())) {
+  if (!combined_type->IsInhabited()) {
     Add<HDeoptimize>("Insufficient type feedback for combined type "
                      "of binary operation",
                      Deoptimizer::SOFT);
@@ -10737,10 +10896,10 @@
 void HOptimizedGraphBuilder::HandleLiteralCompareNil(CompareOperation* expr,
                                                      Expression* sub_expr,
                                                      NilValue nil) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
-  ASSERT(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  DCHECK(expr->op() == Token::EQ || expr->op() == Token::EQ_STRICT);
   if (!FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
   CHECK_ALIVE(VisitForValue(sub_expr));
   HValue* value = Pop();
@@ -10752,7 +10911,7 @@
         New<HCompareObjectEqAndBranch>(value, nil_constant);
     return ast_context()->ReturnControl(instr, expr->id());
   } else {
-    ASSERT_EQ(Token::EQ, expr->op());
+    DCHECK_EQ(Token::EQ, expr->op());
     Type* type = expr->combined_type()->Is(Type::None())
         ? Type::Any(zone()) : expr->combined_type();
     HIfContinuation continuation;
@@ -10779,7 +10938,7 @@
     AllocationSiteUsageContext* site_context) {
   NoObservableSideEffectsScope no_effects(this);
   InstanceType instance_type = boilerplate_object->map()->instance_type();
-  ASSERT(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
+  DCHECK(instance_type == JS_ARRAY_TYPE || instance_type == JS_OBJECT_TYPE);
 
   HType type = instance_type == JS_ARRAY_TYPE
       ? HType::JSArray() : HType::JSObject();
@@ -10843,7 +11002,8 @@
   }
 
   // Copy in-object properties.
-  if (boilerplate_object->map()->NumberOfFields() != 0) {
+  if (boilerplate_object->map()->NumberOfFields() != 0 ||
+      boilerplate_object->map()->unused_property_fields() > 0) {
     BuildEmitInObjectProperties(boilerplate_object, object, site_context,
                                 pretenure_flag);
   }
@@ -10854,14 +11014,14 @@
 void HOptimizedGraphBuilder::BuildEmitObjectHeader(
     Handle<JSObject> boilerplate_object,
     HInstruction* object) {
-  ASSERT(boilerplate_object->properties()->length() == 0);
+  DCHECK(boilerplate_object->properties()->length() == 0);
 
   Handle<Map> boilerplate_object_map(boilerplate_object->map());
   AddStoreMapConstant(object, boilerplate_object_map);
 
   Handle<Object> properties_field =
       Handle<Object>(boilerplate_object->properties(), isolate());
-  ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
+  DCHECK(*properties_field == isolate()->heap()->empty_fixed_array());
   HInstruction* properties = Add<HConstant>(properties_field);
   HObjectAccess access = HObjectAccess::ForPropertiesPointer();
   Add<HStoreNamedField>(object, access, properties);
@@ -10873,7 +11033,7 @@
         Handle<Object>(boilerplate_array->length(), isolate());
     HInstruction* length = Add<HConstant>(length_field);
 
-    ASSERT(boilerplate_array->length()->IsSmi());
+    DCHECK(boilerplate_array->length()->IsSmi());
     Add<HStoreNamedField>(object, HObjectAccess::ForArrayLength(
         boilerplate_array->GetElementsKind()), length);
   }
@@ -10884,7 +11044,7 @@
     Handle<JSObject> boilerplate_object,
     HInstruction* object,
     HInstruction* object_elements) {
-  ASSERT(boilerplate_object->properties()->length() == 0);
+  DCHECK(boilerplate_object->properties()->length() == 0);
   if (object_elements == NULL) {
     Handle<Object> elements_field =
         Handle<Object>(boilerplate_object->elements(), isolate());
@@ -10941,11 +11101,14 @@
         // 2) we can just use the mode of the parent object for pretenuring
         HInstruction* double_box =
             Add<HAllocate>(heap_number_constant, HType::HeapObject(),
-                pretenure_flag, HEAP_NUMBER_TYPE);
+                pretenure_flag, MUTABLE_HEAP_NUMBER_TYPE);
         AddStoreMapConstant(double_box,
-            isolate()->factory()->heap_number_map());
-        Add<HStoreNamedField>(double_box, HObjectAccess::ForHeapNumberValue(),
-                              Add<HConstant>(value));
+            isolate()->factory()->mutable_heap_number_map());
+        // Unwrap the mutable heap number from the boilerplate.
+        HValue* double_value =
+            Add<HConstant>(Handle<HeapNumber>::cast(value)->value());
+        Add<HStoreNamedField>(
+            double_box, HObjectAccess::ForHeapNumberValue(), double_value);
         value_instruction = double_box;
       } else if (representation.IsSmi()) {
         value_instruction = value->IsUninitialized()
@@ -10965,7 +11128,7 @@
   HInstruction* value_instruction =
       Add<HConstant>(isolate()->factory()->one_pointer_filler_map());
   for (int i = copied_fields; i < inobject_properties; i++) {
-    ASSERT(boilerplate_object->IsJSObject());
+    DCHECK(boilerplate_object->IsJSObject());
     int property_offset = boilerplate_object->GetInObjectPropertyOffset(i);
     HObjectAccess access =
         HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
@@ -11045,17 +11208,25 @@
 
 
 void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
-  ASSERT(!HasStackOverflow());
-  ASSERT(current_block() != NULL);
-  ASSERT(current_block()->HasPredecessor());
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
   HInstruction* instr = BuildThisFunction();
   return ast_context()->ReturnInstruction(instr, expr->id());
 }
 
 
+void HOptimizedGraphBuilder::VisitSuperReference(SuperReference* expr) {
+  DCHECK(!HasStackOverflow());
+  DCHECK(current_block() != NULL);
+  DCHECK(current_block()->HasPredecessor());
+  return Bailout(kSuperReference);
+}
+
+
 void HOptimizedGraphBuilder::VisitDeclarations(
     ZoneList<Declaration*>* declarations) {
-  ASSERT(globals_.is_empty());
+  DCHECK(globals_.is_empty());
   AstVisitor::VisitDeclarations(declarations);
   if (!globals_.is_empty()) {
     Handle<FixedArray> array =
@@ -11115,7 +11286,7 @@
     case Variable::UNALLOCATED: {
       globals_.Add(variable->name(), zone());
       Handle<SharedFunctionInfo> function = Compiler::BuildFunctionInfo(
-          declaration->fun(), current_info()->script());
+          declaration->fun(), current_info()->script(), top_info());
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
       globals_.Add(function, zone());
@@ -11191,7 +11362,7 @@
 // Generators for inline runtime functions.
 // Support for types.
 void HOptimizedGraphBuilder::GenerateIsSmi(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HIsSmiAndBranch* result = New<HIsSmiAndBranch>(value);
@@ -11200,7 +11371,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateIsSpecObject(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceTypeAndBranch* result =
@@ -11212,7 +11383,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateIsFunction(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceTypeAndBranch* result =
@@ -11222,7 +11393,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateIsMinusZero(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HCompareMinusZeroAndBranch* result = New<HCompareMinusZeroAndBranch>(value);
@@ -11231,7 +11402,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateHasCachedArrayIndex(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasCachedArrayIndexAndBranch* result =
@@ -11241,7 +11412,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateIsArray(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceTypeAndBranch* result =
@@ -11251,7 +11422,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateIsRegExp(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HHasInstanceTypeAndBranch* result =
@@ -11261,7 +11432,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateIsObject(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HIsObjectAndBranch* result = New<HIsObjectAndBranch>(value);
@@ -11275,7 +11446,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateIsUndetectableObject(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HIsUndetectableAndBranch* result = New<HIsUndetectableAndBranch>(value);
@@ -11291,7 +11462,7 @@
 
 // Support for construct call checks.
 void HOptimizedGraphBuilder::GenerateIsConstructCall(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 0);
+  DCHECK(call->arguments()->length() == 0);
   if (function_state()->outer() != NULL) {
     // We are generating graph for inlined function.
     HValue* value = function_state()->inlining_kind() == CONSTRUCT_CALL_RETURN
@@ -11307,30 +11478,42 @@
 
 // Support for arguments.length and arguments[?].
 void HOptimizedGraphBuilder::GenerateArgumentsLength(CallRuntime* call) {
-  // Our implementation of arguments (based on this stack frame or an
-  // adapter below it) does not work for inlined functions.  This runtime
-  // function is blacklisted by AstNode::IsInlineable.
-  ASSERT(function_state()->outer() == NULL);
-  ASSERT(call->arguments()->length() == 0);
-  HInstruction* elements = Add<HArgumentsElements>(false);
-  HArgumentsLength* result = New<HArgumentsLength>(elements);
+  DCHECK(call->arguments()->length() == 0);
+  HInstruction* result = NULL;
+  if (function_state()->outer() == NULL) {
+    HInstruction* elements = Add<HArgumentsElements>(false);
+    result = New<HArgumentsLength>(elements);
+  } else {
+    // Number of arguments without receiver.
+    int argument_count = environment()->
+        arguments_environment()->parameter_count() - 1;
+    result = New<HConstant>(argument_count);
+  }
   return ast_context()->ReturnInstruction(result, call->id());
 }
 
 
 void HOptimizedGraphBuilder::GenerateArguments(CallRuntime* call) {
-  // Our implementation of arguments (based on this stack frame or an
-  // adapter below it) does not work for inlined functions.  This runtime
-  // function is blacklisted by AstNode::IsInlineable.
-  ASSERT(function_state()->outer() == NULL);
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* index = Pop();
-  HInstruction* elements = Add<HArgumentsElements>(false);
-  HInstruction* length = Add<HArgumentsLength>(elements);
-  HInstruction* checked_index = Add<HBoundsCheck>(index, length);
-  HAccessArgumentsAt* result = New<HAccessArgumentsAt>(
-      elements, length, checked_index);
+  HInstruction* result = NULL;
+  if (function_state()->outer() == NULL) {
+    HInstruction* elements = Add<HArgumentsElements>(false);
+    HInstruction* length = Add<HArgumentsLength>(elements);
+    HInstruction* checked_index = Add<HBoundsCheck>(index, length);
+    result = New<HAccessArgumentsAt>(elements, length, checked_index);
+  } else {
+    EnsureArgumentsArePushedForAccess();
+
+    // Number of arguments without receiver.
+    HInstruction* elements = function_state()->arguments_elements();
+    int argument_count = environment()->
+        arguments_environment()->parameter_count() - 1;
+    HInstruction* length = Add<HConstant>(argument_count);
+    HInstruction* checked_key = Add<HBoundsCheck>(index, length);
+    result = New<HAccessArgumentsAt>(elements, length, checked_key);
+  }
   return ast_context()->ReturnInstruction(result, call->id());
 }
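
GenerateArgumentsLength and GenerateArguments above drop the "inlined functions unsupported" assertions: inside an inlined frame the argument count is a compile-time constant read from the recorded arguments environment (parameter_count() minus the receiver), so the length and the bounds check fold to constants. A simplified standalone sketch of that shape, not V8's API:

    #include <cassert>
    #include <vector>

    // Inlined case: |args| was captured when the call was inlined, so its
    // size is a constant to the compiler. Illustrative stand-in only.
    int ArgumentAt(const std::vector<int>& args, int index) {
      int length = static_cast<int>(args.size());  // constant when inlined
      assert(index >= 0 && index < length);        // HBoundsCheck analogue
      return args[index];
    }
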
 
@@ -11344,7 +11527,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateValueOf(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* object = Pop();
 
@@ -11372,8 +11555,8 @@
 
 
 void HOptimizedGraphBuilder::GenerateDateField(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 2);
-  ASSERT_NE(NULL, call->arguments()->at(1)->AsLiteral());
+  DCHECK(call->arguments()->length() == 2);
+  DCHECK_NE(NULL, call->arguments()->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(call->arguments()->at(1)->AsLiteral()->value()));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* date = Pop();
@@ -11384,11 +11567,10 @@
 
 void HOptimizedGraphBuilder::GenerateOneByteSeqStringSetChar(
     CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 3);
-  // We need to follow the evaluation order of full codegen.
+  DCHECK(call->arguments()->length() == 3);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* string = Pop();
   HValue* value = Pop();
   HValue* index = Pop();
@@ -11401,11 +11583,10 @@
 
 void HOptimizedGraphBuilder::GenerateTwoByteSeqStringSetChar(
     CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 3);
-  // We need to follow the evaluation order of full codegen.
+  DCHECK(call->arguments()->length() == 3);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
-  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* string = Pop();
   HValue* value = Pop();
   HValue* index = Pop();
@@ -11417,7 +11598,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateSetValueOf(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 2);
+  DCHECK(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* value = Pop();
@@ -11455,7 +11636,7 @@
 
 // Fast support for charCodeAt(n).
 void HOptimizedGraphBuilder::GenerateStringCharCodeAt(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 2);
+  DCHECK(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* index = Pop();
@@ -11467,7 +11648,7 @@
 
 // Fast support for string.charAt(n) and string[n].
 void HOptimizedGraphBuilder::GenerateStringCharFromCode(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* char_code = Pop();
   HInstruction* result = NewUncasted<HStringCharFromCode>(char_code);
@@ -11477,7 +11658,7 @@
 
 // Fast support for string.charAt(n) and string[n].
 void HOptimizedGraphBuilder::GenerateStringCharAt(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 2);
+  DCHECK(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* index = Pop();
@@ -11491,7 +11672,7 @@
 
 // Fast support for object equality testing.
 void HOptimizedGraphBuilder::GenerateObjectEquals(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 2);
+  DCHECK(call->arguments()->length() == 2);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
@@ -11504,7 +11685,7 @@
 
 // Fast support for StringAdd.
 void HOptimizedGraphBuilder::GenerateStringAdd(CallRuntime* call) {
-  ASSERT_EQ(2, call->arguments()->length());
+  DCHECK_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
@@ -11516,7 +11697,7 @@
 
 // Fast support for SubString.
 void HOptimizedGraphBuilder::GenerateSubString(CallRuntime* call) {
-  ASSERT_EQ(3, call->arguments()->length());
+  DCHECK_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
   PushArgumentsFromEnvironment(call->arguments()->length());
   HCallStub* result = New<HCallStub>(CodeStub::SubString, 3);
@@ -11526,7 +11707,7 @@
 
 // Fast support for StringCompare.
 void HOptimizedGraphBuilder::GenerateStringCompare(CallRuntime* call) {
-  ASSERT_EQ(2, call->arguments()->length());
+  DCHECK_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
   PushArgumentsFromEnvironment(call->arguments()->length());
   HCallStub* result = New<HCallStub>(CodeStub::StringCompare, 2);
@@ -11536,7 +11717,7 @@
 
 // Support for direct calls from JavaScript to native RegExp code.
 void HOptimizedGraphBuilder::GenerateRegExpExec(CallRuntime* call) {
-  ASSERT_EQ(4, call->arguments()->length());
+  DCHECK_EQ(4, call->arguments()->length());
   CHECK_ALIVE(VisitExpressions(call->arguments()));
   PushArgumentsFromEnvironment(call->arguments()->length());
   HCallStub* result = New<HCallStub>(CodeStub::RegExpExec, 4);
@@ -11545,7 +11726,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateDoubleLo(CallRuntime* call) {
-  ASSERT_EQ(1, call->arguments()->length());
+  DCHECK_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::LOW);
@@ -11554,7 +11735,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateDoubleHi(CallRuntime* call) {
-  ASSERT_EQ(1, call->arguments()->length());
+  DCHECK_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HInstruction* result = NewUncasted<HDoubleBits>(value, HDoubleBits::HIGH);
@@ -11563,7 +11744,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateConstructDouble(CallRuntime* call) {
-  ASSERT_EQ(2, call->arguments()->length());
+  DCHECK_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* lo = Pop();
@@ -11575,7 +11756,7 @@
 
 // Construct a RegExp exec result with two in-object properties.
 void HOptimizedGraphBuilder::GenerateRegExpConstructResult(CallRuntime* call) {
-  ASSERT_EQ(3, call->arguments()->length());
+  DCHECK_EQ(3, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
@@ -11595,7 +11776,7 @@
 
 // Fast support for number to string.
 void HOptimizedGraphBuilder::GenerateNumberToString(CallRuntime* call) {
-  ASSERT_EQ(1, call->arguments()->length());
+  DCHECK_EQ(1, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* number = Pop();
   HValue* result = BuildNumberToString(number, Type::Any(zone()));
@@ -11607,7 +11788,7 @@
 void HOptimizedGraphBuilder::GenerateCallFunction(CallRuntime* call) {
   // 1 ~ The function to call is not itself an argument to the call.
   int arg_count = call->arguments()->length() - 1;
-  ASSERT(arg_count >= 1);  // There's always at least a receiver.
+  DCHECK(arg_count >= 1);  // There's always at least a receiver.
 
   CHECK_ALIVE(VisitExpressions(call->arguments()));
   // The function is the last argument
@@ -11651,7 +11832,7 @@
 
 // Fast call to math functions.
 void HOptimizedGraphBuilder::GenerateMathPow(CallRuntime* call) {
-  ASSERT_EQ(2, call->arguments()->length());
+  DCHECK_EQ(2, call->arguments()->length());
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
   HValue* right = Pop();
@@ -11662,7 +11843,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateMathLogRT(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathLog);
@@ -11671,7 +11852,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateMathSqrtRT(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HInstruction* result = NewUncasted<HUnaryMathOperation>(value, kMathSqrt);
@@ -11680,7 +11861,7 @@
 
 
 void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
+  DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
   HValue* value = Pop();
   HGetCachedArrayIndex* result = New<HGetCachedArrayIndex>(value);
@@ -11688,8 +11869,8 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateFastAsciiArrayJoin(CallRuntime* call) {
-  return Bailout(kInlinedRuntimeFunctionFastAsciiArrayJoin);
+void HOptimizedGraphBuilder::GenerateFastOneByteArrayJoin(CallRuntime* call) {
+  return Bailout(kInlinedRuntimeFunctionFastOneByteArrayJoin);
 }
 
 
@@ -11711,11 +11892,13 @@
 }
 
 
-void HOptimizedGraphBuilder::GenerateDebugCallbackSupportsStepping(
-    CallRuntime* call) {
-  ASSERT(call->arguments()->length() == 1);
-  // Debugging is not supported in optimized code.
-  return ast_context()->ReturnValue(graph()->GetConstantFalse());
+void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 0);
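+  // The isolate's debug-is-active flag is a byte behind an external
+  // reference; loading it directly lets optimized code see debugger state.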
+  HValue* ref =
+      Add<HConstant>(ExternalReference::debug_is_active_address(isolate()));
+  HValue* value = Add<HLoadNamedField>(
+      ref, static_cast<HValue*>(NULL), HObjectAccess::ForExternalUInteger8());
+  return ast_context()->ReturnValue(value);
 }
 
 
@@ -11827,8 +12010,8 @@
 
 
 void HEnvironment::AddIncomingEdge(HBasicBlock* block, HEnvironment* other) {
-  ASSERT(!block->IsLoopHeader());
-  ASSERT(values_.length() == other->values_.length());
+  DCHECK(!block->IsLoopHeader());
+  DCHECK(values_.length() == other->values_.length());
 
   int length = values_.length();
   for (int i = 0; i < length; ++i) {
@@ -11837,12 +12020,12 @@
       // There is already a phi for the i'th value.
       HPhi* phi = HPhi::cast(value);
       // Assert index is correct and that we haven't missed an incoming edge.
-      ASSERT(phi->merged_index() == i || !phi->HasMergedIndex());
-      ASSERT(phi->OperandCount() == block->predecessors()->length());
+      DCHECK(phi->merged_index() == i || !phi->HasMergedIndex());
+      DCHECK(phi->OperandCount() == block->predecessors()->length());
       phi->AddInput(other->values_[i]);
     } else if (values_[i] != other->values_[i]) {
       // There is a fresh value on the incoming edge, a phi is needed.
-      ASSERT(values_[i] != NULL && other->values_[i] != NULL);
+      DCHECK(values_[i] != NULL && other->values_[i] != NULL);
       HPhi* phi = block->AddNewPhi(i);
       HValue* old_value = values_[i];
       for (int j = 0; j < block->predecessors()->length(); j++) {
@@ -11856,7 +12039,7 @@
 
 
 void HEnvironment::Bind(int index, HValue* value) {
-  ASSERT(value != NULL);
+  DCHECK(value != NULL);
   assigned_variables_.Add(index, zone());
   values_[index] = value;
 }
@@ -11868,7 +12051,7 @@
 
 
 bool HEnvironment::ExpressionStackIsEmpty() const {
-  ASSERT(length() >= first_expression_index());
+  DCHECK(length() >= first_expression_index());
   return length() == first_expression_index();
 }
 
@@ -11876,7 +12059,7 @@
 void HEnvironment::SetExpressionStackAt(int index_from_top, HValue* value) {
   int count = index_from_top + 1;
   int index = values_.length() - count;
-  ASSERT(HasExpressionAt(index));
+  DCHECK(HasExpressionAt(index));
   // The push count must include at least the element in question or else
   // the new value will not be included in this environment's history.
   if (push_count_ < count) {
@@ -11940,7 +12123,7 @@
     FunctionLiteral* function,
     HConstant* undefined,
     InliningKind inlining_kind) const {
-  ASSERT(frame_type() == JS_FUNCTION);
+  DCHECK(frame_type() == JS_FUNCTION);
 
   // Outer environment is a copy of this one without the arguments.
   int arity = function->scope()->num_parameters();
@@ -11987,32 +12170,24 @@
 }
 
 
-void HEnvironment::PrintTo(StringStream* stream) {
-  for (int i = 0; i < length(); i++) {
-    if (i == 0) stream->Add("parameters\n");
-    if (i == parameter_count()) stream->Add("specials\n");
-    if (i == parameter_count() + specials_count()) stream->Add("locals\n");
-    if (i == parameter_count() + specials_count() + local_count()) {
-      stream->Add("expressions\n");
+OStream& operator<<(OStream& os, const HEnvironment& env) {
+  for (int i = 0; i < env.length(); i++) {
+    if (i == 0) os << "parameters\n";
+    if (i == env.parameter_count()) os << "specials\n";
+    if (i == env.parameter_count() + env.specials_count()) os << "locals\n";
+    if (i == env.parameter_count() + env.specials_count() + env.local_count()) {
+      os << "expressions\n";
     }
-    HValue* val = values_.at(i);
-    stream->Add("%d: ", i);
+    HValue* val = env.values()->at(i);
+    os << i << ": ";
     if (val != NULL) {
-      val->PrintNameTo(stream);
+      os << val;
     } else {
-      stream->Add("NULL");
+      os << "NULL";
     }
-    stream->Add("\n");
+    os << "\n";
   }
-  PrintF("\n");
-}
-
-
-void HEnvironment::PrintToStd() {
-  HeapStringAllocator string_allocator;
-  StringStream trace(&string_allocator);
-  PrintTo(&trace);
-  PrintF("%s", trace.ToCString().get());
+  return os << "\n";
 }
 
 
@@ -12030,12 +12205,13 @@
     PrintStringProperty("name", CodeStub::MajorName(major_key, false));
     PrintStringProperty("method", "stub");
   }
-  PrintLongProperty("date", static_cast<int64_t>(OS::TimeCurrentMillis()));
+  PrintLongProperty("date",
+                    static_cast<int64_t>(base::OS::TimeCurrentMillis()));
 }
 
 
 void HTracer::TraceLithium(const char* name, LChunk* chunk) {
-  ASSERT(!chunk->isolate()->concurrent_recompilation_enabled());
+  DCHECK(!chunk->isolate()->concurrent_recompilation_enabled());
   AllowHandleDereference allow_deref;
   AllowDeferredHandleDereference allow_deferred_deref;
   Trace(name, chunk->graph(), chunk);
@@ -12043,7 +12219,7 @@
 
 
 void HTracer::TraceHydrogen(const char* name, HGraph* graph) {
-  ASSERT(!graph->isolate()->concurrent_recompilation_enabled());
+  DCHECK(!graph->isolate()->concurrent_recompilation_enabled());
   AllowHandleDereference allow_deref;
   AllowDeferredHandleDereference allow_deferred_deref;
   Trace(name, graph, NULL);
@@ -12126,11 +12302,9 @@
       for (int j = 0; j < total; ++j) {
         HPhi* phi = current->phis()->at(j);
         PrintIndent();
-        trace_.Add("%d ", phi->merged_index());
-        phi->PrintNameTo(&trace_);
-        trace_.Add(" ");
-        phi->PrintTo(&trace_);
-        trace_.Add("\n");
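+        // Format the phi line with streaming operators, then append the
+        // finished string to the legacy trace buffer.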
+        OStringStream os;
+        os << phi->merged_index() << " " << NameOf(phi) << " " << *phi << "\n";
+        trace_.Add(os.c_str());
       }
     }
 
@@ -12140,21 +12314,18 @@
         HInstruction* instruction = it.Current();
         int uses = instruction->UseCount();
         PrintIndent();
-        trace_.Add("0 %d ", uses);
-        instruction->PrintNameTo(&trace_);
-        trace_.Add(" ");
-        instruction->PrintTo(&trace_);
+        OStringStream os;
+        os << "0 " << uses << " " << NameOf(instruction) << " " << *instruction;
         if (FLAG_hydrogen_track_positions &&
             instruction->has_position() &&
             instruction->position().raw() != 0) {
           const HSourcePosition pos = instruction->position();
-          trace_.Add(" pos:");
-          if (pos.inlining_id() != 0) {
-            trace_.Add("%d_", pos.inlining_id());
-          }
-          trace_.Add("%d", pos.position());
+          os << " pos:";
+          if (pos.inlining_id() != 0) os << pos.inlining_id() << "_";
+          os << pos.position();
         }
-        trace_.Add(" <|@\n");
+        os << " <|@\n";
+        trace_.Add(os.c_str());
       }
     }
 
@@ -12172,10 +12343,9 @@
             trace_.Add("%d ",
                        LifetimePosition::FromInstructionIndex(i).Value());
             linstr->PrintTo(&trace_);
-            trace_.Add(" [hir:");
-            linstr->hydrogen_value()->PrintNameTo(&trace_);
-            trace_.Add("]");
-            trace_.Add(" <|@\n");
+            OStringStream os;
+            os << " [hir:" << NameOf(linstr->hydrogen_value()) << "] <|@\n";
+            trace_.Add(os.c_str());
           }
         }
       }
@@ -12217,7 +12387,7 @@
         trace_.Add(" \"%s\"",
                    DoubleRegister::AllocationIndexToString(assigned_reg));
       } else {
-        ASSERT(op->IsRegister());
+        DCHECK(op->IsRegister());
         trace_.Add(" \"%s\"", Register::AllocationIndexToString(assigned_reg));
       }
     } else if (range->IsSpilled()) {
@@ -12225,7 +12395,7 @@
       if (op->IsDoubleStackSlot()) {
         trace_.Add(" \"double_stack:%d\"", op->index());
       } else {
-        ASSERT(op->IsStackSlot());
+        DCHECK(op->IsStackSlot());
         trace_.Add(" \"stack:%d\"", op->index());
       }
     }
@@ -12275,15 +12445,22 @@
 }
 
 
-void HStatistics::Print() {
-  PrintF("Timing results:\n");
-  TimeDelta sum;
+void HStatistics::Print(const char* stats_name) {
+  PrintF(
+      "\n"
+      "----------------------------------------"
+      "----------------------------------------\n"
+      "--- %s timing results:\n"
+      "----------------------------------------"
+      "----------------------------------------\n",
+      stats_name);
+  base::TimeDelta sum;
   for (int i = 0; i < times_.length(); ++i) {
     sum += times_[i];
   }
 
   for (int i = 0; i < names_.length(); ++i) {
-    PrintF("%32s", names_[i]);
+    PrintF("%33s", names_[i]);
     double ms = times_[i].InMillisecondsF();
     double percent = times_[i].PercentOf(sum);
     PrintF(" %8.3f ms / %4.1f %% ", ms, percent);
@@ -12293,26 +12470,22 @@
     PrintF(" %9u bytes / %4.1f %%\n", size, size_percent);
   }
 
-  PrintF("----------------------------------------"
-         "---------------------------------------\n");
-  TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
-  PrintF("%32s %8.3f ms / %4.1f %% \n",
-         "Create graph",
-         create_graph_.InMillisecondsF(),
-         create_graph_.PercentOf(total));
-  PrintF("%32s %8.3f ms / %4.1f %% \n",
-         "Optimize graph",
-         optimize_graph_.InMillisecondsF(),
-         optimize_graph_.PercentOf(total));
-  PrintF("%32s %8.3f ms / %4.1f %% \n",
-         "Generate and install code",
-         generate_code_.InMillisecondsF(),
-         generate_code_.PercentOf(total));
-  PrintF("----------------------------------------"
-         "---------------------------------------\n");
-  PrintF("%32s %8.3f ms (%.1f times slower than full code gen)\n",
-         "Total",
-         total.InMillisecondsF(),
+  PrintF(
+      "----------------------------------------"
+      "----------------------------------------\n");
+  base::TimeDelta total = create_graph_ + optimize_graph_ + generate_code_;
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Create graph",
+         create_graph_.InMillisecondsF(), create_graph_.PercentOf(total));
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Optimize graph",
+         optimize_graph_.InMillisecondsF(), optimize_graph_.PercentOf(total));
+  PrintF("%33s %8.3f ms / %4.1f %% \n", "Generate and install code",
+         generate_code_.InMillisecondsF(), generate_code_.PercentOf(total));
+  PrintF(
+      "----------------------------------------"
+      "----------------------------------------\n");
+  PrintF("%33s %8.3f ms           %9u bytes\n", "Total",
+         total.InMillisecondsF(), total_size_);
+  PrintF("%33s     (%.1f times slower than full code gen)\n", "",
          total.TimesOf(full_code_gen_));
 
   double source_size_in_kb = static_cast<double>(source_size_) / 1024;
@@ -12322,13 +12495,13 @@
   double normalized_size_in_kb = source_size_in_kb > 0
       ? total_size_ / 1024 / source_size_in_kb
       : 0;
-  PrintF("%32s %8.3f ms           %7.3f kB allocated\n",
-         "Average per kB source",
-         normalized_time, normalized_size_in_kb);
+  PrintF("%33s %8.3f ms           %7.3f kB allocated\n",
+         "Average per kB source", normalized_time, normalized_size_in_kb);
 }
 
 
-void HStatistics::SaveTiming(const char* name, TimeDelta time, unsigned size) {
+void HStatistics::SaveTiming(const char* name, base::TimeDelta time,
+                             unsigned size) {
   total_size_ += size;
   for (int i = 0; i < names_.length(); ++i) {
     if (strcmp(names_[i], name) == 0) {
diff --git a/src/hydrogen.h b/src/hydrogen.h
index 5df1d65..d507643 100644
--- a/src/hydrogen.h
+++ b/src/hydrogen.h
@@ -10,10 +10,11 @@
 #include "src/accessors.h"
 #include "src/allocation.h"
 #include "src/ast.h"
+#include "src/bailout-reason.h"
 #include "src/compiler.h"
 #include "src/hydrogen-instructions.h"
-#include "src/zone.h"
 #include "src/scopes.h"
+#include "src/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -31,7 +32,7 @@
 class LiveRange;
 
 
-class HBasicBlock V8_FINAL : public ZoneObject {
+class HBasicBlock FINAL : public ZoneObject {
  public:
   explicit HBasicBlock(HGraph* graph);
   ~HBasicBlock() { }
@@ -94,8 +95,8 @@
 
   void SetInitialEnvironment(HEnvironment* env);
   void ClearEnvironment() {
-    ASSERT(IsFinished());
-    ASSERT(end()->SuccessorCount() == 0);
+    DCHECK(IsFinished());
+    DCHECK(end()->SuccessorCount() == 0);
     last_environment_ = NULL;
   }
   bool HasEnvironment() const { return last_environment_ != NULL; }
@@ -103,7 +104,7 @@
   HBasicBlock* parent_loop_header() const { return parent_loop_header_; }
 
   void set_parent_loop_header(HBasicBlock* block) {
-    ASSERT(parent_loop_header_ == NULL);
+    DCHECK(parent_loop_header_ == NULL);
     parent_loop_header_ = block;
   }
 
@@ -214,7 +215,10 @@
 };
 
 
-class HPredecessorIterator V8_FINAL BASE_EMBEDDED {
+OStream& operator<<(OStream& os, const HBasicBlock& b);
+
+
+class HPredecessorIterator FINAL BASE_EMBEDDED {
  public:
   explicit HPredecessorIterator(HBasicBlock* block)
       : predecessor_list_(block->predecessors()), current_(0) { }
@@ -229,7 +233,7 @@
 };
 
 
-class HInstructionIterator V8_FINAL BASE_EMBEDDED {
+class HInstructionIterator FINAL BASE_EMBEDDED {
  public:
   explicit HInstructionIterator(HBasicBlock* block)
       : instr_(block->first()) {
@@ -249,7 +253,7 @@
 };
 
 
-class HLoopInformation V8_FINAL : public ZoneObject {
+class HLoopInformation FINAL : public ZoneObject {
  public:
   HLoopInformation(HBasicBlock* loop_header, Zone* zone)
       : back_edges_(4, zone),
@@ -297,7 +301,7 @@
 
 class BoundsCheckTable;
 class InductionVariableBlocksTable;
-class HGraph V8_FINAL : public ZoneObject {
+class HGraph FINAL : public ZoneObject {
  public:
   explicit HGraph(CompilationInfo* info);
 
@@ -358,7 +362,7 @@
   int GetMaximumValueID() const { return values_.length(); }
   int GetNextBlockID() { return next_block_id_++; }
   int GetNextValueID(HValue* value) {
-    ASSERT(!disallow_adding_new_values_);
+    DCHECK(!disallow_adding_new_values_);
     values_.Add(value, zone());
     return values_.length() - 1;
   }
@@ -433,17 +437,17 @@
   }
 
   bool has_uint32_instructions() {
-    ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+    DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
     return uint32_instructions_ != NULL;
   }
 
   ZoneList<HInstruction*>* uint32_instructions() {
-    ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+    DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
     return uint32_instructions_;
   }
 
   void RecordUint32Instruction(HInstruction* instr) {
-    ASSERT(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
+    DCHECK(uint32_instructions_ == NULL || !uint32_instructions_->is_empty());
     if (uint32_instructions_ == NULL) {
       uint32_instructions_ = new(zone()) ZoneList<HInstruction*>(4, zone());
     }
@@ -544,7 +548,7 @@
 };
 
 
-class HEnvironment V8_FINAL : public ZoneObject {
+class HEnvironment FINAL : public ZoneObject {
  public:
   HEnvironment(HEnvironment* outer,
                Scope* scope,
@@ -603,7 +607,7 @@
 
   HValue* Lookup(int index) const {
     HValue* result = values_[index];
-    ASSERT(result != NULL);
+    DCHECK(result != NULL);
     return result;
   }
 
@@ -613,13 +617,13 @@
   }
 
   void Push(HValue* value) {
-    ASSERT(value != NULL);
+    DCHECK(value != NULL);
     ++push_count_;
     values_.Add(value, zone());
   }
 
   HValue* Pop() {
-    ASSERT(!ExpressionStackIsEmpty());
+    DCHECK(!ExpressionStackIsEmpty());
     if (push_count_ > 0) {
       --push_count_;
     } else {
@@ -636,7 +640,7 @@
 
   HValue* ExpressionStackAt(int index_from_top) const {
     int index = length() - index_from_top - 1;
-    ASSERT(HasExpressionAt(index));
+    DCHECK(HasExpressionAt(index));
     return values_[index];
   }
 
@@ -671,7 +675,7 @@
   }
 
   void SetValueAt(int index, HValue* value) {
-    ASSERT(index < length());
+    DCHECK(index < length());
     values_[index] = value;
   }
 
@@ -679,7 +683,7 @@
   // by 1 (receiver is parameter index -1 but environment index 0).
   // Stack-allocated local indices are shifted by the number of parameters.
   int IndexFor(Variable* variable) const {
-    ASSERT(variable->IsStackAllocated());
+    DCHECK(variable->IsStackAllocated());
     int shift = variable->IsParameter()
         ? 1
         : parameter_count_ + specials_count_;
@@ -698,9 +702,6 @@
     return i >= parameter_count() && i < parameter_count() + specials_count();
   }
 
-  void PrintTo(StringStream* stream);
-  void PrintToStd();
-
   Zone* zone() const { return zone_; }
 
  private:
@@ -742,6 +743,9 @@
 };
 
 
+OStream& operator<<(OStream& os, const HEnvironment& env);
+
+
 class HOptimizedGraphBuilder;
 
 enum ArgumentsAllowedFlag {
@@ -809,37 +813,37 @@
 };
 
 
-class EffectContext V8_FINAL : public AstContext {
+class EffectContext FINAL : public AstContext {
  public:
   explicit EffectContext(HOptimizedGraphBuilder* owner)
       : AstContext(owner, Expression::kEffect) {
   }
   virtual ~EffectContext();
 
-  virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+  virtual void ReturnValue(HValue* value) OVERRIDE;
   virtual void ReturnInstruction(HInstruction* instr,
-                                 BailoutId ast_id) V8_OVERRIDE;
+                                 BailoutId ast_id) OVERRIDE;
   virtual void ReturnControl(HControlInstruction* instr,
-                             BailoutId ast_id) V8_OVERRIDE;
+                             BailoutId ast_id) OVERRIDE;
   virtual void ReturnContinuation(HIfContinuation* continuation,
-                                  BailoutId ast_id) V8_OVERRIDE;
+                                  BailoutId ast_id) OVERRIDE;
 };
 
 
-class ValueContext V8_FINAL : public AstContext {
+class ValueContext FINAL : public AstContext {
  public:
   ValueContext(HOptimizedGraphBuilder* owner, ArgumentsAllowedFlag flag)
       : AstContext(owner, Expression::kValue), flag_(flag) {
   }
   virtual ~ValueContext();
 
-  virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+  virtual void ReturnValue(HValue* value) OVERRIDE;
   virtual void ReturnInstruction(HInstruction* instr,
-                                 BailoutId ast_id) V8_OVERRIDE;
+                                 BailoutId ast_id) OVERRIDE;
   virtual void ReturnControl(HControlInstruction* instr,
-                             BailoutId ast_id) V8_OVERRIDE;
+                             BailoutId ast_id) OVERRIDE;
   virtual void ReturnContinuation(HIfContinuation* continuation,
-                                  BailoutId ast_id) V8_OVERRIDE;
+                                  BailoutId ast_id) OVERRIDE;
 
   bool arguments_allowed() { return flag_ == ARGUMENTS_ALLOWED; }
 
@@ -848,7 +852,7 @@
 };
 
 
-class TestContext V8_FINAL : public AstContext {
+class TestContext FINAL : public AstContext {
  public:
   TestContext(HOptimizedGraphBuilder* owner,
               Expression* condition,
@@ -860,16 +864,16 @@
         if_false_(if_false) {
   }
 
-  virtual void ReturnValue(HValue* value) V8_OVERRIDE;
+  virtual void ReturnValue(HValue* value) OVERRIDE;
   virtual void ReturnInstruction(HInstruction* instr,
-                                 BailoutId ast_id) V8_OVERRIDE;
+                                 BailoutId ast_id) OVERRIDE;
   virtual void ReturnControl(HControlInstruction* instr,
-                             BailoutId ast_id) V8_OVERRIDE;
+                             BailoutId ast_id) OVERRIDE;
   virtual void ReturnContinuation(HIfContinuation* continuation,
-                                  BailoutId ast_id) V8_OVERRIDE;
+                                  BailoutId ast_id) OVERRIDE;
 
   static TestContext* cast(AstContext* context) {
-    ASSERT(context->IsTest());
+    DCHECK(context->IsTest());
     return reinterpret_cast<TestContext*>(context);
   }
 
@@ -888,7 +892,7 @@
 };
 
 
-class FunctionState V8_FINAL {
+class FunctionState FINAL {
  public:
   FunctionState(HOptimizedGraphBuilder* owner,
                 CompilationInfo* info,
@@ -961,7 +965,7 @@
 };
 
 
-class HIfContinuation V8_FINAL {
+class HIfContinuation FINAL {
  public:
   HIfContinuation()
     : continuation_captured_(false),
@@ -971,11 +975,11 @@
                   HBasicBlock* false_branch)
       : continuation_captured_(true), true_branch_(true_branch),
         false_branch_(false_branch) {}
-  ~HIfContinuation() { ASSERT(!continuation_captured_); }
+  ~HIfContinuation() { DCHECK(!continuation_captured_); }
 
   void Capture(HBasicBlock* true_branch,
                HBasicBlock* false_branch) {
-    ASSERT(!continuation_captured_);
+    DCHECK(!continuation_captured_);
     true_branch_ = true_branch;
     false_branch_ = false_branch;
     continuation_captured_ = true;
@@ -983,7 +987,7 @@
 
   void Continue(HBasicBlock** true_branch,
                 HBasicBlock** false_branch) {
-    ASSERT(continuation_captured_);
+    DCHECK(continuation_captured_);
     *true_branch = true_branch_;
     *false_branch = false_branch_;
     continuation_captured_ = false;
@@ -1005,7 +1009,7 @@
 };
 
 
-class HAllocationMode V8_FINAL BASE_EMBEDDED {
+class HAllocationMode FINAL BASE_EMBEDDED {
  public:
   explicit HAllocationMode(Handle<AllocationSite> feedback_site)
       : current_site_(NULL), feedback_site_(feedback_site),
@@ -1020,11 +1024,11 @@
   HValue* current_site() const { return current_site_; }
   Handle<AllocationSite> feedback_site() const { return feedback_site_; }
 
-  bool CreateAllocationMementos() const V8_WARN_UNUSED_RESULT {
+  bool CreateAllocationMementos() const WARN_UNUSED_RESULT {
     return current_site() != NULL;
   }
 
-  PretenureFlag GetPretenureMode() const V8_WARN_UNUSED_RESULT {
+  PretenureFlag GetPretenureMode() const WARN_UNUSED_RESULT {
     if (!feedback_site().is_null()) return feedback_site()->GetPretenureMode();
     return pretenure_flag_;
   }
@@ -1124,7 +1128,7 @@
     HInstruction* result = AddInstruction(NewUncasted<I>(p1));
     // Specializations must have their parameters properly casted
     // to avoid landing here.
-    ASSERT(!result->IsReturn() && !result->IsSimulate() &&
+    DCHECK(!result->IsReturn() && !result->IsSimulate() &&
            !result->IsDeoptimize());
     return result;
   }
@@ -1134,7 +1138,7 @@
     I* result = AddInstructionTyped(New<I>(p1));
     // Specializations must have their parameters properly casted
     // to avoid landing here.
-    ASSERT(!result->IsReturn() && !result->IsSimulate() &&
+    DCHECK(!result->IsReturn() && !result->IsSimulate() &&
            !result->IsDeoptimize());
     return result;
   }
@@ -1154,7 +1158,7 @@
     HInstruction* result = AddInstruction(NewUncasted<I>(p1, p2));
     // Specializations must have their parameters properly casted
     // to avoid landing here.
-    ASSERT(!result->IsSimulate());
+    DCHECK(!result->IsSimulate());
     return result;
   }
 
@@ -1163,7 +1167,7 @@
     I* result = AddInstructionTyped(New<I>(p1, p2));
     // Specializations must have their parameters properly casted
     // to avoid landing here.
-    ASSERT(!result->IsSimulate());
+    DCHECK(!result->IsSimulate());
     return result;
   }
 
@@ -1311,10 +1315,11 @@
 
   template <class BitFieldClass>
   HValue* BuildDecodeField(HValue* encoded_field) {
-    HValue* shifted_field = AddUncasted<HShr>(encoded_field,
-        Add<HConstant>(static_cast<int>(BitFieldClass::kShift)));
     HValue* mask_value = Add<HConstant>(static_cast<int>(BitFieldClass::kMask));
-    return AddUncasted<HBitwise>(Token::BIT_AND, shifted_field, mask_value);
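+    // BitFieldClass::kMask is already shifted into position, so mask first
+    // and only then shift the field down by kShift.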
+    HValue* masked_field =
+        AddUncasted<HBitwise>(Token::BIT_AND, encoded_field, mask_value);
+    return AddUncasted<HShr>(masked_field,
+        Add<HConstant>(static_cast<int>(BitFieldClass::kShift)));
   }
 
   HValue* BuildGetElementsKind(HValue* object);
@@ -1469,8 +1474,11 @@
 
   void AddIncrementCounter(StatsCounter* counter);
 
-  class IfBuilder V8_FINAL {
+  class IfBuilder FINAL {
    public:
+    // If using this constructor, Initialize() must be called explicitly!
+    IfBuilder();
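+    // Until Initialize() runs, builder() below fails its DCHECK.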
+
     explicit IfBuilder(HGraphBuilder* builder);
     IfBuilder(HGraphBuilder* builder,
               HIfContinuation* continuation);
@@ -1479,6 +1487,8 @@
       if (!finished_) End();
     }
 
+    void Initialize(HGraphBuilder* builder);
+
     template<class Condition>
     Condition* If(HValue *p) {
       Condition* compare = builder()->New<Condition>(p);
@@ -1621,9 +1631,14 @@
     void Return(HValue* value);
 
    private:
+    void InitializeDontCreateBlocks(HGraphBuilder* builder);
+
     HControlInstruction* AddCompare(HControlInstruction* compare);
 
-    HGraphBuilder* builder() const { return builder_; }
+    HGraphBuilder* builder() const {
+      DCHECK(builder_ != NULL);  // Have you called "Initialize"?
+      return builder_;
+    }
 
     void AddMergeAtJoinBlock(bool deopt);
 
@@ -1662,15 +1677,17 @@
     int deopt_merge_at_join_block_count_;
   };
 
-  class LoopBuilder V8_FINAL {
+  class LoopBuilder FINAL {
    public:
     enum Direction {
       kPreIncrement,
       kPostIncrement,
       kPreDecrement,
-      kPostDecrement
+      kPostDecrement,
+      kWhileTrue
     };
 
+    explicit LoopBuilder(HGraphBuilder* builder);  // while (true) {...}
     LoopBuilder(HGraphBuilder* builder,
                 HValue* context,
                 Direction direction);
@@ -1680,7 +1697,7 @@
                 HValue* increment_amount);
 
     ~LoopBuilder() {
-      ASSERT(finished_);
+      DCHECK(finished_);
     }
 
     HValue* BeginBody(
@@ -1688,11 +1705,15 @@
         HValue* terminating,
         Token::Value token);
 
+    void BeginBody(int drop_count);
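+    // kWhileTrue variant: no induction variable or exit condition is set
+    // up; presumably drop_count environment values are dropped on entry.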
+
     void Break();
 
     void EndBody();
 
    private:
+    void Initialize(HGraphBuilder* builder, HValue* context,
+                    Direction direction, HValue* increment_amount);
     Zone* zone() { return builder_->zone(); }
 
     HGraphBuilder* builder_;
@@ -1708,30 +1729,9 @@
     bool finished_;
   };
 
-  template <class A, class P1>
-  void DeoptimizeIf(P1 p1, char* const reason) {
-    IfBuilder builder(this);
-    builder.If<A>(p1);
-    builder.ThenDeopt(reason);
-  }
-
-  template <class A, class P1, class P2>
-  void DeoptimizeIf(P1 p1, P2 p2, const char* reason) {
-    IfBuilder builder(this);
-    builder.If<A>(p1, p2);
-    builder.ThenDeopt(reason);
-  }
-
-  template <class A, class P1, class P2, class P3>
-  void DeoptimizeIf(P1 p1, P2 p2, P3 p3, const char* reason) {
-    IfBuilder builder(this);
-    builder.If<A>(p1, p2, p3);
-    builder.ThenDeopt(reason);
-  }
-
   HValue* BuildNewElementsCapacity(HValue* old_capacity);
 
-  class JSArrayBuilder V8_FINAL {
+  class JSArrayBuilder FINAL {
    public:
     JSArrayBuilder(HGraphBuilder* builder,
                    ElementsKind kind,
@@ -1880,7 +1880,7 @@
 
  protected:
   void SetSourcePosition(int position) {
-    ASSERT(position != RelocInfo::kNoPosition);
+    DCHECK(position != RelocInfo::kNoPosition);
     position_.set_position(position - start_position_);
   }
 
@@ -1914,13 +1914,6 @@
  private:
   HGraphBuilder();
 
-  HValue* BuildUncheckedDictionaryElementLoadHelper(
-      HValue* elements,
-      HValue* key,
-      HValue* hash,
-      HValue* mask,
-      int current_probe);
-
   template <class I>
   I* AddInstructionTyped(I* instr) {
     return I::cast(AddInstruction(instr));
@@ -2054,7 +2047,7 @@
   // A class encapsulating (lazily-allocated) break and continue blocks for
   // a breakable statement.  Separated from BreakAndContinueScope so that it
   // can have a separate lifetime.
-  class BreakAndContinueInfo V8_FINAL BASE_EMBEDDED {
+  class BreakAndContinueInfo FINAL BASE_EMBEDDED {
    public:
     explicit BreakAndContinueInfo(BreakableStatement* target,
                                   Scope* scope,
@@ -2084,7 +2077,7 @@
 
   // A helper class to maintain a stack of current BreakAndContinueInfo
   // structures mirroring BreakableStatement nesting.
-  class BreakAndContinueScope V8_FINAL BASE_EMBEDDED {
+  class BreakAndContinueScope FINAL BASE_EMBEDDED {
    public:
     BreakAndContinueScope(BreakAndContinueInfo* info,
                           HOptimizedGraphBuilder* owner)
@@ -2111,14 +2104,12 @@
 
   explicit HOptimizedGraphBuilder(CompilationInfo* info);
 
-  virtual bool BuildGraph() V8_OVERRIDE;
+  virtual bool BuildGraph() OVERRIDE;
 
   // Simple accessors.
   BreakAndContinueScope* break_scope() const { return break_scope_; }
   void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
 
-  bool inline_bailout() { return inline_bailout_; }
-
   HValue* context() { return environment()->context(); }
 
   HOsrBuilder* osr() const { return osr_; }
@@ -2300,13 +2291,13 @@
   void PushArgumentsFromEnvironment(int count);
 
   void SetUpScope(Scope* scope);
-  virtual void VisitStatements(ZoneList<Statement*>* statements) V8_OVERRIDE;
+  virtual void VisitStatements(ZoneList<Statement*>* statements) OVERRIDE;
 
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) V8_OVERRIDE;
+#define DECLARE_VISIT(type) virtual void Visit##type(type* node) OVERRIDE;
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
-  Type* ToType(Handle<Map> map) { return IC::MapToType<Type>(map, zone()); }
+  Type* ToType(Handle<Map> map);
 
  private:
   // Helpers for flow graph construction.
@@ -2314,8 +2305,7 @@
     kUseCell,
     kUseGeneric
   };
-  GlobalPropertyAccess LookupGlobalProperty(Variable* var,
-                                            LookupResult* lookup,
+  GlobalPropertyAccess LookupGlobalProperty(Variable* var, LookupIterator* it,
                                             PropertyAccessType access_type);
 
   void EnsureArgumentsArePushedForAccess();
@@ -2402,6 +2392,7 @@
   void HandlePropertyAssignment(Assignment* expr);
   void HandleCompoundAssignment(Assignment* expr);
   void HandlePolymorphicNamedFieldAccess(PropertyAccessType access_type,
+                                         Expression* expr,
                                          BailoutId ast_id,
                                          BailoutId return_id,
                                          HValue* object,
@@ -2453,23 +2444,7 @@
     // PropertyAccessInfo is built for types->first().
     bool CanAccessAsMonomorphic(SmallMapList* types);
 
-    Handle<Map> map() {
-      if (type_->Is(Type::Number())) {
-        Context* context = current_info()->closure()->context();
-        context = context->native_context();
-        return handle(context->number_function()->initial_map());
-      } else if (type_->Is(Type::Boolean())) {
-        Context* context = current_info()->closure()->context();
-        context = context->native_context();
-        return handle(context->boolean_function()->initial_map());
-      } else if (type_->Is(Type::String())) {
-        Context* context = current_info()->closure()->context();
-        context = context->native_context();
-        return handle(context->string_function()->initial_map());
-      } else {
-        return type_->AsClass()->Map();
-      }
-    }
+    Handle<Map> map();
     Type* type() const { return type_; }
     Handle<String> name() const { return name_; }
 
@@ -2482,10 +2457,10 @@
       int offset;
       if (Accessors::IsJSObjectFieldAccessor<Type>(type_, name_, &offset)) {
         if (type_->Is(Type::String())) {
-          ASSERT(String::Equals(isolate()->factory()->length_string(), name_));
+          DCHECK(String::Equals(isolate()->factory()->length_string(), name_));
           *access = HObjectAccess::ForStringLength();
         } else if (type_->Is(Type::Array())) {
-          ASSERT(String::Equals(isolate()->factory()->length_string(), name_));
+          DCHECK(String::Equals(isolate()->factory()->length_string(), name_));
           *access = HObjectAccess::ForArrayLength(map()->elements_kind());
         } else {
           *access = HObjectAccess::ForMapAndOffset(map(), offset);
@@ -2498,7 +2473,6 @@
     bool has_holder() { return !holder_.is_null(); }
     bool IsLoad() const { return access_type_ == LOAD; }
 
-    LookupResult* lookup() { return &lookup_; }
     Handle<JSObject> holder() { return holder_; }
     Handle<JSFunction> accessor() { return accessor_; }
     Handle<Object> constant() { return constant_; }
@@ -2507,10 +2481,37 @@
     HType field_type() const { return field_type_; }
     HObjectAccess access() { return access_; }
 
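+    // Forwarders to the private LookupResult, replacing the removed
+    // lookup() accessor so callers no longer poke at lookup_ directly.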
+    bool IsFound() const { return lookup_.IsFound(); }
+    bool IsProperty() const { return lookup_.IsProperty(); }
+    bool IsField() const { return lookup_.IsField(); }
+    bool IsConstant() const { return lookup_.IsConstant(); }
+    bool IsAccessor() const { return lookup_.IsPropertyCallbacks(); }
+    bool IsTransition() const { return lookup_.IsTransition(); }
+
+    bool IsConfigurable() const { return lookup_.IsConfigurable(); }
+    bool IsReadOnly() const { return lookup_.IsReadOnly(); }
+
    private:
+    Handle<Object> GetAccessorsFromMap(Handle<Map> map) const {
+      return handle(lookup_.GetValueFromMap(*map), isolate());
+    }
+    Handle<Object> GetConstantFromMap(Handle<Map> map) const {
+      return handle(lookup_.GetConstantFromMap(*map), isolate());
+    }
+    Handle<HeapType> GetFieldTypeFromMap(Handle<Map> map) const {
+      return handle(lookup_.GetFieldTypeFromMap(*map), isolate());
+    }
+    Handle<Map> GetFieldOwnerFromMap(Handle<Map> map) const {
+      return handle(lookup_.GetFieldOwnerFromMap(*map));
+    }
+    int GetLocalFieldIndexFromMap(Handle<Map> map) const {
+      return lookup_.GetLocalFieldIndexFromMap(*map);
+    }
+    Representation representation() const { return lookup_.representation(); }
+
     Type* ToType(Handle<Map> map) { return builder_->ToType(map); }
     Zone* zone() { return builder_->zone(); }
-    Isolate* isolate() { return lookup_.isolate(); }
+    Isolate* isolate() const { return lookup_.isolate(); }
     CompilationInfo* top_info() { return builder_->top_info(); }
     CompilationInfo* current_info() { return builder_->current_info(); }
 
@@ -2595,6 +2596,7 @@
   HInstruction* BuildIncrement(bool returns_original_input,
                                CountOperation* expr);
   HInstruction* BuildKeyedGeneric(PropertyAccessType access_type,
+                                  Expression* expr,
                                   HValue* object,
                                   HValue* key,
                                   HValue* value);
@@ -2614,7 +2616,8 @@
                                               PropertyAccessType access_type,
                                               KeyedAccessStoreMode store_mode);
 
-  HValue* HandlePolymorphicElementAccess(HValue* object,
+  HValue* HandlePolymorphicElementAccess(Expression* expr,
+                                         HValue* object,
                                          HValue* key,
                                          HValue* val,
                                          SmallMapList* maps,
@@ -2622,14 +2625,14 @@
                                          KeyedAccessStoreMode store_mode,
                                          bool* has_side_effects);
 
-  HValue* HandleKeyedElementAccess(HValue* obj,
-                                   HValue* key,
-                                   HValue* val,
-                                   Expression* expr,
+  HValue* HandleKeyedElementAccess(HValue* obj, HValue* key, HValue* val,
+                                   Expression* expr, BailoutId ast_id,
+                                   BailoutId return_id,
                                    PropertyAccessType access_type,
                                    bool* has_side_effects);
 
   HInstruction* BuildNamedGeneric(PropertyAccessType access,
+                                  Expression* expr,
                                   HValue* object,
                                   Handle<String> name,
                                   HValue* value,
@@ -2742,7 +2745,7 @@
 Zone* AstContext::zone() const { return owner_->zone(); }
 
 
-class HStatistics V8_FINAL: public Malloced {
+class HStatistics FINAL: public Malloced {
  public:
   HStatistics()
       : times_(5),
@@ -2752,30 +2755,38 @@
         source_size_(0) { }
 
   void Initialize(CompilationInfo* info);
-  void Print();
-  void SaveTiming(const char* name, TimeDelta time, unsigned size);
+  void Print(const char* stats_name);
+  void SaveTiming(const char* name, base::TimeDelta time, unsigned size);
 
-  void IncrementFullCodeGen(TimeDelta full_code_gen) {
+  void IncrementFullCodeGen(base::TimeDelta full_code_gen) {
     full_code_gen_ += full_code_gen;
   }
 
-  void IncrementSubtotals(TimeDelta create_graph,
-                          TimeDelta optimize_graph,
-                          TimeDelta generate_code) {
-    create_graph_ += create_graph;
-    optimize_graph_ += optimize_graph;
-    generate_code_ += generate_code;
+  void IncrementCreateGraph(base::TimeDelta delta) { create_graph_ += delta; }
+
+  void IncrementOptimizeGraph(base::TimeDelta delta) {
+    optimize_graph_ += delta;
+  }
+
+  void IncrementGenerateCode(base::TimeDelta delta) { generate_code_ += delta; }
+
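+  // Kept as a convenience wrapper over the three fine-grained counters
+  // above.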
+  void IncrementSubtotals(base::TimeDelta create_graph,
+                          base::TimeDelta optimize_graph,
+                          base::TimeDelta generate_code) {
+    IncrementCreateGraph(create_graph);
+    IncrementOptimizeGraph(optimize_graph);
+    IncrementGenerateCode(generate_code);
   }
 
  private:
-  List<TimeDelta> times_;
+  List<base::TimeDelta> times_;
   List<const char*> names_;
   List<unsigned> sizes_;
-  TimeDelta create_graph_;
-  TimeDelta optimize_graph_;
-  TimeDelta generate_code_;
+  base::TimeDelta create_graph_;
+  base::TimeDelta optimize_graph_;
+  base::TimeDelta generate_code_;
   unsigned total_size_;
-  TimeDelta full_code_gen_;
+  base::TimeDelta full_code_gen_;
   double source_size_;
 };
 
@@ -2797,14 +2808,14 @@
 };
 
 
-class HTracer V8_FINAL : public Malloced {
+class HTracer FINAL : public Malloced {
  public:
   explicit HTracer(int isolate_id)
       : trace_(&string_allocator_), indent_(0) {
     if (FLAG_trace_hydrogen_file == NULL) {
       SNPrintF(filename_,
                "hydrogen-%d-%d.cfg",
-               OS::GetCurrentProcessId(),
+               base::OS::GetCurrentProcessId(),
                isolate_id);
     } else {
       StrNCpy(filename_, FLAG_trace_hydrogen_file, filename_.length());
@@ -2818,7 +2829,7 @@
   void TraceLiveRanges(const char* name, LAllocator* allocator);
 
  private:
-  class Tag V8_FINAL BASE_EMBEDDED {
+  class Tag FINAL BASE_EMBEDDED {
    public:
     Tag(HTracer* tracer, const char* name) {
       name_ = name;
@@ -2832,7 +2843,7 @@
       tracer_->indent_--;
       tracer_->PrintIndent();
       tracer_->trace_.Add("end_%s\n", name_);
-      ASSERT(tracer_->indent_ >= 0);
+      DCHECK(tracer_->indent_ >= 0);
       tracer_->FlushToFile();
     }
 
@@ -2883,7 +2894,7 @@
 };
 
 
-class NoObservableSideEffectsScope V8_FINAL {
+class NoObservableSideEffectsScope FINAL {
  public:
   explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
       builder_(builder) {
diff --git a/src/i18n.cc b/src/i18n.cc
index 900da18..cae3a32 100644
--- a/src/i18n.cc
+++ b/src/i18n.cc
@@ -131,24 +131,19 @@
   icu::UnicodeString pattern;
   date_format->toPattern(pattern);
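+  // SetProperty no longer takes a PropertyAttributes argument; only the
+  // language mode is passed alongside the value now.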
   JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("pattern"),
+      resolved, factory->NewStringFromStaticChars("pattern"),
       factory->NewStringFromTwoByte(
-        Vector<const uint16_t>(
-            reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
-            pattern.length())).ToHandleChecked(),
-      NONE,
+                   Vector<const uint16_t>(
+                       reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+                       pattern.length())).ToHandleChecked(),
       SLOPPY).Assert();
 
   // Set time zone and calendar.
   const icu::Calendar* calendar = date_format->getCalendar();
   const char* calendar_name = calendar->getType();
-  JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("calendar"),
-      factory->NewStringFromAsciiChecked(calendar_name),
-      NONE,
-      SLOPPY).Assert();
+  JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("calendar"),
+                        factory->NewStringFromAsciiChecked(calendar_name),
+                        SLOPPY).Assert();
 
   const icu::TimeZone& tz = calendar->getTimeZone();
   icu::UnicodeString time_zone;
@@ -159,21 +154,16 @@
   if (U_SUCCESS(status)) {
     if (canonical_time_zone == UNICODE_STRING_SIMPLE("Etc/GMT")) {
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("timeZone"),
-          factory->NewStringFromStaticAscii("UTC"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("timeZone"),
+          factory->NewStringFromStaticChars("UTC"), SLOPPY).Assert();
     } else {
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("timeZone"),
+          resolved, factory->NewStringFromStaticChars("timeZone"),
           factory->NewStringFromTwoByte(
-            Vector<const uint16_t>(
-                reinterpret_cast<const uint16_t*>(
-                    canonical_time_zone.getBuffer()),
-                canonical_time_zone.length())).ToHandleChecked(),
-          NONE,
+                       Vector<const uint16_t>(
+                           reinterpret_cast<const uint16_t*>(
+                               canonical_time_zone.getBuffer()),
+                           canonical_time_zone.length())).ToHandleChecked(),
           SLOPPY).Assert();
     }
   }
@@ -187,18 +177,12 @@
   if (U_SUCCESS(status)) {
     const char* ns = numbering_system->getName();
     JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("numberingSystem"),
-        factory->NewStringFromAsciiChecked(ns),
-        NONE,
-        SLOPPY).Assert();
+        resolved, factory->NewStringFromStaticChars("numberingSystem"),
+        factory->NewStringFromAsciiChecked(ns), SLOPPY).Assert();
   } else {
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("numberingSystem"),
-        factory->undefined_value(),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved,
+                          factory->NewStringFromStaticChars("numberingSystem"),
+                          factory->undefined_value(), SLOPPY).Assert();
   }
   delete numbering_system;
 
@@ -208,20 +192,14 @@
   uloc_toLanguageTag(
       icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
   if (U_SUCCESS(status)) {
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("locale"),
-        factory->NewStringFromAsciiChecked(result),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+                          factory->NewStringFromAsciiChecked(result),
+                          SLOPPY).Assert();
   } else {
     // This would never happen, since we got the locale from ICU.
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("locale"),
-        factory->NewStringFromStaticAscii("und"),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+                          factory->NewStringFromStaticChars("und"),
+                          SLOPPY).Assert();
   }
 }
 
@@ -358,26 +336,22 @@
   icu::UnicodeString pattern;
   number_format->toPattern(pattern);
   JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("pattern"),
+      resolved, factory->NewStringFromStaticChars("pattern"),
       factory->NewStringFromTwoByte(
-        Vector<const uint16_t>(
-            reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
-            pattern.length())).ToHandleChecked(),
-      NONE,
+                   Vector<const uint16_t>(
+                       reinterpret_cast<const uint16_t*>(pattern.getBuffer()),
+                       pattern.length())).ToHandleChecked(),
       SLOPPY).Assert();
 
   // Set resolved currency code in options.currency if not empty.
   icu::UnicodeString currency(number_format->getCurrency());
   if (!currency.isEmpty()) {
     JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("currency"),
-        factory->NewStringFromTwoByte(
-          Vector<const uint16_t>(
-              reinterpret_cast<const uint16_t*>(currency.getBuffer()),
-              currency.length())).ToHandleChecked(),
-        NONE,
+        resolved, factory->NewStringFromStaticChars("currency"),
+        factory->NewStringFromTwoByte(Vector<const uint16_t>(
+                                          reinterpret_cast<const uint16_t*>(
+                                              currency.getBuffer()),
+                                          currency.length())).ToHandleChecked(),
         SLOPPY).Assert();
   }
 
@@ -390,67 +364,52 @@
   if (U_SUCCESS(status)) {
     const char* ns = numbering_system->getName();
     JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("numberingSystem"),
-        factory->NewStringFromAsciiChecked(ns),
-        NONE,
-        SLOPPY).Assert();
+        resolved, factory->NewStringFromStaticChars("numberingSystem"),
+        factory->NewStringFromAsciiChecked(ns), SLOPPY).Assert();
   } else {
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("numberingSystem"),
-        factory->undefined_value(),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved,
+                          factory->NewStringFromStaticChars("numberingSystem"),
+                          factory->undefined_value(), SLOPPY).Assert();
   }
   delete numbering_system;
 
   JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("useGrouping"),
-      factory->ToBoolean(number_format->isGroupingUsed()),
-      NONE,
-      SLOPPY).Assert();
+      resolved, factory->NewStringFromStaticChars("useGrouping"),
+      factory->ToBoolean(number_format->isGroupingUsed()), SLOPPY).Assert();
 
   JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("minimumIntegerDigits"),
+      resolved, factory->NewStringFromStaticChars("minimumIntegerDigits"),
       factory->NewNumberFromInt(number_format->getMinimumIntegerDigits()),
-      NONE,
       SLOPPY).Assert();
 
   JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("minimumFractionDigits"),
+      resolved, factory->NewStringFromStaticChars("minimumFractionDigits"),
       factory->NewNumberFromInt(number_format->getMinimumFractionDigits()),
-      NONE,
       SLOPPY).Assert();
 
   JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("maximumFractionDigits"),
+      resolved, factory->NewStringFromStaticChars("maximumFractionDigits"),
       factory->NewNumberFromInt(number_format->getMaximumFractionDigits()),
-      NONE,
       SLOPPY).Assert();
 
   Handle<String> key =
-      factory->NewStringFromStaticAscii("minimumSignificantDigits");
-  if (JSReceiver::HasOwnProperty(resolved, key)) {
+      factory->NewStringFromStaticChars("minimumSignificantDigits");
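+  // HasOwnProperty now reports lookup failure through Maybe<bool>; the
+  // CHECK ensures the lookup completed before its result is used.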
+  Maybe<bool> maybe = JSReceiver::HasOwnProperty(resolved, key);
+  CHECK(maybe.has_value);
+  if (maybe.value) {
     JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("minimumSignificantDigits"),
+        resolved, factory->NewStringFromStaticChars("minimumSignificantDigits"),
         factory->NewNumberFromInt(number_format->getMinimumSignificantDigits()),
-        NONE,
         SLOPPY).Assert();
   }
 
-  key = factory->NewStringFromStaticAscii("maximumSignificantDigits");
-  if (JSReceiver::HasOwnProperty(resolved, key)) {
+  key = factory->NewStringFromStaticChars("maximumSignificantDigits");
+  maybe = JSReceiver::HasOwnProperty(resolved, key);
+  CHECK(maybe.has_value);
+  if (maybe.value) {
     JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("maximumSignificantDigits"),
+        resolved, factory->NewStringFromStaticChars("maximumSignificantDigits"),
         factory->NewNumberFromInt(number_format->getMaximumSignificantDigits()),
-        NONE,
         SLOPPY).Assert();
   }
 
@@ -460,20 +419,14 @@
   uloc_toLanguageTag(
       icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
   if (U_SUCCESS(status)) {
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("locale"),
-        factory->NewStringFromAsciiChecked(result),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+                          factory->NewStringFromAsciiChecked(result),
+                          SLOPPY).Assert();
   } else {
     // This would never happen, since we got the locale from ICU.
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("locale"),
-        factory->NewStringFromStaticAscii("und"),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+                          factory->NewStringFromStaticChars("und"),
+                          SLOPPY).Assert();
   }
 }
 
@@ -550,131 +503,85 @@
   UErrorCode status = U_ZERO_ERROR;
 
   JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("numeric"),
+      resolved, factory->NewStringFromStaticChars("numeric"),
       factory->ToBoolean(
           collator->getAttribute(UCOL_NUMERIC_COLLATION, status) == UCOL_ON),
-      NONE,
       SLOPPY).Assert();
 
   switch (collator->getAttribute(UCOL_CASE_FIRST, status)) {
     case UCOL_LOWER_FIRST:
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("caseFirst"),
-          factory->NewStringFromStaticAscii("lower"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("caseFirst"),
+          factory->NewStringFromStaticChars("lower"), SLOPPY).Assert();
       break;
     case UCOL_UPPER_FIRST:
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("caseFirst"),
-          factory->NewStringFromStaticAscii("upper"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("caseFirst"),
+          factory->NewStringFromStaticChars("upper"), SLOPPY).Assert();
       break;
     default:
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("caseFirst"),
-          factory->NewStringFromStaticAscii("false"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("caseFirst"),
+          factory->NewStringFromStaticChars("false"), SLOPPY).Assert();
   }
 
   switch (collator->getAttribute(UCOL_STRENGTH, status)) {
     case UCOL_PRIMARY: {
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("strength"),
-          factory->NewStringFromStaticAscii("primary"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("strength"),
+          factory->NewStringFromStaticChars("primary"), SLOPPY).Assert();
 
       // case level: true + s1 -> case, s1 -> base.
       if (UCOL_ON == collator->getAttribute(UCOL_CASE_LEVEL, status)) {
         JSObject::SetProperty(
-            resolved,
-            factory->NewStringFromStaticAscii("sensitivity"),
-            factory->NewStringFromStaticAscii("case"),
-            NONE,
-            SLOPPY).Assert();
+            resolved, factory->NewStringFromStaticChars("sensitivity"),
+            factory->NewStringFromStaticChars("case"), SLOPPY).Assert();
       } else {
         JSObject::SetProperty(
-            resolved,
-            factory->NewStringFromStaticAscii("sensitivity"),
-            factory->NewStringFromStaticAscii("base"),
-            NONE,
-            SLOPPY).Assert();
+            resolved, factory->NewStringFromStaticChars("sensitivity"),
+            factory->NewStringFromStaticChars("base"), SLOPPY).Assert();
       }
       break;
     }
     case UCOL_SECONDARY:
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("strength"),
-          factory->NewStringFromStaticAscii("secondary"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("strength"),
+          factory->NewStringFromStaticChars("secondary"), SLOPPY).Assert();
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("sensitivity"),
-          factory->NewStringFromStaticAscii("accent"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("sensitivity"),
+          factory->NewStringFromStaticChars("accent"), SLOPPY).Assert();
       break;
     case UCOL_TERTIARY:
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("strength"),
-          factory->NewStringFromStaticAscii("tertiary"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("strength"),
+          factory->NewStringFromStaticChars("tertiary"), SLOPPY).Assert();
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("sensitivity"),
-          factory->NewStringFromStaticAscii("variant"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("sensitivity"),
+          factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
       break;
     case UCOL_QUATERNARY:
       // We shouldn't get quaternary and identical from ICU, but if we do
       // put them into variant.
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("strength"),
-          factory->NewStringFromStaticAscii("quaternary"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("strength"),
+          factory->NewStringFromStaticChars("quaternary"), SLOPPY).Assert();
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("sensitivity"),
-          factory->NewStringFromStaticAscii("variant"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("sensitivity"),
+          factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
       break;
     default:
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("strength"),
-          factory->NewStringFromStaticAscii("identical"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("strength"),
+          factory->NewStringFromStaticChars("identical"), SLOPPY).Assert();
       JSObject::SetProperty(
-          resolved,
-          factory->NewStringFromStaticAscii("sensitivity"),
-          factory->NewStringFromStaticAscii("variant"),
-          NONE,
-          SLOPPY).Assert();
+          resolved, factory->NewStringFromStaticChars("sensitivity"),
+          factory->NewStringFromStaticChars("variant"), SLOPPY).Assert();
   }
 
   JSObject::SetProperty(
-      resolved,
-      factory->NewStringFromStaticAscii("ignorePunctuation"),
-      factory->ToBoolean(collator->getAttribute(
-          UCOL_ALTERNATE_HANDLING, status) == UCOL_SHIFTED),
-      NONE,
+      resolved, factory->NewStringFromStaticChars("ignorePunctuation"),
+      factory->ToBoolean(collator->getAttribute(UCOL_ALTERNATE_HANDLING,
+                                                status) == UCOL_SHIFTED),
       SLOPPY).Assert();
 
   // Set the locale
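
The switch above maps ICU's collation strength onto the ECMA-402 "sensitivity" vocabulary, with the case-level attribute refining the primary case. A hypothetical condensed version of that mapping, with enumerators mimicking ICU's UColAttributeValue but no ICU dependency:

```cpp
#include <cstdio>

// Stand-ins for ICU's UCOL_* strength values; this sketch does not link ICU.
enum Strength { PRIMARY, SECONDARY, TERTIARY, QUATERNARY, IDENTICAL };

const char* SensitivityFor(Strength s, bool case_level_on) {
  switch (s) {
    case PRIMARY:   return case_level_on ? "case" : "base";
    case SECONDARY: return "accent";
    case TERTIARY:  return "variant";
    default:        return "variant";  // quaternary/identical, as in the diff
  }
}

int main() {
  std::printf("%s\n", SensitivityFor(PRIMARY, /*case_level_on=*/true));
}
```
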
@@ -683,20 +590,14 @@
   uloc_toLanguageTag(
       icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
   if (U_SUCCESS(status)) {
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("locale"),
-        factory->NewStringFromAsciiChecked(result),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+                          factory->NewStringFromAsciiChecked(result),
+                          SLOPPY).Assert();
   } else {
     // This would never happen, since we got the locale from ICU.
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("locale"),
-        factory->NewStringFromStaticAscii("und"),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+                          factory->NewStringFromStaticChars("und"),
+                          SLOPPY).Assert();
   }
 }
 
@@ -747,20 +648,14 @@
   uloc_toLanguageTag(
       icu_locale.getName(), result, ULOC_FULLNAME_CAPACITY, FALSE, &status);
   if (U_SUCCESS(status)) {
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("locale"),
-        factory->NewStringFromAsciiChecked(result),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+                          factory->NewStringFromAsciiChecked(result),
+                          SLOPPY).Assert();
   } else {
     // This would never happen, since we got the locale from ICU.
-    JSObject::SetProperty(
-        resolved,
-        factory->NewStringFromStaticAscii("locale"),
-        factory->NewStringFromStaticAscii("und"),
-        NONE,
-        SLOPPY).Assert();
+    JSObject::SetProperty(resolved, factory->NewStringFromStaticChars("locale"),
+                          factory->NewStringFromStaticChars("und"),
+                          SLOPPY).Assert();
   }
 }
 
@@ -822,8 +717,10 @@
     Isolate* isolate,
     Handle<JSObject> obj) {
   Handle<String> key =
-      isolate->factory()->NewStringFromStaticAscii("dateFormat");
-  if (JSReceiver::HasOwnProperty(obj, key)) {
+      isolate->factory()->NewStringFromStaticChars("dateFormat");
+  Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
+  CHECK(maybe.has_value);
+  if (maybe.value) {
     return reinterpret_cast<icu::SimpleDateFormat*>(
         obj->GetInternalField(0));
   }
@@ -896,8 +793,10 @@
     Isolate* isolate,
     Handle<JSObject> obj) {
   Handle<String> key =
-      isolate->factory()->NewStringFromStaticAscii("numberFormat");
-  if (JSReceiver::HasOwnProperty(obj, key)) {
+      isolate->factory()->NewStringFromStaticChars("numberFormat");
+  Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
+  CHECK(maybe.has_value);
+  if (maybe.value) {
     return reinterpret_cast<icu::DecimalFormat*>(obj->GetInternalField(0));
   }
 
@@ -951,8 +850,10 @@
 
 icu::Collator* Collator::UnpackCollator(Isolate* isolate,
                                         Handle<JSObject> obj) {
-  Handle<String> key = isolate->factory()->NewStringFromStaticAscii("collator");
-  if (JSReceiver::HasOwnProperty(obj, key)) {
+  Handle<String> key = isolate->factory()->NewStringFromStaticChars("collator");
+  Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
+  CHECK(maybe.has_value);
+  if (maybe.value) {
     return reinterpret_cast<icu::Collator*>(obj->GetInternalField(0));
   }
 
@@ -1010,8 +911,10 @@
 icu::BreakIterator* BreakIterator::UnpackBreakIterator(Isolate* isolate,
                                                        Handle<JSObject> obj) {
   Handle<String> key =
-      isolate->factory()->NewStringFromStaticAscii("breakIterator");
-  if (JSReceiver::HasOwnProperty(obj, key)) {
+      isolate->factory()->NewStringFromStaticChars("breakIterator");
+  Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, key);
+  CHECK(maybe.has_value);
+  if (maybe.value) {
     return reinterpret_cast<icu::BreakIterator*>(obj->GetInternalField(0));
   }
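
UnpackDateFormat, UnpackNumberFormat, UnpackCollator, and UnpackBreakIterator all share one idiom: check a marker property naming the wrapper's kind, then reinterpret_cast internal field 0 back to the ICU object. A toy version of the pattern, with hypothetical types standing in for V8's handles and JSObject internal fields:

```cpp
#include <cassert>
#include <string>

// Hypothetical wrapper: a marker string plays the role of the own property
// ("collator", "numberFormat", ...) and internal_field0 stores the raw
// pointer to the native object.
struct FakeWrapper {
  std::string marker;
  void* internal_field0;
};

template <typename T>
T* Unpack(const FakeWrapper& obj, const std::string& expected_marker) {
  if (obj.marker != expected_marker) return nullptr;  // wrong kind of wrapper
  return reinterpret_cast<T*>(obj.internal_field0);
}

int main() {
  int backing = 42;  // pretend this is an icu::Collator
  FakeWrapper w{"collator", &backing};
  int* collator = Unpack<int>(w, "collator");
  assert(collator && *collator == 42);
}
```
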
 
diff --git a/src/i18n.h b/src/i18n.h
index 5a195eb..a50c43a 100644
--- a/src/i18n.h
+++ b/src/i18n.h
@@ -6,8 +6,8 @@
 #ifndef V8_I18N_H_
 #define V8_I18N_H_
 
-#include "unicode/uversion.h"
 #include "src/v8.h"
+#include "unicode/uversion.h"
 
 namespace U_ICU_NAMESPACE {
 class BreakIterator;
diff --git a/src/i18n.js b/src/i18n.js
index 076845b..61e0ac9 100644
--- a/src/i18n.js
+++ b/src/i18n.js
@@ -942,7 +942,7 @@
  *
  * @constructor
  */
-%SetProperty(Intl, 'Collator', function() {
+%AddNamedProperty(Intl, 'Collator', function() {
     var locales = %_Arguments(0);
     var options = %_Arguments(1);
 
@@ -960,7 +960,7 @@
 /**
  * Collator resolvedOptions method.
  */
-%SetProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
+%AddNamedProperty(Intl.Collator.prototype, 'resolvedOptions', function() {
     if (%_IsConstructCall()) {
       throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
     }
@@ -997,7 +997,7 @@
  * order in the returned list as in the input list.
  * Options are an optional parameter.
  */
-%SetProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
+%AddNamedProperty(Intl.Collator, 'supportedLocalesOf', function(locales) {
     if (%_IsConstructCall()) {
       throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
     }
@@ -1169,7 +1169,7 @@
  *
  * @constructor
  */
-%SetProperty(Intl, 'NumberFormat', function() {
+%AddNamedProperty(Intl, 'NumberFormat', function() {
     var locales = %_Arguments(0);
     var options = %_Arguments(1);
 
@@ -1187,7 +1187,7 @@
 /**
  * NumberFormat resolvedOptions method.
  */
-%SetProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
+%AddNamedProperty(Intl.NumberFormat.prototype, 'resolvedOptions', function() {
     if (%_IsConstructCall()) {
       throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
     }
@@ -1243,7 +1243,7 @@
  * order in the returned list as in the input list.
  * Options are an optional parameter.
  */
-%SetProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
+%AddNamedProperty(Intl.NumberFormat, 'supportedLocalesOf', function(locales) {
     if (%_IsConstructCall()) {
       throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
     }
@@ -1562,7 +1562,7 @@
  *
  * @constructor
  */
-%SetProperty(Intl, 'DateTimeFormat', function() {
+%AddNamedProperty(Intl, 'DateTimeFormat', function() {
     var locales = %_Arguments(0);
     var options = %_Arguments(1);
 
@@ -1580,7 +1580,7 @@
 /**
  * DateTimeFormat resolvedOptions method.
  */
-%SetProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
+%AddNamedProperty(Intl.DateTimeFormat.prototype, 'resolvedOptions', function() {
     if (%_IsConstructCall()) {
       throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
     }
@@ -1636,7 +1636,7 @@
  * order in the returned list as in the input list.
  * Options are an optional parameter.
  */
-%SetProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
+%AddNamedProperty(Intl.DateTimeFormat, 'supportedLocalesOf', function(locales) {
     if (%_IsConstructCall()) {
       throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
     }
@@ -1768,7 +1768,7 @@
  *
  * @constructor
  */
-%SetProperty(Intl, 'v8BreakIterator', function() {
+%AddNamedProperty(Intl, 'v8BreakIterator', function() {
     var locales = %_Arguments(0);
     var options = %_Arguments(1);
 
@@ -1786,7 +1786,8 @@
 /**
  * BreakIterator resolvedOptions method.
  */
-%SetProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions', function() {
+%AddNamedProperty(Intl.v8BreakIterator.prototype, 'resolvedOptions',
+  function() {
     if (%_IsConstructCall()) {
       throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
     }
@@ -1819,7 +1820,8 @@
  * order in the returned list as in the input list.
  * Options are an optional parameter.
  */
-%SetProperty(Intl.v8BreakIterator, 'supportedLocalesOf', function(locales) {
+%AddNamedProperty(Intl.v8BreakIterator, 'supportedLocalesOf',
+  function(locales) {
     if (%_IsConstructCall()) {
       throw new $TypeError(ORDINARY_FUNCTION_CALLED_AS_CONSTRUCTOR);
     }
diff --git a/src/ia32/assembler-ia32-inl.h b/src/ia32/assembler-ia32-inl.h
index 422e1fd..c7ec6d9 100644
--- a/src/ia32/assembler-ia32-inl.h
+++ b/src/ia32/assembler-ia32-inl.h
@@ -39,7 +39,7 @@
 
 #include "src/ia32/assembler-ia32.h"
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/debug.h"
 
 namespace v8 {
@@ -58,42 +58,42 @@
   if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p -= delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   } else if (rmode_ == CODE_AGE_SEQUENCE) {
     if (*pc_ == kCallOpcode) {
       int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
       *p -= delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
     }
   } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
     // Special handling of js_return when a break point is set (call
     // instruction has been inserted).
     int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
     *p -= delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
     // Special handling of a debug break slot when a break point is set (call
     // instruction has been inserted).
     int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
     *p -= delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   } else if (IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p += delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   }
 }
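
The `*p -= delta` lines in apply() patch PC-relative entries when a code object moves: the site of the displacement moved by delta while the target did not, so the stored relative value shrinks by delta. A toy model of that invariant (the real code also flushes the instruction cache afterwards, as the CpuFeatures::FlushICache calls show):

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const int32_t target = 0x5000;
  int32_t site = 0x1000;            // address the displacement is relative to
  int32_t displacement = target - site;

  const int32_t delta = 0x200;      // the whole code object moved up by this
  site += delta;                    // the site moved...
  displacement -= delta;            // ...so the relative entry is patched

  assert(site + displacement == target);  // still reaches the same target
}
```
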
 
 
 Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
                               || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(pc_);
@@ -115,7 +115,7 @@
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
   Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
       IsCodeTarget(rmode_)) {
     Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -126,13 +126,13 @@
 
 
 Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Memory::Object_at(pc_);
 }
 
 
 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Memory::Object_Handle_at(pc_);
 }
 
@@ -140,11 +140,10 @@
 void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  ASSERT(!target->IsConsString());
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
@@ -156,13 +155,13 @@
 
 
 Address RelocInfo::target_reference() {
-  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
   return Memory::Address_at(pc_);
 }
 
 
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
 }
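
The mechanical ASSERT-to-DCHECK rename that runs through the rest of this file tracks Chromium's convention: DCHECK compiles away in release builds while CHECK always fires, which is why hot-path invariants like these assembler checks use the former. A minimal sketch of the distinction, with hypothetical macros rather than V8's base/logging.h:

```cpp
#include <cstdio>
#include <cstdlib>

// CHECK fires in all builds.
#define CHECK(cond)                                      \
  do {                                                   \
    if (!(cond)) {                                       \
      std::fprintf(stderr, "CHECK failed: %s\n", #cond); \
      std::abort();                                      \
    }                                                    \
  } while (0)

// DCHECK only fires when DEBUG is defined; it is free in release builds.
#ifdef DEBUG
#define DCHECK(cond) CHECK(cond)
#else
#define DCHECK(cond) ((void)0)
#endif

int main() {
  DCHECK(2 + 2 == 4);  // verified in debug, compiled out in release
  CHECK(1 == 1);       // always verified
}
```
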
 
@@ -170,7 +169,7 @@
 void RelocInfo::set_target_runtime_entry(Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target) {
     set_target_address(target, write_barrier_mode, icache_flush_mode);
   }
@@ -178,14 +177,14 @@
 
 
 Handle<Cell> RelocInfo::target_cell_handle() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = Memory::Address_at(pc_);
   return Handle<Cell>(reinterpret_cast<Cell**>(address));
 }
 
 
 Cell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   return Cell::FromValueAddress(Memory::Address_at(pc_));
 }
 
@@ -193,11 +192,11 @@
 void RelocInfo::set_target_cell(Cell* cell,
                                 WriteBarrierMode write_barrier_mode,
                                 ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
     // TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -209,15 +208,15 @@
 
 
 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
   return Memory::Object_Handle_at(pc_ + 1);
 }
 
 
 Code* RelocInfo::code_age_stub() {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
   return Code::GetCodeFromTargetAddress(
       Assembler::target_address_at(pc_ + 1, host_));
 }
@@ -225,22 +224,22 @@
 
 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(*pc_ == kCallOpcode);
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
                                    icache_flush_mode);
 }
 
 
 Address RelocInfo::call_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return Assembler::target_address_at(pc_ + 1, host_);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Assembler::set_target_address_at(pc_ + 1, host_, target);
   if (host() != NULL) {
@@ -262,7 +261,7 @@
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return reinterpret_cast<Object**>(pc_ + 1);
 }
@@ -294,14 +293,14 @@
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     visitor->VisitEmbeddedPointer(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::CELL) {
     visitor->VisitCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     visitor->VisitCodeAgeSequence(this);
   } else if (((RelocInfo::IsJSReturn(mode) &&
@@ -321,14 +320,14 @@
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitEmbeddedPointer(heap, this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::CELL) {
     StaticVisitor::VisitCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     StaticVisitor::VisitCodeAgeSequence(heap, this);
   } else if (heap->isolate()->debug()->has_break_points() &&
@@ -367,7 +366,7 @@
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
   if (obj->IsHeapObject()) {
-    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
     x_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
   } else {
@@ -400,7 +399,7 @@
   AllowDeferredHandleDereference heap_object_check;
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
-  ASSERT(!isolate()->heap()->InNewSpace(obj));
+  DCHECK(!isolate()->heap()->InNewSpace(obj));
   if (obj->IsHeapObject()) {
     emit(reinterpret_cast<intptr_t>(handle.location()),
          RelocInfo::EMBEDDED_OBJECT);
@@ -453,7 +452,7 @@
 
 
 void Assembler::emit_w(const Immediate& x) {
-  ASSERT(RelocInfo::IsNone(x.rmode_));
+  DCHECK(RelocInfo::IsNone(x.rmode_));
   uint16_t value = static_cast<uint16_t>(x.x_);
   reinterpret_cast<uint16_t*>(pc_)[0] = value;
   pc_ += sizeof(uint16_t);
@@ -473,7 +472,7 @@
   int32_t* p = reinterpret_cast<int32_t*>(pc);
   *p = target - (pc + sizeof(int32_t));
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(p, sizeof(int32_t));
+    CpuFeatures::FlushICache(p, sizeof(int32_t));
   }
 }
 
@@ -483,6 +482,11 @@
 }
 
 
+Address Assembler::break_address_from_return_address(Address pc) {
+  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
+}
+
+
 Displacement Assembler::disp_at(Label* L) {
   return Displacement(long_at(L->pos()));
 }
@@ -504,7 +508,7 @@
   byte disp = 0x00;
   if (L->is_near_linked()) {
     int offset = L->near_link_pos() - pc_offset();
-    ASSERT(is_int8(offset));
+    DCHECK(is_int8(offset));
     disp = static_cast<byte>(offset & 0xFF);
   }
   L->link_to(pc_offset(), Label::kNear);
@@ -513,30 +517,30 @@
 
 
 void Operand::set_modrm(int mod, Register rm) {
-  ASSERT((mod & -4) == 0);
+  DCHECK((mod & -4) == 0);
   buf_[0] = mod << 6 | rm.code();
   len_ = 1;
 }
 
 
 void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
-  ASSERT(len_ == 1);
-  ASSERT((scale & -4) == 0);
+  DCHECK(len_ == 1);
+  DCHECK((scale & -4) == 0);
   // Use SIB with no index register only for base esp.
-  ASSERT(!index.is(esp) || base.is(esp));
+  DCHECK(!index.is(esp) || base.is(esp));
   buf_[1] = scale << 6 | index.code() << 3 | base.code();
   len_ = 2;
 }
 
 
 void Operand::set_disp8(int8_t disp) {
-  ASSERT(len_ == 1 || len_ == 2);
+  DCHECK(len_ == 1 || len_ == 2);
   *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
 }
 
 
 void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
-  ASSERT(len_ == 1 || len_ == 2);
+  DCHECK(len_ == 1 || len_ == 2);
   int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
   *p = disp;
   len_ += sizeof(int32_t);
@@ -561,6 +565,12 @@
   set_dispr(disp, rmode);
 }
 
+
+Operand::Operand(Immediate imm) {
+  // [disp/r]
+  set_modrm(0, ebp);
+  set_dispr(imm.x_, imm.rmode_);
+}
 } }  // namespace v8::internal
 
 #endif  // V8_IA32_ASSEMBLER_IA32_INL_H_
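
The new Operand(Immediate) constructor just above leans on a well-known ia32 encoding quirk: a ModRM byte with mod=00 and r/m=101 (the ebp slot) does not mean [ebp]; it selects a bare 32-bit displacement, i.e. an absolute [disp32] operand. This standalone check reproduces the byte that set_modrm(0, ebp) builds:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const int mod = 0;       // no register base
  const int ebp_code = 5;  // r/m = 101b
  // reg field (bits 3..5) is left 0 here; emit_operand fills it in later.
  uint8_t modrm = static_cast<uint8_t>(mod << 6 | ebp_code);
  assert(modrm == 0x05);   // followed by the 4-byte displacement/immediate
}
```
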
diff --git a/src/ia32/assembler-ia32.cc b/src/ia32/assembler-ia32.cc
index 75cb6ed..d16eea1 100644
--- a/src/ia32/assembler-ia32.cc
+++ b/src/ia32/assembler-ia32.cc
@@ -38,6 +38,8 @@
 
 #if V8_TARGET_ARCH_IA32
 
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
 #include "src/disassembler.h"
 #include "src/macro-assembler.h"
 #include "src/serialize.h"
@@ -49,7 +51,7 @@
 // Implementation of CpuFeatures
 
 void CpuFeatures::ProbeImpl(bool cross_compile) {
-  CPU cpu;
+  base::CPU cpu;
   CHECK(cpu.has_sse2());  // SSE2 support is mandatory.
   CHECK(cpu.has_cmov());  // CMOV support is mandatory.
 
@@ -69,14 +71,14 @@
 // Implementation of Displacement
 
 void Displacement::init(Label* L, Type type) {
-  ASSERT(!L->is_bound());
+  DCHECK(!L->is_bound());
   int next = 0;
   if (L->is_linked()) {
     next = L->pos();
-    ASSERT(next > 0);  // Displacements must be at positions > 0
+    DCHECK(next > 0);  // Displacements must be at positions > 0
   }
   // Ensure that we _never_ overflow the next field.
-  ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+  DCHECK(NextField::is_valid(Assembler::kMaximalBufferSize));
   data_ = NextField::encode(next) | TypeField::encode(type);
 }
 
@@ -112,7 +114,7 @@
   }
 
   // Indicate that code has changed.
-  CPU::FlushICache(pc_, instruction_count);
+  CpuFeatures::FlushICache(pc_, instruction_count);
 }
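
Flushing the instruction cache after patching, as above, keeps modified bytes visible to instruction fetch; on x86 it is effectively a no-op thanks to coherent icaches, but portable JIT code performs it unconditionally. A sketch of the patch-then-flush sequence using the GCC/Clang builtin (here the buffer is ordinary data, purely for illustration; a real JIT would patch executable memory):

```cpp
#include <cstdint>
#include <cstring>

void PatchAndFlush(uint8_t* buf, const uint8_t* patch, size_t n) {
  std::memcpy(buf, patch, n);
  // Invalidate the patched range for instruction fetch (GCC/Clang builtin).
  __builtin___clear_cache(reinterpret_cast<char*>(buf),
                          reinterpret_cast<char*>(buf + n));
}

int main() {
  uint8_t code[4] = {0x90, 0x90, 0x90, 0x90};  // NOPs
  const uint8_t ret[1] = {0xC3};               // RET
  PatchAndFlush(code, ret, 1);
}
```
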
 
 
@@ -136,11 +138,11 @@
   patcher.masm()->call(target, RelocInfo::NONE32);
 
   // Check that the size of the code generated is as expected.
-  ASSERT_EQ(kCallCodeSize,
+  DCHECK_EQ(kCallCodeSize,
             patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
 
   // Add the requested number of int3 instructions after the call.
-  ASSERT_GE(guard_bytes, 0);
+  DCHECK_GE(guard_bytes, 0);
   for (int i = 0; i < guard_bytes; i++) {
     patcher.masm()->int3();
   }
@@ -175,7 +177,7 @@
                  ScaleFactor scale,
                  int32_t disp,
                  RelocInfo::Mode rmode) {
-  ASSERT(!index.is(esp));  // illegal addressing mode
+  DCHECK(!index.is(esp));  // illegal addressing mode
   // [base + index*scale + disp/r]
   if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
     // [base + index*scale]
@@ -199,7 +201,7 @@
                  ScaleFactor scale,
                  int32_t disp,
                  RelocInfo::Mode rmode) {
-  ASSERT(!index.is(esp));  // illegal addressing mode
+  DCHECK(!index.is(esp));  // illegal addressing mode
   // [index*scale + disp/r]
   set_modrm(0, esp);
   set_sib(scale, index, ebp);
@@ -219,7 +221,7 @@
 
 
 Register Operand::reg() const {
-  ASSERT(is_reg_only());
+  DCHECK(is_reg_only());
   return Register::from_code(buf_[0] & 0x07);
 }
 
@@ -259,7 +261,7 @@
 void Assembler::GetCode(CodeDesc* desc) {
   // Finalize code (at this point overflow() may be true, but the gap ensures
   // that we are still not overlapping instructions and relocation info).
-  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
+  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
@@ -270,7 +272,7 @@
 
 
 void Assembler::Align(int m) {
-  ASSERT(IsPowerOf2(m));
+  DCHECK(base::bits::IsPowerOfTwo32(m));
   int mask = m - 1;
   int addr = pc_offset();
   Nop((m - (addr & mask)) & mask);
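
Align() relies on m being a power of two, which is exactly what the new base::bits::IsPowerOfTwo32 check enforces: then `addr & (m - 1)` is addr mod m, and the pad count is `(m - addr % m) % m`. A standalone check of the formula the hunk keeps:

```cpp
#include <cassert>

bool IsPowerOfTwo32(unsigned x) { return x != 0 && (x & (x - 1)) == 0; }

int main() {
  const int m = 16;
  assert(IsPowerOfTwo32(m));
  int mask = m - 1;
  for (int addr = 0; addr < 64; ++addr) {
    int pad = (m - (addr & mask)) & mask;  // same formula as Assembler::Align
    assert((addr + pad) % m == 0);         // padded offset is m-aligned
    assert(pad >= 0 && pad < m);           // never pads a full block
  }
}
```
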
@@ -420,7 +422,7 @@
 
 
 void Assembler::pop(Register dst) {
-  ASSERT(reloc_info_writer.last_pc() != NULL);
+  DCHECK(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
   EMIT(0x58 | dst.code());
 }
@@ -633,6 +635,13 @@
 }
 
 
+void Assembler::xchg(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x87);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(2, Operand(dst), Immediate(imm32));
@@ -661,7 +670,7 @@
 
 
 void Assembler::add(const Operand& dst, const Immediate& x) {
-  ASSERT(reloc_info_writer.last_pc() != NULL);
+  DCHECK(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
   emit_arith(0, dst, x);
 }
@@ -727,7 +736,7 @@
 
 
 void Assembler::cmpw(const Operand& op, Immediate imm16) {
-  ASSERT(imm16.is_int16());
+  DCHECK(imm16.is_int16());
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x81);
@@ -816,10 +825,17 @@
 }
 
 
-void Assembler::idiv(Register src) {
+void Assembler::idiv(const Operand& src) {
   EnsureSpace ensure_space(this);
   EMIT(0xF7);
-  EMIT(0xF8 | src.code());
+  emit_operand(edi, src);
+}
+
+
+void Assembler::div(const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(esi, src);
 }
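
The seemingly arbitrary registers passed to emit_operand here encode opcode extensions, not operands: for group opcodes like 0xF7 the ModRM reg field is a "/digit" in Intel syntax. edi has code 7 and esi code 6, so emit_operand(edi, ...) yields F7 /7 (idiv) and emit_operand(esi, ...) yields F7 /6 (div); the same trick recurs in the neg (/3 via ebx), not_ (/2 via edx), and sar/shl/shr (/7, /4, /5) rewrites below. A check that this reproduces the old register-form bytes:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const int edi_code = 7, esi_code = 6, eax_code = 0;
  // Register operand form: mod=11, reg=/digit, rm=operand register.
  uint8_t idiv_eax = static_cast<uint8_t>(0xC0 | edi_code << 3 | eax_code);
  uint8_t div_eax  = static_cast<uint8_t>(0xC0 | esi_code << 3 | eax_code);
  assert(idiv_eax == 0xF8);  // matches the old EMIT(0xF8 | src.code())
  assert(div_eax == 0xF0);
}
```
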
 
 
@@ -839,14 +855,19 @@
 
 
 void Assembler::imul(Register dst, Register src, int32_t imm32) {
+  imul(dst, Operand(src), imm32);
+}
+
+
+void Assembler::imul(Register dst, const Operand& src, int32_t imm32) {
   EnsureSpace ensure_space(this);
   if (is_int8(imm32)) {
     EMIT(0x6B);
-    EMIT(0xC0 | dst.code() << 3 | src.code());
+    emit_operand(dst, src);
     EMIT(imm32);
   } else {
     EMIT(0x69);
-    EMIT(0xC0 | dst.code() << 3 | src.code());
+    emit_operand(dst, src);
     emit(imm32);
   }
 }
@@ -886,6 +907,13 @@
 }
 
 
+void Assembler::neg(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(ebx, dst);
+}
+
+
 void Assembler::not_(Register dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xF7);
@@ -893,6 +921,13 @@
 }
 
 
+void Assembler::not_(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(edx, dst);
+}
+
+
 void Assembler::or_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(1, Operand(dst), Immediate(imm32));
@@ -921,7 +956,7 @@
 
 void Assembler::rcl(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
     EMIT(0xD0 | dst.code());
@@ -935,7 +970,7 @@
 
 void Assembler::rcr(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
     EMIT(0xD8 | dst.code());
@@ -949,7 +984,7 @@
 
 void Assembler::ror(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
     EMIT(0xC8 | dst.code());
@@ -968,24 +1003,24 @@
 }
 
 
-void Assembler::sar(Register dst, uint8_t imm8) {
+void Assembler::sar(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xF8 | dst.code());
+    emit_operand(edi, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xF8 | dst.code());
+    emit_operand(edi, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::sar_cl(Register dst) {
+void Assembler::sar_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xF8 | dst.code());
+  emit_operand(edi, dst);
 }
 
 
@@ -1004,24 +1039,24 @@
 }
 
 
-void Assembler::shl(Register dst, uint8_t imm8) {
+void Assembler::shl(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xE0 | dst.code());
+    emit_operand(esp, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xE0 | dst.code());
+    emit_operand(esp, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::shl_cl(Register dst) {
+void Assembler::shl_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xE0 | dst.code());
+  emit_operand(esp, dst);
 }
 
 
@@ -1033,24 +1068,24 @@
 }
 
 
-void Assembler::shr(Register dst, uint8_t imm8) {
+void Assembler::shr(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xE8 | dst.code());
+    emit_operand(ebp, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xE8 | dst.code());
+    emit_operand(ebp, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::shr_cl(Register dst) {
+void Assembler::shr_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xE8 | dst.code());
+  emit_operand(ebp, dst);
 }
 
 
@@ -1222,7 +1257,7 @@
 
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint16(imm16));
+  DCHECK(is_uint16(imm16));
   if (imm16 == 0) {
     EMIT(0xC3);
   } else {
@@ -1267,7 +1302,7 @@
 
 void Assembler::bind_to(Label* L, int pos) {
   EnsureSpace ensure_space(this);
-  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
   while (L->is_linked()) {
     Displacement disp = disp_at(L);
     int fixup_pos = L->pos();
@@ -1276,7 +1311,7 @@
       long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
     } else {
       if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
-        ASSERT(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
+        DCHECK(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
       }
       // Relative address, relative to point after address.
       int imm32 = pos - (fixup_pos + sizeof(int32_t));
@@ -1288,7 +1323,7 @@
     int fixup_pos = L->near_link_pos();
     int offset_to_next =
         static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
-    ASSERT(offset_to_next <= 0);
+    DCHECK(offset_to_next <= 0);
     // Relative address, relative to point after address.
     int disp = pos - fixup_pos - sizeof(int8_t);
     CHECK(0 <= disp && disp <= 127);
@@ -1305,7 +1340,7 @@
 
 void Assembler::bind(Label* L) {
   EnsureSpace ensure_space(this);
-  ASSERT(!L->is_bound());  // label can only be bound once
+  DCHECK(!L->is_bound());  // label can only be bound once
   bind_to(L, pc_offset());
 }
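
bind_to() above services the assembler's forward references: each unresolved jump leaves a displacement slot in the buffer, and an unbound label chains those slots together through the buffer itself. Binding walks the chain and patches each slot with the distance from the point after the slot to the target. A toy model of the mechanism, one int32 per slot for simplicity:

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

struct Label { int pos = -1; bool linked = false; };

std::vector<int32_t> buf;  // one entry per "displacement slot"

void jmp(Label* l) {
  int here = static_cast<int>(buf.size());
  buf.push_back(l->linked ? l->pos : -1);  // store previous link (or end mark)
  l->pos = here;
  l->linked = true;
}

void bind(Label* l, int target) {
  while (l->linked) {
    int fixup = l->pos;
    int next = buf[fixup];
    buf[fixup] = target - (fixup + 1);  // relative to point after the slot
    if (next < 0) l->linked = false; else l->pos = next;
  }
  l->pos = target;
}

int main() {
  Label l;
  jmp(&l); jmp(&l);  // two forward references, still unbound
  buf.push_back(0);  // some other instruction
  bind(&l, static_cast<int>(buf.size()));
  assert(buf[0] + 0 + 1 == 3 && buf[1] + 1 + 1 == 3);  // both reach target 3
}
```
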
 
@@ -1316,7 +1351,7 @@
   if (L->is_bound()) {
     const int long_size = 5;
     int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
+    DCHECK(offs <= 0);
     // 1110 1000 #32-bit disp.
     EMIT(0xE8);
     emit(offs - long_size);
@@ -1331,7 +1366,7 @@
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE8);
   if (RelocInfo::IsRuntimeEntry(rmode)) {
     emit(reinterpret_cast<uint32_t>(entry), rmode);
@@ -1365,7 +1400,7 @@
                      TypeFeedbackId ast_id) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  ASSERT(RelocInfo::IsCodeTarget(rmode)
+  DCHECK(RelocInfo::IsCodeTarget(rmode)
       || rmode == RelocInfo::CODE_AGE_SEQUENCE);
   EMIT(0xE8);
   emit(code, rmode, ast_id);
@@ -1378,7 +1413,7 @@
     const int short_size = 2;
     const int long_size  = 5;
     int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
+    DCHECK(offs <= 0);
     if (is_int8(offs - short_size)) {
       // 1110 1011 #8-bit disp.
       EMIT(0xEB);
@@ -1401,7 +1436,7 @@
 
 void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
   if (RelocInfo::IsRuntimeEntry(rmode)) {
     emit(reinterpret_cast<uint32_t>(entry), rmode);
@@ -1420,7 +1455,7 @@
 
 void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
   emit(code, rmode);
 }
@@ -1428,12 +1463,12 @@
 
 void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
   EnsureSpace ensure_space(this);
-  ASSERT(0 <= cc && static_cast<int>(cc) < 16);
+  DCHECK(0 <= cc && static_cast<int>(cc) < 16);
   if (L->is_bound()) {
     const int short_size = 2;
     const int long_size  = 6;
     int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
+    DCHECK(offs <= 0);
     if (is_int8(offs - short_size)) {
       // 0111 tttn #8-bit disp
       EMIT(0x70 | cc);
@@ -1460,7 +1495,7 @@
 
 void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
+  DCHECK((0 <= cc) && (static_cast<int>(cc) < 16));
   // 0000 1111 1000 tttn #32-bit disp.
   EMIT(0x0F);
   EMIT(0x80 | cc);
@@ -1587,7 +1622,7 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(IsEnabled(SSE3));
+  DCHECK(IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   EMIT(0xDB);
   emit_operand(ecx, adr);
@@ -1595,7 +1630,7 @@
 
 
 void Assembler::fisttp_d(const Operand& adr) {
-  ASSERT(IsEnabled(SSE3));
+  DCHECK(IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   EMIT(0xDD);
   emit_operand(ecx, adr);
@@ -1872,7 +1907,7 @@
 
 
 void Assembler::setcc(Condition cc, Register reg) {
-  ASSERT(reg.is_byte_register());
+  DCHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x90 | cc);
@@ -1979,6 +2014,15 @@
 }
 
 
+void Assembler::subsd(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF2);
+  EMIT(0x0F);
+  EMIT(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::divsd(XMMRegister dst, XMMRegister src) {
   EnsureSpace ensure_space(this);
   EMIT(0xF2);
@@ -2099,7 +2143,7 @@
 
 
 void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) {
-  ASSERT(IsEnabled(SSE4_1));
+  DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2156,7 +2200,7 @@
 
 
 void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
-  ASSERT(is_uint8(imm8));
+  DCHECK(is_uint8(imm8));
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0xC6);
@@ -2202,7 +2246,7 @@
 
 
 void Assembler::movntdqa(XMMRegister dst, const Operand& src) {
-  ASSERT(IsEnabled(SSE4_1));
+  DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2222,7 +2266,7 @@
 
 
 void Assembler::prefetch(const Operand& src, int level) {
-  ASSERT(is_uint2(level));
+  DCHECK(is_uint2(level));
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x18);
@@ -2287,8 +2331,8 @@
 
 
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
-  ASSERT(IsEnabled(SSE4_1));
-  ASSERT(is_uint8(imm8));
+  DCHECK(IsEnabled(SSE4_1));
+  DCHECK(is_uint8(imm8));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2327,7 +2371,7 @@
 
 
 void Assembler::ptest(XMMRegister dst, XMMRegister src) {
-  ASSERT(IsEnabled(SSE4_1));
+  DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2386,7 +2430,7 @@
 
 
 void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) {
-  ASSERT(IsEnabled(SSE4_1));
+  DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2398,7 +2442,7 @@
 
 
 void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
-  ASSERT(IsEnabled(SSE4_1));
+  DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x0F);
@@ -2458,16 +2502,13 @@
 
 
 void Assembler::GrowBuffer() {
-  ASSERT(buffer_overflow());
+  DCHECK(buffer_overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
 
   // Compute new buffer size.
   CodeDesc desc;  // the new buffer
-  if (buffer_size_ < 4*KB) {
-    desc.buffer_size = 4*KB;
-  } else {
-    desc.buffer_size = 2*buffer_size_;
-  }
+  desc.buffer_size = 2 * buffer_size_;
+
   // Some internal data structures overflow for very large buffers,
   // they must ensure that kMaximalBufferSize is not too large.
   if ((desc.buffer_size > kMaximalBufferSize) ||
@@ -2494,12 +2535,7 @@
           desc.reloc_size);
 
   // Switch buffers.
-  if (isolate()->assembler_spare_buffer() == NULL &&
-      buffer_size_ == kMinimalBufferSize) {
-    isolate()->set_assembler_spare_buffer(buffer_);
-  } else {
-    DeleteArray(buffer_);
-  }
+  DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
@@ -2517,14 +2553,14 @@
     }
   }
 
-  ASSERT(!buffer_overflow());
+  DCHECK(!buffer_overflow());
 }
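
The GrowBuffer() hunks drop two special cases: the "grow small buffers to 4KB first" step and the per-isolate spare-buffer recycling. What remains is plain doubling, which keeps appends amortized O(1): total copying over n emitted bytes stays below about 2n. A toy demonstration of that bound:

```cpp
#include <cassert>
#include <cstddef>

int main() {
  size_t capacity = 256, used = 0, bytes_copied = 0;
  const size_t n = 1 << 20;
  for (size_t i = 0; i < n; ++i) {
    if (used == capacity) {   // mirrors buffer_overflow()
      bytes_copied += used;   // cost of the copy in GrowBuffer
      capacity *= 2;          // desc.buffer_size = 2 * buffer_size_
    }
    ++used;
  }
  assert(bytes_copied < 2 * n);  // amortized constant per emitted byte
}
```
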
 
 
 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
-  ASSERT(is_uint8(op1) && is_uint8(op2));  // wrong opcode
-  ASSERT(is_uint8(imm8));
-  ASSERT((op1 & 0x01) == 0);  // should be 8bit operation
+  DCHECK(is_uint8(op1) && is_uint8(op2));  // wrong opcode
+  DCHECK(is_uint8(imm8));
+  DCHECK((op1 & 0x01) == 0);  // should be 8bit operation
   EMIT(op1);
   EMIT(op2 | dst.code());
   EMIT(imm8);
@@ -2532,7 +2568,7 @@
 
 
 void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
-  ASSERT((0 <= sel) && (sel <= 7));
+  DCHECK((0 <= sel) && (sel <= 7));
   Register ireg = { sel };
   if (x.is_int8()) {
     EMIT(0x83);  // using a sign-extended 8-bit immediate.
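
emit_arith picks opcode 0x83 when the immediate fits in a signed byte: the CPU sign-extends the 8-bit immediate to 32 bits, saving three bytes per instruction over the 32-bit-immediate form. A standalone check of the round-trip property the encoder relies on:

```cpp
#include <cassert>
#include <cstdint>

bool is_int8(int32_t x) { return x >= -128 && x <= 127; }

int main() {
  for (int32_t imm : {0, 1, -1, 127, -128, 128, -129, 1 << 20}) {
    if (is_int8(imm)) {
      int32_t widened = static_cast<int8_t>(imm);  // what the CPU does
      assert(widened == imm);                      // round-trips exactly
    }
  }
}
```
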
@@ -2551,7 +2587,7 @@
 
 void Assembler::emit_operand(Register reg, const Operand& adr) {
   const unsigned length = adr.len_;
-  ASSERT(length > 0);
+  DCHECK(length > 0);
 
   // Emit updated ModRM byte containing the given register.
   pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
@@ -2570,8 +2606,8 @@
 
 
 void Assembler::emit_farith(int b1, int b2, int i) {
-  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
-  ASSERT(0 <= i &&  i < 8);  // illegal stack offset
+  DCHECK(is_uint8(b1) && is_uint8(b2));  // wrong opcode
+  DCHECK(0 <= i &&  i < 8);  // illegal stack offset
   EMIT(b1);
   EMIT(b2 + i);
 }
@@ -2590,7 +2626,7 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  ASSERT(!RelocInfo::IsNone(rmode));
+  DCHECK(!RelocInfo::IsNone(rmode));
   // Don't record external references unless the heap will be serialized.
   if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
       !serializer_enabled() && !emit_debug_code()) {
@@ -2603,14 +2639,14 @@
 
 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return isolate->factory()->empty_constant_pool_array();
 }
 
 
 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return;
 }
 
diff --git a/src/ia32/assembler-ia32.h b/src/ia32/assembler-ia32.h
index de8b04f..8175778 100644
--- a/src/ia32/assembler-ia32.h
+++ b/src/ia32/assembler-ia32.h
@@ -78,8 +78,8 @@
   static inline Register FromAllocationIndex(int index);
 
   static Register from_code(int code) {
-    ASSERT(code >= 0);
-    ASSERT(code < kNumRegisters);
+    DCHECK(code >= 0);
+    DCHECK(code < kNumRegisters);
     Register r = { code };
     return r;
   }
@@ -88,11 +88,11 @@
   // eax, ebx, ecx and edx are byte registers, the rest are not.
   bool is_byte_register() const { return code_ <= 3; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
 
@@ -122,7 +122,7 @@
 
 
 inline const char* Register::AllocationIndexToString(int index) {
-  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
   // This is the mapping of allocation indices to registers.
   const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
   return kNames[index];
@@ -130,13 +130,13 @@
 
 
 inline int Register::ToAllocationIndex(Register reg) {
-  ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
+  DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
   return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
 }
 
 
 inline Register Register::FromAllocationIndex(int index)  {
-  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
   return (index >= 4) ? from_code(index + 2) : from_code(index);
 }
 
@@ -149,12 +149,12 @@
   }
 
   static int ToAllocationIndex(XMMRegister reg) {
-    ASSERT(reg.code() != 0);
+    DCHECK(reg.code() != 0);
     return reg.code() - 1;
   }
 
   static XMMRegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     return from_code(index + 1);
   }
 
@@ -168,14 +168,14 @@
   }
 
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
 
   bool is(XMMRegister reg) const { return code_ == reg.code_; }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "xmm1",
       "xmm2",
@@ -300,6 +300,7 @@
   int x_;
   RelocInfo::Mode rmode_;
 
+  friend class Operand;
   friend class Assembler;
   friend class MacroAssembler;
 };
@@ -322,12 +323,17 @@
 
 class Operand BASE_EMBEDDED {
  public:
+  // reg
+  INLINE(explicit Operand(Register reg));
+
   // XMM reg
   INLINE(explicit Operand(XMMRegister xmm_reg));
 
   // [disp/r]
   INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
-  // disp only must always be relocated
+
+  // [disp/r]
+  INLINE(explicit Operand(Immediate imm));
 
   // [base + disp/r]
   explicit Operand(Register base, int32_t disp,
@@ -364,6 +370,10 @@
                    RelocInfo::CELL);
   }
 
+  static Operand ForRegisterPlusImmediate(Register base, Immediate imm) {
+    return Operand(base, imm.x_, imm.rmode_);
+  }
+
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
@@ -375,9 +385,6 @@
   Register reg() const;
 
  private:
-  // reg
-  INLINE(explicit Operand(Register reg));
-
   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
   inline void set_modrm(int mod, Register rm);
@@ -394,7 +401,6 @@
 
   friend class Assembler;
   friend class MacroAssembler;
-  friend class LCodeGen;
 };
 
 
@@ -513,6 +519,9 @@
   // of that call in the instruction stream.
   inline static Address target_address_from_return_address(Address pc);
 
+  // Return the code target address of the patched debug break slot.
+  inline static Address break_address_from_return_address(Address pc);
+
   // This sets the branch destination (which is in the instruction on x86).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
@@ -647,8 +656,9 @@
   void rep_stos();
   void stos();
 
-  // Exchange two registers
+  // Exchange
   void xchg(Register dst, Register src);
+  void xchg(Register dst, const Operand& src);
 
   // Arithmetics
   void adc(Register dst, int32_t imm32);
@@ -690,13 +700,17 @@
 
   void cdq();
 
-  void idiv(Register src);
+  void idiv(Register src) { idiv(Operand(src)); }
+  void idiv(const Operand& src);
+  void div(Register src) { div(Operand(src)); }
+  void div(const Operand& src);
 
   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
   void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
+  void imul(Register dst, const Operand& src, int32_t imm32);
 
   void inc(Register dst);
   void inc(const Operand& dst);
@@ -707,8 +721,10 @@
   void mul(Register src);                                // edx:eax = eax * reg.
 
   void neg(Register dst);
+  void neg(const Operand& dst);
 
   void not_(Register dst);
+  void not_(const Operand& dst);
 
   void or_(Register dst, int32_t imm32);
   void or_(Register dst, Register src) { or_(dst, Operand(src)); }
@@ -722,22 +738,28 @@
   void ror(Register dst, uint8_t imm8);
   void ror_cl(Register dst);
 
-  void sar(Register dst, uint8_t imm8);
-  void sar_cl(Register dst);
+  void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
+  void sar(const Operand& dst, uint8_t imm8);
+  void sar_cl(Register dst) { sar_cl(Operand(dst)); }
+  void sar_cl(const Operand& dst);
 
   void sbb(Register dst, const Operand& src);
 
   void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);
 
-  void shl(Register dst, uint8_t imm8);
-  void shl_cl(Register dst);
+  void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
+  void shl(const Operand& dst, uint8_t imm8);
+  void shl_cl(Register dst) { shl_cl(Operand(dst)); }
+  void shl_cl(const Operand& dst);
 
   void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);
 
-  void shr(Register dst, uint8_t imm8);
-  void shr_cl(Register dst);
+  void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
+  void shr(const Operand& dst, uint8_t imm8);
+  void shr_cl(Register dst) { shr_cl(Operand(dst)); }
+  void shr_cl(const Operand& dst);
 
   void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
@@ -921,6 +943,9 @@
     cvttss2si(dst, Operand(src));
   }
   void cvttsd2si(Register dst, const Operand& src);
+  void cvttsd2si(Register dst, XMMRegister src) {
+    cvttsd2si(dst, Operand(src));
+  }
   void cvtsd2si(Register dst, XMMRegister src);
 
   void cvtsi2sd(XMMRegister dst, Register src) { cvtsi2sd(dst, Operand(src)); }
@@ -931,6 +956,7 @@
   void addsd(XMMRegister dst, XMMRegister src);
   void addsd(XMMRegister dst, const Operand& src);
   void subsd(XMMRegister dst, XMMRegister src);
+  void subsd(XMMRegister dst, const Operand& src);
   void mulsd(XMMRegister dst, XMMRegister src);
   void mulsd(XMMRegister dst, const Operand& src);
   void divsd(XMMRegister dst, XMMRegister src);
@@ -1153,7 +1179,7 @@
 #ifdef DEBUG
   ~EnsureSpace() {
     int bytes_generated = space_before_ - assembler_->available_space();
-    ASSERT(bytes_generated < assembler_->kGap);
+    DCHECK(bytes_generated < assembler_->kGap);
   }
 #endif
 
diff --git a/src/ia32/builtins-ia32.cc b/src/ia32/builtins-ia32.cc
index c62afb0..c24e77f 100644
--- a/src/ia32/builtins-ia32.cc
+++ b/src/ia32/builtins-ia32.cc
@@ -6,10 +6,10 @@
 
 #if V8_TARGET_ARCH_IA32
 
+#include "src/code-factory.h"
 #include "src/codegen.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -42,7 +42,7 @@
     __ push(edi);
     __ push(scratch);  // Restore return address.
   } else {
-    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
   // JumpToExternalReference expects eax to contain the number of arguments
@@ -92,7 +92,7 @@
   __ cmp(esp, Operand::StaticVariable(stack_limit));
   __ j(above_equal, &ok, Label::kNear);
 
-  CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
   GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
@@ -110,7 +110,7 @@
   // -----------------------------------
 
   // Should never create mementos for api functions.
-  ASSERT(!is_api_function || !create_memento);
+  DCHECK(!is_api_function || !create_memento);
 
   // Enter a construct frame.
   {
@@ -178,7 +178,7 @@
         __ push(edi);
 
         __ push(edi);  // constructor
-        __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
 
         __ pop(edi);
         __ pop(eax);
@@ -359,9 +359,9 @@
     // edi: function (constructor)
     __ push(edi);
     if (create_memento) {
-      __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
     } else {
-      __ CallRuntime(Runtime::kHiddenNewObject, 1);
+      __ CallRuntime(Runtime::kNewObject, 1);
     }
     __ mov(ebx, eax);  // store result in ebx
 
@@ -550,8 +550,8 @@
 }
 
 
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
   GenerateTailCallToReturnedCode(masm);
 }
 
@@ -566,7 +566,7 @@
   // Whether to compile in a background thread.
   __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
 
-  __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
   // Restore receiver.
   __ pop(edi);
 }
@@ -670,7 +670,7 @@
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ popad();
     // Tear down internal frame.
   }
@@ -697,7 +697,7 @@
 
     // Pass deoptimization type to the runtime system.
     __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
-    __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
 
     // Tear down internal frame.
   }
@@ -765,7 +765,7 @@
   // 3a. Patch the first argument if necessary when calling a function.
   Label shift_arguments;
   __ Move(edx, Immediate(0));  // indicate regular JS_FUNCTION
-  { Label convert_to_object, use_global_receiver, patch_receiver;
+  { Label convert_to_object, use_global_proxy, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
@@ -787,9 +787,9 @@
     // global object if it is null or undefined.
     __ JumpIfSmi(ebx, &convert_to_object);
     __ cmp(ebx, factory->null_value());
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     __ cmp(ebx, factory->undefined_value());
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
     __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
     __ j(above_equal, &shift_arguments);
@@ -814,10 +814,10 @@
     __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
     __ jmp(&patch_receiver);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ mov(ebx,
            Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
 
     __ bind(&patch_receiver);
     __ mov(Operand(esp, eax, times_4, 0), ebx);
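NOTE: The use_global_receiver to use_global_proxy renames in this hunk and the next are not purely cosmetic: when a sloppy-mode receiver is null or undefined, the replacement receiver is now loaded from GlobalObject::kGlobalProxyOffset, so the callee observes the global proxy as 'this' rather than the global object itself.
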
@@ -943,7 +943,7 @@
     __ mov(ebx, Operand(ebp, kReceiverOffset));
 
     // Check that the function is a JS function (otherwise it must be a proxy).
-    Label push_receiver, use_global_receiver;
+    Label push_receiver, use_global_proxy;
     __ mov(edi, Operand(ebp, kFunctionOffset));
     __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
     __ j(not_equal, &push_receiver);
@@ -971,9 +971,9 @@
     // global object if it is null or undefined.
     __ JumpIfSmi(ebx, &call_to_object);
     __ cmp(ebx, factory->null_value());
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     __ cmp(ebx, factory->undefined_value());
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
     __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
     __ j(above_equal, &push_receiver);
@@ -984,10 +984,10 @@
     __ mov(ebx, eax);
     __ jmp(&push_receiver);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ mov(ebx,
            Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
 
     // Push the receiver.
     __ bind(&push_receiver);
@@ -995,13 +995,19 @@
 
     // Copy all arguments from the array to the stack.
     Label entry, loop;
-    __ mov(ecx, Operand(ebp, kIndexOffset));
+    Register receiver = LoadDescriptor::ReceiverRegister();
+    Register key = LoadDescriptor::NameRegister();
+    __ mov(key, Operand(ebp, kIndexOffset));
     __ jmp(&entry);
     __ bind(&loop);
-    __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
+    __ mov(receiver, Operand(ebp, kArgumentsOffset));  // load arguments
 
     // Use inline caching to speed up access to arguments.
-    Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Immediate(Smi::FromInt(0)));
+    }
+    Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
     __ call(ic, RelocInfo::CODE_TARGET);
     // It is important that we do not have a test instruction after the
     // call.  A test instruction after the call is used to indicate that
@@ -1011,19 +1017,19 @@
     // Push the nth argument.
     __ push(eax);
 
-    // Update the index on the stack and in register eax.
-    __ mov(ecx, Operand(ebp, kIndexOffset));
-    __ add(ecx, Immediate(1 << kSmiTagSize));
-    __ mov(Operand(ebp, kIndexOffset), ecx);
+    // Update the index on the stack and in register key.
+    __ mov(key, Operand(ebp, kIndexOffset));
+    __ add(key, Immediate(1 << kSmiTagSize));
+    __ mov(Operand(ebp, kIndexOffset), key);
 
     __ bind(&entry);
-    __ cmp(ecx, Operand(ebp, kLimitOffset));
+    __ cmp(key, Operand(ebp, kLimitOffset));
     __ j(not_equal, &loop);
 
     // Call the function.
     Label call_proxy;
-    __ mov(eax, ecx);
     ParameterCount actual(eax);
+    __ Move(eax, key);
     __ SmiUntag(eax);
     __ mov(edi, Operand(ebp, kFunctionOffset));
     __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
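NOTE: Two details of the rewritten argument-copy loop are easy to miss. First, the keyed load now goes through the LoadDescriptor register convention and CodeFactory::KeyedLoadIC, with an explicit zero slot written to the vector-IC slot register when --vector-ics is on. Second, the loop index stays smi-tagged while on the stack, which is why it is advanced by 1 << kSmiTagSize instead of 1. A standalone sketch of that tagging arithmetic, assuming the 32-bit smi layout with tag bit 0 and kSmiTagSize == 1:

#include <cassert>
#include <cstdint>

constexpr int kSmiTagSize = 1;

int32_t SmiTag(int32_t value) { return value << kSmiTagSize; }
int32_t SmiUntag(int32_t smi) { return smi >> kSmiTagSize; }

int main() {
  int32_t key = SmiTag(7);
  key += 1 << kSmiTagSize;     // the stub's "add key, Immediate(1 << kSmiTagSize)"
  assert(SmiUntag(key) == 8);  // still a valid smi, value advanced by one
  return 0;
}
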
@@ -1436,7 +1442,7 @@
   __ j(above_equal, &ok, Label::kNear);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+    __ CallRuntime(Runtime::kStackGuard, 0);
   }
   __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
          RelocInfo::CODE_TARGET);
diff --git a/src/ia32/code-stubs-ia32.cc b/src/ia32/code-stubs-ia32.cc
index 9e925b0..4e14b69 100644
--- a/src/ia32/code-stubs-ia32.cc
+++ b/src/ia32/code-stubs-ia32.cc
@@ -6,468 +6,116 @@
 
 #if V8_TARGET_ARCH_IA32
 
+#include "src/base/bits.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
 #include "src/isolate.h"
 #include "src/jsregexp.h"
 #include "src/regexp-macro-assembler.h"
 #include "src/runtime.h"
-#include "src/stub-cache.h"
-#include "src/codegen.h"
-#include "src/runtime.h"
 
 namespace v8 {
 namespace internal {
 
 
-void FastNewClosureStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { ebx };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edi };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax, ebx, ecx };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(
-          Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax, ebx, ecx, edx };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { ebx, edx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { ecx, ebx, eax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
-}
-
-
-void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void StringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx, eax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax, ebx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-}
-
-
 static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate,
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // eax -- number of arguments
   // edi -- function
   // ebx -- allocation site with elements kind
-  static Register registers_variable_args[] = { edi, ebx, eax };
-  static Register registers_no_args[] = { edi, ebx };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = eax;
-    descriptor->register_param_count_ = 3;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // eax -- number of arguments
   // edi -- constructor function
-  static Register registers_variable_args[] = { edi, eax };
-  static Register registers_no_args[] = { edi };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kInternalArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 1;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = eax;
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(CompareNilIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ToBooleanIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
-}
-
-
-void StoreGlobalStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx, eax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax, ebx, ecx, edx };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, eax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { ecx, edx, eax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, eax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
-    static Register registers[] = { edi,  // JSFunction
-                                    esi,  // context
-                                    eax,  // actual number of arguments
-                                    ebx,  // expected number of arguments
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // JSFunction
-        Representation::Tagged(),     // context
-        Representation::Integer32(),  // actual number of arguments
-        Representation::Integer32(),  // expected number of arguments
-    };
-    descriptor->register_param_count_ = 4;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::KeyedCall);
-    static Register registers[] = { esi,  // context
-                                    ecx,  // key
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // key
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::NamedCall);
-    static Register registers[] = { esi,  // context
-                                    ecx,  // name
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // name
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::CallHandler);
-    static Register registers[] = { esi,  // context
-                                    edx,  // receiver
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),  // context
-        Representation::Tagged(),  // receiver
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ApiFunctionCall);
-    static Register registers[] = { eax,  // callee
-                                    ebx,  // call_data
-                                    ecx,  // holder
-                                    edx,  // api_function_address
-                                    esi,  // context
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),    // callee
-        Representation::Tagged(),    // call_data
-        Representation::Tagged(),    // holder
-        Representation::External(),  // api_function_address
-        Representation::Tagged(),    // context
-    };
-    descriptor->register_param_count_ = 5;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
 #define __ ACCESS_MASM(masm)
 
 
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
   isolate()->counters()->code_stubs()->Increment();
 
-  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
-  int param_count = descriptor->register_param_count_;
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetEnvironmentParameterCount();
   {
     // Call the runtime system in a fresh internal frame.
     FrameScope scope(masm, StackFrame::INTERNAL);
-    ASSERT(descriptor->register_param_count_ == 0 ||
-           eax.is(descriptor->register_params_[param_count - 1]));
+    DCHECK(param_count == 0 ||
+           eax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
     // Push arguments
     for (int i = 0; i < param_count; ++i) {
-      __ push(descriptor->register_params_[i]);
+      __ push(descriptor.GetEnvironmentParameterRegister(i));
     }
-    ExternalReference miss = descriptor->miss_handler();
-    __ CallExternalReference(miss, descriptor->register_param_count_);
+    __ CallExternalReference(miss, param_count);
   }
 
   __ ret(0);
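NOTE: The large deletion above is the heart of this file's change: the per-stub InitializeInterfaceDescriptor functions that filled register_param_count_ / register_params_ tables by hand are gone, replaced by shared CallInterfaceDescriptor definitions plus a single CodeStubDescriptor::Initialize() call per array-constructor stub, and GenerateLightweightMiss now queries the descriptor for its parameter registers and takes the miss handler as an argument. A rough sketch of the shape of the new Initialize() API as used above; the names are simplified and this is not the real class:

using Address = const void*;
enum StubFunctionMode { NOT_JS_FUNCTION_STUB_MODE, JS_FUNCTION_STUB_MODE };
enum HandlerArgumentsMode { DONT_PASS_ARGUMENTS, PASS_ARGUMENTS };
struct Register { int code; };

struct CodeStubDescriptorSketch {
  // Fixed-arity stubs: record the deopt handler and parameter count.
  void Initialize(Address deopt_handler, int stack_parameter_count,
                  StubFunctionMode mode) {
    deopt_handler_ = deopt_handler;
    stack_parameter_count_ = stack_parameter_count;
    function_mode_ = mode;
  }
  // Variable-arity stubs: the live count travels in a register (eax here).
  void Initialize(Register count_register, Address deopt_handler,
                  int hint_parameter_count, StubFunctionMode mode,
                  HandlerArgumentsMode arguments_mode) {
    Initialize(deopt_handler, hint_parameter_count, mode);
    count_register_ = count_register;
    arguments_mode_ = arguments_mode;
  }

  Address deopt_handler_ = nullptr;
  int stack_parameter_count_ = -1;
  StubFunctionMode function_mode_ = NOT_JS_FUNCTION_STUB_MODE;
  Register count_register_{-1};
  HandlerArgumentsMode arguments_mode_ = DONT_PASS_ARGUMENTS;
};
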
@@ -479,7 +127,7 @@
   // store the registers in any particular way, but we do have to store and
   // restore them.
   __ pushad();
-  if (save_doubles_ == kSaveFPRegs) {
+  if (save_doubles()) {
     __ sub(esp, Immediate(kDoubleSize * XMMRegister::kMaxNumRegisters));
     for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
@@ -495,7 +143,7 @@
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()),
       argument_count);
-  if (save_doubles_ == kSaveFPRegs) {
+  if (save_doubles()) {
     for (int i = 0; i < XMMRegister::kMaxNumRegisters; i++) {
       XMMRegister reg = XMMRegister::from_code(i);
       __ movsd(reg, Operand(esp, i * kDoubleSize));
@@ -538,7 +186,7 @@
 void DoubleToIStub::Generate(MacroAssembler* masm) {
   Register input_reg = this->source();
   Register final_result_reg = this->destination();
-  ASSERT(is_truncating());
+  DCHECK(is_truncating());
 
   Label check_negative, process_64_bits, done, done_no_stash;
 
@@ -650,7 +298,7 @@
   }
   __ bind(&done_no_stash);
   if (!final_result_reg.is(result_reg)) {
-    ASSERT(final_result_reg.is(ecx));
+    DCHECK(final_result_reg.is(ecx));
     __ mov(final_result_reg, result_reg);
   }
   __ pop(save_reg);
@@ -733,7 +381,8 @@
 
 void MathPowStub::Generate(MacroAssembler* masm) {
   Factory* factory = isolate()->factory();
-  const Register exponent = eax;
+  const Register exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(exponent.is(eax));
   const Register base = edx;
   const Register scratch = ecx;
   const XMMRegister double_result = xmm3;
@@ -747,7 +396,7 @@
   __ mov(scratch, Immediate(1));
   __ Cvtsi2sd(double_result, scratch);
 
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     Label base_is_smi, unpack_exponent;
     // The exponent and base are supplied as arguments on the stack.
     // This can only happen if the stub is called from non-optimized code.
@@ -778,7 +427,7 @@
     __ j(not_equal, &call_runtime);
     __ movsd(double_exponent,
               FieldOperand(exponent, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
+  } else if (exponent_type() == TAGGED) {
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
     __ SmiUntag(exponent);
     __ jmp(&int_exponent);
@@ -788,10 +437,12 @@
               FieldOperand(exponent, HeapNumber::kValueOffset));
   }
 
-  if (exponent_type_ != INTEGER) {
+  if (exponent_type() != INTEGER) {
     Label fast_power, try_arithmetic_simplification;
     __ DoubleToI(exponent, double_exponent, double_scratch,
-                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
+                 &try_arithmetic_simplification,
+                 &try_arithmetic_simplification);
     __ jmp(&int_exponent);
 
     __ bind(&try_arithmetic_simplification);
@@ -800,7 +451,7 @@
     __ cmp(exponent, Immediate(0x1));
     __ j(overflow, &call_runtime);
 
-    if (exponent_type_ == ON_STACK) {
+    if (exponent_type() == ON_STACK) {
       // Detect square root case.  Crankshaft detects constant +/-0.5 at
       // compile time and uses DoMathPowHalf instead.  We then skip this check
       // for non-constant cases of +/-0.5 as these hardly occur.
@@ -962,10 +613,10 @@
 
   // Returning or bailing out.
   Counters* counters = isolate()->counters();
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
+    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
 
     // The stub is called from non-optimized code, which expects the result
     // as heap number in exponent.
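NOTE: Two mechanical changes run through MathPowStub: fields read directly (exponent_type_, save_doubles_) become accessors that decode the stub's key, and the tagged-exponent register is now named via MathPowTaggedDescriptor::exponent(), with a DCHECK pinning it to eax. For orientation, the int_exponent fast path these branches funnel into is ordinary square-and-multiply; a sketch of that algorithm only, ignoring the -0 handling and the overflow bailout to the runtime that the real stub performs:

double PowInt(double base, int exponent) {
  bool negative = exponent < 0;
  // Negate via unsigned arithmetic so INT_MIN is handled correctly.
  unsigned e = negative ? 0u - static_cast<unsigned>(exponent)
                        : static_cast<unsigned>(exponent);
  double result = 1.0;
  for (double b = base; e != 0; e >>= 1) {
    if (e & 1u) result *= b;  // multiply in this bit's contribution
    b *= b;                   // square for the next bit
  }
  return negative ? 1.0 / result : result;
}
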
@@ -999,27 +650,51 @@
 
 
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
   Label miss;
+  Register receiver = LoadDescriptor::ReceiverRegister();
 
-  if (kind() == Code::KEYED_LOAD_IC) {
-    __ cmp(ecx, Immediate(isolate()->factory()->prototype_string()));
-    __ j(not_equal, &miss);
-  }
-
-  StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
+                                                          ebx, &miss);
   __ bind(&miss);
-  StubCompiler::TailCallBuiltin(
-      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Label slow;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+  Register scratch = eax;
+  DCHECK(!scratch.is(receiver) && !scratch.is(key));
+
+  // Check that the key is an array index, that is Uint32.
+  __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
+  __ j(not_zero, &slow);
+
+  // Everything is fine, call runtime.
+  __ pop(scratch);
+  __ push(receiver);  // receiver
+  __ push(key);       // key
+  __ push(scratch);   // return address
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadElementWithInterceptor), masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+
+  __ bind(&slow);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
 }
 
 
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The key is in edx and the parameter count is in eax.
+  DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
+  DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
 
   // The displacement is used for skipping the frame pointer on the
   // stack. It is the offset of the last parameter (if any) relative
@@ -1098,7 +773,7 @@
   __ mov(Operand(esp, 2 * kPointerSize), edx);
 
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
 
 
@@ -1133,7 +808,7 @@
   __ mov(Operand(esp, 2 * kPointerSize), edx);
 
   // ebx = parameter count (tagged)
-  // ecx = argument count (tagged)
+  // ecx = argument count (smi-tagged)
   // esp[4] = parameter count (tagged)
   // esp[8] = address of receiver argument
   // Compute the mapped parameter count = min(ebx, ecx) in ebx.
@@ -1166,47 +841,52 @@
   __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
 
   // eax = address of new object(s) (tagged)
-  // ecx = argument count (tagged)
+  // ecx = argument count (smi-tagged)
   // esp[0] = mapped parameter count (tagged)
   // esp[8] = parameter count (tagged)
   // esp[12] = address of receiver argument
-  // Get the arguments boilerplate from the current native context into edi.
-  Label has_mapped_parameters, copy;
+  // Get the arguments map from the current native context into edi.
+  Label has_mapped_parameters, instantiate;
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
   __ mov(ebx, Operand(esp, 0 * kPointerSize));
   __ test(ebx, ebx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
-  __ mov(edi, Operand(edi,
-         Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)));
-  __ jmp(&copy, Label::kNear);
+  __ mov(
+      edi,
+      Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
+  __ jmp(&instantiate, Label::kNear);
 
   __ bind(&has_mapped_parameters);
-  __ mov(edi, Operand(edi,
-            Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
-  __ bind(&copy);
+  __ mov(
+      edi,
+      Operand(edi, Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX)));
+  __ bind(&instantiate);
 
   // eax = address of new object (tagged)
   // ebx = mapped parameter count (tagged)
-  // ecx = argument count (tagged)
-  // edi = address of boilerplate object (tagged)
+  // ecx = argument count (smi-tagged)
+  // edi = address of arguments map (tagged)
   // esp[0] = mapped parameter count (tagged)
   // esp[8] = parameter count (tagged)
   // esp[12] = address of receiver argument
   // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ mov(edx, FieldOperand(edi, i));
-    __ mov(FieldOperand(eax, i), edx);
-  }
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+         masm->isolate()->factory()->empty_fixed_array());
 
   // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ mov(edx, Operand(esp, 4 * kPointerSize));
+  __ AssertNotSmi(edx);
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                       Heap::kArgumentsCalleeIndex * kPointerSize),
          edx);
 
   // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(ecx);
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
@@ -1321,7 +1001,7 @@
   __ bind(&runtime);
   __ pop(eax);  // Remove saved parameter count.
   __ mov(Operand(esp, 1 * kPointerSize), ecx);  // Patch argument count.
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
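NOTE: Both arguments-object stubs stop cloning a preallocated "boilerplate" object out of the native context. The context slots now hold only maps (SLOPPY_ARGUMENTS_MAP_INDEX and ALIASED_ARGUMENTS_MAP_INDEX above, STRICT_ARGUMENTS_MAP_INDEX below), and each stub writes the map plus empty fixed arrays for properties and elements itself. The new AssertSmi/AssertNotSmi calls are debug-only guards on the length and callee values being stored.
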
 
 
@@ -1363,22 +1043,22 @@
   // Do the allocation of both objects in one go.
   __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
 
-  // Get the arguments boilerplate from the current native context.
+  // Get the arguments map from the current native context.
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
-  const int offset =
-      Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
+  const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
   __ mov(edi, Operand(edi, offset));
 
-  // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ mov(ebx, FieldOperand(edi, i));
-    __ mov(FieldOperand(eax, i), ebx);
-  }
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+         masm->isolate()->factory()->empty_fixed_array());
 
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  __ AssertSmi(ecx);
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
          ecx);
@@ -1418,7 +1098,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
 }
 
 
@@ -1427,7 +1107,7 @@
   // time or if regexp entry in generated code is turned off runtime switch or
   // at compilation.
 #ifdef V8_INTERPRETED_REGEXP
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
 
   // Stack frame on entry.
@@ -1582,7 +1262,7 @@
   __ JumpIfNotSmi(ebx, &runtime);
   __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
   __ j(above_equal, &runtime);
-  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataOneByteCodeOffset));
   __ Move(ecx, Immediate(1));  // Type is one byte.
 
   // (E) Carry on.  String handling is done.
@@ -1596,7 +1276,7 @@
   // eax: subject string
   // ebx: previous index (smi)
   // edx: code
-  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // ecx: encoding of subject string (1 if one_byte, 0 if two_byte);
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -1641,7 +1321,7 @@
   // esi: original subject string
   // eax: underlying subject string
   // ebx: previous index
-  // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
+  // ecx: encoding of subject string (1 if one_byte 0 if two_byte);
   // edx: code
   // Argument 4: End of string data
   // Argument 3: Start of string data
@@ -1810,7 +1490,7 @@
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 
   // Deferred code for string handling.
   // (7) Not a long external string?  If yes, go to (10).
@@ -1871,21 +1551,19 @@
 
 
 static int NegativeComparisonResult(Condition cc) {
-  ASSERT(cc != equal);
-  ASSERT((cc == less) || (cc == less_equal)
+  DCHECK(cc != equal);
+  DCHECK((cc == less) || (cc == less_equal)
       || (cc == greater) || (cc == greater_equal));
   return (cc == greater || cc == greater_equal) ? LESS : GREATER;
 }
 
 
-static void CheckInputType(MacroAssembler* masm,
-                           Register input,
-                           CompareIC::State expected,
-                           Label* fail) {
+static void CheckInputType(MacroAssembler* masm, Register input,
+                           CompareICState::State expected, Label* fail) {
   Label ok;
-  if (expected == CompareIC::SMI) {
+  if (expected == CompareICState::SMI) {
     __ JumpIfNotSmi(input, fail);
-  } else if (expected == CompareIC::NUMBER) {
+  } else if (expected == CompareICState::NUMBER) {
     __ JumpIfSmi(input, &ok);
     __ cmp(FieldOperand(input, HeapObject::kMapOffset),
            Immediate(masm->isolate()->factory()->heap_number_map()));
@@ -1910,13 +1588,13 @@
 }
 
 
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   Label check_unequal_objects;
   Condition cc = GetCondition();
 
   Label miss;
-  CheckInputType(masm, edx, left_, &miss);
-  CheckInputType(masm, eax, right_, &miss);
+  CheckInputType(masm, edx, left(), &miss);
+  CheckInputType(masm, eax, right(), &miss);
 
   // Compare two smis.
   Label non_smi, smi_done;
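NOTE: ICCompareStub is renamed CompareICStub, and the per-operand states move from stored fields (left_, right_) to CompareICState values recovered through the left() and right() accessors; CheckInputType changes signature to match. The comparison code the stub emits is otherwise untouched.
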
@@ -1982,7 +1660,7 @@
     // If either is a Smi (we know that not both are), then they can only
     // be equal if the other is a HeapNumber. If so, use the slow case.
     STATIC_ASSERT(kSmiTag == 0);
-    ASSERT_EQ(0, Smi::FromInt(0));
+    DCHECK_EQ(0, Smi::FromInt(0));
     __ mov(ecx, Immediate(kSmiTagMask));
     __ and_(ecx, eax);
     __ test(ecx, edx);
@@ -2062,7 +1740,7 @@
   // If one of the numbers was NaN, then the result is always false.
   // The cc is never not-equal.
   __ bind(&unordered);
-  ASSERT(cc != not_equal);
+  DCHECK(cc != not_equal);
   if (cc == less || cc == less_equal) {
     __ mov(eax, Immediate(Smi::FromInt(1)));
   } else {
@@ -2087,23 +1765,15 @@
 
   __ bind(&check_for_strings);
 
-  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
-                                         &check_unequal_objects);
+  __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx,
+                                           &check_unequal_objects);
 
-  // Inline comparison of ASCII strings.
+  // Inline comparison of one-byte strings.
   if (cc == equal) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
-                                                     edx,
-                                                     eax,
-                                                     ecx,
-                                                     ebx);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, edx, eax, ecx, ebx);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
-                                                       edx,
-                                                       eax,
-                                                       ecx,
-                                                       ebx,
-                                                       edi);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
+                                                    edi);
   }
 #ifdef DEBUG
   __ Abort(kUnexpectedFallThroughFromStringComparison);
@@ -2192,7 +1862,7 @@
   // function without changing the state.
   __ cmp(ecx, edi);
   __ j(equal, &done, Label::kFar);
-  __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+  __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &done, Label::kFar);
 
   if (!FLAG_pretenuring_call_new) {
@@ -2215,14 +1885,14 @@
 
   // A monomorphic miss (i.e, here the cache is not uninitialized) goes
   // megamorphic.
-  __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
   __ j(equal, &initialize);
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
   __ bind(&megamorphic);
-  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize),
-         Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ jmp(&done, Label::kFar);
 
   // An uninitialized cache is patched with the function or sentinel to
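NOTE: The sentinel helpers move from TypeFeedbackInfo to TypeFeedbackVector, but the protocol this code maintains is the same: a call feedback slot starts uninitialized, caches the first callee it sees (monomorphic), and is downgraded to the megamorphic sentinel once a different target shows up, after which it stays megamorphic. A plain-C++ sketch of that state machine; real slots are heap values (undefined, a function, or the sentinel) rather than an enum:

enum class SlotState { kUninitialized, kMonomorphic, kMegamorphic };

struct CallFeedbackSlot {
  SlotState state = SlotState::kUninitialized;
  const void* target = nullptr;  // cached callee while monomorphic

  void Observe(const void* callee) {
    switch (state) {
      case SlotState::kUninitialized:  // first call: cache the callee
        state = SlotState::kMonomorphic;
        target = callee;
        break;
      case SlotState::kMonomorphic:    // second distinct callee: give up
        if (target != callee) {
          state = SlotState::kMegamorphic;
          target = nullptr;
        }
        break;
      case SlotState::kMegamorphic:    // sticky; no further transitions
        break;
    }
  }
};
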
@@ -2361,7 +2031,7 @@
     // Load the receiver from the stack.
     __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
 
-    if (call_as_method) {
+    if (needs_checks) {
       __ JumpIfSmi(eax, &wrap);
 
       __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
@@ -2390,7 +2060,7 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
 }
 
 
@@ -2474,7 +2144,7 @@
   // edi - function
   // edx - slot id
   Label miss;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   EmitLoadTypeFeedbackVector(masm, ebx);
@@ -2498,7 +2168,7 @@
   __ TailCallStub(&stub);
 
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+  GenerateMiss(masm);
 
   // The slow case, we need this no matter what to complete a call after a miss.
   CallFunctionNoFeedback(masm,
@@ -2518,7 +2188,7 @@
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   EmitLoadTypeFeedbackVector(masm, ebx);
@@ -2529,7 +2199,7 @@
   __ j(not_equal, &extra_checks_or_miss);
 
   __ bind(&have_js_function);
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     EmitContinueIfStrictOrNative(masm, &cont);
 
     // Load the receiver from the stack.
@@ -2548,7 +2218,7 @@
   __ bind(&slow);
   EmitSlowCase(isolate, masm, argc, &non_function);
 
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     __ bind(&wrap);
     EmitWrapCase(masm, argc, &cont);
   }
@@ -2558,9 +2228,9 @@
 
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
-  __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+  __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &slow_start);
-  __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
   __ j(equal, &miss);
 
   if (!FLAG_trace_ic) {
@@ -2571,13 +2241,13 @@
     __ j(not_equal, &miss);
     __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
                         FixedArray::kHeaderSize),
-           Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+           Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
     __ jmp(&slow_start);
   }
 
   // We are here because tracing is on or we are going monomorphic.
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Miss);
+  GenerateMiss(masm);
 
   // the slow case
   __ bind(&slow_start);
@@ -2595,9 +2265,9 @@
 }
 
 
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
   // Get the receiver of the function from the stack; 1 ~ return address.
-  __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize));
+  __ mov(ecx, Operand(esp, (arg_count() + 1) * kPointerSize));
 
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2609,6 +2279,9 @@
     __ push(edx);
 
     // Call the entry.
+    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+                                               : IC::kCallIC_Customization_Miss;
+
     ExternalReference miss = ExternalReference(IC_Utility(id),
                                                masm->isolate());
     __ CallExternalReference(miss, 4);
@@ -2637,13 +2310,8 @@
 
 
 void CodeStub::GenerateFPStubs(Isolate* isolate) {
-  CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
-  // Stubs might already be in the snapshot, detect that and don't regenerate,
-  // which would lead to code stub initialization state being messed up.
-  Code* save_doubles_code;
-  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
-    save_doubles_code = *(save_doubles.GetCode());
-  }
+  // Generate if not already in cache.
+  CEntryStub(isolate, 1, kSaveFPRegs).GetCode();
   isolate->set_fp_stubs_generated(true);
 }
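NOTE: The explicit FindCodeInCache probe is dropped because, per the new comment, CodeStub::GetCode() already returns the cached code object when the stub exists (for example in the snapshot) and only generates otherwise, so the unconditional call is safe.
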
 
@@ -2665,7 +2333,7 @@
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame(save_doubles_ == kSaveFPRegs);
+  __ EnterExitFrame(save_doubles());
 
   // ebx: pointer to C function  (C callee-saved)
   // ebp: frame pointer  (restored after C call)
@@ -2673,7 +2341,7 @@
   // edi: number of arguments including receiver  (C callee-saved)
   // esi: pointer to the first argument (C callee-saved)
 
-  // Result returned in eax, or eax+edx if result_size_ is 2.
+  // Result returned in eax, or eax+edx if result size is 2.
 
   // Check stack alignment.
   if (FLAG_debug_code) {
@@ -2721,7 +2389,7 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(save_doubles_ == kSaveFPRegs);
+  __ LeaveExitFrame(save_doubles());
   __ ret(0);
 
   // Handling of exception.
@@ -2748,7 +2416,7 @@
 }
 
 
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
   Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
 
@@ -2759,7 +2427,7 @@
   __ mov(ebp, esp);
 
   // Push marker in two places.
-  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  int marker = type();
   __ push(Immediate(Smi::FromInt(marker)));  // context slot
   __ push(Immediate(Smi::FromInt(marker)));  // function slot
   // Save callee-saved registers (C calling conventions).
@@ -2810,7 +2478,7 @@
   // pop the faked function when we return. Notice that we cannot store a
   // reference to the trampoline code directly in this stub, because the
   // builtin stubs may not have been generated yet.
-  if (is_construct) {
+  if (type() == StackFrame::ENTRY_CONSTRUCT) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                       isolate());
     __ mov(edx, Immediate(construct_entry));
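NOTE: JSEntryStub::GenerateBody(masm, is_construct) collapses into Generate(): the frame marker pushed above and the choice between the construct and call entry trampolines are now both derived from the stub's own type() (StackFrame::ENTRY versus ENTRY_CONSTRUCT) rather than a bool parameter.
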
@@ -2867,7 +2535,7 @@
 //
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
-  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
 
   // Fixed register usage throughout the stub.
   Register object = eax;  // Object (lhs).
@@ -2880,12 +2548,12 @@
   static const int kDeltaToCmpImmediate = 2;
   static const int kDeltaToMov = 8;
   static const int kDeltaToMovImmediate = 9;
-  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
-  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
-  static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+  static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
+  static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
+  static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
 
-  ASSERT_EQ(object.code(), InstanceofStub::left().code());
-  ASSERT_EQ(function.code(), InstanceofStub::right().code());
+  DCHECK_EQ(object.code(), InstanceofStub::left().code());
+  DCHECK_EQ(function.code(), InstanceofStub::right().code());
 
   // Get the object and function - they are always both needed.
   Label slow, not_js_object;
@@ -2900,7 +2568,7 @@
 
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     // Look up the function and the map in the instanceof cache.
     Label miss;
     __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
@@ -2927,7 +2595,7 @@
   } else {
     // The constants for the code patching are based on no push instructions
     // at the call site.
-    ASSERT(HasArgsInRegisters());
+    DCHECK(HasArgsInRegisters());
     // Get return address and delta to inlined map check.
     __ mov(scratch, Operand(esp, 0 * kPointerSize));
     __ sub(scratch, Operand(esp, 1 * kPointerSize));
@@ -2959,6 +2627,9 @@
   if (!HasCallSiteInlineCheck()) {
     __ mov(eax, Immediate(0));
     __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ mov(eax, factory->true_value());
+    }
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->true_value());
@@ -2979,6 +2650,9 @@
   if (!HasCallSiteInlineCheck()) {
     __ mov(eax, Immediate(Smi::FromInt(1)));
     __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ mov(eax, factory->false_value());
+    }
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->false_value());
@@ -3006,20 +2680,32 @@
   // Null is not instance of anything.
   __ cmp(object, factory->null_value());
   __ j(not_equal, &object_not_null, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null);
   // Smi values is not instance of anything.
   __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null_or_smi);
   // String values is not instance of anything.
   Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
   __ j(NegateCondition(is_string), &slow, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
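NOTE: InstanceofStub gains a ReturnTrueFalseObject() mode. In it, every outcome path materializes the actual true/false heap values instead of the historical smi 0/1 results, and the instanceof cache probe is skipped (the !ReturnTrueFalseObject() condition added earlier in this function). The fixed left()/right() register definitions become dead and are deleted just below.
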
@@ -3055,12 +2741,6 @@
 }
 
 
-Register InstanceofStub::left() { return eax; }
-
-
-Register InstanceofStub::right() { return edx; }
-
-
 // -------------------------------------------------------------------------
 // StringCharCodeAtGenerator
 
@@ -3114,9 +2794,9 @@
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
-    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
     // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
   if (!index_.is(eax)) {
     // Save the conversion result before the pop instructions below
@@ -3142,7 +2822,7 @@
   __ push(object_);
   __ SmiTag(index_);
   __ push(index_);
-  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
   if (!result_.is(eax)) {
     __ mov(result_, eax);
   }
@@ -3160,7 +2840,7 @@
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
   __ test(code_,
           Immediate(kSmiTagMask |
                     ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
@@ -3171,7 +2851,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  // At this point code register contains smi tagged ASCII char code.
+  // At this point code register contains smi tagged one byte char code.
   __ mov(result_, FieldOperand(result_,
                                code_, times_half_pointer_size,
                                FixedArray::kHeaderSize));
@@ -3206,9 +2886,9 @@
                                           Register count,
                                           Register scratch,
                                           String::Encoding encoding) {
-  ASSERT(!scratch.is(dest));
-  ASSERT(!scratch.is(src));
-  ASSERT(!scratch.is(count));
+  DCHECK(!scratch.is(dest));
+  DCHECK(!scratch.is(src));
+  DCHECK(!scratch.is(count));
 
   // Nothing to do for zero characters.
   Label done;
@@ -3233,74 +2913,6 @@
 }
 
 
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
-                                    Register hash,
-                                    Register character,
-                                    Register scratch) {
-  // hash = (seed + character) + ((seed + character) << 10);
-  if (masm->serializer_enabled()) {
-    __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
-    __ SmiUntag(scratch);
-    __ add(scratch, character);
-    __ mov(hash, scratch);
-    __ shl(scratch, 10);
-    __ add(hash, scratch);
-  } else {
-    int32_t seed = masm->isolate()->heap()->HashSeed();
-    __ lea(scratch, Operand(character, seed));
-    __ shl(scratch, 10);
-    __ lea(hash, Operand(scratch, character, times_1, seed));
-  }
-  // hash ^= hash >> 6;
-  __ mov(scratch, hash);
-  __ shr(scratch, 6);
-  __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
-                                            Register hash,
-                                            Register character,
-                                            Register scratch) {
-  // hash += character;
-  __ add(hash, character);
-  // hash += hash << 10;
-  __ mov(scratch, hash);
-  __ shl(scratch, 10);
-  __ add(hash, scratch);
-  // hash ^= hash >> 6;
-  __ mov(scratch, hash);
-  __ shr(scratch, 6);
-  __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
-                                       Register hash,
-                                       Register scratch) {
-  // hash += hash << 3;
-  __ mov(scratch, hash);
-  __ shl(scratch, 3);
-  __ add(hash, scratch);
-  // hash ^= hash >> 11;
-  __ mov(scratch, hash);
-  __ shr(scratch, 11);
-  __ xor_(hash, scratch);
-  // hash += hash << 15;
-  __ mov(scratch, hash);
-  __ shl(scratch, 15);
-  __ add(hash, scratch);
-
-  __ and_(hash, String::kHashBitMask);
-
-  // if (hash == 0) hash = 27;
-  Label hash_not_zero;
-  __ j(not_zero, &hash_not_zero, Label::kNear);
-  __ mov(hash, Immediate(StringHasher::kZeroHash));
-  __ bind(&hash_not_zero);
-}
-
-
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
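NOTE: The deleted StringHelper::GenerateHash* routines above were the ia32 assembly form of V8's running string hash, a Jenkins one-at-a-time variant seeded from the heap's hash seed; this merge drops the per-port assembly, presumably in favor of the shared C++ StringHasher. Transcribed to plain C++ for reference, with the final mask width assumed and the zero-hash constant taken from the deleted comment; a sketch, not the live implementation:

#include <cstdint>
#include <string>

uint32_t RunningStringHash(const std::string& chars, uint32_t seed) {
  uint32_t hash = seed;            // GenerateHashInit folded the first
  for (unsigned char c : chars) {  // character into this same update.
    hash += c;           // hash += character;
    hash += hash << 10;  // hash += hash << 10;
    hash ^= hash >> 6;   // hash ^= hash >> 6;
  }
  hash += hash << 3;     // GenerateHashGetHash: final avalanche steps.
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= (1u << 30) - 1;        // String::kHashBitMask; width assumed here.
  return hash != 0 ? hash : 27;  // StringHasher::kZeroHash per the comment.
}
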
 
@@ -3403,7 +3015,7 @@
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ test(ebx, Immediate(kStringEncodingMask));
     __ j(zero, &two_byte_slice, Label::kNear);
-    __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
+    __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
     __ jmp(&set_slice_header, Label::kNear);
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
@@ -3450,8 +3062,8 @@
   __ test_b(ebx, kStringEncodingMask);
   __ j(zero, &two_byte_sequential);
 
-  // Sequential ASCII string.  Allocate the result.
-  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+  // Sequential one-byte string. Allocate the result.
+  __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
 
   // eax: result string
   // ecx: result string length
@@ -3507,7 +3119,7 @@
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 
   __ bind(&single_char);
   // eax: string
@@ -3522,11 +3134,11 @@
 }
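
Both encoding dispatches in SubStringStub above use the same bit test; as a one-line sketch (the mask/tag relationship is the one the STATIC_ASSERT guarantees):

    // Two-byte maps have the encoding bit clear, one-byte maps have it set,
    // so "test" followed by "j zero" selects the two-byte path.
    bool IsOneByte(uint32_t instance_type) {
      return (instance_type & kStringEncodingMask) != 0;
    }
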
 
 
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                                      Register left,
-                                                      Register right,
-                                                      Register scratch1,
-                                                      Register scratch2) {
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                                   Register left,
+                                                   Register right,
+                                                   Register scratch1,
+                                                   Register scratch2) {
   Register length = scratch1;
 
   // Compare lengths.
@@ -3549,8 +3161,8 @@
 
   // Compare characters.
   __ bind(&compare_chars);
-  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
-                                &strings_not_equal, Label::kNear);
+  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+                                  &strings_not_equal, Label::kNear);
 
   // Characters are equal.
   __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -3558,12 +3170,9 @@
 }
 
 
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register left,
-                                                        Register right,
-                                                        Register scratch1,
-                                                        Register scratch2,
-                                                        Register scratch3) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3) {
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_compare_native(), 1);
 
@@ -3589,8 +3198,8 @@
 
   // Compare characters.
   Label result_not_equal;
-  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
-                                &result_not_equal, Label::kNear);
+  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                  &result_not_equal, Label::kNear);
 
   // Compare lengths - strings up to min-length are equal.
   __ bind(&compare_lengths);
@@ -3624,13 +3233,9 @@
 }
 
 
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
-    MacroAssembler* masm,
-    Register left,
-    Register right,
-    Register length,
-    Register scratch,
-    Label* chars_not_equal,
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch, Label* chars_not_equal,
     Label::Distance chars_not_equal_near) {
   // Change index to run from -length to -1 by adding length to string
   // start. This means that loop ends when index reaches zero, which
@@ -3676,20 +3281,21 @@
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ASCII strings.
-  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+  // Check that both objects are sequential one-byte strings.
+  __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
 
-  // Compare flat ASCII strings.
+  // Compare flat one-byte strings.
   // Drop arguments from the stack.
   __ pop(ecx);
   __ add(esp, Immediate(2 * kPointerSize));
   __ push(ecx);
-  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+  StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
+                                                  edi);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
@@ -3716,13 +3322,13 @@
 
   // Tail call into the stub that handles binary operations with allocation
   // sites.
-  BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
   __ TailCallStub(&stub);
 }
 
 
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::SMI);
   Label miss;
   __ mov(ecx, edx);
   __ or_(ecx, eax);
@@ -3747,17 +3353,17 @@
 }
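
The mov/or_ prelude above is the usual trick for testing two Smis at once; a sketch of the invariant it relies on (kSmiTag == 0 with the tag in the low bit, as the STATIC_ASSERTs elsewhere in this file state):

    bool BothSmi(intptr_t a, intptr_t b) {
      const intptr_t kSmiTagMask = 1;  // the tag occupies the low bit on ia32
      // OR preserves any set tag bit, so one test covers both operands.
      return ((a | b) & kSmiTagMask) == 0;
    }
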
 
 
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
 
   Label generic_stub;
   Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
 
-  if (left_ == CompareIC::SMI) {
+  if (left() == CompareICState::SMI) {
     __ JumpIfNotSmi(edx, &miss);
   }
-  if (right_ == CompareIC::SMI) {
+  if (right() == CompareICState::SMI) {
     __ JumpIfNotSmi(eax, &miss);
   }
 
@@ -3804,12 +3410,12 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
-                     CompareIC::GENERIC);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
   __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
     __ j(not_equal, &miss);
     __ JumpIfSmi(edx, &unordered);
@@ -3819,7 +3425,7 @@
   }
 
   __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
     __ j(equal, &unordered);
   }
@@ -3829,9 +3435,9 @@
 }
 
 
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
-  ASSERT(GetCondition() == equal);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+  DCHECK(GetCondition() == equal);
 
   // Registers containing left and right operands respectively.
   Register left = edx;
@@ -3861,7 +3467,7 @@
   __ cmp(left, right);
   // Make sure eax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(eax));
+  DCHECK(right.is(eax));
   __ j(not_equal, &done, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -3874,9 +3480,9 @@
 }
 
 
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::UNIQUE_NAME);
-  ASSERT(GetCondition() == equal);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  DCHECK(GetCondition() == equal);
 
   // Registers containing left and right operands respectively.
   Register left = edx;
@@ -3898,15 +3504,15 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
-  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
 
   // Unique names are compared by identity.
   Label done;
   __ cmp(left, right);
   // Make sure eax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(eax));
+  DCHECK(right.is(eax));
   __ j(not_equal, &done, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -3919,11 +3525,11 @@
 }
 
 
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
+  bool equality = Token::IsEqualityOp(op());
 
   // Registers containing left and right operands respectively.
   Register left = edx;
@@ -3974,22 +3580,22 @@
     __ j(not_zero, &do_compare, Label::kNear);
     // Make sure eax is non-zero. At this point input operands are
     // guaranteed to be non-zero.
-    ASSERT(right.is(eax));
+    DCHECK(right.is(eax));
     __ ret(0);
     __ bind(&do_compare);
   }
 
-  // Check that both strings are sequential ASCII.
+  // Check that both strings are sequential one-byte.
   Label runtime;
-  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+  __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
 
-  // Compare flat ASCII strings. Returns when done.
+  // Compare flat one-byte strings. Returns when done.
   if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+                                                  tmp2);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, left, right, tmp1, tmp2, tmp3);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+                                                    tmp2, tmp3);
   }
 
   // Handle more complex cases in runtime.
@@ -4001,7 +3607,7 @@
   if (equality) {
     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
   } else {
-    __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
   }
 
   __ bind(&miss);
@@ -4009,8 +3615,8 @@
 }
 
 
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::OBJECT);
   Label miss;
   __ mov(ecx, edx);
   __ and_(ecx, eax);
@@ -4021,7 +3627,7 @@
   __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
   __ j(not_equal, &miss, Label::kNear);
 
-  ASSERT(GetCondition() == equal);
+  DCHECK(GetCondition() == equal);
   __ sub(eax, edx);
   __ ret(0);
 
@@ -4030,7 +3636,7 @@
 }
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
   Label miss;
   __ mov(ecx, edx);
   __ and_(ecx, eax);
@@ -4051,7 +3657,7 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   {
     // Call the runtime system in a fresh internal frame.
     ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
@@ -4061,7 +3667,7 @@
     __ push(eax);
     __ push(edx);  // And also use them as the arguments.
     __ push(eax);
-    __ push(Immediate(Smi::FromInt(op_)));
+    __ push(Immediate(Smi::FromInt(op())));
     __ CallExternalReference(miss, 3);
     // Compute the entry point of the rewritten stub.
     __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
@@ -4085,7 +3691,7 @@
                                                       Register properties,
                                                       Handle<Name> name,
                                                       Register r0) {
-  ASSERT(name->IsUniqueName());
+  DCHECK(name->IsUniqueName());
 
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
@@ -4103,11 +3709,11 @@
                                    NameDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
     Register entity_name = r0;
     // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
+    DCHECK_EQ(kSmiTagSize, 1);
     __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
                                 kElementsStartOffset - kHeapObjectTag));
     __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
@@ -4124,8 +3730,8 @@
 
     // Check if the entry name is not a unique name.
     __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-                           miss);
+    __ JumpIfNotUniqueNameInstanceType(
+        FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
     __ bind(&good);
   }
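
A C++ sketch of the negative-lookup probing above (kEntrySize == 3 as DCHECKed; the probe-offset formula is an assumption taken from the V8 hash-table sources):

    int FindEntry(void** elements, uint32_t capacity_mask, uint32_t hash,
                  void* name, void* undefined_sentinel, int probes) {
      for (int i = 0; i < probes; i++) {             // kProbes in the stub
        uint32_t offset = (i + i * i) >> 1;          // quadratic probing
        uint32_t index =
            ((hash + offset) & capacity_mask) * 3;   // index *= kEntrySize
        void* entry = elements[index];               // key is the first slot
        if (entry == undefined_sentinel) return -1;  // name is not contained
        if (entry == name) return static_cast<int>(index);
      }
      return -1;  // all probes taken
    }
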
 
@@ -4151,10 +3757,10 @@
                                                       Register name,
                                                       Register r0,
                                                       Register r1) {
-  ASSERT(!elements.is(r0));
-  ASSERT(!elements.is(r1));
-  ASSERT(!name.is(r0));
-  ASSERT(!name.is(r1));
+  DCHECK(!elements.is(r0));
+  DCHECK(!elements.is(r1));
+  DCHECK(!name.is(r0));
+  DCHECK(!name.is(r1));
 
   __ AssertName(name);
 
@@ -4175,7 +3781,7 @@
     __ and_(r0, r1);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
 
     // Check if the key is identical to the name.
@@ -4217,9 +3823,9 @@
 
   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
 
-  Register scratch = result_;
+  Register scratch = result();
 
-  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+  __ mov(scratch, FieldOperand(dictionary(), kCapacityOffset));
   __ dec(scratch);
   __ SmiUntag(scratch);
   __ push(scratch);
@@ -4238,14 +3844,12 @@
     __ and_(scratch, Operand(esp, 0));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
-    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    __ lea(index(), Operand(scratch, scratch, times_2, 0));  // index *= 3.
 
     // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    __ mov(scratch, Operand(dictionary_,
-                            index_,
-                            times_pointer_size,
+    DCHECK_EQ(kSmiTagSize, 1);
+    __ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
                             kElementsStartOffset - kHeapObjectTag));
     __ cmp(scratch, isolate()->factory()->undefined_value());
     __ j(equal, &not_in_dictionary);
@@ -4254,15 +3858,16 @@
     __ cmp(scratch, Operand(esp, 3 * kPointerSize));
     __ j(equal, &in_dictionary);
 
-    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
       // If we hit a key that is not a unique name during negative
       // lookup we have to bailout as this key might be equal to the
       // key we are looking for.
 
       // Check if the entry name is not a unique name.
       __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
-                             &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(
+          FieldOperand(scratch, Map::kInstanceTypeOffset),
+          &maybe_in_dictionary);
     }
   }
 
@@ -4270,19 +3875,19 @@
   // If we are doing negative lookup then probing failure should be
   // treated as a lookup success. For positive lookup probing failure
   // should be treated as lookup failure.
-  if (mode_ == POSITIVE_LOOKUP) {
-    __ mov(result_, Immediate(0));
+  if (mode() == POSITIVE_LOOKUP) {
+    __ mov(result(), Immediate(0));
     __ Drop(1);
     __ ret(2 * kPointerSize);
   }
 
   __ bind(&in_dictionary);
-  __ mov(result_, Immediate(1));
+  __ mov(result(), Immediate(1));
   __ Drop(1);
   __ ret(2 * kPointerSize);
 
   __ bind(&not_in_dictionary);
-  __ mov(result_, Immediate(0));
+  __ mov(result(), Immediate(0));
   __ Drop(1);
   __ ret(2 * kPointerSize);
 }
@@ -4312,11 +3917,8 @@
   __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
   __ jmp(&skip_to_incremental_compacting, Label::kFar);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -4338,7 +3940,7 @@
 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   regs_.Save(masm);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
     Label dont_need_remembered_set;
 
     __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
@@ -4360,10 +3962,7 @@
         mode);
     InformIncrementalMarker(masm);
     regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
 
     __ bind(&dont_need_remembered_set);
@@ -4380,7 +3979,7 @@
 
 
 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   int argument_count = 3;
   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
@@ -4393,7 +3992,7 @@
       ExternalReference::incremental_marking_record_write_function(isolate()),
       argument_count);
 
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
 }
 
 
@@ -4424,10 +4023,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -4472,10 +4068,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -4583,14 +4176,27 @@
   __ mov(ebx, MemOperand(ebp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ pop(ecx);
-  int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
-      ? kPointerSize
-      : 0;
+  int additional_offset =
+      function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
   __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
   __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
 }
 
 
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorLoadStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorKeyedLoadStub stub(isolate());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -4617,7 +4223,7 @@
   __ push(eax);
 
   // Call the entry hook.
-  ASSERT(isolate()->function_entry_hook() != NULL);
+  DCHECK(isolate()->function_entry_hook() != NULL);
   __ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
           RelocInfo::RUNTIME_ENTRY);
   __ add(esp, Immediate(2 * kPointerSize));
@@ -4670,12 +4276,12 @@
   // esp[4] - last argument
   Label normal_sequence;
   if (mode == DONT_OVERRIDE) {
-    ASSERT(FAST_SMI_ELEMENTS == 0);
-    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-    ASSERT(FAST_ELEMENTS == 2);
-    ASSERT(FAST_HOLEY_ELEMENTS == 3);
-    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
-    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+    DCHECK(FAST_SMI_ELEMENTS == 0);
+    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+    DCHECK(FAST_ELEMENTS == 2);
+    DCHECK(FAST_HOLEY_ELEMENTS == 3);
+    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
     // is the low bit set? If so, we are holey and that is good.
     __ test_b(edx, 1);
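
The DCHECKed ordering above is exactly what makes the low-bit test valid; as a sketch:

    // SMI=0, HOLEY_SMI=1, ..., HOLEY_DOUBLE=5: the holey kinds are the odd
    // values, so the kind's low bit distinguishes holey from packed.
    bool IsHoleyElementsKind(int elements_kind) {
      return (elements_kind & 1) != 0;
    }
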
@@ -4785,7 +4391,7 @@
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
-  if (argument_count_ == ANY) {
+  if (argument_count() == ANY) {
     Label not_zero_case, not_one_case;
     __ test(eax, eax);
     __ j(not_zero, &not_zero_case);
@@ -4798,11 +4404,11 @@
 
     __ bind(&not_one_case);
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
-  } else if (argument_count_ == NONE) {
+  } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count_ == ONE) {
+  } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count_ == MORE_THAN_ONE) {
+  } else if (argument_count() == MORE_THAN_ONE) {
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   } else {
     UNREACHABLE();
@@ -4812,7 +4418,7 @@
 
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax : argc (only if argument_count_ == ANY)
+  //  -- eax : argc (only if argument_count() == ANY)
   //  -- ebx : AllocationSite or undefined
   //  -- edi : constructor
   //  -- esp[0] : return address
@@ -4960,9 +4566,9 @@
   Register return_address = edi;
   Register context = esi;
 
-  int argc = ArgumentBits::decode(bit_field_);
-  bool is_store = IsStoreBits::decode(bit_field_);
-  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+  int argc = this->argc();
+  bool is_store = this->is_store();
+  bool call_data_undefined = this->call_data_undefined();
 
   typedef FunctionCallbackArguments FCA;
 
@@ -5066,6 +4672,7 @@
   //  -- ...
   //  -- edx                    : api_function_address
   // -----------------------------------
+  DCHECK(edx.is(ApiGetterDescriptor::function_address()));
 
   // array for v8::Arguments::values_, handler for name and pointer
   // to the values (it is considered a smi by the GC).
diff --git a/src/ia32/code-stubs-ia32.h b/src/ia32/code-stubs-ia32.h
index 8700181..eabb5a5 100644
--- a/src/ia32/code-stubs-ia32.h
+++ b/src/ia32/code-stubs-ia32.h
@@ -5,9 +5,6 @@
 #ifndef V8_IA32_CODE_STUBS_IA32_H_
 #define V8_IA32_CODE_STUBS_IA32_H_
 
-#include "src/macro-assembler.h"
-#include "src/ic-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -17,24 +14,6 @@
                      Label* call_generic_code);
 
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
-  StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
-      : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
-
-  void Generate(MacroAssembler* masm);
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
 class StringHelper : public AllStatic {
  public:
   // Generate code for copying characters using the rep movs instruction.
@@ -47,69 +26,26 @@
                                      Register scratch,
                                      String::Encoding encoding);
 
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character,
-                               Register scratch);
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character,
-                                       Register scratch);
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash,
-                                  Register scratch);
+  // Compares two flat one-byte strings and returns the result in eax.
+  static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
+                                                Register left, Register right,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3);
 
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
-  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
-  Major MajorKey() { return SubString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
-  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
-  // Compares two flat ASCII strings and returns result in eax.
-  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                              Register left,
-                                              Register right,
+  // Compares two flat one-byte strings for equality; returns result in eax.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
                                               Register scratch1,
-                                              Register scratch2,
-                                              Register scratch3);
-
-  // Compares two flat ASCII strings for equality and returns result
-  // in eax.
-  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register scratch1,
-                                            Register scratch2);
+                                              Register scratch2);
 
  private:
-  virtual Major MajorKey() { return StringCompare; }
-  virtual int MinorKey() { return 0; }
-  virtual void Generate(MacroAssembler* masm);
-
-  static void GenerateAsciiCharsCompareLoop(
-      MacroAssembler* masm,
-      Register left,
-      Register right,
-      Register length,
-      Register scratch,
-      Label* chars_not_equal,
+  static void GenerateOneByteCharsCompareLoop(
+      MacroAssembler* masm, Register left, Register right, Register length,
+      Register scratch, Label* chars_not_equal,
       Label::Distance chars_not_equal_near = Label::kFar);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
@@ -117,15 +53,13 @@
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
-  NameDictionaryLookupStub(Isolate* isolate,
-                           Register dictionary,
-                           Register result,
-                           Register index,
-                           LookupMode mode)
-      : PlatformCodeStub(isolate),
-        dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
-
-  void Generate(MacroAssembler* masm);
+  NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
+                           Register result, Register index, LookupMode mode)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = DictionaryBits::encode(dictionary.code()) |
+                 ResultBits::encode(result.code()) |
+                 IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
+  }
 
   static void GenerateNegativeLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -156,24 +90,27 @@
       NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return NameDictionaryLookup; }
-
-  int MinorKey() {
-    return DictionaryBits::encode(dictionary_.code()) |
-        ResultBits::encode(result_.code()) |
-        IndexBits::encode(index_.code()) |
-        LookupModeBits::encode(mode_);
+  Register dictionary() const {
+    return Register::from_code(DictionaryBits::decode(minor_key_));
   }
 
+  Register result() const {
+    return Register::from_code(ResultBits::decode(minor_key_));
+  }
+
+  Register index() const {
+    return Register::from_code(IndexBits::decode(minor_key_));
+  }
+
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
   class DictionaryBits: public BitField<int, 0, 3> {};
   class ResultBits: public BitField<int, 3, 3> {};
   class IndexBits: public BitField<int, 6, 3> {};
   class LookupModeBits: public BitField<LookupMode, 9, 1> {};
 
-  Register dictionary_;
-  Register result_;
-  Register index_;
-  LookupMode mode_;
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
 };
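
The BitField layout above packs all four stub parameters into minor_key_; spelled out by hand (a sketch, with shifts taken from the BitField declarations):

    // bits 0-2: dictionary reg | bits 3-5: result reg | bits 6-8: index reg |
    // bit 9: lookup mode
    uint32_t EncodeMinorKey(int dictionary, int result, int index, int mode) {
      return (dictionary & 7) | ((result & 7) << 3) |
             ((index & 7) << 6) | ((mode & 1) << 9);
    }

    int DecodeDictionaryReg(uint32_t minor_key) { return minor_key & 7; }
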
 
 
@@ -186,16 +123,19 @@
                   RememberedSetAction remembered_set_action,
                   SaveFPRegsMode fp_mode)
       : PlatformCodeStub(isolate),
-        object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
         regs_(object,   // An input reg.
               address,  // An input reg.
               value) {  // One scratch reg.
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
   }
 
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
   enum Mode {
     STORE_BUFFER_ONLY,
     INCREMENTAL,
@@ -218,13 +158,13 @@
       return INCREMENTAL;
     }
 
-    ASSERT(first_instruction == kTwoByteNopInstruction);
+    DCHECK(first_instruction == kTwoByteNopInstruction);
 
     if (second_instruction == kFiveByteJumpInstruction) {
       return INCREMENTAL_COMPACTION;
     }
 
-    ASSERT(second_instruction == kFiveByteNopInstruction);
+    DCHECK(second_instruction == kFiveByteNopInstruction);
 
     return STORE_BUFFER_ONLY;
   }
@@ -232,25 +172,27 @@
   static void Patch(Code* stub, Mode mode) {
     switch (mode) {
       case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
+        DCHECK(GetMode(stub) == INCREMENTAL ||
                GetMode(stub) == INCREMENTAL_COMPACTION);
         stub->instruction_start()[0] = kTwoByteNopInstruction;
         stub->instruction_start()[2] = kFiveByteNopInstruction;
         break;
       case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         stub->instruction_start()[0] = kTwoByteJumpInstruction;
         break;
       case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         stub->instruction_start()[0] = kTwoByteNopInstruction;
         stub->instruction_start()[2] = kFiveByteJumpInstruction;
         break;
     }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 7);
+    DCHECK(GetMode(stub) == mode);
+    CpuFeatures::FlushICache(stub->instruction_start(), 7);
   }
 
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
  private:
   // This is a helper class for freeing up 3 scratch registers, where the third
   // is always ecx (needed for shift operations).  The input is two registers
@@ -266,7 +208,7 @@
           object_(object),
           address_(address),
           scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      DCHECK(!AreAliased(scratch0, object, address, no_reg));
       scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
       if (scratch0.is(ecx)) {
         scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
@@ -277,15 +219,15 @@
       if (address.is(ecx)) {
         address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
       }
-      ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
+      DCHECK(!AreAliased(scratch0_, object_, address_, ecx));
     }
 
     void Save(MacroAssembler* masm) {
-      ASSERT(!address_orig_.is(object_));
-      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      DCHECK(!address_orig_.is(object_));
+      DCHECK(object_.is(object_orig_) || address_.is(address_orig_));
+      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
+      DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
       // We don't have to save scratch0_orig_ because it was given to us as
       // a scratch register.  But if we had to switch to a different reg then
       // we should save the new scratch0_.
@@ -395,9 +337,11 @@
   enum OnNoNeedToInformIncrementalMarker {
     kReturnOnNoNeedToInformIncrementalMarker,
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
-  }
-;
-  void Generate(MacroAssembler* masm);
+  };
+
+  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+  virtual void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -405,32 +349,39 @@
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_) |
-        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
-  }
-
   void Activate(Code* code) {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
   class ObjectBits: public BitField<int, 0, 3> {};
   class ValueBits: public BitField<int, 3, 3> {};
   class AddressBits: public BitField<int, 6, 3> {};
   class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
   class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 10, 1> {};
 
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
   RegisterAllocation regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
 };
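
The self-patching protocol used by GetMode/Patch above, restated as a sketch: byte 0 holds a two-byte nop-or-jump and byte 2 a five-byte one, which is why FlushICache covers exactly 7 bytes (the first branch here is assumed from the part of GetMode elided by the hunk):

    Mode DecodeMode(const uint8_t* instruction_start) {
      if (instruction_start[0] == kTwoByteJumpInstruction)
        return INCREMENTAL;                // short jump taken
      if (instruction_start[2] == kFiveByteJumpInstruction)
        return INCREMENTAL_COMPACTION;     // far jump taken
      return STORE_BUFFER_ONLY;            // both slots still hold nops
    }
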
 
 
diff --git a/src/ia32/codegen-ia32.cc b/src/ia32/codegen-ia32.cc
index fbd5b89..52cf72b 100644
--- a/src/ia32/codegen-ia32.cc
+++ b/src/ia32/codegen-ia32.cc
@@ -7,7 +7,7 @@
 #if V8_TARGET_ARCH_IA32
 
 #include "src/codegen.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
@@ -19,14 +19,14 @@
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
+  DCHECK(!masm->has_frame());
   masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
+  DCHECK(masm->has_frame());
   masm->set_has_frame(false);
 }
 
@@ -37,7 +37,8 @@
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &std::exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::exp;
   ExternalReference::InitializeMathExpData();
 
@@ -62,10 +63,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
@@ -73,9 +74,8 @@
 UnaryMathFunction CreateSqrtFunction() {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::sqrt;
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
   // esp[1 * kPointerSize]: raw double input
@@ -92,10 +92,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
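
Both factories above follow the same publish-a-native-routine pattern; distilled into a sketch using only the calls visible in this file:

    UnaryMathFunction CreateFunction() {
      size_t actual_size;
      // Allocate a small executable buffer.
      byte* buffer =
          static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
      if (buffer == NULL) return &std::sqrt;          // fall back to libm
      MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
      // ... emit the routine's instructions ...
      CodeDesc desc;
      masm.GetCode(&desc);
      CpuFeatures::FlushICache(buffer, actual_size);  // publish to the icache
      base::OS::ProtectCode(buffer, actual_size);     // drop write permission
      return FUNCTION_CAST<UnaryMathFunction>(buffer);
    }
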
 
@@ -189,7 +189,8 @@
 MemMoveFunction CreateMemMoveFunction() {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return NULL;
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
   LabelConverter conv(buffer);
@@ -503,9 +504,9 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   // TODO(jkummerow): It would be nice to register this code creation event
   // with the PROFILE / GDBJIT system.
   return FUNCTION_CAST<MemMoveFunction>(buffer);
@@ -521,26 +522,28 @@
 
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
+  Register scratch = edi;
+  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
+
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
+    DCHECK(allocation_memento_found != NULL);
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch, allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
+  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      ebx,
-                      edi,
+                      target_map,
+                      scratch,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
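
In ordinary C++ terms the transition above reduces to a map store plus a write barrier (a sketch; set_map is assumed from the V8 heap-object accessors):

    void TransitionMap(JSObject* receiver, Map* target_map) {
      // With TRACK_ALLOCATION_SITE, first bail out to the caller's label if
      // the array is immediately followed by an AllocationMemento.
      receiver->set_map(target_map);
      // RecordWriteField then tells the GC about the updated map slot;
      // OMIT_SMI_CHECK is safe because a map is always a heap object.
    }
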
@@ -548,14 +551,19 @@
 
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Return address is on the stack.
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+  DCHECK(value.is(eax));
+  DCHECK(target_map.is(ebx));
+
   Label loop, entry, convert_hole, gc_required, only_change_map;
 
   if (mode == TRACK_ALLOCATION_SITE) {
@@ -669,14 +677,19 @@
 
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Return address is on the stack.
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+  DCHECK(value.is(eax));
+  DCHECK(target_map.is(ebx));
+
   Label loop, entry, convert_hole, gc_required, only_change_map, success;
 
   if (mode == TRACK_ALLOCATION_SITE) {
@@ -849,7 +862,7 @@
   __ j(zero, &seq_string, Label::kNear);
 
   // Handle external strings.
-  Label ascii_external, done;
+  Label one_byte_external, done;
   if (FLAG_debug_code) {
     // Assert that we do not have a cons or slice (indirect strings) here.
     // Sequential strings have already been ruled out.
@@ -864,22 +877,22 @@
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ test_b(result, kStringEncodingMask);
   __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
-  __ j(not_equal, &ascii_external, Label::kNear);
+  __ j(not_equal, &one_byte_external, Label::kNear);
   // Two-byte string.
   __ movzx_w(result, Operand(result, index, times_2, 0));
   __ jmp(&done, Label::kNear);
-  __ bind(&ascii_external);
-  // Ascii string.
+  __ bind(&one_byte_external);
+  // One-byte string.
   __ movzx_b(result, Operand(result, index, times_1, 0));
   __ jmp(&done, Label::kNear);
 
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii;
+  // Dispatch on the encoding: one-byte or two-byte.
+  Label one_byte;
   __ bind(&seq_string);
   STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii, Label::kNear);
+  __ j(not_zero, &one_byte, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
@@ -889,9 +902,9 @@
                                   SeqTwoByteString::kHeaderSize));
   __ jmp(&done, Label::kNear);
 
-  // Ascii string.
+  // One-byte string.
   // Load the byte into the result register.
-  __ bind(&ascii);
+  __ bind(&one_byte);
   __ movzx_b(result, FieldOperand(string,
                                   index,
                                   times_1,
@@ -911,11 +924,12 @@
                                    XMMRegister double_scratch,
                                    Register temp1,
                                    Register temp2) {
-  ASSERT(!input.is(double_scratch));
-  ASSERT(!input.is(result));
-  ASSERT(!result.is(double_scratch));
-  ASSERT(!temp1.is(temp2));
-  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!input.is(double_scratch));
+  DCHECK(!input.is(result));
+  DCHECK(!result.is(double_scratch));
+  DCHECK(!temp1.is(temp2));
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!masm->serializer_enabled());  // External references not serializable.
 
   Label done;
 
@@ -960,7 +974,7 @@
 
 
 CodeAgingHelper::CodeAgingHelper() {
-  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
   CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
   patcher.masm()->push(ebp);
   patcher.masm()->mov(ebp, esp);
@@ -978,7 +992,7 @@
 
 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
   bool result = isolate->code_aging_helper()->IsYoung(sequence);
-  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
+  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
   return result;
 }
 
@@ -1005,7 +1019,7 @@
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
-    CPU::FlushICache(sequence, young_length);
+    CpuFeatures::FlushICache(sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length);
diff --git a/src/ia32/codegen-ia32.h b/src/ia32/codegen-ia32.h
index 3f59c2c..2382388 100644
--- a/src/ia32/codegen-ia32.h
+++ b/src/ia32/codegen-ia32.h
@@ -6,7 +6,7 @@
 #define V8_IA32_CODEGEN_IA32_H_
 
 #include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ia32/cpu-ia32.cc b/src/ia32/cpu-ia32.cc
index 7094f46..00c2043 100644
--- a/src/ia32/cpu-ia32.cc
+++ b/src/ia32/cpu-ia32.cc
@@ -12,13 +12,13 @@
 
 #if V8_TARGET_ARCH_IA32
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
   // No need to flush the instruction cache on Intel. On Intel instruction
   // cache flushing is only necessary when multiple cores are running the same
   // code simultaneously. V8 (and JavaScript) is single threaded and when code
diff --git a/src/ia32/debug-ia32.cc b/src/ia32/debug-ia32.cc
index 7572d3e..4331b08 100644
--- a/src/ia32/debug-ia32.cc
+++ b/src/ia32/debug-ia32.cc
@@ -22,7 +22,7 @@
 // CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-ia32.cc
 // for the precise return instructions sequence.
 void BreakLocationIterator::SetDebugBreakAtReturn() {
-  ASSERT(Assembler::kJSReturnSequenceLength >=
+  DCHECK(Assembler::kJSReturnSequenceLength >=
          Assembler::kCallInstructionLength);
   rinfo()->PatchCodeWithCall(
       debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
@@ -40,20 +40,20 @@
 // A debug break in the frame exit code is identified by the JS frame exit code
 // having been patched with a call instruction.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
   return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   // Check whether the debug break slot instructions have been patched.
   return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   Isolate* isolate = debug_info_->GetIsolate();
   rinfo()->PatchCodeWithCall(
       isolate->builtins()->Slot_DebugBreak()->entry(),
@@ -62,7 +62,7 @@
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
@@ -86,9 +86,9 @@
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non object values
     // are stored as a smi causing it to be untouched by GC.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
+    DCHECK((object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((object_regs & non_object_regs) == 0);
     for (int i = 0; i < kNumJSCallerSaved; i++) {
       int r = JSCallerSavedCode(i);
       Register reg = { r };
@@ -141,7 +141,7 @@
       }
     }
 
-    ASSERT(unused_reg.code() != -1);
+    DCHECK(unused_reg.code() != -1);
 
     // Read current padding counter and skip corresponding number of words.
     __ pop(unused_reg);
@@ -180,45 +180,35 @@
 
 void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for IC load call (from ic-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false);
 }
 
 
 void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   // Register state for IC store call (from ic-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  // -----------------------------------
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
   Generate_DebugBreakCallHelper(
-      masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
+      masm, receiver.bit() | name.bit() | value.bit(), 0, false);
 }
 
 
 void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for keyed IC load call (from ic-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+  GenerateLoadICDebugBreak(masm);
 }
 
 
 void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // Register state for keyed IC load call (from ic-ia32.cc).
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  // -----------------------------------
+  // Register state for keyed IC store call (from ic-ia32.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
   Generate_DebugBreakCallHelper(
-      masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
+      masm, receiver.bit() | name.bit() | value.bit(), 0, false);
 }
 
 
@@ -285,7 +275,7 @@
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
   __ Nop(Assembler::kDebugBreakSlotLength);
-  ASSERT_EQ(Assembler::kDebugBreakSlotLength,
+  DCHECK_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
 
diff --git a/src/ia32/deoptimizer-ia32.cc b/src/ia32/deoptimizer-ia32.cc
index 266899e..f40e23c 100644
--- a/src/ia32/deoptimizer-ia32.cc
+++ b/src/ia32/deoptimizer-ia32.cc
@@ -35,7 +35,7 @@
   for (int i = 0; i < deopt_data->DeoptCount(); i++) {
     int pc_offset = deopt_data->Pc(i)->value();
     if (pc_offset == -1) continue;
-    ASSERT_GE(pc_offset, prev_pc_offset);
+    DCHECK_GE(pc_offset, prev_pc_offset);
     int pc_delta = pc_offset - prev_pc_offset;
     // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
     // if encodable with small pc delta encoding and up to 6 bytes
@@ -81,7 +81,7 @@
       byte* pos_before = reloc_info_writer.pos();
 #endif
       reloc_info_writer.Write(&rinfo);
-      ASSERT(RelocInfo::kMinRelocCommentSize ==
+      DCHECK(RelocInfo::kMinRelocCommentSize ==
              pos_before - reloc_info_writer.pos());
     }
     // Replace relocation information on the code object.
@@ -128,9 +128,6 @@
   // Emit call to lazy deoptimization at all lazy deopt points.
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
@@ -149,11 +146,11 @@
                     reinterpret_cast<intptr_t>(deopt_entry),
                     NULL);
     reloc_info_writer.Write(&rinfo);
-    ASSERT_GE(reloc_info_writer.pos(),
+    DCHECK_GE(reloc_info_writer.pos(),
               reloc_info->address() + ByteArray::kHeaderSize);
-    ASSERT(prev_call_address == NULL ||
+    DCHECK(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
-    ASSERT(call_address + patch_size() <= code->instruction_end());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
@@ -169,7 +166,7 @@
   // Handle the junk part after the new relocation info. We will create
   // a non-live object in the extra space at the end of the former reloc info.
   Address junk_address = reloc_info->address() + reloc_info->Size();
-  ASSERT(junk_address <= reloc_end_address);
+  DCHECK(junk_address <= reloc_end_address);
   isolate->heap()->CreateFillerObjectAt(junk_address,
                                         reloc_end_address - junk_address);
 }
@@ -197,9 +194,9 @@
 
 
 void Deoptimizer::SetPlatformCompiledStubRegisters(
-    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   intptr_t handler =
-      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
   int params = descriptor->GetHandlerParameterCount();
   output_frame->SetRegister(eax.code(), params);
   output_frame->SetRegister(ebx.code(), handler);
@@ -221,7 +218,7 @@
       input_frame_size - parameter_count * kPointerSize -
       StandardFrameConstants::kFixedFrameSize -
       kPointerSize;
-  ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+  DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
       JavaScriptFrameConstants::kLocal0Offset);
   int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
   return (alignment_state == kAlignmentPaddingPushed);
@@ -405,7 +402,7 @@
     USE(start);
     __ push_imm32(i);
     __ jmp(&done);
-    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+    DCHECK(masm()->pc_offset() - start == table_entry_size_);
   }
   __ bind(&done);
 }
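
A pattern worth naming before the remaining files: every ASSERT / ASSERT_EQ / ASSERT_GE in this merge becomes DCHECK / DCHECK_EQ / DCHECK_GE, a straight rename of V8's debug-only check macros. A minimal standalone sketch of the semantics, assuming a simplified reimplementation (the real definitions live in src/base/logging.h and add file/line reporting):

```cpp
// Hedged sketch: an approximation of the DCHECK family this diff migrates
// to, for readers outside the tree. Not V8's actual definitions.
#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define DCHECK(condition)                                           \
  do {                                                              \
    if (!(condition)) {                                             \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition); \
      std::abort();                                                 \
    }                                                               \
  } while (false)
#else
// Release builds: the check (and its argument expression) compiles away.
#define DCHECK(condition) ((void)0)
#endif

#define DCHECK_EQ(a, b) DCHECK((a) == (b))
#define DCHECK_GE(a, b) DCHECK((a) >= (b))

int main() {
  int pc_offset = 8, prev_pc_offset = 4;
  DCHECK_GE(pc_offset, prev_pc_offset);   // checked under -DDEBUG, no-op otherwise
  DCHECK_EQ(4, pc_offset - prev_pc_offset);
  return 0;
}
```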
diff --git a/src/ia32/disasm-ia32.cc b/src/ia32/disasm-ia32.cc
index 3b3ff09..22c2a55 100644
--- a/src/ia32/disasm-ia32.cc
+++ b/src/ia32/disasm-ia32.cc
@@ -3,8 +3,8 @@
 // found in the LICENSE file.
 
 #include <assert.h>
-#include <stdio.h>
 #include <stdarg.h>
+#include <stdio.h>
 
 #include "src/v8.h"
 
@@ -211,7 +211,7 @@
     InstructionDesc* id = &instructions_[bm[i].b];
     id->mnem = bm[i].mnem;
     id->op_order_ = bm[i].op_order_;
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->type = type;
   }
 }
@@ -223,7 +223,7 @@
                                      const char* mnem) {
   for (byte b = start; b <= end; b++) {
     InstructionDesc* id = &instructions_[b];
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->mnem = mnem;
     id->type = type;
   }
@@ -233,7 +233,7 @@
 void InstructionTable::AddJumpConditionalShort() {
   for (byte b = 0x70; b <= 0x7F; b++) {
     InstructionDesc* id = &instructions_[b];
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->mnem = jump_conditional_mnem[b & 0x0F];
     id->type = JUMP_CONDITIONAL_SHORT_INSTR;
   }
@@ -528,84 +528,101 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::F7Instruction(byte* data) {
-  ASSERT_EQ(0xF7, *data);
-  byte modrm = *(data+1);
+  DCHECK_EQ(0xF7, *data);
+  byte modrm = *++data;
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
-  if (mod == 3 && regop != 0) {
-    const char* mnem = NULL;
-    switch (regop) {
-      case 2: mnem = "not"; break;
-      case 3: mnem = "neg"; break;
-      case 4: mnem = "mul"; break;
-      case 5: mnem = "imul"; break;
-      case 7: mnem = "idiv"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
-    return 2;
-  } else if (mod == 3 && regop == eax) {
-    int32_t imm = *reinterpret_cast<int32_t*>(data+2);
-    AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
-    return 6;
-  } else if (regop == eax) {
-    AppendToBuffer("test ");
-    int count = PrintRightOperand(data+1);
-    int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
-    AppendToBuffer(",0x%x", imm);
-    return 1+count+4 /*int32_t*/;
-  } else {
-    UnimplementedInstruction();
-    return 2;
+  const char* mnem = NULL;
+  switch (regop) {
+    case 0:
+      mnem = "test";
+      break;
+    case 2:
+      mnem = "not";
+      break;
+    case 3:
+      mnem = "neg";
+      break;
+    case 4:
+      mnem = "mul";
+      break;
+    case 5:
+      mnem = "imul";
+      break;
+    case 6:
+      mnem = "div";
+      break;
+    case 7:
+      mnem = "idiv";
+      break;
+    default:
+      UnimplementedInstruction();
   }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(data);
+  if (regop == 0) {
+    AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count));
+    count += 4;
+  }
+  return 1 + count;
 }
 
 
 int DisassemblerIA32::D1D3C1Instruction(byte* data) {
   byte op = *data;
-  ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
-  byte modrm = *(data+1);
+  DCHECK(op == 0xD1 || op == 0xD3 || op == 0xC1);
+  byte modrm = *++data;
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
   int imm8 = -1;
-  int num_bytes = 2;
-  if (mod == 3) {
-    const char* mnem = NULL;
-    switch (regop) {
-      case kROL: mnem = "rol"; break;
-      case kROR: mnem = "ror"; break;
-      case kRCL: mnem = "rcl"; break;
-      case kRCR: mnem = "rcr"; break;
-      case kSHL: mnem = "shl"; break;
-      case KSHR: mnem = "shr"; break;
-      case kSAR: mnem = "sar"; break;
-      default: UnimplementedInstruction();
-    }
-    if (op == 0xD1) {
-      imm8 = 1;
-    } else if (op == 0xC1) {
-      imm8 = *(data+2);
-      num_bytes = 3;
-    } else if (op == 0xD3) {
-      // Shift/rotate by cl.
-    }
-    ASSERT_NE(NULL, mnem);
-    AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
-    if (imm8 >= 0) {
-      AppendToBuffer("%d", imm8);
-    } else {
-      AppendToBuffer("cl");
-    }
-  } else {
-    UnimplementedInstruction();
+  const char* mnem = NULL;
+  switch (regop) {
+    case kROL:
+      mnem = "rol";
+      break;
+    case kROR:
+      mnem = "ror";
+      break;
+    case kRCL:
+      mnem = "rcl";
+      break;
+    case kRCR:
+      mnem = "rcr";
+      break;
+    case kSHL:
+      mnem = "shl";
+      break;
+    case KSHR:
+      mnem = "shr";
+      break;
+    case kSAR:
+      mnem = "sar";
+      break;
+    default:
+      UnimplementedInstruction();
   }
-  return num_bytes;
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(data);
+  if (op == 0xD1) {
+    imm8 = 1;
+  } else if (op == 0xC1) {
+    imm8 = *(data + 1);
+    count++;
+  } else if (op == 0xD3) {
+    // Shift/rotate by cl.
+  }
+  if (imm8 >= 0) {
+    AppendToBuffer(",%d", imm8);
+  } else {
+    AppendToBuffer(",cl");
+  }
+  return 1 + count;
 }
 
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::JumpShort(byte* data) {
-  ASSERT_EQ(0xEB, *data);
+  DCHECK_EQ(0xEB, *data);
   byte b = *(data+1);
   byte* dest = data + static_cast<int8_t>(b) + 2;
   AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -615,7 +632,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::JumpConditional(byte* data, const char* comment) {
-  ASSERT_EQ(0x0F, *data);
+  DCHECK_EQ(0x0F, *data);
   byte cond = *(data+1) & 0x0F;
   byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
   const char* mnem = jump_conditional_mnem[cond];
@@ -643,7 +660,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::SetCC(byte* data) {
-  ASSERT_EQ(0x0F, *data);
+  DCHECK_EQ(0x0F, *data);
   byte cond = *(data+1) & 0x0F;
   const char* mnem = set_conditional_mnem[cond];
   AppendToBuffer("%s ", mnem);
@@ -654,7 +671,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::CMov(byte* data) {
-  ASSERT_EQ(0x0F, *data);
+  DCHECK_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   const char* mnem = conditional_move_mnem[cond];
   int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
@@ -665,7 +682,7 @@
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::FPUInstruction(byte* data) {
   byte escape_opcode = *data;
-  ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+  DCHECK_EQ(0xD8, escape_opcode & 0xF8);
   byte modrm_byte = *(data+1);
 
   if (modrm_byte >= 0xC0) {
@@ -954,17 +971,18 @@
         data += 3;
         break;
 
-      case 0x69:  // fall through
-      case 0x6B:
-        { int mod, regop, rm;
-          get_modrm(*(data+1), &mod, &regop, &rm);
-          int32_t imm =
-              *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
-          AppendToBuffer("imul %s,%s,0x%x",
-                         NameOfCPURegister(regop),
-                         NameOfCPURegister(rm),
-                         imm);
-          data += 2 + (*data == 0x6B ? 1 : 4);
+      case 0x6B: {
+        data++;
+        data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
+        AppendToBuffer(",%d", *data);
+        data++;
+      } break;
+
+      case 0x69: {
+        data++;
+        data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
+        AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data));
+        data += 4;
         }
         break;
 
@@ -1373,7 +1391,7 @@
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             int8_t imm8 = static_cast<int8_t>(data[1]);
-            ASSERT(regop == esi || regop == edx);
+            DCHECK(regop == esi || regop == edx);
             AppendToBuffer("%s %s,%d",
                            (regop == esi) ? "psllq" : "psrlq",
                            NameOfXMMRegister(rm),
@@ -1640,7 +1658,7 @@
   if (instr_len == 0) {
     printf("%02x", *data);
   }
-  ASSERT(instr_len > 0);  // Ensure progress.
+  DCHECK(instr_len > 0);  // Ensure progress.
 
   int outp = 0;
   // Instruction bytes.
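
The F7Instruction and D1D3C1Instruction rewrites above drop their mod == 3 special cases and route every operand through PrintRightOperand, so memory operands now disassemble too. Both hinge on the same ModRM split that get_modrm performs; a self-contained sketch of that decoding, assuming the upstream helper's shape (field layout per the IA-32 manuals):

```cpp
// Sketch of the ModRM decode behind get_modrm() in the disassembler above:
// bits 7..6 = mod, bits 5..3 = reg (used as an opcode extension by the
// F7/D1/D3/C1 groups), bits 2..0 = rm.
#include <cstdint>
#include <cstdio>

static void get_modrm(uint8_t data, int* mod, int* regop, int* rm) {
  *mod = (data >> 6) & 3;
  *regop = (data >> 3) & 7;
  *rm = data & 7;
}

int main() {
  // 0xF7 /3 with ModRM 0xDB encodes "neg ebx": mod = 3 (register-direct),
  // regop = 3 selects "neg" in the F7 group switch above, rm = 3 is ebx.
  int mod, regop, rm;
  get_modrm(0xDB, &mod, &regop, &rm);
  std::printf("mod=%d regop=%d rm=%d\n", mod, regop, rm);  // mod=3 regop=3 rm=3
  return 0;
}
```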
diff --git a/src/ia32/frames-ia32.cc b/src/ia32/frames-ia32.cc
index 1acc873..18f1960 100644
--- a/src/ia32/frames-ia32.cc
+++ b/src/ia32/frames-ia32.cc
@@ -7,9 +7,9 @@
 #if V8_TARGET_ARCH_IA32
 
 #include "src/assembler.h"
-#include "src/ia32/assembler-ia32.h"
-#include "src/ia32/assembler-ia32-inl.h"
 #include "src/frames.h"
+#include "src/ia32/assembler-ia32-inl.h"
+#include "src/ia32/assembler-ia32.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/ia32/full-codegen-ia32.cc b/src/ia32/full-codegen-ia32.cc
index 0ea77f0..661d301 100644
--- a/src/ia32/full-codegen-ia32.cc
+++ b/src/ia32/full-codegen-ia32.cc
@@ -6,15 +6,16 @@
 
 #if V8_TARGET_ARCH_IA32
 
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
 #include "src/debug.h"
 #include "src/full-codegen.h"
+#include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 #include "src/parser.h"
 #include "src/scopes.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -31,7 +32,7 @@
   }
 
   ~JumpPatchSite() {
-    ASSERT(patch_site_.is_bound() == info_emitted_);
+    DCHECK(patch_site_.is_bound() == info_emitted_);
   }
 
   void EmitJumpIfNotSmi(Register reg,
@@ -51,7 +52,7 @@
   void EmitPatchInfo() {
     if (patch_site_.is_bound()) {
       int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
-      ASSERT(is_uint8(delta_to_patch_site));
+      DCHECK(is_uint8(delta_to_patch_site));
       __ test(eax, Immediate(delta_to_patch_site));
 #ifdef DEBUG
       info_emitted_ = true;
@@ -64,8 +65,8 @@
  private:
   // jc will be patched with jz, jnc will become jnz.
   void EmitJump(Condition cc, Label* target, Label::Distance distance) {
-    ASSERT(!patch_site_.is_bound() && !info_emitted_);
-    ASSERT(cc == carry || cc == not_carry);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    DCHECK(cc == carry || cc == not_carry);
     __ bind(&patch_site_);
     __ j(cc, target, distance);
   }
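
Context for the DCHECKs in EmitJump: the emitted test/jc pair is dead on entry because TEST always clears the carry flag, and the IC machinery later patches jc/jnc to jz/jnz to arm the site as a live smi check. What that check decides is sketched below with plain integers; the tag values are V8's real ia32 choices, everything else is illustrative:

```cpp
// Sketch of the smi check a patched JumpPatchSite performs. Smi tag = 0 and
// heap-object tag = 1 are V8's actual ia32 tagging; the rest is plain C++,
// not generated machine code.
#include <cstdint>
#include <cstdio>

static bool IsSmi(uint32_t tagged_word) {
  // Mirrors "test reg, 1" + jz: bit 0 clear means small integer.
  return (tagged_word & 1) == 0;
}

int main() {
  uint32_t smi_42 = 42u << 1;           // smis store value << 1, tag bit 0
  uint32_t heap_ref = 0x08031414u | 1;  // heap pointers carry tag bit 1
  std::printf("%d %d\n", IsSmi(smi_42), IsSmi(heap_ref));  // prints "1 0"
  return 0;
}
```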
@@ -123,7 +124,7 @@
     __ j(not_equal, &ok, Label::kNear);
 
     __ mov(ecx, GlobalObjectOperand());
-    __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
 
     __ mov(Operand(esp, receiver_offset), ecx);
 
@@ -142,7 +143,7 @@
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
-    ASSERT(!info->function()->is_generator() || locals_count == 0);
+    DCHECK(!info->function()->is_generator() || locals_count == 0);
     if (locals_count == 1) {
       __ push(Immediate(isolate()->factory()->undefined_value()));
     } else if (locals_count > 1) {
@@ -190,7 +191,7 @@
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ push(edi);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewGlobalContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -198,7 +199,7 @@
       need_write_barrier = false;
     } else {
       __ push(edi);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register = false;
     // Context is returned in eax.  It replaces the context passed to us.
@@ -286,9 +287,9 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         VariableDeclaration* function = scope()->function();
-        ASSERT(function->proxy()->var()->mode() == CONST ||
+        DCHECK(function->proxy()->var()->mode() == CONST ||
                function->proxy()->var()->mode() == CONST_LEGACY);
-        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
         VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
@@ -306,9 +307,9 @@
     }
 
     { Comment cmnt(masm_, "[ Body");
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
       VisitStatements(function()->body());
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
     }
   }
 
@@ -346,7 +347,7 @@
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   Label ok;
 
-  ASSERT(back_edge_target->is_bound());
+  DCHECK(back_edge_target->is_bound());
   int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
   int weight = Min(kMaxBackEdgeWeight,
                    Max(1, distance / kCodeSizeMultiplier));
@@ -416,7 +417,7 @@
     __ Ret(arguments_bytes, ecx);
     // Check that the size of the code used for returning is large enough
     // for the debugger's requirements.
-    ASSERT(Assembler::kJSReturnSequenceLength <=
+    DCHECK(Assembler::kJSReturnSequenceLength <=
            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
     info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
   }
@@ -424,18 +425,18 @@
 
 
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
   __ push(operand);
@@ -500,7 +501,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -527,7 +528,7 @@
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
 }
 
@@ -535,7 +536,7 @@
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
 }
@@ -543,7 +544,7 @@
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ mov(Operand(esp, 0), reg);
 }
@@ -551,7 +552,7 @@
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
@@ -562,7 +563,7 @@
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT(materialize_true == materialize_false);
+  DCHECK(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -595,8 +596,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_true == true_label_);
-  ASSERT(materialize_false == false_label_);
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
 }
 
 
@@ -661,7 +662,7 @@
 
 
 MemOperand FullCodeGenerator::StackOperand(Variable* var) {
-  ASSERT(var->IsStackAllocated());
+  DCHECK(var->IsStackAllocated());
   // Offset is negative because higher indexes are at lower addresses.
   int offset = -var->index() * kPointerSize;
   // Adjust by a (parameter or local) base offset.
@@ -675,7 +676,7 @@
 
 
 MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   if (var->IsContextSlot()) {
     int context_chain_length = scope()->ContextChainLength(var->scope());
     __ LoadContext(scratch, context_chain_length);
@@ -687,7 +688,7 @@
 
 
 void FullCodeGenerator::GetVar(Register dest, Variable* var) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   MemOperand location = VarOperand(var, dest);
   __ mov(dest, location);
 }
@@ -697,17 +698,17 @@
                                Register src,
                                Register scratch0,
                                Register scratch1) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
-  ASSERT(!scratch0.is(src));
-  ASSERT(!scratch0.is(scratch1));
-  ASSERT(!scratch1.is(src));
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!scratch0.is(src));
+  DCHECK(!scratch0.is(scratch1));
+  DCHECK(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ mov(location, src);
 
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
-    ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
+    DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
     __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
@@ -735,7 +736,7 @@
 
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current context.
-  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
   if (generate_debug_code_) {
     // Check that we're not inside a with or catch context.
     __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
@@ -789,7 +790,7 @@
       __ push(esi);
       __ push(Immediate(variable->name()));
       // VariableDeclaration nodes are always introduced in one of four modes.
-      ASSERT(IsDeclaredVariableMode(mode));
+      DCHECK(IsDeclaredVariableMode(mode));
       PropertyAttributes attr =
           IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
       __ push(Immediate(Smi::FromInt(attr)));
@@ -802,7 +803,7 @@
       } else {
         __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
       }
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -817,7 +818,7 @@
     case Variable::UNALLOCATED: {
       globals_->Add(variable->name(), zone());
       Handle<SharedFunctionInfo> function =
-          Compiler::BuildFunctionInfo(declaration->fun(), script());
+          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
       globals_->Add(function, zone());
@@ -855,7 +856,7 @@
       __ push(Immediate(variable->name()));
       __ push(Immediate(Smi::FromInt(NONE)));
       VisitForStackValue(declaration->fun());
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -864,8 +865,8 @@
 
 void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   Variable* variable = declaration->proxy()->var();
-  ASSERT(variable->location() == Variable::CONTEXT);
-  ASSERT(variable->interface()->IsFrozen());
+  DCHECK(variable->location() == Variable::CONTEXT);
+  DCHECK(variable->interface()->IsFrozen());
 
   Comment cmnt(masm_, "[ ModuleDeclaration");
   EmitDebugCheckDeclarationContext(variable);
@@ -925,7 +926,7 @@
   __ push(esi);  // The context is the first argument.
   __ Push(pairs);
   __ Push(Smi::FromInt(DeclareGlobalsFlags()));
-  __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
@@ -933,7 +934,7 @@
 void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
   // Call the runtime to declare the modules.
   __ Push(descriptions);
-  __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
   // Return value is ignored.
 }
 
@@ -988,7 +989,8 @@
 
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
-    Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1116,7 +1118,7 @@
   // No need for a write barrier, we are storing a Smi in the feedback vector.
   __ LoadHeapObject(ebx, FeedbackVector());
   __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
-         Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
 
   __ mov(ebx, Immediate(Smi::FromInt(1)));  // Smi indicates slow check
   __ mov(ecx, Operand(esp, 0 * kPointerSize));  // Get enumerated object
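
The TypeFeedbackInfo → TypeFeedbackVector rename above tracks feedback moving into a dedicated per-function vector, one slot per site, with a shared sentinel marking sites that have seen too many receiver shapes. A hedged sketch of that recording discipline follows; the names and layout are illustrative, not V8's:

```cpp
// Illustrative-only model of per-site type feedback with a megamorphic
// sentinel. V8's real vector holds heap objects and is GC-aware; an int
// stands in for a map identity here.
#include <cstdio>
#include <vector>

struct FeedbackVector {
  enum : int { kUninitialized = -1, kMegamorphicSentinel = -2 };
  std::vector<int> slots;  // one entry per feedback slot

  explicit FeedbackVector(size_t n) : slots(n, kUninitialized) {}

  void Record(size_t slot, int map_id) {
    int& s = slots[slot];
    if (s == kUninitialized) s = map_id;               // monomorphic
    else if (s != map_id) s = kMegamorphicSentinel;    // saw a second shape
  }
};

int main() {
  FeedbackVector v(1);
  v.Record(0, 7);
  v.Record(0, 7);  // still monomorphic
  v.Record(0, 9);  // now megamorphic
  std::printf("slot 0 = %d\n", v.slots[0]);  // -2
  return 0;
}
```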
@@ -1155,7 +1157,7 @@
 
   // For proxies, no filtering is done.
   // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
-  ASSERT(Smi::FromInt(0) == 0);
+  DCHECK(Smi::FromInt(0) == 0);
   __ test(edx, edx);
   __ j(zero, &update_each);
 
@@ -1207,15 +1209,6 @@
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
-  // var iterable = subject
-  VisitForAccumulatorValue(stmt->assign_iterable());
-
-  // As with for-in, skip the loop if the iterator is null or undefined.
-  __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
-  __ j(equal, loop_statement.break_label());
-  __ CompareRoot(eax, Heap::kNullValueRootIndex);
-  __ j(equal, loop_statement.break_label());
-
   // var iterator = iterable[Symbol.iterator]();
   VisitForEffect(stmt->assign_iterator());
 
@@ -1264,9 +1257,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(),
-                            info->strict_mode(),
-                            info->is_generator());
+    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
     __ mov(ebx, Immediate(info));
     __ CallStub(&stub);
   } else {
@@ -1275,7 +1266,7 @@
     __ push(Immediate(pretenure
                       ? isolate()->factory()->true_value()
                       : isolate()->factory()->false_value()));
-    __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+    __ CallRuntime(Runtime::kNewClosure, 3);
   }
   context()->Plug(eax);
 }
@@ -1287,7 +1278,26 @@
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cnmt(masm_, "[ SuperReference ");
+
+  __ mov(LoadDescriptor::ReceiverRegister(),
+         Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ mov(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  __ cmp(eax, isolate()->factory()->undefined_value());
+  Label done;
+  __ j(not_equal, &done);
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
   Register context = esi;
@@ -1337,8 +1347,13 @@
 
   // All extension objects were empty and it is safe to use a global
   // load IC call.
-  __ mov(edx, GlobalObjectOperand());
-  __ mov(ecx, var->name());
+  __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+  __ mov(LoadDescriptor::NameRegister(), proxy->var()->name());
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
+  }
+
   ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
       ? NOT_CONTEXTUAL
       : CONTEXTUAL;
@@ -1349,7 +1364,7 @@
 
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
                                                                 Label* slow) {
-  ASSERT(var->IsContextSlot());
+  DCHECK(var->IsContextSlot());
   Register context = esi;
   Register temp = ebx;
 
@@ -1377,7 +1392,7 @@
 }
 
 
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
                                                   TypeofState typeof_state,
                                                   Label* slow,
                                                   Label* done) {
@@ -1386,8 +1401,9 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
+  Variable* var = proxy->var();
   if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
     __ jmp(done);
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
@@ -1400,7 +1416,7 @@
         __ mov(eax, isolate()->factory()->undefined_value());
       } else {  // LET || CONST
         __ push(Immediate(var->name()));
-        __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
       }
     }
     __ jmp(done);
@@ -1418,10 +1434,12 @@
   switch (var->location()) {
     case Variable::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
-      // Use inline caching. Variable name is passed in ecx and the global
-      // object in eax.
-      __ mov(edx, GlobalObjectOperand());
-      __ mov(ecx, var->name());
+      __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+      __ mov(LoadDescriptor::NameRegister(), var->name());
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
+      }
       CallLoadIC(CONTEXTUAL);
       context()->Plug(eax);
       break;
@@ -1438,7 +1456,7 @@
         // always looked up dynamically, i.e. in that case
         //     var->location() == LOOKUP.
         // always holds.
-        ASSERT(var->scope() != NULL);
+        DCHECK(var->scope() != NULL);
 
         // Check if the binding really needs an initialization check. The check
         // can be skipped in the following situation: we have a LET or CONST
@@ -1461,8 +1479,8 @@
           skip_init_check = false;
         } else {
           // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+          DCHECK(proxy->position() != RelocInfo::kNoPosition);
           skip_init_check = var->mode() != CONST_LEGACY &&
               var->initializer_position() < proxy->position();
         }
@@ -1477,10 +1495,10 @@
             // Throw a reference error when using an uninitialized let/const
             // binding in harmony mode.
             __ push(Immediate(var->name()));
-            __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
           } else {
             // Uninitialized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST_LEGACY);
+            DCHECK(var->mode() == CONST_LEGACY);
             __ mov(eax, isolate()->factory()->undefined_value());
           }
           __ bind(&done);
@@ -1497,11 +1515,11 @@
       Label done, slow;
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
       __ bind(&slow);
       __ push(esi);  // Context.
       __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
       __ bind(&done);
       context()->Plug(eax);
       break;
@@ -1532,7 +1550,7 @@
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(expr->pattern()));
   __ push(Immediate(expr->flags()));
-  __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ mov(ebx, eax);
 
   __ bind(&materialized);
@@ -1544,7 +1562,7 @@
   __ bind(&runtime_allocate);
   __ push(ebx);
   __ push(Immediate(Smi::FromInt(size)));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ pop(ebx);
 
   __ bind(&allocated);
@@ -1594,7 +1612,7 @@
     __ push(Immediate(Smi::FromInt(expr->literal_index())));
     __ push(Immediate(constant_properties));
     __ push(Immediate(Smi::FromInt(flags)));
-    __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
     __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -1629,14 +1647,15 @@
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ mov(ecx, Immediate(key->value()));
-            __ mov(edx, Operand(esp, 0));
+            DCHECK(StoreDescriptor::ValueRegister().is(eax));
+            __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
+            __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1648,7 +1667,7 @@
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
-          __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
+          __ push(Immediate(Smi::FromInt(SLOPPY)));  // Strict mode
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
           __ Drop(3);
@@ -1682,11 +1701,11 @@
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
     __ push(Immediate(Smi::FromInt(NONE)));
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
 
   if (expr->has_function()) {
-    ASSERT(result_saved);
+    DCHECK(result_saved);
     __ push(Operand(esp, 0));
     __ CallRuntime(Runtime::kToFastProperties, 1);
   }
@@ -1710,7 +1729,7 @@
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
   Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
+  DCHECK_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
   bool has_constant_fast_elements =
@@ -1731,7 +1750,7 @@
     __ push(Immediate(Smi::FromInt(expr->literal_index())));
     __ push(Immediate(constant_elements));
     __ push(Immediate(Smi::FromInt(flags)));
-    __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
   } else {
     __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
@@ -1791,7 +1810,7 @@
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  ASSERT(expr->target()->IsValidReferenceExpression());
+  DCHECK(expr->target()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ Assignment");
 
@@ -1813,9 +1832,9 @@
       break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
-        // We need the receiver both on the stack and in edx.
+        // We need the receiver both on the stack and in the register.
         VisitForStackValue(property->obj());
-        __ mov(edx, Operand(esp, 0));
+        __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1824,8 +1843,8 @@
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ mov(edx, Operand(esp, kPointerSize));  // Object.
-        __ mov(ecx, Operand(esp, 0));             // Key.
+        __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
+        __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1907,12 +1926,12 @@
   VisitForStackValue(expr->expression());
 
   switch (expr->yield_kind()) {
-    case Yield::SUSPEND:
+    case Yield::kSuspend:
       // Pop value from top-of-stack slot; box result into result register.
       EmitCreateIteratorResult(false);
       __ push(result_register());
       // Fall through.
-    case Yield::INITIAL: {
+    case Yield::kInitial: {
       Label suspend, continuation, post_runtime, resume;
 
       __ jmp(&suspend);
@@ -1922,7 +1941,7 @@
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
-      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
              Immediate(Smi::FromInt(continuation.pos())));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
@@ -1933,7 +1952,7 @@
       __ cmp(esp, ebx);
       __ j(equal, &post_runtime);
       __ push(eax);  // generator object
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
@@ -1945,7 +1964,7 @@
       break;
     }
 
-    case Yield::FINAL: {
+    case Yield::kFinal: {
       VisitForAccumulatorValue(expr->generator_object());
       __ mov(FieldOperand(result_register(),
                           JSGeneratorObject::kContinuationOffset),
@@ -1957,7 +1976,7 @@
       break;
     }
 
-    case Yield::DELEGATING: {
+    case Yield::kDelegating: {
       VisitForStackValue(expr->generator_object());
 
       // Initial stack layout is as follows:
@@ -1966,6 +1985,9 @@
 
       Label l_catch, l_try, l_suspend, l_continuation, l_resume;
       Label l_next, l_call, l_loop;
+      Register load_receiver = LoadDescriptor::ReceiverRegister();
+      Register load_name = LoadDescriptor::NameRegister();
+
       // Initial send value is undefined.
       __ mov(eax, isolate()->factory()->undefined_value());
       __ jmp(&l_next);
@@ -1973,10 +1995,10 @@
       // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
       __ bind(&l_catch);
       handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
-      __ mov(ecx, isolate()->factory()->throw_string());  // "throw"
-      __ push(ecx);                                      // "throw"
-      __ push(Operand(esp, 2 * kPointerSize));           // iter
-      __ push(eax);                                      // exception
+      __ mov(load_name, isolate()->factory()->throw_string());  // "throw"
+      __ push(load_name);                                       // "throw"
+      __ push(Operand(esp, 2 * kPointerSize));                  // iter
+      __ push(eax);                                             // exception
       __ jmp(&l_call);
 
       // try { received = %yield result }
@@ -1994,14 +2016,14 @@
       const int generator_object_depth = kPointerSize + handler_size;
       __ mov(eax, Operand(esp, generator_object_depth));
       __ push(eax);                                      // g
-      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
              Immediate(Smi::FromInt(l_continuation.pos())));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
       __ mov(ecx, esi);
       __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
                           kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
       __ pop(eax);                                       // result
@@ -2011,15 +2033,20 @@
 
       // receiver = iter; f = iter.next; arg = received;
       __ bind(&l_next);
-      __ mov(ecx, isolate()->factory()->next_string());  // "next"
-      __ push(ecx);
-      __ push(Operand(esp, 2 * kPointerSize));           // iter
-      __ push(eax);                                      // received
+
+      __ mov(load_name, isolate()->factory()->next_string());
+      __ push(load_name);                           // "next"
+      __ push(Operand(esp, 2 * kPointerSize));      // iter
+      __ push(eax);                                 // received
 
       // result = receiver[f](arg);
       __ bind(&l_call);
-      __ mov(edx, Operand(esp, kPointerSize));
-      Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+      __ mov(load_receiver, Operand(esp, kPointerSize));
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Immediate(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
+      }
+      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
       CallIC(ic, TypeFeedbackId::None());
       __ mov(edi, eax);
       __ mov(Operand(esp, 2 * kPointerSize), edi);
@@ -2032,8 +2059,13 @@
       // if (!result.done) goto l_try;
       __ bind(&l_loop);
       __ push(eax);                                      // save result
-      __ mov(edx, eax);                                  // result
-      __ mov(ecx, isolate()->factory()->done_string());  // "done"
+      __ Move(load_receiver, eax);                       // result
+      __ mov(load_name,
+             isolate()->factory()->done_string());       // "done"
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Immediate(Smi::FromInt(expr->DoneFeedbackSlot())));
+      }
       CallLoadIC(NOT_CONTEXTUAL);                        // result.done in eax
       Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
       CallIC(bool_ic);
@@ -2041,8 +2073,13 @@
       __ j(zero, &l_try);
 
       // result.value
-      __ pop(edx);                                        // result
-      __ mov(ecx, isolate()->factory()->value_string());  // "value"
+      __ pop(load_receiver);                              // result
+      __ mov(load_name,
+             isolate()->factory()->value_string());       // "value"
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Immediate(Smi::FromInt(expr->ValueFeedbackSlot())));
+      }
       CallLoadIC(NOT_CONTEXTUAL);                         // result.value in eax
       context()->DropAndPlug(2, eax);                     // drop iter and g
       break;
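
The Yield::kDelegating case just rewritten is the desugaring of yield*: call iterator.next(received) (or .throw when an exception arrives), loop while result.done is false, and hand result.value onward either way, which is why the block loads "next", "done" and "value" through load ICs. A plain C++ stand-in for that driver loop, protocol only, not V8's object model:

```cpp
// Sketch of the delegating-yield protocol compiled above. CountingIterator
// is a hypothetical stand-in for the delegated-to iterator object.
#include <cstdio>

struct IterResult { int value; bool done; };

struct CountingIterator {
  int i = 0;
  IterResult next(int /*received*/) {
    ++i;
    return {i, i >= 3};  // yields 1 and 2, then completes with 3
  }
};

int main() {
  CountingIterator iter;
  int received = 0;  // initial send value is undefined in the real protocol
  for (;;) {
    IterResult result = iter.next(received);   // result = receiver[f](arg)
    if (result.done) {                         // "done" load, then branch
      std::printf("return %d\n", result.value);
      break;
    }
    std::printf("yield %d\n", result.value);   // "value" load, re-yielded
    received = 0;
  }
  return 0;
}
```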
@@ -2055,7 +2092,7 @@
     Expression *value,
     JSGeneratorObject::ResumeMode resume_mode) {
   // The value stays in eax, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
   // is read to throw the value when the resumed generator is already closed.
   // ebx will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
@@ -2135,7 +2172,7 @@
   __ push(ebx);
   __ push(result_register());
   __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
   // Not reached: the runtime call returns elsewhere.
   __ Abort(kGeneratorFailedToResume);
 
@@ -2149,14 +2186,14 @@
   } else {
     // Throw the provided value.
     __ push(eax);
-    __ CallRuntime(Runtime::kHiddenThrow, 1);
+    __ CallRuntime(Runtime::kThrow, 1);
   }
   __ jmp(&done);
 
   // Throw error if we attempt to operate on a running generator.
   __ bind(&wrong_state);
   __ push(ebx);
-  __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
 
   __ bind(&done);
   context()->Plug(result_register());
@@ -2174,7 +2211,7 @@
 
   __ bind(&gc_required);
   __ Push(Smi::FromInt(map->instance_size()));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ mov(context_register(),
          Operand(ebp, StandardFrameConstants::kContextOffset));
 
@@ -2182,7 +2219,7 @@
   __ mov(ebx, map);
   __ pop(ecx);
   __ mov(edx, isolate()->factory()->ToBoolean(done));
-  ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+  DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
   __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
   __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
          isolate()->factory()->empty_fixed_array());
@@ -2201,16 +2238,43 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  ASSERT(!key->value()->IsSmi());
-  __ mov(ecx, Immediate(key->value()));
-  CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  DCHECK(!key->value()->IsSmi());
+  __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Immediate(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ push(eax);
+  VisitForStackValue(super_ref->this_var());
+  __ push(Immediate(key->value()));
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, prop->PropertyFeedbackId());
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Immediate(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallIC(ic);
+  } else {
+    CallIC(ic, prop->PropertyFeedbackId());
+  }
 }
 
 
@@ -2230,8 +2294,8 @@
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
-  BinaryOpICStub stub(isolate(), op, mode);
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
 
@@ -2313,16 +2377,16 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
-  BinaryOpICStub stub(isolate(), op, mode);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  ASSERT(expr->IsValidReferenceExpression());
+  DCHECK(expr->IsValidReferenceExpression());
 
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot.
@@ -2345,9 +2409,10 @@
     case NAMED_PROPERTY: {
       __ push(eax);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
-      __ mov(edx, eax);
-      __ pop(eax);  // Restore value.
-      __ mov(ecx, prop->key()->AsLiteral()->value());
+      __ Move(StoreDescriptor::ReceiverRegister(), eax);
+      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      __ mov(StoreDescriptor::NameRegister(),
+             prop->key()->AsLiteral()->value());
       CallStoreIC();
       break;
     }
@@ -2355,12 +2420,11 @@
       __ push(eax);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
-      __ mov(ecx, eax);
-      __ pop(edx);  // Receiver.
-      __ pop(eax);  // Restore value.
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ Move(StoreDescriptor::NameRegister(), eax);
+      __ pop(StoreDescriptor::ReceiverRegister());  // Receiver.
+      __ pop(StoreDescriptor::ValueRegister());     // Restore value.
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic);
       break;
     }
@@ -2380,34 +2444,24 @@
 }
 
 
-void FullCodeGenerator::EmitCallStoreContextSlot(
-    Handle<String> name, StrictMode strict_mode) {
-  __ push(eax);  // Value.
-  __ push(esi);  // Context.
-  __ push(Immediate(name));
-  __ push(Immediate(Smi::FromInt(strict_mode)));
-  __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
-}
-
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(ecx, var->name());
-    __ mov(edx, GlobalObjectOperand());
+    __ mov(StoreDescriptor::NameRegister(), var->name());
+    __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
     CallStoreIC();
 
   } else if (op == Token::INIT_CONST_LEGACY) {
     // Const initializers need a write barrier.
-    ASSERT(!var->IsParameter());  // No const parameters.
+    DCHECK(!var->IsParameter());  // No const parameters.
     if (var->IsLookupSlot()) {
       __ push(eax);
       __ push(esi);
       __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
     } else {
-      ASSERT(var->IsStackLocal() || var->IsContextSlot());
+      DCHECK(var->IsStackLocal() || var->IsContextSlot());
       Label skip;
       MemOperand location = VarOperand(var, ecx);
       __ mov(edx, location);
@@ -2419,28 +2473,30 @@
 
   } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
-    if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
-    } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-      Label assign;
-      MemOperand location = VarOperand(var, ecx);
-      __ mov(edx, location);
-      __ cmp(edx, isolate()->factory()->the_hole_value());
-      __ j(not_equal, &assign, Label::kNear);
-      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
-      __ bind(&assign);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-    }
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, ecx);
+    __ mov(edx, location);
+    __ cmp(edx, isolate()->factory()->the_hole_value());
+    __ j(not_equal, &assign, Label::kNear);
+    __ push(Immediate(var->name()));
+    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    __ bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
 
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
     if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
+      // Assignment to var.
+      __ push(eax);  // Value.
+      __ push(esi);  // Context.
+      __ push(Immediate(var->name()));
+      __ push(Immediate(Smi::FromInt(strict_mode())));
+      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
     } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       MemOperand location = VarOperand(var, ecx);
       if (generate_debug_code_ && op == Token::INIT_LET) {
         // Check for an uninitialized let binding.
@@ -2461,13 +2517,13 @@
   // esp[0] : receiver
 
   Property* prop = expr->target()->AsProperty();
-  ASSERT(prop != NULL);
-  ASSERT(prop->key()->IsLiteral());
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ mov(ecx, prop->key()->AsLiteral()->value());
-  __ pop(edx);
+  __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
+  __ pop(StoreDescriptor::ReceiverRegister());
   CallStoreIC(expr->AssignmentFeedbackId());
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
@@ -2480,13 +2536,12 @@
   // esp[0]            : key
   // esp[kPointerSize] : receiver
 
-  __ pop(ecx);  // Key.
-  __ pop(edx);
+  __ pop(StoreDescriptor::NameRegister());  // Key.
+  __ pop(StoreDescriptor::ReceiverRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(eax));
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = strict_mode() == SLOPPY
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
   CallIC(ic, expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2499,16 +2554,20 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForAccumulatorValue(expr->obj());
-    __ mov(edx, result_register());
-    EmitNamedPropertyLoad(expr);
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
     PrepareForBailoutForId(expr->LoadId(), TOS_REG);
     context()->Plug(eax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
-    __ pop(edx);                     // Object.
-    __ mov(ecx, result_register());  // Key.
+    __ pop(LoadDescriptor::ReceiverRegister());                  // Object.
+    __ Move(LoadDescriptor::NameRegister(), result_register());  // Key.
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
@@ -2526,11 +2585,10 @@
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
 
-  CallIC::CallType call_type = callee->IsVariableProxy()
-      ? CallIC::FUNCTION
-      : CallIC::METHOD;
+  CallICState::CallType call_type =
+      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
   // Get the target function.
-  if (call_type == CallIC::FUNCTION) {
+  if (call_type == CallICState::FUNCTION) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
       PrepareForBailout(callee, NO_REGISTERS);
@@ -2540,8 +2598,9 @@
     __ push(Immediate(isolate()->factory()->undefined_value()));
   } else {
     // Load the function from the receiver.
-    ASSERT(callee->IsProperty());
-    __ mov(edx, Operand(esp, 0));
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
@@ -2553,6 +2612,42 @@
 }
 
 
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ push(eax);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ push(eax);
+  __ push(Operand(esp, kPointerSize));
+  __ push(eax);
+  __ push(Immediate(key->value()));
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ mov(Operand(esp, kPointerSize), eax);
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2562,10 +2657,9 @@
   Expression* callee = expr->expression();
 
   // Load the function from the receiver.
-  ASSERT(callee->IsProperty());
-  __ mov(edx, Operand(esp, 0));
-  // Move the key into the right register for the keyed load IC.
-  __ mov(ecx, eax);
+  DCHECK(callee->IsProperty());
+  __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
+  __ mov(LoadDescriptor::NameRegister(), eax);
   EmitKeyedPropertyLoad(callee->AsProperty());
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
@@ -2573,11 +2667,11 @@
   __ push(Operand(esp, 0));
   __ mov(Operand(esp, kPointerSize), eax);
 
-  EmitCall(expr, CallIC::METHOD);
+  EmitCall(expr, CallICState::METHOD);
 }
 
 
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2614,6 +2708,8 @@
     __ push(Immediate(isolate()->factory()->undefined_value()));
   }
 
+  // Push the enclosing function.
+  __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
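+  // (This is the extra, sixth argument to the runtime call below.)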
   // Push the receiver of the enclosing function.
   __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
   // Push the language mode.
@@ -2623,7 +2719,7 @@
   __ push(Immediate(Smi::FromInt(scope()->start_position())));
 
   // Do the runtime call.
-  __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
 }
 
 
@@ -2683,14 +2779,14 @@
     { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed by
       // eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
     }
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in eax) and
     // the object holding it (returned in edx).
     __ push(context_register());
     __ push(Immediate(proxy->name()));
-    __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
     __ push(eax);  // Function.
     __ push(edx);  // Receiver.
 
@@ -2714,17 +2810,23 @@
 
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(property->obj());
-    }
-    if (property->key()->IsPropertyName()) {
-      EmitCallWithLoadIC(expr);
+    bool is_named_call = property->key()->IsPropertyName();
+    // Named super.x() calls are handled in EmitSuperCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithLoadIC(expr, property->key());
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
     }
-
   } else {
-    ASSERT(call_type == Call::OTHER_CALL);
+    DCHECK(call_type == Call::OTHER_CALL);
     // Call to an arbitrary expression not handled specially above.
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(callee);
@@ -2736,7 +2838,7 @@
 
 #ifdef DEBUG
   // RecordJSReturnSite should have been called.
-  ASSERT(expr->return_is_recorded_);
+  DCHECK(expr->return_is_recorded_);
 #endif
 }
 
@@ -2770,7 +2872,7 @@
   // Record call targets in unoptimized code.
   if (FLAG_pretenuring_call_new) {
     EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
-    ASSERT(expr->AllocationSiteFeedbackSlot() ==
+    DCHECK(expr->AllocationSiteFeedbackSlot() ==
            expr->CallNewFeedbackSlot() + 1);
   }
 
@@ -2786,7 +2888,7 @@
 
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2807,7 +2909,7 @@
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2828,7 +2930,7 @@
 
 void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2860,7 +2962,7 @@
 
 void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2882,7 +2984,7 @@
 
 void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2907,7 +3009,7 @@
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2997,7 +3099,7 @@
 
 void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3019,7 +3121,7 @@
 
 void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3047,7 +3149,7 @@
 
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3069,7 +3171,7 @@
 
 void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3091,7 +3193,7 @@
 
 
 void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -3123,7 +3225,7 @@
 
 void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
   VisitForStackValue(args->at(0));
@@ -3147,7 +3249,7 @@
 
 void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
   // parameter count in eax.
@@ -3161,7 +3263,7 @@
 
 
 void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -3185,7 +3287,7 @@
 
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
   VisitForAccumulatorValue(args->at(0));
@@ -3225,7 +3327,7 @@
 
   // Functions have class 'Function'.
   __ bind(&function);
-  __ mov(eax, isolate()->factory()->function_class_string());
+  __ mov(eax, isolate()->factory()->Function_string());
   __ jmp(&done);
 
   // Objects with a non-function constructor have class 'Object'.
@@ -3248,7 +3350,7 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3261,7 +3363,7 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 4);
+  DCHECK(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3273,7 +3375,7 @@
 
 void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
@@ -3292,8 +3394,8 @@
 
 void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  DCHECK(args->length() == 2);
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -3329,7 +3431,7 @@
   }
 
   __ bind(&not_date_object);
-  __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
   __ bind(&done);
   context()->Plug(result);
 }
@@ -3337,15 +3439,15 @@
 
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = eax;
   Register index = ebx;
   Register value = ecx;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
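+  // The intrinsic now takes its operands as (index, value, string), so the
+  // arguments are visited in source order.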
 
   __ pop(value);
   __ pop(index);
@@ -3373,15 +3475,15 @@
 
 void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = eax;
   Register index = ebx;
   Register value = ecx;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ pop(value);
   __ pop(index);
 
@@ -3407,7 +3509,7 @@
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
@@ -3419,7 +3521,7 @@
 
 void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
@@ -3448,7 +3550,7 @@
 
 void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(args->length(), 1);
+  DCHECK_EQ(args->length(), 1);
 
   // Load the argument into eax and call the stub.
   VisitForAccumulatorValue(args->at(0));
@@ -3461,7 +3563,7 @@
 
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3480,7 +3582,7 @@
 
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3526,7 +3628,7 @@
 
 void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3574,7 +3676,7 @@
 
 void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3587,7 +3689,7 @@
 
 void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3600,7 +3702,7 @@
 
 void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() >= 2);
+  DCHECK(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
   for (int i = 0; i < arg_count + 1; ++i) {
@@ -3634,7 +3736,7 @@
   // Load the arguments on the stack and call the stub.
   RegExpConstructResultStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(2));
@@ -3647,9 +3749,9 @@
 
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  DCHECK_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
@@ -3687,7 +3789,7 @@
   // Call runtime to perform the lookup.
   __ push(cache);
   __ push(key);
-  __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ bind(&done);
   context()->Plug(eax);
@@ -3696,7 +3798,7 @@
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3720,7 +3822,7 @@
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
   __ AssertString(eax);
@@ -3732,13 +3834,13 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
 
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
   // Load this to eax (= array)
@@ -3790,7 +3892,7 @@
   array = no_reg;
 
 
-  // Check that all array elements are sequential ASCII strings, and
+  // Check that all array elements are sequential one-byte strings, and
   // accumulate the sum of their lengths, as a smi-encoded value.
   __ Move(index, Immediate(0));
   __ Move(string_length, Immediate(0));
@@ -3799,7 +3901,7 @@
   //                      scratch, string_length, elements.
   if (generate_debug_code_) {
     __ cmp(index, array_length);
-    __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+    __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
   }
   __ bind(&loop);
   __ mov(string, FieldOperand(elements,
@@ -3837,7 +3939,7 @@
   // string_length: Sum of string lengths, as a smi.
   // elements: FixedArray of strings.
 
-  // Check that the separator is a flat ASCII string.
+  // Check that the separator is a flat one-byte string.
   __ mov(string, separator_operand);
   __ JumpIfSmi(string, &bailout);
   __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
@@ -3861,8 +3963,8 @@
   // Live registers and stack values:
   //   string_length
   //   elements
-  __ AllocateAsciiString(result_pos, string_length, scratch,
-                         index, string, &bailout);
+  __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
+                           &bailout);
   __ mov(result_operand, result_pos);
   __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
 
@@ -3905,7 +4007,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ASCII character value.
+  // Replace separator with its one-byte character value.
   __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
   __ mov_b(separator_operand, scratch);
 
@@ -3996,6 +4098,16 @@
 }
 
 
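+// %_DebugIsActive: reads the isolate's debug_is_active flag byte and returns
+// it to JavaScript as a smi.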
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
+  __ movzx_b(eax, Operand::StaticVariable(debug_is_active));
+  __ SmiTag(eax);
+  context()->Plug(eax);
+}
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   if (expr->function() != NULL &&
       expr->function()->intrinsic_type == Runtime::INLINE) {
@@ -4013,9 +4125,15 @@
     __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
 
     // Load the function from the receiver.
-    __ mov(edx, Operand(esp, 0));
-    __ mov(ecx, Immediate(expr->name()));
-    CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
+    __ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Immediate(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
+      CallLoadIC(NOT_CONTEXTUAL);
+    } else {
+      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    }
 
     // Push the target function under the receiver.
     __ push(Operand(esp, 0));
@@ -4069,7 +4187,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode() == SLOPPY || var->is_this());
+        DCHECK(strict_mode() == SLOPPY || var->is_this());
         if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ push(Immediate(var->name()));
@@ -4086,7 +4204,7 @@
           // context where the variable was introduced.
           __ push(context_register());
           __ push(Immediate(var->name()));
-          __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
           context()->Plug(eax);
         }
       } else {
@@ -4124,7 +4242,7 @@
         // for control and plugging the control flow into the context,
         // because we need to prepare a pair of extra administrative AST ids
         // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
         Label materialize_true, materialize_false, done;
         VisitForControl(expr->expression(),
                         &materialize_false,
@@ -4167,7 +4285,7 @@
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  ASSERT(expr->expression()->IsValidReferenceExpression());
+  DCHECK(expr->expression()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -4186,7 +4304,7 @@
 
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
-    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
     EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
@@ -4195,16 +4313,16 @@
       __ push(Immediate(Smi::FromInt(0)));
     }
     if (assign_type == NAMED_PROPERTY) {
-      // Put the object both on the stack and in edx.
-      VisitForAccumulatorValue(prop->obj());
-      __ push(eax);
-      __ mov(edx, eax);
+      // Put the object both on the stack and in the receiver register.
+      VisitForStackValue(prop->obj());
+      __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
       VisitForStackValue(prop->key());
-      __ mov(edx, Operand(esp, kPointerSize));  // Object.
-      __ mov(ecx, Operand(esp, 0));             // Key.
+      __ mov(LoadDescriptor::ReceiverRegister(),
+             Operand(esp, kPointerSize));                       // Object.
+      __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));  // Key.
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4289,8 +4407,9 @@
   __ bind(&stub_call);
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
-  BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
-  CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+                                              NO_OVERWRITE).code();
+  CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4319,8 +4438,9 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(ecx, prop->key()->AsLiteral()->value());
-      __ pop(edx);
+      __ mov(StoreDescriptor::NameRegister(),
+             prop->key()->AsLiteral()->value());
+      __ pop(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4333,11 +4453,10 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ pop(ecx);
-      __ pop(edx);
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ pop(StoreDescriptor::NameRegister());
+      __ pop(StoreDescriptor::ReceiverRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic, expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4356,13 +4475,17 @@
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
-  ASSERT(!context()->IsEffect());
-  ASSERT(!context()->IsTest());
+  DCHECK(!context()->IsEffect());
+  DCHECK(!context()->IsTest());
 
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "[ Global variable");
-    __ mov(edx, GlobalObjectOperand());
-    __ mov(ecx, Immediate(proxy->name()));
+    __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    __ mov(LoadDescriptor::NameRegister(), Immediate(proxy->name()));
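+    // With --vector-ics enabled the load IC also expects its feedback
+    // vector slot index in the slot register.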
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
+    }
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     CallLoadIC(NOT_CONTEXTUAL);
@@ -4374,12 +4497,12 @@
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
     __ push(esi);
     __ push(Immediate(proxy->name()));
-    __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
     PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
@@ -4429,10 +4552,6 @@
     __ j(equal, if_true);
     __ cmp(eax, isolate()->factory()->false_value());
     Split(equal, if_true, if_false, fall_through);
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(check, factory->null_string())) {
-    __ cmp(eax, isolate()->factory()->null_value());
-    Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
     __ cmp(eax, isolate()->factory()->undefined_value());
     __ j(equal, if_true);
@@ -4451,10 +4570,8 @@
     Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->object_string())) {
     __ JumpIfSmi(eax, if_false);
-    if (!FLAG_harmony_typeof) {
-      __ cmp(eax, isolate()->factory()->null_value());
-      __ j(equal, if_true);
-    }
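+    // With FLAG_harmony_typeof gone, typeof null is always "object".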
+    __ cmp(eax, isolate()->factory()->null_value());
+    __ j(equal, if_true);
     __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
     __ j(below, if_false);
     __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -4528,7 +4645,7 @@
 
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
-      Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
 
@@ -4590,7 +4707,7 @@
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ mov(Operand(ebp, frame_offset), value);
 }
 
@@ -4615,7 +4732,7 @@
     // Fetch it from the context.
     __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
   } else {
-    ASSERT(declaration_scope->is_function_scope());
+    DCHECK(declaration_scope->is_function_scope());
     __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   }
 }
@@ -4626,7 +4743,7 @@
 
 void FullCodeGenerator::EnterFinallyBlock() {
   // Cook return address on top of stack (smi encoded Code* delta)
-  ASSERT(!result_register().is(edx));
+  DCHECK(!result_register().is(edx));
   __ pop(edx);
   __ sub(edx, Immediate(masm_->CodeObject()));
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
@@ -4657,7 +4774,7 @@
 
 
 void FullCodeGenerator::ExitFinallyBlock() {
-  ASSERT(!result_register().is(edx));
+  DCHECK(!result_register().is(edx));
   // Restore pending message from stack.
   __ pop(edx);
   ExternalReference pending_message_script =
@@ -4768,25 +4885,25 @@
     Address pc) {
   Address call_target_address = pc - kIntSize;
   Address jns_instr_address = call_target_address - 3;
-  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+  DCHECK_EQ(kCallInstruction, *(call_target_address - 1));
 
   if (*jns_instr_address == kJnsInstruction) {
-    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
-    ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+    DCHECK_EQ(kJnsOffset, *(call_target_address - 2));
+    DCHECK_EQ(isolate->builtins()->InterruptCheck()->entry(),
               Assembler::target_address_at(call_target_address,
                                            unoptimized_code));
     return INTERRUPT;
   }
 
-  ASSERT_EQ(kNopByteOne, *jns_instr_address);
-  ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+  DCHECK_EQ(kNopByteOne, *jns_instr_address);
+  DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
 
   if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
       isolate->builtins()->OnStackReplacement()->entry()) {
     return ON_STACK_REPLACEMENT;
   }
 
-  ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+  DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
             Assembler::target_address_at(call_target_address,
                                          unoptimized_code));
   return OSR_AFTER_STACK_CHECK;
diff --git a/src/ia32/ic-ia32.cc b/src/ia32/ic-ia32.cc
deleted file mode 100644
index b0e4ca0..0000000
--- a/src/ia32/ic-ia32.cc
+++ /dev/null
@@ -1,1290 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
-                                            Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, global_object);
-  __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
-  __ j(equal, global_object);
-  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register r0,
-                                                Register r1,
-                                                Label* miss) {
-  // Register usage:
-  //   receiver: holds the receiver on entry and is unchanged.
-  //   r0: used to hold receiver instance type.
-  //       Holds the property dictionary on fall through.
-  //   r1: used to hold the receiver's map.
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss);
-
-  // Check that the receiver is a valid JS object.
-  __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
-  __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
-  __ j(below, miss);
-
-  // If this assert fails, we have to check upper bound too.
-  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
-  GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
-  // Check for non-global object that requires access check.
-  __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
-            (1 << Map::kIsAccessCheckNeeded) |
-            (1 << Map::kHasNamedInterceptor));
-  __ j(not_zero, miss);
-
-  __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
-              DONT_DO_SMI_CHECK);
-}
-
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss_label,
-                                   Register elements,
-                                   Register name,
-                                   Register r0,
-                                   Register r1,
-                                   Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0   - used for the index into the property dictionary
-  //
-  // r1   - used to hold the capacity of the property dictionary.
-  //
-  // result - holds the result on exit.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   r0,
-                                                   r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
-                                    Label* miss_label,
-                                    Register elements,
-                                    Register name,
-                                    Register value,
-                                    Register r0,
-                                    Register r1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // r0 - used for index into the property dictionary and is clobbered.
-  //
-  // r1 - used to hold the capacity of the property dictionary and is clobbered.
-  Label done;
-
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   r0,
-                                                   r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-  __ mov(Operand(r0, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register map,
-                                           int interceptor_bit,
-                                           Label* slow) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  // Scratch registers:
-  //   map - used to hold the map of the receiver.
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-
-  // Get the map of the receiver.
-  __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Check bit field.
-  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
-            (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
-  __ j(not_zero, slow);
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing
-  // into string objects works as intended.
-  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
-  __ CmpInstanceType(map, JS_OBJECT_TYPE);
-  __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
-                                  Register receiver,
-                                  Register key,
-                                  Register scratch,
-                                  Register result,
-                                  Label* not_fast_array,
-                                  Label* out_of_range) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  //   key - holds the key and is unchanged (must be a smi).
-  // Scratch registers:
-  //   scratch - used to hold elements of the receiver and the loaded value.
-  //   result - holds the result on exit if the load succeeds and
-  //            we fall through.
-
-  __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
-  if (not_fast_array != NULL) {
-    // Check that the object is in fast mode and writable.
-    __ CheckMap(scratch,
-                masm->isolate()->factory()->fixed_array_map(),
-                not_fast_array,
-                DONT_DO_SMI_CHECK);
-  } else {
-    __ AssertFastElements(scratch);
-  }
-  // Check that the key (index) is within bounds.
-  __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
-  __ j(above_equal, out_of_range);
-  // Fast case: Do the load.
-  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
-  __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ j(equal, out_of_range);
-  if (!result.is(scratch)) {
-    __ mov(result, scratch);
-  }
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
-                                 Register key,
-                                 Register map,
-                                 Register hash,
-                                 Label* index_string,
-                                 Label* not_unique) {
-  // Register use:
-  //   key - holds the key and is unchanged. Assumed to be non-smi.
-  // Scratch registers:
-  //   map - used to hold the map of the key.
-  //   hash - used to hold the hash of the key.
-  Label unique;
-  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
-  __ j(above, not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ j(equal, &unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
-  __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
-  __ j(zero, index_string);
-
-  // Is the string internalized? We already know it's a string so a single
-  // bit test is enough.
-  STATIC_ASSERT(kNotInternalizedTag != 0);
-  __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
-            kIsNotInternalizedMask);
-  __ j(not_zero, not_unique);
-
-  __ bind(&unique);
-}
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
-                                             Register object,
-                                             Register key,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* unmapped_case,
-                                             Label* slow_case) {
-  Heap* heap = masm->isolate()->heap();
-  Factory* factory = masm->isolate()->factory();
-
-  // Check that the receiver is a JSObject. Because of the elements
-  // map check later, we do not need to check for interceptors or
-  // whether it requires access checks.
-  __ JumpIfSmi(object, slow_case);
-  // Check that the object is some kind of JSObject.
-  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
-  __ j(below, slow_case);
-
-  // Check that the key is a positive smi: the 0x80000001 mask tests both
-  // the smi tag bit and the sign bit.
-  __ test(key, Immediate(0x80000001));
-  __ j(not_zero, slow_case);
-
-  // Load the elements into scratch1 and check its map.
-  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
-  __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
-  // Check if element is in the range of mapped arguments. If not, jump
-  // to the unmapped lookup with the parameter map in scratch1.
-  __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
-  __ sub(scratch2, Immediate(Smi::FromInt(2)));
-  __ cmp(key, scratch2);
-  __ j(above_equal, unmapped_case);
-
-  // Load element index and check whether it is the hole.
-  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
-  __ mov(scratch2, FieldOperand(scratch1,
-                                key,
-                                times_half_pointer_size,
-                                kHeaderSize));
-  __ cmp(scratch2, factory->the_hole_value());
-  __ j(equal, unmapped_case);
-
-  // Load value from context and return it. We can reuse scratch1 because
-  // we do not jump to the unmapped lookup (which requires the parameter
-  // map in scratch1).
-  const int kContextOffset = FixedArray::kHeaderSize;
-  __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
-  return FieldOperand(scratch1,
-                      scratch2,
-                      times_half_pointer_size,
-                      Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
-                                               Register key,
-                                               Register parameter_map,
-                                               Register scratch,
-                                               Label* slow_case) {
-  // Element is in arguments backing store, which is referenced by the
-  // second element of the parameter_map.
-  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
-  Register backing_store = parameter_map;
-  __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
-  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
-  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
-  __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmp(key, scratch);
-  __ j(greater_equal, slow_case);
-  return FieldOperand(backing_store,
-                      key,
-                      times_half_pointer_size,
-                      FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(ecx, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(eax, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
-  Isolate* isolate = masm->isolate();
-  Counters* counters = isolate->counters();
-  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
-  __ ret(0);
-
-  __ bind(&check_number_dictionary);
-  __ mov(ebx, ecx);
-  __ SmiUntag(ebx);
-  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // Check whether the elements is a number dictionary.
-  // edx: receiver
-  // ebx: untagged index
-  // ecx: key
-  // eax: elements
-  __ CheckMap(eax,
-              isolate->factory()->hash_table_map(),
-              &slow,
-              DONT_DO_SMI_CHECK);
-  Label slow_pop_receiver;
-  // Push receiver on the stack to free up a register for the dictionary
-  // probing.
-  __ push(edx);
-  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
-  // Pop receiver before returning.
-  __ pop(edx);
-  __ ret(0);
-
-  __ bind(&slow_pop_receiver);
-  // Pop the receiver from the stack and jump to runtime.
-  __ pop(edx);
-
-  __ bind(&slow);
-  // Slow case: jump to runtime.
-  // edx: receiver
-  // ecx: key
-  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, edx, eax, Map::kHasNamedInterceptor, &slow);
-
-  // If the receiver is a fast-case object, check the keyed lookup
-  // cache. Otherwise probe the dictionary.
-  __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
-  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
-         Immediate(isolate->factory()->hash_table_map()));
-  __ j(equal, &probe_dictionary);
-
-  // The receiver's map is still in eax, compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the string hash.
-  if (FLAG_debug_code) {
-    __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
-    __ Check(equal, kMapIsNoLongerInEax);
-  }
-  __ mov(ebx, eax);  // Keep the map around for later.
-  __ shr(eax, KeyedLookupCache::kMapHashShift);
-  __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
-  __ shr(edi, String::kHashShift);
-  __ xor_(eax, edi);
-  __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-
-  // Load the key (consisting of map and internalized string) from the cache and
-  // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
-  ExternalReference cache_keys =
-      ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    __ mov(edi, eax);
-    __ shl(edi, kPointerSizeLog2 + 1);
-    if (i != 0) {
-      __ add(edi, Immediate(kPointerSize * i * 2));
-    }
-    __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
-    __ j(not_equal, &try_next_entry);
-    __ add(edi, Immediate(kPointerSize));
-    __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
-    __ j(equal, &hit_on_nth_entry[i]);
-    __ bind(&try_next_entry);
-  }
-
-  __ lea(edi, Operand(eax, 1));
-  __ shl(edi, kPointerSizeLog2 + 1);
-  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
-  __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
-  __ j(not_equal, &slow);
-  __ add(edi, Immediate(kPointerSize));
-  __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
-  __ j(not_equal, &slow);
-
-  // Get field offset.
-  // edx     : receiver
-  // ebx     : receiver's map
-  // ecx     : key
-  // eax     : lookup cache index
-  ExternalReference cache_field_offsets =
-      ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    if (i != 0) {
-      __ add(eax, Immediate(i));
-    }
-    __ mov(edi,
-           Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
-    __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-    __ sub(edi, eax);
-    __ j(above_equal, &property_array_property);
-    if (i != 0) {
-      __ jmp(&load_in_object_property);
-    }
-  }
-
-  // Load in-object property.
-  __ bind(&load_in_object_property);
-  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(eax, edi);
-  __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
-  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
-  __ ret(0);
-
-  // Load property array property.
-  __ bind(&property_array_property);
-  __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
-  __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
-                           FixedArray::kHeaderSize));
-  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
-  __ ret(0);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-
-  __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
-  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
-  GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
-  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
-  __ ret(0);
-
-  __ bind(&index_name);
-  __ IndexFromHash(ebx, ecx);
-  // Now jump to the place where smi keys are handled.
-  __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key (index)
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss;
-
-  Register receiver = edx;
-  Register index = ecx;
-  Register scratch = ebx;
-  Register result = eax;
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX);
-  char_at_generator.GenerateFast(masm);
-  __ ret(0);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, call_helper);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow;
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(edx, &slow);
-
-  // Check that the key is an array index, that is Uint32.
-  __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
-  __ j(not_zero, &slow);
-
-  // Get the map of the receiver.
-  __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
-
-  // Check that it has indexed interceptor and access checks
-  // are not enabled for this object.
-  __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
-  __ and_(eax, Immediate(kSlowCaseBitFieldMask));
-  __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
-  __ j(not_zero, &slow);
-
-  // Everything is fine, call runtime.
-  __ pop(eax);
-  __ push(edx);  // receiver
-  __ push(ecx);  // key
-  __ push(eax);  // return address
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
-                        masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, notin;
-  Factory* factory = masm->isolate()->factory();
-  Operand mapped_location =
-      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
-  __ mov(eax, mapped_location);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in ebx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
-  __ cmp(unmapped_location, factory->the_hole_value());
-  __ j(equal, &slow);
-  __ mov(eax, unmapped_location);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, notin;
-  Operand mapped_location =
-      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
-  __ mov(mapped_location, eax);
-  __ lea(ecx, mapped_location);
-  __ mov(edx, eax);
-  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in ebx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
-  __ mov(unmapped_location, eax);
-  __ lea(edi, unmapped_location);
-  __ mov(edx, eax);
-  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
-    MacroAssembler* masm,
-    Label* fast_object,
-    Label* fast_double,
-    Label* slow,
-    KeyedStoreCheckMap check_map,
-    KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  // eax: value
-  // ecx: key (a smi)
-  // edx: receiver
-  // ebx: FixedArray receiver->elements
-  // edi: receiver map
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ cmp(FixedArrayElementOperand(ebx, ecx),
-         masm->isolate()->factory()->the_hole_value());
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(eax, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ mov(FixedArrayElementOperand(ebx, ecx), eax);
-  __ ret(0);
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-  __ CheckFastObjectElements(edi, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ mov(FixedArrayElementOperand(ebx, ecx), eax);
-  // Update write barrier for the elements array address.
-  __ mov(edx, eax);  // Preserve the value which is returned.
-  __ RecordWriteArray(
-      ebx, edx, ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ ret(0);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-    __ j(not_equal, slow);
-    // If the value is a number, store it as a double in the FastDoubleElements
-    // array.
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(eax, ebx, ecx, edi, xmm0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ ret(0);
-
-  __ bind(&transition_smi_elements);
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ CheckMap(eax,
-              masm->isolate()->factory()->heap_number_map(),
-              &non_double_value,
-              DONT_DO_SMI_CHECK);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
-  // and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         slow);
-  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
-                                                    FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
-                                                                   slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
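// [Sketch] The HOLECHECK blocks above exploit the fact that the double hole
// is a NaN with a distinguished upper word, so only the high 32 bits of an
// element slot are compared. A portable rendering of that test (the constant
// is a parameter here because its exact value is a heap implementation
// detail):
#include <cstdint>

bool IsDoubleHole(uint64_t element_bits, uint32_t hole_nan_upper32) {
  return static_cast<uint32_t>(element_bits >> 32) == hole_nan_upper32;
}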
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
-                                   StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array;
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(edx, &slow);
-  // Get the map from the receiver.
-  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
-  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
-  __ j(not_zero, &slow);
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(ecx, &slow);
-  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  // eax: value
-  // edx: JSObject
-  // ecx: key (a smi)
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(below, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  GenerateRuntimeSetProperty(masm, strict_mode);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // eax: value
-  // edx: receiver, a JSArray
-  // ecx: key, a smi.
-  // ebx: receiver->elements, a FixedArray
-  // edi: receiver map
-  // flags: compare (ecx, edx.length())
-  // Do not leave holes in the array:
-  __ j(not_equal, &slow);
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  // eax: value
-  // edx: receiver, a JSArray
-  // ecx: key, a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array and fall through to the
-  // common store code.
-  __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
-  __ j(above_equal, &extra);
-
-  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
-                                  &slow, kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
-                                  &slow, kDontCheckMap, kIncrementLength);
-}
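// [Sketch with placeholder names] GenerateGeneric is a hand-scheduled version
// of the following dispatch, assuming the smi-key and access-check tests have
// already passed; only the decision order is shown:
#include <cstdint>

enum class StorePath { kFastStore, kGrowStore, kRuntime };

StorePath ClassifyKeyedStore(bool is_js_array, uint32_t key,
                             uint32_t capacity, uint32_t array_length) {
  if (!is_js_array) {
    return key < capacity ? StorePath::kFastStore : StorePath::kRuntime;
  }
  if (key < array_length) return StorePath::kFastStore;  // in bounds
  if (key == array_length && key < capacity) {
    return StorePath::kGrowStore;  // append at a[a.length]; no holes allowed
  }
  return StorePath::kRuntime;  // everything else goes to the runtime
}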
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  // Probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, edx, ecx, ebx, eax);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss, slow;
-
-  GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
-  // eax: elements
-  // Search the dictionary placing the result in eax.
-  GenerateDictionaryLoad(masm, &slow, eax, ecx, edi, ebx, eax);
-  __ ret(0);
-
-  // Dictionary load failed; go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-
-  // Cache miss: Jump to runtime.
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // name
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // name
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // name
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // name
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, edx, ecx, ebx, no_reg);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(ebx);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  Label miss, restore_miss;
-
-  GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
-
-  // A lot of registers are needed for storing to slow case
-  // objects. Push and restore receiver but rely on
-  // GenerateDictionaryStore preserving the value and name.
-  __ push(edx);
-  GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
-  __ Drop(1);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1);
-  __ ret(0);
-
-  __ bind(&restore_miss);
-  __ pop(edx);
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
-  __ push(Immediate(Smi::FromInt(strict_mode)));
-  __ push(ebx);  // return address
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                              StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(NONE)));         // PropertyAttributes
-  __ push(Immediate(Smi::FromInt(strict_mode)));  // Strict mode.
-  __ push(ebx);   // return address
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(ebx);   // return address
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(ebx);   // return address
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return equal;
-    case Token::LT:
-      return less;
-    case Token::GT:
-      return greater;
-    case Token::LTE:
-      return less_equal;
-    case Token::GTE:
-      return greater_equal;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test al, nothing
-  // was inlined.
-  return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test al, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestAlByte) {
-    ASSERT(*test_instruction_address == Assembler::kNopByte);
-    return;
-  }
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction and the
-  // condition code used at the patched jump.
-  uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
-  if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           address, test_instruction_address, delta);
-  }
-
-  // Patch with a short conditional jump. Enabling means switching from a short
-  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
-  // reverse operation of that.
-  Address jmp_address = test_instruction_address - delta;
-  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
-         ? (*jmp_address == Assembler::kJncShortOpcode ||
-            *jmp_address == Assembler::kJcShortOpcode)
-         : (*jmp_address == Assembler::kJnzShortOpcode ||
-            *jmp_address == Assembler::kJzShortOpcode));
-  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
-      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
-      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
-  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
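// [Byte-level sketch, not callable V8 code] The contract this patching relies
// on: a one-byte short Jcc (opcode 0x70 | cc) sits somewhere before the IC
// call, and a 'test al, imm8' marker (opcode 0xA8) follows the call, with
// imm8 holding the backward distance to that Jcc.
#include <cstdint>

void PatchInlinedSmiCheckSketch(uint8_t* after_call, bool enable) {
  if (after_call[0] != 0xA8) return;  // no 'test al' marker: nothing inlined
  uint8_t delta = after_call[1];      // backward distance to the short jump
  uint8_t* jcc = after_call - delta;
  if (enable) {
    *jcc = (*jcc == 0x73) ? 0x75 : 0x74;  // jnc -> jnz, jc -> jz
  } else {
    *jcc = (*jcc == 0x75) ? 0x73 : 0x72;  // jnz -> jnc, jz -> jc
  }
}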
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ia32/interface-descriptors-ia32.cc b/src/ia32/interface-descriptors-ia32.cc
new file mode 100644
index 0000000..3a0d526
--- /dev/null
+++ b/src/ia32/interface-descriptors-ia32.cc
@@ -0,0 +1,304 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return edx; }
+const Register LoadDescriptor::NameRegister() { return ecx; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return eax; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return ebx; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return edx; }
+const Register StoreDescriptor::NameRegister() { return ecx; }
+const Register StoreDescriptor::ValueRegister() { return eax; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() {
+  return ebx;
+}
+
+
+const Register InstanceofDescriptor::left() { return eax; }
+const Register InstanceofDescriptor::right() { return edx; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return edx; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
+
+
+const Register ApiGetterDescriptor::function_address() { return edx; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return eax; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+  return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ebx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // ToNumberStub invokes a function, and therefore needs a context.
+  Register registers[] = {esi, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax, ebx, ecx};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax, ebx, ecx, edx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ebx, edx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ecx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edi, edx};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Smi()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // eax : number of arguments
+  // ebx : feedback vector
+  // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+  //       vector (Smi)
+  // edi : constructor function
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {esi, eax, edi, ebx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ecx, ebx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax, ebx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // eax -- number of arguments
+  // edi -- function
+  // ebx -- allocation site with elements kind
+  Register registers[] = {esi, edi, ebx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // stack param count needs (constructor pointer and single argument)
+  Register registers[] = {esi, edi, ebx, eax};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // eax -- number of arguments
+  // edi -- function
+  Register registers[] = {esi, edi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // stack param count needs (constructor pointer and single argument)
+  Register registers[] = {esi, edi, eax};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ecx, edx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      ecx,  // key
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // key
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      ecx,  // name
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // name
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      edx,  // receiver
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // receiver
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      edi,  // JSFunction
+      eax,  // actual number of arguments
+      ebx,  // expected number of arguments
+  };
+  Representation representations[] = {
+      Representation::Tagged(),     // context
+      Representation::Tagged(),     // JSFunction
+      Representation::Integer32(),  // actual number of arguments
+      Representation::Integer32(),  // expected number of arguments
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      eax,  // callee
+      ebx,  // call_data
+      ecx,  // holder
+      edx,  // api_function_address
+  };
+  Representation representations[] = {
+      Representation::Tagged(),    // context
+      Representation::Tagged(),    // callee
+      Representation::Tagged(),    // call_data
+      Representation::Tagged(),    // holder
+      Representation::External(),  // api_function_address
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
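// [Hypothetical example, not part of this patch] Every descriptor above
// follows the same shape; a new one for an imaginary FooStub would look like
// this (FooDescriptor and its register assignments are invented for
// illustration):
//
//   void FooDescriptor::Initialize(CallInterfaceDescriptorData* data) {
//     Register registers[] = {esi,   // context -- always slot 0
//                             edx,   // left operand
//                             eax};  // right operand
//     // NULL representations mean every parameter is a tagged value.
//     data->Initialize(arraysize(registers), registers, NULL);
//   }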
diff --git a/src/ia32/lithium-codegen-ia32.cc b/src/ia32/lithium-codegen-ia32.cc
index 1e4f756..1d7c8c1 100644
--- a/src/ia32/lithium-codegen-ia32.cc
+++ b/src/ia32/lithium-codegen-ia32.cc
@@ -6,20 +6,22 @@
 
 #if V8_TARGET_ARCH_IA32
 
-#include "src/ia32/lithium-codegen-ia32.h"
-#include "src/ic.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
-#include "src/deoptimizer.h"
-#include "src/stub-cache.h"
 #include "src/codegen.h"
+#include "src/deoptimizer.h"
 #include "src/hydrogen-osr.h"
+#include "src/ia32/lithium-codegen-ia32.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 
 namespace v8 {
 namespace internal {
 
 // When invoking builtins, we need to record the safepoint in the middle of
 // the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
@@ -29,9 +31,9 @@
         deopt_mode_(mode) {}
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+  virtual void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const V8_OVERRIDE {
+  virtual void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -46,7 +48,7 @@
 
 bool LCodeGen::GenerateCode() {
   LPhase phase("Z_Code generation", chunk());
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   status_ = GENERATING;
 
   // Open a frame scope to indicate that there is a frame on the stack.  The
@@ -70,7 +72,7 @@
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
-  ASSERT(is_done());
+  DCHECK(is_done());
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
@@ -92,8 +94,8 @@
 
 
 void LCodeGen::SaveCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Save clobbered callee double registers");
   int count = 0;
   BitVector* doubles = chunk()->allocated_double_registers();
@@ -108,8 +110,8 @@
 
 
 void LCodeGen::RestoreCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Restore clobbered callee double registers");
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
@@ -124,7 +126,7 @@
 
 
 bool LCodeGen::GeneratePrologue() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -151,7 +153,7 @@
       __ j(not_equal, &ok, Label::kNear);
 
       __ mov(ecx, GlobalObjectOperand());
-      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
 
       __ mov(Operand(esp, receiver_offset), ecx);
 
@@ -186,7 +188,7 @@
 
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    ASSERT(!frame_is_built_);
+    DCHECK(!frame_is_built_);
     frame_is_built_ = true;
     if (info()->IsStub()) {
       __ StubPrologue();
@@ -205,7 +207,7 @@
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
-  ASSERT(slots != 0 || !info()->IsOptimizing());
+  DCHECK(slots != 0 || !info()->IsOptimizing());
   if (slots > 0) {
     if (slots == 1) {
       if (dynamic_frame_alignment_) {
@@ -263,7 +265,7 @@
       need_write_barrier = false;
     } else {
       __ push(edi);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in eax.  It replaces the context passed to us.
@@ -357,7 +359,7 @@
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  ASSERT(slots >= 1);
+  DCHECK(slots >= 1);
   __ sub(esp, Immediate((slots - 1) * kPointerSize));
 }
 
@@ -381,17 +383,12 @@
     Comment(";;; -------------------- Jump table --------------------");
   }
   for (int i = 0; i < jump_table_.length(); i++) {
-    __ bind(&jump_table_[i].label);
-    Address entry = jump_table_[i].address;
-    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    if (id == Deoptimizer::kNotDeoptimizationEntry) {
-      Comment(";;; jump table entry %d.", i);
-    } else {
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    }
-    if (jump_table_[i].needs_frame) {
-      ASSERT(!info()->saves_caller_doubles());
+    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+    __ bind(&table_entry->label);
+    Address entry = table_entry->address;
+    DeoptComment(table_entry->reason);
+    if (table_entry->needs_frame) {
+      DCHECK(!info()->saves_caller_doubles());
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
@@ -401,7 +398,7 @@
         // This variant of deopt can only be used with stubs. Since we don't
         // have a function pointer to install in the stack frame that we're
         // building, install a special marker there instead.
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
         // Push a PC inside the function so that the deopt code can find where
         // the deopt comes from. It doesn't have to be the precise return
@@ -427,7 +424,7 @@
 
 
 bool LCodeGen::GenerateDeferredCode() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
@@ -445,8 +442,8 @@
       __ bind(code->entry());
       if (NeedsDeferredFrame()) {
         Comment(";;; Build frame");
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
         frame_is_built_ = true;
         // Build the frame in such a way that esi isn't trashed.
         __ push(ebp);  // Caller's frame pointer.
@@ -459,7 +456,7 @@
       if (NeedsDeferredFrame()) {
         __ bind(code->done());
         Comment(";;; Destroy frame");
-        ASSERT(frame_is_built_);
+        DCHECK(frame_is_built_);
         frame_is_built_ = false;
         __ mov(esp, ebp);
         __ pop(ebp);
@@ -476,7 +473,7 @@
 
 
 bool LCodeGen::GenerateSafepointTable() {
-  ASSERT(is_done());
+  DCHECK(is_done());
   if (!info()->IsStub()) {
     // For lazy deoptimization we need space to patch a call after every call.
     // Ensure there is always space for such patching, even if the code ends
@@ -502,13 +499,13 @@
 
 
 Register LCodeGen::ToRegister(LOperand* op) const {
-  ASSERT(op->IsRegister());
+  DCHECK(op->IsRegister());
   return ToRegister(op->index());
 }
 
 
 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  ASSERT(op->IsDoubleRegister());
+  DCHECK(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }
 
@@ -523,28 +520,28 @@
   HConstant* constant = chunk_->LookupConstant(op);
   int32_t value = constant->Integer32Value();
   if (r.IsInteger32()) return value;
-  ASSERT(r.IsSmiOrTagged());
+  DCHECK(r.IsSmiOrTagged());
   return reinterpret_cast<int32_t>(Smi::FromInt(value));
 }
 
 
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }
 
 
 double LCodeGen::ToDouble(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasDoubleValue());
+  DCHECK(constant->HasDoubleValue());
   return constant->DoubleValue();
 }
 
 
 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasExternalReferenceValue());
+  DCHECK(constant->HasExternalReferenceValue());
   return constant->ExternalReferenceValue();
 }
 
@@ -560,7 +557,7 @@
 
 
 static int ArgumentsOffsetWithoutFrame(int index) {
-  ASSERT(index < 0);
+  DCHECK(index < 0);
   return -(index + 1) * kPointerSize + kPCOnStackSize;
 }
 
@@ -568,7 +565,7 @@
 Operand LCodeGen::ToOperand(LOperand* op) const {
   if (op->IsRegister()) return Operand(ToRegister(op));
   if (op->IsDoubleRegister()) return Operand(ToDoubleRegister(op));
-  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return Operand(ebp, StackSlotOffset(op->index()));
   } else {
@@ -580,7 +577,7 @@
 
 
 Operand LCodeGen::HighOperand(LOperand* op) {
-  ASSERT(op->IsDoubleStackSlot());
+  DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
   } else {
@@ -615,13 +612,13 @@
       translation->BeginConstructStubFrame(closure_id, translation_size);
       break;
     case JS_GETTER:
-      ASSERT(translation_size == 1);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
       translation->BeginGetterStubFrame(closure_id);
       break;
     case JS_SETTER:
-      ASSERT(translation_size == 2);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
     case ARGUMENTS_ADAPTOR:
@@ -721,7 +718,7 @@
                                RelocInfo::Mode mode,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
   __ call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 
@@ -745,14 +742,14 @@
                            int argc,
                            LInstruction* instr,
                            SaveFPRegsMode save_doubles) {
-  ASSERT(instr != NULL);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr != NULL);
+  DCHECK(instr->HasPointerMap());
 
   __ CallRuntime(fun, argc, save_doubles);
 
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 
-  ASSERT(info()->is_calling());
+  DCHECK(info()->is_calling());
 }
 
 
@@ -782,7 +779,7 @@
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
 
-  ASSERT(info()->is_calling());
+  DCHECK(info()->is_calling());
 }
 
 
@@ -823,13 +820,14 @@
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition cc,
-                            LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            const char* detail,
                             Deoptimizer::BailoutType bailout_type) {
+  LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(environment->HasBeenRegistered());
+  DCHECK(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
   Address entry =
       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
   if (entry == NULL) {
@@ -850,7 +848,7 @@
     __ mov(Operand::StaticVariable(count), eax);
     __ pop(eax);
     __ popfd();
-    ASSERT(frame_is_built_);
+    DCHECK(frame_is_built_);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
     __ bind(&no_deopt);
     __ mov(Operand::StaticVariable(count), eax);
@@ -865,19 +863,19 @@
     __ bind(&done);
   }
 
-  ASSERT(info()->IsStub() || frame_is_built_);
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
+  DCHECK(info()->IsStub() || frame_is_built_);
   if (cc == no_condition && frame_is_built_) {
+    DeoptComment(reason);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry; reuse the last
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry ||
-        jump_table_.last().needs_frame != !frame_is_built_ ||
-        jump_table_.last().bailout_type != bailout_type) {
-      Deoptimizer::JumpTableEntry table_entry(entry,
-                                              bailout_type,
-                                              !frame_is_built_);
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
     }
     if (cc == no_condition) {
@@ -889,12 +887,12 @@
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition cc,
-                            LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(cc, environment, bailout_type);
+  DeoptimizeIf(cc, instr, detail, bailout_type);
 }
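// After this refactoring, each deopt site passes its LInstruction plus a
// short human-readable detail string; the string flows into DeoptComment and
// into the jump-table entries deduplicated via IsEquivalentTo. Typical call
// sites, taken from the hunks that follow:
//
//   DeoptimizeIf(zero, instr, "division by zero");
//   DeoptimizeIf(overflow, instr, "overflow");
//   DeoptimizeIf(sign, instr, "minus zero");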
 
 
@@ -953,7 +951,7 @@
 
 
 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
+  DCHECK(deoptimization_literals_.length() == 0);
 
   const ZoneList<Handle<JSFunction> >* inlined_closures =
       chunk()->inlined_closures();
@@ -973,7 +971,7 @@
   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
   } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
@@ -985,7 +983,7 @@
     Safepoint::Kind kind,
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
-  ASSERT(kind == expected_safepoint_kind_);
+  DCHECK(kind == expected_safepoint_kind_);
   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint =
       safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
@@ -1072,8 +1070,8 @@
 
 
 void LCodeGen::DoCallStub(LCallStub* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->result()).is(eax));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
       RegExpExecStub stub(isolate());
@@ -1104,7 +1102,7 @@
 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));
 
   // Theoretically, a variation of the branch-free code for integer division by
   // a power of 2 (calculating the remainder via an additional multiplication
@@ -1123,7 +1121,7 @@
     __ and_(dividend, mask);
     __ neg(dividend);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(zero, instr->environment());
+      DeoptimizeIf(zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
   }
@@ -1137,10 +1135,10 @@
 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1155,7 +1153,7 @@
     Label remainder_not_zero;
     __ j(not_zero, &remainder_not_zero, Label::kNear);
     __ cmp(dividend, Immediate(0));
-    DeoptimizeIf(less, instr->environment());
+    DeoptimizeIf(less, instr, "minus zero");
     __ bind(&remainder_not_zero);
   }
 }
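// [Portable restatement] The "minus zero" deopt above fires when a negative
// dividend divides exactly: the JavaScript result of such a '%' is -0.0,
// which an int32 register cannot represent. Setting aside the kMinInt edge
// cases, the condition is:
#include <cstdint>

bool ModNeedsMinusZeroDeopt(int32_t dividend, int32_t divisor) {
  return dividend < 0 && dividend % divisor == 0;
}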
@@ -1165,19 +1163,19 @@
   HMod* hmod = instr->hydrogen();
 
   Register left_reg = ToRegister(instr->left());
-  ASSERT(left_reg.is(eax));
+  DCHECK(left_reg.is(eax));
   Register right_reg = ToRegister(instr->right());
-  ASSERT(!right_reg.is(eax));
-  ASSERT(!right_reg.is(edx));
+  DCHECK(!right_reg.is(eax));
+  DCHECK(!right_reg.is(edx));
   Register result_reg = ToRegister(instr->result());
-  ASSERT(result_reg.is(edx));
+  DCHECK(result_reg.is(edx));
 
   Label done;
   // Check for x % 0; idiv would signal a divide error. We have to
   // deopt in this case because we can't return a NaN.
   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(right_reg, Operand(right_reg));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for kMinInt % -1; idiv would signal a divide error. We
@@ -1188,7 +1186,7 @@
     __ j(not_equal, &no_overflow_possible, Label::kNear);
     __ cmp(right_reg, -1);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "minus zero");
     } else {
       __ j(not_equal, &no_overflow_possible, Label::kNear);
       __ Move(result_reg, Immediate(0));
@@ -1207,7 +1205,7 @@
     __ j(not_sign, &positive_left, Label::kNear);
     __ idiv(right_reg);
     __ test(result_reg, Operand(result_reg));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
     __ jmp(&done, Label::kNear);
     __ bind(&positive_left);
   }
@@ -1220,26 +1218,26 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
-  ASSERT(!result.is(dividend));
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     __ cmp(dividend, kMinInt);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ test(dividend, Immediate(mask));
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
   __ Move(result, dividend);
   int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1257,10 +1255,10 @@
 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(edx));
+  DCHECK(ToRegister(instr->result()).is(edx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1268,7 +1266,7 @@
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   __ TruncatingDiv(dividend, Abs(divisor));
@@ -1278,7 +1276,7 @@
     __ mov(eax, edx);
     __ imul(eax, eax, divisor);
     __ sub(eax, dividend);
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "lost precision");
   }
 }
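// [Portable restatement] The "lost precision" deopt above is an
// exact-division check performed without idiv: multiply the truncating
// quotient back and compare. Setting aside the kMinInt / -1 edge case:
#include <cstdint>

bool DividesExactly(int32_t dividend, int32_t divisor) {
  int32_t quotient = dividend / divisor;  // truncating, like TruncatingDiv
  return quotient * divisor == dividend;  // remainder must be exactly zero
}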
 
@@ -1289,16 +1287,16 @@
   Register dividend = ToRegister(instr->dividend());
   Register divisor = ToRegister(instr->divisor());
   Register remainder = ToRegister(instr->temp());
-  ASSERT(dividend.is(eax));
-  ASSERT(remainder.is(edx));
-  ASSERT(ToRegister(instr->result()).is(eax));
-  ASSERT(!divisor.is(eax));
-  ASSERT(!divisor.is(edx));
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1307,7 +1305,7 @@
     __ test(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr->environment());
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1317,7 +1315,7 @@
     __ cmp(dividend, kMinInt);
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1328,7 +1326,7 @@
   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     // Deoptimize if remainder is not 0.
     __ test(remainder, remainder);
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
 }
 
@@ -1336,7 +1334,7 @@
 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));
 
   // If the divisor is positive, things are easy: There can be no deopts and we
   // can simply do an arithmetic right shift.
@@ -1350,13 +1348,13 @@
   // If the divisor is negative, we have to negate and handle edge cases.
   __ neg(dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(overflow, instr->environment());
+      DeoptimizeIf(overflow, instr, "overflow");
     }
     return;
   }
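// [Sketch] The positive-divisor fast path above needs no fixups because an
// arithmetic right shift already floors toward negative infinity, unlike
// C++ '/', which truncates toward zero: -3 / 2 == -1, while -3 >> 1 == -2
// (the flooring result JavaScript requires).
#include <cstdint>

int32_t FlooringDivByPowerOf2(int32_t dividend, int shift) {
  return dividend >> shift;  // arithmetic shift on two's-complement targets
}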
@@ -1380,10 +1378,10 @@
 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(edx));
+  DCHECK(ToRegister(instr->result()).is(edx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1391,7 +1389,7 @@
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -1406,7 +1404,7 @@
   // In the general case we may need to adjust before and after the truncating
   // division to get a flooring division.
   Register temp = ToRegister(instr->temp3());
-  ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
   Label needs_adjustment, done;
   __ cmp(dividend, Immediate(0));
   __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
@@ -1429,16 +1427,16 @@
   Register divisor = ToRegister(instr->divisor());
   Register remainder = ToRegister(instr->temp());
   Register result = ToRegister(instr->result());
-  ASSERT(dividend.is(eax));
-  ASSERT(remainder.is(edx));
-  ASSERT(result.is(eax));
-  ASSERT(!divisor.is(eax));
-  ASSERT(!divisor.is(edx));
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(result.is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1447,7 +1445,7 @@
     __ test(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr->environment());
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1457,7 +1455,7 @@
     __ cmp(dividend, kMinInt);
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1535,7 +1533,7 @@
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1545,15 +1543,15 @@
     __ j(not_zero, &done, Label::kNear);
     if (right->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
-        DeoptimizeIf(no_condition, instr->environment());
+        DeoptimizeIf(no_condition, instr, "minus zero");
       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
         __ cmp(ToRegister(instr->temp()), Immediate(0));
-        DeoptimizeIf(less, instr->environment());
+        DeoptimizeIf(less, instr, "minus zero");
       }
     } else {
       // Test the non-zero operand for negative sign.
       __ or_(ToRegister(instr->temp()), ToOperand(right));
-      DeoptimizeIf(sign, instr->environment());
+      DeoptimizeIf(sign, instr, "minus zero");
     }
     __ bind(&done);
   }
@@ -1563,8 +1561,8 @@
 void LCodeGen::DoBitI(LBitI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
 
   if (right->IsConstantOperand()) {
     int32_t right_operand =
@@ -1610,18 +1608,14 @@
 void LCodeGen::DoShiftI(LShiftI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
   if (right->IsRegister()) {
-    ASSERT(ToRegister(right).is(ecx));
+    DCHECK(ToRegister(right).is(ecx));
 
     switch (instr->op()) {
       case Token::ROR:
         __ ror_cl(ToRegister(left));
-        if (instr->can_deopt()) {
-          __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr->environment());
-        }
         break;
       case Token::SAR:
         __ sar_cl(ToRegister(left));
@@ -1630,7 +1624,7 @@
         __ shr_cl(ToRegister(left));
         if (instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr->environment());
+          DeoptimizeIf(sign, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1647,7 +1641,7 @@
       case Token::ROR:
         if (shift_count == 0 && instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr->environment());
+          DeoptimizeIf(sign, instr, "negative value");
         } else {
           __ ror(ToRegister(left), shift_count);
         }
@@ -1662,7 +1656,7 @@
           __ shr(ToRegister(left), shift_count);
         } else if (instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr->environment());
+          DeoptimizeIf(sign, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1673,7 +1667,7 @@
               __ shl(ToRegister(left), shift_count - 1);
             }
             __ SmiTag(ToRegister(left));
-            DeoptimizeIf(overflow, instr->environment());
+            DeoptimizeIf(overflow, instr, "overflow");
           } else {
             __ shl(ToRegister(left), shift_count);
           }
@@ -1690,7 +1684,7 @@
 void LCodeGen::DoSubI(LSubI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
+  DCHECK(left->Equals(instr->result()));
 
   if (right->IsConstantOperand()) {
     __ sub(ToOperand(left),
@@ -1699,7 +1693,7 @@
     __ sub(ToRegister(left), ToOperand(right));
   }
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
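The blanket ASSERT-to-DCHECK rename in these hunks tracks the move of V8's checking macros into src/base. DCHECK keeps ASSERT's semantics: compiled in only for debug builds, fatal on failure; the comparison variants (DCHECK_LE, DCHECK_NE) follow suit. A minimal sketch, simplified from the assumed definition in src/base/logging.h:

    #ifdef DEBUG
    #define DCHECK(condition)                                            \
      do {                                                               \
        if (!(condition))                                                \
          V8_Fatal(__FILE__, __LINE__, "Check failed: %s.", #condition); \
      } while (false)
    #else
    #define DCHECK(condition) ((void)0)
    #endif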
 
@@ -1716,10 +1710,10 @@
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
   double v = instr->value();
-  uint64_t int_val = BitCast<uint64_t, double>(v);
+  uint64_t int_val = bit_cast<uint64_t, double>(v);
   int32_t lower = static_cast<int32_t>(int_val);
   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-  ASSERT(instr->result()->IsDoubleRegister());
+  DCHECK(instr->result()->IsDoubleRegister());
 
   XMMRegister res = ToDoubleRegister(instr->result());
   if (int_val == 0) {
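BitCast-to-bit_cast above is part of the same base-library cleanup; the helper reinterprets the double's bit pattern as uint64_t without aliasing violations. Roughly, assuming the shape in src/base/macros.h:

    #include <cstring>

    template <class Dest, class Source>
    inline Dest bit_cast(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source),
                    "bit_cast requires equally sized types");
      Dest dest;
      memcpy(&dest, &source, sizeof(dest));  // well-defined type punning
      return dest;
    }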
@@ -1779,13 +1773,13 @@
   Register scratch = ToRegister(instr->temp());
   Smi* index = instr->index();
   Label runtime, done;
-  ASSERT(object.is(result));
-  ASSERT(object.is(eax));
+  DCHECK(object.is(result));
+  DCHECK(object.is(eax));
 
   __ test(object, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr->environment());
+  DeoptimizeIf(zero, instr, "Smi");
   __ CmpObjectType(object, JS_DATE_TYPE, scratch);
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a date object");
 
   if (index->value() == 0) {
     __ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1875,12 +1869,12 @@
   if (instr->value()->IsConstantOperand()) {
     int value = ToRepresentation(LConstantOperand::cast(instr->value()),
                                  Representation::Integer32());
-    ASSERT_LE(0, value);
+    DCHECK_LE(0, value);
     if (encoding == String::ONE_BYTE_ENCODING) {
-      ASSERT_LE(value, String::kMaxOneByteCharCode);
+      DCHECK_LE(value, String::kMaxOneByteCharCode);
       __ mov_b(operand, static_cast<int8_t>(value));
     } else {
-      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
       __ mov_w(operand, static_cast<int16_t>(value));
     }
   } else {
@@ -1915,7 +1909,7 @@
       __ add(ToRegister(left), ToOperand(right));
     }
     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      DeoptimizeIf(overflow, instr->environment());
+      DeoptimizeIf(overflow, instr, "overflow");
     }
   }
 }
@@ -1924,7 +1918,7 @@
 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
+  DCHECK(left->Equals(instr->result()));
   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
     Label return_left;
@@ -1947,7 +1941,7 @@
     }
     __ bind(&return_left);
   } else {
-    ASSERT(instr->hydrogen()->representation().IsDouble());
+    DCHECK(instr->hydrogen()->representation().IsDouble());
     Label check_nan_left, check_zero, return_left, return_right;
     Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
     XMMRegister left_reg = ToDoubleRegister(left);
@@ -2028,13 +2022,14 @@
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->left()).is(edx));
-  ASSERT(ToRegister(instr->right()).is(eax));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
-  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
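DoArithmeticT is the first of several call sites that stop instantiating stubs and IC initializers directly and route through the new CodeFactory facade, which bundles a code object with its call-interface descriptor. Assumed shape, abridged from the call sites in this file (signatures illustrative):

    class CodeFactory {
     public:
      static Callable BinaryOpIC(Isolate* isolate, Token::Value op,
                                 OverwriteMode mode);
      static Callable CompareIC(Isolate* isolate, Token::Value op);
      static Callable LoadIC(Isolate* isolate, ContextualMode mode);
      static Callable KeyedLoadIC(Isolate* isolate);
      static Callable KeyedStoreIC(Isolate* isolate, StrictMode strict_mode);
    };

Callers that only need the Handle<Code> take .code() on the returned Callable, as in the hunks that follow.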
 
 
@@ -2076,35 +2071,35 @@
     __ test(reg, Operand(reg));
     EmitBranch(instr, not_zero);
   } else if (r.IsDouble()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     XMMRegister reg = ToDoubleRegister(instr->value());
     XMMRegister xmm_scratch = double_scratch0();
     __ xorps(xmm_scratch, xmm_scratch);
     __ ucomisd(reg, xmm_scratch);
     EmitBranch(instr, not_equal);
   } else {
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     Register reg = ToRegister(instr->value());
     HType type = instr->hydrogen()->value()->type();
     if (type.IsBoolean()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ cmp(reg, factory()->true_value());
       EmitBranch(instr, equal);
     } else if (type.IsSmi()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ test(reg, Operand(reg));
       EmitBranch(instr, not_equal);
     } else if (type.IsJSArray()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       EmitBranch(instr, no_condition);
     } else if (type.IsHeapNumber()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       XMMRegister xmm_scratch = double_scratch0();
       __ xorps(xmm_scratch, xmm_scratch);
       __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
       EmitBranch(instr, not_equal);
     } else if (type.IsString()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
@@ -2138,13 +2133,13 @@
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ test(reg, Immediate(kSmiTagMask));
-        DeoptimizeIf(zero, instr->environment());
+        DeoptimizeIf(zero, instr, "Smi");
       }
 
       Register map = no_reg;  // Keep the compiler happy.
       if (expected.NeedsMap()) {
         map = ToRegister(instr->temp());
-        ASSERT(!map.is(reg));
+        DCHECK(!map.is(reg));
         __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
 
         if (expected.CanBeUndetectable()) {
@@ -2195,7 +2190,7 @@
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
-        DeoptimizeIf(no_condition, instr->environment());
+        DeoptimizeIf(no_condition, instr, "unexpected object");
       }
     }
   }
@@ -2324,7 +2319,7 @@
 
 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   Representation rep = instr->hydrogen()->value()->representation();
-  ASSERT(!rep.IsInteger32());
+  DCHECK(!rep.IsInteger32());
   Register scratch = ToRegister(instr->temp());
 
   if (rep.IsDouble()) {
@@ -2459,7 +2454,7 @@
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
@@ -2473,7 +2468,7 @@
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
-  ASSERT(from == to || to == LAST_TYPE);
+  DCHECK(from == to || to == LAST_TYPE);
   return from;
 }
 
@@ -2531,12 +2526,12 @@
                                Register input,
                                Register temp,
                                Register temp2) {
-  ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
   __ JumpIfSmi(input, is_false);
 
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
     // Assuming the following assertions, we can use the same compares to test
     // for both being a function type and being in the object type range.
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2566,7 +2561,7 @@
   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
     __ j(not_equal, is_true);
   } else {
     __ j(not_equal, is_false);
@@ -2611,7 +2606,7 @@
 
 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   // Object and function are in fixed registers defined by the stub.
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 
@@ -2627,15 +2622,15 @@
 
 
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
-  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
    public:
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
    private:
     LInstanceOfKnownGlobal* instr_;
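V8_FINAL and V8_OVERRIDE give way to the shorter FINAL and OVERRIDE spellings in every deferred-code class in this file. Illustrative definitions only; the real ones sit behind toolchain feature checks in the base headers:

    // On a C++11 toolchain these expand to the keywords;
    // pre-C++11 compilers get empty definitions.
    #define FINAL final
    #define OVERRIDE override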
@@ -2707,7 +2702,7 @@
   // stack is used to pass the offset to the location of the map check to
   // the stub.
   Register temp = ToRegister(instr->temp());
-  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
+  DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
   __ LoadHeapObject(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 13;
   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
@@ -2730,7 +2725,7 @@
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
@@ -2763,7 +2758,7 @@
     __ SmiUntag(reg);
     Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
     if (dynamic_frame_alignment && FLAG_debug_code) {
-      ASSERT(extra_value_count == 2);
+      DCHECK(extra_value_count == 2);
       __ cmp(Operand(esp, reg, times_pointer_size,
                      extra_value_count * kPointerSize),
              Immediate(kAlignmentZapValue));
@@ -2825,19 +2820,36 @@
   __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
   }
 }
 
 
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->global_object()).is(edx));
-  ASSERT(ToRegister(instr->result()).is(eax));
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ mov(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
+  __ mov(VectorLoadICDescriptor::SlotRegister(),
+         Immediate(Smi::FromInt(instr->hydrogen()->slot())));
+}
 
-  __ mov(ecx, instr->name());
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ mov(LoadDescriptor::NameRegister(), instr->name());
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  }
   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
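The new EmitVectorLoadICRegisters helper centralizes the extra setup that --vector-ics load ICs need: besides receiver and name, the IC expects the feedback vector and the Smi-tagged feedback slot in fixed registers named by the descriptors. The register contract implied by the DCHECKs (ia32; the named registers come straight from the old asserts being replaced):

    // receiver: LoadDescriptor::ReceiverRegister()          (edx)
    // name:     LoadDescriptor::NameRegister()              (ecx)
    // vector:   VectorLoadICDescriptor::VectorRegister()
    // slot:     VectorLoadICDescriptor::SlotRegister()      (eax, asserted)

DoLoadNamedGeneric and DoLoadKeyedGeneric below follow the same three-step pattern: materialize the name, fill the vector/slot registers when FLAG_vector_ics is set, then call the CodeFactory-produced IC.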
 
@@ -2852,7 +2864,7 @@
   // it as no longer deleted. We deoptimize in that case.
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
   }
 
   // Store the value.
@@ -2869,7 +2881,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       Label is_not_hole;
       __ j(not_equal, &is_not_hole, Label::kNear);
@@ -2890,7 +2902,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(target, factory()->the_hole_value());
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       __ j(not_equal, &skip_assignment, Label::kNear);
     }
@@ -2947,7 +2959,7 @@
 
 
 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
-  ASSERT(!operand->IsDoubleRegister());
+  DCHECK(!operand->IsDoubleRegister());
   if (operand->IsConstantOperand()) {
     Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
     AllowDeferredHandleDereference smi_check;
@@ -2965,12 +2977,15 @@
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
-  __ mov(ecx, instr->name());
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+  __ mov(LoadDescriptor::NameRegister(), instr->name());
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  }
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2980,23 +2995,13 @@
   Register temp = ToRegister(instr->temp());
   Register result = ToRegister(instr->result());
 
-  // Check that the function really is a function.
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
-  DeoptimizeIf(not_equal, instr->environment());
-
-  // Check whether the function has an instance prototype.
-  Label non_instance;
-  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
-            1 << Map::kHasNonInstancePrototype);
-  __ j(not_zero, &non_instance, Label::kNear);
-
   // Get the prototype or initial map from the function.
   __ mov(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
 
   // Check that the function has a prototype or an initial map.
   __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "hole");
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3005,12 +3010,6 @@
 
   // Get the prototype from the initial map.
   __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-  __ jmp(&done, Label::kNear);
-
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in the function's map.
-  __ bind(&non_instance);
-  __ mov(result, FieldOperand(result, Map::kConstructorOffset));
 
   // All done.
   __ bind(&done);
@@ -3095,7 +3094,7 @@
         __ mov(result, operand);
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
           __ test(result, Operand(result));
-          DeoptimizeIf(negative, instr->environment());
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3125,7 +3124,7 @@
         FAST_DOUBLE_ELEMENTS,
         instr->base_offset() + sizeof(kHoleNanLower32));
     __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
   }
 
   Operand double_load_operand = BuildFastArrayOperand(
@@ -3144,20 +3143,18 @@
 
   // Load the result.
   __ mov(result,
-         BuildFastArrayOperand(instr->elements(),
-                               instr->key(),
+         BuildFastArrayOperand(instr->elements(), instr->key(),
                                instr->hydrogen()->key()->representation(),
-                               FAST_ELEMENTS,
-                               instr->base_offset()));
+                               FAST_ELEMENTS, instr->base_offset()));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
       __ test(result, Immediate(kSmiTagMask));
-      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(not_equal, instr, "not a Smi");
     } else {
       __ cmp(result, factory()->the_hole_value());
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     }
   }
 }
@@ -3206,11 +3203,15 @@
 
 
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->key()).is(ecx));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3300,17 +3301,17 @@
 
   // The receiver should be a JS object.
   __ test(receiver, Immediate(kSmiTagMask));
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "Smi");
   __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
-  DeoptimizeIf(below, instr->environment());
+  DeoptimizeIf(below, instr, "not a JavaScript object");
 
   __ jmp(&receiver_ok, Label::kNear);
   __ bind(&global_object);
   __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
   const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
   __ mov(receiver, Operand(receiver, global_offset));
-  const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
-  __ mov(receiver, FieldOperand(receiver, receiver_offset));
+  const int proxy_offset = GlobalObject::kGlobalProxyOffset;
+  __ mov(receiver, FieldOperand(receiver, proxy_offset));
   __ bind(&receiver_ok);
 }
 
@@ -3320,15 +3321,15 @@
   Register function = ToRegister(instr->function());
   Register length = ToRegister(instr->length());
   Register elements = ToRegister(instr->elements());
-  ASSERT(receiver.is(eax));  // Used for parameter count.
-  ASSERT(function.is(edi));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(receiver.is(eax));  // Used for parameter count.
+  DCHECK(function.is(edi));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
   __ cmp(length, kArgumentsLimit);
-  DeoptimizeIf(above, instr->environment());
+  DeoptimizeIf(above, instr, "too many arguments");
 
   __ push(receiver);
   __ mov(receiver, length);
@@ -3346,7 +3347,7 @@
 
   // Invoke the function.
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
@@ -3383,17 +3384,17 @@
     __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
     // If there is no frame, the context must be in esi.
-    ASSERT(result.is(esi));
+    DCHECK(result.is(esi));
   }
 }
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   __ push(esi);  // The context is the first argument.
   __ push(Immediate(instr->hydrogen()->pairs()));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
-  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }
 
 
@@ -3440,8 +3441,34 @@
 }
 
 
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+
+  Register scratch = ebx;
+  Register extra = eax;
+  DCHECK(!scratch.is(receiver) && !scratch.is(name));
+  DCHECK(!extra.is(receiver) && !extra.is(name));
+
+  // Important for the tail-call.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, extra);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ leave();
+  LoadIC::GenerateMiss(masm());
+}
+
+
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -3452,7 +3479,7 @@
     generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
     __ call(code, RelocInfo::CODE_TARGET);
   } else {
-    ASSERT(instr->target()->IsRegister());
+    DCHECK(instr->target()->IsRegister());
     Register target = ToRegister(instr->target());
     generator.BeforeCall(__ CallSize(Operand(target)));
     __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -3463,8 +3490,8 @@
 
 
 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(edi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   if (instr->hydrogen()->pass_argument_count()) {
     __ mov(eax, instr->arity());
@@ -3495,7 +3522,7 @@
   Register input_reg = ToRegister(instr->value());
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
          factory()->heap_number_map());
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a heap number");
 
   Label slow, allocated, done;
   Register tmp = input_reg.is(eax) ? ecx : eax;
@@ -3517,7 +3544,7 @@
 
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
-  CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                           instr, instr->context());
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -3542,27 +3569,27 @@
   Label is_positive;
   __ j(not_sign, &is_positive, Label::kNear);
   __ neg(input_reg);  // Sets flags.
-  DeoptimizeIf(negative, instr->environment());
+  DeoptimizeIf(negative, instr, "overflow");
   __ bind(&is_positive);
 }
 
 
 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   // Class for deferred case.
-  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                     LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LMathAbs* instr_;
   };
 
-  ASSERT(instr->value()->Equals(instr->result()));
+  DCHECK(instr->value()->Equals(instr->result()));
   Representation r = instr->hydrogen()->value()->representation();
 
   if (r.IsDouble()) {
@@ -3600,20 +3627,20 @@
       __ j(not_equal, &non_zero, Label::kNear);
       __ movmskpd(output_reg, input_reg);
       __ test(output_reg, Immediate(1));
-      DeoptimizeIf(not_zero, instr->environment());
+      DeoptimizeIf(not_zero, instr, "minus zero");
       __ bind(&non_zero);
     }
     __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
     __ cvttsd2si(output_reg, Operand(xmm_scratch));
     // Overflow is signalled with minint.
     __ cmp(output_reg, 0x1);
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   } else {
     Label negative_sign, done;
     // Deoptimize on unordered.
     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
     __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(parity_even, instr->environment());
+    DeoptimizeIf(parity_even, instr, "NaN");
     __ j(below, &negative_sign, Label::kNear);
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3622,7 +3649,7 @@
       __ j(above, &positive_sign, Label::kNear);
       __ movmskpd(output_reg, input_reg);
       __ test(output_reg, Immediate(1));
-      DeoptimizeIf(not_zero, instr->environment());
+      DeoptimizeIf(not_zero, instr, "minus zero");
       __ Move(output_reg, Immediate(0));
       __ jmp(&done, Label::kNear);
       __ bind(&positive_sign);
@@ -3632,7 +3659,7 @@
     __ cvttsd2si(output_reg, Operand(input_reg));
     // Overflow is signalled with minint.
     __ cmp(output_reg, 0x1);
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
     __ jmp(&done, Label::kNear);
 
     // Non-zero negative reaches here.
@@ -3643,7 +3670,7 @@
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ sub(output_reg, Immediate(1));
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
 
     __ bind(&done);
   }
@@ -3671,8 +3698,7 @@
   __ cvttsd2si(output_reg, Operand(xmm_scratch));
   // Overflow is signalled with minint.
   __ cmp(output_reg, 0x1);
-  __ RecordComment("D2I conversion overflow");
-  DeoptimizeIf(overflow, instr->environment());
+  DeoptimizeIf(overflow, instr, "overflow");
   __ jmp(&done, dist);
 
   __ bind(&below_one_half);
@@ -3687,8 +3713,7 @@
   __ cvttsd2si(output_reg, Operand(input_temp));
   // Catch minint due to overflow, and to prevent overflow when compensating.
   __ cmp(output_reg, 0x1);
-  __ RecordComment("D2I conversion overflow");
-  DeoptimizeIf(overflow, instr->environment());
+  DeoptimizeIf(overflow, instr, "overflow");
 
   __ Cvtsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
@@ -3704,14 +3729,21 @@
     // If the sign is positive, we return +0.
     __ movmskpd(output_reg, input_reg);
     __ test(output_reg, Immediate(1));
-    __ RecordComment("Minus zero");
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "minus zero");
   }
   __ Move(output_reg, Immediate(0));
   __ bind(&done);
 }
 
 
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister output_reg = ToDoubleRegister(instr->result());
+  __ cvtsd2ss(output_reg, input_reg);
+  __ cvtss2sd(output_reg, output_reg);
+}
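The new DoMathFround lowers Math.fround by narrowing to single precision and widening back, which is exactly round-to-nearest-float32 semantics. What the cvtsd2ss/cvtss2sd pair computes, in portable form (a sketch, not V8 code):

    static inline double Fround(double x) {
      // cvtsd2ss rounds to the nearest representable float32
      // (ties to even); cvtss2sd then widens losslessly.
      return static_cast<double>(static_cast<float>(x));
    }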
+
+
 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   Operand input = ToOperand(instr->value());
   XMMRegister output = ToDoubleRegister(instr->result());
@@ -3723,7 +3755,7 @@
   XMMRegister xmm_scratch = double_scratch0();
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   Register scratch = ToRegister(instr->temp());
-  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
 
   // Note that according to ECMA-262 15.8.2.13:
   // Math.pow(-Infinity, 0.5) == Infinity
@@ -3757,21 +3789,23 @@
   Representation exponent_type = instr->hydrogen()->right()->representation();
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
-  ASSERT(!instr->right()->IsDoubleRegister() ||
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
          ToDoubleRegister(instr->right()).is(xmm1));
-  ASSERT(!instr->right()->IsRegister() ||
-         ToRegister(instr->right()).is(eax));
-  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
+  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
 
   if (exponent_type.IsSmi()) {
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
-    __ JumpIfSmi(eax, &no_deopt);
-    __ CmpObjectType(eax, HEAP_NUMBER_TYPE, ecx);
-    DeoptimizeIf(not_equal, instr->environment());
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!ecx.is(tagged_exponent));
+    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, ecx);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
     __ bind(&no_deopt);
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
@@ -3779,7 +3813,7 @@
     MathPowStub stub(isolate(), MathPowStub::INTEGER);
     __ CallStub(&stub);
   } else {
-    ASSERT(exponent_type.IsDouble());
+    DCHECK(exponent_type.IsDouble());
     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
     __ CallStub(&stub);
   }
@@ -3787,7 +3821,7 @@
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
-  ASSERT(instr->value()->Equals(instr->result()));
+  DCHECK(instr->value()->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   XMMRegister xmm_scratch = double_scratch0();
   Label positive, done, zero;
@@ -3843,9 +3877,9 @@
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->function()).is(edi));
-  ASSERT(instr->HasPointerMap());
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
@@ -3865,9 +3899,9 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->function()).is(edi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
@@ -3876,9 +3910,9 @@
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->constructor()).is(edi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->constructor()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   // No cell in ebx for construct type feedback in optimized code
   __ mov(ebx, isolate()->factory()->undefined_value());
@@ -3889,9 +3923,9 @@
 
 
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->constructor()).is(edi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->constructor()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   __ Move(eax, Immediate(instr->arity()));
   __ mov(ebx, isolate()->factory()->undefined_value());
@@ -3934,7 +3968,7 @@
 
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
 }
 
@@ -3967,7 +4001,7 @@
   int offset = access.offset();
 
   if (access.IsExternalMemory()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     MemOperand operand = instr->object()->IsConstantOperand()
         ? MemOperand::StaticVariable(
             ToExternalReference(LConstantOperand::cast(instr->object())))
@@ -3985,13 +4019,13 @@
   Register object = ToRegister(instr->object());
   __ AssertNotSmi(object);
 
-  ASSERT(!representation.IsSmi() ||
+  DCHECK(!representation.IsSmi() ||
          !instr->value()->IsConstantOperand() ||
          IsSmi(LConstantOperand::cast(instr->value())));
   if (representation.IsDouble()) {
-    ASSERT(access.IsInobject());
-    ASSERT(!instr->hydrogen()->has_transition());
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     XMMRegister value = ToDoubleRegister(instr->value());
     __ movsd(FieldOperand(object, offset), value);
     return;
@@ -4024,11 +4058,11 @@
       __ Store(value, operand, representation);
     } else if (representation.IsInteger32()) {
       Immediate immediate = ToImmediate(operand_value, representation);
-      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
       __ mov(operand, immediate);
     } else {
       Handle<Object> handle_value = ToHandle(operand_value);
-      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
       __ mov(operand, handle_value);
     }
   } else {
@@ -4053,11 +4087,11 @@
 
 
 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->value()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  __ mov(ecx, instr->name());
+  __ mov(StoreDescriptor::NameRegister(), instr->name());
   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -4083,7 +4117,7 @@
     __ int3();
     __ bind(&done);
   } else {
-    DeoptimizeIf(cc, instr->environment());
+    DeoptimizeIf(cc, instr, "out of bounds");
   }
 }
 
@@ -4196,16 +4230,16 @@
       Immediate immediate = ToImmediate(operand_value, Representation::Smi());
       __ mov(operand, immediate);
     } else {
-      ASSERT(!IsInteger32(operand_value));
+      DCHECK(!IsInteger32(operand_value));
       Handle<Object> handle_value = ToHandle(operand_value);
       __ mov(operand, handle_value);
     }
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    ASSERT(instr->value()->IsRegister());
+    DCHECK(instr->value()->IsRegister());
     Register value = ToRegister(instr->value());
-    ASSERT(!instr->key()->IsConstantOperand());
+    DCHECK(!instr->key()->IsConstantOperand());
     SmiCheck check_needed =
         instr->hydrogen()->value()->type().IsHeapObject()
           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
@@ -4235,14 +4269,13 @@
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->key()).is(ecx));
-  ASSERT(ToRegister(instr->value()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  Handle<Code> ic = instr->strict_mode() == STRICT
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4252,7 +4285,7 @@
   Register temp = ToRegister(instr->temp());
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "memento found");
   __ bind(&no_memento_found);
 }
 
@@ -4277,13 +4310,13 @@
     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
            Immediate(to_map));
     // Write barrier.
-    ASSERT_NE(instr->temp(), NULL);
+    DCHECK_NE(instr->temp(), NULL);
     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
                          ToRegister(instr->temp()),
                          kDontSaveFPRegs);
   } else {
-    ASSERT(ToRegister(instr->context()).is(esi));
-    ASSERT(object_reg.is(eax));
+    DCHECK(ToRegister(instr->context()).is(esi));
+    DCHECK(object_reg.is(eax));
     PushSafepointRegistersScope scope(this);
     __ mov(ebx, to_map);
     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
@@ -4297,15 +4330,15 @@
 
 
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
-  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen,
                              LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharCodeAt(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4346,7 +4379,7 @@
     __ SmiTag(index);
     __ push(index);
   }
-  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
                           instr, instr->context());
   __ AssertSmi(eax);
   __ SmiUntag(eax);
@@ -4355,15 +4388,15 @@
 
 
 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
-  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+  class DeferredStringCharFromCode FINAL : public LDeferredCode {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen,
                                LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -4371,10 +4404,10 @@
   DeferredStringCharFromCode* deferred =
       new(zone()) DeferredStringCharFromCode(this, instr);
 
-  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   Register char_code = ToRegister(instr->char_code());
   Register result = ToRegister(instr->result());
-  ASSERT(!char_code.is(result));
+  DCHECK(!char_code.is(result));
 
   __ cmp(char_code, String::kMaxOneByteCharCode);
   __ j(above, deferred->entry());
@@ -4406,9 +4439,9 @@
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->left()).is(edx));
-  ASSERT(ToRegister(instr->right()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
   StringAddStub stub(isolate(),
                      instr->hydrogen()->flags(),
                      instr->hydrogen()->pretenure_flag());
@@ -4419,8 +4452,8 @@
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->value();
   LOperand* output = instr->result();
-  ASSERT(input->IsRegister() || input->IsStackSlot());
-  ASSERT(output->IsDoubleRegister());
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  DCHECK(output->IsDoubleRegister());
   __ Cvtsi2sd(ToDoubleRegister(output), ToOperand(input));
 }
 
@@ -4433,22 +4466,22 @@
 
 
 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
-  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagI FINAL : public LDeferredCode {
    public:
     DeferredNumberTagI(LCodeGen* codegen,
                        LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(
           instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagI* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
   Register reg = ToRegister(input);
 
   DeferredNumberTagI* deferred =
@@ -4460,21 +4493,21 @@
 
 
 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
-  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagU FINAL : public LDeferredCode {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(
           instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagU* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
   Register reg = ToRegister(input);
 
   DeferredNumberTagU* deferred =
@@ -4524,11 +4557,11 @@
 
     // NumberTagI and NumberTagD use the context from the frame, rather than
     // the environment's HContext or HInlinedContext value.
-    // They only call Runtime::kHiddenAllocateHeapNumber.
+    // They only call Runtime::kAllocateHeapNumber.
     // The corresponding HChange instructions are added in a phase that does
     // not have easy access to the local context.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(reg, eax);
@@ -4542,14 +4575,14 @@
 
 
 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
-  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagD FINAL : public LDeferredCode {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagD(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -4580,11 +4613,11 @@
   PushSafepointRegistersScope scope(this);
   // NumberTagI and NumberTagD use the context from the frame, rather than
   // the environment's HContext or HInlinedContext value.
-  // They only call Runtime::kHiddenAllocateHeapNumber.
+  // They only call Runtime::kAllocateHeapNumber.
   // The corresponding HChange instructions are added in a phase that does
   // not have easy access to the local context.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ StoreToSafepointRegisterSlot(reg, eax);
@@ -4597,12 +4630,12 @@
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
     __ test(input, Immediate(0xc0000000));
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "overflow");
   }
   __ SmiTag(input);
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       !hchange->value()->CheckFlag(HValue::kUint32)) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
@@ -4610,10 +4643,10 @@
 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   LOperand* input = instr->value();
   Register result = ToRegister(input);
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
   if (instr->needs_check()) {
     __ test(result, Immediate(kSmiTagMask));
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "not a Smi");
   } else {
     __ AssertSmi(result);
   }
@@ -4621,13 +4654,13 @@
 }
 
 
-void LCodeGen::EmitNumberUntagD(Register input_reg,
-                                Register temp_reg,
-                                XMMRegister result_reg,
-                                bool can_convert_undefined_to_nan,
-                                bool deoptimize_on_minus_zero,
-                                LEnvironment* env,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                Register temp_reg, XMMRegister result_reg,
                                 NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
   Label convert, load_smi, done;
 
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -4640,7 +4673,7 @@
     if (can_convert_undefined_to_nan) {
       __ j(not_equal, &convert, Label::kNear);
     } else {
-      DeoptimizeIf(not_equal, env);
+      DeoptimizeIf(not_equal, instr, "not a heap number");
     }
 
     // Heap number to XMM conversion.
@@ -4653,7 +4686,7 @@
       __ j(not_zero, &done, Label::kNear);
       __ movmskpd(temp_reg, result_reg);
       __ test_b(temp_reg, 1);
-      DeoptimizeIf(not_zero, env);
+      DeoptimizeIf(not_zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
 
@@ -4662,7 +4695,7 @@
 
       // Convert undefined (and hole) to NaN.
       __ cmp(input_reg, factory()->undefined_value());
-      DeoptimizeIf(not_equal, env);
+      DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
 
       ExternalReference nan =
           ExternalReference::address_of_canonical_non_hole_nan();
@@ -4670,7 +4703,7 @@
       __ jmp(&done, Label::kNear);
     }
   } else {
-    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   }
 
   __ bind(&load_smi);
@@ -4716,40 +4749,48 @@
 
     __ bind(&check_false);
     __ cmp(input_reg, factory()->false_value());
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
     __ Move(input_reg, Immediate(0));
   } else {
-    Label bailout;
-    XMMRegister scratch = (instr->temp() != NULL)
-        ? ToDoubleRegister(instr->temp())
-        : no_xmm_reg;
-    __ TaggedToI(input_reg, input_reg, scratch,
-                 instr->hydrogen()->GetMinusZeroMode(), &bailout);
-    __ jmp(done);
-    __ bind(&bailout);
-    DeoptimizeIf(no_condition, instr->environment());
+    XMMRegister scratch = ToDoubleRegister(instr->temp());
+    DCHECK(!scratch.is(xmm0));
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           isolate()->factory()->heap_number_map());
+    DeoptimizeIf(not_equal, instr, "not a heap number");
+    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ cvttsd2si(input_reg, Operand(xmm0));
+    __ Cvtsi2sd(scratch, Operand(input_reg));
+    __ ucomisd(xmm0, scratch);
+    DeoptimizeIf(not_equal, instr, "lost precision");
+    DeoptimizeIf(parity_even, instr, "NaN");
+    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+      __ test(input_reg, Operand(input_reg));
+      __ j(not_zero, done);
+      __ movmskpd(input_reg, xmm0);
+      __ and_(input_reg, 1);
+      DeoptimizeIf(not_zero, instr, "minus zero");
+    }
   }
 }
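The deferred tagged-to-int32 path above stops delegating to a single-bailout TaggedToI helper and inlines the conversion so every failure carries its own reason. Restated in scalar form (an illustrative sketch; the emitted code needs no explicit range guard because out-of-range cvttsd2si yields kMinInt, which the round-trip compare rejects):

    #include <cmath>
    #include <cstdint>

    static bool DoubleToInt32Exact(double d, int32_t* out) {
      if (std::isnan(d)) return false;                    // "NaN"
      if (d < INT32_MIN || d > INT32_MAX) return false;   // out of range
      int32_t i = static_cast<int32_t>(d);                // cvttsd2si
      if (static_cast<double>(i) != d) return false;      // "lost precision"
      if (i == 0 && std::signbit(d)) return false;        // "minus zero"
      *out = i;
      return true;
    }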
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+  class DeferredTaggedToI FINAL : public LDeferredCode {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredTaggedToI(instr_, done());
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LTaggedToI* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   Register input_reg = ToRegister(input);
-  ASSERT(input_reg.is(ToRegister(instr->result())));
+  DCHECK(input_reg.is(ToRegister(instr->result())));
 
   if (instr->hydrogen()->value()->representation().IsSmi()) {
     __ SmiUntag(input_reg);
@@ -4770,15 +4811,13 @@
 
 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   LOperand* temp = instr->temp();
-  ASSERT(temp->IsRegister());
+  DCHECK(temp->IsRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsDoubleRegister());
+  DCHECK(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
-  bool deoptimize_on_minus_zero =
-      instr->hydrogen()->deoptimize_on_minus_zero();
   Register temp_reg = ToRegister(temp);
 
   HValue* value = instr->hydrogen()->value();
@@ -4786,35 +4825,35 @@
       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
 
   XMMRegister result_reg = ToDoubleRegister(result);
-  EmitNumberUntagD(input_reg,
-                   temp_reg,
-                   result_reg,
-                   instr->hydrogen()->can_convert_undefined_to_nan(),
-                   deoptimize_on_minus_zero,
-                   instr->environment(),
-                   mode);
+  EmitNumberUntagD(instr, input_reg, temp_reg, result_reg, mode);
 }
 
 
 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsDoubleRegister());
+  DCHECK(input->IsDoubleRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsRegister());
+  DCHECK(result->IsRegister());
   Register result_reg = ToRegister(result);
 
   if (instr->truncating()) {
     XMMRegister input_reg = ToDoubleRegister(input);
     __ TruncateDoubleToI(result_reg, input_reg);
   } else {
-    Label bailout, done;
+    Label lost_precision, is_nan, minus_zero, done;
     XMMRegister input_reg = ToDoubleRegister(input);
     XMMRegister xmm_scratch = double_scratch0();
-     __ DoubleToI(result_reg, input_reg, xmm_scratch,
-         instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-    __ jmp(&done, Label::kNear);
-    __ bind(&bailout);
-    DeoptimizeIf(no_condition, instr->environment());
+    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
+    __ DoubleToI(result_reg, input_reg, xmm_scratch,
+                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+                 &is_nan, &minus_zero, dist);
+    __ jmp(&done, dist);
+    __ bind(&lost_precision);
+    DeoptimizeIf(no_condition, instr, "lost precision");
+    __ bind(&is_nan);
+    DeoptimizeIf(no_condition, instr, "NaN");
+    __ bind(&minus_zero);
+    DeoptimizeIf(no_condition, instr, "minus zero");
     __ bind(&done);
   }
 }
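DoDoubleToI and DoDoubleToSmi now receive three distinct bailout labels from the macro assembler instead of one, so each failure maps to its own deopt reason. Assumed declaration of the updated ia32 helper, reconstructed from the call sites above (names illustrative):

    // In MacroAssembler (src/ia32/macro-assembler-ia32.h):
    void DoubleToI(Register result_reg, XMMRegister input_reg,
                   XMMRegister scratch, MinusZeroMode minus_zero_mode,
                   Label* lost_precision, Label* is_nan, Label* minus_zero,
                   Label::Distance dst = Label::kFar);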
@@ -4822,30 +4861,35 @@
 
 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsDoubleRegister());
+  DCHECK(input->IsDoubleRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsRegister());
+  DCHECK(result->IsRegister());
   Register result_reg = ToRegister(result);
 
-  Label bailout, done;
+  Label lost_precision, is_nan, minus_zero, done;
   XMMRegister input_reg = ToDoubleRegister(input);
   XMMRegister xmm_scratch = double_scratch0();
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   __ DoubleToI(result_reg, input_reg, xmm_scratch,
-      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-  __ jmp(&done, Label::kNear);
-  __ bind(&bailout);
-  DeoptimizeIf(no_condition, instr->environment());
+               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+               &minus_zero, dist);
+  __ jmp(&done, dist);
+  __ bind(&lost_precision);
+  DeoptimizeIf(no_condition, instr, "lost precision");
+  __ bind(&is_nan);
+  DeoptimizeIf(no_condition, instr, "NaN");
+  __ bind(&minus_zero);
+  DeoptimizeIf(no_condition, instr, "minus zero");
   __ bind(&done);
-
   __ SmiTag(result_reg);
-  DeoptimizeIf(overflow, instr->environment());
+  DeoptimizeIf(overflow, instr, "overflow");
 }
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
   __ test(ToOperand(input), Immediate(kSmiTagMask));
-  DeoptimizeIf(not_zero, instr->environment());
+  DeoptimizeIf(not_zero, instr, "not a Smi");
 }
 
 
@@ -4853,7 +4897,7 @@
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
     LOperand* input = instr->value();
     __ test(ToOperand(input), Immediate(kSmiTagMask));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "Smi");
   }
 }
 
@@ -4874,14 +4918,14 @@
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
-      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     } else {
-      DeoptimizeIf(below, instr->environment());
+      DeoptimizeIf(below, instr, "wrong instance type");
       // Omit check for the last type.
       if (last != LAST_TYPE) {
         __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                 static_cast<int8_t>(last));
-        DeoptimizeIf(above, instr->environment());
+        DeoptimizeIf(above, instr, "wrong instance type");
       }
     }
   } else {
@@ -4889,15 +4933,15 @@
     uint8_t tag;
     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
 
-    if (IsPowerOf2(mask)) {
-      ASSERT(tag == 0 || IsPowerOf2(tag));
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
       __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
-      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
     } else {
       __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
       __ and_(temp, mask);
       __ cmp(temp, tag);
-      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     }
   }
 }
@@ -4913,7 +4957,7 @@
     Operand operand = ToOperand(instr->value());
     __ cmp(operand, object);
   }
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "value mismatch");
 }
 
 
@@ -4928,22 +4972,22 @@
 
     __ test(eax, Immediate(kSmiTagMask));
   }
-  DeoptimizeIf(zero, instr->environment());
+  DeoptimizeIf(zero, instr, "instance migration failed");
 }
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+  class DeferredCheckMaps FINAL : public LDeferredCode {
    public:
     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr,  Register object)
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -4959,7 +5003,7 @@
   }
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   Register reg = ToRegister(input);
 
   DeferredCheckMaps* deferred = NULL;
@@ -4981,7 +5025,7 @@
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ j(not_equal, deferred->entry());
   } else {
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "wrong map");
   }
 
   __ bind(&success);
@@ -4997,14 +5041,14 @@
 
 
 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
-  ASSERT(instr->unclamped()->Equals(instr->result()));
+  DCHECK(instr->unclamped()->Equals(instr->result()));
   Register value_reg = ToRegister(instr->result());
   __ ClampUint8(value_reg);
 }
 
 
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
-  ASSERT(instr->unclamped()->Equals(instr->result()));
+  DCHECK(instr->unclamped()->Equals(instr->result()));
   Register input_reg = ToRegister(instr->unclamped());
   XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
   XMMRegister xmm_scratch = double_scratch0();
@@ -5020,7 +5064,7 @@
   // Check for undefined. Undefined is converted to zero for clamping
   // conversions.
   __ cmp(input_reg, factory()->undefined_value());
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
   __ mov(input_reg, 0);
   __ jmp(&done, Label::kNear);
 
@@ -5076,14 +5120,14 @@
 
 
 void LCodeGen::DoAllocate(LAllocate* instr) {
-  class DeferredAllocate V8_FINAL : public LDeferredCode {
+  class DeferredAllocate FINAL : public LDeferredCode {
    public:
     DeferredAllocate(LCodeGen* codegen,  LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredAllocate(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LAllocate* instr_;
   };
@@ -5099,11 +5143,11 @@
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   }
 
@@ -5151,7 +5195,7 @@
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
     Register size = ToRegister(instr->size());
-    ASSERT(!size.is(result));
+    DCHECK(!size.is(result));
     __ SmiTag(ToRegister(instr->size()));
     __ push(size);
   } else {
@@ -5168,11 +5212,11 @@
   int flags = AllocateDoubleAlignFlag::encode(
       instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
@@ -5180,20 +5224,20 @@
   __ push(Immediate(Smi::FromInt(flags)));
 
   CallRuntimeFromDeferred(
-      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, eax);
 }
 
 
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  ASSERT(ToRegister(instr->value()).is(eax));
+  DCHECK(ToRegister(instr->value()).is(eax));
   __ push(eax);
   CallRuntime(Runtime::kToFastProperties, 1, instr);
 }
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   Label materialized;
   // Registers will be used as follows:
   // ecx = literals array.
@@ -5213,7 +5257,7 @@
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
   __ push(Immediate(instr->hydrogen()->pattern()));
   __ push(Immediate(instr->hydrogen()->flags()));
-  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ mov(ebx, eax);
 
   __ bind(&materialized);
@@ -5225,7 +5269,7 @@
   __ bind(&runtime_allocate);
   __ push(ebx);
   __ push(Immediate(Smi::FromInt(size)));
-  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ pop(ebx);
 
   __ bind(&allocated);
@@ -5245,14 +5289,13 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && instr->hydrogen()->has_no_literals()) {
-    FastNewClosureStub stub(isolate(),
-                            instr->hydrogen()->strict_mode(),
-                            instr->hydrogen()->is_generator());
+    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+                            instr->hydrogen()->kind());
     __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -5260,13 +5303,13 @@
     __ push(Immediate(instr->hydrogen()->shared_info()));
     __ push(Immediate(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
-    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
   }
 }
 
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   LOperand* input = instr->value();
   EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
@@ -5320,11 +5363,6 @@
     __ cmp(input, factory()->false_value());
     final_branch_condition = equal;
 
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(type_name, factory()->null_string())) {
-    __ cmp(input, factory()->null_value());
-    final_branch_condition = equal;
-
   } else if (String::Equals(type_name, factory()->undefined_string())) {
     __ cmp(input, factory()->undefined_value());
     __ j(equal, true_label, true_distance);
@@ -5345,10 +5383,8 @@
 
   } else if (String::Equals(type_name, factory()->object_string())) {
     __ JumpIfSmi(input, false_label, false_distance);
-    if (!FLAG_harmony_typeof) {
-      __ cmp(input, factory()->null_value());
-      __ j(equal, true_label, true_distance);
-    }
+    __ cmp(input, factory()->null_value());
+    __ j(equal, true_label, true_distance);
     __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
     __ j(below, false_label, false_distance);
     __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -5407,7 +5443,7 @@
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   last_lazy_deopt_pc_ = masm()->pc_offset();
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5423,8 +5459,7 @@
   if (info()->IsStub() && type == Deoptimizer::EAGER) {
     type = Deoptimizer::LAZY;
   }
-  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
-  DeoptimizeIf(no_condition, instr->environment(), type);
+  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
 }
 
 
@@ -5441,29 +5476,29 @@
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  class DeferredStackCheck V8_FINAL : public LDeferredCode {
+  class DeferredStackCheck FINAL : public LDeferredCode {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStackCheck(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStackCheck* instr_;
   };
 
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   // There is no LLazyBailout instruction for stack-checks. We have to
   // prepare for lazy deoptimization explicitly here.
@@ -5475,14 +5510,14 @@
     __ cmp(esp, Operand::StaticVariable(stack_limit));
     __ j(above_equal, &done, Label::kNear);
 
-    ASSERT(instr->context()->IsRegister());
-    ASSERT(ToRegister(instr->context()).is(esi));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(esi));
     CallCode(isolate()->builtins()->StackCheck(),
              RelocInfo::CODE_TARGET,
              instr);
     __ bind(&done);
   } else {
-    ASSERT(instr->hydrogen()->is_backwards_branch());
+    DCHECK(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
     DeferredStackCheck* deferred_stack_check =
         new(zone()) DeferredStackCheck(this, instr);
@@ -5509,7 +5544,7 @@
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
-  ASSERT(!environment->HasBeenRegistered());
+  DCHECK(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
 
   GenerateOsrPrologue();
@@ -5517,19 +5552,19 @@
 
 
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   __ cmp(eax, isolate()->factory()->undefined_value());
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "undefined");
 
   __ cmp(eax, isolate()->factory()->null_value());
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "null");
 
   __ test(eax, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr->environment());
+  DeoptimizeIf(zero, instr, "Smi");
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
-  DeoptimizeIf(below_equal, instr->environment());
+  DeoptimizeIf(below_equal, instr, "wrong instance type");
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(&call_runtime);
@@ -5544,7 +5579,7 @@
 
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "wrong map");
   __ bind(&use_cache);
 }
 
@@ -5567,7 +5602,7 @@
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
   __ bind(&done);
   __ test(result, result);
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "no cache");
 }
 
 
@@ -5575,7 +5610,7 @@
   Register object = ToRegister(instr->value());
   __ cmp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "wrong map");
 }
 
 
@@ -5594,7 +5629,7 @@
 
 
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
    public:
     DeferredLoadMutableDouble(LCodeGen* codegen,
                               LLoadFieldByIndex* instr,
@@ -5605,10 +5640,10 @@
           object_(object),
           index_(index) {
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LLoadFieldByIndex* instr_;
     Register object_;
@@ -5659,7 +5694,7 @@
   Handle<ScopeInfo> scope_info = instr->scope_info();
   __ Push(scope_info);
   __ push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+  CallRuntime(Runtime::kPushBlockContext, 2, instr);
   RecordSafepoint(Safepoint::kNoLazyDeopt);
 }
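
Throughout this file the debug-assertion macros are renamed: ASSERT becomes DCHECK, ASSERT_EQ becomes DCHECK_EQ, and SLOW_ASSERT / ENABLE_SLOW_ASSERTS become SLOW_DCHECK / ENABLE_SLOW_DCHECKS, with the definitions moving from src/checks.h to src/base/logging.h (see the header diff below). The semantics are unchanged: checks that fire only in debug builds. A simplified sketch of a macro in that style (the real definitions carry richer message formatting):

    #include <cstdio>
    #include <cstdlib>

    #ifdef DEBUG
    #define DCHECK(condition)                                \
      do {                                                   \
        if (!(condition)) {                                  \
          std::fprintf(stderr, "%s:%d: DCHECK failed: %s\n", \
                       __FILE__, __LINE__, #condition);      \
          std::abort();                                      \
        }                                                    \
      } while (false)
    #else
    #define DCHECK(condition) ((void)0)  // Compiled out in release builds.
    #endif

    int main() {
      DCHECK(2 + 2 == 4);  // Checked in debug builds only.
      return 0;
    }
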
 
diff --git a/src/ia32/lithium-codegen-ia32.h b/src/ia32/lithium-codegen-ia32.h
index 64a6b3c..0918252 100644
--- a/src/ia32/lithium-codegen-ia32.h
+++ b/src/ia32/lithium-codegen-ia32.h
@@ -7,7 +7,7 @@
 
 #include "src/ia32/lithium-ia32.h"
 
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/deoptimizer.h"
 #include "src/ia32/lithium-gap-resolver-ia32.h"
 #include "src/lithium-codegen.h"
@@ -148,8 +148,8 @@
 
   // Code generation passes.  Returns true if code generation should
   // continue.
-  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
-  void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
+  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+  void GenerateBodyInstructionPost(LInstruction* instr) OVERRIDE;
   bool GeneratePrologue();
   bool GenerateDeferredCode();
   bool GenerateJumpTable();
@@ -209,10 +209,9 @@
 
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition cc,
-                    LEnvironment* environment,
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
                     Deoptimizer::BailoutType bailout_type);
-  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
 
   bool DeoptEveryNTimes() {
     return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -259,7 +258,7 @@
                                     int arguments,
                                     Safepoint::DeoptMode mode);
 
-  void RecordAndWritePosition(int position) V8_OVERRIDE;
+  void RecordAndWritePosition(int position) OVERRIDE;
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
@@ -269,14 +268,8 @@
   void EmitBranch(InstrType instr, Condition cc);
   template<class InstrType>
   void EmitFalseBranch(InstrType instr, Condition cc);
-  void EmitNumberUntagD(
-      Register input,
-      Register temp,
-      XMMRegister result,
-      bool allow_undefined_as_nan,
-      bool deoptimize_on_minus_zero,
-      LEnvironment* env,
-      NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input, Register temp,
+                        XMMRegister result, NumberUntagDMode mode);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
@@ -311,7 +304,7 @@
                     int* offset,
                     AllocationSiteMode mode);
 
-  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
   void DoLoadKeyedExternalArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -319,6 +312,9 @@
   void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
   void DoStoreKeyedFixedArray(LStoreKeyed* instr);
 
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+
   void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
 
   // Emits code for pushing either a tagged constant, a (non-double)
@@ -356,18 +352,18 @@
 
   Safepoint::Kind expected_safepoint_kind_;
 
-  class PushSafepointRegistersScope V8_FINAL  BASE_EMBEDDED {
+  class PushSafepointRegistersScope FINAL  BASE_EMBEDDED {
    public:
     explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-      ASSERT(codegen_->info()->is_calling());
+      DCHECK(codegen_->info()->is_calling());
     }
 
     ~PushSafepointRegistersScope() {
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
       codegen_->masm_->PopSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
     }
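
The header change above captures the new DeoptimizeIf contract: instead of an LEnvironment*, callers hand over the LInstruction itself plus a human-readable detail string ("wrong map", "lost precision", and so on), with an optional explicit bailout type. A schematic of the call shape only, with types reduced to stand-ins (V8's real implementation also records positions and jump-table entries, and its default bailout type depends on whether the code is a stub, per DoDeoptimize above):

    #include <cstdio>

    enum Condition { no_condition, equal, not_equal };
    enum BailoutType { EAGER, LAZY };
    struct LInstruction { const char* mnemonic; };

    // Four-argument form: condition, instruction, detail, explicit type.
    void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
                      BailoutType type) {
      (void)cc;  // A real implementation emits a conditional jump on cc.
      std::printf("deopt(%s) at %s: %s\n", type == LAZY ? "lazy" : "eager",
                  instr->mnemonic, detail);
    }

    // Three-argument convenience overload; shown forwarding a plain eager
    // default, which simplifies the real selection logic.
    void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail) {
      DeoptimizeIf(cc, instr, detail, EAGER);
    }

    int main() {
      LInstruction check_maps = {"check-maps"};
      DeoptimizeIf(not_equal, &check_maps, "wrong map");
      return 0;
    }
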
diff --git a/src/ia32/lithium-gap-resolver-ia32.cc b/src/ia32/lithium-gap-resolver-ia32.cc
index 71a4a0e..682503b 100644
--- a/src/ia32/lithium-gap-resolver-ia32.cc
+++ b/src/ia32/lithium-gap-resolver-ia32.cc
@@ -6,8 +6,8 @@
 
 #if V8_TARGET_ARCH_IA32
 
-#include "src/ia32/lithium-gap-resolver-ia32.h"
 #include "src/ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-gap-resolver-ia32.h"
 
 namespace v8 {
 namespace internal {
@@ -21,7 +21,7 @@
 
 
 void LGapResolver::Resolve(LParallelMove* parallel_move) {
-  ASSERT(HasBeenReset());
+  DCHECK(HasBeenReset());
   // Build up a worklist of moves.
   BuildInitialMoveList(parallel_move);
 
@@ -38,13 +38,13 @@
   // Perform the moves with constant sources.
   for (int i = 0; i < moves_.length(); ++i) {
     if (!moves_[i].IsEliminated()) {
-      ASSERT(moves_[i].source()->IsConstantOperand());
+      DCHECK(moves_[i].source()->IsConstantOperand());
       EmitMove(i);
     }
   }
 
   Finish();
-  ASSERT(HasBeenReset());
+  DCHECK(HasBeenReset());
 }
 
 
@@ -70,12 +70,12 @@
   // which means that a call to PerformMove could change any source operand
   // in the move graph.
 
-  ASSERT(!moves_[index].IsPending());
-  ASSERT(!moves_[index].IsRedundant());
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
 
   // Clear this move's destination to indicate a pending move.  The actual
   // destination is saved on the side.
-  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
   LOperand* destination = moves_[index].destination();
   moves_[index].set_destination(NULL);
 
@@ -116,7 +116,7 @@
   for (int i = 0; i < moves_.length(); ++i) {
     LMoveOperands other_move = moves_[i];
     if (other_move.Blocks(destination)) {
-      ASSERT(other_move.IsPending());
+      DCHECK(other_move.IsPending());
       EmitSwap(index);
       return;
     }
@@ -142,13 +142,13 @@
   LOperand* source = moves_[index].source();
   if (source->IsRegister()) {
     --source_uses_[source->index()];
-    ASSERT(source_uses_[source->index()] >= 0);
+    DCHECK(source_uses_[source->index()] >= 0);
   }
 
   LOperand* destination = moves_[index].destination();
   if (destination->IsRegister()) {
     --destination_uses_[destination->index()];
-    ASSERT(destination_uses_[destination->index()] >= 0);
+    DCHECK(destination_uses_[destination->index()] >= 0);
   }
 
   moves_[index].Eliminate();
@@ -190,12 +190,12 @@
 
 
 void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   // No operand should be the destination for more than one move.
   for (int i = 0; i < moves_.length(); ++i) {
     LOperand* destination = moves_[i].destination();
     for (int j = i + 1; j < moves_.length(); ++j) {
-      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
     }
   }
 #endif
@@ -259,13 +259,13 @@
   // Dispatch on the source and destination operand kinds.  Not all
   // combinations are possible.
   if (source->IsRegister()) {
-    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
     Register src = cgen_->ToRegister(source);
     Operand dst = cgen_->ToOperand(destination);
     __ mov(dst, src);
 
   } else if (source->IsStackSlot()) {
-    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
     Operand src = cgen_->ToOperand(source);
     if (destination->IsRegister()) {
       Register dst = cgen_->ToRegister(destination);
@@ -292,7 +292,7 @@
       }
     } else if (destination->IsDoubleRegister()) {
       double v = cgen_->ToDouble(constant_source);
-      uint64_t int_val = BitCast<uint64_t, double>(v);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
       int32_t lower = static_cast<int32_t>(int_val);
       int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
       XMMRegister dst = cgen_->ToDoubleRegister(destination);
@@ -305,7 +305,7 @@
         __ add(esp, Immediate(kDoubleSize));
       }
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
       Representation r = cgen_->IsSmi(constant_source)
           ? Representation::Smi() : Representation::Integer32();
@@ -324,12 +324,12 @@
       XMMRegister dst = cgen_->ToDoubleRegister(destination);
       __ movaps(dst, src);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsDoubleStackSlot());
       Operand dst = cgen_->ToOperand(destination);
       __ movsd(dst, src);
     }
   } else if (source->IsDoubleStackSlot()) {
-    ASSERT(destination->IsDoubleRegister() ||
+    DCHECK(destination->IsDoubleRegister() ||
            destination->IsDoubleStackSlot());
     Operand src = cgen_->ToOperand(source);
     if (destination->IsDoubleRegister()) {
@@ -414,7 +414,7 @@
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
     // XMM register-memory swap.  We rely on having xmm0
     // available as a fixed scratch register.
-    ASSERT(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
+    DCHECK(source->IsDoubleStackSlot() || destination->IsDoubleStackSlot());
     XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                               ? source
                                               : destination);
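
In EmitMove above, materializing a double constant reinterprets the value's bits as a uint64_t (BitCast is renamed to bit_cast in this sweep) and splits them into lower and upper 32-bit halves for the ia32 registers. A standalone sketch of that bit-splitting, using memcpy for the well-defined type pun:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // bit_cast in the style used above: reinterpret a double's object
    // representation as uint64_t without undefined behavior.
    static uint64_t DoubleToBits(double v) {
      uint64_t bits;
      static_assert(sizeof(bits) == sizeof(v), "size mismatch");
      std::memcpy(&bits, &v, sizeof(bits));
      return bits;
    }

    int main() {
      const int kBitsPerInt = 32;  // as on ia32
      uint64_t int_val = DoubleToBits(1.0);
      uint32_t lower = static_cast<uint32_t>(int_val);
      uint32_t upper = static_cast<uint32_t>(int_val >> kBitsPerInt);
      // 1.0 is 0x3FF0000000000000: upper half 0x3FF00000, lower half 0.
      std::printf("upper=0x%08X lower=0x%08X\n", upper, lower);
      return 0;
    }
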
diff --git a/src/ia32/lithium-gap-resolver-ia32.h b/src/ia32/lithium-gap-resolver-ia32.h
index 87549d0..43df245 100644
--- a/src/ia32/lithium-gap-resolver-ia32.h
+++ b/src/ia32/lithium-gap-resolver-ia32.h
@@ -15,7 +15,7 @@
 class LCodeGen;
 class LGapResolver;
 
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
  public:
   explicit LGapResolver(LCodeGen* owner);
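
The same sweep drops the V8_ prefix from the C++11 portability macros, so V8_FINAL and V8_OVERRIDE become FINAL and OVERRIDE. Both expand to the corresponding C++11 keywords when the compiler supports them and to nothing otherwise. Minimal stand-ins (V8's real definitions are compiler-feature-gated rather than keyed on __cplusplus alone):

    #if __cplusplus >= 201103L
    #define FINAL final
    #define OVERRIDE override
    #else
    #define FINAL
    #define OVERRIDE
    #endif

    struct DeferredCodeLike {
      virtual ~DeferredCodeLike() {}
      virtual void Generate() = 0;
    };

    // FINAL forbids further derivation; OVERRIDE makes the compiler verify
    // that the signature really overrides a virtual in the base class.
    struct DeferredExample FINAL : public DeferredCodeLike {
      virtual void Generate() OVERRIDE {}
    };

    int main() {
      DeferredExample d;
      d.Generate();
      return 0;
    }
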
 
diff --git a/src/ia32/lithium-ia32.cc b/src/ia32/lithium-ia32.cc
index 44b4ea5..3ed6623 100644
--- a/src/ia32/lithium-ia32.cc
+++ b/src/ia32/lithium-ia32.cc
@@ -6,10 +6,9 @@
 
 #if V8_TARGET_ARCH_IA32
 
-#include "src/lithium-allocator-inl.h"
-#include "src/ia32/lithium-ia32.h"
-#include "src/ia32/lithium-codegen-ia32.h"
 #include "src/hydrogen-osr.h"
+#include "src/ia32/lithium-codegen-ia32.h"
+#include "src/lithium-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -28,17 +27,17 @@
   // outputs because all registers are blocked by the calling convention.
   // Inputs operands must use a fixed register or use-at-start policy or
   // a non-register policy.
-  ASSERT(Output() == NULL ||
+  DCHECK(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||
+    DCHECK(operand->HasFixedPolicy() ||
            operand->IsUsedAtStart());
   }
   for (TempIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+    DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -358,7 +357,7 @@
   if (kind == DOUBLE_REGISTERS) {
     return LDoubleStackSlot::Create(index, zone());
   } else {
-    ASSERT(kind == GENERAL_REGISTERS);
+    DCHECK(kind == GENERAL_REGISTERS);
     return LStackSlot::Create(index, zone());
   }
 }
@@ -366,8 +365,9 @@
 
 void LStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
-  hydrogen()->access().PrintTo(stream);
-  stream->Add(" <- ");
+  OStringStream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.c_str());
   value()->PrintTo(stream);
 }
 
@@ -404,7 +404,7 @@
   }
 
   if (value() == NULL) {
-    ASSERT(hydrogen()->IsConstantHoleStore() &&
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
            hydrogen()->value()->representation().IsDouble());
     stream->Add("<the hole(nan)>");
   } else {
@@ -429,7 +429,7 @@
 
 
 LPlatformChunk* LChunkBuilder::Build() {
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
   LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
@@ -437,7 +437,7 @@
   // Reserve the first spill slot for the state of dynamic alignment.
   if (info()->IsOptimizing()) {
     int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
-    ASSERT_EQ(alignment_state_index, 0);
+    DCHECK_EQ(alignment_state_index, 0);
     USE(alignment_state_index);
   }
 
@@ -461,12 +461,6 @@
 }
 
 
-void LChunkBuilder::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
-  status_ = ABORTED;
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
   return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
                                   Register::ToAllocationIndex(reg));
@@ -627,9 +621,8 @@
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
   ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator,
-                                           &objects_to_materialize));
+  instr->set_environment(CreateEnvironment(
+      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
   return instr;
 }
 
@@ -663,7 +656,7 @@
 
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
-  ASSERT(!instr->HasPointerMap());
+  DCHECK(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(zone()));
   return instr;
 }
@@ -684,14 +677,14 @@
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
@@ -720,8 +713,8 @@
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
 
     HValue* right_value = instr->right();
@@ -762,9 +755,9 @@
 
 LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                            HArithmeticBinaryOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->left()->representation().IsDouble());
-  ASSERT(instr->right()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
   if (op == Token::MOD) {
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
@@ -783,8 +776,8 @@
                                            HBinaryOperation* instr) {
   HValue* left = instr->left();
   HValue* right = instr->right();
-  ASSERT(left->representation().IsTagged());
-  ASSERT(right->representation().IsTagged());
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left_operand = UseFixed(left, edx);
   LOperand* right_operand = UseFixed(right, eax);
@@ -795,7 +788,7 @@
 
 
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
-  ASSERT(is_building());
+  DCHECK(is_building());
   current_block_ = block;
   next_block_ = next_block;
   if (block->IsStartBlock()) {
@@ -804,13 +797,13 @@
   } else if (block->predecessors()->length() == 1) {
     // We have a single predecessor => copy environment and outgoing
     // argument count from the predecessor.
-    ASSERT(block->phis()->length() == 0);
+    DCHECK(block->phis()->length() == 0);
     HBasicBlock* pred = block->predecessors()->at(0);
     HEnvironment* last_environment = pred->last_environment();
-    ASSERT(last_environment != NULL);
+    DCHECK(last_environment != NULL);
     // Only copy the environment, if it is later used again.
     if (pred->end()->SecondSuccessor() == NULL) {
-      ASSERT(pred->end()->FirstSuccessor() == block);
+      DCHECK(pred->end()->FirstSuccessor() == block);
     } else {
       if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
           pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
@@ -818,7 +811,7 @@
       }
     }
     block->UpdateEnvironment(last_environment);
-    ASSERT(pred->argument_count() >= 0);
+    DCHECK(pred->argument_count() >= 0);
     argument_count_ = pred->argument_count();
   } else {
     // We are at a state join => process phis.
@@ -870,7 +863,7 @@
     if (current->OperandCount() == 0) {
       instr = DefineAsRegister(new(zone()) LDummy());
     } else {
-      ASSERT(!current->OperandAt(0)->IsControlInstruction());
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
       instr = DefineAsRegister(new(zone())
           LDummyUse(UseAny(current->OperandAt(0))));
     }
@@ -893,7 +886,7 @@
   }
 
   argument_count_ += current->argument_delta();
-  ASSERT(argument_count_ >= 0);
+  DCHECK(argument_count_ >= 0);
 
   if (instr != NULL) {
     AddInstruction(instr, current);
@@ -935,7 +928,7 @@
       LUnallocated* operand = LUnallocated::cast(it.Current());
       if (operand->HasFixedPolicy()) ++fixed;
     }
-    ASSERT(fixed == 0 || used_at_start == 0);
+    DCHECK(fixed == 0 || used_at_start == 0);
   }
 #endif
 
@@ -1000,7 +993,7 @@
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LCmpMapAndBranch(value);
 }
@@ -1124,14 +1117,13 @@
 
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
-  const CallInterfaceDescriptor* descriptor = instr->descriptor();
-
+  CallInterfaceDescriptor descriptor = instr->descriptor();
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
   ops.Add(target, zone());
   for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op = UseFixed(instr->OperandAt(i),
-        descriptor->GetParameterRegister(i - 1));
+    LOperand* op =
+        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
     ops.Add(op, zone());
   }
 
@@ -1141,6 +1133,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+    HTailCallThroughMegamorphicCache* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* receiver_register =
+      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+  LOperand* name_register =
+      UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  // Not marked as call. It can't deoptimize, and it never returns.
+  return new (zone()) LTailCallThroughMegamorphicCache(
+      context, receiver_register, name_register);
+}
+
+
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
@@ -1151,14 +1156,24 @@
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   switch (instr->op()) {
-    case kMathFloor: return DoMathFloor(instr);
-    case kMathRound: return DoMathRound(instr);
-    case kMathAbs: return DoMathAbs(instr);
-    case kMathLog: return DoMathLog(instr);
-    case kMathExp: return DoMathExp(instr);
-    case kMathSqrt: return DoMathSqrt(instr);
-    case kMathPowHalf: return DoMathPowHalf(instr);
-    case kMathClz32: return DoMathClz32(instr);
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
     default:
       UNREACHABLE();
       return NULL;
@@ -1181,6 +1196,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
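
The dispatch above gains a kMathFround case, handled by the new DoMathFround. Math.fround rounds a double to the nearest single-precision value; a float32 round-trip expresses the same semantics:

    #include <cstdio>

    // Math.fround semantics: round to the nearest float32, widen back.
    static double Fround(double x) {
      return static_cast<double>(static_cast<float>(x));
    }

    int main() {
      // 0.1 has no exact float32 representation, so fround perturbs it.
      std::printf("%.17g\n", Fround(0.1));  // prints 0.10000000149011612
      return 0;
    }
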
 LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
   LOperand* context = UseAny(instr->context());  // Deferred use.
   LOperand* input = UseRegisterAtStart(instr->value());
@@ -1194,8 +1216,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegisterAtStart(instr->value());
   return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
 }
@@ -1209,8 +1231,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* value = UseTempRegister(instr->value());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
@@ -1285,9 +1307,9 @@
 
 LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
-    ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
 
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1299,9 +1321,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
@@ -1317,9 +1339,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(eax);
@@ -1336,9 +1358,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), eax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(edx);
@@ -1385,9 +1407,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(eax);
@@ -1412,9 +1434,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), eax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(edx);
@@ -1441,14 +1463,15 @@
 
 
 LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
           dividend, divisor));
-  if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
     result = AssignEnvironment(result);
   }
   return result;
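
The extra kLeftCanBeNegative guard above avoids assigning an environment when the dividend is provably non-negative: x % 2^k can only produce minus zero when x itself is negative, so the minus-zero bailout is dead code otherwise. A quick check of that sign rule (C's fmod matches the JS '%' behavior for zero results):

    #include <cmath>
    #include <cstdio>

    int main() {
      // A zero result of fmod takes the sign of the dividend, so only a
      // negative left operand can yield -0.
      double a = std::fmod(-4.0, 4.0);  // -0
      double b = std::fmod(4.0, 4.0);   // +0
      std::printf("-4 %% 4 -> %s0\n", std::signbit(a) ? "-" : "+");
      std::printf(" 4 %% 4 -> %s0\n", std::signbit(b) ? "-" : "+");
      return 0;
    }
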
@@ -1456,9 +1479,9 @@
 
 
 LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(eax);
@@ -1473,9 +1496,9 @@
 
 
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), eax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(edx);
@@ -1508,8 +1531,8 @@
 
 LInstruction* LChunkBuilder::DoMul(HMul* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstant(instr->BetterRightOperand());
     LOperand* temp = NULL;
@@ -1532,8 +1555,8 @@
 
 LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new(zone()) LSubI(left, right);
@@ -1552,8 +1575,8 @@
 
 LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     // Check to see if it would be advantageous to use an lea instruction rather
     // than an add. This is the case when no overflow check is needed and there
     // are multiple uses of the add's inputs, so using a 3-register add will
@@ -1576,9 +1599,9 @@
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::ADD, instr);
   } else if (instr->representation().IsExternal()) {
-    ASSERT(instr->left()->representation().IsExternal());
-    ASSERT(instr->right()->representation().IsInteger32());
-    ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+    DCHECK(instr->left()->representation().IsExternal());
+    DCHECK(instr->right()->representation().IsInteger32());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
     bool use_lea = LAddI::UseLea(instr);
     LOperand* left = UseRegisterAtStart(instr->left());
     HValue* right_candidate = instr->right();
@@ -1600,14 +1623,14 @@
   LOperand* left = NULL;
   LOperand* right = NULL;
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     left = UseRegisterAtStart(instr->BetterLeftOperand());
     right = UseOrConstantAtStart(instr->BetterRightOperand());
   } else {
-    ASSERT(instr->representation().IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     left = UseRegisterAtStart(instr->left());
     right = UseRegisterAtStart(instr->right());
   }
@@ -1617,15 +1640,16 @@
 
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  ASSERT(instr->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
   // We call a C function for double power. It can't trigger a GC.
   // We need to use fixed result register for the call.
   Representation exponent_type = instr->right()->representation();
-  ASSERT(instr->left()->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
   LOperand* left = UseFixedDouble(instr->left(), xmm2);
-  LOperand* right = exponent_type.IsDouble() ?
-      UseFixedDouble(instr->right(), xmm1) :
-      UseFixed(instr->right(), eax);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), xmm1)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
   LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
@@ -1633,8 +1657,8 @@
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  ASSERT(instr->left()->representation().IsSmiOrTagged());
-  ASSERT(instr->right()->representation().IsSmiOrTagged());
+  DCHECK(instr->left()->representation().IsSmiOrTagged());
+  DCHECK(instr->right()->representation().IsSmiOrTagged());
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseFixed(instr->left(), edx);
   LOperand* right = UseFixed(instr->right(), eax);
@@ -1647,15 +1671,15 @@
     HCompareNumericAndBranch* instr) {
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(r));
-    ASSERT(instr->right()->representation().Equals(r));
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
   } else {
-    ASSERT(r.IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     LOperand* left;
     LOperand* right;
     if (CanBeImmediateConstant(instr->left()) &&
@@ -1697,28 +1721,28 @@
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsSmiOrTagged());
+  DCHECK(instr->value()->representation().IsSmiOrTagged());
   LOperand* temp = TempRegister();
   return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
   return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsUndetectableAndBranch(
       UseRegisterAtStart(instr->value()), TempRegister());
 }
@@ -1726,8 +1750,8 @@
 
 LInstruction* LChunkBuilder::DoStringCompareAndBranch(
     HStringCompareAndBranch* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseFixed(instr->left(), edx);
   LOperand* right = UseFixed(instr->right(), eax);
@@ -1741,7 +1765,7 @@
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LHasInstanceTypeAndBranch(
       UseRegisterAtStart(instr->value()),
       TempRegister());
@@ -1750,7 +1774,7 @@
 
 LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
     HGetCachedArrayIndex* instr)  {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
   return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
@@ -1759,7 +1783,7 @@
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
@@ -1767,7 +1791,7 @@
 
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
                                            TempRegister(),
                                            TempRegister());
@@ -1895,7 +1919,7 @@
       }
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       if (val->type().IsSmi() || val->representation().IsSmi()) {
         LOperand* value = UseRegister(val);
         return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
@@ -1922,7 +1946,7 @@
       return AssignEnvironment(
           DefineAsRegister(new(zone()) LDoubleToSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       bool truncating = instr->CanTruncateToInt32();
       bool needs_temp = !truncating;
       LOperand* value = needs_temp ? UseTempRegister(val) : UseRegister(val);
@@ -1955,7 +1979,7 @@
       }
       return result;
     } else {
-      ASSERT(to.IsDouble());
+      DCHECK(to.IsDouble());
       if (val->CheckFlag(HInstruction::kUint32)) {
         return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
       } else {
@@ -2025,7 +2049,7 @@
     LOperand* reg = UseFixed(value, eax);
     return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
   } else {
-    ASSERT(input_rep.IsSmiOrTagged());
+    DCHECK(input_rep.IsSmiOrTagged());
     LOperand* reg = UseFixed(value, eax);
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve xmm1 explicitly.
@@ -2038,7 +2062,7 @@
 
 LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
   HValue* value = instr->value();
-  ASSERT(value->representation().IsDouble());
+  DCHECK(value->representation().IsDouble());
   return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
 }
 
@@ -2066,7 +2090,7 @@
     return DefineAsRegister(new(zone()) LConstantI);
   } else if (r.IsDouble()) {
     double value = instr->DoubleValue();
-    bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
+    bool value_is_zero = bit_cast<uint64_t, double>(value) == 0;
     LOperand* temp = value_is_zero ? NULL : TempRegister();
     return DefineAsRegister(new(zone()) LConstantD(temp));
   } else if (r.IsExternal()) {
@@ -2090,9 +2114,15 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* global_object = UseFixed(instr->global_object(), edx);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
   LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object);
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2145,8 +2175,14 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), edx);
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
+      context, object, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2165,7 +2201,7 @@
 
 
 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  ASSERT(instr->key()->representation().IsSmiOrInteger32());
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   bool clobbers_key = ExternalArrayOpRequiresTemp(
       instr->key()->representation(), elements_kind);
@@ -2178,7 +2214,7 @@
     LOperand* obj = UseRegisterAtStart(instr->elements());
     result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
   } else {
-    ASSERT(
+    DCHECK(
         (instr->representation().IsInteger32() &&
          !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
         (instr->representation().IsDouble() &&
@@ -2203,11 +2239,15 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), edx);
-  LOperand* key = UseFixed(instr->key(), ecx);
-
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
   LLoadKeyedGeneric* result =
-      new(zone()) LLoadKeyedGeneric(context, object, key);
+      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2233,8 +2273,8 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_typed_elements()) {
-    ASSERT(instr->elements()->representation().IsTagged());
-    ASSERT(instr->key()->representation().IsInteger32() ||
+    DCHECK(instr->elements()->representation().IsTagged());
+    DCHECK(instr->key()->representation().IsInteger32() ||
            instr->key()->representation().IsSmi());
 
     if (instr->value()->representation().IsDouble()) {
@@ -2244,7 +2284,7 @@
       LOperand* key = UseRegisterOrConstantAtStart(instr->key());
       return new(zone()) LStoreKeyed(object, key, val);
     } else {
-      ASSERT(instr->value()->representation().IsSmiOrTagged());
+      DCHECK(instr->value()->representation().IsSmiOrTagged());
       bool needs_write_barrier = instr->NeedsWriteBarrier();
 
       LOperand* obj = UseRegister(instr->elements());
@@ -2262,12 +2302,12 @@
   }
 
   ElementsKind elements_kind = instr->elements_kind();
-  ASSERT(
+  DCHECK(
       (instr->value()->representation().IsInteger32() &&
        !IsDoubleOrFloatElementsKind(elements_kind)) ||
       (instr->value()->representation().IsDouble() &&
        IsDoubleOrFloatElementsKind(elements_kind)));
-  ASSERT((instr->is_fixed_typed_array() &&
+  DCHECK((instr->is_fixed_typed_array() &&
           instr->elements()->representation().IsTagged()) ||
          (instr->is_external() &&
           instr->elements()->representation().IsExternal()));
@@ -2285,13 +2325,14 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), edx);
-  LOperand* key = UseFixed(instr->key(), ecx);
-  LOperand* value = UseFixed(instr->value(), eax);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsTagged());
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
 
   LStoreKeyedGeneric* result =
       new(zone()) LStoreKeyedGeneric(context, object, key, value);
@@ -2343,9 +2384,9 @@
         ? UseRegister(instr->object())
         : UseTempRegister(instr->object());
   } else if (is_external_location) {
-    ASSERT(!is_in_object);
-    ASSERT(!needs_write_barrier);
-    ASSERT(!needs_write_barrier_for_map);
+    DCHECK(!is_in_object);
+    DCHECK(!needs_write_barrier);
+    DCHECK(!needs_write_barrier_for_map);
     obj = UseRegisterOrConstant(instr->object());
   } else {
     obj = needs_write_barrier_for_map
@@ -2367,8 +2408,6 @@
     val = UseTempRegister(instr->value());
   } else if (can_be_constant) {
     val = UseRegisterOrConstant(instr->value());
-  } else if (instr->field_representation().IsSmi()) {
-    val = UseTempRegister(instr->value());
   } else if (instr->field_representation().IsDouble()) {
     val = UseRegisterAtStart(instr->value());
   } else {
@@ -2389,8 +2428,9 @@
 
 LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), edx);
-  LOperand* value = UseFixed(instr->value(), eax);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
   LStoreNamedGeneric* result =
       new(zone()) LStoreNamedGeneric(context, object, value);
@@ -2453,7 +2493,7 @@
 
 
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
-  ASSERT(argument_count_ == 0);
+  DCHECK(argument_count_ == 0);
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
   return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2466,11 +2506,11 @@
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor();
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
-    Register reg = descriptor->GetParameterRegister(index);
+    Register reg = descriptor.GetEnvironmentParameterRegister(index);
     return DefineFixed(result, reg);
   }
 }
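// CallInterfaceDescriptor is now a small value type rather than a pointer to
// a CodeStubInterfaceDescriptor, and GetEnvironmentParameterRegister(index)
// answers the one question needed here: which fixed register holds the stub
// parameter with this environment index.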
@@ -2486,7 +2526,7 @@
   } else {
     spill_index = env_index - instr->environment()->first_local_index();
     if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
-      Abort(kNotEnoughSpillSlotsForOsr);
+      Retry(kNotEnoughSpillSlotsForOsr);
       spill_index = 0;
     }
     if (spill_index == 0) {
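// Retry() is deliberately softer than the old Abort(): it gives up on this
// one compilation attempt (OSR wanting more spill slots than a fixed operand
// can encode) while leaving the function eligible for optimization later,
// instead of marking it permanently unoptimizable.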
@@ -2577,7 +2617,7 @@
     LOperand* context = UseFixed(instr->context(), esi);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
-    ASSERT(instr->is_backwards_branch());
+    DCHECK(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
     return AssignEnvironment(
         AssignPointerMap(new(zone()) LStackCheck(context)));
@@ -2598,6 +2638,7 @@
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
   }
+  inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2613,7 +2654,7 @@
   if (env->entry()->arguments_pushed()) {
     int argument_count = env->arguments_environment()->parameter_count();
     pop = new(zone()) LDrop(argument_count);
-    ASSERT(instr->argument_delta() == -argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
   }
 
   HEnvironment* outer = current_block_->last_environment()->
diff --git a/src/ia32/lithium-ia32.h b/src/ia32/lithium-ia32.h
index e12ca5e..75fed82 100644
--- a/src/ia32/lithium-ia32.h
+++ b/src/ia32/lithium-ia32.h
@@ -6,171 +6,177 @@
 #define V8_IA32_LITHIUM_IA32_H_
 
 #include "src/hydrogen.h"
-#include "src/lithium-allocator.h"
 #include "src/lithium.h"
+#include "src/lithium-allocator.h"
 #include "src/safepoint-table.h"
 #include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
+namespace compiler {
+class RCodeVisualizer;
+}
+
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
-  V(AccessArgumentsAt)                          \
-  V(AddI)                                       \
-  V(AllocateBlockContext)                       \
-  V(Allocate)                                   \
-  V(ApplyArguments)                             \
-  V(ArgumentsElements)                          \
-  V(ArgumentsLength)                            \
-  V(ArithmeticD)                                \
-  V(ArithmeticT)                                \
-  V(BitI)                                       \
-  V(BoundsCheck)                                \
-  V(Branch)                                     \
-  V(CallJSFunction)                             \
-  V(CallWithDescriptor)                         \
-  V(CallFunction)                               \
-  V(CallNew)                                    \
-  V(CallNewArray)                               \
-  V(CallRuntime)                                \
-  V(CallStub)                                   \
-  V(CheckInstanceType)                          \
-  V(CheckMaps)                                  \
-  V(CheckMapValue)                              \
-  V(CheckNonSmi)                                \
-  V(CheckSmi)                                   \
-  V(CheckValue)                                 \
-  V(ClampDToUint8)                              \
-  V(ClampIToUint8)                              \
-  V(ClampTToUint8)                              \
-  V(ClassOfTestAndBranch)                       \
-  V(CompareMinusZeroAndBranch)                  \
-  V(CompareNumericAndBranch)                    \
-  V(CmpObjectEqAndBranch)                       \
-  V(CmpHoleAndBranch)                           \
-  V(CmpMapAndBranch)                            \
-  V(CmpT)                                       \
-  V(ConstantD)                                  \
-  V(ConstantE)                                  \
-  V(ConstantI)                                  \
-  V(ConstantS)                                  \
-  V(ConstantT)                                  \
-  V(ConstructDouble)                            \
-  V(Context)                                    \
-  V(DateField)                                  \
-  V(DebugBreak)                                 \
-  V(DeclareGlobals)                             \
-  V(Deoptimize)                                 \
-  V(DivByConstI)                                \
-  V(DivByPowerOf2I)                             \
-  V(DivI)                                       \
-  V(DoubleBits)                                 \
-  V(DoubleToI)                                  \
-  V(DoubleToSmi)                                \
-  V(Drop)                                       \
-  V(Dummy)                                      \
-  V(DummyUse)                                   \
-  V(FlooringDivByConstI)                        \
-  V(FlooringDivByPowerOf2I)                     \
-  V(FlooringDivI)                               \
-  V(ForInCacheArray)                            \
-  V(ForInPrepareMap)                            \
-  V(FunctionLiteral)                            \
-  V(GetCachedArrayIndex)                        \
-  V(Goto)                                       \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceTypeAndBranch)                   \
-  V(InnerAllocatedObject)                       \
-  V(InstanceOf)                                 \
-  V(InstanceOfKnownGlobal)                      \
-  V(InstructionGap)                             \
-  V(Integer32ToDouble)                          \
-  V(InvokeFunction)                             \
-  V(IsConstructCallAndBranch)                   \
-  V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
-  V(IsSmiAndBranch)                             \
-  V(IsUndetectableAndBranch)                    \
-  V(Label)                                      \
-  V(LazyBailout)                                \
-  V(LoadContextSlot)                            \
-  V(LoadFieldByIndex)                           \
-  V(LoadFunctionPrototype)                      \
-  V(LoadGlobalCell)                             \
-  V(LoadGlobalGeneric)                          \
-  V(LoadKeyed)                                  \
-  V(LoadKeyedGeneric)                           \
-  V(LoadNamedField)                             \
-  V(LoadNamedGeneric)                           \
-  V(LoadRoot)                                   \
-  V(MapEnumLength)                              \
-  V(MathAbs)                                    \
-  V(MathClz32)                                  \
-  V(MathExp)                                    \
-  V(MathFloor)                                  \
-  V(MathLog)                                    \
-  V(MathMinMax)                                 \
-  V(MathPowHalf)                                \
-  V(MathRound)                                  \
-  V(MathSqrt)                                   \
-  V(ModByConstI)                                \
-  V(ModByPowerOf2I)                             \
-  V(ModI)                                       \
-  V(MulI)                                       \
-  V(NumberTagD)                                 \
-  V(NumberTagI)                                 \
-  V(NumberTagU)                                 \
-  V(NumberUntagD)                               \
-  V(OsrEntry)                                   \
-  V(Parameter)                                  \
-  V(Power)                                      \
-  V(PushArgument)                               \
-  V(RegExpLiteral)                              \
-  V(Return)                                     \
-  V(SeqStringGetChar)                           \
-  V(SeqStringSetChar)                           \
-  V(ShiftI)                                     \
-  V(SmiTag)                                     \
-  V(SmiUntag)                                   \
-  V(StackCheck)                                 \
-  V(StoreCodeEntry)                             \
-  V(StoreContextSlot)                           \
-  V(StoreFrameContext)                          \
-  V(StoreGlobalCell)                            \
-  V(StoreKeyed)                                 \
-  V(StoreKeyedGeneric)                          \
-  V(StoreNamedField)                            \
-  V(StoreNamedGeneric)                          \
-  V(StringAdd)                                  \
-  V(StringCharCodeAt)                           \
-  V(StringCharFromCode)                         \
-  V(StringCompareAndBranch)                     \
-  V(SubI)                                       \
-  V(TaggedToI)                                  \
-  V(ThisFunction)                               \
-  V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
-  V(TrapAllocationMemento)                      \
-  V(Typeof)                                     \
-  V(TypeofIsAndBranch)                          \
-  V(Uint32ToDouble)                             \
-  V(UnknownOSRValue)                            \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(AllocateBlockContext)                    \
+  V(Allocate)                                \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(LoadRoot)                                \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PushArgument)                            \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(TailCallThroughMegamorphicCache)         \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {                      \
+  virtual Opcode opcode() const FINAL OVERRIDE {                            \
     return LInstruction::k##type;                                           \
   }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;   \
-  virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {               \
+  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;         \
+  virtual const char* Mnemonic() const FINAL OVERRIDE {                     \
     return mnemonic;                                                        \
   }                                                                         \
   static L##type* cast(LInstruction* instr) {                               \
-    ASSERT(instr->Is##type());                                              \
+    DCHECK(instr->Is##type());                                              \
     return reinterpret_cast<L##type*>(instr);                               \
   }
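// FINAL and OVERRIDE replace the old V8_FINAL / V8_OVERRIDE spellings used in
// this header; on a C++11 toolchain both pairs expand to the final / override
// keywords, so the macro above is semantically unchanged.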
 
@@ -200,7 +206,7 @@
   enum Opcode {
     // Declare a unique enum value for each instruction.
 #define DECLARE_OPCODE(type) k##type,
-    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
     kNumberOfInstructions
 #undef DECLARE_OPCODE
   };
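// The instruction list is an X-macro: each V(Name) entry runs through
// DECLARE_OPCODE, so after preprocessing the enum reads roughly:
//   enum Opcode { kAccessArgumentsAt, kAddI, /* ... */ kWrapReceiver,
//                 kAdapter, kNumberOfInstructions };
// kAdapter is appended outside the list, so the enum carries one value that
// no concrete Lithium instruction claims.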
@@ -219,6 +225,9 @@
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -257,11 +266,12 @@
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator support.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
@@ -282,7 +292,7 @@
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+  virtual bool HasResult() const FINAL OVERRIDE {
     return R != 0 && result() != NULL;
   }
   void set_result(LOperand* operand) { results_[0] = operand; }
@@ -304,11 +314,11 @@
 
  private:
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return I; }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+  virtual int TempCount() FINAL OVERRIDE { return T; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
 };
 
 
@@ -322,10 +332,10 @@
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual bool IsGap() const FINAL OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
-    ASSERT(instr->IsGap());
+    DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
   }
 
@@ -359,11 +369,11 @@
 };
 
 
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -371,17 +381,17 @@
 };
 
 
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  virtual bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return false;
   }
 
@@ -392,20 +402,20 @@
 };
 
 
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
 };
 
 
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LDummy() { }
+  LDummy() {}
   DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
 };
 
 
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDummyUse(LOperand* value) {
     inputs_[0] = value;
@@ -414,25 +424,25 @@
 };
 
 
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
  public:
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -448,16 +458,16 @@
 };
 
 
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
 
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallStub(LOperand* context) {
     inputs_[0] = context;
@@ -470,9 +480,30 @@
 };
 
 
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+    : public LTemplateInstruction<0, 3, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  explicit LTailCallThroughMegamorphicCache(LOperand* context,
+                                            LOperand* receiver,
+                                            LOperand* name) {
+    inputs_[0] = context;
+    inputs_[1] = receiver;
+    inputs_[2] = name;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* name() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+                               "tail-call-through-megamorphic-cache")
+  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
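// <0, 3, 0>: no result, three fixed-register inputs, no temps. The
// instruction probes the megamorphic stub cache for (receiver, name) and, on
// a hit, tail-calls the cached handler (a miss falls through to the IC's miss
// path), so control never returns here to produce a value.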
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -484,7 +515,7 @@
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+  virtual bool IsControl() const FINAL OVERRIDE { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -523,7 +554,7 @@
 };
 
 
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LWrapReceiver(LOperand* receiver,
                 LOperand* function,
@@ -542,7 +573,7 @@
 };
 
 
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
@@ -563,7 +594,7 @@
 };
 
 
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
     inputs_[0] = arguments;
@@ -577,11 +608,11 @@
 
   DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArgumentsLength(LOperand* elements) {
     inputs_[0] = elements;
@@ -593,20 +624,20 @@
 };
 
 
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
   DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
 };
 
 
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -624,7 +655,7 @@
 };
 
 
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LModByConstI(LOperand* dividend,
                int32_t divisor,
@@ -649,7 +680,7 @@
 };
 
 
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LModI(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
@@ -666,7 +697,7 @@
 };
 
 
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -684,7 +715,7 @@
 };
 
 
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LDivByConstI(LOperand* dividend,
                int32_t divisor,
@@ -709,7 +740,7 @@
 };
 
 
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -726,7 +757,7 @@
 };
 
 
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -745,7 +776,7 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 3> {
  public:
   LFlooringDivByConstI(LOperand* dividend,
                        int32_t divisor,
@@ -773,7 +804,7 @@
 };
 
 
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -790,7 +821,7 @@
 };
 
 
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LMulI(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
@@ -807,7 +838,7 @@
 };
 
 
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCompareNumericAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -830,7 +861,7 @@
 };
 
 
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathFloor(LOperand* value) {
     inputs_[0] = value;
@@ -843,22 +874,32 @@
 };
 
 
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LMathRound(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
 
-  LOperand* value() { return inputs_[0]; }
   LOperand* temp() { return temps_[0]; }
+  LOperand* value() { return inputs_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
   DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
 };
 
 
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
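// What the new instruction computes, per Math.fround: round the input to the
// nearest float32 value and hand it back as a double. A one-line model (the
// name is illustrative, not V8's):
static inline double MathFroundModel(double x) {
  return static_cast<double>(static_cast<float>(x));
}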
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathAbs(LOperand* context, LOperand* value) {
     inputs_[1] = context;
@@ -873,7 +914,7 @@
 };
 
 
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathLog(LOperand* value) {
     inputs_[0] = value;
@@ -885,7 +926,7 @@
 };
 
 
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathClz32(LOperand* value) {
     inputs_[0] = value;
@@ -897,7 +938,7 @@
 };
 
 
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LMathExp(LOperand* value,
            LOperand* temp1,
@@ -916,7 +957,7 @@
 };
 
 
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathSqrt(LOperand* value) {
     inputs_[0] = value;
@@ -928,7 +969,7 @@
 };
 
 
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LMathPowHalf(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -942,7 +983,7 @@
 };
 
 
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -956,7 +997,7 @@
 };
 
 
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpHoleAndBranch(LOperand* object) {
     inputs_[0] = object;
@@ -969,7 +1010,7 @@
 };
 
 
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -985,7 +1026,7 @@
 };
 
 
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsObjectAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -997,11 +1038,11 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1014,11 +1055,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1029,11 +1070,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1047,11 +1088,11 @@
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
  public:
   LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1067,13 +1108,13 @@
                                "string-compare-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Token::Value op() const { return hydrogen()->token(); }
 };
 
 
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1087,11 +1128,11 @@
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGetCachedArrayIndex(LOperand* value) {
     inputs_[0] = value;
@@ -1104,7 +1145,7 @@
 };
 
 
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
     : public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1116,11 +1157,11 @@
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
                                "has-cached-array-index-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
  public:
   explicit LIsConstructCallAndBranch(LOperand* temp) {
     temps_[0] = temp;
@@ -1133,7 +1174,7 @@
 };
 
 
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
     inputs_[0] = value;
@@ -1149,11 +1190,11 @@
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LCmpT(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1169,7 +1210,7 @@
 };
 
 
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1183,7 +1224,7 @@
 };
 
 
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -1204,7 +1245,7 @@
     return lazy_deopt_env_;
   }
   virtual void SetDeferredLazyDeoptimizationEnvironment(
-      LEnvironment* env) V8_OVERRIDE {
+      LEnvironment* env) OVERRIDE {
     lazy_deopt_env_ = env;
   }
 
@@ -1213,7 +1254,7 @@
 };
 
 
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LBoundsCheck(LOperand* index, LOperand* length) {
     inputs_[0] = index;
@@ -1228,7 +1269,7 @@
 };
 
 
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1245,7 +1286,7 @@
 };
 
 
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
       : op_(op), can_deopt_(can_deopt) {
@@ -1267,7 +1308,7 @@
 };
 
 
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSubI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1282,7 +1323,7 @@
 };
 
 
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1291,7 +1332,7 @@
 };
 
 
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1300,7 +1341,7 @@
 };
 
 
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
  public:
   explicit LConstantD(LOperand* temp) {
     temps_[0] = temp;
@@ -1315,7 +1356,7 @@
 };
 
 
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1326,7 +1367,7 @@
 };
 
 
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1337,7 +1378,7 @@
 };
 
 
-class LBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1350,11 +1391,11 @@
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpMapAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1369,7 +1410,7 @@
 };
 
 
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMapEnumLength(LOperand* value) {
     inputs_[0] = value;
@@ -1381,7 +1422,7 @@
 };
 
 
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LDateField(LOperand* date, LOperand* temp, Smi* index)
       : index_(index) {
@@ -1402,7 +1443,7 @@
 };
 
 
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
     inputs_[0] = string;
@@ -1417,7 +1458,7 @@
 };
 
 
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LSeqStringSetChar(LOperand* context,
                     LOperand* string,
@@ -1438,7 +1479,7 @@
 };
 
 
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LAddI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1458,7 +1499,7 @@
 };
 
 
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathMinMax(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1473,7 +1514,7 @@
 };
 
 
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LPower(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1488,7 +1529,7 @@
 };
 
 
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
       : op_(op) {
@@ -1501,18 +1542,18 @@
 
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticD;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LArithmeticT(Token::Value op,
                LOperand* context,
@@ -1528,11 +1569,11 @@
   LOperand* left() { return inputs_[1]; }
   LOperand* right() { return inputs_[2]; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticT;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
   Token::Value op() const { return op_; }
 
@@ -1541,7 +1582,7 @@
 };
 
 
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   explicit LReturn(LOperand* value,
                    LOperand* context,
@@ -1555,7 +1596,7 @@
     return parameter_count()->IsConstantOperand();
   }
   LConstantOperand* constant_parameter_count() {
-    ASSERT(has_constant_parameter_count());
+    DCHECK(has_constant_parameter_count());
     return LConstantOperand::cast(parameter_count());
   }
   LOperand* parameter_count() { return inputs_[2]; }
@@ -1565,7 +1606,7 @@
 };
 
 
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedField(LOperand* object) {
     inputs_[0] = object;
@@ -1578,15 +1619,17 @@
 };
 
 
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object) {
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
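// The template arguments of LTemplateInstruction<R, I, T> are compile-time
// operand counts: R results (0 or 1), I inputs, T temps. Growing this class
// from <1, 2, 0> to <1, 2, 1> is exactly what makes temps_[0] available for
// the feedback vector; the storage behind it is fixed-size:
//   EmbeddedContainer<LOperand*, R> results_;
//   EmbeddedContainer<LOperand*, I> inputs_;
//   EmbeddedContainer<LOperand*, T> temps_;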
@@ -1595,7 +1638,7 @@
 };
 
 
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
     inputs_[0] = function;
@@ -1610,7 +1653,7 @@
 };
 
 
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
   DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1619,7 +1662,7 @@
 };
 
 
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyed(LOperand* elements, LOperand* key) {
     inputs_[0] = elements;
@@ -1643,7 +1686,7 @@
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   bool key_is_smi() {
     return hydrogen()->key()->representation().IsTagged();
@@ -1667,38 +1710,45 @@
 }
 
 
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
  public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
+  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
+                    LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = obj;
     inputs_[2] = key;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
 
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = global_object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1708,7 +1758,7 @@
 };
 
 
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStoreGlobalCell(LOperand* value) {
     inputs_[0] = value;
@@ -1721,7 +1771,7 @@
 };
 
 
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
     inputs_[0] = context;
@@ -1734,11 +1784,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -1755,11 +1805,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
     inputs_[0] = value;
@@ -1771,7 +1821,7 @@
 };
 
 
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LDrop(int count) : count_(count) { }
 
@@ -1784,7 +1834,7 @@
 };
 
 
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LStoreCodeEntry(LOperand* function, LOperand* code_object) {
     inputs_[0] = function;
@@ -1801,7 +1851,7 @@
 };
 
 
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
     inputs_[0] = base_object;
@@ -1817,21 +1867,21 @@
 };
 
 
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
   DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
   DECLARE_HYDROGEN_ACCESSOR(Context)
 };
 
 
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LDeclareGlobals(LOperand* context) {
     inputs_[0] = context;
@@ -1844,7 +1894,7 @@
 };
 
 
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallJSFunction(LOperand* function) {
     inputs_[0] = function;
@@ -1855,44 +1905,44 @@
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  public:
-  LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
-                      const ZoneList<LOperand*>& operands,
-                      Zone* zone)
-    : inputs_(descriptor->environment_length() + 1, zone) {
-    ASSERT(descriptor->environment_length() + 1 == operands.length());
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
     inputs_.AddAll(operands, zone);
   }
 
   LOperand* target() const { return inputs_[0]; }
 
- private:
-  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
   DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+  virtual int TempCount() FINAL OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
 };
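// A minimal sketch (illustrative stand-ins, not V8 API) of the pattern
// LCallWithDescriptor uses above: operands live in a growable list instead
// of the fixed inputs_ array, and the fixed iterator interface is satisfied
// through virtual overrides.
#include <vector>

struct SketchOperand {};  // stands in for LOperand

class SketchVariableArityInstr {
 public:
  explicit SketchVariableArityInstr(std::vector<SketchOperand*> operands)
      : inputs_(operands) {}
  virtual ~SketchVariableArityInstr() {}

  // Mirrors the InputCount()/InputAt() overrides above.
  virtual int InputCount() { return static_cast<int>(inputs_.size()); }
  virtual SketchOperand* InputAt(int i) { return inputs_[i]; }

 private:
  std::vector<SketchOperand*> inputs_;
};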
 
 
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInvokeFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1905,13 +1955,13 @@
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1928,7 +1978,7 @@
 };
 
 
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNew(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1941,13 +1991,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1960,13 +2010,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallRuntime(LOperand* context) {
     inputs_[0] = context;
@@ -1977,7 +2027,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -1987,7 +2037,7 @@
 };
 
 
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LInteger32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -1999,7 +2049,7 @@
 };
 
 
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LUint32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -2011,7 +2061,7 @@
 };
 
 
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LNumberTagI(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2025,7 +2075,7 @@
 };
 
 
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LNumberTagU(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2039,7 +2089,7 @@
 };
 
 
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LNumberTagD(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2055,7 +2105,7 @@
 
 
 // Sometimes truncating conversion from a double to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LDoubleToI(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2072,7 +2122,7 @@
 };
 
 
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2086,7 +2136,7 @@
 
 
 // Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LTaggedToI(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2103,7 +2153,7 @@
 };
 
 
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LSmiTag(LOperand* value) {
     inputs_[0] = value;
@@ -2116,7 +2166,7 @@
 };
 
 
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LNumberUntagD(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2131,7 +2181,7 @@
 };
 
 
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LSmiUntag(LOperand* value, bool needs_check)
       : needs_check_(needs_check) {
@@ -2149,7 +2199,7 @@
 };
 
 
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
  public:
   LStoreNamedField(LOperand* obj,
                    LOperand* val,
@@ -2169,11 +2219,11 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
     inputs_[0] = context;
@@ -2188,13 +2238,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
     inputs_[0] = obj;
@@ -2219,13 +2269,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
 
 
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyedGeneric(LOperand* context,
                      LOperand* object,
@@ -2245,13 +2295,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* context,
@@ -2272,7 +2322,7 @@
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2283,7 +2333,7 @@
 };
 
 
-class LTrapAllocationMemento V8_FINAL  : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LTrapAllocationMemento(LOperand* object,
                          LOperand* temp) {
@@ -2299,7 +2349,7 @@
 };
 
 
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -2316,7 +2366,7 @@
 };
 
 
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
     inputs_[0] = context;
@@ -2333,7 +2383,7 @@
 };
 
 
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharFromCode(LOperand* context, LOperand* char_code) {
     inputs_[0] = context;
@@ -2348,7 +2398,7 @@
 };
 
 
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckValue(LOperand* value) {
     inputs_[0] = value;
@@ -2361,7 +2411,7 @@
 };
 
 
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LCheckInstanceType(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2376,7 +2426,7 @@
 };
 
 
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckMaps(LOperand* value = NULL) {
     inputs_[0] = value;
@@ -2389,7 +2439,7 @@
 };
 
 
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2401,7 +2451,7 @@
 };
 
 
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampDToUint8(LOperand* value) {
     inputs_[0] = value;
@@ -2413,7 +2463,7 @@
 };
 
 
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampIToUint8(LOperand* value) {
     inputs_[0] = value;
@@ -2425,7 +2475,7 @@
 };
 
 
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LClampTToUint8(LOperand* value, LOperand* temp_xmm) {
     inputs_[0] = value;
@@ -2439,7 +2489,7 @@
 };
 
 
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2452,7 +2502,7 @@
 };
 
 
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleBits(LOperand* value) {
     inputs_[0] = value;
@@ -2465,7 +2515,7 @@
 };
 
 
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LConstructDouble(LOperand* hi, LOperand* lo) {
     inputs_[0] = hi;
@@ -2479,7 +2529,7 @@
 };
 
 
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
     inputs_[0] = context;
@@ -2496,7 +2546,7 @@
 };
 
 
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LRegExpLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2509,7 +2559,7 @@
 };
 
 
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFunctionLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2522,7 +2572,7 @@
 };
 
 
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LToFastProperties(LOperand* value) {
     inputs_[0] = value;
@@ -2535,7 +2585,7 @@
 };
 
 
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -2549,7 +2599,7 @@
 };
 
 
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -2562,20 +2612,20 @@
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
 
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStackCheck(LOperand* context) {
     inputs_[0] = context;
@@ -2593,7 +2643,7 @@
 };
 
 
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LForInPrepareMap(LOperand* context, LOperand* object) {
     inputs_[0] = context;
@@ -2607,7 +2657,7 @@
 };
 
 
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LForInCacheArray(LOperand* map) {
     inputs_[0] = map;
@@ -2623,7 +2673,7 @@
 };
 
 
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LCheckMapValue(LOperand* value, LOperand* map) {
     inputs_[0] = value;
@@ -2637,7 +2687,7 @@
 };
 
 
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadFieldByIndex(LOperand* object, LOperand* index) {
     inputs_[0] = object;
@@ -2681,7 +2731,7 @@
 
 
 class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
  public:
   LPlatformChunk(CompilationInfo* info, HGraph* graph)
       : LChunk(info, graph),
@@ -2697,20 +2747,14 @@
 };
 
 
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
  public:
   LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
-      : LChunkBuilderBase(graph->zone()),
-        chunk_(NULL),
-        info_(info),
-        graph_(graph),
-        status_(UNUSED),
+      : LChunkBuilderBase(info, graph),
         current_instruction_(NULL),
         current_block_(NULL),
         next_block_(NULL),
-        allocator_(allocator) { }
-
-  Isolate* isolate() const { return graph_->isolate(); }
+        allocator_(allocator) {}
 
   // Build the sequence for the graph.
   LPlatformChunk* Build();
@@ -2722,6 +2766,7 @@
 
   LInstruction* DoMathFloor(HUnaryMathOperation* instr);
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
@@ -2739,24 +2784,6 @@
   LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
 
  private:
-  enum Status {
-    UNUSED,
-    BUILDING,
-    DONE,
-    ABORTED
-  };
-
-  LPlatformChunk* chunk() const { return chunk_; }
-  CompilationInfo* info() const { return info_; }
-  HGraph* graph() const { return graph_; }
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_building() const { return status_ == BUILDING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  void Abort(BailoutReason reason);
-
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
@@ -2802,7 +2829,7 @@
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2852,10 +2879,6 @@
 
   LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
 
-  LPlatformChunk* chunk_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
   HBasicBlock* next_block_;
diff --git a/src/ia32/macro-assembler-ia32.cc b/src/ia32/macro-assembler-ia32.cc
index 1368501..7480a6f 100644
--- a/src/ia32/macro-assembler-ia32.cc
+++ b/src/ia32/macro-assembler-ia32.cc
@@ -6,6 +6,8 @@
 
 #if V8_TARGET_ARCH_IA32
 
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/cpu-profiler.h"
@@ -33,7 +35,7 @@
 
 
 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8()) {
     movsx_b(dst, src);
   } else if (r.IsUInteger8()) {
@@ -49,7 +51,7 @@
 
 
 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8() || r.IsUInteger8()) {
     mov_b(dst, src);
   } else if (r.IsInteger16() || r.IsUInteger16()) {
@@ -83,7 +85,7 @@
 void MacroAssembler::StoreRoot(Register source,
                                Register scratch,
                                Heap::RootListIndex index) {
-  ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
+  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
   ExternalReference roots_array_start =
       ExternalReference::roots_array_start(isolate());
   mov(scratch, Immediate(index));
@@ -105,7 +107,7 @@
 
 
 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
-  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
   Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
   cmp(with, value);
 }
@@ -113,7 +115,7 @@
 
 void MacroAssembler::CompareRoot(const Operand& with,
                                  Heap::RootListIndex index) {
-  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
   Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
   cmp(with, value);
 }
@@ -125,7 +127,7 @@
     Condition cc,
     Label* condition_met,
     Label::Distance condition_met_distance) {
-  ASSERT(cc == equal || cc == not_equal);
+  DCHECK(cc == equal || cc == not_equal);
   if (scratch.is(object)) {
     and_(scratch, Immediate(~Page::kPageAlignmentMask));
   } else {
@@ -133,8 +135,8 @@
     and_(scratch, object);
   }
   // Check that we can use a test_b.
-  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
-  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+  DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
+  DCHECK(MemoryChunk::IN_TO_SPACE < 8);
   int mask = (1 << MemoryChunk::IN_FROM_SPACE)
            | (1 << MemoryChunk::IN_TO_SPACE);
   // If non-zero, the page belongs to new-space.
@@ -176,16 +178,15 @@
     ret(0);
     bind(&buffer_overflowed);
   } else {
-    ASSERT(and_then == kFallThroughAtEnd);
+    DCHECK(and_then == kFallThroughAtEnd);
     j(equal, &done, Label::kNear);
   }
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(isolate(), save_fp);
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
   CallStub(&store_buffer_overflow);
   if (and_then == kReturnAtEnd) {
     ret(0);
   } else {
-    ASSERT(and_then == kFallThroughAtEnd);
+    DCHECK(and_then == kFallThroughAtEnd);
     bind(&done);
   }
 }
@@ -249,18 +250,17 @@
 }
 
 
-void MacroAssembler::DoubleToI(Register result_reg,
-                               XMMRegister input_reg,
+void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                                XMMRegister scratch,
                                MinusZeroMode minus_zero_mode,
-                               Label* conversion_failed,
-                               Label::Distance dst) {
-  ASSERT(!input_reg.is(scratch));
+                               Label* lost_precision, Label* is_nan,
+                               Label* minus_zero, Label::Distance dst) {
+  DCHECK(!input_reg.is(scratch));
   cvttsd2si(result_reg, Operand(input_reg));
   Cvtsi2sd(scratch, Operand(result_reg));
   ucomisd(scratch, input_reg);
-  j(not_equal, conversion_failed, dst);
-  j(parity_even, conversion_failed, dst);  // NaN.
+  j(not_equal, lost_precision, dst);
+  j(parity_even, is_nan, dst);
   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
     Label done;
     // The integer converted back is equal to the original. We
@@ -270,9 +270,9 @@
     movmskpd(result_reg, input_reg);
     // Bit 0 contains the sign of the double in input_reg.
     // If input was positive, we are ok and return 0, otherwise
-    // jump to conversion_failed.
+    // jump to minus_zero.
     and_(result_reg, 1);
-    j(not_zero, conversion_failed, dst);
+    j(not_zero, minus_zero, dst);
     bind(&done);
   }
 }
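// A plain-C++ sketch (illustrative only, not V8 API) of the three failure
// modes DoubleToI distinguishes above. The assembly tests the round trip
// first and NaN second (ucomisd parks NaN on the parity_even branch); the
// sketch checks NaN first only to keep the C++ cast well defined.
#include <cmath>
#include <cstdint>

enum class D2IResult { kOk, kLostPrecision, kNaN, kMinusZero };

inline D2IResult DoubleToInt32Checked(double input, int32_t* out) {
  if (std::isnan(input)) return D2IResult::kNaN;
  double truncated = std::trunc(input);  // like cvttsd2si, minus the wrap
  if (truncated < -2147483648.0 || truncated > 2147483647.0 ||
      truncated != input) {
    return D2IResult::kLostPrecision;  // fraction lost or out of int32 range
  }
  int32_t result = static_cast<int32_t>(truncated);
  // FAIL_ON_MINUS_ZERO: a zero result from -0.0 still has its sign bit set,
  // which is what the movmskpd/and_ pair above extracts.
  if (result == 0 && std::signbit(input)) return D2IResult::kMinusZero;
  *out = result;
  return D2IResult::kOk;
}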
@@ -345,40 +345,6 @@
 }
 
 
-void MacroAssembler::TaggedToI(Register result_reg,
-                               Register input_reg,
-                               XMMRegister temp,
-                               MinusZeroMode minus_zero_mode,
-                               Label* lost_precision) {
-  Label done;
-  ASSERT(!temp.is(xmm0));
-
-  cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-      isolate()->factory()->heap_number_map());
-  j(not_equal, lost_precision, Label::kNear);
-
-  ASSERT(!temp.is(no_xmm_reg));
-
-  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  cvttsd2si(result_reg, Operand(xmm0));
-  Cvtsi2sd(temp, Operand(result_reg));
-  ucomisd(xmm0, temp);
-  RecordComment("Deferred TaggedToI: lost precision");
-  j(not_equal, lost_precision, Label::kNear);
-  RecordComment("Deferred TaggedToI: NaN");
-  j(parity_even, lost_precision, Label::kNear);
-  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
-    test(result_reg, Operand(result_reg));
-    j(not_zero, &done, Label::kNear);
-    movmskpd(result_reg, xmm0);
-    and_(result_reg, 1);
-    RecordComment("Deferred TaggedToI: minus zero");
-    j(not_zero, lost_precision, Label::kNear);
-  }
-  bind(&done);
-}
-
-
 void MacroAssembler::LoadUint32(XMMRegister dst,
                                 Register src) {
   Label done;
@@ -406,7 +372,7 @@
 
   // Skip barrier if writing a smi.
   if (smi_check == INLINE_SMI_CHECK) {
-    ASSERT_EQ(0, kSmiTag);
+    DCHECK_EQ(0, kSmiTag);
     test(value, Immediate(kSmiTagMask));
     j(zero, &done);
   }
@@ -426,8 +392,8 @@
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -452,7 +418,7 @@
 
   // Although the object register is tagged, the offset is relative to the start
   // of the object, so the offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
+  DCHECK(IsAligned(offset, kPointerSize));
 
   lea(dst, FieldOperand(object, offset));
   if (emit_debug_code()) {
@@ -471,8 +437,8 @@
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -496,9 +462,9 @@
     bind(&ok);
   }
 
-  ASSERT(!object.is(value));
-  ASSERT(!object.is(address));
-  ASSERT(!value.is(address));
+  DCHECK(!object.is(value));
+  DCHECK(!object.is(address));
+  DCHECK(!value.is(address));
   AssertNotSmi(object);
 
   if (!FLAG_incremental_marking) {
@@ -508,15 +474,11 @@
   // Compute the address.
   lea(address, FieldOperand(object, HeapObject::kMapOffset));
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
   // A single check of the map's page's interesting flag suffices, since it is
   // only set during incremental collection, and then it's also guaranteed that
   // the from object's page's interesting flag is also set.  This optimization
   // relies on the fact that maps can never be in new space.
-  ASSERT(!isolate()->heap()->InNewSpace(*map));
+  DCHECK(!isolate()->heap()->InNewSpace(*map));
   CheckPageFlagForMap(map,
                       MemoryChunk::kPointersToHereAreInterestingMask,
                       zero,
@@ -529,12 +491,16 @@
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -547,9 +513,9 @@
     RememberedSetAction remembered_set_action,
     SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
-  ASSERT(!object.is(value));
-  ASSERT(!object.is(address));
-  ASSERT(!value.is(address));
+  DCHECK(!object.is(value));
+  DCHECK(!object.is(address));
+  DCHECK(!value.is(address));
   AssertNotSmi(object);
 
   if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -565,10 +531,6 @@
     bind(&ok);
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
@@ -599,11 +561,15 @@
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(address, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -957,14 +923,14 @@
 
 void MacroAssembler::EnterExitFramePrologue() {
   // Set up the frame structure on the stack.
-  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
-  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
-  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+  DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+  DCHECK(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   push(ebp);
   mov(ebp, esp);
 
   // Reserve room for entry stack pointer and push the code object.
-  ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   push(Immediate(0));  // Saved entry sp, patched before call.
   push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
 
@@ -992,9 +958,9 @@
   }
 
   // Get the required frame alignment for the OS.
-  const int kFrameAlignment = OS::ActivationFrameAlignment();
+  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
   if (kFrameAlignment > 0) {
-    ASSERT(IsPowerOf2(kFrameAlignment));
+    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
     and_(esp, -kFrameAlignment);
   }
 
@@ -1219,9 +1185,9 @@
                                             Label* miss) {
   Label same_contexts;
 
-  ASSERT(!holder_reg.is(scratch1));
-  ASSERT(!holder_reg.is(scratch2));
-  ASSERT(!scratch1.is(scratch2));
+  DCHECK(!holder_reg.is(scratch1));
+  DCHECK(!holder_reg.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
 
   // Load current lexical context from the stack frame.
   mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -1280,7 +1246,7 @@
 
 
 // Compute the hash code from the untagged key.  This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stubs-hydrogen.cc
 //
 // Note: r0 will contain hash code
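// For reference, a sketch of the hash this code must stay in sync with. As
// of this revision, ComputeIntegerHash in src/utils.h is the seeded Thomas
// Wang 32-bit integer mix; treat utils.h, which the comment above names, as
// the source of truth rather than this sketch.
#include <cstdint>

inline uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key;
  hash = hash ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;  // hash = (hash + (hash << 3)) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;  // masked to the positive smi range
}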
@@ -1367,7 +1333,7 @@
     and_(r2, r1);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
     lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
 
     // Check if the key matches.
@@ -1386,7 +1352,7 @@
   // Check that the value is a normal property.
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  ASSERT_EQ(NORMAL, 0);
+  DCHECK_EQ(NORMAL, 0);
   test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
   j(not_zero, miss);
@@ -1407,7 +1373,7 @@
   // Just return if allocation top is already known.
   if ((flags & RESULT_CONTAINS_TOP) != 0) {
     // No use of scratch if allocation top is provided.
-    ASSERT(scratch.is(no_reg));
+    DCHECK(scratch.is(no_reg));
 #ifdef DEBUG
     // Assert that result actually contains top on entry.
     cmp(result, Operand::StaticVariable(allocation_top));
@@ -1452,8 +1418,8 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
-  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1468,7 +1434,7 @@
     jmp(gc_required);
     return;
   }
-  ASSERT(!result.is(result_end));
+  DCHECK(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
@@ -1479,8 +1445,8 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
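// A sketch (illustrative, not V8 API) of the DOUBLE_ALIGNMENT fast path
// above: when the allocation top is only word aligned, plant a one-word
// filler map so the heap stays iterable, then bump top to the next 8-byte
// boundary. kOnePointerFillerMap stands in for the real filler map pointer.
#include <cstdint>

inline char* DoubleAlignAllocationTop(char* top,
                                      const void* kOnePointerFillerMap) {
  const uintptr_t kDoubleAlignmentMask = 8 - 1;  // kDoubleAlignment - 1
  if (reinterpret_cast<uintptr_t>(top) & kDoubleAlignmentMask) {
    // Corresponds to the not-taken j(zero, &aligned) branch above.
    *reinterpret_cast<const void**>(top) = kOnePointerFillerMap;
    top += 4;  // kPointerSize on ia32
  }
  return top;
}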
@@ -1516,7 +1482,7 @@
       sub(result, Immediate(object_size));
     }
   } else if (tag_result) {
-    ASSERT(kHeapObjectTag == 1);
+    DCHECK(kHeapObjectTag == 1);
     inc(result);
   }
 }
@@ -1531,7 +1497,7 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & SIZE_IN_WORDS) == 0);
+  DCHECK((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1545,7 +1511,7 @@
     jmp(gc_required);
     return;
   }
-  ASSERT(!result.is(result_end));
+  DCHECK(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
@@ -1556,8 +1522,8 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
@@ -1578,11 +1544,11 @@
     STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
     STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
     STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
-    ASSERT(element_size >= times_2);
-    ASSERT(kSmiTagSize == 1);
+    DCHECK(element_size >= times_2);
+    DCHECK(kSmiTagSize == 1);
     element_size = static_cast<ScaleFactor>(element_size - 1);
   } else {
-    ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+    DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
   }
   lea(result_end, Operand(element_count, element_size, header_size));
   add(result_end, result);
@@ -1591,7 +1557,7 @@
   j(above, gc_required);
 
   if ((flags & TAG_OBJECT) != 0) {
-    ASSERT(kHeapObjectTag == 1);
+    DCHECK(kHeapObjectTag == 1);
     inc(result);
   }
 
@@ -1606,7 +1572,7 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1620,7 +1586,7 @@
     jmp(gc_required);
     return;
   }
-  ASSERT(!result.is(result_end));
+  DCHECK(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
@@ -1631,8 +1597,8 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
@@ -1657,7 +1623,7 @@
 
   // Tag result if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    ASSERT(kHeapObjectTag == 1);
+    DCHECK(kHeapObjectTag == 1);
     inc(result);
   }
 
@@ -1683,14 +1649,18 @@
 void MacroAssembler::AllocateHeapNumber(Register result,
                                         Register scratch1,
                                         Register scratch2,
-                                        Label* gc_required) {
+                                        Label* gc_required,
+                                        MutableMode mode) {
   // Allocate heap number in new space.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
+  Handle<Map> map = mode == MUTABLE
+      ? isolate()->factory()->mutable_heap_number_map()
+      : isolate()->factory()->heap_number_map();
+
   // Set the map.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->heap_number_map()));
+  mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
 }
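// Hypothetical call site (register choices are illustrative; assumes the
// surrounding V8 headers): MUTABLE selects the mutable heap number map,
// used for boxed double fields that are overwritten in place rather than
// reallocated.
void EmitAllocateMutableHeapNumber(MacroAssembler* masm, Label* gc_required) {
  masm->AllocateHeapNumber(eax, ebx, ecx, gc_required, MUTABLE);
}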
 
 
@@ -1702,8 +1672,8 @@
                                            Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  ASSERT(kShortSize == 2);
+  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
   and_(scratch1, Immediate(~kObjectAlignmentMask));
@@ -1730,21 +1700,19 @@
 }
 
 
-void MacroAssembler::AllocateAsciiString(Register result,
-                                         Register length,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   mov(scratch1, length);
-  ASSERT(kCharSize == 1);
+  DCHECK(kCharSize == 1);
   add(scratch1, Immediate(kObjectAlignmentMask));
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
-  // Allocate ASCII string in new space.
+  // Allocate one-byte string in new space.
   Allocate(SeqOneByteString::kHeaderSize,
            times_1,
            scratch1,
@@ -1757,7 +1725,7 @@
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->ascii_string_map()));
+      Immediate(isolate()->factory()->one_byte_string_map()));
   mov(scratch1, length);
   SmiTag(scratch1);
   mov(FieldOperand(result, String::kLengthOffset), scratch1);
@@ -1766,20 +1734,18 @@
 }
 
 
-void MacroAssembler::AllocateAsciiString(Register result,
-                                         int length,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Label* gc_required) {
-  ASSERT(length > 0);
+void MacroAssembler::AllocateOneByteString(Register result, int length,
+                                           Register scratch1, Register scratch2,
+                                           Label* gc_required) {
+  DCHECK(length > 0);
 
-  // Allocate ASCII string in new space.
+  // Allocate one-byte string in new space.
   Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
            gc_required, TAG_OBJECT);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->ascii_string_map()));
+      Immediate(isolate()->factory()->one_byte_string_map()));
   mov(FieldOperand(result, String::kLengthOffset),
       Immediate(Smi::FromInt(length)));
   mov(FieldOperand(result, String::kHashFieldOffset),
@@ -1801,10 +1767,10 @@
 }
 
 
-void MacroAssembler::AllocateAsciiConsString(Register result,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
   Allocate(ConsString::kSize,
            result,
            scratch1,
@@ -1814,7 +1780,7 @@
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->cons_ascii_string_map()));
+      Immediate(isolate()->factory()->cons_one_byte_string_map()));
 }
 
 
@@ -1832,17 +1798,17 @@
 }
 
 
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
   // Allocate one-byte sliced string in new space.
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->sliced_ascii_string_map()));
+      Immediate(isolate()->factory()->sliced_one_byte_string_map()));
 }
 
 
@@ -1859,9 +1825,9 @@
                                Register length,
                                Register scratch) {
   Label short_loop, len4, len8, len12, done, short_string;
-  ASSERT(source.is(esi));
-  ASSERT(destination.is(edi));
-  ASSERT(length.is(ecx));
+  DCHECK(source.is(esi));
+  DCHECK(destination.is(edi));
+  DCHECK(length.is(ecx));
   cmp(length, Immediate(4));
   j(below, &short_string, Label::kNear);
 
@@ -1931,7 +1897,7 @@
                                     int field_offset,
                                     int bit_index) {
   bit_index += kSmiTagSize + kSmiShiftSize;
-  ASSERT(IsPowerOf2(kBitsPerByte));
+  DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
   int byte_index = bit_index / kBitsPerByte;
   int byte_bit_index = bit_index & (kBitsPerByte - 1);
   test_b(FieldOperand(object, field_offset + byte_index),
@@ -1972,27 +1938,27 @@
                                              Register scratch,
                                              Label* miss,
                                              bool miss_on_bound_function) {
-  // Check that the receiver isn't a smi.
-  JumpIfSmi(function, miss);
-
-  // Check that the function really is a function.
-  CmpObjectType(function, JS_FUNCTION_TYPE, result);
-  j(not_equal, miss);
-
+  Label non_instance;
   if (miss_on_bound_function) {
+    // Check that the receiver isn't a smi.
+    JumpIfSmi(function, miss);
+
+    // Check that the function really is a function.
+    CmpObjectType(function, JS_FUNCTION_TYPE, result);
+    j(not_equal, miss);
+
     // If a bound function, go to miss label.
     mov(scratch,
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
     BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
                    SharedFunctionInfo::kBoundFunction);
     j(not_zero, miss);
-  }
 
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
-  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
-  j(not_zero, &non_instance);
+    // Make sure that the function has an instance prototype.
+    movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
+    test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
+    j(not_zero, &non_instance);
+  }
 
   // Get the prototype or initial map from the function.
   mov(result,
@@ -2011,12 +1977,15 @@
 
   // Get the prototype from the initial map.
   mov(result, FieldOperand(result, Map::kPrototypeOffset));
-  jmp(&done);
 
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in initial map.
-  bind(&non_instance);
-  mov(result, FieldOperand(result, Map::kConstructorOffset));
+  if (miss_on_bound_function) {
+    jmp(&done);
+
+    // Non-instance prototype: Fetch prototype from constructor field
+    // in initial map.
+    bind(&non_instance);
+    mov(result, FieldOperand(result, Map::kConstructorOffset));
+  }
 
   // All done.
   bind(&done);
@@ -2024,7 +1993,7 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
-  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
+  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
@@ -2035,7 +2004,7 @@
 
 
 void MacroAssembler::StubReturn(int argc) {
-  ASSERT(argc >= 1 && generating_stub());
+  DCHECK(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
@@ -2049,7 +2018,7 @@
   // The assert checks that the constants for the maximum number of digits
   // for an array index cached in the hash field and the number of bits
   // reserved for it does not conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   if (!index.is(hash)) {
     mov(index, hash);
@@ -2116,7 +2085,7 @@
 void MacroAssembler::PrepareCallApiFunction(int argc) {
   EnterApiExitFrame(argc);
   if (emit_debug_code()) {
-    mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -2135,7 +2104,7 @@
   ExternalReference level_address =
       ExternalReference::handle_scope_level_address(isolate());
 
-  ASSERT(edx.is(function_address));
+  DCHECK(edx.is(function_address));
   // Allocate HandleScope in callee-save registers.
   mov(ebx, Operand::StaticVariable(next_address));
   mov(edi, Operand::StaticVariable(limit_address));
@@ -2252,7 +2221,7 @@
   bind(&promote_scheduled_exception);
   {
     FrameScope frame(this, StackFrame::INTERNAL);
-    CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
+    CallRuntime(Runtime::kPromoteScheduledException, 0);
   }
   jmp(&exception_handled);
 
@@ -2292,7 +2261,7 @@
   *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
+    DCHECK(actual.is_immediate());
     if (expected.immediate() == actual.immediate()) {
       definitely_matches = true;
     } else {
@@ -2316,15 +2285,15 @@
       // IC mechanism.
       cmp(expected.reg(), actual.immediate());
       j(equal, &invoke);
-      ASSERT(expected.reg().is(ebx));
+      DCHECK(expected.reg().is(ebx));
       mov(eax, actual.immediate());
     } else if (!expected.reg().is(actual.reg())) {
       // Both expected and actual are in (different) registers. This
       // is the case when we invoke functions using call and apply.
       cmp(expected.reg(), actual.reg());
       j(equal, &invoke);
-      ASSERT(actual.reg().is(eax));
-      ASSERT(expected.reg().is(ebx));
+      DCHECK(actual.reg().is(eax));
+      DCHECK(expected.reg().is(ebx));
     }
   }
 
@@ -2359,7 +2328,7 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
   bool definitely_mismatches = false;
@@ -2372,7 +2341,7 @@
       call(code);
       call_wrapper.AfterCall();
     } else {
-      ASSERT(flag == JUMP_FUNCTION);
+      DCHECK(flag == JUMP_FUNCTION);
       jmp(code);
     }
     bind(&done);
@@ -2385,9 +2354,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
-  ASSERT(fun.is(edi));
+  DCHECK(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2405,9 +2374,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
-  ASSERT(fun.is(edi));
+  DCHECK(fun.is(edi));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
@@ -2429,7 +2398,7 @@
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
   // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments matches the expected number of arguments. Fake a
@@ -2452,7 +2421,7 @@
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  ASSERT(!target.is(edi));
+  DCHECK(!target.is(edi));
   // Load the JavaScript builtin function from the builtins object.
   GetBuiltinFunction(edi, id);
   // Load the code entry point from the function into the target register.
@@ -2565,7 +2534,7 @@
   // The registers are pushed starting with the lowest encoding,
   // which means that lowest encodings are furthest away from
   // the stack pointer.
-  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
   return kNumSafepointRegisters - reg_code - 1;
 }
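// Worked example of the reversal above (illustrative). With
// kNumSafepointRegisters == 8 on ia32: eax has encoding 0 and is pushed
// first, so it sits furthest from esp, in slot 8 - 0 - 1 == 7; the highest
// encoding lands in slot 0, nearest the stack pointer.
inline int SafepointSlotForSketch(int reg_code, int num_safepoint_registers) {
  return num_safepoint_registers - reg_code - 1;  // mirrors the return above
}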
 
@@ -2651,7 +2620,7 @@
 
 void MacroAssembler::Move(XMMRegister dst, double val) {
   // TODO(titzer): recognize double constants with ExternalReferences.
-  uint64_t int_val = BitCast<uint64_t, double>(val);
+  uint64_t int_val = bit_cast<uint64_t, double>(val);
   if (int_val == 0) {
     xorps(dst, dst);
   } else {
@@ -2673,7 +2642,7 @@
 
 
 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand operand = Operand::StaticVariable(ExternalReference(counter));
     if (value == 1) {
@@ -2686,7 +2655,7 @@
 
 
 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand operand = Operand::StaticVariable(ExternalReference(counter));
     if (value == 1) {
@@ -2701,7 +2670,7 @@
 void MacroAssembler::IncrementCounter(Condition cc,
                                       StatsCounter* counter,
                                       int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Label skip;
     j(NegateCondition(cc), &skip);
@@ -2716,7 +2685,7 @@
 void MacroAssembler::DecrementCounter(Condition cc,
                                       StatsCounter* counter,
                                       int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Label skip;
     j(NegateCondition(cc), &skip);
@@ -2762,10 +2731,10 @@
 
 
 void MacroAssembler::CheckStackAlignment() {
-  int frame_alignment = OS::ActivationFrameAlignment();
+  int frame_alignment = base::OS::ActivationFrameAlignment();
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     Label alignment_as_expected;
     test(esp, Immediate(frame_alignment_mask));
     j(zero, &alignment_as_expected);
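
The test emitted here is the standard power-of-two alignment check. A minimal sketch of the same predicate, with IsAligned standing in for the generated test/j sequence:

#include <cassert>
#include <cstdint>

// For a power-of-two alignment, addr & (alignment - 1) extracts the
// misaligned low bits, which is why the DCHECK above requires
// IsPowerOfTwo32(frame_alignment).
bool IsAligned(uintptr_t addr, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);
  return (addr & (alignment - 1)) == 0;
}

int main() {
  assert(IsAligned(0x1000, 16));
  assert(!IsAligned(0x1004, 16));
}
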
@@ -2820,7 +2789,7 @@
 void MacroAssembler::LoadPowerOf2(XMMRegister dst,
                                   Register scratch,
                                   int power) {
-  ASSERT(is_uintn(power + HeapNumber::kExponentBias,
+  DCHECK(is_uintn(power + HeapNumber::kExponentBias,
                   HeapNumber::kExponentBits));
   mov(scratch, Immediate(power + HeapNumber::kExponentBias));
   movd(dst, scratch);
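
LoadPowerOf2 builds 2^power directly from the IEEE-754 bit layout: a power of two has a zero mantissa, so shifting the biased exponent into the top bits yields the whole value. A sketch under that assumption (1023 and 52 mirror HeapNumber::kExponentBias and kMantissaBits):

#include <cassert>
#include <cstdint>
#include <cstring>

double PowerOf2(int power) {
  const uint64_t kExponentBias = 1023;
  const int kMantissaBits = 52;
  uint64_t bits = (power + kExponentBias) << kMantissaBits;
  double result;
  std::memcpy(&result, &bits, sizeof result);  // the bit_cast above
  return result;
}

int main() {
  assert(PowerOf2(0) == 1.0);
  assert(PowerOf2(10) == 1024.0);
  assert(PowerOf2(-1) == 0.5);
}
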
@@ -2903,10 +2872,8 @@
 }
 
 
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
-    Register instance_type,
-    Register scratch,
-    Label* failure) {
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
+    Register instance_type, Register scratch, Label* failure) {
   if (!scratch.is(instance_type)) {
     mov(scratch, instance_type);
   }
@@ -2917,11 +2884,11 @@
 }
 
 
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
-                                                         Register object2,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
+                                                           Register object2,
+                                                           Register scratch1,
+                                                           Register scratch2,
+                                                           Label* failure) {
   // Check that both objects are not smis.
   STATIC_ASSERT(kSmiTag == 0);
   mov(scratch1, object1);
@@ -2934,24 +2901,24 @@
   movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ASCII strings.
-  const int kFlatAsciiStringMask =
+  // Check that both are flat one-byte strings.
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
   // Interleave bits from both instance types and compare them in one check.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  and_(scratch1, kFlatAsciiStringMask);
-  and_(scratch2, kFlatAsciiStringMask);
+  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+  and_(scratch1, kFlatOneByteStringMask);
+  and_(scratch2, kFlatOneByteStringMask);
   lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
   j(not_equal, failure);
 }
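
The interleaving deserves a worked example. A standalone sketch, where kMask and kTag mirror the shape of kFlatOneByteStringMask and kFlatOneByteStringTag (0x87 and 0x04 are consistent with the kOneByteStringTag == 0x04 assertion nearby, but treat the exact values as illustrative):

#include <cassert>

// Both masked instance types are packed into one word; the second is
// shifted by 3, which cannot collide because kMask & (kMask << 3) == 0,
// the condition the DCHECK_EQ above verifies.
bool BothFlatOneByte(unsigned type1, unsigned type2) {
  const unsigned kMask = 0x87;  // not-string | representation | encoding
  const unsigned kTag = 0x04;   // sequential + one-byte string
  assert((kMask & (kMask << 3)) == 0);
  // lea(scratch1, Operand(scratch1, scratch2, times_8, 0)) computes
  // scratch1 + scratch2 * 8, i.e. exactly this packing.
  unsigned packed = (type1 & kMask) + ((type2 & kMask) << 3);
  return packed == (kTag | (kTag << 3));
}

int main() {
  assert(BothFlatOneByte(0x04, 0x04));   // two sequential one-byte strings
  assert(!BothFlatOneByte(0x04, 0x00));  // second string is two-byte
}
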
 
 
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
-                                         Label* not_unique_name,
-                                         Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+                                                     Label* not_unique_name,
+                                                     Label::Distance distance) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -2999,13 +2966,13 @@
 
 
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
-  int frame_alignment = OS::ActivationFrameAlignment();
+  int frame_alignment = base::OS::ActivationFrameAlignment();
   if (frame_alignment != 0) {
     // Make stack end at alignment and make room for num_arguments words
     // and the original value of esp.
     mov(scratch, esp);
     sub(esp, Immediate((num_arguments + 1) * kPointerSize));
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     and_(esp, -frame_alignment);
     mov(Operand(esp, num_arguments * kPointerSize), scratch);
   } else {
@@ -3024,14 +2991,14 @@
 
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
-  ASSERT(has_frame());
+  DCHECK(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
   }
 
   call(function);
-  if (OS::ActivationFrameAlignment() != 0) {
+  if (base::OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
     add(esp, Immediate(num_arguments * kPointerSize));
@@ -3039,15 +3006,33 @@
 }
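
The esp gymnastics in PrepareCallCFunction follow a common pattern: reserve the argument slots plus one extra word, round down to the ABI alignment, and keep the original esp reachable so CallCFunction can restore it with one load. A sketch of the arithmetic (kPointerSize = 4 for ia32; the 16-byte alignment in the test is an assumption):

#include <cassert>
#include <cstdint>

uintptr_t PrepareFrame(uintptr_t esp, int num_arguments, int alignment) {
  const int kPointerSize = 4;  // ia32
  esp -= (num_arguments + 1) * kPointerSize;  // args + saved-esp slot
  // and_(esp, -alignment): -alignment in two's complement equals
  // ~(alignment - 1), so this rounds esp down to the alignment.
  esp &= ~static_cast<uintptr_t>(alignment - 1);
  return esp;  // original esp is stored at esp + num_arguments * 4
}

int main() {
  uintptr_t esp = PrepareFrame(0x1000, 2, 16);
  assert(esp % 16 == 0 && esp <= 0x1000 - 3 * 4);
}
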
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6,
+                Register reg7,
+                Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
 }
+#endif
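
The rewritten AreAliased reduces aliasing to a counting argument: every valid register sets one bit in a RegList, duplicates collapse into the same bit, so any aliasing makes the population count fall short of the valid-register count. A sketch of the idea (__builtin_popcount stands in for NumRegs and is GCC/Clang-specific):

#include <cassert>
#include <initializer_list>

bool Aliased(std::initializer_list<int> reg_codes) {
  unsigned regs = 0;
  int n_valid = 0;
  for (int code : reg_codes) {
    if (code < 0) continue;  // stands in for no_reg / !is_valid()
    ++n_valid;
    regs |= 1u << code;     // duplicates collapse into one bit
  }
  int distinct = __builtin_popcount(regs);  // NumRegs(regs)
  return n_valid != distinct;
}

int main() {
  assert(!Aliased({0, 1, 2, -1}));  // eax, ecx, edx, no_reg
  assert(Aliased({0, 1, 0, -1}));   // eax listed twice
}
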
 
 
 CodePatcher::CodePatcher(byte* address, int size)
@@ -3057,17 +3042,17 @@
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
-  CPU::FlushICache(address_, size_);
+  CpuFeatures::FlushICache(address_, size_);
 
   // Check that the code was patched as expected.
-  ASSERT(masm_.pc_ == address_ + size_);
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
@@ -3078,7 +3063,7 @@
     Condition cc,
     Label* condition_met,
     Label::Distance condition_met_distance) {
-  ASSERT(cc == zero || cc == not_zero);
+  DCHECK(cc == zero || cc == not_zero);
   if (scratch.is(object)) {
     and_(scratch, Immediate(~Page::kPageAlignmentMask));
   } else {
@@ -3101,12 +3086,13 @@
     Condition cc,
     Label* condition_met,
     Label::Distance condition_met_distance) {
-  ASSERT(cc == zero || cc == not_zero);
+  DCHECK(cc == zero || cc == not_zero);
   Page* page = Page::FromAddress(map->address());
+  DCHECK(!serializer_enabled());  // Serializer cannot match page_flags.
   ExternalReference reference(ExternalReference::page_flags(page));
   // The inlined static address check of the page's flags relies
   // on maps never being compacted.
-  ASSERT(!isolate()->heap()->mark_compact_collector()->
+  DCHECK(!isolate()->heap()->mark_compact_collector()->
          IsOnEvacuationCandidate(*map));
   if (mask < (1 << kBitsPerByte)) {
     test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
@@ -3137,7 +3123,7 @@
   HasColor(object, scratch0, scratch1,
            on_black, on_black_near,
            1, 0);  // kBlackBitPattern.
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
 }
 
 
@@ -3148,7 +3134,7 @@
                               Label::Distance has_color_distance,
                               int first_bit,
                               int second_bit) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
 
   GetMarkBits(object, bitmap_scratch, mask_scratch);
 
@@ -3172,7 +3158,7 @@
 void MacroAssembler::GetMarkBits(Register addr_reg,
                                  Register bitmap_reg,
                                  Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+  DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
   mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
   and_(bitmap_reg, addr_reg);
   mov(ecx, addr_reg);
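
GetMarkBits splits an address into a page header, a bitmap cell, and a bit mask. A hedged sketch of that arithmetic, assuming one mark bit per pointer-aligned word, 32-bit bitmap cells, and 1 MB pages (page size and constants are illustrative):

#include <cassert>
#include <cstdint>

struct MarkBitLocation {
  uintptr_t page;  // addr & ~kPageAlignmentMask, the page header
  uintptr_t cell;  // index into the bitmap, in 32-bit cells
  uint32_t mask;   // bit within that cell
};

MarkBitLocation ComputeMarkBits(uintptr_t addr) {
  const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // 1 MB, illustrative
  const int kPointerSizeLog2 = 2;                       // ia32
  uintptr_t word_index = (addr & kPageAlignmentMask) >> kPointerSizeLog2;
  MarkBitLocation loc;
  loc.page = addr & ~kPageAlignmentMask;
  loc.cell = word_index >> 5;          // 32 bits per bitmap cell
  loc.mask = 1u << (word_index & 31);
  return loc;
}

int main() {
  MarkBitLocation loc = ComputeMarkBits(0x08100040);
  assert(loc.page == 0x08100000);
  assert(loc.cell == 0 && loc.mask == (1u << 16));
}
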
@@ -3197,14 +3183,14 @@
     Register mask_scratch,
     Label* value_is_white_and_not_data,
     Label::Distance distance) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
   GetMarkBits(value, bitmap_scratch, mask_scratch);
 
   // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
 
   Label done;
 
@@ -3242,8 +3228,8 @@
 
   bind(&not_heap_number);
   // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   Register instance_type = ecx;
@@ -3256,24 +3242,24 @@
   Label not_external;
   // External strings are the only ones with the kExternalStringTag bit
   // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
   test_b(instance_type, kExternalStringTag);
   j(zero, &not_external, Label::kNear);
   mov(length, Immediate(ExternalString::kSize));
   jmp(&is_data_object, Label::kNear);
 
   bind(&not_external);
-  // Sequential string, either ASCII or UC16.
-  ASSERT(kOneByteStringTag == 0x04);
+  // Sequential string, either Latin1 or UC16.
+  DCHECK(kOneByteStringTag == 0x04);
   and_(length, Immediate(kStringEncodingMask));
   xor_(length, Immediate(kStringEncodingMask));
   add(length, Immediate(0x04));
-  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+  // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
   // by 2. If we multiply the smi-encoded string length by this, it still
   // won't overflow a 32-bit value.
-  ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
-  ASSERT(SeqOneByteString::kMaxSize <=
+  DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
+  DCHECK(SeqOneByteString::kMaxSize <=
          static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
   imul(length, FieldOperand(value, String::kLengthOffset));
   shr(length, 2 + kSmiTagSize + kSmiShiftSize);
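
The three instructions deriving the character size are worth unpacking: the encoding bit (0x04, set for one-byte strings) is isolated, inverted, and offset, so the result is the character size already shifted left by two. A sketch:

#include <cassert>

int CharSizeShifted(unsigned instance_type) {
  const unsigned kStringEncodingMask = 0x04;
  unsigned v = instance_type & kStringEncodingMask;  // 4 if one-byte, else 0
  v ^= kStringEncodingMask;                          // 0 if one-byte, else 4
  return v + 0x04;                                   // 4 (Latin1) or 8 (UC16)
}

int main() {
  assert(CharSizeShifted(0x04) == 4);  // one-byte: char size 1, shifted by 2
  assert(CharSizeShifted(0x00) == 8);  // two-byte: char size 2, shifted by 2
}
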
@@ -3373,7 +3359,7 @@
     Register scratch0,
     Register scratch1,
     Label* found) {
-  ASSERT(!scratch1.is(scratch0));
+  DCHECK(!scratch1.is(scratch0));
   Factory* factory = isolate()->factory();
   Register current = scratch0;
   Label loop_again;
@@ -3395,14 +3381,16 @@
 
 
 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
-  ASSERT(!dividend.is(eax));
-  ASSERT(!dividend.is(edx));
-  MultiplierAndShift ms(divisor);
-  mov(eax, Immediate(ms.multiplier()));
+  DCHECK(!dividend.is(eax));
+  DCHECK(!dividend.is(edx));
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+  mov(eax, Immediate(mag.multiplier));
   imul(dividend);
-  if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
-  if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
-  if (ms.shift() > 0) sar(edx, ms.shift());
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) add(edx, dividend);
+  if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
+  if (mag.shift > 0) sar(edx, mag.shift);
   mov(eax, dividend);
   shr(eax, 31);
   add(edx, eax);
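
The new TruncatingDiv keeps the classic Hacker's Delight shape: take the high half of a widening signed multiply by a precomputed magic multiplier, apply the sign fixups, shift, and round toward zero. A standalone sketch; Magic and the 0x55555556 pair for dividing by 3 stand in for base::MagicNumbersForDivision and base::SignedDivisionByConstant:

#include <cassert>
#include <cstdint>

struct Magic { uint32_t multiplier; int shift; };

int32_t TruncatingDiv(int32_t dividend, int32_t divisor, Magic mag) {
  // imul dividend: edx gets the high 32 bits of the signed product.
  int64_t product =
      static_cast<int64_t>(dividend) * static_cast<int32_t>(mag.multiplier);
  int32_t high = static_cast<int32_t>(product >> 32);
  bool neg = (mag.multiplier & 0x80000000u) != 0;
  if (divisor > 0 && neg) high += dividend;                         // add(edx, dividend)
  if (divisor < 0 && !neg && mag.multiplier > 0) high -= dividend;  // sub(edx, dividend)
  if (mag.shift > 0) high >>= mag.shift;                            // sar(edx, shift)
  // shr eax, 31; add edx, eax: adds 1 for negative dividends so the
  // quotient truncates toward zero.
  return high + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
}

int main() {
  Magic div3 = {0x55555556u, 0};  // well-known magic pair for divisor 3
  assert(TruncatingDiv(7, 3, div3) == 2);
  assert(TruncatingDiv(-7, 3, div3) == -2);
}
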
diff --git a/src/ia32/macro-assembler-ia32.h b/src/ia32/macro-assembler-ia32.h
index b0b61f7..81347e5 100644
--- a/src/ia32/macro-assembler-ia32.h
+++ b/src/ia32/macro-assembler-ia32.h
@@ -6,6 +6,7 @@
 #define V8_IA32_MACRO_ASSEMBLER_IA32_H_
 
 #include "src/assembler.h"
+#include "src/bailout-reason.h"
 #include "src/frames.h"
 #include "src/globals.h"
 
@@ -30,7 +31,16 @@
 };
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3 = no_reg,
+                Register reg4 = no_reg,
+                Register reg5 = no_reg,
+                Register reg6 = no_reg,
+                Register reg7 = no_reg,
+                Register reg8 = no_reg);
+#endif
 
 
 // MacroAssembler implements a collection of frequently used macros.
@@ -454,11 +464,9 @@
   void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
 
   void DoubleToI(Register result_reg, XMMRegister input_reg,
-      XMMRegister scratch, MinusZeroMode minus_zero_mode,
-      Label* conversion_failed, Label::Distance dst = Label::kFar);
-
-  void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
-      MinusZeroMode minus_zero_mode, Label* lost_precision);
+                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
+                 Label* lost_precision, Label* is_nan, Label* minus_zero,
+                 Label::Distance dst = Label::kFar);
 
   // Smi tagging support.
   void SmiTag(Register reg) {
@@ -638,7 +646,8 @@
   void AllocateHeapNumber(Register result,
                           Register scratch1,
                           Register scratch2,
-                          Label* gc_required);
+                          Label* gc_required,
+                          MutableMode mode = IMMUTABLE);
 
   // Allocate a sequential string. All the header fields of the string object
   // are initialized.
@@ -648,17 +657,11 @@
                              Register scratch2,
                              Register scratch3,
                              Label* gc_required);
-  void AllocateAsciiString(Register result,
-                           Register length,
-                           Register scratch1,
-                           Register scratch2,
-                           Register scratch3,
-                           Label* gc_required);
-  void AllocateAsciiString(Register result,
-                           int length,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
+  void AllocateOneByteString(Register result, int length, Register scratch1,
+                             Register scratch2, Label* gc_required);
 
   // Allocate a raw cons string object. Only the map field of the result is
   // initialized.
@@ -666,10 +669,8 @@
                           Register scratch1,
                           Register scratch2,
                           Label* gc_required);
-  void AllocateAsciiConsString(Register result,
-                               Register scratch1,
-                               Register scratch2,
-                               Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register scratch1,
+                                 Register scratch2, Label* gc_required);
 
   // Allocate a raw sliced string object. Only the map field of the result is
   // initialized.
@@ -677,10 +678,8 @@
                             Register scratch1,
                             Register scratch2,
                             Label* gc_required);
-  void AllocateAsciiSlicedString(Register result,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register scratch1,
+                                   Register scratch2, Label* gc_required);
 
   // Copy memory, byte-by-byte, from source to destination.  Not optimized for
   // long or aligned copies.
@@ -847,7 +846,7 @@
   void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
 
   Handle<Object> CodeObject() {
-    ASSERT(!code_object_.is_null());
+    DCHECK(!code_object_.is_null());
     return code_object_;
   }
 
@@ -904,29 +903,27 @@
                                Register scratch2,
                                Label* not_found);
 
-  // Check whether the instance type represents a flat ASCII string. Jump to the
-  // label if not. If the instance type can be scratched specify same register
-  // for both instance type and scratch.
-  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
-                                              Register scratch,
-                                              Label* on_not_flat_ascii_string);
+  // Check whether the instance type represents a flat one-byte string. Jump to
+  // the label if not. If the instance type can be scratched specify same
+  // register for both instance type and scratch.
+  void JumpIfInstanceTypeIsNotSequentialOneByte(
+      Register instance_type, Register scratch,
+      Label* on_not_flat_one_byte_string);
 
-  // Checks if both objects are sequential ASCII strings, and jumps to label
+  // Checks if both objects are sequential one-byte strings, and jumps to label
   // if either is not.
-  void JumpIfNotBothSequentialAsciiStrings(Register object1,
-                                           Register object2,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* on_not_flat_ascii_strings);
+  void JumpIfNotBothSequentialOneByteStrings(
+      Register object1, Register object2, Register scratch1, Register scratch2,
+      Label* on_not_flat_one_byte_strings);
 
   // Checks if the given register or operand is a unique name
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar) {
-    JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar) {
+    JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
   }
 
-  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
diff --git a/src/ia32/regexp-macro-assembler-ia32.cc b/src/ia32/regexp-macro-assembler-ia32.cc
index 1945bd6..4118db8 100644
--- a/src/ia32/regexp-macro-assembler-ia32.cc
+++ b/src/ia32/regexp-macro-assembler-ia32.cc
@@ -7,11 +7,12 @@
 #if V8_TARGET_ARCH_IA32
 
 #include "src/cpu-profiler.h"
-#include "src/unicode.h"
 #include "src/log.h"
-#include "src/regexp-stack.h"
 #include "src/macro-assembler.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
 #include "src/ia32/regexp-macro-assembler-ia32.h"
 
 namespace v8 {
@@ -91,7 +92,7 @@
       success_label_(),
       backtrack_label_(),
       exit_label_() {
-  ASSERT_EQ(0, registers_to_save % 2);
+  DCHECK_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code later.
   __ bind(&start_label_);  // And then continue from here.
 }
@@ -123,8 +124,8 @@
 
 
 void RegExpMacroAssemblerIA32::AdvanceRegister(int reg, int by) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < num_registers_);
+  DCHECK(reg >= 0);
+  DCHECK(reg < num_registers_);
   if (by != 0) {
     __ add(register_location(reg), Immediate(by));
   }
@@ -219,7 +220,7 @@
   __ add(eax, ebx);
   BranchOrBacktrack(greater, on_no_match);
 
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     Label success;
     Label fail;
     Label loop_increment;
@@ -281,7 +282,7 @@
     // Compute new value of character position after the matched part.
     __ sub(edi, esi);
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     // Save registers before calling C function.
     __ push(esi);
     __ push(edi);
@@ -365,11 +366,11 @@
 
   Label loop;
   __ bind(&loop);
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     __ movzx_b(eax, Operand(edx, 0));
     __ cmpb_al(Operand(ebx, 0));
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     __ movzx_w(eax, Operand(edx, 0));
     __ cmpw_ax(Operand(ebx, 0));
   }
@@ -438,7 +439,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
   __ lea(eax, Operand(current_character(), -minus));
   if (c == 0) {
     __ test(eax, Immediate(mask));
@@ -475,7 +476,7 @@
     Label* on_bit_set) {
   __ mov(eax, Immediate(table));
   Register index = current_character();
-  if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
     __ mov(ebx, kTableSize - 1);
     __ and_(ebx, current_character());
     index = ebx;
@@ -492,7 +493,7 @@
   switch (type) {
   case 's':
     // Match space-characters
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
+      // One-byte space characters are '\t'..'\r', ' ' and \u00a0.
       Label success;
       __ cmp(current_character(), ' ');
@@ -542,12 +543,12 @@
     return true;
   }
   case 'w': {
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ cmp(current_character(), Immediate('z'));
       BranchOrBacktrack(above, on_no_match);
     }
-    ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
+    DCHECK_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
     ExternalReference word_map = ExternalReference::re_word_character_map();
     __ test_b(current_character(),
               Operand::StaticArray(current_character(), times_1, word_map));
@@ -556,17 +557,17 @@
   }
   case 'W': {
     Label done;
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ cmp(current_character(), Immediate('z'));
       __ j(above, &done);
     }
-    ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
+    DCHECK_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
     ExternalReference word_map = ExternalReference::re_word_character_map();
     __ test_b(current_character(),
               Operand::StaticArray(current_character(), times_1, word_map));
     BranchOrBacktrack(not_zero, on_no_match);
-    if (mode_ != ASCII) {
+    if (mode_ != LATIN1) {
       __ bind(&done);
     }
     return true;
@@ -583,12 +584,12 @@
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
     __ sub(eax, Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       BranchOrBacktrack(above, on_no_match);
     } else {
       Label done;
       BranchOrBacktrack(below_equal, &done);
-      ASSERT_EQ(UC16, mode_);
+      DCHECK_EQ(UC16, mode_);
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed (current_char ^ 0x01 - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
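
The XOR trick here collapses each line-terminator pair into a single range test: c ^ 1 maps '\n'/'\r' onto the adjacent values 0x0b/0x0c and maps U+2028/U+2029 onto each other. A sketch of the predicate being compiled:

#include <cassert>
#include <cstdint>

bool IsLineTerminator(uint16_t c) {
  uint16_t x = static_cast<uint16_t>((c ^ 0x01) - 0x0b);
  if (x <= 0x01) return true;  // '\n' (x == 0) or '\r' (x == 1)
  x = static_cast<uint16_t>(x - (0x2028 - 0x0b));
  return x <= 0x01;            // U+2029 (x == 0) or U+2028 (x == 1)
}

int main() {
  assert(IsLineTerminator('\n') && IsLineTerminator('\r'));
  assert(IsLineTerminator(0x2028) && IsLineTerminator(0x2029));
  assert(!IsLineTerminator('a'));
}
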
@@ -946,8 +947,8 @@
                                                     Label* on_end_of_input,
                                                     bool check_bounds,
                                                     int characters) {
-  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
-  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
+  DCHECK(cp_offset >= -1);      // ^ and \b can look behind one character.
+  DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     CheckPosition(cp_offset + characters - 1, on_end_of_input);
   }
@@ -1009,7 +1010,7 @@
 
 
 void RegExpMacroAssemblerIA32::SetRegister(int register_index, int to) {
-  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
   __ mov(register_location(register_index), Immediate(to));
 }
 
@@ -1032,7 +1033,7 @@
 
 
 void RegExpMacroAssemblerIA32::ClearRegisters(int reg_from, int reg_to) {
-  ASSERT(reg_from <= reg_to);
+  DCHECK(reg_from <= reg_to);
   __ mov(eax, Operand(ebp, kInputStartMinusOne));
   for (int reg = reg_from; reg <= reg_to; reg++) {
     __ mov(register_location(reg), eax);
@@ -1098,10 +1099,10 @@
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
 
   // Current string.
-  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
 
-  ASSERT(re_code->instruction_start() <= *return_address);
-  ASSERT(*return_address <=
+  DCHECK(re_code->instruction_start() <= *return_address);
+  DCHECK(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
   Object* result = isolate->stack_guard()->HandleInterrupts();
@@ -1129,8 +1130,8 @@
   }
 
   // String might have changed.
-  if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
-    // If we changed between an ASCII and an UC16 string, the specialized
+  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a LATIN1 and a UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
     return RETRY;
@@ -1140,7 +1141,7 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+  DCHECK(StringShape(*subject_tmp).IsSequential() ||
       StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
@@ -1172,7 +1173,7 @@
 
 
 Operand RegExpMacroAssemblerIA32::register_location(int register_index) {
-  ASSERT(register_index < (1<<30));
+  DCHECK(register_index < (1<<30));
   if (num_registers_ <= register_index) {
     num_registers_ = register_index + 1;
   }
@@ -1226,7 +1227,7 @@
 
 
 void RegExpMacroAssemblerIA32::Push(Register source) {
-  ASSERT(!source.is(backtrack_stackpointer()));
+  DCHECK(!source.is(backtrack_stackpointer()));
   // Notice: This updates flags, unlike normal Push.
   __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), source);
@@ -1241,7 +1242,7 @@
 
 
 void RegExpMacroAssemblerIA32::Pop(Register target) {
-  ASSERT(!target.is(backtrack_stackpointer()));
+  DCHECK(!target.is(backtrack_stackpointer()));
   __ mov(target, Operand(backtrack_stackpointer(), 0));
   // Notice: This updates flags, unlike normal Pop.
   __ add(backtrack_stackpointer(), Immediate(kPointerSize));
@@ -1277,22 +1278,22 @@
 
 void RegExpMacroAssemblerIA32::LoadCurrentCharacterUnchecked(int cp_offset,
                                                              int characters) {
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     if (characters == 4) {
       __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
     } else if (characters == 2) {
       __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
     }
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     if (characters == 2) {
       __ mov(current_character(),
              Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ movzx_w(current_character(),
                  Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
     }
diff --git a/src/ia32/regexp-macro-assembler-ia32.h b/src/ia32/regexp-macro-assembler-ia32.h
index e04a8ef..8f6499c 100644
--- a/src/ia32/regexp-macro-assembler-ia32.h
+++ b/src/ia32/regexp-macro-assembler-ia32.h
@@ -174,7 +174,7 @@
 
   MacroAssembler* masm_;
 
-  // Which mode to generate code for (ASCII or UC16).
+  // Which mode to generate code for (LATIN1 or UC16).
   Mode mode_;
 
   // One greater than maximal register index actually used.
diff --git a/src/ia32/stub-cache-ia32.cc b/src/ia32/stub-cache-ia32.cc
deleted file mode 100644
index 4927019..0000000
--- a/src/ia32/stub-cache-ia32.cc
+++ /dev/null
@@ -1,1494 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_IA32
-
-#include "src/ic-inl.h"
-#include "src/codegen.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
-                       MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register name,
-                       Register receiver,
-                       // Number of the cache entry pointer-size scaled.
-                       Register offset,
-                       Register extra) {
-  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
-  Label miss;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
-
-  if (extra.is_valid()) {
-    // Get the code entry from the cache.
-    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Check that the flags match what we're looking for.
-    __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
-    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
-    __ cmp(offset, flags);
-    __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    // Jump to the first instruction in the code stub.
-    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(extra);
-
-    __ bind(&miss);
-  } else {
-    // Save the offset on the stack.
-    __ push(offset);
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Restore offset register.
-    __ mov(offset, Operand(esp, 0));
-
-    // Get the code entry from the cache.
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Check that the flags match what we're looking for.
-    __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
-    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
-    __ cmp(offset, flags);
-    __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    // Restore offset and re-load code entry from cache.
-    __ pop(offset);
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Jump to the first instruction in the code stub.
-    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(offset);
-
-    // Pop at miss.
-    __ bind(&miss);
-    __ pop(offset);
-  }
-}
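
For reference, the address scaling at the top of the deleted ProbeTable: lea with a times_2 index computes base + index * 2, so a single lea multiplies the entry number by the 3-word (name, code, map) entry size without an imul. In plain C++:

unsigned ScaleByEntrySize(unsigned offset) {
  // lea(offset, Operand(offset, offset, times_2, 0))
  return offset + offset * 2;  // offset * 3
}
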
-
-
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                    Label* miss_label,
-                                                    Register receiver,
-                                                    Handle<Name> name,
-                                                    Register scratch0,
-                                                    Register scratch1) {
-  ASSERT(name->IsUniqueName());
-  ASSERT(!receiver.is(scratch0));
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
-  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
-            kInterceptorOrAccessCheckNeededMask);
-  __ j(not_zero, miss_label);
-
-  // Check that receiver is a JSObject.
-  __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
-  __ j(below, miss_label);
-
-  // Load properties array.
-  Register properties = scratch0;
-  __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
-  // Check that the properties array is a dictionary.
-  __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->hash_table_map()));
-  __ j(not_equal, miss_label);
-
-  Label done;
-  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   properties,
-                                                   name,
-                                                   scratch1);
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra,
-                              Register extra2,
-                              Register extra3) {
-  Label miss;
-
-  // Assert that code is valid.  The multiplying code relies on the entry size
-  // being 12.
-  ASSERT(sizeof(Entry) == 12);
-
-  // Assert the flags do not name a specific type.
-  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Assert that there are no register conflicts.
-  ASSERT(!scratch.is(receiver));
-  ASSERT(!scratch.is(name));
-  ASSERT(!extra.is(receiver));
-  ASSERT(!extra.is(name));
-  ASSERT(!extra.is(scratch));
-
-  // Assert scratch and extra registers are valid, and extra2/3 are unused.
-  ASSERT(!scratch.is(no_reg));
-  ASSERT(extra2.is(no_reg));
-  ASSERT(extra3.is(no_reg));
-
-  Register offset = scratch;
-  scratch = no_reg;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, flags);
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps. The same masking is applied in the two
-  // 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  // ProbeTable expects the offset to be pointer scaled, which it is, because
-  // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
-
-  // Probe the primary table.
-  ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, flags);
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  __ sub(offset, name);
-  __ add(offset, Immediate(flags));
-  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
-
-  // Probe the secondary table.
-  ProbeTable(
-      isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
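
The deleted GenerateProbe derived its two probe offsets from the name hash, the receiver map, and the code flags. A sketch of that hashing with the table sizes left as parameters (the shift mirrors the kHeapObjectTagSize == kPointerSizeLog2 assertion above; treat the layout as illustrative):

#include <cassert>
#include <cstdint>

const int kHeapObjectTagSize = 2;  // map pointers always end in 01

uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map_word,
                       uint32_t flags, uint32_t table_size) {
  // mov offset, hash; add offset, map; xor offset, flags; and offset, ...
  return ((name_hash + map_word) ^ flags) &
         ((table_size - 1) << kHeapObjectTagSize);
}

uint32_t SecondaryOffset(uint32_t primary, uint32_t name_word,
                         uint32_t flags, uint32_t table_size) {
  // sub offset, name; add offset, flags; and offset, ...
  return ((primary - name_word) + flags) &
         ((table_size - 1) << kHeapObjectTagSize);
}

int main() {
  uint32_t p = PrimaryOffset(0xdeadbeef, 0x08100001, 0x42, 512);
  assert(p < (512u << kHeapObjectTagSize));
  assert((p & 3) == 0);  // pointer-scaled: low two bits stay clear
}
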
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                       int index,
-                                                       Register prototype) {
-  __ LoadGlobalFunction(index, prototype);
-  __ LoadGlobalFunctionInitialMap(prototype, prototype);
-  // Load the prototype from the initial map.
-  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(masm->isolate()->native_context()->get(index)));
-  // Check we're still in the same context.
-  Register scratch = prototype;
-  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ mov(scratch, Operand(esi, offset));
-  __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
-  __ j(not_equal, miss);
-
-  // Load its initial map. The global functions all have initial maps.
-  __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
-  // Load the prototype from the initial map.
-  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register scratch,
-                                           Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss_label);
-
-  // Check that the object is a JS array.
-  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, miss_label);
-
-  // Load length directly from the JS array.
-  __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                                 Register receiver,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(eax, scratch1);
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            bool inobject,
-                                            int index,
-                                            Representation representation) {
-  ASSERT(!representation.IsDouble());
-  int offset = index * kPointerSize;
-  if (!inobject) {
-    // Calculate the offset into the properties array.
-    offset = offset + FixedArray::kHeaderSize;
-    __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
-    src = dst;
-  }
-  __ mov(dst, FieldOperand(src, offset));
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
-                                     Register receiver,
-                                     Register holder,
-                                     Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
-  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
-  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
-  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
-  __ push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
-  Register scratch = name;
-  __ mov(scratch, Immediate(interceptor));
-  __ push(scratch);
-  __ push(receiver);
-  __ push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj,
-    IC::UtilityId id) {
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-  __ CallExternalReference(
-      ExternalReference(IC_Utility(id), masm->isolate()),
-      StubCache::kInterceptorArgsLength);
-}
-
-
-// Generate a call to an API function.
-// This function uses push() to generate smaller, faster code than
-// the version above. It is an optimization that should be removed
-// when API call ICs are generated in hydrogen.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
-                                       const CallOptimization& optimization,
-                                       Handle<Map> receiver_map,
-                                       Register receiver,
-                                       Register scratch_in,
-                                       bool is_store,
-                                       int argc,
-                                       Register* values) {
-  // Copy return value.
-  __ pop(scratch_in);
-  // receiver
-  __ push(receiver);
-  // Write the arguments to stack frame.
-  for (int i = 0; i < argc; i++) {
-    Register arg = values[argc-1-i];
-    ASSERT(!receiver.is(arg));
-    ASSERT(!scratch_in.is(arg));
-    __ push(arg);
-  }
-  __ push(scratch_in);
-  // Stack now matches the JSFunction ABI.
-  ASSERT(optimization.is_simple_api_call());
-
-  // ABI for CallApiFunctionStub.
-  Register callee = eax;
-  Register call_data = ebx;
-  Register holder = ecx;
-  Register api_function_address = edx;
-  Register scratch = edi;  // scratch_in is no longer valid.
-
-  // Put holder in place.
-  CallOptimization::HolderLookup holder_lookup;
-  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
-      receiver_map,
-      &holder_lookup);
-  switch (holder_lookup) {
-    case CallOptimization::kHolderIsReceiver:
-      __ Move(holder, receiver);
-      break;
-    case CallOptimization::kHolderFound:
-      __ LoadHeapObject(holder, api_holder);
-     break;
-    case CallOptimization::kHolderNotFound:
-      UNREACHABLE();
-      break;
-  }
-
-  Isolate* isolate = masm->isolate();
-  Handle<JSFunction> function = optimization.constant_function();
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
-  // Put callee in place.
-  __ LoadHeapObject(callee, function);
-
-  bool call_data_undefined = false;
-  // Put call_data in place.
-  if (isolate->heap()->InNewSpace(*call_data_obj)) {
-    __ mov(scratch, api_call_info);
-    __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
-  } else if (call_data_obj->IsUndefined()) {
-    call_data_undefined = true;
-    __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
-  } else {
-    __ mov(call_data, call_data_obj);
-  }
-
-  // Put api_function_address in place.
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  __ mov(api_function_address, Immediate(function_address));
-
-  // Jump to stub.
-  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
-  __ TailCallStub(&stub);
-}
-
-
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
-                                            Label* label,
-                                            Handle<Name> name) {
-  if (!label->is_unused()) {
-    __ bind(label);
-    __ mov(this->name(), Immediate(name));
-  }
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
-                                             Handle<JSGlobalObject> global,
-                                             Handle<Name> name,
-                                             Register scratch,
-                                             Label* miss) {
-  Handle<PropertyCell> cell =
-      JSGlobalObject::EnsurePropertyCell(global, name);
-  ASSERT(cell->value()->IsTheHole());
-  Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
-  if (masm->serializer_enabled()) {
-    __ mov(scratch, Immediate(cell));
-    __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
-           Immediate(the_hole));
-  } else {
-    __ cmp(Operand::ForCell(cell), Immediate(the_hole));
-  }
-  __ j(not_equal, miss);
-}
-
-
-void StoreStubCompiler::GenerateNegativeHolderLookup(
-    MacroAssembler* masm,
-    Handle<JSObject> holder,
-    Register holder_reg,
-    Handle<Name> name,
-    Label* miss) {
-  if (holder->IsJSGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
-  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
-    GenerateDictionaryNegativeLookup(
-        masm, miss, holder_reg, name, scratch1(), scratch2());
-  }
-}
-
-
-// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
-// store is successful.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
-                                                Handle<JSObject> object,
-                                                LookupResult* lookup,
-                                                Handle<Map> transition,
-                                                Handle<Name> name,
-                                                Register receiver_reg,
-                                                Register storage_reg,
-                                                Register value_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register unused,
-                                                Label* miss_label,
-                                                Label* slow) {
-  int descriptor = transition->LastAdded();
-  DescriptorArray* descriptors = transition->instance_descriptors();
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  Representation representation = details.representation();
-  ASSERT(!representation.IsNone());
-
-  if (details.type() == CONSTANT) {
-    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
-    __ CmpObject(value_reg, constant);
-    __ j(not_equal, miss_label);
-  } else if (representation.IsSmi()) {
-      __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = descriptors->GetFieldType(descriptor);
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      Label do_store;
-      while (true) {
-        __ CompareMap(value_reg, it.Current());
-        it.Advance();
-        if (it.Done()) {
-          __ j(not_equal, miss_label);
-          break;
-        }
-        __ j(equal, &do_store, Label::kNear);
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    Label do_store, heap_number;
-    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
-
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiUntag(value_reg);
-    __ Cvtsi2sd(xmm0, value_reg);
-    __ SmiTag(value_reg);
-    __ jmp(&do_store);
-
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
-                miss_label, DONT_DO_SMI_CHECK);
-    __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ bind(&do_store);
-    __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
-  }
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  // Perform map transition for the receiver if necessary.
-  if (details.type() == FIELD &&
-      object->map()->unused_property_fields() == 0) {
-    // The properties must be extended before we can store the value.
-    // We jump to a runtime call that extends the properties array.
-    __ pop(scratch1);  // Return address.
-    __ push(receiver_reg);
-    __ push(Immediate(transition));
-    __ push(value_reg);
-    __ push(scratch1);
-    __ TailCallExternalReference(
-        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
-                          masm->isolate()),
-        3,
-        1);
-    return;
-  }
-
-  // Update the map of the object.
-  __ mov(scratch1, Immediate(transition));
-  __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
-  // Update the write barrier for the map field.
-  __ RecordWriteField(receiver_reg,
-                      HeapObject::kMapOffset,
-                      scratch1,
-                      scratch2,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  if (details.type() == CONSTANT) {
-    ASSERT(value_reg.is(eax));
-    __ ret(0);
-    return;
-  }
-
-  int index = transition->instance_descriptors()->GetFieldIndex(
-      transition->LastAdded());
-
-  // Adjust for the number of properties stored in the object. Even in the
-  // face of a transition we can use the old map here because the size of the
-  // object and the number of in-object properties are not going to change.
-  index -= object->map()->inobject_properties();
-
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  // TODO(verwaest): Share this code as a code stub.
-  if (index < 0) {
-    // Set the property straight into the object.
-    int offset = object->map()->instance_size() + (index * kPointerSize);
-    if (representation.IsDouble()) {
-      __ mov(FieldOperand(receiver_reg, offset), storage_reg);
-    } else {
-      __ mov(FieldOperand(receiver_reg, offset), value_reg);
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(receiver_reg,
-                          offset,
-                          storage_reg,
-                          scratch1,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    // Get the properties array (optimistically).
-    __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-    if (representation.IsDouble()) {
-      __ mov(FieldOperand(scratch1, offset), storage_reg);
-    } else {
-      __ mov(FieldOperand(scratch1, offset), value_reg);
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(scratch1,
-                          offset,
-                          storage_reg,
-                          receiver_reg,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  // Return the value (register eax).
-  ASSERT(value_reg.is(eax));
-  __ ret(0);
-}
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                           Handle<JSObject> object,
-                                           LookupResult* lookup,
-                                           Register receiver_reg,
-                                           Register name_reg,
-                                           Register value_reg,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* miss_label) {
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  FieldIndex index = lookup->GetFieldIndex();
-
-  Representation representation = lookup->representation();
-  ASSERT(!representation.IsNone());
-  if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = lookup->GetFieldType();
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      Label do_store;
-      while (true) {
-        __ CompareMap(value_reg, it.Current());
-        it.Advance();
-        if (it.Done()) {
-          __ j(not_equal, miss_label);
-          break;
-        }
-        __ j(equal, &do_store, Label::kNear);
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    // Load the double storage.
-    if (index.is_inobject()) {
-      __ mov(scratch1, FieldOperand(receiver_reg, index.offset()));
-    } else {
-      __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-      __ mov(scratch1, FieldOperand(scratch1, index.offset()));
-    }
-
-    // Store the value into the storage.
-    Label do_store, heap_number;
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiUntag(value_reg);
-    __ Cvtsi2sd(xmm0, value_reg);
-    __ SmiTag(value_reg);
-    __ jmp(&do_store);
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
-                miss_label, DONT_DO_SMI_CHECK);
-    __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
-    __ bind(&do_store);
-    __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
-    // Return the value (register eax).
-    ASSERT(value_reg.is(eax));
-    __ ret(0);
-    return;
-  }
-
-  ASSERT(!representation.IsDouble());
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index.is_inobject()) {
-    // Set the property straight into the object.
-    __ mov(FieldOperand(receiver_reg, index.offset()), value_reg);
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      // Pass the value being stored in the now unused name_reg.
-      __ mov(name_reg, value_reg);
-      __ RecordWriteField(receiver_reg,
-                          index.offset(),
-                          name_reg,
-                          scratch1,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    // Get the properties array (optimistically).
-    __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ mov(FieldOperand(scratch1, index.offset()), value_reg);
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      // Pass the value being stored in the now unused name_reg.
-      __ mov(name_reg, value_reg);
-      __ RecordWriteField(scratch1,
-                          index.offset(),
-                          name_reg,
-                          receiver_reg,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  // Return the value (register eax).
-  ASSERT(value_reg.is(eax));
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
-  __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
-                                       Register object_reg,
-                                       Handle<JSObject> holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       Handle<Name> name,
-                                       Label* miss,
-                                       PrototypeCheckType check) {
-  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
-
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
-         && !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  Handle<JSObject> current = Handle<JSObject>::null();
-  if (type->IsConstant()) current =
-      Handle<JSObject>::cast(type->AsConstant()->Value());
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
-  Handle<Map> holder_map(holder->map());
-  // Traverse the prototype chain and check the maps in the prototype chain for
-  // fast and global objects or do negative lookup for normal objects.
-  while (!current_map.is_identical_to(holder_map)) {
-    ++depth;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap() &&
-        !current_map->IsJSGlobalProxyMap()) {
-      if (!name->IsUniqueName()) {
-        ASSERT(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
-      ASSERT(current.is_null() ||
-             current->property_dictionary()->FindEntry(name) ==
-             NameDictionary::kNotFound);
-
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
-
-      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      bool in_new_space = heap()->InNewSpace(*prototype);
-      if (depth != 1 || check == CHECK_ALL_MAPS) {
-        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
-      }
-
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
-      if (current_map->IsJSGlobalProxyMap()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
-      } else if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(
-            masm(), Handle<JSGlobalObject>::cast(current), name,
-            scratch2, miss);
-      }
-
-      if (in_new_space) {
-        // Save the map in scratch1 for later.
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (in_new_space) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ mov(reg, prototype);
-      }
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-    current_map = handle(current->map());
-  }
-
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  if (depth != 0 || check == CHECK_ALL_MAPS) {
-    // Check the holder map.
-    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
-  }
-
-  // Perform security check for access to the global object.
-  ASSERT(current_map->IsJSGlobalProxyMap() ||
-         !current_map->is_access_check_needed());
-  if (current_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
-  }
-
-  // Return the register containing the holder.
-  return reg;
-}
-
-
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ jmp(&success);
-    __ bind(miss);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ jmp(&success);
-    GenerateRestoreName(masm(), miss, name);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-Register LoadStubCompiler::CallbackHandlerFrontend(
-    Handle<HeapType> type,
-    Register object_reg,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<Object> callback) {
-  Label miss;
-
-  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-
-  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
-    ASSERT(!reg.is(scratch2()));
-    ASSERT(!reg.is(scratch3()));
-    Register dictionary = scratch1();
-    bool must_preserve_dictionary_reg = reg.is(dictionary);
-
-    // Load the properties dictionary.
-    if (must_preserve_dictionary_reg) {
-      __ push(dictionary);
-    }
-    __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
-
-    // Probe the dictionary.
-    Label probe_done, pop_and_miss;
-    NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
-                                                     &pop_and_miss,
-                                                     &probe_done,
-                                                     dictionary,
-                                                     this->name(),
-                                                     scratch2(),
-                                                     scratch3());
-    __ bind(&pop_and_miss);
-    if (must_preserve_dictionary_reg) {
-      __ pop(dictionary);
-    }
-    __ jmp(&miss);
-    __ bind(&probe_done);
-
-    // If probing finds an entry in the dictionary, scratch2 contains the
-    // index into the dictionary. Check that the value is the callback.
-    Register index = scratch2();
-    const int kElementsStartOffset =
-        NameDictionary::kHeaderSize +
-        NameDictionary::kElementsStartIndex * kPointerSize;
-    const int kValueOffset = kElementsStartOffset + kPointerSize;
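-    // Dictionary entries are (key, value, details) triples stored after the
-    // header, so the value of the probed entry is one pointer past its key.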
-    __ mov(scratch3(),
-           Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
-    if (must_preserve_dictionary_reg) {
-      __ pop(dictionary);
-    }
-    __ cmp(scratch3(), callback);
-    __ j(not_equal, &miss);
-  }
-
-  HandlerFrontendFooter(name, &miss);
-  return reg;
-}
-
-
-void LoadStubCompiler::GenerateLoadField(Register reg,
-                                         Handle<JSObject> holder,
-                                         FieldIndex field,
-                                         Representation representation) {
-  if (!reg.is(receiver())) __ mov(receiver(), reg);
-  if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  } else {
-    KeyedLoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  }
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
-    Register reg,
-    Handle<ExecutableAccessorInfo> callback) {
-  // Insert additional parameters into the stack frame above return address.
-  ASSERT(!scratch3().is(reg));
-  __ pop(scratch3());  // Get return address to place it below.
-
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
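-  // The pushes below build the PropertyCallbackArguments array in reverse,
-  // so after the last push esp points at the holder (index 0) and the
-  // receiver ends up at kThisIndex.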
-  __ push(receiver());  // receiver
-  // Push data from ExecutableAccessorInfo.
-  if (isolate()->heap()->InNewSpace(callback->data())) {
-    ASSERT(!scratch2().is(reg));
-    __ mov(scratch2(), Immediate(callback));
-    __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
-  } else {
-    __ push(Immediate(Handle<Object>(callback->data(), isolate())));
-  }
-  __ push(Immediate(isolate()->factory()->undefined_value()));  // ReturnValue
-  // ReturnValue default value
-  __ push(Immediate(isolate()->factory()->undefined_value()));
-  __ push(Immediate(reinterpret_cast<int>(isolate())));
-  __ push(reg);  // holder
-
-  // Save a pointer to where we pushed the arguments. This will be
-  // passed as the const PropertyAccessorInfo& to the C++ callback.
-  __ push(esp);
-
-  __ push(name());  // name
-
-  __ push(scratch3());  // Restore return address.
-
-  // ABI for CallApiGetter.
-  Register getter_address = edx;
-  Address function_address = v8::ToCData<Address>(callback->getter());
-  __ mov(getter_address, Immediate(function_address));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(eax, value);
-  __ ret(0);
-}
-
-
-void LoadStubCompiler::GenerateLoadInterceptor(
-    Register holder_reg,
-    Handle<Object> object,
-    Handle<JSObject> interceptor_holder,
-    LookupResult* lookup,
-    Handle<Name> name) {
-  ASSERT(interceptor_holder->HasNamedInterceptor());
-  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
-  // So far the most popular follow-ups for interceptor loads are FIELD
-  // and CALLBACKS, so inline only those; other cases may be added
-  // later.
-  bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
-    if (lookup->IsField()) {
-      compile_followup_inline = true;
-    } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
-      ExecutableAccessorInfo* callback =
-          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
-      compile_followup_inline = callback->getter() != NULL &&
-          callback->IsCompatibleReceiver(*object);
-    }
-  }
-
-  if (compile_followup_inline) {
-    // Compile the interceptor call, followed by inline code to load the
-    // property from further up the prototype chain if the call fails.
-    // Check that the maps haven't changed.
-    ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
-    // Preserve the receiver register explicitly whenever it is different from
-    // the holder and it is needed should the interceptor return without any
-    // result. The CALLBACKS case needs the receiver to be passed into C++ code,
-    // the FIELD case might cause a miss during the prototype check.
-    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
-    bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
-        (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
-    // Save necessary data before invoking an interceptor.
-    // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
-      if (must_preserve_receiver_reg) {
-        __ push(receiver());
-      }
-      __ push(holder_reg);
-      __ push(this->name());
-
-      // Invoke the interceptor.  Note: the map checks from the receiver to
-      // the interceptor's holder have been compiled before (see the caller
-      // of this method).
-      CompileCallLoadPropertyWithInterceptor(
-          masm(), receiver(), holder_reg, this->name(), interceptor_holder,
-          IC::kLoadPropertyWithInterceptorOnly);
-
-      // Check if the interceptor provided a value for the property.  If so,
-      // return immediately.
-      Label interceptor_failed;
-      __ cmp(eax, factory()->no_interceptor_result_sentinel());
-      __ j(equal, &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ ret(0);
-
-      // Clobber registers when generating debug-code to provoke errors.
-      __ bind(&interceptor_failed);
-      if (FLAG_debug_code) {
-        __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
-        __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
-        __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
-      }
-
-      __ pop(this->name());
-      __ pop(holder_reg);
-      if (must_preserve_receiver_reg) {
-        __ pop(receiver());
-      }
-
-      // Leave the internal frame.
-    }
-
-    GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
-  } else {  // !compile_followup_inline
-    // Call the runtime system to load the interceptor.
-    // Check that the maps haven't changed.
-    __ pop(scratch2());  // save old return address
-    PushInterceptorArguments(masm(), receiver(), holder_reg,
-                             this->name(), interceptor_holder);
-    __ push(scratch2());  // restore old return address
-
-    ExternalReference ref =
-        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
-                          isolate());
-    __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
-  }
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = HandlerFrontend(
-      IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
-
-  __ pop(scratch1());  // remove the return address
-  __ push(receiver());
-  __ push(holder_reg);
-  __ Push(callback);
-  __ Push(name);
-  __ push(value());
-  __ push(scratch1());  // restore return address
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_callback_property =
-      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
-  __ TailCallExternalReference(store_callback_property, 5, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
-    MacroAssembler* masm,
-    Handle<HeapType> type,
-    Register receiver,
-    Handle<JSFunction> setter) {
-  // ----------- S t a t e -------------
-  //  -- esp[0] : return address
-  // -----------------------------------
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Save value register, so we can restore it later.
-    __ push(value());
-
-    if (!setter.is_null()) {
-      // Call the JavaScript setter with receiver and value on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ mov(receiver,
-               FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ push(receiver);
-      __ push(value());
-      ParameterCount actual(1);
-      ParameterCount expected(setter);
-      __ InvokeFunction(setter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // We have to return the passed value, not the return value of the setter.
-    __ pop(eax);
-
-    // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  }
-  __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> object,
-    Handle<Name> name) {
-  __ pop(scratch1());  // remove the return address
-  __ push(receiver());
-  __ push(this->name());
-  __ push(value());
-  __ push(scratch1());  // restore return address
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_ic_property =
-      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
-  __ TailCallExternalReference(store_ic_property, 3, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-void StoreStubCompiler::GenerateStoreArrayLength() {
-  // Prepare tail call to StoreIC_ArrayLength.
-  __ pop(scratch1());  // remove the return address
-  __ push(receiver());
-  __ push(value());
-  __ push(scratch1());  // restore return address
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
-                        masm()->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
-  Label miss;
-  __ JumpIfSmi(receiver(), &miss, Label::kNear);
-  __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    __ cmp(scratch1(), receiver_maps->at(i));
-    if (transitioned_maps->at(i).is_null()) {
-      __ j(equal, handler_stubs->at(i));
-    } else {
-      Label next_map;
-      __ j(not_equal, &next_map, Label::kNear);
-      __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
-      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
-      __ bind(&next_map);
-    }
-  }
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  return GetICCode(
-      kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
-                                                      Handle<JSObject> last,
-                                                      Handle<Name> name) {
-  NonexistentHandlerFrontend(type, last, name);
-
-  // Return undefined if maps of the full prototype chain are still the
-  // same and no global property with this name contains a value.
-  __ mov(eax, isolate()->factory()->undefined_value());
-  __ ret(0);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
-  return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
-  return registers;
-}
-
-
-Register StoreStubCompiler::value() {
-  return eax;
-}
-
-
-Register* StoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { edx, ecx, ebx, edi, no_reg };
-  return registers;
-}
-
-
-Register* KeyedStoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { edx, ecx, ebx, edi, no_reg };
-  return registers;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
-                                             Handle<HeapType> type,
-                                             Register receiver,
-                                             Handle<JSFunction> getter) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    if (!getter.is_null()) {
-      // Call the JavaScript getter with the receiver on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ mov(receiver,
-                FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(getter);
-      __ InvokeFunction(getter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  }
-  __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<HeapType> type,
-    Handle<GlobalObject> global,
-    Handle<PropertyCell> cell,
-    Handle<Name> name,
-    bool is_dont_delete) {
-  Label miss;
-
-  HandlerFrontendHeader(type, receiver(), global, name, &miss);
-  // Get the value from the cell.
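-  // When the serializer is enabled, the cell's address cannot be embedded
-  // directly in the code, so load it through an immediate handle instead.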
-  if (masm()->serializer_enabled()) {
-    __ mov(eax, Immediate(cell));
-    __ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
-  } else {
-    __ mov(eax, Operand::ForCell(cell));
-  }
-
-  // Check for deleted property if property can actually be deleted.
-  if (!is_dont_delete) {
-    __ cmp(eax, factory()->the_hole_value());
-    __ j(equal, &miss);
-  } else if (FLAG_debug_code) {
-    __ cmp(eax, factory()->the_hole_value());
-    __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
-  }
-
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1);
-  // The code above already loads the result into the return register.
-  __ ret(0);
-
-  HandlerFrontendFooter(name, &miss);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    TypeHandleList* types,
-    CodeHandleList* handlers,
-    Handle<Name> name,
-    Code::StubType type,
-    IcCheckType check) {
-  Label miss;
-
-  if (check == PROPERTY &&
-      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    __ cmp(this->name(), Immediate(name));
-    __ j(not_equal, &miss);
-  }
-
-  Label number_case;
-  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
-  __ JumpIfSmi(receiver(), smi_target);
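-  // A smi that jumps to number_case arrives with the zero flag still set
-  // from the smi test, so the j(equal) below dispatches it into the Number
-  // handler without a map comparison.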
-
-  Register map_reg = scratch1();
-  __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
-  int receiver_count = types->length();
-  int number_of_handled_maps = 0;
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<HeapType> type = types->at(current);
-    Handle<Map> map = IC::TypeToMap(*type, isolate());
-    if (!map->is_deprecated()) {
-      number_of_handled_maps++;
-      __ cmp(map_reg, map);
-      if (type->Is(HeapType::Number())) {
-        ASSERT(!number_case.is_unused());
-        __ bind(&number_case);
-      }
-      __ j(equal, handlers->at(current));
-    }
-  }
-  ASSERT(number_of_handled_maps != 0);
-
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  InlineCacheState state =
-      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
-  return GetICCode(kind(), type, name, state);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, miss;
-
-  // This stub is meant to be tail-jumped to; the receiver must already
-  // have been verified by the caller not to be a smi.
-  __ JumpIfNotSmi(ecx, &miss);
-  __ mov(ebx, ecx);
-  __ SmiUntag(ebx);
-  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // Push receiver on the stack to free up a register for the dictionary
-  // probing.
-  __ push(edx);
-  __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
-  // Pop receiver before returning.
-  __ pop(edx);
-  __ ret(0);
-
-  __ bind(&slow);
-  __ pop(edx);
-
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
-  __ bind(&miss);
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic-inl.h b/src/ic-inl.h
deleted file mode 100644
index 25094ae..0000000
--- a/src/ic-inl.h
+++ /dev/null
@@ -1,176 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_INL_H_
-#define V8_IC_INL_H_
-
-#include "src/ic.h"
-
-#include "src/compiler.h"
-#include "src/debug.h"
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
-Address IC::address() const {
-  // Get the address of the call.
-  Address result = Assembler::target_address_from_return_address(pc());
-
-  Debug* debug = isolate()->debug();
-  // First check if any break points are active; if not, just return the
-  // address of the call.
-  if (!debug->has_break_points()) return result;
-
-  // At least one break point is active; perform an additional test to ensure
-  // that break point locations are updated correctly.
-  if (debug->IsDebugBreak(Assembler::target_address_at(result,
-                                                       raw_constant_pool()))) {
-    // If the call site is a call to debug break then return the address in
-    // the original code instead of the address in the running code. This will
-    // cause the original code to be updated and keeps the breakpoint active in
-    // the running code.
-    Code* code = GetCode();
-    Code* original_code = GetOriginalCode();
-    intptr_t delta =
-        original_code->instruction_start() - code->instruction_start();
-    // Return the address in the original code. This is the place where
-    // the call which has been overwritten by the DebugBreakXXX resides
-    // and the place where the inline cache system should look.
-    return result + delta;
-  } else {
-    // No break point here just return the address of the call.
-    return result;
-  }
-}
-
-
-ConstantPoolArray* IC::constant_pool() const {
-  if (!FLAG_enable_ool_constant_pool) {
-    return NULL;
-  } else {
-    Handle<ConstantPoolArray> result = raw_constant_pool_;
-    Debug* debug = isolate()->debug();
-    // First check if any break points are active; if not, just return the
-    // original constant pool.
-    if (!debug->has_break_points()) return *result;
-
-    // At least one break point is active; perform an additional test to
-    // ensure that break point locations are updated correctly.
-    Address target = Assembler::target_address_from_return_address(pc());
-    if (debug->IsDebugBreak(
-            Assembler::target_address_at(target, raw_constant_pool()))) {
-      // If the call site is a call to debug break then we want to return the
-      // constant pool for the original code instead of the breakpointed code.
-      return GetOriginalCode()->constant_pool();
-    }
-    return *result;
-  }
-}
-
-
-ConstantPoolArray* IC::raw_constant_pool() const {
-  if (FLAG_enable_ool_constant_pool) {
-    return *raw_constant_pool_;
-  } else {
-    return NULL;
-  }
-}
-
-
-Code* IC::GetTargetAtAddress(Address address,
-                             ConstantPoolArray* constant_pool) {
-  // Get the target address of the IC.
-  Address target = Assembler::target_address_at(address, constant_pool);
-  // Convert target address to the code object. Code::GetCodeFromTargetAddress
-  // is safe for use during GC where the map might be marked.
-  Code* result = Code::GetCodeFromTargetAddress(target);
-  ASSERT(result->is_inline_cache_stub());
-  return result;
-}
-
-
-void IC::SetTargetAtAddress(Address address,
-                            Code* target,
-                            ConstantPoolArray* constant_pool) {
-  ASSERT(target->is_inline_cache_stub() || target->is_compare_ic_stub());
-  Heap* heap = target->GetHeap();
-  Code* old_target = GetTargetAtAddress(address, constant_pool);
-#ifdef DEBUG
-  // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
-  // ICs as strict mode. The strict-ness of the IC must be preserved.
-  if (old_target->kind() == Code::STORE_IC ||
-      old_target->kind() == Code::KEYED_STORE_IC) {
-    ASSERT(StoreIC::GetStrictMode(old_target->extra_ic_state()) ==
-           StoreIC::GetStrictMode(target->extra_ic_state()));
-  }
-#endif
-  Assembler::set_target_address_at(
-      address, constant_pool, target->instruction_start());
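-  // Notify whichever collector is currently active of the code-target write
-  // so the patched pointer is visible to marking.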
-  if (heap->gc_state() == Heap::MARK_COMPACT) {
-    heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
-  } else {
-    heap->incremental_marking()->RecordCodeTargetPatch(address, target);
-  }
-  PostPatching(address, target, old_target);
-}
-
-
-InlineCacheHolderFlag IC::GetCodeCacheForObject(Object* object) {
-  if (object->IsJSObject()) return OWN_MAP;
-
-  // If the object is a value, we use the prototype map for the cache.
-  ASSERT(object->IsString() || object->IsSymbol() ||
-         object->IsNumber() || object->IsBoolean());
-  return PROTOTYPE_MAP;
-}
-
-
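-// Smis have no map of their own, so the code cache for a smi receiver lives
-// on the map of its prototype (the Number prototype).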
-HeapObject* IC::GetCodeCacheHolder(Isolate* isolate,
-                                   Object* object,
-                                   InlineCacheHolderFlag holder) {
-  if (object->IsSmi()) holder = PROTOTYPE_MAP;
-  Object* map_owner = holder == OWN_MAP
-      ? object : object->GetPrototype(isolate);
-  return HeapObject::cast(map_owner);
-}
-
-
-InlineCacheHolderFlag IC::GetCodeCacheFlag(HeapType* type) {
-  if (type->Is(HeapType::Boolean()) ||
-      type->Is(HeapType::Number()) ||
-      type->Is(HeapType::String()) ||
-      type->Is(HeapType::Symbol())) {
-    return PROTOTYPE_MAP;
-  }
-  return OWN_MAP;
-}
-
-
-Handle<Map> IC::GetCodeCacheHolder(InlineCacheHolderFlag flag,
-                                   HeapType* type,
-                                   Isolate* isolate) {
-  if (flag == PROTOTYPE_MAP) {
-    Context* context = isolate->context()->native_context();
-    JSFunction* constructor;
-    if (type->Is(HeapType::Boolean())) {
-      constructor = context->boolean_function();
-    } else if (type->Is(HeapType::Number())) {
-      constructor = context->number_function();
-    } else if (type->Is(HeapType::String())) {
-      constructor = context->string_function();
-    } else {
-      ASSERT(type->Is(HeapType::Symbol()));
-      constructor = context->symbol_function();
-    }
-    return handle(JSObject::cast(constructor->instance_prototype())->map());
-  }
-  return TypeToMap(type, isolate);
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IC_INL_H_
diff --git a/src/ic.cc b/src/ic.cc
deleted file mode 100644
index c764720..0000000
--- a/src/ic.cc
+++ /dev/null
@@ -1,3120 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/accessors.h"
-#include "src/api.h"
-#include "src/arguments.h"
-#include "src/codegen.h"
-#include "src/conversions.h"
-#include "src/execution.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#ifdef DEBUG
-char IC::TransitionMarkFromState(IC::State state) {
-  switch (state) {
-    case UNINITIALIZED: return '0';
-    case PREMONOMORPHIC: return '.';
-    case MONOMORPHIC: return '1';
-    case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
-    case POLYMORPHIC: return 'P';
-    case MEGAMORPHIC: return 'N';
-    case GENERIC: return 'G';
-
-    // We never see the debugger states here, because the state is
-    // computed from the original code - not the patched code. Let
-    // these cases fall through to the unreachable code below.
-    case DEBUG_STUB: break;
-  }
-  UNREACHABLE();
-  return 0;
-}
-
-
-const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
-  if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
-  if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
-    return ".IGNORE_OOB";
-  }
-  if (IsGrowStoreMode(mode)) return ".GROW";
-  return "";
-}
-
-
-void IC::TraceIC(const char* type,
-                 Handle<Object> name) {
-  if (FLAG_trace_ic) {
-    Code* new_target = raw_target();
-    State new_state = new_target->ic_state();
-    PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
-    StackFrameIterator it(isolate());
-    while (it.frame()->fp() != this->fp()) it.Advance();
-    StackFrame* raw_frame = it.frame();
-    if (raw_frame->is_internal()) {
-      Code* apply_builtin = isolate()->builtins()->builtin(
-          Builtins::kFunctionApply);
-      if (raw_frame->unchecked_code() == apply_builtin) {
-        PrintF("apply from ");
-        it.Advance();
-        raw_frame = it.frame();
-      }
-    }
-    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
-    ExtraICState extra_state = new_target->extra_ic_state();
-    const char* modifier = "";
-    if (new_target->kind() == Code::KEYED_STORE_IC) {
-      modifier = GetTransitionMarkModifier(
-          KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
-    }
-    PrintF(" (%c->%c%s)",
-           TransitionMarkFromState(state()),
-           TransitionMarkFromState(new_state),
-           modifier);
-    name->Print();
-    PrintF("]\n");
-  }
-}
-
-#define TRACE_GENERIC_IC(isolate, type, reason)                 \
-  do {                                                          \
-    if (FLAG_trace_ic) {                                        \
-      PrintF("[%s patching generic stub in ", type);            \
-      JavaScriptFrame::PrintTop(isolate, stdout, false, true);  \
-      PrintF(" (%s)]\n", reason);                               \
-    }                                                           \
-  } while (false)
-
-#else
-#define TRACE_GENERIC_IC(isolate, type, reason)
-#endif  // DEBUG
-
-#define TRACE_IC(type, name)             \
-  ASSERT((TraceIC(type, name), true))
-
-IC::IC(FrameDepth depth, Isolate* isolate)
-    : isolate_(isolate),
-      target_set_(false),
-      target_maps_set_(false) {
-  // To improve the performance of the (much used) IC code, we unfold a few
-  // levels of the stack frame iteration code. This yields a ~35% speedup when
-  // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
-  const Address entry =
-      Isolate::c_entry_fp(isolate->thread_local_top());
-  Address constant_pool = NULL;
-  if (FLAG_enable_ool_constant_pool) {
-    constant_pool = Memory::Address_at(
-        entry + ExitFrameConstants::kConstantPoolOffset);
-  }
-  Address* pc_address =
-      reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
-  Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
-  // If there's another JavaScript frame on the stack or a
-  // StubFailureTrampoline, we need to look one frame further down the stack to
-  // find the frame pointer and the return address stack slot.
-  if (depth == EXTRA_CALL_FRAME) {
-    if (FLAG_enable_ool_constant_pool) {
-      constant_pool = Memory::Address_at(
-          fp + StandardFrameConstants::kConstantPoolOffset);
-    }
-    const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
-    pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
-    fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
-  }
-#ifdef DEBUG
-  StackFrameIterator it(isolate);
-  for (int i = 0; i < depth + 1; i++) it.Advance();
-  StackFrame* frame = it.frame();
-  ASSERT(fp == frame->fp() && pc_address == frame->pc_address());
-#endif
-  fp_ = fp;
-  if (FLAG_enable_ool_constant_pool) {
-    raw_constant_pool_ = handle(
-        ConstantPoolArray::cast(reinterpret_cast<Object*>(constant_pool)),
-        isolate);
-  }
-  pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
-  target_ = handle(raw_target(), isolate);
-  state_ = target_->ic_state();
-  extra_ic_state_ = target_->extra_ic_state();
-}
-
-
-SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
-  // Compute the JavaScript frame for the frame pointer of this IC
-  // structure. We need this to be able to find the function
-  // corresponding to the frame.
-  StackFrameIterator it(isolate());
-  while (it.frame()->fp() != this->fp()) it.Advance();
-  JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
-  // Find the function on the stack and both the active code for the
-  // function and the original code.
-  JSFunction* function = frame->function();
-  return function->shared();
-}
-
-
-Code* IC::GetCode() const {
-  HandleScope scope(isolate());
-  Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
-  Code* code = shared->code();
-  return code;
-}
-
-
-Code* IC::GetOriginalCode() const {
-  HandleScope scope(isolate());
-  Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
-  ASSERT(Debug::HasDebugInfo(shared));
-  Code* original_code = Debug::GetDebugInfo(shared)->original_code();
-  ASSERT(original_code->IsCode());
-  return original_code;
-}
-
-
-static bool HasInterceptorGetter(JSObject* object) {
-  return !object->GetNamedInterceptor()->getter()->IsUndefined();
-}
-
-
-static bool HasInterceptorSetter(JSObject* object) {
-  return !object->GetNamedInterceptor()->setter()->IsUndefined();
-}
-
-
-static void LookupForRead(Handle<Object> object,
-                          Handle<String> name,
-                          LookupResult* lookup) {
-  // Skip all objects with named interceptors but without an actual getter.
-  while (true) {
-    object->Lookup(name, lookup);
-    // Besides normal conditions (property not found or it's not
-    // an interceptor), bail out if lookup is not cacheable: we won't
-    // be able to IC it anyway and regular lookup should work fine.
-    if (!lookup->IsInterceptor() || !lookup->IsCacheable()) {
-      return;
-    }
-
-    Handle<JSObject> holder(lookup->holder(), lookup->isolate());
-    if (HasInterceptorGetter(*holder)) {
-      return;
-    }
-
-    holder->LookupOwnRealNamedProperty(name, lookup);
-    if (lookup->IsFound()) {
-      ASSERT(!lookup->IsInterceptor());
-      return;
-    }
-
-    Handle<Object> proto(holder->GetPrototype(), lookup->isolate());
-    if (proto->IsNull()) {
-      ASSERT(!lookup->IsFound());
-      return;
-    }
-
-    object = proto;
-  }
-}
-
-
-bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
-                                                Handle<String> name) {
-  if (!IsNameCompatibleWithMonomorphicPrototypeFailure(name)) return false;
-
-  InlineCacheHolderFlag cache_holder =
-      Code::ExtractCacheHolderFromFlags(target()->flags());
-
-  switch (cache_holder) {
-    case OWN_MAP:
-      // The stub was generated for JSObject but called for non-JSObject.
-      // IC::GetCodeCacheHolder is not applicable.
-      if (!receiver->IsJSObject()) return false;
-      break;
-    case PROTOTYPE_MAP:
-      // IC::GetCodeCacheHolder is not applicable.
-      if (receiver->GetPrototype(isolate())->IsNull()) return false;
-      break;
-  }
-
-  Handle<Map> map(
-      IC::GetCodeCacheHolder(isolate(), *receiver, cache_holder)->map());
-
-  // Decide whether the inline cache failed because of changes to the
-  // receiver itself or changes to one of its prototypes.
-  //
-  // If there are changes to the receiver itself, the map of the
-  // receiver will have changed and the current target will not be in
-  // the receiver map's code cache.  Therefore, if the current target
-  // is in the receiver map's code cache, the inline cache failed due
-  // to prototype check failure.
-  int index = map->IndexInCodeCache(*name, *target());
-  if (index >= 0) {
-    map->RemoveFromCodeCache(*name, *target(), index);
-    // Handlers are stored in addition to the ICs on the map. Remove those, too.
-    TryRemoveInvalidHandlers(map, name);
-    return true;
-  }
-
-  // The stub is not in the cache. We've ruled out all other kinds of failure
-  // except for prototype chain changes, a deprecated map, a map that's
-  // different from the one that the stub expects, elements kind changes, or a
-  // constant global property that will become mutable. Treat all those
-  // situations as prototype failures (stay monomorphic if possible).
-
-  // If the IC is shared between multiple receivers (slow dictionary mode),
-  // then the map cannot be deprecated, nor the stub invalidated.
-  if (cache_holder == OWN_MAP) {
-    Map* old_map = FirstTargetMap();
-    if (old_map == *map) return true;
-    if (old_map != NULL) {
-      if (old_map->is_deprecated()) return true;
-      if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
-                                              map->elements_kind())) {
-        return true;
-      }
-    }
-  }
-
-  if (receiver->IsGlobalObject()) {
-    LookupResult lookup(isolate());
-    GlobalObject* global = GlobalObject::cast(*receiver);
-    global->LookupOwnRealNamedProperty(name, &lookup);
-    if (!lookup.IsFound()) return false;
-    PropertyCell* cell = global->GetPropertyCell(&lookup);
-    return cell->type()->IsConstant();
-  }
-
-  return false;
-}
-
-
-void IC::TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name) {
-  CodeHandleList handlers;
-  target()->FindHandlers(&handlers);
-  for (int i = 0; i < handlers.length(); i++) {
-    Handle<Code> handler = handlers.at(i);
-    int index = map->IndexInCodeCache(*name, *handler);
-    if (index >= 0) {
-      map->RemoveFromCodeCache(*name, *handler, index);
-      return;
-    }
-  }
-}
-
-
-bool IC::IsNameCompatibleWithMonomorphicPrototypeFailure(Handle<Object> name) {
-  if (target()->is_keyed_stub()) {
-    // Determine whether the failure is due to a name failure.
-    if (!name->IsName()) return false;
-    Name* stub_name = target()->FindFirstName();
-    if (*name != stub_name) return false;
-  }
-
-  return true;
-}
-
-
-void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
-  if (!name->IsString()) return;
-  if (state() != MONOMORPHIC) {
-    if (state() == POLYMORPHIC && receiver->IsHeapObject()) {
-      TryRemoveInvalidHandlers(
-          handle(Handle<HeapObject>::cast(receiver)->map()),
-          Handle<String>::cast(name));
-    }
-    return;
-  }
-  if (receiver->IsUndefined() || receiver->IsNull()) return;
-
-  // Remove the target from the code cache if it became invalid
-  // because of changes in the prototype chain to avoid hitting it
-  // again.
-  if (TryRemoveInvalidPrototypeDependentStub(
-          receiver, Handle<String>::cast(name)) &&
-      TryMarkMonomorphicPrototypeFailure(name)) {
-    return;
-  }
-
-  // The builtins object is special.  It only changes when JavaScript
-  // builtins are loaded lazily.  It is important to keep inline
-  // caches for the builtins object monomorphic.  Therefore, if we get
-  // an inline cache miss for the builtins object after lazily loading
-  // JavaScript builtins, we return uninitialized as the state to
-  // force the inline cache back to monomorphic state.
-  if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
-}
-
-
-MaybeHandle<Object> IC::TypeError(const char* type,
-                                  Handle<Object> object,
-                                  Handle<Object> key) {
-  HandleScope scope(isolate());
-  Handle<Object> args[2] = { key, object };
-  Handle<Object> error = isolate()->factory()->NewTypeError(
-      type, HandleVector(args, 2));
-  return isolate()->Throw<Object>(error);
-}
-
-
-MaybeHandle<Object> IC::ReferenceError(const char* type, Handle<String> name) {
-  HandleScope scope(isolate());
-  Handle<Object> error = isolate()->factory()->NewReferenceError(
-      type, HandleVector(&name, 1));
-  return isolate()->Throw<Object>(error);
-}
-
-
-static int ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state) {
-  bool was_uninitialized =
-      old_state == UNINITIALIZED || old_state == PREMONOMORPHIC;
-  bool is_uninitialized =
-      new_state == UNINITIALIZED || new_state == PREMONOMORPHIC;
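-  // +1 when the IC leaves an uninitialized state, -1 when it re-enters one,
-  // and 0 otherwise.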
-  return (was_uninitialized && !is_uninitialized) ?  1 :
-         (!was_uninitialized && is_uninitialized) ? -1 : 0;
-}
-
-
-void IC::PostPatching(Address address, Code* target, Code* old_target) {
-  Isolate* isolate = target->GetHeap()->isolate();
-  Code* host = isolate->
-      inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
-  if (host->kind() != Code::FUNCTION) return;
-
-  if (FLAG_type_info_threshold > 0 &&
-      old_target->is_inline_cache_stub() &&
-      target->is_inline_cache_stub()) {
-    int delta = ComputeTypeInfoCountDelta(old_target->ic_state(),
-                                          target->ic_state());
-    // Call ICs don't have interesting state changes from this point
-    // of view.
-    ASSERT(target->kind() != Code::CALL_IC || delta == 0);
-
-    // Not all Code objects have TypeFeedbackInfo.
-    if (host->type_feedback_info()->IsTypeFeedbackInfo() && delta != 0) {
-      TypeFeedbackInfo* info =
-          TypeFeedbackInfo::cast(host->type_feedback_info());
-      info->change_ic_with_type_info_count(delta);
-    }
-  }
-  if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
-    TypeFeedbackInfo* info =
-        TypeFeedbackInfo::cast(host->type_feedback_info());
-    info->change_own_type_change_checksum();
-  }
-  host->set_profiler_ticks(0);
-  isolate->runtime_profiler()->NotifyICChanged();
-  // TODO(2029): When an optimized function is patched, it would
-  // be nice to propagate the corresponding type information to its
-  // unoptimized version for the benefit of later inlining.
-}
-
-
-void IC::RegisterWeakMapDependency(Handle<Code> stub) {
-  if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_ic &&
-      stub->CanBeWeakStub()) {
-    ASSERT(!stub->is_weak_stub());
-    MapHandleList maps;
-    stub->FindAllMaps(&maps);
-    if (maps.length() == 1 && stub->IsWeakObjectInIC(*maps.at(0))) {
-      Map::AddDependentIC(maps.at(0), stub);
-      stub->mark_as_weak_stub();
-      if (FLAG_enable_ool_constant_pool) {
-        stub->constant_pool()->set_weak_object_state(
-            ConstantPoolArray::WEAK_OBJECTS_IN_IC);
-      }
-    }
-  }
-}
-
-
-void IC::InvalidateMaps(Code* stub) {
-  ASSERT(stub->is_weak_stub());
-  stub->mark_as_invalidated_weak_stub();
-  Isolate* isolate = stub->GetIsolate();
-  Heap* heap = isolate->heap();
-  Object* undefined = heap->undefined_value();
-  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-  for (RelocIterator it(stub, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode mode = it.rinfo()->rmode();
-    if (mode == RelocInfo::EMBEDDED_OBJECT &&
-        it.rinfo()->target_object()->IsMap()) {
-      it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
-    }
-  }
-  CPU::FlushICache(stub->instruction_start(), stub->instruction_size());
-}
-
-
-void IC::Clear(Isolate* isolate, Address address,
-    ConstantPoolArray* constant_pool) {
-  Code* target = GetTargetAtAddress(address, constant_pool);
-
-  // Don't clear the debug break inline cache, as that would remove the
-  // break point.
-  if (target->is_debug_stub()) return;
-
-  switch (target->kind()) {
-    case Code::LOAD_IC:
-      return LoadIC::Clear(isolate, address, target, constant_pool);
-    case Code::KEYED_LOAD_IC:
-      return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
-    case Code::STORE_IC:
-      return StoreIC::Clear(isolate, address, target, constant_pool);
-    case Code::KEYED_STORE_IC:
-      return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
-    case Code::CALL_IC:
-      return CallIC::Clear(isolate, address, target, constant_pool);
-    case Code::COMPARE_IC:
-      return CompareIC::Clear(isolate, address, target, constant_pool);
-    case Code::COMPARE_NIL_IC:
-      return CompareNilIC::Clear(address, target, constant_pool);
-    case Code::BINARY_OP_IC:
-    case Code::TO_BOOLEAN_IC:
-      // Clearing these is tricky and does not
-      // make any performance difference.
-      return;
-    default: UNREACHABLE();
-  }
-}
-
-
-void KeyedLoadIC::Clear(Isolate* isolate,
-                        Address address,
-                        Code* target,
-                        ConstantPoolArray* constant_pool) {
-  if (IsCleared(target)) return;
-  // Make sure to also clear the map used in inline fast cases.  If we
-  // do not clear these maps, cached code can keep objects alive
-  // through the embedded maps.
-  SetTargetAtAddress(address, *pre_monomorphic_stub(isolate), constant_pool);
-}
-
-
-void CallIC::Clear(Isolate* isolate,
-                   Address address,
-                   Code* target,
-                   ConstantPoolArray* constant_pool) {
-  // Currently, CallIC doesn't have state changes.
-}
-
-
-void LoadIC::Clear(Isolate* isolate,
-                   Address address,
-                   Code* target,
-                   ConstantPoolArray* constant_pool) {
-  if (IsCleared(target)) return;
-  Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
-      Code::LOAD_IC, target->extra_ic_state());
-  SetTargetAtAddress(address, code, constant_pool);
-}
-
-
-void StoreIC::Clear(Isolate* isolate,
-                    Address address,
-                    Code* target,
-                    ConstantPoolArray* constant_pool) {
-  if (IsCleared(target)) return;
-  Code* code = target->GetIsolate()->stub_cache()->FindPreMonomorphicIC(
-      Code::STORE_IC, target->extra_ic_state());
-  SetTargetAtAddress(address, code, constant_pool);
-}
-
-
-void KeyedStoreIC::Clear(Isolate* isolate,
-                         Address address,
-                         Code* target,
-                         ConstantPoolArray* constant_pool) {
-  if (IsCleared(target)) return;
-  SetTargetAtAddress(address,
-      *pre_monomorphic_stub(
-          isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
-      constant_pool);
-}
-
-
-void CompareIC::Clear(Isolate* isolate,
-                      Address address,
-                      Code* target,
-                      ConstantPoolArray* constant_pool) {
-  ASSERT(target->major_key() == CodeStub::CompareIC);
-  CompareIC::State handler_state;
-  Token::Value op;
-  ICCompareStub::DecodeMinorKey(target->stub_info(), NULL, NULL,
-                                &handler_state, &op);
-  // Only clear CompareICs that can retain objects.
-  if (handler_state != KNOWN_OBJECT) return;
-  SetTargetAtAddress(address, GetRawUninitialized(isolate, op), constant_pool);
-  PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
-}
-
-
-Handle<Code> KeyedLoadIC::megamorphic_stub() {
-  if (FLAG_compiled_keyed_generic_loads) {
-    return KeyedLoadGenericElementStub(isolate()).GetCode();
-  } else {
-    return isolate()->builtins()->KeyedLoadIC_Generic();
-  }
-}
-
-Handle<Code> KeyedLoadIC::generic_stub() const {
-  if (FLAG_compiled_keyed_generic_loads) {
-    return KeyedLoadGenericElementStub(isolate()).GetCode();
-  } else {
-    return isolate()->builtins()->KeyedLoadIC_Generic();
-  }
-}
-
-
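-// Migrates the instance if the object is a JSObject whose map is deprecated,
-// and returns true so callers can avoid using the IC for that access.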
-static bool MigrateDeprecated(Handle<Object> object) {
-  if (!object->IsJSObject()) return false;
-  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-  if (!receiver->map()->is_deprecated()) return false;
-  JSObject::MigrateInstance(Handle<JSObject>::cast(object));
-  return true;
-}
-
-
-MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<String> name) {
-  // If the object is undefined or null, it's illegal to try to get any
-  // of its properties; throw a TypeError in that case.
-  if (object->IsUndefined() || object->IsNull()) {
-    return TypeError("non_object_property_load", object, name);
-  }
-
-  if (FLAG_use_ic) {
-    // Use specialized code for getting prototype of functions.
-    if (object->IsJSFunction() &&
-        String::Equals(isolate()->factory()->prototype_string(), name) &&
-        Handle<JSFunction>::cast(object)->should_have_prototype()) {
-      Handle<Code> stub;
-      if (state() == UNINITIALIZED) {
-        stub = pre_monomorphic_stub();
-      } else if (state() == PREMONOMORPHIC) {
-        FunctionPrototypeStub function_prototype_stub(isolate(), kind());
-        stub = function_prototype_stub.GetCode();
-      } else if (state() != MEGAMORPHIC) {
-        ASSERT(state() != GENERIC);
-        stub = megamorphic_stub();
-      }
-      if (!stub.is_null()) {
-        set_target(*stub);
-        if (FLAG_trace_ic) PrintF("[LoadIC : +#prototype /function]\n");
-      }
-      return Accessors::FunctionGetPrototype(Handle<JSFunction>::cast(object));
-    }
-  }
-
-  // Check if the name is trivially convertible to an index and get
-  // the element or char if so.
-  uint32_t index;
-  if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
-    // Rewrite to the generic keyed load stub.
-    if (FLAG_use_ic) set_target(*generic_stub());
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        result,
-        Runtime::GetElementOrCharAt(isolate(), object, index),
-        Object);
-    return result;
-  }
-
-  bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
-
-  // Named lookup in the object.
-  LookupResult lookup(isolate());
-  LookupForRead(object, name, &lookup);
-
-  // If we did not find a property, check if we need to throw an exception.
-  if (!lookup.IsFound()) {
-    if (IsUndeclaredGlobal(object)) {
-      return ReferenceError("not_defined", name);
-    }
-    LOG(isolate(), SuspectReadEvent(*name, *object));
-  }
-
-  // Update inline cache and stub cache.
-  if (use_ic) UpdateCaches(&lookup, object, name);
-
-  // Get the property.
-  LookupIterator it(object, name);
-  Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate(), result, Object::GetProperty(&it), Object);
-  // If the property is not present, check if we need to throw an exception.
-  if ((lookup.IsInterceptor() || lookup.IsHandler()) &&
-      !it.IsFound() && IsUndeclaredGlobal(object)) {
-    return ReferenceError("not_defined", name);
-  }
-
-  return result;
-}
-
-
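-// Adds |new_receiver_map| to |receiver_maps| unless it is already present.
-// Returns true when the map was actually added.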
-static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
-                                       Handle<Map> new_receiver_map) {
-  ASSERT(!new_receiver_map.is_null());
-  for (int current = 0; current < receiver_maps->length(); ++current) {
-    if (!receiver_maps->at(current).is_null() &&
-        receiver_maps->at(current).is_identical_to(new_receiver_map)) {
-      return false;
-    }
-  }
-  receiver_maps->Add(new_receiver_map);
-  return true;
-}
-
-
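-// Tries to extend the (possibly polymorphic) target IC with a handler for
-// |type|, overwriting an existing entry for a matching or transitioned type.
-// Returns false when the caller should fall back to a megamorphic IC instead.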
-bool IC::UpdatePolymorphicIC(Handle<HeapType> type,
-                             Handle<String> name,
-                             Handle<Code> code) {
-  if (!code->is_handler()) return false;
-  TypeHandleList types;
-  CodeHandleList handlers;
-
-  TargetTypes(&types);
-  int number_of_types = types.length();
-  int deprecated_types = 0;
-  int handler_to_overwrite = -1;
-
-  for (int i = 0; i < number_of_types; i++) {
-    Handle<HeapType> current_type = types.at(i);
-    if (current_type->IsClass() &&
-        current_type->AsClass()->Map()->is_deprecated()) {
-      // Filter out deprecated maps to ensure their instances get migrated.
-      ++deprecated_types;
-    } else if (type->NowIs(current_type)) {
-      // If the receiver type is already in the polymorphic IC, this indicates
-      // there was a prototype chain failure. In that case, just overwrite the
-      // handler.
-      handler_to_overwrite = i;
-    } else if (handler_to_overwrite == -1 &&
-               current_type->IsClass() &&
-               type->IsClass() &&
-               IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(),
-                                               *type->AsClass()->Map())) {
-      handler_to_overwrite = i;
-    }
-  }
-
-  int number_of_valid_types =
-    number_of_types - deprecated_types - (handler_to_overwrite != -1);
-
-  if (number_of_valid_types >= 4) return false;
-  if (number_of_types == 0) return false;
-  if (!target()->FindHandlers(&handlers, types.length())) return false;
-
-  number_of_valid_types++;
-  if (handler_to_overwrite >= 0) {
-    handlers.Set(handler_to_overwrite, code);
-    if (!type->NowIs(types.at(handler_to_overwrite))) {
-      types.Set(handler_to_overwrite, type);
-    }
-  } else {
-    types.Add(type);
-    handlers.Add(code);
-  }
-
-  Handle<Code> ic = isolate()->stub_cache()->ComputePolymorphicIC(
-      kind(), &types, &handlers, number_of_valid_types, name, extra_ic_state());
-  set_target(*ic);
-  return true;
-}
-
-
-Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
-  return object->IsJSGlobalObject()
-      ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
-      : HeapType::NowOf(object, isolate);
-}
-
-
-Handle<Map> IC::TypeToMap(HeapType* type, Isolate* isolate) {
-  if (type->Is(HeapType::Number()))
-    return isolate->factory()->heap_number_map();
-  if (type->Is(HeapType::Boolean())) return isolate->factory()->boolean_map();
-  if (type->IsConstant()) {
-    return handle(
-        Handle<JSGlobalObject>::cast(type->AsConstant()->Value())->map());
-  }
-  ASSERT(type->IsClass());
-  return type->AsClass()->Map();
-}
-
-
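-// Converts a Map to the corresponding type: heap-number maps become Number,
-// oddball maps become Boolean (the only oddballs recorded in ICs), and any
-// other map becomes a class type for that map.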
-template <class T>
-typename T::TypeHandle IC::MapToType(Handle<Map> map,
-                                     typename T::Region* region) {
-  if (map->instance_type() == HEAP_NUMBER_TYPE) {
-    return T::Number(region);
-  } else if (map->instance_type() == ODDBALL_TYPE) {
-    // The only oddballs that can be recorded in ICs are booleans.
-    return T::Boolean(region);
-  } else {
-    return T::Class(map, region);
-  }
-}
-
-
-template
-Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
-
-
-template
-Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map, Isolate* region);
-
-
-void IC::UpdateMonomorphicIC(Handle<HeapType> type,
-                             Handle<Code> handler,
-                             Handle<String> name) {
-  if (!handler->is_handler()) return set_target(*handler);
-  Handle<Code> ic = isolate()->stub_cache()->ComputeMonomorphicIC(
-      kind(), name, type, handler, extra_ic_state());
-  set_target(*ic);
-}
-
-
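-// Copies every (type, handler) pair recorded in the current target IC into
-// the megamorphic stub cache, so the handlers remain reachable after the IC
-// itself goes megamorphic.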
-void IC::CopyICToMegamorphicCache(Handle<String> name) {
-  TypeHandleList types;
-  CodeHandleList handlers;
-  TargetTypes(&types);
-  if (!target()->FindHandlers(&handlers, types.length())) return;
-  for (int i = 0; i < types.length(); i++) {
-    UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
-  }
-}
-
-
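-// Returns true when |target_map| is the elements-kind transition target of
-// |source_map|, i.e. the two maps describe the same receiver before and
-// after a more general ElementsKind transition.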
-bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
-  if (source_map == NULL) return true;
-  if (target_map == NULL) return false;
-  ElementsKind target_elements_kind = target_map->elements_kind();
-  bool more_general_transition =
-      IsMoreGeneralElementsKindTransition(
-        source_map->elements_kind(), target_elements_kind);
-  Map* transitioned_map = more_general_transition
-      ? source_map->LookupElementsTransitionMap(target_elements_kind)
-      : NULL;
-
-  return transitioned_map == target_map;
-}
-
-
-void IC::PatchCache(Handle<HeapType> type,
-                    Handle<String> name,
-                    Handle<Code> code) {
-  switch (state()) {
-    case UNINITIALIZED:
-    case PREMONOMORPHIC:
-    case MONOMORPHIC_PROTOTYPE_FAILURE:
-      UpdateMonomorphicIC(type, code, name);
-      break;
-    case MONOMORPHIC:  // Fall through.
-    case POLYMORPHIC:
-      if (!target()->is_keyed_stub()) {
-        if (UpdatePolymorphicIC(type, name, code)) break;
-        CopyICToMegamorphicCache(name);
-      }
-      set_target(*megamorphic_stub());
-      // Fall through.
-    case MEGAMORPHIC:
-      UpdateMegamorphicCache(*type, *name, *code);
-      break;
-    case DEBUG_STUB:
-      break;
-    case GENERIC:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
-                                     ExtraICState extra_state) {
-  return isolate->stub_cache()->ComputeLoad(UNINITIALIZED, extra_state);
-}
-
-
-Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
-                                          ExtraICState extra_state) {
-  return isolate->stub_cache()->ComputeLoad(PREMONOMORPHIC, extra_state);
-}
-
-
-Handle<Code> LoadIC::megamorphic_stub() {
-  return isolate()->stub_cache()->ComputeLoad(MEGAMORPHIC, extra_ic_state());
-}
-
-
-Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
-  if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(isolate(), index);
-    return stub.GetCode();
-  } else {
-    KeyedLoadFieldStub stub(isolate(), index);
-    return stub.GetCode();
-  }
-}
-
-
-void LoadIC::UpdateCaches(LookupResult* lookup,
-                          Handle<Object> object,
-                          Handle<String> name) {
-  if (state() == UNINITIALIZED) {
-    // This is the first time we execute this inline cache.
-    // Set the target to the pre monomorphic stub to delay
-    // setting the monomorphic state.
-    set_target(*pre_monomorphic_stub());
-    TRACE_IC("LoadIC", name);
-    return;
-  }
-
-  Handle<HeapType> type = CurrentTypeOf(object, isolate());
-  Handle<Code> code;
-  if (!lookup->IsCacheable()) {
-    // Bail out if the result is not cacheable.
-    code = slow_stub();
-  } else if (!lookup->IsProperty()) {
-    if (kind() == Code::LOAD_IC) {
-      code = isolate()->stub_cache()->ComputeLoadNonexistent(name, type);
-    } else {
-      code = slow_stub();
-    }
-  } else {
-    code = ComputeHandler(lookup, object, name);
-  }
-
-  PatchCache(type, name, code);
-  TRACE_IC("LoadIC", name);
-}
-
-
-void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {
-  // The map under which the code is cached should be consistent with
-  // GenerateMonomorphicCacheProbe.
-  Map* map = *TypeToMap(type, isolate());
-  isolate()->stub_cache()->Set(name, map, code);
-}
-
-
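-// Returns a handler for |name|, reusing one from the stub holder's map code
-// cache when possible, and compiling (and caching) a new one otherwise.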
-Handle<Code> IC::ComputeHandler(LookupResult* lookup,
-                                Handle<Object> object,
-                                Handle<String> name,
-                                Handle<Object> value) {
-  InlineCacheHolderFlag cache_holder = GetCodeCacheForObject(*object);
-  Handle<HeapObject> stub_holder(GetCodeCacheHolder(
-      isolate(), *object, cache_holder));
-
-  Handle<Code> code = isolate()->stub_cache()->FindHandler(
-      name, handle(stub_holder->map()), kind(), cache_holder,
-      lookup->holder()->HasFastProperties() ? Code::FAST : Code::NORMAL);
-  if (!code.is_null()) {
-    return code;
-  }
-
-  code = CompileHandler(lookup, object, name, value, cache_holder);
-  ASSERT(code->is_handler());
-
-  if (code->type() != Code::NORMAL) {
-    HeapObject::UpdateMapCodeCache(stub_holder, name, code);
-  }
-
-  return code;
-}
-
-
-Handle<Code> LoadIC::CompileHandler(LookupResult* lookup,
-                                    Handle<Object> object,
-                                    Handle<String> name,
-                                    Handle<Object> unused,
-                                    InlineCacheHolderFlag cache_holder) {
-  if (object->IsString() &&
-      String::Equals(isolate()->factory()->length_string(), name)) {
-    FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
-    return SimpleFieldLoad(index);
-  }
-
-  if (object->IsStringWrapper() &&
-      String::Equals(isolate()->factory()->length_string(), name)) {
-    if (kind() == Code::LOAD_IC) {
-      StringLengthStub string_length_stub(isolate());
-      return string_length_stub.GetCode();
-    } else {
-      KeyedStringLengthStub string_length_stub(isolate());
-      return string_length_stub.GetCode();
-    }
-  }
-
-  Handle<HeapType> type = CurrentTypeOf(object, isolate());
-  Handle<JSObject> holder(lookup->holder());
-  LoadStubCompiler compiler(isolate(), kNoExtraICState, cache_holder, kind());
-
-  switch (lookup->type()) {
-    case FIELD: {
-      FieldIndex field = lookup->GetFieldIndex();
-      if (object.is_identical_to(holder)) {
-        return SimpleFieldLoad(field);
-      }
-      return compiler.CompileLoadField(
-          type, holder, name, field, lookup->representation());
-    }
-    case CONSTANT: {
-      Handle<Object> constant(lookup->GetConstant(), isolate());
-      // TODO(2803): Don't compute a stub for cons strings because they cannot
-      // be embedded into code.
-      if (constant->IsConsString()) break;
-      return compiler.CompileLoadConstant(type, holder, name, constant);
-    }
-    case NORMAL:
-      if (kind() != Code::LOAD_IC) break;
-      if (holder->IsGlobalObject()) {
-        Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
-        Handle<PropertyCell> cell(
-            global->GetPropertyCell(lookup), isolate());
-        Handle<Code> code = compiler.CompileLoadGlobal(
-            type, global, cell, name, lookup->IsDontDelete());
-        // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
-        Handle<HeapObject> stub_holder(GetCodeCacheHolder(
-            isolate(), *object, cache_holder));
-        HeapObject::UpdateMapCodeCache(stub_holder, name, code);
-        return code;
-      }
-      // There is only one shared stub for loading normalized
-      // properties. It does not traverse the prototype chain, so the
-      // property must be found in the object for the stub to be
-      // applicable.
-      if (!object.is_identical_to(holder)) break;
-      return isolate()->builtins()->LoadIC_Normal();
-    case CALLBACKS: {
-      // Use simple field loads for some well-known callback properties.
-      if (object->IsJSObject()) {
-        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-        Handle<Map> map(receiver->map());
-        Handle<HeapType> type = IC::MapToType<HeapType>(
-            handle(receiver->map()), isolate());
-        int object_offset;
-        if (Accessors::IsJSObjectFieldAccessor<HeapType>(
-                type, name, &object_offset)) {
-          FieldIndex index = FieldIndex::ForInObjectOffset(
-              object_offset, receiver->map());
-          return SimpleFieldLoad(index);
-        }
-      }
-
-      Handle<Object> callback(lookup->GetCallbackObject(), isolate());
-      if (callback->IsExecutableAccessorInfo()) {
-        Handle<ExecutableAccessorInfo> info =
-            Handle<ExecutableAccessorInfo>::cast(callback);
-        if (v8::ToCData<Address>(info->getter()) == 0) break;
-        if (!info->IsCompatibleReceiver(*object)) break;
-        return compiler.CompileLoadCallback(type, holder, name, info);
-      } else if (callback->IsAccessorPair()) {
-        Handle<Object> getter(Handle<AccessorPair>::cast(callback)->getter(),
-                              isolate());
-        if (!getter->IsJSFunction()) break;
-        if (holder->IsGlobalObject()) break;
-        if (!holder->HasFastProperties()) break;
-        Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
-        if (!object->IsJSObject() &&
-            !function->IsBuiltin() &&
-            function->shared()->strict_mode() == SLOPPY) {
-          // Calling sloppy non-builtins with a value as the receiver
-          // requires boxing.
-          break;
-        }
-        CallOptimization call_optimization(function);
-        if (call_optimization.is_simple_api_call() &&
-            call_optimization.IsCompatibleReceiver(object, holder)) {
-          return compiler.CompileLoadCallback(
-              type, holder, name, call_optimization);
-        }
-        return compiler.CompileLoadViaGetter(type, holder, name, function);
-      }
-      // TODO(dcarney): Handle correctly.
-      ASSERT(callback->IsDeclaredAccessorInfo());
-      break;
-    }
-    case INTERCEPTOR:
-      ASSERT(HasInterceptorGetter(*holder));
-      return compiler.CompileLoadInterceptor(type, holder, name);
-    default:
-      break;
-  }
-
-  return slow_stub();
-}
-
-
-static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
-  // This helper implements a few common fast cases for converting
-  // non-smi keys of keyed loads/stores to a smi or a string.
-  if (key->IsHeapNumber()) {
-    double value = Handle<HeapNumber>::cast(key)->value();
-    if (std::isnan(value)) {
-      key = isolate->factory()->nan_string();
-    } else {
-      int int_value = FastD2I(value);
-      if (value == int_value && Smi::IsValid(int_value)) {
-        key = Handle<Smi>(Smi::FromInt(int_value), isolate);
-      }
-    }
-  } else if (key->IsUndefined()) {
-    key = isolate->factory()->undefined_string();
-  }
-  return key;
-}
-
-
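-// Picks the next stub for a keyed element load: monomorphic for the first
-// receiver map seen, polymorphic while the number of maps stays small, and
-// the generic stub once that limit is exceeded or the same map misses twice.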
-Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
-  // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
-  // via megamorphic stubs, since they don't have a map in their relocation info
-  // and so the stubs can't be harvested for the object needed for a map check.
-  if (target()->type() != Code::NORMAL) {
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
-    return generic_stub();
-  }
-
-  Handle<Map> receiver_map(receiver->map(), isolate());
-  MapHandleList target_receiver_maps;
-  if (target().is_identical_to(string_stub())) {
-    target_receiver_maps.Add(isolate()->factory()->string_map());
-  } else {
-    TargetMaps(&target_receiver_maps);
-  }
-  if (target_receiver_maps.length() == 0) {
-    return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
-  }
-
-  // The first time a receiver is seen that is a transitioned version of the
-  // previous monomorphic receiver type, assume the new ElementsKind is the
-  // monomorphic type. This benefits global arrays that only transition
-  // once, and all call sites accessing them are faster if they remain
-  // monomorphic. If this optimistic assumption is not true, the IC will
-  // miss again and it will become polymorphic and support both the
-  // untransitioned and transitioned maps.
-  if (state() == MONOMORPHIC &&
-      IsMoreGeneralElementsKindTransition(
-          target_receiver_maps.at(0)->elements_kind(),
-          receiver->GetElementsKind())) {
-    return isolate()->stub_cache()->ComputeKeyedLoadElement(receiver_map);
-  }
-
-  ASSERT(state() != GENERIC);
-
-  // Determine the list of receiver maps that this call site has seen,
-  // adding the map that was just encountered.
-  if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
-    // If the miss wasn't due to an unseen map, a polymorphic stub
-    // won't help, use the generic stub.
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
-    return generic_stub();
-  }
-
-  // If the maximum number of receiver maps has been exceeded, use the generic
-  // version of the IC.
-  if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
-    return generic_stub();
-  }
-
-  return isolate()->stub_cache()->ComputeLoadElementPolymorphic(
-      &target_receiver_maps);
-}
-
-
-MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
-                                      Handle<Object> key) {
-  if (MigrateDeprecated(object)) {
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        result,
-        Runtime::GetObjectProperty(isolate(), object, key),
-        Object);
-    return result;
-  }
-
-  Handle<Object> load_handle;
-  Handle<Code> stub = generic_stub();
-
-  // Check for non-string values that can be converted into an
-  // internalized string directly or are representable as a smi.
-  key = TryConvertKey(key, isolate());
-
-  if (key->IsInternalizedString()) {
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        load_handle,
-        LoadIC::Load(object, Handle<String>::cast(key)),
-        Object);
-  } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
-    if (object->IsString() && key->IsNumber()) {
-      if (state() == UNINITIALIZED) stub = string_stub();
-    } else if (object->IsJSObject()) {
-      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-      if (receiver->elements()->map() ==
-          isolate()->heap()->sloppy_arguments_elements_map()) {
-        stub = sloppy_arguments_stub();
-      } else if (receiver->HasIndexedInterceptor()) {
-        stub = indexed_interceptor_stub();
-      } else if (!Object::ToSmi(isolate(), key).is_null() &&
-                 (!target().is_identical_to(sloppy_arguments_stub()))) {
-        stub = LoadElementStub(receiver);
-      }
-    }
-  }
-
-  if (!is_target_set()) {
-    Code* generic = *generic_stub();
-    if (*stub == generic) {
-      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
-    }
-    set_target(*stub);
-    TRACE_IC("LoadIC", key);
-  }
-
-  if (!load_handle.is_null()) return load_handle;
-  Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate(),
-      result,
-      Runtime::GetObjectProperty(isolate(), object, key),
-      Object);
-  return result;
-}
-
-
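-// Determines whether a store of |value| to |name| on |receiver| can be
-// handled by an IC: fills |lookup| with the property or transition to use
-// and returns true only when the result is cacheable.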
-static bool LookupForWrite(Handle<JSObject> receiver,
-                           Handle<String> name,
-                           Handle<Object> value,
-                           LookupResult* lookup,
-                           IC* ic) {
-  Handle<JSObject> holder = receiver;
-  receiver->Lookup(name, lookup);
-  if (lookup->IsFound()) {
-    if (lookup->IsInterceptor() && !HasInterceptorSetter(lookup->holder())) {
-      receiver->LookupOwnRealNamedProperty(name, lookup);
-      if (!lookup->IsFound()) return false;
-    }
-
-    if (lookup->IsReadOnly() || !lookup->IsCacheable()) return false;
-    if (lookup->holder() == *receiver) return lookup->CanHoldValue(value);
-    if (lookup->IsPropertyCallbacks()) return true;
-    // JSGlobalProxy either stores on the global object in the prototype, or
-    // goes into the runtime if access checks are needed, so this is always
-    // safe.
-    if (receiver->IsJSGlobalProxy()) {
-      return lookup->holder() == receiver->GetPrototype();
-    }
-    // Currently normal holders in the prototype chain are not supported. They
-    // would require a runtime positive lookup and verification that the details
-    // have not changed.
-    if (lookup->IsInterceptor() || lookup->IsNormal()) return false;
-    holder = Handle<JSObject>(lookup->holder(), lookup->isolate());
-  }
-
-  // While normally LookupTransition gets passed the receiver, in this case we
-  // pass the holder of the property that we overwrite. This keeps the holder in
-  // the LookupResult intact so we can later use it to generate a prototype
-  // chain check. This avoids a double lookup, but requires us to pass in the
-  // receiver when trying to fetch extra information from the transition.
-  receiver->map()->LookupTransition(*holder, *name, lookup);
-  if (!lookup->IsTransition() || lookup->IsReadOnly()) return false;
-
-  // If the value that's being stored does not fit in the field that the
-  // instance would transition to, create a new transition that fits the value.
-  // This has to be done before generating the IC, since that IC will embed the
-  // transition target.
-  // Ensure the instance and its map were migrated before trying to update the
-  // transition target.
-  ASSERT(!receiver->map()->is_deprecated());
-  if (!lookup->CanHoldValue(value)) {
-    Handle<Map> target(lookup->GetTransitionTarget());
-    Representation field_representation = value->OptimalRepresentation();
-    Handle<HeapType> field_type = value->OptimalType(
-        lookup->isolate(), field_representation);
-    Map::GeneralizeRepresentation(
-        target, target->LastAdded(),
-        field_representation, field_type, FORCE_FIELD);
-    // Lookup the transition again since the transition tree may have changed
-    // entirely by the migration above.
-    receiver->map()->LookupTransition(*holder, *name, lookup);
-    if (!lookup->IsTransition()) return false;
-    return ic->TryMarkMonomorphicPrototypeFailure(name);
-  }
-
-  return true;
-}
-
-
-MaybeHandle<Object> StoreIC::Store(Handle<Object> object,
-                                   Handle<String> name,
-                                   Handle<Object> value,
-                                   JSReceiver::StoreFromKeyed store_mode) {
-  if (MigrateDeprecated(object) || object->IsJSProxy()) {
-    Handle<JSReceiver> receiver = Handle<JSReceiver>::cast(object);
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        result,
-        JSReceiver::SetProperty(receiver, name, value, NONE, strict_mode()),
-        Object);
-    return result;
-  }
-
-  // If the object is undefined or null it's illegal to try to set any
-  // properties on it; throw a TypeError in that case.
-  if (object->IsUndefined() || object->IsNull()) {
-    return TypeError("non_object_property_store", object, name);
-  }
-
-  // The length property of string values is read-only. Throw in strict mode.
-  if (strict_mode() == STRICT && object->IsString() &&
-      String::Equals(isolate()->factory()->length_string(), name)) {
-    return TypeError("strict_read_only_property", object, name);
-  }
-
-  // Ignore other stores where the receiver is not a JSObject.
-  // TODO(1475): Must check prototype chains of object wrappers.
-  if (!object->IsJSObject()) return value;
-
-  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
-  // Check if the given name is an array index.
-  uint32_t index;
-  if (name->AsArrayIndex(&index)) {
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        result,
-        JSObject::SetElement(receiver, index, value, NONE, strict_mode()),
-        Object);
-    return value;
-  }
-
-  // Observed objects are always modified through the runtime.
-  if (receiver->map()->is_observed()) {
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        result,
-        JSReceiver::SetProperty(
-            receiver, name, value, NONE, strict_mode(), store_mode),
-        Object);
-    return result;
-  }
-
-  LookupResult lookup(isolate());
-  bool can_store = LookupForWrite(receiver, name, value, &lookup, this);
-  if (!can_store &&
-      strict_mode() == STRICT &&
-      !(lookup.IsProperty() && lookup.IsReadOnly()) &&
-      object->IsGlobalObject()) {
-    // Strict mode doesn't allow setting non-existent global property.
-    return ReferenceError("not_defined", name);
-  }
-  if (FLAG_use_ic) {
-    if (state() == UNINITIALIZED) {
-      Handle<Code> stub = pre_monomorphic_stub();
-      set_target(*stub);
-      TRACE_IC("StoreIC", name);
-    } else if (can_store) {
-      UpdateCaches(&lookup, receiver, name, value);
-    } else if (lookup.IsNormal() ||
-               (lookup.IsField() && lookup.CanHoldValue(value))) {
-      Handle<Code> stub = generic_stub();
-      set_target(*stub);
-    }
-  }
-
-  // Set the property.
-  Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate(),
-      result,
-      JSReceiver::SetProperty(
-          receiver, name, value, NONE, strict_mode(), store_mode),
-      Object);
-  return result;
-}
-
-
-void CallIC::State::Print(StringStream* stream) const {
-  stream->Add("(args(%d), ",
-              argc_);
-  stream->Add("%s, ",
-              call_type_ == CallIC::METHOD ? "METHOD" : "FUNCTION");
-}
-
-
-Handle<Code> CallIC::initialize_stub(Isolate* isolate,
-                                     int argc,
-                                     CallType call_type) {
-  CallICStub stub(isolate, State(argc, call_type));
-  Handle<Code> code = stub.GetCode();
-  return code;
-}
-
-
-Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
-                                      StrictMode strict_mode) {
-  ExtraICState extra_state = ComputeExtraICState(strict_mode);
-  Handle<Code> ic = isolate->stub_cache()->ComputeStore(
-      UNINITIALIZED, extra_state);
-  return ic;
-}
-
-
-Handle<Code> StoreIC::megamorphic_stub() {
-  return isolate()->stub_cache()->ComputeStore(MEGAMORPHIC, extra_ic_state());
-}
-
-
-Handle<Code> StoreIC::generic_stub() const {
-  return isolate()->stub_cache()->ComputeStore(GENERIC, extra_ic_state());
-}
-
-
-Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
-                                           StrictMode strict_mode) {
-  ExtraICState state = ComputeExtraICState(strict_mode);
-  return isolate->stub_cache()->ComputeStore(PREMONOMORPHIC, state);
-}
-
-
-void StoreIC::UpdateCaches(LookupResult* lookup,
-                           Handle<JSObject> receiver,
-                           Handle<String> name,
-                           Handle<Object> value) {
-  ASSERT(lookup->IsFound());
-
-  // These are not cacheable, so we never see such LookupResults here.
-  ASSERT(!lookup->IsHandler());
-
-  Handle<Code> code = ComputeHandler(lookup, receiver, name, value);
-
-  PatchCache(CurrentTypeOf(receiver, isolate()), name, code);
-  TRACE_IC("StoreIC", name);
-}
-
-
-Handle<Code> StoreIC::CompileHandler(LookupResult* lookup,
-                                     Handle<Object> object,
-                                     Handle<String> name,
-                                     Handle<Object> value,
-                                     InlineCacheHolderFlag cache_holder) {
-  if (object->IsAccessCheckNeeded()) return slow_stub();
-  ASSERT(cache_holder == OWN_MAP);
-  // This is currently guaranteed by checks in StoreIC::Store.
-  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-
-  Handle<JSObject> holder(lookup->holder());
-  // Handlers do not use strict mode.
-  StoreStubCompiler compiler(isolate(), SLOPPY, kind());
-  if (lookup->IsTransition()) {
-    // Explicitly pass in the receiver map since LookupForWrite may have
-    // stored something else than the receiver in the holder.
-    Handle<Map> transition(lookup->GetTransitionTarget());
-    PropertyDetails details = lookup->GetPropertyDetails();
-
-    if (details.type() != CALLBACKS && details.attributes() == NONE) {
-      return compiler.CompileStoreTransition(
-          receiver, lookup, transition, name);
-    }
-  } else {
-    switch (lookup->type()) {
-      case FIELD:
-        return compiler.CompileStoreField(receiver, lookup, name);
-      case NORMAL:
-        if (kind() == Code::KEYED_STORE_IC) break;
-        if (receiver->IsJSGlobalProxy() || receiver->IsGlobalObject()) {
-          // The stub generated for the global object picks the value directly
-          // from the property cell. So the property must be directly on the
-          // global object.
-          Handle<GlobalObject> global = receiver->IsJSGlobalProxy()
-              ? handle(GlobalObject::cast(receiver->GetPrototype()))
-              : Handle<GlobalObject>::cast(receiver);
-          Handle<PropertyCell> cell(global->GetPropertyCell(lookup), isolate());
-          Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
-          StoreGlobalStub stub(
-              isolate(), union_type->IsConstant(), receiver->IsJSGlobalProxy());
-          Handle<Code> code = stub.GetCodeCopyFromTemplate(global, cell);
-          // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
-          HeapObject::UpdateMapCodeCache(receiver, name, code);
-          return code;
-        }
-        ASSERT(holder.is_identical_to(receiver));
-        return isolate()->builtins()->StoreIC_Normal();
-      case CALLBACKS: {
-        Handle<Object> callback(lookup->GetCallbackObject(), isolate());
-        if (callback->IsExecutableAccessorInfo()) {
-          Handle<ExecutableAccessorInfo> info =
-              Handle<ExecutableAccessorInfo>::cast(callback);
-          if (v8::ToCData<Address>(info->setter()) == 0) break;
-          if (!holder->HasFastProperties()) break;
-          if (!info->IsCompatibleReceiver(*receiver)) break;
-          return compiler.CompileStoreCallback(receiver, holder, name, info);
-        } else if (callback->IsAccessorPair()) {
-          Handle<Object> setter(
-              Handle<AccessorPair>::cast(callback)->setter(), isolate());
-          if (!setter->IsJSFunction()) break;
-          if (holder->IsGlobalObject()) break;
-          if (!holder->HasFastProperties()) break;
-          Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
-          CallOptimization call_optimization(function);
-          if (call_optimization.is_simple_api_call() &&
-              call_optimization.IsCompatibleReceiver(receiver, holder)) {
-            return compiler.CompileStoreCallback(
-                receiver, holder, name, call_optimization);
-          }
-          return compiler.CompileStoreViaSetter(
-              receiver, holder, name, Handle<JSFunction>::cast(setter));
-        }
-        // TODO(dcarney): Handle correctly.
-        ASSERT(callback->IsDeclaredAccessorInfo());
-        break;
-      }
-      case INTERCEPTOR:
-        if (kind() == Code::KEYED_STORE_IC) break;
-        ASSERT(HasInterceptorSetter(*holder));
-        return compiler.CompileStoreInterceptor(receiver, name);
-      case CONSTANT:
-        break;
-      case NONEXISTENT:
-      case HANDLER:
-        UNREACHABLE();
-        break;
-    }
-  }
-  return slow_stub();
-}
-
-
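-// Keyed-store analogue of LoadElementStub: picks a monomorphic, polymorphic
-// or generic stub based on the receiver maps and store modes seen so far.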
-Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
-                                            KeyedAccessStoreMode store_mode) {
-  // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
-  // via megamorphic stubs, since they don't have a map in their relocation info
-  // and so the stubs can't be harvested for the object needed for a map check.
-  if (target()->type() != Code::NORMAL) {
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "non-NORMAL target type");
-    return generic_stub();
-  }
-
-  Handle<Map> receiver_map(receiver->map(), isolate());
-  MapHandleList target_receiver_maps;
-  TargetMaps(&target_receiver_maps);
-  if (target_receiver_maps.length() == 0) {
-    Handle<Map> monomorphic_map =
-        ComputeTransitionedMap(receiver_map, store_mode);
-    store_mode = GetNonTransitioningStoreMode(store_mode);
-    return isolate()->stub_cache()->ComputeKeyedStoreElement(
-        monomorphic_map, strict_mode(), store_mode);
-  }
-
-  // There are several special cases where an IC that is MONOMORPHIC can still
-  // transition to a different IC (chosen via GetNonTransitioningStoreMode)
-  // that handles a superset of the original IC. Handle those here if the
-  // receiver map hasn't changed or it has transitioned to a more general kind.
-  KeyedAccessStoreMode old_store_mode =
-      KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
-  Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
-  if (state() == MONOMORPHIC) {
-    Handle<Map> transitioned_receiver_map = receiver_map;
-    if (IsTransitionStoreMode(store_mode)) {
-      transitioned_receiver_map =
-          ComputeTransitionedMap(receiver_map, store_mode);
-    }
-    if ((receiver_map.is_identical_to(previous_receiver_map) &&
-         IsTransitionStoreMode(store_mode)) ||
-        IsTransitionOfMonomorphicTarget(*previous_receiver_map,
-                                        *transitioned_receiver_map)) {
-      // If the "old" and "new" maps are in the same elements map family, or
-      // if they at least come from the same origin for a transitioning store,
-      // stay MONOMORPHIC and use the map for the most generic ElementsKind.
-      store_mode = GetNonTransitioningStoreMode(store_mode);
-      return isolate()->stub_cache()->ComputeKeyedStoreElement(
-          transitioned_receiver_map, strict_mode(), store_mode);
-    } else if (*previous_receiver_map == receiver->map() &&
-               old_store_mode == STANDARD_STORE &&
-               (store_mode == STORE_AND_GROW_NO_TRANSITION ||
-                store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-                store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
-      // A "normal" IC that handles stores can switch to a version that can
-      // grow at the end of the array, handle OOB accesses or copy COW arrays
-      // and still stay MONOMORPHIC.
-      return isolate()->stub_cache()->ComputeKeyedStoreElement(
-          receiver_map, strict_mode(), store_mode);
-    }
-  }
-
-  ASSERT(state() != GENERIC);
-
-  bool map_added =
-      AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
-
-  if (IsTransitionStoreMode(store_mode)) {
-    Handle<Map> transitioned_receiver_map =
-        ComputeTransitionedMap(receiver_map, store_mode);
-    map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps,
-                                            transitioned_receiver_map);
-  }
-
-  if (!map_added) {
-    // If the miss wasn't due to an unseen map, a polymorphic stub
-    // won't help, use the generic stub.
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "same map added twice");
-    return generic_stub();
-  }
-
-  // If the maximum number of receiver maps has been exceeded, use the generic
-  // version of the IC.
-  if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
-    TRACE_GENERIC_IC(isolate(), "KeyedIC", "max polymorph exceeded");
-    return generic_stub();
-  }
-
-  // Make sure all polymorphic handlers have the same store mode, otherwise the
-  // generic stub must be used.
-  store_mode = GetNonTransitioningStoreMode(store_mode);
-  if (old_store_mode != STANDARD_STORE) {
-    if (store_mode == STANDARD_STORE) {
-      store_mode = old_store_mode;
-    } else if (store_mode != old_store_mode) {
-      TRACE_GENERIC_IC(isolate(), "KeyedIC", "store mode mismatch");
-      return generic_stub();
-    }
-  }
-
-  // If the store mode isn't the standard mode, make sure the polymorphic
-  // receivers are either all external arrays or all "normal" arrays.
-  // Otherwise, use the generic stub.
-  if (store_mode != STANDARD_STORE) {
-    int external_arrays = 0;
-    for (int i = 0; i < target_receiver_maps.length(); ++i) {
-      if (target_receiver_maps[i]->has_external_array_elements() ||
-          target_receiver_maps[i]->has_fixed_typed_array_elements()) {
-        external_arrays++;
-      }
-    }
-    if (external_arrays != 0 &&
-        external_arrays != target_receiver_maps.length()) {
-      TRACE_GENERIC_IC(isolate(), "KeyedIC",
-          "unsupported combination of external and normal arrays");
-      return generic_stub();
-    }
-  }
-
-  return isolate()->stub_cache()->ComputeStoreElementPolymorphic(
-      &target_receiver_maps, store_mode, strict_mode());
-}
-
-
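-// Maps a transitioning KeyedAccessStoreMode to the receiver map after the
-// corresponding ElementsKind transition; non-transitioning modes return the
-// map unchanged.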
-Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
-    Handle<Map> map,
-    KeyedAccessStoreMode store_mode) {
-  switch (store_mode) {
-    case STORE_TRANSITION_SMI_TO_OBJECT:
-    case STORE_TRANSITION_DOUBLE_TO_OBJECT:
-    case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
-    case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
-      return Map::TransitionElementsTo(map, FAST_ELEMENTS);
-    case STORE_TRANSITION_SMI_TO_DOUBLE:
-    case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
-      return Map::TransitionElementsTo(map, FAST_DOUBLE_ELEMENTS);
-    case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
-    case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
-    case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
-    case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
-      return Map::TransitionElementsTo(map, FAST_HOLEY_ELEMENTS);
-    case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
-    case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
-      return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
-    case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
-      ASSERT(map->has_external_array_elements());
-      // Fall through
-    case STORE_NO_TRANSITION_HANDLE_COW:
-    case STANDARD_STORE:
-    case STORE_AND_GROW_NO_TRANSITION:
-      return map;
-  }
-  UNREACHABLE();
-  return MaybeHandle<Map>().ToHandleChecked();
-}
-
-
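-// Returns true when |index| lies beyond the smi-encoded length of a JSArray,
-// or beyond the elements backing store length for any other receiver.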
-bool IsOutOfBoundsAccess(Handle<JSObject> receiver,
-                         int index) {
-  if (receiver->IsJSArray()) {
-    return JSArray::cast(*receiver)->length()->IsSmi() &&
-        index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
-  }
-  return index >= receiver->elements()->length();
-}
-
-
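-// Classifies the store as growing vs. in-bounds and derives the required
-// ElementsKind transition (if any) from the receiver's current elements and
-// the value being stored.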
-KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
-                                                Handle<Object> key,
-                                                Handle<Object> value) {
-  Handle<Smi> smi_key = Object::ToSmi(isolate(), key).ToHandleChecked();
-  int index = smi_key->value();
-  bool oob_access = IsOutOfBoundsAccess(receiver, index);
-  // Don't consider this a growing store if the store would send the receiver to
-  // dictionary mode.
-  bool allow_growth = receiver->IsJSArray() && oob_access &&
-      !receiver->WouldConvertToSlowElements(key);
-  if (allow_growth) {
-    // Handle growing array in stub if necessary.
-    if (receiver->HasFastSmiElements()) {
-      if (value->IsHeapNumber()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
-        } else {
-          return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
-        }
-      }
-      if (value->IsHeapObject()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
-        } else {
-          return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
-        }
-      }
-    } else if (receiver->HasFastDoubleElements()) {
-      if (!value->IsSmi() && !value->IsHeapNumber()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
-        } else {
-          return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
-        }
-      }
-    }
-    return STORE_AND_GROW_NO_TRANSITION;
-  } else {
-    // Handle only in-bounds elements accesses.
-    if (receiver->HasFastSmiElements()) {
-      if (value->IsHeapNumber()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
-        } else {
-          return STORE_TRANSITION_SMI_TO_DOUBLE;
-        }
-      } else if (value->IsHeapObject()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
-        } else {
-          return STORE_TRANSITION_SMI_TO_OBJECT;
-        }
-      }
-    } else if (receiver->HasFastDoubleElements()) {
-      if (!value->IsSmi() && !value->IsHeapNumber()) {
-        if (receiver->HasFastHoleyElements()) {
-          return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
-        } else {
-          return STORE_TRANSITION_DOUBLE_TO_OBJECT;
-        }
-      }
-    }
-    if (!FLAG_trace_external_array_abuse &&
-        receiver->map()->has_external_array_elements() && oob_access) {
-      return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
-    }
-    Heap* heap = receiver->GetHeap();
-    if (receiver->elements()->map() == heap->fixed_cow_array_map()) {
-      return STORE_NO_TRANSITION_HANDLE_COW;
-    } else {
-      return STANDARD_STORE;
-    }
-  }
-}
-
-
-MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
-                                        Handle<Object> key,
-                                        Handle<Object> value) {
-  if (MigrateDeprecated(object)) {
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        result,
-        Runtime::SetObjectProperty(
-            isolate(), object, key, value, NONE, strict_mode()),
-        Object);
-    return result;
-  }
-
-  // Check for non-string values that can be converted into an
-  // internalized string directly or are representable as a smi.
-  key = TryConvertKey(key, isolate());
-
-  Handle<Object> store_handle;
-  Handle<Code> stub = generic_stub();
-
-  if (key->IsInternalizedString()) {
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        store_handle,
-        StoreIC::Store(object,
-                       Handle<String>::cast(key),
-                       value,
-                       JSReceiver::MAY_BE_STORE_FROM_KEYED),
-        Object);
-  } else {
-    bool use_ic = FLAG_use_ic &&
-        !object->IsStringWrapper() &&
-        !object->IsAccessCheckNeeded() &&
-        !object->IsJSGlobalProxy() &&
-        !(object->IsJSObject() &&
-          JSObject::cast(*object)->map()->is_observed());
-    if (use_ic && !object->IsSmi()) {
-      // Don't use ICs for maps of the objects in Array's prototype chain. We
-      // expect to be able to trap element sets to objects with those maps in
-      // the runtime to enable optimization of element hole access.
-      Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
-      if (heap_object->map()->IsMapInArrayPrototypeChain()) use_ic = false;
-    }
-
-    if (use_ic) {
-      ASSERT(!object->IsAccessCheckNeeded());
-
-      if (object->IsJSObject()) {
-        Handle<JSObject> receiver = Handle<JSObject>::cast(object);
-        bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null();
-        if (receiver->elements()->map() ==
-            isolate()->heap()->sloppy_arguments_elements_map()) {
-          if (strict_mode() == SLOPPY) {
-            stub = sloppy_arguments_stub();
-          }
-        } else if (key_is_smi_like &&
-                   !(target().is_identical_to(sloppy_arguments_stub()))) {
-          // We should go generic if receiver isn't a dictionary, but our
-          // prototype chain does have dictionary elements. This ensures that
-          // other non-dictionary receivers in the polymorphic case benefit
-          // from fast path keyed stores.
-          if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
-            KeyedAccessStoreMode store_mode =
-                GetStoreMode(receiver, key, value);
-            stub = StoreElementStub(receiver, store_mode);
-          }
-        }
-      }
-    }
-  }
-
-  if (store_handle.is_null()) {
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate(),
-        store_handle,
-        Runtime::SetObjectProperty(
-            isolate(), object, key, value, NONE, strict_mode()),
-        Object);
-  }
-
-  if (!is_target_set()) {
-    Code* generic = *generic_stub();
-    if (*stub == generic) {
-      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
-    }
-    ASSERT(!stub.is_null());
-    set_target(*stub);
-    TRACE_IC("StoreIC", key);
-  }
-
-  return store_handle;
-}
-
-
-CallIC::State::State(ExtraICState extra_ic_state)
-    : argc_(ArgcBits::decode(extra_ic_state)),
-      call_type_(CallTypeBits::decode(extra_ic_state)) {
-}
-
-
-ExtraICState CallIC::State::GetExtraICState() const {
-  ExtraICState extra_ic_state =
-      ArgcBits::encode(argc_) |
-      CallTypeBits::encode(call_type_);
-  return extra_ic_state;
-}
-
-
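-// Installs a custom CallIC stub when the callee is the Array function,
-// recording an AllocationSite in the feedback vector slot. Returns true when
-// such a handler was installed.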
-bool CallIC::DoCustomHandler(Handle<Object> receiver,
-                             Handle<Object> function,
-                             Handle<FixedArray> vector,
-                             Handle<Smi> slot,
-                             const State& state) {
-  ASSERT(FLAG_use_ic && function->IsJSFunction());
-
-  // Are we the array function?
-  Handle<JSFunction> array_function = Handle<JSFunction>(
-      isolate()->context()->native_context()->array_function(), isolate());
-  if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
-    // Alter the slot.
-    Object* feedback = vector->get(slot->value());
-    if (!feedback->IsAllocationSite()) {
-      Handle<AllocationSite> new_site =
-          isolate()->factory()->NewAllocationSite();
-      vector->set(slot->value(), *new_site);
-    }
-
-    CallIC_ArrayStub stub(isolate(), state);
-    set_target(*stub.GetCode());
-    Handle<String> name;
-    if (array_function->shared()->name()->IsString()) {
-      name = Handle<String>(String::cast(array_function->shared()->name()),
-                            isolate());
-    }
-
-    TRACE_IC("CallIC (Array call)", name);
-    return true;
-  }
-  return false;
-}
-
-
-void CallIC::PatchMegamorphic(Handle<FixedArray> vector,
-                              Handle<Smi> slot) {
-  State state(target()->extra_ic_state());
-
-  // We are going generic.
-  vector->set(slot->value(),
-              *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
-              SKIP_WRITE_BARRIER);
-
-  CallICStub stub(isolate(), state);
-  Handle<Code> code = stub.GetCode();
-  set_target(*code);
-
-  TRACE_GENERIC_IC(isolate(), "CallIC", "megamorphic");
-}
-
-
-void CallIC::HandleMiss(Handle<Object> receiver,
-                        Handle<Object> function,
-                        Handle<FixedArray> vector,
-                        Handle<Smi> slot) {
-  State state(target()->extra_ic_state());
-  Object* feedback = vector->get(slot->value());
-
-  // Hand-coded MISS handling is easier if CallIC slots don't contain smis.
-  ASSERT(!feedback->IsSmi());
-
-  if (feedback->IsJSFunction() || !function->IsJSFunction()) {
-    // We are going generic.
-    vector->set(slot->value(),
-                *TypeFeedbackInfo::MegamorphicSentinel(isolate()),
-                SKIP_WRITE_BARRIER);
-
-    TRACE_GENERIC_IC(isolate(), "CallIC", "megamorphic");
-  } else {
-    // The feedback is either uninitialized or an allocation site.
-    // It might be an allocation site because, when we re-compile the full
-    // code to add deoptimization support, we call with the default call-ic
-    // and merely need to patch the target to match the feedback.
-    // TODO(mvstanton): the better approach is to dispense with patching
-    // altogether, which is in progress.
-    ASSERT(feedback == *TypeFeedbackInfo::UninitializedSentinel(isolate()) ||
-           feedback->IsAllocationSite());
-
-    // Do we want to install a custom handler?
-    if (FLAG_use_ic &&
-        DoCustomHandler(receiver, function, vector, slot, state)) {
-      return;
-    }
-
-    Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
-    Handle<Object> name(js_function->shared()->name(), isolate());
-    TRACE_IC("CallIC", name);
-    vector->set(slot->value(), *function);
-  }
-}
-
-
-#undef TRACE_IC
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(CallIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
-  CallIC ic(isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> function = args.at<Object>(1);
-  Handle<FixedArray> vector = args.at<FixedArray>(2);
-  Handle<Smi> slot = args.at<Smi>(3);
-  ic.HandleMiss(receiver, function, vector, slot);
-  return *function;
-}
-
-
-RUNTIME_FUNCTION(CallIC_Customization_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
-  // A miss on a custom call ic always results in going megamorphic.
-  CallIC ic(isolate);
-  Handle<Object> function = args.at<Object>(1);
-  Handle<FixedArray> vector = args.at<FixedArray>(2);
-  Handle<Smi> slot = args.at<Smi>(3);
-  ic.PatchMegamorphic(vector, slot);
-  return *function;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(LoadIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<String> key = args.at<String>(1);
-  ic.UpdateState(receiver, key);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
-  return *result;
-}
-
-
-// Used from ic-<arch>.cc
-RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  ic.UpdateState(receiver, key);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  ic.UpdateState(receiver, key);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
-  return *result;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(StoreIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<String> key = args.at<String>(1);
-  ic.UpdateState(receiver, key);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
-      result,
-      ic.Store(receiver, key, args.at<Object>(2)));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<String> key = args.at<String>(1);
-  ic.UpdateState(receiver, key);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
-      result,
-      ic.Store(receiver, key, args.at<Object>(2)));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(StoreIC_ArrayLength) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-
-  ASSERT(args.length() == 2);
-  Handle<JSArray> receiver = args.at<JSArray>(0);
-  Handle<Object> len = args.at<Object>(1);
-
-  // The generated code should filter out non-Smis before we get here.
-  ASSERT(len->IsSmi());
-
-#ifdef DEBUG
-  // The length property has to be a writable callback property.
-  LookupResult debug_lookup(isolate);
-  receiver->LookupOwn(isolate->factory()->length_string(), &debug_lookup);
-  ASSERT(debug_lookup.IsPropertyCallbacks() && !debug_lookup.IsReadOnly());
-#endif
-
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate, JSArray::SetElementsLength(receiver, len));
-  return *len;
-}
-
-
-// SharedStoreIC_ExtendStorage is called from a store inline cache when it is
-// necessary to extend the properties array of a JSObject.
-RUNTIME_FUNCTION(SharedStoreIC_ExtendStorage) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope shs(isolate);
-  ASSERT(args.length() == 3);
-
-  // Convert the parameters
-  Handle<JSObject> object = args.at<JSObject>(0);
-  Handle<Map> transition = args.at<Map>(1);
-  Handle<Object> value = args.at<Object>(2);
-
-  // Check that the object has run out of property space.
-  ASSERT(object->HasFastProperties());
-  ASSERT(object->map()->unused_property_fields() == 0);
-
-  // Expand the properties array.
-  Handle<FixedArray> old_storage = handle(object->properties(), isolate);
-  int new_unused = transition->unused_property_fields();
-  int new_size = old_storage->length() + new_unused + 1;
-
-  Handle<FixedArray> new_storage = FixedArray::CopySize(old_storage, new_size);
-
-  Handle<Object> to_store = value;
-
-  PropertyDetails details = transition->instance_descriptors()->GetDetails(
-      transition->LastAdded());
-  if (details.representation().IsDouble()) {
-    to_store = isolate->factory()->NewHeapNumber(value->Number());
-  }
-
-  new_storage->set(old_storage->length(), *to_store);
-
-  // Set the new property value and do the map transition.
-  object->set_properties(*new_storage);
-  object->set_map(*transition);
-
-  // Return the stored value.
-  return *value;
-}
-
-
-// Used from ic-<arch>.cc.
-RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  ic.UpdateState(receiver, key);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
-      result,
-      ic.Store(receiver, key, args.at<Object>(2)));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
-  Handle<Object> receiver = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  ic.UpdateState(receiver, key);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
-      result,
-      ic.Store(receiver, key, args.at<Object>(2)));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(StoreIC_Slow) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<Object> object = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  Handle<Object> value = args.at<Object>(2);
-  StrictMode strict_mode = ic.strict_mode();
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Runtime::SetObjectProperty(
-          isolate, object, key, value, NONE, strict_mode));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<Object> object = args.at<Object>(0);
-  Handle<Object> key = args.at<Object>(1);
-  Handle<Object> value = args.at<Object>(2);
-  StrictMode strict_mode = ic.strict_mode();
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Runtime::SetObjectProperty(
-          isolate, object, key, value, NONE, strict_mode));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
-  KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
-  Handle<Object> value = args.at<Object>(0);
-  Handle<Map> map = args.at<Map>(1);
-  Handle<Object> key = args.at<Object>(2);
-  Handle<Object> object = args.at<Object>(3);
-  StrictMode strict_mode = ic.strict_mode();
-  if (object->IsJSObject()) {
-    JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
-                                     map->elements_kind());
-  }
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      Runtime::SetObjectProperty(
-          isolate, object, key, value, NONE, strict_mode));
-  return *result;
-}
-
-
-BinaryOpIC::State::State(Isolate* isolate, ExtraICState extra_ic_state)
-    : isolate_(isolate) {
-  op_ = static_cast<Token::Value>(
-      FIRST_TOKEN + OpField::decode(extra_ic_state));
-  mode_ = OverwriteModeField::decode(extra_ic_state);
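-  // Only the log2 of the power-of-two fixed right argument is encoded, so
-  // reconstruct the actual value here.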
-  fixed_right_arg_ = Maybe<int>(
-      HasFixedRightArgField::decode(extra_ic_state),
-      1 << FixedRightArgValueField::decode(extra_ic_state));
-  left_kind_ = LeftKindField::decode(extra_ic_state);
-  if (fixed_right_arg_.has_value) {
-    right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
-  } else {
-    right_kind_ = RightKindField::decode(extra_ic_state);
-  }
-  result_kind_ = ResultKindField::decode(extra_ic_state);
-  ASSERT_LE(FIRST_TOKEN, op_);
-  ASSERT_LE(op_, LAST_TOKEN);
-}
-
-
-ExtraICState BinaryOpIC::State::GetExtraICState() const {
-  ExtraICState extra_ic_state =
-      OpField::encode(op_ - FIRST_TOKEN) |
-      OverwriteModeField::encode(mode_) |
-      LeftKindField::encode(left_kind_) |
-      ResultKindField::encode(result_kind_) |
-      HasFixedRightArgField::encode(fixed_right_arg_.has_value);
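-  // The fixed right argument is a power of two; encode only its log2.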
-  if (fixed_right_arg_.has_value) {
-    extra_ic_state = FixedRightArgValueField::update(
-        extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
-  } else {
-    extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
-  }
-  return extra_ic_state;
-}
-
-
-// static
-void BinaryOpIC::State::GenerateAheadOfTime(
-    Isolate* isolate, void (*Generate)(Isolate*, const State&)) {
-  // TODO(olivf) We should investigate why adding stubs to the snapshot is so
-  // expensive at runtime. Once that is solved, we should be able to add most
-  // binops to the snapshot instead of hand-picking them.
-  // Generated list of commonly used stubs
-#define GENERATE(op, left_kind, right_kind, result_kind, mode)  \
-  do {                                                          \
-    State state(isolate, op, mode);                             \
-    state.left_kind_ = left_kind;                               \
-    state.fixed_right_arg_.has_value = false;                   \
-    state.right_kind_ = right_kind;                             \
-    state.result_kind_ = result_kind;                           \
-    Generate(isolate, state);                                   \
-  } while (false)
-  GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
-  GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
-  GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
-  GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
-  GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
-  GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
-  GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
-  GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
-  GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
-  GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
-#undef GENERATE
-#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
-  do {                                                                    \
-    State state(isolate, op, mode);                                       \
-    state.left_kind_ = left_kind;                                         \
-    state.fixed_right_arg_.has_value = true;                              \
-    state.fixed_right_arg_.value = fixed_right_arg_value;                 \
-    state.right_kind_ = SMI;                                              \
-    state.result_kind_ = result_kind;                                     \
-    Generate(isolate, state);                                             \
-  } while (false)
-  GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
-  GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
-  GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
-  GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
-  GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
-  GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
-#undef GENERATE
-}
-
-
-Type* BinaryOpIC::State::GetResultType(Zone* zone) const {
-  Kind result_kind = result_kind_;
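-  // String addition and the uint32 results of SHR need special result types.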
-  if (HasSideEffects()) {
-    result_kind = NONE;
-  } else if (result_kind == GENERIC && op_ == Token::ADD) {
-    return Type::Union(Type::Number(zone), Type::String(zone), zone);
-  } else if (result_kind == NUMBER && op_ == Token::SHR) {
-    return Type::Unsigned32(zone);
-  }
-  ASSERT_NE(GENERIC, result_kind);
-  return KindToType(result_kind, zone);
-}
-
-
-void BinaryOpIC::State::Print(StringStream* stream) const {
-  stream->Add("(%s", Token::Name(op_));
-  if (mode_ == OVERWRITE_LEFT) stream->Add("_ReuseLeft");
-  else if (mode_ == OVERWRITE_RIGHT) stream->Add("_ReuseRight");
-  if (CouldCreateAllocationMementos()) stream->Add("_CreateAllocationMementos");
-  stream->Add(":%s*", KindToString(left_kind_));
-  if (fixed_right_arg_.has_value) {
-    stream->Add("%d", fixed_right_arg_.value);
-  } else {
-    stream->Add("%s", KindToString(right_kind_));
-  }
-  stream->Add("->%s)", KindToString(result_kind_));
-}
-
-
-void BinaryOpIC::State::Update(Handle<Object> left,
-                               Handle<Object> right,
-                               Handle<Object> result) {
-  ExtraICState old_extra_ic_state = GetExtraICState();
-
-  left_kind_ = UpdateKind(left, left_kind_);
-  right_kind_ = UpdateKind(right, right_kind_);
-
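-  // A fixed right argument is only profitable for MOD with a small
-  // power-of-two divisor, which can be compiled down to bit masking.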
-  int32_t fixed_right_arg_value = 0;
-  bool has_fixed_right_arg =
-      op_ == Token::MOD &&
-      right->ToInt32(&fixed_right_arg_value) &&
-      fixed_right_arg_value > 0 &&
-      IsPowerOf2(fixed_right_arg_value) &&
-      FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
-      (left_kind_ == SMI || left_kind_ == INT32) &&
-      (result_kind_ == NONE || !fixed_right_arg_.has_value);
-  fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg,
-                                    fixed_right_arg_value);
-
-  result_kind_ = UpdateKind(result, result_kind_);
-
-  if (!Token::IsTruncatingBinaryOp(op_)) {
-    Kind input_kind = Max(left_kind_, right_kind_);
-    if (result_kind_ < input_kind && input_kind <= NUMBER) {
-      result_kind_ = input_kind;
-    }
-  }
-
-  // We don't want to distinguish INT32 and NUMBER for string add (because
-  // NumberToString can't make use of this anyway).
-  if (left_kind_ == STRING && right_kind_ == INT32) {
-    ASSERT_EQ(STRING, result_kind_);
-    ASSERT_EQ(Token::ADD, op_);
-    right_kind_ = NUMBER;
-  } else if (right_kind_ == STRING && left_kind_ == INT32) {
-    ASSERT_EQ(STRING, result_kind_);
-    ASSERT_EQ(Token::ADD, op_);
-    left_kind_ = NUMBER;
-  }
-
-  // Reset overwrite mode unless we can actually make use of it, or may be able
-  // to make use of it at some point in the future.
-  if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
-      (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
-      result_kind_ > NUMBER) {
-    mode_ = NO_OVERWRITE;
-  }
-
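-  // If the state is unchanged, reinstalling the same stub would miss forever,
-  // so force one side to GENERIC to guarantee progress.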
-  if (old_extra_ic_state == GetExtraICState()) {
-    // Tagged operations can lead to non-truncating HChanges
-    if (left->IsUndefined() || left->IsBoolean()) {
-      left_kind_ = GENERIC;
-    } else {
-      ASSERT(right->IsUndefined() || right->IsBoolean());
-      right_kind_ = GENERIC;
-    }
-  }
-}
-
-
-BinaryOpIC::State::Kind BinaryOpIC::State::UpdateKind(Handle<Object> object,
-                                                      Kind kind) const {
-  Kind new_kind = GENERIC;
-  bool is_truncating = Token::IsTruncatingBinaryOp(op());
-  if (object->IsBoolean() && is_truncating) {
-    // Booleans will be automatically truncated by HChange.
-    new_kind = INT32;
-  } else if (object->IsUndefined()) {
-    // Undefined will be automatically truncated by HChange.
-    new_kind = is_truncating ? INT32 : NUMBER;
-  } else if (object->IsSmi()) {
-    new_kind = SMI;
-  } else if (object->IsHeapNumber()) {
-    double value = Handle<HeapNumber>::cast(object)->value();
-    new_kind = IsInt32Double(value) ? INT32 : NUMBER;
-  } else if (object->IsString() && op() == Token::ADD) {
-    new_kind = STRING;
-  }
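-  // With 32-bit Smi values every int32 is already a Smi, so the INT32 kind
-  // carries no extra information over NUMBER.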
-  if (new_kind == INT32 && SmiValuesAre32Bits()) {
-    new_kind = NUMBER;
-  }
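-  // Mixing number and non-number kinds means the site is truly polymorphic.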
-  if (kind != NONE &&
-      ((new_kind <= NUMBER && kind > NUMBER) ||
-       (new_kind > NUMBER && kind <= NUMBER))) {
-    new_kind = GENERIC;
-  }
-  return Max(kind, new_kind);
-}
-
-
-// static
-const char* BinaryOpIC::State::KindToString(Kind kind) {
-  switch (kind) {
-    case NONE: return "None";
-    case SMI: return "Smi";
-    case INT32: return "Int32";
-    case NUMBER: return "Number";
-    case STRING: return "String";
-    case GENERIC: return "Generic";
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
-// static
-Type* BinaryOpIC::State::KindToType(Kind kind, Zone* zone) {
-  switch (kind) {
-    case NONE: return Type::None(zone);
-    case SMI: return Type::SignedSmall(zone);
-    case INT32: return Type::Signed32(zone);
-    case NUMBER: return Type::Number(zone);
-    case STRING: return Type::String(zone);
-    case GENERIC: return Type::Any(zone);
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
-MaybeHandle<Object> BinaryOpIC::Transition(
-    Handle<AllocationSite> allocation_site,
-    Handle<Object> left,
-    Handle<Object> right) {
-  State state(isolate(), target()->extra_ic_state());
-
-  // Compute the actual result using the builtin for the binary operation.
-  Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
-      TokenToJSBuiltin(state.op()));
-  Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
-  Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(
-      isolate(),
-      result,
-      Execution::Call(isolate(), function, left, 1, &right),
-      Object);
-
-  // Execution::Call can execute arbitrary JavaScript, hence potentially
-  // update the state of this very IC, so we must update the stored state.
-  UpdateTarget();
-  // Compute the new state.
-  State old_state(isolate(), target()->extra_ic_state());
-  state.Update(left, right, result);
-
-  // Check if we have a string operation here.
-  Handle<Code> target;
-  if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
-    // Set up the allocation site on demand.
-    if (allocation_site.is_null()) {
-      allocation_site = isolate()->factory()->NewAllocationSite();
-    }
-
-    // Install the stub with an allocation site.
-    BinaryOpICWithAllocationSiteStub stub(isolate(), state);
-    target = stub.GetCodeCopyFromTemplate(allocation_site);
-
-    // Sanity check the trampoline stub.
-    ASSERT_EQ(*allocation_site, target->FindFirstAllocationSite());
-  } else {
-    // Install the generic stub.
-    BinaryOpICStub stub(isolate(), state);
-    target = stub.GetCode();
-
-    // Sanity check the generic stub.
-    ASSERT_EQ(NULL, target->FindFirstAllocationSite());
-  }
-  set_target(*target);
-
-  if (FLAG_trace_ic) {
-    char buffer[150];
-    NoAllocationStringAllocator allocator(
-        buffer, static_cast<unsigned>(sizeof(buffer)));
-    StringStream stream(&allocator);
-    stream.Add("[BinaryOpIC");
-    old_state.Print(&stream);
-    stream.Add(" => ");
-    state.Print(&stream);
-    stream.Add(" @ %p <- ", static_cast<void*>(*target));
-    stream.OutputToStdOut();
-    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
-    if (!allocation_site.is_null()) {
-      PrintF(" using allocation site %p", static_cast<void*>(*allocation_site));
-    }
-    PrintF("]\n");
-  }
-
-  // Patch the inlined smi code as necessary.
-  if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
-    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
-  } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
-    PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
-  }
-
-  return result;
-}
-
-
-RUNTIME_FUNCTION(BinaryOpIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT_EQ(2, args.length());
-  Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
-  Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
-  BinaryOpIC ic(isolate);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
-      result,
-      ic.Transition(Handle<AllocationSite>::null(), left, right));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT_EQ(3, args.length());
-  Handle<AllocationSite> allocation_site = args.at<AllocationSite>(
-      BinaryOpWithAllocationSiteStub::kAllocationSite);
-  Handle<Object> left = args.at<Object>(
-      BinaryOpWithAllocationSiteStub::kLeft);
-  Handle<Object> right = args.at<Object>(
-      BinaryOpWithAllocationSiteStub::kRight);
-  BinaryOpIC ic(isolate);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
-      result,
-      ic.Transition(allocation_site, left, right));
-  return *result;
-}
-
-
-Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
-  ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
-  Code* code = NULL;
-  CHECK(stub.FindCodeInCache(&code));
-  return code;
-}
-
-
-Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
-  ICCompareStub stub(isolate, op, UNINITIALIZED, UNINITIALIZED, UNINITIALIZED);
-  return stub.GetCode();
-}
-
-
-const char* CompareIC::GetStateName(State state) {
-  switch (state) {
-    case UNINITIALIZED: return "UNINITIALIZED";
-    case SMI: return "SMI";
-    case NUMBER: return "NUMBER";
-    case INTERNALIZED_STRING: return "INTERNALIZED_STRING";
-    case STRING: return "STRING";
-    case UNIQUE_NAME: return "UNIQUE_NAME";
-    case OBJECT: return "OBJECT";
-    case KNOWN_OBJECT: return "KNOWN_OBJECT";
-    case GENERIC: return "GENERIC";
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
-Type* CompareIC::StateToType(
-    Zone* zone,
-    CompareIC::State state,
-    Handle<Map> map) {
-  switch (state) {
-    case CompareIC::UNINITIALIZED: return Type::None(zone);
-    case CompareIC::SMI: return Type::SignedSmall(zone);
-    case CompareIC::NUMBER: return Type::Number(zone);
-    case CompareIC::STRING: return Type::String(zone);
-    case CompareIC::INTERNALIZED_STRING: return Type::InternalizedString(zone);
-    case CompareIC::UNIQUE_NAME: return Type::UniqueName(zone);
-    case CompareIC::OBJECT: return Type::Receiver(zone);
-    case CompareIC::KNOWN_OBJECT:
-      return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
-    case CompareIC::GENERIC: return Type::Any(zone);
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
-void CompareIC::StubInfoToType(int stub_minor_key,
-                               Type** left_type,
-                               Type** right_type,
-                               Type** overall_type,
-                               Handle<Map> map,
-                               Zone* zone) {
-  State left_state, right_state, handler_state;
-  ICCompareStub::DecodeMinorKey(stub_minor_key, &left_state, &right_state,
-                                &handler_state, NULL);
-  *left_type = StateToType(zone, left_state);
-  *right_type = StateToType(zone, right_state);
-  *overall_type = StateToType(zone, handler_state, map);
-}
-
-
-CompareIC::State CompareIC::NewInputState(State old_state,
-                                          Handle<Object> value) {
-  switch (old_state) {
-    case UNINITIALIZED:
-      if (value->IsSmi()) return SMI;
-      if (value->IsHeapNumber()) return NUMBER;
-      if (value->IsInternalizedString()) return INTERNALIZED_STRING;
-      if (value->IsString()) return STRING;
-      if (value->IsSymbol()) return UNIQUE_NAME;
-      if (value->IsJSObject()) return OBJECT;
-      break;
-    case SMI:
-      if (value->IsSmi()) return SMI;
-      if (value->IsHeapNumber()) return NUMBER;
-      break;
-    case NUMBER:
-      if (value->IsNumber()) return NUMBER;
-      break;
-    case INTERNALIZED_STRING:
-      if (value->IsInternalizedString()) return INTERNALIZED_STRING;
-      if (value->IsString()) return STRING;
-      if (value->IsSymbol()) return UNIQUE_NAME;
-      break;
-    case STRING:
-      if (value->IsString()) return STRING;
-      break;
-    case UNIQUE_NAME:
-      if (value->IsUniqueName()) return UNIQUE_NAME;
-      break;
-    case OBJECT:
-      if (value->IsJSObject()) return OBJECT;
-      break;
-    case GENERIC:
-      break;
-    case KNOWN_OBJECT:
-      UNREACHABLE();
-      break;
-  }
-  return GENERIC;
-}
-
-
-CompareIC::State CompareIC::TargetState(State old_state,
-                                        State old_left,
-                                        State old_right,
-                                        bool has_inlined_smi_code,
-                                        Handle<Object> x,
-                                        Handle<Object> y) {
-  switch (old_state) {
-    case UNINITIALIZED:
-      if (x->IsSmi() && y->IsSmi()) return SMI;
-      if (x->IsNumber() && y->IsNumber()) return NUMBER;
-      if (Token::IsOrderedRelationalCompareOp(op_)) {
-        // Ordered comparisons treat undefined as NaN, so the
-        // NUMBER stub will do the right thing.
-        if ((x->IsNumber() && y->IsUndefined()) ||
-            (y->IsNumber() && x->IsUndefined())) {
-          return NUMBER;
-        }
-      }
-      if (x->IsInternalizedString() && y->IsInternalizedString()) {
-        // We compare internalized strings as plain ones if we need to determine
-        // the order in a non-equality compare.
-        return Token::IsEqualityOp(op_) ? INTERNALIZED_STRING : STRING;
-      }
-      if (x->IsString() && y->IsString()) return STRING;
-      if (!Token::IsEqualityOp(op_)) return GENERIC;
-      if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
-      if (x->IsJSObject() && y->IsJSObject()) {
-        if (Handle<JSObject>::cast(x)->map() ==
-            Handle<JSObject>::cast(y)->map()) {
-          return KNOWN_OBJECT;
-        } else {
-          return OBJECT;
-        }
-      }
-      return GENERIC;
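-    // A site that has only seen Smis widens to NUMBER for other numbers;
-    // anything else goes generic.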
-    case SMI:
-      return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
-    case INTERNALIZED_STRING:
-      ASSERT(Token::IsEqualityOp(op_));
-      if (x->IsString() && y->IsString()) return STRING;
-      if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
-      return GENERIC;
-    case NUMBER:
-      // If the failure was due to one side changing from smi to heap number,
-      // then keep the state (if the other side changed at the same time, we
-      // will get a second miss and then go to generic).
-      if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
-      if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
-      return GENERIC;
-    case KNOWN_OBJECT:
-      ASSERT(Token::IsEqualityOp(op_));
-      if (x->IsJSObject() && y->IsJSObject()) return OBJECT;
-      return GENERIC;
-    case STRING:
-    case UNIQUE_NAME:
-    case OBJECT:
-    case GENERIC:
-      return GENERIC;
-  }
-  UNREACHABLE();
-  return GENERIC;  // Make the compiler happy.
-}
-
-
-Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
-  HandleScope scope(isolate());
-  State previous_left, previous_right, previous_state;
-  ICCompareStub::DecodeMinorKey(target()->stub_info(), &previous_left,
-                                &previous_right, &previous_state, NULL);
-  State new_left = NewInputState(previous_left, x);
-  State new_right = NewInputState(previous_right, y);
-  State state = TargetState(previous_state, previous_left, previous_right,
-                            HasInlinedSmiCode(address()), x, y);
-  ICCompareStub stub(isolate(), op_, new_left, new_right, state);
-  if (state == KNOWN_OBJECT) {
-    stub.set_known_map(
-        Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
-  }
-  Handle<Code> new_target = stub.GetCode();
-  set_target(*new_target);
-
-  if (FLAG_trace_ic) {
-    PrintF("[CompareIC in ");
-    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
-    PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
-           GetStateName(previous_left),
-           GetStateName(previous_right),
-           GetStateName(previous_state),
-           GetStateName(new_left),
-           GetStateName(new_right),
-           GetStateName(state),
-           Token::Name(op_),
-           static_cast<void*>(*stub.GetCode()));
-  }
-
-  // Activate inlined smi code.
-  if (previous_state == UNINITIALIZED) {
-    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
-  }
-
-  return *new_target;
-}
-
-
-// Used from ICCompareStub::GenerateMiss in code-stubs-<arch>.cc.
-RUNTIME_FUNCTION(CompareIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
-  return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
-}
-
-
-void CompareNilIC::Clear(Address address,
-                         Code* target,
-                         ConstantPoolArray* constant_pool) {
-  if (IsCleared(target)) return;
-  ExtraICState state = target->extra_ic_state();
-
-  CompareNilICStub stub(target->GetIsolate(),
-                        state,
-                        HydrogenCodeStub::UNINITIALIZED);
-  stub.ClearState();
-
-  Code* code = NULL;
-  CHECK(stub.FindCodeInCache(&code));
-
-  SetTargetAtAddress(address, code, constant_pool);
-}
-
-
-Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate,
-                                              NilValue nil,
-                                              Handle<Object> object) {
-  if (object->IsNull() || object->IsUndefined()) {
-    return handle(Smi::FromInt(true), isolate);
-  }
-  return handle(Smi::FromInt(object->IsUndetectableObject()), isolate);
-}
-
-
-Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
-  ExtraICState extra_ic_state = target()->extra_ic_state();
-
-  CompareNilICStub stub(isolate(), extra_ic_state);
-
-  // Extract the current supported types from the patched IC and calculate what
-  // types must be supported as a result of the miss.
-  bool already_monomorphic = stub.IsMonomorphic();
-
-  stub.UpdateStatus(object);
-
-  NilValue nil = stub.GetNilValue();
-
-  // Find or create the specialized stub to support the new set of types.
-  Handle<Code> code;
-  if (stub.IsMonomorphic()) {
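-    // Keep the previously recorded map if the stub was already monomorphic;
-    // otherwise specialize to the map of the object that missed.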
-    Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL
-                                ? FirstTargetMap()
-                                : HeapObject::cast(*object)->map());
-    code = isolate()->stub_cache()->ComputeCompareNil(monomorphic_map, &stub);
-  } else {
-    code = stub.GetCode();
-  }
-  set_target(*code);
-  return DoCompareNilSlow(isolate(), nil, object);
-}
-
-
-RUNTIME_FUNCTION(CompareNilIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);
-  CompareNilIC ic(isolate);
-  return *ic.CompareNil(object);
-}
-
-
-RUNTIME_FUNCTION(Unreachable) {
-  UNREACHABLE();
-  CHECK(false);
-  return isolate->heap()->undefined_value();
-}
-
-
-Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
-  switch (op) {
-    default:
-      UNREACHABLE();
-    case Token::ADD:
-      return Builtins::ADD;
-    case Token::SUB:
-      return Builtins::SUB;
-    case Token::MUL:
-      return Builtins::MUL;
-    case Token::DIV:
-      return Builtins::DIV;
-    case Token::MOD:
-      return Builtins::MOD;
-    case Token::BIT_OR:
-      return Builtins::BIT_OR;
-    case Token::BIT_AND:
-      return Builtins::BIT_AND;
-    case Token::BIT_XOR:
-      return Builtins::BIT_XOR;
-    case Token::SAR:
-      return Builtins::SAR;
-    case Token::SHR:
-      return Builtins::SHR;
-    case Token::SHL:
-      return Builtins::SHL;
-  }
-}
-
-
-Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
-  ToBooleanStub stub(isolate(), target()->extra_ic_state());
-  bool to_boolean_value = stub.UpdateStatus(object);
-  Handle<Code> code = stub.GetCode();
-  set_target(*code);
-  return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate());
-}
-
-
-RUNTIME_FUNCTION(ToBooleanIC_Miss) {
-  Logger::TimerEventScope timer(
-      isolate, Logger::TimerEventScope::v8_ic_miss);
-  ASSERT(args.length() == 1);
-  HandleScope scope(isolate);
-  Handle<Object> object = args.at<Object>(0);
-  ToBooleanIC ic(isolate);
-  return *ic.ToBoolean(object);
-}
-
-
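-// Table of runtime entry points, indexed by IC::UtilityId.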
-static const Address IC_utilities[] = {
-#define ADDR(name) FUNCTION_ADDR(name),
-    IC_UTIL_LIST(ADDR)
-    NULL
-#undef ADDR
-};
-
-
-Address IC::AddressFromUtilityId(IC::UtilityId id) {
-  return IC_utilities[id];
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/ic.h b/src/ic.h
deleted file mode 100644
index 3f55043..0000000
--- a/src/ic.h
+++ /dev/null
@@ -1,1023 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_IC_H_
-#define V8_IC_H_
-
-#include "src/macro-assembler.h"
-
-namespace v8 {
-namespace internal {
-
-
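-// Upper bound on the number of receiver maps tracked by polymorphic keyed ICs.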
-const int kMaxKeyedPolymorphism = 4;
-
-
-// IC_UTIL_LIST defines all utility functions called from generated
-// inline caching code. The argument for the macro, ICU, is the function name.
-#define IC_UTIL_LIST(ICU)                             \
-  ICU(LoadIC_Miss)                                    \
-  ICU(KeyedLoadIC_Miss)                               \
-  ICU(CallIC_Miss)                                    \
-  ICU(CallIC_Customization_Miss)                      \
-  ICU(StoreIC_Miss)                                   \
-  ICU(StoreIC_ArrayLength)                            \
-  ICU(StoreIC_Slow)                                   \
-  ICU(SharedStoreIC_ExtendStorage)                    \
-  ICU(KeyedStoreIC_Miss)                              \
-  ICU(KeyedStoreIC_Slow)                              \
-  /* Utilities for IC stubs. */                       \
-  ICU(StoreCallbackProperty)                          \
-  ICU(LoadPropertyWithInterceptorOnly)                \
-  ICU(LoadPropertyWithInterceptor)                    \
-  ICU(KeyedLoadPropertyWithInterceptor)               \
-  ICU(StoreInterceptorProperty)                       \
-  ICU(CompareIC_Miss)                                 \
-  ICU(BinaryOpIC_Miss)                                \
-  ICU(CompareNilIC_Miss)                              \
-  ICU(Unreachable)                                    \
-  ICU(ToBooleanIC_Miss)
-//
-// IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
-//
-class IC {
- public:
-  // The ids for the utility functions called from the generated code.
-  enum UtilityId {
-  #define CONST_NAME(name) k##name,
-    IC_UTIL_LIST(CONST_NAME)
-  #undef CONST_NAME
-    kUtilityCount
-  };
-
-  // Looks up the address of the named utility.
-  static Address AddressFromUtilityId(UtilityId id);
-
-  // Alias the inline cache state type to make the IC code more readable.
-  typedef InlineCacheState State;
-
-  // The IC code is either invoked with no extra frames on the stack
-  // or with a single extra frame for supporting calls.
-  enum FrameDepth {
-    NO_EXTRA_FRAME = 0,
-    EXTRA_CALL_FRAME = 1
-  };
-
-  // Construct the IC structure with the given number of extra
-  // JavaScript frames on the stack.
-  IC(FrameDepth depth, Isolate* isolate);
-  virtual ~IC() {}
-
-  State state() const { return state_; }
-  inline Address address() const;
-
-  // Compute the current IC state based on the target stub, receiver and name.
-  void UpdateState(Handle<Object> receiver, Handle<Object> name);
-
-  bool IsNameCompatibleWithMonomorphicPrototypeFailure(Handle<Object> name);
-  bool TryMarkMonomorphicPrototypeFailure(Handle<Object> name) {
-    if (IsNameCompatibleWithMonomorphicPrototypeFailure(name)) {
-      state_ = MONOMORPHIC_PROTOTYPE_FAILURE;
-      return true;
-    }
-    return false;
-  }
-
-  // If the stub contains weak maps then this function adds the stub to
-  // the dependent code array of each weak map.
-  static void RegisterWeakMapDependency(Handle<Code> stub);
-
-  // This function is called when a weak map in the stub is dying,
-  // invalidates the stub by setting maps in it to undefined.
-  static void InvalidateMaps(Code* stub);
-
-  // Clear the inline cache to initial state.
-  static void Clear(Isolate* isolate,
-                    Address address,
-                    ConstantPoolArray* constant_pool);
-
-#ifdef DEBUG
-  bool IsLoadStub() const {
-    return target()->is_load_stub() || target()->is_keyed_load_stub();
-  }
-
-  bool IsStoreStub() const {
-    return target()->is_store_stub() || target()->is_keyed_store_stub();
-  }
-
-  bool IsCallStub() const {
-    return target()->is_call_stub();
-  }
-#endif
-
-  // Determines which map must be used for keeping the code stub.
-  // These methods should not be called with undefined or null.
-  static inline InlineCacheHolderFlag GetCodeCacheForObject(Object* object);
-  // TODO(verwaest): This currently returns a HeapObject rather than JSObject*
-  // since the ICs for loading the length from strings are stored on the
-  // string map directly, rather than on the JSObject-typed prototype.
-  static inline HeapObject* GetCodeCacheHolder(Isolate* isolate,
-                                               Object* object,
-                                               InlineCacheHolderFlag holder);
-
-  static inline InlineCacheHolderFlag GetCodeCacheFlag(HeapType* type);
-  static inline Handle<Map> GetCodeCacheHolder(InlineCacheHolderFlag flag,
-                                               HeapType* type,
-                                               Isolate* isolate);
-
-  static bool IsCleared(Code* code) {
-    InlineCacheState state = code->ic_state();
-    return state == UNINITIALIZED || state == PREMONOMORPHIC;
-  }
-
-  // Utility functions to convert maps to types and back. There are two special
-  // cases:
-  // - The heap_number_map is used as a marker which includes heap numbers as
-  //   well as smis.
-  // - The oddball map is only used for booleans.
-  static Handle<Map> TypeToMap(HeapType* type, Isolate* isolate);
-  template <class T>
-  static typename T::TypeHandle MapToType(Handle<Map> map,
-                                          typename T::Region* region);
-
-  static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
-                                        Isolate* isolate);
-
- protected:
-  // Get the call-site target; used for determining the state.
-  Handle<Code> target() const { return target_; }
-
-  Address fp() const { return fp_; }
-  Address pc() const { return *pc_address_; }
-  Isolate* isolate() const { return isolate_; }
-
-  // Get the shared function info of the caller.
-  SharedFunctionInfo* GetSharedFunctionInfo() const;
-  // Get the code object of the caller.
-  Code* GetCode() const;
-  // Get the original (non-breakpointed) code object of the caller.
-  Code* GetOriginalCode() const;
-
-  // Set the call-site target.
-  void set_target(Code* code) {
-#ifdef VERIFY_HEAP
-    code->VerifyEmbeddedObjectsDependency();
-#endif
-    SetTargetAtAddress(address(), code, constant_pool());
-    target_set_ = true;
-  }
-
-  bool is_target_set() { return target_set_; }
-
-#ifdef DEBUG
-  char TransitionMarkFromState(IC::State state);
-
-  void TraceIC(const char* type, Handle<Object> name);
-#endif
-
-  MaybeHandle<Object> TypeError(const char* type,
-                                Handle<Object> object,
-                                Handle<Object> key);
-  MaybeHandle<Object> ReferenceError(const char* type, Handle<String> name);
-
-  // Access the target code for the given IC address.
-  static inline Code* GetTargetAtAddress(Address address,
-                                         ConstantPoolArray* constant_pool);
-  static inline void SetTargetAtAddress(Address address,
-                                        Code* target,
-                                        ConstantPoolArray* constant_pool);
-  static void PostPatching(Address address, Code* target, Code* old_target);
-
-  // Compute the handler either by compiling or by retrieving a cached version.
-  Handle<Code> ComputeHandler(LookupResult* lookup,
-                              Handle<Object> object,
-                              Handle<String> name,
-                              Handle<Object> value = Handle<Code>::null());
-  virtual Handle<Code> CompileHandler(LookupResult* lookup,
-                                      Handle<Object> object,
-                                      Handle<String> name,
-                                      Handle<Object> value,
-                                      InlineCacheHolderFlag cache_holder) {
-    UNREACHABLE();
-    return Handle<Code>::null();
-  }
-
-  void UpdateMonomorphicIC(Handle<HeapType> type,
-                           Handle<Code> handler,
-                           Handle<String> name);
-
-  bool UpdatePolymorphicIC(Handle<HeapType> type,
-                           Handle<String> name,
-                           Handle<Code> code);
-
-  virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
-
-  void CopyICToMegamorphicCache(Handle<String> name);
-  bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
-  void PatchCache(Handle<HeapType> type,
-                  Handle<String> name,
-                  Handle<Code> code);
-  virtual Code::Kind kind() const {
-    UNREACHABLE();
-    return Code::STUB;
-  }
-  virtual Handle<Code> slow_stub() const {
-    UNREACHABLE();
-    return Handle<Code>::null();
-  }
-  virtual Handle<Code> megamorphic_stub() {
-    UNREACHABLE();
-    return Handle<Code>::null();
-  }
-  virtual Handle<Code> generic_stub() const {
-    UNREACHABLE();
-    return Handle<Code>::null();
-  }
-
-  bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
-                                              Handle<String> name);
-  void TryRemoveInvalidHandlers(Handle<Map> map, Handle<String> name);
-
-  ExtraICState extra_ic_state() const { return extra_ic_state_; }
-  void set_extra_ic_state(ExtraICState state) {
-    extra_ic_state_ = state;
-  }
-
-  void TargetMaps(MapHandleList* list) {
-    FindTargetMaps();
-    for (int i = 0; i < target_maps_.length(); i++) {
-      list->Add(target_maps_.at(i));
-    }
-  }
-
-  void TargetTypes(TypeHandleList* list) {
-    FindTargetMaps();
-    for (int i = 0; i < target_maps_.length(); i++) {
-      list->Add(IC::MapToType<HeapType>(target_maps_.at(i), isolate_));
-    }
-  }
-
-  Map* FirstTargetMap() {
-    FindTargetMaps();
-    return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
-  }
-
- protected:
-  void UpdateTarget() {
-    target_ = handle(raw_target(), isolate_);
-  }
-
- private:
-  Code* raw_target() const {
-    return GetTargetAtAddress(address(), constant_pool());
-  }
-  inline ConstantPoolArray* constant_pool() const;
-  inline ConstantPoolArray* raw_constant_pool() const;
-
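-  // Lazily extracts the receiver maps embedded in the current target code.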
-  void FindTargetMaps() {
-    if (target_maps_set_) return;
-    target_maps_set_ = true;
-    if (state_ == MONOMORPHIC) {
-      Map* map = target_->FindFirstMap();
-      if (map != NULL) target_maps_.Add(handle(map));
-    } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
-      target_->FindAllMaps(&target_maps_);
-    }
-  }
-
-  // Frame pointer for the frame that uses (calls) the IC.
-  Address fp_;
-
-  // All access to the program counter of an IC structure is indirect
-  // to make the code GC safe. This feature is crucial since
-  // GetProperty and SetProperty are called and they in turn might
-  // invoke the garbage collector.
-  Address* pc_address_;
-
-  Isolate* isolate_;
-
-  // The constant pool of the code which originally called the IC (which might
-  // be for the breakpointed copy of the original code).
-  Handle<ConstantPoolArray> raw_constant_pool_;
-
-  // The original code target that missed.
-  Handle<Code> target_;
-  State state_;
-  bool target_set_;
-
-  ExtraICState extra_ic_state_;
-  MapHandleList target_maps_;
-  bool target_maps_set_;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
-};
-
-
-// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
-// cannot forward-declare an enum.
-class IC_Utility {
- public:
-  explicit IC_Utility(IC::UtilityId id)
-    : address_(IC::AddressFromUtilityId(id)), id_(id) {}
-
-  Address address() const { return address_; }
-
-  IC::UtilityId id() const { return id_; }
- private:
-  Address address_;
-  IC::UtilityId id_;
-};
-
-
-class CallIC: public IC {
- public:
-  enum CallType { METHOD, FUNCTION };
-
-  class State V8_FINAL BASE_EMBEDDED {
-   public:
-    explicit State(ExtraICState extra_ic_state);
-
-    State(int argc, CallType call_type)
-        : argc_(argc), call_type_(call_type) {
-    }
-
-    InlineCacheState GetICState() const { return ::v8::internal::GENERIC; }
-
-    ExtraICState GetExtraICState() const;
-
-    static void GenerateAheadOfTime(
-        Isolate*, void (*Generate)(Isolate*, const State&));
-
-    int arg_count() const { return argc_; }
-    CallType call_type() const { return call_type_; }
-
-    bool CallAsMethod() const { return call_type_ == METHOD; }
-
-    void Print(StringStream* stream) const;
-
-   private:
-    class ArgcBits: public BitField<int, 0, Code::kArgumentsBits> {};
-    class CallTypeBits: public BitField<CallType, Code::kArgumentsBits, 1> {};
-
-    const int argc_;
-    const CallType call_type_;
-  };
-
-  explicit CallIC(Isolate* isolate)
-      : IC(EXTRA_CALL_FRAME, isolate) {
-  }
-
-  void PatchMegamorphic(Handle<FixedArray> vector, Handle<Smi> slot);
-
-  void HandleMiss(Handle<Object> receiver,
-                  Handle<Object> function,
-                  Handle<FixedArray> vector,
-                  Handle<Smi> slot);
-
-  // Returns true if a custom handler was installed.
-  bool DoCustomHandler(Handle<Object> receiver,
-                       Handle<Object> function,
-                       Handle<FixedArray> vector,
-                       Handle<Smi> slot,
-                       const State& state);
-
-  // Code generator routines.
-  static Handle<Code> initialize_stub(Isolate* isolate,
-                                      int argc,
-                                      CallType call_type);
-
-  static void Clear(Isolate* isolate, Address address, Code* target,
-                    ConstantPoolArray* constant_pool);
-};
-
-
-class LoadIC: public IC {
- public:
-  // ExtraICState bits
-  class ContextualModeBits: public BitField<ContextualMode, 0, 1> {};
-  STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
-
-  static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) {
-    return ContextualModeBits::encode(contextual_mode);
-  }
-
-  static ContextualMode GetContextualMode(ExtraICState state) {
-    return ContextualModeBits::decode(state);
-  }
-
-  ContextualMode contextual_mode() const {
-    return ContextualModeBits::decode(extra_ic_state());
-  }
-
-  explicit LoadIC(FrameDepth depth, Isolate* isolate)
-      : IC(depth, isolate) {
-    ASSERT(IsLoadStub());
-  }
-
-  // Returns whether this IC is for contextual (no explicit receiver)
-  // access to properties.
-  bool IsUndeclaredGlobal(Handle<Object> receiver) {
-    if (receiver->IsGlobalObject()) {
-      return contextual_mode() == CONTEXTUAL;
-    } else {
-      ASSERT(contextual_mode() != CONTEXTUAL);
-      return false;
-    }
-  }
-
-  // Code generator routines.
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm);
-  }
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateMegamorphic(MacroAssembler* masm);
-  static void GenerateNormal(MacroAssembler* masm);
-  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-
-  static Handle<Code> initialize_stub(Isolate* isolate,
-                                      ExtraICState extra_state);
-
-  MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
-                                           Handle<String> name);
-
- protected:
-  virtual Code::Kind kind() const { return Code::LOAD_IC; }
-
-  void set_target(Code* code) {
-    // The contextual mode must be preserved across IC patching.
-    ASSERT(GetContextualMode(code->extra_ic_state()) ==
-           GetContextualMode(target()->extra_ic_state()));
-
-    IC::set_target(code);
-  }
-
-  virtual Handle<Code> slow_stub() const {
-    return isolate()->builtins()->LoadIC_Slow();
-  }
-
-  virtual Handle<Code> megamorphic_stub();
-
-  // Update the inline cache and the global stub cache based on the
-  // lookup result.
-  void UpdateCaches(LookupResult* lookup,
-                    Handle<Object> object,
-                    Handle<String> name);
-
-  virtual Handle<Code> CompileHandler(LookupResult* lookup,
-                                      Handle<Object> object,
-                                      Handle<String> name,
-                                      Handle<Object> unused,
-                                      InlineCacheHolderFlag cache_holder);
-
- private:
-  // Stub accessors.
-  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
-                                           ExtraICState extra_state);
-
-  virtual Handle<Code> pre_monomorphic_stub() {
-    return pre_monomorphic_stub(isolate(), extra_ic_state());
-  }
-
-  Handle<Code> SimpleFieldLoad(FieldIndex index);
-
-  static void Clear(Isolate* isolate,
-                    Address address,
-                    Code* target,
-                    ConstantPoolArray* constant_pool);
-
-  friend class IC;
-};
-
-
-class KeyedLoadIC: public LoadIC {
- public:
-  explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
-      : LoadIC(depth, isolate) {
-    ASSERT(target()->is_keyed_load_stub());
-  }
-
-  MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
-                                           Handle<Object> key);
-
-  // Code generator routines.
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm);
-  }
-  static void GenerateGeneric(MacroAssembler* masm);
-  static void GenerateString(MacroAssembler* masm);
-  static void GenerateIndexedInterceptor(MacroAssembler* masm);
-  static void GenerateSloppyArguments(MacroAssembler* masm);
-
-  // Bit mask to be tested against bit field for the cases when
-  // generic stub should go into slow case.
-  // Access check is necessary explicitly since generic stub does not perform
-  // map checks.
-  static const int kSlowCaseBitFieldMask =
-      (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
-
- protected:
-  virtual Code::Kind kind() const { return Code::KEYED_LOAD_IC; }
-
-  Handle<Code> LoadElementStub(Handle<JSObject> receiver);
-
-  virtual Handle<Code> megamorphic_stub();
-  virtual Handle<Code> generic_stub() const;
-
-  virtual Handle<Code> slow_stub() const {
-    return isolate()->builtins()->KeyedLoadIC_Slow();
-  }
-
-  virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {}
-
- private:
-  // Stub accessors.
-  static Handle<Code> pre_monomorphic_stub(Isolate* isolate) {
-    return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
-  }
-  virtual Handle<Code> pre_monomorphic_stub() {
-    return pre_monomorphic_stub(isolate());
-  }
-  Handle<Code> indexed_interceptor_stub() {
-    return isolate()->builtins()->KeyedLoadIC_IndexedInterceptor();
-  }
-  Handle<Code> sloppy_arguments_stub() {
-    return isolate()->builtins()->KeyedLoadIC_SloppyArguments();
-  }
-  Handle<Code> string_stub() {
-    return isolate()->builtins()->KeyedLoadIC_String();
-  }
-
-  static void Clear(Isolate* isolate,
-                    Address address,
-                    Code* target,
-                    ConstantPoolArray* constant_pool);
-
-  friend class IC;
-};
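// A minimal, self-contained sketch (not part of this patch) of the test that
// kSlowCaseBitFieldMask encodes: the generic keyed-load stub loads the map's
// bit field byte and falls back to the slow case when either masked bit is
// set. The bit positions below are stand-ins for Map::kIsAccessCheckNeeded
// and Map::kHasIndexedInterceptor, not V8's real values.
#include <cstdint>

constexpr int kIsAccessCheckNeededBit = 1;    // stand-in bit position
constexpr int kHasIndexedInterceptorBit = 2;  // stand-in bit position
constexpr uint8_t kSlowCaseMask =
    (1 << kIsAccessCheckNeededBit) | (1 << kHasIndexedInterceptorBit);

// True when the generic stub must defer to the slow case: the receiver either
// requires an access check or has an indexed interceptor, and the generic
// stub performs no map checks that would catch this.
inline bool NeedsSlowCase(uint8_t map_bit_field) {
  return (map_bit_field & kSlowCaseMask) != 0;
}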
-
-
-class StoreIC: public IC {
- public:
-  class StrictModeState: public BitField<StrictMode, 1, 1> {};
-  static ExtraICState ComputeExtraICState(StrictMode flag) {
-    return StrictModeState::encode(flag);
-  }
-  static StrictMode GetStrictMode(ExtraICState state) {
-    return StrictModeState::decode(state);
-  }
-
-  // For convenience, a statically declared encoding of strict mode extra
-  // IC state; see the encoding sketch after this class.
-  static const ExtraICState kStrictModeState =
-      1 << StrictModeState::kShift;
-
-  StoreIC(FrameDepth depth, Isolate* isolate)
-      : IC(depth, isolate) {
-    ASSERT(IsStoreStub());
-  }
-
-  StrictMode strict_mode() const {
-    return StrictModeState::decode(extra_ic_state());
-  }
-
-  // Code generators for stub routines. Only called once at startup.
-  static void GenerateSlow(MacroAssembler* masm);
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm);
-  }
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateMegamorphic(MacroAssembler* masm);
-  static void GenerateNormal(MacroAssembler* masm);
-  static void GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         StrictMode strict_mode);
-
-  static Handle<Code> initialize_stub(Isolate* isolate,
-                                      StrictMode strict_mode);
-
-  MUST_USE_RESULT MaybeHandle<Object> Store(
-      Handle<Object> object,
-      Handle<String> name,
-      Handle<Object> value,
-      JSReceiver::StoreFromKeyed store_mode =
-          JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
-
- protected:
-  virtual Code::Kind kind() const { return Code::STORE_IC; }
-  virtual Handle<Code> megamorphic_stub();
-
-  // Stub accessors.
-  virtual Handle<Code> generic_stub() const;
-
-  virtual Handle<Code> slow_stub() const {
-    return isolate()->builtins()->StoreIC_Slow();
-  }
-
-  virtual Handle<Code> pre_monomorphic_stub() {
-    return pre_monomorphic_stub(isolate(), strict_mode());
-  }
-
-  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
-                                           StrictMode strict_mode);
-
-  // Update the inline cache and the global stub cache based on the
-  // lookup result.
-  void UpdateCaches(LookupResult* lookup,
-                    Handle<JSObject> receiver,
-                    Handle<String> name,
-                    Handle<Object> value);
-  virtual Handle<Code> CompileHandler(LookupResult* lookup,
-                                      Handle<Object> object,
-                                      Handle<String> name,
-                                      Handle<Object> value,
-                                      InlineCacheHolderFlag cache_holder);
-
- private:
-  void set_target(Code* code) {
-    // Strict mode must be preserved across IC patching.
-    ASSERT(GetStrictMode(code->extra_ic_state()) ==
-           GetStrictMode(target()->extra_ic_state()));
-    IC::set_target(code);
-  }
-
-  static void Clear(Isolate* isolate,
-                    Address address,
-                    Code* target,
-                    ConstantPoolArray* constant_pool);
-
-  friend class IC;
-};
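// A minimal sketch (not part of this patch) of the BitField encoding behind
// StrictModeState above: a one-bit field stored at shift 1 inside the integer
// ExtraICState. The BitField template is a simplified stand-in for V8's
// src/utils.h version.
#include <cassert>

template <class T, int shift, int size>
struct BitField {
  static const int kShift = shift;
  static const unsigned kMask = ((1u << size) - 1) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned state) {
    return static_cast<T>((state & kMask) >> shift);
  }
};

enum StrictMode { SLOPPY = 0, STRICT = 1 };
typedef BitField<StrictMode, 1, 1> StrictModeState;

int main() {
  unsigned state = StrictModeState::encode(STRICT);
  assert(state == (1u << StrictModeState::kShift));  // == kStrictModeState
  assert(StrictModeState::decode(state) == STRICT);
  return 0;
}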
-
-
-enum KeyedStoreCheckMap {
-  kDontCheckMap,
-  kCheckMap
-};
-
-
-enum KeyedStoreIncrementLength {
-  kDontIncrementLength,
-  kIncrementLength
-};
-
-
-class KeyedStoreIC: public StoreIC {
- public:
-  // ExtraICState bits (building on IC); see the encoding sketch after this
-  // class.
-  class ExtraICStateKeyedAccessStoreMode:
-      public BitField<KeyedAccessStoreMode, 2, 4> {};  // NOLINT
-
-  static ExtraICState ComputeExtraICState(StrictMode flag,
-                                          KeyedAccessStoreMode mode) {
-    return StrictModeState::encode(flag) |
-        ExtraICStateKeyedAccessStoreMode::encode(mode);
-  }
-
-  static KeyedAccessStoreMode GetKeyedAccessStoreMode(
-      ExtraICState extra_state) {
-    return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
-  }
-
-  KeyedStoreIC(FrameDepth depth, Isolate* isolate)
-      : StoreIC(depth, isolate) {
-    ASSERT(target()->is_keyed_store_stub());
-  }
-
-  MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
-                                            Handle<Object> name,
-                                            Handle<Object> value);
-
-  // Code generators for stub routines.  Only called once at startup.
-  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
-  static void GeneratePreMonomorphic(MacroAssembler* masm) {
-    GenerateMiss(masm);
-  }
-  static void GenerateMiss(MacroAssembler* masm);
-  static void GenerateSlow(MacroAssembler* masm);
-  static void GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         StrictMode strict_mode);
-  static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
-  static void GenerateSloppyArguments(MacroAssembler* masm);
-
- protected:
-  virtual Code::Kind kind() const { return Code::KEYED_STORE_IC; }
-
-  virtual void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {}
-
-  virtual Handle<Code> pre_monomorphic_stub() {
-    return pre_monomorphic_stub(isolate(), strict_mode());
-  }
-  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
-                                           StrictMode strict_mode) {
-    if (strict_mode == STRICT) {
-      return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
-    } else {
-      return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
-    }
-  }
-  virtual Handle<Code> slow_stub() const {
-    return isolate()->builtins()->KeyedStoreIC_Slow();
-  }
-  virtual Handle<Code> megamorphic_stub() {
-    if (strict_mode() == STRICT) {
-      return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
-    } else {
-      return isolate()->builtins()->KeyedStoreIC_Generic();
-    }
-  }
-
-  Handle<Code> StoreElementStub(Handle<JSObject> receiver,
-                                KeyedAccessStoreMode store_mode);
-
- private:
-  void set_target(Code* code) {
-    // Strict mode must be preserved across IC patching.
-    ASSERT(GetStrictMode(code->extra_ic_state()) == strict_mode());
-    IC::set_target(code);
-  }
-
-  // Stub accessors.
-  virtual Handle<Code> generic_stub() const {
-    if (strict_mode() == STRICT) {
-      return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
-    } else {
-      return isolate()->builtins()->KeyedStoreIC_Generic();
-    }
-  }
-
-  Handle<Code> sloppy_arguments_stub() {
-    return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
-  }
-
-  static void Clear(Isolate* isolate,
-                    Address address,
-                    Code* target,
-                    ConstantPoolArray* constant_pool);
-
-  KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
-                                    Handle<Object> key,
-                                    Handle<Object> value);
-
-  Handle<Map> ComputeTransitionedMap(Handle<Map> map,
-                                     KeyedAccessStoreMode store_mode);
-
-  friend class IC;
-};
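// Continuing the BitField sketch above (not part of this patch): KeyedStoreIC
// packs two independent fields into one ExtraICState word, the strict-mode
// bit at shift 1 and a four-bit KeyedAccessStoreMode at shifts 2-5, so the
// two encodings can be ORed together without losing information. The store
// mode enumerators below are stand-ins, not V8's real ones.
enum KeyedAccessStoreMode { STANDARD_STORE = 0, STORE_AND_GROW = 1 };
typedef BitField<KeyedAccessStoreMode, 2, 4> StoreModeState;

unsigned ComputeKeyedStoreExtraState(StrictMode flag,
                                     KeyedAccessStoreMode mode) {
  // Disjoint masks guarantee the OR below keeps both fields intact.
  static_assert((StrictModeState::kMask & StoreModeState::kMask) == 0,
                "bit fields must not overlap");
  return StrictModeState::encode(flag) | StoreModeState::encode(mode);
}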
-
-
-// Mode to overwrite BinaryExpression values.
-enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
-
-// Type Recording BinaryOpIC, that records the types of the inputs and outputs.
-class BinaryOpIC: public IC {
- public:
-  class State V8_FINAL BASE_EMBEDDED {
-   public:
-    State(Isolate* isolate, ExtraICState extra_ic_state);
-
-    State(Isolate* isolate, Token::Value op, OverwriteMode mode)
-        : op_(op), mode_(mode), left_kind_(NONE), right_kind_(NONE),
-          result_kind_(NONE), isolate_(isolate) {
-      ASSERT_LE(FIRST_TOKEN, op);
-      ASSERT_LE(op, LAST_TOKEN);
-    }
-
-    InlineCacheState GetICState() const {
-      if (Max(left_kind_, right_kind_) == NONE) {
-        return ::v8::internal::UNINITIALIZED;
-      }
-      if (Max(left_kind_, right_kind_) == GENERIC) {
-        return ::v8::internal::MEGAMORPHIC;
-      }
-      if (Min(left_kind_, right_kind_) == GENERIC) {
-        return ::v8::internal::GENERIC;
-      }
-      return ::v8::internal::MONOMORPHIC;
-    }
-
-    ExtraICState GetExtraICState() const;
-
-    static void GenerateAheadOfTime(
-        Isolate*, void (*Generate)(Isolate*, const State&));
-
-    bool CanReuseDoubleBox() const {
-      return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
-          ((mode_ == OVERWRITE_LEFT &&
-            left_kind_ > SMI && left_kind_ <= NUMBER) ||
-           (mode_ == OVERWRITE_RIGHT &&
-            right_kind_ > SMI && right_kind_ <= NUMBER));
-    }
-
-    // Returns true if the IC _could_ create allocation mementos.
-    bool CouldCreateAllocationMementos() const {
-      if (left_kind_ == STRING || right_kind_ == STRING) {
-        ASSERT_EQ(Token::ADD, op_);
-        return true;
-      }
-      return false;
-    }
-
-    // Returns true if the IC _should_ create allocation mementos.
-    bool ShouldCreateAllocationMementos() const {
-      return FLAG_allocation_site_pretenuring &&
-          CouldCreateAllocationMementos();
-    }
-
-    bool HasSideEffects() const {
-      return Max(left_kind_, right_kind_) == GENERIC;
-    }
-
-    // Returns true if the IC should enable the inline smi code (i.e. if either
-    // parameter may be a smi).
-    bool UseInlinedSmiCode() const {
-      return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
-    }
-
-    static const int FIRST_TOKEN = Token::BIT_OR;
-    static const int LAST_TOKEN = Token::MOD;
-
-    Token::Value op() const { return op_; }
-    OverwriteMode mode() const { return mode_; }
-    Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
-
-    Type* GetLeftType(Zone* zone) const {
-      return KindToType(left_kind_, zone);
-    }
-    Type* GetRightType(Zone* zone) const {
-      return KindToType(right_kind_, zone);
-    }
-    Type* GetResultType(Zone* zone) const;
-
-    void Print(StringStream* stream) const;
-
-    void Update(Handle<Object> left,
-                Handle<Object> right,
-                Handle<Object> result);
-
-    Isolate* isolate() const { return isolate_; }
-
-   private:
-    enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
-
-    Kind UpdateKind(Handle<Object> object, Kind kind) const;
-
-    static const char* KindToString(Kind kind);
-    static Type* KindToType(Kind kind, Zone* zone);
-    static bool KindMaybeSmi(Kind kind) {
-      return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
-    }
-
-    // We truncate the last bit of the token.
-    STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
-    class OpField:                 public BitField<int, 0, 4> {};
-    class OverwriteModeField:      public BitField<OverwriteMode, 4, 2> {};
-    class ResultKindField:         public BitField<Kind, 6, 3> {};
-    class LeftKindField:           public BitField<Kind, 9,  3> {};
-    // When fixed right arg is set, we don't need to store the right kind.
-    // Thus the two fields can overlap.
-    class HasFixedRightArgField:   public BitField<bool, 12, 1> {};
-    class FixedRightArgValueField: public BitField<int,  13, 4> {};
-    class RightKindField:          public BitField<Kind, 13, 3> {};
-
-    Token::Value op_;
-    OverwriteMode mode_;
-    Kind left_kind_;
-    Kind right_kind_;
-    Kind result_kind_;
-    Maybe<int> fixed_right_arg_;
-    Isolate* isolate_;
-  };
-
-  explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
-
-  static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
-
-  MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
-                                 Handle<Object> left,
-                                 Handle<Object> right) V8_WARN_UNUSED_RESULT;
-};
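// A minimal, self-contained sketch (not part of this patch) of the Kind
// lattice consulted by BinaryOpIC::State::GetICState above: operand kinds are
// ordered NONE < SMI < INT32 < NUMBER < STRING < GENERIC, and the IC state
// follows from the max and min of the two operand kinds. The InlineCacheState
// names are abbreviated stand-ins.
#include <algorithm>

enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
enum ICState { UNINITIALIZED, MONOMORPHIC, GENERIC_STATE, MEGAMORPHIC };

ICState GetICState(Kind left, Kind right) {
  if (std::max(left, right) == NONE) return UNINITIALIZED;   // nothing seen
  if (std::max(left, right) == GENERIC) return MEGAMORPHIC;  // types gave out
  // Mirrors the original's third test; note it is unreachable, since
  // min == GENERIC implies max == GENERIC, which already returned above.
  if (std::min(left, right) == GENERIC) return GENERIC_STATE;
  return MONOMORPHIC;  // both operands have a recorded, specific kind
}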
-
-
-class CompareIC: public IC {
- public:
-  // The type/state lattice is defined by the following inequations:
-  //   UNINITIALIZED < ...
-  //   ... < GENERIC
-  //   SMI < NUMBER
-  //   INTERNALIZED_STRING < STRING
-  //   KNOWN_OBJECT < OBJECT
-  enum State {
-    UNINITIALIZED,
-    SMI,
-    NUMBER,
-    STRING,
-    INTERNALIZED_STRING,
-    UNIQUE_NAME,    // Symbol or InternalizedString
-    OBJECT,         // JSObject
-    KNOWN_OBJECT,   // JSObject with specific map (faster check)
-    GENERIC
-  };
-
-  static State NewInputState(State old_state, Handle<Object> value);
-
-  static Type* StateToType(Zone* zone,
-                           State state,
-                           Handle<Map> map = Handle<Map>());
-
-  static void StubInfoToType(int stub_minor_key,
-                             Type** left_type,
-                             Type** right_type,
-                             Type** overall_type,
-                             Handle<Map> map,
-                             Zone* zone);
-
-  CompareIC(Isolate* isolate, Token::Value op)
-      : IC(EXTRA_CALL_FRAME, isolate), op_(op) { }
-
-  // Update the inline cache for the given operands.
-  Code* UpdateCaches(Handle<Object> x, Handle<Object> y);
-
-  // Factory method for getting an uninitialized compare stub.
-  static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
-
-  // Helper function for computing the condition for a compare operation.
-  static Condition ComputeCondition(Token::Value op);
-
-  static const char* GetStateName(State state);
-
- private:
-  static bool HasInlinedSmiCode(Address address);
-
-  State TargetState(State old_state,
-                    State old_left,
-                    State old_right,
-                    bool has_inlined_smi_code,
-                    Handle<Object> x,
-                    Handle<Object> y);
-
-  bool strict() const { return op_ == Token::EQ_STRICT; }
-  Condition GetCondition() const { return ComputeCondition(op_); }
-
-  static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
-
-  static void Clear(Isolate* isolate,
-                    Address address,
-                    Code* target,
-                    ConstantPoolArray* constant_pool);
-
-  Token::Value op_;
-
-  friend class IC;
-};
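// A minimal, self-contained sketch (not part of this patch) of the partial
// order described in the comment at the top of CompareIC: transitions may
// only move up the lattice. Only the orderings spelled out there are encoded;
// IsValidTransition is a hypothetical helper, not a CompareIC method.
enum CompareState {
  COMPARE_UNINITIALIZED, COMPARE_SMI, COMPARE_NUMBER, COMPARE_STRING,
  COMPARE_INTERNALIZED_STRING, COMPARE_UNIQUE_NAME, COMPARE_OBJECT,
  COMPARE_KNOWN_OBJECT, COMPARE_GENERIC
};

bool LessThanInLattice(CompareState a, CompareState b) {
  if (a == COMPARE_UNINITIALIZED && b != COMPARE_UNINITIALIZED) return true;
  if (b == COMPARE_GENERIC && a != COMPARE_GENERIC) return true;
  if (a == COMPARE_SMI && b == COMPARE_NUMBER) return true;
  if (a == COMPARE_INTERNALIZED_STRING && b == COMPARE_STRING) return true;
  if (a == COMPARE_KNOWN_OBJECT && b == COMPARE_OBJECT) return true;
  return false;
}

// A transition is monotonic when the new state is not below the old one.
bool IsValidTransition(CompareState old_state, CompareState new_state) {
  return old_state == new_state || LessThanInLattice(old_state, new_state);
}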
-
-
-class CompareNilIC: public IC {
- public:
-  explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
-
-  Handle<Object> CompareNil(Handle<Object> object);
-
-  static Handle<Code> GetUninitialized();
-
-  static void Clear(Address address,
-                    Code* target,
-                    ConstantPoolArray* constant_pool);
-
-  static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
-                                         Handle<Object> object);
-};
-
-
-class ToBooleanIC: public IC {
- public:
-  explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) { }
-
-  Handle<Object> ToBoolean(Handle<Object> object);
-};
-
-
-// Helper for BinaryOpIC and CompareIC.
-enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
-
-DECLARE_RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(UnaryOpIC_Miss);
-DECLARE_RUNTIME_FUNCTION(StoreIC_MissFromStubFailure);
-DECLARE_RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss);
-DECLARE_RUNTIME_FUNCTION(BinaryOpIC_Miss);
-DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite);
-DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss);
-DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss);
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_IC_H_
diff --git a/src/ic/access-compiler.cc b/src/ic/access-compiler.cc
new file mode 100644
index 0000000..c3bf11c
--- /dev/null
+++ b/src/ic/access-compiler.cc
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/access-compiler.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
+                                                      const char* name) {
+  // Create code object in the heap.
+  CodeDesc desc;
+  masm()->GetCode(&desc);
+  Handle<Code> code = factory()->NewCode(desc, flags, masm()->CodeObject());
+  if (code->IsCodeStubOrIC()) code->set_stub_key(CodeStub::NoCacheKey());
+#ifdef ENABLE_DISASSEMBLER
+  if (FLAG_print_code_stubs) {
+    OFStream os(stdout);
+    code->Disassemble(name, os);
+  }
+#endif
+  return code;
+}
+
+
+Handle<Code> PropertyAccessCompiler::GetCodeWithFlags(Code::Flags flags,
+                                                      Handle<Name> name) {
+  return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
+             ? GetCodeWithFlags(flags,
+                                Handle<String>::cast(name)->ToCString().get())
+             : GetCodeWithFlags(flags, NULL);
+}
+
+
+void PropertyAccessCompiler::TailCallBuiltin(MacroAssembler* masm,
+                                             Builtins::Name name) {
+  Handle<Code> code(masm->isolate()->builtins()->builtin(name));
+  GenerateTailCall(masm, code);
+}
+
+
+Register* PropertyAccessCompiler::GetCallingConvention(Code::Kind kind) {
+  if (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC) {
+    return load_calling_convention();
+  }
+  DCHECK(kind == Code::STORE_IC || kind == Code::KEYED_STORE_IC);
+  return store_calling_convention();
+}
+}
+}  // namespace v8::internal
diff --git a/src/ic/access-compiler.h b/src/ic/access-compiler.h
new file mode 100644
index 0000000..928b70b
--- /dev/null
+++ b/src/ic/access-compiler.h
@@ -0,0 +1,83 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_ACCESS_COMPILER_H_
+#define V8_IC_ACCESS_COMPILER_H_
+
+#include "src/code-stubs.h"
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+
+class PropertyAccessCompiler BASE_EMBEDDED {
+ public:
+  static Builtins::Name MissBuiltin(Code::Kind kind) {
+    switch (kind) {
+      case Code::LOAD_IC:
+        return Builtins::kLoadIC_Miss;
+      case Code::STORE_IC:
+        return Builtins::kStoreIC_Miss;
+      case Code::KEYED_LOAD_IC:
+        return Builtins::kKeyedLoadIC_Miss;
+      case Code::KEYED_STORE_IC:
+        return Builtins::kKeyedStoreIC_Miss;
+      default:
+        UNREACHABLE();
+    }
+    return Builtins::kLoadIC_Miss;
+  }
+
+  static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
+
+ protected:
+  PropertyAccessCompiler(Isolate* isolate, Code::Kind kind,
+                         CacheHolderFlag cache_holder)
+      : registers_(GetCallingConvention(kind)),
+        kind_(kind),
+        cache_holder_(cache_holder),
+        isolate_(isolate),
+        masm_(isolate, NULL, 256) {}
+
+  Code::Kind kind() const { return kind_; }
+  CacheHolderFlag cache_holder() const { return cache_holder_; }
+  MacroAssembler* masm() { return &masm_; }
+  Isolate* isolate() const { return isolate_; }
+  Heap* heap() const { return isolate()->heap(); }
+  Factory* factory() const { return isolate()->factory(); }
+
+  Register receiver() const { return registers_[0]; }
+  Register name() const { return registers_[1]; }
+  Register scratch1() const { return registers_[2]; }
+  Register scratch2() const { return registers_[3]; }
+  Register scratch3() const { return registers_[4]; }
+
+  // Calling convention between indexed store IC and handler.
+  Register transition_map() const { return scratch1(); }
+
+  static Register* GetCallingConvention(Code::Kind);
+  static Register* load_calling_convention();
+  static Register* store_calling_convention();
+  static Register* keyed_store_calling_convention();
+
+  Register* registers_;
+
+  static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
+
+  Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
+  Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
+
+ private:
+  Code::Kind kind_;
+  CacheHolderFlag cache_holder_;
+
+  Isolate* isolate_;
+  MacroAssembler masm_;
+};
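// A hedged usage sketch (not part of this patch): subclasses typically route
// their miss paths through MissBuiltin/TailCallBuiltin, as the ARM
// FrontendFooter later in this patch does. HypotheticalCompiler, its
// EmitMissPath method, and the kCacheOnReceiver enumerator are assumptions
// for illustration only.
class HypotheticalCompiler : public PropertyAccessCompiler {
 public:
  explicit HypotheticalCompiler(Isolate* isolate)
      : PropertyAccessCompiler(isolate, Code::LOAD_IC, kCacheOnReceiver) {}

  void EmitMissPath() {
    // For a LOAD_IC compiler this selects Builtins::kLoadIC_Miss and emits a
    // tail call to it.
    TailCallBuiltin(masm(), MissBuiltin(kind()));
  }
};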
+}
+}  // namespace v8::internal
+
+#endif  // V8_IC_ACCESS_COMPILER_H_
diff --git a/src/ic/arm/access-compiler-arm.cc b/src/ic/arm/access-compiler-arm.cc
new file mode 100644
index 0000000..4a4d688
--- /dev/null
+++ b/src/ic/arm/access-compiler-arm.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, r3, r0, r4, r5};
+  return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(r3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  static Register registers[] = {receiver, name, r3, r4, r5};
+  return registers;
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm/handler-compiler-arm.cc b/src/ic/arm/handler-compiler-arm.cc
new file mode 100644
index 0000000..5314d48
--- /dev/null
+++ b/src/ic/arm/handler-compiler-arm.cc
@@ -0,0 +1,840 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> getter) {
+  // ----------- S t a t e -------------
+  //  -- r0    : receiver
+  //  -- r2    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ ldr(receiver,
+               FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ push(value());
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ ldr(receiver,
+               FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ Push(receiver, value());
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ pop(r0);
+
+    // Restore context register.
+    __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(name->IsUniqueName());
+  DCHECK(!receiver.is(scratch0));
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ b(ne, miss_label);
+
+  // Check that receiver is a JSObject.
+  __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ b(lt, miss_label);
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  Register tmp = properties;
+  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+  __ cmp(map, tmp);
+  __ b(ne, miss_label);
+
+  // Restore the temporarily used register.
+  __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  NameDictionaryLookupStub::GenerateNegativeLookup(
+      masm, miss_label, &done, receiver, properties, name, scratch1);
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  Isolate* isolate = masm->isolate();
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->native_context()->get(index)));
+
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+  __ ldr(scratch, MemOperand(cp, offset));
+  __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+  __ Move(ip, function);
+  __ cmp(ip, scratch);
+  __ b(ne, miss);
+
+  // Load its initial map. The global functions all have initial maps.
+  __ Move(prototype, Handle<Map>(function->initial_map()));
+  // Load the prototype from the initial map.
+  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register scratch1,
+    Register scratch2, Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(r0, scratch1);
+  __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  __ mov(scratch, Operand(cell));
+  __ ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(scratch, ip);
+  __ b(ne, miss);
+}
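// A minimal, self-contained sketch (not part of this patch) of the check
// compiled above: the handler embeds the property cell created at compile
// time and verifies at run time that the cell still holds the hole, i.e. the
// property has not been added to the global object since compilation. Cell
// and kTheHole are stand-ins for the real heap objects.
struct Cell { const void* value; };
static const void* const kTheHole = &kTheHole;  // unique sentinel stand-in

// False corresponds to the branch to the miss label.
inline bool GlobalPropertyStillAbsent(const Cell* cell) {
  return cell->value == kTheHole;
}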
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+  __ push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  Register scratch = name;
+  __ mov(scratch, Operand(interceptor));
+  __ push(scratch);
+  __ push(receiver);
+  __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+  __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+                           NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch_in,
+    bool is_store, int argc, Register* values) {
+  DCHECK(!receiver.is(scratch_in));
+  __ push(receiver);
+  // Write the arguments to stack frame.
+  for (int i = 0; i < argc; i++) {
+    Register arg = values[argc - 1 - i];
+    DCHECK(!receiver.is(arg));
+    DCHECK(!scratch_in.is(arg));
+    __ push(arg);
+  }
+  DCHECK(optimization.is_simple_api_call());
+
+  // Abi for CallApiFunctionStub.
+  Register callee = r0;
+  Register call_data = r4;
+  Register holder = r2;
+  Register api_function_address = r1;
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Move(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ Move(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ Move(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ Move(call_data, api_call_info);
+    __ ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+  } else {
+    __ Move(call_data, call_data_obj);
+  }
+
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
+  __ mov(api_function_address, Operand(ref));
+
+  // Jump to stub.
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ bind(label);
+    __ mov(this->name(), Operand(name));
+  }
+}
+
+
+// Generate StoreTransition code; the value is passed in the r0 register.
+// When leaving generated code after success, the receiver_reg and name_reg
+// may be clobbered.  Upon branch to miss_label, the receiver and name
+// registers have their original values.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+    Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+    Register storage_reg, Register value_reg, Register scratch1,
+    Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
+  // r0 : value
+  Label exit;
+
+  int descriptor = transition->LastAdded();
+  DescriptorArray* descriptors = transition->instance_descriptors();
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  DCHECK(!representation.IsNone());
+
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+    __ Move(scratch1, constant);
+    __ cmp(value_reg, scratch1);
+    __ b(ne, miss_label);
+  } else if (representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      __ ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+      Label do_store;
+      while (true) {
+        __ CompareMap(scratch1, it.Current(), &do_store);
+        it.Advance();
+        if (it.Done()) {
+          __ b(ne, miss_label);
+          break;
+        }
+        __ b(eq, &do_store);
+      }
+      __ bind(&do_store);
+    }
+  } else if (representation.IsDouble()) {
+    Label do_store, heap_number;
+    __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
+                          TAG_RESULT, MUTABLE);
+
+    __ JumpIfNotSmi(value_reg, &heap_number);
+    __ SmiUntag(scratch1, value_reg);
+    __ vmov(s0, scratch1);
+    __ vcvt_f64_s32(d0, s0);
+    __ jmp(&do_store);
+
+    __ bind(&heap_number);
+    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+                DONT_DO_SMI_CHECK);
+    __ vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ bind(&do_store);
+    __ vstr(d0, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+  }
+
+  // Stub never generated for objects that require access checks.
+  DCHECK(!transition->is_access_check_needed());
+
+  // Perform map transition for the receiver if necessary.
+  if (details.type() == FIELD &&
+      Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ push(receiver_reg);
+    __ mov(r2, Operand(transition));
+    __ Push(r2, r0);
+    __ TailCallExternalReference(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                          isolate()),
+        3, 1);
+    return;
+  }
+
+  // Update the map of the object.
+  __ mov(scratch1, Operand(transition));
+  __ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+  // Update the write barrier for the map field.
+  __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  if (details.type() == CONSTANT) {
+    DCHECK(value_reg.is(r0));
+    __ Ret();
+    return;
+  }
+
+  int index = transition->instance_descriptors()->GetFieldIndex(
+      transition->LastAdded());
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= transition->inobject_properties();
+
+  // TODO(verwaest): Share this code as a code stub.
+  SmiCheck smi_check =
+      representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = transition->instance_size() + (index * kPointerSize);
+    if (representation.IsDouble()) {
+      __ str(storage_reg, FieldMemOperand(receiver_reg, offset));
+    } else {
+      __ str(value_reg, FieldMemOperand(receiver_reg, offset));
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+                          kLRHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array
+    __ ldr(scratch1,
+           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    if (representation.IsDouble()) {
+      __ str(storage_reg, FieldMemOperand(scratch1, offset));
+    } else {
+      __ str(value_reg, FieldMemOperand(scratch1, offset));
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+                          kLRHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
+    }
+  }
+
+  // Return the value (register r0).
+  DCHECK(value_reg.is(r0));
+  __ bind(&exit);
+  __ Ret();
+}
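// A minimal, self-contained sketch (not part of this patch) of the
// field-offset computation used above: after the in-object property count is
// subtracted, a negative index addresses storage inside the object itself,
// while a non-negative index addresses the out-of-line properties array. The
// header-size constant is a stand-in.
#include <cstddef>

const ptrdiff_t kPointerSize = sizeof(void*);
const ptrdiff_t kFixedArrayHeaderSize = 2 * kPointerSize;  // stand-in

// Returns the byte offset of the field and reports where it lives.
ptrdiff_t FieldOffset(int adjusted_index, ptrdiff_t instance_size,
                      bool* in_object) {
  *in_object = adjusted_index < 0;
  return *in_object
             ? instance_size + adjusted_index * kPointerSize
             : adjusted_index * kPointerSize + kFixedArrayHeaderSize;
}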
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+                                                   Register value_reg,
+                                                   Label* miss_label) {
+  DCHECK(lookup->representation().IsHeapObject());
+  __ JumpIfSmi(value_reg, miss_label);
+  HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+  __ ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+  Label do_store;
+  while (true) {
+    __ CompareMap(scratch1(), it.Current(), &do_store);
+    it.Advance();
+    if (it.Done()) {
+      __ b(ne, miss_label);
+      break;
+    }
+    __ b(eq, &do_store);
+  }
+  __ bind(&do_store);
+
+  StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                      lookup->representation());
+  GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss,
+    PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+  // Make sure there's no overlap between holder and object registers.
+  DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type()->IsConstant()) {
+    current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+  }
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() ||
+             current->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+
+      __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      Register map_reg = scratch1;
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        // CheckMap implicitly loads the map of |reg| into |map_reg|.
+        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+      } else {
+        __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      // This allows us to install generated handlers for accesses to the
+      // global proxy (as opposed to using slow ICs). See corresponding code
+      // in LookupForRead().
+      if (current_map->IsJSGlobalProxyMap()) {
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      }
+
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      // Two possible reasons for loading the prototype from the map:
+      // (1) Can't store references to new space in code.
+      // (2) Handler is shared for all receivers with the same prototype
+      //     map (but not necessarily the same prototype instance).
+      bool load_prototype_from_map =
+          heap()->InNewSpace(*prototype) || depth == 1;
+      if (load_prototype_from_map) {
+        __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+      } else {
+        __ mov(reg, Operand(prototype));
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    // Check the holder map.
+    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  DCHECK(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
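// A minimal, self-contained sketch (not part of this patch) of the walk that
// CheckPrototypes compiles: starting from the receiver's map, follow the
// prototype chain one map at a time until the holder's map is reached,
// treating any mismatch as a miss. MapLike stands in for Map; the compiled
// code performs the equivalent checks against maps captured at compile time.
#include <cstddef>

struct MapLike {
  const MapLike* prototype_map;  // map of this map's prototype object
};

// False corresponds to the branch to the miss label.
bool WalkToHolder(const MapLike* receiver_map, const MapLike* holder_map) {
  const MapLike* current = receiver_map;
  while (current != holder_map) {
    if (current == NULL) return false;  // chain ended before the holder
    current = current->prototype_map;   // one load of Map::kPrototypeOffset
  }
  return true;  // every map between receiver and holder checked out
}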
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ b(&success);
+    __ bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ b(&success);
+    GenerateRestoreName(miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ Move(r0, value);
+  __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<ExecutableAccessorInfo> callback) {
+  // Build AccessorInfo::args_ list on the stack and push property name below
+  // the exit frame to make GC aware of them and store pointers to them.
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+  DCHECK(!scratch2().is(reg));
+  DCHECK(!scratch3().is(reg));
+  DCHECK(!scratch4().is(reg));
+  __ push(receiver());
+  if (heap()->InNewSpace(callback->data())) {
+    __ Move(scratch3(), callback);
+    __ ldr(scratch3(),
+           FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+  } else {
+    __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
+  }
+  __ push(scratch3());
+  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+  __ mov(scratch4(), scratch3());
+  __ Push(scratch3(), scratch4());
+  __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
+  __ Push(scratch4(), reg);
+  __ mov(scratch2(), sp);  // scratch2 = PropertyAccessorInfo::args_
+  __ push(name());
+
+  // Abi for CallApiGetter
+  Register getter_address_reg = ApiGetterDescriptor::function_address();
+
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, isolate());
+  __ mov(getter_address_reg, Operand(ref));
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it is different from the
+  // holder and it is needed should the interceptor return without any result.
+  // The ACCESSOR case needs the receiver to be passed into C++ code; the
+  // FIELD case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
+    if (must_preserve_receiver_reg) {
+      __ Push(receiver(), holder_reg, this->name());
+    } else {
+      __ Push(holder_reg, this->name());
+    }
+    // Invoke an interceptor.  Note: map checks from receiver to
+    // interceptor's holder have been compiled before (see the caller of
+    // this method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if interceptor provided a value for property.  If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+    __ cmp(r0, scratch1());
+    __ b(eq, &interceptor_failed);
+    frame_scope.GenerateLeaveFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    __ pop(this->name());
+    __ pop(holder_reg);
+    if (must_preserve_receiver_reg) {
+      __ pop(receiver());
+    }
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  // Call the runtime system to load the interceptor.
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(
+      ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  Register holder_reg = Frontend(receiver(), name);
+
+  __ push(receiver());  // receiver
+  __ push(holder_reg);
+  __ mov(ip, Operand(callback));  // callback info
+  __ push(ip);
+  __ mov(ip, Operand(name));
+  __ Push(ip, value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+    Handle<Name> name) {
+  __ Push(receiver(), this->name(), value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property = ExternalReference(
+      IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+  FrontendHeader(receiver(), name, &miss);
+
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  __ mov(result, Operand(cell));
+  __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    __ b(eq, &miss);
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+  __ Ret();
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm/ic-arm.cc b/src/ic/arm/ic-arm.cc
new file mode 100644
index 0000000..ae13161
--- /dev/null
+++ b/src/ic/arm/ic-arm.cc
@@ -0,0 +1,1019 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ b(eq, global_object);
+  __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ b(eq, global_object);
+  __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE));
+  __ b(eq, global_object);
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done. It can be the same as elements or name, in
+//           which case that register is clobbered when the miss label is not
+//           taken.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+                                   Register elements, Register name,
+                                   Register result, Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry check that the value is a normal
+  // property.
+  __ bind(&done);  // scratch2 == elements + 4 * index
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
+  __ b(ne, miss);
+
+  // Get the value at the masked, scaled index and return.
+  __ ldr(result,
+         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
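// A minimal, self-contained sketch (not part of this patch) of the dictionary
// entry layout the helper above relies on: entries are (key, value, details)
// triples laid out after the dictionary header, so once probing yields the
// entry's base offset, the value and details live at fixed word offsets from
// it. The header size and start index below are stand-ins, not V8's values.
#include <cstddef>

const size_t kPointerSize = sizeof(void*);
const size_t kHeaderSize = 3 * kPointerSize;   // stand-in dictionary header
const size_t kElementsStartIndex = 3;          // stand-in first-entry index
const size_t kElementsStartOffset =
    kHeaderSize + kElementsStartIndex * kPointerSize;

// Mirrors the offsets in the helper: the value slot is one word past the key
// slot and the details slot is two words past it.
const size_t kValueOffset = kElementsStartOffset + 1 * kPointerSize;
const size_t kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;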
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+                                    Register elements, Register name,
+                                    Register value, Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry in the dictionary, check that the value
+  // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + 4 * index
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY))
+      << kSmiTagSize;
+  __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ tst(scratch1, Operand(kTypeAndReadOnlyMask));
+  __ b(ne, miss);
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ str(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ mov(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS objects.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver, Register map,
+                                           Register scratch,
+                                           int interceptor_bit, Label* slow) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ tst(scratch,
+         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+  __ b(ne, slow);
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ cmp(scratch, Operand(JS_OBJECT_TYPE));
+  __ b(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register elements,
+                                  Register scratch1, Register scratch2,
+                                  Register result, Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used to hold elements map and elements length.
+  //            Holds the elements map if not_fast_array branch is taken.
+  //
+  // scratch2 - used to hold the loaded value.
+
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
+    __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+    __ cmp(scratch1, ip);
+    __ b(ne, not_fast_array);
+  } else {
+    __ AssertFastElements(elements);
+  }
+  // Check that the key (index) is within bounds.
+  __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmp(key, Operand(scratch1));
+  __ b(hs, out_of_range);
+  // Fast case: Do the load.
+  __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(scratch2, ip);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ b(eq, out_of_range);
+  __ mov(result, scratch2);
+}
+
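+// Roughly, the generated code above corresponds to the following C-like
+// sketch (for orientation only; 'the_hole' stands for the hole sentinel):
+//
+//   elements = receiver->elements;
+//   if (not_fast_array != NULL && elements->map != fixed_array_map)
+//     goto not_fast_array;
+//   if ((unsigned)key >= (unsigned)elements->length) goto out_of_range;
+//   value = elements[key];
+//   if (value == the_hole) goto out_of_range;  // prototype chain may hold it
+//   result = value;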
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map, Register hash,
+                                 Label* index_string, Label* not_unique) {
+  // The key is not a smi.
+  Label unique;
+  // Is it a name?
+  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
+  __ b(hi, not_unique);
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ b(eq, &unique);
+
+  // Is the string an array index, with cached numeric value?
+  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask));
+  __ b(eq, index_string);
+
+  // Is the string internalized? We know it's a string, so a single
+  // bit test is enough.
+  // map: key map
+  __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0);
+  __ tst(hash, Operand(kIsNotInternalizedMask));
+  __ b(ne, not_unique);
+
+  __ bind(&unique);
+}
+
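+// In summary, the check above classifies a non-smi key as follows (a
+// restatement of the control flow, not new behaviour):
+//   - not a name at all                          -> not_unique
+//   - a string with a cached array index in its
+//     hash field                                 -> index_string
+//   - a non-internalized string                  -> not_unique
+//   - a symbol or internalized string            -> falls through as unique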
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = r0;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+  Label slow;
+
+  __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+                                     JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), r0, r3, r4);
+  __ Ret();
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+// A register that isn't one of the parameters to the load IC.
+static const Register LoadIC_TempRegister() { return r3; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in lr.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
+
+  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in lr.
+
+  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+static MemOperand GenerateMappedArgumentsLookup(
+    MacroAssembler* masm, Register object, Register key, Register scratch1,
+    Register scratch2, Register scratch3, Label* unmapped_case,
+    Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the map check
+  // later, we do not need to check for interceptors or whether it
+  // requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
+  __ b(lt, slow_case);
+
+  // Check that the key is a positive smi.
+  __ tst(key, Operand(0x80000001));
+  __ b(ne, slow_case);
+
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+  __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+  __ sub(scratch2, scratch2, Operand(Smi::FromInt(2)));
+  __ cmp(key, Operand(scratch2));
+  __ b(cs, unmapped_case);
+
+  // Load element index and check whether it is the hole.
+  const int kOffset =
+      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+  __ mov(scratch3, Operand(kPointerSize >> 1));
+  __ mul(scratch3, key, scratch3);
+  __ add(scratch3, scratch3, Operand(kOffset));
+
+  __ ldr(scratch2, MemOperand(scratch1, scratch3));
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  __ cmp(scratch2, scratch3);
+  __ b(eq, unmapped_case);
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  __ mov(scratch3, Operand(kPointerSize >> 1));
+  __ mul(scratch3, scratch2, scratch3);
+  __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+  return MemOperand(scratch1, scratch3);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                                  Register key,
+                                                  Register parameter_map,
+                                                  Register scratch,
+                                                  Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map. The parameter_map register
+  // must be loaded with the parameter map of the arguments object and is
+  // overwritten.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+              DONT_DO_SMI_CHECK);
+  __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+  __ cmp(key, Operand(scratch));
+  __ b(cs, slow_case);
+  __ mov(scratch, Operand(kPointerSize >> 1));
+  __ mul(scratch, key, scratch);
+  __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  return MemOperand(backing_store, scratch);
+}
+
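+// Both lookup helpers above walk the sloppy-arguments "parameter map". As
+// implied by the offsets used, it is a FixedArray laid out as:
+//
+//   index 0:             context
+//   index 1:             arguments backing store (a FixedArray)
+//   index 2 .. length-1: context slot index for a mapped parameter, or the
+//                        hole if that parameter is unmapped
+//
+// A mapped key therefore resolves to a MemOperand into the context, while an
+// unmapped key resolves to a MemOperand into the backing store.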
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(receiver.is(r1));
+  DCHECK(key.is(r2));
+  DCHECK(value.is(r0));
+
+  Label slow, notin;
+  MemOperand mapped_location = GenerateMappedArgumentsLookup(
+      masm, receiver, key, r3, r4, r5, &notin, &slow);
+  __ str(value, mapped_location);
+  __ add(r6, r3, r5);
+  __ mov(r9, value);
+  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in r3.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow);
+  __ str(value, unmapped_location);
+  __ add(r6, r3, r4);
+  __ mov(r9, value);
+  __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in lr.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in lr.
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // The return address is in lr.
+  Label slow, check_name, index_smi, index_name, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register key = LoadDescriptor::NameRegister();
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  DCHECK(key.is(r2));
+  DCHECK(receiver.is(r1));
+
+  Isolate* isolate = masm->isolate();
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_name);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
+                                 Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(r0, r3, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3);
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset));
+
+  // Check whether the elements object is a number dictionary.
+  // r3: elements map
+  // r4: elements
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r3, ip);
+  __ b(ne, &slow);
+  __ SmiUntag(r0, key);
+  __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5);
+  __ Ret();
+
+  // Slow case, key and receiver still in r2 and r1.
+  __ bind(&slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4,
+                      r3);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_name);
+  GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow);
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3,
+                                 Map::kHasNamedInterceptor, &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r4, ip);
+  __ b(eq, &probe_dictionary);
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the name hash.
+  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift));
+  __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift));
+  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+  __ And(r3, r3, Operand(mask));
+
+  // Load the key (consisting of map and unique name) from the cache and
+  // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+
+  __ mov(r4, Operand(cache_keys));
+  __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1));
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    // Load map and move r4 to next entry.
+    __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex));
+    __ cmp(r0, r5);
+    __ b(ne, &try_next_entry);
+    __ ldr(r5, MemOperand(r4, -kPointerSize));  // Load name
+    __ cmp(key, r5);
+    __ b(eq, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
+
+  // Last entry: Load map and move r4 to name.
+  __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex));
+  __ cmp(r0, r5);
+  __ b(ne, &slow);
+  __ ldr(r5, MemOperand(r4));
+  __ cmp(key, r5);
+  __ b(ne, &slow);
+
+  // Get field offset.
+  // r0     : receiver's map
+  // r3     : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    __ mov(r4, Operand(cache_field_offsets));
+    if (i != 0) {
+      __ add(r3, r3, Operand(i));
+    }
+    __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2));
+    __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset));
+    __ sub(r5, r5, r6, SetCC);
+    __ b(ge, &property_array_property);
+    if (i != 0) {
+      __ jmp(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ bind(&load_in_object_property);
+  __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset));
+  __ add(r6, r6, r5);  // Index from start of object.
+  __ sub(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
+  __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      r4, r3);
+  __ Ret();
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      r4, r3);
+  __ Ret();
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // r3: elements
+  __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, r0, &slow);
+  // Load the property to r0.
+  GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4,
+                      r3);
+  __ Ret();
+
+  __ bind(&index_name);
+  __ IndexFromHash(r3, key);
+  // Now jump to the place where smi keys are handled.
+  __ jmp(&index_smi);
+}
+
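+// Note on the keyed lookup cache probed above: as used here, cache_keys is a
+// table of (map, name) pairs, kEntriesPerBucket entries to a bucket, and
+// cache_field_offsets is a parallel table of field indices. On a hit the code
+// computes, in effect,
+//
+//   index = field_offset - map->inobject_properties
+//
+// and loads from the property array when index >= 0, or from inside the
+// object itself (instance_size + index words from the object start, i.e.
+// counting back from the end of the in-object fields) when it is negative.
+// This is a reading of the code above, not a specification of the cache.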
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  // Return address is in lr.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register scratch = r3;
+  Register result = r0;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+    Register value, Register key, Register receiver, Register receiver_map,
+    Register elements_map, Register elements) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(fast_object);
+  Register scratch_value = r4;
+  Register address = r5;
+  if (check_map == kCheckMap) {
+    __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ cmp(elements_map,
+           Operand(masm->isolate()->factory()->fixed_array_map()));
+    __ b(ne, fast_double);
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+  Label holecheck_passed1;
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ldr(scratch_value,
+         MemOperand::PointerAddressFromSmiKey(address, key, PreIndex));
+  __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()));
+  __ b(ne, &holecheck_passed1);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+                                      slow);
+
+  __ bind(&holecheck_passed1);
+
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  // It's irrelevant whether the array is smi-only when writing a smi.
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ str(value, MemOperand::PointerAddressFromSmiKey(address, key));
+  __ Ret();
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, scratch_value,
+                             &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(address, address, Operand::PointerOffsetFromSmiKey(key));
+  __ str(value, MemOperand(address));
+  // Update write barrier for the elements array address.
+  __ mov(scratch_value, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+    __ b(ne, slow);
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+  __ add(address, elements,
+         Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) -
+                 kHeapObjectTag));
+  __ ldr(scratch_value,
+         MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex));
+  __ cmp(scratch_value, Operand(kHoleNanUpper32));
+  __ b(ne, &fast_double_without_map_check);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+                                      slow);
+
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, key, elements, r3, d0,
+                                 &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Ret();
+
+  __ bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex);
+  __ b(ne, &non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   receiver_map, mode, slow);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, r4, slow);
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, r4, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+}
+
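+// Summary of the elements-kind transitions handled above (a restatement of
+// the three transition labels, not additional behaviour):
+//   - storing a HeapNumber into FAST_SMI_ELEMENTS
+//       -> transition to FAST_DOUBLE_ELEMENTS, then retry the double store;
+//   - storing any other non-smi into FAST_SMI_ELEMENTS
+//       -> transition to FAST_ELEMENTS, then retry the object store;
+//   - storing a non-HeapNumber object into FAST_DOUBLE_ELEMENTS
+//       -> transition to FAST_ELEMENTS, then retry the object store.
+// Each transition installs the new map and reloads the elements backing
+// store before jumping back into the fast path.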
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictMode strict_mode) {
+  // ---------- S t a t e --------------
+  //  -- r0     : value
+  //  -- r1     : key
+  //  -- r2     : receiver
+  //  -- lr     : return address
+  // -----------------------------------
+  Label slow, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array;
+
+  // Register usage.
+  Register value = StoreDescriptor::ValueRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  DCHECK(receiver.is(r1));
+  DCHECK(key.is(r2));
+  DCHECK(value.is(r0));
+  Register receiver_map = r3;
+  Register elements_map = r6;
+  Register elements = r9;  // Elements array of the receiver.
+  // r4 and r5 are used as general scratch registers.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow);
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+  // Get the map of the object.
+  __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks and is not
+  // observed. The generic stub does not perform map checks or handle
+  // observed objects.
+  __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ b(ne, &slow);
+  // Check if the object is a JS array or not.
+  __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+  __ cmp(r4, Operand(JS_ARRAY_TYPE));
+  __ b(eq, &array);
+  // Check that the object is some kind of JSObject.
+  __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
+  __ b(lt, &slow);
+
+  // Object case: Check key against length in the elements array.
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmp(key, Operand(ip));
+  __ b(lo, &fast_object);
+
+  // Slow case, handle jump to runtime.
+  __ bind(&slow);
+  // Entry registers are intact.
+  // r0: value.
+  // r1: key.
+  // r2: receiver.
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // Condition code from comparing key and array length is still available.
+  __ b(ne, &slow);  // Only support writing to array[array.length].
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmp(key, Operand(ip));
+  __ b(hs, &slow);
+  __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ b(ne, &check_if_double_array);
+  __ jmp(&fast_object_grow);
+
+  __ bind(&check_if_double_array);
+  __ cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ b(ne, &slow);
+  __ jmp(&fast_double_grow);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+  __ bind(&array);
+  __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array.
+  __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ cmp(key, Operand(ip));
+  __ b(hs, &extra);
+
+  KeyedStoreGenerateGenericHelper(
+      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+      value, key, receiver, receiver_map, elements_map, elements);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength, value,
+                                  key, receiver, receiver_map, elements_map,
+                                  elements);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(receiver.is(r1));
+  DCHECK(name.is(r2));
+  DCHECK(StoreDescriptor::ValueRegister().is(r0));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+                                               name, r3, r4, r5, r6);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Label miss;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Register dictionary = r3;
+  DCHECK(receiver.is(r1));
+  DCHECK(name.is(r2));
+  DCHECK(value.is(r0));
+
+  __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5);
+  GenerateMiss(masm);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address cmp_instruction_address =
+      Assembler::return_address_from_call_start(address);
+
+  // If the instruction following the call is not a cmp rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(cmp_instruction_address);
+  return Assembler::IsCmpImmediate(instr);
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  Address cmp_instruction_address =
+      Assembler::return_address_from_call_start(address);
+
+  // If the instruction following the call is not a cmp rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(cmp_instruction_address);
+  if (!Assembler::IsCmpImmediate(instr)) {
+    return;
+  }
+
+  // The delta to the start of the map check instruction and the
+  // condition code used at the patched jump.
+  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
+  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask;
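+  // (Presumably the site that emitted the check split the delta across the
+  // instruction: the low part in the 12-bit immediate and the rest in the
+  // register field scaled by kOff12Mask, so deltas wider than 12 bits can be
+  // encoded.)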
+  // If the delta is 0, the instruction is cmp r0, #0, which also signals
+  // that nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n", address,
+           cmp_instruction_address, delta);
+  }
+
+  Address patch_address =
+      cmp_instruction_address - delta * Instruction::kInstrSize;
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   cmp rx, rx
+  //   b eq/ne, <target>
+  // to
+  //   tst rx, #kSmiTagMask
+  //   b ne/eq, <target>
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Assembler::GetRn(instr_at_patch);
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
+    DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(),
+              Assembler::GetRm(instr_at_patch).code());
+    patcher.masm()->tst(reg, Operand(kSmiTagMask));
+  } else {
+    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+    DCHECK(Assembler::IsTstImmediate(instr_at_patch));
+    patcher.masm()->cmp(reg, reg);
+  }
+  DCHECK(Assembler::IsBranch(branch_instr));
+  if (Assembler::GetCondition(branch_instr) == eq) {
+    patcher.EmitCondition(ne);
+  } else {
+    DCHECK(Assembler::GetCondition(branch_instr) == ne);
+    patcher.EmitCondition(eq);
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm/ic-compiler-arm.cc b/src/ic/arm/ic-compiler-arm.cc
new file mode 100644
index 0000000..7bef56e
--- /dev/null
+++ b/src/ic/arm/ic-compiler-arm.cc
@@ -0,0 +1,130 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                                    StrictMode strict_mode) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+  __ Push(r0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+                                                    Handle<Name> name,
+                                                    Code::StubType type,
+                                                    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // In case we are compiling an IC for dictionary loads and stores, just
+    // check whether the name is unique.
+    if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+    } else {
+      __ cmp(this->name(), Operand(name));
+      __ b(ne, &miss);
+    }
+  }
+
+  Label number_case;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+  Register map_reg = scratch1();
+  DCHECK(kind() != Code::KEYED_STORE_IC ||
+         map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
+  __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      __ mov(ip, Operand(map));
+      __ cmp(map_reg, ip);
+      if (type->Is(HeapType::Number())) {
+        DCHECK(!number_case.is_unused());
+        __ bind(&number_case);
+      }
+      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+    }
+  }
+  DCHECK(number_of_handled_maps != 0);
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+  return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+  __ JumpIfSmi(receiver(), &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int i = 0; i < receiver_count; ++i) {
+    __ mov(ip, Operand(receiver_maps->at(i)));
+    __ cmp(scratch1(), ip);
+    if (transitioned_maps->at(i).is_null()) {
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+    } else {
+      Label next_map;
+      __ b(ne, &next_map);
+      __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+      __ bind(&next_map);
+    }
+  }
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm/stub-cache-arm.cc b/src/ic/arm/stub-cache-arm.cc
new file mode 100644
index 0000000..bc8b0fb
--- /dev/null
+++ b/src/ic/arm/stub-cache-arm.cc
@@ -0,0 +1,175 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, bool leave_frame,
+                       StubCache::Table table, Register receiver, Register name,
+                       // Number of the cache entry, not scaled.
+                       Register offset, Register scratch, Register scratch2,
+                       Register offset_scratch) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
+
+  // Check the relative positions of the address fields.
+  DCHECK(value_off_addr > key_off_addr);
+  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+  DCHECK(map_off_addr > key_off_addr);
+  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register base_addr = scratch;
+  scratch = no_reg;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ add(offset_scratch, offset, Operand(offset, LSL, 1));
+
+  // Calculate the base address of the entry.
+  __ mov(base_addr, Operand(key_offset));
+  __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2));
+
+  // Check that the key in the entry matches the name.
+  __ ldr(ip, MemOperand(base_addr, 0));
+  __ cmp(name, ip);
+  __ b(ne, &miss);
+
+  // Check the map matches.
+  __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+  __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ cmp(ip, scratch2);
+  __ b(ne, &miss);
+
+  // Get the code entry from the cache.
+  Register code = scratch2;
+  scratch2 = no_reg;
+  __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  Register flags_reg = base_addr;
+  base_addr = no_reg;
+  __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+  // It's a nice optimization if this constant is encodable in the bic insn.
+
+  uint32_t mask = Code::kFlagsNotUsedInLookup;
+  DCHECK(__ ImmediateFitsAddrMode1Instruction(mask));
+  __ bic(flags_reg, flags_reg, Operand(mask));
+  __ cmp(flags_reg, Operand(flags));
+  __ b(ne, &miss);
+
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ jmp(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ jmp(&miss);
+  }
+#endif
+
+  if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+  // Jump to the first instruction in the code stub.
+  __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Miss: fall through.
+  __ bind(&miss);
+}
+
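+// A sketch of the table layout the probe above assumes (and that the DCHECKs
+// verify): each stub cache entry is three consecutive words { name, code,
+// map }, which is why the unscaled entry number is multiplied by 3, and the
+// code and map slots are reached from the key slot via small fixed byte
+// offsets that fit in a load immediate.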
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+                              bool leave_frame, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 12.
+  DCHECK(sizeof(Entry) == 12);
+
+  // Make sure the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  DCHECK(!scratch.is(receiver));
+  DCHECK(!scratch.is(name));
+  DCHECK(!extra.is(receiver));
+  DCHECK(!extra.is(name));
+  DCHECK(!extra.is(scratch));
+  DCHECK(!extra2.is(receiver));
+  DCHECK(!extra2.is(name));
+  DCHECK(!extra2.is(scratch));
+  DCHECK(!extra2.is(extra));
+
+  // Check that the scratch, extra, extra2 and extra3 registers are valid.
+  DCHECK(!scratch.is(no_reg));
+  DCHECK(!extra.is(no_reg));
+  DCHECK(!extra2.is(no_reg));
+  DCHECK(!extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+                      extra3);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+  __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ add(scratch, scratch, Operand(ip));
+  uint32_t mask = kPrimaryTableSize - 1;
+  // We shift out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.
+  __ mov(scratch, Operand(scratch, LSR, kCacheIndexShift));
+  // Mask down the eor argument to the minimum to keep the immediate
+  // ARM-encodable.
+  __ eor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+  // Prefer and_ to ubfx here because ubfx takes 2 cycles.
+  __ and_(scratch, scratch, Operand(mask));
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+  uint32_t mask2 = kSecondaryTableSize - 1;
+  __ add(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+  __ and_(scratch, scratch, Operand(mask2));
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+                      extra3);
+}
+
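+// In effect, the two probes above compute (a C-like sketch of the hashing,
+// with the shifts and masks as in the code):
+//
+//   primary   = (((name->hash + receiver->map) >> kCacheIndexShift)
+//                ^ (flags >> kCacheIndexShift)) & (kPrimaryTableSize - 1);
+//   secondary = ((primary - (name >> kCacheIndexShift))
+//                + (flags >> kCacheIndexShift)) & (kSecondaryTableSize - 1);
+//
+// so a secondary probe reuses the primary index rather than rehashing from
+// scratch.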
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM
diff --git a/src/ic/arm64/access-compiler-arm64.cc b/src/ic/arm64/access-compiler-arm64.cc
new file mode 100644
index 0000000..58e6099
--- /dev/null
+++ b/src/ic/arm64/access-compiler-arm64.cc
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+// TODO(all): The so-called scratch registers are significant in some cases. For
+// example, PropertyAccessCompiler::keyed_store_calling_convention()[3] (x3)
+// is actually used for KeyedStoreCompiler::transition_map(). We should
+// verify which registers are actually scratch registers, and which are
+// important. For now, we use the same assignments as ARM to remain on the
+// safe side.
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, x3, x0, x4, x5};
+  return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(x3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  static Register registers[] = {receiver, name, x3, x4, x5};
+  return registers;
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/ic/arm64/handler-compiler-arm64.cc b/src/ic/arm64/handler-compiler-arm64.cc
new file mode 100644
index 0000000..f7f82bc
--- /dev/null
+++ b/src/ic/arm64/handler-compiler-arm64.cc
@@ -0,0 +1,847 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(!AreAliased(receiver, scratch0, scratch1));
+  DCHECK(name->IsUniqueName());
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ Tst(scratch0, kInterceptorOrAccessCheckNeededMask);
+  __ B(ne, miss_label);
+
+  // Check that receiver is a JSObject.
+  __ Ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Cmp(scratch0, FIRST_SPEC_OBJECT_TYPE);
+  __ B(lt, miss_label);
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ Ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ Ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  __ JumpIfNotRoot(map, Heap::kHashTableMapRootIndex, miss_label);
+
+  NameDictionaryLookupStub::GenerateNegativeLookup(
+      masm, miss_label, &done, receiver, properties, name, scratch1);
+  __ Bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  Isolate* isolate = masm->isolate();
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->native_context()->get(index)));
+
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  __ Ldr(scratch, GlobalObjectMemOperand());
+  __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ Ldr(scratch, ContextMemOperand(scratch, index));
+  __ Cmp(scratch, Operand(function));
+  __ B(ne, miss);
+
+  // Load its initial map. The global functions all have initial maps.
+  __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+  // Load the prototype from the initial map.
+  __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register scratch1,
+    Register scratch2, Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  // TryGetFunctionPrototype can't put the result directly in x0 because the
+  // 3 input registers can't alias and we call this function from
+  // LoadIC::GenerateFunctionPrototype, where the receiver is x0. So we
+  // explicitly move the result into x0.
+  __ Mov(x0, scratch1);
+  __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  __ Mov(scratch, Operand(cell));
+  __ Ldr(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ JumpIfNotRoot(scratch, Heap::kTheHoleValueRootIndex, miss);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+
+  __ Push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  Register scratch = name;
+  __ Mov(scratch, Operand(interceptor));
+  __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+
+  __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+                           NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate a call to an API function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch,
+    bool is_store, int argc, Register* values) {
+  DCHECK(!AreAliased(receiver, scratch));
+
+  MacroAssembler::PushPopQueue queue(masm);
+  queue.Queue(receiver);
+  // Write the arguments to the stack frame.
+  for (int i = 0; i < argc; i++) {
+    Register arg = values[argc - 1 - i];
+    DCHECK(!AreAliased(receiver, scratch, arg));
+    queue.Queue(arg);
+  }
+  queue.PushQueued();
+
+  DCHECK(optimization.is_simple_api_call());
+
+  // ABI for CallApiFunctionStub.
+  Register callee = x0;
+  Register call_data = x4;
+  Register holder = x2;
+  Register api_function_address = x1;
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Mov(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ LoadObject(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ LoadObject(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ LoadObject(call_data, api_call_info);
+    __ Ldr(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+  } else {
+    __ LoadObject(call_data, call_data_obj);
+  }
+
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
+  ExternalReference ref = ExternalReference(
+      &fun, ExternalReference::DIRECT_API_CALL, masm->isolate());
+  __ Mov(api_function_address, ref);
+
+  // Jump to stub.
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  // -----------------------------------
+  Label miss;
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ Push(value());
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ Ldr(receiver,
+               FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ Push(receiver, value());
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ Pop(x0);
+
+    // Restore context register.
+    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
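+
+// Illustrative JS example for the value-restoring dance above: the
+// expression `obj.p = 42` must evaluate to 42 even if the setter for `p`
+// returns something else, which is why the incoming value is pushed on
+// entry and popped back into x0 before returning.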
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> getter) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ Ldr(receiver,
+               FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ Push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ Ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  // Push receiver, name and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  ASM_LOCATION("ElementHandlerCompiler::GenerateStoreSlow");
+
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+  FrontendHeader(receiver(), name, &miss);
+
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  __ Mov(result, Operand(cell));
+  __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ JumpIfRoot(result, Heap::kTheHoleValueRootIndex, &miss);
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+  __ Ret();
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+    Handle<Name> name) {
+  Label miss;
+
+  ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreInterceptor");
+
+  __ Push(receiver(), this->name(), value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property = ExternalReference(
+      IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ Bind(label);
+    __ Mov(this->name(), Operand(name));
+  }
+}
+
+
+// Generate StoreTransition code; the value is passed in the x0 register.
+// When leaving generated code after success, the receiver_reg and storage_reg
+// may be clobbered. Upon branch to miss_label, the receiver and name registers
+// have their original values.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+    Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+    Register storage_reg, Register value_reg, Register scratch1,
+    Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
+  Label exit;
+
+  DCHECK(!AreAliased(receiver_reg, storage_reg, value_reg, scratch1, scratch2,
+                     scratch3));
+
+  // We don't need scratch3.
+  scratch3 = NoReg;
+
+  int descriptor = transition->LastAdded();
+  DescriptorArray* descriptors = transition->instance_descriptors();
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  DCHECK(!representation.IsNone());
+
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+    __ LoadObject(scratch1, constant);
+    __ Cmp(value_reg, scratch1);
+    __ B(ne, miss_label);
+  } else if (representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      __ Ldr(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+      Label do_store;
+      while (true) {
+        __ CompareMap(scratch1, it.Current());
+        it.Advance();
+        if (it.Done()) {
+          __ B(ne, miss_label);
+          break;
+        }
+        __ B(eq, &do_store);
+      }
+      __ Bind(&do_store);
+    }
+  } else if (representation.IsDouble()) {
+    UseScratchRegisterScope temps(masm());
+    DoubleRegister temp_double = temps.AcquireD();
+    __ SmiUntagToDouble(temp_double, value_reg, kSpeculativeUntag);
+
+    Label do_store;
+    __ JumpIfSmi(value_reg, &do_store);
+
+    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+                DONT_DO_SMI_CHECK);
+    __ Ldr(temp_double, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ Bind(&do_store);
+    __ AllocateHeapNumber(storage_reg, slow, scratch1, scratch2, temp_double,
+                          NoReg, MUTABLE);
+  }
+
+  // Stub never generated for objects that require access checks.
+  DCHECK(!transition->is_access_check_needed());
+
+  // Perform map transition for the receiver if necessary.
+  if (details.type() == FIELD &&
+      Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ Mov(scratch1, Operand(transition));
+    __ Push(receiver_reg, scratch1, value_reg);
+    __ TailCallExternalReference(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                          isolate()),
+        3, 1);
+    return;
+  }
+
+  // Update the map of the object.
+  __ Mov(scratch1, Operand(transition));
+  __ Str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+  // Update the write barrier for the map field.
+  __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  if (details.type() == CONSTANT) {
+    DCHECK(value_reg.is(x0));
+    __ Ret();
+    return;
+  }
+
+  int index = transition->instance_descriptors()->GetFieldIndex(
+      transition->LastAdded());
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties are not going to change.
+  index -= transition->inobject_properties();
+
+  // TODO(verwaest): Share this code as a code stub.
+  SmiCheck smi_check =
+      representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  Register prop_reg = representation.IsDouble() ? storage_reg : value_reg;
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = transition->instance_size() + (index * kPointerSize);
+    __ Str(prop_reg, FieldMemOperand(receiver_reg, offset));
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the in-object field.
+      if (!representation.IsDouble()) {
+        __ Mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+                          kLRHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array.
+    __ Ldr(scratch1,
+           FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    __ Str(prop_reg, FieldMemOperand(scratch1, offset));
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ Mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+                          kLRHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
+    }
+  }
+
+  __ Bind(&exit);
+  // Return the value (register x0).
+  DCHECK(value_reg.is(x0));
+  __ Ret();
+}
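+
+// Index convention used above (a reading aid): after subtracting
+// inobject_properties(), a negative index denotes an in-object field stored
+// at instance_size() + index * kPointerSize, while a non-negative index
+// selects a slot in the out-of-object properties FixedArray at
+// index * kPointerSize + FixedArray::kHeaderSize.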
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+                                                   Register value_reg,
+                                                   Label* miss_label) {
+  DCHECK(lookup->representation().IsHeapObject());
+  __ JumpIfSmi(value_reg, miss_label);
+  HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+  __ Ldr(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+  Label do_store;
+  while (true) {
+    __ CompareMap(scratch1(), it.Current());
+    it.Advance();
+    if (it.Done()) {
+      __ B(ne, miss_label);
+      break;
+    }
+    __ B(eq, &do_store);
+  }
+  __ Bind(&do_store);
+
+  StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                      lookup->representation());
+  GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss,
+    PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+  // object_reg and holder_reg registers can alias.
+  DCHECK(!AreAliased(object_reg, scratch1, scratch2));
+  DCHECK(!AreAliased(holder_reg, scratch1, scratch2));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type()->IsConstant()) {
+    current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+  }
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain
+  // for fast and global objects, or do a negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() || (current->property_dictionary()->FindEntry(
+                                       name) == NameDictionary::kNotFound));
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+
+      __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      // Two possible reasons for loading the prototype from the map:
+      // (1) Can't store references to new space in code.
+      // (2) Handler is shared for all receivers with the same prototype
+      //     map (but not necessarily the same prototype instance).
+      bool load_prototype_from_map =
+          heap()->InNewSpace(*prototype) || depth == 1;
+      Register map_reg = scratch1;
+      __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+      }
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      // This allows us to install generated handlers for accesses to the
+      // global proxy (as opposed to using slow ICs). See corresponding code
+      // in LookupForRead().
+      if (current_map->IsJSGlobalProxyMap()) {
+        UseScratchRegisterScope temps(masm());
+        __ CheckAccessGlobalProxy(reg, scratch2, temps.AcquireX(), miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      }
+
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (load_prototype_from_map) {
+        __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+      } else {
+        __ Mov(reg, Operand(prototype));
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  // Check the holder map.
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  DCHECK(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ B(&success);
+
+    __ Bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+    __ Bind(&success);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ B(&success);
+
+    GenerateRestoreName(miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+    __ Bind(&success);
+  }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ LoadObject(x0, value);
+  __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<ExecutableAccessorInfo> callback) {
+  DCHECK(!AreAliased(scratch2(), scratch3(), scratch4(), reg));
+
+  // Build the ExecutableAccessorInfo::args_ list on the stack and push the
+  // property name below the exit frame to make the GC aware of them and to
+  // store pointers to them.
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+
+  __ Push(receiver());
+
+  if (heap()->InNewSpace(callback->data())) {
+    __ Mov(scratch3(), Operand(callback));
+    __ Ldr(scratch3(),
+           FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+  } else {
+    __ Mov(scratch3(), Operand(Handle<Object>(callback->data(), isolate())));
+  }
+  __ LoadRoot(scratch4(), Heap::kUndefinedValueRootIndex);
+  __ Mov(scratch2(), Operand(ExternalReference::isolate_address(isolate())));
+  __ Push(scratch3(), scratch4(), scratch4(), scratch2(), reg, name());
+
+  Register args_addr = scratch2();
+  __ Add(args_addr, __ StackPointer(), kPointerSize);
+
+  // Stack at this point:
+  //              sp[40] callback data
+  //              sp[32] undefined
+  //              sp[24] undefined
+  //              sp[16] isolate
+  // args_addr -> sp[8]  reg
+  //              sp[0]  name
+
+  // ABI for CallApiGetter.
+  Register getter_address_reg = x2;
+
+  // Set up the call.
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, isolate());
+  __ Mov(getter_address_reg, ref);
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(!AreAliased(receiver(), this->name(), scratch1(), scratch2(),
+                     scratch3()));
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it differs from the
+  // holder and is still needed after the interceptor returns without a
+  // result: the ACCESSOR case needs the receiver to be passed into C++ code,
+  // and the FIELD case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+    if (must_preserve_receiver_reg) {
+      __ Push(receiver(), holder_reg, this->name());
+    } else {
+      __ Push(holder_reg, this->name());
+    }
+    // Invoke the interceptor. Note: the map checks from the receiver to the
+    // interceptor's holder have been compiled before (see the caller of
+    // this method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if the interceptor provided a value for the property. If so,
+    // return immediately.
+    Label interceptor_failed;
+    __ JumpIfRoot(x0, Heap::kNoInterceptorResultSentinelRootIndex,
+                  &interceptor_failed);
+    frame_scope.GenerateLeaveFrame();
+    __ Ret();
+
+    __ Bind(&interceptor_failed);
+    if (must_preserve_receiver_reg) {
+      __ Pop(this->name(), holder_reg, receiver());
+    } else {
+      __ Pop(this->name(), holder_reg);
+    }
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  // Call the runtime system to load the interceptor.
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(
+      ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
+  Register holder_reg = Frontend(receiver(), name);
+
+  // Stub never generated for non-global objects that require access checks.
+  DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
+
+  // receiver() and holder_reg can alias.
+  DCHECK(!AreAliased(receiver(), scratch1(), scratch2(), value()));
+  DCHECK(!AreAliased(holder_reg, scratch1(), scratch2(), value()));
+  __ Mov(scratch1(), Operand(callback));
+  __ Mov(scratch2(), Operand(name));
+  __ Push(receiver(), holder_reg, scratch1(), scratch2(), value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/ic/arm64/ic-arm64.cc b/src/ic/arm64/ic-arm64.cc
new file mode 100644
index 0000000..76f9c24
--- /dev/null
+++ b/src/ic/arm64/ic-arm64.cc
@@ -0,0 +1,1039 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// "type" holds an instance type on entry and is not clobbered.
+// The generated code branches to "global_object" if type is any kind of
+// global JS object.
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
+  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
+  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
+  __ B(eq, global_object);
+}
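+
+// Reading aid: each Ccmp above only performs its comparison when the
+// previous test failed (condition ne); if an earlier test already matched,
+// it sets the flags to ZFlag instead, preserving the "equal" outcome. The
+// chain therefore implements
+//   type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE ||
+//   type == JS_GLOBAL_PROXY_TYPE
+// with a single terminal branch.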
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done.
+// The scratch registers need to be different from elements, name and result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+                                   Register elements, Register name,
+                                   Register result, Register scratch1,
+                                   Register scratch2) {
+  DCHECK(!AreAliased(elements, name, scratch1, scratch2));
+  DCHECK(!AreAliased(result, scratch1, scratch2));
+
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry check that the value is a normal property.
+  __ Bind(&done);
+
+  static const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
+  __ B(ne, miss);
+
+  // Get the value at the masked, scaled index and return.
+  __ Ldr(result,
+         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
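+
+// Layout note (illustrative): NameDictionary stores each entry as a
+// (key, value, details) triple, so relative to the probed entry the value
+// sits one word and the details two words past the key, which is what the
+// kElementsStartOffset + 1 and + 2 pointer-size offsets above encode.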
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value:    The value to store (never clobbered).
+//
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+                                    Register elements, Register name,
+                                    Register value, Register scratch1,
+                                    Register scratch2) {
+  DCHECK(!AreAliased(elements, name, value, scratch1, scratch2));
+
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ Bind(&done);
+
+  static const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  static const int kTypeAndReadOnlyMask =
+      PropertyDetails::TypeField::kMask |
+      PropertyDetails::AttributesField::encode(READ_ONLY);
+  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+  __ Tst(scratch1, kTypeAndReadOnlyMask);
+  __ B(ne, miss);
+
+  // Store the value at the masked, scaled index and return.
+  static const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
+  __ Str(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ Mov(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS objects and returns the map of the
+// receiver in 'map_scratch' if the receiver is not a smi.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver,
+                                           Register map_scratch,
+                                           Register scratch,
+                                           int interceptor_bit, Label* slow) {
+  DCHECK(!AreAliased(map_scratch, scratch));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
+  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
+  __ Tbnz(scratch, interceptor_bit, slow);
+
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object, we enter the
+  // runtime system to make sure that indexing into string objects works
+  // as intended.
+  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+  __ Cmp(scratch, JS_OBJECT_TYPE);
+  __ B(lt, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+//
+// receiver     - holds the receiver on entry.
+//                Unchanged unless 'result' is the same register.
+//
+// key          - holds the smi key on entry.
+//                Unchanged unless 'result' is the same register.
+//
+// elements     - holds the elements of the receiver on exit.
+//
+// elements_map - holds the elements map on exit if the not_fast_array branch is
+//                taken. Otherwise, this is used as a scratch register.
+//
+// result       - holds the result on exit if the load succeeded.
+//                Allowed to be the same as 'receiver' or 'key'.
+//                Unchanged on bailout so 'receiver' and 'key' can be safely
+//                used by further computation.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register elements,
+                                  Register elements_map, Register scratch2,
+                                  Register result, Label* not_fast_array,
+                                  Label* slow) {
+  DCHECK(!AreAliased(receiver, key, elements, elements_map, scratch2));
+
+  // Check for fast array.
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
+    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
+                     not_fast_array);
+  } else {
+    __ AssertFastElements(elements);
+  }
+
+  // The elements_map register is only used for the not_fast_array path, which
+  // was handled above. From this point onward it is a scratch register.
+  Register scratch1 = elements_map;
+
+  // Check that the key (index) is within bounds.
+  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Cmp(key, scratch1);
+  __ B(hs, slow);
+
+  // Fast case: Do the load.
+  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ SmiUntag(scratch2, key);
+  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);
+
+  // Move the value to the result register.
+  // 'result' can alias with 'receiver' or 'key' but these two must be
+  // preserved if we jump to 'slow'.
+  __ Mov(result, scratch2);
+}
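+
+// Worked example of the fast-path addressing above (on arm64,
+// kPointerSizeLog2 is 3): after untagging the smi key into an index i,
+// element i is loaded from
+//   elements + (FixedArray::kHeaderSize - kHeapObjectTag) + (i << 3)
+// i.e. a tag-adjusted offset into the FixedArray's element area.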
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+// The map of the key is returned in 'map_scratch'.
+// If the jump to 'index_string' is taken, the hash of the key is left
+// in 'hash_scratch'.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map_scratch, Register hash_scratch,
+                                 Label* index_string, Label* not_unique) {
+  DCHECK(!AreAliased(key, map_scratch, hash_scratch));
+
+  // Is the key a name?
+  Label unique;
+  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
+                      not_unique, hi);
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ B(eq, &unique);
+
+  // Is the string an array index with cached numeric value?
+  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+  __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask,
+                             index_string);
+
+  // Is the string internalized? We know it's a string, so a single bit test is
+  // enough.
+  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0);
+  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);
+
+  __ Bind(&unique);
+  // Fall through if the key is a unique name.
+}
+
+
+// Neither 'object' nor 'key' is modified by this function.
+//
+// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
+// left with the object's elements map. Otherwise, it is used as a scratch
+// register.
+static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
+                                                Register object, Register key,
+                                                Register map, Register scratch1,
+                                                Register scratch2,
+                                                Label* unmapped_case,
+                                                Label* slow_case) {
+  DCHECK(!AreAliased(object, key, map, scratch1, scratch2));
+
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the elements
+  // map check later, we do not need to check for interceptors or
+  // whether it requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE, slow_case,
+                      lt);
+
+  // Check that the key is a positive smi.
+  __ JumpIfNotSmi(key, slow_case);
+  __ Tbnz(key, kXSignBit, slow_case);
+
+  // Load the elements object and check its map.
+  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup.
+  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
+  __ Sub(scratch1, scratch1, Smi::FromInt(2));
+  __ Cmp(key, scratch1);
+  __ B(hs, unmapped_case);
+
+  // Load element index and check whether it is the hole.
+  static const int offset =
+      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+  __ Add(scratch1, map, offset);
+  __ SmiUntag(scratch2, key);
+  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
+  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);
+
+  // Load value from context and return it.
+  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
+  __ SmiUntag(scratch1);
+  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
+  __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
+  // The base of the result (scratch2) is passed to RecordWrite in
+  // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
+  return MemOperand(scratch2, scratch1);
+}
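+
+// Parameter-map layout assumed by the lookup above (a reading aid):
+//   map[0]      the context
+//   map[1]      the arguments backing store
+//   map[2 + i]  the context slot index for parameter i, or the hole
+// which is why the bounds check subtracts Smi::FromInt(2) from the length
+// and the element offset skips 2 * kPointerSize past FixedArray::kHeaderSize.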
+
+
+// The 'parameter_map' register must be loaded with the parameter map of the
+// arguments object and is overwritten.
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                                  Register key,
+                                                  Register parameter_map,
+                                                  Register scratch,
+                                                  Label* slow_case) {
+  DCHECK(!AreAliased(key, parameter_map, scratch));
+
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+              DONT_DO_SMI_CHECK);
+  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+  __ Cmp(key, scratch);
+  __ B(hs, slow_case);
+
+  __ Add(backing_store, backing_store,
+         FixedArray::kHeaderSize - kHeapObjectTag);
+  __ SmiUntag(scratch, key);
+  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = x0;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+  Label slow;
+
+  __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+                                     JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), x0, x3, x4);
+  __ Ret();
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ Bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in lr.
+  Isolate* isolate = masm->isolate();
+  ASM_LOCATION("LoadIC::GenerateMiss");
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+
+  // Perform tail call to the entry.
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in lr.
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+  ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
+  Label slow, notin;
+  Register value = StoreDescriptor::ValueRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  DCHECK(receiver.is(x1));
+  DCHECK(key.is(x2));
+  DCHECK(value.is(x0));
+
+  Register map = x3;
+
+  // These registers are used by GenerateMappedArgumentsLookup to build a
+  // MemOperand. They are live for as long as the MemOperand is live.
+  Register mapped1 = x4;
+  Register mapped2 = x5;
+
+  MemOperand mapped = GenerateMappedArgumentsLookup(
+      masm, receiver, key, map, mapped1, mapped2, &notin, &slow);
+  Operand mapped_offset = mapped.OffsetAsOperand();
+  __ Str(value, mapped);
+  __ Add(x10, mapped.base(), mapped_offset);
+  __ Mov(x11, value);
+  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Ret();
+
+  __ Bind(&notin);
+
+  // These registers are used by GenerateUnmappedArgumentsLookup to build a
+  // MemOperand. They are live for as long as the MemOperand is live.
+  Register unmapped1 = map;  // Deliberately aliases 'map'.
+  Register unmapped2 = x4;
+  MemOperand unmapped =
+      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
+  Operand unmapped_offset = unmapped.OffsetAsOperand();
+  __ Str(value, unmapped);
+  __ Add(x10, unmapped.base(), unmapped_offset);
+  __ Mov(x11, value);
+  __ RecordWrite(unmapped.base(), x10, x11, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs);
+  __ Ret();
+  __ Bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in lr.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in lr.
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key,
+                                        Register receiver, Register scratch1,
+                                        Register scratch2, Register scratch3,
+                                        Register scratch4, Register scratch5,
+                                        Label* slow) {
+  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
+                     scratch5));
+
+  Isolate* isolate = masm->isolate();
+  Label check_number_dictionary;
+  // If we can load the value, it should be returned in x0.
+  Register result = x0;
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
+                                 Map::kHasIndexedInterceptor, slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1,
+                        result, NULL, slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1,
+                      scratch1, scratch2);
+  __ Ret();
+
+  __ Bind(&check_number_dictionary);
+  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));
+
+  // Check whether we have a number dictionary.
+  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);
+
+  __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2,
+                              scratch4, scratch5);
+  __ Ret();
+}
+
+static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key,
+                                         Register receiver, Register scratch1,
+                                         Register scratch2, Register scratch3,
+                                         Register scratch4, Register scratch5,
+                                         Label* slow) {
+  DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4,
+                     scratch5));
+
+  Isolate* isolate = masm->isolate();
+  Label probe_dictionary, property_array_property;
+  // If we can load the value, it should be returned in x0.
+  Register result = x0;
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2,
+                                 Map::kHasNamedInterceptor, slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup cache.
+  // Otherwise probe the dictionary.
+  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
+  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);
+
+  // We keep the map of the receiver in scratch1.
+  Register receiver_map = scratch1;
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the name hash.
+  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
+  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
+  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
+  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+  __ And(scratch2, scratch2, mask);
+
+  // Load the key (consisting of map and unique name) from the cache and
+  // check for a match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+
+  __ Mov(scratch3, cache_keys);
+  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    // Load the map and make scratch3 point to the next entry.
+    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
+    __ Cmp(receiver_map, scratch4);
+    __ B(ne, &try_next_entry);
+    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load the name.
+    __ Cmp(key, scratch4);
+    __ B(eq, &hit_on_nth_entry[i]);
+    __ Bind(&try_next_entry);
+  }
+
+  // Last entry.
+  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
+  __ Cmp(receiver_map, scratch4);
+  __ B(ne, slow);
+  __ Ldr(scratch4, MemOperand(scratch3));
+  __ Cmp(key, scratch4);
+  __ B(ne, slow);
+
+  // Get field offset.
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ Bind(&hit_on_nth_entry[i]);
+    __ Mov(scratch3, cache_field_offsets);
+    if (i != 0) {
+      __ Add(scratch2, scratch2, i);
+    }
+    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
+    __ Ldrb(scratch5,
+            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
+    __ Subs(scratch4, scratch4, scratch5);
+    __ B(ge, &property_array_property);
+    if (i != 0) {
+      __ B(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ Bind(&load_in_object_property);
+  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
+  __ Add(scratch5, scratch5, scratch4);        // Index from start of object.
+  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
+  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      scratch1, scratch2);
+  __ Ret();
+
+  // Load property array property.
+  __ Bind(&property_array_property);
+  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      scratch1, scratch2);
+  __ Ret();
+
+  // Do a quick inline probe of the receiver's dictionary, if it exists.
+  __ Bind(&probe_dictionary);
+  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
+  // Load the property.
+  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1,
+                      scratch1, scratch2);
+  __ Ret();
+}
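+
+// Sketch of the lookup-cache probe above (illustrative pseudo-C++, names as
+// used in the code):
+//   hash  = ((receiver_map >> kMapHashShift) ^ (hash_field >> kHashShift))
+//           & (kCapacityMask & kHashMask);
+//   entry = &cache_keys[hash * 2];  // (map, name) pairs, two pointers each
+// followed by up to kEntriesPerBucket sequential (map, name) comparisons,
+// with the matching entry's index used to read cache_field_offsets.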
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // The return address is in lr.
+  Label slow, check_name, index_smi, index_name;
+
+  Register key = LoadDescriptor::NameRegister();
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  DCHECK(key.is(x2));
+  DCHECK(receiver.is(x1));
+
+  __ JumpIfNotSmi(key, &check_name);
+  __ Bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
+
+  // Slow case.
+  __ Bind(&slow);
+  __ IncrementCounter(masm->isolate()->counters()->keyed_load_generic_slow(), 1,
+                      x4, x3);
+  GenerateRuntimeGetProperty(masm);
+
+  __ Bind(&check_name);
+  GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow);
+
+  GenerateKeyedLoadWithNameKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow);
+
+  __ Bind(&index_name);
+  __ IndexFromHash(x3, key);
+  // Now jump to the place where smi keys are handled.
+  __ B(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  // Return address is in lr.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register result = x0;
+  Register scratch = x3;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ Bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
+
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+    Register value, Register key, Register receiver, Register receiver_map,
+    Register elements_map, Register elements) {
+  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
+                     x10, x11));
+
+  Label transition_smi_elements;
+  Label transition_double_elements;
+  Label fast_double_without_map_check;
+  Label non_double_value;
+  Label finish_store;
+
+  __ Bind(fast_object);
+  if (check_map == kCheckMap) {
+    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ Cmp(elements_map,
+           Operand(masm->isolate()->factory()->fixed_array_map()));
+    __ B(ne, fast_double);
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because there
+  // may be a callback on the element.
+  Label holecheck_passed;
+  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+  __ Ldr(x11, MemOperand(x10));
+  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+  __ bind(&holecheck_passed);
+
+  // Smi stores don't require further checks.
+  __ JumpIfSmi(value, &finish_store);
+
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
+
+  __ Bind(&finish_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Add(x10, key, Smi::FromInt(1));
+    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+
+  Register address = x11;
+  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+  __ Str(value, MemOperand(address));
+
+  Label dont_record_write;
+  __ JumpIfSmi(value, &dont_record_write);
+
+  // Update write barrier for the elements array address.
+  __ Mov(x10, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  __ Bind(&dont_record_write);
+  __ Ret();
+
+
+  __ Bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so go to
+  // the runtime.
+  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
+  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
+  __ Ldr(x11, MemOperand(x10));
+  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
+
+  __ Bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
+                                 &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Add(x10, key, Smi::FromInt(1));
+    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Ret();
+
+
+  __ Bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   receiver_map, mode, slow);
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ B(&fast_double_without_map_check);
+
+  __ Bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, x10, x11, slow);
+
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, receiver_map, mode, slow);
+
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ B(&finish_store);
+
+  __ Bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
+  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, x10, x11, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ B(&finish_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictMode strict_mode) {
+  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
+  Label slow;
+  Label array;
+  Label fast_object;
+  Label extra;
+  Label fast_object_grow;
+  Label fast_double_grow;
+  Label fast_double;
+
+  Register value = StoreDescriptor::ValueRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  DCHECK(receiver.is(x1));
+  DCHECK(key.is(x2));
+  DCHECK(value.is(x0));
+
+  Register receiver_map = x3;
+  Register elements = x4;
+  Register elements_map = x5;
+
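+  // The fast paths below require a smi key and a heap-object receiver;
+  // anything else is handled in the runtime.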
+  __ JumpIfNotSmi(key, &slow);
+  __ JumpIfSmi(receiver, &slow);
+  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ TestAndBranchIfAnySet(
+      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);
+
+  // Check if the object is a JS array or not.
+  Register instance_type = x10;
+  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
+  __ B(eq, &array);
+  // Check that the object is some kind of JSObject.
+  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
+  __ B(lt, &slow);
+
+  // Object case: Check key against length in the elements array.
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Cmp(x10, Operand::UntagSmi(key));
+  __ B(hi, &fast_object);
+
+
+  __ Bind(&slow);
+  // Slow case: jump to the runtime to handle the store.
+  // Live values:
+  //  x0: value
+  //  x1: key
+  //  x2: receiver
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+
+
+  __ Bind(&extra);
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Cmp(x10, Operand::UntagSmi(key));
+  __ B(ls, &slow);
+
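+  // Dispatch on the backing store map: a FixedArray grows through the
+  // fast-object path, a FixedDoubleArray through the fast-double path.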
+  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ B(eq, &fast_object_grow);
+  __ Cmp(elements_map,
+         Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ B(eq, &fast_double_grow);
+  __ B(&slow);
+
+
+  __ Bind(&array);
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+
+  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array.
+  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Cmp(x10, Operand::UntagSmi(key));
+  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
+  __ B(lo, &slow);
+
+  KeyedStoreGenerateGenericHelper(
+      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+      value, key, receiver, receiver_map, elements_map, elements);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength, value,
+                                  key, receiver, receiver_map, elements_map,
+                                  elements);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(!AreAliased(receiver, name, StoreDescriptor::ValueRegister(), x3, x4,
+                     x5, x6));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+                                               name, x3, x4, x5, x6);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // Tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Label miss;
+  Register value = StoreDescriptor::ValueRegister();
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register dictionary = x3;
+  DCHECK(!AreAliased(value, receiver, name, x3, x4, x5));
+
+  __ Ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
+  __ Ret();
+
+  // Cache miss: Jump to runtime.
+  __ Bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
+  GenerateMiss(masm);
+}
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return al;
+  }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address info_address = Assembler::return_address_from_call_start(address);
+
+  InstructionSequence* patch_info = InstructionSequence::At(info_address);
+  return patch_info->IsInlineData();
+}
+
+
+// Activate a SMI fast-path by patching the instructions generated by
+// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
+// JumpPatchSite::EmitPatchInfo().
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  // The patch information is encoded in the instruction stream using
+  // instructions which have no side effects, so we can safely execute them.
+  // The patch information is encoded directly after the call to the helper
+  // function which is requesting this patch operation.
+  Address info_address = Assembler::return_address_from_call_start(address);
+  InlineSmiCheckInfo info(info_address);
+
+  // Check and decode the patch information instruction.
+  if (!info.HasSmiCheck()) {
+    return;
+  }
+
+  if (FLAG_trace_ic) {
+    PrintF("[  Patching ic at %p, marker=%p, SMI check=%p\n", address,
+           info_address, reinterpret_cast<void*>(info.SmiCheck()));
+  }
+
+  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
+  // and JumpPatchSite::EmitJumpIfSmi().
+  // Changing
+  //   tb(n)z xzr, #0, <target>
+  // to
+  //   tb(!n)z test_reg, #0, <target>
+  Instruction* to_patch = info.SmiCheck();
+  PatchingAssembler patcher(to_patch, 1);
+  DCHECK(to_patch->IsTestBranch());
+  DCHECK(to_patch->ImmTestBranchBit5() == 0);
+  DCHECK(to_patch->ImmTestBranchBit40() == 0);
+
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagMask == 1);
+
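+  // Enabling the check patches in the register holding the value to test;
+  // disabling it patches xzr back in.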
+  int branch_imm = to_patch->ImmTestBranch();
+  Register smi_reg;
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    DCHECK(to_patch->Rt() == xzr.code());
+    smi_reg = info.SmiRegister();
+  } else {
+    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+    DCHECK(to_patch->Rt() != xzr.code());
+    smi_reg = xzr;
+  }
+
+  if (to_patch->Mask(TestBranchMask) == TBZ) {
+    // This is JumpIfNotSmi(smi_reg, branch_imm).
+    patcher.tbnz(smi_reg, 0, branch_imm);
+  } else {
+    DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
+    // This is JumpIfSmi(smi_reg, branch_imm).
+    patcher.tbz(smi_reg, 0, branch_imm);
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/ic/arm64/ic-compiler-arm64.cc b/src/ic/arm64/ic-compiler-arm64.cc
new file mode 100644
index 0000000..ffc1069
--- /dev/null
+++ b/src/ic/arm64/ic-compiler-arm64.cc
@@ -0,0 +1,133 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                                    StrictMode strict_mode) {
+  ASM_LOCATION("PropertyICCompiler::GenerateRuntimeSetProperty");
+
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  __ Mov(x10, Smi::FromInt(strict_mode));
+  __ Push(x10);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+                                                    Handle<Name> name,
+                                                    Code::StubType type,
+                                                    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // In case we are compiling an IC for dictionary loads and stores, just
+    // check whether the name is unique.
+    if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ Ldrb(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+    } else {
+      __ CompareAndBranch(this->name(), Operand(name), ne, &miss);
+    }
+  }
+
+  Label number_case;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+  Register map_reg = scratch1();
+  DCHECK(kind() != Code::KEYED_STORE_IC ||
+         map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
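+  // Compare the receiver map against each handled map and jump to the
+  // matching handler; deprecated maps are skipped and fall through to miss.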
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      Label try_next;
+      __ Cmp(map_reg, Operand(map));
+      __ B(ne, &try_next);
+      if (type->Is(HeapType::Number())) {
+        DCHECK(!number_case.is_unused());
+        __ Bind(&number_case);
+      }
+      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET);
+      __ Bind(&try_next);
+    }
+  }
+  DCHECK(number_of_handled_maps != 0);
+
+  __ Bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      (number_of_handled_maps > 1) ? POLYMORPHIC : MONOMORPHIC;
+  return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+
+  ASM_LOCATION("PropertyICCompiler::CompileKeyedStorePolymorphic");
+
+  __ JumpIfSmi(receiver(), &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int i = 0; i < receiver_count; i++) {
+    __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
+
+    Label skip;
+    __ B(&skip, ne);
+    if (!transitioned_maps->at(i).is_null()) {
+      // This argument is used by the handler stub. For example, see
+      // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
+      __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+    }
+    __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+    __ Bind(&skip);
+  }
+
+  __ Bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/ic/arm64/stub-cache-arm64.cc b/src/ic/arm64/stub-cache-arm64.cc
new file mode 100644
index 0000000..4d31d49
--- /dev/null
+++ b/src/ic/arm64/stub-cache-arm64.cc
@@ -0,0 +1,149 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_ARM64
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+// Probe the primary or secondary table.
+// If the entry is found in the cache, the generated code jumps to the first
+// instruction of the stub in the cache.
+// If there is a miss, the code falls through.
+//
+// The 'receiver', 'name' and 'offset' registers are preserved on miss.
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, bool leave_frame,
+                       StubCache::Table table, Register receiver, Register name,
+                       Register offset, Register scratch, Register scratch2,
+                       Register scratch3) {
+  // Some code below relies on the fact that the Entry struct contains
+  // 3 pointers (name, code, map).
+  STATIC_ASSERT(sizeof(StubCache::Entry) == (3 * kPointerSize));
+
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+  uintptr_t value_off_addr =
+      reinterpret_cast<uintptr_t>(value_offset.address());
+  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
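+  // Only the deltas between the tables matter below: the map and code entries
+  // are loaded at fixed offsets from the key entry.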
+
+  Label miss;
+
+  DCHECK(!AreAliased(name, offset, scratch, scratch2, scratch3));
+
+  // Multiply by 3 because there are 3 fields per entry.
+  __ Add(scratch3, offset, Operand(offset, LSL, 1));
+
+  // Calculate the base address of the entry.
+  __ Mov(scratch, key_offset);
+  __ Add(scratch, scratch, Operand(scratch3, LSL, kPointerSizeLog2));
+
+  // Check that the key in the entry matches the name.
+  __ Ldr(scratch2, MemOperand(scratch));
+  __ Cmp(name, scratch2);
+  __ B(ne, &miss);
+
+  // Check the map matches.
+  __ Ldr(scratch2, MemOperand(scratch, map_off_addr - key_off_addr));
+  __ Ldr(scratch3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Cmp(scratch2, scratch3);
+  __ B(ne, &miss);
+
+  // Get the code entry from the cache.
+  __ Ldr(scratch, MemOperand(scratch, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  __ Ldr(scratch2.W(), FieldMemOperand(scratch, Code::kFlagsOffset));
+  __ Bic(scratch2.W(), scratch2.W(), Code::kFlagsNotUsedInLookup);
+  __ Cmp(scratch2.W(), flags);
+  __ B(ne, &miss);
+
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ B(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ B(&miss);
+  }
+#endif
+
+  if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+  // Jump to the first instruction in the code stub.
+  __ Add(scratch, scratch, Code::kHeaderSize - kHeapObjectTag);
+  __ Br(scratch);
+
+  // Miss: fall through.
+  __ Bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+                              bool leave_frame, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+  // Make sure the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
+
+  // Make sure the extra, extra2 and extra3 registers are valid.
+  DCHECK(!extra.is(no_reg));
+  DCHECK(!extra2.is(no_reg));
+  DCHECK(!extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+                      extra3);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Compute the hash for primary table.
+  __ Ldr(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+  __ Ldr(extra, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Add(scratch, scratch, extra);
+  __ Eor(scratch, scratch, flags);
+  // We shift out the last two bits because they are not part of the hash.
+  __ Ubfx(scratch, scratch, kCacheIndexShift,
+          CountTrailingZeros(kPrimaryTableSize, 64));
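+  // Ubfx also masks the index to the primary table size, which must be a
+  // power of two.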
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Primary miss: Compute hash for secondary table.
+  __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
+  __ Add(scratch, scratch, flags >> kCacheIndexShift);
+  __ And(scratch, scratch, kSecondaryTableSize - 1);
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Cache miss: fall through and let the caller handle the miss by
+  // entering the runtime system.
+  __ Bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+                      extra3);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM64
diff --git a/src/ic/call-optimization.cc b/src/ic/call-optimization.cc
new file mode 100644
index 0000000..7ef1b7e
--- /dev/null
+++ b/src/ic/call-optimization.cc
@@ -0,0 +1,113 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/call-optimization.h"
+
+
+namespace v8 {
+namespace internal {
+
+CallOptimization::CallOptimization(Handle<JSFunction> function) {
+  Initialize(function);
+}
+
+
+Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
+    Handle<Map> object_map, HolderLookup* holder_lookup) const {
+  DCHECK(is_simple_api_call());
+  if (!object_map->IsJSObjectMap()) {
+    *holder_lookup = kHolderNotFound;
+    return Handle<JSObject>::null();
+  }
+  if (expected_receiver_type_.is_null() ||
+      expected_receiver_type_->IsTemplateFor(*object_map)) {
+    *holder_lookup = kHolderIsReceiver;
+    return Handle<JSObject>::null();
+  }
+  while (true) {
+    if (!object_map->prototype()->IsJSObject()) break;
+    Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
+    if (!prototype->map()->is_hidden_prototype()) break;
+    object_map = handle(prototype->map());
+    if (expected_receiver_type_->IsTemplateFor(*object_map)) {
+      *holder_lookup = kHolderFound;
+      return prototype;
+    }
+  }
+  *holder_lookup = kHolderNotFound;
+  return Handle<JSObject>::null();
+}
+
+
+bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
+                                            Handle<JSObject> holder) const {
+  DCHECK(is_simple_api_call());
+  if (!receiver->IsJSObject()) return false;
+  Handle<Map> map(JSObject::cast(*receiver)->map());
+  HolderLookup holder_lookup;
+  Handle<JSObject> api_holder = LookupHolderOfExpectedType(map, &holder_lookup);
+  switch (holder_lookup) {
+    case kHolderNotFound:
+      return false;
+    case kHolderIsReceiver:
+      return true;
+    case kHolderFound:
+      if (api_holder.is_identical_to(holder)) return true;
+      // Check if holder is in prototype chain of api_holder.
+      {
+        JSObject* object = *api_holder;
+        while (true) {
+          Object* prototype = object->map()->prototype();
+          if (!prototype->IsJSObject()) return false;
+          if (prototype == *holder) return true;
+          object = JSObject::cast(prototype);
+        }
+      }
+      break;
+  }
+  UNREACHABLE();
+  return false;
+}
+
+
+void CallOptimization::Initialize(Handle<JSFunction> function) {
+  constant_function_ = Handle<JSFunction>::null();
+  is_simple_api_call_ = false;
+  expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
+  api_call_info_ = Handle<CallHandlerInfo>::null();
+
+  if (function.is_null() || !function->is_compiled()) return;
+
+  constant_function_ = function;
+  AnalyzePossibleApiFunction(function);
+}
+
+
+void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
+  if (!function->shared()->IsApiFunction()) return;
+  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
+
+  // Require a C++ callback.
+  if (info->call_code()->IsUndefined()) return;
+  api_call_info_ =
+      Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
+
+  // Accept signatures that either have no restrictions at all or
+  // only have restrictions on the receiver.
+  if (!info->signature()->IsUndefined()) {
+    Handle<SignatureInfo> signature =
+        Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
+    if (!signature->args()->IsUndefined()) return;
+    if (!signature->receiver()->IsUndefined()) {
+      expected_receiver_type_ = Handle<FunctionTemplateInfo>(
+          FunctionTemplateInfo::cast(signature->receiver()));
+    }
+  }
+
+  is_simple_api_call_ = true;
+}
+}
+}  // namespace v8::internal
diff --git a/src/ic/call-optimization.h b/src/ic/call-optimization.h
new file mode 100644
index 0000000..99494fa
--- /dev/null
+++ b/src/ic/call-optimization.h
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_CALL_OPTIMIZATION_H_
+#define V8_IC_CALL_OPTIMIZATION_H_
+
+#include "src/code-stubs.h"
+#include "src/ic/access-compiler.h"
+#include "src/macro-assembler.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+// Holds information about possible function call optimizations.
+class CallOptimization BASE_EMBEDDED {
+ public:
+  explicit CallOptimization(Handle<JSFunction> function);
+
+  bool is_constant_call() const { return !constant_function_.is_null(); }
+
+  Handle<JSFunction> constant_function() const {
+    DCHECK(is_constant_call());
+    return constant_function_;
+  }
+
+  bool is_simple_api_call() const { return is_simple_api_call_; }
+
+  Handle<FunctionTemplateInfo> expected_receiver_type() const {
+    DCHECK(is_simple_api_call());
+    return expected_receiver_type_;
+  }
+
+  Handle<CallHandlerInfo> api_call_info() const {
+    DCHECK(is_simple_api_call());
+    return api_call_info_;
+  }
+
+  enum HolderLookup { kHolderNotFound, kHolderIsReceiver, kHolderFound };
+  Handle<JSObject> LookupHolderOfExpectedType(
+      Handle<Map> receiver_map, HolderLookup* holder_lookup) const;
+
+  // Check if the api holder is between the receiver and the holder.
+  bool IsCompatibleReceiver(Handle<Object> receiver,
+                            Handle<JSObject> holder) const;
+
+ private:
+  void Initialize(Handle<JSFunction> function);
+
+  // Determines whether the given function can be called using the
+  // fast api call builtin.
+  void AnalyzePossibleApiFunction(Handle<JSFunction> function);
+
+  Handle<JSFunction> constant_function_;
+  bool is_simple_api_call_;
+  Handle<FunctionTemplateInfo> expected_receiver_type_;
+  Handle<CallHandlerInfo> api_call_info_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_IC_CALL_OPTIMIZATION_H_
diff --git a/src/ic/handler-compiler.cc b/src/ic/handler-compiler.cc
new file mode 100644
index 0000000..4ed92ec
--- /dev/null
+++ b/src/ic/handler-compiler.cc
@@ -0,0 +1,410 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> PropertyHandlerCompiler::Find(Handle<Name> name,
+                                           Handle<Map> stub_holder,
+                                           Code::Kind kind,
+                                           CacheHolderFlag cache_holder,
+                                           Code::StubType type) {
+  Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
+  Object* probe = stub_holder->FindInCodeCache(*name, flags);
+  if (probe->IsCode()) return handle(Code::cast(probe));
+  return Handle<Code>::null();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::ComputeLoadNonexistent(
+    Handle<Name> name, Handle<HeapType> type) {
+  Isolate* isolate = name->GetIsolate();
+  Handle<Map> receiver_map = IC::TypeToMap(*type, isolate);
+  if (receiver_map->prototype()->IsNull()) {
+    // TODO(jkummerow/verwaest): If there is no prototype and the property
+    // is nonexistent, introduce a builtin to handle this (fast properties
+    // -> return undefined, dictionary properties -> do negative lookup).
+    return Handle<Code>();
+  }
+  CacheHolderFlag flag;
+  Handle<Map> stub_holder_map =
+      IC::GetHandlerCacheHolder(*type, false, isolate, &flag);
+
+  // If no dictionary mode objects are present in the prototype chain, the load
+  // nonexistent IC stub can be shared for all names for a given map and we use
+  // the nonexistent symbol for the map cache in that case. If there are
+  // dictionary mode objects involved, we need to do negative lookups in the
+  // stub and therefore the stub will be specific to the name.
+  Handle<Name> cache_name =
+      receiver_map->is_dictionary_map()
+          ? name
+          : Handle<Name>::cast(isolate->factory()->nonexistent_symbol());
+  Handle<Map> current_map = stub_holder_map;
+  Handle<JSObject> last(JSObject::cast(receiver_map->prototype()));
+  while (true) {
+    if (current_map->is_dictionary_map()) cache_name = name;
+    if (current_map->prototype()->IsNull()) break;
+    last = handle(JSObject::cast(current_map->prototype()));
+    current_map = handle(last->map());
+  }
+  // Compile the stub that is either shared for all names or
+  // name specific if there are global objects involved.
+  Handle<Code> handler = PropertyHandlerCompiler::Find(
+      cache_name, stub_holder_map, Code::LOAD_IC, flag, Code::FAST);
+  if (!handler.is_null()) return handler;
+
+  NamedLoadHandlerCompiler compiler(isolate, type, last, flag);
+  handler = compiler.CompileLoadNonexistent(cache_name);
+  Map::UpdateCodeCache(stub_holder_map, cache_name, handler);
+  return handler;
+}
+
+
+Handle<Code> PropertyHandlerCompiler::GetCode(Code::Kind kind,
+                                              Code::StubType type,
+                                              Handle<Name> name) {
+  Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder());
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  PROFILE(isolate(), CodeCreateEvent(Logger::STUB_TAG, *code, *name));
+  return code;
+}
+
+
+void PropertyHandlerCompiler::set_type_for_object(Handle<Object> object) {
+  type_ = IC::CurrentTypeOf(object, isolate());
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+Register NamedLoadHandlerCompiler::FrontendHeader(Register object_reg,
+                                                  Handle<Name> name,
+                                                  Label* miss) {
+  PrototypeCheckType check_type = CHECK_ALL_MAPS;
+  int function_index = -1;
+  if (type()->Is(HeapType::String())) {
+    function_index = Context::STRING_FUNCTION_INDEX;
+  } else if (type()->Is(HeapType::Symbol())) {
+    function_index = Context::SYMBOL_FUNCTION_INDEX;
+  } else if (type()->Is(HeapType::Number())) {
+    function_index = Context::NUMBER_FUNCTION_INDEX;
+  } else if (type()->Is(HeapType::Boolean())) {
+    function_index = Context::BOOLEAN_FUNCTION_INDEX;
+  } else {
+    check_type = SKIP_RECEIVER;
+  }
+
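+  // For primitive receivers, continue the prototype chain checks from the
+  // prototype of the corresponding global wrapper function.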
+  if (check_type == CHECK_ALL_MAPS) {
+    GenerateDirectLoadGlobalFunctionPrototype(masm(), function_index,
+                                              scratch1(), miss);
+    Object* function = isolate()->native_context()->get(function_index);
+    Object* prototype = JSFunction::cast(function)->instance_prototype();
+    set_type_for_object(handle(prototype, isolate()));
+    object_reg = scratch1();
+  }
+
+  // Check that the maps starting from the prototype haven't changed.
+  return CheckPrototypes(object_reg, scratch1(), scratch2(), scratch3(), name,
+                         miss, check_type);
+}
+
+
+// Frontend for store uses the name register. It has to be restored before a
+// miss.
+Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
+                                                   Handle<Name> name,
+                                                   Label* miss) {
+  return CheckPrototypes(object_reg, this->name(), scratch1(), scratch2(), name,
+                         miss, SKIP_RECEIVER);
+}
+
+
+Register PropertyHandlerCompiler::Frontend(Register object_reg,
+                                           Handle<Name> name) {
+  Label miss;
+  Register reg = FrontendHeader(object_reg, name, &miss);
+  FrontendFooter(name, &miss);
+  return reg;
+}
+
+
+void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
+                                                        Label* miss,
+                                                        Register scratch1,
+                                                        Register scratch2) {
+  Register holder_reg;
+  Handle<Map> last_map;
+  if (holder().is_null()) {
+    holder_reg = receiver();
+    last_map = IC::TypeToMap(*type(), isolate());
+    // If |type| has null as its prototype, |holder()| is
+    // Handle<JSObject>::null().
+    DCHECK(last_map->prototype() == isolate()->heap()->null_value());
+  } else {
+    holder_reg = FrontendHeader(receiver(), name, miss);
+    last_map = handle(holder()->map());
+  }
+
+  if (last_map->is_dictionary_map()) {
+    if (last_map->IsJSGlobalObjectMap()) {
+      Handle<JSGlobalObject> global =
+          holder().is_null()
+              ? Handle<JSGlobalObject>::cast(type()->AsConstant()->Value())
+              : Handle<JSGlobalObject>::cast(holder());
+      GenerateCheckPropertyCell(masm(), global, name, scratch1, miss);
+    } else {
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(holder().is_null() ||
+             holder()->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+      GenerateDictionaryNegativeLookup(masm(), miss, holder_reg, name, scratch1,
+                                       scratch2);
+    }
+  }
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
+                                                        FieldIndex field) {
+  Register reg = Frontend(receiver(), name);
+  __ Move(receiver(), reg);
+  LoadFieldStub stub(isolate(), field);
+  GenerateTailCall(masm(), stub.GetCode());
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
+                                                           int constant_index) {
+  Register reg = Frontend(receiver(), name);
+  __ Move(receiver(), reg);
+  LoadConstantStub stub(isolate(), constant_index);
+  GenerateTailCall(masm(), stub.GetCode());
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
+    Handle<Name> name) {
+  Label miss;
+  NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
+  GenerateLoadConstant(isolate()->factory()->undefined_value());
+  FrontendFooter(name, &miss);
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
+    Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
+  Register reg = Frontend(receiver(), name);
+  GenerateLoadCallback(reg, callback);
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
+    Handle<Name> name, const CallOptimization& call_optimization) {
+  DCHECK(call_optimization.is_simple_api_call());
+  Frontend(receiver(), name);
+  Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
+  GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(),
+                      scratch1(), false, 0, NULL);
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
+    LookupIterator* it) {
+  // So far the most popular follow-ups for interceptor loads are FIELD and
+  // ExecutableAccessorInfo, so inline only them. Other cases may be added
+  // later.
+  bool inline_followup = false;
+  switch (it->state()) {
+    case LookupIterator::TRANSITION:
+      UNREACHABLE();
+    case LookupIterator::ACCESS_CHECK:
+    case LookupIterator::INTERCEPTOR:
+    case LookupIterator::JSPROXY:
+    case LookupIterator::NOT_FOUND:
+      break;
+    case LookupIterator::DATA:
+      inline_followup = it->property_details().type() == FIELD;
+      break;
+    case LookupIterator::ACCESSOR: {
+      Handle<Object> accessors = it->GetAccessors();
+      inline_followup = accessors->IsExecutableAccessorInfo();
+      if (!inline_followup) break;
+      Handle<ExecutableAccessorInfo> info =
+          Handle<ExecutableAccessorInfo>::cast(accessors);
+      inline_followup = info->getter() != NULL &&
+                        ExecutableAccessorInfo::IsCompatibleReceiverType(
+                            isolate(), info, type());
+    }
+  }
+
+  Register reg = Frontend(receiver(), it->name());
+  if (inline_followup) {
+    // TODO(368): Compile in the whole chain: all the interceptors in
+    // prototypes and ultimate answer.
+    GenerateLoadInterceptorWithFollowup(it, reg);
+  } else {
+    GenerateLoadInterceptor(reg);
+  }
+  return GetCode(kind(), Code::FAST, it->name());
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
+    LookupIterator* it, Register interceptor_reg) {
+  Handle<JSObject> real_named_property_holder(it->GetHolder<JSObject>());
+
+  set_type_for_object(holder());
+  set_holder(real_named_property_holder);
+  Register reg = Frontend(interceptor_reg, it->name());
+
+  switch (it->state()) {
+    case LookupIterator::ACCESS_CHECK:
+    case LookupIterator::INTERCEPTOR:
+    case LookupIterator::JSPROXY:
+    case LookupIterator::NOT_FOUND:
+    case LookupIterator::TRANSITION:
+      UNREACHABLE();
+    case LookupIterator::DATA: {
+      DCHECK_EQ(FIELD, it->property_details().type());
+      __ Move(receiver(), reg);
+      LoadFieldStub stub(isolate(), it->GetFieldIndex());
+      GenerateTailCall(masm(), stub.GetCode());
+      break;
+    }
+    case LookupIterator::ACCESSOR:
+      Handle<ExecutableAccessorInfo> info =
+          Handle<ExecutableAccessorInfo>::cast(it->GetAccessors());
+      DCHECK_NE(NULL, info->getter());
+      GenerateLoadCallback(reg, info);
+  }
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
+    Handle<Name> name, Handle<JSFunction> getter) {
+  Frontend(receiver(), name);
+  GenerateLoadViaGetter(masm(), type(), receiver(), getter);
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+// TODO(verwaest): Cleanup. holder() is actually the receiver.
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
+    Handle<Map> transition, Handle<Name> name) {
+  Label miss, slow;
+
+  // Ensure no transitions to deprecated maps are followed.
+  __ CheckMapDeprecated(transition, scratch1(), &miss);
+
+  // Check that we are allowed to write this.
+  bool is_nonexistent = holder()->map() == transition->GetBackPointer();
+  if (is_nonexistent) {
+    // Find the top object.
+    Handle<JSObject> last;
+    PrototypeIterator iter(isolate(), holder());
+    while (!iter.IsAtEnd()) {
+      last = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+      iter.Advance();
+    }
+    if (!last.is_null()) set_holder(last);
+    NonexistentFrontendHeader(name, &miss, scratch1(), scratch2());
+  } else {
+    FrontendHeader(receiver(), name, &miss);
+    DCHECK(holder()->HasFastProperties());
+  }
+
+  GenerateStoreTransition(transition, name, receiver(), this->name(), value(),
+                          scratch1(), scratch2(), scratch3(), &miss, &slow);
+
+  GenerateRestoreName(&miss, name);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  GenerateRestoreName(&slow, name);
+  TailCallBuiltin(masm(), SlowBuiltin(kind()));
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
+  Label miss;
+  GenerateStoreField(it, value(), &miss);
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+  return GetCode(kind(), Code::FAST, it->name());
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
+    Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
+  Frontend(receiver(), name);
+  GenerateStoreViaSetter(masm(), type(), receiver(), setter);
+
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    const CallOptimization& call_optimization) {
+  Frontend(receiver(), name);
+  Register values[] = {value()};
+  GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
+                      receiver(), scratch1(), true, 1, values);
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+#undef __
+
+
+void ElementHandlerCompiler::CompileElementHandlers(
+    MapHandleList* receiver_maps, CodeHandleList* handlers) {
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    Handle<Map> receiver_map = receiver_maps->at(i);
+    Handle<Code> cached_stub;
+
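+    // Pick a handler per receiver map: strings and non-JS receivers get
+    // dedicated builtins, JS objects dispatch on their elements kind.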
+    if ((receiver_map->instance_type() & kNotStringTag) == 0) {
+      cached_stub = isolate()->builtins()->KeyedLoadIC_String();
+    } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+      cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
+    } else {
+      bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+      ElementsKind elements_kind = receiver_map->elements_kind();
+      if (receiver_map->has_indexed_interceptor()) {
+        cached_stub = LoadIndexedInterceptorStub(isolate()).GetCode();
+      } else if (IsSloppyArgumentsElements(elements_kind)) {
+        cached_stub = KeyedLoadSloppyArgumentsStub(isolate()).GetCode();
+      } else if (IsFastElementsKind(elements_kind) ||
+                 IsExternalArrayElementsKind(elements_kind) ||
+                 IsFixedTypedArrayElementsKind(elements_kind)) {
+        cached_stub = LoadFastElementStub(isolate(), is_js_array, elements_kind)
+                          .GetCode();
+      } else {
+        DCHECK(elements_kind == DICTIONARY_ELEMENTS);
+        cached_stub = LoadDictionaryElementStub(isolate()).GetCode();
+      }
+    }
+
+    handlers->Add(cached_stub);
+  }
+}
+}
+}  // namespace v8::internal
diff --git a/src/ic/handler-compiler.h b/src/ic/handler-compiler.h
new file mode 100644
index 0000000..f033f3f
--- /dev/null
+++ b/src/ic/handler-compiler.h
@@ -0,0 +1,275 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_HANDLER_COMPILER_H_
+#define V8_IC_HANDLER_COMPILER_H_
+
+#include "src/ic/access-compiler.h"
+#include "src/ic/ic-state.h"
+
+namespace v8 {
+namespace internal {
+
+class CallOptimization;
+
+enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
+
+class PropertyHandlerCompiler : public PropertyAccessCompiler {
+ public:
+  static Handle<Code> Find(Handle<Name> name, Handle<Map> map, Code::Kind kind,
+                           CacheHolderFlag cache_holder, Code::StubType type);
+
+ protected:
+  PropertyHandlerCompiler(Isolate* isolate, Code::Kind kind,
+                          Handle<HeapType> type, Handle<JSObject> holder,
+                          CacheHolderFlag cache_holder)
+      : PropertyAccessCompiler(isolate, kind, cache_holder),
+        type_(type),
+        holder_(holder) {}
+
+  virtual ~PropertyHandlerCompiler() {}
+
+  virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+                                  Label* miss) {
+    UNREACHABLE();
+    return receiver();
+  }
+
+  virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
+
+  Register Frontend(Register object_reg, Handle<Name> name);
+  void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
+                                 Register scratch1, Register scratch2);
+
+  // TODO(verwaest): Make non-static.
+  static void GenerateFastApiCall(MacroAssembler* masm,
+                                  const CallOptimization& optimization,
+                                  Handle<Map> receiver_map, Register receiver,
+                                  Register scratch, bool is_store, int argc,
+                                  Register* values);
+
+  // Helper function used to check that the dictionary doesn't contain
+  // the property. This function may return false negatives, so miss_label
+  // must always call a backup property check that is complete.
+  // This function is safe to call if the receiver has fast properties.
+  // Name must be unique and receiver must be a heap object.
+  static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
+                                               Label* miss_label,
+                                               Register receiver,
+                                               Handle<Name> name, Register r0,
+                                               Register r1);
+
+  // Generate code to check that a global property cell is empty. Create
+  // the property cell at compilation time if no cell exists for the
+  // property.
+  static void GenerateCheckPropertyCell(MacroAssembler* masm,
+                                        Handle<JSGlobalObject> global,
+                                        Handle<Name> name, Register scratch,
+                                        Label* miss);
+
+  // Generates code that verifies that the property holder has not changed
+  // (checking maps of objects in the prototype chain for fast and global
+  // objects or doing negative lookup for slow objects, ensures that the
+  // property cells for global objects are still empty) and checks that the map
+  // of the holder has not changed. If necessary the function also generates
+  // code for security check in case of global object holders. Helps to make
+  // sure that the current IC is still valid.
+  //
+  // The scratch and holder registers are always clobbered, but the object
+  // register is only clobbered if it is the same as the holder register. The
+  // function returns a register containing the holder - either object_reg or
+  // holder_reg.
+  Register CheckPrototypes(Register object_reg, Register holder_reg,
+                           Register scratch1, Register scratch2,
+                           Handle<Name> name, Label* miss,
+                           PrototypeCheckType check = CHECK_ALL_MAPS);
+
+  Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name);
+  void set_type_for_object(Handle<Object> object);
+  void set_holder(Handle<JSObject> holder) { holder_ = holder; }
+  Handle<HeapType> type() const { return type_; }
+  Handle<JSObject> holder() const { return holder_; }
+
+ private:
+  Handle<HeapType> type_;
+  Handle<JSObject> holder_;
+};
+
+
+class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+  NamedLoadHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
+                           Handle<JSObject> holder,
+                           CacheHolderFlag cache_holder)
+      : PropertyHandlerCompiler(isolate, Code::LOAD_IC, type, holder,
+                                cache_holder) {}
+
+  virtual ~NamedLoadHandlerCompiler() {}
+
+  Handle<Code> CompileLoadField(Handle<Name> name, FieldIndex index);
+
+  Handle<Code> CompileLoadCallback(Handle<Name> name,
+                                   Handle<ExecutableAccessorInfo> callback);
+
+  Handle<Code> CompileLoadCallback(Handle<Name> name,
+                                   const CallOptimization& call_optimization);
+
+  Handle<Code> CompileLoadConstant(Handle<Name> name, int constant_index);
+
+  // The LookupIterator is used to perform a lookup behind the interceptor. If
+  // the iterator points to a LookupIterator::PROPERTY, its access will be
+  // inlined.
+  Handle<Code> CompileLoadInterceptor(LookupIterator* it);
+
+  Handle<Code> CompileLoadViaGetter(Handle<Name> name,
+                                    Handle<JSFunction> getter);
+
+  Handle<Code> CompileLoadGlobal(Handle<PropertyCell> cell, Handle<Name> name,
+                                 bool is_configurable);
+
+  // Static interface
+  static Handle<Code> ComputeLoadNonexistent(Handle<Name> name,
+                                             Handle<HeapType> type);
+
+  static void GenerateLoadViaGetter(MacroAssembler* masm, Handle<HeapType> type,
+                                    Register receiver,
+                                    Handle<JSFunction> getter);
+
+  static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
+    GenerateLoadViaGetter(masm, Handle<HeapType>::null(), no_reg,
+                          Handle<JSFunction>());
+  }
+
+  static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
+                                            Register receiver,
+                                            Register scratch1,
+                                            Register scratch2,
+                                            Label* miss_label);
+
+  // These constants describe the structure of the interceptor arguments on the
+  // stack. The arguments are pushed by the (platform-specific)
+  // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
+  // LoadWithInterceptor.
+  static const int kInterceptorArgsNameIndex = 0;
+  static const int kInterceptorArgsInfoIndex = 1;
+  static const int kInterceptorArgsThisIndex = 2;
+  static const int kInterceptorArgsHolderIndex = 3;
+  static const int kInterceptorArgsLength = 4;
+
+ protected:
+  virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+                                  Label* miss);
+
+  virtual void FrontendFooter(Handle<Name> name, Label* miss);
+
+ private:
+  Handle<Code> CompileLoadNonexistent(Handle<Name> name);
+  void GenerateLoadConstant(Handle<Object> value);
+  void GenerateLoadCallback(Register reg,
+                            Handle<ExecutableAccessorInfo> callback);
+  void GenerateLoadCallback(const CallOptimization& call_optimization,
+                            Handle<Map> receiver_map);
+  void GenerateLoadInterceptor(Register holder_reg);
+  void GenerateLoadInterceptorWithFollowup(LookupIterator* it,
+                                           Register holder_reg);
+  void GenerateLoadPostInterceptor(LookupIterator* it, Register reg);
+
+  // Generates prototype loading code that uses the objects from the
+  // context we were in when this function was called. If the context
+  // has changed, a jump to miss is performed. This ties the generated
+  // code to a particular context and so must not be used in cases
+  // where the generated code is not allowed to have references to
+  // objects from a context.
+  static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
+                                                        int index,
+                                                        Register prototype,
+                                                        Label* miss);
+
+
+  Register scratch4() { return registers_[5]; }
+};
+
+
+class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+  explicit NamedStoreHandlerCompiler(Isolate* isolate, Handle<HeapType> type,
+                                     Handle<JSObject> holder)
+      : PropertyHandlerCompiler(isolate, Code::STORE_IC, type, holder,
+                                kCacheOnReceiver) {}
+
+  virtual ~NamedStoreHandlerCompiler() {}
+
+  Handle<Code> CompileStoreTransition(Handle<Map> transition,
+                                      Handle<Name> name);
+  Handle<Code> CompileStoreField(LookupIterator* it);
+  Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
+                                    Handle<ExecutableAccessorInfo> callback);
+  Handle<Code> CompileStoreCallback(Handle<JSObject> object, Handle<Name> name,
+                                    const CallOptimization& call_optimization);
+  Handle<Code> CompileStoreViaSetter(Handle<JSObject> object, Handle<Name> name,
+                                     Handle<JSFunction> setter);
+  Handle<Code> CompileStoreInterceptor(Handle<Name> name);
+
+  static void GenerateStoreViaSetter(MacroAssembler* masm,
+                                     Handle<HeapType> type, Register receiver,
+                                     Handle<JSFunction> setter);
+
+  static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
+    GenerateStoreViaSetter(masm, Handle<HeapType>::null(), no_reg,
+                           Handle<JSFunction>());
+  }
+
+  static void GenerateSlow(MacroAssembler* masm);
+
+ protected:
+  virtual Register FrontendHeader(Register object_reg, Handle<Name> name,
+                                  Label* miss);
+
+  virtual void FrontendFooter(Handle<Name> name, Label* miss);
+  void GenerateRestoreName(Label* label, Handle<Name> name);
+
+ private:
+  void GenerateStoreTransition(Handle<Map> transition, Handle<Name> name,
+                               Register receiver_reg, Register name_reg,
+                               Register value_reg, Register scratch1,
+                               Register scratch2, Register scratch3,
+                               Label* miss_label, Label* slow);
+
+  void GenerateStoreField(LookupIterator* lookup, Register value_reg,
+                          Label* miss_label);
+
+  static Builtins::Name SlowBuiltin(Code::Kind kind) {
+    switch (kind) {
+      case Code::STORE_IC:
+        return Builtins::kStoreIC_Slow;
+      case Code::KEYED_STORE_IC:
+        return Builtins::kKeyedStoreIC_Slow;
+      default:
+        UNREACHABLE();
+    }
+    return Builtins::kStoreIC_Slow;
+  }
+
+  static Register value();
+};
+
+
+class ElementHandlerCompiler : public PropertyHandlerCompiler {
+ public:
+  explicit ElementHandlerCompiler(Isolate* isolate)
+      : PropertyHandlerCompiler(isolate, Code::KEYED_LOAD_IC,
+                                Handle<HeapType>::null(),
+                                Handle<JSObject>::null(), kCacheOnReceiver) {}
+
+  virtual ~ElementHandlerCompiler() {}
+
+  void CompileElementHandlers(MapHandleList* receiver_maps,
+                              CodeHandleList* handlers);
+
+  static void GenerateStoreSlow(MacroAssembler* masm);
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_IC_HANDLER_COMPILER_H_
diff --git a/src/ic/ia32/access-compiler-ia32.cc b/src/ic/ia32/access-compiler-ia32.cc
new file mode 100644
index 0000000..9bcbef0
--- /dev/null
+++ b/src/ic/ia32/access-compiler-ia32.cc
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+  return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(ebx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  static Register registers[] = {receiver, name, ebx, edi, no_reg};
+  return registers;
+}
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ia32/handler-compiler-ia32.cc b/src/ic/ia32/handler-compiler-ia32.cc
new file mode 100644
index 0000000..fd97154
--- /dev/null
+++ b/src/ic/ia32/handler-compiler-ia32.cc
@@ -0,0 +1,853 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> getter) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ mov(receiver,
+               FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  }
+  __ ret(0);
+}
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(name->IsUniqueName());
+  DCHECK(!receiver.is(scratch0));
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1);
+
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
+            kInterceptorOrAccessCheckNeededMask);
+  __ j(not_zero, miss_label);
+
+  // Check that receiver is a JSObject.
+  __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+  __ j(below, miss_label);
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+  // Check that the properties array is a dictionary.
+  __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->hash_table_map()));
+  __ j(not_equal, miss_label);
+
+  Label done;
+  NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
+                                                   properties, name, scratch1);
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(masm->isolate()->native_context()->get(index)));
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+  __ mov(scratch, Operand(esi, offset));
+  __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
+  __ j(not_equal, miss);
+
+  // Load its initial map. The global functions all have initial maps.
+  __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+  // Load the prototype from the initial map.
+  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register scratch1,
+    Register scratch2, Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(eax, scratch1);
+  __ ret(0);
+}
+
+
+// Generate a call to an API function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that should be removed
+// when api call ICs are generated in hydrogen.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch_in,
+    bool is_store, int argc, Register* values) {
+  // Copy return value.
+  __ pop(scratch_in);
+  // receiver
+  __ push(receiver);
+  // Write the arguments to stack frame.
+  for (int i = 0; i < argc; i++) {
+    Register arg = values[argc - 1 - i];
+    DCHECK(!receiver.is(arg));
+    DCHECK(!scratch_in.is(arg));
+    __ push(arg);
+  }
+  __ push(scratch_in);
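+  // Stack layout at this point (kPointerSize == 4 on ia32):
+  //   esp[0]              : return address
+  //   esp[4]              : values[0] (last argument pushed)
+  //   ...
+  //   esp[4 * argc]       : values[argc - 1]
+  //   esp[4 * (argc + 1)] : receiver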
+  // Stack now matches the JSFunction ABI.
+  DCHECK(optimization.is_simple_api_call());
+
+  // ABI for CallApiFunctionStub.
+  Register callee = eax;
+  Register call_data = ebx;
+  Register holder = ecx;
+  Register api_function_address = edx;
+  Register scratch = edi;  // scratch_in is no longer valid.
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Move(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ LoadHeapObject(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ LoadHeapObject(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ mov(scratch, api_call_info);
+    __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
+  } else {
+    __ mov(call_data, call_data_obj);
+  }
+
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ mov(api_function_address, Immediate(function_address));
+
+  // Jump to stub.
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
+  if (masm->serializer_enabled()) {
+    __ mov(scratch, Immediate(cell));
+    __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+           Immediate(the_hole));
+  } else {
+    __ cmp(Operand::ForCell(cell), Immediate(the_hole));
+  }
+  __ j(not_equal, miss);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ push(value());
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ mov(receiver,
+               FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ push(receiver);
+      __ push(value());
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ pop(eax);
+
+    // Restore context register.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  }
+  __ ret(0);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+  __ push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  Register scratch = name;
+  __ mov(scratch, Immediate(interceptor));
+  __ push(scratch);
+  __ push(receiver);
+  __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+  __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+                           NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+
+  DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(value);
+  __ push(ebx);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
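+  // Three arguments (receiver, name, value) were pushed above; the runtime
+  // call returns a single value.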
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ bind(label);
+    __ mov(this->name(), Immediate(name));
+  }
+}
+
+
+// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// store is successful.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+    Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+    Register storage_reg, Register value_reg, Register scratch1,
+    Register scratch2, Register unused, Label* miss_label, Label* slow) {
+  int descriptor = transition->LastAdded();
+  DescriptorArray* descriptors = transition->instance_descriptors();
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  DCHECK(!representation.IsNone());
+
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+    __ CmpObject(value_reg, constant);
+    __ j(not_equal, miss_label);
+  } else if (representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      Label do_store;
+      while (true) {
+        __ CompareMap(value_reg, it.Current());
+        it.Advance();
+        if (it.Done()) {
+          __ j(not_equal, miss_label);
+          break;
+        }
+        __ j(equal, &do_store, Label::kNear);
+      }
+      __ bind(&do_store);
+    }
+  } else if (representation.IsDouble()) {
+    Label do_store, heap_number;
+    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE);
+
+    __ JumpIfNotSmi(value_reg, &heap_number);
+    __ SmiUntag(value_reg);
+    __ Cvtsi2sd(xmm0, value_reg);
+    __ SmiTag(value_reg);
+    __ jmp(&do_store);
+
+    __ bind(&heap_number);
+    __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
+                DONT_DO_SMI_CHECK);
+    __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ bind(&do_store);
+    __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+  }
+
+  // Stub never generated for objects that require access checks.
+  DCHECK(!transition->is_access_check_needed());
+
+  // Perform map transition for the receiver if necessary.
+  if (details.type() == FIELD &&
+      Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ pop(scratch1);  // Return address.
+    __ push(receiver_reg);
+    __ push(Immediate(transition));
+    __ push(value_reg);
+    __ push(scratch1);
+    __ TailCallExternalReference(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                          isolate()),
+        3, 1);
+    return;
+  }
+
+  // Update the map of the object.
+  __ mov(scratch1, Immediate(transition));
+  __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+  // Update the write barrier for the map field.
+  __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  if (details.type() == CONSTANT) {
+    DCHECK(value_reg.is(eax));
+    __ ret(0);
+    return;
+  }
+
+  int index = transition->instance_descriptors()->GetFieldIndex(
+      transition->LastAdded());
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties are not going to change.
+  index -= transition->inobject_properties();
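+  // For example, with 4 in-object properties a field index of 2 gives
+  // index == -2 here: the slot at instance_size - 2 * kPointerSize inside
+  // the object. Non-negative indices land in the properties array below.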
+
+  SmiCheck smi_check =
+      representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  // TODO(verwaest): Share this code as a code stub.
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = transition->instance_size() + (index * kPointerSize);
+    if (representation.IsDouble()) {
+      __ mov(FieldOperand(receiver_reg, offset), storage_reg);
+    } else {
+      __ mov(FieldOperand(receiver_reg, offset), value_reg);
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array (optimistically).
+    __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+    if (representation.IsDouble()) {
+      __ mov(FieldOperand(scratch1, offset), storage_reg);
+    } else {
+      __ mov(FieldOperand(scratch1, offset), value_reg);
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+    }
+  }
+
+  // Return the value (register eax).
+  DCHECK(value_reg.is(eax));
+  __ ret(0);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+                                                   Register value_reg,
+                                                   Label* miss_label) {
+  DCHECK(lookup->representation().IsHeapObject());
+  __ JumpIfSmi(value_reg, miss_label);
+  HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+  Label do_store;
+  while (true) {
+    __ CompareMap(value_reg, it.Current());
+    it.Advance();
+    if (it.Done()) {
+      __ j(not_equal, miss_label);
+      break;
+    }
+    __ j(equal, &do_store, Label::kNear);
+  }
+  __ bind(&do_store);
+
+  StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                      lookup->representation());
+  GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss,
+    PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+  // Make sure there's no overlap between holder and object registers.
+  DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type()->IsConstant())
+    current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain
+  // for fast and global objects, or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() ||
+             current->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+
+      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      bool in_new_space = heap()->InNewSpace(*prototype);
+      // Two possible reasons for loading the prototype from the map:
+      // (1) Can't store references to new space in code.
+      // (2) Handler is shared for all receivers with the same prototype
+      //     map (but not necessarily the same prototype instance).
+      bool load_prototype_from_map = in_new_space || depth == 1;
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+      }
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      // This allows us to install generated handlers for accesses to the
+      // global proxy (as opposed to using slow ICs). See corresponding code
+      // in LookupForRead().
+      if (current_map->IsJSGlobalProxyMap()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      }
+
+      if (load_prototype_from_map) {
+        // Save the map in scratch1 for later.
+        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (load_prototype_from_map) {
+        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        __ mov(reg, prototype);
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    // Check the holder map.
+    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  DCHECK(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ jmp(&success);
+    __ bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ jmp(&success);
+    GenerateRestoreName(miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<ExecutableAccessorInfo> callback) {
+  // Insert additional parameters into the stack frame above return address.
+  DCHECK(!scratch3().is(reg));
+  __ pop(scratch3());  // Get return address to place it below.
+
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
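+  // The pushes below build the PropertyCallbackArguments array in reverse,
+  // so the holder (index 0) ends up nearest the stack pointer.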
+  __ push(receiver());  // receiver
+  // Push data from ExecutableAccessorInfo.
+  if (isolate()->heap()->InNewSpace(callback->data())) {
+    DCHECK(!scratch2().is(reg));
+    __ mov(scratch2(), Immediate(callback));
+    __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
+  } else {
+    __ push(Immediate(Handle<Object>(callback->data(), isolate())));
+  }
+  __ push(Immediate(isolate()->factory()->undefined_value()));  // ReturnValue
+  // ReturnValue default value
+  __ push(Immediate(isolate()->factory()->undefined_value()));
+  __ push(Immediate(reinterpret_cast<int>(isolate())));
+  __ push(reg);  // holder
+
+  // Save a pointer to where we pushed the arguments. This will be
+  // passed as the const PropertyAccessorInfo& to the C++ callback.
+  __ push(esp);
+
+  __ push(name());  // name
+
+  __ push(scratch3());  // Restore return address.
+
+  // ABI for CallApiGetter.
+  Register getter_address = ApiGetterDescriptor::function_address();
+  Address function_address = v8::ToCData<Address>(callback->getter());
+  __ mov(getter_address, Immediate(function_address));
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ LoadObject(eax, value);
+  __ ret(0);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it is different from
+  // the holder and is needed in case the interceptor returns without a
+  // result.
+  // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+  // case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+
+    if (must_preserve_receiver_reg) {
+      __ push(receiver());
+    }
+    __ push(holder_reg);
+    __ push(this->name());
+
+    // Invoke an interceptor.  Note: map checks from receiver to
+    // interceptor's holder have been compiled before (see a caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if interceptor provided a value for property.  If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ cmp(eax, factory()->no_interceptor_result_sentinel());
+    __ j(equal, &interceptor_failed);
+    frame_scope.GenerateLeaveFrame();
+    __ ret(0);
+
+    // Clobber registers when generating debug-code to provoke errors.
+    __ bind(&interceptor_failed);
+    if (FLAG_debug_code) {
+      __ mov(receiver(), Immediate(bit_cast<int32_t>(kZapValue)));
+      __ mov(holder_reg, Immediate(bit_cast<int32_t>(kZapValue)));
+      __ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
+    }
+
+    __ pop(this->name());
+    __ pop(holder_reg);
+    if (must_preserve_receiver_reg) {
+      __ pop(receiver());
+    }
+
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  // Call the runtime system to load the interceptor.
+  __ pop(scratch2());  // save old return address
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+  __ push(scratch2());  // restore old return address
+
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(
+      ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  Register holder_reg = Frontend(receiver(), name);
+
+  __ pop(scratch1());  // remove the return address
+  __ push(receiver());
+  __ push(holder_reg);
+  __ Push(callback);
+  __ Push(name);
+  __ push(value());
+  __ push(scratch1());  // restore return address
+
+  // Do tail-call to the runtime system.
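+  // Five arguments (receiver, holder, callback, name, value) were pushed
+  // above; the call returns a single value.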
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+    Handle<Name> name) {
+  __ pop(scratch1());  // remove the return address
+  __ push(receiver());
+  __ push(this->name());
+  __ push(value());
+  __ push(scratch1());  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property = ExternalReference(
+      IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+
+  FrontendHeader(receiver(), name, &miss);
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  if (masm()->serializer_enabled()) {
+    __ mov(result, Immediate(cell));
+    __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
+  } else {
+    __ mov(result, Operand::ForCell(cell));
+  }
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ cmp(result, factory()->the_hole_value());
+    __ j(equal, &miss);
+  } else if (FLAG_debug_code) {
+    __ cmp(result, factory()->the_hole_value());
+    __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1);
+  // The code above already loads the result into the return register.
+  __ ret(0);
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ia32/ic-compiler-ia32.cc b/src/ic/ia32/ic-compiler-ia32.cc
new file mode 100644
index 0000000..ac42f30
--- /dev/null
+++ b/src/ic/ia32/ic-compiler-ia32.cc
@@ -0,0 +1,128 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                                    StrictMode strict_mode) {
+  // Return address is on the stack.
+  DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
+         !ebx.is(StoreDescriptor::NameRegister()) &&
+         !ebx.is(StoreDescriptor::ValueRegister()));
+  __ pop(ebx);
+  __ push(StoreDescriptor::ReceiverRegister());
+  __ push(StoreDescriptor::NameRegister());
+  __ push(StoreDescriptor::ValueRegister());
+  __ push(Immediate(Smi::FromInt(strict_mode)));
+  __ push(ebx);  // return address
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+                                                    Handle<Name> name,
+                                                    Code::StubType type,
+                                                    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // In case we are compiling an IC for dictionary loads and stores, just
+    // check whether the name is unique.
+    if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+      __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+    } else {
+      __ cmp(this->name(), Immediate(name));
+      __ j(not_equal, &miss);
+    }
+  }
+
+  Label number_case;
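+  // A smi receiver can only match a Number handler, so send smis straight to
+  // number_case (or to the miss label when no Number type is present).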
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+  Register map_reg = scratch1();
+  DCHECK(kind() != Code::KEYED_STORE_IC ||
+         map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      __ cmp(map_reg, map);
+      if (type->Is(HeapType::Number())) {
+        DCHECK(!number_case.is_unused());
+        __ bind(&number_case);
+      }
+      __ j(equal, handlers->at(current));
+    }
+  }
+  DCHECK(number_of_handled_maps != 0);
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+  return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+  __ JumpIfSmi(receiver(), &miss, Label::kNear);
+  __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    __ cmp(scratch1(), receiver_maps->at(i));
+    if (transitioned_maps->at(i).is_null()) {
+      __ j(equal, handler_stubs->at(i));
+    } else {
+      Label next_map;
+      __ j(not_equal, &next_map, Label::kNear);
+      __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
+  }
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ia32/ic-ia32.cc b/src/ic/ia32/ic-ia32.cc
new file mode 100644
index 0000000..67247d2
--- /dev/null
+++ b/src/ic/ia32/ic-ia32.cc
@@ -0,0 +1,984 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, global_object);
+  __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
+  __ j(equal, global_object);
+  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, global_object);
+}
+
+
+// Helper function used to load a property from a dictionary backing
+// storage. This function may fail to load a property even though it is
+// in the dictionary, so code at miss_label must always call a backup
+// property load that is complete. This function is safe to call if
+// name is not internalized, and will jump to the miss_label in that
+// case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+                                   Register elements, Register name,
+                                   Register r0, Register r1, Register result) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is unchanged.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // Scratch registers:
+  //
+  // r0   - used for the index into the property dictionary.
+  //
+  // r1   - used to hold the capacity of the property dictionary.
+  //
+  // result - holds the result on exit.
+
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+                                                   elements, name, r0, r1);
+
+  // If probing finds an entry in the dictionary, r0 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property.
+  __ bind(&done);
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
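+  // Each dictionary entry is a (key, value, details) triple, so the details
+  // word sits two pointers past the entry start and the value one pointer
+  // past it (kValueOffset below).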
+  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+  __ j(not_zero, miss_label);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not internalized, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
+                                    Register elements, Register name,
+                                    Register value, Register r0, Register r1) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is clobbered.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // value - holds the value to store and is unchanged.
+  //
+  // r0 - used for index into the property dictionary and is clobbered.
+  //
+  // r1 - used to hold the capacity of the property dictionary and is clobbered.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+                                                   elements, name, r0, r1);
+
+  // If probing finds an entry in the dictionary, r0 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property that is not read only.
+  __ bind(&done);
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY))
+      << kSmiTagSize;
+  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+          Immediate(kTypeAndReadOnlyMask));
+  __ j(not_zero, miss_label);
+
+  // Store the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+  __ mov(Operand(r0, 0), value);
+
+  // Update write barrier. Make sure not to clobber the value.
+  __ mov(r1, value);
+  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver, Register map,
+                                           int interceptor_bit, Label* slow) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  // Scratch registers:
+  //   map - used to hold the map of the receiver.
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+
+  // Get the map of the receiver.
+  __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  // Check bit field.
+  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+            (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+  __ j(not_zero, slow);
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing
+  // into string objects works as intended.
+  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+
+  __ CmpInstanceType(map, JS_OBJECT_TYPE);
+  __ j(below, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register scratch,
+                                  Register result, Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  //   key - holds the key and is unchanged (must be a smi).
+  // Scratch registers:
+  //   scratch - used to hold elements of the receiver and the loaded value.
+  //   result - holds the result on exit if the load succeeds and
+  //            we fall through.
+
+  __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
+    __ CheckMap(scratch, masm->isolate()->factory()->fixed_array_map(),
+                not_fast_array, DONT_DO_SMI_CHECK);
+  } else {
+    __ AssertFastElements(scratch);
+  }
+  // Check that the key (index) is within bounds.
+  __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
+  __ j(above_equal, out_of_range);
+  // Fast case: Do the load.
+  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
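+  // The key is a smi, already shifted left by kSmiTagSize, so the times_2
+  // scale factor yields key * kPointerSize.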
+  __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
+  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, out_of_range);
+  if (!result.is(scratch)) {
+    __ mov(result, scratch);
+  }
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map, Register hash,
+                                 Label* index_string, Label* not_unique) {
+  // Register use:
+  //   key - holds the key and is unchanged. Assumed to be non-smi.
+  // Scratch registers:
+  //   map - used to hold the map of the key.
+  //   hash - used to hold the hash of the key.
+  Label unique;
+  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+  __ j(above, not_unique);
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ j(equal, &unique);
+
+  // Is the string an array index, with cached numeric value?
+  __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
+  __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
+  __ j(zero, index_string);
+
+  // Is the string internalized? We already know it's a string so a single
+  // bit test is enough.
+  STATIC_ASSERT(kNotInternalizedTag != 0);
+  __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
+            kIsNotInternalizedMask);
+  __ j(not_zero, not_unique);
+
+  __ bind(&unique);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(
+    MacroAssembler* masm, Register object, Register key, Register scratch1,
+    Register scratch2, Label* unmapped_case, Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+  Factory* factory = masm->isolate()->factory();
+
+  // Check that the receiver is a JSObject. Because of the elements
+  // map check later, we do not need to check for interceptors or
+  // whether it requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+  __ j(below, slow_case);
+
+  // Check that the key is a positive smi.
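+  // 0x80000001 tests the smi tag bit (bit 0) and the sign bit (bit 31) at
+  // once; only a non-negative smi passes.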
+  __ test(key, Immediate(0x80000001));
+  __ j(not_zero, slow_case);
+
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+  __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+  __ sub(scratch2, Immediate(Smi::FromInt(2)));
+  __ cmp(key, scratch2);
+  __ j(above_equal, unmapped_case);
+
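+  // The parameter map stores the context at index 0 and the arguments
+  // backing store at index 1; mapped entries start at index 2. That is why
+  // the length was adjusted by Smi::FromInt(2) above and why kHeaderSize
+  // below skips two pointers.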
+  // Load element index and check whether it is the hole.
+  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+  __ mov(scratch2,
+         FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize));
+  __ cmp(scratch2, factory->the_hole_value());
+  __ j(equal, unmapped_case);
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  const int kContextOffset = FixedArray::kHeaderSize;
+  __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
+  return FieldOperand(scratch1, scratch2, times_half_pointer_size,
+                      Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                               Register key,
+                                               Register parameter_map,
+                                               Register scratch,
+                                               Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+  __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+  __ cmp(key, scratch);
+  __ j(greater_equal, slow_case);
+  return FieldOperand(backing_store, key, times_half_pointer_size,
+                      FixedArray::kHeaderSize);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // The return address is on the stack.
+  Label slow, check_name, index_smi, index_name, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_name);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This point is also reached from below,
+  // where a numeric string key is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
+                                 Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(eax, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, eax, eax, NULL, &slow);
+  Isolate* isolate = masm->isolate();
+  Counters* counters = isolate->counters();
+  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+  __ ret(0);
+
+  __ bind(&check_number_dictionary);
+  __ mov(ebx, key);
+  __ SmiUntag(ebx);
+  __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  // Check whether the elements object is a number dictionary.
+  // ebx: untagged index
+  // eax: elements
+  __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
+              DONT_DO_SMI_CHECK);
+  Label slow_pop_receiver;
+  // Push receiver on the stack to free up a register for the dictionary
+  // probing.
+  __ push(receiver);
+  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
+  // Pop receiver before returning.
+  __ pop(receiver);
+  __ ret(0);
+
+  __ bind(&slow_pop_receiver);
+  // Pop the receiver from the stack and jump to runtime.
+  __ pop(receiver);
+
+  __ bind(&slow);
+  // Slow case: jump to runtime.
+  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_name);
+  GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
+                                 &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(isolate->factory()->hash_table_map()));
+  __ j(equal, &probe_dictionary);
+
+  // The receiver's map is still in eax, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  if (FLAG_debug_code) {
+    __ cmp(eax, FieldOperand(receiver, HeapObject::kMapOffset));
+    __ Check(equal, kMapIsNoLongerInEax);
+  }
+  __ mov(ebx, eax);  // Keep the map around for later.
+  __ shr(eax, KeyedLookupCache::kMapHashShift);
+  __ mov(edi, FieldOperand(key, String::kHashFieldOffset));
+  __ shr(edi, String::kHashShift);
+  __ xor_(eax, edi);
+  __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
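+  // eax now holds the bucket index:
+  //   ((map >> kMapHashShift) ^ (hash >> kHashShift)) & capacity & hash masks.
+  // Each bucket holds kEntriesPerBucket (map, name) pairs stored as
+  // consecutive pointer pairs, hence the kPointerSizeLog2 + 1 scaling below.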
+
+  // Load the key (consisting of map and internalized string) from the cache
+  // and check for a match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(masm->isolate());
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ mov(edi, eax);
+    __ shl(edi, kPointerSizeLog2 + 1);
+    if (i != 0) {
+      __ add(edi, Immediate(kPointerSize * i * 2));
+    }
+    __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+    __ j(not_equal, &try_next_entry);
+    __ add(edi, Immediate(kPointerSize));
+    __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
+    __ j(equal, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
+
+  __ lea(edi, Operand(eax, 1));
+  __ shl(edi, kPointerSizeLog2 + 1);
+  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
+  __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+  __ j(not_equal, &slow);
+  __ add(edi, Immediate(kPointerSize));
+  __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
+  __ j(not_equal, &slow);
+
+  // Get field offset.
+  // ebx      : receiver's map
+  // eax      : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    if (i != 0) {
+      __ add(eax, Immediate(i));
+    }
+    __ mov(edi,
+           Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
+    __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+    __ sub(edi, eax);
+    __ j(above_equal, &property_array_property);
+    if (i != 0) {
+      __ jmp(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ bind(&load_in_object_property);
+  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
+  __ add(eax, edi);
+  __ mov(eax, FieldOperand(receiver, eax, times_pointer_size, 0));
+  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+  __ ret(0);
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  __ mov(eax,
+         FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize));
+  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+  __ ret(0);
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+
+  __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
+  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
+
+  GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
+  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+  __ ret(0);
+
+  __ bind(&index_name);
+  __ IndexFromHash(ebx, key);
+  // Now jump to the place where smi keys are handled.
+  __ jmp(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register scratch = ebx;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+  Register result = eax;
+  DCHECK(!result.is(scratch));
+
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ ret(0);
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Label slow, notin;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(receiver.is(edx));
+  DCHECK(name.is(ecx));
+  DCHECK(value.is(eax));
+
+  Operand mapped_location = GenerateMappedArgumentsLookup(
+      masm, receiver, name, ebx, edi, &notin, &slow);
+  __ mov(mapped_location, value);
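+  // RecordWrite needs the holding object (ebx: the context here), the slot
+  // address and the value in registers; materialize the address with lea and
+  // copy the value so eax stays intact for the return.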
+  __ lea(ecx, mapped_location);
+  __ mov(edx, value);
+  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in ebx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, name, ebx, edi, &slow);
+  __ mov(unmapped_location, value);
+  __ lea(edi, unmapped_location);
+  __ mov(edx, value);
+  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+  DCHECK(value.is(eax));
+  // key is a smi.
+  // ebx: FixedArray receiver->elements
+  // edi: receiver map
+  // Fast case: Do the store; the value could be an Object or a double.
+  __ bind(fast_object);
+  if (check_map == kCheckMap) {
+    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+    __ j(not_equal, fast_double);
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+  Label holecheck_passed1;
+  __ cmp(FixedArrayElementOperand(ebx, key),
+         masm->isolate()->factory()->the_hole_value());
+  __ j(not_equal, &holecheck_passed1);
+  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  __ bind(&holecheck_passed1);
+
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+  }
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+  __ mov(FixedArrayElementOperand(ebx, key), value);
+  __ ret(0);
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ CheckFastObjectElements(edi, &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+  }
+  __ mov(FixedArrayElementOperand(ebx, key), value);
+  // Update write barrier for the elements array address.
+  __ mov(edx, value);  // Preserve the value which is returned.
+  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ ret(0);
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+    __ j(not_equal, slow);
+    // If the value is a number, store it as a double in the FastDoubleElements
+    // array.
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
+  __ j(not_equal, &fast_double_without_map_check);
+  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, ebx, key, edi, xmm0,
+                                 &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+  }
+  __ ret(0);
+
+  __ bind(&transition_smi_elements);
+  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  // Transition the array appropriately depending on the value type.
+  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
+              &non_double_value, DONT_DO_SMI_CHECK);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
+  // and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   ebx, mode, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
+                                         edi, slow);
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, ebx, mode, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS,
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         ebx, edi, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
+                                                      value, ebx, mode, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+}
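+
+// Taken together, the paths above implement roughly this store policy (a
+// condensed sketch of what the assembly does):
+//
+//   if value is a smi:
+//     store in place; any fast elements kind can hold a smi;
+//   else if value is a HeapNumber:
+//     transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS and store the
+//     value unboxed as a double;
+//   else:
+//     transition FAST_SMI_ELEMENTS or FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS
+//     and store the pointer, emitting a write barrier.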
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictMode strict_mode) {
+  // Return address is on the stack.
+  Label slow, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+  // Get the map from the receiver.
+  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+            1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+  __ j(not_zero, &slow);
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow);
+  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
+  __ j(equal, &array);
+  // Check that the object is some kind of JSObject.
+  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+  __ j(below, &slow);
+
+  // Object case: Check key against length in the elements array.
+  // Key is a smi.
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ j(below, &fast_object);
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // receiver is a JSArray.
+  // key is a smi.
+  // ebx: receiver->elements, a FixedArray
+  // edi: receiver map
+  // flags: compare (key, receiver.length())
+  // Do not leave holes in the array:
+  __ j(not_equal, &slow);
+  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+  __ j(not_equal, &check_if_double_array);
+  __ jmp(&fast_object_grow);
+
+  __ bind(&check_if_double_array);
+  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+  __ j(not_equal, &slow);
+  __ jmp(&fast_double_grow);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+  __ bind(&array);
+  // receiver is a JSArray.
+  // key is a smi.
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array and fall through to the
+  // common store code.
+  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
+  __ j(above_equal, &extra);
+
+  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
+                                  kCheckMap, kDontIncrementLength);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength);
+}
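+
+// The two helper instantiations above split the generic store into an
+// in-bounds flavour and a grow-by-one flavour. For example, a store such as
+// a[a.length] = v on a fast JSArray fails the length check, reaches &extra,
+// passes the capacity check against the backing FixedArray, and lands in a
+// *_grow path, which also bumps JSArray::length by one.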
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = eax;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+  Label slow;
+
+  __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
+                                  JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), edi, ebx, eax);
+  __ ret(0);
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(ebx);
+}
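+
+// After LoadIC_PushArgs the stack is, from the top: return address, name,
+// receiver -- the two IC arguments are slid underneath the return address so
+// the tail-called entry sees them as ordinary stack arguments.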
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // Return address is on the stack.
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // Return address is on the stack.
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, flags, false, StoreDescriptor::ReceiverRegister(),
+      StoreDescriptor::NameRegister(), ebx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+
+  DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(value);
+  __ push(ebx);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Label restore_miss;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Register dictionary = ebx;
+
+  __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+  // A lot of registers are needed for storing to slow case
+  // objects. Push and restore receiver but rely on
+  // GenerateDictionaryStore preserving the value and name.
+  __ push(receiver);
+  GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
+                          receiver, edi);
+  __ Drop(1);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1);
+  __ ret(0);
+
+  __ bind(&restore_miss);
+  __ pop(receiver);
+  __ IncrementCounter(counters->store_normal_miss(), 1);
+  GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al instruction,
+  // nothing was inlined.
+  return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al instruction,
+  // nothing was inlined.
+  if (*test_instruction_address != Assembler::kTestAlByte) {
+    DCHECK(*test_instruction_address == Assembler::kNopByte);
+    return;
+  }
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta byte encodes the distance back to the short jump whose
+  // condition code is patched below.
+  uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, test=%p, delta=%d\n", address,
+           test_instruction_address, delta);
+  }
+
+  // Patch with a short conditional jump. Enabling means switching from a
+  // short jump-if-carry/not-carry to jump-if-zero/not-zero, whereas
+  // disabling is the reverse.
+  Address jmp_address = test_instruction_address - delta;
+  DCHECK((check == ENABLE_INLINED_SMI_CHECK)
+             ? (*jmp_address == Assembler::kJncShortOpcode ||
+                *jmp_address == Assembler::kJcShortOpcode)
+             : (*jmp_address == Assembler::kJnzShortOpcode ||
+                *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc =
+      (check == ENABLE_INLINED_SMI_CHECK)
+          ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+          : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
+}
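+
+// Patch-site shape assumed by the code above (a sketch; the exact encodings
+// are given by the Assembler::k* constants):
+//
+//   jc/jnc short <target>   ; at test_instruction_address - delta
+//   ... inlined map/smi check ...
+//   call <ic stub>
+//   test al, <delta>        ; kTestAlByte marker plus one delta byte
+//
+// Enabling the inlined smi check rewrites the short jump's condition byte
+// from carry-based to zero-based; disabling does the reverse.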
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ia32/stub-cache-ia32.cc b/src/ic/ia32/stub-cache-ia32.cc
new file mode 100644
index 0000000..c1f7c9a
--- /dev/null
+++ b/src/ic/ia32/stub-cache-ia32.cc
@@ -0,0 +1,189 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_IA32
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, bool leave_frame,
+                       StubCache::Table table, Register name, Register receiver,
+                       // Number of the cache entry, pointer-size scaled.
+                       Register offset, Register extra) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  Label miss;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ lea(offset, Operand(offset, offset, times_2, 0));
+
+  if (extra.is_valid()) {
+    // Get the code entry from the cache.
+    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
+
+    // Check that the key in the entry matches the name.
+    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+    __ j(not_equal, &miss);
+
+    // Check the map matches.
+    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+    __ j(not_equal, &miss);
+
+    // Check that the flags match what we're looking for.
+    __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+    __ cmp(offset, flags);
+    __ j(not_equal, &miss);
+
+#ifdef DEBUG
+    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+      __ jmp(&miss);
+    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+      __ jmp(&miss);
+    }
+#endif
+
+    if (leave_frame) __ leave();
+
+    // Jump to the first instruction in the code stub.
+    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(extra);
+
+    __ bind(&miss);
+  } else {
+    // Save the offset on the stack.
+    __ push(offset);
+
+    // Check that the key in the entry matches the name.
+    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+    __ j(not_equal, &miss);
+
+    // Check the map matches.
+    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+    __ j(not_equal, &miss);
+
+    // Restore offset register.
+    __ mov(offset, Operand(esp, 0));
+
+    // Get the code entry from the cache.
+    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+    // Check that the flags match what we're looking for.
+    __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+    __ cmp(offset, flags);
+    __ j(not_equal, &miss);
+
+#ifdef DEBUG
+    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+      __ jmp(&miss);
+    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+      __ jmp(&miss);
+    }
+#endif
+
+    // Restore offset and re-load code entry from cache.
+    __ pop(offset);
+    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+    if (leave_frame) __ leave();
+
+    // Jump to the first instruction in the code stub.
+    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(offset);
+
+    // Pop at miss.
+    __ bind(&miss);
+    __ pop(offset);
+  }
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+                              bool leave_frame, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Label miss;
+
+  // Assert that code is valid.  The multiplying code relies on the entry size
+  // being 12.
+  DCHECK(sizeof(Entry) == 12);
+
+  // Assert the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Assert that there are no register conflicts.
+  DCHECK(!scratch.is(receiver));
+  DCHECK(!scratch.is(name));
+  DCHECK(!extra.is(receiver));
+  DCHECK(!extra.is(name));
+  DCHECK(!extra.is(scratch));
+
+  // Assert scratch and extra registers are valid, and extra2/3 are unused.
+  DCHECK(!scratch.is(no_reg));
+  DCHECK(extra2.is(no_reg));
+  DCHECK(extra3.is(no_reg));
+
+  Register offset = scratch;
+  scratch = no_reg;
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(offset, flags);
+  // We mask out the last two bits because they are not part of the hash and
+  // they are always 01 for maps; the two 'and' instructions below apply the
+  // same masking.
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+  // ProbeTable expects the offset to be pointer scaled, which it is, because
+  // the heap object tag size is 2 and the pointer size log 2 is also 2.
+  DCHECK(kCacheIndexShift == kPointerSizeLog2);
+
+  // Probe the primary table.
+  ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
+             offset, extra);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(offset, flags);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+  __ sub(offset, name);
+  __ add(offset, Immediate(flags));
+  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
+
+  // Probe the secondary table.
+  ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
+             offset, extra);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
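+
+// In scalar form, the two probes above compute roughly (a 32-bit sketch
+// using the same masks as the assembly):
+//
+//   primary   = ((name->hash() + receiver->map()) ^ flags)
+//               & ((kPrimaryTableSize - 1) << kCacheIndexShift);
+//   secondary = (primary - name + flags)
+//               & ((kSecondaryTableSize - 1) << kCacheIndexShift);
+//
+// where |name| enters the secondary hash as a raw pointer value.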
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_IA32
diff --git a/src/ic/ic-compiler.cc b/src/ic/ic-compiler.cc
new file mode 100644
index 0000000..aeae4ba
--- /dev/null
+++ b/src/ic/ic-compiler.cc
@@ -0,0 +1,447 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic-inl.h"
+#include "src/ic/ic-compiler.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+Handle<Code> PropertyICCompiler::Find(Handle<Name> name,
+                                      Handle<Map> stub_holder, Code::Kind kind,
+                                      ExtraICState extra_state,
+                                      CacheHolderFlag cache_holder) {
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(kind, extra_state, cache_holder);
+  Object* probe = stub_holder->FindInCodeCache(*name, flags);
+  if (probe->IsCode()) return handle(Code::cast(probe));
+  return Handle<Code>::null();
+}
+
+
+bool PropertyICCompiler::IncludesNumberType(TypeHandleList* types) {
+  for (int i = 0; i < types->length(); ++i) {
+    if (types->at(i)->Is(HeapType::Number())) return true;
+  }
+  return false;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileMonomorphic(Handle<HeapType> type,
+                                                    Handle<Code> handler,
+                                                    Handle<Name> name,
+                                                    IcCheckType check) {
+  TypeHandleList types(1);
+  CodeHandleList handlers(1);
+  types.Add(type);
+  handlers.Add(handler);
+  Code::StubType stub_type = handler->type();
+  return CompilePolymorphic(&types, &handlers, name, stub_type, check);
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeMonomorphic(
+    Code::Kind kind, Handle<Name> name, Handle<HeapType> type,
+    Handle<Code> handler, ExtraICState extra_ic_state) {
+  Isolate* isolate = name->GetIsolate();
+  if (handler.is_identical_to(isolate->builtins()->LoadIC_Normal()) ||
+      handler.is_identical_to(isolate->builtins()->StoreIC_Normal())) {
+    name = isolate->factory()->normal_ic_symbol();
+  }
+
+  CacheHolderFlag flag;
+  Handle<Map> stub_holder = IC::GetICCacheHolder(*type, isolate, &flag);
+
+  Handle<Code> ic;
+  // There are multiple string maps that all use the same prototype. That
+  // prototype cannot hold multiple handlers, one for each of the string maps,
+  // for a single name. Hence, turn off caching of the IC.
+  bool can_be_cached = !type->Is(HeapType::String());
+  if (can_be_cached) {
+    ic = Find(name, stub_holder, kind, extra_ic_state, flag);
+    if (!ic.is_null()) return ic;
+  }
+
+#ifdef DEBUG
+  if (kind == Code::KEYED_STORE_IC) {
+    DCHECK(STANDARD_STORE ==
+           KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
+  }
+#endif
+
+  PropertyICCompiler ic_compiler(isolate, kind, extra_ic_state, flag);
+  ic = ic_compiler.CompileMonomorphic(type, handler, name, PROPERTY);
+
+  if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
+  return ic;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
+    Handle<Map> receiver_map) {
+  Isolate* isolate = receiver_map->GetIsolate();
+  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
+  Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string();
+
+  Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  Handle<Code> stub;
+  if (receiver_map->has_indexed_interceptor()) {
+    stub = LoadIndexedInterceptorStub(isolate).GetCode();
+  } else if (receiver_map->has_sloppy_arguments_elements()) {
+    stub = KeyedLoadSloppyArgumentsStub(isolate).GetCode();
+  } else if (receiver_map->has_fast_elements() ||
+             receiver_map->has_external_array_elements() ||
+             receiver_map->has_fixed_typed_array_elements()) {
+    stub = LoadFastElementStub(isolate,
+                               receiver_map->instance_type() == JS_ARRAY_TYPE,
+                               elements_kind).GetCode();
+  } else {
+    stub = LoadDictionaryElementStub(isolate).GetCode();
+  }
+  PropertyICCompiler compiler(isolate, Code::KEYED_LOAD_IC);
+  Handle<Code> code =
+      compiler.CompileMonomorphic(HeapType::Class(receiver_map, isolate), stub,
+                                  isolate->factory()->empty_string(), ELEMENT);
+
+  Map::UpdateCodeCache(receiver_map, name, code);
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+    Handle<Map> receiver_map, StrictMode strict_mode,
+    KeyedAccessStoreMode store_mode) {
+  Isolate* isolate = receiver_map->GetIsolate();
+  ExtraICState extra_state =
+      KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
+  Code::Flags flags =
+      Code::ComputeMonomorphicFlags(Code::KEYED_STORE_IC, extra_state);
+
+  DCHECK(store_mode == STANDARD_STORE ||
+         store_mode == STORE_AND_GROW_NO_TRANSITION ||
+         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+
+  Handle<String> name = isolate->factory()->KeyedStoreMonomorphic_string();
+  Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate);
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+  Handle<Code> code =
+      compiler.CompileKeyedStoreMonomorphic(receiver_map, store_mode);
+
+  Map::UpdateCodeCache(receiver_map, name, code);
+  DCHECK(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state()) ==
+         store_mode);
+  return code;
+}
+
+
+Code* PropertyICCompiler::FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
+                                             ExtraICState state) {
+  Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
+  UnseededNumberDictionary* dictionary =
+      isolate->heap()->non_monomorphic_cache();
+  int entry = dictionary->FindEntry(isolate, flags);
+  DCHECK(entry != -1);
+  Object* code = dictionary->ValueAt(entry);
+  // This might be called during the marking phase of the collector,
+  // hence the unchecked cast.
+  return reinterpret_cast<Code*>(code);
+}
+
+
+static void FillCache(Isolate* isolate, Handle<Code> code) {
+  Handle<UnseededNumberDictionary> dictionary = UnseededNumberDictionary::Set(
+      isolate->factory()->non_monomorphic_cache(), code->flags(), code);
+  isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeLoad(Isolate* isolate,
+                                             InlineCacheState ic_state,
+                                             ExtraICState extra_state) {
+  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
+  Handle<UnseededNumberDictionary> cache =
+      isolate->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  PropertyICCompiler compiler(isolate, Code::LOAD_IC);
+  Handle<Code> code;
+  if (ic_state == UNINITIALIZED) {
+    code = compiler.CompileLoadInitialize(flags);
+  } else if (ic_state == PREMONOMORPHIC) {
+    code = compiler.CompileLoadPreMonomorphic(flags);
+  } else {
+    UNREACHABLE();
+  }
+  FillCache(isolate, code);
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeStore(Isolate* isolate,
+                                              InlineCacheState ic_state,
+                                              ExtraICState extra_state) {
+  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
+  Handle<UnseededNumberDictionary> cache =
+      isolate->factory()->non_monomorphic_cache();
+  int entry = cache->FindEntry(isolate, flags);
+  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
+
+  PropertyICCompiler compiler(isolate, Code::STORE_IC);
+  Handle<Code> code;
+  if (ic_state == UNINITIALIZED) {
+    code = compiler.CompileStoreInitialize(flags);
+  } else if (ic_state == PREMONOMORPHIC) {
+    code = compiler.CompileStorePreMonomorphic(flags);
+  } else if (ic_state == GENERIC) {
+    code = compiler.CompileStoreGeneric(flags);
+  } else if (ic_state == MEGAMORPHIC) {
+    code = compiler.CompileStoreMegamorphic(flags);
+  } else {
+    UNREACHABLE();
+  }
+
+  FillCache(isolate, code);
+  return code;
+}
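+
+// ComputeLoad and ComputeStore share one pattern: Code::Flags is the cache
+// key, so two stubs that differ only in extra state (e.g. a sloppy- vs a
+// strict-mode STORE_IC) occupy distinct non_monomorphic_cache entries.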
+
+
+Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
+                                                   CompareNilICStub* stub) {
+  Isolate* isolate = receiver_map->GetIsolate();
+  Handle<String> name(isolate->heap()->empty_string());
+  if (!receiver_map->is_dictionary_map()) {
+    Handle<Code> cached_ic =
+        Find(name, receiver_map, Code::COMPARE_NIL_IC, stub->GetExtraICState());
+    if (!cached_ic.is_null()) return cached_ic;
+  }
+
+  Code::FindAndReplacePattern pattern;
+  pattern.Add(isolate->factory()->meta_map(), receiver_map);
+  Handle<Code> ic = stub->GetCodeCopy(pattern);
+
+  if (!receiver_map->is_dictionary_map()) {
+    Map::UpdateCodeCache(receiver_map, name, ic);
+  }
+
+  return ic;
+}
+
+
+// TODO(verwaest): Change this method so it takes in a TypeHandleList.
+Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
+    MapHandleList* receiver_maps) {
+  Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+  Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
+  Handle<PolymorphicCodeCache> cache =
+      isolate->factory()->polymorphic_code_cache();
+  Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  TypeHandleList types(receiver_maps->length());
+  for (int i = 0; i < receiver_maps->length(); i++) {
+    types.Add(HeapType::Class(receiver_maps->at(i), isolate));
+  }
+  CodeHandleList handlers(receiver_maps->length());
+  ElementHandlerCompiler compiler(isolate);
+  compiler.CompileElementHandlers(receiver_maps, &handlers);
+  PropertyICCompiler ic_compiler(isolate, Code::KEYED_LOAD_IC);
+  Handle<Code> code = ic_compiler.CompilePolymorphic(
+      &types, &handlers, isolate->factory()->empty_string(), Code::NORMAL,
+      ELEMENT);
+
+  isolate->counters()->keyed_load_polymorphic_stubs()->Increment();
+
+  PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::ComputePolymorphic(
+    Code::Kind kind, TypeHandleList* types, CodeHandleList* handlers,
+    int valid_types, Handle<Name> name, ExtraICState extra_ic_state) {
+  Handle<Code> handler = handlers->at(0);
+  Code::StubType type = valid_types == 1 ? handler->type() : Code::NORMAL;
+  DCHECK(kind == Code::LOAD_IC || kind == Code::STORE_IC);
+  PropertyICCompiler ic_compiler(name->GetIsolate(), kind, extra_ic_state);
+  return ic_compiler.CompilePolymorphic(types, handlers, name, type, PROPERTY);
+}
+
+
+Handle<Code> PropertyICCompiler::ComputeKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
+    StrictMode strict_mode) {
+  Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+  DCHECK(store_mode == STANDARD_STORE ||
+         store_mode == STORE_AND_GROW_NO_TRANSITION ||
+         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
+  Handle<PolymorphicCodeCache> cache =
+      isolate->factory()->polymorphic_code_cache();
+  ExtraICState extra_state =
+      KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
+  Code::Flags flags =
+      Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
+  Handle<Object> probe = cache->Lookup(receiver_maps, flags);
+  if (probe->IsCode()) return Handle<Code>::cast(probe);
+
+  PropertyICCompiler compiler(isolate, Code::KEYED_STORE_IC, extra_state);
+  Handle<Code> code =
+      compiler.CompileKeyedStorePolymorphic(receiver_maps, store_mode);
+  PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileLoadInitialize(Code::Flags flags) {
+  LoadIC::GenerateInitialize(masm());
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
+  PROFILE(isolate(), CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
+  LoadIC::GeneratePreMonomorphic(masm());
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
+  PROFILE(isolate(),
+          CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreInitialize(Code::Flags flags) {
+  StoreIC::GenerateInitialize(masm());
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
+  StoreIC::GeneratePreMonomorphic(masm());
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
+  PROFILE(isolate(),
+          CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreGeneric(Code::Flags flags) {
+  ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
+  StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
+  GenerateRuntimeSetProperty(masm(), strict_mode);
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileStoreMegamorphic(Code::Flags flags) {
+  StoreIC::GenerateMegamorphic(masm());
+  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
+  PROFILE(isolate(), CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
+                                         Handle<Name> name,
+                                         InlineCacheState state) {
+  Code::Flags flags =
+      Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
+  Handle<Code> code = GetCodeWithFlags(flags, name);
+  IC::RegisterWeakMapDependency(code);
+  PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
+  return code;
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode) {
+  // Collect MONOMORPHIC stubs for all |receiver_maps|.
+  CodeHandleList handlers(receiver_maps->length());
+  MapHandleList transitioned_maps(receiver_maps->length());
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    Handle<Map> receiver_map(receiver_maps->at(i));
+    Handle<Code> cached_stub;
+    Handle<Map> transitioned_map =
+        receiver_map->FindTransitionedMap(receiver_maps);
+
+    // TODO(mvstanton): The code below is doing pessimistic elements
+    // transitions. I would like to stop doing that and rely on Allocation
+    // Site Tracking to do a better job of ensuring the data types are what
+    // they need to be. Not all the pieces are in place yet, so pessimistic
+    // elements transitions are still important for performance.
+    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
+    ElementsKind elements_kind = receiver_map->elements_kind();
+    if (!transitioned_map.is_null()) {
+      cached_stub =
+          ElementsTransitionAndStoreStub(isolate(), elements_kind,
+                                         transitioned_map->elements_kind(),
+                                         is_js_array, store_mode).GetCode();
+    } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
+      cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
+    } else {
+      if (receiver_map->has_fast_elements() ||
+          receiver_map->has_external_array_elements() ||
+          receiver_map->has_fixed_typed_array_elements()) {
+        cached_stub = StoreFastElementStub(isolate(), is_js_array,
+                                           elements_kind, store_mode).GetCode();
+      } else {
+        cached_stub = StoreElementStub(isolate(), elements_kind).GetCode();
+      }
+    }
+    DCHECK(!cached_stub.is_null());
+    handlers.Add(cached_stub);
+    transitioned_maps.Add(transitioned_map);
+  }
+
+  Handle<Code> code = CompileKeyedStorePolymorphic(receiver_maps, &handlers,
+                                                   &transitioned_maps);
+  isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
+  PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, 0));
+  return code;
+}
+
+
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
+    Handle<Map> receiver_map, KeyedAccessStoreMode store_mode) {
+  ElementsKind elements_kind = receiver_map->elements_kind();
+  bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
+  Handle<Code> stub;
+  if (receiver_map->has_fast_elements() ||
+      receiver_map->has_external_array_elements() ||
+      receiver_map->has_fixed_typed_array_elements()) {
+    stub = StoreFastElementStub(isolate(), is_jsarray, elements_kind,
+                                store_mode).GetCode();
+  } else {
+    stub = StoreElementStub(isolate(), elements_kind).GetCode();
+  }
+
+  __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
+
+  TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
+
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string());
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
diff --git a/src/ic/ic-compiler.h b/src/ic/ic-compiler.h
new file mode 100644
index 0000000..3b12157
--- /dev/null
+++ b/src/ic/ic-compiler.h
@@ -0,0 +1,125 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_IC_COMPILER_H_
+#define V8_IC_IC_COMPILER_H_
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+
+enum IcCheckType { ELEMENT, PROPERTY };
+
+
+class PropertyICCompiler : public PropertyAccessCompiler {
+ public:
+  // Finds the Code object stored in the Heap::non_monomorphic_cache().
+  static Code* FindPreMonomorphic(Isolate* isolate, Code::Kind kind,
+                                  ExtraICState extra_ic_state);
+
+  // Named
+  static Handle<Code> ComputeLoad(Isolate* isolate, InlineCacheState ic_state,
+                                  ExtraICState extra_state);
+  static Handle<Code> ComputeStore(Isolate* isolate, InlineCacheState ic_state,
+                                   ExtraICState extra_state);
+
+  static Handle<Code> ComputeMonomorphic(Code::Kind kind, Handle<Name> name,
+                                         Handle<HeapType> type,
+                                         Handle<Code> handler,
+                                         ExtraICState extra_ic_state);
+  static Handle<Code> ComputePolymorphic(Code::Kind kind, TypeHandleList* types,
+                                         CodeHandleList* handlers,
+                                         int number_of_valid_maps,
+                                         Handle<Name> name,
+                                         ExtraICState extra_ic_state);
+
+  // Keyed
+  static Handle<Code> ComputeKeyedLoadMonomorphic(Handle<Map> receiver_map);
+
+  static Handle<Code> ComputeKeyedStoreMonomorphic(
+      Handle<Map> receiver_map, StrictMode strict_mode,
+      KeyedAccessStoreMode store_mode);
+  static Handle<Code> ComputeKeyedLoadPolymorphic(MapHandleList* receiver_maps);
+  static Handle<Code> ComputeKeyedStorePolymorphic(
+      MapHandleList* receiver_maps, KeyedAccessStoreMode store_mode,
+      StrictMode strict_mode);
+
+  // Compare nil
+  static Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
+                                        CompareNilICStub* stub);
+
+  // Helpers
+  // TODO(verwaest): Move all uses of these helpers to the PropertyICCompiler
+  // and make the helpers private.
+  static void GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                         StrictMode strict_mode);
+
+
+ private:
+  PropertyICCompiler(Isolate* isolate, Code::Kind kind,
+                     ExtraICState extra_ic_state = kNoExtraICState,
+                     CacheHolderFlag cache_holder = kCacheOnReceiver)
+      : PropertyAccessCompiler(isolate, kind, cache_holder),
+        extra_ic_state_(extra_ic_state) {}
+
+  static Handle<Code> Find(Handle<Name> name, Handle<Map> stub_holder_map,
+                           Code::Kind kind,
+                           ExtraICState extra_ic_state = kNoExtraICState,
+                           CacheHolderFlag cache_holder = kCacheOnReceiver);
+
+  Handle<Code> CompileLoadInitialize(Code::Flags flags);
+  Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
+  Handle<Code> CompileStoreInitialize(Code::Flags flags);
+  Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
+  Handle<Code> CompileStoreGeneric(Code::Flags flags);
+  Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
+
+  Handle<Code> CompileMonomorphic(Handle<HeapType> type, Handle<Code> handler,
+                                  Handle<Name> name, IcCheckType check);
+  Handle<Code> CompilePolymorphic(TypeHandleList* types,
+                                  CodeHandleList* handlers, Handle<Name> name,
+                                  Code::StubType type, IcCheckType check);
+
+  Handle<Code> CompileKeyedStoreMonomorphic(Handle<Map> receiver_map,
+                                            KeyedAccessStoreMode store_mode);
+  Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
+                                            KeyedAccessStoreMode store_mode);
+  Handle<Code> CompileKeyedStorePolymorphic(MapHandleList* receiver_maps,
+                                            CodeHandleList* handler_stubs,
+                                            MapHandleList* transitioned_maps);
+
+  bool IncludesNumberType(TypeHandleList* types);
+
+  Handle<Code> GetCode(Code::Kind kind, Code::StubType type, Handle<Name> name,
+                       InlineCacheState state = MONOMORPHIC);
+
+  Logger::LogEventsAndTags log_kind(Handle<Code> code) {
+    if (kind() == Code::LOAD_IC) {
+      return code->ic_state() == MONOMORPHIC ? Logger::LOAD_IC_TAG
+                                             : Logger::LOAD_POLYMORPHIC_IC_TAG;
+    } else if (kind() == Code::KEYED_LOAD_IC) {
+      return code->ic_state() == MONOMORPHIC
+                 ? Logger::KEYED_LOAD_IC_TAG
+                 : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
+    } else if (kind() == Code::STORE_IC) {
+      return code->ic_state() == MONOMORPHIC ? Logger::STORE_IC_TAG
+                                             : Logger::STORE_POLYMORPHIC_IC_TAG;
+    } else {
+      DCHECK_EQ(Code::KEYED_STORE_IC, kind());
+      return code->ic_state() == MONOMORPHIC
+                 ? Logger::KEYED_STORE_IC_TAG
+                 : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
+    }
+  }
+
+  const ExtraICState extra_ic_state_;
+};
+
+
+}
+}  // namespace v8::internal
+
+#endif  // V8_IC_IC_COMPILER_H_
diff --git a/src/ic/ic-inl.h b/src/ic/ic-inl.h
new file mode 100644
index 0000000..e10fb45
--- /dev/null
+++ b/src/ic/ic-inl.h
@@ -0,0 +1,229 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_INL_H_
+#define V8_IC_INL_H_
+
+#include "src/ic/ic.h"
+
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/macro-assembler.h"
+#include "src/prototype.h"
+
+namespace v8 {
+namespace internal {
+
+
+Address IC::address() const {
+  // Get the address of the call.
+  Address result = Assembler::target_address_from_return_address(pc());
+
+  Debug* debug = isolate()->debug();
+  // First check if any break points are active if not just return the address
+  // of the call.
+  if (!debug->has_break_points()) return result;
+
+  // At least one break point is active; perform an additional test to ensure
+  // that break point locations are updated correctly.
+  if (debug->IsDebugBreak(
+          Assembler::target_address_at(result, raw_constant_pool()))) {
+    // If the call site is a call to debug break then return the address in
+    // the original code instead of the address in the running code. This will
+    // cause the original code to be updated and keeps the breakpoint active in
+    // the running code.
+    Code* code = GetCode();
+    Code* original_code = GetOriginalCode();
+    intptr_t delta =
+        original_code->instruction_start() - code->instruction_start();
+    // Return the address in the original code. This is the place where
+    // the call which has been overwritten by the DebugBreakXXX resides
+    // and the place where the inline cache system should look.
+    return result + delta;
+  } else {
+    // No break point here just return the address of the call.
+    return result;
+  }
+}
+
+
+ConstantPoolArray* IC::constant_pool() const {
+  if (!FLAG_enable_ool_constant_pool) {
+    return NULL;
+  } else {
+    Handle<ConstantPoolArray> result = raw_constant_pool_;
+    Debug* debug = isolate()->debug();
+    // First check if any break points are active; if not, just return the
+    // original constant pool.
+    if (!debug->has_break_points()) return *result;
+
+    // At least one break point is active; perform an additional test to
+    // ensure that break point locations are updated correctly.
+    Address target = Assembler::target_address_from_return_address(pc());
+    if (debug->IsDebugBreak(
+            Assembler::target_address_at(target, raw_constant_pool()))) {
+      // If the call site is a call to debug break then we want to return the
+      // constant pool for the original code instead of the breakpointed code.
+      return GetOriginalCode()->constant_pool();
+    }
+    return *result;
+  }
+}
+
+
+ConstantPoolArray* IC::raw_constant_pool() const {
+  if (FLAG_enable_ool_constant_pool) {
+    return *raw_constant_pool_;
+  } else {
+    return NULL;
+  }
+}
+
+
+Code* IC::GetTargetAtAddress(Address address,
+                             ConstantPoolArray* constant_pool) {
+  // Get the target address of the IC.
+  Address target = Assembler::target_address_at(address, constant_pool);
+  // Convert target address to the code object. Code::GetCodeFromTargetAddress
+  // is safe for use during GC where the map might be marked.
+  Code* result = Code::GetCodeFromTargetAddress(target);
+  DCHECK(result->is_inline_cache_stub());
+  return result;
+}
+
+
+void IC::SetTargetAtAddress(Address address, Code* target,
+                            ConstantPoolArray* constant_pool) {
+  DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
+  Heap* heap = target->GetHeap();
+  Code* old_target = GetTargetAtAddress(address, constant_pool);
+#ifdef DEBUG
+  // STORE_IC and KEYED_STORE_IC use Code::extra_ic_state() to mark
+  // ICs as strict mode. The strictness of the IC must be preserved.
+  if (old_target->kind() == Code::STORE_IC ||
+      old_target->kind() == Code::KEYED_STORE_IC) {
+    DCHECK(StoreIC::GetStrictMode(old_target->extra_ic_state()) ==
+           StoreIC::GetStrictMode(target->extra_ic_state()));
+  }
+#endif
+  Assembler::set_target_address_at(address, constant_pool,
+                                   target->instruction_start());
+  if (heap->gc_state() == Heap::MARK_COMPACT) {
+    heap->mark_compact_collector()->RecordCodeTargetPatch(address, target);
+  } else {
+    heap->incremental_marking()->RecordCodeTargetPatch(address, target);
+  }
+  PostPatching(address, target, old_target);
+}
+
+
+void IC::set_target(Code* code) {
+#ifdef VERIFY_HEAP
+  code->VerifyEmbeddedObjectsDependency();
+#endif
+  SetTargetAtAddress(address(), code, constant_pool());
+  target_set_ = true;
+}
+
+
+void LoadIC::set_target(Code* code) {
+  // The contextual mode must be preserved across IC patching.
+  DCHECK(LoadICState::GetContextualMode(code->extra_ic_state()) ==
+         LoadICState::GetContextualMode(target()->extra_ic_state()));
+
+  IC::set_target(code);
+}
+
+
+void StoreIC::set_target(Code* code) {
+  // Strict mode must be preserved across IC patching.
+  DCHECK(GetStrictMode(code->extra_ic_state()) ==
+         GetStrictMode(target()->extra_ic_state()));
+  IC::set_target(code);
+}
+
+
+void KeyedStoreIC::set_target(Code* code) {
+  // Strict mode must be preserved across IC patching.
+  DCHECK(GetStrictMode(code->extra_ic_state()) == strict_mode());
+  IC::set_target(code);
+}
+
+
+Code* IC::raw_target() const {
+  return GetTargetAtAddress(address(), constant_pool());
+}
+
+void IC::UpdateTarget() { target_ = handle(raw_target(), isolate_); }
+
+
+template <class TypeClass>
+JSFunction* IC::GetRootConstructor(TypeClass* type, Context* native_context) {
+  if (type->Is(TypeClass::Boolean())) {
+    return native_context->boolean_function();
+  } else if (type->Is(TypeClass::Number())) {
+    return native_context->number_function();
+  } else if (type->Is(TypeClass::String())) {
+    return native_context->string_function();
+  } else if (type->Is(TypeClass::Symbol())) {
+    return native_context->symbol_function();
+  } else {
+    return NULL;
+  }
+}
+
+
+Handle<Map> IC::GetHandlerCacheHolder(HeapType* type, bool receiver_is_holder,
+                                      Isolate* isolate, CacheHolderFlag* flag) {
+  Handle<Map> receiver_map = TypeToMap(type, isolate);
+  if (receiver_is_holder) {
+    *flag = kCacheOnReceiver;
+    return receiver_map;
+  }
+  Context* native_context = *isolate->native_context();
+  JSFunction* builtin_ctor = GetRootConstructor(type, native_context);
+  if (builtin_ctor != NULL) {
+    *flag = kCacheOnPrototypeReceiverIsPrimitive;
+    return handle(HeapObject::cast(builtin_ctor->instance_prototype())->map());
+  }
+  *flag = receiver_map->is_dictionary_map()
+              ? kCacheOnPrototypeReceiverIsDictionary
+              : kCacheOnPrototype;
+  // Callers must ensure that the prototype is non-null.
+  return handle(JSObject::cast(receiver_map->prototype())->map());
+}
+
+
+Handle<Map> IC::GetICCacheHolder(HeapType* type, Isolate* isolate,
+                                 CacheHolderFlag* flag) {
+  Context* native_context = *isolate->native_context();
+  JSFunction* builtin_ctor = GetRootConstructor(type, native_context);
+  if (builtin_ctor != NULL) {
+    *flag = kCacheOnPrototype;
+    return handle(builtin_ctor->initial_map());
+  }
+  *flag = kCacheOnReceiver;
+  return TypeToMap(type, isolate);
+}
+
+
+IC::State CallIC::FeedbackToState(Handle<TypeFeedbackVector> vector,
+                                  Handle<Smi> slot) const {
+  IC::State state = UNINITIALIZED;
+  Object* feedback = vector->get(slot->value());
+
+  if (feedback == *TypeFeedbackVector::MegamorphicSentinel(isolate())) {
+    state = GENERIC;
+  } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
+    state = MONOMORPHIC;
+  } else {
+    CHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()));
+  }
+
+  return state;
+}
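+
+// The feedback slot thus encodes a three-point lattice: the uninitialized
+// sentinel reads back as UNINITIALIZED, a cached JSFunction or
+// AllocationSite as MONOMORPHIC, and the megamorphic sentinel as GENERIC.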
+}
+}  // namespace v8::internal
+
+#endif  // V8_IC_INL_H_
diff --git a/src/ic/ic-state.cc b/src/ic/ic-state.cc
new file mode 100644
index 0000000..4238a72
--- /dev/null
+++ b/src/ic/ic-state.cc
@@ -0,0 +1,614 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-state.h"
+
+namespace v8 {
+namespace internal {
+
+void ICUtility::Clear(Isolate* isolate, Address address,
+                      ConstantPoolArray* constant_pool) {
+  IC::Clear(isolate, address, constant_pool);
+}
+
+
+CallICState::CallICState(ExtraICState extra_ic_state)
+    : argc_(ArgcBits::decode(extra_ic_state)),
+      call_type_(CallTypeBits::decode(extra_ic_state)) {}
+
+
+ExtraICState CallICState::GetExtraICState() const {
+  ExtraICState extra_ic_state =
+      ArgcBits::encode(argc_) | CallTypeBits::encode(call_type_);
+  return extra_ic_state;
+}
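+
+// The constructor and GetExtraICState are inverses over the two bit fields,
+// so, illustratively:
+//
+//   CallICState a(extra_state);
+//   CallICState b(a.GetExtraICState());
+//   DCHECK(a.arg_count() == b.arg_count());
+//   DCHECK(a.call_type() == b.call_type());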
+
+
+OStream& operator<<(OStream& os, const CallICState& s) {
+  return os << "(args(" << s.arg_count() << "), "
+            << (s.call_type() == CallICState::METHOD ? "METHOD" : "FUNCTION")
+            << ", ";
+}
+
+
+BinaryOpICState::BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state)
+    : isolate_(isolate) {
+  op_ =
+      static_cast<Token::Value>(FIRST_TOKEN + OpField::decode(extra_ic_state));
+  mode_ = OverwriteModeField::decode(extra_ic_state);
+  fixed_right_arg_ =
+      Maybe<int>(HasFixedRightArgField::decode(extra_ic_state),
+                 1 << FixedRightArgValueField::decode(extra_ic_state));
+  left_kind_ = LeftKindField::decode(extra_ic_state);
+  if (fixed_right_arg_.has_value) {
+    right_kind_ = Smi::IsValid(fixed_right_arg_.value) ? SMI : INT32;
+  } else {
+    right_kind_ = RightKindField::decode(extra_ic_state);
+  }
+  result_kind_ = ResultKindField::decode(extra_ic_state);
+  DCHECK_LE(FIRST_TOKEN, op_);
+  DCHECK_LE(op_, LAST_TOKEN);
+}
+
+
+ExtraICState BinaryOpICState::GetExtraICState() const {
+  ExtraICState extra_ic_state =
+      OpField::encode(op_ - FIRST_TOKEN) | OverwriteModeField::encode(mode_) |
+      LeftKindField::encode(left_kind_) |
+      ResultKindField::encode(result_kind_) |
+      HasFixedRightArgField::encode(fixed_right_arg_.has_value);
+  if (fixed_right_arg_.has_value) {
+    extra_ic_state = FixedRightArgValueField::update(
+        extra_ic_state, WhichPowerOf2(fixed_right_arg_.value));
+  } else {
+    extra_ic_state = RightKindField::update(extra_ic_state, right_kind_);
+  }
+  return extra_ic_state;
+}
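+
+// Note that a fixed right argument is stored as its base-2 logarithm
+// (WhichPowerOf2 above), so only power-of-two values can be encoded; the
+// constructor reverses this with 1 << FixedRightArgValueField::decode(...).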
+
+
+// static
+void BinaryOpICState::GenerateAheadOfTime(
+    Isolate* isolate, void (*Generate)(Isolate*, const BinaryOpICState&)) {
+// TODO(olivf) We should investigate why adding stubs to the snapshot is so
+// expensive at runtime. Once solved, we should be able to add most binops
+// to the snapshot instead of hand-picking them.
+// Generated list of commonly used stubs
+#define GENERATE(op, left_kind, right_kind, result_kind, mode) \
+  do {                                                         \
+    BinaryOpICState state(isolate, op, mode);                  \
+    state.left_kind_ = left_kind;                              \
+    state.fixed_right_arg_.has_value = false;                  \
+    state.right_kind_ = right_kind;                            \
+    state.result_kind_ = result_kind;                          \
+    Generate(isolate, state);                                  \
+  } while (false)
+  GENERATE(Token::ADD, INT32, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::ADD, INT32, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, INT32, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::ADD, INT32, INT32, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::ADD, INT32, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, INT32, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::ADD, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::ADD, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::ADD, SMI, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::ADD, SMI, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, SMI, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::ADD, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::ADD, SMI, SMI, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::ADD, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, INT32, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_AND, INT32, INT32, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, INT32, INT32, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_AND, INT32, INT32, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, INT32, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_AND, INT32, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, INT32, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_AND, INT32, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, NUMBER, INT32, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_AND, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, SMI, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_AND, SMI, INT32, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, SMI, NUMBER, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_AND, SMI, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_AND, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_OR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_OR, INT32, INT32, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_OR, INT32, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_OR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_OR, INT32, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_OR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_OR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_OR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_OR, SMI, INT32, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_OR, SMI, INT32, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_OR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_XOR, INT32, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_XOR, INT32, INT32, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_XOR, INT32, INT32, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, INT32, INT32, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_XOR, INT32, NUMBER, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, INT32, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_XOR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::BIT_XOR, NUMBER, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, NUMBER, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, NUMBER, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, SMI, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, SMI, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_XOR, SMI, INT32, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_XOR, SMI, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::BIT_XOR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::DIV, INT32, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::DIV, INT32, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::DIV, INT32, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::DIV, INT32, SMI, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::DIV, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::DIV, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::DIV, SMI, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::DIV, SMI, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, SMI, INT32, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::DIV, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::DIV, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::DIV, SMI, SMI, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::DIV, SMI, SMI, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::DIV, SMI, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::DIV, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::MOD, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::MOD, SMI, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::MOD, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, INT32, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::MUL, INT32, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, INT32, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, INT32, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::MUL, INT32, SMI, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, INT32, SMI, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, NUMBER, INT32, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::MUL, SMI, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::MUL, SMI, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, SMI, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::MUL, SMI, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::MUL, SMI, SMI, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::MUL, SMI, SMI, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, SMI, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::MUL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SAR, INT32, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::SAR, INT32, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::SAR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SAR, NUMBER, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::SAR, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::SAR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SHL, INT32, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::SHL, INT32, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::SHL, INT32, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::SHL, INT32, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SHL, NUMBER, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SHL, SMI, SMI, INT32, NO_OVERWRITE);
+  GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::SHL, SMI, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::SHL, SMI, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::SHL, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SHR, INT32, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::SHR, INT32, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SHR, NUMBER, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::SHR, NUMBER, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::SHR, NUMBER, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::SHR, SMI, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::SHR, SMI, SMI, SMI, OVERWRITE_RIGHT);
+  GENERATE(Token::SUB, INT32, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::SUB, INT32, INT32, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::SUB, INT32, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::SUB, INT32, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_LEFT);
+  GENERATE(Token::SUB, INT32, SMI, INT32, OVERWRITE_RIGHT);
+  GENERATE(Token::SUB, NUMBER, INT32, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::SUB, NUMBER, INT32, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::SUB, NUMBER, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::SUB, NUMBER, SMI, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::SUB, NUMBER, SMI, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::SUB, SMI, INT32, INT32, NO_OVERWRITE);
+  GENERATE(Token::SUB, SMI, NUMBER, NUMBER, NO_OVERWRITE);
+  GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_LEFT);
+  GENERATE(Token::SUB, SMI, NUMBER, NUMBER, OVERWRITE_RIGHT);
+  GENERATE(Token::SUB, SMI, SMI, SMI, NO_OVERWRITE);
+  GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::SUB, SMI, SMI, SMI, OVERWRITE_RIGHT);
+#undef GENERATE
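+// Stubs for MOD with a commonly used power-of-two fixed right argument,
+// which can be lowered to a mask operation.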
+#define GENERATE(op, left_kind, fixed_right_arg_value, result_kind, mode) \
+  do {                                                                    \
+    BinaryOpICState state(isolate, op, mode);                             \
+    state.left_kind_ = left_kind;                                         \
+    state.fixed_right_arg_.has_value = true;                              \
+    state.fixed_right_arg_.value = fixed_right_arg_value;                 \
+    state.right_kind_ = SMI;                                              \
+    state.result_kind_ = result_kind;                                     \
+    Generate(isolate, state);                                             \
+  } while (false)
+  GENERATE(Token::MOD, SMI, 2, SMI, NO_OVERWRITE);
+  GENERATE(Token::MOD, SMI, 4, SMI, NO_OVERWRITE);
+  GENERATE(Token::MOD, SMI, 4, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::MOD, SMI, 8, SMI, NO_OVERWRITE);
+  GENERATE(Token::MOD, SMI, 16, SMI, OVERWRITE_LEFT);
+  GENERATE(Token::MOD, SMI, 32, SMI, NO_OVERWRITE);
+  GENERATE(Token::MOD, SMI, 2048, SMI, NO_OVERWRITE);
+#undef GENERATE
+}
+
+
+Type* BinaryOpICState::GetResultType(Zone* zone) const {
+  Kind result_kind = result_kind_;
+  if (HasSideEffects()) {
+    result_kind = NONE;
+  } else if (result_kind == GENERIC && op_ == Token::ADD) {
+    return Type::Union(Type::Number(zone), Type::String(zone), zone);
+  } else if (result_kind == NUMBER && op_ == Token::SHR) {
+    return Type::Unsigned32(zone);
+  }
+  DCHECK_NE(GENERIC, result_kind);
+  return KindToType(result_kind, zone);
+}
+
+
+OStream& operator<<(OStream& os, const BinaryOpICState& s) {
+  os << "(" << Token::Name(s.op_);
+  if (s.mode_ == OVERWRITE_LEFT)
+    os << "_ReuseLeft";
+  else if (s.mode_ == OVERWRITE_RIGHT)
+    os << "_ReuseRight";
+  if (s.CouldCreateAllocationMementos()) os << "_CreateAllocationMementos";
+  os << ":" << BinaryOpICState::KindToString(s.left_kind_) << "*";
+  if (s.fixed_right_arg_.has_value) {
+    os << s.fixed_right_arg_.value;
+  } else {
+    os << BinaryOpICState::KindToString(s.right_kind_);
+  }
+  return os << "->" << BinaryOpICState::KindToString(s.result_kind_) << ")";
+}
+
+
+void BinaryOpICState::Update(Handle<Object> left, Handle<Object> right,
+                             Handle<Object> result) {
+  ExtraICState old_extra_ic_state = GetExtraICState();
+
+  left_kind_ = UpdateKind(left, left_kind_);
+  right_kind_ = UpdateKind(right, right_kind_);
+
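+  // MOD by a known positive power of two can be lowered to a bitwise mask,
+  // so remember such a fixed right argument while the left operand is still
+  // known to be a small integer.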
+  int32_t fixed_right_arg_value = 0;
+  bool has_fixed_right_arg =
+      op_ == Token::MOD && right->ToInt32(&fixed_right_arg_value) &&
+      fixed_right_arg_value > 0 &&
+      base::bits::IsPowerOfTwo32(fixed_right_arg_value) &&
+      FixedRightArgValueField::is_valid(WhichPowerOf2(fixed_right_arg_value)) &&
+      (left_kind_ == SMI || left_kind_ == INT32) &&
+      (result_kind_ == NONE || !fixed_right_arg_.has_value);
+  fixed_right_arg_ = Maybe<int32_t>(has_fixed_right_arg, fixed_right_arg_value);
+
+  result_kind_ = UpdateKind(result, result_kind_);
+
+  if (!Token::IsTruncatingBinaryOp(op_)) {
+    Kind input_kind = Max(left_kind_, right_kind_);
+    if (result_kind_ < input_kind && input_kind <= NUMBER) {
+      result_kind_ = input_kind;
+    }
+  }
+
+  // We don't want to distinguish INT32 and NUMBER for string add (because
+  // NumberToString can't make use of this anyway).
+  if (left_kind_ == STRING && right_kind_ == INT32) {
+    DCHECK_EQ(STRING, result_kind_);
+    DCHECK_EQ(Token::ADD, op_);
+    right_kind_ = NUMBER;
+  } else if (right_kind_ == STRING && left_kind_ == INT32) {
+    DCHECK_EQ(STRING, result_kind_);
+    DCHECK_EQ(Token::ADD, op_);
+    left_kind_ = NUMBER;
+  }
+
+  // Reset overwrite mode unless we can actually make use of it, or may be able
+  // to make use of it at some point in the future.
+  if ((mode_ == OVERWRITE_LEFT && left_kind_ > NUMBER) ||
+      (mode_ == OVERWRITE_RIGHT && right_kind_ > NUMBER) ||
+      result_kind_ > NUMBER) {
+    mode_ = NO_OVERWRITE;
+  }
+
+  if (old_extra_ic_state == GetExtraICState()) {
+    // The update did not change the state, which would leave us stuck in a
+    // miss loop. Tagged operations can lead to non-truncating HChanges, so
+    // force the offending operand to GENERIC.
+    if (left->IsUndefined() || left->IsBoolean()) {
+      left_kind_ = GENERIC;
+    } else {
+      DCHECK(right->IsUndefined() || right->IsBoolean());
+      right_kind_ = GENERIC;
+    }
+  }
+}
+
+
+BinaryOpICState::Kind BinaryOpICState::UpdateKind(Handle<Object> object,
+                                                  Kind kind) const {
+  Kind new_kind = GENERIC;
+  bool is_truncating = Token::IsTruncatingBinaryOp(op());
+  if (object->IsBoolean() && is_truncating) {
+    // Booleans will be automatically truncated by HChange.
+    new_kind = INT32;
+  } else if (object->IsUndefined()) {
+    // Undefined will be automatically truncated by HChange.
+    new_kind = is_truncating ? INT32 : NUMBER;
+  } else if (object->IsSmi()) {
+    new_kind = SMI;
+  } else if (object->IsHeapNumber()) {
+    double value = Handle<HeapNumber>::cast(object)->value();
+    new_kind = IsInt32Double(value) ? INT32 : NUMBER;
+  } else if (object->IsString() && op() == Token::ADD) {
+    new_kind = STRING;
+  }
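+  // On 64-bit targets every int32 value fits in a Smi, so a separate INT32
+  // representation has no benefit there.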
+  if (new_kind == INT32 && SmiValuesAre32Bits()) {
+    new_kind = NUMBER;
+  }
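+  // Numeric and non-numeric kinds do not generalize into each other;
+  // crossing that boundary degrades the kind to GENERIC.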
+  if (kind != NONE && ((new_kind <= NUMBER && kind > NUMBER) ||
+                       (new_kind > NUMBER && kind <= NUMBER))) {
+    new_kind = GENERIC;
+  }
+  return Max(kind, new_kind);
+}
+
+
+// static
+const char* BinaryOpICState::KindToString(Kind kind) {
+  switch (kind) {
+    case NONE:
+      return "None";
+    case SMI:
+      return "Smi";
+    case INT32:
+      return "Int32";
+    case NUMBER:
+      return "Number";
+    case STRING:
+      return "String";
+    case GENERIC:
+      return "Generic";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+// static
+Type* BinaryOpICState::KindToType(Kind kind, Zone* zone) {
+  switch (kind) {
+    case NONE:
+      return Type::None(zone);
+    case SMI:
+      return Type::SignedSmall(zone);
+    case INT32:
+      return Type::Signed32(zone);
+    case NUMBER:
+      return Type::Number(zone);
+    case STRING:
+      return Type::String(zone);
+    case GENERIC:
+      return Type::Any(zone);
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+const char* CompareICState::GetStateName(State state) {
+  switch (state) {
+    case UNINITIALIZED:
+      return "UNINITIALIZED";
+    case SMI:
+      return "SMI";
+    case NUMBER:
+      return "NUMBER";
+    case INTERNALIZED_STRING:
+      return "INTERNALIZED_STRING";
+    case STRING:
+      return "STRING";
+    case UNIQUE_NAME:
+      return "UNIQUE_NAME";
+    case OBJECT:
+      return "OBJECT";
+    case KNOWN_OBJECT:
+      return "KNOWN_OBJECT";
+    case GENERIC:
+      return "GENERIC";
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+Type* CompareICState::StateToType(Zone* zone, State state, Handle<Map> map) {
+  switch (state) {
+    case UNINITIALIZED:
+      return Type::None(zone);
+    case SMI:
+      return Type::SignedSmall(zone);
+    case NUMBER:
+      return Type::Number(zone);
+    case STRING:
+      return Type::String(zone);
+    case INTERNALIZED_STRING:
+      return Type::InternalizedString(zone);
+    case UNIQUE_NAME:
+      return Type::UniqueName(zone);
+    case OBJECT:
+      return Type::Receiver(zone);
+    case KNOWN_OBJECT:
+      return map.is_null() ? Type::Receiver(zone) : Type::Class(map, zone);
+    case GENERIC:
+      return Type::Any(zone);
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+CompareICState::State CompareICState::NewInputState(State old_state,
+                                                    Handle<Object> value) {
+  switch (old_state) {
+    case UNINITIALIZED:
+      if (value->IsSmi()) return SMI;
+      if (value->IsHeapNumber()) return NUMBER;
+      if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+      if (value->IsString()) return STRING;
+      if (value->IsSymbol()) return UNIQUE_NAME;
+      if (value->IsJSObject()) return OBJECT;
+      break;
+    case SMI:
+      if (value->IsSmi()) return SMI;
+      if (value->IsHeapNumber()) return NUMBER;
+      break;
+    case NUMBER:
+      if (value->IsNumber()) return NUMBER;
+      break;
+    case INTERNALIZED_STRING:
+      if (value->IsInternalizedString()) return INTERNALIZED_STRING;
+      if (value->IsString()) return STRING;
+      if (value->IsSymbol()) return UNIQUE_NAME;
+      break;
+    case STRING:
+      if (value->IsString()) return STRING;
+      break;
+    case UNIQUE_NAME:
+      if (value->IsUniqueName()) return UNIQUE_NAME;
+      break;
+    case OBJECT:
+      if (value->IsJSObject()) return OBJECT;
+      break;
+    case GENERIC:
+      break;
+    case KNOWN_OBJECT:
+      UNREACHABLE();
+      break;
+  }
+  return GENERIC;
+}
+
+
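+// Choose the CompareIC's next state after a miss, based on the previous
+// states and the operand values actually seen.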
+// static
+CompareICState::State CompareICState::TargetState(
+    State old_state, State old_left, State old_right, Token::Value op,
+    bool has_inlined_smi_code, Handle<Object> x, Handle<Object> y) {
+  switch (old_state) {
+    case UNINITIALIZED:
+      if (x->IsSmi() && y->IsSmi()) return SMI;
+      if (x->IsNumber() && y->IsNumber()) return NUMBER;
+      if (Token::IsOrderedRelationalCompareOp(op)) {
+        // Ordered comparisons treat undefined as NaN, so the
+        // NUMBER stub will do the right thing.
+        if ((x->IsNumber() && y->IsUndefined()) ||
+            (y->IsNumber() && x->IsUndefined())) {
+          return NUMBER;
+        }
+      }
+      if (x->IsInternalizedString() && y->IsInternalizedString()) {
+        // We compare internalized strings as plain ones if we need to determine
+        // the order in a non-equality compare.
+        return Token::IsEqualityOp(op) ? INTERNALIZED_STRING : STRING;
+      }
+      if (x->IsString() && y->IsString()) return STRING;
+      if (!Token::IsEqualityOp(op)) return GENERIC;
+      if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
+      if (x->IsJSObject() && y->IsJSObject()) {
+        if (Handle<JSObject>::cast(x)->map() ==
+            Handle<JSObject>::cast(y)->map()) {
+          return KNOWN_OBJECT;
+        } else {
+          return OBJECT;
+        }
+      }
+      return GENERIC;
+    case SMI:
+      return x->IsNumber() && y->IsNumber() ? NUMBER : GENERIC;
+    case INTERNALIZED_STRING:
+      DCHECK(Token::IsEqualityOp(op));
+      if (x->IsString() && y->IsString()) return STRING;
+      if (x->IsUniqueName() && y->IsUniqueName()) return UNIQUE_NAME;
+      return GENERIC;
+    case NUMBER:
+      // If the failure was due to one side changing from smi to heap number,
+      // then keep the state (if the other side changed at the same time, we
+      // will get a second miss and then go to generic).
+      if (old_left == SMI && x->IsHeapNumber()) return NUMBER;
+      if (old_right == SMI && y->IsHeapNumber()) return NUMBER;
+      return GENERIC;
+    case KNOWN_OBJECT:
+      DCHECK(Token::IsEqualityOp(op));
+      if (x->IsJSObject() && y->IsJSObject()) {
+        return OBJECT;
+      }
+      return GENERIC;
+    case STRING:
+    case UNIQUE_NAME:
+    case OBJECT:
+    case GENERIC:
+      return GENERIC;
+  }
+  UNREACHABLE();
+  return GENERIC;  // Make the compiler happy.
+}
+}
+}  // namespace v8::internal
diff --git a/src/ic/ic-state.h b/src/ic/ic-state.h
new file mode 100644
index 0000000..b84bdb9
--- /dev/null
+++ b/src/ic/ic-state.h
@@ -0,0 +1,238 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_STATE_H_
+#define V8_IC_STATE_H_
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+const int kMaxKeyedPolymorphism = 4;
+
+
+class ICUtility : public AllStatic {
+ public:
+  // Clear the inline cache to initial state.
+  static void Clear(Isolate* isolate, Address address,
+                    ConstantPoolArray* constant_pool);
+};
+
+
+class CallICState FINAL BASE_EMBEDDED {
+ public:
+  explicit CallICState(ExtraICState extra_ic_state);
+
+  enum CallType { METHOD, FUNCTION };
+
+  CallICState(int argc, CallType call_type)
+      : argc_(argc), call_type_(call_type) {}
+
+  ExtraICState GetExtraICState() const;
+
+  static void GenerateAheadOfTime(Isolate*,
+                                  void (*Generate)(Isolate*,
+                                                   const CallICState&));
+
+  int arg_count() const { return argc_; }
+  CallType call_type() const { return call_type_; }
+
+  bool CallAsMethod() const { return call_type_ == METHOD; }
+
+ private:
+  class ArgcBits : public BitField<int, 0, Code::kArgumentsBits> {};
+  class CallTypeBits : public BitField<CallType, Code::kArgumentsBits, 1> {};
+
+  const int argc_;
+  const CallType call_type_;
+};
+
+
+OStream& operator<<(OStream& os, const CallICState& s);
+
+
+// Mode to overwrite BinaryExpression values.
+enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
+
+class BinaryOpICState FINAL BASE_EMBEDDED {
+ public:
+  BinaryOpICState(Isolate* isolate, ExtraICState extra_ic_state);
+
+  BinaryOpICState(Isolate* isolate, Token::Value op, OverwriteMode mode)
+      : op_(op),
+        mode_(mode),
+        left_kind_(NONE),
+        right_kind_(NONE),
+        result_kind_(NONE),
+        isolate_(isolate) {
+    DCHECK_LE(FIRST_TOKEN, op);
+    DCHECK_LE(op, LAST_TOKEN);
+  }
+
+  InlineCacheState GetICState() const {
+    if (Max(left_kind_, right_kind_) == NONE) {
+      return ::v8::internal::UNINITIALIZED;
+    }
+    if (Max(left_kind_, right_kind_) == GENERIC) {
+      return ::v8::internal::MEGAMORPHIC;
+    }
+    if (Min(left_kind_, right_kind_) == GENERIC) {
+      return ::v8::internal::GENERIC;
+    }
+    return ::v8::internal::MONOMORPHIC;
+  }
+
+  ExtraICState GetExtraICState() const;
+
+  static void GenerateAheadOfTime(Isolate*,
+                                  void (*Generate)(Isolate*,
+                                                   const BinaryOpICState&));
+
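+  // Returns true if the (heap number) result can reuse the double box of
+  // the operand that the overwrite mode marks as dead.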
+  bool CanReuseDoubleBox() const {
+    return (result_kind_ > SMI && result_kind_ <= NUMBER) &&
+           ((mode_ == OVERWRITE_LEFT && left_kind_ > SMI &&
+             left_kind_ <= NUMBER) ||
+            (mode_ == OVERWRITE_RIGHT && right_kind_ > SMI &&
+             right_kind_ <= NUMBER));
+  }
+
+  // Returns true if the IC _could_ create allocation mementos.
+  bool CouldCreateAllocationMementos() const {
+    if (left_kind_ == STRING || right_kind_ == STRING) {
+      DCHECK_EQ(Token::ADD, op_);
+      return true;
+    }
+    return false;
+  }
+
+  // Returns true if the IC _should_ create allocation mementos.
+  bool ShouldCreateAllocationMementos() const {
+    return FLAG_allocation_site_pretenuring && CouldCreateAllocationMementos();
+  }
+
+  bool HasSideEffects() const {
+    return Max(left_kind_, right_kind_) == GENERIC;
+  }
+
+  // Returns true if the IC should enable the inline smi code (i.e. if either
+  // parameter may be a smi).
+  bool UseInlinedSmiCode() const {
+    return KindMaybeSmi(left_kind_) || KindMaybeSmi(right_kind_);
+  }
+
+  static const int FIRST_TOKEN = Token::BIT_OR;
+  static const int LAST_TOKEN = Token::MOD;
+
+  Token::Value op() const { return op_; }
+  OverwriteMode mode() const { return mode_; }
+  Maybe<int> fixed_right_arg() const { return fixed_right_arg_; }
+
+  Type* GetLeftType(Zone* zone) const { return KindToType(left_kind_, zone); }
+  Type* GetRightType(Zone* zone) const { return KindToType(right_kind_, zone); }
+  Type* GetResultType(Zone* zone) const;
+
+  void Update(Handle<Object> left, Handle<Object> right, Handle<Object> result);
+
+  Isolate* isolate() const { return isolate_; }
+
+ private:
+  friend OStream& operator<<(OStream& os, const BinaryOpICState& s);
+
+  enum Kind { NONE, SMI, INT32, NUMBER, STRING, GENERIC };
+
+  Kind UpdateKind(Handle<Object> object, Kind kind) const;
+
+  static const char* KindToString(Kind kind);
+  static Type* KindToType(Kind kind, Zone* zone);
+  static bool KindMaybeSmi(Kind kind) {
+    return (kind >= SMI && kind <= NUMBER) || kind == GENERIC;
+  }
+
+  // We truncate the last bit of the token.
+  STATIC_ASSERT(LAST_TOKEN - FIRST_TOKEN < (1 << 4));
+  class OpField : public BitField<int, 0, 4> {};
+  class OverwriteModeField : public BitField<OverwriteMode, 4, 2> {};
+  class ResultKindField : public BitField<Kind, 6, 3> {};
+  class LeftKindField : public BitField<Kind, 9, 3> {};
+  // When fixed right arg is set, we don't need to store the right kind.
+  // Thus the two fields can overlap.
+  class HasFixedRightArgField : public BitField<bool, 12, 1> {};
+  class FixedRightArgValueField : public BitField<int, 13, 4> {};
+  class RightKindField : public BitField<Kind, 13, 3> {};
+
+  Token::Value op_;
+  OverwriteMode mode_;
+  Kind left_kind_;
+  Kind right_kind_;
+  Kind result_kind_;
+  Maybe<int> fixed_right_arg_;
+  Isolate* isolate_;
+};
+
+
+OStream& operator<<(OStream& os, const BinaryOpICState& s);
+
+
+class CompareICState {
+ public:
+  // The type/state lattice is defined by the following inequations:
+  //   UNINITIALIZED < ...
+  //   ... < GENERIC
+  //   SMI < NUMBER
+  //   INTERNALIZED_STRING < STRING
+  //   KNOWN_OBJECT < OBJECT
+  enum State {
+    UNINITIALIZED,
+    SMI,
+    NUMBER,
+    STRING,
+    INTERNALIZED_STRING,
+    UNIQUE_NAME,   // Symbol or InternalizedString
+    OBJECT,        // JSObject
+    KNOWN_OBJECT,  // JSObject with specific map (faster check)
+    GENERIC
+  };
+
+  static Type* StateToType(Zone* zone, State state,
+                           Handle<Map> map = Handle<Map>());
+
+  static State NewInputState(State old_state, Handle<Object> value);
+
+  static const char* GetStateName(CompareICState::State state);
+
+  static State TargetState(State old_state, State old_left, State old_right,
+                           Token::Value op, bool has_inlined_smi_code,
+                           Handle<Object> x, Handle<Object> y);
+};
+
+
+class LoadICState FINAL BASE_EMBEDDED {
+ public:
+  explicit LoadICState(ExtraICState extra_ic_state) : state_(extra_ic_state) {}
+
+  explicit LoadICState(ContextualMode mode)
+      : state_(ContextualModeBits::encode(mode)) {}
+
+  ExtraICState GetExtraICState() const { return state_; }
+
+  ContextualMode contextual_mode() const {
+    return ContextualModeBits::decode(state_);
+  }
+
+  static ContextualMode GetContextualMode(ExtraICState state) {
+    return LoadICState(state).contextual_mode();
+  }
+
+ private:
+  class ContextualModeBits : public BitField<ContextualMode, 0, 1> {};
+  STATIC_ASSERT(static_cast<int>(NOT_CONTEXTUAL) == 0);
+
+  const ExtraICState state_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_IC_STATE_H_
diff --git a/src/ic/ic.cc b/src/ic/ic.cc
new file mode 100644
index 0000000..1f3c750
--- /dev/null
+++ b/src/ic/ic.cc
@@ -0,0 +1,2689 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/accessors.h"
+#include "src/api.h"
+#include "src/arguments.h"
+#include "src/base/bits.h"
+#include "src/codegen.h"
+#include "src/conversions.h"
+#include "src/execution.h"
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic-inl.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+#include "src/prototype.h"
+#include "src/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+char IC::TransitionMarkFromState(IC::State state) {
+  switch (state) {
+    case UNINITIALIZED:
+      return '0';
+    case PREMONOMORPHIC:
+      return '.';
+    case MONOMORPHIC:
+      return '1';
+    case PROTOTYPE_FAILURE:
+      return '^';
+    case POLYMORPHIC:
+      return 'P';
+    case MEGAMORPHIC:
+      return 'N';
+    case GENERIC:
+      return 'G';
+
+    // We never see the debugger states here, because the state is
+    // computed from the original code - not the patched code. Let
+    // these cases fall through to the unreachable code below.
+    case DEBUG_STUB:
+      break;
+    // Type-vector-based ICs resolve state to one of the above.
+    case DEFAULT:
+      break;
+  }
+  UNREACHABLE();
+  return 0;
+}
+
+
+const char* GetTransitionMarkModifier(KeyedAccessStoreMode mode) {
+  if (mode == STORE_NO_TRANSITION_HANDLE_COW) return ".COW";
+  if (mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS) {
+    return ".IGNORE_OOB";
+  }
+  if (IsGrowStoreMode(mode)) return ".GROW";
+  return "";
+}
+
+
+#ifdef DEBUG
+
+#define TRACE_GENERIC_IC(isolate, type, reason)                \
+  do {                                                         \
+    if (FLAG_trace_ic) {                                       \
+      PrintF("[%s patching generic stub in ", type);           \
+      JavaScriptFrame::PrintTop(isolate, stdout, false, true); \
+      PrintF(" (%s)]\n", reason);                              \
+    }                                                          \
+  } while (false)
+
+#else
+
+#define TRACE_GENERIC_IC(isolate, type, reason)      \
+  do {                                               \
+    if (FLAG_trace_ic) {                             \
+      PrintF("[%s patching generic stub in ", type); \
+      PrintF("(see below) (%s)]\n", reason);         \
+    }                                                \
+  } while (false)
+
+#endif  // DEBUG
+
+
+void IC::TraceIC(const char* type, Handle<Object> name) {
+  if (FLAG_trace_ic) {
+    Code* new_target = raw_target();
+    State new_state = new_target->ic_state();
+    TraceIC(type, name, state(), new_state);
+  }
+}
+
+
+void IC::TraceIC(const char* type, Handle<Object> name, State old_state,
+                 State new_state) {
+  if (FLAG_trace_ic) {
+    Code* new_target = raw_target();
+    PrintF("[%s%s in ", new_target->is_keyed_stub() ? "Keyed" : "", type);
+
+    // TODO(jkummerow): Add support for "apply". The logic is roughly:
+    // marker = [fp_ + kMarkerOffset];
+    // if marker is smi and marker.value == INTERNAL and
+    //     the frame's code == builtin(Builtins::kFunctionApply):
+    // then print "apply from" and advance one frame
+
+    Object* maybe_function =
+        Memory::Object_at(fp_ + JavaScriptFrameConstants::kFunctionOffset);
+    if (maybe_function->IsJSFunction()) {
+      JSFunction* function = JSFunction::cast(maybe_function);
+      JavaScriptFrame::PrintFunctionAndOffset(function, function->code(), pc(),
+                                              stdout, true);
+    }
+
+    ExtraICState extra_state = new_target->extra_ic_state();
+    const char* modifier = "";
+    if (new_target->kind() == Code::KEYED_STORE_IC) {
+      modifier = GetTransitionMarkModifier(
+          KeyedStoreIC::GetKeyedAccessStoreMode(extra_state));
+    }
+    PrintF(" (%c->%c%s)", TransitionMarkFromState(old_state),
+           TransitionMarkFromState(new_state), modifier);
+#ifdef OBJECT_PRINT
+    OFStream os(stdout);
+    name->Print(os);
+#else
+    name->ShortPrint(stdout);
+#endif
+    PrintF("]\n");
+  }
+}
+
+#define TRACE_IC(type, name) TraceIC(type, name)
+#define TRACE_VECTOR_IC(type, name, old_state, new_state) \
+  TraceIC(type, name, old_state, new_state)
+
+IC::IC(FrameDepth depth, Isolate* isolate)
+    : isolate_(isolate), target_set_(false), target_maps_set_(false) {
+  // To improve the performance of the (much used) IC code, we unfold a few
+  // levels of the stack frame iteration code. This yields a ~35% speedup when
+  // running DeltaBlue and a ~25% speedup of gbemu with the '--nouse-ic' flag.
+  const Address entry = Isolate::c_entry_fp(isolate->thread_local_top());
+  Address constant_pool = NULL;
+  if (FLAG_enable_ool_constant_pool) {
+    constant_pool =
+        Memory::Address_at(entry + ExitFrameConstants::kConstantPoolOffset);
+  }
+  Address* pc_address =
+      reinterpret_cast<Address*>(entry + ExitFrameConstants::kCallerPCOffset);
+  Address fp = Memory::Address_at(entry + ExitFrameConstants::kCallerFPOffset);
+  // If there's another JavaScript frame on the stack or a
+  // StubFailureTrampoline, we need to look one frame further down the stack to
+  // find the frame pointer and the return address stack slot.
+  if (depth == EXTRA_CALL_FRAME) {
+    if (FLAG_enable_ool_constant_pool) {
+      constant_pool =
+          Memory::Address_at(fp + StandardFrameConstants::kConstantPoolOffset);
+    }
+    const int kCallerPCOffset = StandardFrameConstants::kCallerPCOffset;
+    pc_address = reinterpret_cast<Address*>(fp + kCallerPCOffset);
+    fp = Memory::Address_at(fp + StandardFrameConstants::kCallerFPOffset);
+  }
+#ifdef DEBUG
+  StackFrameIterator it(isolate);
+  for (int i = 0; i < depth + 1; i++) it.Advance();
+  StackFrame* frame = it.frame();
+  DCHECK(fp == frame->fp() && pc_address == frame->pc_address());
+#endif
+  fp_ = fp;
+  if (FLAG_enable_ool_constant_pool) {
+    raw_constant_pool_ = handle(
+        ConstantPoolArray::cast(reinterpret_cast<Object*>(constant_pool)),
+        isolate);
+  }
+  pc_address_ = StackFrame::ResolveReturnAddressLocation(pc_address);
+  target_ = handle(raw_target(), isolate);
+  state_ = target_->ic_state();
+  kind_ = target_->kind();
+  extra_ic_state_ = target_->extra_ic_state();
+}
+
+
+SharedFunctionInfo* IC::GetSharedFunctionInfo() const {
+  // Compute the JavaScript frame for the frame pointer of this IC
+  // structure. We need this to be able to find the function
+  // corresponding to the frame.
+  StackFrameIterator it(isolate());
+  while (it.frame()->fp() != this->fp()) it.Advance();
+  JavaScriptFrame* frame = JavaScriptFrame::cast(it.frame());
+  // Find the function on the stack and both the active code for the
+  // function and the original code.
+  JSFunction* function = frame->function();
+  return function->shared();
+}
+
+
+Code* IC::GetCode() const {
+  HandleScope scope(isolate());
+  Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
+  Code* code = shared->code();
+  return code;
+}
+
+
+Code* IC::GetOriginalCode() const {
+  HandleScope scope(isolate());
+  Handle<SharedFunctionInfo> shared(GetSharedFunctionInfo(), isolate());
+  DCHECK(Debug::HasDebugInfo(shared));
+  Code* original_code = Debug::GetDebugInfo(shared)->original_code();
+  DCHECK(original_code->IsCode());
+  return original_code;
+}
+
+
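+// Advance the lookup iterator past states a load can skip, stopping at the
+// first state that needs special treatment (proxy, interceptor with a
+// getter, unhandled access check) or at a real property.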
+static void LookupForRead(LookupIterator* it) {
+  for (; it->IsFound(); it->Next()) {
+    switch (it->state()) {
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
+      case LookupIterator::JSPROXY:
+        return;
+      case LookupIterator::INTERCEPTOR: {
+        // If there is a getter, return; otherwise loop to perform the lookup.
+        Handle<JSObject> holder = it->GetHolder<JSObject>();
+        if (!holder->GetNamedInterceptor()->getter()->IsUndefined()) {
+          return;
+        }
+        break;
+      }
+      case LookupIterator::ACCESS_CHECK:
+        // PropertyHandlerCompiler::CheckPrototypes() knows how to emit
+        // access checks for global proxies.
+        if (it->GetHolder<JSObject>()->IsJSGlobalProxy() &&
+            it->HasAccess(v8::ACCESS_GET)) {
+          break;
+        }
+        return;
+      case LookupIterator::ACCESSOR:
+      case LookupIterator::DATA:
+        return;
+    }
+  }
+}
+
+
+bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+                                                Handle<String> name) {
+  if (!IsNameCompatibleWithPrototypeFailure(name)) return false;
+  Handle<Map> receiver_map = TypeToMap(*receiver_type(), isolate());
+  maybe_handler_ = target()->FindHandlerForMap(*receiver_map);
+
+  // The current map wasn't handled yet. There's no reason to stay monomorphic,
+  // *unless* we're moving from a deprecated map to its replacement, or
+  // to a more general elements kind.
+  // TODO(verwaest): Check if the current map is actually what the old map
+  // would transition to.
+  if (maybe_handler_.is_null()) {
+    if (!receiver_map->IsJSObjectMap()) return false;
+    Map* first_map = FirstTargetMap();
+    if (first_map == NULL) return false;
+    Handle<Map> old_map(first_map);
+    if (old_map->is_deprecated()) return true;
+    if (IsMoreGeneralElementsKindTransition(old_map->elements_kind(),
+                                            receiver_map->elements_kind())) {
+      return true;
+    }
+    return false;
+  }
+
+  CacheHolderFlag flag;
+  Handle<Map> ic_holder_map(
+      GetICCacheHolder(*receiver_type(), isolate(), &flag));
+
+  DCHECK(flag != kCacheOnReceiver || receiver->IsJSObject());
+  DCHECK(flag != kCacheOnPrototype || !receiver->IsJSReceiver());
+  DCHECK(flag != kCacheOnPrototypeReceiverIsDictionary);
+
+  if (state() == MONOMORPHIC) {
+    int index = ic_holder_map->IndexInCodeCache(*name, *target());
+    if (index >= 0) {
+      ic_holder_map->RemoveFromCodeCache(*name, *target(), index);
+    }
+  }
+
+  if (receiver->IsGlobalObject()) {
+    Handle<GlobalObject> global = Handle<GlobalObject>::cast(receiver);
+    LookupIterator it(global, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+    if (it.state() == LookupIterator::ACCESS_CHECK) return false;
+    if (!it.IsFound()) return false;
+    Handle<PropertyCell> cell = it.GetPropertyCell();
+    return cell->type()->IsConstant();
+  }
+
+  return true;
+}
+
+
+bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
+  if (target()->is_keyed_stub()) {
+    // Determine whether the failure is due to a name failure.
+    if (!name->IsName()) return false;
+    Name* stub_name = target()->FindFirstName();
+    if (*name != stub_name) return false;
+  }
+
+  return true;
+}
+
+
+void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
+  update_receiver_type(receiver);
+  if (!name->IsString()) return;
+  if (state() != MONOMORPHIC && state() != POLYMORPHIC) return;
+  if (receiver->IsUndefined() || receiver->IsNull()) return;
+
+  // Remove the target from the code cache if it became invalid
+  // because of changes in the prototype chain to avoid hitting it
+  // again.
+  if (TryRemoveInvalidPrototypeDependentStub(receiver,
+                                             Handle<String>::cast(name))) {
+    MarkPrototypeFailure(name);
+    return;
+  }
+
+  // The builtins object is special.  It only changes when JavaScript
+  // builtins are loaded lazily.  It is important to keep inline
+  // caches for the builtins object monomorphic.  Therefore, if we get
+  // an inline cache miss for the builtins object after lazily loading
+  // JavaScript builtins, we return uninitialized as the state to
+  // force the inline cache back to monomorphic state.
+  if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
+}
+
+
+MaybeHandle<Object> IC::TypeError(const char* type, Handle<Object> object,
+                                  Handle<Object> key) {
+  HandleScope scope(isolate());
+  Handle<Object> args[2] = {key, object};
+  THROW_NEW_ERROR(isolate(), NewTypeError(type, HandleVector(args, 2)), Object);
+}
+
+
+MaybeHandle<Object> IC::ReferenceError(const char* type, Handle<Name> name) {
+  HandleScope scope(isolate());
+  THROW_NEW_ERROR(isolate(), NewReferenceError(type, HandleVector(&name, 1)),
+                  Object);
+}
+
+
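+// Compute how an IC state transition shifts the owning function's counts of
+// polymorphic (which includes monomorphic) and generic (which includes
+// megamorphic) ICs.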
+static void ComputeTypeInfoCountDelta(IC::State old_state, IC::State new_state,
+                                      int* polymorphic_delta,
+                                      int* generic_delta) {
+  switch (old_state) {
+    case UNINITIALIZED:
+    case PREMONOMORPHIC:
+      if (new_state == UNINITIALIZED || new_state == PREMONOMORPHIC) break;
+      if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) {
+        *polymorphic_delta = 1;
+      } else if (new_state == MEGAMORPHIC || new_state == GENERIC) {
+        *generic_delta = 1;
+      }
+      break;
+    case MONOMORPHIC:
+    case POLYMORPHIC:
+      if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) break;
+      *polymorphic_delta = -1;
+      if (new_state == MEGAMORPHIC || new_state == GENERIC) {
+        *generic_delta = 1;
+      }
+      break;
+    case MEGAMORPHIC:
+    case GENERIC:
+      if (new_state == MEGAMORPHIC || new_state == GENERIC) break;
+      *generic_delta = -1;
+      if (new_state == MONOMORPHIC || new_state == POLYMORPHIC) {
+        *polymorphic_delta = 1;
+      }
+      break;
+    case PROTOTYPE_FAILURE:
+    case DEBUG_STUB:
+    case DEFAULT:
+      UNREACHABLE();
+  }
+}
+
+
+void IC::OnTypeFeedbackChanged(Isolate* isolate, Address address,
+                               State old_state, State new_state,
+                               bool target_remains_ic_stub) {
+  Code* host =
+      isolate->inner_pointer_to_code_cache()->GetCacheEntry(address)->code;
+  if (host->kind() != Code::FUNCTION) return;
+
+  if (FLAG_type_info_threshold > 0 && target_remains_ic_stub &&
+      // Not all Code objects have TypeFeedbackInfo.
+      host->type_feedback_info()->IsTypeFeedbackInfo()) {
+    int polymorphic_delta = 0;  // "Polymorphic" here includes monomorphic.
+    int generic_delta = 0;      // "Generic" here includes megamorphic.
+    ComputeTypeInfoCountDelta(old_state, new_state, &polymorphic_delta,
+                              &generic_delta);
+    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+    info->change_ic_with_type_info_count(polymorphic_delta);
+    info->change_ic_generic_count(generic_delta);
+  }
+  if (host->type_feedback_info()->IsTypeFeedbackInfo()) {
+    TypeFeedbackInfo* info = TypeFeedbackInfo::cast(host->type_feedback_info());
+    info->change_own_type_change_checksum();
+  }
+  host->set_profiler_ticks(0);
+  isolate->runtime_profiler()->NotifyICChanged();
+  // TODO(2029): When an optimized function is patched, it would
+  // be nice to propagate the corresponding type information to its
+  // unoptimized version for the benefit of later inlining.
+}
+
+
+void IC::PostPatching(Address address, Code* target, Code* old_target) {
+  // Type vector based ICs update these statistics at a different time because
+  // they don't always patch on state change.
+  if (target->kind() == Code::CALL_IC) return;
+
+  Isolate* isolate = target->GetHeap()->isolate();
+  State old_state = UNINITIALIZED;
+  State new_state = UNINITIALIZED;
+  bool target_remains_ic_stub = false;
+  if (old_target->is_inline_cache_stub() && target->is_inline_cache_stub()) {
+    old_state = old_target->ic_state();
+    new_state = target->ic_state();
+    target_remains_ic_stub = true;
+  }
+
+  OnTypeFeedbackChanged(isolate, address, old_state, new_state,
+                        target_remains_ic_stub);
+}
+
+
+void IC::RegisterWeakMapDependency(Handle<Code> stub) {
+  if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_ic &&
+      stub->CanBeWeakStub()) {
+    DCHECK(!stub->is_weak_stub());
+    MapHandleList maps;
+    stub->FindAllMaps(&maps);
+    if (maps.length() == 1 && stub->IsWeakObjectInIC(*maps.at(0))) {
+      Map::AddDependentIC(maps.at(0), stub);
+      stub->mark_as_weak_stub();
+      if (FLAG_enable_ool_constant_pool) {
+        stub->constant_pool()->set_weak_object_state(
+            ConstantPoolArray::WEAK_OBJECTS_IN_IC);
+      }
+    }
+  }
+}
+
+
+void IC::InvalidateMaps(Code* stub) {
+  DCHECK(stub->is_weak_stub());
+  stub->mark_as_invalidated_weak_stub();
+  Isolate* isolate = stub->GetIsolate();
+  Heap* heap = isolate->heap();
+  Object* undefined = heap->undefined_value();
+  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  for (RelocIterator it(stub, mode_mask); !it.done(); it.next()) {
+    RelocInfo::Mode mode = it.rinfo()->rmode();
+    if (mode == RelocInfo::EMBEDDED_OBJECT &&
+        it.rinfo()->target_object()->IsMap()) {
+      it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
+    }
+  }
+  CpuFeatures::FlushICache(stub->instruction_start(), stub->instruction_size());
+}
+
+
+void IC::Clear(Isolate* isolate, Address address,
+               ConstantPoolArray* constant_pool) {
+  Code* target = GetTargetAtAddress(address, constant_pool);
+
+  // Don't clear debug break inline cache as it will remove the break point.
+  if (target->is_debug_stub()) return;
+
+  switch (target->kind()) {
+    case Code::LOAD_IC:
+      return LoadIC::Clear(isolate, address, target, constant_pool);
+    case Code::KEYED_LOAD_IC:
+      return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
+    case Code::STORE_IC:
+      return StoreIC::Clear(isolate, address, target, constant_pool);
+    case Code::KEYED_STORE_IC:
+      return KeyedStoreIC::Clear(isolate, address, target, constant_pool);
+    case Code::CALL_IC:
+      return CallIC::Clear(isolate, address, target, constant_pool);
+    case Code::COMPARE_IC:
+      return CompareIC::Clear(isolate, address, target, constant_pool);
+    case Code::COMPARE_NIL_IC:
+      return CompareNilIC::Clear(address, target, constant_pool);
+    case Code::BINARY_OP_IC:
+    case Code::TO_BOOLEAN_IC:
+      // Clearing these is tricky and does not
+      // make any performance difference.
+      return;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
+                        ConstantPoolArray* constant_pool) {
+  if (IsCleared(target)) return;
+  // Make sure to also clear the map used in inline fast cases.  If we
+  // do not clear these maps, cached code can keep objects alive
+  // through the embedded maps.
+  SetTargetAtAddress(address, *pre_monomorphic_stub(isolate), constant_pool);
+}
+
+
+void CallIC::Clear(Isolate* isolate, Address address, Code* target,
+                   ConstantPoolArray* constant_pool) {
+  // Currently, CallIC doesn't have state changes.
+}
+
+
+void LoadIC::Clear(Isolate* isolate, Address address, Code* target,
+                   ConstantPoolArray* constant_pool) {
+  if (IsCleared(target)) return;
+  Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC,
+                                                      target->extra_ic_state());
+  SetTargetAtAddress(address, code, constant_pool);
+}
+
+
+void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
+                    ConstantPoolArray* constant_pool) {
+  if (IsCleared(target)) return;
+  Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::STORE_IC,
+                                                      target->extra_ic_state());
+  SetTargetAtAddress(address, code, constant_pool);
+}
+
+
+void KeyedStoreIC::Clear(Isolate* isolate, Address address, Code* target,
+                         ConstantPoolArray* constant_pool) {
+  if (IsCleared(target)) return;
+  SetTargetAtAddress(
+      address, *pre_monomorphic_stub(
+                   isolate, StoreIC::GetStrictMode(target->extra_ic_state())),
+      constant_pool);
+}
+
+
+void CompareIC::Clear(Isolate* isolate, Address address, Code* target,
+                      ConstantPoolArray* constant_pool) {
+  DCHECK(CodeStub::GetMajorKey(target) == CodeStub::CompareIC);
+  CompareICStub stub(target->stub_key(), isolate);
+  // Only clear CompareICs that can retain objects.
+  if (stub.state() != CompareICState::KNOWN_OBJECT) return;
+  SetTargetAtAddress(address, GetRawUninitialized(isolate, stub.op()),
+                     constant_pool);
+  PatchInlinedSmiCode(address, DISABLE_INLINED_SMI_CHECK);
+}
+
+
+// static
+Handle<Code> KeyedLoadIC::generic_stub(Isolate* isolate) {
+  if (FLAG_compiled_keyed_generic_loads) {
+    return KeyedLoadGenericStub(isolate).GetCode();
+  } else {
+    return isolate->builtins()->KeyedLoadIC_Generic();
+  }
+}
+
+
+static bool MigrateDeprecated(Handle<Object> object) {
+  if (!object->IsJSObject()) return false;
+  Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+  if (!receiver->map()->is_deprecated()) return false;
+  JSObject::MigrateInstance(Handle<JSObject>::cast(object));
+  return true;
+}
+
+
+MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
+  // If the object is undefined or null it's illegal to try to get any
+  // of its properties; throw a TypeError in that case.
+  if (object->IsUndefined() || object->IsNull()) {
+    return TypeError("non_object_property_load", object, name);
+  }
+
+  // Check if the name is trivially convertible to an index and get
+  // the element or char if so.
+  uint32_t index;
+  if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
+    // Rewrite to the generic keyed load stub.
+    if (FLAG_use_ic) {
+      set_target(*KeyedLoadIC::generic_stub(isolate()));
+      TRACE_IC("LoadIC", name);
+      TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
+    }
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate(), result,
+        Runtime::GetElementOrCharAt(isolate(), object, index), Object);
+    return result;
+  }
+
+  bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
+
+  // Named lookup in the object.
+  LookupIterator it(object, name);
+  LookupForRead(&it);
+
+  if (it.IsFound() || !IsUndeclaredGlobal(object)) {
+    // Update inline cache and stub cache.
+    if (use_ic) UpdateCaches(&it);
+
+    // Get the property.
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(isolate(), result, Object::GetProperty(&it),
+                               Object);
+    if (it.IsFound()) {
+      return result;
+    } else if (!IsUndeclaredGlobal(object)) {
+      LOG(isolate(), SuspectReadEvent(*name, *object));
+      return result;
+    }
+  }
+  return ReferenceError("not_defined", name);
+}
+
+
+static bool AddOneReceiverMapIfMissing(MapHandleList* receiver_maps,
+                                       Handle<Map> new_receiver_map) {
+  DCHECK(!new_receiver_map.is_null());
+  for (int current = 0; current < receiver_maps->length(); ++current) {
+    if (!receiver_maps->at(current).is_null() &&
+        receiver_maps->at(current).is_identical_to(new_receiver_map)) {
+      return false;
+    }
+  }
+  receiver_maps->Add(new_receiver_map);
+  return true;
+}
+
+
+bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
+  if (!code->is_handler()) return false;
+  if (target()->is_keyed_stub() && state() != PROTOTYPE_FAILURE) return false;
+  Handle<HeapType> type = receiver_type();
+  TypeHandleList types;
+  CodeHandleList handlers;
+
+  TargetTypes(&types);
+  int number_of_types = types.length();
+  int deprecated_types = 0;
+  int handler_to_overwrite = -1;
+
+  for (int i = 0; i < number_of_types; i++) {
+    Handle<HeapType> current_type = types.at(i);
+    if (current_type->IsClass() &&
+        current_type->AsClass()->Map()->is_deprecated()) {
+      // Filter out deprecated maps to ensure their instances get migrated.
+      ++deprecated_types;
+    } else if (type->NowIs(current_type)) {
+      // If the receiver type is already in the polymorphic IC, this indicates
+      // there was a prototype chain failure. In that case, just overwrite the
+      // handler.
+      handler_to_overwrite = i;
+    } else if (handler_to_overwrite == -1 && current_type->IsClass() &&
+               type->IsClass() &&
+               IsTransitionOfMonomorphicTarget(*current_type->AsClass()->Map(),
+                                               *type->AsClass()->Map())) {
+      handler_to_overwrite = i;
+    }
+  }
+
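+  // Count only live entries: entries for deprecated maps and a handler that
+  // is about to be overwritten do not occupy new slots.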
+  int number_of_valid_types =
+      number_of_types - deprecated_types - (handler_to_overwrite != -1);
+
+  if (number_of_valid_types >= 4) return false;
+  if (number_of_types == 0) return false;
+  if (!target()->FindHandlers(&handlers, types.length())) return false;
+
+  number_of_valid_types++;
+  if (number_of_valid_types > 1 && target()->is_keyed_stub()) return false;
+  Handle<Code> ic;
+  if (number_of_valid_types == 1) {
+    ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code,
+                                                extra_ic_state());
+  } else {
+    if (handler_to_overwrite >= 0) {
+      handlers.Set(handler_to_overwrite, code);
+      if (!type->NowIs(types.at(handler_to_overwrite))) {
+        types.Set(handler_to_overwrite, type);
+      }
+    } else {
+      types.Add(type);
+      handlers.Add(code);
+    }
+    ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers,
+                                                number_of_valid_types, name,
+                                                extra_ic_state());
+  }
+  set_target(*ic);
+  return true;
+}
+
+
+Handle<HeapType> IC::CurrentTypeOf(Handle<Object> object, Isolate* isolate) {
+  return object->IsJSGlobalObject()
+             ? HeapType::Constant(Handle<JSGlobalObject>::cast(object), isolate)
+             : HeapType::NowOf(object, isolate);
+}
+
+
+Handle<Map> IC::TypeToMap(HeapType* type, Isolate* isolate) {
+  if (type->Is(HeapType::Number()))
+    return isolate->factory()->heap_number_map();
+  if (type->Is(HeapType::Boolean())) return isolate->factory()->boolean_map();
+  if (type->IsConstant()) {
+    return handle(
+        Handle<JSGlobalObject>::cast(type->AsConstant()->Value())->map());
+  }
+  DCHECK(type->IsClass());
+  return type->AsClass()->Map();
+}
+
+
+template <class T>
+typename T::TypeHandle IC::MapToType(Handle<Map> map,
+                                     typename T::Region* region) {
+  if (map->instance_type() == HEAP_NUMBER_TYPE) {
+    return T::Number(region);
+  } else if (map->instance_type() == ODDBALL_TYPE) {
+    // The only oddballs that can be recorded in ICs are booleans.
+    return T::Boolean(region);
+  } else {
+    return T::Class(map, region);
+  }
+}
+
+
+template Type* IC::MapToType<Type>(Handle<Map> map, Zone* zone);
+
+
+template Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map,
+                                                  Isolate* region);
+
+
+void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
+  DCHECK(handler->is_handler());
+  Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
+      kind(), name, receiver_type(), handler, extra_ic_state());
+  set_target(*ic);
+}
+
+
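+// Before a site goes megamorphic, its accumulated (type, handler) pairs are
+// copied into the global stub cache so the already-compiled handlers remain
+// reachable through megamorphic dispatch.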
+void IC::CopyICToMegamorphicCache(Handle<Name> name) {
+  TypeHandleList types;
+  CodeHandleList handlers;
+  TargetTypes(&types);
+  if (!target()->FindHandlers(&handlers, types.length())) return;
+  for (int i = 0; i < types.length(); i++) {
+    UpdateMegamorphicCache(*types.at(i), *name, *handlers.at(i));
+  }
+}
+
+
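+// Returns whether target_map is reachable from source_map through a
+// more-general elements-kind transition, e.g. (illustrative JavaScript):
+//
+//   var a = [1, 2, 3];  // FAST_SMI_ELEMENTS
+//   a[0] = 1.5;         // map transitions to FAST_DOUBLE_ELEMENTS
+//
+// In that case the IC can stay monomorphic on the more general map instead
+// of going polymorphic.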
+bool IC::IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map) {
+  if (source_map == NULL) return true;
+  if (target_map == NULL) return false;
+  ElementsKind target_elements_kind = target_map->elements_kind();
+  bool more_general_transition = IsMoreGeneralElementsKindTransition(
+      source_map->elements_kind(), target_elements_kind);
+  Map* transitioned_map =
+      more_general_transition
+          ? source_map->LookupElementsTransitionMap(target_elements_kind)
+          : NULL;
+
+  return transitioned_map == target_map;
+}
+
+
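+// PatchCache advances the IC along the state machine
+// UNINITIALIZED/PREMONOMORPHIC -> MONOMORPHIC -> POLYMORPHIC -> MEGAMORPHIC,
+// installing the freshly compiled handler at each step and falling back to
+// the megamorphic stub once the polymorphic limit is exceeded.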
+void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
+  switch (state()) {
+    case UNINITIALIZED:
+    case PREMONOMORPHIC:
+      UpdateMonomorphicIC(code, name);
+      break;
+    case PROTOTYPE_FAILURE:
+    case MONOMORPHIC:
+    case POLYMORPHIC:
+      if (!target()->is_keyed_stub() || state() == PROTOTYPE_FAILURE) {
+        if (UpdatePolymorphicIC(name, code)) break;
+        CopyICToMegamorphicCache(name);
+      }
+      set_target(*megamorphic_stub());
+    // Fall through.
+    case MEGAMORPHIC:
+      UpdateMegamorphicCache(*receiver_type(), *name, *code);
+      break;
+    case DEBUG_STUB:
+      break;
+    case DEFAULT:
+    case GENERIC:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
+                                     ExtraICState extra_state) {
+  return PropertyICCompiler::ComputeLoad(isolate, UNINITIALIZED, extra_state);
+}
+
+
+Handle<Code> LoadIC::megamorphic_stub() {
+  if (kind() == Code::LOAD_IC) {
+    MegamorphicLoadStub stub(isolate(), LoadICState(extra_ic_state()));
+    return stub.GetCode();
+  } else {
+    DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
+    return KeyedLoadIC::generic_stub(isolate());
+  }
+}
+
+
+Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
+                                          ExtraICState extra_state) {
+  return PropertyICCompiler::ComputeLoad(isolate, PREMONOMORPHIC, extra_state);
+}
+
+
+Handle<Code> KeyedLoadIC::pre_monomorphic_stub(Isolate* isolate) {
+  return isolate->builtins()->KeyedLoadIC_PreMonomorphic();
+}
+
+
+Handle<Code> LoadIC::pre_monomorphic_stub() const {
+  if (kind() == Code::LOAD_IC) {
+    return LoadIC::pre_monomorphic_stub(isolate(), extra_ic_state());
+  } else {
+    DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
+    return KeyedLoadIC::pre_monomorphic_stub(isolate());
+  }
+}
+
+
+Handle<Code> LoadIC::SimpleFieldLoad(FieldIndex index) {
+  LoadFieldStub stub(isolate(), index);
+  return stub.GetCode();
+}
+
+
+void LoadIC::UpdateCaches(LookupIterator* lookup) {
+  if (state() == UNINITIALIZED) {
+    // This is the first time we execute this inline cache. Set the target to
+    // the pre monomorphic stub to delay setting the monomorphic state.
+    set_target(*pre_monomorphic_stub());
+    TRACE_IC("LoadIC", lookup->name());
+    return;
+  }
+
+  Handle<Code> code;
+  if (lookup->state() == LookupIterator::JSPROXY ||
+      lookup->state() == LookupIterator::ACCESS_CHECK) {
+    code = slow_stub();
+  } else if (!lookup->IsFound()) {
+    if (kind() == Code::LOAD_IC) {
+      code = NamedLoadHandlerCompiler::ComputeLoadNonexistent(lookup->name(),
+                                                              receiver_type());
+      // TODO(jkummerow/verwaest): Introduce a builtin that handles this case.
+      if (code.is_null()) code = slow_stub();
+    } else {
+      code = slow_stub();
+    }
+  } else {
+    code = ComputeHandler(lookup);
+  }
+
+  PatchCache(lookup->name(), code);
+  TRACE_IC("LoadIC", lookup->name());
+}
+
+
+void IC::UpdateMegamorphicCache(HeapType* type, Name* name, Code* code) {
+  if (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC) return;
+  Map* map = *TypeToMap(type, isolate());
+  isolate()->stub_cache()->Set(name, map, code);
+}
+
+
+Handle<Code> IC::ComputeHandler(LookupIterator* lookup, Handle<Object> value) {
+  bool receiver_is_holder =
+      lookup->GetReceiver().is_identical_to(lookup->GetHolder<JSObject>());
+  CacheHolderFlag flag;
+  Handle<Map> stub_holder_map = IC::GetHandlerCacheHolder(
+      *receiver_type(), receiver_is_holder, isolate(), &flag);
+
+  Handle<Code> code = PropertyHandlerCompiler::Find(
+      lookup->name(), stub_holder_map, kind(), flag,
+      lookup->is_dictionary_holder() ? Code::NORMAL : Code::FAST);
+  // Use the cached value if it exists, and if it is different from the
+  // handler that just missed.
+  if (!code.is_null()) {
+    if (!maybe_handler_.is_null() &&
+        !maybe_handler_.ToHandleChecked().is_identical_to(code)) {
+      return code;
+    }
+    if (maybe_handler_.is_null()) {
+      // maybe_handler_ is only populated for MONOMORPHIC and POLYMORPHIC ICs.
+      // In the MEGAMORPHIC case, check whether the handler in the megamorphic
+      // stub cache (which just missed) is different from the cached handler.
+      if (state() == MEGAMORPHIC && lookup->GetReceiver()->IsHeapObject()) {
+        Map* map = Handle<HeapObject>::cast(lookup->GetReceiver())->map();
+        Code* megamorphic_cached_code =
+            isolate()->stub_cache()->Get(*lookup->name(), map, code->flags());
+        if (megamorphic_cached_code != *code) return code;
+      } else {
+        return code;
+      }
+    }
+  }
+
+  code = CompileHandler(lookup, value, flag);
+  DCHECK(code->is_handler());
+
+  // TODO(mvstanton): we'd only like to cache code on the map when it's custom
+  // code compiled for this map, otherwise it's already cached in the global
+  // code cache. We are also guarding against installing code with flags that
+  // don't match the desired CacheHolderFlag computed above, which would lead
+  // to invalid lookups later.
+  if (code->type() != Code::NORMAL &&
+      Code::ExtractCacheHolderFromFlags(code->flags()) == flag) {
+    Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
+  }
+
+  return code;
+}
+
+
+Handle<Code> LoadIC::CompileHandler(LookupIterator* lookup,
+                                    Handle<Object> unused,
+                                    CacheHolderFlag cache_holder) {
+  Handle<Object> receiver = lookup->GetReceiver();
+  if (receiver->IsString() &&
+      Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
+    FieldIndex index = FieldIndex::ForInObjectOffset(String::kLengthOffset);
+    return SimpleFieldLoad(index);
+  }
+
+  if (receiver->IsStringWrapper() &&
+      Name::Equals(isolate()->factory()->length_string(), lookup->name())) {
+    StringLengthStub string_length_stub(isolate());
+    return string_length_stub.GetCode();
+  }
+
+  // Use specialized code for getting prototype of functions.
+  if (receiver->IsJSFunction() &&
+      Name::Equals(isolate()->factory()->prototype_string(), lookup->name()) &&
+      Handle<JSFunction>::cast(receiver)->should_have_prototype() &&
+      !Handle<JSFunction>::cast(receiver)
+           ->map()
+           ->has_non_instance_prototype()) {
+    FunctionPrototypeStub function_prototype_stub(isolate());
+    return function_prototype_stub.GetCode();
+  }
+
+  Handle<HeapType> type = receiver_type();
+  Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+  bool receiver_is_holder = receiver.is_identical_to(holder);
+  switch (lookup->state()) {
+    case LookupIterator::INTERCEPTOR: {
+      DCHECK(!holder->GetNamedInterceptor()->getter()->IsUndefined());
+      NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+                                        cache_holder);
+      // Perform a lookup behind the interceptor. Copy the LookupIterator since
+      // the original iterator will be used to fetch the value.
+      LookupIterator it = *lookup;
+      it.Next();
+      LookupForRead(&it);
+      return compiler.CompileLoadInterceptor(&it);
+    }
+
+    case LookupIterator::ACCESSOR: {
+      // Use simple field loads for some well-known callback properties.
+      if (receiver_is_holder) {
+        DCHECK(receiver->IsJSObject());
+        Handle<JSObject> js_receiver = Handle<JSObject>::cast(receiver);
+        int object_offset;
+        if (Accessors::IsJSObjectFieldAccessor<HeapType>(type, lookup->name(),
+                                                         &object_offset)) {
+          FieldIndex index =
+              FieldIndex::ForInObjectOffset(object_offset, js_receiver->map());
+          return SimpleFieldLoad(index);
+        }
+      }
+
+      Handle<Object> accessors = lookup->GetAccessors();
+      if (accessors->IsExecutableAccessorInfo()) {
+        Handle<ExecutableAccessorInfo> info =
+            Handle<ExecutableAccessorInfo>::cast(accessors);
+        if (v8::ToCData<Address>(info->getter()) == 0) break;
+        if (!ExecutableAccessorInfo::IsCompatibleReceiverType(isolate(), info,
+                                                              type)) {
+          break;
+        }
+        if (!holder->HasFastProperties()) break;
+        NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+                                          cache_holder);
+        return compiler.CompileLoadCallback(lookup->name(), info);
+      }
+      if (accessors->IsAccessorPair()) {
+        Handle<Object> getter(Handle<AccessorPair>::cast(accessors)->getter(),
+                              isolate());
+        if (!getter->IsJSFunction()) break;
+        if (!holder->HasFastProperties()) break;
+        Handle<JSFunction> function = Handle<JSFunction>::cast(getter);
+        if (!receiver->IsJSObject() && !function->IsBuiltin() &&
+            function->shared()->strict_mode() == SLOPPY) {
+          // Calling sloppy non-builtins with a value as the receiver
+          // requires boxing.
+          break;
+        }
+        CallOptimization call_optimization(function);
+        NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+                                          cache_holder);
+        if (call_optimization.is_simple_api_call() &&
+            call_optimization.IsCompatibleReceiver(receiver, holder)) {
+          return compiler.CompileLoadCallback(lookup->name(),
+                                              call_optimization);
+        }
+        return compiler.CompileLoadViaGetter(lookup->name(), function);
+      }
+      // TODO(dcarney): Handle correctly.
+      DCHECK(accessors->IsDeclaredAccessorInfo());
+      break;
+    }
+
+    case LookupIterator::DATA: {
+      if (lookup->is_dictionary_holder()) {
+        if (kind() != Code::LOAD_IC) break;
+        if (holder->IsGlobalObject()) {
+          NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+                                            cache_holder);
+          Handle<PropertyCell> cell = lookup->GetPropertyCell();
+          Handle<Code> code = compiler.CompileLoadGlobal(
+              cell, lookup->name(), lookup->IsConfigurable());
+          // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+          CacheHolderFlag flag;
+          Handle<Map> stub_holder_map = GetHandlerCacheHolder(
+              *type, receiver_is_holder, isolate(), &flag);
+          Map::UpdateCodeCache(stub_holder_map, lookup->name(), code);
+          return code;
+        }
+        // There is only one shared stub for loading normalized
+        // properties. It does not traverse the prototype chain, so the
+        // property must be found in the object for the stub to be
+        // applicable.
+        if (!receiver_is_holder) break;
+        return isolate()->builtins()->LoadIC_Normal();
+      }
+
+      // -------------- Fields --------------
+      if (lookup->property_details().type() == FIELD) {
+        FieldIndex field = lookup->GetFieldIndex();
+        if (receiver_is_holder) {
+          return SimpleFieldLoad(field);
+        }
+        NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+                                          cache_holder);
+        return compiler.CompileLoadField(lookup->name(), field);
+      }
+
+      // -------------- Constant properties --------------
+      DCHECK(lookup->property_details().type() == CONSTANT);
+      if (receiver_is_holder) {
+        LoadConstantStub stub(isolate(), lookup->GetConstantIndex());
+        return stub.GetCode();
+      }
+      NamedLoadHandlerCompiler compiler(isolate(), receiver_type(), holder,
+                                        cache_holder);
+      return compiler.CompileLoadConstant(lookup->name(),
+                                          lookup->GetConstantIndex());
+    }
+
+    case LookupIterator::ACCESS_CHECK:
+    case LookupIterator::JSPROXY:
+    case LookupIterator::NOT_FOUND:
+    case LookupIterator::TRANSITION:
+      UNREACHABLE();
+  }
+
+  return slow_stub();
+}
+
+
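+// Canonicalizes keys for keyed loads/stores, e.g. (illustrative JavaScript):
+//   o[1.0]        // HeapNumber 1.0 becomes the Smi key 1
+//   o[0 / 0]      // NaN becomes the internalized string "nan"
+//   o[undefined]  // becomes the internalized string "undefined"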
+static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
+  // This helper implements a few common fast cases for converting
+  // non-smi keys of keyed loads/stores to a smi or a string.
+  if (key->IsHeapNumber()) {
+    double value = Handle<HeapNumber>::cast(key)->value();
+    if (std::isnan(value)) {
+      key = isolate->factory()->nan_string();
+    } else {
+      int int_value = FastD2I(value);
+      if (value == int_value && Smi::IsValid(int_value)) {
+        key = Handle<Smi>(Smi::FromInt(int_value), isolate);
+      }
+    }
+  } else if (key->IsUndefined()) {
+    key = isolate->factory()->undefined_string();
+  }
+  return key;
+}
+
+
+Handle<Code> KeyedLoadIC::LoadElementStub(Handle<JSObject> receiver) {
+  Handle<Map> receiver_map(receiver->map(), isolate());
+  MapHandleList target_receiver_maps;
+  if (target().is_identical_to(string_stub())) {
+    target_receiver_maps.Add(isolate()->factory()->string_map());
+  } else {
+    TargetMaps(&target_receiver_maps);
+  }
+  if (target_receiver_maps.length() == 0) {
+    return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
+  }
+
+  // The first time a receiver is seen that is a transitioned version of the
+  // previous monomorphic receiver type, assume the new ElementsKind is the
+  // monomorphic type. This benefits global arrays that only transition
+  // once, and all call sites accessing them are faster if they remain
+  // monomorphic. If this optimistic assumption is not true, the IC will
+  // miss again and it will become polymorphic and support both the
+  // untransitioned and transitioned maps.
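+  // For example (illustrative JavaScript), a site that has only seen
+  //   a[i]           // a with FAST_SMI_ELEMENTS
+  // and then sees the same array after
+  //   a[0] = 1.5;    // a now has FAST_DOUBLE_ELEMENTS
+  // is recompiled monomorphically for the double-elements map.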
+  if (state() == MONOMORPHIC && IsMoreGeneralElementsKindTransition(
+                                    target_receiver_maps.at(0)->elements_kind(),
+                                    receiver->GetElementsKind())) {
+    return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
+  }
+
+  DCHECK(state() != GENERIC);
+
+  // Determine the list of receiver maps that this call site has seen,
+  // adding the map that was just encountered.
+  if (!AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map)) {
+    // If the miss wasn't due to an unseen map, a polymorphic stub
+    // won't help; use the generic stub.
+    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
+    return generic_stub();
+  }
+
+  // If the maximum number of receiver maps has been exceeded, use the generic
+  // version of the IC.
+  if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+    TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
+    return generic_stub();
+  }
+
+  return PropertyICCompiler::ComputeKeyedLoadPolymorphic(&target_receiver_maps);
+}
+
+
+MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
+                                      Handle<Object> key) {
+  if (MigrateDeprecated(object)) {
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate(), result, Runtime::GetObjectProperty(isolate(), object, key),
+        Object);
+    return result;
+  }
+
+  Handle<Object> load_handle;
+  Handle<Code> stub = generic_stub();
+
+  // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
+  key = TryConvertKey(key, isolate());
+
+  if (key->IsInternalizedString() || key->IsSymbol()) {
+    ASSIGN_RETURN_ON_EXCEPTION(isolate(), load_handle,
+                               LoadIC::Load(object, Handle<Name>::cast(key)),
+                               Object);
+  } else if (FLAG_use_ic && !object->IsAccessCheckNeeded()) {
+    if (object->IsString() && key->IsNumber()) {
+      if (state() == UNINITIALIZED) stub = string_stub();
+    } else if (object->IsJSObject()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      if (!Object::ToSmi(isolate(), key).is_null()) {
+        stub = LoadElementStub(receiver);
+      }
+    }
+  }
+
+  if (!is_target_set()) {
+    Code* generic = *generic_stub();
+    if (*stub == generic) {
+      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+    }
+    set_target(*stub);
+    TRACE_IC("LoadIC", key);
+  }
+
+  if (!load_handle.is_null()) return load_handle;
+  Handle<Object> result;
+  ASSIGN_RETURN_ON_EXCEPTION(isolate(), result,
+                             Runtime::GetObjectProperty(isolate(), object, key),
+                             Object);
+  return result;
+}
+
+
+bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
+                             JSReceiver::StoreFromKeyed store_mode) {
+  // Disable ICs for non-JSObjects for now.
+  Handle<Object> receiver = it->GetReceiver();
+  if (!receiver->IsJSObject()) return false;
+  DCHECK(!Handle<JSObject>::cast(receiver)->map()->is_deprecated());
+
+  for (; it->IsFound(); it->Next()) {
+    switch (it->state()) {
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
+      case LookupIterator::JSPROXY:
+        return false;
+      case LookupIterator::INTERCEPTOR: {
+        Handle<JSObject> holder = it->GetHolder<JSObject>();
+        InterceptorInfo* info = holder->GetNamedInterceptor();
+        if (it->HolderIsReceiverOrHiddenPrototype()) {
+          if (!info->setter()->IsUndefined()) return true;
+        } else if (!info->getter()->IsUndefined() ||
+                   !info->query()->IsUndefined()) {
+          return false;
+        }
+        break;
+      }
+      case LookupIterator::ACCESS_CHECK:
+        if (it->GetHolder<JSObject>()->IsAccessCheckNeeded()) return false;
+        break;
+      case LookupIterator::ACCESSOR:
+        return !it->IsReadOnly();
+      case LookupIterator::DATA: {
+        if (it->IsReadOnly()) return false;
+        Handle<JSObject> holder = it->GetHolder<JSObject>();
+        if (receiver.is_identical_to(holder)) {
+          it->PrepareForDataProperty(value);
+          // The previous receiver map might just have been deprecated,
+          // so reload it.
+          update_receiver_type(receiver);
+          return true;
+        }
+
+        // Receiver != holder.
+        PrototypeIterator iter(it->isolate(), receiver);
+        if (receiver->IsJSGlobalProxy()) {
+          return it->GetHolder<Object>().is_identical_to(
+              PrototypeIterator::GetCurrent(iter));
+        }
+
+        it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+        return it->IsCacheableTransition();
+      }
+    }
+  }
+
+  it->PrepareTransitionToDataProperty(value, NONE, store_mode);
+  return it->IsCacheableTransition();
+}
+
+
+MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
+                                   Handle<Object> value,
+                                   JSReceiver::StoreFromKeyed store_mode) {
+  // TODO(verwaest): Let SetProperty do the migration, since storing a property
+  // might deprecate the current map again, if value does not fit.
+  if (MigrateDeprecated(object) || object->IsJSProxy()) {
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate(), result,
+        Object::SetProperty(object, name, value, strict_mode()), Object);
+    return result;
+  }
+
+  // If the object is undefined or null it's illegal to try to set any
+  // properties on it; throw a TypeError in that case.
+  if (object->IsUndefined() || object->IsNull()) {
+    return TypeError("non_object_property_store", object, name);
+  }
+
+  // Check if the given name is an array index.
+  uint32_t index;
+  if (name->AsArrayIndex(&index)) {
+    // Ignore other stores where the receiver is not a JSObject.
+    // TODO(1475): Must check prototype chains of object wrappers.
+    if (!object->IsJSObject()) return value;
+    Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate(), result,
+        JSObject::SetElement(receiver, index, value, NONE, strict_mode()),
+        Object);
+    return value;
+  }
+
+  // Observed objects are always modified through the runtime.
+  if (object->IsHeapObject() &&
+      Handle<HeapObject>::cast(object)->map()->is_observed()) {
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate(), result,
+        Object::SetProperty(object, name, value, strict_mode(), store_mode),
+        Object);
+    return result;
+  }
+
+  LookupIterator it(object, name);
+  if (FLAG_use_ic) UpdateCaches(&it, value, store_mode);
+
+  // Set the property.
+  Handle<Object> result;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate(), result,
+      Object::SetProperty(&it, value, strict_mode(), store_mode), Object);
+  return result;
+}
+
+
+Handle<Code> CallIC::initialize_stub(Isolate* isolate, int argc,
+                                     CallICState::CallType call_type) {
+  CallICStub stub(isolate, CallICState(argc, call_type));
+  Handle<Code> code = stub.GetCode();
+  return code;
+}
+
+
+Handle<Code> StoreIC::initialize_stub(Isolate* isolate,
+                                      StrictMode strict_mode) {
+  ExtraICState extra_state = ComputeExtraICState(strict_mode);
+  Handle<Code> ic =
+      PropertyICCompiler::ComputeStore(isolate, UNINITIALIZED, extra_state);
+  return ic;
+}
+
+
+Handle<Code> StoreIC::megamorphic_stub() {
+  if (kind() == Code::STORE_IC) {
+    return PropertyICCompiler::ComputeStore(isolate(), MEGAMORPHIC,
+                                            extra_ic_state());
+  } else {
+    DCHECK(kind() == Code::KEYED_STORE_IC);
+    if (strict_mode() == STRICT) {
+      return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+    } else {
+      return isolate()->builtins()->KeyedStoreIC_Generic();
+    }
+  }
+}
+
+
+Handle<Code> StoreIC::generic_stub() const {
+  if (kind() == Code::STORE_IC) {
+    return PropertyICCompiler::ComputeStore(isolate(), GENERIC,
+                                            extra_ic_state());
+  } else {
+    DCHECK(kind() == Code::KEYED_STORE_IC);
+    if (strict_mode() == STRICT) {
+      return isolate()->builtins()->KeyedStoreIC_Generic_Strict();
+    } else {
+      return isolate()->builtins()->KeyedStoreIC_Generic();
+    }
+  }
+}
+
+
+Handle<Code> StoreIC::slow_stub() const {
+  if (kind() == Code::STORE_IC) {
+    return isolate()->builtins()->StoreIC_Slow();
+  } else {
+    DCHECK(kind() == Code::KEYED_STORE_IC);
+    return isolate()->builtins()->KeyedStoreIC_Slow();
+  }
+}
+
+
+Handle<Code> StoreIC::pre_monomorphic_stub(Isolate* isolate,
+                                           StrictMode strict_mode) {
+  ExtraICState state = ComputeExtraICState(strict_mode);
+  return PropertyICCompiler::ComputeStore(isolate, PREMONOMORPHIC, state);
+}
+
+
+void StoreIC::UpdateCaches(LookupIterator* lookup, Handle<Object> value,
+                           JSReceiver::StoreFromKeyed store_mode) {
+  if (state() == UNINITIALIZED) {
+    // This is the first time we execute this inline cache. Set the target to
+    // the pre monomorphic stub to delay setting the monomorphic state.
+    set_target(*pre_monomorphic_stub());
+    TRACE_IC("StoreIC", lookup->name());
+    return;
+  }
+
+  bool use_ic = LookupForWrite(lookup, value, store_mode);
+  if (!use_ic) {
+    TRACE_GENERIC_IC(isolate(), "StoreIC", "LookupForWrite said 'false'");
+  }
+  Handle<Code> code = use_ic ? ComputeHandler(lookup, value) : slow_stub();
+
+  PatchCache(lookup->name(), code);
+  TRACE_IC("StoreIC", lookup->name());
+}
+
+
+Handle<Code> StoreIC::CompileHandler(LookupIterator* lookup,
+                                     Handle<Object> value,
+                                     CacheHolderFlag cache_holder) {
+  DCHECK_NE(LookupIterator::JSPROXY, lookup->state());
+
+  // This is currently guaranteed by checks in StoreIC::Store.
+  Handle<JSObject> receiver = Handle<JSObject>::cast(lookup->GetReceiver());
+  Handle<JSObject> holder = lookup->GetHolder<JSObject>();
+  DCHECK(!receiver->IsAccessCheckNeeded());
+
+  switch (lookup->state()) {
+    case LookupIterator::TRANSITION: {
+      Handle<Map> transition = lookup->transition_map();
+      // Currently not handled by CompileStoreTransition.
+      if (!holder->HasFastProperties()) {
+        TRACE_GENERIC_IC(isolate(), "StoreIC", "transition from slow");
+        break;
+      }
+
+      DCHECK(lookup->IsCacheableTransition());
+      NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+      return compiler.CompileStoreTransition(transition, lookup->name());
+    }
+
+    case LookupIterator::INTERCEPTOR: {
+      DCHECK(!holder->GetNamedInterceptor()->setter()->IsUndefined());
+      NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+      return compiler.CompileStoreInterceptor(lookup->name());
+    }
+
+    case LookupIterator::ACCESSOR: {
+      if (!holder->HasFastProperties()) {
+        TRACE_GENERIC_IC(isolate(), "StoreIC", "accessor on slow map");
+        break;
+      }
+      Handle<Object> accessors = lookup->GetAccessors();
+      if (accessors->IsExecutableAccessorInfo()) {
+        Handle<ExecutableAccessorInfo> info =
+            Handle<ExecutableAccessorInfo>::cast(accessors);
+        if (v8::ToCData<Address>(info->setter()) == 0) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter == 0");
+          break;
+        }
+        if (!ExecutableAccessorInfo::IsCompatibleReceiverType(
+                isolate(), info, receiver_type())) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "incompatible receiver type");
+          break;
+        }
+        NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+        return compiler.CompileStoreCallback(receiver, lookup->name(), info);
+      } else if (accessors->IsAccessorPair()) {
+        Handle<Object> setter(Handle<AccessorPair>::cast(accessors)->setter(),
+                              isolate());
+        if (!setter->IsJSFunction()) {
+          TRACE_GENERIC_IC(isolate(), "StoreIC", "setter not a function");
+          break;
+        }
+        Handle<JSFunction> function = Handle<JSFunction>::cast(setter);
+        CallOptimization call_optimization(function);
+        NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+        if (call_optimization.is_simple_api_call() &&
+            call_optimization.IsCompatibleReceiver(receiver, holder)) {
+          return compiler.CompileStoreCallback(receiver, lookup->name(),
+                                               call_optimization);
+        }
+        return compiler.CompileStoreViaSetter(receiver, lookup->name(),
+                                              Handle<JSFunction>::cast(setter));
+      }
+      // TODO(dcarney): Handle correctly.
+      DCHECK(accessors->IsDeclaredAccessorInfo());
+      TRACE_GENERIC_IC(isolate(), "StoreIC", "declared accessor info");
+      break;
+    }
+
+    case LookupIterator::DATA: {
+      if (lookup->is_dictionary_holder()) {
+        if (holder->IsGlobalObject()) {
+          Handle<PropertyCell> cell = lookup->GetPropertyCell();
+          Handle<HeapType> union_type = PropertyCell::UpdatedType(cell, value);
+          StoreGlobalStub stub(isolate(), union_type->IsConstant(),
+                               receiver->IsJSGlobalProxy());
+          Handle<Code> code = stub.GetCodeCopyFromTemplate(
+              Handle<GlobalObject>::cast(holder), cell);
+          // TODO(verwaest): Move caching of these NORMAL stubs outside as well.
+          HeapObject::UpdateMapCodeCache(receiver, lookup->name(), code);
+          return code;
+        }
+        DCHECK(holder.is_identical_to(receiver));
+        return isolate()->builtins()->StoreIC_Normal();
+      }
+
+      // -------------- Fields --------------
+      if (lookup->property_details().type() == FIELD) {
+        bool use_stub = true;
+        if (lookup->representation().IsHeapObject()) {
+          // Only use a generic stub if no types need to be tracked.
+          Handle<HeapType> field_type = lookup->GetFieldType();
+          HeapType::Iterator<Map> it = field_type->Classes();
+          use_stub = it.Done();
+        }
+        if (use_stub) {
+          StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                              lookup->representation());
+          return stub.GetCode();
+        }
+        NamedStoreHandlerCompiler compiler(isolate(), receiver_type(), holder);
+        return compiler.CompileStoreField(lookup);
+      }
+
+      // -------------- Constant properties --------------
+      DCHECK(lookup->property_details().type() == CONSTANT);
+      TRACE_GENERIC_IC(isolate(), "StoreIC", "constant property");
+      break;
+    }
+
+    case LookupIterator::ACCESS_CHECK:
+    case LookupIterator::JSPROXY:
+    case LookupIterator::NOT_FOUND:
+      UNREACHABLE();
+  }
+  return slow_stub();
+}
+
+
+Handle<Code> KeyedStoreIC::StoreElementStub(Handle<JSObject> receiver,
+                                            KeyedAccessStoreMode store_mode) {
+  // Don't handle megamorphic property accesses for INTERCEPTORS or CALLBACKS
+  // via megamorphic stubs, since they don't have a map in their relocation info
+  // and so the stubs can't be harvested for the object needed for a map check.
+  if (target()->type() != Code::NORMAL) {
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-NORMAL target type");
+    return generic_stub();
+  }
+
+  Handle<Map> receiver_map(receiver->map(), isolate());
+  MapHandleList target_receiver_maps;
+  TargetMaps(&target_receiver_maps);
+  if (target_receiver_maps.length() == 0) {
+    Handle<Map> monomorphic_map =
+        ComputeTransitionedMap(receiver_map, store_mode);
+    store_mode = GetNonTransitioningStoreMode(store_mode);
+    return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+        monomorphic_map, strict_mode(), store_mode);
+  }
+
+  // There are several special cases where an IC that is MONOMORPHIC can still
+  // transition to a different store mode (via GetNonTransitioningStoreMode)
+  // whose IC handles a superset of the original IC. Handle those here if the
+  // receiver map hasn't changed or has transitioned to a more general kind.
+  KeyedAccessStoreMode old_store_mode =
+      KeyedStoreIC::GetKeyedAccessStoreMode(target()->extra_ic_state());
+  Handle<Map> previous_receiver_map = target_receiver_maps.at(0);
+  if (state() == MONOMORPHIC) {
+    Handle<Map> transitioned_receiver_map = receiver_map;
+    if (IsTransitionStoreMode(store_mode)) {
+      transitioned_receiver_map =
+          ComputeTransitionedMap(receiver_map, store_mode);
+    }
+    if ((receiver_map.is_identical_to(previous_receiver_map) &&
+         IsTransitionStoreMode(store_mode)) ||
+        IsTransitionOfMonomorphicTarget(*previous_receiver_map,
+                                        *transitioned_receiver_map)) {
+      // If the "old" and "new" maps are in the same elements map family, or
+      // if they at least come from the same origin for a transitioning store,
+      // stay MONOMORPHIC and use the map for the most generic ElementsKind.
+      store_mode = GetNonTransitioningStoreMode(store_mode);
+      return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+          transitioned_receiver_map, strict_mode(), store_mode);
+    } else if (*previous_receiver_map == receiver->map() &&
+               old_store_mode == STANDARD_STORE &&
+               (store_mode == STORE_AND_GROW_NO_TRANSITION ||
+                store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
+                store_mode == STORE_NO_TRANSITION_HANDLE_COW)) {
+      // A "normal" IC that handles stores can switch to a version that can
+      // grow at the end of the array, handle OOB accesses or copy COW arrays
+      // and still stay MONOMORPHIC.
+      return PropertyICCompiler::ComputeKeyedStoreMonomorphic(
+          receiver_map, strict_mode(), store_mode);
+    }
+  }
+
+  DCHECK(state() != GENERIC);
+
+  bool map_added =
+      AddOneReceiverMapIfMissing(&target_receiver_maps, receiver_map);
+
+  if (IsTransitionStoreMode(store_mode)) {
+    Handle<Map> transitioned_receiver_map =
+        ComputeTransitionedMap(receiver_map, store_mode);
+    map_added |= AddOneReceiverMapIfMissing(&target_receiver_maps,
+                                            transitioned_receiver_map);
+  }
+
+  if (!map_added) {
+    // If the miss wasn't due to an unseen map, a polymorphic stub
+    // won't help; use the generic stub.
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "same map added twice");
+    return generic_stub();
+  }
+
+  // If the maximum number of receiver maps has been exceeded, use the generic
+  // version of the IC.
+  if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "max polymorph exceeded");
+    return generic_stub();
+  }
+
+  // Make sure all polymorphic handlers have the same store mode, otherwise the
+  // generic stub must be used.
+  store_mode = GetNonTransitioningStoreMode(store_mode);
+  if (old_store_mode != STANDARD_STORE) {
+    if (store_mode == STANDARD_STORE) {
+      store_mode = old_store_mode;
+    } else if (store_mode != old_store_mode) {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "store mode mismatch");
+      return generic_stub();
+    }
+  }
+
+  // If the store mode isn't the standard mode, make sure that all polymorphic
+  // receivers are either all external arrays or all "normal" arrays.
+  // Otherwise, use the generic stub.
+  if (store_mode != STANDARD_STORE) {
+    int external_arrays = 0;
+    for (int i = 0; i < target_receiver_maps.length(); ++i) {
+      if (target_receiver_maps[i]->has_external_array_elements() ||
+          target_receiver_maps[i]->has_fixed_typed_array_elements()) {
+        external_arrays++;
+      }
+    }
+    if (external_arrays != 0 &&
+        external_arrays != target_receiver_maps.length()) {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+                       "unsupported combination of external and normal arrays");
+      return generic_stub();
+    }
+  }
+
+  return PropertyICCompiler::ComputeKeyedStorePolymorphic(
+      &target_receiver_maps, store_mode, strict_mode());
+}
+
+
+Handle<Map> KeyedStoreIC::ComputeTransitionedMap(
+    Handle<Map> map, KeyedAccessStoreMode store_mode) {
+  switch (store_mode) {
+    case STORE_TRANSITION_SMI_TO_OBJECT:
+    case STORE_TRANSITION_DOUBLE_TO_OBJECT:
+    case STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT:
+    case STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT:
+      return Map::TransitionElementsTo(map, FAST_ELEMENTS);
+    case STORE_TRANSITION_SMI_TO_DOUBLE:
+    case STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE:
+      return Map::TransitionElementsTo(map, FAST_DOUBLE_ELEMENTS);
+    case STORE_TRANSITION_HOLEY_SMI_TO_OBJECT:
+    case STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+    case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT:
+    case STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT:
+      return Map::TransitionElementsTo(map, FAST_HOLEY_ELEMENTS);
+    case STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+    case STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE:
+      return Map::TransitionElementsTo(map, FAST_HOLEY_DOUBLE_ELEMENTS);
+    case STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS:
+      DCHECK(map->has_external_array_elements());
+    // Fall through.
+    case STORE_NO_TRANSITION_HANDLE_COW:
+    case STANDARD_STORE:
+    case STORE_AND_GROW_NO_TRANSITION:
+      return map;
+  }
+  UNREACHABLE();
+  return MaybeHandle<Map>().ToHandleChecked();
+}
+
+
+bool IsOutOfBoundsAccess(Handle<JSObject> receiver, int index) {
+  if (receiver->IsJSArray()) {
+    return JSArray::cast(*receiver)->length()->IsSmi() &&
+           index >= Smi::cast(JSArray::cast(*receiver)->length())->value();
+  }
+  return index >= receiver->elements()->length();
+}
+
+
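+// Examples of the resulting store modes (illustrative JavaScript; each line
+// assumes a fresh var a = [1, 2, 3] with FAST_SMI_ELEMENTS):
+//   a[1] = 1.5;  // in bounds, smi -> double: STORE_TRANSITION_SMI_TO_DOUBLE
+//   a[3] = 4;    // grows the JSArray by one: STORE_AND_GROW_NO_TRANSITION
+//   a[3] = {};   // grow and smi -> object: STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT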
+KeyedAccessStoreMode KeyedStoreIC::GetStoreMode(Handle<JSObject> receiver,
+                                                Handle<Object> key,
+                                                Handle<Object> value) {
+  Handle<Smi> smi_key = Object::ToSmi(isolate(), key).ToHandleChecked();
+  int index = smi_key->value();
+  bool oob_access = IsOutOfBoundsAccess(receiver, index);
+  // Don't consider this a growing store if the store would send the receiver to
+  // dictionary mode.
+  bool allow_growth = receiver->IsJSArray() && oob_access &&
+                      !receiver->WouldConvertToSlowElements(key);
+  if (allow_growth) {
+    // Handle growing array in stub if necessary.
+    if (receiver->HasFastSmiElements()) {
+      if (value->IsHeapNumber()) {
+        if (receiver->HasFastHoleyElements()) {
+          return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_DOUBLE;
+        } else {
+          return STORE_AND_GROW_TRANSITION_SMI_TO_DOUBLE;
+        }
+      }
+      if (value->IsHeapObject()) {
+        if (receiver->HasFastHoleyElements()) {
+          return STORE_AND_GROW_TRANSITION_HOLEY_SMI_TO_OBJECT;
+        } else {
+          return STORE_AND_GROW_TRANSITION_SMI_TO_OBJECT;
+        }
+      }
+    } else if (receiver->HasFastDoubleElements()) {
+      if (!value->IsSmi() && !value->IsHeapNumber()) {
+        if (receiver->HasFastHoleyElements()) {
+          return STORE_AND_GROW_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
+        } else {
+          return STORE_AND_GROW_TRANSITION_DOUBLE_TO_OBJECT;
+        }
+      }
+    }
+    return STORE_AND_GROW_NO_TRANSITION;
+  } else {
+    // Handle only in-bounds elements accesses.
+    if (receiver->HasFastSmiElements()) {
+      if (value->IsHeapNumber()) {
+        if (receiver->HasFastHoleyElements()) {
+          return STORE_TRANSITION_HOLEY_SMI_TO_DOUBLE;
+        } else {
+          return STORE_TRANSITION_SMI_TO_DOUBLE;
+        }
+      } else if (value->IsHeapObject()) {
+        if (receiver->HasFastHoleyElements()) {
+          return STORE_TRANSITION_HOLEY_SMI_TO_OBJECT;
+        } else {
+          return STORE_TRANSITION_SMI_TO_OBJECT;
+        }
+      }
+    } else if (receiver->HasFastDoubleElements()) {
+      if (!value->IsSmi() && !value->IsHeapNumber()) {
+        if (receiver->HasFastHoleyElements()) {
+          return STORE_TRANSITION_HOLEY_DOUBLE_TO_OBJECT;
+        } else {
+          return STORE_TRANSITION_DOUBLE_TO_OBJECT;
+        }
+      }
+    }
+    if (!FLAG_trace_external_array_abuse &&
+        receiver->map()->has_external_array_elements() && oob_access) {
+      return STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS;
+    }
+    Heap* heap = receiver->GetHeap();
+    if (receiver->elements()->map() == heap->fixed_cow_array_map()) {
+      return STORE_NO_TRANSITION_HANDLE_COW;
+    } else {
+      return STANDARD_STORE;
+    }
+  }
+}
+
+
+MaybeHandle<Object> KeyedStoreIC::Store(Handle<Object> object,
+                                        Handle<Object> key,
+                                        Handle<Object> value) {
+  // TODO(verwaest): Let SetProperty do the migration, since storing a property
+  // might deprecate the current map again, if value does not fit.
+  if (MigrateDeprecated(object)) {
+    Handle<Object> result;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate(), result, Runtime::SetObjectProperty(isolate(), object, key,
+                                                      value, strict_mode()),
+        Object);
+    return result;
+  }
+
+  // Check for non-string values that can be converted into an
+  // internalized string directly or are representable as a smi.
+  key = TryConvertKey(key, isolate());
+
+  Handle<Object> store_handle;
+  Handle<Code> stub = generic_stub();
+
+  if (key->IsInternalizedString()) {
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate(), store_handle,
+        StoreIC::Store(object, Handle<String>::cast(key), value,
+                       JSReceiver::MAY_BE_STORE_FROM_KEYED),
+        Object);
+    // TODO(jkummerow): Ideally we'd wrap this in "if (!is_target_set())",
+    // but doing so causes Hydrogen crashes. Needs investigation.
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC",
+                     "unhandled internalized string key");
+    TRACE_IC("StoreIC", key);
+    set_target(*stub);
+    return store_handle;
+  }
+
+  bool use_ic =
+      FLAG_use_ic && !object->IsStringWrapper() &&
+      !object->IsAccessCheckNeeded() && !object->IsJSGlobalProxy() &&
+      !(object->IsJSObject() && JSObject::cast(*object)->map()->is_observed());
+  if (use_ic && !object->IsSmi()) {
+    // Don't use ICs for maps of the objects in Array's prototype chain. We
+    // expect to be able to trap element sets to objects with those maps in
+    // the runtime to enable optimization of element hole access.
+    Handle<HeapObject> heap_object = Handle<HeapObject>::cast(object);
+    if (heap_object->map()->IsMapInArrayPrototypeChain()) {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "map in array prototype");
+      use_ic = false;
+    }
+  }
+
+  if (use_ic) {
+    DCHECK(!object->IsAccessCheckNeeded());
+
+    if (object->IsJSObject()) {
+      Handle<JSObject> receiver = Handle<JSObject>::cast(object);
+      bool key_is_smi_like = !Object::ToSmi(isolate(), key).is_null();
+      if (receiver->elements()->map() ==
+          isolate()->heap()->sloppy_arguments_elements_map()) {
+        if (strict_mode() == SLOPPY) {
+          stub = sloppy_arguments_stub();
+        } else {
+          TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "arguments receiver");
+        }
+      } else if (key_is_smi_like &&
+                 !(target().is_identical_to(sloppy_arguments_stub()))) {
+        // We should go generic if the receiver isn't a dictionary, but our
+        // prototype chain does have dictionary elements. This ensures that
+        // other non-dictionary receivers in the polymorphic case benefit
+        // from fast-path keyed stores.
+        if (!(receiver->map()->DictionaryElementsInPrototypeChainOnly())) {
+          KeyedAccessStoreMode store_mode = GetStoreMode(receiver, key, value);
+          stub = StoreElementStub(receiver, store_mode);
+        } else {
+          TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "dictionary prototype");
+        }
+      } else {
+        TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-smi-like key");
+      }
+    } else {
+      TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "non-JSObject receiver");
+    }
+  }
+
+  if (store_handle.is_null()) {
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate(), store_handle,
+        Runtime::SetObjectProperty(isolate(), object, key, value,
+                                   strict_mode()),
+        Object);
+  }
+
+  DCHECK(!is_target_set());
+  Code* generic = *generic_stub();
+  if (*stub == generic) {
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "set generic");
+  }
+  if (*stub == *slow_stub()) {
+    TRACE_GENERIC_IC(isolate(), "KeyedStoreIC", "slow stub");
+  }
+  DCHECK(!stub.is_null());
+  set_target(*stub);
+  TRACE_IC("StoreIC", key);
+
+  return store_handle;
+}
+
+
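+// Installs a custom handler for calls to the Array builtin: the feedback
+// slot is seeded with an AllocationSite so that arrays created at this call
+// site (illustratively, `var a = Array(n);`) can track elements-kind
+// transitions for future allocations.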
+bool CallIC::DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
+                             Handle<TypeFeedbackVector> vector,
+                             Handle<Smi> slot, const CallICState& state) {
+  DCHECK(FLAG_use_ic && function->IsJSFunction());
+
+  // Are we the array function?
+  Handle<JSFunction> array_function =
+      Handle<JSFunction>(isolate()->native_context()->array_function());
+  if (array_function.is_identical_to(Handle<JSFunction>::cast(function))) {
+    // Alter the slot.
+    IC::State old_state = FeedbackToState(vector, slot);
+    Object* feedback = vector->get(slot->value());
+    if (!feedback->IsAllocationSite()) {
+      Handle<AllocationSite> new_site =
+          isolate()->factory()->NewAllocationSite();
+      vector->set(slot->value(), *new_site);
+    }
+
+    CallIC_ArrayStub stub(isolate(), state);
+    set_target(*stub.GetCode());
+    Handle<String> name;
+    if (array_function->shared()->name()->IsString()) {
+      name = Handle<String>(String::cast(array_function->shared()->name()),
+                            isolate());
+    }
+
+    IC::State new_state = FeedbackToState(vector, slot);
+    OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
+    TRACE_VECTOR_IC("CallIC (custom handler)", name, old_state, new_state);
+    return true;
+  }
+  return false;
+}
+
+
+void CallIC::PatchMegamorphic(Handle<Object> function,
+                              Handle<TypeFeedbackVector> vector,
+                              Handle<Smi> slot) {
+  CallICState state(target()->extra_ic_state());
+  IC::State old_state = FeedbackToState(vector, slot);
+
+  // We are going generic.
+  vector->set(slot->value(),
+              *TypeFeedbackVector::MegamorphicSentinel(isolate()),
+              SKIP_WRITE_BARRIER);
+
+  CallICStub stub(isolate(), state);
+  Handle<Code> code = stub.GetCode();
+  set_target(*code);
+
+  Handle<Object> name = isolate()->factory()->empty_string();
+  if (function->IsJSFunction()) {
+    Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
+    name = handle(js_function->shared()->name(), isolate());
+  }
+
+  IC::State new_state = FeedbackToState(vector, slot);
+  OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
+  TRACE_VECTOR_IC("CallIC", name, old_state, new_state);
+}
+
+
+void CallIC::HandleMiss(Handle<Object> receiver, Handle<Object> function,
+                        Handle<TypeFeedbackVector> vector, Handle<Smi> slot) {
+  CallICState state(target()->extra_ic_state());
+  IC::State old_state = FeedbackToState(vector, slot);
+  Handle<Object> name = isolate()->factory()->empty_string();
+  Object* feedback = vector->get(slot->value());
+
+  // Hand-coded MISS handling is easier if CallIC slots don't contain smis.
+  DCHECK(!feedback->IsSmi());
+
+  if (feedback->IsJSFunction() || !function->IsJSFunction()) {
+    // We are going generic.
+    vector->set(slot->value(),
+                *TypeFeedbackVector::MegamorphicSentinel(isolate()),
+                SKIP_WRITE_BARRIER);
+  } else {
+    // The feedback is either uninitialized or an allocation site.
+    // It might be an allocation site because if we re-compile the full code
+    // to add deoptimization support, we call with the default call-ic, and
+    // merely need to patch the target to match the feedback.
+    // TODO(mvstanton): the better approach is to dispense with patching
+    // altogether, which is in progress.
+    DCHECK(feedback == *TypeFeedbackVector::UninitializedSentinel(isolate()) ||
+           feedback->IsAllocationSite());
+
+    // Do we want to install a custom handler?
+    if (FLAG_use_ic &&
+        DoCustomHandler(receiver, function, vector, slot, state)) {
+      return;
+    }
+
+    vector->set(slot->value(), *function);
+  }
+
+  if (function->IsJSFunction()) {
+    Handle<JSFunction> js_function = Handle<JSFunction>::cast(function);
+    name = handle(js_function->shared()->name(), isolate());
+  }
+
+  IC::State new_state = FeedbackToState(vector, slot);
+  OnTypeFeedbackChanged(isolate(), address(), old_state, new_state, true);
+  TRACE_VECTOR_IC("CallIC", name, old_state, new_state);
+}
+
+
+#undef TRACE_IC
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(CallIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+  CallIC ic(isolate);
+  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Object> function = args.at<Object>(1);
+  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
+  Handle<Smi> slot = args.at<Smi>(3);
+  ic.HandleMiss(receiver, function, vector, slot);
+  return *function;
+}
+
+
+RUNTIME_FUNCTION(CallIC_Customization_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+  // A miss on a custom call IC always results in going megamorphic.
+  CallIC ic(isolate);
+  Handle<Object> function = args.at<Object>(1);
+  Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(2);
+  Handle<Smi> slot = args.at<Smi>(3);
+  ic.PatchMegamorphic(function, vector, slot);
+  return *function;
+}
+
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(LoadIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Name> key = args.at<Name>(1);
+  ic.UpdateState(receiver, key);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  return *result;
+}
+
+
+// Used from ic-<arch>.cc
+RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  ic.UpdateState(receiver, key);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
+  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  ic.UpdateState(receiver, key);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  return *result;
+}
+
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(StoreIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+  Handle<Object> receiver = args.at<Object>(0);
+  Handle<String> key = args.at<String>(1);
+  ic.UpdateState(receiver, key);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(StoreIC_MissFromStubFailure) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  StoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+  Handle<Object> receiver = args.at<Object>(0);
+  Handle<String> key = args.at<String>(1);
+  ic.UpdateState(receiver, key);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+  return *result;
+}
+
+
+// Extend storage is called in a store inline cache when
+// it is necessary to extend the properties array of a
+// JSObject.
+RUNTIME_FUNCTION(SharedStoreIC_ExtendStorage) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope shs(isolate);
+  DCHECK(args.length() == 3);
+
+  // Convert the parameters
+  Handle<JSObject> object = args.at<JSObject>(0);
+  Handle<Map> transition = args.at<Map>(1);
+  Handle<Object> value = args.at<Object>(2);
+
+  // Check that the object has run out of property space.
+  DCHECK(object->HasFastProperties());
+  DCHECK(object->map()->unused_property_fields() == 0);
+
+  JSObject::MigrateToNewProperty(object, transition, value);
+
+  // Return the stored value.
+  return *value;
+}
+
+
+// Used from ic-<arch>.cc.
+RUNTIME_FUNCTION(KeyedStoreIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  ic.UpdateState(receiver, key);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+  Handle<Object> receiver = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  ic.UpdateState(receiver, key);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, ic.Store(receiver, key, args.at<Object>(2)));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(StoreIC_Slow) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> value = args.at<Object>(2);
+  StrictMode strict_mode = ic.strict_mode();
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(KeyedStoreIC_Slow) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  KeyedStoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+  Handle<Object> object = args.at<Object>(0);
+  Handle<Object> key = args.at<Object>(1);
+  Handle<Object> value = args.at<Object>(2);
+  StrictMode strict_mode = ic.strict_mode();
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+  KeyedStoreIC ic(IC::EXTRA_CALL_FRAME, isolate);
+  Handle<Object> value = args.at<Object>(0);
+  Handle<Map> map = args.at<Map>(1);
+  Handle<Object> key = args.at<Object>(2);
+  Handle<Object> object = args.at<Object>(3);
+  StrictMode strict_mode = ic.strict_mode();
+  if (object->IsJSObject()) {
+    JSObject::TransitionElementsKind(Handle<JSObject>::cast(object),
+                                     map->elements_kind());
+  }
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+  return *result;
+}
+
+
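+// On a miss, BinaryOpIC computes the result through the corresponding JS
+// builtin and then re-specializes the stub. Illustratively, a site that has
+// only ever added smis keeps the inlined smi path; once it sees doubles or
+// strings it is patched to a more general stub (optionally carrying an
+// allocation site).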
+MaybeHandle<Object> BinaryOpIC::Transition(
+    Handle<AllocationSite> allocation_site, Handle<Object> left,
+    Handle<Object> right) {
+  BinaryOpICState state(isolate(), target()->extra_ic_state());
+
+  // Compute the actual result using the builtin for the binary operation.
+  Object* builtin = isolate()->js_builtins_object()->javascript_builtin(
+      TokenToJSBuiltin(state.op()));
+  Handle<JSFunction> function = handle(JSFunction::cast(builtin), isolate());
+  Handle<Object> result;
+  ASSIGN_RETURN_ON_EXCEPTION(
+      isolate(), result, Execution::Call(isolate(), function, left, 1, &right),
+      Object);
+
+  // Execution::Call can execute arbitrary JavaScript, hence potentially
+  // update the state of this very IC, so we must update the stored state.
+  UpdateTarget();
+  // Compute the new state.
+  BinaryOpICState old_state(isolate(), target()->extra_ic_state());
+  state.Update(left, right, result);
+
+  // Install a stub with an allocation site if mementos are required,
+  // otherwise the generic stub.
+  Handle<Code> target;
+  if (!allocation_site.is_null() || state.ShouldCreateAllocationMementos()) {
+    // Set up the allocation site on demand.
+    if (allocation_site.is_null()) {
+      allocation_site = isolate()->factory()->NewAllocationSite();
+    }
+
+    // Install the stub with an allocation site.
+    BinaryOpICWithAllocationSiteStub stub(isolate(), state);
+    target = stub.GetCodeCopyFromTemplate(allocation_site);
+
+    // Sanity check the trampoline stub.
+    DCHECK_EQ(*allocation_site, target->FindFirstAllocationSite());
+  } else {
+    // Install the generic stub.
+    BinaryOpICStub stub(isolate(), state);
+    target = stub.GetCode();
+
+    // Sanity check the generic stub.
+    DCHECK_EQ(NULL, target->FindFirstAllocationSite());
+  }
+  set_target(*target);
+
+  if (FLAG_trace_ic) {
+    OFStream os(stdout);
+    os << "[BinaryOpIC" << old_state << " => " << state << " @ "
+       << static_cast<void*>(*target) << " <- ";
+    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+    if (!allocation_site.is_null()) {
+      os << " using allocation site " << static_cast<void*>(*allocation_site);
+    }
+    os << "]" << endl;
+  }
+
+  // Patch the inlined smi code as necessary.
+  if (!old_state.UseInlinedSmiCode() && state.UseInlinedSmiCode()) {
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+  } else if (old_state.UseInlinedSmiCode() && !state.UseInlinedSmiCode()) {
+    PatchInlinedSmiCode(address(), DISABLE_INLINED_SMI_CHECK);
+  }
+
+  return result;
+}
+
+
+RUNTIME_FUNCTION(BinaryOpIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK_EQ(2, args.length());
+  Handle<Object> left = args.at<Object>(BinaryOpICStub::kLeft);
+  Handle<Object> right = args.at<Object>(BinaryOpICStub::kRight);
+  BinaryOpIC ic(isolate);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      ic.Transition(Handle<AllocationSite>::null(), left, right));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK_EQ(3, args.length());
+  Handle<AllocationSite> allocation_site =
+      args.at<AllocationSite>(BinaryOpWithAllocationSiteStub::kAllocationSite);
+  Handle<Object> left = args.at<Object>(BinaryOpWithAllocationSiteStub::kLeft);
+  Handle<Object> right =
+      args.at<Object>(BinaryOpWithAllocationSiteStub::kRight);
+  BinaryOpIC ic(isolate);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, ic.Transition(allocation_site, left, right));
+  return *result;
+}
+
+
+Code* CompareIC::GetRawUninitialized(Isolate* isolate, Token::Value op) {
+  CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
+                     CompareICState::UNINITIALIZED,
+                     CompareICState::UNINITIALIZED);
+  Code* code = NULL;
+  CHECK(stub.FindCodeInCache(&code));
+  return code;
+}
+
+
+Handle<Code> CompareIC::GetUninitialized(Isolate* isolate, Token::Value op) {
+  CompareICStub stub(isolate, op, CompareICState::UNINITIALIZED,
+                     CompareICState::UNINITIALIZED,
+                     CompareICState::UNINITIALIZED);
+  return stub.GetCode();
+}
+
+
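+// Derives the next CompareIC state from the previous stub's operand states
+// and the operands just seen, then installs the matching specialized stub.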
+Code* CompareIC::UpdateCaches(Handle<Object> x, Handle<Object> y) {
+  HandleScope scope(isolate());
+  CompareICStub old_stub(target()->stub_key(), isolate());
+  CompareICState::State new_left =
+      CompareICState::NewInputState(old_stub.left(), x);
+  CompareICState::State new_right =
+      CompareICState::NewInputState(old_stub.right(), y);
+  CompareICState::State state = CompareICState::TargetState(
+      old_stub.state(), old_stub.left(), old_stub.right(), op_,
+      HasInlinedSmiCode(address()), x, y);
+  CompareICStub stub(isolate(), op_, new_left, new_right, state);
+  if (state == CompareICState::KNOWN_OBJECT) {
+    stub.set_known_map(
+        Handle<Map>(Handle<JSObject>::cast(x)->map(), isolate()));
+  }
+  Handle<Code> new_target = stub.GetCode();
+  set_target(*new_target);
+
+  if (FLAG_trace_ic) {
+    PrintF("[CompareIC in ");
+    JavaScriptFrame::PrintTop(isolate(), stdout, false, true);
+    PrintF(" ((%s+%s=%s)->(%s+%s=%s))#%s @ %p]\n",
+           CompareICState::GetStateName(old_stub.left()),
+           CompareICState::GetStateName(old_stub.right()),
+           CompareICState::GetStateName(old_stub.state()),
+           CompareICState::GetStateName(new_left),
+           CompareICState::GetStateName(new_right),
+           CompareICState::GetStateName(state), Token::Name(op_),
+           static_cast<void*>(*stub.GetCode()));
+  }
+
+  // Activate inlined smi code.
+  if (old_stub.state() == CompareICState::UNINITIALIZED) {
+    PatchInlinedSmiCode(address(), ENABLE_INLINED_SMI_CHECK);
+  }
+
+  return *new_target;
+}
+
+
+// Used from CompareICStub::GenerateMiss in code-stubs-<arch>.cc.
+RUNTIME_FUNCTION(CompareIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  CompareIC ic(isolate, static_cast<Token::Value>(args.smi_at(2)));
+  return ic.UpdateCaches(args.at<Object>(0), args.at<Object>(1));
+}
+
+
+void CompareNilIC::Clear(Address address, Code* target,
+                         ConstantPoolArray* constant_pool) {
+  if (IsCleared(target)) return;
+  ExtraICState state = target->extra_ic_state();
+
+  CompareNilICStub stub(target->GetIsolate(), state,
+                        HydrogenCodeStub::UNINITIALIZED);
+  stub.ClearState();
+
+  Code* code = NULL;
+  CHECK(stub.FindCodeInCache(&code));
+
+  SetTargetAtAddress(address, code, constant_pool);
+}
+
+
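+// Slow-path semantics of comparing against null/undefined: true for null,
+// undefined, and undetectable objects (such as document.all in a browser
+// embedder).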
+Handle<Object> CompareNilIC::DoCompareNilSlow(Isolate* isolate, NilValue nil,
+                                              Handle<Object> object) {
+  if (object->IsNull() || object->IsUndefined()) {
+    return handle(Smi::FromInt(true), isolate);
+  }
+  return handle(Smi::FromInt(object->IsUndetectableObject()), isolate);
+}
+
+
+Handle<Object> CompareNilIC::CompareNil(Handle<Object> object) {
+  ExtraICState extra_ic_state = target()->extra_ic_state();
+
+  CompareNilICStub stub(isolate(), extra_ic_state);
+
+  // Extract the current supported types from the patched IC and calculate what
+  // types must be supported as a result of the miss.
+  bool already_monomorphic = stub.IsMonomorphic();
+
+  stub.UpdateStatus(object);
+
+  NilValue nil = stub.nil_value();
+
+  // Find or create the specialized stub to support the new set of types.
+  Handle<Code> code;
+  if (stub.IsMonomorphic()) {
+    Handle<Map> monomorphic_map(already_monomorphic && FirstTargetMap() != NULL
+                                    ? FirstTargetMap()
+                                    : HeapObject::cast(*object)->map());
+    code = PropertyICCompiler::ComputeCompareNil(monomorphic_map, &stub);
+  } else {
+    code = stub.GetCode();
+  }
+  set_target(*code);
+  return DoCompareNilSlow(isolate(), nil, object);
+}
+
+
+RUNTIME_FUNCTION(CompareNilIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);
+  CompareNilIC ic(isolate);
+  return *ic.CompareNil(object);
+}
+
+
+RUNTIME_FUNCTION(Unreachable) {
+  UNREACHABLE();
+  CHECK(false);
+  return isolate->heap()->undefined_value();
+}
+
+
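+// Maps a binary-operator token to the JS builtin implementing its generic
+// (megamorphic) semantics.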
+Builtins::JavaScript BinaryOpIC::TokenToJSBuiltin(Token::Value op) {
+  switch (op) {
+    default:
+      UNREACHABLE();
+    case Token::ADD:
+      return Builtins::ADD;
+    case Token::SUB:
+      return Builtins::SUB;
+    case Token::MUL:
+      return Builtins::MUL;
+    case Token::DIV:
+      return Builtins::DIV;
+    case Token::MOD:
+      return Builtins::MOD;
+    case Token::BIT_OR:
+      return Builtins::BIT_OR;
+    case Token::BIT_AND:
+      return Builtins::BIT_AND;
+    case Token::BIT_XOR:
+      return Builtins::BIT_XOR;
+    case Token::SAR:
+      return Builtins::SAR;
+    case Token::SHR:
+      return Builtins::SHR;
+    case Token::SHL:
+      return Builtins::SHL;
+  }
+}
+
+
+Handle<Object> ToBooleanIC::ToBoolean(Handle<Object> object) {
+  ToBooleanStub stub(isolate(), target()->extra_ic_state());
+  bool to_boolean_value = stub.UpdateStatus(object);
+  Handle<Code> code = stub.GetCode();
+  set_target(*code);
+  return handle(Smi::FromInt(to_boolean_value ? 1 : 0), isolate());
+}
+
+
+RUNTIME_FUNCTION(ToBooleanIC_Miss) {
+  TimerEventScope<TimerEventIcMiss> timer(isolate);
+  DCHECK(args.length() == 1);
+  HandleScope scope(isolate);
+  Handle<Object> object = args.at<Object>(0);
+  ToBooleanIC ic(isolate);
+  return *ic.ToBoolean(object);
+}
+
+
+RUNTIME_FUNCTION(StoreCallbackProperty) {
+  Handle<JSObject> receiver = args.at<JSObject>(0);
+  Handle<JSObject> holder = args.at<JSObject>(1);
+  Handle<ExecutableAccessorInfo> callback = args.at<ExecutableAccessorInfo>(2);
+  Handle<Name> name = args.at<Name>(3);
+  Handle<Object> value = args.at<Object>(4);
+  HandleScope scope(isolate);
+
+  DCHECK(callback->IsCompatibleReceiver(*receiver));
+
+  Address setter_address = v8::ToCData<Address>(callback->setter());
+  v8::AccessorNameSetterCallback fun =
+      FUNCTION_CAST<v8::AccessorNameSetterCallback>(setter_address);
+  DCHECK(fun != NULL);
+
+  LOG(isolate, ApiNamedPropertyAccess("store", *receiver, *name));
+  PropertyCallbackArguments custom_args(isolate, callback->data(), *receiver,
+                                        *holder);
+  custom_args.Call(fun, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
+  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+  return *value;
+}
+
+
+/**
+ * Attempts to load a property with an interceptor (which must be present),
+ * but doesn't search the prototype chain.
+ *
+ * Returns |Heap::no_interceptor_result_sentinel()| if the interceptor doesn't
+ * provide any value for the given name.
+ */
+RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
+  DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+  Handle<Name> name_handle =
+      args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
+  Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(
+      NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex);
+
+  // TODO(rossberg): Support symbols in the API.
+  if (name_handle->IsSymbol())
+    return isolate->heap()->no_interceptor_result_sentinel();
+  Handle<String> name = Handle<String>::cast(name_handle);
+
+  Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
+  v8::NamedPropertyGetterCallback getter =
+      FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
+  DCHECK(getter != NULL);
+
+  Handle<JSObject> receiver =
+      args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+  Handle<JSObject> holder =
+      args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
+  PropertyCallbackArguments callback_args(isolate, interceptor_info->data(),
+                                          *receiver, *holder);
+  {
+    // Use the interceptor getter.
+    HandleScope scope(isolate);
+    v8::Handle<v8::Value> r =
+        callback_args.Call(getter, v8::Utils::ToLocal(name));
+    RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+    if (!r.IsEmpty()) {
+      Handle<Object> result = v8::Utils::OpenHandle(*r);
+      result->VerifyApiCallResultType();
+      return *result;
+    }
+  }
+
+  return isolate->heap()->no_interceptor_result_sentinel();
+}
+
+
+static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
+  // If the load is non-contextual, just return the undefined result.
+  // Note that both keyed and non-keyed loads may end up here.
+  HandleScope scope(isolate);
+  LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+  if (ic.contextual_mode() != CONTEXTUAL) {
+    return isolate->heap()->undefined_value();
+  }
+
+  // Throw a reference error.
+  Handle<Name> name_handle(name);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewReferenceError("not_defined", HandleVector(&name_handle, 1)));
+}
+
+
+/**
+ * Loads a property with an interceptor, performing a post-interceptor
+ * lookup if the interceptor fails to provide a value.
+ */
+RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
+  Handle<Name> name =
+      args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
+  Handle<JSObject> receiver =
+      args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex);
+  Handle<JSObject> holder =
+      args.at<JSObject>(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex);
+
+  Handle<Object> result;
+  LookupIterator it(receiver, name, holder);
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     JSObject::GetProperty(&it));
+
+  if (it.IsFound()) return *result;
+
+  return ThrowReferenceError(isolate, Name::cast(args[0]));
+}
+
+
+RUNTIME_FUNCTION(StorePropertyWithInterceptor) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
+  Handle<JSObject> receiver = args.at<JSObject>(0);
+  Handle<Name> name = args.at<Name>(1);
+  Handle<Object> value = args.at<Object>(2);
+#ifdef DEBUG
+  PrototypeIterator iter(isolate, receiver,
+                         PrototypeIterator::START_AT_RECEIVER);
+  bool found = false;
+  while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+    Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+    if (current->IsJSObject() &&
+        Handle<JSObject>::cast(current)->HasNamedInterceptor()) {
+      found = true;
+      break;
+    }
+  }
+  DCHECK(found);
+#endif
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      JSObject::SetProperty(receiver, name, value, ic.strict_mode()));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(LoadElementWithInterceptor) {
+  HandleScope scope(isolate);
+  Handle<JSObject> receiver = args.at<JSObject>(0);
+  DCHECK(args.smi_at(1) >= 0);
+  uint32_t index = args.smi_at(1);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      JSObject::GetElementWithInterceptor(receiver, receiver, index));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(VectorLoadIC_MissFromStubFailure) {
+  // TODO(mvstanton): To be enabled when ICs can accept a vector and slot
+  return NULL;
+}
+
+
+RUNTIME_FUNCTION(VectorKeyedLoadIC_MissFromStubFailure) {
+  // TODO(mvstanton): To be enabled when ICs can accept a vector and slot
+  return NULL;
+}
+
+
+static const Address IC_utilities[] = {
+#define ADDR(name) FUNCTION_ADDR(name),
+    IC_UTIL_LIST(ADDR) NULL
+#undef ADDR
+};
+
+
+Address IC::AddressFromUtilityId(IC::UtilityId id) { return IC_utilities[id]; }
+}
+}  // namespace v8::internal
diff --git a/src/ic/ic.h b/src/ic/ic.h
new file mode 100644
index 0000000..d86d2b7
--- /dev/null
+++ b/src/ic/ic.h
@@ -0,0 +1,698 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_IC_H_
+#define V8_IC_H_
+
+#include "src/ic/ic-state.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// IC_UTIL_LIST defines all utility functions called from generated
+// inline caching code. The argument for the macro, ICU, is the function name.
+#define IC_UTIL_LIST(ICU)              \
+  ICU(LoadIC_Miss)                     \
+  ICU(KeyedLoadIC_Miss)                \
+  ICU(CallIC_Miss)                     \
+  ICU(CallIC_Customization_Miss)       \
+  ICU(StoreIC_Miss)                    \
+  ICU(StoreIC_Slow)                    \
+  ICU(SharedStoreIC_ExtendStorage)     \
+  ICU(KeyedStoreIC_Miss)               \
+  ICU(KeyedStoreIC_Slow)               \
+  /* Utilities for IC stubs. */        \
+  ICU(StoreCallbackProperty)           \
+  ICU(LoadPropertyWithInterceptorOnly) \
+  ICU(LoadPropertyWithInterceptor)     \
+  ICU(LoadElementWithInterceptor)      \
+  ICU(StorePropertyWithInterceptor)    \
+  ICU(CompareIC_Miss)                  \
+  ICU(BinaryOpIC_Miss)                 \
+  ICU(CompareNilIC_Miss)               \
+  ICU(Unreachable)                     \
+  ICU(ToBooleanIC_Miss)
+
+
+//
+// IC is the base class for LoadIC, StoreIC, KeyedLoadIC, and KeyedStoreIC.
+//
+class IC {
+ public:
+  // The ids for the utilities called from the generated code.
+  enum UtilityId {
+#define CONST_NAME(name) k##name,
+    IC_UTIL_LIST(CONST_NAME)
+#undef CONST_NAME
+    kUtilityCount
+  };
+
+  // Looks up the address of the named utility.
+  static Address AddressFromUtilityId(UtilityId id);
+
+  // Alias the inline cache state type to make the IC code more readable.
+  typedef InlineCacheState State;
+
+  // The IC code is either invoked with no extra frames on the stack
+  // or with a single extra frame for supporting calls.
+  enum FrameDepth { NO_EXTRA_FRAME = 0, EXTRA_CALL_FRAME = 1 };
+
+  // Construct the IC structure with the given number of extra
+  // JavaScript frames on the stack.
+  IC(FrameDepth depth, Isolate* isolate);
+  virtual ~IC() {}
+
+  State state() const { return state_; }
+  inline Address address() const;
+
+  // Compute the current IC state based on the target stub, receiver and name.
+  void UpdateState(Handle<Object> receiver, Handle<Object> name);
+
+  bool IsNameCompatibleWithPrototypeFailure(Handle<Object> name);
+  void MarkPrototypeFailure(Handle<Object> name) {
+    DCHECK(IsNameCompatibleWithPrototypeFailure(name));
+    state_ = PROTOTYPE_FAILURE;
+  }
+
+  // If the stub contains weak maps then this function adds the stub to
+  // the dependent code array of each weak map.
+  static void RegisterWeakMapDependency(Handle<Code> stub);
+
+  // This function is called when a weak map in the stub is dying,
+  // invalidates the stub by setting maps in it to undefined.
+  static void InvalidateMaps(Code* stub);
+
+  // Clear the inline cache to initial state.
+  static void Clear(Isolate* isolate, Address address,
+                    ConstantPoolArray* constant_pool);
+
+#ifdef DEBUG
+  bool IsLoadStub() const {
+    return target()->is_load_stub() || target()->is_keyed_load_stub();
+  }
+
+  bool IsStoreStub() const {
+    return target()->is_store_stub() || target()->is_keyed_store_stub();
+  }
+
+  bool IsCallStub() const { return target()->is_call_stub(); }
+#endif
+
+  template <class TypeClass>
+  static JSFunction* GetRootConstructor(TypeClass* type,
+                                        Context* native_context);
+  static inline Handle<Map> GetHandlerCacheHolder(HeapType* type,
+                                                  bool receiver_is_holder,
+                                                  Isolate* isolate,
+                                                  CacheHolderFlag* flag);
+  static inline Handle<Map> GetICCacheHolder(HeapType* type, Isolate* isolate,
+                                             CacheHolderFlag* flag);
+
+  static bool IsCleared(Code* code) {
+    InlineCacheState state = code->ic_state();
+    return state == UNINITIALIZED || state == PREMONOMORPHIC;
+  }
+
+  // Utility functions to convert maps to types and back. There are two special
+  // cases:
+  // - The heap_number_map is used as a marker which includes heap numbers as
+  //   well as smis.
+  // - The oddball map is only used for booleans.
+  static Handle<Map> TypeToMap(HeapType* type, Isolate* isolate);
+  template <class T>
+  static typename T::TypeHandle MapToType(Handle<Map> map,
+                                          typename T::Region* region);
+
+  static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
+                                        Isolate* isolate);
+
+ protected:
+  // Get the call-site target; used for determining the state.
+  Handle<Code> target() const { return target_; }
+
+  Address fp() const { return fp_; }
+  Address pc() const { return *pc_address_; }
+  Isolate* isolate() const { return isolate_; }
+
+  // Get the shared function info of the caller.
+  SharedFunctionInfo* GetSharedFunctionInfo() const;
+  // Get the code object of the caller.
+  Code* GetCode() const;
+  // Get the original (non-breakpointed) code object of the caller.
+  Code* GetOriginalCode() const;
+
+  // Set the call-site target.
+  inline void set_target(Code* code);
+  bool is_target_set() { return target_set_; }
+
+  char TransitionMarkFromState(IC::State state);
+  void TraceIC(const char* type, Handle<Object> name);
+  void TraceIC(const char* type, Handle<Object> name, State old_state,
+               State new_state);
+
+  MaybeHandle<Object> TypeError(const char* type, Handle<Object> object,
+                                Handle<Object> key);
+  MaybeHandle<Object> ReferenceError(const char* type, Handle<Name> name);
+
+  // Access the target code for the given IC address.
+  static inline Code* GetTargetAtAddress(Address address,
+                                         ConstantPoolArray* constant_pool);
+  static inline void SetTargetAtAddress(Address address, Code* target,
+                                        ConstantPoolArray* constant_pool);
+  static void OnTypeFeedbackChanged(Isolate* isolate, Address address,
+                                    State old_state, State new_state,
+                                    bool target_remains_ic_stub);
+  static void PostPatching(Address address, Code* target, Code* old_target);
+
+  // Compute the handler either by compiling or by retrieving a cached version.
+  Handle<Code> ComputeHandler(LookupIterator* lookup,
+                              Handle<Object> value = Handle<Code>::null());
+  virtual Handle<Code> CompileHandler(LookupIterator* lookup,
+                                      Handle<Object> value,
+                                      CacheHolderFlag cache_holder) {
+    UNREACHABLE();
+    return Handle<Code>::null();
+  }
+
+  void UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name);
+  bool UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code);
+  void UpdateMegamorphicCache(HeapType* type, Name* name, Code* code);
+
+  void CopyICToMegamorphicCache(Handle<Name> name);
+  bool IsTransitionOfMonomorphicTarget(Map* source_map, Map* target_map);
+  void PatchCache(Handle<Name> name, Handle<Code> code);
+  Code::Kind kind() const { return kind_; }
+  Code::Kind handler_kind() const {
+    if (kind_ == Code::KEYED_LOAD_IC) return Code::LOAD_IC;
+    DCHECK(kind_ == Code::LOAD_IC || kind_ == Code::STORE_IC ||
+           kind_ == Code::KEYED_STORE_IC);
+    return kind_;
+  }
+  virtual Handle<Code> megamorphic_stub() {
+    UNREACHABLE();
+    return Handle<Code>::null();
+  }
+
+  bool TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
+                                              Handle<String> name);
+
+  ExtraICState extra_ic_state() const { return extra_ic_state_; }
+  void set_extra_ic_state(ExtraICState state) { extra_ic_state_ = state; }
+
+  Handle<HeapType> receiver_type() { return receiver_type_; }
+  void update_receiver_type(Handle<Object> receiver) {
+    receiver_type_ = CurrentTypeOf(receiver, isolate_);
+  }
+
+  void TargetMaps(MapHandleList* list) {
+    FindTargetMaps();
+    for (int i = 0; i < target_maps_.length(); i++) {
+      list->Add(target_maps_.at(i));
+    }
+  }
+
+  void TargetTypes(TypeHandleList* list) {
+    FindTargetMaps();
+    for (int i = 0; i < target_maps_.length(); i++) {
+      list->Add(MapToType<HeapType>(target_maps_.at(i), isolate_));
+    }
+  }
+
+  Map* FirstTargetMap() {
+    FindTargetMaps();
+    return target_maps_.length() > 0 ? *target_maps_.at(0) : NULL;
+  }
+
+ protected:
+  inline void UpdateTarget();
+
+ private:
+  inline Code* raw_target() const;
+  inline ConstantPoolArray* constant_pool() const;
+  inline ConstantPoolArray* raw_constant_pool() const;
+
+  void FindTargetMaps() {
+    if (target_maps_set_) return;
+    target_maps_set_ = true;
+    if (state_ == MONOMORPHIC) {
+      Map* map = target_->FindFirstMap();
+      if (map != NULL) target_maps_.Add(handle(map));
+    } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
+      target_->FindAllMaps(&target_maps_);
+    }
+  }
+
+  // Frame pointer for the frame that uses (calls) the IC.
+  Address fp_;
+
+  // All access to the program counter of an IC structure is indirect
+  // to make the code GC safe. This feature is crucial since
+  // GetProperty and SetProperty are called and they in turn might
+  // invoke the garbage collector.
+  Address* pc_address_;
+
+  Isolate* isolate_;
+
+  // The constant pool of the code which originally called the IC (which might
+  // be for the breakpointed copy of the original code).
+  Handle<ConstantPoolArray> raw_constant_pool_;
+
+  // The original code target that missed.
+  Handle<Code> target_;
+  bool target_set_;
+  State state_;
+  Code::Kind kind_;
+  Handle<HeapType> receiver_type_;
+  MaybeHandle<Code> maybe_handler_;
+
+  ExtraICState extra_ic_state_;
+  MapHandleList target_maps_;
+  bool target_maps_set_;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(IC);
+};
+
+
+// An IC_Utility encapsulates IC::UtilityId. It exists mainly because you
+// cannot make forward declarations to an enum.
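+// Typical use: ExternalReference(IC_Utility(IC::kStoreIC_Slow), isolate).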
+class IC_Utility {
+ public:
+  explicit IC_Utility(IC::UtilityId id)
+      : address_(IC::AddressFromUtilityId(id)), id_(id) {}
+
+  Address address() const { return address_; }
+
+  IC::UtilityId id() const { return id_; }
+
+ private:
+  Address address_;
+  IC::UtilityId id_;
+};
+
+
+class CallIC : public IC {
+ public:
+  explicit CallIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+  void PatchMegamorphic(Handle<Object> function,
+                        Handle<TypeFeedbackVector> vector, Handle<Smi> slot);
+
+  void HandleMiss(Handle<Object> receiver, Handle<Object> function,
+                  Handle<TypeFeedbackVector> vector, Handle<Smi> slot);
+
+  // Returns true if a custom handler was installed.
+  bool DoCustomHandler(Handle<Object> receiver, Handle<Object> function,
+                       Handle<TypeFeedbackVector> vector, Handle<Smi> slot,
+                       const CallICState& state);
+
+  // Code generator routines.
+  static Handle<Code> initialize_stub(Isolate* isolate, int argc,
+                                      CallICState::CallType call_type);
+
+  static void Clear(Isolate* isolate, Address address, Code* target,
+                    ConstantPoolArray* constant_pool);
+
+ private:
+  inline IC::State FeedbackToState(Handle<TypeFeedbackVector> vector,
+                                   Handle<Smi> slot) const;
+};
+
+
+class LoadIC : public IC {
+ public:
+  static ExtraICState ComputeExtraICState(ContextualMode contextual_mode) {
+    return LoadICState(contextual_mode).GetExtraICState();
+  }
+
+  ContextualMode contextual_mode() const {
+    return LoadICState::GetContextualMode(extra_ic_state());
+  }
+
+  explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+    DCHECK(IsLoadStub());
+  }
+
+  // Returns true if this IC is for contextual (no explicit receiver)
+  // access to properties.
+  bool IsUndeclaredGlobal(Handle<Object> receiver) {
+    if (receiver->IsGlobalObject()) {
+      return contextual_mode() == CONTEXTUAL;
+    } else {
+      DCHECK(contextual_mode() != CONTEXTUAL);
+      return false;
+    }
+  }
+
+  // Code generator routines.
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GeneratePreMonomorphic(MacroAssembler* masm) {
+    GenerateMiss(masm);
+  }
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateNormal(MacroAssembler* masm);
+  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+
+  static Handle<Code> initialize_stub(Isolate* isolate,
+                                      ExtraICState extra_state);
+
+  MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
+                                           Handle<Name> name);
+
+ protected:
+  inline void set_target(Code* code);
+
+  Handle<Code> slow_stub() const {
+    if (kind() == Code::LOAD_IC) {
+      return isolate()->builtins()->LoadIC_Slow();
+    } else {
+      DCHECK_EQ(Code::KEYED_LOAD_IC, kind());
+      return isolate()->builtins()->KeyedLoadIC_Slow();
+    }
+  }
+
+  virtual Handle<Code> megamorphic_stub() OVERRIDE;
+
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
+  void UpdateCaches(LookupIterator* lookup);
+
+  virtual Handle<Code> CompileHandler(LookupIterator* lookup,
+                                      Handle<Object> unused,
+                                      CacheHolderFlag cache_holder);
+
+ private:
+  virtual Handle<Code> pre_monomorphic_stub() const;
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+                                           ExtraICState extra_state);
+
+  Handle<Code> SimpleFieldLoad(FieldIndex index);
+
+  static void Clear(Isolate* isolate, Address address, Code* target,
+                    ConstantPoolArray* constant_pool);
+
+  friend class IC;
+};
+
+
+class KeyedLoadIC : public LoadIC {
+ public:
+  explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
+      : LoadIC(depth, isolate) {
+    DCHECK(target()->is_keyed_load_stub());
+  }
+
+  MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
+                                           Handle<Object> key);
+
+  // Code generator routines.
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateRuntimeGetProperty(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GeneratePreMonomorphic(MacroAssembler* masm) {
+    GenerateMiss(masm);
+  }
+  static void GenerateGeneric(MacroAssembler* masm);
+  static void GenerateString(MacroAssembler* masm);
+
+  // Bit mask to be tested against the bit field for the cases when the
+  // generic stub should go into the slow case.
+  // An explicit access check is necessary since the generic stub does not
+  // perform map checks.
+  static const int kSlowCaseBitFieldMask =
+      (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor);
+
+  static Handle<Code> generic_stub(Isolate* isolate);
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate);
+
+ protected:
+  Handle<Code> LoadElementStub(Handle<JSObject> receiver);
+  virtual Handle<Code> pre_monomorphic_stub() const {
+    return pre_monomorphic_stub(isolate());
+  }
+
+ private:
+  Handle<Code> generic_stub() const { return generic_stub(isolate()); }
+  Handle<Code> string_stub() {
+    return isolate()->builtins()->KeyedLoadIC_String();
+  }
+
+  static void Clear(Isolate* isolate, Address address, Code* target,
+                    ConstantPoolArray* constant_pool);
+
+  friend class IC;
+};
+
+
+class StoreIC : public IC {
+ public:
+  class StrictModeState : public BitField<StrictMode, 1, 1> {};
+  static ExtraICState ComputeExtraICState(StrictMode flag) {
+    return StrictModeState::encode(flag);
+  }
+  static StrictMode GetStrictMode(ExtraICState state) {
+    return StrictModeState::decode(state);
+  }
+
+  // For convenience, a statically declared encoding of strict mode extra
+  // IC state.
+  static const ExtraICState kStrictModeState = 1 << StrictModeState::kShift;
+
+  StoreIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+    DCHECK(IsStoreStub());
+  }
+
+  StrictMode strict_mode() const {
+    return StrictModeState::decode(extra_ic_state());
+  }
+
+  // Code generators for stub routines. Only called once at startup.
+  static void GenerateSlow(MacroAssembler* masm);
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GeneratePreMonomorphic(MacroAssembler* masm) {
+    GenerateMiss(masm);
+  }
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateMegamorphic(MacroAssembler* masm);
+  static void GenerateNormal(MacroAssembler* masm);
+  static void GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                         StrictMode strict_mode);
+
+  static Handle<Code> initialize_stub(Isolate* isolate, StrictMode strict_mode);
+
+  MUST_USE_RESULT MaybeHandle<Object> Store(
+      Handle<Object> object, Handle<Name> name, Handle<Object> value,
+      JSReceiver::StoreFromKeyed store_mode =
+          JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED);
+
+  bool LookupForWrite(LookupIterator* it, Handle<Object> value,
+                      JSReceiver::StoreFromKeyed store_mode);
+
+ protected:
+  virtual Handle<Code> megamorphic_stub() OVERRIDE;
+
+  // Stub accessors.
+  Handle<Code> generic_stub() const;
+
+  Handle<Code> slow_stub() const;
+
+  virtual Handle<Code> pre_monomorphic_stub() const {
+    return pre_monomorphic_stub(isolate(), strict_mode());
+  }
+
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+                                           StrictMode strict_mode);
+
+  // Update the inline cache and the global stub cache based on the
+  // lookup result.
+  void UpdateCaches(LookupIterator* lookup, Handle<Object> value,
+                    JSReceiver::StoreFromKeyed store_mode);
+  virtual Handle<Code> CompileHandler(LookupIterator* lookup,
+                                      Handle<Object> value,
+                                      CacheHolderFlag cache_holder);
+
+ private:
+  inline void set_target(Code* code);
+
+  static void Clear(Isolate* isolate, Address address, Code* target,
+                    ConstantPoolArray* constant_pool);
+
+  friend class IC;
+};
+
+
+enum KeyedStoreCheckMap { kDontCheckMap, kCheckMap };
+
+
+enum KeyedStoreIncrementLength { kDontIncrementLength, kIncrementLength };
+
+
+class KeyedStoreIC : public StoreIC {
+ public:
+  // ExtraICState bits (building on those used by IC).
+  class ExtraICStateKeyedAccessStoreMode
+      : public BitField<KeyedAccessStoreMode, 2, 4> {};  // NOLINT
+
+  static ExtraICState ComputeExtraICState(StrictMode flag,
+                                          KeyedAccessStoreMode mode) {
+    return StrictModeState::encode(flag) |
+           ExtraICStateKeyedAccessStoreMode::encode(mode);
+  }
+
+  static KeyedAccessStoreMode GetKeyedAccessStoreMode(
+      ExtraICState extra_state) {
+    return ExtraICStateKeyedAccessStoreMode::decode(extra_state);
+  }
+
+  KeyedStoreIC(FrameDepth depth, Isolate* isolate) : StoreIC(depth, isolate) {
+    DCHECK(target()->is_keyed_store_stub());
+  }
+
+  MUST_USE_RESULT MaybeHandle<Object> Store(Handle<Object> object,
+                                            Handle<Object> name,
+                                            Handle<Object> value);
+
+  // Code generators for stub routines.  Only called once at startup.
+  static void GenerateInitialize(MacroAssembler* masm) { GenerateMiss(masm); }
+  static void GeneratePreMonomorphic(MacroAssembler* masm) {
+    GenerateMiss(masm);
+  }
+  static void GenerateMiss(MacroAssembler* masm);
+  static void GenerateSlow(MacroAssembler* masm);
+  static void GenerateGeneric(MacroAssembler* masm, StrictMode strict_mode);
+  static void GenerateSloppyArguments(MacroAssembler* masm);
+
+ protected:
+  virtual Handle<Code> pre_monomorphic_stub() const {
+    return pre_monomorphic_stub(isolate(), strict_mode());
+  }
+  static Handle<Code> pre_monomorphic_stub(Isolate* isolate,
+                                           StrictMode strict_mode) {
+    if (strict_mode == STRICT) {
+      return isolate->builtins()->KeyedStoreIC_PreMonomorphic_Strict();
+    } else {
+      return isolate->builtins()->KeyedStoreIC_PreMonomorphic();
+    }
+  }
+
+  Handle<Code> StoreElementStub(Handle<JSObject> receiver,
+                                KeyedAccessStoreMode store_mode);
+
+ private:
+  inline void set_target(Code* code);
+
+  // Stub accessors.
+  Handle<Code> sloppy_arguments_stub() {
+    return isolate()->builtins()->KeyedStoreIC_SloppyArguments();
+  }
+
+  static void Clear(Isolate* isolate, Address address, Code* target,
+                    ConstantPoolArray* constant_pool);
+
+  KeyedAccessStoreMode GetStoreMode(Handle<JSObject> receiver,
+                                    Handle<Object> key, Handle<Object> value);
+
+  Handle<Map> ComputeTransitionedMap(Handle<Map> map,
+                                     KeyedAccessStoreMode store_mode);
+
+  friend class IC;
+};
+
+
+// Type-recording BinaryOpIC, which records the types of its inputs and
+// outputs.
+class BinaryOpIC : public IC {
+ public:
+  explicit BinaryOpIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+  static Builtins::JavaScript TokenToJSBuiltin(Token::Value op);
+
+  MaybeHandle<Object> Transition(Handle<AllocationSite> allocation_site,
+                                 Handle<Object> left,
+                                 Handle<Object> right) WARN_UNUSED_RESULT;
+};
+
+
+class CompareIC : public IC {
+ public:
+  CompareIC(Isolate* isolate, Token::Value op)
+      : IC(EXTRA_CALL_FRAME, isolate), op_(op) {}
+
+  // Update the inline cache for the given operands.
+  Code* UpdateCaches(Handle<Object> x, Handle<Object> y);
+
+  // Helper function for computing the condition for a compare operation.
+  static Condition ComputeCondition(Token::Value op);
+
+  // Factory method for getting an uninitialized compare stub.
+  static Handle<Code> GetUninitialized(Isolate* isolate, Token::Value op);
+
+ private:
+  static bool HasInlinedSmiCode(Address address);
+
+  bool strict() const { return op_ == Token::EQ_STRICT; }
+  Condition GetCondition() const { return ComputeCondition(op_); }
+
+  static Code* GetRawUninitialized(Isolate* isolate, Token::Value op);
+
+  static void Clear(Isolate* isolate, Address address, Code* target,
+                    ConstantPoolArray* constant_pool);
+
+  Token::Value op_;
+
+  friend class IC;
+};
+
+
+class CompareNilIC : public IC {
+ public:
+  explicit CompareNilIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+  Handle<Object> CompareNil(Handle<Object> object);
+
+  static Handle<Code> GetUninitialized();
+
+  static void Clear(Address address, Code* target,
+                    ConstantPoolArray* constant_pool);
+
+  static Handle<Object> DoCompareNilSlow(Isolate* isolate, NilValue nil,
+                                         Handle<Object> object);
+};
+
+
+class ToBooleanIC : public IC {
+ public:
+  explicit ToBooleanIC(Isolate* isolate) : IC(EXTRA_CALL_FRAME, isolate) {}
+
+  Handle<Object> ToBoolean(Handle<Object> object);
+};
+
+
+// Helper for BinaryOpIC and CompareIC.
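+// Full codegen emits an inlined smi check next to the IC call site; this
+// function toggles that check by patching the instruction stream in place.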
+enum InlinedSmiCheck { ENABLE_INLINED_SMI_CHECK, DISABLE_INLINED_SMI_CHECK };
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check);
+
+DECLARE_RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(KeyedStoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(UnaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(StoreIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(ElementsTransitionAndStoreIC_Miss);
+DECLARE_RUNTIME_FUNCTION(BinaryOpIC_Miss);
+DECLARE_RUNTIME_FUNCTION(BinaryOpIC_MissWithAllocationSite);
+DECLARE_RUNTIME_FUNCTION(CompareNilIC_Miss);
+DECLARE_RUNTIME_FUNCTION(ToBooleanIC_Miss);
+DECLARE_RUNTIME_FUNCTION(VectorLoadIC_MissFromStubFailure);
+DECLARE_RUNTIME_FUNCTION(VectorKeyedLoadIC_MissFromStubFailure);
+
+// Support functions for callbacks handlers.
+DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty);
+
+// Support functions for interceptor handlers.
+DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly);
+DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor);
+DECLARE_RUNTIME_FUNCTION(LoadElementWithInterceptor);
+DECLARE_RUNTIME_FUNCTION(StorePropertyWithInterceptor);
+}
+}  // namespace v8::internal
+
+#endif  // V8_IC_H_
diff --git a/src/ic/mips/OWNERS b/src/ic/mips/OWNERS
new file mode 100644
index 0000000..5508ba6
--- /dev/null
+++ b/src/ic/mips/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/src/ic/mips/access-compiler-mips.cc b/src/ic/mips/access-compiler-mips.cc
new file mode 100644
index 0000000..dce7602
--- /dev/null
+++ b/src/ic/mips/access-compiler-mips.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, a3, a0, t0, t1};
+  return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(a3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  static Register registers[] = {receiver, name, a3, t0, t1};
+  return registers;
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/ic/mips/handler-compiler-mips.cc b/src/ic/mips/handler-compiler-mips.cc
new file mode 100644
index 0000000..5b4555f
--- /dev/null
+++ b/src/ic/mips/handler-compiler-mips.cc
@@ -0,0 +1,840 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> getter) {
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ lw(receiver,
+              FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ push(value());
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ lw(receiver,
+              FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ Push(receiver, value());
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ pop(v0);
+
+    // Restore context register.
+    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(name->IsUniqueName());
+  DCHECK(!receiver.is(scratch0));
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
+
+  // Check that receiver is a JSObject.
+  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  Register tmp = properties;
+  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+  __ Branch(miss_label, ne, map, Operand(tmp));
+
+  // Restore the temporarily used register.
+  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  NameDictionaryLookupStub::GenerateNegativeLookup(
+      masm, miss_label, &done, receiver, properties, name, scratch1);
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  Isolate* isolate = masm->isolate();
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->native_context()->get(index)));
+
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+  __ lw(scratch, MemOperand(cp, offset));
+  __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+  __ li(at, function);
+  __ Branch(miss, ne, at, Operand(scratch));
+
+  // Load its initial map. The global functions all have initial maps.
+  __ li(prototype, Handle<Map>(function->initial_map()));
+  // Load the prototype from the initial map.
+  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register scratch1,
+    Register scratch2, Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
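+  // The mov below executes in the branch delay slot of the Ret.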
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, scratch1);
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  __ li(scratch, Operand(cell));
+  __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  __ Branch(miss, ne, scratch, Operand(at));
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+  __ push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  Register scratch = name;
+  __ li(scratch, Operand(interceptor));
+  __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+  __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+                           NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch_in,
+    bool is_store, int argc, Register* values) {
+  DCHECK(!receiver.is(scratch_in));
+  // Reserve stack space for the receiver and the arguments.
+  __ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
+  __ sw(receiver, MemOperand(sp, argc * kPointerSize));  // Push receiver.
+  // Write the arguments to stack frame.
+  for (int i = 0; i < argc; i++) {
+    Register arg = values[argc - 1 - i];
+    DCHECK(!receiver.is(arg));
+    DCHECK(!scratch_in.is(arg));
+    __ sw(arg, MemOperand(sp, (argc - 1 - i) * kPointerSize));  // Push arg.
+  }
+  DCHECK(optimization.is_simple_api_call());
+
+  // ABI for CallApiFunctionStub.
+  Register callee = a0;
+  Register call_data = t0;
+  Register holder = a2;
+  Register api_function_address = a1;
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Move(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ li(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ li(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ li(call_data, api_call_info);
+    __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+  } else {
+    __ li(call_data, call_data_obj);
+  }
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
+  __ li(api_function_address, Operand(ref));
+
+  // Jump to stub.
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ bind(label);
+    __ li(this->name(), Operand(name));
+  }
+}
+
+
+// Generate StoreTransition code; the value is passed in the a0 register.
+// After executing generated code, the receiver_reg and name_reg
+// may be clobbered.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+    Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+    Register storage_reg, Register value_reg, Register scratch1,
+    Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
+  // a0 : value.
+  Label exit;
+
+  int descriptor = transition->LastAdded();
+  DescriptorArray* descriptors = transition->instance_descriptors();
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  DCHECK(!representation.IsNone());
+
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+    __ li(scratch1, constant);
+    __ Branch(miss_label, ne, value_reg, Operand(scratch1));
+  } else if (representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    Handle<Map> current;
+    if (!it.Done()) {
+      __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+      Label do_store;
+      while (true) {
+        // Do the CompareMap() directly within the Branch() functions.
+        current = it.Current();
+        it.Advance();
+        if (it.Done()) {
+          __ Branch(miss_label, ne, scratch1, Operand(current));
+          break;
+        }
+        __ Branch(&do_store, eq, scratch1, Operand(current));
+      }
+      __ bind(&do_store);
+    }
+  } else if (representation.IsDouble()) {
+    Label do_store, heap_number;
+    __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
+                          TAG_RESULT, MUTABLE);
+
+    __ JumpIfNotSmi(value_reg, &heap_number);
+    __ SmiUntag(scratch1, value_reg);
+    __ mtc1(scratch1, f6);
+    __ cvt_d_w(f4, f6);
+    __ jmp(&do_store);
+
+    __ bind(&heap_number);
+    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+                DONT_DO_SMI_CHECK);
+    __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ bind(&do_store);
+    __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+  }
+
+  // Stub never generated for objects that require access checks.
+  DCHECK(!transition->is_access_check_needed());
+
+  // Perform map transition for the receiver if necessary.
+  if (details.type() == FIELD &&
+      Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ push(receiver_reg);
+    __ li(a2, Operand(transition));
+    __ Push(a2, a0);
+    __ TailCallExternalReference(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                          isolate()),
+        3, 1);
+    return;
+  }
+
+  // Update the map of the object.
+  __ li(scratch1, Operand(transition));
+  __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+  // Update the write barrier for the map field.
+  __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  if (details.type() == CONSTANT) {
+    DCHECK(value_reg.is(a0));
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a0);
+    return;
+  }
+
+  int index = transition->instance_descriptors()->GetFieldIndex(
+      transition->LastAdded());
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties are not going to change.
+  index -= transition->inobject_properties();
+
+  // TODO(verwaest): Share this code as a code stub.
+  SmiCheck smi_check =
+      representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = transition->instance_size() + (index * kPointerSize);
+    if (representation.IsDouble()) {
+      __ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
+    } else {
+      __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+                          kRAHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array.
+    __ lw(scratch1, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    if (representation.IsDouble()) {
+      __ sw(storage_reg, FieldMemOperand(scratch1, offset));
+    } else {
+      __ sw(value_reg, FieldMemOperand(scratch1, offset));
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+                          kRAHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
+    }
+  }
+
+  // Return the value (register v0).
+  DCHECK(value_reg.is(a0));
+  __ bind(&exit);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+}
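+
+// A minimal host-C++ sketch (exposition only, never compiled) of the
+// in-object vs. out-of-object placement decision above. The struct, the
+// function and its parameters are hypothetical; only the sign convention
+// for 'index' mirrors the generated code.
+#if 0
+struct FieldLocation {
+  bool in_object;   // true: object body, false: out-of-object properties
+  int byte_offset;  // untagged offset within that storage
+};
+
+static FieldLocation LocateTransitionField(int field_index,
+                                           int inobject_properties,
+                                           int instance_size,
+                                           int pointer_size,
+                                           int fixed_array_header_size) {
+  FieldLocation loc;
+  int index = field_index - inobject_properties;
+  loc.in_object = index < 0;
+  if (index < 0) {
+    // A negative index counts back from the end of the object body.
+    loc.byte_offset = instance_size + index * pointer_size;
+  } else {
+    // Otherwise the value lives in the out-of-object properties array.
+    loc.byte_offset = index * pointer_size + fixed_array_header_size;
+  }
+  return loc;
+}
+#endif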
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+                                                   Register value_reg,
+                                                   Label* miss_label) {
+  DCHECK(lookup->representation().IsHeapObject());
+  __ JumpIfSmi(value_reg, miss_label);
+  HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+  __ lw(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+  Label do_store;
+  Handle<Map> current;
+  while (true) {
+    // Do the CompareMap() directly within the Branch() functions.
+    current = it.Current();
+    it.Advance();
+    if (it.Done()) {
+      __ Branch(miss_label, ne, scratch1(), Operand(current));
+      break;
+    }
+    __ Branch(&do_store, eq, scratch1(), Operand(current));
+  }
+  __ bind(&do_store);
+
+  StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                      lookup->representation());
+  GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss,
+    PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+  // Make sure there's no overlap between holder and object registers.
+  DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type()->IsConstant()) {
+    current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+  }
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() ||
+             current->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+
+      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      Register map_reg = scratch1;
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        // CheckMap implicitly loads the map of |reg| into |map_reg|.
+        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+      } else {
+        __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      // This allows us to install generated handlers for accesses to the
+      // global proxy (as opposed to using slow ICs). See corresponding code
+      // in LookupForRead().
+      if (current_map->IsJSGlobalProxyMap()) {
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      }
+
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      // Two possible reasons for loading the prototype from the map:
+      // (1) Can't store references to new space in code.
+      // (2) Handler is shared for all receivers with the same prototype
+      //     map (but not necessarily the same prototype instance).
+      bool load_prototype_from_map =
+          heap()->InNewSpace(*prototype) || depth == 1;
+      if (load_prototype_from_map) {
+        __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+      } else {
+        __ li(reg, Operand(prototype));
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    // Check the holder map.
+    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  DCHECK(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
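+
+// The walk above, restated as a host-C++ model (exposition only). Obj and
+// Map are hypothetical stand-ins: every map between the receiver and the
+// holder is compared against a snapshot taken at compile time, and any
+// mismatch routes to the miss label.
+#if 0
+struct Map;
+struct Obj {
+  const Map* map;
+  const Obj* prototype;
+};
+
+static bool PrototypeChainMatches(const Obj* receiver,
+                                  const Map* const* expected_maps, int depth,
+                                  const Obj* holder) {
+  const Obj* current = receiver;
+  for (int i = 0; i < depth; i++) {
+    if (current->map != expected_maps[i]) return false;  // -> miss
+    current = current->prototype;
+  }
+  return current == holder;
+}
+#endif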
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ Branch(&success);
+    __ bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ Branch(&success);
+    GenerateRestoreName(miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ li(v0, value);
+  __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<ExecutableAccessorInfo> callback) {
+  // Build the AccessorInfo::args_ list on the stack and push the property
+  // name below the exit frame so the GC is aware of them, then store
+  // pointers to them.
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+  DCHECK(!scratch2().is(reg));
+  DCHECK(!scratch3().is(reg));
+  DCHECK(!scratch4().is(reg));
+  __ push(receiver());
+  if (heap()->InNewSpace(callback->data())) {
+    __ li(scratch3(), callback);
+    __ lw(scratch3(),
+          FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+  } else {
+    __ li(scratch3(), Handle<Object>(callback->data(), isolate()));
+  }
+  __ Subu(sp, sp, 6 * kPointerSize);
+  __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
+  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+  __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
+  __ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
+  __ li(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
+  __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
+  __ sw(reg, MemOperand(sp, 1 * kPointerSize));
+  __ sw(name(), MemOperand(sp, 0 * kPointerSize));
+  __ Addu(scratch2(), sp, 1 * kPointerSize);
+
+  __ mov(a2, scratch2());  // Saved in case scratch2 == a1.
+  // Abi for CallApiGetter.
+  Register getter_address_reg = ApiGetterDescriptor::function_address();
+
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, isolate());
+  __ li(getter_address_reg, Operand(ref));
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
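+
+// The six-word args_ block written above, modeled as a struct (exposition
+// only; the struct name is hypothetical). Fields run from low to high
+// stack addresses starting at sp + kPointerSize, matching the
+// STATIC_ASSERTed PropertyCallbackArguments indices.
+#if 0
+struct PropertyCallbackArgsModel {
+  void* holder;                // kHolderIndex == 0
+  void* isolate;               // kIsolateIndex == 1
+  void* return_value_default;  // undefined, index 2
+  void* return_value;          // undefined, index 3
+  void* data;                  // kDataIndex == 4
+  void* receiver;              // kThisIndex == 5, pushed before the Subu
+};  // The property name sits one word below, at sp + 0.
+#endif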
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it is different from
+  // the holder and it is needed should the interceptor return without any
+  // result. The ACCESSOR case needs the receiver to be passed into C++ code;
+  // the FIELD case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+    if (must_preserve_receiver_reg) {
+      __ Push(receiver(), holder_reg, this->name());
+    } else {
+      __ Push(holder_reg, this->name());
+    }
+    // Invoke an interceptor.  Note: the map checks from the receiver to the
+    // interceptor's holder have been compiled before (see a caller of this
+    // method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if interceptor provided a value for property.  If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
+    frame_scope.GenerateLeaveFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    if (must_preserve_receiver_reg) {
+      __ Pop(receiver(), holder_reg, this->name());
+    } else {
+      __ Pop(holder_reg, this->name());
+    }
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  // Call the runtime system to load the interceptor.
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(
+      ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  Register holder_reg = Frontend(receiver(), name);
+
+  __ Push(receiver(), holder_reg);  // Receiver.
+  __ li(at, Operand(callback));     // Callback info.
+  __ push(at);
+  __ li(at, Operand(name));
+  __ Push(at, value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+    Handle<Name> name) {
+  __ Push(receiver(), this->name(), value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property = ExternalReference(
+      IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+
+  FrontendHeader(receiver(), name, &miss);
+
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  __ li(result, Operand(cell));
+  __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&miss, eq, result, Operand(at));
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, result);
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/ic/mips/ic-compiler-mips.cc b/src/ic/mips/ic-compiler-mips.cc
new file mode 100644
index 0000000..c1e67f9
--- /dev/null
+++ b/src/ic/mips/ic-compiler-mips.cc
@@ -0,0 +1,131 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+                                                    Handle<Name> name,
+                                                    Code::StubType type,
+                                                    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // In case we are compiling an IC for dictionary loads and stores, just
+    // check whether the name is unique.
+    if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+    } else {
+      __ Branch(&miss, ne, this->name(), Operand(name));
+    }
+  }
+
+  Label number_case;
+  Register match = scratch2();
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target, match);  // Reg match is 0 if Smi.
+
+  // Polymorphic keyed stores may use the map register.
+  Register map_reg = scratch1();
+  DCHECK(kind() != Code::KEYED_STORE_IC ||
+         map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
+  __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      // Check the map and tail call if there's a match. Separate the compare
+      // from the branch to provide a path for the JumpIfSmi() above.
+      __ Subu(match, map_reg, Operand(map));
+      if (type->Is(HeapType::Number())) {
+        DCHECK(!number_case.is_unused());
+        __ bind(&number_case);
+      }
+      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
+              Operand(zero_reg));
+    }
+  }
+  DCHECK(number_of_handled_maps != 0);
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+  return GetCode(kind(), type, name, state);
+}
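+
+// The dispatch emitted above, as data-driven host C++ (exposition only;
+// Handler and the function name are hypothetical). Each non-deprecated map
+// is compared in turn and a match tail-calls its handler; no match falls
+// through to the miss builtin.
+#if 0
+typedef void (*Handler)();
+struct PolymorphicCase {
+  const void* map;
+  Handler handler;
+};
+
+static void DispatchPolymorphic(const void* receiver_map,
+                                const PolymorphicCase* cases, int n,
+                                Handler miss) {
+  for (int i = 0; i < n; i++) {
+    if (cases[i].map == receiver_map) return cases[i].handler();
+  }
+  miss();
+}
+#endif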
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+  __ JumpIfSmi(receiver(), &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int i = 0; i < receiver_count; ++i) {
+    if (transitioned_maps->at(i).is_null()) {
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, scratch1(),
+              Operand(receiver_maps->at(i)));
+    } else {
+      Label next_map;
+      __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
+      __ li(transition_map(), Operand(transitioned_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
+  }
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                                    StrictMode strict_mode) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  __ li(a0, Operand(Smi::FromInt(strict_mode)));
+  __ Push(a0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/ic/mips/ic-mips.cc b/src/ic/mips/ic-mips.cc
new file mode 100644
index 0000000..d97a6ba
--- /dev/null
+++ b/src/ic/mips/ic-mips.cc
@@ -0,0 +1,1024 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done. It can be the same as 'elements' or 'name', in
+//           which case one of them is clobbered when the miss label is not
+//           taken.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned in scratch2 by
+// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+                                   Register elements, Register name,
+                                   Register result, Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry check that the value is a normal
+  // property.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at, scratch1,
+         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  __ lw(result,
+        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
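+
+// Host-C++ sketch (exposition only) of the details-word test above. The
+// probe locates a [key, value, details] triple; a NORMAL property has type
+// bits 0, so masking the smi-tagged details word must yield zero. The
+// constant values below are assumptions for illustration.
+#if 0
+static inline bool IsNormalProperty(unsigned details_smi) {
+  const unsigned kSmiTagBits = 1;       // assumed 1-bit smi tag
+  const unsigned kTypeFieldMask = 0x7;  // assumed 3 property-type bits
+  return (details_smi & (kTypeFieldMask << kSmiTagBits)) == 0;
+}
+#endif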
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address returned in scratch2 by
+// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+                                    Register elements, Register name,
+                                    Register value, Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY))
+      << kSmiTagSize;
+  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ sw(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ mov(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
+                 kDontSaveFPRegs);
+}
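+
+// The store-side test above additionally rejects read-only properties; a
+// host-C++ sketch under the same illustrative constants as the load case
+// (the READ_ONLY encoding is likewise an assumption):
+#if 0
+static inline bool IsWritableNormalProperty(unsigned details_smi) {
+  const unsigned kSmiTagBits = 1;           // assumed 1-bit smi tag
+  const unsigned kTypeFieldMask = 0x7;      // assumed 3 property-type bits
+  const unsigned kReadOnlyAttribute = 0x8;  // assumed READ_ONLY bit
+  const unsigned mask = (kTypeFieldMask | kReadOnlyAttribute) << kSmiTagBits;
+  return (details_smi & mask) == 0;
+}
+#endif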
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for a regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver, Register map,
+                                           Register scratch,
+                                           int interceptor_bit, Label* slow) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(at, scratch,
+         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+  __ Branch(slow, ne, at, Operand(zero_reg));
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register elements,
+                                  Register scratch1, Register scratch2,
+                                  Register result, Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used to hold elements map and elements length.
+  //            Holds the elements map if not_fast_array branch is taken.
+  //
+  // scratch2 - used to hold the loaded value.
+
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode (not dictionary).
+    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ Branch(not_fast_array, ne, scratch1, Operand(at));
+  } else {
+    __ AssertFastElements(elements);
+  }
+
+  // Check that the key (index) is within bounds.
+  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+  // Fast case: Do the load.
+  __ Addu(scratch1, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // The key is a smi.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
+  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(at, at, scratch1);
+  __ lw(scratch2, MemOperand(at));
+
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ Branch(out_of_range, eq, scratch2, Operand(at));
+  __ mov(result, scratch2);
+}
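+
+// Exposition-only host-C++ model of the fast path above: an unsigned
+// bounds check (the 'hs' branch) followed by a the-hole check, bailing out
+// to the caller's labels in either case. All names are hypothetical.
+#if 0
+static bool FastArrayLoadModel(const void* const* elements, unsigned length,
+                               unsigned index, const void* the_hole,
+                               const void** result) {
+  if (index >= length) return false;    // -> out_of_range
+  const void* value = elements[index];
+  if (value == the_hole) return false;  // consult the prototype chain
+  *result = value;
+  return true;
+}
+#endif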
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map, Register hash,
+                                 Label* index_string, Label* not_unique) {
+  // The key is not a smi.
+  Label unique;
+  // Is it a name?
+  __ GetObjectType(key, map, hash);
+  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
+
+  // Is the string an array index, with cached numeric value?
+  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
+  __ Branch(index_string, eq, at, Operand(zero_reg));
+
+  // Is the string internalized? We know it's a string, so a single
+  // bit test is enough.
+  // map: key map
+  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0);
+  __ And(at, hash, Operand(kIsNotInternalizedMask));
+  __ Branch(not_unique, ne, at, Operand(zero_reg));
+
+  __ bind(&unique);
+}
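+
+// The classification above as a host-C++ decision function (exposition
+// only; the enum and parameter names are hypothetical). Symbols are unique
+// by construction, strings are unique only when internalized, and a string
+// with a cached array index is treated as an index instead.
+#if 0
+enum KeyKind { kArrayIndex, kUniqueName, kNotUnique };
+
+static KeyKind ClassifyKey(unsigned instance_type, unsigned hash_field,
+                           unsigned last_unique_name_type,
+                           unsigned cached_index_mask,
+                           unsigned not_internalized_mask) {
+  if (instance_type > last_unique_name_type) return kNotUnique;
+  if (instance_type == last_unique_name_type) return kUniqueName;  // Symbol.
+  if ((hash_field & cached_index_mask) == 0) return kArrayIndex;
+  if ((instance_type & not_internalized_mask) != 0) return kNotUnique;
+  return kUniqueName;
+}
+#endif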
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = a0;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+  Label slow;
+
+  __ lw(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+                                    JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), v0, a3, t0);
+  __ Ret();
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+// A register that isn't one of the parameters to the load IC.
+static const Register LoadIC_TempRegister() { return a3; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in ra.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, t0);
+
+  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in ra.
+
+  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+static MemOperand GenerateMappedArgumentsLookup(
+    MacroAssembler* masm, Register object, Register key, Register scratch1,
+    Register scratch2, Register scratch3, Label* unmapped_case,
+    Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the map check
+  // later, we do not need to check for interceptors or whether it
+  // requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ GetObjectType(object, scratch1, scratch2);
+  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+  // Check that the key is a positive smi.
+  __ And(scratch1, key, Operand(0x80000001));
+  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
+
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
+
+  // Load element index and check whether it is the hole.
+  const int kOffset =
+      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+  __ li(scratch3, Operand(kPointerSize >> 1));
+  __ Mul(scratch3, key, scratch3);
+  __ Addu(scratch3, scratch3, Operand(kOffset));
+
+  __ Addu(scratch2, scratch1, scratch3);
+  __ lw(scratch2, MemOperand(scratch2));
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  __ li(scratch3, Operand(kPointerSize >> 1));
+  __ Mul(scratch3, scratch2, scratch3);
+  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+  __ Addu(scratch2, scratch1, scratch3);
+  return MemOperand(scratch2);
+}
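+
+// Exposition-only host-C++ model of the mapped lookup above, assuming the
+// sloppy-arguments elements layout [context, backing_store, slot_0, ...]
+// in which a non-hole slot_i holds a smi-tagged context index for argument
+// i (length >= 2, 1-bit smi tag). Types and names are hypothetical.
+#if 0
+#include <stdint.h>
+
+static const void* MappedArgumentLookup(const uintptr_t* parameter_map,
+                                        unsigned length, unsigned key,
+                                        uintptr_t the_hole,
+                                        const void* const* context) {
+  if (key >= length - 2) return 0;  // -> unmapped_case
+  uintptr_t slot = parameter_map[2 + key];
+  if (slot == the_hole) return 0;   // -> unmapped_case
+  return context[slot >> 1];        // Untag the smi context index.
+}
+#endif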
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                                  Register key,
+                                                  Register parameter_map,
+                                                  Register scratch,
+                                                  Label* slow_case) {
+  // The element is in the arguments backing store, referenced by the
+  // second element of the parameter_map. The parameter_map register
+  // must be loaded with the parameter map of the arguments object and is
+  // overwritten.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+  __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
+              DONT_DO_SMI_CHECK);
+  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
+  __ li(scratch, Operand(kPointerSize >> 1));
+  __ Mul(scratch, key, scratch);
+  __ Addu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Addu(scratch, backing_store, scratch);
+  return MemOperand(scratch);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(value.is(a0));
+
+  Label slow, notin;
+  // Store address is returned in register (of MemOperand) mapped_location.
+  MemOperand mapped_location = GenerateMappedArgumentsLookup(
+      masm, receiver, key, a3, t0, t1, &notin, &slow);
+  __ sw(value, mapped_location);
+  __ mov(t5, value);
+  DCHECK_EQ(mapped_location.offset(), 0);
+  __ RecordWrite(a3, mapped_location.rm(), t5, kRAHasNotBeenSaved,
+                 kDontSaveFPRegs);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value);  // (In delay slot) return the stored value in v0.
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in a3.
+  // Store address is returned in register (of MemOperand) unmapped_location.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, key, a3, t0, &slow);
+  __ sw(value, unmapped_location);
+  __ mov(t5, value);
+  DCHECK_EQ(unmapped_location.offset(), 0);
+  __ RecordWrite(a3, unmapped_location.rm(), t5, kRAHasNotBeenSaved,
+                 kDontSaveFPRegs);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);  // (In delay slot) return the stored value in v0.
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in ra.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in ra.
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // The return address is in ra.
+  Label slow, check_name, index_smi, index_name, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register key = LoadDescriptor::NameRegister();
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  DCHECK(key.is(a2));
+  DCHECK(receiver.is(a1));
+
+  Isolate* isolate = masm->isolate();
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_name);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
+                                 Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(a0, a3, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, a0, a3, t0, v0, NULL, &slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, t0, a3);
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
+
+  // Check whether the elements is a number dictionary.
+  // a3: elements map
+  // t0: elements
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&slow, ne, a3, Operand(at));
+  __ sra(a0, key, kSmiTagSize);
+  __ LoadFromNumberDictionary(&slow, t0, key, v0, a0, a3, t1);
+  __ Ret();
+
+  // Slow case, key and receiver still in a2 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, t0,
+                      a3);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_name);
+  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
+                                 Map::kHasNamedInterceptor, &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ lw(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&probe_dictionary, eq, t0, Operand(at));
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the name hash.
+  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ sra(a3, a0, KeyedLookupCache::kMapHashShift);
+  __ lw(t0, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ sra(at, t0, Name::kHashShift);
+  __ xor_(a3, a3, at);
+  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+  __ And(a3, a3, Operand(mask));
+
+  // Load the key (consisting of map and unique name) from the cache and
+  // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+  __ li(t0, Operand(cache_keys));
+  __ sll(at, a3, kPointerSizeLog2 + 1);
+  __ addu(t0, t0, at);
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
+    __ Branch(&try_next_entry, ne, a0, Operand(t1));
+    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
+    __ Branch(&hit_on_nth_entry[i], eq, key, Operand(t1));
+    __ bind(&try_next_entry);
+  }
+
+  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
+  __ Branch(&slow, ne, a0, Operand(t1));
+  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
+  __ Branch(&slow, ne, key, Operand(t1));
+
+  // Get field offset.
+  // a0     : receiver's map
+  // a3     : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    __ li(t0, Operand(cache_field_offsets));
+    __ sll(at, a3, kPointerSizeLog2);
+    __ addu(at, t0, at);
+    __ lw(t1, MemOperand(at, kPointerSize * i));
+    __ lbu(t2, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
+    __ Subu(t1, t1, t2);
+    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
+    if (i != 0) {
+      __ Branch(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ bind(&load_in_object_property);
+  __ lbu(t2, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+  __ addu(t2, t2, t1);  // Index from start of object.
+  __ Subu(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
+  __ sll(at, t2, kPointerSizeLog2);
+  __ addu(at, receiver, at);
+  __ lw(v0, MemOperand(at));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      t0, a3);
+  __ Ret();
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ lw(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ Addu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ sll(v0, t1, kPointerSizeLog2);
+  __ Addu(v0, v0, receiver);
+  __ lw(v0, MemOperand(v0));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      t0, a3);
+  __ Ret();
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // a3: elements
+  __ lw(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
+  // Load the property to v0.
+  GenerateDictionaryLoad(masm, &slow, a3, key, v0, t1, t0);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, t0,
+                      a3);
+  __ Ret();
+
+  __ bind(&index_name);
+  __ IndexFromHash(a3, key);
+  // Now jump to the place where smi keys are handled.
+  __ Branch(&index_smi);
+}
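+
+// The lookup-cache index computation above, as host C++ (exposition only).
+// The shift and mask parameters stand in for KeyedLookupCache's real
+// constants; each resulting index addresses a bucket of kEntriesPerBucket
+// (map, name) pairs that are compared in turn before falling back to the
+// dictionary probe.
+#if 0
+static int LookupCacheIndex(unsigned map_bits, unsigned name_hash_field,
+                            int map_hash_shift, int name_hash_shift,
+                            unsigned mask) {
+  unsigned hash =
+      (map_bits >> map_hash_shift) ^ (name_hash_field >> name_hash_shift);
+  return static_cast<int>(hash & mask);
+}
+#endif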
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  // Return address is in ra.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register scratch = a3;
+  Register result = v0;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+    Register value, Register key, Register receiver, Register receiver_map,
+    Register elements_map, Register elements) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(fast_object);
+  Register scratch_value = t0;
+  Register address = t1;
+  if (check_map == kCheckMap) {
+    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ Branch(fast_double, ne, elements_map,
+              Operand(masm->isolate()->factory()->fixed_array_map()));
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+  Label holecheck_passed1;
+  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
+  __ addu(address, address, at);
+  __ lw(scratch_value, MemOperand(address));
+  __ Branch(&holecheck_passed1, ne, scratch_value,
+            Operand(masm->isolate()->factory()->the_hole_value()));
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+                                      slow);
+
+  __ bind(&holecheck_passed1);
+
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  // It's irrelevant whether the array is smi-only when writing a smi.
+  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(address, address, scratch_value);
+  __ sw(value, MemOperand(address));
+  __ Ret();
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, scratch_value,
+                             &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(address, address, scratch_value);
+  __ sw(value, MemOperand(address));
+  // Update write barrier for the elements array address.
+  __ mov(scratch_value, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+    __ Branch(slow, ne, elements_map, Operand(at));
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+  __ Addu(address, elements, Operand(FixedDoubleArray::kHeaderSize +
+                                     kHoleNanUpper32Offset - kHeapObjectTag));
+  __ sll(at, key, kPointerSizeLog2);
+  __ addu(address, address, at);
+  __ lw(scratch_value, MemOperand(address));
+  __ Branch(&fast_double_without_map_check, ne, scratch_value,
+            Operand(kHoleNanUpper32));
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+                                      slow);
+
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, key,
+                                 elements,  // Overwritten.
+                                 a3,        // Scratch regs...
+                                 t0, t1, &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Ret();
+
+  __ bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  __ Branch(&non_double_value, ne, t0, Operand(at));
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, t0, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   receiver_map, mode, slow);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, t0, slow);
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but the value is an Object that is not
+  // a HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, t0, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+}
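+
+// The elements-kind transitions chosen above, summarized as a host-C++
+// lattice step (exposition only; the enum names are hypothetical). Stores
+// only ever generalize: SMI -> DOUBLE for heap numbers, SMI or DOUBLE ->
+// OBJECT for any other heap object, never the reverse.
+#if 0
+enum ElementsKindModel { kSmiElements, kDoubleElements, kObjectElements };
+enum StoredValueModel { kSmiValue, kHeapNumberValue, kOtherHeapObject };
+
+static ElementsKindModel TransitionFor(ElementsKindModel kind,
+                                       StoredValueModel value) {
+  if (kind == kSmiElements && value == kHeapNumberValue)
+    return kDoubleElements;
+  if (kind == kSmiElements && value == kOtherHeapObject)
+    return kObjectElements;
+  if (kind == kDoubleElements && value == kOtherHeapObject)
+    return kObjectElements;
+  return kind;  // Smi stores and matching kinds need no transition.
+}
+#endif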
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictMode strict_mode) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+  Label slow, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array;
+
+  // Register usage.
+  Register value = StoreDescriptor::ValueRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  DCHECK(value.is(a0));
+  Register receiver_map = a3;
+  Register elements_map = t2;
+  Register elements = t3;  // Elements array of the receiver.
+  // t0 and t1 are used as general scratch registers.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow);
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+  // Get the map of the object.
+  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ And(t0, t0,
+         Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+  // Check if the object is a JS array or not.
+  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
+  // Check that the object is some kind of JSObject.
+  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Object case: Check key against length in the elements array.
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&fast_object, lo, key, Operand(t0));
+
+  // Slow case, handle jump to runtime.
+  __ bind(&slow);
+  // Entry registers are intact.
+  // a0: value.
+  // a1: key.
+  // a2: receiver.
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // Registers key and t0 (the array length) are unchanged from the bounds
+  // comparison above; MIPS has no condition codes, so compare them again.
+  // Only support writing to array[array.length].
+  __ Branch(&slow, ne, key, Operand(t0));
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&slow, hs, key, Operand(t0));
+  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ Branch(&check_if_double_array, ne, elements_map,
+            Heap::kFixedArrayMapRootIndex);
+
+  __ jmp(&fast_object_grow);
+
+  __ bind(&check_if_double_array);
+  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+  __ jmp(&fast_double_grow);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+  __ bind(&array);
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array.
+  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&extra, hs, key, Operand(t0));
+
+  KeyedStoreGenerateGenericHelper(
+      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+      value, key, receiver, receiver_map, elements_map, elements);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength, value,
+                                  key, receiver, receiver_map, elements_map,
+                                  elements);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(receiver.is(a1));
+  DCHECK(name.is(a2));
+  DCHECK(StoreDescriptor::ValueRegister().is(a0));
+
+  // The receiver and name are already in registers; probe the stub cache.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+                                               name, a3, t0, t1, t2);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Label miss;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Register dictionary = a3;
+  DCHECK(receiver.is(a1));
+  DCHECK(name.is(a2));
+  DCHECK(value.is(a0));
+
+  __ lw(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, t0, t1);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
+  GenerateMiss(masm);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address andi_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an "andi zero_reg, rx, #yyy"
+  // marker, nothing was inlined.
+  Instr instr = Assembler::instr_at(andi_instruction_address);
+  return Assembler::IsAndImmediate(instr) &&
+         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
+}
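+// Layout sketch: the call site is followed by a data-only marker of the form
+//   andi zero_reg, rs, imm16
+// whose fields encode the distance back to the patchable smi check (see
+// PatchInlinedSmiCode below), so matching the marker is sufficient here.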
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  Address andi_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an "andi zero_reg, rx, #yyy"
+  // marker, nothing was inlined.
+  Instr instr = Assembler::instr_at(andi_instruction_address);
+  if (!(Assembler::IsAndImmediate(instr) &&
+        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
+    return;
+  }
+
+  // The delta to the start of the inlined smi check instruction and the
+  // condition code used at the patched jump.
+  int delta = Assembler::GetImmediate16(instr);
+  delta += Assembler::GetRs(instr) * kImm16Mask;
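+  // The marker is a no-op "andi zero_reg, rs, imm16" used purely as data:
+  // delta == imm16 + rs * kImm16Mask, so deltas wider than 16 bits spill
+  // into the rs field (a sketch of the encoding, inferred from this code).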
+  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
+  // signals that nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, andi=%p, delta=%d\n", address,
+           andi_instruction_address, delta);
+  }
+
+  Address patch_address =
+      andi_instruction_address - delta * Instruction::kInstrSize;
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   andi at, rx, 0
+  //   Branch <target>, eq, at, Operand(zero_reg)
+  // to:
+  //   andi at, rx, #kSmiTagMask
+  //   Branch <target>, ne, at, Operand(zero_reg)
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
+    DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+  } else {
+    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
+    patcher.masm()->andi(at, reg, 0);
+  }
+  DCHECK(Assembler::IsBranch(branch_instr));
+  if (Assembler::IsBeq(branch_instr)) {
+    patcher.ChangeBranchCondition(ne);
+  } else {
+    DCHECK(Assembler::IsBne(branch_instr));
+    patcher.ChangeBranchCondition(eq);
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/ic/mips/stub-cache-mips.cc b/src/ic/mips/stub-cache-mips.cc
new file mode 100644
index 0000000..e538712
--- /dev/null
+++ b/src/ic/mips/stub-cache-mips.cc
@@ -0,0 +1,169 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, bool leave_frame,
+                       StubCache::Table table, Register receiver, Register name,
+                       // Number of the cache entry, not scaled.
+                       Register offset, Register scratch, Register scratch2,
+                       Register offset_scratch) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
+  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
+  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
+
+  // Check the relative positions of the address fields.
+  DCHECK(value_off_addr > key_off_addr);
+  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+  DCHECK(map_off_addr > key_off_addr);
+  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register base_addr = scratch;
+  scratch = no_reg;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ sll(offset_scratch, offset, 1);
+  __ Addu(offset_scratch, offset_scratch, offset);
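+  // i.e. offset_scratch = (offset << 1) + offset == offset * 3, scaling the
+  // entry index by the three words per entry.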
+
+  // Calculate the base address of the entry.
+  __ li(base_addr, Operand(key_offset));
+  __ sll(at, offset_scratch, kPointerSizeLog2);
+  __ Addu(base_addr, base_addr, at);
+
+  // Check that the key in the entry matches the name.
+  __ lw(at, MemOperand(base_addr, 0));
+  __ Branch(&miss, ne, name, Operand(at));
+
+  // Check the map matches.
+  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
+  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, at, Operand(scratch2));
+
+  // Get the code entry from the cache.
+  Register code = scratch2;
+  scratch2 = no_reg;
+  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  Register flags_reg = base_addr;
+  base_addr = no_reg;
+  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
+  __ Branch(&miss, ne, flags_reg, Operand(flags));
+
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ jmp(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ jmp(&miss);
+  }
+#endif
+
+  if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+  // Jump to the first instruction in the code stub.
+  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+
+  // Miss: fall through.
+  __ bind(&miss);
+}
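+// A sketch of the entry layout implied by the DCHECKs above (three
+// consecutive 32-bit words per entry on MIPS32):
+//   struct Entry { Name* key; Code* value; Map* map; };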
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+                              bool leave_frame, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 12.
+  DCHECK(sizeof(Entry) == 12);
+
+  // Make sure the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  DCHECK(!scratch.is(receiver));
+  DCHECK(!scratch.is(name));
+  DCHECK(!extra.is(receiver));
+  DCHECK(!extra.is(name));
+  DCHECK(!extra.is(scratch));
+  DCHECK(!extra2.is(receiver));
+  DCHECK(!extra2.is(name));
+  DCHECK(!extra2.is(scratch));
+  DCHECK(!extra2.is(extra));
+
+  // Check register validity.
+  DCHECK(!scratch.is(no_reg));
+  DCHECK(!extra.is(no_reg));
+  DCHECK(!extra2.is(no_reg));
+  DCHECK(!extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+                      extra3);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Addu(scratch, scratch, at);
+  uint32_t mask = kPrimaryTableSize - 1;
+  // We shift out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.
+  __ srl(scratch, scratch, kCacheIndexShift);
+  __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+  __ And(scratch, scratch, Operand(mask));
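+  // Roughly:
+  //   primary = (((hash_field + map) >> kCacheIndexShift) ^
+  //              (flags >> kCacheIndexShift)) & (kPrimaryTableSize - 1)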
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ srl(at, name, kCacheIndexShift);
+  __ Subu(scratch, scratch, at);
+  uint32_t mask2 = kSecondaryTableSize - 1;
+  __ Addu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+  __ And(scratch, scratch, Operand(mask2));
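+  // Roughly (name being the raw pointer value of the name register):
+  //   secondary = (primary - (name >> kCacheIndexShift) +
+  //                (flags >> kCacheIndexShift)) & (kSecondaryTableSize - 1)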
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+                      extra3);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/ic/mips64/OWNERS b/src/ic/mips64/OWNERS
new file mode 100644
index 0000000..5508ba6
--- /dev/null
+++ b/src/ic/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/src/ic/mips64/access-compiler-mips64.cc b/src/ic/mips64/access-compiler-mips64.cc
new file mode 100644
index 0000000..5e3cfc5
--- /dev/null
+++ b/src/ic/mips64/access-compiler-mips64.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, a3, a0, a4, a5};
+  return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(a3.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  static Register registers[] = {receiver, name, a3, a4, a5};
+  return registers;
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/ic/mips64/handler-compiler-mips64.cc b/src/ic/mips64/handler-compiler-mips64.cc
new file mode 100644
index 0000000..f44226f
--- /dev/null
+++ b/src/ic/mips64/handler-compiler-mips64.cc
@@ -0,0 +1,840 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> getter) {
+  // ----------- S t a t e -------------
+  //  -- a0    : receiver
+  //  -- a2    : name
+  //  -- ra    : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ ld(receiver,
+              FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
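+// For illustration, this stub backs property reads through a JS accessor,
+// e.g.:
+//   Object.defineProperty(o, "x", { get: function() { return 42; } });
+//   o.x;  // dispatches to the code generated above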
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- ra    : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ push(value());
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ ld(receiver,
+              FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ Push(receiver, value());
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ pop(v0);
+
+    // Restore context register.
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(name->IsUniqueName());
+  DCHECK(!receiver.is(scratch0));
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
+
+  // Check that receiver is a JSObject.
+  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ ld(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  Register tmp = properties;
+  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+  __ Branch(miss_label, ne, map, Operand(tmp));
+
+  // Restore the temporarily used register.
+  __ ld(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  NameDictionaryLookupStub::GenerateNegativeLookup(
+      masm, miss_label, &done, receiver, properties, name, scratch1);
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  Isolate* isolate = masm->isolate();
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->native_context()->get(index)));
+
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+  __ ld(scratch, MemOperand(cp, offset));
+  __ ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ ld(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+  __ li(at, function);
+  __ Branch(miss, ne, at, Operand(scratch));
+
+  // Load its initial map. The global functions all have initial maps.
+  __ li(prototype, Handle<Map>(function->initial_map()));
+  // Load the prototype from the initial map.
+  __ ld(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register scratch1,
+    Register scratch2, Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, scratch1);
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  __ li(scratch, Operand(cell));
+  __ ld(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  __ Branch(miss, ne, scratch, Operand(at));
+}
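+// For illustration: a handler compiled against an absent global property "x"
+// stays valid only while the cell for "x" still holds the hole; once code
+// defines x, the cell is filled and the check above jumps to miss.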
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+  __ push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  Register scratch = name;
+  __ li(scratch, Operand(interceptor));
+  __ Push(scratch, receiver, holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+  __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+                           NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch_in,
+    bool is_store, int argc, Register* values) {
+  DCHECK(!receiver.is(scratch_in));
+  // Preparing to push, adjust sp.
+  __ Dsubu(sp, sp, Operand((argc + 1) * kPointerSize));
+  __ sd(receiver, MemOperand(sp, argc * kPointerSize));  // Push receiver.
+  // Write the arguments to stack frame.
+  for (int i = 0; i < argc; i++) {
+    Register arg = values[argc - 1 - i];
+    DCHECK(!receiver.is(arg));
+    DCHECK(!scratch_in.is(arg));
+    __ sd(arg, MemOperand(sp, (argc - 1 - i) * kPointerSize));  // Push arg.
+  }
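+  // Sketch of the resulting layout (kPointerSize == 8 on MIPS64):
+  //   sp[argc * 8]       : receiver
+  //   sp[(argc - 1) * 8] : values[argc - 1]
+  //   ...
+  //   sp[0]              : values[0]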
+  DCHECK(optimization.is_simple_api_call());
+
+  // Abi for CallApiFunctionStub.
+  Register callee = a0;
+  Register call_data = a4;
+  Register holder = a2;
+  Register api_function_address = a1;
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Move(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ li(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ li(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ li(call_data, api_call_info);
+    __ ld(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+  } else {
+    __ li(call_data, call_data_obj);
+  }
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
+  __ li(api_function_address, Operand(ref));
+
+  // Jump to stub.
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ bind(label);
+    __ li(this->name(), Operand(name));
+  }
+}
+
+
+// Generate StoreTransition code; the value is passed in the a0 register.
+// After executing the generated code, the receiver_reg and name_reg
+// may be clobbered.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+    Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+    Register storage_reg, Register value_reg, Register scratch1,
+    Register scratch2, Register scratch3, Label* miss_label, Label* slow) {
+  // a0 : value.
+  Label exit;
+
+  int descriptor = transition->LastAdded();
+  DescriptorArray* descriptors = transition->instance_descriptors();
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  DCHECK(!representation.IsNone());
+
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+    __ li(scratch1, constant);
+    __ Branch(miss_label, ne, value_reg, Operand(scratch1));
+  } else if (representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    Handle<Map> current;
+    if (!it.Done()) {
+      __ ld(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
+      Label do_store;
+      while (true) {
+        // Do the CompareMap() directly within the Branch() functions.
+        current = it.Current();
+        it.Advance();
+        if (it.Done()) {
+          __ Branch(miss_label, ne, scratch1, Operand(current));
+          break;
+        }
+        __ Branch(&do_store, eq, scratch1, Operand(current));
+      }
+      __ bind(&do_store);
+    }
+  } else if (representation.IsDouble()) {
+    Label do_store, heap_number;
+    __ LoadRoot(scratch3, Heap::kMutableHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow,
+                          TAG_RESULT, MUTABLE);
+
+    __ JumpIfNotSmi(value_reg, &heap_number);
+    __ SmiUntag(scratch1, value_reg);
+    __ mtc1(scratch1, f6);
+    __ cvt_d_w(f4, f6);
+    __ jmp(&do_store);
+
+    __ bind(&heap_number);
+    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex, miss_label,
+                DONT_DO_SMI_CHECK);
+    __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ bind(&do_store);
+    __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
+  }
+
+  // Stub never generated for objects that require access checks.
+  DCHECK(!transition->is_access_check_needed());
+
+  // Perform map transition for the receiver if necessary.
+  if (details.type() == FIELD &&
+      Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ push(receiver_reg);
+    __ li(a2, Operand(transition));
+    __ Push(a2, a0);
+    __ TailCallExternalReference(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                          isolate()),
+        3, 1);
+    return;
+  }
+
+  // Update the map of the object.
+  __ li(scratch1, Operand(transition));
+  __ sd(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
+
+  // Update the write barrier for the map field.
+  __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+                      kRAHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  if (details.type() == CONSTANT) {
+    DCHECK(value_reg.is(a0));
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a0);
+    return;
+  }
+
+  int index = transition->instance_descriptors()->GetFieldIndex(
+      transition->LastAdded());
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= transition->inobject_properties();
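+  // After this adjustment, a negative index denotes an in-object slot (at
+  // instance_size + index * kPointerSize) and a non-negative index a slot in
+  // the out-of-line properties array, matching the two branches below.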
+
+  // TODO(verwaest): Share this code as a code stub.
+  SmiCheck smi_check =
+      representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = transition->instance_size() + (index * kPointerSize);
+    if (representation.IsDouble()) {
+      __ sd(storage_reg, FieldMemOperand(receiver_reg, offset));
+    } else {
+      __ sd(value_reg, FieldMemOperand(receiver_reg, offset));
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+                          kRAHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array.
+    __ ld(scratch1, FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
+    if (representation.IsDouble()) {
+      __ sd(storage_reg, FieldMemOperand(scratch1, offset));
+    } else {
+      __ sd(value_reg, FieldMemOperand(scratch1, offset));
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+                          kRAHasNotBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, smi_check);
+    }
+  }
+
+  // Return the value (register v0).
+  DCHECK(value_reg.is(a0));
+  __ bind(&exit);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+                                                   Register value_reg,
+                                                   Label* miss_label) {
+  DCHECK(lookup->representation().IsHeapObject());
+  __ JumpIfSmi(value_reg, miss_label);
+  HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+  __ ld(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+  Label do_store;
+  Handle<Map> current;
+  while (true) {
+    // Do the CompareMap() directly within the Branch() functions.
+    current = it.Current();
+    it.Advance();
+    if (it.Done()) {
+      __ Branch(miss_label, ne, scratch1(), Operand(current));
+      break;
+    }
+    __ Branch(&do_store, eq, scratch1(), Operand(current));
+  }
+  __ bind(&do_store);
+
+  StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                      lookup->representation());
+  GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss,
+    PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+  // Make sure there's no overlap between holder and object registers.
+  DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type()->IsConstant()) {
+    current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+  }
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() ||
+             current->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+
+      __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ ld(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      // Two possible reasons for loading the prototype from the map:
+      // (1) Can't store references to new space in code.
+      // (2) Handler is shared for all receivers with the same prototype
+      //     map (but not necessarily the same prototype instance).
+      bool load_prototype_from_map =
+          heap()->InNewSpace(*prototype) || depth == 1;
+      Register map_reg = scratch1;
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        // CheckMap implicitly loads the map of |reg| into |map_reg|.
+        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+      } else {
+        __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      // This allows us to install generated handlers for accesses to the
+      // global proxy (as opposed to using slow ICs). See corresponding code
+      // in LookupForRead().
+      if (current_map->IsJSGlobalProxyMap()) {
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      }
+
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (load_prototype_from_map) {
+        __ ld(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+      } else {
+        __ li(reg, Operand(prototype));
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    // Check the holder map.
+    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  DCHECK(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ Branch(&success);
+    __ bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ Branch(&success);
+    GenerateRestoreName(miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ li(v0, value);
+  __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<ExecutableAccessorInfo> callback) {
+  // Build AccessorInfo::args_ list on the stack and push property name below
+  // the exit frame to make GC aware of them and store pointers to them.
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+  DCHECK(!scratch2().is(reg));
+  DCHECK(!scratch3().is(reg));
+  DCHECK(!scratch4().is(reg));
+  __ push(receiver());
+  if (heap()->InNewSpace(callback->data())) {
+    __ li(scratch3(), callback);
+    __ ld(scratch3(),
+          FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+  } else {
+    __ li(scratch3(), Handle<Object>(callback->data(), isolate()));
+  }
+  __ Dsubu(sp, sp, 6 * kPointerSize);
+  __ sd(scratch3(), MemOperand(sp, 5 * kPointerSize));
+  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+  __ sd(scratch3(), MemOperand(sp, 4 * kPointerSize));
+  __ sd(scratch3(), MemOperand(sp, 3 * kPointerSize));
+  __ li(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
+  __ sd(scratch4(), MemOperand(sp, 2 * kPointerSize));
+  __ sd(reg, MemOperand(sp, 1 * kPointerSize));
+  __ sd(name(), MemOperand(sp, 0 * kPointerSize));
+  __ Daddu(scratch2(), sp, 1 * kPointerSize);
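+  // Sketch of the stack at this point (args_ list starts at scratch2()):
+  //   sp[6 * 8] : receiver (kThisIndex, pushed above)
+  //   sp[5 * 8] : callback data (kDataIndex)
+  //   sp[4 * 8] : undefined (kReturnValueOffset)
+  //   sp[3 * 8] : undefined (kReturnValueDefaultValueIndex)
+  //   sp[2 * 8] : isolate (kIsolateIndex)
+  //   sp[1 * 8] : holder, i.e. reg (kHolderIndex)
+  //   sp[0]     : property name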
+
+  __ mov(a2, scratch2());  // Saved in case scratch2 == a1.
+  // Abi for CallApiGetter.
+  Register getter_address_reg = ApiGetterDescriptor::function_address();
+
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, isolate());
+  __ li(getter_address_reg, Operand(ref));
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it is different from
+  // the holder and it is needed should the interceptor return without any
+  // result. The ACCESSOR case needs the receiver to be passed into C++ code;
+  // the FIELD case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+    if (must_preserve_receiver_reg) {
+      __ Push(receiver(), holder_reg, this->name());
+    } else {
+      __ Push(holder_reg, this->name());
+    }
+    // Invoke an interceptor.  Note: map checks from receiver to
+    // interceptor's holder have been compiled before (see a caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if interceptor provided a value for property.  If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+    __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
+    frame_scope.GenerateLeaveFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    if (must_preserve_receiver_reg) {
+      __ Pop(receiver(), holder_reg, this->name());
+    } else {
+      __ Pop(holder_reg, this->name());
+    }
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  // Call the runtime system to load the interceptor.
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(
+      ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  Register holder_reg = Frontend(receiver(), name);
+
+  __ Push(receiver(), holder_reg);  // Receiver.
+  __ li(at, Operand(callback));     // Callback info.
+  __ push(at);
+  __ li(at, Operand(name));
+  __ Push(at, value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+    Handle<Name> name) {
+  __ Push(receiver(), this->name(), value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property = ExternalReference(
+      IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+
+  FrontendHeader(receiver(), name, &miss);
+
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  __ li(result, Operand(cell));
+  __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Branch(&miss, eq, result, Operand(at));
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, result);
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/ic/mips64/ic-compiler-mips64.cc b/src/ic/mips64/ic-compiler-mips64.cc
new file mode 100644
index 0000000..796ed87
--- /dev/null
+++ b/src/ic/mips64/ic-compiler-mips64.cc
@@ -0,0 +1,131 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+                                                    Handle<Name> name,
+                                                    Code::StubType type,
+                                                    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // In case we are compiling an IC for dictionary loads and stores, just
+    // check whether the name is unique.
+    if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ lbu(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+    } else {
+      __ Branch(&miss, ne, this->name(), Operand(name));
+    }
+  }
+
+  Label number_case;
+  Register match = scratch2();
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target, match);  // Reg match is 0 if Smi.
+
+  // Polymorphic keyed stores may use the map register.
+  Register map_reg = scratch1();
+  DCHECK(kind() != Code::KEYED_STORE_IC ||
+         map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
+  __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      // Check map and tail call if there's a match.
+      // Separate compare from branch to provide a path for the above JumpIfSmi().
+      __ Dsubu(match, map_reg, Operand(map));
+      if (type->Is(HeapType::Number())) {
+        DCHECK(!number_case.is_unused());
+        __ bind(&number_case);
+      }
+      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
+              Operand(zero_reg));
+    }
+  }
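+  // Note that smis enter at number_case with match == 0 (set by JumpIfSmi
+  // above), so the eq-against-zero jump bound there dispatches them to the
+  // Number handler without a separate compare.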
+  DCHECK(number_of_handled_maps != 0);
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+  return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+  __ JumpIfSmi(receiver(), &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ ld(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int i = 0; i < receiver_count; ++i) {
+    if (transitioned_maps->at(i).is_null()) {
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, scratch1(),
+              Operand(receiver_maps->at(i)));
+    } else {
+      Label next_map;
+      __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
+      __ li(transition_map(), Operand(transitioned_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
+  }
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                                    StrictMode strict_mode) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  __ li(a0, Operand(Smi::FromInt(strict_mode)));
+  __ Push(a0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/ic/mips64/ic-mips64.cc b/src/ic/mips64/ic-mips64.cc
new file mode 100644
index 0000000..a5d9fe7
--- /dev/null
+++ b/src/ic/mips64/ic-mips64.cc
@@ -0,0 +1,1031 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
+}
+
+
+// Helper function used from LoadIC GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done. Can be the same as elements or name, clobbering
+//           one of these in the case of not jumping to the miss label.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address left in scratch2 by the dictionary probe
+// (NameDictionaryLookupStub::GeneratePositiveLookup()) is used.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+                                   Register elements, Register name,
+                                   Register result, Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry check that the value is a normal
+  // property.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at, scratch1,
+         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  __ ld(result,
+        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
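+// For reference, each dictionary entry spans three pointers starting at
+// kElementsStartOffset -- [key, value, details] -- which is why the value is
+// read at +1 * kPointerSize and the details smi sits at +2 * kPointerSize.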
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+// The address left in scratch2 by the dictionary probe
+// (NameDictionaryLookupStub::GeneratePositiveLookup()) is used.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+                                    Register elements, Register name,
+                                    Register value, Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry in the dictionary check that the value
+  // is a normal property that is not read only.
+  __ bind(&done);  // scratch2 == elements + 4 * index.
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY));
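+  // The AND below yields zero only for a NORMAL (zero-encoded) property
+  // whose READ_ONLY attribute is clear; anything else jumps to miss.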
+  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ sd(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ mov(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1, kRAHasNotBeenSaved,
+                 kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver, Register map,
+                                           Register scratch,
+                                           int interceptor_bit, Label* slow) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ And(at, scratch,
+         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+  __ Branch(slow, ne, at, Operand(zero_reg));
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register elements,
+                                  Register scratch1, Register scratch2,
+                                  Register result, Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used to hold elements map and elements length.
+  //            Holds the elements map if not_fast_array branch is taken.
+  //
+  // scratch2 - used to hold the loaded value.
+
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode (not dictionary).
+    __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    __ Branch(not_fast_array, ne, scratch1, Operand(at));
+  } else {
+    __ AssertFastElements(elements);
+  }
+
+  // Check that the key (index) is within bounds.
+  __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(out_of_range, hs, key, Operand(scratch1));
+
+  // Fast case: Do the load.
+  __ Daddu(scratch1, elements,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // The key is a smi.
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
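+  // SmiScale folds the smi untag and the scaling into one shift, producing
+  // key_value * kPointerSize directly from the tagged key.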
+  __ SmiScale(at, key, kPointerSizeLog2);
+  __ daddu(at, at, scratch1);
+  __ ld(scratch2, MemOperand(at));
+
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ Branch(out_of_range, eq, scratch2, Operand(at));
+  __ mov(result, scratch2);
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map, Register hash,
+                                 Label* index_string, Label* not_unique) {
+  // The key is not a smi.
+  Label unique;
+  // Is it a name?
+  __ GetObjectType(key, map, hash);
+  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
+
+  // Is the string an array index, with cached numeric value?
+  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
+  __ Branch(index_string, eq, at, Operand(zero_reg));
+
+  // Is the string internalized? We know it's a string, so a single
+  // bit test is enough.
+  // map: key map
+  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0);
+  __ And(at, hash, Operand(kIsNotInternalizedMask));
+  __ Branch(not_unique, ne, at, Operand(zero_reg));
+
+  __ bind(&unique);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = a0;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+  Label slow;
+
+  __ ld(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+                                    JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), v0, a3, a4);
+  __ Ret();
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+// A register that isn't one of the parameters to the load IC.
+static const Register LoadIC_TempRegister() { return a3; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in ra.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, a4);
+
+  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in ra.
+
+  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+static MemOperand GenerateMappedArgumentsLookup(
+    MacroAssembler* masm, Register object, Register key, Register scratch1,
+    Register scratch2, Register scratch3, Label* unmapped_case,
+    Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the map check
+  // later, we do not need to check for interceptors or whether it
+  // requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ GetObjectType(object, scratch1, scratch2);
+  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
+
+  // Check that the key is a positive smi.
+  __ NonNegativeSmiTst(key, scratch1);
+  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
+
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+  __ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+  // Check if the element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+  __ Dsubu(scratch2, scratch2, Operand(Smi::FromInt(2)));
+  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
+
+  // Load element index and check whether it is the hole.
+  const int kOffset =
+      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
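+  // A sloppy-arguments parameter map is laid out as [context, backing store,
+  // index_0, index_1, ...], so mapped slots begin two pointers past the
+  // FixedArray header (matching the Smi::FromInt(2) adjustment above).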
+
+  __ SmiUntag(scratch3, key);
+  __ dsll(scratch3, scratch3, kPointerSizeLog2);
+  __ Daddu(scratch3, scratch3, Operand(kOffset));
+
+  __ Daddu(scratch2, scratch1, scratch3);
+  __ ld(scratch2, MemOperand(scratch2));
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  __ ld(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  __ SmiUntag(scratch3, scratch2);
+  __ dsll(scratch3, scratch3, kPointerSizeLog2);
+  __ Daddu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+  __ Daddu(scratch2, scratch1, scratch3);
+  return MemOperand(scratch2);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                                  Register key,
+                                                  Register parameter_map,
+                                                  Register scratch,
+                                                  Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map. The parameter_map register
+  // must be loaded with the parameter map of the arguments object and is
+  // overwritten.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+  __ CheckMap(backing_store, scratch, Heap::kFixedArrayMapRootIndex, slow_case,
+              DONT_DO_SMI_CHECK);
+  __ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
+  __ SmiUntag(scratch, key);
+  __ dsll(scratch, scratch, kPointerSizeLog2);
+  __ Daddu(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Daddu(scratch, backing_store, scratch);
+  return MemOperand(scratch);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(value.is(a0));
+
+  Label slow, notin;
+  // The store address is returned in the register of MemOperand
+  // mapped_location.
+  MemOperand mapped_location = GenerateMappedArgumentsLookup(
+      masm, receiver, key, a3, a4, a5, &notin, &slow);
+  __ sd(value, mapped_location);
+  __ mov(t1, value);
+  DCHECK_EQ(mapped_location.offset(), 0);
+  __ RecordWrite(a3, mapped_location.rm(), t1, kRAHasNotBeenSaved,
+                 kDontSaveFPRegs);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, value);  // (In delay slot) return the value stored in v0.
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in a3.
+  // The store address is returned in the register of MemOperand
+  // unmapped_location.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, key, a3, a4, &slow);
+  __ sd(value, unmapped_location);
+  __ mov(t1, value);
+  DCHECK_EQ(unmapped_location.offset(), 0);
+  __ RecordWrite(a3, unmapped_location.rm(), t1, kRAHasNotBeenSaved,
+                 kDontSaveFPRegs);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in ra.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in ra.
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // The return address is in ra.
+  Label slow, check_name, index_smi, index_name, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register key = LoadDescriptor::NameRegister();
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  DCHECK(key.is(a2));
+  DCHECK(receiver.is(a1));
+
+  Isolate* isolate = masm->isolate();
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_name);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
+                                 Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(a0, a3, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));
+
+  // Check whether the elements array is a number dictionary.
+  // a3: elements map
+  // a4: elements
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&slow, ne, a3, Operand(at));
+  __ dsra32(a0, key, 0);
+  __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
+  __ Ret();
+
+  // Slow case, key and receiver still in a2 and a1.
+  __ bind(&slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, a4,
+                      a3);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_name);
+  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, a0, a3,
+                                 Map::kHasNamedInterceptor, &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
+  __ Branch(&probe_dictionary, eq, a4, Operand(at));
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the name hash.
+  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ dsll32(a3, a0, 0);
+  __ dsrl32(a3, a3, 0);
+  __ dsra(a3, a3, KeyedLookupCache::kMapHashShift);
+  __ lwu(a4, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ dsra(at, a4, Name::kHashShift);
+  __ xor_(a3, a3, at);
+  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+  __ And(a3, a3, Operand(mask));
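+  // a3 now holds roughly ((map_low32 >> kMapHashShift) ^
+  // (hash_field >> kHashShift)) & mask, mirroring the C++ side of
+  // KeyedLookupCache.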
+
+  // Load the key (consisting of map and unique name) from the cache and
+  // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+  __ li(a4, Operand(cache_keys));
+  __ dsll(at, a3, kPointerSizeLog2 + 1);
+  __ daddu(a4, a4, at);
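+  // a4 now points at the selected bucket; each cache entry is a (map, name)
+  // pair, hence the scaling by two pointers (kPointerSizeLog2 + 1) above.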
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ ld(a5, MemOperand(a4, kPointerSize * i * 2));
+    __ Branch(&try_next_entry, ne, a0, Operand(a5));
+    __ ld(a5, MemOperand(a4, kPointerSize * (i * 2 + 1)));
+    __ Branch(&hit_on_nth_entry[i], eq, key, Operand(a5));
+    __ bind(&try_next_entry);
+  }
+
+  __ ld(a5, MemOperand(a4, kPointerSize * (kEntriesPerBucket - 1) * 2));
+  __ Branch(&slow, ne, a0, Operand(a5));
+  __ ld(a5, MemOperand(a4, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
+  __ Branch(&slow, ne, key, Operand(a5));
+
+  // Get field offset.
+  // a0     : receiver's map
+  // a3     : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    __ li(a4, Operand(cache_field_offsets));
+
+    // TODO(yy) This data structure does NOT follow natural pointer size.
+    __ dsll(at, a3, kPointerSizeLog2 - 1);
+    __ daddu(at, a4, at);
+    __ lwu(a5, MemOperand(at, kPointerSize / 2 * i));
+
+    __ lbu(a6, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
+    __ Dsubu(a5, a5, a6);
+    __ Branch(&property_array_property, ge, a5, Operand(zero_reg));
+    if (i != 0) {
+      __ Branch(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ bind(&load_in_object_property);
+  __ lbu(a6, FieldMemOperand(a0, Map::kInstanceSizeOffset));
+  // Index from start of object.
+  __ daddu(a6, a6, a5);
+  // Remove the heap tag.
+  __ Dsubu(receiver, receiver, Operand(kHeapObjectTag));
+  __ dsll(at, a6, kPointerSizeLog2);
+  __ daddu(at, receiver, at);
+  __ ld(v0, MemOperand(at));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      a4, a3);
+  __ Ret();
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ ld(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ Daddu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ dsll(v0, a5, kPointerSizeLog2);
+  __ Daddu(v0, v0, a1);
+  __ ld(v0, MemOperand(v0));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      a4, a3);
+  __ Ret();
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // a3: elements
+  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
+  // Load the property to v0.
+  GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, a4,
+                      a3);
+  __ Ret();
+
+  __ bind(&index_name);
+  __ IndexFromHash(a3, key);
+  // Now jump to the place where smi keys are handled.
+  __ Branch(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  // Return address is in ra.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register scratch = a3;
+  Register result = v0;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+    Register value, Register key, Register receiver, Register receiver_map,
+    Register elements_map, Register elements) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(fast_object);
+  Register scratch_value = a4;
+  Register address = a5;
+  if (check_map == kCheckMap) {
+    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ Branch(fast_double, ne, elements_map,
+              Operand(masm->isolate()->factory()->fixed_array_map()));
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+  Label holecheck_passed1;
+  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ SmiScale(at, key, kPointerSizeLog2);
+  __ daddu(address, address, at);
+  __ ld(scratch_value, MemOperand(address));
+
+  __ Branch(&holecheck_passed1, ne, scratch_value,
+            Operand(masm->isolate()->factory()->the_hole_value()));
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+                                      slow);
+
+  __ bind(&holecheck_passed1);
+
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  // It's irrelevant whether array is smi-only or not when writing a smi.
+  __ Daddu(address, elements,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiScale(scratch_value, key, kPointerSizeLog2);
+  __ Daddu(address, address, scratch_value);
+  __ sd(value, MemOperand(address));
+  __ Ret();
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, scratch_value,
+                             &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Daddu(address, elements,
+           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiScale(scratch_value, key, kPointerSizeLog2);
+  __ Daddu(address, address, scratch_value);
+  __ sd(value, MemOperand(address));
+  // Update write barrier for the elements array address.
+  __ mov(scratch_value, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements, address, scratch_value, kRAHasNotBeenSaved,
+                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+    __ Branch(slow, ne, elements_map, Operand(at));
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so
+  // go to the runtime.
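+  // Only the upper 32 bits of each element are loaded below; the hole is a
+  // NaN with a distinctive upper word (kHoleNanUpper32), so a single word
+  // compare suffices.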
+  __ Daddu(address, elements,
+           Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) -
+                   kHeapObjectTag));
+  __ SmiScale(at, key, kPointerSizeLog2);
+  __ daddu(address, address, at);
+  __ lw(scratch_value, MemOperand(address));
+  __ Branch(&fast_double_without_map_check, ne, scratch_value,
+            Operand(kHoleNanUpper32));
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+                                      slow);
+
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, key,
+                                 elements,  // Overwritten.
+                                 a3,        // Scratch regs...
+                                 a4, a5, &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
+    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  }
+  __ Ret();
+
+  __ bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  __ Branch(&non_double_value, ne, a4, Operand(at));
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, a4, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   receiver_map, mode, slow);
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, a4, slow);
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, a4, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictMode strict_mode) {
+  // ---------- S t a t e --------------
+  //  -- a0     : value
+  //  -- a1     : key
+  //  -- a2     : receiver
+  //  -- ra     : return address
+  // -----------------------------------
+  Label slow, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array;
+
+  // Register usage.
+  Register value = StoreDescriptor::ValueRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  DCHECK(value.is(a0));
+  Register receiver_map = a3;
+  Register elements_map = a6;
+  Register elements = a7;  // Elements array of the receiver.
+  // a4 and a5 are used as general scratch registers.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow);
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+  // Get the map of the object.
+  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ And(a4, a4,
+         Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ Branch(&slow, ne, a4, Operand(zero_reg));
+  // Check if the object is a JS array or not.
+  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
+  // Check that the object is some kind of JSObject.
+  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));
+
+  // Object case: Check key against length in the elements array.
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&fast_object, lo, key, Operand(a4));
+
+  // Slow case, handle jump to runtime.
+  __ bind(&slow);
+  // Entry registers are intact.
+  // a0: value.
+  // a1: key.
+  // a2: receiver.
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // Condition code from comparing key and array length is still available.
+  // Only support writing to array[array.length].
+  __ Branch(&slow, ne, key, Operand(a4));
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ Branch(&slow, hs, key, Operand(a4));
+  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ Branch(&check_if_double_array, ne, elements_map,
+            Heap::kFixedArrayMapRootIndex);
+
+  __ jmp(&fast_object_grow);
+
+  __ bind(&check_if_double_array);
+  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+  __ jmp(&fast_double_grow);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+  __ bind(&array);
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array.
+  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ Branch(&extra, hs, key, Operand(a4));
+
+  KeyedStoreGenerateGenericHelper(
+      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+      value, key, receiver, receiver_map, elements_map, elements);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength, value,
+                                  key, receiver, receiver_map, elements_map,
+                                  elements);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(receiver.is(a1));
+  DCHECK(name.is(a2));
+  DCHECK(StoreDescriptor::ValueRegister().is(a0));
+
+  // Probe the stub cache.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+                                               name, a3, a4, a5, a6);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Label miss;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Register dictionary = a3;
+  DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5));
+
+  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  GenerateDictionaryStore(masm, &miss, a3, name, value, a4, a5);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
+  GenerateMiss(masm);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address andi_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an andi at, rx, #yyy,
+  // nothing was inlined.
+  Instr instr = Assembler::instr_at(andi_instruction_address);
+  return Assembler::IsAndImmediate(instr) &&
+         Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  Address andi_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not an andi at, rx, #yyy,
+  // nothing was inlined.
+  Instr instr = Assembler::instr_at(andi_instruction_address);
+  if (!(Assembler::IsAndImmediate(instr) &&
+        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
+    return;
+  }
+
+  // The delta to the start of the map check instruction, and the condition
+  // code to use at the patched jump.
+  int delta = Assembler::GetImmediate16(instr);
+  delta += Assembler::GetRs(instr) * kImm16Mask;
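+  // Deltas too large for the 16-bit immediate are split between the
+  // immediate and the rs field; the two lines above recombine them.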
+  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
+  // signals that nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, andi=%p, delta=%d\n", address,
+           andi_instruction_address, delta);
+  }
+
+  Address patch_address =
+      andi_instruction_address - delta * Instruction::kInstrSize;
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // Enabling by changing from
+  //   andi at, rx, 0
+  //   Branch <target>, eq, at, Operand(zero_reg)
+  // to:
+  //   andi at, rx, #kSmiTagMask
+  //   Branch <target>, ne, at, Operand(zero_reg)
+  // and vice-versa to be disabled again.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
+    DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
+    patcher.masm()->andi(at, reg, kSmiTagMask);
+  } else {
+    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
+    patcher.masm()->andi(at, reg, 0);
+  }
+  DCHECK(Assembler::IsBranch(branch_instr));
+  if (Assembler::IsBeq(branch_instr)) {
+    patcher.ChangeBranchCondition(ne);
+  } else {
+    DCHECK(Assembler::IsBne(branch_instr));
+    patcher.ChangeBranchCondition(eq);
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/ic/mips64/stub-cache-mips64.cc b/src/ic/mips64/stub-cache-mips64.cc
new file mode 100644
index 0000000..272e5be
--- /dev/null
+++ b/src/ic/mips64/stub-cache-mips64.cc
@@ -0,0 +1,170 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, bool leave_frame,
+                       StubCache::Table table, Register receiver, Register name,
+                       // Number of the cache entry, not scaled.
+                       Register offset, Register scratch, Register scratch2,
+                       Register offset_scratch) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  uint64_t key_off_addr = reinterpret_cast<uint64_t>(key_offset.address());
+  uint64_t value_off_addr = reinterpret_cast<uint64_t>(value_offset.address());
+  uint64_t map_off_addr = reinterpret_cast<uint64_t>(map_offset.address());
+
+  // Check the relative positions of the address fields.
+  DCHECK(value_off_addr > key_off_addr);
+  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+  DCHECK(map_off_addr > key_off_addr);
+  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register base_addr = scratch;
+  scratch = no_reg;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ dsll(offset_scratch, offset, 1);
+  __ Daddu(offset_scratch, offset_scratch, offset);
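+  // (offset << 1) + offset == offset * 3, computed without a multiply
+  // instruction.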
+
+  // Calculate the base address of the entry.
+  __ li(base_addr, Operand(key_offset));
+  __ dsll(at, offset_scratch, kPointerSizeLog2);
+  __ Daddu(base_addr, base_addr, at);
+
+  // Check that the key in the entry matches the name.
+  __ ld(at, MemOperand(base_addr, 0));
+  __ Branch(&miss, ne, name, Operand(at));
+
+  // Check the map matches.
+  __ ld(at, MemOperand(base_addr, map_off_addr - key_off_addr));
+  __ ld(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, at, Operand(scratch2));
+
+  // Get the code entry from the cache.
+  Register code = scratch2;
+  scratch2 = no_reg;
+  __ ld(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  Register flags_reg = base_addr;
+  base_addr = no_reg;
+  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
+  __ Branch(&miss, ne, flags_reg, Operand(flags));
+
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ jmp(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ jmp(&miss);
+  }
+#endif
+
+  if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+  // Jump to the first instruction in the code stub.
+  __ Daddu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+
+  // Miss: fall through.
+  __ bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+                              bool leave_frame, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 3 * kPointerSize.
+  // DCHECK(sizeof(Entry) == 3 * kPointerSize);
+
+  // Make sure the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  DCHECK(!scratch.is(receiver));
+  DCHECK(!scratch.is(name));
+  DCHECK(!extra.is(receiver));
+  DCHECK(!extra.is(name));
+  DCHECK(!extra.is(scratch));
+  DCHECK(!extra2.is(receiver));
+  DCHECK(!extra2.is(name));
+  DCHECK(!extra2.is(scratch));
+  DCHECK(!extra2.is(extra));
+
+  // Check register validity.
+  DCHECK(!scratch.is(no_reg));
+  DCHECK(!extra.is(no_reg));
+  DCHECK(!extra2.is(no_reg));
+  DCHECK(!extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+                      extra3);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ ld(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+  __ ld(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ Daddu(scratch, scratch, at);
+  uint64_t mask = kPrimaryTableSize - 1;
+  // We shift out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.
+  __ dsrl(scratch, scratch, kCacheIndexShift);
+  __ Xor(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+  __ And(scratch, scratch, Operand(mask));
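+  // This mirrors StubCache::PrimaryOffset(): mix the map address, name hash
+  // and flags, producing an unscaled entry index (ProbeTable does the
+  // scaling to a byte offset).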
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ dsrl(at, name, kCacheIndexShift);
+  __ Dsubu(scratch, scratch, at);
+  uint64_t mask2 = kSecondaryTableSize - 1;
+  __ Daddu(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+  __ And(scratch, scratch, Operand(mask2));
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+                      extra3);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/ic/stub-cache.cc b/src/ic/stub-cache.cc
new file mode 100644
index 0000000..35a4acf
--- /dev/null
+++ b/src/ic/stub-cache.cc
@@ -0,0 +1,147 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/bits.h"
+#include "src/ic/stub-cache.h"
+#include "src/type-info.h"
+
+namespace v8 {
+namespace internal {
+
+
+StubCache::StubCache(Isolate* isolate) : isolate_(isolate) {}
+
+
+void StubCache::Initialize() {
+  DCHECK(base::bits::IsPowerOfTwo32(kPrimaryTableSize));
+  DCHECK(base::bits::IsPowerOfTwo32(kSecondaryTableSize));
+  Clear();
+}
+
+
+static Code::Flags CommonStubCacheChecks(Name* name, Map* map,
+                                         Code::Flags flags) {
+  flags = Code::RemoveTypeAndHolderFromFlags(flags);
+
+  // Validate that the name does not move on scavenge, and that we
+  // can use identity checks instead of structural equality checks.
+  DCHECK(!name->GetHeap()->InNewSpace(name));
+  DCHECK(name->IsUniqueName());
+
+  // The state bits are not important to the hash function because the stub
+  // cache only contains handlers. Make sure that the bits are the least
+  // significant so they will be the ones masked out.
+  DCHECK_EQ(Code::HANDLER, Code::ExtractKindFromFlags(flags));
+  STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
+
+  // Make sure that the code type and cache holder are not included in the hash.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+  DCHECK(Code::ExtractCacheHolderFromFlags(flags) == 0);
+
+  return flags;
+}
+
+
+Code* StubCache::Set(Name* name, Map* map, Code* code) {
+  Code::Flags flags = CommonStubCacheChecks(name, map, code->flags());
+
+  // Compute the primary entry.
+  int primary_offset = PrimaryOffset(name, flags, map);
+  Entry* primary = entry(primary_, primary_offset);
+  Code* old_code = primary->value;
+
+  // If the primary entry has useful data in it, we retire it to the
+  // secondary cache before overwriting it.
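+  // The secondary slot is derived from the evicted entry's own key, flags
+  // and map, so a subsequent secondary probe for that handler will find it.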
+  if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
+    Map* old_map = primary->map;
+    Code::Flags old_flags =
+        Code::RemoveTypeAndHolderFromFlags(old_code->flags());
+    int seed = PrimaryOffset(primary->key, old_flags, old_map);
+    int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
+    Entry* secondary = entry(secondary_, secondary_offset);
+    *secondary = *primary;
+  }
+
+  // Update primary cache.
+  primary->key = name;
+  primary->value = code;
+  primary->map = map;
+  isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
+  return code;
+}
+
+
+Code* StubCache::Get(Name* name, Map* map, Code::Flags flags) {
+  flags = CommonStubCacheChecks(name, map, flags);
+  int primary_offset = PrimaryOffset(name, flags, map);
+  Entry* primary = entry(primary_, primary_offset);
+  if (primary->key == name && primary->map == map) {
+    return primary->value;
+  }
+  int secondary_offset = SecondaryOffset(name, flags, primary_offset);
+  Entry* secondary = entry(secondary_, secondary_offset);
+  if (secondary->key == name && secondary->map == map) {
+    return secondary->value;
+  }
+  return NULL;
+}
+
+
+void StubCache::Clear() {
+  Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
+  for (int i = 0; i < kPrimaryTableSize; i++) {
+    primary_[i].key = isolate()->heap()->empty_string();
+    primary_[i].map = NULL;
+    primary_[i].value = empty;
+  }
+  for (int j = 0; j < kSecondaryTableSize; j++) {
+    secondary_[j].key = isolate()->heap()->empty_string();
+    secondary_[j].map = NULL;
+    secondary_[j].value = empty;
+  }
+}
+
+
+void StubCache::CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
+                                    Code::Flags flags,
+                                    Handle<Context> native_context,
+                                    Zone* zone) {
+  for (int i = 0; i < kPrimaryTableSize; i++) {
+    if (primary_[i].key == *name) {
+      Map* map = primary_[i].map;
+      // Map can be NULL if the stub is a constant function call
+      // with a primitive receiver.
+      if (map == NULL) continue;
+
+      int offset = PrimaryOffset(*name, flags, map);
+      if (entry(primary_, offset) == &primary_[i] &&
+          !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+        types->AddMapIfMissing(Handle<Map>(map), zone);
+      }
+    }
+  }
+
+  for (int i = 0; i < kSecondaryTableSize; i++) {
+    if (secondary_[i].key == *name) {
+      Map* map = secondary_[i].map;
+      // Map can be NULL if the stub is a constant function call
+      // with a primitive receiver.
+      if (map == NULL) continue;
+
+      // Lookup in primary table and skip duplicates.
+      int primary_offset = PrimaryOffset(*name, flags, map);
+
+      // Lookup in secondary table and add matches.
+      int offset = SecondaryOffset(*name, flags, primary_offset);
+      if (entry(secondary_, offset) == &secondary_[i] &&
+          !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
+        types->AddMapIfMissing(Handle<Map>(map), zone);
+      }
+    }
+  }
+}
+}
+}  // namespace v8::internal
diff --git a/src/ic/stub-cache.h b/src/ic/stub-cache.h
new file mode 100644
index 0000000..7aee6f1
--- /dev/null
+++ b/src/ic/stub-cache.h
@@ -0,0 +1,171 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_STUB_CACHE_H_
+#define V8_STUB_CACHE_H_
+
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+// The stub cache is used for megamorphic property accesses.
+// It maps (map, name, type) to property access handlers. The cache does not
+// need explicit invalidation when a prototype chain is modified, since the
+// handlers verify the chain.
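+//
+// A rough usage sketch (CompileHandler is a hypothetical placeholder, not
+// an API of this class):
+//
+//   Code* handler = isolate->stub_cache()->Get(*name, receiver->map(), flags);
+//   if (handler == NULL) {
+//     handler = CompileHandler(...);  // compile a new property handler
+//     isolate->stub_cache()->Set(*name, receiver->map(), handler);
+//   }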
+
+
+class SCTableReference {
+ public:
+  Address address() const { return address_; }
+
+ private:
+  explicit SCTableReference(Address address) : address_(address) {}
+
+  Address address_;
+
+  friend class StubCache;
+};
+
+
+class StubCache {
+ public:
+  struct Entry {
+    Name* key;
+    Code* value;
+    Map* map;
+  };
+
+  void Initialize();
+  // Access cache for entry hash(name, map).
+  Code* Set(Name* name, Map* map, Code* code);
+  Code* Get(Name* name, Map* map, Code::Flags flags);
+  // Clear the lookup table (at mark-compact collection).
+  void Clear();
+  // Collect all maps that match the name and flags.
+  void CollectMatchingMaps(SmallMapList* types, Handle<Name> name,
+                           Code::Flags flags, Handle<Context> native_context,
+                           Zone* zone);
+  // Generate code for probing the stub cache table.
+  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
+  // registers. Set to no_reg if not needed.
+  // If leave_frame is true, then exit a frame before the tail call.
+  void GenerateProbe(MacroAssembler* masm, Code::Flags flags, bool leave_frame,
+                     Register receiver, Register name, Register scratch,
+                     Register extra, Register extra2 = no_reg,
+                     Register extra3 = no_reg);
+
+  enum Table { kPrimary, kSecondary };
+
+  SCTableReference key_reference(StubCache::Table table) {
+    return SCTableReference(
+        reinterpret_cast<Address>(&first_entry(table)->key));
+  }
+
+  SCTableReference map_reference(StubCache::Table table) {
+    return SCTableReference(
+        reinterpret_cast<Address>(&first_entry(table)->map));
+  }
+
+  SCTableReference value_reference(StubCache::Table table) {
+    return SCTableReference(
+        reinterpret_cast<Address>(&first_entry(table)->value));
+  }
+
+  StubCache::Entry* first_entry(StubCache::Table table) {
+    switch (table) {
+      case StubCache::kPrimary:
+        return StubCache::primary_;
+      case StubCache::kSecondary:
+        return StubCache::secondary_;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  Isolate* isolate() { return isolate_; }
+
+  // Setting the entry size such that the index is shifted by Name::kHashShift
+  // is convenient; shifting down the length field (to extract the hash code)
+  // automatically discards the hash bit field.
+  static const int kCacheIndexShift = Name::kHashShift;
+
+ private:
+  explicit StubCache(Isolate* isolate);
+
+  // The stub cache has a primary and secondary level.  The two levels have
+  // different hashing algorithms in order to avoid simultaneous collisions
+  // in both caches.  Unlike a probing strategy (quadratic or otherwise), the
+  // update strategy is fairly clear and simple:  Any existing entry
+  // in the primary cache is moved to the secondary cache, and secondary cache
+  // entries are overwritten.
+
+  // Hash algorithm for the primary table.  This algorithm is replicated in
+  // assembler for every architecture.  Returns an index into the table that
+  // is scaled by 1 << kCacheIndexShift.
+  static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
+    STATIC_ASSERT(kCacheIndexShift == Name::kHashShift);
+    // Compute the hash of the name (use entire hash field).
+    DCHECK(name->HasHashCode());
+    uint32_t field = name->hash_field();
+    // Using only the low bits in 64-bit mode is unlikely to increase the
+    // risk of collision even if the heap is spread over an area larger than
+    // 4Gb (and not at all if it isn't).
+    uint32_t map_low32bits =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
+    // We always set the in_loop bit to zero when generating the lookup code
+    // so do it here too so the hash codes match.
+    uint32_t iflags =
+        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+    // Base the offset on a simple combination of name, flags, and map.
+    uint32_t key = (map_low32bits + field) ^ iflags;
+    return key & ((kPrimaryTableSize - 1) << kCacheIndexShift);
+  }
+
+  // Hash algorithm for the secondary table.  This algorithm is replicated in
+  // assembler for every architecture.  Returns an index into the table that
+  // is scaled by 1 << kCacheIndexShift.
+  static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
+    // Use the seed from the primary cache in the secondary cache.
+    uint32_t name_low32bits =
+        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
+    // We always set the in_loop bit to zero when generating the lookup code
+    // so do it here too so the hash codes match.
+    uint32_t iflags =
+        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
+    uint32_t key = (seed - name_low32bits) + iflags;
+    return key & ((kSecondaryTableSize - 1) << kCacheIndexShift);
+  }
+
+  // Compute the entry for a given offset in exactly the same way as
+  // we do in generated code.  We generate a hash code that already
+  // ends in Name::kHashShift 0s.  Then we multiply it so it is a multiple
+  // of sizeof(Entry).  This makes it easier to avoid making mistakes
+  // in the hashed offset computations.
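+  // Since the offset ends in kHashShift zero bits, offset * multiplier ==
+  // (offset >> Name::kHashShift) * sizeof(Entry), i.e. always a whole
+  // number of entries.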
+  static Entry* entry(Entry* table, int offset) {
+    const int multiplier = sizeof(*table) >> Name::kHashShift;
+    return reinterpret_cast<Entry*>(reinterpret_cast<Address>(table) +
+                                    offset * multiplier);
+  }
+
+  static const int kPrimaryTableBits = 11;
+  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
+  static const int kSecondaryTableBits = 9;
+  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
+
+ private:
+  Entry primary_[kPrimaryTableSize];
+  Entry secondary_[kSecondaryTableSize];
+  Isolate* isolate_;
+
+  friend class Isolate;
+  friend class SCTableReference;
+
+  DISALLOW_COPY_AND_ASSIGN(StubCache);
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_STUB_CACHE_H_
diff --git a/src/ic/x64/access-compiler-x64.cc b/src/ic/x64/access-compiler-x64.cc
new file mode 100644
index 0000000..cd9196f
--- /dev/null
+++ b/src/ic/x64/access-compiler-x64.cc
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, rax, rbx, rdi, r8};
+  return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(rbx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  static Register registers[] = {receiver, name, rbx, rdi, r8};
+  return registers;
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x64/handler-compiler-x64.cc b/src/ic/x64/handler-compiler-x64.cc
new file mode 100644
index 0000000..c4d6ecf
--- /dev/null
+++ b/src/ic/x64/handler-compiler-x64.cc
@@ -0,0 +1,835 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(name->IsUniqueName());
+  DCHECK(!receiver.is(scratch0));
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1);
+
+  __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  __ testb(FieldOperand(scratch0, Map::kBitFieldOffset),
+           Immediate(kInterceptorOrAccessCheckNeededMask));
+  __ j(not_zero, miss_label);
+
+  // Check that receiver is a JSObject.
+  __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+  __ j(below, miss_label);
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+  // Check that the properties array is a dictionary.
+  __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(not_equal, miss_label);
+
+  Label done;
+  NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
+                                                   properties, name, scratch1);
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  Isolate* isolate = masm->isolate();
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->native_context()->get(index)));
+
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+  __ movp(scratch, Operand(rsi, offset));
+  __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
+  __ j(not_equal, miss);
+
+  // Load its initial map. The global functions all have initial maps.
+  __ Move(prototype, Handle<Map>(function->initial_map()));
+  // Load the prototype from the initial map.
+  __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register result, Register scratch,
+    Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, result, miss_label);
+  if (!result.is(rax)) __ movp(rax, result);
+  __ ret(0);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+  __ Push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  __ Move(kScratchRegister, interceptor);
+  __ Push(kScratchRegister);
+  __ Push(receiver);
+  __ Push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+  __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+                           NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate a call to an API function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch_in,
+    bool is_store, int argc, Register* values) {
+  DCHECK(optimization.is_simple_api_call());
+
+  __ PopReturnAddressTo(scratch_in);
+  // receiver
+  __ Push(receiver);
+  // Write the arguments to stack frame.
+  for (int i = 0; i < argc; i++) {
+    Register arg = values[argc - 1 - i];
+    DCHECK(!receiver.is(arg));
+    DCHECK(!scratch_in.is(arg));
+    __ Push(arg);
+  }
+  __ PushReturnAddressFrom(scratch_in);
+  // Stack now matches JSFunction abi.
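+  // Top of stack first, the layout is now (a sketch):
+  //   return address
+  //   values[0] ... values[argc - 1]
+  //   receiver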
+
+  // Abi for CallApiFunctionStub.
+  Register callee = rax;
+  Register call_data = rbx;
+  Register holder = rcx;
+  Register api_function_address = rdx;
+  Register scratch = rdi;  // scratch_in is no longer valid.
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Move(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ Move(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ Move(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ Move(scratch, api_call_info);
+    __ movp(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+  } else {
+    __ Move(call_data, call_data_obj);
+  }
+
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ Move(api_function_address, function_address,
+          RelocInfo::EXTERNAL_REFERENCE);
+
+  // Jump to stub.
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  __ Move(scratch, cell);
+  __ Cmp(FieldOperand(scratch, Cell::kValueOffset),
+         masm->isolate()->factory()->the_hole_value());
+  __ j(not_equal, miss);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ Push(value());
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ movp(receiver,
+                FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ Push(receiver);
+      __ Push(value());
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ Pop(rax);
+
+    // Restore context register.
+    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  }
+  __ ret(0);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> getter) {
+  // ----------- S t a t e -------------
+  //  -- rax    : receiver
+  //  -- rcx    : name
+  //  -- rsp[0] : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ movp(receiver,
+                FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ Push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
+  }
+  __ ret(0);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+
+  DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+
+  __ PopReturnAddressTo(rbx);
+  __ Push(receiver);
+  __ Push(name);
+  __ Push(value);
+  __ PushReturnAddressFrom(rbx);
+}
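+
+// After StoreIC_PushArgs the stack holds, top first: return address, value,
+// name, receiver -- the three arguments the slow paths below tail-call with
+// (hence the argument count of 3).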
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM((masm()))
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ bind(label);
+    __ Move(this->name(), name);
+  }
+}
+
+
+// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// the store is successful.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+    Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+    Register storage_reg, Register value_reg, Register scratch1,
+    Register scratch2, Register unused, Label* miss_label, Label* slow) {
+  int descriptor = transition->LastAdded();
+  DescriptorArray* descriptors = transition->instance_descriptors();
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  DCHECK(!representation.IsNone());
+
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+    __ Cmp(value_reg, constant);
+    __ j(not_equal, miss_label);
+  } else if (representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      Label do_store;
+      while (true) {
+        __ CompareMap(value_reg, it.Current());
+        it.Advance();
+        if (it.Done()) {
+          __ j(not_equal, miss_label);
+          break;
+        }
+        __ j(equal, &do_store, Label::kNear);
+      }
+      __ bind(&do_store);
+    }
+  } else if (representation.IsDouble()) {
+    Label do_store, heap_number;
+    __ AllocateHeapNumber(storage_reg, scratch1, slow, MUTABLE);
+
+    __ JumpIfNotSmi(value_reg, &heap_number);
+    __ SmiToInteger32(scratch1, value_reg);
+    __ Cvtlsi2sd(xmm0, scratch1);
+    __ jmp(&do_store);
+
+    __ bind(&heap_number);
+    __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
+                DONT_DO_SMI_CHECK);
+    __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ bind(&do_store);
+    __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
+  }
+
+  // Stub never generated for objects that require access checks.
+  DCHECK(!transition->is_access_check_needed());
+
+  // Perform map transition for the receiver if necessary.
+  if (details.type() == FIELD &&
+      Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ PopReturnAddressTo(scratch1);
+    __ Push(receiver_reg);
+    __ Push(transition);
+    __ Push(value_reg);
+    __ PushReturnAddressFrom(scratch1);
+    __ TailCallExternalReference(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                          isolate()),
+        3, 1);
+    return;
+  }
+
+  // Update the map of the object.
+  __ Move(scratch1, transition);
+  __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+  // Update the write barrier for the map field.
+  __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  if (details.type() == CONSTANT) {
+    DCHECK(value_reg.is(rax));
+    __ ret(0);
+    return;
+  }
+
+  int index = transition->instance_descriptors()->GetFieldIndex(
+      transition->LastAdded());
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties is not going to change.
+  index -= transition->inobject_properties();
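+  // Worked example (a sketch, assuming 4 in-object properties): a field
+  // index of 2 becomes 2 - 4 == -2, an in-object slot stored at
+  // instance_size + (-2) * kPointerSize; a field index of 5 becomes 1, the
+  // second slot of the out-of-object properties array.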
+
+  // TODO(verwaest): Share this code as a code stub.
+  SmiCheck smi_check =
+      representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = transition->instance_size() + (index * kPointerSize);
+    if (representation.IsDouble()) {
+      __ movp(FieldOperand(receiver_reg, offset), storage_reg);
+    } else {
+      __ movp(FieldOperand(receiver_reg, offset), value_reg);
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ movp(storage_reg, value_reg);
+      }
+      __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array (optimistically).
+    __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+    if (representation.IsDouble()) {
+      __ movp(FieldOperand(scratch1, offset), storage_reg);
+    } else {
+      __ movp(FieldOperand(scratch1, offset), value_reg);
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ movp(storage_reg, value_reg);
+      }
+      __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+    }
+  }
+
+  // Return the value (register rax).
+  DCHECK(value_reg.is(rax));
+  __ ret(0);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+                                                   Register value_reg,
+                                                   Label* miss_label) {
+  DCHECK(lookup->representation().IsHeapObject());
+  __ JumpIfSmi(value_reg, miss_label);
+  HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+  Label do_store;
+  while (true) {
+    __ CompareMap(value_reg, it.Current());
+    it.Advance();
+    if (it.Done()) {
+      __ j(not_equal, miss_label);
+      break;
+    }
+    __ j(equal, &do_store, Label::kNear);
+  }
+  __ bind(&do_store);
+
+  StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                      lookup->representation());
+  GenerateTailCall(masm(), stub.GetCode());
+}
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss,
+    PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+  // Make sure there's no overlap between holder and object registers.
+  DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.  On the first
+  // iteration, reg is an alias for object_reg, on later iterations,
+  // it is an alias for holder_reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type()->IsConstant()) {
+    current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+  }
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() ||
+             current->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+
+      __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      bool in_new_space = heap()->InNewSpace(*prototype);
+      // Two possible reasons for loading the prototype from the map:
+      // (1) Can't store references to new space in code.
+      // (2) Handler is shared for all receivers with the same prototype
+      //     map (but not necessarily the same prototype instance).
+      bool load_prototype_from_map = in_new_space || depth == 1;
+      if (load_prototype_from_map) {
+        // Save the map in scratch1 for later.
+        __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+      }
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      // This allows us to install generated handlers for accesses to the
+      // global proxy (as opposed to using slow ICs). See corresponding code
+      // in LookupForRead().
+      if (current_map->IsJSGlobalProxyMap()) {
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      }
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (load_prototype_from_map) {
+        __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        __ Move(reg, prototype);
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    // Check the holder map.
+    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  DCHECK(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
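+
+// The sequence emitted by CheckPrototypes, sketched as pseudo-code
+// (illustrative; every failing check jumps to 'miss'):
+//
+//   reg = object_reg;
+//   for (map = receiver_map; map != holder_map; map = prototype->map()) {
+//     if (map is dictionary-mode and not a global object map)
+//       do a negative lookup of 'name' on the object in reg;
+//     else if (depth != 1 || check == CHECK_ALL_MAPS)
+//       CheckMap(reg, map);
+//     reg = holder_reg;  // now holds the prototype
+//   }
+//   if (depth != 0 || check == CHECK_ALL_MAPS) CheckMap(reg, holder_map);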
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ jmp(&success);
+    __ bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ jmp(&success);
+    GenerateRestoreName(miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<ExecutableAccessorInfo> callback) {
+  // Insert additional parameters into the stack frame above return address.
+  DCHECK(!scratch4().is(reg));
+  __ PopReturnAddressTo(scratch4());
+
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+  __ Push(receiver());  // receiver
+  if (heap()->InNewSpace(callback->data())) {
+    DCHECK(!scratch2().is(reg));
+    __ Move(scratch2(), callback);
+    __ Push(FieldOperand(scratch2(),
+                         ExecutableAccessorInfo::kDataOffset));  // data
+  } else {
+    __ Push(Handle<Object>(callback->data(), isolate()));
+  }
+  DCHECK(!kScratchRegister.is(reg));
+  __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
+  __ Push(kScratchRegister);  // return value
+  __ Push(kScratchRegister);  // return value default
+  __ PushAddress(ExternalReference::isolate_address(isolate()));
+  __ Push(reg);     // holder
+  __ Push(name());  // name
+  // Save a pointer to where we pushed the arguments pointer.  This will be
+  // passed as the const PropertyAccessorInfo& to the C++ callback.
+
+  __ PushReturnAddressFrom(scratch4());
+
+  // Abi for CallApiGetter
+  Register api_function_address = ApiGetterDescriptor::function_address();
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ Move(rax, value);
+  __ ret(0);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it is different from
+  // the holder and is needed should the interceptor return without a result:
+  // the ACCESSOR case needs the receiver to be passed into C++ code, and the
+  // FIELD case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+
+    if (must_preserve_receiver_reg) {
+      __ Push(receiver());
+    }
+    __ Push(holder_reg);
+    __ Push(this->name());
+
+    // Invoke an interceptor.  Note: map checks from the receiver to the
+    // interceptor's holder have been compiled before (see the caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if interceptor provided a value for property.  If it's
+    // the case, return immediately.
+    Label interceptor_failed;
+    __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
+    __ j(equal, &interceptor_failed);
+    frame_scope.GenerateLeaveFrame();
+    __ ret(0);
+
+    __ bind(&interceptor_failed);
+    __ Pop(this->name());
+    __ Pop(holder_reg);
+    if (must_preserve_receiver_reg) {
+      __ Pop(receiver());
+    }
+
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  // Call the runtime system to load the interceptor.
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  __ PopReturnAddressTo(scratch2());
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+  __ PushReturnAddressFrom(scratch2());
+
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(
+      ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  Register holder_reg = Frontend(receiver(), name);
+
+  __ PopReturnAddressTo(scratch1());
+  __ Push(receiver());
+  __ Push(holder_reg);
+  __ Push(callback);  // callback info
+  __ Push(name);
+  __ Push(value());
+  __ PushReturnAddressFrom(scratch1());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+    Handle<Name> name) {
+  __ PopReturnAddressTo(scratch1());
+  __ Push(receiver());
+  __ Push(this->name());
+  __ Push(value());
+  __ PushReturnAddressFrom(scratch1());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property = ExternalReference(
+      IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+  FrontendHeader(receiver(), name, &miss);
+
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  __ Move(result, cell);
+  __ movp(result, FieldOperand(result, PropertyCell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    __ j(equal, &miss);
+  } else if (FLAG_debug_code) {
+    __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
+    __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1);
+  __ ret(0);
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x64/ic-compiler-x64.cc b/src/ic/x64/ic-compiler-x64.cc
new file mode 100644
index 0000000..a5848b6
--- /dev/null
+++ b/src/ic/x64/ic-compiler-x64.cc
@@ -0,0 +1,137 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                                    StrictMode strict_mode) {
+  // Return address is on the stack.
+  DCHECK(!rbx.is(StoreDescriptor::ReceiverRegister()) &&
+         !rbx.is(StoreDescriptor::NameRegister()) &&
+         !rbx.is(StoreDescriptor::ValueRegister()));
+
+  __ PopReturnAddressTo(rbx);
+  __ Push(StoreDescriptor::ReceiverRegister());
+  __ Push(StoreDescriptor::NameRegister());
+  __ Push(StoreDescriptor::ValueRegister());
+  __ Push(Smi::FromInt(strict_mode));
+  __ PushReturnAddressFrom(rbx);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+  __ JumpIfSmi(receiver(), &miss, Label::kNear);
+
+  __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+  int receiver_count = receiver_maps->length();
+  for (int i = 0; i < receiver_count; ++i) {
+    // Check map and tail call if there's a match.
+    __ Cmp(scratch1(), receiver_maps->at(i));
+    if (transitioned_maps->at(i).is_null()) {
+      __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
+    } else {
+      Label next_map;
+      __ j(not_equal, &next_map, Label::kNear);
+      __ Move(transition_map(), transitioned_maps->at(i),
+              RelocInfo::EMBEDDED_OBJECT);
+      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
+  }
+
+  __ bind(&miss);
+
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+                                                    Handle<Name> name,
+                                                    Code::StubType type,
+                                                    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // In case we are compiling an IC for dictionary loads and stores, just
+    // check whether the name is unique.
+    if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+      __ movzxbp(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+    } else {
+      __ Cmp(this->name(), name);
+      __ j(not_equal, &miss);
+    }
+  }
+
+  Label number_case;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+  Register map_reg = scratch1();
+  DCHECK(kind() != Code::KEYED_STORE_IC ||
+         map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      // Check map and tail call if there's a match.
+      __ Cmp(map_reg, map);
+      if (type->Is(HeapType::Number())) {
+        DCHECK(!number_case.is_unused());
+        __ bind(&number_case);
+      }
+      __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
+    }
+  }
+  DCHECK(number_of_handled_maps > 0);
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+  return GetCode(kind(), type, name, state);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x64/ic-x64.cc b/src/ic/x64/ic-x64.cc
new file mode 100644
index 0000000..ad79f30
--- /dev/null
+++ b/src/ic/x64/ic-x64.cc
@@ -0,0 +1,990 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
+  __ j(equal, global_object);
+  __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
+  __ j(equal, global_object);
+  __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
+  __ j(equal, global_object);
+}
+
+
+// Helper function used to load a property from a dictionary backing storage.
+// This function may return false negatives, so code at miss_label
+// must always call a backup property load that is complete.
+// This function is safe to call if name is not an internalized string,
+// and will jump to the miss_label in that case.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+                                   Register elements, Register name,
+                                   Register r0, Register r1, Register result) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is unchanged.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // r0   - used to hold the capacity of the property dictionary.
+  //
+  // r1   - used to hold the index into the property dictionary.
+  //
+  // result - holds the result on exit if the load succeeded.
+
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+                                                   elements, name, r0, r1);
+
+  // If probing finds an entry in the dictionary, r1 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property.
+  __ bind(&done);
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ Test(Operand(elements, r1, times_pointer_size,
+                  kDetailsOffset - kHeapObjectTag),
+          Smi::FromInt(PropertyDetails::TypeField::kMask));
+  __ j(not_zero, miss_label);
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ movp(result, Operand(elements, r1, times_pointer_size,
+                          kValueOffset - kHeapObjectTag));
+}
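+
+// In C++ terms the generated sequence is roughly (a sketch, not the actual
+// runtime path):
+//
+//   int index = <positive probe of the NameDictionary>;
+//   PropertyDetails details = dictionary->DetailsAt(index);
+//   if (details.type() != NORMAL) goto miss_label;  // the Smi mask test
+//   result = dictionary->ValueAt(index);            // masked, scaled load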
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not an internalized string, and will jump to the miss_label
+// in that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
+                                    Register elements, Register name,
+                                    Register value, Register scratch0,
+                                    Register scratch1) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is clobbered.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // value - holds the value to store and is unchanged.
+  //
+  // scratch0 - used during the positive dictionary lookup and is clobbered.
+  //
+  // scratch1 - used for index into the property dictionary and is clobbered.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(
+      masm, miss_label, &done, elements, name, scratch0, scratch1);
+
+  // If probing finds an entry in the dictionary, scratch0 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property that is not read only.
+  __ bind(&done);
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY))
+      << kSmiTagSize;
+  __ Test(Operand(elements, scratch1, times_pointer_size,
+                  kDetailsOffset - kHeapObjectTag),
+          Smi::FromInt(kTypeAndReadOnlyMask));
+  __ j(not_zero, miss_label);
+
+  // Store the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ leap(scratch1, Operand(elements, scratch1, times_pointer_size,
+                            kValueOffset - kHeapObjectTag));
+  __ movp(Operand(scratch1, 0), value);
+
+  // Update write barrier. Make sure not to clobber the value.
+  __ movp(scratch0, value);
+  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
+}
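+
+// Layout note: a dictionary entry occupies three consecutive slots -- key,
+// value, details -- which is why kValueOffset is one pointer and
+// kDetailsOffset two pointers past kElementsStartOffset in the helpers above.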
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver, Register map,
+                                           int interceptor_bit, Label* slow) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  // Scratch registers:
+  //   map - used to hold the map of the receiver.
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing
+  // into string objects works as intended.
+  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
+  __ j(below, slow);
+
+  // Check bit field.
+  __ testb(
+      FieldOperand(map, Map::kBitFieldOffset),
+      Immediate((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+  __ j(not_zero, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register elements,
+                                  Register scratch, Register result,
+                                  Label* not_fast_array, Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  //   scratch - used to hold elements of the receiver and the loaded value.
+
+  __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
+    __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
+                   Heap::kFixedArrayMapRootIndex);
+    __ j(not_equal, not_fast_array);
+  } else {
+    __ AssertFastElements(elements);
+  }
+  // Check that the key (index) is within bounds.
+  __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
+  // Unsigned comparison rejects negative indices.
+  __ j(above_equal, out_of_range);
+  // Fast case: Do the load.
+  SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
+  __ movp(scratch, FieldOperand(elements, index.reg, index.scale,
+                                FixedArray::kHeaderSize));
+  __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
+  // In case the loaded value is the_hole, we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, out_of_range);
+  if (!result.is(scratch)) {
+    __ movp(result, scratch);
+  }
+}
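+
+// The fast path above is roughly equivalent to (a sketch in C++ terms):
+//
+//   FixedArray* elements = receiver->elements();
+//   if (elements->map() != fixed_array_map) goto not_fast_array;
+//   if ((uint32_t)index >= (uint32_t)elements->length()) goto out_of_range;
+//   result = elements->get(index);
+//   if (result == the_hole) goto out_of_range;  // defer to GetProperty
+//
+// A single unsigned bounds compare also rejects negative indices.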
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map, Register hash,
+                                 Label* index_string, Label* not_unique) {
+  // Register use:
+  //   key - holds the key and is unchanged. Assumed to be non-smi.
+  // Scratch registers:
+  //   map - used to hold the map of the key.
+  //   hash - used to hold the hash of the key.
+  Label unique;
+  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+  __ j(above, not_unique);
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ j(equal, &unique);
+
+  // Is the string an array index, with cached numeric value?
+  __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
+  __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
+  __ j(zero, index_string);  // The value in hash is used at jump target.
+
+  // Is the string internalized? We already know it's a string so a single
+  // bit test is enough.
+  STATIC_ASSERT(kNotInternalizedTag != 0);
+  __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
+           Immediate(kIsNotInternalizedMask));
+  __ j(not_zero, not_unique);
+
+  __ bind(&unique);
+}
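+
+// The checks above, sketched:
+//
+//   if (instance_type > LAST_UNIQUE_NAME_TYPE) goto not_unique;
+//   if (instance_type == LAST_UNIQUE_NAME_TYPE) goto unique;  // a symbol
+//   // The key is a string:
+//   if ((hash_field & kContainsCachedArrayIndexMask) == 0) goto index_string;
+//   if (instance_type & kIsNotInternalizedMask) goto not_unique;
+//   // Fall through: an internalized string, i.e. a unique name.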
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // The return address is on the stack.
+  Label slow, check_name, index_smi, index_name, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+  DCHECK(receiver.is(rdx));
+  DCHECK(key.is(rcx));
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_name);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, rax,
+                                 Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(rax, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, rax, rbx, rax, NULL, &slow);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+  __ ret(0);
+
+  __ bind(&check_number_dictionary);
+  __ SmiToInteger32(rbx, key);
+  __ movp(rax, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  // Check whether the elements is a number dictionary.
+  // rbx: key as untagged int32
+  // rax: elements
+  __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(not_equal, &slow);
+  __ LoadFromNumberDictionary(&slow, rax, key, rbx, r9, rdi, rax);
+  __ ret(0);
+
+  __ bind(&slow);
+  // Slow case: Jump to runtime.
+  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_name);
+  GenerateKeyNameCheck(masm, key, rax, rbx, &index_name, &slow);
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, rax, Map::kHasNamedInterceptor,
+                                 &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary, leaving the result in key.
+  __ movp(rbx, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
+                 Heap::kHashTableMapRootIndex);
+  __ j(equal, &probe_dictionary);
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ movl(rax, rbx);
+  __ shrl(rax, Immediate(KeyedLookupCache::kMapHashShift));
+  __ movl(rdi, FieldOperand(key, String::kHashFieldOffset));
+  __ shrl(rdi, Immediate(String::kHashShift));
+  __ xorp(rax, rdi);
+  int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
+  __ andp(rax, Immediate(mask));
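+  // In other words (a sketch): index = ((map >> kMapHashShift) ^
+  //     (hash_field >> String::kHashShift)) & kCapacityMask & kHashMask.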
+
+  // Load the key (consisting of map and internalized string) from the cache
+  // and check for a match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(masm->isolate());
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ movp(rdi, rax);
+    __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
+    __ LoadAddress(kScratchRegister, cache_keys);
+    int off = kPointerSize * i * 2;
+    __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
+    __ j(not_equal, &try_next_entry);
+    __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+    __ j(equal, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
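+  // Note: each cache_keys entry is a (map, name) pair, i.e. two pointers,
+  // which is why the index is scaled by 1 << (kPointerSizeLog2 + 1) above.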
+
+  int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
+  __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
+  __ j(not_equal, &slow);
+  __ cmpp(key, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
+  __ j(not_equal, &slow);
+
+  // Get field offset, which is a 32-bit integer.
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    if (i != 0) {
+      __ addl(rax, Immediate(i));
+    }
+    __ LoadAddress(kScratchRegister, cache_field_offsets);
+    __ movl(rdi, Operand(kScratchRegister, rax, times_4, 0));
+    __ movzxbp(rax, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
+    __ subp(rdi, rax);
+    __ j(above_equal, &property_array_property);
+    if (i != 0) {
+      __ jmp(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ bind(&load_in_object_property);
+  __ movzxbp(rax, FieldOperand(rbx, Map::kInstanceSizeOffset));
+  __ addp(rax, rdi);
+  __ movp(rax, FieldOperand(receiver, rax, times_pointer_size, 0));
+  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+  __ ret(0);
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ movp(rax, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  __ movp(rax,
+          FieldOperand(rax, rdi, times_pointer_size, FixedArray::kHeaderSize));
+  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+  __ ret(0);
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // rbx: elements
+
+  __ movp(rax, FieldOperand(receiver, JSObject::kMapOffset));
+  __ movb(rax, FieldOperand(rax, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, rax, &slow);
+
+  GenerateDictionaryLoad(masm, &slow, rbx, key, rax, rdi, rax);
+  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+  __ ret(0);
+
+  __ bind(&index_name);
+  __ IndexFromHash(rbx, key);
+  __ jmp(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register scratch = rbx;
+  Register result = rax;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ ret(0);
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(receiver.is(rdx));
+  DCHECK(key.is(rcx));
+  DCHECK(value.is(rax));
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(fast_object);
+  // rbx: receiver's elements array (a FixedArray)
+  // receiver is a JSArray.
+  // r9: map of receiver
+  if (check_map == kCheckMap) {
+    __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+    __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+    __ j(not_equal, fast_double);
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole, because
+  // there may be a callback on the element.
+  Label holecheck_passed1;
+  __ movp(kScratchRegister,
+          FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize));
+  __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
+  __ j(not_equal, &holecheck_passed1);
+  __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
+
+  __ bind(&holecheck_passed1);
+
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ leal(rdi, Operand(key, 1));
+    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
+  }
+  // It's irrelevant whether the array is smi-only or not when writing a smi.
+  __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
+          value);
+  __ ret(0);
+
+  __ bind(&non_smi_value);
+  // Writing a non-smi, check whether array allows non-smi elements.
+  // r9: receiver's map
+  __ CheckFastObjectElements(r9, &transition_smi_elements);
+
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ leal(rdi, Operand(key, 1));
+    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
+  }
+  __ movp(FieldOperand(rbx, key, times_pointer_size, FixedArray::kHeaderSize),
+          value);
+  __ movp(rdx, value);  // Preserve the value which is returned.
+  __ RecordWriteArray(rbx, rdx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ ret(0);
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    // rdi: elements array's map
+    __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+    __ j(not_equal, slow);
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
+  __ cmpl(FieldOperand(rbx, key, times_8, offset), Immediate(kHoleNanUpper32));
+  __ j(not_equal, &fast_double_without_map_check);
+  __ JumpIfDictionaryInPrototypeChain(receiver, rdi, kScratchRegister, slow);
+
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, rbx, key, xmm0,
+                                 &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ leal(rdi, Operand(key, 1));
+    __ Integer32ToSmiField(FieldOperand(receiver, JSArray::kLengthOffset), rdi);
+  }
+  __ ret(0);
+
+  __ bind(&transition_smi_elements);
+  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  // Transition the array appropriately depending on the value type.
+  __ movp(r9, FieldOperand(value, HeapObject::kMapOffset));
+  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
+  __ j(not_equal, &non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS, rbx, rdi, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   rbx, mode, slow);
+  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, rbx,
+                                         rdi, slow);
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, rbx, mode, slow);
+  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ movp(rbx, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         rbx, rdi, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
+                                                      value, rbx, mode, slow);
+  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+}
+
+
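+// Generic keyed-store dispatcher. A rough sketch of the fast paths it
+// selects, in JavaScript terms (assuming a plain fast-mode array a):
+//
+//   a[i] = v  with i <  a.length  -> fast_object / fast_double
+//   a[i] = v  with i == a.length  -> fast_object_grow / fast_double_grow
+//                                    (capacity permitting; length += 1)
+//
+// Everything else (non-smi keys, holes, out-of-range indices, receivers
+// that are observed or need access checks) goes to the runtime via slow.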
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictMode strict_mode) {
+  // Return address is on the stack.
+  Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  DCHECK(receiver.is(rdx));
+  DCHECK(key.is(rcx));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow_with_tagged_index);
+  // Get the map from the receiver.
+  __ movp(r9, FieldOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
+           Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ j(not_zero, &slow_with_tagged_index);
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow_with_tagged_index);
+  __ SmiToInteger32(key, key);
+
+  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
+  __ j(equal, &array);
+  // Check that the object is some kind of JSObject.
+  __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
+  __ j(below, &slow);
+
+  // Object case: Check key against length in the elements array.
+  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds.
+  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
+  // rbx: FixedArray
+  __ j(above, &fast_object);
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+  __ Integer32ToSmi(key, key);
+  __ bind(&slow_with_tagged_index);
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+  // Execution never returns here.
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // receiver is a JSArray.
+  // rbx: receiver's elements array (a FixedArray)
+  // flags: result of smi-comparing receiver.length() and key
+  __ j(not_equal, &slow);  // do not leave holes in the array
+  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), key);
+  __ j(below_equal, &slow);
+  // Dispatch on the elements map: FixedArray vs. FixedDoubleArray.
+  __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
+  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
+  __ j(not_equal, &check_if_double_array);
+  __ jmp(&fast_object_grow);
+
+  __ bind(&check_if_double_array);
+  // rdi: elements array's map
+  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
+  __ j(not_equal, &slow);
+  __ jmp(&fast_double_grow);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+  __ bind(&array);
+  // receiver is a JSArray.
+  __ movp(rbx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array, compute the
+  // address to store into and fall through to fast case.
+  __ SmiCompareInteger32(FieldOperand(receiver, JSArray::kLengthOffset), key);
+  __ j(below_equal, &extra);
+
+  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
+                                  kCheckMap, kDontIncrementLength);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength);
+}
+
+
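+// Sloppy-mode arguments objects alias their mapped entries with the
+// function's context slots; illustrative example:
+//
+//   function f(x) { arguments[0] = 42; return x; }  // f(1) returns 42
+//
+// The parameter map is a FixedArray of the form
+// [context, backing store, slot_0 .. slot_n-1]; a non-hole slot_i holds the
+// context slot index that element i aliases, which is the Operand this
+// helper returns. Holes divert to the unmapped (backing store) lookup.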
+static Operand GenerateMappedArgumentsLookup(
+    MacroAssembler* masm, Register object, Register key, Register scratch1,
+    Register scratch2, Register scratch3, Label* unmapped_case,
+    Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the elements
+  // map check later, we do not need to check for interceptors or
+  // whether it requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+  __ j(below, slow_case);
+
+  // Check that the key is a non-negative smi.
+  Condition check = masm->CheckNonNegativeSmi(key);
+  __ j(NegateCondition(check), slow_case);
+
+  // Load the elements (the parameter map) into scratch1 and check its map;
+  // bail out to the slow case on mismatch. Later jumps to the unmapped
+  // lookup rely on the parameter map being left in scratch1.
+  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+  __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments.
+  __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+  __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
+  __ cmpp(key, scratch2);
+  __ j(greater_equal, unmapped_case);
+
+  // Load element index and check whether it is the hole.
+  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+  __ SmiToInteger64(scratch3, key);
+  __ movp(scratch2,
+          FieldOperand(scratch1, scratch3, times_pointer_size, kHeaderSize));
+  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  __ j(equal, unmapped_case);
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
+  __ SmiToInteger64(scratch3, scratch2);
+  return FieldOperand(scratch1, scratch3, times_pointer_size,
+                      Context::kHeaderSize);
+}
+
+
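+// Note: both the key and the backing store length loaded below are tagged
+// smis, so cmpp compares them directly; smi tagging preserves order for the
+// non-negative values allowed here.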
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                               Register key,
+                                               Register parameter_map,
+                                               Register scratch,
+                                               Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map. The parameter_map register
+  // must be loaded with the parameter map of the arguments object and is
+  // overwritten.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+  __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+  __ cmpp(key, scratch);
+  __ j(greater_equal, slow_case);
+  __ SmiToInteger64(scratch, key);
+  return FieldOperand(backing_store, scratch, times_pointer_size,
+                      FixedArray::kHeaderSize);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+  // The return address is on the stack.
+  Label slow, notin;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(receiver.is(rdx));
+  DCHECK(name.is(rcx));
+  DCHECK(value.is(rax));
+
+  Operand mapped_location = GenerateMappedArgumentsLookup(
+      masm, receiver, name, rbx, rdi, r8, &notin, &slow);
+  __ movp(mapped_location, value);
+  __ leap(r9, mapped_location);
+  __ movp(r8, value);
+  __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                 INLINE_SMI_CHECK);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in rbx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, name, rbx, rdi, &slow);
+  __ movp(unmapped_location, value);
+  __ leap(r9, unmapped_location);
+  __ movp(r8, value);
+  __ RecordWrite(rbx, r9, r8, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                 INLINE_SMI_CHECK);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = rax;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+  Label slow;
+
+  __ movp(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
+                                   JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), rbx, rdi, rax);
+  __ ret(0);
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+// A register that isn't one of the parameters to the load IC.
+static const Register LoadIC_TempRegister() { return rbx; }
+
+
+static const Register KeyedLoadIC_TempRegister() { return rbx; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is on the stack.
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->load_miss(), 1);
+
+  __ PopReturnAddressTo(LoadIC_TempRegister());
+  __ Push(LoadDescriptor::ReceiverRegister());  // receiver
+  __ Push(LoadDescriptor::NameRegister());      // name
+  __ PushReturnAddressFrom(LoadIC_TempRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is on the stack.
+
+  __ PopReturnAddressTo(LoadIC_TempRegister());
+  __ Push(LoadDescriptor::ReceiverRegister());  // receiver
+  __ Push(LoadDescriptor::NameRegister());      // name
+  __ PushReturnAddressFrom(LoadIC_TempRegister());
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is on the stack.
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->keyed_load_miss(), 1);
+
+  __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
+  __ Push(LoadDescriptor::ReceiverRegister());  // receiver
+  __ Push(LoadDescriptor::NameRegister());      // name
+  __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is on the stack.
+
+  __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
+  __ Push(LoadDescriptor::ReceiverRegister());  // receiver
+  __ Push(LoadDescriptor::NameRegister());      // name
+  __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // The return address is on the stack.
+
+  // Probe the stub cache (receiver and name are in registers).
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, flags, false, StoreDescriptor::ReceiverRegister(),
+      StoreDescriptor::NameRegister(), rbx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+
+  DCHECK(!rbx.is(receiver) && !rbx.is(name) && !rbx.is(value));
+
+  __ PopReturnAddressTo(rbx);
+  __ Push(receiver);
+  __ Push(name);
+  __ Push(value);
+  __ PushReturnAddressFrom(rbx);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Register dictionary = rbx;
+
+  Label miss;
+
+  __ movp(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r8, r9);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1);
+  __ ret(0);
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1);
+  GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
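+// An inlined smi-check site looks roughly like this (a sketch, pieced
+// together from the patching logic below):
+//
+//   test reg, kSmiTagMask   ; sets ZF, always clears CF
+//   j<cc> short <target>    ; the patched jump, <delta> bytes before ...
+//   ...
+//   call <ic stub>
+//   test al, <delta>        ; marker; the imm8 records the distance back
+//
+// Since CF is always clear after a test, jc is never taken and jnc is always
+// taken, so the carry conditions disable the check; patching to jz/jnz makes
+// the smi-tag test actually steer control flow.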
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  if (*test_instruction_address != Assembler::kTestAlByte) {
+    DCHECK(*test_instruction_address == Assembler::kNopByte);
+    return;
+  }
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the map check instruction, and the
+  // condition code to use at the patched jump.
+  uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, test=%p, delta=%d\n", address,
+           test_instruction_address, delta);
+  }
+
+  // Patch with a short conditional jump. Enabling means switching from a
+  // short jump-if-carry/not-carry to jump-if-zero/not-zero, and disabling
+  // is the reverse operation.
+  Address jmp_address = test_instruction_address - delta;
+  DCHECK((check == ENABLE_INLINED_SMI_CHECK)
+             ? (*jmp_address == Assembler::kJncShortOpcode ||
+                *jmp_address == Assembler::kJcShortOpcode)
+             : (*jmp_address == Assembler::kJnzShortOpcode ||
+                *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc =
+      (check == ENABLE_INLINED_SMI_CHECK)
+          ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+          : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x64/stub-cache-x64.cc b/src/ic/x64/stub-cache-x64.cc
new file mode 100644
index 0000000..a54ddca
--- /dev/null
+++ b/src/ic/x64/stub-cache-x64.cc
@@ -0,0 +1,153 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, bool leave_frame,
+                       StubCache::Table table, Register receiver, Register name,
+                       // The offset is scaled by 4, based on
+                       // kCacheIndexShift, which is two bits
+                       Register offset) {
+  // The offset is pre-scaled by 4 (kCacheIndexShift is two bits); when that
+  // is less than the pointer size we need an extra scale factor of 2.
+  DCHECK(kPointerSize == kInt64Size
+             ? kPointerSizeLog2 == StubCache::kCacheIndexShift + 1
+             : kPointerSizeLog2 == StubCache::kCacheIndexShift);
+  ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
+
+  DCHECK_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
+  // The offset register holds the entry offset times four (due to masking
+  // and shifting optimizations).
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  Label miss;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ leap(offset, Operand(offset, offset, times_2, 0));
+
+  __ LoadAddress(kScratchRegister, key_offset);
+
+  // Check that the key in the entry matches the name.
+  // Multiply entry offset by 16 to get the entry address. Since the
+  // offset register already holds the entry offset times four, multiply
+  // by a further four.
+  __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
+  __ j(not_equal, &miss);
+
+  // Get the map entry from the cache.
+  // Use key_offset + kPointerSize * 2, rather than loading map_offset.
+  __ movp(kScratchRegister,
+          Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
+  __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ j(not_equal, &miss);
+
+  // Get the code entry from the cache.
+  __ LoadAddress(kScratchRegister, value_offset);
+  __ movp(kScratchRegister, Operand(kScratchRegister, offset, scale_factor, 0));
+
+  // Check that the flags match what we're looking for.
+  __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
+  __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
+  __ cmpl(offset, Immediate(flags));
+  __ j(not_equal, &miss);
+
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ jmp(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ jmp(&miss);
+  }
+#endif
+
+  if (leave_frame) __ leave();
+
+  // Jump to the first instruction in the code stub.
+  __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
+  __ jmp(kScratchRegister);
+
+  __ bind(&miss);
+}
+
+
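+// Hash scheme implemented by the probes below (32-bit arithmetic):
+//
+//   primary   = (name->hash_field + receiver->map) ^ flags,  masked
+//   secondary = (primary - name + flags),                    masked
+//
+// The masks keep (table_size - 1) << kCacheIndexShift bits, so the result
+// is already scaled for use as a table offset.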
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+                              bool leave_frame, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Isolate* isolate = masm->isolate();
+  Label miss;
+  USE(extra);   // The register extra is not used on the X64 platform.
+  USE(extra2);  // The register extra2 is not used on the X64 platform.
+  USE(extra3);  // The register extra3 is not used on the X64 platform.
+  // Make sure that code is valid. The multiplying code relies on the
+  // entry size being 3 * kPointerSize.
+  DCHECK(sizeof(Entry) == 3 * kPointerSize);
+
+  // Make sure the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  DCHECK(!scratch.is(receiver));
+  DCHECK(!scratch.is(name));
+
+  // Check that the scratch register is valid and that extra2 and extra3 are
+  // unused.
+  DCHECK(!scratch.is(no_reg));
+  DCHECK(extra2.is(no_reg));
+  DCHECK(extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
+  // Use only the low 32 bits of the map pointer.
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xorp(scratch, Immediate(flags));
+  // We mask out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.  Also in the two 'and' instructions below.
+  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+             scratch);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
+  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xorp(scratch, Immediate(flags));
+  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
+  __ subl(scratch, name);
+  __ addl(scratch, Immediate(flags));
+  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+             scratch);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
diff --git a/src/ic/x87/OWNERS b/src/ic/x87/OWNERS
new file mode 100644
index 0000000..dd9998b
--- /dev/null
+++ b/src/ic/x87/OWNERS
@@ -0,0 +1 @@
+weiliang.lin@intel.com
diff --git a/src/ic/x87/access-compiler-x87.cc b/src/ic/x87/access-compiler-x87.cc
new file mode 100644
index 0000000..9456ec8
--- /dev/null
+++ b/src/ic/x87/access-compiler-x87.cc
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ jmp(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, ebx, eax, edi, no_reg};
+  return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(ebx.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  static Register registers[] = {receiver, name, ebx, edi, no_reg};
+  return registers;
+}
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/ic/x87/handler-compiler-x87.cc b/src/ic/x87/handler-compiler-x87.cc
new file mode 100644
index 0000000..e706998
--- /dev/null
+++ b/src/ic/x87/handler-compiler-x87.cc
@@ -0,0 +1,855 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> getter) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ mov(receiver,
+               FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  }
+  __ ret(0);
+}
+
+
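+// Proves at run time that |name| is absent from a dictionary-mode receiver,
+// so a "property not found" handler stays valid without a stable map to
+// check against. Bails out to |miss_label| if the receiver might intercept
+// the access, requires access checks, or if the lookup finds the name.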
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(name->IsUniqueName());
+  DCHECK(!receiver.is(scratch0));
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1);
+
+  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
+            kInterceptorOrAccessCheckNeededMask);
+  __ j(not_zero, miss_label);
+
+  // Check that receiver is a JSObject.
+  __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
+  __ j(below, miss_label);
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+  // Check that the properties array is a dictionary.
+  __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
+         Immediate(masm->isolate()->factory()->hash_table_map()));
+  __ j(not_equal, miss_label);
+
+  Label done;
+  NameDictionaryLookupStub::GenerateNegativeLookup(masm, miss_label, &done,
+                                                   properties, name, scratch1);
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(masm->isolate()->native_context()->get(index)));
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+  __ mov(scratch, Operand(esi, offset));
+  __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
+  __ j(not_equal, miss);
+
+  // Load its initial map. The global functions all have initial maps.
+  __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+  // Load the prototype from the initial map.
+  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register scratch1,
+    Register scratch2, Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mov(eax, scratch1);
+  __ ret(0);
+}
+
+
+// Generate call to api function.
+// This function uses push() to generate smaller, faster code than
+// the version above. It is an optimization that will be removed
+// when API call ICs are generated in hydrogen.
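+//
+// On entry the return address is on top of the stack. The code below
+// re-arranges the stack so that the receiver is deepest, the arguments
+// follow it, and the return address ends up back on top: the JSFunction
+// calling convention that CallApiFunctionStub expects.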
+void PropertyHandlerCompiler::GenerateFastApiCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch_in,
+    bool is_store, int argc, Register* values) {
+  // Save the return address.
+  __ pop(scratch_in);
+  // receiver
+  __ push(receiver);
+  // Write the arguments to stack frame.
+  for (int i = 0; i < argc; i++) {
+    Register arg = values[argc - 1 - i];
+    DCHECK(!receiver.is(arg));
+    DCHECK(!scratch_in.is(arg));
+    __ push(arg);
+  }
+  __ push(scratch_in);
+  // Stack now matches the JSFunction ABI.
+  DCHECK(optimization.is_simple_api_call());
+
+  // ABI for CallApiFunctionStub.
+  Register callee = eax;
+  Register call_data = ebx;
+  Register holder = ecx;
+  Register api_function_address = edx;
+  Register scratch = edi;  // scratch_in is no longer valid.
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Move(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ LoadHeapObject(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ LoadHeapObject(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ mov(scratch, api_call_info);
+    __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
+  } else {
+    __ mov(call_data, call_data_obj);
+  }
+
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  __ mov(api_function_address, Immediate(function_address));
+
+  // Jump to stub.
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<PropertyCell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
+  if (masm->serializer_enabled()) {
+    __ mov(scratch, Immediate(cell));
+    __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
+           Immediate(the_hole));
+  } else {
+    __ cmp(Operand::ForCell(cell), Immediate(the_hole));
+  }
+  __ j(not_equal, miss);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- esp[0] : return address
+  // -----------------------------------
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ push(value());
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ mov(receiver,
+               FieldOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ push(receiver);
+      __ push(value());
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ pop(eax);
+
+    // Restore context register.
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+  }
+  __ ret(0);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+  __ push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  Register scratch = name;
+  __ mov(scratch, Immediate(interceptor));
+  __ push(scratch);
+  __ push(receiver);
+  __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+  __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+                           NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+
+  DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(value);
+  __ push(ebx);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ bind(label);
+    __ mov(this->name(), Immediate(name));
+  }
+}
+
+
+// receiver_reg is preserved on jumps to miss_label, but may be destroyed if
+// the store is successful.
+void NamedStoreHandlerCompiler::GenerateStoreTransition(
+    Handle<Map> transition, Handle<Name> name, Register receiver_reg,
+    Register storage_reg, Register value_reg, Register scratch1,
+    Register scratch2, Register unused, Label* miss_label, Label* slow) {
+  int descriptor = transition->LastAdded();
+  DescriptorArray* descriptors = transition->instance_descriptors();
+  PropertyDetails details = descriptors->GetDetails(descriptor);
+  Representation representation = details.representation();
+  DCHECK(!representation.IsNone());
+
+  if (details.type() == CONSTANT) {
+    Handle<Object> constant(descriptors->GetValue(descriptor), isolate());
+    __ CmpObject(value_reg, constant);
+    __ j(not_equal, miss_label);
+  } else if (representation.IsSmi()) {
+    __ JumpIfNotSmi(value_reg, miss_label);
+  } else if (representation.IsHeapObject()) {
+    __ JumpIfSmi(value_reg, miss_label);
+    HeapType* field_type = descriptors->GetFieldType(descriptor);
+    HeapType::Iterator<Map> it = field_type->Classes();
+    if (!it.Done()) {
+      Label do_store;
+      while (true) {
+        __ CompareMap(value_reg, it.Current());
+        it.Advance();
+        if (it.Done()) {
+          __ j(not_equal, miss_label);
+          break;
+        }
+        __ j(equal, &do_store, Label::kNear);
+      }
+      __ bind(&do_store);
+    }
+  } else if (representation.IsDouble()) {
+    Label do_store, heap_number;
+    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow, MUTABLE);
+
+    __ JumpIfNotSmi(value_reg, &heap_number);
+    __ SmiUntag(value_reg);
+    __ push(value_reg);
+    __ fild_s(Operand(esp, 0));
+    __ pop(value_reg);
+    __ SmiTag(value_reg);
+    __ jmp(&do_store);
+
+    __ bind(&heap_number);
+    __ CheckMap(value_reg, isolate()->factory()->heap_number_map(), miss_label,
+                DONT_DO_SMI_CHECK);
+    __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
+
+    __ bind(&do_store);
+    __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
+  }
+
+  // Stub never generated for objects that require access checks.
+  DCHECK(!transition->is_access_check_needed());
+
+  // Perform map transition for the receiver if necessary.
+  if (details.type() == FIELD &&
+      Map::cast(transition->GetBackPointer())->unused_property_fields() == 0) {
+    // The properties must be extended before we can store the value.
+    // We jump to a runtime call that extends the properties array.
+    __ pop(scratch1);  // Return address.
+    __ push(receiver_reg);
+    __ push(Immediate(transition));
+    __ push(value_reg);
+    __ push(scratch1);
+    __ TailCallExternalReference(
+        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
+                          isolate()),
+        3, 1);
+    return;
+  }
+
+  // Update the map of the object.
+  __ mov(scratch1, Immediate(transition));
+  __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
+
+  // Update the write barrier for the map field.
+  __ RecordWriteField(receiver_reg, HeapObject::kMapOffset, scratch1, scratch2,
+                      kDontSaveFPRegs, OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+
+  if (details.type() == CONSTANT) {
+    DCHECK(value_reg.is(eax));
+    __ ret(0);
+    return;
+  }
+
+  int index = transition->instance_descriptors()->GetFieldIndex(
+      transition->LastAdded());
+
+  // Adjust for the number of properties stored in the object. Even in the
+  // face of a transition we can use the old map here because the size of the
+  // object and the number of in-object properties are not going to change.
+
+  SmiCheck smi_check =
+      representation.IsTagged() ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
+  // TODO(verwaest): Share this code as a code stub.
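+  // |index| is now relative to the start of the properties backing store;
+  // negative values address the in-object fields, which sit at the end of
+  // the instance (hence instance_size + index * kPointerSize below).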
+  if (index < 0) {
+    // Set the property straight into the object.
+    int offset = transition->instance_size() + (index * kPointerSize);
+    if (representation.IsDouble()) {
+      __ mov(FieldOperand(receiver_reg, offset), storage_reg);
+    } else {
+      __ mov(FieldOperand(receiver_reg, offset), value_reg);
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(receiver_reg, offset, storage_reg, scratch1,
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+    }
+  } else {
+    // Write to the properties array.
+    int offset = index * kPointerSize + FixedArray::kHeaderSize;
+    // Get the properties array (optimistically).
+    __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
+    if (representation.IsDouble()) {
+      __ mov(FieldOperand(scratch1, offset), storage_reg);
+    } else {
+      __ mov(FieldOperand(scratch1, offset), value_reg);
+    }
+
+    if (!representation.IsSmi()) {
+      // Update the write barrier for the array address.
+      if (!representation.IsDouble()) {
+        __ mov(storage_reg, value_reg);
+      }
+      __ RecordWriteField(scratch1, offset, storage_reg, receiver_reg,
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET, smi_check);
+    }
+  }
+
+  // Return the value (register eax).
+  DCHECK(value_reg.is(eax));
+  __ ret(0);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreField(LookupIterator* lookup,
+                                                   Register value_reg,
+                                                   Label* miss_label) {
+  DCHECK(lookup->representation().IsHeapObject());
+  __ JumpIfSmi(value_reg, miss_label);
+  HeapType::Iterator<Map> it = lookup->GetFieldType()->Classes();
+  Label do_store;
+  while (true) {
+    __ CompareMap(value_reg, it.Current());
+    it.Advance();
+    if (it.Done()) {
+      __ j(not_equal, miss_label);
+      break;
+    }
+    __ j(equal, &do_store, Label::kNear);
+  }
+  __ bind(&do_store);
+
+  StoreFieldStub stub(isolate(), lookup->GetFieldIndex(),
+                      lookup->representation());
+  GenerateTailCall(masm(), stub.GetCode());
+}
+
+
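+// Walks the prototype chain from the receiver's map up to the holder's map,
+// emitting a map check per hop for fast and global objects, or a dictionary
+// negative lookup for dictionary-mode objects, plus access and property-cell
+// checks for global proxies and global objects. Returns the register that
+// ends up holding the holder.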
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss,
+    PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+  // Make sure there's no overlap between holder and object registers.
+  DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type()->IsConstant())
+    current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() ||
+             current->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+
+      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      bool in_new_space = heap()->InNewSpace(*prototype);
+      // Two possible reasons for loading the prototype from the map:
+      // (1) Can't store references to new space in code.
+      // (2) Handler is shared for all receivers with the same prototype
+      //     map (but not necessarily the same prototype instance).
+      bool load_prototype_from_map = in_new_space || depth == 1;
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+      }
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      // This allows us to install generated handlers for accesses to the
+      // global proxy (as opposed to using slow ICs). See corresponding code
+      // in LookupForRead().
+      if (current_map->IsJSGlobalProxyMap()) {
+        __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      }
+
+      if (load_prototype_from_map) {
+        // Save the map in scratch1 for later.
+        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+      }
+
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      if (load_prototype_from_map) {
+        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
+      } else {
+        __ mov(reg, prototype);
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    // Check the holder map.
+    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  DCHECK(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ jmp(&success);
+    __ bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ jmp(&success);
+    GenerateRestoreName(miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<ExecutableAccessorInfo> callback) {
+  // Insert additional parameters into the stack frame above return address.
+  DCHECK(!scratch3().is(reg));
+  __ pop(scratch3());  // Get return address to place it below.
+
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  __ push(receiver());  // receiver
+  // Push data from ExecutableAccessorInfo.
+  if (isolate()->heap()->InNewSpace(callback->data())) {
+    DCHECK(!scratch2().is(reg));
+    __ mov(scratch2(), Immediate(callback));
+    __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
+  } else {
+    __ push(Immediate(Handle<Object>(callback->data(), isolate())));
+  }
+  __ push(Immediate(isolate()->factory()->undefined_value()));  // ReturnValue
+  // ReturnValue default value
+  __ push(Immediate(isolate()->factory()->undefined_value()));
+  __ push(Immediate(reinterpret_cast<int>(isolate())));
+  __ push(reg);  // holder
+
+  // Save a pointer to where we pushed the arguments. This will be
+  // passed as the const PropertyAccessorInfo& to the C++ callback.
+  __ push(esp);
+
+  __ push(name());  // name
+
+  __ push(scratch3());  // Restore return address.
+
+  // ABI for CallApiGetter.
+  Register getter_address = ApiGetterDescriptor::function_address();
+  Address function_address = v8::ToCData<Address>(callback->getter());
+  __ mov(getter_address, Immediate(function_address));
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ LoadObject(eax, value);
+  __ ret(0);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it is different from the
+  // holder and it is needed should the interceptor return without any result.
+  // The ACCESSOR case needs the receiver to be passed into C++ code, the FIELD
+  // case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameScope frame_scope(masm(), StackFrame::INTERNAL);
+
+    if (must_preserve_receiver_reg) {
+      __ push(receiver());
+    }
+    __ push(holder_reg);
+    __ push(this->name());
+
+    // Invoke an interceptor. Note: the map checks from the receiver to the
+    // interceptor's holder have been compiled before (see the caller of this
+    // method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if the interceptor provided a value for the property. If so,
+    // return immediately.
+    Label interceptor_failed;
+    __ cmp(eax, factory()->no_interceptor_result_sentinel());
+    __ j(equal, &interceptor_failed);
+    frame_scope.GenerateLeaveFrame();
+    __ ret(0);
+
+    // Clobber registers when generating debug-code to provoke errors.
+    __ bind(&interceptor_failed);
+    if (FLAG_debug_code) {
+      __ mov(receiver(), Immediate(bit_cast<int32_t>(kZapValue)));
+      __ mov(holder_reg, Immediate(bit_cast<int32_t>(kZapValue)));
+      __ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
+    }
+
+    __ pop(this->name());
+    __ pop(holder_reg);
+    if (must_preserve_receiver_reg) {
+      __ pop(receiver());
+    }
+
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  // Call the runtime system to load the interceptor.
+  __ pop(scratch2());  // save old return address
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+  __ push(scratch2());  // restore old return address
+
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(
+      ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  Register holder_reg = Frontend(receiver(), name);
+
+  __ pop(scratch1());  // remove the return address
+  __ push(receiver());
+  __ push(holder_reg);
+  __ Push(callback);
+  __ Push(name);
+  __ push(value());
+  __ push(scratch1());  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+    Handle<Name> name) {
+  __ pop(scratch1());  // remove the return address
+  __ push(receiver());
+  __ push(this->name());
+  __ push(value());
+  __ push(scratch1());  // restore return address
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property = ExternalReference(
+      IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+
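+// Global property loads read through a PropertyCell. Deleting a configurable
+// global property leaves the hole in its cell, so the hole must be treated
+// as a miss here.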
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+
+  FrontendHeader(receiver(), name, &miss);
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  if (masm()->serializer_enabled()) {
+    __ mov(result, Immediate(cell));
+    __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
+  } else {
+    __ mov(result, Operand::ForCell(cell));
+  }
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ cmp(result, factory()->the_hole_value());
+    __ j(equal, &miss);
+  } else if (FLAG_debug_code) {
+    __ cmp(result, factory()->the_hole_value());
+    __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1);
+  // The code above already loads the result into the return register.
+  __ ret(0);
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/ic/x87/ic-compiler-x87.cc b/src/ic/x87/ic-compiler-x87.cc
new file mode 100644
index 0000000..20b47e7
--- /dev/null
+++ b/src/ic/x87/ic-compiler-x87.cc
@@ -0,0 +1,128 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                                    StrictMode strict_mode) {
+  // Return address is on the stack.
+  DCHECK(!ebx.is(StoreDescriptor::ReceiverRegister()) &&
+         !ebx.is(StoreDescriptor::NameRegister()) &&
+         !ebx.is(StoreDescriptor::ValueRegister()));
+  __ pop(ebx);
+  __ push(StoreDescriptor::ReceiverRegister());
+  __ push(StoreDescriptor::NameRegister());
+  __ push(StoreDescriptor::ValueRegister());
+  __ push(Immediate(Smi::FromInt(strict_mode)));
+  __ push(ebx);  // return address
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+                                                    Handle<Name> name,
+                                                    Code::StubType type,
+                                                    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // In case we are compiling an IC for dictionary loads and stores, just
+    // check whether the name is unique.
+    if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
+      __ movzx_b(tmp, FieldOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+    } else {
+      __ cmp(this->name(), Immediate(name));
+      __ j(not_equal, &miss);
+    }
+  }
+
+  Label number_case;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+  Register map_reg = scratch1();
+  DCHECK(kind() != Code::KEYED_STORE_IC ||
+         map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      __ cmp(map_reg, map);
+      if (type->Is(HeapType::Number())) {
+        DCHECK(!number_case.is_unused());
+        __ bind(&number_case);
+      }
+      __ j(equal, handlers->at(current));
+    }
+  }
+  DCHECK(number_of_handled_maps != 0);
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+  return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+  __ JumpIfSmi(receiver(), &miss, Label::kNear);
+  __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+  for (int i = 0; i < receiver_maps->length(); ++i) {
+    __ cmp(scratch1(), receiver_maps->at(i));
+    if (transitioned_maps->at(i).is_null()) {
+      __ j(equal, handler_stubs->at(i));
+    } else {
+      Label next_map;
+      __ j(not_equal, &next_map, Label::kNear);
+      __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
+      __ bind(&next_map);
+    }
+  }
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/ic/x87/ic-x87.cc b/src/ic/x87/ic-x87.cc
new file mode 100644
index 0000000..9c090c5
--- /dev/null
+++ b/src/ic/x87/ic-x87.cc
@@ -0,0 +1,986 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
+  __ j(equal, global_object);
+  __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
+  __ j(equal, global_object);
+  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
+  __ j(equal, global_object);
+}
+
+
+// Helper function used to load a property from a dictionary backing
+// storage. This function may fail to load a property even though it is
+// in the dictionary, so code at miss_label must always call a backup
+// property load that is complete. This function is safe to call if
+// name is not internalized, and will jump to the miss_label in that
+// case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
+                                   Register elements, Register name,
+                                   Register r0, Register r1, Register result) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is unchanged.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // Scratch registers:
+  //
+  // r0   - used for the index into the property dictionary
+  //
+  // r1   - used to hold the capacity of the property dictionary.
+  //
+  // result - holds the result on exit.
+
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+                                                   elements, name, r0, r1);
+
+  // If probing finds an entry in the dictionary, r0 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property.
+  __ bind(&done);
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
+  __ j(not_zero, miss_label);
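+  // Note: the details word is a smi; only entries whose TypeField is zero
+  // (plain data properties) take this fast path, everything else misses.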
+
+  // Get the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+}
+
+
+// Helper function used to store a property to a dictionary backing
+// storage. This function may fail to store a property even though it
+// is in the dictionary, so code at miss_label must always call a
+// backup property store that is complete. This function is safe to
+// call if name is not internalized, and will jump to the miss_label in
+// that case. The generated code assumes that the receiver has slow
+// properties, is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
+                                    Register elements, Register name,
+                                    Register value, Register r0, Register r1) {
+  // Register use:
+  //
+  // elements - holds the property dictionary on entry and is clobbered.
+  //
+  // name - holds the name of the property on entry and is unchanged.
+  //
+  // value - holds the value to store and is unchanged.
+  //
+  // r0 - used for index into the property dictionary and is clobbered.
+  //
+  // r1 - used to hold the capacity of the property dictionary and is clobbered.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss_label, &done,
+                                                   elements, name, r0, r1);
+
+  // If probing finds an entry in the dictionary, r0 contains the
+  // index into the dictionary. Check that the value is a normal
+  // property that is not read only.
+  __ bind(&done);
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  const int kTypeAndReadOnlyMask =
+      (PropertyDetails::TypeField::kMask |
+       PropertyDetails::AttributesField::encode(READ_ONLY))
+      << kSmiTagSize;
+  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
+          Immediate(kTypeAndReadOnlyMask));
+  __ j(not_zero, miss_label);
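+  // Both a non-zero TypeField and the READ_ONLY attribute bit force a miss,
+  // so only writable plain data properties are updated in place.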
+
+  // Store the value at the masked, scaled index.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
+  __ mov(Operand(r0, 0), value);
+
+  // Update write barrier. Make sure not to clobber the value.
+  __ mov(r1, value);
+  __ RecordWrite(elements, r0, r1, kDontSaveFPRegs);
+}
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver, Register map,
+                                           int interceptor_bit, Label* slow) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  // Scratch registers:
+  //   map - used to hold the map of the receiver.
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+
+  // Get the map of the receiver.
+  __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  // Check bit field.
+  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
+            (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
+  __ j(not_zero, slow);
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing
+  // into string objects works as intended.
+  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+
+  __ CmpInstanceType(map, JS_OBJECT_TYPE);
+  __ j(below, slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register scratch,
+                                  Register result, Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //   receiver - holds the receiver and is unchanged.
+  //   key - holds the key and is unchanged (must be a smi).
+  // Scratch registers:
+  //   scratch - used to hold elements of the receiver and the loaded value.
+  //   result - holds the result on exit if the load succeeds and
+  //            we fall through.
+
+  __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
+    __ CheckMap(scratch,
+                masm->isolate()->factory()->fixed_array_map(),
+                not_fast_array,
+                DONT_DO_SMI_CHECK);
+  } else {
+    __ AssertFastElements(scratch);
+  }
+  // Check that the key (index) is within bounds.
+  __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
+  __ j(above_equal, out_of_range);
+  // Fast case: Do the load.
+  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
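+  // Given the assert above, scaling the smi key (value << 1) by times_2
+  // yields index * kPointerSize, so the key needs no untagging here.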
+  __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
+  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
+  // In case the loaded value is the_hole, we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ j(equal, out_of_range);
+  if (!result.is(scratch)) {
+    __ mov(result, scratch);
+  }
+}
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if the key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map, Register hash,
+                                 Label* index_string, Label* not_unique) {
+  // Register use:
+  //   key - holds the key and is unchanged. Assumed to be non-smi.
+  // Scratch registers:
+  //   map - used to hold the map of the key.
+  //   hash - used to hold the hash of the key.
+  Label unique;
+  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
+  __ j(above, not_unique);
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ j(equal, &unique);
+
+  // Is the string an array index, with cached numeric value?
+  __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
+  __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
+  __ j(zero, index_string);
+
+  // Is the string internalized? We already know it's a string so a single
+  // bit test is enough.
+  STATIC_ASSERT(kNotInternalizedTag != 0);
+  __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
+            kIsNotInternalizedMask);
+  __ j(not_zero, not_unique);
+
+  __ bind(&unique);
+}
+
+
+static Operand GenerateMappedArgumentsLookup(
+    MacroAssembler* masm, Register object, Register key, Register scratch1,
+    Register scratch2, Label* unmapped_case, Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+  Factory* factory = masm->isolate()->factory();
+
+  // Check that the receiver is a JSObject. Because of the elements
+  // map check later, we do not need to check for interceptors or
+  // whether it requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
+  __ j(below, slow_case);
+
+  // Check that the key is a positive smi.
+  __ test(key, Immediate(0x80000001));
+  __ j(not_zero, slow_case);
+
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+  __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
+  __ sub(scratch2, Immediate(Smi::FromInt(2)));
+  __ cmp(key, scratch2);
+  __ j(above_equal, unmapped_case);
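+  // The parameter map reserves its first two slots for the context and the
+  // arguments backing store, so mapped entries start at slot 2; hence the
+  // length adjustment above and the 2 * kPointerSize skip below.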
+
+  // Load element index and check whether it is the hole.
+  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
+  __ mov(scratch2,
+         FieldOperand(scratch1, key, times_half_pointer_size, kHeaderSize));
+  __ cmp(scratch2, factory->the_hole_value());
+  __ j(equal, unmapped_case);
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  const int kContextOffset = FixedArray::kHeaderSize;
+  __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
+  return FieldOperand(scratch1, scratch2, times_half_pointer_size,
+                      Context::kHeaderSize);
+}
+
+
+static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                               Register key,
+                                               Register parameter_map,
+                                               Register scratch,
+                                               Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
+  __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
+  __ cmp(key, scratch);
+  __ j(greater_equal, slow_case);
+  return FieldOperand(backing_store, key, times_half_pointer_size,
+                      FixedArray::kHeaderSize);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // The return address is on the stack.
+  Label slow, check_name, index_smi, index_name, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_name);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, eax,
+                                 Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(eax, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, eax, eax, NULL, &slow);
+  Isolate* isolate = masm->isolate();
+  Counters* counters = isolate->counters();
+  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
+  __ ret(0);
+
+  __ bind(&check_number_dictionary);
+  __ mov(ebx, key);
+  __ SmiUntag(ebx);
+  __ mov(eax, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  // Check whether the elements array is a number dictionary.
+  // ebx: untagged index
+  // eax: elements
+  __ CheckMap(eax, isolate->factory()->hash_table_map(), &slow,
+              DONT_DO_SMI_CHECK);
+  Label slow_pop_receiver;
+  // Push receiver on the stack to free up a register for the dictionary
+  // probing.
+  __ push(receiver);
+  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, key, ebx, edx, edi, eax);
+  // Pop receiver before returning.
+  __ pop(receiver);
+  __ ret(0);
+
+  __ bind(&slow_pop_receiver);
+  // Pop the receiver from the stack and jump to runtime.
+  __ pop(receiver);
+
+  __ bind(&slow);
+  // Slow case: jump to runtime.
+  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_name);
+  GenerateKeyNameCheck(masm, key, eax, ebx, &index_name, &slow);
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, eax, Map::kHasNamedInterceptor,
+                                 &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ mov(ebx, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
+         Immediate(isolate->factory()->hash_table_map()));
+  __ j(equal, &probe_dictionary);
+
+  // The receiver's map is still in eax, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the string hash.
+  if (FLAG_debug_code) {
+    __ cmp(eax, FieldOperand(receiver, HeapObject::kMapOffset));
+    __ Check(equal, kMapIsNoLongerInEax);
+  }
+  __ mov(ebx, eax);  // Keep the map around for later.
+  __ shr(eax, KeyedLookupCache::kMapHashShift);
+  __ mov(edi, FieldOperand(key, String::kHashFieldOffset));
+  __ shr(edi, String::kHashShift);
+  __ xor_(eax, edi);
+  __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
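+  // eax now holds the bucket index: shifted map bits xor'ed with the name's
+  // shifted hash, masked to the cache capacity.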
+
+  // Load the key (consisting of map and internalized string) from the cache and
+  // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(masm->isolate());
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    __ mov(edi, eax);
+    __ shl(edi, kPointerSizeLog2 + 1);
+    if (i != 0) {
+      __ add(edi, Immediate(kPointerSize * i * 2));
+    }
+    __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+    __ j(not_equal, &try_next_entry);
+    __ add(edi, Immediate(kPointerSize));
+    __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
+    __ j(equal, &hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
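+  // Each key entry is a (map, name) pair of pointers, hence the
+  // kPointerSizeLog2 + 1 scaling of the bucket index above and below.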
+
+  __ lea(edi, Operand(eax, 1));
+  __ shl(edi, kPointerSizeLog2 + 1);
+  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
+  __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
+  __ j(not_equal, &slow);
+  __ add(edi, Immediate(kPointerSize));
+  __ cmp(key, Operand::StaticArray(edi, times_1, cache_keys));
+  __ j(not_equal, &slow);
+
+  // Get field offset.
+  // ebx      : receiver's map
+  // eax      : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    if (i != 0) {
+      __ add(eax, Immediate(i));
+    }
+    __ mov(edi,
+           Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
+    __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
+    __ sub(edi, eax);
+    __ j(above_equal, &property_array_property);
+    if (i != 0) {
+      __ jmp(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ bind(&load_in_object_property);
+  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
+  __ add(eax, edi);
+  __ mov(eax, FieldOperand(receiver, eax, times_pointer_size, 0));
+  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+  __ ret(0);
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ mov(eax, FieldOperand(receiver, JSObject::kPropertiesOffset));
+  __ mov(eax,
+         FieldOperand(eax, edi, times_pointer_size, FixedArray::kHeaderSize));
+  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
+  __ ret(0);
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+
+  __ mov(eax, FieldOperand(receiver, JSObject::kMapOffset));
+  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
+
+  GenerateDictionaryLoad(masm, &slow, ebx, key, eax, edi, eax);
+  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
+  __ ret(0);
+
+  __ bind(&index_name);
+  __ IndexFromHash(ebx, key);
+  // Now jump to the place where smi keys are handled.
+  __ jmp(&index_smi);
+}
+
+
+void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register scratch = ebx;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+  Register result = eax;
+  DCHECK(!result.is(scratch));
+
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX);
+  char_at_generator.GenerateFast(masm);
+  __ ret(0);
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Label slow, notin;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(receiver.is(edx));
+  DCHECK(name.is(ecx));
+  DCHECK(value.is(eax));
+
+  Operand mapped_location = GenerateMappedArgumentsLookup(
+      masm, receiver, name, ebx, edi, &notin, &slow);
+  __ mov(mapped_location, value);
+  __ lea(ecx, mapped_location);
+  __ mov(edx, value);
+  __ RecordWrite(ebx, ecx, edx, kDontSaveFPRegs);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in ebx.
+  Operand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, name, ebx, edi, &slow);
+  __ mov(unmapped_location, value);
+  __ lea(edi, unmapped_location);
+  __ mov(edx, value);
+  __ RecordWrite(ebx, edi, edx, kDontSaveFPRegs);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+static void KeyedStoreGenerateGenericHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+  DCHECK(value.is(eax));
+  // key is a smi.
+  // ebx: FixedArray receiver->elements
+  // edi: receiver map
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(fast_object);
+  if (check_map == kCheckMap) {
+    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+    __ j(not_equal, fast_double);
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+  Label holecheck_passed1;
+  __ cmp(FixedArrayElementOperand(ebx, key),
+         masm->isolate()->factory()->the_hole_value());
+  __ j(not_equal, &holecheck_passed1);
+  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  __ bind(&holecheck_passed1);
+
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+  }
+  // It's irrelevant whether array is smi-only or not when writing a smi.
+  __ mov(FixedArrayElementOperand(ebx, key), value);
+  __ ret(0);
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ CheckFastObjectElements(edi, &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+  }
+  __ mov(FixedArrayElementOperand(ebx, key), value);
+  // Update write barrier for the elements array address.
+  __ mov(edx, value);  // Preserve the value which is returned.
+  __ RecordWriteArray(ebx, edx, key, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ ret(0);
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+    __ j(not_equal, slow);
+    // If the value is a number, store it as a double in the FastDoubleElements
+    // array.
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
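+  // Adding sizeof(kHoleNanLower32) addresses the upper word of the boxed
+  // double, which alone identifies the hole NaN; the smi key scaled by
+  // times_4 indexes the 8-byte double slots.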
+  __ cmp(FieldOperand(ebx, key, times_4, offset), Immediate(kHoleNanUpper32));
+  __ j(not_equal, &fast_double_without_map_check);
+  __ JumpIfDictionaryInPrototypeChain(receiver, ebx, edi, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, ebx, key, edi,
+                                 &transition_double_elements, false);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ add(FieldOperand(receiver, JSArray::kLengthOffset),
+           Immediate(Smi::FromInt(1)));
+  }
+  __ ret(0);
+
+  __ bind(&transition_smi_elements);
+  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
+
+  // Transition the array appropriately depending on the value type.
+  __ CheckMap(value, masm->isolate()->factory()->heap_number_map(),
+              &non_double_value, DONT_DO_SMI_CHECK);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
+  // and complete the store.
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
+                                         FAST_DOUBLE_ELEMENTS, ebx, edi, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   ebx, mode, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, ebx,
+                                         edi, slow);
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, ebx, mode, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ mov(ebx, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         ebx, edi, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(masm, receiver, key,
+                                                      value, ebx, mode, slow);
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+  __ jmp(&finish_object_store);
+}
+
+
+void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
+                                   StrictMode strict_mode) {
+  // Return address is on the stack.
+  Label slow, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+  // Get the map from the receiver.
+  __ mov(edi, FieldOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
+            1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
+  __ j(not_zero, &slow);
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &slow);
+  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
+  __ j(equal, &array);
+  // Check that the object is some kind of JSObject.
+  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
+  __ j(below, &slow);
+
+  // Object case: Check key against length in the elements array.
+  // Key is a smi.
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ j(below, &fast_object);
+
+  // Slow case: call runtime.
+  __ bind(&slow);
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // receiver is a JSArray.
+  // key is a smi.
+  // ebx: receiver->elements, a FixedArray
+  // edi: receiver map
+  // flags: compare (key, receiver.length())
+  // do not leave holes in the array:
+  __ j(not_equal, &slow);
+  __ cmp(key, FieldOperand(ebx, FixedArray::kLengthOffset));
+  __ j(above_equal, &slow);
+  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
+  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
+  __ j(not_equal, &check_if_double_array);
+  __ jmp(&fast_object_grow);
+
+  __ bind(&check_if_double_array);
+  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
+  __ j(not_equal, &slow);
+  __ jmp(&fast_double_grow);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+  __ bind(&array);
+  // receiver is a JSArray.
+  // key is a smi.
+  // edi: receiver map
+  __ mov(ebx, FieldOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array and fall through to the
+  // common store code.
+  __ cmp(key, FieldOperand(receiver, JSArray::kLengthOffset));  // Compare smis.
+  __ j(above_equal, &extra);
+
+  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, &slow,
+                                  kCheckMap, kDontIncrementLength);
+  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
+                                  &slow, kDontCheckMap, kIncrementLength);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = eax;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+  Label slow;
+
+  __ mov(dictionary, FieldOperand(LoadDescriptor::ReceiverRegister(),
+                                  JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), edi, ebx, eax);
+  __ ret(0);
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(ebx);
+}
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
+
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // Return address is on the stack.
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
+
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // Return address is on the stack.
+  LoadIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, flags, false, StoreDescriptor::ReceiverRegister(),
+      StoreDescriptor::NameRegister(), ebx, no_reg);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+static void StoreIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+
+  DCHECK(!ebx.is(receiver) && !ebx.is(name) && !ebx.is(value));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(value);
+  __ push(ebx);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Label restore_miss;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Register dictionary = ebx;
+
+  __ mov(dictionary, FieldOperand(receiver, JSObject::kPropertiesOffset));
+
+  // A lot of registers are needed for storing to slow case
+  // objects. Push and restore receiver but rely on
+  // GenerateDictionaryStore preserving the value and name.
+  __ push(receiver);
+  GenerateDictionaryStore(masm, &restore_miss, dictionary, name, value,
+                          receiver, edi);
+  __ Drop(1);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1);
+  __ ret(0);
+
+  __ bind(&restore_miss);
+  __ pop(receiver);
+  __ IncrementCounter(counters->store_normal_miss(), 1);
+  GenerateMiss(masm);
+}
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Return address is on the stack.
+  StoreIC_PushArgs(masm);
+
+  // Do tail-call to runtime routine.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return equal;
+    case Token::LT:
+      return less;
+    case Token::GT:
+      return greater;
+    case Token::LTE:
+      return less_equal;
+    case Token::GTE:
+      return greater_equal;
+    default:
+      UNREACHABLE();
+      return no_condition;
+  }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  return *test_instruction_address == Assembler::kTestAlByte;
+}
+
+
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  // The address of the instruction following the call.
+  Address test_instruction_address =
+      address + Assembler::kCallTargetAddressOffset;
+
+  // If the instruction following the call is not a test al, nothing
+  // was inlined.
+  if (*test_instruction_address != Assembler::kTestAlByte) {
+    DCHECK(*test_instruction_address == Assembler::kNopByte);
+    return;
+  }
+
+  Address delta_address = test_instruction_address + 1;
+  // The delta to the start of the map check instruction and the
+  // condition code used at the patched jump.
+  uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
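+  // The call is followed by a 'test al, <imm8>' marker; its immediate byte
+  // records the distance back to the short jump being patched.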
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, test=%p, delta=%d\n", address,
+           test_instruction_address, delta);
+  }
+
+  // Patch with a short conditional jump. Enabling means switching from a short
+  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
+  // reverse operation of that.
+  Address jmp_address = test_instruction_address - delta;
+  DCHECK((check == ENABLE_INLINED_SMI_CHECK)
+             ? (*jmp_address == Assembler::kJncShortOpcode ||
+                *jmp_address == Assembler::kJcShortOpcode)
+             : (*jmp_address == Assembler::kJnzShortOpcode ||
+                *jmp_address == Assembler::kJzShortOpcode));
+  Condition cc =
+      (check == ENABLE_INLINED_SMI_CHECK)
+          ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
+          : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
+  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/ic/x87/stub-cache-x87.cc b/src/ic/x87/stub-cache-x87.cc
new file mode 100644
index 0000000..0291ef3
--- /dev/null
+++ b/src/ic/x87/stub-cache-x87.cc
@@ -0,0 +1,189 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, bool leave_frame,
+                       StubCache::Table table, Register name, Register receiver,
+                       // Number of the cache entry, pointer-size scaled.
+                       Register offset, Register extra) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  Label miss;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ lea(offset, Operand(offset, offset, times_2, 0));
+
+  if (extra.is_valid()) {
+    // Get the code entry from the cache.
+    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
+
+    // Check that the key in the entry matches the name.
+    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+    __ j(not_equal, &miss);
+
+    // Check the map matches.
+    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+    __ j(not_equal, &miss);
+
+    // Check that the flags match what we're looking for.
+    __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
+    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+    __ cmp(offset, flags);
+    __ j(not_equal, &miss);
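+    // Bits in kFlagsNotUsedInLookup were masked off above, so entries only
+    // need to match on the flag bits that participate in the lookup.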
+
+#ifdef DEBUG
+    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+      __ jmp(&miss);
+    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+      __ jmp(&miss);
+    }
+#endif
+
+    if (leave_frame) __ leave();
+
+    // Jump to the first instruction in the code stub.
+    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(extra);
+
+    __ bind(&miss);
+  } else {
+    // Save the offset on the stack.
+    __ push(offset);
+
+    // Check that the key in the entry matches the name.
+    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
+    __ j(not_equal, &miss);
+
+    // Check the map matches.
+    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
+    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+    __ j(not_equal, &miss);
+
+    // Restore offset register.
+    __ mov(offset, Operand(esp, 0));
+
+    // Get the code entry from the cache.
+    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+    // Check that the flags match what we're looking for.
+    __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
+    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
+    __ cmp(offset, flags);
+    __ j(not_equal, &miss);
+
+#ifdef DEBUG
+    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+      __ jmp(&miss);
+    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+      __ jmp(&miss);
+    }
+#endif
+
+    // Restore offset and re-load code entry from cache.
+    __ pop(offset);
+    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
+
+    if (leave_frame) __ leave();
+
+    // Jump to the first instruction in the code stub.
+    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
+    __ jmp(offset);
+
+    // Pop at miss.
+    __ bind(&miss);
+    __ pop(offset);
+  }
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+                              bool leave_frame, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Label miss;
+
+  // Assert that code is valid.  The multiplying code relies on the entry size
+  // being 12.
+  DCHECK(sizeof(Entry) == 12);
+
+  // Assert the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Assert that there are no register conflicts.
+  DCHECK(!scratch.is(receiver));
+  DCHECK(!scratch.is(name));
+  DCHECK(!extra.is(receiver));
+  DCHECK(!extra.is(name));
+  DCHECK(!extra.is(scratch));
+
+  // Assert scratch and extra registers are valid, and extra2/3 are unused.
+  DCHECK(!scratch.is(no_reg));
+  DCHECK(extra2.is(no_reg));
+  DCHECK(extra3.is(no_reg));
+
+  Register offset = scratch;
+  scratch = no_reg;
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(offset, flags);
+  // We mask out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.  Also in the two 'and' instructions below.
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+  // ProbeTable expects the offset to be pointer scaled, which it is, because
+  // the heap object tag size is 2 and the pointer size log 2 is also 2.
+  DCHECK(kCacheIndexShift == kPointerSizeLog2);
+
+  // Probe the primary table.
+  ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
+             offset, extra);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
+  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
+  __ xor_(offset, flags);
+  __ and_(offset, (kPrimaryTableSize - 1) << kCacheIndexShift);
+  __ sub(offset, name);
+  __ add(offset, Immediate(flags));
+  __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
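+  // The secondary hash re-mixes the name and flags into the primary index,
+  // so primary-table collisions are unlikely to collide again here.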
+
+  // Probe the secondary table.
+  ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
+             offset, extra);
+
+  // Cache miss: Fall-through and let caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/incremental-marking-inl.h b/src/incremental-marking-inl.h
deleted file mode 100644
index 7b98474..0000000
--- a/src/incremental-marking-inl.h
+++ /dev/null
@@ -1,122 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INCREMENTAL_MARKING_INL_H_
-#define V8_INCREMENTAL_MARKING_INL_H_
-
-#include "src/incremental-marking.h"
-
-namespace v8 {
-namespace internal {
-
-
-bool IncrementalMarking::BaseRecordWrite(HeapObject* obj,
-                                         Object** slot,
-                                         Object* value) {
-  HeapObject* value_heap_obj = HeapObject::cast(value);
-  MarkBit value_bit = Marking::MarkBitFrom(value_heap_obj);
-  if (Marking::IsWhite(value_bit)) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
-        if (chunk->IsLeftOfProgressBar(slot)) {
-          WhiteToGreyAndPush(value_heap_obj, value_bit);
-          RestartIfNotMarking();
-        } else {
-          return false;
-        }
-      } else {
-        BlackToGreyAndUnshift(obj, obj_bit);
-        RestartIfNotMarking();
-        return false;
-      }
-    } else {
-      return false;
-    }
-  }
-  if (!is_compacting_) return false;
-  MarkBit obj_bit = Marking::MarkBitFrom(obj);
-  return Marking::IsBlack(obj_bit);
-}
-
-
-void IncrementalMarking::RecordWrite(HeapObject* obj,
-                                     Object** slot,
-                                     Object* value) {
-  if (IsMarking() && value->IsHeapObject()) {
-    RecordWriteSlow(obj, slot, value);
-  }
-}
-
-
-void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host,
-                                                Object** slot,
-                                                Code* value) {
-  if (IsMarking()) RecordWriteOfCodeEntrySlow(host, slot, value);
-}
-
-
-void IncrementalMarking::RecordWriteIntoCode(HeapObject* obj,
-                                             RelocInfo* rinfo,
-                                             Object* value) {
-  if (IsMarking() && value->IsHeapObject()) {
-    RecordWriteIntoCodeSlow(obj, rinfo, value);
-  }
-}
-
-
-void IncrementalMarking::RecordWrites(HeapObject* obj) {
-  if (IsMarking()) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-      if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
-        chunk->set_progress_bar(0);
-      }
-      BlackToGreyAndUnshift(obj, obj_bit);
-      RestartIfNotMarking();
-    }
-  }
-}
-
-
-void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
-                                               MarkBit mark_bit) {
-  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
-  ASSERT(obj->Size() >= 2*kPointerSize);
-  ASSERT(IsMarking());
-  Marking::BlackToGrey(mark_bit);
-  int obj_size = obj->Size();
-  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), -obj_size);
-  bytes_scanned_ -= obj_size;
-  int64_t old_bytes_rescanned = bytes_rescanned_;
-  bytes_rescanned_ = old_bytes_rescanned + obj_size;
-  if ((bytes_rescanned_ >> 20) != (old_bytes_rescanned >> 20)) {
-    if (bytes_rescanned_ > 2 * heap_->PromotedSpaceSizeOfObjects()) {
-      // If we have queued twice the heap size for rescanning then we are
-      // going around in circles, scanning the same objects again and again
-      // as the program mutates the heap faster than we can incrementally
-      // trace it.  In this case we switch to non-incremental marking in
-      // order to finish off this marking phase.
-      if (FLAG_trace_gc) {
-        PrintPID("Hurrying incremental marking because of lack of progress\n");
-      }
-      marking_speed_ = kMaxMarkingSpeed;
-    }
-  }
-
-  marking_deque_.UnshiftGrey(obj);
-}
-
-
-void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
-  Marking::WhiteToGrey(mark_bit);
-  marking_deque_.PushGrey(obj);
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_INCREMENTAL_MARKING_INL_H_
diff --git a/src/incremental-marking.cc b/src/incremental-marking.cc
deleted file mode 100644
index 8a158c3..0000000
--- a/src/incremental-marking.cc
+++ /dev/null
@@ -1,995 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/incremental-marking.h"
-
-#include "src/code-stubs.h"
-#include "src/compilation-cache.h"
-#include "src/conversions.h"
-#include "src/objects-visiting.h"
-#include "src/objects-visiting-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-IncrementalMarking::IncrementalMarking(Heap* heap)
-    : heap_(heap),
-      state_(STOPPED),
-      marking_deque_memory_(NULL),
-      marking_deque_memory_committed_(false),
-      steps_count_(0),
-      steps_took_(0),
-      longest_step_(0.0),
-      old_generation_space_available_at_start_of_incremental_(0),
-      old_generation_space_used_at_start_of_incremental_(0),
-      steps_count_since_last_gc_(0),
-      steps_took_since_last_gc_(0),
-      should_hurry_(false),
-      marking_speed_(0),
-      allocated_(0),
-      no_marking_scope_depth_(0),
-      unscanned_bytes_of_large_object_(0) {
-}
-
-
-void IncrementalMarking::TearDown() {
-  delete marking_deque_memory_;
-}
-
-
-void IncrementalMarking::RecordWriteSlow(HeapObject* obj,
-                                         Object** slot,
-                                         Object* value) {
-  if (BaseRecordWrite(obj, slot, value) && slot != NULL) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      // Object is not going to be rescanned we need to record the slot.
-      heap_->mark_compact_collector()->RecordSlot(
-          HeapObject::RawField(obj, 0), slot, value);
-    }
-  }
-}
-
-
-void IncrementalMarking::RecordWriteFromCode(HeapObject* obj,
-                                             Object** slot,
-                                             Isolate* isolate) {
-  ASSERT(obj->IsHeapObject());
-  IncrementalMarking* marking = isolate->heap()->incremental_marking();
-
-  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-  int counter = chunk->write_barrier_counter();
-  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
-    marking->write_barriers_invoked_since_last_step_ +=
-        MemoryChunk::kWriteBarrierCounterGranularity -
-            chunk->write_barrier_counter();
-    chunk->set_write_barrier_counter(
-        MemoryChunk::kWriteBarrierCounterGranularity);
-  }
-
-  marking->RecordWrite(obj, slot, *slot);
-}
-
-
-void IncrementalMarking::RecordCodeTargetPatch(Code* host,
-                                               Address pc,
-                                               HeapObject* value) {
-  if (IsMarking()) {
-    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
-    RecordWriteIntoCode(host, &rinfo, value);
-  }
-}
-
-
-void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) {
-  if (IsMarking()) {
-    Code* host = heap_->isolate()->inner_pointer_to_code_cache()->
-        GcSafeFindCodeForInnerPointer(pc);
-    RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
-    RecordWriteIntoCode(host, &rinfo, value);
-  }
-}
-
-
-void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host,
-                                                    Object** slot,
-                                                    Code* value) {
-  if (BaseRecordWrite(host, slot, value)) {
-    ASSERT(slot != NULL);
-    heap_->mark_compact_collector()->
-        RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value);
-  }
-}
-
-
-void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj,
-                                                 RelocInfo* rinfo,
-                                                 Object* value) {
-  MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value));
-  if (Marking::IsWhite(value_bit)) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      BlackToGreyAndUnshift(obj, obj_bit);
-      RestartIfNotMarking();
-    }
-    // Object is either grey or white.  It will be scanned if it survives.
-    return;
-  }
-
-  if (is_compacting_) {
-    MarkBit obj_bit = Marking::MarkBitFrom(obj);
-    if (Marking::IsBlack(obj_bit)) {
-      // Object is not going to be rescanned.  We need to record the slot.
-      heap_->mark_compact_collector()->RecordRelocSlot(rinfo,
-                                                       Code::cast(value));
-    }
-  }
-}
-
-
-static void MarkObjectGreyDoNotEnqueue(Object* obj) {
-  if (obj->IsHeapObject()) {
-    HeapObject* heap_obj = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(HeapObject::cast(obj));
-    if (Marking::IsBlack(mark_bit)) {
-      MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(),
-                                            -heap_obj->Size());
-    }
-    Marking::AnyToGrey(mark_bit);
-  }
-}
-
-
-static inline void MarkBlackOrKeepGrey(HeapObject* heap_object,
-                                       MarkBit mark_bit,
-                                       int size) {
-  ASSERT(!Marking::IsImpossible(mark_bit));
-  if (mark_bit.Get()) return;
-  mark_bit.Set();
-  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
-  ASSERT(Marking::IsBlack(mark_bit));
-}
-
-
-static inline void MarkBlackOrKeepBlack(HeapObject* heap_object,
-                                        MarkBit mark_bit,
-                                        int size) {
-  ASSERT(!Marking::IsImpossible(mark_bit));
-  if (Marking::IsBlack(mark_bit)) return;
-  Marking::MarkBlack(mark_bit);
-  MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size);
-  ASSERT(Marking::IsBlack(mark_bit));
-}
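
The two helpers above rely on the tricolor invariant being encoded in a pair of adjacent mark bits; in V8 of this era the patterns are, roughly, white = 00, black = 10, grey = 11 (01 is treated as impossible). A minimal, self-contained sketch of that encoding and of the "set only the first bit" trick in MarkBlackOrKeepGrey (toy types assumed here, not V8's real MarkBit):

    #include <cassert>

    enum class Color { kWhite, kGrey, kBlack };

    struct ToyMarkBit {
      bool first = false;   // the "black" bit
      bool second = false;  // the "grey" bit
      Color color() const {
        if (!first) return Color::kWhite;              // 00 (01 assumed impossible)
        return second ? Color::kGrey : Color::kBlack;  // 11 : 10
      }
    };

    // Setting only the first bit turns white (00) into black (10) while
    // leaving grey (11) untouched, which is why MarkBlackOrKeepGrey can
    // bail out early whenever that bit is already set.
    void MarkBlackOrKeepGrey(ToyMarkBit* mb, long* live_bytes, int size) {
      if (mb->first) return;  // already black or grey
      mb->first = true;
      *live_bytes += size;    // live bytes are accounted exactly once
    }

    int main() {
      ToyMarkBit white_obj;
      ToyMarkBit grey_obj{true, true};
      long live = 0;
      MarkBlackOrKeepGrey(&white_obj, &live, 16);  // white -> black
      MarkBlackOrKeepGrey(&grey_obj, &live, 16);   // grey stays grey
      assert(white_obj.color() == Color::kBlack);
      assert(grey_obj.color() == Color::kGrey);
      assert(live == 16);
    }
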
-
-
-class IncrementalMarkingMarkingVisitor
-    : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> {
- public:
-  static void Initialize() {
-    StaticMarkingVisitor<IncrementalMarkingMarkingVisitor>::Initialize();
-    table_.Register(kVisitFixedArray, &VisitFixedArrayIncremental);
-    table_.Register(kVisitNativeContext, &VisitNativeContextIncremental);
-    table_.Register(kVisitJSRegExp, &VisitJSRegExp);
-  }
-
-  static const int kProgressBarScanningChunk = 32 * 1024;
-
-  static void VisitFixedArrayIncremental(Map* map, HeapObject* object) {
-    MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
-    // TODO(mstarzinger): Move setting of the flag to the allocation site of
-    // the array. The visitor should just check the flag.
-    if (FLAG_use_marking_progress_bar &&
-        chunk->owner()->identity() == LO_SPACE) {
-      chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR);
-    }
-    if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
-      Heap* heap = map->GetHeap();
-      // When using a progress bar for large fixed arrays, scan only a chunk of
-      // the array and try to push it onto the marking deque again until it is
-      // fully scanned. Fall back to scanning it through to the end in case this
-      // fails because of a full deque.
-      int object_size = FixedArray::BodyDescriptor::SizeOf(map, object);
-      int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset,
-                             chunk->progress_bar());
-      int end_offset = Min(object_size,
-                           start_offset + kProgressBarScanningChunk);
-      int already_scanned_offset = start_offset;
-      bool scan_until_end = false;
-      do {
-        VisitPointersWithAnchor(heap,
-                                HeapObject::RawField(object, 0),
-                                HeapObject::RawField(object, start_offset),
-                                HeapObject::RawField(object, end_offset));
-        start_offset = end_offset;
-        end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
-        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
-      } while (scan_until_end && start_offset < object_size);
-      chunk->set_progress_bar(start_offset);
-      if (start_offset < object_size) {
-        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
-        heap->incremental_marking()->NotifyIncompleteScanOfObject(
-            object_size - (start_offset - already_scanned_offset));
-      }
-    } else {
-      FixedArrayVisitor::Visit(map, object);
-    }
-  }
-
-  static void VisitNativeContextIncremental(Map* map, HeapObject* object) {
-    Context* context = Context::cast(object);
-
-    // We will mark the cache black with a separate pass when we finish marking.
-    // Note that GC can happen when the context is not fully initialized,
-    // so the cache can be undefined.
-    Object* cache = context->get(Context::NORMALIZED_MAP_CACHE_INDEX);
-    if (!cache->IsUndefined()) {
-      MarkObjectGreyDoNotEnqueue(cache);
-    }
-    VisitNativeContext(map, context);
-  }
-
-  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
-    Object* obj = *p;
-    if (obj->IsHeapObject()) {
-      heap->mark_compact_collector()->RecordSlot(p, p, obj);
-      MarkObject(heap, obj);
-    }
-  }
-
-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
-    for (Object** p = start; p < end; p++) {
-      Object* obj = *p;
-      if (obj->IsHeapObject()) {
-        heap->mark_compact_collector()->RecordSlot(start, p, obj);
-        MarkObject(heap, obj);
-      }
-    }
-  }
-
-  INLINE(static void VisitPointersWithAnchor(Heap* heap,
-                                             Object** anchor,
-                                             Object** start,
-                                             Object** end)) {
-    for (Object** p = start; p < end; p++) {
-      Object* obj = *p;
-      if (obj->IsHeapObject()) {
-        heap->mark_compact_collector()->RecordSlot(anchor, p, obj);
-        MarkObject(heap, obj);
-      }
-    }
-  }
-
-  // Marks the object grey and pushes it on the marking stack.
-  INLINE(static void MarkObject(Heap* heap, Object* obj)) {
-    HeapObject* heap_object = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
-    if (mark_bit.data_only()) {
-      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
-    } else if (Marking::IsWhite(mark_bit)) {
-      heap->incremental_marking()->WhiteToGreyAndPush(heap_object, mark_bit);
-    }
-  }
-
-  // Marks the object black without pushing it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  INLINE(static bool MarkObjectWithoutPush(Heap* heap, Object* obj)) {
-    HeapObject* heap_object = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
-    if (Marking::IsWhite(mark_bit)) {
-      mark_bit.Set();
-      MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(),
-                                            heap_object->Size());
-      return true;
-    }
-    return false;
-  }
-};
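
VisitFixedArrayIncremental above bounds each marking pause by scanning large arrays in 32 KB chunks and parking the resume offset in the page's progress bar. A standalone sketch of that resumable loop (the deque_full flag stands in for marking_deque()->IsFull(); this is an assumption for illustration, not V8 API):

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    constexpr int kChunk = 32 * 1024;  // kProgressBarScanningChunk

    // Scans slots from 'progress' onward one chunk at a time and returns the
    // new progress offset. A result < size means "re-push the object and
    // resume later"; if the deque overflowed we scan through to the end.
    int ScanChunk(const std::vector<char>& object, int progress,
                  bool deque_full) {
      const int size = static_cast<int>(object.size());
      do {
        const int stop = std::min(size, progress + kChunk);
        for (int i = progress; i < stop; i++) {
          // visit slot object[i]
        }
        progress = stop;
      } while (deque_full && progress < size);
      return progress;
    }

    int main() {
      std::vector<char> big(100 * 1024);
      int progress = 0;
      while (progress < static_cast<int>(big.size())) {
        progress = ScanChunk(big, progress, /*deque_full=*/false);
        std::printf("progress bar: %d\n", progress);
        // prints 32768, 65536, 98304, 102400
      }
    }
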
-
-
-class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor {
- public:
-  explicit IncrementalMarkingRootMarkingVisitor(
-      IncrementalMarking* incremental_marking)
-      : incremental_marking_(incremental_marking) {
-  }
-
-  void VisitPointer(Object** p) {
-    MarkObjectByPointer(p);
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
-  }
-
- private:
-  void MarkObjectByPointer(Object** p) {
-    Object* obj = *p;
-    if (!obj->IsHeapObject()) return;
-
-    HeapObject* heap_object = HeapObject::cast(obj);
-    MarkBit mark_bit = Marking::MarkBitFrom(heap_object);
-    if (mark_bit.data_only()) {
-      MarkBlackOrKeepGrey(heap_object, mark_bit, heap_object->Size());
-    } else {
-      if (Marking::IsWhite(mark_bit)) {
-        incremental_marking_->WhiteToGreyAndPush(heap_object, mark_bit);
-      }
-    }
-  }
-
-  IncrementalMarking* incremental_marking_;
-};
-
-
-void IncrementalMarking::Initialize() {
-  IncrementalMarkingMarkingVisitor::Initialize();
-}
-
-
-void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk,
-                                              bool is_marking,
-                                              bool is_compacting) {
-  if (is_marking) {
-    chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-
-    // It's difficult to filter out slots recorded for large objects.
-    if (chunk->owner()->identity() == LO_SPACE &&
-        chunk->size() > static_cast<size_t>(Page::kPageSize) &&
-        is_compacting) {
-      chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-    }
-  } else if (chunk->owner()->identity() == CELL_SPACE ||
-             chunk->owner()->identity() == PROPERTY_CELL_SPACE ||
-             chunk->scan_on_scavenge()) {
-    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  } else {
-    chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  }
-}
-
-
-void IncrementalMarking::SetNewSpacePageFlags(NewSpacePage* chunk,
-                                              bool is_marking) {
-  chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING);
-  if (is_marking) {
-    chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  } else {
-    chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING);
-  }
-  chunk->SetFlag(MemoryChunk::SCAN_ON_SCAVENGE);
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
-    PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    SetOldSpacePageFlags(p, false, false);
-  }
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrierForSpace(
-    NewSpace* space) {
-  NewSpacePageIterator it(space);
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
-    SetNewSpacePageFlags(p, false);
-  }
-}
-
-
-void IncrementalMarking::DeactivateIncrementalWriteBarrier() {
-  DeactivateIncrementalWriteBarrierForSpace(heap_->old_pointer_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->old_data_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->cell_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->property_cell_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->map_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->code_space());
-  DeactivateIncrementalWriteBarrierForSpace(heap_->new_space());
-
-  LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
-    SetOldSpacePageFlags(lop, false, false);
-    lop = lop->next_page();
-  }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    SetOldSpacePageFlags(p, true, is_compacting_);
-  }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
-    SetNewSpacePageFlags(p, true);
-  }
-}
-
-
-void IncrementalMarking::ActivateIncrementalWriteBarrier() {
-  ActivateIncrementalWriteBarrier(heap_->old_pointer_space());
-  ActivateIncrementalWriteBarrier(heap_->old_data_space());
-  ActivateIncrementalWriteBarrier(heap_->cell_space());
-  ActivateIncrementalWriteBarrier(heap_->property_cell_space());
-  ActivateIncrementalWriteBarrier(heap_->map_space());
-  ActivateIncrementalWriteBarrier(heap_->code_space());
-  ActivateIncrementalWriteBarrier(heap_->new_space());
-
-  LargePage* lop = heap_->lo_space()->first_page();
-  while (lop->is_valid()) {
-    SetOldSpacePageFlags(lop, true, is_compacting_);
-    lop = lop->next_page();
-  }
-}
-
-
-bool IncrementalMarking::WorthActivating() {
-#ifndef DEBUG
-  static const intptr_t kActivationThreshold = 8 * MB;
-#else
-  // TODO(gc) consider setting this to some low level so that some
-  // debug tests run with incremental marking and some without.
-  static const intptr_t kActivationThreshold = 0;
-#endif
-  // Only start incremental marking in a safe state: 1) when incremental
-  // marking is turned on, 2) when we are currently not in a GC, and
-  // 3) when we are currently not serializing or deserializing the heap.
-  return FLAG_incremental_marking &&
-      FLAG_incremental_marking_steps &&
-      heap_->gc_state() == Heap::NOT_IN_GC &&
-      !heap_->isolate()->serializer_enabled() &&
-      heap_->isolate()->IsInitialized() &&
-      heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold;
-}
-
-
-void IncrementalMarking::ActivateGeneratedStub(Code* stub) {
-  ASSERT(RecordWriteStub::GetMode(stub) ==
-         RecordWriteStub::STORE_BUFFER_ONLY);
-
-  if (!IsMarking()) {
-    // Initially the stub is generated in STORE_BUFFER_ONLY mode, so
-    // we don't need to do anything if incremental marking is
-    // not active.
-  } else if (IsCompacting()) {
-    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
-  } else {
-    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
-  }
-}
-
-
-static void PatchIncrementalMarkingRecordWriteStubs(
-    Heap* heap, RecordWriteStub::Mode mode) {
-  UnseededNumberDictionary* stubs = heap->code_stubs();
-
-  int capacity = stubs->Capacity();
-  for (int i = 0; i < capacity; i++) {
-    Object* k = stubs->KeyAt(i);
-    if (stubs->IsKey(k)) {
-      uint32_t key = NumberToUint32(k);
-
-      if (CodeStub::MajorKeyFromKey(key) ==
-          CodeStub::RecordWrite) {
-        Object* e = stubs->ValueAt(i);
-        if (e->IsCode()) {
-          RecordWriteStub::Patch(Code::cast(e), mode);
-        }
-      }
-    }
-  }
-}
-
-
-void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
-  if (marking_deque_memory_ == NULL) {
-    marking_deque_memory_ = new VirtualMemory(4 * MB);
-  }
-  if (!marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Commit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size(),
-        false);  // Not executable.
-    CHECK(success);
-    marking_deque_memory_committed_ = true;
-  }
-}
-
-
-void IncrementalMarking::UncommitMarkingDeque() {
-  if (state_ == STOPPED && marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Uncommit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size());
-    CHECK(success);
-    marking_deque_memory_committed_ = false;
-  }
-}
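
EnsureMarkingDequeIsCommitted and UncommitMarkingDeque keep the 4 MB deque reservation alive across GC cycles but hand the backing pages to the OS while marking is stopped. A Linux/POSIX sketch of that reserve/commit/uncommit pattern (V8's VirtualMemory wraps the platform equivalent of these calls):

    #include <sys/mman.h>
    #include <cstddef>

    int main() {
      const size_t kSize = 4 * 1024 * 1024;
      // Reserve address space only: inaccessible, no committed backing.
      void* base = mmap(nullptr, kSize, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE, -1, 0);
      if (base == MAP_FAILED) return 1;
      // "Commit": make the pages read/write (and, as in the code above,
      // not executable).
      if (mprotect(base, kSize, PROT_READ | PROT_WRITE) != 0) return 1;
      static_cast<char*>(base)[0] = 1;  // safe to touch now
      // "Uncommit": return the backing pages but keep the reservation.
      madvise(base, kSize, MADV_DONTNEED);
      mprotect(base, kSize, PROT_NONE);
      munmap(base, kSize);
      return 0;
    }
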
-
-
-void IncrementalMarking::Start(CompactionFlag flag) {
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Start\n");
-  }
-  ASSERT(FLAG_incremental_marking);
-  ASSERT(FLAG_incremental_marking_steps);
-  ASSERT(state_ == STOPPED);
-  ASSERT(heap_->gc_state() == Heap::NOT_IN_GC);
-  ASSERT(!heap_->isolate()->serializer_enabled());
-  ASSERT(heap_->isolate()->IsInitialized());
-
-  ResetStepCounters();
-
-  if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
-    StartMarking(flag);
-  } else {
-    if (FLAG_trace_incremental_marking) {
-      PrintF("[IncrementalMarking] Start sweeping.\n");
-    }
-    state_ = SWEEPING;
-  }
-
-  heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold);
-}
-
-
-void IncrementalMarking::StartMarking(CompactionFlag flag) {
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Start marking\n");
-  }
-
-  is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) &&
-      heap_->mark_compact_collector()->StartCompaction(
-          MarkCompactCollector::INCREMENTAL_COMPACTION);
-
-  state_ = MARKING;
-
-  RecordWriteStub::Mode mode = is_compacting_ ?
-      RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL;
-
-  PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
-
-  EnsureMarkingDequeIsCommitted();
-
-  // Initialize marking stack.
-  Address addr = static_cast<Address>(marking_deque_memory_->address());
-  size_t size = marking_deque_memory_->size();
-  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-  marking_deque_.Initialize(addr, addr + size);
-
-  ActivateIncrementalWriteBarrier();
-
-  // Marking bits are cleared by the sweeper.
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    heap_->mark_compact_collector()->VerifyMarkbitsAreClean();
-  }
-#endif
-
-  heap_->CompletelyClearInstanceofCache();
-  heap_->isolate()->compilation_cache()->MarkCompactPrologue();
-
-  if (FLAG_cleanup_code_caches_at_gc) {
-    // We will mark the cache black with a separate pass
-    // when we finish marking.
-    MarkObjectGreyDoNotEnqueue(heap_->polymorphic_code_cache());
-  }
-
-  // Mark strong roots grey.
-  IncrementalMarkingRootMarkingVisitor visitor(this);
-  heap_->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
-
-  heap_->mark_compact_collector()->MarkWeakObjectToCodeTable();
-
-  // Ready to start incremental marking.
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Running\n");
-  }
-}
-
-
-void IncrementalMarking::PrepareForScavenge() {
-  if (!IsMarking()) return;
-  NewSpacePageIterator it(heap_->new_space()->FromSpaceStart(),
-                          heap_->new_space()->FromSpaceEnd());
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
-  }
-}
-
-
-void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
-  if (!IsMarking()) return;
-
-  int current = marking_deque_.bottom();
-  int mask = marking_deque_.mask();
-  int limit = marking_deque_.top();
-  HeapObject** array = marking_deque_.array();
-  int new_top = current;
-
-  Map* filler_map = heap_->one_pointer_filler_map();
-
-  while (current != limit) {
-    HeapObject* obj = array[current];
-    ASSERT(obj->IsHeapObject());
-    current = ((current + 1) & mask);
-    if (heap_->InNewSpace(obj)) {
-      MapWord map_word = obj->map_word();
-      if (map_word.IsForwardingAddress()) {
-        HeapObject* dest = map_word.ToForwardingAddress();
-        array[new_top] = dest;
-        new_top = ((new_top + 1) & mask);
-        ASSERT(new_top != marking_deque_.bottom());
-#ifdef DEBUG
-        MarkBit mark_bit = Marking::MarkBitFrom(obj);
-        ASSERT(Marking::IsGrey(mark_bit) ||
-               (obj->IsFiller() && Marking::IsWhite(mark_bit)));
-#endif
-      }
-    } else if (obj->map() != filler_map) {
-      // Skip one-word filler objects that appear on the
-      // stack when we perform an in-place array shift.
-      array[new_top] = obj;
-      new_top = ((new_top + 1) & mask);
-      ASSERT(new_top != marking_deque_.bottom());
-#ifdef DEBUG
-      MarkBit mark_bit = Marking::MarkBitFrom(obj);
-      MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-      ASSERT(Marking::IsGrey(mark_bit) ||
-             (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
-             (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
-              Marking::IsBlack(mark_bit)));
-#endif
-    }
-  }
-  marking_deque_.set_top(new_top);
-
-  steps_took_since_last_gc_ = 0;
-  steps_count_since_last_gc_ = 0;
-  longest_step_ = 0.0;
-}
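
UpdateMarkingDequeAfterScavenge rewrites the ring buffer in place: it walks bottom..top with the power-of-two mask, drops entries that did not survive, keeps (or forwards) the rest, and lowers top to the write cursor. The same two-cursor pattern on a toy buffer (capacity 8 assumed; -1 plays the filler/dead role):

    #include <cassert>

    int main() {
      int buf[8] = {10, -1, 30, -1, 50, 0, 0, 0};
      const int mask = 7;  // capacity - 1, so "& mask" wraps around
      const int bottom = 0;
      int top = 5, read = bottom, write = bottom;
      while (read != top) {
        const int v = buf[read];
        read = (read + 1) & mask;
        if (v == -1) continue;    // entry did not survive: drop it
        buf[write] = v;           // kept (or forwarded) entry
        write = (write + 1) & mask;
        assert(write != bottom);  // filtering can never overflow the deque
      }
      top = write;                // marking_deque_.set_top(new_top)
      assert(top == 3 && buf[0] == 10 && buf[1] == 30 && buf[2] == 50);
      return 0;
    }
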
-
-
-void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
-  MarkBit map_mark_bit = Marking::MarkBitFrom(map);
-  if (Marking::IsWhite(map_mark_bit)) {
-    WhiteToGreyAndPush(map, map_mark_bit);
-  }
-
-  IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
-
-  MarkBit mark_bit = Marking::MarkBitFrom(obj);
-#if ENABLE_SLOW_ASSERTS
-  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-  SLOW_ASSERT(Marking::IsGrey(mark_bit) ||
-              (obj->IsFiller() && Marking::IsWhite(mark_bit)) ||
-              (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) &&
-               Marking::IsBlack(mark_bit)));
-#endif
-  MarkBlackOrKeepBlack(obj, mark_bit, size);
-}
-
-
-void IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
-  Map* filler_map = heap_->one_pointer_filler_map();
-  while (!marking_deque_.IsEmpty() && bytes_to_process > 0) {
-    HeapObject* obj = marking_deque_.Pop();
-
-    // Explicitly skip one-word fillers. Incremental markbit patterns are
-    // correct only for objects that occupy at least two words.
-    Map* map = obj->map();
-    if (map == filler_map) continue;
-
-    int size = obj->SizeFromMap(map);
-    unscanned_bytes_of_large_object_ = 0;
-    VisitObject(map, obj, size);
-    bytes_to_process -= (size - unscanned_bytes_of_large_object_);
-  }
-}
-
-
-void IncrementalMarking::ProcessMarkingDeque() {
-  Map* filler_map = heap_->one_pointer_filler_map();
-  while (!marking_deque_.IsEmpty()) {
-    HeapObject* obj = marking_deque_.Pop();
-
-    // Explicitly skip one-word fillers. Incremental markbit patterns are
-    // correct only for objects that occupy at least two words.
-    Map* map = obj->map();
-    if (map == filler_map) continue;
-
-    VisitObject(map, obj, obj->SizeFromMap(map));
-  }
-}
-
-
-void IncrementalMarking::Hurry() {
-  if (state() == MARKING) {
-    double start = 0.0;
-    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
-      start = OS::TimeCurrentMillis();
-      if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Hurry\n");
-      }
-    }
-    // TODO(gc) Hurry() can mark the objects it encounters black, since
-    // the mutator is stopped.
-    ProcessMarkingDeque();
-    state_ = COMPLETE;
-    if (FLAG_trace_incremental_marking || FLAG_print_cumulative_gc_stat) {
-      double end = OS::TimeCurrentMillis();
-      double delta = end - start;
-      heap_->AddMarkingTime(delta);
-      if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Complete (hurry), spent %d ms.\n",
-               static_cast<int>(delta));
-      }
-    }
-  }
-
-  if (FLAG_cleanup_code_caches_at_gc) {
-    PolymorphicCodeCache* poly_cache = heap_->polymorphic_code_cache();
-    Marking::GreyToBlack(Marking::MarkBitFrom(poly_cache));
-    MemoryChunk::IncrementLiveBytesFromGC(poly_cache->address(),
-                                          PolymorphicCodeCache::kSize);
-  }
-
-  Object* context = heap_->native_contexts_list();
-  while (!context->IsUndefined()) {
-    // GC can happen when the context is not fully initialized,
-    // so the cache can be undefined.
-    HeapObject* cache = HeapObject::cast(
-        Context::cast(context)->get(Context::NORMALIZED_MAP_CACHE_INDEX));
-    if (!cache->IsUndefined()) {
-      MarkBit mark_bit = Marking::MarkBitFrom(cache);
-      if (Marking::IsGrey(mark_bit)) {
-        Marking::GreyToBlack(mark_bit);
-        MemoryChunk::IncrementLiveBytesFromGC(cache->address(), cache->Size());
-      }
-    }
-    context = Context::cast(context)->get(Context::NEXT_CONTEXT_LINK);
-  }
-}
-
-
-void IncrementalMarking::Abort() {
-  if (IsStopped()) return;
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Aborting.\n");
-  }
-  heap_->new_space()->LowerInlineAllocationLimit(0);
-  IncrementalMarking::set_should_hurry(false);
-  ResetStepCounters();
-  if (IsMarking()) {
-    PatchIncrementalMarkingRecordWriteStubs(heap_,
-                                            RecordWriteStub::STORE_BUFFER_ONLY);
-    DeactivateIncrementalWriteBarrier();
-
-    if (is_compacting_) {
-      LargeObjectIterator it(heap_->lo_space());
-      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-        Page* p = Page::FromAddress(obj->address());
-        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
-        }
-      }
-    }
-  }
-  heap_->isolate()->stack_guard()->ClearGC();
-  state_ = STOPPED;
-  is_compacting_ = false;
-}
-
-
-void IncrementalMarking::Finalize() {
-  Hurry();
-  state_ = STOPPED;
-  is_compacting_ = false;
-  heap_->new_space()->LowerInlineAllocationLimit(0);
-  IncrementalMarking::set_should_hurry(false);
-  ResetStepCounters();
-  PatchIncrementalMarkingRecordWriteStubs(heap_,
-                                          RecordWriteStub::STORE_BUFFER_ONLY);
-  DeactivateIncrementalWriteBarrier();
-  ASSERT(marking_deque_.IsEmpty());
-  heap_->isolate()->stack_guard()->ClearGC();
-}
-
-
-void IncrementalMarking::MarkingComplete(CompletionAction action) {
-  state_ = COMPLETE;
-  // We will set the stack guard to request a GC now.  This will mean the rest
-  // of the GC gets performed as soon as possible (we can't do a GC here in a
-  // record-write context).  If a few things get allocated between now and
-  // then, that shouldn't make us do a scavenge or keep being incremental, so
-  // we set the should-hurry flag to indicate that there can't be much work
-  // left to do.
-  set_should_hurry(true);
-  if (FLAG_trace_incremental_marking) {
-    PrintF("[IncrementalMarking] Complete (normal).\n");
-  }
-  if (action == GC_VIA_STACK_GUARD) {
-    heap_->isolate()->stack_guard()->RequestGC();
-  }
-}
-
-
-void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
-  if (IsStopped() && WorthActivating() && heap_->NextGCIsLikelyToBeFull()) {
-    // TODO(hpayer): Let's play safe for now, but compaction should be
-    // in principle possible.
-    Start(PREVENT_COMPACTION);
-  } else {
-    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
-  }
-}
-
-
-void IncrementalMarking::Step(intptr_t allocated_bytes,
-                              CompletionAction action) {
-  if (heap_->gc_state() != Heap::NOT_IN_GC ||
-      !FLAG_incremental_marking ||
-      !FLAG_incremental_marking_steps ||
-      (state_ != SWEEPING && state_ != MARKING)) {
-    return;
-  }
-
-  allocated_ += allocated_bytes;
-
-  if (allocated_ < kAllocatedThreshold &&
-      write_barriers_invoked_since_last_step_ <
-          kWriteBarriersInvokedThreshold) {
-    return;
-  }
-
-  if (state_ == MARKING && no_marking_scope_depth_ > 0) return;
-
-  // The marking speed is driven either by the allocation rate or by the rate
-  // at which we are having to check the color of objects in the write barrier.
-  // It is possible for a tight non-allocating loop to run a lot of write
-  // barriers before we get here and check them (marking can only take place on
-  // allocation), so to reduce the lumpiness we don't use the write barriers
-  // invoked since last step directly to determine the amount of work to do.
-  intptr_t bytes_to_process =
-      marking_speed_ * Max(allocated_, write_barriers_invoked_since_last_step_);
-  allocated_ = 0;
-  write_barriers_invoked_since_last_step_ = 0;
-
-  bytes_scanned_ += bytes_to_process;
-
-  double start = 0;
-
-  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
-      FLAG_print_cumulative_gc_stat) {
-    start = OS::TimeCurrentMillis();
-  }
-
-  if (state_ == SWEEPING) {
-    if (heap_->mark_compact_collector()->IsConcurrentSweepingInProgress() &&
-        heap_->mark_compact_collector()->IsSweepingCompleted()) {
-      heap_->mark_compact_collector()->WaitUntilSweepingCompleted();
-    }
-    if (!heap_->mark_compact_collector()->IsConcurrentSweepingInProgress()) {
-      bytes_scanned_ = 0;
-      StartMarking(PREVENT_COMPACTION);
-    }
-  } else if (state_ == MARKING) {
-    ProcessMarkingDeque(bytes_to_process);
-    if (marking_deque_.IsEmpty()) MarkingComplete(action);
-  }
-
-  steps_count_++;
-  steps_count_since_last_gc_++;
-
-  bool speed_up = false;
-
-  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking after %d steps\n",
-               static_cast<int>(kMarkingSpeedAccellerationInterval));
-    }
-    speed_up = true;
-  }
-
-  bool space_left_is_very_small =
-      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
-  bool only_1_nth_of_space_that_was_available_still_left =
-      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
-          old_generation_space_available_at_start_of_incremental_);
-
-  if (space_left_is_very_small ||
-      only_1_nth_of_space_that_was_available_still_left) {
-    if (FLAG_trace_gc) PrintPID("Speed up marking because of low space left\n");
-    speed_up = true;
-  }
-
-  bool size_of_old_space_multiplied_by_n_during_marking =
-      (heap_->PromotedTotalSize() >
-       (marking_speed_ + 1) *
-           old_generation_space_used_at_start_of_incremental_);
-  if (size_of_old_space_multiplied_by_n_during_marking) {
-    speed_up = true;
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking because of heap size increase\n");
-    }
-  }
-
-  int64_t promoted_during_marking = heap_->PromotedTotalSize()
-      - old_generation_space_used_at_start_of_incremental_;
-  intptr_t delay = marking_speed_ * MB;
-  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
-  // We try to scan at least twice as fast as we are allocating.
-  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
-    if (FLAG_trace_gc) {
-      PrintPID("Speed up marking because marker was not keeping up\n");
-    }
-    speed_up = true;
-  }
-
-  if (speed_up) {
-    if (state_ != MARKING) {
-      if (FLAG_trace_gc) {
-        PrintPID("Postponing speeding up marking until marking starts\n");
-      }
-    } else {
-      marking_speed_ += kMarkingSpeedAccelleration;
-      marking_speed_ = static_cast<int>(
-          Min(kMaxMarkingSpeed,
-              static_cast<intptr_t>(marking_speed_ * 1.3)));
-      if (FLAG_trace_gc) {
-        PrintPID("Marking speed increased to %d\n", marking_speed_);
-      }
-    }
-  }
-
-  if (FLAG_trace_incremental_marking || FLAG_trace_gc ||
-      FLAG_print_cumulative_gc_stat) {
-    double end = OS::TimeCurrentMillis();
-    double delta = (end - start);
-    longest_step_ = Max(longest_step_, delta);
-    steps_took_ += delta;
-    steps_took_since_last_gc_ += delta;
-    heap_->AddMarkingTime(delta);
-  }
-}
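
The speed-up path above adds kMarkingSpeedAccelleration (2) and then multiplies by 1.3, capped at kMaxMarkingSpeed (1000), so the per-step budget marking_speed * max(allocated, write barriers) grows roughly geometrically once speed-ups kick in. A worked example of that progression (constants taken from incremental-marking.h; the 65536 factor assumes a step triggered exactly at kAllocatedThreshold):

    #include <algorithm>
    #include <cstdio>

    int main() {
      long speed = 1;  // kInitialMarkingSpeed
      for (int speedups = 1; speedups <= 8; speedups++) {
        // marking_speed_ += 2; then min(1000, trunc(marking_speed_ * 1.3))
        speed = std::min(1000L, static_cast<long>((speed + 2) * 1.3));
        std::printf("speed-up %d: marking_speed=%ld, step budget=%ld bytes\n",
                    speedups, speed, speed * 65536L);
      }
      // Progression: 3, 6, 10, 15, 22, 31, 42, 57, ... up to the 1000 cap.
      return 0;
    }
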
-
-
-void IncrementalMarking::ResetStepCounters() {
-  steps_count_ = 0;
-  steps_took_ = 0;
-  longest_step_ = 0.0;
-  old_generation_space_available_at_start_of_incremental_ =
-      SpaceLeftInOldSpace();
-  old_generation_space_used_at_start_of_incremental_ =
-      heap_->PromotedTotalSize();
-  steps_count_since_last_gc_ = 0;
-  steps_took_since_last_gc_ = 0;
-  bytes_rescanned_ = 0;
-  marking_speed_ = kInitialMarkingSpeed;
-  bytes_scanned_ = 0;
-  write_barriers_invoked_since_last_step_ = 0;
-}
-
-
-int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
-}
-
-} }  // namespace v8::internal
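
Taken together, this file implements marking that is paced by the mutator: allocation accumulates in allocated_, and once it crosses kAllocatedThreshold a step drains marking_speed times that much work from the deque. A toy model of that feedback loop (the 4096-byte allocation size and 10 MB of marking work are illustrative numbers, not V8 values):

    #include <cstdio>

    int main() {
      const long kAllocatedThreshold = 65536;
      long marking_work = 10L * 1024 * 1024;  // bytes of heap left to mark
      long allocated = 0;
      const long speed = 3;  // e.g. kFastMarking
      int steps = 0;
      while (marking_work > 0) {
        allocated += 4096;  // the mutator allocates an object
        if (allocated < kAllocatedThreshold) continue;
        marking_work -= speed * allocated;  // one incremental step
        allocated = 0;
        steps++;
      }
      std::printf("marking completed after %d steps\n", steps);  // 54
    }
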
diff --git a/src/incremental-marking.h b/src/incremental-marking.h
deleted file mode 100644
index 31b9714..0000000
--- a/src/incremental-marking.h
+++ /dev/null
@@ -1,261 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_INCREMENTAL_MARKING_H_
-#define V8_INCREMENTAL_MARKING_H_
-
-
-#include "src/execution.h"
-#include "src/mark-compact.h"
-#include "src/objects.h"
-
-namespace v8 {
-namespace internal {
-
-
-class IncrementalMarking {
- public:
-  enum State {
-    STOPPED,
-    SWEEPING,
-    MARKING,
-    COMPLETE
-  };
-
-  enum CompletionAction {
-    GC_VIA_STACK_GUARD,
-    NO_GC_VIA_STACK_GUARD
-  };
-
-  explicit IncrementalMarking(Heap* heap);
-
-  static void Initialize();
-
-  void TearDown();
-
-  State state() {
-    ASSERT(state_ == STOPPED || FLAG_incremental_marking);
-    return state_;
-  }
-
-  bool should_hurry() { return should_hurry_; }
-  void set_should_hurry(bool val) { should_hurry_ = val; }
-
-  inline bool IsStopped() { return state() == STOPPED; }
-
-  INLINE(bool IsMarking()) { return state() >= MARKING; }
-
-  inline bool IsMarkingIncomplete() { return state() == MARKING; }
-
-  inline bool IsComplete() { return state() == COMPLETE; }
-
-  bool WorthActivating();
-
-  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
-
-  void Start(CompactionFlag flag = ALLOW_COMPACTION);
-
-  void Stop();
-
-  void PrepareForScavenge();
-
-  void UpdateMarkingDequeAfterScavenge();
-
-  void Hurry();
-
-  void Finalize();
-
-  void Abort();
-
-  void MarkingComplete(CompletionAction action);
-
-  // It's hard to know how much work the incremental marker should do to make
-  // progress in the face of the mutator creating new work for it.  We start
-  // off at a moderate rate of work and gradually increase the speed of the
-  // incremental marker until it completes.
-  // Do some marking every time this much memory has been allocated or that
-  // many heavy (color-checking) write barriers have been invoked.
-  static const intptr_t kAllocatedThreshold = 65536;
-  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
-  // Start off by marking this many times more memory than has been allocated.
-  static const intptr_t kInitialMarkingSpeed = 1;
-  // But if we are promoting a lot of data we need to mark faster to keep up
-  // with the data that is entering the old space through promotion.
-  static const intptr_t kFastMarking = 3;
-  // After this many steps we increase the marking/allocating factor.
-  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
-  // This is how much we increase the marking/allocating factor by.
-  static const intptr_t kMarkingSpeedAccelleration = 2;
-  static const intptr_t kMaxMarkingSpeed = 1000;
-
-  void OldSpaceStep(intptr_t allocated);
-
-  void Step(intptr_t allocated, CompletionAction action);
-
-  inline void RestartIfNotMarking() {
-    if (state_ == COMPLETE) {
-      state_ = MARKING;
-      if (FLAG_trace_incremental_marking) {
-        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
-      }
-    }
-  }
-
-  static void RecordWriteFromCode(HeapObject* obj,
-                                  Object** slot,
-                                  Isolate* isolate);
-
-  // Record a slot for compaction.  Returns false for objects that are
-  // guaranteed to be rescanned or not guaranteed to survive.
-  //
-  // No slots in white objects should be recorded, as some slots are typed and
-  // cannot be interpreted correctly if the underlying object does not survive
-  // the incremental cycle (stays white).
-  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
-  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
-  INLINE(void RecordWriteIntoCode(HeapObject* obj,
-                                  RelocInfo* rinfo,
-                                  Object* value));
-  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
-                                     Object** slot,
-                                     Code* value));
-
-
-  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
-  void RecordWriteIntoCodeSlow(HeapObject* obj,
-                               RelocInfo* rinfo,
-                               Object* value);
-  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
-  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
-  void RecordCodeTargetPatch(Address pc, HeapObject* value);
-
-  inline void RecordWrites(HeapObject* obj);
-
-  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);
-
-  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);
-
-  inline int steps_count() {
-    return steps_count_;
-  }
-
-  inline double steps_took() {
-    return steps_took_;
-  }
-
-  inline double longest_step() {
-    return longest_step_;
-  }
-
-  inline int steps_count_since_last_gc() {
-    return steps_count_since_last_gc_;
-  }
-
-  inline double steps_took_since_last_gc() {
-    return steps_took_since_last_gc_;
-  }
-
-  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
-    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
-  }
-
-  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
-    SetNewSpacePageFlags(chunk, IsMarking());
-  }
-
-  MarkingDeque* marking_deque() { return &marking_deque_; }
-
-  bool IsCompacting() { return IsMarking() && is_compacting_; }
-
-  void ActivateGeneratedStub(Code* stub);
-
-  void NotifyOfHighPromotionRate() {
-    if (IsMarking()) {
-      if (marking_speed_ < kFastMarking) {
-        if (FLAG_trace_gc) {
-          PrintPID("Increasing marking speed to %d "
-                   "due to high promotion rate\n",
-                   static_cast<int>(kFastMarking));
-        }
-        marking_speed_ = kFastMarking;
-      }
-    }
-  }
-
-  void EnterNoMarkingScope() {
-    no_marking_scope_depth_++;
-  }
-
-  void LeaveNoMarkingScope() {
-    no_marking_scope_depth_--;
-  }
-
-  void UncommitMarkingDeque();
-
-  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
-    unscanned_bytes_of_large_object_ = unscanned_bytes;
-  }
-
- private:
-  int64_t SpaceLeftInOldSpace();
-
-  void ResetStepCounters();
-
-  void StartMarking(CompactionFlag flag);
-
-  void ActivateIncrementalWriteBarrier(PagedSpace* space);
-  static void ActivateIncrementalWriteBarrier(NewSpace* space);
-  void ActivateIncrementalWriteBarrier();
-
-  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
-  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
-  void DeactivateIncrementalWriteBarrier();
-
-  static void SetOldSpacePageFlags(MemoryChunk* chunk,
-                                   bool is_marking,
-                                   bool is_compacting);
-
-  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
-
-  void EnsureMarkingDequeIsCommitted();
-
-  INLINE(void ProcessMarkingDeque());
-
-  INLINE(void ProcessMarkingDeque(intptr_t bytes_to_process));
-
-  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
-
-  Heap* heap_;
-
-  State state_;
-  bool is_compacting_;
-
-  VirtualMemory* marking_deque_memory_;
-  bool marking_deque_memory_committed_;
-  MarkingDeque marking_deque_;
-
-  int steps_count_;
-  double steps_took_;
-  double longest_step_;
-  int64_t old_generation_space_available_at_start_of_incremental_;
-  int64_t old_generation_space_used_at_start_of_incremental_;
-  int steps_count_since_last_gc_;
-  double steps_took_since_last_gc_;
-  int64_t bytes_rescanned_;
-  bool should_hurry_;
-  int marking_speed_;
-  intptr_t bytes_scanned_;
-  intptr_t allocated_;
-  intptr_t write_barriers_invoked_since_last_step_;
-
-  int no_marking_scope_depth_;
-
-  int unscanned_bytes_of_large_object_;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_INCREMENTAL_MARKING_H_
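
The State enum above moves through a small lifecycle: Start() leaves STOPPED for SWEEPING or MARKING, MarkingComplete() moves MARKING to COMPLETE, RestartIfNotMarking() can bounce COMPLETE back to MARKING when new grey objects appear, and Finalize()/Abort() return to STOPPED. A compact sketch of those transitions:

    #include <cassert>

    enum State { STOPPED, SWEEPING, MARKING, COMPLETE };

    State Start(State s)    { assert(s == STOPPED); return MARKING; }  // or SWEEPING
    State Complete(State s) { assert(s == MARKING); return COMPLETE; }
    State Restart(State s)  { return s == COMPLETE ? MARKING : s; }    // new grey objects
    State Finalize(State)   { return STOPPED; }                        // also Abort()

    int main() {
      State s = STOPPED;
      s = Start(s);
      s = Complete(s);
      s = Restart(s);   // a write barrier turned a black object grey again
      s = Complete(s);
      s = Finalize(s);
      assert(s == STOPPED);
    }
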
diff --git a/src/interface-descriptors.cc b/src/interface-descriptors.cc
new file mode 100644
index 0000000..62d7105
--- /dev/null
+++ b/src/interface-descriptors.cc
@@ -0,0 +1,143 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+void CallInterfaceDescriptorData::Initialize(
+    int register_parameter_count, Register* registers,
+    Representation* register_param_representations,
+    PlatformInterfaceDescriptor* platform_descriptor) {
+  platform_specific_descriptor_ = platform_descriptor;
+  register_param_count_ = register_parameter_count;
+
+  // An interface descriptor must have a context register.
+  DCHECK(register_parameter_count > 0 &&
+         registers[0].is(CallInterfaceDescriptor::ContextRegister()));
+
+  // InterfaceDescriptor owns a copy of the registers array.
+  register_params_.Reset(NewArray<Register>(register_parameter_count));
+  for (int i = 0; i < register_parameter_count; i++) {
+    register_params_[i] = registers[i];
+  }
+
+  // If a representations array is specified, then the descriptor owns that as
+  // well.
+  if (register_param_representations != NULL) {
+    register_param_representations_.Reset(
+        NewArray<Representation>(register_parameter_count));
+    for (int i = 0; i < register_parameter_count; i++) {
+      // The context parameter (at index 0) must always be tagged.
+      DCHECK(
+          i != 0 ||
+          register_param_representations[i].Equals(Representation::Tagged()));
+      register_param_representations_[i] = register_param_representations[i];
+    }
+  }
+}
+
+
+const char* CallInterfaceDescriptor::DebugName(Isolate* isolate) {
+  CallInterfaceDescriptorData* start = isolate->call_descriptor_data(0);
+  size_t index = data_ - start;
+  DCHECK(index < CallDescriptors::NUMBER_OF_DESCRIPTORS);
+  CallDescriptors::Key key = static_cast<CallDescriptors::Key>(index);
+  switch (key) {
+#define DEF_CASE(NAME)        \
+  case CallDescriptors::NAME: \
+    return #NAME " Descriptor";
+    INTERFACE_DESCRIPTOR_LIST(DEF_CASE)
+#undef DEF_CASE
+    case CallDescriptors::NUMBER_OF_DESCRIPTORS:
+      break;
+  }
+  return "";
+}
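
DebugName recovers the descriptor's key purely from where its data sits inside the isolate's per-key array of CallInterfaceDescriptorData, via pointer subtraction. The same trick in isolation (toy two-entry array standing in for call_descriptor_data):

    #include <cassert>
    #include <cstddef>

    enum Key { kLoad, kStore, kNumberOfKeys };

    int main() {
      int data[kNumberOfKeys] = {};   // stands in for call_descriptor_data(0)
      const int* mine = &data[kStore];       // the data_ pointer held above
      const size_t index = mine - &data[0];  // same arithmetic as data_ - start
      assert(index < kNumberOfKeys);
      assert(static_cast<Key>(index) == kStore);
      return 0;
    }
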
+
+
+void LoadDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), ReceiverRegister(),
+                          NameRegister()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
+                          ValueRegister()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ElementTransitionAndStoreDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), ValueRegister(), MapRegister(),
+                          NameRegister(), ReceiverRegister()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InstanceofDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), left(), right()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void MathPowTaggedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), exponent()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void MathPowIntegerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), exponent()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void VectorLoadICTrampolineDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
+                          SlotRegister()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void VectorLoadICDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), ReceiverRegister(), NameRegister(),
+                          SlotRegister(), VectorRegister()};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiGetterDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), function_address()};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::External()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentsAccessReadDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister(), index(), parameter_count()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ContextOnlyDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {ContextRegister()};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+}
+}  // namespace v8::internal
diff --git a/src/interface-descriptors.h b/src/interface-descriptors.h
new file mode 100644
index 0000000..b773c91
--- /dev/null
+++ b/src/interface-descriptors.h
@@ -0,0 +1,486 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CALL_INTERFACE_DESCRIPTOR_H_
+#define V8_CALL_INTERFACE_DESCRIPTOR_H_
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+class PlatformInterfaceDescriptor;
+
+#define INTERFACE_DESCRIPTOR_LIST(V)          \
+  V(Load)                                     \
+  V(Store)                                    \
+  V(ElementTransitionAndStore)                \
+  V(Instanceof)                               \
+  V(VectorLoadICTrampoline)                   \
+  V(VectorLoadIC)                             \
+  V(FastNewClosure)                           \
+  V(FastNewContext)                           \
+  V(ToNumber)                                 \
+  V(NumberToString)                           \
+  V(FastCloneShallowArray)                    \
+  V(FastCloneShallowObject)                   \
+  V(CreateAllocationSite)                     \
+  V(CallFunction)                             \
+  V(CallFunctionWithFeedback)                 \
+  V(CallConstruct)                            \
+  V(RegExpConstructResult)                    \
+  V(TransitionElementsKind)                   \
+  V(ArrayConstructorConstantArgCount)         \
+  V(ArrayConstructor)                         \
+  V(InternalArrayConstructorConstantArgCount) \
+  V(InternalArrayConstructor)                 \
+  V(CompareNil)                               \
+  V(ToBoolean)                                \
+  V(BinaryOp)                                 \
+  V(BinaryOpWithAllocationSite)               \
+  V(StringAdd)                                \
+  V(Keyed)                                    \
+  V(Named)                                    \
+  V(CallHandler)                              \
+  V(ArgumentAdaptor)                          \
+  V(ApiGetter)                                \
+  V(ApiFunction)                              \
+  V(ArgumentsAccessRead)                      \
+  V(StoreArrayLiteralElement)                 \
+  V(MathPowTagged)                            \
+  V(MathPowInteger)                           \
+  V(ContextOnly)
+
+
+class CallInterfaceDescriptorData {
+ public:
+  CallInterfaceDescriptorData() : register_param_count_(-1) {}
+
+  // A copy of the passed-in registers and param_representations is made
+  // and owned by the CallInterfaceDescriptorData.
+
+  // TODO(mvstanton): Instead of taking parallel arrays register and
+  // param_representations, how about a struct that puts the representation
+  // and register side by side (e.g., RegRep(r1, Representation::Tagged())).
+  // The same should go for the CodeStubDescriptor class.
+  void Initialize(int register_parameter_count, Register* registers,
+                  Representation* param_representations,
+                  PlatformInterfaceDescriptor* platform_descriptor = NULL);
+
+  bool IsInitialized() const { return register_param_count_ >= 0; }
+
+  int register_param_count() const { return register_param_count_; }
+  Register register_param(int index) const { return register_params_[index]; }
+  Register* register_params() const { return register_params_.get(); }
+  Representation register_param_representation(int index) const {
+    return register_param_representations_[index];
+  }
+  Representation* register_param_representations() const {
+    return register_param_representations_.get();
+  }
+  PlatformInterfaceDescriptor* platform_specific_descriptor() const {
+    return platform_specific_descriptor_;
+  }
+
+ private:
+  int register_param_count_;
+
+  // The Register params are allocated dynamically by the
+  // InterfaceDescriptor, and freed on destruction. This is because static
+  // arrays of Registers cause creation of runtime static initializers
+  // which we don't want.
+  SmartArrayPointer<Register> register_params_;
+  // Specifies Representations for the stub's parameters. Points to an array
+  // of Representations of the same length as the number of parameters to the
+  // stub; if NULL (the default value), the Representation of each parameter
+  // is assumed to be Tagged().
+  SmartArrayPointer<Representation> register_param_representations_;
+
+  PlatformInterfaceDescriptor* platform_specific_descriptor_;
+
+  DISALLOW_COPY_AND_ASSIGN(CallInterfaceDescriptorData);
+};
+
+
+class CallDescriptors {
+ public:
+  enum Key {
+#define DEF_ENUM(name) name,
+    INTERFACE_DESCRIPTOR_LIST(DEF_ENUM)
+#undef DEF_ENUM
+    NUMBER_OF_DESCRIPTORS
+  };
+};
+
+
+class CallInterfaceDescriptor {
+ public:
+  CallInterfaceDescriptor() : data_(NULL) {}
+
+  CallInterfaceDescriptor(Isolate* isolate, CallDescriptors::Key key)
+      : data_(isolate->call_descriptor_data(key)) {}
+
+  int GetEnvironmentLength() const { return data()->register_param_count(); }
+
+  int GetRegisterParameterCount() const {
+    return data()->register_param_count();
+  }
+
+  Register GetParameterRegister(int index) const {
+    return data()->register_param(index);
+  }
+
+  Representation GetParameterRepresentation(int index) const {
+    DCHECK(index < data()->register_param_count());
+    if (data()->register_param_representations() == NULL) {
+      return Representation::Tagged();
+    }
+
+    return data()->register_param_representation(index);
+  }
+
+  // "Environment" versions of parameter functions. The first register
+  // parameter (context) is not included.
+  int GetEnvironmentParameterCount() const {
+    return GetEnvironmentLength() - 1;
+  }
+
+  Register GetEnvironmentParameterRegister(int index) const {
+    return GetParameterRegister(index + 1);
+  }
+
+  Representation GetEnvironmentParameterRepresentation(int index) const {
+    return GetParameterRepresentation(index + 1);
+  }
+
+  // Some platforms have extra information to associate with the descriptor.
+  PlatformInterfaceDescriptor* platform_specific_descriptor() const {
+    return data()->platform_specific_descriptor();
+  }
+
+  static const Register ContextRegister();
+
+  const char* DebugName(Isolate* isolate);
+
+ protected:
+  const CallInterfaceDescriptorData* data() const { return data_; }
+
+ private:
+  const CallInterfaceDescriptorData* data_;
+};
+
+
+#define DECLARE_DESCRIPTOR(name, base)                                     \
+  explicit name(Isolate* isolate) : base(isolate, key()) {                 \
+    if (!data()->IsInitialized())                                          \
+      Initialize(isolate->call_descriptor_data(key()));                    \
+  }                                                                        \
+                                                                           \
+ protected:                                                                \
+  void Initialize(CallInterfaceDescriptorData* data);                      \
+  name(Isolate* isolate, CallDescriptors::Key key) : base(isolate, key) {} \
+                                                                           \
+ public:                                                                   \
+  static inline CallDescriptors::Key key();
+
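
For orientation, here is DECLARE_DESCRIPTOR hand-expanded (whitespace adjusted) for the LoadDescriptor declared just below; construction lazily initializes the shared per-isolate data the first time a given descriptor is used:

    explicit LoadDescriptor(Isolate* isolate)
        : CallInterfaceDescriptor(isolate, key()) {
      if (!data()->IsInitialized())
        Initialize(isolate->call_descriptor_data(key()));
    }

    protected:
     void Initialize(CallInterfaceDescriptorData* data);
     LoadDescriptor(Isolate* isolate, CallDescriptors::Key key)
         : CallInterfaceDescriptor(isolate, key) {}

    public:
     static inline CallDescriptors::Key key();
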
+
+// LoadDescriptor is used by all stubs that implement Load/KeyedLoad ICs.
+class LoadDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(LoadDescriptor, CallInterfaceDescriptor)
+
+  enum ParameterIndices { kReceiverIndex, kNameIndex };
+  static const Register ReceiverRegister();
+  static const Register NameRegister();
+};
+
+
+class StoreDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(StoreDescriptor, CallInterfaceDescriptor)
+
+  enum ParameterIndices {
+    kReceiverIndex,
+    kNameIndex,
+    kValueIndex,
+    kParameterCount
+  };
+  static const Register ReceiverRegister();
+  static const Register NameRegister();
+  static const Register ValueRegister();
+};
+
+
+class ElementTransitionAndStoreDescriptor : public StoreDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ElementTransitionAndStoreDescriptor, StoreDescriptor)
+
+  static const Register MapRegister();
+};
+
+
+class InstanceofDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(InstanceofDescriptor, CallInterfaceDescriptor)
+
+  enum ParameterIndices { kLeftIndex, kRightIndex, kParameterCount };
+  static const Register left();
+  static const Register right();
+};
+
+
+class VectorLoadICTrampolineDescriptor : public LoadDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(VectorLoadICTrampolineDescriptor, LoadDescriptor)
+
+  enum ParameterIndices { kReceiverIndex, kNameIndex, kSlotIndex };
+
+  static const Register SlotRegister();
+};
+
+
+class VectorLoadICDescriptor : public VectorLoadICTrampolineDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(VectorLoadICDescriptor, VectorLoadICTrampolineDescriptor)
+
+  enum ParameterIndices {
+    kReceiverIndex,
+    kNameIndex,
+    kSlotIndex,
+    kVectorIndex
+  };
+
+  static const Register VectorRegister();
+};
+
+
+class FastNewClosureDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(FastNewClosureDescriptor, CallInterfaceDescriptor)
+};
+
+
+class FastNewContextDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(FastNewContextDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ToNumberDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ToNumberDescriptor, CallInterfaceDescriptor)
+};
+
+
+class NumberToStringDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(NumberToStringDescriptor, CallInterfaceDescriptor)
+};
+
+
+class FastCloneShallowArrayDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(FastCloneShallowArrayDescriptor, CallInterfaceDescriptor)
+};
+
+
+class FastCloneShallowObjectDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(FastCloneShallowObjectDescriptor, CallInterfaceDescriptor)
+};
+
+
+class CreateAllocationSiteDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(CreateAllocationSiteDescriptor, CallInterfaceDescriptor)
+};
+
+
+class CallFunctionDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(CallFunctionDescriptor, CallInterfaceDescriptor)
+};
+
+
+class CallFunctionWithFeedbackDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(CallFunctionWithFeedbackDescriptor,
+                     CallInterfaceDescriptor)
+};
+
+
+class CallConstructDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(CallConstructDescriptor, CallInterfaceDescriptor)
+};
+
+
+class RegExpConstructResultDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(RegExpConstructResultDescriptor, CallInterfaceDescriptor)
+};
+
+
+class TransitionElementsKindDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(TransitionElementsKindDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ArrayConstructorConstantArgCountDescriptor
+    : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ArrayConstructorConstantArgCountDescriptor,
+                     CallInterfaceDescriptor)
+};
+
+
+class ArrayConstructorDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ArrayConstructorDescriptor, CallInterfaceDescriptor)
+};
+
+
+class InternalArrayConstructorConstantArgCountDescriptor
+    : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(InternalArrayConstructorConstantArgCountDescriptor,
+                     CallInterfaceDescriptor)
+};
+
+
+class InternalArrayConstructorDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(InternalArrayConstructorDescriptor,
+                     CallInterfaceDescriptor)
+};
+
+
+class CompareNilDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(CompareNilDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ToBooleanDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ToBooleanDescriptor, CallInterfaceDescriptor)
+};
+
+
+class BinaryOpDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(BinaryOpDescriptor, CallInterfaceDescriptor)
+};
+
+
+class BinaryOpWithAllocationSiteDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(BinaryOpWithAllocationSiteDescriptor,
+                     CallInterfaceDescriptor)
+};
+
+
+class StringAddDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(StringAddDescriptor, CallInterfaceDescriptor)
+};
+
+
+class KeyedDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(KeyedDescriptor, CallInterfaceDescriptor)
+};
+
+
+class NamedDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(NamedDescriptor, CallInterfaceDescriptor)
+};
+
+
+class CallHandlerDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(CallHandlerDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ArgumentAdaptorDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ArgumentAdaptorDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ApiFunctionDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ApiFunctionDescriptor, CallInterfaceDescriptor)
+};
+
+
+class ApiGetterDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ApiGetterDescriptor, CallInterfaceDescriptor)
+
+  static const Register function_address();
+};
+
+
+class ArgumentsAccessReadDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ArgumentsAccessReadDescriptor, CallInterfaceDescriptor)
+
+  static const Register index();
+  static const Register parameter_count();
+};
+
+
+class StoreArrayLiteralElementDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(StoreArrayLiteralElementDescriptor,
+                     CallInterfaceDescriptor)
+};
+
+
+class MathPowTaggedDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(MathPowTaggedDescriptor, CallInterfaceDescriptor)
+
+  static const Register exponent();
+};
+
+
+class MathPowIntegerDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(MathPowIntegerDescriptor, CallInterfaceDescriptor)
+
+  static const Register exponent();
+};
+
+
+class ContextOnlyDescriptor : public CallInterfaceDescriptor {
+ public:
+  DECLARE_DESCRIPTOR(ContextOnlyDescriptor, CallInterfaceDescriptor)
+};
+
+#undef DECLARE_DESCRIPTOR
+
+
+// We define the association between CallDescriptors::Key and the specialized
+// descriptor here to reduce boilerplate and mistakes.
+#define DEF_KEY(name) \
+  CallDescriptors::Key name##Descriptor::key() { return CallDescriptors::name; }
+INTERFACE_DESCRIPTOR_LIST(DEF_KEY)
+#undef DEF_KEY
+}
+}  // namespace v8::internal
+
+
+#if V8_TARGET_ARCH_ARM64
+#include "src/arm64/interface-descriptors-arm64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/interface-descriptors-arm.h"
+#endif
+
+#endif  // V8_CALL_INTERFACE_DESCRIPTOR_H_
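
[The DEF_KEY block above ties each specialized descriptor to its CallDescriptors::Key in one place. A minimal sketch of the same X-macro pattern follows, assuming only what the hunk shows (one macro list expanded once for declarations and once for key definitions); the two-entry list and struct bodies are illustrative stand-ins, not the V8 declarations.]

// Sketch: a single list macro drives both the declarations and the key
// definitions, so a descriptor cannot be added without also getting a key.
#define SKETCH_DESCRIPTOR_LIST(V) \
  V(Load)                         \
  V(Store)

enum class Key { Load, Store, NUMBER_OF_DESCRIPTORS };

// Declarations, one struct per list entry.
#define DECLARE(name) struct name##Descriptor { static Key key(); };
SKETCH_DESCRIPTOR_LIST(DECLARE)
#undef DECLARE

// Key definitions, generated from the same list.
#define DEF_KEY(name) \
  Key name##Descriptor::key() { return Key::name; }
SKETCH_DESCRIPTOR_LIST(DEF_KEY)
#undef DEF_KEY
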
diff --git a/src/interface.cc b/src/interface.cc
index d6e8406..62169f5 100644
--- a/src/interface.cc
+++ b/src/interface.cc
@@ -9,25 +9,16 @@
 namespace v8 {
 namespace internal {
 
-static bool Match(void* key1, void* key2) {
-  String* name1 = *static_cast<String**>(key1);
-  String* name2 = *static_cast<String**>(key2);
-  ASSERT(name1->IsInternalizedString());
-  ASSERT(name2->IsInternalizedString());
-  return name1 == name2;
-}
-
-
 Interface* Interface::Lookup(Handle<String> name, Zone* zone) {
-  ASSERT(IsModule());
+  DCHECK(IsModule());
   ZoneHashMap* map = Chase()->exports_;
   if (map == NULL) return NULL;
   ZoneAllocationPolicy allocator(zone);
   ZoneHashMap::Entry* p = map->Lookup(name.location(), name->Hash(), false,
                                       allocator);
   if (p == NULL) return NULL;
-  ASSERT(*static_cast<String**>(p->key) == *name);
-  ASSERT(p->value != NULL);
+  DCHECK(*static_cast<String**>(p->key) == *name);
+  DCHECK(p->value != NULL);
   return static_cast<Interface*>(p->value);
 }
 
@@ -47,8 +38,8 @@
 #endif
 
 
-void Interface::DoAdd(
-    void* name, uint32_t hash, Interface* interface, Zone* zone, bool* ok) {
+void Interface::DoAdd(const void* name, uint32_t hash, Interface* interface,
+                      Zone* zone, bool* ok) {
   MakeModule(ok);
   if (!*ok) return;
 
@@ -57,8 +48,9 @@
     PrintF("%*s# Adding...\n", Nesting::current(), "");
     PrintF("%*sthis = ", Nesting::current(), "");
     this->Print(Nesting::current());
-    PrintF("%*s%s : ", Nesting::current(), "",
-           (*static_cast<String**>(name))->ToAsciiArray());
+    const AstRawString* symbol = static_cast<const AstRawString*>(name);
+    PrintF("%*s%.*s : ", Nesting::current(), "", symbol->length(),
+           symbol->raw_data());
     interface->Print(Nesting::current());
   }
 #endif
@@ -68,10 +60,12 @@
 
   if (*map == NULL) {
     *map = new(zone->New(sizeof(ZoneHashMap)))
-        ZoneHashMap(Match, ZoneHashMap::kDefaultHashMapCapacity, allocator);
+        ZoneHashMap(ZoneHashMap::PointersMatch,
+                    ZoneHashMap::kDefaultHashMapCapacity, allocator);
   }
 
-  ZoneHashMap::Entry* p = (*map)->Lookup(name, hash, !IsFrozen(), allocator);
+  ZoneHashMap::Entry* p =
+      (*map)->Lookup(const_cast<void*>(name), hash, !IsFrozen(), allocator);
   if (p == NULL) {
     // This didn't have name but was frozen already, that's an error.
     *ok = false;
@@ -97,8 +91,8 @@
 void Interface::Unify(Interface* that, Zone* zone, bool* ok) {
   if (this->forward_) return this->Chase()->Unify(that, zone, ok);
   if (that->forward_) return this->Unify(that->Chase(), zone, ok);
-  ASSERT(this->forward_ == NULL);
-  ASSERT(that->forward_ == NULL);
+  DCHECK(this->forward_ == NULL);
+  DCHECK(that->forward_ == NULL);
 
   *ok = true;
   if (this == that) return;
@@ -144,13 +138,13 @@
 
 
 void Interface::DoUnify(Interface* that, bool* ok, Zone* zone) {
-  ASSERT(this->forward_ == NULL);
-  ASSERT(that->forward_ == NULL);
-  ASSERT(!this->IsValue());
-  ASSERT(!that->IsValue());
-  ASSERT(this->index_ == -1);
-  ASSERT(that->index_ == -1);
-  ASSERT(*ok);
+  DCHECK(this->forward_ == NULL);
+  DCHECK(that->forward_ == NULL);
+  DCHECK(!this->IsValue());
+  DCHECK(!that->IsValue());
+  DCHECK(this->index_ == -1);
+  DCHECK(that->index_ == -1);
+  DCHECK(*ok);
 
 #ifdef DEBUG
     Nesting nested;
diff --git a/src/interface.h b/src/interface.h
index 086facf..598d038 100644
--- a/src/interface.h
+++ b/src/interface.h
@@ -5,6 +5,7 @@
 #ifndef V8_INTERFACE_H_
 #define V8_INTERFACE_H_
 
+#include "src/ast-value-factory.h"
 #include "src/zone-inl.h"  // For operator new.
 
 namespace v8 {
@@ -59,8 +60,9 @@
 
   // Add a name to the list of exports. If it already exists, unify with
   // interface, otherwise insert unless this is closed.
-  void Add(Handle<String> name, Interface* interface, Zone* zone, bool* ok) {
-    DoAdd(name.location(), name->Hash(), interface, zone, ok);
+  void Add(const AstRawString* name, Interface* interface, Zone* zone,
+           bool* ok) {
+    DoAdd(name, name->hash(), interface, zone, ok);
   }
 
   // Unify with another interface. If successful, both interface objects will
@@ -93,7 +95,7 @@
 
   // Assign an index.
   void Allocate(int index) {
-    ASSERT(IsModule() && IsFrozen() && Chase()->index_ == -1);
+    DCHECK(IsModule() && IsFrozen() && Chase()->index_ == -1);
     Chase()->index_ = index;
   }
 
@@ -122,14 +124,14 @@
   }
 
   int Length() {
-    ASSERT(IsModule() && IsFrozen());
+    DCHECK(IsModule() && IsFrozen());
     ZoneHashMap* exports = Chase()->exports_;
     return exports ? exports->occupancy() : 0;
   }
 
   // The context slot in the hosting global context pointing to this module.
   int Index() {
-    ASSERT(IsModule() && IsFrozen());
+    DCHECK(IsModule() && IsFrozen());
     return Chase()->index_;
   }
 
@@ -146,12 +148,12 @@
   class Iterator {
    public:
     bool done() const { return entry_ == NULL; }
-    Handle<String> name() const {
-      ASSERT(!done());
-      return Handle<String>(*static_cast<String**>(entry_->key));
+    const AstRawString* name() const {
+      DCHECK(!done());
+      return static_cast<const AstRawString*>(entry_->key);
     }
     Interface* interface() const {
-      ASSERT(!done());
+      DCHECK(!done());
       return static_cast<Interface*>(entry_->value);
     }
     void Advance() { entry_ = exports_->Next(entry_); }
@@ -207,7 +209,7 @@
     return result;
   }
 
-  void DoAdd(void* name, uint32_t hash, Interface* interface, Zone* zone,
+  void DoAdd(const void* name, uint32_t hash, Interface* interface, Zone* zone,
              bool* ok);
   void DoUnify(Interface* that, bool* ok, Zone* zone);
 };
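
[Switching Add/DoAdd from Handle<String> to const AstRawString* is what lets the export map above use ZoneHashMap::PointersMatch: the parser's AstValueFactory interns raw strings, so equal names share one pointer. A small self-contained sketch of that invariant, with std::unordered_set standing in for the factory's interning table:]

#include <string>
#include <unordered_set>

// Stand-in for AstValueFactory: returns one canonical pointer per distinct
// string. Pointers to unordered_set elements stay valid across insertions,
// which is what makes the identity comparison safe.
class InterningFactory {
 public:
  const std::string* Intern(const std::string& s) {
    return &*strings_.insert(s).first;
  }
 private:
  std::unordered_set<std::string> strings_;
};

int main() {
  InterningFactory factory;
  const std::string* a = factory.Intern("exported_name");
  const std::string* b = factory.Intern("exported_name");
  // Pointer equality now substitutes for content comparison, as in
  // ZoneHashMap::PointersMatch.
  return (a == b) ? 0 : 1;
}
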
diff --git a/src/interpreter-irregexp.cc b/src/interpreter-irregexp.cc
index c72a3d0..2aedfb4 100644
--- a/src/interpreter-irregexp.cc
+++ b/src/interpreter-irregexp.cc
@@ -6,13 +6,14 @@
 
 
 #include "src/v8.h"
-#include "src/unicode.h"
-#include "src/utils.h"
+
 #include "src/ast.h"
 #include "src/bytecodes-irregexp.h"
 #include "src/interpreter-irregexp.h"
 #include "src/jsregexp.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/unicode.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -118,13 +119,13 @@
 
 
 static int32_t Load32Aligned(const byte* pc) {
-  ASSERT((reinterpret_cast<intptr_t>(pc) & 3) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(pc) & 3) == 0);
   return *reinterpret_cast<const int32_t *>(pc);
 }
 
 
 static int32_t Load16Aligned(const byte* pc) {
-  ASSERT((reinterpret_cast<intptr_t>(pc) & 1) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(pc) & 1) == 0);
   return *reinterpret_cast<const uint16_t *>(pc);
 }
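
[The two helpers above read multi-byte operands out of the irregexp bytecode stream; the DCHECKs encode the stream's alignment contract. A standalone sketch of the same checked loads, with assert() standing in for DCHECK; the 16-bit load widens to int32_t so callers can treat both operand sizes uniformly:]

#include <cassert>
#include <cstdint>

typedef uint8_t byte;

// 32-bit operands are kept 4-byte aligned in the bytecode stream and 16-bit
// operands 2-byte aligned, so the casts below never produce a misaligned read.
static int32_t Load32AlignedSketch(const byte* pc) {
  assert((reinterpret_cast<intptr_t>(pc) & 3) == 0);
  return *reinterpret_cast<const int32_t*>(pc);
}

static int32_t Load16AlignedSketch(const byte* pc) {
  assert((reinterpret_cast<intptr_t>(pc) & 1) == 0);
  return *reinterpret_cast<const uint16_t*>(pc);
}
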
 
@@ -135,9 +136,7 @@
 // matching terminates.
 class BacktrackStack {
  public:
-  explicit BacktrackStack() {
-    data_ = NewArray<int>(kBacktrackStackSize);
-  }
+  BacktrackStack() { data_ = NewArray<int>(kBacktrackStackSize); }
 
   ~BacktrackStack() {
     DeleteArray(data_);
@@ -307,7 +306,7 @@
         break;
       }
       BYTECODE(LOAD_4_CURRENT_CHARS) {
-        ASSERT(sizeof(Char) == 1);
+        DCHECK(sizeof(Char) == 1);
         int pos = current + (insn >> BYTECODE_SHIFT);
         if (pos + 4 > subject.length()) {
           pc = code_base + Load32Aligned(pc + 4);
@@ -324,7 +323,7 @@
         break;
       }
       BYTECODE(LOAD_4_CURRENT_CHARS_UNCHECKED) {
-        ASSERT(sizeof(Char) == 1);
+        DCHECK(sizeof(Char) == 1);
         int pos = current + (insn >> BYTECODE_SHIFT);
         Char next1 = subject[pos + 1];
         Char next2 = subject[pos + 2];
@@ -579,13 +578,13 @@
     Handle<String> subject,
     int* registers,
     int start_position) {
-  ASSERT(subject->IsFlat());
+  DCHECK(subject->IsFlat());
 
   DisallowHeapAllocation no_gc;
   const byte* code_base = code_array->GetDataStartAddress();
   uc16 previous_char = '\n';
   String::FlatContent subject_content = subject->GetFlatContent();
-  if (subject_content.IsAscii()) {
+  if (subject_content.IsOneByte()) {
     Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
     if (start_position != 0) previous_char = subject_vector[start_position - 1];
     return RawMatch(isolate,
@@ -595,7 +594,7 @@
                     start_position,
                     previous_char);
   } else {
-    ASSERT(subject_content.IsTwoByte());
+    DCHECK(subject_content.IsTwoByte());
     Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
     if (start_position != 0) previous_char = subject_vector[start_position - 1];
     return RawMatch(isolate,
diff --git a/src/isolate-inl.h b/src/isolate-inl.h
index 12a861f..b44c4d6 100644
--- a/src/isolate-inl.h
+++ b/src/isolate-inl.h
@@ -5,9 +5,9 @@
 #ifndef V8_ISOLATE_INL_H_
 #define V8_ISOLATE_INL_H_
 
+#include "src/base/utils/random-number-generator.h"
 #include "src/debug.h"
 #include "src/isolate.h"
-#include "src/utils/random-number-generator.h"
 
 namespace v8 {
 namespace internal {
@@ -30,12 +30,13 @@
 }
 
 
-RandomNumberGenerator* Isolate::random_number_generator() {
+base::RandomNumberGenerator* Isolate::random_number_generator() {
   if (random_number_generator_ == NULL) {
     if (FLAG_random_seed != 0) {
-      random_number_generator_ = new RandomNumberGenerator(FLAG_random_seed);
+      random_number_generator_ =
+          new base::RandomNumberGenerator(FLAG_random_seed);
     } else {
-      random_number_generator_ = new RandomNumberGenerator();
+      random_number_generator_ = new base::RandomNumberGenerator();
     }
   }
   return random_number_generator_;
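
[The accessor above constructs the generator on first use and honors FLAG_random_seed for reproducible runs. A minimal sketch of the same lazy, flag-seeded pattern, with std::mt19937 and a plain int standing in for base::RandomNumberGenerator and the V8 flag:]

#include <random>

static int flag_random_seed = 0;  // 0 selects a nondeterministic seed

std::mt19937* random_number_generator_sketch() {
  static std::mt19937* rng = nullptr;
  if (rng == nullptr) {
    if (flag_random_seed != 0) {
      rng = new std::mt19937(flag_random_seed);  // reproducible runs
    } else {
      rng = new std::mt19937(std::random_device{}());
    }
  }
  return rng;
}
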
diff --git a/src/isolate.cc b/src/isolate.cc
index 9ec3c9b..c6a8b81 100644
--- a/src/isolate.cc
+++ b/src/isolate.cc
@@ -7,29 +7,31 @@
 #include "src/v8.h"
 
 #include "src/ast.h"
+#include "src/base/platform/platform.h"
+#include "src/base/sys-info.h"
+#include "src/base/utils/random-number-generator.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
 #include "src/cpu-profiler.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
+#include "src/heap/spaces.h"
+#include "src/heap/sweeper-thread.h"
 #include "src/heap-profiler.h"
 #include "src/hydrogen.h"
+#include "src/ic/stub-cache.h"
 #include "src/isolate-inl.h"
 #include "src/lithium-allocator.h"
 #include "src/log.h"
 #include "src/messages.h"
-#include "src/platform.h"
+#include "src/prototype.h"
 #include "src/regexp-stack.h"
 #include "src/runtime-profiler.h"
 #include "src/sampler.h"
 #include "src/scopeinfo.h"
 #include "src/serialize.h"
 #include "src/simulator.h"
-#include "src/spaces.h"
-#include "src/stub-cache.h"
-#include "src/sweeper-thread.h"
-#include "src/utils/random-number-generator.h"
 #include "src/version.h"
 #include "src/vm-state-inl.h"
 
@@ -46,10 +48,10 @@
 
 
 int ThreadId::GetCurrentThreadId() {
-  int thread_id = Thread::GetThreadLocalInt(Isolate::thread_id_key_);
+  int thread_id = base::Thread::GetThreadLocalInt(Isolate::thread_id_key_);
   if (thread_id == 0) {
     thread_id = AllocateThreadId();
-    Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
+    base::Thread::SetThreadLocalInt(Isolate::thread_id_key_, thread_id);
   }
   return thread_id;
 }
@@ -77,6 +79,7 @@
   save_context_ = NULL;
   catcher_ = NULL;
   top_lookup_result_ = NULL;
+  promise_on_stack_ = NULL;
 
   // These members are re-initialized later after deserialization
   // is complete.
@@ -98,21 +101,19 @@
 }
 
 
-Thread::LocalStorageKey Isolate::isolate_key_;
-Thread::LocalStorageKey Isolate::thread_id_key_;
-Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
+void ThreadLocalTop::Free() {
+  // Match unmatched PopPromise calls.
+  while (promise_on_stack_) isolate_->PopPromise();
+}
+
+
+base::Thread::LocalStorageKey Isolate::isolate_key_;
+base::Thread::LocalStorageKey Isolate::thread_id_key_;
+base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
 #ifdef DEBUG
-Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
+base::Thread::LocalStorageKey PerThreadAssertScopeBase::thread_local_key;
 #endif  // DEBUG
-Mutex Isolate::process_wide_mutex_;
-// TODO(dcarney): Remove with default isolate.
-enum DefaultIsolateStatus {
-  kDefaultIsolateUninitialized,
-  kDefaultIsolateInitialized,
-  kDefaultIsolateCrashIfInitialized
-};
-static DefaultIsolateStatus default_isolate_status_
-    = kDefaultIsolateUninitialized;
+base::LazyMutex Isolate::thread_data_table_mutex_ = LAZY_MUTEX_INITIALIZER;
 Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
 base::Atomic32 Isolate::isolate_counter_ = 0;
 
@@ -121,13 +122,13 @@
   ThreadId thread_id = ThreadId::Current();
   PerIsolateThreadData* per_thread = NULL;
   {
-    LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+    base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
     per_thread = thread_data_table_->Lookup(this, thread_id);
     if (per_thread == NULL) {
       per_thread = new PerIsolateThreadData(this, thread_id);
       thread_data_table_->Insert(per_thread);
     }
-    ASSERT(thread_data_table_->Lookup(this, thread_id) == per_thread);
+    DCHECK(thread_data_table_->Lookup(this, thread_id) == per_thread);
   }
   return per_thread;
 }
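
[FindOrAllocatePerThreadDataForThisThread now takes the lazily created thread_data_table_mutex_ instead of the removed process-wide mutex, and holds it only around the lookup-or-insert. A sketch of the same guarded pattern, with std::mutex and std::map standing in for the V8 types:]

#include <map>
#include <mutex>
#include <utility>

struct PerThreadDataSketch { int isolate_id; int thread_id; };

static std::mutex table_mutex;  // stands in for thread_data_table_mutex_
static std::map<std::pair<int, int>, PerThreadDataSketch*> thread_table;

PerThreadDataSketch* FindOrAllocateSketch(int isolate_id, int thread_id) {
  std::lock_guard<std::mutex> guard(table_mutex);  // scoped, like LockGuard
  auto key = std::make_pair(isolate_id, thread_id);
  auto it = thread_table.find(key);
  if (it == thread_table.end()) {
    it = thread_table
             .emplace(key, new PerThreadDataSketch{isolate_id, thread_id})
             .first;
  }
  return it->second;
}
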
@@ -143,40 +144,26 @@
     ThreadId thread_id) {
   PerIsolateThreadData* per_thread = NULL;
   {
-    LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+    base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
     per_thread = thread_data_table_->Lookup(this, thread_id);
   }
   return per_thread;
 }
 
 
-void Isolate::SetCrashIfDefaultIsolateInitialized() {
-  LockGuard<Mutex> lock_guard(&process_wide_mutex_);
-  CHECK(default_isolate_status_ != kDefaultIsolateInitialized);
-  default_isolate_status_ = kDefaultIsolateCrashIfInitialized;
-}
-
-
-void Isolate::EnsureDefaultIsolate() {
-  LockGuard<Mutex> lock_guard(&process_wide_mutex_);
-  CHECK(default_isolate_status_ != kDefaultIsolateCrashIfInitialized);
-  if (thread_data_table_ == NULL) {
-    isolate_key_ = Thread::CreateThreadLocalKey();
-    thread_id_key_ = Thread::CreateThreadLocalKey();
-    per_isolate_thread_data_key_ = Thread::CreateThreadLocalKey();
+void Isolate::InitializeOncePerProcess() {
+  base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
+  CHECK(thread_data_table_ == NULL);
+  isolate_key_ = base::Thread::CreateThreadLocalKey();
+  thread_id_key_ = base::Thread::CreateThreadLocalKey();
+  per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
 #ifdef DEBUG
-    PerThreadAssertScopeBase::thread_local_key = Thread::CreateThreadLocalKey();
+  PerThreadAssertScopeBase::thread_local_key =
+      base::Thread::CreateThreadLocalKey();
 #endif  // DEBUG
-    thread_data_table_ = new Isolate::ThreadDataTable();
-  }
+  thread_data_table_ = new Isolate::ThreadDataTable();
 }
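
[With the static initializer below deleted, the lazily ensured default isolate is gone: the embedder must call InitializeOncePerProcess exactly once, and the CHECK rejects a second call. A sketch of that contract, with assert() standing in for CHECK:]

#include <cassert>
#include <mutex>

static std::mutex process_init_mutex;
static bool process_initialized = false;

// Called once by the embedder before creating any isolate; a second call
// trips the assert, mirroring CHECK(thread_data_table_ == NULL) above.
void InitializeOncePerProcessSketch() {
  std::lock_guard<std::mutex> guard(process_init_mutex);
  assert(!process_initialized);
  process_initialized = true;
  // ... allocate thread-local keys and the thread data table here ...
}
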
 
-struct StaticInitializer {
-  StaticInitializer() {
-    Isolate::EnsureDefaultIsolate();
-  }
-} static_initializer;
-
 
 Address Isolate::get_address_from_id(Isolate::AddressId id) {
   return isolate_addresses_[id];
@@ -200,16 +187,16 @@
   // Visit the roots from the top for a given thread.
   v->VisitPointer(&thread->pending_exception_);
   v->VisitPointer(&(thread->pending_message_obj_));
-  v->VisitPointer(BitCast<Object**>(&(thread->pending_message_script_)));
-  v->VisitPointer(BitCast<Object**>(&(thread->context_)));
+  v->VisitPointer(bit_cast<Object**>(&(thread->pending_message_script_)));
+  v->VisitPointer(bit_cast<Object**>(&(thread->context_)));
   v->VisitPointer(&thread->scheduled_exception_);
 
   for (v8::TryCatch* block = thread->try_catch_handler();
        block != NULL;
        block = block->next_) {
-    v->VisitPointer(BitCast<Object**>(&(block->exception_)));
-    v->VisitPointer(BitCast<Object**>(&(block->message_obj_)));
-    v->VisitPointer(BitCast<Object**>(&(block->message_script_)));
+    v->VisitPointer(bit_cast<Object**>(&(block->exception_)));
+    v->VisitPointer(bit_cast<Object**>(&(block->message_obj_)));
+    v->VisitPointer(bit_cast<Object**>(&(block->message_script_)));
   }
 
   // Iterate over pointers on native execution stack.
@@ -266,7 +253,7 @@
 
 
 void Isolate::UnregisterTryCatchHandler(v8::TryCatch* that) {
-  ASSERT(thread_local_top()->try_catch_handler() == that);
+  DCHECK(thread_local_top()->try_catch_handler() == that);
   thread_local_top()->set_try_catch_handler(that->next_);
   thread_local_top()->catcher_ = NULL;
 }
@@ -286,14 +273,14 @@
     return stack_trace;
   } else if (stack_trace_nesting_level_ == 1) {
     stack_trace_nesting_level_++;
-    OS::PrintError(
+    base::OS::PrintError(
       "\n\nAttempt to print stack while printing stack (double fault)\n");
-    OS::PrintError(
+    base::OS::PrintError(
       "If you are lucky you may find a partial stack dump on stdout.\n\n");
     incomplete_message_->OutputToStdOut();
     return factory()->empty_string();
   } else {
-    OS::Abort();
+    base::OS::Abort();
     // Unreachable
     return factory()->empty_string();
   }
@@ -311,11 +298,10 @@
   String::WriteToFlat(*trace, buffer, 0, length);
   buffer[length] = '\0';
   // TODO(dcarney): convert buffer to utf8?
-  OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n",
-                 magic, magic2,
-                 static_cast<void*>(object), static_cast<void*>(map),
-                 reinterpret_cast<char*>(buffer));
-  OS::Abort();
+  base::OS::PrintError("Stacktrace (%x-%x) %p %p: %s\n", magic, magic2,
+                       static_cast<void*>(object), static_cast<void*>(map),
+                       reinterpret_cast<char*>(buffer));
+  base::OS::Abort();
 }
 
 
@@ -325,13 +311,10 @@
 // call to this function is encountered it is skipped.  The seen_caller
 // in/out parameter is used to remember if the caller has been seen
 // yet.
-static bool IsVisibleInStackTrace(StackFrame* raw_frame,
+static bool IsVisibleInStackTrace(JSFunction* fun,
                                   Object* caller,
+                                  Object* receiver,
                                   bool* seen_caller) {
-  // Only display JS frames.
-  if (!raw_frame->is_java_script()) return false;
-  JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
-  JSFunction* fun = frame->function();
   if ((fun == caller) && !(*seen_caller)) {
     *seen_caller = true;
     return false;
@@ -344,8 +327,10 @@
   // The --builtins-in-stack-traces command line flag allows including
   // internal call sites in the stack trace for debugging purposes.
   if (!FLAG_builtins_in_stack_traces) {
-    if (frame->receiver()->IsJSBuiltinsObject() ||
-        (fun->IsBuiltin() && !fun->shared()->native())) {
+    if (receiver->IsJSBuiltinsObject()) return false;
+    if (fun->IsBuiltin()) {
+      return fun->shared()->native();
+    } else if (fun->IsFromNativeScript() || fun->IsFromExtensionScript()) {
       return false;
     }
   }
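
[IsVisibleInStackTrace now receives the function and receiver directly instead of a raw frame, since its caller already iterates JavaScript frames. A behavior sketch of the filter, with plain bools standing in for the object queries; the skip-until-caller step between the two hunks is inferred, not shown here:]

struct FrameInfoSketch {
  bool is_caller;             // fun == caller
  bool is_builtin;            // fun->IsBuiltin()
  bool is_native;             // fun->shared()->native()
  bool from_native_script;    // IsFromNativeScript/IsFromExtensionScript
  bool receiver_is_builtins;  // receiver->IsJSBuiltinsObject()
};

bool IsVisibleSketch(const FrameInfoSketch& f, bool* seen_caller,
                     bool builtins_in_stack_traces) {
  if (f.is_caller && !*seen_caller) {
    *seen_caller = true;  // the caller frame itself stays hidden
    return false;
  }
  if (!*seen_caller) return false;  // frames above the caller are skipped
  if (!builtins_in_stack_traces) {
    if (f.receiver_is_builtins) return false;
    if (f.is_builtin) return f.is_native;  // native builtins stay visible
    if (f.from_native_script) return false;
  }
  return true;
}
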
@@ -353,10 +338,23 @@
 }
 
 
-Handle<JSArray> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
-                                                 Handle<Object> caller,
-                                                 int limit) {
+Handle<Object> Isolate::CaptureSimpleStackTrace(Handle<JSObject> error_object,
+                                                Handle<Object> caller) {
+  // Get stack trace limit.
+  Handle<Object> error = Object::GetProperty(
+      this, js_builtins_object(), "$Error").ToHandleChecked();
+  if (!error->IsJSObject()) return factory()->undefined_value();
+
+  Handle<String> stackTraceLimit =
+      factory()->InternalizeUtf8String("stackTraceLimit");
+  DCHECK(!stackTraceLimit.is_null());
+  Handle<Object> stack_trace_limit =
+      JSObject::GetDataProperty(Handle<JSObject>::cast(error),
+                                stackTraceLimit);
+  if (!stack_trace_limit->IsNumber()) return factory()->undefined_value();
+  int limit = FastD2IChecked(stack_trace_limit->Number());
   limit = Max(limit, 0);  // Ensure that limit is not negative.
+
   int initial_size = Min(limit, 10);
   Handle<FixedArray> elements =
       factory()->NewFixedArrayWithHoles(initial_size * 4 + 1);
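
[CaptureSimpleStackTrace now reads Error.stackTraceLimit itself instead of taking a limit parameter, returning undefined when the property is missing or non-numeric. The clamp-and-size arithmetic reduces to a few lines; a sketch, assuming FastD2IChecked behaves like the NaN-safe double-to-int conversion the removed code spelled out:]

#include <algorithm>
#include <cmath>

// Derive the capture limit and initial frame count from a raw JS number.
int CaptureLimitSketch(double stack_trace_limit, int* initial_frames) {
  int limit = std::isnan(stack_trace_limit)
                  ? 0
                  : static_cast<int>(stack_trace_limit);
  limit = std::max(limit, 0);             // negative limits capture nothing
  *initial_frames = std::min(limit, 10);  // backing store: 4 slots/frame + 1
  return limit;
}
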
@@ -369,50 +367,50 @@
   int frames_seen = 0;
   int sloppy_frames = 0;
   bool encountered_strict_function = false;
-  for (StackFrameIterator iter(this);
+  for (JavaScriptFrameIterator iter(this);
        !iter.done() && frames_seen < limit;
        iter.Advance()) {
-    StackFrame* raw_frame = iter.frame();
-    if (IsVisibleInStackTrace(raw_frame, *caller, &seen_caller)) {
-      JavaScriptFrame* frame = JavaScriptFrame::cast(raw_frame);
-      // Set initial size to the maximum inlining level + 1 for the outermost
-      // function.
-      List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-      frame->Summarize(&frames);
-      for (int i = frames.length() - 1; i >= 0; i--) {
-        Handle<JSFunction> fun = frames[i].function();
-        // Filter out frames from other security contexts.
-        if (!this->context()->HasSameSecurityTokenAs(fun->context())) continue;
-        if (cursor + 4 > elements->length()) {
-          int new_capacity = JSObject::NewElementsCapacity(elements->length());
-          Handle<FixedArray> new_elements =
-              factory()->NewFixedArrayWithHoles(new_capacity);
-          for (int i = 0; i < cursor; i++) {
-            new_elements->set(i, elements->get(i));
-          }
-          elements = new_elements;
+    JavaScriptFrame* frame = iter.frame();
+    // Set initial size to the maximum inlining level + 1 for the outermost
+    // function.
+    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+    frame->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0; i--) {
+      Handle<JSFunction> fun = frames[i].function();
+      Handle<Object> recv = frames[i].receiver();
+      // Filter out internal frames that we do not want to show.
+      if (!IsVisibleInStackTrace(*fun, *caller, *recv, &seen_caller)) continue;
+      // Filter out frames from other security contexts.
+      if (!this->context()->HasSameSecurityTokenAs(fun->context())) continue;
+      if (cursor + 4 > elements->length()) {
+        int new_capacity = JSObject::NewElementsCapacity(elements->length());
+        Handle<FixedArray> new_elements =
+            factory()->NewFixedArrayWithHoles(new_capacity);
+        for (int i = 0; i < cursor; i++) {
+          new_elements->set(i, elements->get(i));
         }
-        ASSERT(cursor + 4 <= elements->length());
-
-        Handle<Object> recv = frames[i].receiver();
-        Handle<Code> code = frames[i].code();
-        Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
-        // The stack trace API should not expose receivers and function
-        // objects on frames deeper than the top-most one with a strict
-        // mode function.  The number of sloppy frames is stored as
-        // first element in the result array.
-        if (!encountered_strict_function) {
-          if (fun->shared()->strict_mode() == STRICT) {
-            encountered_strict_function = true;
-          } else {
-            sloppy_frames++;
-          }
-        }
-        elements->set(cursor++, *recv);
-        elements->set(cursor++, *fun);
-        elements->set(cursor++, *code);
-        elements->set(cursor++, *offset);
+        elements = new_elements;
       }
+      DCHECK(cursor + 4 <= elements->length());
+
+
+      Handle<Code> code = frames[i].code();
+      Handle<Smi> offset(Smi::FromInt(frames[i].offset()), this);
+      // The stack trace API should not expose receivers and function
+      // objects on frames deeper than the top-most one with a strict
+      // mode function.  The number of sloppy frames is stored as
+      // first element in the result array.
+      if (!encountered_strict_function) {
+        if (fun->shared()->strict_mode() == STRICT) {
+          encountered_strict_function = true;
+        } else {
+          sloppy_frames++;
+        }
+      }
+      elements->set(cursor++, *recv);
+      elements->set(cursor++, *fun);
+      elements->set(cursor++, *code);
+      elements->set(cursor++, *offset);
       frames_seen++;
     }
   }
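
[Each visible frame contributes four consecutive slots (receiver, function, code, offset) to the elements array, which is regrown before a quadruple would overflow it. A compact sketch of the append loop, with std::vector standing in for FixedArray and void* for the tagged values:]

#include <algorithm>
#include <cstdint>
#include <vector>

struct FrameEntrySketch { void* recv; void* fun; void* code; int offset; };

std::vector<void*> AppendFramesSketch(
    const std::vector<FrameEntrySketch>& frames, int limit) {
  std::vector<void*> elements;
  // Mirrors NewFixedArrayWithHoles(initial_size * 4 + 1).
  elements.reserve(std::min(limit, 10) * 4 + 1);
  elements.push_back(nullptr);  // slot 0 later holds the sloppy-frame count
  int frames_seen = 0;
  for (const FrameEntrySketch& f : frames) {
    if (frames_seen >= limit) break;
    // The real code checks cursor + 4 > elements->length() and copies into a
    // larger FixedArray; vector::push_back performs the same regrowth here.
    elements.push_back(f.recv);
    elements.push_back(f.fun);
    elements.push_back(f.code);
    elements.push_back(
        reinterpret_cast<void*>(static_cast<intptr_t>(f.offset)));
    frames_seen++;
  }
  return elements;
}
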
@@ -426,15 +424,24 @@
 void Isolate::CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object) {
   if (capture_stack_trace_for_uncaught_exceptions_) {
     // Capture stack trace for a detailed exception message.
-    Handle<String> key = factory()->hidden_stack_trace_string();
+    Handle<Name> key = factory()->detailed_stack_trace_symbol();
     Handle<JSArray> stack_trace = CaptureCurrentStackTrace(
         stack_trace_for_uncaught_exceptions_frame_limit_,
         stack_trace_for_uncaught_exceptions_options_);
-    JSObject::SetHiddenProperty(error_object, key, stack_trace);
+    JSObject::SetProperty(error_object, key, stack_trace, STRICT).Assert();
   }
 }
 
 
+void Isolate::CaptureAndSetSimpleStackTrace(Handle<JSObject> error_object,
+                                            Handle<Object> caller) {
+  // Capture stack trace for simple stack trace string formatting.
+  Handle<Name> key = factory()->stack_trace_symbol();
+  Handle<Object> stack_trace = CaptureSimpleStackTrace(error_object, caller);
+  JSObject::SetProperty(error_object, key, stack_trace, STRICT).Assert();
+}
+
+
 Handle<JSArray> Isolate::CaptureCurrentStackTrace(
     int frame_limit, StackTrace::StackTraceOptions options) {
   // Ensure no negative values.
@@ -442,22 +449,22 @@
   Handle<JSArray> stack_trace = factory()->NewJSArray(frame_limit);
 
   Handle<String> column_key =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("column"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("column"));
   Handle<String> line_key =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("lineNumber"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("lineNumber"));
   Handle<String> script_id_key =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptId"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptId"));
   Handle<String> script_name_key =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("scriptName"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptName"));
   Handle<String> script_name_or_source_url_key =
       factory()->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("scriptNameOrSourceURL"));
+          STATIC_CHAR_VECTOR("scriptNameOrSourceURL"));
   Handle<String> function_key =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("functionName"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("functionName"));
   Handle<String> eval_key =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isEval"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("isEval"));
   Handle<String> constructor_key =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("isConstructor"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("isConstructor"));
 
   StackTraceFrameIterator it(this);
   int frames_seen = 0;
@@ -494,55 +501,48 @@
             // tag.
             column_offset += script->column_offset()->value();
           }
-          JSObject::SetOwnPropertyIgnoreAttributes(
+          JSObject::AddProperty(
               stack_frame, column_key,
-              Handle<Smi>(Smi::FromInt(column_offset + 1), this), NONE).Check();
+              handle(Smi::FromInt(column_offset + 1), this), NONE);
         }
-       JSObject::SetOwnPropertyIgnoreAttributes(
+       JSObject::AddProperty(
             stack_frame, line_key,
-            Handle<Smi>(Smi::FromInt(line_number + 1), this), NONE).Check();
+            handle(Smi::FromInt(line_number + 1), this), NONE);
       }
 
       if (options & StackTrace::kScriptId) {
-        Handle<Smi> script_id(script->id(), this);
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            stack_frame, script_id_key, script_id, NONE).Check();
+        JSObject::AddProperty(
+            stack_frame, script_id_key, handle(script->id(), this), NONE);
       }
 
       if (options & StackTrace::kScriptName) {
-        Handle<Object> script_name(script->name(), this);
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            stack_frame, script_name_key, script_name, NONE).Check();
+        JSObject::AddProperty(
+            stack_frame, script_name_key, handle(script->name(), this), NONE);
       }
 
       if (options & StackTrace::kScriptNameOrSourceURL) {
         Handle<Object> result = Script::GetNameOrSourceURL(script);
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            stack_frame, script_name_or_source_url_key, result, NONE).Check();
+        JSObject::AddProperty(
+            stack_frame, script_name_or_source_url_key, result, NONE);
       }
 
       if (options & StackTrace::kFunctionName) {
-        Handle<Object> fun_name(fun->shared()->name(), this);
-        if (!fun_name->BooleanValue()) {
-          fun_name = Handle<Object>(fun->shared()->inferred_name(), this);
-        }
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            stack_frame, function_key, fun_name, NONE).Check();
+        Handle<Object> fun_name(fun->shared()->DebugName(), this);
+        JSObject::AddProperty(stack_frame, function_key, fun_name, NONE);
       }
 
       if (options & StackTrace::kIsEval) {
         Handle<Object> is_eval =
             script->compilation_type() == Script::COMPILATION_TYPE_EVAL ?
                 factory()->true_value() : factory()->false_value();
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            stack_frame, eval_key, is_eval, NONE).Check();
+        JSObject::AddProperty(stack_frame, eval_key, is_eval, NONE);
       }
 
       if (options & StackTrace::kIsConstructor) {
         Handle<Object> is_constructor = (frames[i].is_constructor()) ?
             factory()->true_value() : factory()->false_value();
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            stack_frame, constructor_key, is_constructor, NONE).Check();
+        JSObject::AddProperty(
+            stack_frame, constructor_key, is_constructor, NONE);
       }
 
       FixedArray::cast(stack_trace->elements())->set(frames_seen, *stack_frame);
@@ -571,9 +571,9 @@
     stack_trace_nesting_level_ = 0;
   } else if (stack_trace_nesting_level_ == 1) {
     stack_trace_nesting_level_++;
-    OS::PrintError(
+    base::OS::PrintError(
       "\n\nAttempt to print stack while printing stack (double fault)\n");
-    OS::PrintError(
+    base::OS::PrintError(
       "If you are lucky you may find a partial stack dump on stdout.\n\n");
     incomplete_message_->OutputToFile(out);
   }
@@ -600,7 +600,7 @@
   }
   // The MentionedObjectCache is not GC-proof at the moment.
   DisallowHeapAllocation no_gc;
-  ASSERT(StringStream::IsMentionedObjectCacheClear(this));
+  DCHECK(StringStream::IsMentionedObjectCacheClear(this));
 
   // Avoid printing anything if there are no frames.
   if (c_entry_fp(thread_local_top()) == 0) return;
@@ -639,10 +639,17 @@
 
 void Isolate::ReportFailedAccessCheck(Handle<JSObject> receiver,
                                       v8::AccessType type) {
-  if (!thread_local_top()->failed_access_check_callback_) return;
+  if (!thread_local_top()->failed_access_check_callback_) {
+    Handle<String> message = factory()->InternalizeUtf8String("no access");
+    Handle<Object> error;
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+        this, error, factory()->NewTypeError(message), /* void */);
+    ScheduleThrow(*error);
+    return;
+  }
 
-  ASSERT(receiver->IsAccessCheckNeeded());
-  ASSERT(context());
+  DCHECK(receiver->IsAccessCheckNeeded());
+  DCHECK(context());
 
   // Get the data object from access check info.
   HandleScope scope(this);
@@ -696,7 +703,7 @@
 bool Isolate::MayNamedAccess(Handle<JSObject> receiver,
                              Handle<Object> key,
                              v8::AccessType type) {
-  ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
+  DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
 
   // Skip checks for hidden properties access.  Note, we do not
   // require existence of a context in this case.
@@ -704,7 +711,7 @@
 
   // Check for compatibility between the security tokens in the
   // current lexical context and the accessed object.
-  ASSERT(context());
+  DCHECK(context());
 
   MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
   if (decision != UNKNOWN) return decision == YES;
@@ -735,10 +742,10 @@
 bool Isolate::MayIndexedAccess(Handle<JSObject> receiver,
                                uint32_t index,
                                v8::AccessType type) {
-  ASSERT(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
+  DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
   // Check for compatibility between the security tokens in the
   // current lexical context and the accessed object.
-  ASSERT(context());
+  DCHECK(context());
 
   MayAccessDecision decision = MayAccessPreCheck(this, receiver, type);
   if (decision != UNKNOWN) return decision == YES;
@@ -780,26 +787,7 @@
   Handle<JSObject> exception = factory()->CopyJSObject(boilerplate);
   DoThrow(*exception, NULL);
 
-  // Get stack trace limit.
-  Handle<Object> error = Object::GetProperty(
-      this, js_builtins_object(), "$Error").ToHandleChecked();
-  if (!error->IsJSObject()) return heap()->exception();
-
-  Handle<String> stackTraceLimit =
-      factory()->InternalizeUtf8String("stackTraceLimit");
-  ASSERT(!stackTraceLimit.is_null());
-  Handle<Object> stack_trace_limit =
-      JSObject::GetDataProperty(Handle<JSObject>::cast(error),
-                                stackTraceLimit);
-  if (!stack_trace_limit->IsNumber()) return heap()->exception();
-  double dlimit = stack_trace_limit->Number();
-  int limit = std::isnan(dlimit) ? 0 : static_cast<int>(dlimit);
-
-  Handle<JSArray> stack_trace = CaptureSimpleStackTrace(
-      exception, factory()->undefined_value(), limit);
-  JSObject::SetHiddenProperty(exception,
-                              factory()->hidden_stack_trace_string(),
-                              stack_trace);
+  CaptureAndSetSimpleStackTrace(exception, factory()->undefined_value());
   return heap()->exception();
 }
 
@@ -873,12 +861,6 @@
 }
 
 
-Object* Isolate::ThrowInvalidStringLength() {
-  return Throw(*factory()->NewRangeError(
-      "invalid_string_length", HandleVector<Object>(NULL, 0)));
-}
-
-
 void Isolate::ScheduleThrow(Object* exception) {
   // When scheduling a throw we first throw the exception to get the
   // error reporting if it is uncaught before rescheduling it.
@@ -893,14 +875,14 @@
 
 
 void Isolate::RestorePendingMessageFromTryCatch(v8::TryCatch* handler) {
-  ASSERT(handler == try_catch_handler());
-  ASSERT(handler->HasCaught());
-  ASSERT(handler->rethrow_);
-  ASSERT(handler->capture_message_);
+  DCHECK(handler == try_catch_handler());
+  DCHECK(handler->HasCaught());
+  DCHECK(handler->rethrow_);
+  DCHECK(handler->capture_message_);
   Object* message = reinterpret_cast<Object*>(handler->message_obj_);
   Object* script = reinterpret_cast<Object*>(handler->message_script_);
-  ASSERT(message->IsJSMessageObject() || message->IsTheHole());
-  ASSERT(script->IsScript() || script->IsTheHole());
+  DCHECK(message->IsJSMessageObject() || message->IsTheHole());
+  DCHECK(script->IsScript() || script->IsTheHole());
   thread_local_top()->pending_message_obj_ = message;
   thread_local_top()->pending_message_script_ = script;
   thread_local_top()->pending_message_start_pos_ = handler->message_start_pos_;
@@ -908,6 +890,15 @@
 }
 
 
+void Isolate::CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler) {
+  DCHECK(has_scheduled_exception());
+  if (scheduled_exception() == handler->exception_) {
+    DCHECK(scheduled_exception() != heap()->termination_exception());
+    clear_scheduled_exception();
+  }
+}
+
+
 Object* Isolate::PromoteScheduledException() {
   Object* thrown = scheduled_exception();
   clear_scheduled_exception();
@@ -997,15 +988,15 @@
   if (!obj->IsJSObject()) return false;
 
   Handle<String> error_key =
-      factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("$Error"));
+      factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("$Error"));
   Handle<Object> error_constructor = Object::GetProperty(
       js_builtins_object(), error_key).ToHandleChecked();
 
   DisallowHeapAllocation no_gc;
-  for (Object* prototype = *obj; !prototype->IsNull();
-       prototype = prototype->GetPrototype(this)) {
-    if (!prototype->IsJSObject()) return false;
-    if (JSObject::cast(prototype)->map()->constructor() ==
+  for (PrototypeIterator iter(this, *obj, PrototypeIterator::START_AT_RECEIVER);
+       !iter.IsAtEnd(); iter.Advance()) {
+    if (iter.GetCurrent()->IsJSProxy()) return false;
+    if (JSObject::cast(iter.GetCurrent())->map()->constructor() ==
         *error_constructor) {
       return true;
     }
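
[The ad-hoc GetPrototype loop becomes a PrototypeIterator that starts at the receiver, advances up the chain, and bails out on proxies. A behavior sketch with plain pointers standing in for handles and maps:]

struct HeapObjSketch {
  HeapObjSketch* prototype;    // null terminates the chain
  HeapObjSketch* constructor;  // map()->constructor()
  bool is_proxy;
};

bool IsErrorObjectSketch(HeapObjSketch* obj, HeapObjSketch* error_ctor) {
  // START_AT_RECEIVER: the object itself is examined before its prototypes.
  for (HeapObjSketch* cur = obj; cur != nullptr; cur = cur->prototype) {
    if (cur->is_proxy) return false;  // proxies end the walk, as above
    if (cur->constructor == error_ctor) return true;
  }
  return false;
}
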
@@ -1016,7 +1007,7 @@
 static int fatal_exception_depth = 0;
 
 void Isolate::DoThrow(Object* exception, MessageLocation* location) {
-  ASSERT(!has_pending_exception());
+  DCHECK(!has_pending_exception());
 
   HandleScope scope(this);
   Handle<Object> exception_handle(exception, this);
@@ -1028,19 +1019,19 @@
       ShouldReportException(&can_be_caught_externally, catchable_by_javascript);
   bool report_exception = catchable_by_javascript && should_report_exception;
   bool try_catch_needs_message =
-      can_be_caught_externally && try_catch_handler()->capture_message_ &&
-      !thread_local_top()->rethrowing_message_;
+      can_be_caught_externally && try_catch_handler()->capture_message_;
   bool bootstrapping = bootstrapper()->IsActive();
+  bool rethrowing_message = thread_local_top()->rethrowing_message_;
 
   thread_local_top()->rethrowing_message_ = false;
 
   // Notify debugger of exception.
   if (catchable_by_javascript) {
-    debug()->OnException(exception_handle, report_exception);
+    debug()->OnThrow(exception_handle, report_exception);
   }
 
   // Generate the message if required.
-  if (report_exception || try_catch_needs_message) {
+  if (!rethrowing_message && (report_exception || try_catch_needs_message)) {
     MessageLocation potential_computed_location;
     if (location == NULL) {
       // If no location was specified we use a computed one instead.
@@ -1055,13 +1046,16 @@
       if (capture_stack_trace_for_uncaught_exceptions_) {
         if (IsErrorObject(exception_handle)) {
           // We fetch the stack trace that corresponds to this error object.
-          Handle<String> key = factory()->hidden_stack_trace_string();
-          Object* stack_property =
-              JSObject::cast(*exception_handle)->GetHiddenProperty(key);
-          // Property lookup may have failed.  In this case it's probably not
-          // a valid Error object.
-          if (stack_property->IsJSArray()) {
-            stack_trace_object = Handle<JSArray>(JSArray::cast(stack_property));
+          Handle<Name> key = factory()->detailed_stack_trace_symbol();
+          // Look up as own property.  If the lookup fails, the exception is
+          // probably not a valid Error object.  In that case, we fall through
+          // and capture the stack trace at this throw site.
+          LookupIterator lookup(exception_handle, key,
+                                LookupIterator::OWN_SKIP_INTERCEPTOR);
+          Handle<Object> stack_trace_property;
+          if (Object::GetProperty(&lookup).ToHandle(&stack_trace_property) &&
+              stack_trace_property->IsJSArray()) {
+            stack_trace_object = Handle<JSArray>::cast(stack_trace_property);
           }
         }
         if (stack_trace_object.is_null()) {
@@ -1081,7 +1075,7 @@
             Execution::ToDetailString(this, exception_arg);
         if (!maybe_exception.ToHandle(&exception_arg)) {
           exception_arg = factory()->InternalizeOneByteString(
-              STATIC_ASCII_VECTOR("exception"));
+              STATIC_CHAR_VECTOR("exception"));
         }
       }
       Handle<Object> message_obj = MessageHandler::MakeMessageObject(
@@ -1109,7 +1103,7 @@
                "%s\n\nFROM\n",
                MessageHandler::GetLocalizedMessage(this, message_obj).get());
         PrintCurrentStackTrace(stderr);
-        OS::Abort();
+        base::OS::Abort();
       }
     } else if (location != NULL && !location->script().is_null()) {
       // We are bootstrapping and caught an error where the location is set
@@ -1120,18 +1114,18 @@
       int line_number =
           location->script()->GetLineNumber(location->start_pos()) + 1;
       if (exception->IsString() && location->script()->name()->IsString()) {
-        OS::PrintError(
+        base::OS::PrintError(
             "Extension or internal compilation error: %s in %s at line %d.\n",
             String::cast(exception)->ToCString().get(),
             String::cast(location->script()->name())->ToCString().get(),
             line_number);
       } else if (location->script()->name()->IsString()) {
-        OS::PrintError(
+        base::OS::PrintError(
             "Extension or internal compilation error in %s at line %d.\n",
             String::cast(location->script()->name())->ToCString().get(),
             line_number);
       } else {
-        OS::PrintError("Extension or internal compilation error.\n");
+        base::OS::PrintError("Extension or internal compilation error.\n");
       }
 #ifdef OBJECT_PRINT
       // Since comments and empty lines have been stripped from the source of
@@ -1167,7 +1161,7 @@
 
 
 bool Isolate::HasExternalTryCatch() {
-  ASSERT(has_pending_exception());
+  DCHECK(has_pending_exception());
 
   return (thread_local_top()->catcher_ != NULL) &&
       (try_catch_handler() == thread_local_top()->catcher_);
@@ -1179,7 +1173,7 @@
   // determine which one is closer to the top of the stack.
   Address external_handler_address =
       thread_local_top()->try_catch_handler_address();
-  ASSERT(external_handler_address != NULL);
+  DCHECK(external_handler_address != NULL);
 
   // The exception has been externally caught if and only if there is
   // an external handler which is on top of the top-most try-finally
@@ -1193,7 +1187,7 @@
   StackHandler* handler =
       StackHandler::FromAddress(Isolate::handler(thread_local_top()));
   while (handler != NULL && handler->address() < external_handler_address) {
-    ASSERT(!handler->is_catch());
+    DCHECK(!handler->is_catch());
     if (handler->is_finally()) return true;
 
     handler = handler->next();
@@ -1204,12 +1198,11 @@
 
 
 void Isolate::ReportPendingMessages() {
-  ASSERT(has_pending_exception());
+  DCHECK(has_pending_exception());
   bool can_clear_message = PropagatePendingExceptionToExternalTryCatch();
 
   HandleScope scope(this);
-  if (thread_local_top_.pending_exception_ ==
-          heap()->termination_exception()) {
+  if (thread_local_top_.pending_exception_ == heap()->termination_exception()) {
     // Do nothing: if needed, the exception has been already propagated to
     // v8::TryCatch.
   } else {
@@ -1237,7 +1230,7 @@
 
 
 MessageLocation Isolate::GetMessageLocation() {
-  ASSERT(has_pending_exception());
+  DCHECK(has_pending_exception());
 
   if (thread_local_top_.pending_exception_ != heap()->termination_exception() &&
       thread_local_top_.has_pending_message_ &&
@@ -1255,7 +1248,7 @@
 
 
 bool Isolate::OptionalRescheduleException(bool is_bottom_call) {
-  ASSERT(has_pending_exception());
+  DCHECK(has_pending_exception());
   PropagatePendingExceptionToExternalTryCatch();
 
   bool is_termination_exception =
@@ -1274,7 +1267,7 @@
     // If the exception is externally caught, clear it if there are no
     // JavaScript frames on the way to the C++ frame that has the
     // external handler.
-    ASSERT(thread_local_top()->try_catch_handler_address() != NULL);
+    DCHECK(thread_local_top()->try_catch_handler_address() != NULL);
     Address external_handler_address =
         thread_local_top()->try_catch_handler_address();
     JavaScriptFrameIterator it(this);
@@ -1297,6 +1290,48 @@
 }
 
 
+void Isolate::PushPromise(Handle<JSObject> promise) {
+  ThreadLocalTop* tltop = thread_local_top();
+  PromiseOnStack* prev = tltop->promise_on_stack_;
+  StackHandler* handler = StackHandler::FromAddress(Isolate::handler(tltop));
+  Handle<JSObject> global_handle =
+      Handle<JSObject>::cast(global_handles()->Create(*promise));
+  tltop->promise_on_stack_ = new PromiseOnStack(handler, global_handle, prev);
+}
+
+
+void Isolate::PopPromise() {
+  ThreadLocalTop* tltop = thread_local_top();
+  if (tltop->promise_on_stack_ == NULL) return;
+  PromiseOnStack* prev = tltop->promise_on_stack_->prev();
+  Handle<Object> global_handle = tltop->promise_on_stack_->promise();
+  delete tltop->promise_on_stack_;
+  tltop->promise_on_stack_ = prev;
+  global_handles()->Destroy(global_handle.location());
+}
+
+
+Handle<Object> Isolate::GetPromiseOnStackOnThrow() {
+  Handle<Object> undefined = factory()->undefined_value();
+  ThreadLocalTop* tltop = thread_local_top();
+  if (tltop->promise_on_stack_ == NULL) return undefined;
+  StackHandler* promise_try = tltop->promise_on_stack_->handler();
+  // Find the top-most try-catch handler.
+  StackHandler* handler = StackHandler::FromAddress(Isolate::handler(tltop));
+  do {
+    if (handler == promise_try) {
+      // Mark the pushed try-catch handler to prevent a later duplicate event
+      // triggered with the following reject.
+      return tltop->promise_on_stack_->promise();
+    }
+    handler = handler->next();
+    // Throwing inside a Promise can be intercepted by an inner try-catch, so
+    // we stop at the first try-catch handler.
+  } while (handler != NULL && !handler->is_catch());
+  return undefined;
+}
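
[PushPromise/PopPromise above maintain a linked stack of (handler, promise) pairs, each promise pinned by a global handle, and GetPromiseOnStackOnThrow walks the try-catch handler chain until it hits either the recorded handler or an inner catch. A structural sketch of that stack; the handle bookkeeping is reduced to raw pointers:]

struct PromiseOnStackSketch {
  void* handler;               // try-catch StackHandler address
  void* promise;               // global handle pinning the promise
  PromiseOnStackSketch* prev;  // next-outer entry
};

static PromiseOnStackSketch* promise_on_stack = nullptr;

void PushPromiseSketch(void* handler, void* promise) {
  promise_on_stack =
      new PromiseOnStackSketch{handler, promise, promise_on_stack};
}

void PopPromiseSketch() {
  if (promise_on_stack == nullptr) return;  // tolerates unmatched pops
  PromiseOnStackSketch* prev = promise_on_stack->prev;
  delete promise_on_stack;  // the real code also destroys the global handle
  promise_on_stack = prev;
}
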
+
+
 void Isolate::SetCaptureStackTraceForUncaughtExceptions(
       bool capture,
       int frame_limit,
@@ -1308,12 +1343,12 @@
 
 
 Handle<Context> Isolate::native_context() {
-  return Handle<Context>(context()->global_object()->native_context());
+  return handle(context()->native_context());
 }
 
 
 Handle<Context> Isolate::global_context() {
-  return Handle<Context>(context()->global_object()->global_context());
+  return handle(context()->global_object()->global_context());
 }
 
 
@@ -1356,7 +1391,7 @@
 #ifdef USE_SIMULATOR
   thread_local_top()->simulator_ = Simulator::current(this);
 #endif
-  ASSERT(context() == NULL || context()->IsContext());
+  DCHECK(context() == NULL || context()->IsContext());
   return from + sizeof(ThreadLocalTop);
 }
 
@@ -1370,7 +1405,7 @@
   // TODO(svenpanne) The assertion below would fire if an embedder does not
   // cleanly dispose all Isolates before disposing v8, so we are conservative
   // and leave it out for now.
-  // ASSERT_EQ(NULL, list_);
+  // DCHECK_EQ(NULL, list_);
 }
 
 
@@ -1465,8 +1500,7 @@
       string_tracker_(NULL),
       regexp_stack_(NULL),
       date_cache_(NULL),
-      code_stub_interface_descriptors_(NULL),
-      call_descriptors_(NULL),
+      call_descriptor_data_(NULL),
       // TODO(bmeurer) Initialized lazily because it depends on flags; can
       // be fixed once the default isolate cleanup is done.
       random_number_generator_(NULL),
@@ -1481,7 +1515,12 @@
       sweeper_thread_(NULL),
       num_sweeper_threads_(0),
       stress_deopt_count_(0),
-      next_optimization_id_(0) {
+      next_optimization_id_(0),
+      use_counter_callback_(NULL) {
+  {
+    base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
+    CHECK(thread_data_table_);
+  }
   id_ = base::NoBarrier_AtomicIncrement(&isolate_counter_, 1);
   TRACE_ISOLATE(constructor);
 
@@ -1531,7 +1570,8 @@
 
   Deinit();
 
-  { LockGuard<Mutex> lock_guard(&process_wide_mutex_);
+  {
+    base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
     thread_data_table_->RemoveAllThreads(this);
   }
 
@@ -1549,6 +1589,7 @@
 
 void Isolate::GlobalTearDown() {
   delete thread_data_table_;
+  thread_data_table_ = NULL;
 }
 
 
@@ -1558,6 +1599,8 @@
 
     debug()->Unload();
 
+    FreeThreadResources();
+
     if (concurrent_recompilation_enabled()) {
       optimizing_compiler_thread_->Stop();
       delete optimizing_compiler_thread_;
@@ -1573,11 +1616,12 @@
     sweeper_thread_ = NULL;
 
     if (FLAG_job_based_sweeping &&
-        heap_.mark_compact_collector()->IsConcurrentSweepingInProgress()) {
-      heap_.mark_compact_collector()->WaitUntilSweepingCompleted();
+        heap_.mark_compact_collector()->sweeping_in_progress()) {
+      heap_.mark_compact_collector()->EnsureSweepingCompleted();
     }
 
-    if (FLAG_hydrogen_stats) GetHStatistics()->Print();
+    if (FLAG_turbo_stats) GetTStatistics()->Print("TurboFan");
+    if (FLAG_hydrogen_stats) GetHStatistics()->Print("Hydrogen");
 
     if (FLAG_print_deopt_stress) {
       PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
@@ -1632,8 +1676,8 @@
 
 void Isolate::SetIsolateThreadLocals(Isolate* isolate,
                                      PerIsolateThreadData* data) {
-  Thread::SetThreadLocal(isolate_key_, isolate);
-  Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
+  base::Thread::SetThreadLocal(isolate_key_, isolate);
+  base::Thread::SetThreadLocal(per_isolate_thread_data_key_, data);
 }
 
 
@@ -1644,25 +1688,19 @@
   runtime_zone_.DeleteKeptSegment();
 
   // The entry stack must be empty when we get here.
-  ASSERT(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
+  DCHECK(entry_stack_ == NULL || entry_stack_->previous_item == NULL);
 
   delete entry_stack_;
   entry_stack_ = NULL;
 
-  delete[] assembler_spare_buffer_;
-  assembler_spare_buffer_ = NULL;
-
   delete unicode_cache_;
   unicode_cache_ = NULL;
 
   delete date_cache_;
   date_cache_ = NULL;
 
-  delete[] code_stub_interface_descriptors_;
-  code_stub_interface_descriptors_ = NULL;
-
-  delete[] call_descriptors_;
-  call_descriptors_ = NULL;
+  delete[] call_descriptor_data_;
+  call_descriptor_data_ = NULL;
 
   delete regexp_stack_;
   regexp_stack_ = NULL;
@@ -1738,7 +1776,7 @@
 
 
 bool Isolate::PropagatePendingExceptionToExternalTryCatch() {
-  ASSERT(has_pending_exception());
+  DCHECK(has_pending_exception());
 
   bool has_external_try_catch = HasExternalTryCatch();
   if (!has_external_try_catch) {
@@ -1753,16 +1791,15 @@
   }
 
   thread_local_top_.external_caught_exception_ = true;
-  if (thread_local_top_.pending_exception_ ==
-             heap()->termination_exception()) {
+  if (thread_local_top_.pending_exception_ == heap()->termination_exception()) {
     try_catch_handler()->can_continue_ = false;
     try_catch_handler()->has_terminated_ = true;
     try_catch_handler()->exception_ = heap()->null_value();
   } else {
     v8::TryCatch* handler = try_catch_handler();
-    ASSERT(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
+    DCHECK(thread_local_top_.pending_message_obj_->IsJSMessageObject() ||
            thread_local_top_.pending_message_obj_->IsTheHole());
-    ASSERT(thread_local_top_.pending_message_script_->IsScript() ||
+    DCHECK(thread_local_top_.pending_message_script_->IsScript() ||
            thread_local_top_.pending_message_script_->IsTheHole());
     handler->can_continue_ = true;
     handler->has_terminated_ = false;
@@ -1790,7 +1827,7 @@
 
 
 bool Isolate::Init(Deserializer* des) {
-  ASSERT(state_ != INITIALIZED);
+  DCHECK(state_ != INITIALIZED);
   TRACE_ISOLATE(init);
 
   stress_deopt_count_ = FLAG_deopt_every_n_times;
@@ -1802,7 +1839,7 @@
     // stubs from scratch to get entry hooks, rather than loading the previously
     // generated stubs from disk.
     // If this assert fires, the initialization path has regressed.
-    ASSERT(des == NULL);
+    DCHECK(des == NULL);
   }
 
   // The initialization process does not handle memory exhaustion.
@@ -1838,10 +1875,8 @@
   regexp_stack_ = new RegExpStack();
   regexp_stack_->isolate_ = this;
   date_cache_ = new DateCache();
-  code_stub_interface_descriptors_ =
-      new CodeStubInterfaceDescriptor[CodeStub::NUMBER_OF_IDS];
-  call_descriptors_ =
-      new CallInterfaceDescriptor[NUMBER_OF_CALL_DESCRIPTORS];
+  call_descriptor_data_ =
+      new CallInterfaceDescriptorData[CallDescriptors::NUMBER_OF_DESCRIPTORS];
   cpu_profiler_ = new CpuProfiler(this);
   heap_profiler_ = new HeapProfiler(heap());
 
@@ -1850,7 +1885,8 @@
 
   // Initialize other runtime facilities
 #if defined(USE_SIMULATOR)
-#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
+#if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
   Simulator::Initialize(this);
 #endif
 #endif
@@ -1866,7 +1902,7 @@
   }
 
   // SetUp the object heap.
-  ASSERT(!heap_.HasBeenSetUp());
+  DCHECK(!heap_.HasBeenSetUp());
   if (!heap_.SetUp()) {
     V8::FatalProcessOutOfMemory("heap setup");
     return false;
@@ -1891,9 +1927,9 @@
   builtins_.SetUp(this, create_heap_objects);
 
   if (FLAG_log_internal_timer_events) {
-    set_event_logger(Logger::LogInternalEvents);
+    set_event_logger(Logger::DefaultTimerEventsLogger);
   } else {
-    set_event_logger(Logger::EmptyLogInternalEvents);
+    set_event_logger(Logger::EmptyTimerEventsLogger);
   }
 
   // Set default value if not yet set.
@@ -1901,7 +1937,8 @@
   // once ResourceConstraints becomes an argument to the Isolate constructor.
   if (max_available_threads_ < 1) {
     // Choose the default between 1 and 4.
-    max_available_threads_ = Max(Min(OS::NumberOfProcessorsOnline(), 4), 1);
+    max_available_threads_ =
+        Max(Min(base::SysInfo::NumberOfProcessors(), 4), 1);
   }
 
   if (!FLAG_job_based_sweeping) {
@@ -1957,12 +1994,6 @@
     LOG(this, LogCompiledFunctions());
   }
 
-  // If we are profiling with the Linux perf tool, we need to disable
-  // code relocation.
-  if (FLAG_perf_jit_prof || FLAG_perf_basic_prof) {
-    FLAG_compact_code_space = false;
-  }
-
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, embedder_data_)),
            Internals::kIsolateEmbedderDataOffset);
   CHECK_EQ(static_cast<int>(OFFSET_OF(Isolate, heap_.roots_)),
@@ -1976,7 +2007,7 @@
            Internals::kAmountOfExternalAllocatedMemoryAtLastGlobalGCOffset);
 
   state_ = INITIALIZED;
-  time_millis_at_init_ = OS::TimeCurrentMillis();
+  time_millis_at_init_ = base::OS::TimeCurrentMillis();
 
   if (!create_heap_objects) {
     // Now that the heap is consistent, it's OK to generate the code for the
@@ -1996,26 +2027,8 @@
     CodeStub::GenerateFPStubs(this);
     StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(this);
     StubFailureTrampolineStub::GenerateAheadOfTime(this);
-    // Ensure interface descriptors are initialized even when stubs have been
-    // deserialized out of the snapshot without using the graph builder.
-    FastCloneShallowArrayStub::InstallDescriptors(this);
-    BinaryOpICStub::InstallDescriptors(this);
-    BinaryOpWithAllocationSiteStub::InstallDescriptors(this);
-    CompareNilICStub::InstallDescriptors(this);
-    ToBooleanStub::InstallDescriptors(this);
-    ToNumberStub::InstallDescriptors(this);
-    ArrayConstructorStubBase::InstallDescriptors(this);
-    InternalArrayConstructorStubBase::InstallDescriptors(this);
-    FastNewClosureStub::InstallDescriptors(this);
-    FastNewContextStub::InstallDescriptors(this);
-    NumberToStringStub::InstallDescriptors(this);
-    StringAddStub::InstallDescriptors(this);
-    RegExpConstructResultStub::InstallDescriptors(this);
-    KeyedLoadGenericElementStub::InstallDescriptors(this);
   }
 
-  CallDescriptors::InitializeForIsolate(this);
-
   initialized_from_snapshot_ = (des != NULL);
 
   return true;
@@ -2037,11 +2050,11 @@
   PerIsolateThreadData* current_data = CurrentPerIsolateThreadData();
   if (current_data != NULL) {
     current_isolate = current_data->isolate_;
-    ASSERT(current_isolate != NULL);
+    DCHECK(current_isolate != NULL);
     if (current_isolate == this) {
-      ASSERT(Current() == this);
-      ASSERT(entry_stack_ != NULL);
-      ASSERT(entry_stack_->previous_thread_data == NULL ||
+      DCHECK(Current() == this);
+      DCHECK(entry_stack_ != NULL);
+      DCHECK(entry_stack_->previous_thread_data == NULL ||
              entry_stack_->previous_thread_data->thread_id().Equals(
                  ThreadId::Current()));
       // Same thread re-enters the isolate, no need to re-init anything.
@@ -2050,19 +2063,9 @@
     }
   }
 
-  // Threads can have default isolate set into TLS as Current but not yet have
-  // PerIsolateThreadData for it, as it requires more advanced phase of the
-  // initialization. For example, a thread might be the one that system used for
-  // static initializers - in this case the default isolate is set in TLS but
-  // the thread did not yet Enter the isolate. If PerisolateThreadData is not
-  // there, use the isolate set in TLS.
-  if (current_isolate == NULL) {
-    current_isolate = Isolate::UncheckedCurrent();
-  }
-
   PerIsolateThreadData* data = FindOrAllocatePerThreadDataForThisThread();
-  ASSERT(data != NULL);
-  ASSERT(data->isolate_ == this);
+  DCHECK(data != NULL);
+  DCHECK(data->isolate_ == this);
 
   EntryStackItem* item = new EntryStackItem(current_data,
                                             current_isolate,
@@ -2077,15 +2080,15 @@
 
 
 void Isolate::Exit() {
-  ASSERT(entry_stack_ != NULL);
-  ASSERT(entry_stack_->previous_thread_data == NULL ||
+  DCHECK(entry_stack_ != NULL);
+  DCHECK(entry_stack_->previous_thread_data == NULL ||
          entry_stack_->previous_thread_data->thread_id().Equals(
              ThreadId::Current()));
 
   if (--entry_stack_->entry_count > 0) return;
 
-  ASSERT(CurrentPerIsolateThreadData() != NULL);
-  ASSERT(CurrentPerIsolateThreadData()->isolate_ == this);
+  DCHECK(CurrentPerIsolateThreadData() != NULL);
+  DCHECK(CurrentPerIsolateThreadData()->isolate_ == this);
 
   // Pop the stack.
   EntryStackItem* item = entry_stack_;
@@ -2117,7 +2120,7 @@
   while (deferred_iterator->previous_ != NULL) {
     deferred_iterator = deferred_iterator->previous_;
   }
-  ASSERT(deferred_handles_head_ == deferred_iterator);
+  DCHECK(deferred_handles_head_ == deferred_iterator);
 #endif
   if (deferred_handles_head_ == deferred) {
     deferred_handles_head_ = deferred_handles_head_->next_;
@@ -2137,6 +2140,12 @@
 }
 
 
+HStatistics* Isolate::GetTStatistics() {
+  if (tstatistics() == NULL) set_tstatistics(new HStatistics());
+  return tstatistics();
+}
+
+
 HTracer* Isolate::GetHTracer() {
   if (htracer() == NULL) set_htracer(new HTracer(id()));
   return htracer();
@@ -2173,7 +2182,7 @@
 bool Isolate::IsFastArrayConstructorPrototypeChainIntact() {
   Map* root_array_map =
       get_initial_js_array_map(GetInitialFastElementsKind());
-  ASSERT(root_array_map != NULL);
+  DCHECK(root_array_map != NULL);
   JSObject* initial_array_proto = JSObject::cast(*initial_array_prototype());
 
   // Check that the array prototype hasn't been altered WRT empty elements.
@@ -2184,26 +2193,22 @@
 
   // Check that the object prototype hasn't been altered WRT empty elements.
   JSObject* initial_object_proto = JSObject::cast(*initial_object_prototype());
-  Object* root_array_map_proto = initial_array_proto->GetPrototype();
-  if (root_array_map_proto != initial_object_proto) return false;
+  PrototypeIterator iter(this, initial_array_proto);
+  if (iter.IsAtEnd() || iter.GetCurrent() != initial_object_proto) {
+    return false;
+  }
   if (initial_object_proto->elements() != heap()->empty_fixed_array()) {
     return false;
   }
 
-  return initial_object_proto->GetPrototype()->IsNull();
+  iter.Advance();
+  return iter.IsAtEnd();
 }
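The rewrite above swaps raw GetPrototype() chaining for PrototypeIterator's explicit IsAtEnd()/GetCurrent()/Advance() protocol. A generic walk using the same calls, as a sketch (HasInPrototypeChainSketch is a hypothetical helper, not part of this patch):

    // Walk the prototype chain link by link until the end marker.
    bool HasInPrototypeChainSketch(Isolate* isolate, JSObject* object,
                                   Object* target) {
      for (PrototypeIterator iter(isolate, object); !iter.IsAtEnd();
           iter.Advance()) {
        if (iter.GetCurrent() == target) return true;
      }
      return false;
    }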
 
 
-CodeStubInterfaceDescriptor*
-    Isolate::code_stub_interface_descriptor(int index) {
-  return code_stub_interface_descriptors_ + index;
-}
-
-
-CallInterfaceDescriptor*
-    Isolate::call_descriptor(CallDescriptorKey index) {
-  ASSERT(0 <= index && index < NUMBER_OF_CALL_DESCRIPTORS);
-  return &call_descriptors_[index];
+CallInterfaceDescriptorData* Isolate::call_descriptor_data(int index) {
+  DCHECK(0 <= index && index < CallDescriptors::NUMBER_OF_DESCRIPTORS);
+  return &call_descriptor_data_[index];
 }
 
 
@@ -2230,11 +2235,11 @@
     static const char* nested[] = {
       "for", "for_api", "for_intern", "keyFor", "private_api", "private_intern"
     };
-    for (unsigned i = 0; i < ARRAY_SIZE(nested); ++i) {
+    for (unsigned i = 0; i < arraysize(nested); ++i) {
       Handle<String> name = factory()->InternalizeUtf8String(nested[i]);
       Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
       JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8);
-      JSObject::SetProperty(registry, name, obj, NONE, STRICT).Assert();
+      JSObject::SetProperty(registry, name, obj, STRICT).Assert();
     }
   }
   return Handle<JSObject>::cast(factory()->symbol_registry());
@@ -2275,10 +2280,10 @@
 
 
 void Isolate::EnqueueMicrotask(Handle<Object> microtask) {
-  ASSERT(microtask->IsJSFunction() || microtask->IsCallHandlerInfo());
+  DCHECK(microtask->IsJSFunction() || microtask->IsCallHandlerInfo());
   Handle<FixedArray> queue(heap()->microtask_queue(), this);
   int num_tasks = pending_microtask_count();
-  ASSERT(num_tasks <= queue->length());
+  DCHECK(num_tasks <= queue->length());
   if (num_tasks == 0) {
     queue = factory()->NewFixedArray(8);
     heap()->set_microtask_queue(*queue);
@@ -2286,19 +2291,19 @@
     queue = FixedArray::CopySize(queue, num_tasks * 2);
     heap()->set_microtask_queue(*queue);
   }
-  ASSERT(queue->get(num_tasks)->IsUndefined());
+  DCHECK(queue->get(num_tasks)->IsUndefined());
   queue->set(num_tasks, *microtask);
   set_pending_microtask_count(num_tasks + 1);
 }
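EnqueueMicrotask grows its backing store by doubling: 8 slots on first use, then CopySize to twice the task count when full, so N enqueues copy O(N) elements in total. The same policy reduced to standalone C++ (a sketch with std::vector standing in for FixedArray):

    #include <vector>

    // Mirror of the storage management above: initial capacity 8, double
    // when full, explicit count of live tasks.
    template <typename T>
    void Enqueue(std::vector<T>* queue, int* num_tasks, const T& task) {
      if (*num_tasks == 0) {
        queue->resize(8);                          // NewFixedArray(8)
      } else if (*num_tasks == static_cast<int>(queue->size())) {
        queue->resize(*num_tasks * 2);             // CopySize(queue, n * 2)
      }
      (*queue)[(*num_tasks)++] = task;
    }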
 
 
 void Isolate::RunMicrotasks() {
-  // TODO(adamk): This ASSERT triggers in mjsunit tests which
-  // call the %RunMicrotasks runtime function. But it should
-  // never happen outside of tests, so it would be nice to
-  // uncomment it.
+  // %RunMicrotasks may be called in mjsunit tests, which violates
+  // this assertion, hence the check for --allow-natives-syntax.
+  // TODO(adamk): However, this also fails some layout tests.
   //
-  // ASSERT(handle_scope_implementer()->CallDepthIsZero());
+  // DCHECK(FLAG_allow_natives_syntax ||
+  //        handle_scope_implementer()->CallDepthIsZero());
 
   // Increase call depth to prevent recursive callbacks.
   v8::Isolate::SuppressMicrotaskExecutionScope suppress(
@@ -2308,7 +2313,7 @@
     HandleScope scope(this);
     int num_tasks = pending_microtask_count();
     Handle<FixedArray> queue(heap()->microtask_queue(), this);
-    ASSERT(num_tasks <= queue->length());
+    DCHECK(num_tasks <= queue->length());
     set_pending_microtask_count(0);
     heap()->set_microtask_queue(heap()->empty_fixed_array());
 
@@ -2318,14 +2323,15 @@
       if (microtask->IsJSFunction()) {
         Handle<JSFunction> microtask_function =
             Handle<JSFunction>::cast(microtask);
-        Handle<Object> exception;
-        MaybeHandle<Object> result = Execution::TryCall(
-            microtask_function, factory()->undefined_value(),
-            0, NULL, &exception);
+        SaveContext save(this);
+        set_context(microtask_function->context()->native_context());
+        MaybeHandle<Object> maybe_exception;
+        MaybeHandle<Object> result =
+            Execution::TryCall(microtask_function, factory()->undefined_value(),
+                               0, NULL, &maybe_exception);
         // If execution is terminating, just bail out.
-        if (result.is_null() &&
-            !exception.is_null() &&
-            *exception == heap()->termination_exception()) {
+        Handle<Object> exception;
+        if (result.is_null() && maybe_exception.is_null()) {
           // Clear out any remaining callbacks in the queue.
           heap()->set_microtask_queue(heap()->empty_fixed_array());
           set_pending_microtask_count(0);
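Two behavioral changes land in this hunk: each microtask now runs in the native context it was created in (the SaveContext/set_context pair), and termination is detected by a null result together with a null exception, at which point the rest of the queue is dropped. From the embedder's side the queue drains through the public API, roughly as below (a sketch; the surrounding script execution is elided):

    // Embedder-driven pumping of the queue maintained above.
    void PumpMicrotasks(v8::Isolate* isolate) {
      isolate->SetAutorunMicrotasks(false);  // take over scheduling
      // ... run script that schedules work, e.g. Promise callbacks ...
      isolate->RunMicrotasks();              // drain the pending tasks
    }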
@@ -2344,6 +2350,19 @@
 }
 
 
+void Isolate::SetUseCounterCallback(v8::Isolate::UseCounterCallback callback) {
+  DCHECK(!use_counter_callback_);
+  use_counter_callback_ = callback;
+}
+
+
+void Isolate::CountUsage(v8::Isolate::UseCounterFeature feature) {
+  if (use_counter_callback_) {
+    use_counter_callback_(reinterpret_cast<v8::Isolate*>(this), feature);
+  }
+}
+
+
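SetUseCounterCallback and CountUsage give the embedder a feature-usage telemetry hook; the DCHECK above makes installation one-shot. Wiring it up looks roughly like this (RecordHistogram is a hypothetical embedder function):

    // Forward V8 feature-usage ticks into the embedder's metrics.
    static void OnUseCounter(v8::Isolate* isolate,
                             v8::Isolate::UseCounterFeature feature) {
      RecordHistogram("V8.UseCounter", static_cast<int>(feature));  // hypothetical
    }

    void InstallUseCounter(v8::Isolate* isolate) {
      isolate->SetUseCounterCallback(OnUseCounter);  // may be set only once
    }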
 bool StackLimitCheck::JsHasOverflowed() const {
   StackGuard* stack_guard = isolate_->stack_guard();
 #ifdef USE_SIMULATOR
@@ -2352,8 +2371,19 @@
   uintptr_t jssp = reinterpret_cast<uintptr_t>(jssp_address);
   if (jssp < stack_guard->real_jslimit()) return true;
 #endif  // USE_SIMULATOR
-  return reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit();
+  return GetCurrentStackPosition() < stack_guard->real_climit();
 }
 
 
+bool PostponeInterruptsScope::Intercept(StackGuard::InterruptFlag flag) {
+  // First check whether the previous scope intercepts.
+  if (prev_ && prev_->Intercept(flag)) return true;
+  // Then check whether this scope intercepts.
+  if ((flag & intercept_mask_)) {
+    intercepted_flags_ |= flag;
+    return true;
+  }
+  return false;
+}
+
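Intercept recurses to the bottom of the scope chain first, so the outermost postponing scope claims an interrupt before any inner one; a scope claims a flag only when its mask covers it. The same logic in a standalone model (sketch):

    // Offer the interrupt to the outermost scope first; record the flag
    // in whichever scope claims it.
    struct ScopeSketch {
      int intercept_mask;
      int intercepted_flags = 0;
      ScopeSketch* prev = nullptr;

      bool Intercept(int flag) {
        if (prev != nullptr && prev->Intercept(flag)) return true;
        if ((flag & intercept_mask) != 0) {
          intercepted_flags |= flag;
          return true;
        }
        return false;
      }
    };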
 } }  // namespace v8::internal
diff --git a/src/isolate.h b/src/isolate.h
index 7de7303..24d4b08 100644
--- a/src/isolate.h
+++ b/src/isolate.h
@@ -11,27 +11,32 @@
 #include "src/base/atomicops.h"
 #include "src/builtins.h"
 #include "src/contexts.h"
+#include "src/date.h"
 #include "src/execution.h"
 #include "src/frames.h"
-#include "src/date.h"
 #include "src/global-handles.h"
 #include "src/handles.h"
 #include "src/hashmap.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/optimizing-compiler-thread.h"
 #include "src/regexp-stack.h"
-#include "src/runtime-profiler.h"
 #include "src/runtime.h"
+#include "src/runtime-profiler.h"
 #include "src/zone.h"
 
 namespace v8 {
+
+namespace base {
+class RandomNumberGenerator;
+}
+
 namespace internal {
 
 class Bootstrapper;
-struct CallInterfaceDescriptor;
+class CallInterfaceDescriptorData;
 class CodeGenerator;
 class CodeRange;
-struct CodeStubInterfaceDescriptor;
+class CodeStubDescriptor;
 class CodeTracer;
 class CompilationCache;
 class ConsStringIteratorOp;
@@ -53,9 +58,7 @@
 class InlineRuntimeFunctionsTable;
 class InnerPointerToCodeCache;
 class MaterializedObjectStore;
-class NoAllocationStringAllocator;
 class CodeAgingHelper;
-class RandomNumberGenerator;
 class RegExpStack;
 class SaveContext;
 class StringTracker;
@@ -75,10 +78,12 @@
 
 class Debug;
 class Debugger;
+class PromiseOnStack;
 
 #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
     !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
-    !defined(__mips__) && V8_TARGET_ARCH_MIPS
+    !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+    !defined(__mips__) && V8_TARGET_ARCH_MIPS64
 class Redirection;
 class Simulator;
 #endif
@@ -102,19 +107,22 @@
 
 // Macros for MaybeHandle.
 
-#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T)  \
-  do {                                                       \
-    Isolate* __isolate__ = (isolate);                        \
-    if (__isolate__->has_scheduled_exception()) {            \
-      __isolate__->PromoteScheduledException();              \
-      return MaybeHandle<T>();                               \
-    }                                                        \
+#define RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, value) \
+  do {                                                      \
+    Isolate* __isolate__ = (isolate);                       \
+    if (__isolate__->has_scheduled_exception()) {           \
+      __isolate__->PromoteScheduledException();             \
+      return value;                                         \
+    }                                                       \
   } while (false)
 
+#define RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, T) \
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, MaybeHandle<T>())
+
 #define ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, value)  \
   do {                                                               \
     if (!(call).ToHandle(&dst)) {                                    \
-      ASSERT((isolate)->has_pending_exception());                    \
+      DCHECK((isolate)->has_pending_exception());                    \
       return value;                                                  \
     }                                                                \
   } while (false)
@@ -126,10 +134,26 @@
 #define ASSIGN_RETURN_ON_EXCEPTION(isolate, dst, call, T)  \
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, dst, call, MaybeHandle<T>())
 
+#define THROW_NEW_ERROR(isolate, call, T)                                    \
+  do {                                                                       \
+    Handle<Object> __error__;                                                \
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, __error__, isolate->factory()->call, \
+                               T);                                           \
+    return isolate->Throw<T>(__error__);                                     \
+  } while (false)
+
+#define THROW_NEW_ERROR_RETURN_FAILURE(isolate, call)             \
+  do {                                                            \
+    Handle<Object> __error__;                                     \
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, __error__,        \
+                                       isolate->factory()->call); \
+    return isolate->Throw(*__error__);                            \
+  } while (false)
+
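Both macros construct the error via ASSIGN_RETURN_ON_EXCEPTION first, so an exception raised while building the error object itself propagates instead of being lost. Typical call sites, sketched (Runtime_Example and the length check are illustrative; the factory calls appear elsewhere in this patch):

    // In a MaybeHandle-returning path:
    MaybeHandle<Object> CheckLengthSketch(Isolate* isolate, int length) {
      if (length > String::kMaxLength) {
        THROW_NEW_ERROR(isolate, NewInvalidStringLengthError(), Object);
      }
      return isolate->factory()->undefined_value();
    }

    // In a runtime function returning a raw Object* failure sentinel:
    RUNTIME_FUNCTION(Runtime_Example) {
      HandleScope scope(isolate);
      THROW_NEW_ERROR_RETURN_FAILURE(
          isolate,
          NewTypeError("illegal_invocation", HandleVector<Object>(NULL, 0)));
    }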
 #define RETURN_ON_EXCEPTION_VALUE(isolate, call, value)            \
   do {                                                             \
     if ((call).is_null()) {                                        \
-      ASSERT((isolate)->has_pending_exception());                  \
+      DCHECK((isolate)->has_pending_exception());                  \
       return value;                                                \
     }                                                              \
   } while (false)
@@ -233,11 +257,7 @@
         v8::TryCatch::JSStackComparableAddress(try_catch_handler()));
   }
 
-  void Free() {
-    ASSERT(!has_pending_message_);
-    ASSERT(!external_caught_exception_);
-    ASSERT(try_catch_handler_ == NULL);
-  }
+  void Free();
 
   Isolate* isolate_;
   // The context where the current execution method is created and for variable
@@ -263,6 +283,11 @@
   Address c_entry_fp_;  // the frame pointer of the top c entry frame
   Address handler_;   // try-blocks are chained through the stack
 
+  // Throwing an exception may cause a Promise rejection.  For this purpose
+  // we keep track of a stack of nested promises and the corresponding
+  // try-catch handlers.
+  PromiseOnStack* promise_on_stack_;
+
 #ifdef USE_SIMULATOR
   Simulator* simulator_;
 #endif
@@ -290,7 +315,8 @@
 
 #if V8_TARGET_ARCH_ARM && !defined(__arm__) || \
     V8_TARGET_ARCH_ARM64 && !defined(__aarch64__) || \
-    V8_TARGET_ARCH_MIPS && !defined(__mips__)
+    V8_TARGET_ARCH_MIPS && !defined(__mips__) || \
+    V8_TARGET_ARCH_MIPS64 && !defined(__mips__)
 
 #define ISOLATE_INIT_SIMULATOR_LIST(V)                                         \
   V(bool, simulator_initialized, false)                                        \
@@ -332,8 +358,6 @@
   V(int, serialize_partial_snapshot_cache_capacity, 0)                         \
   V(Object**, serialize_partial_snapshot_cache, NULL)                          \
   /* Assembler state. */                                                       \
-  /* A previously allocated buffer of kMinimalBufferSize bytes, or NULL. */    \
-  V(byte*, assembler_spare_buffer, NULL)                                       \
   V(FatalErrorCallback, exception_behavior, NULL)                              \
   V(LogEventCallback, event_logger, NULL)                                      \
   V(AllowCodeGenerationFromStringsCallback, allow_code_gen_callback, NULL)     \
@@ -349,12 +373,10 @@
   V(Object*, string_stream_current_security_token, NULL)                       \
   /* Serializer state. */                                                      \
   V(ExternalReferenceTable*, external_reference_table, NULL)                   \
-  /* AstNode state. */                                                         \
-  V(int, ast_node_id, 0)                                                       \
-  V(unsigned, ast_node_count, 0)                                               \
   V(int, pending_microtask_count, 0)                                           \
   V(bool, autorun_microtasks, true)                                            \
   V(HStatistics*, hstatistics, NULL)                                           \
+  V(HStatistics*, tstatistics, NULL)                                           \
   V(HTracer*, htracer, NULL)                                                   \
   V(CodeTracer*, code_tracer, NULL)                                            \
   V(bool, fp_stubs_generated, false)                                           \
@@ -389,7 +411,8 @@
           thread_state_(NULL),
 #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
     !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
-    !defined(__mips__) && V8_TARGET_ARCH_MIPS
+    !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+    !defined(__mips__) && V8_TARGET_ARCH_MIPS64
           simulator_(NULL),
 #endif
           next_(NULL),
@@ -403,7 +426,8 @@
 
 #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
     !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
-    !defined(__mips__) && V8_TARGET_ARCH_MIPS
+    !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+    !defined(__mips__) && V8_TARGET_ARCH_MIPS64
     FIELD_ACCESSOR(Simulator*, simulator)
 #endif
 
@@ -419,7 +443,8 @@
 
 #if !defined(__arm__) && V8_TARGET_ARCH_ARM || \
     !defined(__aarch64__) && V8_TARGET_ARCH_ARM64 || \
-    !defined(__mips__) && V8_TARGET_ARCH_MIPS
+    !defined(__mips__) && V8_TARGET_ARCH_MIPS || \
+    !defined(__mips__) && V8_TARGET_ARCH_MIPS64
     Simulator* simulator_;
 #endif
 
@@ -441,23 +466,33 @@
     kIsolateAddressCount
   };
 
+  static void InitializeOncePerProcess();
+
   // Returns the PerIsolateThreadData for the current thread (or NULL if one is
   // not currently set).
   static PerIsolateThreadData* CurrentPerIsolateThreadData() {
     return reinterpret_cast<PerIsolateThreadData*>(
-        Thread::GetThreadLocal(per_isolate_thread_data_key_));
+        base::Thread::GetThreadLocal(per_isolate_thread_data_key_));
   }
 
   // Returns the isolate inside which the current thread is running.
   INLINE(static Isolate* Current()) {
     Isolate* isolate = reinterpret_cast<Isolate*>(
-        Thread::GetExistingThreadLocal(isolate_key_));
-    ASSERT(isolate != NULL);
+        base::Thread::GetExistingThreadLocal(isolate_key_));
+    DCHECK(isolate != NULL);
     return isolate;
   }
 
   INLINE(static Isolate* UncheckedCurrent()) {
-    return reinterpret_cast<Isolate*>(Thread::GetThreadLocal(isolate_key_));
+    return reinterpret_cast<Isolate*>(
+        base::Thread::GetThreadLocal(isolate_key_));
+  }
+
+  // Like UncheckedCurrent, but skips the check that |isolate_key_| was
+  // initialized. Callers have to ensure that themselves.
+  INLINE(static Isolate* UnsafeCurrent()) {
+    return reinterpret_cast<Isolate*>(
+        base::Thread::GetThreadLocal(isolate_key_));
   }
 
   // Usually called by Init(), but can be called early e.g. to allow
@@ -481,13 +516,6 @@
 
   static void GlobalTearDown();
 
-  static void SetCrashIfDefaultIsolateInitialized();
-  // Ensures that process-wide resources and the default isolate have been
-  // allocated. It is only necessary to call this method in rare cases, for
-  // example if you are using V8 from within the body of a static initializer.
-  // Safe to call multiple times.
-  static void EnsureDefaultIsolate();
-
   // Find the PerThread for this particular (isolate, thread) combination
   // If one does not yet exist, return null.
   PerIsolateThreadData* FindPerThreadDataForThisThread();
@@ -499,26 +527,26 @@
   // Returns the key used to store the pointer to the current isolate.
   // Used internally for V8 threads that do not execute JavaScript but still
   // are part of the domain of an isolate (like the context switcher).
-  static Thread::LocalStorageKey isolate_key() {
+  static base::Thread::LocalStorageKey isolate_key() {
     return isolate_key_;
   }
 
   // Returns the key used to store process-wide thread IDs.
-  static Thread::LocalStorageKey thread_id_key() {
+  static base::Thread::LocalStorageKey thread_id_key() {
     return thread_id_key_;
   }
 
-  static Thread::LocalStorageKey per_isolate_thread_data_key();
+  static base::Thread::LocalStorageKey per_isolate_thread_data_key();
 
   // Mutex for serializing access to break control structures.
-  RecursiveMutex* break_access() { return &break_access_; }
+  base::RecursiveMutex* break_access() { return &break_access_; }
 
   Address get_address_from_id(AddressId id);
 
   // Access to top context (where the current function object was created).
   Context* context() { return thread_local_top_.context_; }
   void set_context(Context* context) {
-    ASSERT(context == NULL || context->IsContext());
+    DCHECK(context == NULL || context->IsContext());
     thread_local_top_.context_ = context;
   }
   Context** context_address() { return &thread_local_top_.context_; }
@@ -530,18 +558,18 @@
 
   // Interface to pending exception.
   Object* pending_exception() {
-    ASSERT(has_pending_exception());
-    ASSERT(!thread_local_top_.pending_exception_->IsException());
+    DCHECK(has_pending_exception());
+    DCHECK(!thread_local_top_.pending_exception_->IsException());
     return thread_local_top_.pending_exception_;
   }
 
   void set_pending_exception(Object* exception_obj) {
-    ASSERT(!exception_obj->IsException());
+    DCHECK(!exception_obj->IsException());
     thread_local_top_.pending_exception_ = exception_obj;
   }
 
   void clear_pending_exception() {
-    ASSERT(!thread_local_top_.pending_exception_->IsException());
+    DCHECK(!thread_local_top_.pending_exception_->IsException());
     thread_local_top_.pending_exception_ = heap_.the_hole_value();
   }
 
@@ -550,7 +578,7 @@
   }
 
   bool has_pending_exception() {
-    ASSERT(!thread_local_top_.pending_exception_->IsException());
+    DCHECK(!thread_local_top_.pending_exception_->IsException());
     return !thread_local_top_.pending_exception_->IsTheHole();
   }
 
@@ -591,16 +619,16 @@
   }
 
   Object* scheduled_exception() {
-    ASSERT(has_scheduled_exception());
-    ASSERT(!thread_local_top_.scheduled_exception_->IsException());
+    DCHECK(has_scheduled_exception());
+    DCHECK(!thread_local_top_.scheduled_exception_->IsException());
     return thread_local_top_.scheduled_exception_;
   }
   bool has_scheduled_exception() {
-    ASSERT(!thread_local_top_.scheduled_exception_->IsException());
+    DCHECK(!thread_local_top_.scheduled_exception_->IsException());
     return thread_local_top_.scheduled_exception_ != heap_.the_hole_value();
   }
   void clear_scheduled_exception() {
-    ASSERT(!thread_local_top_.scheduled_exception_->IsException());
+    DCHECK(!thread_local_top_.scheduled_exception_->IsException());
     thread_local_top_.scheduled_exception_ = heap_.the_hole_value();
   }
 
@@ -643,7 +671,7 @@
   }
 
   // Returns the global proxy object of the current context.
-  Object* global_proxy() {
+  JSObject* global_proxy() {
     return context()->global_proxy();
   }
 
@@ -660,6 +688,11 @@
   // JavaScript code.  If an exception is scheduled true is returned.
   bool OptionalRescheduleException(bool is_bottom_call);
 
+  // Push and pop a promise and the current try-catch handler.
+  void PushPromise(Handle<JSObject> promise);
+  void PopPromise();
+  Handle<Object> GetPromiseOnStackOnThrow();
+
   class ExceptionScope {
    public:
     explicit ExceptionScope(Isolate* isolate) :
@@ -697,11 +730,11 @@
   Handle<JSArray> CaptureCurrentStackTrace(
       int frame_limit,
       StackTrace::StackTraceOptions options);
-
-  Handle<JSArray> CaptureSimpleStackTrace(Handle<JSObject> error_object,
-                                          Handle<Object> caller,
-                                          int limit);
+  Handle<Object> CaptureSimpleStackTrace(Handle<JSObject> error_object,
+                                         Handle<Object> caller);
   void CaptureAndSetDetailedStackTrace(Handle<JSObject> error_object);
+  void CaptureAndSetSimpleStackTrace(Handle<JSObject> error_object,
+                                     Handle<Object> caller);
 
   // Returns if the top context may access the given global object. If
   // the result is false, the pending exception is guaranteed to be
@@ -736,11 +769,12 @@
   // Re-set pending message, script and positions reported to the TryCatch
   // back to the TLS for re-use when rethrowing.
   void RestorePendingMessageFromTryCatch(v8::TryCatch* handler);
+  // Un-schedule an exception that was caught by a TryCatch handler.
+  void CancelScheduledExceptionFromTryCatch(v8::TryCatch* handler);
   void ReportPendingMessages();
   // Return pending location if any or unfilled structure.
   MessageLocation GetMessageLocation();
   Object* ThrowIllegalOperation();
-  Object* ThrowInvalidStringLength();
 
   // Promote a scheduled exception to pending. Asserts has_scheduled_exception.
   Object* PromoteScheduledException();
@@ -790,11 +824,11 @@
   // Accessors.
 #define GLOBAL_ACCESSOR(type, name, initialvalue)                       \
   inline type name() const {                                            \
-    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
+    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
     return name##_;                                                     \
   }                                                                     \
   inline void set_##name(type value) {                                  \
-    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
+    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
     name##_ = value;                                                    \
   }
   ISOLATE_INIT_LIST(GLOBAL_ACCESSOR)
@@ -802,7 +836,7 @@
 
 #define GLOBAL_ARRAY_ACCESSOR(type, name, length)                       \
   inline type* name() {                                                 \
-    ASSERT(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
+    DCHECK(OFFSET_OF(Isolate, name##_) == name##_debug_offset_);        \
     return &(name##_)[0];                                               \
   }
   ISOLATE_INIT_ARRAY_LIST(GLOBAL_ARRAY_ACCESSOR)
@@ -810,10 +844,10 @@
 
 #define NATIVE_CONTEXT_FIELD_ACCESSOR(index, type, name)            \
   Handle<type> name() {                                             \
-    return Handle<type>(context()->native_context()->name(), this); \
+    return Handle<type>(native_context()->name(), this);            \
   }                                                                 \
   bool is_##name(type* value) {                                     \
-    return context()->native_context()->is_##name(value);           \
+    return native_context()->is_##name(value);                      \
   }
   NATIVE_CONTEXT_FIELDS(NATIVE_CONTEXT_FIELD_ACCESSOR)
 #undef NATIVE_CONTEXT_FIELD_ACCESSOR
@@ -822,7 +856,7 @@
   Counters* counters() {
     // Call InitializeLoggingAndCounters() if logging is needed before
     // the isolate is fully initialized.
-    ASSERT(counters_ != NULL);
+    DCHECK(counters_ != NULL);
     return counters_;
   }
   CodeRange* code_range() { return code_range_; }
@@ -831,7 +865,7 @@
   Logger* logger() {
     // Call InitializeLoggingAndCounters() if logging is needed before
     // the isolate is fully initialized.
-    ASSERT(logger_ != NULL);
+    DCHECK(logger_ != NULL);
     return logger_;
   }
   StackGuard* stack_guard() { return &stack_guard_; }
@@ -864,7 +898,7 @@
   HandleScopeData* handle_scope_data() { return &handle_scope_data_; }
 
   HandleScopeImplementer* handle_scope_implementer() {
-    ASSERT(handle_scope_implementer_);
+    DCHECK(handle_scope_implementer_);
     return handle_scope_implementer_;
   }
   Zone* runtime_zone() { return &runtime_zone_; }
@@ -953,11 +987,11 @@
   THREAD_LOCAL_TOP_ACCESSOR(StateTag, current_vm_state)
 
   void SetData(uint32_t slot, void* data) {
-    ASSERT(slot < Internals::kNumIsolateDataSlots);
+    DCHECK(slot < Internals::kNumIsolateDataSlots);
     embedder_data_[slot] = data;
   }
   void* GetData(uint32_t slot) {
-    ASSERT(slot < Internals::kNumIsolateDataSlots);
+    DCHECK(slot < Internals::kNumIsolateDataSlots);
     return embedder_data_[slot];
   }
 
@@ -965,7 +999,7 @@
 
   void enable_serializer() {
     // The serializer can only be enabled before the isolate init.
-    ASSERT(state_ != INITIALIZED);
+    DCHECK(state_ != INITIALIZED);
     serializer_enabled_ = true;
   }
 
@@ -979,7 +1013,7 @@
   bool initialized_from_snapshot() { return initialized_from_snapshot_; }
 
   double time_millis_since_init() {
-    return OS::TimeCurrentMillis() - time_millis_at_init_;
+    return base::OS::TimeCurrentMillis() - time_millis_at_init_;
   }
 
   DateCache* date_cache() {
@@ -997,19 +1031,7 @@
 
   bool IsFastArrayConstructorPrototypeChainIntact();
 
-  CodeStubInterfaceDescriptor*
-      code_stub_interface_descriptor(int index);
-
-  enum CallDescriptorKey {
-    KeyedCall,
-    NamedCall,
-    CallHandler,
-    ArgumentAdaptorCall,
-    ApiFunctionCall,
-    NUMBER_OF_CALL_DESCRIPTORS
-  };
-
-  CallInterfaceDescriptor* call_descriptor(CallDescriptorKey index);
+  CallInterfaceDescriptorData* call_descriptor_data(int index);
 
   void IterateDeferredHandles(ObjectVisitor* visitor);
   void LinkDeferredHandles(DeferredHandles* deferred_handles);
@@ -1021,14 +1043,14 @@
 
   bool concurrent_recompilation_enabled() {
     // Thread is only available with flag enabled.
-    ASSERT(optimizing_compiler_thread_ == NULL ||
+    DCHECK(optimizing_compiler_thread_ == NULL ||
            FLAG_concurrent_recompilation);
     return optimizing_compiler_thread_ != NULL;
   }
 
   bool concurrent_osr_enabled() const {
     // Thread is only available with flag enabled.
-    ASSERT(optimizing_compiler_thread_ == NULL ||
+    DCHECK(optimizing_compiler_thread_ == NULL ||
            FLAG_concurrent_recompilation);
     return optimizing_compiler_thread_ != NULL && FLAG_concurrent_osr;
   }
@@ -1048,6 +1070,7 @@
   int id() const { return static_cast<int>(id_); }
 
   HStatistics* GetHStatistics();
+  HStatistics* GetTStatistics();
   HTracer* GetHTracer();
   CodeTracer* GetCodeTracer();
 
@@ -1058,7 +1081,7 @@
 
   void* stress_deopt_count_address() { return &stress_deopt_count_; }
 
-  inline RandomNumberGenerator* random_number_generator();
+  inline base::RandomNumberGenerator* random_number_generator();
 
   // Given an address occupied by a live code object, return that object.
   Object* FindCodeObject(Address a);
@@ -1081,6 +1104,11 @@
   void EnqueueMicrotask(Handle<Object> microtask);
   void RunMicrotasks();
 
+  void SetUseCounterCallback(v8::Isolate::UseCounterCallback callback);
+  void CountUsage(v8::Isolate::UseCounterFeature feature);
+
+  static Isolate* NewForTesting() { return new Isolate(); }
+
  private:
   Isolate();
 
@@ -1140,12 +1168,11 @@
     DISALLOW_COPY_AND_ASSIGN(EntryStackItem);
   };
 
-  // This mutex protects highest_thread_id_ and thread_data_table_.
-  static Mutex process_wide_mutex_;
+  static base::LazyMutex thread_data_table_mutex_;
 
-  static Thread::LocalStorageKey per_isolate_thread_data_key_;
-  static Thread::LocalStorageKey isolate_key_;
-  static Thread::LocalStorageKey thread_id_key_;
+  static base::Thread::LocalStorageKey per_isolate_thread_data_key_;
+  static base::Thread::LocalStorageKey isolate_key_;
+  static base::Thread::LocalStorageKey thread_id_key_;
   static ThreadDataTable* thread_data_table_;
 
   // A global counter for all generated Isolates, might overflow.
@@ -1199,7 +1226,7 @@
   CompilationCache* compilation_cache_;
   Counters* counters_;
   CodeRange* code_range_;
-  RecursiveMutex break_access_;
+  base::RecursiveMutex break_access_;
   base::Atomic32 debugger_initialized_;
   Logger* logger_;
   StackGuard stack_guard_;
@@ -1239,9 +1266,8 @@
   RegExpStack* regexp_stack_;
   DateCache* date_cache_;
   unibrow::Mapping<unibrow::Ecma262Canonicalize> interp_canonicalize_mapping_;
-  CodeStubInterfaceDescriptor* code_stub_interface_descriptors_;
-  CallInterfaceDescriptor* call_descriptors_;
-  RandomNumberGenerator* random_number_generator_;
+  CallInterfaceDescriptorData* call_descriptor_data_;
+  base::RandomNumberGenerator* random_number_generator_;
 
   // Whether the isolate has been created for snapshotting.
   bool serializer_enabled_;
@@ -1300,6 +1326,8 @@
   // List of callbacks when a Call completes.
   List<CallCompletedCallback> call_completed_callbacks_;
 
+  v8::Isolate::UseCounterCallback use_counter_callback_;
+
   friend class ExecutionAccess;
   friend class HandleScopeImplementer;
   friend class IsolateInitializer;
@@ -1323,6 +1351,22 @@
 #undef THREAD_LOCAL_TOP_ACCESSOR
 
 
+class PromiseOnStack {
+ public:
+  PromiseOnStack(StackHandler* handler, Handle<JSObject> promise,
+                 PromiseOnStack* prev)
+      : handler_(handler), promise_(promise), prev_(prev) {}
+  StackHandler* handler() { return handler_; }
+  Handle<JSObject> promise() { return promise_; }
+  PromiseOnStack* prev() { return prev_; }
+
+ private:
+  StackHandler* handler_;
+  Handle<JSObject> promise_;
+  PromiseOnStack* prev_;
+};
+
+
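PromiseOnStack is an intrusive singly linked stack, and the PushPromise/PopPromise pair declared earlier in this header maintains it so GetPromiseOnStackOnThrow can find the innermost pending promise. Ignoring the handle bookkeeping of the real implementation, the stack discipline is (a sketch; the function names are hypothetical):

    // Push on promise entry, pop on exit; each entry remembers the
    // try-catch handler that was active when the promise was entered.
    void PushPromiseSketch(PromiseOnStack** top, StackHandler* handler,
                           Handle<JSObject> promise) {
      *top = new PromiseOnStack(handler, promise, *top);
    }

    void PopPromiseSketch(PromiseOnStack** top) {
      PromiseOnStack* popped = *top;
      *top = popped->prev();
      delete popped;
    }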
 // If the GCC version is 4.1.x or 4.2.x an additional field is added to the
 // class as a work around for a bug in the generated code found with these
 // versions of GCC. See V8 issue 122 for details.
@@ -1358,7 +1402,7 @@
     : isolate_(isolate),
       context_(isolate->context(), isolate) { }
   ~AssertNoContextChange() {
-    ASSERT(isolate_->context() == *context_);
+    DCHECK(isolate_->context() == *context_);
   }
 
  private:
@@ -1398,7 +1442,7 @@
   // Use this to check for stack-overflows in C++ code.
   inline bool HasOverflowed() const {
     StackGuard* stack_guard = isolate_->stack_guard();
-    return reinterpret_cast<uintptr_t>(this) < stack_guard->real_climit();
+    return GetCurrentStackPosition() < stack_guard->real_climit();
   }
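GetCurrentStackPosition() replaces the old trick of taking the address of the stack-allocated check object; both approximate the stack pointer, which is then compared against the guard limit (stacks grow downwards). A portable approximation, assuming GCC/Clang builtins rather than V8's actual implementation:

    // Approximate the stack pointer via the current frame address.
    inline uintptr_t ApproxStackPosition() {
      return reinterpret_cast<uintptr_t>(__builtin_frame_address(0));
    }

    inline bool StackHasOverflowed(uintptr_t real_climit) {
      return ApproxStackPosition() < real_climit;  // grew below the limit
    }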
 
   // Use this to check for stack-overflow when entering runtime from JS code.
@@ -1415,26 +1459,33 @@
 // account.
 class PostponeInterruptsScope BASE_EMBEDDED {
  public:
-  explicit PostponeInterruptsScope(Isolate* isolate)
-      : stack_guard_(isolate->stack_guard()), isolate_(isolate) {
-    ExecutionAccess access(isolate_);
-    stack_guard_->thread_local_.postpone_interrupts_nesting_++;
-    stack_guard_->DisableInterrupts();
+  PostponeInterruptsScope(Isolate* isolate,
+                          int intercept_mask = StackGuard::ALL_INTERRUPTS)
+      : stack_guard_(isolate->stack_guard()),
+        intercept_mask_(intercept_mask),
+        intercepted_flags_(0) {
+    stack_guard_->PushPostponeInterruptsScope(this);
   }
 
   ~PostponeInterruptsScope() {
-    ExecutionAccess access(isolate_);
-    if (--stack_guard_->thread_local_.postpone_interrupts_nesting_ == 0) {
-      stack_guard_->EnableInterrupts();
-    }
+    stack_guard_->PopPostponeInterruptsScope();
   }
+
+  // Find the bottom-most scope that intercepts this interrupt.
+  // Return whether the interrupt has been intercepted.
+  bool Intercept(StackGuard::InterruptFlag flag);
+
  private:
   StackGuard* stack_guard_;
-  Isolate* isolate_;
+  int intercept_mask_;
+  int intercepted_flags_;
+  PostponeInterruptsScope* prev_;
+
+  friend class StackGuard;
 };
 
 
-class CodeTracer V8_FINAL : public Malloced {
+class CodeTracer FINAL : public Malloced {
  public:
   explicit CodeTracer(int isolate_id)
       : file_(NULL),
@@ -1447,7 +1498,7 @@
     if (FLAG_redirect_code_traces_to == NULL) {
       SNPrintF(filename_,
                "code-%d-%d.asm",
-               OS::GetCurrentProcessId(),
+               base::OS::GetCurrentProcessId(),
                isolate_id);
     } else {
       StrNCpy(filename_, FLAG_redirect_code_traces_to, filename_.length());
@@ -1473,7 +1524,7 @@
     }
 
     if (file_ == NULL) {
-      file_ = OS::FOpen(filename_.start(), "a");
+      file_ = base::OS::FOpen(filename_.start(), "a");
     }
 
     scope_depth_++;
diff --git a/src/json-parser.h b/src/json-parser.h
index 60855a0..2993249 100644
--- a/src/json-parser.h
+++ b/src/json-parser.h
@@ -9,15 +9,15 @@
 
 #include "src/char-predicates-inl.h"
 #include "src/conversions.h"
+#include "src/heap/spaces-inl.h"
 #include "src/messages.h"
-#include "src/spaces-inl.h"
 #include "src/token.h"
 
 namespace v8 {
 namespace internal {
 
 // A simple json parser.
-template <bool seq_ascii>
+template <bool seq_one_byte>
 class JsonParser BASE_EMBEDDED {
  public:
   MUST_USE_RESULT static MaybeHandle<Object> Parse(Handle<String> source) {
@@ -39,8 +39,8 @@
     source_ = String::Flatten(source_);
     pretenure_ = (source_length_ >= kPretenureTreshold) ? TENURED : NOT_TENURED;
 
-    // Optimized fast case where we only have ASCII characters.
-    if (seq_ascii) {
+    // Optimized fast case where we only have Latin1 characters.
+    if (seq_one_byte) {
       seq_source_ = Handle<SeqOneByteString>::cast(source_);
     }
   }
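seq_one_byte is a compile-time switch: the true instantiation reads straight out of SeqOneByteString storage while the false one goes through the generic String::Get. The choice is made once at the call site, roughly as follows (a sketch of the caller, which lives outside this hunk):

    // Pick the parser instantiation by string representation so the hot
    // scanning loops specialize to one-byte loads where possible.
    MaybeHandle<Object> ParseJsonSketch(Handle<String> source) {
      source = String::Flatten(source);
      if (source->IsSeqOneByteString()) {
        return JsonParser<true>::Parse(source);
      }
      return JsonParser<false>::Parse(source);
    }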
@@ -52,7 +52,7 @@
     position_++;
     if (position_ >= source_length_) {
       c0_ = kEndOfString;
-    } else if (seq_ascii) {
+    } else if (seq_one_byte) {
       c0_ = seq_source_->SeqOneByteStringGet(position_);
     } else {
       c0_ = source_->Get(position_);
@@ -103,8 +103,8 @@
     if (source_->length() - position_ - 1 > length) {
       DisallowHeapAllocation no_gc;
       String::FlatContent content = expected->GetFlatContent();
-      if (content.IsAscii()) {
-        ASSERT_EQ('"', c0_);
+      if (content.IsOneByte()) {
+        DCHECK_EQ('"', c0_);
         const uint8_t* input_chars = seq_source_->GetChars() + position_ + 1;
         const uint8_t* expected_chars = content.ToOneByteVector().start();
         for (int i = 0; i < length; i++) {
@@ -132,7 +132,7 @@
   Handle<String> ScanJsonString();
   // Creates a new string and copies prefix[start..end] into the beginning
   // of it. Then scans the rest of the string, adding characters after the
-  // prefix. Called by ScanJsonString when reaching a '\' or non-ASCII char.
+  // prefix. Called by ScanJsonString when reaching a '\' or non-Latin1 char.
   template <typename StringType, typename SinkChar>
   Handle<String> SlowScanJsonString(Handle<String> prefix, int start, int end);
 
@@ -182,6 +182,9 @@
  private:
   Zone* zone() { return &zone_; }
 
+  void CommitStateToJsonObject(Handle<JSObject> json_object, Handle<Map> map,
+                               ZoneList<Handle<Object> >* properties);
+
   Handle<String> source_;
   int source_length_;
   Handle<SeqOneByteString> seq_source_;
@@ -195,8 +198,8 @@
   int position_;
 };
 
-template <bool seq_ascii>
-MaybeHandle<Object> JsonParser<seq_ascii>::ParseJson() {
+template <bool seq_one_byte>
+MaybeHandle<Object> JsonParser<seq_one_byte>::ParseJson() {
   // Advance to the first character (possibly EOS)
   AdvanceSkipWhitespace();
   Handle<Object> result = ParseJsonValue();
@@ -244,7 +247,9 @@
     MessageLocation location(factory->NewScript(source_),
                              position_,
                              position_ + 1);
-    Handle<Object> error = factory->NewSyntaxError(message, array);
+    Handle<Object> error;
+    ASSIGN_RETURN_ON_EXCEPTION(isolate(), error,
+                               factory->NewSyntaxError(message, array), Object);
     return isolate()->template Throw<Object>(error, &location);
   }
   return result;
@@ -252,8 +257,8 @@
 
 
 // Parse any JSON value.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonValue() {
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonValue() {
   StackLimitCheck stack_check(isolate_);
   if (stack_check.HasOverflowed()) {
     isolate_->StackOverflow();
@@ -293,14 +298,14 @@
 
 
 // Parse a JSON object. Position must be right at '{'.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonObject() {
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
   HandleScope scope(isolate());
   Handle<JSObject> json_object =
       factory()->NewJSObject(object_constructor(), pretenure_);
   Handle<Map> map(json_object->map());
   ZoneList<Handle<Object> > properties(8, zone());
-  ASSERT_EQ(c0_, '{');
+  DCHECK_EQ(c0_, '{');
 
   bool transitioning = true;
 
@@ -357,20 +362,20 @@
         // to parse it first.
         bool follow_expected = false;
         Handle<Map> target;
-        if (seq_ascii) {
-          key = JSObject::ExpectedTransitionKey(map);
+        if (seq_one_byte) {
+          key = Map::ExpectedTransitionKey(map);
           follow_expected = !key.is_null() && ParseJsonString(key);
         }
         // If the expected transition hits, follow it.
         if (follow_expected) {
-          target = JSObject::ExpectedTransitionTarget(map);
+          target = Map::ExpectedTransitionTarget(map);
         } else {
           // If the expected transition failed, parse an internalized string and
           // try to find a matching transition.
           key = ParseJsonInternalizedString();
           if (key.is_null()) return ReportUnexpectedCharacter();
 
-          target = JSObject::FindTransitionToField(map, key);
+          target = Map::FindTransitionToField(map, key);
           // If a transition was found, follow it and continue.
           transitioning = !target.is_null();
         }
@@ -387,11 +392,9 @@
           Representation expected_representation = details.representation();
 
           if (value->FitsRepresentation(expected_representation)) {
-            // If the target representation is double and the value is already
-            // double, use the existing box.
-            if (value->IsSmi() && expected_representation.IsDouble()) {
-              value = factory()->NewHeapNumber(
-                  Handle<Smi>::cast(value)->value());
+            if (expected_representation.IsDouble()) {
+              value = Object::NewStorageFor(isolate(), value,
+                                            expected_representation);
             } else if (expected_representation.IsHeapObject() &&
                        !target->instance_descriptors()->GetFieldType(
                            descriptor)->NowContains(value)) {
@@ -399,7 +402,7 @@
                       isolate(), expected_representation));
               Map::GeneralizeFieldType(target, descriptor, value_type);
             }
-            ASSERT(target->instance_descriptors()->GetFieldType(
+            DCHECK(target->instance_descriptors()->GetFieldType(
                     descriptor)->NowContains(value));
             properties.Add(value, zone());
             map = target;
@@ -410,13 +413,7 @@
         }
 
         // Commit the intermediate state to the object and stop transitioning.
-        JSObject::AllocateStorageForMap(json_object, map);
-        int length = properties.length();
-        for (int i = 0; i < length; i++) {
-          Handle<Object> value = properties[i];
-          FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
-          json_object->FastPropertyAtPut(index, *value);
-        }
+        CommitStateToJsonObject(json_object, map, &properties);
       } else {
         key = ParseJsonInternalizedString();
         if (key.is_null() || c0_ != ':') return ReportUnexpectedCharacter();
@@ -426,8 +423,7 @@
         if (value.is_null()) return ReportUnexpectedCharacter();
       }
 
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          json_object, key, value, NONE).Assert();
+      Runtime::DefineObjectProperty(json_object, key, value, NONE).Check();
     } while (MatchSkipWhiteSpace(','));
     if (c0_ != '}') {
       return ReportUnexpectedCharacter();
@@ -435,25 +431,56 @@
 
     // If we transitioned until the very end, transition the map now.
     if (transitioning) {
-      JSObject::AllocateStorageForMap(json_object, map);
-      int length = properties.length();
-      for (int i = 0; i < length; i++) {
-        Handle<Object> value = properties[i];
-        FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
-        json_object->FastPropertyAtPut(index, *value);
-      }
+      CommitStateToJsonObject(json_object, map, &properties);
     }
   }
   AdvanceSkipWhitespace();
   return scope.CloseAndEscape(json_object);
 }
 
+
+template <bool seq_one_byte>
+void JsonParser<seq_one_byte>::CommitStateToJsonObject(
+    Handle<JSObject> json_object, Handle<Map> map,
+    ZoneList<Handle<Object> >* properties) {
+  JSObject::AllocateStorageForMap(json_object, map);
+  DCHECK(!json_object->map()->is_dictionary_map());
+
+  DisallowHeapAllocation no_gc;
+  Factory* factory = isolate()->factory();
+  // If the |json_object|'s map is exactly the same as |map| then the
+  // |properties| values correspond to the |map| and nothing more has to be
+  // done. But if the |json_object|'s map is different then we have to
+  // iterate descriptors to ensure that properties still correspond to the
+  // map.
+  bool slow_case = json_object->map() != *map;
+  DescriptorArray* descriptors = NULL;
+
+  int length = properties->length();
+  if (slow_case) {
+    descriptors = json_object->map()->instance_descriptors();
+    DCHECK(json_object->map()->NumberOfOwnDescriptors() == length);
+  }
+  for (int i = 0; i < length; i++) {
+    Handle<Object> value = (*properties)[i];
+    if (slow_case && value->IsMutableHeapNumber() &&
+        !descriptors->GetDetails(i).representation().IsDouble()) {
+      // Turn mutable heap numbers into immutable if the field representation
+      // is not double.
+      HeapNumber::cast(*value)->set_map(*factory->heap_number_map());
+    }
+    FieldIndex index = FieldIndex::ForPropertyIndex(*map, i);
+    json_object->FastPropertyAtPut(index, *value);
+  }
+}
+
+
 // Parse a JSON array. Position must be right at '['.
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonArray() {
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonArray() {
   HandleScope scope(isolate());
   ZoneList<Handle<Object> > elements(4, zone());
-  ASSERT_EQ(c0_, '[');
+  DCHECK_EQ(c0_, '[');
 
   AdvanceSkipWhitespace();
   if (c0_ != ']') {
@@ -479,8 +506,8 @@
 }
 
 
-template <bool seq_ascii>
-Handle<Object> JsonParser<seq_ascii>::ParseJsonNumber() {
+template <bool seq_one_byte>
+Handle<Object> JsonParser<seq_one_byte>::ParseJsonNumber() {
   bool negative = false;
   int beg_pos = position_;
   if (c0_ == '-') {
@@ -523,12 +550,12 @@
   }
   int length = position_ - beg_pos;
   double number;
-  if (seq_ascii) {
+  if (seq_one_byte) {
     Vector<const uint8_t> chars(seq_source_->GetChars() +  beg_pos, length);
     number = StringToDouble(isolate()->unicode_cache(),
                             chars,
                             NO_FLAGS,  // Hex, octal or trailing junk.
-                            OS::nan_value());
+                            base::OS::nan_value());
   } else {
     Vector<uint8_t> buffer = Vector<uint8_t>::New(length);
     String::WriteToFlat(*source_, buffer.start(), beg_pos, position_);
@@ -581,9 +608,9 @@
 // Scans the rest of a JSON string starting from position_ and writes
 // prefix[start..end] along with the scanned characters into a
 // sequential string of type StringType.
-template <bool seq_ascii>
+template <bool seq_one_byte>
 template <typename StringType, typename SinkChar>
-Handle<String> JsonParser<seq_ascii>::SlowScanJsonString(
+Handle<String> JsonParser<seq_one_byte>::SlowScanJsonString(
     Handle<String> prefix, int start, int end) {
   int count = end - start;
   int max_length = count + source_length_ - position_;
@@ -603,16 +630,15 @@
     }
     if (c0_ != '\\') {
       // If the sink can contain UC16 characters, or source_ contains only
-      // ASCII characters, there's no need to test whether we can store the
+      // Latin1 characters, there's no need to test whether we can store the
       // character. Otherwise check whether the UC16 source character can fit
-      // in the ASCII sink.
-      if (sizeof(SinkChar) == kUC16Size ||
-          seq_ascii ||
+      // in the Latin1 sink.
+      if (sizeof(SinkChar) == kUC16Size || seq_one_byte ||
           c0_ <= String::kMaxOneByteCharCode) {
         SeqStringSet(seq_string, count++, c0_);
         Advance();
       } else {
-        // StringType is SeqOneByteString and we just read a non-ASCII char.
+        // StringType is SeqOneByteString and we just read a non-Latin1 char.
         return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string, 0, count);
       }
     } else {
@@ -653,7 +679,8 @@
             SeqStringSet(seq_string, count++, value);
             break;
           } else {
-            // StringType is SeqOneByteString and we just read a non-ASCII char.
+            // StringType is SeqOneByteString and we just read a non-Latin1
+            // char.
             position_ -= 6;  // Rewind position_ to \ in \uxxxx.
             Advance();
             return SlowScanJsonString<SeqTwoByteString, uc16>(seq_string,
@@ -668,7 +695,7 @@
     }
   }
 
-  ASSERT_EQ('"', c0_);
+  DCHECK_EQ('"', c0_);
   // Advance past the last '"'.
   AdvanceSkipWhitespace();
 
@@ -677,17 +704,17 @@
 }
 
 
-template <bool seq_ascii>
+template <bool seq_one_byte>
 template <bool is_internalized>
-Handle<String> JsonParser<seq_ascii>::ScanJsonString() {
-  ASSERT_EQ('"', c0_);
+Handle<String> JsonParser<seq_one_byte>::ScanJsonString() {
+  DCHECK_EQ('"', c0_);
   Advance();
   if (c0_ == '"') {
     AdvanceSkipWhitespace();
     return factory()->empty_string();
   }
 
-  if (seq_ascii && is_internalized) {
+  if (seq_one_byte && is_internalized) {
    // Fast path for existing internalized strings.  If the string being
     // parsed is not a known internalized string, contains backslashes or
     // unexpectedly reaches the end of string, return with an empty handle.
@@ -721,7 +748,8 @@
     } while (c0 != '"');
     int length = position - position_;
     uint32_t hash = (length <= String::kMaxHashCalcLength)
-        ? StringHasher::GetHashCore(running_hash) : length;
+                        ? StringHasher::GetHashCore(running_hash)
+                        : static_cast<uint32_t>(length);
     Vector<const uint8_t> string_vector(
         seq_source_->GetChars() + position_, length);
     StringTable* string_table = isolate()->heap()->string_table();
@@ -743,7 +771,7 @@
 #ifdef DEBUG
         uint32_t hash_field =
             (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
-        ASSERT_EQ(static_cast<int>(result->Hash()),
+        DCHECK_EQ(static_cast<int>(result->Hash()),
                   static_cast<int>(hash_field >> String::kHashShift));
 #endif
         break;
@@ -757,12 +785,12 @@
   }
 
   int beg_pos = position_;
-  // Fast case for ASCII only without escape characters.
+  // Fast case for Latin1 only without escape characters.
   do {
     // Check for control character (0x00-0x1f) or unterminated string (<0).
     if (c0_ < 0x20) return Handle<String>::null();
     if (c0_ != '\\') {
-      if (seq_ascii || c0_ <= String::kMaxOneByteCharCode) {
+      if (seq_one_byte || c0_ <= String::kMaxOneByteCharCode) {
         Advance();
       } else {
         return SlowScanJsonString<SeqTwoByteString, uc16>(source_,
@@ -781,7 +809,7 @@
   uint8_t* dest = SeqOneByteString::cast(*result)->GetChars();
   String::WriteToFlat(*source_, dest, beg_pos, position_);
 
-  ASSERT_EQ('"', c0_);
+  DCHECK_EQ('"', c0_);
   // Advance past the last '"'.
   AdvanceSkipWhitespace();
   return result;
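The internalized-string fast path above hashes the bytes while scanning and then probes the string table with the (bytes, hash) pair, so a string that is already internalized is reused without any allocation. The incremental hash loop in isolation (a sketch; AddCharacterCore is assumed to be the per-character counterpart of the GetHashCore call visible above):

    // Hash while scanning; give up on escapes or control characters,
    // exactly as the fast path does.
    bool HashWhileScanning(const uint8_t* chars, int length,
                           uint32_t running_hash, uint32_t* hash_out) {
      for (int i = 0; i < length; i++) {
        uint8_t c = chars[i];
        if (c == '\\' || c < 0x20) return false;  // slow path handles these
        running_hash = StringHasher::AddCharacterCore(running_hash, c);
      }
      *hash_out = StringHasher::GetHashCore(running_hash);
      return true;
    }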
diff --git a/src/json-stringifier.h b/src/json-stringifier.h
index 03461d7..f89a19f 100644
--- a/src/json-stringifier.h
+++ b/src/json-stringifier.h
@@ -6,6 +6,7 @@
 #define V8_JSON_STRINGIFIER_H_
 
 #include "src/v8.h"
+
 #include "src/conversions.h"
 #include "src/utils.h"
 
@@ -37,22 +38,22 @@
 
   INLINE(void ShrinkCurrentPart());
 
-  template <bool is_ascii, typename Char>
+  template <bool is_one_byte, typename Char>
   INLINE(void Append_(Char c));
 
-  template <bool is_ascii, typename Char>
+  template <bool is_one_byte, typename Char>
   INLINE(void Append_(const Char* chars));
 
   INLINE(void Append(uint8_t c)) {
-    if (is_ascii_) {
+    if (is_one_byte_) {
       Append_<true>(c);
     } else {
       Append_<false>(c);
     }
   }
 
-  INLINE(void AppendAscii(const char* chars)) {
-    if (is_ascii_) {
+  INLINE(void AppendOneByte(const char* chars)) {
+    if (is_one_byte_) {
       Append_<true>(reinterpret_cast<const uint8_t*>(chars));
     } else {
       Append_<false>(reinterpret_cast<const uint8_t*>(chars));
@@ -94,7 +95,7 @@
   INLINE(Result SerializeProperty(Handle<Object> object,
                                   bool deferred_comma,
                                   Handle<String> deferred_key)) {
-    ASSERT(!deferred_key.is_null());
+    DCHECK(!deferred_key.is_null());
     return Serialize_<true>(object, deferred_comma, deferred_key);
   }
 
@@ -128,7 +129,7 @@
                                               DestChar* dest,
                                               int length));
 
-  template <bool is_ascii, typename Char>
+  template <bool is_one_byte, typename Char>
   INLINE(void SerializeString_(Handle<String> string));
 
   template <typename Char>
@@ -158,7 +159,7 @@
   Handle<JSArray> stack_;
   int current_index_;
   int part_length_;
-  bool is_ascii_;
+  bool is_one_byte_;
   bool overflowed_;
 
   static const int kJsonEscapeTableEntrySize = 8;
@@ -166,7 +167,7 @@
 };
 
 
-// Translation table to escape ASCII characters.
+// Translation table to escape Latin1 characters.
 // Table entries start at a multiple of 8 and are null-terminated.
 const char* const BasicJsonStringifier::JsonEscapeTable =
     "\\u0000\0 \\u0001\0 \\u0002\0 \\u0003\0 "
@@ -238,7 +239,7 @@
 BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
     : isolate_(isolate),
       current_index_(0),
-      is_ascii_(true),
+      is_one_byte_(true),
       overflowed_(false) {
   factory_ = isolate_->factory();
   accumulator_store_ = Handle<JSValue>::cast(
@@ -257,12 +258,11 @@
     ShrinkCurrentPart();
     Accumulate();
     if (overflowed_) {
-      return isolate_->Throw<Object>(
-          isolate_->factory()->NewInvalidStringLengthError());
+      THROW_NEW_ERROR(isolate_, NewInvalidStringLengthError(), Object);
     }
     return accumulator();
   }
-  ASSERT(result == EXCEPTION);
+  DCHECK(result == EXCEPTION);
   return MaybeHandle<Object>();
 }
 
@@ -280,7 +280,7 @@
   }
 
   object = String::Flatten(object);
-  ASSERT(object->IsFlat());
+  DCHECK(object->IsFlat());
   if (object->IsOneByteRepresentationUnderneath()) {
     Handle<String> result = isolate->factory()->NewRawOneByteString(
         worst_case_length).ToHandleChecked();
@@ -317,9 +317,9 @@
 }
 
 
-template <bool is_ascii, typename Char>
+template <bool is_one_byte, typename Char>
 void BasicJsonStringifier::Append_(Char c) {
-  if (is_ascii) {
+  if (is_one_byte) {
     SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet(
         current_index_++, c);
   } else {
@@ -330,15 +330,16 @@
 }
 
 
-template <bool is_ascii, typename Char>
+template <bool is_one_byte, typename Char>
 void BasicJsonStringifier::Append_(const Char* chars) {
-  for ( ; *chars != '\0'; chars++) Append_<is_ascii, Char>(*chars);
+  for (; *chars != '\0'; chars++) Append_<is_one_byte, Char>(*chars);
 }
 
 
 MaybeHandle<Object> BasicJsonStringifier::ApplyToJsonFunction(
     Handle<Object> object, Handle<Object> key) {
-  LookupIterator it(object, tojson_string_, LookupIterator::SKIP_INTERCEPTOR);
+  LookupIterator it(object, tojson_string_,
+                    LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
   Handle<Object> fun;
   ASSIGN_RETURN_ON_EXCEPTION(isolate_, fun, Object::GetProperty(&it), Object);
   if (!fun->IsJSFunction()) return object;
@@ -370,8 +371,10 @@
     for (int i = 0; i < length; i++) {
       if (elements->get(i) == *object) {
         AllowHeapAllocation allow_to_return_error;
-        isolate_->Throw(*factory_->NewTypeError(
-            "circular_structure", HandleVector<Object>(NULL, 0)));
+        Handle<Object> error;
+        MaybeHandle<Object> maybe_error = factory_->NewTypeError(
+            "circular_structure", HandleVector<Object>(NULL, 0));
+        if (maybe_error.ToHandle(&error)) isolate_->Throw(*error);
         return EXCEPTION;
       }
     }
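
The replaced throw above is the circular-structure guard: every object currently being serialized sits on stack_, and meeting one of them again means JSON.stringify must raise a TypeError instead of recursing forever. A minimal sketch of the same stack-scan idea with raw pointers (not the V8 handle machinery):

    #include <cstdio>
    #include <vector>

    struct Node { Node* child = nullptr; };

    // Returns false if n is already on the serialization stack, i.e. a cycle.
    bool Serialize(Node* n, std::vector<Node*>* stack) {
      for (Node* seen : *stack) {
        if (seen == n) return false;  // "circular_structure"
      }
      stack->push_back(n);
      bool ok = (n->child == nullptr) || Serialize(n->child, stack);
      stack->pop_back();
      return ok;
    }

    int main() {
      Node a, b;
      a.child = &b;
      b.child = &a;  // cycle: a -> b -> a
      std::vector<Node*> stack;
      std::printf("%s\n", Serialize(&a, &stack) ? "ok" : "circular_structure");
    }
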
@@ -406,21 +409,22 @@
 
   switch (HeapObject::cast(*object)->map()->instance_type()) {
     case HEAP_NUMBER_TYPE:
+    case MUTABLE_HEAP_NUMBER_TYPE:
       if (deferred_string_key) SerializeDeferredKey(comma, key);
       return SerializeHeapNumber(Handle<HeapNumber>::cast(object));
     case ODDBALL_TYPE:
       switch (Oddball::cast(*object)->kind()) {
         case Oddball::kFalse:
           if (deferred_string_key) SerializeDeferredKey(comma, key);
-          AppendAscii("false");
+          AppendOneByte("false");
           return SUCCESS;
         case Oddball::kTrue:
           if (deferred_string_key) SerializeDeferredKey(comma, key);
-          AppendAscii("true");
+          AppendOneByte("true");
           return SUCCESS;
         case Oddball::kNull:
           if (deferred_string_key) SerializeDeferredKey(comma, key);
-          AppendAscii("null");
+          AppendOneByte("null");
           return SUCCESS;
         default:
           return UNCHANGED;
@@ -440,7 +444,8 @@
         SerializeString(Handle<String>::cast(object));
         return SUCCESS;
       } else if (object->IsJSObject()) {
-        if (object->IsAccessCheckNeeded()) break;
+        // Go to slow path for global proxy and objects requiring access checks.
+        if (object->IsAccessCheckNeeded() || object->IsJSGlobalProxy()) break;
         if (deferred_string_key) SerializeDeferredKey(comma, key);
         return SerializeJSObject(Handle<JSObject>::cast(object));
       }
@@ -503,10 +508,10 @@
     if (value->IsSmi()) return SerializeSmi(Smi::cast(*value));
     SerializeHeapNumber(Handle<HeapNumber>::cast(value));
   } else {
-    ASSERT(class_name == isolate_->heap()->Boolean_string());
+    DCHECK(class_name == isolate_->heap()->Boolean_string());
     Object* value = JSValue::cast(*object)->value();
-    ASSERT(value->IsBoolean());
-    AppendAscii(value->IsTrue() ? "true" : "false");
+    DCHECK(value->IsBoolean());
+    AppendOneByte(value->IsTrue() ? "true" : "false");
   }
   return SUCCESS;
 }
@@ -516,7 +521,7 @@
   static const int kBufferSize = 100;
   char chars[kBufferSize];
   Vector<char> buffer(chars, kBufferSize);
-  AppendAscii(IntToCString(object->value(), buffer));
+  AppendOneByte(IntToCString(object->value(), buffer));
   return SUCCESS;
 }
 
@@ -524,13 +529,13 @@
 BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
     double number) {
   if (std::isinf(number) || std::isnan(number)) {
-    AppendAscii("null");
+    AppendOneByte("null");
     return SUCCESS;
   }
   static const int kBufferSize = 100;
   char chars[kBufferSize];
   Vector<char> buffer(chars, kBufferSize);
-  AppendAscii(DoubleToCString(number, buffer));
+  AppendOneByte(DoubleToCString(number, buffer));
   return SUCCESS;
 }
 
@@ -575,7 +580,7 @@
                              i);
         if (result == SUCCESS) continue;
         if (result == UNCHANGED) {
-          AppendAscii("null");
+          AppendOneByte("null");
         } else {
           return result;
         }
@@ -608,12 +613,12 @@
         Object::GetElement(isolate_, object, i),
         EXCEPTION);
     if (element->IsUndefined()) {
-      AppendAscii("null");
+      AppendOneByte("null");
     } else {
       Result result = SerializeElement(isolate_, element, i);
       if (result == SUCCESS) continue;
       if (result == UNCHANGED) {
-        AppendAscii("null");
+        AppendOneByte("null");
       } else {
         return result;
       }
@@ -628,11 +633,7 @@
   HandleScope handle_scope(isolate_);
   Result stack_push = StackPush(object);
   if (stack_push != SUCCESS) return stack_push;
-  if (object->IsJSGlobalProxy()) {
-    object = Handle<JSObject>(
-                 JSObject::cast(object->GetPrototype()), isolate_);
-    ASSERT(object->IsGlobalObject());
-  }
+  DCHECK(!object->IsJSGlobalProxy() && !object->IsGlobalObject());
 
   Append('{');
   bool comma = false;
@@ -678,7 +679,7 @@
         key_handle = Handle<String>(String::cast(key), isolate_);
         maybe_property = Object::GetPropertyOrElement(object, key_handle);
       } else {
-        ASSERT(key->IsNumber());
+        DCHECK(key->IsNumber());
         key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
         uint32_t index;
         if (key->IsSmi()) {
@@ -707,7 +708,7 @@
 
 
 void BasicJsonStringifier::ShrinkCurrentPart() {
-  ASSERT(current_index_ < part_length_);
+  DCHECK(current_index_ < part_length_);
   current_part_ = SeqString::Truncate(Handle<SeqString>::cast(current_part_),
                                       current_index_);
 }
@@ -730,14 +731,14 @@
   if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
     part_length_ *= kPartLengthGrowthFactor;
   }
-  if (is_ascii_) {
+  if (is_one_byte_) {
     current_part_ =
         factory_->NewRawOneByteString(part_length_).ToHandleChecked();
   } else {
     current_part_ =
         factory_->NewRawTwoByteString(part_length_).ToHandleChecked();
   }
-  ASSERT(!current_part_.is_null());
+  DCHECK(!current_part_.is_null());
   current_index_ = 0;
 }
 
@@ -747,9 +748,9 @@
   Accumulate();
   current_part_ =
       factory_->NewRawTwoByteString(part_length_).ToHandleChecked();
-  ASSERT(!current_part_.is_null());
+  DCHECK(!current_part_.is_null());
   current_index_ = 0;
-  is_ascii_ = false;
+  is_one_byte_ = false;
 }
 
 
@@ -761,7 +762,7 @@
 
   // Assert that uc16 character is not truncated down to 8 bit.
   // The <uc16, char> version of this method must not be called.
-  ASSERT(sizeof(*dest) >= sizeof(*src));
+  DCHECK(sizeof(*dest) >= sizeof(*src));
 
   for (int i = 0; i < length; i++) {
     SrcChar c = src[i];
@@ -778,10 +779,10 @@
 }
 
 
-template <bool is_ascii, typename Char>
+template <bool is_one_byte, typename Char>
 void BasicJsonStringifier::SerializeString_(Handle<String> string) {
   int length = string->length();
-  Append_<is_ascii, char>('"');
+  Append_<is_one_byte, char>('"');
   // We make a rough estimate to find out if the current string can be
   // serialized without allocating a new string part. The worst case length of
 // an escaped character is 6.  Shifting the remaining string length right by 3
@@ -790,7 +791,7 @@
   if (((part_length_ - current_index_) >> 3) > length) {
     DisallowHeapAllocation no_gc;
     Vector<const Char> vector = GetCharVector<Char>(string);
-    if (is_ascii) {
+    if (is_one_byte) {
       current_index_ += SerializeStringUnchecked_(
           vector.start(),
           SeqOneByteString::cast(*current_part_)->GetChars() + current_index_,
@@ -814,15 +815,15 @@
       }
       Char c = vector[i];
       if (DoNotEscape(c)) {
-        Append_<is_ascii, Char>(c);
+        Append_<is_one_byte, Char>(c);
       } else {
-        Append_<is_ascii, uint8_t>(reinterpret_cast<const uint8_t*>(
+        Append_<is_one_byte, uint8_t>(reinterpret_cast<const uint8_t*>(
             &JsonEscapeTable[c * kJsonEscapeTableEntrySize]));
       }
     }
   }
 
-  Append_<is_ascii, uint8_t>('"');
+  Append_<is_one_byte, uint8_t>('"');
 }
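
The estimate described in the comment above is intentionally pessimistic: the longest escape ("\uXXXX") is 6 output characters, and right-shifting the remaining capacity by 3 budgets 8 per input character, so whenever the test passes the unchecked copy cannot overflow. A small check of that arithmetic:

    #include <cassert>

    // True when `length` input characters certainly fit in `remaining` output
    // slots, even if every one of them expands to a 6-character escape.
    bool FitsWorstCase(int remaining, int length) {
      // (remaining >> 3) > length  implies  remaining > 8 * length >= 6 * length.
      return (remaining >> 3) > length;
    }

    int main() {
      assert(FitsWorstCase(1024, 100));   // 100 * 6 = 600 <= 1024
      assert(!FitsWorstCase(512, 100));   // might need 600 > 512
      return 0;
    }
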
 
 
@@ -842,7 +843,7 @@
 Vector<const uint8_t> BasicJsonStringifier::GetCharVector(
     Handle<String> string) {
   String::FlatContent flat = string->GetFlatContent();
-  ASSERT(flat.IsAscii());
+  DCHECK(flat.IsOneByte());
   return flat.ToOneByteVector();
 }
 
@@ -850,14 +851,14 @@
 template <>
 Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
   String::FlatContent flat = string->GetFlatContent();
-  ASSERT(flat.IsTwoByte());
+  DCHECK(flat.IsTwoByte());
   return flat.ToUC16Vector();
 }
 
 
 void BasicJsonStringifier::SerializeString(Handle<String> object) {
   object = String::Flatten(object);
-  if (is_ascii_) {
+  if (is_one_byte_) {
     if (object->IsOneByteRepresentationUnderneath()) {
       SerializeString_<true, uint8_t>(object);
     } else {
diff --git a/src/jsregexp-inl.h b/src/jsregexp-inl.h
index 86fe1d6..1ab70b8 100644
--- a/src/jsregexp-inl.h
+++ b/src/jsregexp-inl.h
@@ -8,7 +8,7 @@
 
 #include "src/allocation.h"
 #include "src/handles.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/jsregexp.h"
 #include "src/objects.h"
 
diff --git a/src/jsregexp.cc b/src/jsregexp.cc
index 8f378a6..98aca72 100644
--- a/src/jsregexp.cc
+++ b/src/jsregexp.cc
@@ -5,35 +5,37 @@
 #include "src/v8.h"
 
 #include "src/ast.h"
+#include "src/base/platform/platform.h"
+#include "src/compilation-cache.h"
 #include "src/compiler.h"
 #include "src/execution.h"
 #include "src/factory.h"
-#include "src/jsregexp.h"
 #include "src/jsregexp-inl.h"
-#include "src/platform.h"
-#include "src/string-search.h"
-#include "src/runtime.h"
-#include "src/compilation-cache.h"
-#include "src/string-stream.h"
+#include "src/jsregexp.h"
+#include "src/ostreams.h"
 #include "src/parser.h"
 #include "src/regexp-macro-assembler.h"
-#include "src/regexp-macro-assembler-tracer.h"
 #include "src/regexp-macro-assembler-irregexp.h"
+#include "src/regexp-macro-assembler-tracer.h"
 #include "src/regexp-stack.h"
+#include "src/runtime.h"
+#include "src/string-search.h"
 
 #ifndef V8_INTERPRETED_REGEXP
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/regexp-macro-assembler-ia32.h"
+#include "src/ia32/regexp-macro-assembler-ia32.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/regexp-macro-assembler-x64.h"
+#include "src/x64/regexp-macro-assembler-x64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/regexp-macro-assembler-arm64.h"
+#include "src/arm64/regexp-macro-assembler-arm64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/regexp-macro-assembler-arm.h"
+#include "src/arm/regexp-macro-assembler-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/regexp-macro-assembler-mips.h"
+#include "src/mips/regexp-macro-assembler-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/regexp-macro-assembler-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/regexp-macro-assembler-x87.h"
+#include "src/x87/regexp-macro-assembler-x87.h"  // NOLINT
 #else
 #error Unsupported target architecture.
 #endif
@@ -51,7 +53,7 @@
     Handle<String> flags) {
   // Call the construct code with 2 arguments.
   Handle<Object> argv[] = { pattern, flags };
-  return Execution::New(constructor, ARRAY_SIZE(argv), argv);
+  return Execution::New(constructor, arraysize(argv), argv);
 }
 
 
@@ -68,6 +70,9 @@
       case 'm':
         flags |= JSRegExp::MULTILINE;
         break;
+      case 'y':
+        if (FLAG_harmony_regexps) flags |= JSRegExp::STICKY;
+        break;
     }
   }
   return JSRegExp::Flags(flags);
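
The new 'y' case threads the ES6 sticky flag through the same accumulate-bits loop as 'g', 'i' and 'm', gated on --harmony_regexps. A standalone sketch of that parsing shape (the enum values and the boolean standing in for the runtime flag are illustrative):

    #include <cstdint>
    #include <string>

    enum RegExpFlag : uint32_t {
      kGlobal = 1 << 0,
      kIgnoreCase = 1 << 1,
      kMultiline = 1 << 2,
      kSticky = 1 << 3,  // only honoured behind the harmony flag
    };

    bool harmony_regexps = true;  // stand-in for FLAG_harmony_regexps

    uint32_t ParseFlags(const std::string& str) {
      uint32_t flags = 0;
      for (char c : str) {
        switch (c) {
          case 'g': flags |= kGlobal; break;
          case 'i': flags |= kIgnoreCase; break;
          case 'm': flags |= kMultiline; break;
          case 'y': if (harmony_regexps) flags |= kSticky; break;
          // Unknown characters are ignored, as above.
        }
      }
      return flags;
    }
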
@@ -86,8 +91,8 @@
   elements->set(0, *pattern);
   elements->set(1, *error_text);
   Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
-  Handle<Object> regexp_err = factory->NewSyntaxError(message, array);
-  return isolate->Throw<Object>(regexp_err);
+  Handle<Object> regexp_err;
+  THROW_NEW_ERROR(isolate, NewSyntaxError(message, array), Object);
 }
 
 
@@ -95,8 +100,8 @@
                             const int* ranges,
                             int ranges_length,
                             Interval new_range) {
-  ASSERT((ranges_length & 1) == 1);
-  ASSERT(ranges[ranges_length - 1] == String::kMaxUtf16CodeUnit + 1);
+  DCHECK((ranges_length & 1) == 1);
+  DCHECK(ranges[ranges_length - 1] == String::kMaxUtf16CodeUnit + 1);
   if (containment == kLatticeUnknown) return containment;
   bool inside = false;
   int last = 0;
@@ -183,12 +188,14 @@
 
   if (parse_result.simple &&
       !flags.is_ignore_case() &&
+      !flags.is_sticky() &&
       !HasFewDifferentCharacters(pattern)) {
     // Parse-tree is a single atom that is equal to the pattern.
     AtomCompile(re, pattern, flags, pattern);
     has_been_compiled = true;
   } else if (parse_result.tree->IsAtom() &&
       !flags.is_ignore_case() &&
+      !flags.is_sticky() &&
       parse_result.capture_count == 0) {
     RegExpAtom* atom = parse_result.tree->AsAtom();
     Vector<const uc16> atom_pattern = atom->data();
@@ -205,7 +212,7 @@
   if (!has_been_compiled) {
     IrregexpInitialize(re, pattern, flags, parse_result.capture_count);
   }
-  ASSERT(re->data()->IsFixedArray());
+  DCHECK(re->data()->IsFixedArray());
   // Compilation succeeded so the data is set on the regexp
   // and we can store it in the cache.
   Handle<FixedArray> data(FixedArray::cast(re->data()));
@@ -267,16 +274,16 @@
                             int output_size) {
   Isolate* isolate = regexp->GetIsolate();
 
-  ASSERT(0 <= index);
-  ASSERT(index <= subject->length());
+  DCHECK(0 <= index);
+  DCHECK(index <= subject->length());
 
   subject = String::Flatten(subject);
   DisallowHeapAllocation no_gc;  // ensure vectors stay valid
 
   String* needle = String::cast(regexp->DataAt(JSRegExp::kAtomPatternIndex));
   int needle_len = needle->length();
-  ASSERT(needle->IsFlat());
-  ASSERT_LT(0, needle_len);
+  DCHECK(needle->IsFlat());
+  DCHECK_LT(0, needle_len);
 
   if (index + needle_len > subject->length()) {
     return RegExpImpl::RE_FAILURE;
@@ -285,28 +292,21 @@
   for (int i = 0; i < output_size; i += 2) {
     String::FlatContent needle_content = needle->GetFlatContent();
     String::FlatContent subject_content = subject->GetFlatContent();
-    ASSERT(needle_content.IsFlat());
-    ASSERT(subject_content.IsFlat());
+    DCHECK(needle_content.IsFlat());
+    DCHECK(subject_content.IsFlat());
     // dispatch on type of strings
-    index = (needle_content.IsAscii()
-             ? (subject_content.IsAscii()
-                ? SearchString(isolate,
-                               subject_content.ToOneByteVector(),
-                               needle_content.ToOneByteVector(),
-                               index)
-                : SearchString(isolate,
-                               subject_content.ToUC16Vector(),
-                               needle_content.ToOneByteVector(),
-                               index))
-             : (subject_content.IsAscii()
-                ? SearchString(isolate,
-                               subject_content.ToOneByteVector(),
-                               needle_content.ToUC16Vector(),
-                               index)
-                : SearchString(isolate,
-                               subject_content.ToUC16Vector(),
-                               needle_content.ToUC16Vector(),
-                               index)));
+    index =
+        (needle_content.IsOneByte()
+             ? (subject_content.IsOneByte()
+                    ? SearchString(isolate, subject_content.ToOneByteVector(),
+                                   needle_content.ToOneByteVector(), index)
+                    : SearchString(isolate, subject_content.ToUC16Vector(),
+                                   needle_content.ToOneByteVector(), index))
+             : (subject_content.IsOneByte()
+                    ? SearchString(isolate, subject_content.ToOneByteVector(),
+                                   needle_content.ToUC16Vector(), index)
+                    : SearchString(isolate, subject_content.ToUC16Vector(),
+                                   needle_content.ToUC16Vector(), index)));
     if (index == -1) {
       return i / 2;  // Return number of matches.
     } else {
@@ -333,7 +333,7 @@
 
   if (res == RegExpImpl::RE_FAILURE) return isolate->factory()->null_value();
 
-  ASSERT_EQ(res, RegExpImpl::RE_SUCCESS);
+  DCHECK_EQ(res, RegExpImpl::RE_SUCCESS);
   SealHandleScope shs(isolate);
   FixedArray* array = FixedArray::cast(last_match_info->elements());
   SetAtomLastCapture(array, *subject, output_registers[0], output_registers[1]);
@@ -344,14 +344,15 @@
 // Irregexp implementation.
 
 // Ensures that the regexp object contains a compiled version of the
-// source for either ASCII or non-ASCII strings.
+// source for either one-byte or two-byte subject strings.
 // If the compiled version doesn't already exist, it is compiled
 // from the source pattern.
 // If compilation fails, an exception is thrown and this function
 // returns false.
-bool RegExpImpl::EnsureCompiledIrregexp(
-    Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii) {
-  Object* compiled_code = re->DataAt(JSRegExp::code_index(is_ascii));
+bool RegExpImpl::EnsureCompiledIrregexp(Handle<JSRegExp> re,
+                                        Handle<String> sample_subject,
+                                        bool is_one_byte) {
+  Object* compiled_code = re->DataAt(JSRegExp::code_index(is_one_byte));
 #ifdef V8_INTERPRETED_REGEXP
   if (compiled_code->IsByteArray()) return true;
 #else  // V8_INTERPRETED_REGEXP (RegExp native code)
@@ -359,19 +360,18 @@
 #endif
   // We could potentially have marked this as flushable, but have kept
   // a saved version if we did not flush it yet.
-  Object* saved_code = re->DataAt(JSRegExp::saved_code_index(is_ascii));
+  Object* saved_code = re->DataAt(JSRegExp::saved_code_index(is_one_byte));
   if (saved_code->IsCode()) {
     // Reinstate the code in the original place.
-    re->SetDataAt(JSRegExp::code_index(is_ascii), saved_code);
-    ASSERT(compiled_code->IsSmi());
+    re->SetDataAt(JSRegExp::code_index(is_one_byte), saved_code);
+    DCHECK(compiled_code->IsSmi());
     return true;
   }
-  return CompileIrregexp(re, sample_subject, is_ascii);
+  return CompileIrregexp(re, sample_subject, is_one_byte);
 }
 
 
-static bool CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
-                                            bool is_ascii,
+static void CreateRegExpErrorObjectAndThrow(Handle<JSRegExp> re,
                                             Handle<String> error_message,
                                             Isolate* isolate) {
   Factory* factory = isolate->factory();
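
The caching that EnsureCompiledIrregexp implements above keys compiled code by subject representation: a JSRegExp holds one code slot for one-byte subjects and one for two-byte subjects, each filled lazily and independently (plus a saved-code slot for code that was flushed). A simplified model of the two-slot lookup (types and the stand-in Compile are illustrative, not the V8 API):

    struct Code { /* compiled regexp code or bytecode */ };

    struct RegExpData {
      // Slot 0: two-byte subjects, slot 1: one-byte subjects, mirroring the
      // selection made by JSRegExp::code_index(is_one_byte).
      Code* code[2] = {nullptr, nullptr};
    };

    Code* Compile(bool /*is_one_byte*/) { return new Code(); }  // stand-in

    Code* EnsureCompiled(RegExpData* re, bool is_one_byte) {
      Code*& slot = re->code[is_one_byte ? 1 : 0];
      if (slot == nullptr) slot = Compile(is_one_byte);  // lazy, per representation
      return slot;
    }
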
@@ -379,29 +379,29 @@
   elements->set(0, re->Pattern());
   elements->set(1, *error_message);
   Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
-  Handle<Object> regexp_err =
+  Handle<Object> error;
+  MaybeHandle<Object> maybe_error =
       factory->NewSyntaxError("malformed_regexp", array);
-  isolate->Throw(*regexp_err);
-  return false;
+  if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
 }
 
 
 bool RegExpImpl::CompileIrregexp(Handle<JSRegExp> re,
                                  Handle<String> sample_subject,
-                                 bool is_ascii) {
+                                 bool is_one_byte) {
   // Compile the RegExp.
   Isolate* isolate = re->GetIsolate();
   Zone zone(isolate);
   PostponeInterruptsScope postpone(isolate);
   // If we had a compilation error the last time this is saved at the
   // saved code index.
-  Object* entry = re->DataAt(JSRegExp::code_index(is_ascii));
+  Object* entry = re->DataAt(JSRegExp::code_index(is_one_byte));
   // When arriving here entry can only be a smi, either representing an
   // uncompiled regexp, a previous compilation error, or code that has
   // been flushed.
-  ASSERT(entry->IsSmi());
+  DCHECK(entry->IsSmi());
   int entry_value = Smi::cast(entry)->value();
-  ASSERT(entry_value == JSRegExp::kUninitializedValue ||
+  DCHECK(entry_value == JSRegExp::kUninitializedValue ||
          entry_value == JSRegExp::kCompilationErrorValue ||
          (entry_value < JSRegExp::kCodeAgeMask && entry_value >= 0));
 
@@ -409,10 +409,10 @@
     // A previous compilation failed and threw an error which we store in
     // the saved code index (we store the error message, not the actual
     // error). Recreate the error object and throw it.
-    Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_ascii));
-    ASSERT(error_string->IsString());
+    Object* error_string = re->DataAt(JSRegExp::saved_code_index(is_one_byte));
+    DCHECK(error_string->IsString());
     Handle<String> error_message(String::cast(error_string));
-    CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
+    CreateRegExpErrorObjectAndThrow(re, error_message, isolate);
     return false;
   }
 
@@ -433,25 +433,20 @@
                              "malformed_regexp"));
     return false;
   }
-  RegExpEngine::CompilationResult result =
-      RegExpEngine::Compile(&compile_data,
-                            flags.is_ignore_case(),
-                            flags.is_global(),
-                            flags.is_multiline(),
-                            pattern,
-                            sample_subject,
-                            is_ascii,
-                            &zone);
+  RegExpEngine::CompilationResult result = RegExpEngine::Compile(
+      &compile_data, flags.is_ignore_case(), flags.is_global(),
+      flags.is_multiline(), flags.is_sticky(), pattern, sample_subject,
+      is_one_byte, &zone);
   if (result.error_message != NULL) {
     // Unable to compile regexp.
     Handle<String> error_message = isolate->factory()->NewStringFromUtf8(
         CStrVector(result.error_message)).ToHandleChecked();
-    CreateRegExpErrorObjectAndThrow(re, is_ascii, error_message, isolate);
+    CreateRegExpErrorObjectAndThrow(re, error_message, isolate);
     return false;
   }
 
   Handle<FixedArray> data = Handle<FixedArray>(FixedArray::cast(re->data()));
-  data->set(JSRegExp::code_index(is_ascii), result.code);
+  data->set(JSRegExp::code_index(is_one_byte), result.code);
   int register_max = IrregexpMaxRegisterCount(*data);
   if (result.num_registers > register_max) {
     SetIrregexpMaxRegisterCount(*data, result.num_registers);
@@ -482,13 +477,13 @@
 }
 
 
-ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_ascii) {
-  return ByteArray::cast(re->get(JSRegExp::code_index(is_ascii)));
+ByteArray* RegExpImpl::IrregexpByteCode(FixedArray* re, bool is_one_byte) {
+  return ByteArray::cast(re->get(JSRegExp::code_index(is_one_byte)));
 }
 
 
-Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_ascii) {
-  return Code::cast(re->get(JSRegExp::code_index(is_ascii)));
+Code* RegExpImpl::IrregexpNativeCode(FixedArray* re, bool is_one_byte) {
+  return Code::cast(re->get(JSRegExp::code_index(is_one_byte)));
 }
 
 
@@ -509,9 +504,9 @@
                                 Handle<String> subject) {
   subject = String::Flatten(subject);
 
-  // Check the asciiness of the underlying storage.
-  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
-  if (!EnsureCompiledIrregexp(regexp, subject, is_ascii)) return -1;
+  // Check representation of the underlying storage.
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
+  if (!EnsureCompiledIrregexp(regexp, subject, is_one_byte)) return -1;
 
 #ifdef V8_INTERPRETED_REGEXP
   // Byte-code regexp needs space allocated for all its registers.
@@ -537,17 +532,17 @@
 
   Handle<FixedArray> irregexp(FixedArray::cast(regexp->data()), isolate);
 
-  ASSERT(index >= 0);
-  ASSERT(index <= subject->length());
-  ASSERT(subject->IsFlat());
+  DCHECK(index >= 0);
+  DCHECK(index <= subject->length());
+  DCHECK(subject->IsFlat());
 
-  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
 
 #ifndef V8_INTERPRETED_REGEXP
-  ASSERT(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
+  DCHECK(output_size >= (IrregexpNumberOfCaptures(*irregexp) + 1) * 2);
   do {
-    EnsureCompiledIrregexp(regexp, subject, is_ascii);
-    Handle<Code> code(IrregexpNativeCode(*irregexp, is_ascii), isolate);
+    EnsureCompiledIrregexp(regexp, subject, is_one_byte);
+    Handle<Code> code(IrregexpNativeCode(*irregexp, is_one_byte), isolate);
     // The stack is used to allocate registers for the compiled regexp code.
     // This means that in case of failure, the output registers array is left
     // untouched and contains the capture results from the previous successful
@@ -560,7 +555,7 @@
                                           index,
                                           isolate);
     if (res != NativeRegExpMacroAssembler::RETRY) {
-      ASSERT(res != NativeRegExpMacroAssembler::EXCEPTION ||
+      DCHECK(res != NativeRegExpMacroAssembler::EXCEPTION ||
              isolate->has_pending_exception());
       STATIC_ASSERT(
           static_cast<int>(NativeRegExpMacroAssembler::SUCCESS) == RE_SUCCESS);
@@ -574,16 +569,16 @@
     // must restart from scratch.
     // In this case, it means we must make sure we are prepared to handle
     // the, potentially, different subject (the string can switch between
-    // being internal and external, and even between being ASCII and UC16,
+    // being internal and external, and even between being Latin1 and UC16,
     // but the characters are always the same).
     IrregexpPrepare(regexp, subject);
-    is_ascii = subject->IsOneByteRepresentationUnderneath();
+    is_one_byte = subject->IsOneByteRepresentationUnderneath();
   } while (true);
   UNREACHABLE();
   return RE_EXCEPTION;
 #else  // V8_INTERPRETED_REGEXP
 
-  ASSERT(output_size >= IrregexpNumberOfRegisters(*irregexp));
+  DCHECK(output_size >= IrregexpNumberOfRegisters(*irregexp));
   // We must have done EnsureCompiledIrregexp, so we can get the number of
   // registers.
   int number_of_capture_registers =
@@ -595,7 +590,8 @@
   for (int i = number_of_capture_registers - 1; i >= 0; i--) {
     raw_output[i] = -1;
   }
-  Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_ascii), isolate);
+  Handle<ByteArray> byte_codes(IrregexpByteCode(*irregexp, is_one_byte),
+                               isolate);
 
   IrregexpResult result = IrregexpInterpreter::Match(isolate,
                                                      byte_codes,
@@ -607,7 +603,7 @@
     MemCopy(output, raw_output, number_of_capture_registers * sizeof(int32_t));
   }
   if (result == RE_EXCEPTION) {
-    ASSERT(!isolate->has_pending_exception());
+    DCHECK(!isolate->has_pending_exception());
     isolate->StackOverflow();
   }
   return result;
@@ -620,7 +616,7 @@
                                              int previous_index,
                                              Handle<JSArray> last_match_info) {
   Isolate* isolate = regexp->GetIsolate();
-  ASSERT_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
+  DCHECK_EQ(regexp->TypeTag(), JSRegExp::IRREGEXP);
 
   // Prepare space for the return values.
 #if defined(V8_INTERPRETED_REGEXP) && defined(DEBUG)
@@ -633,7 +629,7 @@
   int required_registers = RegExpImpl::IrregexpPrepare(regexp, subject);
   if (required_registers < 0) {
     // Compiling failed with an exception.
-    ASSERT(isolate->has_pending_exception());
+    DCHECK(isolate->has_pending_exception());
     return MaybeHandle<Object>();
   }
 
@@ -655,10 +651,10 @@
         last_match_info, subject, capture_count, output_registers);
   }
   if (res == RE_EXCEPTION) {
-    ASSERT(isolate->has_pending_exception());
+    DCHECK(isolate->has_pending_exception());
     return MaybeHandle<Object>();
   }
-  ASSERT(res == RE_FAILURE);
+  DCHECK(res == RE_FAILURE);
   return isolate->factory()->null_value();
 }
 
@@ -667,7 +663,7 @@
                                              Handle<String> subject,
                                              int capture_count,
                                              int32_t* match) {
-  ASSERT(last_match_info->HasFastObjectElements());
+  DCHECK(last_match_info->HasFastObjectElements());
   int capture_register_count = (capture_count + 1) * 2;
   JSArray::EnsureSize(last_match_info,
                       capture_register_count + kLastMatchOverhead);
@@ -734,8 +730,8 @@
   // to the compiled regexp.
   current_match_index_ = max_matches_ - 1;
   num_matches_ = max_matches_;
-  ASSERT(registers_per_match_ >= 2);  // Each match has at least one capture.
-  ASSERT_GE(register_array_size_, registers_per_match_);
+  DCHECK(registers_per_match_ >= 2);  // Each match has at least one capture.
+  DCHECK_GE(register_array_size_, registers_per_match_);
   int32_t* last_match =
       &register_array_[current_match_index_ * registers_per_match_];
   last_match[0] = -1;
@@ -964,7 +960,7 @@
   // Does not measure in percent, but rather per-128 (the table size from the
   // regexp macro assembler).
   int Frequency(int in_character) {
-    ASSERT((in_character & RegExpMacroAssembler::kTableMask) == in_character);
+    DCHECK((in_character & RegExpMacroAssembler::kTableMask) == in_character);
     if (total_samples_ < 1) return 1;  // Division by zero.
     int freq_in_per128 =
         (frequencies_[in_character].counter() * 128) / total_samples_;
@@ -996,7 +992,7 @@
 
 class RegExpCompiler {
  public:
-  RegExpCompiler(int capture_count, bool ignore_case, bool is_ascii,
+  RegExpCompiler(int capture_count, bool ignore_case, bool is_one_byte,
                  Zone* zone);
 
   int AllocateRegister() {
@@ -1029,7 +1025,7 @@
   void SetRegExpTooBig() { reg_exp_too_big_ = true; }
 
   inline bool ignore_case() { return ignore_case_; }
-  inline bool ascii() { return ascii_; }
+  inline bool one_byte() { return one_byte_; }
   FrequencyCollator* frequency_collator() { return &frequency_collator_; }
 
   int current_expansion_factor() { return current_expansion_factor_; }
@@ -1048,7 +1044,7 @@
   int recursion_depth_;
   RegExpMacroAssembler* macro_assembler_;
   bool ignore_case_;
-  bool ascii_;
+  bool one_byte_;
   bool reg_exp_too_big_;
   int current_expansion_factor_;
   FrequencyCollator frequency_collator_;
@@ -1074,19 +1070,19 @@
 
 // Attempts to compile the regexp using an Irregexp code generator.  Returns
 // a fixed array or a null handle depending on whether it succeeded.
-RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case, bool ascii,
-                               Zone* zone)
+RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case,
+                               bool one_byte, Zone* zone)
     : next_register_(2 * (capture_count + 1)),
       work_list_(NULL),
       recursion_depth_(0),
       ignore_case_(ignore_case),
-      ascii_(ascii),
+      one_byte_(one_byte),
       reg_exp_too_big_(false),
       current_expansion_factor_(1),
       frequency_collator_(),
       zone_(zone) {
   accept_ = new(zone) EndNode(EndNode::ACCEPT, zone);
-  ASSERT(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
+  DCHECK(next_register_ - 1 <= RegExpMacroAssembler::kMaxRegister);
 }
 
 
@@ -1133,8 +1129,8 @@
 #ifdef DEBUG
   if (FLAG_print_code) {
     CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
-    Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(),
-                                          trace_scope.file());
+    OFStream os(trace_scope.file());
+    Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
   }
   if (FLAG_trace_regexp_assembler) {
     delete macro_assembler_;
@@ -1166,7 +1162,7 @@
 
 
 bool Trace::GetStoredPosition(int reg, int* cp_offset) {
-  ASSERT_EQ(0, *cp_offset);
+  DCHECK_EQ(0, *cp_offset);
   for (DeferredAction* action = actions_;
        action != NULL;
        action = action->next()) {
@@ -1268,16 +1264,16 @@
             // we can set undo_action to IGNORE if we know there is no value to
             // restore.
             undo_action = RESTORE;
-            ASSERT_EQ(store_position, -1);
-            ASSERT(!clear);
+            DCHECK_EQ(store_position, -1);
+            DCHECK(!clear);
             break;
           }
           case ActionNode::INCREMENT_REGISTER:
             if (!absolute) {
               value++;
             }
-            ASSERT_EQ(store_position, -1);
-            ASSERT(!clear);
+            DCHECK_EQ(store_position, -1);
+            DCHECK(!clear);
             undo_action = RESTORE;
             break;
           case ActionNode::STORE_POSITION: {
@@ -1299,8 +1295,8 @@
             } else {
               undo_action = pc->is_capture() ? CLEAR : RESTORE;
             }
-            ASSERT(!absolute);
-            ASSERT_EQ(value, 0);
+            DCHECK(!absolute);
+            DCHECK_EQ(value, 0);
             break;
           }
           case ActionNode::CLEAR_CAPTURES: {
@@ -1311,8 +1307,8 @@
               clear = true;
             }
             undo_action = RESTORE;
-            ASSERT(!absolute);
-            ASSERT_EQ(value, 0);
+            DCHECK(!absolute);
+            DCHECK_EQ(value, 0);
             break;
           }
           default:
@@ -1357,7 +1353,7 @@
 void Trace::Flush(RegExpCompiler* compiler, RegExpNode* successor) {
   RegExpMacroAssembler* assembler = compiler->macro_assembler();
 
-  ASSERT(!is_trivial());
+  DCHECK(!is_trivial());
 
   if (actions_ == NULL && backtrack() == NULL) {
     // Here we just have some deferred cp advances to fix and we are back to
@@ -1574,13 +1570,13 @@
                                Trace* trace) {
   switch (guard->op()) {
     case Guard::LT:
-      ASSERT(!trace->mentions_reg(guard->reg()));
+      DCHECK(!trace->mentions_reg(guard->reg()));
       macro_assembler->IfRegisterGE(guard->reg(),
                                     guard->value(),
                                     trace->backtrack());
       break;
     case Guard::GEQ:
-      ASSERT(!trace->mentions_reg(guard->reg()));
+      DCHECK(!trace->mentions_reg(guard->reg()));
       macro_assembler->IfRegisterLT(guard->reg(),
                                     guard->value(),
                                     trace->backtrack());
@@ -1591,9 +1587,8 @@
 
 // Returns the number of characters in the equivalence class, omitting those
 // that cannot occur in the source string because it is ASCII.
-static int GetCaseIndependentLetters(Isolate* isolate,
-                                     uc16 character,
-                                     bool ascii_subject,
+static int GetCaseIndependentLetters(Isolate* isolate, uc16 character,
+                                     bool one_byte_subject,
                                      unibrow::uchar* letters) {
   int length =
       isolate->jsregexp_uncanonicalize()->get(character, '\0', letters);
@@ -1603,11 +1598,14 @@
     letters[0] = character;
     length = 1;
   }
-  if (!ascii_subject || character <= String::kMaxOneByteCharCode) {
+  if (!one_byte_subject || character <= String::kMaxOneByteCharCode) {
     return length;
   }
+
   // The standard requires that non-ASCII characters cannot have ASCII
   // character codes in their equivalence class.
+  // TODO(dcarney): issue 3550: this is not actually true for Latin1 anymore,
+  // is it?  For example, \u00C5 is equivalent to \u212B.
   return 0;
 }
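
The pruning above is why the one-byte compiler pass can drop whole characters: if the subject is one-byte and the character code is above kMaxOneByteCharCode, the entire equivalence class is discarded. The TODO records that this reasoning was written for ASCII and is questionable for Latin1 (U+00C5 and U+212B are case-equivalent, and the former is a one-byte code). A sketch of just the pruning step (the canonicalization oracle itself is assumed, not shown):

    #include <cstdint>

    const uint16_t kMaxOneByteCharCode = 0xFF;

    // `length` is the size of the equivalence class some canonicalizer found.
    // Returns how many of those letters are worth emitting checks for.
    int PruneForSubject(uint16_t character, bool one_byte_subject, int length) {
      if (!one_byte_subject || character <= kMaxOneByteCharCode) return length;
      // A character outside the one-byte range cannot itself occur in a
      // one-byte subject; the code above drops the whole class, which the
      // TODO flags as unsound for Latin1 pairs like U+00C5 / U+212B.
      return 0;
    }
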
 
@@ -1643,18 +1641,19 @@
                                      bool check,
                                      bool preloaded) {
   RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
-  bool ascii = compiler->ascii();
+  bool one_byte = compiler->one_byte();
   unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
-  int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
+  int length = GetCaseIndependentLetters(isolate, c, one_byte, chars);
   if (length < 1) {
-    // This can't match.  Must be an ASCII subject and a non-ASCII character.
-    // We do not need to do anything since the ASCII pass already handled this.
+    // This can't match.  Must be a one-byte subject and a non-one-byte
+    // character.  We do not need to do anything since the one-byte pass
+    // already handled this.
     return false;  // Bounds not checked.
   }
   bool checked = false;
   // We handle the length > 1 case in a later pass.
   if (length == 1) {
-    if (ascii && c > String::kMaxOneByteCharCodeU) {
+    if (one_byte && c > String::kMaxOneByteCharCodeU) {
       // Can't match - see above.
       return false;  // Bounds not checked.
     }
@@ -1669,12 +1668,10 @@
 
 
 static bool ShortCutEmitCharacterPair(RegExpMacroAssembler* macro_assembler,
-                                      bool ascii,
-                                      uc16 c1,
-                                      uc16 c2,
+                                      bool one_byte, uc16 c1, uc16 c2,
                                       Label* on_failure) {
   uc16 char_mask;
-  if (ascii) {
+  if (one_byte) {
     char_mask = String::kMaxOneByteCharCode;
   } else {
     char_mask = String::kMaxUtf16CodeUnit;
@@ -1684,12 +1681,12 @@
   if (((exor - 1) & exor) == 0) {
     // If c1 and c2 differ only by one bit.
     // Ecma262UnCanonicalize always gives the highest number last.
-    ASSERT(c2 > c1);
+    DCHECK(c2 > c1);
     uc16 mask = char_mask ^ exor;
     macro_assembler->CheckNotCharacterAfterAnd(c1, mask, on_failure);
     return true;
   }
-  ASSERT(c2 > c1);
+  DCHECK(c2 > c1);
   uc16 diff = c2 - c1;
   if (((diff - 1) & diff) == 0 && c1 >= diff) {
     // If the characters differ by 2^n but don't differ by one bit then
@@ -1725,9 +1722,9 @@
                                   bool check,
                                   bool preloaded) {
   RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
-  bool ascii = compiler->ascii();
+  bool one_byte = compiler->one_byte();
   unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
-  int length = GetCaseIndependentLetters(isolate, c, ascii, chars);
+  int length = GetCaseIndependentLetters(isolate, c, one_byte, chars);
   if (length <= 1) return false;
   // We may not need to check against the end of the input string
   // if this character lies before a character that matched.
@@ -1735,14 +1732,11 @@
     macro_assembler->LoadCurrentCharacter(cp_offset, on_failure, check);
   }
   Label ok;
-  ASSERT(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4);
+  DCHECK(unibrow::Ecma262UnCanonicalize::kMaxWidth == 4);
   switch (length) {
     case 2: {
-      if (ShortCutEmitCharacterPair(macro_assembler,
-                                    ascii,
-                                    chars[0],
-                                    chars[1],
-                                    on_failure)) {
+      if (ShortCutEmitCharacterPair(macro_assembler, one_byte, chars[0],
+                                    chars[1], on_failure)) {
       } else {
         macro_assembler->CheckCharacter(chars[0], &ok);
         macro_assembler->CheckNotCharacter(chars[1], on_failure);
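
ShortCutEmitCharacterPair, called above for the two-letter case, exploits how most case pairs relate: 'A' (0x41) and 'a' (0x61) differ in exactly the 0x20 bit, so clearing that bit from both the loaded character and c1 reduces "is it either letter?" to one AND and one compare. A standalone check of the trick (char_mask as in the one-byte path):

    #include <cassert>
    #include <cstdint>

    // Valid only when c1 and c2 differ in exactly one bit; the caller's
    // DCHECK(c2 > c1) holds because Ecma262UnCanonicalize reports the larger
    // code last.
    bool MatchesEitherOfPair(uint16_t c, uint16_t c1, uint16_t c2,
                             uint16_t char_mask) {
      uint16_t exor = c1 ^ c2;
      assert(((exor - 1) & exor) == 0);  // a single differing bit
      uint16_t mask = char_mask ^ exor;  // drop that bit from the compare
      return (c & mask) == (c1 & mask);
    }

    int main() {
      const uint16_t kMaxOneByteCharCode = 0xFF;
      assert(MatchesEitherOfPair('A', 'A', 'a', kMaxOneByteCharCode));
      assert(MatchesEitherOfPair('a', 'A', 'a', kMaxOneByteCharCode));
      assert(!MatchesEitherOfPair('b', 'A', 'a', kMaxOneByteCharCode));
    }
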
@@ -1823,9 +1817,9 @@
 
   // Assert that everything is on one kTableSize page.
   for (int i = start_index; i <= end_index; i++) {
-    ASSERT_EQ(ranges->at(i) & ~kMask, base);
+    DCHECK_EQ(ranges->at(i) & ~kMask, base);
   }
-  ASSERT(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
+  DCHECK(start_index == 0 || (ranges->at(start_index - 1) & ~kMask) <= base);
 
   char templ[kSize];
   Label* on_bit_set;
@@ -1881,7 +1875,7 @@
                          &dummy,
                          in_range_label,
                          &dummy);
-  ASSERT(!dummy.is_linked());
+  DCHECK(!dummy.is_linked());
   // Cut out the single range by rewriting the array.  This creates a new
   // range that is a merger of the two ranges on either side of the one we
   // are cutting out.  The oddity of the labels is preserved.
@@ -1917,7 +1911,7 @@
   // new_start_index is the index of the first edge that is beyond the
   // current kSize space.
 
-  // For very large search spaces we do a binary chop search of the non-ASCII
+  // For very large search spaces we do a binary chop search of the non-Latin1
   // space instead of just going to the end of the current kSize space.  The
   // heuristics are complicated a little by the fact that any 128-character
   // encoding space can be quickly tested with a table lookup, so we don't
@@ -1926,14 +1920,13 @@
   // for example, we only want to match every second character (eg. the lower
   // case characters on some Unicode pages).
   int binary_chop_index = (end_index + start_index) / 2;
-  // The first test ensures that we get to the code that handles the ASCII
+  // The first test ensures that we get to the code that handles the Latin1
   // range with a single not-taken branch, speeding up this important
-  // character range (even non-ASCII charset-based text has spaces and
+  // character range (even non-Latin1 charset-based text has spaces and
   // punctuation).
-  if (*border - 1 > String::kMaxOneByteCharCode &&  // ASCII case.
+  if (*border - 1 > String::kMaxOneByteCharCode &&  // Latin1 case.
       end_index - start_index > (*new_start_index - start_index) * 2 &&
-      last - first > kSize * 2 &&
-      binary_chop_index > *new_start_index &&
+      last - first > kSize * 2 && binary_chop_index > *new_start_index &&
       ranges->at(binary_chop_index) >= first + 2 * kSize) {
     int scan_forward_for_section_border = binary_chop_index;
     int new_border = (ranges->at(binary_chop_index) | kMask) + 1;
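
The kMask arithmetic in this function comes from the macro assembler's 128-entry boolean table: any single 128-aligned page of the character space can be decided with one table lookup, so the binary chop above is only worth it for ranges spanning many pages. The page math in isolation (128 is the table size implied by RegExpMacroAssembler::kTableMask):

    #include <cassert>

    const int kTableSize = 128;
    const int kMask = kTableSize - 1;

    int main() {
      int c = 0x61;                        // 'a'
      assert((c & ~kMask) == 0x0000);      // base of the page containing c
      assert((c & kMask) == 0x61);         // index into that page's table
      c = 0x20AC;                          // EURO SIGN
      assert((c & ~kMask) == 0x2080);
      assert((c & kMask) == 0x2C);
      return 0;
    }
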
@@ -1948,7 +1941,7 @@
     }
   }
 
-  ASSERT(*new_start_index > start_index);
+  DCHECK(*new_start_index > start_index);
   *new_end_index = *new_start_index - 1;
   if (ranges->at(*new_end_index) == *border) {
     (*new_end_index)--;
@@ -1979,7 +1972,7 @@
   int first = ranges->at(start_index);
   int last = ranges->at(end_index) - 1;
 
-  ASSERT_LT(min_char, first);
+  DCHECK_LT(min_char, first);
 
   // Just need to test if the character is before or on-or-after
   // a particular character.
@@ -2012,7 +2005,7 @@
     if (cut == kNoCutIndex) cut = start_index;
     CutOutRange(
         masm, ranges, start_index, end_index, cut, even_label, odd_label);
-    ASSERT_GE(end_index - start_index, 2);
+    DCHECK_GE(end_index - start_index, 2);
     GenerateBranches(masm,
                      ranges,
                      start_index + 1,
@@ -2072,25 +2065,25 @@
     // We didn't find any section that started after the limit, so everything
     // above the border is one of the terminal labels.
     above = (end_index & 1) != (start_index & 1) ? odd_label : even_label;
-    ASSERT(new_end_index == end_index - 1);
+    DCHECK(new_end_index == end_index - 1);
   }
 
-  ASSERT_LE(start_index, new_end_index);
-  ASSERT_LE(new_start_index, end_index);
-  ASSERT_LT(start_index, new_start_index);
-  ASSERT_LT(new_end_index, end_index);
-  ASSERT(new_end_index + 1 == new_start_index ||
+  DCHECK_LE(start_index, new_end_index);
+  DCHECK_LE(new_start_index, end_index);
+  DCHECK_LT(start_index, new_start_index);
+  DCHECK_LT(new_end_index, end_index);
+  DCHECK(new_end_index + 1 == new_start_index ||
          (new_end_index + 2 == new_start_index &&
           border == ranges->at(new_end_index + 1)));
-  ASSERT_LT(min_char, border - 1);
-  ASSERT_LT(border, max_char);
-  ASSERT_LT(ranges->at(new_end_index), border);
-  ASSERT(border < ranges->at(new_start_index) ||
+  DCHECK_LT(min_char, border - 1);
+  DCHECK_LT(border, max_char);
+  DCHECK_LT(ranges->at(new_end_index), border);
+  DCHECK(border < ranges->at(new_start_index) ||
          (border == ranges->at(new_start_index) &&
           new_start_index == end_index &&
           new_end_index == end_index - 1 &&
           border == last + 1));
-  ASSERT(new_start_index == 0 || border >= ranges->at(new_start_index - 1));
+  DCHECK(new_start_index == 0 || border >= ranges->at(new_start_index - 1));
 
   masm->CheckCharacterGT(border - 1, above);
   Label dummy;
@@ -2120,20 +2113,16 @@
 
 
 static void EmitCharClass(RegExpMacroAssembler* macro_assembler,
-                          RegExpCharacterClass* cc,
-                          bool ascii,
-                          Label* on_failure,
-                          int cp_offset,
-                          bool check_offset,
-                          bool preloaded,
-                          Zone* zone) {
+                          RegExpCharacterClass* cc, bool one_byte,
+                          Label* on_failure, int cp_offset, bool check_offset,
+                          bool preloaded, Zone* zone) {
   ZoneList<CharacterRange>* ranges = cc->ranges(zone);
   if (!CharacterRange::IsCanonical(ranges)) {
     CharacterRange::Canonicalize(ranges);
   }
 
   int max_char;
-  if (ascii) {
+  if (one_byte) {
     max_char = String::kMaxOneByteCharCode;
   } else {
     max_char = String::kMaxUtf16CodeUnit;
@@ -2207,7 +2196,7 @@
   for (int i = 0; i <= last_valid_range; i++) {
     CharacterRange& range = ranges->at(i);
     if (range.from() == 0) {
-      ASSERT_EQ(i, 0);
+      DCHECK_EQ(i, 0);
       zeroth_entry_is_failure = !zeroth_entry_is_failure;
     } else {
       range_boundaries->Add(range.from(), zone);
@@ -2454,6 +2443,7 @@
 
 
 bool RegExpNode::EmitQuickCheck(RegExpCompiler* compiler,
+                                Trace* bounds_check_trace,
                                 Trace* trace,
                                 bool preload_has_checked_bounds,
                                 Label* on_possible_success,
@@ -2463,8 +2453,8 @@
   GetQuickCheckDetails(
       details, compiler, 0, trace->at_start() == Trace::FALSE_VALUE);
   if (details->cannot_match()) return false;
-  if (!details->Rationalize(compiler->ascii())) return false;
-  ASSERT(details->characters() == 1 ||
+  if (!details->Rationalize(compiler->one_byte())) return false;
+  DCHECK(details->characters() == 1 ||
          compiler->macro_assembler()->CanReadUnaligned());
   uint32_t mask = details->mask();
   uint32_t value = details->value();
@@ -2472,8 +2462,13 @@
   RegExpMacroAssembler* assembler = compiler->macro_assembler();
 
   if (trace->characters_preloaded() != details->characters()) {
+    DCHECK(trace->cp_offset() == bounds_check_trace->cp_offset());
+    // We are attempting to preload the minimum number of characters
+    // any choice would eat, so if the bounds check fails, then none of the
+    // choices can succeed, so we can just immediately backtrack, rather
+    // than go to the next choice.
     assembler->LoadCurrentCharacter(trace->cp_offset(),
-                                    trace->backtrack(),
+                                    bounds_check_trace->backtrack(),
                                     !preload_has_checked_bounds,
                                     details->characters());
   }
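
The quick check itself is a single masked compare over the preloaded characters; the new bounds_check_trace parameter only changes where a failed bounds check lands (straight to backtrack, since the preload width is the minimum any alternative consumes, so no sibling choice could succeed either). The mask/value idea in isolation, for the literal "ab" over a one-byte subject, as a 16-bit little-endian load would produce:

    #include <cassert>
    #include <cstdint>

    // Accept iff the two preloaded one-byte characters are exactly "ab".
    bool QuickCheckAB(uint32_t loaded) {
      const uint32_t value = 'a' | ('b' << 8);  // expected bit pattern
      const uint32_t mask = 0xffff;             // every loaded bit matters here
      return (loaded & mask) == value;
    }

    int main() {
      assert(QuickCheckAB('a' | ('b' << 8)));
      assert(!QuickCheckAB('a' | ('c' << 8)));
      // With ignore_case, bits like 0x20 would be cleared from the mask so a
      // single compare still covers both cases.
      return 0;
    }
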
@@ -2485,7 +2480,7 @@
     // If number of characters preloaded is 1 then we used a byte or 16 bit
     // load so the value is already masked down.
     uint32_t char_mask;
-    if (compiler->ascii()) {
+    if (compiler->one_byte()) {
       char_mask = String::kMaxOneByteCharCode;
     } else {
       char_mask = String::kMaxUtf16CodeUnit;
@@ -2493,11 +2488,11 @@
     if ((mask & char_mask) == char_mask) need_mask = false;
     mask &= char_mask;
   } else {
-    // For 2-character preloads in ASCII mode or 1-character preloads in
-    // TWO_BYTE mode we also use a 16 bit load with zero extend.
-    if (details->characters() == 2 && compiler->ascii()) {
+    // For 2-character preloads in one-byte mode or 1-character preloads in
+    // two-byte mode we also use a 16 bit load with zero extend.
+    if (details->characters() == 2 && compiler->one_byte()) {
       if ((mask & 0xffff) == 0xffff) need_mask = false;
-    } else if (details->characters() == 1 && !compiler->ascii()) {
+    } else if (details->characters() == 1 && !compiler->one_byte()) {
       if ((mask & 0xffff) == 0xffff) need_mask = false;
     } else {
       if (mask == 0xffffffff) need_mask = false;
@@ -2534,10 +2529,10 @@
                                     int characters_filled_in,
                                     bool not_at_start) {
   Isolate* isolate = compiler->macro_assembler()->zone()->isolate();
-  ASSERT(characters_filled_in < details->characters());
+  DCHECK(characters_filled_in < details->characters());
   int characters = details->characters();
   int char_mask;
-  if (compiler->ascii()) {
+  if (compiler->one_byte()) {
     char_mask = String::kMaxOneByteCharCode;
   } else {
     char_mask = String::kMaxUtf16CodeUnit;
@@ -2551,19 +2546,21 @@
             details->positions(characters_filled_in);
         uc16 c = quarks[i];
         if (c > char_mask) {
-          // If we expect a non-ASCII character from an ASCII string,
-          // there is no way we can match. Not even case independent
-          // matching can turn an ASCII character into non-ASCII or
+          // If we expect a non-Latin1 character from a one-byte string,
+          // there is no way we can match. Not even case-independent
+          // matching can turn a Latin1 character into non-Latin1 or
           // vice versa.
+          // TODO(dcarney): issue 3550.  Verify that this works as expected.
+          // For example, \u0178 is uppercase of \u00ff (y-umlaut).
           details->set_cannot_match();
           pos->determines_perfectly = false;
           return;
         }
         if (compiler->ignore_case()) {
           unibrow::uchar chars[unibrow::Ecma262UnCanonicalize::kMaxWidth];
-          int length = GetCaseIndependentLetters(isolate, c, compiler->ascii(),
-                                                 chars);
-          ASSERT(length != 0);  // Can only happen if c > char_mask (see above).
+          int length = GetCaseIndependentLetters(isolate, c,
+                                                 compiler->one_byte(), chars);
+          DCHECK(length != 0);  // Can only happen if c > char_mask (see above).
           if (length == 1) {
             // This letter has no case equivalents, so it's nice and simple
             // and the mask-compare will determine definitely whether we have
@@ -2599,7 +2596,7 @@
           pos->determines_perfectly = true;
         }
         characters_filled_in++;
-        ASSERT(characters_filled_in <= details->characters());
+        DCHECK(characters_filled_in <= details->characters());
         if (characters_filled_in == details->characters()) {
           return;
         }
@@ -2665,13 +2662,13 @@
         pos->value = bits;
       }
       characters_filled_in++;
-      ASSERT(characters_filled_in <= details->characters());
+      DCHECK(characters_filled_in <= details->characters());
       if (characters_filled_in == details->characters()) {
         return;
       }
     }
   }
-  ASSERT(characters_filled_in != details->characters());
+  DCHECK(characters_filled_in != details->characters());
   if (!details->cannot_match()) {
   on_success()->GetQuickCheckDetails(details,
                                         compiler,
@@ -2691,8 +2688,8 @@
 }
 
 
-void QuickCheckDetails::Advance(int by, bool ascii) {
-  ASSERT(by >= 0);
+void QuickCheckDetails::Advance(int by, bool one_byte) {
+  DCHECK(by >= 0);
   if (by >= characters_) {
     Clear();
     return;
@@ -2713,7 +2710,7 @@
 
 
 void QuickCheckDetails::Merge(QuickCheckDetails* other, int from_index) {
-  ASSERT(characters_ == other->characters_);
+  DCHECK(characters_ == other->characters_);
   if (other->cannot_match_) {
     return;
   }
@@ -2744,7 +2741,7 @@
 class VisitMarker {
  public:
   explicit VisitMarker(NodeInfo* info) : info_(info) {
-    ASSERT(!info->visited);
+    DCHECK(!info->visited);
     info->visited = true;
   }
   ~VisitMarker() {
@@ -2755,17 +2752,17 @@
 };
 
 
-RegExpNode* SeqRegExpNode::FilterASCII(int depth, bool ignore_case) {
+RegExpNode* SeqRegExpNode::FilterOneByte(int depth, bool ignore_case) {
   if (info()->replacement_calculated) return replacement();
   if (depth < 0) return this;
-  ASSERT(!info()->visited);
+  DCHECK(!info()->visited);
   VisitMarker marker(info());
   return FilterSuccessor(depth - 1, ignore_case);
 }
 
 
 RegExpNode* SeqRegExpNode::FilterSuccessor(int depth, bool ignore_case) {
-  RegExpNode* next = on_success_->FilterASCII(depth - 1, ignore_case);
+  RegExpNode* next = on_success_->FilterOneByte(depth - 1, ignore_case);
   if (next == NULL) return set_replacement(NULL);
   on_success_ = next;
   return set_replacement(this);
@@ -2789,10 +2786,10 @@
 }
 
 
-RegExpNode* TextNode::FilterASCII(int depth, bool ignore_case) {
+RegExpNode* TextNode::FilterOneByte(int depth, bool ignore_case) {
   if (info()->replacement_calculated) return replacement();
   if (depth < 0) return this;
-  ASSERT(!info()->visited);
+  DCHECK(!info()->visited);
   VisitMarker marker(info());
   int element_count = elms_->length();
   for (int i = 0; i < element_count; i++) {
@@ -2813,7 +2810,7 @@
         copy[j] = converted;
       }
     } else {
-      ASSERT(elm.text_type() == TextElement::CHAR_CLASS);
+      DCHECK(elm.text_type() == TextElement::CHAR_CLASS);
       RegExpCharacterClass* cc = elm.char_class();
       ZoneList<CharacterRange>* ranges = cc->ranges(zone());
       if (!CharacterRange::IsCanonical(ranges)) {
@@ -2843,7 +2840,7 @@
 }
 
 
-RegExpNode* LoopChoiceNode::FilterASCII(int depth, bool ignore_case) {
+RegExpNode* LoopChoiceNode::FilterOneByte(int depth, bool ignore_case) {
   if (info()->replacement_calculated) return replacement();
   if (depth < 0) return this;
   if (info()->visited) return this;
@@ -2851,17 +2848,17 @@
     VisitMarker marker(info());
 
     RegExpNode* continue_replacement =
-        continue_node_->FilterASCII(depth - 1, ignore_case);
+        continue_node_->FilterOneByte(depth - 1, ignore_case);
     // If we can't continue after the loop then there is no sense in doing the
     // loop.
     if (continue_replacement == NULL) return set_replacement(NULL);
   }
 
-  return ChoiceNode::FilterASCII(depth - 1, ignore_case);
+  return ChoiceNode::FilterOneByte(depth - 1, ignore_case);
 }
 
 
-RegExpNode* ChoiceNode::FilterASCII(int depth, bool ignore_case) {
+RegExpNode* ChoiceNode::FilterOneByte(int depth, bool ignore_case) {
   if (info()->replacement_calculated) return replacement();
   if (depth < 0) return this;
   if (info()->visited) return this;
@@ -2881,8 +2878,8 @@
   for (int i = 0; i < choice_count; i++) {
     GuardedAlternative alternative = alternatives_->at(i);
     RegExpNode* replacement =
-        alternative.node()->FilterASCII(depth - 1, ignore_case);
-    ASSERT(replacement != this);  // No missing EMPTY_MATCH_CHECK.
+        alternative.node()->FilterOneByte(depth - 1, ignore_case);
+    DCHECK(replacement != this);  // No missing EMPTY_MATCH_CHECK.
     if (replacement != NULL) {
       alternatives_->at(i).set_node(replacement);
       surviving++;
@@ -2901,7 +2898,7 @@
       new(zone()) ZoneList<GuardedAlternative>(surviving, zone());
   for (int i = 0; i < choice_count; i++) {
     RegExpNode* replacement =
-        alternatives_->at(i).node()->FilterASCII(depth - 1, ignore_case);
+        alternatives_->at(i).node()->FilterOneByte(depth - 1, ignore_case);
     if (replacement != NULL) {
       alternatives_->at(i).set_node(replacement);
       new_alternatives->Add(alternatives_->at(i), zone());
@@ -2912,8 +2909,8 @@
 }
 
 
-RegExpNode* NegativeLookaheadChoiceNode::FilterASCII(int depth,
-                                                     bool ignore_case) {
+RegExpNode* NegativeLookaheadChoiceNode::FilterOneByte(int depth,
+                                                       bool ignore_case) {
   if (info()->replacement_calculated) return replacement();
   if (depth < 0) return this;
   if (info()->visited) return this;
@@ -2921,12 +2918,12 @@
   // Alternative 0 is the negative lookahead, alternative 1 is what comes
   // afterwards.
   RegExpNode* node = alternatives_->at(1).node();
-  RegExpNode* replacement = node->FilterASCII(depth - 1, ignore_case);
+  RegExpNode* replacement = node->FilterOneByte(depth - 1, ignore_case);
   if (replacement == NULL) return set_replacement(NULL);
   alternatives_->at(1).set_node(replacement);
 
   RegExpNode* neg_node = alternatives_->at(0).node();
-  RegExpNode* neg_replacement = neg_node->FilterASCII(depth - 1, ignore_case);
+  RegExpNode* neg_replacement = neg_node->FilterOneByte(depth - 1, ignore_case);
   // If the negative lookahead is always going to fail then
   // we don't need to check it.
   if (neg_replacement == NULL) return set_replacement(replacement);
@@ -2968,7 +2965,7 @@
                                       bool not_at_start) {
   not_at_start = (not_at_start || not_at_start_);
   int choice_count = alternatives_->length();
-  ASSERT(choice_count > 0);
+  DCHECK(choice_count > 0);
   alternatives_->at(0).node()->GetQuickCheckDetails(details,
                                                     compiler,
                                                     characters_filled_in,
@@ -3035,7 +3032,7 @@
   if (!assembler->CheckSpecialCharacterClass('n',
                                              new_trace.backtrack())) {
     // Newline means \n, \r, 0x2028 or 0x2029.
-    if (!compiler->ascii()) {
+    if (!compiler->one_byte()) {
       assembler->CheckCharacterAfterAnd(0x2028, 0xfffe, &ok);
     }
     assembler->CheckCharacter('\n', &ok);
@@ -3092,7 +3089,7 @@
   } else if (next_is_word_character == Trace::TRUE_VALUE) {
     BacktrackIfPrevious(compiler, trace, at_boundary ? kIsWord : kIsNonWord);
   } else {
-    ASSERT(next_is_word_character == Trace::FALSE_VALUE);
+    DCHECK(next_is_word_character == Trace::FALSE_VALUE);
     BacktrackIfPrevious(compiler, trace, at_boundary ? kIsNonWord : kIsWord);
   }
 }
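
The boundary emitter reduces \b and \B to one question: do the previous and next positions disagree about word-ness? A rough standalone model, where is_word approximates the real \w class and positions off either end of the subject count as non-word:

    #include <cctype>
    #include <cstdio>
    #include <cstring>

    // Approximation of \w: [0-9A-Za-z_].
    static bool is_word(char c) {
      return c == '_' || std::isalnum(static_cast<unsigned char>(c));
    }

    // A boundary (\b) holds exactly where the previous and next
    // characters disagree about word-ness.
    static bool at_boundary(const char* s, int i) {
      int n = static_cast<int>(std::strlen(s));
      bool prev = i > 0 && is_word(s[i - 1]);
      bool next = i < n && is_word(s[i]);
      return prev != next;
    }

    int main() {
      const char* s = "a-b";
      for (int i = 0; i <= 3; i++)
        std::printf("pos %d: %s\n", i, at_boundary(s, i) ? "\\b" : "\\B");
      // Positions 0 and 1 bracket 'a', positions 2 and 3 bracket 'b'.
      return 0;
    }
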
@@ -3233,7 +3230,7 @@
                             int* checked_up_to) {
   RegExpMacroAssembler* assembler = compiler->macro_assembler();
   Isolate* isolate = assembler->zone()->isolate();
-  bool ascii = compiler->ascii();
+  bool one_byte = compiler->one_byte();
   Label* backtrack = trace->backtrack();
   QuickCheckDetails* quick_check = trace->quick_check_performed();
   int element_count = elms_->length();
@@ -3247,8 +3244,8 @@
         if (DeterminedAlready(quick_check, elm.cp_offset() + j)) continue;
         EmitCharacterFunction* emit_function = NULL;
         switch (pass) {
-          case NON_ASCII_MATCH:
-            ASSERT(ascii);
+          case NON_LATIN1_MATCH:
+            DCHECK(one_byte);
             if (quarks[j] > String::kMaxOneByteCharCode) {
               assembler->GoTo(backtrack);
               return;
@@ -3278,19 +3275,13 @@
         }
       }
     } else {
-      ASSERT_EQ(TextElement::CHAR_CLASS, elm.text_type());
+      DCHECK_EQ(TextElement::CHAR_CLASS, elm.text_type());
       if (pass == CHARACTER_CLASS_MATCH) {
         if (first_element_checked && i == 0) continue;
         if (DeterminedAlready(quick_check, elm.cp_offset())) continue;
         RegExpCharacterClass* cc = elm.char_class();
-        EmitCharClass(assembler,
-                      cc,
-                      ascii,
-                      backtrack,
-                      cp_offset,
-                      *checked_up_to < cp_offset,
-                      preloaded,
-                      zone());
+        EmitCharClass(assembler, cc, one_byte, backtrack, cp_offset,
+                      *checked_up_to < cp_offset, preloaded, zone());
         UpdateBoundsCheck(cp_offset, checked_up_to);
       }
     }
@@ -3300,7 +3291,7 @@
 
 int TextNode::Length() {
   TextElement elm = elms_->last();
-  ASSERT(elm.cp_offset() >= 0);
+  DCHECK(elm.cp_offset() >= 0);
   return elm.cp_offset() + elm.length();
 }
 
@@ -3324,16 +3315,16 @@
 void TextNode::Emit(RegExpCompiler* compiler, Trace* trace) {
   LimitResult limit_result = LimitVersions(compiler, trace);
   if (limit_result == DONE) return;
-  ASSERT(limit_result == CONTINUE);
+  DCHECK(limit_result == CONTINUE);
 
   if (trace->cp_offset() + Length() > RegExpMacroAssembler::kMaxCPOffset) {
     compiler->SetRegExpTooBig();
     return;
   }
 
-  if (compiler->ascii()) {
+  if (compiler->one_byte()) {
     int dummy = 0;
-    TextEmitPass(compiler, NON_ASCII_MATCH, false, trace, false, &dummy);
+    TextEmitPass(compiler, NON_LATIN1_MATCH, false, trace, false, &dummy);
   }
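
The NON_LATIN1_MATCH pass is the cheapest of the emit passes: on a one-byte subject, a literal pattern character above String::kMaxOneByteCharCode can never match, so the generated code can jump straight to the backtrack label without loading any subject characters. An illustrative reduction of that test (names here are ours, not V8's):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    constexpr uint16_t kMaxOneByteCharCode = 0xFF;

    // True iff every literal character fits in one byte; otherwise the
    // real pass emits an unconditional GoTo(backtrack) for the node.
    bool CanMatchOneByte(const std::vector<uint16_t>& literal_chars) {
      for (uint16_t c : literal_chars) {
        if (c > kMaxOneByteCharCode) return false;
      }
      return true;
    }

    int main() {
      std::printf("%d %d\n", CanMatchOneByte({'a', 'b'}),
                  CanMatchOneByte({'a', 0x2603}));  // prints: 1 0
      return 0;
    }
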
 
   bool first_elt_done = false;
@@ -3381,7 +3372,7 @@
 
 
 void Trace::AdvanceCurrentPositionInTrace(int by, RegExpCompiler* compiler) {
-  ASSERT(by > 0);
+  DCHECK(by > 0);
   // We don't have an instruction for shifting the current character register
   // down or for using a shifted value for anything, so let's just forget that
   // we preloaded any characters into it.
@@ -3389,7 +3380,7 @@
   // Adjust the offsets of the quick check performed information.  This
   // information is used to find out what we already determined about the
   // characters by means of mask and compare.
-  quick_check_performed_.Advance(by, compiler->ascii());
+  quick_check_performed_.Advance(by, compiler->one_byte());
   cp_offset_ += by;
   if (cp_offset_ > RegExpMacroAssembler::kMaxCPOffset) {
     compiler->SetRegExpTooBig();
@@ -3399,7 +3390,7 @@
 }
 
 
-void TextNode::MakeCaseIndependent(bool is_ascii) {
+void TextNode::MakeCaseIndependent(bool is_one_byte) {
   int element_count = elms_->length();
   for (int i = 0; i < element_count; i++) {
     TextElement elm = elms_->at(i);
@@ -3411,7 +3402,7 @@
       ZoneList<CharacterRange>* ranges = cc->ranges(zone());
       int range_count = ranges->length();
       for (int j = 0; j < range_count; j++) {
-        ranges->at(j).AddCaseEquivalents(ranges, is_ascii, zone());
+        ranges->at(j).AddCaseEquivalents(ranges, is_one_byte, zone());
       }
     }
   }
@@ -3439,7 +3430,7 @@
   }
   if (ranges->length() != 1) return NULL;
   uint32_t max_char;
-  if (compiler->ascii()) {
+  if (compiler->one_byte()) {
     max_char = String::kMaxOneByteCharCode;
   } else {
     max_char = String::kMaxUtf16CodeUnit;
@@ -3476,14 +3467,14 @@
 
 
 void LoopChoiceNode::AddLoopAlternative(GuardedAlternative alt) {
-  ASSERT_EQ(loop_node_, NULL);
+  DCHECK_EQ(loop_node_, NULL);
   AddAlternative(alt);
   loop_node_ = alt.node();
 }
 
 
 void LoopChoiceNode::AddContinueAlternative(GuardedAlternative alt) {
-  ASSERT_EQ(continue_node_, NULL);
+  DCHECK_EQ(continue_node_, NULL);
   AddAlternative(alt);
   continue_node_ = alt.node();
 }
@@ -3492,17 +3483,18 @@
 void LoopChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
   RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
   if (trace->stop_node() == this) {
+    // Back edge of greedy optimized loop node graph.
     int text_length =
         GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
-    ASSERT(text_length != kNodeIsTooComplexForGreedyLoops);
+    DCHECK(text_length != kNodeIsTooComplexForGreedyLoops);
     // Update the counter-based backtracking info on the stack.  This is an
     // optimization for greedy loops (see below).
-    ASSERT(trace->cp_offset() == text_length);
+    DCHECK(trace->cp_offset() == text_length);
     macro_assembler->AdvanceCurrentPosition(text_length);
     macro_assembler->GoTo(trace->loop_label());
     return;
   }
-  ASSERT(trace->stop_node() == NULL);
+  DCHECK(trace->stop_node() == NULL);
   if (!trace->is_trivial()) {
     trace->Flush(compiler, this);
     return;
@@ -3515,8 +3507,8 @@
                                            int eats_at_least) {
   int preload_characters = Min(4, eats_at_least);
   if (compiler->macro_assembler()->CanReadUnaligned()) {
-    bool ascii = compiler->ascii();
-    if (ascii) {
+    bool one_byte = compiler->one_byte();
+    if (one_byte) {
       if (preload_characters > 4) preload_characters = 4;
       // We can't preload 3 characters because there is no machine instruction
       // to do that.  We can't just load 4 because we could be reading
@@ -3586,18 +3578,18 @@
     0x00A0, 0x00A1, 0x1680, 0x1681, 0x180E, 0x180F, 0x2000, 0x200B,
     0x2028, 0x202A, 0x202F, 0x2030, 0x205F, 0x2060, 0x3000, 0x3001,
     0xFEFF, 0xFF00, 0x10000 };
-static const int kSpaceRangeCount = ARRAY_SIZE(kSpaceRanges);
+static const int kSpaceRangeCount = arraysize(kSpaceRanges);
 
 static const int kWordRanges[] = {
     '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, 0x10000 };
-static const int kWordRangeCount = ARRAY_SIZE(kWordRanges);
+static const int kWordRangeCount = arraysize(kWordRanges);
 static const int kDigitRanges[] = { '0', '9' + 1, 0x10000 };
-static const int kDigitRangeCount = ARRAY_SIZE(kDigitRanges);
+static const int kDigitRangeCount = arraysize(kDigitRanges);
 static const int kSurrogateRanges[] = { 0xd800, 0xe000, 0x10000 };
-static const int kSurrogateRangeCount = ARRAY_SIZE(kSurrogateRanges);
+static const int kSurrogateRangeCount = arraysize(kSurrogateRanges);
 static const int kLineTerminatorRanges[] = { 0x000A, 0x000B, 0x000D, 0x000E,
     0x2028, 0x202A, 0x10000 };
-static const int kLineTerminatorRangeCount = ARRAY_SIZE(kLineTerminatorRanges);
+static const int kLineTerminatorRangeCount = arraysize(kLineTerminatorRanges);
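
These tables encode a character class as a flat, strictly increasing list of boundaries: even-indexed entries open a range, odd-indexed entries close it (exclusive), and 0x10000 terminates the list one past the last code unit. Membership is then a binary search; a sketch of the lookup, reusing kWordRanges:

    #include <algorithm>
    #include <cstdio>

    static const int kWordRanges[] = {
        '0', '9' + 1, 'A', 'Z' + 1, '_', '_' + 1, 'a', 'z' + 1, 0x10000};

    // In the class iff the first boundary strictly greater than c has an
    // odd index: past an opening boundary but before its exclusive close.
    static bool InClass(const int* ranges, int count, int c) {
      const int* p = std::upper_bound(ranges, ranges + count, c);
      return (p - ranges) % 2 == 1;
    }

    int main() {
      const int n = sizeof(kWordRanges) / sizeof(kWordRanges[0]);
      for (int c : {'_', '5', '-', 'z', '{'})
        std::printf("'%c' in \\w: %d\n", c, InClass(kWordRanges, n, c));
      // '_', '5' and 'z' are in; '-' and '{' are out.
      return 0;
    }
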
 
 
 void BoyerMoorePositionInfo::Set(int character) {
@@ -3642,7 +3634,7 @@
     int length, RegExpCompiler* compiler, Zone* zone)
     : length_(length),
       compiler_(compiler) {
-  if (compiler->ascii()) {
+  if (compiler->one_byte()) {
     max_char_ = String::kMaxOneByteCharCode;
   } else {
     max_char_ = String::kMaxUtf16CodeUnit;
@@ -3710,8 +3702,9 @@
     // dividing by 2 we switch off the skipping if the probability of skipping
     // is less than 50%.  This is because the multibyte mask-and-compare
     // skipping in quickcheck is more likely to do well on this case.
-    bool in_quickcheck_range = ((i - remembered_from < 4) ||
-        (compiler_->ascii() ? remembered_from <= 4 : remembered_from <= 2));
+    bool in_quickcheck_range =
+        ((i - remembered_from < 4) ||
+         (compiler_->one_byte() ? remembered_from <= 4 : remembered_from <= 2));
     // Called 'probability' but it is only a rough estimate and can actually
     // be outside the 0-kSize range.
     int probability = (in_quickcheck_range ? kSize / 2 : kSize) - frequency;
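
Worked numbers make the heuristic concrete, assuming kSize = 128 for the table; this is illustrative arithmetic only, not the real data structures:

    #include <cstdio>

    int main() {
      const int kSize = 128;  // assumed bit-table size
      int frequency = 80;     // class admits 80 of 128 sampled characters
      // Inside the quick-check window the budget is halved, so skipping
      // only pays if the class rejects more than half the sample.
      int in_window = kSize / 2 - frequency;  // -16: leave it to quick check
      int outside = kSize - frequency;        //  48: skipping still pays
      std::printf("in window: %d, outside: %d\n", in_window, outside);
      return 0;
    }
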
@@ -3758,13 +3751,13 @@
 
 
 // See comment above on the implementation of GetSkipTable.
-bool BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
+void BoyerMooreLookahead::EmitSkipInstructions(RegExpMacroAssembler* masm) {
   const int kSize = RegExpMacroAssembler::kTableSize;
 
   int min_lookahead = 0;
   int max_lookahead = 0;
 
-  if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return false;
+  if (!FindWorthwhileInterval(&min_lookahead, &max_lookahead)) return;
 
   bool found_single_character = false;
   int single_character = 0;
@@ -3788,7 +3781,7 @@
 
   if (found_single_character && lookahead_width == 1 && max_lookahead < 3) {
     // The mask-compare can probably handle this better.
-    return false;
+    return;
   }
 
   if (found_single_character) {
@@ -3805,14 +3798,14 @@
     masm->AdvanceCurrentPosition(lookahead_width);
     masm->GoTo(&again);
     masm->Bind(&cont);
-    return true;
+    return;
   }
 
   Factory* factory = masm->zone()->isolate()->factory();
   Handle<ByteArray> boolean_skip_table = factory->NewByteArray(kSize, TENURED);
   int skip_distance = GetSkipTable(
       min_lookahead, max_lookahead, boolean_skip_table);
-  ASSERT(skip_distance != 0);
+  DCHECK(skip_distance != 0);
 
   Label cont, again;
   masm->Bind(&again);
@@ -3821,8 +3814,6 @@
   masm->AdvanceCurrentPosition(skip_distance);
   masm->GoTo(&again);
   masm->Bind(&cont);
-
-  return true;
 }
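
The fast path this emits is a Boyer-Moore-flavoured skip loop over a boolean table: while the character max_lookahead positions ahead cannot occur anywhere in the lookahead window, advance by skip_distance. A plain-C++ stand-in for the emitted control flow, with the pattern and the distances chosen purely for illustration:

    #include <cstdio>
    #include <cstring>

    int main() {
      const char* subject = "xxxxxxxxxxfooxxx";
      const char* window = "fo";  // characters the lookahead window admits
      bool table[128] = {false};
      for (const char* p = window; *p; p++) table[*p & 0x7F] = true;

      const int max_lookahead = 1, skip = 2;  // illustrative distances
      int pos = 0;
      int n = static_cast<int>(std::strlen(subject));
      // While the character at pos + max_lookahead rules out a match
      // starting anywhere in [pos, pos + skip), jump forward.
      while (pos + max_lookahead < n &&
             !table[subject[pos + max_lookahead] & 0x7F]) {
        pos += skip;
      }
      std::printf("resume slow matching at %d\n", pos);  // 10
      return 0;
    }
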
 
 
@@ -3876,10 +3867,12 @@
  *     \   F   V
  *      \-----S4
  *
- * For greedy loops we reverse our expectation and expect to match rather
- * than fail. Therefore we want the loop code to look like this (U is the
- * unwind code that steps back in the greedy loop).  The following alternatives
- * look the same as above.
+ * For greedy loops we push the current position, then generate the code that
+ * eats the input specially in EmitGreedyLoop.  The other choice (the
+ * continuation) is generated by the normal code in EmitChoices, and steps back
+ * in the input to the starting position when it fails to match.  The loop code
+ * looks like this (U is the unwind code that steps back in the greedy loop).
+ *
  *              _____
  *             /     \
  *             V     |
@@ -3888,41 +3881,66 @@
  *           / |S    |
  *         F/  \_____/
  *         /
- *        |<-----------
- *        |            \
- *        V             \
- *        Q2 ---> S2     \
- *        |  S   /       |
- *       F|     /        |
- *        |   F/         |
- *        |   /          |
- *        |  R           |
- *        | /            |
- *   F    VL             |
- * <------U              |
- * back   |S             |
- *        \______________/
+ *        |<-----
+ *        |      \
+ *        V       |S
+ *        Q2 ---> U----->backtrack
+ *        |  F   /
+ *       S|     /
+ *        V  F /
+ *        S2--/
  */
 
-void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
-  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
-  int choice_count = alternatives_->length();
+GreedyLoopState::GreedyLoopState(bool not_at_start) {
+  counter_backtrack_trace_.set_backtrack(&label_);
+  if (not_at_start) counter_backtrack_trace_.set_at_start(false);
+}
+
+
+void ChoiceNode::AssertGuardsMentionRegisters(Trace* trace) {
 #ifdef DEBUG
+  int choice_count = alternatives_->length();
   for (int i = 0; i < choice_count - 1; i++) {
     GuardedAlternative alternative = alternatives_->at(i);
     ZoneList<Guard*>* guards = alternative.guards();
     int guard_count = (guards == NULL) ? 0 : guards->length();
     for (int j = 0; j < guard_count; j++) {
-      ASSERT(!trace->mentions_reg(guards->at(j)->reg()));
+      DCHECK(!trace->mentions_reg(guards->at(j)->reg()));
     }
   }
 #endif
+}
+
+
+void ChoiceNode::SetUpPreLoad(RegExpCompiler* compiler,
+                              Trace* current_trace,
+                              PreloadState* state) {
+  if (state->eats_at_least_ == PreloadState::kEatsAtLeastNotYetInitialized) {
+    // Save some time by looking at most one machine word ahead.
+    state->eats_at_least_ =
+        EatsAtLeast(compiler->one_byte() ? 4 : 2, kRecursionBudget,
+                    current_trace->at_start() == Trace::FALSE_VALUE);
+  }
+  state->preload_characters_ =
+      CalculatePreloadCharacters(compiler, state->eats_at_least_);
+
+  state->preload_is_current_ =
+      (current_trace->characters_preloaded() == state->preload_characters_);
+  state->preload_has_checked_bounds_ = state->preload_is_current_;
+}
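
The 4-or-2 probe in SetUpPreLoad is the "one machine word ahead" cap: with 32-bit loads the preloader can fetch at most four Latin-1 or two UC16 characters at once. The arithmetic, with the load width stated as an assumption:

    #include <cstdio>

    int main() {
      const int kLoadBits = 32;  // assumed widest character load
      for (int char_bits : {8, 16})
        std::printf("%d-bit chars: preload at most %d\n", char_bits,
                    kLoadBits / char_bits);  // 4 for Latin-1, 2 for UC16
      return 0;
    }
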
+
+
+void ChoiceNode::Emit(RegExpCompiler* compiler, Trace* trace) {
+  int choice_count = alternatives_->length();
+
+  AssertGuardsMentionRegisters(trace);
 
   LimitResult limit_result = LimitVersions(compiler, trace);
   if (limit_result == DONE) return;
-  ASSERT(limit_result == CONTINUE);
+  DCHECK(limit_result == CONTINUE);
 
-  int new_flush_budget = trace->flush_budget() / choice_count;
+  // For loop nodes we already flushed (see LoopChoiceNode::Emit), but for
+  // other choice nodes we only flush if we are out of code size budget.
   if (trace->flush_budget() == 0 && trace->actions() != NULL) {
     trace->Flush(compiler, this);
     return;
@@ -3930,141 +3948,217 @@
 
   RecursionCheck rc(compiler);
 
-  Trace* current_trace = trace;
+  PreloadState preload;
+  preload.init();
+  GreedyLoopState greedy_loop_state(not_at_start());
 
-  int text_length = GreedyLoopTextLengthForAlternative(&(alternatives_->at(0)));
-  bool greedy_loop = false;
-  Label greedy_loop_label;
-  Trace counter_backtrack_trace;
-  counter_backtrack_trace.set_backtrack(&greedy_loop_label);
-  if (not_at_start()) counter_backtrack_trace.set_at_start(false);
+  int text_length = GreedyLoopTextLengthForAlternative(&alternatives_->at(0));
+  AlternativeGenerationList alt_gens(choice_count, zone());
 
   if (choice_count > 1 && text_length != kNodeIsTooComplexForGreedyLoops) {
-    // Here we have special handling for greedy loops containing only text nodes
-    // and other simple nodes.  These are handled by pushing the current
-    // position on the stack and then incrementing the current position each
-    // time around the switch.  On backtrack we decrement the current position
-    // and check it against the pushed value.  This avoids pushing backtrack
-    // information for each iteration of the loop, which could take up a lot of
-    // space.
-    greedy_loop = true;
-    ASSERT(trace->stop_node() == NULL);
-    macro_assembler->PushCurrentPosition();
-    current_trace = &counter_backtrack_trace;
-    Label greedy_match_failed;
-    Trace greedy_match_trace;
-    if (not_at_start()) greedy_match_trace.set_at_start(false);
-    greedy_match_trace.set_backtrack(&greedy_match_failed);
-    Label loop_label;
-    macro_assembler->Bind(&loop_label);
-    greedy_match_trace.set_stop_node(this);
-    greedy_match_trace.set_loop_label(&loop_label);
-    alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
-    macro_assembler->Bind(&greedy_match_failed);
+    trace = EmitGreedyLoop(compiler,
+                           trace,
+                           &alt_gens,
+                           &preload,
+                           &greedy_loop_state,
+                           text_length);
+  } else {
+    // TODO(erikcorry): Delete this.  We don't need this label, but it makes us
+    // match the traces produced pre-cleanup.
+    Label second_choice;
+    compiler->macro_assembler()->Bind(&second_choice);
+
+    preload.eats_at_least_ = EmitOptimizedUnanchoredSearch(compiler, trace);
+
+    EmitChoices(compiler,
+                &alt_gens,
+                0,
+                trace,
+                &preload);
   }
 
+  // At this point we need to generate slow checks for the alternatives where
+  // the quick check was inlined.  We can recognize these because the associated
+  // label was bound.
+  int new_flush_budget = trace->flush_budget() / choice_count;
+  for (int i = 0; i < choice_count; i++) {
+    AlternativeGeneration* alt_gen = alt_gens.at(i);
+    Trace new_trace(*trace);
+    // If there are actions to be flushed we have to limit how many times
+    // they are flushed.  Take the budget of the parent trace and distribute
+    // it fairly amongst the children.
+    if (new_trace.actions() != NULL) {
+      new_trace.set_flush_budget(new_flush_budget);
+    }
+    bool next_expects_preload =
+        i == choice_count - 1 ? false : alt_gens.at(i + 1)->expects_preload;
+    EmitOutOfLineContinuation(compiler,
+                              &new_trace,
+                              alternatives_->at(i),
+                              alt_gen,
+                              preload.preload_characters_,
+                              next_expects_preload);
+  }
+}
+
+
+Trace* ChoiceNode::EmitGreedyLoop(RegExpCompiler* compiler,
+                                  Trace* trace,
+                                  AlternativeGenerationList* alt_gens,
+                                  PreloadState* preload,
+                                  GreedyLoopState* greedy_loop_state,
+                                  int text_length) {
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  // Here we have special handling for greedy loops containing only text nodes
+  // and other simple nodes.  These are handled by pushing the current
+  // position on the stack and then incrementing the current position each
+  // time around the switch.  On backtrack we decrement the current position
+  // and check it against the pushed value.  This avoids pushing backtrack
+  // information for each iteration of the loop, which could take up a lot of
+  // space.
+  DCHECK(trace->stop_node() == NULL);
+  macro_assembler->PushCurrentPosition();
+  Label greedy_match_failed;
+  Trace greedy_match_trace;
+  if (not_at_start()) greedy_match_trace.set_at_start(false);
+  greedy_match_trace.set_backtrack(&greedy_match_failed);
+  Label loop_label;
+  macro_assembler->Bind(&loop_label);
+  greedy_match_trace.set_stop_node(this);
+  greedy_match_trace.set_loop_label(&loop_label);
+  alternatives_->at(0).node()->Emit(compiler, &greedy_match_trace);
+  macro_assembler->Bind(&greedy_match_failed);
+
   Label second_choice;  // For use in greedy matches.
   macro_assembler->Bind(&second_choice);
 
-  int first_normal_choice = greedy_loop ? 1 : 0;
+  Trace* new_trace = greedy_loop_state->counter_backtrack_trace();
 
-  bool not_at_start = current_trace->at_start() == Trace::FALSE_VALUE;
-  const int kEatsAtLeastNotYetInitialized = -1;
-  int eats_at_least = kEatsAtLeastNotYetInitialized;
+  EmitChoices(compiler,
+              alt_gens,
+              1,
+              new_trace,
+              preload);
 
-  bool skip_was_emitted = false;
+  macro_assembler->Bind(greedy_loop_state->label());
+  // If we have unwound to the bottom then backtrack.
+  macro_assembler->CheckGreedyLoop(trace->backtrack());
+  // Otherwise try the second priority at an earlier position.
+  macro_assembler->AdvanceCurrentPosition(-text_length);
+  macro_assembler->GoTo(&second_choice);
+  return new_trace;
+}
 
-  if (!greedy_loop && choice_count == 2) {
-    GuardedAlternative alt1 = alternatives_->at(1);
-    if (alt1.guards() == NULL || alt1.guards()->length() == 0) {
-      RegExpNode* eats_anything_node = alt1.node();
-      if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) ==
-          this) {
-        // At this point we know that we are at a non-greedy loop that will eat
-        // any character one at a time.  Any non-anchored regexp has such a
-        // loop prepended to it in order to find where it starts.  We look for
-        // a pattern of the form ...abc... where we can look 6 characters ahead
-        // and step forwards 3 if the character is not one of abc.  Abc need
-        // not be atoms, they can be any reasonably limited character class or
-        // small alternation.
-        ASSERT(trace->is_trivial());  // This is the case on LoopChoiceNodes.
-        BoyerMooreLookahead* lookahead = bm_info(not_at_start);
-        if (lookahead == NULL) {
-          eats_at_least = Min(kMaxLookaheadForBoyerMoore,
-                              EatsAtLeast(kMaxLookaheadForBoyerMoore,
-                                          kRecursionBudget,
-                                          not_at_start));
-          if (eats_at_least >= 1) {
-            BoyerMooreLookahead* bm =
-                new(zone()) BoyerMooreLookahead(eats_at_least,
-                                                compiler,
-                                                zone());
-            GuardedAlternative alt0 = alternatives_->at(0);
-            alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, not_at_start);
-            skip_was_emitted = bm->EmitSkipInstructions(macro_assembler);
-          }
-        } else {
-          skip_was_emitted = lookahead->EmitSkipInstructions(macro_assembler);
-        }
-      }
+int ChoiceNode::EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler,
+                                              Trace* trace) {
+  int eats_at_least = PreloadState::kEatsAtLeastNotYetInitialized;
+  if (alternatives_->length() != 2) return eats_at_least;
+
+  GuardedAlternative alt1 = alternatives_->at(1);
+  if (alt1.guards() != NULL && alt1.guards()->length() != 0) {
+    return eats_at_least;
+  }
+  RegExpNode* eats_anything_node = alt1.node();
+  if (eats_anything_node->GetSuccessorOfOmnivorousTextNode(compiler) != this) {
+    return eats_at_least;
+  }
+
+  // Really we should be creating a new trace when we execute this function,
+  // but there is no need, because the code it generates cannot backtrack, and
+  // we always arrive here with a trivial trace (since it's the entry to a
+  // loop).  That also implies that there are no preloaded characters, which is
+  // good, because it means we won't be violating any assumptions by
+  // overwriting those characters with new load instructions.
+  DCHECK(trace->is_trivial());
+
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  // At this point we know that we are at a non-greedy loop that will eat
+  // any character one at a time.  Any non-anchored regexp has such a
+  // loop prepended to it in order to find where it starts.  We look for
+  // a pattern of the form ...abc... where we can look 6 characters ahead
+  // and step forwards 3 if the character is not one of abc.  Abc need
+  // not be atoms, they can be any reasonably limited character class or
+  // small alternation.
+  BoyerMooreLookahead* bm = bm_info(false);
+  if (bm == NULL) {
+    eats_at_least = Min(kMaxLookaheadForBoyerMoore,
+                        EatsAtLeast(kMaxLookaheadForBoyerMoore,
+                                    kRecursionBudget,
+                                    false));
+    if (eats_at_least >= 1) {
+      bm = new(zone()) BoyerMooreLookahead(eats_at_least,
+                                           compiler,
+                                           zone());
+      GuardedAlternative alt0 = alternatives_->at(0);
+      alt0.node()->FillInBMInfo(0, kRecursionBudget, bm, false);
     }
   }
-
-  if (eats_at_least == kEatsAtLeastNotYetInitialized) {
-    // Save some time by looking at most one machine word ahead.
-    eats_at_least =
-        EatsAtLeast(compiler->ascii() ? 4 : 2, kRecursionBudget, not_at_start);
+  if (bm != NULL) {
+    bm->EmitSkipInstructions(macro_assembler);
   }
-  int preload_characters = CalculatePreloadCharacters(compiler, eats_at_least);
+  return eats_at_least;
+}
 
-  bool preload_is_current = !skip_was_emitted &&
-      (current_trace->characters_preloaded() == preload_characters);
-  bool preload_has_checked_bounds = preload_is_current;
 
-  AlternativeGenerationList alt_gens(choice_count, zone());
+void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
+                             AlternativeGenerationList* alt_gens,
+                             int first_choice,
+                             Trace* trace,
+                             PreloadState* preload) {
+  RegExpMacroAssembler* macro_assembler = compiler->macro_assembler();
+  SetUpPreLoad(compiler, trace, preload);
 
   // For now we just call all choices one after the other.  The idea ultimately
   // is to use the Dispatch table to try only the relevant ones.
-  for (int i = first_normal_choice; i < choice_count; i++) {
+  int choice_count = alternatives_->length();
+
+  int new_flush_budget = trace->flush_budget() / choice_count;
+
+  for (int i = first_choice; i < choice_count; i++) {
+    bool is_last = i == choice_count - 1;
+    bool fall_through_on_failure = !is_last;
     GuardedAlternative alternative = alternatives_->at(i);
-    AlternativeGeneration* alt_gen = alt_gens.at(i);
-    alt_gen->quick_check_details.set_characters(preload_characters);
+    AlternativeGeneration* alt_gen = alt_gens->at(i);
+    alt_gen->quick_check_details.set_characters(preload->preload_characters_);
     ZoneList<Guard*>* guards = alternative.guards();
     int guard_count = (guards == NULL) ? 0 : guards->length();
-    Trace new_trace(*current_trace);
-    new_trace.set_characters_preloaded(preload_is_current ?
-                                         preload_characters :
+    Trace new_trace(*trace);
+    new_trace.set_characters_preloaded(preload->preload_is_current_ ?
+                                         preload->preload_characters_ :
                                          0);
-    if (preload_has_checked_bounds) {
-      new_trace.set_bound_checked_up_to(preload_characters);
+    if (preload->preload_has_checked_bounds_) {
+      new_trace.set_bound_checked_up_to(preload->preload_characters_);
     }
     new_trace.quick_check_performed()->Clear();
     if (not_at_start_) new_trace.set_at_start(Trace::FALSE_VALUE);
-    alt_gen->expects_preload = preload_is_current;
+    if (!is_last) {
+      new_trace.set_backtrack(&alt_gen->after);
+    }
+    alt_gen->expects_preload = preload->preload_is_current_;
     bool generate_full_check_inline = false;
     if (FLAG_regexp_optimization &&
-        try_to_emit_quick_check_for_alternative(i) &&
+        try_to_emit_quick_check_for_alternative(i == 0) &&
         alternative.node()->EmitQuickCheck(compiler,
+                                           trace,
                                            &new_trace,
-                                           preload_has_checked_bounds,
+                                           preload->preload_has_checked_bounds_,
                                            &alt_gen->possible_success,
                                            &alt_gen->quick_check_details,
-                                           i < choice_count - 1)) {
+                                           fall_through_on_failure)) {
       // Quick check was generated for this choice.
-      preload_is_current = true;
-      preload_has_checked_bounds = true;
-      // On the last choice in the ChoiceNode we generated the quick
-      // check to fall through on possible success.  So now we need to
-      // generate the full check inline.
-      if (i == choice_count - 1) {
+      preload->preload_is_current_ = true;
+      preload->preload_has_checked_bounds_ = true;
+      // If we generated the quick check to fall through on possible success,
+      // we now need to generate the full check inline.
+      if (!fall_through_on_failure) {
         macro_assembler->Bind(&alt_gen->possible_success);
         new_trace.set_quick_check_performed(&alt_gen->quick_check_details);
-        new_trace.set_characters_preloaded(preload_characters);
-        new_trace.set_bound_checked_up_to(preload_characters);
+        new_trace.set_characters_preloaded(preload->preload_characters_);
+        new_trace.set_bound_checked_up_to(preload->preload_characters_);
         generate_full_check_inline = true;
       }
     } else if (alt_gen->quick_check_details.cannot_match()) {
-      if (i == choice_count - 1 && !greedy_loop) {
+      if (!fall_through_on_failure) {
         macro_assembler->GoTo(trace->backtrack());
       }
       continue;
@@ -4074,13 +4168,10 @@
       // previous cases that go here when they fail.  There's no reason to
       // insist that they preload characters since the slow check we are about
       // to generate probably can't use it.
-      if (i != first_normal_choice) {
+      if (i != first_choice) {
         alt_gen->expects_preload = false;
         new_trace.InvalidateCurrentCharacter();
       }
-      if (i < choice_count - 1) {
-        new_trace.set_backtrack(&alt_gen->after);
-      }
       generate_full_check_inline = true;
     }
     if (generate_full_check_inline) {
@@ -4091,38 +4182,10 @@
         GenerateGuard(macro_assembler, guards->at(j), &new_trace);
       }
       alternative.node()->Emit(compiler, &new_trace);
-      preload_is_current = false;
+      preload->preload_is_current_ = false;
     }
     macro_assembler->Bind(&alt_gen->after);
   }
-  if (greedy_loop) {
-    macro_assembler->Bind(&greedy_loop_label);
-    // If we have unwound to the bottom then backtrack.
-    macro_assembler->CheckGreedyLoop(trace->backtrack());
-    // Otherwise try the second priority at an earlier position.
-    macro_assembler->AdvanceCurrentPosition(-text_length);
-    macro_assembler->GoTo(&second_choice);
-  }
-
-  // At this point we need to generate slow checks for the alternatives where
-  // the quick check was inlined.  We can recognize these because the associated
-  // label was bound.
-  for (int i = first_normal_choice; i < choice_count - 1; i++) {
-    AlternativeGeneration* alt_gen = alt_gens.at(i);
-    Trace new_trace(*current_trace);
-    // If there are actions to be flushed we have to limit how many times
-    // they are flushed.  Take the budget of the parent trace and distribute
-    // it fairly amongst the children.
-    if (new_trace.actions() != NULL) {
-      new_trace.set_flush_budget(new_flush_budget);
-    }
-    EmitOutOfLineContinuation(compiler,
-                              &new_trace,
-                              alternatives_->at(i),
-                              alt_gen,
-                              preload_characters,
-                              alt_gens.at(i + 1)->expects_preload);
-  }
 }
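
The quick check EmitChoices tries to inline per alternative is a mask-and-compare filter: fold the characters an alternative can start with into one (mask, value) pair so a single AND and compare rejects most positions. It may pass a non-match, which the out-of-line slow check then catches, but it must never reject a real match. A hedged standalone sketch of that construction:

    #include <cstdint>
    #include <cstdio>

    struct QuickCheck { uint16_t mask, value; };

    // Bits on which all accepted characters agree go into the mask;
    // 'value' holds the shared settings of those bits, so that
    // (c & mask) == value is necessary, not sufficient, for a match.
    QuickCheck Build(const uint16_t* chars, int n) {
      uint16_t differing = 0;
      for (int i = 1; i < n; i++) differing |= chars[0] ^ chars[i];
      uint16_t mask = static_cast<uint16_t>(~differing);
      return {mask, static_cast<uint16_t>(chars[0] & mask)};
    }

    int main() {
      uint16_t firsts[] = {'f', 'b'};  // e.g. first characters of foo|bar
      QuickCheck qc = Build(firsts, 2);
      for (uint16_t c : {'f', 'b', 'd', 'x'})
        std::printf("%c: %s\n", static_cast<char>(c),
                    (c & qc.mask) == qc.value ? "maybe" : "reject");
      return 0;
    }
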
 
 
@@ -4172,7 +4235,7 @@
   RegExpMacroAssembler* assembler = compiler->macro_assembler();
   LimitResult limit_result = LimitVersions(compiler, trace);
   if (limit_result == DONE) return;
-  ASSERT(limit_result == CONTINUE);
+  DCHECK(limit_result == CONTINUE);
 
   RecursionCheck rc(compiler);
 
@@ -4280,7 +4343,7 @@
       int clear_registers_to = clear_registers_from + clear_register_count - 1;
       assembler->ClearRegisters(clear_registers_from, clear_registers_to);
 
-      ASSERT(trace->backtrack() == NULL);
+      DCHECK(trace->backtrack() == NULL);
       assembler->Backtrack();
       return;
     }
@@ -4299,11 +4362,11 @@
 
   LimitResult limit_result = LimitVersions(compiler, trace);
   if (limit_result == DONE) return;
-  ASSERT(limit_result == CONTINUE);
+  DCHECK(limit_result == CONTINUE);
 
   RecursionCheck rc(compiler);
 
-  ASSERT_EQ(start_reg_ + 1, end_reg_);
+  DCHECK_EQ(start_reg_ + 1, end_reg_);
   if (compiler->ignore_case()) {
     assembler->CheckNotBackReferenceIgnoreCase(start_reg_,
                                                trace->backtrack());
@@ -4323,44 +4386,41 @@
 
 class DotPrinter: public NodeVisitor {
  public:
-  explicit DotPrinter(bool ignore_case)
-      : ignore_case_(ignore_case),
-        stream_(&alloc_) { }
+  DotPrinter(OStream& os, bool ignore_case)  // NOLINT
+      : os_(os),
+        ignore_case_(ignore_case) {}
   void PrintNode(const char* label, RegExpNode* node);
   void Visit(RegExpNode* node);
   void PrintAttributes(RegExpNode* from);
-  StringStream* stream() { return &stream_; }
   void PrintOnFailure(RegExpNode* from, RegExpNode* to);
 #define DECLARE_VISIT(Type)                                          \
   virtual void Visit##Type(Type##Node* that);
 FOR_EACH_NODE_TYPE(DECLARE_VISIT)
 #undef DECLARE_VISIT
  private:
+  OStream& os_;
   bool ignore_case_;
-  HeapStringAllocator alloc_;
-  StringStream stream_;
 };
 
 
 void DotPrinter::PrintNode(const char* label, RegExpNode* node) {
-  stream()->Add("digraph G {\n  graph [label=\"");
+  os_ << "digraph G {\n  graph [label=\"";
   for (int i = 0; label[i]; i++) {
     switch (label[i]) {
       case '\\':
-        stream()->Add("\\\\");
+        os_ << "\\\\";
         break;
       case '"':
-        stream()->Add("\"");
+        os_ << "\\\"";  // escape the quote inside the dot label
         break;
       default:
-        stream()->Put(label[i]);
+        os_ << label[i];
         break;
     }
   }
-  stream()->Add("\"];\n");
+  os_ << "\"];\n";
   Visit(node);
-  stream()->Add("}\n");
-  printf("%s", stream()->ToCString().get());
+  os_ << "}" << endl;
 }
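
After the OStream conversion the printer streams ordinary Graphviz "dot" text directly instead of buffering through StringStream. A toy emitter in the same style, showing the label-escaping loop plus one node/edge pair; shapes and node names are illustrative:

    #include <iostream>

    static void EmitLabel(std::ostream& os, const char* label) {
      os << "digraph G {\n  graph [label=\"";
      for (int i = 0; label[i]; i++) {
        switch (label[i]) {
          case '\\': os << "\\\\"; break;
          case '"':  os << "\\\""; break;
          default:   os << label[i]; break;
        }
      }
      os << "\"];\n";
    }

    int main() {
      EmitLabel(std::cout, "a\"b\\c");
      std::cout << "  n0 [shape=box, label=\"text\"];\n"
                << "  n0 -> n1;\n"
                << "}" << std::endl;
      return 0;
    }
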
 
 
@@ -4372,97 +4432,95 @@
 
 
 void DotPrinter::PrintOnFailure(RegExpNode* from, RegExpNode* on_failure) {
-  stream()->Add("  n%p -> n%p [style=dotted];\n", from, on_failure);
+  os_ << "  n" << from << " -> n" << on_failure << " [style=dotted];\n";
   Visit(on_failure);
 }
 
 
 class TableEntryBodyPrinter {
  public:
-  TableEntryBodyPrinter(StringStream* stream, ChoiceNode* choice)
-      : stream_(stream), choice_(choice) { }
+  TableEntryBodyPrinter(OStream& os, ChoiceNode* choice)  // NOLINT
+      : os_(os),
+        choice_(choice) {}
   void Call(uc16 from, DispatchTable::Entry entry) {
     OutSet* out_set = entry.out_set();
     for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
       if (out_set->Get(i)) {
-        stream()->Add("    n%p:s%io%i -> n%p;\n",
-                      choice(),
-                      from,
-                      i,
-                      choice()->alternatives()->at(i).node());
+        os_ << "    n" << choice() << ":s" << from << "o" << i << " -> n"
+            << choice()->alternatives()->at(i).node() << ";\n";
       }
     }
   }
  private:
-  StringStream* stream() { return stream_; }
   ChoiceNode* choice() { return choice_; }
-  StringStream* stream_;
+  OStream& os_;
   ChoiceNode* choice_;
 };
 
 
 class TableEntryHeaderPrinter {
  public:
-  explicit TableEntryHeaderPrinter(StringStream* stream)
-      : first_(true), stream_(stream) { }
+  explicit TableEntryHeaderPrinter(OStream& os)  // NOLINT
+      : first_(true),
+        os_(os) {}
   void Call(uc16 from, DispatchTable::Entry entry) {
     if (first_) {
       first_ = false;
     } else {
-      stream()->Add("|");
+      os_ << "|";
     }
-    stream()->Add("{\\%k-\\%k|{", from, entry.to());
+    os_ << "{\\" << AsUC16(from) << "-\\" << AsUC16(entry.to()) << "|{";
     OutSet* out_set = entry.out_set();
     int priority = 0;
     for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
       if (out_set->Get(i)) {
-        if (priority > 0) stream()->Add("|");
-        stream()->Add("<s%io%i> %i", from, i, priority);
+        if (priority > 0) os_ << "|";
+        os_ << "<s" << from << "o" << i << "> " << priority;
         priority++;
       }
     }
-    stream()->Add("}}");
+    os_ << "}}";
   }
 
  private:
   bool first_;
-  StringStream* stream() { return stream_; }
-  StringStream* stream_;
+  OStream& os_;
 };
 
 
 class AttributePrinter {
  public:
-  explicit AttributePrinter(DotPrinter* out)
-      : out_(out), first_(true) { }
+  explicit AttributePrinter(OStream& os)  // NOLINT
+      : os_(os),
+        first_(true) {}
   void PrintSeparator() {
     if (first_) {
       first_ = false;
     } else {
-      out_->stream()->Add("|");
+      os_ << "|";
     }
   }
   void PrintBit(const char* name, bool value) {
     if (!value) return;
     PrintSeparator();
-    out_->stream()->Add("{%s}", name);
+    os_ << "{" << name << "}";
   }
   void PrintPositive(const char* name, int value) {
     if (value < 0) return;
     PrintSeparator();
-    out_->stream()->Add("{%s|%x}", name, value);
+    os_ << "{" << name << "|" << value << "}";
   }
+
  private:
-  DotPrinter* out_;
+  OStream& os_;
   bool first_;
 };
 
 
 void DotPrinter::PrintAttributes(RegExpNode* that) {
-  stream()->Add("  a%p [shape=Mrecord, color=grey, fontcolor=grey, "
-                "margin=0.1, fontsize=10, label=\"{",
-                that);
-  AttributePrinter printer(this);
+  os_ << "  a" << that << " [shape=Mrecord, color=grey, fontcolor=grey, "
+      << "margin=0.1, fontsize=10, label=\"{";
+  AttributePrinter printer(os_);
   NodeInfo* info = that->info();
   printer.PrintBit("NI", info->follows_newline_interest);
   printer.PrintBit("WI", info->follows_word_interest);
@@ -4470,27 +4528,27 @@
   Label* label = that->label();
   if (label->is_bound())
     printer.PrintPositive("@", label->pos());
-  stream()->Add("}\"];\n");
-  stream()->Add("  a%p -> n%p [style=dashed, color=grey, "
-                "arrowhead=none];\n", that, that);
+  os_ << "}\"];\n"
+      << "  a" << that << " -> n" << that
+      << " [style=dashed, color=grey, arrowhead=none];\n";
 }
 
 
 static const bool kPrintDispatchTable = false;
 void DotPrinter::VisitChoice(ChoiceNode* that) {
   if (kPrintDispatchTable) {
-    stream()->Add("  n%p [shape=Mrecord, label=\"", that);
-    TableEntryHeaderPrinter header_printer(stream());
+    os_ << "  n" << that << " [shape=Mrecord, label=\"";
+    TableEntryHeaderPrinter header_printer(os_);
     that->GetTable(ignore_case_)->ForEach(&header_printer);
-    stream()->Add("\"]\n", that);
+    os_ << "\"]\n";
     PrintAttributes(that);
-    TableEntryBodyPrinter body_printer(stream(), that);
+    TableEntryBodyPrinter body_printer(os_, that);
     that->GetTable(ignore_case_)->ForEach(&body_printer);
   } else {
-    stream()->Add("  n%p [shape=Mrecord, label=\"?\"];\n", that);
+    os_ << "  n" << that << " [shape=Mrecord, label=\"?\"];\n";
     for (int i = 0; i < that->alternatives()->length(); i++) {
       GuardedAlternative alt = that->alternatives()->at(i);
-      stream()->Add("  n%p -> n%p;\n", that, alt.node());
+      os_ << "  n" << that << " -> n" << alt.node();
     }
   }
   for (int i = 0; i < that->alternatives()->length(); i++) {
@@ -4502,138 +4560,136 @@
 
 void DotPrinter::VisitText(TextNode* that) {
   Zone* zone = that->zone();
-  stream()->Add("  n%p [label=\"", that);
+  os_ << "  n" << that << " [label=\"";
   for (int i = 0; i < that->elements()->length(); i++) {
-    if (i > 0) stream()->Add(" ");
+    if (i > 0) os_ << " ";
     TextElement elm = that->elements()->at(i);
     switch (elm.text_type()) {
       case TextElement::ATOM: {
-        stream()->Add("'%w'", elm.atom()->data());
+        Vector<const uc16> data = elm.atom()->data();
+        for (int i = 0; i < data.length(); i++) {
+          os_ << static_cast<char>(data[i]);
+        }
         break;
       }
       case TextElement::CHAR_CLASS: {
         RegExpCharacterClass* node = elm.char_class();
-        stream()->Add("[");
-        if (node->is_negated())
-          stream()->Add("^");
+        os_ << "[";
+        if (node->is_negated()) os_ << "^";
         for (int j = 0; j < node->ranges(zone)->length(); j++) {
           CharacterRange range = node->ranges(zone)->at(j);
-          stream()->Add("%k-%k", range.from(), range.to());
+          os_ << AsUC16(range.from()) << "-" << AsUC16(range.to());
         }
-        stream()->Add("]");
+        os_ << "]";
         break;
       }
       default:
         UNREACHABLE();
     }
   }
-  stream()->Add("\", shape=box, peripheries=2];\n");
+  os_ << "\", shape=box, peripheries=2];\n";
   PrintAttributes(that);
-  stream()->Add("  n%p -> n%p;\n", that, that->on_success());
+  os_ << "  n" << that << " -> n" << that->on_success() << ";\n";
   Visit(that->on_success());
 }
 
 
 void DotPrinter::VisitBackReference(BackReferenceNode* that) {
-  stream()->Add("  n%p [label=\"$%i..$%i\", shape=doubleoctagon];\n",
-                that,
-                that->start_register(),
-                that->end_register());
+  os_ << "  n" << that << " [label=\"$" << that->start_register() << "..$"
+      << that->end_register() << "\", shape=doubleoctagon];\n";
   PrintAttributes(that);
-  stream()->Add("  n%p -> n%p;\n", that, that->on_success());
+  os_ << "  n" << that << " -> n" << that->on_success() << ";\n";
   Visit(that->on_success());
 }
 
 
 void DotPrinter::VisitEnd(EndNode* that) {
-  stream()->Add("  n%p [style=bold, shape=point];\n", that);
+  os_ << "  n" << that << " [style=bold, shape=point];\n";
   PrintAttributes(that);
 }
 
 
 void DotPrinter::VisitAssertion(AssertionNode* that) {
-  stream()->Add("  n%p [", that);
+  os_ << "  n" << that << " [";
   switch (that->assertion_type()) {
     case AssertionNode::AT_END:
-      stream()->Add("label=\"$\", shape=septagon");
+      os_ << "label=\"$\", shape=septagon";
       break;
     case AssertionNode::AT_START:
-      stream()->Add("label=\"^\", shape=septagon");
+      os_ << "label=\"^\", shape=septagon";
       break;
     case AssertionNode::AT_BOUNDARY:
-      stream()->Add("label=\"\\b\", shape=septagon");
+      os_ << "label=\"\\b\", shape=septagon";
       break;
     case AssertionNode::AT_NON_BOUNDARY:
-      stream()->Add("label=\"\\B\", shape=septagon");
+      os_ << "label=\"\\B\", shape=septagon";
       break;
     case AssertionNode::AFTER_NEWLINE:
-      stream()->Add("label=\"(?<=\\n)\", shape=septagon");
+      os_ << "label=\"(?<=\\n)\", shape=septagon";
       break;
   }
-  stream()->Add("];\n");
+  os_ << "];\n";
   PrintAttributes(that);
   RegExpNode* successor = that->on_success();
-  stream()->Add("  n%p -> n%p;\n", that, successor);
+  os_ << "  n" << that << " -> n" << successor << ";\n";
   Visit(successor);
 }
 
 
 void DotPrinter::VisitAction(ActionNode* that) {
-  stream()->Add("  n%p [", that);
+  os_ << "  n" << that << " [";
   switch (that->action_type_) {
     case ActionNode::SET_REGISTER:
-      stream()->Add("label=\"$%i:=%i\", shape=octagon",
-                    that->data_.u_store_register.reg,
-                    that->data_.u_store_register.value);
+      os_ << "label=\"$" << that->data_.u_store_register.reg
+          << ":=" << that->data_.u_store_register.value << "\", shape=octagon";
       break;
     case ActionNode::INCREMENT_REGISTER:
-      stream()->Add("label=\"$%i++\", shape=octagon",
-                    that->data_.u_increment_register.reg);
+      os_ << "label=\"$" << that->data_.u_increment_register.reg
+          << "++\", shape=octagon";
       break;
     case ActionNode::STORE_POSITION:
-      stream()->Add("label=\"$%i:=$pos\", shape=octagon",
-                    that->data_.u_position_register.reg);
+      os_ << "label=\"$" << that->data_.u_position_register.reg
+          << ":=$pos\", shape=octagon";
       break;
     case ActionNode::BEGIN_SUBMATCH:
-      stream()->Add("label=\"$%i:=$pos,begin\", shape=septagon",
-                    that->data_.u_submatch.current_position_register);
+      os_ << "label=\"$" << that->data_.u_submatch.current_position_register
+          << ":=$pos,begin\", shape=septagon";
       break;
     case ActionNode::POSITIVE_SUBMATCH_SUCCESS:
-      stream()->Add("label=\"escape\", shape=septagon");
+      os_ << "label=\"escape\", shape=septagon";
       break;
     case ActionNode::EMPTY_MATCH_CHECK:
-      stream()->Add("label=\"$%i=$pos?,$%i<%i?\", shape=septagon",
-                    that->data_.u_empty_match_check.start_register,
-                    that->data_.u_empty_match_check.repetition_register,
-                    that->data_.u_empty_match_check.repetition_limit);
+      os_ << "label=\"$" << that->data_.u_empty_match_check.start_register
+          << "=$pos?,$" << that->data_.u_empty_match_check.repetition_register
+          << "<" << that->data_.u_empty_match_check.repetition_limit
+          << "?\", shape=septagon";
       break;
     case ActionNode::CLEAR_CAPTURES: {
-      stream()->Add("label=\"clear $%i to $%i\", shape=septagon",
-                    that->data_.u_clear_captures.range_from,
-                    that->data_.u_clear_captures.range_to);
+      os_ << "label=\"clear $" << that->data_.u_clear_captures.range_from
+          << " to $" << that->data_.u_clear_captures.range_to
+          << "\", shape=septagon";
       break;
     }
   }
-  stream()->Add("];\n");
+  os_ << "];\n";
   PrintAttributes(that);
   RegExpNode* successor = that->on_success();
-  stream()->Add("  n%p -> n%p;\n", that, successor);
+  os_ << "  n" << that << " -> n" << successor << ";\n";
   Visit(successor);
 }
 
 
 class DispatchTableDumper {
  public:
-  explicit DispatchTableDumper(StringStream* stream) : stream_(stream) { }
+  explicit DispatchTableDumper(OStream& os) : os_(os) {}
   void Call(uc16 key, DispatchTable::Entry entry);
-  StringStream* stream() { return stream_; }
  private:
-  StringStream* stream_;
+  OStream& os_;
 };
 
 
 void DispatchTableDumper::Call(uc16 key, DispatchTable::Entry entry) {
-  stream()->Add("[%k-%k]: {", key, entry.to());
+  os_ << "[" << AsUC16(key) << "-" << AsUC16(entry.to()) << "]: {";
   OutSet* set = entry.out_set();
   bool first = true;
   for (unsigned i = 0; i < OutSet::kFirstLimit; i++) {
@@ -4641,28 +4697,27 @@
       if (first) {
         first = false;
       } else {
-        stream()->Add(", ");
+        os_ << ", ";
       }
-      stream()->Add("%i", i);
+      os_ << i;
     }
   }
-  stream()->Add("}\n");
+  os_ << "}\n";
 }
 
 
 void DispatchTable::Dump() {
-  HeapStringAllocator alloc;
-  StringStream stream(&alloc);
-  DispatchTableDumper dumper(&stream);
+  OFStream os(stderr);
+  DispatchTableDumper dumper(os);
   tree()->ForEach(&dumper);
-  OS::PrintError("%s", stream.ToCString().get());
 }
 
 
 void RegExpEngine::DotPrint(const char* label,
                             RegExpNode* node,
                             bool ignore_case) {
-  DotPrinter printer(ignore_case);
+  OFStream os(stdout);
+  DotPrinter printer(os, ignore_case);
   printer.PrintNode(label, node);
 }
 
@@ -4692,10 +4747,10 @@
                                  const int* special_class,
                                  int length) {
   length--;  // Remove final 0x10000.
-  ASSERT(special_class[length] == 0x10000);
-  ASSERT(ranges->length() != 0);
-  ASSERT(length != 0);
-  ASSERT(special_class[0] != 0);
+  DCHECK(special_class[length] == 0x10000);
+  DCHECK(ranges->length() != 0);
+  DCHECK(length != 0);
+  DCHECK(special_class[0] != 0);
   if (ranges->length() != (length >> 1) + 1) {
     return false;
   }
@@ -4723,7 +4778,7 @@
                           const int* special_class,
                           int length) {
   length--;  // Remove final 0x10000.
-  ASSERT(special_class[length] == 0x10000);
+  DCHECK(special_class[length] == 0x10000);
   if (ranges->length() * 2 != length) {
     return false;
   }
@@ -4820,7 +4875,7 @@
       : compiler_(compiler),
         saved_expansion_factor_(compiler->current_expansion_factor()),
         ok_to_expand_(saved_expansion_factor_ <= kMaxExpansionFactor) {
-    ASSERT(factor > 0);
+    DCHECK(factor > 0);
     if (ok_to_expand_) {
       if (factor > kMaxExpansionFactor) {
         // Avoid integer overflow of the current expansion factor.
@@ -4909,7 +4964,7 @@
       }
     }
     if (max <= kMaxUnrolledMaxMatches && min == 0) {
-      ASSERT(max > 0);  // Due to the 'if' above.
+      DCHECK(max > 0);  // Due to the 'if' above.
       RegExpExpansionLimiter limiter(compiler, max);
       if (limiter.ok_to_expand()) {
         // Unroll the optional matches up to max.
@@ -5148,9 +5203,9 @@
                      ZoneList<CharacterRange>* ranges,
                      Zone* zone) {
   elmc--;
-  ASSERT(elmv[elmc] == 0x10000);
+  DCHECK(elmv[elmc] == 0x10000);
   for (int i = 0; i < elmc; i += 2) {
-    ASSERT(elmv[i] < elmv[i + 1]);
+    DCHECK(elmv[i] < elmv[i + 1]);
     ranges->Add(CharacterRange(elmv[i], elmv[i + 1] - 1), zone);
   }
 }
@@ -5161,13 +5216,13 @@
                             ZoneList<CharacterRange>* ranges,
                             Zone* zone) {
   elmc--;
-  ASSERT(elmv[elmc] == 0x10000);
-  ASSERT(elmv[0] != 0x0000);
-  ASSERT(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
+  DCHECK(elmv[elmc] == 0x10000);
+  DCHECK(elmv[0] != 0x0000);
+  DCHECK(elmv[elmc-1] != String::kMaxUtf16CodeUnit);
   uc16 last = 0x0000;
   for (int i = 0; i < elmc; i += 2) {
-    ASSERT(last <= elmv[i] - 1);
-    ASSERT(elmv[i] < elmv[i + 1]);
+    DCHECK(last <= elmv[i] - 1);
+    DCHECK(elmv[i] < elmv[i + 1]);
     ranges->Add(CharacterRange(last, elmv[i] - 1), zone);
     last = elmv[i + 1];
   }
@@ -5263,8 +5318,8 @@
                            ZoneList<CharacterRange>** included,
                            ZoneList<CharacterRange>** excluded,
                            Zone* zone) {
-  ASSERT_EQ(NULL, *included);
-  ASSERT_EQ(NULL, *excluded);
+  DCHECK_EQ(NULL, *included);
+  DCHECK_EQ(NULL, *excluded);
   DispatchTable table(zone);
   for (int i = 0; i < base->length(); i++)
     table.AddRange(base->at(i), CharacterRangeSplitter::kInBase, zone);
@@ -5278,12 +5333,11 @@
 
 
 void CharacterRange::AddCaseEquivalents(ZoneList<CharacterRange>* ranges,
-                                        bool is_ascii,
-                                        Zone* zone) {
+                                        bool is_one_byte, Zone* zone) {
   Isolate* isolate = zone->isolate();
   uc16 bottom = from();
   uc16 top = to();
-  if (is_ascii && !RangeContainsLatin1Equivalents(*this)) {
+  if (is_one_byte && !RangeContainsLatin1Equivalents(*this)) {
     if (bottom > String::kMaxOneByteCharCode) return;
     if (top > String::kMaxOneByteCharCode) top = String::kMaxOneByteCharCode;
   }
@@ -5324,7 +5378,7 @@
       if (length == 0) {
         block_end = pos;
       } else {
-        ASSERT_EQ(1, length);
+        DCHECK_EQ(1, length);
         block_end = range[0];
       }
       int end = (block_end > top) ? top : block_end;
@@ -5344,7 +5398,7 @@
 
 
 bool CharacterRange::IsCanonical(ZoneList<CharacterRange>* ranges) {
-  ASSERT_NOT_NULL(ranges);
+  DCHECK_NOT_NULL(ranges);
   int n = ranges->length();
   if (n <= 1) return true;
   int max = ranges->at(0).to();
@@ -5484,15 +5538,15 @@
   } while (read < n);
   character_ranges->Rewind(num_canonical);
 
-  ASSERT(CharacterRange::IsCanonical(character_ranges));
+  DCHECK(CharacterRange::IsCanonical(character_ranges));
 }
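
Canonical here means sorted by start and fully coalesced, which is what lets IsCanonical verify the list in one pass and Negate walk the gaps. A standalone sketch of the coalescing step over a simplified range type:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    struct Range { int from, to; };  // inclusive bounds

    // Sort by start, then merge ranges that overlap or touch
    // (to + 1 >= next.from).
    void Canonicalize(std::vector<Range>& rs) {
      std::sort(rs.begin(), rs.end(),
                [](const Range& a, const Range& b) { return a.from < b.from; });
      size_t out = 0;
      for (size_t i = 1; i < rs.size(); i++) {
        if (rs[i].from <= rs[out].to + 1)
          rs[out].to = std::max(rs[out].to, rs[i].to);
        else
          rs[++out] = rs[i];
      }
      if (!rs.empty()) rs.resize(out + 1);
    }

    int main() {
      std::vector<Range> rs = {{'a', 'f'}, {'0', '9'}, {'d', 'k'}, {'l', 'n'}};
      Canonicalize(rs);
      for (const Range& r : rs) std::printf("[%c-%c] ", r.from, r.to);
      std::printf("\n");  // [0-9] [a-n]
      return 0;
    }
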
 
 
 void CharacterRange::Negate(ZoneList<CharacterRange>* ranges,
                             ZoneList<CharacterRange>* negated_ranges,
                             Zone* zone) {
-  ASSERT(CharacterRange::IsCanonical(ranges));
-  ASSERT_EQ(0, negated_ranges->length());
+  DCHECK(CharacterRange::IsCanonical(ranges));
+  DCHECK_EQ(0, negated_ranges->length());
   int range_count = ranges->length();
   uc16 from = 0;
   int i = 0;
@@ -5568,7 +5622,7 @@
   if (tree()->is_empty()) {
     // If this is the first range we just insert into the table.
     ZoneSplayTree<Config>::Locator loc;
-    ASSERT_RESULT(tree()->Insert(current.from(), &loc));
+    DCHECK_RESULT(tree()->Insert(current.from(), &loc));
     loc.set_value(Entry(current.from(), current.to(),
                         empty()->Extend(value, zone)));
     return;
@@ -5594,7 +5648,7 @@
       // to the map and let the next step deal with merging it with
       // the range we're adding.
       ZoneSplayTree<Config>::Locator loc;
-      ASSERT_RESULT(tree()->Insert(right.from(), &loc));
+      DCHECK_RESULT(tree()->Insert(right.from(), &loc));
       loc.set_value(Entry(right.from(),
                           right.to(),
                           entry->out_set()));
@@ -5610,24 +5664,24 @@
       // then we have to add a range covering just that space.
       if (current.from() < entry->from()) {
         ZoneSplayTree<Config>::Locator ins;
-        ASSERT_RESULT(tree()->Insert(current.from(), &ins));
+        DCHECK_RESULT(tree()->Insert(current.from(), &ins));
         ins.set_value(Entry(current.from(),
                             entry->from() - 1,
                             empty()->Extend(value, zone)));
         current.set_from(entry->from());
       }
-      ASSERT_EQ(current.from(), entry->from());
+      DCHECK_EQ(current.from(), entry->from());
       // If the overlapping range extends beyond the one we want to add
       // we have to snap the right part off and add it separately.
       if (entry->to() > current.to()) {
         ZoneSplayTree<Config>::Locator ins;
-        ASSERT_RESULT(tree()->Insert(current.to() + 1, &ins));
+        DCHECK_RESULT(tree()->Insert(current.to() + 1, &ins));
         ins.set_value(Entry(current.to() + 1,
                             entry->to(),
                             entry->out_set()));
         entry->set_to(current.to());
       }
-      ASSERT(entry->to() <= current.to());
+      DCHECK(entry->to() <= current.to());
       // The overlapping range is now completely contained by the range
       // we're adding so we can just update it and move the start point
       // of the range we're adding just past it.
@@ -5636,12 +5690,12 @@
       // adding 1 will wrap around to 0.
       if (entry->to() == String::kMaxUtf16CodeUnit)
         break;
-      ASSERT(entry->to() + 1 > current.from());
+      DCHECK(entry->to() + 1 > current.from());
       current.set_from(entry->to() + 1);
     } else {
       // There is no overlap so we can just add the range
       ZoneSplayTree<Config>::Locator ins;
-      ASSERT_RESULT(tree()->Insert(current.from(), &ins));
+      DCHECK_RESULT(tree()->Insert(current.from(), &ins));
       ins.set_value(Entry(current.from(),
                           current.to(),
                           empty()->Extend(value, zone)));
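
The insertion logic above keeps the tree's entries disjoint by clipping: fill uncovered space left of an entry, split off the part of an entry right of the new range, merge out-sets on the shared middle, then continue past it. The same shape over std::map, with out-sets reduced to a bitmask (all names illustrative):

    #include <cstdio>
    #include <iterator>
    #include <map>

    struct Entry { int to; unsigned outs; };  // map key is the range start

    void AddRange(std::map<int, Entry>& t, int from, int to, unsigned bit) {
      while (from <= to) {
        // Find an existing entry overlapping [from, to], if any.
        auto it = t.upper_bound(from);
        if (it != t.begin() && std::prev(it)->second.to >= from) --it;
        if (it == t.end() || it->first > to) {  // no overlap: plain insert
          t[from] = {to, bit};
          return;
        }
        if (from < it->first) {                 // uncovered space on the left
          t[from] = {it->first - 1, bit};
          from = it->first;
        } else {
          if (it->first < from) {               // split entry's left part off
            int old_to = it->second.to;
            unsigned o = it->second.outs;
            it->second.to = from - 1;
            it = t.emplace(from, Entry{old_to, o}).first;
          }
          if (it->second.to > to) {             // split entry's right part off
            t.emplace(to + 1, Entry{it->second.to, it->second.outs});
            it->second.to = to;
          }
          it->second.outs |= bit;               // merge on the covered middle
          from = it->second.to + 1;
        }
      }
    }

    int main() {
      std::map<int, Entry> t;
      AddRange(t, 0, 9, 1);
      AddRange(t, 5, 14, 2);
      for (auto& [f, e] : t) std::printf("[%d,%d]=%u ", f, e.to, e.outs);
      std::printf("\n");  // [0,4]=1 [5,9]=3 [10,14]=2
      return 0;
    }
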
@@ -5702,7 +5756,7 @@
 
 void Analysis::VisitText(TextNode* that) {
   if (ignore_case_) {
-    that->MakeCaseIndependent(is_ascii_);
+    that->MakeCaseIndependent(is_one_byte_);
   }
   EnsureAnalyzed(that->on_success());
   if (!has_failed()) {
@@ -5834,7 +5888,7 @@
         }
       }
     } else {
-      ASSERT_EQ(TextElement::CHAR_CLASS, text.text_type());
+      DCHECK_EQ(TextElement::CHAR_CLASS, text.text_type());
       RegExpCharacterClass* char_class = text.char_class();
       ZoneList<CharacterRange>* ranges = char_class->ranges(zone());
       if (char_class->is_negated()) {
@@ -5978,18 +6032,13 @@
 
 
 RegExpEngine::CompilationResult RegExpEngine::Compile(
-    RegExpCompileData* data,
-    bool ignore_case,
-    bool is_global,
-    bool is_multiline,
-    Handle<String> pattern,
-    Handle<String> sample_subject,
-    bool is_ascii,
-    Zone* zone) {
+    RegExpCompileData* data, bool ignore_case, bool is_global,
+    bool is_multiline, bool is_sticky, Handle<String> pattern,
+    Handle<String> sample_subject, bool is_one_byte, Zone* zone) {
   if ((data->capture_count + 1) * 2 - 1 > RegExpMacroAssembler::kMaxRegister) {
     return IrregexpRegExpTooBig(zone->isolate());
   }
-  RegExpCompiler compiler(data->capture_count, ignore_case, is_ascii, zone);
+  RegExpCompiler compiler(data->capture_count, ignore_case, is_one_byte, zone);
 
   // Sample some characters from the middle of the string.
   static const int kSampleSize = 128;
@@ -6012,9 +6061,9 @@
   bool is_end_anchored = data->tree->IsAnchoredAtEnd();
   bool is_start_anchored = data->tree->IsAnchoredAtStart();
   int max_length = data->tree->max_match();
-  if (!is_start_anchored) {
+  if (!is_start_anchored && !is_sticky) {
     // Add a .*? at the beginning, outside the body capture, unless
-    // this expression is anchored at the beginning.
+    // this expression is anchored at the beginning or sticky.
     RegExpNode* loop_node =
         RegExpQuantifier::ToNode(0,
                                  RegExpTree::kInfinity,
@@ -6036,18 +6085,18 @@
       node = loop_node;
     }
   }
-  if (is_ascii) {
-    node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
+  if (is_one_byte) {
+    node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, ignore_case);
     // Do it again to propagate the new nodes to places where they were not
     // put because they had not been calculated yet.
     if (node != NULL) {
-      node = node->FilterASCII(RegExpCompiler::kMaxRecursion, ignore_case);
+      node = node->FilterOneByte(RegExpCompiler::kMaxRecursion, ignore_case);
     }
   }
 
   if (node == NULL) node = new(zone) EndNode(EndNode::BACKTRACK, zone);
   data->node = node;
-  Analysis analysis(ignore_case, is_ascii);
+  Analysis analysis(ignore_case, is_one_byte);
   analysis.EnsureAnalyzed(node);
   if (analysis.has_failed()) {
     const char* error_message = analysis.error_message();
@@ -6059,8 +6108,8 @@
   // Native regexp implementation.
 
   NativeRegExpMacroAssembler::Mode mode =
-      is_ascii ? NativeRegExpMacroAssembler::ASCII
-               : NativeRegExpMacroAssembler::UC16;
+      is_one_byte ? NativeRegExpMacroAssembler::LATIN1
+                  : NativeRegExpMacroAssembler::UC16;
 
 #if V8_TARGET_ARCH_IA32
   RegExpMacroAssemblerIA32 macro_assembler(mode, (data->capture_count + 1) * 2,
@@ -6077,6 +6126,9 @@
 #elif V8_TARGET_ARCH_MIPS
   RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
                                            zone);
+#elif V8_TARGET_ARCH_MIPS64
+  RegExpMacroAssemblerMIPS macro_assembler(mode, (data->capture_count + 1) * 2,
+                                           zone);
 #elif V8_TARGET_ARCH_X87
   RegExpMacroAssemblerX87 macro_assembler(mode, (data->capture_count + 1) * 2,
                                           zone);
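
Aside on the is_sticky parameter threaded through RegExpEngine::Compile above:
a sticky regexp (JavaScript's /y flag) may only match at the position where
the search starts, so the synthetic non-greedy prefix loop that implements
unanchored search must be omitted. A short sketch of the observable
difference, written as comments rather than as part of the patch:

// For a non-sticky, unanchored pattern the compiler wraps the body in a
// synthetic /.*?/ loop so a match may begin anywhere in the subject:
//   /foo/  matched against "xxfoo"  -> succeeds at index 2
// A sticky pattern must match at the start index itself, so the loop is
// skipped and the body is compiled as if anchored at the start:
//   /foo/y matched against "xxfoo"  -> fails (index 0 does not match)
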
diff --git a/src/jsregexp.h b/src/jsregexp.h
index 4da8ba3..c65adea 100644
--- a/src/jsregexp.h
+++ b/src/jsregexp.h
@@ -204,8 +204,8 @@
   static void SetIrregexpMaxRegisterCount(FixedArray* re, int value);
   static int IrregexpNumberOfCaptures(FixedArray* re);
   static int IrregexpNumberOfRegisters(FixedArray* re);
-  static ByteArray* IrregexpByteCode(FixedArray* re, bool is_ascii);
-  static Code* IrregexpNativeCode(FixedArray* re, bool is_ascii);
+  static ByteArray* IrregexpByteCode(FixedArray* re, bool is_one_byte);
+  static Code* IrregexpNativeCode(FixedArray* re, bool is_one_byte);
 
   // Limit the space regexps take up on the heap.  In order to limit this we
   // would like to keep track of the amount of regexp code on the heap.  This
@@ -216,10 +216,11 @@
   static const int kRegExpCompiledLimit = 1 * MB;
 
  private:
-  static bool CompileIrregexp(
-      Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
-  static inline bool EnsureCompiledIrregexp(
-      Handle<JSRegExp> re, Handle<String> sample_subject, bool is_ascii);
+  static bool CompileIrregexp(Handle<JSRegExp> re,
+                              Handle<String> sample_subject, bool is_one_byte);
+  static inline bool EnsureCompiledIrregexp(Handle<JSRegExp> re,
+                                            Handle<String> sample_subject,
+                                            bool is_one_byte);
 };
 
 
@@ -239,7 +240,7 @@
  public:
   CharacterRange() : from_(0), to_(0) { }
   // For compatibility with the CHECK_OK macro
-  CharacterRange(void* null) { ASSERT_EQ(NULL, null); }  //NOLINT
+  CharacterRange(void* null) { DCHECK_EQ(NULL, null); }  //NOLINT
   CharacterRange(uc16 from, uc16 to) : from_(from), to_(to) { }
   static void AddClassEscape(uc16 type, ZoneList<CharacterRange>* ranges,
                              Zone* zone);
@@ -248,7 +249,7 @@
     return CharacterRange(value, value);
   }
   static inline CharacterRange Range(uc16 from, uc16 to) {
-    ASSERT(from <= to);
+    DCHECK(from <= to);
     return CharacterRange(from, to);
   }
   static inline CharacterRange Everything() {
@@ -262,7 +263,7 @@
   bool is_valid() { return from_ <= to_; }
   bool IsEverything(uc16 max) { return from_ == 0 && to_ >= max; }
   bool IsSingleton() { return (from_ == to_); }
-  void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_ascii,
+  void AddCaseEquivalents(ZoneList<CharacterRange>* ranges, bool is_one_byte,
                           Zone* zone);
   static void Split(ZoneList<CharacterRange>* base,
                     Vector<const int> overlay,
@@ -406,7 +407,7 @@
 #undef FORWARD_DECLARE
 
 
-class TextElement V8_FINAL BASE_EMBEDDED {
+class TextElement FINAL BASE_EMBEDDED {
  public:
   enum TextType {
     ATOM,
@@ -425,12 +426,12 @@
   RegExpTree* tree() const { return tree_; }
 
   RegExpAtom* atom() const {
-    ASSERT(text_type() == ATOM);
+    DCHECK(text_type() == ATOM);
     return reinterpret_cast<RegExpAtom*>(tree());
   }
 
   RegExpCharacterClass* char_class() const {
-    ASSERT(text_type() == CHAR_CLASS);
+    DCHECK(text_type() == CHAR_CLASS);
     return reinterpret_cast<RegExpCharacterClass*>(tree());
   }
 
@@ -445,7 +446,9 @@
 
 
 class Trace;
-
+struct PreloadState;
+class GreedyLoopState;
+class AlternativeGenerationList;
 
 struct NodeInfo {
   NodeInfo()
@@ -524,11 +527,11 @@
         mask_(0),
         value_(0),
         cannot_match_(false) { }
-  bool Rationalize(bool ascii);
+  bool Rationalize(bool one_byte);
   // Merge in the information from another branch of an alternation.
   void Merge(QuickCheckDetails* other, int from_index);
   // Advance the current position by some amount.
-  void Advance(int by, bool ascii);
+  void Advance(int by, bool one_byte);
   void Clear();
   bool cannot_match() { return cannot_match_; }
   void set_cannot_match() { cannot_match_ = true; }
@@ -541,8 +544,8 @@
   int characters() { return characters_; }
   void set_characters(int characters) { characters_ = characters; }
   Position* positions(int index) {
-    ASSERT(index >= 0);
-    ASSERT(index < characters_);
+    DCHECK(index >= 0);
+    DCHECK(index < characters_);
     return positions_ + index;
   }
   uint32_t mask() { return mask_; }
@@ -587,6 +590,7 @@
   // Falls through on certain failure, jumps to the label on possible success.
   // If the node cannot make a quick check it does nothing and returns false.
   bool EmitQuickCheck(RegExpCompiler* compiler,
+                      Trace* bounds_check_trace,
                       Trace* trace,
                       bool preload_has_checked_bounds,
                       Label* on_possible_success,
@@ -622,13 +626,15 @@
     UNREACHABLE();
   }
 
-  // If we know that the input is ASCII then there are some nodes that can
+  // If we know that the input is one-byte then there are some nodes that can
   // never match.  This method returns a node that can be substituted for
   // itself, or NULL if the node can never match.
-  virtual RegExpNode* FilterASCII(int depth, bool ignore_case) { return this; }
-  // Helper for FilterASCII.
+  virtual RegExpNode* FilterOneByte(int depth, bool ignore_case) {
+    return this;
+  }
+  // Helper for FilterOneByte.
   RegExpNode* replacement() {
-    ASSERT(info()->replacement_calculated);
+    DCHECK(info()->replacement_calculated);
     return replacement_;
   }
   RegExpNode* set_replacement(RegExpNode* replacement) {
@@ -720,7 +726,7 @@
       : RegExpNode(on_success->zone()), on_success_(on_success) { }
   RegExpNode* on_success() { return on_success_; }
   void set_on_success(RegExpNode* node) { on_success_ = node; }
-  virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+  virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
   virtual void FillInBMInfo(int offset,
                             int budget,
                             BoyerMooreLookahead* bm,
@@ -841,7 +847,7 @@
                                     int characters_filled_in,
                                     bool not_at_start);
   ZoneList<TextElement>* elements() { return elms_; }
-  void MakeCaseIndependent(bool is_ascii);
+  void MakeCaseIndependent(bool is_one_byte);
   virtual int GreedyLoopTextLength();
   virtual RegExpNode* GetSuccessorOfOmnivorousTextNode(
       RegExpCompiler* compiler);
@@ -850,11 +856,11 @@
                             BoyerMooreLookahead* bm,
                             bool not_at_start);
   void CalculateOffsets();
-  virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+  virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
 
  private:
   enum TextEmitPassType {
-    NON_ASCII_MATCH,             // Check for characters that can't match.
+    NON_LATIN1_MATCH,            // Check for characters that can't match.
     SIMPLE_CHARACTER_MATCH,      // Case-dependent single character check.
     NON_LETTER_CHARACTER_MATCH,  // Check characters that have no case equivs.
     CASE_CHARACTER_MATCH,        // Case-independent single character check.
@@ -1076,8 +1082,10 @@
   bool not_at_start() { return not_at_start_; }
   void set_not_at_start() { not_at_start_ = true; }
   void set_being_calculated(bool b) { being_calculated_ = b; }
-  virtual bool try_to_emit_quick_check_for_alternative(int i) { return true; }
-  virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+  virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
+    return true;
+  }
+  virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
 
  protected:
   int GreedyLoopTextLengthForAlternative(GuardedAlternative* alternative);
@@ -1096,6 +1104,22 @@
                                  AlternativeGeneration* alt_gen,
                                  int preload_characters,
                                  bool next_expects_preload);
+  void SetUpPreLoad(RegExpCompiler* compiler,
+                    Trace* current_trace,
+                    PreloadState* preloads);
+  void AssertGuardsMentionRegisters(Trace* trace);
+  int EmitOptimizedUnanchoredSearch(RegExpCompiler* compiler, Trace* trace);
+  Trace* EmitGreedyLoop(RegExpCompiler* compiler,
+                        Trace* trace,
+                        AlternativeGenerationList* alt_gens,
+                        PreloadState* preloads,
+                        GreedyLoopState* greedy_loop_state,
+                        int text_length);
+  void EmitChoices(RegExpCompiler* compiler,
+                   AlternativeGenerationList* alt_gens,
+                   int first_choice,
+                   Trace* trace,
+                   PreloadState* preloads);
   DispatchTable* table_;
   // If true, this node is never checked at the start of the input.
   // Allows a new trace to start with at_start() set to false.
@@ -1131,8 +1155,10 @@
   // starts by loading enough characters for the alternative that takes fewest
   // characters, but on a negative lookahead the negative branch did not take
   // part in that calculation (EatsAtLeast), so the assumptions don't hold.
-  virtual bool try_to_emit_quick_check_for_alternative(int i) { return i != 0; }
-  virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+  virtual bool try_to_emit_quick_check_for_alternative(bool is_first) {
+    return !is_first;
+  }
+  virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
 };
 
 
@@ -1142,7 +1168,8 @@
       : ChoiceNode(2, zone),
         loop_node_(NULL),
         continue_node_(NULL),
-        body_can_be_zero_length_(body_can_be_zero_length) { }
+        body_can_be_zero_length_(body_can_be_zero_length)
+        { }
   void AddLoopAlternative(GuardedAlternative alt);
   void AddContinueAlternative(GuardedAlternative alt);
   virtual void Emit(RegExpCompiler* compiler, Trace* trace);
@@ -1159,7 +1186,7 @@
   RegExpNode* continue_node() { return continue_node_; }
   bool body_can_be_zero_length() { return body_can_be_zero_length_; }
   virtual void Accept(NodeVisitor* visitor);
-  virtual RegExpNode* FilterASCII(int depth, bool ignore_case);
+  virtual RegExpNode* FilterOneByte(int depth, bool ignore_case);
 
  private:
   // AddAlternative is made private for loop nodes because alternatives
@@ -1293,7 +1320,7 @@
   void SetRest(int from_map) {
     for (int i = from_map; i < length_; i++) SetAll(i);
   }
-  bool EmitSkipInstructions(RegExpMacroAssembler* masm);
+  void EmitSkipInstructions(RegExpMacroAssembler* masm);
 
  private:
   // This is the value obtained by EatsAtLeast.  If we do not have at least this
@@ -1302,7 +1329,7 @@
   // point.
   int length_;
   RegExpCompiler* compiler_;
-  // 0x7f for ASCII, 0xffff for UTF-16.
+  // 0xff for Latin1, 0xffff for UTF-16.
   int max_char_;
   ZoneList<BoyerMoorePositionInfo*>* bitmaps_;
 
@@ -1445,7 +1472,7 @@
   // These set methods and AdvanceCurrentPositionInTrace should be used only on
   // new traces - the intention is that traces are immutable after creation.
   void add_action(DeferredAction* new_action) {
-    ASSERT(new_action->next_ == NULL);
+    DCHECK(new_action->next_ == NULL);
     new_action->next_ = actions_;
     actions_ = new_action;
   }
@@ -1486,6 +1513,31 @@
 };
 
 
+class GreedyLoopState {
+ public:
+  explicit GreedyLoopState(bool not_at_start);
+
+  Label* label() { return &label_; }
+  Trace* counter_backtrack_trace() { return &counter_backtrack_trace_; }
+
+ private:
+  Label label_;
+  Trace counter_backtrack_trace_;
+};
+
+
+struct PreloadState {
+  static const int kEatsAtLeastNotYetInitialized = -1;
+  bool preload_is_current_;
+  bool preload_has_checked_bounds_;
+  int preload_characters_;
+  int eats_at_least_;
+  void init() {
+    eats_at_least_ = kEatsAtLeastNotYetInitialized;
+  }
+};
+
+
 class NodeVisitor {
  public:
   virtual ~NodeVisitor() { }
@@ -1546,10 +1598,10 @@
 //   +-------+        +------------+
 class Analysis: public NodeVisitor {
  public:
-  Analysis(bool ignore_case, bool is_ascii)
+  Analysis(bool ignore_case, bool is_one_byte)
       : ignore_case_(ignore_case),
-        is_ascii_(is_ascii),
-        error_message_(NULL) { }
+        is_one_byte_(is_one_byte),
+        error_message_(NULL) {}
   void EnsureAnalyzed(RegExpNode* node);
 
 #define DECLARE_VISIT(Type)                                          \
@@ -1560,7 +1612,7 @@
 
   bool has_failed() { return error_message_ != NULL; }
   const char* error_message() {
-    ASSERT(error_message_ != NULL);
+    DCHECK(error_message_ != NULL);
     return error_message_;
   }
   void fail(const char* error_message) {
@@ -1569,7 +1621,7 @@
 
  private:
   bool ignore_case_;
-  bool is_ascii_;
+  bool is_one_byte_;
   const char* error_message_;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Analysis);
@@ -1608,13 +1660,11 @@
     int num_registers;
   };
 
-  static CompilationResult Compile(RegExpCompileData* input,
-                                   bool ignore_case,
-                                   bool global,
-                                   bool multiline,
+  static CompilationResult Compile(RegExpCompileData* input, bool ignore_case,
+                                   bool global, bool multiline, bool sticky,
                                    Handle<String> pattern,
                                    Handle<String> sample_subject,
-                                   bool is_ascii, Zone* zone);
+                                   bool is_one_byte, Zone* zone);
 
   static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
 };
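
The QuickCheckDetails class declared above drives Irregexp's fast rejection
path: several pattern characters are packed into a word of mask/value bits so
a candidate position can be discarded with one load and one compare. A minimal
stand-alone sketch of the idea follows; the little-endian packing and the
helper below are illustrative assumptions, not the exact V8 layout.

#include <cstdint>

// Sketch of the mask/value quick check behind QuickCheckDetails: pack
// several pattern characters into one word, then reject a subject
// position with a single mask-and-compare instead of per-character tests.
bool QuickCheckMightMatch(uint32_t four_subject_bytes) {
  // Pattern "foo" over one-byte input, packed little-endian (assumed):
  const uint32_t mask = 0x00ffffffu;                       // 3 significant bytes
  const uint32_t value = 'f' | ('o' << 8) | ('o' << 16);   // expected bytes
  // false means the position certainly cannot match, so backtrack;
  // true only means a full per-character check is still required.
  return (four_subject_bytes & mask) == value;
}
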
diff --git a/src/libplatform/DEPS b/src/libplatform/DEPS
index bace5d3..2ea3359 100644
--- a/src/libplatform/DEPS
+++ b/src/libplatform/DEPS
@@ -1,6 +1,8 @@
 include_rules = [
-  # TODO(jochen): Enable this.
-  #"-src",
+  "-include",
+  "+include/libplatform",
+  "+include/v8-platform.h",
+  "-src",
   "+src/base",
   "+src/libplatform",
 ]
diff --git a/src/libplatform/default-platform-unittest.cc b/src/libplatform/default-platform-unittest.cc
new file mode 100644
index 0000000..d2c160e
--- /dev/null
+++ b/src/libplatform/default-platform-unittest.cc
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/libplatform/default-platform.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+  virtual ~MockTask() { Die(); }
+  MOCK_METHOD0(Run, void());
+  MOCK_METHOD0(Die, void());
+};
+
+}  // namespace
+
+
+TEST(DefaultPlatformTest, PumpMessageLoop) {
+  InSequence s;
+
+  int dummy;
+  Isolate* isolate = reinterpret_cast<Isolate*>(&dummy);
+
+  DefaultPlatform platform;
+  EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+
+  StrictMock<MockTask>* task = new StrictMock<MockTask>;
+  platform.CallOnForegroundThread(isolate, task);
+  EXPECT_CALL(*task, Run());
+  EXPECT_CALL(*task, Die());
+  EXPECT_TRUE(platform.PumpMessageLoop(isolate));
+  EXPECT_FALSE(platform.PumpMessageLoop(isolate));
+}
+
+}  // namespace platform
+}  // namespace v8
diff --git a/src/libplatform/default-platform.cc b/src/libplatform/default-platform.cc
index 733bcf0..b5b8571 100644
--- a/src/libplatform/default-platform.cc
+++ b/src/libplatform/default-platform.cc
@@ -7,13 +7,26 @@
 #include <algorithm>
 #include <queue>
 
-// TODO(jochen): We should have our own version of checks.h.
-#include "src/checks.h"
-#include "src/platform.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
+#include "src/base/sys-info.h"
 #include "src/libplatform/worker-thread.h"
 
 namespace v8 {
-namespace internal {
+namespace platform {
+
+
+v8::Platform* CreateDefaultPlatform(int thread_pool_size) {
+  DefaultPlatform* platform = new DefaultPlatform();
+  platform->SetThreadPoolSize(thread_pool_size);
+  platform->EnsureInitialized();
+  return platform;
+}
+
+
+bool PumpMessageLoop(v8::Platform* platform, v8::Isolate* isolate) {
+  return reinterpret_cast<DefaultPlatform*>(platform)->PumpMessageLoop(isolate);
+}
 
 
 const int DefaultPlatform::kMaxThreadPoolSize = 4;
@@ -24,7 +37,7 @@
 
 
 DefaultPlatform::~DefaultPlatform() {
-  LockGuard<Mutex> guard(&lock_);
+  base::LockGuard<base::Mutex> guard(&lock_);
   queue_.Terminate();
   if (initialized_) {
     for (std::vector<WorkerThread*>::iterator i = thread_pool_.begin();
@@ -32,21 +45,30 @@
       delete *i;
     }
   }
+  for (std::map<v8::Isolate*, std::queue<Task*> >::iterator i =
+           main_thread_queue_.begin();
+       i != main_thread_queue_.end(); ++i) {
+    while (!i->second.empty()) {
+      delete i->second.front();
+      i->second.pop();
+    }
+  }
 }
 
 
 void DefaultPlatform::SetThreadPoolSize(int thread_pool_size) {
-  LockGuard<Mutex> guard(&lock_);
-  ASSERT(thread_pool_size >= 0);
-  if (thread_pool_size < 1)
-    thread_pool_size = OS::NumberOfProcessorsOnline();
+  base::LockGuard<base::Mutex> guard(&lock_);
+  DCHECK(thread_pool_size >= 0);
+  if (thread_pool_size < 1) {
+    thread_pool_size = base::SysInfo::NumberOfProcessors();
+  }
   thread_pool_size_ =
       std::max(std::min(thread_pool_size, kMaxThreadPoolSize), 1);
 }
 
 
 void DefaultPlatform::EnsureInitialized() {
-  LockGuard<Mutex> guard(&lock_);
+  base::LockGuard<base::Mutex> guard(&lock_);
   if (initialized_) return;
   initialized_ = true;
 
@@ -54,6 +76,24 @@
     thread_pool_.push_back(new WorkerThread(&queue_));
 }
 
+
+bool DefaultPlatform::PumpMessageLoop(v8::Isolate* isolate) {
+  Task* task = NULL;
+  {
+    base::LockGuard<base::Mutex> guard(&lock_);
+    std::map<v8::Isolate*, std::queue<Task*> >::iterator it =
+        main_thread_queue_.find(isolate);
+    if (it == main_thread_queue_.end() || it->second.empty()) {
+      return false;
+    }
+    task = it->second.front();
+    it->second.pop();
+  }
+  task->Run();
+  delete task;
+  return true;
+}
+
 void DefaultPlatform::CallOnBackgroundThread(Task *task,
                                              ExpectedRuntime expected_runtime) {
   EnsureInitialized();
@@ -62,9 +102,8 @@
 
 
 void DefaultPlatform::CallOnForegroundThread(v8::Isolate* isolate, Task* task) {
-  // TODO(jochen): implement.
-  task->Run();
-  delete task;
+  base::LockGuard<base::Mutex> guard(&lock_);
+  main_thread_queue_[isolate].push(task);
 }
 
-} }  // namespace v8::internal
+} }  // namespace v8::platform
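
The two free functions added above, CreateDefaultPlatform() and
PumpMessageLoop(), are the embedder-facing entry points for the new
per-isolate foreground queue. A minimal usage sketch, assuming the
include/libplatform/libplatform.h header of this era declares them:

#include "include/libplatform/libplatform.h"
#include "include/v8.h"

void InitializeAndDrain(v8::Isolate* isolate) {
  // Thread pool size 0 lets SetThreadPoolSize() pick the processor count.
  v8::Platform* platform = v8::platform::CreateDefaultPlatform(0);
  v8::V8::InitializePlatform(platform);
  v8::V8::Initialize();
  // ... post work with platform->CallOnForegroundThread(isolate, task) ...
  // Each PumpMessageLoop() call runs at most one pending foreground task
  // for |isolate| and returns false once that isolate's queue is empty.
  while (v8::platform::PumpMessageLoop(platform, isolate)) {
    // Keep draining.
  }
}
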
diff --git a/src/libplatform/default-platform.h b/src/libplatform/default-platform.h
index 112ba4e..1efd7b2 100644
--- a/src/libplatform/default-platform.h
+++ b/src/libplatform/default-platform.h
@@ -5,15 +5,17 @@
 #ifndef V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
 #define V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
 
+#include <map>
+#include <queue>
 #include <vector>
 
 #include "include/v8-platform.h"
 #include "src/base/macros.h"
-#include "src/platform/mutex.h"
+#include "src/base/platform/mutex.h"
 #include "src/libplatform/task-queue.h"
 
 namespace v8 {
-namespace internal {
+namespace platform {
 
 class TaskQueue;
 class Thread;
@@ -28,26 +30,29 @@
 
   void EnsureInitialized();
 
+  bool PumpMessageLoop(v8::Isolate* isolate);
+
   // v8::Platform implementation.
   virtual void CallOnBackgroundThread(
-      Task *task, ExpectedRuntime expected_runtime) V8_OVERRIDE;
-  virtual void CallOnForegroundThread(v8::Isolate *isolate,
-                                      Task *task) V8_OVERRIDE;
+      Task* task, ExpectedRuntime expected_runtime) OVERRIDE;
+  virtual void CallOnForegroundThread(v8::Isolate* isolate,
+                                      Task* task) OVERRIDE;
 
  private:
   static const int kMaxThreadPoolSize;
 
-  Mutex lock_;
+  base::Mutex lock_;
   bool initialized_;
   int thread_pool_size_;
   std::vector<WorkerThread*> thread_pool_;
   TaskQueue queue_;
+  std::map<v8::Isolate*, std::queue<Task*> > main_thread_queue_;
 
   DISALLOW_COPY_AND_ASSIGN(DefaultPlatform);
 };
 
 
-} }  // namespace v8::internal
+} }  // namespace v8::platform
 
 
 #endif  // V8_LIBPLATFORM_DEFAULT_PLATFORM_H_
diff --git a/src/libplatform/libplatform.gyp b/src/libplatform/libplatform.gyp
new file mode 100644
index 0000000..4321da7
--- /dev/null
+++ b/src/libplatform/libplatform.gyp
@@ -0,0 +1,39 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'v8_code': 1,
+  },
+  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'libplatform-unittests',
+      'type': 'executable',
+      'dependencies': [
+        '../../testing/gtest.gyp:gtest',
+        '../../testing/gmock.gyp:gmock',
+        '../../testing/gmock.gyp:gmock_main',
+        '../../tools/gyp/v8.gyp:v8_libplatform',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        'default-platform-unittest.cc',
+        'task-queue-unittest.cc',
+        'worker-thread-unittest.cc',
+      ],
+      'conditions': [
+        ['os_posix == 1', {
+          # TODO(svenpanne): This is a temporary work-around to fix the warnings
+          # that show up because we use -std=gnu++0x instead of -std=c++11.
+          'cflags!': [
+            '-pedantic',
+          ],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/src/libplatform/task-queue-unittest.cc b/src/libplatform/task-queue-unittest.cc
new file mode 100644
index 0000000..9a18658
--- /dev/null
+++ b/src/libplatform/task-queue-unittest.cc
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-platform.h"
+#include "src/base/platform/platform.h"
+#include "src/libplatform/task-queue.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::IsNull;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+  MOCK_METHOD0(Run, void());
+};
+
+
+class TaskQueueThread FINAL : public base::Thread {
+ public:
+  explicit TaskQueueThread(TaskQueue* queue)
+      : Thread(Options("libplatform TaskQueueThread")), queue_(queue) {}
+
+  virtual void Run() OVERRIDE { EXPECT_THAT(queue_->GetNext(), IsNull()); }
+
+ private:
+  TaskQueue* queue_;
+};
+
+}  // namespace
+
+
+TEST(TaskQueueTest, Basic) {
+  TaskQueue queue;
+  MockTask task;
+  queue.Append(&task);
+  EXPECT_EQ(&task, queue.GetNext());
+  queue.Terminate();
+  EXPECT_THAT(queue.GetNext(), IsNull());
+}
+
+
+TEST(TaskQueueTest, TerminateMultipleReaders) {
+  TaskQueue queue;
+  TaskQueueThread thread1(&queue);
+  TaskQueueThread thread2(&queue);
+  thread1.Start();
+  thread2.Start();
+  queue.Terminate();
+  thread1.Join();
+  thread2.Join();
+}
+
+}  // namespace platform
+}  // namespace v8
diff --git a/src/libplatform/task-queue.cc b/src/libplatform/task-queue.cc
index e618cb7..7a9071f 100644
--- a/src/libplatform/task-queue.cc
+++ b/src/libplatform/task-queue.cc
@@ -4,25 +4,24 @@
 
 #include "src/libplatform/task-queue.h"
 
-// TODO(jochen): We should have our own version of checks.h.
-#include "src/checks.h"
+#include "src/base/logging.h"
 
 namespace v8 {
-namespace internal {
+namespace platform {
 
 TaskQueue::TaskQueue() : process_queue_semaphore_(0), terminated_(false) {}
 
 
 TaskQueue::~TaskQueue() {
-  LockGuard<Mutex> guard(&lock_);
-  ASSERT(terminated_);
-  ASSERT(task_queue_.empty());
+  base::LockGuard<base::Mutex> guard(&lock_);
+  DCHECK(terminated_);
+  DCHECK(task_queue_.empty());
 }
 
 
 void TaskQueue::Append(Task* task) {
-  LockGuard<Mutex> guard(&lock_);
-  ASSERT(!terminated_);
+  base::LockGuard<base::Mutex> guard(&lock_);
+  DCHECK(!terminated_);
   task_queue_.push(task);
   process_queue_semaphore_.Signal();
 }
@@ -31,7 +30,7 @@
 Task* TaskQueue::GetNext() {
   for (;;) {
     {
-      LockGuard<Mutex> guard(&lock_);
+      base::LockGuard<base::Mutex> guard(&lock_);
       if (!task_queue_.empty()) {
         Task* result = task_queue_.front();
         task_queue_.pop();
@@ -48,10 +47,10 @@
 
 
 void TaskQueue::Terminate() {
-  LockGuard<Mutex> guard(&lock_);
-  ASSERT(!terminated_);
+  base::LockGuard<base::Mutex> guard(&lock_);
+  DCHECK(!terminated_);
   terminated_ = true;
   process_queue_semaphore_.Signal();
 }
 
-} }  // namespace v8::internal
+} }  // namespace v8::platform
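
TaskQueue::GetNext(), shown in part above, blocks on process_queue_semaphore_
until a task is appended; once Terminate() has signalled the semaphore, each
blocked reader wakes and receives NULL (the unit test above checks exactly
this). The consumer loop that contract supports looks like the following
sketch, which mirrors what WorkerThread::Run() does:

#include "include/v8-platform.h"
#include "src/libplatform/task-queue.h"

// Sketch of a TaskQueue consumer; ownership of each task passes to the
// caller of GetNext(), so the consumer must delete it after running it.
void ConsumeUntilTerminated(v8::platform::TaskQueue* queue) {
  while (v8::Task* task = queue->GetNext()) {
    task->Run();
    delete task;
  }
}
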
diff --git a/src/libplatform/task-queue.h b/src/libplatform/task-queue.h
index 1403664..eb9d698 100644
--- a/src/libplatform/task-queue.h
+++ b/src/libplatform/task-queue.h
@@ -8,14 +8,14 @@
 #include <queue>
 
 #include "src/base/macros.h"
-#include "src/platform/mutex.h"
-#include "src/platform/semaphore.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/semaphore.h"
 
 namespace v8 {
 
 class Task;
 
-namespace internal {
+namespace platform {
 
 class TaskQueue {
  public:
@@ -33,15 +33,15 @@
   void Terminate();
 
  private:
-  Mutex lock_;
-  Semaphore process_queue_semaphore_;
+  base::Mutex lock_;
+  base::Semaphore process_queue_semaphore_;
   std::queue<Task*> task_queue_;
   bool terminated_;
 
   DISALLOW_COPY_AND_ASSIGN(TaskQueue);
 };
 
-} }  // namespace v8::internal
+} }  // namespace v8::platform
 
 
 #endif  // V8_LIBPLATFORM_TASK_QUEUE_H_
diff --git a/src/libplatform/worker-thread-unittest.cc b/src/libplatform/worker-thread-unittest.cc
new file mode 100644
index 0000000..175b311
--- /dev/null
+++ b/src/libplatform/worker-thread-unittest.cc
@@ -0,0 +1,48 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/v8-platform.h"
+#include "src/libplatform/task-queue.h"
+#include "src/libplatform/worker-thread.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+using testing::InSequence;
+using testing::IsNull;
+using testing::StrictMock;
+
+namespace v8 {
+namespace platform {
+
+namespace {
+
+struct MockTask : public Task {
+  virtual ~MockTask() { Die(); }
+  MOCK_METHOD0(Run, void());
+  MOCK_METHOD0(Die, void());
+};
+
+}  // namespace
+
+
+TEST(WorkerThreadTest, Basic) {
+  static const size_t kNumTasks = 10;
+
+  TaskQueue queue;
+  for (size_t i = 0; i < kNumTasks; ++i) {
+    InSequence s;
+    StrictMock<MockTask>* task = new StrictMock<MockTask>;
+    EXPECT_CALL(*task, Run());
+    EXPECT_CALL(*task, Die());
+    queue.Append(task);
+  }
+
+  WorkerThread thread1(&queue);
+  WorkerThread thread2(&queue);
+
+  // TaskQueue DCHECKs that it's empty in its destructor.
+  queue.Terminate();
+}
+
+}  // namespace platform
+}  // namespace v8
diff --git a/src/libplatform/worker-thread.cc b/src/libplatform/worker-thread.cc
index 6b3892c..9963715 100644
--- a/src/libplatform/worker-thread.cc
+++ b/src/libplatform/worker-thread.cc
@@ -8,10 +8,10 @@
 #include "src/libplatform/task-queue.h"
 
 namespace v8 {
-namespace internal {
+namespace platform {
 
 WorkerThread::WorkerThread(TaskQueue* queue)
-    : Thread("V8 WorkerThread"), queue_(queue) {
+    : Thread(Options("V8 WorkerThread")), queue_(queue) {
   Start();
 }
 
@@ -28,4 +28,4 @@
   }
 }
 
-} }  // namespace v8::internal
+} }  // namespace v8::platform
diff --git a/src/libplatform/worker-thread.h b/src/libplatform/worker-thread.h
index 20b9add..67f086d 100644
--- a/src/libplatform/worker-thread.h
+++ b/src/libplatform/worker-thread.h
@@ -8,21 +8,21 @@
 #include <queue>
 
 #include "src/base/macros.h"
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
 
 namespace v8 {
 
-namespace internal {
+namespace platform {
 
 class TaskQueue;
 
-class WorkerThread : public Thread {
+class WorkerThread : public base::Thread {
  public:
   explicit WorkerThread(TaskQueue* queue);
   virtual ~WorkerThread();
 
   // Thread implementation.
-  virtual void Run() V8_OVERRIDE;
+  virtual void Run() OVERRIDE;
 
  private:
   friend class QuitTask;
@@ -32,7 +32,7 @@
   DISALLOW_COPY_AND_ASSIGN(WorkerThread);
 };
 
-} }  // namespace v8::internal
+} }  // namespace v8::platform
 
 
 #endif  // V8_LIBPLATFORM_WORKER_THREAD_H_
diff --git a/src/list-inl.h b/src/list-inl.h
index 8a4cf56..60e8fab 100644
--- a/src/list-inl.h
+++ b/src/list-inl.h
@@ -6,7 +6,8 @@
 #define V8_LIST_INL_H_
 
 #include "src/list.h"
-#include "src/platform.h"
+
+#include "src/base/platform/platform.h"
 
 namespace v8 {
 namespace internal {
@@ -49,7 +50,7 @@
 
 template<typename T, class P>
 void List<T, P>::ResizeAddInternal(const T& element, P alloc) {
-  ASSERT(length_ >= capacity_);
+  DCHECK(length_ >= capacity_);
   // Grow the list capacity by 100%, but make sure to let it grow
   // even when the capacity is zero (possible initial case).
   int new_capacity = 1 + 2 * capacity_;
@@ -63,7 +64,7 @@
 
 template<typename T, class P>
 void List<T, P>::Resize(int new_capacity, P alloc) {
-  ASSERT_LE(length_, new_capacity);
+  DCHECK_LE(length_, new_capacity);
   T* new_data = NewData(new_capacity, alloc);
   MemCopy(new_data, data_, length_ * sizeof(T));
   List<T, P>::DeleteData(data_);
@@ -82,14 +83,14 @@
 
 template<typename T, class P>
 void List<T, P>::Set(int index, const T& elm) {
-  ASSERT(index >= 0 && index <= length_);
+  DCHECK(index >= 0 && index <= length_);
   data_[index] = elm;
 }
 
 
 template<typename T, class P>
 void List<T, P>::InsertAt(int index, const T& elm, P alloc) {
-  ASSERT(index >= 0 && index <= length_);
+  DCHECK(index >= 0 && index <= length_);
   Add(elm, alloc);
   for (int i = length_ - 1; i > index; --i) {
     data_[i] = data_[i - 1];
@@ -143,7 +144,7 @@
 
 template<typename T, class P>
 void List<T, P>::Rewind(int pos) {
-  ASSERT(0 <= pos && pos <= length_);
+  DCHECK(0 <= pos && pos <= length_);
   length_ = pos;
 }
 
@@ -194,7 +195,7 @@
   ToVector().Sort(cmp);
 #ifdef DEBUG
   for (int i = 1; i < length_; i++)
-    ASSERT(cmp(&data_[i - 1], &data_[i]) <= 0);
+    DCHECK(cmp(&data_[i - 1], &data_[i]) <= 0);
 #endif
 }
 
@@ -207,7 +208,7 @@
 
 template<typename T, class P>
 void List<T, P>::Initialize(int capacity, P allocator) {
-  ASSERT(capacity >= 0);
+  DCHECK(capacity >= 0);
   data_ = (capacity > 0) ? NewData(capacity, allocator) : NULL;
   capacity_ = capacity;
   length_ = 0;
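
The growth policy in ResizeAddInternal() above, new_capacity = 1 + 2 *
capacity_, doubles the backing store while still letting a zero-capacity list
grow, which keeps Add() amortized O(1). A tiny stand-alone check of the
resulting capacity sequence:

#include <cstdio>

int main() {
  // Reproduces the capacity sequence of List<T>::ResizeAddInternal():
  // grow by 100%, but allow growth from the initial zero capacity.
  int capacity = 0;
  for (int i = 0; i < 6; ++i) {
    capacity = 1 + 2 * capacity;
    std::printf("%d ", capacity);  // prints: 1 3 7 15 31 63
  }
  std::printf("\n");
  return 0;
}
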
diff --git a/src/list.h b/src/list.h
index 2244d67..ea5fd1e 100644
--- a/src/list.h
+++ b/src/list.h
@@ -5,6 +5,7 @@
 #ifndef V8_LIST_H_
 #define V8_LIST_H_
 
+#include "src/checks.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -61,8 +62,8 @@
   // not safe to use after operations that can change the list's
   // backing store (e.g. Add).
   inline T& operator[](int i) const {
-    ASSERT(0 <= i);
-    SLOW_ASSERT(i < length_);
+    DCHECK(0 <= i);
+    SLOW_DCHECK(i < length_);
     return data_[i];
   }
   inline T& at(int i) const { return operator[](i); }
diff --git a/src/lithium-allocator-inl.h b/src/lithium-allocator-inl.h
index 1016ee3..bafa00f 100644
--- a/src/lithium-allocator-inl.h
+++ b/src/lithium-allocator-inl.h
@@ -8,17 +8,19 @@
 #include "src/lithium-allocator.h"
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h"
+#include "src/ia32/lithium-ia32.h" // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h"
+#include "src/x64/lithium-x64.h" // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h"
+#include "src/arm64/lithium-arm64.h" // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h"
+#include "src/arm/lithium-arm.h" // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h"
+#include "src/mips/lithium-mips.h" // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h" // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h"
+#include "src/x87/lithium-x87.h" // NOLINT
 #else
 #error "Unknown architecture."
 #endif
@@ -39,98 +41,11 @@
 }
 
 
-TempIterator::TempIterator(LInstruction* instr)
-    : instr_(instr),
-      limit_(instr->TempCount()),
-      current_(0) {
-  SkipUninteresting();
-}
-
-
-bool TempIterator::Done() { return current_ >= limit_; }
-
-
-LOperand* TempIterator::Current() {
-  ASSERT(!Done());
-  return instr_->TempAt(current_);
-}
-
-
-void TempIterator::SkipUninteresting() {
-  while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
-}
-
-
-void TempIterator::Advance() {
-  ++current_;
-  SkipUninteresting();
-}
-
-
-InputIterator::InputIterator(LInstruction* instr)
-    : instr_(instr),
-      limit_(instr->InputCount()),
-      current_(0) {
-  SkipUninteresting();
-}
-
-
-bool InputIterator::Done() { return current_ >= limit_; }
-
-
-LOperand* InputIterator::Current() {
-  ASSERT(!Done());
-  ASSERT(instr_->InputAt(current_) != NULL);
-  return instr_->InputAt(current_);
-}
-
-
-void InputIterator::Advance() {
-  ++current_;
-  SkipUninteresting();
-}
-
-
-void InputIterator::SkipUninteresting() {
-  while (current_ < limit_) {
-    LOperand* current = instr_->InputAt(current_);
-    if (current != NULL && !current->IsConstantOperand()) break;
-    ++current_;
-  }
-}
-
-
-UseIterator::UseIterator(LInstruction* instr)
-    : input_iterator_(instr), env_iterator_(instr->environment()) { }
-
-
-bool UseIterator::Done() {
-  return input_iterator_.Done() && env_iterator_.Done();
-}
-
-
-LOperand* UseIterator::Current() {
-  ASSERT(!Done());
-  LOperand* result = input_iterator_.Done()
-      ? env_iterator_.Current()
-      : input_iterator_.Current();
-  ASSERT(result != NULL);
-  return result;
-}
-
-
-void UseIterator::Advance() {
-  input_iterator_.Done()
-      ? env_iterator_.Advance()
-      : input_iterator_.Advance();
-}
-
-
 void LAllocator::SetLiveRangeAssignedRegister(LiveRange* range, int reg) {
   if (range->Kind() == DOUBLE_REGISTERS) {
     assigned_double_registers_->Add(reg);
   } else {
-    ASSERT(range->Kind() == GENERAL_REGISTERS);
+    DCHECK(range->Kind() == GENERAL_REGISTERS);
     assigned_registers_->Add(reg);
   }
   range->set_assigned_register(reg, chunk()->zone());
diff --git a/src/lithium-allocator.cc b/src/lithium-allocator.cc
index a36f7de..5f4f17f 100644
--- a/src/lithium-allocator.cc
+++ b/src/lithium-allocator.cc
@@ -3,27 +3,12 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
-#include "src/lithium-allocator-inl.h"
 
 #include "src/hydrogen.h"
+#include "src/lithium-inl.h"
+#include "src/lithium-allocator-inl.h"
 #include "src/string-stream.h"
 
-#if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h"
-#elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h"
-#elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h"
-#elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h"
-#else
-#error "Unknown architecture."
-#endif
-
 namespace v8 {
 namespace internal {
 
@@ -52,7 +37,7 @@
         unalloc->HasDoubleRegisterPolicy();
     register_beneficial_ = !unalloc->HasAnyPolicy();
   }
-  ASSERT(pos_.IsValid());
+  DCHECK(pos_.IsValid());
 }
 
 
@@ -72,7 +57,7 @@
 
 
 void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
-  ASSERT(Contains(pos) && pos.Value() != start().Value());
+  DCHECK(Contains(pos) && pos.Value() != start().Value());
   UseInterval* after = new(zone) UseInterval(pos, end_);
   after->next_ = next_;
   next_ = after;
@@ -86,7 +71,7 @@
 void LiveRange::Verify() const {
   UsePosition* cur = first_pos_;
   while (cur != NULL) {
-    ASSERT(Start().Value() <= cur->pos().Value() &&
+    DCHECK(Start().Value() <= cur->pos().Value() &&
            cur->pos().Value() <= End().Value());
     cur = cur->next();
   }
@@ -123,20 +108,20 @@
       current_interval_(NULL),
       last_processed_use_(NULL),
       current_hint_operand_(NULL),
-      spill_operand_(new(zone) LOperand()),
-      spill_start_index_(kMaxInt) { }
+      spill_operand_(new (zone) LOperand()),
+      spill_start_index_(kMaxInt) {}
 
 
 void LiveRange::set_assigned_register(int reg, Zone* zone) {
-  ASSERT(!HasRegisterAssigned() && !IsSpilled());
+  DCHECK(!HasRegisterAssigned() && !IsSpilled());
   assigned_register_ = reg;
   ConvertOperands(zone);
 }
 
 
 void LiveRange::MakeSpilled(Zone* zone) {
-  ASSERT(!IsSpilled());
-  ASSERT(TopLevel()->HasAllocatedSpillOperand());
+  DCHECK(!IsSpilled());
+  DCHECK(TopLevel()->HasAllocatedSpillOperand());
   spilled_ = true;
   assigned_register_ = kInvalidAssignment;
   ConvertOperands(zone);
@@ -144,15 +129,15 @@
 
 
 bool LiveRange::HasAllocatedSpillOperand() const {
-  ASSERT(spill_operand_ != NULL);
+  DCHECK(spill_operand_ != NULL);
   return !spill_operand_->IsIgnored();
 }
 
 
 void LiveRange::SetSpillOperand(LOperand* operand) {
-  ASSERT(!operand->IsUnallocated());
-  ASSERT(spill_operand_ != NULL);
-  ASSERT(spill_operand_->IsIgnored());
+  DCHECK(!operand->IsUnallocated());
+  DCHECK(spill_operand_ != NULL);
+  DCHECK(spill_operand_->IsIgnored());
   spill_operand_->ConvertTo(operand->kind(), operand->index());
 }
 
@@ -212,7 +197,7 @@
 LOperand* LiveRange::CreateAssignedOperand(Zone* zone) {
   LOperand* op = NULL;
   if (HasRegisterAssigned()) {
-    ASSERT(!IsSpilled());
+    DCHECK(!IsSpilled());
     switch (Kind()) {
       case GENERAL_REGISTERS:
         op = LRegister::Create(assigned_register(), zone);
@@ -224,9 +209,9 @@
         UNREACHABLE();
     }
   } else if (IsSpilled()) {
-    ASSERT(!HasRegisterAssigned());
+    DCHECK(!HasRegisterAssigned());
     op = TopLevel()->GetSpillOperand();
-    ASSERT(!op->IsUnallocated());
+    DCHECK(!op->IsUnallocated());
   } else {
     LUnallocated* unalloc = new(zone) LUnallocated(LUnallocated::NONE);
     unalloc->set_virtual_register(id_);
@@ -263,8 +248,8 @@
 void LiveRange::SplitAt(LifetimePosition position,
                         LiveRange* result,
                         Zone* zone) {
-  ASSERT(Start().Value() < position.Value());
-  ASSERT(result->IsEmpty());
+  DCHECK(Start().Value() < position.Value());
+  DCHECK(result->IsEmpty());
   // Find the last interval that ends before the position. If the
   // position is contained in one of the intervals in the chain, we
   // split that interval and use the first part.
@@ -368,9 +353,9 @@
 
 void LiveRange::ShortenTo(LifetimePosition start) {
   LAllocator::TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
-  ASSERT(first_interval_ != NULL);
-  ASSERT(first_interval_->start().Value() <= start.Value());
-  ASSERT(start.Value() < first_interval_->end().Value());
+  DCHECK(first_interval_ != NULL);
+  DCHECK(first_interval_->start().Value() <= start.Value());
+  DCHECK(start.Value() < first_interval_->end().Value());
   first_interval_->set_start(start);
 }
 
@@ -422,7 +407,7 @@
       // The order of instruction processing (see ProcessInstructions)
       // guarantees that each new use interval either precedes or intersects
       // with the last added interval.
-      ASSERT(start.Value() < first_interval_->end().Value());
+      DCHECK(start.Value() < first_interval_->end().Value());
       first_interval_->start_ = Min(start, first_interval_->start_);
       first_interval_->end_ = Max(end, first_interval_->end_);
     }
@@ -465,11 +450,11 @@
   LOperand* op = CreateAssignedOperand(zone);
   UsePosition* use_pos = first_pos();
   while (use_pos != NULL) {
-    ASSERT(Start().Value() <= use_pos->pos().Value() &&
+    DCHECK(Start().Value() <= use_pos->pos().Value() &&
            use_pos->pos().Value() <= End().Value());
 
     if (use_pos->HasOperand()) {
-      ASSERT(op->IsRegister() || op->IsDoubleRegister() ||
+      DCHECK(op->IsRegister() || op->IsDoubleRegister() ||
              !use_pos->RequiresRegister());
       use_pos->operand()->ConvertTo(op->kind(), op->index());
     }
@@ -491,7 +476,7 @@
   for (UseInterval* interval = start_search;
        interval != NULL;
        interval = interval->next()) {
-    ASSERT(interval->next() == NULL ||
+    DCHECK(interval->next() == NULL ||
            interval->next()->start().Value() >= interval->start().Value());
     AdvanceLastProcessedMarker(interval, position);
     if (interval->Contains(position)) return true;
@@ -542,7 +527,7 @@
       num_registers_(-1),
       graph_(graph),
       has_osr_entry_(false),
-      allocation_ok_(true) { }
+      allocation_ok_(true) {}
 
 
 void LAllocator::InitializeLivenessAnalysis() {
@@ -609,7 +594,7 @@
                                     int pos,
                                     bool is_tagged) {
   TraceAlloc("Allocating fixed reg for op %d\n", operand->virtual_register());
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   if (operand->HasFixedSlotPolicy()) {
     operand->ConvertTo(LOperand::STACK_SLOT, operand->fixed_slot_index());
   } else if (operand->HasFixedRegisterPolicy()) {
@@ -633,11 +618,11 @@
 
 
 LiveRange* LAllocator::FixedLiveRangeFor(int index) {
-  ASSERT(index < Register::kMaxNumAllocatableRegisters);
+  DCHECK(index < Register::kMaxNumAllocatableRegisters);
   LiveRange* result = fixed_live_ranges_[index];
   if (result == NULL) {
     result = new(zone()) LiveRange(FixedLiveRangeID(index), chunk()->zone());
-    ASSERT(result->IsFixed());
+    DCHECK(result->IsFixed());
     result->kind_ = GENERAL_REGISTERS;
     SetLiveRangeAssignedRegister(result, index);
     fixed_live_ranges_[index] = result;
@@ -647,12 +632,12 @@
 
 
 LiveRange* LAllocator::FixedDoubleLiveRangeFor(int index) {
-  ASSERT(index < DoubleRegister::NumAllocatableRegisters());
+  DCHECK(index < DoubleRegister::NumAllocatableRegisters());
   LiveRange* result = fixed_double_live_ranges_[index];
   if (result == NULL) {
     result = new(zone()) LiveRange(FixedDoubleLiveRangeID(index),
                                    chunk()->zone());
-    ASSERT(result->IsFixed());
+    DCHECK(result->IsFixed());
     result->kind_ = DOUBLE_REGISTERS;
     SetLiveRangeAssignedRegister(result, index);
     fixed_double_live_ranges_[index] = result;
@@ -842,7 +827,7 @@
       } else if (cur_input->HasWritableRegisterPolicy()) {
         // The live range of writable input registers always goes until the end
         // of the instruction.
-        ASSERT(!cur_input->IsUsedAtStart());
+        DCHECK(!cur_input->IsUsedAtStart());
 
         LUnallocated* input_copy = cur_input->CopyUnconstrained(
             chunk()->zone());
@@ -942,7 +927,7 @@
         }
       }
     } else {
-      ASSERT(!IsGapAt(index));
+      DCHECK(!IsGapAt(index));
       LInstruction* instr = InstructionAt(index);
 
       if (instr != NULL) {
@@ -1031,7 +1016,7 @@
   for (int i = 0; i < phis->length(); ++i) {
     HPhi* phi = phis->at(i);
     LUnallocated* phi_operand =
-        new(chunk()->zone()) LUnallocated(LUnallocated::NONE);
+        new (chunk()->zone()) LUnallocated(LUnallocated::NONE);
     phi_operand->set_virtual_register(phi->id());
     for (int j = 0; j < phi->OperandCount(); ++j) {
       HValue* op = phi->OperandAt(j);
@@ -1040,7 +1025,7 @@
         HConstant* constant = HConstant::cast(op);
         operand = chunk_->DefineConstantOperand(constant);
       } else {
-        ASSERT(!op->EmitAtUses());
+        DCHECK(!op->EmitAtUses());
         LUnallocated* unalloc =
             new(chunk()->zone()) LUnallocated(LUnallocated::ANY);
         unalloc->set_virtual_register(op->id());
@@ -1082,7 +1067,7 @@
 
 
 bool LAllocator::Allocate(LChunk* chunk) {
-  ASSERT(chunk_ == NULL);
+  DCHECK(chunk_ == NULL);
   chunk_ = static_cast<LPlatformChunk*>(chunk);
   assigned_registers_ =
       new(chunk->zone()) BitVector(Register::NumAllocatableRegisters(),
@@ -1140,18 +1125,18 @@
   LiveRange* cur_range = range;
   while (cur_range != NULL && (cur_cover == NULL || pred_cover == NULL)) {
     if (cur_range->CanCover(cur_start)) {
-      ASSERT(cur_cover == NULL);
+      DCHECK(cur_cover == NULL);
       cur_cover = cur_range;
     }
     if (cur_range->CanCover(pred_end)) {
-      ASSERT(pred_cover == NULL);
+      DCHECK(pred_cover == NULL);
       pred_cover = cur_range;
     }
     cur_range = cur_range->next();
   }
 
   if (cur_cover->IsSpilled()) return;
-  ASSERT(pred_cover != NULL && cur_cover != NULL);
+  DCHECK(pred_cover != NULL && cur_cover != NULL);
   if (pred_cover != cur_cover) {
     LOperand* pred_op = pred_cover->CreateAssignedOperand(chunk()->zone());
     LOperand* cur_op = cur_cover->CreateAssignedOperand(chunk()->zone());
@@ -1160,7 +1145,7 @@
       if (block->predecessors()->length() == 1) {
         gap = GapAt(block->first_instruction_index());
       } else {
-        ASSERT(pred->end()->SecondSuccessor() == NULL);
+        DCHECK(pred->end()->SecondSuccessor() == NULL);
         gap = GetLastGap(pred);
 
         // We are going to insert a move before the branch instruction.
@@ -1309,7 +1294,7 @@
           break;
         }
       }
-      ASSERT(hint != NULL);
+      DCHECK(hint != NULL);
 
       LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
               block->first_instruction_index());
@@ -1356,7 +1341,7 @@
           CodeStub::Major major_key = chunk_->info()->code_stub()->MajorKey();
           PrintF("Function: %s\n", CodeStub::MajorName(major_key, false));
         } else {
-          ASSERT(chunk_->info()->IsOptimizing());
+          DCHECK(chunk_->info()->IsOptimizing());
           AllowHandleDereference allow_deref;
           PrintF("Function: %s\n",
                  chunk_->info()->function()->debug_name()->ToCString().get());
@@ -1366,7 +1351,7 @@
         PrintF("First use is at %d\n", range->first_pos()->pos().Value());
         iterator.Advance();
       }
-      ASSERT(!found);
+      DCHECK(!found);
     }
 #endif
   }
@@ -1395,7 +1380,7 @@
   LAllocatorPhase phase("L_Populate pointer maps", this);
   const ZoneList<LPointerMap*>* pointer_maps = chunk_->pointer_maps();
 
-  ASSERT(SafePointsAreInOrder());
+  DCHECK(SafePointsAreInOrder());
 
   // Iterate over all safe point positions and record a pointer
   // for all spilled live ranges at this point.
@@ -1417,7 +1402,7 @@
     for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
       LifetimePosition this_end = cur->End();
       if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
-      ASSERT(cur->Start().InstructionIndex() >= start);
+      DCHECK(cur->Start().InstructionIndex() >= start);
     }
 
     // Most of the ranges are in order, but not all.  Keep an eye on when
@@ -1471,7 +1456,7 @@
                    "at safe point %d\n",
                    cur->id(), cur->Start().Value(), safe_point);
         LOperand* operand = cur->CreateAssignedOperand(chunk()->zone());
-        ASSERT(!operand->IsStackSlot());
+        DCHECK(!operand->IsStackSlot());
         map->RecordPointer(operand, chunk()->zone());
       }
     }
@@ -1496,7 +1481,7 @@
 
 
 void LAllocator::AllocateRegisters() {
-  ASSERT(unhandled_live_ranges_.is_empty());
+  DCHECK(unhandled_live_ranges_.is_empty());
 
   for (int i = 0; i < live_ranges_.length(); ++i) {
     if (live_ranges_[i] != NULL) {
@@ -1506,11 +1491,11 @@
     }
   }
   SortUnhandled();
-  ASSERT(UnhandledIsSorted());
+  DCHECK(UnhandledIsSorted());
 
-  ASSERT(reusable_slots_.is_empty());
-  ASSERT(active_live_ranges_.is_empty());
-  ASSERT(inactive_live_ranges_.is_empty());
+  DCHECK(reusable_slots_.is_empty());
+  DCHECK(active_live_ranges_.is_empty());
+  DCHECK(inactive_live_ranges_.is_empty());
 
   if (mode_ == DOUBLE_REGISTERS) {
     for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
@@ -1520,7 +1505,7 @@
       }
     }
   } else {
-    ASSERT(mode_ == GENERAL_REGISTERS);
+    DCHECK(mode_ == GENERAL_REGISTERS);
     for (int i = 0; i < fixed_live_ranges_.length(); ++i) {
       LiveRange* current = fixed_live_ranges_.at(i);
       if (current != NULL) {
@@ -1530,9 +1515,9 @@
   }
 
   while (!unhandled_live_ranges_.is_empty()) {
-    ASSERT(UnhandledIsSorted());
+    DCHECK(UnhandledIsSorted());
     LiveRange* current = unhandled_live_ranges_.RemoveLast();
-    ASSERT(UnhandledIsSorted());
+    DCHECK(UnhandledIsSorted());
     LifetimePosition position = current->Start();
 #ifdef DEBUG
     allocation_finger_ = position;
@@ -1559,7 +1544,7 @@
         // the register is too close to the start of live range.
         SpillBetween(current, current->Start(), pos->pos());
         if (!AllocationOk()) return;
-        ASSERT(UnhandledIsSorted());
+        DCHECK(UnhandledIsSorted());
         continue;
       }
     }
@@ -1586,7 +1571,7 @@
       }
     }
 
-    ASSERT(!current->HasRegisterAssigned() && !current->IsSpilled());
+    DCHECK(!current->HasRegisterAssigned() && !current->IsSpilled());
 
     bool result = TryAllocateFreeReg(current);
     if (!AllocationOk()) return;
@@ -1618,7 +1603,7 @@
   if (FLAG_trace_alloc) {
     va_list arguments;
     va_start(arguments, msg);
-    OS::VPrint(msg, arguments);
+    base::OS::VPrint(msg, arguments);
     va_end(arguments);
   }
 }
@@ -1660,33 +1645,33 @@
 
 void LAllocator::AddToUnhandledSorted(LiveRange* range) {
   if (range == NULL || range->IsEmpty()) return;
-  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
-  ASSERT(allocation_finger_.Value() <= range->Start().Value());
+  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
+  DCHECK(allocation_finger_.Value() <= range->Start().Value());
   for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
     LiveRange* cur_range = unhandled_live_ranges_.at(i);
     if (range->ShouldBeAllocatedBefore(cur_range)) {
       TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
       unhandled_live_ranges_.InsertAt(i + 1, range, zone());
-      ASSERT(UnhandledIsSorted());
+      DCHECK(UnhandledIsSorted());
       return;
     }
   }
   TraceAlloc("Add live range %d to unhandled at start\n", range->id());
   unhandled_live_ranges_.InsertAt(0, range, zone());
-  ASSERT(UnhandledIsSorted());
+  DCHECK(UnhandledIsSorted());
 }
 
 
 void LAllocator::AddToUnhandledUnsorted(LiveRange* range) {
   if (range == NULL || range->IsEmpty()) return;
-  ASSERT(!range->HasRegisterAssigned() && !range->IsSpilled());
+  DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
   TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
   unhandled_live_ranges_.Add(range, zone());
 }
 
 
 static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
-  ASSERT(!(*a)->ShouldBeAllocatedBefore(*b) ||
+  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
          !(*b)->ShouldBeAllocatedBefore(*a));
   if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
   if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
@@ -1740,7 +1725,7 @@
 
 
 void LAllocator::ActiveToHandled(LiveRange* range) {
-  ASSERT(active_live_ranges_.Contains(range));
+  DCHECK(active_live_ranges_.Contains(range));
   active_live_ranges_.RemoveElement(range);
   TraceAlloc("Moving live range %d from active to handled\n", range->id());
   FreeSpillSlot(range);
@@ -1748,7 +1733,7 @@
 
 
 void LAllocator::ActiveToInactive(LiveRange* range) {
-  ASSERT(active_live_ranges_.Contains(range));
+  DCHECK(active_live_ranges_.Contains(range));
   active_live_ranges_.RemoveElement(range);
   inactive_live_ranges_.Add(range, zone());
   TraceAlloc("Moving live range %d from active to inactive\n", range->id());
@@ -1756,7 +1741,7 @@
 
 
 void LAllocator::InactiveToHandled(LiveRange* range) {
-  ASSERT(inactive_live_ranges_.Contains(range));
+  DCHECK(inactive_live_ranges_.Contains(range));
   inactive_live_ranges_.RemoveElement(range);
   TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
   FreeSpillSlot(range);
@@ -1764,7 +1749,7 @@
 
 
 void LAllocator::InactiveToActive(LiveRange* range) {
-  ASSERT(inactive_live_ranges_.Contains(range));
+  DCHECK(inactive_live_ranges_.Contains(range));
   inactive_live_ranges_.RemoveElement(range);
   active_live_ranges_.Add(range, zone());
   TraceAlloc("Moving live range %d from inactive to active\n", range->id());
@@ -1792,7 +1777,7 @@
 
   for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
     LiveRange* cur_inactive = inactive_live_ranges_.at(i);
-    ASSERT(cur_inactive->End().Value() > current->Start().Value());
+    DCHECK(cur_inactive->End().Value() > current->Start().Value());
     LifetimePosition next_intersection =
         cur_inactive->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
@@ -1846,7 +1831,7 @@
 
   // Register reg is available at the range start and is free until
   // the range end.
-  ASSERT(pos.Value() >= current->End().Value());
+  DCHECK(pos.Value() >= current->End().Value());
   TraceAlloc("Assigning free reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
@@ -1892,7 +1877,7 @@
 
   for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
     LiveRange* range = inactive_live_ranges_.at(i);
-    ASSERT(range->End().Value() > current->Start().Value());
+    DCHECK(range->End().Value() > current->Start().Value());
     LifetimePosition next_intersection = range->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = range->assigned_register();
@@ -1931,7 +1916,7 @@
   }
 
   // Register reg is not blocked for the whole range.
-  ASSERT(block_pos[reg].Value() >= current->End().Value());
+  DCHECK(block_pos[reg].Value() >= current->End().Value());
   TraceAlloc("Assigning blocked reg %s to live range %d\n",
              RegisterName(reg),
              current->id());
@@ -1978,7 +1963,7 @@
 
 
 void LAllocator::SplitAndSpillIntersecting(LiveRange* current) {
-  ASSERT(current->HasRegisterAssigned());
+  DCHECK(current->HasRegisterAssigned());
   int reg = current->assigned_register();
   LifetimePosition split_pos = current->Start();
   for (int i = 0; i < active_live_ranges_.length(); ++i) {
@@ -2007,7 +1992,7 @@
 
   for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
     LiveRange* range = inactive_live_ranges_[i];
-    ASSERT(range->End().Value() > current->Start().Value());
+    DCHECK(range->End().Value() > current->Start().Value());
     if (range->assigned_register() == reg && !range->IsFixed()) {
       LifetimePosition next_intersection = range->FirstIntersection(current);
       if (next_intersection.IsValid()) {
@@ -2034,14 +2019,14 @@
 
 
 LiveRange* LAllocator::SplitRangeAt(LiveRange* range, LifetimePosition pos) {
-  ASSERT(!range->IsFixed());
+  DCHECK(!range->IsFixed());
   TraceAlloc("Splitting live range %d at %d\n", range->id(), pos.Value());
 
   if (pos.Value() <= range->Start().Value()) return range;
 
   // We can't properly connect live ranges if the split occurred at the end
   // of a control instruction.
-  ASSERT(pos.IsInstructionStart() ||
+  DCHECK(pos.IsInstructionStart() ||
          !chunk_->instructions()->at(pos.InstructionIndex())->IsControl());
 
   int vreg = GetVirtualRegister();
@@ -2055,14 +2040,14 @@
 LiveRange* LAllocator::SplitBetween(LiveRange* range,
                                     LifetimePosition start,
                                     LifetimePosition end) {
-  ASSERT(!range->IsFixed());
+  DCHECK(!range->IsFixed());
   TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
              range->id(),
              start.Value(),
              end.Value());
 
   LifetimePosition split_pos = FindOptimalSplitPos(start, end);
-  ASSERT(split_pos.Value() >= start.Value());
+  DCHECK(split_pos.Value() >= start.Value());
   return SplitRangeAt(range, split_pos);
 }
 
@@ -2071,7 +2056,7 @@
                                                  LifetimePosition end) {
   int start_instr = start.InstructionIndex();
   int end_instr = end.InstructionIndex();
-  ASSERT(start_instr <= end_instr);
+  DCHECK(start_instr <= end_instr);
 
   // We have no choice: start and end are within the same instruction.
   if (start_instr == end_instr) return end;
@@ -2133,7 +2118,7 @@
         end.PrevInstruction().InstructionEnd());
     if (!AllocationOk()) return;
 
-    ASSERT(third_part != second_part);
+    DCHECK(third_part != second_part);
 
     Spill(second_part);
     AddToUnhandledSorted(third_part);
@@ -2146,7 +2131,7 @@
 
 
 void LAllocator::Spill(LiveRange* range) {
-  ASSERT(!range->IsSpilled());
+  DCHECK(!range->IsSpilled());
   TraceAlloc("Spilling live range %d\n", range->id());
   LiveRange* first = range->TopLevel();
 
@@ -2192,7 +2177,7 @@
   if (FLAG_hydrogen_stats) {
     unsigned size = allocator_->zone()->allocation_size() -
                     allocator_zone_start_allocation_size_;
-    isolate()->GetHStatistics()->SaveTiming(name(), TimeDelta(), size);
+    isolate()->GetHStatistics()->SaveTiming(name(), base::TimeDelta(), size);
   }
 
   if (ShouldProduceTraceOutput()) {
diff --git a/src/lithium-allocator.h b/src/lithium-allocator.h
index 1d313a5..f63077e 100644
--- a/src/lithium-allocator.h
+++ b/src/lithium-allocator.h
@@ -17,7 +17,6 @@
 // Forward declarations.
 class HBasicBlock;
 class HGraph;
-class HInstruction;
 class HPhi;
 class HTracer;
 class HValue;
@@ -52,7 +51,7 @@
   // Returns the index of the instruction to which this lifetime position
   // corresponds.
   int InstructionIndex() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return value_ / kStep;
   }
 
@@ -65,28 +64,28 @@
   // Returns the lifetime position for the start of the instruction which
   // corresponds to this lifetime position.
   LifetimePosition InstructionStart() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return LifetimePosition(value_ & ~(kStep - 1));
   }
 
   // Returns the lifetime position for the end of the instruction which
   // corresponds to this lifetime position.
   LifetimePosition InstructionEnd() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return LifetimePosition(InstructionStart().Value() + kStep/2);
   }
 
   // Returns the lifetime position for the beginning of the next instruction.
   LifetimePosition NextInstruction() const {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return LifetimePosition(InstructionStart().Value() + kStep);
   }
 
   // Returns the lifetime position for the beginning of the previous
   // instruction.
   LifetimePosition PrevInstruction() const {
-    ASSERT(IsValid());
-    ASSERT(value_ > 1);
+    DCHECK(IsValid());
+    DCHECK(value_ > 1);
     return LifetimePosition(InstructionStart().Value() - kStep);
   }
 
@@ -118,70 +117,12 @@
 };
 
 
-enum RegisterKind {
-  UNALLOCATED_REGISTERS,
-  GENERAL_REGISTERS,
-  DOUBLE_REGISTERS
-};
-
-
-// A register-allocator view of a Lithium instruction. It contains the id of
-// the output operand and a list of input operand uses.
-
-class LInstruction;
-class LEnvironment;
-
-// Iterator for non-null temp operands.
-class TempIterator BASE_EMBEDDED {
- public:
-  inline explicit TempIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  inline void SkipUninteresting();
-  LInstruction* instr_;
-  int limit_;
-  int current_;
-};
-
-
-// Iterator for non-constant input operands.
-class InputIterator BASE_EMBEDDED {
- public:
-  inline explicit InputIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  inline void SkipUninteresting();
-  LInstruction* instr_;
-  int limit_;
-  int current_;
-};
-
-
-class UseIterator BASE_EMBEDDED {
- public:
-  inline explicit UseIterator(LInstruction* instr);
-  inline bool Done();
-  inline LOperand* Current();
-  inline void Advance();
-
- private:
-  InputIterator input_iterator_;
-  DeepIterator env_iterator_;
-};
-
-
 // Representation of the non-empty interval [start,end[.
 class UseInterval: public ZoneObject {
  public:
   UseInterval(LifetimePosition start, LifetimePosition end)
       : start_(start), end_(end), next_(NULL) {
-    ASSERT(start.Value() < end.Value());
+    DCHECK(start.Value() < end.Value());
   }
 
   LifetimePosition start() const { return start_; }
@@ -302,7 +243,7 @@
   bool IsSpilled() const { return spilled_; }
 
   LOperand* current_hint_operand() const {
-    ASSERT(current_hint_operand_ == FirstHint());
+    DCHECK(current_hint_operand_ == FirstHint());
     return current_hint_operand_;
   }
   LOperand* FirstHint() const {
@@ -313,12 +254,12 @@
   }
 
   LifetimePosition Start() const {
-    ASSERT(!IsEmpty());
+    DCHECK(!IsEmpty());
     return first_interval()->start();
   }
 
   LifetimePosition End() const {
-    ASSERT(!IsEmpty());
+    DCHECK(!IsEmpty());
     return last_interval_->end();
   }
 
@@ -423,7 +364,7 @@
 
   void MarkAsOsrEntry() {
     // There can be only one.
-    ASSERT(!has_osr_entry_);
+    DCHECK(!has_osr_entry_);
     // Simply set a flag to find and process the instruction later.
     has_osr_entry_ = true;
   }
diff --git a/src/lithium-codegen.cc b/src/lithium-codegen.cc
index f49887d..ea6b83a 100644
--- a/src/lithium-codegen.cc
+++ b/src/lithium-codegen.cc
@@ -7,23 +7,26 @@
 #include "src/lithium-codegen.h"
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h"
-#include "src/ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-ia32.h"  // NOLINT
+#include "src/ia32/lithium-codegen-ia32.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h"
-#include "src/x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-x64.h"  // NOLINT
+#include "src/x64/lithium-codegen-x64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h"
-#include "src/arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-arm.h"  // NOLINT
+#include "src/arm/lithium-codegen-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h"
-#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/arm64/lithium-arm64.h"  // NOLINT
+#include "src/arm64/lithium-codegen-arm64.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h"
-#include "src/mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-mips.h"  // NOLINT
+#include "src/mips/lithium-codegen-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h"  // NOLINT
+#include "src/mips64/lithium-codegen-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h"
-#include "src/x87/lithium-codegen-x87.h"
+#include "src/x87/lithium-x87.h"  // NOLINT
+#include "src/x87/lithium-codegen-x87.h"  // NOLINT
 #else
 #error Unsupported target architecture.
 #endif
@@ -53,7 +56,7 @@
 
 
 bool LCodeGenBase::GenerateBody() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
   bool emit_instructions = true;
   LCodeGen* codegen = static_cast<LCodeGen*>(this);
   for (current_instruction_ = 0;
@@ -113,12 +116,12 @@
 
     HInstruction* hinstr = HInstruction::cast(hval);
     if (!hinstr->CanDeoptimize() && instr->HasEnvironment()) {
-      V8_Fatal(__FILE__, __LINE__, "CanDeoptimize is wrong for %s (%s)\n",
+      V8_Fatal(__FILE__, __LINE__, "CanDeoptimize is wrong for %s (%s)",
                hinstr->Mnemonic(), instr->Mnemonic());
     }
 
     if (instr->HasEnvironment() && !instr->environment()->has_been_used()) {
-      V8_Fatal(__FILE__, __LINE__, "unused environment for %s (%s)\n",
+      V8_Fatal(__FILE__, __LINE__, "unused environment for %s (%s)",
                hinstr->Mnemonic(), instr->Mnemonic());
     }
   }
@@ -129,7 +132,7 @@
 void LCodeGenBase::Comment(const char* format, ...) {
   if (!FLAG_code_comments) return;
   char buffer[4 * KB];
-  StringBuilder builder(buffer, ARRAY_SIZE(buffer));
+  StringBuilder builder(buffer, arraysize(buffer));
   va_list arguments;
   va_start(arguments, format);
   builder.AddFormattedList(format, arguments);
@@ -144,6 +147,15 @@
 }
 
 
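+// Emits a ";;; deoptimize at <position> <mnemonic>[: detail]" code comment
+// describing the reason for a deoptimization.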
+void LCodeGenBase::DeoptComment(const Deoptimizer::Reason& reason) {
+  OStringStream os;
+  os << ";;; deoptimize at " << HSourcePosition(reason.raw_position) << " "
+     << reason.mnemonic;
+  if (reason.detail != NULL) os << ": " << reason.detail;
+  Comment("%s", os.c_str());
+}
+
+
 int LCodeGenBase::GetNextEmittedBlock() const {
   for (int i = current_block_ + 1; i < graph()->blocks()->length(); ++i) {
     if (!graph()->blocks()->at(i)->IsReachable()) continue;
@@ -165,7 +177,7 @@
 
 
 void LCodeGenBase::RegisterWeakObjectsInOptimizedCode(Handle<Code> code) {
-  ASSERT(code->is_optimized_code());
+  DCHECK(code->is_optimized_code());
   ZoneList<Handle<Map> > maps(1, zone());
   ZoneList<Handle<JSObject> > objects(1, zone());
   ZoneList<Handle<Cell> > cells(1, zone());
@@ -214,19 +226,25 @@
 
 
 void LCodeGenBase::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
+  info()->AbortOptimization(reason);
+  status_ = ABORTED;
+}
+
+
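+// Unlike Abort(), which disables further optimization of the function,
+// Retry() records a retryable bailout so optimization can be attempted again.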
+void LCodeGenBase::Retry(BailoutReason reason) {
+  info()->RetryOptimization(reason);
   status_ = ABORTED;
 }
 
 
 void LCodeGenBase::AddDeprecationDependency(Handle<Map> map) {
-  if (map->is_deprecated()) return Abort(kMapBecameDeprecated);
+  if (map->is_deprecated()) return Retry(kMapBecameDeprecated);
   chunk_->AddDeprecationDependency(map);
 }
 
 
 void LCodeGenBase::AddStabilityDependency(Handle<Map> map) {
-  if (!map->is_stable()) return Abort(kMapBecameUnstable);
+  if (!map->is_stable()) return Retry(kMapBecameUnstable);
   chunk_->AddStabilityDependency(map);
 }
 
diff --git a/src/lithium-codegen.h b/src/lithium-codegen.h
index 1eb963e..40d4d8e 100644
--- a/src/lithium-codegen.h
+++ b/src/lithium-codegen.h
@@ -7,7 +7,9 @@
 
 #include "src/v8.h"
 
+#include "src/bailout-reason.h"
 #include "src/compiler.h"
+#include "src/deoptimizer.h"
 
 namespace v8 {
 namespace internal {
@@ -33,6 +35,7 @@
   HGraph* graph() const;
 
   void FPRINTF_CHECKING Comment(const char* format, ...);
+  void DeoptComment(const Deoptimizer::Reason& reason);
 
   bool GenerateBody();
   virtual void GenerateBodyInstructionPre(LInstruction* instr) {}
@@ -74,6 +77,7 @@
   bool is_aborted() const { return status_ == ABORTED; }
 
   void Abort(BailoutReason reason);
+  void Retry(BailoutReason reason);
 
   // Methods for code dependencies.
   void AddDeprecationDependency(Handle<Map> map);
diff --git a/src/lithium-inl.h b/src/lithium-inl.h
new file mode 100644
index 0000000..36e166e
--- /dev/null
+++ b/src/lithium-inl.h
@@ -0,0 +1,112 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LITHIUM_INL_H_
+#define V8_LITHIUM_INL_H_
+
+#include "src/lithium.h"
+
+#if V8_TARGET_ARCH_IA32
+#include "src/ia32/lithium-ia32.h"  // NOLINT
+#elif V8_TARGET_ARCH_X64
+#include "src/x64/lithium-x64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/lithium-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM
+#include "src/arm/lithium-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/lithium-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h"  // NOLINT
+#elif V8_TARGET_ARCH_X87
+#include "src/x87/lithium-x87.h"  // NOLINT
+#else
+#error "Unknown architecture."
+#endif
+
+namespace v8 {
+namespace internal {
+
+TempIterator::TempIterator(LInstruction* instr)
+    : instr_(instr), limit_(instr->TempCount()), current_(0) {
+  SkipUninteresting();
+}
+
+
+bool TempIterator::Done() { return current_ >= limit_; }
+
+
+LOperand* TempIterator::Current() {
+  DCHECK(!Done());
+  return instr_->TempAt(current_);
+}
+
+
+void TempIterator::SkipUninteresting() {
+  while (current_ < limit_ && instr_->TempAt(current_) == NULL) ++current_;
+}
+
+
+void TempIterator::Advance() {
+  ++current_;
+  SkipUninteresting();
+}
+
+
+InputIterator::InputIterator(LInstruction* instr)
+    : instr_(instr), limit_(instr->InputCount()), current_(0) {
+  SkipUninteresting();
+}
+
+
+bool InputIterator::Done() { return current_ >= limit_; }
+
+
+LOperand* InputIterator::Current() {
+  DCHECK(!Done());
+  DCHECK(instr_->InputAt(current_) != NULL);
+  return instr_->InputAt(current_);
+}
+
+
+void InputIterator::Advance() {
+  ++current_;
+  SkipUninteresting();
+}
+
+
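+// Inputs that are NULL or constant operands are of no interest to the
+// register allocator, so skip past them.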
+void InputIterator::SkipUninteresting() {
+  while (current_ < limit_) {
+    LOperand* current = instr_->InputAt(current_);
+    if (current != NULL && !current->IsConstantOperand()) break;
+    ++current_;
+  }
+}
+
+
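+// UseIterator walks the instruction's inputs first, then the operands
+// recorded in its environment.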
+UseIterator::UseIterator(LInstruction* instr)
+    : input_iterator_(instr), env_iterator_(instr->environment()) {}
+
+
+bool UseIterator::Done() {
+  return input_iterator_.Done() && env_iterator_.Done();
+}
+
+
+LOperand* UseIterator::Current() {
+  DCHECK(!Done());
+  LOperand* result = input_iterator_.Done() ? env_iterator_.Current()
+                                            : input_iterator_.Current();
+  DCHECK(result != NULL);
+  return result;
+}
+
+
+void UseIterator::Advance() {
+  input_iterator_.Done() ? env_iterator_.Advance() : input_iterator_.Advance();
+}
+} }  // namespace v8::internal
+
+#endif  // V8_LITHIUM_INL_H_
diff --git a/src/lithium.cc b/src/lithium.cc
index b292b4f..7d992a1 100644
--- a/src/lithium.cc
+++ b/src/lithium.cc
@@ -2,29 +2,34 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
 #include "src/lithium.h"
+
+#include "src/v8.h"
+
 #include "src/scopes.h"
 #include "src/serialize.h"
 
 #if V8_TARGET_ARCH_IA32
-#include "src/ia32/lithium-ia32.h"
-#include "src/ia32/lithium-codegen-ia32.h"
+#include "src/ia32/lithium-ia32.h"  // NOLINT
+#include "src/ia32/lithium-codegen-ia32.h"  // NOLINT
 #elif V8_TARGET_ARCH_X64
-#include "src/x64/lithium-x64.h"
-#include "src/x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-x64.h"  // NOLINT
+#include "src/x64/lithium-codegen-x64.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM
-#include "src/arm/lithium-arm.h"
-#include "src/arm/lithium-codegen-arm.h"
+#include "src/arm/lithium-arm.h"  // NOLINT
+#include "src/arm/lithium-codegen-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
-#include "src/mips/lithium-mips.h"
-#include "src/mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-mips.h"  // NOLINT
+#include "src/mips/lithium-codegen-mips.h"  // NOLINT
 #elif V8_TARGET_ARCH_ARM64
-#include "src/arm64/lithium-arm64.h"
-#include "src/arm64/lithium-codegen-arm64.h"
+#include "src/arm64/lithium-arm64.h"  // NOLINT
+#include "src/arm64/lithium-codegen-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/lithium-mips64.h"  // NOLINT
+#include "src/mips64/lithium-codegen-mips64.h"  // NOLINT
 #elif V8_TARGET_ARCH_X87
-#include "src/x87/lithium-x87.h"
-#include "src/x87/lithium-codegen-x87.h"
+#include "src/x87/lithium-x87.h"  // NOLINT
+#include "src/x87/lithium-codegen-x87.h"  // NOLINT
 #else
 #error "Unknown architecture."
 #endif
@@ -51,16 +56,26 @@
           break;
         case LUnallocated::FIXED_REGISTER: {
           int reg_index = unalloc->fixed_register_index();
-          const char* register_name =
-              Register::AllocationIndexToString(reg_index);
-          stream->Add("(=%s)", register_name);
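+          // Defend against an out-of-range index before looking up its name.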
+          if (reg_index < 0 ||
+              reg_index >= Register::kMaxNumAllocatableRegisters) {
+            stream->Add("(=invalid_reg#%d)", reg_index);
+          } else {
+            const char* register_name =
+                Register::AllocationIndexToString(reg_index);
+            stream->Add("(=%s)", register_name);
+          }
           break;
         }
         case LUnallocated::FIXED_DOUBLE_REGISTER: {
           int reg_index = unalloc->fixed_register_index();
-          const char* double_register_name =
-              DoubleRegister::AllocationIndexToString(reg_index);
-          stream->Add("(=%s)", double_register_name);
+          if (reg_index < 0 ||
+              reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+            stream->Add("(=invalid_double_reg#%d)", reg_index);
+          } else {
+            const char* double_register_name =
+                DoubleRegister::AllocationIndexToString(reg_index);
+            stream->Add("(=%s)", double_register_name);
+          }
           break;
         }
         case LUnallocated::MUST_HAVE_REGISTER:
@@ -89,12 +104,26 @@
     case DOUBLE_STACK_SLOT:
       stream->Add("[double_stack:%d]", index());
       break;
-    case REGISTER:
-      stream->Add("[%s|R]", Register::AllocationIndexToString(index()));
+    case REGISTER: {
+      int reg_index = index();
+      if (reg_index < 0 || reg_index >= Register::kMaxNumAllocatableRegisters) {
+        stream->Add("(=invalid_reg#%d|R)", reg_index);
+      } else {
+        stream->Add("[%s|R]", Register::AllocationIndexToString(reg_index));
+      }
       break;
-    case DOUBLE_REGISTER:
-      stream->Add("[%s|R]", DoubleRegister::AllocationIndexToString(index()));
+    }
+    case DOUBLE_REGISTER: {
+      int reg_index = index();
+      if (reg_index < 0 ||
+          reg_index >= DoubleRegister::kMaxNumAllocatableRegisters) {
+        stream->Add("(=invalid_double_reg#%d|R)", reg_index);
+      } else {
+        stream->Add("[%s|R]",
+                    DoubleRegister::AllocationIndexToString(reg_index));
+      }
       break;
+    }
   }
 }
 
@@ -117,6 +146,7 @@
 template<LOperand::Kind kOperandKind, int kNumCachedOperands>
 void LSubKindOperand<kOperandKind, kNumCachedOperands>::TearDownCache() {
   delete[] cache;
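+  // Null the pointer so a repeated TearDownCache() is harmless.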
+  cache = NULL;
 }
 
 
@@ -185,7 +215,7 @@
 void LPointerMap::RecordPointer(LOperand* op, Zone* zone) {
   // Do not record arguments as pointers.
   if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
   pointer_operands_.Add(op, zone);
 }
 
@@ -193,7 +223,7 @@
 void LPointerMap::RemovePointer(LOperand* op) {
   // Do not record arguments as pointers.
   if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
   for (int i = 0; i < pointer_operands_.length(); ++i) {
     if (pointer_operands_[i]->Equals(op)) {
       pointer_operands_.Remove(i);
@@ -206,7 +236,7 @@
 void LPointerMap::RecordUntagged(LOperand* op, Zone* zone) {
   // Do not record arguments as pointers.
   if (op->IsStackSlot() && op->index() < 0) return;
-  ASSERT(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
+  DCHECK(!op->IsDoubleRegister() && !op->IsDoubleStackSlot());
   untagged_operands_.Add(op, zone);
 }
 
@@ -238,12 +268,11 @@
     : spill_slot_count_(0),
       info_(info),
       graph_(graph),
-      instructions_(32, graph->zone()),
-      pointer_maps_(8, graph->zone()),
-      inlined_closures_(1, graph->zone()),
-      deprecation_dependencies_(MapLess(), MapAllocator(graph->zone())),
-      stability_dependencies_(MapLess(), MapAllocator(graph->zone())) {
-}
+      instructions_(32, info->zone()),
+      pointer_maps_(8, info->zone()),
+      inlined_closures_(1, info->zone()),
+      deprecation_dependencies_(MapLess(), MapAllocator(info->zone())),
+      stability_dependencies_(MapLess(), MapAllocator(info->zone())) {}
 
 
 LLabel* LChunk::GetLabel(int block_id) const {
@@ -263,7 +292,7 @@
 
 Label* LChunk::GetAssemblyLabel(int block_id) const {
   LLabel* label = GetLabel(block_id);
-  ASSERT(!label->HasReplacement());
+  DCHECK(!label->HasReplacement());
   return label->label();
 }
 
@@ -304,7 +333,7 @@
 
 
 void LChunk::AddInstruction(LInstruction* instr, HBasicBlock* block) {
-  LInstructionGap* gap = new(graph_->zone()) LInstructionGap(block);
+  LInstructionGap* gap = new (zone()) LInstructionGap(block);
   gap->set_hydrogen_value(instr->hydrogen_value());
   int index = -1;
   if (instr->IsControl()) {
@@ -335,14 +364,14 @@
   // spill slots.
   int result = index - info()->num_parameters() - 1;
 
-  ASSERT(result < 0);
+  DCHECK(result < 0);
   return result;
 }
 
 
 // A parameter relative to ebp in the arguments stub.
 int LChunk::ParameterAt(int index) {
-  ASSERT(-1 <= index);  // -1 is the receiver.
+  DCHECK(-1 <= index);  // -1 is the receiver.
   return (1 + info()->scope()->num_parameters() - index) *
       kPointerSize;
 }
@@ -385,16 +414,16 @@
   for (MapSet::const_iterator it = deprecation_dependencies_.begin(),
        iend = deprecation_dependencies_.end(); it != iend; ++it) {
     Handle<Map> map = *it;
-    ASSERT(!map->is_deprecated());
-    ASSERT(map->CanBeDeprecated());
+    DCHECK(!map->is_deprecated());
+    DCHECK(map->CanBeDeprecated());
     Map::AddDependentCode(map, DependentCode::kTransitionGroup, code);
   }
 
   for (MapSet::const_iterator it = stability_dependencies_.begin(),
        iend = stability_dependencies_.end(); it != iend; ++it) {
     Handle<Map> map = *it;
-    ASSERT(map->is_stable());
-    ASSERT(map->CanTransition());
+    DCHECK(map->is_stable());
+    DCHECK(map->CanTransition());
     Map::AddDependentCode(map, DependentCode::kPrototypeCheckGroup, code);
   }
 
@@ -409,7 +438,7 @@
   int values = graph->GetMaximumValueID();
   CompilationInfo* info = graph->info();
   if (values > LUnallocated::kMaxVirtualRegisters) {
-    info->set_bailout_reason(kNotEnoughVirtualRegistersForValues);
+    info->AbortOptimization(kNotEnoughVirtualRegistersForValues);
     return NULL;
   }
   LAllocator allocator(values, graph);
@@ -418,7 +447,7 @@
   if (chunk == NULL) return NULL;
 
   if (!allocator.Allocate(chunk)) {
-    info->set_bailout_reason(kNotEnoughVirtualRegistersRegalloc);
+    info->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
     return NULL;
   }
 
@@ -434,6 +463,8 @@
   LOG_CODE_EVENT(info()->isolate(),
                  CodeStartLinePosInfoRecordEvent(
                      assembler.positions_recorder()));
+  // TODO(yangguo): remove this once the code serializer handles code stubs.
+  if (info()->will_serialize()) assembler.enable_serializer();
   LCodeGen generator(this, &assembler, info());
 
   MarkEmptyBlocks();
@@ -453,7 +484,7 @@
                    CodeEndLinePosInfoRecordEvent(*code, jit_handler_data));
 
     CodeGenerator::PrintCode(code, info());
-    ASSERT(!(info()->isolate()->serializer_enabled() &&
+    DCHECK(!(info()->isolate()->serializer_enabled() &&
              info()->GetMustNotHaveEagerFrame() &&
              generator.NeedsEagerFrame()));
     return code;
@@ -480,19 +511,35 @@
 }
 
 
+void LChunkBuilderBase::Abort(BailoutReason reason) {
+  info()->AbortOptimization(reason);
+  status_ = ABORTED;
+}
+
+
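+// As in LCodeGenBase: Retry() records a retryable bailout rather than
+// permanently disabling optimization.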
+void LChunkBuilderBase::Retry(BailoutReason reason) {
+  info()->RetryOptimization(reason);
+  status_ = ABORTED;
+}
+
+
 LEnvironment* LChunkBuilderBase::CreateEnvironment(
-    HEnvironment* hydrogen_env,
-    int* argument_index_accumulator,
+    HEnvironment* hydrogen_env, int* argument_index_accumulator,
     ZoneList<HValue*>* objects_to_materialize) {
   if (hydrogen_env == NULL) return NULL;
 
-  LEnvironment* outer = CreateEnvironment(hydrogen_env->outer(),
-                                          argument_index_accumulator,
-                                          objects_to_materialize);
+  LEnvironment* outer =
+      CreateEnvironment(hydrogen_env->outer(), argument_index_accumulator,
+                        objects_to_materialize);
   BailoutId ast_id = hydrogen_env->ast_id();
-  ASSERT(!ast_id.IsNone() ||
+  DCHECK(!ast_id.IsNone() ||
          hydrogen_env->frame_type() != JS_FUNCTION);
-  int value_count = hydrogen_env->length() - hydrogen_env->specials_count();
+
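+  // Special values are translated only for JS function frames; all other
+  // frame types omit them from the environment's value count.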
+  int omitted_count = (hydrogen_env->frame_type() == JS_FUNCTION)
+                          ? 0
+                          : hydrogen_env->specials_count();
+
+  int value_count = hydrogen_env->length() - omitted_count;
   LEnvironment* result =
       new(zone()) LEnvironment(hydrogen_env->closure(),
                                hydrogen_env->frame_type(),
@@ -508,8 +555,10 @@
   // Store the environment description into the environment
   // (with holes for nested objects)
   for (int i = 0; i < hydrogen_env->length(); ++i) {
-    if (hydrogen_env->is_special_index(i)) continue;
-
+    if (hydrogen_env->is_special_index(i) &&
+        hydrogen_env->frame_type() != JS_FUNCTION) {
+      continue;
+    }
     LOperand* op;
     HValue* value = hydrogen_env->values()->at(i);
     CHECK(!value->IsPushArguments());  // Do not deopt outgoing arguments
@@ -593,7 +642,7 @@
       // Insert a hole for nested objects
       op = LEnvironment::materialization_marker();
     } else {
-      ASSERT(!arg_value->IsPushArguments());
+      DCHECK(!arg_value->IsPushArguments());
       // For ordinary values, tell the register allocator we need the value
       // to be alive here
       op = UseAny(arg_value);
diff --git a/src/lithium.h b/src/lithium.h
index 8aeebe6..83f760d 100644
--- a/src/lithium.h
+++ b/src/lithium.h
@@ -8,6 +8,7 @@
 #include <set>
 
 #include "src/allocation.h"
+#include "src/bailout-reason.h"
 #include "src/hydrogen.h"
 #include "src/safepoint-table.h"
 #include "src/zone-allocator.h"
@@ -22,7 +23,6 @@
   V(Register,        REGISTER,          16)   \
   V(DoubleRegister,  DOUBLE_REGISTER,   16)
 
-
 class LOperand : public ZoneObject {
  public:
   enum Kind {
@@ -49,9 +49,10 @@
 
   void PrintTo(StringStream* stream);
   void ConvertTo(Kind kind, int index) {
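+    // Register indices must be non-negative; stack slot indices may be
+    // negative (e.g. for parameters).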
+    if (kind == REGISTER) DCHECK(index >= 0);
     value_ = KindField::encode(kind);
     value_ |= index << kKindFieldWidth;
-    ASSERT(this->index() == index);
+    DCHECK(this->index() == index);
   }
 
   // Calls SetUpCache()/TearDownCache() for each subclass.
@@ -107,14 +108,14 @@
   }
 
   LUnallocated(BasicPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
-    ASSERT(policy == FIXED_SLOT);
+    DCHECK(policy == FIXED_SLOT);
     value_ |= BasicPolicyField::encode(policy);
     value_ |= index << FixedSlotIndexField::kShift;
-    ASSERT(this->fixed_slot_index() == index);
+    DCHECK(this->fixed_slot_index() == index);
   }
 
   LUnallocated(ExtendedPolicy policy, int index) : LOperand(UNALLOCATED, 0) {
-    ASSERT(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(USED_AT_END);
@@ -135,7 +136,7 @@
   }
 
   static LUnallocated* cast(LOperand* op) {
-    ASSERT(op->IsUnallocated());
+    DCHECK(op->IsUnallocated());
     return reinterpret_cast<LUnallocated*>(op);
   }
 
@@ -222,19 +223,19 @@
 
   // [extended_policy]: Only for non-FIXED_SLOT. The finer-grained policy.
   ExtendedPolicy extended_policy() const {
-    ASSERT(basic_policy() == EXTENDED_POLICY);
+    DCHECK(basic_policy() == EXTENDED_POLICY);
     return ExtendedPolicyField::decode(value_);
   }
 
   // [fixed_slot_index]: Only for FIXED_SLOT.
   int fixed_slot_index() const {
-    ASSERT(HasFixedSlotPolicy());
+    DCHECK(HasFixedSlotPolicy());
     return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
   }
 
   // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
   int fixed_register_index() const {
-    ASSERT(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
+    DCHECK(HasFixedRegisterPolicy() || HasFixedDoubleRegisterPolicy());
     return FixedRegisterField::decode(value_);
   }
 
@@ -248,13 +249,13 @@
 
   // [lifetime]: Only for non-FIXED_SLOT.
   bool IsUsedAtStart() {
-    ASSERT(basic_policy() == EXTENDED_POLICY);
+    DCHECK(basic_policy() == EXTENDED_POLICY);
     return LifetimeField::decode(value_) == USED_AT_START;
   }
 };
 
 
-class LMoveOperands V8_FINAL BASE_EMBEDDED {
+class LMoveOperands FINAL BASE_EMBEDDED {
  public:
   LMoveOperands(LOperand* source, LOperand* destination)
       : source_(source), destination_(destination) {
@@ -278,9 +279,10 @@
   }
 
   // A move is redundant if it's been eliminated, if its source and
-  // destination are the same, or if its destination is unneeded.
+  // destination are the same, or if its destination is unneeded or constant.
   bool IsRedundant() const {
-    return IsEliminated() || source_->Equals(destination_) || IsIgnored();
+    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+           (destination_ != NULL && destination_->IsConstantOperand());
   }
 
   bool IsIgnored() const {
@@ -290,7 +292,7 @@
   // We clear both operands to indicate move that's been eliminated.
   void Eliminate() { source_ = destination_ = NULL; }
   bool IsEliminated() const {
-    ASSERT(source_ != NULL || destination_ == NULL);
+    DCHECK(source_ != NULL || destination_ == NULL);
     return source_ == NULL;
   }
 
@@ -301,16 +303,16 @@
 
 
 template<LOperand::Kind kOperandKind, int kNumCachedOperands>
-class LSubKindOperand V8_FINAL : public LOperand {
+class LSubKindOperand FINAL : public LOperand {
  public:
   static LSubKindOperand* Create(int index, Zone* zone) {
-    ASSERT(index >= 0);
+    DCHECK(index >= 0);
     if (index < kNumCachedOperands) return &cache[index];
     return new(zone) LSubKindOperand(index);
   }
 
   static LSubKindOperand* cast(LOperand* op) {
-    ASSERT(op->kind() == kOperandKind);
+    DCHECK(op->kind() == kOperandKind);
     return reinterpret_cast<LSubKindOperand*>(op);
   }
 
@@ -331,7 +333,7 @@
 #undef LITHIUM_TYPEDEF_SUBKIND_OPERAND_CLASS
 
 
-class LParallelMove V8_FINAL : public ZoneObject {
+class LParallelMove FINAL : public ZoneObject {
  public:
   explicit LParallelMove(Zone* zone) : move_operands_(4, zone) { }
 
@@ -341,9 +343,7 @@
 
   bool IsRedundant() const;
 
-  const ZoneList<LMoveOperands>* move_operands() const {
-    return &move_operands_;
-  }
+  ZoneList<LMoveOperands>* move_operands() { return &move_operands_; }
 
   void PrintDataTo(StringStream* stream) const;
 
@@ -352,7 +352,7 @@
 };
 
 
-class LPointerMap V8_FINAL : public ZoneObject {
+class LPointerMap FINAL : public ZoneObject {
  public:
   explicit LPointerMap(Zone* zone)
       : pointer_operands_(8, zone),
@@ -369,7 +369,7 @@
   int lithium_position() const { return lithium_position_; }
 
   void set_lithium_position(int pos) {
-    ASSERT(lithium_position_ == -1);
+    DCHECK(lithium_position_ == -1);
     lithium_position_ = pos;
   }
 
@@ -385,7 +385,7 @@
 };
 
 
-class LEnvironment V8_FINAL : public ZoneObject {
+class LEnvironment FINAL : public ZoneObject {
  public:
   LEnvironment(Handle<JSFunction> closure,
                FrameType frame_type,
@@ -436,7 +436,7 @@
                 bool is_uint32) {
     values_.Add(operand, zone());
     if (representation.IsSmiOrTagged()) {
-      ASSERT(!is_uint32);
+      DCHECK(!is_uint32);
       is_tagged_.Add(values_.length() - 1, zone());
     }
 
@@ -467,17 +467,17 @@
   }
 
   int ObjectDuplicateOfAt(int index) {
-    ASSERT(ObjectIsDuplicateAt(index));
+    DCHECK(ObjectIsDuplicateAt(index));
     return LengthOrDupeField::decode(object_mapping_[index]);
   }
 
   int ObjectLengthAt(int index) {
-    ASSERT(!ObjectIsDuplicateAt(index));
+    DCHECK(!ObjectIsDuplicateAt(index));
     return LengthOrDupeField::decode(object_mapping_[index]);
   }
 
   bool ObjectIsArgumentsAt(int index) {
-    ASSERT(!ObjectIsDuplicateAt(index));
+    DCHECK(!ObjectIsDuplicateAt(index));
     return IsArgumentsField::decode(object_mapping_[index]);
   }
 
@@ -488,7 +488,7 @@
   void Register(int deoptimization_index,
                 int translation_index,
                 int pc_offset) {
-    ASSERT(!HasBeenRegistered());
+    DCHECK(!HasBeenRegistered());
     deoptimization_index_ = deoptimization_index;
     translation_index_ = translation_index;
     pc_offset_ = pc_offset;
@@ -535,7 +535,7 @@
 
 
 // Iterates over the non-null, non-constant operands in an environment.
-class ShallowIterator V8_FINAL BASE_EMBEDDED {
+class ShallowIterator FINAL BASE_EMBEDDED {
  public:
   explicit ShallowIterator(LEnvironment* env)
       : env_(env),
@@ -547,13 +547,13 @@
   bool Done() { return current_ >= limit_; }
 
   LOperand* Current() {
-    ASSERT(!Done());
-    ASSERT(env_->values()->at(current_) != NULL);
+    DCHECK(!Done());
+    DCHECK(env_->values()->at(current_) != NULL);
     return env_->values()->at(current_);
   }
 
   void Advance() {
-    ASSERT(!Done());
+    DCHECK(!Done());
     ++current_;
     SkipUninteresting();
   }
@@ -579,7 +579,7 @@
 
 
 // Iterator for non-null, non-constant operands incl. outer environments.
-class DeepIterator V8_FINAL BASE_EMBEDDED {
+class DeepIterator FINAL BASE_EMBEDDED {
  public:
   explicit DeepIterator(LEnvironment* env)
       : current_iterator_(env) {
@@ -589,8 +589,8 @@
   bool Done() { return current_iterator_.Done(); }
 
   LOperand* Current() {
-    ASSERT(!current_iterator_.Done());
-    ASSERT(current_iterator_.Current() != NULL);
+    DCHECK(!current_iterator_.Done());
+    DCHECK(current_iterator_.Current() != NULL);
     return current_iterator_.Current();
   }
 
@@ -651,16 +651,16 @@
   }
 
   void AddDeprecationDependency(Handle<Map> map) {
-    ASSERT(!map->is_deprecated());
+    DCHECK(!map->is_deprecated());
     if (!map->CanBeDeprecated()) return;
-    ASSERT(!info_->IsStub());
+    DCHECK(!info_->IsStub());
     deprecation_dependencies_.insert(map);
   }
 
   void AddStabilityDependency(Handle<Map> map) {
-    ASSERT(map->is_stable());
+    DCHECK(map->is_stable());
     if (!map->CanTransition()) return;
-    ASSERT(!info_->IsStub());
+    DCHECK(!info_->IsStub());
     stability_dependencies_.insert(map);
   }
 
@@ -698,13 +698,34 @@
 
 class LChunkBuilderBase BASE_EMBEDDED {
  public:
-  explicit LChunkBuilderBase(Zone* zone)
+  explicit LChunkBuilderBase(CompilationInfo* info, HGraph* graph)
       : argument_count_(0),
-        zone_(zone) { }
+        chunk_(NULL),
+        info_(info),
+        graph_(graph),
+        status_(UNUSED),
+        zone_(graph->zone()) {}
 
   virtual ~LChunkBuilderBase() { }
 
+  void Abort(BailoutReason reason);
+  void Retry(BailoutReason reason);
+
  protected:
+  enum Status { UNUSED, BUILDING, DONE, ABORTED };
+
+  LPlatformChunk* chunk() const { return chunk_; }
+  CompilationInfo* info() const { return info_; }
+  HGraph* graph() const { return graph_; }
+  int argument_count() const { return argument_count_; }
+  Isolate* isolate() const { return graph_->isolate(); }
+  Heap* heap() const { return isolate()->heap(); }
+
+  bool is_unused() const { return status_ == UNUSED; }
+  bool is_building() const { return status_ == BUILDING; }
+  bool is_done() const { return status_ == DONE; }
+  bool is_aborted() const { return status_ == ABORTED; }
+
   // An input operand in a register, a stack slot, or a constant operand.
   // Will not be moved to a register even if one is freely available.
   virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) = 0;
@@ -719,6 +740,10 @@
   Zone* zone() const { return zone_; }
 
   int argument_count_;
+  LPlatformChunk* chunk_;
+  CompilationInfo* info_;
+  HGraph* const graph_;
+  Status status_;
 
  private:
   Zone* zone_;
@@ -747,6 +772,61 @@
 };
 
 
+// A register-allocator view of a Lithium instruction. It contains the id of
+// the output operand and a list of input operand uses.
+
+enum RegisterKind {
+  UNALLOCATED_REGISTERS,
+  GENERAL_REGISTERS,
+  DOUBLE_REGISTERS
+};
+
+// Iterator for non-null temp operands.
+class TempIterator BASE_EMBEDDED {
+ public:
+  inline explicit TempIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  inline void SkipUninteresting();
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
+
+
+// Iterator for non-constant input operands.
+class InputIterator BASE_EMBEDDED {
+ public:
+  inline explicit InputIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  inline void SkipUninteresting();
+  LInstruction* instr_;
+  int limit_;
+  int current_;
+};
+
+
+class UseIterator BASE_EMBEDDED {
+ public:
+  inline explicit UseIterator(LInstruction* instr);
+  inline bool Done();
+  inline LOperand* Current();
+  inline void Advance();
+
+ private:
+  InputIterator input_iterator_;
+  DeepIterator env_iterator_;
+};
+
+class LInstruction;
+class LCodeGen;
 } }  // namespace v8::internal
 
 #endif  // V8_LITHIUM_H_
diff --git a/src/liveedit.cc b/src/liveedit.cc
index 05bd550..a87c31b 100644
--- a/src/liveedit.cc
+++ b/src/liveedit.cc
@@ -161,7 +161,7 @@
 
   // Each cell keeps a value plus direction. Value is multiplied by 4.
   void set_value4_and_dir(int i1, int i2, int value4, Direction dir) {
-    ASSERT((value4 & kDirectionMask) == 0);
+    DCHECK((value4 & kDirectionMask) == 0);
     get_cell(i1, i2) = value4 | dir;
   }
 
@@ -174,7 +174,7 @@
 
   static const int kDirectionSizeBits = 2;
   static const int kDirectionMask = (1 << kDirectionSizeBits) - 1;
-  static const int kEmptyCellValue = -1 << kDirectionSizeBits;
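+  // Shift an unsigned constant: left-shifting a negative value is undefined.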
+  static const int kEmptyCellValue = ~0u << kDirectionSizeBits;
 
   // This method only holds a static assert statement (unfortunately you
   // cannot place one in class scope).
@@ -649,15 +649,15 @@
 }
 
 
-Handle<FixedArray> FunctionInfoWrapper::GetFeedbackVector() {
+Handle<TypeFeedbackVector> FunctionInfoWrapper::GetFeedbackVector() {
   Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
-  Handle<FixedArray> result;
+  Handle<TypeFeedbackVector> result;
   if (element->IsJSValue()) {
     Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
     Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
     Handle<SharedFunctionInfo> shared =
         Handle<SharedFunctionInfo>::cast(raw_result);
-    result = Handle<FixedArray>(shared->feedback_vector(), isolate());
+    result = Handle<TypeFeedbackVector>(shared->feedback_vector(), isolate());
     CHECK_EQ(result->length(), GetSlotCount());
   } else {
     // Scripts may never have a SharedFunctionInfo created, so
@@ -875,22 +875,22 @@
 
       Factory* factory = isolate->factory();
       Handle<String> start_pos_key = factory->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("startPosition"));
-      Handle<String> end_pos_key = factory->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("endPosition"));
-      Handle<String> script_obj_key = factory->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("scriptObject"));
+          STATIC_CHAR_VECTOR("startPosition"));
+      Handle<String> end_pos_key =
+          factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("endPosition"));
+      Handle<String> script_obj_key =
+          factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("scriptObject"));
       Handle<Smi> start_pos(
           Smi::FromInt(message_location.start_pos()), isolate);
       Handle<Smi> end_pos(Smi::FromInt(message_location.end_pos()), isolate);
       Handle<JSObject> script_obj =
           Script::GetWrapper(message_location.script());
-      JSReceiver::SetProperty(
-          rethrow_exception, start_pos_key, start_pos, NONE, SLOPPY).Assert();
-      JSReceiver::SetProperty(
-          rethrow_exception, end_pos_key, end_pos, NONE, SLOPPY).Assert();
-      JSReceiver::SetProperty(
-          rethrow_exception, script_obj_key, script_obj, NONE, SLOPPY).Assert();
+      Object::SetProperty(rethrow_exception, start_pos_key, start_pos, SLOPPY)
+          .Assert();
+      Object::SetProperty(rethrow_exception, end_pos_key, end_pos, SLOPPY)
+          .Assert();
+      Object::SetProperty(rethrow_exception, script_obj_key, script_obj, SLOPPY)
+          .Assert();
     }
   }
 
@@ -976,7 +976,7 @@
   Heap* heap = original->GetHeap();
   HeapIterator iterator(heap);
 
-  ASSERT(!heap->InNewSpace(*substitution));
+  DCHECK(!heap->InNewSpace(*substitution));
 
   ReplacingVisitor visitor(*original, *substitution);
 
@@ -1160,7 +1160,7 @@
   virtual void LeaveContext(Context* context)  { }  // Don't care.
   virtual void VisitFunction(JSFunction* function) {
     // It should be guaranteed by the iterator that everything is optimized.
-    ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+    DCHECK(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
     if (shared_info_ == function->shared() ||
         IsInlined(function, shared_info_)) {
       // Mark the code for deoptimization.
@@ -1203,7 +1203,7 @@
     }
     shared_info->DisableOptimization(kLiveEdit);
     // Update the type feedback vector
-    Handle<FixedArray> feedback_vector =
+    Handle<TypeFeedbackVector> feedback_vector =
         compile_info_wrapper.GetFeedbackVector();
     shared_info->set_feedback_vector(*feedback_vector);
   }
@@ -1279,7 +1279,7 @@
     CHECK(element->IsSmi());
     int chunk_end = Handle<Smi>::cast(element)->value();
     // Position mustn't be inside a chunk.
-    ASSERT(original_position >= chunk_end);
+    DCHECK(original_position >= chunk_end);
     element = Object::GetElement(
         isolate, position_change_array, i + 2).ToHandleChecked();
     CHECK(element->IsSmi());
@@ -1477,7 +1477,7 @@
     Handle<Script> old_script = CreateScriptCopy(original_script);
     old_script->set_name(String::cast(*old_script_name));
     old_script_object = old_script;
-    isolate->debug()->OnAfterCompile(old_script, Debug::SEND_WHEN_DEBUGGING);
+    isolate->debug()->OnAfterCompile(old_script);
   } else {
     old_script_object = isolate->factory()->null_value();
   }
@@ -1579,7 +1579,7 @@
 //   -- frame base
 static Object** SetUpFrameDropperFrame(StackFrame* bottom_js_frame,
                                        Handle<Code> code) {
-  ASSERT(bottom_js_frame->is_java_script());
+  DCHECK(bottom_js_frame->is_java_script());
 
   Address fp = bottom_js_frame->fp();
 
@@ -1613,7 +1613,7 @@
   StackFrame* top_frame = frames[top_frame_index];
   StackFrame* bottom_js_frame = frames[bottom_js_frame_index];
 
-  ASSERT(bottom_js_frame->is_java_script());
+  DCHECK(bottom_js_frame->is_java_script());
 
   // Check the nature of the top frame.
   Isolate* isolate = bottom_js_frame->isolate();
@@ -1638,7 +1638,7 @@
              isolate->builtins()->builtin(Builtins::kReturn_DebugBreak)) {
     *mode = LiveEdit::FRAME_DROPPED_IN_RETURN_CALL;
   } else if (pre_top_frame_code->kind() == Code::STUB &&
-      pre_top_frame_code->major_key() == CodeStub::CEntry) {
+             CodeStub::GetMajorKey(pre_top_frame_code) == CodeStub::CEntry) {
     // Entry from our unit tests on 'debugger' statement.
     // It's fine, we support this case.
     *mode = LiveEdit::FRAME_DROPPED_IN_DIRECT_CALL;
@@ -1649,7 +1649,7 @@
   } else if (pre_top_frame->type() == StackFrame::ARGUMENTS_ADAPTOR) {
     // This must be an adaptor remaining from the frame dropping that is
     // still on the stack. A frame dropper frame must be above it.
-    ASSERT(frames[top_frame_index - 2]->LookupCode() ==
+    DCHECK(frames[top_frame_index - 2]->LookupCode() ==
            isolate->builtins()->builtin(Builtins::kFrameDropper_LiveEdit));
     pre_top_frame = frames[top_frame_index - 3];
     top_frame = frames[top_frame_index - 2];
@@ -1667,7 +1667,7 @@
   Address* top_frame_pc_address = top_frame->pc_address();
 
   // top_frame may be damaged below this point. Do not use it.
-  ASSERT(!(top_frame = NULL));
+  DCHECK(!(top_frame = NULL));
 
   if (unused_stack_top > unused_stack_bottom) {
     if (frame_has_padding) {
@@ -1712,7 +1712,7 @@
 
   FixTryCatchHandler(pre_top_frame, bottom_js_frame);
   // Make sure FixTryCatchHandler is idempotent.
-  ASSERT(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
+  DCHECK(!FixTryCatchHandler(pre_top_frame, bottom_js_frame));
 
   Handle<Code> code = isolate->builtins()->FrameDropper_LiveEdit();
   *top_frame_pc_address = code->entry();
@@ -1721,7 +1721,7 @@
   *restarter_frame_function_pointer =
       SetUpFrameDropperFrame(bottom_js_frame, code);
 
-  ASSERT((**restarter_frame_function_pointer)->IsJSFunction());
+  DCHECK((**restarter_frame_function_pointer)->IsJSFunction());
 
   for (Address a = unused_stack_top;
       a < unused_stack_bottom;
@@ -1895,7 +1895,7 @@
   Isolate* isolate = shared_info_array->GetIsolate();
   bool found_suspended_activations = false;
 
-  ASSERT_LE(len, result->length());
+  DCHECK_LE(len, result->length());
 
   FunctionPatchabilityStatus active = FUNCTION_BLOCKED_ACTIVE_GENERATOR;
 
@@ -1957,7 +1957,7 @@
   Isolate* isolate = shared_info_array->GetIsolate();
   int len = GetArrayLength(shared_info_array);
 
-  CHECK(shared_info_array->HasFastElements());
+  DCHECK(shared_info_array->HasFastElements());
   Handle<FixedArray> shared_info_array_elements(
       FixedArray::cast(shared_info_array->elements()));
 
diff --git a/src/liveedit.h b/src/liveedit.h
index 3465d88..53418b0 100644
--- a/src/liveedit.h
+++ b/src/liveedit.h
@@ -307,7 +307,7 @@
 
   Handle<Code> GetFunctionCode();
 
-  Handle<FixedArray> GetFeedbackVector();
+  Handle<TypeFeedbackVector> GetFeedbackVector();
 
   Handle<Object> GetCodeScopeInfo();
 
diff --git a/src/log-utils.cc b/src/log-utils.cc
index 4598b81..c94d07a 100644
--- a/src/log-utils.cc
+++ b/src/log-utils.cc
@@ -54,20 +54,20 @@
 
 
 void Log::OpenStdout() {
-  ASSERT(!IsEnabled());
+  DCHECK(!IsEnabled());
   output_handle_ = stdout;
 }
 
 
 void Log::OpenTemporaryFile() {
-  ASSERT(!IsEnabled());
-  output_handle_ = i::OS::OpenTemporaryFile();
+  DCHECK(!IsEnabled());
+  output_handle_ = base::OS::OpenTemporaryFile();
 }
 
 
 void Log::OpenFile(const char* name) {
-  ASSERT(!IsEnabled());
-  output_handle_ = OS::FOpen(name, OS::LogFileOpenMode);
+  DCHECK(!IsEnabled());
+  output_handle_ = base::OS::FOpen(name, base::OS::LogFileOpenMode);
 }
 
 
@@ -94,7 +94,7 @@
   : log_(log),
     lock_guard_(&log_->mutex_),
     pos_(0) {
-  ASSERT(log_->message_buffer_ != NULL);
+  DCHECK(log_->message_buffer_ != NULL);
 }
 
 
@@ -105,7 +105,7 @@
   va_start(args, format);
   AppendVA(format, args);
   va_end(args);
-  ASSERT(pos_ <= Log::kMessageBufferSize);
+  DCHECK(pos_ <= Log::kMessageBufferSize);
 }
 
 
@@ -120,7 +120,7 @@
   } else {
     pos_ = Log::kMessageBufferSize;
   }
-  ASSERT(pos_ <= Log::kMessageBufferSize);
+  DCHECK(pos_ <= Log::kMessageBufferSize);
 }
 
 
@@ -128,7 +128,7 @@
   if (pos_ < Log::kMessageBufferSize) {
     log_->message_buffer_[pos_++] = c;
   }
-  ASSERT(pos_ <= Log::kMessageBufferSize);
+  DCHECK(pos_ <= Log::kMessageBufferSize);
 }
 
 
@@ -159,7 +159,7 @@
 
 
 void Log::MessageBuilder::AppendSymbolName(Symbol* symbol) {
-  ASSERT(symbol);
+  DCHECK(symbol);
   Append("symbol(");
   if (!symbol->name()->IsUndefined()) {
     Append("\"");
@@ -206,19 +206,23 @@
 void Log::MessageBuilder::AppendStringPart(const char* str, int len) {
   if (pos_ + len > Log::kMessageBufferSize) {
     len = Log::kMessageBufferSize - pos_;
-    ASSERT(len >= 0);
+    DCHECK(len >= 0);
     if (len == 0) return;
   }
   Vector<char> buf(log_->message_buffer_ + pos_,
                    Log::kMessageBufferSize - pos_);
   StrNCpy(buf, str, len);
   pos_ += len;
-  ASSERT(pos_ <= Log::kMessageBufferSize);
+  DCHECK(pos_ <= Log::kMessageBufferSize);
 }
 
 
 void Log::MessageBuilder::WriteToLogFile() {
-  ASSERT(pos_ <= Log::kMessageBufferSize);
+  DCHECK(pos_ <= Log::kMessageBufferSize);
+  // Assert that we do not already have a newline at the end.
+  DCHECK(pos_ == 0 || log_->message_buffer_[pos_ - 1] != '\n');
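+  // Append the trailing newline here, overwriting the last character if the
+  // buffer is completely full.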
+  if (pos_ == Log::kMessageBufferSize) pos_--;
+  log_->message_buffer_[pos_++] = '\n';
   const int written = log_->WriteToFile(log_->message_buffer_, pos_);
   if (written != pos_) {
     log_->stop();
diff --git a/src/log-utils.h b/src/log-utils.h
index c5e0a0c..ef285e6 100644
--- a/src/log-utils.h
+++ b/src/log-utils.h
@@ -85,7 +85,7 @@
 
    private:
     Log* log_;
-    LockGuard<Mutex> lock_guard_;
+    base::LockGuard<base::Mutex> lock_guard_;
     int pos_;
   };
 
@@ -103,9 +103,9 @@
 
   // Implementation of writing to a log file.
   int WriteToFile(const char* msg, int length) {
-    ASSERT(output_handle_ != NULL);
+    DCHECK(output_handle_ != NULL);
     size_t rv = fwrite(msg, 1, length, output_handle_);
-    ASSERT(static_cast<size_t>(length) == rv);
+    DCHECK(static_cast<size_t>(length) == rv);
     USE(rv);
     fflush(output_handle_);
     return length;
@@ -120,7 +120,7 @@
 
   // mutex_ is a Mutex used for enforcing exclusive
   // access to the formatting buffer and the log file or log memory buffer.
-  Mutex mutex_;
+  base::Mutex mutex_;
 
   // Buffer used for formatting log messages. This is a singleton buffer and
   // mutex_ should be acquired before using it.
diff --git a/src/log.cc b/src/log.cc
index e8af5d0..86f5ce0 100644
--- a/src/log.cc
+++ b/src/log.cc
@@ -6,6 +6,8 @@
 
 #include "src/v8.h"
 
+#include "src/bailout-reason.h"
+#include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/cpu-profiler.h"
@@ -14,7 +16,7 @@
 #include "src/log.h"
 #include "src/log-utils.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
+#include "src/perf-jit.h"
 #include "src/runtime-profiler.h"
 #include "src/serialize.h"
 #include "src/string-stream.h"
@@ -262,9 +264,10 @@
   int size = SNPrintF(
       perf_dump_name,
       kFilenameFormatString,
-      OS::GetCurrentProcessId());
+      base::OS::GetCurrentProcessId());
   CHECK_NE(size, -1);
-  perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode);
+  perf_output_handle_ =
+      base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
   CHECK_NE(perf_output_handle_, NULL);
   setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
 }
@@ -280,176 +283,11 @@
                                        SharedFunctionInfo*,
                                        const char* name,
                                        int length) {
-  ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize);
+  DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
 
-  OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
-      reinterpret_cast<uint64_t>(code->instruction_start()),
-      code->instruction_size(),
-      length, name);
-}
-
-
-// Linux perf tool logging support
-class PerfJitLogger : public CodeEventLogger {
- public:
-  PerfJitLogger();
-  virtual ~PerfJitLogger();
-
-  virtual void CodeMoveEvent(Address from, Address to) { }
-  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) { }
-  virtual void CodeDeleteEvent(Address from) { }
-
- private:
-  virtual void LogRecordedBuffer(Code* code,
-                                 SharedFunctionInfo* shared,
-                                 const char* name,
-                                 int length);
-
-  // Extension added to V8 log file name to get the low-level log name.
-  static const char kFilenameFormatString[];
-  static const int kFilenameBufferPadding;
-
-  // File buffer size of the low-level log. We don't use the default to
-  // minimize the associated overhead.
-  static const int kLogBufferSize = 2 * MB;
-
-  void LogWriteBytes(const char* bytes, int size);
-  void LogWriteHeader();
-
-  static const uint32_t kJitHeaderMagic = 0x4F74496A;
-  static const uint32_t kJitHeaderVersion = 0x2;
-  static const uint32_t kElfMachIA32 = 3;
-  static const uint32_t kElfMachX64 = 62;
-  static const uint32_t kElfMachARM = 40;
-  static const uint32_t kElfMachMIPS = 10;
-  static const uint32_t kElfMachX87 = 3;
-
-  struct jitheader {
-    uint32_t magic;
-    uint32_t version;
-    uint32_t total_size;
-    uint32_t elf_mach;
-    uint32_t pad1;
-    uint32_t pid;
-    uint64_t timestamp;
-  };
-
-  enum jit_record_type {
-    JIT_CODE_LOAD = 0
-    // JIT_CODE_UNLOAD = 1,
-    // JIT_CODE_CLOSE = 2,
-    // JIT_CODE_DEBUG_INFO = 3,
-    // JIT_CODE_PAGE_MAP = 4,
-    // JIT_CODE_MAX = 5
-  };
-
-  struct jr_code_load {
-    uint32_t id;
-    uint32_t total_size;
-    uint64_t timestamp;
-    uint64_t vma;
-    uint64_t code_addr;
-    uint32_t code_size;
-    uint32_t align;
-  };
-
-  uint32_t GetElfMach() {
-#if V8_TARGET_ARCH_IA32
-    return kElfMachIA32;
-#elif V8_TARGET_ARCH_X64
-    return kElfMachX64;
-#elif V8_TARGET_ARCH_ARM
-    return kElfMachARM;
-#elif V8_TARGET_ARCH_MIPS
-    return kElfMachMIPS;
-#elif V8_TARGET_ARCH_X87
-    return kElfMachX87;
-#else
-    UNIMPLEMENTED();
-    return 0;
-#endif
-  }
-
-  FILE* perf_output_handle_;
-};
-
-const char PerfJitLogger::kFilenameFormatString[] = "/tmp/jit-%d.dump";
-
-// Extra padding for the PID in the filename
-const int PerfJitLogger::kFilenameBufferPadding = 16;
-
-PerfJitLogger::PerfJitLogger()
-    : perf_output_handle_(NULL) {
-  // Open the perf JIT dump file.
-  int bufferSize = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
-  ScopedVector<char> perf_dump_name(bufferSize);
-  int size = SNPrintF(
-      perf_dump_name,
-      kFilenameFormatString,
-      OS::GetCurrentProcessId());
-  CHECK_NE(size, -1);
-  perf_output_handle_ = OS::FOpen(perf_dump_name.start(), OS::LogFileOpenMode);
-  CHECK_NE(perf_output_handle_, NULL);
-  setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
-
-  LogWriteHeader();
-}
-
-
-PerfJitLogger::~PerfJitLogger() {
-  fclose(perf_output_handle_);
-  perf_output_handle_ = NULL;
-}
-
-
-void PerfJitLogger::LogRecordedBuffer(Code* code,
-                                      SharedFunctionInfo*,
-                                      const char* name,
-                                      int length) {
-  ASSERT(code->instruction_start() == code->address() + Code::kHeaderSize);
-  ASSERT(perf_output_handle_ != NULL);
-
-  const char* code_name = name;
-  uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
-  uint32_t code_size = code->instruction_size();
-
-  static const char string_terminator[] = "\0";
-
-  jr_code_load code_load;
-  code_load.id = JIT_CODE_LOAD;
-  code_load.total_size = sizeof(code_load) + length + 1 + code_size;
-  code_load.timestamp =
-      static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0);
-  code_load.vma = 0x0;  //  Our addresses are absolute.
-  code_load.code_addr = reinterpret_cast<uint64_t>(code->instruction_start());
-  code_load.code_size = code_size;
-  code_load.align = 0;
-
-  LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
-  LogWriteBytes(code_name, length);
-  LogWriteBytes(string_terminator, 1);
-  LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
-}
-
-
-void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
-  size_t rv = fwrite(bytes, 1, size, perf_output_handle_);
-  ASSERT(static_cast<size_t>(size) == rv);
-  USE(rv);
-}
-
-
-void PerfJitLogger::LogWriteHeader() {
-  ASSERT(perf_output_handle_ != NULL);
-  jitheader header;
-  header.magic = kJitHeaderMagic;
-  header.version = kJitHeaderVersion;
-  header.total_size = sizeof(jitheader);
-  header.pad1 = 0xdeadbeef;
-  header.elf_mach = GetElfMach();
-  header.pid = OS::GetCurrentProcessId();
-  header.timestamp = static_cast<uint64_t>(OS::TimeCurrentMillis() * 1000.0);
-  LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
+  base::OS::FPrint(perf_output_handle_, "%llx %x %.*s\n",
+                   reinterpret_cast<uint64_t>(code->instruction_start()),
+                   code->instruction_size(), length, name);
 }
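
The surviving LogRecordedBuffer body above drops the binary jitdump record
in favor of the plain-text map format that Linux perf reads (by convention
from /tmp/perf-<pid>.map): hex start address, hex code size, then the
symbol name. A standalone sketch of a writer for that line format (the
helper name is illustrative, not from this patch):

    #include <cstdint>
    #include <cstdio>

    // Emits "<start> <size> <name>", mirroring the "%llx %x %.*s\n" format
    // string used above; start and size are printed in hex.
    void WritePerfMapLine(std::FILE* f, uint64_t start, uint32_t size,
                          const char* name, int name_len) {
      std::fprintf(f, "%llx %x %.*s\n",
                   static_cast<unsigned long long>(start), size, name_len,
                   name);
    }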
 
 
@@ -538,7 +376,8 @@
   ScopedVector<char> ll_name(static_cast<int>(len + sizeof(kLogExt)));
   MemCopy(ll_name.start(), name, len);
   MemCopy(ll_name.start() + len, kLogExt, sizeof(kLogExt));
-  ll_output_handle_ = OS::FOpen(ll_name.start(), OS::LogFileOpenMode);
+  ll_output_handle_ =
+      base::OS::FOpen(ll_name.start(), base::OS::LogFileOpenMode);
   setvbuf(ll_output_handle_, NULL, _IOFBF, kLogBufferSize);
 
   LogCodeInfo();
@@ -554,14 +393,18 @@
 void LowLevelLogger::LogCodeInfo() {
 #if V8_TARGET_ARCH_IA32
   const char arch[] = "ia32";
-#elif V8_TARGET_ARCH_X64
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_64_BIT
   const char arch[] = "x64";
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
+  const char arch[] = "x32";
 #elif V8_TARGET_ARCH_ARM
   const char arch[] = "arm";
 #elif V8_TARGET_ARCH_MIPS
   const char arch[] = "mips";
 #elif V8_TARGET_ARCH_X87
   const char arch[] = "x87";
+#elif V8_TARGET_ARCH_ARM64
+  const char arch[] = "arm64";
 #else
   const char arch[] = "unknown";
 #endif
@@ -576,7 +419,7 @@
   CodeCreateStruct event;
   event.name_size = length;
   event.code_address = code->instruction_start();
-  ASSERT(event.code_address == code->address() + Code::kHeaderSize);
+  DCHECK(event.code_address == code->address() + Code::kHeaderSize);
   event.code_size = code->instruction_size();
   LogWriteStruct(event);
   LogWriteBytes(name, length);
@@ -611,7 +454,7 @@
 
 void LowLevelLogger::LogWriteBytes(const char* bytes, int size) {
   size_t rv = fwrite(bytes, 1, size, ll_output_handle_);
-  ASSERT(static_cast<size_t>(size) == rv);
+  DCHECK(static_cast<size_t>(size) == rv);
   USE(rv);
 }
 
@@ -666,11 +509,11 @@
   event.type = JitCodeEvent::CODE_ADDED;
   event.code_start = code->instruction_start();
   event.code_len = code->instruction_size();
-  Handle<Script> script_handle;
+  Handle<SharedFunctionInfo> shared_function_handle;
   if (shared && shared->script()->IsScript()) {
-    script_handle = Handle<Script>(Script::cast(shared->script()));
+    shared_function_handle = Handle<SharedFunctionInfo>(shared);
   }
-  event.script = ToApiHandle<v8::Script>(script_handle);
+  event.script = ToApiHandle<v8::UnboundScript>(shared_function_handle);
   event.name.str = name;
   event.name.len = length;
   code_event_handler_(&event);
@@ -751,7 +594,7 @@
 // An independent thread removes data and writes it to the log.
 // This design minimizes the time spent in the sampler.
 //
-class Profiler: public Thread {
+class Profiler: public base::Thread {
  public:
   explicit Profiler(Isolate* isolate);
   void Engage();
@@ -800,7 +643,7 @@
   int tail_;  // Index to the buffer tail.
   bool overflow_;  // Tells whether a buffer overflow has occurred.
   // Semaphore used for buffer synchronization.
-  Semaphore buffer_semaphore_;
+  base::Semaphore buffer_semaphore_;
 
   // Tells whether profiler is engaged, that is, processing thread is started.
   bool engaged_;
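
The Profiler in this hunk is the design described at its top: a
single-producer, single-consumer ring buffer where the sampler inserts
ticks at head_, the Profiler thread drains them at tail_, and the counting
semaphore lets the consumer block rather than poll. A minimal sketch of
that pattern (POSIX semaphores stand in for base::Semaphore; the overflow_
handling is left out):

    #include <semaphore.h>

    template <typename T, int kSize>
    class SamplingBuffer {
     public:
      SamplingBuffer() : head_(0), tail_(0) { sem_init(&filled_, 0, 0); }
      ~SamplingBuffer() { sem_destroy(&filled_); }
      // Producer side (the sampler): must stay cheap.
      void Insert(const T& sample) {
        buffer_[head_] = sample;
        head_ = (head_ + 1) % kSize;
        sem_post(&filled_);  // Signal one more available sample.
      }
      // Consumer side (the profiler thread): blocks until data arrives.
      T Remove() {
        sem_wait(&filled_);
        T sample = buffer_[tail_];
        tail_ = (tail_ + 1) % kSize;
        return sample;
      }
     private:
      T buffer_[kSize];
      int head_;
      int tail_;
      sem_t filled_;
    };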
@@ -830,7 +673,7 @@
   }
 
   void SetProfiler(Profiler* profiler) {
-    ASSERT(profiler_ == NULL);
+    DCHECK(profiler_ == NULL);
     profiler_ = profiler;
     IncreaseProfilingDepth();
     if (!IsActive()) Start();
@@ -851,7 +694,7 @@
 // Profiler implementation.
 //
 Profiler::Profiler(Isolate* isolate)
-    : Thread("v8:Profiler"),
+    : base::Thread(Options("v8:Profiler")),
       isolate_(isolate),
       head_(0),
       tail_(0),
@@ -859,16 +702,15 @@
       buffer_semaphore_(0),
       engaged_(false),
       running_(false),
-      paused_(false) {
-}
+      paused_(false) {}
 
 
 void Profiler::Engage() {
   if (engaged_) return;
   engaged_ = true;
 
-  std::vector<OS::SharedLibraryAddress> addresses =
-      OS::GetSharedLibraryAddresses();
+  std::vector<base::OS::SharedLibraryAddress> addresses =
+      base::OS::GetSharedLibraryAddresses();
   for (size_t i = 0; i < addresses.size(); ++i) {
     LOG(isolate_, SharedLibraryEvent(
         addresses[i].library_path, addresses[i].start, addresses[i].end));
@@ -942,13 +784,13 @@
 
 
 void Logger::addCodeEventListener(CodeEventListener* listener) {
-  ASSERT(!hasCodeEventListener(listener));
+  DCHECK(!hasCodeEventListener(listener));
   listeners_.Add(listener);
 }
 
 
 void Logger::removeCodeEventListener(CodeEventListener* listener) {
-  ASSERT(hasCodeEventListener(listener));
+  DCHECK(hasCodeEventListener(listener));
   listeners_.RemoveElement(listener);
 }
 
@@ -961,7 +803,7 @@
 void Logger::ProfilerBeginEvent() {
   if (!log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("profiler,\"begin\",%d\n", kSamplingIntervalMs);
+  msg.Append("profiler,\"begin\",%d", kSamplingIntervalMs);
   msg.WriteToLogFile();
 }
 
@@ -974,7 +816,7 @@
 void Logger::UncheckedStringEvent(const char* name, const char* value) {
   if (!log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,\"%s\"\n", name, value);
+  msg.Append("%s,\"%s\"", name, value);
   msg.WriteToLogFile();
 }
 
@@ -992,7 +834,7 @@
 void Logger::UncheckedIntEvent(const char* name, int value) {
   if (!log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,%d\n", name, value);
+  msg.Append("%s,%d", name, value);
   msg.WriteToLogFile();
 }
 
@@ -1000,7 +842,7 @@
 void Logger::UncheckedIntPtrTEvent(const char* name, intptr_t value) {
   if (!log_->IsEnabled()) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,%" V8_PTR_PREFIX "d\n", name, value);
+  msg.Append("%s,%" V8_PTR_PREFIX "d", name, value);
   msg.WriteToLogFile();
 }
 
@@ -1008,7 +850,7 @@
 void Logger::HandleEvent(const char* name, Object** location) {
   if (!log_->IsEnabled() || !FLAG_log_handles) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("%s,0x%" V8PRIxPTR "\n", name, location);
+  msg.Append("%s,0x%" V8PRIxPTR, name, location);
   msg.WriteToLogFile();
 }
 
@@ -1017,7 +859,7 @@
 // caller's responsibility to ensure that log is enabled and that
 // FLAG_log_api is true.
 void Logger::ApiEvent(const char* format, ...) {
-  ASSERT(log_->IsEnabled() && FLAG_log_api);
+  DCHECK(log_->IsEnabled() && FLAG_log_api);
   Log::MessageBuilder msg(log_);
   va_list ap;
   va_start(ap, format);
@@ -1032,23 +874,21 @@
   if (key->IsString()) {
     SmartArrayPointer<char> str =
         String::cast(key)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    ApiEvent("api,check-security,\"%s\"\n", str.get());
+    ApiEvent("api,check-security,\"%s\"", str.get());
   } else if (key->IsSymbol()) {
     Symbol* symbol = Symbol::cast(key);
     if (symbol->name()->IsUndefined()) {
-      ApiEvent("api,check-security,symbol(hash %x)\n",
-               Symbol::cast(key)->Hash());
+      ApiEvent("api,check-security,symbol(hash %x)", Symbol::cast(key)->Hash());
     } else {
       SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
           DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-      ApiEvent("api,check-security,symbol(\"%s\" hash %x)\n",
-               str.get(),
+      ApiEvent("api,check-security,symbol(\"%s\" hash %x)", str.get(),
                Symbol::cast(key)->Hash());
     }
   } else if (key->IsUndefined()) {
-    ApiEvent("api,check-security,undefined\n");
+    ApiEvent("api,check-security,undefined");
   } else {
-    ApiEvent("api,check-security,['no-name']\n");
+    ApiEvent("api,check-security,['no-name']");
   }
 }
 
@@ -1058,80 +898,79 @@
                                 uintptr_t end) {
   if (!log_->IsEnabled() || !FLAG_prof) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
-             library_path.c_str(),
-             start,
-             end);
+  msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR,
+             library_path.c_str(), start, end);
   msg.WriteToLogFile();
 }
 
 
 void Logger::CodeDeoptEvent(Code* code) {
   if (!log_->IsEnabled()) return;
-  ASSERT(FLAG_log_internal_timer_events);
+  DCHECK(FLAG_log_internal_timer_events);
   Log::MessageBuilder msg(log_);
   int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
-  msg.Append("code-deopt,%ld,%d\n", since_epoch, code->CodeSize());
+  msg.Append("code-deopt,%ld,%d", since_epoch, code->CodeSize());
   msg.WriteToLogFile();
 }
 
 
 void Logger::CurrentTimeEvent() {
   if (!log_->IsEnabled()) return;
-  ASSERT(FLAG_log_internal_timer_events);
+  DCHECK(FLAG_log_internal_timer_events);
   Log::MessageBuilder msg(log_);
   int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
-  msg.Append("current-time,%ld\n", since_epoch);
+  msg.Append("current-time,%ld", since_epoch);
   msg.WriteToLogFile();
 }
 
 
-void Logger::TimerEvent(StartEnd se, const char* name) {
+void Logger::TimerEvent(Logger::StartEnd se, const char* name) {
   if (!log_->IsEnabled()) return;
-  ASSERT(FLAG_log_internal_timer_events);
+  DCHECK(FLAG_log_internal_timer_events);
   Log::MessageBuilder msg(log_);
   int since_epoch = static_cast<int>(timer_.Elapsed().InMicroseconds());
-  const char* format = (se == START) ? "timer-event-start,\"%s\",%ld\n"
-                                     : "timer-event-end,\"%s\",%ld\n";
+  const char* format = (se == START) ? "timer-event-start,\"%s\",%ld"
+                                     : "timer-event-end,\"%s\",%ld";
   msg.Append(format, name, since_epoch);
   msg.WriteToLogFile();
 }
 
 
 void Logger::EnterExternal(Isolate* isolate) {
-  LOG(isolate, TimerEvent(START, TimerEventScope::v8_external));
-  ASSERT(isolate->current_vm_state() == JS);
+  LOG(isolate, TimerEvent(START, TimerEventExternal::name()));
+  DCHECK(isolate->current_vm_state() == JS);
   isolate->set_current_vm_state(EXTERNAL);
 }
 
 
 void Logger::LeaveExternal(Isolate* isolate) {
-  LOG(isolate, TimerEvent(END, TimerEventScope::v8_external));
-  ASSERT(isolate->current_vm_state() == EXTERNAL);
+  LOG(isolate, TimerEvent(END, TimerEventExternal::name()));
+  DCHECK(isolate->current_vm_state() == EXTERNAL);
   isolate->set_current_vm_state(JS);
 }
 
 
-void Logger::LogInternalEvents(const char* name, int se) {
+void Logger::DefaultTimerEventsLogger(const char* name, int se) {
   Isolate* isolate = Isolate::Current();
   LOG(isolate, TimerEvent(static_cast<StartEnd>(se), name));
 }
 
 
-void Logger::TimerEventScope::LogTimerEvent(StartEnd se) {
-  isolate_->event_logger()(name_, se);
+template <class TimerEvent>
+void TimerEventScope<TimerEvent>::LogTimerEvent(Logger::StartEnd se) {
+  if (TimerEvent::expose_to_api() ||
+      isolate_->event_logger() == Logger::DefaultTimerEventsLogger) {
+    isolate_->event_logger()(TimerEvent::name(), se);
+  }
 }
 
 
-const char* Logger::TimerEventScope::v8_recompile_synchronous =
-    "V8.RecompileSynchronous";
-const char* Logger::TimerEventScope::v8_recompile_concurrent =
-    "V8.RecompileConcurrent";
-const char* Logger::TimerEventScope::v8_compile_full_code =
-    "V8.CompileFullCode";
-const char* Logger::TimerEventScope::v8_execute = "V8.Execute";
-const char* Logger::TimerEventScope::v8_external = "V8.External";
-const char* Logger::TimerEventScope::v8_ic_miss = "V8.IcMiss";
+// Instantiate template methods.
+#define V(TimerName, expose)                                           \
+  template void TimerEventScope<TimerEvent##TimerName>::LogTimerEvent( \
+      Logger::StartEnd se);
+TIMER_EVENTS_LIST(V)
+#undef V
 
 
 void Logger::LogRegExpSource(Handle<JSRegExp> regexp) {
@@ -1185,21 +1024,21 @@
   Log::MessageBuilder msg(log_);
   msg.Append("regexp-compile,");
   LogRegExpSource(regexp);
-  msg.Append(in_cache ? ",hit\n" : ",miss\n");
+  msg.Append(in_cache ? ",hit" : ",miss");
   msg.WriteToLogFile();
 }
 
 
 void Logger::ApiIndexedSecurityCheck(uint32_t index) {
   if (!log_->IsEnabled() || !FLAG_log_api) return;
-  ApiEvent("api,check-security,%u\n", index);
+  ApiEvent("api,check-security,%u", index);
 }
 
 
 void Logger::ApiNamedPropertyAccess(const char* tag,
                                     JSObject* holder,
                                     Object* name) {
-  ASSERT(name->IsName());
+  DCHECK(name->IsName());
   if (!log_->IsEnabled() || !FLAG_log_api) return;
   String* class_name_obj = holder->class_name();
   SmartArrayPointer<char> class_name =
@@ -1207,18 +1046,18 @@
   if (name->IsString()) {
     SmartArrayPointer<char> property_name =
         String::cast(name)->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-    ApiEvent("api,%s,\"%s\",\"%s\"\n", tag, class_name.get(),
+    ApiEvent("api,%s,\"%s\",\"%s\"", tag, class_name.get(),
              property_name.get());
   } else {
     Symbol* symbol = Symbol::cast(name);
     uint32_t hash = symbol->Hash();
     if (symbol->name()->IsUndefined()) {
-      ApiEvent("api,%s,\"%s\",symbol(hash %x)\n", tag, class_name.get(), hash);
+      ApiEvent("api,%s,\"%s\",symbol(hash %x)", tag, class_name.get(), hash);
     } else {
       SmartArrayPointer<char> str = String::cast(symbol->name())->ToCString(
           DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-      ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)\n",
-               tag, class_name.get(), str.get(), hash);
+      ApiEvent("api,%s,\"%s\",symbol(\"%s\" hash %x)", tag, class_name.get(),
+               str.get(), hash);
     }
   }
 }
@@ -1230,7 +1069,7 @@
   String* class_name_obj = holder->class_name();
   SmartArrayPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  ApiEvent("api,%s,\"%s\",%u\n", tag, class_name.get(), index);
+  ApiEvent("api,%s,\"%s\",%u", tag, class_name.get(), index);
 }
 
 
@@ -1239,20 +1078,20 @@
   String* class_name_obj = object->class_name();
   SmartArrayPointer<char> class_name =
       class_name_obj->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
-  ApiEvent("api,%s,\"%s\"\n", tag, class_name.get());
+  ApiEvent("api,%s,\"%s\"", tag, class_name.get());
 }
 
 
 void Logger::ApiEntryCall(const char* name) {
   if (!log_->IsEnabled() || !FLAG_log_api) return;
-  ApiEvent("api,%s\n", name);
+  ApiEvent("api,%s", name);
 }
 
 
 void Logger::NewEvent(const char* name, void* object, size_t size) {
   if (!log_->IsEnabled() || !FLAG_log) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("new,%s,0x%" V8PRIxPTR ",%u\n", name, object,
+  msg.Append("new,%s,0x%" V8PRIxPTR ",%u", name, object,
              static_cast<unsigned int>(size));
   msg.WriteToLogFile();
 }
@@ -1261,7 +1100,7 @@
 void Logger::DeleteEvent(const char* name, void* object) {
   if (!log_->IsEnabled() || !FLAG_log) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("delete,%s,0x%" V8PRIxPTR "\n", name, object);
+  msg.Append("delete,%s,0x%" V8PRIxPTR, name, object);
   msg.WriteToLogFile();
 }
 
@@ -1299,7 +1138,6 @@
                  symbol->Hash());
     }
   }
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1325,7 +1163,7 @@
 static void AppendCodeCreateHeader(Log::MessageBuilder* msg,
                                    Logger::LogEventsAndTags tag,
                                    Code* code) {
-  ASSERT(msg);
+  DCHECK(msg);
   msg->Append("%s,%s,%d,",
               kLogEventsNames[Logger::CODE_CREATION_EVENT],
               kLogEventsNames[tag],
@@ -1347,7 +1185,6 @@
   Log::MessageBuilder msg(log_);
   AppendCodeCreateHeader(&msg, tag, code);
   msg.AppendDoubleQuotedString(comment);
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1370,7 +1207,6 @@
   } else {
     msg.AppendSymbolName(Symbol::cast(name));
   }
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1386,8 +1222,7 @@
   CALL_LISTENERS(CodeCreateEvent(tag, code, shared, info, name));
 
   if (!FLAG_log_code || !log_->IsEnabled()) return;
-  if (code == isolate_->builtins()->builtin(Builtins::kCompileUnoptimized))
-    return;
+  if (code == isolate_->builtins()->builtin(Builtins::kCompileLazy)) return;
 
   Log::MessageBuilder msg(log_);
   AppendCodeCreateHeader(&msg, tag, code);
@@ -1401,7 +1236,6 @@
   msg.Append(',');
   msg.AppendAddress(shared->address());
   msg.Append(",%s", ComputeMarker(code));
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1436,7 +1270,6 @@
   msg.Append(":%d:%d\",", line, column);
   msg.AppendAddress(shared->address());
   msg.Append(",%s", ComputeMarker(code));
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1453,7 +1286,6 @@
   Log::MessageBuilder msg(log_);
   AppendCodeCreateHeader(&msg, tag, code);
   msg.Append("\"args_count: %d\"", args_count);
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1471,7 +1303,7 @@
   SmartArrayPointer<char> name =
       shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("\"%s\",", name.get());
-  msg.Append("\"%s\"\n", GetBailoutReason(shared->DisableOptimizationReason()));
+  msg.Append("\"%s\"", GetBailoutReason(shared->DisableOptimizationReason()));
   msg.WriteToLogFile();
 }
 
@@ -1482,7 +1314,7 @@
   if (!is_logging_code_events()) return;
   if (!log_->IsEnabled() || !FLAG_ll_prof) return;
   CALL_LISTENERS(CodeMovingGCEvent());
-  OS::SignalCodeMovingGC();
+  base::OS::SignalCodeMovingGC();
 }
 
 
@@ -1498,7 +1330,6 @@
   msg.Append('"');
   msg.AppendDetailed(source, false);
   msg.Append('"');
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1522,7 +1353,6 @@
   Log::MessageBuilder msg(log_);
   msg.Append("%s,", kLogEventsNames[CODE_DELETE_EVENT]);
   msg.AppendAddress(from);
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1565,7 +1395,6 @@
   Log::MessageBuilder msg(log_);
   msg.Append("%s,%d,", kLogEventsNames[SNAPSHOT_CODE_NAME_EVENT], pos);
   msg.AppendDoubleQuotedString(code_name);
-  msg.Append("\n");
   msg.WriteToLogFile();
 }
 
@@ -1578,7 +1407,6 @@
   msg.Append("%s,", kLogEventsNames[SNAPSHOT_POSITION_EVENT]);
   msg.AppendAddress(addr);
   msg.Append(",%d", pos);
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1600,7 +1428,6 @@
   msg.AppendAddress(from);
   msg.Append(',');
   msg.AppendAddress(to);
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1611,12 +1438,10 @@
   msg.Append("%s,%s,", name, tag);
 
   uint32_t sec, usec;
-  if (OS::GetUserTime(&sec, &usec) != -1) {
+  if (base::OS::GetUserTime(&sec, &usec) != -1) {
     msg.Append("%d,%d,", sec, usec);
   }
-  msg.Append("%.0f", OS::TimeCurrentMillis());
-
-  msg.Append('\n');
+  msg.Append("%.0f", base::OS::TimeCurrentMillis());
   msg.WriteToLogFile();
 }
 
@@ -1637,7 +1462,6 @@
   } else {
     msg.AppendSymbolName(Symbol::cast(name));
   }
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1647,8 +1471,8 @@
   Log::MessageBuilder msg(log_);
   // Using non-relative system time in order to be able to synchronize with
   // external memory profiling events (e.g. DOM memory size).
-  msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f\n",
-             space, kind, OS::TimeCurrentMillis());
+  msg.Append("heap-sample-begin,\"%s\",\"%s\",%.0f", space, kind,
+             base::OS::TimeCurrentMillis());
   msg.WriteToLogFile();
 }
 
@@ -1656,7 +1480,7 @@
 void Logger::HeapSampleEndEvent(const char* space, const char* kind) {
   if (!log_->IsEnabled() || !FLAG_log_gc) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("heap-sample-end,\"%s\",\"%s\"\n", space, kind);
+  msg.Append("heap-sample-end,\"%s\",\"%s\"", space, kind);
   msg.WriteToLogFile();
 }
 
@@ -1664,7 +1488,7 @@
 void Logger::HeapSampleItemEvent(const char* type, int number, int bytes) {
   if (!log_->IsEnabled() || !FLAG_log_gc) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("heap-sample-item,%s,%d,%d\n", type, number, bytes);
+  msg.Append("heap-sample-item,%s,%d,%d", type, number, bytes);
   msg.WriteToLogFile();
 }
 
@@ -1672,7 +1496,7 @@
 void Logger::DebugTag(const char* call_site_tag) {
   if (!log_->IsEnabled() || !FLAG_log) return;
   Log::MessageBuilder msg(log_);
-  msg.Append("debug-tag,%s\n", call_site_tag);
+  msg.Append("debug-tag,%s", call_site_tag);
   msg.WriteToLogFile();
 }
 
@@ -1685,10 +1509,8 @@
   }
   char* parameter_string = s.Finalize();
   Log::MessageBuilder msg(log_);
-  msg.Append("debug-queue-event,%s,%15.3f,%s\n",
-             event_type,
-             OS::TimeCurrentMillis(),
-             parameter_string);
+  msg.Append("debug-queue-event,%s,%15.3f,%s", event_type,
+             base::OS::TimeCurrentMillis(), parameter_string);
   DeleteArray(parameter_string);
   msg.WriteToLogFile();
 }
@@ -1711,11 +1533,10 @@
   if (overflow) {
     msg.Append(",overflow");
   }
-  for (int i = 0; i < sample->frames_count; ++i) {
+  for (unsigned i = 0; i < sample->frames_count; ++i) {
     msg.Append(',');
     msg.AppendAddress(sample->stack[i]);
   }
-  msg.Append('\n');
   msg.WriteToLogFile();
 }
 
@@ -1755,7 +1576,7 @@
       sfis_[*count_] = Handle<SharedFunctionInfo>(sfi);
     }
     if (code_objects_ != NULL) {
-      ASSERT(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
+      DCHECK(function->code()->kind() == Code::OPTIMIZED_FUNCTION);
       code_objects_[*count_] = Handle<Code>(function->code());
     }
     *count_ = *count_ + 1;
@@ -1934,8 +1755,7 @@
   // During iteration, there can be heap allocation due to the
   // GetScriptLineNumber call.
   for (int i = 0; i < compiled_funcs_count; ++i) {
-    if (code_objects[i].is_identical_to(
-            isolate_->builtins()->CompileUnoptimized()))
+    if (code_objects[i].is_identical_to(isolate_->builtins()->CompileLazy()))
       continue;
     LogExistingFunction(sfis[i], code_objects[i]);
   }
@@ -1965,16 +1785,15 @@
 }
 
 
-static void AddIsolateIdIfNeeded(Isolate* isolate, StringStream* stream) {
-  if (FLAG_logfile_per_isolate) stream->Add("isolate-%p-", isolate);
+static void AddIsolateIdIfNeeded(OStream& os,  // NOLINT
+                                 Isolate* isolate) {
+  if (FLAG_logfile_per_isolate) os << "isolate-" << isolate << "-";
 }
 
 
-static SmartArrayPointer<const char> PrepareLogFileName(
-    Isolate* isolate, const char* file_name) {
-  HeapStringAllocator allocator;
-  StringStream stream(&allocator);
-  AddIsolateIdIfNeeded(isolate, &stream);
+static void PrepareLogFileName(OStream& os,  // NOLINT
+                               Isolate* isolate, const char* file_name) {
+  AddIsolateIdIfNeeded(os, isolate);
   for (const char* p = file_name; *p; p++) {
     if (*p == '%') {
       p++;
@@ -1985,29 +1804,25 @@
           p--;
           break;
         case 'p':
-          stream.Add("%d", OS::GetCurrentProcessId());
+          os << base::OS::GetCurrentProcessId();
           break;
-        case 't': {
+        case 't':
           // %t expands to the current time in milliseconds.
-          double time = OS::TimeCurrentMillis();
-          stream.Add("%.0f", FmtElm(time));
+          os << static_cast<int64_t>(base::OS::TimeCurrentMillis());
           break;
-        }
         case '%':
           // %% expands (contracts really) to %.
-          stream.Put('%');
+          os << '%';
           break;
         default:
           // All other %'s expand to themselves.
-          stream.Put('%');
-          stream.Put(*p);
+          os << '%' << *p;
           break;
       }
     } else {
-      stream.Put(*p);
+      os << *p;
     }
   }
-  return SmartArrayPointer<const char>(stream.ToCString());
 }
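
The rewritten PrepareLogFileName above streams the expanded file name into
an OStream instead of assembling it with StringStream: %p becomes the
process id, %t the current time in milliseconds, and %% a literal percent
sign. A simplified standalone version of the same expansion (names are
illustrative; the trailing-% edge case is handled slightly differently
above):

    #include <cstdint>
    #include <sstream>
    #include <string>

    std::string ExpandLogFileName(const std::string& pattern, int pid,
                                  int64_t time_ms) {
      std::ostringstream os;
      for (size_t i = 0; i < pattern.size(); ++i) {
        if (pattern[i] != '%' || i + 1 == pattern.size()) {
          os << pattern[i];
          continue;
        }
        switch (pattern[++i]) {
          case 'p': os << pid; break;         // %p -> process id
          case 't': os << time_ms; break;     // %t -> time in ms
          case '%': os << '%'; break;         // %% -> %
          default:  os << '%' << pattern[i];  // others expand to themselves
        }
      }
      return os.str();
    }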
 
 
@@ -2021,9 +1836,9 @@
     FLAG_log_snapshot_positions = true;
   }
 
-  SmartArrayPointer<const char> log_file_name =
-      PrepareLogFileName(isolate, FLAG_logfile);
-  log_->Initialize(log_file_name.get());
+  OStringStream log_file_name;
+  PrepareLogFileName(log_file_name, isolate, FLAG_logfile);
+  log_->Initialize(log_file_name.c_str());
 
 
   if (FLAG_perf_basic_prof) {
@@ -2037,7 +1852,7 @@
   }
 
   if (FLAG_ll_prof) {
-    ll_logger_ = new LowLevelLogger(log_file_name.get());
+    ll_logger_ = new LowLevelLogger(log_file_name.c_str());
     addCodeEventListener(ll_logger_);
   }
 
diff --git a/src/log.h b/src/log.h
index e98874b..51597dd 100644
--- a/src/log.h
+++ b/src/log.h
@@ -8,11 +8,16 @@
 #include <string>
 
 #include "src/allocation.h"
+#include "src/base/platform/elapsed-timer.h"
+#include "src/base/platform/platform.h"
 #include "src/objects.h"
-#include "src/platform.h"
-#include "src/platform/elapsed-timer.h"
 
 namespace v8 {
+
+namespace base {
+class Semaphore;
+}
+
 namespace internal {
 
 // Logger is used for collecting logging information from V8 during
@@ -57,7 +62,6 @@
 class Log;
 class PositionsRecorder;
 class Profiler;
-class Semaphore;
 class Ticker;
 struct TickSample;
 
@@ -147,6 +151,8 @@
 
 class Logger {
  public:
+  enum StartEnd { START = 0, END = 1 };
+
 #define DECLARE_ENUM(enum_item, ignore) enum_item,
   enum LogEventsAndTags {
     LOG_EVENTS_AND_TAGS_LIST(DECLARE_ENUM)
@@ -286,9 +292,6 @@
                           uintptr_t start,
                           uintptr_t end);
 
-  // ==== Events logged by --log-timer-events. ====
-  enum StartEnd { START, END };
-
   void CodeDeoptEvent(Code* code);
   void CurrentTimeEvent();
 
@@ -297,33 +300,8 @@
   static void EnterExternal(Isolate* isolate);
   static void LeaveExternal(Isolate* isolate);
 
-  static void EmptyLogInternalEvents(const char* name, int se) { }
-  static void LogInternalEvents(const char* name, int se);
-
-  class TimerEventScope {
-   public:
-    TimerEventScope(Isolate* isolate, const char* name)
-        : isolate_(isolate), name_(name) {
-      LogTimerEvent(START);
-    }
-
-    ~TimerEventScope() {
-      LogTimerEvent(END);
-    }
-
-    void LogTimerEvent(StartEnd se);
-
-    static const char* v8_recompile_synchronous;
-    static const char* v8_recompile_concurrent;
-    static const char* v8_compile_full_code;
-    static const char* v8_execute;
-    static const char* v8_external;
-    static const char* v8_ic_miss;
-
-   private:
-    Isolate* isolate_;
-    const char* name_;
-  };
+  static void EmptyTimerEventsLogger(const char* name, int se) {}
+  static void DefaultTimerEventsLogger(const char* name, int se);
 
   // ==== Events logged by --log-regexp ====
   // Regexp compilation and execution events.
@@ -436,12 +414,46 @@
   // 'true' between SetUp() and TearDown().
   bool is_initialized_;
 
-  ElapsedTimer timer_;
+  base::ElapsedTimer timer_;
 
   friend class CpuProfiler;
 };
 
 
+#define TIMER_EVENTS_LIST(V)    \
+  V(RecompileSynchronous, true) \
+  V(RecompileConcurrent, true)  \
+  V(CompileFullCode, true)      \
+  V(Execute, true)              \
+  V(External, true)             \
+  V(IcMiss, false)
+
+#define V(TimerName, expose)                                                  \
+  class TimerEvent##TimerName : public AllStatic {                            \
+   public:                                                                    \
+    static const char* name(void* unused = NULL) { return "V8." #TimerName; } \
+    static bool expose_to_api() { return expose; }                            \
+  };
+TIMER_EVENTS_LIST(V)
+#undef V
+
+
+template <class TimerEvent>
+class TimerEventScope {
+ public:
+  explicit TimerEventScope(Isolate* isolate) : isolate_(isolate) {
+    LogTimerEvent(Logger::START);
+  }
+
+  ~TimerEventScope() { LogTimerEvent(Logger::END); }
+
+  void LogTimerEvent(Logger::StartEnd se);
+
+ private:
+  Isolate* isolate_;
+};
+
+
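TIMER_EVENTS_LIST above is an X-macro: each V(Name, expose) entry expands
into a TimerEvent##Name class here and into an explicit template
instantiation in log.cc, so adding a timer event is a one-line change to
the list. A scoped timing, as sketched below, then brackets a region with
START and END log entries (assumes a live isolate):

    {
      TimerEventScope<TimerEventExecute> timer(isolate);
      // ... work timed between the "V8.Execute" START and END events ...
    }  // Destructor logs Logger::END.
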
 class CodeEventListener {
  public:
   virtual ~CodeEventListener() {}
diff --git a/src/lookup-inl.h b/src/lookup-inl.h
new file mode 100644
index 0000000..d4777a0
--- /dev/null
+++ b/src/lookup-inl.h
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LOOKUP_INL_H_
+#define V8_LOOKUP_INL_H_
+
+#include "src/lookup.h"
+
+namespace v8 {
+namespace internal {
+
+
+JSReceiver* LookupIterator::NextHolder(Map* map) {
+  DisallowHeapAllocation no_gc;
+  if (map->prototype()->IsNull()) return NULL;
+
+  JSReceiver* next = JSReceiver::cast(map->prototype());
+  DCHECK(!next->map()->IsGlobalObjectMap() ||
+         next->map()->is_hidden_prototype());
+
+  if (!check_prototype_chain() &&
+      !(check_hidden() && next->map()->is_hidden_prototype()) &&
+      // Always look up behind the JSGlobalProxy into the JSGlobalObject, even
+      // when not checking other hidden prototypes.
+      !map->IsJSGlobalProxyMap()) {
+    return NULL;
+  }
+
+  return next;
+}
+
+
+LookupIterator::State LookupIterator::LookupInHolder(Map* map,
+                                                     JSReceiver* holder) {
+  STATIC_ASSERT(INTERCEPTOR == BEFORE_PROPERTY);
+  DisallowHeapAllocation no_gc;
+  switch (state_) {
+    case NOT_FOUND:
+      if (map->IsJSProxyMap()) return JSPROXY;
+      if (map->is_access_check_needed()) return ACCESS_CHECK;
+    // Fall through.
+    case ACCESS_CHECK:
+      if (check_interceptor() && map->has_named_interceptor()) {
+        return INTERCEPTOR;
+      }
+    // Fall through.
+    case INTERCEPTOR:
+      if (map->is_dictionary_map()) {
+        NameDictionary* dict = JSObject::cast(holder)->property_dictionary();
+        number_ = dict->FindEntry(name_);
+        if (number_ == NameDictionary::kNotFound) return NOT_FOUND;
+        property_details_ = dict->DetailsAt(number_);
+        if (holder->IsGlobalObject()) {
+          if (property_details_.IsDeleted()) return NOT_FOUND;
+          PropertyCell* cell = PropertyCell::cast(dict->ValueAt(number_));
+          if (cell->value()->IsTheHole()) return NOT_FOUND;
+        }
+      } else {
+        DescriptorArray* descriptors = map->instance_descriptors();
+        number_ = descriptors->SearchWithCache(*name_, map);
+        if (number_ == DescriptorArray::kNotFound) return NOT_FOUND;
+        property_details_ = descriptors->GetDetails(number_);
+      }
+      has_property_ = true;
+      switch (property_details_.type()) {
+        case v8::internal::CONSTANT:
+        case v8::internal::FIELD:
+        case v8::internal::NORMAL:
+          return DATA;
+        case v8::internal::CALLBACKS:
+          return ACCESSOR;
+      }
+    case ACCESSOR:
+    case DATA:
+      return NOT_FOUND;
+    case JSPROXY:
+    case TRANSITION:
+      UNREACHABLE();
+  }
+  UNREACHABLE();
+  return state_;
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_LOOKUP_INL_H_
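
LookupInHolder above is the per-holder step of a state machine; Next() in
lookup.cc drives it along the prototype chain until a state other than
NOT_FOUND turns up. A typical caller loop might look like the sketch below
(it assumes a state() accessor matching the State enum in lookup.h, and
CallGetter is a hypothetical helper):

    LookupIterator it(receiver, name, LookupIterator::PROTOTYPE_CHAIN);
    for (; it.IsFound(); it.Next()) {
      switch (it.state()) {
        case LookupIterator::DATA:
          return it.GetDataValue();              // Plain data property.
        case LookupIterator::ACCESSOR:
          return CallGetter(it.GetAccessors());  // Hypothetical helper.
        default:
          break;  // ACCESS_CHECK, INTERCEPTOR, JSPROXY need own handling.
      }
    }
    return it.factory()->undefined_value();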
diff --git a/src/lookup.cc b/src/lookup.cc
index 5391640..b855abe 100644
--- a/src/lookup.cc
+++ b/src/lookup.cc
@@ -5,101 +5,65 @@
 #include "src/v8.h"
 
 #include "src/bootstrapper.h"
+#include "src/deoptimizer.h"
 #include "src/lookup.h"
+#include "src/lookup-inl.h"
 
 namespace v8 {
 namespace internal {
 
 
 void LookupIterator::Next() {
+  DCHECK_NE(JSPROXY, state_);
+  DCHECK_NE(TRANSITION, state_);
+  DisallowHeapAllocation no_gc;
   has_property_ = false;
+
+  JSReceiver* holder = *holder_;
+  Map* map = *holder_map_;
+
+  // Perform lookup on current holder.
+  state_ = LookupInHolder(map, holder);
+  if (IsFound()) return;
+
+  // Continue lookup if lookup on current holder failed.
   do {
-    state_ = LookupInHolder();
-  } while (!IsFound() && NextHolder());
+    JSReceiver* maybe_holder = NextHolder(map);
+    if (maybe_holder == NULL) break;
+    holder = maybe_holder;
+    map = holder->map();
+    state_ = LookupInHolder(map, holder);
+  } while (!IsFound());
+
+  if (holder != *holder_) {
+    holder_ = handle(holder, isolate_);
+    holder_map_ = handle(map, isolate_);
+  }
 }
 
 
 Handle<JSReceiver> LookupIterator::GetRoot() const {
-  Handle<Object> receiver = GetReceiver();
-  if (receiver->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver);
-  Context* native_context = isolate_->context()->native_context();
-  JSFunction* function;
-  if (receiver->IsNumber()) {
-    function = native_context->number_function();
-  } else if (receiver->IsString()) {
-    function = native_context->string_function();
-  } else if (receiver->IsSymbol()) {
-    function = native_context->symbol_function();
-  } else if (receiver->IsBoolean()) {
-    function = native_context->boolean_function();
-  } else {
-    UNREACHABLE();
-    function = NULL;
-  }
-  return handle(JSReceiver::cast(function->instance_prototype()));
+  if (receiver_->IsJSReceiver()) return Handle<JSReceiver>::cast(receiver_);
+  Handle<Object> root =
+      handle(receiver_->GetRootMap(isolate_)->prototype(), isolate_);
+  CHECK(!root->IsNull());
+  return Handle<JSReceiver>::cast(root);
 }
 
 
 Handle<Map> LookupIterator::GetReceiverMap() const {
-  Handle<Object> receiver = GetReceiver();
-  if (receiver->IsNumber()) return isolate_->factory()->heap_number_map();
-  return handle(Handle<HeapObject>::cast(receiver)->map());
+  if (receiver_->IsNumber()) return isolate_->factory()->heap_number_map();
+  return handle(Handle<HeapObject>::cast(receiver_)->map(), isolate_);
 }
 
 
-bool LookupIterator::NextHolder() {
-  if (holder_map_->prototype()->IsNull()) return false;
-
-  Handle<JSReceiver> next(JSReceiver::cast(holder_map_->prototype()));
-
-  if (!check_derived() &&
-      !(check_hidden() &&
-         // TODO(verwaest): Check if this is actually necessary currently. If it
-         // is, this should be handled by setting is_hidden_prototype on the
-         // global object behind the proxy.
-        (holder_map_->IsJSGlobalProxyMap() ||
-         next->map()->is_hidden_prototype()))) {
-    return false;
+Handle<JSObject> LookupIterator::GetStoreTarget() const {
+  if (receiver_->IsJSGlobalProxy()) {
+    PrototypeIterator iter(isolate(), receiver_);
+    if (iter.IsAtEnd()) return Handle<JSGlobalProxy>::cast(receiver_);
+    return Handle<JSGlobalObject>::cast(PrototypeIterator::GetCurrent(iter));
   }
-
-  holder_map_ = handle(next->map());
-  maybe_holder_ = next;
-  return true;
-}
-
-
-LookupIterator::State LookupIterator::LookupInHolder() {
-  switch (state_) {
-    case NOT_FOUND:
-      if (holder_map_->IsJSProxyMap()) {
-        return JSPROXY;
-      }
-      if (check_access_check() && holder_map_->is_access_check_needed()) {
-        return ACCESS_CHECK;
-      }
-      // Fall through.
-    case ACCESS_CHECK:
-      if (check_interceptor() && holder_map_->has_named_interceptor()) {
-        return INTERCEPTOR;
-      }
-      // Fall through.
-    case INTERCEPTOR:
-      if (holder_map_->is_dictionary_map()) {
-        property_encoding_ = DICTIONARY;
-      } else {
-        DescriptorArray* descriptors = holder_map_->instance_descriptors();
-        number_ = descriptors->SearchWithCache(*name_, *holder_map_);
-        if (number_ == DescriptorArray::kNotFound) return NOT_FOUND;
-        property_encoding_ = DESCRIPTOR;
-      }
-      return PROPERTY;
-    case PROPERTY:
-      return NOT_FOUND;
-    case JSPROXY:
-      UNREACHABLE();
-  }
-  UNREACHABLE();
-  return state_;
+  return Handle<JSObject>::cast(receiver_);
 }
 
 
@@ -109,92 +73,244 @@
 
 
 bool LookupIterator::HasAccess(v8::AccessType access_type) const {
-  ASSERT_EQ(ACCESS_CHECK, state_);
-  ASSERT(is_guaranteed_to_have_holder());
-  return isolate_->MayNamedAccess(GetHolder(), name_, access_type);
+  DCHECK_EQ(ACCESS_CHECK, state_);
+  return isolate_->MayNamedAccess(GetHolder<JSObject>(), name_, access_type);
 }
 
 
-bool LookupIterator::HasProperty() {
-  ASSERT_EQ(PROPERTY, state_);
-  ASSERT(is_guaranteed_to_have_holder());
+void LookupIterator::ReloadPropertyInformation() {
+  state_ = BEFORE_PROPERTY;
+  state_ = LookupInHolder(*holder_map_, *holder_);
+  DCHECK(IsFound() || holder_map_->is_dictionary_map());
+}
 
-  if (property_encoding_ == DICTIONARY) {
-    Handle<JSObject> holder = GetHolder();
-    number_ = holder->property_dictionary()->FindEntry(name_);
-    if (number_ == NameDictionary::kNotFound) return false;
 
-    property_details_ = GetHolder()->property_dictionary()->DetailsAt(number_);
-    // Holes in dictionary cells are absent values unless marked as read-only.
-    if (holder->IsGlobalObject() &&
-        (property_details_.IsDeleted() ||
-         (!property_details_.IsReadOnly() && FetchValue()->IsTheHole()))) {
-      return false;
+void LookupIterator::PrepareForDataProperty(Handle<Object> value) {
+  DCHECK(state_ == DATA || state_ == ACCESSOR);
+  DCHECK(HolderIsReceiverOrHiddenPrototype());
+  if (holder_map_->is_dictionary_map()) return;
+  holder_map_ =
+      Map::PrepareForDataProperty(holder_map_, descriptor_number(), value);
+  JSObject::MigrateToMap(GetHolder<JSObject>(), holder_map_);
+  ReloadPropertyInformation();
+}
+
+
+void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
+                                             PropertyAttributes attributes) {
+  DCHECK(state_ == DATA || state_ == ACCESSOR);
+  DCHECK(HolderIsReceiverOrHiddenPrototype());
+  Handle<JSObject> holder = GetHolder<JSObject>();
+  if (holder_map_->is_dictionary_map()) {
+    PropertyDetails details(attributes, NORMAL, 0);
+    JSObject::SetNormalizedProperty(holder, name(), value, details);
+  } else {
+    holder_map_ = Map::ReconfigureDataProperty(holder_map_, descriptor_number(),
+                                               attributes);
+    JSObject::MigrateToMap(holder, holder_map_);
+  }
+
+  ReloadPropertyInformation();
+}
+
+
+void LookupIterator::PrepareTransitionToDataProperty(
+    Handle<Object> value, PropertyAttributes attributes,
+    Object::StoreFromKeyed store_mode) {
+  if (state_ == TRANSITION) return;
+  DCHECK(state_ != LookupIterator::ACCESSOR ||
+         GetAccessors()->IsDeclaredAccessorInfo());
+  DCHECK(state_ == NOT_FOUND || !HolderIsReceiverOrHiddenPrototype());
+
+  // Can only be called when the receiver is a JSObject. JSProxy has to be
+  // handled via a trap. Adding properties to primitive values is not
+  // observable.
+  Handle<JSObject> receiver = GetStoreTarget();
+
+  if (!name().is_identical_to(isolate()->factory()->hidden_string()) &&
+      !receiver->map()->is_extensible()) {
+    return;
+  }
+
+  transition_map_ = Map::TransitionToDataProperty(
+      handle(receiver->map(), isolate_), name_, value, attributes, store_mode);
+  state_ = TRANSITION;
+}
+
+
+void LookupIterator::ApplyTransitionToDataProperty() {
+  DCHECK_EQ(TRANSITION, state_);
+
+  Handle<JSObject> receiver = GetStoreTarget();
+  holder_ = receiver;
+  holder_map_ = transition_map_;
+  JSObject::MigrateToMap(receiver, holder_map_);
+  ReloadPropertyInformation();
+}
+
+
+void LookupIterator::TransitionToAccessorProperty(
+    AccessorComponent component, Handle<Object> accessor,
+    PropertyAttributes attributes) {
+  DCHECK(!accessor->IsNull());
+  // Can only be called when the receiver is a JSObject. JSProxy has to be
+  // handled via a trap. Adding properties to primitive values is not
+  // observable.
+  Handle<JSObject> receiver = GetStoreTarget();
+  holder_ = receiver;
+  holder_map_ =
+      Map::TransitionToAccessorProperty(handle(receiver->map(), isolate_),
+                                        name_, component, accessor, attributes);
+  JSObject::MigrateToMap(receiver, holder_map_);
+
+  ReloadPropertyInformation();
+
+  if (!holder_map_->is_dictionary_map()) return;
+
+  // We have to deoptimize since accesses to data properties may have been
+  // inlined without a corresponding map-check.
+  if (holder_map_->IsGlobalObjectMap()) {
+    Deoptimizer::DeoptimizeGlobalObject(*receiver);
+  }
+
+  // Install the accessor into the dictionary-mode object.
+  PropertyDetails details(attributes, CALLBACKS, 0);
+  Handle<AccessorPair> pair;
+  if (state() == ACCESSOR && GetAccessors()->IsAccessorPair()) {
+    pair = Handle<AccessorPair>::cast(GetAccessors());
+    // If the component and attributes are identical, nothing has to be done.
+    if (pair->get(component) == *accessor) {
+      if (property_details().attributes() == attributes) return;
+    } else {
+      pair = AccessorPair::Copy(pair);
+      pair->set(component, *accessor);
     }
   } else {
-    property_details_ = holder_map_->instance_descriptors()->GetDetails(
-        number_);
+    pair = isolate()->factory()->NewAccessorPair();
+    pair->set(component, *accessor);
   }
+  JSObject::SetNormalizedProperty(receiver, name_, pair, details);
 
-  switch (property_details_.type()) {
-    case v8::internal::FIELD:
-    case v8::internal::NORMAL:
-    case v8::internal::CONSTANT:
-      property_kind_ = DATA;
-      break;
-    case v8::internal::CALLBACKS:
-      property_kind_ = ACCESSOR;
-      break;
-    case v8::internal::HANDLER:
-    case v8::internal::NONEXISTENT:
-    case v8::internal::INTERCEPTOR:
-      UNREACHABLE();
+  JSObject::ReoptimizeIfPrototype(receiver);
+  holder_map_ = handle(receiver->map(), isolate_);
+  ReloadPropertyInformation();
+}
+
+
+bool LookupIterator::HolderIsReceiverOrHiddenPrototype() const {
+  DCHECK(has_property_ || state_ == INTERCEPTOR || state_ == JSPROXY);
+  // Optimization that only works if configuration_ is not mutable.
+  if (!check_prototype_chain()) return true;
+  DisallowHeapAllocation no_gc;
+  if (!receiver_->IsJSReceiver()) return false;
+  Object* current = *receiver_;
+  JSReceiver* holder = *holder_;
+  // JSProxies do not occur as hidden prototypes.
+  if (current->IsJSProxy()) {
+    return JSReceiver::cast(current) == holder;
   }
-
-  has_property_ = true;
-  return true;
+  PrototypeIterator iter(isolate(), current,
+                         PrototypeIterator::START_AT_RECEIVER);
+  do {
+    if (JSReceiver::cast(iter.GetCurrent()) == holder) return true;
+    DCHECK(!current->IsJSProxy());
+    iter.Advance();
+  } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
+  return false;
 }
 
 
 Handle<Object> LookupIterator::FetchValue() const {
   Object* result = NULL;
-  switch (property_encoding_) {
-    case DICTIONARY:
-      result = GetHolder()->property_dictionary()->ValueAt(number_);
-      if (GetHolder()->IsGlobalObject()) {
-        result = PropertyCell::cast(result)->value();
-      }
-      break;
-    case DESCRIPTOR:
-      if (property_details_.type() == v8::internal::FIELD) {
-        FieldIndex field_index = FieldIndex::ForDescriptor(
-            *holder_map_, number_);
-        return JSObject::FastPropertyAt(
-            GetHolder(), property_details_.representation(), field_index);
-      }
-      result = holder_map_->instance_descriptors()->GetValue(number_);
+  Handle<JSObject> holder = GetHolder<JSObject>();
+  if (holder_map_->is_dictionary_map()) {
+    result = holder->property_dictionary()->ValueAt(number_);
+    if (holder_map_->IsGlobalObjectMap()) {
+      result = PropertyCell::cast(result)->value();
+    }
+  } else if (property_details_.type() == v8::internal::FIELD) {
+    FieldIndex field_index = FieldIndex::ForDescriptor(*holder_map_, number_);
+    return JSObject::FastPropertyAt(holder, property_details_.representation(),
+                                    field_index);
+  } else {
+    result = holder_map_->instance_descriptors()->GetValue(number_);
   }
   return handle(result, isolate_);
 }
 
 
+int LookupIterator::GetConstantIndex() const {
+  DCHECK(has_property_);
+  DCHECK(!holder_map_->is_dictionary_map());
+  DCHECK_EQ(v8::internal::CONSTANT, property_details_.type());
+  return descriptor_number();
+}
+
+
+FieldIndex LookupIterator::GetFieldIndex() const {
+  DCHECK(has_property_);
+  DCHECK(!holder_map_->is_dictionary_map());
+  DCHECK_EQ(v8::internal::FIELD, property_details_.type());
+  int index =
+      holder_map_->instance_descriptors()->GetFieldIndex(descriptor_number());
+  bool is_double = representation().IsDouble();
+  return FieldIndex::ForPropertyIndex(*holder_map_, index, is_double);
+}
+
+
+Handle<HeapType> LookupIterator::GetFieldType() const {
+  DCHECK(has_property_);
+  DCHECK(!holder_map_->is_dictionary_map());
+  DCHECK_EQ(v8::internal::FIELD, property_details_.type());
+  return handle(
+      holder_map_->instance_descriptors()->GetFieldType(descriptor_number()),
+      isolate_);
+}
+
+
+Handle<PropertyCell> LookupIterator::GetPropertyCell() const {
+  Handle<JSObject> holder = GetHolder<JSObject>();
+  Handle<GlobalObject> global = Handle<GlobalObject>::cast(holder);
+  Object* value = global->property_dictionary()->ValueAt(dictionary_entry());
+  return Handle<PropertyCell>(PropertyCell::cast(value));
+}
+
+
 Handle<Object> LookupIterator::GetAccessors() const {
-  ASSERT(has_property_);
-  ASSERT_EQ(ACCESSOR, property_kind_);
+  DCHECK_EQ(ACCESSOR, state_);
   return FetchValue();
 }
 
 
 Handle<Object> LookupIterator::GetDataValue() const {
-  ASSERT(has_property_);
-  ASSERT_EQ(DATA, property_kind_);
+  DCHECK_EQ(DATA, state_);
   Handle<Object> value = FetchValue();
-  if (value->IsTheHole()) {
-    ASSERT(property_details_.IsReadOnly());
-    return factory()->undefined_value();
-  }
   return value;
 }
 
 
+void LookupIterator::WriteDataValue(Handle<Object> value) {
+  DCHECK_EQ(DATA, state_);
+  Handle<JSObject> holder = GetHolder<JSObject>();
+  if (holder_map_->is_dictionary_map()) {
+    NameDictionary* property_dictionary = holder->property_dictionary();
+    if (holder->IsGlobalObject()) {
+      Handle<PropertyCell> cell(
+          PropertyCell::cast(property_dictionary->ValueAt(dictionary_entry())));
+      PropertyCell::SetValueInferType(cell, value);
+    } else {
+      property_dictionary->ValueAtPut(dictionary_entry(), *value);
+    }
+  } else if (property_details_.type() == v8::internal::FIELD) {
+    holder->WriteToField(descriptor_number(), *value);
+  } else {
+    DCHECK_EQ(v8::internal::CONSTANT, property_details_.type());
+  }
+}
+
+
+void LookupIterator::InternalizeName() {
+  if (name_->IsUniqueName()) return;
+  name_ = factory()->InternalizeString(Handle<String>::cast(name_));
+}
 } }  // namespace v8::internal
diff --git a/src/lookup.h b/src/lookup.h
index 0ac9d35..14ca010 100644
--- a/src/lookup.h
+++ b/src/lookup.h
@@ -12,70 +12,61 @@
 namespace v8 {
 namespace internal {
 
-class LookupIterator V8_FINAL BASE_EMBEDDED {
+class LookupIterator FINAL BASE_EMBEDDED {
  public:
   enum Configuration {
-    CHECK_OWN_REAL     = 0,
-    CHECK_HIDDEN       = 1 << 0,
-    CHECK_DERIVED      = 1 << 1,
-    CHECK_INTERCEPTOR  = 1 << 2,
-    CHECK_ACCESS_CHECK = 1 << 3,
-    CHECK_ALL          = CHECK_HIDDEN | CHECK_DERIVED |
-                         CHECK_INTERCEPTOR | CHECK_ACCESS_CHECK,
-    SKIP_INTERCEPTOR   = CHECK_ALL ^ CHECK_INTERCEPTOR,
-    CHECK_OWN          = CHECK_ALL ^ CHECK_DERIVED
+    // Configuration bits.
+    kHidden = 1 << 0,
+    kInterceptor = 1 << 1,
+    kPrototypeChain = 1 << 2,
+
+    // Convenience combinations of bits.
+    OWN_SKIP_INTERCEPTOR = 0,
+    OWN = kInterceptor,
+    HIDDEN_SKIP_INTERCEPTOR = kHidden,
+    HIDDEN = kHidden | kInterceptor,
+    PROTOTYPE_CHAIN_SKIP_INTERCEPTOR = kHidden | kPrototypeChain,
+    PROTOTYPE_CHAIN = kHidden | kPrototypeChain | kInterceptor
   };
 
   enum State {
-    NOT_FOUND,
-    PROPERTY,
-    INTERCEPTOR,
     ACCESS_CHECK,
-    JSPROXY
-  };
-
-  enum PropertyKind {
+    INTERCEPTOR,
+    JSPROXY,
+    NOT_FOUND,
+    ACCESSOR,
     DATA,
-    ACCESSOR
+    TRANSITION,
+    // Set state_ to BEFORE_PROPERTY to ensure that the next lookup will be a
+    // PROPERTY lookup.
+    BEFORE_PROPERTY = INTERCEPTOR
   };
 
-  enum PropertyEncoding {
-    DICTIONARY,
-    DESCRIPTOR
-  };
-
-  LookupIterator(Handle<Object> receiver,
-                 Handle<Name> name,
-                 Configuration configuration = CHECK_ALL)
-      : configuration_(configuration),
+  LookupIterator(Handle<Object> receiver, Handle<Name> name,
+                 Configuration configuration = PROTOTYPE_CHAIN)
+      : configuration_(ComputeConfiguration(configuration, name)),
         state_(NOT_FOUND),
-        property_kind_(DATA),
-        property_encoding_(DESCRIPTOR),
-        property_details_(NONE, NONEXISTENT, Representation::None()),
+        property_details_(NONE, NORMAL, Representation::None()),
         isolate_(name->GetIsolate()),
         name_(name),
-        maybe_receiver_(receiver),
+        receiver_(receiver),
         number_(DescriptorArray::kNotFound) {
-    Handle<JSReceiver> root = GetRoot();
-    holder_map_ = handle(root->map());
-    maybe_holder_ = root;
+    holder_ = GetRoot();
+    holder_map_ = handle(holder_->map(), isolate_);
     Next();
   }
 
-  LookupIterator(Handle<Object> receiver,
-                 Handle<Name> name,
+  LookupIterator(Handle<Object> receiver, Handle<Name> name,
                  Handle<JSReceiver> holder,
-                 Configuration configuration = CHECK_ALL)
-      : configuration_(configuration),
+                 Configuration configuration = PROTOTYPE_CHAIN)
+      : configuration_(ComputeConfiguration(configuration, name)),
         state_(NOT_FOUND),
-        property_kind_(DATA),
-        property_encoding_(DESCRIPTOR),
-        property_details_(NONE, NONEXISTENT, Representation::None()),
+        property_details_(NONE, NORMAL, Representation::None()),
         isolate_(name->GetIsolate()),
         name_(name),
-        holder_map_(holder->map()),
-        maybe_receiver_(receiver),
-        maybe_holder_(holder),
+        holder_map_(holder->map(), isolate_),
+        receiver_(receiver),
+        holder_(holder),
         number_(DescriptorArray::kNotFound) {
     Next();
   }
@@ -86,93 +77,118 @@
 
   bool IsFound() const { return state_ != NOT_FOUND; }
   void Next();
-
-  Heap* heap() const { return isolate_->heap(); }
-  Factory* factory() const { return isolate_->factory(); }
-  Handle<Object> GetReceiver() const {
-    return Handle<Object>::cast(maybe_receiver_.ToHandleChecked());
+  void NotFound() {
+    has_property_ = false;
+    state_ = NOT_FOUND;
   }
-  Handle<JSObject> GetHolder() const {
-    ASSERT(IsFound() && state_ != JSPROXY);
-    return Handle<JSObject>::cast(maybe_holder_.ToHandleChecked());
+
+  Factory* factory() const { return isolate_->factory(); }
+  Handle<Object> GetReceiver() const { return receiver_; }
+  Handle<JSObject> GetStoreTarget() const;
+  bool is_dictionary_holder() const { return holder_map_->is_dictionary_map(); }
+  Handle<Map> transition_map() const {
+    DCHECK_EQ(TRANSITION, state_);
+    return transition_map_;
+  }
+  template <class T>
+  Handle<T> GetHolder() const {
+    DCHECK(IsFound());
+    return Handle<T>::cast(holder_);
   }
   Handle<JSReceiver> GetRoot() const;
-
-  /* Dynamically reduce the trapped types. */
-  void skip_interceptor() {
-    configuration_ = static_cast<Configuration>(
-        configuration_ & ~CHECK_INTERCEPTOR);
-  }
-  void skip_access_check() {
-    configuration_ = static_cast<Configuration>(
-        configuration_ & ~CHECK_ACCESS_CHECK);
-  }
+  bool HolderIsReceiverOrHiddenPrototype() const;
 
   /* ACCESS_CHECK */
   bool HasAccess(v8::AccessType access_type) const;
 
   /* PROPERTY */
-  // HasProperty needs to be called before any of the other PROPERTY methods
-  // below can be used. It ensures that we are able to provide a definite
-  // answer, and loads extra information about the property.
-  bool HasProperty();
-  PropertyKind property_kind() const {
-    ASSERT(has_property_);
-    return property_kind_;
+  void PrepareForDataProperty(Handle<Object> value);
+  void PrepareTransitionToDataProperty(Handle<Object> value,
+                                       PropertyAttributes attributes,
+                                       Object::StoreFromKeyed store_mode);
+  bool IsCacheableTransition() {
+    bool cacheable =
+        state_ == TRANSITION && transition_map()->GetBackPointer()->IsMap();
+    if (cacheable) {
+      property_details_ = transition_map_->GetLastDescriptorDetails();
+      has_property_ = true;
+    }
+    return cacheable;
   }
+  void ApplyTransitionToDataProperty();
+  void ReconfigureDataProperty(Handle<Object> value,
+                               PropertyAttributes attributes);
+  void TransitionToAccessorProperty(AccessorComponent component,
+                                    Handle<Object> accessor,
+                                    PropertyAttributes attributes);
   PropertyDetails property_details() const {
-    ASSERT(has_property_);
+    DCHECK(has_property_);
     return property_details_;
   }
+  bool IsConfigurable() const { return property_details().IsConfigurable(); }
+  bool IsReadOnly() const { return property_details().IsReadOnly(); }
+  Representation representation() const {
+    return property_details().representation();
+  }
+  FieldIndex GetFieldIndex() const;
+  Handle<HeapType> GetFieldType() const;
+  int GetConstantIndex() const;
+  Handle<PropertyCell> GetPropertyCell() const;
   Handle<Object> GetAccessors() const;
   Handle<Object> GetDataValue() const;
+  void WriteDataValue(Handle<Object> value);
 
-  /* JSPROXY */
-
-  Handle<JSProxy> GetJSProxy() const {
-    return Handle<JSProxy>::cast(maybe_holder_.ToHandleChecked());
-  }
+  void InternalizeName();
 
  private:
   Handle<Map> GetReceiverMap() const;
 
-  MUST_USE_RESULT bool NextHolder();
-  State LookupInHolder();
+  MUST_USE_RESULT inline JSReceiver* NextHolder(Map* map);
+  inline State LookupInHolder(Map* map, JSReceiver* holder);
   Handle<Object> FetchValue() const;
+  void ReloadPropertyInformation();
 
   bool IsBootstrapping() const;
 
-  // Methods that fetch data from the holder ensure they always have a holder.
-  // This means the receiver needs to be present as opposed to just the receiver
-  // map. Other objects in the prototype chain are transitively guaranteed to be
-  // present via the receiver map.
-  bool is_guaranteed_to_have_holder() const {
-    return !maybe_receiver_.is_null();
-  }
+  bool check_hidden() const { return (configuration_ & kHidden) != 0; }
   bool check_interceptor() const {
-    return !IsBootstrapping() && (configuration_ & CHECK_INTERCEPTOR) != 0;
+    return !IsBootstrapping() && (configuration_ & kInterceptor) != 0;
   }
-  bool check_derived() const {
-    return (configuration_ & CHECK_DERIVED) != 0;
+  bool check_prototype_chain() const {
+    return (configuration_ & kPrototypeChain) != 0;
   }
-  bool check_hidden() const {
-    return (configuration_ & CHECK_HIDDEN) != 0;
+  int descriptor_number() const {
+    DCHECK(has_property_);
+    DCHECK(!holder_map_->is_dictionary_map());
+    return number_;
   }
-  bool check_access_check() const {
-    return (configuration_ & CHECK_ACCESS_CHECK) != 0;
+  int dictionary_entry() const {
+    DCHECK(has_property_);
+    DCHECK(holder_map_->is_dictionary_map());
+    return number_;
   }
 
+  static Configuration ComputeConfiguration(
+      Configuration configuration, Handle<Name> name) {
+    if (name->IsOwn()) {
+      return static_cast<Configuration>(configuration & HIDDEN);
+    } else {
+      return configuration;
+    }
+  }
+
+  // If configuration_ becomes mutable, update
+  // HolderIsReceiverOrHiddenPrototype.
   Configuration configuration_;
   State state_;
   bool has_property_;
-  PropertyKind property_kind_;
-  PropertyEncoding property_encoding_;
   PropertyDetails property_details_;
   Isolate* isolate_;
   Handle<Name> name_;
   Handle<Map> holder_map_;
-  MaybeHandle<Object> maybe_receiver_;
-  MaybeHandle<JSReceiver> maybe_holder_;
+  Handle<Map> transition_map_;
+  Handle<Object> receiver_;
+  Handle<JSReceiver> holder_;
 
   int number_;
 };
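
The Configuration values above are bit masks rather than the old CHECK_*
set, so each check_* predicate reduces to a single AND. A self-contained
illustration of how the named combinations relate to the bits:

    enum Configuration {
      kHidden = 1 << 0,
      kInterceptor = 1 << 1,
      kPrototypeChain = 1 << 2,
      OWN_SKIP_INTERCEPTOR = 0,
      OWN = kInterceptor,
      HIDDEN = kHidden | kInterceptor,
      PROTOTYPE_CHAIN = kHidden | kPrototypeChain | kInterceptor
    };

    static_assert((OWN & kPrototypeChain) == 0,
                  "OWN lookups never walk the prototype chain");
    static_assert((PROTOTYPE_CHAIN & kHidden) != 0,
                  "full-chain lookups also traverse hidden prototypes");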
diff --git a/src/macro-assembler.h b/src/macro-assembler.h
index a11afd8..54cebca 100644
--- a/src/macro-assembler.h
+++ b/src/macro-assembler.h
@@ -41,41 +41,48 @@
 #include "src/assembler.h"
 #include "src/ia32/assembler-ia32.h"
 #include "src/ia32/assembler-ia32-inl.h"
-#include "src/code.h"  // must be after assembler_*.h
+#include "src/code.h"  // NOLINT, must be after assembler_*.h
 #include "src/ia32/macro-assembler-ia32.h"
 #elif V8_TARGET_ARCH_X64
 #include "src/assembler.h"
 #include "src/x64/assembler-x64.h"
 #include "src/x64/assembler-x64-inl.h"
-#include "src/code.h"  // must be after assembler_*.h
+#include "src/code.h"  // NOLINT, must be after assembler_*.h
 #include "src/x64/macro-assembler-x64.h"
 #elif V8_TARGET_ARCH_ARM64
 #include "src/arm64/constants-arm64.h"
 #include "src/assembler.h"
-#include "src/arm64/assembler-arm64.h"
+#include "src/arm64/assembler-arm64.h"  // NOLINT
 #include "src/arm64/assembler-arm64-inl.h"
-#include "src/code.h"  // must be after assembler_*.h
-#include "src/arm64/macro-assembler-arm64.h"
+#include "src/code.h"  // NOLINT, must be after assembler_*.h
+#include "src/arm64/macro-assembler-arm64.h"  // NOLINT
 #include "src/arm64/macro-assembler-arm64-inl.h"
 #elif V8_TARGET_ARCH_ARM
 #include "src/arm/constants-arm.h"
 #include "src/assembler.h"
-#include "src/arm/assembler-arm.h"
+#include "src/arm/assembler-arm.h"  // NOLINT
 #include "src/arm/assembler-arm-inl.h"
-#include "src/code.h"  // must be after assembler_*.h
-#include "src/arm/macro-assembler-arm.h"
+#include "src/code.h"                     // NOLINT, must be after assembler_*.h
+#include "src/arm/macro-assembler-arm.h"  // NOLINT
 #elif V8_TARGET_ARCH_MIPS
 #include "src/mips/constants-mips.h"
-#include "src/assembler.h"
-#include "src/mips/assembler-mips.h"
+#include "src/assembler.h"            // NOLINT
+#include "src/mips/assembler-mips.h"  // NOLINT
 #include "src/mips/assembler-mips-inl.h"
-#include "src/code.h"  // must be after assembler_*.h
+#include "src/code.h"  // NOLINT, must be after assembler_*.h
 #include "src/mips/macro-assembler-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/constants-mips64.h"
+#include "src/assembler.h"                // NOLINT
+#include "src/mips64/assembler-mips64.h"  // NOLINT
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/code.h"  // NOLINT, must be after assembler_*.h
+#include "src/mips64/macro-assembler-mips64.h"
 #elif V8_TARGET_ARCH_X87
 #include "src/assembler.h"
 #include "src/x87/assembler-x87.h"
 #include "src/x87/assembler-x87-inl.h"
-#include "src/code.h"  // must be after assembler_*.h
+#include "src/code.h"  // NOLINT, must be after assembler_*.h
 #include "src/x87/macro-assembler-x87.h"
 #else
 #error Unsupported target architecture.
@@ -107,7 +114,7 @@
   // scope, the MacroAssembler is still marked as being in a frame scope, and
   // the code will be generated again when it goes out of scope.
   void GenerateLeaveFrame() {
-    ASSERT(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
+    DCHECK(type_ != StackFrame::MANUAL && type_ != StackFrame::NONE);
     masm_->LeaveFrame(type_);
   }
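// The ASSERT -> DCHECK rename in the hunk above is mechanical; both name a
// debug-only check. A minimal sketch of such a macro (illustrative, not V8's
// actual definition): the condition is evaluated and enforced in debug
// builds and compiles away entirely in release builds.
#include <cstdio>
#include <cstdlib>

#ifdef DEBUG
#define DCHECK(condition)                                           \
  do {                                                               \
    if (!(condition)) {                                              \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition);  \
      std::abort();                                                  \
    }                                                                \
  } while (false)
#else
#define DCHECK(condition) ((void)0)
#endif

int main() {
  int frame_type = 1;
  DCHECK(frame_type != 0);  // aborts in debug builds if the invariant breaks
  return 0;
}
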
 
diff --git a/src/macros.py b/src/macros.py
index 305a693..b3ff0fc 100644
--- a/src/macros.py
+++ b/src/macros.py
@@ -166,14 +166,16 @@
 macro TO_NUMBER_INLINE(arg) = (IS_NUMBER(%IS_VAR(arg)) ? arg : NonNumberToNumber(arg));
 macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg));
 macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
+macro HAS_OWN_PROPERTY(obj, index) = (%_CallFunction(obj, index, ObjectHasOwnProperty));
 
 # Private names.
 # GET_PRIVATE should only be used if the property is known to exist on obj
 # itself (it should really use %GetOwnProperty, but that would be way slower).
-macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateSymbol(name));
-macro NEW_PRIVATE(name) = (%CreatePrivateSymbol(name));
+macro GLOBAL_PRIVATE(name) = (%CreateGlobalPrivateOwnSymbol(name));
+macro NEW_PRIVATE_OWN(name) = (%CreatePrivateOwnSymbol(name));
 macro IS_PRIVATE(sym) = (%SymbolIsPrivate(sym));
 macro HAS_PRIVATE(obj, sym) = (%HasOwnProperty(obj, sym));
+macro HAS_DEFINED_PRIVATE(obj, sym) = (!IS_UNDEFINED(obj[sym]));
 macro GET_PRIVATE(obj, sym) = (obj[sym]);
 macro SET_PRIVATE(obj, sym, val) = (obj[sym] = val);
 macro DELETE_PRIVATE(obj, sym) = (delete obj[sym]);
@@ -285,3 +287,6 @@
 const ITERATOR_KIND_KEYS = 1;
 const ITERATOR_KIND_VALUES = 2;
 const ITERATOR_KIND_ENTRIES = 3;
+
+# Check whether debug is active.
+const DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
diff --git a/src/mark-compact-inl.h b/src/mark-compact-inl.h
deleted file mode 100644
index d1374c4..0000000
--- a/src/mark-compact-inl.h
+++ /dev/null
@@ -1,79 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_MARK_COMPACT_INL_H_
-#define V8_MARK_COMPACT_INL_H_
-
-#include <memory.h>
-
-#include "src/isolate.h"
-#include "src/mark-compact.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-MarkBit Marking::MarkBitFrom(Address addr) {
-  MemoryChunk* p = MemoryChunk::FromAddress(addr);
-  return p->markbits()->MarkBitFromIndex(p->AddressToMarkbitIndex(addr),
-                                         p->ContainsOnlyData());
-}
-
-
-void MarkCompactCollector::SetFlags(int flags) {
-  sweep_precisely_ = ((flags & Heap::kSweepPreciselyMask) != 0);
-  reduce_memory_footprint_ = ((flags & Heap::kReduceMemoryFootprintMask) != 0);
-  abort_incremental_marking_ =
-      ((flags & Heap::kAbortIncrementalMarkingMask) != 0);
-}
-
-
-void MarkCompactCollector::MarkObject(HeapObject* obj, MarkBit mark_bit) {
-  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
-  if (!mark_bit.Get()) {
-    mark_bit.Set();
-    MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-    ASSERT(IsMarked(obj));
-    ASSERT(obj->GetIsolate()->heap()->Contains(obj));
-    marking_deque_.PushBlack(obj);
-  }
-}
-
-
-void MarkCompactCollector::SetMark(HeapObject* obj, MarkBit mark_bit) {
-  ASSERT(!mark_bit.Get());
-  ASSERT(Marking::MarkBitFrom(obj) == mark_bit);
-  mark_bit.Set();
-  MemoryChunk::IncrementLiveBytesFromGC(obj->address(), obj->Size());
-}
-
-
-bool MarkCompactCollector::IsMarked(Object* obj) {
-  ASSERT(obj->IsHeapObject());
-  HeapObject* heap_object = HeapObject::cast(obj);
-  return Marking::MarkBitFrom(heap_object).Get();
-}
-
-
-void MarkCompactCollector::RecordSlot(Object** anchor_slot,
-                                      Object** slot,
-                                      Object* object,
-                                      SlotsBuffer::AdditionMode mode) {
-  Page* object_page = Page::FromAddress(reinterpret_cast<Address>(object));
-  if (object_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(anchor_slot)) {
-    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                            object_page->slots_buffer_address(),
-                            slot,
-                            mode)) {
-      EvictEvacuationCandidate(object_page);
-    }
-  }
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_MARK_COMPACT_INL_H_
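// A minimal sketch (illustrative, not V8 code) of the test-and-set marking
// protocol from the deleted header above: marking an object flips its mark
// bit exactly once, bumps the live-byte count, and pushes the object on a
// marking deque so its body can be scanned later.
#include <cstddef>
#include <deque>

namespace sketch {

struct HeapObject {
  std::size_t size;
  bool mark_bit = false;
};

struct Collector {
  std::deque<HeapObject*> marking_deque;
  std::size_t live_bytes = 0;

  void MarkObject(HeapObject* obj) {
    if (!obj->mark_bit) {  // only the first visit marks, accounts, and pushes
      obj->mark_bit = true;
      live_bytes += obj->size;
      marking_deque.push_back(obj);
    }
  }
};

}  // namespace sketch

int main() {
  sketch::HeapObject a{16}, b{32};
  sketch::Collector collector;
  collector.MarkObject(&a);
  collector.MarkObject(&a);  // second call is a no-op
  collector.MarkObject(&b);
  return collector.live_bytes == 48 ? 0 : 1;
}
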
diff --git a/src/mark-compact.cc b/src/mark-compact.cc
deleted file mode 100644
index 30b9755..0000000
--- a/src/mark-compact.cc
+++ /dev/null
@@ -1,4469 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/base/atomicops.h"
-#include "src/code-stubs.h"
-#include "src/compilation-cache.h"
-#include "src/cpu-profiler.h"
-#include "src/deoptimizer.h"
-#include "src/execution.h"
-#include "src/gdb-jit.h"
-#include "src/global-handles.h"
-#include "src/heap-profiler.h"
-#include "src/ic-inl.h"
-#include "src/incremental-marking.h"
-#include "src/mark-compact.h"
-#include "src/objects-visiting.h"
-#include "src/objects-visiting-inl.h"
-#include "src/spaces-inl.h"
-#include "src/stub-cache.h"
-#include "src/sweeper-thread.h"
-
-namespace v8 {
-namespace internal {
-
-
-const char* Marking::kWhiteBitPattern = "00";
-const char* Marking::kBlackBitPattern = "10";
-const char* Marking::kGreyBitPattern = "11";
-const char* Marking::kImpossibleBitPattern = "01";
-
-
-// -------------------------------------------------------------------------
-// MarkCompactCollector
-
-MarkCompactCollector::MarkCompactCollector(Heap* heap) :  // NOLINT
-#ifdef DEBUG
-      state_(IDLE),
-#endif
-      sweep_precisely_(false),
-      reduce_memory_footprint_(false),
-      abort_incremental_marking_(false),
-      marking_parity_(ODD_MARKING_PARITY),
-      compacting_(false),
-      was_marked_incrementally_(false),
-      sweeping_pending_(false),
-      pending_sweeper_jobs_semaphore_(0),
-      sequential_sweeping_(false),
-      tracer_(NULL),
-      migration_slots_buffer_(NULL),
-      heap_(heap),
-      code_flusher_(NULL),
-      have_code_to_deoptimize_(false) { }
-
-#ifdef VERIFY_HEAP
-class VerifyMarkingVisitor: public ObjectVisitor {
- public:
-  explicit VerifyMarkingVisitor(Heap* heap) : heap_(heap) {}
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        CHECK(heap_->mark_compact_collector()->IsMarked(object));
-      }
-    }
-  }
-
-  void VisitEmbeddedPointer(RelocInfo* rinfo) {
-    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    if (!rinfo->host()->IsWeakObject(rinfo->target_object())) {
-      Object* p = rinfo->target_object();
-      VisitPointer(&p);
-    }
-  }
-
-  void VisitCell(RelocInfo* rinfo) {
-    Code* code = rinfo->host();
-    ASSERT(rinfo->rmode() == RelocInfo::CELL);
-    if (!code->IsWeakObject(rinfo->target_cell())) {
-      ObjectVisitor::VisitCell(rinfo);
-    }
-  }
-
- private:
-  Heap* heap_;
-};
-
-
-static void VerifyMarking(Heap* heap, Address bottom, Address top) {
-  VerifyMarkingVisitor visitor(heap);
-  HeapObject* object;
-  Address next_object_must_be_here_or_later = bottom;
-
-  for (Address current = bottom;
-       current < top;
-       current += kPointerSize) {
-    object = HeapObject::FromAddress(current);
-    if (MarkCompactCollector::IsMarked(object)) {
-      CHECK(current >= next_object_must_be_here_or_later);
-      object->Iterate(&visitor);
-      next_object_must_be_here_or_later = current + object->Size();
-    }
-  }
-}
-
-
-static void VerifyMarking(NewSpace* space) {
-  Address end = space->top();
-  NewSpacePageIterator it(space->bottom(), end);
-  // The bottom position is at the start of its page. Allows us to use
-  // page->area_start() as start of range on all pages.
-  CHECK_EQ(space->bottom(),
-            NewSpacePage::FromAddress(space->bottom())->area_start());
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
-    Address limit = it.has_next() ? page->area_end() : end;
-    CHECK(limit == end || !page->Contains(end));
-    VerifyMarking(space->heap(), page->area_start(), limit);
-  }
-}
-
-
-static void VerifyMarking(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    VerifyMarking(space->heap(), p->area_start(), p->area_end());
-  }
-}
-
-
-static void VerifyMarking(Heap* heap) {
-  VerifyMarking(heap->old_pointer_space());
-  VerifyMarking(heap->old_data_space());
-  VerifyMarking(heap->code_space());
-  VerifyMarking(heap->cell_space());
-  VerifyMarking(heap->property_cell_space());
-  VerifyMarking(heap->map_space());
-  VerifyMarking(heap->new_space());
-
-  VerifyMarkingVisitor visitor(heap);
-
-  LargeObjectIterator it(heap->lo_space());
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    if (MarkCompactCollector::IsMarked(obj)) {
-      obj->Iterate(&visitor);
-    }
-  }
-
-  heap->IterateStrongRoots(&visitor, VISIT_ONLY_STRONG);
-}
-
-
-class VerifyEvacuationVisitor: public ObjectVisitor {
- public:
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        CHECK(!MarkCompactCollector::IsOnEvacuationCandidate(object));
-      }
-    }
-  }
-};
-
-
-static void VerifyEvacuation(Address bottom, Address top) {
-  VerifyEvacuationVisitor visitor;
-  HeapObject* object;
-  Address next_object_must_be_here_or_later = bottom;
-
-  for (Address current = bottom;
-       current < top;
-       current += kPointerSize) {
-    object = HeapObject::FromAddress(current);
-    if (MarkCompactCollector::IsMarked(object)) {
-      CHECK(current >= next_object_must_be_here_or_later);
-      object->Iterate(&visitor);
-      next_object_must_be_here_or_later = current + object->Size();
-    }
-  }
-}
-
-
-static void VerifyEvacuation(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
-  VerifyEvacuationVisitor visitor;
-
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
-    Address current = page->area_start();
-    Address limit = it.has_next() ? page->area_end() : space->top();
-    CHECK(limit == space->top() || !page->Contains(space->top()));
-    while (current < limit) {
-      HeapObject* object = HeapObject::FromAddress(current);
-      object->Iterate(&visitor);
-      current += object->Size();
-    }
-  }
-}
-
-
-static void VerifyEvacuation(PagedSpace* space) {
-  // TODO(hpayer): Bring back VerifyEvacuation for parallel-concurrently
-  // swept pages.
-  if ((FLAG_concurrent_sweeping || FLAG_parallel_sweeping) &&
-      space->was_swept_conservatively()) return;
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    if (p->IsEvacuationCandidate()) continue;
-    VerifyEvacuation(p->area_start(), p->area_end());
-  }
-}
-
-
-static void VerifyEvacuation(Heap* heap) {
-  VerifyEvacuation(heap->old_pointer_space());
-  VerifyEvacuation(heap->old_data_space());
-  VerifyEvacuation(heap->code_space());
-  VerifyEvacuation(heap->cell_space());
-  VerifyEvacuation(heap->property_cell_space());
-  VerifyEvacuation(heap->map_space());
-  VerifyEvacuation(heap->new_space());
-
-  VerifyEvacuationVisitor visitor;
-  heap->IterateStrongRoots(&visitor, VISIT_ALL);
-}
-#endif  // VERIFY_HEAP
-
-
-#ifdef DEBUG
-class VerifyNativeContextSeparationVisitor: public ObjectVisitor {
- public:
-  VerifyNativeContextSeparationVisitor() : current_native_context_(NULL) {}
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** current = start; current < end; current++) {
-      if ((*current)->IsHeapObject()) {
-        HeapObject* object = HeapObject::cast(*current);
-        if (object->IsString()) continue;
-        switch (object->map()->instance_type()) {
-          case JS_FUNCTION_TYPE:
-            CheckContext(JSFunction::cast(object)->context());
-            break;
-          case JS_GLOBAL_PROXY_TYPE:
-            CheckContext(JSGlobalProxy::cast(object)->native_context());
-            break;
-          case JS_GLOBAL_OBJECT_TYPE:
-          case JS_BUILTINS_OBJECT_TYPE:
-            CheckContext(GlobalObject::cast(object)->native_context());
-            break;
-          case JS_ARRAY_TYPE:
-          case JS_DATE_TYPE:
-          case JS_OBJECT_TYPE:
-          case JS_REGEXP_TYPE:
-            VisitPointer(HeapObject::RawField(object, JSObject::kMapOffset));
-            break;
-          case MAP_TYPE:
-            VisitPointer(HeapObject::RawField(object, Map::kPrototypeOffset));
-            VisitPointer(HeapObject::RawField(object, Map::kConstructorOffset));
-            break;
-          case FIXED_ARRAY_TYPE:
-            if (object->IsContext()) {
-              CheckContext(object);
-            } else {
-              FixedArray* array = FixedArray::cast(object);
-              int length = array->length();
-              // Set array length to zero to prevent cycles while iterating
-              // over array bodies, this is easier than intrusive marking.
-              array->set_length(0);
-              array->IterateBody(
-                  FIXED_ARRAY_TYPE, FixedArray::SizeFor(length), this);
-              array->set_length(length);
-            }
-            break;
-          case CELL_TYPE:
-          case JS_PROXY_TYPE:
-          case JS_VALUE_TYPE:
-          case TYPE_FEEDBACK_INFO_TYPE:
-            object->Iterate(this);
-            break;
-          case DECLARED_ACCESSOR_INFO_TYPE:
-          case EXECUTABLE_ACCESSOR_INFO_TYPE:
-          case BYTE_ARRAY_TYPE:
-          case CALL_HANDLER_INFO_TYPE:
-          case CODE_TYPE:
-          case FIXED_DOUBLE_ARRAY_TYPE:
-          case HEAP_NUMBER_TYPE:
-          case INTERCEPTOR_INFO_TYPE:
-          case ODDBALL_TYPE:
-          case SCRIPT_TYPE:
-          case SHARED_FUNCTION_INFO_TYPE:
-            break;
-          default:
-            UNREACHABLE();
-        }
-      }
-    }
-  }
-
- private:
-  void CheckContext(Object* context) {
-    if (!context->IsContext()) return;
-    Context* native_context = Context::cast(context)->native_context();
-    if (current_native_context_ == NULL) {
-      current_native_context_ = native_context;
-    } else {
-      CHECK_EQ(current_native_context_, native_context);
-    }
-  }
-
-  Context* current_native_context_;
-};
-
-
-static void VerifyNativeContextSeparation(Heap* heap) {
-  HeapObjectIterator it(heap->code_space());
-
-  for (Object* object = it.Next(); object != NULL; object = it.Next()) {
-    VerifyNativeContextSeparationVisitor visitor;
-    Code::cast(object)->CodeIterateBody(&visitor);
-  }
-}
-#endif
-
-
-void MarkCompactCollector::SetUp() {
-  free_list_old_data_space_.Reset(new FreeList(heap_->old_data_space()));
-  free_list_old_pointer_space_.Reset(new FreeList(heap_->old_pointer_space()));
-}
-
-
-void MarkCompactCollector::TearDown() {
-  AbortCompaction();
-}
-
-
-void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
-  p->MarkEvacuationCandidate();
-  evacuation_candidates_.Add(p);
-}
-
-
-static void TraceFragmentation(PagedSpace* space) {
-  int number_of_pages = space->CountTotalPages();
-  intptr_t reserved = (number_of_pages * space->AreaSize());
-  intptr_t free = reserved - space->SizeOfObjects();
-  PrintF("[%s]: %d pages, %d (%.1f%%) free\n",
-         AllocationSpaceName(space->identity()),
-         number_of_pages,
-         static_cast<int>(free),
-         static_cast<double>(free) * 100 / reserved);
-}
-
-
-bool MarkCompactCollector::StartCompaction(CompactionMode mode) {
-  if (!compacting_) {
-    ASSERT(evacuation_candidates_.length() == 0);
-
-#ifdef ENABLE_GDB_JIT_INTERFACE
-    // If GDBJIT interface is active disable compaction.
-    if (FLAG_gdbjit) return false;
-#endif
-
-    CollectEvacuationCandidates(heap()->old_pointer_space());
-    CollectEvacuationCandidates(heap()->old_data_space());
-
-    if (FLAG_compact_code_space &&
-        (mode == NON_INCREMENTAL_COMPACTION ||
-         FLAG_incremental_code_compaction)) {
-      CollectEvacuationCandidates(heap()->code_space());
-    } else if (FLAG_trace_fragmentation) {
-      TraceFragmentation(heap()->code_space());
-    }
-
-    if (FLAG_trace_fragmentation) {
-      TraceFragmentation(heap()->map_space());
-      TraceFragmentation(heap()->cell_space());
-      TraceFragmentation(heap()->property_cell_space());
-    }
-
-    heap()->old_pointer_space()->EvictEvacuationCandidatesFromFreeLists();
-    heap()->old_data_space()->EvictEvacuationCandidatesFromFreeLists();
-    heap()->code_space()->EvictEvacuationCandidatesFromFreeLists();
-
-    compacting_ = evacuation_candidates_.length() > 0;
-  }
-
-  return compacting_;
-}
-
-
-void MarkCompactCollector::CollectGarbage() {
-  // Make sure that Prepare() has been called. The individual steps below will
-  // update the state as they proceed.
-  ASSERT(state_ == PREPARE_GC);
-
-  MarkLiveObjects();
-  ASSERT(heap_->incremental_marking()->IsStopped());
-
-  if (FLAG_collect_maps) ClearNonLiveReferences();
-
-  ClearWeakCollections();
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    VerifyMarking(heap_);
-  }
-#endif
-
-  SweepSpaces();
-
-#ifdef DEBUG
-  if (FLAG_verify_native_context_separation) {
-    VerifyNativeContextSeparation(heap_);
-  }
-#endif
-
-#ifdef VERIFY_HEAP
-  if (heap()->weak_embedded_objects_verification_enabled()) {
-    VerifyWeakEmbeddedObjectsInCode();
-  }
-  if (FLAG_collect_maps && FLAG_omit_map_checks_for_leaf_maps) {
-    VerifyOmittedMapChecks();
-  }
-#endif
-
-  Finish();
-
-  if (marking_parity_ == EVEN_MARKING_PARITY) {
-    marking_parity_ = ODD_MARKING_PARITY;
-  } else {
-    ASSERT(marking_parity_ == ODD_MARKING_PARITY);
-    marking_parity_ = EVEN_MARKING_PARITY;
-  }
-
-  tracer_ = NULL;
-}
-
-
-#ifdef VERIFY_HEAP
-void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    CHECK(p->markbits()->IsClean());
-    CHECK_EQ(0, p->LiveBytes());
-  }
-}
-
-
-void MarkCompactCollector::VerifyMarkbitsAreClean(NewSpace* space) {
-  NewSpacePageIterator it(space->bottom(), space->top());
-
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
-    CHECK(p->markbits()->IsClean());
-    CHECK_EQ(0, p->LiveBytes());
-  }
-}
-
-
-void MarkCompactCollector::VerifyMarkbitsAreClean() {
-  VerifyMarkbitsAreClean(heap_->old_pointer_space());
-  VerifyMarkbitsAreClean(heap_->old_data_space());
-  VerifyMarkbitsAreClean(heap_->code_space());
-  VerifyMarkbitsAreClean(heap_->cell_space());
-  VerifyMarkbitsAreClean(heap_->property_cell_space());
-  VerifyMarkbitsAreClean(heap_->map_space());
-  VerifyMarkbitsAreClean(heap_->new_space());
-
-  LargeObjectIterator it(heap_->lo_space());
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(obj);
-    CHECK(Marking::IsWhite(mark_bit));
-    CHECK_EQ(0, Page::FromAddress(obj->address())->LiveBytes());
-  }
-}
-
-
-void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
-  HeapObjectIterator code_iterator(heap()->code_space());
-  for (HeapObject* obj = code_iterator.Next();
-       obj != NULL;
-       obj = code_iterator.Next()) {
-    Code* code = Code::cast(obj);
-    if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
-    if (WillBeDeoptimized(code)) continue;
-    code->VerifyEmbeddedObjectsDependency();
-  }
-}
-
-
-void MarkCompactCollector::VerifyOmittedMapChecks() {
-  HeapObjectIterator iterator(heap()->map_space());
-  for (HeapObject* obj = iterator.Next();
-       obj != NULL;
-       obj = iterator.Next()) {
-    Map* map = Map::cast(obj);
-    map->VerifyOmittedMapChecks();
-  }
-}
-#endif  // VERIFY_HEAP
-
-
-static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
-  PageIterator it(space);
-
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
-  }
-}
-
-
-static void ClearMarkbitsInNewSpace(NewSpace* space) {
-  NewSpacePageIterator it(space->ToSpaceStart(), space->ToSpaceEnd());
-
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
-  }
-}
-
-
-void MarkCompactCollector::ClearMarkbits() {
-  ClearMarkbitsInPagedSpace(heap_->code_space());
-  ClearMarkbitsInPagedSpace(heap_->map_space());
-  ClearMarkbitsInPagedSpace(heap_->old_pointer_space());
-  ClearMarkbitsInPagedSpace(heap_->old_data_space());
-  ClearMarkbitsInPagedSpace(heap_->cell_space());
-  ClearMarkbitsInPagedSpace(heap_->property_cell_space());
-  ClearMarkbitsInNewSpace(heap_->new_space());
-
-  LargeObjectIterator it(heap_->lo_space());
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    MarkBit mark_bit = Marking::MarkBitFrom(obj);
-    mark_bit.Clear();
-    mark_bit.Next().Clear();
-    Page::FromAddress(obj->address())->ResetProgressBar();
-    Page::FromAddress(obj->address())->ResetLiveBytes();
-  }
-}
-
-
-class MarkCompactCollector::SweeperTask : public v8::Task {
- public:
-  SweeperTask(Heap* heap, PagedSpace* space)
-    : heap_(heap), space_(space) {}
-
-  virtual ~SweeperTask() {}
-
- private:
-  // v8::Task overrides.
-  virtual void Run() V8_OVERRIDE {
-    heap_->mark_compact_collector()->SweepInParallel(space_);
-    heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
-  }
-
-  Heap* heap_;
-  PagedSpace* space_;
-
-  DISALLOW_COPY_AND_ASSIGN(SweeperTask);
-};
-
-
-void MarkCompactCollector::StartSweeperThreads() {
-  ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
-  ASSERT(free_list_old_data_space_.get()->IsEmpty());
-  sweeping_pending_ = true;
-  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
-    isolate()->sweeper_threads()[i]->StartSweeping();
-  }
-  if (FLAG_job_based_sweeping) {
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new SweeperTask(heap(), heap()->old_data_space()),
-        v8::Platform::kShortRunningTask);
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new SweeperTask(heap(), heap()->old_pointer_space()),
-        v8::Platform::kShortRunningTask);
-  }
-}
-
-
-void MarkCompactCollector::WaitUntilSweepingCompleted() {
-  ASSERT(sweeping_pending_ == true);
-  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
-    isolate()->sweeper_threads()[i]->WaitForSweeperThread();
-  }
-  if (FLAG_job_based_sweeping) {
-    // Wait twice for both jobs.
-    pending_sweeper_jobs_semaphore_.Wait();
-    pending_sweeper_jobs_semaphore_.Wait();
-  }
-  ParallelSweepSpacesComplete();
-  sweeping_pending_ = false;
-  RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
-  RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
-  heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
-  heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
-}
-
-
-bool MarkCompactCollector::IsSweepingCompleted() {
-  for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
-    if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
-      return false;
-    }
-  }
-  if (FLAG_job_based_sweeping) {
-    if (!pending_sweeper_jobs_semaphore_.WaitFor(TimeDelta::FromSeconds(0))) {
-      return false;
-    }
-    pending_sweeper_jobs_semaphore_.Signal();
-  }
-  return true;
-}
-
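// A minimal sketch (C++20, illustrative; not V8 code) of the completion
// protocol used by WaitUntilSweepingCompleted() above: each of the two
// background sweeper jobs signals a counting semaphore once, so waiting for
// both jobs means acquiring the semaphore twice.
#include <semaphore>
#include <thread>

int main() {
  std::counting_semaphore<2> pending_sweeper_jobs(0);

  auto sweep_task = [&pending_sweeper_jobs]() {
    // ... sweep one space ...
    pending_sweeper_jobs.release();  // signal completion of this job
  };

  std::thread t1(sweep_task), t2(sweep_task);
  pending_sweeper_jobs.acquire();  // wait twice, once per job
  pending_sweeper_jobs.acquire();
  t1.join();
  t2.join();
}
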
-
-void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
-  FreeList* free_list;
-
-  if (space == heap()->old_pointer_space()) {
-    free_list = free_list_old_pointer_space_.get();
-  } else if (space == heap()->old_data_space()) {
-    free_list = free_list_old_data_space_.get();
-  } else {
-    // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
-    // to only refill them for old data and pointer spaces.
-    return;
-  }
-
-  intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
-  space->AddToAccountingStats(freed_bytes);
-  space->DecrementUnsweptFreeBytes(freed_bytes);
-}
-
-
-bool MarkCompactCollector::AreSweeperThreadsActivated() {
-  return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
-}
-
-
-bool MarkCompactCollector::IsConcurrentSweepingInProgress() {
-  return sweeping_pending_;
-}
-
-
-void Marking::TransferMark(Address old_start, Address new_start) {
-  // This is only used when resizing an object.
-  ASSERT(MemoryChunk::FromAddress(old_start) ==
-         MemoryChunk::FromAddress(new_start));
-
-  if (!heap_->incremental_marking()->IsMarking()) return;
-
-  // If the mark doesn't move, we don't check the color of the object.
-  // It doesn't matter whether the object is black, since it hasn't changed
-  // size, so the adjustment to the live data count will be zero anyway.
-  if (old_start == new_start) return;
-
-  MarkBit new_mark_bit = MarkBitFrom(new_start);
-  MarkBit old_mark_bit = MarkBitFrom(old_start);
-
-#ifdef DEBUG
-  ObjectColor old_color = Color(old_mark_bit);
-#endif
-
-  if (Marking::IsBlack(old_mark_bit)) {
-    old_mark_bit.Clear();
-    ASSERT(IsWhite(old_mark_bit));
-    Marking::MarkBlack(new_mark_bit);
-    return;
-  } else if (Marking::IsGrey(old_mark_bit)) {
-    old_mark_bit.Clear();
-    old_mark_bit.Next().Clear();
-    ASSERT(IsWhite(old_mark_bit));
-    heap_->incremental_marking()->WhiteToGreyAndPush(
-        HeapObject::FromAddress(new_start), new_mark_bit);
-    heap_->incremental_marking()->RestartIfNotMarking();
-  }
-
-#ifdef DEBUG
-  ObjectColor new_color = Color(new_mark_bit);
-  ASSERT(new_color == old_color);
-#endif
-}
-
-
-const char* AllocationSpaceName(AllocationSpace space) {
-  switch (space) {
-    case NEW_SPACE: return "NEW_SPACE";
-    case OLD_POINTER_SPACE: return "OLD_POINTER_SPACE";
-    case OLD_DATA_SPACE: return "OLD_DATA_SPACE";
-    case CODE_SPACE: return "CODE_SPACE";
-    case MAP_SPACE: return "MAP_SPACE";
-    case CELL_SPACE: return "CELL_SPACE";
-    case PROPERTY_CELL_SPACE:
-      return "PROPERTY_CELL_SPACE";
-    case LO_SPACE: return "LO_SPACE";
-    default:
-      UNREACHABLE();
-  }
-
-  return NULL;
-}
-
-
-// Returns zero for pages that have so little fragmentation that it is not
-// worth defragmenting them.  Otherwise a positive integer that gives an
-// estimate of fragmentation on an arbitrary scale.
-static int FreeListFragmentation(PagedSpace* space, Page* p) {
-  // If page was not swept then there are no free list items on it.
-  if (!p->WasSwept()) {
-    if (FLAG_trace_fragmentation) {
-      PrintF("%p [%s]: %d bytes live (unswept)\n",
-             reinterpret_cast<void*>(p),
-             AllocationSpaceName(space->identity()),
-             p->LiveBytes());
-    }
-    return 0;
-  }
-
-  PagedSpace::SizeStats sizes;
-  space->ObtainFreeListStatistics(p, &sizes);
-
-  intptr_t ratio;
-  intptr_t ratio_threshold;
-  intptr_t area_size = space->AreaSize();
-  if (space->identity() == CODE_SPACE) {
-    ratio = (sizes.medium_size_ * 10 + sizes.large_size_ * 2) * 100 /
-        area_size;
-    ratio_threshold = 10;
-  } else {
-    ratio = (sizes.small_size_ * 5 + sizes.medium_size_) * 100 /
-        area_size;
-    ratio_threshold = 15;
-  }
-
-  if (FLAG_trace_fragmentation) {
-    PrintF("%p [%s]: %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %d (%.2f%%) %s\n",
-           reinterpret_cast<void*>(p),
-           AllocationSpaceName(space->identity()),
-           static_cast<int>(sizes.small_size_),
-           static_cast<double>(sizes.small_size_ * 100) /
-           area_size,
-           static_cast<int>(sizes.medium_size_),
-           static_cast<double>(sizes.medium_size_ * 100) /
-           area_size,
-           static_cast<int>(sizes.large_size_),
-           static_cast<double>(sizes.large_size_ * 100) /
-           area_size,
-           static_cast<int>(sizes.huge_size_),
-           static_cast<double>(sizes.huge_size_ * 100) /
-           area_size,
-           (ratio > ratio_threshold) ? "[fragmented]" : "");
-  }
-
-  if (FLAG_always_compact && sizes.Total() != area_size) {
-    return 1;
-  }
-
-  if (ratio <= ratio_threshold) return 0;  // Not fragmented.
-
-  return static_cast<int>(ratio - ratio_threshold);
-}
-
-
-void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
-  ASSERT(space->identity() == OLD_POINTER_SPACE ||
-         space->identity() == OLD_DATA_SPACE ||
-         space->identity() == CODE_SPACE);
-
-  static const int kMaxMaxEvacuationCandidates = 1000;
-  int number_of_pages = space->CountTotalPages();
-  int max_evacuation_candidates =
-      static_cast<int>(std::sqrt(number_of_pages / 2.0) + 1);
-
-  if (FLAG_stress_compaction || FLAG_always_compact) {
-    max_evacuation_candidates = kMaxMaxEvacuationCandidates;
-  }
-
-  class Candidate {
-   public:
-    Candidate() : fragmentation_(0), page_(NULL) { }
-    Candidate(int f, Page* p) : fragmentation_(f), page_(p) { }
-
-    int fragmentation() { return fragmentation_; }
-    Page* page() { return page_; }
-
-   private:
-    int fragmentation_;
-    Page* page_;
-  };
-
-  enum CompactionMode {
-    COMPACT_FREE_LISTS,
-    REDUCE_MEMORY_FOOTPRINT
-  };
-
-  CompactionMode mode = COMPACT_FREE_LISTS;
-
-  intptr_t reserved = number_of_pages * space->AreaSize();
-  intptr_t over_reserved = reserved - space->SizeOfObjects();
-  static const intptr_t kFreenessThreshold = 50;
-
-  if (reduce_memory_footprint_ && over_reserved >= space->AreaSize()) {
-    // If reduction of memory footprint was requested, we are aggressive
-    // about choosing pages to free.  We expect that half-empty pages
-    // are easier to compact so slightly bump the limit.
-    mode = REDUCE_MEMORY_FOOTPRINT;
-    max_evacuation_candidates += 2;
-  }
-
-
-  if (over_reserved > reserved / 3 && over_reserved >= 2 * space->AreaSize()) {
-    // If over-usage is very high (more than a third of the space), we
-    // try to free all mostly empty pages.  We expect that almost empty
-    // pages are even easier to compact so bump the limit even more.
-    mode = REDUCE_MEMORY_FOOTPRINT;
-    max_evacuation_candidates *= 2;
-  }
-
-  if (FLAG_trace_fragmentation && mode == REDUCE_MEMORY_FOOTPRINT) {
-    PrintF("Estimated over reserved memory: %.1f / %.1f MB (threshold %d), "
-           "evacuation candidate limit: %d\n",
-           static_cast<double>(over_reserved) / MB,
-           static_cast<double>(reserved) / MB,
-           static_cast<int>(kFreenessThreshold),
-           max_evacuation_candidates);
-  }
-
-  intptr_t estimated_release = 0;
-
-  Candidate candidates[kMaxMaxEvacuationCandidates];
-
-  max_evacuation_candidates =
-      Min(kMaxMaxEvacuationCandidates, max_evacuation_candidates);
-
-  int count = 0;
-  int fragmentation = 0;
-  Candidate* least = NULL;
-
-  PageIterator it(space);
-  if (it.has_next()) it.next();  // Never compact the first page.
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    p->ClearEvacuationCandidate();
-
-    if (FLAG_stress_compaction) {
-      unsigned int counter = space->heap()->ms_count();
-      uintptr_t page_number = reinterpret_cast<uintptr_t>(p) >> kPageSizeBits;
-      if ((counter & 1) == (page_number & 1)) fragmentation = 1;
-    } else if (mode == REDUCE_MEMORY_FOOTPRINT) {
-      // Don't try to release too many pages.
-      if (estimated_release >= over_reserved) {
-        continue;
-      }
-
-      intptr_t free_bytes = 0;
-
-      if (!p->WasSwept()) {
-        free_bytes = (p->area_size() - p->LiveBytes());
-      } else {
-        PagedSpace::SizeStats sizes;
-        space->ObtainFreeListStatistics(p, &sizes);
-        free_bytes = sizes.Total();
-      }
-
-      int free_pct = static_cast<int>(free_bytes * 100) / p->area_size();
-
-      if (free_pct >= kFreenessThreshold) {
-        estimated_release += free_bytes;
-        fragmentation = free_pct;
-      } else {
-        fragmentation = 0;
-      }
-
-      if (FLAG_trace_fragmentation) {
-        PrintF("%p [%s]: %d (%.2f%%) free %s\n",
-               reinterpret_cast<void*>(p),
-               AllocationSpaceName(space->identity()),
-               static_cast<int>(free_bytes),
-               static_cast<double>(free_bytes * 100) / p->area_size(),
-               (fragmentation > 0) ? "[fragmented]" : "");
-      }
-    } else {
-      fragmentation = FreeListFragmentation(space, p);
-    }
-
-    if (fragmentation != 0) {
-      if (count < max_evacuation_candidates) {
-        candidates[count++] = Candidate(fragmentation, p);
-      } else {
-        if (least == NULL) {
-          for (int i = 0; i < max_evacuation_candidates; i++) {
-            if (least == NULL ||
-                candidates[i].fragmentation() < least->fragmentation()) {
-              least = candidates + i;
-            }
-          }
-        }
-        if (least->fragmentation() < fragmentation) {
-          *least = Candidate(fragmentation, p);
-          least = NULL;
-        }
-      }
-    }
-  }
-
-  for (int i = 0; i < count; i++) {
-    AddEvacuationCandidate(candidates[i].page());
-  }
-
-  if (count > 0 && FLAG_trace_fragmentation) {
-    PrintF("Collected %d evacuation candidates for space %s\n",
-           count,
-           AllocationSpaceName(space->identity()));
-  }
-}
-
-
-void MarkCompactCollector::AbortCompaction() {
-  if (compacting_) {
-    int npages = evacuation_candidates_.length();
-    for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
-      slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
-      p->ClearEvacuationCandidate();
-      p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-    }
-    compacting_ = false;
-    evacuation_candidates_.Rewind(0);
-    invalidated_code_.Rewind(0);
-  }
-  ASSERT_EQ(0, evacuation_candidates_.length());
-}
-
-
-void MarkCompactCollector::Prepare(GCTracer* tracer) {
-  was_marked_incrementally_ = heap()->incremental_marking()->IsMarking();
-
-  // Rather than passing the tracer around we stash it in a static member
-  // variable.
-  tracer_ = tracer;
-
-#ifdef DEBUG
-  ASSERT(state_ == IDLE);
-  state_ = PREPARE_GC;
-#endif
-
-  ASSERT(!FLAG_never_compact || !FLAG_always_compact);
-
-  if (IsConcurrentSweepingInProgress()) {
-    // Instead of waiting we could also abort the sweeper threads here.
-    WaitUntilSweepingCompleted();
-  }
-
-  // Clear marking bits if incremental marking is aborted.
-  if (was_marked_incrementally_ && abort_incremental_marking_) {
-    heap()->incremental_marking()->Abort();
-    ClearMarkbits();
-    AbortCompaction();
-    was_marked_incrementally_ = false;
-  }
-
-  // Don't start compaction if we are in the middle of incremental
-  // marking cycle. We did not collect any slots.
-  if (!FLAG_never_compact && !was_marked_incrementally_) {
-    StartCompaction(NON_INCREMENTAL_COMPACTION);
-  }
-
-  PagedSpaces spaces(heap());
-  for (PagedSpace* space = spaces.next();
-       space != NULL;
-       space = spaces.next()) {
-    space->PrepareForMarkCompact();
-  }
-
-#ifdef VERIFY_HEAP
-  if (!was_marked_incrementally_ && FLAG_verify_heap) {
-    VerifyMarkbitsAreClean();
-  }
-#endif
-}
-
-
-void MarkCompactCollector::Finish() {
-#ifdef DEBUG
-  ASSERT(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS);
-  state_ = IDLE;
-#endif
-  // The stub cache is not traversed during GC; clear the cache to
-  // force lazy re-initialization of it. This must be done after the
-  // GC, because it relies on the new address of certain old space
-  // objects (empty string, illegal builtin).
-  isolate()->stub_cache()->Clear();
-
-  if (have_code_to_deoptimize_) {
-    // Some code objects were marked for deoptimization during the GC.
-    Deoptimizer::DeoptimizeMarkedCode(isolate());
-    have_code_to_deoptimize_ = false;
-  }
-}
-
-
-// -------------------------------------------------------------------------
-// Phase 1: tracing and marking live objects.
-//   before: all objects are in normal state.
-//   after: a live object's map pointer is marked as '00'.
-
-// Marking all live objects in the heap as part of mark-sweep or mark-compact
-// collection.  Before marking, all objects are in their normal state.  After
-// marking, live objects' map pointers are marked indicating that the object
-// has been found reachable.
-//
-// The marking algorithm is a (mostly) depth-first (because of possible stack
-// overflow) traversal of the graph of objects reachable from the roots.  It
-// uses an explicit stack of pointers rather than recursion.  The young
-// generation's inactive ('from') space is used as a marking stack.  The
-// objects in the marking stack are the ones that have been reached and marked
-// but their children have not yet been visited.
-//
-// The marking stack can overflow during traversal.  In that case, we set an
-// overflow flag.  When the overflow flag is set, we continue marking objects
-// reachable from the objects on the marking stack, but no longer push them on
-// the marking stack.  Instead, we mark them as both marked and overflowed.
-// When the stack is in the overflowed state, objects marked as overflowed
-// have been reached and marked but their children have not been visited yet.
-// After emptying the marking stack, we clear the overflow flag and traverse
-// the heap looking for objects marked as overflowed, push them on the stack,
-// and continue with marking.  This process repeats until all reachable
-// objects have been marked.
-
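// A minimal sketch (illustrative, not V8 code) of the overflow scheme the
// comment above describes: a bounded marking stack, a global overflow flag,
// and a recovery pass that scans the heap for objects marked as overflowed
// and pushes them back on the stack until everything reachable is marked.
#include <cstddef>
#include <vector>

namespace sketch {

struct Obj {
  bool marked = false;
  bool overflowed = false;  // reached, but children not yet visited
  std::vector<Obj*> children;
};

class Marker {
 public:
  explicit Marker(std::vector<Obj*> heap) : heap_(heap) {}

  void MarkReachable(Obj* root) {
    MarkAndPush(root);
    do {
      DrainStack();
    } while (RefillFromOverflowed());  // repeat until no overflow remains
  }

 private:
  static const std::size_t kCapacity = 64;  // deliberately small stack

  void MarkAndPush(Obj* obj) {
    if (obj->marked) return;
    obj->marked = true;
    PushGrey(obj);
  }

  void PushGrey(Obj* obj) {
    if (stack_.size() < kCapacity) {
      stack_.push_back(obj);
    } else {
      obj->overflowed = true;  // remember it instead of pushing
      overflowed_ = true;
    }
  }

  void DrainStack() {
    while (!stack_.empty()) {
      Obj* obj = stack_.back();
      stack_.pop_back();
      for (Obj* child : obj->children) MarkAndPush(child);
    }
  }

  bool RefillFromOverflowed() {
    if (!overflowed_) return false;
    overflowed_ = false;
    for (Obj* obj : heap_) {  // linear scan replaces the lost stack entries
      if (obj->overflowed) {
        obj->overflowed = false;
        PushGrey(obj);  // may overflow again; the outer loop retries
      }
    }
    return true;
  }

  std::vector<Obj*> heap_;
  std::vector<Obj*> stack_;
  bool overflowed_ = false;
};

}  // namespace sketch
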
-void CodeFlusher::ProcessJSFunctionCandidates() {
-  Code* lazy_compile =
-      isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
-  Object* undefined = isolate_->heap()->undefined_value();
-
-  JSFunction* candidate = jsfunction_candidates_head_;
-  JSFunction* next_candidate;
-  while (candidate != NULL) {
-    next_candidate = GetNextCandidate(candidate);
-    ClearNextCandidate(candidate, undefined);
-
-    SharedFunctionInfo* shared = candidate->shared();
-
-    Code* code = shared->code();
-    MarkBit code_mark = Marking::MarkBitFrom(code);
-    if (!code_mark.Get()) {
-      if (FLAG_trace_code_flushing && shared->is_compiled()) {
-        PrintF("[code-flushing clears: ");
-        shared->ShortPrint();
-        PrintF(" - age: %d]\n", code->GetAge());
-      }
-      shared->set_code(lazy_compile);
-      candidate->set_code(lazy_compile);
-    } else {
-      candidate->set_code(code);
-    }
-
-    // We are in the middle of a GC cycle so the write barrier in the code
-    // setter did not record the slot update and we have to do that manually.
-    Address slot = candidate->address() + JSFunction::kCodeEntryOffset;
-    Code* target = Code::cast(Code::GetObjectFromEntryAddress(slot));
-    isolate_->heap()->mark_compact_collector()->
-        RecordCodeEntrySlot(slot, target);
-
-    Object** shared_code_slot =
-        HeapObject::RawField(shared, SharedFunctionInfo::kCodeOffset);
-    isolate_->heap()->mark_compact_collector()->
-        RecordSlot(shared_code_slot, shared_code_slot, *shared_code_slot);
-
-    candidate = next_candidate;
-  }
-
-  jsfunction_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::ProcessSharedFunctionInfoCandidates() {
-  Code* lazy_compile =
-      isolate_->builtins()->builtin(Builtins::kCompileUnoptimized);
-
-  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
-  SharedFunctionInfo* next_candidate;
-  while (candidate != NULL) {
-    next_candidate = GetNextCandidate(candidate);
-    ClearNextCandidate(candidate);
-
-    Code* code = candidate->code();
-    MarkBit code_mark = Marking::MarkBitFrom(code);
-    if (!code_mark.Get()) {
-      if (FLAG_trace_code_flushing && candidate->is_compiled()) {
-        PrintF("[code-flushing clears: ");
-        candidate->ShortPrint();
-        PrintF(" - age: %d]\n", code->GetAge());
-      }
-      candidate->set_code(lazy_compile);
-    }
-
-    Object** code_slot =
-        HeapObject::RawField(candidate, SharedFunctionInfo::kCodeOffset);
-    isolate_->heap()->mark_compact_collector()->
-        RecordSlot(code_slot, code_slot, *code_slot);
-
-    candidate = next_candidate;
-  }
-
-  shared_function_info_candidates_head_ = NULL;
-}
-
-
-void CodeFlusher::ProcessOptimizedCodeMaps() {
-  STATIC_ASSERT(SharedFunctionInfo::kEntryLength == 4);
-
-  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
-  SharedFunctionInfo* next_holder;
-
-  while (holder != NULL) {
-    next_holder = GetNextCodeMap(holder);
-    ClearNextCodeMap(holder);
-
-    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    int new_length = SharedFunctionInfo::kEntriesStart;
-    int old_length = code_map->length();
-    for (int i = SharedFunctionInfo::kEntriesStart;
-         i < old_length;
-         i += SharedFunctionInfo::kEntryLength) {
-      Code* code =
-          Code::cast(code_map->get(i + SharedFunctionInfo::kCachedCodeOffset));
-      if (!Marking::MarkBitFrom(code).Get()) continue;
-
-      // Move every slot in the entry.
-      for (int j = 0; j < SharedFunctionInfo::kEntryLength; j++) {
-        int dst_index = new_length++;
-        Object** slot = code_map->RawFieldOfElementAt(dst_index);
-        Object* object = code_map->get(i + j);
-        code_map->set(dst_index, object);
-        if (j == SharedFunctionInfo::kOsrAstIdOffset) {
-          ASSERT(object->IsSmi());
-        } else {
-          ASSERT(Marking::IsBlack(
-              Marking::MarkBitFrom(HeapObject::cast(*slot))));
-          isolate_->heap()->mark_compact_collector()->
-              RecordSlot(slot, slot, *slot);
-        }
-      }
-    }
-
-    // Trim the optimized code map if entries have been removed.
-    if (new_length < old_length) {
-      holder->TrimOptimizedCodeMap(old_length - new_length);
-    }
-
-    holder = next_holder;
-  }
-
-  optimized_code_map_holder_head_ = NULL;
-}
-
-
-void CodeFlusher::EvictCandidate(SharedFunctionInfo* shared_info) {
-  // Make sure previous flushing decisions are revisited.
-  isolate_->heap()->incremental_marking()->RecordWrites(shared_info);
-
-  if (FLAG_trace_code_flushing) {
-    PrintF("[code-flushing abandons function-info: ");
-    shared_info->ShortPrint();
-    PrintF("]\n");
-  }
-
-  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
-  SharedFunctionInfo* next_candidate;
-  if (candidate == shared_info) {
-    next_candidate = GetNextCandidate(shared_info);
-    shared_function_info_candidates_head_ = next_candidate;
-    ClearNextCandidate(shared_info);
-  } else {
-    while (candidate != NULL) {
-      next_candidate = GetNextCandidate(candidate);
-
-      if (next_candidate == shared_info) {
-        next_candidate = GetNextCandidate(shared_info);
-        SetNextCandidate(candidate, next_candidate);
-        ClearNextCandidate(shared_info);
-        break;
-      }
-
-      candidate = next_candidate;
-    }
-  }
-}
-
-
-void CodeFlusher::EvictCandidate(JSFunction* function) {
-  ASSERT(!function->next_function_link()->IsUndefined());
-  Object* undefined = isolate_->heap()->undefined_value();
-
-  // Make sure previous flushing decisions are revisited.
-  isolate_->heap()->incremental_marking()->RecordWrites(function);
-  isolate_->heap()->incremental_marking()->RecordWrites(function->shared());
-
-  if (FLAG_trace_code_flushing) {
-    PrintF("[code-flushing abandons closure: ");
-    function->shared()->ShortPrint();
-    PrintF("]\n");
-  }
-
-  JSFunction* candidate = jsfunction_candidates_head_;
-  JSFunction* next_candidate;
-  if (candidate == function) {
-    next_candidate = GetNextCandidate(function);
-    jsfunction_candidates_head_ = next_candidate;
-    ClearNextCandidate(function, undefined);
-  } else {
-    while (candidate != NULL) {
-      next_candidate = GetNextCandidate(candidate);
-
-      if (next_candidate == function) {
-        next_candidate = GetNextCandidate(function);
-        SetNextCandidate(candidate, next_candidate);
-        ClearNextCandidate(function, undefined);
-        break;
-      }
-
-      candidate = next_candidate;
-    }
-  }
-}
-
-
-void CodeFlusher::EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
-  ASSERT(!FixedArray::cast(code_map_holder->optimized_code_map())->
-         get(SharedFunctionInfo::kNextMapIndex)->IsUndefined());
-
-  // Make sure previous flushing decisions are revisited.
-  isolate_->heap()->incremental_marking()->RecordWrites(code_map_holder);
-
-  if (FLAG_trace_code_flushing) {
-    PrintF("[code-flushing abandons code-map: ");
-    code_map_holder->ShortPrint();
-    PrintF("]\n");
-  }
-
-  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
-  SharedFunctionInfo* next_holder;
-  if (holder == code_map_holder) {
-    next_holder = GetNextCodeMap(code_map_holder);
-    optimized_code_map_holder_head_ = next_holder;
-    ClearNextCodeMap(code_map_holder);
-  } else {
-    while (holder != NULL) {
-      next_holder = GetNextCodeMap(holder);
-
-      if (next_holder == code_map_holder) {
-        next_holder = GetNextCodeMap(code_map_holder);
-        SetNextCodeMap(holder, next_holder);
-        ClearNextCodeMap(code_map_holder);
-        break;
-      }
-
-      holder = next_holder;
-    }
-  }
-}
-
-
-void CodeFlusher::EvictJSFunctionCandidates() {
-  JSFunction* candidate = jsfunction_candidates_head_;
-  JSFunction* next_candidate;
-  while (candidate != NULL) {
-    next_candidate = GetNextCandidate(candidate);
-    EvictCandidate(candidate);
-    candidate = next_candidate;
-  }
-  ASSERT(jsfunction_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictSharedFunctionInfoCandidates() {
-  SharedFunctionInfo* candidate = shared_function_info_candidates_head_;
-  SharedFunctionInfo* next_candidate;
-  while (candidate != NULL) {
-    next_candidate = GetNextCandidate(candidate);
-    EvictCandidate(candidate);
-    candidate = next_candidate;
-  }
-  ASSERT(shared_function_info_candidates_head_ == NULL);
-}
-
-
-void CodeFlusher::EvictOptimizedCodeMaps() {
-  SharedFunctionInfo* holder = optimized_code_map_holder_head_;
-  SharedFunctionInfo* next_holder;
-  while (holder != NULL) {
-    next_holder = GetNextCodeMap(holder);
-    EvictOptimizedCodeMap(holder);
-    holder = next_holder;
-  }
-  ASSERT(optimized_code_map_holder_head_ == NULL);
-}
-
-
-void CodeFlusher::IteratePointersToFromSpace(ObjectVisitor* v) {
-  Heap* heap = isolate_->heap();
-
-  JSFunction** slot = &jsfunction_candidates_head_;
-  JSFunction* candidate = jsfunction_candidates_head_;
-  while (candidate != NULL) {
-    if (heap->InFromSpace(candidate)) {
-      v->VisitPointer(reinterpret_cast<Object**>(slot));
-    }
-    candidate = GetNextCandidate(*slot);
-    slot = GetNextCandidateSlot(*slot);
-  }
-}
-
-
-MarkCompactCollector::~MarkCompactCollector() {
-  if (code_flusher_ != NULL) {
-    delete code_flusher_;
-    code_flusher_ = NULL;
-  }
-}
-
-
-static inline HeapObject* ShortCircuitConsString(Object** p) {
-  // Optimization: If the heap object pointed to by p is a non-internalized
-  // cons string whose right substring is HEAP->empty_string, update
-  // it in place to its left substring.  Return the updated value.
-  //
-  // Here we assume that if we change *p, we replace it with a heap object
-  // (i.e., the left substring of a cons string is always a heap object).
-  //
-  // The check performed is:
-  //   object->IsConsString() && !object->IsInternalizedString() &&
-  //   (ConsString::cast(object)->second() == HEAP->empty_string())
-  // except the maps for the object and its possible substrings might be
-  // marked.
-  HeapObject* object = HeapObject::cast(*p);
-  if (!FLAG_clever_optimizations) return object;
-  Map* map = object->map();
-  InstanceType type = map->instance_type();
-  if ((type & kShortcutTypeMask) != kShortcutTypeTag) return object;
-
-  Object* second = reinterpret_cast<ConsString*>(object)->second();
-  Heap* heap = map->GetHeap();
-  if (second != heap->empty_string()) {
-    return object;
-  }
-
-  // Since we don't have the object's start, it is impossible to update the
-  // page dirty marks. Therefore, we only replace the string with its left
-  // substring when page dirty marks do not change.
-  Object* first = reinterpret_cast<ConsString*>(object)->first();
-  if (!heap->InNewSpace(object) && heap->InNewSpace(first)) return object;
-
-  *p = first;
-  return HeapObject::cast(first);
-}
-
-
-class MarkCompactMarkingVisitor
-    : public StaticMarkingVisitor<MarkCompactMarkingVisitor> {
- public:
-  static void ObjectStatsVisitBase(StaticVisitorBase::VisitorId id,
-                                   Map* map, HeapObject* obj);
-
-  static void ObjectStatsCountFixedArray(
-      FixedArrayBase* fixed_array,
-      FixedArraySubInstanceType fast_type,
-      FixedArraySubInstanceType dictionary_type);
-
-  template<MarkCompactMarkingVisitor::VisitorId id>
-  class ObjectStatsTracker {
-   public:
-    static inline void Visit(Map* map, HeapObject* obj);
-  };
-
-  static void Initialize();
-
-  INLINE(static void VisitPointer(Heap* heap, Object** p)) {
-    MarkObjectByPointer(heap->mark_compact_collector(), p, p);
-  }
-
-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
-    // Mark all objects pointed to in [start, end).
-    const int kMinRangeForMarkingRecursion = 64;
-    if (end - start >= kMinRangeForMarkingRecursion) {
-      if (VisitUnmarkedObjects(heap, start, end)) return;
-      // We are close to a stack overflow, so just mark the objects.
-    }
-    MarkCompactCollector* collector = heap->mark_compact_collector();
-    for (Object** p = start; p < end; p++) {
-      MarkObjectByPointer(collector, start, p);
-    }
-  }
-
-  // Marks the object black and pushes it on the marking stack.
-  INLINE(static void MarkObject(Heap* heap, HeapObject* object)) {
-    MarkBit mark = Marking::MarkBitFrom(object);
-    heap->mark_compact_collector()->MarkObject(object, mark);
-  }
-
-  // Marks the object black without pushing it on the marking stack.
-  // Returns true if object needed marking and false otherwise.
-  INLINE(static bool MarkObjectWithoutPush(Heap* heap, HeapObject* object)) {
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (!mark_bit.Get()) {
-      heap->mark_compact_collector()->SetMark(object, mark_bit);
-      return true;
-    }
-    return false;
-  }
-
-  // Mark object pointed to by p.
-  INLINE(static void MarkObjectByPointer(MarkCompactCollector* collector,
-                                         Object** anchor_slot,
-                                         Object** p)) {
-    if (!(*p)->IsHeapObject()) return;
-    HeapObject* object = ShortCircuitConsString(p);
-    collector->RecordSlot(anchor_slot, p, object);
-    MarkBit mark = Marking::MarkBitFrom(object);
-    collector->MarkObject(object, mark);
-  }
-
-
-  // Visit an unmarked object.
-  INLINE(static void VisitUnmarkedObject(MarkCompactCollector* collector,
-                                         HeapObject* obj)) {
-#ifdef DEBUG
-    ASSERT(collector->heap()->Contains(obj));
-    ASSERT(!collector->heap()->mark_compact_collector()->IsMarked(obj));
-#endif
-    Map* map = obj->map();
-    Heap* heap = obj->GetHeap();
-    MarkBit mark = Marking::MarkBitFrom(obj);
-    heap->mark_compact_collector()->SetMark(obj, mark);
-    // Mark the map pointer and the body.
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    heap->mark_compact_collector()->MarkObject(map, map_mark);
-    IterateBody(map, obj);
-  }
-
-  // Visit all unmarked objects pointed to by [start, end).
-  // Returns false if the operation fails (lack of stack space).
-  INLINE(static bool VisitUnmarkedObjects(Heap* heap,
-                                          Object** start,
-                                          Object** end)) {
-    // Return false if we are close to the stack limit.
-    StackLimitCheck check(heap->isolate());
-    if (check.HasOverflowed()) return false;
-
-    MarkCompactCollector* collector = heap->mark_compact_collector();
-    // Visit the unmarked objects.
-    for (Object** p = start; p < end; p++) {
-      Object* o = *p;
-      if (!o->IsHeapObject()) continue;
-      collector->RecordSlot(start, p, o);
-      HeapObject* obj = HeapObject::cast(o);
-      MarkBit mark = Marking::MarkBitFrom(obj);
-      if (mark.Get()) continue;
-      VisitUnmarkedObject(collector, obj);
-    }
-    return true;
-  }
-
- private:
-  template<int id>
-  static inline void TrackObjectStatsAndVisit(Map* map, HeapObject* obj);
-
-  // Code flushing support.
-
-  static const int kRegExpCodeThreshold = 5;
-
-  static void UpdateRegExpCodeAgeAndFlush(Heap* heap,
-                                          JSRegExp* re,
-                                          bool is_ascii) {
-    // Make sure that the fixed array is in fact initialized on the RegExp.
-    // We could potentially trigger a GC when initializing the RegExp.
-    if (HeapObject::cast(re->data())->map()->instance_type() !=
-            FIXED_ARRAY_TYPE) return;
-
-    // Make sure this is a RegExp that actually contains code.
-    if (re->TypeTag() != JSRegExp::IRREGEXP) return;
-
-    Object* code = re->DataAt(JSRegExp::code_index(is_ascii));
-    if (!code->IsSmi() &&
-        HeapObject::cast(code)->map()->instance_type() == CODE_TYPE) {
-      // Save a copy that can be reinstated if we need the code again.
-      re->SetDataAt(JSRegExp::saved_code_index(is_ascii), code);
-
-      // Saving a copy might create a pointer into compaction candidate
-      // that was not observed by marker.  This might happen if JSRegExp data
-      // was marked through the compilation cache before marker reached JSRegExp
-      // object.
-      FixedArray* data = FixedArray::cast(re->data());
-      Object** slot = data->data_start() + JSRegExp::saved_code_index(is_ascii);
-      heap->mark_compact_collector()->
-          RecordSlot(slot, slot, code);
-
-      // Set a number in the 0-255 range to guarantee no smi overflow.
-      re->SetDataAt(JSRegExp::code_index(is_ascii),
-                    Smi::FromInt(heap->sweep_generation() & 0xff));
-    } else if (code->IsSmi()) {
-      int value = Smi::cast(code)->value();
-      // The regexp has not been compiled yet or there was a compilation error.
-      if (value == JSRegExp::kUninitializedValue ||
-          value == JSRegExp::kCompilationErrorValue) {
-        return;
-      }
-
-      // Check if we should flush now.
-      if (value == ((heap->sweep_generation() - kRegExpCodeThreshold) & 0xff)) {
-        re->SetDataAt(JSRegExp::code_index(is_ascii),
-                      Smi::FromInt(JSRegExp::kUninitializedValue));
-        re->SetDataAt(JSRegExp::saved_code_index(is_ascii),
-                      Smi::FromInt(JSRegExp::kUninitializedValue));
-      }
-    }
-  }
-
-
-  // Works by setting the current sweep_generation (as a smi) in the
-  // code object slot in the data array of the RegExp, keeping a copy
-  // around that can be reinstated if we reuse the RegExp before flushing.
-  // If we did not use the code for kRegExpCodeThreshold mark-sweep GCs,
-  // we flush the code.
-  static void VisitRegExpAndFlushCode(Map* map, HeapObject* object) {
-    Heap* heap = map->GetHeap();
-    MarkCompactCollector* collector = heap->mark_compact_collector();
-    if (!collector->is_code_flushing_enabled()) {
-      VisitJSRegExp(map, object);
-      return;
-    }
-    JSRegExp* re = reinterpret_cast<JSRegExp*>(object);
-    // Flush code or set age on both ASCII and two-byte code.
-    UpdateRegExpCodeAgeAndFlush(heap, re, true);
-    UpdateRegExpCodeAgeAndFlush(heap, re, false);
-    // Visit the fields of the RegExp, including the updated FixedArray.
-    VisitJSRegExp(map, object);
-  }
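-
-  // Worked example of the scheme above (illustrative generation numbers,
-  // not taken from the source): if the marker stamps generation 42 into
-  // the code slot, i.e. Smi::FromInt(42 & 0xff), and the RegExp is not
-  // used again, the stamp stays at 42.  Once sweep_generation reaches 47,
-  // the check 42 == ((47 - kRegExpCodeThreshold) & 0xff) succeeds and both
-  // code slots are reset to kUninitializedValue: the code is flushed.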
-
-  static VisitorDispatchTable<Callback> non_count_table_;
-};
-
-
-void MarkCompactMarkingVisitor::ObjectStatsCountFixedArray(
-    FixedArrayBase* fixed_array,
-    FixedArraySubInstanceType fast_type,
-    FixedArraySubInstanceType dictionary_type) {
-  Heap* heap = fixed_array->map()->GetHeap();
-  if (fixed_array->map() != heap->fixed_cow_array_map() &&
-      fixed_array->map() != heap->fixed_double_array_map() &&
-      fixed_array != heap->empty_fixed_array()) {
-    if (fixed_array->IsDictionary()) {
-      heap->RecordFixedArraySubTypeStats(dictionary_type,
-                                         fixed_array->Size());
-    } else {
-      heap->RecordFixedArraySubTypeStats(fast_type,
-                                         fixed_array->Size());
-    }
-  }
-}
-
-
-void MarkCompactMarkingVisitor::ObjectStatsVisitBase(
-    MarkCompactMarkingVisitor::VisitorId id, Map* map, HeapObject* obj) {
-  Heap* heap = map->GetHeap();
-  int object_size = obj->Size();
-  heap->RecordObjectStats(map->instance_type(), object_size);
-  non_count_table_.GetVisitorById(id)(map, obj);
-  if (obj->IsJSObject()) {
-    JSObject* object = JSObject::cast(obj);
-    ObjectStatsCountFixedArray(object->elements(),
-                               DICTIONARY_ELEMENTS_SUB_TYPE,
-                               FAST_ELEMENTS_SUB_TYPE);
-    ObjectStatsCountFixedArray(object->properties(),
-                               DICTIONARY_PROPERTIES_SUB_TYPE,
-                               FAST_PROPERTIES_SUB_TYPE);
-  }
-}
-
-
-template<MarkCompactMarkingVisitor::VisitorId id>
-void MarkCompactMarkingVisitor::ObjectStatsTracker<id>::Visit(
-    Map* map, HeapObject* obj) {
-  ObjectStatsVisitBase(id, map, obj);
-}
-
-
-template<>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
-    MarkCompactMarkingVisitor::kVisitMap> {
- public:
-  static inline void Visit(Map* map, HeapObject* obj) {
-    Heap* heap = map->GetHeap();
-    Map* map_obj = Map::cast(obj);
-    ASSERT(map->instance_type() == MAP_TYPE);
-    DescriptorArray* array = map_obj->instance_descriptors();
-    if (map_obj->owns_descriptors() &&
-        array != heap->empty_descriptor_array()) {
-      int fixed_array_size = array->Size();
-      heap->RecordFixedArraySubTypeStats(DESCRIPTOR_ARRAY_SUB_TYPE,
-                                         fixed_array_size);
-    }
-    if (map_obj->HasTransitionArray()) {
-      int fixed_array_size = map_obj->transitions()->Size();
-      heap->RecordFixedArraySubTypeStats(TRANSITION_ARRAY_SUB_TYPE,
-                                         fixed_array_size);
-    }
-    if (map_obj->has_code_cache()) {
-      CodeCache* cache = CodeCache::cast(map_obj->code_cache());
-      heap->RecordFixedArraySubTypeStats(MAP_CODE_CACHE_SUB_TYPE,
-                                         cache->default_cache()->Size());
-      if (!cache->normal_type_cache()->IsUndefined()) {
-        heap->RecordFixedArraySubTypeStats(
-            MAP_CODE_CACHE_SUB_TYPE,
-            FixedArray::cast(cache->normal_type_cache())->Size());
-      }
-    }
-    ObjectStatsVisitBase(kVisitMap, map, obj);
-  }
-};
-
-
-template<>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
-    MarkCompactMarkingVisitor::kVisitCode> {
- public:
-  static inline void Visit(Map* map, HeapObject* obj) {
-    Heap* heap = map->GetHeap();
-    int object_size = obj->Size();
-    ASSERT(map->instance_type() == CODE_TYPE);
-    Code* code_obj = Code::cast(obj);
-    heap->RecordCodeSubTypeStats(code_obj->kind(), code_obj->GetRawAge(),
-                                 object_size);
-    ObjectStatsVisitBase(kVisitCode, map, obj);
-  }
-};
-
-
-template<>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
-    MarkCompactMarkingVisitor::kVisitSharedFunctionInfo> {
- public:
-  static inline void Visit(Map* map, HeapObject* obj) {
-    Heap* heap = map->GetHeap();
-    SharedFunctionInfo* sfi = SharedFunctionInfo::cast(obj);
-    if (sfi->scope_info() != heap->empty_fixed_array()) {
-      heap->RecordFixedArraySubTypeStats(
-          SCOPE_INFO_SUB_TYPE,
-          FixedArray::cast(sfi->scope_info())->Size());
-    }
-    ObjectStatsVisitBase(kVisitSharedFunctionInfo, map, obj);
-  }
-};
-
-
-template<>
-class MarkCompactMarkingVisitor::ObjectStatsTracker<
-    MarkCompactMarkingVisitor::kVisitFixedArray> {
- public:
-  static inline void Visit(Map* map, HeapObject* obj) {
-    Heap* heap = map->GetHeap();
-    FixedArray* fixed_array = FixedArray::cast(obj);
-    if (fixed_array == heap->string_table()) {
-      heap->RecordFixedArraySubTypeStats(
-          STRING_TABLE_SUB_TYPE,
-          fixed_array->Size());
-    }
-    ObjectStatsVisitBase(kVisitFixedArray, map, obj);
-  }
-};
-
-
-void MarkCompactMarkingVisitor::Initialize() {
-  StaticMarkingVisitor<MarkCompactMarkingVisitor>::Initialize();
-
-  table_.Register(kVisitJSRegExp,
-                  &VisitRegExpAndFlushCode);
-
-  if (FLAG_track_gc_object_stats) {
-    // Copy the visitor table to make call-through possible.
-    non_count_table_.CopyFrom(&table_);
-#define VISITOR_ID_COUNT_FUNCTION(id)                                   \
-    table_.Register(kVisit##id, ObjectStatsTracker<kVisit##id>::Visit);
-    VISITOR_ID_LIST(VISITOR_ID_COUNT_FUNCTION)
-#undef VISITOR_ID_COUNT_FUNCTION
-  }
-}
-
-
-VisitorDispatchTable<MarkCompactMarkingVisitor::Callback>
-    MarkCompactMarkingVisitor::non_count_table_;
-
-
-class CodeMarkingVisitor : public ThreadVisitor {
- public:
-  explicit CodeMarkingVisitor(MarkCompactCollector* collector)
-      : collector_(collector) {}
-
-  void VisitThread(Isolate* isolate, ThreadLocalTop* top) {
-    collector_->PrepareThreadForCodeFlushing(isolate, top);
-  }
-
- private:
-  MarkCompactCollector* collector_;
-};
-
-
-class SharedFunctionInfoMarkingVisitor : public ObjectVisitor {
- public:
-  explicit SharedFunctionInfoMarkingVisitor(MarkCompactCollector* collector)
-      : collector_(collector) {}
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) VisitPointer(p);
-  }
-
-  void VisitPointer(Object** slot) {
-    Object* obj = *slot;
-    if (obj->IsSharedFunctionInfo()) {
-      SharedFunctionInfo* shared = reinterpret_cast<SharedFunctionInfo*>(obj);
-      MarkBit shared_mark = Marking::MarkBitFrom(shared);
-      MarkBit code_mark = Marking::MarkBitFrom(shared->code());
-      collector_->MarkObject(shared->code(), code_mark);
-      collector_->MarkObject(shared, shared_mark);
-    }
-  }
-
- private:
-  MarkCompactCollector* collector_;
-};
-
-
-void MarkCompactCollector::PrepareThreadForCodeFlushing(Isolate* isolate,
-                                                        ThreadLocalTop* top) {
-  for (StackFrameIterator it(isolate, top); !it.done(); it.Advance()) {
-    // Note: for the frame that has a pending lazy deoptimization
-    // StackFrame::unchecked_code will return a non-optimized code object for
-    // the outermost function, while StackFrame::LookupCode will return the
-    // actual optimized code object.
-    StackFrame* frame = it.frame();
-    Code* code = frame->unchecked_code();
-    MarkBit code_mark = Marking::MarkBitFrom(code);
-    MarkObject(code, code_mark);
-    if (frame->is_optimized()) {
-      MarkCompactMarkingVisitor::MarkInlinedFunctionsCode(heap(),
-                                                          frame->LookupCode());
-    }
-  }
-}
-
-
-void MarkCompactCollector::PrepareForCodeFlushing() {
-  // Enable code flushing for non-incremental cycles.
-  if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
-    EnableCodeFlushing(!was_marked_incrementally_);
-  }
-
-  // If code flushing is disabled, there is no need to prepare for it.
-  if (!is_code_flushing_enabled()) return;
-
-  // Ensure that empty descriptor array is marked. Method MarkDescriptorArray
-  // relies on it being marked before any other descriptor array.
-  HeapObject* descriptor_array = heap()->empty_descriptor_array();
-  MarkBit descriptor_array_mark = Marking::MarkBitFrom(descriptor_array);
-  MarkObject(descriptor_array, descriptor_array_mark);
-
-  // Make sure we are not referencing the code from the stack.
-  ASSERT(this == heap()->mark_compact_collector());
-  PrepareThreadForCodeFlushing(heap()->isolate(),
-                               heap()->isolate()->thread_local_top());
-
-  // Iterate the archived stacks in all threads to check if
-  // the code is referenced.
-  CodeMarkingVisitor code_marking_visitor(this);
-  heap()->isolate()->thread_manager()->IterateArchivedThreads(
-      &code_marking_visitor);
-
-  SharedFunctionInfoMarkingVisitor visitor(this);
-  heap()->isolate()->compilation_cache()->IterateFunctions(&visitor);
-  heap()->isolate()->handle_scope_implementer()->Iterate(&visitor);
-
-  ProcessMarkingDeque();
-}
-
-
-// Visitor class for marking heap roots.
-class RootMarkingVisitor : public ObjectVisitor {
- public:
-  explicit RootMarkingVisitor(Heap* heap)
-    : collector_(heap->mark_compact_collector()) { }
-
-  void VisitPointer(Object** p) {
-    MarkObjectByPointer(p);
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) MarkObjectByPointer(p);
-  }
-
-  // Skip the weak next code link in a code object, which is visited in
-  // ProcessTopOptimizedFrame.
-  void VisitNextCodeLink(Object** p) { }
-
- private:
-  void MarkObjectByPointer(Object** p) {
-    if (!(*p)->IsHeapObject()) return;
-
-    // Replace flat cons strings in place.
-    HeapObject* object = ShortCircuitConsString(p);
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (mark_bit.Get()) return;
-
-    Map* map = object->map();
-    // Mark the object.
-    collector_->SetMark(object, mark_bit);
-
-    // Mark the map pointer and body, and push them on the marking stack.
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    collector_->MarkObject(map, map_mark);
-    MarkCompactMarkingVisitor::IterateBody(map, object);
-
-    // Mark all the objects reachable from the map and body.  May leave
-    // overflowed objects in the heap.
-    collector_->EmptyMarkingDeque();
-  }
-
-  MarkCompactCollector* collector_;
-};
-
-
-// Helper class for pruning the string table.
-template<bool finalize_external_strings>
-class StringTableCleaner : public ObjectVisitor {
- public:
-  explicit StringTableCleaner(Heap* heap)
-    : heap_(heap), pointers_removed_(0) { }
-
-  virtual void VisitPointers(Object** start, Object** end) {
-    // Visit all HeapObject pointers in [start, end).
-    for (Object** p = start; p < end; p++) {
-      Object* o = *p;
-      if (o->IsHeapObject() &&
-          !Marking::MarkBitFrom(HeapObject::cast(o)).Get()) {
-        if (finalize_external_strings) {
-          ASSERT(o->IsExternalString());
-          heap_->FinalizeExternalString(String::cast(*p));
-        } else {
-          pointers_removed_++;
-        }
-        // Set the entry to the_hole_value (as deleted).
-        *p = heap_->the_hole_value();
-      }
-    }
-  }
-
-  int PointersRemoved() {
-    ASSERT(!finalize_external_strings);
-    return pointers_removed_;
-  }
-
- private:
-  Heap* heap_;
-  int pointers_removed_;
-};
-
-
-typedef StringTableCleaner<false> InternalizedStringTableCleaner;
-typedef StringTableCleaner<true> ExternalStringTableCleaner;
-
-
-// Implementation of WeakObjectRetainer for mark compact GCs. All marked objects
-// are retained.
-class MarkCompactWeakObjectRetainer : public WeakObjectRetainer {
- public:
-  virtual Object* RetainAs(Object* object) {
-    if (Marking::MarkBitFrom(HeapObject::cast(object)).Get()) {
-      return object;
-    } else if (object->IsAllocationSite() &&
-               !(AllocationSite::cast(object)->IsZombie())) {
-      // "dead" AllocationSites need to live long enough for a traversal of new
-      // space. These sites get a one-time reprieve.
-      AllocationSite* site = AllocationSite::cast(object);
-      site->MarkZombie();
-      site->GetHeap()->mark_compact_collector()->MarkAllocationSite(site);
-      return object;
-    } else {
-      return NULL;
-    }
-  }
-};
-
-
-// Fill the marking stack with overflowed objects returned by the given
-// iterator.  Stop when the marking stack is filled or the end of the space
-// is reached, whichever comes first.
-template<class T>
-static void DiscoverGreyObjectsWithIterator(Heap* heap,
-                                            MarkingDeque* marking_deque,
-                                            T* it) {
-  // The caller should ensure that the marking stack is initially not full,
-  // so that we don't waste effort pointlessly scanning for objects.
-  ASSERT(!marking_deque->IsFull());
-
-  Map* filler_map = heap->one_pointer_filler_map();
-  for (HeapObject* object = it->Next();
-       object != NULL;
-       object = it->Next()) {
-    MarkBit markbit = Marking::MarkBitFrom(object);
-    if ((object->map() != filler_map) && Marking::IsGrey(markbit)) {
-      Marking::GreyToBlack(markbit);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
-      marking_deque->PushBlack(object);
-      if (marking_deque->IsFull()) return;
-    }
-  }
-}
-
-
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts);
-
-
-static void DiscoverGreyObjectsOnPage(MarkingDeque* marking_deque,
-                                      MemoryChunk* p) {
-  ASSERT(!marking_deque->IsFull());
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    const MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
-
-    MarkBit::CellType grey_objects;
-    if (it.HasNext()) {
-      const MarkBit::CellType next_cell = *(cell+1);
-      grey_objects = current_cell &
-          ((current_cell >> 1) | (next_cell << (Bitmap::kBitsPerCell - 1)));
-    } else {
-      grey_objects = current_cell & (current_cell >> 1);
-    }
-
-    int offset = 0;
-    while (grey_objects != 0) {
-      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(grey_objects);
-      grey_objects >>= trailing_zeros;
-      offset += trailing_zeros;
-      MarkBit markbit(cell, 1 << offset, false);
-      ASSERT(Marking::IsGrey(markbit));
-      Marking::GreyToBlack(markbit);
-      Address addr = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(addr);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), object->Size());
-      marking_deque->PushBlack(object);
-      if (marking_deque->IsFull()) return;
-      offset += 2;
-      grey_objects >>= 2;
-    }
-
-    grey_objects >>= (Bitmap::kBitsPerCell - 1);
-  }
-}
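-
-
-// Bit-level sketch of the grey-object mask above, using 8-bit cells for
-// readability (real cells are Bitmap::kBitsPerCell wide).  With the bit
-// patterns asserted above (white 00, black 10, grey 11; an object's first
-// mark bit is the lower one), a cell 0b11001011 gives
-// 0b11001011 & (0b11001011 >> 1) == 0b01000001, i.e. grey objects start at
-// offsets 0 and 6.  The next_cell term only matters for an object whose
-// two mark bits straddle a cell boundary.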
-
-
-int MarkCompactCollector::DiscoverAndPromoteBlackObjectsOnPage(
-    NewSpace* new_space,
-    NewSpacePage* p) {
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
-
-  MarkBit::CellType* cells = p->markbits()->cells();
-  int survivors_size = 0;
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    MarkBit::CellType current_cell = *cell;
-    if (current_cell == 0) continue;
-
-    int offset = 0;
-    while (current_cell != 0) {
-      int trailing_zeros = CompilerIntrinsics::CountTrailingZeros(current_cell);
-      current_cell >>= trailing_zeros;
-      offset += trailing_zeros;
-      Address address = cell_base + offset * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(address);
-
-      int size = object->Size();
-      survivors_size += size;
-
-      Heap::UpdateAllocationSiteFeedback(object, Heap::RECORD_SCRATCHPAD_SLOT);
-
-      offset++;
-      current_cell >>= 1;
-      // Aggressively promote young survivors to the old space.
-      if (TryPromoteObject(object, size)) {
-        continue;
-      }
-
-      // Promotion failed. Just migrate the object to the other semispace.
-      AllocationResult allocation = new_space->AllocateRaw(size);
-      if (allocation.IsRetry()) {
-        if (!new_space->AddFreshPage()) {
-          // Shouldn't happen. We are sweeping linearly, and to-space
-          // has the same number of pages as from-space, so there is
-          // always room.
-          UNREACHABLE();
-        }
-        allocation = new_space->AllocateRaw(size);
-        ASSERT(!allocation.IsRetry());
-      }
-      Object* target = allocation.ToObjectChecked();
-
-      MigrateObject(HeapObject::cast(target),
-                    object,
-                    size,
-                    NEW_SPACE);
-      heap()->IncrementSemiSpaceCopiedObjectSize(size);
-    }
-    *cells = 0;
-  }
-  return survivors_size;
-}
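-
-
-// Note on the promotion loop above: every live object is first offered to
-// the old space via TryPromoteObject; only if that allocation fails is it
-// copied to the other semispace, where AddFreshPage can always make room
-// because to-space has the same number of pages as from-space.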
-
-
-static void DiscoverGreyObjectsInSpace(Heap* heap,
-                                       MarkingDeque* marking_deque,
-                                       PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    DiscoverGreyObjectsOnPage(marking_deque, p);
-    if (marking_deque->IsFull()) return;
-  }
-}
-
-
-static void DiscoverGreyObjectsInNewSpace(Heap* heap,
-                                          MarkingDeque* marking_deque) {
-  NewSpace* space = heap->new_space();
-  NewSpacePageIterator it(space->bottom(), space->top());
-  while (it.has_next()) {
-    NewSpacePage* page = it.next();
-    DiscoverGreyObjectsOnPage(marking_deque, page);
-    if (marking_deque->IsFull()) return;
-  }
-}
-
-
-bool MarkCompactCollector::IsUnmarkedHeapObject(Object** p) {
-  Object* o = *p;
-  if (!o->IsHeapObject()) return false;
-  HeapObject* heap_object = HeapObject::cast(o);
-  MarkBit mark = Marking::MarkBitFrom(heap_object);
-  return !mark.Get();
-}
-
-
-bool MarkCompactCollector::IsUnmarkedHeapObjectWithHeap(Heap* heap,
-                                                        Object** p) {
-  Object* o = *p;
-  ASSERT(o->IsHeapObject());
-  HeapObject* heap_object = HeapObject::cast(o);
-  MarkBit mark = Marking::MarkBitFrom(heap_object);
-  return !mark.Get();
-}
-
-
-void MarkCompactCollector::MarkStringTable(RootMarkingVisitor* visitor) {
-  StringTable* string_table = heap()->string_table();
-  // Mark the string table itself.
-  MarkBit string_table_mark = Marking::MarkBitFrom(string_table);
-  if (!string_table_mark.Get()) {
-    // The string table could have already been marked by visiting the handles list.
-    SetMark(string_table, string_table_mark);
-  }
-  // Explicitly mark the prefix.
-  string_table->IteratePrefix(visitor);
-  ProcessMarkingDeque();
-}
-
-
-void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
-  MarkBit mark_bit = Marking::MarkBitFrom(site);
-  SetMark(site, mark_bit);
-}
-
-
-void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
-  // Mark the heap roots including global variables, stack variables,
-  // etc., and all objects reachable from them.
-  heap()->IterateStrongRoots(visitor, VISIT_ONLY_STRONG);
-
-  // Handle the string table specially.
-  MarkStringTable(visitor);
-
-  MarkWeakObjectToCodeTable();
-
-  // There may be overflowed objects in the heap.  Visit them now.
-  while (marking_deque_.overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
-  }
-}
-
-
-void MarkCompactCollector::MarkImplicitRefGroups() {
-  List<ImplicitRefGroup*>* ref_groups =
-      isolate()->global_handles()->implicit_ref_groups();
-
-  int last = 0;
-  for (int i = 0; i < ref_groups->length(); i++) {
-    ImplicitRefGroup* entry = ref_groups->at(i);
-    ASSERT(entry != NULL);
-
-    if (!IsMarked(*entry->parent)) {
-      (*ref_groups)[last++] = entry;
-      continue;
-    }
-
-    Object*** children = entry->children;
-    // A parent object is marked, so mark all child heap objects.
-    for (size_t j = 0; j < entry->length; ++j) {
-      if ((*children[j])->IsHeapObject()) {
-        HeapObject* child = HeapObject::cast(*children[j]);
-        MarkBit mark = Marking::MarkBitFrom(child);
-        MarkObject(child, mark);
-      }
-    }
-
-    // Once the entire group has been marked, dispose of it because it's
-    // not needed anymore.
-    delete entry;
-  }
-  ref_groups->Rewind(last);
-}
-
-
-void MarkCompactCollector::MarkWeakObjectToCodeTable() {
-  HeapObject* weak_object_to_code_table =
-      HeapObject::cast(heap()->weak_object_to_code_table());
-  if (!IsMarked(weak_object_to_code_table)) {
-    MarkBit mark = Marking::MarkBitFrom(weak_object_to_code_table);
-    SetMark(weak_object_to_code_table, mark);
-  }
-}
-
-
-// Mark all objects reachable from the objects on the marking stack.
-// Before: the marking stack contains zero or more heap object pointers.
-// After: the marking stack is empty, and all objects reachable from the
-// marking stack have been marked, or are overflowed in the heap.
-void MarkCompactCollector::EmptyMarkingDeque() {
-  while (!marking_deque_.IsEmpty()) {
-    HeapObject* object = marking_deque_.Pop();
-    ASSERT(object->IsHeapObject());
-    ASSERT(heap()->Contains(object));
-    ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-    Map* map = object->map();
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    MarkObject(map, map_mark);
-
-    MarkCompactMarkingVisitor::IterateBody(map, object);
-  }
-}
-
-
-// Sweep the heap for overflowed objects, clear their overflow bits, and
-// push them on the marking stack.  Stop early if the marking stack fills
-// before sweeping completes.  If sweeping completes, there are no remaining
-// overflowed objects in the heap so the overflow flag on the marking stack
-// is cleared.
-void MarkCompactCollector::RefillMarkingDeque() {
-  ASSERT(marking_deque_.overflowed());
-
-  DiscoverGreyObjectsInNewSpace(heap(), &marking_deque_);
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->old_pointer_space());
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->old_data_space());
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->code_space());
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->map_space());
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->cell_space());
-  if (marking_deque_.IsFull()) return;
-
-  DiscoverGreyObjectsInSpace(heap(),
-                             &marking_deque_,
-                             heap()->property_cell_space());
-  if (marking_deque_.IsFull()) return;
-
-  LargeObjectIterator lo_it(heap()->lo_space());
-  DiscoverGreyObjectsWithIterator(heap(),
-                                  &marking_deque_,
-                                  &lo_it);
-  if (marking_deque_.IsFull()) return;
-
-  marking_deque_.ClearOverflowed();
-}
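-
-
-// Note on RefillMarkingDeque above: the spaces are scanned in turn, and as
-// soon as the deque fills up we return with the overflow flag still set,
-// so the caller will empty the deque and call us again until a full scan
-// completes without filling it.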
-
-
-// Mark all objects reachable (transitively) from objects on the marking
-// stack.  Before: the marking stack contains zero or more heap object
-// pointers.  After: the marking stack is empty and there are no overflowed
-// objects in the heap.
-void MarkCompactCollector::ProcessMarkingDeque() {
-  EmptyMarkingDeque();
-  while (marking_deque_.overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
-  }
-}
-
-
-// Mark all objects reachable (transitively) from objects on the marking
-// stack including references only considered in the atomic marking pause.
-void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
-  bool work_to_do = true;
-  ASSERT(marking_deque_.IsEmpty());
-  while (work_to_do) {
-    isolate()->global_handles()->IterateObjectGroups(
-        visitor, &IsUnmarkedHeapObjectWithHeap);
-    MarkImplicitRefGroups();
-    ProcessWeakCollections();
-    work_to_do = !marking_deque_.IsEmpty();
-    ProcessMarkingDeque();
-  }
-}
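-
-
-// Note on ProcessEphemeralMarking above: it iterates to a fixed point.
-// Marking object groups, implicit ref groups and weak collections can each
-// push new objects onto the marking deque, which in turn can make further
-// entries eligible, so we repeat until a round discovers nothing new.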
-
-
-void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
-  for (StackFrameIterator it(isolate(), isolate()->thread_local_top());
-       !it.done(); it.Advance()) {
-    if (it.frame()->type() == StackFrame::JAVA_SCRIPT) {
-      return;
-    }
-    if (it.frame()->type() == StackFrame::OPTIMIZED) {
-      Code* code = it.frame()->LookupCode();
-      if (!code->CanDeoptAt(it.frame()->pc())) {
-        code->CodeIterateBody(visitor);
-      }
-      ProcessMarkingDeque();
-      return;
-    }
-  }
-}
-
-
-void MarkCompactCollector::MarkLiveObjects() {
-  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_MARK);
-  // The recursive GC marker detects when it is nearing stack overflow,
-  // and switches to a different marking system.  JS interrupts interfere
-  // with the C stack limit check.
-  PostponeInterruptsScope postpone(isolate());
-
-  bool incremental_marking_overflowed = false;
-  IncrementalMarking* incremental_marking = heap_->incremental_marking();
-  if (was_marked_incrementally_) {
-    // Finalize the incremental marking and check whether we had an overflow.
-    // Both markers use the grey color to mark overflowed objects, so the
-    // non-incremental marker can deal with them as if the overflow had
-    // occurred during normal marking.
-    // But the incremental marker uses a separate marking deque,
-    // so we have to explicitly copy its overflow state.
-    incremental_marking->Finalize();
-    incremental_marking_overflowed =
-        incremental_marking->marking_deque()->overflowed();
-    incremental_marking->marking_deque()->ClearOverflowed();
-  } else {
-    // Abort any pending incremental activities e.g. incremental sweeping.
-    incremental_marking->Abort();
-  }
-
-#ifdef DEBUG
-  ASSERT(state_ == PREPARE_GC);
-  state_ = MARK_LIVE_OBJECTS;
-#endif
-  // The to space contains live objects; a page in from space is used as a
-  // marking stack.
-  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
-  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
-  if (FLAG_force_marking_deque_overflows) {
-    marking_deque_end = marking_deque_start + 64 * kPointerSize;
-  }
-  marking_deque_.Initialize(marking_deque_start,
-                            marking_deque_end);
-  ASSERT(!marking_deque_.overflowed());
-
-  if (incremental_marking_overflowed) {
-    // There are overflowed objects left in the heap after incremental marking.
-    marking_deque_.SetOverflowed();
-  }
-
-  PrepareForCodeFlushing();
-
-  if (was_marked_incrementally_) {
-    // There is no write barrier on cells so we have to scan them now at the end
-    // of the incremental marking.
-    {
-      HeapObjectIterator cell_iterator(heap()->cell_space());
-      HeapObject* cell;
-      while ((cell = cell_iterator.Next()) != NULL) {
-        ASSERT(cell->IsCell());
-        if (IsMarked(cell)) {
-          int offset = Cell::kValueOffset;
-          MarkCompactMarkingVisitor::VisitPointer(
-              heap(),
-              reinterpret_cast<Object**>(cell->address() + offset));
-        }
-      }
-    }
-    {
-      HeapObjectIterator js_global_property_cell_iterator(
-          heap()->property_cell_space());
-      HeapObject* cell;
-      while ((cell = js_global_property_cell_iterator.Next()) != NULL) {
-        ASSERT(cell->IsPropertyCell());
-        if (IsMarked(cell)) {
-          MarkCompactMarkingVisitor::VisitPropertyCell(cell->map(), cell);
-        }
-      }
-    }
-  }
-
-  RootMarkingVisitor root_visitor(heap());
-  MarkRoots(&root_visitor);
-
-  ProcessTopOptimizedFrame(&root_visitor);
-
-  // The objects reachable from the roots are marked, yet unreachable
-  // objects are unmarked.  Mark objects reachable due to host
-  // application specific logic or through Harmony weak maps.
-  ProcessEphemeralMarking(&root_visitor);
-
-  // The objects reachable from the roots, weak maps or object groups
-  // are marked, yet unreachable objects are unmarked.  Mark objects
-  // reachable only from weak global handles.
-  //
-  // First we identify nonlive weak handles and mark them as pending
-  // destruction.
-  heap()->isolate()->global_handles()->IdentifyWeakHandles(
-      &IsUnmarkedHeapObject);
-  // Then we mark the objects and process the transitive closure.
-  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-  while (marking_deque_.overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
-  }
-
-  // Repeat host application specific and Harmony weak maps marking to
-  // mark unmarked objects reachable from the weak roots.
-  ProcessEphemeralMarking(&root_visitor);
-
-  AfterMarking();
-}
-
-
-void MarkCompactCollector::AfterMarking() {
-  // Object literal map caches reference strings (cache keys) and maps
-  // (cache values). At this point, maps that are still useful have already
-  // been marked. Mark the keys for the alive values before we process the
-  // string table.
-  ProcessMapCaches();
-
-  // Prune the string table removing all strings only pointed to by the
-  // string table.  Cannot use string_table() here because the string
-  // table is marked.
-  StringTable* string_table = heap()->string_table();
-  InternalizedStringTableCleaner internalized_visitor(heap());
-  string_table->IterateElements(&internalized_visitor);
-  string_table->ElementsRemoved(internalized_visitor.PointersRemoved());
-
-  ExternalStringTableCleaner external_visitor(heap());
-  heap()->external_string_table_.Iterate(&external_visitor);
-  heap()->external_string_table_.CleanUp();
-
-  // Process the weak references.
-  MarkCompactWeakObjectRetainer mark_compact_object_retainer;
-  heap()->ProcessWeakReferences(&mark_compact_object_retainer);
-
-  // Remove object groups after marking phase.
-  heap()->isolate()->global_handles()->RemoveObjectGroups();
-  heap()->isolate()->global_handles()->RemoveImplicitRefGroups();
-
-  // Flush code from collected candidates.
-  if (is_code_flushing_enabled()) {
-    code_flusher_->ProcessCandidates();
-    // If the incremental marker does not support code flushing, we need to
-    // disable it before the incremental marking steps for the next cycle.
-    if (FLAG_flush_code && !FLAG_flush_code_incrementally) {
-      EnableCodeFlushing(false);
-    }
-  }
-
-  if (FLAG_track_gc_object_stats) {
-    heap()->CheckpointObjectStats();
-  }
-}
-
-
-void MarkCompactCollector::ProcessMapCaches() {
-  Object* raw_context = heap()->native_contexts_list();
-  while (raw_context != heap()->undefined_value()) {
-    Context* context = reinterpret_cast<Context*>(raw_context);
-    if (IsMarked(context)) {
-      HeapObject* raw_map_cache =
-          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
-      // A map cache may be reachable from the stack. In this case
-      // it's already transitively marked and it's too late to clean
-      // up its parts.
-      if (!IsMarked(raw_map_cache) &&
-          raw_map_cache != heap()->undefined_value()) {
-        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
-        int existing_elements = map_cache->NumberOfElements();
-        int used_elements = 0;
-        for (int i = MapCache::kElementsStartIndex;
-             i < map_cache->length();
-             i += MapCache::kEntrySize) {
-          Object* raw_key = map_cache->get(i);
-          if (raw_key == heap()->undefined_value() ||
-              raw_key == heap()->the_hole_value()) continue;
-          STATIC_ASSERT(MapCache::kEntrySize == 2);
-          Object* raw_map = map_cache->get(i + 1);
-          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
-            ++used_elements;
-          } else {
-            // Delete useless entries with unmarked maps.
-            ASSERT(raw_map->IsMap());
-            map_cache->set_the_hole(i);
-            map_cache->set_the_hole(i + 1);
-          }
-        }
-        if (used_elements == 0) {
-          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
-        } else {
-          // Note: we don't actually shrink the cache here to avoid
-          // extra complexity during GC. We rely on subsequent cache
-          // usages (EnsureCapacity) to do this.
-          map_cache->ElementsRemoved(existing_elements - used_elements);
-          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
-          MarkObject(map_cache, map_cache_markbit);
-        }
-      }
-    }
-    // Move to next element in the list.
-    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
-  }
-  ProcessMarkingDeque();
-}
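-
-
-// Note on ProcessMapCaches above: a map cache stores its entries as
-// consecutive (key, map) slot pairs, hence STATIC_ASSERT(kEntrySize == 2)
-// and the deletion of both slots at once.  A cache in which every map died
-// is dropped wholesale by resetting Context::MAP_CACHE_INDEX to undefined.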
-
-
-void MarkCompactCollector::ClearNonLiveReferences() {
-  // Iterate over the map space, setting map transitions that go from
-  // a marked map to an unmarked map to null transitions.  This action
-  // is carried out only on maps of JSObjects and related subtypes.
-  HeapObjectIterator map_iterator(heap()->map_space());
-  for (HeapObject* obj = map_iterator.Next();
-       obj != NULL;
-       obj = map_iterator.Next()) {
-    Map* map = Map::cast(obj);
-
-    if (!map->CanTransition()) continue;
-
-    MarkBit map_mark = Marking::MarkBitFrom(map);
-    ClearNonLivePrototypeTransitions(map);
-    ClearNonLiveMapTransitions(map, map_mark);
-
-    if (map_mark.Get()) {
-      ClearNonLiveDependentCode(map->dependent_code());
-    } else {
-      ClearDependentCode(map->dependent_code());
-      map->set_dependent_code(DependentCode::cast(heap()->empty_fixed_array()));
-    }
-  }
-
-  // Iterate over property cell space, removing dependent code that is not
-  // otherwise kept alive by strong references.
-  HeapObjectIterator cell_iterator(heap_->property_cell_space());
-  for (HeapObject* cell = cell_iterator.Next();
-       cell != NULL;
-       cell = cell_iterator.Next()) {
-    if (IsMarked(cell)) {
-      ClearNonLiveDependentCode(PropertyCell::cast(cell)->dependent_code());
-    }
-  }
-
-  // Iterate over allocation sites, removing dependent code that is not
-  // otherwise kept alive by strong references.
-  Object* undefined = heap()->undefined_value();
-  for (Object* site = heap()->allocation_sites_list();
-       site != undefined;
-       site = AllocationSite::cast(site)->weak_next()) {
-    if (IsMarked(site)) {
-      ClearNonLiveDependentCode(AllocationSite::cast(site)->dependent_code());
-    }
-  }
-
-  if (heap_->weak_object_to_code_table()->IsHashTable()) {
-    WeakHashTable* table =
-        WeakHashTable::cast(heap_->weak_object_to_code_table());
-    uint32_t capacity = table->Capacity();
-    for (uint32_t i = 0; i < capacity; i++) {
-      uint32_t key_index = table->EntryToIndex(i);
-      Object* key = table->get(key_index);
-      if (!table->IsKey(key)) continue;
-      uint32_t value_index = table->EntryToValueIndex(i);
-      Object* value = table->get(value_index);
-      if (key->IsCell() && !IsMarked(key)) {
-        Cell* cell = Cell::cast(key);
-        Object* object = cell->value();
-        if (IsMarked(object)) {
-          MarkBit mark = Marking::MarkBitFrom(cell);
-          SetMark(cell, mark);
-          Object** value_slot = HeapObject::RawField(cell, Cell::kValueOffset);
-          RecordSlot(value_slot, value_slot, *value_slot);
-        }
-      }
-      if (IsMarked(key)) {
-        if (!IsMarked(value)) {
-          HeapObject* obj = HeapObject::cast(value);
-          MarkBit mark = Marking::MarkBitFrom(obj);
-          SetMark(obj, mark);
-        }
-        ClearNonLiveDependentCode(DependentCode::cast(value));
-      } else {
-        ClearDependentCode(DependentCode::cast(value));
-        table->set(key_index, heap_->the_hole_value());
-        table->set(value_index, heap_->the_hole_value());
-        table->ElementRemoved();
-      }
-    }
-  }
-}
-
-
-void MarkCompactCollector::ClearNonLivePrototypeTransitions(Map* map) {
-  int number_of_transitions = map->NumberOfProtoTransitions();
-  FixedArray* prototype_transitions = map->GetPrototypeTransitions();
-
-  int new_number_of_transitions = 0;
-  const int header = Map::kProtoTransitionHeaderSize;
-  const int proto_offset = header + Map::kProtoTransitionPrototypeOffset;
-  const int map_offset = header + Map::kProtoTransitionMapOffset;
-  const int step = Map::kProtoTransitionElementsPerEntry;
-  for (int i = 0; i < number_of_transitions; i++) {
-    Object* prototype = prototype_transitions->get(proto_offset + i * step);
-    Object* cached_map = prototype_transitions->get(map_offset + i * step);
-    if (IsMarked(prototype) && IsMarked(cached_map)) {
-      ASSERT(!prototype->IsUndefined());
-      int proto_index = proto_offset + new_number_of_transitions * step;
-      int map_index = map_offset + new_number_of_transitions * step;
-      if (new_number_of_transitions != i) {
-        prototype_transitions->set(
-            proto_index,
-            prototype,
-            UPDATE_WRITE_BARRIER);
-        prototype_transitions->set(
-            map_index,
-            cached_map,
-            SKIP_WRITE_BARRIER);
-      }
-      Object** slot = prototype_transitions->RawFieldOfElementAt(proto_index);
-      RecordSlot(slot, slot, prototype);
-      new_number_of_transitions++;
-    }
-  }
-
-  if (new_number_of_transitions != number_of_transitions) {
-    map->SetNumberOfProtoTransitions(new_number_of_transitions);
-  }
-
-  // Fill slots that became free with undefined value.
-  for (int i = new_number_of_transitions * step;
-       i < number_of_transitions * step;
-       i++) {
-    prototype_transitions->set_undefined(header + i);
-  }
-}
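-
-
-// Sketch of the compaction above: the prototype transition array holds
-// fixed-size records after a small header, one prototype slot and one
-// cached-map slot per entry, step elements apart.  Entries whose prototype
-// and cached map are both still live are slid towards the front in place,
-// and the tail that becomes free is overwritten with undefined.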
-
-
-void MarkCompactCollector::ClearNonLiveMapTransitions(Map* map,
-                                                      MarkBit map_mark) {
-  Object* potential_parent = map->GetBackPointer();
-  if (!potential_parent->IsMap()) return;
-  Map* parent = Map::cast(potential_parent);
-
-  // Follow the back pointer, check whether we are dealing with a map
-  // transition from a live map to a dead path, and if so, clear the
-  // transitions of the parent.
-  bool current_is_alive = map_mark.Get();
-  bool parent_is_alive = Marking::MarkBitFrom(parent).Get();
-  if (!current_is_alive && parent_is_alive) {
-    parent->ClearNonLiveTransitions(heap());
-  }
-}
-
-
-void MarkCompactCollector::ClearDependentICList(Object* head) {
-  Object* current = head;
-  Object* undefined = heap()->undefined_value();
-  while (current != undefined) {
-    Code* code = Code::cast(current);
-    if (IsMarked(code)) {
-      ASSERT(code->is_weak_stub());
-      IC::InvalidateMaps(code);
-    }
-    current = code->next_code_link();
-    code->set_next_code_link(undefined);
-  }
-}
-
-
-void MarkCompactCollector::ClearDependentCode(
-    DependentCode* entries) {
-  DisallowHeapAllocation no_allocation;
-  DependentCode::GroupStartIndexes starts(entries);
-  int number_of_entries = starts.number_of_entries();
-  if (number_of_entries == 0) return;
-  int g = DependentCode::kWeakICGroup;
-  if (starts.at(g) != starts.at(g + 1)) {
-    int i = starts.at(g);
-    ASSERT(i + 1 == starts.at(g + 1));
-    Object* head = entries->object_at(i);
-    ClearDependentICList(head);
-  }
-  g = DependentCode::kWeakCodeGroup;
-  for (int i = starts.at(g); i < starts.at(g + 1); i++) {
-    // If the entry is compilation info then the map must be alive,
-    // and ClearDependentCode shouldn't be called.
-    ASSERT(entries->is_code_at(i));
-    Code* code = entries->code_at(i);
-    if (IsMarked(code) && !code->marked_for_deoptimization()) {
-      code->set_marked_for_deoptimization(true);
-      code->InvalidateEmbeddedObjects();
-      have_code_to_deoptimize_ = true;
-    }
-  }
-  for (int i = 0; i < number_of_entries; i++) {
-    entries->clear_at(i);
-  }
-}
-
-
-int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
-    DependentCode* entries, int group, int start, int end, int new_start) {
-  int survived = 0;
-  if (group == DependentCode::kWeakICGroup) {
-    // Dependent weak IC stubs form a linked list and only the head is stored
-    // in the dependent code array.
-    if (start != end) {
-      ASSERT(start + 1 == end);
-      Object* old_head = entries->object_at(start);
-      MarkCompactWeakObjectRetainer retainer;
-      Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
-      entries->set_object_at(new_start, head);
-      Object** slot = entries->slot_at(new_start);
-      RecordSlot(slot, slot, head);
-      // We do not compact this group even if the head is undefined, since
-      // more dependent ICs are likely to be added later.
-      survived = 1;
-    }
-  } else {
-    for (int i = start; i < end; i++) {
-      Object* obj = entries->object_at(i);
-      ASSERT(obj->IsCode() || IsMarked(obj));
-      if (IsMarked(obj) &&
-          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
-        if (new_start + survived != i) {
-          entries->set_object_at(new_start + survived, obj);
-        }
-        Object** slot = entries->slot_at(new_start + survived);
-        RecordSlot(slot, slot, obj);
-        survived++;
-      }
-    }
-  }
-  entries->set_number_of_entries(
-      static_cast<DependentCode::DependencyGroup>(group), survived);
-  return survived;
-}
-
-
-void MarkCompactCollector::ClearNonLiveDependentCode(DependentCode* entries) {
-  DisallowHeapAllocation no_allocation;
-  DependentCode::GroupStartIndexes starts(entries);
-  int number_of_entries = starts.number_of_entries();
-  if (number_of_entries == 0) return;
-  int new_number_of_entries = 0;
-  // Go through all groups, remove dead code objects, and compact.
-  for (int g = 0; g < DependentCode::kGroupCount; g++) {
-    int survived = ClearNonLiveDependentCodeInGroup(
-        entries, g, starts.at(g), starts.at(g + 1), new_number_of_entries);
-    new_number_of_entries += survived;
-  }
-  for (int i = new_number_of_entries; i < number_of_entries; i++) {
-    entries->clear_at(i);
-  }
-}
-
-
-void MarkCompactCollector::ProcessWeakCollections() {
-  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_PROCESS);
-  Object* weak_collection_obj = heap()->encountered_weak_collections();
-  while (weak_collection_obj != Smi::FromInt(0)) {
-    JSWeakCollection* weak_collection =
-        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
-    ASSERT(MarkCompactCollector::IsMarked(weak_collection));
-    if (weak_collection->table()->IsHashTable()) {
-      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
-      Object** anchor = reinterpret_cast<Object**>(table->address());
-      for (int i = 0; i < table->Capacity(); i++) {
-        if (MarkCompactCollector::IsMarked(HeapObject::cast(table->KeyAt(i)))) {
-          Object** key_slot =
-              table->RawFieldOfElementAt(ObjectHashTable::EntryToIndex(i));
-          RecordSlot(anchor, key_slot, *key_slot);
-          Object** value_slot =
-              table->RawFieldOfElementAt(ObjectHashTable::EntryToValueIndex(i));
-          MarkCompactMarkingVisitor::MarkObjectByPointer(
-              this, anchor, value_slot);
-        }
-      }
-    }
-    weak_collection_obj = weak_collection->next();
-  }
-}
-
-
-void MarkCompactCollector::ClearWeakCollections() {
-  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_WEAKCOLLECTION_CLEAR);
-  Object* weak_collection_obj = heap()->encountered_weak_collections();
-  while (weak_collection_obj != Smi::FromInt(0)) {
-    JSWeakCollection* weak_collection =
-        reinterpret_cast<JSWeakCollection*>(weak_collection_obj);
-    ASSERT(MarkCompactCollector::IsMarked(weak_collection));
-    if (weak_collection->table()->IsHashTable()) {
-      ObjectHashTable* table = ObjectHashTable::cast(weak_collection->table());
-      for (int i = 0; i < table->Capacity(); i++) {
-        HeapObject* key = HeapObject::cast(table->KeyAt(i));
-        if (!MarkCompactCollector::IsMarked(key)) {
-          table->RemoveEntry(i);
-        }
-      }
-    }
-    weak_collection_obj = weak_collection->next();
-    weak_collection->set_next(heap()->undefined_value());
-  }
-  heap()->set_encountered_weak_collections(Smi::FromInt(0));
-}
-
-
-// We scavenge new space simultaneously with sweeping. This is done in two
-// passes.
-//
-// The first pass migrates all alive objects from one semispace to another or
-// promotes them to old space.  The forwarding address is written directly
-// into the first word of the object without any encoding.  If the object is
-// dead we write NULL as a forwarding address.
-//
-// The second pass updates pointers to new space in all spaces.  It is
-// possible to encounter pointers to dead new space objects during the
-// traversal of pointers to new space.  We should clear them to avoid
-// encountering them during the next pointer iteration.  This is an issue
-// if the store buffer overflows and we have to scan the entire old space,
-// including dead objects, looking for pointers to new space.
-void MarkCompactCollector::MigrateObject(HeapObject* dst,
-                                         HeapObject* src,
-                                         int size,
-                                         AllocationSpace dest) {
-  Address dst_addr = dst->address();
-  Address src_addr = src->address();
-  HeapProfiler* heap_profiler = heap()->isolate()->heap_profiler();
-  if (heap_profiler->is_tracking_object_moves()) {
-    heap_profiler->ObjectMoveEvent(src_addr, dst_addr, size);
-  }
-  ASSERT(heap()->AllowedToBeMigrated(src, dest));
-  ASSERT(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
-  if (dest == OLD_POINTER_SPACE) {
-    Address src_slot = src_addr;
-    Address dst_slot = dst_addr;
-    ASSERT(IsAligned(size, kPointerSize));
-
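-    // Copy word by word so that the store buffer (for new-space pointers)
-    // and the migration slots buffer (for pointers into evacuation
-    // candidates) can be rebuilt on the fly instead of re-scanning the
-    // destination object afterwards.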
-    for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
-      Object* value = Memory::Object_at(src_slot);
-
-      Memory::Object_at(dst_slot) = value;
-
-      if (heap_->InNewSpace(value)) {
-        heap_->store_buffer()->Mark(dst_slot);
-      } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
-        SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                           &migration_slots_buffer_,
-                           reinterpret_cast<Object**>(dst_slot),
-                           SlotsBuffer::IGNORE_OVERFLOW);
-      }
-
-      src_slot += kPointerSize;
-      dst_slot += kPointerSize;
-    }
-
-    if (compacting_ && dst->IsJSFunction()) {
-      Address code_entry_slot = dst_addr + JSFunction::kCodeEntryOffset;
-      Address code_entry = Memory::Address_at(code_entry_slot);
-
-      if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-        SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                           &migration_slots_buffer_,
-                           SlotsBuffer::CODE_ENTRY_SLOT,
-                           code_entry_slot,
-                           SlotsBuffer::IGNORE_OVERFLOW);
-      }
-    } else if (compacting_ && dst->IsConstantPoolArray()) {
-      ConstantPoolArray* array = ConstantPoolArray::cast(dst);
-      ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
-      while (!code_iter.is_finished()) {
-        Address code_entry_slot =
-            dst_addr + array->OffsetOfElementAt(code_iter.next_index());
-        Address code_entry = Memory::Address_at(code_entry_slot);
-
-        if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
-          SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                             &migration_slots_buffer_,
-                             SlotsBuffer::CODE_ENTRY_SLOT,
-                             code_entry_slot,
-                             SlotsBuffer::IGNORE_OVERFLOW);
-        }
-      }
-    }
-  } else if (dest == CODE_SPACE) {
-    PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
-    heap()->MoveBlock(dst_addr, src_addr, size);
-    SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                       &migration_slots_buffer_,
-                       SlotsBuffer::RELOCATED_CODE_OBJECT,
-                       dst_addr,
-                       SlotsBuffer::IGNORE_OVERFLOW);
-    Code::cast(dst)->Relocate(dst_addr - src_addr);
-  } else {
-    ASSERT(dest == OLD_DATA_SPACE || dest == NEW_SPACE);
-    heap()->MoveBlock(dst_addr, src_addr, size);
-  }
-  Memory::Address_at(src_addr) = dst_addr;
-}
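-
-
-// The final store in MigrateObject installs the forwarding pointer
-// described in the comment block above: the first word of the evacuated
-// source object is overwritten with the destination address, unencoded.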
-
-
-// Visitor for updating pointers from live objects in old spaces to new space.
-// It does not expect to encounter pointers to dead objects.
-class PointersUpdatingVisitor: public ObjectVisitor {
- public:
-  explicit PointersUpdatingVisitor(Heap* heap) : heap_(heap) { }
-
-  void VisitPointer(Object** p) {
-    UpdatePointer(p);
-  }
-
-  void VisitPointers(Object** start, Object** end) {
-    for (Object** p = start; p < end; p++) UpdatePointer(p);
-  }
-
-  void VisitEmbeddedPointer(RelocInfo* rinfo) {
-    ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-    Object* target = rinfo->target_object();
-    Object* old_target = target;
-    VisitPointer(&target);
-    // Avoid unnecessary changes that might unnecessarily flush the
-    // instruction cache.
-    if (target != old_target) {
-      rinfo->set_target_object(target);
-    }
-  }
-
-  void VisitCodeTarget(RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-    Object* old_target = target;
-    VisitPointer(&target);
-    if (target != old_target) {
-      rinfo->set_target_address(Code::cast(target)->instruction_start());
-    }
-  }
-
-  void VisitCodeAgeSequence(RelocInfo* rinfo) {
-    ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
-    Object* stub = rinfo->code_age_stub();
-    ASSERT(stub != NULL);
-    VisitPointer(&stub);
-    if (stub != rinfo->code_age_stub()) {
-      rinfo->set_code_age_stub(Code::cast(stub));
-    }
-  }
-
-  void VisitDebugTarget(RelocInfo* rinfo) {
-    ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-            rinfo->IsPatchedReturnSequence()) ||
-           (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-            rinfo->IsPatchedDebugBreakSlotSequence()));
-    Object* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
-    VisitPointer(&target);
-    rinfo->set_call_address(Code::cast(target)->instruction_start());
-  }
-
-  static inline void UpdateSlot(Heap* heap, Object** slot) {
-    Object* obj = *slot;
-
-    if (!obj->IsHeapObject()) return;
-
-    HeapObject* heap_obj = HeapObject::cast(obj);
-
-    MapWord map_word = heap_obj->map_word();
-    if (map_word.IsForwardingAddress()) {
-      ASSERT(heap->InFromSpace(heap_obj) ||
-             MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
-      HeapObject* target = map_word.ToForwardingAddress();
-      *slot = target;
-      ASSERT(!heap->InFromSpace(target) &&
-             !MarkCompactCollector::IsOnEvacuationCandidate(target));
-    }
-  }
-
- private:
-  inline void UpdatePointer(Object** p) {
-    UpdateSlot(heap_, p);
-  }
-
-  Heap* heap_;
-};
-
-
-static void UpdatePointer(HeapObject** address, HeapObject* object) {
-  Address new_addr = Memory::Address_at(object->address());
-
-  // The new space sweep will overwrite the map word of dead objects
-  // with NULL. In this case we do not need to transfer this entry to
-  // the store buffer, which we are rebuilding.
-  // We perform the pointer update with a no barrier compare-and-swap. The
-  // compare and swap may fail in the case where the pointer update tries to
-  // update garbage memory which was concurrently accessed by the sweeper.
-  if (new_addr != NULL) {
-    base::NoBarrier_CompareAndSwap(
-        reinterpret_cast<base::AtomicWord*>(address),
-        reinterpret_cast<base::AtomicWord>(object),
-        reinterpret_cast<base::AtomicWord>(HeapObject::FromAddress(new_addr)));
-  } else {
-    // We have to zap this pointer, because the store buffer may overflow
-    // later, and then we would have to scan the entire heap and we don't
-    // want to find spurious new-space pointers in the old space.
-    // TODO(mstarzinger): This was changed to a sentinel value to track down
-    // rare crashes, change it back to Smi::FromInt(0) later.
-    base::NoBarrier_CompareAndSwap(
-        reinterpret_cast<base::AtomicWord*>(address),
-        reinterpret_cast<base::AtomicWord>(object),
-        reinterpret_cast<base::AtomicWord>(Smi::FromInt(0x0f100d00 >> 1)));
-  }
-}
-
-
-static String* UpdateReferenceInExternalStringTableEntry(Heap* heap,
-                                                         Object** p) {
-  MapWord map_word = HeapObject::cast(*p)->map_word();
-
-  if (map_word.IsForwardingAddress()) {
-    return String::cast(map_word.ToForwardingAddress());
-  }
-
-  return String::cast(*p);
-}
-
-
-bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
-                                            int object_size) {
-  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
-
-  OldSpace* target_space = heap()->TargetSpace(object);
-
-  ASSERT(target_space == heap()->old_pointer_space() ||
-         target_space == heap()->old_data_space());
-  HeapObject* target;
-  AllocationResult allocation = target_space->AllocateRaw(object_size);
-  if (allocation.To(&target)) {
-    MigrateObject(target,
-                  object,
-                  object_size,
-                  target_space->identity());
-    heap()->IncrementPromotedObjectsSize(object_size);
-    return true;
-  }
-
-  return false;
-}
-
-
-void MarkCompactCollector::EvacuateNewSpace() {
-  // There are soft limits in the allocation code, designed to trigger a
-  // mark-sweep collection by failing allocations.  But since we are already in
-  // a mark-sweep allocation, there is no sense in trying to trigger one.
-  AlwaysAllocateScope scope(isolate());
-
-  NewSpace* new_space = heap()->new_space();
-
-  // Store allocation range before flipping semispaces.
-  Address from_bottom = new_space->bottom();
-  Address from_top = new_space->top();
-
-  // Flip the semispaces.  After flipping, to space is empty, from space has
-  // live objects.
-  new_space->Flip();
-  new_space->ResetAllocationInfo();
-
-  int survivors_size = 0;
-
-  // First pass: traverse all objects in inactive semispace, remove marks,
-  // migrate live objects and write forwarding addresses.  This stage puts
-  // new entries in the store buffer and may cause some pages to be marked
-  // scan-on-scavenge.
-  NewSpacePageIterator it(from_bottom, from_top);
-  while (it.has_next()) {
-    NewSpacePage* p = it.next();
-    survivors_size += DiscoverAndPromoteBlackObjectsOnPage(new_space, p);
-  }
-
-  heap_->IncrementYoungSurvivorsCounter(survivors_size);
-  new_space->set_age_mark(new_space->top());
-}
-
-
-void MarkCompactCollector::EvacuateLiveObjectsFromPage(Page* p) {
-  AlwaysAllocateScope always_allocate(isolate());
-  PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-  ASSERT(p->IsEvacuationCandidate() && !p->WasSwept());
-  p->MarkSweptPrecisely();
-
-  int offsets[16];
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-
-    if (*cell == 0) continue;
-
-    int live_objects = MarkWordToObjectStarts(*cell, offsets);
-    for (int i = 0; i < live_objects; i++) {
-      Address object_addr = cell_base + offsets[i] * kPointerSize;
-      HeapObject* object = HeapObject::FromAddress(object_addr);
-      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(object)));
-
-      int size = object->Size();
-
-      HeapObject* target_object;
-      AllocationResult allocation = space->AllocateRaw(size);
-      if (!allocation.To(&target_object)) {
-        // OS refused to give us memory.
-        V8::FatalProcessOutOfMemory("Evacuation");
-        return;
-      }
-
-      MigrateObject(target_object, object, size, space->identity());
-      ASSERT(object->map_word().IsForwardingAddress());
-    }
-
-    // Clear marking bits for current cell.
-    *cell = 0;
-  }
-  p->ResetLiveBytes();
-}
-
-
-void MarkCompactCollector::EvacuatePages() {
-  int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
-    ASSERT(p->IsEvacuationCandidate() ||
-           p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-    ASSERT(static_cast<int>(p->parallel_sweeping()) ==
-           MemoryChunk::PARALLEL_SWEEPING_DONE);
-    if (p->IsEvacuationCandidate()) {
-      // During compaction we might have to request a new page.
-      // Check that the space still has room for that.
-      if (static_cast<PagedSpace*>(p->owner())->CanExpand()) {
-        EvacuateLiveObjectsFromPage(p);
-      } else {
-        // Without room for expansion, evacuation is not guaranteed to succeed.
-        // Pessimistically abandon unevacuated pages.
-        for (int j = i; j < npages; j++) {
-          Page* page = evacuation_candidates_[j];
-          slots_buffer_allocator_.DeallocateChain(page->slots_buffer_address());
-          page->ClearEvacuationCandidate();
-          page->SetFlag(Page::RESCAN_ON_EVACUATION);
-        }
-        return;
-      }
-    }
-  }
-}
-
-
-class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
- public:
-  virtual Object* RetainAs(Object* object) {
-    if (object->IsHeapObject()) {
-      HeapObject* heap_object = HeapObject::cast(object);
-      MapWord map_word = heap_object->map_word();
-      if (map_word.IsForwardingAddress()) {
-        return map_word.ToForwardingAddress();
-      }
-    }
-    return object;
-  }
-};
-
-
-static inline void UpdateSlot(Isolate* isolate,
-                              ObjectVisitor* v,
-                              SlotsBuffer::SlotType slot_type,
-                              Address addr) {
-  switch (slot_type) {
-    case SlotsBuffer::CODE_TARGET_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    case SlotsBuffer::CODE_ENTRY_SLOT: {
-      v->VisitCodeEntry(addr);
-      break;
-    }
-    case SlotsBuffer::RELOCATED_CODE_OBJECT: {
-      HeapObject* obj = HeapObject::FromAddress(addr);
-      Code::cast(obj)->CodeIterateBody(v);
-      break;
-    }
-    case SlotsBuffer::DEBUG_TARGET_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::DEBUG_BREAK_SLOT, 0, NULL);
-      if (rinfo.IsPatchedDebugBreakSlotSequence()) rinfo.Visit(isolate, v);
-      break;
-    }
-    case SlotsBuffer::JS_RETURN_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::JS_RETURN, 0, NULL);
-      if (rinfo.IsPatchedReturnSequence()) rinfo.Visit(isolate, v);
-      break;
-    }
-    case SlotsBuffer::EMBEDDED_OBJECT_SLOT: {
-      RelocInfo rinfo(addr, RelocInfo::EMBEDDED_OBJECT, 0, NULL);
-      rinfo.Visit(isolate, v);
-      break;
-    }
-    default:
-      UNREACHABLE();
-      break;
-  }
-}
-
-
-enum SweepingMode {
-  SWEEP_ONLY,
-  SWEEP_AND_VISIT_LIVE_OBJECTS
-};
-
-
-enum SkipListRebuildingMode {
-  REBUILD_SKIP_LIST,
-  IGNORE_SKIP_LIST
-};
-
-
-enum FreeSpaceTreatmentMode {
-  IGNORE_FREE_SPACE,
-  ZAP_FREE_SPACE
-};
-
-
-// Sweep a space precisely.  After this has been done the space can
-// be iterated precisely, hitting only the live objects.  Code space
-// is always swept precisely because we want to be able to iterate
-// over it.  Map space is swept precisely, because it is not compacted.
-// Slots in live objects pointing into evacuation candidates are updated
-// if requested.
-template<SweepingMode sweeping_mode,
-         SkipListRebuildingMode skip_list_mode,
-         FreeSpaceTreatmentMode free_space_mode>
-static void SweepPrecisely(PagedSpace* space,
-                           Page* p,
-                           ObjectVisitor* v) {
-  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
-  ASSERT_EQ(skip_list_mode == REBUILD_SKIP_LIST,
-            space->identity() == CODE_SPACE);
-  ASSERT((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
-
-  double start_time = 0.0;
-  if (FLAG_print_cumulative_gc_stat) {
-    start_time = OS::TimeCurrentMillis();
-  }
-
-  p->MarkSweptPrecisely();
-
-  Address free_start = p->area_start();
-  ASSERT(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
-  int offsets[16];
-
-  SkipList* skip_list = p->skip_list();
-  int curr_region = -1;
-  if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list) {
-    skip_list->Clear();
-  }
-
-  for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
-    Address cell_base = it.CurrentCellBase();
-    MarkBit::CellType* cell = it.CurrentCell();
-    int live_objects = MarkWordToObjectStarts(*cell, offsets);
-    int live_index = 0;
-    for ( ; live_objects != 0; live_objects--) {
-      Address free_end = cell_base + offsets[live_index++] * kPointerSize;
-      if (free_end != free_start) {
-        if (free_space_mode == ZAP_FREE_SPACE) {
-          memset(free_start, 0xcc, static_cast<int>(free_end - free_start));
-        }
-        space->Free(free_start, static_cast<int>(free_end - free_start));
-#ifdef ENABLE_GDB_JIT_INTERFACE
-        if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
-          GDBJITInterface::RemoveCodeRange(free_start, free_end);
-        }
-#endif
-      }
-      HeapObject* live_object = HeapObject::FromAddress(free_end);
-      ASSERT(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
-      Map* map = live_object->map();
-      int size = live_object->SizeFromMap(map);
-      if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
-        live_object->IterateBody(map->instance_type(), size, v);
-      }
-      if ((skip_list_mode == REBUILD_SKIP_LIST) && skip_list != NULL) {
-        int new_region_start =
-            SkipList::RegionNumber(free_end);
-        int new_region_end =
-            SkipList::RegionNumber(free_end + size - kPointerSize);
-        if (new_region_start != curr_region ||
-            new_region_end != curr_region) {
-          skip_list->AddObject(free_end, size);
-          curr_region = new_region_end;
-        }
-      }
-      free_start = free_end + size;
-    }
-    // Clear marking bits for current cell.
-    *cell = 0;
-  }
-  if (free_start != p->area_end()) {
-    if (free_space_mode == ZAP_FREE_SPACE) {
-      memset(free_start, 0xcc, static_cast<int>(p->area_end() - free_start));
-    }
-    space->Free(free_start, static_cast<int>(p->area_end() - free_start));
-#ifdef ENABLE_GDB_JIT_INTERFACE
-    if (FLAG_gdbjit && space->identity() == CODE_SPACE) {
-      GDBJITInterface::RemoveCodeRange(free_start, p->area_end());
-    }
-#endif
-  }
-  p->ResetLiveBytes();
-  if (FLAG_print_cumulative_gc_stat) {
-    space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time);
-  }
-}
-
-
-static bool SetMarkBitsUnderInvalidatedCode(Code* code, bool value) {
-  Page* p = Page::FromAddress(code->address());
-
-  if (p->IsEvacuationCandidate() ||
-      p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-    return false;
-  }
-
-  Address code_start = code->address();
-  Address code_end = code_start + code->Size();
-
-  uint32_t start_index = MemoryChunk::FastAddressToMarkbitIndex(code_start);
-  uint32_t end_index =
-      MemoryChunk::FastAddressToMarkbitIndex(code_end - kPointerSize);
-
-  Bitmap* b = p->markbits();
-
-  MarkBit start_mark_bit = b->MarkBitFromIndex(start_index);
-  MarkBit end_mark_bit = b->MarkBitFromIndex(end_index);
-
-  MarkBit::CellType* start_cell = start_mark_bit.cell();
-  MarkBit::CellType* end_cell = end_mark_bit.cell();
-
-  if (value) {
-    MarkBit::CellType start_mask = ~(start_mark_bit.mask() - 1);
-    MarkBit::CellType end_mask = (end_mark_bit.mask() << 1) - 1;
-
-    if (start_cell == end_cell) {
-      *start_cell |= start_mask & end_mask;
-    } else {
-      *start_cell |= start_mask;
-      for (MarkBit::CellType* cell = start_cell + 1; cell < end_cell; cell++) {
-        *cell = ~0;
-      }
-      *end_cell |= end_mask;
-    }
-  } else {
-    for (MarkBit::CellType* cell = start_cell ; cell <= end_cell; cell++) {
-      *cell = 0;
-    }
-  }
-
-  return true;
-}
-
-
-static bool IsOnInvalidatedCodeObject(Address addr) {
-  // We did not record any slots in large objects, thus we can safely go to
-  // the page from the slot address.
-  Page* p = Page::FromAddress(addr);
-
-  // First check owner's identity because old pointer and old data spaces
-  // are swept lazily and might still have non-zero mark-bits on some
-  // pages.
-  if (p->owner()->identity() != CODE_SPACE) return false;
-
-  // In the code space the only non-zero mark bits are on evacuation
-  // candidates (though we record no slots on them) and under invalidated
-  // code objects.
-  MarkBit mark_bit =
-      p->markbits()->MarkBitFromIndex(Page::FastAddressToMarkbitIndex(addr));
-
-  return mark_bit.Get();
-}
-
-
-void MarkCompactCollector::InvalidateCode(Code* code) {
-  if (heap_->incremental_marking()->IsCompacting() &&
-      !ShouldSkipEvacuationSlotRecording(code)) {
-    ASSERT(compacting_);
-
-    // If the object is white then no slots were recorded on it yet.
-    MarkBit mark_bit = Marking::MarkBitFrom(code);
-    if (Marking::IsWhite(mark_bit)) return;
-
-    invalidated_code_.Add(code);
-  }
-}
-
-
-// Return true if the given code is deoptimized or will be deoptimized.
-bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
-  return code->is_optimized_code() && code->marked_for_deoptimization();
-}
-
-
-bool MarkCompactCollector::MarkInvalidatedCode() {
-  bool code_marked = false;
-
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    Code* code = invalidated_code_[i];
-
-    if (SetMarkBitsUnderInvalidatedCode(code, true)) {
-      code_marked = true;
-    }
-  }
-
-  return code_marked;
-}
-
-
-void MarkCompactCollector::RemoveDeadInvalidatedCode() {
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    if (!IsMarked(invalidated_code_[i])) invalidated_code_[i] = NULL;
-  }
-}
-
-
-void MarkCompactCollector::ProcessInvalidatedCode(ObjectVisitor* visitor) {
-  int length = invalidated_code_.length();
-  for (int i = 0; i < length; i++) {
-    Code* code = invalidated_code_[i];
-    if (code != NULL) {
-      code->Iterate(visitor);
-      SetMarkBitsUnderInvalidatedCode(code, false);
-    }
-  }
-  invalidated_code_.Rewind(0);
-}
-
-
-void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
-  Heap::RelocationLock relocation_lock(heap());
-
-  bool code_slots_filtering_required;
-  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP_NEWSPACE);
-    code_slots_filtering_required = MarkInvalidatedCode();
-    EvacuateNewSpace();
-  }
-
-  { GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_EVACUATE_PAGES);
-    EvacuatePages();
-  }
-
-  // Second pass: find pointers to new space and update them.
-  PointersUpdatingVisitor updating_visitor(heap());
-
-  { GCTracer::Scope gc_scope(tracer_,
-                             GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
-    // Update pointers in to space.
-    SemiSpaceIterator to_it(heap()->new_space()->bottom(),
-                            heap()->new_space()->top());
-    for (HeapObject* object = to_it.Next();
-         object != NULL;
-         object = to_it.Next()) {
-      Map* map = object->map();
-      object->IterateBody(map->instance_type(),
-                          object->SizeFromMap(map),
-                          &updating_visitor);
-    }
-  }
-
-  { GCTracer::Scope gc_scope(tracer_,
-                             GCTracer::Scope::MC_UPDATE_ROOT_TO_NEW_POINTERS);
-    // Update roots.
-    heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
-  }
-
-  { GCTracer::Scope gc_scope(tracer_,
-                             GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
-    StoreBufferRebuildScope scope(heap_,
-                                  heap_->store_buffer(),
-                                  &Heap::ScavengeStoreBufferCallback);
-    heap_->store_buffer()->IteratePointersToNewSpaceAndClearMaps(
-        &UpdatePointer);
-  }
-
-  { GCTracer::Scope gc_scope(tracer_,
-                             GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
-    SlotsBuffer::UpdateSlotsRecordedIn(heap_,
-                                       migration_slots_buffer_,
-                                       code_slots_filtering_required);
-    if (FLAG_trace_fragmentation) {
-      PrintF("  migration slots buffer: %d\n",
-             SlotsBuffer::SizeOfChain(migration_slots_buffer_));
-    }
-
-    if (compacting_ && was_marked_incrementally_) {
-      // It's difficult to filter out slots recorded for large objects.
-      LargeObjectIterator it(heap_->lo_space());
-      for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-        // LargeObjectSpace is not swept yet, thus we have to skip
-        // dead objects explicitly.
-        if (!IsMarked(obj)) continue;
-
-        Page* p = Page::FromAddress(obj->address());
-        if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
-          obj->Iterate(&updating_visitor);
-          p->ClearFlag(Page::RESCAN_ON_EVACUATION);
-        }
-      }
-    }
-  }
-
-  int npages = evacuation_candidates_.length();
-  { GCTracer::Scope gc_scope(
-      tracer_, GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
-    for (int i = 0; i < npages; i++) {
-      Page* p = evacuation_candidates_[i];
-      ASSERT(p->IsEvacuationCandidate() ||
-             p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
-
-      if (p->IsEvacuationCandidate()) {
-        SlotsBuffer::UpdateSlotsRecordedIn(heap_,
-                                           p->slots_buffer(),
-                                           code_slots_filtering_required);
-        if (FLAG_trace_fragmentation) {
-          PrintF("  page %p slots buffer: %d\n",
-                 reinterpret_cast<void*>(p),
-                 SlotsBuffer::SizeOfChain(p->slots_buffer()));
-        }
-
-        // Important: the skip list should be cleared only after the roots
-        // were updated, because root iteration traverses the stack and might
-        // have to find code objects from a not-yet-updated pc pointing into
-        // an evacuation candidate.
-        SkipList* list = p->skip_list();
-        if (list != NULL) list->Clear();
-      } else {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-        p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
-
-        switch (space->identity()) {
-          case OLD_DATA_SPACE:
-            SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
-            break;
-          case OLD_POINTER_SPACE:
-            SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
-                           IGNORE_SKIP_LIST,
-                           IGNORE_FREE_SPACE>(
-                space, p, &updating_visitor);
-            break;
-          case CODE_SPACE:
-            if (FLAG_zap_code_space) {
-              SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
-                             REBUILD_SKIP_LIST,
-                             ZAP_FREE_SPACE>(
-                  space, p, &updating_visitor);
-            } else {
-              SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS,
-                             REBUILD_SKIP_LIST,
-                             IGNORE_FREE_SPACE>(
-                  space, p, &updating_visitor);
-            }
-            break;
-          default:
-            UNREACHABLE();
-            break;
-        }
-      }
-    }
-  }
-
-  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_UPDATE_MISC_POINTERS);
-
-  // Update pointers from cells.
-  HeapObjectIterator cell_iterator(heap_->cell_space());
-  for (HeapObject* cell = cell_iterator.Next();
-       cell != NULL;
-       cell = cell_iterator.Next()) {
-    if (cell->IsCell()) {
-      Cell::BodyDescriptor::IterateBody(cell, &updating_visitor);
-    }
-  }
-
-  HeapObjectIterator js_global_property_cell_iterator(
-      heap_->property_cell_space());
-  for (HeapObject* cell = js_global_property_cell_iterator.Next();
-       cell != NULL;
-       cell = js_global_property_cell_iterator.Next()) {
-    if (cell->IsPropertyCell()) {
-      PropertyCell::BodyDescriptor::IterateBody(cell, &updating_visitor);
-    }
-  }
-
-  heap_->string_table()->Iterate(&updating_visitor);
-  updating_visitor.VisitPointer(heap_->weak_object_to_code_table_address());
-  if (heap_->weak_object_to_code_table()->IsHashTable()) {
-    WeakHashTable* table =
-        WeakHashTable::cast(heap_->weak_object_to_code_table());
-    table->Iterate(&updating_visitor);
-    table->Rehash(heap_->isolate()->factory()->undefined_value());
-  }
-
-  // Update pointers from external string table.
-  heap_->UpdateReferencesInExternalStringTable(
-      &UpdateReferenceInExternalStringTableEntry);
-
-  EvacuationWeakObjectRetainer evacuation_object_retainer;
-  heap()->ProcessWeakReferences(&evacuation_object_retainer);
-
-  // Visit invalidated code (we ignored all slots on it) and clear mark-bits
-  // under it.
-  ProcessInvalidatedCode(&updating_visitor);
-
-  heap_->isolate()->inner_pointer_to_code_cache()->Flush();
-
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    VerifyEvacuation(heap_);
-  }
-#endif
-
-  slots_buffer_allocator_.DeallocateChain(&migration_slots_buffer_);
-  ASSERT(migration_slots_buffer_ == NULL);
-}
-
-
-void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
-  int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
-    if (!p->IsEvacuationCandidate()) continue;
-    p->Unlink();
-    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    p->InsertAfter(space->LastPage());
-  }
-}
-
-
-void MarkCompactCollector::ReleaseEvacuationCandidates() {
-  int npages = evacuation_candidates_.length();
-  for (int i = 0; i < npages; i++) {
-    Page* p = evacuation_candidates_[i];
-    if (!p->IsEvacuationCandidate()) continue;
-    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    space->Free(p->area_start(), p->area_size());
-    p->set_scan_on_scavenge(false);
-    slots_buffer_allocator_.DeallocateChain(p->slots_buffer_address());
-    p->ResetLiveBytes();
-    space->ReleasePage(p);
-  }
-  evacuation_candidates_.Rewind(0);
-  compacting_ = false;
-  heap()->FreeQueuedChunks();
-}
-
-
-static const int kStartTableEntriesPerLine = 5;
-static const int kStartTableLines = 171;
-static const int kStartTableInvalidLine = 127;
-static const int kStartTableUnusedEntry = 126;
-
-#define _ kStartTableUnusedEntry
-#define X kStartTableInvalidLine
-// Mark-bit to object start offset table.
-//
-// The line is indexed by the mark bits in a byte.  The first number on
-// the line describes the number of live object starts for the line and the
-// other numbers on the line describe the offsets (in words) of the object
-// starts.
-//
-// Since objects are at least 2 words long we don't have entries for two
-// consecutive 1 bits.  All byte values above 170 contain at least 2
-// consecutive 1 bits, so their lines are invalid.
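-//
-// For illustration: the byte 0b00000101 (value 5) has object starts at word
-// offsets 0 and 2, so line 5 below reads "2, 0, 2" -- two starts, at
-// offsets 0 and 2.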
-char kStartTable[kStartTableLines * kStartTableEntriesPerLine] = {
-  0, _, _, _, _,  // 0
-  1, 0, _, _, _,  // 1
-  1, 1, _, _, _,  // 2
-  X, _, _, _, _,  // 3
-  1, 2, _, _, _,  // 4
-  2, 0, 2, _, _,  // 5
-  X, _, _, _, _,  // 6
-  X, _, _, _, _,  // 7
-  1, 3, _, _, _,  // 8
-  2, 0, 3, _, _,  // 9
-  2, 1, 3, _, _,  // 10
-  X, _, _, _, _,  // 11
-  X, _, _, _, _,  // 12
-  X, _, _, _, _,  // 13
-  X, _, _, _, _,  // 14
-  X, _, _, _, _,  // 15
-  1, 4, _, _, _,  // 16
-  2, 0, 4, _, _,  // 17
-  2, 1, 4, _, _,  // 18
-  X, _, _, _, _,  // 19
-  2, 2, 4, _, _,  // 20
-  3, 0, 2, 4, _,  // 21
-  X, _, _, _, _,  // 22
-  X, _, _, _, _,  // 23
-  X, _, _, _, _,  // 24
-  X, _, _, _, _,  // 25
-  X, _, _, _, _,  // 26
-  X, _, _, _, _,  // 27
-  X, _, _, _, _,  // 28
-  X, _, _, _, _,  // 29
-  X, _, _, _, _,  // 30
-  X, _, _, _, _,  // 31
-  1, 5, _, _, _,  // 32
-  2, 0, 5, _, _,  // 33
-  2, 1, 5, _, _,  // 34
-  X, _, _, _, _,  // 35
-  2, 2, 5, _, _,  // 36
-  3, 0, 2, 5, _,  // 37
-  X, _, _, _, _,  // 38
-  X, _, _, _, _,  // 39
-  2, 3, 5, _, _,  // 40
-  3, 0, 3, 5, _,  // 41
-  3, 1, 3, 5, _,  // 42
-  X, _, _, _, _,  // 43
-  X, _, _, _, _,  // 44
-  X, _, _, _, _,  // 45
-  X, _, _, _, _,  // 46
-  X, _, _, _, _,  // 47
-  X, _, _, _, _,  // 48
-  X, _, _, _, _,  // 49
-  X, _, _, _, _,  // 50
-  X, _, _, _, _,  // 51
-  X, _, _, _, _,  // 52
-  X, _, _, _, _,  // 53
-  X, _, _, _, _,  // 54
-  X, _, _, _, _,  // 55
-  X, _, _, _, _,  // 56
-  X, _, _, _, _,  // 57
-  X, _, _, _, _,  // 58
-  X, _, _, _, _,  // 59
-  X, _, _, _, _,  // 60
-  X, _, _, _, _,  // 61
-  X, _, _, _, _,  // 62
-  X, _, _, _, _,  // 63
-  1, 6, _, _, _,  // 64
-  2, 0, 6, _, _,  // 65
-  2, 1, 6, _, _,  // 66
-  X, _, _, _, _,  // 67
-  2, 2, 6, _, _,  // 68
-  3, 0, 2, 6, _,  // 69
-  X, _, _, _, _,  // 70
-  X, _, _, _, _,  // 71
-  2, 3, 6, _, _,  // 72
-  3, 0, 3, 6, _,  // 73
-  3, 1, 3, 6, _,  // 74
-  X, _, _, _, _,  // 75
-  X, _, _, _, _,  // 76
-  X, _, _, _, _,  // 77
-  X, _, _, _, _,  // 78
-  X, _, _, _, _,  // 79
-  2, 4, 6, _, _,  // 80
-  3, 0, 4, 6, _,  // 81
-  3, 1, 4, 6, _,  // 82
-  X, _, _, _, _,  // 83
-  3, 2, 4, 6, _,  // 84
-  4, 0, 2, 4, 6,  // 85
-  X, _, _, _, _,  // 86
-  X, _, _, _, _,  // 87
-  X, _, _, _, _,  // 88
-  X, _, _, _, _,  // 89
-  X, _, _, _, _,  // 90
-  X, _, _, _, _,  // 91
-  X, _, _, _, _,  // 92
-  X, _, _, _, _,  // 93
-  X, _, _, _, _,  // 94
-  X, _, _, _, _,  // 95
-  X, _, _, _, _,  // 96
-  X, _, _, _, _,  // 97
-  X, _, _, _, _,  // 98
-  X, _, _, _, _,  // 99
-  X, _, _, _, _,  // 100
-  X, _, _, _, _,  // 101
-  X, _, _, _, _,  // 102
-  X, _, _, _, _,  // 103
-  X, _, _, _, _,  // 104
-  X, _, _, _, _,  // 105
-  X, _, _, _, _,  // 106
-  X, _, _, _, _,  // 107
-  X, _, _, _, _,  // 108
-  X, _, _, _, _,  // 109
-  X, _, _, _, _,  // 110
-  X, _, _, _, _,  // 111
-  X, _, _, _, _,  // 112
-  X, _, _, _, _,  // 113
-  X, _, _, _, _,  // 114
-  X, _, _, _, _,  // 115
-  X, _, _, _, _,  // 116
-  X, _, _, _, _,  // 117
-  X, _, _, _, _,  // 118
-  X, _, _, _, _,  // 119
-  X, _, _, _, _,  // 120
-  X, _, _, _, _,  // 121
-  X, _, _, _, _,  // 122
-  X, _, _, _, _,  // 123
-  X, _, _, _, _,  // 124
-  X, _, _, _, _,  // 125
-  X, _, _, _, _,  // 126
-  X, _, _, _, _,  // 127
-  1, 7, _, _, _,  // 128
-  2, 0, 7, _, _,  // 129
-  2, 1, 7, _, _,  // 130
-  X, _, _, _, _,  // 131
-  2, 2, 7, _, _,  // 132
-  3, 0, 2, 7, _,  // 133
-  X, _, _, _, _,  // 134
-  X, _, _, _, _,  // 135
-  2, 3, 7, _, _,  // 136
-  3, 0, 3, 7, _,  // 137
-  3, 1, 3, 7, _,  // 138
-  X, _, _, _, _,  // 139
-  X, _, _, _, _,  // 140
-  X, _, _, _, _,  // 141
-  X, _, _, _, _,  // 142
-  X, _, _, _, _,  // 143
-  2, 4, 7, _, _,  // 144
-  3, 0, 4, 7, _,  // 145
-  3, 1, 4, 7, _,  // 146
-  X, _, _, _, _,  // 147
-  3, 2, 4, 7, _,  // 148
-  4, 0, 2, 4, 7,  // 149
-  X, _, _, _, _,  // 150
-  X, _, _, _, _,  // 151
-  X, _, _, _, _,  // 152
-  X, _, _, _, _,  // 153
-  X, _, _, _, _,  // 154
-  X, _, _, _, _,  // 155
-  X, _, _, _, _,  // 156
-  X, _, _, _, _,  // 157
-  X, _, _, _, _,  // 158
-  X, _, _, _, _,  // 159
-  2, 5, 7, _, _,  // 160
-  3, 0, 5, 7, _,  // 161
-  3, 1, 5, 7, _,  // 162
-  X, _, _, _, _,  // 163
-  3, 2, 5, 7, _,  // 164
-  4, 0, 2, 5, 7,  // 165
-  X, _, _, _, _,  // 166
-  X, _, _, _, _,  // 167
-  3, 3, 5, 7, _,  // 168
-  4, 0, 3, 5, 7,  // 169
-  4, 1, 3, 5, 7   // 170
-};
-#undef _
-#undef X
-
-
-// Takes a word of mark bits.  Returns the number of objects that start in
-// the range and puts the word offsets of the object starts in the supplied
-// array.
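-// For example, mark_bits == 0x105 (bits 0, 2 and 8 set): the low byte 0x05
-// contributes starts {0, 2}, the next byte 0x01 contributes {8}, so the
-// function stores {0, 2, 8} and returns 3.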
-static inline int MarkWordToObjectStarts(uint32_t mark_bits, int* starts) {
-  int objects = 0;
-  int offset = 0;
-
-  // No consecutive 1 bits.
-  ASSERT((mark_bits & 0x180) != 0x180);
-  ASSERT((mark_bits & 0x18000) != 0x18000);
-  ASSERT((mark_bits & 0x1800000) != 0x1800000);
-
-  while (mark_bits != 0) {
-    int byte = (mark_bits & 0xff);
-    mark_bits >>= 8;
-    if (byte != 0) {
-      ASSERT(byte < kStartTableLines);  // No consecutive 1 bits.
-      char* table = kStartTable + byte * kStartTableEntriesPerLine;
-      int objects_in_these_8_words = table[0];
-      ASSERT(objects_in_these_8_words != kStartTableInvalidLine);
-      ASSERT(objects_in_these_8_words < kStartTableEntriesPerLine);
-      for (int i = 0; i < objects_in_these_8_words; i++) {
-        starts[objects++] = offset + table[1 + i];
-      }
-    }
-    offset += 8;
-  }
-  return objects;
-}
-
-
-static inline Address DigestFreeStart(Address approximate_free_start,
-                                      uint32_t free_start_cell) {
-  ASSERT(free_start_cell != 0);
-
-  // No consecutive 1 bits.
-  ASSERT((free_start_cell & (free_start_cell << 1)) == 0);
-
-  int offsets[16];
-  uint32_t cell = free_start_cell;
-  int offset_of_last_live;
-  if ((cell & 0x80000000u) != 0) {
-    // This case would overflow below.
-    offset_of_last_live = 31;
-  } else {
-    // Remove all but one bit, the most significant.  This is an optimization
-    // that may or may not be worthwhile.
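-    // For example, cell == 0b100100 smears to 0b111111 after the shifts
-    // below, and (0b111111 + 1) >> 1 == 0b100000 keeps only the highest
-    // set bit.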
-    cell |= cell >> 16;
-    cell |= cell >> 8;
-    cell |= cell >> 4;
-    cell |= cell >> 2;
-    cell |= cell >> 1;
-    cell = (cell + 1) >> 1;
-    int live_objects = MarkWordToObjectStarts(cell, offsets);
-    ASSERT(live_objects == 1);
-    offset_of_last_live = offsets[live_objects - 1];
-  }
-  Address last_live_start =
-      approximate_free_start + offset_of_last_live * kPointerSize;
-  HeapObject* last_live = HeapObject::FromAddress(last_live_start);
-  Address free_start = last_live_start + last_live->Size();
-  return free_start;
-}
-
-
-static inline Address StartOfLiveObject(Address block_address, uint32_t cell) {
-  ASSERT(cell != 0);
-
-  // No consecutive 1 bits.
-  ASSERT((cell & (cell << 1)) == 0);
-
-  int offsets[16];
-  if (cell == 0x80000000u) {  // Avoid overflow below.
-    return block_address + 31 * kPointerSize;
-  }
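-  // The next line isolates the lowest set bit of cell.  For example, with
-  // cell == 0b101000: cell - 1 == 0b100111, the xor gives 0b001111, and
-  // ((0b001111 + 1) >> 1) == 0b001000.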
-  uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
-  ASSERT((first_set_bit & cell) == first_set_bit);
-  int live_objects = MarkWordToObjectStarts(first_set_bit, offsets);
-  ASSERT(live_objects == 1);
-  USE(live_objects);
-  return block_address + offsets[0] * kPointerSize;
-}
-
-
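-// Frees the given range either directly on the space (sequential sweeping)
-// or on the given private free list (parallel sweeping).  FreeList::Free
-// returns the number of bytes it could not register on the free list, so
-// in parallel mode "size - free_list->Free(start, size)" counts only the
-// bytes that were actually freed.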
-template<MarkCompactCollector::SweepingParallelism mode>
-static intptr_t Free(PagedSpace* space,
-                     FreeList* free_list,
-                     Address start,
-                     int size) {
-  if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) {
-    return space->Free(start, size);
-  } else {
-    return size - free_list->Free(start, size);
-  }
-}
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_SEQUENTIALLY mode.
-template intptr_t MarkCompactCollector::
-    SweepConservatively<MarkCompactCollector::SWEEP_SEQUENTIALLY>(
-        PagedSpace*, FreeList*, Page*);
-
-
-// Force instantiation of templatized SweepConservatively method for
-// SWEEP_IN_PARALLEL mode.
-template intptr_t MarkCompactCollector::
-    SweepConservatively<MarkCompactCollector::SWEEP_IN_PARALLEL>(
-        PagedSpace*, FreeList*, Page*);
-
-
-// Sweeps a space conservatively.  After this has been done the larger free
-// spaces have been put on the free list and the smaller ones have been
-// ignored and left untouched.  A free space is always either ignored or put
-// on the free list, never split up into two parts.  This is important
-// because it means that any FreeSpace maps left actually describe a region of
-// memory that can be ignored when scanning.  Dead objects other than free
-// spaces will not contain the free space map.
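-// For example, a one-word gap between two live objects is simply skipped,
-// while a gap of (roughly) more than 32 words is digested into an exact
-// start address and handed to Free() as a single region.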
-template<MarkCompactCollector::SweepingParallelism mode>
-intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space,
-                                                   FreeList* free_list,
-                                                   Page* p) {
-  ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept());
-  ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL &&
-         free_list != NULL) ||
-         (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY &&
-         free_list == NULL));
-
-  // When parallel sweeping is active, the page will be marked after
-  // sweeping by the main thread.
-  if (mode != MarkCompactCollector::SWEEP_IN_PARALLEL) {
-    p->MarkSweptConservatively();
-  }
-
-  intptr_t freed_bytes = 0;
-  size_t size = 0;
-
-  // Skip over all the dead objects at the start of the page and mark them free.
-  Address cell_base = 0;
-  MarkBit::CellType* cell = NULL;
-  MarkBitCellIterator it(p);
-  for (; !it.Done(); it.Advance()) {
-    cell_base = it.CurrentCellBase();
-    cell = it.CurrentCell();
-    if (*cell != 0) break;
-  }
-
-  if (it.Done()) {
-    size = p->area_end() - p->area_start();
-    freed_bytes += Free<mode>(space, free_list, p->area_start(),
-                              static_cast<int>(size));
-    ASSERT_EQ(0, p->LiveBytes());
-    return freed_bytes;
-  }
-
-  // Grow the size of the start-of-page free space a little to get up to the
-  // first live object.
-  Address free_end = StartOfLiveObject(cell_base, *cell);
-  // Free the first free space.
-  size = free_end - p->area_start();
-  freed_bytes += Free<mode>(space, free_list, p->area_start(),
-                            static_cast<int>(size));
-
-  // The start of the current free area is represented in undigested form by
-  // the address of the last 32-word section that contained a live object and
-  // the marking bitmap for that cell, which describes where the live object
-  // started.  Unless we find a large free space in the bitmap we will not
-  // digest this pair into a real address.  We start the iteration here at the
-  // first word in the marking bitmap that indicates a live object.
-  Address free_start = cell_base;
-  MarkBit::CellType free_start_cell = *cell;
-
-  for (; !it.Done(); it.Advance()) {
-    cell_base = it.CurrentCellBase();
-    cell = it.CurrentCell();
-    if (*cell != 0) {
-      // We have a live object.  Check approximately whether more than 32
-      // words separate it from the last live object.
-      if (cell_base - free_start > 32 * kPointerSize) {
-        free_start = DigestFreeStart(free_start, free_start_cell);
-        if (cell_base - free_start > 32 * kPointerSize) {
-          // Now that we know the exact start of the free space, it still
-          // looks large enough to be worth bothering with, so find the start
-          // of the first live object at the end of the free space.
-          free_end = StartOfLiveObject(cell_base, *cell);
-          freed_bytes += Free<mode>(space, free_list, free_start,
-                                    static_cast<int>(free_end - free_start));
-        }
-      }
-      // Update our undigested record of where the current free area started.
-      free_start = cell_base;
-      free_start_cell = *cell;
-      // Clear marking bits for current cell.
-      *cell = 0;
-    }
-  }
-
-  // Handle the free space at the end of the page.
-  if (cell_base - free_start > 32 * kPointerSize) {
-    free_start = DigestFreeStart(free_start, free_start_cell);
-    freed_bytes += Free<mode>(space, free_list, free_start,
-                              static_cast<int>(p->area_end() - free_start));
-  }
-
-  p->ResetLiveBytes();
-  return freed_bytes;
-}
-
-
-void MarkCompactCollector::SweepInParallel(PagedSpace* space) {
-  PageIterator it(space);
-  FreeList* free_list = space == heap()->old_pointer_space()
-                            ? free_list_old_pointer_space_.get()
-                            : free_list_old_data_space_.get();
-  FreeList private_free_list(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-
-    if (p->TryParallelSweeping()) {
-      SweepConservatively<SWEEP_IN_PARALLEL>(space, &private_free_list, p);
-      free_list->Concatenate(&private_free_list);
-      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_FINALIZE);
-    }
-    if (p == space->end_of_unswept_pages()) break;
-  }
-}
-
-
-void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
-  space->set_was_swept_conservatively(sweeper == CONSERVATIVE ||
-                                      sweeper == PARALLEL_CONSERVATIVE ||
-                                      sweeper == CONCURRENT_CONSERVATIVE);
-  space->ClearStats();
-
-  // We defensively initialize end_of_unswept_pages_ here with the first page
-  // of the pages list.
-  space->set_end_of_unswept_pages(space->FirstPage());
-
-  PageIterator it(space);
-
-  int pages_swept = 0;
-  bool unused_page_present = false;
-  bool parallel_sweeping_active = false;
-
-  while (it.has_next()) {
-    Page* p = it.next();
-    ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
-
-    // Clear the sweeping flags to indicate that the marking bits are still
-    // intact.
-    p->ClearSweptPrecisely();
-    p->ClearSweptConservatively();
-
-    if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
-        p->IsEvacuationCandidate()) {
-      // Will be processed in EvacuateNewSpaceAndCandidates.
-      ASSERT(evacuation_candidates_.length() > 0);
-      continue;
-    }
-
-    // One unused page is kept; all further unused pages are released rather
-    // than swept.
-    if (p->LiveBytes() == 0) {
-      if (unused_page_present) {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " released page.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        // Adjust unswept free bytes because releasing a page expects said
-        // counter to be accurate for unswept pages.
-        space->IncreaseUnsweptFreeBytes(p);
-        space->ReleasePage(p);
-        continue;
-      }
-      unused_page_present = true;
-    }
-
-    switch (sweeper) {
-      case CONSERVATIVE: {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
-        pages_swept++;
-        break;
-      }
-      case CONCURRENT_CONSERVATIVE:
-      case PARALLEL_CONSERVATIVE: {
-        if (!parallel_sweeping_active) {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p);
-          pages_swept++;
-          parallel_sweeping_active = true;
-        } else {
-          if (FLAG_gc_verbose) {
-            PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n",
-                   reinterpret_cast<intptr_t>(p));
-          }
-          p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_PENDING);
-          space->IncreaseUnsweptFreeBytes(p);
-        }
-        space->set_end_of_unswept_pages(p);
-        break;
-      }
-      case PRECISE: {
-        if (FLAG_gc_verbose) {
-          PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n",
-                 reinterpret_cast<intptr_t>(p));
-        }
-        if (space->identity() == CODE_SPACE && FLAG_zap_code_space) {
-          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(
-              space, p, NULL);
-        } else if (space->identity() == CODE_SPACE) {
-          SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(
-              space, p, NULL);
-        } else {
-          SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(
-              space, p, NULL);
-        }
-        pages_swept++;
-        break;
-      }
-      default: {
-        UNREACHABLE();
-      }
-    }
-  }
-
-  if (FLAG_gc_verbose) {
-    PrintF("SweepSpace: %s (%d pages swept)\n",
-           AllocationSpaceName(space->identity()),
-           pages_swept);
-  }
-
-  // Give pages that are queued to be freed back to the OS.
-  heap()->FreeQueuedChunks();
-}
-
-
-void MarkCompactCollector::SweepSpaces() {
-  GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
-#ifdef DEBUG
-  state_ = SWEEP_SPACES;
-#endif
-  SweeperType how_to_sweep = CONSERVATIVE;
-  if (AreSweeperThreadsActivated()) {
-    if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
-    if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
-  }
-  if (sweep_precisely_) how_to_sweep = PRECISE;
-
-  MoveEvacuationCandidatesToEndOfPagesList();
-
-  // Noncompacting collections simply sweep the spaces to clear the mark
-  // bits and free the nonlive blocks (for old and map spaces).  We sweep
-  // the map space last because freeing non-live maps overwrites them and
-  // the other spaces rely on possibly non-live maps to get the sizes for
-  // non-live objects.
-  { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
-    { SequentialSweepingScope scope(this);
-      SweepSpace(heap()->old_pointer_space(), how_to_sweep);
-      SweepSpace(heap()->old_data_space(), how_to_sweep);
-    }
-
-    if (how_to_sweep == PARALLEL_CONSERVATIVE ||
-        how_to_sweep == CONCURRENT_CONSERVATIVE) {
-      StartSweeperThreads();
-    }
-
-    if (how_to_sweep == PARALLEL_CONSERVATIVE) {
-      WaitUntilSweepingCompleted();
-    }
-  }
-  RemoveDeadInvalidatedCode();
-  SweepSpace(heap()->code_space(), PRECISE);
-
-  SweepSpace(heap()->cell_space(), PRECISE);
-  SweepSpace(heap()->property_cell_space(), PRECISE);
-
-  EvacuateNewSpaceAndCandidates();
-
-  // ClearNonLiveTransitions depends on precise sweeping of map space to
-  // detect whether unmarked map became dead in this collection or in one
-  // of the previous ones.
-  SweepSpace(heap()->map_space(), PRECISE);
-
-  // Deallocate unmarked objects and clear marked bits for marked objects.
-  heap_->lo_space()->FreeUnmarkedObjects();
-
-  // Deallocate evacuated candidate pages.
-  ReleaseEvacuationCandidates();
-}
-
-
-void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
-  PageIterator it(space);
-  while (it.has_next()) {
-    Page* p = it.next();
-    if (p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_FINALIZE) {
-      p->set_parallel_sweeping(MemoryChunk::PARALLEL_SWEEPING_DONE);
-      p->MarkSweptConservatively();
-    }
-    ASSERT(p->parallel_sweeping() == MemoryChunk::PARALLEL_SWEEPING_DONE);
-  }
-}
-
-
-void MarkCompactCollector::ParallelSweepSpacesComplete() {
-  ParallelSweepSpaceComplete(heap()->old_pointer_space());
-  ParallelSweepSpaceComplete(heap()->old_data_space());
-}
-
-
-void MarkCompactCollector::EnableCodeFlushing(bool enable) {
-  if (isolate()->debug()->is_loaded() ||
-      isolate()->debug()->has_break_points()) {
-    enable = false;
-  }
-
-  if (enable) {
-    if (code_flusher_ != NULL) return;
-    code_flusher_ = new CodeFlusher(isolate());
-  } else {
-    if (code_flusher_ == NULL) return;
-    code_flusher_->EvictAllCandidates();
-    delete code_flusher_;
-    code_flusher_ = NULL;
-  }
-
-  if (FLAG_trace_code_flushing) {
-    PrintF("[code-flushing is now %s]\n", enable ? "on" : "off");
-  }
-}
-
-
-// TODO(1466) ReportDeleteIfNeeded is not called currently.
-// Our profiling tools do not expect intersections between
-// code objects. We should either reenable it or change our tools.
-void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
-                                                Isolate* isolate) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  if (obj->IsCode()) {
-    GDBJITInterface::RemoveCode(reinterpret_cast<Code*>(obj));
-  }
-#endif
-  if (obj->IsCode()) {
-    PROFILE(isolate, CodeDeleteEvent(obj->address()));
-  }
-}
-
-
-Isolate* MarkCompactCollector::isolate() const {
-  return heap_->isolate();
-}
-
-
-void MarkCompactCollector::Initialize() {
-  MarkCompactMarkingVisitor::Initialize();
-  IncrementalMarking::Initialize();
-}
-
-
-bool SlotsBuffer::IsTypedSlot(ObjectSlot slot) {
-  return reinterpret_cast<uintptr_t>(slot) < NUMBER_OF_SLOT_TYPES;
-}
-
-
-bool SlotsBuffer::AddTo(SlotsBufferAllocator* allocator,
-                        SlotsBuffer** buffer_address,
-                        SlotType type,
-                        Address addr,
-                        AdditionMode mode) {
-  SlotsBuffer* buffer = *buffer_address;
-  if (buffer == NULL || !buffer->HasSpaceForTypedSlot()) {
-    if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
-      allocator->DeallocateChain(buffer_address);
-      return false;
-    }
-    buffer = allocator->AllocateBuffer(buffer);
-    *buffer_address = buffer;
-  }
-  ASSERT(buffer->HasSpaceForTypedSlot());
-  buffer->Add(reinterpret_cast<ObjectSlot>(type));
-  buffer->Add(reinterpret_cast<ObjectSlot>(addr));
-  return true;
-}
-
-
-static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
-  if (RelocInfo::IsCodeTarget(rmode)) {
-    return SlotsBuffer::CODE_TARGET_SLOT;
-  } else if (RelocInfo::IsEmbeddedObject(rmode)) {
-    return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
-  } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
-    return SlotsBuffer::DEBUG_TARGET_SLOT;
-  } else if (RelocInfo::IsJSReturn(rmode)) {
-    return SlotsBuffer::JS_RETURN_SLOT;
-  }
-  UNREACHABLE();
-  return SlotsBuffer::NUMBER_OF_SLOT_TYPES;
-}
-
-
-void MarkCompactCollector::RecordRelocSlot(RelocInfo* rinfo, Object* target) {
-  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
-  RelocInfo::Mode rmode = rinfo->rmode();
-  if (target_page->IsEvacuationCandidate() &&
-      (rinfo->host() == NULL ||
-       !ShouldSkipEvacuationSlotRecording(rinfo->host()))) {
-    bool success;
-    if (RelocInfo::IsEmbeddedObject(rmode) && rinfo->IsInConstantPool()) {
-      // This doesn't need to be typed since it is just a normal heap pointer.
-      Object** target_pointer =
-          reinterpret_cast<Object**>(rinfo->constant_pool_entry_address());
-      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                                   target_page->slots_buffer_address(),
-                                   target_pointer,
-                                   SlotsBuffer::FAIL_ON_OVERFLOW);
-    } else if (RelocInfo::IsCodeTarget(rmode) && rinfo->IsInConstantPool()) {
-      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                                   target_page->slots_buffer_address(),
-                                   SlotsBuffer::CODE_ENTRY_SLOT,
-                                   rinfo->constant_pool_entry_address(),
-                                   SlotsBuffer::FAIL_ON_OVERFLOW);
-    } else {
-      success = SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                                   target_page->slots_buffer_address(),
-                                   SlotTypeForRMode(rmode),
-                                   rinfo->pc(),
-                                   SlotsBuffer::FAIL_ON_OVERFLOW);
-    }
-    if (!success) {
-      EvictEvacuationCandidate(target_page);
-    }
-  }
-}
-
-
-void MarkCompactCollector::RecordCodeEntrySlot(Address slot, Code* target) {
-  Page* target_page = Page::FromAddress(reinterpret_cast<Address>(target));
-  if (target_page->IsEvacuationCandidate() &&
-      !ShouldSkipEvacuationSlotRecording(reinterpret_cast<Object**>(slot))) {
-    if (!SlotsBuffer::AddTo(&slots_buffer_allocator_,
-                            target_page->slots_buffer_address(),
-                            SlotsBuffer::CODE_ENTRY_SLOT,
-                            slot,
-                            SlotsBuffer::FAIL_ON_OVERFLOW)) {
-      EvictEvacuationCandidate(target_page);
-    }
-  }
-}
-
-
-void MarkCompactCollector::RecordCodeTargetPatch(Address pc, Code* target) {
-  ASSERT(heap()->gc_state() == Heap::MARK_COMPACT);
-  if (is_compacting()) {
-    Code* host = isolate()->inner_pointer_to_code_cache()->
-        GcSafeFindCodeForInnerPointer(pc);
-    MarkBit mark_bit = Marking::MarkBitFrom(host);
-    if (Marking::IsBlack(mark_bit)) {
-      RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
-      RecordRelocSlot(&rinfo, target);
-    }
-  }
-}
-
-
-static inline SlotsBuffer::SlotType DecodeSlotType(
-    SlotsBuffer::ObjectSlot slot) {
-  return static_cast<SlotsBuffer::SlotType>(reinterpret_cast<intptr_t>(slot));
-}
-
-
-void SlotsBuffer::UpdateSlots(Heap* heap) {
-  PointersUpdatingVisitor v(heap);
-
-  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
-    ObjectSlot slot = slots_[slot_idx];
-    if (!IsTypedSlot(slot)) {
-      PointersUpdatingVisitor::UpdateSlot(heap, slot);
-    } else {
-      ++slot_idx;
-      ASSERT(slot_idx < idx_);
-      UpdateSlot(heap->isolate(),
-                 &v,
-                 DecodeSlotType(slot),
-                 reinterpret_cast<Address>(slots_[slot_idx]));
-    }
-  }
-}
-
-
-void SlotsBuffer::UpdateSlotsWithFilter(Heap* heap) {
-  PointersUpdatingVisitor v(heap);
-
-  for (int slot_idx = 0; slot_idx < idx_; ++slot_idx) {
-    ObjectSlot slot = slots_[slot_idx];
-    if (!IsTypedSlot(slot)) {
-      if (!IsOnInvalidatedCodeObject(reinterpret_cast<Address>(slot))) {
-        PointersUpdatingVisitor::UpdateSlot(heap, slot);
-      }
-    } else {
-      ++slot_idx;
-      ASSERT(slot_idx < idx_);
-      Address pc = reinterpret_cast<Address>(slots_[slot_idx]);
-      if (!IsOnInvalidatedCodeObject(pc)) {
-        UpdateSlot(heap->isolate(),
-                   &v,
-                   DecodeSlotType(slot),
-                   reinterpret_cast<Address>(slots_[slot_idx]));
-      }
-    }
-  }
-}
-
-
-SlotsBuffer* SlotsBufferAllocator::AllocateBuffer(SlotsBuffer* next_buffer) {
-  return new SlotsBuffer(next_buffer);
-}
-
-
-void SlotsBufferAllocator::DeallocateBuffer(SlotsBuffer* buffer) {
-  delete buffer;
-}
-
-
-void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
-  SlotsBuffer* buffer = *buffer_address;
-  while (buffer != NULL) {
-    SlotsBuffer* next_buffer = buffer->next();
-    DeallocateBuffer(buffer);
-    buffer = next_buffer;
-  }
-  *buffer_address = NULL;
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/mark-compact.h b/src/mark-compact.h
deleted file mode 100644
index ae6767f..0000000
--- a/src/mark-compact.h
+++ /dev/null
@@ -1,985 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_MARK_COMPACT_H_
-#define V8_MARK_COMPACT_H_
-
-#include "src/compiler-intrinsics.h"
-#include "src/spaces.h"
-
-namespace v8 {
-namespace internal {
-
-// Callback function that returns whether an object is alive.  The heap size
-// of the object is returned in *size.  It optionally updates the offset
-// to the first live object in the page (only used for old and map objects).
-typedef bool (*IsAliveFunction)(HeapObject* obj, int* size, int* offset);
-
-// Forward declarations.
-class CodeFlusher;
-class GCTracer;
-class MarkCompactCollector;
-class MarkingVisitor;
-class RootMarkingVisitor;
-
-
-class Marking {
- public:
-  explicit Marking(Heap* heap)
-      : heap_(heap) {
-  }
-
-  INLINE(static MarkBit MarkBitFrom(Address addr));
-
-  INLINE(static MarkBit MarkBitFrom(HeapObject* obj)) {
-    return MarkBitFrom(reinterpret_cast<Address>(obj));
-  }
-
-  // Impossible markbits: 01
-  static const char* kImpossibleBitPattern;
-  INLINE(static bool IsImpossible(MarkBit mark_bit)) {
-    return !mark_bit.Get() && mark_bit.Next().Get();
-  }
-
-  // Black markbits: 10 - this is required by the sweeper.
-  static const char* kBlackBitPattern;
-  INLINE(static bool IsBlack(MarkBit mark_bit)) {
-    return mark_bit.Get() && !mark_bit.Next().Get();
-  }
-
-  // White markbits: 00 - this is required by the mark bit clearer.
-  static const char* kWhiteBitPattern;
-  INLINE(static bool IsWhite(MarkBit mark_bit)) {
-    return !mark_bit.Get();
-  }
-
-  // Grey markbits: 11
-  static const char* kGreyBitPattern;
-  INLINE(static bool IsGrey(MarkBit mark_bit)) {
-    return mark_bit.Get() && mark_bit.Next().Get();
-  }
-
-  INLINE(static void MarkBlack(MarkBit mark_bit)) {
-    mark_bit.Set();
-    mark_bit.Next().Clear();
-  }
-
-  INLINE(static void BlackToGrey(MarkBit markbit)) {
-    markbit.Next().Set();
-  }
-
-  INLINE(static void WhiteToGrey(MarkBit markbit)) {
-    markbit.Set();
-    markbit.Next().Set();
-  }
-
-  INLINE(static void GreyToBlack(MarkBit markbit)) {
-    markbit.Next().Clear();
-  }
-
-  INLINE(static void BlackToGrey(HeapObject* obj)) {
-    BlackToGrey(MarkBitFrom(obj));
-  }
-
-  INLINE(static void AnyToGrey(MarkBit markbit)) {
-    markbit.Set();
-    markbit.Next().Set();
-  }
-
-  void TransferMark(Address old_start, Address new_start);
-
-#ifdef DEBUG
-  enum ObjectColor {
-    BLACK_OBJECT,
-    WHITE_OBJECT,
-    GREY_OBJECT,
-    IMPOSSIBLE_COLOR
-  };
-
-  static const char* ColorName(ObjectColor color) {
-    switch (color) {
-      case BLACK_OBJECT: return "black";
-      case WHITE_OBJECT: return "white";
-      case GREY_OBJECT: return "grey";
-      case IMPOSSIBLE_COLOR: return "impossible";
-    }
-    return "error";
-  }
-
-  static ObjectColor Color(HeapObject* obj) {
-    return Color(Marking::MarkBitFrom(obj));
-  }
-
-  static ObjectColor Color(MarkBit mark_bit) {
-    if (IsBlack(mark_bit)) return BLACK_OBJECT;
-    if (IsWhite(mark_bit)) return WHITE_OBJECT;
-    if (IsGrey(mark_bit)) return GREY_OBJECT;
-    UNREACHABLE();
-    return IMPOSSIBLE_COLOR;
-  }
-#endif
-
-  // Returns true if the transferred color is black.
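-  // With the bit patterns above (black == 10, grey == 11, white == 00),
-  // copying a grey source sets both destination bits and returns false;
-  // copying a white source leaves the destination untouched.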
-  INLINE(static bool TransferColor(HeapObject* from,
-                                   HeapObject* to)) {
-    MarkBit from_mark_bit = MarkBitFrom(from);
-    MarkBit to_mark_bit = MarkBitFrom(to);
-    bool is_black = false;
-    if (from_mark_bit.Get()) {
-      to_mark_bit.Set();
-      is_black = true;  // Looks black so far.
-    }
-    if (from_mark_bit.Next().Get()) {
-      to_mark_bit.Next().Set();
-      is_black = false;  // Was actually gray.
-    }
-    return is_black;
-  }
-
- private:
-  Heap* heap_;
-};
-
-// ----------------------------------------------------------------------------
-// Marking deque for tracing live objects.
-class MarkingDeque {
- public:
-  MarkingDeque()
-      : array_(NULL), top_(0), bottom_(0), mask_(0), overflowed_(false) { }
-
-  void Initialize(Address low, Address high) {
-    HeapObject** obj_low = reinterpret_cast<HeapObject**>(low);
-    HeapObject** obj_high = reinterpret_cast<HeapObject**>(high);
-    array_ = obj_low;
-    mask_ = RoundDownToPowerOf2(static_cast<int>(obj_high - obj_low)) - 1;
-    top_ = bottom_ = 0;
-    overflowed_ = false;
-  }
-
-  inline bool IsFull() { return ((top_ + 1) & mask_) == bottom_; }
-
-  inline bool IsEmpty() { return top_ == bottom_; }
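-
-  // One slot is always left unused: if the ring could fill completely,
-  // top_ == bottom_ would be ambiguous between "empty" and "full".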
-
-  bool overflowed() const { return overflowed_; }
-
-  void ClearOverflowed() { overflowed_ = false; }
-
-  void SetOverflowed() { overflowed_ = true; }
-
-  // Push the (marked) object on the marking stack if there is room,
-  // otherwise flag the deque as overflowed and wait for a rescan of the
-  // heap.
-  INLINE(void PushBlack(HeapObject* object)) {
-    ASSERT(object->IsHeapObject());
-    if (IsFull()) {
-      Marking::BlackToGrey(object);
-      MemoryChunk::IncrementLiveBytesFromGC(object->address(), -object->Size());
-      SetOverflowed();
-    } else {
-      array_[top_] = object;
-      top_ = ((top_ + 1) & mask_);
-    }
-  }
-
-  INLINE(void PushGrey(HeapObject* object)) {
-    ASSERT(object->IsHeapObject());
-    if (IsFull()) {
-      SetOverflowed();
-    } else {
-      array_[top_] = object;
-      top_ = ((top_ + 1) & mask_);
-    }
-  }
-
-  INLINE(HeapObject* Pop()) {
-    ASSERT(!IsEmpty());
-    top_ = ((top_ - 1) & mask_);
-    HeapObject* object = array_[top_];
-    ASSERT(object->IsHeapObject());
-    return object;
-  }
-
-  INLINE(void UnshiftGrey(HeapObject* object)) {
-    ASSERT(object->IsHeapObject());
-    if (IsFull()) {
-      SetOverflowed();
-    } else {
-      bottom_ = ((bottom_ - 1) & mask_);
-      array_[bottom_] = object;
-    }
-  }
-
-  HeapObject** array() { return array_; }
-  int bottom() { return bottom_; }
-  int top() { return top_; }
-  int mask() { return mask_; }
-  void set_top(int top) { top_ = top; }
-
- private:
-  HeapObject** array_;
-  // array_[(top_ - 1) & mask_] is the top element in the deque.  The deque is
-  // empty when top_ == bottom_.  It is full when top_ + 1 == bottom_
-  // (mod mask_ + 1).
-  int top_;
-  int bottom_;
-  int mask_;
-  bool overflowed_;
-
-  DISALLOW_COPY_AND_ASSIGN(MarkingDeque);
-};
-
-
-class SlotsBufferAllocator {
- public:
-  SlotsBuffer* AllocateBuffer(SlotsBuffer* next_buffer);
-  void DeallocateBuffer(SlotsBuffer* buffer);
-
-  void DeallocateChain(SlotsBuffer** buffer_address);
-};
-
-
-// SlotsBuffer records a sequence of slots that has to be updated
-// after live objects were relocated from evacuation candidates.
-// All slots are either untyped or typed:
-//    - Untyped slots are expected to contain a tagged object pointer.
-//      They are recorded by an address.
-//    - Typed slots are expected to contain an encoded pointer to a heap
-//      object where the way of encoding depends on the type of the slot.
-//      They are recorded as a pair (SlotType, slot address).
-// We assume that the zero page is never mapped; this allows us to distinguish
-// untyped slots from typed slots during iteration by a simple comparison:
-// if an element of the slots buffer is less than NUMBER_OF_SLOT_TYPES, it
-// is the first element of a typed slot's pair.
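-// For example, a CODE_TARGET_SLOT recorded for address addr occupies two
-// consecutive elements: slots_[i] holds the small integer CODE_TARGET_SLOT
-// (less than NUMBER_OF_SLOT_TYPES, so never a valid pointer) and
-// slots_[i + 1] holds addr.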
-class SlotsBuffer {
- public:
-  typedef Object** ObjectSlot;
-
-  explicit SlotsBuffer(SlotsBuffer* next_buffer)
-      : idx_(0), chain_length_(1), next_(next_buffer) {
-    if (next_ != NULL) {
-      chain_length_ = next_->chain_length_ + 1;
-    }
-  }
-
-  ~SlotsBuffer() {
-  }
-
-  void Add(ObjectSlot slot) {
-    ASSERT(0 <= idx_ && idx_ < kNumberOfElements);
-    slots_[idx_++] = slot;
-  }
-
-  enum SlotType {
-    EMBEDDED_OBJECT_SLOT,
-    RELOCATED_CODE_OBJECT,
-    CODE_TARGET_SLOT,
-    CODE_ENTRY_SLOT,
-    DEBUG_TARGET_SLOT,
-    JS_RETURN_SLOT,
-    NUMBER_OF_SLOT_TYPES
-  };
-
-  static const char* SlotTypeToString(SlotType type) {
-    switch (type) {
-      case EMBEDDED_OBJECT_SLOT:
-        return "EMBEDDED_OBJECT_SLOT";
-      case RELOCATED_CODE_OBJECT:
-        return "RELOCATED_CODE_OBJECT";
-      case CODE_TARGET_SLOT:
-        return "CODE_TARGET_SLOT";
-      case CODE_ENTRY_SLOT:
-        return "CODE_ENTRY_SLOT";
-      case DEBUG_TARGET_SLOT:
-        return "DEBUG_TARGET_SLOT";
-      case JS_RETURN_SLOT:
-        return "JS_RETURN_SLOT";
-      case NUMBER_OF_SLOT_TYPES:
-        return "NUMBER_OF_SLOT_TYPES";
-    }
-    return "UNKNOWN SlotType";
-  }
-
-  void UpdateSlots(Heap* heap);
-
-  void UpdateSlotsWithFilter(Heap* heap);
-
-  SlotsBuffer* next() { return next_; }
-
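-  // For example, a chain of three buffers whose head holds 10 entries has
-  // SizeOfChain == 10 + 2 * kNumberOfElements; each buffer behind the head
-  // is counted as full.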
-  static int SizeOfChain(SlotsBuffer* buffer) {
-    if (buffer == NULL) return 0;
-    return static_cast<int>(buffer->idx_ +
-                            (buffer->chain_length_ - 1) * kNumberOfElements);
-  }
-
-  inline bool IsFull() {
-    return idx_ == kNumberOfElements;
-  }
-
-  inline bool HasSpaceForTypedSlot() {
-    return idx_ < kNumberOfElements - 1;
-  }
-
-  static void UpdateSlotsRecordedIn(Heap* heap,
-                                    SlotsBuffer* buffer,
-                                    bool code_slots_filtering_required) {
-    while (buffer != NULL) {
-      if (code_slots_filtering_required) {
-        buffer->UpdateSlotsWithFilter(heap);
-      } else {
-        buffer->UpdateSlots(heap);
-      }
-      buffer = buffer->next();
-    }
-  }
-
-  enum AdditionMode {
-    FAIL_ON_OVERFLOW,
-    IGNORE_OVERFLOW
-  };
-
-  static bool ChainLengthThresholdReached(SlotsBuffer* buffer) {
-    return buffer != NULL && buffer->chain_length_ >= kChainLengthThreshold;
-  }
-
-  INLINE(static bool AddTo(SlotsBufferAllocator* allocator,
-                           SlotsBuffer** buffer_address,
-                           ObjectSlot slot,
-                           AdditionMode mode)) {
-    SlotsBuffer* buffer = *buffer_address;
-    if (buffer == NULL || buffer->IsFull()) {
-      if (mode == FAIL_ON_OVERFLOW && ChainLengthThresholdReached(buffer)) {
-        allocator->DeallocateChain(buffer_address);
-        return false;
-      }
-      buffer = allocator->AllocateBuffer(buffer);
-      *buffer_address = buffer;
-    }
-    buffer->Add(slot);
-    return true;
-  }
-
-  static bool IsTypedSlot(ObjectSlot slot);
-
-  static bool AddTo(SlotsBufferAllocator* allocator,
-                    SlotsBuffer** buffer_address,
-                    SlotType type,
-                    Address addr,
-                    AdditionMode mode);
-
-  static const int kNumberOfElements = 1021;
-
- private:
-  static const int kChainLengthThreshold = 15;
-
-  intptr_t idx_;
-  intptr_t chain_length_;
-  SlotsBuffer* next_;
-  ObjectSlot slots_[kNumberOfElements];
-};
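The zero-page assumption above reduces decoding a mixed buffer to a single comparison. A sketch of the iteration, with hypothetical ProcessTypedSlot/ProcessUntypedSlot helpers (UpdateSlots is the real consumer):

// Sketch: walking a slots buffer that mixes untyped and typed entries.
// Because the zero page is never mapped, no real slot address can be a
// small integer, so an entry below NUMBER_OF_SLOT_TYPES must be the
// SlotType half of a (SlotType, address) pair.
void WalkSlots(SlotsBuffer::ObjectSlot* entries, int count) {
  for (int i = 0; i < count; ++i) {
    intptr_t raw = reinterpret_cast<intptr_t>(entries[i]);
    if (raw < SlotsBuffer::NUMBER_OF_SLOT_TYPES) {
      SlotsBuffer::SlotType type = static_cast<SlotsBuffer::SlotType>(raw);
      Address addr = reinterpret_cast<Address>(entries[++i]);  // Second half.
      ProcessTypedSlot(type, addr);    // Hypothetical helper.
    } else {
      ProcessUntypedSlot(entries[i]);  // Hypothetical helper.
    }
  }
}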
-
-
-// CodeFlusher collects candidates for code flushing during marking and
-// processes those candidates after marking has completed in order to
-// reset those functions referencing code objects that would otherwise
-// be unreachable. Code objects can be referenced in three ways:
-//    - SharedFunctionInfo references unoptimized code.
-//    - JSFunction references either unoptimized or optimized code.
-//    - OptimizedCodeMap references optimized code.
-// We are not allowed to flush unoptimized code for functions that got
-// optimized or inlined into optimized code, because we might bail out
-// into the unoptimized code again during deoptimization.
-class CodeFlusher {
- public:
-  explicit CodeFlusher(Isolate* isolate)
-      : isolate_(isolate),
-        jsfunction_candidates_head_(NULL),
-        shared_function_info_candidates_head_(NULL),
-        optimized_code_map_holder_head_(NULL) {}
-
-  void AddCandidate(SharedFunctionInfo* shared_info) {
-    if (GetNextCandidate(shared_info) == NULL) {
-      SetNextCandidate(shared_info, shared_function_info_candidates_head_);
-      shared_function_info_candidates_head_ = shared_info;
-    }
-  }
-
-  void AddCandidate(JSFunction* function) {
-    ASSERT(function->code() == function->shared()->code());
-    if (GetNextCandidate(function)->IsUndefined()) {
-      SetNextCandidate(function, jsfunction_candidates_head_);
-      jsfunction_candidates_head_ = function;
-    }
-  }
-
-  void AddOptimizedCodeMap(SharedFunctionInfo* code_map_holder) {
-    if (GetNextCodeMap(code_map_holder)->IsUndefined()) {
-      SetNextCodeMap(code_map_holder, optimized_code_map_holder_head_);
-      optimized_code_map_holder_head_ = code_map_holder;
-    }
-  }
-
-  void EvictOptimizedCodeMap(SharedFunctionInfo* code_map_holder);
-  void EvictCandidate(SharedFunctionInfo* shared_info);
-  void EvictCandidate(JSFunction* function);
-
-  void ProcessCandidates() {
-    ProcessOptimizedCodeMaps();
-    ProcessSharedFunctionInfoCandidates();
-    ProcessJSFunctionCandidates();
-  }
-
-  void EvictAllCandidates() {
-    EvictOptimizedCodeMaps();
-    EvictJSFunctionCandidates();
-    EvictSharedFunctionInfoCandidates();
-  }
-
-  void IteratePointersToFromSpace(ObjectVisitor* v);
-
- private:
-  void ProcessOptimizedCodeMaps();
-  void ProcessJSFunctionCandidates();
-  void ProcessSharedFunctionInfoCandidates();
-  void EvictOptimizedCodeMaps();
-  void EvictJSFunctionCandidates();
-  void EvictSharedFunctionInfoCandidates();
-
-  static JSFunction** GetNextCandidateSlot(JSFunction* candidate) {
-    return reinterpret_cast<JSFunction**>(
-        HeapObject::RawField(candidate, JSFunction::kNextFunctionLinkOffset));
-  }
-
-  static JSFunction* GetNextCandidate(JSFunction* candidate) {
-    Object* next_candidate = candidate->next_function_link();
-    return reinterpret_cast<JSFunction*>(next_candidate);
-  }
-
-  static void SetNextCandidate(JSFunction* candidate,
-                               JSFunction* next_candidate) {
-    candidate->set_next_function_link(next_candidate);
-  }
-
-  static void ClearNextCandidate(JSFunction* candidate, Object* undefined) {
-    ASSERT(undefined->IsUndefined());
-    candidate->set_next_function_link(undefined, SKIP_WRITE_BARRIER);
-  }
-
-  static SharedFunctionInfo* GetNextCandidate(SharedFunctionInfo* candidate) {
-    Object* next_candidate = candidate->code()->gc_metadata();
-    return reinterpret_cast<SharedFunctionInfo*>(next_candidate);
-  }
-
-  static void SetNextCandidate(SharedFunctionInfo* candidate,
-                               SharedFunctionInfo* next_candidate) {
-    candidate->code()->set_gc_metadata(next_candidate);
-  }
-
-  static void ClearNextCandidate(SharedFunctionInfo* candidate) {
-    candidate->code()->set_gc_metadata(NULL, SKIP_WRITE_BARRIER);
-  }
-
-  static SharedFunctionInfo* GetNextCodeMap(SharedFunctionInfo* holder) {
-    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    Object* next_map = code_map->get(SharedFunctionInfo::kNextMapIndex);
-    return reinterpret_cast<SharedFunctionInfo*>(next_map);
-  }
-
-  static void SetNextCodeMap(SharedFunctionInfo* holder,
-                             SharedFunctionInfo* next_holder) {
-    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    code_map->set(SharedFunctionInfo::kNextMapIndex, next_holder);
-  }
-
-  static void ClearNextCodeMap(SharedFunctionInfo* holder) {
-    FixedArray* code_map = FixedArray::cast(holder->optimized_code_map());
-    code_map->set_undefined(SharedFunctionInfo::kNextMapIndex);
-  }
-
-  Isolate* isolate_;
-  JSFunction* jsfunction_candidates_head_;
-  SharedFunctionInfo* shared_function_info_candidates_head_;
-  SharedFunctionInfo* optimized_code_map_holder_head_;
-
-  DISALLOW_COPY_AND_ASSIGN(CodeFlusher);
-};
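The candidate lists above are intrusive: each link lives in a field of the candidate itself (next_function_link for JSFunction, the code object's gc_metadata for SharedFunctionInfo), so enqueuing during marking never allocates. A stand-alone sketch of the pattern with hypothetical types; note a real implementation needs a distinct "not enqueued" sentinel, which is what ClearNextCandidate provides in the class above:

// Intrusive candidate list: O(1) add, no allocation, one pointer of
// overhead per candidate. Hypothetical stand-alone version.
struct Candidate {
  Candidate* next_link = nullptr;  // Stands in for next_function_link().
};

class CandidateList {
 public:
  void Add(Candidate* c) {
    if (c->next_link == nullptr) {       // Crude "not yet enqueued" test.
      c->next_link = head_ ? head_ : c;  // Self-link marks the list tail.
      head_ = c;
    }
  }

  template <typename F>
  void ProcessAndClear(F process) {
    Candidate* c = head_;
    head_ = nullptr;
    while (c != nullptr) {
      Candidate* next = (c->next_link == c) ? nullptr : c->next_link;
      c->next_link = nullptr;  // Mirrors ClearNextCandidate().
      process(c);
      c = next;
    }
  }

 private:
  Candidate* head_ = nullptr;
};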
-
-
-// Defined in isolate.h.
-class ThreadLocalTop;
-
-
-// -------------------------------------------------------------------------
-// Mark-Compact collector
-class MarkCompactCollector {
- public:
-  // Set the global flags, it must be called before Prepare to take effect.
-  inline void SetFlags(int flags);
-
-  static void Initialize();
-
-  void SetUp();
-
-  void TearDown();
-
-  void CollectEvacuationCandidates(PagedSpace* space);
-
-  void AddEvacuationCandidate(Page* p);
-
-  // Prepares for GC by resetting relocation info in old and map spaces and
-  // choosing spaces to compact.
-  void Prepare(GCTracer* tracer);
-
-  // Performs a global garbage collection.
-  void CollectGarbage();
-
-  enum CompactionMode {
-    INCREMENTAL_COMPACTION,
-    NON_INCREMENTAL_COMPACTION
-  };
-
-  bool StartCompaction(CompactionMode mode);
-
-  void AbortCompaction();
-
-  // During a full GC, there is a stack-allocated GCTracer that is used for
-  // bookkeeping information.  Return a pointer to that tracer.
-  GCTracer* tracer() { return tracer_; }
-
-#ifdef DEBUG
-  // Checks whether performing mark-compact collection.
-  bool in_use() { return state_ > PREPARE_GC; }
-  bool are_map_pointers_encoded() { return state_ == UPDATE_POINTERS; }
-#endif
-
-  // Determine type of object and emit deletion log event.
-  static void ReportDeleteIfNeeded(HeapObject* obj, Isolate* isolate);
-
-  // Distinguishable invalid map encodings (for single word and multiple words)
-  // that indicate free regions.
-  static const uint32_t kSingleFreeEncoding = 0;
-  static const uint32_t kMultiFreeEncoding = 1;
-
-  static inline bool IsMarked(Object* obj);
-
-  inline Heap* heap() const { return heap_; }
-  inline Isolate* isolate() const;
-
-  CodeFlusher* code_flusher() { return code_flusher_; }
-  inline bool is_code_flushing_enabled() const { return code_flusher_ != NULL; }
-  void EnableCodeFlushing(bool enable);
-
-  enum SweeperType {
-    CONSERVATIVE,
-    PARALLEL_CONSERVATIVE,
-    CONCURRENT_CONSERVATIVE,
-    PRECISE
-  };
-
-  enum SweepingParallelism {
-    SWEEP_SEQUENTIALLY,
-    SWEEP_IN_PARALLEL
-  };
-
-#ifdef VERIFY_HEAP
-  void VerifyMarkbitsAreClean();
-  static void VerifyMarkbitsAreClean(PagedSpace* space);
-  static void VerifyMarkbitsAreClean(NewSpace* space);
-  void VerifyWeakEmbeddedObjectsInCode();
-  void VerifyOmittedMapChecks();
-#endif
-
-  // Sweep a single page from the given space conservatively.
-  // Return the number of reclaimed bytes.
-  template<SweepingParallelism type>
-  static intptr_t SweepConservatively(PagedSpace* space,
-                                      FreeList* free_list,
-                                      Page* p);
-
-  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object** anchor)) {
-    return Page::FromAddress(reinterpret_cast<Address>(anchor))->
-        ShouldSkipEvacuationSlotRecording();
-  }
-
-  INLINE(static bool ShouldSkipEvacuationSlotRecording(Object* host)) {
-    return Page::FromAddress(reinterpret_cast<Address>(host))->
-        ShouldSkipEvacuationSlotRecording();
-  }
-
-  INLINE(static bool IsOnEvacuationCandidate(Object* obj)) {
-    return Page::FromAddress(reinterpret_cast<Address>(obj))->
-        IsEvacuationCandidate();
-  }
-
-  INLINE(void EvictEvacuationCandidate(Page* page)) {
-    if (FLAG_trace_fragmentation) {
-      PrintF("Page %p is too popular. Disabling evacuation.\n",
-             reinterpret_cast<void*>(page));
-    }
-
-    // TODO(gc) If all evacuation candidates are too popular we
-    // should stop slots recording entirely.
-    page->ClearEvacuationCandidate();
-
-    // We were not collecting slots on this page that point
-    // to other evacuation candidates, thus we have to
-    // rescan the page after evacuation to discover and update all
-    // pointers to evacuated objects.
-    if (page->owner()->identity() == OLD_DATA_SPACE) {
-      evacuation_candidates_.RemoveElement(page);
-    } else {
-      page->SetFlag(Page::RESCAN_ON_EVACUATION);
-    }
-  }
-
-  void RecordRelocSlot(RelocInfo* rinfo, Object* target);
-  void RecordCodeEntrySlot(Address slot, Code* target);
-  void RecordCodeTargetPatch(Address pc, Code* target);
-
-  INLINE(void RecordSlot(Object** anchor_slot,
-                         Object** slot,
-                         Object* object,
-                         SlotsBuffer::AdditionMode mode =
-                             SlotsBuffer::FAIL_ON_OVERFLOW));
-
-  void MigrateObject(HeapObject* dst,
-                     HeapObject* src,
-                     int size,
-                     AllocationSpace to_old_space);
-
-  bool TryPromoteObject(HeapObject* object, int object_size);
-
-  void InvalidateCode(Code* code);
-
-  void ClearMarkbits();
-
-  bool abort_incremental_marking() const { return abort_incremental_marking_; }
-
-  bool is_compacting() const { return compacting_; }
-
-  MarkingParity marking_parity() { return marking_parity_; }
-
-  // Concurrent and parallel sweeping support.
-  void SweepInParallel(PagedSpace* space);
-
-  void WaitUntilSweepingCompleted();
-
-  bool IsSweepingCompleted();
-
-  void RefillFreeList(PagedSpace* space);
-
-  bool AreSweeperThreadsActivated();
-
-  bool IsConcurrentSweepingInProgress();
-
-  void set_sequential_sweeping(bool sequential_sweeping) {
-    sequential_sweeping_ = sequential_sweeping;
-  }
-
-  bool sequential_sweeping() const {
-    return sequential_sweeping_;
-  }
-
-  // Mark the global table which maps weak objects to dependent code without
-  // marking its contents.
-  void MarkWeakObjectToCodeTable();
-
-  // Special case for processing weak references in a full collection. We need
-  // to artificially keep AllocationSites alive for a time.
-  void MarkAllocationSite(AllocationSite* site);
-
- private:
-  class SweeperTask;
-
-  explicit MarkCompactCollector(Heap* heap);
-  ~MarkCompactCollector();
-
-  bool MarkInvalidatedCode();
-  bool WillBeDeoptimized(Code* code);
-  void RemoveDeadInvalidatedCode();
-  void ProcessInvalidatedCode(ObjectVisitor* visitor);
-
-  void StartSweeperThreads();
-
-#ifdef DEBUG
-  enum CollectorState {
-    IDLE,
-    PREPARE_GC,
-    MARK_LIVE_OBJECTS,
-    SWEEP_SPACES,
-    ENCODE_FORWARDING_ADDRESSES,
-    UPDATE_POINTERS,
-    RELOCATE_OBJECTS
-  };
-
-  // The current stage of the collector.
-  CollectorState state_;
-#endif
-
-  // Global flag that forces sweeping to be precise, so we can traverse the
-  // heap.
-  bool sweep_precisely_;
-
-  bool reduce_memory_footprint_;
-
-  bool abort_incremental_marking_;
-
-  MarkingParity marking_parity_;
-
-  // True if we are collecting slots to perform evacuation from evacuation
-  // candidates.
-  bool compacting_;
-
-  bool was_marked_incrementally_;
-
-  // True if concurrent or parallel sweeping is currently in progress.
-  bool sweeping_pending_;
-
-  Semaphore pending_sweeper_jobs_semaphore_;
-
-  bool sequential_sweeping_;
-
-  // A pointer to the current stack-allocated GC tracer object during a full
-  // collection (NULL before and after).
-  GCTracer* tracer_;
-
-  SlotsBufferAllocator slots_buffer_allocator_;
-
-  SlotsBuffer* migration_slots_buffer_;
-
-  // Finishes GC, performs heap verification if enabled.
-  void Finish();
-
-  // -----------------------------------------------------------------------
-  // Phase 1: Marking live objects.
-  //
-  //  Before: The heap has been prepared for garbage collection by
-  //          MarkCompactCollector::Prepare() and is otherwise in its
-  //          normal state.
-  //
-  //   After: Live objects are marked and non-live objects are unmarked.
-
-  friend class RootMarkingVisitor;
-  friend class MarkingVisitor;
-  friend class MarkCompactMarkingVisitor;
-  friend class CodeMarkingVisitor;
-  friend class SharedFunctionInfoMarkingVisitor;
-
-  // Mark code objects that are active on the stack to prevent them
-  // from being flushed.
-  void PrepareThreadForCodeFlushing(Isolate* isolate, ThreadLocalTop* top);
-
-  void PrepareForCodeFlushing();
-
-  // Marking operations for objects reachable from roots.
-  void MarkLiveObjects();
-
-  void AfterMarking();
-
-  // Marks the object black and pushes it on the marking stack.
-  // This is for non-incremental marking only.
-  INLINE(void MarkObject(HeapObject* obj, MarkBit mark_bit));
-
-  // Marks the object black assuming that it is not yet marked.
-  // This is for non-incremental marking only.
-  INLINE(void SetMark(HeapObject* obj, MarkBit mark_bit));
-
-  // Mark the heap roots and all objects reachable from them.
-  void MarkRoots(RootMarkingVisitor* visitor);
-
-  // Mark the string table specially.  References to internalized strings from
-  // the string table are weak.
-  void MarkStringTable(RootMarkingVisitor* visitor);
-
-  // Mark objects in implicit references groups if their parent object
-  // is marked.
-  void MarkImplicitRefGroups();
-
-  // Mark objects reachable (transitively) from objects in the marking stack
-  // or overflowed in the heap.
-  void ProcessMarkingDeque();
-
-  // Mark objects reachable (transitively) from objects in the marking stack
-  // or overflowed in the heap.  This respects references only considered in
-  // the final atomic marking pause including the following:
-  //    - Processing of objects reachable through Harmony WeakMaps.
-  //    - Objects reachable due to host application logic like object groups
-  //      or implicit references' groups.
-  void ProcessEphemeralMarking(ObjectVisitor* visitor);
-
-  // If the call site of the top optimized code was not prepared for
-  // deoptimization, then treat the maps in the code as strong pointers;
-  // otherwise a map can die and deoptimize the code.
-  void ProcessTopOptimizedFrame(ObjectVisitor* visitor);
-
-  // Mark objects reachable (transitively) from objects in the marking
-  // stack.  This function empties the marking stack, but may leave
-  // overflowed objects in the heap, in which case the marking stack's
-  // overflow flag will be set.
-  void EmptyMarkingDeque();
-
-  // Refill the marking stack with overflowed objects from the heap.  This
-  // function either leaves the marking stack full or clears the overflow
-  // flag on the marking stack.
-  void RefillMarkingDeque();
-
-  // After reachable maps have been marked, process the per-context object
-  // literal map caches, removing unmarked entries.
-  void ProcessMapCaches();
-
-  // Callback function for telling whether the object *p is an unmarked
-  // heap object.
-  static bool IsUnmarkedHeapObject(Object** p);
-  static bool IsUnmarkedHeapObjectWithHeap(Heap* heap, Object** p);
-
-  // Map transitions from a live map to a dead map must be killed.
-  // We replace them with a null descriptor, with the same key.
-  void ClearNonLiveReferences();
-  void ClearNonLivePrototypeTransitions(Map* map);
-  void ClearNonLiveMapTransitions(Map* map, MarkBit map_mark);
-
-  void ClearDependentCode(DependentCode* dependent_code);
-  void ClearDependentICList(Object* head);
-  void ClearNonLiveDependentCode(DependentCode* dependent_code);
-  int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
-                                       int start, int end, int new_start);
-
-  // Mark all values associated with reachable keys in weak collections
-  // encountered so far.  This might push new objects or even new weak maps
-  // onto the marking stack.
-  void ProcessWeakCollections();
-
-  // After all reachable objects have been marked those weak map entries
-  // with an unreachable key are removed from all encountered weak maps.
-  // The linked list of all encountered weak maps is destroyed.
-  void ClearWeakCollections();
-
-  // -----------------------------------------------------------------------
-  // Phase 2: Sweeping to clear mark bits and free non-live objects for
-  // a non-compacting collection.
-  //
-  //  Before: Live objects are marked and non-live objects are unmarked.
-  //
-  //   After: Live objects are unmarked, non-live regions have been added to
-  //          their space's free list. Active eden semispace is compacted by
-  //          evacuation.
-  //
-
-  // If we are not compacting the heap, we simply sweep the spaces except
-  // for the large object space, clearing mark bits and adding unmarked
-  // regions to each space's free list.
-  void SweepSpaces();
-
-  int DiscoverAndPromoteBlackObjectsOnPage(NewSpace* new_space,
-                                           NewSpacePage* p);
-
-  void EvacuateNewSpace();
-
-  void EvacuateLiveObjectsFromPage(Page* p);
-
-  void EvacuatePages();
-
-  void EvacuateNewSpaceAndCandidates();
-
-  void ReleaseEvacuationCandidates();
-
-  // Moves the pages of the evacuation_candidates_ list to the end of their
-  // corresponding space pages list.
-  void MoveEvacuationCandidatesToEndOfPagesList();
-
-  void SweepSpace(PagedSpace* space, SweeperType sweeper);
-
-  // Finalizes the parallel sweeping phase. Marks all the pages that were
-  // swept in parallel.
-  void ParallelSweepSpacesComplete();
-
-  void ParallelSweepSpaceComplete(PagedSpace* space);
-
-#ifdef DEBUG
-  friend class MarkObjectVisitor;
-  static void VisitObject(HeapObject* obj);
-
-  friend class UnmarkObjectVisitor;
-  static void UnmarkObject(HeapObject* obj);
-#endif
-
-  Heap* heap_;
-  MarkingDeque marking_deque_;
-  CodeFlusher* code_flusher_;
-  bool have_code_to_deoptimize_;
-
-  List<Page*> evacuation_candidates_;
-  List<Code*> invalidated_code_;
-
-  SmartPointer<FreeList> free_list_old_data_space_;
-  SmartPointer<FreeList> free_list_old_pointer_space_;
-
-  friend class Heap;
-};
-
-
-class MarkBitCellIterator BASE_EMBEDDED {
- public:
-  explicit MarkBitCellIterator(MemoryChunk* chunk)
-      : chunk_(chunk) {
-    last_cell_index_ = Bitmap::IndexToCell(
-        Bitmap::CellAlignIndex(
-            chunk_->AddressToMarkbitIndex(chunk_->area_end())));
-    cell_base_ = chunk_->area_start();
-    cell_index_ = Bitmap::IndexToCell(
-        Bitmap::CellAlignIndex(
-            chunk_->AddressToMarkbitIndex(cell_base_)));
-    cells_ = chunk_->markbits()->cells();
-  }
-
-  inline bool Done() { return cell_index_ == last_cell_index_; }
-
-  inline bool HasNext() { return cell_index_ < last_cell_index_ - 1; }
-
-  inline MarkBit::CellType* CurrentCell() {
-    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
-        chunk_->AddressToMarkbitIndex(cell_base_))));
-    return &cells_[cell_index_];
-  }
-
-  inline Address CurrentCellBase() {
-    ASSERT(cell_index_ == Bitmap::IndexToCell(Bitmap::CellAlignIndex(
-        chunk_->AddressToMarkbitIndex(cell_base_))));
-    return cell_base_;
-  }
-
-  inline void Advance() {
-    cell_index_++;
-    cell_base_ += 32 * kPointerSize;
-  }
-
- private:
-  MemoryChunk* chunk_;
-  MarkBit::CellType* cells_;
-  unsigned int last_cell_index_;
-  unsigned int cell_index_;
-  Address cell_base_;
-};
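Each bitmap cell is a 32-bit word in which every bit marks one pointer-size word of the page, so Advance() moves cell_base_ forward by 32 * kPointerSize bytes per cell; Done() fires once cell_index_ reaches the cell covering area_end().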
-
-
-class SequentialSweepingScope BASE_EMBEDDED {
- public:
-  explicit SequentialSweepingScope(MarkCompactCollector *collector) :
-    collector_(collector) {
-    collector_->set_sequential_sweeping(true);
-  }
-
-  ~SequentialSweepingScope() {
-    collector_->set_sequential_sweeping(false);
-  }
-
- private:
-  MarkCompactCollector* collector_;
-};
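SequentialSweepingScope is a plain RAII guard; a usage sketch, assuming a MarkCompactCollector* named collector is in scope:

{
  SequentialSweepingScope scope(collector);
  // ... work that must observe sequential_sweeping() == true ...
}  // Destructor resets the flag even on early return or exception.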
-
-
-const char* AllocationSpaceName(AllocationSpace space);
-
-} }  // namespace v8::internal
-
-#endif  // V8_MARK_COMPACT_H_
diff --git a/src/math.js b/src/math.js
index d231c22..f06249d 100644
--- a/src/math.js
+++ b/src/math.js
@@ -56,12 +56,6 @@
   return -MathFloor(-x);
 }
 
-// ECMA 262 - 15.8.2.7
-function MathCos(x) {
-  x = MathAbs(x);  // Convert to number and get rid of -0.
-  return TrigonometricInterpolation(x, 1);
-}
-
 // ECMA 262 - 15.8.2.8
 function MathExp(x) {
   return %MathExpRT(TO_NUMBER_INLINE(x));
@@ -164,94 +158,158 @@
   return %RoundNumber(TO_NUMBER_INLINE(x));
 }
 
-// ECMA 262 - 15.8.2.16
-function MathSin(x) {
-  x = x * 1;  // Convert to number and deal with -0.
-  if (%_IsMinusZero(x)) return x;
-  return TrigonometricInterpolation(x, 0);
-}
-
 // ECMA 262 - 15.8.2.17
 function MathSqrt(x) {
   return %_MathSqrtRT(TO_NUMBER_INLINE(x));
 }
 
-// ECMA 262 - 15.8.2.18
-function MathTan(x) {
-  return MathSin(x) / MathCos(x);
-}
-
 // Non-standard extension.
 function MathImul(x, y) {
   return %NumberImul(TO_NUMBER_INLINE(x), TO_NUMBER_INLINE(y));
 }
 
+// ES6 draft 09-27-13, section 20.2.2.28.
+function MathSign(x) {
+  x = TO_NUMBER_INLINE(x);
+  if (x > 0) return 1;
+  if (x < 0) return -1;
+  // -0, 0 or NaN.
+  return x;
+}
 
-var kInversePiHalf      = 0.636619772367581343;      // 2 / pi
-var kInversePiHalfS26   = 9.48637384723993156e-9;    // 2 / pi / (2^26)
-var kS26                = 1 << 26;
-var kTwoStepThreshold   = 1 << 27;
-// pi / 2 rounded up
-var kPiHalf             = 1.570796326794896780;      // 0x192d4454fb21f93f
-// We use two parts for pi/2 to emulate a higher precision.
-// pi_half_1 only has 26 significant bits for mantissa.
-// Note that pi_half > pi_half_1 + pi_half_2
-var kPiHalf1            = 1.570796325802803040;      // 0x00000054fb21f93f
-var kPiHalf2            = 9.920935796805404252e-10;  // 0x3326a611460b113e
+// ES6 draft 09-27-13, section 20.2.2.34.
+function MathTrunc(x) {
+  x = TO_NUMBER_INLINE(x);
+  if (x > 0) return MathFloor(x);
+  if (x < 0) return MathCeil(x);
+  // -0, 0 or NaN.
+  return x;
+}
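Dispatching to MathFloor for positive and MathCeil for negative inputs rounds toward zero: for example, MathTrunc(3.7) is 3 and MathTrunc(-3.7) is -3, while the fall-through return leaves -0, 0 and NaN untouched, as the ES6 draft requires.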
 
-var kSamples;            // Initialized to a number during genesis.
-var kIndexConvert;       // Initialized to kSamples / (pi/2) during genesis.
-var kSinTable;           // Initialized to a Float64Array during genesis.
-var kCosXIntervalTable;  // Initialized to a Float64Array during genesis.
+// ES6 draft 09-27-13, section 20.2.2.33.
+function MathTanh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  // Idempotent for +/-0.
+  if (x === 0) return x;
+  // Returns +/-1 for +/-Infinity.
+  if (!NUMBER_IS_FINITE(x)) return MathSign(x);
+  var exp1 = MathExp(x);
+  var exp2 = MathExp(-x);
+  return (exp1 - exp2) / (exp1 + exp2);
+}
 
-// This implements sine using the following algorithm.
-// 1) Multiplication takes care of to-number conversion.
-// 2) Reduce x to the first quadrant [0, pi/2].
-//    Conveniently enough, in case of +/-Infinity, we get NaN.
-//    Note that we try to use only 26 instead of 52 significant bits for
-//    mantissa to avoid rounding errors when multiplying.  For very large
-//    input we therefore have additional steps.
-// 3) Replace x by (pi/2-x) if x was in the 2nd or 4th quadrant.
-// 4) Do a table lookup for the closest samples to the left and right of x.
-// 5) Find the derivatives at those sampling points by table lookup:
-//    dsin(x)/dx = cos(x) = sin(pi/2-x) for x in [0, pi/2].
-// 6) Use cubic spline interpolation to approximate sin(x).
-// 7) Negate the result if x was in the 3rd or 4th quadrant.
-// 8) Get rid of -0 by adding 0.
-function TrigonometricInterpolation(x, phase) {
-  if (x < 0 || x > kPiHalf) {
-    var multiple;
-    while (x < -kTwoStepThreshold || x > kTwoStepThreshold) {
-      // Let's assume this loop does not terminate.
-      // The values of x across the loop iterations form a set S.
-      // (1) abs(x) > 2^27 for all x in S.
-      // (2) abs(multiple) != 0 since (2^27 * inverse_pi_half_s26) > 1
-      // (3) multiple is rounded down in 2^26 steps, so the rounding error is
-      //     at most max(ulp, 2^26).
-      // (4) so for x > 2^27, we subtract at most (1+pi/4)x and at least
-      //     (1-pi/4)x
-      // (5) The subtraction results in x' so that abs(x') <= abs(x)*pi/4.
-      //     Note that this difference cannot be simply rounded off.
-      // Set S cannot exist since (5) violates (1).  Loop must terminate.
-      multiple = MathFloor(x * kInversePiHalfS26) * kS26;
-      x = x - multiple * kPiHalf1 - multiple * kPiHalf2;
-    }
-    multiple = MathFloor(x * kInversePiHalf);
-    x = x - multiple * kPiHalf1 - multiple * kPiHalf2;
-    phase += multiple;
+// ES6 draft 09-27-13, section 20.2.2.5.
+function MathAsinh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  // Idempotent for NaN, +/-0 and +/-Infinity.
+  if (x === 0 || !NUMBER_IS_FINITE(x)) return x;
+  if (x > 0) return MathLog(x + MathSqrt(x * x + 1));
+  // This is to prevent numerical errors caused by large negative x.
+  return -MathLog(-x + MathSqrt(x * x + 1));
+}
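The negative branch exists because for large negative x the direct formula MathLog(x + MathSqrt(x * x + 1)) adds two nearly equal magnitudes of opposite sign and loses precision to catastrophic cancellation; rewriting via the odd symmetry asinh(x) = -asinh(-x) keeps both operands of the addition positive.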
+
+// ES6 draft 09-27-13, section 20.2.2.3.
+function MathAcosh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  if (x < 1) return NAN;
+  // Idempotent for NaN and +Infinity.
+  if (!NUMBER_IS_FINITE(x)) return x;
+  return MathLog(x + MathSqrt(x + 1) * MathSqrt(x - 1));
+}
+
+// ES6 draft 09-27-13, section 20.2.2.7.
+function MathAtanh(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  // Idempotent for +/-0.
+  if (x === 0) return x;
+  // Returns NaN for NaN and +/- Infinity.
+  if (!NUMBER_IS_FINITE(x)) return NAN;
+  return 0.5 * MathLog((1 + x) / (1 - x));
+}
+
+// ES6 draft 09-27-13, section 20.2.2.21.
+function MathLog10(x) {
+  return MathLog(x) * 0.434294481903251828;  // log10(x) = log(x)/log(10).
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.22.
+function MathLog2(x) {
+  return MathLog(x) * 1.442695040888963407;  // log2(x) = log(x)/log(2).
+}
+
+// ES6 draft 09-27-13, section 20.2.2.17.
+function MathHypot(x, y) {  // Function length is 2.
+  // We may want to introduce fast paths for the two-argument case and for
+  // inputs where normalization to avoid overflow is unnecessary.  For now,
+  // we simply handle the general case.
+  var length = %_ArgumentsLength();
+  var args = new InternalArray(length);
+  var max = 0;
+  for (var i = 0; i < length; i++) {
+    var n = %_Arguments(i);
+    if (!IS_NUMBER(n)) n = NonNumberToNumber(n);
+    if (n === INFINITY || n === -INFINITY) return INFINITY;
+    n = MathAbs(n);
+    if (n > max) max = n;
+    args[i] = n;
   }
-  var double_index = x * kIndexConvert;
-  if (phase & 1) double_index = kSamples - double_index;
-  var index = double_index | 0;
-  var t1 = double_index - index;
-  var t2 = 1 - t1;
-  var y1 = kSinTable[index];
-  var y2 = kSinTable[index + 1];
-  var dy = y2 - y1;
-  return (t2 * y1 + t1 * y2 +
-              t1 * t2 * ((kCosXIntervalTable[index] - dy) * t2 +
-                         (dy - kCosXIntervalTable[index + 1]) * t1))
-         * (1 - (phase & 2)) + 0;
+
+  // Kahan summation to avoid rounding errors.
+  // Normalize the numbers to the largest one to avoid overflow.
+  if (max === 0) max = 1;
+  var sum = 0;
+  var compensation = 0;
+  for (var i = 0; i < length; i++) {
+    var n = args[i] / max;
+    var summand = n * n - compensation;
+    var preliminary = sum + summand;
+    compensation = (preliminary - sum) - summand;
+    sum = preliminary;
+  }
+  return MathSqrt(sum) * max;
+}
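The two tricks above, scaling by the largest magnitude so no square can overflow and Kahan-compensating the sum of squares, are easiest to see in isolation. A stand-alone C++ sketch of the same scheme (not the V8 code path; HypotN is a hypothetical name):

#include <algorithm>
#include <cmath>
#include <vector>

// hypot over n values: normalize to the largest magnitude, then sum the
// squares with Kahan compensation before taking a single square root.
double HypotN(const std::vector<double>& values) {
  double max = 0;
  for (double v : values) {
    if (std::isinf(v)) return INFINITY;  // +/-Infinity dominates everything.
    max = std::max(max, std::fabs(v));
  }
  if (max == 0) max = 1;  // All zeros (or NaNs): avoid dividing by zero.
  double sum = 0;
  double compensation = 0;
  for (double v : values) {
    double n = std::fabs(v) / max;            // Now in [0, 1], or NaN.
    double summand = n * n - compensation;    // Re-inject lost low bits.
    double preliminary = sum + summand;
    compensation = (preliminary - sum) - summand;  // Bits lost this round.
    sum = preliminary;
  }
  return std::sqrt(sum) * max;  // Undo the normalization.
}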
+
+// ES6 draft 09-27-13, section 20.2.2.16.
+function MathFroundJS(x) {
+  return %MathFround(TO_NUMBER_INLINE(x));
+}
+
+// ES6 draft 07-18-14, section 20.2.2.11
+function MathClz32(x) {
+  x = ToUint32(TO_NUMBER_INLINE(x));
+  if (x == 0) return 32;
+  var result = 0;
+  // Binary search.
+  if ((x & 0xFFFF0000) === 0) { x <<= 16; result += 16; };
+  if ((x & 0xFF000000) === 0) { x <<=  8; result +=  8; };
+  if ((x & 0xF0000000) === 0) { x <<=  4; result +=  4; };
+  if ((x & 0xC0000000) === 0) { x <<=  2; result +=  2; };
+  if ((x & 0x80000000) === 0) { x <<=  1; result +=  1; };
+  return result;
+}
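A quick trace of the binary search for x = 1: all five tests find the tested high bits empty, so the code shifts by 16, 8, 4, 2 and 1 in turn, accumulating 16 + 8 + 4 + 2 + 1 = 31, and MathClz32(1) returns 31; x = 0 takes the early return of 32, matching a 32-bit word with no set bits.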
+
+// ES6 draft 09-27-13, section 20.2.2.9.
+// Cube root approximation, refer to: http://metamerist.com/cbrt/cbrt.htm
+// Using initial approximation adapted from Kahan's cbrt and 4 iterations
+// of Newton's method.
+function MathCbrt(x) {
+  if (!IS_NUMBER(x)) x = NonNumberToNumber(x);
+  if (x == 0 || !NUMBER_IS_FINITE(x)) return x;
+  return x >= 0 ? CubeRoot(x) : -CubeRoot(-x);
+}
+
+macro NEWTON_ITERATION_CBRT(x, approx)
+  (1.0 / 3.0) * (x / (approx * approx) + 2 * approx);
+endmacro
+
+function CubeRoot(x) {
+  var approx_hi = MathFloor(%_DoubleHi(x) / 3) + 0x2A9F7893;
+  var approx = %_ConstructDouble(approx_hi, 0);
+  approx = NEWTON_ITERATION_CBRT(x, approx);
+  approx = NEWTON_ITERATION_CBRT(x, approx);
+  approx = NEWTON_ITERATION_CBRT(x, approx);
+  return NEWTON_ITERATION_CBRT(x, approx);
 }
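NEWTON_ITERATION_CBRT is one Newton step for f(a) = a^3 - x: a' = a - f(a)/f'(a) = a - (a^3 - x)/(3a^2) = (1/3)(x/a^2 + 2a). A stand-alone C++ sketch of the same scheme, with the exponent-thirding initial guess done by explicit bit twiddling in place of %_DoubleHi/%_ConstructDouble:

#include <cstdint>
#include <cstring>

// Cube root of a positive finite x via four Newton iterations. The initial
// guess divides the high 32 bits of the double by 3 and re-biases with the
// magic constant, which roughly thirds the exponent (Kahan's trick).
double CubeRoot(double x) {
  uint64_t bits;
  std::memcpy(&bits, &x, sizeof bits);
  uint32_t hi = static_cast<uint32_t>(bits >> 32);
  bits = static_cast<uint64_t>(hi / 3 + 0x2A9F7893u) << 32;  // Low word = 0.
  double a;
  std::memcpy(&a, &bits, sizeof a);
  for (int i = 0; i < 4; ++i) {
    a = (1.0 / 3.0) * (x / (a * a) + 2 * a);  // One Newton step.
  }
  return a;
}
// Callers handle sign, zero and non-finite inputs, as MathCbrt does above.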
 
 // -------------------------------------------------------------------
@@ -259,8 +317,8 @@
 function SetUpMath() {
   %CheckIsBootstrapping();
 
-  %SetPrototype($Math, $Object.prototype);
-  %SetProperty(global, "Math", $Math, DONT_ENUM);
+  %InternalSetPrototype($Math, $Object.prototype);
+  %AddNamedProperty(global, "Math", $Math, DONT_ENUM);
   %FunctionSetInstanceClassName(MathConstructor, 'Math');
 
   // Set up math constants.
@@ -288,27 +346,41 @@
     "asin", MathAsinJS,
     "atan", MathAtanJS,
     "ceil", MathCeil,
-    "cos", MathCos,
+    "cos", MathCos,       // implemented by third_party/fdlibm
     "exp", MathExp,
     "floor", MathFloor,
     "log", MathLog,
     "round", MathRound,
-    "sin", MathSin,
+    "sin", MathSin,       // implemented by third_party/fdlibm
     "sqrt", MathSqrt,
-    "tan", MathTan,
+    "tan", MathTan,       // implemented by third_party/fdlibm
     "atan2", MathAtan2JS,
     "pow", MathPow,
     "max", MathMax,
     "min", MathMin,
-    "imul", MathImul
+    "imul", MathImul,
+    "sign", MathSign,
+    "trunc", MathTrunc,
+    "sinh", MathSinh,     // implemented by third_party/fdlibm
+    "cosh", MathCosh,     // implemented by third_party/fdlibm
+    "tanh", MathTanh,
+    "asinh", MathAsinh,
+    "acosh", MathAcosh,
+    "atanh", MathAtanh,
+    "log10", MathLog10,
+    "log2", MathLog2,
+    "hypot", MathHypot,
+    "fround", MathFroundJS,
+    "clz32", MathClz32,
+    "cbrt", MathCbrt,
+    "log1p", MathLog1p,   // implemented by third_party/fdlibm
+    "expm1", MathExpm1    // implemented by third_party/fdlibm
   ));
 
   %SetInlineBuiltinFlag(MathCeil);
   %SetInlineBuiltinFlag(MathRandom);
   %SetInlineBuiltinFlag(MathSin);
   %SetInlineBuiltinFlag(MathCos);
-  %SetInlineBuiltinFlag(MathTan);
-  %SetInlineBuiltinFlag(TrigonometricInterpolation);
 }
 
 SetUpMath();
diff --git a/src/messages.cc b/src/messages.cc
index 05402e9..290f756 100644
--- a/src/messages.cc
+++ b/src/messages.cc
@@ -6,8 +6,8 @@
 
 #include "src/api.h"
 #include "src/execution.h"
+#include "src/heap/spaces-inl.h"
 #include "src/messages.h"
-#include "src/spaces-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -130,7 +130,7 @@
                                           Handle<Object> data) {
   Factory* factory = isolate->factory();
   Handle<String> fmt_str =
-      factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("FormatMessage"));
+      factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("FormatMessage"));
   Handle<JSFunction> fun = Handle<JSFunction>::cast(Object::GetProperty(
           isolate->js_builtins_object(), fmt_str).ToHandleChecked());
   Handle<JSMessageObject> message = Handle<JSMessageObject>::cast(data);
@@ -138,10 +138,10 @@
                             Handle<Object>(message->arguments(), isolate) };
 
   MaybeHandle<Object> maybe_result = Execution::TryCall(
-      fun, isolate->js_builtins_object(), ARRAY_SIZE(argv), argv);
+      fun, isolate->js_builtins_object(), arraysize(argv), argv);
   Handle<Object> result;
   if (!maybe_result.ToHandle(&result) || !result->IsString()) {
-    return factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("<error>"));
+    return factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("<error>"));
   }
   Handle<String> result_string = Handle<String>::cast(result);
   // A string that has been obtained from JS code in this way is
diff --git a/src/messages.js b/src/messages.js
index 859bc0d..4a71a61 100644
--- a/src/messages.js
+++ b/src/messages.js
@@ -8,6 +8,7 @@
   // Error
   cyclic_proto:                  ["Cyclic __proto__ value"],
   code_gen_from_strings:         ["%0"],
+  constructor_special_method:    ["Class constructor may not be an accessor"],
   generator_running:             ["Generator is already running"],
   generator_finished:            ["Generator has already finished"],
   // TypeError
@@ -26,6 +27,7 @@
   newline_after_throw:           ["Illegal newline after throw"],
   label_redeclaration:           ["Label '", "%0", "' has already been declared"],
   var_redeclaration:             ["Identifier '", "%0", "' has already been declared"],
+  duplicate_template_property:   ["Object template has duplicate property '", "%0", "'"],
   no_catch_or_finally:           ["Missing catch or finally after try"],
   unknown_label:                 ["Undefined label '", "%0", "'"],
   uncaught_exception:            ["Uncaught ", "%0"],
@@ -36,6 +38,8 @@
   cannot_convert_to_primitive:   ["Cannot convert object to primitive value"],
   not_constructor:               ["%0", " is not a constructor"],
   not_defined:                   ["%0", " is not defined"],
+  non_method:                    ["'super' is referenced from non-method"],
+  unsupported_super:             ["Unsupported reference to 'super'"],
   non_object_property_load:      ["Cannot read property '", "%0", "' of ", "%1"],
   non_object_property_store:     ["Cannot set property '", "%0", "' of ", "%1"],
   with_expression:               ["%0", " has no properties"],
@@ -43,6 +47,8 @@
   no_setter_in_callback:         ["Cannot set property ", "%0", " of ", "%1", " which has only a getter"],
   apply_non_function:            ["Function.prototype.apply was called on ", "%0", ", which is a ", "%1", " and not a function"],
   apply_wrong_args:              ["Function.prototype.apply: Arguments list has wrong type"],
+  toMethod_non_function:         ["Function.prototype.toMethod was called on ", "%0", ", which is a ", "%1", " and not a function"],
+  toMethod_non_object:           ["Function.prototype.toMethod: home object ", "%0", " is not an object"],
   invalid_in_operator_use:       ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
   instanceof_function_expected:  ["Expecting a function in instanceof check, but got ", "%0"],
   instanceof_nonobject_proto:    ["Function has non-object prototype '", "%0", "' in instanceof check"],
@@ -89,6 +95,10 @@
   array_functions_on_frozen:     ["Cannot modify frozen array elements"],
   array_functions_change_sealed: ["Cannot add/remove sealed array elements"],
   first_argument_not_regexp:     ["First argument to ", "%0", " must not be a regular expression"],
+  not_iterable:                  ["%0", " is not iterable"],
+  not_an_iterator:               ["%0", " is not an iterator"],
+  iterator_result_not_an_object: ["Iterator result ", "%0", " is not an object"],
+  iterator_value_not_an_object:  ["Iterator value ", "%0", " is not an entry object"],
   // RangeError
   invalid_array_length:          ["Invalid array length"],
   invalid_array_buffer_length:   ["Invalid array buffer length"],
@@ -108,6 +118,7 @@
   stack_overflow:                ["Maximum call stack size exceeded"],
   invalid_time_value:            ["Invalid time value"],
   invalid_count_value:           ["Invalid count value"],
+  invalid_code_point:            ["Invalid code point ", "%0"],
   // ReferenceError
   invalid_lhs_in_assignment:     ["Invalid left-hand side in assignment"],
   invalid_lhs_in_for:            ["Invalid left-hand side in for-loop"],
@@ -122,7 +133,6 @@
   illegal_break:                 ["Illegal break statement"],
   illegal_continue:              ["Illegal continue statement"],
   illegal_return:                ["Illegal return statement"],
-  illegal_let:                   ["Illegal let declaration outside extended mode"],
   error_loading_debugger:        ["Error loading debugger"],
   no_input_to_regexp:            ["No input to ", "%0"],
   invalid_json:                  ["String '", "%0", "' is not valid JSON"],
@@ -132,8 +142,7 @@
   array_indexof_not_defined:     ["Array.getIndexOf: Argument undefined"],
   object_not_extensible:         ["Can't add property ", "%0", ", object is not extensible"],
   illegal_access:                ["Illegal access"],
-  invalid_cached_data_function:  ["Invalid cached data for function ", "%0"],
-  invalid_cached_data:           ["Invalid cached data"],
+  static_prototype:              ["Classes may not have static property named prototype"],
   strict_mode_with:              ["Strict mode code may not include a with statement"],
   strict_eval_arguments:         ["Unexpected eval or arguments in strict mode"],
   too_many_arguments:            ["Too many arguments in function call (only 65535 allowed)"],
@@ -152,6 +161,7 @@
   strict_cannot_assign:          ["Cannot assign to read only '", "%0", "' in strict mode"],
   strict_poison_pill:            ["'caller', 'callee', and 'arguments' properties may not be accessed on strict mode functions or the arguments objects for calls to them"],
   strict_caller:                 ["Illegal access to a strict mode caller function."],
+  malformed_arrow_function_parameter_list: ["Malformed arrow function parameter list"],
   generator_poison_pill:         ["'caller' and 'arguments' properties may not be accessed on generator functions."],
   unprotected_let:               ["Illegal let declaration in unprotected statement context."],
   unprotected_const:             ["Illegal const declaration in unprotected statement context."],
@@ -160,9 +170,11 @@
   harmony_const_assign:          ["Assignment to constant variable."],
   symbol_to_string:              ["Cannot convert a Symbol value to a string"],
   symbol_to_primitive:           ["Cannot convert a Symbol wrapper object to a primitive value"],
+  symbol_to_number:              ["Cannot convert a Symbol value to a number"],
   invalid_module_path:           ["Module does not export '", "%0", "', or export is not itself a module"],
   module_type_error:             ["Module '", "%0", "' used improperly"],
-  module_export_undefined:       ["Export '", "%0", "' is not defined in module"]
+  module_export_undefined:       ["Export '", "%0", "' is not defined in module"],
+  unexpected_super:              ["'super' keyword unexpected here"]
 };
 
 
@@ -208,6 +220,7 @@
     }
     return str;
   }
+  if (IS_SYMBOL(obj)) return %_CallFunction(obj, SymbolToString);
   if (IS_OBJECT(obj) && %GetDataProperty(obj, "toString") === ObjectToString) {
     var constructor = %GetDataProperty(obj, "constructor");
     if (typeof constructor == "function") {
@@ -282,8 +295,8 @@
  * Set up the Script function and constructor.
  */
 %FunctionSetInstanceClassName(Script, 'Script');
-%SetProperty(Script.prototype, 'constructor', Script,
-             DONT_ENUM | DONT_DELETE | READ_ONLY);
+%AddNamedProperty(Script.prototype, 'constructor', Script,
+                  DONT_ENUM | DONT_DELETE | READ_ONLY);
 %SetCode(Script, function(x) {
   // Script objects can only be created by the VM.
   throw new $Error("Not supported");
@@ -555,44 +568,16 @@
   if (this.line_offset > 0 || this.column_offset > 0) {
     return this.name;
   }
-
-  // The result is cached, as on long scripts it takes noticeable time to
-  // search for the sourceURL.
-  if (this.hasCachedNameOrSourceURL) {
-    return this.cachedNameOrSourceURL;
+  if (this.source_url) {
+    return this.source_url;
   }
-  this.hasCachedNameOrSourceURL = true;
-
-  // TODO(608): the spaces in a regexp below had to be escaped as \040
-  // because this file is being processed by js2c whose handling of spaces
-  // in regexps is broken. Also, ['"] are excluded from allowed URLs to
-  // avoid matches against sources that invoke evals with sourceURL.
-  // A better solution would be to detect these special comments in
-  // the scanner/parser.
-  var source = ToString(this.source);
-  var sourceUrlPos = %StringIndexOf(source, "sourceURL=", 0);
-  this.cachedNameOrSourceURL = this.name;
-  if (sourceUrlPos > 4) {
-    var sourceUrlPattern =
-        /\/\/[#@][\040\t]sourceURL=[\040\t]*([^\s\'\"]*)[\040\t]*$/gm;
-    // Don't reuse lastMatchInfo here, so we create a new array with room
-    // for four captures (array with length one longer than the index
-    // of the fourth capture, where the numbering is zero-based).
-    var matchInfo = new InternalArray(CAPTURE(3) + 1);
-    var match =
-        %_RegExpExec(sourceUrlPattern, source, sourceUrlPos - 4, matchInfo);
-    if (match) {
-      this.cachedNameOrSourceURL =
-          %_SubString(source, matchInfo[CAPTURE(2)], matchInfo[CAPTURE(3)]);
-    }
-  }
-  return this.cachedNameOrSourceURL;
+  return this.name;
 }
 
 
 SetUpLockedPrototype(Script,
-  $Array("source", "name", "line_ends", "line_offset", "column_offset",
-         "cachedNameOrSourceURL", "hasCachedNameOrSourceURL" ),
+  $Array("source", "name", "source_url", "source_mapping_url", "line_ends",
+         "line_offset", "column_offset"),
   $Array(
     "lineFromPosition", ScriptLineFromPosition,
     "locationFromPosition", ScriptLocationFromPosition,
@@ -772,10 +757,10 @@
 // ----------------------------------------------------------------------------
 // Error implementation
 
-var CallSiteReceiverKey = NEW_PRIVATE("CallSite#receiver");
-var CallSiteFunctionKey = NEW_PRIVATE("CallSite#function");
-var CallSitePositionKey = NEW_PRIVATE("CallSite#position");
-var CallSiteStrictModeKey = NEW_PRIVATE("CallSite#strict_mode");
+var CallSiteReceiverKey = NEW_PRIVATE_OWN("CallSite#receiver");
+var CallSiteFunctionKey = NEW_PRIVATE_OWN("CallSite#function");
+var CallSitePositionKey = NEW_PRIVATE_OWN("CallSite#position");
+var CallSiteStrictModeKey = NEW_PRIVATE_OWN("CallSite#strict_mode");
 
 function CallSite(receiver, fun, pos, strict_mode) {
   SET_PRIVATE(this, CallSiteReceiverKey, receiver);
@@ -1076,7 +1061,8 @@
 var formatting_custom_stack_trace = false;
 
 
-function FormatStackTrace(obj, error_string, frames) {
+function FormatStackTrace(obj, raw_stack) {
+  var frames = GetStackFrames(raw_stack);
   if (IS_FUNCTION($Error.prepareStackTrace) && !formatting_custom_stack_trace) {
     var array = [];
     %MoveArrayContents(frames, array);
@@ -1093,7 +1079,7 @@
   }
 
   var lines = new InternalArray();
-  lines.push(error_string);
+  lines.push(FormatErrorString(obj));
   for (var i = 0; i < frames.length; i++) {
     var frame = frames[i];
     var line;
@@ -1128,45 +1114,55 @@
 }
 
 
-function captureStackTrace(obj, cons_opt) {
-  var stackTraceLimit = $Error.stackTraceLimit;
-  if (!stackTraceLimit || !IS_NUMBER(stackTraceLimit)) return;
-  if (stackTraceLimit < 0 || stackTraceLimit > 10000) {
-    stackTraceLimit = 10000;
-  }
-  var stack = %CollectStackTrace(obj,
-                                 cons_opt ? cons_opt : captureStackTrace,
-                                 stackTraceLimit);
+var stack_trace_symbol;  // Set during bootstrapping.
+var formatted_stack_trace_symbol = NEW_PRIVATE_OWN("formatted stack trace");
 
-  var error_string = FormatErrorString(obj);
 
-  // Set the 'stack' property on the receiver.  If the receiver is the same as
-  // holder of this setter, the accessor pair is turned into a data property.
-  var setter = function(v) {
-    // Set data property on the receiver (not necessarily holder).
-    %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
-    if (this === obj) {
-      // Release context values if holder is the same as the receiver.
-      stack = error_string = UNDEFINED;
+// Format the stack trace if not yet done, and return it.
+// Cache the formatted stack trace on the holder.
+var StackTraceGetter = function() {
+  var formatted_stack_trace = UNDEFINED;
+  var holder = this;
+  while (holder) {
+    var formatted_stack_trace =
+      GET_PRIVATE(holder, formatted_stack_trace_symbol);
+    if (IS_UNDEFINED(formatted_stack_trace)) {
+      // No formatted stack trace available.
+      var stack_trace = GET_PRIVATE(holder, stack_trace_symbol);
+      if (IS_UNDEFINED(stack_trace)) {
+        // Neither formatted nor structured stack trace available.
+        // Look further up the prototype chain.
+        holder = %GetPrototype(holder);
+        continue;
+      }
+      formatted_stack_trace = FormatStackTrace(holder, stack_trace);
+      SET_PRIVATE(holder, stack_trace_symbol, UNDEFINED);
+      SET_PRIVATE(holder, formatted_stack_trace_symbol, formatted_stack_trace);
     }
-  };
+    return formatted_stack_trace;
+  }
+  return UNDEFINED;
+};
 
-  // The holder of this getter ('obj') may not be the receiver ('this').
-  // When this getter is called the first time, we use the context values to
-  // format a stack trace string and turn this accessor pair into a data
-  // property (on the holder).
-  var getter = function() {
-    // Stack is still a raw array awaiting to be formatted.
-    var result = FormatStackTrace(obj, error_string, GetStackFrames(stack));
-    // Replace this accessor to return result directly.
-    %DefineOrRedefineAccessorProperty(
-        obj, 'stack', function() { return result }, setter, DONT_ENUM);
-    // Release context values.
-    stack = error_string = UNDEFINED;
-    return result;
-  };
 
-  %DefineOrRedefineAccessorProperty(obj, 'stack', getter, setter, DONT_ENUM);
+// If the receiver equals the holder, set the formatted stack trace that the
+// getter returns.
+var StackTraceSetter = function(v) {
+  if (HAS_PRIVATE(this, stack_trace_symbol)) {
+    SET_PRIVATE(this, stack_trace_symbol, UNDEFINED);
+    SET_PRIVATE(this, formatted_stack_trace_symbol, v);
+  }
+};
+
+
+// Use a dummy function since we do not actually want to capture a stack trace
+// when constructing the initial Error prototypes.
+var captureStackTrace = function captureStackTrace(obj, cons_opt) {
+  // Define accessors first, as this may fail and throw.
+  ObjectDefineProperty(obj, 'stack', { get: StackTraceGetter,
+                                       set: StackTraceSetter,
+                                       configurable: true });
+  %CollectStackTrace(obj, cons_opt ? cons_opt : captureStackTrace);
 }
 
 
@@ -1181,8 +1177,9 @@
     // effects when overwriting the error functions from
     // user code.
     var name = f.name;
-    %SetProperty(global, name, f, DONT_ENUM);
-    %SetProperty(builtins, '$' + name, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
+    %AddNamedProperty(global, name, f, DONT_ENUM);
+    %AddNamedProperty(builtins, '$' + name, f,
+                      DONT_ENUM | DONT_DELETE | READ_ONLY);
     // Configure the error function.
     if (name == 'Error') {
       // The prototype of the Error object must itself be an error.
@@ -1197,19 +1194,18 @@
       %FunctionSetPrototype(f, new $Error());
     }
     %FunctionSetInstanceClassName(f, 'Error');
-    %SetProperty(f.prototype, 'constructor', f, DONT_ENUM);
-    %SetProperty(f.prototype, "name", name, DONT_ENUM);
+    %AddNamedProperty(f.prototype, 'constructor', f, DONT_ENUM);
+    %AddNamedProperty(f.prototype, "name", name, DONT_ENUM);
     %SetCode(f, function(m) {
       if (%_IsConstructCall()) {
         // Define all the expected properties directly on the error
         // object. This avoids going through getters and setters defined
         // on prototype objects.
-        %IgnoreAttributesAndSetProperty(this, 'stack', UNDEFINED, DONT_ENUM);
+        %AddNamedProperty(this, 'stack', UNDEFINED, DONT_ENUM);
         if (!IS_UNDEFINED(m)) {
-          %IgnoreAttributesAndSetProperty(
-            this, 'message', ToString(m), DONT_ENUM);
+          %AddNamedProperty(this, 'message', ToString(m), DONT_ENUM);
         }
-        captureStackTrace(this, f);
+        try { captureStackTrace(this, f); } catch (e) { }
       } else {
         return new f(m);
       }
@@ -1230,7 +1226,7 @@
 
 $Error.captureStackTrace = captureStackTrace;
 
-%SetProperty($Error.prototype, 'message', '', DONT_ENUM);
+%AddNamedProperty($Error.prototype, 'message', '', DONT_ENUM);
 
 // Global list of error objects visited during ErrorToString. This is
 // used to detect cycles in error toString formatting.
@@ -1302,40 +1298,8 @@
 function SetUpStackOverflowBoilerplate() {
   var boilerplate = MakeRangeError('stack_overflow', []);
 
-  var error_string = boilerplate.name + ": " + boilerplate.message;
-
-  // Set the 'stack' property on the receiver.  If the receiver is the same as
-  // holder of this setter, the accessor pair is turned into a data property.
-  var setter = function(v) {
-    %DefineOrRedefineDataProperty(this, 'stack', v, NONE);
-    // Tentatively clear the hidden property. If the receiver is the same as
-    // holder, we release the raw stack trace this way.
-    %GetAndClearOverflowedStackTrace(this);
-  };
-
-  // The raw stack trace is stored as a hidden property on the holder of this
-  // getter, which may not be the same as the receiver.  Find the holder to
-  // retrieve the raw stack trace and then turn this accessor pair into a
-  // data property.
-  var getter = function() {
-    var holder = this;
-    while (!IS_ERROR(holder)) {
-      holder = %GetPrototype(holder);
-      if (IS_NULL(holder)) return MakeSyntaxError('illegal_access', []);
-    }
-    var stack = %GetAndClearOverflowedStackTrace(holder);
-    // We may not have captured any stack trace.
-    if (IS_UNDEFINED(stack)) return stack;
-
-    var result = FormatStackTrace(holder, error_string, GetStackFrames(stack));
-    // Replace this accessor to return result directly.
-    %DefineOrRedefineAccessorProperty(
-        holder, 'stack', function() { return result }, setter, DONT_ENUM);
-    return result;
-  };
-
-  %DefineOrRedefineAccessorProperty(
-      boilerplate, 'stack', getter, setter, DONT_ENUM);
+  %DefineAccessorPropertyUnchecked(
+      boilerplate, 'stack', StackTraceGetter, StackTraceSetter, DONT_ENUM);
 
   return boilerplate;
 }
diff --git a/src/mips/OWNERS b/src/mips/OWNERS
index 2dc1d77..5508ba6 100644
--- a/src/mips/OWNERS
+++ b/src/mips/OWNERS
@@ -1,5 +1,5 @@
-plind44@gmail.com
-gergely@homejinni.com
-palfia@homejinni.com
-kilvadyb@homejinni.com
-Dusan.Milosavljevic@rt-rk.com
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/src/mips/assembler-mips-inl.h b/src/mips/assembler-mips-inl.h
index eec19a6..2666f6a 100644
--- a/src/mips/assembler-mips-inl.h
+++ b/src/mips/assembler-mips-inl.h
@@ -39,7 +39,7 @@
 
 #include "src/mips/assembler-mips.h"
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/debug.h"
 
 
@@ -100,11 +100,11 @@
 
 
 int FPURegister::ToAllocationIndex(FPURegister reg) {
-  ASSERT(reg.code() % 2 == 0);
-  ASSERT(reg.code() / 2 < kMaxNumAllocatableRegisters);
-  ASSERT(reg.is_valid());
-  ASSERT(!reg.is(kDoubleRegZero));
-  ASSERT(!reg.is(kLithiumScratchDouble));
+  DCHECK(reg.code() % 2 == 0);
+  DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
+  DCHECK(reg.is_valid());
+  DCHECK(!reg.is(kDoubleRegZero));
+  DCHECK(!reg.is(kLithiumScratchDouble));
   return (reg.code() / 2);
 }
 
@@ -125,19 +125,19 @@
     // Absolute code pointer inside code object moves with the code object.
     byte* p = reinterpret_cast<byte*>(pc_);
     int count = Assembler::RelocateInternalReference(p, delta);
-    CPU::FlushICache(p, count * sizeof(uint32_t));
+    CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
   }
 }
 
 
 Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) ||
+  DCHECK(IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) ||
          rmode_ == EMBEDDED_OBJECT ||
          rmode_ == EXTERNAL_REFERENCE);
@@ -174,7 +174,7 @@
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL && IsCodeTarget(rmode_)) {
@@ -190,14 +190,19 @@
 }
 
 
+Address Assembler::break_address_from_return_address(Address pc) {
+  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
+}
+
+
 Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
 }
 
 
 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Handle<Object>(reinterpret_cast<Object**>(
       Assembler::target_address_at(pc_, host_)));
 }
@@ -206,8 +211,7 @@
 void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  ASSERT(!target->IsConsString());
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Assembler::set_target_address_at(pc_, host_,
                                    reinterpret_cast<Address>(target),
                                    icache_flush_mode);
@@ -221,13 +225,13 @@
 
 
 Address RelocInfo::target_reference() {
-  ASSERT(rmode_ == EXTERNAL_REFERENCE);
+  DCHECK(rmode_ == EXTERNAL_REFERENCE);
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   return target_address();
 }
 
@@ -235,21 +239,21 @@
 void RelocInfo::set_target_runtime_entry(Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target)
     set_target_address(target, write_barrier_mode, icache_flush_mode);
 }
 
 
 Handle<Cell> RelocInfo::target_cell_handle() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = Memory::Address_at(pc_);
   return Handle<Cell>(reinterpret_cast<Cell**>(address));
 }
 
 
 Cell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   return Cell::FromValueAddress(Memory::Address_at(pc_));
 }
 
@@ -257,7 +261,7 @@
 void RelocInfo::set_target_cell(Cell* cell,
                                 WriteBarrierMode write_barrier_mode,
                                 ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
@@ -279,7 +283,7 @@
 
 
 Code* RelocInfo::code_age_stub() {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   return Code::GetCodeFromTargetAddress(
       Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
 }
@@ -287,7 +291,7 @@
 
 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
                                    host_,
                                    stub->instruction_start());
@@ -295,7 +299,7 @@
 
 
 Address RelocInfo::call_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   // The pc_ offset of 0 assumes the mips patched return sequence per
   // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
@@ -305,7 +309,7 @@
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   // The pc_ offset of 0 assumes the mips patched return sequence per
   // debug-mips.cc BreakLocationIterator::SetDebugBreakAtReturn(), or
@@ -325,7 +329,7 @@
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
 }
@@ -337,7 +341,7 @@
 
 
 void RelocInfo::WipeOut() {
-  ASSERT(IsEmbeddedObject(rmode_) ||
+  DCHECK(IsEmbeddedObject(rmode_) ||
          IsCodeTarget(rmode_) ||
          IsRuntimeEntry(rmode_) ||
          IsExternalReference(rmode_));
diff --git a/src/mips/assembler-mips.cc b/src/mips/assembler-mips.cc
index e4bebfe..f1e5dfb 100644
--- a/src/mips/assembler-mips.cc
+++ b/src/mips/assembler-mips.cc
@@ -37,6 +37,8 @@
 
 #if V8_TARGET_ARCH_MIPS
 
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
 #include "src/mips/assembler-mips-inl.h"
 #include "src/serialize.h"
 
@@ -65,7 +67,7 @@
 
 
 const char* DoubleRegister::AllocationIndexToString(int index) {
-  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
   const char* const names[] = {
     "f0",
     "f2",
@@ -97,10 +99,32 @@
 #ifndef __mips__
   // For the simulator build, use FPU.
   supported_ |= 1u << FPU;
+#if defined(_MIPS_ARCH_MIPS32R6)
+  // FP64 mode is implied on r6.
+  supported_ |= 1u << FP64FPU;
+#endif
+#if defined(FPU_MODE_FP64)
+  supported_ |= 1u << FP64FPU;
+#endif
 #else
   // Probe for additional features at runtime.
-  CPU cpu;
+  base::CPU cpu;
   if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#if defined(FPU_MODE_FPXX)
+  if (cpu.is_fp64_mode()) supported_ |= 1u << FP64FPU;
+#elif defined(FPU_MODE_FP64)
+  supported_ |= 1u << FP64FPU;
+#endif
+#if defined(_MIPS_ARCH_MIPS32RX)
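+  // Map the runtime-detected architecture level to ISA revision flags:
+  // level 6 is r6, level 2 implies r1 and r2, anything else counts as r1.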
+  if (cpu.architecture() == 6) {
+    supported_ |= 1u << MIPSr6;
+  } else if (cpu.architecture() == 2) {
+    supported_ |= 1u << MIPSr1;
+    supported_ |= 1u << MIPSr2;
+  } else {
+    supported_ |= 1u << MIPSr1;
+  }
+#endif
 #endif
 }
 
@@ -110,7 +134,7 @@
 
 
 int ToNumber(Register reg) {
-  ASSERT(reg.is_valid());
+  DCHECK(reg.is_valid());
   const int kNumbers[] = {
     0,    // zero_reg
     1,    // at
@@ -150,7 +174,7 @@
 
 
 Register ToRegister(int num) {
-  ASSERT(num >= 0 && num < kNumRegisters);
+  DCHECK(num >= 0 && num < kNumRegisters);
   const Register kRegisters[] = {
     zero_reg,
     at,
@@ -198,7 +222,7 @@
   }
 
   // Indicate that code has changed.
-  CPU::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
 }
 
 
@@ -220,7 +244,7 @@
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
   if (obj->IsHeapObject()) {
-    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
     imm32_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
   } else {
@@ -305,7 +329,7 @@
 
 
 void Assembler::GetCode(CodeDesc* desc) {
-  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
+  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
@@ -316,7 +340,7 @@
 
 
 void Assembler::Align(int m) {
-  ASSERT(m >= 4 && IsPowerOf2(m));
+  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
   while ((pc_offset() & (m - 1)) != 0) {
     nop();
   }
@@ -483,7 +507,9 @@
       opcode == BGTZL ||
       (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
                             rt_field == BLTZAL || rt_field == BGEZAL)) ||
-      (opcode == COP1 && rs_field == BC1);  // Coprocessor branch.
+      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
+      (opcode == COP1 && rs_field == BC1EQZ) ||
+      (opcode == COP1 && rs_field == BC1NEZ);
 }
 
 
@@ -528,12 +554,18 @@
 
 
 bool Assembler::IsJr(Instr instr) {
-  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+  if (!IsMipsArchVariant(kMips32r6)) {
+    return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+  } else {
+    return GetOpcodeField(instr) == SPECIAL &&
+        GetRdField(instr) == 0 && GetFunctionField(instr) == JALR;
+  }
 }
 
 
 bool Assembler::IsJalr(Instr instr) {
-  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
+  return GetOpcodeField(instr) == SPECIAL &&
+         GetRdField(instr) != 0 && GetFunctionField(instr) == JALR;
 }
 
 
@@ -553,7 +585,7 @@
 
 bool Assembler::IsNop(Instr instr, unsigned int type) {
   // See Assembler::nop(type).
-  ASSERT(type < 32);
+  DCHECK(type < 32);
   uint32_t opcode = GetOpcodeField(instr);
   uint32_t function = GetFunctionField(instr);
   uint32_t rt = GetRt(instr);
@@ -576,7 +608,7 @@
 
 
 int32_t Assembler::GetBranchOffset(Instr instr) {
-  ASSERT(IsBranch(instr));
+  DCHECK(IsBranch(instr));
   return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
 }
 
@@ -587,13 +619,13 @@
 
 
 int16_t Assembler::GetLwOffset(Instr instr) {
-  ASSERT(IsLw(instr));
+  DCHECK(IsLw(instr));
   return ((instr & kImm16Mask));
 }
 
 
 Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
-  ASSERT(IsLw(instr));
+  DCHECK(IsLw(instr));
 
   // We actually create a new lw instruction based on the original one.
   Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
@@ -609,7 +641,7 @@
 
 
 Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
-  ASSERT(IsSw(instr));
+  DCHECK(IsSw(instr));
   return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
 }
 
@@ -620,7 +652,7 @@
 
 
 Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
-  ASSERT(IsAddImmediate(instr));
+  DCHECK(IsAddImmediate(instr));
   return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
 }
 
@@ -642,7 +674,7 @@
      }
   }
   // Check we have a branch or jump instruction.
-  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
   // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
   // the compiler uses arithmetic shifts for signed integers.
   if (IsBranch(instr)) {
@@ -657,7 +689,7 @@
   } else if (IsLui(instr)) {
     Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
     Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
-    ASSERT(IsOri(instr_ori));
+    DCHECK(IsOri(instr_ori));
     int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
     imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
 
@@ -667,7 +699,7 @@
     } else {
       uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
       int32_t delta = instr_address - imm;
-      ASSERT(pos > delta);
+      DCHECK(pos > delta);
       return pos - delta;
     }
   } else {
@@ -679,7 +711,7 @@
       uint32_t instr_address = reinterpret_cast<int32_t>(buffer_ + pos);
       instr_address &= kImm28Mask;
       int32_t delta = instr_address - imm28;
-      ASSERT(pos > delta);
+      DCHECK(pos > delta);
       return pos - delta;
     }
   }
@@ -689,29 +721,29 @@
 void Assembler::target_at_put(int32_t pos, int32_t target_pos) {
   Instr instr = instr_at(pos);
   if ((instr & ~kImm16Mask) == 0) {
-    ASSERT(target_pos == kEndOfChain || target_pos >= 0);
+    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
     // Emitted label constant, not part of a branch.
     // Make label relative to Code* of generated Code object.
     instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
     return;
   }
 
-  ASSERT(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
   if (IsBranch(instr)) {
     int32_t imm18 = target_pos - (pos + kBranchPCOffset);
-    ASSERT((imm18 & 3) == 0);
+    DCHECK((imm18 & 3) == 0);
 
     instr &= ~kImm16Mask;
     int32_t imm16 = imm18 >> 2;
-    ASSERT(is_int16(imm16));
+    DCHECK(is_int16(imm16));
 
     instr_at_put(pos, instr | (imm16 & kImm16Mask));
   } else if (IsLui(instr)) {
     Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
     Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
-    ASSERT(IsOri(instr_ori));
+    DCHECK(IsOri(instr_ori));
     uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
-    ASSERT((imm & 3) == 0);
+    DCHECK((imm & 3) == 0);
 
     instr_lui &= ~kImm16Mask;
     instr_ori &= ~kImm16Mask;
@@ -723,11 +755,11 @@
   } else {
     uint32_t imm28 = reinterpret_cast<uint32_t>(buffer_) + target_pos;
     imm28 &= kImm28Mask;
-    ASSERT((imm28 & 3) == 0);
+    DCHECK((imm28 & 3) == 0);
 
     instr &= ~kImm26Mask;
     uint32_t imm26 = imm28 >> 2;
-    ASSERT(is_uint26(imm26));
+    DCHECK(is_uint26(imm26));
 
     instr_at_put(pos, instr | (imm26 & kImm26Mask));
   }
@@ -759,7 +791,7 @@
 
 
 void Assembler::bind_to(Label* L, int pos) {
-  ASSERT(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
+  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
   int32_t trampoline_pos = kInvalidSlotPos;
   if (L->is_linked() && !trampoline_emitted_) {
     unbound_labels_count_--;
@@ -777,14 +809,14 @@
           trampoline_pos = get_trampoline_entry(fixup_pos);
           CHECK(trampoline_pos != kInvalidSlotPos);
         }
-        ASSERT((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+        DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
         target_at_put(fixup_pos, trampoline_pos);
         fixup_pos = trampoline_pos;
         dist = pos - fixup_pos;
       }
       target_at_put(fixup_pos, pos);
     } else {
-      ASSERT(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
+      DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
       target_at_put(fixup_pos, pos);
     }
   }
@@ -798,18 +830,18 @@
 
 
 void Assembler::bind(Label* L) {
-  ASSERT(!L->is_bound());  // Label can only be bound once.
+  DCHECK(!L->is_bound());  // Label can only be bound once.
   bind_to(L, pc_offset());
 }
 
 
 void Assembler::next(Label* L) {
-  ASSERT(L->is_linked());
+  DCHECK(L->is_linked());
   int link = target_at(L->pos());
   if (link == kEndOfChain) {
     L->Unuse();
   } else {
-    ASSERT(link >= 0);
+    DCHECK(link >= 0);
     L->link_to(link);
   }
 }
@@ -837,7 +869,7 @@
                                  Register rd,
                                  uint16_t sa,
                                  SecondaryField func) {
-  ASSERT(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
+  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
   Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
       | (rd.code() << kRdShift) | (sa << kSaShift) | func;
   emit(instr);
@@ -850,7 +882,7 @@
                                  uint16_t msb,
                                  uint16_t lsb,
                                  SecondaryField func) {
-  ASSERT(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
+  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
   Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
       | (msb << kRdShift) | (lsb << kSaShift) | func;
   emit(instr);
@@ -863,7 +895,7 @@
                                  FPURegister fs,
                                  FPURegister fd,
                                  SecondaryField func) {
-  ASSERT(fd.is_valid() && fs.is_valid() && ft.is_valid());
+  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
   Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
       | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -876,7 +908,7 @@
                                  FPURegister fs,
                                  FPURegister fd,
                                  SecondaryField func) {
-  ASSERT(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
+  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
   Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
       | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -889,7 +921,7 @@
                                  FPURegister fs,
                                  FPURegister fd,
                                  SecondaryField func) {
-  ASSERT(fd.is_valid() && fs.is_valid() && rt.is_valid());
+  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
   Instr instr = opcode | fmt | (rt.code() << kRtShift)
       | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
   emit(instr);
@@ -901,7 +933,7 @@
                                  Register rt,
                                  FPUControlRegister fs,
                                  SecondaryField func) {
-  ASSERT(fs.is_valid() && rt.is_valid());
+  DCHECK(fs.is_valid() && rt.is_valid());
   Instr instr =
       opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
   emit(instr);
@@ -914,7 +946,7 @@
                                   Register rs,
                                   Register rt,
                                   int32_t j) {
-  ASSERT(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
+  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
   Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
       | (j & kImm16Mask);
   emit(instr);
@@ -925,7 +957,7 @@
                                   Register rs,
                                   SecondaryField SF,
                                   int32_t j) {
-  ASSERT(rs.is_valid() && (is_int16(j) || is_uint16(j)));
+  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
   Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
   emit(instr);
 }
@@ -935,7 +967,7 @@
                                   Register rs,
                                   FPURegister ft,
                                   int32_t j) {
-  ASSERT(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
   Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
       | (j & kImm16Mask);
   emit(instr);
@@ -945,7 +977,7 @@
 void Assembler::GenInstrJump(Opcode opcode,
                              uint32_t address) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
-  ASSERT(is_uint26(address));
+  DCHECK(is_uint26(address));
   Instr instr = opcode | address;
   emit(instr);
   BlockTrampolinePoolFor(1);  // For associated delay slot.
@@ -985,7 +1017,7 @@
   }
 
   uint32_t imm = reinterpret_cast<uint32_t>(buffer_) + target_pos;
-  ASSERT((imm & 3) == 0);
+  DCHECK((imm & 3) == 0);
 
   return imm;
 }
@@ -1011,8 +1043,90 @@
   }
 
   int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
-  ASSERT((offset & 3) == 0);
-  ASSERT(is_int16(offset >> 2));
+  DCHECK((offset & 3) == 0);
+  DCHECK(is_int16(offset >> 2));
+
+  return offset;
+}
+
+
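+// Offset for r6 compact branches: they have no delay slot, so the offset is
+// computed relative to the branch instruction itself (no kBranchPCOffset).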
+int32_t Assembler::branch_offset_compact(Label* L,
+    bool jump_elimination_allowed) {
+  int32_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+      return kEndOfChain;
+    }
+  }
+
+  int32_t offset = target_pos - pc_offset();
+  DCHECK((offset & 3) == 0);
+  DCHECK(is_int16(offset >> 2));
+
+  return offset;
+}
+
+
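+// As branch_offset(), but for r6 branches with a 21-bit offset field.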
+int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+  int32_t target_pos;
+
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+      return kEndOfChain;
+    }
+  }
+
+  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+  DCHECK((offset & 3) == 0);
+  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset must fit in 21 bits.
+
+  return offset;
+}
+
+
+int32_t Assembler::branch_offset21_compact(Label* L,
+    bool jump_elimination_allowed) {
+  int32_t target_pos;
+
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+      return kEndOfChain;
+    }
+  }
+
+  int32_t offset = target_pos - pc_offset();
+  DCHECK((offset & 3) == 0);
+  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset must fit in 21 bits.
 
   return offset;
 }
@@ -1027,9 +1141,9 @@
     if (L->is_linked()) {
       target_pos = L->pos();  // L's link.
       int32_t imm18 = target_pos - at_offset;
-      ASSERT((imm18 & 3) == 0);
+      DCHECK((imm18 & 3) == 0);
       int32_t imm16 = imm18 >> 2;
-      ASSERT(is_int16(imm16));
+      DCHECK(is_int16(imm16));
       instr_at_put(at_offset, (imm16 & kImm16Mask));
     } else {
       target_pos = kEndOfChain;
@@ -1071,7 +1185,33 @@
 }
 
 
+void Assembler::bgezc(Register rt, int16_t offset) {
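+  // The r6 compact branches reuse pre-r6 branch opcodes; the register field
+  // combination selects the operation. Here BLEZL with rs == rt encodes
+  // bgezc.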
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BLEZL, rt, rt, offset);
+}
+
+
+void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(!(rt.is(zero_reg)));
+  DCHECK(rs.code() != rt.code());
+  GenInstrImmediate(BLEZ, rs, rt, offset);
+}
+
+
+void Assembler::bgec(Register rs, Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(!(rt.is(zero_reg)));
+  DCHECK(rs.code() != rt.code());
+  GenInstrImmediate(BLEZL, rs, rt, offset);
+}
+
+
 void Assembler::bgezal(Register rs, int16_t offset) {
+  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
   BlockTrampolinePoolScope block_trampoline_pool(this);
   positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
@@ -1086,6 +1226,13 @@
 }
 
 
+void Assembler::bgtzc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+}
+
+
 void Assembler::blez(Register rs, int16_t offset) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   GenInstrImmediate(BLEZ, rs, zero_reg, offset);
@@ -1093,6 +1240,38 @@
 }
 
 
+void Assembler::blezc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+}
+
+
+void Assembler::bltzc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BGTZL, rt, rt, offset);
+}
+
+
+void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(!(rt.is(zero_reg)));
+  DCHECK(rs.code() != rt.code());
+  GenInstrImmediate(BGTZ, rs, rt, offset);
+}
+
+
+void Assembler::bltc(Register rs, Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(!(rt.is(zero_reg)));
+  DCHECK(rs.code() != rt.code());
+  GenInstrImmediate(BGTZL, rs, rt, offset);
+}
+
+
 void Assembler::bltz(Register rs, int16_t offset) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   GenInstrImmediate(REGIMM, rs, BLTZ, offset);
@@ -1101,6 +1280,7 @@
 
 
 void Assembler::bltzal(Register rs, int16_t offset) {
+  DCHECK(!IsMipsArchVariant(kMips32r6) || rs.is(zero_reg));
   BlockTrampolinePoolScope block_trampoline_pool(this);
   positions_recorder()->WriteRecordedPositions();
   GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
@@ -1115,25 +1295,124 @@
 }
 
 
+void Assembler::bovc(Register rs, Register rt, int16_t offset) {
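+  // Branch on signed-addition overflow; r6 encodes bovc (and bnvc below)
+  // with the retired ADDI and DADDI opcodes.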
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(rs.code() >= rt.code());
+  GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(rs.code() >= rt.code());
+  GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::blezalc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::bgezalc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BLEZ, rt, rt, offset);
+}
+
+
+void Assembler::bgezall(Register rs, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+}
+
+
+void Assembler::bltzalc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BGTZ, rt, rt, offset);
+}
+
+
+void Assembler::bgtzalc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqzalc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(ADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::bnezalc(Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(DADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqc(Register rs, Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(rs.code() < rt.code());
+  GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::beqzc(Register rs, int32_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
+  emit(instr);
+}
+
+
+void Assembler::bnec(Register rs, Register rt, int16_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(rs.code() < rt.code());
+  GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnezc(Register rs, int32_t offset) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK(!(rs.is(zero_reg)));
+  Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
+  emit(instr);
+}
+
+
 void Assembler::j(int32_t target) {
 #if DEBUG
   // Get pc of delay slot.
   uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
   bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
-  ASSERT(in_range && ((target & 3) == 0));
+  DCHECK(in_range && ((target & 3) == 0));
 #endif
   GenInstrJump(J, target >> 2);
 }
 
 
 void Assembler::jr(Register rs) {
-  BlockTrampolinePoolScope block_trampoline_pool(this);
-  if (rs.is(ra)) {
-    positions_recorder()->WriteRecordedPositions();
+  if (!IsMipsArchVariant(kMips32r6)) {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    if (rs.is(ra)) {
+      positions_recorder()->WriteRecordedPositions();
+    }
+    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+    BlockTrampolinePoolFor(1);  // For associated delay slot.
+  } else {
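+    // r6 removed the jr encoding; jalr with rd = zero_reg takes its place.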
+    jalr(rs, zero_reg);
   }
-  GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
-  BlockTrampolinePoolFor(1);  // For associated delay slot.
 }
 
 
@@ -1143,7 +1422,7 @@
   uint32_t ipc = reinterpret_cast<uint32_t>(pc_ + 1 * kInstrSize);
   bool in_range = (ipc ^ static_cast<uint32_t>(target) >>
                   (kImm26Bits + kImmFieldShift)) == 0;
-  ASSERT(in_range && ((target & 3) == 0));
+  DCHECK(in_range && ((target & 3) == 0));
 #endif
   positions_recorder()->WriteRecordedPositions();
   GenInstrJump(JAL, target >> 2);
@@ -1204,7 +1483,41 @@
 
 
 void Assembler::mul(Register rd, Register rs, Register rt) {
-  GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
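+  // r6 re-encodes mul under SPECIAL and adds muh/mulu/muhu for the high word
+  // and unsigned forms; div/mod below expose quotient and remainder directly.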
+  if (!IsMipsArchVariant(kMips32r6)) {
+    GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+  } else {
+    GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
+  }
+}
+
+
+void Assembler::mulu(Register rd, Register rs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
+}
+
+
+void Assembler::muh(Register rd, Register rs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
+}
+
+
+void Assembler::muhu(Register rd, Register rs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
+}
+
+
+void Assembler::mod(Register rd, Register rs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
+}
+
+
+void Assembler::modu(Register rd, Register rs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
 }
 
 
@@ -1223,11 +1536,23 @@
 }
 
 
+void Assembler::div(Register rd, Register rs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
+}
+
+
 void Assembler::divu(Register rs, Register rt) {
   GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
 }
 
 
+void Assembler::divu(Register rd, Register rs, Register rt) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
+}
+
+
 // Logical.
 
 void Assembler::and_(Register rd, Register rs, Register rt) {
@@ -1236,7 +1561,7 @@
 
 
 void Assembler::andi(Register rt, Register rs, int32_t j) {
-  ASSERT(is_uint16(j));
+  DCHECK(is_uint16(j));
   GenInstrImmediate(ANDI, rs, rt, j);
 }
 
@@ -1247,7 +1572,7 @@
 
 
 void Assembler::ori(Register rt, Register rs, int32_t j) {
-  ASSERT(is_uint16(j));
+  DCHECK(is_uint16(j));
   GenInstrImmediate(ORI, rs, rt, j);
 }
 
@@ -1258,7 +1583,7 @@
 
 
 void Assembler::xori(Register rt, Register rs, int32_t j) {
-  ASSERT(is_uint16(j));
+  DCHECK(is_uint16(j));
   GenInstrImmediate(XORI, rs, rt, j);
 }
 
@@ -1277,7 +1602,7 @@
   // generated using the sll instruction. They must be generated using
   // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
   // instructions.
-  ASSERT(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
+  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
   GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
 }
 
@@ -1309,8 +1634,8 @@
 
 void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
   // Should be called via MacroAssembler::Ror.
-  ASSERT(rd.is_valid() && rt.is_valid() && is_uint5(sa));
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+  DCHECK(IsMipsArchVariant(kMips32r2));
   Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
       | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
   emit(instr);
@@ -1319,8 +1644,8 @@
 
 void Assembler::rotrv(Register rd, Register rt, Register rs) {
   // Should be called via MacroAssembler::Ror.
-  ASSERT(rd.is_valid() && rt.is_valid() && rs.is_valid() );
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+  DCHECK(IsMipsArchVariant(kMips32r2));
   Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
      | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
   emit(instr);
@@ -1331,7 +1656,7 @@
 
 // Helper for base-reg + offset when the offset does not fit in an int16.
 void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
-  ASSERT(!src.rm().is(at));
+  DCHECK(!src.rm().is(at));
   lui(at, (src.offset_ >> kLuiShift) & kImm16Mask);
   ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
   addu(at, at, src.rm());  // Add base register.
@@ -1439,20 +1764,28 @@
 
 
 void Assembler::lui(Register rd, int32_t j) {
-  ASSERT(is_uint16(j));
+  DCHECK(is_uint16(j));
   GenInstrImmediate(LUI, zero_reg, rd, j);
 }
 
 
+void Assembler::aui(Register rs, Register rt, int32_t j) {
+  // This instruction uses the same opcode as 'lui'; the encodings differ only
+  // in that 'lui' has the zero register in the rs field.
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(LUI, rs, rt, j);
+}
+
+
 // -------------Misc-instructions--------------
 
 // Break / Trap instructions.
 void Assembler::break_(uint32_t code, bool break_as_stop) {
-  ASSERT((code & ~0xfffff) == 0);
+  DCHECK((code & ~0xfffff) == 0);
   // We need to invalidate breaks that could be stops as well because the
   // simulator expects a char pointer after the stop instruction.
   // See constants-mips.h for explanation.
-  ASSERT((break_as_stop &&
+  DCHECK((break_as_stop &&
           code <= kMaxStopCode &&
           code > kMaxWatchpointCode) ||
          (!break_as_stop &&
@@ -1464,8 +1797,8 @@
 
 
 void Assembler::stop(const char* msg, uint32_t code) {
-  ASSERT(code > kMaxWatchpointCode);
-  ASSERT(code <= kMaxStopCode);
+  DCHECK(code > kMaxWatchpointCode);
+  DCHECK(code <= kMaxStopCode);
 #if V8_HOST_ARCH_MIPS
   break_(0x54321);
 #else  // V8_HOST_ARCH_MIPS
@@ -1479,7 +1812,7 @@
 
 
 void Assembler::tge(Register rs, Register rt, uint16_t code) {
-  ASSERT(is_uint10(code));
+  DCHECK(is_uint10(code));
   Instr instr = SPECIAL | TGE | rs.code() << kRsShift
       | rt.code() << kRtShift | code << 6;
   emit(instr);
@@ -1487,7 +1820,7 @@
 
 
 void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
-  ASSERT(is_uint10(code));
+  DCHECK(is_uint10(code));
   Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
       | rt.code() << kRtShift | code << 6;
   emit(instr);
@@ -1495,7 +1828,7 @@
 
 
 void Assembler::tlt(Register rs, Register rt, uint16_t code) {
-  ASSERT(is_uint10(code));
+  DCHECK(is_uint10(code));
   Instr instr =
       SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   emit(instr);
@@ -1503,7 +1836,7 @@
 
 
 void Assembler::tltu(Register rs, Register rt, uint16_t code) {
-  ASSERT(is_uint10(code));
+  DCHECK(is_uint10(code));
   Instr instr =
       SPECIAL | TLTU | rs.code() << kRsShift
       | rt.code() << kRtShift | code << 6;
@@ -1512,7 +1845,7 @@
 
 
 void Assembler::teq(Register rs, Register rt, uint16_t code) {
-  ASSERT(is_uint10(code));
+  DCHECK(is_uint10(code));
   Instr instr =
       SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   emit(instr);
@@ -1520,7 +1853,7 @@
 
 
 void Assembler::tne(Register rs, Register rt, uint16_t code) {
-  ASSERT(is_uint10(code));
+  DCHECK(is_uint10(code));
   Instr instr =
       SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
   emit(instr);
@@ -1587,15 +1920,19 @@
 
 // Bit twiddling.
 void Assembler::clz(Register rd, Register rs) {
-  // Clz instr requires same GPR number in 'rd' and 'rt' fields.
-  GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+  if (!IsMipsArchVariant(kMips32r6)) {
+    // Clz instr requires same GPR number in 'rd' and 'rt' fields.
+    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+  } else {
+    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
+  }
 }
 
 
 void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   // Should be called via MacroAssembler::Ins.
   // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
   GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
 }
 
@@ -1603,14 +1940,14 @@
 void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
   // Should be called via MacroAssembler::Ext.
   // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6));
   GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
 }
 
 
 void Assembler::pref(int32_t hint, const MemOperand& rs) {
-  ASSERT(kArchVariant != kLoongson);
-  ASSERT(is_uint5(hint) && is_uint16(rs.offset_));
+  DCHECK(!IsMipsArchVariant(kLoongson));
+  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
   Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
       | (rs.offset_);
   emit(instr);
@@ -1628,12 +1965,20 @@
 void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
   // Workaround for the non-8-byte alignment of HeapNumber: convert the
   // 64-bit load into two 32-bit loads.
-  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
-      Register::kMantissaOffset);
-  FPURegister nextfpreg;
-  nextfpreg.setcode(fd.code() + 1);
-  GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
-      Register::kExponentOffset);
+  if (IsFp64Mode()) {
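+    // In FP64 mode the odd sibling register cannot hold the high word, so it
+    // is loaded into 'at' and moved in with mthc1.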
+    GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+        Register::kMantissaOffset);
+    GenInstrImmediate(LW, src.rm(), at, src.offset_ +
+        Register::kExponentOffset);
+    mthc1(at, fd);
+  } else {
+    GenInstrImmediate(LWC1, src.rm(), fd, src.offset_ +
+        Register::kMantissaOffset);
+    FPURegister nextfpreg;
+    nextfpreg.setcode(fd.code() + 1);
+    GenInstrImmediate(LWC1, src.rm(), nextfpreg, src.offset_ +
+        Register::kExponentOffset);
+  }
 }
 
 
@@ -1645,12 +1990,20 @@
 void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
   // Workaround for the non-8-byte alignment of HeapNumber: convert the
   // 64-bit store into two 32-bit stores.
-  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
-      Register::kMantissaOffset);
-  FPURegister nextfpreg;
-  nextfpreg.setcode(fd.code() + 1);
-  GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
-      Register::kExponentOffset);
+  if (IsFp64Mode()) {
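+    // Mirror of ldc1: the high word is read back with mfhc1 and stored
+    // through 'at'.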
+    GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+        Register::kMantissaOffset);
+    mfhc1(at, fd);
+    GenInstrImmediate(SW, src.rm(), at, src.offset_ +
+        Register::kExponentOffset);
+  } else {
+    GenInstrImmediate(SWC1, src.rm(), fd, src.offset_ +
+        Register::kMantissaOffset);
+    FPURegister nextfpreg;
+    nextfpreg.setcode(fd.code() + 1);
+    GenInstrImmediate(SWC1, src.rm(), nextfpreg, src.offset_ +
+        Register::kExponentOffset);
+  }
 }
 
 
@@ -1659,11 +2012,21 @@
 }
 
 
+void Assembler::mthc1(Register rt, FPURegister fs) {
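+  // Move rt into the high half of the 64-bit FPU register fs.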
+  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
 void Assembler::mfc1(Register rt, FPURegister fs) {
   GenInstrRegister(COP1, MFC1, rt, fs, f0);
 }
 
 
+void Assembler::mfhc1(Register rt, FPURegister fs) {
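+  // Move the high half of the 64-bit FPU register fs into rt.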
+  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
 void Assembler::ctc1(Register rt, FPUControlRegister fs) {
   GenInstrRegister(COP1, CTC1, rt, fs);
 }
@@ -1784,25 +2147,25 @@
 
 
 void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(IsMipsArchVariant(kMips32r2));
   GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
 }
 
 
 void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(IsMipsArchVariant(kMips32r2));
   GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
 }
 
 
 void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(IsMipsArchVariant(kMips32r2));
   GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
 }
 
 
 void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(IsMipsArchVariant(kMips32r2));
   GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
 }
 
@@ -1837,13 +2200,45 @@
 }
 
 
+void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
+    FPURegister fs) {
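+  // r6 IEEE 754-2008 minimum; mina/maxa below compare absolute values.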
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
+}
+
+
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
+    FPURegister fs) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
+}
+
+
+void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
+    FPURegister fs) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
+}
+
+
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
+    FPURegister fs) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
+}
+
+
 void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
   GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
 }
 
 
 void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(IsMipsArchVariant(kMips32r2));
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
 }
 
@@ -1859,7 +2254,7 @@
 
 
 void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
-  ASSERT(kArchVariant == kMips32r2);
+  DCHECK(IsMipsArchVariant(kMips32r2));
   GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
 }
 
@@ -1869,11 +2264,36 @@
 }
 
 
-// Conditions.
+// Conditions for MIPS r6 and newer.
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
+    FPURegister fd, FPURegister fs, FPURegister ft) {
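+  // Writes an all-ones (true) or all-zeros (false) mask into fd; bc1eqz and
+  // bc1nez below then branch on bit 0 of their FPU operand.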
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  DCHECK((fmt & ~(31 << kRsShift)) == 0);
+  Instr instr = COP1 | fmt | ft.code() << kFtShift |
+      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+  emit(instr);
+}
+
+
+void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::bc1nez(int16_t offset, FPURegister ft) {
+  DCHECK(IsMipsArchVariant(kMips32r6));
+  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+// Conditions for architectures older than MIPS r6.
 void Assembler::c(FPUCondition cond, SecondaryField fmt,
     FPURegister fs, FPURegister ft, uint16_t cc) {
-  ASSERT(is_uint3(cc));
-  ASSERT((fmt & ~(31 << kRsShift)) == 0);
+  DCHECK(is_uint3(cc));
+  DCHECK((fmt & ~(31 << kRsShift)) == 0);
   Instr instr = COP1 | fmt | ft.code() << 16 | fs.code() << kFsShift
       | cc << 8 | 3 << 4 | cond;
   emit(instr);
@@ -1882,7 +2302,7 @@
 
 void Assembler::fcmp(FPURegister src1, const double src2,
       FPUCondition cond) {
-  ASSERT(src2 == 0.0);
+  DCHECK(src2 == 0.0);
   mtc1(zero_reg, f14);
   cvt_d_w(f14, f14);
   c(cond, D, src1, f14, 0);
@@ -1890,14 +2310,14 @@
 
 
 void Assembler::bc1f(int16_t offset, uint16_t cc) {
-  ASSERT(is_uint3(cc));
+  DCHECK(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
   emit(instr);
 }
 
 
 void Assembler::bc1t(int16_t offset, uint16_t cc) {
-  ASSERT(is_uint3(cc));
+  DCHECK(is_uint3(cc));
   Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
   emit(instr);
 }
@@ -1928,18 +2348,18 @@
 
 int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
   Instr instr = instr_at(pc);
-  ASSERT(IsJ(instr) || IsLui(instr));
+  DCHECK(IsJ(instr) || IsLui(instr));
   if (IsLui(instr)) {
     Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
     Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
-    ASSERT(IsOri(instr_ori));
+    DCHECK(IsOri(instr_ori));
     int32_t imm = (instr_lui & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
     imm |= (instr_ori & static_cast<int32_t>(kImm16Mask));
     if (imm == kEndOfJumpChain) {
       return 0;  // Number of instructions patched.
     }
     imm += pc_delta;
-    ASSERT((imm & 3) == 0);
+    DCHECK((imm & 3) == 0);
 
     instr_lui &= ~kImm16Mask;
     instr_ori &= ~kImm16Mask;
@@ -1956,11 +2376,11 @@
     }
     imm28 += pc_delta;
     imm28 &= kImm28Mask;
-    ASSERT((imm28 & 3) == 0);
+    DCHECK((imm28 & 3) == 0);
 
     instr &= ~kImm26Mask;
     uint32_t imm26 = imm28 >> 2;
-    ASSERT(is_uint26(imm26));
+    DCHECK(is_uint26(imm26));
 
     instr_at_put(pc, instr | (imm26 & kImm26Mask));
     return 1;  // Number of instructions patched.
@@ -1973,9 +2393,7 @@
 
   // Compute new buffer size.
   CodeDesc desc;  // The new buffer.
-  if (buffer_size_ < 4*KB) {
-    desc.buffer_size = 4*KB;
-  } else if (buffer_size_ < 1*MB) {
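+  // Double the buffer while it is under 1 MB; grow by 1 MB steps after that.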
+  if (buffer_size_ < 1 * MB) {
     desc.buffer_size = 2*buffer_size_;
   } else {
     desc.buffer_size = buffer_size_ + 1*MB;
@@ -2012,7 +2430,7 @@
     }
   }
 
-  ASSERT(!overflow());
+  DCHECK(!overflow());
 }
 
 
@@ -2043,7 +2461,7 @@
   RelocInfo rinfo(pc_, rmode, data, NULL);
   if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
     // Adjust code for new modes.
-    ASSERT(RelocInfo::IsDebugBreakSlot(rmode)
+    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
            || RelocInfo::IsJSReturn(rmode)
            || RelocInfo::IsComment(rmode)
            || RelocInfo::IsPosition(rmode));
@@ -2055,7 +2473,7 @@
         !serializer_enabled() && !emit_debug_code()) {
       return;
     }
-    ASSERT(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
+    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
     if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
       RelocInfo reloc_info_with_ast_id(pc_,
                                        rmode,
@@ -2093,8 +2511,8 @@
     return;
   }
 
-  ASSERT(!trampoline_emitted_);
-  ASSERT(unbound_labels_count_ >= 0);
+  DCHECK(!trampoline_emitted_);
+  DCHECK(unbound_labels_count_ >= 0);
   if (unbound_labels_count_ > 0) {
     // First we emit jump (2 instructions), then we emit trampoline pool.
     { BlockTrampolinePoolScope block_trampoline_pool(this);
@@ -2156,7 +2574,7 @@
 // snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
 // base::OS::nan_value() returns a qNaN.
 void Assembler::QuietNaN(HeapObject* object) {
-  HeapNumber::cast(object)->set_value(OS::nan_value());
+  HeapNumber::cast(object)->set_value(base::OS::nan_value());
 }
 
 
@@ -2185,7 +2603,7 @@
   // lui rt, upper-16.
   // ori rt rt, lower-16.
   *p = LUI | rt_code | ((itarget & kHiMask) >> kLuiShift);
-  *(p+1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
+  *(p + 1) = ORI | rt_code | (rt_code << 5) | (itarget & kImm16Mask);
 
   // The following code is an optimization for the common case of Call()
   // or Jump() which is load to register, and jump through register:
@@ -2228,20 +2646,20 @@
   if (IsJalr(instr3)) {
     // Try to convert JALR to JAL.
     if (in_range && GetRt(instr2) == GetRs(instr3)) {
-      *(p+2) = JAL | target_field;
+      *(p + 2) = JAL | target_field;
       patched_jump = true;
     }
   } else if (IsJr(instr3)) {
     // Try to convert JR to J, skip returns (jr ra).
     bool is_ret = static_cast<int>(GetRs(instr3)) == ra.code();
     if (in_range && !is_ret && GetRt(instr2) == GetRs(instr3)) {
-      *(p+2) = J | target_field;
+      *(p + 2) = J | target_field;
       patched_jump = true;
     }
   } else if (IsJal(instr3)) {
     if (in_range) {
       // We are patching an already converted JAL.
-      *(p+2) = JAL | target_field;
+      *(p + 2) = JAL | target_field;
     } else {
       // Patch JAL, but out of range, revert to JALR.
       // JALR rs reg is the rt reg specified in the ORI instruction.
@@ -2253,18 +2671,22 @@
   } else if (IsJ(instr3)) {
     if (in_range) {
       // We are patching an already converted J (jump).
-      *(p+2) = J | target_field;
+      *(p + 2) = J | target_field;
     } else {
       // Trying patch J, but out of range, just go back to JR.
       // JR 'rs' reg is the 'rt' reg specified in the ORI instruction (instr2).
       uint32_t rs_field = GetRt(instr2) << kRsShift;
-      *(p+2) = SPECIAL | rs_field | JR;
+      if (IsMipsArchVariant(kMips32r6)) {
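+        // r6 has no JR encoding; JALR with rd = zero_reg serves as JR.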
+        *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+      } else {
+        *(p + 2) = SPECIAL | rs_field | JR;
+      }
     }
     patched_jump = true;
   }
 
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
+    CpuFeatures::FlushICache(pc, (patched_jump ? 3 : 2) * sizeof(int32_t));
   }
 }
 
@@ -2281,38 +2703,42 @@
   bool patched = false;
 
   if (IsJal(instr3)) {
-    ASSERT(GetOpcodeField(instr1) == LUI);
-    ASSERT(GetOpcodeField(instr2) == ORI);
+    DCHECK(GetOpcodeField(instr1) == LUI);
+    DCHECK(GetOpcodeField(instr2) == ORI);
 
     uint32_t rs_field = GetRt(instr2) << kRsShift;
     uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
-    *(p+2) = SPECIAL | rs_field | rd_field | JALR;
+    *(p + 2) = SPECIAL | rs_field | rd_field | JALR;
     patched = true;
   } else if (IsJ(instr3)) {
-    ASSERT(GetOpcodeField(instr1) == LUI);
-    ASSERT(GetOpcodeField(instr2) == ORI);
+    DCHECK(GetOpcodeField(instr1) == LUI);
+    DCHECK(GetOpcodeField(instr2) == ORI);
 
     uint32_t rs_field = GetRt(instr2) << kRsShift;
-    *(p+2) = SPECIAL | rs_field | JR;
+    if (IsMipsArchVariant(kMips32r6)) {
+      *(p + 2) = SPECIAL | rs_field | (zero_reg.code() << kRdShift) | JALR;
+    } else {
+      *(p + 2) = SPECIAL | rs_field | JR;
+    }
     patched = true;
   }
 
   if (patched) {
-      CPU::FlushICache(pc+2, sizeof(Address));
+    CpuFeatures::FlushICache(pc + 2, sizeof(Address));
   }
 }
 
 
 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return isolate->factory()->empty_constant_pool_array();
 }
 
 
 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return;
 }
 
diff --git a/src/mips/assembler-mips.h b/src/mips/assembler-mips.h
index 2ba3ef7..5cdf16a 100644
--- a/src/mips/assembler-mips.h
+++ b/src/mips/assembler-mips.h
@@ -90,7 +90,7 @@
   inline static int NumAllocatableRegisters();
 
   static int ToAllocationIndex(Register reg) {
-    ASSERT((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
+    DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
            reg.is(from_code(kCpRegister)));
     return reg.is(from_code(kCpRegister)) ?
            kMaxNumAllocatableRegisters - 1 :  // Return last index for 'cp'.
@@ -98,14 +98,14 @@
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     return index == kMaxNumAllocatableRegisters - 1 ?
            from_code(kCpRegister) :  // Last index is always the 'cp' register.
            from_code(index + 2);  // zero_reg and 'at' are skipped.
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "v0",
       "v1",
@@ -133,11 +133,11 @@
   bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
   bool is(Register reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
 
@@ -226,7 +226,7 @@
   static const char* AllocationIndexToString(int index);
 
   static FPURegister FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     return from_code(index * 2);
   }
 
@@ -239,32 +239,32 @@
   bool is(FPURegister creg) const { return code_ == creg.code_; }
   FPURegister low() const {
     // Find low reg of a Double-reg pair, which is the reg itself.
-    ASSERT(code_ % 2 == 0);  // Specified Double reg must be even.
+    DCHECK(code_ % 2 == 0);  // Specified Double reg must be even.
     FPURegister reg;
     reg.code_ = code_;
-    ASSERT(reg.is_valid());
+    DCHECK(reg.is_valid());
     return reg;
   }
   FPURegister high() const {
     // Find high reg of a Double-reg pair, which is reg + 1.
-    ASSERT(code_ % 2 == 0);  // Specified Double reg must be even.
+    DCHECK(code_ % 2 == 0);  // Specified Double reg must be even.
     FPURegister reg;
     reg.code_ = code_ + 1;
-    ASSERT(reg.is_valid());
+    DCHECK(reg.is_valid());
     return reg;
   }
 
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
   void setcode(int f) {
     code_ = f;
-    ASSERT(is_valid());
+    DCHECK(is_valid());
   }
   // Unfortunately we can't make this private in a struct.
   int code_;
@@ -328,6 +328,8 @@
 #define kLithiumScratchReg2 s4
 #define kLithiumScratchDouble f30
 #define kDoubleRegZero f28
+// Used on mips32r6 for compare operations.
+#define kDoubleCompareReg f31
 
 // FPU (coprocessor 1) control registers.
 // Currently only FCSR (#31) is implemented.
@@ -335,16 +337,16 @@
   bool is_valid() const { return code_ == kFCSRRegister; }
   bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
   void setcode(int f) {
     code_ = f;
-    ASSERT(is_valid());
+    DCHECK(is_valid());
   }
   // Unfortunately we can't make this private in a struct.
   int code_;
@@ -377,7 +379,7 @@
   INLINE(bool is_reg() const);
 
   inline int32_t immediate() const {
-    ASSERT(!is_reg());
+    DCHECK(!is_reg());
     return imm32_;
   }
 
@@ -465,9 +467,18 @@
   // position. Links the label to the current position if it is still unbound.
   // Manages the jump elimination optimization if the second parameter is true.
   int32_t branch_offset(Label* L, bool jump_elimination_allowed);
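+  // r6 compact-branch offset helpers: no delay-slot adjustment; the *21
+  // forms are for branches with a 21-bit offset field (beqzc/bnezc).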
+  int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
+  int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
+  int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
   int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
     int32_t o = branch_offset(L, jump_elimination_allowed);
-    ASSERT((o & 3) == 0);   // Assert the offset is aligned.
+    DCHECK((o & 3) == 0);   // Assert the offset is aligned.
+    return o >> 2;
+  }
+  int32_t shifted_branch_offset_compact(Label* L,
+      bool jump_elimination_allowed) {
+    int32_t o = branch_offset_compact(L, jump_elimination_allowed);
+    DCHECK((o & 3) == 0);   // Assert the offset is aligned.
     return o >> 2;
   }
   uint32_t jump_address(Label* L);
@@ -511,6 +522,9 @@
   // of that call in the instruction stream.
   inline static Address target_address_from_return_address(Address pc);
 
+  // Return the address of the patched debug break slot.
+  inline static Address break_address_from_return_address(Address pc);
+
   static void JumpLabelToJumpRegister(Address pc);
 
   static void QuietNaN(HeapObject* nan);
@@ -606,7 +620,7 @@
   // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
   // marking, to avoid conflict with ssnop and ehb instructions.
   void nop(unsigned int type = 0) {
-    ASSERT(type < 32);
+    DCHECK(type < 32);
     Register nop_rt_reg = (type == 0) ? zero_reg : at;
     sll(zero_reg, nop_rt_reg, type, true);
   }
@@ -624,15 +638,99 @@
     beq(rs, rt, branch_offset(L, false) >> 2);
   }
   void bgez(Register rs, int16_t offset);
+  void bgezc(Register rt, int16_t offset);
+  void bgezc(Register rt, Label* L) {
+    bgezc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgeuc(Register rs, Register rt, int16_t offset);
+  void bgeuc(Register rs, Register rt, Label* L) {
+    bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgec(Register rs, Register rt, int16_t offset);
+  void bgec(Register rs, Register rt, Label* L) {
+    bgec(rs, rt, branch_offset_compact(L, false)>>2);
+  }
   void bgezal(Register rs, int16_t offset);
+  void bgezalc(Register rt, int16_t offset);
+  void bgezalc(Register rt, Label* L) {
+    bgezalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgezall(Register rs, int16_t offset);
+  void bgezall(Register rs, Label* L) {
+    bgezall(rs, branch_offset(L, false)>>2);
+  }
   void bgtz(Register rs, int16_t offset);
+  void bgtzc(Register rt, int16_t offset);
+  void bgtzc(Register rt, Label* L) {
+    bgtzc(rt, branch_offset_compact(L, false)>>2);
+  }
   void blez(Register rs, int16_t offset);
+  void blezc(Register rt, int16_t offset);
+  void blezc(Register rt, Label* L) {
+    blezc(rt, branch_offset_compact(L, false)>>2);
+  }
   void bltz(Register rs, int16_t offset);
+  void bltzc(Register rt, int16_t offset);
+  void bltzc(Register rt, Label* L) {
+    bltzc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bltuc(Register rs, Register rt, int16_t offset);
+  void bltuc(Register rs, Register rt, Label* L) {
+    bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bltc(Register rs, Register rt, int16_t offset);
+  void bltc(Register rs, Register rt, Label* L) {
+    bltc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
   void bltzal(Register rs, int16_t offset);
+  void blezalc(Register rt, int16_t offset);
+  void blezalc(Register rt, Label* L) {
+    blezalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bltzalc(Register rt, int16_t offset);
+  void bltzalc(Register rt, Label* L) {
+    bltzalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgtzalc(Register rt, int16_t offset);
+  void bgtzalc(Register rt, Label* L) {
+    bgtzalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void beqzalc(Register rt, int16_t offset);
+  void beqzalc(Register rt, Label* L) {
+    beqzalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void beqc(Register rs, Register rt, int16_t offset);
+  void beqc(Register rs, Register rt, Label* L) {
+    beqc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void beqzc(Register rs, int32_t offset);
+  void beqzc(Register rs, Label* L) {
+    beqzc(rs, branch_offset21_compact(L, false)>>2);
+  }
+  void bnezalc(Register rt, int16_t offset);
+  void bnezalc(Register rt, Label* L) {
+    bnezalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bnec(Register rs, Register rt, int16_t offset);
+  void bnec(Register rs, Register rt, Label* L) {
+    bnec(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bnezc(Register rt, int32_t offset);
+  void bnezc(Register rt, Label* L) {
+    bnezc(rt, branch_offset21_compact(L, false)>>2);
+  }
   void bne(Register rs, Register rt, int16_t offset);
   void bne(Register rs, Register rt, Label* L) {
     bne(rs, rt, branch_offset(L, false)>>2);
   }
+  void bovc(Register rs, Register rt, int16_t offset);
+  void bovc(Register rs, Register rt, Label* L) {
+    bovc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bnvc(Register rs, Register rt, int16_t offset);
+  void bnvc(Register rs, Register rt, Label* L) {
+    bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
 
   // Never use the int16_t b(l)cond version with a branch offset
   // instead of using the Label* version.
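Every Label* wrapper above shifts the byte displacement right by 2 before handing it to the int16_t overload: MIPS branch fields count 4-byte instruction words, and the compact beqzc/bnezc variants carry a wider 21-bit field, which is why they go through branch_offset21_compact. A hedged sketch of the conversion (helper name illustrative):

```cpp
#include <cassert>
#include <cstdint>

// Convert a byte displacement into the word displacement encoded in the
// instruction: 16 signed bits for ordinary branches, 21 for beqzc/bnezc.
int32_t ByteOffsetToWordOffset(int32_t byte_offset, int field_bits) {
  assert((byte_offset & 3) == 0);  // Instructions are 4-byte aligned.
  const int32_t words = byte_offset >> 2;
  const int32_t limit = 1 << (field_bits - 1);
  assert(words >= -limit && words < limit);  // Must fit the signed field.
  return words;
}
```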
@@ -655,7 +753,14 @@
   void multu(Register rs, Register rt);
   void div(Register rs, Register rt);
   void divu(Register rs, Register rt);
+  void div(Register rd, Register rs, Register rt);
+  void divu(Register rd, Register rs, Register rt);
+  void mod(Register rd, Register rs, Register rt);
+  void modu(Register rd, Register rs, Register rt);
   void mul(Register rd, Register rs, Register rt);
+  void muh(Register rd, Register rs, Register rt);
+  void mulu(Register rd, Register rs, Register rt);
+  void muhu(Register rd, Register rs, Register rt);
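These three-operand forms are the MIPS32r6 successors of the HI/LO-based multiply and divide: mul/muh write the low and high product words straight into rd, and div/mod (plus their unsigned twins) write quotient and remainder. A scalar sketch of the semantics as I read the r6 spec, not taken from this patch:

```cpp
#include <cstdint>

int32_t mul_lo(int32_t rs, int32_t rt) {   // mul: low 32 bits of product.
  return static_cast<int32_t>(static_cast<int64_t>(rs) *
                              static_cast<int64_t>(rt));
}

int32_t muh_hi(int32_t rs, int32_t rt) {   // muh: high 32 bits of product.
  return static_cast<int32_t>(
      (static_cast<int64_t>(rs) * static_cast<int64_t>(rt)) >> 32);
}

int32_t mod_rem(int32_t rs, int32_t rt) {  // mod: remainder of rs / rt.
  return rs % rt;  // rt == 0 is UNPREDICTABLE on hardware; callers guard it.
}
```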
 
   void addiu(Register rd, Register rs, int32_t j);
 
@@ -669,6 +774,7 @@
   void ori(Register rd, Register rs, int32_t j);
   void xori(Register rd, Register rs, int32_t j);
   void lui(Register rd, int32_t j);
+  void aui(Register rs, Register rt, int32_t j);
 
   // Shifts.
   // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nop
@@ -733,6 +839,15 @@
   void movt(Register rd, Register rs, uint16_t cc = 0);
   void movf(Register rd, Register rs, uint16_t cc = 0);
 
+  void sel(SecondaryField fmt, FPURegister fd, FPURegister ft,
+      FPURegister fs, uint8_t sel);
+  void seleqz(Register rs, Register rt, Register rd);
+  void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft,
+      FPURegister fs);
+  void selnez(Register rs, Register rt, Register rd);
+  void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft,
+      FPURegister fs);
+
   // Bit twiddling.
   void clz(Register rd, Register rs);
   void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
@@ -748,7 +863,10 @@
   void sdc1(FPURegister fs, const MemOperand& dst);
 
   void mtc1(Register rt, FPURegister fs);
+  void mthc1(Register rt, FPURegister fs);
+
   void mfc1(Register rt, FPURegister fs);
+  void mfhc1(Register rt, FPURegister fs);
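mtc1/mfc1 move a GPR to or from the low word of an FPU register; the new mthc1/mfhc1 address the high word, so a 64-bit double can be assembled from two GPRs. A hedged sketch of how the halves combine at the register level (memory endianness does not enter here):

```cpp
#include <cstdint>
#include <cstring>

// Combine the words written by mtc1 (low) and mthc1 (high) into the
// double value the FPU register then holds.
double PairToDouble(uint32_t lo, uint32_t hi) {
  const uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof d);  // Bit-cast, no numeric conversion.
  return d;
}
```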
 
   void ctc1(Register rt, FPUControlRegister fs);
   void cfc1(Register rt, FPUControlRegister fs);
@@ -787,6 +905,11 @@
   void ceil_l_s(FPURegister fd, FPURegister fs);
   void ceil_l_d(FPURegister fd, FPURegister fs);
 
+  void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+  void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+  void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+  void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
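min and max select by value while mina and maxa select by magnitude; these r6 instructions replace branchy compare-and-move sequences. A simplified scalar sketch (real hardware follows IEEE 754-2008 minNum/maxNum NaN rules, which this sketch glosses over):

```cpp
#include <cmath>

double min_fmt(double fs, double ft) { return fs < ft ? fs : ft; }
double max_fmt(double fs, double ft) { return fs > ft ? fs : ft; }

// mina/maxa compare absolute values but return the original operand.
double mina_fmt(double fs, double ft) {
  return std::fabs(fs) < std::fabs(ft) ? fs : ft;
}
double maxa_fmt(double fs, double ft) {
  return std::fabs(fs) > std::fabs(ft) ? fs : ft;
}
```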
+
   void cvt_s_w(FPURegister fd, FPURegister fs);
   void cvt_s_l(FPURegister fd, FPURegister fs);
   void cvt_s_d(FPURegister fd, FPURegister fs);
@@ -795,7 +918,20 @@
   void cvt_d_l(FPURegister fd, FPURegister fs);
   void cvt_d_s(FPURegister fd, FPURegister fs);
 
-  // Conditions and branches.
+  // Conditions and branches for MIPSr6.
+  void cmp(FPUCondition cond, SecondaryField fmt,
+         FPURegister fd, FPURegister ft, FPURegister fs);
+
+  void bc1eqz(int16_t offset, FPURegister ft);
+  void bc1eqz(Label* L, FPURegister ft) {
+    bc1eqz(branch_offset(L, false)>>2, ft);
+  }
+  void bc1nez(int16_t offset, FPURegister ft);
+  void bc1nez(Label* L, FPURegister ft) {
+    bc1nez(branch_offset(L, false)>>2, ft);
+  }
+
+  // Conditions and branches for non-MIPSr6.
   void c(FPUCondition cond, SecondaryField fmt,
          FPURegister ft, FPURegister fs, uint16_t cc = 0);
 
@@ -861,12 +997,12 @@
   // Record the AST id of the CallIC being compiled, so that it can be placed
   // in the relocation information.
   void SetRecordedAstId(TypeFeedbackId ast_id) {
-    ASSERT(recorded_ast_id_.IsNone());
+    DCHECK(recorded_ast_id_.IsNone());
     recorded_ast_id_ = ast_id;
   }
 
   TypeFeedbackId RecordedAstId() {
-    ASSERT(!recorded_ast_id_.IsNone());
+    DCHECK(!recorded_ast_id_.IsNone());
     return recorded_ast_id_;
   }
 
@@ -1021,12 +1157,12 @@
 
   // Temporarily block automatic assembly buffer growth.
   void StartBlockGrowBuffer() {
-    ASSERT(!block_buffer_growth_);
+    DCHECK(!block_buffer_growth_);
     block_buffer_growth_ = true;
   }
 
   void EndBlockGrowBuffer() {
-    ASSERT(block_buffer_growth_);
+    DCHECK(block_buffer_growth_);
     block_buffer_growth_ = false;
   }
 
@@ -1188,7 +1324,7 @@
         // We have run out of space on trampolines.
         // Make sure we fail in debug mode, so we become aware of each case
         // when this happens.
-        ASSERT(0);
+        DCHECK(0);
         // Internal exception will be caught.
       } else {
         trampoline_slot = next_slot_;
diff --git a/src/mips/builtins-mips.cc b/src/mips/builtins-mips.cc
index 800a79e..2813dd4 100644
--- a/src/mips/builtins-mips.cc
+++ b/src/mips/builtins-mips.cc
@@ -13,7 +13,7 @@
 #include "src/deoptimizer.h"
 #include "src/full-codegen.h"
 #include "src/runtime.h"
-#include "src/stub-cache.h"
+
 
 namespace v8 {
 namespace internal {
@@ -42,7 +42,7 @@
     num_extra_args = 1;
     __ push(a1);
   } else {
-    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
   // JumpToExternalReference expects s0 to contain the number of arguments
@@ -309,7 +309,7 @@
   __ LoadRoot(t0, Heap::kStackLimitRootIndex);
   __ Branch(&ok, hs, sp, Operand(t0));
 
-  CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
   GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
@@ -329,7 +329,7 @@
   // -----------------------------------
 
   // Should never create mementos for api functions.
-  ASSERT(!is_api_function || !create_memento);
+  DCHECK(!is_api_function || !create_memento);
 
   Isolate* isolate = masm->isolate();
 
@@ -393,11 +393,11 @@
         __ sw(t0, bit_field3);  // In delay slot.
 
         __ Push(a1, a2, a1);  // a1 = Constructor.
-        __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
 
         __ Pop(a1, a2);
         // Slack tracking counter is kNoSlackTracking after runtime call.
-        ASSERT(JSFunction::kNoSlackTracking == 0);
+        DCHECK(JSFunction::kNoSlackTracking == 0);
         __ mov(t2, zero_reg);
 
         __ bind(&allocate);
@@ -425,9 +425,9 @@
       __ sw(t6, MemOperand(t5, JSObject::kPropertiesOffset));
       __ sw(t6, MemOperand(t5, JSObject::kElementsOffset));
       __ Addu(t5, t5, Operand(3*kPointerSize));
-      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-      ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
-      ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+      DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
 
       // Fill all the in-object properties with appropriate filler.
       // a1: constructor function
@@ -436,7 +436,7 @@
       // t4: JSObject (not tagged)
       // t5: First in-object property of JSObject (not tagged)
       // t2: slack tracking counter (non-API function case)
-      ASSERT_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
 
       // Use t7 to hold undefined, which is used in several places below.
       __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
@@ -476,12 +476,12 @@
         // Fill in memento fields.
         // t5: points to the allocated but uninitialized memento.
         __ LoadRoot(t7, Heap::kAllocationMementoMapRootIndex);
-        ASSERT_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+        DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
         __ sw(t7, MemOperand(t5));
         __ Addu(t5, t5, kPointerSize);
         // Load the AllocationSite.
         __ lw(t7, MemOperand(sp, 2 * kPointerSize));
-        ASSERT_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+        DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
         __ sw(t7, MemOperand(t5));
         __ Addu(t5, t5, kPointerSize);
       } else {
@@ -541,8 +541,8 @@
       __ sw(a0, MemOperand(a2, FixedArray::kLengthOffset));
       __ Addu(a2, a2, Operand(2 * kPointerSize));
 
-      ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset);
-      ASSERT_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+      DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
 
       // Initialize the fields to undefined.
       // a1: constructor
@@ -552,7 +552,7 @@
       // t5: FixedArray (not tagged)
       __ sll(t3, a3, kPointerSizeLog2);
       __ addu(t6, a2, t3);  // End of object.
-      ASSERT_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
       { Label loop, entry;
         if (!is_api_function || create_memento) {
           __ LoadRoot(t7, Heap::kUndefinedValueRootIndex);
@@ -600,9 +600,9 @@
 
     __ push(a1);  // Argument for Runtime_NewObject.
     if (create_memento) {
-      __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
     } else {
-      __ CallRuntime(Runtime::kHiddenNewObject, 1);
+      __ CallRuntime(Runtime::kNewObject, 1);
     }
     __ mov(t4, v0);
 
@@ -824,8 +824,8 @@
 }
 
 
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
   GenerateTailCallToReturnedCode(masm);
 }
 
@@ -838,7 +838,7 @@
   // Whether to compile in a background thread.
   __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
 
-  __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
   // Restore receiver.
   __ Pop(a1);
 }
@@ -947,7 +947,7 @@
     // registers.
     __ MultiPush(kJSCallerSaved | kCalleeSaved);
     // Pass the function and deoptimization type to the runtime system.
-    __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ MultiPop(kJSCallerSaved | kCalleeSaved);
   }
 
@@ -973,7 +973,7 @@
     // Pass the function and deoptimization type to the runtime system.
     __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
     __ push(a0);
-    __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
   }
 
   // Get the full codegen state from the stack and untag it -> t2.
@@ -1055,7 +1055,7 @@
   __ Branch(&ok, hs, sp, Operand(at));
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+    __ CallRuntime(Runtime::kStackGuard, 0);
   }
   __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
           RelocInfo::CODE_TARGET);
@@ -1092,7 +1092,7 @@
   // a1: function
   Label shift_arguments;
   __ li(t0, Operand(0, RelocInfo::NONE32));  // Indicate regular JS_FUNCTION.
-  { Label convert_to_object, use_global_receiver, patch_receiver;
+  { Label convert_to_object, use_global_proxy, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
 
@@ -1118,9 +1118,9 @@
     __ JumpIfSmi(a2, &convert_to_object, t2);
 
     __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
-    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+    __ Branch(&use_global_proxy, eq, a2, Operand(a3));
     __ LoadRoot(a3, Heap::kNullValueRootIndex);
-    __ Branch(&use_global_receiver, eq, a2, Operand(a3));
+    __ Branch(&use_global_proxy, eq, a2, Operand(a3));
 
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
     __ GetObjectType(a2, a3, a3);
@@ -1139,16 +1139,17 @@
       __ sra(a0, a0, kSmiTagSize);  // Un-tag.
       // Leave internal frame.
     }
+
     // Restore the function to a1, and the flag to t0.
     __ sll(at, a0, kPointerSizeLog2);
     __ addu(at, sp, at);
     __ lw(a1, MemOperand(at));
-    __ li(t0, Operand(0, RelocInfo::NONE32));
-    __ Branch(&patch_receiver);
+    __ Branch(USE_DELAY_SLOT, &patch_receiver);
+    __ li(t0, Operand(0, RelocInfo::NONE32));  // In delay slot.
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ lw(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
-    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
 
     __ bind(&patch_receiver);
     __ sll(at, a0, kPointerSizeLog2);
@@ -1300,7 +1301,7 @@
 
     // Compute the receiver.
     // Do not transform the receiver for strict mode functions.
-    Label call_to_object, use_global_receiver;
+    Label call_to_object, use_global_proxy;
     __ lw(a2, FieldMemOperand(a2, SharedFunctionInfo::kCompilerHintsOffset));
     __ And(t3, a2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                                  kSmiTagSize)));
@@ -1313,9 +1314,9 @@
     // Compute the receiver in sloppy mode.
     __ JumpIfSmi(a0, &call_to_object);
     __ LoadRoot(a1, Heap::kNullValueRootIndex);
-    __ Branch(&use_global_receiver, eq, a0, Operand(a1));
+    __ Branch(&use_global_proxy, eq, a0, Operand(a1));
     __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    __ Branch(&use_global_receiver, eq, a0, Operand(a2));
+    __ Branch(&use_global_proxy, eq, a0, Operand(a2));
 
     // Check if the receiver is already a JavaScript object.
     // a0: receiver
@@ -1331,9 +1332,9 @@
     __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
     __ Branch(&push_receiver);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ lw(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
-    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalReceiverOffset));
+    __ lw(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset));
 
     // Push the receiver.
     // a0: receiver
diff --git a/src/mips/code-stubs-mips.cc b/src/mips/code-stubs-mips.cc
index b8f565b..a9c10b8 100644
--- a/src/mips/code-stubs-mips.cc
+++ b/src/mips/code-stubs-mips.cc
@@ -6,442 +6,86 @@
 
 #if V8_TARGET_ARCH_MIPS
 
+#include "src/base/bits.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
 #include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
+#include "src/runtime.h"
 
 namespace v8 {
 namespace internal {
 
 
-void FastNewClosureStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a2 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a1 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a3, a2, a1 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(
-          Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a3, a2, a1, a0 };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a2, a3 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a1, a0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = {a1, a0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a2, a1, a0 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
-}
-
-
-void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a1, a0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a1 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void StringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a0, a2 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a1, a0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a2, a1, a0 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a0, a1 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  Address entry =
-      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(entry);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(CompareNilIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
-  // register state
-  // a0 -- number of arguments
-  // a1 -- function
-  // a2 -- allocation site with elements kind
-  static Register registers_variable_args[] = { a1, a2, a0 };
-  static Register registers_no_args[] = { a1, a2 };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = a0;
-    descriptor->register_param_count_ = 3;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
-  // register state
-  // a0 -- number of arguments
-  // a1 -- constructor function
-  static Register registers_variable_args[] = { a1, a0 };
-  static Register registers_no_args[] = { a1 };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kInternalArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 1;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = a0;
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
-void ToBooleanStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a0 };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ToBooleanIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
-}
-
-
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
-}
-
-
-void StoreGlobalStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a1, a2, a0 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a0, a3, a1, a2 };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a1, a0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a2, a1, a0 };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { a1, a0 };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
-    static Register registers[] = { a1,  // JSFunction
-                                    cp,  // context
-                                    a0,  // actual number of arguments
-                                    a2,  // expected number of arguments
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // JSFunction
-        Representation::Tagged(),     // context
-        Representation::Integer32(),  // actual number of arguments
-        Representation::Integer32(),  // expected number of arguments
-    };
-    descriptor->register_param_count_ = 4;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::KeyedCall);
-    static Register registers[] = { cp,  // context
-                                    a2,  // key
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // key
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::NamedCall);
-    static Register registers[] = { cp,  // context
-                                    a2,  // name
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // name
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::CallHandler);
-    static Register registers[] = { cp,  // context
-                                    a0,  // receiver
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),  // context
-        Representation::Tagged(),  // receiver
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ApiFunctionCall);
-    static Register registers[] = { a0,  // callee
-                                    t0,  // call_data
-                                    a2,  // holder
-                                    a1,  // api_function_address
-                                    cp,  // context
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),    // callee
-        Representation::Tagged(),    // call_data
-        Representation::Tagged(),    // holder
-        Representation::External(),  // api_function_address
-        Representation::Tagged(),    // context
-    };
-    descriptor->register_param_count_ = 5;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
@@ -462,134 +106,32 @@
                                            Register rhs);
 
 
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
   isolate()->counters()->code_stubs()->Increment();
 
-  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
-  int param_count = descriptor->register_param_count_;
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetEnvironmentParameterCount();
   {
     // Call the runtime system in a fresh internal frame.
     FrameScope scope(masm, StackFrame::INTERNAL);
-    ASSERT(descriptor->register_param_count_ == 0 ||
-           a0.is(descriptor->register_params_[param_count - 1]));
+    DCHECK(param_count == 0 ||
+           a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
     // Push arguments, adjust sp.
     __ Subu(sp, sp, Operand(param_count * kPointerSize));
     for (int i = 0; i < param_count; ++i) {
       // Store argument to stack.
-      __ sw(descriptor->register_params_[i],
-            MemOperand(sp, (param_count-1-i) * kPointerSize));
+      __ sw(descriptor.GetEnvironmentParameterRegister(i),
+            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
     }
-    ExternalReference miss = descriptor->miss_handler();
-    __ CallExternalReference(miss, descriptor->register_param_count_);
+    __ CallExternalReference(miss, param_count);
   }
 
   __ Ret();
 }
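The store loop above lays the register parameters out as if they had been pushed right to left: parameter 0 lands deepest, the last parameter at the stack top. A small sketch of the offset computation (helper name illustrative):

```cpp
#include <cstddef>

// Parameter i is stored at sp + (param_count - 1 - i) * pointer_size.
size_t ParameterSlotOffset(int i, int param_count, size_t pointer_size) {
  return static_cast<size_t>(param_count - 1 - i) * pointer_size;
}
```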
 
 
-// Takes a Smi and converts to an IEEE 64 bit floating point value in two
-// registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
-// 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
-// scratch register.  Destroys the source register.  No GC occurs during this
-// stub so you don't have to set up the frame.
-class ConvertToDoubleStub : public PlatformCodeStub {
- public:
-  ConvertToDoubleStub(Isolate* isolate,
-                      Register result_reg_1,
-                      Register result_reg_2,
-                      Register source_reg,
-                      Register scratch_reg)
-      : PlatformCodeStub(isolate),
-        result1_(result_reg_1),
-        result2_(result_reg_2),
-        source_(source_reg),
-        zeros_(scratch_reg) { }
-
- private:
-  Register result1_;
-  Register result2_;
-  Register source_;
-  Register zeros_;
-
-  // Minor key encoding in 16 bits.
-  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
-  class OpBits: public BitField<Token::Value, 2, 14> {};
-
-  Major MajorKey() { return ConvertToDouble; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return  result1_.code() +
-           (result2_.code() << 4) +
-           (source_.code() << 8) +
-           (zeros_.code() << 12);
-  }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
-  Register exponent, mantissa;
-  if (kArchEndian == kLittle) {
-    exponent = result1_;
-    mantissa = result2_;
-  } else {
-    exponent = result2_;
-    mantissa = result1_;
-  }
-  Label not_special;
-  // Convert from Smi to integer.
-  __ sra(source_, source_, kSmiTagSize);
-  // Move sign bit from source to destination.  This works because the sign bit
-  // in the exponent word of the double has the same position and polarity as
-  // the 2's complement sign bit in a Smi.
-  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
-  __ And(exponent, source_, Operand(HeapNumber::kSignMask));
-  // Subtract from 0 if source was negative.
-  __ subu(at, zero_reg, source_);
-  __ Movn(source_, at, exponent);
-
-  // We have -1, 0 or 1, which we treat specially. Register source_ contains
-  // absolute value: it is either equal to 1 (special case of -1 and 1),
-  // greater than 1 (not a special case) or less than 1 (special case of 0).
-  __ Branch(&not_special, gt, source_, Operand(1));
-
-  // For 1 or -1 we need to or in the 0 exponent (biased to 1023).
-  const uint32_t exponent_word_for_1 =
-      HeapNumber::kExponentBias << HeapNumber::kExponentShift;
-  // Safe to use 'at' as dest reg here.
-  __ Or(at, exponent, Operand(exponent_word_for_1));
-  __ Movn(exponent, at, source_);  // Write exp when source not 0.
-  // 1, 0 and -1 all have 0 for the second word.
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(mantissa, zero_reg);
-
-  __ bind(&not_special);
-  // Count leading zeros.
-  // Gets the wrong answer for 0, but we already checked for that case above.
-  __ Clz(zeros_, source_);
-  // Compute exponent and or it into the exponent register.
-  // We use mantissa as a scratch register here.
-  __ li(mantissa, Operand(31 + HeapNumber::kExponentBias));
-  __ subu(mantissa, mantissa, zeros_);
-  __ sll(mantissa, mantissa, HeapNumber::kExponentShift);
-  __ Or(exponent, exponent, mantissa);
-
-  // Shift up the source chopping the top bit off.
-  __ Addu(zeros_, zeros_, Operand(1));
-  // This wouldn't work for 1.0 or -1.0 as the shift would be 32 which means 0.
-  __ sllv(source_, source_, zeros_);
-  // Compute lower part of fraction (last 12 bits).
-  __ sll(mantissa, source_, HeapNumber::kMantissaBitsInTopWord);
-  // And the top (top 20 bits).
-  __ srl(source_, source_, 32 - HeapNumber::kMantissaBitsInTopWord);
-
-  __ Ret(USE_DELAY_SLOT);
-  __ or_(exponent, exponent, source_);
-}
-
-
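For reference, the algorithm the deleted stub implemented (extract the sign, special-case -1/0/1, then count leading zeros to derive the exponent), restated in portable form; a sketch assuming a GCC/Clang-style __builtin_clz, not a replacement this patch introduces:

```cpp
#include <cstdint>

// Split a 32-bit integer into the two words of an IEEE-754 double:
// hi = sign | biased exponent | top 20 fraction bits, lo = low 32 bits.
void IntToDoubleWords(int32_t value, uint32_t* hi, uint32_t* lo) {
  const uint32_t bits = static_cast<uint32_t>(value);
  const uint32_t sign = bits & 0x80000000u;
  const uint32_t mag0 = sign ? (0u - bits) : bits;  // Wrap-safe abs.
  if (mag0 <= 1) {  // -1, 0 and 1: fraction is all zeros.
    *hi = sign | (mag0 ? (1023u << 20) : 0u);  // Exponent 0, bias 1023.
    *lo = 0u;
    return;
  }
  const int zeros = __builtin_clz(mag0);  // Compiler-specific intrinsic.
  const uint32_t exponent = 1023u + 31u - static_cast<uint32_t>(zeros);
  const uint32_t mag = mag0 << (zeros + 1);  // Drop the implicit 1 bit.
  *hi = sign | (exponent << 20) | (mag >> 12);  // Top 20 fraction bits.
  *lo = mag << 20;                              // Remaining 32 bits.
}
```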
 void DoubleToIStub::Generate(MacroAssembler* masm) {
   Label out_of_range, only_low, negate, done;
   Register input_reg = source();
@@ -746,32 +288,32 @@
   // We test for the special value that has a different exponent.
   STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
   // Test sign, and save for later conditionals.
-  __ And(sign_, the_int_, Operand(0x80000000u));
-  __ Branch(&max_negative_int, eq, the_int_, Operand(0x80000000u));
+  __ And(sign(), the_int(), Operand(0x80000000u));
+  __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
 
   // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
   // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
   uint32_t non_smi_exponent =
       (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-  __ li(scratch_, Operand(non_smi_exponent));
+  __ li(scratch(), Operand(non_smi_exponent));
   // Set the sign bit in scratch_ if the value was negative.
-  __ or_(scratch_, scratch_, sign_);
+  __ or_(scratch(), scratch(), sign());
   // Subtract from 0 if the value was negative.
-  __ subu(at, zero_reg, the_int_);
-  __ Movn(the_int_, at, sign_);
+  __ subu(at, zero_reg, the_int());
+  __ Movn(the_int(), at, sign());
   // We should be masking the implicit first digit of the mantissa away here,
   // but it just ends up combining harmlessly with the last digit of the
   // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
   // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
-  ASSERT(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
   const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-  __ srl(at, the_int_, shift_distance);
-  __ or_(scratch_, scratch_, at);
-  __ sw(scratch_, FieldMemOperand(the_heap_number_,
+  __ srl(at, the_int(), shift_distance);
+  __ or_(scratch(), scratch(), at);
+  __ sw(scratch(), FieldMemOperand(the_heap_number(),
                                    HeapNumber::kExponentOffset));
-  __ sll(scratch_, the_int_, 32 - shift_distance);
+  __ sll(scratch(), the_int(), 32 - shift_distance);
   __ Ret(USE_DELAY_SLOT);
-  __ sw(scratch_, FieldMemOperand(the_heap_number_,
+  __ sw(scratch(), FieldMemOperand(the_heap_number(),
                                    HeapNumber::kMantissaOffset));
 
   __ bind(&max_negative_int);
@@ -780,13 +322,13 @@
   // The actual mantissa bits stored are all 0 because the implicit most
   // significant 1 bit is not stored.
   non_smi_exponent += 1 << HeapNumber::kExponentShift;
-  __ li(scratch_, Operand(HeapNumber::kSignMask | non_smi_exponent));
-  __ sw(scratch_,
-        FieldMemOperand(the_heap_number_, HeapNumber::kExponentOffset));
-  __ mov(scratch_, zero_reg);
+  __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
+  __ sw(scratch(),
+        FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+  __ mov(scratch(), zero_reg);
   __ Ret(USE_DELAY_SLOT);
-  __ sw(scratch_,
-        FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
+  __ sw(scratch(),
+        FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
 }
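The fixed exponent works because every int32 that overflows a Smi has magnitude in [2^30, 2^31), i.e. it is 1.x * 2^30; only -2^31 itself needs the extra exponent step added in the max_negative_int path. The two constants, spelled out (names illustrative):

```cpp
#include <cstdint>

// The exponent field sits above the 20 high-word fraction bits; bias 1023.
const uint32_t kNonSmiExponentBits = (1023u + 30u) << 20;  // 1.x * 2^30
const uint32_t kMaxNegIntExponent = (1023u + 31u) << 20;   // exactly -2^31
```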
 
 
@@ -824,7 +366,7 @@
         __ Branch(&return_equal, ne, t4, Operand(ODDBALL_TYPE));
         __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
         __ Branch(&return_equal, ne, a0, Operand(t2));
-        ASSERT(is_int16(GREATER) && is_int16(LESS));
+        DCHECK(is_int16(GREATER) && is_int16(LESS));
         __ Ret(USE_DELAY_SLOT);
         if (cc == le) {
           // undefined <= undefined should fail.
@@ -838,7 +380,7 @@
   }
 
   __ bind(&return_equal);
-  ASSERT(is_int16(GREATER) && is_int16(LESS));
+  DCHECK(is_int16(GREATER) && is_int16(LESS));
   __ Ret(USE_DELAY_SLOT);
   if (cc == less) {
     __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
@@ -877,7 +419,7 @@
     if (cc != eq) {
       // All-zero means Infinity means equal.
       __ Ret(eq, v0, Operand(zero_reg));
-      ASSERT(is_int16(GREATER) && is_int16(LESS));
+      DCHECK(is_int16(GREATER) && is_int16(LESS));
       __ Ret(USE_DELAY_SLOT);
       if (cc == le) {
         __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
@@ -898,7 +440,7 @@
                                     Label* both_loaded_as_doubles,
                                     Label* slow,
                                     bool strict) {
-  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
          (lhs.is(a1) && rhs.is(a0)));
 
   Label lhs_is_smi;
@@ -1016,7 +558,7 @@
                                                      Register rhs,
                                                      Label* possible_strings,
                                                      Label* not_both_strings) {
-  ASSERT((lhs.is(a0) && rhs.is(a1)) ||
+  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
          (lhs.is(a1) && rhs.is(a0)));
 
   // a2 is object type of rhs.
@@ -1054,15 +596,14 @@
 }
 
 
-static void ICCompareStub_CheckInputType(MacroAssembler* masm,
-                                         Register input,
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
                                          Register scratch,
-                                         CompareIC::State expected,
+                                         CompareICState::State expected,
                                          Label* fail) {
   Label ok;
-  if (expected == CompareIC::SMI) {
+  if (expected == CompareICState::SMI) {
     __ JumpIfNotSmi(input, fail);
-  } else if (expected == CompareIC::NUMBER) {
+  } else if (expected == CompareICState::NUMBER) {
     __ JumpIfSmi(input, &ok);
     __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
                 DONT_DO_SMI_CHECK);
@@ -1076,14 +617,14 @@
 // On entry a1 and a2 are the values to be compared.
 // On exit a0 is 0, positive or negative to indicate the result of
 // the comparison.
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   Register lhs = a1;
   Register rhs = a0;
   Condition cc = GetCondition();
 
   Label miss;
-  ICCompareStub_CheckInputType(masm, lhs, a2, left_, &miss);
-  ICCompareStub_CheckInputType(masm, rhs, a3, right_, &miss);
+  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
+  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
 
   Label slow;  // Call builtin.
   Label not_smis, both_loaded_as_doubles;
@@ -1107,7 +648,7 @@
   // If either is a Smi (we know that not both are), then they can only
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
-  ASSERT_EQ(0, Smi::FromInt(0));
+  DCHECK_EQ(0, Smi::FromInt(0));
   __ And(t2, lhs, Operand(rhs));
   __ JumpIfNotSmi(t2, &not_smis, t0);
   // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
@@ -1135,23 +676,35 @@
 
   // Check if LESS condition is satisfied. If true, move conditionally
   // result to v0.
-  __ c(OLT, D, f12, f14);
-  __ Movt(v0, t0);
-  // Use previous check to store conditionally to v0 oposite condition
-  // (GREATER). If rhs is equal to lhs, this will be corrected in next
-  // check.
-  __ Movf(v0, t1);
-  // Check if EQUAL condition is satisfied. If true, move conditionally
-  // result to v0.
-  __ c(EQ, D, f12, f14);
-  __ Movt(v0, t2);
+  if (!IsMipsArchVariant(kMips32r6)) {
+    __ c(OLT, D, f12, f14);
+    __ Movt(v0, t0);
+    // Use the previous check to conditionally store the opposite condition
+    // (GREATER) to v0. If rhs is equal to lhs, this will be corrected in
+    // the next check.
+    __ Movf(v0, t1);
+    // Check if EQUAL condition is satisfied. If true, move conditionally
+    // result to v0.
+    __ c(EQ, D, f12, f14);
+    __ Movt(v0, t2);
+  } else {
+    Label skip;
+    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
+    __ mov(v0, t0);  // Return LESS as result.
+
+    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
+    __ mov(v0, t2);  // Return EQUAL as result.
+
+    __ mov(v0, t1);  // Return GREATER as result.
+    __ bind(&skip);
+  }
 
   __ Ret();
 
   __ bind(&nan);
   // NaN comparisons always fail.
   // Load whatever we need in v0 to make the comparison fail.
-  ASSERT(is_int16(GREATER) && is_int16(LESS));
+  DCHECK(is_int16(GREATER) && is_int16(LESS));
   __ Ret(USE_DELAY_SLOT);
   if (cc == lt || cc == le) {
     __ li(v0, Operand(GREATER));
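Both the pre-r6 and r6 halves above materialize the same three-way outcome in v0, with NaN forced to whichever value makes the pending condition fail. A hedged scalar equivalent of that convention:

```cpp
#include <cmath>

// nan_result is GREATER (1) when the tested condition is lt/le and
// LESS (-1) otherwise, so any comparison involving NaN comes out false.
int ThreeWayCompare(double lhs, double rhs, int nan_result) {
  if (std::isnan(lhs) || std::isnan(rhs)) return nan_result;
  if (lhs < rhs) return -1;  // LESS
  if (lhs == rhs) return 0;  // EQUAL
  return 1;                  // GREATER
}
```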
@@ -1193,29 +746,19 @@
         masm, lhs, rhs, &flat_string_check, &slow);
   }
 
-  // Check for both being sequential ASCII strings, and inline if that is the
-  // case.
+  // Check for both being sequential one-byte strings,
+  // and inline if that is the case.
   __ bind(&flat_string_check);
 
-  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, a2, a3, &slow);
+  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
 
   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
                       a3);
   if (cc == eq) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
-                                                     lhs,
-                                                     rhs,
-                                                     a2,
-                                                     a3,
-                                                     t0);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, t0);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
-                                                       lhs,
-                                                       rhs,
-                                                       a2,
-                                                       a3,
-                                                       t0,
-                                                       t1);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, t0,
+                                                    t1);
   }
   // Never falls through to here.
 
@@ -1233,7 +776,7 @@
     if (cc == lt || cc == le) {
       ncr = GREATER;
     } else {
-      ASSERT(cc == gt || cc == ge);  // Remaining cases.
+      DCHECK(cc == gt || cc == ge);  // Remaining cases.
       ncr = LESS;
     }
     __ li(a0, Operand(Smi::FromInt(ncr)));
@@ -1252,11 +795,7 @@
 void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
   __ mov(t9, ra);
   __ pop(ra);
-  if (save_doubles_ == kSaveFPRegs) {
-    __ PushSafepointRegistersAndDoubles();
-  } else {
-    __ PushSafepointRegisters();
-  }
+  __ PushSafepointRegisters();
   __ Jump(t9);
 }
 
@@ -1264,12 +803,7 @@
 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
   __ mov(t9, ra);
   __ pop(ra);
-  __ StoreToSafepointRegisterSlot(t9, t9);
-  if (save_doubles_ == kSaveFPRegs) {
-    __ PopSafepointRegistersAndDoubles();
-  } else {
-    __ PopSafepointRegisters();
-  }
+  __ PopSafepointRegisters();
   __ Jump(t9);
 }
 
@@ -1279,7 +813,7 @@
   // store the registers in any particular way, but we do have to store and
   // restore them.
   __ MultiPush(kJSCallerSaved | ra.bit());
-  if (save_doubles_ == kSaveFPRegs) {
+  if (save_doubles()) {
     __ MultiPushFPU(kCallerSavedFPU);
   }
   const int argument_count = 1;
@@ -1292,7 +826,7 @@
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()),
       argument_count);
-  if (save_doubles_ == kSaveFPRegs) {
+  if (save_doubles()) {
     __ MultiPopFPU(kCallerSavedFPU);
   }
 
@@ -1303,7 +837,8 @@
 
 void MathPowStub::Generate(MacroAssembler* masm) {
   const Register base = a1;
-  const Register exponent = a2;
+  const Register exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(exponent.is(a2));
   const Register heapnumbermap = t1;
   const Register heapnumber = v0;
   const DoubleRegister double_base = f2;
@@ -1315,7 +850,7 @@
   const Register scratch2 = t3;
 
   Label call_runtime, done, int_exponent;
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     Label base_is_smi, unpack_exponent;
     // The exponent and base are supplied as arguments on the stack.
     // This can only happen if the stub is called from non-optimized code.
@@ -1343,7 +878,7 @@
     __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
     __ ldc1(double_exponent,
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
+  } else if (exponent_type() == TAGGED) {
     // Base is already in double_base.
     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
 
@@ -1351,7 +886,7 @@
             FieldMemOperand(exponent, HeapNumber::kValueOffset));
   }
 
-  if (exponent_type_ != INTEGER) {
+  if (exponent_type() != INTEGER) {
     Label int_exponent_convert;
     // Detect integer exponents stored as double.
     __ EmitFPUTruncate(kRoundToMinusInf,
@@ -1364,7 +899,7 @@
     // scratch2 == 0 means there was no conversion error.
     __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
 
-    if (exponent_type_ == ON_STACK) {
+    if (exponent_type() == ON_STACK) {
       // Detect square root case.  Crankshaft detects constant +/-0.5 at
       // compile time and uses DoMathPowHalf instead.  We then skip this check
       // for non-constant cases of +/-0.5 as these hardly occur.
@@ -1433,7 +968,7 @@
   __ bind(&int_exponent);
 
   // Get two copies of exponent in the registers scratch and exponent.
-  if (exponent_type_ == INTEGER) {
+  if (exponent_type() == INTEGER) {
     __ mov(scratch, exponent);
   } else {
     // Exponent has previously been stored into scratch as untagged integer.
@@ -1481,10 +1016,10 @@
 
   // Returning or bailing out.
   Counters* counters = isolate()->counters();
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
+    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
 
     // The stub is called from non-optimized code, which expects the result
     // as heap number in exponent.
@@ -1493,7 +1028,7 @@
         heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
     __ sdc1(double_result,
             FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
-    ASSERT(heapnumber.is(v0));
+    DCHECK(heapnumber.is(v0));
     __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
     __ DropAndRet(2);
   } else {
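The int_exponent path (its loop lies outside this hunk) follows the usual square-and-multiply shape; a hedged scalar sketch of that approach:

```cpp
#include <cstdint>

double PowInt(double base, int32_t exponent) {
  uint32_t e = exponent < 0 ? 0u - static_cast<uint32_t>(exponent)
                            : static_cast<uint32_t>(exponent);
  double result = 1.0;
  for (double b = base; e != 0; e >>= 1) {  // Square-and-multiply.
    if (e & 1u) result *= b;
    b *= b;
  }
  return exponent < 0 ? 1.0 / result : result;
}
```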
@@ -1535,41 +1070,23 @@
 }
 
 
-void StoreRegistersStateStub::GenerateAheadOfTime(
-    Isolate* isolate) {
-  StoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
-  stub1.GetCode();
-  // Hydrogen code stubs need stub2 at snapshot time.
-  StoreRegistersStateStub stub2(isolate, kSaveFPRegs);
-  stub2.GetCode();
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+  StoreRegistersStateStub stub(isolate);
+  stub.GetCode();
 }
 
 
-void RestoreRegistersStateStub::GenerateAheadOfTime(
-    Isolate* isolate) {
-  RestoreRegistersStateStub stub1(isolate, kDontSaveFPRegs);
-  stub1.GetCode();
-  // Hydrogen code stubs need stub2 at snapshot time.
-  RestoreRegistersStateStub stub2(isolate, kSaveFPRegs);
-  stub2.GetCode();
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+  RestoreRegistersStateStub stub(isolate);
+  stub.GetCode();
 }
 
 
 void CodeStub::GenerateFPStubs(Isolate* isolate) {
+  // Generate if not already in cache.
   SaveFPRegsMode mode = kSaveFPRegs;
-  CEntryStub save_doubles(isolate, 1, mode);
-  StoreBufferOverflowStub stub(isolate, mode);
-  // These stubs might already be in the snapshot, detect that and don't
-  // regenerate, which would lead to code stub initialization state being messed
-  // up.
-  Code* save_doubles_code;
-  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
-    save_doubles_code = *save_doubles.GetCode();
-  }
-  Code* store_buffer_overflow_code;
-  if (!stub.FindCodeInCache(&store_buffer_overflow_code)) {
-      store_buffer_overflow_code = *stub.GetCode();
-  }
+  CEntryStub(isolate, 1, mode).GetCode();
+  StoreBufferOverflowStub(isolate, mode).GetCode();
   isolate->set_fp_stubs_generated(true);
 }
 
@@ -1601,7 +1118,7 @@
 
   // Enter the exit frame that transitions from JavaScript to C++.
   FrameScope scope(masm, StackFrame::MANUAL);
-  __ EnterExitFrame(save_doubles_);
+  __ EnterExitFrame(save_doubles());
 
   // s0: number of arguments  including receiver (C callee-saved)
   // s1: pointer to first argument (C callee-saved)
@@ -1648,7 +1165,7 @@
     // Set up sp in the delay slot.
     masm->addiu(sp, sp, -kCArgsSlotsSize);
     // Make sure the stored 'ra' points to this position.
-    ASSERT_EQ(kNumInstructionsToJump,
+    DCHECK_EQ(kNumInstructionsToJump,
               masm->InstructionsGeneratedSince(&find_ra));
   }
 
@@ -1689,7 +1206,7 @@
   // sp: stack pointer
   // fp: frame pointer
   // s0: still holds argc (callee-saved).
-  __ LeaveExitFrame(save_doubles_, s0, true, EMIT_RETURN);
+  __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
 
   // Handling of exception.
   __ bind(&exception_returned);
@@ -1716,7 +1233,7 @@
 }
 
 
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
   Label invoke, handler_entry, exit;
   Isolate* isolate = masm->isolate();
 
@@ -1750,7 +1267,7 @@
 
   // We build an EntryFrame.
   __ li(t3, Operand(-1));  // Push a bad frame pointer to fail if it is used.
-  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  int marker = type();
   __ li(t2, Operand(Smi::FromInt(marker)));
   __ li(t1, Operand(Smi::FromInt(marker)));
   __ li(t0, Operand(ExternalReference(Isolate::kCEntryFPAddress,
@@ -1841,7 +1358,7 @@
   // 4 args slots
   // args
 
-  if (is_construct) {
+  if (type() == StackFrame::ENTRY_CONSTRUCT) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                       isolate);
     __ li(t0, Operand(construct_entry));
@@ -1899,9 +1416,9 @@
 // in the safepoint slot for register t0.
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
-  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
   // ReturnTrueFalse is only implemented for inlined call sites.
-  ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+  DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
 
   // Fixed register usage throughout the stub:
   const Register object = a0;  // Object (lhs).
@@ -1951,7 +1468,7 @@
     __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
-    ASSERT(HasArgsInRegisters());
+    DCHECK(HasArgsInRegisters());
     // Patch the (relocated) inlined map check.
 
     // The offset was stored in t0 safepoint slot.
@@ -1981,7 +1498,7 @@
   __ Branch(&loop);
 
   __ bind(&is_instance);
-  ASSERT(Smi::FromInt(0) == 0);
+  DCHECK(Smi::FromInt(0) == 0);
   if (!HasCallSiteInlineCheck()) {
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
@@ -1993,7 +1510,7 @@
     __ PatchRelocatedValue(inline_site, scratch, v0);
 
     if (!ReturnTrueFalseObject()) {
-      ASSERT_EQ(Smi::FromInt(0), 0);
+      DCHECK_EQ(Smi::FromInt(0), 0);
       __ mov(v0, zero_reg);
     }
   }
@@ -2069,45 +1586,22 @@
 
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
-  Register receiver;
-  if (kind() == Code::KEYED_LOAD_IC) {
-    // ----------- S t a t e -------------
-    //  -- ra    : return address
-    //  -- a0    : key
-    //  -- a1    : receiver
-    // -----------------------------------
-    __ Branch(&miss, ne, a0,
-        Operand(isolate()->factory()->prototype_string()));
-    receiver = a1;
-  } else {
-    ASSERT(kind() == Code::LOAD_IC);
-    // ----------- S t a t e -------------
-    //  -- a2    : name
-    //  -- ra    : return address
-    //  -- a0    : receiver
-    //  -- sp[0] : receiver
-    // -----------------------------------
-    receiver = a0;
-  }
-
-  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3, t0, &miss);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
+                                                          t0, &miss);
   __ bind(&miss);
-  StubCompiler::TailCallBuiltin(
-      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
 }
 
 
-Register InstanceofStub::left() { return a0; }
-
-
-Register InstanceofStub::right() { return a1; }
-
-
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The displacement is the offset of the last parameter (if any)
   // relative to the frame pointer.
   const int kDisplacement =
       StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
+  DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
 
   // Check that the key is a smi.
   Label slow;
@@ -2178,7 +1672,7 @@
   __ sw(a3, MemOperand(sp, 1 * kPointerSize));
 
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
 
 
@@ -2233,7 +1727,7 @@
       FixedArray::kHeaderSize + 2 * kPointerSize;
   // If there are no mapped parameters, we do not need the parameter_map.
   Label param_map_size;
-  ASSERT_EQ(0, Smi::FromInt(0));
+  DCHECK_EQ(0, Smi::FromInt(0));
   __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
   __ mov(t5, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
   __ sll(t5, a1, 1);
@@ -2252,12 +1746,12 @@
   __ Allocate(t5, v0, a3, t0, &runtime, TAG_OBJECT);
 
   // v0 = address of new object(s) (tagged)
-  // a2 = argument count (tagged)
+  // a2 = argument count (smi-tagged)
   // Get the arguments boilerplate from the current native context into t0.
   const int kNormalOffset =
-      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX);
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
   const int kAliasedOffset =
-      Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX);
+      Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
 
   __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
@@ -2272,22 +1766,23 @@
 
   // v0 = address of new object (tagged)
   // a1 = mapped parameter count (tagged)
-  // a2 = argument count (tagged)
-  // t0 = address of boilerplate object (tagged)
-  // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ lw(a3, FieldMemOperand(t0, i));
-    __ sw(a3, FieldMemOperand(v0, i));
-  }
+  // a2 = argument count (smi-tagged)
+  // t0 = address of arguments map (tagged)
+  __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
 
   // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ lw(a3, MemOperand(sp, 2 * kPointerSize));
+  __ AssertNotSmi(a3);
   const int kCalleeOffset = JSObject::kHeaderSize +
       Heap::kArgumentsCalleeIndex * kPointerSize;
   __ sw(a3, FieldMemOperand(v0, kCalleeOffset));
 
   // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(a2);
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   const int kLengthOffset = JSObject::kHeaderSize +
       Heap::kArgumentsLengthIndex * kPointerSize;
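// Net layout of the new sloppy-arguments object, one word per slot:
//   [map][properties: empty_fixed_array][elements: empty_fixed_array]
//   [in-object 0: length][in-object 1: callee]
// matching the STATIC_ASSERTs kArgumentsLengthIndex == 0 and
// kArgumentsCalleeIndex == 1 above.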
@@ -2397,7 +1892,33 @@
   // a2 = argument count (tagged)
   __ bind(&runtime);
   __ sw(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+  // Return address is in ra.
+  Label slow;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+
+  // Check that the key is an array index, that is, a Uint32.
+  __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+  // Everything is fine; call the runtime.
+  __ Push(receiver, key);  // Receiver, key.
+
+  // Perform tail call to the entry.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+                        masm->isolate()),
+      2, 1);
+
+  __ bind(&slow);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
 }
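// A minimal C++ sketch of the test the And/Branch pair above performs,
// assuming this port's 32-bit constants (kSmiTagMask == 1,
// kSmiSignMask == 0x80000000): a key is a valid array index iff it is a
// smi (low tag bit clear) and non-negative (sign bit clear).
static inline bool IsArrayIndexKey(uint32_t key) {
  // Zero iff both the smi tag bit and the sign bit are clear.
  return (key & (kSmiTagMask | kSmiSignMask)) == 0;
}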
 
 
@@ -2446,15 +1967,18 @@
   // Get the arguments boilerplate from the current native context.
   __ lw(t0, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ lw(t0, FieldMemOperand(t0, GlobalObject::kNativeContextOffset));
-  __ lw(t0, MemOperand(t0, Context::SlotOffset(
-      Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX)));
+  __ lw(t0, MemOperand(
+                t0, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
 
-  // Copy the JS object part.
-  __ CopyFields(v0, t0, a3.bit(), JSObject::kHeaderSize / kPointerSize);
+  __ sw(t0, FieldMemOperand(v0, JSObject::kMapOffset));
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sw(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sw(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
 
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ lw(a1, MemOperand(sp, 0 * kPointerSize));
+  __ AssertSmi(a1);
   __ sw(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
       Heap::kArgumentsLengthIndex * kPointerSize));
 
@@ -2495,7 +2019,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
 }
 
 
@@ -2504,7 +2028,7 @@
   // time, or if regexp entry in generated code is turned off by a runtime
   // switch or at compilation.
 #ifdef V8_INTERPRETED_REGEXP
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
 
   // Stack frame on entry.
@@ -2662,8 +2186,8 @@
   STATIC_ASSERT(kStringEncodingMask == 4);
   STATIC_ASSERT(kOneByteStringTag == 4);
   STATIC_ASSERT(kTwoByteStringTag == 0);
-  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for ASCII.
-  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset));
+  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one-byte.
+  __ lw(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
   __ sra(a3, a0, 2);  // a3 is 1 for ASCII, 0 for UC16 (used below).
   __ lw(t1, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
   __ Movz(t9, t1, a0);  // If UC16 (a0 is 0), replace t9 w/kDataUC16CodeOffset.
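// That is: a3 = (instance_type & kStringEncodingMask) >> 2, which is 1 for
// one-byte strings (tag 4) and 0 for two-byte (tag 0), per the STATIC_ASSERTs
// above; the Movz swaps in the two-byte code object exactly when the masked
// encoding bit in a0 is zero.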
@@ -2676,7 +2200,7 @@
   __ JumpIfSmi(t9, &runtime);
 
   // a1: previous index
-  // a3: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
   // t9: code
   // subject: Subject string
   // regexp_data: RegExp data (FixedArray)
@@ -2731,7 +2255,7 @@
   __ sw(a0, MemOperand(sp, 1 * kPointerSize));
 
   // For arguments 4 and 3 get string length, calculate start of string data
-  // and calculate the shift of the index (0 for ASCII and 1 for two byte).
+  // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
   __ Addu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
   __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
   // Load the length from the original subject string from the previous stack
@@ -2894,7 +2418,7 @@
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 
   // Deferred code for string handling.
   // (6) Not a long external string?  If yes, go to (8).
@@ -2950,9 +2474,9 @@
   // a3 : slot in feedback vector (Smi)
   Label initialize, done, miss, megamorphic, not_array_function;
 
-  ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()),
+  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
             masm->isolate()->heap()->megamorphic_symbol());
-  ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()),
+  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
             masm->isolate()->heap()->uninitialized_symbol());
 
   // Load the cache state into t0.
@@ -3150,7 +2674,7 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
 }
 
 
@@ -3252,7 +2776,7 @@
   __ TailCallStub(&stub);
 
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+  GenerateMiss(masm);
 
   // The slow case, we need this no matter what to complete a call after a miss.
   CallFunctionNoFeedback(masm,
@@ -3271,7 +2795,7 @@
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   EmitLoadTypeFeedbackVector(masm, a2);
@@ -3283,7 +2807,7 @@
   __ Branch(&extra_checks_or_miss, ne, a1, Operand(t0));
 
   __ bind(&have_js_function);
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     EmitContinueIfStrictOrNative(masm, &cont);
     // Compute the receiver in sloppy mode.
     __ lw(a3, MemOperand(sp, argc * kPointerSize));
@@ -3300,7 +2824,7 @@
   __ bind(&slow);
   EmitSlowCase(masm, argc, &non_function);
 
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     __ bind(&wrap);
     EmitWrapCase(masm, argc, &cont);
   }
@@ -3328,7 +2852,7 @@
 
   // We are here because tracing is on or we are going monomorphic.
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Miss);
+  GenerateMiss(masm);
 
   // the slow case
   __ bind(&slow_start);
@@ -3343,9 +2867,9 @@
 }
 
 
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
   // Get the receiver of the function from the stack; 1 ~ return address.
-  __ lw(t0, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize));
+  __ lw(t0, MemOperand(sp, (arg_count() + 1) * kPointerSize));
 
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -3354,6 +2878,9 @@
     __ Push(t0, a1, a2, a3);
 
     // Call the entry.
+    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+                                               : IC::kCallIC_Customization_Miss;
+
     ExternalReference miss = ExternalReference(IC_Utility(id),
                                                masm->isolate());
     __ CallExternalReference(miss, 4);
@@ -3366,14 +2893,9 @@
 
 // StringCharCodeAtGenerator.
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  Label flat_string;
-  Label ascii_string;
-  Label got_char_code;
-  Label sliced_string;
-
-  ASSERT(!t0.is(index_));
-  ASSERT(!t0.is(result_));
-  ASSERT(!t0.is(object_));
+  DCHECK(!t0.is(index_));
+  DCHECK(!t0.is(result_));
+  DCHECK(!t0.is(object_));
 
   // If the receiver is a smi, trigger the non-string case.
   __ JumpIfSmi(object_, receiver_not_string_);
@@ -3426,9 +2948,9 @@
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
-    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
     // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
 
   // Save the conversion result before the pop instructions below
@@ -3452,7 +2974,7 @@
   call_helper.BeforeCall(masm);
   __ sll(index_, index_, kSmiTagSize);
   __ Push(object_, index_);
-  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
 
   __ Move(result_, v0);
 
@@ -3469,12 +2991,12 @@
 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
 
-  ASSERT(!t0.is(result_));
-  ASSERT(!t0.is(code_));
+  DCHECK(!t0.is(result_));
+  DCHECK(!t0.is(code_));
 
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
   __ And(t0,
          code_,
          Operand(kSmiTagMask |
@@ -3482,7 +3004,7 @@
   __ Branch(&slow_case_, ne, t0, Operand(zero_reg));
 
   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
-  // At this point code register contains smi tagged ASCII char code.
+  // At this point code register contains smi tagged one-byte char code.
   STATIC_ASSERT(kSmiTag == 0);
   __ sll(t0, code_, kPointerSizeLog2 - kSmiTagSize);
   __ Addu(result_, result_, t0);
@@ -3511,10 +3033,7 @@
 }
 
 
-enum CopyCharactersFlags {
-  COPY_ASCII = 1,
-  DEST_ALWAYS_ALIGNED = 2
-};
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
 
 
 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
@@ -3558,57 +3077,6 @@
 }
 
 
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
-                                    Register hash,
-                                    Register character) {
-  // hash = seed + character + ((seed + character) << 10);
-  __ LoadRoot(hash, Heap::kHashSeedRootIndex);
-  // Untag smi seed and add the character.
-  __ SmiUntag(hash);
-  __ addu(hash, hash, character);
-  __ sll(at, hash, 10);
-  __ addu(hash, hash, at);
-  // hash ^= hash >> 6;
-  __ srl(at, hash, 6);
-  __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
-                                            Register hash,
-                                            Register character) {
-  // hash += character;
-  __ addu(hash, hash, character);
-  // hash += hash << 10;
-  __ sll(at, hash, 10);
-  __ addu(hash, hash, at);
-  // hash ^= hash >> 6;
-  __ srl(at, hash, 6);
-  __ xor_(hash, hash, at);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
-                                       Register hash) {
-  // hash += hash << 3;
-  __ sll(at, hash, 3);
-  __ addu(hash, hash, at);
-  // hash ^= hash >> 11;
-  __ srl(at, hash, 11);
-  __ xor_(hash, hash, at);
-  // hash += hash << 15;
-  __ sll(at, hash, 15);
-  __ addu(hash, hash, at);
-
-  __ li(at, Operand(String::kHashBitMask));
-  __ and_(hash, hash, at);
-
-  // if (hash == 0) hash = 27;
-  __ ori(at, zero_reg, StringHasher::kZeroHash);
-  __ Movz(hash, at, hash);
-}
-
-
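// The three helpers deleted above implemented V8's Jenkins-style
// one-at-a-time string hash. A minimal C++ sketch of the same computation,
// reconstructed from the MIPS sequences (assumes length >= 1; kHashBitMask
// stands in for String::kHashBitMask, and 27 is StringHasher::kZeroHash, as
// the deleted comments state):
uint32_t HashSequentialString(const uint8_t* chars, int length, uint32_t seed,
                              uint32_t kHashBitMask) {
  // GenerateHashInit: hash = seed + c + ((seed + c) << 10); hash ^= hash >> 6.
  uint32_t hash = seed + chars[0];
  hash += hash << 10;
  hash ^= hash >> 6;
  // GenerateHashAddCharacter: one identical round per remaining character.
  for (int i = 1; i < length; i++) {
    hash += chars[i];
    hash += hash << 10;
    hash ^= hash >> 6;
  }
  // GenerateHashGetHash: final mixing, mask to the hash-field width, and
  // never return zero.
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  hash &= kHashBitMask;
  return hash == 0 ? 27 : hash;
}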
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
   // Stack frame on entry.
@@ -3729,7 +3197,7 @@
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ And(t0, a1, Operand(kStringEncodingMask));
     __ Branch(&two_byte_slice, eq, t0, Operand(zero_reg));
-    __ AllocateAsciiSlicedString(v0, a2, t2, t3, &runtime);
+    __ AllocateOneByteSlicedString(v0, a2, t2, t3, &runtime);
     __ jmp(&set_slice_header);
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(v0, a2, t2, t3, &runtime);
@@ -3773,7 +3241,7 @@
   __ Branch(&two_byte_sequential, eq, t0, Operand(zero_reg));
 
   // Allocate and copy the resulting ASCII string.
-  __ AllocateAsciiString(v0, a2, t0, t2, t3, &runtime);
+  __ AllocateOneByteString(v0, a2, t0, t2, t3, &runtime);
 
   // Locate first character of substring to copy.
   __ Addu(t1, t1, a3);
@@ -3816,7 +3284,7 @@
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 
   __ bind(&single_char);
   // v0: original string
@@ -3832,12 +3300,9 @@
 }
 
 
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                                      Register left,
-                                                      Register right,
-                                                      Register scratch1,
-                                                      Register scratch2,
-                                                      Register scratch3) {
+void StringHelper::GenerateFlatOneByteStringEquals(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3) {
   Register length = scratch1;
 
   // Compare lengths.
@@ -3846,7 +3311,7 @@
   __ lw(scratch2, FieldMemOperand(right, String::kLengthOffset));
   __ Branch(&check_zero_length, eq, length, Operand(scratch2));
   __ bind(&strings_not_equal);
-  ASSERT(is_int16(NOT_EQUAL));
+  DCHECK(is_int16(NOT_EQUAL));
   __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
 
@@ -3855,16 +3320,15 @@
   __ bind(&check_zero_length);
   STATIC_ASSERT(kSmiTag == 0);
   __ Branch(&compare_chars, ne, length, Operand(zero_reg));
-  ASSERT(is_int16(EQUAL));
+  DCHECK(is_int16(EQUAL));
   __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
 
   // Compare characters.
   __ bind(&compare_chars);
 
-  GenerateAsciiCharsCompareLoop(masm,
-                                left, right, length, scratch2, scratch3, v0,
-                                &strings_not_equal);
+  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
+                                  v0, &strings_not_equal);
 
   // Characters are equal.
   __ Ret(USE_DELAY_SLOT);
@@ -3872,13 +3336,9 @@
 }
 
 
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register left,
-                                                        Register right,
-                                                        Register scratch1,
-                                                        Register scratch2,
-                                                        Register scratch3,
-                                                        Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3, Register scratch4) {
   Label result_not_equal, compare_lengths;
   // Find minimum length and length difference.
   __ lw(scratch1, FieldMemOperand(left, String::kLengthOffset));
@@ -3892,13 +3352,12 @@
   __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
 
   // Compare loop.
-  GenerateAsciiCharsCompareLoop(masm,
-                                left, right, min_length, scratch2, scratch4, v0,
-                                &result_not_equal);
+  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                  scratch4, v0, &result_not_equal);
 
   // Compare lengths - strings up to min-length are equal.
   __ bind(&compare_lengths);
-  ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
   // Use length_delta as result if it's zero.
   __ mov(scratch2, length_delta);
   __ mov(scratch4, zero_reg);
@@ -3917,14 +3376,9 @@
 }
 
 
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
-    MacroAssembler* masm,
-    Register left,
-    Register right,
-    Register length,
-    Register scratch1,
-    Register scratch2,
-    Register scratch3,
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch1, Register scratch2, Register scratch3,
     Label* chars_not_equal) {
   // Change index to run from -length to -1 by adding length to string
   // start. This means that loop ends when index reaches zero, which
@@ -3972,16 +3426,16 @@
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ASCII strings.
-  __ JumpIfNotBothSequentialAsciiStrings(a1, a0, a2, a3, &runtime);
+  // Check that both objects are sequential one-byte strings.
+  __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
 
   // Compare flat ASCII strings natively. Remove arguments from stack first.
   __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
   __ Addu(sp, sp, Operand(2 * kPointerSize));
-  GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1);
+  StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, t0, t1);
 
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
@@ -4008,13 +3462,13 @@
 
   // Tail call into the stub that handles binary operations with allocation
   // sites.
-  BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
   __ TailCallStub(&stub);
 }
 
 
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::SMI);
   Label miss;
   __ Or(a2, a1, a0);
   __ JumpIfNotSmi(a2, &miss);
@@ -4036,17 +3490,17 @@
 }
 
 
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
 
   Label generic_stub;
   Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
 
-  if (left_ == CompareIC::SMI) {
+  if (left() == CompareICState::SMI) {
     __ JumpIfNotSmi(a1, &miss);
   }
-  if (right_ == CompareIC::SMI) {
+  if (right() == CompareICState::SMI) {
     __ JumpIfNotSmi(a0, &miss);
   }
 
@@ -4090,7 +3544,7 @@
   __ BranchF(&fpu_lt, NULL, lt, f0, f2);
 
   // Otherwise it's greater, so just fall thru, and return.
-  ASSERT(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
+  DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
   __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(GREATER));
 
@@ -4104,12 +3558,12 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
-                     CompareIC::GENERIC);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
 
   __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
     __ Branch(&miss, ne, a0, Operand(at));
     __ JumpIfSmi(a1, &unordered);
@@ -4119,7 +3573,7 @@
   }
 
   __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
     __ Branch(&unordered, eq, a1, Operand(at));
   }
@@ -4129,8 +3583,8 @@
 }
 
 
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
   Label miss;
 
   // Registers containing left and right operands respectively.
@@ -4154,13 +3608,13 @@
 
   // Make sure a0 is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(a0));
+  DCHECK(right.is(a0));
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
   __ mov(v0, right);
   // Internalized strings are compared by identity.
   __ Ret(ne, left, Operand(right));
-  ASSERT(is_int16(EQUAL));
+  DCHECK(is_int16(EQUAL));
   __ Ret(USE_DELAY_SLOT);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
 
@@ -4169,9 +3623,9 @@
 }
 
 
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::UNIQUE_NAME);
-  ASSERT(GetCondition() == eq);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  DCHECK(GetCondition() == eq);
   Label miss;
 
   // Registers containing left and right operands respectively.
@@ -4190,8 +3644,8 @@
   __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
   __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss);
-  __ JumpIfNotUniqueName(tmp2, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
 
   // Use a0 as result
   __ mov(v0, a0);
@@ -4201,7 +3655,7 @@
   __ Branch(&done, ne, left, Operand(right));
   // Make sure a0 is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(a0));
+  DCHECK(right.is(a0));
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
   __ li(v0, Operand(Smi::FromInt(EQUAL)));
@@ -4213,11 +3667,11 @@
 }
 
 
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
+  bool equality = Token::IsEqualityOp(op());
 
   // Registers containing left and right operands respectively.
   Register left = a1;
@@ -4257,7 +3711,7 @@
   // because we already know they are not identical. We know they are both
   // strings.
   if (equality) {
-    ASSERT(GetCondition() == eq);
+    DCHECK(GetCondition() == eq);
     STATIC_ASSERT(kInternalizedTag == 0);
     __ Or(tmp3, tmp1, Operand(tmp2));
     __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
@@ -4265,24 +3719,24 @@
     __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
     // Make sure a0 is non-zero. At this point input operands are
     // guaranteed to be non-zero.
-    ASSERT(right.is(a0));
+    DCHECK(right.is(a0));
     __ Ret(USE_DELAY_SLOT);
     __ mov(v0, a0);  // In the delay slot.
     __ bind(&is_symbol);
   }
 
-  // Check that both strings are sequential ASCII.
+  // Check that both strings are sequential one-byte.
   Label runtime;
-  __ JumpIfBothInstanceTypesAreNotSequentialAscii(
-      tmp1, tmp2, tmp3, tmp4, &runtime);
+  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+                                                    &runtime);
 
-  // Compare flat ASCII strings. Returns when done.
+  // Compare flat one-byte strings. Returns when done.
   if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2, tmp3);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
+                                                  tmp3);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, left, right, tmp1, tmp2, tmp3, tmp4);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+                                                    tmp2, tmp3, tmp4);
   }
 
   // Handle more complex cases in runtime.
@@ -4291,7 +3745,7 @@
   if (equality) {
     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
   } else {
-    __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
   }
 
   __ bind(&miss);
@@ -4299,8 +3753,8 @@
 }
 
 
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::OBJECT);
   Label miss;
   __ And(a2, a1, Operand(a0));
   __ JumpIfSmi(a2, &miss);
@@ -4310,7 +3764,7 @@
   __ GetObjectType(a1, a2, a2);
   __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
 
-  ASSERT(GetCondition() == eq);
+  DCHECK(GetCondition() == eq);
   __ Ret(USE_DELAY_SLOT);
   __ subu(v0, a0, a1);
 
@@ -4319,7 +3773,7 @@
 }
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
   Label miss;
   __ And(a2, a1, a0);
   __ JumpIfSmi(a2, &miss);
@@ -4336,7 +3790,7 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   {
     // Call the runtime system in a fresh internal frame.
     ExternalReference miss =
@@ -4344,7 +3798,7 @@
     FrameScope scope(masm, StackFrame::INTERNAL);
     __ Push(a1, a0);
     __ Push(ra, a1, a0);
-    __ li(t0, Operand(Smi::FromInt(op_)));
+    __ li(t0, Operand(Smi::FromInt(op())));
     __ addiu(sp, sp, -kPointerSize);
     __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
     __ sw(t0, MemOperand(sp));  // In the delay slot.
@@ -4399,7 +3853,7 @@
                                                       Register properties,
                                                       Handle<Name> name,
                                                       Register scratch0) {
-  ASSERT(name->IsUniqueName());
+  DCHECK(name->IsUniqueName());
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
   // undefined value), it guarantees the hash table doesn't contain the
@@ -4416,19 +3870,19 @@
         Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ sll(at, index, 1);
     __ Addu(index, index, at);
 
     Register entity_name = scratch0;
     // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
+    DCHECK_EQ(kSmiTagSize, 1);
     Register tmp = properties;
     __ sll(scratch0, index, 1);
     __ Addu(tmp, properties, scratch0);
     __ lw(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
 
-    ASSERT(!tmp.is(entity_name));
+    DCHECK(!tmp.is(entity_name));
     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
     __ Branch(done, eq, entity_name, Operand(tmp));
 
@@ -4445,7 +3899,7 @@
     __ lw(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
     __ lbu(entity_name,
            FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
-    __ JumpIfNotUniqueName(entity_name, miss);
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
     __ bind(&good);
 
     // Restore the properties.
@@ -4481,10 +3935,10 @@
                                                       Register name,
                                                       Register scratch1,
                                                       Register scratch2) {
-  ASSERT(!elements.is(scratch1));
-  ASSERT(!elements.is(scratch2));
-  ASSERT(!name.is(scratch1));
-  ASSERT(!name.is(scratch2));
+  DCHECK(!elements.is(scratch1));
+  DCHECK(!elements.is(scratch2));
+  DCHECK(!name.is(scratch1));
+  DCHECK(!name.is(scratch2));
 
   __ AssertName(name);
 
@@ -4503,7 +3957,7 @@
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
       // shifted in the following and instruction.
-      ASSERT(NameDictionary::GetProbeOffset(i) <
+      DCHECK(NameDictionary::GetProbeOffset(i) <
              1 << (32 - Name::kHashFieldOffset));
       __ Addu(scratch2, scratch2, Operand(
           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@@ -4512,7 +3966,7 @@
     __ And(scratch2, scratch1, scratch2);
 
     // Scale the index by multiplying by the element size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     // scratch2 = scratch2 * 3.
 
     __ sll(at, scratch2, 1);
@@ -4532,7 +3986,7 @@
 
   __ MultiPush(spill_mask);
   if (name.is(a0)) {
-    ASSERT(!elements.is(a1));
+    DCHECK(!elements.is(a1));
     __ Move(a1, name);
     __ Move(a0, elements);
   } else {
@@ -4588,7 +4042,7 @@
       // Add the probe offset (i + i * i) left shifted to avoid right shifting
       // the hash in a separate instruction. The value hash + i + i * i is right
       // shifted in the following and instruction.
-      ASSERT(NameDictionary::GetProbeOffset(i) <
+      DCHECK(NameDictionary::GetProbeOffset(i) <
              1 << (32 - Name::kHashFieldOffset));
       __ Addu(index, hash, Operand(
           NameDictionary::GetProbeOffset(i) << Name::kHashShift));
@@ -4599,14 +4053,14 @@
     __ And(index, mask, index);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     // index *= 3.
     __ mov(at, index);
     __ sll(index, index, 1);
     __ Addu(index, index, at);
 
 
-    ASSERT_EQ(kSmiTagSize, 1);
+    DCHECK_EQ(kSmiTagSize, 1);
     __ sll(index, index, 2);
     __ Addu(index, index, dictionary);
     __ lw(entry_key, FieldMemOperand(index, kElementsStartOffset));
@@ -4617,12 +4071,12 @@
     // Stop if found the property.
     __ Branch(&in_dictionary, eq, entry_key, Operand(key));
 
-    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
       // Check if the entry name is not a unique name.
       __ lw(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
       __ lbu(entry_key,
              FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
-      __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
     }
   }
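// In pseudo-C++ the probing loop above is (mask == capacity - 1, computed
// earlier; helper names are illustrative):
//   for (int i = 0; i < kTotalProbes; i++) {
//     int entry = (hash + i + i * i) & mask;  // quadratic probe: 0, 2, 6, ...
//     Object* candidate = KeyAt(entry * 3);   // NameDictionary::kEntrySize == 3
//     if (candidate == undefined) break;      // vacant slot: not in dictionary
//     if (candidate == key) goto in_dictionary;
//     // NEGATIVE_LOOKUP additionally bails out on non-unique-name keys.
//   }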
 
@@ -4630,7 +4084,7 @@
   // If we are doing negative lookup then probing failure should be
   // treated as a lookup success. For positive lookup probing failure
   // should be treated as lookup failure.
-  if (mode_ == POSITIVE_LOOKUP) {
+  if (mode() == POSITIVE_LOOKUP) {
     __ Ret(USE_DELAY_SLOT);
     __ mov(result, zero_reg);
   }
@@ -4674,11 +4128,11 @@
   __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
   __ nop();
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(),
+                           address(),
+                           value(),
+                           save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   }
   __ Ret();
@@ -4700,7 +4154,7 @@
 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   regs_.Save(masm);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
     Label dont_need_remembered_set;
 
     __ lw(regs_.scratch0(), MemOperand(regs_.address(), 0));
@@ -4720,10 +4174,10 @@
         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
     InformIncrementalMarker(masm);
     regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(),
+                           address(),
+                           value(),
+                           save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
 
     __ bind(&dont_need_remembered_set);
@@ -4738,13 +4192,13 @@
 
 
 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   int argument_count = 3;
   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   Register address =
       a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
-  ASSERT(!address.is(regs_.object()));
-  ASSERT(!address.is(a0));
+  DCHECK(!address.is(regs_.object()));
+  DCHECK(!address.is(a0));
   __ Move(address, regs_.address());
   __ Move(a0, regs_.object());
   __ Move(a1, address);
@@ -4754,7 +4208,7 @@
   __ CallCFunction(
       ExternalReference::incremental_marking_record_write_function(isolate()),
       argument_count);
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
 }
 
 
@@ -4782,10 +4236,10 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(),
+                           address(),
+                           value(),
+                           save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ Ret();
@@ -4826,10 +4280,10 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(),
+                           address(),
+                           value(),
+                           save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ Ret();
@@ -4917,7 +4371,7 @@
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   __ lw(a1, MemOperand(fp, parameter_count_offset));
-  if (function_mode_ == JS_FUNCTION_STUB_MODE) {
+  if (function_mode() == JS_FUNCTION_STUB_MODE) {
     __ Addu(a1, a1, Operand(1));
   }
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
@@ -4927,6 +4381,20 @@
 }
 
 
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorLoadStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorKeyedLoadStub stub(isolate());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -4965,7 +4433,7 @@
   int frame_alignment = masm->ActivationFrameAlignment();
   if (frame_alignment > kPointerSize) {
     __ mov(s5, sp);
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     __ And(sp, sp, Operand(-frame_alignment));
   }
   __ Subu(sp, sp, kCArgsSlotsSize);
@@ -5032,12 +4500,12 @@
   // sp[0] - last argument
   Label normal_sequence;
   if (mode == DONT_OVERRIDE) {
-    ASSERT(FAST_SMI_ELEMENTS == 0);
-    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-    ASSERT(FAST_ELEMENTS == 2);
-    ASSERT(FAST_HOLEY_ELEMENTS == 3);
-    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
-    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+    DCHECK(FAST_SMI_ELEMENTS == 0);
+    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+    DCHECK(FAST_ELEMENTS == 2);
+    DCHECK(FAST_HOLEY_ELEMENTS == 3);
+    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
     // is the low bit set? If so, we are holey and that is good.
     __ And(at, a3, Operand(1));
@@ -5143,7 +4611,7 @@
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
-  if (argument_count_ == ANY) {
+  if (argument_count() == ANY) {
     Label not_zero_case, not_one_case;
     __ And(at, a0, a0);
     __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
@@ -5155,11 +4623,11 @@
 
     __ bind(&not_one_case);
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
-  } else if (argument_count_ == NONE) {
+  } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count_ == ONE) {
+  } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count_ == MORE_THAN_ONE) {
+  } else if (argument_count() == MORE_THAN_ONE) {
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   } else {
     UNREACHABLE();
@@ -5169,7 +4637,7 @@
 
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- a0 : argc (only if argument_count_ == ANY)
+  //  -- a0 : argc (only if argument_count() == ANY)
   //  -- a1 : constructor
   //  -- a2 : AllocationSite or undefined
   //  -- sp[0] : return address
@@ -5304,9 +4772,9 @@
   Register api_function_address = a1;
   Register context = cp;
 
-  int argc = ArgumentBits::decode(bit_field_);
-  bool is_store = IsStoreBits::decode(bit_field_);
-  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+  int argc = this->argc();
+  bool is_store = this->is_store();
+  bool call_data_undefined = this->call_data_undefined();
 
   typedef FunctionCallbackArguments FCA;
 
@@ -5345,7 +4813,7 @@
   FrameScope frame_scope(masm, StackFrame::MANUAL);
   __ EnterExitFrame(false, kApiStackSpace);
 
-  ASSERT(!api_function_address.is(a0) && !scratch.is(a0));
+  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
   // a0 = FunctionCallbackInfo&
   // Arguments is after the return address.
   __ Addu(a0, sp, Operand(1 * kPointerSize));
@@ -5392,7 +4860,8 @@
   //  -- a2                     : api_function_address
   // -----------------------------------
 
-  Register api_function_address = a2;
+  Register api_function_address = ApiGetterDescriptor::function_address();
+  DCHECK(api_function_address.is(a2));
 
   __ mov(a0, sp);  // a0 = Handle<Name>
   __ Addu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA
diff --git a/src/mips/code-stubs-mips.h b/src/mips/code-stubs-mips.h
index 3e0eaa1..afad32b 100644
--- a/src/mips/code-stubs-mips.h
+++ b/src/mips/code-stubs-mips.h
@@ -5,9 +5,6 @@
 #ifndef V8_MIPS_CODE_STUBS_ARM_H_
 #define V8_MIPS_CODE_STUBS_ARM_H_
 
-#include "src/ic-inl.h"
-
-
 namespace v8 {
 namespace internal {
 
@@ -15,24 +12,6 @@
 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
 
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
-  StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
-      : PlatformCodeStub(isolate), save_doubles_(save_fp) {}
-
-  void Generate(MacroAssembler* masm);
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
-
 class StringHelper : public AllStatic {
  public:
   // Generate code for copying a large number of characters. This function
@@ -46,98 +25,51 @@
                                      Register scratch,
                                      String::Encoding encoding);
 
+  // Compares two flat one-byte strings and returns result in v0.
+  static void GenerateCompareFlatOneByteStrings(
+      MacroAssembler* masm, Register left, Register right, Register scratch1,
+      Register scratch2, Register scratch3, Register scratch4);
 
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character);
-
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character);
-
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash);
+  // Compares two flat one-byte strings for equality and returns result in v0.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3);
 
  private:
+  static void GenerateOneByteCharsCompareLoop(
+      MacroAssembler* masm, Register left, Register right, Register length,
+      Register scratch1, Register scratch2, Register scratch3,
+      Label* chars_not_equal);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
-class SubStringStub: public PlatformCodeStub {
- public:
-  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
-  Major MajorKey() { return SubString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
 class StoreRegistersStateStub: public PlatformCodeStub {
  public:
-  explicit StoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
-      : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+  explicit StoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
 
   static void GenerateAheadOfTime(Isolate* isolate);
- private:
-  Major MajorKey() { return StoreRegistersState; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-  SaveFPRegsMode save_doubles_;
 
-  void Generate(MacroAssembler* masm);
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
 };
 
+
 class RestoreRegistersStateStub: public PlatformCodeStub {
  public:
-  explicit RestoreRegistersStateStub(Isolate* isolate, SaveFPRegsMode with_fp)
-      : PlatformCodeStub(isolate), save_doubles_(with_fp) {}
+  explicit RestoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
 
   static void GenerateAheadOfTime(Isolate* isolate);
- private:
-  Major MajorKey() { return RestoreRegistersState; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-  SaveFPRegsMode save_doubles_;
-
-  void Generate(MacroAssembler* masm);
-};
-
-class StringCompareStub: public PlatformCodeStub {
- public:
-  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
-  // Compare two flat ASCII strings and returns result in v0.
-  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                              Register left,
-                                              Register right,
-                                              Register scratch1,
-                                              Register scratch2,
-                                              Register scratch3,
-                                              Register scratch4);
-
-  // Compares two flat ASCII strings for equality and returns result
-  // in v0.
-  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Register scratch3);
 
  private:
-  virtual Major MajorKey() { return StringCompare; }
-  virtual int MinorKey() { return 0; }
-  virtual void Generate(MacroAssembler* masm);
-
-  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register length,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Register scratch3,
-                                            Label* chars_not_equal);
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
 };
 
 
@@ -146,29 +78,38 @@
 // so you don't have to set up the frame.
 class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
  public:
-  WriteInt32ToHeapNumberStub(Isolate* isolate,
-                             Register the_int,
-                             Register the_heap_number,
-                             Register scratch,
+  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
+                             Register the_heap_number, Register scratch,
                              Register scratch2)
-      : PlatformCodeStub(isolate),
-        the_int_(the_int),
-        the_heap_number_(the_heap_number),
-        scratch_(scratch),
-        sign_(scratch2) {
-    ASSERT(IntRegisterBits::is_valid(the_int_.code()));
-    ASSERT(HeapNumberRegisterBits::is_valid(the_heap_number_.code()));
-    ASSERT(ScratchRegisterBits::is_valid(scratch_.code()));
-    ASSERT(SignRegisterBits::is_valid(sign_.code()));
+      : PlatformCodeStub(isolate) {
+    minor_key_ = IntRegisterBits::encode(the_int.code()) |
+                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
+                 ScratchRegisterBits::encode(scratch.code()) |
+                 SignRegisterBits::encode(scratch2.code());
+    DCHECK(IntRegisterBits::is_valid(the_int.code()));
+    DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
+    DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
+    DCHECK(SignRegisterBits::is_valid(scratch2.code()));
   }
 
   static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
 
  private:
-  Register the_int_;
-  Register the_heap_number_;
-  Register scratch_;
-  Register sign_;
+  Register the_int() const {
+    return Register::from_code(IntRegisterBits::decode(minor_key_));
+  }
+
+  Register the_heap_number() const {
+    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
+  }
+
+  Register scratch() const {
+    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
+  }
+
+  Register sign() const {
+    return Register::from_code(SignRegisterBits::decode(minor_key_));
+  }
 
   // Minor key encoding in 16 bits.
   class IntRegisterBits: public BitField<int, 0, 4> {};
@@ -176,16 +117,8 @@
   class ScratchRegisterBits: public BitField<int, 8, 4> {};
   class SignRegisterBits: public BitField<int, 12, 4> {};
 
-  Major MajorKey() { return WriteInt32ToHeapNumber; }
-  int MinorKey() {
-    // Encode the parameters in a unique 16 bit value.
-    return IntRegisterBits::encode(the_int_.code())
-           | HeapNumberRegisterBits::encode(the_heap_number_.code())
-           | ScratchRegisterBits::encode(scratch_.code())
-           | SignRegisterBits::encode(sign_.code());
-  }
-
-  void Generate(MacroAssembler* masm);
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
 };
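// The *RegisterBits classes above follow V8's BitField pattern (src/utils.h):
// each field owns `size` bits starting at `shift` within a 32-bit key, which
// is how the new constructor packs four register codes into minor_key_ and
// how the accessors recover them. A minimal sketch of that contract (the
// real template carries additional helpers):
template <class T, int shift, int size>
class BitFieldSketch {
 public:
  static const uint32_t kMask = ((1U << size) - 1) << shift;
  static bool is_valid(T value) {
    // The value must fit in `size` bits.
    return (static_cast<uint32_t>(value) & ~((1U << size) - 1)) == 0;
  }
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};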
 
 
@@ -198,16 +131,19 @@
                   RememberedSetAction remembered_set_action,
                   SaveFPRegsMode fp_mode)
       : PlatformCodeStub(isolate),
-        object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
         regs_(object,   // An input reg.
               address,  // An input reg.
               value) {  // One scratch reg.
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
   }
 
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
   enum Mode {
     STORE_BUFFER_ONLY,
     INCREMENTAL,
@@ -220,14 +156,14 @@
     const unsigned offset = masm->instr_at(pos) & kImm16Mask;
     masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
         (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
-    ASSERT(Assembler::IsBne(masm->instr_at(pos)));
+    DCHECK(Assembler::IsBne(masm->instr_at(pos)));
   }
 
   static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
     const unsigned offset = masm->instr_at(pos) & kImm16Mask;
     masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
         (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
-    ASSERT(Assembler::IsBeq(masm->instr_at(pos)));
+    DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
   }
 
   static Mode GetMode(Code* stub) {
@@ -239,13 +175,13 @@
       return INCREMENTAL;
     }
 
-    ASSERT(Assembler::IsBne(first_instruction));
+    DCHECK(Assembler::IsBne(first_instruction));
 
     if (Assembler::IsBeq(second_instruction)) {
       return INCREMENTAL_COMPACTION;
     }
 
-    ASSERT(Assembler::IsBne(second_instruction));
+    DCHECK(Assembler::IsBne(second_instruction));
 
     return STORE_BUFFER_ONLY;
   }
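// Net effect: the stub's current mode is encoded in its first two branch
// slots, exploiting that BEQ zero,zero always branches while BNE zero,zero
// never does (an effective nop):
//   first == BEQ                -> INCREMENTAL
//   first == BNE, second == BEQ -> INCREMENTAL_COMPACTION
//   first == BNE, second == BNE -> STORE_BUFFER_ONLY
// Patch() below flips between these states and flushes the i-cache.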
@@ -256,24 +192,27 @@
                         stub->instruction_size());
     switch (mode) {
       case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
+        DCHECK(GetMode(stub) == INCREMENTAL ||
                GetMode(stub) == INCREMENTAL_COMPACTION);
         PatchBranchIntoNop(&masm, 0);
         PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
         break;
       case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         PatchNopIntoBranch(&masm, 0);
         break;
       case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
         break;
     }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 4 * Assembler::kInstrSize);
+    DCHECK(GetMode(stub) == mode);
+    CpuFeatures::FlushICache(stub->instruction_start(),
+                             4 * Assembler::kInstrSize);
   }
 
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
  private:
   // This is a helper class for freeing up 3 scratch registers.  The input is
   // two registers that must be preserved and one scratch register provided by
@@ -286,12 +225,12 @@
         : object_(object),
           address_(address),
           scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      DCHECK(!AreAliased(scratch0, object, address, no_reg));
       scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
     }
 
     void Save(MacroAssembler* masm) {
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
+      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
       // We don't have to save scratch0_ because it was given to us as
       // a scratch register.
       masm->push(scratch1_);
@@ -338,7 +277,9 @@
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  void Generate(MacroAssembler* masm);
+  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+  virtual void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -346,33 +287,40 @@
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_) |
-        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
-  }
-
   void Activate(Code* code) {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
   class ObjectBits: public BitField<int, 0, 5> {};
   class ValueBits: public BitField<int, 5, 5> {};
   class AddressBits: public BitField<int, 10, 5> {};
   class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
   class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
 
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
   Label slow_;
   RegisterAllocation regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
 };
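
The refactoring above replaces five member fields with a single minor_key_ packed through BitField, which is what lets the new RecordWriteStub(uint32_t key, Isolate*) constructor rebuild a stub from its key alone. A minimal sketch of the pattern; the BitField stand-in is modeled on V8's template but written from scratch here, and the enumerator orderings are assumptions:

    #include <cassert>
    #include <cstdint>

    // Stand-in for v8::internal::BitField<T, shift, size>.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & kMask;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
    enum SaveFPRegsMode { kDontSaveFPRegs, kSaveFPRegs };

    // Same layout as the stub above: three 5-bit register codes plus two
    // flags packed into the low 17 bits of the minor key.
    using ObjectBits = BitField<int, 0, 5>;
    using ValueBits = BitField<int, 5, 5>;
    using AddressBits = BitField<int, 10, 5>;
    using RememberedSetActionBits = BitField<RememberedSetAction, 15, 1>;
    using SaveFPRegsModeBits = BitField<SaveFPRegsMode, 16, 1>;

    int main() {
      uint32_t minor_key = ObjectBits::encode(4) | ValueBits::encode(5) |
                           AddressBits::encode(6) |
                           RememberedSetActionBits::encode(OMIT_REMEMBERED_SET) |
                           SaveFPRegsModeBits::encode(kSaveFPRegs);
      assert(ObjectBits::decode(minor_key) == 4);
      assert(AddressBits::decode(minor_key) == 6);
      assert(SaveFPRegsModeBits::decode(minor_key) == kSaveFPRegs);
    }
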
 
 
@@ -384,14 +332,13 @@
 class DirectCEntryStub: public PlatformCodeStub {
  public:
   explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-  void Generate(MacroAssembler* masm);
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
-  Major MajorKey() { return DirectCEntry; }
-  int MinorKey() { return 0; }
-
   bool NeedsImmovableCode() { return true; }
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
 };
 
 
@@ -400,9 +347,9 @@
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
   NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
-      : PlatformCodeStub(isolate), mode_(mode) { }
-
-  void Generate(MacroAssembler* masm);
+      : PlatformCodeStub(isolate) {
+    minor_key_ = LookupModeBits::encode(mode);
+  }
 
   static void GenerateNegativeLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -434,15 +381,12 @@
       NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return NameDictionaryLookup; }
-
-  int MinorKey() {
-    return LookupModeBits::encode(mode_);
-  }
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
 
   class LookupModeBits: public BitField<LookupMode, 0, 1> {};
 
-  LookupMode mode_;
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
 };
 
 
diff --git a/src/mips/codegen-mips.cc b/src/mips/codegen-mips.cc
index 5d613d0..0ecac19 100644
--- a/src/mips/codegen-mips.cc
+++ b/src/mips/codegen-mips.cc
@@ -29,7 +29,8 @@
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &std::exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::exp;
   ExternalReference::InitializeMathExpData();
 
@@ -56,10 +57,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
 
 #if !defined(USE_SIMULATOR)
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
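
CreateExpFunction follows the standard runtime-codegen sequence: allocate writable memory, emit machine code into it, flush the instruction cache, then drop write permission before executing. A minimal sketch of the same sequence on a POSIX x86-64 host (illustrative, not V8's OS layer; the cache flush is a no-op on x86 but mandatory on MIPS and ARM):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstring>

    using UnaryFn = double (*)(double);

    int main() {
      // 1. Allocate a writable buffer (V8: base::OS::Allocate).
      size_t size = 4096;
      void* buf = mmap(nullptr, size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(buf != MAP_FAILED);

      // 2. Emit machine code (V8: MacroAssembler). Here, x86-64 SysV code
      //    for "return the argument": xmm0 already holds it, so just ret.
      unsigned char code[] = {0xC3};
      memcpy(buf, code, sizeof(code));

      // 3. Flush the instruction cache (V8: CpuFeatures::FlushICache).
      __builtin___clear_cache(static_cast<char*>(buf),
                              static_cast<char*>(buf) + sizeof(code));

      // 4. Make the buffer read/execute only (V8: base::OS::ProtectCode).
      mprotect(buf, size, PROT_READ | PROT_EXEC);

      UnaryFn f = reinterpret_cast<UnaryFn>(buf);
      assert(f(2.5) == 2.5);
      munmap(buf, size);
    }
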
@@ -72,11 +73,13 @@
 
 #if defined(V8_HOST_ARCH_MIPS)
 MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
-#if defined(USE_SIMULATOR)
+#if defined(USE_SIMULATOR) || defined(_MIPS_ARCH_MIPS32R6) || \
+    defined(_MIPS_ARCH_MIPS32RX)
   return stub;
 #else
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(3 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
   if (buffer == NULL) return stub;
 
   // This code assumes that cache lines are 32 bytes and if the cache line is
@@ -96,7 +99,7 @@
     // the kPrefHintPrepareForStore hint is used, the code will not work
     // correctly.
     uint32_t max_pref_size = 128;
-    ASSERT(pref_chunk < max_pref_size);
+    DCHECK(pref_chunk < max_pref_size);
 
     // pref_limit is set based on the fact that we never use an offset
     // greater than 5 on a store pref and that a single pref can
@@ -109,7 +112,7 @@
     // The initial prefetches may fetch bytes that are before the buffer being
     // copied. Start copies with an offset of 4 to avoid this situation when
     // using kPrefHintPrepareForStore.
-    ASSERT(pref_hint_store != kPrefHintPrepareForStore ||
+    DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
            pref_chunk * 4 >= max_pref_size);
 
     // If the size is less than 8, go to lastb. Regardless of size,
@@ -592,10 +595,10 @@
   }
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<MemCopyUint8Function>(buffer);
 #endif
 }
@@ -606,7 +609,8 @@
   return &std::sqrt;
 #else
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::sqrt;
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -618,10 +622,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 #endif
 }
@@ -634,14 +638,14 @@
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
+  DCHECK(!masm->has_frame());
   masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
+  DCHECK(masm->has_frame());
   masm->set_has_frame(false);
 }
 
@@ -652,26 +656,28 @@
 #define __ ACCESS_MASM(masm)
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
+  Register scratch_elements = t0;
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     scratch_elements));
+
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(a2, t0, allocation_memento_found);
+    DCHECK(allocation_memento_found != NULL);
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch_elements, allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
+                      target_map,
                       t5,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
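
The sw/RecordWriteField pair in this transition is a generational write barrier: after the store, the slot is logged if it may now hold an old-to-new pointer, so a scavenge can find the reference without scanning all of old space. A hedged sketch of the invariant (the types and the address-range predicate are illustrative, not V8's):

    #include <cstdint>
    #include <unordered_set>

    struct Heap {
      std::unordered_set<void**> store_buffer;  // Remembered slots.
      // Toy predicate: a real heap checks which space owns the address.
      bool InNewSpace(const void* p) const {
        return reinterpret_cast<uintptr_t>(p) < 0x100000u;
      }
    };

    // The store plus the barrier, as one C++ function.
    void WriteField(Heap* heap, void** slot, void* new_value) {
      *slot = new_value;
      if (heap->InNewSpace(new_value) && !heap->InNewSpace(slot)) {
        heap->store_buffer.insert(slot);  // Old-to-new edge: remember it.
      }
    }

    int main() {
      Heap heap;
      static void* old_space_field = nullptr;         // High address: "old".
      void* young = reinterpret_cast<void*>(0x1000);  // Low address: "new".
      WriteField(&heap, &old_space_field, young);
      return heap.store_buffer.size() == 1 ? 0 : 1;
    }
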
@@ -681,62 +687,74 @@
 
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
   Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  Register elements = t0;
+  Register length = t1;
+  Register array = t2;
+  Register array_end = array;
+
+  // target_map parameter can be clobbered.
+  Register scratch1 = target_map;
+  Register scratch2 = t5;
+  Register scratch3 = t3;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     elements, length, array, scratch2));
 
   Register scratch = t6;
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(t0));
+  __ Branch(&only_change_map, eq, at, Operand(elements));
 
   __ push(ra);
-  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  // t0: source FixedArray
-  // t1: number of elements (smi-tagged)
+  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedDoubleArray.
-  __ sll(scratch, t1, 2);
+  __ sll(scratch, length, 2);
   __ Addu(scratch, scratch, FixedDoubleArray::kHeaderSize);
-  __ Allocate(scratch, t2, t3, t5, &gc_required, DOUBLE_ALIGNMENT);
-  // t2: destination FixedDoubleArray, not tagged as heap object
+  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+  // array: destination FixedDoubleArray, not tagged as heap object
 
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t5, Heap::kFixedDoubleArrayMapRootIndex);
-  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
-  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+  __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
   // Update receiver's map.
+  __ sw(scratch2, MemOperand(array, HeapObject::kMapOffset));
 
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t5,
+                      target_map,
+                      scratch2,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created FixedDoubleArray.
-  __ Addu(a3, t2, Operand(kHeapObjectTag));
-  __ sw(a3, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
+  __ Addu(scratch1, array, Operand(kHeapObjectTag));
+  __ sw(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      a3,
-                      t5,
+                      scratch1,
+                      scratch2,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
@@ -744,26 +762,32 @@
 
 
   // Prepare for conversion loop.
-  __ Addu(a3, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(t3, t2, Operand(FixedDoubleArray::kHeaderSize));
-  __ sll(t2, t1, 2);
-  __ Addu(t2, t2, t3);
-  __ li(t0, Operand(kHoleNanLower32));
-  __ li(t1, Operand(kHoleNanUpper32));
-  // t0: kHoleNanLower32
-  // t1: kHoleNanUpper32
-  // t2: end of destination FixedDoubleArray, not tagged
-  // t3: begin of FixedDoubleArray element fields, not tagged
+  __ Addu(scratch1, elements,
+      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Addu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ sll(at, length, 2);
+  __ Addu(array_end, scratch3, at);
 
-  __ Branch(&entry);
+  // Repurpose registers no longer in use.
+  Register hole_lower = elements;
+  Register hole_upper = length;
+
+  __ li(hole_lower, Operand(kHoleNanLower32));
+  // scratch1: begin of source FixedArray element fields, not tagged
+  // hole_lower: kHoleNanLower32
+  // hole_upper: kHoleNanUpper32
+  // array_end: end of destination FixedDoubleArray, not tagged
+  // scratch3: begin of FixedDoubleArray element fields, not tagged
+  __ Branch(USE_DELAY_SLOT, &entry);
+  __ li(hole_upper, Operand(kHoleNanUpper32));  // In delay slot.
 
   __ bind(&only_change_map);
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t5,
-                      kRAHasNotBeenSaved,
+                      target_map,
+                      scratch2,
+                      kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
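
The conversion loop below materializes array holes as a NaN with a distinguished bit pattern, written as two 32-bit words and later recognized by comparing only the upper word. A sketch of the idea; the kHoleNan constants here are illustrative assumptions, not V8's actual values:

    #include <cassert>
    #include <cmath>
    #include <cstdint>
    #include <cstring>

    constexpr uint32_t kHoleNanUpper32 = 0x7FFFFFFFu;  // Assumed payload.
    constexpr uint32_t kHoleNanLower32 = 0xFFFF7FFFu;

    double MakeHole() {  // Little-endian: low word first.
      uint32_t words[2] = {kHoleNanLower32, kHoleNanUpper32};
      double d;
      memcpy(&d, words, sizeof(d));
      return d;
    }

    bool IsHole(double d) {
      uint32_t words[2];
      memcpy(words, &d, sizeof(d));
      return words[1] == kHoleNanUpper32;  // Only the upper word is tested.
    }

    int main() {
      double hole = MakeHole();
      assert(std::isnan(hole));       // Still an ordinary NaN to the FPU.
      assert(IsHole(hole));
      assert(!IsHole(std::nan("")));  // Default NaN has a different payload.
    }
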
@@ -771,130 +795,154 @@
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
-  __ pop(ra);
-  __ Branch(fail);
+  __ lw(ra, MemOperand(sp, 0));
+  __ Branch(USE_DELAY_SLOT, fail);
+  __ addiu(sp, sp, kPointerSize);  // In delay slot.
 
   // Convert and copy elements.
   __ bind(&loop);
-  __ lw(t5, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
-  // t5: current element
-  __ UntagAndJumpIfNotSmi(t5, t5, &convert_hole);
+  __ lw(scratch2, MemOperand(scratch1));
+  __ Addu(scratch1, scratch1, kIntSize);
+  // scratch2: current element
+  __ UntagAndJumpIfNotSmi(scratch2, scratch2, &convert_hole);
 
   // Normal smi, convert to double and store.
-  __ mtc1(t5, f0);
+  __ mtc1(scratch2, f0);
   __ cvt_d_w(f0, f0);
-  __ sdc1(f0, MemOperand(t3));
-  __ Addu(t3, t3, kDoubleSize);
-
-  __ Branch(&entry);
+  __ sdc1(f0, MemOperand(scratch3));
+  __ Branch(USE_DELAY_SLOT, &entry);
+  __ addiu(scratch3, scratch3, kDoubleSize);  // In delay slot.
 
   // Hole found, store the-hole NaN.
   __ bind(&convert_hole);
   if (FLAG_debug_code) {
     // Restore a "smi-untagged" heap object.
-    __ SmiTag(t5);
-    __ Or(t5, t5, Operand(1));
+    __ SmiTag(scratch2);
+    __ Or(scratch2, scratch2, Operand(1));
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(t5));
+    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
   }
-  __ sw(t0, MemOperand(t3, Register::kMantissaOffset));  // mantissa
-  __ sw(t1, MemOperand(t3, Register::kExponentOffset));  // exponent
-  __ Addu(t3, t3, kDoubleSize);
-
+  // mantissa
+  __ sw(hole_lower, MemOperand(scratch3, Register::kMantissaOffset));
+  // exponent
+  __ sw(hole_upper, MemOperand(scratch3, Register::kExponentOffset));
   __ bind(&entry);
-  __ Branch(&loop, lt, t3, Operand(t2));
+  __ addiu(scratch3, scratch3, kDoubleSize);
 
-  __ pop(ra);
+  __ Branch(&loop, lt, scratch3, Operand(array_end));
+
   __ bind(&done);
+  __ pop(ra);
 }
 
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : key
-  //  -- a2    : receiver
-  //  -- ra    : return address
-  //  -- a3    : target map, scratch for subsequent call
-  //  -- t0    : scratch (elements)
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
   Label entry, loop, convert_hole, gc_required, only_change_map;
+  Register elements = t0;
+  Register array = t2;
+  Register length = t1;
+  Register scratch = t5;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     elements, array, length, scratch));
 
   if (mode == TRACK_ALLOCATION_SITE) {
-    __ JumpIfJSArrayHasAllocationMemento(a2, t0, fail);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
   }
 
   // Check for empty arrays, which only require a map transition and no changes
   // to the backing store.
-  __ lw(t0, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
   __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
-  __ Branch(&only_change_map, eq, at, Operand(t0));
+  __ Branch(&only_change_map, eq, at, Operand(elements));
 
-  __ MultiPush(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  __ MultiPush(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
 
-  __ lw(t1, FieldMemOperand(t0, FixedArray::kLengthOffset));
-  // t0: source FixedArray
-  // t1: number of elements (smi-tagged)
+  __ lw(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
 
   // Allocate new FixedArray.
-  __ sll(a0, t1, 1);
-  __ Addu(a0, a0, FixedDoubleArray::kHeaderSize);
-  __ Allocate(a0, t2, t3, t5, &gc_required, NO_ALLOCATION_FLAGS);
-  // t2: destination FixedArray, not tagged as heap object
+  // Re-use value and target_map registers, as they have been saved on the
+  // stack.
+  Register array_size = value;
+  Register allocate_scratch = target_map;
+  __ sll(array_size, length, 1);
+  __ Addu(array_size, array_size, FixedDoubleArray::kHeaderSize);
+  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+              NO_ALLOCATION_FLAGS);
+  // array: destination FixedArray, not tagged as heap object
   // Set destination FixedDoubleArray's length and map.
-  __ LoadRoot(t5, Heap::kFixedArrayMapRootIndex);
-  __ sw(t1, MemOperand(t2, FixedDoubleArray::kLengthOffset));
-  __ sw(t5, MemOperand(t2, HeapObject::kMapOffset));
+  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+  __ sw(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ sw(scratch, MemOperand(array, HeapObject::kMapOffset));
 
   // Prepare for conversion loop.
-  __ Addu(t0, t0, Operand(
+  Register src_elements = elements;
+  Register dst_elements = target_map;
+  Register dst_end = length;
+  Register heap_number_map = scratch;
+  __ Addu(src_elements, src_elements, Operand(
         FixedDoubleArray::kHeaderSize - kHeapObjectTag
         + Register::kExponentOffset));
-  __ Addu(a3, t2, Operand(FixedArray::kHeaderSize));
-  __ Addu(t2, t2, Operand(kHeapObjectTag));
-  __ sll(t1, t1, 1);
-  __ Addu(t1, a3, t1);
-  __ LoadRoot(t3, Heap::kTheHoleValueRootIndex);
-  __ LoadRoot(t5, Heap::kHeapNumberMapRootIndex);
+  __ Addu(dst_elements, array, Operand(FixedArray::kHeaderSize));
+  __ Addu(array, array, Operand(kHeapObjectTag));
+  __ sll(dst_end, dst_end, 1);
+  __ Addu(dst_end, dst_elements, dst_end);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   // Using pre-offset addresses.
-  // a3: begin of destination FixedArray element fields, not tagged
-  // t0: begin of source FixedDoubleArray element fields, not tagged,
-  //     points to the exponent
-  // t1: end of destination FixedArray, not tagged
-  // t2: destination FixedArray
-  // t3: the-hole pointer
-  // t5: heap number map
+  // dst_elements: begin of destination FixedArray element fields, not tagged
+  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
+  //               points to the exponent
+  // dst_end: end of destination FixedArray, not tagged
+  // array: destination FixedArray
+  // heap_number_map: heap number map
   __ Branch(&entry);
 
   // Call into runtime if GC is required.
   __ bind(&gc_required);
-  __ MultiPop(a0.bit() | a1.bit() | a2.bit() | a3.bit() | ra.bit());
+  __ MultiPop(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
 
   __ Branch(fail);
 
   __ bind(&loop);
-  __ lw(a1, MemOperand(t0));
-  __ Addu(t0, t0, kDoubleSize);
-  // a1: current element's upper 32 bit
-  // t0: address of next element's upper 32 bit
+  Register upper_bits = key;
+  __ lw(upper_bits, MemOperand(src_elements));
+  __ Addu(src_elements, src_elements, kDoubleSize);
+  // upper_bits: current element's upper 32 bit
+  // src_elements: address of next element's upper 32 bit
   __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
 
   // Non-hole double, copy value into a heap number.
-  __ AllocateHeapNumber(a2, a0, t6, t5, &gc_required);
-  // a2: new heap number
-  // Load mantissa of current element, t0 point to exponent of next element.
-  __ lw(a0, MemOperand(t0, (Register::kMantissaOffset
+  Register heap_number = receiver;
+  Register scratch2 = value;
+  Register scratch3 = t6;
+  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
+                        &gc_required);
+  // heap_number: new heap number
+  // Load mantissa of current element, src_elements
+  // points to the exponent of the next element.
+  __ lw(scratch2, MemOperand(src_elements, (Register::kMantissaOffset
       - Register::kExponentOffset - kDoubleSize)));
-  __ sw(a0, FieldMemOperand(a2, HeapNumber::kMantissaOffset));
-  __ sw(a1, FieldMemOperand(a2, HeapNumber::kExponentOffset));
-  __ mov(a0, a3);
-  __ sw(a2, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
-  __ RecordWrite(t2,
-                 a0,
-                 a2,
+  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+  __ mov(scratch2, dst_elements);
+  __ sw(heap_number, MemOperand(dst_elements));
+  __ Addu(dst_elements, dst_elements, kIntSize);
+  __ RecordWrite(array,
+                 scratch2,
+                 heap_number,
                  kRAHasBeenSaved,
                  kDontSaveFPRegs,
                  EMIT_REMEMBERED_SET,
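
The boxing step above handles the 64-bit double as two 32-bit words, writing them through Register::kMantissaOffset / kExponentOffset, which encode the host's endianness. A small sketch of that split, assuming a little-endian host:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    struct FakeHeapNumber { uint32_t words[2]; };  // Illustrative box.

    // Little-endian: the low (mantissa) word comes first in memory.
    constexpr int kMantissaOffset = 0;
    constexpr int kExponentOffset = 1;

    void StoreDouble(FakeHeapNumber* box, double value) {
      uint32_t raw[2];
      memcpy(raw, &value, sizeof(value));
      box->words[kMantissaOffset] = raw[0];
      box->words[kExponentOffset] = raw[1];
    }

    int main() {
      FakeHeapNumber box;
      StoreDouble(&box, 1.0);  // 1.0 == 0x3FF0000000000000.
      assert(box.words[kExponentOffset] == 0x3FF00000u);
      assert(box.words[kMantissaOffset] == 0u);
    }
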
@@ -903,19 +951,20 @@
 
   // Replace the-hole NaN with the-hole pointer.
   __ bind(&convert_hole);
-  __ sw(t3, MemOperand(a3));
-  __ Addu(a3, a3, kIntSize);
+  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  __ sw(scratch2, MemOperand(dst_elements));
+  __ Addu(dst_elements, dst_elements, kIntSize);
 
   __ bind(&entry);
-  __ Branch(&loop, lt, a3, Operand(t1));
+  __ Branch(&loop, lt, dst_elements, Operand(dst_end));
 
-  __ MultiPop(a2.bit() | a3.bit() | a0.bit() | a1.bit());
+  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
   // Replace receiver's backing store with newly created and filled FixedArray.
-  __ sw(t2, FieldMemOperand(a2, JSObject::kElementsOffset));
-  __ RecordWriteField(a2,
+  __ sw(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
                       JSObject::kElementsOffset,
-                      t2,
-                      t5,
+                      array,
+                      scratch,
                       kRAHasBeenSaved,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
@@ -924,11 +973,11 @@
 
   __ bind(&only_change_map);
   // Update receiver's map.
-  __ sw(a3, FieldMemOperand(a2, HeapObject::kMapOffset));
-  __ RecordWriteField(a2,
+  __ sw(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      a3,
-                      t5,
+                      target_map,
+                      scratch,
                       kRAHasNotBeenSaved,
                       kDontSaveFPRegs,
                       OMIT_REMEMBERED_SET,
@@ -1010,18 +1059,18 @@
   __ Branch(call_runtime, ne, at, Operand(zero_reg));
   __ lw(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
 
-  Label ascii, done;
+  Label one_byte, done;
   __ bind(&check_encoding);
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ And(at, result, Operand(kStringEncodingMask));
-  __ Branch(&ascii, ne, at, Operand(zero_reg));
+  __ Branch(&one_byte, ne, at, Operand(zero_reg));
   // Two-byte string.
   __ sll(at, index, 1);
   __ Addu(at, string, at);
   __ lhu(result, MemOperand(at));
   __ jmp(&done);
-  __ bind(&ascii);
-  // Ascii string.
+  __ bind(&one_byte);
+  // One-byte string.
   __ Addu(at, string, index);
   __ lbu(result, MemOperand(at));
   __ bind(&done);
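
The two load paths above differ only in element width: a two-byte (UTF-16) string scales the index by two (sll at, index, 1) and loads a halfword (lhu), while a one-byte (Latin-1) string adds the index directly and loads a byte (lbu). The same addressing in plain C++:

    #include <cassert>
    #include <cstdint>

    uint16_t CharAt(const void* data, bool one_byte, int index) {
      if (one_byte) {
        return static_cast<const uint8_t*>(data)[index];  // lbu path.
      }
      return static_cast<const uint16_t*>(data)[index];   // lhu path.
    }

    int main() {
      const uint8_t latin1[] = {'a', 'b', 'c'};
      const uint16_t utf16[] = {0x3042, 0x3044};
      assert(CharAt(latin1, true, 1) == 'b');
      assert(CharAt(utf16, false, 1) == 0x3044);
    }
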
@@ -1041,16 +1090,17 @@
                                    Register temp1,
                                    Register temp2,
                                    Register temp3) {
-  ASSERT(!input.is(result));
-  ASSERT(!input.is(double_scratch1));
-  ASSERT(!input.is(double_scratch2));
-  ASSERT(!result.is(double_scratch1));
-  ASSERT(!result.is(double_scratch2));
-  ASSERT(!double_scratch1.is(double_scratch2));
-  ASSERT(!temp1.is(temp2));
-  ASSERT(!temp1.is(temp3));
-  ASSERT(!temp2.is(temp3));
-  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!input.is(result));
+  DCHECK(!input.is(double_scratch1));
+  DCHECK(!input.is(double_scratch2));
+  DCHECK(!result.is(double_scratch1));
+  DCHECK(!result.is(double_scratch2));
+  DCHECK(!double_scratch1.is(double_scratch2));
+  DCHECK(!temp1.is(temp2));
+  DCHECK(!temp1.is(temp3));
+  DCHECK(!temp2.is(temp3));
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!masm->serializer_enabled());  // External references not serializable.
 
   Label zero, infinity, done;
 
@@ -1079,7 +1129,7 @@
   __ mul_d(result, result, double_scratch2);
   __ sub_d(result, result, double_scratch1);
   // Move 1 into double_scratch2 as math_exp_constants_array[8] == 1.
-  ASSERT(*reinterpret_cast<double*>
+  DCHECK(*reinterpret_cast<double*>
          (ExternalReference::math_exp_constants(8).address()) == 1);
   __ Move(double_scratch2, 1);
   __ add_d(result, result, double_scratch2);
@@ -1123,7 +1173,7 @@
 
 
 CodeAgingHelper::CodeAgingHelper() {
-  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
   // Since patcher is a large object, allocate it dynamically when needed,
   // to avoid overloading the stack in stress conditions.
   // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
@@ -1149,7 +1199,7 @@
 
 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
   bool result = isolate->code_aging_helper()->IsYoung(sequence);
-  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
+  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
   return result;
 }
 
@@ -1175,7 +1225,7 @@
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
-    CPU::FlushICache(sequence, young_length);
+    CpuFeatures::FlushICache(sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
diff --git a/src/mips/codegen-mips.h b/src/mips/codegen-mips.h
index 82a410e..b02ec4f 100644
--- a/src/mips/codegen-mips.h
+++ b/src/mips/codegen-mips.h
@@ -8,7 +8,7 @@
 
 
 #include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/mips/constants-mips.cc b/src/mips/constants-mips.cc
index f149927..30e4b2e 100644
--- a/src/mips/constants-mips.cc
+++ b/src/mips/constants-mips.cc
@@ -278,6 +278,8 @@
     case COP1:    // Coprocessor instructions.
       switch (RsFieldRawNoAssert()) {
         case BC1:   // Branch on coprocessor condition.
+        case BC1EQZ:
+        case BC1NEZ:
           return kImmediateType;
         default:
           return kRegisterType;
@@ -292,6 +294,7 @@
     case BLEZ:
     case BGTZ:
     case ADDI:
+    case DADDI:
     case ADDIU:
     case SLTI:
     case SLTIU:
@@ -303,6 +306,8 @@
     case BNEL:
     case BLEZL:
     case BGTZL:
+    case BEQZC:
+    case BNEZC:
     case LB:
     case LH:
     case LWL:
diff --git a/src/mips/constants-mips.h b/src/mips/constants-mips.h
index fc64f7d..5ead110 100644
--- a/src/mips/constants-mips.h
+++ b/src/mips/constants-mips.h
@@ -4,7 +4,7 @@
 
 #ifndef  V8_MIPS_CONSTANTS_H_
 #define  V8_MIPS_CONSTANTS_H_
-
+#include "src/globals.h"
 // UNIMPLEMENTED_ macro for MIPS.
 #ifdef DEBUG
 #define UNIMPLEMENTED_MIPS()                                                  \
@@ -17,17 +17,25 @@
 #define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
 
 enum ArchVariants {
-  kMips32r2,
-  kMips32r1,
+  kMips32r1 = v8::internal::MIPSr1,
+  kMips32r2 = v8::internal::MIPSr2,
+  kMips32r6 = v8::internal::MIPSr6,
   kLoongson
 };
 
 #ifdef _MIPS_ARCH_MIPS32R2
   static const ArchVariants kArchVariant = kMips32r2;
+#elif _MIPS_ARCH_MIPS32R6
+  static const ArchVariants kArchVariant = kMips32r6;
 #elif _MIPS_ARCH_LOONGSON
 // The loongson flag refers to the LOONGSON architectures based on MIPS-III,
 // which predates (and is a subset of) the mips32r2 and r1 architectures.
   static const ArchVariants kArchVariant = kLoongson;
+#elif _MIPS_ARCH_MIPS32RX
+// This flag refers to a compatibility mode that creates universal code that
+// can run on any MIPS32 architecture revision. The code dynamically generated
+// by v8 is specialized for the MIPS host detected by runtime probing.
+  static const ArchVariants kArchVariant = kMips32r1;
 #else
   static const ArchVariants kArchVariant = kMips32r1;
 #endif
@@ -45,6 +53,22 @@
 #error Unknown endianness
 #endif
 
+enum FpuMode {
+  kFP32,
+  kFP64,
+  kFPXX
+};
+
+#if defined(FPU_MODE_FP32)
+  static const FpuMode kFpuMode = kFP32;
+#elif defined(FPU_MODE_FP64)
+  static const FpuMode kFpuMode = kFP64;
+#elif defined(FPU_MODE_FPXX)
+  static const FpuMode kFpuMode = kFPXX;
+#else
+  static const FpuMode kFpuMode = kFP32;
+#endif
+
 #if(defined(__mips_hard_float) && __mips_hard_float != 0)
 // Use floating-point coprocessor instructions. This flag is raised when
 // -mhard-float is passed to the compiler.
@@ -68,6 +92,26 @@
 #error Unknown endianness
 #endif
 
+#ifndef FPU_MODE_FPXX
+#define IsFp64Mode() \
+  (kFpuMode == kFP64)
+#else
+#define IsFp64Mode() \
+  (CpuFeatures::IsSupported(FP64FPU))
+#endif
+
+#ifndef _MIPS_ARCH_MIPS32RX
+#define IsMipsArchVariant(check) \
+  (kArchVariant == check)
+#else
+#define IsMipsArchVariant(check) \
+  (CpuFeatures::IsSupported(check))
+#endif
+
+
+#define __STDC_FORMAT_MACROS
+#include <inttypes.h>
+
 // Defines constants and accessor classes to assemble, disassemble and
 // simulate MIPS32 instructions.
 //
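
The IsMipsArchVariant and IsFp64Mode macros above pick between two dispatch strategies: a compile-time constant when the target revision is fixed at build time, and a runtime CpuFeatures probe for "universal" MIPS32RX builds. A stand-alone sketch of the same split (UNIVERSAL_BUILD and DetectHostVariant are illustrative names, not V8's):

    #include <cstdio>

    enum ArchVariant { kMips32r1, kMips32r2, kMips32r6 };

    #ifdef UNIVERSAL_BUILD
    // Universal build: probe the host at runtime (imagine a cpuid-style
    // query here).
    ArchVariant DetectHostVariant() { return kMips32r2; }
    inline bool IsVariant(ArchVariant check) {
      return DetectHostVariant() == check;
    }
    #else
    // Fixed-target build: the check folds to a constant.
    constexpr ArchVariant kArchVariant = kMips32r2;
    inline constexpr bool IsVariant(ArchVariant check) {
      return kArchVariant == check;
    }
    #endif

    int main() {
      if (IsVariant(kMips32r2)) printf("emit r2-specific instructions\n");
    }
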
@@ -99,6 +143,8 @@
 const int kFCSRRegister = 31;
 const int kInvalidFPUControlRegister = -1;
 const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPU64InvalidResult =
+    static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
 
 // FCSR constants.
 const uint32_t kFCSRInexactFlagBit = 2;
@@ -216,10 +262,14 @@
 
 const int kImm16Shift = 0;
 const int kImm16Bits  = 16;
+const int kImm21Shift = 0;
+const int kImm21Bits  = 21;
 const int kImm26Shift = 0;
 const int kImm26Bits  = 26;
 const int kImm28Shift = 0;
 const int kImm28Bits  = 28;
+const int kImm32Shift = 0;
+const int kImm32Bits  = 32;
 
 // In branches and jumps immediate fields point to words, not bytes,
 // and are therefore shifted by 2.
@@ -278,14 +328,16 @@
   ANDI      =   ((1 << 3) + 4) << kOpcodeShift,
   ORI       =   ((1 << 3) + 5) << kOpcodeShift,
   XORI      =   ((1 << 3) + 6) << kOpcodeShift,
-  LUI       =   ((1 << 3) + 7) << kOpcodeShift,
+  LUI       =   ((1 << 3) + 7) << kOpcodeShift,  // LUI/AUI family.
 
+  BEQC      =   ((2 << 3) + 0) << kOpcodeShift,
   COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class.
   BEQL      =   ((2 << 3) + 4) << kOpcodeShift,
   BNEL      =   ((2 << 3) + 5) << kOpcodeShift,
   BLEZL     =   ((2 << 3) + 6) << kOpcodeShift,
   BGTZL     =   ((2 << 3) + 7) << kOpcodeShift,
 
+  DADDI     =   ((3 << 3) + 0) << kOpcodeShift,  // This is also BNEC.
   SPECIAL2  =   ((3 << 3) + 4) << kOpcodeShift,
   SPECIAL3  =   ((3 << 3) + 7) << kOpcodeShift,
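
Each primary opcode in this table is written as ((hi << 3) + lo) — the two octal digits used in the MIPS opcode maps — then shifted into bits 31..26 of the instruction word (kOpcodeShift is 26). A quick check of that arithmetic for ANDI, opcode 0b001100:

    #include <cassert>
    #include <cstdint>

    constexpr int kOpcodeShift = 26;
    constexpr uint32_t ANDI = ((1u << 3) + 4) << kOpcodeShift;

    int main() {
      assert((ANDI >> kOpcodeShift) == 12u);  // 0b001100.
      // andi $t0, $t1, 0xFF assembles as opcode|rs|rt|imm16:
      uint32_t rs = 9, rt = 8, imm = 0xFF;
      uint32_t insn = ANDI | (rs << 21) | (rt << 16) | imm;
      assert(insn == 0x312800FFu);
    }
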
 
@@ -304,11 +356,13 @@
 
   LWC1      =   ((6 << 3) + 1) << kOpcodeShift,
   LDC1      =   ((6 << 3) + 5) << kOpcodeShift,
+  BEQZC     =   ((6 << 3) + 6) << kOpcodeShift,
 
   PREF      =   ((6 << 3) + 3) << kOpcodeShift,
 
   SWC1      =   ((7 << 3) + 1) << kOpcodeShift,
   SDC1      =   ((7 << 3) + 5) << kOpcodeShift,
+  BNEZC     =   ((7 << 3) + 6) << kOpcodeShift,
 
   COP1X     =   ((1 << 4) + 3) << kOpcodeShift
 };
@@ -330,6 +384,8 @@
   BREAK     =   ((1 << 3) + 5),
 
   MFHI      =   ((2 << 3) + 0),
+  CLZ_R6    =   ((2 << 3) + 0),
+  CLO_R6    =   ((2 << 3) + 1),
   MFLO      =   ((2 << 3) + 2),
 
   MULT      =   ((3 << 3) + 0),
@@ -354,7 +410,21 @@
   TLT       =   ((6 << 3) + 2),
   TLTU      =   ((6 << 3) + 3),
   TEQ       =   ((6 << 3) + 4),
+  SELEQZ_S  =   ((6 << 3) + 5),
   TNE       =   ((6 << 3) + 6),
+  SELNEZ_S  =   ((6 << 3) + 7),
+
+  // Multiply integers in r6.
+  MUL_MUH   =   ((3 << 3) + 0),  // MUL, MUH.
+  MUL_MUH_U =   ((3 << 3) + 1),  // MUL_U, MUH_U.
+
+  MUL_OP    =   ((0 << 3) + 2),
+  MUH_OP    =   ((0 << 3) + 3),
+  DIV_OP    =   ((0 << 3) + 2),
+  MOD_OP    =   ((0 << 3) + 3),
+
+  DIV_MOD   =   ((3 << 3) + 2),
+  DIV_MOD_U =   ((3 << 3) + 3),
 
   // SPECIAL2 Encoding of Function Field.
   MUL       =   ((0 << 3) + 2),
@@ -370,6 +440,7 @@
   BGEZ      =   ((0 << 3) + 1) << 16,
   BLTZAL    =   ((2 << 3) + 0) << 16,
   BGEZAL    =   ((2 << 3) + 1) << 16,
+  BGEZALL   =   ((2 << 3) + 3) << 16,
 
   // COP1 Encoding of rs Field.
   MFC1      =   ((0 << 3) + 0) << 21,
@@ -414,6 +485,10 @@
   TRUNC_W_D =   ((1 << 3) + 5),
   CEIL_W_D  =   ((1 << 3) + 6),
   FLOOR_W_D =   ((1 << 3) + 7),
+  MIN       =   ((3 << 3) + 4),
+  MINA      =   ((3 << 3) + 5),
+  MAX       =   ((3 << 3) + 6),
+  MAXA      =   ((3 << 3) + 7),
   CVT_S_D   =   ((4 << 3) + 0),
   CVT_W_D   =   ((4 << 3) + 4),
   CVT_L_D   =   ((4 << 3) + 5),
@@ -430,6 +505,46 @@
   CVT_D_W   =   ((4 << 3) + 1),
   CVT_S_L   =   ((4 << 3) + 0),
   CVT_D_L   =   ((4 << 3) + 1),
+  BC1EQZ    =   ((2 << 2) + 1) << 21,
+  BC1NEZ    =   ((3 << 2) + 1) << 21,
+  // COP1 CMP positive predicates Bit 5..4 = 00.
+  CMP_AF    =   ((0 << 3) + 0),
+  CMP_UN    =   ((0 << 3) + 1),
+  CMP_EQ    =   ((0 << 3) + 2),
+  CMP_UEQ   =   ((0 << 3) + 3),
+  CMP_LT    =   ((0 << 3) + 4),
+  CMP_ULT   =   ((0 << 3) + 5),
+  CMP_LE    =   ((0 << 3) + 6),
+  CMP_ULE   =   ((0 << 3) + 7),
+  CMP_SAF   =   ((1 << 3) + 0),
+  CMP_SUN   =   ((1 << 3) + 1),
+  CMP_SEQ   =   ((1 << 3) + 2),
+  CMP_SUEQ  =   ((1 << 3) + 3),
+  CMP_SSLT  =   ((1 << 3) + 4),
+  CMP_SSULT =   ((1 << 3) + 5),
+  CMP_SLE   =   ((1 << 3) + 6),
+  CMP_SULE  =   ((1 << 3) + 7),
+  // COP1 CMP negative predicates Bit 5..4 = 01.
+  CMP_AT    =   ((2 << 3) + 0),  // Reserved, not implemented.
+  CMP_OR    =   ((2 << 3) + 1),
+  CMP_UNE   =   ((2 << 3) + 2),
+  CMP_NE    =   ((2 << 3) + 3),
+  CMP_UGE   =   ((2 << 3) + 4),  // Reserved, not implemented.
+  CMP_OGE   =   ((2 << 3) + 5),  // Reserved, not implemented.
+  CMP_UGT   =   ((2 << 3) + 6),  // Reserved, not implemented.
+  CMP_OGT   =   ((2 << 3) + 7),  // Reserved, not implemented.
+  CMP_SAT   =   ((3 << 3) + 0),  // Reserved, not implemented.
+  CMP_SOR   =   ((3 << 3) + 1),
+  CMP_SUNE  =   ((3 << 3) + 2),
+  CMP_SNE   =   ((3 << 3) + 3),
+  CMP_SUGE  =   ((3 << 3) + 4),  // Reserved, not implemented.
+  CMP_SOGE  =   ((3 << 3) + 5),  // Reserved, not implemented.
+  CMP_SUGT  =   ((3 << 3) + 6),  // Reserved, not implemented.
+  CMP_SOGT  =   ((3 << 3) + 7),  // Reserved, not implemented.
+
+  SEL       =   ((2 << 3) + 0),
+  SELEQZ_C  =   ((2 << 3) + 4),  // COP1 on FPR registers.
+  SELNEZ_C  =   ((2 << 3) + 7),  // COP1 on FPR registers.
   // COP1 Encoding of Function Field When rs=PS.
   // COP1X Encoding of Function Field.
   MADD_D    =   ((4 << 3) + 1),
@@ -499,7 +614,7 @@
 // no_condition value (-2). As long as tests for no_condition check
 // for condition < 0, this will work as expected.
 inline Condition NegateCondition(Condition cc) {
-  ASSERT(cc != cc_always);
+  DCHECK(cc != cc_always);
   return static_cast<Condition>(cc ^ 1);
 }
 
@@ -660,29 +775,29 @@
   }
 
   inline int RsValue() const {
-    ASSERT(InstructionType() == kRegisterType ||
+    DCHECK(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
     return Bits(kRsShift + kRsBits - 1, kRsShift);
   }
 
   inline int RtValue() const {
-    ASSERT(InstructionType() == kRegisterType ||
+    DCHECK(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
     return Bits(kRtShift + kRtBits - 1, kRtShift);
   }
 
   inline int RdValue() const {
-    ASSERT(InstructionType() == kRegisterType);
+    DCHECK(InstructionType() == kRegisterType);
     return Bits(kRdShift + kRdBits - 1, kRdShift);
   }
 
   inline int SaValue() const {
-    ASSERT(InstructionType() == kRegisterType);
+    DCHECK(InstructionType() == kRegisterType);
     return Bits(kSaShift + kSaBits - 1, kSaShift);
   }
 
   inline int FunctionValue() const {
-    ASSERT(InstructionType() == kRegisterType ||
+    DCHECK(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
     return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
   }
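
Every accessor above reduces to one bit-range extraction, Bits(hi, lo): shift the field down to bit 0, then mask to its width. A stand-alone sketch against a raw instruction word:

    #include <cassert>
    #include <cstdint>

    inline uint32_t Bits(uint32_t insn, int hi, int lo) {
      return (insn >> lo) & ((1u << (hi - lo + 1)) - 1);
    }

    int main() {
      const int kRsShift = 21, kRsBits = 5;  // rs occupies bits 25..21.
      uint32_t insn = 0x012A4020u;           // add $t0, $t1, $t2
      assert(Bits(insn, kRsShift + kRsBits - 1, kRsShift) == 9);  // rs = $t1.
      assert(Bits(insn, 20, 16) == 10);                           // rt = $t2.
      assert(Bits(insn, 15, 11) == 8);                            // rd = $t0.
    }
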
@@ -724,7 +839,7 @@
   }
 
   inline int RsFieldRaw() const {
-    ASSERT(InstructionType() == kRegisterType ||
+    DCHECK(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
     return InstructionBits() & kRsFieldMask;
   }
@@ -735,18 +850,18 @@
   }
 
   inline int RtFieldRaw() const {
-    ASSERT(InstructionType() == kRegisterType ||
+    DCHECK(InstructionType() == kRegisterType ||
            InstructionType() == kImmediateType);
     return InstructionBits() & kRtFieldMask;
   }
 
   inline int RdFieldRaw() const {
-    ASSERT(InstructionType() == kRegisterType);
+    DCHECK(InstructionType() == kRegisterType);
     return InstructionBits() & kRdFieldMask;
   }
 
   inline int SaFieldRaw() const {
-    ASSERT(InstructionType() == kRegisterType);
+    DCHECK(InstructionType() == kRegisterType);
     return InstructionBits() & kSaFieldMask;
   }
 
@@ -771,12 +886,17 @@
   }
 
   inline int32_t Imm16Value() const {
-    ASSERT(InstructionType() == kImmediateType);
+    DCHECK(InstructionType() == kImmediateType);
     return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
   }
 
+  inline int32_t Imm21Value() const {
+    DCHECK(InstructionType() == kImmediateType);
+    return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+  }
+
   inline int32_t Imm26Value() const {
-    ASSERT(InstructionType() == kJumpType);
+    DCHECK(InstructionType() == kJumpType);
     return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
   }
 
diff --git a/src/mips/cpu-mips.cc b/src/mips/cpu-mips.cc
index ce47126..f2d5065 100644
--- a/src/mips/cpu-mips.cc
+++ b/src/mips/cpu-mips.cc
@@ -15,7 +15,7 @@
 
 #if V8_TARGET_ARCH_MIPS
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/macro-assembler.h"
 
 #include "src/simulator.h"  // For cache flushing.
@@ -24,7 +24,7 @@
 namespace internal {
 
 
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
   // Nothing to do, flushing no instructions.
   if (size == 0) {
     return;
diff --git a/src/mips/debug-mips.cc b/src/mips/debug-mips.cc
index fc05211..96a1467 100644
--- a/src/mips/debug-mips.cc
+++ b/src/mips/debug-mips.cc
@@ -30,7 +30,7 @@
   // nop (in branch delay slot)
 
   // Make sure this constant matches the number of instructions we emit.
-  ASSERT(Assembler::kJSReturnSequenceInstructions == 7);
+  DCHECK(Assembler::kJSReturnSequenceInstructions == 7);
   CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
   // li and Call pseudo-instructions emit two instructions each.
   patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int32_t>(
@@ -55,20 +55,20 @@
 // A debug break in the exit code is identified by the JS frame exit code
 // having been patched with a li/call pseudo-instruction (lui/ori/jalr).
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
   return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   // Check whether the debug break slot instructions have been patched.
   return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   // Patch the code changing the debug break slot code from:
   //   nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
   //   nop(DEBUG_BREAK_NOP)
@@ -85,7 +85,7 @@
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   rinfo()->PatchCode(original_rinfo()->pc(),
                      Assembler::kDebugBreakSlotInstructions);
 }
@@ -101,12 +101,22 @@
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
 
+    // Load padding words on stack.
+    __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+    __ Subu(sp, sp,
+            Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
+    for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
+      __ sw(at, MemOperand(sp, kPointerSize * i));
+    }
+    __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+    __ push(at);
+
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non-object values
     // are stored as smis, causing them to be untouched by GC.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
+    DCHECK((object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((object_regs & non_object_regs) == 0);
     if ((object_regs | non_object_regs) != 0) {
       for (int i = 0; i < kNumJSCallerSaved; i++) {
         int r = JSCallerSavedCode(i);
@@ -147,6 +157,9 @@
       }
     }
 
+    // Don't bother removing padding bytes pushed on the stack
+    // as the frame is going to be restored right away.
+
     // Leave the internal frame.
   }
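
The padding words above are pushed as smis so the GC treats them as immediates rather than heap pointers. A sketch of the 32-bit smi encoding Smi::FromInt relies on: shift left by one, leaving tag bit 0 clear (heap pointers carry a set tag bit):

    #include <cassert>
    #include <cstdint>

    int32_t SmiFromInt(int32_t value) { return value << 1; }
    bool IsSmi(int32_t tagged) { return (tagged & 1) == 0; }
    int32_t SmiToInt(int32_t tagged) { return tagged >> 1; }

    int main() {
      int32_t padding = SmiFromInt(42);
      assert(IsSmi(padding));
      assert(SmiToInt(padding) == 42);
    }
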
 
@@ -172,49 +185,35 @@
 
 
 void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
-  // Calling convention for IC load (from ic-mips.cc).
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  //  -- a0    : receiver
-  //  -- [sp]  : receiver
-  // -----------------------------------
-  // Registers a0 and a2 contain objects that need to be pushed on the
-  // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, a0.bit() | a2.bit(), 0);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
 }
 
 
 void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   // Calling convention for IC store (from ic-mips.cc).
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : receiver
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-  // Registers a0, a1, and a2 contain objects that need to be pushed on the
-  // expression stack of the fake JS frame.
-  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(
+      masm, receiver.bit() | name.bit() | value.bit(), 0);
 }
 
 
 void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- ra  : return address
-  //  -- a0  : key
-  //  -- a1  : receiver
-  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit(), 0);
+  // Calling convention for keyed IC load (from ic-mips.cc).
+  GenerateLoadICDebugBreak(masm);
 }
 
 
 void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  Generate_DebugBreakCallHelper(masm, a0.bit() | a1.bit() | a2.bit(), 0);
+  // Calling convention for IC keyed store call (from ic-mips.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(
+      masm, receiver.bit() | name.bit() | value.bit(), 0);
 }
 
 
@@ -277,7 +276,7 @@
   for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
     __ nop(MacroAssembler::DEBUG_BREAK_NOP);
   }
-  ASSERT_EQ(Assembler::kDebugBreakSlotInstructions,
+  DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
             masm->InstructionsGeneratedSince(&check_codesize));
 }
 
@@ -290,16 +289,36 @@
 
 
 void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
-  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
+  __ Ret();
 }
 
 
 void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
-  masm->Abort(kLiveEditFrameDroppingIsNotSupportedOnMips);
+  ExternalReference restarter_frame_function_slot =
+      ExternalReference::debug_restarter_frame_function_pointer_address(
+          masm->isolate());
+  __ li(at, Operand(restarter_frame_function_slot));
+  __ sw(zero_reg, MemOperand(at, 0));
+
+  // We do not know our frame height, but set sp based on fp.
+  __ Subu(sp, fp, Operand(kPointerSize));
+
+  __ Pop(ra, fp, a1);  // Return address, Frame, Function.
+
+  // Load context from the function.
+  __ lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Get function code.
+  __ lw(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ lw(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
+  __ Addu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Re-run JSFunction, a1 is function, cp is context.
+  __ Jump(t9);
 }
 
 
-const bool LiveEdit::kFrameDropperSupported = false;
+const bool LiveEdit::kFrameDropperSupported = true;
 
 #undef __
 
diff --git a/src/mips/deoptimizer-mips.cc b/src/mips/deoptimizer-mips.cc
index 71c82fb..dd9832d 100644
--- a/src/mips/deoptimizer-mips.cc
+++ b/src/mips/deoptimizer-mips.cc
@@ -48,9 +48,6 @@
 
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
@@ -63,13 +60,13 @@
     int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
                                                       RelocInfo::NONE32);
     int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
-    ASSERT(call_size_in_bytes % Assembler::kInstrSize == 0);
-    ASSERT(call_size_in_bytes <= patch_size());
+    DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
+    DCHECK(call_size_in_bytes <= patch_size());
     CodePatcher patcher(call_address, call_size_in_words);
     patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
-    ASSERT(prev_call_address == NULL ||
+    DCHECK(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
-    ASSERT(call_address + patch_size() <= code->instruction_end());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
 
 #ifdef DEBUG
     prev_call_address = call_address;
@@ -100,8 +97,8 @@
 
 
 void Deoptimizer::SetPlatformCompiledStubRegisters(
-    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
-  ApiFunction function(descriptor->deoptimization_handler_);
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+  ApiFunction function(descriptor->deoptimization_handler());
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
   int params = descriptor->GetHandlerParameterCount();
@@ -198,7 +195,7 @@
   __ lw(a1, MemOperand(v0, Deoptimizer::input_offset()));
 
   // Copy core registers into FrameDescription::registers_[kNumRegisters].
-  ASSERT(Register::kNumRegisters == kNumberOfRegisters);
+  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
   for (int i = 0; i < kNumberOfRegisters; i++) {
     int offset = (i * kPointerSize) + FrameDescription::registers_offset();
     if ((saved_regs & (1 << i)) != 0) {
@@ -300,7 +297,7 @@
 
   // Technically restoring 'at' should work unless zero_reg is also restored
   // but it's safer to check for this.
-  ASSERT(!(at.bit() & restored_regs));
+  DCHECK(!(at.bit() & restored_regs));
   // Restore the registers from the last output frame.
   __ mov(at, a2);
   for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
@@ -320,39 +317,66 @@
 
 
 // Maximum size of a table entry generated below.
-const int Deoptimizer::table_entry_size_ = 7 * Assembler::kInstrSize;
+const int Deoptimizer::table_entry_size_ = 2 * Assembler::kInstrSize;
 
 void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
 
   // Create a sequence of deoptimization entries.
   // Note that registers are still live when jumping to an entry.
-  Label table_start;
+  Label table_start, done, done_special, trampoline_jump;
   __ bind(&table_start);
-  for (int i = 0; i < count(); i++) {
-    Label start;
-    __ bind(&start);
-    __ addiu(sp, sp, -1 * kPointerSize);
-    // Jump over the remaining deopt entries (including this one).
-    // This code is always reached by calling Jump, which puts the target (label
-    // start) into t9.
-    const int remaining_entries = (count() - i) * table_entry_size_;
-    __ Addu(t9, t9, remaining_entries);
-    // 'at' was clobbered so we can only load the current entry value here.
-    __ li(at, i);
-    __ jr(t9);  // Expose delay slot.
-    __ sw(at, MemOperand(sp, 0 * kPointerSize));  // In the delay slot.
+  int kMaxEntriesBranchReach = (1 << (kImm16Bits - 2)) /
+      (table_entry_size_ / Assembler::kInstrSize);
 
-    // Pad the rest of the code.
-    while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
-      __ nop();
+  if (count() <= kMaxEntriesBranchReach) {
+    // Common case.
+    for (int i = 0; i < count(); i++) {
+      Label start;
+      __ bind(&start);
+      DCHECK(is_int16(i));
+      __ Branch(USE_DELAY_SLOT, &done);  // Expose delay slot.
+      __ li(at, i);  // In the delay slot.
+
+      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
     }
 
-    ASSERT_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
-  }
+    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+        count() * table_entry_size_);
+    __ bind(&done);
+    __ Push(at);
+  } else {
+    // Uncommon case: the branch cannot reach all entries.
+    // Create a mini trampoline and adjust the id constants so the proper
+    // value is obtained at the end of the table.
+    for (int i = kMaxEntriesBranchReach; i > 1; i--) {
+      Label start;
+      __ bind(&start);
+      DCHECK(is_int16(i));
+      __ Branch(USE_DELAY_SLOT, &trampoline_jump);  // Expose delay slot.
+      __ li(at, - i);  // In the delay slot.
+      DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+    }
+    // Entry with id == kMaxEntriesBranchReach - 1.
+    __ bind(&trampoline_jump);
+    __ Branch(USE_DELAY_SLOT, &done_special);
+    __ li(at, -1);
 
-  ASSERT_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
-      count() * table_entry_size_);
+    for (int i = kMaxEntriesBranchReach ; i < count(); i++) {
+      Label start;
+      __ bind(&start);
+      DCHECK(is_int16(i));
+      __ Branch(USE_DELAY_SLOT, &done);  // Expose delay slot.
+      __ li(at, i);  // In the delay slot.
+    }
+
+    DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+        count() * table_entry_size_);
+    __ bind(&done_special);
+    __ addiu(at, at, kMaxEntriesBranchReach);
+    __ bind(&done);
+    __ Push(at);
+  }
 }
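
The bound computed above, spelled out: a MIPS 16-bit branch offset counts instruction words, the generator budgets a conservative 1 << (kImm16Bits - 2) words of reach, and each table entry is two instructions (a branch plus a li in its delay slot), so a single branch can skip at most 8192 entries:

    #include <cassert>

    int main() {
      const int kImm16Bits = 16;
      const int kInstrSize = 4;                     // Bytes per instruction.
      const int table_entry_size = 2 * kInstrSize;  // Branch + li (delay slot).
      const int kMaxEntriesBranchReach =
          (1 << (kImm16Bits - 2)) / (table_entry_size / kInstrSize);
      assert(kMaxEntriesBranchReach == 8192);       // 16384 words / 2.
    }
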
 
 
diff --git a/src/mips/disasm-mips.cc b/src/mips/disasm-mips.cc
index 82a4758..564627e 100644
--- a/src/mips/disasm-mips.cc
+++ b/src/mips/disasm-mips.cc
@@ -24,18 +24,18 @@
 
 
 #include <assert.h>
-#include <stdio.h>
 #include <stdarg.h>
+#include <stdio.h>
 #include <string.h>
 
 #include "src/v8.h"
 
 #if V8_TARGET_ARCH_MIPS
 
-#include "src/mips/constants-mips.h"
+#include "src/base/platform/platform.h"
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
+#include "src/mips/constants-mips.h"
 
 namespace v8 {
 namespace internal {
@@ -86,6 +86,7 @@
   void PrintUImm16(Instruction* instr);
   void PrintSImm16(Instruction* instr);
   void PrintXImm16(Instruction* instr);
+  void PrintXImm21(Instruction* instr);
   void PrintXImm26(Instruction* instr);
   void PrintCode(Instruction* instr);   // For break and trap instructions.
   // Printing of instruction name.
@@ -246,6 +247,13 @@
 }
 
 
+// Print 21-bit immediate value.
+void Decoder::PrintXImm21(Instruction* instr) {
+  uint32_t imm = instr->Imm21Value();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
 // Print 26-bit immediate value.
 void Decoder::PrintXImm26(Instruction* instr) {
   uint32_t imm = instr->Imm26Value() << kImmFieldShift;
@@ -289,7 +297,7 @@
 // Handle all register based formatting in this function to reduce the
 // complexity of FormatOption.
 int Decoder::FormatRegister(Instruction* instr, const char* format) {
-  ASSERT(format[0] == 'r');
+  DCHECK(format[0] == 'r');
   if (format[1] == 's') {  // 'rs: Rs register.
     int reg = instr->RsValue();
     PrintRegister(reg);
@@ -311,7 +319,7 @@
 // Handle all FPUregister based formatting in this function to reduce the
 // complexity of FormatOption.
 int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
-  ASSERT(format[0] == 'f');
+  DCHECK(format[0] == 'f');
   if (format[1] == 's') {  // 'fs: fs register.
     int reg = instr->FsValue();
     PrintFPURegister(reg);
@@ -342,26 +350,30 @@
 int Decoder::FormatOption(Instruction* instr, const char* format) {
   switch (format[0]) {
     case 'c': {   // 'code for break or trap instructions.
-      ASSERT(STRING_STARTS_WITH(format, "code"));
+      DCHECK(STRING_STARTS_WITH(format, "code"));
       PrintCode(instr);
       return 4;
     }
     case 'i': {   // 'imm16u, 'imm21x or 'imm26x.
       if (format[3] == '1') {
-        ASSERT(STRING_STARTS_WITH(format, "imm16"));
+        DCHECK(STRING_STARTS_WITH(format, "imm16"));
         if (format[5] == 's') {
-          ASSERT(STRING_STARTS_WITH(format, "imm16s"));
+          DCHECK(STRING_STARTS_WITH(format, "imm16s"));
           PrintSImm16(instr);
         } else if (format[5] == 'u') {
-          ASSERT(STRING_STARTS_WITH(format, "imm16u"));
+          DCHECK(STRING_STARTS_WITH(format, "imm16u"));
           PrintSImm16(instr);
         } else {
-          ASSERT(STRING_STARTS_WITH(format, "imm16x"));
+          DCHECK(STRING_STARTS_WITH(format, "imm16x"));
           PrintXImm16(instr);
         }
         return 6;
-      } else {
-        ASSERT(STRING_STARTS_WITH(format, "imm26x"));
+      } else if (format[3] == '2' && format[4] == '1') {
+        DCHECK(STRING_STARTS_WITH(format, "imm21x"));
+        PrintXImm21(instr);
+        return 6;
+      } else if (format[3] == '2' && format[4] == '6') {
+        DCHECK(STRING_STARTS_WITH(format, "imm26x"));
         PrintXImm26(instr);
         return 6;
       }
@@ -375,22 +387,22 @@
     case 's': {   // 'sa.
       switch (format[1]) {
         case 'a': {
-          ASSERT(STRING_STARTS_WITH(format, "sa"));
+          DCHECK(STRING_STARTS_WITH(format, "sa"));
           PrintSa(instr);
           return 2;
         }
         case 'd': {
-          ASSERT(STRING_STARTS_WITH(format, "sd"));
+          DCHECK(STRING_STARTS_WITH(format, "sd"));
           PrintSd(instr);
           return 2;
         }
         case 's': {
           if (format[2] == '1') {
-              ASSERT(STRING_STARTS_WITH(format, "ss1"));  /* ext size */
+              DCHECK(STRING_STARTS_WITH(format, "ss1"));  /* ext size */
               PrintSs1(instr);
               return 3;
           } else {
-              ASSERT(STRING_STARTS_WITH(format, "ss2"));  /* ins size */
+              DCHECK(STRING_STARTS_WITH(format, "ss2"));  /* ins size */
               PrintSs2(instr);
               return 3;
           }
@@ -398,12 +410,12 @@
       }
     }
     case 'b': {   // 'bc - Special for bc1 cc field.
-      ASSERT(STRING_STARTS_WITH(format, "bc"));
+      DCHECK(STRING_STARTS_WITH(format, "bc"));
       PrintBc(instr);
       return 2;
     }
     case 'C': {   // 'Cc - Special for c.xx.d cc field.
-      ASSERT(STRING_STARTS_WITH(format, "Cc"));
+      DCHECK(STRING_STARTS_WITH(format, "Cc"));
       PrintCc(instr);
       return 2;
     }
@@ -492,25 +504,15 @@
             case CVT_W_D:
               Format(instr, "cvt.w.d 'fd, 'fs");
               break;
-            case CVT_L_D: {
-              if (kArchVariant == kMips32r2) {
-                Format(instr, "cvt.l.d 'fd, 'fs");
-              } else {
-                Unknown(instr);
-              }
+            case CVT_L_D:
+              Format(instr, "cvt.l.d 'fd, 'fs");
               break;
-            }
             case TRUNC_W_D:
               Format(instr, "trunc.w.d 'fd, 'fs");
               break;
-            case TRUNC_L_D: {
-              if (kArchVariant == kMips32r2) {
-                Format(instr, "trunc.l.d 'fd, 'fs");
-              } else {
-                Unknown(instr);
-              }
+            case TRUNC_L_D:
+              Format(instr, "trunc.l.d 'fd, 'fs");
               break;
-            }
             case ROUND_W_D:
               Format(instr, "round.w.d 'fd, 'fs");
               break;
@@ -569,22 +571,42 @@
           break;
         case L:
           switch (instr->FunctionFieldRaw()) {
-            case CVT_D_L: {
-              if (kArchVariant == kMips32r2) {
-                Format(instr, "cvt.d.l 'fd, 'fs");
-              } else {
-                Unknown(instr);
-              }
+            case CVT_D_L:
+              Format(instr, "cvt.d.l 'fd, 'fs");
               break;
-            }
-            case CVT_S_L: {
-              if (kArchVariant == kMips32r2) {
-                Format(instr, "cvt.s.l 'fd, 'fs");
-              } else {
-                Unknown(instr);
-              }
+            case CVT_S_L:
+              Format(instr, "cvt.s.l 'fd, 'fs");
               break;
-            }
+            case CMP_UN:
+              Format(instr, "cmp.un.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_EQ:
+              Format(instr, "cmp.eq.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_UEQ:
+              Format(instr, "cmp.ueq.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_LT:
+              Format(instr, "cmp.lt.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_ULT:
+              Format(instr, "cmp.ult.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_LE:
+              Format(instr, "cmp.le.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_ULE:
+              Format(instr, "cmp.ule.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_OR:
+              Format(instr, "cmp.or.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_UNE:
+              Format(instr, "cmp.une.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_NE:
+              Format(instr, "cmp.ne.d  'fd,  'fs, 'ft");
+              break;
             default:
               UNREACHABLE();
           }
@@ -623,7 +645,7 @@
           if (instr->RsValue() == 0) {
             Format(instr, "srl     'rd, 'rt, 'sa");
           } else {
-            if (kArchVariant == kMips32r2) {
+            if (IsMipsArchVariant(kMips32r2)) {
               Format(instr, "rotr    'rd, 'rt, 'sa");
             } else {
               Unknown(instr);
@@ -640,7 +662,7 @@
           if (instr->SaValue() == 0) {
             Format(instr, "srlv    'rd, 'rt, 'rs");
           } else {
-            if (kArchVariant == kMips32r2) {
+            if (IsMipsArchVariant(kMips32r2)) {
               Format(instr, "rotrv   'rd, 'rt, 'rs");
             } else {
               Unknown(instr);
@@ -651,22 +673,64 @@
           Format(instr, "srav    'rd, 'rt, 'rs");
           break;
         case MFHI:
-          Format(instr, "mfhi    'rd");
+          if (instr->Bits(25, 16) == 0) {
+            Format(instr, "mfhi    'rd");
+          } else {
+            if ((instr->FunctionFieldRaw() == CLZ_R6)
+                && (instr->FdValue() == 1)) {
+              Format(instr, "clz     'rd, 'rs");
+            } else if ((instr->FunctionFieldRaw() == CLO_R6)
+                && (instr->FdValue() == 1)) {
+              Format(instr, "clo     'rd, 'rs");
+            }
+          }
           break;
         case MFLO:
           Format(instr, "mflo    'rd");
           break;
-        case MULT:
-          Format(instr, "mult    'rs, 'rt");
+        case MULT:  // @Mips32r6 == MUL_MUH.
+          if (!IsMipsArchVariant(kMips32r6)) {
+            Format(instr, "mult    'rs, 'rt");
+          } else {
+            if (instr->SaValue() == MUL_OP) {
+              Format(instr, "mul    'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "muh    'rd, 'rs, 'rt");
+            }
+          }
           break;
-        case MULTU:
-          Format(instr, "multu   'rs, 'rt");
+        case MULTU:  // @Mips32r6 == MUL_MUH_U.
+          if (!IsMipsArchVariant(kMips32r6)) {
+            Format(instr, "multu   'rs, 'rt");
+          } else {
+            if (instr->SaValue() == MUL_OP) {
+              Format(instr, "mulu   'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "muhu   'rd, 'rs, 'rt");
+            }
+          }
           break;
-        case DIV:
-          Format(instr, "div     'rs, 'rt");
+        case DIV:  // @Mips32r6 == DIV_MOD.
+          if (!IsMipsArchVariant(kMips32r6)) {
+            Format(instr, "div     'rs, 'rt");
+          } else {
+            if (instr->SaValue() == DIV_OP) {
+              Format(instr, "div    'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "mod    'rd, 'rs, 'rt");
+            }
+          }
           break;
-        case DIVU:
-          Format(instr, "divu    'rs, 'rt");
+        case DIVU:  // @Mips32r6 == DIV_MOD_U.
+          if (!IsMipsArchVariant(kMips32r6)) {
+            Format(instr, "divu    'rs, 'rt");
+          } else {
+            if (instr->SaValue() == DIV_OP) {
+              Format(instr, "divu   'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "modu   'rd, 'rs, 'rt");
+            }
+          }
           break;
         case ADD:
           Format(instr, "add     'rd, 'rs, 'rt");
@@ -738,6 +802,12 @@
             Format(instr, "movf    'rd, 'rs, 'bc");
           }
           break;
+        case SELEQZ_S:
+          Format(instr, "seleqz    'rd, 'rs, 'rt");
+          break;
+        case SELNEZ_S:
+          Format(instr, "selnez    'rd, 'rs, 'rt");
+          break;
         default:
           UNREACHABLE();
       }
@@ -748,7 +818,9 @@
           Format(instr, "mul     'rd, 'rs, 'rt");
           break;
         case CLZ:
-          Format(instr, "clz     'rd, 'rs");
+          if (!IsMipsArchVariant(kMips32r6)) {
+            Format(instr, "clz     'rd, 'rs");
+          }
           break;
         default:
           UNREACHABLE();
@@ -757,7 +829,7 @@
     case SPECIAL3:
       switch (instr->FunctionFieldRaw()) {
         case INS: {
-          if (kArchVariant == kMips32r2) {
+          if (IsMipsArchVariant(kMips32r2)) {
             Format(instr, "ins     'rt, 'rs, 'sa, 'ss2");
           } else {
             Unknown(instr);
@@ -765,7 +837,7 @@
           break;
         }
         case EXT: {
-          if (kArchVariant == kMips32r2) {
+          if (IsMipsArchVariant(kMips32r2)) {
             Format(instr, "ext     'rt, 'rs, 'sa, 'ss1");
           } else {
             Unknown(instr);
@@ -784,7 +856,6 @@
 
 void Decoder::DecodeTypeImmediate(Instruction* instr) {
   switch (instr->OpcodeFieldRaw()) {
-    // ------------- REGIMM class.
     case COP1:
       switch (instr->RsFieldRaw()) {
         case BC1:
@@ -794,10 +865,150 @@
             Format(instr, "bc1f    'bc, 'imm16u");
           }
           break;
+        case BC1EQZ:
+          Format(instr, "bc1eqz    'ft, 'imm16u");
+          break;
+        case BC1NEZ:
+          Format(instr, "bc1nez    'ft, 'imm16u");
+          break;
+        case W:  // CMP.S instruction.
+          switch (instr->FunctionValue()) {
+            case CMP_AF:
+              Format(instr, "cmp.af.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_UN:
+              Format(instr, "cmp.un.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_EQ:
+              Format(instr, "cmp.eq.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_UEQ:
+              Format(instr, "cmp.ueq.S   'ft, 'fs, 'fd");
+              break;
+            case CMP_LT:
+              Format(instr, "cmp.lt.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_ULT:
+              Format(instr, "cmp.ult.S   'ft, 'fs, 'fd");
+              break;
+            case CMP_LE:
+              Format(instr, "cmp.le.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_ULE:
+              Format(instr, "cmp.ule.S   'ft, 'fs, 'fd");
+              break;
+            case CMP_OR:
+              Format(instr, "cmp.or.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_UNE:
+              Format(instr, "cmp.une.S   'ft, 'fs, 'fd");
+              break;
+            case CMP_NE:
+              Format(instr, "cmp.ne.S    'ft, 'fs, 'fd");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case L:  // CMP.D instruction.
+          switch (instr->FunctionValue()) {
+            case CMP_AF:
+              Format(instr, "cmp.af.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_UN:
+              Format(instr, "cmp.un.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_EQ:
+              Format(instr, "cmp.eq.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_UEQ:
+              Format(instr, "cmp.ueq.D   'ft, 'fs, 'fd");
+              break;
+            case CMP_LT:
+              Format(instr, "cmp.lt.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_ULT:
+              Format(instr, "cmp.ult.D   'ft, 'fs, 'fd");
+              break;
+            case CMP_LE:
+              Format(instr, "cmp.le.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_ULE:
+              Format(instr, "cmp.ule.D   'ft, 'fs, 'fd");
+              break;
+            case CMP_OR:
+              Format(instr, "cmp.or.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_UNE:
+              Format(instr, "cmp.une.D   'ft, 'fs, 'fd");
+              break;
+            case CMP_NE:
+              Format(instr, "cmp.ne.D    'ft, 'fs, 'fd");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case S:
+          switch (instr->FunctionValue()) {
+            case SEL:
+              Format(instr, "sel.S    'ft, 'fs, 'fd");
+              break;
+            case SELEQZ_C:
+              Format(instr, "seleqz.S 'ft, 'fs, 'fd");
+              break;
+            case SELNEZ_C:
+              Format(instr, "selnez.S 'ft, 'fs, 'fd");
+              break;
+            case MIN:
+              Format(instr, "min.S    'ft, 'fs, 'fd");
+              break;
+            case MINA:
+              Format(instr, "mina.S   'ft, 'fs, 'fd");
+              break;
+            case MAX:
+              Format(instr, "max.S    'ft, 'fs, 'fd");
+              break;
+            case MAXA:
+              Format(instr, "maxa.S   'ft, 'fs, 'fd");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case D:
+          switch (instr->FunctionValue()) {
+            case SEL:
+              Format(instr, "sel.D    'ft, 'fs, 'fd");
+              break;
+            case SELEQZ_C:
+              Format(instr, "seleqz.D 'ft, 'fs, 'fd");
+              break;
+            case SELNEZ_C:
+              Format(instr, "selnez.D 'ft, 'fs, 'fd");
+              break;
+            case MIN:
+              Format(instr, "min.D    'ft, 'fs, 'fd");
+              break;
+            case MINA:
+              Format(instr, "mina.D   'ft, 'fs, 'fd");
+              break;
+            case MAX:
+              Format(instr, "max.D    'ft, 'fs, 'fd");
+              break;
+            case MAXA:
+              Format(instr, "maxa.D   'ft, 'fs, 'fd");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
         default:
           UNREACHABLE();
       }
+
       break;  // Case COP1.
+    // ------------- REGIMM class.
     case REGIMM:
       switch (instr->RtFieldRaw()) {
         case BLTZ:
@@ -812,6 +1023,9 @@
         case BGEZAL:
           Format(instr, "bgezal  'rs, 'imm16u");
           break;
+        case BGEZALL:
+          Format(instr, "bgezall 'rs, 'imm16u");
+          break;
         default:
           UNREACHABLE();
       }
@@ -824,14 +1038,103 @@
       Format(instr, "bne     'rs, 'rt, 'imm16u");
       break;
     case BLEZ:
-      Format(instr, "blez    'rs, 'imm16u");
+      if ((instr->RtFieldRaw() == 0)
+          && (instr->RsFieldRaw() != 0)) {
+        Format(instr, "blez    'rs, 'imm16u");
+      } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+          && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgeuc    'rs, 'rt, 'imm16u");
+      } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgezalc  'rs, 'imm16u");
+      } else if ((instr->RsFieldRaw() == 0)
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "blezalc  'rs, 'imm16u");
+      } else {
+        UNREACHABLE();
+      }
       break;
     case BGTZ:
-      Format(instr, "bgtz    'rs, 'imm16u");
+      if ((instr->RtFieldRaw() == 0)
+          && (instr->RsFieldRaw() != 0)) {
+        Format(instr, "bgtz    'rs, 'imm16u");
+      } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+          && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bltuc   'rs, 'rt, 'imm16u");
+      } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bltzalc 'rt, 'imm16u");
+      } else if ((instr->RsFieldRaw() == 0)
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgtzalc 'rt, 'imm16u");
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    case BLEZL:
+      if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgezc    'rt, 'imm16u");
+      } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+          && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgec     'rs, 'rt, 'imm16u");
+      } else if ((instr->RsFieldRaw() == 0)
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "blezc    'rt, 'imm16u");
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    case BGTZL:
+      if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bltzc    'rt, 'imm16u");
+      } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+          && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bltc     'rs, 'rt, 'imm16u");
+      } else if ((instr->RsFieldRaw() == 0)
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgtzc    'rt, 'imm16u");
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    case BEQZC:
+      if (instr->RsFieldRaw() != 0) {
+        Format(instr, "beqzc   'rs, 'imm21x");
+      }
+      break;
+    case BNEZC:
+      if (instr->RsFieldRaw() != 0) {
+        Format(instr, "bnezc   'rs, 'imm21x");
+      }
       break;
     // ------------- Arithmetic instructions.
     case ADDI:
-      Format(instr, "addi    'rt, 'rs, 'imm16s");
+      if (!IsMipsArchVariant(kMips32r6)) {
+        Format(instr, "addi    'rt, 'rs, 'imm16s");
+      } else {
+        // Check if BOVC or BEQC instruction.
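+        // On r6 the ADDI opcode is reused for compact branches; the
+        // register ordering selects the instruction (rs >= rt encodes
+        // bovc, rs < rt encodes beqc).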
+        if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+          Format(instr, "bovc  'rs, 'rt, 'imm16s");
+        } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+          Format(instr, "beqc  'rs, 'rt, 'imm16s");
+        } else {
+          UNREACHABLE();
+        }
+      }
+      break;
+    case DADDI:
+      if (IsMipsArchVariant(kMips32r6)) {
+        // Check if BNVC or BNEC instruction.
+        if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+          Format(instr, "bnvc  'rs, 'rt, 'imm16s");
+        } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+          Format(instr, "bnec  'rs, 'rt, 'imm16s");
+        } else {
+          UNREACHABLE();
+        }
+      }
       break;
     case ADDIU:
       Format(instr, "addiu   'rt, 'rs, 'imm16s");
@@ -852,7 +1155,15 @@
       Format(instr, "xori    'rt, 'rs, 'imm16x");
       break;
     case LUI:
-      Format(instr, "lui     'rt, 'imm16x");
+      if (!IsMipsArchVariant(kMips32r6)) {
+        Format(instr, "lui     'rt, 'imm16x");
+      } else {
+        if (instr->RsValue() != 0) {
+          Format(instr, "aui     'rt, 'imm16x");
+        } else {
+          Format(instr, "lui     'rt, 'imm16x");
+        }
+      }
       break;
     // ------------- Memory instructions.
     case LB:
@@ -907,6 +1218,7 @@
       Format(instr, "sdc1    'ft, 'imm16s('rs)");
       break;
     default:
+      printf("a 0x%x \n", instr->OpcodeFieldRaw());
       UNREACHABLE();
       break;
   }
diff --git a/src/mips/frames-mips.cc b/src/mips/frames-mips.cc
index 5da0080..b65f1bf 100644
--- a/src/mips/frames-mips.cc
+++ b/src/mips/frames-mips.cc
@@ -7,9 +7,9 @@
 #if V8_TARGET_ARCH_MIPS
 
 #include "src/assembler.h"
-#include "src/mips/assembler-mips.h"
-#include "src/mips/assembler-mips-inl.h"
 #include "src/frames.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/mips/assembler-mips.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/mips/full-codegen-mips.cc b/src/mips/full-codegen-mips.cc
index 41acad3..1dbfe09 100644
--- a/src/mips/full-codegen-mips.cc
+++ b/src/mips/full-codegen-mips.cc
@@ -14,15 +14,16 @@
 // places where we have to move a previous result in v0 to a0 for the
 // next call: mov(a0, v0). This is not needed on the other architectures.
 
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
 #include "src/debug.h"
 #include "src/full-codegen.h"
+#include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 #include "src/parser.h"
 #include "src/scopes.h"
-#include "src/stub-cache.h"
 
 #include "src/mips/code-stubs-mips.h"
 #include "src/mips/macro-assembler-mips.h"
@@ -50,13 +51,13 @@
   }
 
   ~JumpPatchSite() {
-    ASSERT(patch_site_.is_bound() == info_emitted_);
+    DCHECK(patch_site_.is_bound() == info_emitted_);
   }
 
   // When initially emitting this, ensure that a jump is always generated to skip
   // the inlined smi code.
   void EmitJumpIfNotSmi(Register reg, Label* target) {
-    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
     Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
     __ bind(&patch_site_);
     __ andi(at, reg, 0);
@@ -68,7 +69,7 @@
   // the inlined smi code.
   void EmitJumpIfSmi(Register reg, Label* target) {
     Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
-    ASSERT(!patch_site_.is_bound() && !info_emitted_);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
     __ bind(&patch_site_);
     __ andi(at, reg, 0);
     // Never taken before patched.
@@ -141,7 +142,7 @@
     __ Branch(&ok, ne, a2, Operand(at));
 
     __ lw(a2, GlobalObjectOperand());
-    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+    __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
 
     __ sw(a2, MemOperand(sp, receiver_offset));
 
@@ -160,7 +161,7 @@
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
-    ASSERT(!info->function()->is_generator() || locals_count == 0);
+    DCHECK(!info->function()->is_generator() || locals_count == 0);
     if (locals_count > 0) {
       if (locals_count >= 128) {
         Label ok;
@@ -206,7 +207,7 @@
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ push(a1);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewGlobalContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -214,7 +215,7 @@
       need_write_barrier = false;
     } else {
       __ push(a1);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register = false;
     // Context is returned in v0. It replaces the context passed to us.
@@ -301,9 +302,9 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         VariableDeclaration* function = scope()->function();
-        ASSERT(function->proxy()->var()->mode() == CONST ||
+        DCHECK(function->proxy()->var()->mode() == CONST ||
                function->proxy()->var()->mode() == CONST_LEGACY);
-        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
         VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
@@ -322,9 +323,9 @@
     }
 
     { Comment cmnt(masm_, "[ Body");
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
       VisitStatements(function()->body());
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
     }
   }
 
@@ -338,7 +339,7 @@
 
 
 void FullCodeGenerator::ClearAccumulator() {
-  ASSERT(Smi::FromInt(0) == 0);
+  DCHECK(Smi::FromInt(0) == 0);
   __ mov(v0, zero_reg);
 }
 
@@ -373,7 +374,7 @@
   Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   Label ok;
-  ASSERT(back_edge_target->is_bound());
+  DCHECK(back_edge_target->is_bound());
   int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
   int weight = Min(kMaxBackEdgeWeight,
                    Max(1, distance / kCodeSizeMultiplier));
@@ -452,7 +453,7 @@
 #ifdef DEBUG
     // Check that the size of the code used for returning is large enough
     // for the debugger's requirements.
-    ASSERT(Assembler::kJSReturnSequenceInstructions <=
+    DCHECK(Assembler::kJSReturnSequenceInstructions <=
            masm_->InstructionsGeneratedSince(&check_exit_codesize));
 #endif
   }
@@ -460,18 +461,18 @@
 
 
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
   __ push(result_register());
 }
@@ -542,7 +543,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ Branch(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -569,7 +570,7 @@
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
 }
 
@@ -577,7 +578,7 @@
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
 }
@@ -585,7 +586,7 @@
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ sw(reg, MemOperand(sp, 0));
 }
@@ -593,7 +594,7 @@
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
@@ -604,7 +605,7 @@
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT(materialize_true == materialize_false);
+  DCHECK(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -640,8 +641,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_true == true_label_);
-  ASSERT(materialize_false == false_label_);
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
 }
 
 
@@ -707,7 +708,7 @@
 
 
 MemOperand FullCodeGenerator::StackOperand(Variable* var) {
-  ASSERT(var->IsStackAllocated());
+  DCHECK(var->IsStackAllocated());
   // Offset is negative because higher indexes are at lower addresses.
   int offset = -var->index() * kPointerSize;
   // Adjust by a (parameter or local) base offset.
@@ -721,7 +722,7 @@
 
 
 MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   if (var->IsContextSlot()) {
     int context_chain_length = scope()->ContextChainLength(var->scope());
     __ LoadContext(scratch, context_chain_length);
@@ -743,10 +744,10 @@
                                Register src,
                                Register scratch0,
                                Register scratch1) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
-  ASSERT(!scratch0.is(src));
-  ASSERT(!scratch0.is(scratch1));
-  ASSERT(!scratch1.is(src));
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!scratch0.is(src));
+  DCHECK(!scratch0.is(scratch1));
+  DCHECK(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ sw(src, location);
   // Emit the write barrier code if the location is in the heap.
@@ -784,7 +785,7 @@
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current function
   // context.
-  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
   if (generate_debug_code_) {
     // Check that we're not inside a with or catch context.
     __ lw(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
@@ -840,7 +841,7 @@
       Comment cmnt(masm_, "[ VariableDeclaration");
       __ li(a2, Operand(variable->name()));
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(IsDeclaredVariableMode(mode));
+      DCHECK(IsDeclaredVariableMode(mode));
       PropertyAttributes attr =
           IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
       __ li(a1, Operand(Smi::FromInt(attr)));
@@ -852,11 +853,11 @@
         __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
         __ Push(cp, a2, a1, a0);
       } else {
-        ASSERT(Smi::FromInt(0) == 0);
+        DCHECK(Smi::FromInt(0) == 0);
         __ mov(a0, zero_reg);  // Smi::FromInt(0) indicates no initial value.
         __ Push(cp, a2, a1, a0);
       }
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -871,7 +872,7 @@
     case Variable::UNALLOCATED: {
       globals_->Add(variable->name(), zone());
       Handle<SharedFunctionInfo> function =
-          Compiler::BuildFunctionInfo(declaration->fun(), script());
+          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
       globals_->Add(function, zone());
@@ -912,7 +913,7 @@
       __ Push(cp, a2, a1);
       // Push initial value for function declaration.
       VisitForStackValue(declaration->fun());
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -921,8 +922,8 @@
 
 void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   Variable* variable = declaration->proxy()->var();
-  ASSERT(variable->location() == Variable::CONTEXT);
-  ASSERT(variable->interface()->IsFrozen());
+  DCHECK(variable->location() == Variable::CONTEXT);
+  DCHECK(variable->interface()->IsFrozen());
 
   Comment cmnt(masm_, "[ ModuleDeclaration");
   EmitDebugCheckDeclarationContext(variable);
@@ -984,7 +985,7 @@
   __ li(a1, Operand(pairs));
   __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
   __ Push(cp, a1, a0);
-  __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
@@ -992,7 +993,7 @@
 void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
   // Call the runtime to declare the modules.
   __ Push(descriptions);
-  __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
   // Return value is ignored.
 }
 
@@ -1047,7 +1048,8 @@
 
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
-    Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1178,7 +1180,7 @@
   __ bind(&fixed_array);
 
   __ li(a1, FeedbackVector());
-  __ li(a2, Operand(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+  __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
   __ sw(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
 
   __ li(a1, Operand(Smi::FromInt(1)));  // Smi indicates slow check
@@ -1221,7 +1223,7 @@
 
   // For proxies, no filtering is done.
   // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
-  ASSERT_EQ(Smi::FromInt(0), 0);
+  DCHECK_EQ(Smi::FromInt(0), 0);
   __ Branch(&update_each, eq, a2, Operand(zero_reg));
 
   // Convert the entry to a string or (smi) 0 if it isn't a property
@@ -1272,16 +1274,6 @@
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
-  // var iterable = subject
-  VisitForAccumulatorValue(stmt->assign_iterable());
-  __ mov(a0, v0);
-
-  // As with for-in, skip the loop if the iterator is null or undefined.
-  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  __ Branch(loop_statement.break_label(), eq, a0, Operand(at));
-  __ LoadRoot(at, Heap::kNullValueRootIndex);
-  __ Branch(loop_statement.break_label(), eq, a0, Operand(at));
-
   // var iterator = iterable[Symbol.iterator]();
   VisitForEffect(stmt->assign_iterator());
 
@@ -1330,9 +1322,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(),
-                            info->strict_mode(),
-                            info->is_generator());
+    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
     __ li(a2, Operand(info));
     __ CallStub(&stub);
   } else {
@@ -1340,7 +1330,7 @@
     __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
                               : Heap::kFalseValueRootIndex);
     __ Push(cp, a0, a1);
-    __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+    __ CallRuntime(Runtime::kNewClosure, 3);
   }
   context()->Plug(v0);
 }
@@ -1352,7 +1342,25 @@
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cmnt(masm_, "[ SuperReference");
+
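+  // Load the current function's home object: fetch the function from the
+  // frame and read its home_object_symbol property; undefined means the
+  // callee is not a method, which raises a NonMethodError below.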
+  __ lw(LoadDescriptor::ReceiverRegister(),
+        MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ li(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  Label done;
+  __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value()));
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
   Register current = cp;
@@ -1397,8 +1405,13 @@
     __ bind(&fast);
   }
 
-  __ lw(a0, GlobalObjectOperand());
-  __ li(a2, Operand(var->name()));
+  __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+  __ li(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
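+  // When vector ICs are enabled the load IC also takes its feedback slot
+  // index, passed as a Smi in the slot register.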
+  if (FLAG_vector_ics) {
+    __ li(VectorLoadICDescriptor::SlotRegister(),
+          Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+  }
+
   ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
       ? NOT_CONTEXTUAL
       : CONTEXTUAL;
@@ -1408,7 +1421,7 @@
 
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
                                                                 Label* slow) {
-  ASSERT(var->IsContextSlot());
+  DCHECK(var->IsContextSlot());
   Register context = cp;
   Register next = a3;
   Register temp = t0;
@@ -1436,7 +1449,7 @@
 }
 
 
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
                                                   TypeofState typeof_state,
                                                   Label* slow,
                                                   Label* done) {
@@ -1445,8 +1458,9 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
+  Variable* var = proxy->var();
   if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
     __ Branch(done);
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
@@ -1462,7 +1476,7 @@
         __ Branch(done, ne, at, Operand(zero_reg));
         __ li(a0, Operand(var->name()));
         __ push(a0);
-        __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
       }
     }
     __ Branch(done);
@@ -1480,10 +1494,12 @@
   switch (var->location()) {
     case Variable::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
-      // Use inline caching. Variable name is passed in a2 and the global
-      // object (receiver) in a0.
-      __ lw(a0, GlobalObjectOperand());
-      __ li(a2, Operand(var->name()));
+      __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+      __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+      if (FLAG_vector_ics) {
+        __ li(VectorLoadICDescriptor::SlotRegister(),
+              Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+      }
       CallLoadIC(CONTEXTUAL);
       context()->Plug(v0);
       break;
@@ -1500,7 +1516,7 @@
         // always looked up dynamically, i.e. in that case
         //     var->location() == LOOKUP.
         // always holds.
-        ASSERT(var->scope() != NULL);
+        DCHECK(var->scope() != NULL);
 
         // Check if the binding really needs an initialization check. The check
         // can be skipped in the following situation: we have a LET or CONST
@@ -1523,8 +1539,8 @@
           skip_init_check = false;
         } else {
           // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+          DCHECK(proxy->position() != RelocInfo::kNoPosition);
           skip_init_check = var->mode() != CONST_LEGACY &&
               var->initializer_position() < proxy->position();
         }
@@ -1541,11 +1557,11 @@
             __ Branch(&done, ne, at, Operand(zero_reg));
             __ li(a0, Operand(var->name()));
             __ push(a0);
-            __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
             __ bind(&done);
           } else {
             // Uninitialized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST_LEGACY);
+            DCHECK(var->mode() == CONST_LEGACY);
             __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
             __ Movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
           }
@@ -1562,11 +1578,11 @@
       Label done, slow;
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
       __ bind(&slow);
       __ li(a1, Operand(var->name()));
       __ Push(cp, a1);  // Context and name.
-      __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
       __ bind(&done);
       context()->Plug(v0);
     }
@@ -1598,7 +1614,7 @@
   __ li(a2, Operand(expr->pattern()));
   __ li(a1, Operand(expr->flags()));
   __ Push(t0, a3, a2, a1);
-  __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ mov(t1, v0);
 
   __ bind(&materialized);
@@ -1610,7 +1626,7 @@
   __ bind(&runtime_allocate);
   __ li(a0, Operand(Smi::FromInt(size)));
   __ Push(t1, a0);
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ pop(t1);
 
   __ bind(&allocated);
@@ -1655,7 +1671,7 @@
       masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
       properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
     __ Push(a3, a2, a1, a0);
-    __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
     FastCloneShallowObjectStub stub(isolate(), properties_count);
     __ CallStub(&stub);
@@ -1685,15 +1701,16 @@
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        ASSERT(!CompileTimeValue::IsCompileTimeValue(property->value()));
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ mov(a0, result_register());
-            __ li(a2, Operand(key->value()));
-            __ lw(a1, MemOperand(sp));
+            __ mov(StoreDescriptor::ValueRegister(), result_register());
+            DCHECK(StoreDescriptor::ValueRegister().is(a0));
+            __ li(StoreDescriptor::NameRegister(), Operand(key->value()));
+            __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1707,7 +1724,7 @@
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
-          __ li(a0, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
+          __ li(a0, Operand(Smi::FromInt(SLOPPY)));  // Language mode.
           __ push(a0);
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
@@ -1746,11 +1763,11 @@
     EmitAccessor(it->second->setter);
     __ li(a0, Operand(Smi::FromInt(NONE)));
     __ push(a0);
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
 
   if (expr->has_function()) {
-    ASSERT(result_saved);
+    DCHECK(result_saved);
     __ lw(a0, MemOperand(sp));
     __ push(a0);
     __ CallRuntime(Runtime::kToFastProperties, 1);
@@ -1776,7 +1793,7 @@
   int length = subexprs->length();
 
   Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
+  DCHECK_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
   bool has_fast_elements =
@@ -1799,7 +1816,7 @@
   if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
     __ li(a0, Operand(Smi::FromInt(flags)));
     __ Push(a3, a2, a1, a0);
-    __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
   } else {
     FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
     __ CallStub(&stub);
@@ -1851,7 +1868,7 @@
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  ASSERT(expr->target()->IsValidReferenceExpression());
+  DCHECK(expr->target()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ Assignment");
 
@@ -1873,9 +1890,9 @@
       break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
-        // We need the receiver both on the stack and in the accumulator.
-        VisitForAccumulatorValue(property->obj());
-        __ push(result_register());
+        // We need the receiver both on the stack and in the register.
+        VisitForStackValue(property->obj());
+        __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1884,9 +1901,10 @@
       // We need the key and receiver on both the stack and in the IC registers.
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
-        __ lw(a1, MemOperand(sp, 0));
-        __ push(v0);
+        VisitForStackValue(property->key());
+        __ lw(LoadDescriptor::ReceiverRegister(),
+              MemOperand(sp, 1 * kPointerSize));
+        __ lw(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1967,12 +1985,12 @@
   VisitForStackValue(expr->expression());
 
   switch (expr->yield_kind()) {
-    case Yield::SUSPEND:
+    case Yield::kSuspend:
       // Pop value from top-of-stack slot; box result into result register.
       EmitCreateIteratorResult(false);
       __ push(result_register());
       // Fall through.
-    case Yield::INITIAL: {
+    case Yield::kInitial: {
       Label suspend, continuation, post_runtime, resume;
 
       __ jmp(&suspend);
@@ -1982,7 +2000,7 @@
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
-      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
       __ li(a1, Operand(Smi::FromInt(continuation.pos())));
       __ sw(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
       __ sw(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
@@ -1992,7 +2010,7 @@
       __ Addu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
       __ Branch(&post_runtime, eq, sp, Operand(a1));
       __ push(v0);  // generator object
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
       __ pop(result_register());
@@ -2003,7 +2021,7 @@
       break;
     }
 
-    case Yield::FINAL: {
+    case Yield::kFinal: {
       VisitForAccumulatorValue(expr->generator_object());
       __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
       __ sw(a1, FieldMemOperand(result_register(),
@@ -2015,7 +2033,7 @@
       break;
     }
 
-    case Yield::DELEGATING: {
+    case Yield::kDelegating: {
       VisitForStackValue(expr->generator_object());
 
       // Initial stack layout is as follows:
@@ -2023,7 +2041,10 @@
       // [sp + 0 * kPointerSize] g
 
       Label l_catch, l_try, l_suspend, l_continuation, l_resume;
-      Label l_next, l_call, l_loop;
+      Label l_next, l_call;
+      Register load_receiver = LoadDescriptor::ReceiverRegister();
+      Register load_name = LoadDescriptor::NameRegister();
+
       // Initial send value is undefined.
       __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
       __ Branch(&l_next);
@@ -2032,9 +2053,9 @@
       __ bind(&l_catch);
       __ mov(a0, v0);
       handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
-      __ LoadRoot(a2, Heap::kthrow_stringRootIndex);  // "throw"
-      __ lw(a3, MemOperand(sp, 1 * kPointerSize));    // iter
-      __ Push(a2, a3, a0);                            // "throw", iter, except
+      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
+      __ lw(a3, MemOperand(sp, 1 * kPointerSize));           // iter
+      __ Push(load_name, a3, a0);                     // "throw", iter, except
       __ jmp(&l_call);
 
       // try { received = %yield result }
@@ -2053,14 +2074,14 @@
       const int generator_object_depth = kPointerSize + handler_size;
       __ lw(a0, MemOperand(sp, generator_object_depth));
       __ push(a0);                                       // g
-      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
       __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
       __ sw(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
       __ sw(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
       __ mov(a1, cp);
       __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
                           kRAHasBeenSaved, kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
       __ pop(v0);                                      // result
       EmitReturnSequence();
@@ -2070,15 +2091,20 @@
 
       // receiver = iter; f = 'next'; arg = received;
       __ bind(&l_next);
-      __ LoadRoot(a2, Heap::knext_stringRootIndex);    // "next"
-      __ lw(a3, MemOperand(sp, 1 * kPointerSize));     // iter
-      __ Push(a2, a3, a0);                             // "next", iter, received
+
+      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
+      __ lw(a3, MemOperand(sp, 1 * kPointerSize));          // iter
+      __ Push(load_name, a3, a0);                      // "next", iter, received
 
       // result = receiver[f](arg);
       __ bind(&l_call);
-      __ lw(a1, MemOperand(sp, kPointerSize));
-      __ lw(a0, MemOperand(sp, 2 * kPointerSize));
-      Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+      __ lw(load_receiver, MemOperand(sp, kPointerSize));
+      __ lw(load_name, MemOperand(sp, 2 * kPointerSize));
+      if (FLAG_vector_ics) {
+        __ li(VectorLoadICDescriptor::SlotRegister(),
+              Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
+      }
+      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
       CallIC(ic, TypeFeedbackId::None());
       __ mov(a0, v0);
       __ mov(a1, a0);
@@ -2090,21 +2116,29 @@
       __ Drop(1);  // The function is still on the stack; drop it.
 
       // if (!result.done) goto l_try;
-      __ bind(&l_loop);
-      __ mov(a0, v0);
-      __ push(a0);                                       // save result
-      __ LoadRoot(a2, Heap::kdone_stringRootIndex);      // "done"
-      CallLoadIC(NOT_CONTEXTUAL);                        // result.done in v0
+      __ Move(load_receiver, v0);
+
+      __ push(load_receiver);                               // save result
+      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
+      if (FLAG_vector_ics) {
+        __ li(VectorLoadICDescriptor::SlotRegister(),
+              Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                           // v0=result.done
       __ mov(a0, v0);
       Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
       CallIC(bool_ic);
       __ Branch(&l_try, eq, v0, Operand(zero_reg));
 
       // result.value
-      __ pop(a0);                                        // result
-      __ LoadRoot(a2, Heap::kvalue_stringRootIndex);     // "value"
-      CallLoadIC(NOT_CONTEXTUAL);                        // result.value in v0
-      context()->DropAndPlug(2, v0);                     // drop iter and g
+      __ pop(load_receiver);                                 // result
+      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
+      if (FLAG_vector_ics) {
+        __ li(VectorLoadICDescriptor::SlotRegister(),
+              Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                            // v0=result.value
+      context()->DropAndPlug(2, v0);                         // drop iter and g
       break;
     }
   }
@@ -2115,7 +2149,7 @@
     Expression *value,
     JSGeneratorObject::ResumeMode resume_mode) {
   // The value stays in a0, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
   // is read to throw the value when the resumed generator is already closed.
   // a1 will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
@@ -2194,10 +2228,10 @@
   __ push(a2);
   __ Branch(&push_operand_holes);
   __ bind(&call_resume);
-  ASSERT(!result_register().is(a1));
+  DCHECK(!result_register().is(a1));
   __ Push(a1, result_register());
   __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
   // Not reached: the runtime call returns elsewhere.
   __ stop("not-reached");
 
@@ -2212,14 +2246,14 @@
   } else {
     // Throw the provided value.
     __ push(a0);
-    __ CallRuntime(Runtime::kHiddenThrow, 1);
+    __ CallRuntime(Runtime::kThrow, 1);
   }
   __ jmp(&done);
 
   // Throw error if we attempt to operate on a running generator.
   __ bind(&wrong_state);
   __ push(a1);
-  __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
 
   __ bind(&done);
   context()->Plug(result_register());
@@ -2230,23 +2264,26 @@
   Label gc_required;
   Label allocated;
 
-  Handle<Map> map(isolate()->native_context()->iterator_result_map());
+  const int instance_size = 5 * kPointerSize;
+  DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
+            instance_size);
 
-  __ Allocate(map->instance_size(), v0, a2, a3, &gc_required, TAG_OBJECT);
+  __ Allocate(instance_size, v0, a2, a3, &gc_required, TAG_OBJECT);
   __ jmp(&allocated);
 
   __ bind(&gc_required);
-  __ Push(Smi::FromInt(map->instance_size()));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ Push(Smi::FromInt(instance_size));
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ lw(context_register(),
         MemOperand(fp, StandardFrameConstants::kContextOffset));
 
   __ bind(&allocated);
-  __ li(a1, Operand(map));
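+  // Load the iterator result map from the native context at run time
+  // instead of embedding the map handle in the generated code.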
+  __ lw(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ lw(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
+  __ lw(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
   __ pop(a2);
   __ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
   __ li(t0, Operand(isolate()->factory()->empty_fixed_array()));
-  ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
   __ sw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
   __ sw(t0, FieldMemOperand(v0, JSObject::kPropertiesOffset));
   __ sw(t0, FieldMemOperand(v0, JSObject::kElementsOffset));
@@ -2265,19 +2302,43 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  __ mov(a0, result_register());
-  __ li(a2, Operand(key->value()));
-  // Call load IC. It has arguments receiver and property name a0 and a2.
-  CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+
+  __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
+  if (FLAG_vector_ics) {
+    __ li(VectorLoadICDescriptor::SlotRegister(),
+          Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(v0);
+  VisitForStackValue(super_ref->this_var());
+  __ Push(key->value());
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
-  __ mov(a0, result_register());
-  // Call keyed load IC. It has arguments key and receiver in a0 and a1.
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, prop->PropertyFeedbackId());
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  if (FLAG_vector_ics) {
+    __ li(VectorLoadICDescriptor::SlotRegister(),
+          Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallIC(ic);
+  } else {
+    CallIC(ic, prop->PropertyFeedbackId());
+  }
 }
 
 
@@ -2304,8 +2365,8 @@
   patch_site.EmitJumpIfSmi(scratch1, &smi_case);
 
   __ bind(&stub_call);
-  BinaryOpICStub stub(isolate(), op, mode);
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done);
 
@@ -2346,16 +2407,13 @@
       break;
     case Token::MUL: {
       __ SmiUntag(scratch1, right);
-      __ Mult(left, scratch1);
-      __ mflo(scratch1);
-      __ mfhi(scratch2);
-      __ sra(scratch1, scratch1, 31);
+      __ Mul(scratch2, v0, left, scratch1);
+      __ sra(scratch1, v0, 31);
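+      // The 64-bit product fits in 32 bits only if the high word (scratch2)
+      // equals the sign-extension of the low word; otherwise fall back to
+      // the stub.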
       __ Branch(&stub_call, ne, scratch1, Operand(scratch2));
-      __ mflo(v0);
       __ Branch(&done, ne, v0, Operand(zero_reg));
       __ Addu(scratch2, right, left);
       __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
-      ASSERT(Smi::FromInt(0) == 0);
+      DCHECK(Smi::FromInt(0) == 0);
       __ mov(v0, zero_reg);
       break;
     }
@@ -2382,16 +2440,16 @@
                                      OverwriteMode mode) {
   __ mov(a0, result_register());
   __ pop(a1);
-  BinaryOpICStub stub(isolate(), op, mode);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   context()->Plug(v0);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  ASSERT(expr->IsValidReferenceExpression());
+  DCHECK(expr->IsValidReferenceExpression());
 
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot.
@@ -2414,9 +2472,10 @@
     case NAMED_PROPERTY: {
       __ push(result_register());  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
-      __ mov(a1, result_register());
-      __ pop(a0);  // Restore value.
-      __ li(a2, Operand(prop->key()->AsLiteral()->value()));
+      __ mov(StoreDescriptor::ReceiverRegister(), result_register());
+      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      __ li(StoreDescriptor::NameRegister(),
+            Operand(prop->key()->AsLiteral()->value()));
       CallStoreIC();
       break;
     }
@@ -2424,11 +2483,11 @@
       __ push(result_register());  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
-      __ mov(a1, result_register());
-      __ Pop(a0, a2);  // a0 = restored value.
-      Handle<Code> ic = strict_mode() == SLOPPY
-        ? isolate()->builtins()->KeyedStoreIC_Initialize()
-        : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ mov(StoreDescriptor::NameRegister(), result_register());
+      __ Pop(StoreDescriptor::ValueRegister(),
+             StoreDescriptor::ReceiverRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic);
       break;
     }
@@ -2450,32 +2509,23 @@
 }
 
 
-void FullCodeGenerator::EmitCallStoreContextSlot(
-    Handle<String> name, StrictMode strict_mode) {
-  __ li(a1, Operand(name));
-  __ li(a0, Operand(Smi::FromInt(strict_mode)));
-  __ Push(v0, cp, a1, a0);  // Value, context, name, strict mode.
-  __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
-}
-
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(a0, result_register());
-    __ li(a2, Operand(var->name()));
-    __ lw(a1, GlobalObjectOperand());
+    __ mov(StoreDescriptor::ValueRegister(), result_register());
+    __ li(StoreDescriptor::NameRegister(), Operand(var->name()));
+    __ lw(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
     CallStoreIC();
 
   } else if (op == Token::INIT_CONST_LEGACY) {
     // Const initializers need a write barrier.
-    ASSERT(!var->IsParameter());  // No const parameters.
+    DCHECK(!var->IsParameter());  // No const parameters.
     if (var->IsLookupSlot()) {
       __ li(a0, Operand(var->name()));
       __ Push(v0, cp, a0);  // Context and name.
-      __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
     } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       Label skip;
       MemOperand location = VarOperand(var, a1);
       __ lw(a2, location);
@@ -2487,30 +2537,31 @@
 
   } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
-    if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
-    } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-      Label assign;
-      MemOperand location = VarOperand(var, a1);
-      __ lw(a3, location);
-      __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
-      __ Branch(&assign, ne, a3, Operand(t0));
-      __ li(a3, Operand(var->name()));
-      __ push(a3);
-      __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
-      // Perform the assignment.
-      __ bind(&assign);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-    }
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, a1);
+    __ lw(a3, location);
+    __ LoadRoot(t0, Heap::kTheHoleValueRootIndex);
+    __ Branch(&assign, ne, a3, Operand(t0));
+    __ li(a3, Operand(var->name()));
+    __ push(a3);
+    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    // Perform the assignment.
+    __ bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
 
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
     if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
+      // Assignment to var.
+      __ li(a1, Operand(var->name()));
+      __ li(a0, Operand(Smi::FromInt(strict_mode())));
+      __ Push(v0, cp, a1, a0);  // Value, context, name, strict mode.
+      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
     } else {
-      ASSERT((var->IsStackAllocated() || var->IsContextSlot()));
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
       MemOperand location = VarOperand(var, a1);
       if (generate_debug_code_ && op == Token::INIT_LET) {
         // Check for an uninitialized let binding.
@@ -2528,15 +2579,15 @@
 void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
   Property* prop = expr->target()->AsProperty();
-  ASSERT(prop != NULL);
-  ASSERT(prop->key()->IsLiteral());
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ mov(a0, result_register());  // Load the value.
-  __ li(a2, Operand(prop->key()->AsLiteral()->value()));
-  __ pop(a1);
-
+  __ mov(StoreDescriptor::ValueRegister(), result_register());
+  __ li(StoreDescriptor::NameRegister(),
+        Operand(prop->key()->AsLiteral()->value()));
+  __ pop(StoreDescriptor::ReceiverRegister());
   CallStoreIC(expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2554,12 +2605,11 @@
   // - a0 is the value,
   // - a1 is the key,
   // - a2 is the receiver.
-  __ mov(a0, result_register());
-  __ Pop(a2, a1);  // a1 = key.
+  __ mov(StoreDescriptor::ValueRegister(), result_register());
+  __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(a0));
 
-  Handle<Code> ic = strict_mode() == SLOPPY
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
   CallIC(ic, expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2572,14 +2622,20 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForAccumulatorValue(expr->obj());
-    EmitNamedPropertyLoad(expr);
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), v0);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
     PrepareForBailoutForId(expr->LoadId(), TOS_REG);
     context()->Plug(v0);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
-    __ pop(a1);
+    __ Move(LoadDescriptor::NameRegister(), v0);
+    __ pop(LoadDescriptor::ReceiverRegister());
     EmitKeyedPropertyLoad(expr);
     context()->Plug(v0);
   }
@@ -2597,12 +2653,11 @@
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
 
-  CallIC::CallType call_type = callee->IsVariableProxy()
-      ? CallIC::FUNCTION
-      : CallIC::METHOD;
+  CallICState::CallType call_type =
+      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
 
   // Get the target function.
-  if (call_type == CallIC::FUNCTION) {
+  if (call_type == CallICState::FUNCTION) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
       PrepareForBailout(callee, NO_REGISTERS);
@@ -2612,8 +2667,9 @@
     __ Push(isolate()->factory()->undefined_value());
   } else {
     // Load the function from the receiver.
-    ASSERT(callee->IsProperty());
-    __ lw(v0, MemOperand(sp, 0));
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
@@ -2626,6 +2682,44 @@
 }
 
 
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  const Register scratch = a1;
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(v0);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(v0);
+  __ lw(scratch, MemOperand(sp, kPointerSize));
+  __ Push(scratch, v0);
+  __ Push(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ sw(v0, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2635,8 +2729,9 @@
   Expression* callee = expr->expression();
 
   // Load the function from the receiver.
-  ASSERT(callee->IsProperty());
-  __ lw(a1, MemOperand(sp, 0));
+  DCHECK(callee->IsProperty());
+  __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+  __ Move(LoadDescriptor::NameRegister(), v0);
   EmitKeyedPropertyLoad(callee->AsProperty());
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
@@ -2645,11 +2740,11 @@
   __ push(at);
   __ sw(v0, MemOperand(sp, kPointerSize));
 
-  EmitCall(expr, CallIC::METHOD);
+  EmitCall(expr, CallICState::METHOD);
 }
 
 
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2677,13 +2772,16 @@
 
 
 void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
-  // t2: copy of the first argument or undefined if it doesn't exist.
+  // t3: copy of the first argument or undefined if it doesn't exist.
   if (arg_count > 0) {
-    __ lw(t2, MemOperand(sp, arg_count * kPointerSize));
+    __ lw(t3, MemOperand(sp, arg_count * kPointerSize));
   } else {
-    __ LoadRoot(t2, Heap::kUndefinedValueRootIndex);
+    __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
   }
 
+  // t2: the enclosing function.
+  __ lw(t2, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
   // t1: the receiver of the enclosing function.
   int receiver_offset = 2 + info_->scope()->num_parameters();
   __ lw(t1, MemOperand(fp, receiver_offset * kPointerSize));
@@ -2695,8 +2793,9 @@
   __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
 
   // Do the runtime call.
+  __ Push(t3);
   __ Push(t2, t1, t0, a1);
-  __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
 }
 
 
@@ -2759,16 +2858,16 @@
     { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
     }
 
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in v0)
     // and the object holding it (returned in v1).
-    ASSERT(!context_register().is(a2));
+    DCHECK(!context_register().is(a2));
     __ li(a2, Operand(proxy->name()));
     __ Push(context_register(), a2);
-    __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
     __ Push(v0, v1);  // Function, receiver.
 
     // If fast case code has been generated, emit code to push the
@@ -2792,16 +2891,23 @@
     EmitCall(expr);
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(property->obj());
-    }
-    if (property->key()->IsPropertyName()) {
-      EmitCallWithLoadIC(expr);
+    bool is_named_call = property->key()->IsPropertyName();
+    // Named super.x() is handled in EmitSuperCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithLoadIC(expr, property->key());
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
     }
   } else {
-    ASSERT(call_type == Call::OTHER_CALL);
+    DCHECK(call_type == Call::OTHER_CALL);
     // Call to an arbitrary expression not handled specially above.
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(callee);
@@ -2814,7 +2920,7 @@
 
 #ifdef DEBUG
   // RecordJSReturnSite should have been called.
-  ASSERT(expr->return_is_recorded_);
+  DCHECK(expr->return_is_recorded_);
 #endif
 }
 
@@ -2848,7 +2954,7 @@
   // Record call targets in unoptimized code.
   if (FLAG_pretenuring_call_new) {
     EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
-    ASSERT(expr->AllocationSiteFeedbackSlot() ==
+    DCHECK(expr->AllocationSiteFeedbackSlot() ==
            expr->CallNewFeedbackSlot() + 1);
   }
 
@@ -2864,7 +2970,7 @@
 
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2885,7 +2991,7 @@
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2906,7 +3012,7 @@
 
 void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2937,7 +3043,7 @@
 
 void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2960,7 +3066,7 @@
 
 void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2985,7 +3091,7 @@
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3072,7 +3178,7 @@
 
 void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3095,7 +3201,7 @@
 
 void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3125,7 +3231,7 @@
 
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3148,7 +3254,7 @@
 
 void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3169,7 +3275,7 @@
 
 
 void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -3201,7 +3307,7 @@
 
 void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
   VisitForStackValue(args->at(0));
@@ -3224,7 +3330,7 @@
 
 void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in a1 and the formal
   // parameter count in a0.
@@ -3238,7 +3344,7 @@
 
 
 void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
   Label exit;
   // Get the number of formal parameters.
   __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
@@ -3260,7 +3366,7 @@
 
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
   VisitForAccumulatorValue(args->at(0));
@@ -3299,7 +3405,7 @@
 
   // Functions have class 'Function'.
   __ bind(&function);
-  __ LoadRoot(v0, Heap::kfunction_class_stringRootIndex);
+  __ LoadRoot(v0, Heap::kFunction_stringRootIndex);
   __ jmp(&done);
 
   // Objects with a non-function constructor have class 'Object'.
@@ -3322,7 +3428,7 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3335,7 +3441,7 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 4);
+  DCHECK(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3347,7 +3453,7 @@
 
 void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
@@ -3367,8 +3473,8 @@
 
 void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  DCHECK(args->length() == 2);
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -3406,7 +3512,7 @@
   }
 
   __ bind(&not_date_object);
-  __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
   __ bind(&done);
   context()->Plug(v0);
 }
@@ -3414,15 +3520,15 @@
 
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = v0;
   Register index = a1;
   Register value = a2;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ Pop(index, value);
 
   if (FLAG_debug_code) {
@@ -3451,15 +3557,15 @@
 
 void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = v0;
   Register index = a1;
   Register value = a2;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ Pop(index, value);
 
   if (FLAG_debug_code) {
@@ -3489,7 +3595,7 @@
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   MathPowStub stub(isolate(), MathPowStub::ON_STACK);
@@ -3500,7 +3606,7 @@
 
 void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
@@ -3529,7 +3635,7 @@
 
 void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(args->length(), 1);
+  DCHECK_EQ(args->length(), 1);
 
   // Load the argument into a0 and call the stub.
   VisitForAccumulatorValue(args->at(0));
@@ -3543,7 +3649,7 @@
 
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3562,7 +3668,7 @@
 
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3609,7 +3715,7 @@
 
 void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3658,7 +3764,7 @@
 
 void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3672,7 +3778,7 @@
 
 void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3685,7 +3791,7 @@
 
 void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() >= 2);
+  DCHECK(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
   for (int i = 0; i < arg_count + 1; i++) {
@@ -3718,7 +3824,7 @@
 void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(2));
@@ -3732,9 +3838,9 @@
 
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  DCHECK_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
@@ -3777,7 +3883,7 @@
   __ bind(&not_found);
   // Call runtime to perform the lookup.
   __ Push(cache, key);
-  __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ bind(&done);
   context()->Plug(v0);
@@ -3807,7 +3913,7 @@
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
   __ AssertString(v0);
@@ -3819,13 +3925,13 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       empty_separator_loop, one_char_separator_loop,
       one_char_separator_loop_entry, long_separator_loop;
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(0));
 
@@ -3869,7 +3975,7 @@
   __ lw(elements, FieldMemOperand(array, JSArray::kElementsOffset));
   array = no_reg;  // End of array's live range.
 
-  // Check that all array elements are sequential ASCII strings, and
+  // Check that all array elements are sequential one-byte strings, and
   // accumulate the sum of their lengths, as a smi-encoded value.
   __ mov(string_length, zero_reg);
   __ Addu(element,
@@ -3885,8 +3991,8 @@
   //   element: Current array element.
   //   elements_end: Array end.
   if (generate_debug_code_) {
-    __ Assert(gt, kNoEmptyArraysHereInEmitFastAsciiArrayJoin,
-        array_length, Operand(zero_reg));
+    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin, array_length,
+              Operand(zero_reg));
   }
   __ bind(&loop);
   __ lw(string, MemOperand(element));
@@ -3894,7 +4000,7 @@
   __ JumpIfSmi(string, &bailout);
   __ lw(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
   __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
   __ lw(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
   __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
   __ BranchOnOverflow(&bailout, scratch3);
@@ -3913,23 +4019,21 @@
   //   string_length: Sum of string lengths (smi).
   //   elements: FixedArray of strings.
 
-  // Check that the separator is a flat ASCII string.
+  // Check that the separator is a flat one-byte string.
   __ JumpIfSmi(separator, &bailout);
   __ lw(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
   __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
-  __ JumpIfInstanceTypeIsNotSequentialAscii(scratch1, scratch2, &bailout);
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
 
   // Add (separator length times array_length) - separator length to the
   // string_length to get the length of the result string. array_length is not
   // smi but the other values are, so the result is a smi.
   __ lw(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
   __ Subu(string_length, string_length, Operand(scratch1));
-  __ Mult(array_length, scratch1);
+  __ Mul(scratch3, scratch2, array_length, scratch1);
   // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
   // zero.
-  __ mfhi(scratch2);
-  __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
-  __ mflo(scratch2);
+  __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
   __ And(scratch3, scratch2, Operand(0x80000000));
   __ Branch(&bailout, ne, scratch3, Operand(zero_reg));
   __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
@@ -3947,12 +4051,8 @@
   //   separator: Separator string
   //   string_length: Length of result string (not smi)
   //   array_length: Length of the array.
-  __ AllocateAsciiString(result,
-                         string_length,
-                         scratch1,
-                         scratch2,
-                         elements_end,
-                         &bailout);
+  __ AllocateOneByteString(result, string_length, scratch1, scratch2,
+                           elements_end, &bailout);
   // Prepare for looping. Set up elements_end to end of the array. Set
   // result_pos to the position of the result where to write the first
   // character.
@@ -3986,12 +4086,12 @@
   __ CopyBytes(string, result_pos, string_length, scratch1);
   // End while (element < elements_end).
   __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
-  ASSERT(result.is(v0));
+  DCHECK(result.is(v0));
   __ Branch(&done);
 
   // One-character separator case.
   __ bind(&one_char_separator);
-  // Replace separator with its ASCII character value.
+  // Replace separator with its one-byte character value.
   __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
   // Jump into the loop after the code that copies the separator, so the first
   // element is not preceded by a separator.
@@ -4002,7 +4102,7 @@
   //   result_pos: the position to which we are currently copying characters.
   //   element: Current array element.
   //   elements_end: Array end.
-  //   separator: Single separator ASCII char (in lower byte).
+  //   separator: Single separator one-byte char (in lower byte).
 
   // Copy the separator character to the result.
   __ sb(separator, MemOperand(result_pos));
@@ -4018,7 +4118,7 @@
   __ CopyBytes(string, result_pos, string_length, scratch1);
   // End while (element < elements_end).
   __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
-  ASSERT(result.is(v0));
+  DCHECK(result.is(v0));
   __ Branch(&done);
 
   // Long separator case (separator is more than one character). Entry is at the
@@ -4047,7 +4147,7 @@
   __ CopyBytes(string, result_pos, string_length, scratch1);
   // End while (element < elements_end).
   __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
-  ASSERT(result.is(v0));
+  DCHECK(result.is(v0));
   __ Branch(&done);
 
   __ bind(&bailout);
@@ -4057,6 +4157,17 @@
 }
 
 
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
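+  // Read the byte-wide flag and hand it back to JavaScript as a smi.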
+  __ li(at, Operand(debug_is_active));
+  __ lb(v0, MemOperand(at));
+  __ SmiTag(v0);
+  context()->Plug(v0);
+}
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   if (expr->function() != NULL &&
       expr->function()->intrinsic_type == Runtime::INLINE) {
@@ -4071,12 +4182,20 @@
 
   if (expr->is_jsruntime()) {
     // Push the builtins object as the receiver.
-    __ lw(a0, GlobalObjectOperand());
-    __ lw(a0, FieldMemOperand(a0, GlobalObject::kBuiltinsOffset));
-    __ push(a0);
+    Register receiver = LoadDescriptor::ReceiverRegister();
+    __ lw(receiver, GlobalObjectOperand());
+    __ lw(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
+    __ push(receiver);
+
     // Load the function from the receiver.
-    __ li(a2, Operand(expr->name()));
-    CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    __ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
+    if (FLAG_vector_ics) {
+      __ li(VectorLoadICDescriptor::SlotRegister(),
+            Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
+      CallLoadIC(NOT_CONTEXTUAL);
+    } else {
+      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    }
 
     // Push the target function under the receiver.
     __ lw(at, MemOperand(sp, 0));
@@ -4130,7 +4249,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode() == SLOPPY || var->is_this());
+        DCHECK(strict_mode() == SLOPPY || var->is_this());
         if (var->IsUnallocated()) {
           __ lw(a2, GlobalObjectOperand());
           __ li(a1, Operand(var->name()));
@@ -4145,10 +4264,10 @@
         } else {
           // Non-global variable.  Call the runtime to try to delete from the
           // context where the variable was introduced.
-          ASSERT(!context_register().is(a2));
+          DCHECK(!context_register().is(a2));
           __ li(a2, Operand(var->name()));
           __ Push(context_register(), a2);
-          __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
           context()->Plug(v0);
         }
       } else {
@@ -4186,7 +4305,7 @@
         // for control and plugging the control flow into the context,
         // because we need to prepare a pair of extra administrative AST ids
         // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
         Label materialize_true, materialize_false, done;
         VisitForControl(expr->expression(),
                         &materialize_false,
@@ -4223,7 +4342,7 @@
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  ASSERT(expr->expression()->IsValidReferenceExpression());
+  DCHECK(expr->expression()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -4242,7 +4361,7 @@
 
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
-    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
     EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
@@ -4252,15 +4371,16 @@
       __ push(at);
     }
     if (assign_type == NAMED_PROPERTY) {
-      // Put the object both on the stack and in the accumulator.
-      VisitForAccumulatorValue(prop->obj());
-      __ push(v0);
+      // Put the object both on the stack and in the register.
+      VisitForStackValue(prop->obj());
+      __ lw(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
-      __ lw(a1, MemOperand(sp, 0));
-      __ push(v0);
+      VisitForStackValue(prop->key());
+      __ lw(LoadDescriptor::ReceiverRegister(),
+            MemOperand(sp, 1 * kPointerSize));
+      __ lw(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4343,8 +4463,9 @@
   // Record position before stub call.
   SetSourcePosition(expr->position());
 
-  BinaryOpICStub stub(isolate(), Token::ADD, NO_OVERWRITE);
-  CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+  CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4371,9 +4492,10 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(a0, result_register());  // Value.
-      __ li(a2, Operand(prop->key()->AsLiteral()->value()));  // Name.
-      __ pop(a1);  // Receiver.
+      __ mov(StoreDescriptor::ValueRegister(), result_register());
+      __ li(StoreDescriptor::NameRegister(),
+            Operand(prop->key()->AsLiteral()->value()));
+      __ pop(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4386,11 +4508,11 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ mov(a0, result_register());  // Value.
-      __ Pop(a2, a1);  // a1 = key, a2 = receiver.
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ mov(StoreDescriptor::ValueRegister(), result_register());
+      __ Pop(StoreDescriptor::ReceiverRegister(),
+             StoreDescriptor::NameRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic, expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4407,13 +4529,17 @@
 
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
-  ASSERT(!context()->IsEffect());
-  ASSERT(!context()->IsTest());
+  DCHECK(!context()->IsEffect());
+  DCHECK(!context()->IsTest());
   VariableProxy* proxy = expr->AsVariableProxy();
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "[ Global variable");
-    __ lw(a0, GlobalObjectOperand());
-    __ li(a2, Operand(proxy->name()));
+    __ lw(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    __ li(LoadDescriptor::NameRegister(), Operand(proxy->name()));
+    if (FLAG_vector_ics) {
+      __ li(VectorLoadICDescriptor::SlotRegister(),
+            Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+    }
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     CallLoadIC(NOT_CONTEXTUAL);
@@ -4425,12 +4551,12 @@
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
     __ li(a0, Operand(proxy->name()));
     __ Push(cp, a0);
-    __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
     PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
@@ -4480,10 +4606,6 @@
     __ Branch(if_true, eq, v0, Operand(at));
     __ LoadRoot(at, Heap::kFalseValueRootIndex);
     Split(eq, v0, Operand(at), if_true, if_false, fall_through);
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(check, factory->null_string())) {
-    __ LoadRoot(at, Heap::kNullValueRootIndex);
-    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
     __ Branch(if_true, eq, v0, Operand(at));
@@ -4502,10 +4624,8 @@
           if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->object_string())) {
     __ JumpIfSmi(v0, if_false);
-    if (!FLAG_harmony_typeof) {
-      __ LoadRoot(at, Heap::kNullValueRootIndex);
-      __ Branch(if_true, eq, v0, Operand(at));
-    }
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
     // Check for JS objects => true.
     __ GetObjectType(v0, v0, a1);
     __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
@@ -4577,7 +4697,7 @@
       }
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
-      Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
       PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
@@ -4636,7 +4756,7 @@
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ sw(value, MemOperand(fp, frame_offset));
 }
 
@@ -4661,7 +4781,7 @@
     // code.  Fetch it from the context.
     __ lw(at, ContextOperand(cp, Context::CLOSURE_INDEX));
   } else {
-    ASSERT(declaration_scope->is_function_scope());
+    DCHECK(declaration_scope->is_function_scope());
     __ lw(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   }
   __ push(at);
@@ -4672,12 +4792,12 @@
 // Non-local control flow support.
 
 void FullCodeGenerator::EnterFinallyBlock() {
-  ASSERT(!result_register().is(a1));
+  DCHECK(!result_register().is(a1));
   // Store result register while executing finally block.
   __ push(result_register());
   // Cook return address in link register to stack (smi encoded Code* delta).
   __ Subu(a1, ra, Operand(masm_->CodeObject()));
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  DCHECK_EQ(1, kSmiTagSize + kSmiShiftSize);
   STATIC_ASSERT(0 == kSmiTag);
   __ Addu(a1, a1, Operand(a1));  // Convert to smi.
 
@@ -4707,7 +4827,7 @@
 
 
 void FullCodeGenerator::ExitFinallyBlock() {
-  ASSERT(!result_register().is(a1));
+  DCHECK(!result_register().is(a1));
   // Restore pending message from stack.
   __ pop(a1);
   ExternalReference pending_message_script =
@@ -4733,7 +4853,7 @@
 
   // Uncook return address and return.
   __ pop(result_register());
-  ASSERT_EQ(1, kSmiTagSize + kSmiShiftSize);
+  DCHECK_EQ(1, kSmiTagSize + kSmiShiftSize);
   __ sra(a1, a1, 1);  // Un-smi-tag value.
   __ Addu(at, a1, Operand(masm_->CodeObject()));
   __ Jump(at);
@@ -4821,16 +4941,16 @@
   Address branch_address = pc - 6 * kInstrSize;
   Address pc_immediate_load_address = pc - 4 * kInstrSize;
 
-  ASSERT(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
+  DCHECK(Assembler::IsBeq(Assembler::instr_at(pc - 5 * kInstrSize)));
   if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
-    ASSERT(reinterpret_cast<uint32_t>(
+    DCHECK(reinterpret_cast<uint32_t>(
         Assembler::target_address_at(pc_immediate_load_address)) ==
            reinterpret_cast<uint32_t>(
                isolate->builtins()->InterruptCheck()->entry()));
     return INTERRUPT;
   }
 
-  ASSERT(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
+  DCHECK(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
 
   if (reinterpret_cast<uint32_t>(
       Assembler::target_address_at(pc_immediate_load_address)) ==
@@ -4839,7 +4959,7 @@
     return ON_STACK_REPLACEMENT;
   }
 
-  ASSERT(reinterpret_cast<uint32_t>(
+  DCHECK(reinterpret_cast<uint32_t>(
       Assembler::target_address_at(pc_immediate_load_address)) ==
          reinterpret_cast<uint32_t>(
              isolate->builtins()->OsrAfterStackCheck()->entry()));
diff --git a/src/mips/ic-mips.cc b/src/mips/ic-mips.cc
deleted file mode 100644
index 834135c..0000000
--- a/src/mips/ic-mips.cc
+++ /dev/null
@@ -1,1340 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/codegen.h"
-#include "src/code-stubs.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
-                                            Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
-  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
-  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register elements,
-                                                Register scratch0,
-                                                Register scratch1,
-                                                Label* miss) {
-  // Register usage:
-  //   receiver: holds the receiver on entry and is unchanged.
-  //   elements: holds the property dictionary on fall through.
-  // Scratch registers:
-  //   scratch0: used to holds the receiver map.
-  //   scratch1: used to holds the receiver instance type, receiver bit mask
-  //     and elements map.
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss);
-
-  // Check that the receiver is a valid JS object.
-  __ GetObjectType(receiver, scratch0, scratch1);
-  __ Branch(miss, lt, scratch1, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-  // If this assert fails, we have to check the upper bound too.
-  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
-  GenerateGlobalInstanceTypeCheck(masm, scratch1, miss);
-
-  // Check that the global object does not require access checks.
-  __ lbu(scratch1, FieldMemOperand(scratch0, Map::kBitFieldOffset));
-  __ And(scratch1, scratch1, Operand((1 << Map::kIsAccessCheckNeeded) |
-                           (1 << Map::kHasNamedInterceptor)));
-  __ Branch(miss, ne, scratch1, Operand(zero_reg));
-
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ LoadRoot(scratch0, Heap::kHashTableMapRootIndex);
-  __ Branch(miss, ne, scratch1, Operand(scratch0));
-}
-
-
-// Helper function used from LoadIC GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// result:   Register for the result. It is only updated if a jump to the miss
-//           label is not done. Can be the same as 'elements' or 'name', in
-//           which case that register is clobbered.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address left in scratch2 by
-// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss,
-                                   Register elements,
-                                   Register name,
-                                   Register result,
-                                   Register scratch1,
-                                   Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch1,
-                                                   scratch2);
-
-  // If probing finds an entry check that the value is a normal
-  // property.
-  __ bind(&done);  // scratch2 == elements + 4 * index.
-  const int kElementsStartOffset = NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
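-  // The type field sits in the low bits of the smi-tagged details word
-  // (hence the shift); normal properties have type zero.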
-  __ And(at,
-         scratch1,
-         Operand(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Get the value at the masked, scaled index and return.
-  __ lw(result,
-        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
-}
-
-
-// Helper function used from StoreIC::GenerateNormal.
-//
-// elements: Property dictionary. It is not clobbered if a jump to the miss
-//           label is done.
-// name:     Property name. It is not clobbered if a jump to the miss label is
-//           done.
-// value:    The value to store.
-// The two scratch registers need to be different from elements, name and
-// result.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-// The address left in scratch2 by
-// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
-static void GenerateDictionaryStore(MacroAssembler* masm,
-                                    Label* miss,
-                                    Register elements,
-                                    Register name,
-                                    Register value,
-                                    Register scratch1,
-                                    Register scratch2) {
-  // Main use of the scratch registers.
-  // scratch1: Used as temporary and to hold the capacity of the property
-  //           dictionary.
-  // scratch2: Used as temporary.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch1,
-                                                   scratch2);
-
-  // If probing finds an entry in the dictionary check that the value
-  // is a normal property that is not read only.
-  __ bind(&done);  // scratch2 == elements + 4 * index.
-  const int kElementsStartOffset = NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
-  __ lw(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
-  __ And(at, scratch1, Operand(kTypeAndReadOnlyMask));
-  __ Branch(miss, ne, at, Operand(zero_reg));
-
-  // Store the value at the masked, scaled index and return.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ Addu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
-  __ sw(value, MemOperand(scratch2));
-
-  // Update the write barrier. Make sure not to clobber the value.
-  __ mov(scratch1, value);
-  __ RecordWrite(
-      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register map,
-                                           Register scratch,
-                                           int interceptor_bit,
-                                           Label* slow) {
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-  // Get the map of the receiver.
-  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check bit field.
-  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ And(at, scratch,
-         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
-  __ Branch(slow, ne, at, Operand(zero_reg));
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing into string
-  // objects works as intended.
-  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
-                                  Register receiver,
-                                  Register key,
-                                  Register elements,
-                                  Register scratch1,
-                                  Register scratch2,
-                                  Register result,
-                                  Label* not_fast_array,
-                                  Label* out_of_range) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // elements - holds the elements of the receiver on exit.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  // scratch1 - used to hold elements map and elements length.
-  //            Holds the elements map if not_fast_array branch is taken.
-  //
-  // scratch2 - used to hold the loaded value.
-
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  if (not_fast_array != NULL) {
-    // Check that the object is in fast mode (not dictionary).
-    __ lw(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
-    __ Branch(not_fast_array, ne, scratch1, Operand(at));
-  } else {
-    __ AssertFastElements(elements);
-  }
-
-  // Check that the key (index) is within bounds.
-  __ lw(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(out_of_range, hs, key, Operand(scratch1));
-
-  // Fast case: Do the load.
-  __ Addu(scratch1, elements,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  // The key is a smi.
-  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
-  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
-  __ addu(at, at, scratch1);
-  __ lw(scratch2, MemOperand(at));
-
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ Branch(out_of_range, eq, scratch2, Operand(at));
-  __ mov(result, scratch2);
-}
-
-
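-// Illustrative sketch (not part of the original source), assuming 32-bit
-// MIPS with kSmiTag == 0, kSmiTagSize == 1 and kPointerSizeLog2 == 2: a smi
-// key k is stored as (k << 1), so the shift by
-// (kPointerSizeLog2 - kSmiTagSize) in GenerateFastArrayLoad turns the tagged
-// key directly into a byte offset of k * kPointerSize without untagging it.
-
-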
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
-                                 Register key,
-                                 Register map,
-                                 Register hash,
-                                 Label* index_string,
-                                 Label* not_unique) {
-  // The key is not a smi.
-  Label unique;
-  // Is it a name?
-  __ GetObjectType(key, map, hash);
-  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
-
-  // Is the string an array index, with cached numeric value?
-  __ lw(hash, FieldMemOperand(key, Name::kHashFieldOffset));
-  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
-  __ Branch(index_string, eq, at, Operand(zero_reg));
-
-  // Is the string internalized? We know it's a string, so a single
-  // bit test is enough.
-  // map: key map
-  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  STATIC_ASSERT(kInternalizedTag == 0);
-  __ And(at, hash, Operand(kIsNotInternalizedMask));
-  __ Branch(not_unique, ne, at, Operand(zero_reg));
-
-  __ bind(&unique);
-}
-
-
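-// Illustrative sketch (not part of the original source): the hash field at
-// Name::kHashFieldOffset is a bit field; when the bits covered by
-// kContainsCachedArrayIndexMask are all zero, the numeric array index is
-// cached inside the hash field itself, so the index_string path can decode
-// it with IndexFromHash instead of converting the string at runtime.
-
-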
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  //  -- a0    : receiver
-  // -----------------------------------
-
-  // Probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, a0, a2, a3, t0, t1, t2);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  //  -- a0    : receiver
-  // -----------------------------------
-  Label miss, slow;
-
-  GenerateNameDictionaryReceiverCheck(masm, a0, a1, a3, t0, &miss);
-
-  // a1: elements
-  GenerateDictionaryLoad(masm, &slow, a1, a2, v0, a3, t0);
-  __ Ret();
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-
-  // Cache miss: Jump to runtime.
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  //  -- a0    : receiver
-  // -----------------------------------
-  Isolate* isolate = masm->isolate();
-
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
-
-  __ mov(a3, a0);
-  __ Push(a3, a2);
-
-  // Perform tail call to the entry.
-  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- a2    : name
-  //  -- ra    : return address
-  //  -- a0    : receiver
-  // -----------------------------------
-
-  __ mov(a3, a0);
-  __ Push(a3, a2);
-
-  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
-                                                Register object,
-                                                Register key,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                Label* unmapped_case,
-                                                Label* slow_case) {
-  Heap* heap = masm->isolate()->heap();
-
-  // Check that the receiver is a JSObject. Because of the map check
-  // later, we do not need to check for interceptors or whether it
-  // requires access checks.
-  __ JumpIfSmi(object, slow_case);
-  // Check that the object is some kind of JSObject.
-  __ GetObjectType(object, scratch1, scratch2);
-  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));
-
-  // Check that the key is a positive smi.
-  __ And(scratch1, key, Operand(0x80000001));
-  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));
-
-  // Load the elements into scratch1 and check its map.
-  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
-  __ lw(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1,
-              scratch2,
-              arguments_map,
-              slow_case,
-              DONT_DO_SMI_CHECK);
-  // Check if element is in the range of mapped arguments. If not, jump
-  // to the unmapped lookup with the parameter map in scratch1.
-  __ lw(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
-  __ Subu(scratch2, scratch2, Operand(Smi::FromInt(2)));
-  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));
-
-  // Load element index and check whether it is the hole.
-  const int kOffset =
-      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
-
-  __ li(scratch3, Operand(kPointerSize >> 1));
-  __ Mul(scratch3, key, scratch3);
-  __ Addu(scratch3, scratch3, Operand(kOffset));
-
-  __ Addu(scratch2, scratch1, scratch3);
-  __ lw(scratch2, MemOperand(scratch2));
-  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
-  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));
-
-  // Load value from context and return it. We can reuse scratch1 because
-  // we do not jump to the unmapped lookup (which requires the parameter
-  // map in scratch1).
-  __ lw(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
-  __ li(scratch3, Operand(kPointerSize >> 1));
-  __ Mul(scratch3, scratch2, scratch3);
-  __ Addu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
-  __ Addu(scratch2, scratch1, scratch3);
-  return MemOperand(scratch2);
-}
-
-
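-// Illustrative layout sketch (not part of the original source): the sloppy
-// arguments parameter map is a FixedArray of the form
-//   [0] context, [1] arguments backing store, [2 + i] context slot index for
-//   parameter i (or the hole if parameter i is unmapped),
-// which is why the mapped lookup above compares the key against length - 2
-// and reads the element at index key + 2.
-
-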
-static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
-                                                  Register key,
-                                                  Register parameter_map,
-                                                  Register scratch,
-                                                  Label* slow_case) {
-  // The element is in the arguments backing store, which is referenced by
-  // the second element of the parameter map. The parameter_map register
-  // must be loaded with the parameter map of the arguments object and is
-  // overwritten on return.
-  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
-  Register backing_store = parameter_map;
-  __ lw(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
-  __ CheckMap(backing_store,
-              scratch,
-              Heap::kFixedArrayMapRootIndex,
-              slow_case,
-              DONT_DO_SMI_CHECK);
-  __ lw(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
-  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
-  __ li(scratch, Operand(kPointerSize >> 1));
-  __ Mul(scratch, key, scratch);
-  __ Addu(scratch,
-          scratch,
-          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ Addu(scratch, backing_store, scratch);
-  return MemOperand(scratch);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key
-  //  -- a1     : receiver
-  // -----------------------------------
-  Label slow, notin;
-  MemOperand mapped_location =
-      GenerateMappedArgumentsLookup(masm, a1, a0, a2, a3, t0, &notin, &slow);
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, mapped_location);
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in a2.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, a0, a2, a3, &slow);
-  __ lw(a2, unmapped_location);
-  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
-  __ Branch(&slow, eq, a2, Operand(a3));
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a2);
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
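-// Illustrative note (not part of the original source): Ret(USE_DELAY_SLOT)
-// emits the jr ra without a trailing nop, so the instruction emitted
-// immediately after such a Ret (here an lw or mov) lands in the branch
-// delay slot and executes before the return completes.
-
-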
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-  Label slow, notin;
-  // The store address is returned in the register of the MemOperand
-  // mapped_location.
-  MemOperand mapped_location =
-      GenerateMappedArgumentsLookup(masm, a2, a1, a3, t0, t1, &notin, &slow);
-  __ sw(a0, mapped_location);
-  __ mov(t5, a0);
-  ASSERT_EQ(mapped_location.offset(), 0);
-  __ RecordWrite(a3, mapped_location.rm(), t5,
-                 kRAHasNotBeenSaved, kDontSaveFPRegs);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in a3.
-  // The store address is returned in the register of the MemOperand
-  // unmapped_location.
-  MemOperand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, a1, a3, t0, &slow);
-  __ sw(a0, unmapped_location);
-  __ mov(t5, a0);
-  ASSERT_EQ(unmapped_location.offset(), 0);
-  __ RecordWrite(a3, unmapped_location.rm(), t5,
-                 kRAHasNotBeenSaved, kDontSaveFPRegs);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key
-  //  -- a1     : receiver
-  // -----------------------------------
-  Isolate* isolate = masm->isolate();
-
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
-
-  __ Push(a1, a0);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key
-  //  -- a1     : receiver
-  // -----------------------------------
-
-  __ Push(a1, a0);
-
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key
-  //  -- a1     : receiver
-  // -----------------------------------
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  Register key = a0;
-  Register receiver = a1;
-
-  Isolate* isolate = masm->isolate();
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, receiver, a2, a3, Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(a2, a3, &check_number_dictionary);
-
-  GenerateFastArrayLoad(
-      masm, receiver, key, t0, a3, a2, v0, NULL, &slow);
-
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a2, a3);
-  __ Ret();
-
-  __ bind(&check_number_dictionary);
-  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ lw(a3, FieldMemOperand(t0, JSObject::kMapOffset));
-
-  // Check whether the elements array is a number dictionary.
-  // a0: key
-  // a3: elements map
-  // t0: elements
-  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
-  __ Branch(&slow, ne, a3, Operand(at));
-  __ sra(a2, a0, kSmiTagSize);
-  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
-  __ Ret();
-
-  // Slow case, key and receiver still in a0 and a1.
-  __ bind(&slow);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
-                      1,
-                      a2,
-                      a3);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, key, a2, a3, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, receiver, a2, a3, Map::kHasNamedInterceptor, &slow);
-
-  // If the receiver is a fast-case object, check the keyed lookup
-  // cache. Otherwise probe the dictionary.
-  __ lw(a3, FieldMemOperand(a1, JSObject::kPropertiesOffset));
-  __ lw(t0, FieldMemOperand(a3, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
-  __ Branch(&probe_dictionary, eq, t0, Operand(at));
-
-  // Load the map of the receiver, compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the name hash.
-  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
-  __ sra(a3, a2, KeyedLookupCache::kMapHashShift);
-  __ lw(t0, FieldMemOperand(a0, Name::kHashFieldOffset));
-  __ sra(at, t0, Name::kHashShift);
-  __ xor_(a3, a3, at);
-  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
-  __ And(a3, a3, Operand(mask));
-
-  // Load the key (consisting of map and unique name) from the cache and
-  // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
-  ExternalReference cache_keys =
-      ExternalReference::keyed_lookup_cache_keys(isolate);
-  __ li(t0, Operand(cache_keys));
-  __ sll(at, a3, kPointerSizeLog2 + 1);
-  __ addu(t0, t0, at);
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    __ lw(t1, MemOperand(t0, kPointerSize * i * 2));
-    __ Branch(&try_next_entry, ne, a2, Operand(t1));
-    __ lw(t1, MemOperand(t0, kPointerSize * (i * 2 + 1)));
-    __ Branch(&hit_on_nth_entry[i], eq, a0, Operand(t1));
-    __ bind(&try_next_entry);
-  }
-
-  __ lw(t1, MemOperand(t0, kPointerSize * (kEntriesPerBucket - 1) * 2));
-  __ Branch(&slow, ne, a2, Operand(t1));
-  __ lw(t1, MemOperand(t0, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
-  __ Branch(&slow, ne, a0, Operand(t1));
-
-  // Get field offset.
-  // a0     : key
-  // a1     : receiver
-  // a2     : receiver's map
-  // a3     : lookup cache index
-  ExternalReference cache_field_offsets =
-      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    __ li(t0, Operand(cache_field_offsets));
-    __ sll(at, a3, kPointerSizeLog2);
-    __ addu(at, t0, at);
-    __ lw(t1, MemOperand(at, kPointerSize * i));
-    __ lbu(t2, FieldMemOperand(a2, Map::kInObjectPropertiesOffset));
-    __ Subu(t1, t1, t2);
-    __ Branch(&property_array_property, ge, t1, Operand(zero_reg));
-    if (i != 0) {
-      __ Branch(&load_in_object_property);
-    }
-  }
-
-  // Load in-object property.
-  __ bind(&load_in_object_property);
-  __ lbu(t2, FieldMemOperand(a2, Map::kInstanceSizeOffset));
-  __ addu(t2, t2, t1);  // Index from start of object.
-  __ Subu(a1, a1, Operand(kHeapObjectTag));  // Remove the heap tag.
-  __ sll(at, t2, kPointerSizeLog2);
-  __ addu(at, a1, at);
-  __ lw(v0, MemOperand(at));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
-                      1,
-                      a2,
-                      a3);
-  __ Ret();
-
-  // Load property array property.
-  __ bind(&property_array_property);
-  __ lw(a1, FieldMemOperand(a1, JSObject::kPropertiesOffset));
-  __ Addu(a1, a1, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ sll(t0, t1, kPointerSizeLog2);
-  __ Addu(t0, t0, a1);
-  __ lw(v0, MemOperand(t0));
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
-                      1,
-                      a2,
-                      a3);
-  __ Ret();
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // a1: receiver
-  // a0: key
-  // a3: elements
-  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
-  __ lbu(a2, FieldMemOperand(a2, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, a2, &slow);
-  // Load the property to v0.
-  GenerateDictionaryLoad(masm, &slow, a3, a0, v0, a2, t0);
-  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
-                      1,
-                      a2,
-                      a3);
-  __ Ret();
-
-  __ bind(&index_name);
-  __ IndexFromHash(a3, key);
-  // Now jump to the place where smi keys are handled.
-  __ Branch(&index_smi);
-}
-
-
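-// Illustrative sketch (not part of the original source): the keyed lookup
-// cache probed in GenerateGeneric above hashes a (map, name) pair as
-//   index = ((map >> kMapHashShift) ^ (hash_field >> Name::kHashShift))
-//           & (kCapacityMask & kHashMask)
-// and stores kEntriesPerBucket (map, name) key pairs per bucket, which is
-// why the probe loop compares both words of each pair before giving up and
-// branching to the slow case.
-
-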
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key (index)
-  //  -- a1     : receiver
-  // -----------------------------------
-  Label miss;
-
-  Register receiver = a1;
-  Register index = a0;
-  Register scratch = a3;
-  Register result = v0;
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX);
-  char_at_generator.GenerateFast(masm);
-  __ Ret();
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, call_helper);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                              StrictMode strict_mode) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(a2, a1, a0);
-  __ li(a1, Operand(Smi::FromInt(NONE)));          // PropertyAttributes.
-  __ li(a0, Operand(Smi::FromInt(strict_mode)));   // Strict mode.
-  __ Push(a1, a0);
-
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
-    MacroAssembler* masm,
-    Label* fast_object,
-    Label* fast_double,
-    Label* slow,
-    KeyedStoreCheckMap check_map,
-    KeyedStoreIncrementLength increment_length,
-    Register value,
-    Register key,
-    Register receiver,
-    Register receiver_map,
-    Register elements_map,
-    Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store; the value could be an Object or a double.
-  __ bind(fast_object);
-  Register scratch_value = t0;
-  Register address = t1;
-  if (check_map == kCheckMap) {
-    __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Branch(fast_double, ne, elements_map,
-              Operand(masm->isolate()->factory()->fixed_array_map()));
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ Addu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ sll(at, key, kPointerSizeLog2 - kSmiTagSize);
-  __ addu(address, address, at);
-  __ lw(scratch_value, MemOperand(address));
-  __ Branch(&holecheck_passed1, ne, scratch_value,
-            Operand(masm->isolate()->factory()->the_hole_value()));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
-                                      slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether the array is smi-only or not when writing a smi.
-  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch_value);
-  __ sw(value, MemOperand(address));
-  __ Ret();
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch_value,
-                             &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Addu(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ sll(scratch_value, key, kPointerSizeLog2 - kSmiTagSize);
-  __ Addu(address, address, scratch_value);
-  __ sw(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch_value, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements,
-                 address,
-                 scratch_value,
-                 kRAHasNotBeenSaved,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 OMIT_SMI_CHECK);
-  __ Ret();
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
-    __ Branch(slow, ne, elements_map, Operand(at));
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  __ Addu(address, elements,
-          Operand(FixedDoubleArray::kHeaderSize + kHoleNanUpper32Offset
-                  - kHeapObjectTag));
-  __ sll(at, key, kPointerSizeLog2);
-  __ addu(address, address, at);
-  __ lw(scratch_value, MemOperand(address));
-  __ Branch(&fast_double_without_map_check, ne, scratch_value,
-            Operand(kHoleNanUpper32));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
-                                      slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value,
-                                 key,
-                                 elements,  // Overwritten.
-                                 a3,        // Scratch regs...
-                                 t0,
-                                 t1,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Addu(scratch_value, key, Operand(Smi::FromInt(1)));
-    __ sw(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret();
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ lw(t0, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&non_double_value, ne, t0, Operand(at));
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         receiver_map,
-                                         t0,
-                                         slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
-                                                    FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         t0,
-                                         slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
-                                                                   slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
-  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         receiver_map,
-                                         t0,
-                                         slow);
-  ASSERT(receiver_map.is(a3));  // Transition code expects map in a3
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
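-// Illustrative sketch (not part of the original source): the transitions
-// performed by the helper above only move towards more general elements
-// kinds,
-//   FAST_SMI_ELEMENTS    -> FAST_DOUBLE_ELEMENTS  (storing a HeapNumber)
-//   FAST_SMI_ELEMENTS    -> FAST_ELEMENTS         (storing another object)
-//   FAST_DOUBLE_ELEMENTS -> FAST_ELEMENTS         (storing a non-number)
-// so a backing store is never specialized again on this path.
-
-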
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
-                                   StrictMode strict_mode) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array;
-
-  // Register usage.
-  Register value = a0;
-  Register key = a1;
-  Register receiver = a2;
-  Register receiver_map = a3;
-  Register elements_map = t2;
-  Register elements = t3;  // Elements array of the receiver.
-  // t0 and t1 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &slow);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ lw(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not
-  // observed. The generic stub does not perform map checks or handle
-  // observed objects.
-  __ lbu(t0, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ And(t0, t0, Operand(1 << Map::kIsAccessCheckNeeded |
-                         1 << Map::kIsObserved));
-  __ Branch(&slow, ne, t0, Operand(zero_reg));
-  // Check if the object is a JS array or not.
-  __ lbu(t0, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ Branch(&array, eq, t0, Operand(JS_ARRAY_TYPE));
-  // Check that the object is some kind of JSObject.
-  __ Branch(&slow, lt, t0, Operand(FIRST_JS_OBJECT_TYPE));
-
-  // Object case: Check key against length in the elements array.
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast_object, lo, key, Operand(t0));
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // a0: value.
-  // a1: key.
-  // a2: receiver.
-  GenerateRuntimeSetProperty(masm, strict_mode);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // The array length is still in t0 from the comparison above; only
-  // support writing to array[array.length].
-  __ Branch(&slow, ne, key, Operand(t0));
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&slow, hs, key, Operand(t0));
-  __ lw(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Branch(
-      &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);
-
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  __ lw(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ lw(t0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&extra, hs, key, Operand(t0));
-
-  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
-                                  &slow, kCheckMap, kDontIncrementLength,
-                                  value, key, receiver, receiver_map,
-                                  elements_map, elements);
-  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
-                                  &slow, kDontCheckMap, kIncrementLength,
-                                  value, key, receiver, receiver_map,
-                                  elements_map, elements);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key
-  //  -- a1     : receiver
-  // -----------------------------------
-  Label slow;
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(a1, &slow);
-
-  // Check that the key is an array index, that is Uint32.
-  __ And(t0, a0, Operand(kSmiTagMask | kSmiSignMask));
-  __ Branch(&slow, ne, t0, Operand(zero_reg));
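-  // Illustrative note (not part of the original source): the branch above
-  // falls through only when both the smi tag bit and the sign bit are
-  // clear, i.e. for non-negative smis, exactly the values usable as Uint32
-  // array indices.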
-
-  // Get the map of the receiver.
-  __ lw(a2, FieldMemOperand(a1, HeapObject::kMapOffset));
-
-  // Check that it has indexed interceptor and access checks
-  // are not enabled for this object.
-  __ lbu(a3, FieldMemOperand(a2, Map::kBitFieldOffset));
-  __ And(a3, a3, Operand(kSlowCaseBitFieldMask));
-  __ Branch(&slow, ne, a3, Operand(1 << Map::kHasIndexedInterceptor));
-  // Everything is fine, call runtime.
-  __ Push(a1, a0);  // Receiver, key.
-
-  // Perform tail call to the entry.
-  __ TailCallExternalReference(ExternalReference(
-       IC_Utility(kKeyedLoadPropertyWithInterceptor), masm->isolate()), 2, 1);
-
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(a2, a1, a0);
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a2     : key
-  //  -- a1     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  __ Push(a1, a2, a0);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise force a transition to the generic stub.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-
-  // Push receiver, key and value for runtime call.
-  // We can't use MultiPush as the order of the registers is important.
-  __ Push(a2, a1, a0);
-
-  // The slow case calls into the runtime to complete the store without causing
-  // an IC miss that would otherwise force a transition to the generic stub.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : receiver
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  // Get the receiver from the stack and probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, a1, a2, a3, t0, t1, t2);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : receiver
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  __ Push(a1, a2, a0);
-  // Perform tail call to the entry.
-  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
-                                            masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : receiver
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-  Label miss;
-
-  GenerateNameDictionaryReceiverCheck(masm, a1, a3, t0, t1, &miss);
-
-  GenerateDictionaryStore(masm, &miss, a3, a2, a0, t0, t1);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1, t0, t1);
-  __ Ret();
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1, t0, t1);
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- a0    : value
-  //  -- a1    : receiver
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-
-  __ Push(a1, a2, a0);
-
-  __ li(a1, Operand(Smi::FromInt(NONE)));  // PropertyAttributes.
-  __ li(a0, Operand(Smi::FromInt(strict_mode)));
-  __ Push(a1, a0);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return eq;
-    case Token::LT:
-      return lt;
-    case Token::GT:
-      return gt;
-    case Token::LTE:
-      return le;
-    case Token::GTE:
-      return ge;
-    default:
-      UNREACHABLE();
-      return kNoCondition;
-  }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
-  // The address of the instruction following the call.
-  Address andi_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not an andi at, rx, #yyy,
-  // nothing was inlined.
-  Instr instr = Assembler::instr_at(andi_instruction_address);
-  return Assembler::IsAndImmediate(instr) &&
-      Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
-  Address andi_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not an andi at, rx, #yyy,
-  // nothing was inlined.
-  Instr instr = Assembler::instr_at(andi_instruction_address);
-  if (!(Assembler::IsAndImmediate(instr) &&
-        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
-    return;
-  }
-
-  // The delta to the start of the map check instruction and the branch
-  // condition used at the patched jump.
-  int delta = Assembler::GetImmediate16(instr);
-  delta += Assembler::GetRs(instr) * kImm16Mask;
-  // If the delta is 0, the instruction is andi at, zero_reg, #0, which also
-  // signals that nothing was inlined.
-  if (delta == 0) {
-    return;
-  }
-
-  if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, andi=%p, delta=%d\n",
-           address, andi_instruction_address, delta);
-  }
-
-  Address patch_address =
-      andi_instruction_address - delta * Instruction::kInstrSize;
-  Instr instr_at_patch = Assembler::instr_at(patch_address);
-  Instr branch_instr =
-      Assembler::instr_at(patch_address + Instruction::kInstrSize);
-  // This is patching a conditional "jump if not smi/jump if smi" site.
-  // Enabling by changing from
-  //   andi at, rx, 0
-  //   Branch <target>, eq, at, Operand(zero_reg)
-  // to:
-  //   andi at, rx, #kSmiTagMask
-  //   Branch <target>, ne, at, Operand(zero_reg)
-  // and vice-versa to be disabled again.
-  CodePatcher patcher(patch_address, 2);
-  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
-  if (check == ENABLE_INLINED_SMI_CHECK) {
-    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
-    ASSERT_EQ(0, Assembler::GetImmediate16(instr_at_patch));
-    patcher.masm()->andi(at, reg, kSmiTagMask);
-  } else {
-    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
-    ASSERT(Assembler::IsAndImmediate(instr_at_patch));
-    patcher.masm()->andi(at, reg, 0);
-  }
-  ASSERT(Assembler::IsBranch(branch_instr));
-  if (Assembler::IsBeq(branch_instr)) {
-    patcher.ChangeBranchCondition(ne);
-  } else {
-    ASSERT(Assembler::IsBne(branch_instr));
-    patcher.ChangeBranchCondition(eq);
-  }
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/interface-descriptors-mips.cc b/src/mips/interface-descriptors-mips.cc
new file mode 100644
index 0000000..936ce20
--- /dev/null
+++ b/src/mips/interface-descriptors-mips.cc
@@ -0,0 +1,303 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return a1; }
+const Register LoadDescriptor::NameRegister() { return a2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return a0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return a3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return a1; }
+const Register StoreDescriptor::NameRegister() { return a2; }
+const Register StoreDescriptor::ValueRegister() { return a0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return a3; }
+
+
+const Register InstanceofDescriptor::left() { return a0; }
+const Register InstanceofDescriptor::right() { return a1; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return a1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return a2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return a2; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+  return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
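+// Illustrative note (not in the original source): by convention the first
+// register in every descriptor array here is the context register cp,
+// matching CallInterfaceDescriptor::ContextRegister() above, and passing
+// NULL for the representations array is assumed in this sketch to declare
+// every parameter as tagged.
+
+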
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a3, a2, a1};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a3, a2, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a2, a3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a3, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1, a3};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Smi()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // a0 : number of arguments
+  // a1 : the function to call
+  // a2 : feedback vector
+  // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
+  //      vector (Smi)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {cp, a0, a1, a2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a2, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0, a1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  // a0 -- number of arguments
+  // a1 -- function
+  // a2 -- allocation site with elements kind
+  Register registers[] = {cp, a1, a2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // The stack parameter count needs the constructor pointer and one argument.
+  Register registers[] = {cp, a1, a2, a0};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  // a0 -- number of arguments
+  // a1 -- constructor function
+  Register registers[] = {cp, a1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // The stack parameter count needs the constructor pointer and one argument.
+  Register registers[] = {cp, a1, a0};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a2, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a2,  // key
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // key
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a2,  // name
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // name
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a0,  // receiver
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // receiver
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a1,  // JSFunction
+      a0,  // actual number of arguments
+      a2,  // expected number of arguments
+  };
+  Representation representations[] = {
+      Representation::Tagged(),     // context
+      Representation::Tagged(),     // JSFunction
+      Representation::Integer32(),  // actual number of arguments
+      Representation::Integer32(),  // expected number of arguments
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a0,  // callee
+      t0,  // call_data
+      a2,  // holder
+      a1,  // api_function_address
+  };
+  Representation representations[] = {
+      Representation::Tagged(),    // context
+      Representation::Tagged(),    // callee
+      Representation::Tagged(),    // call_data
+      Representation::Tagged(),    // holder
+      Representation::External(),  // api_function_address
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips/lithium-codegen-mips.cc b/src/mips/lithium-codegen-mips.cc
index 5edca6a..497d10f 100644
--- a/src/mips/lithium-codegen-mips.cc
+++ b/src/mips/lithium-codegen-mips.cc
@@ -27,17 +27,21 @@
 
 #include "src/v8.h"
 
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 #include "src/mips/lithium-codegen-mips.h"
 #include "src/mips/lithium-gap-resolver-mips.h"
-#include "src/code-stubs.h"
-#include "src/stub-cache.h"
-#include "src/hydrogen-osr.h"
+
 
 namespace v8 {
 namespace internal {
 
 
-class SafepointGenerator V8_FINAL  : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
@@ -47,9 +51,9 @@
         deopt_mode_(mode) { }
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+  virtual void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const V8_OVERRIDE {
+  virtual void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -64,7 +68,7 @@
 
 bool LCodeGen::GenerateCode() {
   LPhase phase("Z_Code generation", chunk());
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   status_ = GENERATING;
 
   // Open a frame scope to indicate that there is a frame on the stack.  The
@@ -72,16 +76,13 @@
   // the frame (that is done in GeneratePrologue).
   FrameScope frame_scope(masm_, StackFrame::NONE);
 
-  return GeneratePrologue() &&
-      GenerateBody() &&
-      GenerateDeferredCode() &&
-      GenerateDeoptJumpTable() &&
-      GenerateSafepointTable();
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
 }
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
-  ASSERT(is_done());
+  DCHECK(is_done());
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
@@ -90,8 +91,8 @@
 
 
 void LCodeGen::SaveCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Save clobbered callee double registers");
   int count = 0;
   BitVector* doubles = chunk()->allocated_double_registers();
@@ -106,8 +107,8 @@
 
 
 void LCodeGen::RestoreCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Restore clobbered callee double registers");
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
@@ -122,7 +123,7 @@
 
 
 bool LCodeGen::GeneratePrologue() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -152,7 +153,7 @@
       __ Branch(&ok, ne, a2, Operand(at));
 
       __ lw(a2, GlobalObjectOperand());
-      __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalReceiverOffset));
+      __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
 
       __ sw(a2, MemOperand(sp, receiver_offset));
 
@@ -207,7 +208,7 @@
       need_write_barrier = false;
     } else {
       __ push(a1);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in v0. It replaces the context passed to us.
@@ -261,7 +262,7 @@
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  ASSERT(slots >= 0);
+  DCHECK(slots >= 0);
   __ Subu(sp, sp, Operand(slots * kPointerSize));
 }
 
@@ -277,7 +278,7 @@
 
 
 bool LCodeGen::GenerateDeferredCode() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
@@ -295,8 +296,8 @@
       __ bind(code->entry());
       if (NeedsDeferredFrame()) {
         Comment(";;; Build frame");
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
         frame_is_built_ = true;
         __ MultiPush(cp.bit() | fp.bit() | ra.bit());
         __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
@@ -307,7 +308,7 @@
       code->Generate();
       if (NeedsDeferredFrame()) {
         Comment(";;; Destroy frame");
-        ASSERT(frame_is_built_);
+        DCHECK(frame_is_built_);
         __ pop(at);
         __ MultiPop(cp.bit() | fp.bit() | ra.bit());
         frame_is_built_ = false;
@@ -322,47 +323,74 @@
 }
 
 
-bool LCodeGen::GenerateDeoptJumpTable() {
-  if (deopt_jump_table_.length() > 0) {
+bool LCodeGen::GenerateJumpTable() {
+  if (jump_table_.length() > 0) {
+    Label needs_frame, call_deopt_entry;
+
     Comment(";;; -------------------- Jump table --------------------");
-  }
-  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
-  Label table_start;
-  __ bind(&table_start);
-  Label needs_frame;
-  for (int i = 0; i < deopt_jump_table_.length(); i++) {
-    __ bind(&deopt_jump_table_[i].label);
-    Address entry = deopt_jump_table_[i].address;
-    Deoptimizer::BailoutType type = deopt_jump_table_[i].bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    if (id == Deoptimizer::kNotDeoptimizationEntry) {
-      Comment(";;; jump table entry %d.", i);
-    } else {
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    }
-    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
-    if (deopt_jump_table_[i].needs_frame) {
-      ASSERT(!info()->saves_caller_doubles());
-      if (needs_frame.is_bound()) {
-        __ Branch(&needs_frame);
+    Address base = jump_table_[0].address;
+
+    Register entry_offset = t9;
+
+    int length = jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      __ bind(&table_entry->label);
+
+      DCHECK(table_entry->bailout_type == jump_table_[0].bailout_type);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->reason);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load an immediate
+      // offset which will be added to the base address later.
+      __ li(entry_offset, Operand(entry - base));
+
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
+        if (needs_frame.is_bound()) {
+          __ Branch(&needs_frame);
+        } else {
+          __ bind(&needs_frame);
+          Comment(";;; call deopt with frame");
+          __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          DCHECK(info()->IsStub());
+          __ li(at, Operand(Smi::FromInt(StackFrame::STUB)));
+          __ push(at);
+          __ Addu(fp, sp,
+                  Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+          __ bind(&call_deopt_entry);
+          // Add the base address to the offset previously loaded in
+          // entry_offset.
+          __ Addu(entry_offset, entry_offset,
+                  Operand(ExternalReference::ForDeoptEntry(base)));
+          __ Call(entry_offset);
+        }
       } else {
-        __ bind(&needs_frame);
-        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
-        // This variant of deopt can only be used with stubs. Since we don't
-        // have a function pointer to install in the stack frame that we're
-        // building, install a special marker there instead.
-        ASSERT(info()->IsStub());
-        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
-        __ push(scratch0());
-        __ Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
-        __ Call(t9);
+        // The last entry can fall through into `call_deopt_entry`, avoiding a
+        // branch.
+        bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
+
+        if (need_branch) __ Branch(&call_deopt_entry);
       }
-    } else {
+    }
+
+    if (!call_deopt_entry.is_bound()) {
+      Comment(";;; call deopt");
+      __ bind(&call_deopt_entry);
+
       if (info()->saves_caller_doubles()) {
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         RestoreCallerDoubles();
       }
-      __ Call(t9);
+
+      // Add the base address to the offset previously loaded in entry_offset.
+      __ Addu(entry_offset, entry_offset,
+              Operand(ExternalReference::ForDeoptEntry(base)));
+      __ Call(entry_offset);
     }
   }
   __ RecordComment("]");
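
Editor's note: the rewritten jump table no longer materializes each deopt entry's absolute address with a full li sequence. Because the second-level entries are contiguous, each table slot loads only a small immediate offset, and the shared base address is added once at a common call site. A toy illustration (all addresses invented):

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical, contiguous second-level deopt entries.
      const uintptr_t base = 0x40001000;
      const uintptr_t entries[] = {0x40001000, 0x40001010, 0x40001020};
      for (uintptr_t entry : entries) {
        // Per-entry code loads only this small immediate...
        uint32_t offset = static_cast<uint32_t>(entry - base);
        // ...and the shared tail adds the base back before the call.
        assert(base + offset == entry);
      }
      return 0;
    }
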
@@ -375,7 +403,7 @@
 
 
 bool LCodeGen::GenerateSafepointTable() {
-  ASSERT(is_done());
+  DCHECK(is_done());
   safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
@@ -392,7 +420,7 @@
 
 
 Register LCodeGen::ToRegister(LOperand* op) const {
-  ASSERT(op->IsRegister());
+  DCHECK(op->IsRegister());
   return ToRegister(op->index());
 }
 
@@ -406,15 +434,15 @@
     Handle<Object> literal = constant->handle(isolate());
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsInteger32()) {
-      ASSERT(literal->IsNumber());
+      DCHECK(literal->IsNumber());
       __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
     } else if (r.IsSmi()) {
-      ASSERT(constant->HasSmiValue());
+      DCHECK(constant->HasSmiValue());
       __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
     } else if (r.IsDouble()) {
       Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
     } else {
-      ASSERT(r.IsSmiOrTagged());
+      DCHECK(r.IsSmiOrTagged());
       __ li(scratch, literal);
     }
     return scratch;
@@ -428,7 +456,7 @@
 
 
 DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  ASSERT(op->IsDoubleRegister());
+  DCHECK(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }
 
@@ -444,7 +472,7 @@
     Handle<Object> literal = constant->handle(isolate());
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsInteger32()) {
-      ASSERT(literal->IsNumber());
+      DCHECK(literal->IsNumber());
       __ li(at, Operand(static_cast<int32_t>(literal->Number())));
       __ mtc1(at, flt_scratch);
       __ cvt_d_w(dbl_scratch, flt_scratch);
@@ -466,7 +494,7 @@
 
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }
 
@@ -491,7 +519,7 @@
   HConstant* constant = chunk_->LookupConstant(op);
   int32_t value = constant->Integer32Value();
   if (r.IsInteger32()) return value;
-  ASSERT(r.IsSmiOrTagged());
+  DCHECK(r.IsSmiOrTagged());
   return reinterpret_cast<int32_t>(Smi::FromInt(value));
 }
 
@@ -504,7 +532,7 @@
 
 double LCodeGen::ToDouble(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasDoubleValue());
+  DCHECK(constant->HasDoubleValue());
   return constant->DoubleValue();
 }
 
@@ -515,15 +543,15 @@
     HConstant* constant = chunk()->LookupConstant(const_op);
     Representation r = chunk_->LookupLiteralRepresentation(const_op);
     if (r.IsSmi()) {
-      ASSERT(constant->HasSmiValue());
+      DCHECK(constant->HasSmiValue());
       return Operand(Smi::FromInt(constant->Integer32Value()));
     } else if (r.IsInteger32()) {
-      ASSERT(constant->HasInteger32Value());
+      DCHECK(constant->HasInteger32Value());
       return Operand(constant->Integer32Value());
     } else if (r.IsDouble()) {
       Abort(kToOperandUnsupportedDoubleImmediate);
     }
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     return Operand(constant->handle(isolate()));
   } else if (op->IsRegister()) {
     return Operand(ToRegister(op));
@@ -538,15 +566,15 @@
 
 
 static int ArgumentsOffsetWithoutFrame(int index) {
-  ASSERT(index < 0);
+  DCHECK(index < 0);
   return -(index + 1) * kPointerSize;
 }
 
 
 MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
-  ASSERT(!op->IsRegister());
-  ASSERT(!op->IsDoubleRegister());
-  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return MemOperand(fp, StackSlotOffset(op->index()));
   } else {
@@ -558,7 +586,7 @@
 
 
 MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
-  ASSERT(op->IsDoubleStackSlot());
+  DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
   } else {
@@ -594,13 +622,13 @@
       translation->BeginConstructStubFrame(closure_id, translation_size);
       break;
     case JS_GETTER:
-      ASSERT(translation_size == 1);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
       translation->BeginGetterStubFrame(closure_id);
       break;
     case JS_SETTER:
-      ASSERT(translation_size == 2);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
     case STUB:
@@ -705,7 +733,7 @@
                                RelocInfo::Mode mode,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
   __ Call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 }
@@ -715,7 +743,7 @@
                            int num_arguments,
                            LInstruction* instr,
                            SaveFPRegsMode save_doubles) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
 
   __ CallRuntime(function, num_arguments, save_doubles);
 
@@ -786,15 +814,15 @@
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition condition,
-                            LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
                             Deoptimizer::BailoutType bailout_type,
-                            Register src1,
-                            const Operand& src2) {
+                            Register src1, const Operand& src2,
+                            const char* detail) {
+  LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(environment->HasBeenRegistered());
+  DCHECK(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
   Address entry =
       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
   if (entry == NULL) {
@@ -830,37 +858,36 @@
     __ bind(&skip);
   }
 
-  ASSERT(info()->IsStub() || frame_is_built_);
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
+  DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (condition == al && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
+    DeoptComment(reason);
     __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
-    if (deopt_jump_table_.is_empty() ||
-        (deopt_jump_table_.last().address != entry) ||
-        (deopt_jump_table_.last().bailout_type != bailout_type) ||
-        (deopt_jump_table_.last().needs_frame != !frame_is_built_)) {
-      Deoptimizer::JumpTableEntry table_entry(entry,
-                                              bailout_type,
-                                              !frame_is_built_);
-      deopt_jump_table_.Add(table_entry, zone());
+    if (jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
     }
-    __ Branch(&deopt_jump_table_.last().label, condition, src1, src2);
+    __ Branch(&jump_table_.last().label, condition, src1, src2);
   }
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition condition,
-                            LEnvironment* environment,
-                            Register src1,
-                            const Operand& src2) {
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Register src1, const Operand& src2,
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(condition, environment, bailout_type, src1, src2);
+  DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail);
 }
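
Editor's note: besides threading the instruction (so DeoptComment can emit a position/mnemonic/detail reason) instead of a bare environment, the rewritten DeoptimizeIf deduplicates consecutive equivalent jump-table entries via IsEquivalentTo. A simplified sketch of that dedup, with the entry reduced to the fields that plausibly matter for equivalence (the real struct also carries the label and the reason):

    #include <vector>

    struct JumpTableEntry {
      const void* address;
      int bailout_type;
      bool needs_frame;

      bool IsEquivalentTo(const JumpTableEntry& other) const {
        return address == other.address &&
               bailout_type == other.bailout_type &&
               needs_frame == other.needs_frame;
      }
    };

    // Consecutive deopts to the same target reuse the last slot, so all
    // their branches bind to a single label.
    void Add(std::vector<JumpTableEntry>* table, const JumpTableEntry& entry) {
      if (table->empty() || !entry.IsEquivalentTo(table->back())) {
        table->push_back(entry);
      }
    }
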
 
 
@@ -919,7 +946,7 @@
 
 
 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
+  DCHECK(deoptimization_literals_.length() == 0);
 
   const ZoneList<Handle<JSFunction> >* inlined_closures =
       chunk()->inlined_closures();
@@ -939,7 +966,7 @@
   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
   } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
@@ -951,7 +978,7 @@
     Safepoint::Kind kind,
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
-  ASSERT(expected_safepoint_kind_ == kind);
+  DCHECK(expected_safepoint_kind_ == kind);
 
   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
@@ -987,15 +1014,6 @@
 }
 
 
-void LCodeGen::RecordSafepointWithRegistersAndDoubles(
-    LPointerMap* pointers,
-    int arguments,
-    Safepoint::DeoptMode deopt_mode) {
-  RecordSafepoint(
-      pointers, Safepoint::kWithRegistersAndDoubles, arguments, deopt_mode);
-}
-
-
 void LCodeGen::RecordAndWritePosition(int position) {
   if (position == RelocInfo::kNoPosition) return;
   masm()->positions_recorder()->RecordPosition(position);
@@ -1049,8 +1067,8 @@
 
 
 void LCodeGen::DoCallStub(LCallStub* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(v0));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
       RegExpExecStub stub(isolate());
@@ -1081,7 +1099,7 @@
 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));
 
   // Theoretically, a variation of the branch-free code for integer division by
   // a power of 2 (calculating the remainder via an additional multiplication
@@ -1099,7 +1117,7 @@
     __ subu(dividend, zero_reg, dividend);
     __ And(dividend, dividend, Operand(mask));
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+      DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
     }
     __ Branch(USE_DELAY_SLOT, &done);
     __ subu(dividend, zero_reg, dividend);
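
Editor's note: for reference, the negate-mask-negate sequence in this hunk computes a truncating remainder whose sign follows the dividend, matching JS %. A C++ sketch (unsigned arithmetic used so the kMinInt dividend wraps the way subu does):

    #include <cstdint>

    // divisor_abs must be a power of two; a branch stands in for the
    // conditional negation around the mask in the emitted MIPS code.
    int32_t ModPowerOf2(int32_t dividend, int32_t divisor_abs) {
      uint32_t mask = static_cast<uint32_t>(divisor_abs) - 1;
      if (dividend < 0) {
        uint32_t magnitude = 0u - static_cast<uint32_t>(dividend);
        return -static_cast<int32_t>(magnitude & mask);
      }
      return static_cast<int32_t>(static_cast<uint32_t>(dividend) & mask);
    }
    // ModPowerOf2(-7, 4) == -3; a zero result for a negative dividend is
    // the -0 case that the deopt above catches.
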
@@ -1115,10 +1133,10 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(!dividend.is(result));
+  DCHECK(!dividend.is(result));
 
   if (divisor == 0) {
-    DeoptimizeIf(al, instr->environment());
+    DeoptimizeIf(al, instr);
     return;
   }
 
@@ -1131,7 +1149,7 @@
   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label remainder_not_zero;
     __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
-    DeoptimizeIf(lt, instr->environment(), dividend, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, dividend, Operand(zero_reg));
     __ bind(&remainder_not_zero);
   }
 }
@@ -1144,13 +1162,13 @@
   const Register result_reg = ToRegister(instr->result());
 
   // div runs in the background while we check for special cases.
-  __ div(left_reg, right_reg);
+  __ Mod(result_reg, left_reg, right_reg);
 
   Label done;
   // Check for x % 0, we have to deopt in this case because we can't return a
   // NaN.
   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIf(eq, instr->environment(), right_reg, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, right_reg, Operand(zero_reg));
   }
 
   // Check for kMinInt % -1, div will return kMinInt, which is not what we
@@ -1159,7 +1177,7 @@
     Label no_overflow_possible;
     __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(eq, instr->environment(), right_reg, Operand(-1));
+      DeoptimizeIf(eq, instr, right_reg, Operand(-1));
     } else {
       __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
       __ Branch(USE_DELAY_SLOT, &done);
@@ -1169,10 +1187,9 @@
   }
 
   // If we care about -0, test if the dividend is <0 and the result is 0.
-  __ Branch(USE_DELAY_SLOT, &done, ge, left_reg, Operand(zero_reg));
-  __ mfhi(result_reg);
+  __ Branch(&done, ge, left_reg, Operand(zero_reg));
   if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(eq, instr->environment(), result_reg, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, result_reg, Operand(zero_reg));
   }
   __ bind(&done);
 }
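
Editor's note: the three deopt checks around the new Mod macro (divide by zero, kMinInt % -1, and a -0 result) mirror the following reference semantics, where "no value" stands for a deopt:

    #include <climits>
    #include <cstdint>
    #include <optional>

    std::optional<int32_t> Int32Mod(int32_t left, int32_t right,
                                    bool bailout_on_minus_zero) {
      if (right == 0) return std::nullopt;  // x % 0 is NaN: deopt.
      if (left == INT_MIN && right == -1) {
        // The hardware division overflows here; the result is 0, which is
        // really -0 for the negative left, hence the possible deopt.
        if (bailout_on_minus_zero) return std::nullopt;
        return 0;
      }
      int32_t remainder = left % right;
      if (remainder == 0 && left < 0 && bailout_on_minus_zero) {
        return std::nullopt;  // -0 is not representable as an int32.
      }
      return remainder;
    }
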
@@ -1182,24 +1199,24 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
-  ASSERT(!result.is(dividend));
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
-    DeoptimizeIf(eq, instr->environment(), dividend, Operand(kMinInt));
+    DeoptimizeIf(eq, instr, dividend, Operand(kMinInt));
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ And(at, dividend, Operand(mask));
-    DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, at, Operand(zero_reg));
   }
 
   if (divisor == -1) {  // Nice shortcut, not needed for correctness.
@@ -1226,17 +1243,17 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(!dividend.is(result));
+  DCHECK(!dividend.is(result));
 
   if (divisor == 0) {
-    DeoptimizeIf(al, instr->environment());
+    DeoptimizeIf(al, instr);
     return;
   }
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
   }
 
   __ TruncatingDiv(result, dividend, Abs(divisor));
@@ -1245,7 +1262,7 @@
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
     __ Mul(scratch0(), result, Operand(divisor));
     __ Subu(scratch0(), scratch0(), dividend);
-    DeoptimizeIf(ne, instr->environment(), scratch0(), Operand(zero_reg));
+    DeoptimizeIf(ne, instr, scratch0(), Operand(zero_reg));
   }
 }
 
@@ -1256,21 +1273,22 @@
   Register dividend = ToRegister(instr->dividend());
   Register divisor = ToRegister(instr->divisor());
   const Register result = ToRegister(instr->result());
+  Register remainder = ToRegister(instr->temp());
 
    // On MIPS, div is asynchronous: it runs in the background while we
    // check for special cases.
-  __ div(dividend, divisor);
+  __ Div(remainder, result, dividend, divisor);
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
   }
 
   // Check for (0 / -x) that will produce negative zero.
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label left_not_zero;
     __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
-    DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
     __ bind(&left_not_zero);
   }
 
@@ -1279,16 +1297,12 @@
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     Label left_not_min_int;
     __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
-    DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+    DeoptimizeIf(eq, instr, divisor, Operand(-1));
     __ bind(&left_not_min_int);
   }
 
   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
-    __ mfhi(result);
-    DeoptimizeIf(ne, instr->environment(), result, Operand(zero_reg));
-    __ mflo(result);
-  } else {
-    __ mflo(result);
+    DeoptimizeIf(ne, instr, remainder, Operand(zero_reg));
   }
 }
 
@@ -1299,7 +1313,7 @@
   DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
 
   // This is computed in-place.
-  ASSERT(addend.is(ToDoubleRegister(instr->result())));
+  DCHECK(addend.is(ToDoubleRegister(instr->result())));
 
   __ madd_d(addend, addend, multiplier, multiplicand);
 }
@@ -1310,7 +1324,7 @@
   Register result = ToRegister(instr->result());
   int32_t divisor = instr->divisor();
   Register scratch = result.is(dividend) ? scratch0() : dividend;
-  ASSERT(!result.is(dividend) || !scratch.is(dividend));
+  DCHECK(!result.is(dividend) || !scratch.is(dividend));
 
   // If the divisor is 1, return the dividend.
   if (divisor == 1) {
@@ -1334,14 +1348,14 @@
 
   __ Subu(result, zero_reg, dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, result, Operand(zero_reg));
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   __ Xor(scratch, scratch, result);
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(ge, instr->environment(), scratch, Operand(zero_reg));
+      DeoptimizeIf(ge, instr, scratch, Operand(zero_reg));
     }
     return;
   }
@@ -1366,17 +1380,17 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(!dividend.is(result));
+  DCHECK(!dividend.is(result));
 
   if (divisor == 0) {
-    DeoptimizeIf(al, instr->environment());
+    DeoptimizeIf(al, instr);
     return;
   }
 
   // Check for (0 / -x) that will produce negative zero.
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
-    DeoptimizeIf(eq, instr->environment(), dividend, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -1391,7 +1405,7 @@
   // In the general case we may need to adjust before and after the truncating
   // division to get a flooring division.
   Register temp = ToRegister(instr->temp());
-  ASSERT(!temp.is(dividend) && !temp.is(result));
+  DCHECK(!temp.is(dividend) && !temp.is(result));
   Label needs_adjustment, done;
   __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
             dividend, Operand(zero_reg));
@@ -1413,21 +1427,21 @@
   Register dividend = ToRegister(instr->dividend());
   Register divisor = ToRegister(instr->divisor());
   const Register result = ToRegister(instr->result());
-
+  Register remainder = scratch0();
    // On MIPS, div is asynchronous: it runs in the background while we
    // check for special cases.
-  __ div(dividend, divisor);
+  __ Div(remainder, result, dividend, divisor);
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
-    DeoptimizeIf(eq, instr->environment(), divisor, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
   }
 
   // Check for (0 / -x) that will produce negative zero.
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
     Label left_not_zero;
     __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
-    DeoptimizeIf(lt, instr->environment(), divisor, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
     __ bind(&left_not_zero);
   }
 
@@ -1436,15 +1450,12 @@
       !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     Label left_not_min_int;
     __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
-    DeoptimizeIf(eq, instr->environment(), divisor, Operand(-1));
+    DeoptimizeIf(eq, instr, divisor, Operand(-1));
     __ bind(&left_not_min_int);
   }
 
   // We performed a truncating division. Correct the result if necessary.
   Label done;
-  Register remainder = scratch0();
-  __ mfhi(remainder);
-  __ mflo(result);
   __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
   __ Xor(remainder, remainder, Operand(divisor));
   __ Branch(&done, ge, remainder, Operand(zero_reg));
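
Editor's note: the tail of DoFlooringDivI corrects a truncating quotient into a flooring one: subtract one when the remainder is nonzero and its sign differs from the divisor's (the XOR test above). Reference version:

    #include <cstdint>

    // Assumes the divide-by-zero and kMinInt / -1 cases deopted earlier,
    // as in the generated code above.
    int32_t FlooringDiv(int32_t dividend, int32_t divisor) {
      int32_t result = dividend / divisor;     // truncates toward zero
      int32_t remainder = dividend % divisor;  // sign follows the dividend
      if (remainder != 0 && (remainder ^ divisor) < 0) result -= 1;
      return result;
    }
    // FlooringDiv(-7, 2) == -4, while -7 / 2 truncates to -3.
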
@@ -1470,14 +1481,14 @@
     if (bailout_on_minus_zero && (constant < 0)) {
      // The case of a zero constant is handled separately.
      // If the constant is negative and left is zero, the result should be -0.
-      DeoptimizeIf(eq, instr->environment(), left, Operand(zero_reg));
+      DeoptimizeIf(eq, instr, left, Operand(zero_reg));
     }
 
     switch (constant) {
       case -1:
         if (overflow) {
           __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
-          DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+          DeoptimizeIf(lt, instr, scratch, Operand(zero_reg));
         } else {
           __ Subu(result, zero_reg, left);
         }
@@ -1486,7 +1497,7 @@
         if (bailout_on_minus_zero) {
          // If left is strictly negative and the constant is zero, the
          // result is -0. Deoptimize if required, otherwise return 0.
-          DeoptimizeIf(lt, instr->environment(), left, Operand(zero_reg));
+          DeoptimizeIf(lt, instr, left, Operand(zero_reg));
         }
         __ mov(result, zero_reg);
         break;
@@ -1501,18 +1512,18 @@
         int32_t mask = constant >> 31;
         uint32_t constant_abs = (constant + mask) ^ mask;
 
-        if (IsPowerOf2(constant_abs)) {
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
           int32_t shift = WhichPowerOf2(constant_abs);
           __ sll(result, left, shift);
           // Correct the sign of the result if the constant is negative.
           if (constant < 0)  __ Subu(result, zero_reg, result);
-        } else if (IsPowerOf2(constant_abs - 1)) {
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
           int32_t shift = WhichPowerOf2(constant_abs - 1);
           __ sll(scratch, left, shift);
           __ Addu(result, scratch, left);
           // Correct the sign of the result if the constant is negative.
           if (constant < 0)  __ Subu(result, zero_reg, result);
-        } else if (IsPowerOf2(constant_abs + 1)) {
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
           int32_t shift = WhichPowerOf2(constant_abs + 1);
           __ sll(scratch, left, shift);
           __ Subu(result, scratch, left);
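
Editor's note: for constant multipliers whose magnitude is a power of two, or one off a power of two, the hunk above replaces the multiply with a shift plus at most one add/sub, then fixes the sign. A sketch of the same strength reduction (overflow checks ignored, helper names invented):

    #include <cstdint>

    static bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

    static int WhichPower(uint32_t x) {  // log2 of a power of two
      int n = 0;
      while (x > 1) { x >>= 1; ++n; }
      return n;
    }

    static int32_t Shl(int32_t v, int k) {  // well-defined left shift
      return static_cast<int32_t>(static_cast<uint32_t>(v) << k);
    }

    int32_t MulByConstant(int32_t left, int32_t constant) {
      uint32_t magnitude = constant < 0 ? 0u - static_cast<uint32_t>(constant)
                                        : static_cast<uint32_t>(constant);
      int32_t result;
      if (IsPowerOfTwo(magnitude)) {
        result = Shl(left, WhichPower(magnitude));               // left * 2^k
      } else if (IsPowerOfTwo(magnitude - 1)) {
        result = Shl(left, WhichPower(magnitude - 1)) + left;    // 2^k + 1
      } else if (IsPowerOfTwo(magnitude + 1)) {
        result = Shl(left, WhichPower(magnitude + 1)) - left;    // 2^k - 1
      } else {
        return left * constant;  // generic multiply, outside this hunk
      }
      return constant < 0 ? -result : result;
    }
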
@@ -1526,23 +1537,19 @@
     }
 
   } else {
-    ASSERT(right_op->IsRegister());
+    DCHECK(right_op->IsRegister());
     Register right = ToRegister(right_op);
 
     if (overflow) {
       // hi:lo = left * right.
       if (instr->hydrogen()->representation().IsSmi()) {
         __ SmiUntag(result, left);
-        __ mult(result, right);
-        __ mfhi(scratch);
-        __ mflo(result);
+        __ Mul(scratch, result, result, right);
       } else {
-        __ mult(left, right);
-        __ mfhi(scratch);
-        __ mflo(result);
+        __ Mul(scratch, result, left, right);
       }
       __ sra(at, result, 31);
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+      DeoptimizeIf(ne, instr, scratch, Operand(at));
     } else {
       if (instr->hydrogen()->representation().IsSmi()) {
         __ SmiUntag(result, left);
@@ -1557,10 +1564,7 @@
       __ Xor(at, left, right);
       __ Branch(&done, ge, at, Operand(zero_reg));
       // Bail out if the result is minus zero.
-      DeoptimizeIf(eq,
-                   instr->environment(),
-                   result,
-                   Operand(zero_reg));
+      DeoptimizeIf(eq, instr, result, Operand(zero_reg));
       __ bind(&done);
     }
   }
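
Editor's note: the smi/int32 multiply now uses the widening Mul macro and deopts when the high word disagrees with the sign extension of the low word, which is exactly the condition for the product not fitting in 32 bits:

    #include <cstdint>

    bool MulOverflows32(int32_t left, int32_t right) {
      int64_t product = static_cast<int64_t>(left) * right;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      return hi != (lo >> 31);  // lo >> 31 is 0 or -1, like `sra at, result, 31`
    }
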
@@ -1570,7 +1574,7 @@
 void LCodeGen::DoBitI(LBitI* instr) {
   LOperand* left_op = instr->left();
   LOperand* right_op = instr->right();
-  ASSERT(left_op->IsRegister());
+  DCHECK(left_op->IsRegister());
   Register left = ToRegister(left_op);
   Register result = ToRegister(instr->result());
   Operand right(no_reg);
@@ -1578,7 +1582,7 @@
   if (right_op->IsStackSlot()) {
     right = Operand(EmitLoadRegister(right_op, at));
   } else {
-    ASSERT(right_op->IsRegister() || right_op->IsConstantOperand());
+    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
     right = ToOperand(right_op);
   }
 
@@ -1624,7 +1628,7 @@
       case Token::SHR:
         __ srlv(result, left, ToRegister(right_op));
         if (instr->can_deopt()) {
-          DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+          DeoptimizeIf(lt, instr, result, Operand(zero_reg));
         }
         break;
       case Token::SHL:
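
Editor's note: the deopt on the logical right shift exists because JS >>> yields an unsigned result while these lithium values are tracked as int32: a result with the top bit set is unrepresentable unless every use truncates. A sketch of the condition being checked:

    #include <cstdint>

    bool ShrResultNeedsDeopt(uint32_t value, int shift_count) {
      uint32_t result = value >> (shift_count & 31);
      // Top bit set means the unsigned result is not a valid int32.
      return static_cast<int32_t>(result) < 0;
    }
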
@@ -1659,7 +1663,7 @@
         } else {
           if (instr->can_deopt()) {
             __ And(at, left, Operand(0x80000000));
-            DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+            DeoptimizeIf(ne, instr, at, Operand(zero_reg));
           }
           __ Move(result, left);
         }
@@ -1674,7 +1678,7 @@
             } else {
               __ SmiTagCheckOverflow(result, left, scratch);
             }
-            DeoptimizeIf(lt, instr->environment(), scratch, Operand(zero_reg));
+            DeoptimizeIf(lt, instr, scratch, Operand(zero_reg));
           } else {
             __ sll(result, left, shift_count);
           }
@@ -1701,7 +1705,7 @@
       Register right_reg = EmitLoadRegister(right, at);
       __ Subu(ToRegister(result), ToRegister(left), Operand(right_reg));
     } else {
-      ASSERT(right->IsRegister() || right->IsConstantOperand());
+      DCHECK(right->IsRegister() || right->IsConstantOperand());
       __ Subu(ToRegister(result), ToRegister(left), ToOperand(right));
     }
   } else {  // can_overflow.
@@ -1714,7 +1718,7 @@
                                  right_reg,
                                  overflow);  // Reg at also used as scratch.
     } else {
-      ASSERT(right->IsRegister());
+      DCHECK(right->IsRegister());
      // Because the overflow-check macros do not support constant operands,
      // the IsConstantOperand case is handled in the previous clause.
       __ SubuAndCheckForOverflow(ToRegister(result),
@@ -1722,7 +1726,7 @@
                                  ToRegister(right),
                                  overflow);  // Reg at also used as scratch.
     }
-    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
   }
 }
 
@@ -1738,7 +1742,7 @@
 
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
-  ASSERT(instr->result()->IsDoubleRegister());
+  DCHECK(instr->result()->IsDoubleRegister());
   DoubleRegister result = ToDoubleRegister(instr->result());
   double v = instr->value();
   __ Move(result, v);
@@ -1770,15 +1774,15 @@
   Register scratch = ToRegister(instr->temp());
   Smi* index = instr->index();
   Label runtime, done;
-  ASSERT(object.is(a0));
-  ASSERT(result.is(v0));
-  ASSERT(!scratch.is(scratch0()));
-  ASSERT(!scratch.is(object));
+  DCHECK(object.is(a0));
+  DCHECK(result.is(v0));
+  DCHECK(!scratch.is(scratch0()));
+  DCHECK(!scratch.is(object));
 
   __ SmiTst(object, at);
-  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
   __ GetObjectType(object, scratch, scratch);
-  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_DATE_TYPE));
+  DeoptimizeIf(ne, instr, scratch, Operand(JS_DATE_TYPE));
 
   if (index->value() == 0) {
     __ lw(result, FieldMemOperand(object, JSDate::kValueOffset));
@@ -1814,8 +1818,8 @@
     return FieldMemOperand(string, SeqString::kHeaderSize + offset);
   }
   Register scratch = scratch0();
-  ASSERT(!scratch.is(string));
-  ASSERT(!scratch.is(ToRegister(index)));
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
   if (encoding == String::ONE_BYTE_ENCODING) {
     __ Addu(scratch, string, ToRegister(index));
   } else {
@@ -1891,7 +1895,7 @@
       Register right_reg = EmitLoadRegister(right, at);
       __ Addu(ToRegister(result), ToRegister(left), Operand(right_reg));
     } else {
-      ASSERT(right->IsRegister() || right->IsConstantOperand());
+      DCHECK(right->IsRegister() || right->IsConstantOperand());
       __ Addu(ToRegister(result), ToRegister(left), ToOperand(right));
     }
   } else {  // can_overflow.
@@ -1905,7 +1909,7 @@
                                  right_reg,
                                  overflow);  // Reg at also used as scratch.
     } else {
-      ASSERT(right->IsRegister());
+      DCHECK(right->IsRegister());
      // Because the overflow-check macros do not support constant operands,
      // the IsConstantOperand case is handled in the previous clause.
       __ AdduAndCheckForOverflow(ToRegister(result),
@@ -1913,7 +1917,7 @@
                                  ToRegister(right),
                                  overflow);  // Reg at also used as scratch.
     }
-    DeoptimizeIf(lt, instr->environment(), overflow, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
   }
 }
 
@@ -1934,12 +1938,12 @@
      __  Movz(result_reg, left_reg, scratch);
      __  Movn(result_reg, right_reg, scratch);
     } else {
-     ASSERT(condition == le);
+     DCHECK(condition == le);
      __  Movn(result_reg, left_reg, scratch);
      __  Movz(result_reg, right_reg, scratch);
     }
   } else {
-    ASSERT(instr->hydrogen()->representation().IsDouble());
+    DCHECK(instr->hydrogen()->representation().IsDouble());
     FPURegister left_reg = ToDoubleRegister(left);
     FPURegister right_reg = ToDoubleRegister(right);
     FPURegister result_reg = ToDoubleRegister(instr->result());
@@ -2021,13 +2025,14 @@
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->left()).is(a1));
-  ASSERT(ToRegister(instr->right()).is(a0));
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+  DCHECK(ToRegister(instr->result()).is(v0));
 
-  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
    // Other architectures use a nop here to signal that there is no inlined
    // patchable code. MIPS does not need the nop, since our marker
    // instruction (andi zero_reg) will never be used in normal code.
@@ -2111,36 +2116,36 @@
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32() || r.IsSmi()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     Register reg = ToRegister(instr->value());
     EmitBranch(instr, ne, reg, Operand(zero_reg));
   } else if (r.IsDouble()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     DoubleRegister reg = ToDoubleRegister(instr->value());
     // Test the double value. Zero and NaN are false.
     EmitBranchF(instr, nue, reg, kDoubleRegZero);
   } else {
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     Register reg = ToRegister(instr->value());
     HType type = instr->hydrogen()->value()->type();
     if (type.IsBoolean()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ LoadRoot(at, Heap::kTrueValueRootIndex);
       EmitBranch(instr, eq, reg, Operand(at));
     } else if (type.IsSmi()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       EmitBranch(instr, ne, reg, Operand(zero_reg));
     } else if (type.IsJSArray()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       EmitBranch(instr, al, zero_reg, Operand(zero_reg));
     } else if (type.IsHeapNumber()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       DoubleRegister dbl_scratch = double_scratch0();
       __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
       // Test the double value. Zero and NaN are false.
       EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
     } else if (type.IsString()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ lw(at, FieldMemOperand(reg, String::kLengthOffset));
       EmitBranch(instr, ne, at, Operand(zero_reg));
     } else {
@@ -2173,7 +2178,7 @@
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ SmiTst(reg, at);
-        DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+        DeoptimizeIf(eq, instr, at, Operand(zero_reg));
       }
 
       const Register map = scratch0();
@@ -2229,7 +2234,7 @@
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
-        DeoptimizeIf(al, instr->environment(), zero_reg, Operand(zero_reg));
+        DeoptimizeIf(al, instr, zero_reg, Operand(zero_reg));
       }
     }
   }
@@ -2370,7 +2375,7 @@
 
 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   Representation rep = instr->hydrogen()->value()->representation();
-  ASSERT(!rep.IsInteger32());
+  DCHECK(!rep.IsInteger32());
   Register scratch = ToRegister(instr->temp());
 
   if (rep.IsDouble()) {
@@ -2504,10 +2509,10 @@
 
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
@@ -2520,7 +2525,7 @@
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
-  ASSERT(from == to || to == LAST_TYPE);
+  DCHECK(from == to || to == LAST_TYPE);
   return from;
 }
 
@@ -2583,13 +2588,13 @@
                                Register input,
                                Register temp,
                                Register temp2) {
-  ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
 
   __ JumpIfSmi(input, is_false);
 
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
     // Assuming the following assertions, we can use the same compares to test
     // for both being a function type and being in the object type range.
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2618,7 +2623,7 @@
 
   // Objects with a non-function constructor have class 'Object'.
   __ GetObjectType(temp, temp2, temp2);
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
     __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
   } else {
     __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
@@ -2664,12 +2669,12 @@
 
 
 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Label true_label, done;
-  ASSERT(ToRegister(instr->left()).is(a0));  // Object is in a0.
-  ASSERT(ToRegister(instr->right()).is(a1));  // Function is in a1.
+  DCHECK(ToRegister(instr->left()).is(a0));  // Object is in a0.
+  DCHECK(ToRegister(instr->right()).is(a1));  // Function is in a1.
   Register result = ToRegister(instr->result());
-  ASSERT(result.is(v0));
+  DCHECK(result.is(v0));
 
   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
@@ -2684,15 +2689,15 @@
 
 
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
-  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
    public:
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
 
    private:
@@ -2708,8 +2713,8 @@
   Register temp = ToRegister(instr->temp());
   Register result = ToRegister(instr->result());
 
-  ASSERT(object.is(a0));
-  ASSERT(result.is(v0));
+  DCHECK(object.is(a0));
+  DCHECK(result.is(v0));
 
   // A Smi is not instance of anything.
   __ JumpIfSmi(object, &false_result);
@@ -2763,7 +2768,7 @@
 void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                                Label* map_check) {
   Register result = ToRegister(instr->result());
-  ASSERT(result.is(v0));
+  DCHECK(result.is(v0));
 
   InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
   flags = static_cast<InstanceofStub::Flags>(
@@ -2774,14 +2779,14 @@
       flags | InstanceofStub::kReturnTrueFalseObject);
   InstanceofStub stub(isolate(), flags);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   LoadContextFromDeferred(instr->context());
 
    // Get the temp register reserved by the instruction. This needs to be t0,
    // because its slot among the pushed safepoint registers is used to
    // communicate the offset to the location of the map check.
   Register temp = ToRegister(instr->temp());
-  ASSERT(temp.is(t0));
+  DCHECK(temp.is(t0));
   __ li(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 7;
   int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
@@ -2805,10 +2810,10 @@
 
 
 void LCodeGen::DoCmpT(LCmpT* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
   // On MIPS there is no need for a "no inlined smi code" marker (nop).
 
@@ -2820,7 +2825,7 @@
   __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
   __ bind(&check);
   __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
-  ASSERT_EQ(1, masm()->InstructionsGeneratedSince(&check));
+  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
   __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
   __ bind(&done);
 }
@@ -2873,19 +2878,36 @@
   __ lw(result, FieldMemOperand(at, Cell::kValueOffset));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+    DeoptimizeIf(eq, instr, result, Operand(at));
   }
 }
 
 
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->global_object()).is(a0));
-  ASSERT(ToRegister(instr->result()).is(v0));
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ li(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
+  __ li(VectorLoadICDescriptor::SlotRegister(),
+        Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
 
-  __ li(a2, Operand(instr->name()));
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  }
   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2906,7 +2928,7 @@
     Register payload = ToRegister(instr->temp());
     __ lw(payload, FieldMemOperand(cell, Cell::kValueOffset));
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(eq, instr->environment(), payload, Operand(at));
+    DeoptimizeIf(eq, instr, payload, Operand(at));
   }
 
   // Store the value.
@@ -2925,7 +2947,7 @@
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
 
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+      DeoptimizeIf(eq, instr, result, Operand(at));
     } else {
       Label is_not_hole;
       __ Branch(&is_not_hole, ne, result, Operand(at));
@@ -2949,7 +2971,7 @@
     __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
 
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(eq, instr->environment(), scratch, Operand(at));
+      DeoptimizeIf(eq, instr, scratch, Operand(at));
     } else {
       __ Branch(&skip_assignment, ne, scratch, Operand(at));
     }
@@ -3003,13 +3025,16 @@
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).is(a0));
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
 
   // Name is always in a2.
-  __ li(a2, Operand(instr->name()));
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  }
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3019,24 +3044,13 @@
   Register function = ToRegister(instr->function());
   Register result = ToRegister(instr->result());
 
-  // Check that the function really is a function. Load map into the
-  // result register.
-  __ GetObjectType(function, result, scratch);
-  DeoptimizeIf(ne, instr->environment(), scratch, Operand(JS_FUNCTION_TYPE));
-
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  __ lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
-  __ And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
-  __ Branch(&non_instance, ne, scratch, Operand(zero_reg));
-
   // Get the prototype or initial map from the function.
   __ lw(result,
          FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
 
   // Check that the function has a prototype or an initial map.
   __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  DeoptimizeIf(eq, instr->environment(), result, Operand(at));
+  DeoptimizeIf(eq, instr, result, Operand(at));
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3045,12 +3059,6 @@
 
   // Get the prototype from the initial map.
   __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-  __ Branch(&done);
-
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in initial map.
-  __ bind(&non_instance);
-  __ lw(result, FieldMemOperand(result, Map::kConstructorOffset));
 
   // All done.
   __ bind(&done);
@@ -3178,8 +3186,7 @@
       case UINT32_ELEMENTS:
         __ lw(result, mem_operand);
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
-          DeoptimizeIf(Ugreater_equal, instr->environment(),
-              result, Operand(0x80000000));
+          DeoptimizeIf(Ugreater_equal, instr, result, Operand(0x80000000));
         }
         break;
       case FLOAT32_ELEMENTS:
@@ -3232,7 +3239,7 @@
 
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ lw(scratch, MemOperand(scratch, kHoleNanUpper32Offset));
-    DeoptimizeIf(eq, instr->environment(), scratch, Operand(kHoleNanUpper32));
+    DeoptimizeIf(eq, instr, scratch, Operand(kHoleNanUpper32));
   }
 }
 
@@ -3268,10 +3275,10 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
       __ SmiTst(result, scratch);
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+      DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
     } else {
       __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
-      DeoptimizeIf(eq, instr->environment(), result, Operand(scratch));
+      DeoptimizeIf(eq, instr, result, Operand(scratch));
     }
   }
 }
@@ -3305,7 +3312,7 @@
       __ Addu(scratch0(), base, scratch0());
       return MemOperand(scratch0());
     } else {
-      ASSERT_EQ(-1, shift_size);
+      DCHECK_EQ(-1, shift_size);
       __ srl(scratch0(), key, 1);
       __ Addu(scratch0(), base, scratch0());
       return MemOperand(scratch0());
@@ -3317,7 +3324,7 @@
     __ Addu(scratch0(), base, scratch0());
     return MemOperand(scratch0(), base_offset);
   } else {
-    ASSERT_EQ(-1, shift_size);
+    DCHECK_EQ(-1, shift_size);
     __ sra(scratch0(), key, 1);
     __ Addu(scratch0(), base, scratch0());
     return MemOperand(scratch0(), base_offset);
@@ -3326,11 +3333,15 @@
 
 
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).is(a1));
-  ASSERT(ToRegister(instr->key()).is(a0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3413,11 +3424,10 @@
 
   // Deoptimize if the receiver is not a JS object.
   __ SmiTst(receiver, scratch);
-  DeoptimizeIf(eq, instr->environment(), scratch, Operand(zero_reg));
+  DeoptimizeIf(eq, instr, scratch, Operand(zero_reg));
 
   __ GetObjectType(receiver, scratch, scratch);
-  DeoptimizeIf(lt, instr->environment(),
-               scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+  DeoptimizeIf(lt, instr, scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
 
   __ Branch(&result_in_receiver);
   __ bind(&global_object);
@@ -3425,7 +3435,7 @@
   __ lw(result,
         ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
   __ lw(result,
-        FieldMemOperand(result, GlobalObject::kGlobalReceiverOffset));
+        FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
 
   if (result.is(receiver)) {
     __ bind(&result_in_receiver);
@@ -3445,14 +3455,14 @@
   Register length = ToRegister(instr->length());
   Register elements = ToRegister(instr->elements());
   Register scratch = scratch0();
-  ASSERT(receiver.is(a0));  // Used for parameter count.
-  ASSERT(function.is(a1));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(receiver.is(a0));  // Used for parameter count.
+  DCHECK(function.is(a1));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(v0));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
-  DeoptimizeIf(hi, instr->environment(), length, Operand(kArgumentsLimit));
+  DeoptimizeIf(hi, instr, length, Operand(kArgumentsLimit));
 
   // Push the receiver and use the register to keep the original
   // number of arguments.
@@ -3476,7 +3486,7 @@
   __ sll(scratch, length, 2);
 
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
@@ -3516,18 +3526,18 @@
     __ lw(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
   } else {
     // If there is no frame, the context must be in cp.
-    ASSERT(result.is(cp));
+    DCHECK(result.is(cp));
   }
 }
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   __ li(scratch0(), instr->hydrogen()->pairs());
   __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
   // The context is the first argument.
   __ Push(cp, scratch0(), scratch1());
-  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }
 
 
@@ -3573,8 +3583,8 @@
 
 
 void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
-  ASSERT(instr->context() != NULL);
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
   Register input = ToRegister(instr->value());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
@@ -3582,7 +3592,7 @@
   // Deoptimize if not a heap number.
   __ lw(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
   __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
+  DeoptimizeIf(ne, instr, scratch, Operand(at));
 
   Label done;
   Register exponent = scratch0();
@@ -3597,7 +3607,7 @@
   // Input is negative. Reverse its sign.
   // Preserve the value of all registers.
   {
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    PushSafepointRegistersScope scope(this);
 
     // Registers were saved at the safepoint, so we can use
     // many scratch registers.
@@ -3616,7 +3626,7 @@
     // Slow case: Call the runtime system to do the number allocation.
     __ bind(&slow);
 
-    CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0, instr,
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
                             instr->context());
     // Set the pointer to the new heap number in tmp.
     if (!tmp1.is(v0))
@@ -3649,21 +3659,21 @@
   __ mov(result, input);
   __ subu(result, zero_reg, input);
   // Overflow if result is still negative, i.e. 0x80000000.
-  DeoptimizeIf(lt, instr->environment(), result, Operand(zero_reg));
+  DeoptimizeIf(lt, instr, result, Operand(zero_reg));
   __ bind(&done);
 }
 
 
 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   // Class for deferred case.
-  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LMathAbs* instr_;
   };
@@ -3703,15 +3713,15 @@
                      except_flag);
 
   // Deopt if the operation did not succeed.
-  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+  DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Test for -0.
     Label done;
     __ Branch(&done, ne, result, Operand(zero_reg));
-    __ mfc1(scratch1, input.high());
+    __ Mfhc1(scratch1, input);
     __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
     __ bind(&done);
   }
 }
@@ -3725,7 +3735,7 @@
   Label done, check_sign_on_zero;
 
   // Extract exponent bits.
-  __ mfc1(result, input.high());
+  __ Mfhc1(result, input);
   __ Ext(scratch,
          result,
          HeapNumber::kExponentShift,
@@ -3744,8 +3754,7 @@
 
   // The following conversion will not work with numbers
   // outside of ]-2^32, 2^32[.
-  DeoptimizeIf(ge, instr->environment(), scratch,
-               Operand(HeapNumber::kExponentBias + 32));
+  DeoptimizeIf(ge, instr, scratch, Operand(HeapNumber::kExponentBias + 32));
 
   // Save the original sign for later comparison.
   __ And(scratch, result, Operand(HeapNumber::kSignMask));
@@ -3755,12 +3764,11 @@
 
   // Check sign of the result: if the sign changed, the input
   // value was in ]0.5, 0[ and the result should be -0.
-  __ mfc1(result, double_scratch0().high());
+  __ Mfhc1(result, double_scratch0());
   __ Xor(result, result, Operand(scratch));
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // ARM uses 'mi' here, which is 'lt'
-    DeoptimizeIf(lt, instr->environment(), result,
-                 Operand(zero_reg));
+    DeoptimizeIf(lt, instr, result, Operand(zero_reg));
   } else {
     Label skip2;
     // ARM uses 'mi' here, which is 'lt'
@@ -3779,20 +3787,28 @@
                      double_scratch1,
                      except_flag);
 
-  DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+  DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Test for -0.
     __ Branch(&done, ne, result, Operand(zero_reg));
     __ bind(&check_sign_on_zero);
-    __ mfc1(scratch, input.high());
+    __ Mfhc1(scratch, input);
     __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
-    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
   }
   __ bind(&done);
 }
 
 
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ cvt_s_d(result.low(), input);
+  __ cvt_d_s(result, result.low());
+}
+
+
 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   DoubleRegister input = ToDoubleRegister(instr->value());
   DoubleRegister result = ToDoubleRegister(instr->result());
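(The DoMathFround added in the hunk above lowers Math.fround to a two-instruction round-trip: cvt_s_d rounds the double input to the nearest float32 in the low single register, and cvt_d_s widens that single back to a double. A minimal scalar sketch of the same semantics, illustrative C++ only, not emitter code:

    // Math.fround semantics: round to float32 precision, keep double type.
    double Fround(double x) {
      return static_cast<double>(static_cast<float>(x));
    }
)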
@@ -3805,7 +3821,7 @@
   DoubleRegister result = ToDoubleRegister(instr->result());
   DoubleRegister temp = ToDoubleRegister(instr->temp());
 
-  ASSERT(!input.is(result));
+  DCHECK(!input.is(result));
 
   // Note that according to ECMA-262 15.8.2.13:
   // Math.pow(-Infinity, 0.5) == Infinity
@@ -3828,22 +3844,24 @@
   Representation exponent_type = instr->hydrogen()->right()->representation();
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
-  ASSERT(!instr->right()->IsDoubleRegister() ||
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
          ToDoubleRegister(instr->right()).is(f4));
-  ASSERT(!instr->right()->IsRegister() ||
-         ToRegister(instr->right()).is(a2));
-  ASSERT(ToDoubleRegister(instr->left()).is(f2));
-  ASSERT(ToDoubleRegister(instr->result()).is(f0));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(f2));
+  DCHECK(ToDoubleRegister(instr->result()).is(f0));
 
   if (exponent_type.IsSmi()) {
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
-    __ JumpIfSmi(a2, &no_deopt);
-    __ lw(t3, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!t3.is(tagged_exponent));
+    __ lw(t3, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-    DeoptimizeIf(ne, instr->environment(), t3, Operand(at));
+    DeoptimizeIf(ne, instr, t3, Operand(at));
     __ bind(&no_deopt);
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
@@ -3851,7 +3869,7 @@
     MathPowStub stub(isolate(), MathPowStub::INTEGER);
     __ CallStub(&stub);
   } else {
-    ASSERT(exponent_type.IsDouble());
+    DCHECK(exponent_type.IsDouble());
     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
     __ CallStub(&stub);
   }
@@ -3889,9 +3907,9 @@
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->function()).is(a1));
-  ASSERT(instr->HasPointerMap());
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
@@ -3909,8 +3927,36 @@
 }
 
 
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+  DCHECK(receiver.is(a1));
+  DCHECK(name.is(a2));
+
+  Register scratch = a3;
+  Register extra = t0;
+  Register extra2 = t1;
+  Register extra3 = t2;
+
+  // Important for the tail-call.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, extra, extra2, extra3);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+  LoadIC::GenerateMiss(masm());
+}
+
+
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->result()).is(v0));
 
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -3921,7 +3967,7 @@
     generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
     __ Call(code, RelocInfo::CODE_TARGET);
   } else {
-    ASSERT(instr->target()->IsRegister());
+    DCHECK(instr->target()->IsRegister());
     Register target = ToRegister(instr->target());
     generator.BeforeCall(__ CallSize(target));
     __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
@@ -3932,8 +3978,8 @@
 
 
 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(a1));
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
 
   if (instr->hydrogen()->pass_argument_count()) {
     __ li(a0, Operand(instr->arity()));
@@ -3951,9 +3997,9 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->function()).is(a1));
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
 
   int arity = instr->arity();
   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
@@ -3962,9 +4008,9 @@
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->constructor()).is(a1));
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
 
   __ li(a0, Operand(instr->arity()));
   // No cell in a2 for construct type feedback in optimized code
@@ -3975,9 +4021,9 @@
 
 
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->constructor()).is(a1));
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
 
   __ li(a0, Operand(instr->arity()));
   __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
@@ -4063,13 +4109,13 @@
 
   __ AssertNotSmi(object);
 
-  ASSERT(!representation.IsSmi() ||
+  DCHECK(!representation.IsSmi() ||
          !instr->value()->IsConstantOperand() ||
          IsSmi(LConstantOperand::cast(instr->value())));
   if (representation.IsDouble()) {
-    ASSERT(access.IsInobject());
-    ASSERT(!instr->hydrogen()->has_transition());
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     DoubleRegister value = ToDoubleRegister(instr->value());
     __ sdc1(value, FieldMemOperand(object, offset));
     return;
@@ -4130,12 +4176,11 @@
 
 
 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).is(a1));
-  ASSERT(ToRegister(instr->value()).is(a0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  // Name is always in a2.
-  __ li(a2, Operand(instr->name()));
+  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -4159,7 +4204,7 @@
     __ stop("eliminated bounds check failed");
     __ bind(&done);
   } else {
-    DeoptimizeIf(cc, instr->environment(), reg, operand);
+    DeoptimizeIf(cc, instr, reg, operand);
   }
 }
 
@@ -4312,7 +4357,7 @@
 
   // Do the store.
   if (instr->key()->IsConstantOperand()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
     offset += ToInteger32(const_operand) * kPointerSize;
     store_base = elements;
@@ -4362,14 +4407,13 @@
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->object()).is(a2));
-  ASSERT(ToRegister(instr->key()).is(a1));
-  ASSERT(ToRegister(instr->value()).is(a0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  Handle<Code> ic = (instr->strict_mode() == STRICT)
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4398,15 +4442,14 @@
                          GetRAState(),
                          kDontSaveFPRegs);
   } else {
-    ASSERT(object_reg.is(a0));
-    ASSERT(ToRegister(instr->context()).is(cp));
-    PushSafepointRegistersScope scope(
-        this, Safepoint::kWithRegistersAndDoubles);
+    DCHECK(object_reg.is(a0));
+    DCHECK(ToRegister(instr->context()).is(cp));
+    PushSafepointRegistersScope scope(this);
     __ li(a1, Operand(to_map));
     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
     TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
     __ CallStub(&stub);
-    RecordSafepointWithRegistersAndDoubles(
+    RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
   __ bind(&not_applicable);
@@ -4419,15 +4462,15 @@
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
                                      ne, &no_memento_found);
-  DeoptimizeIf(al, instr->environment());
+  DeoptimizeIf(al, instr);
   __ bind(&no_memento_found);
 }
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
-  ASSERT(ToRegister(instr->left()).is(a1));
-  ASSERT(ToRegister(instr->right()).is(a0));
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
   StringAddStub stub(isolate(),
                      instr->hydrogen()->flags(),
                      instr->hydrogen()->pretenure_flag());
@@ -4436,14 +4479,14 @@
 
 
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
-  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharCodeAt(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4469,7 +4512,7 @@
   // contained in the register pointer map.
   __ mov(result, zero_reg);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ push(string);
   // Push the index as a smi. This is safe because of the checks in
   // DoStringCharCodeAt above.
@@ -4482,7 +4525,7 @@
     __ SmiTag(index);
     __ push(index);
   }
-  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2, instr,
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
                           instr->context());
   __ AssertSmi(v0);
   __ SmiUntag(v0);
@@ -4491,14 +4534,14 @@
 
 
 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
-  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+  class DeferredStringCharFromCode FINAL : public LDeferredCode {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -4506,11 +4549,11 @@
   DeferredStringCharFromCode* deferred =
       new(zone()) DeferredStringCharFromCode(this, instr);
 
-  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   Register char_code = ToRegister(instr->char_code());
   Register result = ToRegister(instr->result());
   Register scratch = scratch0();
-  ASSERT(!char_code.is(result));
+  DCHECK(!char_code.is(result));
 
   __ Branch(deferred->entry(), hi,
             char_code, Operand(String::kMaxOneByteCharCode));
@@ -4533,7 +4576,7 @@
   // contained in the register pointer map.
   __ mov(result, zero_reg);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ SmiTag(char_code);
   __ push(char_code);
   CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
@@ -4543,9 +4586,9 @@
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() || input->IsStackSlot());
+  DCHECK(input->IsRegister() || input->IsStackSlot());
   LOperand* output = instr->result();
-  ASSERT(output->IsDoubleRegister());
+  DCHECK(output->IsDoubleRegister());
   FPURegister single_scratch = double_scratch0().low();
   if (input->IsStackSlot()) {
     Register scratch = scratch0();
@@ -4569,18 +4612,18 @@
 
 
 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
-  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagI FINAL : public LDeferredCode {
    public:
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        SIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagI* instr_;
   };
@@ -4597,18 +4640,18 @@
 
 
 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
-  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagU FINAL : public LDeferredCode {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagU* instr_;
   };
@@ -4666,15 +4709,15 @@
     __ mov(dst, zero_reg);
 
     // Preserve the value of all registers.
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    PushSafepointRegistersScope scope(this);
 
     // NumberTagI and NumberTagD use the context from the frame, rather than
     // the environment's HContext or HInlinedContext value.
-    // They only call Runtime::kHiddenAllocateHeapNumber.
+    // They only call Runtime::kAllocateHeapNumber.
     // The corresponding HChange instructions are added in a phase that does
     // not have easy access to the local context.
     __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
     __ Subu(v0, v0, kHeapObjectTag);
@@ -4691,14 +4734,14 @@
 
 
 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
-  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagD FINAL : public LDeferredCode {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagD(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -4732,14 +4775,14 @@
   Register reg = ToRegister(instr->result());
   __ mov(reg, zero_reg);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   // NumberTagI and NumberTagD use the context from the frame, rather than
   // the environment's HContext or HInlinedContext value.
-  // They only call Runtime::kHiddenAllocateHeapNumber.
+  // They only call Runtime::kAllocateHeapNumber.
   // The corresponding HChange instructions are added in a phase that does
   // not have easy access to the local context.
   __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ Subu(v0, v0, kHeapObjectTag);
@@ -4754,12 +4797,12 @@
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
     __ And(at, input, Operand(0xc0000000));
-    DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, at, Operand(zero_reg));
   }
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       !hchange->value()->CheckFlag(HValue::kUint32)) {
     __ SmiTagCheckOverflow(output, input, at);
-    DeoptimizeIf(lt, instr->environment(), at, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, at, Operand(zero_reg));
   } else {
     __ SmiTag(output, input);
   }
@@ -4775,19 +4818,20 @@
     // If the input is a HeapObject, value of scratch won't be zero.
     __ And(scratch, input, Operand(kHeapObjectTag));
     __ SmiUntag(result, input);
-    DeoptimizeIf(ne, instr->environment(), scratch, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
   } else {
     __ SmiUntag(result, input);
   }
 }
 
 
-void LCodeGen::EmitNumberUntagD(Register input_reg,
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
                                 DoubleRegister result_reg,
-                                bool can_convert_undefined_to_nan,
-                                bool deoptimize_on_minus_zero,
-                                LEnvironment* env,
                                 NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
   Register scratch = scratch0();
   Label convert, load_smi, done;
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -4799,29 +4843,29 @@
     if (can_convert_undefined_to_nan) {
       __ Branch(&convert, ne, scratch, Operand(at));
     } else {
-      DeoptimizeIf(ne, env, scratch, Operand(at));
+      DeoptimizeIf(ne, instr, scratch, Operand(at));
     }
     // Load heap number.
     __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
     if (deoptimize_on_minus_zero) {
       __ mfc1(at, result_reg.low());
       __ Branch(&done, ne, at, Operand(zero_reg));
-      __ mfc1(scratch, result_reg.high());
-      DeoptimizeIf(eq, env, scratch, Operand(HeapNumber::kSignMask));
+      __ Mfhc1(scratch, result_reg);
+      DeoptimizeIf(eq, instr, scratch, Operand(HeapNumber::kSignMask));
     }
     __ Branch(&done);
     if (can_convert_undefined_to_nan) {
       __ bind(&convert);
       // Convert undefined (and hole) to NaN.
       __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-      DeoptimizeIf(ne, env, input_reg, Operand(at));
+      DeoptimizeIf(ne, instr, input_reg, Operand(at));
       __ LoadRoot(scratch, Heap::kNanValueRootIndex);
       __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
       __ Branch(&done);
     }
   } else {
     __ SmiUntag(scratch, input_reg);
-    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   }
   // Smi to double register conversion
   __ bind(&load_smi);
@@ -4839,8 +4883,8 @@
   DoubleRegister double_scratch = double_scratch0();
   DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
 
-  ASSERT(!scratch1.is(input_reg) && !scratch1.is(scratch2));
-  ASSERT(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
 
   Label done;
 
@@ -4866,7 +4910,7 @@
     __ bind(&no_heap_number);
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
     __ Branch(&check_bools, ne, input_reg, Operand(at));
-    ASSERT(ToRegister(instr->result()).is(input_reg));
+    DCHECK(ToRegister(instr->result()).is(input_reg));
     __ Branch(USE_DELAY_SLOT, &done);
     __ mov(input_reg, zero_reg);  // In delay slot.
 
@@ -4878,12 +4922,11 @@
 
     __ bind(&check_false);
     __ LoadRoot(at, Heap::kFalseValueRootIndex);
-    DeoptimizeIf(ne, instr->environment(), scratch2, Operand(at));
+    DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate");
     __ Branch(USE_DELAY_SLOT, &done);
     __ mov(input_reg, zero_reg);  // In delay slot.
   } else {
-    // Deoptimize if we don't have a heap number.
-    DeoptimizeIf(ne, instr->environment(), scratch1, Operand(at));
+    DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number");
 
     // Load the double value.
     __ ldc1(double_scratch,
@@ -4898,15 +4941,15 @@
                        except_flag,
                        kCheckForInexactConversion);
 
-    // Deopt if the operation did not succeed.
-    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg),
+                 "lost precision or NaN");
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       __ Branch(&done, ne, input_reg, Operand(zero_reg));
 
-      __ mfc1(scratch1, double_scratch.high());
+      __ Mfhc1(scratch1, double_scratch);
       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero");
     }
   }
   __ bind(&done);
@@ -4914,21 +4957,21 @@
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+  class DeferredTaggedToI FINAL : public LDeferredCode {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredTaggedToI(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LTaggedToI* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
-  ASSERT(input->Equals(instr->result()));
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
 
   Register input_reg = ToRegister(input);
 
@@ -4949,9 +4992,9 @@
 
 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsDoubleRegister());
+  DCHECK(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
   DoubleRegister result_reg = ToDoubleRegister(result);
@@ -4960,11 +5003,7 @@
   NumberUntagDMode mode = value->representation().IsSmi()
       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
 
-  EmitNumberUntagD(input_reg, result_reg,
-                   instr->hydrogen()->can_convert_undefined_to_nan(),
-                   instr->hydrogen()->deoptimize_on_minus_zero(),
-                   instr->environment(),
-                   mode);
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
 }
 
 
@@ -4987,14 +5026,14 @@
                        kCheckForInexactConversion);
 
     // Deopt if the operation did not succeed (except_flag != 0).
-    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       Label done;
       __ Branch(&done, ne, result_reg, Operand(zero_reg));
-      __ mfc1(scratch1, double_input.high());
+      __ Mfhc1(scratch1, double_input);
       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
       __ bind(&done);
     }
   }
@@ -5020,26 +5059,26 @@
                        kCheckForInexactConversion);
 
     // Deopt if the operation did not succeed (except_flag != 0).
-    DeoptimizeIf(ne, instr->environment(), except_flag, Operand(zero_reg));
+    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
       Label done;
       __ Branch(&done, ne, result_reg, Operand(zero_reg));
-      __ mfc1(scratch1, double_input.high());
+      __ Mfhc1(scratch1, double_input);
       __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
-      DeoptimizeIf(ne, instr->environment(), scratch1, Operand(zero_reg));
+      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
       __ bind(&done);
     }
   }
   __ SmiTagCheckOverflow(result_reg, result_reg, scratch1);
-  DeoptimizeIf(lt, instr->environment(), scratch1, Operand(zero_reg));
+  DeoptimizeIf(lt, instr, scratch1, Operand(zero_reg));
 }
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
   __ SmiTst(ToRegister(input), at);
-  DeoptimizeIf(ne, instr->environment(), at, Operand(zero_reg));
+  DeoptimizeIf(ne, instr, at, Operand(zero_reg));
 }
 
 
@@ -5047,7 +5086,7 @@
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
     LOperand* input = instr->value();
     __ SmiTst(ToRegister(input), at);
-    DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+    DeoptimizeIf(eq, instr, at, Operand(zero_reg));
   }
 }
 
@@ -5065,12 +5104,12 @@
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(first));
+      DeoptimizeIf(ne, instr, scratch, Operand(first));
     } else {
-      DeoptimizeIf(lo, instr->environment(), scratch, Operand(first));
+      DeoptimizeIf(lo, instr, scratch, Operand(first));
       // Omit check for the last type.
       if (last != LAST_TYPE) {
-        DeoptimizeIf(hi, instr->environment(), scratch, Operand(last));
+        DeoptimizeIf(hi, instr, scratch, Operand(last));
       }
     }
   } else {
@@ -5078,14 +5117,13 @@
     uint8_t tag;
     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
 
-    if (IsPowerOf2(mask)) {
-      ASSERT(tag == 0 || IsPowerOf2(tag));
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
       __ And(at, scratch, mask);
-      DeoptimizeIf(tag == 0 ? ne : eq, instr->environment(),
-          at, Operand(zero_reg));
+      DeoptimizeIf(tag == 0 ? ne : eq, instr, at, Operand(zero_reg));
     } else {
       __ And(scratch, scratch, Operand(mask));
-      DeoptimizeIf(ne, instr->environment(), scratch, Operand(tag));
+      DeoptimizeIf(ne, instr, scratch, Operand(tag));
     }
   }
 }
@@ -5100,18 +5138,16 @@
     Handle<Cell> cell = isolate()->factory()->NewCell(object);
     __ li(at, Operand(Handle<Object>(cell)));
     __ lw(at, FieldMemOperand(at, Cell::kValueOffset));
-    DeoptimizeIf(ne, instr->environment(), reg,
-                 Operand(at));
+    DeoptimizeIf(ne, instr, reg, Operand(at));
   } else {
-    DeoptimizeIf(ne, instr->environment(), reg,
-                 Operand(object));
+    DeoptimizeIf(ne, instr, reg, Operand(object));
   }
 }
 
 
 void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
   {
-    PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+    PushSafepointRegistersScope scope(this);
     __ push(object);
     __ mov(cp, zero_reg);
     __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
@@ -5120,22 +5156,22 @@
     __ StoreToSafepointRegisterSlot(v0, scratch0());
   }
   __ SmiTst(scratch0(), at);
-  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
 }
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+  class DeferredCheckMaps FINAL : public LDeferredCode {
    public:
     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5152,7 +5188,7 @@
 
   Register map_reg = scratch0();
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   Register reg = ToRegister(input);
   __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
 
@@ -5173,7 +5209,7 @@
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ Branch(deferred->entry(), ne, map_reg, Operand(map));
   } else {
-    DeoptimizeIf(ne, instr->environment(), map_reg, Operand(map));
+    DeoptimizeIf(ne, instr, map_reg, Operand(map));
   }
 
   __ bind(&success);
@@ -5211,8 +5247,7 @@
 
   // Check for undefined. Undefined is converted to zero for clamping
   // conversions.
-  DeoptimizeIf(ne, instr->environment(), input_reg,
-               Operand(factory()->undefined_value()));
+  DeoptimizeIf(ne, instr, input_reg, Operand(factory()->undefined_value()));
   __ mov(result_reg, zero_reg);
   __ jmp(&done);
 
@@ -5250,14 +5285,14 @@
 
 
 void LCodeGen::DoAllocate(LAllocate* instr) {
-  class DeferredAllocate V8_FINAL : public LDeferredCode {
+  class DeferredAllocate FINAL : public LDeferredCode {
    public:
     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredAllocate(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LAllocate* instr_;
   };
@@ -5275,11 +5310,11 @@
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   }
   if (instr->size()->IsConstantOperand()) {
@@ -5291,33 +5326,26 @@
     }
   } else {
     Register size = ToRegister(instr->size());
-    __ Allocate(size,
-                result,
-                scratch,
-                scratch2,
-                deferred->entry(),
-                flags);
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
   }
 
   __ bind(deferred->exit());
 
   if (instr->hydrogen()->MustPrefillWithFiller()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
     if (instr->size()->IsConstantOperand()) {
       int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
-      __ li(scratch, Operand(size));
+      __ li(scratch, Operand(size - kHeapObjectTag));
     } else {
-      scratch = ToRegister(instr->size());
+      __ Subu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
     }
-    __ Subu(scratch, scratch, Operand(kPointerSize));
-    __ Subu(result, result, Operand(kHeapObjectTag));
+    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
     Label loop;
     __ bind(&loop);
-    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    __ Subu(scratch, scratch, Operand(kPointerSize));
     __ Addu(at, result, Operand(scratch));
     __ sw(scratch2, MemOperand(at));
-    __ Subu(scratch, scratch, Operand(kPointerSize));
     __ Branch(&loop, ge, scratch, Operand(zero_reg));
-    __ Addu(result, result, Operand(kHeapObjectTag));
   }
 }
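(The MustPrefillWithFiller rewrite above is more than assert renaming: the one_pointer_filler_map load is hoisted out of the loop, and instead of untagging result before the loop and re-tagging it afterwards, the byte count in scratch is biased by kHeapObjectTag once up front, hence the new STATIC_ASSERT(kHeapObjectTag == 1), so the stores can index the tagged pointer directly. A sketch of the resulting loop semantics, illustrative C++ with placeholder names, assuming 32-bit pointers as on MIPS here:

    #include <cstdint>
    constexpr int32_t kHeapObjectTag = 1;
    constexpr int32_t kPointerSize = 4;  // MIPS32
    // Fill an allocation of size_in_bytes, addressed through the tagged
    // pointer (tagged == untagged base + kHeapObjectTag).
    void PrefillWithFiller(uintptr_t tagged_result, int32_t size_in_bytes,
                           int32_t filler_map) {
      int32_t scratch = size_in_bytes - kHeapObjectTag;
      do {
        scratch -= kPointerSize;
        // tagged_result + scratch == untagged base + offset of this word.
        *reinterpret_cast<int32_t*>(tagged_result + scratch) = filler_map;
      } while (scratch >= 0);
    }
)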
 
@@ -5330,10 +5358,10 @@
   // contained in the register pointer map.
   __ mov(result, zero_reg);
 
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
     Register size = ToRegister(instr->size());
-    ASSERT(!size.is(result));
+    DCHECK(!size.is(result));
     __ SmiTag(size);
     __ push(size);
   } else {
@@ -5350,11 +5378,11 @@
   int flags = AllocateDoubleAlignFlag::encode(
       instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
@@ -5362,21 +5390,21 @@
   __ Push(Smi::FromInt(flags));
 
   CallRuntimeFromDeferred(
-      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(v0, result);
 }
 
 
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  ASSERT(ToRegister(instr->value()).is(a0));
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->value()).is(a0));
+  DCHECK(ToRegister(instr->result()).is(v0));
   __ push(a0);
   CallRuntime(Runtime::kToFastProperties, 1, instr);
 }
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   Label materialized;
   // Registers will be used as follows:
   // t3 = literals array.
@@ -5396,7 +5424,7 @@
   __ li(t1, Operand(instr->hydrogen()->pattern()));
   __ li(t0, Operand(instr->hydrogen()->flags()));
   __ Push(t3, t2, t1, t0);
-  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ mov(a1, v0);
 
   __ bind(&materialized);
@@ -5409,7 +5437,7 @@
   __ bind(&runtime_allocate);
   __ li(a0, Operand(Smi::FromInt(size)));
   __ Push(a1, a0);
-  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ pop(a1);
 
   __ bind(&allocated);
@@ -5429,14 +5457,13 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->context()).is(cp));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && instr->hydrogen()->has_no_literals()) {
-    FastNewClosureStub stub(isolate(),
-                            instr->hydrogen()->strict_mode(),
-                            instr->hydrogen()->is_generator());
+    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+                            instr->hydrogen()->kind());
     __ li(a2, Operand(instr->hydrogen()->shared_info()));
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -5444,13 +5471,13 @@
     __ li(a1, Operand(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
     __ Push(cp, a2, a1);
-    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
   }
 }
 
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
-  ASSERT(ToRegister(instr->result()).is(v0));
+  DCHECK(ToRegister(instr->result()).is(v0));
   Register input = ToRegister(instr->value());
   __ push(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
@@ -5470,8 +5497,8 @@
                                                   &cmp1,
                                                   &cmp2);
 
-  ASSERT(cmp1.is_valid());
-  ASSERT(!cmp2.is_reg() || cmp2.rm().is_valid());
+  DCHECK(cmp1.is_valid());
+  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
 
   if (final_branch_condition != kNoCondition) {
     EmitBranch(instr, final_branch_condition, cmp1, cmp2);
@@ -5527,13 +5554,6 @@
     *cmp2 = Operand(input);
     final_branch_condition = eq;
 
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(type_name, factory->null_string())) {
-    __ LoadRoot(at, Heap::kNullValueRootIndex);
-    *cmp1 = at;
-    *cmp2 = Operand(input);
-    final_branch_condition = eq;
-
   } else if (String::Equals(type_name, factory->undefined_string())) {
     __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
     __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
@@ -5559,10 +5579,8 @@
 
   } else if (String::Equals(type_name, factory->object_string())) {
     __ JumpIfSmi(input, false_label);
-    if (!FLAG_harmony_typeof) {
-      __ LoadRoot(at, Heap::kNullValueRootIndex);
-      __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
-    }
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
     Register map = input;
     __ GetObjectType(input, map, scratch);
     __ Branch(false_label,
@@ -5598,7 +5616,7 @@
 
 
 void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
-  ASSERT(!temp1.is(temp2));
+  DCHECK(!temp1.is(temp2));
   // Get the frame pointer for the calling frame.
   __ lw(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
 
@@ -5622,7 +5640,7 @@
     int current_pc = masm()->pc_offset();
     if (current_pc < last_lazy_deopt_pc_ + space_needed) {
       int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
-      ASSERT_EQ(0, padding_size % Assembler::kInstrSize);
+      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
       while (padding_size > 0) {
         __ nop();
         padding_size -= Assembler::kInstrSize;
@@ -5635,7 +5653,7 @@
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   last_lazy_deopt_pc_ = masm()->pc_offset();
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5652,8 +5670,8 @@
     type = Deoptimizer::LAZY;
   }
 
-  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
-  DeoptimizeIf(al, instr->environment(), type, zero_reg, Operand(zero_reg));
+  DeoptimizeIf(al, instr, type, zero_reg, Operand(zero_reg),
+               instr->hydrogen()->reason());
 }
 
 
@@ -5668,31 +5686,31 @@
 
 
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   LoadContextFromDeferred(instr->context());
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  class DeferredStackCheck V8_FINAL : public LDeferredCode {
+  class DeferredStackCheck FINAL : public LDeferredCode {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStackCheck(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStackCheck* instr_;
   };
 
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   // There is no LLazyBailout instruction for stack-checks. We have to
   // prepare for lazy deoptimization explicitly here.
@@ -5701,14 +5719,14 @@
     Label done;
     __ LoadRoot(at, Heap::kStackLimitRootIndex);
     __ Branch(&done, hs, sp, Operand(at));
-    ASSERT(instr->context()->IsRegister());
-    ASSERT(ToRegister(instr->context()).is(cp));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
     CallCode(isolate()->builtins()->StackCheck(),
              RelocInfo::CODE_TARGET,
              instr);
     __ bind(&done);
   } else {
-    ASSERT(instr->hydrogen()->is_backwards_branch());
+    DCHECK(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
     DeferredStackCheck* deferred_stack_check =
         new(zone()) DeferredStackCheck(this, instr);
@@ -5733,7 +5751,7 @@
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
-  ASSERT(!environment->HasBeenRegistered());
+  DCHECK(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
 
   GenerateOsrPrologue();
@@ -5744,21 +5762,21 @@
   Register result = ToRegister(instr->result());
   Register object = ToRegister(instr->object());
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-  DeoptimizeIf(eq, instr->environment(), object, Operand(at));
+  DeoptimizeIf(eq, instr, object, Operand(at));
 
   Register null_value = t1;
   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
-  DeoptimizeIf(eq, instr->environment(), object, Operand(null_value));
+  DeoptimizeIf(eq, instr, object, Operand(null_value));
 
   __ And(at, object, kSmiTagMask);
-  DeoptimizeIf(eq, instr->environment(), at, Operand(zero_reg));
+  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ GetObjectType(object, a1, a1);
-  DeoptimizeIf(le, instr->environment(), a1, Operand(LAST_JS_PROXY_TYPE));
+  DeoptimizeIf(le, instr, a1, Operand(LAST_JS_PROXY_TYPE));
 
   Label use_cache, call_runtime;
-  ASSERT(object.is(a0));
+  DCHECK(object.is(a0));
   __ CheckEnumCache(null_value, &call_runtime);
 
   __ lw(result, FieldMemOperand(object, HeapObject::kMapOffset));
@@ -5770,9 +5788,9 @@
   CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
 
   __ lw(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
-  ASSERT(result.is(v0));
+  DCHECK(result.is(v0));
   __ LoadRoot(at, Heap::kMetaMapRootIndex);
-  DeoptimizeIf(ne, instr->environment(), a1, Operand(at));
+  DeoptimizeIf(ne, instr, a1, Operand(at));
   __ bind(&use_cache);
 }
 
@@ -5792,7 +5810,7 @@
         FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
   __ lw(result,
         FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
-  DeoptimizeIf(eq, instr->environment(), result, Operand(zero_reg));
+  DeoptimizeIf(eq, instr, result, Operand(zero_reg));
 
   __ bind(&done);
 }
@@ -5802,7 +5820,7 @@
   Register object = ToRegister(instr->value());
   Register map = ToRegister(instr->map());
   __ lw(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(ne, instr->environment(), map, Operand(scratch0()));
+  DeoptimizeIf(ne, instr, map, Operand(scratch0()));
 }
 
 
@@ -5810,7 +5828,7 @@
                                            Register result,
                                            Register object,
                                            Register index) {
-  PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
+  PushSafepointRegistersScope scope(this);
   __ Push(object, index);
   __ mov(cp, zero_reg);
   __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
@@ -5821,7 +5839,7 @@
 
 
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
    public:
     DeferredLoadMutableDouble(LCodeGen* codegen,
                               LLoadFieldByIndex* instr,
@@ -5834,10 +5852,10 @@
           object_(object),
           index_(index) {
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LLoadFieldByIndex* instr_;
     Register result_;
@@ -5890,7 +5908,7 @@
   Handle<ScopeInfo> scope_info = instr->scope_info();
   __ li(at, scope_info);
   __ Push(at, ToRegister(instr->function()));
-  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+  CallRuntime(Runtime::kPushBlockContext, 2, instr);
   RecordSafepoint(Safepoint::kNoLazyDeopt);
 }
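(Two mechanical migrations account for most of the churn in this file: ASSERT*/V8_FINAL/V8_OVERRIDE become DCHECK*/FINAL/OVERRIDE, and DeoptimizeIf now takes the LInstruction* itself instead of instr->environment(), with an optional trailing detail string (see the header change below) naming the bailout reason: "not a heap number", "minus zero", and so on. A before/after sketch of the call-site pattern, distilled from the hunks above:

    // Old: deopt point keyed off the instruction's environment.
    DeoptimizeIf(ne, instr->environment(), scratch, Operand(at));
    // New: the instruction is passed directly; the detail string is optional
    // and, judging by the hunks above, documents the deopt reason.
    DeoptimizeIf(ne, instr, scratch, Operand(at), "not a heap number");
)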
 
diff --git a/src/mips/lithium-codegen-mips.h b/src/mips/lithium-codegen-mips.h
index d70c871..5402c9a 100644
--- a/src/mips/lithium-codegen-mips.h
+++ b/src/mips/lithium-codegen-mips.h
@@ -6,9 +6,9 @@
 #define V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
 
 #include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
 #include "src/mips/lithium-gap-resolver-mips.h"
 #include "src/mips/lithium-mips.h"
-#include "src/lithium-codegen.h"
 #include "src/safepoint-table.h"
 #include "src/scopes.h"
 #include "src/utils.h"
@@ -25,7 +25,7 @@
   LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
       : LCodeGenBase(chunk, assembler, info),
         deoptimizations_(4, info->zone()),
-        deopt_jump_table_(4, info->zone()),
+        jump_table_(4, info->zone()),
         deoptimization_literals_(8, info->zone()),
         inlined_function_count_(0),
         scope_(info->scope()),
@@ -169,10 +169,10 @@
 
   // Code generation passes.  Returns true if code generation should
   // continue.
-  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
+  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
   bool GeneratePrologue();
   bool GenerateDeferredCode();
-  bool GenerateDeoptJumpTable();
+  bool GenerateJumpTable();
   bool GenerateSafepointTable();
 
   // Generates the custom OSR entrypoint and sets the osr_pc_offset.
@@ -228,15 +228,15 @@
 
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition condition,
-                    LEnvironment* environment,
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
                     Deoptimizer::BailoutType bailout_type,
                     Register src1 = zero_reg,
-                    const Operand& src2 = Operand(zero_reg));
-  void DeoptimizeIf(Condition condition,
-                    LEnvironment* environment,
+                    const Operand& src2 = Operand(zero_reg),
+                    const char* detail = NULL);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
                     Register src1 = zero_reg,
-                    const Operand& src2 = Operand(zero_reg));
+                    const Operand& src2 = Operand(zero_reg),
+                    const char* detail = NULL);
 
   void AddToTranslation(LEnvironment* environment,
                         Translation* translation,
@@ -269,11 +269,8 @@
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
                                     Safepoint::DeoptMode mode);
-  void RecordSafepointWithRegistersAndDoubles(LPointerMap* pointers,
-                                              int arguments,
-                                              Safepoint::DeoptMode mode);
 
-  void RecordAndWritePosition(int position) V8_OVERRIDE;
+  void RecordAndWritePosition(int position) OVERRIDE;
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
@@ -300,12 +297,8 @@
                         FPURegister src1,
                         FPURegister src2);
   void EmitCmpI(LOperand* left, LOperand* right);
-  void EmitNumberUntagD(Register input,
-                        DoubleRegister result,
-                        bool allow_undefined_as_nan,
-                        bool deoptimize_on_minus_zero,
-                        LEnvironment* env,
-                        NumberUntagDMode mode);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DoubleRegister result, NumberUntagDMode mode);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
@@ -359,7 +352,7 @@
                                            LEnvironment* environment);
 
 
-  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
   void DoLoadKeyedExternalArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -367,8 +360,11 @@
   void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
   void DoStoreKeyedFixedArray(LStoreKeyed* instr);
 
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<Deoptimizer::JumpTableEntry> deopt_jump_table_;
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;
@@ -386,56 +382,24 @@
 
   Safepoint::Kind expected_safepoint_kind_;
 
-  class PushSafepointRegistersScope V8_FINAL  BASE_EMBEDDED {
+  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
    public:
-    PushSafepointRegistersScope(LCodeGen* codegen,
-                                Safepoint::Kind kind)
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
-      ASSERT(codegen_->info()->is_calling());
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
-      codegen_->expected_safepoint_kind_ = kind;
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
 
-      switch (codegen_->expected_safepoint_kind_) {
-        case Safepoint::kWithRegisters: {
-          StoreRegistersStateStub stub1(codegen_->masm_->isolate(),
-                                        kDontSaveFPRegs);
-          codegen_->masm_->push(ra);
-          codegen_->masm_->CallStub(&stub1);
-          break;
-        }
-        case Safepoint::kWithRegistersAndDoubles: {
-          StoreRegistersStateStub stub2(codegen_->masm_->isolate(),
-                                        kSaveFPRegs);
-          codegen_->masm_->push(ra);
-          codegen_->masm_->CallStub(&stub2);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->push(ra);
+      codegen_->masm_->CallStub(&stub);
     }
 
     ~PushSafepointRegistersScope() {
-      Safepoint::Kind kind = codegen_->expected_safepoint_kind_;
-      ASSERT((kind & Safepoint::kWithRegisters) != 0);
-      switch (kind) {
-        case Safepoint::kWithRegisters: {
-          RestoreRegistersStateStub stub1(codegen_->masm_->isolate(),
-                                          kDontSaveFPRegs);
-          codegen_->masm_->push(ra);
-          codegen_->masm_->CallStub(&stub1);
-          break;
-        }
-        case Safepoint::kWithRegistersAndDoubles: {
-          RestoreRegistersStateStub stub2(codegen_->masm_->isolate(),
-                                          kSaveFPRegs);
-          codegen_->masm_->push(ra);
-          codegen_->masm_->CallStub(&stub2);
-          break;
-        }
-        default:
-          UNREACHABLE();
-      }
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->push(ra);
+      codegen_->masm_->CallStub(&stub);
       codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
     }
 
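PushSafepointRegistersScope above is an RAII guard: the constructor switches the codegen into the with-registers safepoint mode and emits the register-save stub call, and the destructor emits the restore stub and switches back, so the save/restore pairing holds on every exit path of the enclosing C++ scope. A minimal standalone sketch of the same shape, with all names (CodegenState, EmitSaveRegisters, EmitRestoreRegisters) invented for illustration rather than taken from V8:

#include <cassert>

// Hypothetical stand-in for the codegen state; only what the pattern needs.
struct CodegenState {
  enum Mode { kSimple, kWithRegisters };
  Mode mode = kSimple;
  void EmitSaveRegisters() {}     // think: push ra; CallStub(save stub)
  void EmitRestoreRegisters() {}  // think: push ra; CallStub(restore stub)
};

// RAII guard: the constructor flips the mode and emits the save call, the
// destructor emits the restore call and flips the mode back.
class PushRegistersScope {
 public:
  explicit PushRegistersScope(CodegenState* s) : s_(s) {
    assert(s_->mode == CodegenState::kSimple);
    s_->mode = CodegenState::kWithRegisters;
    s_->EmitSaveRegisters();
  }
  ~PushRegistersScope() {
    assert(s_->mode == CodegenState::kWithRegisters);
    s_->EmitRestoreRegisters();
    s_->mode = CodegenState::kSimple;
  }

 private:
  CodegenState* s_;
};

Declaring PushRegistersScope scope(&state); at the top of a block is then enough; an early return cannot skip the restore.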
diff --git a/src/mips/lithium-gap-resolver-mips.cc b/src/mips/lithium-gap-resolver-mips.cc
index 6447520..1bec0c8 100644
--- a/src/mips/lithium-gap-resolver-mips.cc
+++ b/src/mips/lithium-gap-resolver-mips.cc
@@ -4,8 +4,8 @@
 
 #include "src/v8.h"
 
-#include "src/mips/lithium-gap-resolver-mips.h"
 #include "src/mips/lithium-codegen-mips.h"
+#include "src/mips/lithium-gap-resolver-mips.h"
 
 namespace v8 {
 namespace internal {
@@ -19,7 +19,7 @@
 
 
 void LGapResolver::Resolve(LParallelMove* parallel_move) {
-  ASSERT(moves_.is_empty());
+  DCHECK(moves_.is_empty());
   // Build up a worklist of moves.
   BuildInitialMoveList(parallel_move);
 
@@ -40,7 +40,7 @@
   // Perform the moves with constant sources.
   for (int i = 0; i < moves_.length(); ++i) {
     if (!moves_[i].IsEliminated()) {
-      ASSERT(moves_[i].source()->IsConstantOperand());
+      DCHECK(moves_[i].source()->IsConstantOperand());
       EmitMove(i);
     }
   }
@@ -78,13 +78,13 @@
   // An additional complication is that moves to MemOperands with large
   // offsets (more than 1K or 4K) require us to spill this spilled value to
   // the stack, to free up the register.
-  ASSERT(!moves_[index].IsPending());
-  ASSERT(!moves_[index].IsRedundant());
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
 
   // Clear this move's destination to indicate a pending move.  The actual
   // destination is saved in a stack allocated local.  Multiple moves can
   // be pending because this function is recursive.
-  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
   LOperand* destination = moves_[index].destination();
   moves_[index].set_destination(NULL);
 
@@ -111,7 +111,7 @@
   // a scratch register to break it.
   LMoveOperands other_move = moves_[root_index_];
   if (other_move.Blocks(destination)) {
-    ASSERT(other_move.IsPending());
+    DCHECK(other_move.IsPending());
     BreakCycle(index);
     return;
   }
@@ -122,12 +122,12 @@
 
 
 void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   // No operand should be the destination for more than one move.
   for (int i = 0; i < moves_.length(); ++i) {
     LOperand* destination = moves_[i].destination();
     for (int j = i + 1; j < moves_.length(); ++j) {
-      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
     }
   }
 #endif
@@ -139,8 +139,8 @@
   // We save in a register the value that should end up in the source of
   // moves_[root_index].  After performing all moves in the tree rooted
   // in that move, we save the value to that source.
-  ASSERT(moves_[index].destination()->Equals(moves_[root_index_].source()));
-  ASSERT(!in_cycle_);
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
   in_cycle_ = true;
   LOperand* source = moves_[index].source();
   saved_destination_ = moves_[index].destination();
@@ -161,8 +161,8 @@
 
 
 void LGapResolver::RestoreValue() {
-  ASSERT(in_cycle_);
-  ASSERT(saved_destination_ != NULL);
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
 
   // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
   if (saved_destination_->IsRegister()) {
@@ -196,7 +196,7 @@
     if (destination->IsRegister()) {
       __ mov(cgen_->ToRegister(destination), source_register);
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       __ sw(source_register, cgen_->ToMemOperand(destination));
     }
   } else if (source->IsStackSlot()) {
@@ -204,7 +204,7 @@
     if (destination->IsRegister()) {
       __ lw(cgen_->ToRegister(destination), source_operand);
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
         if (!destination_operand.OffsetIsInt16Encodable()) {
@@ -240,8 +240,8 @@
       double v = cgen_->ToDouble(constant_source);
       __ Move(result, v);
     } else {
-      ASSERT(destination->IsStackSlot());
-      ASSERT(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
       Representation r = cgen_->IsSmi(constant_source)
           ? Representation::Smi() : Representation::Integer32();
       if (cgen_->IsInteger32(constant_source)) {
@@ -258,7 +258,7 @@
     if (destination->IsDoubleRegister()) {
       __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsDoubleStackSlot());
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       __ sdc1(source_register, destination_operand);
     }
@@ -268,7 +268,7 @@
     if (destination->IsDoubleRegister()) {
       __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsDoubleStackSlot());
       MemOperand destination_operand = cgen_->ToMemOperand(destination);
       if (in_cycle_) {
         // kLithiumScratchDouble was used to break the cycle,
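The comments in LGapResolver above sketch the standard parallel-move resolution algorithm: perform moves depth-first so a destination is written only after every move reading it has run, mark in-progress moves as pending, and when a pending move is reached again a cycle has been found, which is broken by parking one value in a scratch location (kLithiumScratchReg / kLithiumScratchDouble here). Below is a compact register-only sketch of that idea with invented names; the real resolver additionally handles stack slots, doubles, constants, and the large-offset MemOperand spilling the comment mentions.

#include <cstddef>
#include <vector>

// A move copies regs[src] into regs[dst]; all moves are meant to happen "in
// parallel", i.e. every source is read before any destination it feeds is
// overwritten.
struct Move {
  int src;
  int dst;
};

class MiniGapResolver {
 public:
  void Resolve(std::vector<int>* regs, const std::vector<Move>& moves) {
    regs_ = regs;
    moves_ = moves;
    done_.assign(moves_.size(), false);
    pending_.assign(moves_.size(), false);
    for (size_t i = 0; i < moves_.size(); ++i) {
      if (!done_[i]) Perform(i);
    }
  }

 private:
  void Perform(size_t i) {
    pending_[i] = true;
    // First flush every move that still reads the register this move is
    // about to overwrite. Reaching a move that is already pending means the
    // dependency chain closed into a cycle.
    for (size_t j = 0; j < moves_.size(); ++j) {
      if (j == i || done_[j]) continue;
      if (moves_[j].src == moves_[i].dst) {
        if (pending_[j]) {
          // Break the cycle: save the value move j still needs into the
          // scratch, and redirect move j to read from the scratch instead.
          scratch_value_ = (*regs_)[moves_[i].dst];
          moves_[j].src = kScratch;
        } else {
          Perform(j);
        }
      }
    }
    int value =
        (moves_[i].src == kScratch) ? scratch_value_ : (*regs_)[moves_[i].src];
    (*regs_)[moves_[i].dst] = value;
    pending_[i] = false;
    done_[i] = true;
  }

  static const int kScratch = -1;  // stands in for kLithiumScratchReg
  std::vector<int>* regs_ = nullptr;
  std::vector<Move> moves_;
  std::vector<bool> done_;
  std::vector<bool> pending_;
  int scratch_value_ = 0;
};

For example, the move set {0 -> 1, 1 -> 0} swaps regs[0] and regs[1] through the scratch value; any single cycle, however long, needs only the one scratch, because moves have unique destinations and so cycles are disjoint.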
diff --git a/src/mips/lithium-gap-resolver-mips.h b/src/mips/lithium-gap-resolver-mips.h
index 0072e52..9e6f14e 100644
--- a/src/mips/lithium-gap-resolver-mips.h
+++ b/src/mips/lithium-gap-resolver-mips.h
@@ -15,7 +15,7 @@
 class LCodeGen;
 class LGapResolver;
 
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
  public:
   explicit LGapResolver(LCodeGen* owner);
 
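The V8_FINAL -> FINAL and V8_OVERRIDE -> OVERRIDE renames running through these headers shorten V8's portability macros for the C++11 final and override keywords. A simplified sketch of such macros (an assumption for illustration; the real definitions key off compiler detection rather than just __cplusplus):

// Assumed simplified definitions for illustration.
#if __cplusplus >= 201103L
#define OVERRIDE override
#define FINAL final
#else
#define OVERRIDE
#define FINAL
#endif

class Base {
 public:
  virtual ~Base() {}
  virtual void RecordAndWritePosition(int position) = 0;
};

// FINAL forbids further subclassing; OVERRIDE makes the compiler verify the
// method really overrides a virtual in the base class.
class Derived FINAL : public Base {
 public:
  void RecordAndWritePosition(int position) OVERRIDE { (void)position; }
};

With the macros expanding to the keywords, the compiler rejects a derived method whose signature drifts away from the base declaration, which is the point of annotating methods such as RecordAndWritePosition above.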
diff --git a/src/mips/lithium-mips.cc b/src/mips/lithium-mips.cc
index 830fc91..1757d92 100644
--- a/src/mips/lithium-mips.cc
+++ b/src/mips/lithium-mips.cc
@@ -4,10 +4,11 @@
 
 #include "src/v8.h"
 
-#include "src/lithium-allocator-inl.h"
-#include "src/mips/lithium-mips.h"
-#include "src/mips/lithium-codegen-mips.h"
+#if V8_TARGET_ARCH_MIPS
+
 #include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
+#include "src/mips/lithium-codegen-mips.h"
 
 namespace v8 {
 namespace internal {
@@ -25,17 +26,17 @@
   // outputs because all registers are blocked by the calling convention.
   // Input operands must use a fixed register or use-at-start policy or
   // a non-register policy.
-  ASSERT(Output() == NULL ||
+  DCHECK(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||
+    DCHECK(operand->HasFixedPolicy() ||
            operand->IsUsedAtStart());
   }
   for (TempIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+    DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -322,8 +323,9 @@
 
 void LStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
-  hydrogen()->access().PrintTo(stream);
-  stream->Add(" <- ");
+  OStringStream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.c_str());
   value()->PrintTo(stream);
 }
 
@@ -360,7 +362,7 @@
   }
 
   if (value() == NULL) {
-    ASSERT(hydrogen()->IsConstantHoleStore() &&
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
            hydrogen()->value()->representation().IsDouble());
     stream->Add("<the hole(nan)>");
   } else {
@@ -396,14 +398,14 @@
   if (kind == DOUBLE_REGISTERS) {
     return LDoubleStackSlot::Create(index, zone());
   } else {
-    ASSERT(kind == GENERAL_REGISTERS);
+    DCHECK(kind == GENERAL_REGISTERS);
     return LStackSlot::Create(index, zone());
   }
 }
 
 
 LPlatformChunk* LChunkBuilder::Build() {
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
   LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
@@ -428,12 +430,6 @@
 }
 
 
-void LChunkBuilder::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
-  status_ = ABORTED;
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
   return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
                                   Register::ToAllocationIndex(reg));
@@ -614,7 +610,7 @@
 
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
-  ASSERT(!instr->HasPointerMap());
+  DCHECK(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(zone()));
   return instr;
 }
@@ -648,14 +644,14 @@
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
@@ -684,8 +680,8 @@
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
 
     HValue* right_value = instr->right();
@@ -726,9 +722,9 @@
 
 LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                            HArithmeticBinaryOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->left()->representation().IsDouble());
-  ASSERT(instr->right()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
   if (op == Token::MOD) {
     LOperand* left = UseFixedDouble(instr->left(), f2);
     LOperand* right = UseFixedDouble(instr->right(), f4);
@@ -750,8 +746,8 @@
                                            HBinaryOperation* instr) {
   HValue* left = instr->left();
   HValue* right = instr->right();
-  ASSERT(left->representation().IsTagged());
-  ASSERT(right->representation().IsTagged());
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left_operand = UseFixed(left, a1);
   LOperand* right_operand = UseFixed(right, a0);
@@ -762,7 +758,7 @@
 
 
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
-  ASSERT(is_building());
+  DCHECK(is_building());
   current_block_ = block;
   next_block_ = next_block;
   if (block->IsStartBlock()) {
@@ -771,13 +767,13 @@
   } else if (block->predecessors()->length() == 1) {
     // We have a single predecessor => copy environment and outgoing
     // argument count from the predecessor.
-    ASSERT(block->phis()->length() == 0);
+    DCHECK(block->phis()->length() == 0);
     HBasicBlock* pred = block->predecessors()->at(0);
     HEnvironment* last_environment = pred->last_environment();
-    ASSERT(last_environment != NULL);
+    DCHECK(last_environment != NULL);
     // Only copy the environment, if it is later used again.
     if (pred->end()->SecondSuccessor() == NULL) {
-      ASSERT(pred->end()->FirstSuccessor() == block);
+      DCHECK(pred->end()->FirstSuccessor() == block);
     } else {
       if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
           pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
@@ -785,7 +781,7 @@
       }
     }
     block->UpdateEnvironment(last_environment);
-    ASSERT(pred->argument_count() >= 0);
+    DCHECK(pred->argument_count() >= 0);
     argument_count_ = pred->argument_count();
   } else {
     // We are at a state join => process phis.
@@ -837,7 +833,7 @@
     if (current->OperandCount() == 0) {
       instr = DefineAsRegister(new(zone()) LDummy());
     } else {
-      ASSERT(!current->OperandAt(0)->IsControlInstruction());
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
       instr = DefineAsRegister(new(zone())
           LDummyUse(UseAny(current->OperandAt(0))));
     }
@@ -860,7 +856,7 @@
   }
 
   argument_count_ += current->argument_delta();
-  ASSERT(argument_count_ >= 0);
+  DCHECK(argument_count_ >= 0);
 
   if (instr != NULL) {
     AddInstruction(instr, current);
@@ -902,7 +898,7 @@
       LUnallocated* operand = LUnallocated::cast(it.Current());
       if (operand->HasFixedPolicy()) ++fixed;
     }
-    ASSERT(fixed == 0 || used_at_start == 0);
+    DCHECK(fixed == 0 || used_at_start == 0);
   }
 #endif
 
@@ -961,7 +957,7 @@
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
   return new(zone()) LCmpMapAndBranch(value, temp);
@@ -1085,14 +1081,14 @@
 
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
-  const CallInterfaceDescriptor* descriptor = instr->descriptor();
+  CallInterfaceDescriptor descriptor = instr->descriptor();
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
   ops.Add(target, zone());
   for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op = UseFixed(instr->OperandAt(i),
-        descriptor->GetParameterRegister(i - 1));
+    LOperand* op =
+        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
     ops.Add(op, zone());
   }
 
@@ -1102,6 +1098,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+    HTailCallThroughMegamorphicCache* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* receiver_register =
+      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+  LOperand* name_register =
+      UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  // Not marked as call. It can't deoptimize, and it never returns.
+  return new (zone()) LTailCallThroughMegamorphicCache(
+      context, receiver_register, name_register);
+}
+
+
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* function = UseFixed(instr->function(), a1);
@@ -1112,14 +1121,24 @@
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   switch (instr->op()) {
-    case kMathFloor: return DoMathFloor(instr);
-    case kMathRound: return DoMathRound(instr);
-    case kMathAbs: return DoMathAbs(instr);
-    case kMathLog: return DoMathLog(instr);
-    case kMathExp: return DoMathExp(instr);
-    case kMathSqrt: return DoMathSqrt(instr);
-    case kMathPowHalf: return DoMathPowHalf(instr);
-    case kMathClz32: return DoMathClz32(instr);
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
     default:
       UNREACHABLE();
       return NULL;
@@ -1128,8 +1147,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseFixedDouble(instr->value(), f4);
   return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
 }
@@ -1143,8 +1162,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegister(instr->value());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
@@ -1163,6 +1182,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
 LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
   Representation r = instr->value()->representation();
   LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
@@ -1252,9 +1278,9 @@
 
 LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
-    ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
 
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1266,9 +1292,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
@@ -1284,9 +1310,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
@@ -1301,13 +1327,14 @@
 
 
 LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = TempRegister();
   LInstruction* result =
-      DefineAsRegister(new(zone()) LDivI(dividend, divisor));
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
   if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
       instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
       (instr->CheckFlag(HValue::kCanOverflow) &&
@@ -1351,9 +1378,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp =
@@ -1371,9 +1398,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor);
@@ -1393,14 +1420,15 @@
 
 
 LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
           dividend, divisor));
-  if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
     result = AssignEnvironment(result);
   }
   return result;
@@ -1408,9 +1436,9 @@
 
 
 LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
@@ -1423,9 +1451,9 @@
 
 
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
   LInstruction* result = DefineAsRegister(new(zone()) LModI(
@@ -1451,8 +1479,8 @@
 
 LInstruction* LChunkBuilder::DoMul(HMul* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     HValue* left = instr->BetterLeftOperand();
     HValue* right = instr->BetterRightOperand();
     LOperand* left_op;
@@ -1491,8 +1519,8 @@
     return DefineAsRegister(mul);
 
   } else if (instr->representation().IsDouble()) {
-    if (kArchVariant == kMips32r2) {
-      if (instr->UseCount() == 1 && instr->uses().value()->IsAdd()) {
+    if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+      if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
         HAdd* add = HAdd::cast(instr->uses().value());
         if (instr == add->left()) {
           // This mul is the lhs of an add. The add and mul will be folded
@@ -1515,8 +1543,8 @@
 
 LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new(zone()) LSubI(left, right);
@@ -1544,8 +1572,8 @@
 
 LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
     LAddI* add = new(zone()) LAddI(left, right);
@@ -1555,21 +1583,21 @@
     }
     return result;
   } else if (instr->representation().IsExternal()) {
-    ASSERT(instr->left()->representation().IsExternal());
-    ASSERT(instr->right()->representation().IsInteger32());
-    ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+    DCHECK(instr->left()->representation().IsExternal());
+    DCHECK(instr->right()->representation().IsInteger32());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LAddI* add = new(zone()) LAddI(left, right);
     LInstruction* result = DefineAsRegister(add);
     return result;
   } else if (instr->representation().IsDouble()) {
-    if (kArchVariant == kMips32r2) {
+    if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
       if (instr->left()->IsMul())
         return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
 
       if (instr->right()->IsMul()) {
-        ASSERT(!instr->left()->IsMul());
+        DCHECK(!instr->left()->IsMul());
         return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
       }
     }
@@ -1584,14 +1612,14 @@
   LOperand* left = NULL;
   LOperand* right = NULL;
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     left = UseRegisterAtStart(instr->BetterLeftOperand());
     right = UseOrConstantAtStart(instr->BetterRightOperand());
   } else {
-    ASSERT(instr->representation().IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     left = UseRegisterAtStart(instr->left());
     right = UseRegisterAtStart(instr->right());
   }
@@ -1600,15 +1628,16 @@
 
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  ASSERT(instr->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
   // We call a C function for double power. It can't trigger a GC.
   // We need to use fixed result register for the call.
   Representation exponent_type = instr->right()->representation();
-  ASSERT(instr->left()->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
   LOperand* left = UseFixedDouble(instr->left(), f2);
-  LOperand* right = exponent_type.IsDouble() ?
-      UseFixedDouble(instr->right(), f4) :
-      UseFixed(instr->right(), a2);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), f4)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
   LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, f0),
                     instr,
@@ -1617,8 +1646,8 @@
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), a1);
   LOperand* right = UseFixed(instr->right(), a0);
@@ -1631,15 +1660,15 @@
     HCompareNumericAndBranch* instr) {
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(r));
-    ASSERT(instr->right()->representation().Equals(r));
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseRegisterOrConstantAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
   } else {
-    ASSERT(r.IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseRegisterAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
@@ -1671,7 +1700,7 @@
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
   return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()),
                                         temp);
@@ -1679,7 +1708,7 @@
 
 
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
   return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
                                         temp);
@@ -1687,14 +1716,14 @@
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsUndetectableAndBranch(
       UseRegisterAtStart(instr->value()), TempRegister());
 }
@@ -1702,8 +1731,8 @@
 
 LInstruction* LChunkBuilder::DoStringCompareAndBranch(
     HStringCompareAndBranch* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), cp);
   LOperand* left = UseFixed(instr->left(), a1);
   LOperand* right = UseFixed(instr->right(), a0);
@@ -1715,7 +1744,7 @@
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LHasInstanceTypeAndBranch(value);
 }
@@ -1723,7 +1752,7 @@
 
 LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
     HGetCachedArrayIndex* instr)  {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
   return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
@@ -1732,7 +1761,7 @@
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
@@ -1740,7 +1769,7 @@
 
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
                                            TempRegister());
 }
@@ -1843,7 +1872,7 @@
       }
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       if (val->type().IsSmi() || val->representation().IsSmi()) {
         LOperand* value = UseRegisterAtStart(val);
         return DefineAsRegister(new(zone()) LSmiUntag(value, false));
@@ -1871,7 +1900,7 @@
       return AssignEnvironment(
           DefineAsRegister(new(zone()) LDoubleToSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       LOperand* value = UseRegister(val);
       LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
       if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
@@ -1904,7 +1933,7 @@
       }
       return result;
     } else {
-      ASSERT(to.IsDouble());
+      DCHECK(to.IsDouble());
       if (val->CheckFlag(HInstruction::kUint32)) {
         return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
       } else {
@@ -1969,7 +1998,7 @@
   } else if (input_rep.IsInteger32()) {
     return DefineAsRegister(new(zone()) LClampIToUint8(reg));
   } else {
-    ASSERT(input_rep.IsSmiOrTagged());
+    DCHECK(input_rep.IsSmiOrTagged());
     LClampTToUint8* result =
         new(zone()) LClampTToUint8(reg, TempDoubleRegister());
     return AssignEnvironment(DefineAsRegister(result));
@@ -1979,7 +2008,7 @@
 
 LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
   HValue* value = instr->value();
-  ASSERT(value->representation().IsDouble());
+  DCHECK(value->representation().IsDouble());
   return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
 }
 
@@ -2030,9 +2059,14 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* global_object = UseFixed(instr->global_object(), a0);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
   LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object);
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
   return MarkAsCall(DefineFixed(result, v0), instr);
 }
 
@@ -2084,9 +2118,15 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object = UseFixed(instr->object(), a0);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
   LInstruction* result =
-      DefineFixed(new(zone()) LLoadNamedGeneric(context, object), v0);
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
   return MarkAsCall(result, instr);
 }
 
@@ -2104,7 +2144,7 @@
 
 
 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  ASSERT(instr->key()->representation().IsSmiOrInteger32());
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   LOperand* key = UseRegisterOrConstantAtStart(instr->key());
   LInstruction* result = NULL;
@@ -2114,12 +2154,12 @@
     if (instr->representation().IsDouble()) {
       obj = UseRegister(instr->elements());
     } else {
-      ASSERT(instr->representation().IsSmiOrTagged());
+      DCHECK(instr->representation().IsSmiOrTagged());
       obj = UseRegisterAtStart(instr->elements());
     }
     result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
   } else {
-    ASSERT(
+    DCHECK(
         (instr->representation().IsInteger32() &&
          !IsDoubleOrFloatElementsKind(elements_kind)) ||
         (instr->representation().IsDouble() &&
@@ -2144,18 +2184,24 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* object = UseFixed(instr->object(), a1);
-  LOperand* key = UseFixed(instr->key(), a0);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
 
   LInstruction* result =
-      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key), v0);
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+                  v0);
   return MarkAsCall(result, instr);
 }
 
 
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_typed_elements()) {
-    ASSERT(instr->elements()->representation().IsTagged());
+    DCHECK(instr->elements()->representation().IsTagged());
     bool needs_write_barrier = instr->NeedsWriteBarrier();
     LOperand* object = NULL;
     LOperand* val = NULL;
@@ -2166,7 +2212,7 @@
       key = UseRegisterOrConstantAtStart(instr->key());
       val = UseRegister(instr->value());
     } else {
-      ASSERT(instr->value()->representation().IsSmiOrTagged());
+      DCHECK(instr->value()->representation().IsSmiOrTagged());
       if (needs_write_barrier) {
         object = UseTempRegister(instr->elements());
         val = UseTempRegister(instr->value());
@@ -2181,12 +2227,12 @@
     return new(zone()) LStoreKeyed(object, key, val);
   }
 
-  ASSERT(
+  DCHECK(
       (instr->value()->representation().IsInteger32() &&
        !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
       (instr->value()->representation().IsDouble() &&
        IsDoubleOrFloatElementsKind(instr->elements_kind())));
-  ASSERT((instr->is_fixed_typed_array() &&
+  DCHECK((instr->is_fixed_typed_array() &&
           instr->elements()->representation().IsTagged()) ||
          (instr->is_external() &&
           instr->elements()->representation().IsExternal()));
@@ -2199,13 +2245,14 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj = UseFixed(instr->object(), a2);
-  LOperand* key = UseFixed(instr->key(), a1);
-  LOperand* val = UseFixed(instr->value(), a0);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsTagged());
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
 
   return MarkAsCall(
       new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
@@ -2258,7 +2305,7 @@
   }
 
   LOperand* val;
-  if (needs_write_barrier || instr->field_representation().IsSmi()) {
+  if (needs_write_barrier) {
     val = UseTempRegister(instr->value());
   } else if (instr->field_representation().IsDouble()) {
     val = UseRegisterAtStart(instr->value());
@@ -2275,8 +2322,9 @@
 
 LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), cp);
-  LOperand* obj = UseFixed(instr->object(), a1);
-  LOperand* val = UseFixed(instr->value(), a0);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
   LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
   return MarkAsCall(result, instr);
@@ -2315,9 +2363,7 @@
 LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
   info()->MarkAsDeferredCalling();
   LOperand* context = UseAny(instr->context());
-  LOperand* size = instr->size()->IsConstant()
-      ? UseConstant(instr->size())
-      : UseTempRegister(instr->size());
+  LOperand* size = UseRegisterOrConstant(instr->size());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
   LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
@@ -2340,7 +2386,7 @@
 
 
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
-  ASSERT(argument_count_ == 0);
+  DCHECK(argument_count_ == 0);
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
   return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2353,11 +2399,11 @@
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor();
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
-    Register reg = descriptor->GetParameterRegister(index);
+    Register reg = descriptor.GetEnvironmentParameterRegister(index);
     return DefineFixed(result, reg);
   }
 }
@@ -2373,7 +2419,7 @@
   } else {
     spill_index = env_index - instr->environment()->first_local_index();
     if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
-      Abort(kTooManySpillSlotsNeededForOSR);
+      Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
   }
@@ -2449,7 +2495,7 @@
     LOperand* context = UseFixed(instr->context(), cp);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
-    ASSERT(instr->is_backwards_branch());
+    DCHECK(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
     return AssignEnvironment(
         AssignPointerMap(new(zone()) LStackCheck(context)));
@@ -2470,6 +2516,7 @@
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
   }
+  inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2485,7 +2532,7 @@
   if (env->entry()->arguments_pushed()) {
     int argument_count = env->arguments_environment()->parameter_count();
     pop = new(zone()) LDrop(argument_count);
-    ASSERT(instr->argument_delta() == -argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
   }
 
   HEnvironment* outer = current_block_->last_environment()->
@@ -2543,3 +2590,5 @@
 }
 
 } }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS
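Most of the churn in lithium-mips.cc is the mechanical ASSERT -> DCHECK rename (likewise SLOW_ASSERT -> SLOW_DCHECK and ENABLE_SLOW_ASSERTS -> ENABLE_SLOW_DCHECKS). A DCHECK-style check is compiled in for debug builds only and vanishes from release builds, so its condition must be side-effect free. A minimal sketch under the assumption of a DEBUG define; V8's real macro adds OS and message plumbing:

#include <cstdio>
#include <cstdlib>

// Debug-only check: aborts with the failing expression in debug builds and
// expands to a no-op in release builds.
#ifdef DEBUG
#define MY_DCHECK(condition)                             \
  do {                                                   \
    if (!(condition)) {                                  \
      std::fprintf(stderr, "Check failed: %s (%s:%d)\n", \
                   #condition, __FILE__, __LINE__);      \
      std::abort();                                      \
    }                                                    \
  } while (false)
#else
#define MY_DCHECK(condition) ((void)0)
#endif

int main() {
  int argument_count = 0;
  MY_DCHECK(argument_count >= 0);  // enforced in debug, free in release
  return 0;
}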
diff --git a/src/mips/lithium-mips.h b/src/mips/lithium-mips.h
index ea3a658..36e5b57 100644
--- a/src/mips/lithium-mips.h
+++ b/src/mips/lithium-mips.h
@@ -6,8 +6,8 @@
 #define V8_MIPS_LITHIUM_MIPS_H_
 
 #include "src/hydrogen.h"
-#include "src/lithium-allocator.h"
 #include "src/lithium.h"
+#include "src/lithium-allocator.h"
 #include "src/safepoint-table.h"
 #include "src/utils.h"
 
@@ -17,160 +17,162 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
-  V(AccessArgumentsAt)                          \
-  V(AddI)                                       \
-  V(Allocate)                                   \
-  V(AllocateBlockContext)                       \
-  V(ApplyArguments)                             \
-  V(ArgumentsElements)                          \
-  V(ArgumentsLength)                            \
-  V(ArithmeticD)                                \
-  V(ArithmeticT)                                \
-  V(BitI)                                       \
-  V(BoundsCheck)                                \
-  V(Branch)                                     \
-  V(CallJSFunction)                             \
-  V(CallWithDescriptor)                         \
-  V(CallFunction)                               \
-  V(CallNew)                                    \
-  V(CallNewArray)                               \
-  V(CallRuntime)                                \
-  V(CallStub)                                   \
-  V(CheckInstanceType)                          \
-  V(CheckMaps)                                  \
-  V(CheckMapValue)                              \
-  V(CheckNonSmi)                                \
-  V(CheckSmi)                                   \
-  V(CheckValue)                                 \
-  V(ClampDToUint8)                              \
-  V(ClampIToUint8)                              \
-  V(ClampTToUint8)                              \
-  V(ClassOfTestAndBranch)                       \
-  V(CompareMinusZeroAndBranch)                  \
-  V(CompareNumericAndBranch)                    \
-  V(CmpObjectEqAndBranch)                       \
-  V(CmpHoleAndBranch)                           \
-  V(CmpMapAndBranch)                            \
-  V(CmpT)                                       \
-  V(ConstantD)                                  \
-  V(ConstantE)                                  \
-  V(ConstantI)                                  \
-  V(ConstantS)                                  \
-  V(ConstantT)                                  \
-  V(ConstructDouble)                            \
-  V(Context)                                    \
-  V(DateField)                                  \
-  V(DebugBreak)                                 \
-  V(DeclareGlobals)                             \
-  V(Deoptimize)                                 \
-  V(DivByConstI)                                \
-  V(DivByPowerOf2I)                             \
-  V(DivI)                                       \
-  V(DoubleToI)                                  \
-  V(DoubleBits)                                 \
-  V(DoubleToSmi)                                \
-  V(Drop)                                       \
-  V(Dummy)                                      \
-  V(DummyUse)                                   \
-  V(FlooringDivByConstI)                        \
-  V(FlooringDivByPowerOf2I)                     \
-  V(FlooringDivI)                               \
-  V(ForInCacheArray)                            \
-  V(ForInPrepareMap)                            \
-  V(FunctionLiteral)                            \
-  V(GetCachedArrayIndex)                        \
-  V(Goto)                                       \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceTypeAndBranch)                   \
-  V(InnerAllocatedObject)                       \
-  V(InstanceOf)                                 \
-  V(InstanceOfKnownGlobal)                      \
-  V(InstructionGap)                             \
-  V(Integer32ToDouble)                          \
-  V(InvokeFunction)                             \
-  V(IsConstructCallAndBranch)                   \
-  V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
-  V(IsSmiAndBranch)                             \
-  V(IsUndetectableAndBranch)                    \
-  V(Label)                                      \
-  V(LazyBailout)                                \
-  V(LoadContextSlot)                            \
-  V(LoadRoot)                                   \
-  V(LoadFieldByIndex)                           \
-  V(LoadFunctionPrototype)                      \
-  V(LoadGlobalCell)                             \
-  V(LoadGlobalGeneric)                          \
-  V(LoadKeyed)                                  \
-  V(LoadKeyedGeneric)                           \
-  V(LoadNamedField)                             \
-  V(LoadNamedGeneric)                           \
-  V(MapEnumLength)                              \
-  V(MathAbs)                                    \
-  V(MathExp)                                    \
-  V(MathClz32)                                  \
-  V(MathFloor)                                  \
-  V(MathLog)                                    \
-  V(MathMinMax)                                 \
-  V(MathPowHalf)                                \
-  V(MathRound)                                  \
-  V(MathSqrt)                                   \
-  V(ModByConstI)                                \
-  V(ModByPowerOf2I)                             \
-  V(ModI)                                       \
-  V(MulI)                                       \
-  V(MultiplyAddD)                               \
-  V(NumberTagD)                                 \
-  V(NumberTagI)                                 \
-  V(NumberTagU)                                 \
-  V(NumberUntagD)                               \
-  V(OsrEntry)                                   \
-  V(Parameter)                                  \
-  V(Power)                                      \
-  V(PushArgument)                               \
-  V(RegExpLiteral)                              \
-  V(Return)                                     \
-  V(SeqStringGetChar)                           \
-  V(SeqStringSetChar)                           \
-  V(ShiftI)                                     \
-  V(SmiTag)                                     \
-  V(SmiUntag)                                   \
-  V(StackCheck)                                 \
-  V(StoreCodeEntry)                             \
-  V(StoreContextSlot)                           \
-  V(StoreFrameContext)                          \
-  V(StoreGlobalCell)                            \
-  V(StoreKeyed)                                 \
-  V(StoreKeyedGeneric)                          \
-  V(StoreNamedField)                            \
-  V(StoreNamedGeneric)                          \
-  V(StringAdd)                                  \
-  V(StringCharCodeAt)                           \
-  V(StringCharFromCode)                         \
-  V(StringCompareAndBranch)                     \
-  V(SubI)                                       \
-  V(TaggedToI)                                  \
-  V(ThisFunction)                               \
-  V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
-  V(TrapAllocationMemento)                      \
-  V(Typeof)                                     \
-  V(TypeofIsAndBranch)                          \
-  V(Uint32ToDouble)                             \
-  V(UnknownOSRValue)                            \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleToI)                               \
+  V(DoubleBits)                              \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathExp)                                 \
+  V(MathClz32)                               \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MultiplyAddD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PushArgument)                            \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(TailCallThroughMegamorphicCache)         \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
   V(WrapReceiver)
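The list above is an X-macro: each consumer passes its own one-argument macro as V, and the list stamps it out once per concrete Lithium instruction. As an abbreviated sketch of the two expansion sites this header relies on (the real definitions sit inside LInstruction; shown here only to make the pattern explicit):

#define DECLARE_OPCODE(type) k##type,
  enum Opcode {
    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
    kNumberOfInstructions
  };
#undef DECLARE_OPCODE

#define DECLARE_PREDICATE(type) \
  bool Is##type() const { return opcode() == k##type; }
  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE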
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {                      \
+  virtual Opcode opcode() const FINAL OVERRIDE {                            \
     return LInstruction::k##type;                                           \
   }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;   \
-  virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {               \
+  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;         \
+  virtual const char* Mnemonic() const FINAL OVERRIDE {                     \
     return mnemonic;                                                        \
   }                                                                         \
   static L##type* cast(LInstruction* instr) {                               \
-    ASSERT(instr->Is##type());                                              \
+    DCHECK(instr->Is##type());                                              \
     return reinterpret_cast<L##type*>(instr);                               \
   }
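Applied to a concrete class, the macro pins down the opcode, mnemonic, and checked downcast in one line. For example, DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") in LGoto below expands to roughly the following (line-continuation backslashes dropped, FINAL/OVERRIDE left unexpanded):

virtual Opcode opcode() const FINAL OVERRIDE { return LInstruction::kGoto; }
virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;
virtual const char* Mnemonic() const FINAL OVERRIDE { return "goto"; }
static LGoto* cast(LInstruction* instr) {
  DCHECK(instr->IsGoto());
  return reinterpret_cast<LGoto*>(instr);
}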
 
@@ -219,6 +221,9 @@
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -257,11 +262,12 @@
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator interface.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
@@ -282,7 +288,7 @@
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+  virtual bool HasResult() const FINAL OVERRIDE {
     return R != 0 && result() != NULL;
   }
   void set_result(LOperand* operand) { results_[0] = operand; }
@@ -304,11 +310,11 @@
 
  private:
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return I; }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+  virtual int TempCount() FINAL OVERRIDE { return T; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
 };
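The three template arguments fix the operand shape at compile time: R results (0 or 1, enforced by the STATIC_ASSERT above), I inputs, and T temps, all held in embedded arrays so the iterator overrides are trivial. A sketch of a hypothetical instruction on this base (LIllustrativeOp and its operands are invented for illustration; a real instruction would also need a V(IllustrativeOp) entry in the list macro above):

class LIllustrativeOp FINAL : public LTemplateInstruction<1, 2, 1> {
 public:
  LIllustrativeOp(LOperand* left, LOperand* right, LOperand* temp) {
    inputs_[0] = left;   // I = 2 inputs
    inputs_[1] = right;
    temps_[0] = temp;    // T = 1 temp
  }

  LOperand* left() { return inputs_[0]; }
  LOperand* right() { return inputs_[1]; }
  LOperand* temp() { return temps_[0]; }

  DECLARE_CONCRETE_INSTRUCTION(IllustrativeOp, "illustrative-op")
};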
 
 
@@ -323,10 +329,10 @@
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual bool IsGap() const FINAL OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
-    ASSERT(instr->IsGap());
+    DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
   }
 
@@ -360,11 +366,11 @@
 };
 
 
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -372,14 +378,14 @@
 };
 
 
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  virtual bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -388,7 +394,7 @@
 };
 
 
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LLazyBailout() : gap_instructions_size_(0) { }
 
@@ -404,14 +410,14 @@
 };
 
 
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LDummy() { }
+  LDummy() {}
   DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
 };
 
 
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDummyUse(LOperand* value) {
     inputs_[0] = value;
@@ -420,25 +426,25 @@
 };
 
 
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
  public:
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -454,16 +460,16 @@
 };
 
 
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
 
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallStub(LOperand* context) {
     inputs_[0] = context;
@@ -476,9 +482,30 @@
 };
 
 
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+    : public LTemplateInstruction<0, 3, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  explicit LTailCallThroughMegamorphicCache(LOperand* context,
+                                            LOperand* receiver,
+                                            LOperand* name) {
+    inputs_[0] = context;
+    inputs_[1] = receiver;
+    inputs_[2] = name;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* name() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+                               "tail-call-through-megamorphic-cache")
+  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
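A plausible sketch of the builder side for this new instruction, pinning its operands to the load IC calling convention (UseFixed, cp, and the LoadDescriptor register names are assumptions based on this port's conventions; treat it as illustrative, not the committed implementation):

LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
    HTailCallThroughMegamorphicCache* instr) {
  LOperand* context = UseFixed(instr->context(), cp);
  LOperand* receiver_register =
      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
  LOperand* name_register =
      UseFixed(instr->name(), LoadDescriptor::NameRegister());
  // Not marked as a call: it tail-calls and never returns here,
  // so no lazy-deopt environment is attached.
  return new (zone()) LTailCallThroughMegamorphicCache(
      context, receiver_register, name_register);
}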
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -490,7 +517,7 @@
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+  virtual bool IsControl() const FINAL OVERRIDE { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -529,7 +556,7 @@
 };
 
 
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LWrapReceiver(LOperand* receiver, LOperand* function) {
     inputs_[0] = receiver;
@@ -544,7 +571,7 @@
 };
 
 
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
@@ -565,7 +592,7 @@
 };
 
 
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
     inputs_[0] = arguments;
@@ -579,11 +606,11 @@
   LOperand* length() { return inputs_[1]; }
   LOperand* index() { return inputs_[2]; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArgumentsLength(LOperand* elements) {
     inputs_[0] = elements;
@@ -595,14 +622,14 @@
 };
 
 
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
   DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -620,7 +647,7 @@
 };
 
 
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LModByConstI(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -638,7 +665,7 @@
 };
 
 
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 3> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 3> {
  public:
   LModI(LOperand* left,
         LOperand* right) {
@@ -654,7 +681,7 @@
 };
 
 
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -672,7 +699,7 @@
 };
 
 
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDivByConstI(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -690,22 +717,24 @@
 };
 
 
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LDivI(LOperand* dividend, LOperand* divisor) {
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
     inputs_[1] = divisor;
+    temps_[0] = temp;
   }
 
   LOperand* dividend() { return inputs_[0]; }
   LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
   DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
 };
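LDivI grows from <1, 2, 0> to <1, 2, 1>: on ARM cores without hardware integer division the quotient goes through the FPU, which needs a double temp. A hedged sketch of the matching builder change (TempDoubleRegister and the SUDIV feature check are assumptions based on the ARM port; deopt plumbing is elided):

LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
  LOperand* dividend = UseRegister(instr->left());
  LOperand* divisor = UseRegister(instr->right());
  // Only allocate the FPU temp when there is no SDIV/UDIV instruction.
  LOperand* temp =
      CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
  LInstruction* result =
      DefineAsRegister(new (zone()) LDivI(dividend, divisor, temp));
  // The real builder assigns an environment only when the division can
  // deoptimize; shown unconditionally here for brevity.
  return AssignEnvironment(result);
}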
 
 
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -724,7 +753,7 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -744,7 +773,7 @@
 };
 
 
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LFlooringDivI(LOperand* dividend, LOperand* divisor) {
     inputs_[0] = dividend;
@@ -759,7 +788,7 @@
 };
 
 
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMulI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -775,7 +804,7 @@
 
 
 // Instruction for computing multiplier * multiplicand + addend.
-class LMultiplyAddD V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LMultiplyAddD(LOperand* addend, LOperand* multiplier,
                 LOperand* multiplicand) {
@@ -792,13 +821,13 @@
 };
 
 
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
 };
 
 
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCompareNumericAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -817,11 +846,11 @@
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LMathFloor(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -836,7 +865,7 @@
 };
 
 
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LMathRound(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -851,7 +880,17 @@
 };
 
 
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
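LMathFround is new here; it implements Math.fround, that is, rounding a double to the nearest float32 and widening back. The semantics the instruction must produce are just (reference sketch):

// Reference semantics for Math.fround: double -> float32 -> double.
static inline double MathFround(double input) {
  return static_cast<double>(static_cast<float>(input));
}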
+
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathAbs(LOperand* context, LOperand* value) {
     inputs_[1] = context;
@@ -866,7 +905,7 @@
 };
 
 
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathLog(LOperand* value) {
     inputs_[0] = value;
@@ -878,7 +917,7 @@
 };
 
 
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathClz32(LOperand* value) {
     inputs_[0] = value;
@@ -890,7 +929,7 @@
 };
 
 
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
  public:
   LMathExp(LOperand* value,
            LOperand* double_temp,
@@ -912,7 +951,7 @@
 };
 
 
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathSqrt(LOperand* value) {
     inputs_[0] = value;
@@ -924,7 +963,7 @@
 };
 
 
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LMathPowHalf(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -938,7 +977,7 @@
 };
 
 
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -953,7 +992,7 @@
 };
 
 
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpHoleAndBranch(LOperand* object) {
     inputs_[0] = object;
@@ -966,7 +1005,7 @@
 };
 
 
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -982,7 +1021,7 @@
 };
 
 
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsObjectAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -999,7 +1038,7 @@
 };
 
 
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1012,11 +1051,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1027,11 +1066,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1045,11 +1084,11 @@
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
  public:
   LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1067,11 +1106,11 @@
 
   Token::Value op() const { return hydrogen()->token(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1083,11 +1122,11 @@
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGetCachedArrayIndex(LOperand* value) {
     inputs_[0] = value;
@@ -1100,7 +1139,7 @@
 };
 
 
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
     : public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1113,11 +1152,11 @@
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1131,11 +1170,11 @@
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LCmpT(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1154,7 +1193,7 @@
 };
 
 
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1170,7 +1209,7 @@
 };
 
 
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -1191,7 +1230,7 @@
     return lazy_deopt_env_;
   }
   virtual void SetDeferredLazyDeoptimizationEnvironment(
-      LEnvironment* env) V8_OVERRIDE {
+      LEnvironment* env) OVERRIDE {
     lazy_deopt_env_ = env;
   }
 
@@ -1200,7 +1239,7 @@
 };
 
 
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LBoundsCheck(LOperand* index, LOperand* length) {
     inputs_[0] = index;
@@ -1215,7 +1254,7 @@
 };
 
 
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1232,7 +1271,7 @@
 };
 
 
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
       : op_(op), can_deopt_(can_deopt) {
@@ -1253,7 +1292,7 @@
 };
 
 
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSubI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1268,7 +1307,7 @@
 };
 
 
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1277,7 +1316,7 @@
 };
 
 
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1286,7 +1325,7 @@
 };
 
 
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1295,7 +1334,7 @@
 };
 
 
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1306,7 +1345,7 @@
 };
 
 
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1317,7 +1356,7 @@
 };
 
 
-class LBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1328,11 +1367,11 @@
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LCmpMapAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1349,7 +1388,7 @@
 };
 
 
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMapEnumLength(LOperand* value) {
     inputs_[0] = value;
@@ -1361,7 +1400,7 @@
 };
 
 
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
     inputs_[0] = date;
@@ -1380,7 +1419,7 @@
 };
 
 
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
     inputs_[0] = string;
@@ -1395,7 +1434,7 @@
 };
 
 
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LSeqStringSetChar(LOperand* context,
                     LOperand* string,
@@ -1416,7 +1455,7 @@
 };
 
 
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LAddI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1431,7 +1470,7 @@
 };
 
 
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathMinMax(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1446,7 +1485,7 @@
 };
 
 
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LPower(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1461,7 +1500,7 @@
 };
 
 
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
       : op_(op) {
@@ -1473,18 +1512,18 @@
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticD;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LArithmeticT(Token::Value op,
                LOperand* context,
@@ -1501,16 +1540,16 @@
   LOperand* right() { return inputs_[2]; }
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const V8_FINAL  { return LInstruction::kArithmeticT; }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual Opcode opcode() const FINAL { return LInstruction::kArithmeticT; }
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
     inputs_[0] = value;
@@ -1524,7 +1563,7 @@
     return parameter_count()->IsConstantOperand();
   }
   LConstantOperand* constant_parameter_count() {
-    ASSERT(has_constant_parameter_count());
+    DCHECK(has_constant_parameter_count());
     return LConstantOperand::cast(parameter_count());
   }
   LOperand* parameter_count() { return inputs_[2]; }
@@ -1533,7 +1572,7 @@
 };
 
 
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedField(LOperand* object) {
     inputs_[0] = object;
@@ -1546,15 +1585,17 @@
 };
 
 
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object) {
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1563,7 +1604,7 @@
 };
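The extra temp added to the -generic loads (here and in LLoadKeyedGeneric / LLoadGlobalGeneric below) carries the type-feedback vector for the vector-based load ICs. A minimal sketch of how codegen would consume it, assuming vector ICs are enabled (illustrative; the real code lives in the port's lithium codegen):

// Materialize the feedback vector into the reserved temp register
// before calling the load IC stub.
Register vector = ToRegister(instr->temp_vector());
__ Move(vector, instr->hydrogen()->feedback_vector());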
 
 
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) {
     inputs_[0] = function;
@@ -1576,7 +1617,7 @@
 };
 
 
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
   DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1585,7 +1626,7 @@
 };
 
 
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyed(LOperand* elements, LOperand* key) {
     inputs_[0] = elements;
@@ -1615,38 +1656,45 @@
 };
 
 
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
  public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key) {
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
     inputs_[2] = key;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
 
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = global_object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1656,7 +1704,7 @@
 };
 
 
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LStoreGlobalCell(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1671,7 +1719,7 @@
 };
 
 
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
     inputs_[0] = context;
@@ -1688,7 +1736,7 @@
 };
 
 
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LStoreContextSlot(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -1703,11 +1751,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
     inputs_[0] = value;
@@ -1719,7 +1767,7 @@
 };
 
 
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LDrop(int count) : count_(count) { }
 
@@ -1732,7 +1780,7 @@
 };
 
 
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LStoreCodeEntry(LOperand* function, LOperand* code_object) {
     inputs_[0] = function;
@@ -1749,7 +1797,7 @@
 };
 
 
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
     inputs_[0] = base_object;
@@ -1759,27 +1807,27 @@
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
 
 
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
   DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
   DECLARE_HYDROGEN_ACCESSOR(Context)
 };
 
 
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LDeclareGlobals(LOperand* context) {
     inputs_[0] = context;
@@ -1792,7 +1840,7 @@
 };
 
 
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallJSFunction(LOperand* function) {
     inputs_[0] = function;
@@ -1803,48 +1851,47 @@
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  public:
-  LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
-                      const ZoneList<LOperand*>& operands,
-                      Zone* zone)
-    : descriptor_(descriptor),
-      inputs_(descriptor->environment_length() + 1, zone) {
-    ASSERT(descriptor->environment_length() + 1 == operands.length());
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
     inputs_.AddAll(operands, zone);
   }
 
   LOperand* target() const { return inputs_[0]; }
 
-  const CallInterfaceDescriptor* descriptor() { return descriptor_; }
+  const CallInterfaceDescriptor descriptor() { return descriptor_; }
 
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
   DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
-  const CallInterfaceDescriptor* descriptor_;
+  CallInterfaceDescriptor descriptor_;
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+  virtual int TempCount() FINAL OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
 };
 
 
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInvokeFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1857,13 +1904,13 @@
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1880,7 +1927,7 @@
 };
 
 
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNew(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1893,13 +1940,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1912,13 +1959,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallRuntime(LOperand* context) {
     inputs_[0] = context;
@@ -1929,7 +1976,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -1939,7 +1986,7 @@
 };
 
 
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LInteger32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -1951,7 +1998,7 @@
 };
 
 
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LUint32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -1963,7 +2010,7 @@
 };
 
 
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -1979,7 +2026,7 @@
 };
 
 
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -1995,7 +2042,7 @@
 };
 
 
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
     inputs_[0] = value;
@@ -2012,7 +2059,7 @@
 };
 
 
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2028,7 +2075,7 @@
 
 
 // Sometimes truncating conversion from a double to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToI(LOperand* value) {
     inputs_[0] = value;
@@ -2044,7 +2091,7 @@
 
 
 // Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LTaggedToI(LOperand* value,
              LOperand* temp,
@@ -2065,7 +2112,7 @@
 };
 
 
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LSmiTag(LOperand* value) {
     inputs_[0] = value;
@@ -2078,7 +2125,7 @@
 };
 
 
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LNumberUntagD(LOperand* value) {
     inputs_[0] = value;
@@ -2091,7 +2138,7 @@
 };
 
 
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LSmiUntag(LOperand* value, bool needs_check)
       : needs_check_(needs_check) {
@@ -2108,7 +2155,7 @@
 };
 
 
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
     inputs_[0] = object;
@@ -2123,7 +2170,7 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2131,7 +2178,7 @@
 };
 
 
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
     inputs_[0] = context;
@@ -2146,14 +2193,14 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
     inputs_[0] = object;
@@ -2178,13 +2225,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
 
 
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyedGeneric(LOperand* context,
                      LOperand* obj,
@@ -2204,13 +2251,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* context,
@@ -2228,7 +2275,7 @@
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2239,7 +2286,7 @@
 };
 
 
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LTrapAllocationMemento(LOperand* object,
                          LOperand* temp) {
@@ -2255,7 +2302,7 @@
 };
 
 
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -2273,7 +2320,7 @@
 
 
 
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
     inputs_[0] = context;
@@ -2290,7 +2337,7 @@
 };
 
 
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
     inputs_[0] = context;
@@ -2305,7 +2352,7 @@
 };
 
 
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckValue(LOperand* value) {
     inputs_[0] = value;
@@ -2318,7 +2365,7 @@
 };
 
 
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckInstanceType(LOperand* value) {
     inputs_[0] = value;
@@ -2331,7 +2378,7 @@
 };
 
 
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckMaps(LOperand* value = NULL) {
     inputs_[0] = value;
@@ -2344,7 +2391,7 @@
 };
 
 
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2356,7 +2403,7 @@
 };
 
 
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2369,7 +2416,7 @@
 };
 
 
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LClampDToUint8(LOperand* unclamped, LOperand* temp) {
     inputs_[0] = unclamped;
@@ -2383,7 +2430,7 @@
 };
 
 
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampIToUint8(LOperand* unclamped) {
     inputs_[0] = unclamped;
@@ -2395,7 +2442,7 @@
 };
 
 
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LClampTToUint8(LOperand* unclamped, LOperand* temp) {
     inputs_[0] = unclamped;
@@ -2409,7 +2456,7 @@
 };
 
 
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleBits(LOperand* value) {
     inputs_[0] = value;
@@ -2422,7 +2469,7 @@
 };
 
 
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LConstructDouble(LOperand* hi, LOperand* lo) {
     inputs_[0] = hi;
@@ -2436,7 +2483,7 @@
 };
 
 
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 2> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
  public:
   LAllocate(LOperand* context,
             LOperand* size,
@@ -2458,7 +2505,7 @@
 };
 
 
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LRegExpLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2471,7 +2518,7 @@
 };
 
 
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFunctionLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2484,7 +2531,7 @@
 };
 
 
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LToFastProperties(LOperand* value) {
     inputs_[0] = value;
@@ -2497,7 +2544,7 @@
 };
 
 
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -2511,7 +2558,7 @@
 };
 
 
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -2524,11 +2571,11 @@
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
  public:
   explicit LIsConstructCallAndBranch(LOperand* temp) {
     temps_[0] = temp;
@@ -2541,18 +2588,18 @@
 };
 
 
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
 
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStackCheck(LOperand* context) {
     inputs_[0] = context;
@@ -2570,7 +2617,7 @@
 };
 
 
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LForInPrepareMap(LOperand* context, LOperand* object) {
     inputs_[0] = context;
@@ -2584,7 +2631,7 @@
 };
 
 
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LForInCacheArray(LOperand* map) {
     inputs_[0] = map;
@@ -2600,7 +2647,7 @@
 };
 
 
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LCheckMapValue(LOperand* value, LOperand* map) {
     inputs_[0] = value;
@@ -2614,7 +2661,7 @@
 };
 
 
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadFieldByIndex(LOperand* object, LOperand* index) {
     inputs_[0] = object;
@@ -2658,7 +2705,7 @@
 
 
 class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
  public:
   LPlatformChunk(CompilationInfo* info, HGraph* graph)
       : LChunk(info, graph) { }
@@ -2668,20 +2715,14 @@
 };
 
 
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
  public:
   LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
-      : LChunkBuilderBase(graph->zone()),
-        chunk_(NULL),
-        info_(info),
-        graph_(graph),
-        status_(UNUSED),
+      : LChunkBuilderBase(info, graph),
         current_instruction_(NULL),
         current_block_(NULL),
         next_block_(NULL),
-        allocator_(allocator) { }
-
-  Isolate* isolate() const { return graph_->isolate(); }
+        allocator_(allocator) {}
 
   // Build the sequence for the graph.
   LPlatformChunk* Build();
@@ -2697,6 +2738,7 @@
 
   LInstruction* DoMathFloor(HUnaryMathOperation* instr);
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
@@ -2714,24 +2756,6 @@
   LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
 
  private:
-  enum Status {
-    UNUSED,
-    BUILDING,
-    DONE,
-    ABORTED
-  };
-
-  LPlatformChunk* chunk() const { return chunk_; }
-  CompilationInfo* info() const { return info_; }
-  HGraph* graph() const { return graph_; }
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_building() const { return status_ == BUILDING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  void Abort(BailoutReason reason);
-
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(DoubleRegister reg);
@@ -2773,7 +2797,7 @@
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2817,10 +2841,6 @@
   LInstruction* DoArithmeticT(Token::Value op,
                               HBinaryOperation* instr);
 
-  LPlatformChunk* chunk_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
   HBasicBlock* next_block_;
diff --git a/src/mips/macro-assembler-mips.cc b/src/mips/macro-assembler-mips.cc
index 45ba4a9..604293b 100644
--- a/src/mips/macro-assembler-mips.cc
+++ b/src/mips/macro-assembler-mips.cc
@@ -8,6 +8,8 @@
 
 #if V8_TARGET_ARCH_MIPS
 
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/cpu-profiler.h"
@@ -32,7 +34,7 @@
 void MacroAssembler::Load(Register dst,
                           const MemOperand& src,
                           Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8()) {
     lb(dst, src);
   } else if (r.IsUInteger8()) {
@@ -50,7 +52,7 @@
 void MacroAssembler::Store(Register src,
                            const MemOperand& dst,
                            Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8() || r.IsUInteger8()) {
     sb(src, dst);
   } else if (r.IsInteger16() || r.IsUInteger16()) {
@@ -101,7 +103,7 @@
   // Safepoints expect a block of kNumSafepointRegisters values on the
   // stack, so adjust the stack for unsaved registers.
   const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
-  ASSERT(num_unsaved >= 0);
+  DCHECK(num_unsaved >= 0);
   if (num_unsaved > 0) {
     Subu(sp, sp, Operand(num_unsaved * kPointerSize));
   }
@@ -118,32 +120,6 @@
 }
 
 
-void MacroAssembler::PushSafepointRegistersAndDoubles() {
-  PushSafepointRegisters();
-  Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
-  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
-    FPURegister reg = FPURegister::FromAllocationIndex(i);
-    sdc1(reg, MemOperand(sp, i * kDoubleSize));
-  }
-}
-
-
-void MacroAssembler::PopSafepointRegistersAndDoubles() {
-  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) {
-    FPURegister reg = FPURegister::FromAllocationIndex(i);
-    ldc1(reg, MemOperand(sp, i * kDoubleSize));
-  }
-  Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
-  PopSafepointRegisters();
-}
-
-
-void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
-                                                             Register dst) {
-  sw(src, SafepointRegistersAndDoublesSlot(dst));
-}
-
-
 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
   sw(src, SafepointRegisterSlot(dst));
 }
@@ -179,7 +155,7 @@
                                 Register scratch,
                                 Condition cc,
                                 Label* branch) {
-  ASSERT(cc == eq || cc == ne);
+  DCHECK(cc == eq || cc == ne);
   And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
   Branch(branch, cc, scratch,
          Operand(ExternalReference::new_space_start(isolate())));
@@ -196,7 +172,7 @@
     RememberedSetAction remembered_set_action,
     SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
-  ASSERT(!AreAliased(value, dst, t8, object));
+  DCHECK(!AreAliased(value, dst, t8, object));
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis.
   Label done;
@@ -208,7 +184,7 @@
 
   // Although the object register is tagged, the offset is relative to the start
   // of the object, so the offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
+  DCHECK(IsAligned(offset, kPointerSize));
 
   Addu(dst, object, Operand(offset - kHeapObjectTag));
   if (emit_debug_code()) {
@@ -233,8 +209,8 @@
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(value, Operand(BitCast<int32_t>(kZapValue + 4)));
-    li(dst, Operand(BitCast<int32_t>(kZapValue + 8)));
+    li(value, Operand(bit_cast<int32_t>(kZapValue + 4)));
+    li(dst, Operand(bit_cast<int32_t>(kZapValue + 8)));
   }
 }
 
@@ -247,7 +223,7 @@
                                        RAStatus ra_status,
                                        SaveFPRegsMode fp_mode) {
   if (emit_debug_code()) {
-    ASSERT(!dst.is(at));
+    DCHECK(!dst.is(at));
     lw(dst, FieldMemOperand(map, HeapObject::kMapOffset));
     Check(eq,
           kWrongAddressOrValuePassedToRecordWrite,
@@ -259,10 +235,6 @@
     return;
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  // TODO(mstarzinger): Dynamic counter missing.
-
   if (emit_debug_code()) {
     lw(at, FieldMemOperand(object, HeapObject::kMapOffset));
     Check(eq,
@@ -305,11 +277,15 @@
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(dst, Operand(BitCast<int32_t>(kZapValue + 12)));
-    li(map, Operand(BitCast<int32_t>(kZapValue + 16)));
+    li(dst, Operand(bit_cast<int32_t>(kZapValue + 12)));
+    li(map, Operand(bit_cast<int32_t>(kZapValue + 16)));
   }
 }
 
@@ -326,8 +302,8 @@
     RememberedSetAction remembered_set_action,
     SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
-  ASSERT(!AreAliased(object, address, value, t8));
-  ASSERT(!AreAliased(object, address, value, t9));
+  DCHECK(!AreAliased(object, address, value, t8));
+  DCHECK(!AreAliased(object, address, value, t9));
 
   if (emit_debug_code()) {
     lw(at, MemOperand(address));
@@ -340,16 +316,12 @@
     return;
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  // TODO(mstarzinger): Dynamic counter missing.
-
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
 
   if (smi_check == INLINE_SMI_CHECK) {
-    ASSERT_EQ(0, kSmiTag);
+    DCHECK_EQ(0, kSmiTag);
     JumpIfSmi(value, &done);
   }
 
@@ -379,11 +351,16 @@
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
+                   value);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    li(address, Operand(BitCast<int32_t>(kZapValue + 12)));
-    li(value, Operand(BitCast<int32_t>(kZapValue + 16)));
+    li(address, Operand(bit_cast<int32_t>(kZapValue + 12)));
+    li(value, Operand(bit_cast<int32_t>(kZapValue + 16)));
   }
 }
 
@@ -416,12 +393,11 @@
   if (and_then == kFallThroughAtEnd) {
     Branch(&done, eq, t8, Operand(zero_reg));
   } else {
-    ASSERT(and_then == kReturnAtEnd);
+    DCHECK(and_then == kReturnAtEnd);
     Ret(eq, t8, Operand(zero_reg));
   }
   push(ra);
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(isolate(), fp_mode);
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
   CallStub(&store_buffer_overflow);
   pop(ra);
   bind(&done);
@@ -440,9 +416,9 @@
                                             Label* miss) {
   Label same_contexts;
 
-  ASSERT(!holder_reg.is(scratch));
-  ASSERT(!holder_reg.is(at));
-  ASSERT(!scratch.is(at));
+  DCHECK(!holder_reg.is(scratch));
+  DCHECK(!holder_reg.is(at));
+  DCHECK(!scratch.is(at));
 
   // Load current lexical context from the stack frame.
   lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
@@ -505,6 +481,9 @@
 }
 
 
+// Compute the hash code from the untagged key.  This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stubs-hydrogen.cc.
 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
   // First of all we assign the hash seed to scratch.
   LoadRoot(scratch, Heap::kHashSeedRootIndex);
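
For reference, the hash that GetNumberHash mirrors in assembly is a
Jenkins-style 32-bit integer mix. A host-side sketch, written from memory of
utils.h at this revision (the constants there are authoritative, so treat
these as illustrative):

#include <cstdint>

// Seeded integer hash; GetNumberHash computes the same mix in assembly.
static uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;
  hash = ~hash + (hash << 15);  // hash = (hash << 15) - hash - 1
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;           // hash = hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash & 0x3fffffff;     // keep the result in Smi range
}
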
@@ -594,7 +573,7 @@
     and_(reg2, reg2, reg1);
 
     // Scale the index by multiplying by the element size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
     sll(at, reg2, 1);  // 2x.
     addu(reg2, reg2, at);  // reg2 = reg2 * 3.
 
@@ -637,7 +616,7 @@
       addiu(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
-      ASSERT(!rs.is(at));
+      DCHECK(!rs.is(at));
       li(at, rt);
       addu(rd, rs, at);
     }
@@ -653,7 +632,7 @@
       addiu(rd, rs, -rt.imm32_);  // No subiu instr, use addiu(x, y, -imm).
     } else {
       // li handles the relocation.
-      ASSERT(!rs.is(at));
+      DCHECK(!rs.is(at));
       li(at, rt);
       subu(rd, rs, at);
     }
@@ -663,7 +642,7 @@
 
 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
-    if (kArchVariant == kLoongson) {
+    if (IsMipsArchVariant(kLoongson)) {
       mult(rs, rt.rm());
       mflo(rd);
     } else {
@@ -671,9 +650,9 @@
     }
   } else {
     // li handles the relocation.
-    ASSERT(!rs.is(at));
+    DCHECK(!rs.is(at));
     li(at, rt);
-    if (kArchVariant == kLoongson) {
+    if (IsMipsArchVariant(kLoongson)) {
       mult(rs, at);
       mflo(rd);
     } else {
@@ -683,12 +662,77 @@
 }
 
 
+void MacroAssembler::Mul(Register rd_hi, Register rd_lo,
+    Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      mult(rs, rt.rm());
+      mflo(rd_lo);
+      mfhi(rd_hi);
+    } else {
+      if (rd_lo.is(rs)) {
+        DCHECK(!rd_hi.is(rs));
+        DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+        muh(rd_hi, rs, rt.rm());
+        mul(rd_lo, rs, rt.rm());
+      } else {
+        DCHECK(!rd_hi.is(rt.rm()) && !rd_lo.is(rt.rm()));
+        mul(rd_lo, rs, rt.rm());
+        muh(rd_hi, rs, rt.rm());
+      }
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      mult(rs, at);
+      mflo(rd_lo);
+      mfhi(rd_hi);
+    } else {
+      if (rd_lo.is(rs)) {
+        DCHECK(!rd_hi.is(rs));
+        DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+        muh(rd_hi, rs, at);
+        mul(rd_lo, rs, at);
+      } else {
+        DCHECK(!rd_hi.is(at) && !rd_lo.is(at));
+        mul(rd_lo, rs, at);
+        muh(rd_hi, rs, at);
+      }
+    }
+  }
+}
+
+
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      mult(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      muh(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      mult(rs, at);
+      mfhi(rd);
+    } else {
+      muh(rd, rs, at);
+    }
+  }
+}
+
+
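
Note on the r6 paths above: when rd_lo aliases rs, muh is emitted first so
the low-word write cannot clobber an input the high-word multiply still
needs. Both generations compute the same 64-bit product; a minimal host-side
sketch of Mulh's result (helper name hypothetical):

#include <cstdint>

// High 32 bits of the signed 64-bit product, as produced by mult+mfhi
// (pre-r6) or muh (r6).
static int32_t MulhReference(int32_t rs, int32_t rt) {
  int64_t product = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
  return static_cast<int32_t>(static_cast<uint64_t>(product) >> 32);
}
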
 void MacroAssembler::Mult(Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     mult(rs, rt.rm());
   } else {
     // li handles the relocation.
-    ASSERT(!rs.is(at));
+    DCHECK(!rs.is(at));
     li(at, rt);
     mult(rs, at);
   }
@@ -700,7 +744,7 @@
     multu(rs, rt.rm());
   } else {
     // li handles the relocation.
-    ASSERT(!rs.is(at));
+    DCHECK(!rs.is(at));
     li(at, rt);
     multu(rs, at);
   }
@@ -712,19 +756,68 @@
     div(rs, rt.rm());
   } else {
     // li handles the relocation.
-    ASSERT(!rs.is(at));
+    DCHECK(!rs.is(at));
     li(at, rt);
     div(rs, at);
   }
 }
 
 
+void MacroAssembler::Div(Register rem, Register res,
+    Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      div(rs, rt.rm());
+      mflo(res);
+      mfhi(rem);
+    } else {
+      div(res, rs, rt.rm());
+      mod(rem, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      div(rs, at);
+      mflo(res);
+      mfhi(rem);
+    } else {
+      div(res, rs, at);
+      mod(rem, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (!IsMipsArchVariant(kMips32r6)) {
+      div(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      mod(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      div(rs, at);
+      mfhi(rd);
+    } else {
+      mod(rd, rs, at);
+    }
+  }
+}
+
+
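
Pre-r6, a single div leaves the quotient in LO and the remainder in HI; r6
retires HI/LO and provides separate div/mod encodings, hence the fork on the
architecture variant in Div and Mod. The reference semantics as a host-side
sketch (helper name hypothetical; MIPS leaves overflow and divide-by-zero
unpredictable, so the sketch guards them explicitly):

#include <cstdint>

// Truncating signed division: the quotient rounds toward zero and the
// remainder takes the sign of the dividend, matching div+mflo/mfhi (pre-r6)
// and div/mod (r6).
static void DivModReference(int32_t rs, int32_t rt,
                            int32_t* res, int32_t* rem) {
  if (rt == 0 || (rs == INT32_MIN && rt == -1)) {
    *res = 0;  // unpredictable on hardware; callers must rule these out
    *rem = 0;
    return;
  }
  *res = rs / rt;
  *rem = rs % rt;
}
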
 void MacroAssembler::Divu(Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     divu(rs, rt.rm());
   } else {
     // li handles the relocation.
-    ASSERT(!rs.is(at));
+    DCHECK(!rs.is(at));
     li(at, rt);
     divu(rs, at);
   }
@@ -739,7 +832,7 @@
       andi(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
-      ASSERT(!rs.is(at));
+      DCHECK(!rs.is(at));
       li(at, rt);
       and_(rd, rs, at);
     }
@@ -755,7 +848,7 @@
       ori(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
-      ASSERT(!rs.is(at));
+      DCHECK(!rs.is(at));
       li(at, rt);
       or_(rd, rs, at);
     }
@@ -771,7 +864,7 @@
       xori(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
-      ASSERT(!rs.is(at));
+      DCHECK(!rs.is(at));
       li(at, rt);
       xor_(rd, rs, at);
     }
@@ -784,7 +877,7 @@
     nor(rd, rs, rt.rm());
   } else {
     // li handles the relocation.
-    ASSERT(!rs.is(at));
+    DCHECK(!rs.is(at));
     li(at, rt);
     nor(rd, rs, at);
   }
@@ -792,9 +885,9 @@
 
 
 void MacroAssembler::Neg(Register rs, const Operand& rt) {
-  ASSERT(rt.is_reg());
-  ASSERT(!at.is(rs));
-  ASSERT(!at.is(rt.rm()));
+  DCHECK(rt.is_reg());
+  DCHECK(!at.is(rs));
+  DCHECK(!at.is(rt.rm()));
   li(at, -1);
   xor_(rs, rt.rm(), at);
 }
@@ -808,7 +901,7 @@
       slti(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
-      ASSERT(!rs.is(at));
+      DCHECK(!rs.is(at));
       li(at, rt);
       slt(rd, rs, at);
     }
@@ -824,7 +917,7 @@
       sltiu(rd, rs, rt.imm32_);
     } else {
       // li handles the relocation.
-      ASSERT(!rs.is(at));
+      DCHECK(!rs.is(at));
       li(at, rt);
       sltu(rd, rs, at);
     }
@@ -833,7 +926,7 @@
 
 
 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
-  if (kArchVariant == kMips32r2) {
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
     if (rt.is_reg()) {
       rotrv(rd, rs, rt.rm());
     } else {
@@ -859,7 +952,7 @@
 
 
 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
-  if (kArchVariant == kLoongson) {
+  if (IsMipsArchVariant(kLoongson)) {
     lw(zero_reg, rs);
   } else {
     pref(hint, rs);
@@ -886,7 +979,7 @@
   if (value->IsSmi()) {
     li(dst, Operand(value), mode);
   } else {
-    ASSERT(value->IsHeapObject());
+    DCHECK(value->IsHeapObject());
     if (isolate()->heap()->InNewSpace(*value)) {
       Handle<Cell> cell = isolate()->factory()->NewCell(value);
       li(dst, Operand(cell));
@@ -899,7 +992,7 @@
 
 
 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
-  ASSERT(!j.is_reg());
+  DCHECK(!j.is_reg());
   BlockTrampolinePoolScope block_trampoline_pool(this);
   if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
     // Normal load of an immediate value which does not need Relocation Info.
@@ -1052,10 +1145,10 @@
                          Register rs,
                          uint16_t pos,
                          uint16_t size) {
-  ASSERT(pos < 32);
-  ASSERT(pos + size < 33);
+  DCHECK(pos < 32);
+  DCHECK(pos + size < 33);
 
-  if (kArchVariant == kMips32r2) {
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
     ext_(rt, rs, pos, size);
   } else {
     // Move rs to rt and shift it left then right to get the
@@ -1075,14 +1168,14 @@
                          Register rs,
                          uint16_t pos,
                          uint16_t size) {
-  ASSERT(pos < 32);
-  ASSERT(pos + size <= 32);
-  ASSERT(size != 0);
+  DCHECK(pos < 32);
+  DCHECK(pos + size <= 32);
+  DCHECK(size != 0);
 
-  if (kArchVariant == kMips32r2) {
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
     ins_(rt, rs, pos, size);
   } else {
-    ASSERT(!rt.is(t8) && !rs.is(t8));
+    DCHECK(!rt.is(t8) && !rs.is(t8));
     Subu(at, zero_reg, Operand(1));
     srl(at, at, 32 - size);
     and_(t8, rs, at);
@@ -1111,9 +1204,9 @@
   // We do this by converting rs minus the MSB to avoid sign conversion,
   // then adding 2^31 to the result (if needed).
 
-  ASSERT(!fd.is(scratch));
-  ASSERT(!rs.is(t9));
-  ASSERT(!rs.is(at));
+  DCHECK(!fd.is(scratch));
+  DCHECK(!rs.is(t9));
+  DCHECK(!rs.is(at));
 
   // Save rs's MSB to t9.
   Ext(t9, rs, 31, 1);
@@ -1133,8 +1226,8 @@
 
   // Load 2^31 into f20 as its float representation.
   li(at, 0x41E00000);
-  mtc1(at, FPURegister::from_code(scratch.code() + 1));
   mtc1(zero_reg, scratch);
+  Mthc1(at, scratch);
   // Add it to fd.
   add_d(fd, fd, scratch);
 
@@ -1151,10 +1244,10 @@
 
 
 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
-  if (kArchVariant == kLoongson && fd.is(fs)) {
-    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+    Mfhc1(t8, fs);
     trunc_w_d(fd, fs);
-    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+    Mthc1(t8, fs);
   } else {
     trunc_w_d(fd, fs);
   }
@@ -1162,10 +1255,10 @@
 
 
 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
-  if (kArchVariant == kLoongson && fd.is(fs)) {
-    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+    Mfhc1(t8, fs);
     round_w_d(fd, fs);
-    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+    Mthc1(t8, fs);
   } else {
     round_w_d(fd, fs);
   }
@@ -1173,10 +1266,10 @@
 
 
 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
-  if (kArchVariant == kLoongson && fd.is(fs)) {
-    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+    Mfhc1(t8, fs);
     floor_w_d(fd, fs);
-    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+    Mthc1(t8, fs);
   } else {
     floor_w_d(fd, fs);
   }
@@ -1184,10 +1277,10 @@
 
 
 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
-  if (kArchVariant == kLoongson && fd.is(fs)) {
-    mfc1(t8, FPURegister::from_code(fs.code() + 1));
+  if (IsMipsArchVariant(kLoongson) && fd.is(fs)) {
+    Mfhc1(t8, fs);
     ceil_w_d(fd, fs);
-    mtc1(t8, FPURegister::from_code(fs.code() + 1));
+    Mthc1(t8, fs);
   } else {
     ceil_w_d(fd, fs);
   }
@@ -1197,13 +1290,13 @@
 void MacroAssembler::Trunc_uw_d(FPURegister fd,
                                 Register rs,
                                 FPURegister scratch) {
-  ASSERT(!fd.is(scratch));
-  ASSERT(!rs.is(at));
+  DCHECK(!fd.is(scratch));
+  DCHECK(!rs.is(at));
 
   // Load 2^31 into scratch as its float representation.
   li(at, 0x41E00000);
-  mtc1(at, FPURegister::from_code(scratch.code() + 1));
   mtc1(zero_reg, scratch);
+  Mthc1(at, scratch);
   // Test if scratch > fd.
   // If fd < 2^31 we can convert it normally.
   Label simple_convert;
@@ -1227,6 +1320,24 @@
 }
 
 
+void MacroAssembler::Mthc1(Register rt, FPURegister fs) {
+  if (IsFp64Mode()) {
+    mthc1(rt, fs);
+  } else {
+    mtc1(rt, fs.high());
+  }
+}
+
+
+void MacroAssembler::Mfhc1(Register rt, FPURegister fs) {
+  if (IsFp64Mode()) {
+    mfhc1(rt, fs);
+  } else {
+    mfc1(rt, fs.high());
+  }
+}
+
+
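
Mthc1/Mfhc1 hide the two FPU register models: with FR=1 (fp64) the upper
word of a double register is reached directly via mthc1/mfhc1, while with
FR=0 it lives in the odd half of an even/odd pair (fs.high()). The
0x41E00000 that the conversion helpers above materialize is exactly such an
upper word: the high half of the IEEE-754 encoding of 2^31. A standalone
check:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  double two_pow_31 = 2147483648.0;  // the bias used by the uw conversions
  uint64_t bits;
  std::memcpy(&bits, &two_pow_31, sizeof(bits));
  assert(static_cast<uint32_t>(bits >> 32) == 0x41E00000u);  // high word
  assert(static_cast<uint32_t>(bits) == 0u);                 // low word
  return 0;
}
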
 void MacroAssembler::BranchF(Label* target,
                              Label* nan,
                              Condition cc,
@@ -1239,52 +1350,106 @@
     return;
   }
 
-  ASSERT(nan || target);
+  DCHECK(nan || target);
   // Check for unordered (NaN) cases.
   if (nan) {
-    c(UN, D, cmp1, cmp2);
-    bc1t(nan);
+    if (!IsMipsArchVariant(kMips32r6)) {
+      c(UN, D, cmp1, cmp2);
+      bc1t(nan);
+    } else {
+      // Use kDoubleCompareReg for the comparison result. It has to be
+      // unavailable to the Lithium register allocator.
+      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+      cmp(UN, L, kDoubleCompareReg, cmp1, cmp2);
+      bc1nez(nan, kDoubleCompareReg);
+    }
   }
 
-  if (target) {
-    // Here NaN cases were either handled by this function or are assumed to
-    // have been handled by the caller.
-    // Unsigned conditions are treated as their signed counterpart.
-    switch (cc) {
-      case lt:
-        c(OLT, D, cmp1, cmp2);
-        bc1t(target);
-        break;
-      case gt:
-        c(ULE, D, cmp1, cmp2);
-        bc1f(target);
-        break;
-      case ge:
-        c(ULT, D, cmp1, cmp2);
-        bc1f(target);
-        break;
-      case le:
-        c(OLE, D, cmp1, cmp2);
-        bc1t(target);
-        break;
-      case eq:
-        c(EQ, D, cmp1, cmp2);
-        bc1t(target);
-        break;
-      case ueq:
-        c(UEQ, D, cmp1, cmp2);
-        bc1t(target);
-        break;
-      case ne:
-        c(EQ, D, cmp1, cmp2);
-        bc1f(target);
-        break;
-      case nue:
-        c(UEQ, D, cmp1, cmp2);
-        bc1f(target);
-        break;
-      default:
-        CHECK(0);
+  if (!IsMipsArchVariant(kMips32r6)) {
+    if (target) {
+      // Here NaN cases were either handled by this function or are assumed to
+      // have been handled by the caller.
+      switch (cc) {
+        case lt:
+          c(OLT, D, cmp1, cmp2);
+          bc1t(target);
+          break;
+        case gt:
+          c(ULE, D, cmp1, cmp2);
+          bc1f(target);
+          break;
+        case ge:
+          c(ULT, D, cmp1, cmp2);
+          bc1f(target);
+          break;
+        case le:
+          c(OLE, D, cmp1, cmp2);
+          bc1t(target);
+          break;
+        case eq:
+          c(EQ, D, cmp1, cmp2);
+          bc1t(target);
+          break;
+        case ueq:
+          c(UEQ, D, cmp1, cmp2);
+          bc1t(target);
+          break;
+        case ne:
+          c(EQ, D, cmp1, cmp2);
+          bc1f(target);
+          break;
+        case nue:
+          c(UEQ, D, cmp1, cmp2);
+          bc1f(target);
+          break;
+        default:
+          CHECK(0);
+      }
+    }
+  } else {
+    if (target) {
+      // Here NaN cases were either handled by this function or are assumed to
+      // have been handled by the caller.
+      // Unsigned conditions are treated as their signed counterpart.
+      // Use kDoubleCompareReg for the comparison result; it is valid in
+      // fp64 (FR = 1) mode, which is implied for mips32r6.
+      DCHECK(!cmp1.is(kDoubleCompareReg) && !cmp2.is(kDoubleCompareReg));
+      switch (cc) {
+        case lt:
+          cmp(OLT, L, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
+          break;
+        case gt:
+          cmp(ULE, L, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
+          break;
+        case ge:
+          cmp(ULT, L, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
+          break;
+        case le:
+          cmp(OLE, L, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
+          break;
+        case eq:
+          cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
+          break;
+        case ueq:
+          cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+          bc1nez(target, kDoubleCompareReg);
+          break;
+        case ne:
+          cmp(EQ, L, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
+          break;
+        case nue:
+          cmp(UEQ, L, kDoubleCompareReg, cmp1, cmp2);
+          bc1eqz(target, kDoubleCompareReg);
+          break;
+        default:
+          CHECK(0);
+      }
     }
   }
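
Both generations lean on the same predicate algebra: MIPS has no ordered
greater-than compare, so gt tests ULE (unordered or less-or-equal) and
branches on false, ge tests ULT and branches on false, and so on. A side
effect is that any NaN reaching this point makes the ordered conditions fall
through. The gt case in C++ terms (a sketch, not V8 code):

#include <cmath>

// BranchF's gt case: the branch is taken iff cmp1 > cmp2 with both ordered.
static bool OrderedGreater(double cmp1, double cmp2) {
  bool ule = std::isnan(cmp1) || std::isnan(cmp2) || cmp1 <= cmp2;
  return !ule;  // NaN operands yield false, so the branch is not taken
}
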
 
@@ -1319,16 +1484,16 @@
     // register of FPU register pair.
     if (hi != 0) {
       li(at, Operand(hi));
-      mtc1(at, dst.high());
+      Mthc1(at, dst);
     } else {
-      mtc1(zero_reg, dst.high());
+      Mthc1(zero_reg, dst);
     }
   }
 }
 
 
 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
-  if (kArchVariant == kLoongson) {
+  if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
     Label done;
     Branch(&done, ne, rt, Operand(zero_reg));
     mov(rd, rs);
@@ -1340,7 +1505,7 @@
 
 
 void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
-  if (kArchVariant == kLoongson) {
+  if (IsMipsArchVariant(kLoongson) || IsMipsArchVariant(kMips32r6)) {
     Label done;
     Branch(&done, eq, rt, Operand(zero_reg));
     mov(rd, rs);
@@ -1352,11 +1517,11 @@
 
 
 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
-  if (kArchVariant == kLoongson) {
+  if (IsMipsArchVariant(kLoongson)) {
     // Tests an FP condition code and then conditionally moves rs to rd.
     // We do not currently use any FPU cc bit other than bit 0.
-    ASSERT(cc == 0);
-    ASSERT(!(rs.is(t8) || rd.is(t8)));
+    DCHECK(cc == 0);
+    DCHECK(!(rs.is(t8) || rd.is(t8)));
     Label done;
     Register scratch = t8;
     // For testing purposes we need to fetch content of the FCSR register and
@@ -1378,11 +1543,11 @@
 
 
 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
-  if (kArchVariant == kLoongson) {
+  if (IsMipsArchVariant(kLoongson)) {
     // Tests an FP condition code and then conditionally moves rs to rd.
     // We do not currently use any FPU cc bit other than bit 0.
-    ASSERT(cc == 0);
-    ASSERT(!(rs.is(t8) || rd.is(t8)));
+    DCHECK(cc == 0);
+    DCHECK(!(rs.is(t8) || rd.is(t8)));
     Label done;
     Register scratch = t8;
     // For testing purposes we need to fetch content of the FCSR register and
@@ -1404,8 +1569,8 @@
 
 
 void MacroAssembler::Clz(Register rd, Register rs) {
-  if (kArchVariant == kLoongson) {
-    ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
+  if (IsMipsArchVariant(kLoongson)) {
+    DCHECK(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
     Register mask = t8;
     Register scratch = t9;
     Label loop, end;
@@ -1432,9 +1597,9 @@
                                      DoubleRegister double_scratch,
                                      Register except_flag,
                                      CheckForInexactConversion check_inexact) {
-  ASSERT(!result.is(scratch));
-  ASSERT(!double_input.is(double_scratch));
-  ASSERT(!except_flag.is(scratch));
+  DCHECK(!result.is(scratch));
+  DCHECK(!double_input.is(double_scratch));
+  DCHECK(!except_flag.is(scratch));
 
   Label done;
 
@@ -1538,7 +1703,7 @@
 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
   Label done;
   DoubleRegister double_scratch = f12;
-  ASSERT(!result.is(object));
+  DCHECK(!result.is(object));
 
   ldc1(double_scratch,
        MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
@@ -1565,7 +1730,7 @@
                                        Register scratch,
                                        Label* not_number) {
   Label done;
-  ASSERT(!result.is(object));
+  DCHECK(!result.is(object));
 
   UntagAndJumpIfSmi(result, object, &done);
   JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
@@ -1592,7 +1757,7 @@
 // Emulated conditional branches do not emit a nop in the branch delay slot.
 //
 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
-#define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT(                                \
+#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                                \
     (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
     (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
 
@@ -1684,7 +1849,7 @@
                                  const Operand& rt,
                                  BranchDelaySlot bdslot) {
   BRANCH_ARGS_CHECK(cond, rs, rt);
-  ASSERT(!rs.is(zero_reg));
+  DCHECK(!rs.is(zero_reg));
   Register r2 = no_reg;
   Register scratch = at;
 
@@ -1784,14 +1949,14 @@
         break;
       case eq:
         // We don't want any other register but scratch clobbered.
-        ASSERT(!scratch.is(rs));
+        DCHECK(!scratch.is(rs));
         r2 = scratch;
         li(r2, rt);
         beq(rs, r2, offset);
         break;
       case ne:
         // We don't want any other register but scratch clobbered.
-        ASSERT(!scratch.is(rs));
+        DCHECK(!scratch.is(rs));
         r2 = scratch;
         li(r2, rt);
         bne(rs, r2, offset);
@@ -2036,14 +2201,14 @@
         b(offset);
         break;
       case eq:
-        ASSERT(!scratch.is(rs));
+        DCHECK(!scratch.is(rs));
         r2 = scratch;
         li(r2, rt);
         offset = shifted_branch_offset(L, false);
         beq(rs, r2, offset);
         break;
       case ne:
-        ASSERT(!scratch.is(rs));
+        DCHECK(!scratch.is(rs));
         r2 = scratch;
         li(r2, rt);
         offset = shifted_branch_offset(L, false);
@@ -2055,7 +2220,7 @@
           offset = shifted_branch_offset(L, false);
           bgtz(rs, offset);
         } else {
-          ASSERT(!scratch.is(rs));
+          DCHECK(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           slt(scratch, r2, rs);
@@ -2072,7 +2237,7 @@
           offset = shifted_branch_offset(L, false);
           beq(scratch, zero_reg, offset);
         } else {
-          ASSERT(!scratch.is(rs));
+          DCHECK(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           slt(scratch, rs, r2);
@@ -2089,7 +2254,7 @@
           offset = shifted_branch_offset(L, false);
           bne(scratch, zero_reg, offset);
         } else {
-          ASSERT(!scratch.is(rs));
+          DCHECK(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           slt(scratch, rs, r2);
@@ -2102,7 +2267,7 @@
           offset = shifted_branch_offset(L, false);
           blez(rs, offset);
         } else {
-          ASSERT(!scratch.is(rs));
+          DCHECK(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           slt(scratch, r2, rs);
@@ -2116,7 +2281,7 @@
           offset = shifted_branch_offset(L, false);
           bne(rs, zero_reg, offset);
         } else {
-          ASSERT(!scratch.is(rs));
+          DCHECK(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           sltu(scratch, r2, rs);
@@ -2133,7 +2298,7 @@
           offset = shifted_branch_offset(L, false);
           beq(scratch, zero_reg, offset);
         } else {
-          ASSERT(!scratch.is(rs));
+          DCHECK(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           sltu(scratch, rs, r2);
@@ -2150,7 +2315,7 @@
           offset = shifted_branch_offset(L, false);
           bne(scratch, zero_reg, offset);
         } else {
-          ASSERT(!scratch.is(rs));
+          DCHECK(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           sltu(scratch, rs, r2);
@@ -2163,7 +2328,7 @@
           offset = shifted_branch_offset(L, false);
           beq(rs, zero_reg, offset);
         } else {
-          ASSERT(!scratch.is(rs));
+          DCHECK(!scratch.is(rs));
           r2 = scratch;
           li(r2, rt);
           sltu(scratch, r2, rs);
@@ -2176,7 +2341,7 @@
     }
   }
   // Check that the offset actually fits in an int16_t.
-  ASSERT(is_int16(offset));
+  DCHECK(is_int16(offset));
   // Emit a nop in the branch delay slot if required.
   if (bdslot == PROTECT)
     nop();
@@ -2266,7 +2431,7 @@
     li(r2, rt);
   }
 
-  {
+  if (!IsMipsArchVariant(kMips32r6)) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
     switch (cond) {
       case cc_always:
@@ -2330,7 +2495,88 @@
       default:
         UNREACHABLE();
     }
+  } else {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    switch (cond) {
+      case cc_always:
+        bal(offset);
+        break;
+      case eq:
+        bne(rs, r2, 2);
+        nop();
+        bal(offset);
+        break;
+      case ne:
+        beq(rs, r2, 2);
+        nop();
+        bal(offset);
+        break;
+
+      // Signed comparison.
+      case greater:
+        // rs > rt
+        slt(scratch, r2, rs);
+        beq(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case greater_equal:
+        // rs >= rt
+        slt(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case less:
+        // rs < r2
+        slt(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case less_equal:
+        // rs <= r2
+        slt(scratch, r2, rs);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+
+      // Unsigned comparison.
+      case Ugreater:
+        // rs > rt
+        sltu(scratch, r2, rs);
+        beq(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case Ugreater_equal:
+        // rs >= rt
+        sltu(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case Uless:
+        // rs < r2
+        sltu(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case Uless_equal:
+        // rs <= r2
+        sltu(scratch, r2, rs);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      default:
+        UNREACHABLE();
+    }
   }
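+  // Note: in the r6 sequences above the branch offset 2 counts instructions,
+  // so each inverted-condition branch skips its own delay-slot nop plus the
+  // bal; the link is taken only when the requested condition holds.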
+
   // Emit a nop in the branch delay slot if required.
   if (bdslot == PROTECT)
     nop();
@@ -2361,7 +2607,7 @@
     li(r2, rt);
   }
 
-  {
+  if (!IsMipsArchVariant(kMips32r6)) {
     BlockTrampolinePoolScope block_trampoline_pool(this);
     switch (cond) {
       case cc_always:
@@ -2436,9 +2682,102 @@
       default:
         UNREACHABLE();
     }
+  } else {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case eq:
+        bne(rs, r2, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case ne:
+        beq(rs, r2, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+
+      // Signed comparison.
+      case greater:
+        // rs > rt
+        slt(scratch, r2, rs);
+        beq(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case greater_equal:
+        // rs >= rt
+        slt(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case less:
+        // rs < r2
+        slt(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case less_equal:
+        // rs <= r2
+        slt(scratch, r2, rs);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+
+      // Unsigned comparison.
+      case Ugreater:
+        // rs > rt
+        sltu(scratch, r2, rs);
+        beq(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case Ugreater_equal:
+        // rs >= rt
+        sltu(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case Uless:
+        // rs < r2
+        sltu(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case Uless_equal:
+        // rs <= r2
+        sltu(scratch, r2, rs);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+
+      default:
+        UNREACHABLE();
+    }
   }
+
   // Check that the offset actually fits in an int16_t.
-  ASSERT(is_int16(offset));
+  DCHECK(is_int16(offset));
 
   // Emit a nop in the branch delay slot if required.
   if (bdslot == PROTECT)
@@ -2489,7 +2828,7 @@
                           Register rs,
                           const Operand& rt,
                           BranchDelaySlot bd) {
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
   Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
 }
 
@@ -2500,7 +2839,7 @@
                           Register rs,
                           const Operand& rt,
                           BranchDelaySlot bd) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
   AllowDeferredHandleDereference embedding_raw_address;
   Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
 }
@@ -2546,7 +2885,7 @@
   if (bd == PROTECT)
     nop();
 
-  ASSERT_EQ(CallSize(target, cond, rs, rt, bd),
+  DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
             SizeOfCodeGeneratedSince(&start));
 }
 
@@ -2577,7 +2916,7 @@
   positions_recorder()->WriteRecordedPositions();
   li(t9, Operand(target_int, rmode), CONSTANT_SIZE);
   Call(t9, cond, rs, rt, bd);
-  ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
+  DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
             SizeOfCodeGeneratedSince(&start));
 }
 
@@ -2605,14 +2944,14 @@
   BlockTrampolinePoolScope block_trampoline_pool(this);
   Label start;
   bind(&start);
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
     SetRecordedAstId(ast_id);
     rmode = RelocInfo::CODE_TARGET_WITH_ID;
   }
   AllowDeferredHandleDereference embedding_raw_address;
   Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
-  ASSERT_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
+  DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
             SizeOfCodeGeneratedSince(&start));
 }
 
@@ -2760,7 +3099,7 @@
   PrepareCEntryArgs(0);
   PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(isolate(), 1);
-  ASSERT(AllowThisStubCall(&ces));
+  DCHECK(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 
@@ -2790,7 +3129,7 @@
 
   // Push the frame pointer, context, state, and code object.
   if (kind == StackHandler::JS_ENTRY) {
-    ASSERT_EQ(Smi::FromInt(0), 0);
+    DCHECK_EQ(Smi::FromInt(0), 0);
     // The second zero_reg indicates no context.
     // The first zero_reg is the NULL frame pointer.
     // The operands are reversed to match the order of MultiPush/Pop.
@@ -2918,7 +3257,7 @@
                               Register scratch2,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -2930,18 +3269,18 @@
     return;
   }
 
-  ASSERT(!result.is(scratch1));
-  ASSERT(!result.is(scratch2));
-  ASSERT(!scratch1.is(scratch2));
-  ASSERT(!scratch1.is(t9));
-  ASSERT(!scratch2.is(t9));
-  ASSERT(!result.is(t9));
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
+  DCHECK(!scratch1.is(t9));
+  DCHECK(!scratch2.is(t9));
+  DCHECK(!result.is(t9));
 
   // Make object size into bytes.
   if ((flags & SIZE_IN_WORDS) != 0) {
     object_size *= kPointerSize;
   }
-  ASSERT_EQ(0, object_size & kObjectAlignmentMask);
+  DCHECK_EQ(0, object_size & kObjectAlignmentMask);
 
   // Check relative positions of allocation top and limit addresses.
   // ARM adds additional checks to make sure the ldm instruction can be
@@ -2955,7 +3294,7 @@
       reinterpret_cast<intptr_t>(allocation_top.address());
   intptr_t limit =
       reinterpret_cast<intptr_t>(allocation_limit.address());
-  ASSERT((limit - top) == kPointerSize);
+  DCHECK((limit - top) == kPointerSize);
 
   // Set up allocation top address and object size registers.
   Register topaddr = scratch1;
@@ -2981,8 +3320,8 @@
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
     // Align the next allocation. Storing the filler map without checking top is
     // safe in new-space because the limit of the heap is aligned there.
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     And(scratch2, result, Operand(kDoubleAlignmentMask));
     Label aligned;
     Branch(&aligned, eq, scratch2, Operand(zero_reg));
@@ -3025,11 +3364,11 @@
     return;
   }
 
-  ASSERT(!result.is(scratch1));
-  ASSERT(!result.is(scratch2));
-  ASSERT(!scratch1.is(scratch2));
-  ASSERT(!object_size.is(t9));
-  ASSERT(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
+  DCHECK(!object_size.is(t9));
+  DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
 
   // Check relative positions of allocation top and limit addresses.
   // ARM adds additional checks to make sure the ldm instruction can be
@@ -3042,7 +3381,7 @@
       reinterpret_cast<intptr_t>(allocation_top.address());
   intptr_t limit =
       reinterpret_cast<intptr_t>(allocation_limit.address());
-  ASSERT((limit - top) == kPointerSize);
+  DCHECK((limit - top) == kPointerSize);
 
   // Set up allocation top address and object size registers.
   Register topaddr = scratch1;
@@ -3068,8 +3407,8 @@
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
     // Align the next allocation. Storing the filler map without checking top is
     // safe in new-space because the limit of the heap is aligned there.
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     And(scratch2, result, Operand(kDoubleAlignmentMask));
     Label aligned;
     Branch(&aligned, eq, scratch2, Operand(zero_reg));
@@ -3135,7 +3474,7 @@
                                            Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   sll(scratch1, length, 1);  // Length in bytes, not chars.
   addiu(scratch1, scratch1,
        kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
@@ -3158,20 +3497,18 @@
 }
 
 
-void MacroAssembler::AllocateAsciiString(Register result,
-                                         Register length,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string
   // while observing object alignment.
-  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  ASSERT(kCharSize == 1);
+  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK(kCharSize == 1);
   addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
   And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
 
-  // Allocate ASCII string in new space.
+  // Allocate one-byte string in new space.
   Allocate(scratch1,
            result,
            scratch2,
@@ -3180,11 +3517,8 @@
            TAG_OBJECT);
 
   // Set the map, length and hash field.
-  InitializeNewString(result,
-                      length,
-                      Heap::kAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
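
The size computations in AllocateTwoByteString above and
AllocateOneByteString here are the usual add-mask-then-clear round-up:
header plus payload, aligned to the object granularity. As a sketch
(parameter names hypothetical; the real constants come from the heap layout
headers):

#include <cstdint>

// Bytes to allocate for a sequential string: header + length * char_size,
// rounded up to the next object-aligned boundary.
inline uint32_t StringAllocationSize(uint32_t length, uint32_t char_size,
                                     uint32_t header_size,
                                     uint32_t object_align_mask) {
  return (length * char_size + header_size + object_align_mask) &
         ~object_align_mask;
}
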
 
 
@@ -3203,11 +3537,10 @@
 }
 
 
-void MacroAssembler::AllocateAsciiConsString(Register result,
-                                             Register length,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
   Allocate(ConsString::kSize,
            result,
            scratch1,
@@ -3215,11 +3548,8 @@
            gc_required,
            TAG_OBJECT);
 
-  InitializeNewString(result,
-                      length,
-                      Heap::kConsAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
 
 
@@ -3239,24 +3569,21 @@
 }
 
 
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
-                                               Register length,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
-  InitializeNewString(result,
-                      length,
-                      Heap::kSlicedAsciiStringMapRootIndex,
-                      scratch1,
-                      scratch2);
+  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+                      scratch1, scratch2);
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
-                                         Label* not_unique_name) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
@@ -3274,14 +3601,19 @@
                                         Register scratch2,
                                         Register heap_number_map,
                                         Label* need_gc,
-                                        TaggingMode tagging_mode) {
+                                        TaggingMode tagging_mode,
+                                        MutableMode mode) {
   // Allocate an object in the heap for the heap number and tag it as a heap
   // object.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
            tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
 
+  Heap::RootListIndex map_index = mode == MUTABLE
+      ? Heap::kMutableHeapNumberMapRootIndex
+      : Heap::kHeapNumberMapRootIndex;
+  AssertIsRoot(heap_number_map, map_index);
+
   // Store heap number map in the allocated object.
-  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
   if (tagging_mode == TAG_RESULT) {
     sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
   } else {
@@ -3306,8 +3638,8 @@
                                 Register src,
                                 RegList temps,
                                 int field_count) {
-  ASSERT((temps & dst.bit()) == 0);
-  ASSERT((temps & src.bit()) == 0);
+  DCHECK((temps & dst.bit()) == 0);
+  DCHECK((temps & src.bit()) == 0);
   // Primitive implementation using only one temporary register.
 
   Register tmp = no_reg;
@@ -3318,7 +3650,7 @@
       break;
     }
   }
-  ASSERT(!tmp.is(no_reg));
+  DCHECK(!tmp.is(no_reg));
 
   for (int i = 0; i < field_count; i++) {
     lw(tmp, FieldMemOperand(src, i * kPointerSize));
@@ -3637,7 +3969,7 @@
                                           DoubleRegister src2) {
   if (!IsMipsSoftFloatABI) {
     if (src2.is(f12)) {
-      ASSERT(!src1.is(f14));
+      DCHECK(!src1.is(f14));
       Move(f14, src2);
       Move(f12, src1);
     } else {
@@ -3680,12 +4012,12 @@
   // The code below is made a lot easier because the calling code already sets
   // up actual and expected registers according to the contract if values are
   // passed in registers.
-  ASSERT(actual.is_immediate() || actual.reg().is(a0));
-  ASSERT(expected.is_immediate() || expected.reg().is(a2));
-  ASSERT((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
+  DCHECK(actual.is_immediate() || actual.reg().is(a0));
+  DCHECK(expected.is_immediate() || expected.reg().is(a2));
+  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
 
   if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
+    DCHECK(actual.is_immediate());
     if (expected.immediate() == actual.immediate()) {
       definitely_matches = true;
     } else {
@@ -3738,7 +4070,7 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
 
@@ -3752,7 +4084,7 @@
       Call(code);
       call_wrapper.AfterCall();
     } else {
-      ASSERT(flag == JUMP_FUNCTION);
+      DCHECK(flag == JUMP_FUNCTION);
       Jump(code);
     }
     // Continue here if InvokePrologue does handle the invocation due to
@@ -3767,10 +4099,10 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Contract with called JS functions requires that function is passed in a1.
-  ASSERT(function.is(a1));
+  DCHECK(function.is(a1));
   Register expected_reg = a2;
   Register code_reg = a3;
 
@@ -3793,10 +4125,10 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Contract with called JS functions requires that function is passed in a1.
-  ASSERT(function.is(a1));
+  DCHECK(function.is(a1));
 
   // Get the function and setup the context.
   lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
@@ -3840,7 +4172,7 @@
 void MacroAssembler::IsObjectJSStringType(Register object,
                                           Register scratch,
                                           Label* fail) {
-  ASSERT(kNotStringTag != 0);
+  DCHECK(kNotStringTag != 0);
 
   lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -3867,14 +4199,15 @@
                                              Register scratch,
                                              Label* miss,
                                              bool miss_on_bound_function) {
-  // Check that the receiver isn't a smi.
-  JumpIfSmi(function, miss);
-
-  // Check that the function really is a function.  Load map into result reg.
-  GetObjectType(function, result, scratch);
-  Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
-
+  Label non_instance;
   if (miss_on_bound_function) {
+    // Check that the receiver isn't a smi.
+    JumpIfSmi(function, miss);
+
+    // Check that the function really is a function.  Load map into result reg.
+    GetObjectType(function, result, scratch);
+    Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
     lw(scratch,
        FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
     lw(scratch,
@@ -3882,13 +4215,12 @@
     And(scratch, scratch,
         Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
     Branch(miss, ne, scratch, Operand(zero_reg));
-  }
 
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
-  And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
-  Branch(&non_instance, ne, scratch, Operand(zero_reg));
+    // Make sure that the function has an instance prototype.
+    lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+    And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+    Branch(&non_instance, ne, scratch, Operand(zero_reg));
+  }
 
   // Get the prototype or initial map from the function.
   lw(result,
@@ -3907,12 +4239,15 @@
 
   // Get the prototype from the initial map.
   lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
-  jmp(&done);
 
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in initial map.
-  bind(&non_instance);
-  lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+  if (miss_on_bound_function) {
+    jmp(&done);
+
+    // Non-instance prototype: Fetch prototype from constructor field
+    // in initial map.
+    bind(&non_instance);
+    lw(result, FieldMemOperand(result, Map::kConstructorOffset));
+  }
 
   // All done.
   bind(&done);
@@ -3936,7 +4271,7 @@
                               Register r1,
                               const Operand& r2,
                               BranchDelaySlot bd) {
-  ASSERT(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
+  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
        cond, r1, r2, bd);
 }
@@ -3972,7 +4307,7 @@
       ExternalReference::handle_scope_level_address(isolate()),
       next_address);
 
-  ASSERT(function_address.is(a1) || function_address.is(a2));
+  DCHECK(function_address.is(a1) || function_address.is(a2));
 
   Label profiler_disabled;
   Label end_profiler_check;
@@ -4061,7 +4396,7 @@
   {
     FrameScope frame(this, StackFrame::INTERNAL);
     CallExternalReference(
-        ExternalReference(Runtime::kHiddenPromoteScheduledException, isolate()),
+        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
         0);
   }
   jmp(&exception_handled);
@@ -4090,7 +4425,7 @@
   // that the constants for the maximum number of digits for an array index
   // cached in the hash field and the number of bits reserved for it does not
   // conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
 }
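Spelled out, the relationship the DCHECK above encodes, using the values found in contemporary V8 headers (7 cached digits, 24 value bits; both assumed here): the largest cacheable array index must fit in the hash field's value bits.

#include <cstdint>

constexpr uint64_t TenToThe(int n) { return n == 0 ? 1 : 10 * TenToThe(n - 1); }
static_assert(TenToThe(7) < (1ULL << 24),
              "max cached array index fits in the hash's value bits");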
@@ -4147,18 +4482,18 @@
                                              Register right,
                                              Register overflow_dst,
                                              Register scratch) {
-  ASSERT(!dst.is(overflow_dst));
-  ASSERT(!dst.is(scratch));
-  ASSERT(!overflow_dst.is(scratch));
-  ASSERT(!overflow_dst.is(left));
-  ASSERT(!overflow_dst.is(right));
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
 
   if (left.is(right) && dst.is(left)) {
-    ASSERT(!dst.is(t9));
-    ASSERT(!scratch.is(t9));
-    ASSERT(!left.is(t9));
-    ASSERT(!right.is(t9));
-    ASSERT(!overflow_dst.is(t9));
+    DCHECK(!dst.is(t9));
+    DCHECK(!scratch.is(t9));
+    DCHECK(!left.is(t9));
+    DCHECK(!right.is(t9));
+    DCHECK(!overflow_dst.is(t9));
     mov(t9, right);
     right = t9;
   }
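A scalar model of the overflow rule AdduAndCheckForOverflow implements (a sketch of the arithmetic, not of the emitted MIPS sequence): a two's-complement addition overflows exactly when both operands share a sign that the sum does not, which one XOR/AND pair detects.

#include <cstdint>

static inline bool AddOverflows(int32_t left, int32_t right) {
  int32_t sum = static_cast<int32_t>(static_cast<uint32_t>(left) +
                                     static_cast<uint32_t>(right));
  return ((left ^ sum) & (right ^ sum)) < 0;  // sign bit set iff overflow
}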
@@ -4189,13 +4524,13 @@
                                              Register right,
                                              Register overflow_dst,
                                              Register scratch) {
-  ASSERT(!dst.is(overflow_dst));
-  ASSERT(!dst.is(scratch));
-  ASSERT(!overflow_dst.is(scratch));
-  ASSERT(!overflow_dst.is(left));
-  ASSERT(!overflow_dst.is(right));
-  ASSERT(!scratch.is(left));
-  ASSERT(!scratch.is(right));
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
+  DCHECK(!scratch.is(left));
+  DCHECK(!scratch.is(right));
 
   // This happens with some crankshaft code. Since Subu works fine if
   // left == right, let's not make that restriction here.
@@ -4296,7 +4631,7 @@
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
   // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   GetBuiltinEntry(t9, id);
   if (flag == CALL_FUNCTION) {
@@ -4304,7 +4639,7 @@
     Call(t9);
     call_wrapper.AfterCall();
   } else {
-    ASSERT(flag == JUMP_FUNCTION);
+    DCHECK(flag == JUMP_FUNCTION);
     Jump(t9);
   }
 }
@@ -4322,7 +4657,7 @@
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  ASSERT(!target.is(a1));
+  DCHECK(!target.is(a1));
   GetBuiltinFunction(a1, id);
   // Load the code entry point from the builtins object.
   lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
@@ -4341,7 +4676,7 @@
 
 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     li(scratch2, Operand(ExternalReference(counter)));
     lw(scratch1, MemOperand(scratch2));
@@ -4353,7 +4688,7 @@
 
 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
                                       Register scratch1, Register scratch2) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     li(scratch2, Operand(ExternalReference(counter)));
     lw(scratch1, MemOperand(scratch2));
@@ -4375,7 +4710,7 @@
 
 void MacroAssembler::AssertFastElements(Register elements) {
   if (emit_debug_code()) {
-    ASSERT(!elements.is(at));
+    DCHECK(!elements.is(at));
     Label ok;
     push(elements);
     lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
@@ -4438,7 +4773,7 @@
     // generated instructions is 10, so we use this as a maximum value.
     static const int kExpectedAbortInstructions = 10;
     int abort_instructions = InstructionsGeneratedSince(&abort_start);
-    ASSERT(abort_instructions <= kExpectedAbortInstructions);
+    DCHECK(abort_instructions <= kExpectedAbortInstructions);
     while (abort_instructions++ < kExpectedAbortInstructions) {
       nop();
     }
@@ -4614,9 +4949,9 @@
   const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
   if (save_doubles) {
     // The stack must be aligned to 0 modulo 8 for stores with sdc1.
-    ASSERT(kDoubleSize == frame_alignment);
+    DCHECK(kDoubleSize == frame_alignment);
     if (frame_alignment > 0) {
-      ASSERT(IsPowerOf2(frame_alignment));
+      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
       And(sp, sp, Operand(-frame_alignment));  // Align stack.
     }
     int space = FPURegister::kMaxNumRegisters * kDoubleSize;
@@ -4631,10 +4966,10 @@
   // Reserve room for the return address, stack space and an optional slot
   // (used by the DirectCEntryStub to hold the return value if a struct is
   // returned), and align the frame in preparation for the runtime call.
-  ASSERT(stack_space >= 0);
+  DCHECK(stack_space >= 0);
   Subu(sp, sp, Operand((stack_space + 2) * kPointerSize));
   if (frame_alignment > 0) {
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     And(sp, sp, Operand(-frame_alignment));  // Align stack.
   }
 
@@ -4711,7 +5046,7 @@
   // environment.
   // Note: This will break if we ever start generating snapshots on one Mips
   // platform for another Mips platform with a different alignment.
-  return OS::ActivationFrameAlignment();
+  return base::OS::ActivationFrameAlignment();
 #else  // V8_HOST_ARCH_MIPS
   // If we are using the simulator then we should always align to the expected
   // alignment. As the simulator is used to generate snapshots we do not know
@@ -4729,7 +5064,7 @@
 
       if (frame_alignment > kPointerSize) {
         Label alignment_as_expected;
-        ASSERT(IsPowerOf2(frame_alignment));
+        DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
         andi(at, sp, frame_alignment_mask);
         Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
         // Don't use Check here, as it will call Runtime_Abort re-entering here.
@@ -4753,7 +5088,7 @@
 
 
 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
-  ASSERT(!reg.is(overflow));
+  DCHECK(!reg.is(overflow));
   mov(overflow, reg);  // Save original value.
   SmiTag(reg);
   xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
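A plain-C++ model of the XOR test above (assuming kSmiTag == 0 and a one-bit tag): smi-tagging computes value << 1, which overflows the 31-bit payload exactly when value and 2 * value disagree in sign.

#include <cstdint>

static inline bool SmiTagOverflows(int32_t value) {
  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
  return (value ^ tagged) < 0;  // sign flip iff value is outside smi range
}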
@@ -4767,9 +5102,9 @@
     // Fall back to slower case.
     SmiTagCheckOverflow(dst, overflow);
   } else {
-    ASSERT(!dst.is(src));
-    ASSERT(!dst.is(overflow));
-    ASSERT(!src.is(overflow));
+    DCHECK(!dst.is(src));
+    DCHECK(!dst.is(overflow));
+    DCHECK(!src.is(overflow));
     SmiTag(dst, src);
     xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
   }
@@ -4795,7 +5130,7 @@
                                Label* smi_label,
                                Register scratch,
                                BranchDelaySlot bd) {
-  ASSERT_EQ(0, kSmiTag);
+  DCHECK_EQ(0, kSmiTag);
   andi(scratch, value, kSmiTagMask);
   Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
 }
@@ -4804,7 +5139,7 @@
                                   Label* not_smi_label,
                                   Register scratch,
                                   BranchDelaySlot bd) {
-  ASSERT_EQ(0, kSmiTag);
+  DCHECK_EQ(0, kSmiTag);
   andi(scratch, value, kSmiTagMask);
   Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
 }
@@ -4814,7 +5149,7 @@
                                       Register reg2,
                                       Label* on_not_both_smi) {
   STATIC_ASSERT(kSmiTag == 0);
-  ASSERT_EQ(1, kSmiTagMask);
+  DCHECK_EQ(1, kSmiTagMask);
   or_(at, reg1, reg2);
   JumpIfNotSmi(at, on_not_both_smi);
 }
@@ -4824,7 +5159,7 @@
                                      Register reg2,
                                      Label* on_either_smi) {
   STATIC_ASSERT(kSmiTag == 0);
-  ASSERT_EQ(1, kSmiTagMask);
+  DCHECK_EQ(1, kSmiTagMask);
   // Both Smi tags must be 1 (not Smi).
   and_(at, reg1, reg2);
   JumpIfSmi(at, on_either_smi);
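The two combined smi tests above reduce to one bit operation each (tag bit in bit 0, kSmiTag == 0): ORing keeps a set tag bit from either operand, so the OR is a smi only when both operands are; ANDing keeps the tag bit only when both are set, so the AND is a smi when at least one operand is. A scalar sketch:

#include <cstdint>

static inline bool BothSmi(int32_t r1, int32_t r2)   { return ((r1 | r2) & 1) == 0; }
static inline bool EitherSmi(int32_t r1, int32_t r2) { return ((r1 & r2) & 1) == 0; }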
@@ -4896,7 +5231,7 @@
 
 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
   if (emit_debug_code()) {
-    ASSERT(!reg.is(at));
+    DCHECK(!reg.is(at));
     LoadRoot(at, index);
     Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
   }
@@ -4993,71 +5328,59 @@
 }
 
 
-void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
-    Register first,
-    Register second,
-    Register scratch1,
-    Register scratch2,
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+    Register first, Register second, Register scratch1, Register scratch2,
     Label* failure) {
-  // Test that both first and second are sequential ASCII strings.
+  // Test that both first and second are sequential one-byte strings.
   // Assume that they are non-smis.
   lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
   lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
 
-  JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
-                                               scratch2,
-                                               scratch1,
-                                               scratch2,
-                                               failure);
+  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+                                                 scratch2, failure);
 }
 
 
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
-                                                         Register second,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+                                                           Register second,
+                                                           Register scratch1,
+                                                           Register scratch2,
+                                                           Label* failure) {
   // Check that neither is a smi.
   STATIC_ASSERT(kSmiTag == 0);
   And(scratch1, first, Operand(second));
   JumpIfSmi(scratch1, failure);
-  JumpIfNonSmisNotBothSequentialAsciiStrings(first,
-                                             second,
-                                             scratch1,
-                                             scratch2,
-                                             failure);
+  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+                                               scratch2, failure);
 }
 
 
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
-    Register first,
-    Register second,
-    Register scratch1,
-    Register scratch2,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+    Register first, Register second, Register scratch1, Register scratch2,
     Label* failure) {
-  const int kFlatAsciiStringMask =
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
-  ASSERT(kFlatAsciiStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
-  andi(scratch1, first, kFlatAsciiStringMask);
-  Branch(failure, ne, scratch1, Operand(kFlatAsciiStringTag));
-  andi(scratch2, second, kFlatAsciiStringMask);
-  Branch(failure, ne, scratch2, Operand(kFlatAsciiStringTag));
+  DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
+  andi(scratch1, first, kFlatOneByteStringMask);
+  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
+  andi(scratch2, second, kFlatOneByteStringMask);
+  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
 }
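A standalone model of the flat one-byte test above; the encoding constants marked "confirmed" match DCHECKs elsewhere in this patch, the rest are illustrative. A flat one-byte string has the string bit clear, the one-byte encoding bit set, and a sequential representation, so one AND plus one compare checks all three fields at once.

const int kIsNotStringMask = 0x80;           // matches a DCHECK later in this patch
const int kStringEncodingMask = 0x04;        // confirmed by DCHECK in this patch
const int kStringRepresentationMask = 0x03;  // illustrative
const int kStringTag = 0x00;
const int kOneByteStringTag = 0x04;          // confirmed by DCHECK in this patch
const int kSeqStringTag = 0x00;

bool IsFlatOneByte(int instance_type) {
  const int mask = kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
  const int tag = kStringTag | kOneByteStringTag | kSeqStringTag;
  return (instance_type & mask) == tag;
}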
 
 
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
-                                                            Register scratch,
-                                                            Label* failure) {
-  const int kFlatAsciiStringMask =
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+                                                              Register scratch,
+                                                              Label* failure) {
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
-  And(scratch, type, Operand(kFlatAsciiStringMask));
-  Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
+  And(scratch, type, Operand(kFlatOneByteStringMask));
+  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
 }
 
 
@@ -5106,7 +5429,7 @@
   lw(at, FieldMemOperand(string, String::kLengthOffset));
   Check(lt, kIndexIsTooLarge, index, Operand(at));
 
-  ASSERT(Smi::FromInt(0) == 0);
+  DCHECK(Smi::FromInt(0) == 0);
   Check(ge, kIndexIsNegative, index, Operand(zero_reg));
 
   SmiUntag(index, index);
@@ -5130,7 +5453,7 @@
     // and the original value of sp.
     mov(scratch, sp);
     Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     And(sp, sp, Operand(-frame_alignment));
     sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
@@ -5175,7 +5498,7 @@
 void MacroAssembler::CallCFunctionHelper(Register function,
                                          int num_reg_arguments,
                                          int num_double_arguments) {
-  ASSERT(has_frame());
+  DCHECK(has_frame());
   // Make sure that the stack is aligned before calling a C function unless
   // running in the simulator. The simulator has its own alignment check which
   // provides more information.
@@ -5184,10 +5507,10 @@
 
 #if V8_HOST_ARCH_MIPS
   if (emit_debug_code()) {
-    int frame_alignment = OS::ActivationFrameAlignment();
+    int frame_alignment = base::OS::ActivationFrameAlignment();
     int frame_alignment_mask = frame_alignment - 1;
     if (frame_alignment > kPointerSize) {
-      ASSERT(IsPowerOf2(frame_alignment));
+      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
       Label alignment_as_expected;
       And(at, sp, Operand(frame_alignment_mask));
       Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
@@ -5213,7 +5536,7 @@
   int stack_passed_arguments = CalculateStackPassedWords(
       num_reg_arguments, num_double_arguments);
 
-  if (OS::ActivationFrameAlignment() > kPointerSize) {
+  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
     lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
     Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
@@ -5313,7 +5636,7 @@
                                  Register scratch1,
                                  Label* on_black) {
   HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
 }
 
 
@@ -5323,8 +5646,8 @@
                               Label* has_color,
                               int first_bit,
                               int second_bit) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
 
   GetMarkBits(object, bitmap_scratch, mask_scratch);
 
@@ -5353,13 +5676,13 @@
 void MacroAssembler::JumpIfDataObject(Register value,
                                       Register scratch,
                                       Label* not_data_object) {
-  ASSERT(!AreAliased(value, scratch, t8, no_reg));
+  DCHECK(!AreAliased(value, scratch, t8, no_reg));
   Label is_data_object;
   lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
   LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
   Branch(&is_data_object, eq, t8, Operand(scratch));
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
@@ -5372,7 +5695,7 @@
 void MacroAssembler::GetMarkBits(Register addr_reg,
                                  Register bitmap_reg,
                                  Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
   And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
   Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
   const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
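A rough model of GetMarkBits, with assumed constants (kPointerSizeLog2 == 2 on 32-bit MIPS, 32-bit bitmap cells, an illustrative 1 MB page size): the page-aligned base locates the page's marking bitmap, and the pointer's low bits select the bit within a bitmap cell.

#include <cstdint>

void GetMarkBitsModel(uintptr_t addr, uintptr_t* bitmap_base, uint32_t* mask) {
  const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed page size
  *bitmap_base = addr & ~kPageAlignmentMask;            // start of the page
  uint32_t bit_in_cell = (addr >> 2) & 31;              // Ext(addr, 2, 5) above
  *mask = 1u << bit_in_cell;
}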
@@ -5390,14 +5713,14 @@
     Register mask_scratch,
     Register load_scratch,
     Label* value_is_white_and_not_data) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
+  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
   GetMarkBits(value, bitmap_scratch, mask_scratch);
 
   // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
 
   Label done;
 
@@ -5436,8 +5759,8 @@
   }
 
   // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   Register instance_type = load_scratch;
@@ -5449,8 +5772,8 @@
   // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
   // External strings are the only ones with the kExternalStringTag bit
   // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
   And(t8, instance_type, Operand(kExternalStringTag));
   {
     Label skip;
@@ -5460,12 +5783,12 @@
     bind(&skip);
   }
 
-  // Sequential string, either ASCII or UC16.
-  // For ASCII (char-size of 1) we shift the smi tag away to get the length.
+  // Sequential string, either Latin1 or UC16.
+  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
   // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
   // getting the length multiplied by 2.
-  ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
-  ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+  DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
   lw(t9, FieldMemOperand(value, String::kLengthOffset));
   And(t8, instance_type, Operand(kStringEncodingMask));
   {
@@ -5555,7 +5878,7 @@
 
 
 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
-  ASSERT(!output_reg.is(input_reg));
+  DCHECK(!output_reg.is(input_reg));
   Label done;
   li(output_reg, Operand(255));
   // Normal branch: nop in delay slot.
@@ -5650,7 +5973,7 @@
     Register scratch0,
     Register scratch1,
     Label* found) {
-  ASSERT(!scratch1.is(scratch0));
+  DCHECK(!scratch1.is(scratch0));
   Factory* factory = isolate()->factory();
   Register current = scratch0;
   Label loop_again;
@@ -5669,14 +5992,30 @@
 }
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6,
+                Register reg7,
+                Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
 }
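The generalized AreAliased above counts valid registers and compares that count with the popcount of their combined bit mask; any alias collapses two registers onto one bit. A self-contained model using plain register codes (-1 standing in for no_reg):

#include <bitset>
#include <initializer_list>

bool AreAliasedModel(std::initializer_list<int> reg_codes) {
  int valid = 0;
  unsigned mask = 0;
  for (int code : reg_codes) {
    if (code < 0) continue;  // skip no_reg defaults
    ++valid;
    mask |= 1u << code;
  }
  return valid != static_cast<int>(std::bitset<32>(mask).count());
}
// AreAliasedModel({4, 5}) == false; AreAliasedModel({4, 5, 4}) == true.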
 
 
@@ -5690,19 +6029,19 @@
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
   if (flush_cache_ == FLUSH) {
-    CPU::FlushICache(address_, size_);
+    CpuFeatures::FlushICache(address_, size_);
   }
 
   // Check that the code was patched as expected.
-  ASSERT(masm_.pc_ == address_ + size_);
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
@@ -5718,13 +6057,13 @@
 
 void CodePatcher::ChangeBranchCondition(Condition cond) {
   Instr instr = Assembler::instr_at(masm_.pc_);
-  ASSERT(Assembler::IsBranch(instr));
+  DCHECK(Assembler::IsBranch(instr));
   uint32_t opcode = Assembler::GetOpcodeField(instr);
   // Currently only the 'eq' and 'ne' cond values are supported and the simple
   // branch instructions (with opcode being the branch type).
   // There are some special cases (see Assembler::IsBranch()) so extending this
   // would be tricky.
-  ASSERT(opcode == BEQ ||
+  DCHECK(opcode == BEQ ||
          opcode == BNE ||
         opcode == BLEZ ||
         opcode == BGTZ ||
@@ -5741,20 +6080,21 @@
 void MacroAssembler::TruncatingDiv(Register result,
                                    Register dividend,
                                    int32_t divisor) {
-  ASSERT(!dividend.is(result));
-  ASSERT(!dividend.is(at));
-  ASSERT(!result.is(at));
-  MultiplierAndShift ms(divisor);
-  li(at, Operand(ms.multiplier()));
-  Mult(dividend, Operand(at));
-  mfhi(result);
-  if (divisor > 0 && ms.multiplier() < 0) {
+  DCHECK(!dividend.is(result));
+  DCHECK(!dividend.is(at));
+  DCHECK(!result.is(at));
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+  li(at, Operand(mag.multiplier));
+  Mulh(result, dividend, Operand(at));
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) {
     Addu(result, result, Operand(dividend));
   }
-  if (divisor < 0 && ms.multiplier() > 0) {
+  if (divisor < 0 && !neg && mag.multiplier > 0) {
     Subu(result, result, Operand(dividend));
   }
-  if (ms.shift() > 0) sra(result, result, ms.shift());
+  if (mag.shift > 0) sra(result, result, mag.shift);
   srl(at, dividend, 31);
   Addu(result, result, Operand(at));
 }
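A scalar walk-through of the rewritten TruncatingDiv, assuming the base::SignedDivisionByConstant contract (a multiplier/shift pair such that the high 32 bits of the widened product approximate the quotient): multiply-high, apply the sign fix-ups, arithmetic-shift, then add the dividend's sign bit to round toward zero.

#include <cstdint>

int32_t TruncatingDivModel(int32_t dividend, int32_t divisor,
                           uint32_t multiplier, int shift) {
  int32_t result = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * static_cast<int32_t>(multiplier)) >> 32);
  bool neg = (multiplier & (1u << 31)) != 0;
  if (divisor > 0 && neg) result += dividend;                     // Addu fix-up
  if (divisor < 0 && !neg && multiplier > 0) result -= dividend;  // Subu fix-up
  if (shift > 0) result >>= shift;                                // sra
  result += static_cast<uint32_t>(dividend) >> 31;  // +1 for negative dividends
  return result;  // equals C's truncating dividend / divisor
}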
diff --git a/src/mips/macro-assembler-mips.h b/src/mips/macro-assembler-mips.h
index d339a3f..ce52986 100644
--- a/src/mips/macro-assembler-mips.h
+++ b/src/mips/macro-assembler-mips.h
@@ -6,8 +6,8 @@
 #define V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
 
 #include "src/assembler.h"
-#include "src/mips/assembler-mips.h"
 #include "src/globals.h"
+#include "src/mips/assembler-mips.h"
 
 namespace v8 {
 namespace internal {
@@ -84,7 +84,14 @@
                                    Register reg5 = no_reg,
                                    Register reg6 = no_reg);
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3 = no_reg,
+                Register reg4 = no_reg,
+                Register reg5 = no_reg,
+                Register reg6 = no_reg,
+                Register reg7 = no_reg,
+                Register reg8 = no_reg);
 
 
 // -----------------------------------------------------------------------------
@@ -109,7 +116,7 @@
 // Generate a MemOperand for storing arguments 5..N on the stack
 // when calling CallCFunction().
 inline MemOperand CFunctionArgumentOperand(int index) {
-  ASSERT(index > kCArgSlotCount);
+  DCHECK(index > kCArgSlotCount);
   // Argument 5 takes the slot just past the four Arg-slots.
   int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
   return MemOperand(sp, offset);
@@ -227,11 +234,11 @@
 
   inline void Move(Register dst_low, Register dst_high, FPURegister src) {
     mfc1(dst_low, src);
-    mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+    Mfhc1(dst_high, src);
   }
 
   inline void FmoveHigh(Register dst_high, FPURegister src) {
-    mfc1(dst_high, FPURegister::from_code(src.code() + 1));
+    Mfhc1(dst_high, src);
   }
 
   inline void FmoveLow(Register dst_low, FPURegister src) {
@@ -240,7 +247,7 @@
 
   inline void Move(FPURegister dst, Register src_low, Register src_high) {
     mtc1(src_low, dst);
-    mtc1(src_high, FPURegister::from_code(dst.code() + 1));
+    Mthc1(src_high, dst);
   }
 
   // Conditional move.
@@ -449,7 +456,7 @@
   // nop(type)). These instructions are generated to mark special location in
   // the code, like some special IC code.
   static inline bool IsMarkedCode(Instr instr, int type) {
-    ASSERT((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
     return IsNop(instr, type);
   }
 
@@ -467,7 +474,7 @@
                   rs == static_cast<uint32_t>(ToNumber(zero_reg)));
     int type =
         (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
-    ASSERT((type == -1) ||
+    DCHECK((type == -1) ||
            ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
     return type;
   }
@@ -511,32 +518,25 @@
                              Register scratch2,
                              Register scratch3,
                              Label* gc_required);
-  void AllocateAsciiString(Register result,
-                           Register length,
-                           Register scratch1,
-                           Register scratch2,
-                           Register scratch3,
-                           Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
   void AllocateTwoByteConsString(Register result,
                                  Register length,
                                  Register scratch1,
                                  Register scratch2,
                                  Label* gc_required);
-  void AllocateAsciiConsString(Register result,
-                               Register length,
-                               Register scratch1,
-                               Register scratch2,
-                               Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register length,
+                                 Register scratch1, Register scratch2,
+                                 Label* gc_required);
   void AllocateTwoByteSlicedString(Register result,
                                    Register length,
                                    Register scratch1,
                                    Register scratch2,
                                    Label* gc_required);
-  void AllocateAsciiSlicedString(Register result,
-                                 Register length,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register length,
+                                   Register scratch1, Register scratch2,
+                                   Label* gc_required);
 
   // Allocates a heap number or jumps to the gc_required label if the young
   // space is full and a scavenge is needed. All registers are clobbered also
@@ -546,7 +546,8 @@
                           Register scratch2,
                           Register heap_number_map,
                           Label* gc_required,
-                          TaggingMode tagging_mode = TAG_RESULT);
+                          TaggingMode tagging_mode = TAG_RESULT,
+                          MutableMode mode = IMMUTABLE);
   void AllocateHeapNumberWithValue(Register result,
                                    FPURegister value,
                                    Register scratch1,
@@ -574,14 +575,28 @@
     instr(rs, Operand(j));                                                     \
   }
 
+#define DEFINE_INSTRUCTION3(instr)                                             \
+  void instr(Register rd_hi, Register rd_lo, Register rs, const Operand& rt);  \
+  void instr(Register rd_hi, Register rd_lo, Register rs, Register rt) {       \
+    instr(rd_hi, rd_lo, rs, Operand(rt));                                      \
+  }                                                                            \
+  void instr(Register rd_hi, Register rd_lo, Register rs, int32_t j) {         \
+    instr(rd_hi, rd_lo, rs, Operand(j));                                       \
+  }
+
   DEFINE_INSTRUCTION(Addu);
   DEFINE_INSTRUCTION(Subu);
   DEFINE_INSTRUCTION(Mul);
+  DEFINE_INSTRUCTION(Mod);
+  DEFINE_INSTRUCTION(Mulh);
   DEFINE_INSTRUCTION2(Mult);
   DEFINE_INSTRUCTION2(Multu);
   DEFINE_INSTRUCTION2(Div);
   DEFINE_INSTRUCTION2(Divu);
 
+  DEFINE_INSTRUCTION3(Div);
+  DEFINE_INSTRUCTION3(Mul);
+
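For reference, DEFINE_INSTRUCTION3(Div) expands (modulo whitespace) to the three overloads below; the Register and immediate forms both funnel into the Operand form, which the .cc file defines:

void Div(Register rd_hi, Register rd_lo, Register rs, const Operand& rt);
void Div(Register rd_hi, Register rd_lo, Register rs, Register rt) {
  Div(rd_hi, rd_lo, rs, Operand(rt));
}
void Div(Register rd_hi, Register rd_lo, Register rs, int32_t j) {
  Div(rd_hi, rd_lo, rs, Operand(j));
}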
   DEFINE_INSTRUCTION(And);
   DEFINE_INSTRUCTION(Or);
   DEFINE_INSTRUCTION(Xor);
@@ -681,7 +696,7 @@
 
   // Pop two registers. Pops rightmost register first (from lower address).
   void Pop(Register src1, Register src2) {
-    ASSERT(!src1.is(src2));
+    DCHECK(!src1.is(src2));
     lw(src2, MemOperand(sp, 0 * kPointerSize));
     lw(src1, MemOperand(sp, 1 * kPointerSize));
     Addu(sp, sp, 2 * kPointerSize);
@@ -703,17 +718,15 @@
   // RegList constant kSafepointSavedRegisters.
   void PushSafepointRegisters();
   void PopSafepointRegisters();
-  void PushSafepointRegistersAndDoubles();
-  void PopSafepointRegistersAndDoubles();
   // Store value in register src in the safepoint stack slot for
   // register dst.
   void StoreToSafepointRegisterSlot(Register src, Register dst);
-  void StoreToSafepointRegistersAndDoublesSlot(Register src, Register dst);
   // Load the value of the src register from its safepoint stack slot
   // into register dst.
   void LoadFromSafepointRegisterSlot(Register dst, Register src);
 
-  // Flush the I-cache from asm code. You should use CPU::FlushICache from C.
+  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
+  // from C.
   // Does not handle errors.
   void FlushICache(Register address, unsigned instructions);
 
@@ -736,6 +749,20 @@
   void Round_w_d(FPURegister fd, FPURegister fs);
   void Floor_w_d(FPURegister fd, FPURegister fs);
   void Ceil_w_d(FPURegister fd, FPURegister fs);
+
+  // FP32 mode: Move the general-purpose register into
+  // the high part of the double-register pair.
+  // FP64 mode: Move the general-purpose register into
+  // the higher 32 bits of the 64-bit coprocessor register,
+  // while leaving the low bits unchanged.
+  void Mthc1(Register rt, FPURegister fs);
+
+  // FP32 mode: Move the high part of the double-register pair into the
+  // general-purpose register.
+  // FP64 mode: Move the higher 32 bits of the 64-bit coprocessor register
+  // into the general-purpose register.
+  void Mfhc1(Register rt, FPURegister fs);
+
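An illustrative model of the FP32/FP64 distinction documented above (register layout assumed, names hypothetical): in FP32 mode a double occupies an even/odd pair of 32-bit registers, so its high word is the odd register; in FP64 mode it is the upper half of one 64-bit register.

#include <cstdint>

uint32_t Mfhc1Model(const uint64_t* fpu64, const uint32_t* fpu32,
                    int reg, bool fp64_mode) {
  return fp64_mode ? static_cast<uint32_t>(fpu64[reg] >> 32)  // upper 32 bits
                   : fpu32[reg + 1];                          // odd register
}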
   // Wrapper function for the different cmp/branch types.
   void BranchF(Label* target,
                Label* nan,
@@ -1070,7 +1097,7 @@
     lw(type, FieldMemOperand(obj, HeapObject::kMapOffset));
     lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
     And(type, type, Operand(kIsNotStringMask));
-    ASSERT_EQ(0, kStringTag);
+    DCHECK_EQ(0, kStringTag);
     return eq;
   }
 
@@ -1282,7 +1309,7 @@
   };
 
   Handle<Object> CodeObject() {
-    ASSERT(!code_object_.is_null());
+    DCHECK(!code_object_.is_null());
     return code_object_;
   }
 
@@ -1447,20 +1474,16 @@
 
   // Checks if both instance types are sequential one-byte strings and jumps
   // to label if either is not.
-  void JumpIfBothInstanceTypesAreNotSequentialAscii(
-      Register first_object_instance_type,
-      Register second_object_instance_type,
-      Register scratch1,
-      Register scratch2,
-      Label* failure);
+  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+      Register first_object_instance_type, Register second_object_instance_type,
+      Register scratch1, Register scratch2, Label* failure);
 
-  // Check if instance type is sequential ASCII string and jump to label if
+  // Check if instance type is sequential one-byte string and jump to label if
   // it is not.
-  void JumpIfInstanceTypeIsNotSequentialAscii(Register type,
-                                              Register scratch,
-                                              Label* failure);
+  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+                                                Label* failure);
 
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
@@ -1468,21 +1491,20 @@
                                  Register scratch,
                                  uint32_t encoding_mask);
 
-  // Test that both first and second are sequential ASCII strings.
-  // Assume that they are non-smis.
-  void JumpIfNonSmisNotBothSequentialAsciiStrings(Register first,
-                                                  Register second,
-                                                  Register scratch1,
-                                                  Register scratch2,
-                                                  Label* failure);
+  // Checks if both objects are sequential one-byte strings and jumps to label
+  // if either is not. Assumes that neither object is a smi.
+  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
+                                                    Register second,
+                                                    Register scratch1,
+                                                    Register scratch2,
+                                                    Label* failure);
 
-  // Test that both first and second are sequential ASCII strings.
-  // Check that they are non-smis.
-  void JumpIfNotBothSequentialAsciiStrings(Register first,
-                                           Register second,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* failure);
+  // Checks if both objects are sequential one-byte strings and jumps to label
+  // if either is not.
+  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* not_flat_one_byte_strings);
 
   void ClampUint8(Register output_reg, Register input_reg);
 
diff --git a/src/mips/regexp-macro-assembler-mips.cc b/src/mips/regexp-macro-assembler-mips.cc
index bbd5e12..dbc12a0 100644
--- a/src/mips/regexp-macro-assembler-mips.cc
+++ b/src/mips/regexp-macro-assembler-mips.cc
@@ -6,12 +6,13 @@
 
 #if V8_TARGET_ARCH_MIPS
 
-#include "src/unicode.h"
-#include "src/log.h"
 #include "src/code-stubs.h"
-#include "src/regexp-stack.h"
+#include "src/log.h"
 #include "src/macro-assembler.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
 #include "src/mips/regexp-macro-assembler-mips.h"
 
 namespace v8 {
@@ -109,7 +110,7 @@
       backtrack_label_(),
       exit_label_(),
       internal_failure_label_() {
-  ASSERT_EQ(0, registers_to_save % 2);
+  DCHECK_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code later.
   // If the code gets too big or corrupted, an internal exception will be
   // raised, and we will exit right away.
@@ -148,8 +149,8 @@
 
 
 void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < num_registers_);
+  DCHECK(reg >= 0);
+  DCHECK(reg < num_registers_);
   if (by != 0) {
     __ lw(a0, register_location(reg));
     __ Addu(a0, a0, Operand(by));
@@ -240,7 +241,7 @@
   // Check that there are enough characters left in the input.
   BranchOrBacktrack(on_no_match, gt, t5, Operand(zero_reg));
 
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     Label success;
     Label fail;
     Label loop_check;
@@ -288,7 +289,7 @@
     // Compute new value of character position after the matched part.
     __ Subu(current_input_offset(), a2, end_of_input_address());
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     // Put regexp engine registers on stack.
     RegList regexp_registers_to_retain = current_input_offset().bit() |
         current_character().bit() | backtrack_stackpointer().bit();
@@ -364,13 +365,13 @@
 
   Label loop;
   __ bind(&loop);
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     __ lbu(a3, MemOperand(a0, 0));
     __ addiu(a0, a0, char_size());
     __ lbu(t0, MemOperand(a2, 0));
     __ addiu(a2, a2, char_size());
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     __ lhu(a3, MemOperand(a0, 0));
     __ addiu(a0, a0, char_size());
     __ lhu(t0, MemOperand(a2, 0));
@@ -414,7 +415,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
   __ Subu(a0, current_character(), Operand(minus));
   __ And(a0, a0, Operand(mask));
   BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
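CheckNotCharacterAfterMinusAnd reduces to subtract, mask, compare; the emitted code backtracks when the predicate below is false. The regexp compiler uses this, for example, where a single mask can canonicalize several code points onto one value.

#include <cstdint>

static inline bool EqualsAfterMinusAnd(uint32_t current, uint32_t minus,
                                       uint32_t mask, uint32_t c) {
  return ((current - minus) & mask) == c;
}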
@@ -445,7 +446,7 @@
     Handle<ByteArray> table,
     Label* on_bit_set) {
   __ li(a0, Operand(table));
-  if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
     __ And(a1, current_character(), Operand(kTableSize - 1));
     __ Addu(a0, a0, a1);
   } else {
@@ -464,7 +465,7 @@
   switch (type) {
   case 's':
     // Match space-characters.
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       // One byte space characters are '\t'..'\r', ' ' and \u00a0.
       Label success;
       __ Branch(&success, eq, current_character(), Operand(' '));
@@ -481,12 +482,12 @@
     // The emitted code for generic character classes is good enough.
     return false;
   case 'd':
-    // Match ASCII digits ('0'..'9').
+    // Match Latin1 digits ('0'..'9').
     __ Subu(a0, current_character(), Operand('0'));
     BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
     return true;
   case 'D':
-    // Match non ASCII-digits.
+    // Match anything but Latin1 digits ('0'..'9').
     __ Subu(a0, current_character(), Operand('0'));
     BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
     return true;
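Both digit tests above rely on the unsigned-range trick: after subtracting '0', any non-digit wraps around to a large unsigned value, so the single unsigned comparison ('hi' or 'ls') covers both bounds at once.

#include <cstdint>

static inline bool IsAsciiDigit(uint32_t c) {
  return (c - '0') <= static_cast<uint32_t>('9' - '0');
}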
@@ -510,7 +511,7 @@
     __ Xor(a0, current_character(), Operand(0x01));
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
     __ Subu(a0, a0, Operand(0x0b));
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
     } else {
       Label done;
@@ -525,8 +526,8 @@
     return true;
   }
   case 'w': {
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
     }
     ExternalReference map = ExternalReference::re_word_character_map();
@@ -538,8 +539,8 @@
   }
   case 'W': {
     Label done;
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ Branch(&done, hi, current_character(), Operand('z'));
     }
     ExternalReference map = ExternalReference::re_word_character_map();
@@ -547,7 +548,7 @@
     __ Addu(a0, a0, current_character());
     __ lbu(a0, MemOperand(a0, 0));
     BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
-    if (mode_ != ASCII) {
+    if (mode_ != LATIN1) {
       __ bind(&done);
     }
     return true;
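A sketch of the \w / \W test the emitted code performs, with a hypothetical kWordCharacterMap standing in for ExternalReference::re_word_character_map(): characters above 'z' are rejected up front (required for UC16 subjects, since the table covers only one-byte characters), and everything else indexes the 256-entry byte table.

#include <cstdint>

extern const uint8_t kWordCharacterMap[256];  // hypothetical table

bool IsWordChar(uint32_t c) {
  if (c > 'z') return false;        // beyond the table; never a word character
  return kWordCharacterMap[c] != 0;
}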
@@ -705,7 +706,7 @@
         __ Addu(a1, a1, Operand(a2));
         // a1 is length of string in characters.
 
-        ASSERT_EQ(0, num_saved_registers_ % 2);
+        DCHECK_EQ(0, num_saved_registers_ % 2);
         // Always an even number of capture registers. This allows us to
         // unroll the loop once to add an operation between a load of a register
         // and the following use of that register.
@@ -907,8 +908,8 @@
                                                     Label* on_end_of_input,
                                                     bool check_bounds,
                                                     int characters) {
-  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
-  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
+  DCHECK(cp_offset >= -1);      // ^ and \b can look behind one character.
+  DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
   if (check_bounds) {
     CheckPosition(cp_offset + characters - 1, on_end_of_input);
   }
@@ -993,7 +994,7 @@
 
 
 void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
-  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
   __ li(a0, Operand(to));
   __ sw(a0, register_location(register_index));
 }
@@ -1017,7 +1018,7 @@
 
 
 void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
-  ASSERT(reg_from <= reg_to);
+  DCHECK(reg_from <= reg_to);
   __ lw(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
   for (int reg = reg_from; reg <= reg_to; reg++) {
     __ sw(a0, register_location(reg));
@@ -1040,12 +1041,12 @@
 // Private methods:
 
 void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
-  int stack_alignment = OS::ActivationFrameAlignment();
+  int stack_alignment = base::OS::ActivationFrameAlignment();
 
   // Align the stack pointer and save the original sp value on the stack.
   __ mov(scratch, sp);
   __ Subu(sp, sp, Operand(kPointerSize));
-  ASSERT(IsPowerOf2(stack_alignment));
+  DCHECK(base::bits::IsPowerOfTwo32(stack_alignment));
   __ And(sp, sp, Operand(-stack_alignment));
   __ sw(scratch, MemOperand(sp));
 
@@ -1054,7 +1055,7 @@
   __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
 
   // We need to make room for the return address on the stack.
-  ASSERT(IsAligned(stack_alignment, kPointerSize));
+  DCHECK(IsAligned(stack_alignment, kPointerSize));
   __ Subu(sp, sp, Operand(stack_alignment));
 
   // Stack pointer now points to cell where return address is to be written.
@@ -1125,10 +1126,10 @@
 
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
   // Current string.
-  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
 
-  ASSERT(re_code->instruction_start() <= *return_address);
-  ASSERT(*return_address <=
+  DCHECK(re_code->instruction_start() <= *return_address);
+  DCHECK(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
   Object* result = isolate->stack_guard()->HandleInterrupts();
@@ -1156,8 +1157,8 @@
   }
 
   // String might have changed.
-  if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
-    // If we changed between an ASCII and an UC16 string, the specialized
+  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a Latin1 and a UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
     return RETRY;
@@ -1167,7 +1168,7 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+  DCHECK(StringShape(*subject_tmp).IsSequential() ||
       StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
@@ -1199,7 +1200,7 @@
 
 
 MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
-  ASSERT(register_index < (1<<30));
+  DCHECK(register_index < (1<<30));
   if (num_registers_ <= register_index) {
     num_registers_ = register_index + 1;
   }
@@ -1260,7 +1261,7 @@
 
 
 void RegExpMacroAssemblerMIPS::Push(Register source) {
-  ASSERT(!source.is(backtrack_stackpointer()));
+  DCHECK(!source.is(backtrack_stackpointer()));
   __ Addu(backtrack_stackpointer(),
           backtrack_stackpointer(),
           Operand(-kPointerSize));
@@ -1269,7 +1270,7 @@
 
 
 void RegExpMacroAssemblerMIPS::Pop(Register target) {
-  ASSERT(!target.is(backtrack_stackpointer()));
+  DCHECK(!target.is(backtrack_stackpointer()));
   __ lw(target, MemOperand(backtrack_stackpointer()));
   __ Addu(backtrack_stackpointer(), backtrack_stackpointer(), kPointerSize);
 }
@@ -1305,12 +1306,12 @@
   }
   // We assume that we cannot do unaligned loads on MIPS, so this function
   // must only be used to load a single character at a time.
-  ASSERT(characters == 1);
+  DCHECK(characters == 1);
   __ Addu(t5, end_of_input_address(), Operand(offset));
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     __ lbu(current_character(), MemOperand(t5, 0));
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     __ lhu(current_character(), MemOperand(t5, 0));
   }
 }
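A scalar model of LoadCurrentCharacterUnchecked: LATIN1 subjects load one byte (lbu above), UC16 subjects load one 16-bit unit (lhu above), with offsets taken relative to the end-of-input address as in the emitted code.

#include <cstdint>
#include <cstring>

uint32_t LoadCharModel(const uint8_t* end_of_input, int offset, bool latin1) {
  const uint8_t* p = end_of_input + offset;  // offsets are <= 0 in practice
  if (latin1) return *p;
  uint16_t c;
  std::memcpy(&c, p, sizeof(c));  // aligned in practice; memcpy keeps it legal C++
  return c;
}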
diff --git a/src/mips/regexp-macro-assembler-mips.h b/src/mips/regexp-macro-assembler-mips.h
index 921a848..c7d8f6d 100644
--- a/src/mips/regexp-macro-assembler-mips.h
+++ b/src/mips/regexp-macro-assembler-mips.h
@@ -6,10 +6,9 @@
 #ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
 #define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
 
-#include "src/mips/assembler-mips.h"
-#include "src/mips/assembler-mips-inl.h"
 #include "src/macro-assembler.h"
-#include "src/code.h"
+#include "src/mips/assembler-mips-inl.h"
+#include "src/mips/assembler-mips.h"
 #include "src/mips/macro-assembler-mips.h"
 
 namespace v8 {
@@ -198,7 +197,7 @@
 
   MacroAssembler* masm_;
 
-  // Which mode to generate code for (ASCII or UC16).
+  // Which mode to generate code for (Latin1 or UC16).
   Mode mode_;
 
   // One greater than maximal register index actually used.
diff --git a/src/mips/simulator-mips.cc b/src/mips/simulator-mips.cc
index dfb1ee3..fabca67 100644
--- a/src/mips/simulator-mips.cc
+++ b/src/mips/simulator-mips.cc
@@ -11,12 +11,12 @@
 
 #if V8_TARGET_ARCH_MIPS
 
-#include "src/cpu.h"
-#include "src/disasm.h"
 #include "src/assembler.h"
-#include "src/globals.h"    // Need the BitCast.
+#include "src/base/bits.h"
+#include "src/disasm.h"
 #include "src/mips/constants-mips.h"
 #include "src/mips/simulator-mips.h"
+#include "src/ostreams.h"
 
 
 // Only build the simulator if not compiling for real MIPS hardware.
@@ -67,11 +67,12 @@
   Simulator* sim_;
 
   int32_t GetRegisterValue(int regnum);
-  int32_t GetFPURegisterValueInt(int regnum);
-  int64_t GetFPURegisterValueLong(int regnum);
+  int32_t GetFPURegisterValue32(int regnum);
+  int64_t GetFPURegisterValue64(int regnum);
   float GetFPURegisterValueFloat(int regnum);
   double GetFPURegisterValueDouble(int regnum);
   bool GetValue(const char* desc, int32_t* value);
+  bool GetValue(const char* desc, int64_t* value);
 
   // Set or delete a breakpoint. Returns true if successful.
   bool SetBreakpoint(Instruction* breakpc);
@@ -107,7 +108,7 @@
   char** msg_address =
     reinterpret_cast<char**>(sim_->get_pc() + Instr::kInstrSize);
   char* msg = *msg_address;
-  ASSERT(msg != NULL);
+  DCHECK(msg != NULL);
 
   // Update this stop description.
   if (!watched_stops_[code].desc) {
@@ -160,20 +161,20 @@
 }
 
 
-int32_t MipsDebugger::GetFPURegisterValueInt(int regnum) {
+int32_t MipsDebugger::GetFPURegisterValue32(int regnum) {
   if (regnum == kNumFPURegisters) {
     return sim_->get_pc();
   } else {
-    return sim_->get_fpu_register(regnum);
+    return sim_->get_fpu_register_word(regnum);
   }
 }
 
 
-int64_t MipsDebugger::GetFPURegisterValueLong(int regnum) {
+int64_t MipsDebugger::GetFPURegisterValue64(int regnum) {
   if (regnum == kNumFPURegisters) {
     return sim_->get_pc();
   } else {
-    return sim_->get_fpu_register_long(regnum);
+    return sim_->get_fpu_register(regnum);
   }
 }
 
@@ -204,7 +205,7 @@
     *value = GetRegisterValue(regnum);
     return true;
   } else if (fpuregnum != kInvalidFPURegister) {
-    *value = GetFPURegisterValueInt(fpuregnum);
+    *value = GetFPURegisterValue32(fpuregnum);
     return true;
   } else if (strncmp(desc, "0x", 2) == 0) {
     return SScanF(desc, "%x", reinterpret_cast<uint32_t*>(value)) == 1;
@@ -215,6 +216,26 @@
 }
 
 
+bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
+  int regnum = Registers::Number(desc);
+  int fpuregnum = FPURegisters::Number(desc);
+
+  if (regnum != kInvalidRegister) {
+    *value = GetRegisterValue(regnum);
+    return true;
+  } else if (fpuregnum != kInvalidFPURegister) {
+    *value = GetFPURegisterValue64(fpuregnum);
+    return true;
+  } else if (strncmp(desc, "0x", 2) == 0) {
+    return SScanF(desc + 2, "%" SCNx64,
+                  reinterpret_cast<uint64_t*>(value)) == 1;
+  } else {
+    return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+  }
+  return false;
+}
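A minimal standalone equivalent of the hex branch in the new 64-bit GetValue overload; the <cinttypes> scan macro parses the digits after the "0x" prefix:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

bool ParseHex64(const char* desc, int64_t* value) {
  return std::sscanf(desc + 2, "%" SCNx64,
                     reinterpret_cast<uint64_t*>(value)) == 1;
}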
+
+
 bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
   // Check if a breakpoint can be set. If not return without any side-effects.
   if (sim_->break_pc_ != NULL) {
@@ -295,34 +316,76 @@
 
 
 void MipsDebugger::PrintAllRegsIncludingFPU() {
-#define FPU_REG_INFO(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
-        GetFPURegisterValueInt(n+1), \
-        GetFPURegisterValueInt(n), \
-                        GetFPURegisterValueDouble(n)
+#define FPU_REG_INFO32(n) FPURegisters::Name(n), FPURegisters::Name(n+1), \
+        GetFPURegisterValue32(n+1), \
+        GetFPURegisterValue32(n), \
+        GetFPURegisterValueDouble(n)
+
+#define FPU_REG_INFO64(n) FPURegisters::Name(n), \
+        GetFPURegisterValue64(n), \
+        GetFPURegisterValueDouble(n)
 
   PrintAllRegs();
 
   PrintF("\n\n");
   // f0, f1, f2, ... f31.
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(0) );
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(2) );
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(4) );
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(6) );
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(8) );
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(10));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(12));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(14));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(16));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(18));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(20));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(22));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(24));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(26));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(28));
-  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO(30));
+  // This must be a compile-time switch;
+  // the compiler will emit warnings otherwise.
+  if (kFpuMode == kFP64) {
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(0) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(1) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(2) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(3) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(4) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(5) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(6) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(7) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(8) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(9) );
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(10));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(11));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(12));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(13));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(14));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(15));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(16));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(17));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(18));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(19));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(20));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(21));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(22));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(23));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(24));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(25));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(26));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(27));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(28));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(29));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(30));
+    PrintF("%3s: 0x%016llx %16.4e\n", FPU_REG_INFO64(31));
+  } else {
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(0) );
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(2) );
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(4) );
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(6) );
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(8) );
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(10));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(12));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(14));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(16));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(18));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(20));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(22));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(24));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(26));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(28));
+    PrintF("%3s,%3s: 0x%08x%08x %16.4e\n", FPU_REG_INFO32(30));
+  }
 
 #undef REG_INFO
-#undef FPU_REG_INFO
+#undef FPU_REG_INFO32
+#undef FPU_REG_INFO64
 }
 
 
@@ -397,8 +460,6 @@
         done = true;
       } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
         if (argc == 2) {
-          int32_t value;
-          float fvalue;
           if (strcmp(arg1, "all") == 0) {
             PrintAllRegs();
           } else if (strcmp(arg1, "allf") == 0) {
@@ -408,24 +469,36 @@
             int fpuregnum = FPURegisters::Number(arg1);
 
             if (regnum != kInvalidRegister) {
+              int32_t value;
               value = GetRegisterValue(regnum);
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
             } else if (fpuregnum != kInvalidFPURegister) {
-              if (fpuregnum % 2 == 1) {
-                value = GetFPURegisterValueInt(fpuregnum);
-                fvalue = GetFPURegisterValueFloat(fpuregnum);
-                PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+              if (IsFp64Mode()) {
+                int64_t value;
+                double dvalue;
+                value = GetFPURegisterValue64(fpuregnum);
+                dvalue = GetFPURegisterValueDouble(fpuregnum);
+                PrintF("%3s: 0x%016llx %16.4e\n",
+                       FPURegisters::Name(fpuregnum), value, dvalue);
               } else {
-                double dfvalue;
-                int32_t lvalue1 = GetFPURegisterValueInt(fpuregnum);
-                int32_t lvalue2 = GetFPURegisterValueInt(fpuregnum + 1);
-                dfvalue = GetFPURegisterValueDouble(fpuregnum);
-                PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
-                       FPURegisters::Name(fpuregnum+1),
-                       FPURegisters::Name(fpuregnum),
-                       lvalue1,
-                       lvalue2,
-                       dfvalue);
+                if (fpuregnum % 2 == 1) {
+                  int32_t value;
+                  float fvalue;
+                  value = GetFPURegisterValue32(fpuregnum);
+                  fvalue = GetFPURegisterValueFloat(fpuregnum);
+                  PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
+                } else {
+                  double dfvalue;
+                  int32_t lvalue1 = GetFPURegisterValue32(fpuregnum);
+                  int32_t lvalue2 = GetFPURegisterValue32(fpuregnum + 1);
+                  dfvalue = GetFPURegisterValueDouble(fpuregnum);
+                  PrintF("%3s,%3s: 0x%08x%08x %16.4e\n",
+                         FPURegisters::Name(fpuregnum+1),
+                         FPURegisters::Name(fpuregnum),
+                         lvalue1,
+                         lvalue2,
+                         dfvalue);
+                }
               }
             } else {
               PrintF("%s unrecognized\n", arg1);
@@ -439,7 +512,7 @@
               int fpuregnum = FPURegisters::Number(arg1);
 
               if (fpuregnum != kInvalidFPURegister) {
-                value = GetFPURegisterValueInt(fpuregnum);
+                value = GetFPURegisterValue32(fpuregnum);
                 fvalue = GetFPURegisterValueFloat(fpuregnum);
                 PrintF("%s: 0x%08x %11.4e\n", arg1, value, fvalue);
               } else {
@@ -456,17 +529,18 @@
                  || (strcmp(cmd, "printobject") == 0)) {
         if (argc == 2) {
           int32_t value;
+          OFStream os(stdout);
           if (GetValue(arg1, &value)) {
             Object* obj = reinterpret_cast<Object*>(value);
-            PrintF("%s: \n", arg1);
+            os << arg1 << ": \n";
 #ifdef DEBUG
-            obj->PrintLn();
+            obj->Print(os);
+            os << "\n";
 #else
-            obj->ShortPrint();
-            PrintF("\n");
+            os << Brief(obj) << "\n";
 #endif
           } else {
-            PrintF("%s unrecognized\n", arg1);
+            os << arg1 << " unrecognized\n";
           }
         } else {
           PrintF("printobject <value>\n");
@@ -488,15 +562,28 @@
           next_arg++;
         }
 
-        int32_t words;
-        if (argc == next_arg) {
-          words = 10;
-        } else {
-          if (!GetValue(argv[next_arg], &words)) {
+        // TODO(palfia): optimize this.
+        if (IsFp64Mode()) {
+          int64_t words;
+          if (argc == next_arg) {
             words = 10;
+          } else {
+            if (!GetValue(argv[next_arg], &words)) {
+              words = 10;
+            }
           }
+          end = cur + words;
+        } else {
+          int32_t words;
+          if (argc == next_arg) {
+            words = 10;
+          } else {
+            if (!GetValue(argv[next_arg], &words)) {
+              words = 10;
+            }
+          }
+          end = cur + words;
         }
-        end = cur + words;
 
         while (cur < end) {
           PrintF("  0x%08x:  0x%08x %10d",
@@ -567,7 +654,7 @@
         }
       } else if (strcmp(cmd, "gdb") == 0) {
         PrintF("relinquishing control to gdb\n");
-        v8::internal::OS::DebugBreak();
+        v8::base::OS::DebugBreak();
         PrintF("regaining control from gdb\n");
       } else if (strcmp(cmd, "break") == 0) {
         if (argc == 2) {
@@ -753,8 +840,8 @@
 
 
 static bool ICacheMatch(void* one, void* two) {
-  ASSERT((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
-  ASSERT((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
   return one == two;
 }
 
@@ -791,7 +878,7 @@
     FlushOnePage(i_cache, start, bytes_to_flush);
     start += bytes_to_flush;
     size -= bytes_to_flush;
-    ASSERT_EQ(0, start & CachePage::kPageMask);
+    DCHECK_EQ(0, start & CachePage::kPageMask);
     offset = 0;
   }
   if (size != 0) {
@@ -816,10 +903,10 @@
 void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
                              intptr_t start,
                              int size) {
-  ASSERT(size <= CachePage::kPageSize);
-  ASSERT(AllOnOnePage(start, size - 1));
-  ASSERT((start & CachePage::kLineMask) == 0);
-  ASSERT((size & CachePage::kLineMask) == 0);
+  DCHECK(size <= CachePage::kPageSize);
+  DCHECK(AllOnOnePage(start, size - 1));
+  DCHECK((start & CachePage::kLineMask) == 0);
+  DCHECK((size & CachePage::kLineMask) == 0);
   void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
   int offset = (start & CachePage::kPageMask);
   CachePage* cache_page = GetCachePage(i_cache, page);
@@ -978,8 +1065,8 @@
 Simulator* Simulator::current(Isolate* isolate) {
   v8::internal::Isolate::PerIsolateThreadData* isolate_data =
        isolate->FindOrAllocatePerThreadDataForThisThread();
-  ASSERT(isolate_data != NULL);
-  ASSERT(isolate_data != NULL);
+  DCHECK(isolate_data != NULL);
 
   Simulator* sim = isolate_data->simulator();
   if (sim == NULL) {
@@ -994,7 +1081,7 @@
 // Sets the register in the architecture state. It will also deal with updating
 // Simulator internal state for special registers such as PC.
 void Simulator::set_register(int reg, int32_t value) {
-  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
   if (reg == pc) {
     pc_modified_ = true;
   }
@@ -1005,34 +1092,60 @@
 
 
 void Simulator::set_dw_register(int reg, const int* dbl) {
-  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
   registers_[reg] = dbl[0];
   registers_[reg + 1] = dbl[1];
 }
 
 
-void Simulator::set_fpu_register(int fpureg, int32_t value) {
-  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
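+  // Whole-register 64-bit writes are only meaningful in FP64 mode; FP32 code
+  // must use set_fpu_register_word()/set_fpu_register_hi_word() instead.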
+  DCHECK(IsFp64Mode());
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
   FPUregisters_[fpureg] = value;
 }
 
 
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+  // Set ONLY the lower 32 bits, leaving the upper bits untouched.
+  // TODO(plind): big endian issue.
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  int32_t* pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+  *pword = value;
+}
+
+
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+  // Set ONLY the upper 32 bits, leaving the lower bits untouched.
+  // TODO(plind): big endian issue.
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  int32_t* phiword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]) + 1;
+  *phiword = value;
+}
+
+
 void Simulator::set_fpu_register_float(int fpureg, float value) {
-  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  *BitCast<float*>(&FPUregisters_[fpureg]) = value;
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
 }
 
 
 void Simulator::set_fpu_register_double(int fpureg, double value) {
-  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  *BitCast<double*>(&FPUregisters_[fpureg]) = value;
+  if (IsFp64Mode()) {
+    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+    *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+  } else {
+    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
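+    // FP32 mode: a double occupies an even/odd register pair; the low word
+    // goes to the even register and the high word to the odd one.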
+    int64_t i64 = bit_cast<int64_t>(value);
+    set_fpu_register_word(fpureg, i64 & 0xffffffff);
+    set_fpu_register_word(fpureg + 1, i64 >> 32);
+  }
 }
 
 
 // Get the register from the architecture state. This function does handle
 // the special case of accessing the PC register.
 int32_t Simulator::get_register(int reg) const {
-  ASSERT((reg >= 0) && (reg < kNumSimuRegisters));
+  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
   if (reg == 0)
     return 0;
   else
@@ -1041,7 +1154,8 @@
 
 
 double Simulator::get_double_from_register_pair(int reg) {
-  ASSERT((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
+  // TODO(plind): bad ABI stuff, refactor or remove.
+  DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
 
   double dm_val = 0.0;
   // Read the bits from the unsigned integer register_[] array
@@ -1053,29 +1167,48 @@
 }
 
 
-int32_t Simulator::get_fpu_register(int fpureg) const {
-  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
+int64_t Simulator::get_fpu_register(int fpureg) const {
+  DCHECK(IsFp64Mode());
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
   return FPUregisters_[fpureg];
 }
 
 
-int64_t Simulator::get_fpu_register_long(int fpureg) const {
-  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  return *BitCast<int64_t*>(
-      const_cast<int32_t*>(&FPUregisters_[fpureg]));
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
 }
 
 
 float Simulator::get_fpu_register_float(int fpureg) const {
-  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters));
-  return *BitCast<float*>(
-      const_cast<int32_t*>(&FPUregisters_[fpureg]));
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
 }
 
 
 double Simulator::get_fpu_register_double(int fpureg) const {
-  ASSERT((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
-  return *BitCast<double*>(const_cast<int32_t*>(&FPUregisters_[fpureg]));
+  if (IsFp64Mode()) {
+    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+    return *bit_cast<double*>(&FPUregisters_[fpureg]);
+  } else {
+    DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters) && ((fpureg % 2) == 0));
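+    // FP32 mode: reassemble the double from the even/odd pair, low word from
+    // the even register, high word from the odd one.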
+    int64_t i64;
+    i64 = static_cast<uint32_t>(get_fpu_register_word(fpureg));
+    i64 |= static_cast<uint64_t>(get_fpu_register_word(fpureg + 1)) << 32;
+    return bit_cast<double>(i64);
+  }
 }
 
 
@@ -1088,6 +1221,7 @@
     *y = get_fpu_register_double(14);
     *z = get_register(a2);
   } else {
+    // TODO(plind): bad ABI stuff, refactor or remove.
     // We use a char buffer to get around the strict-aliasing rules which
     // otherwise allow the compiler to optimize away the copy.
     char buffer[sizeof(*x)];
@@ -1142,6 +1276,8 @@
 // Returns true if the operation was invalid.
 bool Simulator::set_fcsr_round_error(double original, double rounded) {
   bool ret = false;
+  double max_int32 = std::numeric_limits<int32_t>::max();
+  double min_int32 = std::numeric_limits<int32_t>::min();
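+  // Both bounds are exactly representable as doubles, so the comparisons
+  // against |rounded| below are exact.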
 
   if (!std::isfinite(original) || !std::isfinite(rounded)) {
     set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -1157,7 +1293,7 @@
     ret = true;
   }
 
-  if (rounded > INT_MAX || rounded < INT_MIN) {
+  if (rounded > max_int32 || rounded < min_int32) {
     set_fcsr_bit(kFCSROverflowFlagBit, true);
     // The reference is not really clear but it seems this is required:
     set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
@@ -1244,7 +1380,7 @@
   PrintF("Unaligned (double) read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
          addr,
          reinterpret_cast<intptr_t>(instr));
-  OS::Abort();
+  base::OS::Abort();
   return 0;
 }
 
@@ -1258,7 +1394,7 @@
   PrintF("Unaligned (double) write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
          addr,
          reinterpret_cast<intptr_t>(instr));
-  OS::Abort();
+  base::OS::Abort();
 }
 
 
@@ -1270,7 +1406,7 @@
   PrintF("Unaligned unsigned halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
          addr,
          reinterpret_cast<intptr_t>(instr));
-  OS::Abort();
+  base::OS::Abort();
   return 0;
 }
 
@@ -1283,7 +1419,7 @@
   PrintF("Unaligned signed halfword read at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
          addr,
          reinterpret_cast<intptr_t>(instr));
-  OS::Abort();
+  base::OS::Abort();
   return 0;
 }
 
@@ -1297,7 +1433,7 @@
   PrintF("Unaligned unsigned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
          addr,
          reinterpret_cast<intptr_t>(instr));
-  OS::Abort();
+  base::OS::Abort();
 }
 
 
@@ -1310,7 +1446,7 @@
   PrintF("Unaligned halfword write at 0x%08x, pc=0x%08" V8PRIxPTR "\n",
          addr,
          reinterpret_cast<intptr_t>(instr));
-  OS::Abort();
+  base::OS::Abort();
 }
 
 
@@ -1419,18 +1555,35 @@
       switch (redirection->type()) {
       case ExternalReference::BUILTIN_FP_FP_CALL:
       case ExternalReference::BUILTIN_COMPARE_CALL:
-        arg0 = get_fpu_register(f12);
-        arg1 = get_fpu_register(f13);
-        arg2 = get_fpu_register(f14);
-        arg3 = get_fpu_register(f15);
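+        // The two double arguments arrive in FPU registers and are passed on
+        // as four 32-bit words. FP64 mode holds each double in a single
+        // register (lo/hi words of f12 and f14); FP32 mode spreads each one
+        // across a register pair (f12/f13 and f14/f15).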
+        if (IsFp64Mode()) {
+          arg0 = get_fpu_register_word(f12);
+          arg1 = get_fpu_register_hi_word(f12);
+          arg2 = get_fpu_register_word(f14);
+          arg3 = get_fpu_register_hi_word(f14);
+        } else {
+          arg0 = get_fpu_register_word(f12);
+          arg1 = get_fpu_register_word(f13);
+          arg2 = get_fpu_register_word(f14);
+          arg3 = get_fpu_register_word(f15);
+        }
         break;
       case ExternalReference::BUILTIN_FP_CALL:
-        arg0 = get_fpu_register(f12);
-        arg1 = get_fpu_register(f13);
+        if (IsFp64Mode()) {
+          arg0 = get_fpu_register_word(f12);
+          arg1 = get_fpu_register_hi_word(f12);
+        } else {
+          arg0 = get_fpu_register_word(f12);
+          arg1 = get_fpu_register_word(f13);
+        }
         break;
       case ExternalReference::BUILTIN_FP_INT_CALL:
-        arg0 = get_fpu_register(f12);
-        arg1 = get_fpu_register(f13);
+        if (IsFp64Mode()) {
+          arg0 = get_fpu_register_word(f12);
+          arg1 = get_fpu_register_hi_word(f12);
+        } else {
+          arg0 = get_fpu_register_word(f12);
+          arg1 = get_fpu_register_word(f13);
+        }
         arg2 = get_register(a2);
         break;
       default:
@@ -1637,8 +1790,8 @@
 
 
 bool Simulator::IsEnabledStop(uint32_t code) {
-  ASSERT(code <= kMaxStopCode);
-  ASSERT(code > kMaxWatchpointCode);
+  DCHECK(code <= kMaxStopCode);
+  DCHECK(code > kMaxWatchpointCode);
   return !(watched_stops_[code].count & kStopDisabledBit);
 }
 
@@ -1658,7 +1811,7 @@
 
 
 void Simulator::IncreaseStopCounter(uint32_t code) {
-  ASSERT(code <= kMaxStopCode);
+  DCHECK(code <= kMaxStopCode);
   if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
     PrintF("Stop counter for code %i has overflowed.\n"
            "Enabling this code and reseting the counter to 0.\n", code);
@@ -1734,25 +1887,20 @@
   switch (op) {
     case COP1:    // Coprocessor instructions.
       switch (instr->RsFieldRaw()) {
-        case BC1:   // Handled in DecodeTypeImmed, should never come here.
-          UNREACHABLE();
-          break;
         case CFC1:
           // At the moment only FCSR is supported.
-          ASSERT(fs_reg == kFCSRRegister);
+          DCHECK(fs_reg == kFCSRRegister);
           *alu_out = FCSR_;
           break;
         case MFC1:
-          *alu_out = get_fpu_register(fs_reg);
+          *alu_out = get_fpu_register_word(fs_reg);
           break;
         case MFHC1:
-          UNIMPLEMENTED_MIPS();
+          *alu_out = get_fpu_register_hi_word(fs_reg);
           break;
         case CTC1:
         case MTC1:
         case MTHC1:
-          // Do the store in the execution step.
-          break;
         case S:
         case D:
         case W:
@@ -1761,7 +1909,8 @@
           // Do everything in the execution step.
           break;
         default:
-          UNIMPLEMENTED_MIPS();
+          // BC1, BC1EQZ and BC1NEZ are handled in DecodeTypeImmed and should
+          // never come here.
+          UNREACHABLE();
       }
       break;
     case COP1X:
@@ -1809,17 +1958,51 @@
         case SRAV:
           *alu_out = rt >> rs;
           break;
-        case MFHI:
-          *alu_out = get_register(HI);
+        case MFHI:  // MFHI == CLZ on R6.
+          if (!IsMipsArchVariant(kMips32r6)) {
+            DCHECK(instr->SaValue() == 0);
+            *alu_out = get_register(HI);
+          } else {
+            // MIPS spec: If no bits were set in GPR rs, the result written to
+            // GPR rd is 32.
+            DCHECK(instr->SaValue() == 1);
+            *alu_out = base::bits::CountLeadingZeros32(rs_u);
+          }
           break;
         case MFLO:
           *alu_out = get_register(LO);
           break;
-        case MULT:
-          *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+        case MULT:  // MULT == MUL_MUH.
+          if (!IsMipsArchVariant(kMips32r6)) {
+            *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+          } else {
+            switch (instr->SaValue()) {
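+              // MUL selects the low 32 bits of the 64-bit product and MUH
+              // the high 32; both need the full product computed here, and
+              // the half is picked in the execution step.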
+              case MUL_OP:
+              case MUH_OP:
+                *i64hilo = static_cast<int64_t>(rs) * static_cast<int64_t>(rt);
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          }
           break;
-        case MULTU:
-          *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+        case MULTU:  // MULTU == MUL_MUH_U.
+          if (!IsMipsArchVariant(kMips32r6)) {
+            *u64hilo = static_cast<uint64_t>(rs_u) *
+                static_cast<uint64_t>(rt_u);
+          } else {
+            switch (instr->SaValue()) {
+              case MUL_OP:
+              case MUH_OP:
+                *u64hilo = static_cast<uint64_t>(rs_u) *
+                    static_cast<uint64_t>(rt_u);
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          }
           break;
         case ADD:
           if (HaveSameSign(rs, rt)) {
@@ -1908,9 +2091,7 @@
         case CLZ:
           // MIPS32 spec: If no bits were set in GPR rs, the result written to
           // GPR rd is 32.
-          // GCC __builtin_clz: If input is 0, the result is undefined.
-          *alu_out =
-              rs_u == 0 ? 32 : CompilerIntrinsics::CountLeadingZeros(rs_u);
+          *alu_out = base::bits::CountLeadingZeros32(rs_u);
           break;
         default:
           UNREACHABLE();
@@ -1997,27 +2178,27 @@
   switch (op) {
     case COP1:
       switch (instr->RsFieldRaw()) {
-        case BC1:   // Branch on coprocessor condition.
-          UNREACHABLE();
-          break;
         case CFC1:
           set_register(rt_reg, alu_out);
+          break;
         case MFC1:
           set_register(rt_reg, alu_out);
           break;
         case MFHC1:
-          UNIMPLEMENTED_MIPS();
+          set_register(rt_reg, alu_out);
           break;
         case CTC1:
           // At the moment only FCSR is supported.
-          ASSERT(fs_reg == kFCSRRegister);
+          DCHECK(fs_reg == kFCSRRegister);
           FCSR_ = registers_[rt_reg];
           break;
         case MTC1:
-          FPUregisters_[fs_reg] = registers_[rt_reg];
+          // Hardware zeroes the upper 32 bits on mtc1.
+          set_fpu_register_hi_word(fs_reg, 0);
+          set_fpu_register_word(fs_reg, registers_[rt_reg]);
           break;
         case MTHC1:
-          UNIMPLEMENTED_MIPS();
+          set_fpu_register_hi_word(fs_reg, registers_[rt_reg]);
           break;
         case S:
           float f;
@@ -2026,20 +2207,9 @@
               f = get_fpu_register_float(fs_reg);
               set_fpu_register_double(fd_reg, static_cast<double>(f));
               break;
-            case CVT_W_S:
-            case CVT_L_S:
-            case TRUNC_W_S:
-            case TRUNC_L_S:
-            case ROUND_W_S:
-            case ROUND_L_S:
-            case FLOOR_W_S:
-            case FLOOR_L_S:
-            case CEIL_W_S:
-            case CEIL_L_S:
-            case CVT_PS_S:
-              UNIMPLEMENTED_MIPS();
-              break;
             default:
+              // CVT_W_S, CVT_L_S, TRUNC_W_S, TRUNC_L_S, ROUND_W_S, ROUND_L_S,
+              // FLOOR_W_S, FLOOR_L_S, CEIL_W_S, CEIL_L_S and CVT_PS_S are
+              // unimplemented.
               UNREACHABLE();
           }
           break;
@@ -2102,7 +2272,7 @@
               break;
             case CVT_W_D:   // Convert double to word.
               // Rounding modes are not yet supported.
-              ASSERT((FCSR_ & 3) == 0);
+              DCHECK((FCSR_ & 3) == 0);
               // In rounding mode 0 it should behave like ROUND.
             case ROUND_W_D:  // Round double to word (round half to even).
               {
@@ -2113,9 +2283,9 @@
                   // round to the even one.
                   result--;
                 }
-                set_fpu_register(fd_reg, result);
+                set_fpu_register_word(fd_reg, result);
                 if (set_fcsr_round_error(fs, rounded)) {
-                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                  set_fpu_register_word(fd_reg, kFPUInvalidResult);
                 }
               }
               break;
@@ -2123,9 +2293,9 @@
               {
                 double rounded = trunc(fs);
                 int32_t result = static_cast<int32_t>(rounded);
-                set_fpu_register(fd_reg, result);
+                set_fpu_register_word(fd_reg, result);
                 if (set_fcsr_round_error(fs, rounded)) {
-                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                  set_fpu_register_word(fd_reg, kFPUInvalidResult);
                 }
               }
               break;
@@ -2133,9 +2303,9 @@
               {
                 double rounded = std::floor(fs);
                 int32_t result = static_cast<int32_t>(rounded);
-                set_fpu_register(fd_reg, result);
+                set_fpu_register_word(fd_reg, result);
                 if (set_fcsr_round_error(fs, rounded)) {
-                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                  set_fpu_register_word(fd_reg, kFPUInvalidResult);
                 }
               }
               break;
@@ -2143,9 +2313,9 @@
               {
                 double rounded = std::ceil(fs);
                 int32_t result = static_cast<int32_t>(rounded);
-                set_fpu_register(fd_reg, result);
+                set_fpu_register_word(fd_reg, result);
                 if (set_fcsr_round_error(fs, rounded)) {
-                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                  set_fpu_register_word(fd_reg, kFPUInvalidResult);
                 }
               }
               break;
@@ -2155,34 +2325,54 @@
             case CVT_L_D: {  // Mips32r2: Truncate double to 64-bit long-word.
               double rounded = trunc(fs);
               i64 = static_cast<int64_t>(rounded);
-              set_fpu_register(fd_reg, i64 & 0xffffffff);
-              set_fpu_register(fd_reg + 1, i64 >> 32);
+              if (IsFp64Mode()) {
+                set_fpu_register(fd_reg, i64);
+              } else {
+                set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+                set_fpu_register_word(fd_reg + 1, i64 >> 32);
+              }
               break;
             }
             case TRUNC_L_D: {  // Mips32r2 instruction.
               double rounded = trunc(fs);
               i64 = static_cast<int64_t>(rounded);
-              set_fpu_register(fd_reg, i64 & 0xffffffff);
-              set_fpu_register(fd_reg + 1, i64 >> 32);
+              if (IsFp64Mode()) {
+                set_fpu_register(fd_reg, i64);
+              } else {
+                set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+                set_fpu_register_word(fd_reg + 1, i64 >> 32);
+              }
               break;
             }
             case ROUND_L_D: {  // Mips32r2 instruction.
               double rounded =
                   fs > 0 ? std::floor(fs + 0.5) : std::ceil(fs - 0.5);
               i64 = static_cast<int64_t>(rounded);
-              set_fpu_register(fd_reg, i64 & 0xffffffff);
-              set_fpu_register(fd_reg + 1, i64 >> 32);
+              if (IsFp64Mode()) {
+                set_fpu_register(fd_reg, i64);
+              } else {
+                set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+                set_fpu_register_word(fd_reg + 1, i64 >> 32);
+              }
               break;
             }
             case FLOOR_L_D:  // Mips32r2 instruction.
               i64 = static_cast<int64_t>(std::floor(fs));
-              set_fpu_register(fd_reg, i64 & 0xffffffff);
-              set_fpu_register(fd_reg + 1, i64 >> 32);
+              if (IsFp64Mode()) {
+                set_fpu_register(fd_reg, i64);
+              } else {
+                set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+                set_fpu_register_word(fd_reg + 1, i64 >> 32);
+              }
               break;
             case CEIL_L_D:  // Mips32r2 instruction.
               i64 = static_cast<int64_t>(std::ceil(fs));
-              set_fpu_register(fd_reg, i64 & 0xffffffff);
-              set_fpu_register(fd_reg + 1, i64 >> 32);
+              if (IsFp64Mode()) {
+                set_fpu_register(fd_reg, i64);
+              } else {
+                set_fpu_register_word(fd_reg, i64 & 0xffffffff);
+                set_fpu_register_word(fd_reg + 1, i64 >> 32);
+              }
               break;
             case C_F_D:
               UNIMPLEMENTED_MIPS();
@@ -2194,35 +2384,92 @@
         case W:
           switch (instr->FunctionFieldRaw()) {
             case CVT_S_W:   // Convert word to float (single).
-              alu_out = get_fpu_register(fs_reg);
+              alu_out = get_fpu_register_signed_word(fs_reg);
               set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
               break;
             case CVT_D_W:   // Convert word to double.
-              alu_out = get_fpu_register(fs_reg);
+              alu_out = get_fpu_register_signed_word(fs_reg);
               set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
               break;
-            default:
+            default:  // Mips32r6 CMP.S instructions are unimplemented.
               UNREACHABLE();
           }
           break;
         case L:
+          fs = get_fpu_register_double(fs_reg);
+          ft = get_fpu_register_double(ft_reg);
           switch (instr->FunctionFieldRaw()) {
           case CVT_D_L:  // Mips32r2 instruction.
             // Watch the signs here, we want 2 32-bit vals
             // to make a sign-64.
-            i64 = static_cast<uint32_t>(get_fpu_register(fs_reg));
-            i64 |= static_cast<int64_t>(get_fpu_register(fs_reg + 1)) << 32;
+            if (IsFp64Mode()) {
+              i64 = get_fpu_register(fs_reg);
+            } else {
+              i64 = static_cast<uint32_t>(get_fpu_register_word(fs_reg));
+              i64 |= static_cast<int64_t>(
+                  get_fpu_register_word(fs_reg + 1)) << 32;
+            }
             set_fpu_register_double(fd_reg, static_cast<double>(i64));
             break;
             case CVT_S_L:
               UNIMPLEMENTED_MIPS();
               break;
-            default:
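+            // r6 FP compares write an all-ones mask (-1) to fd when the
+            // predicate holds and 0 otherwise; the CMP_U* forms also succeed
+            // on unordered (NaN) operands.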
+            case CMP_AF:  // Mips32r6 CMP.D instructions.
+              UNIMPLEMENTED_MIPS();
+              break;
+            case CMP_UN:
+              if (std::isnan(fs) || std::isnan(ft)) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_EQ:
+              if (fs == ft) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_UEQ:
+              if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_LT:
+              if (fs < ft) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_ULT:
+              if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_LE:
+              if (fs <= ft) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_ULE:
+              if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            default:  // CMP_OR, CMP_UNE and CMP_NE are unimplemented.
               UNREACHABLE();
           }
           break;
-        case PS:
-          break;
         default:
           UNREACHABLE();
       }
@@ -2262,30 +2509,100 @@
         }
         // Instructions using HI and LO registers.
         case MULT:
-          set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
-          set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+          if (!IsMipsArchVariant(kMips32r6)) {
+            set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+            set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+          } else {
+            switch (instr->SaValue()) {
+              case MUL_OP:
+                set_register(rd_reg,
+                    static_cast<int32_t>(i64hilo & 0xffffffff));
+                break;
+              case MUH_OP:
+                set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32));
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          }
           break;
         case MULTU:
-          set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
-          set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+          if (!IsMipsArchVariant(kMips32r6)) {
+            set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+            set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+          } else {
+            switch (instr->SaValue()) {
+              case MUL_OP:
+                set_register(rd_reg,
+                    static_cast<int32_t>(u64hilo & 0xffffffff));
+                break;
+              case MUH_OP:
+                set_register(rd_reg, static_cast<int32_t>(u64hilo >> 32));
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          }
           break;
         case DIV:
-          // Divide by zero and overflow was not checked in the configuration
-          // step - div and divu do not raise exceptions. On division by 0
-          // the result will be UNPREDICTABLE. On overflow (INT_MIN/-1),
-          // return INT_MIN which is what the hardware does.
-          if (rs == INT_MIN && rt == -1) {
-            set_register(LO, INT_MIN);
-            set_register(HI, 0);
-          } else if (rt != 0) {
-            set_register(LO, rs / rt);
-            set_register(HI, rs % rt);
+          if (IsMipsArchVariant(kMips32r6)) {
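+            // r6 replaces HI/LO-based division: DIV_OP writes the quotient
+            // and MOD_OP the remainder directly to rd, selected by the sa
+            // field.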
+            switch (instr->SaValue()) {
+              case DIV_OP:
+                if (rs == INT_MIN && rt == -1) {
+                  set_register(rd_reg, INT_MIN);
+                } else if (rt != 0) {
+                  set_register(rd_reg, rs / rt);
+                }
+                break;
+              case MOD_OP:
+                if (rs == INT_MIN && rt == -1) {
+                  set_register(rd_reg, 0);
+                } else if (rt != 0) {
+                  set_register(rd_reg, rs % rt);
+                }
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          } else {
+            // Divide by zero and overflow was not checked in the
+            // configuration step - div and divu do not raise exceptions. On
+            // division by 0 the result will be UNPREDICTABLE. On overflow
+            // (INT_MIN/-1), return INT_MIN which is what the hardware does.
+            if (rs == INT_MIN && rt == -1) {
+              set_register(LO, INT_MIN);
+              set_register(HI, 0);
+            } else if (rt != 0) {
+              set_register(LO, rs / rt);
+              set_register(HI, rs % rt);
+            }
           }
           break;
         case DIVU:
-          if (rt_u != 0) {
-            set_register(LO, rs_u / rt_u);
-            set_register(HI, rs_u % rt_u);
+          if (IsMipsArchVariant(kMips32r6)) {
+            switch (instr->SaValue()) {
+              case DIV_OP:
+                if (rt_u != 0) {
+                  set_register(rd_reg, rs_u / rt_u);
+                }
+                break;
+              case MOD_OP:
+                if (rt_u != 0) {
+                  set_register(rd_reg, rs_u % rt_u);
+                }
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          } else {
+            if (rt_u != 0) {
+              set_register(LO, rs_u / rt_u);
+              set_register(HI, rs_u % rt_u);
+            }
           }
           break;
         // Break and trap instructions.
@@ -2367,6 +2684,7 @@
   int16_t  imm16  = instr->Imm16Value();
 
   int32_t  ft_reg = instr->FtValue();  // Destination register.
+  int64_t  ft;
 
   // Zero extended immediate.
   uint32_t  oe_imm16 = 0xffff & imm16;
@@ -2411,6 +2729,28 @@
             next_pc = current_pc + kBranchReturnOffset;
           }
           break;
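+        // r6 branches on an FPU condition held in bit 0 of register ft:
+        // BC1EQZ is taken when the bit is clear, BC1NEZ when it is set.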
+        case BC1EQZ:
+          ft = get_fpu_register(ft_reg);
+          do_branch = (ft & 0x1) == 0;
+          execute_branch_delay_instruction = true;
+          // Set next_pc.
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
+          break;
+        case BC1NEZ:
+          ft = get_fpu_register(ft_reg);
+          do_branch = (ft & 0x1) != 0;
+          execute_branch_delay_instruction = true;
+          // Set next_pc.
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
+          break;
         default:
           UNREACHABLE();
       }
@@ -2645,14 +2985,15 @@
       WriteW(addr, mem_value, instr);
       break;
     case LWC1:
-      set_fpu_register(ft_reg, alu_out);
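+      // lwc1 loads one 32-bit word; clear the upper half so a later 64-bit
+      // read of this register is well defined in FP64 mode.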
+      set_fpu_register_hi_word(ft_reg, 0);
+      set_fpu_register_word(ft_reg, alu_out);
       break;
     case LDC1:
       set_fpu_register_double(ft_reg, fp_out);
       break;
     case SWC1:
       addr = rs + se_imm16;
-      WriteW(addr, get_fpu_register(ft_reg), instr);
+      WriteW(addr, get_fpu_register_word(ft_reg), instr);
       break;
     case SDC1:
       addr = rs + se_imm16;
@@ -2846,7 +3187,7 @@
   // Set up arguments.
 
   // First four arguments passed in registers.
-  ASSERT(argument_count >= 4);
+  DCHECK(argument_count >= 4);
   set_register(a0, va_arg(parameters, int32_t));
   set_register(a1, va_arg(parameters, int32_t));
   set_register(a2, va_arg(parameters, int32_t));
@@ -2857,8 +3198,8 @@
   // Compute position of stack on entry to generated code.
   int entry_stack = (original_stack - (argument_count - 4) * sizeof(int32_t)
                                     - kCArgsSlotsSize);
-  if (OS::ActivationFrameAlignment() != 0) {
-    entry_stack &= -OS::ActivationFrameAlignment();
+  if (base::OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -base::OS::ActivationFrameAlignment();
   }
   // Store remaining arguments on stack, from low to high memory.
   intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
@@ -2885,7 +3226,7 @@
     set_fpu_register_double(f14, d1);
   } else {
     int buffer[2];
-    ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
+    DCHECK(sizeof(buffer[0]) * 2 == sizeof(d0));
     memcpy(buffer, &d0, sizeof(d0));
     set_dw_register(a0, buffer);
     memcpy(buffer, &d1, sizeof(d1));
diff --git a/src/mips/simulator-mips.h b/src/mips/simulator-mips.h
index 20dde25..85f6477 100644
--- a/src/mips/simulator-mips.h
+++ b/src/mips/simulator-mips.h
@@ -70,8 +70,8 @@
 #else  // !defined(USE_SIMULATOR)
 // Running with a simulator.
 
-#include "src/hashmap.h"
 #include "src/assembler.h"
+#include "src/hashmap.h"
 
 namespace v8 {
 namespace internal {
@@ -162,11 +162,15 @@
   int32_t get_register(int reg) const;
   double get_double_from_register_pair(int reg);
   // Same for FPURegisters.
-  void set_fpu_register(int fpureg, int32_t value);
+  void set_fpu_register(int fpureg, int64_t value);
+  void set_fpu_register_word(int fpureg, int32_t value);
+  void set_fpu_register_hi_word(int fpureg, int32_t value);
   void set_fpu_register_float(int fpureg, float value);
   void set_fpu_register_double(int fpureg, double value);
-  int32_t get_fpu_register(int fpureg) const;
-  int64_t get_fpu_register_long(int fpureg) const;
+  int64_t get_fpu_register(int fpureg) const;
+  int32_t get_fpu_register_word(int fpureg) const;
+  int32_t get_fpu_register_signed_word(int fpureg) const;
+  int32_t get_fpu_register_hi_word(int fpureg) const;
   float get_fpu_register_float(int fpureg) const;
   double get_fpu_register_double(int fpureg) const;
   void set_fcsr_bit(uint32_t cc, bool value);
@@ -338,7 +342,9 @@
   // Registers.
   int32_t registers_[kNumSimuRegisters];
   // Coprocessor Registers.
-  int32_t FPUregisters_[kNumFPURegisters];
+  // Note: FP32 mode uses only the lower 32 bits of each element;
+  // the upper 32 bits are unpredictable.
+  int64_t FPUregisters_[kNumFPURegisters];
   // FPU control register.
   uint32_t FCSR_;
 
diff --git a/src/mips/stub-cache-mips.cc b/src/mips/stub-cache-mips.cc
deleted file mode 100644
index 13e7e4b..0000000
--- a/src/mips/stub-cache-mips.cc
+++ /dev/null
@@ -1,1509 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_MIPS
-
-#include "src/ic-inl.h"
-#include "src/codegen.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
-                       MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register receiver,
-                       Register name,
-                       // Number of the cache entry, not scaled.
-                       Register offset,
-                       Register scratch,
-                       Register scratch2,
-                       Register offset_scratch) {
-  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
-  uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address());
-  uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address());
-  uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address());
-
-  // Check the relative positions of the address fields.
-  ASSERT(value_off_addr > key_off_addr);
-  ASSERT((value_off_addr - key_off_addr) % 4 == 0);
-  ASSERT((value_off_addr - key_off_addr) < (256 * 4));
-  ASSERT(map_off_addr > key_off_addr);
-  ASSERT((map_off_addr - key_off_addr) % 4 == 0);
-  ASSERT((map_off_addr - key_off_addr) < (256 * 4));
-
-  Label miss;
-  Register base_addr = scratch;
-  scratch = no_reg;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ sll(offset_scratch, offset, 1);
-  __ Addu(offset_scratch, offset_scratch, offset);
-
-  // Calculate the base address of the entry.
-  __ li(base_addr, Operand(key_offset));
-  __ sll(at, offset_scratch, kPointerSizeLog2);
-  __ Addu(base_addr, base_addr, at);
-
-  // Check that the key in the entry matches the name.
-  __ lw(at, MemOperand(base_addr, 0));
-  __ Branch(&miss, ne, name, Operand(at));
-
-  // Check the map matches.
-  __ lw(at, MemOperand(base_addr, map_off_addr - key_off_addr));
-  __ lw(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Branch(&miss, ne, at, Operand(scratch2));
-
-  // Get the code entry from the cache.
-  Register code = scratch2;
-  scratch2 = no_reg;
-  __ lw(code, MemOperand(base_addr, value_off_addr - key_off_addr));
-
-  // Check that the flags match what we're looking for.
-  Register flags_reg = base_addr;
-  base_addr = no_reg;
-  __ lw(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
-  __ And(flags_reg, flags_reg, Operand(~Code::kFlagsNotUsedInLookup));
-  __ Branch(&miss, ne, flags_reg, Operand(flags));
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ Addu(at, code, Operand(Code::kHeaderSize - kHeapObjectTag));
-  __ Jump(at);
-
-  // Miss: fall through.
-  __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                    Label* miss_label,
-                                                    Register receiver,
-                                                    Handle<Name> name,
-                                                    Register scratch0,
-                                                    Register scratch1) {
-  ASSERT(name->IsUniqueName());
-  ASSERT(!receiver.is(scratch0));
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-
-  Label done;
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  Register map = scratch1;
-  __ lw(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ lbu(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
-  __ And(scratch0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
-  __ Branch(miss_label, ne, scratch0, Operand(zero_reg));
-
-  // Check that receiver is a JSObject.
-  __ lbu(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
-  __ Branch(miss_label, lt, scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
-
-  // Load properties array.
-  Register properties = scratch0;
-  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-  // Check that the properties array is a dictionary.
-  __ lw(map, FieldMemOperand(properties, HeapObject::kMapOffset));
-  Register tmp = properties;
-  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
-  __ Branch(miss_label, ne, map, Operand(tmp));
-
-  // Restore the temporarily used register.
-  __ lw(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
-
-
-  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   receiver,
-                                                   properties,
-                                                   name,
-                                                   scratch1);
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra,
-                              Register extra2,
-                              Register extra3) {
-  Isolate* isolate = masm->isolate();
-  Label miss;
-
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 12.
-  ASSERT(sizeof(Entry) == 12);
-
-  // Make sure the flags does not name a specific type.
-  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Make sure that there are no register conflicts.
-  ASSERT(!scratch.is(receiver));
-  ASSERT(!scratch.is(name));
-  ASSERT(!extra.is(receiver));
-  ASSERT(!extra.is(name));
-  ASSERT(!extra.is(scratch));
-  ASSERT(!extra2.is(receiver));
-  ASSERT(!extra2.is(name));
-  ASSERT(!extra2.is(scratch));
-  ASSERT(!extra2.is(extra));
-
-  // Check register validity.
-  ASSERT(!scratch.is(no_reg));
-  ASSERT(!extra.is(no_reg));
-  ASSERT(!extra2.is(no_reg));
-  ASSERT(!extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1,
-                      extra2, extra3);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ lw(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
-  __ lw(at, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  __ Addu(scratch, scratch, at);
-  uint32_t mask = kPrimaryTableSize - 1;
-  // We shift out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.
-  __ srl(scratch, scratch, kHeapObjectTagSize);
-  __ Xor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask));
-  __ And(scratch, scratch, Operand(mask));
-
-  // Probe the primary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kPrimary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ srl(at, name, kHeapObjectTagSize);
-  __ Subu(scratch, scratch, at);
-  uint32_t mask2 = kSecondaryTableSize - 1;
-  __ Addu(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2));
-  __ And(scratch, scratch, Operand(mask2));
-
-  // Probe the secondary table.
-  ProbeTable(isolate,
-             masm,
-             flags,
-             kSecondary,
-             receiver,
-             name,
-             scratch,
-             extra,
-             extra2,
-             extra3);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1,
-                      extra2, extra3);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                       int index,
-                                                       Register prototype) {
-  // Load the global or builtins object from the current context.
-  __ lw(prototype,
-        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  // Load the native context from the global or builtins object.
-  __ lw(prototype,
-         FieldMemOperand(prototype, GlobalObject::kNativeContextOffset));
-  // Load the function from the native context.
-  __ lw(prototype, MemOperand(prototype, Context::SlotOffset(index)));
-  // Load the initial map.  The global functions all have initial maps.
-  __ lw(prototype,
-         FieldMemOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
-  // Check we're still in the same context.
-  Register scratch = prototype;
-  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ lw(scratch, MemOperand(cp, offset));
-  __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index)));
-  __ li(at, function);
-  __ Branch(miss, ne, at, Operand(scratch));
-
-  // Load its initial map. The global functions all have initial maps.
-  __ li(prototype, Handle<Map>(function->initial_map()));
-  // Load the prototype from the initial map.
-  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            bool inobject,
-                                            int index,
-                                            Representation representation) {
-  ASSERT(!representation.IsDouble());
-  int offset = index * kPointerSize;
-  if (!inobject) {
-    // Calculate the offset into the properties array.
-    offset = offset + FixedArray::kHeaderSize;
-    __ lw(dst, FieldMemOperand(src, JSObject::kPropertiesOffset));
-    src = dst;
-  }
-  __ lw(dst, FieldMemOperand(src, offset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register scratch,
-                                           Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss_label);
-
-  // Check that the object is a JS array.
-  __ GetObjectType(receiver, scratch, scratch);
-  __ Branch(miss_label, ne, scratch, Operand(JS_ARRAY_TYPE));
-
-  // Load length directly from the JS array.
-  __ Ret(USE_DELAY_SLOT);
-  __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset));
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                                 Register receiver,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, scratch1);
-}
-
-
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
-                                             Handle<JSGlobalObject> global,
-                                             Handle<Name> name,
-                                             Register scratch,
-                                             Label* miss) {
-  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
-  ASSERT(cell->value()->IsTheHole());
-  __ li(scratch, Operand(cell));
-  __ lw(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
-  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-  __ Branch(miss, ne, scratch, Operand(at));
-}
-
-
-void StoreStubCompiler::GenerateNegativeHolderLookup(
-    MacroAssembler* masm,
-    Handle<JSObject> holder,
-    Register holder_reg,
-    Handle<Name> name,
-    Label* miss) {
-  if (holder->IsJSGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
-  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
-    GenerateDictionaryNegativeLookup(
-        masm, miss, holder_reg, name, scratch1(), scratch2());
-  }
-}
-
-
-// Generate StoreTransition code; the value is passed in register a0.
-// After executing the generated code, receiver_reg and name_reg may be
-// clobbered.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
-                                                Handle<JSObject> object,
-                                                LookupResult* lookup,
-                                                Handle<Map> transition,
-                                                Handle<Name> name,
-                                                Register receiver_reg,
-                                                Register storage_reg,
-                                                Register value_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register scratch3,
-                                                Label* miss_label,
-                                                Label* slow) {
-  // a0 : value.
-  Label exit;
-
-  int descriptor = transition->LastAdded();
-  DescriptorArray* descriptors = transition->instance_descriptors();
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  Representation representation = details.representation();
-  ASSERT(!representation.IsNone());
-
-  if (details.type() == CONSTANT) {
-    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
-    __ li(scratch1, constant);
-    __ Branch(miss_label, ne, value_reg, Operand(scratch1));
-  } else if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = descriptors->GetFieldType(descriptor);
-    HeapType::Iterator<Map> it = field_type->Classes();
-    Handle<Map> current;
-    if (!it.Done()) {
-      __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-      Label do_store;
-      while (true) {
-        // Do the CompareMap() directly within the Branch() functions.
-        current = it.Current();
-        it.Advance();
-        if (it.Done()) {
-          __ Branch(miss_label, ne, scratch1, Operand(current));
-          break;
-        }
-        __ Branch(&do_store, eq, scratch1, Operand(current));
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    Label do_store, heap_number;
-    __ LoadRoot(scratch3, Heap::kHeapNumberMapRootIndex);
-    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, scratch3, slow);
-
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiUntag(scratch1, value_reg);
-    __ mtc1(scratch1, f6);
-    __ cvt_d_w(f4, f6);
-    __ jmp(&do_store);
-
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, scratch1, Heap::kHeapNumberMapRootIndex,
-                miss_label, DONT_DO_SMI_CHECK);
-    __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ bind(&do_store);
-    __ sdc1(f4, FieldMemOperand(storage_reg, HeapNumber::kValueOffset));
-  }
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  // Perform map transition for the receiver if necessary.
-  if (details.type() == FIELD &&
-      object->map()->unused_property_fields() == 0) {
-    // The properties must be extended before we can store the value.
-    // We jump to a runtime call that extends the properties array.
-    __ push(receiver_reg);
-    __ li(a2, Operand(transition));
-    __ Push(a2, a0);
-    __ TailCallExternalReference(
-           ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
-                             masm->isolate()),
-           3, 1);
-    return;
-  }
-
-  // Update the map of the object.
-  __ li(scratch1, Operand(transition));
-  __ sw(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
-
-  // Update the write barrier for the map field.
-  __ RecordWriteField(receiver_reg,
-                      HeapObject::kMapOffset,
-                      scratch1,
-                      scratch2,
-                      kRAHasNotBeenSaved,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  if (details.type() == CONSTANT) {
-    ASSERT(value_reg.is(a0));
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a0);
-    return;
-  }
-
-  int index = transition->instance_descriptors()->GetFieldIndex(
-      transition->LastAdded());
-
-  // Adjust for the number of properties stored in the object. Even in the
-  // face of a transition we can use the old map here because the size of
-  // the object and the number of in-object properties are not going to
-  // change.
-  index -= object->map()->inobject_properties();
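-  // For example, with 4 in-object properties a field index of 2 yields
-  // index == -2 (an in-object slot), while a field index of 5 yields
-  // index == 1, i.e. slot 1 of the out-of-object properties array.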
-
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
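-  // Only a tagged representation can hold either a smi or a heap object,
-  // hence INLINE_SMI_CHECK above; the other representations either skip
-  // the write barrier or are already known not to hold a smi here.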
-  if (index < 0) {
-    // Set the property straight into the object.
-    int offset = object->map()->instance_size() + (index * kPointerSize);
-    if (representation.IsDouble()) {
-      __ sw(storage_reg, FieldMemOperand(receiver_reg, offset));
-    } else {
-      __ sw(value_reg, FieldMemOperand(receiver_reg, offset));
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(receiver_reg,
-                          offset,
-                          storage_reg,
-                          scratch1,
-                          kRAHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    // Get the properties array.
-    __ lw(scratch1,
-          FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-    if (representation.IsDouble()) {
-      __ sw(storage_reg, FieldMemOperand(scratch1, offset));
-    } else {
-      __ sw(value_reg, FieldMemOperand(scratch1, offset));
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(scratch1,
-                          offset,
-                          storage_reg,
-                          receiver_reg,
-                          kRAHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  // Return the value (register v0).
-  ASSERT(value_reg.is(a0));
-  __ bind(&exit);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-}
-
-
-// Generate StoreField code; the value is passed in register a0.
-// When leaving the generated code after success, receiver_reg and name_reg
-// may be clobbered. On a branch to miss_label, the receiver and name
-// registers retain their original values.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                           Handle<JSObject> object,
-                                           LookupResult* lookup,
-                                           Register receiver_reg,
-                                           Register name_reg,
-                                           Register value_reg,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* miss_label) {
-  // a0 : value
-  Label exit;
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  FieldIndex index = lookup->GetFieldIndex();
-
-  Representation representation = lookup->representation();
-  ASSERT(!representation.IsNone());
-  if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = lookup->GetFieldType();
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      __ lw(scratch1, FieldMemOperand(value_reg, HeapObject::kMapOffset));
-      Label do_store;
-      Handle<Map> current;
-      while (true) {
-        // Do the CompareMap() directly within the Branch() functions.
-        current = it.Current();
-        it.Advance();
-        if (it.Done()) {
-          __ Branch(miss_label, ne, scratch1, Operand(current));
-          break;
-        }
-        __ Branch(&do_store, eq, scratch1, Operand(current));
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    // Load the double storage.
-    if (index.is_inobject()) {
-      __ lw(scratch1, FieldMemOperand(receiver_reg, index.offset()));
-    } else {
-      __ lw(scratch1,
-            FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-      __ lw(scratch1, FieldMemOperand(scratch1, index.offset()));
-    }
-
-    // Store the value into the storage.
-    Label do_store, heap_number;
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiUntag(scratch2, value_reg);
-    __ mtc1(scratch2, f6);
-    __ cvt_d_w(f4, f6);
-    __ jmp(&do_store);
-
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, scratch2, Heap::kHeapNumberMapRootIndex,
-                miss_label, DONT_DO_SMI_CHECK);
-    __ ldc1(f4, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ bind(&do_store);
-    __ sdc1(f4, FieldMemOperand(scratch1, HeapNumber::kValueOffset));
-    // Return the value (register v0).
-    ASSERT(value_reg.is(a0));
-    __ Ret(USE_DELAY_SLOT);
-    __ mov(v0, a0);
-    return;
-  }
-
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index.is_inobject()) {
-    // Set the property straight into the object.
-    __ sw(value_reg, FieldMemOperand(receiver_reg, index.offset()));
-
-    if (!representation.IsSmi()) {
-      // Skip updating write barrier if storing a smi.
-      __ JumpIfSmi(value_reg, &exit);
-
-      // Update the write barrier for the array address.
-      // Pass the now unused name_reg as a scratch register.
-      __ mov(name_reg, value_reg);
-      __ RecordWriteField(receiver_reg,
-                          index.offset(),
-                          name_reg,
-                          scratch1,
-                          kRAHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    // Get the properties array.
-    __ lw(scratch1,
-          FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ sw(value_reg, FieldMemOperand(scratch1, index.offset()));
-
-    if (!representation.IsSmi()) {
-      // Skip updating write barrier if storing a smi.
-      __ JumpIfSmi(value_reg, &exit);
-
-      // Update the write barrier for the array address.
-      // Ok to clobber receiver_reg and name_reg, since we return.
-      __ mov(name_reg, value_reg);
-      __ RecordWriteField(scratch1,
-                          index.offset(),
-                          name_reg,
-                          receiver_reg,
-                          kRAHasNotBeenSaved,
-                          kDontSaveFPRegs,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  // Return the value (register v0).
-  ASSERT(value_reg.is(a0));
-  __ bind(&exit);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, a0);
-}
-
-
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
-                                            Label* label,
-                                            Handle<Name> name) {
-  if (!label->is_unused()) {
-    __ bind(label);
-    __ li(this->name(), Operand(name));
-  }
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
-                                     Register receiver,
-                                     Register holder,
-                                     Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
-  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
-  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
-  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
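-  // The pushes below store name, interceptor info, receiver and holder in
-  // that order, mirroring the kInterceptorArgs* indices asserted above.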
-  __ push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
-  Register scratch = name;
-  __ li(scratch, Operand(interceptor));
-  __ Push(scratch, receiver, holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj,
-    IC::UtilityId id) {
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-  __ CallExternalReference(
-      ExternalReference(IC_Utility(id), masm->isolate()),
-      StubCache::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
-                                       const CallOptimization& optimization,
-                                       Handle<Map> receiver_map,
-                                       Register receiver,
-                                       Register scratch_in,
-                                       bool is_store,
-                                       int argc,
-                                       Register* values) {
-  ASSERT(!receiver.is(scratch_in));
-  // Preparing to push, adjust sp.
-  __ Subu(sp, sp, Operand((argc + 1) * kPointerSize));
-  __ sw(receiver, MemOperand(sp, argc * kPointerSize));  // Push receiver.
-  // Write the arguments to stack frame.
-  for (int i = 0; i < argc; i++) {
-    Register arg = values[argc-1-i];
-    ASSERT(!receiver.is(arg));
-    ASSERT(!scratch_in.is(arg));
-    __ sw(arg, MemOperand(sp, (argc-1-i) * kPointerSize));  // Push arg.
-  }
-  ASSERT(optimization.is_simple_api_call());
-
-  // Abi for CallApiFunctionStub.
-  Register callee = a0;
-  Register call_data = t0;
-  Register holder = a2;
-  Register api_function_address = a1;
-
-  // Put holder in place.
-  CallOptimization::HolderLookup holder_lookup;
-  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
-      receiver_map,
-      &holder_lookup);
-  switch (holder_lookup) {
-    case CallOptimization::kHolderIsReceiver:
-      __ Move(holder, receiver);
-      break;
-    case CallOptimization::kHolderFound:
-      __ li(holder, api_holder);
-      break;
-    case CallOptimization::kHolderNotFound:
-      UNREACHABLE();
-      break;
-  }
-
-  Isolate* isolate = masm->isolate();
-  Handle<JSFunction> function = optimization.constant_function();
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
-  // Put callee in place.
-  __ li(callee, function);
-
-  bool call_data_undefined = false;
-  // Put call_data in place.
-  if (isolate->heap()->InNewSpace(*call_data_obj)) {
-    __ li(call_data, api_call_info);
-    __ lw(call_data, FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
-  } else if (call_data_obj->IsUndefined()) {
-    call_data_undefined = true;
-    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
-  } else {
-    __ li(call_data, call_data_obj);
-  }
-  // Put api_function_address in place.
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  ApiFunction fun(function_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
-  ExternalReference ref =
-      ExternalReference(&fun,
-                        type,
-                        masm->isolate());
-  __ li(api_function_address, Operand(ref));
-
-  // Jump to stub.
-  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
-  __ TailCallStub(&stub);
-}
-
-
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
-  __ Jump(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
-                                       Register object_reg,
-                                       Handle<JSObject> holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       Handle<Name> name,
-                                       Label* miss,
-                                       PrototypeCheckType check) {
-  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
-
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
-         && !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  Handle<JSObject> current = Handle<JSObject>::null();
-  if (type->IsConstant()) {
-    current = Handle<JSObject>::cast(type->AsConstant()->Value());
-  }
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
-  Handle<Map> holder_map(holder->map());
-  // Traverse the prototype chain and check the maps in the prototype chain for
-  // fast and global objects or do negative lookup for normal objects.
-  while (!current_map.is_identical_to(holder_map)) {
-    ++depth;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap() &&
-        !current_map->IsJSGlobalProxyMap()) {
-      if (!name->IsUniqueName()) {
-        ASSERT(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
-      ASSERT(current.is_null() ||
-             current->property_dictionary()->FindEntry(name) ==
-             NameDictionary::kNotFound);
-
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
-
-      __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-      __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      Register map_reg = scratch1;
-      if (depth != 1 || check == CHECK_ALL_MAPS) {
-        // CheckMap implicitly loads the map of |reg| into |map_reg|.
-        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
-      } else {
-        __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
-      }
-
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
-      if (current_map->IsJSGlobalProxyMap()) {
-        __ CheckAccessGlobalProxy(reg, scratch2, miss);
-      } else if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(
-            masm(), Handle<JSGlobalObject>::cast(current), name,
-            scratch2, miss);
-      }
-
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (heap()->InNewSpace(*prototype)) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ li(reg, Operand(prototype));
-      }
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-    current_map = handle(current->map());
-  }
-
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  if (depth != 0 || check == CHECK_ALL_MAPS) {
-    // Check the holder map.
-    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
-  }
-
-  // Perform security check for access to the global object.
-  ASSERT(current_map->IsJSGlobalProxyMap() ||
-         !current_map->is_access_check_needed());
-  if (current_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
-
-  // Return the register containing the holder.
-  return reg;
-}
-
-
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ Branch(&success);
-    __ bind(miss);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ Branch(&success);
-    GenerateRestoreName(masm(), miss, name);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-Register LoadStubCompiler::CallbackHandlerFrontend(
-    Handle<HeapType> type,
-    Register object_reg,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<Object> callback) {
-  Label miss;
-
-  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-
-  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
-    ASSERT(!reg.is(scratch2()));
-    ASSERT(!reg.is(scratch3()));
-    ASSERT(!reg.is(scratch4()));
-
-    // Load the properties dictionary.
-    Register dictionary = scratch4();
-    __ lw(dictionary, FieldMemOperand(reg, JSObject::kPropertiesOffset));
-
-    // Probe the dictionary.
-    Label probe_done;
-    NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
-                                                     &miss,
-                                                     &probe_done,
-                                                     dictionary,
-                                                     this->name(),
-                                                     scratch2(),
-                                                     scratch3());
-    __ bind(&probe_done);
-
-    // If probing finds an entry in the dictionary, scratch3 contains the
-    // pointer into the dictionary. Check that the value is the callback.
-    Register pointer = scratch3();
-    const int kElementsStartOffset = NameDictionary::kHeaderSize +
-        NameDictionary::kElementsStartIndex * kPointerSize;
-    const int kValueOffset = kElementsStartOffset + kPointerSize;
-    __ lw(scratch2(), FieldMemOperand(pointer, kValueOffset));
-    __ Branch(&miss, ne, scratch2(), Operand(callback));
-  }
-
-  HandlerFrontendFooter(name, &miss);
-  return reg;
-}
-
-
-void LoadStubCompiler::GenerateLoadField(Register reg,
-                                         Handle<JSObject> holder,
-                                         FieldIndex field,
-                                         Representation representation) {
-  if (!reg.is(receiver())) __ mov(receiver(), reg);
-  if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  } else {
-    KeyedLoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  }
-}
-
-
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ li(v0, value);
-  __ Ret();
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
-    Register reg,
-    Handle<ExecutableAccessorInfo> callback) {
-  // Build the AccessorInfo::args_ list on the stack and push the property
-  // name below the exit frame, so that the GC is aware of them, and store
-  // pointers to them.
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-  ASSERT(!scratch2().is(reg));
-  ASSERT(!scratch3().is(reg));
-  ASSERT(!scratch4().is(reg));
-  __ push(receiver());
-  if (heap()->InNewSpace(callback->data())) {
-    __ li(scratch3(), callback);
-    __ lw(scratch3(), FieldMemOperand(scratch3(),
-                                      ExecutableAccessorInfo::kDataOffset));
-  } else {
-    __ li(scratch3(), Handle<Object>(callback->data(), isolate()));
-  }
-  __ Subu(sp, sp, 6 * kPointerSize);
-  __ sw(scratch3(), MemOperand(sp, 5 * kPointerSize));
-  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
-  __ sw(scratch3(), MemOperand(sp, 4 * kPointerSize));
-  __ sw(scratch3(), MemOperand(sp, 3 * kPointerSize));
-  __ li(scratch4(),
-        Operand(ExternalReference::isolate_address(isolate())));
-  __ sw(scratch4(), MemOperand(sp, 2 * kPointerSize));
-  __ sw(reg, MemOperand(sp, 1 * kPointerSize));
-  __ sw(name(), MemOperand(sp, 0 * kPointerSize));
-  __ Addu(scratch2(), sp, 1 * kPointerSize);
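-  // Relative to scratch2() (== sp + kPointerSize) the slots written above
-  // are: holder (0), isolate (1), return-value default (2), return value
-  // (3), callback data (4) and receiver (5), matching the
-  // PropertyCallbackArguments indices asserted earlier; the property name
-  // sits just below, at MemOperand(sp, 0).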
-
-  __ mov(a2, scratch2());  // Saved in case scratch2 == a1.
-  // Abi for CallApiGetter.
-  Register getter_address_reg = a2;
-
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  ApiFunction fun(getter_address);
-  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
-  ExternalReference ref = ExternalReference(&fun, type, isolate());
-  __ li(getter_address_reg, Operand(ref));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void LoadStubCompiler::GenerateLoadInterceptor(
-    Register holder_reg,
-    Handle<Object> object,
-    Handle<JSObject> interceptor_holder,
-    LookupResult* lookup,
-    Handle<Name> name) {
-  ASSERT(interceptor_holder->HasNamedInterceptor());
-  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
-  // So far the most popular follow-ups for interceptor loads are FIELD
-  // and CALLBACKS, so inline only those; other cases may be added later.
-  bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
-    if (lookup->IsField()) {
-      compile_followup_inline = true;
-    } else if (lookup->type() == CALLBACKS &&
-        lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
-      ExecutableAccessorInfo* callback =
-          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
-      compile_followup_inline = callback->getter() != NULL &&
-          callback->IsCompatibleReceiver(*object);
-    }
-  }
-
-  if (compile_followup_inline) {
-    // Compile the interceptor call, followed by inline code to load the
-    // property from further up the prototype chain if the call fails.
-    // Check that the maps haven't changed.
-    ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
-    // Preserve the receiver register explicitly whenever it is different
-    // from the holder and is needed should the interceptor return without
-    // any result: the CALLBACKS case needs the receiver to be passed into
-    // C++ code, and the FIELD case might cause a miss during the prototype
-    // check.
-    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
-    bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
-        (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
-    // Save necessary data before invoking an interceptor.
-    // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-      if (must_preserve_receiver_reg) {
-        __ Push(receiver(), holder_reg, this->name());
-      } else {
-        __ Push(holder_reg, this->name());
-      }
-      // Invoke an interceptor.  Note: map checks from receiver to
-      // interceptor's holder have been compiled before (see a caller
-      // of this method).
-      CompileCallLoadPropertyWithInterceptor(
-          masm(), receiver(), holder_reg, this->name(), interceptor_holder,
-          IC::kLoadPropertyWithInterceptorOnly);
-
-      // Check if interceptor provided a value for property.  If it's
-      // the case, return immediately.
-      Label interceptor_failed;
-      __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
-      __ Branch(&interceptor_failed, eq, v0, Operand(scratch1()));
-      frame_scope.GenerateLeaveFrame();
-      __ Ret();
-
-      __ bind(&interceptor_failed);
-      __ pop(this->name());
-      __ pop(holder_reg);
-      if (must_preserve_receiver_reg) {
-        __ pop(receiver());
-      }
-      // Leave the internal frame.
-    }
-    GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
-  } else {  // !compile_followup_inline
-    // Call the runtime system to load the interceptor.
-    // Check that the maps haven't changed.
-    PushInterceptorArguments(masm(), receiver(), holder_reg,
-                             this->name(), interceptor_holder);
-
-    ExternalReference ref = ExternalReference(
-        IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
-    __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
-  }
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = HandlerFrontend(
-      IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(holder->IsJSGlobalProxy() || !holder->IsAccessCheckNeeded());
-
-  __ Push(receiver(), holder_reg);  // Receiver.
-  __ li(at, Operand(callback));  // Callback info.
-  __ push(at);
-  __ li(at, Operand(name));
-  __ Push(at, value());
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_callback_property =
-      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
-  __ TailCallExternalReference(store_callback_property, 5, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
-    MacroAssembler* masm,
-    Handle<HeapType> type,
-    Register receiver,
-    Handle<JSFunction> setter) {
-  // ----------- S t a t e -------------
-  //  -- ra    : return address
-  // -----------------------------------
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Save value register, so we can restore it later.
-    __ push(value());
-
-    if (!setter.is_null()) {
-      // Call the JavaScript setter with receiver and value on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ lw(receiver,
-               FieldMemOperand(
-                   receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ Push(receiver, value());
-      ParameterCount actual(1);
-      ParameterCount expected(setter);
-      __ InvokeFunction(setter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // We have to return the passed value, not the return value of the setter.
-    __ pop(v0);
-
-    // Restore context register.
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  }
-  __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> object,
-    Handle<Name> name) {
-  __ Push(receiver(), this->name(), value());
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_ic_property =
-      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
-  __ TailCallExternalReference(store_ic_property, 3, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
-                                                      Handle<JSObject> last,
-                                                      Handle<Name> name) {
-  NonexistentHandlerFrontend(type, last, name);
-
-  // Return undefined if the maps of the full prototype chain are still the
-  // same.
-  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
-  __ Ret();
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { a0, a2, a3, a1, t0, t1 };
-  return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { a1, a0, a2, a3, t0, t1 };
-  return registers;
-}
-
-
-Register StoreStubCompiler::value() {
-  return a0;
-}
-
-
-Register* StoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { a1, a2, a3, t0, t1 };
-  return registers;
-}
-
-
-Register* KeyedStoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { a2, a1, a3, t0, t1 };
-  return registers;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
-                                             Handle<HeapType> type,
-                                             Register receiver,
-                                             Handle<JSFunction> getter) {
-  // ----------- S t a t e -------------
-  //  -- a0    : receiver
-  //  -- a2    : name
-  //  -- ra    : return address
-  // -----------------------------------
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    if (!getter.is_null()) {
-      // Call the JavaScript getter with the receiver on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ lw(receiver,
-                FieldMemOperand(
-                    receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(getter);
-      __ InvokeFunction(getter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // Restore context register.
-    __ lw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
-  }
-  __ Ret();
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<HeapType> type,
-    Handle<GlobalObject> global,
-    Handle<PropertyCell> cell,
-    Handle<Name> name,
-    bool is_dont_delete) {
-  Label miss;
-
-  HandlerFrontendHeader(type, receiver(), global, name, &miss);
-
-  // Get the value from the cell.
-  __ li(a3, Operand(cell));
-  __ lw(t0, FieldMemOperand(a3, Cell::kValueOffset));
-
-  // Check for deleted property if property can actually be deleted.
-  if (!is_dont_delete) {
-    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
-    __ Branch(&miss, eq, t0, Operand(at));
-  }
-
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
-  __ Ret(USE_DELAY_SLOT);
-  __ mov(v0, t0);
-
-  HandlerFrontendFooter(name, &miss);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    TypeHandleList* types,
-    CodeHandleList* handlers,
-    Handle<Name> name,
-    Code::StubType type,
-    IcCheckType check) {
-  Label miss;
-
-  if (check == PROPERTY &&
-      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    __ Branch(&miss, ne, this->name(), Operand(name));
-  }
-
-  Label number_case;
-  Register match = scratch1();
-  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
-  __ JumpIfSmi(receiver(), smi_target, match);  // Reg match is 0 if Smi.
-
-  Register map_reg = scratch2();
-
-  int receiver_count = types->length();
-  int number_of_handled_maps = 0;
-  __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<HeapType> type = types->at(current);
-    Handle<Map> map = IC::TypeToMap(*type, isolate());
-    if (!map->is_deprecated()) {
-      number_of_handled_maps++;
-      // Check map and tail call if there's a match.
-      // Separate compare from branch, to provide path for above JumpIfSmi().
-      __ Subu(match, map_reg, Operand(map));
-      if (type->Is(HeapType::Number())) {
-        ASSERT(!number_case.is_unused());
-        __ bind(&number_case);
-      }
-      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET,
-          eq, match, Operand(zero_reg));
-    }
-  }
-  ASSERT(number_of_handled_maps != 0);
-
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  InlineCacheState state =
-      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
-  return GetICCode(kind(), type, name, state);
-}
-
-
-void StoreStubCompiler::GenerateStoreArrayLength() {
-  // Prepare tail call to StoreIC_ArrayLength.
-  __ Push(receiver(), value());
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
-                        masm()->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
-  Label miss;
-  __ JumpIfSmi(receiver(), &miss);
-
-  int receiver_count = receiver_maps->length();
-  __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
-  for (int i = 0; i < receiver_count; ++i) {
-    if (transitioned_maps->at(i).is_null()) {
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq,
-          scratch1(), Operand(receiver_maps->at(i)));
-    } else {
-      Label next_map;
-      __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
-      __ li(transition_map(), Operand(transitioned_maps->at(i)));
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
-      __ bind(&next_map);
-    }
-  }
-
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  return GetICCode(
-      kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
-    MacroAssembler* masm) {
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key
-  //  -- a1     : receiver
-  // -----------------------------------
-  Label slow, miss;
-
-  Register key = a0;
-  Register receiver = a1;
-
-  __ JumpIfNotSmi(key, &miss);
-  __ lw(t0, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ sra(a2, a0, kSmiTagSize);
-  __ LoadFromNumberDictionary(&slow, t0, a0, v0, a2, a3, t1);
-  __ Ret();
-
-  // Slow case, key and receiver still in a0 and a1.
-  __ bind(&slow);
-  __ IncrementCounter(
-      masm->isolate()->counters()->keyed_load_external_array_slow(),
-      1, a2, a3);
-  // Entry registers are intact.
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key
-  //  -- a1     : receiver
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
-  // Miss case, call the runtime.
-  __ bind(&miss);
-
-  // ---------- S t a t e --------------
-  //  -- ra     : return address
-  //  -- a0     : key
-  //  -- a1     : receiver
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_MIPS
diff --git a/src/mips64/OWNERS b/src/mips64/OWNERS
new file mode 100644
index 0000000..5508ba6
--- /dev/null
+++ b/src/mips64/OWNERS
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/src/mips64/assembler-mips64-inl.h b/src/mips64/assembler-mips64-inl.h
new file mode 100644
index 0000000..de294ee
--- /dev/null
+++ b/src/mips64/assembler-mips64-inl.h
@@ -0,0 +1,457 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_INL_H_
+#define V8_MIPS_ASSEMBLER_MIPS_INL_H_
+
+#include "src/mips64/assembler-mips64.h"
+
+#include "src/assembler.h"
+#include "src/debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+bool CpuFeatures::SupportsCrankshaft() { return IsSupported(FPU); }
+
+
+// -----------------------------------------------------------------------------
+// Operand and MemOperand.
+
+Operand::Operand(int64_t immediate, RelocInfo::Mode rmode) {
+  rm_ = no_reg;
+  imm64_ = immediate;
+  rmode_ = rmode;
+}
+
+
+Operand::Operand(const ExternalReference& f) {
+  rm_ = no_reg;
+  imm64_ = reinterpret_cast<int64_t>(f.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+
+Operand::Operand(Smi* value) {
+  rm_ = no_reg;
+  imm64_ = reinterpret_cast<intptr_t>(value);
+  rmode_ = RelocInfo::NONE32;
+}
+
+
+Operand::Operand(Register rm) {
+  rm_ = rm;
+}
+
+
+bool Operand::is_reg() const {
+  return rm_.is_valid();
+}
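+// Taken together, the constructors above cover immediate, external
+// reference, smi and register operands. A minimal usage sketch
+// (hypothetical call sites, not part of this file):
+//   Operand imm(static_cast<int64_t>(42));  // immediate operand
+//   Operand reg(a0);                        // register operand
+//   // imm.is_reg() is false; reg.is_reg() is true.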
+
+
+int Register::NumAllocatableRegisters() {
+  return kMaxNumAllocatableRegisters;
+}
+
+
+int DoubleRegister::NumRegisters() {
+  return FPURegister::kMaxNumRegisters;
+}
+
+
+int DoubleRegister::NumAllocatableRegisters() {
+  return FPURegister::kMaxNumAllocatableRegisters;
+}
+
+
+int FPURegister::ToAllocationIndex(FPURegister reg) {
+  DCHECK(reg.code() % 2 == 0);
+  DCHECK(reg.code() / 2 < kMaxNumAllocatableRegisters);
+  DCHECK(reg.is_valid());
+  DCHECK(!reg.is(kDoubleRegZero));
+  DCHECK(!reg.is(kLithiumScratchDouble));
+  return (reg.code() / 2);
+}
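+// E.g. the even-coded double register f4 (code 4) maps to allocation
+// index 2; odd register codes are rejected by the DCHECKs above.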
+
+
+// -----------------------------------------------------------------------------
+// RelocInfo.
+
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+  if (IsInternalReference(rmode_)) {
+    // Absolute code pointer inside code object moves with the code object.
+    byte* p = reinterpret_cast<byte*>(pc_);
+    int count = Assembler::RelocateInternalReference(p, delta);
+    CpuFeatures::FlushICache(p, count * sizeof(uint32_t));
+  }
+}
+
+
+Address RelocInfo::target_address() {
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  DCHECK(IsCodeTarget(rmode_) ||
+         IsRuntimeEntry(rmode_) ||
+         rmode_ == EMBEDDED_OBJECT ||
+         rmode_ == EXTERNAL_REFERENCE);
+  // Read the address of the word containing the target_address in an
+  // instruction stream.
+  // The only architecture-independent user of this function is the serializer.
+  // The serializer uses it to find out how many raw bytes of instruction to
+  // output before the next target.
+  // For an instruction like LUI/ORI where the target bits are mixed into the
+  // instruction bits, the size of the target will be zero, indicating that the
+  // serializer should not step forward in memory after a target is resolved
+  // and written. In this case the target_address_address function should
+  // return the end of the instructions to be patched, allowing the
+  // deserializer to deserialize the instructions as raw bytes and put them in
+  // place, ready to be patched with the target. After jump optimization,
+  // that is the address of the instruction that follows the J/JAL/JR/JALR
+  // instruction.
+  // return reinterpret_cast<Address>(
+  //  pc_ + Assembler::kInstructionsFor32BitConstant * Assembler::kInstrSize);
+  return reinterpret_cast<Address>(
+    pc_ + Assembler::kInstructionsFor64BitConstant * Assembler::kInstrSize);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+int RelocInfo::target_address_size() {
+  return Assembler::kSpecialTargetSize;
+}
+
+
+void RelocInfo::set_target_address(Address target,
+                                   WriteBarrierMode write_barrier_mode,
+                                   ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL && IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+  return pc - kCallTargetAddressOffset;
+}
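+// I.e. the patchable call target is encoded kCallTargetAddressOffset
+// bytes before the return address that the call leaves behind.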
+
+
+Address Assembler::break_address_from_return_address(Address pc) {
+  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
+}
+
+
+Object* RelocInfo::target_object() {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Handle<Object>(reinterpret_cast<Object**>(
+      Assembler::target_address_at(pc_, host_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target,
+                                  WriteBarrierMode write_barrier_mode,
+                                  ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  Assembler::set_target_address_at(pc_, host_,
+                                   reinterpret_cast<Address>(target),
+                                   icache_flush_mode);
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
+      host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
+}
+
+
+Address RelocInfo::target_reference() {
+  DCHECK(rmode_ == EXTERNAL_REFERENCE);
+  return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+  DCHECK(IsRuntimeEntry(rmode_));
+  return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+                                         WriteBarrierMode write_barrier_mode,
+                                         ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsRuntimeEntry(rmode_));
+  if (target_address() != target)
+    set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<Cell>(reinterpret_cast<Cell**>(address));
+}
+
+
+Cell* RelocInfo::target_cell() {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell,
+                                WriteBarrierMode write_barrier_mode,
+                                ICacheFlushMode icache_flush_mode) {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  Address address = cell->address() + Cell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because a cell can never be
+    // on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), NULL, cell);
+  }
+}
+
+
+static const int kNoCodeAgeSequenceLength = 9 * Assembler::kInstrSize;
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  UNREACHABLE();  // This should never be reached on MIPS.
+  return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  return Code::GetCodeFromTargetAddress(
+      Assembler::target_address_at(pc_ + Assembler::kInstrSize, host_));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub,
+                                  ICacheFlushMode icache_flush_mode) {
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  Assembler::set_target_address_at(pc_ + Assembler::kInstrSize,
+                                   host_,
+                                   stub->instruction_start());
+}
+
+
+Address RelocInfo::call_address() {
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // The pc_ offset of 0 assumes a MIPS patched return sequence per
+  // debug-mips64.cc BreakLocationIterator::SetDebugBreakAtReturn(), or a
+  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+  return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // The pc_ offset of 0 assumes a MIPS patched return sequence per
+  // debug-mips64.cc BreakLocationIterator::SetDebugBreakAtReturn(), or a
+  // debug break slot per BreakLocationIterator::SetDebugBreakAtSlot().
+  Assembler::set_target_address_at(pc_, host_, target);
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
+}
+
+
+Object* RelocInfo::call_object() {
+  return *call_object_address();
+}
+
+
+Object** RelocInfo::call_object_address() {
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  return reinterpret_cast<Object**>(pc_ + 6 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::set_call_object(Object* target) {
+  *call_object_address() = target;
+}
+
+
+void RelocInfo::WipeOut() {
+  DCHECK(IsEmbeddedObject(rmode_) ||
+         IsCodeTarget(rmode_) ||
+         IsRuntimeEntry(rmode_) ||
+         IsExternalReference(rmode_));
+  Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+  Instr instr0 = Assembler::instr_at(pc_);  // lui.
+  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);  // ori.
+  Instr instr2 = Assembler::instr_at(pc_ + 2 * Assembler::kInstrSize);  // dsll.
+  Instr instr3 = Assembler::instr_at(pc_ + 3 * Assembler::kInstrSize);  // ori.
+  Instr instr4 = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);  // jalr.
+
+  bool patched_return = ((instr0 & kOpcodeMask) == LUI &&
+                         (instr1 & kOpcodeMask) == ORI &&
+                         (instr2 & kFunctionFieldMask) == DSLL &&
+                         (instr3 & kOpcodeMask) == ORI &&
+                         (instr4 & kFunctionFieldMask) == JALR);
+  return patched_return;
+}
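+// The five-instruction pattern above (lui/ori/dsll/ori/jalr) is the
+// sequence the debugger patches in to materialize a call target and
+// jump to it; matching it identifies a patched return sequence.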
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+  Instr current_instr = Assembler::instr_at(pc_);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitEmbeddedPointer(this);
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::CELL) {
+    visitor->VisitCell(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(this);
+  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+    visitor->VisitCodeAgeSequence(this);
+  } else if (((RelocInfo::IsJSReturn(mode) &&
+              IsPatchedReturnSequence()) ||
+             (RelocInfo::IsDebugBreakSlot(mode) &&
+             IsPatchedDebugBreakSlotSequence())) &&
+             isolate->debug()->has_break_points()) {
+    visitor->VisitDebugTarget(this);
+  } else if (RelocInfo::IsRuntimeEntry(mode)) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
+template<typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    StaticVisitor::VisitCodeTarget(heap, this);
+  } else if (mode == RelocInfo::CELL) {
+    StaticVisitor::VisitCell(heap, this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    StaticVisitor::VisitExternalReference(this);
+  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+    StaticVisitor::VisitCodeAgeSequence(heap, this);
+  } else if (heap->isolate()->debug()->has_break_points() &&
+             ((RelocInfo::IsJSReturn(mode) &&
+              IsPatchedReturnSequence()) ||
+             (RelocInfo::IsDebugBreakSlot(mode) &&
+              IsPatchedDebugBreakSlotSequence()))) {
+    StaticVisitor::VisitDebugTarget(heap, this);
+  } else if (RelocInfo::IsRuntimeEntry(mode)) {
+    StaticVisitor::VisitRuntimeEntry(this);
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Assembler.
+
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+}
+
+
+void Assembler::CheckTrampolinePoolQuick() {
+  if (pc_offset() >= next_buffer_check_) {
+    CheckTrampolinePool();
+  }
+}
+
+
+void Assembler::emit(Instr x) {
+  if (!is_buffer_growth_blocked()) {
+    CheckBuffer();
+  }
+  *reinterpret_cast<Instr*>(pc_) = x;
+  pc_ += kInstrSize;
+  CheckTrampolinePoolQuick();
+}
+
+
+void Assembler::emit(uint64_t x) {
+  if (!is_buffer_growth_blocked()) {
+    CheckBuffer();
+  }
+  *reinterpret_cast<uint64_t*>(pc_) = x;
+  pc_ += kInstrSize * 2;
+  CheckTrampolinePoolQuick();
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_ASSEMBLER_MIPS_INL_H_
diff --git a/src/mips64/assembler-mips64.cc b/src/mips64/assembler-mips64.cc
new file mode 100644
index 0000000..5d51e63
--- /dev/null
+++ b/src/mips64/assembler-mips64.cc
@@ -0,0 +1,2933 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/base/cpu.h"
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+
+// Get the CPU features enabled by the build. For cross compilation the
+// preprocessor symbol CAN_USE_FPU_INSTRUCTIONS can be defined to enable
+// FPU instructions when building the snapshot.
+static unsigned CpuFeaturesImpliedByCompiler() {
+  unsigned answer = 0;
+#ifdef CAN_USE_FPU_INSTRUCTIONS
+  answer |= 1u << FPU;
+#endif  // def CAN_USE_FPU_INSTRUCTIONS
+
+  // If the compiler is allowed to use FPU then we can use FPU too in our code
+  // generation even when generating snapshots.  This won't work for cross
+  // compilation.
+#if defined(__mips__) && defined(__mips_hard_float) && __mips_hard_float != 0
+  answer |= 1u << FPU;
+#endif
+
+  return answer;
+}
+
+
+const char* DoubleRegister::AllocationIndexToString(int index) {
+  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+  const char* const names[] = {
+    "f0",
+    "f2",
+    "f4",
+    "f6",
+    "f8",
+    "f10",
+    "f12",
+    "f14",
+    "f16",
+    "f18",
+    "f20",
+    "f22",
+    "f24",
+    "f26"
+  };
+  return names[index];
+}
+
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+  supported_ |= CpuFeaturesImpliedByCompiler();
+
+  // Only use statically determined features for cross compile (snapshot).
+  if (cross_compile) return;
+
+  // If the compiler is allowed to use fpu then we can use fpu too in our
+  // code generation.
+#ifndef __mips__
+  // For the simulator build, use FPU.
+  supported_ |= 1u << FPU;
+#else
+  // Probe for additional features at runtime.
+  base::CPU cpu;
+  if (cpu.has_fpu()) supported_ |= 1u << FPU;
+#endif
+}
+
+
+void CpuFeatures::PrintTarget() { }
+void CpuFeatures::PrintFeatures() { }
+
+
+int ToNumber(Register reg) {
+  DCHECK(reg.is_valid());
+  const int kNumbers[] = {
+    0,    // zero_reg
+    1,    // at
+    2,    // v0
+    3,    // v1
+    4,    // a0
+    5,    // a1
+    6,    // a2
+    7,    // a3
+    8,    // a4
+    9,    // a5
+    10,   // a6
+    11,   // a7
+    12,   // t0
+    13,   // t1
+    14,   // t2
+    15,   // t3
+    16,   // s0
+    17,   // s1
+    18,   // s2
+    19,   // s3
+    20,   // s4
+    21,   // s5
+    22,   // s6
+    23,   // s7
+    24,   // t8
+    25,   // t9
+    26,   // k0
+    27,   // k1
+    28,   // gp
+    29,   // sp
+    30,   // fp
+    31,   // ra
+  };
+  return kNumbers[reg.code()];
+}
+
+
+Register ToRegister(int num) {
+  DCHECK(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = {
+    zero_reg,
+    at,
+    v0, v1,
+    a0, a1, a2, a3, a4, a5, a6, a7,
+    t0, t1, t2, t3,
+    s0, s1, s2, s3, s4, s5, s6, s7,
+    t8, t9,
+    k0, k1,
+    gp,
+    sp,
+    fp,
+    ra
+  };
+  return kRegisters[num];
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo.
+
+const int RelocInfo::kApplyMask = RelocInfo::kCodeTargetMask |
+                                  1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially coded.
+  // Being specially coded on MIPS means that it is a lui/ori load sequence,
+  // and that is always the case inside code objects.
+  return true;
+}
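+
+// Note (informal): on mips64 a specially coded target is materialized by
+// the lui/ori/dsll/ori load sequence rather than any pc-relative form,
+// which is why IsCodedSpecially() can return true unconditionally.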
+
+
+bool RelocInfo::IsInConstantPool() {
+  return false;
+}
+
+
+// Patch the code at the current address with the supplied instructions.
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Patch the code at the current address with a call to the target.
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand.
+// See assembler-mips-inl.h for inlined constructors.
+
+Operand::Operand(Handle<Object> handle) {
+  AllowDeferredHandleDereference using_raw_address;
+  rm_ = no_reg;
+  // Verify all Objects referred to by code are NOT in new space.
+  Object* obj = *handle;
+  if (obj->IsHeapObject()) {
+    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+    imm64_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // No relocation needed.
+    imm64_ = reinterpret_cast<intptr_t>(obj);
+    rmode_ = RelocInfo::NONE64;
+  }
+}
+
+
+MemOperand::MemOperand(Register rm, int64_t offset) : Operand(rm) {
+  offset_ = offset;
+}
+
+
+MemOperand::MemOperand(Register rm, int64_t unit, int64_t multiplier,
+                       OffsetAddend offset_addend) : Operand(rm) {
+  offset_ = unit * multiplier + offset_addend;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+static const int kNegOffset = 0x00008000;
+// daddiu(sp, sp, 8), i.e. the Pop() operation, or part of a Pop(r)
+// operation, as a post-increment of sp.
+const Instr kPopInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
+      | (kRegister_sp_Code << kRtShift)
+      | (kPointerSize & kImm16Mask);  // NOLINT
+// daddiu(sp, sp, -8), part of the Push(r) operation, as a pre-decrement of sp.
+const Instr kPushInstruction = DADDIU | (kRegister_sp_Code << kRsShift)
+      | (kRegister_sp_Code << kRtShift)
+      | (-kPointerSize & kImm16Mask);  // NOLINT
+// sd(r, MemOperand(sp, 0))
+const Instr kPushRegPattern = SD | (kRegister_sp_Code << kRsShift)
+      |  (0 & kImm16Mask);  // NOLINT
+//  ld(r, MemOperand(sp, 0))
+const Instr kPopRegPattern = LD | (kRegister_sp_Code << kRsShift)
+      |  (0 & kImm16Mask);  // NOLINT
+
+const Instr kLwRegFpOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
+      |  (0 & kImm16Mask);  // NOLINT
+
+const Instr kSwRegFpOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
+      |  (0 & kImm16Mask);  // NOLINT
+
+const Instr kLwRegFpNegOffsetPattern = LW | (kRegister_fp_Code << kRsShift)
+      |  (kNegOffset & kImm16Mask);  // NOLINT
+
+const Instr kSwRegFpNegOffsetPattern = SW | (kRegister_fp_Code << kRsShift)
+      |  (kNegOffset & kImm16Mask);  // NOLINT
+// A mask for the Rt register for push, pop, lw, sw instructions.
+const Instr kRtMask = kRtFieldMask;
+const Instr kLwSwInstrTypeMask = 0xffe00000;
+const Instr kLwSwInstrArgumentMask  = ~kLwSwInstrTypeMask;
+const Instr kLwSwOffsetMask = kImm16Mask;
+
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+    : AssemblerBase(isolate, buffer, buffer_size),
+      recorded_ast_id_(TypeFeedbackId::None()),
+      positions_recorder_(this) {
+  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+
+  last_trampoline_pool_end_ = 0;
+  no_trampoline_pool_before_ = 0;
+  trampoline_pool_blocked_nesting_ = 0;
+  // We leave space (16 * kTrampolineSlotsSize)
+  // for BlockTrampolinePoolScope buffer.
+  next_buffer_check_ = FLAG_force_long_branches
+      ? kMaxInt : kMaxBranchOffset - kTrampolineSlotsSize * 16;
+  internal_trampoline_exception_ = false;
+  last_bound_pos_ = 0;
+
+  trampoline_emitted_ = FLAG_force_long_branches;
+  unbound_labels_count_ = 0;
+  block_buffer_growth_ = false;
+
+  ClearRecordedAstId();
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
+  // Set up code descriptor.
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->origin = this;
+}
+
+
+void Assembler::Align(int m) {
+  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+
+void Assembler::CodeTargetAlign() {
+  // There is no known advantage to aligning branch/call targets to more
+  // than a single instruction.
+  Align(4);
+}
+
+
+Register Assembler::GetRtReg(Instr instr) {
+  Register rt;
+  rt.code_ = (instr & kRtFieldMask) >> kRtShift;
+  return rt;
+}
+
+
+Register Assembler::GetRsReg(Instr instr) {
+  Register rs;
+  rs.code_ = (instr & kRsFieldMask) >> kRsShift;
+  return rs;
+}
+
+
+Register Assembler::GetRdReg(Instr instr) {
+  Register rd;
+  rd.code_ = (instr & kRdFieldMask) >> kRdShift;
+  return rd;
+}
+
+
+uint32_t Assembler::GetRt(Instr instr) {
+  return (instr & kRtFieldMask) >> kRtShift;
+}
+
+
+uint32_t Assembler::GetRtField(Instr instr) {
+  return instr & kRtFieldMask;
+}
+
+
+uint32_t Assembler::GetRs(Instr instr) {
+  return (instr & kRsFieldMask) >> kRsShift;
+}
+
+
+uint32_t Assembler::GetRsField(Instr instr) {
+  return instr & kRsFieldMask;
+}
+
+
+uint32_t Assembler::GetRd(Instr instr) {
+  return  (instr & kRdFieldMask) >> kRdShift;
+}
+
+
+uint32_t Assembler::GetRdField(Instr instr) {
+  return  instr & kRdFieldMask;
+}
+
+
+uint32_t Assembler::GetSa(Instr instr) {
+  return (instr & kSaFieldMask) >> kSaShift;
+}
+
+
+uint32_t Assembler::GetSaField(Instr instr) {
+  return instr & kSaFieldMask;
+}
+
+
+uint32_t Assembler::GetOpcodeField(Instr instr) {
+  return instr & kOpcodeMask;
+}
+
+
+uint32_t Assembler::GetFunction(Instr instr) {
+  return (instr & kFunctionFieldMask) >> kFunctionShift;
+}
+
+
+uint32_t Assembler::GetFunctionField(Instr instr) {
+  return instr & kFunctionFieldMask;
+}
+
+
+uint32_t Assembler::GetImmediate16(Instr instr) {
+  return instr & kImm16Mask;
+}
+
+
+uint32_t Assembler::GetLabelConst(Instr instr) {
+  return instr & ~kImm16Mask;
+}
+
+
+bool Assembler::IsPop(Instr instr) {
+  return (instr & ~kRtMask) == kPopRegPattern;
+}
+
+
+bool Assembler::IsPush(Instr instr) {
+  return (instr & ~kRtMask) == kPushRegPattern;
+}
+
+
+bool Assembler::IsSwRegFpOffset(Instr instr) {
+  return ((instr & kLwSwInstrTypeMask) == kSwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpOffset(Instr instr) {
+  return ((instr & kLwSwInstrTypeMask) == kLwRegFpOffsetPattern);
+}
+
+
+bool Assembler::IsSwRegFpNegOffset(Instr instr) {
+  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+          kSwRegFpNegOffsetPattern);
+}
+
+
+bool Assembler::IsLwRegFpNegOffset(Instr instr) {
+  return ((instr & (kLwSwInstrTypeMask | kNegOffset)) ==
+          kLwRegFpNegOffsetPattern);
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+// The link chain is terminated by an offset of -1 stored in the instruction,
+// which is an otherwise illegal value (a branch to -1 is an infinite loop).
+// The instruction's 16-bit offset field addresses 32-bit words, but in code
+// it is converted to an 18-bit value addressing bytes, hence the -4 value
+// of kEndOfChain below.
+
+const int kEndOfChain = -4;
+// Determines the end of the Jump chain (a subset of the label link chain).
+const int kEndOfJumpChain = 0;
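+
+// Worked example (illustrative): a branch at byte offset 0x100 whose label
+// is later bound at 0x200 gets imm16 = (0x200 - (0x100 + kBranchPCOffset))
+// >> 2 = 0x3F, assuming the usual kBranchPCOffset of 4 (the delay-slot pc).
+// target_at() undoes this with arithmetic shifts so that backward
+// (negative) offsets sign-extend correctly.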
+
+
+bool Assembler::IsBranch(Instr instr) {
+  uint32_t opcode   = GetOpcodeField(instr);
+  uint32_t rt_field = GetRtField(instr);
+  uint32_t rs_field = GetRsField(instr);
+  // Checks if the instruction is a branch.
+  return opcode == BEQ ||
+      opcode == BNE ||
+      opcode == BLEZ ||
+      opcode == BGTZ ||
+      opcode == BEQL ||
+      opcode == BNEL ||
+      opcode == BLEZL ||
+      opcode == BGTZL ||
+      (opcode == REGIMM && (rt_field == BLTZ || rt_field == BGEZ ||
+                            rt_field == BLTZAL || rt_field == BGEZAL)) ||
+      (opcode == COP1 && rs_field == BC1) ||  // Coprocessor branch.
+      (opcode == COP1 && rs_field == BC1EQZ) ||
+      (opcode == COP1 && rs_field == BC1NEZ);
+}
+
+
+bool Assembler::IsEmittedConstant(Instr instr) {
+  uint32_t label_constant = GetLabelConst(instr);
+  return label_constant == 0;  // Emitted label const in reg-exp engine.
+}
+
+
+bool Assembler::IsBeq(Instr instr) {
+  return GetOpcodeField(instr) == BEQ;
+}
+
+
+bool Assembler::IsBne(Instr instr) {
+  return GetOpcodeField(instr) == BNE;
+}
+
+
+bool Assembler::IsJump(Instr instr) {
+  uint32_t opcode   = GetOpcodeField(instr);
+  uint32_t rt_field = GetRtField(instr);
+  uint32_t rd_field = GetRdField(instr);
+  uint32_t function_field = GetFunctionField(instr);
+  // Checks if the instruction is a jump.
+  return opcode == J || opcode == JAL ||
+      (opcode == SPECIAL && rt_field == 0 &&
+      ((function_field == JALR) || (rd_field == 0 && (function_field == JR))));
+}
+
+
+bool Assembler::IsJ(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  // Checks if the instruction is a jump.
+  return opcode == J;
+}
+
+
+bool Assembler::IsJal(Instr instr) {
+  return GetOpcodeField(instr) == JAL;
+}
+
+
+bool Assembler::IsJr(Instr instr) {
+  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JR;
+}
+
+
+bool Assembler::IsJalr(Instr instr) {
+  return GetOpcodeField(instr) == SPECIAL && GetFunctionField(instr) == JALR;
+}
+
+
+bool Assembler::IsLui(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  // Checks if the instruction is a load upper immediate.
+  return opcode == LUI;
+}
+
+
+bool Assembler::IsOri(Instr instr) {
+  uint32_t opcode = GetOpcodeField(instr);
+  // Checks if the instruction is an or-immediate (ori).
+  return opcode == ORI;
+}
+
+
+bool Assembler::IsNop(Instr instr, unsigned int type) {
+  // See Assembler::nop(type).
+  DCHECK(type < 32);
+  uint32_t opcode = GetOpcodeField(instr);
+  uint32_t function = GetFunctionField(instr);
+  uint32_t rt = GetRt(instr);
+  uint32_t rd = GetRd(instr);
+  uint32_t sa = GetSa(instr);
+
+  // Traditional mips nop == sll(zero_reg, zero_reg, 0)
+  // When marking non-zero type, use sll(zero_reg, at, type)
+  // to avoid use of mips ssnop and ehb special encodings
+  // of the sll instruction.
+
+  Register nop_rt_reg = (type == 0) ? zero_reg : at;
+  bool ret = (opcode == SPECIAL && function == SLL &&
+              rd == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+              rt == static_cast<uint32_t>(ToNumber(nop_rt_reg)) &&
+              sa == type);
+
+  return ret;
+}
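+
+// For reference: the canonical nop sll(zero_reg, zero_reg, 0) encodes as
+// the all-zero instruction word, while a marker nop of type t,
+// sll(zero_reg, at, t), differs from it only in the rt and sa fields.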
+
+
+int32_t Assembler::GetBranchOffset(Instr instr) {
+  DCHECK(IsBranch(instr));
+  return (static_cast<int16_t>(instr & kImm16Mask)) << 2;
+}
+
+
+bool Assembler::IsLw(Instr instr) {
+  return ((instr & kOpcodeMask) == LW);
+}
+
+
+int16_t Assembler::GetLwOffset(Instr instr) {
+  DCHECK(IsLw(instr));
+  return ((instr & kImm16Mask));
+}
+
+
+Instr Assembler::SetLwOffset(Instr instr, int16_t offset) {
+  DCHECK(IsLw(instr));
+
+  // We actually create a new lw instruction based on the original one.
+  Instr temp_instr = LW | (instr & kRsFieldMask) | (instr & kRtFieldMask)
+      | (offset & kImm16Mask);
+
+  return temp_instr;
+}
+
+
+bool Assembler::IsSw(Instr instr) {
+  return ((instr & kOpcodeMask) == SW);
+}
+
+
+Instr Assembler::SetSwOffset(Instr instr, int16_t offset) {
+  DCHECK(IsSw(instr));
+  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAddImmediate(Instr instr) {
+  return ((instr & kOpcodeMask) == ADDIU || (instr & kOpcodeMask) == DADDIU);
+}
+
+
+Instr Assembler::SetAddImmediateOffset(Instr instr, int16_t offset) {
+  DCHECK(IsAddImmediate(instr));
+  return ((instr & ~kImm16Mask) | (offset & kImm16Mask));
+}
+
+
+bool Assembler::IsAndImmediate(Instr instr) {
+  return GetOpcodeField(instr) == ANDI;
+}
+
+
+int64_t Assembler::target_at(int64_t pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~kImm16Mask) == 0) {
+    // Emitted label constant, not part of a branch.
+    if (instr == 0) {
+      return kEndOfChain;
+    } else {
+      int32_t imm18 =
+          ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+      return (imm18 + pos);
+    }
+  }
+  // Check we have a branch or jump instruction.
+  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+  // Do NOT change this to <<2. We rely on arithmetic shifts here, assuming
+  // the compiler uses arithmetic shifts for signed integers.
+  if (IsBranch(instr)) {
+    int32_t imm18 = ((instr & static_cast<int32_t>(kImm16Mask)) << 16) >> 14;
+    if (imm18 == kEndOfChain) {
+      // EndOfChain sentinel is returned directly, not relative to pc or pos.
+      return kEndOfChain;
+    } else {
+      return pos + kBranchPCOffset + imm18;
+    }
+  } else if (IsLui(instr)) {
+    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
+    DCHECK(IsOri(instr_ori));
+    DCHECK(IsOri(instr_ori2));
+
+    // TODO(plind) create named constants for shift values.
+    int64_t imm = static_cast<int64_t>(instr_lui & kImm16Mask) << 48;
+    imm |= static_cast<int64_t>(instr_ori & kImm16Mask) << 32;
+    imm |= static_cast<int64_t>(instr_ori2 & kImm16Mask) << 16;
+    // Sign extend address.
+    imm >>= 16;
+
+    if (imm == kEndOfJumpChain) {
+      // EndOfChain sentinel is returned directly, not relative to pc or pos.
+      return kEndOfChain;
+    } else {
+      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
+      int64_t delta = instr_address - imm;
+      DCHECK(pos > delta);
+      return pos - delta;
+    }
+  } else {
+    int32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+    if (imm28 == kEndOfJumpChain) {
+      // EndOfChain sentinel is returned directly, not relative to pc or pos.
+      return kEndOfChain;
+    } else {
+      uint64_t instr_address = reinterpret_cast<uint64_t>(buffer_ + pos);
+      instr_address &= kImm28Mask;
+      int64_t delta = instr_address - imm28;
+      DCHECK(pos > delta);
+      return pos - delta;
+    }
+  }
+}
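+
+// Illustrative trace of the lui case above: for a lui/ori/dsll/ori sequence
+// holding address A, the three halfwords are reassembled as
+//   imm = (lui16 << 48) | (ori16 << 32) | (ori2_16 << 16),
+// and the arithmetic shift right by 16 yields the sign-extended 48-bit A,
+// mirroring how target_at_put() below splits an address into halfwords.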
+
+
+void Assembler::target_at_put(int64_t pos, int64_t target_pos) {
+  Instr instr = instr_at(pos);
+  if ((instr & ~kImm16Mask) == 0) {
+    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
+    // Emitted label constant, not part of a branch.
+    // Make label relative to Code* of generated Code object.
+    instr_at_put(pos, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+    return;
+  }
+
+  DCHECK(IsBranch(instr) || IsJ(instr) || IsLui(instr));
+  if (IsBranch(instr)) {
+    int32_t imm18 = target_pos - (pos + kBranchPCOffset);
+    DCHECK((imm18 & 3) == 0);
+
+    instr &= ~kImm16Mask;
+    int32_t imm16 = imm18 >> 2;
+    DCHECK(is_int16(imm16));
+
+    instr_at_put(pos, instr | (imm16 & kImm16Mask));
+  } else if (IsLui(instr)) {
+    Instr instr_lui = instr_at(pos + 0 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pos + 1 * Assembler::kInstrSize);
+    Instr instr_ori2 = instr_at(pos + 3 * Assembler::kInstrSize);
+    DCHECK(IsOri(instr_ori));
+    DCHECK(IsOri(instr_ori2));
+
+    uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+    DCHECK((imm & 3) == 0);
+
+    instr_lui &= ~kImm16Mask;
+    instr_ori &= ~kImm16Mask;
+    instr_ori2 &= ~kImm16Mask;
+
+    instr_at_put(pos + 0 * Assembler::kInstrSize,
+                 instr_lui | ((imm >> 32) & kImm16Mask));
+    instr_at_put(pos + 1 * Assembler::kInstrSize,
+                 instr_ori | ((imm >> 16) & kImm16Mask));
+    instr_at_put(pos + 3 * Assembler::kInstrSize,
+                 instr_ori2 | (imm & kImm16Mask));
+  } else {
+    uint64_t imm28 = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+    imm28 &= kImm28Mask;
+    DCHECK((imm28 & 3) == 0);
+
+    instr &= ~kImm26Mask;
+    uint32_t imm26 = imm28 >> 2;
+    DCHECK(is_uint26(imm26));
+
+    instr_at_put(pos, instr | (imm26 & kImm26Mask));
+  }
+}
+
+
+void Assembler::print(Label* L) {
+  if (L->is_unused()) {
+    PrintF("unused label\n");
+  } else if (L->is_bound()) {
+    PrintF("bound label to %d\n", L->pos());
+  } else if (L->is_linked()) {
+    Label l = *L;
+    PrintF("unbound label");
+    while (l.is_linked()) {
+      PrintF("@ %d ", l.pos());
+      Instr instr = instr_at(l.pos());
+      if ((instr & ~kImm16Mask) == 0) {
+        PrintF("value\n");
+      } else {
+        PrintF("%d\n", instr);
+      }
+      next(&l);
+    }
+  } else {
+    PrintF("label in inconsistent state (pos = %d)\n", L->pos_);
+  }
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  DCHECK(0 <= pos && pos <= pc_offset());  // Must have valid binding position.
+  int32_t trampoline_pos = kInvalidSlotPos;
+  if (L->is_linked() && !trampoline_emitted_) {
+    unbound_labels_count_--;
+    next_buffer_check_ += kTrampolineSlotsSize;
+  }
+
+  while (L->is_linked()) {
+    int32_t fixup_pos = L->pos();
+    int32_t dist = pos - fixup_pos;
+    next(L);  // Call next before overwriting link with target at fixup_pos.
+    Instr instr = instr_at(fixup_pos);
+    if (IsBranch(instr)) {
+      if (dist > kMaxBranchOffset) {
+        if (trampoline_pos == kInvalidSlotPos) {
+          trampoline_pos = get_trampoline_entry(fixup_pos);
+          CHECK(trampoline_pos != kInvalidSlotPos);
+        }
+        DCHECK((trampoline_pos - fixup_pos) <= kMaxBranchOffset);
+        target_at_put(fixup_pos, trampoline_pos);
+        fixup_pos = trampoline_pos;
+        dist = pos - fixup_pos;
+      }
+      target_at_put(fixup_pos, pos);
+    } else {
+      DCHECK(IsJ(instr) || IsLui(instr) || IsEmittedConstant(instr));
+      target_at_put(fixup_pos, pos);
+    }
+  }
+  L->bind_to(pos);
+
+  // Keep track of the last bound label so we don't eliminate any instructions
+  // before a bound label.
+  if (pos > last_bound_pos_)
+    last_bound_pos_ = pos;
+}
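+
+// Sketch of the trampoline path above (assuming kMaxBranchOffset is the
+// ~128 KB reach of an 18-bit branch): a branch whose label is bound out of
+// range is first retargeted at an in-range trampoline slot, and the
+// trampoline then jumps the remaining distance to the real target.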
+
+
+void Assembler::bind(Label* L) {
+  DCHECK(!L->is_bound());  // Label can only be bound once.
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+  DCHECK(L->is_linked());
+  int link = target_at(L->pos());
+  if (link == kEndOfChain) {
+    L->Unuse();
+  } else {
+    DCHECK(link >= 0);
+    L->link_to(link);
+  }
+}
+
+
+bool Assembler::is_near(Label* L) {
+  if (L->is_bound()) {
+    return ((pc_offset() - L->pos()) < kMaxBranchOffset - 4 * kInstrSize);
+  }
+  return false;
+}
+
+
+// We have to use a temporary register for things that can be relocated even
+// if they can be encoded in the MIPS's 16 bits of immediate-offset instruction
+// space.  There is no guarantee that the relocated location can be similarly
+// encoded.
+bool Assembler::MustUseReg(RelocInfo::Mode rmode) {
+  return !RelocInfo::IsNone(rmode);
+}
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 Register rs,
+                                 Register rt,
+                                 Register rd,
+                                 uint16_t sa,
+                                 SecondaryField func) {
+  DCHECK(rd.is_valid() && rs.is_valid() && rt.is_valid() && is_uint5(sa));
+  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (rd.code() << kRdShift) | (sa << kSaShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 Register rs,
+                                 Register rt,
+                                 uint16_t msb,
+                                 uint16_t lsb,
+                                 SecondaryField func) {
+  DCHECK(rs.is_valid() && rt.is_valid() && is_uint5(msb) && is_uint5(lsb));
+  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (msb << kRdShift) | (lsb << kSaShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 SecondaryField fmt,
+                                 FPURegister ft,
+                                 FPURegister fs,
+                                 FPURegister fd,
+                                 SecondaryField func) {
+  DCHECK(fd.is_valid() && fs.is_valid() && ft.is_valid());
+  Instr instr = opcode | fmt | (ft.code() << kFtShift) | (fs.code() << kFsShift)
+      | (fd.code() << kFdShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 FPURegister fr,
+                                 FPURegister ft,
+                                 FPURegister fs,
+                                 FPURegister fd,
+                                 SecondaryField func) {
+  DCHECK(fd.is_valid() && fr.is_valid() && fs.is_valid() && ft.is_valid());
+  Instr instr = opcode | (fr.code() << kFrShift) | (ft.code() << kFtShift)
+      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 SecondaryField fmt,
+                                 Register rt,
+                                 FPURegister fs,
+                                 FPURegister fd,
+                                 SecondaryField func) {
+  DCHECK(fd.is_valid() && fs.is_valid() && rt.is_valid());
+  Instr instr = opcode | fmt | (rt.code() << kRtShift)
+      | (fs.code() << kFsShift) | (fd.code() << kFdShift) | func;
+  emit(instr);
+}
+
+
+void Assembler::GenInstrRegister(Opcode opcode,
+                                 SecondaryField fmt,
+                                 Register rt,
+                                 FPUControlRegister fs,
+                                 SecondaryField func) {
+  DCHECK(fs.is_valid() && rt.is_valid());
+  Instr instr =
+      opcode | fmt | (rt.code() << kRtShift) | (fs.code() << kFsShift) | func;
+  emit(instr);
+}
+
+
+// Instructions with immediate value.
+// Registers are in the order of the instruction encoding, from left to right.
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  Register rt,
+                                  int32_t j) {
+  DCHECK(rs.is_valid() && rt.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  SecondaryField SF,
+                                  int32_t j) {
+  DCHECK(rs.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | SF | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::GenInstrImmediate(Opcode opcode,
+                                  Register rs,
+                                  FPURegister ft,
+                                  int32_t j) {
+  DCHECK(rs.is_valid() && ft.is_valid() && (is_int16(j) || is_uint16(j)));
+  Instr instr = opcode | (rs.code() << kRsShift) | (ft.code() << kFtShift)
+      | (j & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::GenInstrJump(Opcode opcode,
+                             uint32_t address) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  DCHECK(is_uint26(address));
+  Instr instr = opcode | address;
+  emit(instr);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry(int32_t pos) {
+  int32_t trampoline_entry = kInvalidSlotPos;
+  if (!internal_trampoline_exception_) {
+    if (trampoline_.start() > pos) {
+      trampoline_entry = trampoline_.take_slot();
+    }
+
+    if (kInvalidSlotPos == trampoline_entry) {
+      internal_trampoline_exception_ = true;
+    }
+  }
+  return trampoline_entry;
+}
+
+
+uint64_t Assembler::jump_address(Label* L) {
+  int64_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link.
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      return kEndOfJumpChain;
+    }
+  }
+
+  uint64_t imm = reinterpret_cast<uint64_t>(buffer_) + target_pos;
+  DCHECK((imm & 3) == 0);
+
+  return imm;
+}
+
+
+int32_t Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+  int32_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+      return kEndOfChain;
+    }
+  }
+
+  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+  DCHECK((offset & 3) == 0);
+  DCHECK(is_int16(offset >> 2));
+
+  return offset;
+}
+
+
+int32_t Assembler::branch_offset_compact(Label* L,
+    bool jump_elimination_allowed) {
+  int32_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+      return kEndOfChain;
+    }
+  }
+
+  int32_t offset = target_pos - pc_offset();
+  DCHECK((offset & 3) == 0);
+  DCHECK(is_int16(offset >> 2));
+
+  return offset;
+}
+
+
+int32_t Assembler::branch_offset21(Label* L, bool jump_elimination_allowed) {
+  int32_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+      return kEndOfChain;
+    }
+  }
+
+  int32_t offset = target_pos - (pc_offset() + kBranchPCOffset);
+  DCHECK((offset & 3) == 0);
+  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21 bits wide.
+
+  return offset;
+}
+
+
+int32_t Assembler::branch_offset21_compact(Label* L,
+    bool jump_elimination_allowed) {
+  int32_t target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();
+      L->link_to(pc_offset());
+    } else {
+      L->link_to(pc_offset());
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+      return kEndOfChain;
+    }
+  }
+
+  int32_t offset = target_pos - pc_offset();
+  DCHECK((offset & 3) == 0);
+  DCHECK(((offset >> 2) & 0xFFE00000) == 0);  // Offset is 21 bits wide.
+
+  return offset;
+}
+
+
+void Assembler::label_at_put(Label* L, int at_offset) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+    instr_at_put(at_offset, target_pos + (Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link.
+      int32_t imm18 = target_pos - at_offset;
+      DCHECK((imm18 & 3) == 0);
+      int32_t imm16 = imm18 >> 2;
+      DCHECK(is_int16(imm16));
+      instr_at_put(at_offset, (imm16 & kImm16Mask));
+    } else {
+      target_pos = kEndOfChain;
+      instr_at_put(at_offset, 0);
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+    }
+    L->link_to(at_offset);
+  }
+}
+
+
+//------- Branch and jump instructions --------
+
+void Assembler::b(int16_t offset) {
+  beq(zero_reg, zero_reg, offset);
+}
+
+
+void Assembler::bal(int16_t offset) {
+  positions_recorder()->WriteRecordedPositions();
+  bgezal(zero_reg, offset);
+}
+
+
+void Assembler::beq(Register rs, Register rt, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  GenInstrImmediate(BEQ, rs, rt, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::bgez(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  GenInstrImmediate(REGIMM, rs, BGEZ, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::bgezc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BLEZL, rt, rt, offset);
+}
+
+
+void Assembler::bgeuc(Register rs, Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(!(rt.is(zero_reg)));
+  DCHECK(rs.code() != rt.code());
+  GenInstrImmediate(BLEZ, rs, rt, offset);
+}
+
+
+void Assembler::bgec(Register rs, Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(!(rt.is(zero_reg)));
+  DCHECK(rs.code() != rt.code());
+  GenInstrImmediate(BLEZL, rs, rt, offset);
+}
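+
+// Note on the r6 compact-branch encodings above and below: they reuse the
+// pre-r6 branch opcodes (BLEZ, BLEZL, BGTZ, BGTZL, ADDI, DADDI) and are
+// distinguished by which of the rs/rt fields are zero or equal, which is
+// why the DCHECKs constrain the register operands so tightly.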
+
+
+void Assembler::bgezal(Register rs, int16_t offset) {
+  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  positions_recorder()->WriteRecordedPositions();
+  GenInstrImmediate(REGIMM, rs, BGEZAL, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::bgtz(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  GenInstrImmediate(BGTZ, rs, zero_reg, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::bgtzc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BGTZL, zero_reg, rt, offset);
+}
+
+
+void Assembler::blez(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  GenInstrImmediate(BLEZ, rs, zero_reg, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::blezc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BLEZL, zero_reg, rt, offset);
+}
+
+
+void Assembler::bltzc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BGTZL, rt, rt, offset);
+}
+
+
+void Assembler::bltuc(Register rs, Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(!(rt.is(zero_reg)));
+  DCHECK(rs.code() != rt.code());
+  GenInstrImmediate(BGTZ, rs, rt, offset);
+}
+
+
+void Assembler::bltc(Register rs, Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(!(rt.is(zero_reg)));
+  DCHECK(rs.code() != rt.code());
+  GenInstrImmediate(BGTZL, rs, rt, offset);
+}
+
+
+void Assembler::bltz(Register rs, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  GenInstrImmediate(REGIMM, rs, BLTZ, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::bltzal(Register rs, int16_t offset) {
+  DCHECK(kArchVariant != kMips64r6 || rs.is(zero_reg));
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  positions_recorder()->WriteRecordedPositions();
+  GenInstrImmediate(REGIMM, rs, BLTZAL, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::bne(Register rs, Register rt, int16_t offset) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  GenInstrImmediate(BNE, rs, rt, offset);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::bovc(Register rs, Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(rs.code() >= rt.code());
+  GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnvc(Register rs, Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  DCHECK(rs.code() >= rt.code());
+  GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::blezalc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BLEZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::bgezalc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BLEZ, rt, rt, offset);
+}
+
+
+void Assembler::bgezall(Register rs, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  GenInstrImmediate(REGIMM, rs, BGEZALL, offset);
+}
+
+
+void Assembler::bltzalc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BGTZ, rt, rt, offset);
+}
+
+
+void Assembler::bgtzalc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(BGTZ, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqzalc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(ADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::bnezalc(Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rt.is(zero_reg)));
+  GenInstrImmediate(DADDI, zero_reg, rt, offset);
+}
+
+
+void Assembler::beqc(Register rs, Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(rs.code() < rt.code());
+  GenInstrImmediate(ADDI, rs, rt, offset);
+}
+
+
+void Assembler::beqzc(Register rs, int32_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  Instr instr = BEQZC | (rs.code() << kRsShift) | offset;
+  emit(instr);
+}
+
+
+void Assembler::bnec(Register rs, Register rt, int16_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(rs.code() < rt.code());
+  GenInstrImmediate(DADDI, rs, rt, offset);
+}
+
+
+void Assembler::bnezc(Register rs, int32_t offset) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK(!(rs.is(zero_reg)));
+  Instr instr = BNEZC | (rs.code() << kRsShift) | offset;
+  emit(instr);
+}
+
+
+void Assembler::j(int64_t target) {
+#ifdef DEBUG
+  // Get pc of delay slot.
+  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
+                   (kImm26Bits + kImmFieldShift)) == 0;
+  DCHECK(in_range && ((target & 3) == 0));
+#endif
+  GenInstrJump(J, target >> 2);
+}
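+
+// The region check above: the 26-bit jump index addresses
+// kImm26Bits + kImmFieldShift = 28 bits of byte offset, so the target and
+// the delay-slot pc must agree in all bits above bit 27, i.e. lie in the
+// same 256 MB region.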
+
+
+void Assembler::jr(Register rs) {
+  if (kArchVariant != kMips64r6) {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    if (rs.is(ra)) {
+      positions_recorder()->WriteRecordedPositions();
+    }
+    GenInstrRegister(SPECIAL, rs, zero_reg, zero_reg, 0, JR);
+    BlockTrampolinePoolFor(1);  // For associated delay slot.
+  } else {
+    jalr(rs, zero_reg);
+  }
+}
+
+
+void Assembler::jal(int64_t target) {
+#ifdef DEBUG
+  // Get pc of delay slot.
+  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
+                   (kImm26Bits + kImmFieldShift)) == 0;
+  DCHECK(in_range && ((target & 3) == 0));
+#endif
+  positions_recorder()->WriteRecordedPositions();
+  GenInstrJump(JAL, target >> 2);
+}
+
+
+void Assembler::jalr(Register rs, Register rd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  positions_recorder()->WriteRecordedPositions();
+  GenInstrRegister(SPECIAL, rs, zero_reg, rd, 0, JALR);
+  BlockTrampolinePoolFor(1);  // For associated delay slot.
+}
+
+
+void Assembler::j_or_jr(int64_t target, Register rs) {
+  // Get pc of delay slot.
+  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
+                   (kImm26Bits + kImmFieldShift)) == 0;
+  if (in_range) {
+      j(target);
+  } else {
+      jr(t9);
+  }
+}
+
+
+void Assembler::jal_or_jalr(int64_t target, Register rs) {
+  // Get pc of delay slot.
+  uint64_t ipc = reinterpret_cast<uint64_t>(pc_ + 1 * kInstrSize);
+  bool in_range = ((ipc ^ static_cast<uint64_t>(target)) >>
+                   (kImm26Bits + kImmFieldShift)) == 0;
+  if (in_range) {
+      jal(target);
+  } else {
+      jalr(t9);
+  }
+}
+
+
+// -------Data-processing-instructions---------
+
+// Arithmetic.
+
+void Assembler::addu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, ADDU);
+}
+
+
+void Assembler::addiu(Register rd, Register rs, int32_t j) {
+  GenInstrImmediate(ADDIU, rs, rd, j);
+}
+
+
+void Assembler::subu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SUBU);
+}
+
+
+void Assembler::mul(Register rd, Register rs, Register rt) {
+  if (kArchVariant == kMips64r6) {
+      GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH);
+  } else {
+      GenInstrRegister(SPECIAL2, rs, rt, rd, 0, MUL);
+  }
+}
+
+
+void Assembler::muh(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH);
+}
+
+
+void Assembler::mulu(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, MUL_MUH_U);
+}
+
+
+void Assembler::muhu(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, MUL_MUH_U);
+}
+
+
+void Assembler::dmul(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH);
+}
+
+
+void Assembler::dmuh(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH);
+}
+
+
+void Assembler::dmulu(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUL_OP, D_MUL_MUH_U);
+}
+
+
+void Assembler::dmuhu(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MUH_OP, D_MUL_MUH_U);
+}
+
+
+void Assembler::mult(Register rs, Register rt) {
+  DCHECK(kArchVariant != kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULT);
+}
+
+
+void Assembler::multu(Register rs, Register rt) {
+  DCHECK(kArchVariant != kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, MULTU);
+}
+
+
+void Assembler::daddiu(Register rd, Register rs, int32_t j) {
+  GenInstrImmediate(DADDIU, rs, rd, j);
+}
+
+
+void Assembler::div(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIV);
+}
+
+
+void Assembler::div(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD);
+}
+
+
+void Assembler::mod(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD);
+}
+
+
+void Assembler::divu(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DIVU);
+}
+
+
+void Assembler::divu(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, DIV_MOD_U);
+}
+
+
+void Assembler::modu(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, DIV_MOD_U);
+}
+
+
+void Assembler::daddu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DADDU);
+}
+
+
+void Assembler::dsubu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSUBU);
+}
+
+
+void Assembler::dmult(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULT);
+}
+
+
+void Assembler::dmultu(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DMULTU);
+}
+
+
+void Assembler::ddiv(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIV);
+}
+
+
+void Assembler::ddiv(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD);
+}
+
+
+void Assembler::dmod(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD);
+}
+
+
+void Assembler::ddivu(Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, zero_reg, 0, DDIVU);
+}
+
+
+void Assembler::ddivu(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, DIV_OP, D_DIV_MOD_U);
+}
+
+
+void Assembler::dmodu(Register rd, Register rs, Register rt) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, MOD_OP, D_DIV_MOD_U);
+}
+
+
+// Logical.
+
+void Assembler::and_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, AND);
+}
+
+
+void Assembler::andi(Register rt, Register rs, int32_t j) {
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(ANDI, rs, rt, j);
+}
+
+
+void Assembler::or_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, OR);
+}
+
+
+void Assembler::ori(Register rt, Register rs, int32_t j) {
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(ORI, rs, rt, j);
+}
+
+
+void Assembler::xor_(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, XOR);
+}
+
+
+void Assembler::xori(Register rt, Register rs, int32_t j) {
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(XORI, rs, rt, j);
+}
+
+
+void Assembler::nor(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, NOR);
+}
+
+
+// Shifts.
+void Assembler::sll(Register rd,
+                    Register rt,
+                    uint16_t sa,
+                    bool coming_from_nop) {
+  // Don't allow nop instructions in the form sll zero_reg, zero_reg to be
+  // generated using the sll instruction. They must be generated using
+  // nop(int/NopMarkerTypes) or MarkCode(int/NopMarkerTypes) pseudo
+  // instructions.
+  DCHECK(coming_from_nop || !(rd.is(zero_reg) && rt.is(zero_reg)));
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SLL);
+}
+
+
+void Assembler::sllv(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLLV);
+}
+
+
+void Assembler::srl(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRL);
+}
+
+
+void Assembler::srlv(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRLV);
+}
+
+
+void Assembler::sra(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, SRA);
+}
+
+
+void Assembler::srav(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SRAV);
+}
+
+
+void Assembler::rotr(Register rd, Register rt, uint16_t sa) {
+  // Should be called via MacroAssembler::Ror.
+  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+  DCHECK(kArchVariant == kMips64r2);
+  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+      | (rd.code() << kRdShift) | (sa << kSaShift) | SRL;
+  emit(instr);
+}
+
+
+void Assembler::rotrv(Register rd, Register rt, Register rs) {
+  // Should be called via MacroAssembler::Ror.
+  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+  DCHECK(kArchVariant == kMips64r2);
+  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+     | (rd.code() << kRdShift) | (1 << kSaShift) | SRLV;
+  emit(instr);
+}
+
+
+void Assembler::dsll(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL);
+}
+
+
+void Assembler::dsllv(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSLLV);
+}
+
+
+void Assembler::dsrl(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL);
+}
+
+
+void Assembler::dsrlv(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRLV);
+}
+
+
+void Assembler::drotr(Register rd, Register rt, uint16_t sa) {
+  DCHECK(rd.is_valid() && rt.is_valid() && is_uint5(sa));
+  Instr instr = SPECIAL | (1 << kRsShift) | (rt.code() << kRtShift)
+      | (rd.code() << kRdShift) | (sa << kSaShift) | DSRL;
+  emit(instr);
+}
+
+
+void Assembler::drotrv(Register rd, Register rt, Register rs) {
+  DCHECK(rd.is_valid() && rt.is_valid() && rs.is_valid());
+  Instr instr = SPECIAL | (rs.code() << kRsShift) | (rt.code() << kRtShift)
+      | (rd.code() << kRdShift) | (1 << kSaShift) | DSRLV;
+  emit(instr);
+}
+
+
+void Assembler::dsra(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA);
+}
+
+
+void Assembler::dsrav(Register rd, Register rt, Register rs) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, DSRAV);
+}
+
+
+void Assembler::dsll32(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSLL32);
+}
+
+
+void Assembler::dsrl32(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRL32);
+}
+
+
+void Assembler::dsra32(Register rd, Register rt, uint16_t sa) {
+  GenInstrRegister(SPECIAL, zero_reg, rt, rd, sa, DSRA32);
+}
+
+
+// ------------Memory-instructions-------------
+
+// Helper for base-reg + offset, when offset is larger than int16.
+void Assembler::LoadRegPlusOffsetToAt(const MemOperand& src) {
+  DCHECK(!src.rm().is(at));
+  DCHECK(is_int32(src.offset_));
+  daddiu(at, zero_reg, (src.offset_ >> kLuiShift) & kImm16Mask);
+  dsll(at, at, kLuiShift);
+  ori(at, at, src.offset_ & kImm16Mask);  // Load 32-bit offset.
+  daddu(at, at, src.rm());  // Add base register.
+}
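+
+// Worked example (illustrative): for a 32-bit offset 0x12345678 the helper
+// above emits
+//   daddiu(at, zero_reg, 0x1234);  // upper halfword of the offset
+//   dsll(at, at, 16);              // at = 0x12340000
+//   ori(at, at, 0x5678);           // at = 0x12345678
+//   daddu(at, at, base);           // at = base + offset
+// so the caller can then use MemOperand(at, 0).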
+
+
+void Assembler::lb(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LB, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LB, at, rd, 0);  // Equiv to lb(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::lbu(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LBU, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LBU, at, rd, 0);  // Equiv to lbu(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::lh(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LH, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LH, at, rd, 0);  // Equiv to lh(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::lhu(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LHU, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LHU, at, rd, 0);  // Equiv to lhu(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::lw(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LW, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LW, at, rd, 0);  // Equiv to lw(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::lwu(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LWU, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LWU, at, rd, 0);  // Equiv to lwu(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::lwl(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lwr(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LWR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sb(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(SB, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(SB, at, rd, 0);  // Equiv to sb(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::sh(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(SH, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(SH, at, rd, 0);  // Equiv to sh(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::sw(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(SW, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(SW, at, rd, 0);  // Equiv to sw(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::swl(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SWL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::swr(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SWR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::lui(Register rd, int32_t j) {
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(LUI, zero_reg, rd, j);
+}
+
+
+void Assembler::aui(Register rs, Register rt, int32_t j) {
+  // This instruction uses the same opcode as 'lui'; the difference in
+  // encoding is that 'lui' has the zero register in its rs field.
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(LUI, rs, rt, j);
+}
+
+
+void Assembler::daui(Register rs, Register rt, int32_t j) {
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(DAUI, rs, rt, j);
+}
+
+
+void Assembler::dahi(Register rs, int32_t j) {
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(REGIMM, rs, DAHI, j);
+}
+
+
+void Assembler::dati(Register rs, int32_t j) {
+  DCHECK(is_uint16(j));
+  GenInstrImmediate(REGIMM, rs, DATI, j);
+}
+
+
+void Assembler::ldl(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LDL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::ldr(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(LDR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sdl(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SDL, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::sdr(Register rd, const MemOperand& rs) {
+  GenInstrImmediate(SDR, rs.rm(), rd, rs.offset_);
+}
+
+
+void Assembler::ld(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(LD, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to load.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(LD, at, rd, 0);  // Equiv to ld(rd, MemOperand(at, 0));
+  }
+}
+
+
+void Assembler::sd(Register rd, const MemOperand& rs) {
+  if (is_int16(rs.offset_)) {
+    GenInstrImmediate(SD, rs.rm(), rd, rs.offset_);
+  } else {  // Offset > 16 bits, use multiple instructions to store.
+    LoadRegPlusOffsetToAt(rs);
+    GenInstrImmediate(SD, at, rd, 0);  // Equiv to sd(rd, MemOperand(at, 0));
+  }
+}
+
+
+// -------------Misc-instructions--------------
+
+// Break / Trap instructions.
+void Assembler::break_(uint32_t code, bool break_as_stop) {
+  DCHECK((code & ~0xfffff) == 0);
+  // We need to invalidate breaks that could be stops as well because the
+  // simulator expects a char pointer after the stop instruction.
+  // See constants-mips.h for explanation.
+  DCHECK((break_as_stop &&
+          code <= kMaxStopCode &&
+          code > kMaxWatchpointCode) ||
+         (!break_as_stop &&
+          (code > kMaxStopCode ||
+           code <= kMaxWatchpointCode)));
+  Instr break_instr = SPECIAL | BREAK | (code << 6);
+  emit(break_instr);
+}
+
+
+void Assembler::stop(const char* msg, uint32_t code) {
+  DCHECK(code > kMaxWatchpointCode);
+  DCHECK(code <= kMaxStopCode);
+#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
+  break_(0x54321);
+#else  // !V8_HOST_ARCH_MIPS && !V8_HOST_ARCH_MIPS64
+  BlockTrampolinePoolFor(3);
+  // The Simulator will handle the stop instruction and get the message address.
+  // On MIPS stop() is just a special kind of break_().
+  break_(code, true);
+  emit(reinterpret_cast<uint64_t>(msg));
+#endif
+}
+
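+// Layout sketch for a simulated stop (values hypothetical): a call like
+//   stop("watchdog", kMaxWatchpointCode + 1);
+// emits one break instruction encoding the stop code, immediately followed by
+// the 64-bit char* message address occupying the next two instruction slots;
+// BlockTrampolinePoolFor(3) keeps the trampoline pool from splitting this
+// 3-slot sequence.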
+
+void Assembler::tge(Register rs, Register rt, uint16_t code) {
+  DCHECK(is_uint10(code));
+  Instr instr = SPECIAL | TGE | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tgeu(Register rs, Register rt, uint16_t code) {
+  DCHECK(is_uint10(code));
+  Instr instr = SPECIAL | TGEU | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tlt(Register rs, Register rt, uint16_t code) {
+  DCHECK(is_uint10(code));
+  Instr instr =
+      SPECIAL | TLT | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tltu(Register rs, Register rt, uint16_t code) {
+  DCHECK(is_uint10(code));
+  Instr instr =
+      SPECIAL | TLTU | rs.code() << kRsShift
+      | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::teq(Register rs, Register rt, uint16_t code) {
+  DCHECK(is_uint10(code));
+  Instr instr =
+      SPECIAL | TEQ | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+void Assembler::tne(Register rs, Register rt, uint16_t code) {
+  DCHECK(is_uint10(code));
+  Instr instr =
+      SPECIAL | TNE | rs.code() << kRsShift | rt.code() << kRtShift | code << 6;
+  emit(instr);
+}
+
+
+// Move from HI/LO register.
+
+void Assembler::mfhi(Register rd) {
+  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFHI);
+}
+
+
+void Assembler::mflo(Register rd) {
+  GenInstrRegister(SPECIAL, zero_reg, zero_reg, rd, 0, MFLO);
+}
+
+
+// Set on less than instructions.
+void Assembler::slt(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLT);
+}
+
+
+void Assembler::sltu(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SLTU);
+}
+
+
+void Assembler::slti(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(SLTI, rs, rt, j);
+}
+
+
+void Assembler::sltiu(Register rt, Register rs, int32_t j) {
+  GenInstrImmediate(SLTIU, rs, rt, j);
+}
+
+
+// Conditional move.
+void Assembler::movz(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVZ);
+}
+
+
+void Assembler::movn(Register rd, Register rs, Register rt) {
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVN);
+}
+
+
+void Assembler::movt(Register rd, Register rs, uint16_t cc) {
+  Register rt;
+  rt.code_ = (cc & 0x0007) << 2 | 1;
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
+
+void Assembler::movf(Register rd, Register rs, uint16_t cc) {
+  Register rt;
+  rt.code_ = (cc & 0x0007) << 2 | 0;
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, MOVCI);
+}
+
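+// Note on the cc encoding below: the FP condition number lives in bits 4:2 of
+// the rt field and the true/false sense in bit 0. For example (operands
+// hypothetical), movt(v0, a0, 2) builds rt.code_ = (2 << 2) | 1 = 9, while
+// movf(v0, a0, 2) builds rt.code_ = (2 << 2) | 0 = 8.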
+
+void Assembler::sel(SecondaryField fmt, FPURegister fd,
+    FPURegister ft, FPURegister fs, uint8_t sel) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK((fmt == D) || (fmt == S));
+
+  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
+      fs.code() << kFsShift | fd.code() << kFdShift | SEL;
+  emit(instr);
+}
+
+
+// GPR.
+void Assembler::seleqz(Register rs, Register rt, Register rd) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELEQZ_S);
+}
+
+
+// FPR.
+void Assembler::seleqz(SecondaryField fmt, FPURegister fd,
+    FPURegister ft, FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK((fmt == D) || (fmt == S));
+
+  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
+      fs.code() << kFsShift | fd.code() << kFdShift | SELEQZ_C;
+  emit(instr);
+}
+
+
+// GPR.
+void Assembler::selnez(Register rs, Register rt, Register rd) {
+  DCHECK(kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL, rs, rt, rd, 0, SELNEZ_S);
+}
+
+
+// FPR.
+void Assembler::selnez(SecondaryField fmt, FPURegister fd,
+    FPURegister ft, FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK((fmt == D) || (fmt == S));
+
+  Instr instr = COP1 | fmt << kRsShift | ft.code() << kFtShift |
+      fs.code() << kFsShift | fd.code() << kFdShift | SELNEZ_C;
+  emit(instr);
+}
+
+
+// Bit twiddling.
+void Assembler::clz(Register rd, Register rs) {
+  if (kArchVariant != kMips64r6) {
+    // The clz instr requires the same GPR number in the 'rd' and 'rt' fields.
+    GenInstrRegister(SPECIAL2, rs, rd, rd, 0, CLZ);
+  } else {
+    GenInstrRegister(SPECIAL, rs, zero_reg, rd, 1, CLZ_R6);
+  }
+}
+
+
+void Assembler::ins_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+  // Should be called via MacroAssembler::Ins.
+  // Ins instr has 'rt' field as dest, and two uint5: msb, lsb.
+  DCHECK((kArchVariant == kMips64r2) || (kArchVariant == kMips64r6));
+  GenInstrRegister(SPECIAL3, rs, rt, pos + size - 1, pos, INS);
+}
+
+
+void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+  // Should be called via MacroAssembler::Ext.
+  // Ext instr has 'rt' field as dest, and two uint5: msb, lsb.
+  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, EXT);
+}
+
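+// Worked example of the msb/lsb packing (operands hypothetical): extracting
+// an 8-bit field at bit 4 via ext_(t0, t1, 4, 8) encodes msb = size - 1 = 7
+// and lsb = pos = 4, while the matching ins_(t1, t0, 4, 8) encodes
+// msb = pos + size - 1 = 11 and lsb = pos = 4.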
+
+void Assembler::pref(int32_t hint, const MemOperand& rs) {
+  DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
+  Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
+      | (rs.offset_);
+  emit(instr);
+}
+
+
+// --------Coprocessor-instructions----------------
+
+// Load, store, move.
+void Assembler::lwc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(LWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::ldc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(LDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::swc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(SWC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::sdc1(FPURegister fd, const MemOperand& src) {
+  GenInstrImmediate(SDC1, src.rm(), fd, src.offset_);
+}
+
+
+void Assembler::mtc1(Register rt, FPURegister fs) {
+  GenInstrRegister(COP1, MTC1, rt, fs, f0);
+}
+
+
+void Assembler::mthc1(Register rt, FPURegister fs) {
+  GenInstrRegister(COP1, MTHC1, rt, fs, f0);
+}
+
+
+void Assembler::dmtc1(Register rt, FPURegister fs) {
+  GenInstrRegister(COP1, DMTC1, rt, fs, f0);
+}
+
+
+void Assembler::mfc1(Register rt, FPURegister fs) {
+  GenInstrRegister(COP1, MFC1, rt, fs, f0);
+}
+
+
+void Assembler::mfhc1(Register rt, FPURegister fs) {
+  GenInstrRegister(COP1, MFHC1, rt, fs, f0);
+}
+
+
+void Assembler::dmfc1(Register rt, FPURegister fs) {
+  GenInstrRegister(COP1, DMFC1, rt, fs, f0);
+}
+
+
+void Assembler::ctc1(Register rt, FPUControlRegister fs) {
+  GenInstrRegister(COP1, CTC1, rt, fs);
+}
+
+
+void Assembler::cfc1(Register rt, FPUControlRegister fs) {
+  GenInstrRegister(COP1, CFC1, rt, fs);
+}
+
+
+void Assembler::DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
+  uint64_t i;
+  memcpy(&i, &d, 8);
+
+  *lo = i & 0xffffffff;
+  *hi = i >> 32;
+}
+
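+// For example, d = 1.0 has the IEEE 754 bit pattern 0x3FF0000000000000, so
+// DoubleAsTwoUInt32 produces *hi = 0x3FF00000 and *lo = 0x00000000.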
+
+// Arithmetic.
+
+void Assembler::add_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  GenInstrRegister(COP1, D, ft, fs, fd, ADD_D);
+}
+
+
+void Assembler::sub_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  GenInstrRegister(COP1, D, ft, fs, fd, SUB_D);
+}
+
+
+void Assembler::mul_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  GenInstrRegister(COP1, D, ft, fs, fd, MUL_D);
+}
+
+
+void Assembler::madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+    FPURegister ft) {
+  GenInstrRegister(COP1X, fr, ft, fs, fd, MADD_D);
+}
+
+
+void Assembler::div_d(FPURegister fd, FPURegister fs, FPURegister ft) {
+  GenInstrRegister(COP1, D, ft, fs, fd, DIV_D);
+}
+
+
+void Assembler::abs_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, ABS_D);
+}
+
+
+void Assembler::mov_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, MOV_D);
+}
+
+
+void Assembler::neg_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, NEG_D);
+}
+
+
+void Assembler::sqrt_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, SQRT_D);
+}
+
+
+// Conversions.
+
+void Assembler::cvt_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_W_S);
+}
+
+
+void Assembler::cvt_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_W_D);
+}
+
+
+void Assembler::trunc_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_W_S);
+}
+
+
+void Assembler::trunc_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_W_D);
+}
+
+
+void Assembler::round_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_W_S);
+}
+
+
+void Assembler::round_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_W_D);
+}
+
+
+void Assembler::floor_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_W_S);
+}
+
+
+void Assembler::floor_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_W_D);
+}
+
+
+void Assembler::ceil_w_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_W_S);
+}
+
+
+void Assembler::ceil_w_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_W_D);
+}
+
+
+void Assembler::cvt_l_s(FPURegister fd, FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_L_S);
+}
+
+
+void Assembler::cvt_l_d(FPURegister fd, FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_L_D);
+}
+
+
+void Assembler::trunc_l_s(FPURegister fd, FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1, S, f0, fs, fd, TRUNC_L_S);
+}
+
+
+void Assembler::trunc_l_d(FPURegister fd, FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1, D, f0, fs, fd, TRUNC_L_D);
+}
+
+
+void Assembler::round_l_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, ROUND_L_S);
+}
+
+
+void Assembler::round_l_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, ROUND_L_D);
+}
+
+
+void Assembler::floor_l_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, FLOOR_L_S);
+}
+
+
+void Assembler::floor_l_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, FLOOR_L_D);
+}
+
+
+void Assembler::ceil_l_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CEIL_L_S);
+}
+
+
+void Assembler::ceil_l_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CEIL_L_D);
+}
+
+
+void Assembler::min(SecondaryField fmt, FPURegister fd, FPURegister ft,
+    FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, MIN);
+}
+
+
+void Assembler::mina(SecondaryField fmt, FPURegister fd, FPURegister ft,
+    FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, MINA);
+}
+
+
+void Assembler::max(SecondaryField fmt, FPURegister fd, FPURegister ft,
+    FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, MAX);
+}
+
+
+void Assembler::maxa(SecondaryField fmt, FPURegister fd, FPURegister ft,
+    FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK((fmt == D) || (fmt == S));
+  GenInstrRegister(COP1, fmt, ft, fs, fd, MAXA);
+}
+
+
+void Assembler::cvt_s_w(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, W, f0, fs, fd, CVT_S_W);
+}
+
+
+void Assembler::cvt_s_l(FPURegister fd, FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1, L, f0, fs, fd, CVT_S_L);
+}
+
+
+void Assembler::cvt_s_d(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, D, f0, fs, fd, CVT_S_D);
+}
+
+
+void Assembler::cvt_d_w(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, W, f0, fs, fd, CVT_D_W);
+}
+
+
+void Assembler::cvt_d_l(FPURegister fd, FPURegister fs) {
+  DCHECK(kArchVariant == kMips64r2);
+  GenInstrRegister(COP1, L, f0, fs, fd, CVT_D_L);
+}
+
+
+void Assembler::cvt_d_s(FPURegister fd, FPURegister fs) {
+  GenInstrRegister(COP1, S, f0, fs, fd, CVT_D_S);
+}
+
+
+// Conditions for >= MIPSr6.
+void Assembler::cmp(FPUCondition cond, SecondaryField fmt,
+    FPURegister fd, FPURegister fs, FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r6);
+  DCHECK((fmt & ~(31 << kRsShift)) == 0);
+  Instr instr = COP1 | fmt | ft.code() << kFtShift |
+      fs.code() << kFsShift | fd.code() << kFdShift | (0 << 5) | cond;
+  emit(instr);
+}
+
+
+void Assembler::bc1eqz(int16_t offset, FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r6);
+  Instr instr = COP1 | BC1EQZ | ft.code() << kFtShift | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::bc1nez(int16_t offset, FPURegister ft) {
+  DCHECK(kArchVariant == kMips64r6);
+  Instr instr = COP1 | BC1NEZ | ft.code() << kFtShift | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+// Conditions for < MIPSr6.
+void Assembler::c(FPUCondition cond, SecondaryField fmt,
+    FPURegister fs, FPURegister ft, uint16_t cc) {
+  DCHECK(kArchVariant != kMips64r6);
+  DCHECK(is_uint3(cc));
+  DCHECK((fmt & ~(31 << kRsShift)) == 0);
+  Instr instr = COP1 | fmt | ft.code() << kFtShift | fs.code() << kFsShift
+      | cc << 8 | 3 << 4 | cond;
+  emit(instr);
+}
+
+
+void Assembler::fcmp(FPURegister src1, const double src2,
+      FPUCondition cond) {
+  DCHECK(src2 == 0.0);
+  mtc1(zero_reg, f14);
+  cvt_d_w(f14, f14);
+  c(cond, D, src1, f14, 0);
+}
+
+
+void Assembler::bc1f(int16_t offset, uint16_t cc) {
+  DCHECK(is_uint3(cc));
+  Instr instr = COP1 | BC1 | cc << 18 | 0 << 16 | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+void Assembler::bc1t(int16_t offset, uint16_t cc) {
+  DCHECK(is_uint3(cc));
+  Instr instr = COP1 | BC1 | cc << 18 | 1 << 16 | (offset & kImm16Mask);
+  emit(instr);
+}
+
+
+// Debugging.
+void Assembler::RecordJSReturn() {
+  positions_recorder()->WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+  positions_recorder()->WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_code_comments) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+int Assembler::RelocateInternalReference(byte* pc, intptr_t pc_delta) {
+  Instr instr = instr_at(pc);
+  DCHECK(IsJ(instr) || IsLui(instr));
+  if (IsLui(instr)) {
+    Instr instr_lui = instr_at(pc + 0 * Assembler::kInstrSize);
+    Instr instr_ori = instr_at(pc + 1 * Assembler::kInstrSize);
+    Instr instr_ori2 = instr_at(pc + 3 * Assembler::kInstrSize);
+    DCHECK(IsOri(instr_ori));
+    DCHECK(IsOri(instr_ori2));
+    // TODO(plind): symbolic names for the shifts.
+    int64_t imm = (instr_lui & static_cast<int64_t>(kImm16Mask)) << 48;
+    imm |= (instr_ori & static_cast<int64_t>(kImm16Mask)) << 32;
+    imm |= (instr_ori2 & static_cast<int64_t>(kImm16Mask)) << 16;
+    // Sign extend address.
+    imm >>= 16;
+
+    if (imm == kEndOfJumpChain) {
+      return 0;  // Number of instructions patched.
+    }
+    imm += pc_delta;
+    DCHECK((imm & 3) == 0);
+
+    instr_lui &= ~kImm16Mask;
+    instr_ori &= ~kImm16Mask;
+    instr_ori2 &= ~kImm16Mask;
+
+    instr_at_put(pc + 0 * Assembler::kInstrSize,
+                 instr_lui | ((imm >> 32) & kImm16Mask));
+    instr_at_put(pc + 1 * Assembler::kInstrSize,
+                 instr_ori | ((imm >> 16) & kImm16Mask));
+    instr_at_put(pc + 3 * Assembler::kInstrSize,
+                 instr_ori2 | (imm & kImm16Mask));
+    return 4;  // Number of instructions patched.
+  } else {
+    uint32_t imm28 = (instr & static_cast<int32_t>(kImm26Mask)) << 2;
+    if (static_cast<int32_t>(imm28) == kEndOfJumpChain) {
+      return 0;  // Number of instructions patched.
+    }
+
+    imm28 += pc_delta;
+    imm28 &= kImm28Mask;
+    DCHECK((imm28 & 3) == 0);
+
+    instr &= ~kImm26Mask;
+    uint32_t imm26 = imm28 >> 2;
+    DCHECK(is_uint26(imm26));
+
+    instr_at_put(pc, instr | (imm26 & kImm26Mask));
+    return 1;  // Number of instructions patched.
+  }
+}
+
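+// Worked example for the lui branch above (addresses hypothetical): a li
+// sequence holding 0x0000123456789abc keeps its immediates in slots 0, 1 and
+// 3 (0x1234, 0x5678, 0x9abc); slot 2 is the dsll and carries no immediate.
+// Relocating by pc_delta = 8 rewrites the slots to 0x1234, 0x5678, 0x9ac4.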
+
+void Assembler::GrowBuffer() {
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // Compute new buffer size.
+  CodeDesc desc;  // The new buffer.
+  if (buffer_size_ < 1 * MB) {
+    desc.buffer_size = 2 * buffer_size_;
+  } else {
+    desc.buffer_size = buffer_size_ + 1 * MB;
+  }
+  CHECK_GT(desc.buffer_size, 0);  // No overflow.
+
+  // Set up new buffer.
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+  // Copy the data.
+  intptr_t pc_delta = desc.buffer - buffer_;
+  intptr_t rc_delta = (desc.buffer + desc.buffer_size) -
+      (buffer_ + buffer_size_);
+  MemMove(desc.buffer, buffer_, desc.instr_size);
+  MemMove(reloc_info_writer.pos() + rc_delta,
+          reloc_info_writer.pos(), desc.reloc_size);
+
+  // Switch buffers.
+  DeleteArray(buffer_);
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+  // Relocate runtime entries.
+  for (RelocIterator it(desc); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+      byte* p = reinterpret_cast<byte*>(it.rinfo()->pc());
+      RelocateInternalReference(p, pc_delta);
+    }
+  }
+
+  DCHECK(!overflow());
+}
+
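+// Growth policy sketch (sizes hypothetical): a buffer starting at 256 KB
+// doubles to 512 KB and then 1 MB; from 1 MB on it grows linearly to 2 MB,
+// 3 MB, and so on, since doubling only applies while buffer_size_ < 1 MB.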
+
+void Assembler::db(uint8_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint8_t*>(pc_) = data;
+  pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint32_t*>(pc_) = data;
+  pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit_code_stub_address(Code* stub) {
+  CheckBuffer();
+  *reinterpret_cast<uint64_t*>(pc_) =
+      reinterpret_cast<uint64_t>(stub->instruction_start());
+  pc_ += sizeof(uint64_t);
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  // We do not try to reuse pool constants.
+  RelocInfo rinfo(pc_, rmode, data, NULL);
+  if (rmode >= RelocInfo::JS_RETURN && rmode <= RelocInfo::DEBUG_BREAK_SLOT) {
+    // Adjust code for new modes.
+    DCHECK(RelocInfo::IsDebugBreakSlot(rmode)
+           || RelocInfo::IsJSReturn(rmode)
+           || RelocInfo::IsComment(rmode)
+           || RelocInfo::IsPosition(rmode));
+    // These modes do not need an entry in the constant pool.
+  }
+  if (!RelocInfo::IsNone(rinfo.rmode())) {
+    // Don't record external references unless the heap will be serialized.
+    if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
+        !serializer_enabled() && !emit_debug_code()) {
+      return;
+    }
+    DCHECK(buffer_space() >= kMaxRelocSize);  // Too late to grow buffer here.
+    if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
+      RelocInfo reloc_info_with_ast_id(pc_,
+                                       rmode,
+                                       RecordedAstId().ToInt(),
+                                       NULL);
+      ClearRecordedAstId();
+      reloc_info_writer.Write(&reloc_info_with_ast_id);
+    } else {
+      reloc_info_writer.Write(&rinfo);
+    }
+  }
+}
+
+
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+void Assembler::CheckTrampolinePool() {
+  // Some small sequences of instructions must not be broken up by the
+  // insertion of a trampoline pool; such sequences are protected by setting
+  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+  // which are both checked here. Also, recursive calls to CheckTrampolinePool
+  // are blocked by trampoline_pool_blocked_nesting_.
+  if ((trampoline_pool_blocked_nesting_ > 0) ||
+      (pc_offset() < no_trampoline_pool_before_)) {
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
+    if (trampoline_pool_blocked_nesting_ > 0) {
+      next_buffer_check_ = pc_offset() + kInstrSize;
+    } else {
+      next_buffer_check_ = no_trampoline_pool_before_;
+    }
+    return;
+  }
+
+  DCHECK(!trampoline_emitted_);
+  DCHECK(unbound_labels_count_ >= 0);
+  if (unbound_labels_count_ > 0) {
+    // First we emit jump (2 instructions), then we emit trampoline pool.
+    { BlockTrampolinePoolScope block_trampoline_pool(this);
+      Label after_pool;
+      b(&after_pool);
+      nop();
+
+      int pool_start = pc_offset();
+      for (int i = 0; i < unbound_labels_count_; i++) {
+        uint64_t imm64;
+        imm64 = jump_address(&after_pool);
+        { BlockGrowBufferScope block_buf_growth(this);
+          // Buffer growth (and relocation) must be blocked for internal
+          // references until associated instructions are emitted and available
+          // to be patched.
+          RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+          // TODO(plind): Verify this, presume I cannot use macro-assembler
+          // here.
+          lui(at, (imm64 >> 32) & kImm16Mask);
+          ori(at, at, (imm64 >> 16) & kImm16Mask);
+          dsll(at, at, 16);
+          ori(at, at, imm64 & kImm16Mask);
+        }
+        jr(at);
+        nop();
+      }
+      bind(&after_pool);
+      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+      trampoline_emitted_ = true;
+      // As we are only going to emit the trampoline once, we need to prevent
+      // any further emission.
+      next_buffer_check_ = kMaxInt;
+    }
+  } else {
+    // Number of branches to unbound label at this point is zero, so we can
+    // move next buffer check to maximum.
+    next_buffer_check_ = pc_offset() +
+        kMaxBranchOffset - kTrampolineSlotsSize * 16;
+  }
+  return;
+}
+
+
+Address Assembler::target_address_at(Address pc) {
+  Instr instr0 = instr_at(pc);
+  Instr instr1 = instr_at(pc + 1 * kInstrSize);
+  Instr instr3 = instr_at(pc + 3 * kInstrSize);
+
+  // Interpret 4 instructions for address generated by li: See listing in
+  // Assembler::set_target_address_at() just below.
+  if ((GetOpcodeField(instr0) == LUI) && (GetOpcodeField(instr1) == ORI) &&
+      (GetOpcodeField(instr3) == ORI)) {
+    // Assemble the 48 bit value.
+    int64_t addr = static_cast<int64_t>(
+        (static_cast<uint64_t>(GetImmediate16(instr0)) << 32) |
+        (static_cast<uint64_t>(GetImmediate16(instr1)) << 16) |
+        static_cast<uint64_t>(GetImmediate16(instr3)));
+
+    // Sign extend to get canonical address.
+    addr = (addr << 16) >> 16;
+    return reinterpret_cast<Address>(addr);
+  }
+  // We should never get here, force a bad address if we do.
+  UNREACHABLE();
+  return (Address)0x0;
+}
+
+
+// MIPS and ia32 use opposite encoding for qNaN and sNaN, such that ia32
+// qNaN is a MIPS sNaN, and ia32 sNaN is MIPS qNaN. If running from a heap
+// snapshot generated on ia32, the resulting MIPS sNaN must be quieted.
+// OS::nan_value() returns a qNaN.
+void Assembler::QuietNaN(HeapObject* object) {
+  HeapNumber::cast(object)->set_value(base::OS::nan_value());
+}
+
+
+// On Mips64, a target address is stored in a 4-instruction sequence:
+//    0: lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+//    1: ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+//    2: dsll(rd, rd, 16);
+//    3: ori(rd, rd, j.imm64_ & kImm16Mask);
+//
+// Patching the address must replace all the lui & ori instructions,
+// and flush the i-cache.
+//
+// There is an optimization below, which emits a nop when the address
+// fits in just 16 bits. This is unlikely to help, and should be benchmarked,
+// and possibly removed.
+void Assembler::set_target_address_at(Address pc,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+// There is an optimization where only 4 instructions are used to load an
+// address in code on MIPS64, because only 48 bits of the address are
+// effectively used. It relies on the fact that the upper bits [63:48] are not
+// used for virtual address translation; they only have to be set according to
+// the value of bit 47 to produce a canonical address.
+  Instr instr1 = instr_at(pc + kInstrSize);
+  uint32_t rt_code = GetRt(instr1);
+  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+  uint64_t itarget = reinterpret_cast<uint64_t>(target);
+
+#ifdef DEBUG
+  // Check we have the result from a li macro-instruction.
+  Instr instr0 = instr_at(pc);
+  Instr instr3 = instr_at(pc + kInstrSize * 3);
+  CHECK((GetOpcodeField(instr0) == LUI && GetOpcodeField(instr1) == ORI &&
+         GetOpcodeField(instr3) == ORI));
+#endif
+
+  // Must use 4 instructions to ensure patchable code:
+  // lui rt, upper-16.
+  // ori rt, rt, middle-16.
+  // dsll rt, rt, 16.
+  // ori rt, rt, lower-16.
+  *p = LUI | (rt_code << kRtShift) | ((itarget >> 32) & kImm16Mask);
+  *(p + 1) = ORI | (rt_code << kRtShift) | (rt_code << kRsShift)
+      | ((itarget >> 16) & kImm16Mask);
+  *(p + 3) = ORI | (rt_code << kRsShift) | (rt_code << kRtShift)
+      | (itarget & kImm16Mask);
+
+  if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+    CpuFeatures::FlushICache(pc, 4 * Assembler::kInstrSize);
+  }
+}
+
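+// For a hypothetical target 0x0000123456789abc the patched immediates are
+// (itarget >> 32) & 0xffff = 0x1234 for the lui, (itarget >> 16) & 0xffff =
+// 0x5678 for the first ori, and itarget & 0xffff = 0x9abc for the final ori;
+// the dsll in slot 2 is reused unchanged.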
+
+void Assembler::JumpLabelToJumpRegister(Address pc) {
+  // Address pc points to lui/ori instructions.
+  // The jump to label, if present, follows at pc + 6 * kInstrSize.
+  uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+#ifdef DEBUG
+  Instr instr1 = instr_at(pc);
+#endif
+  Instr instr2 = instr_at(pc + 1 * kInstrSize);
+  Instr instr3 = instr_at(pc + 6 * kInstrSize);
+  bool patched = false;
+
+  if (IsJal(instr3)) {
+    DCHECK(GetOpcodeField(instr1) == LUI);
+    DCHECK(GetOpcodeField(instr2) == ORI);
+
+    uint32_t rs_field = GetRt(instr2) << kRsShift;
+    uint32_t rd_field = ra.code() << kRdShift;  // Return-address (ra) reg.
+    *(p+6) = SPECIAL | rs_field | rd_field | JALR;
+    patched = true;
+  } else if (IsJ(instr3)) {
+    DCHECK(GetOpcodeField(instr1) == LUI);
+    DCHECK(GetOpcodeField(instr2) == ORI);
+
+    uint32_t rs_field = GetRt(instr2) << kRsShift;
+    *(p+6) = SPECIAL | rs_field | JR;
+    patched = true;
+  }
+
+  if (patched) {
+    CpuFeatures::FlushICache(pc + 6 * kInstrSize, sizeof(int32_t));
+  }
+}
+
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+  // No out-of-line constant pool support.
+  DCHECK(!FLAG_enable_ool_constant_pool);
+  return isolate->factory()->empty_constant_pool_array();
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+  // No out-of-line constant pool support.
+  DCHECK(!FLAG_enable_ool_constant_pool);
+  return;
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/assembler-mips64.h b/src/mips64/assembler-mips64.h
new file mode 100644
index 0000000..5c754f4
--- /dev/null
+++ b/src/mips64/assembler-mips64.h
@@ -0,0 +1,1416 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
+// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
+// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
+// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license above has been
+// modified significantly by Google Inc.
+// Copyright 2012 the V8 project authors. All rights reserved.
+
+
+#ifndef V8_MIPS_ASSEMBLER_MIPS_H_
+#define V8_MIPS_ASSEMBLER_MIPS_H_
+
+#include <stdio.h>
+#include "src/assembler.h"
+#include "src/mips64/constants-mips64.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Register and FPURegister.
+
+// Core register.
+struct Register {
+  static const int kNumRegisters = v8::internal::kNumRegisters;
+  static const int kMaxNumAllocatableRegisters = 14;  // v0 through t2 and cp.
+  static const int kSizeInBytes = 8;
+  static const int kCpRegister = 23;  // cp (s7) is the 23rd register.
+
+  inline static int NumAllocatableRegisters();
+
+  static int ToAllocationIndex(Register reg) {
+    DCHECK((reg.code() - 2) < (kMaxNumAllocatableRegisters - 1) ||
+           reg.is(from_code(kCpRegister)));
+    return reg.is(from_code(kCpRegister)) ?
+           kMaxNumAllocatableRegisters - 1 :  // Return last index for 'cp'.
+           reg.code() - 2;  // zero_reg and 'at' are skipped.
+  }
+
+  static Register FromAllocationIndex(int index) {
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+    return index == kMaxNumAllocatableRegisters - 1 ?
+           from_code(kCpRegister) :  // Last index is always the 'cp' register.
+           from_code(index + 2);  // zero_reg and 'at' are skipped.
+  }
+
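+  // For example, a0 (code 4) maps to allocation index 2 because zero_reg and
+  // 'at' are skipped, while cp (s7, code 23) maps to the last index,
+  // kMaxNumAllocatableRegisters - 1 = 13; FromAllocationIndex inverts this.
+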
+  static const char* AllocationIndexToString(int index) {
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+    const char* const names[] = {
+      "v0",
+      "v1",
+      "a0",
+      "a1",
+      "a2",
+      "a3",
+      "a4",
+      "a5",
+      "a6",
+      "a7",
+      "t0",
+      "t1",
+      "t2",
+      "s7",
+    };
+    return names[index];
+  }
+
+  static Register from_code(int code) {
+    Register r = { code };
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(Register reg) const { return code_ == reg.code_; }
+  int code() const {
+    DCHECK(is_valid());
+    return code_;
+  }
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << code_;
+  }
+
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+#define REGISTER(N, C) \
+  const int kRegister_ ## N ## _Code = C; \
+  const Register N = { C }
+
+REGISTER(no_reg, -1);
+// Always zero.
+REGISTER(zero_reg, 0);
+// at: Reserved for synthetic instructions.
+REGISTER(at, 1);
+// v0, v1: Used when returning multiple values from subroutines.
+REGISTER(v0, 2);
+REGISTER(v1, 3);
+// a0 - a4: Used to pass non-FP parameters.
+REGISTER(a0, 4);
+REGISTER(a1, 5);
+REGISTER(a2, 6);
+REGISTER(a3, 7);
+// a4 - a7, t0 - t3: Can be used without reservation; they act as temporary
+// registers and are allowed to be destroyed by subroutines.
+REGISTER(a4, 8);
+REGISTER(a5, 9);
+REGISTER(a6, 10);
+REGISTER(a7, 11);
+REGISTER(t0, 12);
+REGISTER(t1, 13);
+REGISTER(t2, 14);
+REGISTER(t3, 15);
+// s0 - s7: Subroutine register variables. Subroutines that write to these
+// registers must restore their values before exiting so that the caller can
+// expect the values to be preserved.
+REGISTER(s0, 16);
+REGISTER(s1, 17);
+REGISTER(s2, 18);
+REGISTER(s3, 19);
+REGISTER(s4, 20);
+REGISTER(s5, 21);
+REGISTER(s6, 22);
+REGISTER(s7, 23);
+REGISTER(t8, 24);
+REGISTER(t9, 25);
+// k0, k1: Reserved for system calls and interrupt handlers.
+REGISTER(k0, 26);
+REGISTER(k1, 27);
+// gp: Reserved.
+REGISTER(gp, 28);
+// sp: Stack pointer.
+REGISTER(sp, 29);
+// fp: Frame pointer.
+REGISTER(fp, 30);
+// ra: Return address pointer.
+REGISTER(ra, 31);
+
+#undef REGISTER
+
+
+int ToNumber(Register reg);
+
+Register ToRegister(int num);
+
+// Coprocessor register.
+struct FPURegister {
+  static const int kMaxNumRegisters = v8::internal::kNumFPURegisters;
+
+  // TODO(plind): Warning, inconsistent numbering here. kNumFPURegisters refers
+  // to number of 32-bit FPU regs, but kNumAllocatableRegisters refers to
+  // number of Double regs (64-bit regs, or FPU-reg-pairs).
+
+  // A few double registers are reserved: one as a scratch register and one to
+  // hold 0.0.
+  //  f28: 0.0
+  //  f30: scratch register.
+  static const int kNumReservedRegisters = 2;
+  static const int kMaxNumAllocatableRegisters = kMaxNumRegisters / 2 -
+      kNumReservedRegisters;
+
+  inline static int NumRegisters();
+  inline static int NumAllocatableRegisters();
+  inline static int ToAllocationIndex(FPURegister reg);
+  static const char* AllocationIndexToString(int index);
+
+  static FPURegister FromAllocationIndex(int index) {
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+    return from_code(index * 2);
+  }
+
+  static FPURegister from_code(int code) {
+    FPURegister r = { code };
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
+  bool is(FPURegister creg) const { return code_ == creg.code_; }
+  FPURegister low() const {
+    // TODO(plind): Create DCHECK for FR=0 mode. This usage suspect for FR=1.
+    // Find low reg of a Double-reg pair, which is the reg itself.
+    DCHECK(code_ % 2 == 0);  // Specified Double reg must be even.
+    FPURegister reg;
+    reg.code_ = code_;
+    DCHECK(reg.is_valid());
+    return reg;
+  }
+  FPURegister high() const {
+    // TODO(plind): Create DCHECK for FR=0 mode. This usage illegal in FR=1.
+    // Find the high reg of a Double-reg pair, which is reg + 1.
+    DCHECK(code_ % 2 == 0);  // Specified Double reg must be even.
+    FPURegister reg;
+    reg.code_ = code_ + 1;
+    DCHECK(reg.is_valid());
+    return reg;
+  }
+
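+  // For example, f0.low() is f0 itself and f0.high() is f1; calling either on
+  // an odd-numbered register trips the DCHECK above.
+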
+  int code() const {
+    DCHECK(is_valid());
+    return code_;
+  }
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << code_;
+  }
+  void setcode(int f) {
+    code_ = f;
+    DCHECK(is_valid());
+  }
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+// V8 now supports the O32 ABI, and the FPU Registers are organized as 32
+// 32-bit registers, f0 through f31. When used as 'double' they are used
+// in pairs, starting with the even numbered register. So a double operation
+// on f0 really uses f0 and f1.
+// (Modern mips hardware also supports 32 64-bit registers, via setting
+// (privileged) Status Register FR bit to 1. This is used by the N32 ABI,
+// but it is not in common use. Someday we will want to support this in v8.)
+
+// For O32 ABI, Floats and Doubles refer to same set of 32 32-bit registers.
+typedef FPURegister DoubleRegister;
+typedef FPURegister FloatRegister;
+
+const FPURegister no_freg = { -1 };
+
+const FPURegister f0 = { 0 };  // Return value in hard float mode.
+const FPURegister f1 = { 1 };
+const FPURegister f2 = { 2 };
+const FPURegister f3 = { 3 };
+const FPURegister f4 = { 4 };
+const FPURegister f5 = { 5 };
+const FPURegister f6 = { 6 };
+const FPURegister f7 = { 7 };
+const FPURegister f8 = { 8 };
+const FPURegister f9 = { 9 };
+const FPURegister f10 = { 10 };
+const FPURegister f11 = { 11 };
+const FPURegister f12 = { 12 };  // Arg 0 in hard float mode.
+const FPURegister f13 = { 13 };
+const FPURegister f14 = { 14 };  // Arg 1 in hard float mode.
+const FPURegister f15 = { 15 };
+const FPURegister f16 = { 16 };
+const FPURegister f17 = { 17 };
+const FPURegister f18 = { 18 };
+const FPURegister f19 = { 19 };
+const FPURegister f20 = { 20 };
+const FPURegister f21 = { 21 };
+const FPURegister f22 = { 22 };
+const FPURegister f23 = { 23 };
+const FPURegister f24 = { 24 };
+const FPURegister f25 = { 25 };
+const FPURegister f26 = { 26 };
+const FPURegister f27 = { 27 };
+const FPURegister f28 = { 28 };
+const FPURegister f29 = { 29 };
+const FPURegister f30 = { 30 };
+const FPURegister f31 = { 31 };
+
+// Register aliases.
+// cp is assumed to be a callee saved register.
+// Defined using #define instead of "static const Register&" because Clang
+// complains otherwise when a compilation unit that includes this header
+// doesn't use the variables.
+#define kRootRegister s6
+#define cp s7
+#define kLithiumScratchReg s3
+#define kLithiumScratchReg2 s4
+#define kLithiumScratchDouble f30
+#define kDoubleRegZero f28
+
+// FPU (coprocessor 1) control registers.
+// Currently only FCSR (#31) is implemented.
+struct FPUControlRegister {
+  bool is_valid() const { return code_ == kFCSRRegister; }
+  bool is(FPUControlRegister creg) const { return code_ == creg.code_; }
+  int code() const {
+    DCHECK(is_valid());
+    return code_;
+  }
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << code_;
+  }
+  void setcode(int f) {
+    code_ = f;
+    DCHECK(is_valid());
+  }
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+const FPUControlRegister no_fpucreg = { kInvalidFPUControlRegister };
+const FPUControlRegister FCSR = { kFCSRRegister };
+
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands.
+const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+const uint64_t kSmiShiftMask = (1UL << kSmiShift) - 1;
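+// With the usual 64-bit V8 configuration (kSmiTagSize = 1 plus
+// kSmiShiftSize = 31, both defined in the shared headers), kSmiShift is 32:
+// a Smi carries its payload in the upper 32 bits, and kSmiShiftMask selects
+// the low 32 tag/shift bits.
+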
+// Class Operand represents a shifter operand in data processing instructions.
+class Operand BASE_EMBEDDED {
+ public:
+  // Immediate.
+  INLINE(explicit Operand(int64_t immediate,
+         RelocInfo::Mode rmode = RelocInfo::NONE64));
+  INLINE(explicit Operand(const ExternalReference& f));
+  INLINE(explicit Operand(const char* s));
+  INLINE(explicit Operand(Object** opp));
+  INLINE(explicit Operand(Context** cpp));
+  explicit Operand(Handle<Object> handle);
+  INLINE(explicit Operand(Smi* value));
+
+  // Register.
+  INLINE(explicit Operand(Register rm));
+
+  // Return true if this is a register operand.
+  INLINE(bool is_reg() const);
+
+  inline int64_t immediate() const {
+    DCHECK(!is_reg());
+    return imm64_;
+  }
+
+  Register rm() const { return rm_; }
+
+ private:
+  Register rm_;
+  int64_t imm64_;  // Valid if rm_ == no_reg.
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+  friend class MacroAssembler;
+};
+
+
+// On MIPS we have only one addressing mode, base_reg + offset.
+// Class MemOperand represents a memory operand in load and store instructions.
+class MemOperand : public Operand {
+ public:
+  // Immediate value attached to offset.
+  enum OffsetAddend {
+    offset_minus_one = -1,
+    offset_zero = 0
+  };
+
+  explicit MemOperand(Register rn, int64_t offset = 0);
+  explicit MemOperand(Register rn, int64_t unit, int64_t multiplier,
+                      OffsetAddend offset_addend = offset_zero);
+  int32_t offset() const { return offset_; }
+
+  bool OffsetIsInt16Encodable() const {
+    return is_int16(offset_);
+  }
+
+ private:
+  int32_t offset_;
+
+  friend class Assembler;
+};
+
+
+class Assembler : public AssemblerBase {
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
+  virtual ~Assembler() { }
+
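+  // A minimal usage sketch (buffer size and names hypothetical): passing
+  // NULL makes the assembler allocate and grow its own buffer.
+  //   Assembler assm(isolate, NULL, 4 * KB);
+  //   ...emit instructions...
+  //   CodeDesc desc;
+  //   assm.GetCode(&desc);
+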
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Label operations & relative jumps (PPUM Appendix D).
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+  void bind(Label* L);  // Binds an unbound label L to current code position.
+  // Determines if Label is bound and near enough so that branch instruction
+  // can be used to reach it, instead of jump instruction.
+  bool is_near(Label* L);
+
+  // Returns the branch offset to the given label from the current code
+  // position. Links the label to the current position if it is still unbound.
+  // Manages the jump elimination optimization if the second parameter is true.
+  int32_t branch_offset(Label* L, bool jump_elimination_allowed);
+  int32_t branch_offset_compact(Label* L, bool jump_elimination_allowed);
+  int32_t branch_offset21(Label* L, bool jump_elimination_allowed);
+  int32_t branch_offset21_compact(Label* L, bool jump_elimination_allowed);
+  int32_t shifted_branch_offset(Label* L, bool jump_elimination_allowed) {
+    int32_t o = branch_offset(L, jump_elimination_allowed);
+    DCHECK((o & 3) == 0);   // Assert the offset is aligned.
+    return o >> 2;
+  }
+  int32_t shifted_branch_offset_compact(Label* L,
+      bool jump_elimination_allowed) {
+    int32_t o = branch_offset_compact(L, jump_elimination_allowed);
+    DCHECK((o & 3) == 0);   // Assert the offset is aligned.
+    return o >> 2;
+  }
+  uint64_t jump_address(Label* L);
+
+  // Puts a label's target address at the given position.
+  // The high 8 bits are set to zero.
+  void label_at_put(Label* L, int at_offset);
+
+  // Read/Modify the code target address in the branch/call instruction at pc.
+  static Address target_address_at(Address pc);
+  static void set_target_address_at(Address pc,
+                                    Address target,
+                                    ICacheFlushMode icache_flush_mode =
+                                        FLUSH_ICACHE_IF_NEEDED);
+  // On MIPS there is no Constant Pool so we skip that parameter.
+  INLINE(static Address target_address_at(Address pc,
+                                          ConstantPoolArray* constant_pool)) {
+    return target_address_at(pc);
+  }
+  INLINE(static void set_target_address_at(Address pc,
+                                           ConstantPoolArray* constant_pool,
+                                           Address target,
+                                           ICacheFlushMode icache_flush_mode =
+                                               FLUSH_ICACHE_IF_NEEDED)) {
+    set_target_address_at(pc, target, icache_flush_mode);
+  }
+  INLINE(static Address target_address_at(Address pc, Code* code)) {
+    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    return target_address_at(pc, constant_pool);
+  }
+  INLINE(static void set_target_address_at(Address pc,
+                                           Code* code,
+                                           Address target,
+                                           ICacheFlushMode icache_flush_mode =
+                                               FLUSH_ICACHE_IF_NEEDED)) {
+    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+  }
+
+  // Return the code target address at a call site from the return address
+  // of that call in the instruction stream.
+  inline static Address target_address_from_return_address(Address pc);
+
+  // Return the code target address of the patch debug break slot
+  inline static Address break_address_from_return_address(Address pc);
+
+  static void JumpLabelToJumpRegister(Address pc);
+
+  static void QuietNaN(HeapObject* nan);
+
+  // This sets the branch destination (which gets loaded at the call address).
+  // This is for calls and branches within generated code.  The serializer
+  // has already deserialized the lui/ori instructions etc.
+  inline static void deserialization_set_special_target_at(
+      Address instruction_payload, Code* code, Address target) {
+    set_target_address_at(
+        instruction_payload - kInstructionsFor64BitConstant * kInstrSize,
+        code,
+        target);
+  }
+
+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
+
+  // Difference between address of current opcode and target address offset.
+  static const int kBranchPCOffset = 4;
+
+  // Here we are patching the address in the LUI/ORI instruction pair.
+  // These values are used in the serialization process and must be zero for
+  // MIPS platform, as Code, Embedded Object or External-reference pointers
+  // are split across two consecutive instructions and don't exist separately
+  // in the code, so the serializer should not step forwards in memory after
+  // a target is resolved and written.
+  static const int kSpecialTargetSize = 0;
+
+  // Number of consecutive instructions used to store 32bit/64bit constant.
+  // Before jump-optimizations, this constant was used in
+  // RelocInfo::target_address_address() function to tell serializer address of
+  // the instruction that follows LUI/ORI instruction pair. Now, with new jump
+  // optimization, where jump-through-register instruction that usually
+  // follows LUI/ORI pair is substituted with J/JAL, this constant equals
+  // to 3 instructions (LUI+ORI+J/JAL/JR/JALR).
+  static const int kInstructionsFor32BitConstant = 3;
+  static const int kInstructionsFor64BitConstant = 5;
+
+  // Distance between the instruction referring to the address of the call
+  // target and the return address.
+  static const int kCallTargetAddressOffset = 6 * kInstrSize;
+
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  static const int kPatchReturnSequenceAddressOffset = 0;
+
+  // Distance between start of patched debug break slot and the emitted address
+  // to jump to.
+  static const int kPatchDebugBreakSlotAddressOffset =  0 * kInstrSize;
+
+  // Difference between address of current opcode and value read from pc
+  // register.
+  static const int kPcLoadDelta = 4;
+
+  static const int kPatchDebugBreakSlotReturnOffset = 6 * kInstrSize;
+
+  // Number of instructions used for the JS return sequence. The constant is
+  // used by the debugger to patch the JS return sequence.
+  static const int kJSReturnSequenceInstructions = 7;
+  static const int kDebugBreakSlotInstructions = 6;
+  static const int kDebugBreakSlotLength =
+      kDebugBreakSlotInstructions * kInstrSize;
+
+
+  // ---------------------------------------------------------------------------
+  // Code generation.
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2 (>= 4).
+  void Align(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();
+
+  // Different nop operations are used by the code generator to detect certain
+  // states of the generated code.
+  enum NopMarkerTypes {
+    NON_MARKING_NOP = 0,
+    DEBUG_BREAK_NOP,
+    // IC markers.
+    PROPERTY_ACCESS_INLINED,
+    PROPERTY_ACCESS_INLINED_CONTEXT,
+    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+    // Helper values.
+    LAST_CODE_MARKER,
+    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED,
+    // Code aging
+    CODE_AGE_MARKER_NOP = 6,
+    CODE_AGE_SEQUENCE_NOP
+  };
+
+  // Type == 0 is the default non-marking nop. For mips this is a
+  // sll(zero_reg, zero_reg, 0). We use rt_reg == at for non-zero
+  // marking, to avoid conflict with ssnop and ehb instructions.
+  void nop(unsigned int type = 0) {
+    DCHECK(type < 32);
+    Register nop_rt_reg = (type == 0) ? zero_reg : at;
+    sll(zero_reg, nop_rt_reg, type, true);
+  }
+
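+  // For example, nop() emits sll(zero_reg, zero_reg, 0), the canonical MIPS
+  // nop, while a marking nop such as nop(DEBUG_BREAK_NOP) emits
+  // sll(zero_reg, at, 1), still a no-op but distinguishable when scanning
+  // code.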
+
+  // --------Branch-and-jump-instructions----------
+  // We don't use likely variant of instructions.
+  void b(int16_t offset);
+  void b(Label* L) { b(branch_offset(L, false)>>2); }
+  void bal(int16_t offset);
+  void bal(Label* L) { bal(branch_offset(L, false)>>2); }
+
+  void beq(Register rs, Register rt, int16_t offset);
+  void beq(Register rs, Register rt, Label* L) {
+    beq(rs, rt, branch_offset(L, false) >> 2);
+  }
+  void bgez(Register rs, int16_t offset);
+  void bgezc(Register rt, int16_t offset);
+  void bgezc(Register rt, Label* L) {
+    bgezc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgeuc(Register rs, Register rt, int16_t offset);
+  void bgeuc(Register rs, Register rt, Label* L) {
+    bgeuc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgec(Register rs, Register rt, int16_t offset);
+  void bgec(Register rs, Register rt, Label* L) {
+    bgec(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgezal(Register rs, int16_t offset);
+  void bgezalc(Register rt, int16_t offset);
+  void bgezalc(Register rt, Label* L) {
+    bgezalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgezall(Register rs, int16_t offset);
+  void bgezall(Register rs, Label* L) {
+    bgezall(rs, branch_offset(L, false)>>2);
+  }
+  void bgtz(Register rs, int16_t offset);
+  void bgtzc(Register rt, int16_t offset);
+  void bgtzc(Register rt, Label* L) {
+    bgtzc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void blez(Register rs, int16_t offset);
+  void blezc(Register rt, int16_t offset);
+  void blezc(Register rt, Label* L) {
+    blezc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bltz(Register rs, int16_t offset);
+  void bltzc(Register rt, int16_t offset);
+  void bltzc(Register rt, Label* L) {
+    bltzc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bltuc(Register rs, Register rt, int16_t offset);
+  void bltuc(Register rs, Register rt, Label* L) {
+    bltuc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bltc(Register rs, Register rt, int16_t offset);
+  void bltc(Register rs, Register rt, Label* L) {
+    bltc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+
+  void bltzal(Register rs, int16_t offset);
+  void blezalc(Register rt, int16_t offset);
+  void blezalc(Register rt, Label* L) {
+    blezalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bltzalc(Register rt, int16_t offset);
+  void bltzalc(Register rt, Label* L) {
+    bltzalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bgtzalc(Register rt, int16_t offset);
+  void bgtzalc(Register rt, Label* L) {
+    bgtzalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void beqzalc(Register rt, int16_t offset);
+  void beqzalc(Register rt, Label* L) {
+    beqzalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void beqc(Register rs, Register rt, int16_t offset);
+  void beqc(Register rs, Register rt, Label* L) {
+    beqc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void beqzc(Register rs, int32_t offset);
+  void beqzc(Register rs, Label* L) {
+    beqzc(rs, branch_offset21_compact(L, false)>>2);
+  }
+  void bnezalc(Register rt, int16_t offset);
+  void bnezalc(Register rt, Label* L) {
+    bnezalc(rt, branch_offset_compact(L, false)>>2);
+  }
+  void bnec(Register rs, Register rt, int16_t offset);
+  void bnec(Register rs, Register rt, Label* L) {
+    bnec(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bnezc(Register rt, int32_t offset);
+  void bnezc(Register rt, Label* L) {
+    bnezc(rt, branch_offset21_compact(L, false)>>2);
+  }
+  void bne(Register rs, Register rt, int16_t offset);
+  void bne(Register rs, Register rt, Label* L) {
+    bne(rs, rt, branch_offset(L, false)>>2);
+  }
+  void bovc(Register rs, Register rt, int16_t offset);
+  void bovc(Register rs, Register rt, Label* L) {
+    bovc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+  void bnvc(Register rs, Register rt, int16_t offset);
+  void bnvc(Register rs, Register rt, Label* L) {
+    bnvc(rs, rt, branch_offset_compact(L, false)>>2);
+  }
+
+  // Never use the int16_t b(l)cond versions with a raw branch offset;
+  // use the Label* versions instead.
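+  // Illustrative sketch (not part of the original header): prefer
+  //   Label target;
+  //   beqc(a0, a1, &target);  // resolved when the label is bound
+  // over passing a raw int16_t offset, which is easy to get wrong.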
+
+  // Jump targets must be in the current 256 MB-aligned region, i.e. 28 bits.
+  void j(int64_t target);
+  void jal(int64_t target);
+  void jalr(Register rs, Register rd = ra);
+  void jr(Register target);
+  void j_or_jr(int64_t target, Register rs);
+  void jal_or_jalr(int64_t target, Register rs);
+
+
+  // -------Data-processing-instructions---------
+
+  // Arithmetic.
+  void addu(Register rd, Register rs, Register rt);
+  void subu(Register rd, Register rs, Register rt);
+
+  void div(Register rs, Register rt);
+  void divu(Register rs, Register rt);
+  void ddiv(Register rs, Register rt);
+  void ddivu(Register rs, Register rt);
+  void div(Register rd, Register rs, Register rt);
+  void divu(Register rd, Register rs, Register rt);
+  void ddiv(Register rd, Register rs, Register rt);
+  void ddivu(Register rd, Register rs, Register rt);
+  void mod(Register rd, Register rs, Register rt);
+  void modu(Register rd, Register rs, Register rt);
+  void dmod(Register rd, Register rs, Register rt);
+  void dmodu(Register rd, Register rs, Register rt);
+
+  void mul(Register rd, Register rs, Register rt);
+  void muh(Register rd, Register rs, Register rt);
+  void mulu(Register rd, Register rs, Register rt);
+  void muhu(Register rd, Register rs, Register rt);
+  void mult(Register rs, Register rt);
+  void multu(Register rs, Register rt);
+  void dmul(Register rd, Register rs, Register rt);
+  void dmuh(Register rd, Register rs, Register rt);
+  void dmulu(Register rd, Register rs, Register rt);
+  void dmuhu(Register rd, Register rs, Register rt);
+  void daddu(Register rd, Register rs, Register rt);
+  void dsubu(Register rd, Register rs, Register rt);
+  void dmult(Register rs, Register rt);
+  void dmultu(Register rs, Register rt);
+
+  void addiu(Register rd, Register rs, int32_t j);
+  void daddiu(Register rd, Register rs, int32_t j);
+
+  // Logical.
+  void and_(Register rd, Register rs, Register rt);
+  void or_(Register rd, Register rs, Register rt);
+  void xor_(Register rd, Register rs, Register rt);
+  void nor(Register rd, Register rs, Register rt);
+
+  void andi(Register rd, Register rs, int32_t j);
+  void ori(Register rd, Register rs, int32_t j);
+  void xori(Register rd, Register rs, int32_t j);
+  void lui(Register rd, int32_t j);
+  void aui(Register rs, Register rt, int32_t j);
+  void daui(Register rs, Register rt, int32_t j);
+  void dahi(Register rs, int32_t j);
+  void dati(Register rs, int32_t j);
+
+  // Shifts.
+  // Please note: sll(zero_reg, zero_reg, x) instructions are reserved as nops
+  // and may cause problems in normal code; coming_from_nop makes sure this
+  // doesn't happen.
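+  // (Illustrative note: the canonical nop is itself encoded as
+  // sll(zero_reg, zero_reg, 0) with coming_from_nop = true, so only
+  // unintended nop-shaped shifts are flagged.)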
+  void sll(Register rd, Register rt, uint16_t sa, bool coming_from_nop = false);
+  void sllv(Register rd, Register rt, Register rs);
+  void srl(Register rd, Register rt, uint16_t sa);
+  void srlv(Register rd, Register rt, Register rs);
+  void sra(Register rt, Register rd, uint16_t sa);
+  void srav(Register rt, Register rd, Register rs);
+  void rotr(Register rd, Register rt, uint16_t sa);
+  void rotrv(Register rd, Register rt, Register rs);
+  void dsll(Register rd, Register rt, uint16_t sa);
+  void dsllv(Register rd, Register rt, Register rs);
+  void dsrl(Register rd, Register rt, uint16_t sa);
+  void dsrlv(Register rd, Register rt, Register rs);
+  void drotr(Register rd, Register rt, uint16_t sa);
+  void drotrv(Register rd, Register rt, Register rs);
+  void dsra(Register rt, Register rd, uint16_t sa);
+  void dsrav(Register rd, Register rt, Register rs);
+  void dsll32(Register rt, Register rd, uint16_t sa);
+  void dsrl32(Register rt, Register rd, uint16_t sa);
+  void dsra32(Register rt, Register rd, uint16_t sa);
+
+
+  // ------------Memory-instructions-------------
+
+  void lb(Register rd, const MemOperand& rs);
+  void lbu(Register rd, const MemOperand& rs);
+  void lh(Register rd, const MemOperand& rs);
+  void lhu(Register rd, const MemOperand& rs);
+  void lw(Register rd, const MemOperand& rs);
+  void lwu(Register rd, const MemOperand& rs);
+  void lwl(Register rd, const MemOperand& rs);
+  void lwr(Register rd, const MemOperand& rs);
+  void sb(Register rd, const MemOperand& rs);
+  void sh(Register rd, const MemOperand& rs);
+  void sw(Register rd, const MemOperand& rs);
+  void swl(Register rd, const MemOperand& rs);
+  void swr(Register rd, const MemOperand& rs);
+  void ldl(Register rd, const MemOperand& rs);
+  void ldr(Register rd, const MemOperand& rs);
+  void sdl(Register rd, const MemOperand& rs);
+  void sdr(Register rd, const MemOperand& rs);
+  void ld(Register rd, const MemOperand& rs);
+  void sd(Register rd, const MemOperand& rs);
+
+
+  // ----------------Prefetch--------------------
+
+  void pref(int32_t hint, const MemOperand& rs);
+
+
+  // -------------Misc-instructions--------------
+
+  // Break / Trap instructions.
+  void break_(uint32_t code, bool break_as_stop = false);
+  void stop(const char* msg, uint32_t code = kMaxStopCode);
+  void tge(Register rs, Register rt, uint16_t code);
+  void tgeu(Register rs, Register rt, uint16_t code);
+  void tlt(Register rs, Register rt, uint16_t code);
+  void tltu(Register rs, Register rt, uint16_t code);
+  void teq(Register rs, Register rt, uint16_t code);
+  void tne(Register rs, Register rt, uint16_t code);
+
+  // Move from HI/LO register.
+  void mfhi(Register rd);
+  void mflo(Register rd);
+
+  // Set on less than.
+  void slt(Register rd, Register rs, Register rt);
+  void sltu(Register rd, Register rs, Register rt);
+  void slti(Register rd, Register rs, int32_t j);
+  void sltiu(Register rd, Register rs, int32_t j);
+
+  // Conditional move.
+  void movz(Register rd, Register rs, Register rt);
+  void movn(Register rd, Register rs, Register rt);
+  void movt(Register rd, Register rs, uint16_t cc = 0);
+  void movf(Register rd, Register rs, uint16_t cc = 0);
+
+  void sel(SecondaryField fmt, FPURegister fd, FPURegister ft,
+      FPURegister fs, uint8_t sel);
+  void seleqz(Register rs, Register rt, Register rd);
+  void seleqz(SecondaryField fmt, FPURegister fd, FPURegister ft,
+      FPURegister fs);
+  void selnez(Register rs, Register rt, Register rd);
+  void selnez(SecondaryField fmt, FPURegister fd, FPURegister ft,
+      FPURegister fs);
+
+  // Bit twiddling.
+  void clz(Register rd, Register rs);
+  void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
+  void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+  // --------Coprocessor-instructions----------------
+
+  // Load, store, and move.
+  void lwc1(FPURegister fd, const MemOperand& src);
+  void ldc1(FPURegister fd, const MemOperand& src);
+
+  void swc1(FPURegister fs, const MemOperand& dst);
+  void sdc1(FPURegister fs, const MemOperand& dst);
+
+  void mtc1(Register rt, FPURegister fs);
+  void mthc1(Register rt, FPURegister fs);
+  void dmtc1(Register rt, FPURegister fs);
+
+  void mfc1(Register rt, FPURegister fs);
+  void mfhc1(Register rt, FPURegister fs);
+  void dmfc1(Register rt, FPURegister fs);
+
+  void ctc1(Register rt, FPUControlRegister fs);
+  void cfc1(Register rt, FPUControlRegister fs);
+
+  // Arithmetic.
+  void add_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void sub_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void mul_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void madd_d(FPURegister fd, FPURegister fr, FPURegister fs, FPURegister ft);
+  void div_d(FPURegister fd, FPURegister fs, FPURegister ft);
+  void abs_d(FPURegister fd, FPURegister fs);
+  void mov_d(FPURegister fd, FPURegister fs);
+  void neg_d(FPURegister fd, FPURegister fs);
+  void sqrt_d(FPURegister fd, FPURegister fs);
+
+  // Conversion.
+  void cvt_w_s(FPURegister fd, FPURegister fs);
+  void cvt_w_d(FPURegister fd, FPURegister fs);
+  void trunc_w_s(FPURegister fd, FPURegister fs);
+  void trunc_w_d(FPURegister fd, FPURegister fs);
+  void round_w_s(FPURegister fd, FPURegister fs);
+  void round_w_d(FPURegister fd, FPURegister fs);
+  void floor_w_s(FPURegister fd, FPURegister fs);
+  void floor_w_d(FPURegister fd, FPURegister fs);
+  void ceil_w_s(FPURegister fd, FPURegister fs);
+  void ceil_w_d(FPURegister fd, FPURegister fs);
+
+  void cvt_l_s(FPURegister fd, FPURegister fs);
+  void cvt_l_d(FPURegister fd, FPURegister fs);
+  void trunc_l_s(FPURegister fd, FPURegister fs);
+  void trunc_l_d(FPURegister fd, FPURegister fs);
+  void round_l_s(FPURegister fd, FPURegister fs);
+  void round_l_d(FPURegister fd, FPURegister fs);
+  void floor_l_s(FPURegister fd, FPURegister fs);
+  void floor_l_d(FPURegister fd, FPURegister fs);
+  void ceil_l_s(FPURegister fd, FPURegister fs);
+  void ceil_l_d(FPURegister fd, FPURegister fs);
+
+  void min(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+  void mina(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+  void max(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+  void maxa(SecondaryField fmt, FPURegister fd, FPURegister ft, FPURegister fs);
+
+  void cvt_s_w(FPURegister fd, FPURegister fs);
+  void cvt_s_l(FPURegister fd, FPURegister fs);
+  void cvt_s_d(FPURegister fd, FPURegister fs);
+
+  void cvt_d_w(FPURegister fd, FPURegister fs);
+  void cvt_d_l(FPURegister fd, FPURegister fs);
+  void cvt_d_s(FPURegister fd, FPURegister fs);
+
+  // Conditions and branches for MIPSr6.
+  void cmp(FPUCondition cond, SecondaryField fmt,
+         FPURegister fd, FPURegister ft, FPURegister fs);
+
+  void bc1eqz(int16_t offset, FPURegister ft);
+  void bc1eqz(Label* L, FPURegister ft) {
+    bc1eqz(branch_offset(L, false)>>2, ft);
+  }
+  void bc1nez(int16_t offset, FPURegister ft);
+  void bc1nez(Label* L, FPURegister ft) {
+    bc1nez(branch_offset(L, false)>>2, ft);
+  }
+
+  // Conditions and branches for non MIPSr6.
+  void c(FPUCondition cond, SecondaryField fmt,
+         FPURegister ft, FPURegister fs, uint16_t cc = 0);
+
+  void bc1f(int16_t offset, uint16_t cc = 0);
+  void bc1f(Label* L, uint16_t cc = 0) {
+    bc1f(branch_offset(L, false)>>2, cc);
+  }
+  void bc1t(int16_t offset, uint16_t cc = 0);
+  void bc1t(Label* L, uint16_t cc = 0) {
+    bc1t(branch_offset(L, false)>>2, cc);
+  }
+  void fcmp(FPURegister src1, const double src2, FPUCondition cond);
+
+  // Check the code size generated from label to here.
+  int SizeOfCodeGeneratedSince(Label* label) {
+    return pc_offset() - label->pos();
+  }
+
+  // Check the number of instructions generated from label to here.
+  int InstructionsGeneratedSince(Label* label) {
+    return SizeOfCodeGeneratedSince(label) / kInstrSize;
+  }
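+
+  // Usage sketch (illustrative): measuring a fixed-size emission.
+  //   Label start;
+  //   bind(&start);
+  //   nop();
+  //   DCHECK_EQ(1, InstructionsGeneratedSince(&start));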
+
+  // Class for scoped postponing of the trampoline pool generation.
+  class BlockTrampolinePoolScope {
+   public:
+    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+      assem_->StartBlockTrampolinePool();
+    }
+    ~BlockTrampolinePoolScope() {
+      assem_->EndBlockTrampolinePool();
+    }
+
+   private:
+    Assembler* assem_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+  };
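+
+  // Usage sketch (illustrative): keep a multi-instruction sequence from being
+  // split by trampoline pool emission.
+  //   { BlockTrampolinePoolScope block_pools(this);
+  //     // ... emit the sequence ...
+  //   }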
+
+  // Class for postponing the assembly buffer growth. Typically used for
+  // sequences of instructions that must be emitted as a unit, before
+  // buffer growth (and relocation) can occur.
+  // This blocking scope is not nestable.
+  class BlockGrowBufferScope {
+   public:
+    explicit BlockGrowBufferScope(Assembler* assem) : assem_(assem) {
+      assem_->StartBlockGrowBuffer();
+    }
+    ~BlockGrowBufferScope() {
+      assem_->EndBlockGrowBuffer();
+    }
+
+   private:
+    Assembler* assem_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockGrowBufferScope);
+  };
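+
+  // Usage sketch (illustrative): unlike the trampoline scope above, this one
+  // is not nestable, so it must wrap the whole fixed sequence exactly once.
+  //   { BlockGrowBufferScope block_growth(this);
+  //     // ... emit a sequence that must not be relocated mid-way ...
+  //   }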
+
+  // Debugging.
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Mark address of a debug break slot.
+  void RecordDebugBreakSlot();
+
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void SetRecordedAstId(TypeFeedbackId ast_id) {
+    DCHECK(recorded_ast_id_.IsNone());
+    recorded_ast_id_ = ast_id;
+  }
+
+  TypeFeedbackId RecordedAstId() {
+    DCHECK(!recorded_ast_id_.IsNone());
+    return recorded_ast_id_;
+  }
+
+  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --code-comments to enable.
+  void RecordComment(const char* msg);
+
+  static int RelocateInternalReference(byte* pc, intptr_t pc_delta);
+
+  // Writes a single byte or word of data in the code stream.  Used for
+  // inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
+
+  // Emits the address of the code stub's first instruction.
+  void emit_code_stub_address(Code* stub);
+
+  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+  // Postpone the generation of the trampoline pool for the specified number of
+  // instructions.
+  void BlockTrampolinePoolFor(int instructions);
+
+  // Check if there is less than kGap bytes available in the buffer.
+  // If this is the case, we need to grow the buffer before emitting
+  // an instruction or relocation information.
+  inline bool overflow() const { return pc_ >= reloc_info_writer.pos() - kGap; }
+
+  // Get the number of bytes available in the buffer.
+  inline int available_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Read/patch instructions.
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  static void instr_at_put(byte* pc, Instr instr) {
+    *reinterpret_cast<Instr*>(pc) = instr;
+  }
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+
+  // Check if an instruction is a branch of some kind.
+  static bool IsBranch(Instr instr);
+  static bool IsBeq(Instr instr);
+  static bool IsBne(Instr instr);
+
+  static bool IsJump(Instr instr);
+  static bool IsJ(Instr instr);
+  static bool IsLui(Instr instr);
+  static bool IsOri(Instr instr);
+
+  static bool IsJal(Instr instr);
+  static bool IsJr(Instr instr);
+  static bool IsJalr(Instr instr);
+
+  static bool IsNop(Instr instr, unsigned int type);
+  static bool IsPop(Instr instr);
+  static bool IsPush(Instr instr);
+  static bool IsLwRegFpOffset(Instr instr);
+  static bool IsSwRegFpOffset(Instr instr);
+  static bool IsLwRegFpNegOffset(Instr instr);
+  static bool IsSwRegFpNegOffset(Instr instr);
+
+  static Register GetRtReg(Instr instr);
+  static Register GetRsReg(Instr instr);
+  static Register GetRdReg(Instr instr);
+
+  static uint32_t GetRt(Instr instr);
+  static uint32_t GetRtField(Instr instr);
+  static uint32_t GetRs(Instr instr);
+  static uint32_t GetRsField(Instr instr);
+  static uint32_t GetRd(Instr instr);
+  static uint32_t GetRdField(Instr instr);
+  static uint32_t GetSa(Instr instr);
+  static uint32_t GetSaField(Instr instr);
+  static uint32_t GetOpcodeField(Instr instr);
+  static uint32_t GetFunction(Instr instr);
+  static uint32_t GetFunctionField(Instr instr);
+  static uint32_t GetImmediate16(Instr instr);
+  static uint32_t GetLabelConst(Instr instr);
+
+  static int32_t GetBranchOffset(Instr instr);
+  static bool IsLw(Instr instr);
+  static int16_t GetLwOffset(Instr instr);
+  static Instr SetLwOffset(Instr instr, int16_t offset);
+
+  static bool IsSw(Instr instr);
+  static Instr SetSwOffset(Instr instr, int16_t offset);
+  static bool IsAddImmediate(Instr instr);
+  static Instr SetAddImmediateOffset(Instr instr, int16_t offset);
+
+  static bool IsAndImmediate(Instr instr);
+  static bool IsEmittedConstant(Instr instr);
+
+  void CheckTrampolinePool();
+
+  // Allocate a constant pool of the correct size for the generated code.
+  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+  // Generate the constant pool for the generated code.
+  void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+ protected:
+  // Relocation for a type-recording IC has the AST id added to it.  This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  TypeFeedbackId recorded_ast_id_;
+
+  int64_t buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Decode branch instruction at pos and return branch target pos.
+  int64_t target_at(int64_t pos);
+
+  // Patch branch instruction at pos to branch to given branch target pos.
+  void target_at_put(int64_t pos, int64_t target_pos);
+
+  // Say if we need to relocate with this mode.
+  bool MustUseReg(RelocInfo::Mode rmode);
+
+  // Record reloc info for current pc_.
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+
+  // Block the emission of the trampoline pool before pc_offset.
+  void BlockTrampolinePoolBefore(int pc_offset) {
+    if (no_trampoline_pool_before_ < pc_offset)
+      no_trampoline_pool_before_ = pc_offset;
+  }
+
+  void StartBlockTrampolinePool() {
+    trampoline_pool_blocked_nesting_++;
+  }
+
+  void EndBlockTrampolinePool() {
+    trampoline_pool_blocked_nesting_--;
+  }
+
+  bool is_trampoline_pool_blocked() const {
+    return trampoline_pool_blocked_nesting_ > 0;
+  }
+
+  bool has_exception() const {
+    return internal_trampoline_exception_;
+  }
+
+  void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi);
+
+  bool is_trampoline_emitted() const {
+    return trampoline_emitted_;
+  }
+
+  // Temporarily block automatic assembly buffer growth.
+  void StartBlockGrowBuffer() {
+    DCHECK(!block_buffer_growth_);
+    block_buffer_growth_ = true;
+  }
+
+  void EndBlockGrowBuffer() {
+    DCHECK(block_buffer_growth_);
+    block_buffer_growth_ = false;
+  }
+
+  bool is_buffer_growth_blocked() const {
+    return block_buffer_growth_;
+  }
+
+ private:
+  // Buffer size and constant pool distance are checked together at regular
+  // intervals of kBufferCheckInterval emitted bytes.
+  static const int kBufferCheckInterval = 1*KB/2;
+
+  // Code generation.
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries.
+  static const int kGap = 32;
+
+
+  // Repeated checking whether the trampoline pool should be emitted is rather
+  // expensive. By default we only check again once a number of instructions
+  // has been generated.
+  static const int kCheckConstIntervalInst = 32;
+  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+  int next_buffer_check_;  // pc offset of next buffer check.
+
+  // Emission of the trampoline pool may be blocked in some code sequences.
+  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
+  int no_trampoline_pool_before_;  // Block emission before this pc offset.
+
+  // Keep track of the last emitted pool to guarantee a maximal distance.
+  int last_trampoline_pool_end_;  // pc offset of the end of the last pool.
+
+  // Automatic growth of the assembly buffer may be blocked for some sequences.
+  bool block_buffer_growth_;  // Block growth when true.
+
+  // Relocation information generation.
+  // Each relocation is encoded as a variable size value.
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+
+  // The bound position; before this we cannot do instruction elimination.
+  int last_bound_pos_;
+
+  // Code emission.
+  inline void CheckBuffer();
+  void GrowBuffer();
+  inline void emit(Instr x);
+  inline void emit(uint64_t x);
+  inline void CheckTrampolinePoolQuick();
+
+  // Instruction generation.
+  // We have 3 different kinds of encoding layout on MIPS.
+  // However, due to the many different types of objects encoded in the same
+  // fields, we have quite a few aliases for each mode.
+  // Using the same structure to refer to Register and FPURegister would spare
+  // a few aliases, but mixing both does not look clean; this could certainly
+  // be implemented differently.
+
+  void GenInstrRegister(Opcode opcode,
+                        Register rs,
+                        Register rt,
+                        Register rd,
+                        uint16_t sa = 0,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        Register rs,
+                        Register rt,
+                        uint16_t msb,
+                        uint16_t lsb,
+                        SecondaryField func);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        FPURegister ft,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        FPURegister fr,
+                        FPURegister ft,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        Register rt,
+                        FPURegister fs,
+                        FPURegister fd,
+                        SecondaryField func = NULLSF);
+
+  void GenInstrRegister(Opcode opcode,
+                        SecondaryField fmt,
+                        Register rt,
+                        FPUControlRegister fs,
+                        SecondaryField func = NULLSF);
+
+
+  void GenInstrImmediate(Opcode opcode,
+                         Register rs,
+                         Register rt,
+                         int32_t  j);
+  void GenInstrImmediate(Opcode opcode,
+                         Register rs,
+                         SecondaryField SF,
+                         int32_t  j);
+  void GenInstrImmediate(Opcode opcode,
+                         Register r1,
+                         FPURegister r2,
+                         int32_t  j);
+
+
+  void GenInstrJump(Opcode opcode,
+                     uint32_t address);
+
+  // Helpers.
+  void LoadRegPlusOffsetToAt(const MemOperand& src);
+
+  // Labels.
+  void print(Label* L);
+  void bind_to(Label* L, int pos);
+  void next(Label* L);
+
+  // One trampoline consists of:
+  // - space for trampoline slots,
+  // - space for labels.
+  //
+  // Space for trampoline slots is equal to slot_count * kTrampolineSlotsSize
+  // (see below). Space for trampoline slots precedes space for labels. Each
+  // label is of one instruction size, so the total amount for labels is equal
+  // to label_count * kInstrSize.
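+  // (Worked example, illustrative: with slot_count = 2 and label_count = 3,
+  // a trampoline occupies 2 * kTrampolineSlotsSize + 3 * kInstrSize bytes.)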
+  class Trampoline {
+   public:
+    Trampoline() {
+      start_ = 0;
+      next_slot_ = 0;
+      free_slot_count_ = 0;
+      end_ = 0;
+    }
+    Trampoline(int start, int slot_count) {
+      start_ = start;
+      next_slot_ = start;
+      free_slot_count_ = slot_count;
+      end_ = start + slot_count * kTrampolineSlotsSize;
+    }
+    int start() {
+      return start_;
+    }
+    int end() {
+      return end_;
+    }
+    int take_slot() {
+      int trampoline_slot = kInvalidSlotPos;
+      if (free_slot_count_ <= 0) {
+        // We have run out of space on trampolines.
+        // Make sure we fail in debug mode, so we become aware of each case
+        // when this happens.
+        DCHECK(0);
+        // Internal exception will be caught.
+      } else {
+        trampoline_slot = next_slot_;
+        free_slot_count_--;
+        next_slot_ += kTrampolineSlotsSize;
+      }
+      return trampoline_slot;
+    }
+
+   private:
+    int start_;
+    int end_;
+    int next_slot_;
+    int free_slot_count_;
+  };
+
+  int32_t get_trampoline_entry(int32_t pos);
+  int unbound_labels_count_;
+  // If a trampoline is emitted, the generated code is becoming large. As this
+  // is already a slow case which can possibly break our code generation for
+  // the extreme case, we use this information to trigger a different mode of
+  // branch instruction generation, where we use jump instructions rather
+  // than regular branch instructions.
+  bool trampoline_emitted_;
+  static const int kTrampolineSlotsSize = 6 * kInstrSize;
+  static const int kMaxBranchOffset = (1 << (18 - 1)) - 1;
+  static const int kInvalidSlotPos = -1;
+
+  Trampoline trampoline_;
+  bool internal_trampoline_exception_;
+
+  friend class RegExpMacroAssemblerMIPS;
+  friend class RelocInfo;
+  friend class CodePatcher;
+  friend class BlockTrampolinePoolScope;
+
+  PositionsRecorder positions_recorder_;
+  friend class PositionsRecorder;
+  friend class EnsureSpace;
+};
+
+
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) {
+    assembler->CheckBuffer();
+  }
+};
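+
+// Usage sketch (illustrative): typically declared at the top of an emit
+// helper so the buffer is grown, if needed, before any bytes are written.
+//   void EmitHelper(Assembler* assm) {
+//     EnsureSpace ensure_space(assm);
+//     // ... emit ...
+//   }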
+
+} }  // namespace v8::internal
+
+#endif  // V8_ARM_ASSEMBLER_MIPS_H_
diff --git a/src/mips64/builtins-mips64.cc b/src/mips64/builtins-mips64.cc
new file mode 100644
index 0000000..5bdb56c
--- /dev/null
+++ b/src/mips64/builtins-mips64.cc
@@ -0,0 +1,1596 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm,
+                                CFunctionId id,
+                                BuiltinExtraArguments extra_args) {
+  // ----------- S t a t e -------------
+  //  -- a0                 : number of arguments excluding receiver
+  //  -- a1                 : called function (only guaranteed when
+  //  --                      extra_args requires it)
+  //  -- cp                 : context
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[8 * (argc - 1)] : first argument
+  //  -- sp[8 * argc]       : receiver
+  // -----------------------------------
+
+  // Insert extra arguments.
+  int num_extra_args = 0;
+  if (extra_args == NEEDS_CALLED_FUNCTION) {
+    num_extra_args = 1;
+    __ push(a1);
+  } else {
+    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+  }
+
+  // JumpToExternalReference expects s0 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  __ Daddu(s0, a0, num_extra_args + 1);
+  __ dsll(s1, s0, kPointerSizeLog2);
+  __ Dsubu(s1, s1, kPointerSize);
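+  // (Illustrative restatement: the two instructions above compute
+  //  s1 = (s0 - 1) * kPointerSize.)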
+  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the native context.
+
+  __ ld(result,
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ld(result,
+        FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the InternalArray function from the native context.
+  __ ld(result,
+         MemOperand(result,
+                    Context::SlotOffset(
+                        Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the native context.
+
+  __ ld(result,
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ld(result,
+        FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the Array function from the native context.
+  __ ld(result,
+        MemOperand(result,
+                   Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(a2, a4);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction,
+              a4, Operand(zero_reg));
+    __ GetObjectType(a2, a3, a4);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction,
+              a4, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, a1);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    __ SmiTst(a2, a4);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction1,
+              a4, Operand(zero_reg));
+    __ GetObjectType(a2, a3, a4);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction2,
+              a4, Operand(MAP_TYPE));
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  // Tail call a stub.
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                     : number of arguments
+  //  -- a1                     : constructor function
+  //  -- ra                     : return address
+  //  -- sp[(argc - n - 1) * 8] : arg[n] (zero based)
+  //  -- sp[argc * 8]           : receiver
+  // -----------------------------------
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->string_ctor_calls(), 1, a2, a3);
+
+  Register function = a1;
+  if (FLAG_debug_code) {
+    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, a2);
+    __ Assert(eq, kUnexpectedStringFunction, function, Operand(a2));
+  }
+
+  // Load the first argument into a0 and get rid of the rest.
+  Label no_arguments;
+  __ Branch(&no_arguments, eq, a0, Operand(zero_reg));
+  // First arg = sp[(argc - 1) * 8].
+  __ Dsubu(a0, a0, Operand(1));
+  __ dsll(a0, a0, kPointerSizeLog2);
+  __ Daddu(sp, a0, sp);
+  __ ld(a0, MemOperand(sp));
+  // sp now points to args[0]; drop args[0] + receiver.
+  __ Drop(2);
+
+  Register argument = a2;
+  Label not_cached, argument_is_string;
+  __ LookupNumberStringCache(a0,        // Input.
+                             argument,  // Result.
+                             a3,        // Scratch.
+                             a4,        // Scratch.
+                             a5,        // Scratch.
+                             &not_cached);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1, a3, a4);
+  __ bind(&argument_is_string);
+
+  // ----------- S t a t e -------------
+  //  -- a2     : argument converted to string
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  // -----------------------------------
+
+  Label gc_required;
+  __ Allocate(JSValue::kSize,
+              v0,  // Result.
+              a3,  // Scratch.
+              a4,  // Scratch.
+              &gc_required,
+              TAG_OBJECT);
+
+  // Initialize the String object.
+  Register map = a3;
+  __ LoadGlobalFunctionInitialMap(function, map, a4);
+  if (FLAG_debug_code) {
+    __ lbu(a4, FieldMemOperand(map, Map::kInstanceSizeOffset));
+    __ Assert(eq, kUnexpectedStringWrapperInstanceSize,
+        a4, Operand(JSValue::kSize >> kPointerSizeLog2));
+    __ lbu(a4, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+    __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper,
+        a4, Operand(zero_reg));
+  }
+  __ sd(map, FieldMemOperand(v0, HeapObject::kMapOffset));
+
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  __ sd(argument, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  // Ensure the object is fully initialized.
+  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
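+  // (Illustrative note: the four stores above (map, properties, elements,
+  //  value) account for exactly the four pointer-sized fields asserted.)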
+
+  __ Ret();
+
+  // The argument was not found in the number to string cache. Check
+  // if it's a string already before calling the conversion builtin.
+  Label convert_argument;
+  __ bind(&not_cached);
+  __ JumpIfSmi(a0, &convert_argument);
+
+  // Is it a String?
+  __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ And(a4, a3, Operand(kIsNotStringMask));
+  __ Branch(&convert_argument, ne, a4, Operand(zero_reg));
+  __ mov(argument, a0);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4);
+  __ Branch(&argument_is_string);
+
+  // Invoke the conversion builtin and put the result into a2.
+  __ bind(&convert_argument);
+  __ push(function);  // Preserve the function.
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, a3, a4);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(a0);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
+  __ pop(function);
+  __ mov(argument, v0);
+  __ Branch(&argument_is_string);
+
+  // Load the empty string into a2, remove the receiver from the
+  // stack, and jump back to the case where the argument is a string.
+  __ bind(&no_arguments);
+  __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+  __ Drop(1);
+  __ Branch(&argument_is_string);
+
+  // At this point the argument is already a string. Call runtime to
+  // create a string wrapper.
+  __ bind(&gc_required);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1, a3, a4);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
+  __ Ret();
+}
+
+
+static void CallRuntimePassFunction(
+    MacroAssembler* masm, Runtime::FunctionId function_id) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  // Push a copy of the function onto the stack, and the function again as a
+  // parameter to the runtime call.
+  __ Push(a1, a1);
+
+  __ CallRuntime(function_id, 1);
+  // Restore the function.
+  __ Pop(a1);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(a2, FieldMemOperand(a2, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(at, a2, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+  __ Daddu(at, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+}
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ LoadRoot(a4, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(a4));
+
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+  GenerateTailCallToReturnedCode(masm);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_memento) {
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- a2     : allocation site or undefined
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  // Should never create mementos for api functions.
+  DCHECK(!is_api_function || !create_memento);
+
+  Isolate* isolate = masm->isolate();
+
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments
+  //  -- a1     : constructor function
+  //  -- ra     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  // Enter a construct frame.
+  {
+    FrameScope scope(masm, StackFrame::CONSTRUCT);
+
+    if (create_memento) {
+      __ AssertUndefinedOrAllocationSite(a2, a3);
+      __ push(a2);
+    }
+
+    // Preserve the two incoming parameters on the stack.
+    // Tag arguments count.
+    __ dsll32(a0, a0, 0);
+    __ MultiPushReversed(a0.bit() | a1.bit());
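+    // (Illustrative note: on MIPS64 a Smi keeps its payload in the upper 32
+    //  bits, so the dsll32 above is what tags the arguments count.)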
+
+    Label rt_call, allocated;
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    if (FLAG_inline_new) {
+      Label undo_allocation;
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(isolate);
+      __ li(a2, Operand(debug_step_in_fp));
+      __ ld(a2, MemOperand(a2));
+      __ Branch(&rt_call, ne, a2, Operand(zero_reg));
+
+      // Load the initial map and verify that it is in fact a map.
+      // a1: constructor function
+      __ ld(a2, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+      __ JumpIfSmi(a2, &rt_call);
+      __ GetObjectType(a2, a3, t0);
+      __ Branch(&rt_call, ne, t0, Operand(MAP_TYPE));
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // a1: constructor function
+      // a2: initial map
+      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+      __ Branch(&rt_call, eq, a3, Operand(JS_FUNCTION_TYPE));
+
+      if (!is_api_function) {
+        Label allocate;
+        MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
+        // Check if slack tracking is enabled.
+        __ lwu(a4, bit_field3);
+        __ DecodeField<Map::ConstructionCount>(a6, a4);
+        __ Branch(&allocate,
+                  eq,
+                  a6,
+                  Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking)));
+        // Decrease generous allocation count.
+        __ Dsubu(a4, a4, Operand(1 << Map::ConstructionCount::kShift));
+        __ Branch(USE_DELAY_SLOT,
+            &allocate, ne, a6, Operand(JSFunction::kFinishSlackTracking));
+        __ sw(a4, bit_field3);  // In delay slot.
+
+        __ Push(a1, a2, a1);  // a1 = Constructor.
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ Pop(a1, a2);
+        // Slack tracking counter is kNoSlackTracking after runtime call.
+        DCHECK(JSFunction::kNoSlackTracking == 0);
+        __ mov(a6, zero_reg);
+
+        __ bind(&allocate);
+      }
+
+      // Now allocate the JSObject on the heap.
+      // a1: constructor function
+      // a2: initial map
+      __ lbu(a3, FieldMemOperand(a2, Map::kInstanceSizeOffset));
+      if (create_memento) {
+        __ Daddu(a3, a3, Operand(AllocationMemento::kSize / kPointerSize));
+      }
+
+      __ Allocate(a3, t0, t1, t2, &rt_call, SIZE_IN_WORDS);
+
+      // Allocated the JSObject, now initialize the fields. Map is set to
+      // initial map and properties and elements are set to empty fixed array.
+      // a1: constructor function
+      // a2: initial map
+      // a3: object size (not including memento if create_memento)
+      // t0: JSObject (not tagged)
+      __ LoadRoot(t2, Heap::kEmptyFixedArrayRootIndex);
+      __ mov(t1, t0);
+      __ sd(a2, MemOperand(t1, JSObject::kMapOffset));
+      __ sd(t2, MemOperand(t1, JSObject::kPropertiesOffset));
+      __ sd(t2, MemOperand(t1, JSObject::kElementsOffset));
+      __ Daddu(t1, t1, Operand(3*kPointerSize));
+      DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      DCHECK_EQ(1 * kPointerSize, JSObject::kPropertiesOffset);
+      DCHECK_EQ(2 * kPointerSize, JSObject::kElementsOffset);
+
+      // Fill all the in-object properties with appropriate filler.
+      // a1: constructor function
+      // a2: initial map
+      // a3: object size (in words, including memento if create_memento)
+      // t0: JSObject (not tagged)
+      // t1: First in-object property of JSObject (not tagged)
+      // a6: slack tracking counter (non-API function case)
+      DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+
+      // Use t3 to hold undefined, which is used in several places below.
+      __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+
+      if (!is_api_function) {
+        Label no_inobject_slack_tracking;
+
+        // Check if slack tracking is enabled.
+        __ Branch(&no_inobject_slack_tracking,
+                  eq,
+                  a6,
+                  Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking)));
+
+        // Allocate object with a slack.
+        __ lwu(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+        __ Ext(a0, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+                kBitsPerByte);
+        __ dsll(at, a0, kPointerSizeLog2);
+        __ daddu(a0, t1, at);
+        // a0: offset of first field after pre-allocated fields
+        if (FLAG_debug_code) {
+          __ dsll(at, a3, kPointerSizeLog2);
+          __ Daddu(t2, t0, Operand(at));   // End of object.
+          __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields,
+              a0, Operand(t2));
+        }
+        __ InitializeFieldsWithFiller(t1, a0, t3);
+        // To allow for truncation.
+        __ LoadRoot(t3, Heap::kOnePointerFillerMapRootIndex);
+        // Fill the remaining fields with one pointer filler map.
+
+        __ bind(&no_inobject_slack_tracking);
+      }
+
+      if (create_memento) {
+        __ Dsubu(a0, a3, Operand(AllocationMemento::kSize / kPointerSize));
+        __ dsll(a0, a0, kPointerSizeLog2);
+        __ Daddu(a0, t0, Operand(a0));  // End of object.
+        __ InitializeFieldsWithFiller(t1, a0, t3);
+
+        // Fill in memento fields.
+        // t1: points to the allocated but uninitialized memento.
+        __ LoadRoot(t3, Heap::kAllocationMementoMapRootIndex);
+        DCHECK_EQ(0 * kPointerSize, AllocationMemento::kMapOffset);
+        __ sd(t3, MemOperand(t1));
+        __ Daddu(t1, t1, kPointerSize);
+        // Load the AllocationSite.
+        __ ld(t3, MemOperand(sp, 2 * kPointerSize));
+        DCHECK_EQ(1 * kPointerSize, AllocationMemento::kAllocationSiteOffset);
+        __ sd(t3, MemOperand(t1));
+        __ Daddu(t1, t1, kPointerSize);
+      } else {
+        __ dsll(at, a3, kPointerSizeLog2);
+        __ Daddu(a0, t0, Operand(at));  // End of object.
+        __ InitializeFieldsWithFiller(t1, a0, t3);
+      }
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      __ Daddu(t0, t0, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with the
+      // allocated object if not; fall through to the runtime call if it is.
+      // a1: constructor function
+      // t0: JSObject
+      // t1: start of next object (not tagged)
+      __ lbu(a3, FieldMemOperand(a2, Map::kUnusedPropertyFieldsOffset));
+      // The field instance sizes contains both pre-allocated property fields
+      // and in-object properties.
+      __ lw(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
+      __ Ext(t2, a0, Map::kPreAllocatedPropertyFieldsByte * kBitsPerByte,
+             kBitsPerByte);
+      __ Daddu(a3, a3, Operand(t2));
+      __ Ext(t2, a0, Map::kInObjectPropertiesByte * kBitsPerByte,
+              kBitsPerByte);
+      __ dsubu(a3, a3, t2);
+
+      // Done if no extra properties are to be allocated.
+      __ Branch(&allocated, eq, a3, Operand(zero_reg));
+      __ Assert(greater_equal, kPropertyAllocationCountFailed,
+          a3, Operand(zero_reg));
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // a1: constructor
+      // a3: number of elements in properties array
+      // t0: JSObject
+      // t1: start of next object
+      __ Daddu(a0, a3, Operand(FixedArray::kHeaderSize / kPointerSize));
+      __ Allocate(
+          a0,
+          t1,
+          t2,
+          a2,
+          &undo_allocation,
+          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+      // Initialize the FixedArray.
+      // a1: constructor
+      // a3: number of elements in properties array (untagged)
+      // t0: JSObject
+      // t1: start of next object
+      __ LoadRoot(t2, Heap::kFixedArrayMapRootIndex);
+      __ mov(a2, t1);
+      __ sd(t2, MemOperand(a2, JSObject::kMapOffset));
+      // Tag number of elements.
+      __ dsll32(a0, a3, 0);
+      __ sd(a0, MemOperand(a2, FixedArray::kLengthOffset));
+      __ Daddu(a2, a2, Operand(2 * kPointerSize));
+
+      DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+
+      // Initialize the fields to undefined.
+      // a1: constructor
+      // a2: First element of FixedArray (not tagged)
+      // a3: number of elements in properties array
+      // t0: JSObject
+      // t1: FixedArray (not tagged)
+      __ dsll(a7, a3, kPointerSizeLog2);
+      __ daddu(t2, a2, a7);  // End of object.
+      DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      { Label loop, entry;
+        if (!is_api_function || create_memento) {
+          __ LoadRoot(t3, Heap::kUndefinedValueRootIndex);
+        } else if (FLAG_debug_code) {
+          __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+          __ Assert(eq, kUndefinedValueNotLoaded, t3, Operand(a6));
+        }
+        __ jmp(&entry);
+        __ bind(&loop);
+        __ sd(t3, MemOperand(a2));
+        __ daddiu(a2, a2, kPointerSize);
+        __ bind(&entry);
+        __ Branch(&loop, less, a2, Operand(t2));
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject.
+      // a1: constructor function
+      // t0: JSObject
+      // t1: FixedArray (not tagged)
+      __ Daddu(t1, t1, Operand(kHeapObjectTag));  // Add the heap tag.
+      __ sd(t1, FieldMemOperand(t0, JSObject::kPropertiesOffset));
+
+      // Continue with JSObject being successfully allocated.
+      // a1: constructor function
+      // a4: JSObject
+      __ jmp(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+      // t0: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(t0, t1);
+    }
+
+    // Allocate the new receiver object using the runtime call.
+    // a1: constructor function
+    __ bind(&rt_call);
+    if (create_memento) {
+      // Get the cell or allocation site.
+      __ ld(a2, MemOperand(sp, 2 * kPointerSize));
+      __ push(a2);
+    }
+
+    __ push(a1);  // Argument for Runtime_NewObject.
+    if (create_memento) {
+      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
+    } else {
+      __ CallRuntime(Runtime::kNewObject, 1);
+    }
+    __ mov(t0, v0);
+
+    // If we ended up using the runtime, and we want a memento, then the
+    // runtime call made it for us, and we shouldn't do the create-count
+    // increment.
+    Label count_incremented;
+    if (create_memento) {
+      __ jmp(&count_incremented);
+    }
+
+    // Receiver for constructor call allocated.
+    // t0: JSObject
+    __ bind(&allocated);
+
+    if (create_memento) {
+      __ ld(a2, MemOperand(sp, kPointerSize * 2));
+      __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+      __ Branch(&count_incremented, eq, a2, Operand(t1));
+      // a2 is an AllocationSite. We are creating a memento from it, so we
+      // need to increment the memento create count.
+      __ ld(a3, FieldMemOperand(a2,
+                                AllocationSite::kPretenureCreateCountOffset));
+      __ Daddu(a3, a3, Operand(Smi::FromInt(1)));
+      __ sd(a3, FieldMemOperand(a2,
+                                AllocationSite::kPretenureCreateCountOffset));
+      __ bind(&count_incremented);
+    }
+
+    __ Push(t0, t0);
+
+    // Reload the number of arguments from the stack.
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    __ ld(a1, MemOperand(sp, 2 * kPointerSize));
+    __ ld(a3, MemOperand(sp, 3 * kPointerSize));
+
+    // Set up pointer to last argument.
+    __ Daddu(a2, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Set up number of arguments for function call below.
+    __ SmiUntag(a0, a3);
+
+    // Copy arguments and receiver to the expression stack.
+    // a0: number of arguments
+    // a1: constructor function
+    // a2: address of last argument (caller sp)
+    // a3: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    Label loop, entry;
+    __ SmiUntag(a3);
+    __ jmp(&entry);
+    __ bind(&loop);
+    __ dsll(a4, a3, kPointerSizeLog2);
+    __ Daddu(a4, a2, Operand(a4));
+    __ ld(a5, MemOperand(a4));
+    __ push(a5);
+    __ bind(&entry);
+    __ Daddu(a3, a3, Operand(-1));
+    __ Branch(&loop, greater_equal, a3, Operand(zero_reg));
+
+    // Call the function.
+    // a0: number of arguments
+    // a1: constructor function
+    if (is_api_function) {
+      __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+      Handle<Code> code =
+          masm->isolate()->builtins()->HandleApiCallConstruct();
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      ParameterCount actual(a0);
+      __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+    }
+
+    // Store offset of return address for deoptimizer.
+    if (!is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    // v0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ JumpIfSmi(v0, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ GetObjectType(v0, a1, a3);
+    __ Branch(&exit, greater_equal, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ ld(v0, MemOperand(sp));
+
+    // Remove receiver from the stack, remove caller arguments, and
+    // return.
+    __ bind(&exit);
+    // v0: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ ld(a1, MemOperand(sp, 2 * kPointerSize));
+
+    // Leave construct frame.
+  }
+
+  __ SmiScale(a4, a1, kPointerSizeLog2);
+  __ Daddu(sp, sp, a4);
+  __ Daddu(sp, sp, kPointerSize);
+  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, a1, a2);
+  __ Ret();
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from JSEntryStub::GenerateBody
+
+  // ----------- S t a t e -------------
+  //  -- a0: code entry
+  //  -- a1: function
+  //  -- a2: receiver_pointer
+  //  -- a3: argc
+  //  -- s0: argv
+  // -----------------------------------
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+  // Clear the context before we push it when entering the JS frame.
+  __ mov(cp, zero_reg);
+
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context from the function argument.
+    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+    // Push the function and the receiver onto the stack.
+    __ Push(a1, a2);
+
+    // Copy arguments to the stack in a loop.
+    // a3: argc
+    // s0: argv, i.e. points to first arg
+    Label loop, entry;
+    // TODO(plind): At least on simulator, argc in a3 is an int32_t with junk
+    //    in upper bits. Should fix the root cause, rather than use below
+    //    workaround to clear upper bits.
+    __ dsll32(a3, a3, 0);  // int32_t -> int64_t.
+    __ dsrl32(a3, a3, 0);
+    __ dsll(a4, a3, kPointerSizeLog2);
+    __ daddu(a6, s0, a4);
+    __ b(&entry);
+    __ nop();   // Branch delay slot nop.
+    // a6 points past last arg.
+    __ bind(&loop);
+    __ ld(a4, MemOperand(s0));  // Read next parameter.
+    __ daddiu(s0, s0, kPointerSize);
+    __ ld(a4, MemOperand(a4));  // Dereference handle.
+    __ push(a4);  // Push parameter.
+    __ bind(&entry);
+    __ Branch(&loop, ne, s0, Operand(a6));
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+    __ mov(s1, a4);
+    __ mov(s2, a4);
+    __ mov(s3, a4);
+    __ mov(s4, a4);
+    __ mov(s5, a4);
+    // s6 holds the root address. Do not clobber.
+    // s7 is cp. Do not init.
+
+    // Invoke the code and pass argc as a0.
+    __ mov(a0, a3);
+    if (is_construct) {
+      // No type feedback cell is available.
+      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+      CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+      __ CallStub(&stub);
+    } else {
+      ParameterCount actual(a0);
+      __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+    }
+
+    // Leave internal frame.
+  }
+  __ Jump(ra);
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+  FrameScope scope(masm, StackFrame::INTERNAL);
+  // Push a copy of the function onto the stack, and the function again as a
+  // parameter to the runtime call.
+  __ Push(a1, a1);
+  // Whether to compile in a background thread.
+  __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
+  // Restore the function.
+  __ Pop(a1);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  CallCompileOptimized(masm, false);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  CallCompileOptimized(masm, true);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
+  __ Dsubu(a0, a0,
+      Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   a0 - contains return address (beginning of patch sequence)
+  //   a1 - isolate
+  RegList saved_regs =
+      (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(saved_regs);
+  __ PrepareCallCFunction(2, 0, a2);
+  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  __ MultiPop(saved_regs);
+  __ Jump(a0);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                 \
+void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking(  \
+    MacroAssembler* masm) {                                  \
+  GenerateMakeCodeYoungAgainCommon(masm);                    \
+}                                                            \
+void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(   \
+    MacroAssembler* masm) {                                  \
+  GenerateMakeCodeYoungAgainCommon(masm);                    \
+}
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, as in GenerateMakeCodeYoungAgainCommon, we are relying on the fact
+  // that make_code_young doesn't do any garbage collection which allows us to
+  // save/restore the registers without worrying about which of them contain
+  // pointers.
+
+  // Set a0 to point to the head of the PlatformCodeAge sequence.
+  __ Dsubu(a0, a0,
+      Operand(kNoCodeAgeSequenceLength - Assembler::kInstrSize));
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   a0 - contains return address (beginning of patch sequence)
+  //   a1 - isolate
+  RegList saved_regs =
+      (a0.bit() | a1.bit() | ra.bit() | fp.bit()) & ~sp.bit();
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ MultiPush(saved_regs);
+  __ PrepareCallCFunction(2, 0, a2);
+  __ li(a1, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ MultiPop(saved_regs);
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ Push(ra, fp, cp, a1);
+  __ Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+  // Jump to point after the code-age stub.
+  __ Daddu(a0, a0, Operand((kNoCodeAgeSequenceLength)));
+  __ Jump(a0);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ Daddu(sp, sp, Operand(kPointerSize));  // Ignore state.
+  __ Jump(ra);  // Jump to the miss handler.
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ li(a0, Operand(Smi::FromInt(static_cast<int>(type))));
+    __ push(a0);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  }
+
+  // Get the full codegen state from the stack and untag it -> a6.
+  __ ld(a6, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(a6);
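+  // The deoptimizer left the full-codegen state (and, for TOS_REG, the
+  // top-of-stack value) on the stack; they are reloaded below.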
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ Branch(&with_tos_register,
+            ne, a6, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill the delay slot: Daddu emits a single instruction here.
+  __ Daddu(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+
+  __ bind(&with_tos_register);
+  __ ld(v0, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(&unknown_state, ne, a6, Operand(FullCodeGenerator::TOS_REG));
+
+  __ Ret(USE_DELAY_SLOT);
+  // Safe to fill the delay slot: Daddu emits a single instruction here.
+  __ Daddu(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // Lookup the function in the JavaScript frame.
+  __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(a0);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
+
+  // If the runtime returned Smi zero (no code object), just return to the
+  // unoptimized code.
+  __ Ret(eq, v0, Operand(Smi::FromInt(0)));
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ Uld(a1, MemOperand(v0, Code::kDeoptimizationDataOffset - kHeapObjectTag));
+
+  // Load the OSR entrypoint offset from the deoptimization data.
+  // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+  __ ld(a1, MemOperand(a1, FixedArray::OffsetOfElementAt(
+      DeoptimizationInputData::kOsrPcOffsetIndex) - kHeapObjectTag));
+  __ SmiUntag(a1);
+
+  // Compute the target address = code_obj + header_size + osr_offset
+  // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+  __ daddu(v0, v0, a1);
+  __ daddiu(ra, v0, Code::kHeaderSize - kHeapObjectTag);
+
+  // And "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as an indicator that recompilation might be done.
+  Label ok;
+  __ LoadRoot(at, Heap::kStackLimitRootIndex);
+  __ Branch(&ok, hs, sp, Operand(at));
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kStackGuard, 0);
+  }
+  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+          RelocInfo::CODE_TARGET);
+
+  __ bind(&ok);
+  __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  // a0: actual number of arguments
+  { Label done;
+    __ Branch(&done, ne, a0, Operand(zero_reg));
+    __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+    __ push(a6);
+    __ Daddu(a0, a0, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  // a0: actual number of arguments
+  Label slow, non_function;
+  __ dsll(at, a0, kPointerSizeLog2);
+  __ daddu(at, sp, at);
+  __ ld(a1, MemOperand(at));
+  __ JumpIfSmi(a1, &non_function);
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&slow, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+  // 3a. Patch the first argument if necessary when calling a function.
+  // a0: actual number of arguments
+  // a1: function
+  Label shift_arguments;
+  __ li(a4, Operand(0, RelocInfo::NONE32));  // Indicate regular JS_FUNCTION.
+  { Label convert_to_object, use_global_proxy, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
+    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+    // Do not transform the receiver for strict mode functions.
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+    __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset));
+    __ And(a7, a3, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ Branch(&shift_arguments, ne, a7, Operand(zero_reg));
+
+    // Do not transform the receiver for native (Compilerhints already in a3).
+    __ lbu(a3, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+    __ And(a7, a3, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ Branch(&shift_arguments, ne, a7, Operand(zero_reg));
+
+    // Compute the receiver in sloppy mode.
+    // Load first argument in a2. a2 = -kPointerSize(sp + n_args << 2).
+    __ dsll(at, a0, kPointerSizeLog2);
+    __ daddu(a2, sp, at);
+    __ ld(a2, MemOperand(a2, -kPointerSize));
+    // a0: actual number of arguments
+    // a1: function
+    // a2: first argument
+    __ JumpIfSmi(a2, &convert_to_object, a6);
+
+    __ LoadRoot(a3, Heap::kUndefinedValueRootIndex);
+    __ Branch(&use_global_proxy, eq, a2, Operand(a3));
+    __ LoadRoot(a3, Heap::kNullValueRootIndex);
+    __ Branch(&use_global_proxy, eq, a2, Operand(a3));
+
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ GetObjectType(a2, a3, a3);
+    __ Branch(&shift_arguments, ge, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    __ bind(&convert_to_object);
+    // Enter an internal frame in order to preserve argument count.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(a0);
+      __ Push(a0, a2);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mov(a2, v0);
+
+      __ pop(a0);
+      __ SmiUntag(a0);
+      // Leave internal frame.
+    }
+    // Restore the function to a1, and the flag to a4.
+    __ dsll(at, a0, kPointerSizeLog2);
+    __ daddu(at, sp, at);
+    __ ld(a1, MemOperand(at));
+    __ Branch(USE_DELAY_SLOT, &patch_receiver);
+    __ li(a4, Operand(0, RelocInfo::NONE32));
+
+    __ bind(&use_global_proxy);
+    __ ld(a2, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+    __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+
+    __ bind(&patch_receiver);
+    __ dsll(at, a0, kPointerSizeLog2);
+    __ daddu(a3, sp, at);
+    __ sd(a2, MemOperand(a3, -kPointerSize));
+
+    __ Branch(&shift_arguments);
+  }
+
+  // 3b. Check for function proxy.
+  __ bind(&slow);
+  __ li(a4, Operand(1, RelocInfo::NONE32));  // Indicate function proxy.
+  __ Branch(&shift_arguments, eq, a2, Operand(JS_FUNCTION_PROXY_TYPE));
+
+  __ bind(&non_function);
+  __ li(a4, Operand(2, RelocInfo::NONE32));  // Indicate non-function.
+
+  // 3c. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  // a0: actual number of arguments
+  // a1: function
+  // a4: call type (0: JS function, 1: function proxy, 2: non-function)
+  __ dsll(at, a0, kPointerSizeLog2);
+  __ daddu(a2, sp, at);
+  __ sd(a1, MemOperand(a2, -kPointerSize));
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // a0: actual number of arguments
+  // a1: function
+  // a4: call type (0: JS function, 1: function proxy, 2: non-function)
+  __ bind(&shift_arguments);
+  { Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ dsll(at, a0, kPointerSizeLog2);
+    __ daddu(a2, sp, at);
+
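+    // The loop below copies each slot's lower neighbour into it, walking a2
+    // from the receiver slot down to sp, so every value moves one word
+    // toward higher addresses.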
+    __ bind(&loop);
+    __ ld(at, MemOperand(a2, -kPointerSize));
+    __ sd(at, MemOperand(a2));
+    __ Dsubu(a2, a2, Operand(kPointerSize));
+    __ Branch(&loop, ne, a2, Operand(sp));
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ Dsubu(a0, a0, Operand(1));
+    __ Pop();
+  }
+
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+  //     or a function proxy via CALL_FUNCTION_PROXY.
+  // a0: actual number of arguments
+  // a1: function
+  // a4: call type (0: JS function, 1: function proxy, 2: non-function)
+  { Label function, non_proxy;
+    __ Branch(&function, eq, a4, Operand(zero_reg));
+    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+    __ mov(a2, zero_reg);
+    __ Branch(&non_proxy, ne, a4, Operand(1));
+
+    __ push(a1);  // Re-add proxy object as additional argument.
+    __ Daddu(a0, a0, Operand(1));
+    __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+
+    __ bind(&non_proxy);
+    __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+    __ bind(&function);
+  }
+
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing. If so, jump
+  //     (tail-call) to the code in register a3 without checking arguments.
+  // a0: actual number of arguments
+  // a1: function
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  // The argument count is stored as int32_t on 64-bit platforms.
+  // TODO(plind): Smi on 32-bit platforms.
+  __ lw(a2,
+         FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  // Check formal and actual parameter counts.
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET, ne, a2, Operand(a0));
+
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  ParameterCount expected(0);
+  __ InvokeCode(a3, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  const int kIndexOffset    =
+      StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+  const int kLimitOffset    =
+      StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+  const int kArgsOffset     = 2 * kPointerSize;
+  const int kRecvOffset     = 3 * kPointerSize;
+  const int kFunctionOffset = 4 * kPointerSize;
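+  // The function, receiver and arguments object were pushed by the caller
+  // above this frame; the copy index and limit live in the internal frame's
+  // expression slots.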
+
+  {
+    FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    __ ld(a0, MemOperand(fp, kFunctionOffset));  // Get the function.
+    __ push(a0);
+    __ ld(a0, MemOperand(fp, kArgsOffset));  // Get the args array.
+    __ push(a0);
+    // Returns (in v0) the number of arguments to copy to the stack, as a Smi.
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+    // Make a2 the space we have left. The stack might already be overflowed
+    // here which will cause a2 to become negative.
+    __ dsubu(a2, sp, a2);
+    // Check if the arguments will overflow the stack.
+    __ SmiScale(a7, v0, kPointerSizeLog2);
+    __ Branch(&okay, gt, a2, Operand(a7));  // Signed comparison.
+
+    // Out of stack space.
+    __ ld(a1, MemOperand(fp, kFunctionOffset));
+    __ Push(a1, v0);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+    // End of stack check.
+
+    // Push current limit and index.
+    __ bind(&okay);
+    __ mov(a1, zero_reg);
+    __ Push(v0, a1);  // Limit and initial index.
+
+    // Get the receiver.
+    __ ld(a0, MemOperand(fp, kRecvOffset));
+
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ ld(a1, MemOperand(fp, kFunctionOffset));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&push_receiver, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+    // Change context eagerly to get the right global object if necessary.
+    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+    // Load the shared function info while the function is still in a1.
+    __ ld(a2, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_proxy;
+    __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kStrictModeByteOffset));
+    __ And(a7, a7, Operand(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
+    __ Branch(&push_receiver, ne, a7, Operand(zero_reg));
+
+    // Do not transform the receiver for native (Compilerhints already in a2).
+    __ lbu(a7, FieldMemOperand(a2, SharedFunctionInfo::kNativeByteOffset));
+    __ And(a7, a7, Operand(1 << SharedFunctionInfo::kNativeBitWithinByte));
+    __ Branch(&push_receiver, ne, a7, Operand(zero_reg));
+
+    // Compute the receiver in sloppy mode.
+    __ JumpIfSmi(a0, &call_to_object);
+    __ LoadRoot(a1, Heap::kNullValueRootIndex);
+    __ Branch(&use_global_proxy, eq, a0, Operand(a1));
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ Branch(&use_global_proxy, eq, a0, Operand(a2));
+
+    // Check if the receiver is already a JavaScript object.
+    // a0: receiver
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ GetObjectType(a0, a1, a1);
+    __ Branch(&push_receiver, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    // Convert the receiver to a regular object.
+    // a0: receiver
+    __ bind(&call_to_object);
+    __ push(a0);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ mov(a0, v0);  // Put object in a0 to match other paths to push_receiver.
+    __ Branch(&push_receiver);
+
+    __ bind(&use_global_proxy);
+    __ ld(a0, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+    __ ld(a0, FieldMemOperand(a0, GlobalObject::kGlobalProxyOffset));
+
+    // Push the receiver.
+    // a0: receiver
+    __ bind(&push_receiver);
+    __ push(a0);
+
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ ld(a0, MemOperand(fp, kIndexOffset));
+    __ Branch(&entry);
+
+    // Load the current argument from the arguments array and push it to the
+    // stack.
+    // a0: current argument index
+    __ bind(&loop);
+    __ ld(a1, MemOperand(fp, kArgsOffset));
+    __ Push(a1, a0);
+
+    // Call the runtime to access the property in the arguments array.
+    __ CallRuntime(Runtime::kGetProperty, 2);
+    __ push(v0);
+
+    // Advance the current argument index.
+    __ ld(a0, MemOperand(fp, kIndexOffset));
+    __ Daddu(a0, a0, Operand(Smi::FromInt(1)));
+    __ sd(a0, MemOperand(fp, kIndexOffset));
+
+    // Test if the copy loop has finished copying all the elements from the
+    // arguments object.
+    __ bind(&entry);
+    __ ld(a1, MemOperand(fp, kLimitOffset));
+    __ Branch(&loop, ne, a0, Operand(a1));
+
+    // Call the function.
+    Label call_proxy;
+    ParameterCount actual(a0);
+    __ SmiUntag(a0);
+    __ ld(a1, MemOperand(fp, kFunctionOffset));
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&call_proxy, ne, a2, Operand(JS_FUNCTION_TYPE));
+
+    __ InvokeFunction(a1, actual, CALL_FUNCTION, NullCallWrapper());
+
+    frame_scope.GenerateLeaveFrame();
+    __ Ret(USE_DELAY_SLOT);
+    __ Daddu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
+
+    // Call the function proxy.
+    __ bind(&call_proxy);
+    __ push(a1);  // Add function proxy as last argument.
+    __ Daddu(a0, a0, Operand(1));
+    __ li(a2, Operand(0, RelocInfo::NONE32));
+    __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+    // Tear down the internal frame and remove function, receiver and args.
+  }
+
+  __ Ret(USE_DELAY_SLOT);
+  __ Daddu(sp, sp, Operand(3 * kPointerSize));  // In delay slot.
+}
+
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- a0 : actual number of arguments
+  //  -- a1 : function (passed through to callee)
+  //  -- a2 : expected number of arguments
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(a5, Heap::kRealStackLimitRootIndex);
+  // Make a5 the space we have left. The stack might already be overflowed
+  // here which will cause a5 to become negative.
+  __ dsubu(a5, sp, a5);
+  // Check if the arguments will overflow the stack.
+  __ dsll(at, a2, kPointerSizeLog2);
+  // Signed comparison.
+  __ Branch(stack_overflow, le, a5, Operand(at));
+}
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // Smi-tag the actual argument count: on MIPS64 a smi lives in the upper
+  // 32 bits, so tagging is a 32-bit left shift.
+  __ dsll32(a0, a0, 0);
+  __ li(a4, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ MultiPush(a0.bit() | a1.bit() | a4.bit() | fp.bit() | ra.bit());
+  __ Daddu(fp, sp,
+      Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
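+  // Resulting frame (a sketch): [fp + 8] ra, [fp] caller's fp,
+  // [fp - 8] ARGUMENTS_ADAPTOR marker, [fp - 16] function,
+  // [fp - 24] actual argument count (as a smi).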
+}
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- v0 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then tear down the parameters.
+  __ ld(a1, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                             kPointerSize)));
+  __ mov(sp, fp);
+  __ MultiPop(fp.bit() | ra.bit());
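+  // a4 = argc * kPointerSize: SmiScale turns the smi-tagged count directly
+  // into a byte offset.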
+  __ SmiScale(a4, a1, kPointerSizeLog2);
+  __ Daddu(sp, sp, a4);
+  // Adjust for the receiver.
+  __ Daddu(sp, sp, Operand(kPointerSize));
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // State setup as expected by MacroAssembler::InvokePrologue.
+  // ----------- S t a t e -------------
+  //  -- a0: actual arguments count
+  //  -- a1: function (passed through to callee)
+  //  -- a2: expected arguments count
+  // -----------------------------------
+
+  Label stack_overflow;
+  ArgumentAdaptorStackCheck(masm, &stack_overflow);
+  Label invoke, dont_adapt_arguments;
+
+  Label enough, too_few;
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Branch(&dont_adapt_arguments, eq,
+      a2, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  // We use an unsigned (Uless) comparison as argument counts are never
+  // negative.
+  __ Branch(&too_few, Uless, a0, Operand(a2));
+
+  {  // Enough parameters: actual >= expected.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate copy start address into a0 and copy end address into a2.
+    __ SmiScale(a0, a0, kPointerSizeLog2);
+    __ Daddu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Daddu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address.
+    __ dsll(a2, a2, kPointerSizeLog2);
+    __ dsubu(a2, a0, a2);
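+    // The copy loop below walks a0 down to a2 inclusive, copying
+    // expected + 1 slots (the arguments plus the receiver).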
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: copy end address
+    // a3: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ ld(a4, MemOperand(a0));
+    __ push(a4);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a2));
+    __ daddiu(a0, a0, -kPointerSize);  // In delay slot.
+
+    __ jmp(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected.
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate the copy start address into a0; the copy end address
+    // (a7, computed below) is fp + kPointerSize.
+    // a0: actual number of arguments as a smi
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ SmiScale(a0, a0, kPointerSizeLog2);
+    __ Daddu(a0, fp, a0);
+    // Adjust for return address and receiver.
+    __ Daddu(a0, a0, Operand(2 * kPointerSize));
+    // Compute copy end address. Also adjust for return address.
+    __ Daddu(a7, fp, kPointerSize);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // a0: copy start address
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    // a7: copy end address
+    Label copy;
+    __ bind(&copy);
+    __ ld(a4, MemOperand(a0));  // Adjusted above for return addr and receiver.
+    __ Dsubu(sp, sp, kPointerSize);
+    __ Dsubu(a0, a0, kPointerSize);
+    __ Branch(USE_DELAY_SLOT, &copy, ne, a0, Operand(a7));
+    __ sd(a4, MemOperand(sp));  // In the delay slot.
+
+    // Fill the remaining expected arguments with undefined.
+    // a1: function
+    // a2: expected number of arguments
+    // a3: code entry to call
+    __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+    __ dsll(a6, a2, kPointerSizeLog2);
+    __ Dsubu(a2, fp, Operand(a6));
+    // Adjust for frame.
+    __ Dsubu(a2, a2, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                            2 * kPointerSize));
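+    // a2 is the eventual value of sp: the fill loop below stores undefined
+    // into each newly allocated slot until sp reaches a2.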
+
+    Label fill;
+    __ bind(&fill);
+    __ Dsubu(sp, sp, kPointerSize);
+    __ Branch(USE_DELAY_SLOT, &fill, ne, sp, Operand(a2));
+    __ sd(a4, MemOperand(sp));
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+
+  __ Call(a3);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ Ret();
+
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ Jump(a3);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    EnterArgumentsAdaptorFrame(masm);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+    __ break_(0xCC);
+  }
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/code-stubs-mips64.cc b/src/mips64/code-stubs-mips64.cc
new file mode 100644
index 0000000..60263b5
--- /dev/null
+++ b/src/mips64/code-stubs-mips64.cc
@@ -0,0 +1,4932 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+static void InitializeArrayConstructorDescriptor(
+    Isolate* isolate, CodeStubDescriptor* descriptor,
+    int constant_stack_parameter_count) {
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kArrayConstructor)->entry;
+
+  if (constant_stack_parameter_count == 0) {
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
+  } else {
+    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+  }
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+    Isolate* isolate, CodeStubDescriptor* descriptor,
+    int constant_stack_parameter_count) {
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kInternalArrayConstructor)->entry;
+
+  if (constant_stack_parameter_count == 0) {
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
+  } else {
+    descriptor->Initialize(a0, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+  }
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register lhs,
+                                    Register rhs,
+                                    Label* rhs_not_nan,
+                                    Label* slow,
+                                    bool strict);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+                                           Register lhs,
+                                           Register rhs);
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
+  // Update the static counter each time a new code stub is generated.
+  isolate()->counters()->code_stubs()->Increment();
+
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetEnvironmentParameterCount();
+  {
+    // Call the runtime system in a fresh internal frame.
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    DCHECK((param_count == 0) ||
+           a0.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+    // Push arguments, adjust sp.
+    __ Dsubu(sp, sp, Operand(param_count * kPointerSize));
+    for (int i = 0; i < param_count; ++i) {
+      // Store argument to stack.
+      __ sd(descriptor.GetEnvironmentParameterRegister(i),
+            MemOperand(sp, (param_count - 1 - i) * kPointerSize));
+    }
+    __ CallExternalReference(miss, param_count);
+  }
+
+  __ Ret();
+}
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+  Label out_of_range, only_low, negate, done;
+  Register input_reg = source();
+  Register result_reg = destination();
+
+  int double_offset = offset();
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
+
+  Register scratch =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg);
+  Register scratch2 =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+  Register scratch3 =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch2);
+  DoubleRegister double_scratch = kLithiumScratchDouble;
+
+  __ Push(scratch, scratch2, scratch3);
+  if (!skip_fastpath()) {
+    // Load double input.
+    __ ldc1(double_scratch, MemOperand(input_reg, double_offset));
+
+    // Clear cumulative exception flags and save the FCSR.
+    __ cfc1(scratch2, FCSR);
+    __ ctc1(zero_reg, FCSR);
+
+    // Try a conversion to a signed integer.
+    __ Trunc_w_d(double_scratch, double_scratch);
+    // Move the converted value into the result register.
+    __ mfc1(scratch3, double_scratch);
+
+    // Retrieve and restore the FCSR.
+    __ cfc1(scratch, FCSR);
+    __ ctc1(scratch2, FCSR);
+
+    // Check for overflow and NaNs.
+    __ And(
+        scratch, scratch,
+        kFCSROverflowFlagMask | kFCSRUnderflowFlagMask
+           | kFCSRInvalidOpFlagMask);
+    // If we had no exceptions then set result_reg and we are done.
+    Label error;
+    __ Branch(&error, ne, scratch, Operand(zero_reg));
+    __ Move(result_reg, scratch3);
+    __ Branch(&done);
+    __ bind(&error);
+  }
+
+  // Load the double value and perform a manual truncation.
+  Register input_high = scratch2;
+  Register input_low = scratch3;
+
+  __ lw(input_low, MemOperand(input_reg, double_offset));
+  __ lw(input_high, MemOperand(input_reg, double_offset + kIntSize));
+
+  Label normal_exponent, restore_sign;
+  // Extract the biased exponent in result.
+  __ Ext(result_reg,
+         input_high,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // Check for Infinity and NaNs, which should return 0.
+  __ Subu(scratch, result_reg, HeapNumber::kExponentMask);
+  __ Movz(result_reg, zero_reg, scratch);
+  __ Branch(&done, eq, scratch, Operand(zero_reg));
+
+  // Express exponent as delta to (number of mantissa bits + 31).
+  __ Subu(result_reg,
+          result_reg,
+          Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits + 31));
+
+  // If the delta is strictly positive, the unbiased exponent exceeds 83 and
+  // even the lowest mantissa bit has weight 2^32 or more, so all bits would
+  // be shifted away and we can return 0.
+  __ Branch(&normal_exponent, le, result_reg, Operand(zero_reg));
+  __ mov(result_reg, zero_reg);
+  __ Branch(&done);
+
+  __ bind(&normal_exponent);
+  const int kShiftBase = HeapNumber::kNonMantissaBitsInTopWord - 1;
+  // Calculate shift.
+  __ Addu(scratch, result_reg, Operand(kShiftBase + HeapNumber::kMantissaBits));
+
+  // Save the sign.
+  Register sign = result_reg;
+  result_reg = no_reg;
+  __ And(sign, input_high, Operand(HeapNumber::kSignMask));
+
+  // On ARM shifts > 31 bits are valid and will result in zero. On MIPS we need
+  // to check for this specific case.
+  Label high_shift_needed, high_shift_done;
+  __ Branch(&high_shift_needed, lt, scratch, Operand(32));
+  __ mov(input_high, zero_reg);
+  __ Branch(&high_shift_done);
+  __ bind(&high_shift_needed);
+
+  // Set the implicit 1 before the mantissa part in input_high.
+  __ Or(input_high,
+        input_high,
+        Operand(1 << HeapNumber::kMantissaBitsInTopWord));
+  // Shift the mantissa bits to the correct position.
+  // We don't need to clear non-mantissa bits as they will be shifted away.
+  // If they weren't, it would mean that the answer is in the 32-bit range.
+  __ sllv(input_high, input_high, scratch);
+
+  __ bind(&high_shift_done);
+
+  // Replace the shifted bits with bits from the lower mantissa word.
+  Label pos_shift, shift_done;
+  __ li(at, 32);
+  __ subu(scratch, at, scratch);
+  __ Branch(&pos_shift, ge, scratch, Operand(zero_reg));
+
+  // Negate scratch.
+  __ Subu(scratch, zero_reg, scratch);
+  __ sllv(input_low, input_low, scratch);
+  __ Branch(&shift_done);
+
+  __ bind(&pos_shift);
+  __ srlv(input_low, input_low, scratch);
+
+  __ bind(&shift_done);
+  __ Or(input_high, input_high, Operand(input_low));
+  // Restore sign if necessary.
+  __ mov(scratch, sign);
+  result_reg = sign;
+  sign = no_reg;
+  __ Subu(result_reg, zero_reg, input_high);
+  __ Movz(result_reg, input_high, scratch);
+
+  __ bind(&done);
+
+  __ Pop(scratch, scratch2, scratch3);
+  __ Ret();
+}
+
+
+void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
+    Isolate* isolate) {
+  WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
+  WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
+  stub1.GetCode();
+  stub2.GetCode();
+}
+
+
+// See the class comment; this does NOT work for int32s in Smi range.
+void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
+  Label max_negative_int;
+  // the_int_ has the answer which is a signed int32 but not a Smi.
+  // We test for the special value that has a different exponent.
+  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+  // Test sign, and save for later conditionals.
+  __ And(sign(), the_int(), Operand(0x80000000u));
+  __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
+
+  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
+  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
+  uint32_t non_smi_exponent =
+      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
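+  // With bias 1023 the stored exponent is 1053 = 0x41D; shifted into the
+  // high word this is 0x41D00000, the upper half of the double 2^30.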
+  __ li(scratch(), Operand(non_smi_exponent));
+  // Set the sign bit in scratch_ if the value was negative.
+  __ or_(scratch(), scratch(), sign());
+  // Subtract from 0 if the value was negative.
+  __ subu(at, zero_reg, the_int());
+  __ Movn(the_int(), at, sign());
+  // We should be masking the implicit first digit of the mantissa away here,
+  // but it just ends up combining harmlessly with the last digit of the
+  // exponent that happens to be 1. The sign bit is 0, so we shift by 10 to
+  // make the most significant 1 hit the last bit of the 12-bit sign and
+  // exponent field.
+  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
+  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
+  __ srl(at, the_int(), shift_distance);
+  __ or_(scratch(), scratch(), at);
+  __ sw(scratch(), FieldMemOperand(the_heap_number(),
+                                   HeapNumber::kExponentOffset));
+  __ sll(scratch(), the_int(), 32 - shift_distance);
+  __ Ret(USE_DELAY_SLOT);
+  __ sw(scratch(), FieldMemOperand(the_heap_number(),
+                                   HeapNumber::kMantissaOffset));
+
+  __ bind(&max_negative_int);
+  // The max negative int32 is stored as a positive number in the mantissa of
+  // a double because it uses a sign bit instead of using two's complement.
+  // The actual mantissa bits stored are all 0 because the implicit most
+  // significant 1 bit is not stored.
+  non_smi_exponent += 1 << HeapNumber::kExponentShift;
+  __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
+  __ sw(scratch(),
+        FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
+  __ mov(scratch(), zero_reg);
+  __ Ret(USE_DELAY_SLOT);
+  __ sw(scratch(),
+        FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
+}
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm,
+                                          Label* slow,
+                                          Condition cc) {
+  Label not_identical;
+  Label heap_number, return_equal;
+  Register exp_mask_reg = t1;
+
+  __ Branch(&not_identical, ne, a0, Operand(a1));
+
+  __ li(exp_mask_reg, Operand(HeapNumber::kExponentMask));
+
+  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+  // so we do the second best thing: test it ourselves.
+  // The operands are identical, and the combined smi check already failed,
+  // so neither of them is a smi. If the operand is not a heap number,
+  // return equal.
+  if (cc == less || cc == greater) {
+    __ GetObjectType(a0, t0, t0);
+    __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+  } else {
+    __ GetObjectType(a0, t0, t0);
+    __ Branch(&heap_number, eq, t0, Operand(HEAP_NUMBER_TYPE));
+    // Comparing JS objects with <=, >= is complicated.
+    if (cc != eq) {
+      __ Branch(slow, greater, t0, Operand(FIRST_SPEC_OBJECT_TYPE));
+      // Normally here we fall through to return_equal, but undefined is
+      // special: (undefined == undefined) == true, but
+      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
+      if (cc == less_equal || cc == greater_equal) {
+        __ Branch(&return_equal, ne, t0, Operand(ODDBALL_TYPE));
+        __ LoadRoot(a6, Heap::kUndefinedValueRootIndex);
+        __ Branch(&return_equal, ne, a0, Operand(a6));
+        DCHECK(is_int16(GREATER) && is_int16(LESS));
+        __ Ret(USE_DELAY_SLOT);
+        if (cc == le) {
+          // undefined <= undefined should fail.
+          __ li(v0, Operand(GREATER));
+        } else {
+          // undefined >= undefined should fail.
+          __ li(v0, Operand(LESS));
+        }
+      }
+    }
+  }
+
+  __ bind(&return_equal);
+  DCHECK(is_int16(GREATER) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
+  if (cc == less) {
+    __ li(v0, Operand(GREATER));  // Things aren't less than themselves.
+  } else if (cc == greater) {
+    __ li(v0, Operand(LESS));     // Things aren't greater than themselves.
+  } else {
+    __ mov(v0, zero_reg);         // Things are <=, >=, ==, === themselves.
+  }
+  // For less and greater we don't have to check for NaN since the result of
+  // x < x is false regardless. For the other conditions we check for NaN
+  // below.
+  if (cc != lt && cc != gt) {
+    __ bind(&heap_number);
+    // It is a heap number, so return non-equal if it's NaN and equal if it's
+    // not NaN.
+
+    // The representation of NaN values has all exponent bits (52..62) set,
+    // and not all mantissa bits (0..51) clear.
+    // Read top bits of double representation (second word of value).
+    __ lwu(a6, FieldMemOperand(a0, HeapNumber::kExponentOffset));
+    // Test that exponent bits are all set.
+    __ And(a7, a6, Operand(exp_mask_reg));
+    // If all bits not set (ne cond), then not a NaN, objects are equal.
+    __ Branch(&return_equal, ne, a7, Operand(exp_mask_reg));
+
+    // Shift out flag and all exponent bits, retaining only mantissa.
+    __ sll(a6, a6, HeapNumber::kNonMantissaBitsInTopWord);
+    // Or with all low-bits of mantissa.
+    __ lwu(a7, FieldMemOperand(a0, HeapNumber::kMantissaOffset));
+    __ Or(v0, a7, Operand(a6));
+    // For equal we already have the right value in v0:  Return zero (equal)
+    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+    // not (it's a NaN).  For <= and >= we need to load v0 with the failing
+    // value if it's a NaN.
+    if (cc != eq) {
+      // All-zero means Infinity means equal.
+      __ Ret(eq, v0, Operand(zero_reg));
+      DCHECK(is_int16(GREATER) && is_int16(LESS));
+      __ Ret(USE_DELAY_SLOT);
+      if (cc == le) {
+        __ li(v0, Operand(GREATER));  // NaN <= NaN should fail.
+      } else {
+        __ li(v0, Operand(LESS));     // NaN >= NaN should fail.
+      }
+    }
+  }
+  // No fall through here.
+
+  __ bind(&not_identical);
+}
+
+
+static void EmitSmiNonsmiComparison(MacroAssembler* masm,
+                                    Register lhs,
+                                    Register rhs,
+                                    Label* both_loaded_as_doubles,
+                                    Label* slow,
+                                    bool strict) {
+  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
+         (lhs.is(a1) && rhs.is(a0)));
+
+  Label lhs_is_smi;
+  __ JumpIfSmi(lhs, &lhs_is_smi);
+  // Rhs is a Smi.
+  // Check whether the non-smi is a heap number.
+  __ GetObjectType(lhs, t0, t0);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed. Return non-equal (lhs is already not zero).
+    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
+    __ mov(v0, lhs);
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number. Call
+    // the runtime.
+    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
+  }
+  // Rhs is a smi, lhs is a number.
+  // Convert smi rhs to double.
+  __ SmiUntag(at, rhs);
+  __ mtc1(at, f14);
+  __ cvt_d_w(f14, f14);
+  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+  // We now have both loaded as doubles.
+  __ jmp(both_loaded_as_doubles);
+
+  __ bind(&lhs_is_smi);
+  // Lhs is a Smi.  Check whether the non-smi is a heap number.
+  __ GetObjectType(rhs, t0, t0);
+  if (strict) {
+    // If lhs was not a number and rhs was a Smi then strict equality cannot
+    // succeed. Return non-equal.
+    __ Ret(USE_DELAY_SLOT, ne, t0, Operand(HEAP_NUMBER_TYPE));
+    __ li(v0, Operand(1));
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number. Call
+    // the runtime.
+    __ Branch(slow, ne, t0, Operand(HEAP_NUMBER_TYPE));
+  }
+
+  // Lhs is a smi, rhs is a number.
+  // Convert smi lhs to double.
+  __ SmiUntag(at, lhs);
+  __ mtc1(at, f12);
+  __ cvt_d_w(f12, f12);
+  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  // Fall through to both_loaded_as_doubles.
+}
+
+
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
+                                           Register lhs,
+                                           Register rhs) {
+    // If either operand is a JS object or an oddball value, then they are
+    // not equal since their pointers are different.
+    // There is no test for undetectability in strict equality.
+    STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+    Label first_non_object;
+    // Get the type of the first operand into a2 and compare it with
+    // FIRST_SPEC_OBJECT_TYPE.
+    __ GetObjectType(lhs, a2, a2);
+    __ Branch(&first_non_object, less, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    // Return non-zero.
+    Label return_not_equal;
+    __ bind(&return_not_equal);
+    __ Ret(USE_DELAY_SLOT);
+    __ li(v0, Operand(1));
+
+    __ bind(&first_non_object);
+    // Check for oddballs: true, false, null, undefined.
+    __ Branch(&return_not_equal, eq, a2, Operand(ODDBALL_TYPE));
+
+    __ GetObjectType(rhs, a3, a3);
+    __ Branch(&return_not_equal, greater, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    // Check for oddballs: true, false, null, undefined.
+    __ Branch(&return_not_equal, eq, a3, Operand(ODDBALL_TYPE));
+
+    // Now that we have the types we might as well check for
+    // internalized-internalized.
+    STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+    __ Or(a2, a2, Operand(a3));
+    __ And(at, a2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+    __ Branch(&return_not_equal, eq, at, Operand(zero_reg));
+}
+
+
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
+                                       Register lhs,
+                                       Register rhs,
+                                       Label* both_loaded_as_doubles,
+                                       Label* not_heap_numbers,
+                                       Label* slow) {
+  __ GetObjectType(lhs, a3, a2);
+  __ Branch(not_heap_numbers, ne, a2, Operand(HEAP_NUMBER_TYPE));
+  __ ld(a2, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  // If first was a heap number & second wasn't, go to slow case.
+  __ Branch(slow, ne, a3, Operand(a2));
+
+  // Both are heap numbers. Load them up then jump to the code we have
+  // for that.
+  __ ldc1(f12, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+  __ ldc1(f14, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+  __ jmp(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+                                                     Register lhs,
+                                                     Register rhs,
+                                                     Label* possible_strings,
+                                                     Label* not_both_strings) {
+  DCHECK((lhs.is(a0) && rhs.is(a1)) ||
+         (lhs.is(a1) && rhs.is(a0)));
+
+  // a2 is the object type of lhs.
+  Label object_test;
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ And(at, a2, Operand(kIsNotStringMask));
+  __ Branch(&object_test, ne, at, Operand(zero_reg));
+  __ And(at, a2, Operand(kIsNotInternalizedMask));
+  __ Branch(possible_strings, ne, at, Operand(zero_reg));
+  __ GetObjectType(rhs, a3, a3);
+  __ Branch(not_both_strings, ge, a3, Operand(FIRST_NONSTRING_TYPE));
+  __ And(at, a3, Operand(kIsNotInternalizedMask));
+  __ Branch(possible_strings, ne, at, Operand(zero_reg));
+
+  // Both are internalized strings. We already checked they weren't the same
+  // pointer so they are not equal.
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(1));   // Non-zero indicates not equal.
+
+  __ bind(&object_test);
+  __ Branch(not_both_strings, lt, a2, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ GetObjectType(rhs, a2, a3);
+  __ Branch(not_both_strings, lt, a3, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  // If both objects are undetectable, they are equal.  Otherwise, they
+  // are not equal, since they are different objects and an object is not
+  // equal to undefined.
+  __ ld(a3, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ lbu(a2, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ lbu(a3, FieldMemOperand(a3, Map::kBitFieldOffset));
+  __ and_(a0, a2, a3);
+  __ And(a0, a0, Operand(1 << Map::kIsUndetectable));
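+  // If both maps have the undetectable bit set, the xori below yields 0
+  // (equal); otherwise it yields a non-zero value (not equal).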
+  __ Ret(USE_DELAY_SLOT);
+  __ xori(v0, a0, 1 << Map::kIsUndetectable);
+}
+
+
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+                                         Register scratch,
+                                         CompareICState::State expected,
+                                         Label* fail) {
+  Label ok;
+  if (expected == CompareICState::SMI) {
+    __ JumpIfNotSmi(input, fail);
+  } else if (expected == CompareICState::NUMBER) {
+    __ JumpIfSmi(input, &ok);
+    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+                DONT_DO_SMI_CHECK);
+  }
+  // We could be strict about internalized/string here, but as long as
+  // hydrogen doesn't care, the stub doesn't have to care either.
+  __ bind(&ok);
+}
+
+
+// On entry a1 (lhs) and a0 (rhs) are the values to be compared.
+// On exit v0 is 0, positive or negative to indicate the result of
+// the comparison.
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
+  Register lhs = a1;
+  Register rhs = a0;
+  Condition cc = GetCondition();
+
+  Label miss;
+  CompareICStub_CheckInputType(masm, lhs, a2, left(), &miss);
+  CompareICStub_CheckInputType(masm, rhs, a3, right(), &miss);
+
+  Label slow;  // Call builtin.
+  Label not_smis, both_loaded_as_doubles;
+
+  Label not_two_smis, smi_done;
+  __ Or(a2, a1, a0);
+  __ JumpIfNotSmi(a2, &not_two_smis);
+  __ SmiUntag(a1);
+  __ SmiUntag(a0);
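+  // The untagged difference is negative, zero or positive, which is exactly
+  // the result encoding the caller expects.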
+
+  __ Ret(USE_DELAY_SLOT);
+  __ dsubu(v0, a1, a0);
+  __ bind(&not_two_smis);
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  // Handle the case where the objects are identical.  Either returns the answer
+  // or goes to slow.  Only falls through if the objects were not identical.
+  EmitIdenticalObjectComparison(masm, &slow, cc);
+
+  // If either is a Smi (we know that not both are), then they can only
+  // be strictly equal if the other is a HeapNumber.
+  STATIC_ASSERT(kSmiTag == 0);
+  DCHECK_EQ(0, Smi::FromInt(0));
+  __ And(a6, lhs, Operand(rhs));
+  __ JumpIfNotSmi(a6, &not_smis, a4);
+  // One operand is a smi. EmitSmiNonsmiComparison generates code that can:
+  // 1) Return the answer.
+  // 2) Go to slow.
+  // 3) Fall through to both_loaded_as_doubles.
+  // 4) Jump to rhs_not_nan.
+  // In cases 3 and 4 we have found out we were dealing with a number-number
+  // comparison and the numbers have been loaded into f12 and f14 as doubles,
+  // or in GP registers (a0, a1, a2, a3) depending on the presence of the FPU.
+  EmitSmiNonsmiComparison(masm, lhs, rhs,
+                          &both_loaded_as_doubles, &slow, strict());
+
+  __ bind(&both_loaded_as_doubles);
+  // f12, f14 are the double representations of the left hand side
+  // and the right hand side if we have FPU. Otherwise a2, a3 represent
+  // left hand side and a0, a1 represent right hand side.
+
+  Label nan;
+  __ li(a4, Operand(LESS));
+  __ li(a5, Operand(GREATER));
+  __ li(a6, Operand(EQUAL));
+
+  // Check if either rhs or lhs is NaN.
+  __ BranchF(NULL, &nan, eq, f12, f14);
+
+  // Check if LESS condition is satisfied. If true, move conditionally
+  // result to v0.
+  if (kArchVariant != kMips64r6) {
+    __ c(OLT, D, f12, f14);
+    __ Movt(v0, a4);
+    // Use the previous check to conditionally store the opposite condition
+    // (GREATER) to v0. If rhs equals lhs, this is corrected by the next
+    // check.
+    __ Movf(v0, a5);
+    // Check if EQUAL condition is satisfied. If true, move conditionally
+    // result to v0.
+    __ c(EQ, D, f12, f14);
+    __ Movt(v0, a6);
+  } else {
+    Label skip;
+    __ BranchF(USE_DELAY_SLOT, &skip, NULL, lt, f12, f14);
+    __ mov(v0, a4);  // Return LESS as result.
+
+    __ BranchF(USE_DELAY_SLOT, &skip, NULL, eq, f12, f14);
+    __ mov(v0, a6);  // Return EQUAL as result.
+
+    __ mov(v0, a5);  // Return GREATER as result.
+    __ bind(&skip);
+  }
+  __ Ret();
+
+  __ bind(&nan);
+  // NaN comparisons always fail.
+  // Load whatever we need in v0 to make the comparison fail.
+  DCHECK(is_int16(GREATER) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
+  if (cc == lt || cc == le) {
+    __ li(v0, Operand(GREATER));
+  } else {
+    __ li(v0, Operand(LESS));
+  }
+
+  __ bind(&not_smis);
+  // At this point we know we are dealing with two different objects,
+  // and neither of them is a Smi. The objects are in lhs_ and rhs_.
+  if (strict()) {
+    // This returns non-equal for some object types, or falls through if it
+    // was not lucky.
+    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+  }
+
+  Label check_for_internalized_strings;
+  Label flat_string_check;
+  // Check for heap-number-heap-number comparison. Can jump to slow case,
+  // or load both doubles and jump to the code that handles
+  // that case. If the inputs are not doubles then jumps to
+  // check_for_internalized_strings.
+  // In this case a2 will contain the type of lhs_.
+  EmitCheckForTwoHeapNumbers(masm,
+                             lhs,
+                             rhs,
+                             &both_loaded_as_doubles,
+                             &check_for_internalized_strings,
+                             &flat_string_check);
+
+  __ bind(&check_for_internalized_strings);
+  if (cc == eq && !strict()) {
+    // Returns an answer for two internalized strings or two
+    // detectable objects.
+    // Otherwise jumps to string case or not both strings case.
+    // Assumes that a2 is the type of lhs_ on entry.
+    EmitCheckForInternalizedStringsOrObjects(
+        masm, lhs, rhs, &flat_string_check, &slow);
+  }
+
+  // Check for both being sequential one-byte strings,
+  // and inline if that is the case.
+  __ bind(&flat_string_check);
+
+  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, a2, a3, &slow);
+
+  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, a2,
+                      a3);
+  if (cc == eq) {
+    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, a2, a3, a4);
+  } else {
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, a2, a3, a4,
+                                                    a5);
+  }
+  // Never falls through to here.
+
+  __ bind(&slow);
+  // Prepare for call to builtin. Push object pointers, a1 (lhs) first,
+  // a0 (rhs) second.
+  __ Push(lhs, rhs);
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript native;
+  if (cc == eq) {
+    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    native = Builtins::COMPARE;
+    int ncr;  // NaN compare result.
+    if (cc == lt || cc == le) {
+      ncr = GREATER;
+    } else {
+      DCHECK(cc == gt || cc == ge);  // Remaining cases.
+      ncr = LESS;
+    }
+    __ li(a0, Operand(Smi::FromInt(ncr)));
+    __ push(a0);
+  }
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
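+// Note: the Store/RestoreRegistersStateStub below expect the caller to have
+// pushed ra. They keep the stub return address in t9, restore the caller's
+// ra from the stack (so the safepoint register set holds the caller's ra),
+// save or restore the safepoint registers, and return through t9.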
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+  __ mov(t9, ra);
+  __ pop(ra);
+  __ PushSafepointRegisters();
+  __ Jump(t9);
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+  __ mov(t9, ra);
+  __ pop(ra);
+  __ PopSafepointRegisters();
+  __ Jump(t9);
+}
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ MultiPush(kJSCallerSaved | ra.bit());
+  if (save_doubles()) {
+    __ MultiPushFPU(kCallerSavedFPU);
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = a1;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+  __ CallCFunction(
+      ExternalReference::store_buffer_overflow_function(isolate()),
+      argument_count);
+  if (save_doubles()) {
+    __ MultiPopFPU(kCallerSavedFPU);
+  }
+
+  __ MultiPop(kJSCallerSaved | ra.bit());
+  __ Ret();
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+  const Register base = a1;
+  const Register exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(exponent.is(a2));
+  const Register heapnumbermap = a5;
+  const Register heapnumber = v0;
+  const DoubleRegister double_base = f2;
+  const DoubleRegister double_exponent = f4;
+  const DoubleRegister double_result = f0;
+  const DoubleRegister double_scratch = f6;
+  const FPURegister single_scratch = f8;
+  const Register scratch = t1;
+  const Register scratch2 = a7;
+
+  Label call_runtime, done, int_exponent;
+  if (exponent_type() == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack to double registers.
+    __ ld(base, MemOperand(sp, 1 * kPointerSize));
+    __ ld(exponent, MemOperand(sp, 0 * kPointerSize));
+
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+
+    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+    __ ld(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+
+    __ ldc1(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+    __ jmp(&unpack_exponent);
+
+    __ bind(&base_is_smi);
+    __ mtc1(scratch, single_scratch);
+    __ cvt_d_w(double_base, single_scratch);
+    __ bind(&unpack_exponent);
+
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+    __ ld(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+    __ Branch(&call_runtime, ne, scratch, Operand(heapnumbermap));
+    __ ldc1(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type() == TAGGED) {
+    // Base is already in double_base.
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+    __ ldc1(double_exponent,
+            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  }
+
+  if (exponent_type() != INTEGER) {
+    Label int_exponent_convert;
+    // Detect integer exponents stored as double.
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       scratch,
+                       double_exponent,
+                       at,
+                       double_scratch,
+                       scratch2,
+                       kCheckForInexactConversion);
+    // scratch2 == 0 means there was no conversion error.
+    __ Branch(&int_exponent_convert, eq, scratch2, Operand(zero_reg));
+
+    if (exponent_type() == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label not_plus_half;
+
+      // Test for 0.5.
+      __ Move(double_scratch, 0.5);
+      __ BranchF(USE_DELAY_SLOT,
+                 &not_plus_half,
+                 NULL,
+                 ne,
+                 double_exponent,
+                 double_scratch);
+      // double_scratch can be overwritten in the delay slot.
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      __ Move(double_scratch, -V8_INFINITY);
+      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+      __ neg_d(double_result, double_scratch);
+
+      // Add +0 to convert -0 to +0.
+      __ add_d(double_scratch, double_base, kDoubleRegZero);
+      __ sqrt_d(double_result, double_scratch);
+      __ jmp(&done);
+
+      __ bind(&not_plus_half);
+      __ Move(double_scratch, -0.5);
+      __ BranchF(USE_DELAY_SLOT,
+                 &call_runtime,
+                 NULL,
+                 ne,
+                 double_exponent,
+                 double_scratch);
+      // double_scratch can be overwritten in the delay slot.
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      __ Move(double_scratch, -V8_INFINITY);
+      __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
+      __ Move(double_result, kDoubleRegZero);
+
+      // Add +0 to convert -0 to +0.
+      __ add_d(double_scratch, double_base, kDoubleRegZero);
+      __ Move(double_result, 1);
+      __ sqrt_d(double_scratch, double_scratch);
+      __ div_d(double_result, double_result, double_scratch);
+      __ jmp(&done);
+    }
+
+    __ push(ra);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch2);
+      __ MovToFloatParameters(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(isolate()),
+          0, 2);
+    }
+    __ pop(ra);
+    __ MovFromFloatResult(double_result);
+    __ jmp(&done);
+
+    __ bind(&int_exponent_convert);
+  }
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+
+  // Get two copies of exponent in the registers scratch and exponent.
+  if (exponent_type() == INTEGER) {
+    __ mov(scratch, exponent);
+  } else {
+    // Exponent has previously been stored into scratch as untagged integer.
+    __ mov(exponent, scratch);
+  }
+
+  __ mov_d(double_scratch, double_base);  // Back up base.
+  __ Move(double_result, 1.0);
+
+  // Get absolute value of exponent.
+  Label positive_exponent;
+  __ Branch(&positive_exponent, ge, scratch, Operand(zero_reg));
+  __ Dsubu(scratch, zero_reg, scratch);
+  __ bind(&positive_exponent);
+
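+  // The loop below computes the power by exponentiation-by-squaring. A C
+  // sketch of the same computation (illustrative names only, not V8 API):
+  //   double result = 1.0, b = base;
+  //   for (uint64_t e = abs_exponent; e != 0; e >>= 1) {
+  //     if (e & 1) result *= b;
+  //     b *= b;
+  //   }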
+  Label while_true, no_carry, loop_end;
+  __ bind(&while_true);
+
+  __ And(scratch2, scratch, 1);
+
+  __ Branch(&no_carry, eq, scratch2, Operand(zero_reg));
+  __ mul_d(double_result, double_result, double_scratch);
+  __ bind(&no_carry);
+
+  __ dsra(scratch, scratch, 1);
+
+  __ Branch(&loop_end, eq, scratch, Operand(zero_reg));
+  __ mul_d(double_scratch, double_scratch, double_scratch);
+
+  __ Branch(&while_true);
+
+  __ bind(&loop_end);
+
+  __ Branch(&done, ge, exponent, Operand(zero_reg));
+  __ Move(double_scratch, 1.0);
+  __ div_d(double_result, double_scratch, double_result);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ BranchF(&done, NULL, ne, double_result, kDoubleRegZero);
+
+  // double_exponent may not contain the exponent value if the input was a
+  // smi.  We set it with exponent value before bailing out.
+  __ mtc1(exponent, single_scratch);
+  __ cvt_d_w(double_exponent, single_scratch);
+
+  // Returning or bailing out.
+  Counters* counters = isolate()->counters();
+  if (exponent_type() == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+
+    // The stub is called from non-optimized code, which expects the result
+    // as heap number in exponent.
+    __ bind(&done);
+    __ AllocateHeapNumber(
+        heapnumber, scratch, scratch2, heapnumbermap, &call_runtime);
+    __ sdc1(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    DCHECK(heapnumber.is(v0));
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ DropAndRet(2);
+  } else {
+    __ push(ra);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ MovToFloatParameters(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(isolate()),
+          0, 2);
+    }
+    __ pop(ra);
+    __ MovFromFloatResult(double_result);
+
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret();
+  }
+}
+
+
+bool CEntryStub::NeedsImmovableCode() {
+  return true;
+}
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+  CEntryStub::GenerateAheadOfTime(isolate);
+  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  BinaryOpICStub::GenerateAheadOfTime(isolate);
+  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+  StoreRegistersStateStub stub(isolate);
+  stub.GetCode();
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+  RestoreRegistersStateStub stub(isolate);
+  stub.GetCode();
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+  // Generate if not already in cache.
+  SaveFPRegsMode mode = kSaveFPRegs;
+  CEntryStub(isolate, 1, mode).GetCode();
+  StoreBufferOverflowStub(isolate, mode).GetCode();
+  isolate->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+  stub.GetCode();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+  // Called from JavaScript; parameters are on stack as if calling JS function
+  // s0: number of arguments including receiver
+  // s1: size of arguments excluding receiver
+  // s2: pointer to builtin function
+  // fp: frame pointer    (restored after C call)
+  // sp: stack pointer    (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
+  // The reason for this is that these arguments would need to be saved anyway
+  // so it's faster to set them up directly.
+  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
+
+  // Compute the argv pointer in a callee-saved register.
+  __ Daddu(s1, sp, s1);
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(save_doubles());
+
+  // s0: number of arguments  including receiver (C callee-saved)
+  // s1: pointer to first argument (C callee-saved)
+  // s2: pointer to builtin function (C callee-saved)
+
+  // Prepare arguments for C routine.
+  // a0 = argc
+  __ mov(a0, s0);
+  // a1 = argv (set in the delay slot after find_ra below).
+
+  // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
+  // also need to reserve the 4 argument slots on the stack.
+
+  __ AssertStackIsAligned();
+
+  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+  // To let the GC traverse the return address of the exit frames, we need to
+  // know where the return address is. The CEntryStub is unmovable, so
+  // we can store the address on the stack to be able to find it again and
+  // we never have to restore it, because it will not change.
+  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+    // This branch-and-link sequence is needed to find the current PC on mips,
+    // saved to the ra register.
+    // Use masm-> here instead of the double-underscore macro since extra
+    // coverage code can interfere with the proper calculation of ra.
+    Label find_ra;
+    masm->bal(&find_ra);  // bal exposes branch delay slot.
+    masm->mov(a1, s1);
+    masm->bind(&find_ra);
+
+    // Adjust the value in ra to point to the correct return location, 2nd
+    // instruction past the real call into C code (the jalr(t9)), and push it.
+    // This is the return address of the exit frame.
+    const int kNumInstructionsToJump = 5;
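+    // The count of 5 covers the Daddu, sd, mov, and jalr below plus the
+    // daddiu in the jalr delay slot; the DCHECK_EQ after the call sequence
+    // verifies this.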
+    masm->Daddu(ra, ra, kNumInstructionsToJump * kInt32Size);
+    masm->sd(ra, MemOperand(sp));  // This spot was reserved in EnterExitFrame.
+    // Stack space reservation moved to the branch delay slot below.
+    // Stack is still aligned.
+
+    // Call the C routine.
+    masm->mov(t9, s2);  // Function pointer to t9 to conform to ABI for PIC.
+    masm->jalr(t9);
+    // Set up sp in the delay slot.
+    masm->daddiu(sp, sp, -kCArgsSlotsSize);
+    // Make sure the stored 'ra' points to this position.
+    DCHECK_EQ(kNumInstructionsToJump,
+              masm->InstructionsGeneratedSince(&find_ra));
+  }
+
+  // Runtime functions should not return 'the hole'.  Allowing it to escape may
+  // lead to crashes in the IC code later.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+    __ Branch(&okay, ne, v0, Operand(a4));
+    __ stop("The hole escaped");
+    __ bind(&okay);
+  }
+
+  // Check result for exception sentinel.
+  Label exception_returned;
+  __ LoadRoot(a4, Heap::kExceptionRootIndex);
+  __ Branch(&exception_returned, eq, a4, Operand(v0));
+
+  ExternalReference pending_exception_address(
+      Isolate::kPendingExceptionAddress, isolate());
+
+  // Check that there is no pending exception, otherwise we
+  // should have returned the exception sentinel.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ li(a2, Operand(pending_exception_address));
+    __ ld(a2, MemOperand(a2));
+    __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+    // Cannot use check here as it attempts to generate call into runtime.
+    __ Branch(&okay, eq, a4, Operand(a2));
+    __ stop("Unexpected pending exception");
+    __ bind(&okay);
+  }
+
+  // Exit C frame and return.
+  // v0:v1: result
+  // sp: stack pointer
+  // fp: frame pointer
+  // s0: still holds argc (callee-saved).
+  __ LeaveExitFrame(save_doubles(), s0, true, EMIT_RETURN);
+
+  // Handling of exception.
+  __ bind(&exception_returned);
+
+  // Retrieve the pending exception.
+  __ li(a2, Operand(pending_exception_address));
+  __ ld(v0, MemOperand(a2));
+
+  // Clear the pending exception.
+  __ li(a3, Operand(isolate()->factory()->the_hole_value()));
+  __ sd(a3, MemOperand(a2));
+
+  // Special handling of termination exceptions, which are uncatchable by
+  // JavaScript code.
+  Label throw_termination_exception;
+  __ LoadRoot(a4, Heap::kTerminationExceptionRootIndex);
+  __ Branch(&throw_termination_exception, eq, v0, Operand(a4));
+
+  // Handle normal exception.
+  __ Throw(v0);
+
+  __ bind(&throw_termination_exception);
+  __ ThrowUncatchable(v0);
+}
+
+
+void JSEntryStub::Generate(MacroAssembler* masm) {
+  Label invoke, handler_entry, exit;
+  Isolate* isolate = masm->isolate();
+
+  // TODO(plind): unify the ABI description here.
+  // Registers:
+  // a0: entry address
+  // a1: function
+  // a2: receiver
+  // a3: argc
+  // a4: argv (on mips64)
+
+  // Stack:
+  // 0 arg slots on mips64 (4 args slots on mips)
+  // args -- in a4 on mips64, on the stack on mips
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Save callee saved registers on the stack.
+  __ MultiPush(kCalleeSaved | ra.bit());
+
+  // Save callee-saved FPU registers.
+  __ MultiPushFPU(kCalleeSavedFPU);
+  // Set up the reserved register for 0.0.
+  __ Move(kDoubleRegZero, 0.0);
+
+  // Load argv in s0 register.
+  if (kMipsAbi == kN64) {
+    __ mov(s0, a4);  // 5th parameter is in the a4 register on mips64.
+  } else {  // O32 ABI.
+    // 5th parameter is on the stack for the O32 ABI.
+    int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize;
+    offset_to_argv += kNumCalleeSavedFPU * kDoubleSize;
+    __ ld(s0, MemOperand(sp, offset_to_argv + kCArgsSlotsSize));
+  }
+
+  __ InitializeRootRegister();
+
+  // We build an EntryFrame.
+  __ li(a7, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  int marker = type();
+  __ li(a6, Operand(Smi::FromInt(marker)));
+  __ li(a5, Operand(Smi::FromInt(marker)));
+  ExternalReference c_entry_fp(Isolate::kCEntryFPAddress, isolate);
+  __ li(a4, Operand(c_entry_fp));
+  __ ld(a4, MemOperand(a4));
+  __ Push(a7, a6, a5, a4);
+  // Set up frame pointer for the frame to be pushed.
+  __ daddiu(fp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // caller fp          |
+  // function slot      | entry frame
+  // context slot       |
+  // bad fp (0xff...f)  |
+  // callee saved registers + ra
+  // [ O32: 4 args slots]
+  // args
+
+  // If this is the outermost JS call, set js_entry_sp value.
+  Label non_outermost_js;
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate);
+  __ li(a5, Operand(ExternalReference(js_entry_sp)));
+  __ ld(a6, MemOperand(a5));
+  __ Branch(&non_outermost_js, ne, a6, Operand(zero_reg));
+  __ sd(fp, MemOperand(a5));
+  __ li(a4, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  Label cont;
+  __ b(&cont);
+  __ nop();   // Branch delay slot nop.
+  __ bind(&non_outermost_js);
+  __ li(a4, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)));
+  __ bind(&cont);
+  __ push(a4);
+
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ jmp(&invoke);
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.  Coming in here the
+  // fp will be invalid because the PushTryHandler below sets it to 0 to
+  // signal the existence of the JSEntry frame.
+  __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      isolate)));
+  __ sd(v0, MemOperand(a4));  // We come back from 'invoke'. result is in v0.
+  __ LoadRoot(v0, Heap::kExceptionRootIndex);
+  __ b(&exit);  // b exposes branch delay slot.
+  __ nop();   // Branch delay slot nop.
+
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
+  __ bind(&invoke);
+  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the bal(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ LoadRoot(a5, Heap::kTheHoleValueRootIndex);
+  __ li(a4, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      isolate)));
+  __ sd(a5, MemOperand(a4));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Registers:
+  // a0: entry_address
+  // a1: function
+  // a2: receiver_pointer
+  // a3: argc
+  // s0: argv
+  //
+  // Stack:
+  // handler frame
+  // entry frame
+  // callee saved registers + ra
+  // [ O32: 4 args slots]
+  // args
+
+  if (type() == StackFrame::ENTRY_CONSTRUCT) {
+    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+                                      isolate);
+    __ li(a4, Operand(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::kJSEntryTrampoline, masm->isolate());
+    __ li(a4, Operand(entry));
+  }
+  __ ld(t9, MemOperand(a4));  // Deref address.
+  // Call JSEntryTrampoline.
+  __ daddiu(t9, t9, Code::kHeaderSize - kHeapObjectTag);
+  __ Call(t9);
+
+  // Unlink this frame from the handler chain.
+  __ PopTryHandler();
+
+  __ bind(&exit);  // v0 holds result
+  // Check if the current stack frame is marked as the outermost JS frame.
+  Label non_outermost_js_2;
+  __ pop(a5);
+  __ Branch(&non_outermost_js_2,
+            ne,
+            a5,
+            Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)));
+  __ li(a5, Operand(ExternalReference(js_entry_sp)));
+  __ sd(zero_reg, MemOperand(a5));
+  __ bind(&non_outermost_js_2);
+
+  // Restore the top frame descriptors from the stack.
+  __ pop(a5);
+  __ li(a4, Operand(ExternalReference(Isolate::kCEntryFPAddress,
+                                      isolate)));
+  __ sd(a5, MemOperand(a4));
+
+  // Reset the stack to the callee saved registers.
+  __ daddiu(sp, sp, -EntryFrameConstants::kCallerFPOffset);
+
+  // Restore callee-saved fpu registers.
+  __ MultiPopFPU(kCalleeSavedFPU);
+
+  // Restore callee saved registers from the stack.
+  __ MultiPop(kCalleeSaved | ra.bit());
+  // Return.
+  __ Jump(ra);
+}
+
+
+// Uses registers a0 to a4.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: a0 or at sp + 1 * kPointerSize.
+// * function: a1 or at sp.
+//
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed on the stack,
+// in the safepoint slot for register a4.
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Call site inlining and patching implies arguments in registers.
+  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+  // ReturnTrueFalse is only implemented for inlined call sites.
+  DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
+
+  // Fixed register usage throughout the stub:
+  const Register object = a0;  // Object (lhs).
+  Register map = a3;  // Map of the object.
+  const Register function = a1;  // Function (rhs).
+  const Register prototype = a4;  // Prototype of the function.
+  const Register inline_site = t1;
+  const Register scratch = a2;
+
+  const int32_t kDeltaToLoadBoolResult = 7 * Assembler::kInstrSize;
+
+  Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+  if (!HasArgsInRegisters()) {
+    __ ld(object, MemOperand(sp, 1 * kPointerSize));
+    __ ld(function, MemOperand(sp, 0));
+  }
+
+  // Check that the left hand is a JS object and load map.
+  __ JumpIfSmi(object, &not_js_object);
+  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+  // If there is a call site cache, don't look in the global cache, but do
+  // the real lookup and update the call site cache.
+  if (!HasCallSiteInlineCheck()) {
+    Label miss;
+    __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
+    __ Branch(&miss, ne, function, Operand(at));
+    __ LoadRoot(at, Heap::kInstanceofCacheMapRootIndex);
+    __ Branch(&miss, ne, map, Operand(at));
+    __ LoadRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+    __ bind(&miss);
+  }
+
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+
+  // Check that the function prototype is a JS object.
+  __ JumpIfSmi(prototype, &slow);
+  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+  // Update the global instanceof or call site inlined cache with the current
+  // map and function. The cached answer will be set when it is known below.
+  if (!HasCallSiteInlineCheck()) {
+    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+  } else {
+    DCHECK(HasArgsInRegisters());
+    // Patch the (relocated) inlined map check.
+
+    // The offset was stored in a4 safepoint slot.
+    // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+    __ LoadFromSafepointRegisterSlot(scratch, a4);
+    __ Dsubu(inline_site, ra, scratch);
+    // Get the map location in scratch and patch it.
+    __ GetRelocatedValue(inline_site, scratch, v1);  // v1 used as scratch.
+    __ sd(map, FieldMemOperand(scratch, Cell::kValueOffset));
+  }
+
+  // Register mapping: a3 is object map and a4 is function prototype.
+  // Get prototype of object into a2.
+  __ ld(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+  // We don't need map any more. Use it as a scratch register.
+  Register scratch2 = map;
+  map = no_reg;
+
+  // Loop through the prototype chain looking for the function prototype.
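+  // In pseudo-code (a sketch of the loop below, not real API):
+  //   for (p = map->prototype; ; p = p->map->prototype) {
+  //     if (p == prototype) goto is_instance;
+  //     if (p == null) goto is_not_instance;
+  //   }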
+  __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
+  __ bind(&loop);
+  __ Branch(&is_instance, eq, scratch, Operand(prototype));
+  __ Branch(&is_not_instance, eq, scratch, Operand(scratch2));
+  __ ld(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ ld(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+  __ Branch(&loop);
+
+  __ bind(&is_instance);
+  DCHECK(Smi::FromInt(0) == 0);
+  if (!HasCallSiteInlineCheck()) {
+    __ mov(v0, zero_reg);
+    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    // Patch the call site to return true.
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      DCHECK_EQ(Smi::FromInt(0), 0);
+      __ mov(v0, zero_reg);
+    }
+  }
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&is_not_instance);
+  if (!HasCallSiteInlineCheck()) {
+    __ li(v0, Operand(Smi::FromInt(1)));
+    __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+  } else {
+    // Patch the call site to return false.
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ Daddu(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ PatchRelocatedValue(inline_site, scratch, v0);
+
+    if (!ReturnTrueFalseObject()) {
+      __ li(v0, Operand(Smi::FromInt(1)));
+    }
+  }
+
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  Label object_not_null, object_not_null_or_smi;
+  __ bind(&not_js_object);
+  // Before the null, smi and string value checks, check that the rhs is a
+  // function; for a non-function rhs an exception needs to be thrown.
+  __ JumpIfSmi(function, &slow);
+  __ GetObjectType(function, scratch2, scratch);
+  __ Branch(&slow, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+  // Null is not instance of anything.
+  __ Branch(&object_not_null,
+            ne,
+            scratch,
+            Operand(isolate()->factory()->null_value()));
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null);
+  // Smi values are not instances of anything.
+  __ JumpIfNotSmi(object, &object_not_null_or_smi);
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null_or_smi);
+  // String values are not instances of anything.
+  __ IsObjectJSStringType(object, scratch, &slow);
+  __ li(v0, Operand(Smi::FromInt(1)));
+  __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+
+  // Slow-case.  Tail call builtin.
+  __ bind(&slow);
+  if (!ReturnTrueFalseObject()) {
+    if (HasArgsInRegisters()) {
+      __ Push(a0, a1);
+    }
+  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+  } else {
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(a0, a1);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
+    __ mov(a0, v0);
+    __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2, eq, a0, Operand(zero_reg));
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
+  }
+}
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+  Label miss;
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
+                                                          a4, &miss);
+  __ bind(&miss);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  // The displacement is the offset of the last parameter (if any)
+  // relative to the frame pointer.
+  const int kDisplacement =
+      StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  DCHECK(a1.is(ArgumentsAccessReadDescriptor::index()));
+  DCHECK(a0.is(ArgumentsAccessReadDescriptor::parameter_count()));
+
+  // Check that the key is a smi.
+  Label slow;
+  __ JumpIfNotSmi(a1, &slow);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor,
+            eq,
+            a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Check index (a1) against formal parameters count limit passed in
+  // through register a0. Use unsigned comparison to get negative
+  // check for free.
+  __ Branch(&slow, hs, a1, Operand(a0));
+
+  // Read the argument from the stack and return it.
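+  // The address computed is
+  //   fp + (parameter_count - index) * kPointerSize + kDisplacement,
+  // where the smi difference (a0 - a1) is scaled to a byte offset.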
+  __ dsubu(a3, a0, a1);
+  __ SmiScale(a7, a3, kPointerSizeLog2);
+  __ Daddu(a3, fp, Operand(a7));
+  __ Ret(USE_DELAY_SLOT);
+  __ ld(v0, MemOperand(a3, kDisplacement));
+
+  // Arguments adaptor case: Check index (a1) against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ ld(a0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ Branch(&slow, Ugreater_equal, a1, Operand(a0));
+
+  // Read the argument from the adaptor frame and return it.
+  __ dsubu(a3, a0, a1);
+  __ SmiScale(a7, a3, kPointerSizeLog2);
+  __ Daddu(a3, a2, Operand(a7));
+  __ Ret(USE_DELAY_SLOT);
+  __ ld(v0, MemOperand(a3, kDisplacement));
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ push(a1);
+  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+  __ Branch(&runtime,
+            ne,
+            a2,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Patch the arguments.length and the parameters pointer in the current
+  // frame.
+  __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sd(a2, MemOperand(sp, 0 * kPointerSize));
+  __ SmiScale(a7, a2, kPointerSizeLog2);
+  __ Daddu(a3, a3, Operand(a7));
+  __ daddiu(a3, a3, StandardFrameConstants::kCallerSPOffset);
+  __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+  // Stack layout:
+  //  sp[0] : number of parameters (tagged)
+  //  sp[4] : address of receiver argument
+  //  sp[8] : function
+  // Registers used over whole function:
+  //  a6 : allocated object (tagged)
+  //  t1 : mapped parameter count (tagged)
+
+  __ ld(a1, MemOperand(sp, 0 * kPointerSize));
+  // a1 = parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  Label adaptor_frame, try_allocate;
+  __ ld(a3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a2, MemOperand(a3, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor_frame,
+            eq,
+            a2,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // No adaptor, parameter count = argument count.
+  __ mov(a2, a1);
+  __ Branch(&try_allocate);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ ld(a2, MemOperand(a3, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiScale(t2, a2, kPointerSizeLog2);
+  __ Daddu(a3, a3, Operand(t2));
+  __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+
+  // a1 = parameter count (tagged)
+  // a2 = argument count (tagged)
+  // Compute the mapped parameter count = min(a1, a2) in a1.
+  Label skip_min;
+  __ Branch(&skip_min, lt, a1, Operand(a2));
+  __ mov(a1, a2);
+  __ bind(&skip_min);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. The parameter map has 2 extra words containing context and backing
+  //    store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  Label param_map_size;
+  DCHECK_EQ(0, Smi::FromInt(0));
+  __ Branch(USE_DELAY_SLOT, &param_map_size, eq, a1, Operand(zero_reg));
+  __ mov(t1, zero_reg);  // In delay slot: param map size = 0 when a1 == 0.
+  __ SmiScale(t1, a1, kPointerSizeLog2);
+  __ daddiu(t1, t1, kParameterMapHeaderSize);
+  __ bind(&param_map_size);
+
+  // 2. Backing store.
+  __ SmiScale(t2, a2, kPointerSizeLog2);
+  __ Daddu(t1, t1, Operand(t2));
+  __ Daddu(t1, t1, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ Daddu(t1, t1, Operand(Heap::kSloppyArgumentsObjectSize));
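+  // t1 now holds the total allocation size in bytes: the (possibly empty)
+  // parameter map plus the backing store plus the arguments object itself.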
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(t1, v0, a3, a4, &runtime, TAG_OBJECT);
+
+  // v0 = address of new object(s) (tagged)
+  // a2 = argument count (smi-tagged)
+  // Get the arguments boilerplate from the current native context into a4.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
+
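+  // Select the map: the normal sloppy arguments map when there are no mapped
+  // parameters (a1 == 0), the aliased one otherwise.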
+  __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
+  Label skip2_ne, skip2_eq;
+  __ Branch(&skip2_ne, ne, a1, Operand(zero_reg));
+  __ ld(a4, MemOperand(a4, kNormalOffset));
+  __ bind(&skip2_ne);
+
+  __ Branch(&skip2_eq, eq, a1, Operand(zero_reg));
+  __ ld(a4, MemOperand(a4, kAliasedOffset));
+  __ bind(&skip2_eq);
+
+  // v0 = address of new object (tagged)
+  // a1 = mapped parameter count (tagged)
+  // a2 = argument count (smi-tagged)
+  // a4 = address of arguments map (tagged)
+  __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  // Set up the callee in-object property.
+  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+  __ ld(a3, MemOperand(sp, 2 * kPointerSize));
+  __ AssertNotSmi(a3);
+  const int kCalleeOffset = JSObject::kHeaderSize +
+      Heap::kArgumentsCalleeIndex * kPointerSize;
+  __ sd(a3, FieldMemOperand(v0, kCalleeOffset));
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  const int kLengthOffset = JSObject::kHeaderSize +
+      Heap::kArgumentsLengthIndex * kPointerSize;
+  __ sd(a2, FieldMemOperand(v0, kLengthOffset));
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, a4 will point there, otherwise
+  // it will point to the backing store.
+  __ Daddu(a4, v0, Operand(Heap::kSloppyArgumentsObjectSize));
+  __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  // v0 = address of new object (tagged)
+  // a1 = mapped parameter count (tagged)
+  // a2 = argument count (tagged)
+  // a4 = address of parameter map or backing store (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map;
+  Label skip3;
+  __ Branch(&skip3, ne, a1, Operand(Smi::FromInt(0)));
+  // Move backing store address to a3, because it is
+  // expected there when filling in the unmapped arguments.
+  __ mov(a3, a4);
+  __ bind(&skip3);
+
+  __ Branch(&skip_parameter_map, eq, a1, Operand(Smi::FromInt(0)));
+
+  __ LoadRoot(a6, Heap::kSloppyArgumentsElementsMapRootIndex);
+  __ sd(a6, FieldMemOperand(a4, FixedArray::kMapOffset));
+  __ Daddu(a6, a1, Operand(Smi::FromInt(2)));
+  __ sd(a6, FieldMemOperand(a4, FixedArray::kLengthOffset));
+  __ sd(cp, FieldMemOperand(a4, FixedArray::kHeaderSize + 0 * kPointerSize));
+  __ SmiScale(t2, a1, kPointerSizeLog2);
+  __ Daddu(a6, a4, Operand(t2));
+  __ Daddu(a6, a6, Operand(kParameterMapHeaderSize));
+  __ sd(a6, FieldMemOperand(a4, FixedArray::kHeaderSize + 1 * kPointerSize));
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
+  Label parameters_loop, parameters_test;
+  __ mov(a6, a1);
+  __ ld(t1, MemOperand(sp, 0 * kPointerSize));
+  __ Daddu(t1, t1, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS)));
+  __ Dsubu(t1, t1, Operand(a1));
+  __ LoadRoot(a7, Heap::kTheHoleValueRootIndex);
+  __ SmiScale(t2, a6, kPointerSizeLog2);
+  __ Daddu(a3, a4, Operand(t2));
+  __ Daddu(a3, a3, Operand(kParameterMapHeaderSize));
+
+  // a6 = loop variable (tagged)
+  // a1 = mapping index (tagged)
+  // a3 = address of backing store (tagged)
+  // a4 = address of parameter map (tagged)
+  // a5 = temporary scratch (a.o., for address calculation)
+  // a7 = the hole value
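+  //
+  // Each loop iteration stores the current context index (t1, a smi) into a
+  // parameter map slot and the hole into the matching backing store slot,
+  // walking from the last mapped parameter down to the first.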
+  __ jmp(&parameters_test);
+
+  __ bind(&parameters_loop);
+
+  __ Dsubu(a6, a6, Operand(Smi::FromInt(1)));
+  __ SmiScale(a5, a6, kPointerSizeLog2);
+  __ Daddu(a5, a5, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ Daddu(t2, a4, a5);
+  __ sd(t1, MemOperand(t2));
+  __ Dsubu(a5, a5, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ Daddu(t2, a3, a5);
+  __ sd(a7, MemOperand(t2));
+  __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+  __ bind(&parameters_test);
+  __ Branch(&parameters_loop, ne, a6, Operand(Smi::FromInt(0)));
+
+  __ bind(&skip_parameter_map);
+  // a2 = argument count (tagged)
+  // a3 = address of backing store (tagged)
+  // a5 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(a5, Heap::kFixedArrayMapRootIndex);
+  __ sd(a5, FieldMemOperand(a3, FixedArray::kMapOffset));
+  __ sd(a2, FieldMemOperand(a3, FixedArray::kLengthOffset));
+
+  Label arguments_loop, arguments_test;
+  __ mov(t1, a1);
+  __ ld(a4, MemOperand(sp, 1 * kPointerSize));
+  __ SmiScale(t2, t1, kPointerSizeLog2);
+  __ Dsubu(a4, a4, Operand(t2));
+  __ jmp(&arguments_test);
+
+  __ bind(&arguments_loop);
+  __ Dsubu(a4, a4, Operand(kPointerSize));
+  __ ld(a6, MemOperand(a4, 0));
+  __ SmiScale(t2, t1, kPointerSizeLog2);
+  __ Daddu(a5, a3, Operand(t2));
+  __ sd(a6, FieldMemOperand(a5, FixedArray::kHeaderSize));
+  __ Daddu(t1, t1, Operand(Smi::FromInt(1)));
+
+  __ bind(&arguments_test);
+  __ Branch(&arguments_loop, lt, t1, Operand(a2));
+
+  // Return and remove the on-stack parameters.
+  __ DropAndRet(3);
+
+  // Do the runtime call to allocate the arguments object.
+  // a2 = argument count (tagged)
+  __ bind(&runtime);
+  __ sd(a2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+  // Return address is in ra.
+  Label slow;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+
+  // Check that the key is an array index, that is Uint32.
+  __ And(t0, key, Operand(kSmiTagMask | kSmiSignMask));
+  __ Branch(&slow, ne, t0, Operand(zero_reg));
+
+  // Everything is fine, call runtime.
+  __ Push(receiver, key);  // Receiver, key.
+
+  // Perform tail call to the entry.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+                        masm->isolate()),
+      2, 1);
+
+  __ bind(&slow);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&adaptor_frame,
+            eq,
+            a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Get the length from the frame.
+  __ ld(a1, MemOperand(sp, 0));
+  __ Branch(&try_allocate);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ ld(a1, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ sd(a1, MemOperand(sp, 0));
+  __ SmiScale(at, a1, kPointerSizeLog2);
+
+  __ Daddu(a3, a2, Operand(at));
+
+  __ Daddu(a3, a3, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ sd(a3, MemOperand(sp, 1 * kPointerSize));
+
+  // Try the new space allocation. Start out with computing the size
+  // of the arguments object and the elements array in words.
+  Label add_arguments_object;
+  __ bind(&try_allocate);
+  __ Branch(&add_arguments_object, eq, a1, Operand(zero_reg));
+  __ SmiUntag(a1);
+
+  __ Daddu(a1, a1, Operand(FixedArray::kHeaderSize / kPointerSize));
+  __ bind(&add_arguments_object);
+  __ Daddu(a1, a1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+
+  // Do the allocation of both objects in one go.
+  __ Allocate(a1, v0, a2, a3, &runtime,
+              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
+
+  // Get the arguments boilerplate from the current native context.
+  __ ld(a4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ ld(a4, FieldMemOperand(a4, GlobalObject::kNativeContextOffset));
+  __ ld(a4, MemOperand(a4, Context::SlotOffset(
+      Context::STRICT_ARGUMENTS_MAP_INDEX)));
+
+  __ sd(a4, FieldMemOperand(v0, JSObject::kMapOffset));
+  __ LoadRoot(a3, Heap::kEmptyFixedArrayRootIndex);
+  __ sd(a3, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sd(a3, FieldMemOperand(v0, JSObject::kElementsOffset));
+
+  // Get the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  __ ld(a1, MemOperand(sp, 0 * kPointerSize));
+  __ AssertSmi(a1);
+  __ sd(a1, FieldMemOperand(v0, JSObject::kHeaderSize +
+      Heap::kArgumentsLengthIndex * kPointerSize));
+
+  Label done;
+  __ Branch(&done, eq, a1, Operand(zero_reg));
+
+  // Get the parameters pointer from the stack.
+  __ ld(a2, MemOperand(sp, 1 * kPointerSize));
+
+  // Set up the elements pointer in the allocated arguments object and
+  // initialize the header in the elements fixed array.
+  __ Daddu(a4, v0, Operand(Heap::kStrictArgumentsObjectSize));
+  __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ LoadRoot(a3, Heap::kFixedArrayMapRootIndex);
+  __ sd(a3, FieldMemOperand(a4, FixedArray::kMapOffset));
+  __ sd(a1, FieldMemOperand(a4, FixedArray::kLengthOffset));
+  // Untag the length for the loop.
+  __ SmiUntag(a1);
+
+
+  // Copy the fixed array slots.
+  Label loop;
+  // Set up a4 to point to the first array slot.
+  __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ bind(&loop);
+  // Pre-decrement a2 with kPointerSize on each iteration.
+  // Pre-decrement in order to skip receiver.
+  __ Daddu(a2, a2, Operand(-kPointerSize));
+  __ ld(a3, MemOperand(a2));
+  // Post-increment a4 with kPointerSize on each iteration.
+  __ sd(a3, MemOperand(a4));
+  __ Daddu(a4, a4, Operand(kPointerSize));
+  __ Dsubu(a1, a1, Operand(1));
+  __ Branch(&loop, ne, a1, Operand(zero_reg));
+
+  // Return and remove the on-stack parameters.
+  __ bind(&done);
+  __ DropAndRet(3);
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+  // Jump directly to the runtime if native RegExp is not selected at compile
+  // time, or if the regexp entry in generated code has been turned off by a
+  // runtime switch.
+#ifdef V8_INTERPRETED_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+#else  // V8_INTERPRETED_REGEXP
+
+  // Stack frame on entry.
+  //  sp[0]: last_match_info (expected JSArray)
+  //  sp[4]: previous index
+  //  sp[8]: subject string
+  //  sp[12]: JSRegExp object
+
+  const int kLastMatchInfoOffset = 0 * kPointerSize;
+  const int kPreviousIndexOffset = 1 * kPointerSize;
+  const int kSubjectOffset = 2 * kPointerSize;
+  const int kJSRegExpOffset = 3 * kPointerSize;
+
+  Label runtime;
+  // Allocation of registers for this function. These are in callee save
+  // registers and will be preserved by the call to the native RegExp code, as
+  // this code is called using the normal C calling convention. When calling
+  // directly from generated code the native RegExp code will not do a GC and
+  // therefore the contents of these registers are safe to use after the call.
+  // MIPS - using s0..s2, since we are not using CEntry Stub.
+  Register subject = s0;
+  Register regexp_data = s1;
+  Register last_match_info_elements = s2;
+
+  // Ensure that a RegExp stack is allocated.
+  ExternalReference address_of_regexp_stack_memory_address =
+      ExternalReference::address_of_regexp_stack_memory_address(
+          isolate());
+  ExternalReference address_of_regexp_stack_memory_size =
+      ExternalReference::address_of_regexp_stack_memory_size(isolate());
+  __ li(a0, Operand(address_of_regexp_stack_memory_size));
+  __ ld(a0, MemOperand(a0, 0));
+  __ Branch(&runtime, eq, a0, Operand(zero_reg));
+
+  // Check that the first argument is a JSRegExp object.
+  __ ld(a0, MemOperand(sp, kJSRegExpOffset));
+  STATIC_ASSERT(kSmiTag == 0);
+  __ JumpIfSmi(a0, &runtime);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&runtime, ne, a1, Operand(JS_REGEXP_TYPE));
+
+  // Check that the RegExp has been compiled (data contains a fixed array).
+  __ ld(regexp_data, FieldMemOperand(a0, JSRegExp::kDataOffset));
+  if (FLAG_debug_code) {
+    __ SmiTst(regexp_data, a4);
+    __ Check(nz,
+             kUnexpectedTypeForRegExpDataFixedArrayExpected,
+             a4,
+             Operand(zero_reg));
+    __ GetObjectType(regexp_data, a0, a0);
+    __ Check(eq,
+             kUnexpectedTypeForRegExpDataFixedArrayExpected,
+             a0,
+             Operand(FIXED_ARRAY_TYPE));
+  }
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+  __ ld(a0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+  __ Branch(&runtime, ne, a0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the number of captures fit in the static offsets vector buffer.
+  __ ld(a2,
+         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Check (number_of_captures + 1) * 2 <= offsets vector size
+  // Or          number_of_captures * 2 <= offsets vector size - 2
+  // Or          number_of_captures     <= offsets vector size / 2 - 1
+  // Multiplying by 2 comes for free since a2 is smi-tagged.
+  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+  int temp = Isolate::kJSRegexpStaticOffsetsVectorSize / 2 - 1;
+  __ Branch(&runtime, hi, a2, Operand(Smi::FromInt(temp)));
+
+  // Reset offset for possibly sliced string.
+  __ mov(t0, zero_reg);
+  __ ld(subject, MemOperand(sp, kSubjectOffset));
+  __ JumpIfSmi(subject, &runtime);
+  __ mov(a3, subject);  // Make a copy of the original subject string.
+  __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  // subject: subject string
+  // a3: subject string
+  // a0: subject string instance type
+  // regexp_data: RegExp data (FixedArray)
+  // Handle subject string according to its encoding and representation:
+  // (1) Sequential string?  If yes, go to (5).
+  // (2) Anything but sequential or cons?  If yes, go to (6).
+  // (3) Cons string.  If the string is flat, replace subject with first string.
+  //     Otherwise bail out.
+  // (4) Is subject external?  If yes, go to (7).
+  // (5) Sequential string.  Load regexp code according to encoding.
+  // (E) Carry on.
+  /// [...]
+
+  // Deferred code at the end of the stub:
+  // (6) Not a long external string?  If yes, go to (8).
+  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  //     Go to (5).
+  // (8) Short external string or not a string?  If yes, bail out to runtime.
+  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+
+  Label check_underlying;   // (4)
+  Label seq_string;         // (5)
+  Label not_seq_nor_cons;   // (6)
+  Label external_string;    // (7)
+  Label not_long_external;  // (8)
+
+  // (1) Sequential string?  If yes, go to (5).
+  __ And(a1,
+         a0,
+         Operand(kIsNotStringMask |
+                 kStringRepresentationMask |
+                 kShortExternalStringMask));
+  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+  __ Branch(&seq_string, eq, a1, Operand(zero_reg));  // Go to (5).
+
+  // (2) Anything but sequential or cons?  If yes, go to (6).
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+  // Go to (6).
+  __ Branch(&not_seq_nor_cons, ge, a1, Operand(kExternalStringTag));
+
+  // (3) Cons string.  Check that it's flat.
+  // Replace subject with first string and reload instance type.
+  __ ld(a0, FieldMemOperand(subject, ConsString::kSecondOffset));
+  __ LoadRoot(a1, Heap::kempty_stringRootIndex);
+  __ Branch(&runtime, ne, a0, Operand(a1));
+  __ ld(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+  // (4) Is subject external?  If yes, go to (7).
+  __ bind(&check_underlying);
+  __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(at, a0, Operand(kStringRepresentationMask));
+  // The underlying external string is never a short external string.
+  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+  __ Branch(&external_string, ne, at, Operand(zero_reg));  // Go to (7).
+
+  // (5) Sequential string.  Load regexp code according to encoding.
+  __ bind(&seq_string);
+  // subject: sequential subject string (or look-alike, external string)
+  // a3: original subject string
+  // Load previous index and check range before a3 is overwritten.  We have to
+  // use a3 instead of subject here because subject might have been only made
+  // to look like a sequential string when it actually is an external string.
+  __ ld(a1, MemOperand(sp, kPreviousIndexOffset));
+  __ JumpIfNotSmi(a1, &runtime);
+  __ ld(a3, FieldMemOperand(a3, String::kLengthOffset));
+  __ Branch(&runtime, ls, a3, Operand(a1));
+  __ SmiUntag(a1);
+
+  STATIC_ASSERT(kStringEncodingMask == 4);
+  STATIC_ASSERT(kOneByteStringTag == 4);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ And(a0, a0, Operand(kStringEncodingMask));  // Non-zero for one_byte.
+  __ ld(t9, FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
+  __ dsra(a3, a0, 2);  // a3 is 1 for one_byte, 0 for UC16 (used below).
+  __ ld(a5, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+  __ Movz(t9, a5, a0);  // If UC16 (a0 is 0), replace t9 with the UC16 code.
+
+  // (E) Carry on.  String handling is done.
+  // t9: irregexp code
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains a smi (code flushing support).
+  __ JumpIfSmi(t9, &runtime);
+
+  // a1: previous index
+  // a3: encoding of subject string (1 if one_byte, 0 if two_byte);
+  // t9: code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // All checks done. Now push arguments for native regexp code.
+  __ IncrementCounter(isolate()->counters()->regexp_entry_native(),
+                      1, a0, a2);
+
+  // Isolates: note we add an additional parameter here (isolate pointer).
+  const int kRegExpExecuteArguments = 9;
+  const int kParameterRegisters = (kMipsAbi == kN64) ? 8 : 4;
+  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
+
+  // Stack pointer now points to cell where return address is to be written.
+  // Arguments are before that on the stack or in registers, meaning we
+  // treat the return address as argument 5. Thus every argument after that
+  // needs to be shifted back by 1. Since DirectCEntryStub will handle
+  // allocating space for the c argument slots, we don't need to calculate
+  // that into the argument positions on the stack. This is how the stack will
+  // look (sp meaning the value of sp at this moment):
+  // Abi n64:
+  //   [sp + 1] - Argument 9
+  //   [sp + 0] - saved ra
+  // Abi O32:
+  //   [sp + 5] - Argument 9
+  //   [sp + 4] - Argument 8
+  //   [sp + 3] - Argument 7
+  //   [sp + 2] - Argument 6
+  //   [sp + 1] - Argument 5
+  //   [sp + 0] - saved ra
+
+  if (kMipsAbi == kN64) {
+    // Argument 9: Pass current isolate address.
+    __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+    __ sd(a0, MemOperand(sp, 1 * kPointerSize));
+
+    // Argument 8: Indicate that this is a direct call from JavaScript.
+    __ li(a7, Operand(1));
+
+    // Argument 7: Start (high end) of backtracking stack memory area.
+    __ li(a0, Operand(address_of_regexp_stack_memory_address));
+    __ ld(a0, MemOperand(a0, 0));
+    __ li(a2, Operand(address_of_regexp_stack_memory_size));
+    __ ld(a2, MemOperand(a2, 0));
+    __ daddu(a6, a0, a2);
+
+    // Argument 6: Set the number of capture registers to zero to force global
+    // regexps to behave as non-global. This does not affect non-global regexps.
+    __ mov(a5, zero_reg);
+
+    // Argument 5: static offsets vector buffer.
+    __ li(a4, Operand(
+          ExternalReference::address_of_static_offsets_vector(isolate())));
+  } else {  // O32.
+    DCHECK(kMipsAbi == kO32);
+
+    // Argument 9: Pass current isolate address.
+    // CFunctionArgumentOperand handles MIPS stack argument slots.
+    __ li(a0, Operand(ExternalReference::isolate_address(isolate())));
+    __ sd(a0, MemOperand(sp, 5 * kPointerSize));
+
+    // Argument 8: Indicate that this is a direct call from JavaScript.
+    __ li(a0, Operand(1));
+    __ sd(a0, MemOperand(sp, 4 * kPointerSize));
+
+    // Argument 7: Start (high end) of backtracking stack memory area.
+    __ li(a0, Operand(address_of_regexp_stack_memory_address));
+    __ ld(a0, MemOperand(a0, 0));
+    __ li(a2, Operand(address_of_regexp_stack_memory_size));
+    __ ld(a2, MemOperand(a2, 0));
+    __ daddu(a0, a0, a2);
+    __ sd(a0, MemOperand(sp, 3 * kPointerSize));
+
+    // Argument 6: Set the number of capture registers to zero to force global
+    // regexps to behave as non-global. This does not affect non-global regexps.
+    __ mov(a0, zero_reg);
+    __ sd(a0, MemOperand(sp, 2 * kPointerSize));
+
+    // Argument 5: static offsets vector buffer.
+    __ li(a0, Operand(
+          ExternalReference::address_of_static_offsets_vector(isolate())));
+    __ sd(a0, MemOperand(sp, 1 * kPointerSize));
+  }
+
+  // For arguments 4 and 3, get the string length, calculate the start of the
+  // string data, and calculate the shift of the index (0 for one-byte and 1
+  // for two-byte).
+  __ Daddu(t2, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+  __ Xor(a3, a3, Operand(1));  // 1 for 2-byte str, 0 for 1-byte.
+  // Load the length from the original subject string from the previous stack
+  // frame. Therefore we have to use fp, which points exactly to two pointer
+  // sizes below the previous sp. (Because creating a new stack frame pushes
+  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
+  __ ld(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+  // If slice offset is not 0, load the length from the original sliced string.
+  // Argument 4, a3: End of string data
+  // Argument 3, a2: Start of string data
+  // Prepare start and end index of the input.
+  __ dsllv(t1, t0, a3);
+  __ daddu(t0, t2, t1);
+  __ dsllv(t1, a1, a3);
+  __ daddu(a2, t0, t1);
+
+  __ ld(t2, FieldMemOperand(subject, String::kLengthOffset));
+
+  __ SmiUntag(t2);
+  __ dsllv(t1, t2, a3);
+  __ daddu(a3, t0, t1);
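+  // Roughly, with shift = 0 for one-byte and 1 for two-byte strings:
+  //   base  = data_start + (slice_offset << shift);
+  //   start = base + (previous_index << shift);  // Argument 3 (a2).
+  //   end   = base + (length << shift);          // Argument 4 (a3).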
+  // Argument 2 (a1): Previous index.
+  // Already there
+
+  // Argument 1 (a0): Subject string.
+  __ mov(a0, subject);
+
+  // Locate the code entry and call it.
+  __ Daddu(t9, t9, Operand(Code::kHeaderSize - kHeapObjectTag));
+  DirectCEntryStub stub(isolate());
+  stub.GenerateCall(masm, t9);
+
+  __ LeaveExitFrame(false, no_reg, true);
+
+  // v0: result
+  // subject: subject string (callee saved)
+  // regexp_data: RegExp data (callee saved)
+  // last_match_info_elements: Last match info elements (callee saved)
+  // Check the result.
+  Label success;
+  __ Branch(&success, eq, v0, Operand(1));
+  // We expect exactly one result since we force the called regexp to behave
+  // as non-global.
+  Label failure;
+  __ Branch(&failure, eq, v0, Operand(NativeRegExpMacroAssembler::FAILURE));
+  // If not an exception, it can only be retry. Handle that in the runtime
+  // system.
+  __ Branch(&runtime, ne, v0, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  // Result must now be exception. If there is no pending exception already, a
+  // stack overflow (on the backtrack stack) was detected in RegExp code but
+  // the exception has not been created yet. Handle that in the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  __ li(a1, Operand(isolate()->factory()->the_hole_value()));
+  __ li(a2, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                      isolate())));
+  __ ld(v0, MemOperand(a2, 0));
+  __ Branch(&runtime, eq, v0, Operand(a1));
+
+  __ sd(a1, MemOperand(a2, 0));  // Clear pending exception.
+
+  // Check if the exception is a termination. If so, throw as uncatchable.
+  __ LoadRoot(a0, Heap::kTerminationExceptionRootIndex);
+  Label termination_exception;
+  __ Branch(&termination_exception, eq, v0, Operand(a0));
+
+  __ Throw(v0);
+
+  __ bind(&termination_exception);
+  __ ThrowUncatchable(v0);
+
+  __ bind(&failure);
+  // For failure and exception return null.
+  __ li(v0, Operand(isolate()->factory()->null_value()));
+  __ DropAndRet(4);
+
+  // Process the result from the native regexp code.
+  __ bind(&success);
+
+  __ lw(a1, UntagSmiFieldMemOperand(
+      regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2.
+  __ Daddu(a1, a1, Operand(1));
+  __ dsll(a1, a1, 1);  // Multiply by 2.
+
+  __ ld(a0, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(a0, &runtime);
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&runtime, ne, a2, Operand(JS_ARRAY_TYPE));
+  // Check that the JSArray is in fast case.
+  __ ld(last_match_info_elements,
+        FieldMemOperand(a0, JSArray::kElementsOffset));
+  __ ld(a0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+  __ Branch(&runtime, ne, a0, Operand(at));
+  // Check that the last match info has space for the capture registers and the
+  // additional information.
+  __ ld(a0,
+        FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+  __ Daddu(a2, a1, Operand(RegExpImpl::kLastMatchOverhead));
+
+  __ SmiUntag(at, a0);
+  __ Branch(&runtime, gt, a2, Operand(at));
+
+  // a1: number of capture registers
+  // subject: subject string
+  // Store the capture count.
+  __ SmiTag(a2, a1);  // To smi.
+  __ sd(a2, FieldMemOperand(last_match_info_elements,
+                             RegExpImpl::kLastCaptureCountOffset));
+  // Store last subject and last input.
+  __ sd(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastSubjectOffset));
+  __ mov(a2, subject);
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastSubjectOffset,
+                      subject,
+                      a7,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs);
+  __ mov(subject, a2);
+  __ sd(subject,
+         FieldMemOperand(last_match_info_elements,
+                         RegExpImpl::kLastInputOffset));
+  __ RecordWriteField(last_match_info_elements,
+                      RegExpImpl::kLastInputOffset,
+                      subject,
+                      a7,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs);
+
+  // Get the static offsets vector filled by the native regexp code.
+  ExternalReference address_of_static_offsets_vector =
+      ExternalReference::address_of_static_offsets_vector(isolate());
+  __ li(a2, Operand(address_of_static_offsets_vector));
+
+  // a1: number of capture registers
+  // a2: offsets vector
+  Label next_capture, done;
+  // Capture register counter starts from number of capture registers and
+  // counts down until wrapping after zero.
+  __ Daddu(a0,
+         last_match_info_elements,
+         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag));
+  __ bind(&next_capture);
+  __ Dsubu(a1, a1, Operand(1));
+  __ Branch(&done, lt, a1, Operand(zero_reg));
+  // Read the value from the static offsets vector buffer.
+  __ lw(a3, MemOperand(a2, 0));
+  __ daddiu(a2, a2, kIntSize);
+  // Store the smi value in the last match info.
+  __ SmiTag(a3);
+  __ sd(a3, MemOperand(a0, 0));
+  __ Branch(&next_capture, USE_DELAY_SLOT);
+  __ daddiu(a0, a0, kPointerSize);  // In branch delay slot.
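+  // MIPS note: the instruction in a branch delay slot executes before the
+  // branch takes effect, so the daddiu above runs on every iteration. A rough
+  // C equivalent of this loop (hypothetical names):
+  //   while (--count >= 0) *capture_slot++ = Smi::FromInt(*offsets++);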
+
+  __ bind(&done);
+
+  // Return last match info.
+  __ ld(v0, MemOperand(sp, kLastMatchInfoOffset));
+  __ DropAndRet(4);
+
+  // Do the runtime call to execute the regexp.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+
+  // Deferred code for string handling.
+  // (6) Not a long external string?  If yes, go to (8).
+  __ bind(&not_seq_nor_cons);
+  // Go to (8).
+  __ Branch(&not_long_external, gt, a1, Operand(kExternalStringTag));
+
+  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  __ bind(&external_string);
+  __ ld(a0, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ And(at, a0, Operand(kIsIndirectStringMask));
+    __ Assert(eq,
+              kExternalStringExpectedButNotFound,
+              at,
+              Operand(zero_reg));
+  }
+  __ ld(subject,
+        FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ Dsubu(subject,
+          subject,
+          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ jmp(&seq_string);    // Go to (5).
+
+  // (8) Short external string or not a string?  If yes, bail out to runtime.
+  __ bind(&not_long_external);
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ And(at, a1, Operand(kIsNotStringMask | kShortExternalStringMask));
+  __ Branch(&runtime, ne, at, Operand(zero_reg));
+
+  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // Load offset into t0 and replace subject string with parent.
+  __ ld(t0, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+  __ SmiUntag(t0);
+  __ ld(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+  __ jmp(&check_underlying);  // Go to (4).
+#endif  // V8_INTERPRETED_REGEXP
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+  // Cache the called function in a feedback vector slot.  Cache states
+  // are uninitialized, monomorphic (indicated by a JSFunction), and
+  // megamorphic.
+  // a0 : number of arguments to the construct function
+  // a1 : the function to call
+  // a2 : Feedback vector
+  // a3 : slot in feedback vector (Smi)
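+  //
+  // The slot acts as a small state machine; a sketch of the transitions
+  // handled below (not exhaustive):
+  //   uninitialized -> JSFunction or AllocationSite   (monomorphic)
+  //   monomorphic mismatch -> megamorphic sentinel    (megamorphic)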
+  Label initialize, done, miss, megamorphic, not_array_function;
+
+  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+            masm->isolate()->heap()->megamorphic_symbol());
+  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+            masm->isolate()->heap()->uninitialized_symbol());
+
+  // Load the cache state into a4.
+  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
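+  // The dsrl/Daddu pair is a fused untag-and-scale: on MIPS64 a smi keeps its
+  // payload in the upper 32 bits, so shifting right by 32 - kPointerSizeLog2
+  // yields slot * kPointerSize directly. Sketch:
+  //   byte_offset = (smi_slot >> 32) << kPointerSizeLog2;
+  //   state       = *(vector + byte_offset + FixedArray::kHeaderSize - tag);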
+
+  // A monomorphic cache hit or an already megamorphic state: invoke the
+  // function without changing the state.
+  __ Branch(&done, eq, a4, Operand(a1));
+
+  if (!FLAG_pretenuring_call_new) {
+    // If we came here, we need to see if we are the array function.
+    // If we didn't have a matching function, and we didn't find the
+    // megamorphic sentinel, then the slot holds either some other function or
+    // an AllocationSite. Do a map check on the object in a4.
+    __ ld(a5, FieldMemOperand(a4, 0));
+    __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+    __ Branch(&miss, ne, a5, Operand(at));
+
+    // Make sure the function is the Array() function
+    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+    __ Branch(&megamorphic, ne, a1, Operand(a4));
+    __ jmp(&done);
+  }
+
+  __ bind(&miss);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+  // megamorphic.
+  __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+  __ Branch(&initialize, eq, a4, Operand(at));
+  // MegamorphicSentinel is an immortal immovable object (the megamorphic
+  // symbol), so no write barrier is needed.
+  __ bind(&megamorphic);
+  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+  __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
+  __ jmp(&done);
+
+  // An uninitialized cache is patched with the function.
+  __ bind(&initialize);
+  if (!FLAG_pretenuring_call_new) {
+    // Make sure the function is the Array() function.
+    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+    __ Branch(&not_array_function, ne, a1, Operand(a4));
+
+    // The target function is the Array constructor. Create an AllocationSite
+    // if we don't already have it, and store it in the slot.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      const RegList kSavedRegs =
+          1 << 4  |  // a0
+          1 << 5  |  // a1
+          1 << 6  |  // a2
+          1 << 7;    // a3
+
+      // Arguments register must be smi-tagged to call out.
+      __ SmiTag(a0);
+      __ MultiPush(kSavedRegs);
+
+      CreateAllocationSiteStub create_stub(masm->isolate());
+      __ CallStub(&create_stub);
+
+      __ MultiPop(kSavedRegs);
+      __ SmiUntag(a0);
+    }
+    __ Branch(&done);
+
+    __ bind(&not_array_function);
+  }
+
+  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sd(a1, MemOperand(a4, 0));
+
+  __ Push(a4, a2, a1);
+  __ RecordWrite(a2, a4, a1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Pop(a4, a2, a1);
+
+  __ bind(&done);
+}
+
+
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+
+  // Do not transform the receiver for strict mode functions.
+  int32_t strict_mode_function_mask =
+      1 << SharedFunctionInfo::kStrictModeBitWithinByte;
+  // Do not transform the receiver for native (compiler hints already in a3).
+  int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
+
+  __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kStrictModeByteOffset));
+  __ And(at, a4, Operand(strict_mode_function_mask));
+  __ Branch(cont, ne, at, Operand(zero_reg));
+  __ lbu(a4, FieldMemOperand(a3, SharedFunctionInfo::kNativeByteOffset));
+  __ And(at, a4, Operand(native_mask));
+  __ Branch(cont, ne, at, Operand(zero_reg));
+}
+
+
+static void EmitSlowCase(MacroAssembler* masm,
+                         int argc,
+                         Label* non_function) {
+  // Check for function proxy.
+  __ Branch(non_function, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ push(a1);  // Put proxy as an additional argument.
+  __ li(a0, Operand(argc + 1, RelocInfo::NONE32));
+  __ mov(a2, zero_reg);
+  __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY);
+  {
+    Handle<Code> adaptor =
+        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ Jump(adaptor, RelocInfo::CODE_TARGET);
+  }
+
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ bind(non_function);
+  __ sd(a1, MemOperand(sp, argc * kPointerSize));
+  __ li(a0, Operand(argc));  // Set up the number of arguments.
+  __ mov(a2, zero_reg);
+  __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+  // Wrap the receiver and patch it back onto the stack.
+  { FrameScope frame_scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a3);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ pop(a1);
+  }
+  __ Branch(USE_DELAY_SLOT, cont);
+  __ sd(v0, MemOperand(sp, argc * kPointerSize));
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm,
+                                   int argc, bool needs_checks,
+                                   bool call_as_method) {
+  // a1 : the function to call
+  Label slow, non_function, wrap, cont;
+
+  if (needs_checks) {
+    // Check that the function is really a JavaScript function.
+    // a1: pushed function (to be verified)
+    __ JumpIfSmi(a1, &non_function);
+
+    // Go to the slow case if we do not have a function.
+    __ GetObjectType(a1, a4, a4);
+    __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // Fast-case: Invoke the function now.
+  // a1: pushed function
+  ParameterCount actual(argc);
+
+  if (call_as_method) {
+    if (needs_checks) {
+      EmitContinueIfStrictOrNative(masm, &cont);
+    }
+
+    // Compute the receiver in sloppy mode.
+    __ ld(a3, MemOperand(sp, argc * kPointerSize));
+
+    if (needs_checks) {
+      __ JumpIfSmi(a3, &wrap);
+      __ GetObjectType(a3, a4, a4);
+      __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
+    } else {
+      __ jmp(&wrap);
+    }
+
+    __ bind(&cont);
+  }
+  __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+  if (needs_checks) {
+    // Slow-case: Non-function called.
+    __ bind(&slow);
+    EmitSlowCase(masm, argc, &non_function);
+  }
+
+  if (call_as_method) {
+    __ bind(&wrap);
+    // Wrap the receiver and patch it back onto the stack.
+    EmitWrapCase(masm, argc, &cont);
+  }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+  // a0 : number of arguments
+  // a1 : the function to call
+  // a2 : feedback vector
+  // a3 : (only if a2 is not undefined) slot in feedback vector (Smi)
+  Label slow, non_function_call;
+  // Check that the function is not a smi.
+  __ JumpIfSmi(a1, &non_function_call);
+  // Check that the function is a JSFunction.
+  __ GetObjectType(a1, a4, a4);
+  __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
+
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+
+    __ dsrl(at, a3, 32 - kPointerSizeLog2);
+    __ Daddu(a5, a2, at);
+    if (FLAG_pretenuring_call_new) {
+      // Put the AllocationSite from the feedback vector into a2.
+      // By adding kPointerSize we encode that we know the AllocationSite
+      // entry is at the feedback vector slot given by a3 + 1.
+      __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize + kPointerSize));
+    } else {
+      Label feedback_register_initialized;
+      // Put the AllocationSite from the feedback vector into a2, or undefined.
+      __ ld(a2, FieldMemOperand(a5, FixedArray::kHeaderSize));
+      __ ld(a5, FieldMemOperand(a2, AllocationSite::kMapOffset));
+      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+      __ Branch(&feedback_register_initialized, eq, a5, Operand(at));
+      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+      __ bind(&feedback_register_initialized);
+    }
+
+    __ AssertUndefinedOrAllocationSite(a2, a5);
+  }
+
+  // Jump to the function-specific construct stub.
+  Register jmp_reg = a4;
+  __ ld(jmp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(jmp_reg, FieldMemOperand(jmp_reg,
+                                 SharedFunctionInfo::kConstructStubOffset));
+  __ Daddu(at, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ Jump(at);
+
+  // a0: number of arguments
+  // a1: called object
+  // a4: object type
+  Label do_call;
+  __ bind(&slow);
+  __ Branch(&non_function_call, ne, a4, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ GetBuiltinFunction(a1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ jmp(&do_call);
+
+  __ bind(&non_function_call);
+  __ GetBuiltinFunction(a1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
+  // Set expected number of arguments to zero (not changing r0).
+  __ li(a2, Operand(0, RelocInfo::NONE32));
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+           RelocInfo::CODE_TARGET);
+}
+
+
+// StringCharCodeAtGenerator.
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+  DCHECK(!a4.is(index_));
+  DCHECK(!a4.is(result_));
+  DCHECK(!a4.is(object_));
+
+  // If the receiver is a smi trigger the non-string case.
+  __ JumpIfSmi(object_, receiver_not_string_);
+
+  // Fetch the instance type of the receiver into result register.
+  __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  // If the receiver is not a string trigger the non-string case.
+  __ And(a4, result_, Operand(kIsNotStringMask));
+  __ Branch(receiver_not_string_, ne, a4, Operand(zero_reg));
+
+  // If the index is non-smi trigger the non-smi case.
+  __ JumpIfNotSmi(index_, &index_not_smi_);
+
+  __ bind(&got_smi_index_);
+
+  // Check for index out of range.
+  __ ld(a4, FieldMemOperand(object_, String::kLengthOffset));
+  __ Branch(index_out_of_range_, ls, a4, Operand(index_));
+
+  __ SmiUntag(index_);
+
+  StringCharLoadGenerator::Generate(masm,
+                                    object_,
+                                    index_,
+                                    result_,
+                                    &call_runtime_);
+
+  __ SmiTag(result_);
+  __ bind(&exit_);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+  __ ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ld(vector, FieldMemOperand(vector,
+                                JSFunction::kSharedFunctionInfoOffset));
+  __ ld(vector, FieldMemOperand(vector,
+                                SharedFunctionInfo::kFeedbackVectorOffset));
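+  // I.e., roughly: vector = frame_function->shared()->feedback_vector().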
+}
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+  // a1 - function
+  // a3 - slot id
+  Label miss;
+
+  EmitLoadTypeFeedbackVector(masm, a2);
+
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, at);
+  __ Branch(&miss, ne, a1, Operand(at));
+
+  __ li(a0, Operand(arg_count()));
+  __ dsrl(at, a3, 32 - kPointerSizeLog2);
+  __ Daddu(at, a2, Operand(at));
+  __ ld(a4, FieldMemOperand(at, FixedArray::kHeaderSize));
+
+  // Verify that a4 contains an AllocationSite
+  __ ld(a5, FieldMemOperand(a4, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+  __ Branch(&miss, ne, a5, Operand(at));
+
+  __ mov(a2, a4);
+  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  __ TailCallStub(&stub);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+
+  // The slow case; we need this no matter what to complete a call after a
+  // miss.
+  CallFunctionNoFeedback(masm,
+                         arg_count(),
+                         true,
+                         CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+  // a1 - function
+  // a3 - slot id (Smi)
+  Label extra_checks_or_miss, slow_start;
+  Label slow, non_function, wrap, cont;
+  Label have_js_function;
+  int argc = arg_count();
+  ParameterCount actual(argc);
+
+  EmitLoadTypeFeedbackVector(masm, a2);
+
+  // The checks. First, does a1 match the recorded monomorphic target?
+  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ ld(a4, FieldMemOperand(a4, FixedArray::kHeaderSize));
+  __ Branch(&extra_checks_or_miss, ne, a1, Operand(a4));
+
+  __ bind(&have_js_function);
+  if (CallAsMethod()) {
+    EmitContinueIfStrictOrNative(masm, &cont);
+    // Compute the receiver in sloppy mode.
+    __ ld(a3, MemOperand(sp, argc * kPointerSize));
+
+    __ JumpIfSmi(a3, &wrap);
+    __ GetObjectType(a3, a4, a4);
+    __ Branch(&wrap, lt, a4, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+    __ bind(&cont);
+  }
+
+  __ InvokeFunction(a1, actual, JUMP_FUNCTION, NullCallWrapper());
+
+  __ bind(&slow);
+  EmitSlowCase(masm, argc, &non_function);
+
+  if (CallAsMethod()) {
+    __ bind(&wrap);
+    EmitWrapCase(masm, argc, &cont);
+  }
+
+  __ bind(&extra_checks_or_miss);
+  Label miss;
+
+  __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+  __ Branch(&slow_start, eq, a4, Operand(at));
+  __ LoadRoot(at, Heap::kUninitializedSymbolRootIndex);
+  __ Branch(&miss, eq, a4, Operand(at));
+
+  if (!FLAG_trace_ic) {
+    // We are going megamorphic. If the feedback is a JSFunction, it is fine
+    // to handle it here. More complex cases are dealt with in the runtime.
+    __ AssertNotSmi(a4);
+    __ GetObjectType(a4, a5, a5);
+    __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
+    __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+    __ Daddu(a4, a2, Operand(a4));
+    __ LoadRoot(at, Heap::kMegamorphicSymbolRootIndex);
+    __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
+    __ Branch(&slow_start);
+  }
+
+  // We are here because tracing is on or we are going monomorphic.
+  __ bind(&miss);
+  GenerateMiss(masm);
+
+  // The slow case.
+  __ bind(&slow_start);
+  // Check that the function is really a JavaScript function.
+  // a1: pushed function (to be verified)
+  __ JumpIfSmi(a1, &non_function);
+
+  // Go to the slow case if we do not have a function.
+  __ GetObjectType(a1, a4, a4);
+  __ Branch(&slow, ne, a4, Operand(JS_FUNCTION_TYPE));
+  __ Branch(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ ld(a4, MemOperand(sp, (arg_count() + 1) * kPointerSize));
+
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Push the receiver and the function and feedback info.
+    __ Push(a4, a1, a2, a3);
+
+    // Call the entry.
+    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+                                               : IC::kCallIC_Customization_Miss;
+
+    ExternalReference miss = ExternalReference(IC_Utility(id),
+                                               masm->isolate());
+    __ CallExternalReference(miss, 4);
+
+    // Move result to a1 and exit the internal frame.
+    __ mov(a1, v0);
+  }
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
+  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+  // Index is not a smi.
+  __ bind(&index_not_smi_);
+  // If index is a heap number, try converting it to an integer.
+  __ CheckMap(index_,
+              result_,
+              Heap::kHeapNumberMapRootIndex,
+              index_not_number_,
+              DONT_DO_SMI_CHECK);
+  call_helper.BeforeCall(masm);
+  // Consumed by runtime conversion function:
+  __ Push(object_, index_);
+  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+  } else {
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    // NumberToSmi discards numbers that are not exact integers.
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
+  }
+
+  // Save the conversion result before the pop instructions below
+  // have a chance to overwrite it.
+
+  __ Move(index_, v0);
+  __ pop(object_);
+  // Reload the instance type.
+  __ ld(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbu(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  call_helper.AfterCall(masm);
+  // If index is still not a smi, it must be out of range.
+  __ JumpIfNotSmi(index_, index_out_of_range_);
+  // Otherwise, return to the fast path.
+  __ Branch(&got_smi_index_);
+
+  // Call the runtime. We get here when the receiver is a string and the
+  // index is a number, but the code for getting the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+  __ bind(&call_runtime_);
+  call_helper.BeforeCall(masm);
+  __ SmiTag(index_);
+  __ Push(object_, index_);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+
+  __ Move(result_, v0);
+
+  call_helper.AfterCall(masm);
+  __ jmp(&exit_);
+
+  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+
+  DCHECK(!a4.is(result_));
+  DCHECK(!a4.is(code_));
+
+  STATIC_ASSERT(kSmiTag == 0);
+  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
+  __ And(a4,
+         code_,
+         Operand(kSmiTagMask |
+                 ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
+  __ Branch(&slow_case_, ne, a4, Operand(zero_reg));
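+  // The single And above folds two checks into one; a sketch of the idea:
+  //   fail if (code & kSmiTagMask) != 0                    // not a smi
+  //   fail if untagged code > String::kMaxOneByteCharCode  // out of range
+  // This works because kMaxOneByteCharCode + 1 is a power of two, so any
+  // in-range smi has all the masked high bits clear.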
+
+
+  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+  // At this point the code register contains a smi-tagged one_byte char code.
+  STATIC_ASSERT(kSmiTag == 0);
+  __ SmiScale(a4, code_, kPointerSizeLog2);
+  __ Daddu(result_, result_, a4);
+  __ ld(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+  __ LoadRoot(a4, Heap::kUndefinedValueRootIndex);
+  __ Branch(&slow_case_, eq, result_, Operand(a4));
+  __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+    MacroAssembler* masm,
+    const RuntimeCallHelper& call_helper) {
+  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+  __ bind(&slow_case_);
+  call_helper.BeforeCall(masm);
+  __ push(code_);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+  __ Move(result_, v0);
+
+  call_helper.AfterCall(masm);
+  __ Branch(&exit_);
+
+  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm,
+                                          Register dest,
+                                          Register src,
+                                          Register count,
+                                          Register scratch,
+                                          String::Encoding encoding) {
+  if (FLAG_debug_code) {
+    // Check that destination is word aligned.
+    __ And(scratch, dest, Operand(kPointerAlignmentMask));
+    __ Check(eq,
+             kDestinationOfCopyNotAligned,
+             scratch,
+             Operand(zero_reg));
+  }
+
+  // Assumes word reads and writes are little endian.
+  // Nothing to do for zero characters.
+  Label done;
+
+  if (encoding == String::TWO_BYTE_ENCODING) {
+    __ Daddu(count, count, count);
+  }
+
+  Register limit = count;  // Read until dest equals this.
+  __ Daddu(limit, dest, Operand(count));
+
+  Label loop_entry, loop;
+  // Copy bytes from src to dest until dest hits limit.
+  __ Branch(&loop_entry);
+  __ bind(&loop);
+  __ lbu(scratch, MemOperand(src));
+  __ daddiu(src, src, 1);
+  __ sb(scratch, MemOperand(dest));
+  __ daddiu(dest, dest, 1);
+  __ bind(&loop_entry);
+  __ Branch(&loop, lt, dest, Operand(limit));
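+  // A rough C equivalent of the loop above (two-byte counts were already
+  // doubled, so a plain byte copy suffices):
+  //   while (dest < limit) *dest++ = *src++;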
+
+  __ bind(&done);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+  // Stack frame on entry.
+  //  ra: return address
+  //  sp[0]: to
+  //  sp[8]: from
+  //  sp[16]: string
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length.
+  // If any of these assumptions fail, we call the runtime system.
+
+  const int kToOffset = 0 * kPointerSize;
+  const int kFromOffset = 1 * kPointerSize;
+  const int kStringOffset = 2 * kPointerSize;
+
+  __ ld(a2, MemOperand(sp, kToOffset));
+  __ ld(a3, MemOperand(sp, kFromOffset));
+  // Not needed on MIPS64?
+  //  STATIC_ASSERT(kFromOffset == kToOffset + 4);
+  STATIC_ASSERT(kSmiTag == 0);
+  // Not needed on MIPS64?
+  //  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
+
+  // Utilize delay slots: SmiUntag doesn't emit a jump, and everything else is
+  // safe in this case.
+  __ JumpIfNotSmi(a2, &runtime);
+  __ JumpIfNotSmi(a3, &runtime);
+
+  __ SmiUntag(a2, a2);
+  __ SmiUntag(a3, a3);
+  // Both a2 and a3 are now untagged integers.
+  __ Branch(&runtime, lt, a3, Operand(zero_reg));  // From < 0.
+
+  __ Branch(&runtime, gt, a3, Operand(a2));  // Fail if from > to.
+  __ Dsubu(a2, a2, a3);
+
+  // Make sure first argument is a string.
+  __ ld(v0, MemOperand(sp, kStringOffset));
+  __ JumpIfSmi(v0, &runtime);
+  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  __ And(a4, a1, Operand(kIsNotStringMask));
+
+  __ Branch(&runtime, ne, a4, Operand(zero_reg));
+
+  Label single_char;
+  __ Branch(&single_char, eq, a2, Operand(1));
+
+  // Short-cut for the case of trivial substring.
+  Label return_v0;
+  // v0: original string
+  // a2: result string length
+  __ ld(a4, FieldMemOperand(v0, String::kLengthOffset));
+  __ SmiUntag(a4);
+  // Return original string.
+  __ Branch(&return_v0, eq, a2, Operand(a4));
+  // Longer than original string's length or negative: unsafe arguments.
+  __ Branch(&runtime, hi, a2, Operand(a4));
+  // Shorter than original string's length: an actual substring.
+
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into a5.
+  // v0: original string
+  // a1: instance type
+  // a2: length
+  // a3: from index (untagged)
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  __ And(a4, a1, Operand(kIsIndirectStringMask));
+  __ Branch(USE_DELAY_SLOT, &seq_or_external_string, eq, a4, Operand(zero_reg));
+  // a4 is used as a scratch register and can be overwritten in either case.
+  __ And(a4, a1, Operand(kSlicedNotConsMask));
+  __ Branch(&sliced_string, ne, a4, Operand(zero_reg));
+  // Cons string.  Check whether it is flat, then fetch first part.
+  __ ld(a5, FieldMemOperand(v0, ConsString::kSecondOffset));
+  __ LoadRoot(a4, Heap::kempty_stringRootIndex);
+  __ Branch(&runtime, ne, a5, Operand(a4));
+  __ ld(a5, FieldMemOperand(v0, ConsString::kFirstOffset));
+  // Update instance type.
+  __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked);
+
+  __ bind(&sliced_string);
+  // Sliced string.  Fetch parent and correct start index by offset.
+  __ ld(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
+  __ ld(a4, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+  __ SmiUntag(a4);  // Add offset to index.
+  __ Daddu(a3, a3, a4);
+  // Update instance type.
+  __ ld(a1, FieldMemOperand(a5, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  __ jmp(&underlying_unpacked);
+
+  __ bind(&seq_or_external_string);
+  // Sequential or external string.  Just move string to the expected register.
+  __ mov(a5, v0);
+
+  __ bind(&underlying_unpacked);
+
+  if (FLAG_string_slices) {
+    Label copy_routine;
+    // a5: underlying subject string
+    // a1: instance type of underlying subject string
+    // a2: length
+    // a3: adjusted start index (untagged)
+    // Short slice.  Copy instead of slicing.
+    __ Branch(&copy_routine, lt, a2, Operand(SlicedString::kMinLength));
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck the
+    // encoding of the newly created string's parent anyway due to
+    // externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ And(a4, a1, Operand(kStringEncodingMask));
+    __ Branch(&two_byte_slice, eq, a4, Operand(zero_reg));
+    __ AllocateOneByteSlicedString(v0, a2, a6, a7, &runtime);
+    __ jmp(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(v0, a2, a6, a7, &runtime);
+    __ bind(&set_slice_header);
+    __ SmiTag(a3);
+    __ sd(a5, FieldMemOperand(v0, SlicedString::kParentOffset));
+    __ sd(a3, FieldMemOperand(v0, SlicedString::kOffsetOffset));
+    __ jmp(&return_v0);
+
+    __ bind(&copy_routine);
+  }
+
+  // a5: underlying subject string
+  // a1: instance type of underlying subject string
+  // a2: length
+  // a3: adjusted start index (untagged)
+  Label two_byte_sequential, sequential_string, allocate_result;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(a4, a1, Operand(kExternalStringTag));
+  __ Branch(&sequential_string, eq, a4, Operand(zero_reg));
+
+  // Handle external string.
+  // Rule out short external strings.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ And(a4, a1, Operand(kShortExternalStringTag));
+  __ Branch(&runtime, ne, a4, Operand(zero_reg));
+  __ ld(a5, FieldMemOperand(a5, ExternalString::kResourceDataOffset));
+  // a5 already points to the first character of underlying string.
+  __ jmp(&allocate_result);
+
+  __ bind(&sequential_string);
+  // Locate first character of underlying subject string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ Daddu(a5, a5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&allocate_result);
+  // Sequential one-byte string.  Allocate the result.
+  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+  __ And(a4, a1, Operand(kStringEncodingMask));
+  __ Branch(&two_byte_sequential, eq, a4, Operand(zero_reg));
+
+  // Allocate and copy the resulting one_byte string.
+  __ AllocateOneByteString(v0, a2, a4, a6, a7, &runtime);
+
+  // Locate first character of substring to copy.
+  __ Daddu(a5, a5, a3);
+
+  // Locate first character of result.
+  __ Daddu(a1, v0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+  // v0: result string
+  // a1: first character of result string
+  // a2: result string length
+  // a5: first character of substring to copy
+  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharacters(
+      masm, a1, a5, a2, a3, String::ONE_BYTE_ENCODING);
+  __ jmp(&return_v0);
+
+  // Allocate and copy the resulting two-byte string.
+  __ bind(&two_byte_sequential);
+  __ AllocateTwoByteString(v0, a2, a4, a6, a7, &runtime);
+
+  // Locate first character of substring to copy.
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ dsll(a4, a3, 1);
+  __ Daddu(a5, a5, a4);
+  // Locate first character of result.
+  __ Daddu(a1, v0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // v0: result string.
+  // a1: first character of result.
+  // a2: result length.
+  // a5: first character of substring to copy.
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharacters(
+      masm, a1, a5, a2, a3, String::TWO_BYTE_ENCODING);
+
+  __ bind(&return_v0);
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1, a3, a4);
+  __ DropAndRet(3);
+
+  // Just jump to the runtime to create the substring.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+  __ bind(&single_char);
+  // v0: original string
+  // a1: instance type
+  // a2: length
+  // a3: from index (untagged)
+  StringCharAtGenerator generator(
+      v0, a3, a2, v0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm);
+  __ DropAndRet(3);
+  generator.SkipSlow(masm, &runtime);
+}
+
+
+void StringHelper::GenerateFlatOneByteStringEquals(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ ld(length, FieldMemOperand(left, String::kLengthOffset));
+  __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Branch(&check_zero_length, eq, length, Operand(scratch2));
+  __ bind(&strings_not_equal);
+  // Cannot put li in the delay slot; it expands to multiple instructions.
+  __ li(v0, Operand(Smi::FromInt(NOT_EQUAL)));
+  __ Ret();
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&compare_chars, ne, length, Operand(zero_reg));
+  DCHECK(is_int16((intptr_t)Smi::FromInt(EQUAL)));
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+
+  // Compare characters.
+  __ bind(&compare_chars);
+
+  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, scratch3,
+                                  v0, &strings_not_equal);
+
+  // Characters are equal.
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+}
+
+
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3, Register scratch4) {
+  Label result_not_equal, compare_lengths;
+  // Find minimum length and length difference.
+  __ ld(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ ld(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ Dsubu(scratch3, scratch1, Operand(scratch2));
+  Register length_delta = scratch3;
+  __ slt(scratch4, scratch2, scratch1);
+  __ Movn(scratch1, scratch2, scratch4);
+  Register min_length = scratch1;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&compare_lengths, eq, min_length, Operand(zero_reg));
+
+  // Compare loop.
+  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                  scratch4, v0, &result_not_equal);
+
+  // Compare lengths - strings up to min-length are equal.
+  __ bind(&compare_lengths);
+  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use length_delta as result if it's zero.
+  __ mov(scratch2, length_delta);
+  __ mov(scratch4, zero_reg);
+  __ mov(v0, zero_reg);
+
+  __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // the last comparison performed in the loop above.
+  Label ret;
+  __ Branch(&ret, eq, scratch2, Operand(scratch4));
+  __ li(v0, Operand(Smi::FromInt(GREATER)));
+  __ Branch(&ret, gt, scratch2, Operand(scratch4));
+  __ li(v0, Operand(Smi::FromInt(LESS)));
+  __ bind(&ret);
+  __ Ret();
+}
+
+
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch1, Register scratch2, Register scratch3,
+    Label* chars_not_equal) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ Daddu(scratch1, length,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ Daddu(left, left, Operand(scratch1));
+  __ Daddu(right, right, Operand(scratch1));
+  __ Dsubu(length, zero_reg, length);
+  Register index = length;  // index = -length;
+
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ Daddu(scratch3, left, index);
+  __ lbu(scratch1, MemOperand(scratch3));
+  __ Daddu(scratch3, right, index);
+  __ lbu(scratch2, MemOperand(scratch3));
+  __ Branch(chars_not_equal, ne, scratch1, Operand(scratch2));
+  __ Daddu(index, index, 1);
+  __ Branch(&loop, ne, index, Operand(zero_reg));
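+  // Roughly, with index running from -length up to 0 (no separate bounds
+  // compare is needed):
+  //   for (i = -length; i != 0; i++)
+  //     if (left_end[i] != right_end[i]) goto chars_not_equal;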
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  Counters* counters = isolate()->counters();
+
+  // Stack frame on entry.
+  //  sp[0]: right string
+  //  sp[8]: left string
+  __ ld(a1, MemOperand(sp, 1 * kPointerSize));  // Left.
+  __ ld(a0, MemOperand(sp, 0 * kPointerSize));  // Right.
+
+  Label not_same;
+  __ Branch(&not_same, ne, a0, Operand(a1));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ IncrementCounter(counters->string_compare_native(), 1, a1, a2);
+  __ DropAndRet(2);
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential one_byte strings.
+  __ JumpIfNotBothSequentialOneByteStrings(a1, a0, a2, a3, &runtime);
+
+  // Compare flat one_byte strings natively. Remove arguments from stack first.
+  __ IncrementCounter(counters->string_compare_native(), 1, a2, a3);
+  __ Daddu(sp, sp, Operand(2 * kPointerSize));
+  StringHelper::GenerateCompareFlatOneByteStrings(masm, a1, a0, a2, a3, a4, a5);
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a1    : left
+  //  -- a0    : right
+  //  -- ra    : return address
+  // -----------------------------------
+
+  // Load a2 with the allocation site. We stick an undefined dummy value here
+  // and replace it with the real allocation site later when we instantiate this
+  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+  __ li(a2, handle(isolate()->heap()->undefined_value()));
+
+  // Make sure that we actually patched the allocation site.
+  if (FLAG_debug_code) {
+    __ And(at, a2, Operand(kSmiTagMask));
+    __ Assert(ne, kExpectedAllocationSite, at, Operand(zero_reg));
+    __ ld(a4, FieldMemOperand(a2, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+    __ Assert(eq, kExpectedAllocationSite, a4, Operand(at));
+  }
+
+  // Tail call into the stub that handles binary operations with allocation
+  // sites.
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
+  __ TailCallStub(&stub);
+}
+
+
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::SMI);
+  Label miss;
+  __ Or(a2, a1, a0);
+  __ JumpIfNotSmi(a2, &miss);
+
+  if (GetCondition() == eq) {
+    // For equality we do not care about the sign of the result.
+    __ Ret(USE_DELAY_SLOT);
+    __ Dsubu(v0, a0, a1);
+  } else {
+    // Untag before subtracting to avoid handling overflow.
+    __ SmiUntag(a1);
+    __ SmiUntag(a0);
+    __ Ret(USE_DELAY_SLOT);
+    __ Dsubu(v0, a1, a0);
+  }
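+  // Why the two paths differ (a sketch): for equality only v0 == 0 matters,
+  // so subtracting the tagged values is fine; for ordered compares the sign
+  // of v0 matters, and subtracting the untagged 32-bit payloads cannot
+  // overflow the 64-bit result register.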
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
+
+  Label generic_stub;
+  Label unordered, maybe_undefined1, maybe_undefined2;
+  Label miss;
+
+  if (left() == CompareICState::SMI) {
+    __ JumpIfNotSmi(a1, &miss);
+  }
+  if (right() == CompareICState::SMI) {
+    __ JumpIfNotSmi(a0, &miss);
+  }
+
+  // Inline the double comparison and fall back to the general compare
+  // stub if NaN is involved.
+  // Load left and right operand.
+  Label done, left, left_smi, right_smi;
+  __ JumpIfSmi(a0, &right_smi);
+  __ CheckMap(a0, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+              DONT_DO_SMI_CHECK);
+  __ Dsubu(a2, a0, Operand(kHeapObjectTag));
+  __ ldc1(f2, MemOperand(a2, HeapNumber::kValueOffset));
+  __ Branch(&left);
+  __ bind(&right_smi);
+  __ SmiUntag(a2, a0);  // Can't clobber a0 yet.
+  FPURegister single_scratch = f6;
+  __ mtc1(a2, single_scratch);
+  __ cvt_d_w(f2, single_scratch);
+
+  __ bind(&left);
+  __ JumpIfSmi(a1, &left_smi);
+  __ CheckMap(a1, a2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+              DONT_DO_SMI_CHECK);
+  __ Dsubu(a2, a1, Operand(kHeapObjectTag));
+  __ ldc1(f0, MemOperand(a2, HeapNumber::kValueOffset));
+  __ Branch(&done);
+  __ bind(&left_smi);
+  __ SmiUntag(a2, a1);  // Can't clobber a1 yet.
+  single_scratch = f8;
+  __ mtc1(a2, single_scratch);
+  __ cvt_d_w(f0, single_scratch);
+
+  __ bind(&done);
+
+  // Return a result of -1, 0, or 1, or use CompareStub for NaNs.
+  Label fpu_eq, fpu_lt;
+  // Test if equal, and also handle the unordered/NaN case.
+  __ BranchF(&fpu_eq, &unordered, eq, f0, f2);
+
+  // Test if less (unordered case is already handled).
+  __ BranchF(&fpu_lt, NULL, lt, f0, f2);
+
+  // Otherwise it's greater, so just fall thru, and return.
+  DCHECK(is_int16(GREATER) && is_int16(EQUAL) && is_int16(LESS));
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(GREATER));
+
+  __ bind(&fpu_eq);
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(EQUAL));
+
+  __ bind(&fpu_lt);
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(LESS));
+
+  __ bind(&unordered);
+  __ bind(&generic_stub);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&maybe_undefined1);
+  if (Token::IsOrderedRelationalCompareOp(op())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&miss, ne, a0, Operand(at));
+    __ JumpIfSmi(a1, &unordered);
+    __ GetObjectType(a1, a2, a2);
+    __ Branch(&maybe_undefined2, ne, a2, Operand(HEAP_NUMBER_TYPE));
+    __ jmp(&unordered);
+  }
+
+  __ bind(&maybe_undefined2);
+  if (Token::IsOrderedRelationalCompareOp(op())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&unordered, eq, a1, Operand(at));
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are internalized strings.
+  __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ Or(tmp1, tmp1, Operand(tmp2));
+  __ And(at, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  __ Branch(&miss, ne, at, Operand(zero_reg));
+
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  DCHECK(right.is(a0));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ mov(v0, right);
+  // Internalized strings are compared by identity.
+  __ Ret(ne, left, Operand(right));
+  DCHECK(is_int16(EQUAL));
+  __ Ret(USE_DELAY_SLOT);
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  DCHECK(GetCondition() == eq);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are unique names. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
+
+  // Use a0 as result
+  __ mov(v0, a0);
+
+  // Unique names are compared by identity.
+  Label done;
+  __ Branch(&done, ne, left, Operand(right));
+  // Make sure a0 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  DCHECK(right.is(a0));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ li(v0, Operand(Smi::FromInt(EQUAL)));
+  __ bind(&done);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
+  Label miss;
+
+  bool equality = Token::IsEqualityOp(op());
+
+  // Registers containing left and right operands respectively.
+  Register left = a1;
+  Register right = a0;
+  Register tmp1 = a2;
+  Register tmp2 = a3;
+  Register tmp3 = a4;
+  Register tmp4 = a5;
+  Register tmp5 = a6;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ ld(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ ld(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbu(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbu(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ Or(tmp3, tmp1, tmp2);
+  __ And(tmp5, tmp3, Operand(kIsNotStringMask));
+  __ Branch(&miss, ne, tmp5, Operand(zero_reg));
+
+  // Fast check for identical strings.
+  Label left_ne_right;
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ Branch(&left_ne_right, ne, left, Operand(right));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, zero_reg);  // In the delay slot.
+  __ bind(&left_ne_right);
+
+  // Handle not identical strings.
+
+  // Check that both strings are internalized strings. If they are, we're done
+  // because we already know they are not identical. We know they are both
+  // strings.
+  if (equality) {
+    DCHECK(GetCondition() == eq);
+    STATIC_ASSERT(kInternalizedTag == 0);
+    __ Or(tmp3, tmp1, Operand(tmp2));
+    __ And(tmp5, tmp3, Operand(kIsNotInternalizedMask));
+    Label is_symbol;
+    __ Branch(&is_symbol, ne, tmp5, Operand(zero_reg));
+    // Make sure a0 is non-zero. At this point input operands are
+    // guaranteed to be non-zero.
+    DCHECK(right.is(a0));
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(v0, a0);  // In the delay slot.
+    __ bind(&is_symbol);
+  }
+
+  // Check that both strings are sequential one_byte.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+                                                    &runtime);
+
+  // Compare flat one_byte strings. Returns when done.
+  if (equality) {
+    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, tmp2,
+                                                  tmp3);
+  } else {
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+                                                    tmp2, tmp3, tmp4);
+  }
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ Push(left, right);
+  if (equality) {
+    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  } else {
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::OBJECT);
+  Label miss;
+  __ And(a2, a1, Operand(a0));
+  __ JumpIfSmi(a2, &miss);
+
+  __ GetObjectType(a0, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&miss, ne, a2, Operand(JS_OBJECT_TYPE));
+
+  DCHECK(GetCondition() == eq);
+  __ Ret(USE_DELAY_SLOT);
+  __ dsubu(v0, a0, a1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  __ And(a2, a1, a0);
+  __ JumpIfSmi(a2, &miss);
+  __ ld(a2, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ ld(a3, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&miss, ne, a2, Operand(known_map_));
+  __ Branch(&miss, ne, a3, Operand(known_map_));
+
+  __ Ret(USE_DELAY_SLOT);
+  __ dsubu(v0, a0, a1);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+  {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss =
+        ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(a1, a0);
+    __ Push(ra, a1, a0);
+    __ li(a4, Operand(Smi::FromInt(op())));
+    __ daddiu(sp, sp, -kPointerSize);
+    __ CallExternalReference(miss, 3, USE_DELAY_SLOT);
+    __ sd(a4, MemOperand(sp));  // In the delay slot.
+    // Compute the entry point of the rewritten stub.
+    __ Daddu(a2, v0, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Restore registers.
+    __ Pop(a1, a0, ra);
+  }
+  __ Jump(a2);
+}
+
+
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  // Make room for the arguments to fit the C calling convention. Most callers
+  // of DirectCEntryStub::GenerateCall use EnterExitFrame/LeaveExitFrame, so
+  // they handle stack restoring and we don't have to do that here.
+  // Any caller of DirectCEntryStub::GenerateCall must take care of dropping
+  // kCArgsSlotsSize stack space after the call.
+  __ daddiu(sp, sp, -kCArgsSlotsSize);
+  // Place the return address on the stack, making the call
+  // GC safe. The RegExp backend also relies on this.
+  __ sd(ra, MemOperand(sp, kCArgsSlotsSize));
+  __ Call(t9);  // Call the C++ function.
+  __ ld(t9, MemOperand(sp, kCArgsSlotsSize));
+
+  if (FLAG_debug_code && FLAG_enable_slow_asserts) {
+    // In case of an error the return address may point to a memory area
+    // filled with kZapValue by the GC.
+    // Dereference the address and check for this.
+    __ Uld(a4, MemOperand(t9));
+    __ Assert(ne, kReceivedInvalidReturnAddress, a4,
+        Operand(reinterpret_cast<uint64_t>(kZapValue)));
+  }
+  __ Jump(t9);
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm,
+                                    Register target) {
+  intptr_t loc =
+      reinterpret_cast<intptr_t>(GetCode().location());
+  __ Move(t9, target);
+  __ li(ra, Operand(loc, RelocInfo::CODE_TARGET), CONSTANT_SIZE);
+  __ Call(ra);
+}
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm,
+                                                      Label* miss,
+                                                      Label* done,
+                                                      Register receiver,
+                                                      Register properties,
+                                                      Handle<Name> name,
+                                                      Register scratch0) {
+  DCHECK(name->IsUniqueName());
+  // If the names of slots 1 through kProbes - 1 for the hash value are not
+  // equal to the name, and the kProbes-th slot is unused (its name is the
+  // undefined value), the hash table is guaranteed not to contain the
+  // property. This holds even if some slots represent deleted properties
+  // (their names are the hole value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // scratch0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = scratch0;
+    // Capacity is smi 2^n.
+    __ SmiLoadUntag(index, FieldMemOperand(properties, kCapacityOffset));
+    __ Dsubu(index, index, Operand(1));
+    __ And(index, index,
+           Operand(name->Hash() + NameDictionary::GetProbeOffset(i)));
+
+    // Scale the index by multiplying by the entry size.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    __ dsll(at, index, 1);
+    __ Daddu(index, index, at);  // index *= 3.
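+    // E.g. for index == 5: at = 10 (5 << 1) and index becomes 15, i.e. the
+    // multiply-by-3 is done with a shift plus an add.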
+
+    Register entity_name = scratch0;
+    // Having undefined at this place means the name is not contained.
+    DCHECK_EQ(kSmiTagSize, 1);
+    Register tmp = properties;
+
+    __ dsll(scratch0, index, kPointerSizeLog2);
+    __ Daddu(tmp, properties, scratch0);
+    __ ld(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+    DCHECK(!tmp.is(entity_name));
+    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+    __ Branch(done, eq, entity_name, Operand(tmp));
+
+    // Load the hole ready for use below:
+    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+
+    // Stop if found the property.
+    __ Branch(miss, eq, entity_name, Operand(Handle<Name>(name)));
+
+    Label good;
+    __ Branch(&good, eq, entity_name, Operand(tmp));
+
+    // Check if the entry name is not a unique name.
+    __ ld(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+    __ lbu(entity_name,
+           FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+    __ bind(&good);
+
+    // Restore the properties.
+    __ ld(properties,
+          FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  }
+
+  const int spill_mask =
+      (ra.bit() | a6.bit() | a5.bit() | a4.bit() | a3.bit() |
+       a2.bit() | a1.bit() | a0.bit() | v0.bit());
+
+  __ MultiPush(spill_mask);
+  __ ld(a0, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ li(a1, Operand(Handle<Name>(name)));
+  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ mov(at, v0);
+  __ MultiPop(spill_mask);
+
+  __ Branch(done, eq, at, Operand(zero_reg));
+  __ Branch(miss, ne, at, Operand(zero_reg));
+}
+
+
+// Probe the name dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If the lookup was successful, |scratch2| will be equal to
+// elements + kPointerSize * index.
+void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm,
+                                                      Label* miss,
+                                                      Label* done,
+                                                      Register elements,
+                                                      Register name,
+                                                      Register scratch1,
+                                                      Register scratch2) {
+  DCHECK(!elements.is(scratch1));
+  DCHECK(!elements.is(scratch2));
+  DCHECK(!name.is(scratch1));
+  DCHECK(!name.is(scratch2));
+
+  __ AssertName(name);
+
+  // Compute the capacity mask.
+  __ ld(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ SmiUntag(scratch1);
+  __ Dsubu(scratch1, scratch1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ lwu(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      DCHECK(NameDictionary::GetProbeOffset(i) <
+             1 << (32 - Name::kHashFieldOffset));
+      __ Daddu(scratch2, scratch2, Operand(
+          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
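+      // Adding (GetProbeOffset(i) << kHashShift) here and then shifting right
+      // by kHashShift below folds the probe offset into the hash with a
+      // single shift: (hash + (off << s)) >> s == (hash >> s) + off.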
+    }
+    __ dsrl(scratch2, scratch2, Name::kHashShift);
+    __ And(scratch2, scratch1, scratch2);
+
+    // Scale the index by multiplying by the element size.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    // scratch2 = scratch2 * 3.
+
+    __ dsll(at, scratch2, 1);
+    __ Daddu(scratch2, scratch2, at);
+
+    // Check if the key is identical to the name.
+    __ dsll(at, scratch2, kPointerSizeLog2);
+    __ Daddu(scratch2, elements, at);
+    __ ld(at, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ Branch(done, eq, name, Operand(at));
+  }
+
+  const int spill_mask =
+      (ra.bit() | a6.bit() | a5.bit() | a4.bit() |
+       a3.bit() | a2.bit() | a1.bit() | a0.bit() | v0.bit()) &
+      ~(scratch1.bit() | scratch2.bit());
+
+  __ MultiPush(spill_mask);
+  if (name.is(a0)) {
+    DCHECK(!elements.is(a1));
+    __ Move(a1, name);
+    __ Move(a0, elements);
+  } else {
+    __ Move(a0, elements);
+    __ Move(a1, name);
+  }
+  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ mov(scratch2, a2);
+  __ mov(at, v0);
+  __ MultiPop(spill_mask);
+
+  __ Branch(done, ne, at, Operand(zero_reg));
+  __ Branch(miss, eq, at, Operand(zero_reg));
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
+  // Registers:
+  //  result: will hold the lookup result.
+  //  key: the name to look up.
+  //  dictionary: NameDictionary to probe.
+  //  index: will hold the index of the entry if the lookup is successful;
+  //         might alias with result.
+  // Returns:
+  //  result is zero if the lookup failed, non-zero otherwise.
+
+  Register result = v0;
+  Register dictionary = a0;
+  Register key = a1;
+  Register index = a2;
+  Register mask = a3;
+  Register hash = a4;
+  Register undefined = a5;
+  Register entry_key = a6;
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  __ ld(mask, FieldMemOperand(dictionary, kCapacityOffset));
+  __ SmiUntag(mask);
+  __ Dsubu(mask, mask, Operand(1));
+
+  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is smi 2^n.
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      DCHECK(NameDictionary::GetProbeOffset(i) <
+             1 << (32 - Name::kHashFieldOffset));
+      __ Daddu(index, hash, Operand(
+          NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+    } else {
+      __ mov(index, hash);
+    }
+    __ dsrl(index, index, Name::kHashShift);
+    __ And(index, mask, index);
+
+    // Scale the index by multiplying by the entry size.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    // index *= 3.
+    __ mov(at, index);
+    __ dsll(index, index, 1);
+    __ Daddu(index, index, at);
+
+    DCHECK_EQ(kSmiTagSize, 1);
+    __ dsll(index, index, kPointerSizeLog2);
+    __ Daddu(index, index, dictionary);
+    __ ld(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Having undefined at this place means the name is not contained.
+    __ Branch(&not_in_dictionary, eq, entry_key, Operand(undefined));
+
+    // Stop if found the property.
+    __ Branch(&in_dictionary, eq, entry_key, Operand(key));
+
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
+      // Check if the entry name is not a unique name.
+      __ ld(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+      __ lbu(entry_key,
+             FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing a negative lookup, then a probing failure should be
+  // treated as a lookup success. For a positive lookup, a probing failure
+  // should be treated as a lookup failure.
+  if (mode() == POSITIVE_LOOKUP) {
+    __ Ret(USE_DELAY_SLOT);
+    __ mov(result, zero_reg);
+  }
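+  // For NEGATIVE_LOOKUP we fall through into &in_dictionary below and report
+  // "found" (1), the conservative answer described above.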
+
+  __ bind(&in_dictionary);
+  __ Ret(USE_DELAY_SLOT);
+  __ li(result, 1);
+
+  __ bind(&not_in_dictionary);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(result, zero_reg);
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+    Isolate* isolate) {
+  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+  stub1.GetCode();
+  // Hydrogen code stubs need stub2 at snapshot time.
+  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+  stub2.GetCode();
+}
+
+
+// Takes the input in 3 registers: address_, value_, and object_.  A pointer
+// to the value has just been written into the object; now this stub makes
+// sure we keep the GC informed.  The word in the object where the value has
+// been written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two branch+nop instructions are generated with labels so as to
+  // get the offset fixed up correctly by the bind(Label*) call.  We patch it
+  // back and forth between a "bne zero_reg, zero_reg, ..." (a nop in this
+  // position) and the "beq zero_reg, zero_reg, ..." when we start and stop
+  // incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+  __ beq(zero_reg, zero_reg, &skip_to_incremental_noncompacting);
+  __ nop();
+  __ beq(zero_reg, zero_reg, &skip_to_incremental_compacting);
+  __ nop();
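+  // On MIPS, "bne zero_reg, zero_reg, ..." can never be taken (zero_reg
+  // always equals itself), so it executes as a nop, while "beq zero_reg,
+  // zero_reg, ..." is always taken. The patch helpers in the header rewrite
+  // only the opcode and keep the 16-bit offset fixed up by bind(Label*), so
+  // the branch targets stay valid.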
+
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(),
+                           address(),
+                           value(),
+                           save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+
+  PatchBranchIntoNop(masm, 0);
+  PatchBranchIntoNop(masm, 2 * Assembler::kInstrSize);
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(),
+                           &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE,
+                     ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object(),
+                           address(),
+                           value(),
+                           save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      a0.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  DCHECK(!address.is(regs_.object()));
+  DCHECK(!address.is(a0));
+  __ Move(address, regs_.address());
+  __ Move(a0, regs_.object());
+  __ Move(a1, address);
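+  // Ordering matters above: if regs_.address() lives in a0, it is copied to
+  // a scratch register first so that loading the object into a0 does not
+  // clobber it.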
+  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ CallCFunction(
+      ExternalReference::incremental_marking_record_write_function(isolate()),
+      argument_count);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm,
+    OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  __ And(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask));
+  __ ld(regs_.scratch1(),
+        MemOperand(regs_.scratch0(),
+                   MemoryChunk::kWriteBarrierCounterOffset));
+  __ Dsubu(regs_.scratch1(), regs_.scratch1(), Operand(1));
+  __ sd(regs_.scratch1(),
+         MemOperand(regs_.scratch0(),
+                    MemoryChunk::kWriteBarrierCounterOffset));
+  __ Branch(&need_incremental, lt, regs_.scratch1(), Operand(zero_reg));
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object(),
+                           address(),
+                           value(),
+                           save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ ld(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask,
+                     eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask,
+                     eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),  // Scratch.
+                    regs_.address(),  // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object(),
+                           address(),
+                           value(),
+                           save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0    : element value to store
+  //  -- a3    : element index as smi
+  //  -- sp[0] : array literal index in function as smi
+  //  -- sp[8] : array literal
+  // clobbers a1, a2, a4
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  // Get array literal index, array literal and its map.
+  __ ld(a4, MemOperand(sp, 0 * kPointerSize));
+  __ ld(a1, MemOperand(sp, 1 * kPointerSize));
+  __ ld(a2, FieldMemOperand(a1, JSObject::kMapOffset));
+
+  __ CheckFastElements(a2, a5, &double_elements);
+  // Check for FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS elements
+  __ JumpIfSmi(a0, &smi_element);
+  __ CheckFastSmiElements(a2, a5, &fast_elements);
+
+  // Store into the array literal requires an elements transition. Call into
+  // the runtime.
+  __ bind(&slow_elements);
+  __ Push(a1, a3, a0);
+  __ ld(a5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ld(a5, FieldMemOperand(a5, JSFunction::kLiteralsOffset));
+  __ Push(a5, a4);
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ SmiScale(a6, a3, kPointerSizeLog2);
+  __ Daddu(a6, a5, a6);
+  __ Daddu(a6, a6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sd(a0, MemOperand(a6, 0));
+  // Update the write barrier for the array store.
+  __ RecordWrite(a5, a6, a0, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+
+  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+  // and the value is a Smi.
+  __ bind(&smi_element);
+  __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ SmiScale(a6, a3, kPointerSizeLog2);
+  __ Daddu(a6, a5, a6);
+  __ sd(a0, FieldMemOperand(a6, FixedArray::kHeaderSize));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+
+  // Array literal has ElementsKind of FAST_*_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+  __ ld(a5, FieldMemOperand(a1, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(a0, a3, a5, a7, t1, a2, &slow_elements);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+  CEntryStub ces(isolate(), 1, kSaveFPRegs);
+  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ ld(a1, MemOperand(fp, parameter_count_offset));
+  if (function_mode() == JS_FUNCTION_STUB_MODE) {
+    __ Daddu(a1, a1, Operand(1));
+  }
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
+  __ dsll(a1, a1, kPointerSizeLog2);
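+  // a1 now holds the argument count scaled to bytes (count << 3 with 8-byte
+  // pointers on MIPS64); the delay-slot Daddu below pops those arguments.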
+  __ Ret(USE_DELAY_SLOT);
+  __ Daddu(sp, sp, a1);
+}
+
+
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorLoadStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorKeyedLoadStub stub(isolate());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+  if (masm->isolate()->function_entry_hook() != NULL) {
+    ProfileEntryHookStub stub(masm->isolate());
+    __ push(ra);
+    __ CallStub(&stub);
+    __ pop(ra);
+  }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+  // The entry hook is a "push ra" instruction, followed by a call.
+  // Note: on MIPS "push" is 2 instruction
+  const int32_t kReturnAddressDistanceFromFunctionStart =
+      Assembler::kCallTargetAddressOffset + (2 * Assembler::kInstrSize);
+
+  // This should contain all kJSCallerSaved registers.
+  const RegList kSavedRegs =
+     kJSCallerSaved |  // Caller saved registers.
+     s5.bit();         // Saved stack pointer.
+
+  // We also save ra, so the count here is one higher than the mask indicates.
+  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
+
+  // Save all caller-save registers as this may be called from anywhere.
+  __ MultiPush(kSavedRegs | ra.bit());
+
+  // Compute the function's address for the first argument.
+  __ Dsubu(a0, ra, Operand(kReturnAddressDistanceFromFunctionStart));
+
+  // The caller's return address is above the saved temporaries.
+  // Grab that for the second argument to the hook.
+  __ Daddu(a1, sp, Operand(kNumSavedRegs * kPointerSize));
+
+  // Align the stack if necessary.
+  int frame_alignment = masm->ActivationFrameAlignment();
+  if (frame_alignment > kPointerSize) {
+    __ mov(s5, sp);
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    __ And(sp, sp, Operand(-frame_alignment));
+  }
+
+  __ Dsubu(sp, sp, kCArgsSlotsSize);
+#if defined(V8_HOST_ARCH_MIPS) || defined(V8_HOST_ARCH_MIPS64)
+  int64_t entry_hook =
+      reinterpret_cast<int64_t>(isolate()->function_entry_hook());
+  __ li(t9, Operand(entry_hook));
+#else
+  // Under the simulator we need to indirect the entry hook through a
+  // trampoline function at a known address.
+  // It additionally takes an isolate as a third parameter.
+  __ li(a2, Operand(ExternalReference::isolate_address(isolate())));
+
+  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+  __ li(t9, Operand(ExternalReference(&dispatcher,
+                                      ExternalReference::BUILTIN_CALL,
+                                      isolate())));
+#endif
+  // Call the C function through t9 to conform to the ABI for PIC.
+  __ Call(t9);
+
+  // Restore the stack pointer if needed.
+  if (frame_alignment > kPointerSize) {
+    __ mov(sp, s5);
+  } else {
+    __ Daddu(sp, sp, kCArgsSlotsSize);
+  }
+
+  // Also pop ra to get Ret(0).
+  __ MultiPop(kSavedRegs | ra.bit());
+  __ Ret();
+}
+
+
+template<class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+                                AllocationSiteOverrideMode mode) {
+  if (mode == DISABLE_ALLOCATION_SITES) {
+    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
+    __ TailCallStub(&stub);
+  } else if (mode == DONT_OVERRIDE) {
+    int last_index = GetSequenceIndexFromFastElementsKind(
+        TERMINAL_FAST_ELEMENTS_KIND);
+    for (int i = 0; i <= last_index; ++i) {
+      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+      T stub(masm->isolate(), kind);
+      __ TailCallStub(&stub, eq, a3, Operand(kind));
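+      // Tail-call the stub for this kind only if a3 matches; otherwise fall
+      // through and try the next kind in the sequence.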
+    }
+
+    // If we reached this point there is a problem.
+    __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+                                           AllocationSiteOverrideMode mode) {
+  // a2 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+  // a3 - kind (if mode != DISABLE_ALLOCATION_SITES)
+  // a0 - number of arguments
+  // a1 - constructor?
+  // sp[0] - last argument
+  Label normal_sequence;
+  if (mode == DONT_OVERRIDE) {
+    DCHECK(FAST_SMI_ELEMENTS == 0);
+    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+    DCHECK(FAST_ELEMENTS == 2);
+    DCHECK(FAST_HOLEY_ELEMENTS == 3);
+    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+    // Is the low bit set? If so, we are holey and that is good.
+    __ And(at, a3, Operand(1));
+    __ Branch(&normal_sequence, ne, at, Operand(zero_reg));
+  }
+  // Look at the first argument.
+  __ ld(a5, MemOperand(sp, 0));
+  __ Branch(&normal_sequence, eq, a5, Operand(zero_reg));
+
+  if (mode == DISABLE_ALLOCATION_SITES) {
+    ElementsKind initial = GetInitialFastElementsKind();
+    ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+    ArraySingleArgumentConstructorStub stub_holey(masm->isolate(),
+                                                  holey_initial,
+                                                  DISABLE_ALLOCATION_SITES);
+    __ TailCallStub(&stub_holey);
+
+    __ bind(&normal_sequence);
+    ArraySingleArgumentConstructorStub stub(masm->isolate(),
+                                            initial,
+                                            DISABLE_ALLOCATION_SITES);
+    __ TailCallStub(&stub);
+  } else if (mode == DONT_OVERRIDE) {
+    // We are going to create a holey array, but our kind is non-holey.
+    // Fix kind and retry (only if we have an allocation site in the slot).
+    __ Daddu(a3, a3, Operand(1));
+
+    if (FLAG_debug_code) {
+      __ ld(a5, FieldMemOperand(a2, 0));
+      __ LoadRoot(at, Heap::kAllocationSiteMapRootIndex);
+      __ Assert(eq, kExpectedAllocationSite, a5, Operand(at));
+    }
+
+    // Save the resulting elements kind in type info. We can't just store a3
+    // in the AllocationSite::transition_info field, because the elements kind
+    // is restricted to a portion of the field; the upper bits need to be
+    // left alone.
+    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+    __ ld(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+    __ Daddu(a4, a4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley)));
+    __ sd(a4, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
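+    // Packed -> holey is always kind + 1 (see the DCHECKed kind values
+    // above), and the kind lives in the low bits of the field
+    // (ElementsKindBits::kShift == 0, as asserted), so adding the Smi-tagged
+    // packed-to-holey delta leaves the upper bits of transition_info intact.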
+
+    __ bind(&normal_sequence);
+    int last_index = GetSequenceIndexFromFastElementsKind(
+        TERMINAL_FAST_ELEMENTS_KIND);
+    for (int i = 0; i <= last_index; ++i) {
+      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+      __ TailCallStub(&stub, eq, a3, Operand(kind));
+    }
+
+    // If we reached this point there is a problem.
+    __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+template<class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+  int to_index = GetSequenceIndexFromFastElementsKind(
+      TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= to_index; ++i) {
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    T stub(isolate, kind);
+    stub.GetCode();
+    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+      stub1.GetCode();
+    }
+  }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+      isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+    Isolate* isolate) {
+  ElementsKind kinds[2] = { FAST_ELEMENTS, FAST_HOLEY_ELEMENTS };
+  for (int i = 0; i < 2; i++) {
+    // For internal arrays we only need a few things.
+    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+    stubh1.GetCode();
+    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+    stubh2.GetCode();
+    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+    stubh3.GetCode();
+  }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+    MacroAssembler* masm,
+    AllocationSiteOverrideMode mode) {
+  if (argument_count() == ANY) {
+    Label not_zero_case, not_one_case;
+    __ And(at, a0, a0);
+    __ Branch(&not_zero_case, ne, at, Operand(zero_reg));
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+    __ bind(&not_zero_case);
+    __ Branch(&not_one_case, gt, a0, Operand(1));
+    CreateArrayDispatchOneArgument(masm, mode);
+
+    __ bind(&not_one_case);
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+  } else if (argument_count() == NONE) {
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  } else if (argument_count() == ONE) {
+    CreateArrayDispatchOneArgument(masm, mode);
+  } else if (argument_count() == MORE_THAN_ONE) {
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argc (only if argument_count() == ANY)
+  //  -- a1 : constructor
+  //  -- a2 : AllocationSite or undefined
+  //  -- sp[0] : return address
+  //  -- sp[8] : last argument
+  // -----------------------------------
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the global and natives
+    // builtin Array functions, which always have maps.
+
+    // Initial map for the builtin Array function should be a map.
+    __ ld(a4, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    // A NULL initial map looks like a Smi, so SmiTst catches both.
+    __ SmiTst(a4, at);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
+        at, Operand(zero_reg));
+    __ GetObjectType(a4, a4, a5);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
+        a5, Operand(MAP_TYPE));
+
+    // We should either have undefined in a2 or a valid AllocationSite
+    __ AssertUndefinedOrAllocationSite(a2, a4);
+  }
+
+  Label no_info;
+  // Get the elements kind and case on that.
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&no_info, eq, a2, Operand(at));
+
+  __ ld(a3, FieldMemOperand(a2, AllocationSite::kTransitionInfoOffset));
+  __ SmiUntag(a3);
+  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+  __ And(a3, a3, Operand(AllocationSite::ElementsKindBits::kMask));
+  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+  __ bind(&no_info);
+  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(
+    MacroAssembler* masm, ElementsKind kind) {
+
+  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+  __ TailCallStub(&stub0, lo, a0, Operand(1));
+
+  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  __ TailCallStub(&stubN, hi, a0, Operand(1));
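+  // Falling through here means a0 == 1: exactly one argument.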
+
+  if (IsFastPackedElementsKind(kind)) {
+    // We might need to create a holey array;
+    // look at the first argument.
+    __ ld(at, MemOperand(sp, 0));
+
+    InternalArraySingleArgumentConstructorStub
+        stub1_holey(isolate(), GetHoleyElementsKind(kind));
+    __ TailCallStub(&stub1_holey, ne, at, Operand(zero_reg));
+  }
+
+  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+  __ TailCallStub(&stub1);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0 : argc
+  //  -- a1 : constructor
+  //  -- sp[0] : return address
+  //  -- sp[8] : last argument
+  // -----------------------------------
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the global and natives
+    // builtin Array functions, which always have maps.
+
+    // Initial map for the builtin Array function should be a map.
+    __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+    // A NULL initial map looks like a Smi, so SmiTst catches both.
+    __ SmiTst(a3, at);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction,
+        at, Operand(zero_reg));
+    __ GetObjectType(a3, a3, a4);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction,
+        a4, Operand(MAP_TYPE));
+  }
+
+  // Figure out the right elements kind.
+  __ ld(a3, FieldMemOperand(a1, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Load the map's "bit field 2" into a3. We only need the first byte,
+  // but the following bit field extraction takes care of that anyway.
+  __ lbu(a3, FieldMemOperand(a3, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ DecodeField<Map::ElementsKindBits>(a3);
+
+  if (FLAG_debug_code) {
+    Label done;
+    __ Branch(&done, eq, a3, Operand(FAST_ELEMENTS));
+    __ Assert(
+        eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray,
+        a3, Operand(FAST_HOLEY_ELEMENTS));
+    __ bind(&done);
+  }
+
+  Label fast_elements_case;
+  __ Branch(&fast_elements_case, eq, a3, Operand(FAST_ELEMENTS));
+  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+  __ bind(&fast_elements_case);
+  GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- a0                  : callee
+  //  -- a4                  : call_data
+  //  -- a2                  : holder
+  //  -- a1                  : api_function_address
+  //  -- cp                  : context
+  //  --
+  //  -- sp[0]               : last argument
+  //  -- ...
+  //  -- sp[(argc - 1) * 8]  : first argument
+  //  -- sp[argc * 8]        : receiver
+  // -----------------------------------
+
+  Register callee = a0;
+  Register call_data = a4;
+  Register holder = a2;
+  Register api_function_address = a1;
+  Register context = cp;
+
+  int argc = this->argc();
+  bool is_store = this->is_store();
+  bool call_data_undefined = this->call_data_undefined();
+
+  typedef FunctionCallbackArguments FCA;
+
+  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+  STATIC_ASSERT(FCA::kCalleeIndex == 5);
+  STATIC_ASSERT(FCA::kDataIndex == 4);
+  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(FCA::kIsolateIndex == 1);
+  STATIC_ASSERT(FCA::kHolderIndex == 0);
+  STATIC_ASSERT(FCA::kArgsLength == 7);
+
+  // Save context, callee and call data.
+  __ Push(context, callee, call_data);
+  // Load context from callee.
+  __ ld(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+  Register scratch = call_data;
+  if (!call_data_undefined) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  }
+  // Push return value and default return value.
+  __ Push(scratch, scratch);
+  __ li(scratch,
+        Operand(ExternalReference::isolate_address(isolate())));
+  // Push isolate and holder.
+  __ Push(scratch, holder);
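+  // The stack now matches the FunctionCallbackArguments layout asserted
+  // above: holder at sp[0] (kHolderIndex == 0), then isolate, the default
+  // return value, the return value, call data, callee, and finally the
+  // context at index 6.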
+
+  // Prepare arguments.
+  __ mov(scratch, sp);
+
+  // Allocate the v8::Arguments structure in the arguments' space since
+  // it's not controlled by GC.
+  const int kApiStackSpace = 4;
+
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  DCHECK(!api_function_address.is(a0) && !scratch.is(a0));
+  // a0 = FunctionCallbackInfo&
+  // The arguments area is after the return address.
+  __ Daddu(a0, sp, Operand(1 * kPointerSize));
+  // FunctionCallbackInfo::implicit_args_
+  __ sd(scratch, MemOperand(a0, 0 * kPointerSize));
+  // FunctionCallbackInfo::values_
+  __ Daddu(at, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+  __ sd(at, MemOperand(a0, 1 * kPointerSize));
+  // FunctionCallbackInfo::length_ = argc
+  __ li(at, Operand(argc));
+  __ sd(at, MemOperand(a0, 2 * kPointerSize));
+  // FunctionCallbackInfo::is_construct_call = 0
+  __ sd(zero_reg, MemOperand(a0, 3 * kPointerSize));
+
+  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
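+  // Unwind space covers the JS arguments (argc), the FCA slots pushed above
+  // (kArgsLength), and the receiver (+1).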
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_function_callback(isolate());
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  MemOperand context_restore_operand(
+      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+  // Stores return the first JS argument.
+  int return_value_offset = 0;
+  if (is_store) {
+    return_value_offset = 2 + FCA::kArgsLength;
+  } else {
+    return_value_offset = 2 + FCA::kReturnValueOffset;
+  }
+  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+  __ CallApiFunctionAndReturn(api_function_address,
+                              thunk_ref,
+                              kStackUnwindSpace,
+                              return_value_operand,
+                              &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- sp[0]                  : name
+  //  -- sp[8 - kArgsLength*8]  : PropertyCallbackArguments object
+  //  -- ...
+  //  -- a2                     : api_function_address
+  // -----------------------------------
+
+  Register api_function_address = ApiGetterDescriptor::function_address();
+  DCHECK(api_function_address.is(a2));
+
+  __ mov(a0, sp);  // a0 = Handle<Name>
+  __ Daddu(a1, a0, Operand(1 * kPointerSize));  // a1 = PCA
+
+  const int kApiStackSpace = 1;
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  // Create a PropertyAccessorInfo instance on the stack above the exit frame,
+  // with a1 (internal::Object** args_) as the data.
+  __ sd(a1, MemOperand(sp, 1 * kPointerSize));
+  __ Daddu(a1, sp, Operand(1 * kPointerSize));  // a1 = AccessorInfo&
+
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_accessor_getter_callback(isolate());
+  __ CallApiFunctionAndReturn(api_function_address,
+                              thunk_ref,
+                              kStackUnwindSpace,
+                              MemOperand(fp, 6 * kPointerSize),
+                              NULL);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/code-stubs-mips64.h b/src/mips64/code-stubs-mips64.h
new file mode 100644
index 0000000..6c324bb
--- /dev/null
+++ b/src/mips64/code-stubs-mips64.h
@@ -0,0 +1,397 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_CODE_STUBS_MIPS64_H_
+#define V8_MIPS64_CODE_STUBS_MIPS64_H_
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+class StringHelper : public AllStatic {
+ public:
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharacters(MacroAssembler* masm,
+                                     Register dest,
+                                     Register src,
+                                     Register count,
+                                     Register scratch,
+                                     String::Encoding encoding);
+
+  // Compares two flat one-byte strings and returns result in v0.
+  static void GenerateCompareFlatOneByteStrings(
+      MacroAssembler* masm, Register left, Register right, Register scratch1,
+      Register scratch2, Register scratch3, Register scratch4);
+
+  // Compares two flat one-byte strings for equality and returns result in v0.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
+                                              Register scratch1,
+                                              Register scratch2,
+                                              Register scratch3);
+
+ private:
+  static void GenerateOneByteCharsCompareLoop(
+      MacroAssembler* masm, Register left, Register right, Register length,
+      Register scratch1, Register scratch2, Register scratch3,
+      Label* chars_not_equal);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class StoreRegistersStateStub: public PlatformCodeStub {
+ public:
+  explicit StoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
+};
+
+
+class RestoreRegistersStateStub: public PlatformCodeStub {
+ public:
+  explicit RestoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
+};
+
+// This stub can convert a signed int32 to a heap number (double).  It does
+// not work for int32s that are in Smi range!  No GC occurs during this stub
+// so you don't have to set up the frame.
+class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
+ public:
+  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
+                             Register the_heap_number, Register scratch,
+                             Register scratch2)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = IntRegisterBits::encode(the_int.code()) |
+                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
+                 ScratchRegisterBits::encode(scratch.code()) |
+                 SignRegisterBits::encode(scratch2.code());
+    DCHECK(IntRegisterBits::is_valid(the_int.code()));
+    DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
+    DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
+    DCHECK(SignRegisterBits::is_valid(scratch2.code()));
+  }
+
+  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
+
+ private:
+  void Generate(MacroAssembler* masm);
+
+  Register the_int() const {
+    return Register::from_code(IntRegisterBits::decode(minor_key_));
+  }
+
+  Register the_heap_number() const {
+    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
+  }
+
+  Register scratch() const {
+    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
+  }
+
+  Register sign() const {
+    return Register::from_code(SignRegisterBits::decode(minor_key_));
+  }
+
+  // Minor key encoding in 16 bits.
+  class IntRegisterBits: public BitField<int, 0, 4> {};
+  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
+  class ScratchRegisterBits: public BitField<int, 8, 4> {};
+  class SignRegisterBits: public BitField<int, 12, 4> {};
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
+};
+
+
+class RecordWriteStub: public PlatformCodeStub {
+ public:
+  RecordWriteStub(Isolate* isolate,
+                  Register object,
+                  Register value,
+                  Register address,
+                  RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : PlatformCodeStub(isolate),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
+  }
+
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
+  enum Mode {
+    STORE_BUFFER_ONLY,
+    INCREMENTAL,
+    INCREMENTAL_COMPACTION
+  };
+
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+    masm->instr_at_put(pos, BNE | (zero_reg.code() << kRsShift) |
+        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+    DCHECK(Assembler::IsBne(masm->instr_at(pos)));
+  }
+
+  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+    const unsigned offset = masm->instr_at(pos) & kImm16Mask;
+    masm->instr_at_put(pos, BEQ | (zero_reg.code() << kRsShift) |
+        (zero_reg.code() << kRtShift) | (offset & kImm16Mask));
+    DCHECK(Assembler::IsBeq(masm->instr_at(pos)));
+  }
+
+  static Mode GetMode(Code* stub) {
+    Instr first_instruction = Assembler::instr_at(stub->instruction_start());
+    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+                                                   2 * Assembler::kInstrSize);
+
+    if (Assembler::IsBeq(first_instruction)) {
+      return INCREMENTAL;
+    }
+
+    DCHECK(Assembler::IsBne(first_instruction));
+
+    if (Assembler::IsBeq(second_instruction)) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    DCHECK(Assembler::IsBne(second_instruction));
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    MacroAssembler masm(NULL,
+                        stub->instruction_start(),
+                        stub->instruction_size());
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        DCHECK(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+        PatchBranchIntoNop(&masm, 0);
+        PatchBranchIntoNop(&masm, 2 * Assembler::kInstrSize);
+        break;
+      case INCREMENTAL:
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 0);
+        break;
+      case INCREMENTAL_COMPACTION:
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, 2 * Assembler::kInstrSize);
+        break;
+    }
+    DCHECK(GetMode(stub) == mode);
+    CpuFeatures::FlushICache(stub->instruction_start(),
+                             4 * Assembler::kInstrSize);
+  }
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers.  The input is
+  // two registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object,
+                       Register address,
+                       Register scratch0)
+        : object_(object),
+          address_(address),
+          scratch0_(scratch0) {
+      DCHECK(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) {
+      masm->pop(scratch1_);
+    }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->MultiPush((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+      if (mode == kSaveFPRegs) {
+        masm->MultiPushFPU(kCallerSavedFPU);
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        masm->MultiPopFPU(kCallerSavedFPU);
+      }
+      masm->MultiPop((kJSCallerSaved | ra.bit()) & ~scratch1_.bit());
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm,
+      OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm);
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
+  class ObjectBits: public BitField<int, 0, 5> {};
+  class ValueBits: public BitField<int, 5, 5> {};
+  class AddressBits: public BitField<int, 10, 5> {};
+  class RememberedSetActionBits: public BitField<RememberedSetAction, 15, 1> {};
+  class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 16, 1> {};
+
+  Label slow_;
+  RegisterAllocation regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
+};
+
+
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of a compacting GC (which can move code objects), we need
+// to keep the code that called into native code pinned in memory. Currently
+// the simplest approach is to generate such a stub early enough that it can
+// never be moved by the GC.
+class DirectCEntryStub: public PlatformCodeStub {
+ public:
+  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+  void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+  bool NeedsImmovableCode() { return true; }
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
+};
+
+
+class NameDictionaryLookupStub: public PlatformCodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = LookupModeBits::encode(mode);
+  }
+
+  static void GenerateNegativeLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register receiver,
+                                     Register properties,
+                                     Handle<Name> name,
+                                     Register scratch0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm,
+                                     Label* miss,
+                                     Label* done,
+                                     Register elements,
+                                     Register name,
+                                     Register r0,
+                                     Register r1);
+
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
+  class LookupModeBits: public BitField<LookupMode, 0, 1> {};
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
+};
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS64_CODE_STUBS_MIPS64_H_
diff --git a/src/mips64/codegen-mips64.cc b/src/mips64/codegen-mips64.cc
new file mode 100644
index 0000000..fb395f7
--- /dev/null
+++ b/src/mips64/codegen-mips64.cc
@@ -0,0 +1,1142 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/mips64/simulator-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_mips_machine_code = NULL;
+double fast_exp_simulator(double x) {
+  return Simulator::current(Isolate::Current())->CallFP(
+      fast_exp_mips_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+  if (!FLAG_fast_math) return &std::exp;
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == NULL) return &std::exp;
+  ExternalReference::InitializeMathExpData();
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+  {
+    DoubleRegister input = f12;
+    DoubleRegister result = f0;
+    DoubleRegister double_scratch1 = f4;
+    DoubleRegister double_scratch2 = f6;
+    Register temp1 = a4;
+    Register temp2 = a5;
+    Register temp3 = a6;
+
+    if (!IsMipsSoftFloatABI) {
+      // Input value is in f12 anyway, nothing to do.
+    } else {
+      __ Move(input, a0, a1);
+    }
+    __ Push(temp3, temp2, temp1);
+    MathExpGenerator::EmitMathExp(
+        &masm, input, result, double_scratch1, double_scratch2,
+        temp1, temp2, temp3);
+    __ Pop(temp3, temp2, temp1);
+    if (!IsMipsSoftFloatABI) {
+      // Result is already in f0, nothing to do.
+    } else {
+      __ Move(v0, v1, result);
+    }
+    __ Ret();
+  }
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
+
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+  fast_exp_mips_machine_code = buffer;
+  return &fast_exp_simulator;
+#endif
+}
+
+
+#if defined(V8_HOST_ARCH_MIPS)
+MemCopyUint8Function CreateMemCopyUint8Function(MemCopyUint8Function stub) {
+#if defined(USE_SIMULATOR)
+  return stub;
+#else
+
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(3 * KB, &actual_size, true));
+  if (buffer == NULL) return stub;
+
+  // This code assumes that cache lines are 32 bytes; if the cache line is
+  // larger, it will not work correctly.
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+  {
+    Label lastb, unaligned, aligned, chkw,
+          loop16w, chk1w, wordCopy_loop, skip_pref, lastbloop,
+          leave, ua_chk16w, ua_loop16w, ua_skip_pref, ua_chkw,
+          ua_chk1w, ua_wordCopy_loop, ua_smallCopy, ua_smallCopy_loop;
+
+    // The size of each prefetch.
+    uint32_t pref_chunk = 32;
+    // The maximum size of a prefetch; it must not be less than pref_chunk.
+    // If the real size of a prefetch is greater than max_pref_size and
+    // the kPrefHintPrepareForStore hint is used, the code will not work
+    // correctly.
+    uint32_t max_pref_size = 128;
+    DCHECK(pref_chunk < max_pref_size);
+
+    // pref_limit is set based on the fact that we never use an offset
+    // greater than 5 on a store pref and that a single pref can
+    // never be larger than max_pref_size.
+    uint32_t pref_limit = (5 * pref_chunk) + max_pref_size;
+    int32_t pref_hint_load = kPrefHintLoadStreamed;
+    int32_t pref_hint_store = kPrefHintPrepareForStore;
+    uint32_t loadstore_chunk = 4;
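+
+    // Editor's note, with the constants above worked through: pref_chunk = 32
+    // and max_pref_size = 128, so pref_limit = 5 * 32 + 128 = 288 bytes.
+    // With the PrepareForStore hint, t9 is later set to "past the end" - 288,
+    // the last a0 for which prefetching ahead remains safe.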
+
+    // The initial prefetches may fetch bytes that are before the buffer being
+    // copied. Start copies with an offset of 4 to avoid this situation when
+    // using kPrefHintPrepareForStore.
+    DCHECK(pref_hint_store != kPrefHintPrepareForStore ||
+           pref_chunk * 4 >= max_pref_size);
+    // If the size is less than 8, go to lastb. Regardless of size,
+    // copy the dst pointer to v0 for the return value.
+    __ slti(a6, a2, 2 * loadstore_chunk);
+    __ bne(a6, zero_reg, &lastb);
+    __ mov(v0, a0);  // In delay slot.
+
+    // If src and dst have different alignments, go to unaligned, if they
+    // have the same alignment (but are not actually aligned) do a partial
+    // load/store to make them aligned. If they are both already aligned
+    // we can start copying at aligned.
+    __ xor_(t8, a1, a0);
+    __ andi(t8, t8, loadstore_chunk - 1);  // t8 is a0/a1 word-displacement.
+    __ bne(t8, zero_reg, &unaligned);
+    __ subu(a3, zero_reg, a0);  // In delay slot.
+
+    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
+    __ beq(a3, zero_reg, &aligned);  // Already aligned.
+    __ subu(a2, a2, a3);  // In delay slot. a2 is the remaining bytes count.
+
+    __ lwr(t8, MemOperand(a1));
+    __ addu(a1, a1, a3);
+    __ swr(t8, MemOperand(a0));
+    __ addu(a0, a0, a3);
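+
+    // Editor's sketch (not part of this file) of the co-alignment test and
+    // head copy above, in plain C; loadstore_chunk is 4 as declared earlier:
+    //
+    //   bool co_aligned = (((src ^ dst) & (loadstore_chunk - 1)) == 0);
+    //   size_t head = (0 - dst) & (loadstore_chunk - 1);  // bytes to align
+    //
+    // XOR exposes any difference in the pointers' low bits; when they agree,
+    // copying 'head' bytes (the lwr/swr pair above) aligns both at once.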
+
+    // Now dst/src are both aligned to (word) aligned addresses. Set a2 to
+    // count how many bytes we have to copy after all the 64 byte chunks are
+    // copied and a3 to the dst pointer after all the 64 byte chunks have been
+    // copied. We will loop, incrementing a0 and a1 until a0 equals a3.
+    __ bind(&aligned);
+    __ andi(t8, a2, 0x3f);
+    __ beq(a2, t8, &chkw);  // Less than 64?
+    __ subu(a3, a2, t8);  // In delay slot.
+    __ addu(a3, a0, a3);  // Now a3 is the final dst after loop.
+
+    // When in the loop we prefetch with the kPrefHintPrepareForStore hint,
+    // the a0+x address must not go past the "a4-32" address. This means:
+    // for x=128 the last "safe" a0 address is "a4-160". Alternatively, for
+    // x=64 the last "safe" a0 address is "a4-96". In the current version we
+    // will use "pref hint, 128(a0)", so "a4-160" is the limit.
+    if (pref_hint_store == kPrefHintPrepareForStore) {
+      __ addu(a4, a0, a2);  // a4 is the "past the end" address.
+      __ Subu(t9, a4, pref_limit);  // t9 is the "last safe pref" address.
+    }
+
+    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+
+    if (pref_hint_store != kPrefHintPrepareForStore) {
+      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+    }
+    __ bind(&loop16w);
+    __ lw(a4, MemOperand(a1));
+
+    if (pref_hint_store == kPrefHintPrepareForStore) {
+      __ sltu(v1, t9, a0);  // If a0 > t9, don't use next prefetch.
+      __ Branch(USE_DELAY_SLOT, &skip_pref, gt, v1, Operand(zero_reg));
+    }
+    __ lw(a5, MemOperand(a1, 1, loadstore_chunk));  // Maybe in delay slot.
+
+    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+    __ bind(&skip_pref);
+    __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
+    __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
+    __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
+    __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
+    __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
+    __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+
+    __ sw(a4, MemOperand(a0));
+    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
+    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
+    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
+    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
+    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+
+    __ lw(a4, MemOperand(a1, 8, loadstore_chunk));
+    __ lw(a5, MemOperand(a1, 9, loadstore_chunk));
+    __ lw(a6, MemOperand(a1, 10, loadstore_chunk));
+    __ lw(a7, MemOperand(a1, 11, loadstore_chunk));
+    __ lw(t0, MemOperand(a1, 12, loadstore_chunk));
+    __ lw(t1, MemOperand(a1, 13, loadstore_chunk));
+    __ lw(t2, MemOperand(a1, 14, loadstore_chunk));
+    __ lw(t3, MemOperand(a1, 15, loadstore_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+
+    __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
+    __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
+    __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
+    __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
+    __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
+    __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
+    __ addiu(a0, a0, 16 * loadstore_chunk);
+    __ bne(a0, a3, &loop16w);
+    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
+    __ mov(a2, t8);
+
+    // Here we have src and dest word-aligned but less than 64 bytes to go.
+    // Check for a 32-byte chunk and copy it if there is one. Otherwise jump
+    // down to chk1w to handle the tail end of the copy.
+    __ bind(&chkw);
+    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+    __ andi(t8, a2, 0x1f);
+    __ beq(a2, t8, &chk1w);  // Less than 32?
+    __ nop();  // In delay slot.
+    __ lw(a4, MemOperand(a1));
+    __ lw(a5, MemOperand(a1, 1, loadstore_chunk));
+    __ lw(a6, MemOperand(a1, 2, loadstore_chunk));
+    __ lw(a7, MemOperand(a1, 3, loadstore_chunk));
+    __ lw(t0, MemOperand(a1, 4, loadstore_chunk));
+    __ lw(t1, MemOperand(a1, 5, loadstore_chunk));
+    __ lw(t2, MemOperand(a1, 6, loadstore_chunk));
+    __ lw(t3, MemOperand(a1, 7, loadstore_chunk));
+    __ addiu(a1, a1, 8 * loadstore_chunk);
+    __ sw(a4, MemOperand(a0));
+    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
+    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
+    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
+    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
+    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+    __ addiu(a0, a0, 8 * loadstore_chunk);
+
+    // Here we have less than 32 bytes to copy. Set up for a loop to copy
+    // one word at a time. Set a2 to count how many bytes we have to copy
+    // after all the word chunks are copied and a3 to the dst pointer after
+    // all the word chunks have been copied. We will loop, incrementing a0
+    // and a1 until a0 equals a3.
+    __ bind(&chk1w);
+    __ andi(a2, t8, loadstore_chunk - 1);
+    __ beq(a2, t8, &lastb);
+    __ subu(a3, t8, a2);  // In delay slot.
+    __ addu(a3, a0, a3);
+
+    __ bind(&wordCopy_loop);
+    __ lw(a7, MemOperand(a1));
+    __ addiu(a0, a0, loadstore_chunk);
+    __ addiu(a1, a1, loadstore_chunk);
+    __ bne(a0, a3, &wordCopy_loop);
+    __ sw(a7, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
+
+    __ bind(&lastb);
+    __ Branch(&leave, le, a2, Operand(zero_reg));
+    __ addu(a3, a0, a2);
+
+    __ bind(&lastbloop);
+    __ lb(v1, MemOperand(a1));
+    __ addiu(a0, a0, 1);
+    __ addiu(a1, a1, 1);
+    __ bne(a0, a3, &lastbloop);
+    __ sb(v1, MemOperand(a0, -1));  // In delay slot.
+
+    __ bind(&leave);
+    __ jr(ra);
+    __ nop();
+
+    // Unaligned case. Only the dst gets aligned so we need to do partial
+    // loads of the source followed by normal stores to the dst (once we
+    // have aligned the destination).
+    __ bind(&unaligned);
+    __ andi(a3, a3, loadstore_chunk - 1);  // Copy a3 bytes to align a0/a1.
+    __ beq(a3, zero_reg, &ua_chk16w);
+    __ subu(a2, a2, a3);  // In delay slot.
+
+    __ lwr(v1, MemOperand(a1));
+    __ lwl(v1,
+           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    __ addu(a1, a1, a3);
+    __ swr(v1, MemOperand(a0));
+    __ addu(a0, a0, a3);
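+
+    // Editor's note: lwr/lwl are the classic pre-R6 MIPS unaligned-access
+    // pair. Together, the two loads above behave like this portable C
+    // (assuming a little-endian configuration):
+    //
+    //   uint32_t word;
+    //   memcpy(&word, src, sizeof(word));  // unaligned 32-bit load
+    //
+    // lwr fills the low bytes starting at the unaligned address and lwl the
+    // remaining high bytes, hence the offset_minus_one operand.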
+
+    // Now the dst (but not the source) is aligned. Set a2 to count how many
+    // bytes we have to copy after all the 64 byte chunks are copied and a3 to
+    // the dst pointer after all the 64 byte chunks have been copied. We will
+    // loop, incrementing a0 and a1 until a0 equals a3.
+    __ bind(&ua_chk16w);
+    __ andi(t8, a2, 0x3f);
+    __ beq(a2, t8, &ua_chkw);
+    __ subu(a3, a2, t8);  // In delay slot.
+    __ addu(a3, a0, a3);
+
+    if (pref_hint_store == kPrefHintPrepareForStore) {
+      __ addu(a4, a0, a2);
+      __ Subu(t9, a4, pref_limit);
+    }
+
+    __ Pref(pref_hint_load, MemOperand(a1, 0 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 1 * pref_chunk));
+    __ Pref(pref_hint_load, MemOperand(a1, 2 * pref_chunk));
+
+    if (pref_hint_store != kPrefHintPrepareForStore) {
+      __ Pref(pref_hint_store, MemOperand(a0, 1 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 2 * pref_chunk));
+      __ Pref(pref_hint_store, MemOperand(a0, 3 * pref_chunk));
+    }
+
+    __ bind(&ua_loop16w);
+    __ Pref(pref_hint_load, MemOperand(a1, 3 * pref_chunk));
+    __ lwr(a4, MemOperand(a1));
+    __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
+    __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
+
+    if (pref_hint_store == kPrefHintPrepareForStore) {
+      __ sltu(v1, t9, a0);
+      __ Branch(USE_DELAY_SLOT, &ua_skip_pref, gt, v1, Operand(zero_reg));
+    }
+    __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));  // Maybe in delay slot.
+
+    __ Pref(pref_hint_store, MemOperand(a0, 4 * pref_chunk));
+    __ Pref(pref_hint_store, MemOperand(a0, 5 * pref_chunk));
+
+    __ bind(&ua_skip_pref);
+    __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
+    __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
+    __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
+    __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
+    __ lwl(a4,
+           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a5,
+           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a6,
+           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a7,
+           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t0,
+           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t1,
+           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t2,
+           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t3,
+           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    __ Pref(pref_hint_load, MemOperand(a1, 4 * pref_chunk));
+    __ sw(a4, MemOperand(a0));
+    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
+    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
+    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
+    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
+    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+    __ lwr(a4, MemOperand(a1, 8, loadstore_chunk));
+    __ lwr(a5, MemOperand(a1, 9, loadstore_chunk));
+    __ lwr(a6, MemOperand(a1, 10, loadstore_chunk));
+    __ lwr(a7, MemOperand(a1, 11, loadstore_chunk));
+    __ lwr(t0, MemOperand(a1, 12, loadstore_chunk));
+    __ lwr(t1, MemOperand(a1, 13, loadstore_chunk));
+    __ lwr(t2, MemOperand(a1, 14, loadstore_chunk));
+    __ lwr(t3, MemOperand(a1, 15, loadstore_chunk));
+    __ lwl(a4,
+           MemOperand(a1, 9, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a5,
+           MemOperand(a1, 10, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a6,
+           MemOperand(a1, 11, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a7,
+           MemOperand(a1, 12, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t0,
+           MemOperand(a1, 13, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t1,
+           MemOperand(a1, 14, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t2,
+           MemOperand(a1, 15, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t3,
+           MemOperand(a1, 16, loadstore_chunk, MemOperand::offset_minus_one));
+    __ Pref(pref_hint_load, MemOperand(a1, 5 * pref_chunk));
+    __ sw(a4, MemOperand(a0, 8, loadstore_chunk));
+    __ sw(a5, MemOperand(a0, 9, loadstore_chunk));
+    __ sw(a6, MemOperand(a0, 10, loadstore_chunk));
+    __ sw(a7, MemOperand(a0, 11, loadstore_chunk));
+    __ sw(t0, MemOperand(a0, 12, loadstore_chunk));
+    __ sw(t1, MemOperand(a0, 13, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 14, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 15, loadstore_chunk));
+    __ addiu(a0, a0, 16 * loadstore_chunk);
+    __ bne(a0, a3, &ua_loop16w);
+    __ addiu(a1, a1, 16 * loadstore_chunk);  // In delay slot.
+    __ mov(a2, t8);
+
+    // Here we have less than 64 bytes to go. Check for
+    // a 32-byte chunk and copy it if there is one. Otherwise jump down to
+    // ua_chk1w to handle the tail end of the copy.
+    __ bind(&ua_chkw);
+    __ Pref(pref_hint_load, MemOperand(a1));
+    __ andi(t8, a2, 0x1f);
+
+    __ beq(a2, t8, &ua_chk1w);
+    __ nop();  // In delay slot.
+    __ lwr(a4, MemOperand(a1));
+    __ lwr(a5, MemOperand(a1, 1, loadstore_chunk));
+    __ lwr(a6, MemOperand(a1, 2, loadstore_chunk));
+    __ lwr(a7, MemOperand(a1, 3, loadstore_chunk));
+    __ lwr(t0, MemOperand(a1, 4, loadstore_chunk));
+    __ lwr(t1, MemOperand(a1, 5, loadstore_chunk));
+    __ lwr(t2, MemOperand(a1, 6, loadstore_chunk));
+    __ lwr(t3, MemOperand(a1, 7, loadstore_chunk));
+    __ lwl(a4,
+           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a5,
+           MemOperand(a1, 2, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a6,
+           MemOperand(a1, 3, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(a7,
+           MemOperand(a1, 4, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t0,
+           MemOperand(a1, 5, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t1,
+           MemOperand(a1, 6, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t2,
+           MemOperand(a1, 7, loadstore_chunk, MemOperand::offset_minus_one));
+    __ lwl(t3,
+           MemOperand(a1, 8, loadstore_chunk, MemOperand::offset_minus_one));
+    __ addiu(a1, a1, 8 * loadstore_chunk);
+    __ sw(a4, MemOperand(a0));
+    __ sw(a5, MemOperand(a0, 1, loadstore_chunk));
+    __ sw(a6, MemOperand(a0, 2, loadstore_chunk));
+    __ sw(a7, MemOperand(a0, 3, loadstore_chunk));
+    __ sw(t0, MemOperand(a0, 4, loadstore_chunk));
+    __ sw(t1, MemOperand(a0, 5, loadstore_chunk));
+    __ sw(t2, MemOperand(a0, 6, loadstore_chunk));
+    __ sw(t3, MemOperand(a0, 7, loadstore_chunk));
+    __ addiu(a0, a0, 8 * loadstore_chunk);
+
+    // Less than 32 bytes to copy. Set up for a loop to
+    // copy one word at a time.
+    __ bind(&ua_chk1w);
+    __ andi(a2, t8, loadstore_chunk - 1);
+    __ beq(a2, t8, &ua_smallCopy);
+    __ subu(a3, t8, a2);  // In delay slot.
+    __ addu(a3, a0, a3);
+
+    __ bind(&ua_wordCopy_loop);
+    __ lwr(v1, MemOperand(a1));
+    __ lwl(v1,
+           MemOperand(a1, 1, loadstore_chunk, MemOperand::offset_minus_one));
+    __ addiu(a0, a0, loadstore_chunk);
+    __ addiu(a1, a1, loadstore_chunk);
+    __ bne(a0, a3, &ua_wordCopy_loop);
+    __ sw(v1, MemOperand(a0, -1, loadstore_chunk));  // In delay slot.
+
+    // Copy the remaining bytes one at a time.
+    __ bind(&ua_smallCopy);
+    __ beq(a2, zero_reg, &leave);
+    __ addu(a3, a0, a2);  // In delay slot.
+
+    __ bind(&ua_smallCopy_loop);
+    __ lb(v1, MemOperand(a1));
+    __ addiu(a0, a0, 1);
+    __ addiu(a1, a1, 1);
+    __ bne(a0, a3, &ua_smallCopy_loop);
+    __ sb(v1, MemOperand(a0, -1));  // In delay slot.
+
+    __ jr(ra);
+    __ nop();
+  }
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
+
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<MemCopyUint8Function>(buffer);
+#endif
+}
+#endif
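+
+// Editor's sketch (not part of this file): the overall strategy of the copy
+// stub above, in plain C, with the prefetches and the lwr/lwl unaligned path
+// omitted:
+//
+//   void* Copy(uint8_t* dst, const uint8_t* src, size_t n) {
+//     uint8_t* ret = dst;                       // v0 <- a0
+//     /* co-align dst/src when their offsets agree, then: */
+//     while (n >= 64) { /* 16 word loads + stores */ n -= 64; }  // loop16w
+//     if (n >= 32)    { /*  8 word loads + stores */ n -= 32; }  // chkw
+//     while (n >= 4)  { /*  1 word load + store   */ n -= 4;  }  // wordCopy
+//     while (n-- > 0) *dst++ = *src++;          // lastb/lastbloop byte tail
+//     return ret;
+//   }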
+
+UnaryMathFunction CreateSqrtFunction() {
+#if defined(USE_SIMULATOR)
+  return &std::sqrt;
+#else
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == NULL) return &std::sqrt;
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+  __ MovFromFloatParameter(f12);
+  __ sqrt_d(f0, f12);
+  __ MovToFloatResult(f0);
+  __ Ret();
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
+
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
+}
+
+#undef __
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+  masm->EnterFrame(StackFrame::INTERNAL);
+  DCHECK(!masm->has_frame());
+  masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  DCHECK(masm->has_frame());
+  masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* allocation_memento_found) {
+  Register scratch_elements = a4;
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     scratch_elements));
+
+  if (mode == TRACK_ALLOCATION_SITE) {
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch_elements, allocation_memento_found);
+  }
+
+  // Set transitioned map.
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
+                      HeapObject::kMapOffset,
+                      target_map,
+                      t1,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
+  Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  Register elements = a4;
+  Register length = a5;
+  Register array = a6;
+  Register array_end = array;
+
+  // target_map parameter can be clobbered.
+  Register scratch1 = target_map;
+  Register scratch2 = t1;
+  Register scratch3 = a7;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     elements, length, array, scratch2));
+
+  Register scratch = t2;
+  if (mode == TRACK_ALLOCATION_SITE) {
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+  }
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ Branch(&only_change_map, eq, at, Operand(elements));
+
+  __ push(ra);
+  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
+
+  // Allocate new FixedDoubleArray.
+  __ SmiScale(scratch, length, kDoubleSizeLog2);
+  __ Daddu(scratch, scratch, FixedDoubleArray::kHeaderSize);
+  __ Allocate(scratch, array, t3, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+  // array: destination FixedDoubleArray, not tagged as heap object
+
+  // Set destination FixedDoubleArray's length and map.
+  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ sd(scratch2, MemOperand(array, HeapObject::kMapOffset));
+
+  // Update receiver's map.
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
+                      HeapObject::kMapOffset,
+                      target_map,
+                      scratch2,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ Daddu(scratch1, array, Operand(kHeapObjectTag));
+  __ sd(scratch1, FieldMemOperand(a2, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
+                      JSObject::kElementsOffset,
+                      scratch1,
+                      scratch2,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+
+  // Prepare for conversion loop.
+  __ Daddu(scratch1, elements,
+      Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ Daddu(scratch3, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ SmiScale(array_end, length, kDoubleSizeLog2);
+  __ Daddu(array_end, array_end, scratch3);
+
+  // Repurpose registers no longer in use.
+  Register hole_lower = elements;
+  Register hole_upper = length;
+  __ li(hole_lower, Operand(kHoleNanLower32));
+  // scratch1: begin of source FixedArray element fields, not tagged
+  // hole_lower: kHoleNanLower32
+  // hole_upper: kHoleNanUpper32
+  // array_end: end of destination FixedDoubleArray, not tagged
+  // scratch3: begin of FixedDoubleArray element fields, not tagged
+  __ Branch(USE_DELAY_SLOT, &entry);
+  __ li(hole_upper, Operand(kHoleNanUpper32));  // In delay slot.
+
+  __ bind(&only_change_map);
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
+                      HeapObject::kMapOffset,
+                      target_map,
+                      scratch2,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ Branch(&done);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ ld(ra, MemOperand(sp, 0));
+  __ Branch(USE_DELAY_SLOT, fail);
+  __ daddiu(sp, sp, kPointerSize);  // In delay slot.
+
+  // Convert and copy elements.
+  __ bind(&loop);
+  __ ld(scratch2, MemOperand(scratch1));
+  __ Daddu(scratch1, scratch1, kIntSize);
+  // scratch2: current element
+  __ JumpIfNotSmi(scratch2, &convert_hole);
+  __ SmiUntag(scratch2);
+
+  // Normal smi, convert to double and store.
+  __ mtc1(scratch2, f0);
+  __ cvt_d_w(f0, f0);
+  __ sdc1(f0, MemOperand(scratch3));
+  __ Branch(USE_DELAY_SLOT, &entry);
+  __ daddiu(scratch3, scratch3, kDoubleSize);  // In delay slot.
+
+  // Hole found, store the-hole NaN.
+  __ bind(&convert_hole);
+  if (FLAG_debug_code) {
+    // Restore a "smi-untagged" heap object.
+    __ Or(scratch2, scratch2, Operand(1));
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    __ Assert(eq, kObjectFoundInSmiOnlyArray, at, Operand(scratch2));
+  }
+  // mantissa
+  __ sw(hole_lower, MemOperand(scratch3));
+  // exponent
+  __ sw(hole_upper, MemOperand(scratch3, kIntSize));
+  __ Daddu(scratch3, scratch3, kDoubleSize);
+
+  __ bind(&entry);
+  __ Branch(&loop, lt, scratch3, Operand(array_end));
+
+  __ bind(&done);
+  __ pop(ra);
+}
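+// Editor's sketch (not part of this file): the conversion loop above in
+// plain C. On this 64-bit port a smi keeps its payload in the upper 32 bits
+// with tag bit 0 clear; kHoleNan below is shorthand (not a V8 name) for the
+// 64-bit pattern ((uint64_t)kHoleNanUpper32 << 32) | kHoleNanLower32.
+//
+//   for (intptr_t i = 0; i < length; i++) {
+//     int64_t element = src[i];
+//     if ((element & 1) == 0) {                     // smi
+//       dst[i] = (double)(int32_t)(element >> 32);  // untag, int -> double
+//     } else {                                      // must be the hole
+//       memcpy(&dst[i], &kHoleNan, sizeof(double)); // store hole NaN
+//     }
+//   }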
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Register ra contains the return address.
+  Label entry, loop, convert_hole, gc_required, only_change_map;
+  Register elements = a4;
+  Register array = a6;
+  Register length = a5;
+  Register scratch = t1;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map,
+                     elements, array, length, scratch));
+  if (mode == TRACK_ALLOCATION_SITE) {
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+  }
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ LoadRoot(at, Heap::kEmptyFixedArrayRootIndex);
+  __ Branch(&only_change_map, eq, at, Operand(elements));
+
+  __ MultiPush(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
+
+  __ ld(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedArray
+  // length: number of elements (smi-tagged)
+
+  // Allocate new FixedArray.
+  // Re-use value and target_map registers, as they have been saved on the
+  // stack.
+  Register array_size = value;
+  Register allocate_scratch = target_map;
+  __ SmiScale(array_size, length, kPointerSizeLog2);
+  __ Daddu(array_size, array_size, FixedDoubleArray::kHeaderSize);
+  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+              NO_ALLOCATION_FLAGS);
+  // array: destination FixedArray, not tagged as heap object
+  // Set destination FixedArray's length and map.
+  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+  __ sd(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ sd(scratch, MemOperand(array, HeapObject::kMapOffset));
+
+  // Prepare for conversion loop.
+  Register src_elements = elements;
+  Register dst_elements = target_map;
+  Register dst_end = length;
+  Register heap_number_map = scratch;
+  __ Daddu(src_elements, src_elements,
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4));
+  __ Daddu(dst_elements, array, Operand(FixedArray::kHeaderSize));
+  __ Daddu(array, array, Operand(kHeapObjectTag));
+  __ SmiScale(dst_end, dst_end, kPointerSizeLog2);
+  __ Daddu(dst_end, dst_elements, dst_end);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  // Using offset addresses.
+  // dst_elements: begin of destination FixedArray element fields, not tagged
+  // src_elements: begin of source FixedDoubleArray element fields, not tagged,
+  //               points to the exponent
+  // dst_end: end of destination FixedArray, not tagged
+  // array: destination FixedArray
+  // heap_number_map: heap number map
+  __ Branch(&entry);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ MultiPop(
+      value.bit() | key.bit() | receiver.bit() | target_map.bit() | ra.bit());
+
+  __ Branch(fail);
+
+  __ bind(&loop);
+  Register upper_bits = key;
+  __ lw(upper_bits, MemOperand(src_elements));
+  __ Daddu(src_elements, src_elements, kDoubleSize);
+  // upper_bits: current element's upper 32 bits
+  // src_elements: address of next element's upper 32 bits
+  __ Branch(&convert_hole, eq, a1, Operand(kHoleNanUpper32));
+
+  // Non-hole double, copy value into a heap number.
+  Register heap_number = receiver;
+  Register scratch2 = value;
+  Register scratch3 = t2;
+  __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
+                        &gc_required);
+  // heap_number: new heap number
+  // Load the mantissa of the current element; src_elements now
+  // points to the exponent of the next element.
+  __ lw(scratch2, MemOperand(heap_number, -12));
+  __ sw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+  __ sw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+  __ mov(scratch2, dst_elements);
+  __ sd(heap_number, MemOperand(dst_elements));
+  __ Daddu(dst_elements, dst_elements, kPointerSize);
+  __ RecordWrite(array,
+                 scratch2,
+                 heap_number,
+                 kRAHasBeenSaved,
+                 kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET,
+                 OMIT_SMI_CHECK);
+  __ Branch(&entry);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ LoadRoot(scratch2, Heap::kTheHoleValueRootIndex);
+  __ sd(scratch2, MemOperand(dst_elements));
+  __ Daddu(dst_elements, dst_elements, kPointerSize);
+
+  __ bind(&entry);
+  __ Branch(&loop, lt, dst_elements, Operand(dst_end));
+
+  __ MultiPop(receiver.bit() | target_map.bit() | value.bit() | key.bit());
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ sd(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ RecordWriteField(receiver,
+                      JSObject::kElementsOffset,
+                      array,
+                      scratch,
+                      kRAHasBeenSaved,
+                      kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ pop(ra);
+
+  __ bind(&only_change_map);
+  // Update receiver's map.
+  __ sd(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ RecordWriteField(receiver,
+                      HeapObject::kMapOffset,
+                      target_map,
+                      scratch,
+                      kRAHasNotBeenSaved,
+                      kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
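+// Editor's sketch (not part of this file) of the loop above: every non-hole
+// double is boxed into a freshly allocated HeapNumber, and every hole NaN
+// becomes the canonical the-hole pointer. bits(), the_hole and
+// NewHeapNumber() are placeholders, not V8 API.
+//
+//   for (intptr_t i = 0; i < length; i++) {
+//     uint32_t upper = (uint32_t)(bits(src[i]) >> 32);
+//     dst[i] = (upper == kHoleNanUpper32) ? the_hole : NewHeapNumber(src[i]);
+//   }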
+
+
+void StringCharLoadGenerator::Generate(MacroAssembler* masm,
+                                       Register string,
+                                       Register index,
+                                       Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ And(at, result, Operand(kIsIndirectStringMask));
+  __ Branch(&check_sequential, eq, at, Operand(zero_reg));
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ And(at, result, Operand(kSlicedNotConsMask));
+  __ Branch(&cons_string, eq, at, Operand(zero_reg));
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ ld(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ ld(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ dsra32(at, result, 0);
+  __ Daddu(index, index, at);
+  __ jmp(&indirect_string_loaded);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ ld(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ LoadRoot(at, Heap::kempty_stringRootIndex);
+  __ Branch(call_runtime, ne, result, Operand(at));
+  // Get the first of the two strings and load its instance type.
+  __ ld(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ ld(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ And(at, result, Operand(kStringRepresentationMask));
+  __ Branch(&external_string, ne, at, Operand(zero_reg));
+
+  // Prepare sequential strings
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ Daddu(string,
+          string,
+          SeqTwoByteString::kHeaderSize - kHeapObjectTag);
+  __ jmp(&check_encoding);
+
+  // Handle external strings.
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ And(at, result, Operand(kIsIndirectStringMask));
+    __ Assert(eq, kExternalStringExpectedButNotFound,
+        at, Operand(zero_reg));
+  }
+  // Rule out short external strings.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ And(at, result, Operand(kShortExternalStringMask));
+  __ Branch(call_runtime, ne, at, Operand(zero_reg));
+  __ ld(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label one_byte, done;
+  __ bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ And(at, result, Operand(kStringEncodingMask));
+  __ Branch(&one_byte, ne, at, Operand(zero_reg));
+  // Two-byte string.
+  __ dsll(at, index, 1);
+  __ Daddu(at, string, at);
+  __ lhu(result, MemOperand(at));
+  __ jmp(&done);
+  __ bind(&one_byte);
+  // One-byte string.
+  __ Daddu(at, string, index);
+  __ lbu(result, MemOperand(at));
+  __ bind(&done);
+}
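+// Editor's sketch (not part of this file) of the dispatch implemented above;
+// all helper names are placeholders, not V8 API:
+//
+//   if (is_indirect(type)) {
+//     if (is_slice(type)) { index += slice_offset; string = parent; }
+//     else /* cons */     { if (second != empty_string) goto runtime;
+//                           string = first; }
+//     type = instance_type(string);  // re-fetch for the underlying string
+//   }
+//   if (is_external(type)) {
+//     if (is_short_external(type)) goto runtime;
+//     data = resource_data(string);
+//   } else {                         // sequential
+//     data = string + header_size;
+//   }
+//   result = is_two_byte(type) ? ((uint16_t*)data)[index]
+//                              : ((uint8_t*)data)[index];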
+
+
+static MemOperand ExpConstant(int index, Register base) {
+  return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
+                                   DoubleRegister input,
+                                   DoubleRegister result,
+                                   DoubleRegister double_scratch1,
+                                   DoubleRegister double_scratch2,
+                                   Register temp1,
+                                   Register temp2,
+                                   Register temp3) {
+  DCHECK(!input.is(result));
+  DCHECK(!input.is(double_scratch1));
+  DCHECK(!input.is(double_scratch2));
+  DCHECK(!result.is(double_scratch1));
+  DCHECK(!result.is(double_scratch2));
+  DCHECK(!double_scratch1.is(double_scratch2));
+  DCHECK(!temp1.is(temp2));
+  DCHECK(!temp1.is(temp3));
+  DCHECK(!temp2.is(temp3));
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!masm->serializer_enabled());  // External references not serializable.
+
+  Label zero, infinity, done;
+  __ li(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+  __ ldc1(double_scratch1, ExpConstant(0, temp3));
+  __ BranchF(&zero, NULL, ge, double_scratch1, input);
+
+  __ ldc1(double_scratch2, ExpConstant(1, temp3));
+  __ BranchF(&infinity, NULL, ge, input, double_scratch2);
+
+  __ ldc1(double_scratch1, ExpConstant(3, temp3));
+  __ ldc1(result, ExpConstant(4, temp3));
+  __ mul_d(double_scratch1, double_scratch1, input);
+  __ add_d(double_scratch1, double_scratch1, result);
+  __ FmoveLow(temp2, double_scratch1);
+  __ sub_d(double_scratch1, double_scratch1, result);
+  __ ldc1(result, ExpConstant(6, temp3));
+  __ ldc1(double_scratch2, ExpConstant(5, temp3));
+  __ mul_d(double_scratch1, double_scratch1, double_scratch2);
+  __ sub_d(double_scratch1, double_scratch1, input);
+  __ sub_d(result, result, double_scratch1);
+  __ mul_d(double_scratch2, double_scratch1, double_scratch1);
+  __ mul_d(result, result, double_scratch2);
+  __ ldc1(double_scratch2, ExpConstant(7, temp3));
+  __ mul_d(result, result, double_scratch2);
+  __ sub_d(result, result, double_scratch1);
+  // Move 1 into double_scratch2, as math_exp_constants_array[8] == 1.
+  DCHECK(*reinterpret_cast<double*>
+         (ExternalReference::math_exp_constants(8).address()) == 1);
+  __ Move(double_scratch2, 1);
+  __ add_d(result, result, double_scratch2);
+  __ dsrl(temp1, temp2, 11);
+  __ Ext(temp2, temp2, 0, 11);
+  __ Daddu(temp1, temp1, Operand(0x3ff));
+
+  // Must not call ExpConstant() after overwriting temp3!
+  __ li(temp3, Operand(ExternalReference::math_exp_log_table()));
+  __ dsll(at, temp2, 3);
+  __ Daddu(temp3, temp3, Operand(at));
+  __ lwu(temp2, MemOperand(temp3, 0));
+  __ lwu(temp3, MemOperand(temp3, kIntSize));
+  // The first word loaded goes into the lower-numbered register.
+  if (temp2.code() < temp3.code()) {
+    __ dsll(at, temp1, 20);
+    __ Or(temp1, temp3, at);
+    __ Move(double_scratch1, temp2, temp1);
+  } else {
+    __ dsll(at, temp1, 20);
+    __ Or(temp1, temp2, at);
+    __ Move(double_scratch1, temp3, temp1);
+  }
+  __ mul_d(result, result, double_scratch1);
+  __ BranchShort(&done);
+
+  __ bind(&zero);
+  __ Move(result, kDoubleRegZero);
+  __ BranchShort(&done);
+
+  __ bind(&infinity);
+  __ ldc1(result, ExpConstant(2, temp3));
+
+  __ bind(&done);
+}
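+// Editor's note (a sketch of the algorithm, not a spec): EmitMathExp()
+// evaluates exp(x) as 2^(n + j/2^11) * exp(r). The FmoveLow above captures
+// round(x / ln(2) * 2^11) via the add-magic-constant trick; its low 11 bits
+// j (the Ext) index math_exp_log_table, which holds precomputed 2^(j/2^11)
+// values, while the high bits n (the dsrl by 11) are biased by 0x3ff and
+// shifted into the IEEE-754 exponent field (the dsll by 20 into the upper
+// word). The small remainder r is covered by the polynomial built from
+// ExpConstant(3..7). Inputs below ExpConstant(0) flush to zero; inputs
+// above ExpConstant(1) go to infinity.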
+
+#ifdef DEBUG
+// nop(CODE_AGE_MARKER_NOP)
+static const uint32_t kCodeAgePatchFirstInstruction = 0x00010180;
+#endif
+
+
+CodeAgingHelper::CodeAgingHelper() {
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  // Since patcher is a large object, allocate it dynamically when needed,
+  // to avoid overloading the stack in stress conditions.
+  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+  // the process, before MIPS simulator ICache is setup.
+  SmartPointer<CodePatcher> patcher(
+      new CodePatcher(young_sequence_.start(),
+                      young_sequence_.length() / Assembler::kInstrSize,
+                      CodePatcher::DONT_FLUSH));
+  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+  patcher->masm()->Push(ra, fp, cp, a1);
+  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+  patcher->masm()->nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+  patcher->masm()->Daddu(
+      fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+  return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+  bool result = isolate->code_aging_helper()->IsYoung(sequence);
+  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
+  return result;
+}
+
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+                               MarkingParity* parity) {
+  if (IsYoungSequence(isolate, sequence)) {
+    *age = kNoAgeCodeAge;
+    *parity = NO_MARKING_PARITY;
+  } else {
+    Address target_address = Assembler::target_address_at(
+        sequence + Assembler::kInstrSize);
+    Code* stub = GetCodeFromTargetAddress(target_address);
+    GetCodeAgeAndParity(stub, age, parity);
+  }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate,
+                                byte* sequence,
+                                Code::Age age,
+                                MarkingParity parity) {
+  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+  if (age == kNoAgeCodeAge) {
+    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+    CpuFeatures::FlushICache(sequence, young_length);
+  } else {
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+    // Mark this code sequence for FindPlatformCodeAgeSequence().
+    patcher.masm()->nop(Assembler::CODE_AGE_MARKER_NOP);
+    // Load the stub address into t9 and call it;
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    patcher.masm()->li(
+        t9,
+        Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
+        ADDRESS_LOAD);
+    patcher.masm()->nop();  // Prevent jalr to jal optimization.
+    patcher.masm()->jalr(t9, a0);
+    patcher.masm()->nop();  // Branch delay slot nop.
+    patcher.masm()->nop();  // Pad the empty space.
+  }
+}
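+// Editor's note (a sketch, not a spec): both prologue shapes occupy the same
+// fixed number of instructions, so aging is an in-place patch:
+//
+//   young:  push ra,fp,cp,a1 ; nop ; nop ; nop ; daddu fp, sp, #fixed
+//   old:    nop(MARKER) ; li t9, <age stub> ; nop ; jalr t9, a0 ; nop ; nop
+//
+// IsOld() therefore only needs to compare the first instruction word, and
+// GetCodeAgeAndParity() reads the stub address back out of the li sequence.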
+
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/codegen-mips64.h b/src/mips64/codegen-mips64.h
new file mode 100644
index 0000000..b02ec4f
--- /dev/null
+++ b/src/mips64/codegen-mips64.h
@@ -0,0 +1,54 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifndef V8_MIPS64_CODEGEN_MIPS64_H_
+#define V8_MIPS64_CODEGEN_MIPS64_H_
+
+
+#include "src/ast.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm,
+                       Register string,
+                       Register index,
+                       Register result,
+                       Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+
+class MathExpGenerator : public AllStatic {
+ public:
+  // Register input isn't modified. All other registers are clobbered.
+  static void EmitMathExp(MacroAssembler* masm,
+                          DoubleRegister input,
+                          DoubleRegister result,
+                          DoubleRegister double_scratch1,
+                          DoubleRegister double_scratch2,
+                          Register temp1,
+                          Register temp2,
+                          Register temp3);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS64_CODEGEN_MIPS64_H_
diff --git a/src/mips64/constants-mips64.cc b/src/mips64/constants-mips64.cc
new file mode 100644
index 0000000..dfd6243
--- /dev/null
+++ b/src/mips64/constants-mips64.cc
@@ -0,0 +1,362 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/mips64/constants-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+
+// -----------------------------------------------------------------------------
+// Registers.
+
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumSimuRegisters] = {
+  "zero_reg",
+  "at",
+  "v0", "v1",
+  "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
+  "t0", "t1", "t2", "t3",
+  "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7",
+  "t8", "t9",
+  "k0", "k1",
+  "gp",
+  "sp",
+  "fp",
+  "ra",
+  "LO", "HI",
+  "pc"
+};
+
+
+// List of alias names which can be used when referring to MIPS registers.
+const Registers::RegisterAlias Registers::aliases_[] = {
+  {0, "zero"},
+  {23, "cp"},
+  {30, "s8"},
+  {30, "s8_fp"},
+  {kInvalidRegister, NULL}
+};
+
+
+const char* Registers::Name(int reg) {
+  const char* result;
+  if ((0 <= reg) && (reg < kNumSimuRegisters)) {
+    result = names_[reg];
+  } else {
+    result = "noreg";
+  }
+  return result;
+}
+
+
+int Registers::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumSimuRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // Look through the alias names.
+  int i = 0;
+  while (aliases_[i].reg != kInvalidRegister) {
+    if (strcmp(aliases_[i].name, name) == 0) {
+      return aliases_[i].reg;
+    }
+    i++;
+  }
+
+  // No register with the requested name found.
+  return kInvalidRegister;
+}
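+// Editor's example of the lookup above; the values follow from names_ and
+// aliases_ as defined in this file:
+//
+//   Registers::Number("fp")    == 30
+//   Registers::Number("s8")    == 30               // via aliases_
+//   Registers::Number("bogus") == kInvalidRegister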
+
+
+const char* FPURegisters::names_[kNumFPURegisters] = {
+  "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10", "f11",
+  "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
+  "f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"
+};
+
+
+// List of alias names which can be used when referring to MIPS FPU registers.
+const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
+  {kInvalidRegister, NULL}
+};
+
+
+const char* FPURegisters::Name(int creg) {
+  const char* result;
+  if ((0 <= creg) && (creg < kNumFPURegisters)) {
+    result = names_[creg];
+  } else {
+    result = "nocreg";
+  }
+  return result;
+}
+
+
+int FPURegisters::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumFPURegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // Look through the alias names.
+  int i = 0;
+  while (aliases_[i].creg != kInvalidRegister) {
+    if (strcmp(aliases_[i].name, name) == 0) {
+      return aliases_[i].creg;
+    }
+    i++;
+  }
+
+  // No FPU register with the requested name found.
+  return kInvalidFPURegister;
+}
+
+
+// -----------------------------------------------------------------------------
+// Instructions.
+
+bool Instruction::IsForbiddenInBranchDelay() const {
+  const int op = OpcodeFieldRaw();
+  switch (op) {
+    case J:
+    case JAL:
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+    case BEQL:
+    case BNEL:
+    case BLEZL:
+    case BGTZL:
+      return true;
+    case REGIMM:
+      switch (RtFieldRaw()) {
+        case BLTZ:
+        case BGEZ:
+        case BLTZAL:
+        case BGEZAL:
+          return true;
+        default:
+          return false;
+      }
+      break;
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+          return true;
+        default:
+          return false;
+      }
+      break;
+    default:
+      return false;
+  }
+}
+
+
+bool Instruction::IsLinkingInstruction() const {
+  const int op = OpcodeFieldRaw();
+  switch (op) {
+    case JAL:
+      return true;
+    case REGIMM:
+      switch (RtFieldRaw()) {
+        case BGEZAL:
+        case BLTZAL:
+          return true;
+      default:
+        return false;
+      }
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JALR:
+          return true;
+        default:
+          return false;
+      }
+    default:
+      return false;
+  }
+}
+
+
+bool Instruction::IsTrap() const {
+  if (OpcodeFieldRaw() != SPECIAL) {
+    return false;
+  } else {
+    switch (FunctionFieldRaw()) {
+      case BREAK:
+      case TGE:
+      case TGEU:
+      case TLT:
+      case TLTU:
+      case TEQ:
+      case TNE:
+        return true;
+      default:
+        return false;
+    }
+  }
+}
+
+
+Instruction::Type Instruction::InstructionType() const {
+  switch (OpcodeFieldRaw()) {
+    case SPECIAL:
+      switch (FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+        case BREAK:
+        case SLL:
+        case DSLL:
+        case DSLL32:
+        case SRL:
+        case DSRL:
+        case DSRL32:
+        case SRA:
+        case DSRA:
+        case DSRA32:
+        case SLLV:
+        case DSLLV:
+        case SRLV:
+        case DSRLV:
+        case SRAV:
+        case DSRAV:
+        case MFHI:
+        case MFLO:
+        case MULT:
+        case DMULT:
+        case MULTU:
+        case DMULTU:
+        case DIV:
+        case DDIV:
+        case DIVU:
+        case DDIVU:
+        case ADD:
+        case DADD:
+        case ADDU:
+        case DADDU:
+        case SUB:
+        case DSUB:
+        case SUBU:
+        case DSUBU:
+        case AND:
+        case OR:
+        case XOR:
+        case NOR:
+        case SLT:
+        case SLTU:
+        case TGE:
+        case TGEU:
+        case TLT:
+        case TLTU:
+        case TEQ:
+        case TNE:
+        case MOVZ:
+        case MOVN:
+        case MOVCI:
+          return kRegisterType;
+        default:
+          return kUnsupported;
+      }
+      break;
+    case SPECIAL2:
+      switch (FunctionFieldRaw()) {
+        case MUL:
+        case CLZ:
+          return kRegisterType;
+        default:
+          return kUnsupported;
+      }
+      break;
+    case SPECIAL3:
+      switch (FunctionFieldRaw()) {
+        case INS:
+        case EXT:
+          return kRegisterType;
+        default:
+          return kUnsupported;
+      }
+      break;
+    case COP1:    // Coprocessor instructions.
+      switch (RsFieldRawNoAssert()) {
+        case BC1:   // Branch on coprocessor condition.
+        case BC1EQZ:
+        case BC1NEZ:
+          return kImmediateType;
+        default:
+          return kRegisterType;
+      }
+      break;
+    case COP1X:
+      return kRegisterType;
+    // 16-bit immediate type instructions, e.g. addi dest, src, imm16.
+    case REGIMM:
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+    case ADDI:
+    case DADDI:
+    case ADDIU:
+    case DADDIU:
+    case SLTI:
+    case SLTIU:
+    case ANDI:
+    case ORI:
+    case XORI:
+    case LUI:
+    case BEQL:
+    case BNEL:
+    case BLEZL:
+    case BGTZL:
+    case BEQZC:
+    case BNEZC:
+    case LB:
+    case LH:
+    case LWL:
+    case LW:
+    case LWU:
+    case LD:
+    case LBU:
+    case LHU:
+    case LWR:
+    case SB:
+    case SH:
+    case SWL:
+    case SW:
+    case SD:
+    case SWR:
+    case LWC1:
+    case LDC1:
+    case SWC1:
+    case SDC1:
+      return kImmediateType;
+    // 26-bit immediate type instructions, e.g. j imm26.
+    case J:
+    case JAL:
+      return kJumpType;
+    default:
+      return kUnsupported;
+  }
+  return kUnsupported;
+}
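+// Editor's example of the classifier above: 0x24420001 (addiu v0, v0, 1)
+// decodes as kImmediateType, 0x0c000000 (jal 0) as kJumpType, and
+// 0x00000025 (or zero_reg, zero_reg, zero_reg) as kRegisterType.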
+
+
+} }   // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/constants-mips64.h b/src/mips64/constants-mips64.h
new file mode 100644
index 0000000..521869b
--- /dev/null
+++ b/src/mips64/constants-mips64.h
@@ -0,0 +1,952 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef  V8_MIPS_CONSTANTS_H_
+#define  V8_MIPS_CONSTANTS_H_
+
+// UNIMPLEMENTED_ macro for MIPS.
+#ifdef DEBUG
+#define UNIMPLEMENTED_MIPS()                                                  \
+  v8::internal::PrintF("%s, \tline %d: \tfunction %s not implemented. \n",    \
+                       __FILE__, __LINE__, __func__)
+#else
+#define UNIMPLEMENTED_MIPS()
+#endif
+
+#define UNSUPPORTED_MIPS() v8::internal::PrintF("Unsupported instruction.\n")
+
+enum ArchVariants {
+  kMips64r2,
+  kMips64r6
+};
+
+
+#ifdef _MIPS_ARCH_MIPS64R2
+  static const ArchVariants kArchVariant = kMips64r2;
+#elif  _MIPS_ARCH_MIPS64R6
+  static const ArchVariants kArchVariant = kMips64r6;
+#else
+  static const ArchVariants kArchVariant = kMips64r2;
+#endif
+
+
+// TODO(plind): consider deriving ABI from compiler flags or build system.
+
+// ABI-dependent definitions are made with #define in simulator-mips64.h,
+// so the ABI choice must be available to the pre-processor. However, in all
+// other cases, we should use the enum AbiVariants with normal if statements.
+
+#define MIPS_ABI_N64 1
+// #define MIPS_ABI_O32 1
+
+// The only supported ABIs are O32 and N64.
+enum AbiVariants {
+  kO32,
+  kN64  // Use upper case N for 'n64' ABI to conform to style standard.
+};
+
+#ifdef MIPS_ABI_N64
+static const AbiVariants kMipsAbi = kN64;
+#else
+static const AbiVariants kMipsAbi = kO32;
+#endif
+
+
+// TODO(plind): consider renaming these ...
+#if(defined(__mips_hard_float) && __mips_hard_float != 0)
+// Use floating-point coprocessor instructions. This flag is raised when
+// -mhard-float is passed to the compiler.
+const bool IsMipsSoftFloatABI = false;
+#elif(defined(__mips_soft_float) && __mips_soft_float != 0)
+// This flag is raised when -msoft-float is passed to the compiler.
+// Although FPU is a base requirement for v8, soft-float ABI is used
+// on soft-float systems with FPU kernel emulation.
+const bool IsMipsSoftFloatABI = true;
+#else
+const bool IsMipsSoftFloatABI = true;
+#endif
+
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+#include <inttypes.h>
+
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate MIPS32 instructions.
+//
+// See: MIPS32 Architecture For Programmers
+//      Volume II: The MIPS32 Instruction Set
+// Try www.cs.cornell.edu/courses/cs3410/2008fa/MIPS_Vol2.pdf.
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Registers and FPURegisters.
+
+// Number of general purpose registers.
+const int kNumRegisters = 32;
+const int kInvalidRegister = -1;
+
+// Number of simulated registers, including HI, LO, and pc.
+const int kNumSimuRegisters = 35;
+
+// In the simulator, the PC is simulated as register number 34.
+const int kPCRegister = 34;
+
+// Number of coprocessor registers.
+const int kNumFPURegisters = 32;
+const int kInvalidFPURegister = -1;
+
+// FPU (coprocessor 1) control registers. Currently only FCSR is implemented.
+const int kFCSRRegister = 31;
+const int kInvalidFPUControlRegister = -1;
+const uint32_t kFPUInvalidResult = static_cast<uint32_t>(1 << 31) - 1;
+const uint64_t kFPU64InvalidResult =
+    static_cast<uint64_t>(static_cast<uint64_t>(1) << 63) - 1;
+
+// FCSR constants.
+const uint32_t kFCSRInexactFlagBit = 2;
+const uint32_t kFCSRUnderflowFlagBit = 3;
+const uint32_t kFCSROverflowFlagBit = 4;
+const uint32_t kFCSRDivideByZeroFlagBit = 5;
+const uint32_t kFCSRInvalidOpFlagBit = 6;
+
+const uint32_t kFCSRInexactFlagMask = 1 << kFCSRInexactFlagBit;
+const uint32_t kFCSRUnderflowFlagMask = 1 << kFCSRUnderflowFlagBit;
+const uint32_t kFCSROverflowFlagMask = 1 << kFCSROverflowFlagBit;
+const uint32_t kFCSRDivideByZeroFlagMask = 1 << kFCSRDivideByZeroFlagBit;
+const uint32_t kFCSRInvalidOpFlagMask = 1 << kFCSRInvalidOpFlagBit;
+
+const uint32_t kFCSRFlagMask =
+    kFCSRInexactFlagMask |
+    kFCSRUnderflowFlagMask |
+    kFCSROverflowFlagMask |
+    kFCSRDivideByZeroFlagMask |
+    kFCSRInvalidOpFlagMask;
+
+const uint32_t kFCSRExceptionFlagMask = kFCSRFlagMask ^ kFCSRInexactFlagMask;
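+
+// Editor's note, worked out numerically: kFCSRFlagMask == 0x7c (bits 2..6)
+// and kFCSRExceptionFlagMask == 0x78 (the same bits with inexact removed).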
+
+// 'pref' instruction hints
+const int32_t kPrefHintLoad = 0;
+const int32_t kPrefHintStore = 1;
+const int32_t kPrefHintLoadStreamed = 4;
+const int32_t kPrefHintStoreStreamed = 5;
+const int32_t kPrefHintLoadRetained = 6;
+const int32_t kPrefHintStoreRetained = 7;
+const int32_t kPrefHintWritebackInvalidate = 25;
+const int32_t kPrefHintPrepareForStore = 30;
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+  struct RegisterAlias {
+    int reg;
+    const char* name;
+  };
+
+  static const int64_t kMaxValue = 0x7fffffffffffffffl;
+  static const int64_t kMinValue = 0x8000000000000000l;
+
+ private:
+  static const char* names_[kNumSimuRegisters];
+  static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between register numbers and names.
+class FPURegisters {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+  struct RegisterAlias {
+    int creg;
+    const char* name;
+  };
+
+ private:
+  static const char* names_[kNumFPURegisters];
+  static const RegisterAlias aliases_[];
+};
+
+
+// -----------------------------------------------------------------------------
+// Instructions encoding constants.
+
+// On MIPS all instructions are 32 bits.
+typedef int32_t Instr;
+
+// Special Software Interrupt codes when used in the presence of the MIPS
+// simulator.
+enum SoftwareInterruptCodes {
+  // Transition to C code.
+  call_rt_redirected = 0xfffff
+};
+
+// On MIPS Simulator breakpoints can have different codes:
+// - Breaks between 0 and kMaxWatchpointCode are treated as simple watchpoints,
+//   the simulator will run through them and print the registers.
+// - Breaks between kMaxWatchpointCode and kMaxStopCode are treated as stop()
+//   instructions (see Assembler::stop()).
+// - Breaks larger than kMaxStopCode are simple breaks, dropping you into the
+//   debugger.
+const uint32_t kMaxWatchpointCode = 31;
+const uint32_t kMaxStopCode = 127;
+STATIC_ASSERT(kMaxWatchpointCode < kMaxStopCode);
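+
+// Classification sketch (assuming inclusive bounds as described above):
+//
+//   code <= kMaxWatchpointCode  ->  watchpoint, registers are printed;
+//   code <= kMaxStopCode        ->  stop() instruction;
+//   otherwise                   ->  plain break, drops into the debugger.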
+
+
+// ----- Fields offset and length.
+const int kOpcodeShift   = 26;
+const int kOpcodeBits    = 6;
+const int kRsShift       = 21;
+const int kRsBits        = 5;
+const int kRtShift       = 16;
+const int kRtBits        = 5;
+const int kRdShift       = 11;
+const int kRdBits        = 5;
+const int kSaShift       = 6;
+const int kSaBits        = 5;
+const int kFunctionShift = 0;
+const int kFunctionBits  = 6;
+const int kLuiShift      = 16;
+
+const int kImm16Shift = 0;
+const int kImm16Bits  = 16;
+const int kImm21Shift = 0;
+const int kImm21Bits  = 21;
+const int kImm26Shift = 0;
+const int kImm26Bits  = 26;
+const int kImm28Shift = 0;
+const int kImm28Bits  = 28;
+const int kImm32Shift = 0;
+const int kImm32Bits  = 32;
+
+// In branches and jumps, immediate fields point to words, not bytes,
+// and are therefore shifted by 2.
+const int kImmFieldShift = 2;
+
+const int kFrBits        = 5;
+const int kFrShift       = 21;
+const int kFsShift       = 11;
+const int kFsBits        = 5;
+const int kFtShift       = 16;
+const int kFtBits        = 5;
+const int kFdShift       = 6;
+const int kFdBits        = 5;
+const int kFCccShift     = 8;
+const int kFCccBits      = 3;
+const int kFBccShift     = 18;
+const int kFBccBits      = 3;
+const int kFBtrueShift   = 16;
+const int kFBtrueBits    = 1;
+
+// ----- Miscellaneous useful masks.
+// Instruction bit masks.
+const int  kOpcodeMask   = ((1 << kOpcodeBits) - 1) << kOpcodeShift;
+const int  kImm16Mask    = ((1 << kImm16Bits) - 1) << kImm16Shift;
+const int  kImm26Mask    = ((1 << kImm26Bits) - 1) << kImm26Shift;
+const int  kImm28Mask    = ((1 << kImm28Bits) - 1) << kImm28Shift;
+const int  kRsFieldMask  = ((1 << kRsBits) - 1) << kRsShift;
+const int  kRtFieldMask  = ((1 << kRtBits) - 1) << kRtShift;
+const int  kRdFieldMask  = ((1 << kRdBits) - 1) << kRdShift;
+const int  kSaFieldMask  = ((1 << kSaBits) - 1) << kSaShift;
+const int  kFunctionFieldMask = ((1 << kFunctionBits) - 1) << kFunctionShift;
+// Misc masks.
+const int  kHiMask       =   0xffff << 16;
+const int  kLoMask       =   0xffff;
+const int  kSignMask     =   0x80000000;
+const int  kJumpAddrMask = (1 << (kImm26Bits + kImmFieldShift)) - 1;
+const int64_t  kHi16MaskOf64 =   (int64_t)0xffff << 48;
+const int64_t  kSe16MaskOf64 =   (int64_t)0xffff << 32;
+const int64_t  kTh16MaskOf64 =   (int64_t)0xffff << 16;
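+
+// Worked example (illustrative): for an instruction word 'instr', the rs
+// register number can be recovered either with the shift/width pair or with
+// the precomputed mask:
+//
+//   int rs = (instr >> kRsShift) & ((1 << kRsBits) - 1);  // 0..31.
+//   int rs_field = instr & kRsFieldMask;  // Field left in place.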
+
+// ----- MIPS Opcodes and Function Fields.
+// We use this presentation to stay close to the table representation in
+// MIPS32 Architecture For Programmers, Volume II: The MIPS32 Instruction Set.
+enum Opcode {
+  SPECIAL   =   0 << kOpcodeShift,
+  REGIMM    =   1 << kOpcodeShift,
+
+  J         =   ((0 << 3) + 2) << kOpcodeShift,
+  JAL       =   ((0 << 3) + 3) << kOpcodeShift,
+  BEQ       =   ((0 << 3) + 4) << kOpcodeShift,
+  BNE       =   ((0 << 3) + 5) << kOpcodeShift,
+  BLEZ      =   ((0 << 3) + 6) << kOpcodeShift,
+  BGTZ      =   ((0 << 3) + 7) << kOpcodeShift,
+
+  ADDI      =   ((1 << 3) + 0) << kOpcodeShift,
+  ADDIU     =   ((1 << 3) + 1) << kOpcodeShift,
+  SLTI      =   ((1 << 3) + 2) << kOpcodeShift,
+  SLTIU     =   ((1 << 3) + 3) << kOpcodeShift,
+  ANDI      =   ((1 << 3) + 4) << kOpcodeShift,
+  ORI       =   ((1 << 3) + 5) << kOpcodeShift,
+  XORI      =   ((1 << 3) + 6) << kOpcodeShift,
+  LUI       =   ((1 << 3) + 7) << kOpcodeShift,  // LUI/AUI family.
+  DAUI      =   ((3 << 3) + 5) << kOpcodeShift,
+
+  BEQC      =   ((2 << 3) + 0) << kOpcodeShift,
+  COP1      =   ((2 << 3) + 1) << kOpcodeShift,  // Coprocessor 1 class.
+  BEQL      =   ((2 << 3) + 4) << kOpcodeShift,
+  BNEL      =   ((2 << 3) + 5) << kOpcodeShift,
+  BLEZL     =   ((2 << 3) + 6) << kOpcodeShift,
+  BGTZL     =   ((2 << 3) + 7) << kOpcodeShift,
+
+  DADDI     =   ((3 << 3) + 0) << kOpcodeShift,  // This is also BNEC.
+  DADDIU    =   ((3 << 3) + 1) << kOpcodeShift,
+  LDL       =   ((3 << 3) + 2) << kOpcodeShift,
+  LDR       =   ((3 << 3) + 3) << kOpcodeShift,
+  SPECIAL2  =   ((3 << 3) + 4) << kOpcodeShift,
+  SPECIAL3  =   ((3 << 3) + 7) << kOpcodeShift,
+
+  LB        =   ((4 << 3) + 0) << kOpcodeShift,
+  LH        =   ((4 << 3) + 1) << kOpcodeShift,
+  LWL       =   ((4 << 3) + 2) << kOpcodeShift,
+  LW        =   ((4 << 3) + 3) << kOpcodeShift,
+  LBU       =   ((4 << 3) + 4) << kOpcodeShift,
+  LHU       =   ((4 << 3) + 5) << kOpcodeShift,
+  LWR       =   ((4 << 3) + 6) << kOpcodeShift,
+  LWU       =   ((4 << 3) + 7) << kOpcodeShift,
+
+  SB        =   ((5 << 3) + 0) << kOpcodeShift,
+  SH        =   ((5 << 3) + 1) << kOpcodeShift,
+  SWL       =   ((5 << 3) + 2) << kOpcodeShift,
+  SW        =   ((5 << 3) + 3) << kOpcodeShift,
+  SDL       =   ((5 << 3) + 4) << kOpcodeShift,
+  SDR       =   ((5 << 3) + 5) << kOpcodeShift,
+  SWR       =   ((5 << 3) + 6) << kOpcodeShift,
+
+  LWC1      =   ((6 << 3) + 1) << kOpcodeShift,
+  LLD       =   ((6 << 3) + 4) << kOpcodeShift,
+  LDC1      =   ((6 << 3) + 5) << kOpcodeShift,
+  BEQZC     =   ((6 << 3) + 6) << kOpcodeShift,
+  LD        =   ((6 << 3) + 7) << kOpcodeShift,
+
+  PREF      =   ((6 << 3) + 3) << kOpcodeShift,
+
+  SWC1      =   ((7 << 3) + 1) << kOpcodeShift,
+  SCD       =   ((7 << 3) + 4) << kOpcodeShift,
+  SDC1      =   ((7 << 3) + 5) << kOpcodeShift,
+  BNEZC     =   ((7 << 3) + 6) << kOpcodeShift,
+  SD        =   ((7 << 3) + 7) << kOpcodeShift,
+
+  COP1X     =   ((1 << 4) + 3) << kOpcodeShift
+};
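+
+// Worked example: LW sits in row 4, column 3 of the opcode table, so its
+// 6-bit opcode is (4 << 3) + 3 = 35 = 0x23, and the enum value is
+// 0x23 << 26 = 0x8c000000, i.e. the top six bits of every LW instruction word.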
+
+enum SecondaryField {
+  // SPECIAL Encoding of Function Field.
+  SLL       =   ((0 << 3) + 0),
+  MOVCI     =   ((0 << 3) + 1),
+  SRL       =   ((0 << 3) + 2),
+  SRA       =   ((0 << 3) + 3),
+  SLLV      =   ((0 << 3) + 4),
+  SRLV      =   ((0 << 3) + 6),
+  SRAV      =   ((0 << 3) + 7),
+
+  JR        =   ((1 << 3) + 0),
+  JALR      =   ((1 << 3) + 1),
+  MOVZ      =   ((1 << 3) + 2),
+  MOVN      =   ((1 << 3) + 3),
+  BREAK     =   ((1 << 3) + 5),
+
+  MFHI      =   ((2 << 3) + 0),
+  CLZ_R6    =   ((2 << 3) + 0),
+  CLO_R6    =   ((2 << 3) + 1),
+  MFLO      =   ((2 << 3) + 2),
+  DSLLV     =   ((2 << 3) + 4),
+  DSRLV     =   ((2 << 3) + 6),
+  DSRAV     =   ((2 << 3) + 7),
+
+  MULT      =   ((3 << 3) + 0),
+  MULTU     =   ((3 << 3) + 1),
+  DIV       =   ((3 << 3) + 2),
+  DIVU      =   ((3 << 3) + 3),
+  DMULT     =   ((3 << 3) + 4),
+  DMULTU    =   ((3 << 3) + 5),
+  DDIV      =   ((3 << 3) + 6),
+  DDIVU     =   ((3 << 3) + 7),
+
+  ADD       =   ((4 << 3) + 0),
+  ADDU      =   ((4 << 3) + 1),
+  SUB       =   ((4 << 3) + 2),
+  SUBU      =   ((4 << 3) + 3),
+  AND       =   ((4 << 3) + 4),
+  OR        =   ((4 << 3) + 5),
+  XOR       =   ((4 << 3) + 6),
+  NOR       =   ((4 << 3) + 7),
+
+  SLT       =   ((5 << 3) + 2),
+  SLTU      =   ((5 << 3) + 3),
+  DADD      =   ((5 << 3) + 4),
+  DADDU     =   ((5 << 3) + 5),
+  DSUB      =   ((5 << 3) + 6),
+  DSUBU     =   ((5 << 3) + 7),
+
+  TGE       =   ((6 << 3) + 0),
+  TGEU      =   ((6 << 3) + 1),
+  TLT       =   ((6 << 3) + 2),
+  TLTU      =   ((6 << 3) + 3),
+  TEQ       =   ((6 << 3) + 4),
+  SELEQZ_S  =   ((6 << 3) + 5),
+  TNE       =   ((6 << 3) + 6),
+  SELNEZ_S  =   ((6 << 3) + 7),
+
+  DSLL      =   ((7 << 3) + 0),
+  DSRL      =   ((7 << 3) + 2),
+  DSRA      =   ((7 << 3) + 3),
+  DSLL32    =   ((7 << 3) + 4),
+  DSRL32    =   ((7 << 3) + 6),
+  DSRA32    =   ((7 << 3) + 7),
+
+  // Multiply integers in r6.
+  MUL_MUH   =   ((3 << 3) + 0),  // MUL, MUH.
+  MUL_MUH_U =   ((3 << 3) + 1),  // MUL_U, MUH_U.
+  D_MUL_MUH =   ((7 << 2) + 0),  // DMUL, DMUH.
+  D_MUL_MUH_U = ((7 << 2) + 1),  // DMUL_U, DMUH_U.
+
+  MUL_OP    =   ((0 << 3) + 2),
+  MUH_OP    =   ((0 << 3) + 3),
+  DIV_OP    =   ((0 << 3) + 2),
+  MOD_OP    =   ((0 << 3) + 3),
+
+  DIV_MOD   =   ((3 << 3) + 2),
+  DIV_MOD_U =   ((3 << 3) + 3),
+  D_DIV_MOD =   ((3 << 3) + 6),
+  D_DIV_MOD_U = ((3 << 3) + 7),
+
+  // drotr in special4?
+
+  // SPECIAL2 Encoding of Function Field.
+  MUL       =   ((0 << 3) + 2),
+  CLZ       =   ((4 << 3) + 0),
+  CLO       =   ((4 << 3) + 1),
+
+  // SPECIAL3 Encoding of Function Field.
+  EXT       =   ((0 << 3) + 0),
+  DEXTM     =   ((0 << 3) + 1),
+  DEXTU     =   ((0 << 3) + 2),
+  DEXT      =   ((0 << 3) + 3),
+  INS       =   ((0 << 3) + 4),
+  DINSM     =   ((0 << 3) + 5),
+  DINSU     =   ((0 << 3) + 6),
+  DINS      =   ((0 << 3) + 7),
+
+  DSBH      =   ((4 << 3) + 4),
+
+  // REGIMM  encoding of rt Field.
+  BLTZ      =   ((0 << 3) + 0) << 16,
+  BGEZ      =   ((0 << 3) + 1) << 16,
+  BLTZAL    =   ((2 << 3) + 0) << 16,
+  BGEZAL    =   ((2 << 3) + 1) << 16,
+  BGEZALL   =   ((2 << 3) + 3) << 16,
+  DAHI      =   ((0 << 3) + 6) << 16,
+  DATI      =   ((3 << 3) + 6) << 16,
+
+  // COP1 Encoding of rs Field.
+  MFC1      =   ((0 << 3) + 0) << 21,
+  DMFC1     =   ((0 << 3) + 1) << 21,
+  CFC1      =   ((0 << 3) + 2) << 21,
+  MFHC1     =   ((0 << 3) + 3) << 21,
+  MTC1      =   ((0 << 3) + 4) << 21,
+  DMTC1     =   ((0 << 3) + 5) << 21,
+  CTC1      =   ((0 << 3) + 6) << 21,
+  MTHC1     =   ((0 << 3) + 7) << 21,
+  BC1       =   ((1 << 3) + 0) << 21,
+  S         =   ((2 << 3) + 0) << 21,
+  D         =   ((2 << 3) + 1) << 21,
+  W         =   ((2 << 3) + 4) << 21,
+  L         =   ((2 << 3) + 5) << 21,
+  PS        =   ((2 << 3) + 6) << 21,
+  // COP1 Encoding of Function Field When rs=S.
+  ROUND_L_S =   ((1 << 3) + 0),
+  TRUNC_L_S =   ((1 << 3) + 1),
+  CEIL_L_S  =   ((1 << 3) + 2),
+  FLOOR_L_S =   ((1 << 3) + 3),
+  ROUND_W_S =   ((1 << 3) + 4),
+  TRUNC_W_S =   ((1 << 3) + 5),
+  CEIL_W_S  =   ((1 << 3) + 6),
+  FLOOR_W_S =   ((1 << 3) + 7),
+  CVT_D_S   =   ((4 << 3) + 1),
+  CVT_W_S   =   ((4 << 3) + 4),
+  CVT_L_S   =   ((4 << 3) + 5),
+  CVT_PS_S  =   ((4 << 3) + 6),
+  // COP1 Encoding of Function Field When rs=D.
+  ADD_D     =   ((0 << 3) + 0),
+  SUB_D     =   ((0 << 3) + 1),
+  MUL_D     =   ((0 << 3) + 2),
+  DIV_D     =   ((0 << 3) + 3),
+  SQRT_D    =   ((0 << 3) + 4),
+  ABS_D     =   ((0 << 3) + 5),
+  MOV_D     =   ((0 << 3) + 6),
+  NEG_D     =   ((0 << 3) + 7),
+  ROUND_L_D =   ((1 << 3) + 0),
+  TRUNC_L_D =   ((1 << 3) + 1),
+  CEIL_L_D  =   ((1 << 3) + 2),
+  FLOOR_L_D =   ((1 << 3) + 3),
+  ROUND_W_D =   ((1 << 3) + 4),
+  TRUNC_W_D =   ((1 << 3) + 5),
+  CEIL_W_D  =   ((1 << 3) + 6),
+  FLOOR_W_D =   ((1 << 3) + 7),
+  MIN       =   ((3 << 3) + 4),
+  MINA      =   ((3 << 3) + 5),
+  MAX       =   ((3 << 3) + 6),
+  MAXA      =   ((3 << 3) + 7),
+  CVT_S_D   =   ((4 << 3) + 0),
+  CVT_W_D   =   ((4 << 3) + 4),
+  CVT_L_D   =   ((4 << 3) + 5),
+  C_F_D     =   ((6 << 3) + 0),
+  C_UN_D    =   ((6 << 3) + 1),
+  C_EQ_D    =   ((6 << 3) + 2),
+  C_UEQ_D   =   ((6 << 3) + 3),
+  C_OLT_D   =   ((6 << 3) + 4),
+  C_ULT_D   =   ((6 << 3) + 5),
+  C_OLE_D   =   ((6 << 3) + 6),
+  C_ULE_D   =   ((6 << 3) + 7),
+  // COP1 Encoding of Function Field When rs=W or L.
+  CVT_S_W   =   ((4 << 3) + 0),
+  CVT_D_W   =   ((4 << 3) + 1),
+  CVT_S_L   =   ((4 << 3) + 0),
+  CVT_D_L   =   ((4 << 3) + 1),
+  BC1EQZ    =   ((2 << 2) + 1) << 21,
+  BC1NEZ    =   ((3 << 2) + 1) << 21,
+  // COP1 CMP positive predicates Bit 5..4 = 00.
+  CMP_AF    =   ((0 << 3) + 0),
+  CMP_UN    =   ((0 << 3) + 1),
+  CMP_EQ    =   ((0 << 3) + 2),
+  CMP_UEQ   =   ((0 << 3) + 3),
+  CMP_LT    =   ((0 << 3) + 4),
+  CMP_ULT   =   ((0 << 3) + 5),
+  CMP_LE    =   ((0 << 3) + 6),
+  CMP_ULE   =   ((0 << 3) + 7),
+  CMP_SAF   =   ((1 << 3) + 0),
+  CMP_SUN   =   ((1 << 3) + 1),
+  CMP_SEQ   =   ((1 << 3) + 2),
+  CMP_SUEQ  =   ((1 << 3) + 3),
+  CMP_SSLT  =   ((1 << 3) + 4),
+  CMP_SSULT =   ((1 << 3) + 5),
+  CMP_SLE   =   ((1 << 3) + 6),
+  CMP_SULE  =   ((1 << 3) + 7),
+  // COP1 CMP negative predicates Bit 5..4 = 01.
+  CMP_AT    =   ((2 << 3) + 0),  // Reserved, not implemented.
+  CMP_OR    =   ((2 << 3) + 1),
+  CMP_UNE   =   ((2 << 3) + 2),
+  CMP_NE    =   ((2 << 3) + 3),
+  CMP_UGE   =   ((2 << 3) + 4),  // Reserved, not implemented.
+  CMP_OGE   =   ((2 << 3) + 5),  // Reserved, not implemented.
+  CMP_UGT   =   ((2 << 3) + 6),  // Reserved, not implemented.
+  CMP_OGT   =   ((2 << 3) + 7),  // Reserved, not implemented.
+  CMP_SAT   =   ((3 << 3) + 0),  // Reserved, not implemented.
+  CMP_SOR   =   ((3 << 3) + 1),
+  CMP_SUNE  =   ((3 << 3) + 2),
+  CMP_SNE   =   ((3 << 3) + 3),
+  CMP_SUGE  =   ((3 << 3) + 4),  // Reserved, not implemented.
+  CMP_SOGE  =   ((3 << 3) + 5),  // Reserved, not implemented.
+  CMP_SUGT  =   ((3 << 3) + 6),  // Reserved, not implemented.
+  CMP_SOGT  =   ((3 << 3) + 7),  // Reserved, not implemented.
+
+  SEL       =   ((2 << 3) + 0),
+  SELEQZ_C  =   ((2 << 3) + 4),  // COP1 on FPR registers.
+  SELNEZ_C  =   ((2 << 3) + 7),  // COP1 on FPR registers.
+
+  // COP1 Encoding of Function Field When rs=PS.
+  // COP1X Encoding of Function Field.
+  MADD_D    =   ((4 << 3) + 1),
+
+  NULLSF    =   0
+};
+
+
+// ----- Emulated conditions.
+// On MIPS we use this enum to abstract from conditional branch instructions.
+// The 'U' prefix is used to specify unsigned comparisons.
+// Opposite conditions must be paired as odd/even numbers because the
+// 'NegateCondition' function flips the LSB to negate a condition.
+enum Condition {
+  // Any value < 0 is considered no_condition.
+  kNoCondition  = -1,
+
+  overflow      =  0,
+  no_overflow   =  1,
+  Uless         =  2,
+  Ugreater_equal=  3,
+  equal         =  4,
+  not_equal     =  5,
+  Uless_equal   =  6,
+  Ugreater      =  7,
+  negative      =  8,
+  positive      =  9,
+  parity_even   = 10,
+  parity_odd    = 11,
+  less          = 12,
+  greater_equal = 13,
+  less_equal    = 14,
+  greater       = 15,
+  ueq           = 16,  // Unordered or Equal.
+  nue           = 17,  // Not (Unordered or Equal).
+
+  cc_always     = 18,
+
+  // Aliases.
+  carry         = Uless,
+  not_carry     = Ugreater_equal,
+  zero          = equal,
+  eq            = equal,
+  not_zero      = not_equal,
+  ne            = not_equal,
+  nz            = not_equal,
+  sign          = negative,
+  not_sign      = positive,
+  mi            = negative,
+  pl            = positive,
+  hi            = Ugreater,
+  ls            = Uless_equal,
+  ge            = greater_equal,
+  lt            = less,
+  gt            = greater,
+  le            = less_equal,
+  hs            = Ugreater_equal,
+  lo            = Uless,
+  al            = cc_always,
+
+  cc_default    = kNoCondition
+};
+
+
+// Returns the equivalent of !cc.
+// Negation of the default kNoCondition (-1) results in a non-default
+// no_condition value (-2). As long as tests for no_condition check
+// for condition < 0, this will work as expected.
+inline Condition NegateCondition(Condition cc) {
+  DCHECK(cc != cc_always);
+  return static_cast<Condition>(cc ^ 1);
+}
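+
+// For example, with the odd/even pairing above, NegateCondition(equal) is
+// not_equal (4 ^ 1 == 5) and NegateCondition(Uless) is Ugreater_equal
+// (2 ^ 1 == 3).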
+
+
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cc) {
+  switch (cc) {
+    case Uless:
+      return Ugreater;
+    case Ugreater:
+      return Uless;
+    case Ugreater_equal:
+      return Uless_equal;
+    case Uless_equal:
+      return Ugreater_equal;
+    case less:
+      return greater;
+    case greater:
+      return less;
+    case greater_equal:
+      return less_equal;
+    case less_equal:
+      return greater_equal;
+    default:
+      return cc;
+  }
+}
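+
+// For example, if 'a < b' (less) holds then 'b > a' (greater) holds, so
+// CommuteCondition(less) == greater. Symmetric conditions such as equal and
+// not_equal fall through the default case unchanged.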
+
+
+// ----- Coprocessor conditions.
+enum FPUCondition {
+  kNoFPUCondition = -1,
+
+  F     = 0,  // False.
+  UN    = 1,  // Unordered.
+  EQ    = 2,  // Equal.
+  UEQ   = 3,  // Unordered or Equal.
+  OLT   = 4,  // Ordered or Less Than.
+  ULT   = 5,  // Unordered or Less Than.
+  OLE   = 6,  // Ordered or Less Than or Equal.
+  ULE   = 7   // Unordered or Less Than or Equal.
+};
+
+
+// FPU rounding modes.
+enum FPURoundingMode {
+  RN = 0 << 0,  // Round to Nearest.
+  RZ = 1 << 0,  // Round towards zero.
+  RP = 2 << 0,  // Round towards Plus Infinity.
+  RM = 3 << 0,  // Round towards Minus Infinity.
+
+  // Aliases.
+  kRoundToNearest = RN,
+  kRoundToZero = RZ,
+  kRoundToPlusInf = RP,
+  kRoundToMinusInf = RM
+};
+
+const uint32_t kFPURoundingModeMask = 3 << 0;
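+
+// Usage sketch: the two low bits of FCSR hold the current rounding mode, so
+// for a raw 'fcsr' value
+//
+//   FPURoundingMode mode =
+//       static_cast<FPURoundingMode>(fcsr & kFPURoundingModeMask);
+//
+// yields one of RN/RZ/RP/RM.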
+
+enum CheckForInexactConversion {
+  kCheckForInexactConversion,
+  kDontCheckForInexactConversion
+};
+
+
+// -----------------------------------------------------------------------------
+// Hints.
+
+// Branch hints are not used on the MIPS.  They are defined so that they can
+// appear in shared function signatures, but will be ignored in MIPS
+// implementations.
+enum Hint {
+  no_hint = 0
+};
+
+
+inline Hint NegateHint(Hint hint) {
+  return no_hint;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-mips64.cc, as they use named
+// registers and other constants.
+
+// addiu(sp, sp, 4): the Pop() operation, or part of a Pop(r) operation,
+// as a post-increment of sp.
+extern const Instr kPopInstruction;
+// addiu(sp, sp, -4): part of a Push(r) operation, as a pre-decrement of sp.
+extern const Instr kPushInstruction;
+// sw(r, MemOperand(sp, 0))
+extern const Instr kPushRegPattern;
+// lw(r, MemOperand(sp, 0))
+extern const Instr kPopRegPattern;
+extern const Instr kLwRegFpOffsetPattern;
+extern const Instr kSwRegFpOffsetPattern;
+extern const Instr kLwRegFpNegOffsetPattern;
+extern const Instr kSwRegFpNegOffsetPattern;
+// A mask for the Rt register for push, pop, lw, sw instructions.
+extern const Instr kRtMask;
+extern const Instr kLwSwInstrTypeMask;
+extern const Instr kLwSwInstrArgumentMask;
+extern const Instr kLwSwOffsetMask;
+
+// Break 0xfffff, reserved for the redirected runtime call.
+const Instr rtCallRedirInstr = SPECIAL | BREAK | call_rt_redirected << 6;
+// A nop instruction. (Encoding of sll 0 0 0).
+const Instr nopInstr = 0;
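+
+// Worked encoding (illustrative): rtCallRedirInstr is a BREAK instruction
+// whose 20-bit code field (bits 25..6) holds call_rt_redirected, i.e.
+// SPECIAL (0) | 0xfffff << 6 | BREAK (0x0d); the simulator recognizes a
+// redirected runtime call by this break code.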
+
+class Instruction {
+ public:
+  enum {
+    kInstrSize = 4,
+    kInstrSizeLog2 = 2,
+    // On MIPS the PC cannot actually be directly accessed. We behave as if
+    // it were always the address of the instruction currently being executed.
+    kPCReadOffset = 0
+  };
+
+  // Get the raw instruction bits.
+  inline Instr InstructionBits() const {
+    return *reinterpret_cast<const Instr*>(this);
+  }
+
+  // Set the raw instruction bits to value.
+  inline void SetInstructionBits(Instr value) {
+    *reinterpret_cast<Instr*>(this) = value;
+  }
+
+  // Read one particular bit out of the instruction bits.
+  inline int Bit(int nr) const {
+    return (InstructionBits() >> nr) & 1;
+  }
+
+  // Read a bit field out of the instruction bits.
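+  // For example, Bits(25, 21) yields the 5-bit rs field; the mask expression
+  // ((2 << (hi - lo)) - 1) used below builds (hi - lo + 1) one bits, 0x1f
+  // here.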
+  inline int Bits(int hi, int lo) const {
+    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+  }
+
+  // Instruction type.
+  enum Type {
+    kRegisterType,
+    kImmediateType,
+    kJumpType,
+    kUnsupported = -1
+  };
+
+  // Get the encoding type of the instruction.
+  Type InstructionType() const;
+
+
+  // Accessors for the different named fields used in the MIPS encoding.
+  inline Opcode OpcodeValue() const {
+    return static_cast<Opcode>(
+        Bits(kOpcodeShift + kOpcodeBits - 1, kOpcodeShift));
+  }
+
+  inline int RsValue() const {
+    DCHECK(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kRsShift + kRsBits - 1, kRsShift);
+  }
+
+  inline int RtValue() const {
+    DCHECK(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kRtShift + kRtBits - 1, kRtShift);
+  }
+
+  inline int RdValue() const {
+    DCHECK(InstructionType() == kRegisterType);
+    return Bits(kRdShift + kRdBits - 1, kRdShift);
+  }
+
+  inline int SaValue() const {
+    DCHECK(InstructionType() == kRegisterType);
+    return Bits(kSaShift + kSaBits - 1, kSaShift);
+  }
+
+  inline int FunctionValue() const {
+    DCHECK(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return Bits(kFunctionShift + kFunctionBits - 1, kFunctionShift);
+  }
+
+  inline int FdValue() const {
+    return Bits(kFdShift + kFdBits - 1, kFdShift);
+  }
+
+  inline int FsValue() const {
+    return Bits(kFsShift + kFsBits - 1, kFsShift);
+  }
+
+  inline int FtValue() const {
+    return Bits(kFtShift + kFtBits - 1, kFtShift);
+  }
+
+  inline int FrValue() const {
+    return Bits(kFrShift + kFrBits - 1, kFrShift);
+  }
+
+  // Float Compare condition code instruction bits.
+  inline int FCccValue() const {
+    return Bits(kFCccShift + kFCccBits - 1, kFCccShift);
+  }
+
+  // Float Branch condition code instruction bits.
+  inline int FBccValue() const {
+    return Bits(kFBccShift + kFBccBits - 1, kFBccShift);
+  }
+
+  // Float Branch true/false instruction bit.
+  inline int FBtrueValue() const {
+    return Bits(kFBtrueShift + kFBtrueBits - 1, kFBtrueShift);
+  }
+
+  // Return the fields at their original place in the instruction encoding.
+  inline Opcode OpcodeFieldRaw() const {
+    return static_cast<Opcode>(InstructionBits() & kOpcodeMask);
+  }
+
+  inline int RsFieldRaw() const {
+    DCHECK(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return InstructionBits() & kRsFieldMask;
+  }
+
+  // Same as above function, but safe to call within InstructionType().
+  inline int RsFieldRawNoAssert() const {
+    return InstructionBits() & kRsFieldMask;
+  }
+
+  inline int RtFieldRaw() const {
+    DCHECK(InstructionType() == kRegisterType ||
+           InstructionType() == kImmediateType);
+    return InstructionBits() & kRtFieldMask;
+  }
+
+  inline int RdFieldRaw() const {
+    DCHECK(InstructionType() == kRegisterType);
+    return InstructionBits() & kRdFieldMask;
+  }
+
+  inline int SaFieldRaw() const {
+    DCHECK(InstructionType() == kRegisterType);
+    return InstructionBits() & kSaFieldMask;
+  }
+
+  inline int FunctionFieldRaw() const {
+    return InstructionBits() & kFunctionFieldMask;
+  }
+
+  // Get the secondary field according to the opcode.
+  inline int SecondaryValue() const {
+    Opcode op = OpcodeFieldRaw();
+    switch (op) {
+      case SPECIAL:
+      case SPECIAL2:
+        return FunctionValue();
+      case COP1:
+        return RsValue();
+      case REGIMM:
+        return RtValue();
+      default:
+        return NULLSF;
+    }
+  }
+
+  inline int32_t Imm16Value() const {
+    DCHECK(InstructionType() == kImmediateType);
+    return Bits(kImm16Shift + kImm16Bits - 1, kImm16Shift);
+  }
+
+  inline int32_t Imm21Value() const {
+    DCHECK(InstructionType() == kImmediateType);
+    return Bits(kImm21Shift + kImm21Bits - 1, kImm21Shift);
+  }
+
+  inline int32_t Imm26Value() const {
+    DCHECK(InstructionType() == kJumpType);
+    return Bits(kImm26Shift + kImm26Bits - 1, kImm26Shift);
+  }
+
+  // Returns true if the instruction must not be used in a branch delay slot.
+  bool IsForbiddenInBranchDelay() const;
+  // Returns true if the instruction 'links', e.g. jal, bal.
+  bool IsLinkingInstruction() const;
+  // Returns true if the instruction is a break or a trap.
+  bool IsTrap() const;
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instruction.
+  // Use the At(pc) function to create references to Instruction.
+  static Instruction* At(byte* pc) {
+    return reinterpret_cast<Instruction*>(pc);
+  }
+
+ private:
+  // We need to prevent the creation of instances of class Instruction.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
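+
+// Usage sketch (mirroring the comment on At() above):
+//
+//   Instruction* instr = Instruction::At(pc);
+//   if (instr->OpcodeFieldRaw() == SPECIAL &&
+//       instr->FunctionFieldRaw() == BREAK) {
+//     // 'pc' points at a break instruction.
+//   }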
+
+
+// -----------------------------------------------------------------------------
+// MIPS assembly various constants.
+
+// Size of the C/C++ argument slots.
+const int kCArgSlotCount = (kMipsAbi == kN64) ? 0 : 4;
+
+// TODO(plind): below should be based on kPointerSize
+// TODO(plind): find all usages and remove the needless instructions for n64.
+const int kCArgsSlotsSize = kCArgSlotCount * Instruction::kInstrSize * 2;
+
+const int kBranchReturnOffset = 2 * Instruction::kInstrSize;
+
+} }   // namespace v8::internal
+
+#endif    // #ifndef V8_MIPS_CONSTANTS_H_
diff --git a/src/mips64/cpu-mips64.cc b/src/mips64/cpu-mips64.cc
new file mode 100644
index 0000000..027d5a1
--- /dev/null
+++ b/src/mips64/cpu-mips64.cc
@@ -0,0 +1,59 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU specific code for MIPS independent of OS goes here.
+
+#include <sys/syscall.h>
+#include <unistd.h>
+
+#ifdef __mips
+#include <asm/cachectl.h>
+#endif  // #ifdef __mips
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+
+#include "src/simulator.h"  // For cache flushing.
+
+namespace v8 {
+namespace internal {
+
+
+void CpuFeatures::FlushICache(void* start, size_t size) {
+  // Nothing to do, flushing no instructions.
+  if (size == 0) {
+    return;
+  }
+
+#if !defined(USE_SIMULATOR)
+#if defined(ANDROID) && !defined(__LP64__)
+  // Bionic cacheflush can typically run in userland, avoiding a kernel call.
+  char* end = reinterpret_cast<char*>(start) + size;
+  cacheflush(
+    reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end), 0);
+#else  // ANDROID
+  int res;
+  // See http://www.linux-mips.org/wiki/Cacheflush_Syscall.
+  res = syscall(__NR_cacheflush, start, size, ICACHE);
+  if (res) {
+    V8_Fatal(__FILE__, __LINE__, "Failed to flush the instruction cache");
+  }
+#endif  // ANDROID
+#else  // USE_SIMULATOR.
+  // Not generating mips instructions for C-code. This means that we are
+  // building a mips emulator based target.  We should notify the simulator
+  // that the Icache was flushed.
+  // None of this code ends up in the snapshot so there are no issues
+  // around whether or not to generate the code when building snapshots.
+  Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+#endif  // USE_SIMULATOR.
+}
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/debug-mips64.cc b/src/mips64/debug-mips64.cc
new file mode 100644
index 0000000..831dc4e
--- /dev/null
+++ b/src/mips64/debug-mips64.cc
@@ -0,0 +1,330 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/codegen.h"
+#include "src/debug.h"
+
+namespace v8 {
+namespace internal {
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+  return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+  // Mips return sequence:
+  // mov sp, fp
+  // lw fp, sp(0)
+  // lw ra, sp(4)
+  // addiu sp, sp, 8
+  // addiu sp, sp, N
+  // jr ra
+  // nop (in branch delay slot)
+
+  // Make sure this constant matches the number of instructions we emit.
+  DCHECK(Assembler::kJSReturnSequenceInstructions == 7);
+  CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+  // li and Call pseudo-instructions emit 6 + 2 instructions.
+  patcher.masm()->li(v8::internal::t9, Operand(reinterpret_cast<int64_t>(
+      debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry())),
+      ADDRESS_LOAD);
+  patcher.masm()->Call(v8::internal::t9);
+  // Place nop to match return sequence size.
+  patcher.masm()->nop();
+  // TODO(mips): Open issue about using breakpoint instruction instead of nops.
+  // patcher.masm()->bkpt(0);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kJSReturnSequenceInstructions);
+}
+
+
+// A debug break in the exit code is identified by the JS frame exit code
+// having been patched with li/call pseudo-instructions (lui/ori/jalr).
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+  DCHECK(IsDebugBreakSlot());
+  // Check whether the debug break slot instructions have been patched.
+  return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+  DCHECK(IsDebugBreakSlot());
+  // Patch the code changing the debug break slot code from:
+  //   nop(DEBUG_BREAK_NOP) - nop(1) is sll(zero_reg, zero_reg, 1)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  //   nop(DEBUG_BREAK_NOP)
+  // to a call to the debug break slot code.
+  //   li t9, address   (4-instruction sequence on mips64)
+  //   call t9          (jalr t9 / nop instruction pair)
+  CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+  patcher.masm()->li(v8::internal::t9,
+      Operand(reinterpret_cast<int64_t>(
+          debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())),
+      ADDRESS_LOAD);
+  patcher.masm()->Call(v8::internal::t9);
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+  DCHECK(IsDebugBreakSlot());
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kDebugBreakSlotInstructions);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList object_regs,
+                                          RegList non_object_regs) {
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Load padding words on stack.
+    __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingValue)));
+    __ Dsubu(sp, sp,
+            Operand(kPointerSize * LiveEdit::kFramePaddingInitialSize));
+    for (int i = LiveEdit::kFramePaddingInitialSize - 1; i >= 0; i--) {
+      __ sd(at, MemOperand(sp, kPointerSize * i));
+    }
+    __ li(at, Operand(Smi::FromInt(LiveEdit::kFramePaddingInitialSize)));
+    __ push(at);
+
+
+    // TODO(plind): This needs to be revised to store pairs of smis per
+    //    the other 64-bit archs.
+
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, causing them to be untouched by GC.
+    DCHECK((object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((object_regs & non_object_regs) == 0);
+    for (int i = 0; i < kNumJSCallerSaved; i++) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((object_regs & (1 << r)) != 0) {
+        __ push(reg);
+      }
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ PushRegisterAsTwoSmis(reg);
+      }
+    }
+
+#ifdef DEBUG
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+    __ PrepareCEntryArgs(0);  // No arguments.
+    __ PrepareCEntryFunction(ExternalReference::debug_break(masm->isolate()));
+
+    CEntryStub ceb(masm->isolate(), 1);
+    __ CallStub(&ceb);
+
+    // Restore the register values from the expression stack.
+    for (int i = kNumJSCallerSaved - 1; i >= 0; i--) {
+      int r = JSCallerSavedCode(i);
+      Register reg = { r };
+      if ((non_object_regs & (1 << r)) != 0) {
+        __ PopRegisterAsTwoSmis(reg, at);
+      }
+      if ((object_regs & (1 << r)) != 0) {
+        __ pop(reg);
+      }
+      if (FLAG_debug_code &&
+          (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+        __ li(reg, kDebugZapValue);
+      }
+    }
+
+    // Don't bother removing padding bytes pushed on the stack
+    // as the frame is going to be restored right away.
+
+    // Leave the internal frame.
+  }
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+  ExternalReference after_break_target =
+      ExternalReference::debug_after_break_target_address(masm->isolate());
+  __ li(t9, Operand(after_break_target));
+  __ ld(t9, MemOperand(t9));
+  __ Jump(t9);
+}
+
+
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallICStub
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  //  -- a3 : slot in feedback array (smi)
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a1.bit() | a3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(
+      masm, receiver.bit() | name.bit() | value.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for keyed IC load (from ic-mips64.cc).
+  GenerateLoadICDebugBreak(masm);
+}
+
+
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC keyed store call (from ic-mips64.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(
+      masm, receiver.bit() | name.bit() | value.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+  // Register state for CompareNil IC
+  // ----------- S t a t e -------------
+  //  -- a0    : value
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a0.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // In places other than IC call sites it is expected that v0 is TOS, which
+  // is an object. This is not generally the case, so it should be used
+  // with care.
+  Generate_DebugBreakCallHelper(masm, v0.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-mips64.cc).
+  // ----------- S t a t e -------------
+  //  -- a1 : function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a1.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+  // Calling convention for CallConstructStub (from code-stubs-mips64.cc).
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments (not smi)
+  //  -- a1     : constructor function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a1.bit(), a0.bit());
+}
+
+
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+    MacroAssembler* masm) {
+  // Calling convention for CallConstructStub (from code-stubs-mips64.cc).
+  // ----------- S t a t e -------------
+  //  -- a0     : number of arguments (not smi)
+  //  -- a1     : constructor function
+  //  -- a2     : feedback array
+  //  -- a3     : feedback slot (smi)
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, a1.bit() | a2.bit() | a3.bit(), a0.bit());
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+  // the trampoline pool in the debug break slot code.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+  Label check_codesize;
+  __ bind(&check_codesize);
+  __ RecordDebugBreakSlot();
+  for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+    __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+  }
+  DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
+            masm->InstructionsGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  // In the places where a debug break slot is inserted no registers can contain
+  // object pointers.
+  Generate_DebugBreakCallHelper(masm, 0, 0);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+  __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+  ExternalReference restarter_frame_function_slot =
+      ExternalReference::debug_restarter_frame_function_pointer_address(
+          masm->isolate());
+  __ li(at, Operand(restarter_frame_function_slot));
+  __ sw(zero_reg, MemOperand(at, 0));
+
+  // We do not know our frame height, but set sp based on fp.
+  __ Dsubu(sp, fp, Operand(kPointerSize));
+
+  __ Pop(ra, fp, a1);  // Return address, Frame, Function.
+
+  // Load context from the function.
+  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Get function code.
+  __ ld(at, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(at, FieldMemOperand(at, SharedFunctionInfo::kCodeOffset));
+  __ Daddu(t9, at, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Re-run JSFunction, a1 is function, cp is context.
+  __ Jump(t9);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/deoptimizer-mips64.cc b/src/mips64/deoptimizer-mips64.cc
new file mode 100644
index 0000000..2550b76
--- /dev/null
+++ b/src/mips64/deoptimizer-mips64.cc
@@ -0,0 +1,379 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+
+int Deoptimizer::patch_size() {
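+  // The patched-in call is a 6-instruction li/Call sequence; with
+  // Assembler::kInstrSize == 4 this comes to 24 bytes.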
+  const int kCallInstructionSizeInWords = 6;
+  return kCallInstructionSizeInWords * Assembler::kInstrSize;
+}
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+  Address code_start_address = code->instruction_start();
+  // Invalidate the relocation information, as the code patching below will
+  // make it invalid, and it is not needed any more.
+  code->InvalidateRelocation();
+
+  if (FLAG_zap_code_space) {
+    // Fail hard and early if we enter this code object again.
+    byte* pointer = code->FindCodeAgeSequence();
+    if (pointer != NULL) {
+      pointer += kNoCodeAgeSequenceLength;
+    } else {
+      pointer = code->instruction_start();
+    }
+    CodePatcher patcher(pointer, 1);
+    patcher.masm()->break_(0xCC);
+
+    DeoptimizationInputData* data =
+        DeoptimizationInputData::cast(code->deoptimization_data());
+    int osr_offset = data->OsrPcOffset()->value();
+    if (osr_offset > 0) {
+      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+      osr_patcher.masm()->break_(0xCC);
+    }
+  }
+
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+  Address prev_call_address = NULL;
+#endif
+  // For each LLazyBailout instruction, insert a call to the corresponding
+  // deoptimization entry.
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+    int call_size_in_bytes = MacroAssembler::CallSize(deopt_entry,
+                                                      RelocInfo::NONE32);
+    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+    DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
+    DCHECK(call_size_in_bytes <= patch_size());
+    CodePatcher patcher(call_address, call_size_in_words);
+    patcher.masm()->Call(deopt_entry, RelocInfo::NONE32);
+    DCHECK(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
+
+#ifdef DEBUG
+    prev_call_address = call_address;
+#endif
+  }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+  // Set the register values. The values are not important as there are no
+  // callee-saved registers in JavaScript frames, so all registers are
+  // spilled. Registers fp and sp are set to the correct values though.
+
+  for (int i = 0; i < Register::kNumRegisters; i++) {
+    input_->SetRegister(i, i * 4);
+  }
+  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(i, Memory::uint64_at(tos + i));
+  }
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+  ApiFunction function(descriptor->deoptimization_handler());
+  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+  int params = descriptor->GetHandlerParameterCount();
+  output_frame->SetRegister(s0.code(), params);
+  output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
+  output_frame->SetRegister(s2.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+  // There is no dynamic alignment padding on MIPS in the input frame.
+  return false;
+}
+
+
+#define __ masm()->
+
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Deoptimizer::EntryGenerator::Generate() {
+  GeneratePrologue();
+
+  // Unlike on ARM we don't save all the registers, just the useful ones.
+  // For the rest, there are gaps on the stack, so the offsets remain the same.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+  RegList saved_regs = restored_regs | sp.bit() | ra.bit();
+
+  const int kDoubleRegsSize =
+      kDoubleSize * FPURegister::kMaxNumAllocatableRegisters;
+
+  // Save all FPU registers before messing with them.
+  __ Dsubu(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+    FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ sdc1(fpu_reg, MemOperand(sp, offset));
+  }
+
+  // Push saved_regs (needed to populate FrameDescription::registers_).
+  // Leave gaps for other registers.
+  __ Dsubu(sp, sp, kNumberOfRegisters * kPointerSize);
+  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+    if ((saved_regs & (1 << i)) != 0) {
+      __ sd(ToRegister(i), MemOperand(sp, kPointerSize * i));
+    }
+  }
+
+  const int kSavedRegistersAreaSize =
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+  // Get the bailout id from the stack.
+  __ ld(a2, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object (a3) (return
+  // address for lazy deoptimization) and compute the fp-to-sp delta in
+  // register a4.
+  __ mov(a3, ra);
+  // Correct one word for bailout id.
+  __ Daddu(a4, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+  __ Dsubu(a4, fp, a4);
+
+  // Allocate a new deoptimizer object.
+  __ PrepareCallCFunction(6, a5);
+  // Pass six arguments, according to the O32 or n64 ABI. a0..a3 are the
+  // same for both.
+  __ li(a1, Operand(type()));  // bailout type,
+  __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  // a2: bailout id already loaded.
+  // a3: code address or 0 already loaded.
+  if (kMipsAbi == kN64) {
+    // a4: already has fp-to-sp delta.
+    __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
+  } else {  // O32 abi.
+    // Pass four arguments in a0 to a3 and fifth & sixth arguments on stack.
+    __ sd(a4, CFunctionArgumentOperand(5));  // Fp-to-sp delta.
+    __ li(a5, Operand(ExternalReference::isolate_address(isolate())));
+    __ sd(a5, CFunctionArgumentOperand(6));  // Isolate.
+  }
+  // Call Deoptimizer::New().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+  }
+
+  // Preserve "deoptimizer" object in register v0 and get the input
+  // frame descriptor pointer to a1 (deoptimizer->input_);
+  // Move deopt-obj to a0 for call to Deoptimizer::ComputeOutputFrames() below.
+  __ mov(a0, v0);
+  __ ld(a1, MemOperand(v0, Deoptimizer::input_offset()));
+
+  // Copy core registers into FrameDescription::registers_[kNumRegisters].
+  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((saved_regs & (1 << i)) != 0) {
+      __ ld(a2, MemOperand(sp, i * kPointerSize));
+      __ sd(a2, MemOperand(a1, offset));
+    } else if (FLAG_debug_code) {
+      __ li(a2, kDebugZapValue);
+      __ sd(a2, MemOperand(a1, offset));
+    }
+  }
+
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  // Copy FPU registers to
+  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+  for (int i = 0; i < FPURegister::NumAllocatableRegisters(); ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ ldc1(f0, MemOperand(sp, src_offset));
+    __ sdc1(f0, MemOperand(a1, dst_offset));
+  }
+
+  // Remove the bailout id and the saved registers from the stack.
+  __ Daddu(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+  // Compute a pointer to the unwinding limit in register a2; that is
+  // the first stack slot not part of the input frame.
+  __ ld(a2, MemOperand(a1, FrameDescription::frame_size_offset()));
+  __ Daddu(a2, a2, sp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ Daddu(a3, a1, Operand(FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  Label pop_loop_header;
+  __ BranchShort(&pop_loop_header);
+  __ bind(&pop_loop);
+  __ pop(a4);
+  __ sd(a4, MemOperand(a3, 0));
+  __ daddiu(a3, a3, sizeof(uint64_t));
+  __ bind(&pop_loop_header);
+  __ BranchShort(&pop_loop, ne, a2, Operand(sp));
+  // Compute the output frame in the deoptimizer.
+  __ push(a0);  // Preserve deoptimizer object across call.
+  // a0: deoptimizer object; a1: scratch.
+  __ PrepareCallCFunction(1, a1);
+  // Call Deoptimizer::ComputeOutputFrames().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate()), 1);
+  }
+  __ pop(a0);  // Restore deoptimizer object (class Deoptimizer).
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop,
+      outer_loop_header, inner_loop_header;
+  // Outer loop state: a4 = current "FrameDescription** output_",
+  // a1 = one past the last FrameDescription**.
+  __ lw(a1, MemOperand(a0, Deoptimizer::output_count_offset()));
+  __ ld(a4, MemOperand(a0, Deoptimizer::output_offset()));  // a4 is output_.
+  __ dsll(a1, a1, kPointerSizeLog2);  // Count to offset.
+  __ daddu(a1, a4, a1);  // a1 = one past the last FrameDescription**.
+  __ jmp(&outer_loop_header);
+  __ bind(&outer_push_loop);
+  // Inner loop state: a2 = current FrameDescription*, a3 = loop index.
+  __ ld(a2, MemOperand(a4, 0));  // output_[ix]
+  __ ld(a3, MemOperand(a2, FrameDescription::frame_size_offset()));
+  __ jmp(&inner_loop_header);
+  __ bind(&inner_push_loop);
+  __ Dsubu(a3, a3, Operand(sizeof(uint64_t)));
+  __ Daddu(a6, a2, Operand(a3));
+  __ ld(a7, MemOperand(a6, FrameDescription::frame_content_offset()));
+  __ push(a7);
+  __ bind(&inner_loop_header);
+  __ BranchShort(&inner_push_loop, ne, a3, Operand(zero_reg));
+
+  __ Daddu(a4, a4, Operand(kPointerSize));
+  __ bind(&outer_loop_header);
+  __ BranchShort(&outer_push_loop, lt, a4, Operand(a1));
+
+  __ ld(a1, MemOperand(a0, Deoptimizer::input_offset()));
+  for (int i = 0; i < FPURegister::kMaxNumAllocatableRegisters; ++i) {
+    const FPURegister fpu_reg = FPURegister::FromAllocationIndex(i);
+    int src_offset = i * kDoubleSize + double_regs_offset;
+    __ ldc1(fpu_reg, MemOperand(a1, src_offset));
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  __ ld(a6, MemOperand(a2, FrameDescription::state_offset()));
+  __ push(a6);
+
+  __ ld(a6, MemOperand(a2, FrameDescription::pc_offset()));
+  __ push(a6);
+  __ ld(a6, MemOperand(a2, FrameDescription::continuation_offset()));
+  __ push(a6);
+
+
+  // Technically, restoring 'at' should work unless zero_reg is also
+  // restored, but it's safer to check for this.
+  DCHECK(!(at.bit() & restored_regs));
+  // Restore the registers from the last output frame.
+  __ mov(at, a2);
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((restored_regs & (1 << i)) != 0) {
+      __ ld(ToRegister(i), MemOperand(at, offset));
+    }
+  }
+
+  __ InitializeRootRegister();
+
+  __ pop(at);  // Get continuation, leave pc on stack.
+  __ pop(ra);
+  __ Jump(at);
+  __ stop("Unreachable.");
+}
+
+
+// Maximum size of a table entry generated below.
+const int Deoptimizer::table_entry_size_ = 11 * Assembler::kInstrSize;
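+// (44 bytes with kInstrSize == 4; entry i thus begins at table_start +
+// i * table_entry_size_, which the padding loop below enforces.)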
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+  // Create a sequence of deoptimization entries.
+  // Note that registers are still live when jumping to an entry.
+  Label table_start;
+  __ bind(&table_start);
+  for (int i = 0; i < count(); i++) {
+    Label start;
+    __ bind(&start);
+    __ daddiu(sp, sp, -1 * kPointerSize);
+    // Jump over the remaining deopt entries (including this one).
+    // This code is always reached by calling Jump, which puts the target (label
+    // start) into t9.
+    const int remaining_entries = (count() - i) * table_entry_size_;
+    __ Daddu(t9, t9, remaining_entries);
+    // 'at' was clobbered so we can only load the current entry value here.
+    __ li(t8, i);
+    __ jr(t9);  // Expose delay slot.
+    __ sd(t8, MemOperand(sp, 0 * kPointerSize));  // In the delay slot.
+
+    // Pad the rest of the code.
+    while (table_entry_size_ > (masm()->SizeOfCodeGeneratedSince(&start))) {
+      __ nop();
+    }
+
+    DCHECK_EQ(table_entry_size_, masm()->SizeOfCodeGeneratedSince(&start));
+  }
+
+  DCHECK_EQ(masm()->SizeOfCodeGeneratedSince(&table_start),
+      count() * table_entry_size_);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+  // No out-of-line constant pool support.
+  UNREACHABLE();
+}
+
+
+#undef __
+
+
+} }  // namespace v8::internal
diff --git a/src/mips64/disasm-mips64.cc b/src/mips64/disasm-mips64.cc
new file mode 100644
index 0000000..d47950f
--- /dev/null
+++ b/src/mips64/disasm-mips64.cc
@@ -0,0 +1,1504 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+//   NameConverter converter;
+//   Disassembler d(converter);
+//   for (byte* pc = begin; pc < end;) {
+//     v8::internal::EmbeddedVector<char, 256> buffer;
+//     byte* prev_pc = pc;
+//     pc += d.InstructionDecode(buffer, pc);
+//     printf("%p    %08x      %s\n",
+//            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+//   }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/mips64/constants-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative descriptions.
+class Decoder {
+ public:
+  Decoder(const disasm::NameConverter& converter,
+          v8::internal::Vector<char> out_buffer)
+    : converter_(converter),
+      out_buffer_(out_buffer),
+      out_buffer_pos_(0) {
+    out_buffer_[out_buffer_pos_] = '\0';
+  }
+
+  ~Decoder() {}
+
+  // Writes one disassembled instruction into the output buffer
+  // (0-terminated). Returns the length of the disassembled machine
+  // instruction in bytes.
+  int InstructionDecode(byte* instruction);
+
+ private:
+  // Bottleneck functions to print into the out_buffer.
+  void PrintChar(const char ch);
+  void Print(const char* str);
+
+  // Printing of common values.
+  void PrintRegister(int reg);
+  void PrintFPURegister(int freg);
+  void PrintRs(Instruction* instr);
+  void PrintRt(Instruction* instr);
+  void PrintRd(Instruction* instr);
+  void PrintFs(Instruction* instr);
+  void PrintFt(Instruction* instr);
+  void PrintFd(Instruction* instr);
+  void PrintSa(Instruction* instr);
+  void PrintSd(Instruction* instr);
+  void PrintSs1(Instruction* instr);
+  void PrintSs2(Instruction* instr);
+  void PrintBc(Instruction* instr);
+  void PrintCc(Instruction* instr);
+  void PrintFunction(Instruction* instr);
+  void PrintSecondaryField(Instruction* instr);
+  void PrintUImm16(Instruction* instr);
+  void PrintSImm16(Instruction* instr);
+  void PrintXImm16(Instruction* instr);
+  void PrintXImm21(Instruction* instr);
+  void PrintXImm26(Instruction* instr);
+  void PrintCode(Instruction* instr);   // For break and trap instructions.
+  // Printing of instruction name.
+  void PrintInstructionName(Instruction* instr);
+
+  // Handle formatting of instructions and their options.
+  int FormatRegister(Instruction* instr, const char* option);
+  int FormatFPURegister(Instruction* instr, const char* option);
+  int FormatOption(Instruction* instr, const char* option);
+  void Format(Instruction* instr, const char* format);
+  void Unknown(Instruction* instr);
+  int DecodeBreakInstr(Instruction* instr);
+
+  // Each of these functions decodes one particular instruction type.
+  int DecodeTypeRegister(Instruction* instr);
+  void DecodeTypeImmediate(Instruction* instr);
+  void DecodeTypeJump(Instruction* instr);
+
+  const disasm::NameConverter& converter_;
+  v8::internal::Vector<char> out_buffer_;
+  int out_buffer_pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+  (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) {
+  out_buffer_[out_buffer_pos_++] = ch;
+}
+
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+  char cur = *str++;
+  while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    PrintChar(cur);
+    cur = *str++;
+  }
+  out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+  Print(converter_.NameOfCPURegister(reg));
+}
+
+
+void Decoder::PrintRs(Instruction* instr) {
+  int reg = instr->RsValue();
+  PrintRegister(reg);
+}
+
+
+void Decoder::PrintRt(Instruction* instr) {
+  int reg = instr->RtValue();
+  PrintRegister(reg);
+}
+
+
+void Decoder::PrintRd(Instruction* instr) {
+  int reg = instr->RdValue();
+  PrintRegister(reg);
+}
+
+
+// Print the FPU register name according to the active name converter.
+void Decoder::PrintFPURegister(int freg) {
+  Print(converter_.NameOfXMMRegister(freg));
+}
+
+
+void Decoder::PrintFs(Instruction* instr) {
+  int freg = instr->RsValue();
+  PrintFPURegister(freg);
+}
+
+
+void Decoder::PrintFt(Instruction* instr) {
+  int freg = instr->RtValue();
+  PrintFPURegister(freg);
+}
+
+
+void Decoder::PrintFd(Instruction* instr) {
+  int freg = instr->RdValue();
+  PrintFPURegister(freg);
+}
+
+
+// Print the integer value of the sa field.
+void Decoder::PrintSa(Instruction* instr) {
+  int sa = instr->SaValue();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sa);
+}
+
+
+// Print the integer value of the rd field, when it is not used as reg.
+void Decoder::PrintSd(Instruction* instr) {
+  int sd = instr->RdValue();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", sd);
+}
+
+
+// Print the integer value of the rd field, when used as 'ext' size.
+void Decoder::PrintSs1(Instruction* instr) {
+  int ss = instr->RdValue();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss + 1);
+}
+
+
+// Print the integer value of the rd field, when used as 'ins' size.
+void Decoder::PrintSs2(Instruction* instr) {
+  int ss = instr->RdValue();
+  int pos = instr->SaValue();
+  out_buffer_pos_ +=
+      SNPrintF(out_buffer_ + out_buffer_pos_, "%d", ss - pos + 1);
+}
+
+
+// Print the integer value of the cc field for the bc1t/f instructions.
+void Decoder::PrintBc(Instruction* instr) {
+  int cc = instr->FBccValue();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", cc);
+}
+
+
+// Print the integer value of the cc field for the FP compare instructions.
+void Decoder::PrintCc(Instruction* instr) {
+  int cc = instr->FCccValue();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "cc(%d)", cc);
+}
+
+
+// Print 16-bit unsigned immediate value.
+void Decoder::PrintUImm16(Instruction* instr) {
+  int32_t imm = instr->Imm16Value();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%u", imm);
+}
+
+
+// Print 16-bit signed immediate value.
+void Decoder::PrintSImm16(Instruction* instr) {
+  int32_t imm = ((instr->Imm16Value()) << 16) >> 16;
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
+}
+
+
+// Print 16-bit hexadecimal immediate value.
+void Decoder::PrintXImm16(Instruction* instr) {
+  int32_t imm = instr->Imm16Value();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print 21-bit hexadecimal immediate value.
+void Decoder::PrintXImm21(Instruction* instr) {
+  uint32_t imm = instr->Imm21Value();
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print 26-bit hexadecimal immediate value.
+void Decoder::PrintXImm26(Instruction* instr) {
+  uint32_t imm = instr->Imm26Value() << kImmFieldShift;
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "0x%x", imm);
+}
+
+
+// Print the code field of break and trap instructions.
+void Decoder::PrintCode(Instruction* instr) {
+  if (instr->OpcodeFieldRaw() != SPECIAL)
+    return;  // Not a break or trap instruction.
+  switch (instr->FunctionFieldRaw()) {
+    case BREAK: {
+      int32_t code = instr->Bits(25, 6);
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                  "0x%05x (%d)", code, code);
+      break;
+    }
+    case TGE:
+    case TGEU:
+    case TLT:
+    case TLTU:
+    case TEQ:
+    case TNE: {
+      int32_t code = instr->Bits(15, 6);
+      out_buffer_pos_ +=
+          SNPrintF(out_buffer_ + out_buffer_pos_, "0x%03x", code);
+      break;
+    }
+    default:  // Not a break or trap instruction.
+    break;
+  }
+}
+
+
+// Printing of instruction name. Currently a no-op: mnemonics are emitted
+// directly through the Format() format strings.
+void Decoder::PrintInstructionName(Instruction* instr) {
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'r');
+  if (format[1] == 's') {  // 'rs: Rs register.
+    int reg = instr->RsValue();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 't') {  // 'rt: rt register.
+    int reg = instr->RtValue();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 'd') {  // 'rd: rd register.
+    int reg = instr->RdValue();
+    PrintRegister(reg);
+    return 2;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Handle all FPU register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPURegister(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'f');
+  if (format[1] == 's') {  // 'fs: fs register.
+    int reg = instr->FsValue();
+    PrintFPURegister(reg);
+    return 2;
+  } else if (format[1] == 't') {  // 'ft: ft register.
+    int reg = instr->FtValue();
+    PrintFPURegister(reg);
+    return 2;
+  } else if (format[1] == 'd') {  // 'fd: fd register.
+    int reg = instr->FdValue();
+    PrintFPURegister(reg);
+    return 2;
+  } else if (format[1] == 'r') {  // 'fr: fr register.
+    int reg = instr->FrValue();
+    PrintFPURegister(reg);
+    return 2;
+  }
+  UNREACHABLE();
+  return -1;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.)  FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+  switch (format[0]) {
+    case 'c': {   // 'code for break or trap instructions.
+      DCHECK(STRING_STARTS_WITH(format, "code"));
+      PrintCode(instr);
+      return 4;
+    }
+    case 'i': {   // 'imm16u or 'imm26.
+      if (format[3] == '1') {
+        DCHECK(STRING_STARTS_WITH(format, "imm16"));
+        if (format[5] == 's') {
+          DCHECK(STRING_STARTS_WITH(format, "imm16s"));
+          PrintSImm16(instr);
+        } else if (format[5] == 'u') {
+          DCHECK(STRING_STARTS_WITH(format, "imm16u"));
+          PrintUImm16(instr);
+        } else {
+          DCHECK(STRING_STARTS_WITH(format, "imm16x"));
+          PrintXImm16(instr);
+        }
+        return 6;
+      } else if (format[3] == '2' && format[4] == '1') {
+        DCHECK(STRING_STARTS_WITH(format, "imm21x"));
+        PrintXImm21(instr);
+        return 6;
+      } else if (format[3] == '2' && format[4] == '6') {
+        DCHECK(STRING_STARTS_WITH(format, "imm26x"));
+        PrintXImm26(instr);
+        return 6;
+      }
+    }
+    case 'r': {   // 'r: registers.
+      return FormatRegister(instr, format);
+    }
+    case 'f': {   // 'f: FPU registers.
+      return FormatFPURegister(instr, format);
+    }
+    case 's': {   // 'sa.
+      switch (format[1]) {
+        case 'a': {
+          DCHECK(STRING_STARTS_WITH(format, "sa"));
+          PrintSa(instr);
+          return 2;
+        }
+        case 'd': {
+          DCHECK(STRING_STARTS_WITH(format, "sd"));
+          PrintSd(instr);
+          return 2;
+        }
+        case 's': {
+          if (format[2] == '1') {
+              DCHECK(STRING_STARTS_WITH(format, "ss1"));  /* ext size */
+              PrintSs1(instr);
+              return 3;
+          } else {
+              DCHECK(STRING_STARTS_WITH(format, "ss2"));  /* ins size */
+              PrintSs2(instr);
+              return 3;
+          }
+        }
+      }
+    }
+    case 'b': {   // 'bc - Special for bc1 cc field.
+      DCHECK(STRING_STARTS_WITH(format, "bc"));
+      PrintBc(instr);
+      return 2;
+    }
+    case 'C': {   // 'Cc - Special for c.xx.d cc field.
+      DCHECK(STRING_STARTS_WITH(format, "Cc"));
+      PrintCc(instr);
+      return 2;
+    }
+  }
+  UNREACHABLE();
+  return -1;
+}
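+
+// Illustrative walk-through (a sketch, not part of the build): for a format
+// string positioned at "imm16s('rs)", FormatOption matches the imm16 branch,
+// prints the signed immediate via PrintSImm16, and returns 6, so Format()
+// resumes at the literal "(" character.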
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+  char cur = *format++;
+  while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    if (cur == '\'') {  // Single quote is used as the formatting escape.
+      format += FormatOption(instr, format);
+    } else {
+      out_buffer_[out_buffer_pos_++] = cur;
+    }
+    cur = *format++;
+  }
+  out_buffer_[out_buffer_pos_]  = '\0';
+}
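+
+// For example (sketch): Format(instr, "add     'rd, 'rs, 'rt") copies the
+// literal characters verbatim and expands each quoted option through
+// FormatOption, yielding output such as "add     v0, a0, a1" for suitable
+// register fields.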
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr)
+// which just prints "unknown" in place of the instruction bits.
+void Decoder::Unknown(Instruction* instr) {
+  Format(instr, "unknown");
+}
+
+
+int Decoder::DecodeBreakInstr(Instruction* instr) {
+  // This is already known to be BREAK instr, just extract the code.
+  if (instr->Bits(25, 6) == static_cast<int>(kMaxStopCode)) {
+    // This is stop(msg).
+    Format(instr, "break, code: 'code");
+    out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                                "\n%p       %08lx       stop msg: %s",
+                                static_cast<void*>
+                                      (reinterpret_cast<int32_t*>(instr
+                                              + Instruction::kInstrSize)),
+                                reinterpret_cast<uint64_t>
+                                (*reinterpret_cast<char**>(instr
+                                              + Instruction::kInstrSize)),
+                                *reinterpret_cast<char**>(instr
+                                              + Instruction::kInstrSize));
+    // Size 3: the break_ instr, plus embedded 64-bit char pointer.
+    return 3 * Instruction::kInstrSize;
+  } else {
+    Format(instr, "break, code: 'code");
+    return Instruction::kInstrSize;
+  }
+}
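+
+// Memory layout assumed by the stop(msg) case above (a sketch inferred from
+// the decoding logic): the break instruction is followed by an embedded
+// 64-bit char* to the message, occupying the next two 32-bit instruction
+// slots, hence the returned size of 3 * Instruction::kInstrSize:
+//
+//   [ break kMaxStopCode ][ msg ptr, low 32 bits ][ msg ptr, high 32 bits ]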
+
+
+int Decoder::DecodeTypeRegister(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    case COP1:    // Coprocessor instructions.
+      switch (instr->RsFieldRaw()) {
+        case MFC1:
+          Format(instr, "mfc1    'rt, 'fs");
+          break;
+        case DMFC1:
+          Format(instr, "dmfc1    'rt, 'fs");
+          break;
+        case MFHC1:
+          Format(instr, "mfhc1   'rt, 'fs");
+          break;
+        case MTC1:
+          Format(instr, "mtc1    'rt, 'fs");
+          break;
+        case DMTC1:
+          Format(instr, "dmtc1    'rt, 'fs");
+          break;
+        // These are called "fs" too, although they are not FPU registers.
+        case CTC1:
+          Format(instr, "ctc1    'rt, 'fs");
+          break;
+        case CFC1:
+          Format(instr, "cfc1    'rt, 'fs");
+          break;
+        case MTHC1:
+          Format(instr, "mthc1   'rt, 'fs");
+          break;
+        case D:
+          switch (instr->FunctionFieldRaw()) {
+            case ADD_D:
+              Format(instr, "add.d   'fd, 'fs, 'ft");
+              break;
+            case SUB_D:
+              Format(instr, "sub.d   'fd, 'fs, 'ft");
+              break;
+            case MUL_D:
+              Format(instr, "mul.d   'fd, 'fs, 'ft");
+              break;
+            case DIV_D:
+              Format(instr, "div.d   'fd, 'fs, 'ft");
+              break;
+            case ABS_D:
+              Format(instr, "abs.d   'fd, 'fs");
+              break;
+            case MOV_D:
+              Format(instr, "mov.d   'fd, 'fs");
+              break;
+            case NEG_D:
+              Format(instr, "neg.d   'fd, 'fs");
+              break;
+            case SQRT_D:
+              Format(instr, "sqrt.d  'fd, 'fs");
+              break;
+            case CVT_W_D:
+              Format(instr, "cvt.w.d 'fd, 'fs");
+              break;
+            case CVT_L_D:
+              Format(instr, "cvt.l.d 'fd, 'fs");
+              break;
+            case TRUNC_W_D:
+              Format(instr, "trunc.w.d 'fd, 'fs");
+              break;
+            case TRUNC_L_D:
+              Format(instr, "trunc.l.d 'fd, 'fs");
+              break;
+            case ROUND_W_D:
+              Format(instr, "round.w.d 'fd, 'fs");
+              break;
+            case ROUND_L_D:
+              Format(instr, "round.l.d 'fd, 'fs");
+              break;
+            case FLOOR_W_D:
+              Format(instr, "floor.w.d 'fd, 'fs");
+              break;
+            case FLOOR_L_D:
+              Format(instr, "floor.l.d 'fd, 'fs");
+              break;
+            case CEIL_W_D:
+              Format(instr, "ceil.w.d 'fd, 'fs");
+              break;
+            case CEIL_L_D:
+              Format(instr, "ceil.l.d 'fd, 'fs");
+              break;
+            case CVT_S_D:
+              Format(instr, "cvt.s.d 'fd, 'fs");
+              break;
+            case C_F_D:
+              Format(instr, "c.f.d   'fs, 'ft, 'Cc");
+              break;
+            case C_UN_D:
+              Format(instr, "c.un.d  'fs, 'ft, 'Cc");
+              break;
+            case C_EQ_D:
+              Format(instr, "c.eq.d  'fs, 'ft, 'Cc");
+              break;
+            case C_UEQ_D:
+              Format(instr, "c.ueq.d 'fs, 'ft, 'Cc");
+              break;
+            case C_OLT_D:
+              Format(instr, "c.olt.d 'fs, 'ft, 'Cc");
+              break;
+            case C_ULT_D:
+              Format(instr, "c.ult.d 'fs, 'ft, 'Cc");
+              break;
+            case C_OLE_D:
+              Format(instr, "c.ole.d 'fs, 'ft, 'Cc");
+              break;
+            case C_ULE_D:
+              Format(instr, "c.ule.d 'fs, 'ft, 'Cc");
+              break;
+            default:
+              Format(instr, "unknown.cop1.d");
+              break;
+          }
+          break;
+        case W:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_D_W:   // Convert word to double.
+              Format(instr, "cvt.d.w 'fd, 'fs");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case L:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_D_L:
+              Format(instr, "cvt.d.l 'fd, 'fs");
+              break;
+            case CVT_S_L:
+              Format(instr, "cvt.s.l 'fd, 'fs");
+              break;
+            case CMP_UN:
+              Format(instr, "cmp.un.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_EQ:
+              Format(instr, "cmp.eq.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_UEQ:
+              Format(instr, "cmp.ueq.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_LT:
+              Format(instr, "cmp.lt.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_ULT:
+              Format(instr, "cmp.ult.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_LE:
+              Format(instr, "cmp.le.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_ULE:
+              Format(instr, "cmp.ule.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_OR:
+              Format(instr, "cmp.or.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_UNE:
+              Format(instr, "cmp.une.d  'fd,  'fs, 'ft");
+              break;
+            case CMP_NE:
+              Format(instr, "cmp.ne.d  'fd,  'fs, 'ft");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case COP1X:
+      switch (instr->FunctionFieldRaw()) {
+        case MADD_D:
+          Format(instr, "madd.d  'fd, 'fr, 'fs, 'ft");
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR:
+          Format(instr, "jr      'rs");
+          break;
+        case JALR:
+          Format(instr, "jalr    'rs");
+          break;
+        case SLL:
+          if (0x0 == static_cast<int>(instr->InstructionBits()))
+            Format(instr, "nop");
+          else
+            Format(instr, "sll     'rd, 'rt, 'sa");
+          break;
+        case DSLL:
+            Format(instr, "dsll    'rd, 'rt, 'sa");
+          break;
+        case D_MUL_MUH:  // Equals DMUL.
+          if (kArchVariant != kMips64r6) {
+            Format(instr, "dmult   'rs, 'rt");
+          } else {
+            if (instr->SaValue() == MUL_OP) {
+              Format(instr, "dmul   'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "dmuh   'rd, 'rs, 'rt");
+            }
+          }
+          break;
+        case DSLL32:
+          Format(instr, "dsll32  'rd, 'rt, 'sa");
+          break;
+        case SRL:
+          if (instr->RsValue() == 0) {
+            Format(instr, "srl     'rd, 'rt, 'sa");
+          } else {
+            if (kArchVariant == kMips64r2) {
+              Format(instr, "rotr    'rd, 'rt, 'sa");
+            } else {
+              Unknown(instr);
+            }
+          }
+          break;
+        case DSRL:
+          if (instr->RsValue() == 0) {
+            Format(instr, "dsrl    'rd, 'rt, 'sa");
+          } else {
+            if (kArchVariant == kMips64r2) {
+              Format(instr, "drotr   'rd, 'rt, 'sa");
+            } else {
+              Unknown(instr);
+            }
+          }
+          break;
+        case DSRL32:
+          Format(instr, "dsrl32  'rd, 'rt, 'sa");
+          break;
+        case SRA:
+          Format(instr, "sra     'rd, 'rt, 'sa");
+          break;
+        case DSRA:
+          Format(instr, "dsra    'rd, 'rt, 'sa");
+          break;
+        case DSRA32:
+          Format(instr, "dsra32  'rd, 'rt, 'sa");
+          break;
+        case SLLV:
+          Format(instr, "sllv    'rd, 'rt, 'rs");
+          break;
+        case DSLLV:
+          Format(instr, "dsllv   'rd, 'rt, 'rs");
+          break;
+        case SRLV:
+          if (instr->SaValue() == 0) {
+            Format(instr, "srlv    'rd, 'rt, 'rs");
+          } else {
+            if (kArchVariant == kMips64r2) {
+              Format(instr, "rotrv   'rd, 'rt, 'rs");
+            } else {
+              Unknown(instr);
+            }
+          }
+          break;
+        case DSRLV:
+          if (instr->SaValue() == 0) {
+            Format(instr, "dsrlv   'rd, 'rt, 'rs");
+          } else {
+            if (kArchVariant == kMips64r2) {
+              Format(instr, "drotrv  'rd, 'rt, 'rs");
+            } else {
+              Unknown(instr);
+            }
+          }
+          break;
+        case SRAV:
+          Format(instr, "srav    'rd, 'rt, 'rs");
+          break;
+        case DSRAV:
+          Format(instr, "dsrav   'rd, 'rt, 'rs");
+          break;
+        case MFHI:
+          if (instr->Bits(25, 16) == 0) {
+            Format(instr, "mfhi    'rd");
+          } else {
+            if ((instr->FunctionFieldRaw() == CLZ_R6)
+                && (instr->FdValue() == 1)) {
+              Format(instr, "clz     'rd, 'rs");
+            } else if ((instr->FunctionFieldRaw() == CLO_R6)
+                && (instr->FdValue() == 1)) {
+              Format(instr, "clo     'rd, 'rs");
+            }
+          }
+          break;
+        case MFLO:
+          Format(instr, "mflo    'rd");
+          break;
+        case D_MUL_MUH_U:  // Equals DMULTU.
+          if (kArchVariant != kMips64r6) {
+              Format(instr, "dmultu  'rs, 'rt");
+          } else {
+            if (instr->SaValue() == MUL_OP) {
+              Format(instr, "dmulu  'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "dmuhu  'rd, 'rs, 'rt");
+            }
+          }
+          break;
+        case MULT:  // @Mips64r6 == MUL_MUH.
+          if (kArchVariant != kMips64r6) {
+            Format(instr, "mult    'rs, 'rt");
+          } else {
+            if (instr->SaValue() == MUL_OP) {
+              Format(instr, "mul    'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "muh    'rd, 'rs, 'rt");
+            }
+          }
+          break;
+        case MULTU:  // @Mips64r6 == MUL_MUH_U.
+          if (kArchVariant != kMips64r6) {
+            Format(instr, "multu   'rs, 'rt");
+          } else {
+            if (instr->SaValue() == MUL_OP) {
+              Format(instr, "mulu   'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "muhu   'rd, 'rs, 'rt");
+            }
+          }
+          break;
+        case DIV:  // @Mips64r6 == DIV_MOD.
+          if (kArchVariant != kMips64r6) {
+            Format(instr, "div     'rs, 'rt");
+          } else {
+            if (instr->SaValue() == DIV_OP) {
+              Format(instr, "div    'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "mod    'rd, 'rs, 'rt");
+            }
+          }
+          break;
+        case DDIV:  // @Mips64r6 == D_DIV_MOD.
+          if (kArchVariant != kMips64r6) {
+            Format(instr, "ddiv    'rs, 'rt");
+          } else {
+            if (instr->SaValue() == DIV_OP) {
+              Format(instr, "ddiv   'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "dmod   'rd, 'rs, 'rt");
+            }
+          }
+          break;
+        case DIVU:  // @Mips64r6 == DIV_MOD_U.
+          if (kArchVariant != kMips64r6) {
+            Format(instr, "divu    'rs, 'rt");
+          } else {
+            if (instr->SaValue() == DIV_OP) {
+              Format(instr, "divu   'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "modu   'rd, 'rs, 'rt");
+            }
+          }
+          break;
+        case DDIVU:  // @Mips64r6 == D_DIV_MOD_U.
+          if (kArchVariant != kMips64r6) {
+            Format(instr, "ddivu   'rs, 'rt");
+          } else {
+            if (instr->SaValue() == DIV_OP) {
+              Format(instr, "ddivu  'rd, 'rs, 'rt");
+            } else {
+              Format(instr, "dmodu  'rd, 'rs, 'rt");
+            }
+          }
+          break;
+        case ADD:
+          Format(instr, "add     'rd, 'rs, 'rt");
+          break;
+        case DADD:
+          Format(instr, "dadd    'rd, 'rs, 'rt");
+          break;
+        case ADDU:
+          Format(instr, "addu    'rd, 'rs, 'rt");
+          break;
+        case DADDU:
+          Format(instr, "daddu   'rd, 'rs, 'rt");
+          break;
+        case SUB:
+          Format(instr, "sub     'rd, 'rs, 'rt");
+          break;
+        case DSUB:
+          Format(instr, "dsub    'rd, 'rs, 'rt");
+          break;
+        case SUBU:
+          Format(instr, "subu    'rd, 'rs, 'rt");
+          break;
+        case DSUBU:
+          Format(instr, "dsubu   'rd, 'rs, 'rt");
+          break;
+        case AND:
+          Format(instr, "and     'rd, 'rs, 'rt");
+          break;
+        case OR:
+          if (0 == instr->RsValue()) {
+            Format(instr, "mov     'rd, 'rt");
+          } else if (0 == instr->RtValue()) {
+            Format(instr, "mov     'rd, 'rs");
+          } else {
+            Format(instr, "or      'rd, 'rs, 'rt");
+          }
+          break;
+        case XOR:
+          Format(instr, "xor     'rd, 'rs, 'rt");
+          break;
+        case NOR:
+          Format(instr, "nor     'rd, 'rs, 'rt");
+          break;
+        case SLT:
+          Format(instr, "slt     'rd, 'rs, 'rt");
+          break;
+        case SLTU:
+          Format(instr, "sltu    'rd, 'rs, 'rt");
+          break;
+        case BREAK:
+          return DecodeBreakInstr(instr);
+        case TGE:
+          Format(instr, "tge     'rs, 'rt, code: 'code");
+          break;
+        case TGEU:
+          Format(instr, "tgeu    'rs, 'rt, code: 'code");
+          break;
+        case TLT:
+          Format(instr, "tlt     'rs, 'rt, code: 'code");
+          break;
+        case TLTU:
+          Format(instr, "tltu    'rs, 'rt, code: 'code");
+          break;
+        case TEQ:
+          Format(instr, "teq     'rs, 'rt, code: 'code");
+          break;
+        case TNE:
+          Format(instr, "tne     'rs, 'rt, code: 'code");
+          break;
+        case MOVZ:
+          Format(instr, "movz    'rd, 'rs, 'rt");
+          break;
+        case MOVN:
+          Format(instr, "movn    'rd, 'rs, 'rt");
+          break;
+        case MOVCI:
+          if (instr->Bit(16)) {
+            Format(instr, "movt    'rd, 'rs, 'bc");
+          } else {
+            Format(instr, "movf    'rd, 'rs, 'bc");
+          }
+          break;
+        case SELEQZ_S:
+          Format(instr, "seleqz    'rd, 'rs, 'rt");
+          break;
+        case SELNEZ_S:
+          Format(instr, "selnez    'rd, 'rs, 'rt");
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          Format(instr, "mul     'rd, 'rs, 'rt");
+          break;
+        case CLZ:
+          if (kArchVariant != kMips64r6) {
+            Format(instr, "clz     'rd, 'rs");
+          }
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case SPECIAL3:
+      switch (instr->FunctionFieldRaw()) {
+        case INS: {
+          Format(instr, "ins     'rt, 'rs, 'sa, 'ss2");
+          break;
+        }
+        case EXT: {
+          Format(instr, "ext     'rt, 'rs, 'sa, 'ss1");
+          break;
+        }
+        default:
+          UNREACHABLE();
+      }
+      break;
+    default:
+      UNREACHABLE();
+  }
+  return Instruction::kInstrSize;
+}
+
+
+void Decoder::DecodeTypeImmediate(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    case COP1:
+      switch (instr->RsFieldRaw()) {
+        case BC1:
+          if (instr->FBtrueValue()) {
+            Format(instr, "bc1t    'bc, 'imm16u");
+          } else {
+            Format(instr, "bc1f    'bc, 'imm16u");
+          }
+          break;
+        case BC1EQZ:
+          Format(instr, "bc1eqz    'ft, 'imm16u");
+          break;
+        case BC1NEZ:
+          Format(instr, "bc1nez    'ft, 'imm16u");
+          break;
+        case W:  // CMP.S instruction.
+          switch (instr->FunctionValue()) {
+            case CMP_AF:
+              Format(instr, "cmp.af.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_UN:
+              Format(instr, "cmp.un.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_EQ:
+              Format(instr, "cmp.eq.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_UEQ:
+              Format(instr, "cmp.ueq.S   'ft, 'fs, 'fd");
+              break;
+            case CMP_LT:
+              Format(instr, "cmp.lt.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_ULT:
+              Format(instr, "cmp.ult.S   'ft, 'fs, 'fd");
+              break;
+            case CMP_LE:
+              Format(instr, "cmp.le.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_ULE:
+              Format(instr, "cmp.ule.S   'ft, 'fs, 'fd");
+              break;
+            case CMP_OR:
+              Format(instr, "cmp.or.S    'ft, 'fs, 'fd");
+              break;
+            case CMP_UNE:
+              Format(instr, "cmp.une.S   'ft, 'fs, 'fd");
+              break;
+            case CMP_NE:
+              Format(instr, "cmp.ne.S    'ft, 'fs, 'fd");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case L:  // CMP.D instruction.
+          switch (instr->FunctionValue()) {
+            case CMP_AF:
+              Format(instr, "cmp.af.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_UN:
+              Format(instr, "cmp.un.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_EQ:
+              Format(instr, "cmp.eq.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_UEQ:
+              Format(instr, "cmp.ueq.D   'ft, 'fs, 'fd");
+              break;
+            case CMP_LT:
+              Format(instr, "cmp.lt.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_ULT:
+              Format(instr, "cmp.ult.D   'ft, 'fs, 'fd");
+              break;
+            case CMP_LE:
+              Format(instr, "cmp.le.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_ULE:
+              Format(instr, "cmp.ule.D   'ft, 'fs, 'fd");
+              break;
+            case CMP_OR:
+              Format(instr, "cmp.or.D    'ft, 'fs, 'fd");
+              break;
+            case CMP_UNE:
+              Format(instr, "cmp.une.D   'ft, 'fs, 'fd");
+              break;
+            case CMP_NE:
+              Format(instr, "cmp.ne.D    'ft, 'fs, 'fd");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case S:
+          switch (instr->FunctionValue()) {
+            case SEL:
+              Format(instr, "sel.S    'ft, 'fs, 'fd");
+              break;
+            case SELEQZ_C:
+              Format(instr, "seleqz.S 'ft, 'fs, 'fd");
+              break;
+            case SELNEZ_C:
+              Format(instr, "selnez.S 'ft, 'fs, 'fd");
+              break;
+            case MIN:
+              Format(instr, "min.S    'ft, 'fs, 'fd");
+              break;
+            case MINA:
+              Format(instr, "mina.S   'ft, 'fs, 'fd");
+              break;
+            case MAX:
+              Format(instr, "max.S    'ft, 'fs, 'fd");
+              break;
+            case MAXA:
+              Format(instr, "maxa.S   'ft, 'fs, 'fd");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case D:
+          switch (instr->FunctionValue()) {
+            case SEL:
+              Format(instr, "sel.D    'ft, 'fs, 'fd");
+              break;
+            case SELEQZ_C:
+              Format(instr, "seleqz.D 'ft, 'fs, 'fd");
+              break;
+            case SELNEZ_C:
+              Format(instr, "selnez.D 'ft, 'fs, 'fd");
+              break;
+            case MIN:
+              Format(instr, "min.D    'ft, 'fs, 'fd");
+              break;
+            case MINA:
+              Format(instr, "mina.D   'ft, 'fs, 'fd");
+              break;
+            case MAX:
+              Format(instr, "max.D    'ft, 'fs, 'fd");
+              break;
+            case MAXA:
+              Format(instr, "maxa.D   'ft, 'fs, 'fd");
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        default:
+          UNREACHABLE();
+      }
+
+      break;  // Case COP1.
+    // ------------- REGIMM class.
+    case REGIMM:
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+          Format(instr, "bltz    'rs, 'imm16u");
+          break;
+        case BLTZAL:
+          Format(instr, "bltzal  'rs, 'imm16u");
+          break;
+        case BGEZ:
+          Format(instr, "bgez    'rs, 'imm16u");
+          break;
+        case BGEZAL:
+          Format(instr, "bgezal  'rs, 'imm16u");
+          break;
+        case BGEZALL:
+          Format(instr, "bgezall 'rs, 'imm16u");
+          break;
+        case DAHI:
+          Format(instr, "dahi    'rs, 'imm16u");
+          break;
+        case DATI:
+          Format(instr, "dati    'rs, 'imm16u");
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;  // Case REGIMM.
+    // ------------- Branch instructions.
+    case BEQ:
+      Format(instr, "beq     'rs, 'rt, 'imm16u");
+      break;
+    case BNE:
+      Format(instr, "bne     'rs, 'rt, 'imm16u");
+      break;
+    case BLEZ:
+      if ((instr->RtFieldRaw() == 0)
+          && (instr->RsFieldRaw() != 0)) {
+        Format(instr, "blez    'rs, 'imm16u");
+      } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+          && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgeuc    'rs, 'rt, 'imm16u");
+      } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgezalc  'rs, 'imm16u");
+      } else if ((instr->RsFieldRaw() == 0)
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "blezalc  'rs, 'imm16u");
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    case BGTZ:
+      if ((instr->RtFieldRaw() == 0)
+          && (instr->RsFieldRaw() != 0)) {
+        Format(instr, "bgtz    'rs, 'imm16u");
+      } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+          && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bltuc   'rs, 'rt, 'imm16u");
+      } else if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bltzalc 'rt, 'imm16u");
+      } else if ((instr->RsFieldRaw() == 0)
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgtzalc 'rt, 'imm16u");
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    case BLEZL:
+      if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgezc    'rt, 'imm16u");
+      } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+          && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgec     'rs, 'rt, 'imm16u");
+      } else if ((instr->RsFieldRaw() == 0)
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "blezc    'rt, 'imm16u");
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    case BGTZL:
+      if ((instr->RtFieldRaw() == instr->RsFieldRaw())
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bltzc    'rt, 'imm16u");
+      } else if ((instr->RtFieldRaw() != instr->RsFieldRaw())
+          && (instr->RsFieldRaw() != 0) && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bltc     'rs, 'rt, 'imm16u");
+      } else if ((instr->RsFieldRaw() == 0)
+          && (instr->RtFieldRaw() != 0)) {
+        Format(instr, "bgtzc    'rt, 'imm16u");
+      } else {
+        UNREACHABLE();
+      }
+      break;
+    case BEQZC:
+      if (instr->RsFieldRaw() != 0) {
+        Format(instr, "beqzc   'rs, 'imm21x");
+      }
+      break;
+    case BNEZC:
+      if (instr->RsFieldRaw() != 0) {
+        Format(instr, "bnezc   'rs, 'imm21x");
+      }
+      break;
+    // ------------- Arithmetic instructions.
+    case ADDI:
+      if (kArchVariant != kMips64r6) {
+        Format(instr, "addi    'rt, 'rs, 'imm16s");
+      } else {
+        // Check if BOVC or BEQC instruction.
+        if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+          Format(instr, "bovc  'rs, 'rt, 'imm16s");
+        } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+          Format(instr, "beqc  'rs, 'rt, 'imm16s");
+        } else {
+          UNREACHABLE();
+        }
+      }
+      break;
+    case DADDI:
+      if (kArchVariant != kMips64r6) {
+        Format(instr, "daddi   'rt, 'rs, 'imm16s");
+      } else {
+        // Check if BNVC or BNEC instruction.
+        if (instr->RsFieldRaw() >= instr->RtFieldRaw()) {
+          Format(instr, "bnvc  'rs, 'rt, 'imm16s");
+        } else if (instr->RsFieldRaw() < instr->RtFieldRaw()) {
+          Format(instr, "bnec  'rs, 'rt, 'imm16s");
+        } else {
+          UNREACHABLE();
+        }
+      }
+      break;
+    case ADDIU:
+      Format(instr, "addiu   'rt, 'rs, 'imm16s");
+      break;
+    case DADDIU:
+      Format(instr, "daddiu  'rt, 'rs, 'imm16s");
+      break;
+    case SLTI:
+      Format(instr, "slti    'rt, 'rs, 'imm16s");
+      break;
+    case SLTIU:
+      Format(instr, "sltiu   'rt, 'rs, 'imm16u");
+      break;
+    case ANDI:
+      Format(instr, "andi    'rt, 'rs, 'imm16x");
+      break;
+    case ORI:
+      Format(instr, "ori     'rt, 'rs, 'imm16x");
+      break;
+    case XORI:
+      Format(instr, "xori    'rt, 'rs, 'imm16x");
+      break;
+    case LUI:
+      if (kArchVariant != kMips64r6) {
+        Format(instr, "lui     'rt, 'imm16x");
+      } else {
+        if (instr->RsValue() != 0) {
+          Format(instr, "aui     'rt, 'imm16x");
+        } else {
+          Format(instr, "lui     'rt, 'imm16x");
+        }
+      }
+      break;
+    case DAUI:
+      Format(instr, "daui    'rt, 'imm16x");
+      break;
+    // ------------- Memory instructions.
+    case LB:
+      Format(instr, "lb      'rt, 'imm16s('rs)");
+      break;
+    case LH:
+      Format(instr, "lh      'rt, 'imm16s('rs)");
+      break;
+    case LWL:
+      Format(instr, "lwl     'rt, 'imm16s('rs)");
+      break;
+    case LDL:
+      Format(instr, "ldl     'rt, 'imm16s('rs)");
+      break;
+    case LW:
+      Format(instr, "lw      'rt, 'imm16s('rs)");
+      break;
+    case LWU:
+      Format(instr, "lwu     'rt, 'imm16s('rs)");
+      break;
+    case LD:
+      Format(instr, "ld      'rt, 'imm16s('rs)");
+      break;
+    case LBU:
+      Format(instr, "lbu     'rt, 'imm16s('rs)");
+      break;
+    case LHU:
+      Format(instr, "lhu     'rt, 'imm16s('rs)");
+      break;
+    case LWR:
+      Format(instr, "lwr     'rt, 'imm16s('rs)");
+      break;
+    case LDR:
+      Format(instr, "ldr     'rt, 'imm16s('rs)");
+      break;
+    case PREF:
+      Format(instr, "pref    'rt, 'imm16s('rs)");
+      break;
+    case SB:
+      Format(instr, "sb      'rt, 'imm16s('rs)");
+      break;
+    case SH:
+      Format(instr, "sh      'rt, 'imm16s('rs)");
+      break;
+    case SWL:
+      Format(instr, "swl     'rt, 'imm16s('rs)");
+      break;
+    case SW:
+      Format(instr, "sw      'rt, 'imm16s('rs)");
+      break;
+    case SD:
+      Format(instr, "sd      'rt, 'imm16s('rs)");
+      break;
+    case SWR:
+      Format(instr, "swr     'rt, 'imm16s('rs)");
+      break;
+    case LWC1:
+      Format(instr, "lwc1    'ft, 'imm16s('rs)");
+      break;
+    case LDC1:
+      Format(instr, "ldc1    'ft, 'imm16s('rs)");
+      break;
+    case SWC1:
+      Format(instr, "swc1    'ft, 'imm16s('rs)");
+      break;
+    case SDC1:
+      Format(instr, "sdc1    'ft, 'imm16s('rs)");
+      break;
+    default:
+      printf("a 0x%x \n", instr->OpcodeFieldRaw());
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void Decoder::DecodeTypeJump(Instruction* instr) {
+  switch (instr->OpcodeFieldRaw()) {
+    case J:
+      Format(instr, "j       'imm26x");
+      break;
+    case JAL:
+      Format(instr, "jal     'imm26x");
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+// All instructions are one word long, except for the simulator
+// pseudo-instruction stop(msg). For that one special case, we return a
+// size larger than one kInstrSize.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+  Instruction* instr = Instruction::At(instr_ptr);
+  // Print raw instruction bytes.
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_,
+                              "%08x       ",
+                              instr->InstructionBits());
+  switch (instr->InstructionType()) {
+    case Instruction::kRegisterType: {
+      return DecodeTypeRegister(instr);
+    }
+    case Instruction::kImmediateType: {
+      DecodeTypeImmediate(instr);
+      break;
+    }
+    case Instruction::kJumpType: {
+      DecodeTypeJump(instr);
+      break;
+    }
+    default: {
+      Format(instr, "UNSUPPORTED");
+      UNSUPPORTED_MIPS();
+    }
+  }
+  return Instruction::kInstrSize;
+}
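+
+// Worked example (sketch): for the instruction word 0x03e00008 (opcode
+// SPECIAL, function JR, rs = ra), InstructionDecode prints
+// "03e00008       jr      ra" into the buffer and returns
+// Instruction::kInstrSize (4).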
+
+
+} }  // namespace v8::internal
+
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  return v8::internal::Registers::Name(reg);
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  return v8::internal::FPURegisters::Name(reg);
+}
+
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // MIPS does not have the concept of a byte register.
+  return "nobytereg";
+}
+
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // The default name converter is called for unknown code, so we will not
+  // try to access any memory.
+  return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte* instruction) {
+  v8::internal::Decoder d(converter_, buffer);
+  return d.InstructionDecode(instruction);
+}
+
+
+// The MIPS assembler does not currently use constant pools.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) {
+  return -1;
+}
+
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    v8::internal::PrintF(f, "%p    %08x      %s\n",
+        prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+  }
+}
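+
+// The same loop can be driven manually when a custom NameConverter is
+// needed (sketch; `MyNameConverter` is a hypothetical NameConverter
+// subclass, and `begin`/`end` are assumed to bound the code buffer):
+//
+//   MyNameConverter converter;
+//   disasm::Disassembler d(converter);
+//   for (byte* pc = begin; pc < end;) {
+//     v8::internal::EmbeddedVector<char, 128> buffer;
+//     pc += d.InstructionDecode(buffer, pc);
+//     // ... print or log buffer.start() ...
+//   }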
+
+
+#undef UNSUPPORTED
+
+}  // namespace disasm
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/frames-mips64.cc b/src/mips64/frames-mips64.cc
new file mode 100644
index 0000000..2991248
--- /dev/null
+++ b/src/mips64/frames-mips64.cc
@@ -0,0 +1,43 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/mips64/assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+  UNREACHABLE();
+  return no_reg;
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+  UNREACHABLE();
+  return no_reg;
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+  UNREACHABLE();
+  return Memory::Object_at(NULL);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/frames-mips64.h b/src/mips64/frames-mips64.h
new file mode 100644
index 0000000..eaf29c8
--- /dev/null
+++ b/src/mips64/frames-mips64.h
@@ -0,0 +1,215 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+
+#ifndef V8_MIPS_FRAMES_MIPS_H_
+#define V8_MIPS_FRAMES_MIPS_H_
+
+namespace v8 {
+namespace internal {
+
+// Register lists.
+// Note that the bit values must match those used in actual instruction
+// encoding.
+const int kNumRegs = 32;
+
+const RegList kJSCallerSaved =
+  1 << 2  |  // v0
+  1 << 3  |  // v1
+  1 << 4  |  // a0
+  1 << 5  |  // a1
+  1 << 6  |  // a2
+  1 << 7  |  // a3
+  1 << 8  |  // a4
+  1 << 9  |  // a5
+  1 << 10 |  // a6
+  1 << 11 |  // a7
+  1 << 12 |  // t0
+  1 << 13 |  // t1
+  1 << 14 |  // t2
+  1 << 15;   // t3
+
+const int kNumJSCallerSaved = 14;
+
+
+// Return the code of the n-th caller-saved register available to JavaScript
+// e.g. JSCallerSavedCode(0) returns v0.code() == 2.
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript.
+const RegList kCalleeSaved =
+  1 << 16 |  // s0
+  1 << 17 |  // s1
+  1 << 18 |  // s2
+  1 << 19 |  // s3
+  1 << 20 |  // s4
+  1 << 21 |  // s5
+  1 << 22 |  // s6 (roots in JavaScript code)
+  1 << 23 |  // s7 (cp in JavaScript code)
+  1 << 30;   // fp/s8
+
+const int kNumCalleeSaved = 9;
+
+const RegList kCalleeSavedFPU =
+  1 << 20 |  // f20
+  1 << 22 |  // f22
+  1 << 24 |  // f24
+  1 << 26 |  // f26
+  1 << 28 |  // f28
+  1 << 30;   // f30
+
+const int kNumCalleeSavedFPU = 6;
+
+const RegList kCallerSavedFPU =
+  1 << 0  |  // f0
+  1 << 2  |  // f2
+  1 << 4  |  // f4
+  1 << 6  |  // f6
+  1 << 8  |  // f8
+  1 << 10 |  // f10
+  1 << 12 |  // f12
+  1 << 14 |  // f14
+  1 << 16 |  // f16
+  1 << 18;   // f18
+
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+const int kNumSafepointRegisters = 24;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters =
+    kNumJSCallerSaved + kNumCalleeSaved;
+
+const int kUndefIndex = -1;
+// Map from register codes to the stack slot indexes of saved registers.
+const int kSafepointRegisterStackIndexMap[kNumRegs] = {
+  kUndefIndex,  // zero_reg
+  kUndefIndex,  // at
+  0,   // v0
+  1,   // v1
+  2,   // a0
+  3,   // a1
+  4,   // a2
+  5,   // a3
+  6,   // a4
+  7,   // a5
+  8,   // a6
+  9,   // a7
+  10,  // t0
+  11,  // t1
+  12,  // t2
+  13,  // t3
+  14,  // s0
+  15,  // s1
+  16,  // s2
+  17,  // s3
+  18,  // s4
+  19,  // s5
+  20,  // s6
+  21,  // s7
+  kUndefIndex,  // t8
+  kUndefIndex,  // t9
+  kUndefIndex,  // k0
+  kUndefIndex,  // k1
+  kUndefIndex,  // gp
+  kUndefIndex,  // sp
+  22,  // fp
+  kUndefIndex
+};
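+
+// For example, a0 is CPU register 4 and is saved in safepoint stack slot 2,
+// so kSafepointRegisterStackIndexMap[4] == 2; registers mapped to
+// kUndefIndex are not saved at safepoints.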
+
+
+// ----------------------------------------------------
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset =
+      -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+  static const int kFrameSize = 2 * kPointerSize;
+
+  static const int kCodeOffset = -2 * kPointerSize;
+  static const int kSPOffset = -1 * kPointerSize;
+
+  // The caller fields are below the frame pointer on the stack.
+  static const int kCallerFPOffset = +0 * kPointerSize;
+  // The calling JS function is between FP and PC.
+  static const int kCallerPCOffset = +1 * kPointerSize;
+
+  // MIPS-specific: a pointer to the old sp to avoid unnecessary calculations.
+  static const int kCallerSPOffset = +2 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.
+  static const int kCallerSPDisplacement = +2 * kPointerSize;
+
+  static const int kConstantPoolOffset = 0;  // Not used.
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kLastParameterOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset   = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kImplicitReceiverOffset = -6 * kPointerSize;
+  static const int kConstructorOffset      = -5 * kPointerSize;
+  static const int kLengthOffset           = -4 * kPointerSize;
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+  Memory::Address_at(slot) = fp;
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_FRAMES_MIPS_H_
diff --git a/src/mips64/full-codegen-mips64.cc b/src/mips64/full-codegen-mips64.cc
new file mode 100644
index 0000000..8f8c376
--- /dev/null
+++ b/src/mips64/full-codegen-mips64.cc
@@ -0,0 +1,4977 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+// Note on Mips implementation:
+//
+// The result_register() for mips is the 'v0' register, which is defined
+// by the ABI to contain function return values. However, the first
+// parameter to a function is defined to be 'a0'. So there are many
+// places where we have to move a previous result in v0 to a0 for the
+// next call: mov(a0, v0). This is not needed on the other architectures.
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+#include "src/mips64/code-stubs-mips64.h"
+#include "src/mips64/macro-assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+
+// A patch site is a location in the code that can be patched. This class
+// has a number of methods to emit the patchable code and a method,
+// EmitPatchInfo, to record a marker back to the patchable code. The marker
+// is an andi zero_reg, rx, #yyyy instruction, where rx * 0x0000ffff + yyyy
+// (the raw 16-bit immediate value) is the delta from the pc to the first
+// instruction of the patchable code.
+// The marker instruction is effectively a NOP (dest is zero_reg) and will
+// never be emitted by normal code.
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() {
+    DCHECK(patch_site_.is_bound() == info_emitted_);
+  }
+
+  // When initially emitting this, ensure that a jump is always generated to
+  // skip the inlined smi code.
+  void EmitJumpIfNotSmi(Register reg, Label* target) {
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ bind(&patch_site_);
+    __ andi(at, reg, 0);
+    // Always taken before patched.
+    __ BranchShort(target, eq, at, Operand(zero_reg));
+  }
+
+  // When initially emitting this, ensure that a jump is never generated to
+  // skip the inlined smi code.
+  void EmitJumpIfSmi(Register reg, Label* target) {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    __ bind(&patch_site_);
+    __ andi(at, reg, 0);
+    // Never taken before patched.
+    __ BranchShort(target, ne, at, Operand(zero_reg));
+  }
+
+  void EmitPatchInfo() {
+    if (patch_site_.is_bound()) {
+      int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+      Register reg = Register::from_code(delta_to_patch_site / kImm16Mask);
+      __ andi(zero_reg, reg, delta_to_patch_site % kImm16Mask);
+#ifdef DEBUG
+      info_emitted_ = true;
+#endif
+    } else {
+      __ nop();  // Signals no inlined code.
+    }
+  }
+
+ private:
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
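+
+// Worked example of the marker encoding (sketch): for a delta of 6
+// instructions, EmitPatchInfo emits andi(zero_reg, zero_reg, 6), since
+// 6 / kImm16Mask == 0 and 6 % kImm16Mask == 6; the patcher recovers the
+// delta as 0 * kImm16Mask + 6.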
+
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right.  The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+//   o a1: the JS function object being called (i.e. ourselves)
+//   o cp: our context
+//   o fp: our caller's frame pointer
+//   o sp: stack pointer
+//   o ra: return address
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-mips64.h for its layout.
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+  profiling_counter_ = isolate()->factory()->NewCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+  SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop-at");
+  }
+#endif
+
+  // Sloppy mode functions and builtins need to replace the receiver with the
+  // global proxy when called as functions (without an explicit receiver
+  // object).
+  if (info->strict_mode() == SLOPPY && !info->is_native()) {
+    Label ok;
+    int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+    __ ld(at, MemOperand(sp, receiver_offset));
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ Branch(&ok, ne, a2, Operand(at));
+
+    __ ld(a2, GlobalObjectOperand());
+    __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+
+    __ sd(a2, MemOperand(sp, receiver_offset));
+    __ bind(&ok);
+  }
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+  info->set_prologue_offset(masm_->pc_offset());
+  __ Prologue(info->IsCodePreAgingActive());
+  info->AddNoFrameRange(0, masm_->pc_offset());
+
+  { Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = info->scope()->num_stack_slots();
+    // Generators allocate locals, if any, in context slots.
+    DCHECK(!info->function()->is_generator() || locals_count == 0);
+    if (locals_count > 0) {
+      if (locals_count >= 128) {
+        Label ok;
+        __ Dsubu(t1, sp, Operand(locals_count * kPointerSize));
+        __ LoadRoot(a2, Heap::kRealStackLimitRootIndex);
+        __ Branch(&ok, hs, t1, Operand(a2));
+        __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+        __ bind(&ok);
+      }
+      __ LoadRoot(t1, Heap::kUndefinedValueRootIndex);
+      int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+      if (locals_count >= kMaxPushes) {
+        int loop_iterations = locals_count / kMaxPushes;
+        __ li(a2, Operand(loop_iterations));
+        Label loop_header;
+        __ bind(&loop_header);
+        // Do pushes.
+        __ Dsubu(sp, sp, Operand(kMaxPushes * kPointerSize));
+        for (int i = 0; i < kMaxPushes; i++) {
+          __ sd(t1, MemOperand(sp, i * kPointerSize));
+        }
+        // Continue loop if not done.
+        __ Dsubu(a2, a2, Operand(1));
+        __ Branch(&loop_header, ne, a2, Operand(zero_reg));
+      }
+      int remaining = locals_count % kMaxPushes;
+      // Emit the remaining pushes.
+      __ Dsubu(sp, sp, Operand(remaining * kPointerSize));
+      for (int i  = 0; i < remaining; i++) {
+        __ sd(t1, MemOperand(sp, i * kPointerSize));
+      }
+    }
+  }
+
+  bool function_in_register = true;
+
+  // Possibly allocate a local context.
+  int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment cmnt(masm_, "[ Allocate context");
+    // Argument to NewContext is the function, which is still in a1.
+    bool need_write_barrier = true;
+    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+      __ push(a1);
+      __ Push(info->scope()->GetScopeInfo());
+      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+    } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), heap_slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(a1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    function_in_register = false;
+    // Context is returned in v0. It replaces the context passed to us.
+    // It's saved in the stack and kept live in cp.
+    __ mov(cp, v0);
+    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = info->scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                                 (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ ld(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextOperand(cp, var->index());
+        __ sd(a0, target);
+
+        // Update the write barrier.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp, target.offset(), a0, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, a0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+  }
+  Variable* arguments = scope()->arguments();
+  if (arguments != NULL) {
+    // Function uses arguments object.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (!function_in_register) {
+      // Reload the function, since a1 was clobbered by the context
+      // allocation above.
+      __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    } else {
+      __ mov(a3, a1);
+    }
+    // Receiver is just before the parameters on the caller's stack.
+    int num_parameters = info->scope()->num_parameters();
+    int offset = num_parameters * kPointerSize;
+    __ Daddu(a2, fp,
+             Operand(StandardFrameConstants::kCallerSPOffset + offset));
+    __ li(a1, Operand(Smi::FromInt(num_parameters)));
+    __ Push(a3, a2, a1);
+
+    // Arguments to ArgumentsAccessStub:
+    //   function, receiver address, parameter count.
+    // The stub will rewrite the receiver and parameter count if the previous
+    // stack frame was an arguments adaptor frame.
+    ArgumentsAccessStub::Type type;
+    if (strict_mode() == STRICT) {
+      type = ArgumentsAccessStub::NEW_STRICT;
+    } else if (function()->has_duplicate_parameters()) {
+      type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+    } else {
+      type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+    }
+    ArgumentsAccessStub stub(isolate(), type);
+    __ CallStub(&stub);
+
+    SetVar(arguments, v0, a1, a2);
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  // Visit the declarations and body unless there is an illegal
+  // redeclaration.
+  if (scope()->HasIllegalRedeclaration()) {
+    Comment cmnt(masm_, "[ Declarations");
+    scope()->VisitIllegalRedeclaration(this);
+
+  } else {
+    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+    { Comment cmnt(masm_, "[ Declarations");
+      // For named function expressions, declare the function name as a
+      // constant.
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
+        VariableDeclaration* function = scope()->function();
+        DCHECK(function->proxy()->var()->mode() == CONST ||
+               function->proxy()->var()->mode() == CONST_LEGACY);
+        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        VisitVariableDeclaration(function);
+      }
+      VisitDeclarations(scope()->declarations());
+    }
+    { Comment cmnt(masm_, "[ Stack check");
+      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+      Label ok;
+      __ LoadRoot(at, Heap::kStackLimitRootIndex);
+      __ Branch(&ok, hs, sp, Operand(at));
+      Handle<Code> stack_check = isolate()->builtins()->StackCheck();
+      PredictableCodeSizeScope predictable(masm_,
+          masm_->CallSize(stack_check, RelocInfo::CODE_TARGET));
+      __ Call(stack_check, RelocInfo::CODE_TARGET);
+      __ bind(&ok);
+    }
+
+    { Comment cmnt(masm_, "[ Body");
+      DCHECK(loop_depth() == 0);
+
+      VisitStatements(function()->body());
+
+      DCHECK(loop_depth() == 0);
+    }
+  }
+
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
+  { Comment cmnt(masm_, "[ return <undefined>;");
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  }
+  EmitReturnSequence();
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+  DCHECK(Smi::FromInt(0) == 0);
+  __ mov(v0, zero_reg);
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
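+  // The profiling counter lives in a heap Cell as a Smi. Decrement it in
+  // place; callers test the updated value, which is left in a3.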
+  __ li(a2, Operand(profiling_counter_));
+  __ ld(a3, FieldMemOperand(a2, Cell::kValueOffset));
+  __ Dsubu(a3, a3, Operand(Smi::FromInt(delta)));
+  __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  if (info_->is_debug()) {
+    // Detect debug break requests as soon as possible.
+    reset_value = FLAG_interrupt_budget >> 4;
+  }
+  __ li(a2, Operand(profiling_counter_));
+  __ li(a3, Operand(Smi::FromInt(reset_value)));
+  __ sd(a3, FieldMemOperand(a2, Cell::kValueOffset));
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+                                                Label* back_edge_target) {
+  // The generated code is used in Deoptimizer::PatchStackCheckCodeAt so we need
+  // to make sure it is constant. Branch may emit a skip-or-jump sequence
+  // instead of the normal Branch. It seems that the "skip" part of that
+  // sequence is about as long as this Branch would be so it is safe to ignore
+  // that.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Comment cmnt(masm_, "[ Back edge bookkeeping");
+  Label ok;
+  DCHECK(back_edge_target->is_bound());
+  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
+  int weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
+  EmitProfilingCounterDecrement(weight);
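+  // The updated counter is in a3; skip the interrupt check while it is
+  // still non-negative.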
+  __ slt(at, a3, zero_reg);
+  __ beq(at, zero_reg, &ok);
+  // Call will emit a li t9 first, so it is safe to use the delay slot.
+  __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+  // Record a mapping of this PC offset to the OSR id.  This is used to find
+  // the AST id from the unoptimized code in order to use it as a key into
+  // the deoptimization input data found in the optimized code.
+  RecordBackEdge(stmt->OsrEntryId());
+  EmitProfilingCounterReset();
+
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  // Record a mapping of the OSR id to this PC.  This is used if the OSR
+  // entry becomes the target of a bailout.  We don't expect it to be, but
+  // we want it to work if it is.
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+  Comment cmnt(masm_, "[ Return sequence");
+  if (return_label_.is_bound()) {
+    __ Branch(&return_label_);
+  } else {
+    __ bind(&return_label_);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in v0.
+      __ push(v0);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+    // Pretend that the exit is a backwards jump to the entry.
+    int weight = 1;
+    if (info_->ShouldSelfOptimize()) {
+      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+    } else {
+      int distance = masm_->pc_offset();
+      weight = Min(kMaxBackEdgeWeight,
+                   Max(1, distance / kCodeSizeMultiplier));
+    }
+    EmitProfilingCounterDecrement(weight);
+    Label ok;
+    __ Branch(&ok, ge, a3, Operand(zero_reg));
+    __ push(v0);
+    __ Call(isolate()->builtins()->InterruptCheck(),
+            RelocInfo::CODE_TARGET);
+    __ pop(v0);
+    EmitProfilingCounterReset();
+    __ bind(&ok);
+
+#ifdef DEBUG
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    masm_->bind(&check_exit_codesize);
+#endif
+    // Make sure that the constant pool is not emitted inside of the return
+    // sequence.
+    { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+      // Here we use masm_-> instead of the __ macro to prevent the code
+      // coverage tool from instrumenting, as we rely on the exact code size.
+      int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
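+      // sp_delta covers the receiver plus all parameters; the callee is
+      // responsible for removing them from the stack before returning.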
+      CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+      __ RecordJSReturn();
+      masm_->mov(sp, fp);
+      int no_frame_start = masm_->pc_offset();
+      masm_->MultiPop(static_cast<RegList>(fp.bit() | ra.bit()));
+      masm_->Daddu(sp, sp, Operand(sp_delta));
+      masm_->Jump(ra);
+      info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+    }
+
+#ifdef DEBUG
+    // Check that the size of the code used for returning is large enough
+    // for the debugger's requirements.
+    DCHECK(Assembler::kJSReturnSequenceInstructions <=
+           masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+  }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+  // For simplicity we always test the accumulator register.
+  codegen()->GetVar(result_register(), var);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(this);
+  }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Handle<Object> lit) const {
+  __ li(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  // Immediates cannot be pushed directly.
+  __ li(result_register(), Operand(lit));
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      if (false_label_ != fall_through_) __ Branch(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ Branch(true_label_);
+    }
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      if (false_label_ != fall_through_) __ Branch(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ Branch(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ li(result_register(), Operand(lit));
+    codegen()->DoTest(this);
+  }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+                                                   Register reg) const {
+  DCHECK(count > 0);
+  __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+    int count,
+    Register reg) const {
+  DCHECK(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+                                                       Register reg) const {
+  DCHECK(count > 0);
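+  // Keep the deepest slot and overwrite it with reg, saving a separate
+  // drop-and-push pair.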
+  if (count > 1) __ Drop(count - 1);
+  __ sd(reg, MemOperand(sp, 0));
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+                                                 Register reg) const {
+  DCHECK(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+                                            Label* materialize_false) const {
+  DCHECK(materialize_true == materialize_false);
+  __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+  __ Branch(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Label* materialize_true,
+    Label* materialize_false) const {
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(at, Heap::kTrueValueRootIndex);
+  // Push the value as the following branch can clobber at in long branch mode.
+  __ push(at);
+  __ Branch(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(at, Heap::kFalseValueRootIndex);
+  __ push(at);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+                                          Label* materialize_false) const {
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(at, value_root_index);
+  __ push(at);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(),
+                                          true,
+                                          true_label_,
+                                          false_label_);
+  if (flag) {
+    if (true_label_ != fall_through_) __ Branch(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ Branch(false_label_);
+  }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition,
+                               Label* if_true,
+                               Label* if_false,
+                               Label* fall_through) {
+  __ mov(a0, result_register());
+  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  CallIC(ic, condition->test_id());
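+  // The ToBoolean stub returns zero in v0 for false and a non-zero value
+  // for true, so the split below branches on v0 != 0.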
+  __ mov(at, zero_reg);
+  Split(ne, v0, Operand(at), if_true, if_false, fall_through);
+}
+
+
+void FullCodeGenerator::Split(Condition cc,
+                              Register lhs,
+                              const Operand& rhs,
+                              Label* if_true,
+                              Label* if_false,
+                              Label* fall_through) {
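+  // Emit as few branches as possible: when one of the targets is the
+  // fall-through, a single conditional branch to the other target suffices.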
+  if (if_false == fall_through) {
+    __ Branch(if_true, cc, lhs, rhs);
+  } else if (if_true == fall_through) {
+    __ Branch(if_false, NegateCondition(cc), lhs, rhs);
+  } else {
+    __ Branch(if_true, cc, lhs, rhs);
+    __ Branch(if_false);
+  }
+}
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+  DCHECK(var->IsStackAllocated());
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -var->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  if (var->IsParameter()) {
+    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+  } else {
+    offset += JavaScriptFrameConstants::kLocal0Offset;
+  }
+  return MemOperand(fp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  if (var->IsContextSlot()) {
+    int context_chain_length = scope()->ContextChainLength(var->scope());
+    __ LoadContext(scratch, context_chain_length);
+    return ContextOperand(scratch, var->index());
+  } else {
+    return StackOperand(var);
+  }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+  // Use destination as scratch.
+  MemOperand location = VarOperand(var, dest);
+  __ ld(dest, location);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var,
+                               Register src,
+                               Register scratch0,
+                               Register scratch1) {
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!scratch0.is(src));
+  DCHECK(!scratch0.is(scratch1));
+  DCHECK(!scratch1.is(src));
+  MemOperand location = VarOperand(var, scratch0);
+  __ sd(src, location);
+  // Emit the write barrier code if the location is in the heap.
+  if (var->IsContextSlot()) {
+    __ RecordWriteContextSlot(scratch0,
+                              location.offset(),
+                              src,
+                              scratch1,
+                              kRAHasBeenSaved,
+                              kDontSaveFPRegs);
+  }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  Label skip;
+  if (should_normalize) __ Branch(&skip);
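+  // The normalization code below is reached only via bailout; it re-tests
+  // the value in a0 against the true root so the split resumes with a
+  // canonical boolean.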
+  PrepareForBailout(expr, TOS_REG);
+  if (should_normalize) {
+    __ LoadRoot(a4, Heap::kTrueValueRootIndex);
+    Split(eq, a0, Operand(a4), if_true, if_false, NULL);
+    __ bind(&skip);
+  }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (generate_debug_code_) {
+    // Check that we're not inside a with or catch context.
+    __ ld(a1, FieldMemOperand(cp, HeapObject::kMapOffset));
+    __ LoadRoot(a4, Heap::kWithContextMapRootIndex);
+    __ Check(ne, kDeclarationInWithContext,
+             a1, Operand(a4));
+    __ LoadRoot(a4, Heap::kCatchContextMapRootIndex);
+    __ Check(ne, kDeclarationInCatchContext,
+             a1, Operand(a4));
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
+  // If it was not possible to allocate the variable at compile time, we
+  // need to "declare" it at runtime to make sure it actually exists in the
+  // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
+  Variable* variable = proxy->var();
+  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
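+  // LET, CONST and CONST_LEGACY bindings start out holding the hole so that
+  // reads before initialization can be detected.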
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      globals_->Add(variable->name(), zone());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value(),
+                    zone());
+      break;
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+        __ sd(a4, StackOperand(variable));
+      }
+      break;
+
+    case Variable::CONTEXT:
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
+        __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+        __ sd(at, ContextOperand(cp, variable->index()));
+        // No write barrier since the_hole_value is in old space.
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      }
+      break;
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ VariableDeclaration");
+      __ li(a2, Operand(variable->name()));
+      // Declaration nodes are always introduced in one of four modes.
+      DCHECK(IsDeclaredVariableMode(mode));
+      PropertyAttributes attr =
+          IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
+      __ li(a1, Operand(Smi::FromInt(attr)));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (hole_init) {
+        __ LoadRoot(a0, Heap::kTheHoleValueRootIndex);
+        __ Push(cp, a2, a1, a0);
+      } else {
+        DCHECK(Smi::FromInt(0) == 0);
+        __ mov(a0, zero_reg);  // Smi::FromInt(0) indicates no initial value.
+        __ Push(cp, a2, a1, a0);
+      }
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name(), zone());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function, zone());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ sd(result_register(), StackOperand(variable));
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ sd(result_register(), ContextOperand(cp, variable->index()));
+      int offset = Context::SlotOffset(variable->index());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(cp,
+                                offset,
+                                result_register(),
+                                a2,
+                                kRAHasBeenSaved,
+                                kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET,
+                                OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ li(a2, Operand(variable->name()));
+      __ li(a1, Operand(Smi::FromInt(NONE)));
+      __ Push(cp, a2, a1);
+      // Push initial value for function declaration.
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  Variable* variable = declaration->proxy()->var();
+  DCHECK(variable->location() == Variable::CONTEXT);
+  DCHECK(variable->interface()->IsFrozen());
+  Comment cmnt(masm_, "[ ModuleDeclaration");
+  EmitDebugCheckDeclarationContext(variable);
+
+  // Load instance object.
+  __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ ld(a1, ContextOperand(a1, variable->interface()->Index()));
+  __ ld(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
+
+  // Assign it.
+  __ sd(a1, ContextOperand(cp, variable->index()));
+  // We know that we have written a module, which is not a smi.
+  __ RecordWriteContextSlot(cp,
+                            Context::SlotOffset(variable->index()),
+                            a1,
+                            a3,
+                            kRAHasBeenSaved,
+                            kDontSaveFPRegs,
+                            EMIT_REMEMBERED_SET,
+                            OMIT_SMI_CHECK);
+  PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+  // Traverse into body.
+  Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  // The context is the first argument.
+  __ li(a1, Operand(pairs));
+  __ li(a0, Operand(Smi::FromInt(DeclareGlobalsFlags())));
+  __ Push(cp, a1, a0);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+  // Call the runtime to declare the modules.
+  __ Push(descriptions);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
+  // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
+
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as final fall through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForAccumulatorValue(clause->label());
+    __ mov(a0, result_register());  // CompareStub requires args in a0, a1.
+
+    // Perform the comparison as if via '==='.
+    __ ld(a1, MemOperand(sp, 0));  // Switch value.
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
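+    // If both operands are smis, an inlined register compare decides the
+    // case without calling the CompareIC; the patch site records the smi
+    // check so the IC can patch it later.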
+    JumpPatchSite patch_site(masm_);
+    if (inline_smi_code) {
+      Label slow_case;
+      __ or_(a2, a1, a0);
+      patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+
+      __ Branch(&next_test, ne, a1, Operand(a0));
+      __ Drop(1);  // Switch value is no longer needed.
+      __ Branch(clause->body_target());
+
+      __ bind(&slow_case);
+    }
+
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+    CallIC(ic, clause->CompareId());
+    patch_site.EmitPatchInfo();
+
+    Label skip;
+    __ Branch(&skip);
+    PrepareForBailout(clause, TOS_REG);
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(&next_test, ne, v0, Operand(at));
+    __ Drop(1);
+    __ Branch(clause->body_target());
+    __ bind(&skip);
+
+    __ Branch(&next_test, ne, v0, Operand(zero_reg));
+    __ Drop(1);  // Switch value is no longer needed.
+    __ Branch(clause->body_target());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ Branch(nested_statement.break_label());
+  } else {
+    __ Branch(default_clause->body_target());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target());
+    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_label());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  Comment cmnt(masm_, "[ ForInStatement");
+  int slot = stmt->ForInFeedbackSlot();
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. If the object is null or undefined, skip
+  // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  VisitForAccumulatorValue(stmt->enumerable());
+  __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&exit, eq, a0, Operand(at));
+  Register null_value = a5;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ Branch(&exit, eq, a0, Operand(null_value));
+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+  __ mov(a0, v0);
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ JumpIfSmi(a0, &convert);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&done_convert, ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ bind(&convert);
+  __ push(a0);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ mov(a0, v0);
+  __ bind(&done_convert);
+  __ push(a0);
+
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ GetObjectType(a0, a1, a1);
+  __ Branch(&call_runtime, le, a1, Operand(LAST_JS_PROXY_TYPE));
+
+  // Check cache validity in generated code. This is a fast case for
+  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+  // guarantee cache validity, call the runtime system to check cache
+  // validity or get the property names in a fixed array.
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  Label use_cache;
+  __ ld(v0, FieldMemOperand(a0, HeapObject::kMapOffset));
+  __ Branch(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(a0);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kMetaMapRootIndex);
+  __ Branch(&fixed_array, ne, a2, Operand(at));
+
+  // We got a map in register v0. Get the enumeration cache from it.
+  Label no_descriptors;
+  __ bind(&use_cache);
+
+  __ EnumLength(a1, v0);
+  __ Branch(&no_descriptors, eq, a1, Operand(Smi::FromInt(0)));
+
+  __ LoadInstanceDescriptors(v0, a2);
+  __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheOffset));
+  __ ld(a2, FieldMemOperand(a2, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ li(a0, Operand(Smi::FromInt(0)));
+  // Push map, enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(v0, a2, a1, a0);
+  __ jmp(&loop);
+
+  __ bind(&no_descriptors);
+  __ Drop(1);
+  __ jmp(&exit);
+
+  // We got a fixed array in register v0. Iterate through that.
+  Label non_proxy;
+  __ bind(&fixed_array);
+
+  __ li(a1, FeedbackVector());
+  __ li(a2, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  __ sd(a2, FieldMemOperand(a1, FixedArray::OffsetOfElementAt(slot)));
+
+  __ li(a1, Operand(Smi::FromInt(1)));  // Smi indicates slow check.
+  __ ld(a2, MemOperand(sp, 0 * kPointerSize));  // Get enumerated object.
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ GetObjectType(a2, a3, a3);
+  __ Branch(&non_proxy, gt, a3, Operand(LAST_JS_PROXY_TYPE));
+  __ li(a1, Operand(Smi::FromInt(0)));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ Push(a1, v0);  // Smi and array
+  __ ld(a1, FieldMemOperand(v0, FixedArray::kLengthOffset));
+  __ li(a0, Operand(Smi::FromInt(0)));
+  __ Push(a1, a0);  // Fixed array length (as smi) and initial index.
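+  // The for-in state now occupies five stack slots, from deepest to top:
+  // the enumerable object, its map (or a smi flag in the slow case), the
+  // key array, the array length (smi) and the current index (smi).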
+
+  // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  __ bind(&loop);
+  // Load the current count to a0, load the length to a1.
+  __ ld(a0, MemOperand(sp, 0 * kPointerSize));
+  __ ld(a1, MemOperand(sp, 1 * kPointerSize));
+  __ Branch(loop_statement.break_label(), hs, a0, Operand(a1));
+
+  // Get the current entry of the array into register a3.
+  __ ld(a2, MemOperand(sp, 2 * kPointerSize));
+  __ Daddu(a2, a2, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiScale(a4, a0, kPointerSizeLog2);
+  __ daddu(a4, a2, a4);  // Array base + scaled (smi) index.
+  __ ld(a3, MemOperand(a4));  // Current entry.
+
+  // Get the expected map from the stack or a smi in the
+  // permanent slow case into register a2.
+  __ ld(a2, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we may have to filter the key.
+  Label update_each;
+  __ ld(a1, MemOperand(sp, 4 * kPointerSize));
+  __ ld(a4, FieldMemOperand(a1, HeapObject::kMapOffset));
+  __ Branch(&update_each, eq, a4, Operand(a2));
+
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  DCHECK_EQ(Smi::FromInt(0), 0);
+  __ Branch(&update_each, eq, a2, Operand(zero_reg));
+
+  // Convert the entry to a string or (smi) 0 if it isn't a property
+  // any more. If the property has been removed while iterating, we
+  // just skip it.
+  __ Push(a1, a3);  // Enumerable and current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  __ mov(a3, result_register());
+  __ Branch(loop_statement.continue_label(), eq, a3, Operand(zero_reg));
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register a3.
+  __ bind(&update_each);
+  __ mov(result_register(), a3);
+  // Perform the assignment as if via '='.
+  { EffectContext context(this);
+    EmitAssignment(stmt->each());
+  }
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing the index
+  // (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_label());
+  __ pop(a0);
+  __ Daddu(a0, a0, Operand(Smi::FromInt(1)));
+  __ push(a0);
+
+  EmitBackEdgeBookkeeping(stmt, &loop);
+  __ Branch(&loop);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_label());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(&exit);
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+  Comment cmnt(masm_, "[ ForOfStatement");
+  SetStatementPosition(stmt);
+
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // var iterator = iterable[Symbol.iterator]();
+  VisitForEffect(stmt->assign_iterator());
+
+  // Loop entry.
+  __ bind(loop_statement.continue_label());
+
+  // result = iterator.next()
+  VisitForEffect(stmt->next_result());
+
+  // if (result.done) break;
+  Label result_not_done;
+  VisitForControl(stmt->result_done(),
+                  loop_statement.break_label(),
+                  &result_not_done,
+                  &result_not_done);
+  __ bind(&result_not_done);
+
+  // each = result.value
+  VisitForEffect(stmt->assign_each());
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Check stack before looping.
+  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+  __ jmp(loop_statement.continue_label());
+
+  // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(loop_statement.break_label());
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+                                       bool pretenure) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt &&
+      !FLAG_prepare_always_opt &&
+      !pretenure &&
+      scope()->is_function_scope() &&
+      info->num_literals() == 0) {
+    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+    __ li(a2, Operand(info));
+    __ CallStub(&stub);
+  } else {
+    __ li(a0, Operand(info));
+    __ LoadRoot(a1, pretenure ? Heap::kTrueValueRootIndex
+                              : Heap::kFalseValueRootIndex);
+    __ Push(cp, a0, a1);
+    __ CallRuntime(Runtime::kNewClosure, 3);
+  }
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cmnt(masm_, "[ SuperReference");
+
+  __ ld(LoadDescriptor::ReceiverRegister(),
+        MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ li(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  Label done;
+  __ Branch(&done, ne, v0, Operand(isolate()->factory()->undefined_value()));
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
+                                                      TypeofState typeof_state,
+                                                      Label* slow) {
+  Register current = cp;
+  Register next = a1;
+  Register temp = a2;
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_sloppy_eval()) {
+        // Check that extension is NULL.
+        __ ld(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+        __ Branch(slow, ne, temp, Operand(zero_reg));
+      }
+      // Load next context in chain.
+      __ ld(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      current = next;
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.
+    if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s->is_eval_scope()) {
+    Label loop, fast;
+    if (!current.is(next)) {
+      __ Move(next, current);
+    }
+    __ bind(&loop);
+    // Terminate at native context.
+    __ ld(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+    __ LoadRoot(a4, Heap::kNativeContextMapRootIndex);
+    __ Branch(&fast, eq, temp, Operand(a4));
+    // Check that extension is NULL.
+    __ ld(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+    __ Branch(slow, ne, temp, Operand(zero_reg));
+    // Load next context in chain.
+    __ ld(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+    __ Branch(&loop);
+    __ bind(&fast);
+  }
+
+  __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+  __ li(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
+  if (FLAG_vector_ics) {
+    __ li(VectorLoadICDescriptor::SlotRegister(),
+          Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+  }
+
+  ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
+      ? NOT_CONTEXTUAL
+      : CONTEXTUAL;
+  CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+                                                                Label* slow) {
+  DCHECK(var->IsContextSlot());
+  Register context = cp;
+  Register next = a3;
+  Register temp = a4;
+
+  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_sloppy_eval()) {
+        // Check that extension is NULL.
+        __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ Branch(slow, ne, temp, Operand(zero_reg));
+      }
+      __ ld(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      context = next;
+    }
+  }
+  // Check that last extension is NULL.
+  __ ld(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+  __ Branch(slow, ne, temp, Operand(zero_reg));
+
+  // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+  // destroy the cp register).
+  return ContextOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
+                                                  TypeofState typeof_state,
+                                                  Label* slow,
+                                                  Label* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  Variable* var = proxy->var();
+  if (var->mode() == DYNAMIC_GLOBAL) {
+    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+    __ Branch(done);
+  } else if (var->mode() == DYNAMIC_LOCAL) {
+    Variable* local = var->local_if_not_shadowed();
+    __ ld(v0, ContextSlotOperandCheckExtensions(local, slow));
+    if (local->mode() == LET || local->mode() == CONST ||
+        local->mode() == CONST_LEGACY) {
+      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+      __ dsubu(at, v0, at);  // Sub as compare: at == 0 on eq.
+      if (local->mode() == CONST_LEGACY) {
+        __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+        __ Movz(v0, a0, at);  // Conditional move: return Undefined if TheHole.
+      } else {  // LET || CONST
+        __ Branch(done, ne, at, Operand(zero_reg));
+        __ li(a0, Operand(var->name()));
+        __ push(a0);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
+    }
+    __ Branch(done);
+  }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+  // Record position before possible IC call.
+  SetSourcePosition(proxy->position());
+  Variable* var = proxy->var();
+
+  // Three cases: global variables, lookup variables, and all other types of
+  // variables.
+  switch (var->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "[ Global variable");
+      // Use inline caching. Variable name is passed in a2 and the global
+      // object (receiver) in a0.
+      __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+      __ li(LoadDescriptor::NameRegister(), Operand(var->name()));
+      if (FLAG_vector_ics) {
+        __ li(VectorLoadICDescriptor::SlotRegister(),
+              Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+      }
+      CallLoadIC(CONTEXTUAL);
+      context()->Plug(v0);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+                                               : "[ Stack variable");
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        DCHECK(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
+        } else {
+          // Check that we always have valid source position.
+          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+          DCHECK(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST_LEGACY &&
+              var->initializer_position() < proxy->position();
+        }
+
+        if (!skip_init_check) {
+          // Let and const need a read barrier.
+          GetVar(v0, var);
+          __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+          __ dsubu(at, v0, at);  // Sub as compare: at == 0 on eq.
+          if (var->mode() == LET || var->mode() == CONST) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            Label done;
+            __ Branch(&done, ne, at, Operand(zero_reg));
+            __ li(a0, Operand(var->name()));
+            __ push(a0);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+            __ bind(&done);
+          } else {
+            // Uninitialized const bindings outside harmony mode are unholed.
+            DCHECK(var->mode() == CONST_LEGACY);
+            __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+            __ Movz(v0, a0, at);  // Conditional move: Undefined if TheHole.
+          }
+          context()->Plug(v0);
+          break;
+        }
+      }
+      context()->Plug(var);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ Lookup variable");
+      Label done, slow;
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+      __ bind(&slow);
+      __ li(a1, Operand(var->name()));
+      __ Push(cp, a1);  // Context and name.
+      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+      __ bind(&done);
+      context()->Plug(v0);
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExpLiteral");
+  Label materialized;
+  // Registers will be used as follows:
+  // a5 = materialized value (RegExp literal)
+  // a4 = JS function, literals array
+  // a3 = literal index
+  // a2 = RegExp pattern
+  // a1 = RegExp flags
+  // a0 = RegExp literal clone
+  __ ld(a0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ld(a4, FieldMemOperand(a0, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ ld(a5, FieldMemOperand(a4, literal_offset));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&materialized, ne, a5, Operand(at));
+
+  // Create regexp literal using runtime function.
+  // Result will be in v0.
+  __ li(a3, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a2, Operand(expr->pattern()));
+  __ li(a1, Operand(expr->flags()));
+  __ Push(a4, a3, a2, a1);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ mov(a5, v0);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ Push(a5, a0);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+  __ pop(a5);
+
+  __ bind(&allocated);
+
+  // After this, registers are used as follows:
+  // v0: Newly allocated regexp.
+  // a5: Materialized regexp.
+  // a2: temp.
+  __ CopyFields(v0, a5, a2.bit(), size / kPointerSize);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+  if (expression == NULL) {
+    __ LoadRoot(a1, Heap::kNullValueRootIndex);
+    __ push(a1);
+  } else {
+    VisitForStackValue(expression);
+  }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+
+  expr->BuildConstantProperties(isolate());
+  Handle<FixedArray> constant_properties = expr->constant_properties();
+  __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a1, Operand(constant_properties));
+  int flags = expr->fast_elements()
+      ? ObjectLiteral::kFastElements
+      : ObjectLiteral::kNoFlags;
+  flags |= expr->has_function()
+      ? ObjectLiteral::kHasFunction
+      : ObjectLiteral::kNoFlags;
+  __ li(a0, Operand(Smi::FromInt(flags)));
+  int properties_count = constant_properties->length() / 2;
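+  // Use the fast clone stub only for shallow literals that are small enough
+  // and contain no doubles; everything else goes through the runtime.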
+  if (expr->may_store_doubles() || expr->depth() > 1 ||
+      masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+    __ Push(a3, a2, a1, a0);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+  } else {
+    FastCloneShallowObjectStub stub(isolate(), properties_count);
+    __ CallStub(&stub);
+  }
+
+  // If result_saved is true the result is on top of the stack.  If
+  // result_saved is false the result is in v0.
+  bool result_saved = false;
+
+  // Mark all computed expressions that are bound to a key that
+  // is shadowed by a later occurrence of the same key. For the
+  // marked expressions, no store code is emitted.
+  expr->CalculateEmitStore(zone());
+
+  AccessorTable accessor_table(zone());
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(v0);  // Save result on stack.
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+        // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        if (key->value()->IsInternalizedString()) {
+          if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            __ mov(StoreDescriptor::ValueRegister(), result_register());
+            DCHECK(StoreDescriptor::ValueRegister().is(a0));
+            __ li(StoreDescriptor::NameRegister(), Operand(key->value()));
+            __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+            CallStoreIC(key->LiteralFeedbackId());
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
+          }
+          break;
+        }
+        // Duplicate receiver on stack.
+        __ ld(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
+        if (property->emit_store()) {
+          __ li(a0, Operand(Smi::FromInt(SLOPPY)));  // PropertyAttributes.
+          __ push(a0);
+          __ CallRuntime(Runtime::kSetProperty, 4);
+        } else {
+          __ Drop(3);
+        }
+        break;
+      case ObjectLiteral::Property::PROTOTYPE:
+        // Duplicate receiver on stack.
+        __ ld(a0, MemOperand(sp));
+        __ push(a0);
+        VisitForStackValue(value);
+        if (property->emit_store()) {
+          __ CallRuntime(Runtime::kSetPrototype, 2);
+        } else {
+          __ Drop(2);
+        }
+        break;
+      case ObjectLiteral::Property::GETTER:
+        accessor_table.lookup(key)->second->getter = value;
+        break;
+      case ObjectLiteral::Property::SETTER:
+        accessor_table.lookup(key)->second->setter = value;
+        break;
+    }
+  }
+
+  // Emit code to define accessors, using only a single call to the runtime for
+  // each pair of corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end();
+       ++it) {
+    __ ld(a0, MemOperand(sp));  // Duplicate receiver.
+    __ push(a0);
+    VisitForStackValue(it->first);
+    EmitAccessor(it->second->getter);
+    EmitAccessor(it->second->setter);
+    __ li(a0, Operand(Smi::FromInt(NONE)));
+    __ push(a0);
+    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+  }
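+  // For example, { get x() { ... }, set x(v) { ... } } reaches here as a
+  // single accessor_table entry, so both closures are installed with one
+  // kDefineAccessorPropertyUnchecked call rather than two.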
+
+  if (expr->has_function()) {
+    DCHECK(result_saved);
+    __ ld(a0, MemOperand(sp));
+    __ push(a0);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(v0);
+  }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  expr->BuildConstantElements(isolate());
+  int flags = expr->depth() == 1
+      ? ArrayLiteral::kShallowElements
+      : ArrayLiteral::kNoFlags;
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  DCHECK_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_fast_elements =
+      IsFastObjectElementsKind(constant_elements_kind);
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
+
+  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+    // If transitioning is the only customer of allocation sites, tracking
+    // can be turned off when there is nothing left to transition to.
+    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+  }
+
+  __ mov(a0, result_register());
+  __ ld(a3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ ld(a3, FieldMemOperand(a3, JSFunction::kLiteralsOffset));
+  __ li(a2, Operand(Smi::FromInt(expr->literal_index())));
+  __ li(a1, Operand(constant_elements));
+  if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+    __ li(a0, Operand(Smi::FromInt(flags)));
+    __ Push(a3, a2, a1, a0);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+  } else {
+    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+    __ CallStub(&stub);
+  }
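+  // For example, [1, 2, 3] is cloned by FastCloneShallowArrayStub, while a
+  // nested literal such as [[1], [2]] (depth > 1) or a very long literal is
+  // created by Runtime::kCreateArrayLiteral instead.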
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  for (int i = 0; i < length; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    if (!result_saved) {
+      __ push(v0);  // array literal
+      __ Push(Smi::FromInt(expr->literal_index()));
+      result_saved = true;
+    }
+
+    VisitForAccumulatorValue(subexpr);
+
+    if (IsFastObjectElementsKind(constant_elements_kind)) {
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ ld(a6, MemOperand(sp, kPointerSize));  // Copy of array literal.
+      __ ld(a1, FieldMemOperand(a6, JSObject::kElementsOffset));
+      __ sd(result_register(), FieldMemOperand(a1, offset));
+      // Update the write barrier for the array store.
+      __ RecordWriteField(a1, offset, result_register(), a2,
+                          kRAHasBeenSaved, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
+    } else {
+      __ li(a3, Operand(Smi::FromInt(i)));
+      __ mov(a0, result_register());
+      StoreArrayLiteralElementStub stub(isolate());
+      __ CallStub(&stub);
+    }
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+  }
+  if (result_saved) {
+    __ Pop();  // literal index
+    context()->PlugTOS();
+  } else {
+    context()->Plug(v0);
+  }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  DCHECK(expr->target()->IsValidReferenceExpression());
+
+  Comment cmnt(masm_, "[ Assignment");
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* property = expr->target()->AsProperty();
+  if (property != NULL) {
+    assign_type = (property->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
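+  // For example: x = v is VARIABLE, o.x = v is NAMED_PROPERTY, and
+  // o[k] = v is KEYED_PROPERTY.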
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the register.
+        VisitForStackValue(property->obj());
+        __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+      } else {
+        VisitForStackValue(property->obj());
+      }
+      break;
+    case KEYED_PROPERTY:
+      // We need the key and receiver on both the stack and in v0 and a1.
+      if (expr->is_compound()) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ ld(LoadDescriptor::ReceiverRegister(),
+              MemOperand(sp, 1 * kPointerSize));
+        __ ld(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+      } else {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+      }
+      break;
+  }
+
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
+  if (expr->is_compound()) {
+    { AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy());
+          PrepareForBailout(expr->target(), TOS_REG);
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+      }
+    }
+
+    Token::Value op = expr->binary_op();
+    __ push(v0);  // Left operand goes on the stack.
+    VisitForAccumulatorValue(expr->value());
+
+    OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+        ? OVERWRITE_RIGHT
+        : NO_OVERWRITE;
+    SetSourcePosition(expr->position() + 1);
+    AccumulatorValueContext context(this);
+    if (ShouldInlineSmiCase(op)) {
+      EmitInlineSmiBinaryOp(expr->binary_operation(),
+                            op,
+                            mode,
+                            expr->target(),
+                            expr->value());
+    } else {
+      EmitBinaryOp(expr->binary_operation(), op, mode);
+    }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
+  } else {
+    VisitForAccumulatorValue(expr->value());
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(v0);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+  Comment cmnt(masm_, "[ Yield");
+  // Evaluate yielded value first; the initial iterator definition depends on
+  // this.  It stays on the stack while we update the iterator.
+  VisitForStackValue(expr->expression());
+
+  switch (expr->yield_kind()) {
+    case Yield::kSuspend:
+      // Pop value from top-of-stack slot; box result into result register.
+      EmitCreateIteratorResult(false);
+      __ push(result_register());
+      // Fall through.
+    case Yield::kInitial: {
+      Label suspend, continuation, post_runtime, resume;
+
+      __ jmp(&suspend);
+
+      __ bind(&continuation);
+      __ jmp(&resume);
+
+      __ bind(&suspend);
+      VisitForAccumulatorValue(expr->generator_object());
+      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      __ li(a1, Operand(Smi::FromInt(continuation.pos())));
+      __ sd(a1, FieldMemOperand(v0, JSGeneratorObject::kContinuationOffset));
+      __ sd(cp, FieldMemOperand(v0, JSGeneratorObject::kContextOffset));
+      __ mov(a1, cp);
+      __ RecordWriteField(v0, JSGeneratorObject::kContextOffset, a1, a2,
+                          kRAHasBeenSaved, kDontSaveFPRegs);
+      __ Daddu(a1, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+      __ Branch(&post_runtime, eq, sp, Operand(a1));
+      __ push(v0);  // generator object
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ bind(&post_runtime);
+      __ pop(result_register());
+      EmitReturnSequence();
+
+      __ bind(&resume);
+      context()->Plug(result_register());
+      break;
+    }
+
+    case Yield::kFinal: {
+      VisitForAccumulatorValue(expr->generator_object());
+      __ li(a1, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorClosed)));
+      __ sd(a1, FieldMemOperand(result_register(),
+                                JSGeneratorObject::kContinuationOffset));
+      // Pop value from top-of-stack slot, box result into result register.
+      EmitCreateIteratorResult(true);
+      EmitUnwindBeforeReturn();
+      EmitReturnSequence();
+      break;
+    }
+
+    case Yield::kDelegating: {
+      VisitForStackValue(expr->generator_object());
+
+      // Initial stack layout is as follows:
+      // [sp + 1 * kPointerSize] iter
+      // [sp + 0 * kPointerSize] g
+
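+      // In outline, the code below behaves roughly like this JavaScript
+      // (a sketch: 'received' travels in a0, and the iterator result object
+      // is forwarded to the caller without re-boxing):
+      //
+      //   var received = undefined;
+      //   for (;;) {
+      //     var result = iter.next(received);   // l_next / l_call
+      //     if (result.done) break;             // yield* then evaluates to
+      //                                         // result.value
+      //     received = yield result;            // l_try / l_suspend
+      //   }
+      //
+      // Exceptions thrown into this generator are forwarded to the delegate
+      // via iter['throw'] (l_catch).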
+      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+      Label l_next, l_call;
+      Register load_receiver = LoadDescriptor::ReceiverRegister();
+      Register load_name = LoadDescriptor::NameRegister();
+      // Initial send value is undefined.
+      __ LoadRoot(a0, Heap::kUndefinedValueRootIndex);
+      __ Branch(&l_next);
+
+      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+      __ bind(&l_catch);
+      __ mov(a0, v0);
+      handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+      __ LoadRoot(a2, Heap::kthrow_stringRootIndex);  // "throw"
+      __ ld(a3, MemOperand(sp, 1 * kPointerSize));    // iter
+      __ Push(a2, a3, a0);                            // "throw", iter, except
+      __ jmp(&l_call);
+
+      // try { received = %yield result }
+      // Shuffle the received result above a try handler and yield it without
+      // re-boxing.
+      __ bind(&l_try);
+      __ pop(a0);                                        // result
+      __ PushTryHandler(StackHandler::CATCH, expr->index());
+      const int handler_size = StackHandlerConstants::kSize;
+      __ push(a0);                                       // result
+      __ jmp(&l_suspend);
+      __ bind(&l_continuation);
+      __ mov(a0, v0);
+      __ jmp(&l_resume);
+      __ bind(&l_suspend);
+      const int generator_object_depth = kPointerSize + handler_size;
+      __ ld(a0, MemOperand(sp, generator_object_depth));
+      __ push(a0);                                       // g
+      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      __ li(a1, Operand(Smi::FromInt(l_continuation.pos())));
+      __ sd(a1, FieldMemOperand(a0, JSGeneratorObject::kContinuationOffset));
+      __ sd(cp, FieldMemOperand(a0, JSGeneratorObject::kContextOffset));
+      __ mov(a1, cp);
+      __ RecordWriteField(a0, JSGeneratorObject::kContextOffset, a1, a2,
+                          kRAHasBeenSaved, kDontSaveFPRegs);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ pop(v0);                                      // result
+      EmitReturnSequence();
+      __ mov(a0, v0);
+      __ bind(&l_resume);                              // received in a0
+      __ PopTryHandler();
+
+      // receiver = iter; f = 'next'; arg = received;
+      __ bind(&l_next);
+      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
+      __ ld(a3, MemOperand(sp, 1 * kPointerSize));          // iter
+      __ Push(load_name, a3, a0);                      // "next", iter, received
+
+      // result = receiver[f](arg);
+      __ bind(&l_call);
+      __ ld(load_receiver, MemOperand(sp, kPointerSize));
+      __ ld(load_name, MemOperand(sp, 2 * kPointerSize));
+      if (FLAG_vector_ics) {
+        __ li(VectorLoadICDescriptor::SlotRegister(),
+              Operand(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
+      }
+      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+      CallIC(ic, TypeFeedbackId::None());
+      __ mov(a0, v0);
+      __ mov(a1, a0);
+      __ sd(a1, MemOperand(sp, 2 * kPointerSize));
+      CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+      __ CallStub(&stub);
+
+      __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ Drop(1);  // The function is still on the stack; drop it.
+
+      // if (!result.done) goto l_try;
+      __ Move(load_receiver, v0);
+
+      __ push(load_receiver);                               // save result
+      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
+      if (FLAG_vector_ics) {
+        __ li(VectorLoadICDescriptor::SlotRegister(),
+              Operand(Smi::FromInt(expr->DoneFeedbackSlot())));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                           // v0=result.done
+      __ mov(a0, v0);
+      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+      CallIC(bool_ic);
+      __ Branch(&l_try, eq, v0, Operand(zero_reg));
+
+      // result.value
+      __ pop(load_receiver);                                 // result
+      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
+      if (FLAG_vector_ics) {
+        __ li(VectorLoadICDescriptor::SlotRegister(),
+              Operand(Smi::FromInt(expr->ValueFeedbackSlot())));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                            // v0=result.value
+      context()->DropAndPlug(2, v0);                         // drop iter and g
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(
+    Expression* generator, Expression* value,
+    JSGeneratorObject::ResumeMode resume_mode) {
+  // The value stays in a0, and is ultimately read by the resumed generator, as
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+  // is read to throw the value when the resumed generator is already closed.
+  // a1 will hold the generator object until the activation has been resumed.
+  VisitForStackValue(generator);
+  VisitForAccumulatorValue(value);
+  __ pop(a1);
+
+  // Check generator state.
+  Label wrong_state, closed_state, done;
+  __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+  __ Branch(&closed_state, eq, a3, Operand(zero_reg));
+  __ Branch(&wrong_state, lt, a3, Operand(zero_reg));
+
+  // Load suspended function and context.
+  __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
+  __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
+
+  // Load receiver and store as the first argument.
+  __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kReceiverOffset));
+  __ push(a2);
+
+  // Push holes for the rest of the arguments to the generator function.
+  __ ld(a3, FieldMemOperand(a4, JSFunction::kSharedFunctionInfoOffset));
+  // The argument count is stored as int32_t on 64-bit platforms.
+  // TODO(plind): Smi on 32-bit platforms.
+  __ lw(a3,
+        FieldMemOperand(a3, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ LoadRoot(a2, Heap::kTheHoleValueRootIndex);
+  Label push_argument_holes, push_frame;
+  __ bind(&push_argument_holes);
+  __ Dsubu(a3, a3, Operand(1));
+  __ Branch(&push_frame, lt, a3, Operand(zero_reg));
+  __ push(a2);
+  __ jmp(&push_argument_holes);
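+  // The loop above is, in effect:
+  //   for (int i = formal_parameter_count; i > 0; --i) push(the_hole);
+  // The holes stand in for the arguments the suspended activation was
+  // originally called with.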
+
+  // Enter a new JavaScript frame, and initialize its slots as they were when
+  // the generator was suspended.
+  Label resume_frame;
+  __ bind(&push_frame);
+  __ Call(&resume_frame);
+  __ jmp(&done);
+  __ bind(&resume_frame);
+  // ra = return address.
+  // fp = caller's frame pointer.
+  // cp = callee's context,
+  // a4 = callee's JS function.
+  __ Push(ra, fp, cp, a4);
+  // Adjust FP to point to saved FP.
+  __ Daddu(fp, sp, 2 * kPointerSize);
+
+  // Load the operand stack size.
+  __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kOperandStackOffset));
+  __ ld(a3, FieldMemOperand(a3, FixedArray::kLengthOffset));
+  __ SmiUntag(a3);
+
+  // If we are sending a value and there is no operand stack, we can jump back
+  // in directly.
+  if (resume_mode == JSGeneratorObject::NEXT) {
+    Label slow_resume;
+    __ Branch(&slow_resume, ne, a3, Operand(zero_reg));
+    __ ld(a3, FieldMemOperand(a4, JSFunction::kCodeEntryOffset));
+    __ ld(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ SmiUntag(a2);
+    __ Daddu(a3, a3, Operand(a2));
+    __ li(a2, Operand(Smi::FromInt(JSGeneratorObject::kGeneratorExecuting)));
+    __ sd(a2, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
+    __ Jump(a3);
+    __ bind(&slow_resume);
+  }
+
+  // Otherwise, we push holes for the operand stack and call the runtime to fix
+  // up the stack and the handlers.
+  Label push_operand_holes, call_resume;
+  __ bind(&push_operand_holes);
+  __ Dsubu(a3, a3, Operand(1));
+  __ Branch(&call_resume, lt, a3, Operand(zero_reg));
+  __ push(a2);
+  __ Branch(&push_operand_holes);
+  __ bind(&call_resume);
+  DCHECK(!result_register().is(a1));
+  __ Push(a1, result_register());
+  __ Push(Smi::FromInt(resume_mode));
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+  // Not reached: the runtime call returns elsewhere.
+  __ stop("not-reached");
+
+  // Reach here when generator is closed.
+  __ bind(&closed_state);
+  if (resume_mode == JSGeneratorObject::NEXT) {
+    // Return completed iterator result when generator is closed.
+    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+    __ push(a2);
+    // Pop value from top-of-stack slot; box result into result register.
+    EmitCreateIteratorResult(true);
+  } else {
+    // Throw the provided value.
+    __ push(a0);
+    __ CallRuntime(Runtime::kThrow, 1);
+  }
+  __ jmp(&done);
+
+  // Throw error if we attempt to operate on a running generator.
+  __ bind(&wrong_state);
+  __ push(a1);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+  __ bind(&done);
+  context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
+  Label gc_required;
+  Label allocated;
+
+  const int instance_size = 5 * kPointerSize;
+  DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
+            instance_size);
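+  // The result is a JSObject with the iterator result map and five
+  // pointer-size fields: map, properties, elements, value and done --
+  // i.e. the object { value: <popped value>, done: <done> }.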
+
+  __ Allocate(instance_size, v0, a2, a3, &gc_required, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&gc_required);
+  __ Push(Smi::FromInt(instance_size));
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+  __ ld(context_register(),
+        MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  __ bind(&allocated);
+  __ ld(a1, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ ld(a1, FieldMemOperand(a1, GlobalObject::kNativeContextOffset));
+  __ ld(a1, ContextOperand(a1, Context::ITERATOR_RESULT_MAP_INDEX));
+  __ pop(a2);
+  __ li(a3, Operand(isolate()->factory()->ToBoolean(done)));
+  __ li(a4, Operand(isolate()->factory()->empty_fixed_array()));
+  __ sd(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ sd(a4, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ sd(a4, FieldMemOperand(v0, JSObject::kElementsOffset));
+  __ sd(a2,
+        FieldMemOperand(v0, JSGeneratorObject::kResultValuePropertyOffset));
+  __ sd(a3,
+        FieldMemOperand(v0, JSGeneratorObject::kResultDonePropertyOffset));
+
+  // Only the value field needs a write barrier, as the other values are in the
+  // root set.
+  __ RecordWriteField(v0, JSGeneratorObject::kResultValuePropertyOffset,
+                      a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!prop->IsSuperAccess());
+
+  __ li(LoadDescriptor::NameRegister(), Operand(key->value()));
+  if (FLAG_vector_ics) {
+    __ li(VectorLoadICDescriptor::SlotRegister(),
+          Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(v0);
+  VisitForStackValue(super_ref->this_var());
+  __ Push(key->value());
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  // Call keyed load IC. It has register arguments receiver and key.
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  if (FLAG_vector_ics) {
+    __ li(VectorLoadICDescriptor::SlotRegister(),
+          Operand(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallIC(ic);
+  } else {
+    CallIC(ic, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+                                              Token::Value op,
+                                              OverwriteMode mode,
+                                              Expression* left_expr,
+                                              Expression* right_expr) {
+  Label done, smi_case, stub_call;
+
+  Register scratch1 = a2;
+  Register scratch2 = a3;
+
+  // Get the arguments.
+  Register left = a1;
+  Register right = a0;
+  __ pop(left);
+  __ mov(a0, result_register());
+
+  // Perform combined smi check on both operands.
+  __ Or(scratch1, left, Operand(right));
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(scratch1, &smi_case);
+
+  __ bind(&stub_call);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  CallIC(code, expr->BinaryOperationFeedbackId());
+  patch_site.EmitPatchInfo();
+  __ jmp(&done);
+
+  __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the
+  // type-recording binary operation stub.
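+  // On MIPS64, smis are 32-bit values stored in the upper half of a 64-bit
+  // word (the low 32 bits hold the zero tag), which is why e.g. the SAR
+  // case below masks with 0xffffffff00000000 to re-tag its result.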
+  switch (op) {
+    case Token::SAR:
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ dsrav(right, left, scratch1);
+      __ And(v0, right, Operand(0xffffffff00000000L));
+      break;
+    case Token::SHL: {
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ dsllv(scratch1, scratch1, scratch2);
+      __ SmiTag(v0, scratch1);
+      break;
+    }
+    case Token::SHR: {
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ dsrlv(scratch1, scratch1, scratch2);
+      __ And(scratch2, scratch1, 0x80000000);
+      __ Branch(&stub_call, ne, scratch2, Operand(zero_reg));
+      __ SmiTag(v0, scratch1);
+      break;
+    }
+    case Token::ADD:
+      __ AdduAndCheckForOverflow(v0, left, right, scratch1);
+      __ BranchOnOverflow(&stub_call, scratch1);
+      break;
+    case Token::SUB:
+      __ SubuAndCheckForOverflow(v0, left, right, scratch1);
+      __ BranchOnOverflow(&stub_call, scratch1);
+      break;
+    case Token::MUL: {
+      __ Dmulh(v0, left, right);
+      __ dsra32(scratch2, v0, 0);
+      __ sra(scratch1, v0, 31);
+      __ Branch(USE_DELAY_SLOT, &stub_call, ne, scratch2, Operand(scratch1));
+      __ SmiTag(v0);
+      __ Branch(USE_DELAY_SLOT, &done, ne, v0, Operand(zero_reg));
+      __ Daddu(scratch2, right, left);
+      __ Branch(&stub_call, lt, scratch2, Operand(zero_reg));
+      DCHECK(Smi::FromInt(0) == 0);
+      __ mov(v0, zero_reg);
+      break;
+    }
+    case Token::BIT_OR:
+      __ Or(v0, left, Operand(right));
+      break;
+    case Token::BIT_AND:
+      __ And(v0, left, Operand(right));
+      break;
+    case Token::BIT_XOR:
+      __ Xor(v0, left, Operand(right));
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr,
+                                     Token::Value op,
+                                     OverwriteMode mode) {
+  __ mov(a0, result_register());
+  __ pop(a1);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
+  CallIC(code, expr->BinaryOperationFeedbackId());
+  patch_site.EmitPatchInfo();
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+  DCHECK(expr->IsValidReferenceExpression());
+
+  // Left-hand side can only be a property, a global or a (parameter or local)
+  // slot.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->AsProperty();
+  if (prop != NULL) {
+    assign_type = (prop->key()->IsPropertyName())
+        ? NAMED_PROPERTY
+        : KEYED_PROPERTY;
+  }
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(result_register());  // Preserve value.
+      VisitForAccumulatorValue(prop->obj());
+      __ mov(StoreDescriptor::ReceiverRegister(), result_register());
+      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      __ li(StoreDescriptor::NameRegister(),
+            Operand(prop->key()->AsLiteral()->value()));
+      CallStoreIC();
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(result_register());  // Preserve value.
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ Move(StoreDescriptor::NameRegister(), result_register());
+      __ Pop(StoreDescriptor::ValueRegister(),
+             StoreDescriptor::ReceiverRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+      CallIC(ic);
+      break;
+    }
+  }
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+    Variable* var, MemOperand location) {
+  __ sd(result_register(), location);
+  if (var->IsContextSlot()) {
+    // RecordWrite may destroy all its register arguments.
+    __ Move(a3, result_register());
+    int offset = Context::SlotOffset(var->index());
+    __ RecordWriteContextSlot(
+        a1, offset, a3, a2, kRAHasBeenSaved, kDontSaveFPRegs);
+  }
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
+  if (var->IsUnallocated()) {
+    // Global var, const, or let.
+    __ mov(StoreDescriptor::ValueRegister(), result_register());
+    __ li(StoreDescriptor::NameRegister(), Operand(var->name()));
+    __ ld(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    CallStoreIC();
+  } else if (op == Token::INIT_CONST_LEGACY) {
+    // Const initializers need a write barrier.
+    DCHECK(!var->IsParameter());  // No const parameters.
+    if (var->IsLookupSlot()) {
+      __ li(a0, Operand(var->name()));
+      __ Push(v0, cp, a0);  // Context and name.
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+    } else {
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+      Label skip;
+      MemOperand location = VarOperand(var, a1);
+      __ ld(a2, location);
+      __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+      __ Branch(&skip, ne, a2, Operand(at));
+      EmitStoreToStackLocalOrContextSlot(var, location);
+      __ bind(&skip);
+    }
+
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
+    // Non-initializing assignment to let variable needs a write barrier.
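+    // For example, in { x = 1; let x; } the assignment sees the hole and
+    // must throw a ReferenceError (temporal dead zone), which is the check
+    // emitted below.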
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, a1);
+    __ ld(a3, location);
+    __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+    __ Branch(&assign, ne, a3, Operand(a4));
+    __ li(a3, Operand(var->name()));
+    __ push(a3);
+    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    // Perform the assignment.
+    __ bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
+
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+    if (var->IsLookupSlot()) {
+      // Assignment to var.
+      __ li(a4, Operand(var->name()));
+      __ li(a3, Operand(Smi::FromInt(strict_mode())));
+      // sp[0]  : mode.
+      // sp[8]  : name.
+      // sp[16] : context.
+      // sp[24] : value.
+      __ Push(v0, cp, a4, a3);
+      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+    } else {
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+      MemOperand location = VarOperand(var, a1);
+      if (generate_debug_code_ && op == Token::INIT_LET) {
+        // Check for an uninitialized let binding.
+        __ ld(a2, location);
+        __ LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+        __ Check(eq, kLetBindingReInitialization, a2, Operand(a4));
+      }
+      EmitStoreToStackLocalOrContextSlot(var, location);
+    }
+  }
+  // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a named store IC.
+  Property* prop = expr->target()->AsProperty();
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  __ mov(StoreDescriptor::ValueRegister(), result_register());
+  __ li(StoreDescriptor::NameRegister(),
+        Operand(prop->key()->AsLiteral()->value()));
+  __ pop(StoreDescriptor::ReceiverRegister());
+  CallStoreIC(expr->AssignmentFeedbackId());
+
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a keyed store IC.
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  // Call keyed store IC.
+  // The arguments are:
+  // - a0 is the value,
+  // - a1 is the key,
+  // - a2 is the receiver.
+  __ mov(StoreDescriptor::ValueRegister(), result_register());
+  __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(a0));
+
+  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+  CallIC(ic, expr->AssignmentFeedbackId());
+
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+
+  if (key->IsPropertyName()) {
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), v0);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
+    PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+    context()->Plug(v0);
+  } else {
+    VisitForStackValue(expr->obj());
+    VisitForAccumulatorValue(expr->key());
+    __ Move(LoadDescriptor::NameRegister(), v0);
+    __ pop(LoadDescriptor::ReceiverRegister());
+    EmitKeyedPropertyLoad(expr);
+    context()->Plug(v0);
+  }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code,
+                               TypeFeedbackId id) {
+  ic_total_count_++;
+  __ Call(code, RelocInfo::CODE_TARGET, id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+
+  CallICState::CallType call_type =
+      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
+
+  // Get the target function.
+  if (call_type == CallICState::FUNCTION) {
+    { StackValueContext context(this);
+      EmitVariableLoad(callee->AsVariableProxy());
+      PrepareForBailout(callee, NO_REGISTERS);
+    }
+    // Push undefined as receiver. This is patched in the method prologue if it
+    // is a sloppy mode method.
+    __ Push(isolate()->factory()->undefined_value());
+  } else {
+    // Load the function from the receiver.
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+    EmitNamedPropertyLoad(callee->AsProperty());
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    // Push the target function under the receiver.
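+    // Stack before: [receiver, ...]; after: [receiver, function, ...] --
+    // the receiver is duplicated on top and the function written into the
+    // old receiver slot.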
+    __ ld(at, MemOperand(sp, 0));
+    __ push(at);
+    __ sd(v0, MemOperand(sp, kPointerSize));
+  }
+
+  EmitCall(expr, call_type);
+}
+
+
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  const Register scratch = a1;
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(v0);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(v0);
+  __ ld(scratch, MemOperand(sp, kPointerSize));
+  __ Push(scratch, v0);
+  __ Push(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ sd(v0, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
+                                                Expression* key) {
+  // Load the key.
+  VisitForAccumulatorValue(key);
+
+  Expression* callee = expr->expression();
+
+  // Load the function from the receiver.
+  DCHECK(callee->IsProperty());
+  __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+  __ Move(LoadDescriptor::NameRegister(), v0);
+  EmitKeyedPropertyLoad(callee->AsProperty());
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+  // Push the target function under the receiver.
+  __ ld(at, MemOperand(sp, 0));
+  __ push(at);
+  __ sd(v0, MemOperand(sp, kPointerSize));
+
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+  // Load the arguments.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  { PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+
+  // Record source position of the IC call.
+  SetSourcePosition(expr->position());
+  Handle<Code> ic = CallIC::initialize_stub(
+      isolate(), arg_count, call_type);
+  __ li(a3, Operand(Smi::FromInt(expr->CallFeedbackSlot())));
+  __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+  // Don't assign a type feedback id to the IC, since type feedback is provided
+  // by the vector above.
+  CallIC(ic);
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, v0);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+  // a7: copy of the first argument or undefined if it doesn't exist.
+  if (arg_count > 0) {
+    __ ld(a7, MemOperand(sp, arg_count * kPointerSize));
+  } else {
+    __ LoadRoot(a7, Heap::kUndefinedValueRootIndex);
+  }
+
+  // a6: the enclosing function.
+  __ ld(a6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  // a5: the receiver of the enclosing function.
+  int receiver_offset = 2 + info_->scope()->num_parameters();
+  __ ld(a5, MemOperand(fp, receiver_offset * kPointerSize));
+
+  // a4: the strict mode.
+  __ li(a4, Operand(Smi::FromInt(strict_mode())));
+
+  // a1: the start position of the scope the call resides in.
+  __ li(a1, Operand(Smi::FromInt(scope()->start_position())));
+
+  // Do the runtime call.
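+  // Together with the copy of the function already pushed by the caller,
+  // this makes six arguments for kResolvePossiblyDirectEval.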
+  __ Push(a7);
+  __ Push(a6, a5, a4, a1);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
+  Comment cmnt(masm_, "[ Call");
+  Expression* callee = expr->expression();
+  Call::CallType call_type = expr->GetCallType(isolate());
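+  // Examples of the call types handled below: eval(x) is POSSIBLY_EVAL_CALL,
+  // a call to a global variable is GLOBAL_CALL, a call to a variable
+  // introduced by 'with' or eval is LOOKUP_SLOT_CALL, o.f() and o[k]() are
+  // PROPERTY_CALL, and (function() {})() is OTHER_CALL.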
+
+  if (call_type == Call::POSSIBLY_EVAL_CALL) {
+    // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+    // to resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+
+    { PreservePositionScope pos_scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
+      __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+      __ push(a2);  // Reserved receiver slot.
+
+      // Push the arguments.
+      for (int i = 0; i < arg_count; i++) {
+        VisitForStackValue(args->at(i));
+      }
+
+      // Push a copy of the function (found below the arguments) and
+      // resolve eval.
+      __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+      __ push(a1);
+      EmitResolvePossiblyDirectEval(arg_count);
+
+      // The runtime call returns a pair of values in v0 (function) and
+      // v1 (receiver). Touch up the stack with the right values.
+      __ sd(v0, MemOperand(sp, (arg_count + 1) * kPointerSize));
+      __ sd(v1, MemOperand(sp, arg_count * kPointerSize));
+    }
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+    __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ CallStub(&stub);
+    RecordJSReturnSite(expr);
+    // Restore context register.
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    context()->DropAndPlug(1, v0);
+  } else if (call_type == Call::GLOBAL_CALL) {
+    EmitCallWithLoadIC(expr);
+  } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+    // Call to a lookup slot (dynamically introduced variable).
+    VariableProxy* proxy = callee->AsVariableProxy();
+    Label slow, done;
+
+    { PreservePositionScope scope(masm()->positions_recorder());
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+    }
+
+    __ bind(&slow);
+    // Call the runtime to find the function to call (returned in v0)
+    // and the object holding it (returned in v1).
+    DCHECK(!context_register().is(a2));
+    __ li(a2, Operand(proxy->name()));
+    __ Push(context_register(), a2);
+    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+    __ Push(v0, v1);  // Function, receiver.
+
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      Label call;
+      __ Branch(&call);
+      __ bind(&done);
+      // Push function.
+      __ push(v0);
+      // The receiver is implicitly the global receiver. Indicate this
+      // by passing undefined to the call function stub.
+      __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+      __ push(a1);
+      __ bind(&call);
+    }
+
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot.
+    EmitCall(expr);
+  } else if (call_type == Call::PROPERTY_CALL) {
+    Property* property = callee->AsProperty();
+    bool is_named_call = property->key()->IsPropertyName();
+    // Named super property calls (super.x()) are handled by
+    // EmitSuperCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
+    } else {
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
+    }
+  } else {
+    DCHECK(call_type == Call::OTHER_CALL);
+    // Call to an arbitrary expression not handled specially above.
+    { PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
+    }
+    __ LoadRoot(a1, Heap::kUndefinedValueRootIndex);
+    __ push(a1);
+    // Emit function call.
+    EmitCall(expr);
+  }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  DCHECK(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+
+  // Push constructor on the stack.  If it's not a function it's used as
+  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+  // ignored.
+  VisitForStackValue(expr->expression());
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(expr->position());
+
+  // Load function and argument count into a1 and a0.
+  __ li(a0, Operand(arg_count));
+  __ ld(a1, MemOperand(sp, arg_count * kPointerSize));
+
+  // Record call targets in unoptimized code.
+  if (FLAG_pretenuring_call_new) {
+    EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+    DCHECK(expr->AllocationSiteFeedbackSlot() ==
+           expr->CallNewFeedbackSlot() + 1);
+  }
+
+  __ li(a2, FeedbackVector());
+  __ li(a3, Operand(Smi::FromInt(expr->CallNewFeedbackSlot())));
+
+  CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ SmiTst(v0, a4);
+  Split(eq, a4, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ NonNegativeSmiTst(v0, at);
+  Split(eq, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ LoadRoot(at, Heap::kNullValueRootIndex);
+  __ Branch(if_true, eq, v0, Operand(at));
+  __ ld(a2, FieldMemOperand(v0, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ lbu(a1, FieldMemOperand(a2, Map::kBitFieldOffset));
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+  __ Branch(if_false, ne, at, Operand(zero_reg));
+  __ lbu(a1, FieldMemOperand(a2, Map::kInstanceTypeOffset));
+  __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(le, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(ge, a1, Operand(FIRST_SPEC_OBJECT_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a1, FieldMemOperand(a1, Map::kBitFieldOffset));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ And(at, a1, Operand(1 << Map::kIsUndetectable));
+  Split(ne, at, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false, skip_lookup;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ AssertNotSmi(v0);
+
+  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  __ lbu(a4, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ And(a4, a4, 1 << Map::kStringWrapperSafeForDefaultValueOf);
+  __ Branch(&skip_lookup, ne, a4, Operand(zero_reg));
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ ld(a2, FieldMemOperand(v0, JSObject::kPropertiesOffset));
+  __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ LoadRoot(a4, Heap::kHashTableMapRootIndex);
+  __ Branch(if_false, eq, a2, Operand(a4));
+
+  // Look for the 'valueOf' name in the descriptor array, and indicate false
+  // if found. Since we omit an enumeration index check, a 'valueOf' added
+  // via a transition that shares this descriptor array produces a false
+  // positive here.
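+  // In pseudo-code, the scan below is roughly:
+  //   for (i = 0; i < number_of_own_descriptors; i++)
+  //     if (descriptors.GetKey(i) == "valueOf") return false;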
+  Label entry, loop, done;
+
+  // Skip loop if no descriptors are valid.
+  __ NumberOfOwnDescriptors(a3, a1);
+  __ Branch(&done, eq, a3, Operand(zero_reg));
+
+  __ LoadInstanceDescriptors(a1, a4);
+  // a4: descriptor array.
+  // a3: valid entries in the descriptor array.
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  // The 32-bit port asserts kPointerSize == 4 here; on MIPS64 kPointerSize
+  // is 8, so that assert does not apply.
+  __ li(at, Operand(DescriptorArray::kDescriptorSize));
+  __ Dmul(a3, a3, at);
+  // Calculate location of the first key name.
+  __ Daddu(a4, a4, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+  // Calculate the end of the descriptor array.
+  __ mov(a2, a4);
+  __ dsll(a5, a3, kPointerSizeLog2);
+  __ Daddu(a2, a2, a5);
+
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // string "valueOf" the result is false.
+  // The use of a6 to store the valueOf string assumes that it is not otherwise
+  // used in the loop below.
+  __ li(a6, Operand(isolate()->factory()->value_of_string()));
+  __ jmp(&entry);
+  __ bind(&loop);
+  __ ld(a3, MemOperand(a4, 0));
+  __ Branch(if_false, eq, a3, Operand(a6));
+  __ Daddu(a4, a4, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
+  __ bind(&entry);
+  __ Branch(&loop, ne, a4, Operand(a2));
+
+  __ bind(&done);
+
+  // Set the bit in the map to indicate that there is no local valueOf field.
+  __ lbu(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+  __ Or(a2, a2, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ sb(a2, FieldMemOperand(a1, Map::kBitField2Offset));
+
+  __ bind(&skip_lookup);
+
+  // If no valueOf property was found on the object itself, check that its
+  // prototype is the unmodified String prototype; if it is not, the result
+  // is false.
+  __ ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+  __ JumpIfSmi(a2, if_false);
+  __ ld(a2, FieldMemOperand(a2, HeapObject::kMapOffset));
+  __ ld(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ ld(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset));
+  __ ld(a3, ContextOperand(a3, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, a2, Operand(a3), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a2);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ Branch(if_true, eq, a2, Operand(JS_FUNCTION_TYPE));
+  __ Branch(if_false);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
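+  // -0.0 is the IEEE-754 double whose sign bit is set and whose exponent
+  // and mantissa are all zero: high word 0x80000000, low word 0x00000000.
+  // The checks below compare exactly those two words.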
+  __ CheckMap(v0, a1, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+  __ lwu(a2, FieldMemOperand(v0, HeapNumber::kExponentOffset));
+  __ lwu(a1, FieldMemOperand(v0, HeapNumber::kMantissaOffset));
+  __ li(a4, 0x80000000);
+  Label not_nan;
+  __ Branch(&not_nan, ne, a2, Operand(a4));
+  __ mov(a4, zero_reg);
+  __ mov(a2, a1);
+  __ bind(&not_nan);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, a2, Operand(a4), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, a1, Operand(JS_ARRAY_TYPE),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ JumpIfSmi(v0, if_false);
+  __ GetObjectType(v0, a1, a1);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, a1, Operand(JS_REGEXP_TYPE), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  // Get the frame pointer for the calling frame.
+  __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ ld(a1, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&check_frame_marker, ne,
+            a1, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ ld(a2, MemOperand(a2, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ ld(a1, MemOperand(a2, StandardFrameConstants::kMarkerOffset));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, a1, Operand(Smi::FromInt(StackFrame::CONSTRUCT)),
+        if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  __ pop(a1);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, v0, Operand(a1), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in a1 and the formal
+  // parameter count in a0.
+  VisitForAccumulatorValue(args->at(0));
+  __ mov(a1, v0);
+  __ li(a0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  Label exit;
+  // Get the number of formal parameters.
+  __ li(v0, Operand(Smi::FromInt(info_->scope()->num_parameters())));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ ld(a2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(a3, MemOperand(a2, StandardFrameConstants::kContextOffset));
+  __ Branch(&exit, ne, a3,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ ld(v0, MemOperand(a2, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForAccumulatorValue(args->at(0));
+
+  // If the object is a smi, we return null.
+  __ JumpIfSmi(v0, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+  __ GetObjectType(v0, v0, a1);  // Map is now in v0.
+  __ Branch(&null, lt, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ Branch(&function, eq, a1, Operand(FIRST_SPEC_OBJECT_TYPE));
+
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                LAST_SPEC_OBJECT_TYPE - 1);
+  __ Branch(&function, eq, a1, Operand(LAST_SPEC_OBJECT_TYPE));
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+  // Check if the constructor in the map is a JS function.
+  __ ld(v0, FieldMemOperand(v0, Map::kConstructorOffset));
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&non_function_constructor, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+  // v0 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ld(v0, FieldMemOperand(v0, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(v0, FieldMemOperand(v0, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ Branch(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(v0, Heap::kFunction_stringRootIndex);
+  __ jmp(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(v0, Heap::kObject_stringRootIndex);
+  __ jmp(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(v0, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub(isolate());
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub(isolate());
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 4);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ JumpIfSmi(v0, &done);
+  // If the object is not a value type, return the object.
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&done, ne, a1, Operand(JS_VALUE_TYPE));
+
+  __ ld(v0, FieldMemOperand(v0, JSValue::kValueOffset));
+
+  __ bind(&done);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label runtime, done, not_date_object;
+  Register object = v0;
+  Register result = v0;
+  Register scratch0 = t1;
+  Register scratch1 = a1;
+
+  __ JumpIfSmi(object, &not_date_object);
+  __ GetObjectType(object, scratch1, scratch1);
+  __ Branch(&not_date_object, ne, scratch1, Operand(JS_DATE_TYPE));
+
+  if (index->value() == 0) {
+    __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
+    __ jmp(&done);
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
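+      // Cached date fields are only valid while the object's cache stamp
+      // matches the isolate-wide date cache stamp; on a mismatch, fall
+      // through to the runtime helper below.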
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ li(scratch1, Operand(stamp));
+      __ ld(scratch1, MemOperand(scratch1));
+      __ ld(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ Branch(&runtime, ne, scratch1, Operand(scratch0));
+      __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
+                                            kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch1);
+    __ li(a1, Operand(index));
+    __ Move(a0, object);
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ jmp(&done);
+  }
+
+  __ bind(&not_date_object);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
+  __ bind(&done);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(3, args->length());
+
+  Register string = v0;
+  Register index = a1;
+  Register value = a2;
+
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
+  __ Pop(index, value);
+
+  if (FLAG_debug_code) {
+    __ SmiTst(value, at);
+    __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
+    __ SmiTst(index, at);
+    __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
+    __ SmiUntag(index, index);
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    Register scratch = t1;
+    __ EmitSeqStringSetCharCheck(
+        string, index, value, scratch, one_byte_seq_type);
+    __ SmiTag(index, index);
+  }
+
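+  // Compute the character's address: the string data starts kHeaderSize -
+  // kHeapObjectTag past the tagged string pointer, plus the untagged index
+  // (one byte per character), then store the untagged value there.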
+  __ SmiUntag(value, value);
+  __ Daddu(at,
+          string,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ SmiUntag(index);
+  __ Daddu(at, at, index);
+  __ sb(value, MemOperand(at));
+  context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(3, args->length());
+
+  Register string = v0;
+  Register index = a1;
+  Register value = a2;
+
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
+  __ Pop(index, value);
+
+  if (FLAG_debug_code) {
+    __ SmiTst(value, at);
+    __ Check(eq, kNonSmiValue, at, Operand(zero_reg));
+    __ SmiTst(index, at);
+    __ Check(eq, kNonSmiIndex, at, Operand(zero_reg));
+    __ SmiUntag(index, index);
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    Register scratch = t1;
+    __ EmitSeqStringSetCharCheck(
+        string, index, value, scratch, two_byte_seq_type);
+    __ SmiTag(index, index);
+  }
+
+  __ SmiUntag(value, value);
+  __ Daddu(at,
+          string,
+          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
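+  // A MIPS64 smi keeps its value in the upper 32 bits, so an arithmetic
+  // shift right by 31 yields index * 2 -- the byte offset of a two-byte
+  // character.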
+  __ dsra(index, index, 32 - 1);
+  __ Daddu(at, at, index);
+  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
+  __ sh(value, MemOperand(at));
+  context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+  // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  MathPowStub stub(isolate(), MathPowStub::ON_STACK);
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+
+  VisitForStackValue(args->at(0));  // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
+  __ pop(a1);  // v0 = value. a1 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ JumpIfSmi(a1, &done);
+
+  // If the object is not a value type, return the value.
+  __ GetObjectType(a1, a2, a2);
+  __ Branch(&done, ne, a2, Operand(JS_VALUE_TYPE));
+
+  // Store the value.
+  __ sd(v0, FieldMemOperand(a1, JSValue::kValueOffset));
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ mov(a2, v0);
+  __ RecordWriteField(
+      a1, JSValue::kValueOffset, a2, a3, kRAHasBeenSaved, kDontSaveFPRegs);
+
+  __ bind(&done);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(args->length(), 1);
+
+  // Load the argument into a0 and call the stub.
+  VisitForAccumulatorValue(args->at(0));
+  __ mov(a0, result_register());
+
+  NumberToStringStub stub(isolate());
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label done;
+  StringCharFromCodeGenerator generator(v0, a1);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(a1);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+  __ mov(a0, result_register());
+
+  Register object = a1;
+  Register index = a0;
+  Register result = v0;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharCodeAtGenerator generator(object,
+                                      index,
+                                      result,
+                                      &need_conversion,
+                                      &need_conversion,
+                                      &index_out_of_range,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // NaN.
+  __ LoadRoot(result, Heap::kNanValueRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Load the undefined value into the result register, which will
+  // trigger conversion.
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+  __ mov(a0, result_register());
+
+  Register object = a1;
+  Register index = a0;
+  Register scratch = a3;
+  Register result = v0;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object,
+                                  index,
+                                  scratch,
+                                  result,
+                                  &need_conversion,
+                                  &need_conversion,
+                                  &index_out_of_range,
+                                  STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ jmp(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ LoadRoot(result, Heap::kempty_stringRootIndex);
+  __ jmp(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ li(result, Operand(Smi::FromInt(0)));
+  __ jmp(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(2, args->length());
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  __ pop(a1);
+  __ mov(a0, result_register());  // StringAddStub requires args in a0, a1.
+  StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(2, args->length());
+
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringCompareStub stub(isolate());
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
+  }
+  VisitForAccumulatorValue(args->last());  // Function.
+
+  Label runtime, done;
+  // Check for non-function argument (including proxy).
+  __ JumpIfSmi(v0, &runtime);
+  __ GetObjectType(v0, a1, a1);
+  __ Branch(&runtime, ne, a1, Operand(JS_FUNCTION_TYPE));
+
+  // InvokeFunction requires the function in a1. Move it in there.
+  __ mov(a1, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(a1, count, CALL_FUNCTION, NullCallWrapper());
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ jmp(&done);
+
+  __ bind(&runtime);
+  __ push(v0);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+  RegExpConstructResultStub stub(isolate());
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(2));
+  __ mov(a0, result_register());
+  __ pop(a1);
+  __ pop(a2);
+  __ CallStub(&stub);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(2, args->length());
+
+  DCHECK_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      isolate()->native_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort(kAttemptToUseUndefinedCache);
+    __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+    context()->Plug(v0);
+    return;
+  }
+
+  VisitForAccumulatorValue(args->at(1));
+
+  Register key = v0;
+  Register cache = a1;
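+  // Walk from the global object to the native context, then to the
+  // JSFUNCTION_RESULT_CACHES list, and finally to the cache for cache_id.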
+  __ ld(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ ld(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+  __ ld(cache,
+         ContextOperand(
+             cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ ld(cache,
+         FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)));
+
+  Label done, not_found;
+  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
+  __ ld(a2, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // a2 now holds finger offset as a smi.
+  __ Daddu(a3, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // a3 now points to the start of fixed array elements.
+  __ SmiScale(at, a2, kPointerSizeLog2);
+  __ daddu(a3, a3, at);
+  // a3 now points to key of indexed element of cache.
+  __ ld(a2, MemOperand(a3));
+  __ Branch(&not_found, ne, key, Operand(a2));
+
+  __ ld(v0, MemOperand(a3, kPointerSize));
+  __ Branch(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
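+  // The string hash field doubles as a cached array index; the
+  // kContainsCachedArrayIndexMask bits are clear when an index is cached.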
+  __ lwu(a0, FieldMemOperand(v0, String::kHashFieldOffset));
+  __ And(a0, a0, Operand(String::kContainsCachedArrayIndexMask));
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, a0, Operand(zero_reg), if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  __ AssertString(v0);
+
+  __ lwu(v0, FieldMemOperand(v0, String::kHashFieldOffset));
+  __ IndexFromHash(v0, v0);
+
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
+  Label bailout, done, one_char_separator, long_separator,
+      non_trivial_array, not_size_one_array, loop,
+      empty_separator_loop, one_char_separator_loop,
+      one_char_separator_loop_entry, long_separator_loop;
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(0));
+
+  // All aliases of the same register have disjoint lifetimes.
+  Register array = v0;
+  Register elements = no_reg;  // Will be v0.
+  Register result = no_reg;  // Will be v0.
+  Register separator = a1;
+  Register array_length = a2;
+  Register result_pos = no_reg;  // Will be a2.
+  Register string_length = a3;
+  Register string = a4;
+  Register element = a5;
+  Register elements_end = a6;
+  Register scratch1 = a7;
+  Register scratch2 = t1;
+  Register scratch3 = t0;
+
+  // Separator operand is on the stack.
+  __ pop(separator);
+
+  // Check that the array is a JSArray.
+  __ JumpIfSmi(array, &bailout);
+  __ GetObjectType(array, scratch1, scratch2);
+  __ Branch(&bailout, ne, scratch2, Operand(JS_ARRAY_TYPE));
+
+  // Check that the array has fast elements.
+  __ CheckFastElements(scratch1, scratch2, &bailout);
+
+  // If the array has length zero, return the empty string.
+  __ ld(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+  __ SmiUntag(array_length);
+  __ Branch(&non_trivial_array, ne, array_length, Operand(zero_reg));
+  __ LoadRoot(v0, Heap::kempty_stringRootIndex);
+  __ Branch(&done);
+
+  __ bind(&non_trivial_array);
+
+  // Get the FixedArray containing array's elements.
+  elements = array;
+  __ ld(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+  array = no_reg;  // End of array's live range.
+
+  // Check that all array elements are sequential one-byte strings, and
+  // accumulate the sum of their lengths, as a smi-encoded value.
+  __ mov(string_length, zero_reg);
+  __ Daddu(element,
+          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ dsll(elements_end, array_length, kPointerSizeLog2);
+  __ Daddu(elements_end, element, elements_end);
+  // Loop condition: while (element < elements_end).
+  // Live values in registers:
+  //   elements: Fixed array of strings.
+  //   array_length: Length of the fixed array of strings (not smi)
+  //   separator: Separator string
+  //   string_length: Accumulated sum of string lengths (smi).
+  //   element: Current array element.
+  //   elements_end: Array end.
+  if (generate_debug_code_) {
+    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin, array_length,
+              Operand(zero_reg));
+  }
+  __ bind(&loop);
+  __ ld(string, MemOperand(element));
+  __ Daddu(element, element, kPointerSize);
+  __ JumpIfSmi(string, &bailout);
+  __ ld(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
+  __ ld(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+  __ AdduAndCheckForOverflow(string_length, string_length, scratch1, scratch3);
+  __ BranchOnOverflow(&bailout, scratch3);
+  __ Branch(&loop, lt, element, Operand(elements_end));
+
+  // If array_length is 1, return elements[0], a string.
+  __ Branch(&not_size_one_array, ne, array_length, Operand(1));
+  __ ld(v0, FieldMemOperand(elements, FixedArray::kHeaderSize));
+  __ Branch(&done);
+
+  __ bind(&not_size_one_array);
+
+  // Live values in registers:
+  //   separator: Separator string
+  //   array_length: Length of the array.
+  //   string_length: Sum of string lengths (smi).
+  //   elements: FixedArray of strings.
+
+  // Check that the separator is a flat one-byte string.
+  __ JumpIfSmi(separator, &bailout);
+  __ ld(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+  __ lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
+
+  // Add (separator length times array_length) - separator length to the
+  // string_length to get the length of the result string. array_length is not
+  // smi but the other values are, so the result is a smi.
+  __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+  __ Dsubu(string_length, string_length, Operand(scratch1));
+  __ SmiUntag(scratch1);
+  __ Dmul(scratch2, array_length, scratch1);
+  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+  // zero.
+  __ dsra32(scratch1, scratch2, 0);
+  __ Branch(&bailout, ne, scratch2, Operand(zero_reg));
+  __ SmiUntag(string_length);
+  __ AdduAndCheckForOverflow(string_length, string_length, scratch2, scratch3);
+  __ BranchOnOverflow(&bailout, scratch3);
+
+  // Get first element in the array to free up the elements register to be used
+  // for the result.
+  __ Daddu(element,
+          elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  result = elements;  // End of live range for elements.
+  elements = no_reg;
+  // Live values in registers:
+  //   element: First array element
+  //   separator: Separator string
+  //   string_length: Length of result string (not smi)
+  //   array_length: Length of the array.
+  __ AllocateOneByteString(result, string_length, scratch1, scratch2,
+                           elements_end, &bailout);
+  // Prepare for looping. Set up elements_end to end of the array. Set
+  // result_pos to the position of the result where to write the first
+  // character.
+  __ dsll(elements_end, array_length, kPointerSizeLog2);
+  __ Daddu(elements_end, element, elements_end);
+  result_pos = array_length;  // End of live range for array_length.
+  array_length = no_reg;
+  __ Daddu(result_pos,
+          result,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+  // Check the length of the separator.
+  __ ld(scratch1, FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+  __ li(at, Operand(Smi::FromInt(1)));
+  __ Branch(&one_char_separator, eq, scratch1, Operand(at));
+  __ Branch(&long_separator, gt, scratch1, Operand(at));
+
+  // Empty separator case.
+  __ bind(&empty_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+
+  // Copy next array element to the result.
+  __ ld(string, MemOperand(element));
+  __ Daddu(element, element, kPointerSize);
+  __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&empty_separator_loop, lt, element, Operand(elements_end));
+  DCHECK(result.is(v0));
+  __ Branch(&done);
+
+  // One-character separator case.
+  __ bind(&one_char_separator);
+  // Replace separator with its one-byte character value.
+  __ lbu(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+  __ jmp(&one_char_separator_loop_entry);
+
+  __ bind(&one_char_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Single separator one-byte char (in lower byte).
+
+  // Copy the separator character to the result.
+  __ sb(separator, MemOperand(result_pos));
+  __ Daddu(result_pos, result_pos, 1);
+
+  // Copy next array element to the result.
+  __ bind(&one_char_separator_loop_entry);
+  __ ld(string, MemOperand(element));
+  __ Daddu(element, element, kPointerSize);
+  __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&one_char_separator_loop, lt, element, Operand(elements_end));
+  DCHECK(result.is(v0));
+  __ Branch(&done);
+
+  // Long separator case (separator is more than one character). Entry is at the
+  // label long_separator below.
+  __ bind(&long_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Separator string.
+
+  // Copy the separator to the result.
+  __ ld(string_length, FieldMemOperand(separator, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Daddu(string,
+          separator,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+
+  __ bind(&long_separator);
+  __ ld(string, MemOperand(element));
+  __ Daddu(element, element, kPointerSize);
+  __ ld(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ Daddu(string, string, SeqOneByteString::kHeaderSize - kHeapObjectTag);
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  // End while (element < elements_end).
+  __ Branch(&long_separator_loop, lt, element, Operand(elements_end));
+  DCHECK(result.is(v0));
+  __ Branch(&done);
+
+  __ bind(&bailout);
+  __ LoadRoot(v0, Heap::kUndefinedValueRootIndex);
+  __ bind(&done);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
+  __ li(at, Operand(debug_is_active));
+  __ lbu(v0, MemOperand(at));
+  __ SmiTag(v0);
+  context()->Plug(v0);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  if (expr->function() != NULL &&
+      expr->function()->intrinsic_type == Runtime::INLINE) {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+
+  if (expr->is_jsruntime()) {
+    // Push the builtins object as the receiver.
+    Register receiver = LoadDescriptor::ReceiverRegister();
+    __ ld(receiver, GlobalObjectOperand());
+    __ ld(receiver, FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
+    __ push(receiver);
+
+    // Load the function from the receiver.
+    __ li(LoadDescriptor::NameRegister(), Operand(expr->name()));
+    if (FLAG_vector_ics) {
+      __ li(VectorLoadICDescriptor::SlotRegister(),
+            Operand(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
+      CallLoadIC(NOT_CONTEXTUAL);
+    } else {
+      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    }
+
+    // Push the target function under the receiver.
+    __ ld(at, MemOperand(sp, 0));
+    __ push(at);
+    __ sd(v0, MemOperand(sp, kPointerSize));
+
+    // Push the arguments ("left-to-right"); arg_count was computed above.
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+
+    // Record source position of the IC call.
+    SetSourcePosition(expr->position());
+    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+    __ ld(a1, MemOperand(sp, (arg_count + 1) * kPointerSize));
+    __ CallStub(&stub);
+
+    // Restore context register.
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+    context()->DropAndPlug(1, v0);
+  } else {
+    // Push the arguments ("left-to-right").
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+
+    // Call the C runtime function.
+    __ CallRuntime(expr->function(), arg_count);
+    context()->Plug(v0);
+  }
+}
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* property = expr->expression()->AsProperty();
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+      if (property != NULL) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ li(a1, Operand(Smi::FromInt(strict_mode())));
+        __ push(a1);
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(v0);
+      } else if (proxy != NULL) {
+        Variable* var = proxy->var();
+        // Delete of an unqualified identifier is disallowed in strict mode
+        // but "delete this" is allowed.
+        DCHECK(strict_mode() == SLOPPY || var->is_this());
+        if (var->IsUnallocated()) {
+          __ ld(a2, GlobalObjectOperand());
+          __ li(a1, Operand(var->name()));
+          __ li(a0, Operand(Smi::FromInt(SLOPPY)));
+          __ Push(a2, a1, a0);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+          context()->Plug(v0);
+        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+          // Result of deleting non-global, non-dynamic variables is false.
+          // The subexpression does not have side effects.
+          context()->Plug(var->is_this());
+        } else {
+          // Non-global variable.  Call the runtime to try to delete from the
+          // context where the variable was introduced.
+          DCHECK(!context_register().is(a2));
+          __ li(a2, Operand(var->name()));
+          __ Push(context_register(), a2);
+          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+          context()->Plug(v0);
+        }
+      } else {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        context()->Plug(true);
+      }
+      break;
+    }
+
+    case Token::VOID: {
+      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+      VisitForEffect(expr->expression());
+      context()->Plug(Heap::kUndefinedValueRootIndex);
+      break;
+    }
+
+    case Token::NOT: {
+      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+      if (context()->IsEffect()) {
+        // Unary NOT has no side effects so it's only necessary to visit the
+        // subexpression.  Match the optimizing compiler by not branching.
+        VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(),
+                        test->false_label(),
+                        test->true_label(),
+                        test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
+      } else {
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(),
+                        &materialize_false,
+                        &materialize_true,
+                        &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(v0);
+        __ jmp(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(v0);
+        __ bind(&done);
+      }
+      break;
+    }
+
+    case Token::TYPEOF: {
+      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+      { StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
+      __ CallRuntime(Runtime::kTypeof, 1);
+      context()->Plug(v0);
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  DCHECK(expr->expression()->IsValidReferenceExpression());
+
+  Comment cmnt(masm_, "[ CountOperation");
+  SetSourcePosition(expr->position());
+
+  // Expression can only be a property, a global or a (parameter or local)
+  // slot.
+  enum LhsKind { VARIABLE, NAMED_PROPERTY, KEYED_PROPERTY };
+  LhsKind assign_type = VARIABLE;
+  Property* prop = expr->expression()->AsProperty();
+  // In case of a property we use the uninitialized expression context
+  // of the key to detect a named property.
+  if (prop != NULL) {
+    assign_type =
+        (prop->key()->IsPropertyName()) ? NAMED_PROPERTY : KEYED_PROPERTY;
+  }
+
+  // Evaluate expression and get value.
+  if (assign_type == VARIABLE) {
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy());
+  } else {
+    // Reserve space for result of postfix operation.
+    if (expr->is_postfix() && !context()->IsEffect()) {
+      __ li(at, Operand(Smi::FromInt(0)));
+      __ push(at);
+    }
+    if (assign_type == NAMED_PROPERTY) {
+      // Put the object both on the stack and in the register.
+      VisitForStackValue(prop->obj());
+      __ ld(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+      EmitNamedPropertyLoad(prop);
+    } else {
+      VisitForStackValue(prop->obj());
+      VisitForStackValue(prop->key());
+      __ ld(LoadDescriptor::ReceiverRegister(),
+            MemOperand(sp, 1 * kPointerSize));
+      __ ld(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+      EmitKeyedPropertyLoad(prop);
+    }
+  }
+
+  // We need a second deoptimization point after loading the value,
+  // because evaluating the property load may have a side effect.
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+  }
+
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
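+  // The patch site marks the inlined smi check so IC patching can later
+  // enable or disable the fast path based on observed operand types.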
+
+  int count_value = expr->op() == Token::INC ? 1 : -1;
+  __ mov(a0, v0);
+  if (ShouldInlineSmiCase(expr->op())) {
+    Label slow;
+    patch_site.EmitJumpIfNotSmi(v0, &slow);
+
+    // Save result for postfix expressions.
+    if (expr->is_postfix()) {
+      if (!context()->IsEffect()) {
+        // Save the result on the stack. If we have a named or keyed property
+        // we store the result under the receiver that is currently on top
+        // of the stack.
+        switch (assign_type) {
+          case VARIABLE:
+            __ push(v0);
+            break;
+          case NAMED_PROPERTY:
+            __ sd(v0, MemOperand(sp, kPointerSize));
+            break;
+          case KEYED_PROPERTY:
+            __ sd(v0, MemOperand(sp, 2 * kPointerSize));
+            break;
+        }
+      }
+    }
+
+    Register scratch1 = a1;
+    Register scratch2 = a4;
+    __ li(scratch1, Operand(Smi::FromInt(count_value)));
+    __ AdduAndCheckForOverflow(v0, v0, scratch1, scratch2);
+    __ BranchOnNoOverflow(&done, scratch2);
+    // Call stub. Undo operation first.
+    __ Move(v0, a0);
+    __ jmp(&stub_call);
+    __ bind(&slow);
+  }
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+
+  // Save result for postfix expressions.
+  if (expr->is_postfix()) {
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ push(v0);
+          break;
+        case NAMED_PROPERTY:
+          __ sd(v0, MemOperand(sp, kPointerSize));
+          break;
+        case KEYED_PROPERTY:
+          __ sd(v0, MemOperand(sp, 2 * kPointerSize));
+          break;
+      }
+    }
+  }
+
+  __ bind(&stub_call);
+  __ mov(a1, v0);
+  __ li(a0, Operand(Smi::FromInt(count_value)));
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+  CallIC(code, expr->CountBinOpFeedbackId());
+  patch_site.EmitPatchInfo();
+  __ bind(&done);
+
+  // Store the value returned in v0.
+  switch (assign_type) {
+    case VARIABLE:
+      if (expr->is_postfix()) {
+        { EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(v0);
+        }
+        // For all contexts except EffectContext we have the result on
+        // top of the stack.
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(v0);
+      }
+      break;
+    case NAMED_PROPERTY: {
+      __ mov(StoreDescriptor::ValueRegister(), result_register());
+      __ li(StoreDescriptor::NameRegister(),
+            Operand(prop->key()->AsLiteral()->value()));
+      __ pop(StoreDescriptor::ReceiverRegister());
+      CallStoreIC(expr->CountStoreFeedbackId());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(v0);
+      }
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ mov(StoreDescriptor::ValueRegister(), result_register());
+      __ Pop(StoreDescriptor::ReceiverRegister(),
+             StoreDescriptor::NameRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+      CallIC(ic, expr->CountStoreFeedbackId());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(v0);
+      }
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+  DCHECK(!context()->IsEffect());
+  DCHECK(!context()->IsTest());
+  VariableProxy* proxy = expr->AsVariableProxy();
+  if (proxy != NULL && proxy->var()->IsUnallocated()) {
+    Comment cmnt(masm_, "[ Global variable");
+    __ ld(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    __ li(LoadDescriptor::NameRegister(), Operand(proxy->name()));
+    if (FLAG_vector_ics) {
+      __ li(VectorLoadICDescriptor::SlotRegister(),
+            Operand(Smi::FromInt(proxy->VariableFeedbackSlot())));
+    }
+    // Use a regular load, not a contextual load, to avoid a reference
+    // error.
+    CallLoadIC(NOT_CONTEXTUAL);
+    PrepareForBailout(expr, TOS_REG);
+    context()->Plug(v0);
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+    Comment cmnt(masm_, "[ Lookup slot");
+    Label done, slow;
+
+    // Generate code for loading from variables potentially shadowed
+    // by eval-introduced variables.
+    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
+
+    __ bind(&slow);
+    __ li(a0, Operand(proxy->name()));
+    __ Push(cp, a0);
+    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
+    __ bind(&done);
+
+    context()->Plug(v0);
+  } else {
+    // This expression cannot throw a reference error at the top level.
+    VisitInDuplicateContext(expr);
+  }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  { AccumulatorValueContext context(this);
+    VisitForTypeofValue(sub_expr);
+  }
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+  Factory* factory = isolate()->factory();
+  if (String::Equals(check, factory->number_string())) {
+    __ JumpIfSmi(v0, if_true);
+    __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->string_string())) {
+    __ JumpIfSmi(v0, if_false);
+    // Check for undetectable objects => false.
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_false, ge, a1, Operand(FIRST_NONSTRING_TYPE));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(eq, a1, Operand(zero_reg),
+          if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->symbol_string())) {
+    __ JumpIfSmi(v0, if_false);
+    __ GetObjectType(v0, v0, a1);
+    Split(eq, a1, Operand(SYMBOL_TYPE), if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->boolean_string())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    Split(eq, v0, Operand(at), if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->undefined_string())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    __ JumpIfSmi(v0, if_false);
+    // Check for undetectable objects => true.
+    __ ld(v0, FieldMemOperand(v0, HeapObject::kMapOffset));
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(ne, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->function_string())) {
+    __ JumpIfSmi(v0, if_false);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_true, eq, a1, Operand(JS_FUNCTION_TYPE));
+    Split(eq, a1, Operand(JS_FUNCTION_PROXY_TYPE),
+          if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->object_string())) {
+    __ JumpIfSmi(v0, if_false);
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(if_true, eq, v0, Operand(at));
+    // Check for JS objects => true.
+    __ GetObjectType(v0, v0, a1);
+    __ Branch(if_false, lt, a1, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ lbu(a1, FieldMemOperand(v0, Map::kInstanceTypeOffset));
+    __ Branch(if_false, gt, a1, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // Check for undetectable objects => false.
+    __ lbu(a1, FieldMemOperand(v0, Map::kBitFieldOffset));
+    __ And(a1, a1, Operand(1 << Map::kIsUndetectable));
+    Split(eq, a1, Operand(zero_reg), if_true, if_false, fall_through);
+  } else {
+    if (if_false != fall_through) __ jmp(if_false);
+  }
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  Comment cmnt(masm_, "[ CompareOperation");
+  SetSourcePosition(expr->position());
+
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
+  // Always perform the comparison for its control flow.  Pack the result
+  // into the expression's context after the comparison is performed.
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  Token::Value op = expr->op();
+  VisitForStackValue(expr->left());
+  switch (op) {
+    case Token::IN:
+      VisitForStackValue(expr->right());
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      __ LoadRoot(a4, Heap::kTrueValueRootIndex);
+      Split(eq, v0, Operand(a4), if_true, if_false, fall_through);
+      break;
+
+    case Token::INSTANCEOF: {
+      VisitForStackValue(expr->right());
+      InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+      __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      // The stub returns 0 for true.
+      Split(eq, v0, Operand(zero_reg), if_true, if_false, fall_through);
+      break;
+    }
+
+    default: {
+      VisitForAccumulatorValue(expr->right());
+      Condition cc = CompareIC::ComputeCondition(op);
+      __ mov(a0, result_register());
+      __ pop(a1);
+
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
+      if (inline_smi_code) {
+        Label slow_case;
+        __ Or(a2, a0, Operand(a1));
+        patch_site.EmitJumpIfNotSmi(a2, &slow_case);
+        Split(cc, a1, Operand(a0), if_true, if_false, NULL);
+        __ bind(&slow_case);
+      }
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+      CallIC(ic, expr->CompareOperationFeedbackId());
+      patch_site.EmitPatchInfo();
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      Split(cc, v0, Operand(zero_reg), if_true, if_false, fall_through);
+    }
+  }
+
+  // Convert the result of the comparison into one expected for this
+  // expression's context.
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false,
+                         &if_true, &if_false, &fall_through);
+
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ mov(a0, result_register());
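+  // For strict equality, compare directly against the canonical null or
+  // undefined root; otherwise let the CompareNilIC handle the sloppy case.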
+  if (expr->op() == Token::EQ_STRICT) {
+    Heap::RootListIndex nil_value = nil == kNullValue ?
+        Heap::kNullValueRootIndex :
+        Heap::kUndefinedValueRootIndex;
+    __ LoadRoot(a1, nil_value);
+    Split(eq, a0, Operand(a1), if_true, if_false, fall_through);
+  } else {
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+    CallIC(ic, expr->CompareOperationFeedbackId());
+    Split(ne, v0, Operand(zero_reg), if_true, if_false, fall_through);
+  }
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  __ ld(v0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  context()->Plug(v0);
+}
+
+
+Register FullCodeGenerator::result_register() {
+  return v0;
+}
+
+
+Register FullCodeGenerator::context_register() {
+  return cp;
+}
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  DCHECK(IsAligned(frame_offset, kPointerSize));
+  // Frame slots are pointer-sized (64-bit), so store with sd rather than
+  // the 32-bit sw used by the MIPS32 port.
+  __ sd(value, MemOperand(fp, frame_offset));
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+  __ ld(dst, ContextOperand(cp, context_index));
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+  Scope* declaration_scope = scope()->DeclarationScope();
+  if (declaration_scope->is_global_scope() ||
+      declaration_scope->is_module_scope()) {
+    // Contexts nested in the native context have a canonical empty function
+    // as their closure, not the anonymous closure containing the global
+    // code.  Pass a smi sentinel and let the runtime look up the empty
+    // function.
+    __ li(at, Operand(Smi::FromInt(0)));
+  } else if (declaration_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code.  Fetch it from the context.
+    __ ld(at, ContextOperand(cp, Context::CLOSURE_INDEX));
+  } else {
+    DCHECK(declaration_scope->is_function_scope());
+    __ ld(at, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+  __ push(at);
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+  DCHECK(!result_register().is(a1));
+  // Store result register while executing finally block.
+  __ push(result_register());
+  // Cook return address in link register to stack (smi encoded Code* delta).
+  __ Dsubu(a1, ra, Operand(masm_->CodeObject()));
+  __ SmiTag(a1);
+
+  // Store the cooked return address while executing the finally block.
+  __ push(a1);
+
+  // Store pending message while executing finally block.
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ li(at, Operand(pending_message_obj));
+  __ ld(a1, MemOperand(at));
+  __ push(a1);
+
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ li(at, Operand(has_pending_message));
+  __ ld(a1, MemOperand(at));
+  __ SmiTag(a1);
+  __ push(a1);
+
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ li(at, Operand(pending_message_script));
+  __ ld(a1, MemOperand(at));
+  __ push(a1);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+  DCHECK(!result_register().is(a1));
+  // Restore pending message from stack.
+  __ pop(a1);
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ li(at, Operand(pending_message_script));
+  __ sd(a1, MemOperand(at));
+
+  __ pop(a1);
+  __ SmiUntag(a1);
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ li(at, Operand(has_pending_message));
+  __ sd(a1, MemOperand(at));
+
+  __ pop(a1);
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ li(at, Operand(pending_message_obj));
+  __ sd(a1, MemOperand(at));
+
+  // Restore the cooked return address from the stack.
+  __ pop(a1);
+
+  // Restore the result register.
+  __ pop(result_register());
+
+  // Uncook the return address and return.
+  __ SmiUntag(a1);
+  __ Daddu(at, a1, Operand(masm_->CodeObject()));
+  __ Jump(at);
+}
+
+
+#undef __
+
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+    int* stack_depth,
+    int* context_length) {
+  // The macros used here must preserve the result register.
+
+  // Because the handler block contains the context of the finally
+  // code, we can restore it directly from there for the finally code
+  // rather than iteratively unwinding contexts via their previous
+  // links.
+  __ Drop(*stack_depth);  // Down to the handler block.
+  if (*context_length > 0) {
+    // Restore the context to its dedicated register and the stack.
+    __ ld(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+    __ sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ PopTryHandler();
+  __ Call(finally_entry_);
+
+  *stack_depth = 0;
+  *context_length = 0;
+  return previous_;
+}
+
+
+#undef __
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code,
+                            Address pc,
+                            BackEdgeState target_state,
+                            Code* replacement_code) {
+  static const int kInstrSize = Assembler::kInstrSize;
+  Address branch_address = pc - 8 * kInstrSize;
+  CodePatcher patcher(branch_address, 1);
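+  // Only the first instruction of the 8-instruction back-edge sequence is
+  // rewritten here; the call target inside the load-immediate sequence is
+  // patched separately below.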
+
+  switch (target_state) {
+    case INTERRUPT:
+      // slt  at, a3, zero_reg (in case of count-based interrupts)
+      // beq  at, zero_reg, ok
+      // lui  t9, <interrupt stub address> upper
+      // ori  t9, <interrupt stub address> u-middle
+      // dsll t9, t9, 16
+      // ori  t9, <interrupt stub address> lower
+      // jalr t9
+      // nop
+      // ok-label ----- pc_after points here
+      patcher.masm()->slt(at, a3, zero_reg);
+      break;
+    case ON_STACK_REPLACEMENT:
+    case OSR_AFTER_STACK_CHECK:
+      // daddiu at, zero_reg, 1
+      // beq  at, zero_reg, ok  ;; Not changed
+      // lui  t9, <on-stack replacement address> upper
+      // ori  t9, <on-stack replacement address> middle
+      // dsll t9, t9, 16
+      // ori  t9, <on-stack replacement address> lower
+      // jalr t9  ;; Not changed
+      // nop  ;; Not changed
+      // ok-label ----- pc_after points here
+      patcher.masm()->daddiu(at, zero_reg, 1);
+      break;
+  }
+  Address pc_immediate_load_address = pc - 6 * kInstrSize;
+  // Replace the stack check address in the load-immediate (6-instr sequence)
+  // with the entry address of the replacement code.
+  Assembler::set_target_address_at(pc_immediate_load_address,
+                                   replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, pc_immediate_load_address, replacement_code);
+}
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+    Isolate* isolate,
+    Code* unoptimized_code,
+    Address pc) {
+  static const int kInstrSize = Assembler::kInstrSize;
+  Address branch_address = pc - 8 * kInstrSize;
+  Address pc_immediate_load_address = pc - 6 * kInstrSize;
+
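+  // The first instruction of the back edge sequence encodes its state: an
+  // slt there (not an add-immediate) means the interrupt check is still in
+  // place; a daddiu means it has been patched for on-stack replacement.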
+  DCHECK(Assembler::IsBeq(Assembler::instr_at(pc - 7 * kInstrSize)));
+  if (!Assembler::IsAddImmediate(Assembler::instr_at(branch_address))) {
+    DCHECK(reinterpret_cast<uint64_t>(
+        Assembler::target_address_at(pc_immediate_load_address)) ==
+           reinterpret_cast<uint64_t>(
+               isolate->builtins()->InterruptCheck()->entry()));
+    return INTERRUPT;
+  }
+
+  DCHECK(Assembler::IsAddImmediate(Assembler::instr_at(branch_address)));
+
+  if (reinterpret_cast<uint64_t>(
+      Assembler::target_address_at(pc_immediate_load_address)) ==
+          reinterpret_cast<uint64_t>(
+              isolate->builtins()->OnStackReplacement()->entry())) {
+    return ON_STACK_REPLACEMENT;
+  }
+
+  DCHECK(reinterpret_cast<uint64_t>(
+      Assembler::target_address_at(pc_immediate_load_address)) ==
+         reinterpret_cast<uint64_t>(
+             isolate->builtins()->OsrAfterStackCheck()->entry()));
+  return OSR_AFTER_STACK_CHECK;
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/interface-descriptors-mips64.cc b/src/mips64/interface-descriptors-mips64.cc
new file mode 100644
index 0000000..8759bdd
--- /dev/null
+++ b/src/mips64/interface-descriptors-mips64.cc
@@ -0,0 +1,303 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return a1; }
+const Register LoadDescriptor::NameRegister() { return a2; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return a0; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return a3; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return a1; }
+const Register StoreDescriptor::NameRegister() { return a2; }
+const Register StoreDescriptor::ValueRegister() { return a0; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return a3; }
+
+
+const Register InstanceofDescriptor::left() { return a0; }
+const Register InstanceofDescriptor::right() { return a1; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return a1; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return a0; }
+
+
+const Register ApiGetterDescriptor::function_address() { return a2; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return a2; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+  return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a3, a2, a1};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a3, a2, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a2, a3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a3, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1, a3};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Smi()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // a0 : number of arguments
+  // a1 : the function to call
+  // a2 : feedback vector
+  // a3 : (only if a2 is not the megamorphic symbol) slot in feedback
+  //      vector (Smi)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructorStub needs the vector to be
+  // undefined.
+  Register registers[] = {cp, a0, a1, a2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a2, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0, a1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  // a0 -- number of arguments
+  // a1 -- function
+  // a2 -- allocation site with elements kind
+  Register registers[] = {cp, a1, a2};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // Stack param count needs to cover the constructor pointer and the single
+  // argument.
+  Register registers[] = {cp, a1, a2, a0};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  // a0 -- number of arguments
+  // a1 -- constructor function
+  Register registers[] = {cp, a1};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // Stack param count needs to cover the constructor pointer and the single
+  // argument.
+  Register registers[] = {cp, a1, a0};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a2, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, a1, a0};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a2,  // key
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // key
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a2,  // name
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // name
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a0,  // receiver
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // receiver
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a1,  // JSFunction
+      a0,  // actual number of arguments
+      a2,  // expected number of arguments
+  };
+  Representation representations[] = {
+      Representation::Tagged(),     // context
+      Representation::Tagged(),     // JSFunction
+      Representation::Integer32(),  // actual number of arguments
+      Representation::Integer32(),  // expected number of arguments
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      a0,  // callee
+      a4,  // call_data
+      a2,  // holder
+      a1,  // api_function_address
+  };
+  Representation representations[] = {
+      Representation::Tagged(),    // context
+      Representation::Tagged(),    // callee
+      Representation::Tagged(),    // call_data
+      Representation::Tagged(),    // holder
+      Representation::External(),  // api_function_address
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/lithium-codegen-mips64.cc b/src/mips64/lithium-codegen-mips64.cc
new file mode 100644
index 0000000..8a0a449
--- /dev/null
+++ b/src/mips64/lithium-codegen-mips64.cc
@@ -0,0 +1,5949 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/mips64/lithium-codegen-mips64.h"
+#include "src/mips64/lithium-gap-resolver-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator FINAL : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen,
+                     LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen),
+        pointers_(pointers),
+        deopt_mode_(mode) { }
+  virtual ~SafepointGenerator() {}
+
+  virtual void BeforeCall(int call_size) const OVERRIDE {}
+
+  virtual void AfterCall() const OVERRIDE {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ sdc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ ldc1(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ stop("stop_at");
+    }
+#endif
+
+    // a1: Callee's JS function.
+    // cp: Callee's context.
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    if (info_->this_has_uses() &&
+        info_->strict_mode() == SLOPPY &&
+        !info_->is_native()) {
+      Label ok;
+      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      __ ld(a2, MemOperand(sp, receiver_offset));
+      __ Branch(&ok, ne, a2, Operand(at));
+
+      __ ld(a2, GlobalObjectOperand());
+      __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+
+      __ sd(a2, MemOperand(sp, receiver_offset));
+
+      __ bind(&ok);
+    }
+  }
+
+  info()->set_prologue_offset(masm_->pc_offset());
+  if (NeedsEagerFrame()) {
+    if (info()->IsStub()) {
+      __ StubPrologue();
+    } else {
+      __ Prologue(info()->IsCodePreAgingActive());
+    }
+    frame_is_built_ = true;
+    info_->AddNoFrameRange(0, masm_->pc_offset());
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    if (FLAG_debug_code) {
+      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+      __ Push(a0, a1);
+      __ Daddu(a0, sp, Operand(slots * kPointerSize));
+      __ li(a1, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ Dsubu(a0, a0, Operand(kPointerSize));
+      __ sd(a1, MemOperand(a0, 2 * kPointerSize));
+      __ Branch(&loop, ne, a0, Operand(sp));
+      __ Pop(a0, a1);
+    } else {
+      __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+    }
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+
+  // Possibly allocate a local context.
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is in a1.
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), heap_slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(a1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
+    // Context is returned in both v0 and cp. It replaces the context
+    // passed to us. It's saved in the stack and kept live in cp.
+    __ mov(cp, v0);
+    __ sd(v0, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+            (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ ld(a0, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextOperand(cp, var->index());
+        __ sd(a0, target);
+        // Update the write barrier. This clobbers a3 and a0.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(
+              cp, target.offset(), a0, a3, GetRAState(), kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, a0, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  // Trace the call.
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // We have not executed any compiled code yet, so cp still holds the
+    // incoming context.
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if there
+  // are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ Dsubu(sp, sp, Operand(slots * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(";;; <@%d,#%d> "
+              "-------------------- Deferred %s --------------------",
+              code->instruction_index(),
+              code->instr()->hydrogen_value()->id(),
+              code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ Daddu(fp, sp,
+            Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        __ pop(at);
+        __ MultiPop(cp.bit() | fp.bit() | ra.bit());
+        frame_is_built_ = false;
+      }
+      __ jmp(code->exit());
+    }
+  }
+  // Deferred code is the last part of the instruction sequence. Mark
+  // the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  if (jump_table_.length() > 0) {
+    Comment(";;; -------------------- Jump table --------------------");
+  }
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Label table_start;
+  __ bind(&table_start);
+  Label needs_frame;
+  for (int i = 0; i < jump_table_.length(); i++) {
+    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+    __ bind(&table_entry->label);
+    Address entry = table_entry->address;
+    DeoptComment(table_entry->reason);
+    __ li(t9, Operand(ExternalReference::ForDeoptEntry(entry)));
+    if (table_entry->needs_frame) {
+      DCHECK(!info()->saves_caller_doubles());
+      if (needs_frame.is_bound()) {
+        __ Branch(&needs_frame);
+      } else {
+        __ bind(&needs_frame);
+        __ MultiPush(cp.bit() | fp.bit() | ra.bit());
+        // This variant of deopt can only be used with stubs. Since we don't
+        // have a function pointer to install in the stack frame that we're
+        // building, install a special marker there instead.
+        DCHECK(info()->IsStub());
+        __ li(scratch0(), Operand(Smi::FromInt(StackFrame::STUB)));
+        __ push(scratch0());
+        __ Daddu(fp, sp,
+            Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        __ Call(t9);
+      }
+    } else {
+      if (info()->saves_caller_doubles()) {
+        DCHECK(info()->IsStub());
+        RestoreCallerDoubles();
+      }
+      __ Call(t9);
+    }
+  }
+  __ RecordComment("]");
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      DCHECK(literal->IsNumber());
+      __ li(scratch, Operand(static_cast<int32_t>(literal->Number())));
+    } else if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      __ li(scratch, Operand(Smi::FromInt(constant->Integer32Value())));
+    } else if (r.IsDouble()) {
+      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+    } else {
+      DCHECK(r.IsSmiOrTagged());
+      __ li(scratch, literal);
+    }
+    return scratch;
+  } else if (op->IsStackSlot()) {
+    __ ld(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+DoubleRegister LCodeGen::EmitLoadDoubleRegister(LOperand* op,
+                                                FloatRegister flt_scratch,
+                                                DoubleRegister dbl_scratch) {
+  if (op->IsDoubleRegister()) {
+    return ToDoubleRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      DCHECK(literal->IsNumber());
+      __ li(at, Operand(static_cast<int32_t>(literal->Number())));
+      __ mtc1(at, flt_scratch);
+      __ cvt_d_w(dbl_scratch, flt_scratch);
+      return dbl_scratch;
+    } else if (r.IsDouble()) {
+      Abort(kUnsupportedDoubleImmediate);
+    } else if (r.IsTagged()) {
+      Abort(kUnsupportedTaggedImmediate);
+    }
+  } else if (op->IsStackSlot()) {
+    MemOperand mem_op = ToMemOperand(op);
+    __ ldc1(dbl_scratch, mem_op);
+    return dbl_scratch;
+  }
+  UNREACHABLE();
+  return dbl_scratch;
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return constant->Integer32Value();
+}
+
+
+int32_t LCodeGen::ToRepresentation_donotuse(LConstantOperand* op,
+                                            const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
+  return reinterpret_cast<int64_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      DCHECK(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    DCHECK(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand((int64_t)0);
+  }
+  // Stack slots are not implemented; use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand((int64_t)0);
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
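+  // index is negative: -1 maps to offset 0 from sp, -2 to kPointerSize, etc.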
+  return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // Without an eager frame, retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()) + kIntSize);
+  } else {
+    // Without an eager frame, retrieve the parameter relative to the
+    // stack pointer.
+    return MemOperand(
+        sp, ArgumentsOffsetWithoutFrame(op->index()) + kIntSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
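+  // Recurse on the outer environment first so that outer frames are written
+  // to the translation before inner ones.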
+  WriteTranslation(environment->outer(), translation);
+  bool has_closure_id = !info()->closure().is_null() &&
+      !info()->closure().is_identical_to(environment->closure());
+  int closure_id = has_closure_id
+      ? DefineDeoptimizationLiteral(environment->closure())
+      : Translation::kSelfLiteralId;
+
+  switch (environment->frame_type()) {
+    case JS_FUNCTION:
+      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+      break;
+    case JS_CONSTRUCT:
+      translation->BeginConstructStubFrame(closure_id, translation_size);
+      break;
+    case JS_GETTER:
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
+      translation->BeginGetterStubFrame(closure_id);
+      break;
+    case JS_SETTER:
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
+      translation->BeginSetterStubFrame(closure_id);
+      break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+      break;
+  }
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(environment,
+                     translation,
+                     value,
+                     environment->HasTaggedValueAt(i),
+                     environment->HasUint32ValueAt(i),
+                     &object_index,
+                     &dematerialized_index);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation,
+                                LOperand* op,
+                                bool is_tagged,
+                                bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment,
+                       translation,
+                       value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer,
+                       dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code,
+                        RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code,
+                               RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function,
+                           int num_arguments,
+                           LInstruction* instr,
+                           SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ ld(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ li(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id,
+                                       int argc,
+                                       LInstruction* instr,
+                                       LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index,
+                          translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Deoptimizer::BailoutType bailout_type,
+                            Register src1, const Operand& src2,
+                            const char* detail) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+    Register scratch = scratch0();
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+    __ Push(a1, scratch);
+    __ li(scratch, Operand(count));
+    __ lw(a1, MemOperand(scratch));
+    __ Subu(a1, a1, Operand(1));
+    __ Branch(&no_deopt, ne, a1, Operand(zero_reg));
+    __ li(a1, Operand(FLAG_deopt_every_n_times));
+    __ sw(a1, MemOperand(scratch));
+    __ Pop(a1, scratch);
+
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&no_deopt);
+    __ sw(a1, MemOperand(scratch));
+    __ Pop(a1, scratch);
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    Label skip;
+    if (condition != al) {
+      __ Branch(&skip, NegateCondition(condition), src1, src2);
+    }
+    __ stop("trap_on_deopt");
+    __ bind(&skip);
+  }
+
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to handle a condition, build a
+  // frame, or restore caller doubles.
+  if (condition == al && frame_is_built_ &&
+      !info()->saves_caller_doubles()) {
+    DeoptComment(reason);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY, condition, src1, src2);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    __ Branch(&jump_table_.last().label, condition, src1, src2);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            Register src1, const Operand& src2,
+                            const char* detail) {
+  Deoptimizer::BailoutType bailout_type = info()->IsStub()
+      ? Deoptimizer::LAZY
+      : Deoptimizer::EAGER;
+  DeoptimizeIf(condition, instr, bailout_type, src1, src2, detail);
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  Handle<DeoptimizationInputData> data =
+      DeoptimizationInputData::New(isolate(), length, TENURED);
+
+  Handle<ByteArray> translations =
+      translations_.CreateByteArray(isolate()->factory());
+  data->SetTranslationByteArray(*translations);
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+  if (info_->IsOptimizing()) {
+    // Reference to shared function info does not change between phases.
+    AllowDeferredHandleDereference allow_handle_dereference;
+    data->SetSharedFunctionInfo(*info_->shared_info());
+  } else {
+    data->SetSharedFunctionInfo(Smi::FromInt(0));
+  }
+
+  Handle<FixedArray> literals =
+      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  { AllowDeferredHandleDereference copy_handles;
+    for (int i = 0; i < deoptimization_literals_.length(); i++) {
+      literals->set(i, *deoptimization_literals_[i]);
+    }
+    data->SetLiteralArray(*literals);
+  }
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, env->ast_id());
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal, zone());
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  DCHECK(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length();
+       i < length;
+       i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(
+    LInstruction* instr, SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(
+    LPointerMap* pointers,
+    Safepoint::Kind kind,
+    int arguments,
+    Safepoint::DeoptMode deopt_mode) {
+  DCHECK(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint = safepoints_.DefineSafepoint(masm(),
+      kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(
+      pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_,
+          label->hydrogen_value()->id(),
+          label->block_id(),
+          LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) {
+  resolver_.Resolve(move);
+}
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION;
+       i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) {
+  DoGap(instr);
+}
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
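+  // Worked example for divisor +/-8, i.e. mask == 7:
+  //   13 % 8 == 13 & 7 == 5, while -13 % 8 == -(13 & 7) == -5
+  // (JavaScript's % takes the sign of the dividend).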
+  HMod* hmod = instr->hydrogen();
+  int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+  Label dividend_is_not_negative, done;
+
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ Branch(&dividend_is_not_negative, ge, dividend, Operand(zero_reg));
+    // Note: The code below even works when right contains kMinInt.
+    __ dsubu(dividend, zero_reg, dividend);
+    __ And(dividend, dividend, Operand(mask));
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
+    }
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ dsubu(dividend, zero_reg, dividend);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  __ And(dividend, dividend, Operand(mask));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr);
+    return;
+  }
+
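+  // Compute n % d as n - trunc(n / d) * d, where the truncating division by
+  // the constant |d| is done by TruncatingDiv's multiply-based reciprocal
+  // sequence instead of a hardware divide.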
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  __ Dmul(result, result, Operand(Abs(divisor)));
+  __ Dsubu(result, dividend, Operand(result));
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ Branch(&remainder_not_zero, ne, result, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, dividend, Operand(zero_reg));
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+  const Register left_reg = ToRegister(instr->left());
+  const Register right_reg = ToRegister(instr->right());
+  const Register result_reg = ToRegister(instr->result());
+
+  // div runs in the background while we check for special cases.
+  __ Dmod(result_reg, left_reg, right_reg);
+
+  Label done;
+  // Check for x % 0, we have to deopt in this case because we can't return a
+  // NaN.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, right_reg, Operand(zero_reg));
+  }
+
+  // Check for kMinInt % -1, div will return kMinInt, which is not what we
+  // want. We have to deopt if we care about -0, because we can't return that.
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    __ Branch(&no_overflow_possible, ne, left_reg, Operand(kMinInt));
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(eq, instr, right_reg, Operand(-1));
+    } else {
+      __ Branch(&no_overflow_possible, ne, right_reg, Operand(-1));
+      __ Branch(USE_DELAY_SLOT, &done);
+      __ mov(result_reg, zero_reg);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  __ Branch(&done, ge, left_reg, Operand(zero_reg));
+
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, result_reg, Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
+  }
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    DeoptimizeIf(eq, instr, dividend, Operand(kMinInt));
+  }
+  // Deoptimize if remainder will not be 0.
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+      divisor != 1 && divisor != -1) {
+    int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
+    __ And(at, dividend, Operand(mask));
+    DeoptimizeIf(ne, instr, at, Operand(zero_reg));
+  }
+
+  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
+    __ Dsubu(result, zero_reg, dividend);
+    return;
+  }
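+  // An arithmetic right shift alone rounds toward -infinity. To round toward
+  // zero instead, add a bias of 2^shift - 1 to negative dividends first; the
+  // bias is materialized from the dividend's sign bits below.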
+  uint16_t shift = WhichPowerOf2Abs(divisor);
+  if (shift == 0) {
+    __ Move(result, dividend);
+  } else if (shift == 1) {
+    __ dsrl32(result, dividend, 31);
+    __ Daddu(result, dividend, Operand(result));
+  } else {
+    __ dsra32(result, dividend, 31);
+    __ dsrl32(result, result, 32 - shift);
+    __ Daddu(result, dividend, Operand(result));
+  }
+  if (shift > 0) __ dsra(result, result, shift);
+  if (divisor < 0) __ Dsubu(result, zero_reg, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ Subu(result, zero_reg, result);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    __ Dmul(scratch0(), result, Operand(divisor));
+    __ Dsubu(scratch0(), scratch0(), dividend);
+    DeoptimizeIf(ne, instr, scratch0(), Operand(zero_reg));
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  const Register result = ToRegister(instr->result());
+
+  // On MIPS div is asynchronous - it will run in the background while we
+  // check for special cases.
+  __ Ddiv(result, dividend, divisor);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+    DeoptimizeIf(eq, instr, divisor, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
+  if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    // Calculate remainder.
+    Register remainder = ToRegister(instr->temp());
+    if (kArchVariant != kMips64r6) {
+      __ mfhi(remainder);
+    } else {
+      __ dmod(remainder, dividend, divisor);
+    }
+    DeoptimizeIf(ne, instr, remainder, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+  DoubleRegister addend = ToDoubleRegister(instr->addend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+
+  // This is computed in-place.
+  DCHECK(addend.is(ToDoubleRegister(instr->result())));
+
+  __ Madd_d(addend, addend, multiplier, multiplicand, double_scratch0());
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  Register result = ToRegister(instr->result());
+  int32_t divisor = instr->divisor();
+  Register scratch = result.is(dividend) ? scratch0() : dividend;
+  DCHECK(!result.is(dividend) || !scratch.is(dividend));
+
+  // If the divisor is 1, return the dividend.
+  if (divisor == 1) {
+    __ Move(result, dividend);
+    return;
+  }
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  uint16_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 1) {
+    __ dsra(result, dividend, shift);
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  // Dividend can be the same register as result so save the value of it
+  // for checking overflow.
+  __ Move(scratch, dividend);
+
+  __ Dsubu(result, zero_reg, dividend);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, result, Operand(zero_reg));
+  }
+
+  __ Xor(scratch, scratch, result);
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+      DeoptimizeIf(gt, instr, result, Operand(kMaxInt));
+    }
+    return;
+  }
+
+  // If the negation could not overflow, simply shifting is OK.
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ dsra(result, result, shift);
+    return;
+  }
+
+  Label no_overflow, done;
+  __ Branch(&no_overflow, lt, scratch, Operand(zero_reg));
+  __ li(result, Operand(kMinInt / divisor), CONSTANT_SIZE);
+  __ Branch(&done);
+  __ bind(&no_overflow);
+  __ dsra(result, result, shift);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr);
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    DeoptimizeIf(eq, instr, dividend, Operand(zero_reg));
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
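+  // (Truncating and flooring division agree whenever the quotient is
+  // non-negative, as the sign conditions below guarantee; e.g. both give
+  // 7 / 2 == 3.)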
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(result, dividend, Abs(divisor));
+    if (divisor < 0) __ Dsubu(result, zero_reg, result);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
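+  // E.g. floor(-7 / 2): adjust the dividend to -7 + 1 == -6, truncate
+  // -6 / 2 == -3, then subtract 1 to get -4 == floor(-3.5).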
+  Register temp = ToRegister(instr->temp());
+  DCHECK(!temp.is(dividend) && !temp.is(result));
+  Label needs_adjustment, done;
+  __ Branch(&needs_adjustment, divisor > 0 ? lt : gt,
+            dividend, Operand(zero_reg));
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ Dsubu(result, zero_reg, result);
+  __ jmp(&done);
+  __ bind(&needs_adjustment);
+  __ Daddu(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(result, temp, Abs(divisor));
+  if (divisor < 0) __ Dsubu(result, zero_reg, result);
+  __ Dsubu(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register divisor = ToRegister(instr->divisor());
+  const Register result = ToRegister(instr->result());
+
+  // On MIPS div is asynchronous - it will run in the background while we
+  // check for special cases.
+  __ Ddiv(result, dividend, divisor);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    DeoptimizeIf(eq, instr, divisor, Operand(zero_reg));
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label left_not_zero;
+    __ Branch(&left_not_zero, ne, dividend, Operand(zero_reg));
+    DeoptimizeIf(lt, instr, divisor, Operand(zero_reg));
+    __ bind(&left_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow) &&
+      !hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+    Label left_not_min_int;
+    __ Branch(&left_not_min_int, ne, dividend, Operand(kMinInt));
+    DeoptimizeIf(eq, instr, divisor, Operand(-1));
+    __ bind(&left_not_min_int);
+  }
+
+  // We performed a truncating division. Correct the result if necessary.
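+  // The quotient needs a -1 adjustment exactly when the remainder is
+  // non-zero and its sign differs from the divisor's (tested via the Xor
+  // below). E.g. -7 / 2: Ddiv truncates to -3 with remainder -1, and
+  // (-1 ^ 2) < 0, so the result becomes -4 == floor(-3.5).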
+  Label done;
+  Register remainder = scratch0();
+  if (kArchVariant != kMips64r6) {
+    __ mfhi(remainder);
+  } else {
+    __ dmod(remainder, dividend, divisor);
+  }
+  __ Branch(&done, eq, remainder, Operand(zero_reg), USE_DELAY_SLOT);
+  __ Xor(remainder, remainder, Operand(divisor));
+  __ Branch(&done, ge, remainder, Operand(zero_reg));
+  __ Dsubu(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->left());
+  LOperand* right_op = instr->right();
+
+  bool bailout_on_minus_zero =
+    instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (right_op->IsConstantOperand()) {
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below. If the
+      // constant is negative and left is zero, the result should be -0.
+      DeoptimizeIf(eq, instr, left, Operand(zero_reg));
+    }
+
+    switch (constant) {
+      case -1:
+        if (overflow) {
+          __ SubuAndCheckForOverflow(result, zero_reg, left, scratch);
+          DeoptimizeIf(gt, instr, scratch, Operand(kMaxInt));
+        } else {
+          __ Dsubu(result, zero_reg, left);
+        }
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+          // If left is strictly negative and the constant is zero, the
+          // result is -0. Deoptimize if required, otherwise return 0.
+          DeoptimizeIf(lt, instr, left, Operand(zero_reg));
+        }
+        __ mov(result, zero_reg);
+        break;
+      case 1:
+        // Nothing to do.
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
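+        // E.g. x * 8 == x << 3, x * 9 == (x << 3) + x, and
+        // x * 7 == (x << 3) - x. The mask trick below computes |constant|
+        // without a branch: for constant == -6, mask == -1 and
+        // (-6 + -1) ^ -1 == 6.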
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
+
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ dsll(result, left, shift);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Dsubu(result, zero_reg, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ dsll(scratch, left, shift);
+          __ Daddu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Dsubu(result, zero_reg, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ dsll(scratch, left, shift);
+          __ Dsubu(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ Dsubu(result, zero_reg, result);
+        } else {
+          // Generate standard code.
+          __ li(at, constant);
+          __ Dmul(result, left, at);
+        }
+    }
+
+  } else {
+    DCHECK(right_op->IsRegister());
+    Register right = ToRegister(right_op);
+
+    if (overflow) {
+      // hi:lo = left * right.
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ Dmulh(result, left, right);
+      } else {
+        __ Dmul(result, left, right);
+      }
+      __ dsra32(scratch, result, 0);
+      __ sra(at, result, 31);
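+      // The product fits in 32 bits iff bits 63..32 of the result equal
+      // the sign extension of bit 31; otherwise deoptimize on overflow.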
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiTag(result);
+      }
+      DeoptimizeIf(ne, instr, scratch, Operand(at));
+    } else {
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ Dmul(result, result, right);
+      } else {
+        __ Dmul(result, left, right);
+      }
+    }
+
+    if (bailout_on_minus_zero) {
+      Label done;
+      __ Xor(at, left, right);
+      __ Branch(&done, ge, at, Operand(zero_reg));
+      // Bail out if the result is minus zero.
+      DeoptimizeIf(eq, instr, result, Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->left();
+  LOperand* right_op = instr->right();
+  DCHECK(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot()) {
+    right = Operand(EmitLoadRegister(right_op, at));
+  } else {
+    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ And(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ Or(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+        __ Nor(result, zero_reg, left);
+      } else {
+        __ Xor(result, left, right);
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+
+  if (right_op->IsRegister()) {
+    // No need to mask the right operand on MIPS, it is built into the variable
+    // shift instructions.
+    switch (instr->op()) {
+      case Token::ROR:
+        __ Ror(result, left, Operand(ToRegister(right_op)));
+        break;
+      case Token::SAR:
+        __ srav(result, left, ToRegister(right_op));
+        break;
+      case Token::SHR:
+        __ srlv(result, left, ToRegister(right_op));
+        if (instr->can_deopt()) {
+          // TODO(yy): (-1) >>> 0. anything else?
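+          // An unsigned shift can produce a value outside the int32 range,
+          // e.g. (-1) >>> 0 == 0xFFFFFFFF; deoptimize when the result is
+          // not representable as an int32.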
+          DeoptimizeIf(lt, instr, result, Operand(zero_reg));
+          DeoptimizeIf(gt, instr, result, Operand(kMaxInt));
+        }
+        break;
+      case Token::SHL:
+        __ sllv(result, left, ToRegister(right_op));
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the right_op operand.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count != 0) {
+          __ Ror(result, left, Operand(shift_count));
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ sra(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ srl(result, left, shift_count);
+        } else {
+          if (instr->can_deopt()) {
+            __ And(at, left, Operand(0x80000000));
+            DeoptimizeIf(ne, instr, at, Operand(zero_reg));
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+          if (instr->hydrogen_value()->representation().IsSmi()) {
+            __ dsll(result, left, shift_count);
+          } else {
+            __ sll(result, left, shift_count);
+          }
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Dsubu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      DCHECK(right->IsRegister() || right->IsConstantOperand());
+      __ Dsubu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    if (right->IsStackSlot() || right->IsConstantOperand()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ SubuAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 right_reg,
+                                 overflow);  // Reg at also used as scratch.
+    } else {
+      DCHECK(right->IsRegister());
+      // The overflow check macros do not support constant operands, so the
+      // IsConstantOperand case is handled by the previous clause.
+      __ SubuAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 ToRegister(right),
+                                 overflow);  // Reg at also used as scratch.
+    }
+    DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
+    if (!instr->hydrogen()->representation().IsSmi()) {
+      DeoptimizeIf(gt, instr, ToRegister(result), Operand(kMaxInt));
+      DeoptimizeIf(lt, instr, ToRegister(result), Operand(kMinInt));
+    }
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  DCHECK(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  __ Move(result, v);
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ li(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ li(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+  Register object = ToRegister(instr->date());
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp());
+  Smi* index = instr->index();
+  Label runtime, done;
+  DCHECK(object.is(a0));
+  DCHECK(result.is(v0));
+  DCHECK(!scratch.is(scratch0()));
+  DCHECK(!scratch.is(object));
+
+  __ SmiTst(object, at);
+  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
+  __ GetObjectType(object, scratch, scratch);
+  DeoptimizeIf(ne, instr, scratch, Operand(JS_DATE_TYPE));
+
+  if (index->value() == 0) {
+    __ ld(result, FieldMemOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
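+      // Cached date fields are valid only while the object's cache stamp
+      // matches the isolate's date cache stamp; otherwise fall through to
+      // the runtime call below.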
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ li(scratch, Operand(stamp));
+      __ ld(scratch, MemOperand(scratch));
+      __ ld(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ Branch(&runtime, ne, scratch, Operand(scratch0()));
+      __ ld(result, FieldMemOperand(object, JSDate::kValueOffset +
+                                            kPointerSize * index->value()));
+      __ jmp(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch);
+    __ li(a1, Operand(index));
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string,
+                                           LOperand* index,
+                                           String::Encoding encoding) {
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+  }
+  Register scratch = scratch0();
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ Daddu(scratch, string, ToRegister(index));
+  } else {
+    STATIC_ASSERT(kUC16Size == 2);
+    __ dsll(scratch, ToRegister(index), 1);
+    __ Daddu(scratch, string, scratch);
+  }
+  return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    __ ld(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+    __ And(scratch, scratch,
+           Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ Dsubu(at, scratch, Operand(encoding == String::ONE_BYTE_ENCODING
+                                ? one_byte_seq_type : two_byte_seq_type));
+    __ Check(eq, kUnexpectedStringType, at, Operand(zero_reg));
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ lbu(result, operand);
+  } else {
+    __ lhu(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register value = ToRegister(instr->value());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+        ? one_byte_seq_type : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, scratch, encoding_mask);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ sb(value, operand);
+  } else {
+    __ sh(value, operand);
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (!can_overflow) {
+    if (right->IsStackSlot()) {
+      Register right_reg = EmitLoadRegister(right, at);
+      __ Daddu(ToRegister(result), ToRegister(left), Operand(right_reg));
+    } else {
+      DCHECK(right->IsRegister() || right->IsConstantOperand());
+      __ Daddu(ToRegister(result), ToRegister(left), ToOperand(right));
+    }
+  } else {  // can_overflow.
+    Register overflow = scratch0();
+    Register scratch = scratch1();
+    if (right->IsStackSlot() || right->IsConstantOperand()) {
+      Register right_reg = EmitLoadRegister(right, scratch);
+      __ AdduAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 right_reg,
+                                 overflow);  // Reg at also used as scratch.
+    } else {
+      DCHECK(right->IsRegister());
+      // The overflow check macros do not support constant operands, so the
+      // IsConstantOperand case is handled by the previous clause.
+      __ AdduAndCheckForOverflow(ToRegister(result),
+                                 ToRegister(left),
+                                 ToRegister(right),
+                                 overflow);  // Reg at also used as scratch.
+    }
+    DeoptimizeIf(lt, instr, overflow, Operand(zero_reg));
+    // If the result is not a smi, it must be an int32; deopt if it is out
+    // of range.
+    if (!instr->hydrogen()->representation().IsSmi()) {
+      DeoptimizeIf(gt, instr, ToRegister(result), Operand(kMaxInt));
+      DeoptimizeIf(lt, instr, ToRegister(result), Operand(kMinInt));
+    }
+  }
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  Condition condition = (operation == HMathMinMax::kMathMin) ? le : ge;
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Register left_reg = ToRegister(left);
+    Register right_reg = EmitLoadRegister(right, scratch0());
+    Register result_reg = ToRegister(instr->result());
+    Label return_right, done;
+    Register scratch = scratch1();
+    __ Slt(scratch, left_reg, Operand(right_reg));
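+    // Slt sets scratch to 1 iff left < right; Movz/Movn then select an
+    // operand without branching, e.g. for kMathMax the result is left when
+    // scratch == 0 and right when scratch == 1.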
+    if (condition == ge) {
+      __ Movz(result_reg, left_reg, scratch);
+      __ Movn(result_reg, right_reg, scratch);
+    } else {
+      DCHECK(condition == le);
+      __ Movn(result_reg, left_reg, scratch);
+      __ Movz(result_reg, right_reg, scratch);
+    }
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    FPURegister left_reg = ToDoubleRegister(left);
+    FPURegister right_reg = ToDoubleRegister(right);
+    FPURegister result_reg = ToDoubleRegister(instr->result());
+    Label check_nan_left, check_zero, return_left, return_right, done;
+    __ BranchF(&check_zero, &check_nan_left, eq, left_reg, right_reg);
+    __ BranchF(&return_left, NULL, condition, left_reg, right_reg);
+    __ Branch(&return_right);
+
+    __ bind(&check_zero);
+    // left == right != 0.
+    __ BranchF(&return_left, NULL, ne, left_reg, kDoubleRegZero);
+    // At this point, both left and right are either 0 or -0.
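+    // IEEE 754 addition picks the correctly signed zero for max
+    // (+0 + -0 == +0), while for min we compute -((-left) - right), which
+    // yields -0 whenever either input is -0.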
+    if (operation == HMathMinMax::kMathMin) {
+      __ neg_d(left_reg, left_reg);
+      __ sub_d(result_reg, left_reg, right_reg);
+      __ neg_d(result_reg, result_reg);
+    } else {
+      __ add_d(result_reg, left_reg, right_reg);
+    }
+    __ Branch(&done);
+
+    __ bind(&check_nan_left);
+    // left == NaN.
+    __ BranchF(NULL, &return_left, eq, left_reg, left_reg);
+    __ bind(&return_right);
+    if (!right_reg.is(result_reg)) {
+      __ mov_d(result_reg, right_reg);
+    }
+    __ Branch(&done);
+
+    __ bind(&return_left);
+    if (!left_reg.is(result_reg)) {
+      __ mov_d(result_reg, left_reg);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ add_d(result, left, right);
+      break;
+    case Token::SUB:
+      __ sub_d(result, left, right);
+      break;
+    case Token::MUL:
+      __ mul_d(result, left, right);
+      break;
+    case Token::DIV:
+      __ div_d(result, left, right);
+      break;
+    case Token::MOD: {
+      // Save a0-a3 on the stack.
+      RegList saved_regs = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+      __ MultiPush(saved_regs);
+
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ MovToFloatParameters(left, right);
+      __ CallCFunction(
+          ExternalReference::mod_two_doubles_operation(isolate()),
+          0, 2);
+      // Move the result into the double result register.
+      __ MovFromFloatResult(result);
+
+      // Restore the saved registers.
+      __ MultiPop(saved_regs);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+  // Other architectures use a nop here to signal that there is no inlined
+  // patchable code. MIPS does not need the nop, since our marker
+  // instruction (andi zero_reg) will never be used in normal code.
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranch(InstrType instr,
+                          Condition condition,
+                          Register src1,
+                          const Operand& src2) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
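+  // Prefer falling through: when one destination is the next emitted block,
+  // a single (possibly negated) branch suffices instead of a branch plus an
+  // unconditional jump.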
+  if (right_block == left_block || condition == al) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(right_block),
+              NegateCondition(condition), src1, src2);
+  } else if (right_block == next_block) {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
+  } else {
+    __ Branch(chunk_->GetAssemblyLabel(left_block), condition, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitBranchF(InstrType instr,
+                           Condition condition,
+                           FPURegister src1,
+                           FPURegister src2) {
+  int right_block = instr->FalseDestination(chunk_);
+  int left_block = instr->TrueDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+  if (right_block == left_block) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(right_block), NULL,
+               NegateCondition(condition), src1, src2);
+  } else if (right_block == next_block) {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+               condition, src1, src2);
+  } else {
+    __ BranchF(chunk_->GetAssemblyLabel(left_block), NULL,
+               condition, src1, src2);
+    __ Branch(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr,
+                               Condition condition,
+                               Register src1,
+                               const Operand& src2) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ Branch(chunk_->GetAssemblyLabel(false_block), condition, src1, src2);
+}
+
+
+template<class InstrType>
+void LCodeGen::EmitFalseBranchF(InstrType instr,
+                                Condition condition,
+                                FPURegister src1,
+                                FPURegister src2) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ BranchF(chunk_->GetAssemblyLabel(false_block), NULL,
+             condition, src1, src2);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) {
+  __ stop("LDebugBreak");
+}
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsInteger32() || r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    EmitBranch(instr, ne, reg, Operand(zero_reg));
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    DoubleRegister reg = ToDoubleRegister(instr->value());
+    // Test the double value. Zero and NaN are false.
+    EmitBranchF(instr, nue, reg, kDoubleRegZero);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ LoadRoot(at, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq, reg, Operand(at));
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, ne, reg, Operand(zero_reg));
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, al, zero_reg, Operand(zero_reg));
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      DoubleRegister dbl_scratch = double_scratch0();
+      __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      EmitBranchF(instr, nue, dbl_scratch, kDoubleRegZero);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+      EmitBranch(instr, ne, at, Operand(zero_reg));
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ LoadRoot(at, Heap::kTrueValueRootIndex);
+        __ Branch(instr->TrueLabel(chunk_), eq, reg, Operand(at));
+        __ LoadRoot(at, Heap::kFalseValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ LoadRoot(at, Heap::kNullValueRootIndex);
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(at));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ Branch(instr->FalseLabel(chunk_), eq, reg, Operand(zero_reg));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ SmiTst(reg, at);
+        DeoptimizeIf(eq, instr, at, Operand(zero_reg));
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ ld(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ And(at, at, Operand(1 << Map::kIsUndetectable));
+          __ Branch(instr->FalseLabel(chunk_), ne, at, Operand(zero_reg));
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(instr->TrueLabel(chunk_),
+                  ge, at, Operand(FIRST_SPEC_OBJECT_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ lbu(at, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(&not_string, ge, at, Operand(FIRST_NONSTRING_TYPE));
+        __ ld(at, FieldMemOperand(reg, String::kLengthOffset));
+        __ Branch(instr->TrueLabel(chunk_), ne, at, Operand(zero_reg));
+        __ Branch(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        const Register scratch = scratch1();
+        __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+        __ Branch(instr->TrueLabel(chunk_), eq, scratch, Operand(SYMBOL_TYPE));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        DoubleRegister dbl_scratch = double_scratch0();
+        Label not_heap_number;
+        __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+        __ Branch(&not_heap_number, ne, map, Operand(at));
+        __ ldc1(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        __ BranchF(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                   ne, dbl_scratch, kDoubleRegZero);
+        // Falls through if dbl_scratch == 0.
+        __ Branch(instr->FalseLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr, zero_reg, Operand(zero_reg));
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ jmp(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) {
+  EmitGoto(instr->block_id());
+}
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op, bool is_unsigned) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = is_unsigned ? lo : lt;
+      break;
+    case Token::GT:
+      cond = is_unsigned ? hi : gt;
+      break;
+    case Token::LTE:
+      cond = is_unsigned ? ls : le;
+      break;
+    case Token::GTE:
+      cond = is_unsigned ? hs : ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cond = TokenToCondition(instr->op(), is_unsigned);
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val) ?
+        instr->TrueDestination(chunk_) : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right as doubles and load the
+      // resulting flags into the normal status register.
+      FPURegister left_reg = ToDoubleRegister(left);
+      FPURegister right_reg = ToDoubleRegister(right);
+
+      // If a NaN is involved, i.e. the comparison is unordered, jump to the
+      // false block's label.
+      __ BranchF(NULL, instr->FalseLabel(chunk_), eq,
+                 left_reg, right_reg);
+
+      EmitBranchF(instr, cond, left_reg, right_reg);
+    } else {
+      Register cmp_left;
+      Operand cmp_right = Operand((int64_t)0);
+      if (right->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(right));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          cmp_left = ToRegister(left);
+          cmp_right = Operand(Smi::FromInt(value));
+        } else {
+          cmp_left = ToRegister(left);
+          cmp_right = Operand(value);
+        }
+      } else if (left->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(left));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          cmp_left = ToRegister(right);
+          cmp_right = Operand(Smi::FromInt(value));
+        } else {
+          cmp_left = ToRegister(right);
+          cmp_right = Operand(value);
+        }
+        // We commuted the operands, so commute the condition.
+        cond = CommuteCondition(cond);
+      } else {
+        cmp_left = ToRegister(left);
+        cmp_right = Operand(ToRegister(right));
+      }
+
+      EmitBranch(instr, cond, cmp_left, cmp_right);
+    }
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  EmitBranch(instr, eq, left, Operand(right));
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ li(at, Operand(factory()->the_hole_value()));
+    EmitBranch(instr, eq, input_reg, Operand(at));
+    return;
+  }
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->object());
+  EmitFalseBranchF(instr, eq, input_reg, input_reg);
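+  // Any value that compares equal to itself is not a NaN and therefore
+  // cannot be the hole; for NaNs, the upper 32 bits distinguish the hole
+  // NaN pattern from all other NaNs.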
+
+  Register scratch = scratch0();
+  __ FmoveHigh(scratch, input_reg);
+  EmitBranch(instr, eq, scratch, Operand(kHoleNanUpper32));
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    EmitFalseBranchF(instr, ne, value, kDoubleRegZero);
+    __ FmoveHigh(scratch, value);
+    // Zero-extend scratch: only its low 32 bits (the upper word of the
+    // double) are meaningful.
+    __ dsll32(scratch, scratch, 0);
+    __ dsrl32(scratch, scratch, 0);
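+    // -0.0 has bit pattern 0x8000000000000000, so after the zero-extension
+    // scratch holds 0x80000000 exactly for -0.0.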
+    __ li(at, 0x80000000);
+  } else {
+    Register value = ToRegister(instr->value());
+    __ CheckMap(value,
+                scratch,
+                Heap::kHeapNumberMapRootIndex,
+                instr->FalseLabel(chunk()),
+                DO_SMI_CHECK);
+    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+    EmitFalseBranch(instr, ne, scratch, Operand(0x80000000));
+    __ lwu(scratch, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+    __ mov(at, zero_reg);
+  }
+  EmitBranch(instr, eq, scratch, Operand(at));
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input,
+                                 Register temp1,
+                                 Register temp2,
+                                 Label* is_not_object,
+                                 Label* is_object) {
+  __ JumpIfSmi(input, is_not_object);
+
+  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+  __ Branch(is_object, eq, input, Operand(temp2));
+
+  // Load map.
+  __ ld(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined.
+  __ lbu(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+  __ And(temp2, temp2, Operand(1 << Map::kIsUndetectable));
+  __ Branch(is_not_object, ne, temp2, Operand(zero_reg));
+
+  // Load instance type and check that it is in object type range.
+  __ lbu(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+  __ Branch(is_not_object,
+            lt, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+
+  return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+  Register temp2 = scratch0();
+
+  Condition true_cond =
+      EmitIsObject(reg, temp1, temp2,
+          instr->FalseLabel(chunk_), instr->TrueLabel(chunk_));
+
+  EmitBranch(instr, true_cond, temp2,
+             Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+Condition LCodeGen::EmitIsString(Register input,
+                                 Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+  __ GetObjectType(input, temp1, temp1);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+
+  SmiCheck check_needed =
+      instr->hydrogen()->value()->type().IsHeapObject()
+          ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+  Condition true_cond =
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond, temp1,
+             Operand(FIRST_NONSTRING_TYPE));
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Register input_reg = EmitLoadRegister(instr->value(), at);
+  __ And(at, input_reg, kSmiTagMask);
+  EmitBranch(instr, eq, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ ld(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbu(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ And(at, temp, Operand(1 << Map::kIsUndetectable));
+  EmitBranch(instr, ne, at, Operand(zero_reg));
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+
+  Condition condition = ComputeCompareCondition(op);
+
+  EmitBranch(instr, condition, v0, Operand(zero_reg));
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return hs;
+  if (from == FIRST_TYPE) return ls;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ GetObjectType(input, scratch, scratch);
+  EmitBranch(instr,
+             BranchCondition(instr->hydrogen()),
+             scratch,
+             Operand(TestType(instr->hydrogen())));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ lwu(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ lwu(scratch,
+         FieldMemOperand(input, String::kHashFieldOffset));
+  __ And(at, scratch, Operand(String::kContainsCachedArrayIndexMask));
+  EmitBranch(instr, eq, at, Operand(zero_reg));
+}
+
+
+  // Branches to a label or falls through with the instance class name left
+  // in the temp register for the caller's comparison (MIPS has no flags
+  // register). Trashes the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true,
+                               Label* is_false,
+                               Handle<String> class_name,
+                               Register input,
+                               Register temp,
+                               Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+
+    __ GetObjectType(input, temp, temp2);
+    __ Branch(is_false, lt, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+    __ Branch(is_true, eq, temp2, Operand(FIRST_SPEC_OBJECT_TYPE));
+    __ Branch(is_true, eq, temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+  } else {
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ GetObjectType(input, temp, temp2);
+    __ Dsubu(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ Branch(is_false, gt, temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  __ ld(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ GetObjectType(temp, temp2, temp2);
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
+    __ Branch(is_true, ne, temp2, Operand(JS_FUNCTION_TYPE));
+  } else {
+    __ Branch(is_false, ne, temp2, Operand(JS_FUNCTION_TYPE));
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ ld(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ ld(temp, FieldMemOperand(temp,
+                               SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+
+  // End with the address of this class_name instance in the temp register.
+  // On MIPS, the caller must do the comparison with Handle<String> class_name.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->temp());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                  class_name, input, temp, temp2);
+
+  EmitBranch(instr, eq, temp, Operand(class_name));
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ ld(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  EmitBranch(instr, eq, temp, Operand(instr->map()));
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Label true_label, done;
+  DCHECK(ToRegister(instr->left()).is(a0));  // Object is in a0.
+  DCHECK(ToRegister(instr->right()).is(a1));  // Function is in a1.
+  Register result = ToRegister(instr->result());
+  DCHECK(result.is(v0));
+
+  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
+  __ Branch(&true_label, eq, result, Operand(zero_reg));
+  __ li(result, Operand(factory()->false_value()));
+  __ Branch(&done);
+  __ bind(&true_label);
+  __ li(result, Operand(factory()->true_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
+   public:
+    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+                                  LInstanceOfKnownGlobal* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    Label* map_check() { return &map_check_; }
+
+   private:
+    LInstanceOfKnownGlobal* instr_;
+    Label map_check_;
+  };
+
+  DeferredInstanceOfKnownGlobal* deferred;
+  deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+  Label done, false_result;
+  Register object = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+  Register result = ToRegister(instr->result());
+
+  DCHECK(object.is(a0));
+  DCHECK(result.is(v0));
+
+  // A Smi is not instance of anything.
+  __ JumpIfSmi(object, &false_result);
+
+  // This is the inlined call site instanceof cache. The two occurrences of
+  // the hole value will be patched to the last map/result pair generated by
+  // the instanceof stub.
+  Label cache_miss;
+  Register map = temp;
+  __ ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
+
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  __ bind(deferred->map_check());  // Label for calculating code patching.
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation to be able to later patch with
+  // the cached map.
+  Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+  __ li(at, Operand(Handle<Object>(cell)));
+  __ ld(at, FieldMemOperand(at, PropertyCell::kValueOffset));
+  __ BranchShort(&cache_miss, ne, map, Operand(at));
+  // We use Factory::the_hole_value() on purpose instead of loading from the
+  // root array to force relocation to be able to later patch
+  // with true or false. The distance from the map check has to be constant.
+  __ li(result, Operand(factory()->the_hole_value()));
+  __ Branch(&done);
+
+  // The inlined call site cache did not match. Check null and string before
+  // calling the deferred code.
+  __ bind(&cache_miss);
+  // Null is not instance of anything.
+  __ LoadRoot(temp, Heap::kNullValueRootIndex);
+  __ Branch(&false_result, eq, object, Operand(temp));
+
+  // String values are not instances of anything.
+  Condition cc = __ IsObjectStringType(object, temp, temp);
+  __ Branch(&false_result, cc, temp, Operand(zero_reg));
+
+  // Go to the deferred code.
+  __ Branch(deferred->entry());
+
+  __ bind(&false_result);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+  // Here result holds either true or false. The deferred code also produces
+  // a true or false object.
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                               Label* map_check) {
+  Register result = ToRegister(instr->result());
+  DCHECK(result.is(v0));
+
+  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kArgsInRegisters);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kCallSiteInlineCheck);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kReturnTrueFalseObject);
+  InstanceofStub stub(isolate(), flags);
+
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+
+  // Get the temp register reserved by the instruction. This needs to be a4
+  // as its slot in the pushed block of safepoint registers is used to
+  // communicate the offset to the location of the map check.
+  Register temp = ToRegister(instr->temp());
+  DCHECK(temp.is(a4));
+  __ li(InstanceofStub::right(), instr->function());
+  static const int kAdditionalDelta = 13;
+  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+  Label before_push_delta;
+  __ bind(&before_push_delta);
+  {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ li(temp, Operand(delta * kIntSize), CONSTANT_SIZE);
+    __ StoreToSafepointRegisterSlot(temp, temp);
+  }
+  CallCodeGeneric(stub.GetCode(),
+                  RelocInfo::CODE_TARGET,
+                  instr,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  // Put the result value into the result register slot and
+  // restore all registers.
+  __ StoreToSafepointRegisterSlot(result, result);
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // On MIPS there is no need for a "no inlined smi code" marker (nop).
+
+  Condition condition = ComputeCompareCondition(op);
+  // A minor optimization that relies on LoadRoot always emitting one
+  // instruction.
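+  // The true-value LoadRoot sits in the branch delay slot, so it executes
+  // on both paths; the fall-through overwrites it with the false value
+  // when the condition does not hold.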
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+  Label done, check;
+  __ Branch(USE_DELAY_SLOT, &done, condition, v0, Operand(zero_reg));
+  __ bind(&check);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+  DCHECK_EQ(1, masm()->InstructionsGeneratedSince(&check));
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in v0. Since we're leaving
+    // the code managed by the register allocator and tearing down the frame,
+    // it's safe to write to the context register.
+    __ push(v0);
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+  int no_frame_start = -1;
+  if (NeedsEagerFrame()) {
+    __ mov(sp, fp);
+    no_frame_start = masm_->pc_offset();
+    __ Pop(ra, fp);
+  }
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
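+    // The +1 accounts for the receiver slot.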
+    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+    if (sp_delta != 0) {
+      __ Daddu(sp, sp, Operand(sp_delta));
+    }
+  } else {
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+    __ SmiUntag(reg);
+    __ dsll(at, reg, kPointerSizeLog2);
+    __ Daddu(sp, sp, at);
+  }
+
+  __ Jump(ra);
+
+  if (no_frame_start != -1) {
+    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+  }
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+  Register result = ToRegister(instr->result());
+  __ li(at, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+  __ ld(result, FieldMemOperand(at, Cell::kValueOffset));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr, result, Operand(at));
+  }
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ li(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
+  __ li(VectorLoadICDescriptor::SlotRegister(),
+        Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+            .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  }
+  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register value = ToRegister(instr->value());
+  Register cell = scratch0();
+
+  // Load the cell.
+  __ li(cell, Operand(instr->hydrogen()->cell().handle()));
+
+  // If the cell we are storing to contains the hole it could have
+  // been deleted from the property dictionary. In that case, we need
+  // to update the property details in the property dictionary to mark
+  // it as no longer deleted.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    // We use a temp to check the payload.
+    Register payload = ToRegister(instr->temp());
+    __ ld(payload, FieldMemOperand(cell, Cell::kValueOffset));
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr, payload, Operand(at));
+  }
+
+  // Store the value.
+  __ sd(value, FieldMemOperand(cell, Cell::kValueOffset));
+  // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+
+  __ ld(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, result, Operand(at));
+    } else {
+      Label is_not_hole;
+      __ Branch(&is_not_hole, ne, result, Operand(at));
+      __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+      __ bind(&is_not_hole);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  MemOperand target = ContextOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ ld(scratch, target);
+    __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, scratch, Operand(at));
+    } else {
+      __ Branch(&skip_assignment, ne, scratch, Operand(at));
+    }
+  }
+
+  __ sd(value, target);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context,
+                              target.offset(),
+                              value,
+                              scratch0(),
+                              GetRAState(),
+                              kSaveFPRegs,
+                              EMIT_REMEMBERED_SET,
+                              check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  Register object = ToRegister(instr->object());
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = MemOperand(object, offset);
+    __ Load(result, operand, access.representation());
+    return;
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ ldc1(result, FieldMemOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+
+  Representation representation = access.representation();
+  if (representation.IsSmi() && SmiValuesAre32Bits() &&
+      instr->hydrogen()->representation().IsInteger32()) {
+    if (FLAG_debug_code) {
+      // Verify this is really a Smi.
+      Register scratch = scratch0();
+      __ Load(scratch, FieldMemOperand(object, offset), representation);
+      __ AssertSmi(scratch);
+    }
+
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
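+    // With 32-bit smis the payload lives in the upper word: e.g. Smi(5) is
+    // 5 << 32, so on a little-endian target the int32 value sits at byte
+    // offset kPointerSize / 2 from the start of the field.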
+    offset += kPointerSize / 2;
+    representation = Representation::Integer32();
+  }
+  __ Load(result, FieldMemOperand(object, offset), representation);
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  // Name is always in a2.
+  __ li(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  }
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ ld(result,
+         FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
+  DeoptimizeIf(eq, instr, result, Operand(at));
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ GetObjectType(result, scratch, scratch);
+  __ Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+  // Get the prototype from the initial map.
+  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting from length accounts for one of them; add one more for the
+  // other.
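+  // For example, with a constant length of 3 and a constant index of 0, the
+  // first argument is loaded from MemOperand(arguments, 4 * kPointerSize).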
+  if (instr->length()->IsConstantOperand()) {
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    if (instr->index()->IsConstantOperand()) {
+      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+      int index = (const_length - const_index) + 1;
+      __ ld(result, MemOperand(arguments, index * kPointerSize));
+    } else {
+      Register index = ToRegister(instr->index());
+      __ li(at, Operand(const_length + 1));
+      __ Dsubu(result, at, index);
+      __ dsll(at, result, kPointerSizeLog2);
+      __ Daddu(at, arguments, at);
+      __ ld(result, MemOperand(at));
+    }
+  } else if (instr->index()->IsConstantOperand()) {
+    Register length = ToRegister(instr->length());
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int loc = const_index - 1;
+    if (loc != 0) {
+      __ Dsubu(result, length, Operand(loc));
+      __ dsll(at, result, kPointerSizeLog2);
+      __ Daddu(at, arguments, at);
+      __ ld(result, MemOperand(at));
+    } else {
+      __ dsll(at, length, kPointerSizeLog2);
+      __ Daddu(at, arguments, at);
+      __ ld(result, MemOperand(at));
+    }
+  } else {
+    Register length = ToRegister(instr->length());
+    Register index = ToRegister(instr->index());
+    __ Dsubu(result, length, index);
+    __ Daddu(result, result, 1);
+    __ dsll(at, result, kPointerSizeLog2);
+    __ Daddu(at, arguments, at);
+    __ ld(result, MemOperand(at));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+      : element_size_shift;
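+  // With smi keys and 32-bit smi values shift_size is element_size_shift - 32
+  // (e.g. -29 for FLOAT64_ELEMENTS), so the dsra below untags the key and
+  // scales it by the element size in a single shift.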
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+      elements_kind == FLOAT32_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+      elements_kind == FLOAT64_ELEMENTS) {
+    FPURegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      __ Daddu(scratch0(), external_pointer,
+          constant_key << element_size_shift);
+    } else {
+      if (shift_size < 0) {
+        if (shift_size == -32) {
+          __ dsra32(scratch0(), key, 0);
+        } else {
+          __ dsra(scratch0(), key, -shift_size);
+        }
+      } else {
+        __ dsll(scratch0(), key, shift_size);
+      }
+      __ Daddu(scratch0(), scratch0(), external_pointer);
+    }
+    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+        elements_kind == FLOAT32_ELEMENTS) {
+      __ lwc1(result, MemOperand(scratch0(), base_offset));
+      __ cvt_d_s(result, result);
+    } else {  // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS.
+      __ ldc1(result, MemOperand(scratch0(), base_offset));
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    MemOperand mem_operand = PrepareKeyedOperand(
+        key, external_pointer, key_is_constant, constant_key,
+        element_size_shift, shift_size, base_offset);
+    switch (elements_kind) {
+      case EXTERNAL_INT8_ELEMENTS:
+      case INT8_ELEMENTS:
+        __ lb(result, mem_operand);
+        break;
+      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+      case EXTERNAL_UINT8_ELEMENTS:
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        __ lbu(result, mem_operand);
+        break;
+      case EXTERNAL_INT16_ELEMENTS:
+      case INT16_ELEMENTS:
+        __ lh(result, mem_operand);
+        break;
+      case EXTERNAL_UINT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        __ lhu(result, mem_operand);
+        break;
+      case EXTERNAL_INT32_ELEMENTS:
+      case INT32_ELEMENTS:
+        __ lw(result, mem_operand);
+        break;
+      case EXTERNAL_UINT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        __ lw(result, mem_operand);
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          DeoptimizeIf(Ugreater_equal, instr, result, Operand(0x80000000));
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case EXTERNAL_FLOAT32_ELEMENTS:
+      case EXTERNAL_FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+
+  int base_offset = instr->base_offset();
+  if (key_is_constant) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    base_offset += constant_key * kDoubleSize;
+  }
+  __ Daddu(scratch, elements, Operand(base_offset));
+
+  if (!key_is_constant) {
+    key = ToRegister(instr->key());
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+        : element_size_shift;
+    if (shift_size > 0) {
+      __ dsll(at, key, shift_size);
+    } else if (shift_size == -32) {
+      __ dsra32(at, key, 0);
+    } else {
+      __ dsra(at, key, -shift_size);
+    }
+    __ Daddu(scratch, scratch, at);
+  }
+
+  __ ldc1(result, MemOperand(scratch));
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
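+    // The hole is a NaN with a fixed bit pattern, so comparing just its
+    // upper 32 bits (read from offset sizeof(kHoleNanLower32)) against
+    // kHoleNanUpper32 is enough to detect it.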
+    __ lw(scratch, MemOperand(scratch, sizeof(kHoleNanLower32)));
+    DeoptimizeIf(eq, instr, scratch, Operand(kHoleNanUpper32));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  HLoadKeyed* hinstr = instr->hydrogen();
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  if (instr->key()->IsConstantOperand()) {
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    Register key = ToRegister(instr->key());
+    // Even though the HLoadKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bound check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (instr->hydrogen()->key()->representation().IsSmi()) {
+      __ SmiScale(scratch, key, kPointerSizeLog2);
+      __ daddu(scratch, elements, scratch);
+    } else {
+      __ dsll(scratch, key, kPointerSizeLog2);
+      __ daddu(scratch, elements, scratch);
+    }
+  }
+
+  Representation representation = hinstr->representation();
+  if (representation.IsInteger32() && SmiValuesAre32Bits() &&
+      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+    DCHECK(!hinstr->RequiresHoleCheck());
+    if (FLAG_debug_code) {
+      Register temp = scratch1();
+      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
+      __ AssertSmi(temp);
+    }
+
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+  }
+
+  __ Load(result, MemOperand(store_base, offset), representation);
+
+  // Check for the hole value.
+  if (hinstr->RequiresHoleCheck()) {
+    if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
+      __ SmiTst(result, scratch);
+      DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
+    } else {
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      DeoptimizeIf(eq, instr, result, Operand(scratch));
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_typed_elements()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key,
+                                         Register base,
+                                         bool key_is_constant,
+                                         int constant_key,
+                                         int element_size,
+                                         int shift_size,
+                                         int base_offset) {
+  if (key_is_constant) {
+    return MemOperand(base, (constant_key << element_size) + base_offset);
+  }
+
+  if (base_offset == 0) {
+    if (shift_size >= 0) {
+      __ dsll(scratch0(), key, shift_size);
+      __ Daddu(scratch0(), base, scratch0());
+      return MemOperand(scratch0());
+    } else {
+      if (shift_size == -32) {
+        __ dsra32(scratch0(), key, 0);
+      } else {
+        __ dsra(scratch0(), key, -shift_size);
+      }
+      __ Daddu(scratch0(), base, scratch0());
+      return MemOperand(scratch0());
+    }
+  }
+
+  if (shift_size >= 0) {
+    __ dsll(scratch0(), key, shift_size);
+    __ Daddu(scratch0(), base, scratch0());
+    return MemOperand(scratch0(), base_offset);
+  } else {
+    if (shift_size == -32) {
+      __ dsra32(scratch0(), key, 0);
+    } else {
+      __ dsra(scratch0(), key, -shift_size);
+    }
+    __ Daddu(scratch0(), base, scratch0());
+    return MemOperand(scratch0(), base_offset);
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register temp = scratch1();
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
+    __ Dsubu(result, sp, 2 * kPointerSize);
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ ld(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ ld(result, MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ Xor(temp, result, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+
+    // Result is the frame pointer for the frame if not adapted and for the real
+    // frame below the adaptor frame if adapted.
+    __ Movn(result, fp, temp);  // Move only if temp is not equal to zero (ne).
+    __ Movz(result, scratch, temp);  // Move only if temp is equal to zero (eq).
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ Daddu(result, zero_reg, Operand(scope()->num_parameters()));
+  __ Branch(&done, eq, fp, Operand(elem));
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ ld(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ ld(result,
+        MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, result_in_receiver;
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver to object for strict mode functions.
+    __ ld(scratch,
+           FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+
+    // Do not transform the receiver to object for builtins.
+    int32_t strict_mode_function_mask =
+        1 << SharedFunctionInfo::kStrictModeBitWithinByte;
+    int32_t native_mask = 1 << SharedFunctionInfo::kNativeBitWithinByte;
+
+    __ lbu(at,
+           FieldMemOperand(scratch, SharedFunctionInfo::kStrictModeByteOffset));
+    __ And(at, at, Operand(strict_mode_function_mask));
+    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
+    __ lbu(at,
+           FieldMemOperand(scratch, SharedFunctionInfo::kNativeByteOffset));
+    __ And(at, at, Operand(native_mask));
+    __ Branch(&result_in_receiver, ne, at, Operand(zero_reg));
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(&global_object, eq, receiver, Operand(scratch));
+
+  // Deoptimize if the receiver is not a JS object.
+  __ SmiTst(receiver, scratch);
+  DeoptimizeIf(eq, instr, scratch, Operand(zero_reg));
+
+  __ GetObjectType(receiver, scratch, scratch);
+  DeoptimizeIf(lt, instr, scratch, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ Branch(&result_in_receiver);
+
+  __ bind(&global_object);
+  __ ld(result, FieldMemOperand(function, JSFunction::kContextOffset));
+  __ ld(result,
+        ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+  __ ld(result,
+        FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+
+  if (result.is(receiver)) {
+    __ bind(&result_in_receiver);
+  } else {
+    Label result_ok;
+    __ Branch(&result_ok);
+    __ bind(&result_in_receiver);
+    __ mov(result, receiver);
+    __ bind(&result_ok);
+  }
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DCHECK(receiver.is(a0));  // Used for parameter count.
+  DCHECK(function.is(a1));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
+  DeoptimizeIf(hi, instr, length, Operand(kArgumentsLimit));
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ Move(receiver, length);
+  // The arguments are at a one pointer size offset from elements.
+  __ Daddu(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ Branch(USE_DELAY_SLOT, &invoke, eq, length, Operand(zero_reg));
+  __ dsll(scratch, length, kPointerSizeLog2);
+  __ bind(&loop);
+  __ Daddu(scratch, elements, scratch);
+  __ ld(scratch, MemOperand(scratch));
+  __ push(scratch);
+  __ Dsubu(length, length, Operand(1));
+  __ Branch(USE_DELAY_SLOT, &loop, ne, length, Operand(zero_reg));
+  __ dsll(scratch, length, kPointerSizeLog2);
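+  // The dsll above executes in the branch delay slot (USE_DELAY_SLOT),
+  // recomputing the byte offset of the element to push on the next iteration.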
+
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(
+      this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver, which is a0, as expected
+  // by InvokeFunction.
+  ParameterCount actual(receiver);
+  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort(kDoPushArgumentNotImplementedForDoubleType);
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, at);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) {
+  __ Drop(instr->count());
+}
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ ld(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ ld(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    DCHECK(result.is(cp));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  __ li(scratch0(), instr->hydrogen()->pairs());
+  __ li(scratch1(), Operand(Smi::FromInt(instr->hydrogen()->flags())));
+  // The context is the first argument.
+  __ Push(cp, scratch0(), scratch1());
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count,
+                                 int arity,
+                                 LInstruction* instr,
+                                 A1State a1_state) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  LPointerMap* pointers = instr->pointer_map();
+
+  if (can_invoke_directly) {
+    if (a1_state == A1_UNINITIALIZED) {
+      __ li(a1, function);
+    }
+
+    // Change context.
+    __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+    // Set a0 to the arguments count if adaptation is not needed. Assumes
+    // that a0 is available to write to at this point.
+    if (dont_adapt_arguments) {
+      __ li(a0, Operand(arity));
+    }
+
+    // Invoke function.
+    __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+    __ Call(at);
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ ld(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  DeoptimizeIf(ne, instr, scratch, Operand(at));
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ Move(result, input);
+  __ And(at, exponent, Operand(HeapNumber::kSignMask));
+  __ Branch(&done, eq, at, Operand(zero_reg));
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(a1) ? a0 : a1;
+    Register tmp2 = input.is(a2) ? a0 : a2;
+    Register tmp3 = input.is(a3) ? a0 : a3;
+    Register tmp4 = input.is(a4) ? a0 : a4;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ Branch(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(v0)) __ mov(tmp1, v0);
+    // Restore input_reg after call to runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ lwu(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    __ And(exponent, exponent, Operand(~HeapNumber::kSignMask));
+    __ sw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ lwu(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ sw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+  Label done;
+  __ Branch(USE_DELAY_SLOT, &done, ge, input, Operand(zero_reg));
+  __ mov(result, input);
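+  // The mov above executes in the branch delay slot: a non-negative input is
+  // copied to result and the negation below is skipped.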
+  __ dsubu(result, zero_reg, input);
+  // Overflow if result is still negative, i.e. 0x80000000.
+  DeoptimizeIf(lt, instr, result, Operand(zero_reg));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LMathAbs* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    FPURegister input = ToDoubleRegister(instr->value());
+    FPURegister result = ToDoubleRegister(instr->result());
+    __ abs_d(result, input);
+  } else if (r.IsSmiOrInteger32()) {
+    EmitIntegerMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new(zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitIntegerMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  Register except_flag = ToRegister(instr->temp());
+
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     result,
+                     input,
+                     scratch1,
+                     double_scratch0(),
+                     except_flag);
+
+  // Deopt if the operation did not succeed.
+  DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    Label done;
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ mfhc1(scratch1, input);  // Get exponent/sign bits.
+    __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+  Register scratch = scratch0();
+  Label done, check_sign_on_zero;
+
+  // Extract exponent bits.
+  __ mfhc1(result, input);
+  __ Ext(scratch,
+         result,
+         HeapNumber::kExponentShift,
+         HeapNumber::kExponentBits);
+
+  // If the number is in ]-0.5, +0.5[, the result is +/- 0.
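+  // For example, 0.25 has biased exponent kExponentBias - 2, so the 'gt'
+  // branch below is not taken and the result is set to zero.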
+  Label skip1;
+  __ Branch(&skip1, gt, scratch, Operand(HeapNumber::kExponentBias - 2));
+  __ mov(result, zero_reg);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ Branch(&check_sign_on_zero);
+  } else {
+    __ Branch(&done);
+  }
+  __ bind(&skip1);
+
+  // The following conversion will not work with numbers
+  // outside of ]-2^32, 2^32[.
+  DeoptimizeIf(ge, instr, scratch, Operand(HeapNumber::kExponentBias + 32));
+
+  // Save the original sign for later comparison.
+  __ And(scratch, result, Operand(HeapNumber::kSignMask));
+
+  __ Move(double_scratch0(), 0.5);
+  __ add_d(double_scratch0(), input, double_scratch0());
+
+  // Check sign of the result: if the sign changed, the input
+  // value was in ]-0.5, 0[ and the result should be -0.
+  __ mfhc1(result, double_scratch0());
+  // mfhc1 sign-extends, clear the upper bits.
+  __ dsll32(result, result, 0);
+  __ dsrl32(result, result, 0);
+  __ Xor(result, result, Operand(scratch));
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // ARM uses 'mi' here, which is 'lt'.
+    DeoptimizeIf(lt, instr, result, Operand(zero_reg));
+  } else {
+    Label skip2;
+    // ARM uses 'mi' here, which is 'lt'; negating it gives 'ge'.
+    __ Branch(&skip2, ge, result, Operand(zero_reg));
+    __ mov(result, zero_reg);
+    __ Branch(&done);
+    __ bind(&skip2);
+  }
+
+  Register except_flag = scratch;
+  __ EmitFPUTruncate(kRoundToMinusInf,
+                     result,
+                     double_scratch0(),
+                     at,
+                     double_scratch1,
+                     except_flag);
+
+  DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ Branch(&done, ne, result, Operand(zero_reg));
+    __ bind(&check_sign_on_zero);
+    __ mfhc1(scratch, input);  // Get exponent/sign bits.
+    __ And(scratch, scratch, Operand(HeapNumber::kSignMask));
+    DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ cvt_s_d(result, input);
+  __ cvt_d_s(result, result);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ sqrt_d(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = ToDoubleRegister(instr->temp());
+
+  DCHECK(!input.is(result));
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done;
+  __ Move(temp, -V8_INFINITY);
+  __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
+  // Set up Infinity in the delay slot.
+  // result is overwritten if the branch is not taken.
+  __ neg_d(result, temp);
+
+  // Add +0 to convert -0 to +0.
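+  // (IEEE-754 sqrt(-0) is -0, while Math.pow(-0, 0.5) must return +0.)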
+  __ add_d(result, input, kDoubleRegZero);
+  __ sqrt_d(result, result);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  // Having marked this as a call, we can use any registers.
+  // Just make sure that the input/output registers are the expected ones.
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(f4));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(f2));
+  DCHECK(ToDoubleRegister(instr->result()).is(f0));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
+    Label no_deopt;
+    __ JumpIfSmi(tagged_exponent, &no_deopt);
+    DCHECK(!a7.is(tagged_exponent));
+    __ lw(a7, FieldMemOperand(tagged_exponent, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(ne, instr, a7, Operand(at));
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DoubleRegister double_scratch2 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(
+      masm(), input, result, double_scratch1, double_scratch2,
+      temp1, temp2, scratch0());
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::math_log_double_function(isolate()),
+                   0, 1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  __ Clz(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(a1, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(),
+                      instr,
+                      A1_CONTAINS_TARGET);
+  }
+}
+
+
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+  DCHECK(receiver.is(a1));
+  DCHECK(name.is(a2));
+
+  Register scratch = a3;
+  Register extra = a4;
+  Register extra2 = a5;
+  Register extra3 = a6;
+
+  // Important for the tail-call.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, extra, extra2, extra3);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+  LoadIC::GenerateMiss(masm());
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+  if (instr->target()->IsConstantOperand()) {
+    LConstantOperand* target = LConstantOperand::cast(instr->target());
+    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+    __ Call(code, RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK(instr->target()->IsRegister());
+    Register target = ToRegister(instr->target());
+    generator.BeforeCall(__ CallSize(target));
+    __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ Call(target);
+  }
+  generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  if (instr->hydrogen()->pass_argument_count()) {
+    __ li(a0, Operand(instr->arity()));
+  }
+
+  // Change context.
+  __ ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // Load the code entry address.
+  __ ld(at, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  __ Call(at);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  __ li(a0, Operand(instr->arity()));
+  // No cell in a2 for construct type feedback in optimized code.
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(a1));
+  DCHECK(ToRegister(instr->result()).is(v0));
+
+  __ li(a0, Operand(instr->arity()));
+  __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
+  ElementsKind kind = instr->hydrogen()->elements_kind();
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // We might need the holey kind: look at the first argument (the
+      // length); a packed array can only be used when it is zero.
+      __ ld(a5, MemOperand(sp, 0));
+      __ Branch(&packed_case, eq, a5, Operand(zero_reg));
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(),
+                                              holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+      __ jmp(&done);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ Daddu(code_object, code_object,
+           Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ sd(code_object,
+        FieldMemOperand(function, JSFunction::kCodeEntryOffset));
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ Daddu(result, base, Operand(ToInteger32(offset)));
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ Daddu(result, base, offset);
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  Representation representation = instr->representation();
+
+  Register object = ToRegister(instr->object());
+  Register scratch2 = scratch1();
+  Register scratch1 = scratch0();
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  if (access.IsExternalMemory()) {
+    Register value = ToRegister(instr->value());
+    MemOperand operand = MemOperand(object, offset);
+    __ Store(value, operand, representation);
+    return;
+  }
+
+  __ AssertNotSmi(object);
+
+  DCHECK(!representation.IsSmi() ||
+         !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+  if (representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ sdc1(value, FieldMemOperand(object, offset));
+    return;
+  }
+
+  if (instr->hydrogen()->has_transition()) {
+    Handle<Map> transition = instr->hydrogen()->transition_map();
+    AddDeprecationDependency(transition);
+    __ li(scratch1, Operand(transition));
+    __ sd(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+    if (instr->hydrogen()->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object,
+                           scratch1,
+                           temp,
+                           GetRAState(),
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register destination = object;
+  if (!access.IsInobject()) {
+    destination = scratch1;
+    __ ld(destination, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  }
+  Register value = ToRegister(instr->value());
+  if (representation.IsSmi() && SmiValuesAre32Bits() &&
+      instr->hydrogen()->value()->representation().IsInteger32()) {
+    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    if (FLAG_debug_code) {
+      __ Load(scratch2, FieldMemOperand(destination, offset), representation);
+      __ AssertSmi(scratch2);
+    }
+
+    // Store int value directly to upper half of the smi.
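+    // This mirrors the upper-half read in DoLoadNamedField: since the entry
+    // is already an initialized smi (see the DCHECK above), its low word is
+    // zero and writing the int32 into the high word leaves a valid smi.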
+    offset += kPointerSize / 2;
+    representation = Representation::Integer32();
+  }
+
+  MemOperand operand = FieldMemOperand(destination, offset);
+  __ Store(value, operand, representation);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    // Update the write barrier for the object for in-object properties.
+    __ RecordWriteField(destination,
+                        offset,
+                        value,
+                        scratch2,
+                        GetRAState(),
+                        kSaveFPRegs,
+                        EMIT_REMEMBERED_SET,
+                        instr->hydrogen()->SmiCheckForWriteBarrier(),
+                        instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  __ li(StoreDescriptor::NameRegister(), Operand(instr->name()));
+  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Condition cc = instr->hydrogen()->allow_equality() ? hi : hs;
+  Operand operand((int64_t)0);
+  Register reg;
+  if (instr->index()->IsConstantOperand()) {
+    operand = ToOperand(instr->index());
+    reg = ToRegister(instr->length());
+    cc = CommuteCondition(cc);
+  } else {
+    reg = ToRegister(instr->index());
+    operand = ToOperand(instr->length());
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ Branch(&done, NegateCondition(cc), reg, operand);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, reg, operand);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+      ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+      : element_size_shift;
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+      elements_kind == FLOAT32_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+      elements_kind == FLOAT64_ELEMENTS) {
+    Register address = scratch0();
+    FPURegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      if (constant_key != 0) {
+        __ Daddu(address, external_pointer,
+                Operand(constant_key << element_size_shift));
+      } else {
+        address = external_pointer;
+      }
+    } else {
+      if (shift_size < 0) {
+        if (shift_size == -32) {
+          __ dsra32(address, key, 0);
+        } else {
+          __ dsra(address, key, -shift_size);
+        }
+      } else {
+        __ dsll(address, key, shift_size);
+      }
+      __ Daddu(address, external_pointer, address);
+    }
+
+    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+        elements_kind == FLOAT32_ELEMENTS) {
+      __ cvt_s_d(double_scratch0(), value);
+      __ swc1(double_scratch0(), MemOperand(address, base_offset));
+    } else {  // Storing doubles, not floats.
+      __ sdc1(value, MemOperand(address, base_offset));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand = PrepareKeyedOperand(
+        key, external_pointer, key_is_constant, constant_key,
+        element_size_shift, shift_size,
+        base_offset);
+    switch (elements_kind) {
+      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+      case EXTERNAL_INT8_ELEMENTS:
+      case EXTERNAL_UINT8_ELEMENTS:
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+      case INT8_ELEMENTS:
+        __ sb(value, mem_operand);
+        break;
+      case EXTERNAL_INT16_ELEMENTS:
+      case EXTERNAL_UINT16_ELEMENTS:
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        __ sh(value, mem_operand);
+        break;
+      case EXTERNAL_INT32_ELEMENTS:
+      case EXTERNAL_UINT32_ELEMENTS:
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        __ sw(value, mem_operand);
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case EXTERNAL_FLOAT32_ELEMENTS:
+      case EXTERNAL_FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DoubleRegister double_scratch = double_scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int base_offset = instr->base_offset();
+  Label not_nan, done;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  if (key_is_constant) {
+    int constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+    __ Daddu(scratch, elements,
+             Operand((constant_key << element_size_shift) + base_offset));
+  } else {
+    int shift_size = (instr->hydrogen()->key()->representation().IsSmi())
+        ? (element_size_shift - (kSmiTagSize + kSmiShiftSize))
+        : element_size_shift;
+    __ Daddu(scratch, elements, Operand(base_offset));
+    DCHECK((shift_size == 3) || (shift_size == -29));
+    if (shift_size == 3) {
+      __ dsll(at, ToRegister(instr->key()), 3);
+    } else if (shift_size == -29) {
+      __ dsra(at, ToRegister(instr->key()), 29);
+    }
+    __ Daddu(scratch, scratch, at);
+  }
+
+  if (instr->NeedsCanonicalization()) {
+    Label is_nan;
+    // Check for NaN. All NaNs must be canonicalized.
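+    // Otherwise an arbitrary NaN payload stored here could alias the bit
+    // pattern that encodes the hole in a fast double array.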
+    __ BranchF(NULL, &is_nan, eq, value, value);
+    __ Branch(&not_nan);
+
+    // Only load the canonical NaN if the comparison above detected a NaN.
+    __ bind(&is_nan);
+    __ LoadRoot(at, Heap::kNanValueRootIndex);
+    __ ldc1(double_scratch, FieldMemOperand(at, HeapNumber::kValueOffset));
+    __ sdc1(double_scratch, MemOperand(scratch, 0));
+    __ Branch(&done);
+  }
+
+  __ bind(&not_nan);
+  __ sdc1(value, MemOperand(scratch, 0));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key())
+      : no_reg;
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    // Even though the HLoadKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bound check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (instr->hydrogen()->key()->representation().IsSmi()) {
+      __ SmiScale(scratch, key, kPointerSizeLog2);
+      __ daddu(store_base, elements, scratch);
+    } else {
+      __ dsll(scratch, key, kPointerSizeLog2);
+      __ daddu(store_base, elements, scratch);
+    }
+  }
+
+  Representation representation = instr->hydrogen()->value()->representation();
+  if (representation.IsInteger32() && SmiValuesAre32Bits()) {
+    DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(instr->hydrogen()->elements_kind() == FAST_SMI_ELEMENTS);
+    if (FLAG_debug_code) {
+      Register temp = scratch1();
+      __ Load(temp, MemOperand(store_base, offset), Representation::Smi());
+      __ AssertSmi(temp);
+    }
+
+    // Store int value directly to upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    offset += kPointerSize / 2;
+    representation = Representation::Integer32();
+  }
+
+  __ Store(value, MemOperand(store_base, offset), representation);
+
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed =
+        instr->hydrogen()->value()->type().IsHeapObject()
+            ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ Daddu(key, store_base, Operand(offset));
+    __ RecordWrite(elements,
+                   key,
+                   value,
+                   GetRAState(),
+                   kSaveFPRegs,
+                   EMIT_REMEMBERED_SET,
+                   check_needed,
+                   instr->hydrogen()->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // By cases: external (typed) array, fast double array, or fast array.
+  if (instr->is_typed_elements()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  __ ld(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ Branch(&not_applicable, ne, scratch, Operand(from_map));
+
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ li(new_map_reg, Operand(to_map));
+    __ sd(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+    // Write barrier.
+    __ RecordWriteForMap(object_reg,
+                         new_map_reg,
+                         scratch,
+                         GetRAState(),
+                         kDontSaveFPRegs);
+  } else {
+    DCHECK(object_reg.is(a0));
+    DCHECK(ToRegister(instr->context()).is(cp));
+    PushSafepointRegistersScope scope(this);
+    __ li(a1, Operand(to_map));
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kLazyDeopt);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found,
+                                     ne, &no_memento_found);
+  DeoptimizeIf(al, instr);
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(a1));
+  DCHECK(ToRegister(instr->right()).is(a0));
+  StringAddStub stub(isolate(),
+                     instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredStringCharCodeAt(instr_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new(zone()) DeferredStringCharCodeAt(this, instr);
+  StringCharLoadGenerator::Generate(masm(),
+                                    ToRegister(instr->string()),
+                                    ToRegister(instr->index()),
+                                    ToRegister(instr->result()),
+                                    deferred->entry());
+  __ bind(deferred->exit());
+}
+
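+// A note on the LDeferredCode pattern used above and throughout this file:
+// the fast path is emitted inline, while Generate() emits the slow path out
+// of line later (see GenerateDeferredCode()).  Roughly:
+//
+//       <inline fast path>
+//       Branch(deferred->entry(), <slow condition>)   // rarely taken
+//     exit:                                           // deferred->exit()
+//       <rest of the function>
+//       ...
+//     entry:                                          // deferred->entry()
+//       <slow path, e.g. a runtime call>
+//       Branch(exit)
+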
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ Daddu(scratch, zero_reg, Operand(Smi::FromInt(const_index)));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+                          instr->context());
+  __ AssertSmi(v0);
+  __ SmiUntag(v0);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode FINAL : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new(zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  DCHECK(!char_code.is(result));
+
+  __ Branch(deferred->entry(), hi,
+            char_code, Operand(String::kMaxOneByteCharCode));
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ dsll(scratch, char_code, kPointerSizeLog2);
+  __ Daddu(result, result, scratch);
+  __ ld(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ Branch(deferred->entry(), eq, result, Operand(scratch));
+  __ bind(deferred->exit());
+}
+
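+// The fast path above is a lookup in the single-character string cache: a
+// FixedArray in the root set, indexed by character code, holding the
+// one-character strings for codes up to String::kMaxOneByteCharCode.  A slot
+// still holding undefined means that string has not been created yet, so the
+// code falls through to the deferred runtime call.
+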
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  DCHECK(output->IsDoubleRegister());
+  FPURegister single_scratch = double_scratch0().low();
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ ld(scratch, ToMemOperand(input));
+    __ mtc1(scratch, single_scratch);
+  } else {
+    __ mtc1(ToRegister(input), single_scratch);
+  }
+  __ cvt_d_w(ToDoubleRegister(output), single_scratch);
+}
+
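+// The conversion above moves the int32 bit pattern into an FPU register with
+// mtc1 and then converts in place: cvt.d.w reads its operand as a 32-bit
+// signed integer and produces the equivalent double, e.g. 5 -> 5.0 and
+// -1 -> -1.0.
+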
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+
+  FPURegister dbl_scratch = double_scratch0();
+  __ mtc1(ToRegister(input), dbl_scratch);
+  __ Cvt_d_uw(ToDoubleRegister(output), dbl_scratch, f22);  // TODO(plind): f22?
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU FINAL : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredNumberTagIU(instr_,
+                                       instr_->value(),
+                                       instr_->temp1(),
+                                       instr_->temp2(),
+                                       UNSIGNED_INT32);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LNumberTagU* instr_;
+  };
+
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
+  __ Branch(deferred->entry(), hi, input, Operand(Smi::kMaxValue));
+  __ SmiTag(result, input);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr,
+                                     LOperand* value,
+                                     LOperand* temp1,
+                                     LOperand* temp2,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register src = ToRegister(value);
+  Register dst = ToRegister(instr->result());
+  Register tmp1 = scratch0();
+  Register tmp2 = ToRegister(temp1);
+  Register tmp3 = ToRegister(temp2);
+  DoubleRegister dbl_scratch = double_scratch0();
+
+  if (signedness == SIGNED_INT32) {
+    // The value did not fit into a smi, so the tagging overflowed. Try to
+    // allocate a heap number in new space and store the value in there. If
+    // that fails, call the runtime system.
+    if (dst.is(src)) {
+      __ SmiUntag(src, dst);
+      __ Xor(src, src, Operand(0x80000000));
+    }
+    __ mtc1(src, dbl_scratch);
+    __ cvt_d_w(dbl_scratch, dbl_scratch);
+  } else {
+    __ mtc1(src, dbl_scratch);
+    __ Cvt_d_uw(dbl_scratch, dbl_scratch, f22);
+  }
+
+  if (FLAG_inline_new) {
+    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow, TAG_RESULT);
+    __ Branch(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ mov(dst, zero_reg);
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(v0, dst);
+  }
+
+  // Done. Store the value in dbl_scratch into the value field of the
+  // allocated heap number.
+  __ bind(&done);
+  __ sdc1(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD FINAL : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredNumberTagD(instr_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  DeferredNumberTagD* deferred = new(zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    // We want the untagged address first for performance
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry(),
+                          DONT_TAG_RESULT);
+  } else {
+    __ Branch(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ sdc1(input_reg, MemOperand(reg, HeapNumber::kValueOffset));
+  // Now that we have finished with the object's real address, tag it.
+  __ Daddu(reg, reg, kHeapObjectTag);
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ mov(reg, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ ld(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(
+      instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+  __ Dsubu(v0, v0, kHeapObjectTag);
+  __ StoreToSafepointRegisterSlot(v0, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ And(at, input, Operand(0x80000000));
+    DeoptimizeIf(ne, instr, at, Operand(zero_reg));
+  }
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ SmiTagCheckOverflow(output, input, at);
+    DeoptimizeIf(lt, instr, at, Operand(zero_reg));
+  } else {
+    __ SmiTag(output, input);
+  }
+}
+
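+// With 32-bit smi values, tagging is just a left shift by 32 bits, e.g.
+// Smi::FromInt(5) has the bit pattern 0x0000000500000000.  The uint32 check
+// above deoptimizes when bit 31 is set, since such values exceed
+// Smi::kMaxValue once reinterpreted as signed.
+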
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  if (instr->needs_check()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, the value of scratch won't be zero.
+    __ And(scratch, input, Operand(kHeapObjectTag));
+    __ SmiUntag(result, input);
+    DeoptimizeIf(ne, instr, scratch, Operand(zero_reg));
+  } else {
+    __ SmiUntag(result, input);
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                DoubleRegister result_reg,
+                                NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Register scratch = scratch0();
+  Label convert, load_smi, done;
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+    // Heap number map check.
+    __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    if (can_convert_undefined_to_nan) {
+      __ Branch(&convert, ne, scratch, Operand(at));
+    } else {
+      DeoptimizeIf(ne, instr, scratch, Operand(at));
+    }
+    // Load heap number.
+    __ ldc1(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+      __ mfc1(at, result_reg);
+      __ Branch(&done, ne, at, Operand(zero_reg));
+      __ mfhc1(scratch, result_reg);  // Get exponent/sign bits.
+      DeoptimizeIf(eq, instr, scratch, Operand(HeapNumber::kSignMask));
+    }
+    __ Branch(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      DeoptimizeIf(ne, instr, input_reg, Operand(at));
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ ldc1(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ Branch(&done);
+    }
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+  // Smi to double register conversion
+  __ bind(&load_smi);
+  // scratch: untagged value of input_reg
+  __ mtc1(scratch, result_reg);
+  __ cvt_d_w(result_reg, result_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->value());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->temp());
+  DoubleRegister double_scratch = double_scratch0();
+  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // The input is a tagged HeapObject.
+  // Heap number map check.
+  __ ld(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+  // Both the 'at' value and the map value in scratch1 are used by the tests
+  // in each clause of the if below.
+
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label no_heap_number, check_bools, check_false;
+    // Check HeapNumber map.
+    __ Branch(USE_DELAY_SLOT, &no_heap_number, ne, scratch1, Operand(at));
+    __ mov(scratch2, input_reg);  // In delay slot.
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ Branch(&done);
+
+    // Check for Oddballs. Undefined/False is converted to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(&check_bools, ne, input_reg, Operand(at));
+    DCHECK(ToRegister(instr->result()).is(input_reg));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
+
+    __ bind(&check_bools);
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(&check_false, ne, scratch2, Operand(at));
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ li(input_reg, Operand(1));  // In delay slot.
+
+    __ bind(&check_false);
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    DeoptimizeIf(ne, instr, scratch2, Operand(at), "cannot truncate");
+    __ Branch(USE_DELAY_SLOT, &done);
+    __ mov(input_reg, zero_reg);  // In delay slot.
+  } else {
+    DeoptimizeIf(ne, instr, scratch1, Operand(at), "not a heap number");
+
+    // Load the double value.
+    __ ldc1(double_scratch,
+            FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+
+    Register except_flag = scratch2;
+    __ EmitFPUTruncate(kRoundToZero,
+                       input_reg,
+                       double_scratch,
+                       scratch1,
+                       double_scratch2,
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg),
+                 "lost precision or NaN");
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ Branch(&done, ne, input_reg, Operand(zero_reg));
+
+      __ mfhc1(scratch1, double_scratch);  // Get exponent/sign bits.
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg), "minus zero");
+    }
+  }
+  __ bind(&done);
+}
+
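+// The truncating path above implements the ECMAScript ToInt32 conversion
+// used by the bitwise operators, so for example in JS:
+//
+//   4294967296.5 | 0   // == 0   (truncated, then taken mod 2^32)
+//   undefined | 0      // == 0
+//   true | 0           // == 1
+//
+// Anything that is neither a heap number nor one of those oddballs (and, in
+// the non-truncating case, anything inexact) causes a deoptimization.
+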
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI FINAL : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredTaggedToI(instr_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new(zone()) DeferredTaggedToI(this, instr);
+
+    // Let the deferred code handle the HeapObject case.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+
+    // Smi to int32 conversion.
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+      ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    Register except_flag = LCodeGen::scratch1();
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       result_reg,
+                       double_input,
+                       scratch1,
+                       double_scratch0(),
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ Branch(&done, ne, result_reg, Operand(zero_reg));
+      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = LCodeGen::scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    Register except_flag = LCodeGen::scratch1();
+
+    __ EmitFPUTruncate(kRoundToMinusInf,
+                       result_reg,
+                       double_input,
+                       scratch1,
+                       double_scratch0(),
+                       except_flag,
+                       kCheckForInexactConversion);
+
+    // Deopt if the operation did not succeed (except_flag != 0).
+    DeoptimizeIf(ne, instr, except_flag, Operand(zero_reg));
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ Branch(&done, ne, result_reg, Operand(zero_reg));
+      __ mfhc1(scratch1, double_input);  // Get exponent/sign bits.
+      __ And(scratch1, scratch1, Operand(HeapNumber::kSignMask));
+      DeoptimizeIf(ne, instr, scratch1, Operand(zero_reg));
+      __ bind(&done);
+    }
+  }
+  __ SmiTag(result_reg, result_reg);
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ SmiTst(ToRegister(input), at);
+  DeoptimizeIf(ne, instr, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ SmiTst(ToRegister(input), at);
+    DeoptimizeIf(eq, instr, at, Operand(zero_reg));
+  }
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ GetObjectType(input, scratch, scratch);
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr, scratch, Operand(first));
+    } else {
+      DeoptimizeIf(lo, instr, scratch, Operand(first));
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        DeoptimizeIf(hi, instr, scratch, Operand(last));
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
+      __ And(at, scratch, mask);
+      DeoptimizeIf(tag == 0 ? ne : eq, instr, at, Operand(zero_reg));
+    } else {
+      __ And(scratch, scratch, Operand(mask));
+      DeoptimizeIf(ne, instr, scratch, Operand(tag));
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  AllowDeferredHandleDereference smi_check;
+  if (isolate()->heap()->InNewSpace(*object)) {
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ li(at, Operand(Handle<Object>(cell)));
+    __ ld(at, FieldMemOperand(at, Cell::kValueOffset));
+    DeoptimizeIf(ne, instr, reg, Operand(at));
+  } else {
+    DeoptimizeIf(ne, instr, reg, Operand(object));
+  }
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
+    __ mov(cp, zero_reg);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(
+        instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(v0, scratch0());
+  }
+  __ SmiTst(scratch0(), at);
+  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps FINAL : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  Register map_reg = scratch0();
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+  __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new(zone()) DeferredCheckMaps(this, instr, reg);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMapAndBranch(map_reg, map, &success, eq, &success);
+  }
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  // Do the CompareMap() directly within the Branch() and DeoptimizeIf().
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ Branch(deferred->entry(), ne, map_reg, Operand(map));
+  } else {
+    DeoptimizeIf(ne, instr, map_reg, Operand(map));
+  }
+
+  __ bind(&success);
+}
+
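+// Two strategies above: for a stability check no code is emitted at all --
+// AddStabilityDependency() instead registers the compiled code to be
+// deoptimized if any of the expected maps later transitions.  Otherwise the
+// object's map is compared against each expected map in turn, and only a
+// mismatch on the final one deoptimizes (or jumps to the migration path).
+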
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  __ ClampDoubleToUint8(result_reg, value_reg, temp_reg);
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ UntagAndJumpIfSmi(scratch, input_reg, &is_smi);
+
+  // Check for heap number
+  __ ld(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ Branch(&heap_number, eq, scratch, Operand(factory()->heap_number_map()));
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  DeoptimizeIf(ne, instr, input_reg, Operand(factory()->undefined_value()));
+  __ mov(result_reg, zero_reg);
+  __ jmp(&done);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ ldc1(double_scratch0(), FieldMemOperand(input_reg,
+                                             HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, double_scratch0(), temp_reg);
+  __ jmp(&done);
+
+  __ bind(&is_smi);
+  __ ClampUint8(result_reg, scratch);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ FmoveHigh(result_reg, value_reg);
+  } else {
+    __ FmoveLow(result_reg, value_reg);
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+  __ Move(result_reg, lo_reg, hi_reg);
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate FINAL : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredAllocate(instr_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred =
+      new(zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+  }
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    if (size <= Page::kMaxRegularHeapObjectSize) {
+      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+    } else {
+      __ jmp(deferred->entry());
+    }
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ li(scratch, Operand(size - kHeapObjectTag));
+    } else {
+      __ Dsubu(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+    }
+    __ li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    Label loop;
+    __ bind(&loop);
+    __ Dsubu(scratch, scratch, Operand(kPointerSize));
+    __ Daddu(at, result, Operand(scratch));
+    __ sd(scratch2, MemOperand(at));
+    __ Branch(&loop, ge, scratch, Operand(zero_reg));
+  }
+}
+
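+// A note on the filler loop above: prefilling the new object with the
+// one-pointer filler map keeps the heap iterable between this allocation and
+// the code that initializes the real fields, since the GC could otherwise
+// observe uninitialized memory.
+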
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ mov(result, zero_reg);
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
+    __ SmiTag(size);
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    if (size >= 0 && size <= Smi::kMaxValue) {
+      __ li(v0, Operand(Smi::FromInt(size)));
+      __ Push(v0);
+    } else {
+      // We should never get here at runtime => abort
+      __ stop("invalid allocation size");
+      return;
+    }
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ li(v0, Operand(Smi::FromInt(flags)));
+  __ Push(v0);
+
+  CallRuntimeFromDeferred(
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(a0));
+  DCHECK(ToRegister(instr->result()).is(v0));
+  __ push(a0);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Label materialized;
+  // Registers will be used as follows:
+  // a7 = literals array.
+  // a1 = regexp literal.
+  // a0 = regexp literal clone.
+  // a2 and a4-a6 are used as temporaries.
+  int literal_offset =
+      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+  __ li(a7, instr->hydrogen()->literals());
+  __ ld(a1, FieldMemOperand(a7, literal_offset));
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  __ Branch(&materialized, ne, a1, Operand(at));
+
+  // Create the regexp literal using a runtime function.
+  // Result will be in v0.
+  __ li(a6, Operand(Smi::FromInt(instr->hydrogen()->literal_index())));
+  __ li(a5, Operand(instr->hydrogen()->pattern()));
+  __ li(a4, Operand(instr->hydrogen()->flags()));
+  __ Push(a7, a6, a5, a4);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+  __ mov(a1, v0);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+
+  __ Allocate(size, v0, a2, a3, &runtime_allocate, TAG_OBJECT);
+  __ jmp(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ li(a0, Operand(Smi::FromInt(size)));
+  __ Push(a1, a0);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+  __ pop(a1);
+
+  __ bind(&allocated);
+  // Copy the content into the newly allocated memory.
+  // (Unroll copy loop once for better throughput).
+  for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
+    __ ld(a3, FieldMemOperand(a1, i));
+    __ ld(a2, FieldMemOperand(a1, i + kPointerSize));
+    __ sd(a3, FieldMemOperand(v0, i));
+    __ sd(a2, FieldMemOperand(v0, i + kPointerSize));
+  }
+  if ((size % (2 * kPointerSize)) != 0) {
+    __ ld(a3, FieldMemOperand(a1, size - kPointerSize));
+    __ sd(a3, FieldMemOperand(v0, size - kPointerSize));
+  }
+}
+
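+// The copy above moves size / kPointerSize words in total.  For example, with
+// seven words (size == 7 * kPointerSize) the unrolled loop copies the pairs
+// at offsets 0, 2 * kPointerSize and 4 * kPointerSize, and the trailing
+// check copies the last word at offset 6 * kPointerSize.
+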
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (!pretenure && instr->hydrogen()->has_no_literals()) {
+    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+                            instr->hydrogen()->kind());
+    __ li(a2, Operand(instr->hydrogen()->shared_info()));
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ li(a2, Operand(instr->hydrogen()->shared_info()));
+    __ li(a1, Operand(pretenure ? factory()->true_value()
+                                : factory()->false_value()));
+    __ Push(cp, a2, a1);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  DCHECK(ToRegister(instr->result()).is(v0));
+  Register input = ToRegister(instr->value());
+  __ push(input);
+  CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  Register cmp1 = no_reg;
+  Operand cmp2 = Operand(no_reg);
+
+  Condition final_branch_condition = EmitTypeofIs(instr->TrueLabel(chunk_),
+                                                  instr->FalseLabel(chunk_),
+                                                  input,
+                                                  instr->type_literal(),
+                                                  &cmp1,
+                                                  &cmp2);
+
+  DCHECK(cmp1.is_valid());
+  DCHECK(!cmp2.is_reg() || cmp2.rm().is_valid());
+
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(instr, final_branch_condition, cmp1, cmp2);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label,
+                                 Label* false_label,
+                                 Register input,
+                                 Handle<String> type_name,
+                                 Register* cmp1,
+                                 Operand* cmp2) {
+  // This function makes heavy use of branch delay slots, using them to load
+  // values that are safe to compute regardless of the type of the input
+  // register.
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  Factory* factory = isolate()->factory();
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(input, true_label);
+    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
+    *cmp1 = input;
+    *cmp2 = Operand(at);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    __ Branch(USE_DELAY_SLOT, false_label,
+              ge, scratch, Operand(FIRST_NONSTRING_TYPE));
+    // input is an object, so we can load its bit field (Map::kBitFieldOffset)
+    // even if we take the other branch.
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, input, scratch);
+    *cmp1 = scratch;
+    *cmp2 = Operand(SYMBOL_TYPE);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ LoadRoot(at, Heap::kTrueValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    __ LoadRoot(at, Heap::kFalseValueRootIndex);
+    *cmp1 = at;
+    *cmp2 = Operand(input);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    // The first instruction of JumpIfSmi is an And - it is safe in the delay
+    // slot.
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ ld(input, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbu(at, FieldMemOperand(input, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);
+    final_branch_condition = ne;
+
+  } else if (String::Equals(type_name, factory->function_string())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ JumpIfSmi(input, false_label);
+    __ GetObjectType(input, scratch, input);
+    __ Branch(true_label, eq, input, Operand(JS_FUNCTION_TYPE));
+    *cmp1 = input;
+    *cmp2 = Operand(JS_FUNCTION_PROXY_TYPE);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ LoadRoot(at, Heap::kNullValueRootIndex);
+    __ Branch(USE_DELAY_SLOT, true_label, eq, at, Operand(input));
+    Register map = input;
+    __ GetObjectType(input, map, scratch);
+    __ Branch(false_label,
+              lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ Branch(USE_DELAY_SLOT, false_label,
+              gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    // map is still valid, so the bit field can be loaded in the delay slot.
+    // Check for undetectable objects => false.
+    __ lbu(at, FieldMemOperand(map, Map::kBitFieldOffset));
+    __ And(at, at, 1 << Map::kIsUndetectable);
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);
+    final_branch_condition = eq;
+
+  } else {
+    *cmp1 = at;
+    *cmp2 = Operand(zero_reg);  // Set to valid regs, to avoid caller assertion.
+    __ Branch(false_label);
+  }
+
+  return final_branch_condition;
+}
+
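+// For reference, the JS results this helper implements include:
+//
+//   typeof 1         == "number"      typeof "x"          == "string"
+//   typeof undefined == "undefined"   typeof Symbol()     == "symbol"
+//   typeof null      == "object"      typeof function(){} == "function"
+//
+// with undetectable objects (document.all-style) reporting "undefined".
+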
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+  Register temp1 = ToRegister(instr->temp());
+
+  EmitIsConstructCall(temp1, scratch0());
+
+  EmitBranch(instr, eq, temp1,
+             Operand(Smi::FromInt(StackFrame::CONSTRUCT)));
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+  DCHECK(!temp1.is(temp2));
+  // Get the frame pointer for the calling frame.
+  __ ld(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ ld(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+  __ Branch(&check_frame_marker, ne, temp2,
+            Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
+  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ ld(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (!info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= Assembler::kInstrSize;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
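+// Lazy deoptimization is applied by patching a call sequence over
+// already-emitted code, so the helper above pads with nops whenever the gap
+// since the last patch site is smaller than the patch sequence, guaranteeing
+// that two patch sites can never overlap.
+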
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+
+  DeoptimizeIf(al, instr, type, zero_reg, Operand(zero_reg),
+               instr->hydrogen()->reason());
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck FINAL : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) { }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredStackCheck(instr_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(&done, hs, sp, Operand(at));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
+    CallCode(isolate()->builtins()->StackCheck(),
+             RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new(zone()) DeferredStackCheck(this, instr);
+    __ LoadRoot(at, Heap::kStackLimitRootIndex);
+    __ Branch(deferred_stack_check->entry(), lo, sp, Operand(at));
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  Register result = ToRegister(instr->result());
+  Register object = ToRegister(instr->object());
+  __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+  DeoptimizeIf(eq, instr, object, Operand(at));
+
+  Register null_value = a5;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  DeoptimizeIf(eq, instr, object, Operand(null_value));
+
+  __ And(at, object, kSmiTagMask);
+  DeoptimizeIf(eq, instr, at, Operand(zero_reg));
+
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ GetObjectType(object, a1, a1);
+  DeoptimizeIf(le, instr, a1, Operand(LAST_JS_PROXY_TYPE));
+
+  Label use_cache, call_runtime;
+  DCHECK(object.is(a0));
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ ld(result, FieldMemOperand(object, HeapObject::kMapOffset));
+  __ Branch(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(object);
+  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+  __ ld(a1, FieldMemOperand(v0, HeapObject::kMapOffset));
+  DCHECK(result.is(v0));
+  __ LoadRoot(at, Heap::kMetaMapRootIndex);
+  DeoptimizeIf(ne, instr, a1, Operand(at));
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
+  __ Branch(&load_cache, ne, result, Operand(Smi::FromInt(0)));
+  __ li(result, Operand(isolate()->factory()->empty_fixed_array()));
+  __ jmp(&done);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ ld(result,
+        FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ ld(result,
+        FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  DeoptimizeIf(eq, instr, result, Operand(zero_reg));
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ ld(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  DeoptimizeIf(ne, instr, map, Operand(scratch0()));
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result,
+                                           Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object, index);
+  __ mov(cp, zero_reg);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(
+     instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(v0, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen,
+                              LLoadFieldByIndex* instr,
+                              Register result,
+                              Register object,
+                              Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {
+    }
+    virtual void Generate() OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new(zone()) DeferredLoadMutableDouble(
+      this, instr, result, object, index);
+
+  Label out_of_object, done;
+
+  __ And(scratch, index, Operand(Smi::FromInt(1)));
+  __ Branch(deferred->entry(), ne, scratch, Operand(zero_reg));
+  __ dsra(index, index, 1);
+
+  __ Branch(USE_DELAY_SLOT, &out_of_object, lt, index, Operand(zero_reg));
+  __ SmiScale(scratch, index, kPointerSizeLog2);  // In delay slot.
+  __ Daddu(scratch, object, scratch);
+  __ ld(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ Branch(&done);
+
+  __ bind(&out_of_object);
+  __ ld(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index is equal to the negated out-of-object property index plus 1.
+  __ Dsubu(scratch, result, scratch);
+  __ ld(result, FieldMemOperand(scratch,
+                                FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
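+// The smi index above is an encoded field descriptor: the low payload bit
+// (tested via Smi::FromInt(1)) selects the mutable-double slow path, and the
+// remaining bits give the field position -- non-negative for in-object
+// fields starting at JSObject::kHeaderSize, negative for fields stored in
+// the out-of-object properties array.
+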
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ sd(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ li(at, scope_info);
+  __ Push(at, ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, 2, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips64/lithium-codegen-mips64.h b/src/mips64/lithium-codegen-mips64.h
new file mode 100644
index 0000000..a4b7adb
--- /dev/null
+++ b/src/mips64/lithium-codegen-mips64.h
@@ -0,0 +1,450 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
+#define V8_MIPS64_LITHIUM_CODEGEN_MIPS64_H_
+
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/mips64/lithium-gap-resolver-mips64.h"
+#include "src/mips64/lithium-mips64.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen: public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        deoptimizations_(4, info->zone()),
+        jump_table_(4, info->zone()),
+        deoptimization_literals_(8, info->zone()),
+        inlined_function_count_(0),
+        scope_(info->scope()),
+        translations_(info->zone()),
+        deferred_(8, info->zone()),
+        osr_pc_offset_(-1),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 ||
+        info()->is_non_deferred_calling() ||
+        !info()->IsStub() ||
+        info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  RAStatus GetRAState() const {
+    return frame_is_built_ ? kRAHasBeenSaved : kRAHasNotBeenSaved;
+  }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  // LOperand is loaded into dbl_scratch, unless already a double register.
+  DoubleRegister EmitLoadDoubleRegister(LOperand* op,
+                                        FloatRegister flt_scratch,
+                                        DoubleRegister dbl_scratch);
+  int32_t ToRepresentation_donotuse(LConstantOperand* op,
+                                    const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr,
+                             LOperand* value,
+                             LOperand* temp1,
+                             LOperand* temp2,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                       Label* map_check);
+
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                   Register result,
+                                   Register object,
+                                   Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  MemOperand PrepareKeyedOperand(Register key,
+                                 Register base,
+                                 bool key_is_constant,
+                                 int constant_key,
+                                 int element_size,
+                                 int shift_size,
+                                 int base_offset);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
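+  // DECLARE_DO above expands to one declaration per concrete instruction,
+  // e.g. (illustrative): void DoAddI(LAddI* node);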
+
+ private:
+  StrictMode strict_mode() const { return info()->strict_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  Register scratch0() { return kLithiumScratchReg; }
+  Register scratch1() { return kLithiumScratchReg2; }
+  DoubleRegister double_scratch0() { return kLithiumScratchDouble; }
+
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true,
+                       Label* if_false,
+                       Handle<String> class_name,
+                       Register input,
+                       Register temporary,
+                       Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code,
+                RelocInfo::Mode mode,
+                LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code,
+                       RelocInfo::Mode mode,
+                       LInstruction* instr,
+                       SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function,
+                   int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void LoadContextFromDeferred(LOperand* context);
+  void CallRuntimeFromDeferred(Runtime::FunctionId id,
+                               int argc,
+                               LInstruction* instr,
+                               LOperand* context);
+
+  enum A1State {
+    A1_UNINITIALIZED,
+    A1_CONTAINS_TARGET
+  };
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in a1.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count,
+                         int arity,
+                         LInstruction* instr,
+                         A1State a1_state);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Deoptimizer::BailoutType bailout_type,
+                    Register src1 = zero_reg,
+                    const Operand& src2 = Operand(zero_reg),
+                    const char* detail = NULL);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    Register src1 = zero_reg,
+                    const Operand& src2 = Operand(zero_reg),
+                    const char* detail = NULL);
+
+  void AddToTranslation(LEnvironment* environment,
+                        Translation* translation,
+                        LOperand* op,
+                        bool is_tagged,
+                        bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  MemOperand BuildSeqStringOperand(Register string,
+                                   LOperand* index,
+                                   String::Encoding encoding);
+
+  void EmitIntegerMathAbs(LMathAbs* instr);
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers,
+                       Safepoint::Kind kind,
+                       int arguments,
+                       Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers,
+                                    int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) OVERRIDE;
+
+  static Condition TokenToCondition(Token::Value op, bool is_unsigned);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template<class InstrType>
+  void EmitBranch(InstrType instr,
+                  Condition condition,
+                  Register src1,
+                  const Operand& src2);
+  template<class InstrType>
+  void EmitBranchF(InstrType instr,
+                   Condition condition,
+                   FPURegister src1,
+                   FPURegister src2);
+  template<class InstrType>
+  void EmitFalseBranch(InstrType instr,
+                       Condition condition,
+                       Register src1,
+                       const Operand& src2);
+  template<class InstrType>
+  void EmitFalseBranchF(InstrType instr,
+                        Condition condition,
+                        FPURegister src1,
+                        FPURegister src2);
+  void EmitCmpI(LOperand* left, LOperand* right);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DoubleRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  // Returns two registers in cmp1 and cmp2 that can be used in the
+  // Branch instruction after EmitTypeofIs.
+  Condition EmitTypeofIs(Label* true_label,
+                         Label* false_label,
+                         Register input,
+                         Handle<String> type_name,
+                         Register* cmp1,
+                         Operand* cmp2);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input,
+                         Register temp1,
+                         Register temp2,
+                         Label* is_not_object,
+                         Label* is_object);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input,
+                         Register temp1,
+                         Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code for %_IsConstructCall().
+  // Caller should branch on equal condition.
+  void EmitIsConstructCall(Register temp1, Register temp2);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object,
+                    Register result,
+                    Register source,
+                    int* offset,
+                    AllocationSiteMode mode);
+  // Emit optimized code for integer division.
+  // Inputs are signed.
+  // All registers are clobbered.
+  // If 'remainder' is no_reg, it is not computed.
+  void EmitSignedIntegerDivisionByConstant(Register result,
+                                           Register dividend,
+                                           int32_t divisor,
+                                           Register remainder,
+                                           Register scratch,
+                                           LEnvironment* environment);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiler from a set of parallel moves to a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->push(ra);
+      codegen_->masm_->CallStub(&stub);
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->push(ra);
+      codegen_->masm_->CallStub(&stub);
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
+};
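+
+// A typical deferred-code subclass, sketched here for orientation only
+// (the concrete classes live in lithium-codegen-mips64.cc):
+//
+//   class DeferredNumberTagD FINAL : public LDeferredCode {
+//    public:
+//     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+//         : LDeferredCode(codegen), instr_(instr) {}
+//     void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+//     LInstruction* instr() OVERRIDE { return instr_; }
+//    private:
+//     LNumberTagD* instr_;
+//   };
+//
+// Fast-path code branches to entry() for the uncommon case; after
+// Generate() runs, control resumes at exit().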
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_LITHIUM_CODEGEN_MIPS_H_
diff --git a/src/mips64/lithium-gap-resolver-mips64.cc b/src/mips64/lithium-gap-resolver-mips64.cc
new file mode 100644
index 0000000..d965f65
--- /dev/null
+++ b/src/mips64/lithium-gap-resolver-mips64.cc
@@ -0,0 +1,300 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/mips64/lithium-codegen-mips64.h"
+#include "src/mips64/lithium-gap-resolver-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32, owner->zone()),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants so they are performed last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
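+
+// Worked example (sketch): for the parallel move {a0 -> a1, a1 -> a0,
+// #const -> a2}, the first pass resolves the register cycle through a
+// scratch register, and only the second pass materializes the constant
+// into a2, so a2 stays available for the whole first pass.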
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // A cycle can only be found, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill this spilled value to
+  // the stack, to free up the register.
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move it must be moves_[root_index_]
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    DCHECK(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
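+
+// Illustrative trace: with moves_ = [a0 -> a1, a1 -> a0], PerformMove(0)
+// marks move 0 pending and recurses into PerformMove(1), which finds that
+// it is blocked by the pending root move and calls BreakCycle(1): a1 is
+// saved to a scratch register and move 1 is eliminated. Move 0 then
+// executes (a0 -> a1), and RestoreValue() writes the saved value to a0.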
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index_].  After performing all moves in the tree rooted
+  // in that move, we save the value to that source.
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mov(kLithiumScratchReg, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ ld(kLithiumScratchReg, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ mov_d(kLithiumScratchDouble, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ ldc1(kLithiumScratchDouble, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
+
+  // Spilled value is in kLithiumScratchReg or kLithiumScratchDouble.
+  if (saved_destination_->IsRegister()) {
+    __ mov(cgen_->ToRegister(saved_destination_), kLithiumScratchReg);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ sd(kLithiumScratchReg, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ mov_d(cgen_->ToDoubleRegister(saved_destination_),
+            kLithiumScratchDouble);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ sdc1(kLithiumScratchDouble,
+            cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(cgen_->ToRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      __ sd(source_register, cgen_->ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ld(cgen_->ToRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        if (!destination_operand.OffsetIsInt16Encodable()) {
+          // 'at' is overwritten while saving the value to the destination.
+          // Therefore we can't use 'at'.  It is OK if the read from the source
+          // destroys 'at', since that happens before the value is read.
+          // This uses only a single reg of the double reg-pair.
+          __ ldc1(kLithiumScratchDouble, source_operand);
+          __ sdc1(kLithiumScratchDouble, destination_operand);
+        } else {
+          __ ld(at, source_operand);
+          __ sd(at, destination_operand);
+        }
+      } else {
+        __ ld(kLithiumScratchReg, source_operand);
+        __ sd(kLithiumScratchReg, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsSmi(constant_source)) {
+         __ li(dst, Operand(cgen_->ToSmi(constant_source)));
+      } else if (cgen_->IsInteger32(constant_source)) {
+         __ li(dst, Operand(cgen_->ToInteger32(constant_source)));
+      } else {
+         __ li(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      double v = cgen_->ToDouble(constant_source);
+      __ Move(result, v);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      if (cgen_->IsSmi(constant_source)) {
+         __ li(kLithiumScratchReg, Operand(cgen_->ToSmi(constant_source)));
+         __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+      } else if (cgen_->IsInteger32(constant_source)) {
+        __ li(kLithiumScratchReg, Operand(cgen_->ToInteger32(constant_source)));
+        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+      } else {
+        __ li(kLithiumScratchReg, cgen_->ToHandle(constant_source));
+        __ sd(kLithiumScratchReg, cgen_->ToMemOperand(destination));
+      }
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ mov_d(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      __ sdc1(source_register, destination_operand);
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        // kLithiumScratchDouble was used to break the cycle,
+        // but kLithiumScratchReg is free.
+        MemOperand source_high_operand =
+            cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ lw(kLithiumScratchReg, source_operand);
+        __ sw(kLithiumScratchReg, destination_operand);
+        __ lw(kLithiumScratchReg, source_high_operand);
+        __ sw(kLithiumScratchReg, destination_high_operand);
+      } else {
+        __ ldc1(kLithiumScratchDouble, source_operand);
+        __ sdc1(kLithiumScratchDouble, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
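+
+// Dispatch summary (illustrative): register -> register uses mov,
+// register <-> stack slot uses sd/ld, constants are materialized with li
+// (or Move for doubles), and doubles travel via mov_d, ldc1 and sdc1;
+// memory-to-memory moves inside a cycle fall back to the Lithium scratch
+// registers.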
+
+
+#undef __
+
+} }  // namespace v8::internal
diff --git a/src/mips64/lithium-gap-resolver-mips64.h b/src/mips64/lithium-gap-resolver-mips64.h
new file mode 100644
index 0000000..9e6f14e
--- /dev/null
+++ b/src/mips64/lithium-gap-resolver-mips64.h
@@ -0,0 +1,60 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
+#define V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
+
+#include "src/v8.h"
+
+#include "src/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver FINAL BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
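+
+// Typical use (sketch): LCodeGen::DoParallelMove forwards each gap's move
+// to resolver_.Resolve(move), which emits the actual assembler sequence.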
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS64_LITHIUM_GAP_RESOLVER_MIPS64_H_
diff --git a/src/mips64/lithium-mips64.cc b/src/mips64/lithium-mips64.cc
new file mode 100644
index 0000000..4892611
--- /dev/null
+++ b/src/mips64/lithium-mips64.cc
@@ -0,0 +1,2594 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
+#include "src/mips64/lithium-codegen-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                            \
+  void L##type::CompileToNative(LCodeGen* generator) {  \
+    generator->Do##type(this);                          \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
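+
+// For a concrete instruction such as LGoto, DEFINE_COMPILE expands to
+// (illustrative):
+//
+//   void LGoto::CompileToNative(LCodeGen* generator) {
+//     generator->DoGoto(this);
+//   }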
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Inputs operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  DCHECK(Output() == NULL ||
+         LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() ||
+           operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-d";
+    case Token::SUB: return "sub-d";
+    case Token::MUL: return "mul-d";
+    case Token::DIV: return "div-d";
+    case Token::MOD: return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD: return "add-t";
+    case Token::SUB: return "sub-t";
+    case Token::MUL: return "mul-t";
+    case Token::MOD: return "mod-t";
+    case Token::DIV: return "div-t";
+    case Token::BIT_AND: return "bit-and-t";
+    case Token::BIT_OR: return "bit-or-t";
+    case Token::BIT_XOR: return "bit-xor-t";
+    case Token::ROR: return "ror-t";
+    case Token::SHL: return "sll-t";
+    case Token::SAR: return "sra-t";
+    case Token::SHR: return "srl-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new(zone()) LDebugBreak();
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_object(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d",
+              *hydrogen()->class_name(),
+              true_block_id(),
+              false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(),
+              true_block_id(), false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  OStringStream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Skip a slot for a double-width slot.
+  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
+  return spill_slot_count_++;
+}
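+
+// Illustrative sequence: starting from an empty chunk, requests for
+// GENERAL, DOUBLE and GENERAL slots yield indices 0, 2 and 3; index 1 is
+// skipped so the double-width value occupies two adjacent slots.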
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new(zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                  Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new(zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                  DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value,
+             new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                      LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new(zone()) LUnallocated(LUnallocated::NONE,
+                                     LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+      ? chunk_->DefineConstantOperand(HConstant::cast(value))
+      : Use(value, new(zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new(zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(
+    LTemplateResultInstruction<1>* instr, Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(hydrogen_env,
+                                           &argument_index_accumulator,
+                                           &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have side effects, lazy deoptimization
+  // after the call will try to deoptimize to the point before the call.
+  // Thus we still need to attach an environment to this call even if
+  // the call sequence cannot deoptimize eagerly.
+  bool needs_environment =
+      (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+      !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new(zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new(zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new(zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new(zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new(zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseRegisterAtStart(right_value);
+    }
+
+    // Shift operations can only deoptimize if we do a logical shift
+    // by 0 and the result cannot be truncated to int32.
+    if (op == Token::SHR && constant_value == 0) {
+      if (FLAG_opt_safe_uint32_operations) {
+        does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+      } else {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToInt32);
+      }
+    }
+
+    LInstruction* result =
+        DefineAsRegister(new(zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
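+
+// Example (sketch): for 'x >>> 0' the shift amount is the constant 0, so
+// the untruncated result can exceed int32 range (x = -1 yields
+// 4294967295); unless every use truncates to int32, or kUint32 is set and
+// --opt_safe_uint32_operations is on, the shift must keep the ability to
+// deoptimize.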
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), f2);
+    LOperand* right = UseFixedDouble(instr->right(), f4);
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    // We call a C function for double modulo. It can't trigger a GC. We need
+    // to use fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    return MarkAsCall(DefineFixedDouble(result, f2), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new(zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, a1);
+  LOperand* right_operand = UseFixed(right, a0);
+  LArithmeticT* result =
+      new(zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new(zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new(zone())
+          LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new(zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new(zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  if (instr->IsCall()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    LInstruction* instruction_needing_environment = NULL;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      instruction_needing_environment = instr;
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new(zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+    if (instruction_needing_environment != NULL) {
+      // Store the lazy deopt environment with the instruction if needed.
+      // Right now it is only used for LInstanceOfKnownGlobal.
+      instruction_needing_environment->
+          SetDeferredLazyDeoptimizationEnvironment(bailout->environment());
+    }
+  }
+}
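+
+// Resulting instruction stream for a call (illustrative sketch):
+//
+//   some-call-instruction ...   ; instr->IsCall()
+//   lazy-bailout                ; environment replayed from the HSimulate
+//
+// so that a lazy deoptimization arriving after the call has a complete
+// environment from which to rebuild the unoptimized frame.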
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new(zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+      type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new(zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
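+
+// For instance (illustrative): if the recorded input types are only
+// {SMI, HEAP_NUMBER}, an unexpected tagged input such as a string must be
+// able to deoptimize, which is why the branch is given an environment.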
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new(zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* length) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(
+      new(zone()) LArgumentsLength(UseRegister(length->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new(zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result =
+      new(zone()) LInstanceOf(context, UseFixed(instr->left(), a0),
+                              UseFixed(instr->right(), a1));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+    HInstanceOfKnownGlobal* instr) {
+  LInstanceOfKnownGlobal* result =
+      new(zone()) LInstanceOfKnownGlobal(
+          UseFixed(instr->context(), cp),
+          UseFixed(instr->left(), a0),
+          FixedTemp(a4));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new(zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+  LOperand* receiver = UseFixed(instr->receiver(), a0);
+  LOperand* length = UseFixed(instr->length(), a2);
+  LOperand* elements = UseFixed(instr->elements(), a3);
+  LApplyArguments* result = new(zone()) LApplyArguments(function,
+                                                        receiver,
+                                                        length,
+                                                        elements);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = Use(instr->argument(i));
+    AddInstruction(new(zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new(zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(
+      new(zone()) LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses()
+      ? NULL
+      : DefineAsRegister(new(zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new(zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new(zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new(zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(
+    HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), a1);
+
+  LCallJSFunction* result = new(zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(
+    HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  ops.Add(target, zone());
+  for (int i = 1; i < instr->OperandCount(); i++) {
+    LOperand* op =
+        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result = new(zone()) LCallWithDescriptor(
+      descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+    HTailCallThroughMegamorphicCache* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* receiver_register =
+      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+  LOperand* name_register =
+      UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  // Not marked as call. It can't deoptimize, and it never returns.
+  return new (zone()) LTailCallThroughMegamorphicCache(
+      context, receiver_register, name_register);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), a1);
+  LInvokeFunction* result = new(zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, v0), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
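+// Math.log is lowered to a call into C code, which is why the input and
+// result are pinned to a fixed double register and the instruction is
+// marked as a call.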
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), f4);
+  return MarkAsCall(DefineFixedDouble(new(zone()) LMathLog(input), f4), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new(zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LOperand* double_temp = TempDoubleRegister();
+  LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  // Input cannot be the same as the result, see LCodeGen::DoMathPowHalf.
+  LOperand* input = UseFixedDouble(instr->value(), f8);
+  LOperand* temp = TempDoubleRegister();
+  LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+  return DefineFixedDouble(result, f4);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  Representation r = instr->value()->representation();
+  LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+      ? NULL
+      : UseFixed(instr->context(), cp);
+  LOperand* input = UseRegister(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempRegister();
+  LMathFloor* result = new(zone()) LMathFloor(input, temp);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathSqrt* result = new(zone()) LMathSqrt(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempDoubleRegister();
+  LMathRound* result = new(zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), a1);
+  LCallNew* result = new(zone()) LCallNew(context, constructor);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), a1);
+  LCallNewArray* result = new(zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), a1);
+  LCallFunction* call = new(zone()) LCallFunction(context, function);
+  return MarkAsCall(DefineFixed(call, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallRuntime(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineAsRegister(new(zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
+          dividend, divisor));
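+  // An environment (deopt point) is needed when the result could be -0
+  // (negative divisor with a zero dividend), when kMinInt / -1 could
+  // overflow, or when a nonzero remainder would be lost and not every use
+  // truncates the result.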
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LDivByConstI(
+          dividend, divisor));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LOperand* temp = instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)
+    ? NULL : TempRegister();
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LDivI(dividend, divisor, temp));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LFlooringDivByPowerOf2I(
+          dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive))) ?
+      NULL : TempRegister();
+  LInstruction* result = DefineAsRegister(
+      new(zone()) LFlooringDivByConstI(dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor);
+  return AssignEnvironment(DefineAsRegister(div));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
+          dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result = DefineAsRegister(new(zone()) LModByConstI(
+          dividend, divisor));
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result = DefineAsRegister(new(zone()) LModI(
+      dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    return instr->RightIsPowerOf2() ? DoModByPowerOf2I(instr) : DoModI(instr);
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    HValue* left = instr->BetterLeftOperand();
+    HValue* right = instr->BetterRightOperand();
+    LOperand* left_op;
+    LOperand* right_op;
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    if (right->IsConstant()) {
+      HConstant* constant = HConstant::cast(right);
+      int32_t constant_value = constant->Integer32Value();
+      // Constants -1, 0 and 1 can use the constant-operand form even when
+      // the result may overflow; other constants only when overflow is
+      // impossible.
+      if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+        left_op = UseRegisterAtStart(left);
+        right_op = UseConstant(right);
+      } else {
+        if (bailout_on_minus_zero) {
+          left_op = UseRegister(left);
+        } else {
+          left_op = UseRegisterAtStart(left);
+        }
+        right_op = UseRegister(right);
+      }
+    } else {
+      if (bailout_on_minus_zero) {
+        left_op = UseRegister(left);
+      } else {
+        left_op = UseRegisterAtStart(left);
+      }
+      right_op = UseRegister(right);
+    }
+    LMulI* mul = new(zone()) LMulI(left_op, right_op);
+    if (can_overflow || bailout_on_minus_zero) {
+      AssignEnvironment(mul);
+    }
+    return DefineAsRegister(mul);
+
+  } else if (instr->representation().IsDouble()) {
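+    // On MIPS64r2 a double multiply whose single use is an add can be
+    // folded into a fused multiply-add (madd.d); the multiply then emits no
+    // lithium instruction of its own (see DoMultiplyAdd).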
+    if (kArchVariant == kMips64r2) {
+      if (instr->HasOneUse() && instr->uses().value()->IsAdd()) {
+        HAdd* add = HAdd::cast(instr->uses().value());
+        if (instr == add->left()) {
+          // This mul is the lhs of an add. The add and mul will be folded
+          // into a multiply-add.
+          return NULL;
+        }
+        if (instr == add->right() && !add->left()->IsMul()) {
+          // This mul is the rhs of an add, where the lhs is not another mul.
+          // The add and mul will be folded into a multiply-add.
+          return NULL;
+        }
+      }
+    }
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new(zone()) LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+  LOperand* addend_op = UseRegisterAtStart(addend);
+  return DefineSameAsFirst(new(zone()) LMultiplyAddD(addend_op, multiplier_op,
+                                                     multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->left()->representation().IsExternal());
+    DCHECK(instr->right()->representation().IsInteger32());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LAddI* add = new(zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    if (kArchVariant == kMips64r2) {
+      if (instr->left()->IsMul())
+        return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+
+      if (instr->right()->IsMul()) {
+        DCHECK(!instr->left()->IsMul());
+        return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+      }
+    }
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  return DefineAsRegister(new(zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), f2);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), f4)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  LPower* result = new(zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, f0),
+                    instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LCmpT* result = new(zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new(zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new(zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()),
+                                        temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* temp = TempRegister();
+  return new(zone()) LIsStringAndBranch(UseRegisterAtStart(instr->value()),
+                                        temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LIsUndetectableAndBranch(
+      UseRegisterAtStart(instr->value()), TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  LStringCompareAndBranch* result =
+      new(zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new(zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LHasCachedArrayIndexAndBranch(
+      UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
+                                           TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new(zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LDateField* result =
+      new(zone()) LDateField(object, FixedTemp(a1), instr->index());
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+      ? UseRegisterAtStart(instr->index())
+      : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  return new(zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
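+  // Under --debug-code even statically eliminated checks are still emitted,
+  // but without an environment: they assert (stop) instead of deoptimizing.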
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+      ? UseRegisterOrConstantAtStart(instr->length())
+      : UseRegisterAtStart(instr->length());
+  LInstruction* result = new(zone()) LBoundsCheck(index, length);
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) {
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
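+// Lowers a representation change to the matching tag/untag or conversion
+// instruction. Conversions that can fail (e.g. untagging a value that might
+// not be a smi or heap number, or a double-to-int that is not allowed to
+// truncate) get an environment so they can deoptimize.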
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new(zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new(zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempDoubleRegister();
+        LInstruction* result =
+            DefineSameAsFirst(new(zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new(zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new(zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagU* result = new(zone()) LNumberTagU(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      } else {
+        STATIC_ASSERT((kMinInt == Smi::kMinValue) &&
+                      (kMaxInt == Smi::kMaxValue));
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new(zone()) LSmiTag(value));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new(zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new(zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new(zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new(zone()) LCheckValue(value));
+}
+
+
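+// A stability check needs no compare at the use site: a code dependency on
+// the maps is registered instead, and the code is deoptimized if one of them
+// becomes unstable. Maps with a migration target may call the runtime to
+// migrate the instance, hence the deferred call and pointer map.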
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new(zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new(zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    // Revisit this decision, here and 8 lines below.
+    return DefineAsRegister(new(zone()) LClampDToUint8(reg,
+        TempDoubleRegister()));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    LClampTToUint8* result =
+        new(zone()) LClampTToUint8(reg, TempDoubleRegister());
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new(zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub()
+      ? UseFixed(instr->context(), cp)
+      : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new(zone()) LReturn(UseFixed(instr->value(), v0), context,
+                             parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new(zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new(zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new(zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new(zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new(zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new(zone()) LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(DefineAsRegister(result))
+      : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+  LLoadGlobalGeneric* result =
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LOperand* value = UseRegister(instr->value());
+  // Use a temp to check the value in the cell in the case where we perform
+  // a hole check.
+  return instr->RequiresHoleCheck()
+      ? AssignEnvironment(new(zone()) LStoreGlobalCell(value, TempRegister()))
+      : new(zone()) LStoreGlobalCell(value, NULL);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new(zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new(zone()) LStoreContextSlot(context, value);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new(zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadNamedGeneric(context, object, vector), v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new(zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new(zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_typed_elements()) {
+    LOperand* obj = NULL;
+    if (instr->representation().IsDouble()) {
+      obj = UseRegister(instr->elements());
+    } else {
+      DCHECK(instr->representation().IsSmiOrTagged() ||
+             instr->representation().IsInteger32());
+      obj = UseRegisterAtStart(instr->elements());
+    }
+    result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
+  } else {
+    DCHECK(
+        (instr->representation().IsInteger32() &&
+         !IsDoubleOrFloatElementsKind(elements_kind)) ||
+        (instr->representation().IsDouble() &&
+         IsDoubleOrFloatElementsKind(elements_kind)));
+    LOperand* backing_store = UseRegister(instr->elements());
+    result = DefineAsRegister(new(zone()) LLoadKeyed(backing_store, key));
+  }
+
+  if ((instr->is_external() || instr->is_fixed_typed_array()) ?
+      // see LCodeGen::DoLoadKeyedExternalArray
+      ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+        elements_kind == UINT32_ELEMENTS) &&
+       !instr->CheckFlag(HInstruction::kUint32)) :
+      // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+      // LCodeGen::DoLoadKeyedFixedArray
+      instr->RequiresHoleCheck()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new(zone()) LLoadKeyedGeneric(context, object, key, vector),
+                  v0);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_typed_elements()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* val = NULL;
+    LOperand* key = NULL;
+
+    if (instr->value()->representation().IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      key = UseRegisterOrConstantAtStart(instr->key());
+      val = UseRegister(instr->value());
+    } else {
+      DCHECK(instr->value()->representation().IsSmiOrTagged() ||
+             instr->value()->representation().IsInteger32());
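+      // The write barrier clobbers the registers handed to it, so object,
+      // value and key are all forced into temps that nothing else reuses.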
+      if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+    }
+
+    return new(zone()) LStoreKeyed(object, key, val);
+  }
+
+  DCHECK(
+      (instr->value()->representation().IsInteger32() &&
+       !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+      (instr->value()->representation().IsDouble() &&
+       IsDoubleOrFloatElementsKind(instr->elements_kind())));
+  DCHECK((instr->is_fixed_typed_array() &&
+          instr->elements()->representation().IsTagged()) ||
+         (instr->is_external() &&
+          instr->elements()->representation().IsExternal()));
+  LOperand* val = UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* backing_store = UseRegister(instr->elements());
+  return new(zone()) LStoreKeyed(backing_store, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(
+      new(zone()) LStoreKeyedGeneric(context, obj, key, val), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), a0);
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new(zone()) LTransitionElementsKind(object, context, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new(zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map = instr->has_transition() &&
+      instr->NeedsWriteBarrierForMap();
+
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object
+        ? UseRegister(instr->object())
+        : UseTempRegister(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map
+        ? UseRegister(instr->object())
+        : UseRegisterAtStart(instr->object());
+  }
+
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We need a temporary register for write barrier of the map field.
+  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new(zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  LInstruction* result = new(zone()) LStoreNamedGeneric(context, obj, val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), a1);
+  LOperand* right = UseFixed(instr->right(), a0);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LStringAdd(context, left, right), v0),
+      instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new(zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new(zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new(zone()) LAllocate(context, size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LRegExpLiteral(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(
+      DefineFixed(new(zone()) LFunctionLiteral(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new(zone()) LOsrEntry);
+}
+
+
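+// Ordinary parameters are read from spill slots in the caller's frame;
+// stub parameters arrive in the fixed registers named by the stub's call
+// interface descriptor.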
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new(zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetEnvironmentParameterRegister(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new(zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new(zone()) LCallStub(context), v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new(zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), a0);
+  LToFastProperties* result = new(zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LTypeof* result = new(zone()) LTypeof(context, UseFixed(instr->value(), a0));
+  return MarkAsCall(DefineFixed(result, v0), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new(zone()) LTypeofIsAndBranch(UseTempRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new(zone()) LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
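+// A stack check at function entry is an ordinary call into the stack guard.
+// A back-edge check runs as deferred code and so also needs a pointer map,
+// plus an environment for lazy bailout at the loop back edge.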
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new(zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new(zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(instr->closure(),
+                                               instr->arguments_count(),
+                                               instr->function(),
+                                               undefined,
+                                               instr->inlining_kind());
+  // Only replay binding of arguments object if it wasn't removed from graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new(zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer = current_block_->last_environment()->
+      DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->enumerable(), a0);
+  LForInPrepareMap* result = new(zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, v0), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(DefineAsRegister(new(zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new(zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new(zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new(zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new(zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, cp), instr);
+}
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/lithium-mips64.h b/src/mips64/lithium-mips64.h
new file mode 100644
index 0000000..c6257a4
--- /dev/null
+++ b/src/mips64/lithium-mips64.h
@@ -0,0 +1,2841 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_LITHIUM_MIPS64_H_
+#define V8_MIPS64_LITHIUM_MIPS64_H_
+
+#include "src/hydrogen.h"
+#include "src/lithium.h"
+#include "src/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleToI)                               \
+  V(DoubleBits)                              \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathExp)                                 \
+  V(MathClz32)                               \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MultiplyAddD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PushArgument)                            \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(TailCallThroughMegamorphicCache)         \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
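+// LITHIUM_CONCRETE_INSTRUCTION_LIST is an "X macro": callers pass a macro V
+// that is applied once to every concrete instruction name. As a sketch, a
+// hypothetical counting macro could use the list like this:
+//
+//   #define COUNT_ONE(type) + 1
+//   static const int kNumLithiumOpcodes =
+//       0 LITHIUM_CONCRETE_INSTRUCTION_LIST(COUNT_ONE);
+//   #undef COUNT_ONE
+//
+// The Opcode enum and the Is##type() predicates below are generated the same
+// way.
+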
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
+  virtual Opcode opcode() const FINAL OVERRIDE {                      \
+    return LInstruction::k##type;                                           \
+  }                                                                         \
+  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;   \
+  virtual const char* Mnemonic() const FINAL OVERRIDE {               \
+    return mnemonic;                                                        \
+  }                                                                         \
+  static L##type* cast(LInstruction* instr) {                               \
+    DCHECK(instr->Is##type());                                              \
+    return reinterpret_cast<L##type*>(instr);                               \
+  }
+
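+// For illustration, DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") in LGoto below
+// expands roughly to:
+//
+//   virtual Opcode opcode() const FINAL OVERRIDE {
+//     return LInstruction::kGoto;
+//   }
+//   virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;
+//   virtual const char* Mnemonic() const FINAL OVERRIDE { return "goto"; }
+//   static LGoto* cast(LInstruction* instr) { /* checked downcast */ }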
+
+#define DECLARE_HYDROGEN_ACCESSOR(type)     \
+  H##type* hydrogen() const {               \
+    return H##type::cast(hydrogen_value()); \
+  }
+
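+// E.g. DECLARE_HYDROGEN_ACCESSOR(Add) in LAddI gives the instruction a typed
+// view of its originating hydrogen node:
+//
+//   HAdd* hydrogen() const { return HAdd::cast(hydrogen_value()); }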
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {
+  }
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+    // Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+  // Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
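+  // E.g. for Goto this generates:
+  //   bool IsGoto() const { return opcode() == kGoto; }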
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) { }
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator interface.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits: public BitField<bool, 0, 1> {};
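+  // IsCallBits claims bit 0 of bit_field_; MarkAsCall() writes it via
+  // IsCallBits::update() and IsCall() reads it back via
+  // IsCallBits::decode(), leaving the remaining bits free for future flags.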
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template<int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  virtual bool HasResult() const FINAL OVERRIDE {
+    return R != 0 && result() != NULL;
+  }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template<int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  virtual int InputCount() FINAL OVERRIDE { return I; }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+
+  virtual int TempCount() FINAL OVERRIDE { return T; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+};
+
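+// Example: LAddI below is an LTemplateInstruction<1, 2, 0>: one result, two
+// inputs (left and right) and no temps, all held in embedded containers so
+// no separate allocation is needed.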
+
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block)
+      : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  virtual bool IsGap() const FINAL OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new(zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap FINAL : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) { }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  virtual bool IsControl() const OVERRIDE { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) { }
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) {
+    inputs_[0] = value;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  virtual bool IsControl() const OVERRIDE { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel FINAL : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block)
+      : LGap(block), replacement_(NULL) { }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+    return false;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+    return false;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LTailCallThroughMegamorphicCache FINAL
+    : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name) {
+    inputs_[0] = context;
+    inputs_[1] = receiver;
+    inputs_[2] = name;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* name() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+                               "tail-call-through-megamorphic-cache")
+  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+    return false;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
+template<int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
+
+  virtual bool IsControl() const FINAL OVERRIDE { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
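+  // Assembly labels are resolved lazily and cached on first use.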
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function,
+                  LOperand* receiver,
+                  LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) {
+    inputs_[0] = elements;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI FINAL : public LTemplateInstruction<1, 2, 3> {
+ public:
+  LModI(LOperand* left,
+        LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Instruction for computing multiplier * multiplicand + addend.
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = addend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* addend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const {
+    return hydrogen()->representation().IsDouble();
+  }
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathFloor(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LMathExp(LOperand* value,
+           LOperand* double_temp,
+           LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = double_temp;
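+    // Eagerly set up the lookup tables used by the generated math-exp code.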
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathPowHalf(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch FINAL
+    : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch,
+                               "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+                               "instance-of-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+    return lazy_deopt_env_;
+  }
+  virtual void SetDeferredLazyDeoptimizationEnvironment(
+      LEnvironment* env) OVERRIDE {
+    lazy_deopt_env_ = env;
+  }
+
+ private:
+  LEnvironment* lazy_deopt_env_;
+};
+
+
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
+    inputs_[0] = date;
+    temps_[0] = temp;
+  }
+
+  LOperand* date() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  Smi* index() const { return index_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+  DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+  Smi* index_;
+};
+
+
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context,
+                    LOperand* string,
+                    LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
+      : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  virtual Opcode opcode() const OVERRIDE {
+    return LInstruction::kArithmeticD;
+  }
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op,
+               LOperand* context,
+               LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  virtual Opcode opcode() const FINAL { return LInstruction::kArithmeticT; }
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) {
+    inputs_[0] = object;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+  bool is_external() const {
+    return hydrogen()->is_external();
+  }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  bool is_typed_elements() const {
+    return is_external() || is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LStoreGlobalCell(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) { }
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) {
+    inputs_[0] = function;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
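+// Unlike the fixed-arity templates above, this call carries a variable-length
+// operand list (the code target plus the register parameters) in a ZoneList
+// and therefore implements the input/temp iterator interface itself.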
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  const CallInterfaceDescriptor descriptor() { return descriptor_; }
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+
+  virtual int TempCount() FINAL OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+};
+
+
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNew(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTaggedToI(LOperand* value,
+             LOperand* temp,
+             LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check)
+      : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+    inputs_[0] = object;
+    inputs_[1] = key;
+    inputs_[2] = value;
+  }
+
+  bool is_external() const { return hydrogen()->is_external(); }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  bool is_typed_elements() const {
+    return is_external() || is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  ElementsKind elements_kind() const {
+    return hydrogen()->elements_kind();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyedGeneric(LOperand* context,
+                     LOperand* obj,
+                     LOperand* key,
+                     LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = obj;
+    inputs_[2] = key;
+    inputs_[3] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LTransitionElementsKind(LOperand* object,
+                          LOperand* context,
+                          LOperand* new_map_temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object,
+                         LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento,
+                               "trap-allocation-memento")
+};
+
+
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampDToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) {
+    inputs_[0] = unclamped;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* context,
+            LOperand* size,
+            LOperand* temp1,
+            LOperand* temp2) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LRegExpLiteral(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LFunctionLiteral(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
+ public:
+  explicit LIsConstructCallAndBranch(LOperand* temp) {
+    temps_[0] = temp;
+  }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+                               "is-construct-call-and-branch")
+};
+
+
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+    return false;
+  }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) {
+    inputs_[0] = map;
+  }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() {
+    return HForInCacheArray::cast(this->hydrogen_value())->idx();
+  }
+};
+
+
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) {
+    inputs_[0] = context;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk FINAL : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph)
+      : LChunk(info, graph) { }
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder FINAL : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+  // Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+
+  static bool HasMagicNumberForDivisor(int32_t divisor);
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not
+  // reuse its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only
+  // at the instruction start. The register allocator is free to assign the
+  // same register to some other operand used inside the instruction (i.e. a
+  // temporary or an output).
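+  //
+  // For example (an illustrative sketch, not taken from any caller): given
+  //
+  //   LOperand* left = UseRegister(instr->left());
+  //   LOperand* right = UseRegisterAtStart(instr->right());
+  //
+  // the allocator may reuse right's register for the result or a temporary,
+  // but left's register stays reserved for the whole instruction.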
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly and we do not attach an environment to this
+  // instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr,
+      HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoBit(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op,
+                              HBinaryOperation* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_LITHIUM_MIPS_H_
diff --git a/src/mips64/macro-assembler-mips64.cc b/src/mips64/macro-assembler-mips64.cc
new file mode 100644
index 0000000..12d81bc
--- /dev/null
+++ b/src/mips64/macro-assembler-mips64.cc
@@ -0,0 +1,6089 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>  // For LONG_MIN, LONG_MAX.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
+      generating_stub_(false),
+      has_frame_(false) {
+  if (isolate() != NULL) {
+    code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
+                                  isolate());
+  }
+}
+
+
+void MacroAssembler::Load(Register dst,
+                          const MemOperand& src,
+                          Representation r) {
+  DCHECK(!r.IsDouble());
+  if (r.IsInteger8()) {
+    lb(dst, src);
+  } else if (r.IsUInteger8()) {
+    lbu(dst, src);
+  } else if (r.IsInteger16()) {
+    lh(dst, src);
+  } else if (r.IsUInteger16()) {
+    lhu(dst, src);
+  } else if (r.IsInteger32()) {
+    lw(dst, src);
+  } else {
+    ld(dst, src);
+  }
+}
+
+
+void MacroAssembler::Store(Register src,
+                           const MemOperand& dst,
+                           Representation r) {
+  DCHECK(!r.IsDouble());
+  if (r.IsInteger8() || r.IsUInteger8()) {
+    sb(src, dst);
+  } else if (r.IsInteger16() || r.IsUInteger16()) {
+    sh(src, dst);
+  } else if (r.IsInteger32()) {
+    sw(src, dst);
+  } else {
+    if (r.IsHeapObject()) {
+      AssertNotSmi(src);
+    } else if (r.IsSmi()) {
+      AssertSmi(src);
+    }
+    sd(src, dst);
+  }
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index) {
+  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::LoadRoot(Register destination,
+                              Heap::RootListIndex index,
+                              Condition cond,
+                              Register src1, const Operand& src2) {
+  Branch(2, NegateCondition(cond), src1, src2);
+  ld(destination, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+                               Heap::RootListIndex index) {
+  sd(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+void MacroAssembler::StoreRoot(Register source,
+                               Heap::RootListIndex index,
+                               Condition cond,
+                               Register src1, const Operand& src2) {
+  Branch(2, NegateCondition(cond), src1, src2);
+  sd(source, MemOperand(s6, index << kPointerSizeLog2));
+}
+
+
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+  // Safepoints expect a block of kNumSafepointRegisters values on the
+  // stack, so adjust the stack for unsaved registers.
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  DCHECK(num_unsaved >= 0);
+  if (num_unsaved > 0) {
+    Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
+  MultiPush(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  MultiPop(kSafepointSavedRegisters);
+  if (num_unsaved > 0) {
+    Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+  sd(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+  ld(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the highest encoding,
+  // which means that the lowest encodings are closest to the stack pointer.
+  return kSafepointRegisterStackIndexMap[reg_code];
+}
+
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  UNIMPLEMENTED_MIPS();
+  // General purpose registers are pushed last on the stack.
+  int doubles_size = FPURegister::NumAllocatableRegisters() * kDoubleSize;
+  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+  return MemOperand(sp, doubles_size + register_offset);
+}
+
+
+void MacroAssembler::InNewSpace(Register object,
+                                Register scratch,
+                                Condition cc,
+                                Label* branch) {
+  DCHECK(cc == eq || cc == ne);
+  And(scratch, object, Operand(ExternalReference::new_space_mask(isolate())));
+  Branch(branch, cc, scratch,
+         Operand(ExternalReference::new_space_start(isolate())));
+}
+
+
+void MacroAssembler::RecordWriteField(
+    Register object,
+    int offset,
+    Register value,
+    Register dst,
+    RAStatus ra_status,
+    SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check,
+    PointersToHereCheck pointers_to_here_check_for_value) {
+  DCHECK(!AreAliased(value, dst, t8, object));
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+  DCHECK(IsAligned(offset, kPointerSize));
+
+  Daddu(dst, object, Operand(offset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
+    Branch(&ok, eq, t8, Operand(zero_reg));
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  RecordWrite(object,
+              dst,
+              value,
+              ra_status,
+              save_fp,
+              remembered_set_action,
+              OMIT_SMI_CHECK,
+              pointers_to_here_check_for_value);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    li(value, Operand(bit_cast<int64_t>(kZapValue + 4)));
+    li(dst, Operand(bit_cast<int64_t>(kZapValue + 8)));
+  }
+}
+
+
+// Will clobber 4 registers: object, map, dst, at.  The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object,
+                                       Register map,
+                                       Register dst,
+                                       RAStatus ra_status,
+                                       SaveFPRegsMode fp_mode) {
+  if (emit_debug_code()) {
+    DCHECK(!dst.is(at));
+    ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+    Check(eq,
+          kWrongAddressOrValuePassedToRecordWrite,
+          dst,
+          Operand(isolate()->factory()->meta_map()));
+  }
+
+  if (!FLAG_incremental_marking) {
+    return;
+  }
+
+  if (emit_debug_code()) {
+    ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
+    Check(eq,
+          kWrongAddressOrValuePassedToRecordWrite,
+          map,
+          Operand(at));
+  }
+
+  Label done;
+
+  // A single check of the map's page's interesting flag suffices, since the
+  // flag is only set during incremental collection, and then the interesting
+  // flag of the source object's page is guaranteed to be set as well.  This
+  // optimization relies on the fact that maps can never be in new space.
+  CheckPageFlag(map,
+                map,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask,
+                eq,
+                &done);
+
+  Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
+    Branch(&ok, eq, at, Operand(zero_reg));
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  // Record the actual write.
+  if (ra_status == kRAHasNotBeenSaved) {
+    push(ra);
+  }
+  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+                       fp_mode);
+  CallStub(&stub);
+  if (ra_status == kRAHasNotBeenSaved) {
+    pop(ra);
+  }
+
+  bind(&done);
+
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at, dst);
+
+  // Clobber clobbered registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    li(dst, Operand(bit_cast<int64_t>(kZapValue + 12)));
+    li(map, Operand(bit_cast<int64_t>(kZapValue + 16)));
+  }
+}
+
+
+// Will clobber 4 registers: object, address, value, at.  The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(
+    Register object,
+    Register address,
+    Register value,
+    RAStatus ra_status,
+    SaveFPRegsMode fp_mode,
+    RememberedSetAction remembered_set_action,
+    SmiCheck smi_check,
+    PointersToHereCheck pointers_to_here_check_for_value) {
+  DCHECK(!AreAliased(object, address, value, t8));
+  DCHECK(!AreAliased(object, address, value, t9));
+
+  if (emit_debug_code()) {
+    ld(at, MemOperand(address));
+    Assert(
+        eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of smis and stores into the young generation.
+  Label done;
+
+  if (smi_check == INLINE_SMI_CHECK) {
+    DCHECK_EQ(0, kSmiTag);
+    JumpIfSmi(value, &done);
+  }
+
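+  // The write barrier is needed only when the value lives on a page whose
+  // incoming pointers are interesting and the object lives on a page whose
+  // outgoing pointers are interesting; the CheckPageFlag tests below skip
+  // the barrier otherwise.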
+  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+    CheckPageFlag(value,
+                  value,  // Used as scratch.
+                  MemoryChunk::kPointersToHereAreInterestingMask,
+                  eq,
+                  &done);
+  }
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask,
+                eq,
+                &done);
+
+  // Record the actual write.
+  if (ra_status == kRAHasNotBeenSaved) {
+    push(ra);
+  }
+  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+                       fp_mode);
+  CallStub(&stub);
+  if (ra_status == kRAHasNotBeenSaved) {
+    pop(ra);
+  }
+
+  bind(&done);
+
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, at,
+                   value);
+
+  // Clobber clobbered registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    li(address, Operand(bit_cast<int64_t>(kZapValue + 12)));
+    li(value, Operand(bit_cast<int64_t>(kZapValue + 16)));
+  }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register address,
+                                         Register scratch,
+                                         SaveFPRegsMode fp_mode,
+                                         RememberedSetFinalAction and_then) {
+  Label done;
+  if (emit_debug_code()) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok);
+    stop("Remembered set pointer is in new space");
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  li(t8, Operand(store_buffer));
+  ld(scratch, MemOperand(t8));
+  // Store pointer to buffer and increment buffer top.
+  sd(address, MemOperand(scratch));
+  Daddu(scratch, scratch, kPointerSize);
+  // Write back new top of buffer.
+  sd(scratch, MemOperand(t8));
+  // Check for end of buffer; if the buffer is full, fall through and call
+  // the store buffer overflow stub below.
+  And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  DCHECK(!scratch.is(t8));
+  if (and_then == kFallThroughAtEnd) {
+    Branch(&done, eq, t8, Operand(zero_reg));
+  } else {
+    DCHECK(and_then == kReturnAtEnd);
+    Ret(eq, t8, Operand(zero_reg));
+  }
+  push(ra);
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
+  CallStub(&store_buffer_overflow);
+  pop(ra);
+  bind(&done);
+  if (and_then == kReturnAtEnd) {
+    Ret();
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Allocation support.
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch,
+                                            Label* miss) {
+  Label same_contexts;
+
+  DCHECK(!holder_reg.is(scratch));
+  DCHECK(!holder_reg.is(at));
+  DCHECK(!scratch.is(at));
+
+  // Load current lexical context from the stack frame.
+  ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  // In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
+      scratch, Operand(zero_reg));
+#endif
+
+  // Load the native context of the current context.
+  int offset =
+      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+  ld(scratch, FieldMemOperand(scratch, offset));
+  ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+  // Check the context is a native context.
+  if (emit_debug_code()) {
+    push(holder_reg);  // Temporarily save holder on the stack.
+    // Read the first word and compare to the native_context_map.
+    ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kNativeContextMapRootIndex);
+    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
+          holder_reg, Operand(at));
+    pop(holder_reg);  // Restore holder.
+  }
+
+  // Check if both contexts are the same.
+  ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+  Branch(&same_contexts, eq, scratch, Operand(at));
+
+  // Check the context is a native context.
+  if (emit_debug_code()) {
+    push(holder_reg);  // Temporarily save holder on the stack.
+    mov(holder_reg, at);  // Move at to its holding place.
+    LoadRoot(at, Heap::kNullValueRootIndex);
+    Check(ne, kJSGlobalProxyContextShouldNotBeNull,
+          holder_reg, Operand(at));
+
+    ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kNativeContextMapRootIndex);
+    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
+          holder_reg, Operand(at));
+    // Restoring at is not needed; at is reloaded below.
+    pop(holder_reg);  // Restore holder.
+    // Restore at to holder's context.
+    ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+  }
+
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  int token_offset = Context::kHeaderSize +
+                     Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+  ld(scratch, FieldMemOperand(scratch, token_offset));
+  ld(at, FieldMemOperand(at, token_offset));
+  Branch(miss, ne, scratch, Operand(at));
+
+  bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key.  This must be kept in sync with
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stubs-hydrogen.cc.
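+//
+// Roughly equivalent C logic for the shift/add sequence below (a sketch for
+// readability; the authoritative contract is ComputeIntegerHash):
+//   uint32_t hash = key ^ seed;
+//   hash = ~hash + (hash << 15);
+//   hash ^= hash >> 12;
+//   hash += hash << 2;
+//   hash ^= hash >> 4;
+//   hash *= 2057;  // emitted below as hash + (hash << 3) + (hash << 11)
+//   hash ^= hash >> 16;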
+void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
+  // First of all we assign the hash seed to scratch.
+  LoadRoot(scratch, Heap::kHashSeedRootIndex);
+  SmiUntag(scratch);
+
+  // Xor original key with a seed.
+  xor_(reg0, reg0, scratch);
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  // The algorithm uses 32-bit integer values.
+  nor(scratch, reg0, zero_reg);
+  sll(at, reg0, 15);
+  addu(reg0, scratch, at);
+
+  // hash = hash ^ (hash >> 12);
+  srl(at, reg0, 12);
+  xor_(reg0, reg0, at);
+
+  // hash = hash + (hash << 2);
+  sll(at, reg0, 2);
+  addu(reg0, reg0, at);
+
+  // hash = hash ^ (hash >> 4);
+  srl(at, reg0, 4);
+  xor_(reg0, reg0, at);
+
+  // hash = hash * 2057;
+  sll(scratch, reg0, 11);
+  sll(at, reg0, 3);
+  addu(reg0, reg0, at);
+  addu(reg0, reg0, scratch);
+
+  // hash = hash ^ (hash >> 16);
+  srl(at, reg0, 16);
+  xor_(reg0, reg0, at);
+}
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss,
+                                              Register elements,
+                                              Register key,
+                                              Register result,
+                                              Register reg0,
+                                              Register reg1,
+                                              Register reg2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'key' or 'elements'.
+  //            Unchanged on bailout so 'key' or 'elements' can be used
+  //            in further computation.
+  //
+  // Scratch registers:
+  //
+  // reg0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // reg1 - Used to hold the capacity mask of the dictionary.
+  //
+  // reg2 - Used for the index into the dictionary.
+  // at   - Temporary (avoid MacroAssembler instructions also using 'at').
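+  //
+  // The unrolled loop below performs a fixed number of probes
+  // (kNumberDictionaryProbes); each probe offsets the hash by
+  // SeededNumberDictionary::GetProbeOffset(i), masks with the capacity mask,
+  // and compares the key at the resulting entry, mirroring the runtime
+  // dictionary lookup.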
+  Label done;
+
+  GetNumberHash(reg0, reg1);
+
+  // Compute the capacity mask.
+  ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  SmiUntag(reg1, reg1);
+  Dsubu(reg1, reg1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  for (int i = 0; i < kNumberDictionaryProbes; i++) {
+    // Use reg2 for index calculations and keep the hash intact in reg0.
+    mov(reg2, reg0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+    }
+    and_(reg2, reg2, reg1);
+
+    // Scale the index by multiplying by the element size.
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
+    dsll(at, reg2, 1);  // 2x.
+    daddu(reg2, reg2, at);  // reg2 = reg2 * 3.
+
+    // Check if the key is identical to the name.
+    dsll(at, reg2, kPointerSizeLog2);
+    daddu(reg2, elements, at);
+
+    ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
+    if (i != kNumberDictionaryProbes - 1) {
+      Branch(&done, eq, key, Operand(at));
+    } else {
+      Branch(miss, ne, key, Operand(at));
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a normal property.
+  // reg2: elements + (index * kPointerSize).
+  const int kDetailsOffset =
+      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
+  And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
+  Branch(miss, ne, at, Operand(zero_reg));
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+  ld(result, FieldMemOperand(reg2, kValueOffset));
+}
+
+
+// ---------------------------------------------------------------------------
+// Instruction macros.
+
+void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    addu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      addiu(rd, rs, rt.imm64_);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      addu(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    daddu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      daddiu(rd, rs, rt.imm64_);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      daddu(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    subu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      addiu(rd, rs, -rt.imm64_);  // No subiu instr, use addiu(x, y, -imm).
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      subu(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    dsubu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      daddiu(rd, rs, -rt.imm64_);  // No dsubiu instr, use daddiu(x, y, -imm).
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      dsubu(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    mul(rd, rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    mul(rd, rs, at);
+  }
+}
+
+
+void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      mult(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      muh(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      mult(rs, at);
+      mfhi(rd);
+    } else {
+      muh(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant == kMips64r6) {
+      dmul(rd, rs, rt.rm());
+    } else {
+      dmult(rs, rt.rm());
+      mflo(rd);
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant == kMips64r6) {
+      dmul(rd, rs, at);
+    } else {
+      dmult(rs, at);
+      mflo(rd);
+    }
+  }
+}
+
+
+void MacroAssembler::Dmulh(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant == kMips64r6) {
+      dmuh(rd, rs, rt.rm());
+    } else {
+      dmult(rs, rt.rm());
+      mfhi(rd);
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant == kMips64r6) {
+      dmuh(rd, rs, at);
+    } else {
+      dmult(rs, at);
+      mfhi(rd);
+    }
+  }
+}
+
+
+void MacroAssembler::Mult(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    mult(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    mult(rs, at);
+  }
+}
+
+
+void MacroAssembler::Dmult(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    dmult(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    dmult(rs, at);
+  }
+}
+
+
+void MacroAssembler::Multu(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    multu(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    multu(rs, at);
+  }
+}
+
+
+void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    dmultu(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    dmultu(rs, at);
+  }
+}
+
+
+void MacroAssembler::Div(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    div(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    div(rs, at);
+  }
+}
+
+
+void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    ddiv(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    ddiv(rs, at);
+  }
+}
+
+
+void MacroAssembler::Ddiv(Register rd, Register rs, const Operand& rt) {
+  if (kArchVariant != kMips64r6) {
+    if (rt.is_reg()) {
+      ddiv(rs, rt.rm());
+      mflo(rd);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      ddiv(rs, at);
+      mflo(rd);
+    }
+  } else {
+    if (rt.is_reg()) {
+      ddiv(rd, rs, rt.rm());
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      ddiv(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Divu(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    divu(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    divu(rs, at);
+  }
+}
+
+
+void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    ddivu(rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    ddivu(rs, at);
+  }
+}
+
+
+void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
+  if (kArchVariant != kMips64r6) {
+    if (rt.is_reg()) {
+      ddiv(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      ddiv(rs, at);
+      mfhi(rd);
+    }
+  } else {
+    if (rt.is_reg()) {
+      dmod(rd, rs, rt.rm());
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      dmod(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    and_(rd, rs, rt.rm());
+  } else {
+    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      andi(rd, rs, rt.imm64_);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      and_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    or_(rd, rs, rt.rm());
+  } else {
+    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      ori(rd, rs, rt.imm64_);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      or_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    xor_(rd, rs, rt.rm());
+  } else {
+    if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      xori(rd, rs, rt.imm64_);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      xor_(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Nor(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    nor(rd, rs, rt.rm());
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    nor(rd, rs, at);
+  }
+}
+
+
+void MacroAssembler::Neg(Register rs, const Operand& rt) {
+  DCHECK(rt.is_reg());
+  DCHECK(!at.is(rs));
+  DCHECK(!at.is(rt.rm()));
+  li(at, -1);
+  xor_(rs, rt.rm(), at);
+}
+
+
+void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    slt(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      slti(rd, rs, rt.imm64_);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      slt(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    sltu(rd, rs, rt.rm());
+  } else {
+    if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
+      sltiu(rd, rs, rt.imm64_);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      sltu(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
+  if (kArchVariant == kMips64r2) {
+    if (rt.is_reg()) {
+      rotrv(rd, rs, rt.rm());
+    } else {
+      rotr(rd, rs, rt.imm64_);
+    }
+  } else {
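+    // No rotate instructions on this variant; synthesize the rotation as
+    // rd = (rs >> rot) | (rs << (32 - rot)).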
+    if (rt.is_reg()) {
+      subu(at, zero_reg, rt.rm());
+      sllv(at, rs, at);
+      srlv(rd, rs, rt.rm());
+      or_(rd, rd, at);
+    } else {
+      if (rt.imm64_ == 0) {
+        srl(rd, rs, 0);
+      } else {
+        srl(at, rs, rt.imm64_);
+        sll(rd, rs, (0x20 - rt.imm64_) & 0x1f);
+        or_(rd, rd, at);
+      }
+    }
+  }
+}
+
+
+void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    drotrv(rd, rs, rt.rm());
+  } else {
+    drotr(rd, rs, rt.imm64_);
+  }
+}
+
+
+void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
+  pref(hint, rs);
+}
+
+
+// ------------Pseudo-instructions-------------
+
+void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
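+  // Assemble the word from two partial loads: lwr fills the low bytes, lwl
+  // the high bytes. The offsets assume a little-endian target.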
+  lwr(rd, rs);
+  lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
+void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
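+  // Mirror of Ulw: swr stores the low bytes, swl the high bytes
+  // (little-endian).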
+  swr(rd, rs);
+  swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
+}
+
+
+// Do 64-bit load from unaligned address. Note this only handles
+// the specific case of 32-bit aligned, but not 64-bit aligned.
+void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
+  // Asserts if the offset from the start of the object IS actually aligned.
+  // ONLY use with known misalignment, since there is a performance cost.
+  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
+  // TODO(plind): endian dependency.
+  lwu(rd, rs);
+  lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+  dsll32(scratch, scratch, 0);
+  Daddu(rd, rd, scratch);
+}
+
+
+// Do 64-bit store to unaligned address. Note this only handles
+// the specific case of 32-bit aligned, but not 64-bit aligned.
+void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
+  // Asserts if the offset from the start of the object IS actually aligned.
+  // ONLY use with known misalignment, since there is a performance cost.
+  DCHECK((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
+  // TODO(plind): endian dependency.
+  sw(rd, rs);
+  dsrl32(scratch, rd, 0);
+  sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
+}
+
+
+void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
+  AllowDeferredHandleDereference smi_check;
+  if (value->IsSmi()) {
+    li(dst, Operand(value), mode);
+  } else {
+    DCHECK(value->IsHeapObject());
+    if (isolate()->heap()->InNewSpace(*value)) {
+      Handle<Cell> cell = isolate()->factory()->NewCell(value);
+      li(dst, Operand(cell));
+      ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
+    } else {
+      li(dst, Operand(value));
+    }
+  }
+}
+
+
+void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
+  DCHECK(!j.is_reg());
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
+    // Normal load of an immediate value which does not need Relocation Info.
+    if (is_int32(j.imm64_)) {
+      if (is_int16(j.imm64_)) {
+        daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
+      } else if (!(j.imm64_ & kHiMask)) {
+        ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
+      } else if (!(j.imm64_ & kImm16Mask)) {
+        lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+      } else {
+        lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
+        ori(rd, rd, (j.imm64_ & kImm16Mask));
+      }
+    } else {
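+      // Build the full 64-bit immediate in 16-bit chunks, most significant
+      // halfword first: lui, then alternating ori/dsll.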
+      lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+      ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+      dsll(rd, rd, 16);
+      ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+      dsll(rd, rd, 16);
+      ori(rd, rd, j.imm64_ & kImm16Mask);
+    }
+  } else if (MustUseReg(j.rmode_)) {
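+    // Values with relocation info always use the fixed 4-instruction,
+    // 48-bit sequence below so they can be patched in place.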
+    RecordRelocInfo(j.rmode_, j.imm64_);
+    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+    dsll(rd, rd, 16);
+    ori(rd, rd, j.imm64_ & kImm16Mask);
+  } else if (mode == ADDRESS_LOAD)  {
+    // Always emit the same number of instructions, since this code may be
+    // patched later to load another value that needs all 4 instructions.
+    lui(rd, (j.imm64_ >> 32) & kImm16Mask);
+    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+    dsll(rd, rd, 16);
+    ori(rd, rd, j.imm64_ & kImm16Mask);
+  } else {
+    lui(rd, (j.imm64_ >> 48) & kImm16Mask);
+    ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
+    dsll(rd, rd, 16);
+    ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
+    dsll(rd, rd, 16);
+    ori(rd, rd, j.imm64_ & kImm16Mask);
+  }
+}
+
+
+void MacroAssembler::MultiPush(RegList regs) {
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kPointerSize;
+
+  Dsubu(sp, sp, Operand(stack_offset));
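+  // Walk the register indices from high to low so the lowest-numbered
+  // register ends up nearest the stack pointer.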
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      stack_offset -= kPointerSize;
+      sd(ToRegister(i), MemOperand(sp, stack_offset));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPushReversed(RegList regs) {
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kPointerSize;
+
+  Dsubu(sp, sp, Operand(stack_offset));
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      stack_offset -= kPointerSize;
+      sd(ToRegister(i), MemOperand(sp, stack_offset));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPop(RegList regs) {
+  int16_t stack_offset = 0;
+
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      ld(ToRegister(i), MemOperand(sp, stack_offset));
+      stack_offset += kPointerSize;
+    }
+  }
+  daddiu(sp, sp, stack_offset);
+}
+
+
+void MacroAssembler::MultiPopReversed(RegList regs) {
+  int16_t stack_offset = 0;
+
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      ld(ToRegister(i), MemOperand(sp, stack_offset));
+      stack_offset += kPointerSize;
+    }
+  }
+  daddiu(sp, sp, stack_offset);
+}
+
+
+void MacroAssembler::MultiPushFPU(RegList regs) {
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kDoubleSize;
+
+  Dsubu(sp, sp, Operand(stack_offset));
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      stack_offset -= kDoubleSize;
+      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPushReversedFPU(RegList regs) {
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kDoubleSize;
+
+  Dsubu(sp, sp, Operand(stack_offset));
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      stack_offset -= kDoubleSize;
+      sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPopFPU(RegList regs) {
+  int16_t stack_offset = 0;
+
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+      stack_offset += kDoubleSize;
+    }
+  }
+  daddiu(sp, sp, stack_offset);
+}
+
+
+void MacroAssembler::MultiPopReversedFPU(RegList regs) {
+  int16_t stack_offset = 0;
+
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
+      stack_offset += kDoubleSize;
+    }
+  }
+  daddiu(sp, sp, stack_offset);
+}
+
+
+void MacroAssembler::FlushICache(Register address, unsigned instructions) {
+  RegList saved_regs = kJSCallerSaved | ra.bit();
+  MultiPush(saved_regs);
+  AllowExternalCallThatCantCauseGC scope(this);
+
+  // Save to a0 in case address == a4.
+  Move(a0, address);
+  PrepareCallCFunction(2, a4);
+
+  li(a1, instructions * kInstrSize);
+  CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
+  MultiPop(saved_regs);
+}
+
+
+void MacroAssembler::Ext(Register rt,
+                         Register rs,
+                         uint16_t pos,
+                         uint16_t size) {
+  DCHECK(pos < 32);
+  DCHECK(pos + size < 33);
+  ext_(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Ins(Register rt,
+                         Register rs,
+                         uint16_t pos,
+                         uint16_t size) {
+  DCHECK(pos < 32);
+  DCHECK(pos + size <= 32);
+  DCHECK(size != 0);
+  ins_(rt, rs, pos, size);
+}
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd,
+                              FPURegister fs,
+                              FPURegister scratch) {
+  // Move the data from fs to t8.
+  mfc1(t8, fs);
+  Cvt_d_uw(fd, t8, scratch);
+}
+
+
+void MacroAssembler::Cvt_d_uw(FPURegister fd,
+                              Register rs,
+                              FPURegister scratch) {
+  // Convert rs to an FP value in fd.
+  // We do this by converting rs with the MSB cleared, to avoid a signed
+  // conversion, then adding 2^31 back to the result if the MSB was set.
+
+  DCHECK(!fd.is(scratch));
+  DCHECK(!rs.is(t9));
+  DCHECK(!rs.is(at));
+
+  // Save rs's MSB to t9.
+  Ext(t9, rs, 31, 1);
+  // Remove rs's MSB.
+  Ext(at, rs, 0, 31);
+  // Move the result to fd.
+  mtc1(at, fd);
+  mthc1(zero_reg, fd);
+
+  // Convert fd to a real FP value.
+  cvt_d_w(fd, fd);
+
+  Label conversion_done;
+
+  // If rs's MSB was 0, it's done.
+  // Otherwise we need to add that to the FP register.
+  Branch(&conversion_done, eq, t9, Operand(zero_reg));
+
+  // Load 2^31 into scratch as its float representation.
+  li(at, 0x41E00000);
+  mtc1(zero_reg, scratch);
+  mthc1(at, scratch);
+  // Add it to fd.
+  add_d(fd, fd, scratch);
+
+  bind(&conversion_done);
+}
+
+
+void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
+  round_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
+  floor_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
+  ceil_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
+  trunc_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Trunc_l_ud(FPURegister fd,
+                                FPURegister fs,
+                                FPURegister scratch) {
+  // Load to GPR.
+  dmfc1(t8, fs);
+  // Reset sign bit.
+  li(at, 0x7fffffffffffffff);
+  and_(t8, t8, at);
+  dmtc1(t8, fs);
+  trunc_l_d(fd, fs);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd,
+                                FPURegister fs,
+                                FPURegister scratch) {
+  Trunc_uw_d(fs, t8, scratch);
+  mtc1(t8, fd);
+}
+
+
+void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
+  trunc_w_d(fd, fs);
+}
+
+
+void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
+  round_w_d(fd, fs);
+}
+
+
+void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
+  floor_w_d(fd, fs);
+}
+
+
+void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
+  ceil_w_d(fd, fs);
+}
+
+
+void MacroAssembler::Trunc_uw_d(FPURegister fd,
+                                Register rs,
+                                FPURegister scratch) {
+  DCHECK(!fd.is(scratch));
+  DCHECK(!rs.is(at));
+
+  // Load 2^31 into scratch as its float representation.
+  li(at, 0x41E00000);
+  mtc1(zero_reg, scratch);
+  mthc1(at, scratch);
+  // Test if scratch > fd.
+  // If fd < 2^31 we can convert it normally.
+  Label simple_convert;
+  BranchF(&simple_convert, NULL, lt, fd, scratch);
+
+  // First we subtract 2^31 from fd, then trunc it to rs
+  // and add 2^31 to rs.
+  sub_d(scratch, fd, scratch);
+  trunc_w_d(scratch, scratch);
+  mfc1(rs, scratch);
+  Or(rs, rs, 1 << 31);
+
+  Label done;
+  Branch(&done);
+  // Simple conversion.
+  bind(&simple_convert);
+  trunc_w_d(scratch, fd);
+  mfc1(rs, scratch);
+
+  bind(&done);
+}
+
+
+void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
+    FPURegister ft, FPURegister scratch) {
+  if (0) {  // TODO(plind): find reasonable arch-variant symbol names.
+    madd_d(fd, fr, fs, ft);
+  } else {
+    // Must not clobber the source registers' values.
+    DCHECK(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
+    mul_d(scratch, fs, ft);
+    add_d(fd, fr, scratch);
+  }
+}
+
+
+void MacroAssembler::BranchF(Label* target,
+                             Label* nan,
+                             Condition cc,
+                             FPURegister cmp1,
+                             FPURegister cmp2,
+                             BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (cc == al) {
+    Branch(bd, target);
+    return;
+  }
+
+  DCHECK(nan || target);
+  // Check for unordered (NaN) cases.
+  if (nan) {
+    if (kArchVariant != kMips64r6) {
+      c(UN, D, cmp1, cmp2);
+      bc1t(nan);
+    } else {
+      // Use f31 for the comparison result. It must be unavailable to the
+      // Lithium register allocator.
+      DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
+      cmp(UN, L, f31, cmp1, cmp2);
+      bc1nez(nan, f31);
+    }
+  }
+
+  if (kArchVariant != kMips64r6) {
+    if (target) {
+      // Here NaN cases were either handled by this function or are assumed to
+      // have been handled by the caller.
+      switch (cc) {
+        case lt:
+          c(OLT, D, cmp1, cmp2);
+          bc1t(target);
+          break;
+        case gt:
+          c(ULE, D, cmp1, cmp2);
+          bc1f(target);
+          break;
+        case ge:
+          c(ULT, D, cmp1, cmp2);
+          bc1f(target);
+          break;
+        case le:
+          c(OLE, D, cmp1, cmp2);
+          bc1t(target);
+          break;
+        case eq:
+          c(EQ, D, cmp1, cmp2);
+          bc1t(target);
+          break;
+        case ueq:
+          c(UEQ, D, cmp1, cmp2);
+          bc1t(target);
+          break;
+        case ne:
+          c(EQ, D, cmp1, cmp2);
+          bc1f(target);
+          break;
+        case nue:
+          c(UEQ, D, cmp1, cmp2);
+          bc1f(target);
+          break;
+        default:
+          CHECK(0);
+      }
+    }
+  } else {
+    if (target) {
+      // Here NaN cases were either handled by this function or are assumed to
+      // have been handled by the caller.
+      // Unsigned conditions are treated as their signed counterpart.
+      // Use f31 for the comparison result; it is valid in fp64 (FR = 1) mode.
+      DCHECK(!cmp1.is(f31) && !cmp2.is(f31));
+      switch (cc) {
+        case lt:
+          cmp(OLT, L, f31, cmp1, cmp2);
+          bc1nez(target, f31);
+          break;
+        case gt:
+          cmp(ULE, L, f31, cmp1, cmp2);
+          bc1eqz(target, f31);
+          break;
+        case ge:
+          cmp(ULT, L, f31, cmp1, cmp2);
+          bc1eqz(target, f31);
+          break;
+        case le:
+          cmp(OLE, L, f31, cmp1, cmp2);
+          bc1nez(target, f31);
+          break;
+        case eq:
+          cmp(EQ, L, f31, cmp1, cmp2);
+          bc1nez(target, f31);
+          break;
+        case ueq:
+          cmp(UEQ, L, f31, cmp1, cmp2);
+          bc1nez(target, f31);
+          break;
+        case ne:
+          cmp(EQ, L, f31, cmp1, cmp2);
+          bc1eqz(target, f31);
+          break;
+        case nue:
+          cmp(UEQ, L, f31, cmp1, cmp2);
+          bc1eqz(target, f31);
+          break;
+        default:
+          CHECK(0);
+      }
+    }
+  }
+
+  if (bd == PROTECT) {
+    nop();
+  }
+}
+
+
+void MacroAssembler::Move(FPURegister dst, double imm) {
+  static const DoubleRepresentation minus_zero(-0.0);
+  static const DoubleRepresentation zero(0.0);
+  DoubleRepresentation value_rep(imm);
+  // Handle special values first.
+  bool force_load = dst.is(kDoubleRegZero);
+  if (value_rep == zero && !force_load) {
+    mov_d(dst, kDoubleRegZero);
+  } else if (value_rep == minus_zero && !force_load) {
+    neg_d(dst, kDoubleRegZero);
+  } else {
+    uint32_t lo, hi;
+    DoubleAsTwoUInt32(imm, &lo, &hi);
+    // Move the low part of the double into the lower bits of the corresponding
+    // FPU register.
+    if (lo != 0) {
+      li(at, Operand(lo));
+      mtc1(at, dst);
+    } else {
+      mtc1(zero_reg, dst);
+    }
+    // Move the high part of the double into the high bits of the corresponding
+    // FPU register.
+    if (hi != 0) {
+      li(at, Operand(hi));
+      mthc1(at, dst);
+    } else {
+      mthc1(zero_reg, dst);
+    }
+  }
+}
+
+
+void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
+  if (kArchVariant == kMips64r6) {
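+    // r6 removed the movz/movn conditional moves, so emulate with a short
+    // branch around the move.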
+    Label done;
+    Branch(&done, ne, rt, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movz(rd, rs, rt);
+  }
+}
+
+
+void MacroAssembler::Movn(Register rd, Register rs, Register rt) {
+  if (kArchVariant == kMips64r6) {
+    Label done;
+    Branch(&done, eq, rt, Operand(zero_reg));
+    mov(rd, rs);
+    bind(&done);
+  } else {
+    movn(rd, rs, rt);
+  }
+}
+
+
+void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
+  movt(rd, rs, cc);
+}
+
+
+void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
+  movf(rd, rs, cc);
+}
+
+
+void MacroAssembler::Clz(Register rd, Register rs) {
+  clz(rd, rs);
+}
+
+
+void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
+                                     Register result,
+                                     DoubleRegister double_input,
+                                     Register scratch,
+                                     DoubleRegister double_scratch,
+                                     Register except_flag,
+                                     CheckForInexactConversion check_inexact) {
+  DCHECK(!result.is(scratch));
+  DCHECK(!double_input.is(double_scratch));
+  DCHECK(!except_flag.is(scratch));
+
+  Label done;
+
+  // Clear the except flag (0 = no exception).
+  mov(except_flag, zero_reg);
+
+  // Test for values that can be exactly represented as a signed 32-bit integer.
+  cvt_w_d(double_scratch, double_input);
+  mfc1(result, double_scratch);
+  cvt_d_w(double_scratch, double_scratch);
+  BranchF(&done, NULL, eq, double_input, double_scratch);
+
+  int32_t except_mask = kFCSRFlagMask;  // Assume interested in all exceptions.
+
+  if (check_inexact == kDontCheckForInexactConversion) {
+    // Ignore inexact exceptions.
+    except_mask &= ~kFCSRInexactFlagMask;
+  }
+
+  // Save FCSR.
+  cfc1(scratch, FCSR);
+  // Disable FPU exceptions.
+  ctc1(zero_reg, FCSR);
+
+  // Do operation based on rounding mode.
+  switch (rounding_mode) {
+    case kRoundToNearest:
+      Round_w_d(double_scratch, double_input);
+      break;
+    case kRoundToZero:
+      Trunc_w_d(double_scratch, double_input);
+      break;
+    case kRoundToPlusInf:
+      Ceil_w_d(double_scratch, double_input);
+      break;
+    case kRoundToMinusInf:
+      Floor_w_d(double_scratch, double_input);
+      break;
+  }  // End of switch-statement.
+
+  // Retrieve FCSR.
+  cfc1(except_flag, FCSR);
+  // Restore FCSR.
+  ctc1(scratch, FCSR);
+  // Move the converted value into the result register.
+  mfc1(result, double_scratch);
+
+  // Check for fpu exceptions.
+  And(except_flag, except_flag, Operand(except_mask));
+
+  bind(&done);
+}
+
+
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+                                                DoubleRegister double_input,
+                                                Label* done) {
+  DoubleRegister single_scratch = kLithiumScratchDouble.low();
+  Register scratch = at;
+  Register scratch2 = t9;
+
+  // Clear cumulative exception flags and save the FCSR.
+  cfc1(scratch2, FCSR);
+  ctc1(zero_reg, FCSR);
+  // Try a conversion to a signed integer.
+  trunc_w_d(single_scratch, double_input);
+  mfc1(result, single_scratch);
+  // Retrieve and restore the FCSR.
+  cfc1(scratch, FCSR);
+  ctc1(scratch2, FCSR);
+  // Check for overflow and NaNs.
+  And(scratch,
+      scratch,
+      kFCSROverflowFlagMask | kFCSRUnderflowFlagMask | kFCSRInvalidOpFlagMask);
+  // If we had no exceptions we are done.
+  Branch(done, eq, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+                                       DoubleRegister double_input) {
+  Label done;
+
+  TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through, the inline version didn't succeed; call the stub.
+  push(ra);
+  Dsubu(sp, sp, Operand(kDoubleSize));  // Put input on stack.
+  sdc1(double_input, MemOperand(sp, 0));
+
+  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
+  CallStub(&stub);
+
+  Daddu(sp, sp, Operand(kDoubleSize));
+  pop(ra);
+
+  bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+  Label done;
+  DoubleRegister double_scratch = f12;
+  DCHECK(!result.is(object));
+
+  ldc1(double_scratch,
+       MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag));
+  TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+  // If we fell through then inline version didn't succeed - call stub instead.
+  push(ra);
+  DoubleToIStub stub(isolate(),
+                     object,
+                     result,
+                     HeapNumber::kValueOffset - kHeapObjectTag,
+                     true,
+                     true);
+  CallStub(&stub);
+  pop(ra);
+
+  bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object,
+                                       Register result,
+                                       Register heap_number_map,
+                                       Register scratch,
+                                       Label* not_number) {
+  Label done;
+  DCHECK(!result.is(object));
+
+  UntagAndJumpIfSmi(result, object, &done);
+  JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
+  TruncateHeapNumberToI(result, object);
+
+  bind(&done);
+}
+
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst,
+                                         Register src,
+                                         int num_least_bits) {
+  // Ext(dst, src, kSmiTagSize, num_least_bits);
+  SmiUntag(dst, src);
+  And(dst, dst, Operand((1 << num_least_bits) - 1));
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst,
+                                           Register src,
+                                           int num_least_bits) {
+  DCHECK(!src.is(dst));
+  And(dst, src, Operand((1 << num_least_bits) - 1));
+}
+
+
+// Emulated conditional branches do not emit a nop in the branch delay slot.
+//
+// BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
+#define BRANCH_ARGS_CHECK(cond, rs, rt) DCHECK(                                \
+    (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) ||          \
+    (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
+
+
+void MacroAssembler::Branch(int16_t offset, BranchDelaySlot bdslot) {
+  BranchShort(offset, bdslot);
+}
+
+
+void MacroAssembler::Branch(int16_t offset, Condition cond, Register rs,
+                            const Operand& rt,
+                            BranchDelaySlot bdslot) {
+  BranchShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::Branch(Label* L, BranchDelaySlot bdslot) {
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, bdslot);
+    } else {
+      Jr(L, bdslot);
+    }
+  } else {
+    if (is_trampoline_emitted()) {
+      Jr(L, bdslot);
+    } else {
+      BranchShort(L, bdslot);
+    }
+  }
+}
+
+
+void MacroAssembler::Branch(Label* L, Condition cond, Register rs,
+                            const Operand& rt,
+                            BranchDelaySlot bdslot) {
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchShort(L, cond, rs, rt, bdslot);
+    } else {
+      if (cond != cc_always) {
+        Label skip;
+        Condition neg_cond = NegateCondition(cond);
+        BranchShort(&skip, neg_cond, rs, rt);
+        Jr(L, bdslot);
+        bind(&skip);
+      } else {
+        Jr(L, bdslot);
+      }
+    }
+  } else {
+    if (is_trampoline_emitted()) {
+      if (cond != cc_always) {
+        Label skip;
+        Condition neg_cond = NegateCondition(cond);
+        BranchShort(&skip, neg_cond, rs, rt);
+        Jr(L, bdslot);
+        bind(&skip);
+      } else {
+        Jr(L, bdslot);
+      }
+    } else {
+      BranchShort(L, cond, rs, rt, bdslot);
+    }
+  }
+}
+
+
+void MacroAssembler::Branch(Label* L,
+                            Condition cond,
+                            Register rs,
+                            Heap::RootListIndex index,
+                            BranchDelaySlot bdslot) {
+  LoadRoot(at, index);
+  Branch(L, cond, rs, Operand(at), bdslot);
+}
+
+
+void MacroAssembler::BranchShort(int16_t offset, BranchDelaySlot bdslot) {
+  b(offset);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
+                                 const Operand& rt,
+                                 BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+  DCHECK(!rs.is(zero_reg));
+  Register r2 = no_reg;
+  Register scratch = at;
+
+  if (rt.is_reg()) {
+    // NOTE: 'at' can be clobbered by Branch but it is legal to use it as rs or
+    // rt.
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    r2 = rt.rm_;
+    switch (cond) {
+      case cc_always:
+        b(offset);
+        break;
+      case eq:
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison.
+      case greater:
+        if (r2.is(zero_reg)) {
+          bgtz(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (r2.is(zero_reg)) {
+          bgez(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (r2.is(zero_reg)) {
+          bltz(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (r2.is(zero_reg)) {
+          blez(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (r2.is(zero_reg)) {
+          bgtz(rs, offset);
+        } else {
+          sltu(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (r2.is(zero_reg)) {
+          bgez(rs, offset);
+        } else {
+          sltu(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (r2.is(zero_reg)) {
+          // No code needs to be emitted.
+          return;
+        } else {
+          sltu(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (r2.is(zero_reg)) {
+          b(offset);
+        } else {
+          sltu(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching
+    // the target.
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    switch (cond) {
+      case cc_always:
+        b(offset);
+        break;
+      case eq:
+        // We don't want any other register but scratch clobbered.
+        DCHECK(!scratch.is(rs));
+        r2 = scratch;
+        li(r2, rt);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        // We don't want any other register but scratch clobbered.
+        DCHECK(!scratch.is(rs));
+        r2 = scratch;
+        li(r2, rt);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison.
+      case greater:
+        if (rt.imm64_ == 0) {
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (rt.imm64_ == 0) {
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm64_)) {
+          slti(scratch, rs, rt.imm64_);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (rt.imm64_ == 0) {
+          bltz(rs, offset);
+        } else if (is_int16(rt.imm64_)) {
+          slti(scratch, rs, rt.imm64_);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (rt.imm64_ == 0) {
+          blez(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (rt.imm64_ == 0) {
+          bgtz(rs, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (rt.imm64_ == 0) {
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm64_)) {
+          sltiu(scratch, rs, rt.imm64_);
+          beq(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (rt.imm64_ == 0) {
+          // No code needs to be emitted.
+          return;
+        } else if (is_int16(rt.imm64_)) {
+          sltiu(scratch, rs, rt.imm64_);
+          bne(scratch, zero_reg, offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (rt.imm64_ == 0) {
+          b(offset);
+        } else {
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchShort(Label* L, BranchDelaySlot bdslot) {
+  // Use shifted_branch_offset only just before generating the branch
+  // instruction, as the location is remembered for patching the target.
+
+  b(shifted_branch_offset(L, false));
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
+                                 const Operand& rt,
+                                 BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+
+  int32_t offset = 0;
+  Register r2 = no_reg;
+  Register scratch = at;
+  if (rt.is_reg()) {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    r2 = rt.rm_;
+    // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching
+    // the target.
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        b(offset);
+        break;
+      case eq:
+        offset = shifted_branch_offset(L, false);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        offset = shifted_branch_offset(L, false);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison.
+      case greater:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bltz(rs, offset);
+        } else {
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          blez(rs, offset);
+        } else {
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else {
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (r2.is(zero_reg)) {
+          // No code needs to be emitted.
+          return;
+        } else {
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (r2.is(zero_reg)) {
+          offset = shifted_branch_offset(L, false);
+          b(offset);
+        } else {
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    // Be careful to always use shifted_branch_offset only just before the
+    // branch instruction, as the location will be remembered for patching
+    // the target.
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        b(offset);
+        break;
+      case eq:
+        DCHECK(!scratch.is(rs));
+        r2 = scratch;
+        li(r2, rt);
+        offset = shifted_branch_offset(L, false);
+        beq(rs, r2, offset);
+        break;
+      case ne:
+        DCHECK(!scratch.is(rs));
+        r2 = scratch;
+        li(r2, rt);
+        offset = shifted_branch_offset(L, false);
+        bne(rs, r2, offset);
+        break;
+      // Signed comparison.
+      case greater:
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgtz(rs, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case greater_equal:
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm64_)) {
+          slti(scratch, rs, rt.imm64_);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case less:
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bltz(rs, offset);
+        } else if (is_int16(rt.imm64_)) {
+          slti(scratch, rs, rt.imm64_);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case less_equal:
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          blez(rs, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          slt(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      // Unsigned comparison.
+      case Ugreater:
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bne(rs, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Ugreater_equal:
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bgez(rs, offset);
+        } else if (is_int16(rt.imm64_)) {
+          sltiu(scratch, rs, rt.imm64_);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless:
+        if (rt.imm64_ == 0) {
+          // No code needs to be emitted.
+          return;
+        } else if (is_int16(rt.imm64_)) {
+          sltiu(scratch, rs, rt.imm64_);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, rs, r2);
+          offset = shifted_branch_offset(L, false);
+          bne(scratch, zero_reg, offset);
+        }
+        break;
+      case Uless_equal:
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          beq(rs, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          sltu(scratch, r2, rs);
+          offset = shifted_branch_offset(L, false);
+          beq(scratch, zero_reg, offset);
+        }
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+  // Check that offset actually fits in an int16_t.
+  DCHECK(is_int16(offset));
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, BranchDelaySlot bdslot) {
+  BranchAndLinkShort(offset, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(int16_t offset, Condition cond, Register rs,
+                                   const Operand& rt,
+                                   BranchDelaySlot bdslot) {
+  BranchAndLinkShort(offset, cond, rs, rt, bdslot);
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, BranchDelaySlot bdslot) {
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, bdslot);
+    } else {
+      Jalr(L, bdslot);
+    }
+  } else {
+    if (is_trampoline_emitted()) {
+      Jalr(L, bdslot);
+    } else {
+      BranchAndLinkShort(L, bdslot);
+    }
+  }
+}
+
+
+void MacroAssembler::BranchAndLink(Label* L, Condition cond, Register rs,
+                                   const Operand& rt,
+                                   BranchDelaySlot bdslot) {
+  if (L->is_bound()) {
+    if (is_near(L)) {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    } else {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    }
+  } else {
+    if (is_trampoline_emitted()) {
+      Label skip;
+      Condition neg_cond = NegateCondition(cond);
+      BranchShort(&skip, neg_cond, rs, rt);
+      Jalr(L, bdslot);
+      bind(&skip);
+    } else {
+      BranchAndLinkShort(L, cond, rs, rt, bdslot);
+    }
+  }
+}
+
+
+// We need to use a bgezal or bltzal, but they can't be used directly with
+// the slt instructions. We could use sub or add instead, but we would miss
+// overflow cases, so we keep slt and add an intermediate third instruction.
+void MacroAssembler::BranchAndLinkShort(int16_t offset,
+                                        BranchDelaySlot bdslot) {
+  bal(offset);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(int16_t offset, Condition cond,
+                                        Register rs, const Operand& rt,
+                                        BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+  Register r2 = no_reg;
+  Register scratch = at;
+
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
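+    // Each conditional case below branches over the bal (and its padding
+    // nop) when the inverted condition holds.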
+    switch (cond) {
+      case cc_always:
+        bal(offset);
+        break;
+      case eq:
+        bne(rs, r2, 2);
+        nop();
+        bal(offset);
+        break;
+      case ne:
+        beq(rs, r2, 2);
+        nop();
+        bal(offset);
+        break;
+
+      // Signed comparison.
+      case greater:
+        // rs > rt
+        slt(scratch, r2, rs);
+        beq(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case greater_equal:
+        // rs >= rt
+        slt(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case less:
+        // rs < r2
+        slt(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case less_equal:
+        // rs <= r2
+        slt(scratch, r2, rs);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+
+
+      // Unsigned comparison.
+      case Ugreater:
+        // rs > rt
+        sltu(scratch, r2, rs);
+        beq(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case Ugreater_equal:
+        // rs >= rt
+        sltu(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case Uless:
+        // rs < r2
+        sltu(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      case Uless_equal:
+        // rs <= r2
+        sltu(scratch, r2, rs);
+        bne(scratch, zero_reg, 2);
+        nop();
+        bal(offset);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(Label* L, BranchDelaySlot bdslot) {
+  bal(shifted_branch_offset(L, false));
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::BranchAndLinkShort(Label* L, Condition cond, Register rs,
+                                        const Operand& rt,
+                                        BranchDelaySlot bdslot) {
+  BRANCH_ARGS_CHECK(cond, rs, rt);
+
+  int32_t offset = 0;
+  Register r2 = no_reg;
+  Register scratch = at;
+  if (rt.is_reg()) {
+    r2 = rt.rm_;
+  } else if (cond != cc_always) {
+    r2 = scratch;
+    li(r2, rt);
+  }
+
+  {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    switch (cond) {
+      case cc_always:
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case eq:
+        bne(rs, r2, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case ne:
+        beq(rs, r2, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+
+      // Signed comparison.
+      case greater:
+        // rs > rt
+        slt(scratch, r2, rs);
+        beq(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case greater_equal:
+        // rs >= rt
+        slt(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case less:
+        // rs < r2
+        slt(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case less_equal:
+        // rs <= r2
+        slt(scratch, r2, rs);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+
+
+      // Unsigned comparison.
+      case Ugreater:
+        // rs > rt
+        sltu(scratch, r2, rs);
+        beq(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case Ugreater_equal:
+        // rs >= rt
+        sltu(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case Uless:
+        // rs < r2
+        sltu(scratch, rs, r2);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+      case Uless_equal:
+        // rs <= r2
+        sltu(scratch, r2, rs);
+        bne(scratch, zero_reg, 2);
+        nop();
+        offset = shifted_branch_offset(L, false);
+        bal(offset);
+        break;
+
+      default:
+        UNREACHABLE();
+    }
+  }
+  // Check that offset actually fits in an int16_t.
+  DCHECK(is_int16(offset));
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::Jump(Register target,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  if (cond == cc_always) {
+    jr(target);
+  } else {
+    BRANCH_ARGS_CHECK(cond, rs, rt);
+    Branch(2, NegateCondition(cond), rs, rt);
+    jr(target);
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bd == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::Jump(intptr_t target,
+                          RelocInfo::Mode rmode,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  Label skip;
+  if (cond != cc_always) {
+    Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
+  }
+  // The first instruction of 'li' may be placed in the delay slot.
+  // This is not an issue: t9 is expected to be clobbered anyway.
+  li(t9, Operand(target, rmode));
+  Jump(t9, al, zero_reg, Operand(zero_reg), bd);
+  bind(&skip);
+}
+
+
+void MacroAssembler::Jump(Address target,
+                          RelocInfo::Mode rmode,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code,
+                          RelocInfo::Mode rmode,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
+  AllowDeferredHandleDereference embedding_raw_address;
+  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond, rs, rt, bd);
+}
+
+
+int MacroAssembler::CallSize(Register target,
+                             Condition cond,
+                             Register rs,
+                             const Operand& rt,
+                             BranchDelaySlot bd) {
+  int size = 0;
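+  // Unconditional calls emit just jalr; conditional calls emit an inverted
+  // branch, its delay-slot nop, and the jalr.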
+
+  if (cond == cc_always) {
+    size += 1;
+  } else {
+    size += 3;
+  }
+
+  if (bd == PROTECT)
+    size += 1;
+
+  return size * kInstrSize;
+}
+
+
+// Note: To call gcc-compiled C code on mips, you must call through t9.
+void MacroAssembler::Call(Register target,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  Label start;
+  bind(&start);
+  if (cond == cc_always) {
+    jalr(target);
+  } else {
+    BRANCH_ARGS_CHECK(cond, rs, rt);
+    Branch(2, NegateCondition(cond), rs, rt);
+    jalr(target);
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bd == PROTECT)
+    nop();
+
+  DCHECK_EQ(CallSize(target, cond, rs, rt, bd),
+            SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Address target,
+                             RelocInfo::Mode rmode,
+                             Condition cond,
+                             Register rs,
+                             const Operand& rt,
+                             BranchDelaySlot bd) {
+  int size = CallSize(t9, cond, rs, rt, bd);
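+  // Plus 4 instructions for loading the 48-bit target into t9 with
+  // li(..., ADDRESS_LOAD).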
+  return size + 4 * kInstrSize;
+}
+
+
+void MacroAssembler::Call(Address target,
+                          RelocInfo::Mode rmode,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  Label start;
+  bind(&start);
+  int64_t target_int = reinterpret_cast<int64_t>(target);
+  // Must record previous source positions before the
+  // li() generates a new code target.
+  positions_recorder()->WriteRecordedPositions();
+  li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
+  Call(t9, cond, rs, rt, bd);
+  DCHECK_EQ(CallSize(target, rmode, cond, rs, rt, bd),
+            SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code,
+                             RelocInfo::Mode rmode,
+                             TypeFeedbackId ast_id,
+                             Condition cond,
+                             Register rs,
+                             const Operand& rt,
+                             BranchDelaySlot bd) {
+  AllowDeferredHandleDereference using_raw_address;
+  return CallSize(reinterpret_cast<Address>(code.location()),
+      rmode, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code,
+                          RelocInfo::Mode rmode,
+                          TypeFeedbackId ast_id,
+                          Condition cond,
+                          Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bd) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  Label start;
+  bind(&start);
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
+  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
+    SetRecordedAstId(ast_id);
+    rmode = RelocInfo::CODE_TARGET_WITH_ID;
+  }
+  AllowDeferredHandleDereference embedding_raw_address;
+  Call(reinterpret_cast<Address>(code.location()), rmode, cond, rs, rt, bd);
+  DCHECK_EQ(CallSize(code, rmode, ast_id, cond, rs, rt, bd),
+            SizeOfCodeGeneratedSince(&start));
+}
+
+
+void MacroAssembler::Ret(Condition cond,
+                         Register rs,
+                         const Operand& rt,
+                         BranchDelaySlot bd) {
+  Jump(ra, cond, rs, rt, bd);
+}
+
+
+void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+
+  uint64_t imm28;
+  imm28 = jump_address(L);
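+  // j encodes a 26-bit instruction index within the current 256 MB region,
+  // so only the low 28 bits of the byte address are kept.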
+  imm28 &= kImm28Mask;
+  { BlockGrowBufferScope block_buf_growth(this);
+    // Buffer growth (and relocation) must be blocked for internal references
+    // until associated instructions are emitted and available to be patched.
+    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+    j(imm28);
+  }
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+
+  uint64_t imm64;
+  imm64 = jump_address(L);
+  { BlockGrowBufferScope block_buf_growth(this);
+    // Buffer growth (and relocation) must be blocked for internal references
+    // until associated instructions are emitted and available to be patched.
+    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+    li(at, Operand(imm64), ADDRESS_LOAD);
+  }
+  jr(at);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+
+  uint64_t imm64;
+  imm64 = jump_address(L);
+  { BlockGrowBufferScope block_buf_growth(this);
+    // Buffer growth (and relocation) must be blocked for internal references
+    // until associated instructions are emitted and available to be patched.
+    RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+    li(at, Operand(imm64), ADDRESS_LOAD);
+  }
+  jalr(at);
+
+  // Emit a nop in the branch delay slot if required.
+  if (bdslot == PROTECT)
+    nop();
+}
+
+
+void MacroAssembler::DropAndRet(int drop) {
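+  // The stack adjustment below executes in the return's delay slot, so it
+  // still takes effect before control reaches the caller.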
+  Ret(USE_DELAY_SLOT);
+  daddiu(sp, sp, drop * kPointerSize);
+}
+
+
+void MacroAssembler::DropAndRet(int drop,
+                                Condition cond,
+                                Register r1,
+                                const Operand& r2) {
+  // Both Drop and Ret need to be conditional.
+  Label skip;
+  if (cond != cc_always) {
+    Branch(&skip, NegateCondition(cond), r1, r2);
+  }
+
+  Drop(drop);
+  Ret();
+
+  if (cond != cc_always) {
+    bind(&skip);
+  }
+}
+
+
+void MacroAssembler::Drop(int count,
+                          Condition cond,
+                          Register reg,
+                          const Operand& op) {
+  if (count <= 0) {
+    return;
+  }
+
+  Label skip;
+
+  if (cond != al) {
+     Branch(&skip, NegateCondition(cond), reg, op);
+  }
+
+  daddiu(sp, sp, count * kPointerSize);
+
+  if (cond != al) {
+    bind(&skip);
+  }
+}
+
+
+void MacroAssembler::Swap(Register reg1,
+                          Register reg2,
+                          Register scratch) {
+  if (scratch.is(no_reg)) {
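+    // No scratch register available; fall back to the three-XOR swap.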
+    Xor(reg1, reg1, Operand(reg2));
+    Xor(reg2, reg2, Operand(reg1));
+    Xor(reg1, reg1, Operand(reg2));
+  } else {
+    mov(scratch, reg1);
+    mov(reg1, reg2);
+    mov(reg2, scratch);
+  }
+}
+
+
+void MacroAssembler::Call(Label* target) {
+  BranchAndLink(target);
+}
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+  li(at, Operand(handle));
+  push(at);
+}
+
+
+void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
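+  // Split the 64-bit value into two halves, each stored Smi-style with its
+  // payload in the upper 32 bits, so the GC can scan them on the stack.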
+  DCHECK(!src.is(scratch));
+  mov(scratch, src);
+  dsrl32(src, src, 0);
+  dsll32(src, src, 0);
+  push(src);
+  dsll32(scratch, scratch, 0);
+  push(scratch);
+}
+
+
+void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
+  DCHECK(!dst.is(scratch));
+  pop(scratch);
+  dsrl32(scratch, scratch, 0);
+  pop(dst);
+  dsrl32(dst, dst, 0);
+  dsll32(dst, dst, 0);
+  or_(dst, dst, scratch);
+}
+
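PushRegisterAsTwoSmis and PopRegisterAsTwoSmis above split a 64-bit value into two words whose payload sits in the upper 32 bits, the smi position on this port, so the GC scans two valid smis instead of one raw 64-bit integer. A rough C++ model of the shift sequences (hypothetical helper names, assuming the upper-32-bit smi encoding):

#include <cstdint>

// Split: dsrl32/dsll32 keep the high half in place with the low bits cleared;
// a single dsll32 moves the low half up into the smi payload position.
void SplitAsTwoSmis(uint64_t v, uint64_t* high_smi, uint64_t* low_smi) {
  *high_smi = v & 0xFFFFFFFF00000000ull;
  *low_smi = v << 32;
}

// Rejoin, as PopRegisterAsTwoSmis does with dsrl32/dsll32/or_.
uint64_t JoinFromTwoSmis(uint64_t high_smi, uint64_t low_smi) {
  return high_smi | (low_smi >> 32);
}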
+
+void MacroAssembler::DebugBreak() {
+  PrepareCEntryArgs(0);
+  PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
+  CEntryStub ces(isolate(), 1);
+  DCHECK(AllowThisStubCall(&ces));
+  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+
+// ---------------------------------------------------------------------------
+// Exception handling.
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+                                    int handler_index) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // For the JSEntry handler, we must preserve a0-a3 and s0.
+  // a5-a7 are available. We will build up the handler from the bottom by
+  // pushing on the stack.
+  // Set up the code object (a5) and the state (a6) for pushing.
+  unsigned state =
+      StackHandler::IndexField::encode(handler_index) |
+      StackHandler::KindField::encode(kind);
+  li(a5, Operand(CodeObject()), CONSTANT_SIZE);
+  li(a6, Operand(state));
+
+  // Push the frame pointer, context, state, and code object.
+  if (kind == StackHandler::JS_ENTRY) {
+    DCHECK_EQ(Smi::FromInt(0), 0);
+    // The second zero_reg indicates no context.
+    // The first zero_reg is the NULL frame pointer.
+    // The operands are reversed to match the order of MultiPush/Pop.
+    Push(zero_reg, zero_reg, a6, a5);
+  } else {
+    MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit());
+  }
+
+  // Link the current handler as the next handler.
+  li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  ld(a5, MemOperand(a6));
+  push(a5);
+  // Set this new handler as the current one.
+  sd(sp, MemOperand(a6));
+}
+
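PushTryHandler above threads a singly linked list through the stack: the word at kNextOffset points at the previous handler record, and the per-isolate slot at Isolate::kHandlerAddress points at the newest one. A simplified C++ model of the linking discipline (hypothetical types, not the real StackHandler layout):

struct HandlerRecord {
  HandlerRecord* next;  // kNextOffset == 0: the first field pushed and popped.
  // Code object, state, context and frame pointer follow in the real layout.
};

HandlerRecord* handler_head = nullptr;  // Stands in for kHandlerAddress.

void PushHandler(HandlerRecord* rec) {
  rec->next = handler_head;  // Link the current handler as the next handler.
  handler_head = rec;        // Set this new handler as the current one.
}

void PopHandler() {
  handler_head = handler_head->next;  // Restore the previous handler.
}

PopTryHandler below reverses the second step, and ThrowUncatchable further down walks the same next chain until it reaches a JS_ENTRY record.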
+
+void MacroAssembler::PopTryHandler() {
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  pop(a1);
+  Daddu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+  li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  sd(a1, MemOperand(at));
+}
+
+
+void MacroAssembler::JumpToHandlerEntry() {
+  // Compute the handler entry address and jump to it.  The handler table is
+  // a fixed array of (smi-tagged) code offsets.
+  // v0 = exception, a1 = code object, a2 = state.
+  Uld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
+  Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  dsrl(a2, a2, StackHandler::kKindWidth);  // Handler index.
+  dsll(a2, a2, kPointerSizeLog2);
+  Daddu(a2, a3, a2);
+  ld(a2, MemOperand(a2));  // Smi-tagged offset.
+  Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
+  dsra32(t9, a2, 0);
+  Daddu(t9, t9, a1);
+  Jump(t9);  // Jump.
+}
+
+
+void MacroAssembler::Throw(Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in v0.
+  Move(v0, value);
+
+  // Drop the stack pointer to the top of the top handler.
+  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
+                                   isolate())));
+  ld(sp, MemOperand(a3));
+
+  // Restore the next handler.
+  pop(a2);
+  sd(a2, MemOperand(a3));
+
+  // Get the code object (a1) and state (a2).  Restore the context and frame
+  // pointer.
+  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
+
+  // If the handler is a JS frame, restore the context to the frame.
+  // (kind == JS_ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+  // or cp.
+  Label done;
+  Branch(&done, eq, cp, Operand(zero_reg));
+  sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  bind(&done);
+
+  JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in v0.
+  if (!value.is(v0)) {
+    mov(v0, value);
+  }
+  // Drop the stack pointer to the top of the top stack handler.
+  li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  ld(sp, MemOperand(a3));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label fetch_next, check_kind;
+  jmp(&check_kind);
+  bind(&fetch_next);
+  ld(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+  ld(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
+  And(a2, a2, Operand(StackHandler::KindField::kMask));
+  Branch(&fetch_next, ne, a2, Operand(zero_reg));
+
+  // Set the top handler address to next handler past the top ENTRY handler.
+  pop(a2);
+  sd(a2, MemOperand(a3));
+
+  // Get the code object (a1) and state (a2).  Clear the context and frame
+  // pointer (0 was saved in the handler).
+  MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
+
+  JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::Allocate(int object_size,
+                              Register result,
+                              Register scratch1,
+                              Register scratch2,
+                              Label* gc_required,
+                              AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  if (!FLAG_inline_new) {
+    if (emit_debug_code()) {
+      // Trash the registers to simulate an allocation failure.
+      li(result, 0x7091);
+      li(scratch1, 0x7191);
+      li(scratch2, 0x7291);
+    }
+    jmp(gc_required);
+    return;
+  }
+
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
+  DCHECK(!scratch1.is(t9));
+  DCHECK(!scratch2.is(t9));
+  DCHECK(!result.is(t9));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  DCHECK(0 == (object_size & kObjectAlignmentMask));
+
+  // Check relative positions of allocation top and limit addresses.
+  // ARM adds additional checks to make sure the ldm instruction can be
+  // used. On MIPS we don't have ldm, so no additional checks are needed.
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+  intptr_t top   =
+      reinterpret_cast<intptr_t>(allocation_top.address());
+  intptr_t limit =
+      reinterpret_cast<intptr_t>(allocation_limit.address());
+  DCHECK((limit - top) == kPointerSize);
+
+  // Set up allocation top address and object size registers.
+  Register topaddr = scratch1;
+  li(topaddr, Operand(allocation_top));
+
+  // This code stores a temporary value in t9.
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into t9.
+    ld(result, MemOperand(topaddr));
+    ld(t9, MemOperand(topaddr, kPointerSize));
+  } else {
+    if (emit_debug_code()) {
+      // Assert that result actually contains top on entry. t9 is used
+      // immediately below, so this use of t9 does not make register contents
+      // differ between debug and release mode.
+      ld(t9, MemOperand(topaddr));
+      Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+    }
+    // Load allocation limit into t9. Result already contains allocation top.
+    ld(t9, MemOperand(topaddr, limit - top));
+  }
+
+  DCHECK(kPointerSize == kDoubleSize);
+  if (emit_debug_code()) {
+    And(at, result, Operand(kDoubleAlignmentMask));
+    Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top.
+  Daddu(scratch2, result, Operand(object_size));
+  Branch(gc_required, Ugreater, scratch2, Operand(t9));
+  sd(scratch2, MemOperand(topaddr));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    Daddu(result, result, Operand(kHeapObjectTag));
+  }
+}
+
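Allocate above (and the register-sized overload that follows) is a bump-pointer fast path: load the current top, add the byte size, bail to gc_required if the new top passes the limit, otherwise publish the new top. The core arithmetic in portable C++ (a sketch with a hypothetical AllocationArea; in the generated code the limit cell sits one word after the top cell, which is what the (limit - top) == kPointerSize DCHECK verifies):

#include <cstddef>
#include <cstdint>

struct AllocationArea {
  uintptr_t top;    // Next free address.
  uintptr_t limit;  // End of the allocatable region.
};

// Returns the untagged object start, or 0 when the caller must go to the GC
// (the Branch(gc_required, Ugreater, scratch2, Operand(t9)) path above).
uintptr_t BumpAllocate(AllocationArea* area, size_t object_size) {
  uintptr_t result = area->top;
  uintptr_t new_top = result + object_size;
  if (new_top > area->limit) return 0;  // New space exhausted.
  area->top = new_top;                  // sd(scratch2, MemOperand(topaddr)).
  return result;  // The TAG_OBJECT flag then adds kHeapObjectTag.
}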
+
+void MacroAssembler::Allocate(Register object_size,
+                              Register result,
+                              Register scratch1,
+                              Register scratch2,
+                              Label* gc_required,
+                              AllocationFlags flags) {
+  if (!FLAG_inline_new) {
+    if (emit_debug_code()) {
+      // Trash the registers to simulate an allocation failure.
+      li(result, 0x7091);
+      li(scratch1, 0x7191);
+      li(scratch2, 0x7291);
+    }
+    jmp(gc_required);
+    return;
+  }
+
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
+  DCHECK(!object_size.is(t9));
+  DCHECK(!scratch1.is(t9) && !scratch2.is(t9) && !result.is(t9));
+
+  // Check relative positions of allocation top and limit addresses.
+  // ARM adds additional checks to make sure the ldm instruction can be
+  // used. On MIPS we don't have ldm, so no additional checks are needed.
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+  intptr_t top   =
+      reinterpret_cast<intptr_t>(allocation_top.address());
+  intptr_t limit =
+      reinterpret_cast<intptr_t>(allocation_limit.address());
+  DCHECK((limit - top) == kPointerSize);
+
+  // Set up allocation top address and object size registers.
+  Register topaddr = scratch1;
+  li(topaddr, Operand(allocation_top));
+
+  // This code stores a temporary value in t9.
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into t9.
+    ld(result, MemOperand(topaddr));
+    ld(t9, MemOperand(topaddr, kPointerSize));
+  } else {
+    if (emit_debug_code()) {
+      // Assert that result actually contains top on entry. t9 is used
+      // immediately below, so this use of t9 does not make register contents
+      // differ between debug and release mode.
+      ld(t9, MemOperand(topaddr));
+      Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
+    }
+    // Load allocation limit into t9. Result already contains allocation top.
+    ld(t9, MemOperand(topaddr, limit - top));
+  }
+
+  DCHECK(kPointerSize == kDoubleSize);
+  if (emit_debug_code()) {
+    And(at, result, Operand(kDoubleAlignmentMask));
+    Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top. Object size may be in words so a shift is
+  // required to get the number of bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    dsll(scratch2, object_size, kPointerSizeLog2);
+    Daddu(scratch2, result, scratch2);
+  } else {
+    Daddu(scratch2, result, Operand(object_size));
+  }
+  Branch(gc_required, Ugreater, scratch2, Operand(t9));
+
+  // Update allocation top. result temporarily holds the new top.
+  if (emit_debug_code()) {
+    And(t9, scratch2, Operand(kObjectAlignmentMask));
+    Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
+  }
+  sd(scratch2, MemOperand(topaddr));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    Daddu(result, result, Operand(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+                                              Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+
+  // Make sure the object has no tag before resetting top.
+  And(object, object, Operand(~kHeapObjectTagMask));
+#ifdef DEBUG
+  // Check that the object un-allocated is below the current top.
+  li(scratch, Operand(new_space_allocation_top));
+  ld(scratch, MemOperand(scratch));
+  Check(less, kUndoAllocationOfNonAllocatedMemory,
+      object, Operand(scratch));
+#endif
+  // Write the address of the object to un-allocate as the current top.
+  li(scratch, Operand(new_space_allocation_top));
+  sd(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result,
+                                           Register length,
+                                           Register scratch1,
+                                           Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  dsll(scratch1, length, 1);  // Length in bytes, not chars.
+  daddiu(scratch1, scratch1,
+       kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
+  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+  // Allocate two-byte string in new space.
+  Allocate(scratch1,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result,
+                      length,
+                      Heap::kStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
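The size computation above is the usual round-up-to-alignment idiom: scale the character count (two bytes per char), add the header, add kObjectAlignmentMask, and clear the low bits. A quick C++ sanity check (illustrative constants, assuming 8-byte object alignment; not the real header size):

#include <cstddef>

const size_t kAlignment = 8;                  // Assumed object alignment.
const size_t kAlignmentMask = kAlignment - 1;
const size_t kHeaderSize = 24;                // Stand-in header size.

// Mirrors: dsll(scratch1, length, 1); daddiu(...); And(..., ~mask).
size_t TwoByteStringSize(size_t length) {
  return ((length << 1) + kHeaderSize + kAlignmentMask) & ~kAlignmentMask;
}
// TwoByteStringSize(3) == 32: 6 payload bytes + 24 header, rounded up to 8.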
+
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string
+  // while observing object alignment.
+  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK(kCharSize == 1);
+  daddiu(scratch1, length,
+      kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
+  And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
+
+  // Allocate one-byte string in new space.
+  Allocate(scratch1,
+           result,
+           scratch2,
+           scratch3,
+           gc_required,
+           TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result,
+                                               Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+  InitializeNewString(result,
+                      length,
+                      Heap::kConsStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  Allocate(ConsString::kSize,
+           result,
+           scratch1,
+           scratch2,
+           gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result,
+                      length,
+                      Heap::kSlicedStringMapRootIndex,
+                      scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name) {
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  Label succeed;
+  And(at, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  Branch(&succeed, eq, at, Operand(zero_reg));
+  Branch(not_unique_name, ne, reg, Operand(SYMBOL_TYPE));
+
+  bind(&succeed);
+}
+
+
+// Allocates a heap number or jumps to the label if the young space is full and
+// a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result,
+                                        Register scratch1,
+                                        Register scratch2,
+                                        Register heap_number_map,
+                                        Label* need_gc,
+                                        TaggingMode tagging_mode,
+                                        MutableMode mode) {
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
+           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+
+  Heap::RootListIndex map_index = mode == MUTABLE
+      ? Heap::kMutableHeapNumberMapRootIndex
+      : Heap::kHeapNumberMapRootIndex;
+  AssertIsRoot(heap_number_map, map_index);
+
+  // Store heap number map in the allocated object.
+  if (tagging_mode == TAG_RESULT) {
+    sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
+  } else {
+    sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+  }
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(Register result,
+                                                 FPURegister value,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+  AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required);
+  sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
+void MacroAssembler::CopyFields(Register dst,
+                                Register src,
+                                RegList temps,
+                                int field_count) {
+  DCHECK((temps & dst.bit()) == 0);
+  DCHECK((temps & src.bit()) == 0);
+  // Primitive implementation using only one temporary register.
+
+  Register tmp = no_reg;
+  // Find a temp register in temps list.
+  for (int i = 0; i < kNumRegisters; i++) {
+    if ((temps & (1 << i)) != 0) {
+      tmp.code_ = i;
+      break;
+    }
+  }
+  DCHECK(!tmp.is(no_reg));
+
+  for (int i = 0; i < field_count; i++) {
+    ld(tmp, FieldMemOperand(src, i * kPointerSize));
+    sd(tmp, FieldMemOperand(dst, i * kPointerSize));
+  }
+}
+
+
+void MacroAssembler::CopyBytes(Register src,
+                               Register dst,
+                               Register length,
+                               Register scratch) {
+  Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
+
+  // Align src before copying in word size chunks.
+  Branch(&byte_loop, le, length, Operand(kPointerSize));
+  bind(&align_loop_1);
+  And(scratch, src, kPointerSize - 1);
+  Branch(&word_loop, eq, scratch, Operand(zero_reg));
+  lbu(scratch, MemOperand(src));
+  Daddu(src, src, 1);
+  sb(scratch, MemOperand(dst));
+  Daddu(dst, dst, 1);
+  Dsubu(length, length, Operand(1));
+  Branch(&align_loop_1, ne, length, Operand(zero_reg));
+
+  // Copy bytes in word size chunks.
+  bind(&word_loop);
+  if (emit_debug_code()) {
+    And(scratch, src, kPointerSize - 1);
+    Assert(eq, kExpectingAlignmentForCopyBytes,
+        scratch, Operand(zero_reg));
+  }
+  Branch(&byte_loop, lt, length, Operand(kPointerSize));
+  ld(scratch, MemOperand(src));
+  Daddu(src, src, kPointerSize);
+
+  // TODO(kalmard) check if this can be optimized to use sw in most cases.
+  // Can't use unaligned access - copy byte by byte.
+  sb(scratch, MemOperand(dst, 0));
+  dsrl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 1));
+  dsrl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 2));
+  dsrl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 3));
+  dsrl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 4));
+  dsrl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 5));
+  dsrl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 6));
+  dsrl(scratch, scratch, 8);
+  sb(scratch, MemOperand(dst, 7));
+  Daddu(dst, dst, 8);
+
+  Dsubu(length, length, Operand(kPointerSize));
+  Branch(&word_loop);
+
+  // Copy the last bytes if any left.
+  bind(&byte_loop);
+  Branch(&done, eq, length, Operand(zero_reg));
+  bind(&byte_loop_1);
+  lbu(scratch, MemOperand(src));
+  Daddu(src, src, 1);
+  sb(scratch, MemOperand(dst));
+  Daddu(dst, dst, 1);
+  Dsubu(length, length, Operand(1));
+  Branch(&byte_loop_1, ne, length, Operand(zero_reg));
+  bind(&done);
+}
+
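CopyBytes above is the familiar three-phase copy: single bytes until src is word-aligned, then word-sized chunks (stored byte by byte because dst may still be unaligned), then the sub-word tail. A compact C++ rendering of the same structure (an illustrative model only; the real routine also asserts src alignment under emit_debug_code):

#include <cstddef>
#include <cstdint>
#include <cstring>

void CopyBytesModel(const uint8_t* src, uint8_t* dst, size_t length) {
  const size_t kWord = sizeof(uintptr_t);
  // Phase 1: byte copies until src is word-aligned (align_loop_1).
  while (length > kWord && (reinterpret_cast<uintptr_t>(src) & (kWord - 1))) {
    *dst++ = *src++;
    --length;
  }
  // Phase 2: word-sized chunks; the store stays byte-wise since dst may be
  // unaligned, matching the little-endian sb/dsrl sequence in the word_loop.
  while (length >= kWord) {
    uintptr_t chunk;
    memcpy(&chunk, src, kWord);  // Aligned load, like the ld above.
    for (size_t i = 0; i < kWord; ++i) {
      dst[i] = static_cast<uint8_t>(chunk >> (8 * i));
    }
    src += kWord;
    dst += kWord;
    length -= kWord;
  }
  // Phase 3: remaining tail bytes (byte_loop/byte_loop_1).
  while (length--) *dst++ = *src++;
}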
+
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label loop, entry;
+  Branch(&entry);
+  bind(&loop);
+  sd(filler, MemOperand(start_offset));
+  Daddu(start_offset, start_offset, kPointerSize);
+  bind(&entry);
+  Branch(&loop, lt, start_offset, Operand(end_offset));
+}
+
+
+void MacroAssembler::CheckFastElements(Register map,
+                                       Register scratch,
+                                       Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 2);
+  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  Branch(fail, hi, scratch,
+         Operand(Map::kMaximumBitField2FastHoleyElementValue));
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map,
+                                             Register scratch,
+                                             Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 2);
+  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  Branch(fail, ls, scratch,
+         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  Branch(fail, hi, scratch,
+         Operand(Map::kMaximumBitField2FastHoleyElementValue));
+}
+
+
+void MacroAssembler::CheckFastSmiElements(Register map,
+                                          Register scratch,
+                                          Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  lbu(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  Branch(fail, hi, scratch,
+         Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
+                                                 Register key_reg,
+                                                 Register elements_reg,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Register scratch3,
+                                                 Label* fail,
+                                                 int elements_offset) {
+  Label smi_value, maybe_nan, have_double_value, is_nan, done;
+  Register mantissa_reg = scratch2;
+  Register exponent_reg = scratch3;
+
+  // Handle smi values specially.
+  JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  CheckMap(value_reg,
+           scratch1,
+           Heap::kHeapNumberMapRootIndex,
+           fail,
+           DONT_DO_SMI_CHECK);
+
+  // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
+  // in the exponent.
+  li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
+  lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
+  Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
+
+  lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+
+  bind(&have_double_value);
+  // dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  dsra(scratch1, key_reg, 32 - kDoubleSizeLog2);
+  Daddu(scratch1, scratch1, elements_reg);
+  sw(mantissa_reg, FieldMemOperand(
+     scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
+  uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
+      sizeof(kHoleNanLower32);
+  sw(exponent_reg, FieldMemOperand(scratch1, offset));
+  jmp(&done);
+
+  bind(&maybe_nan);
+  // Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN,
+  // otherwise it's Infinity or -Infinity, and the non-NaN code path applies.
+  lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
+  Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
+  bind(&is_nan);
+  // Load canonical NaN for storing into the double array.
+  LoadRoot(at, Heap::kNanValueRootIndex);
+  lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
+  lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
+  jmp(&have_double_value);
+
+  bind(&smi_value);
+  Daddu(scratch1, elements_reg,
+      Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
+              elements_offset));
+  // dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
+  dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
+  Daddu(scratch1, scratch1, scratch2);
+  // scratch1 is now the effective address of the double element.
+
+  Register untagged_value = elements_reg;
+  SmiUntag(untagged_value, value_reg);
+  mtc1(untagged_value, f2);
+  cvt_d_w(f0, f2);
+  sdc1(f0, MemOperand(scratch1, 0));
+  bind(&done);
+}
+
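The maybe_nan test above inspects the raw IEEE-754 bits: bits 20-30 of the upper word hold the exponent, so an upper word that compares (signed) at or above 0x7FF00000 has an all-ones exponent, meaning Infinity or NaN. A small C++ illustration of the bit test (hypothetical helpers, not V8 API):

#include <cstdint>
#include <cstring>

// Upper 32 bits of a double's IEEE-754 representation: sign, 11 exponent
// bits, and the top 20 mantissa bits.
uint32_t UpperWord(double d) {
  uint64_t bits;
  memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32);
}

// The maybe_nan branch condition: a signed compare against 0x7FF00000 catches
// every value with an all-ones exponent and a clear sign bit.
bool MaybeNaN(double d) {
  return static_cast<int32_t>(UpperWord(d)) >= 0x7FF00000;
}

When the test fires, the mantissa check that follows distinguishes Infinity (fraction zero, stored as-is) from NaN (replaced with the canonical NaN before storing).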
+
+void MacroAssembler::CompareMapAndBranch(Register obj,
+                                         Register scratch,
+                                         Handle<Map> map,
+                                         Label* early_success,
+                                         Condition cond,
+                                         Label* branch_to) {
+  ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
+}
+
+
+void MacroAssembler::CompareMapAndBranch(Register obj_map,
+                                         Handle<Map> map,
+                                         Label* early_success,
+                                         Condition cond,
+                                         Label* branch_to) {
+  Branch(branch_to, cond, obj_map, Operand(map));
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Handle<Map> map,
+                              Label* fail,
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
+  }
+  Label success;
+  CompareMapAndBranch(obj, scratch, map, &success, ne, fail);
+  bind(&success);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj,
+                                 Register scratch,
+                                 Handle<Map> map,
+                                 Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+  bind(&fail);
+}
+
+
+void MacroAssembler::CheckMap(Register obj,
+                              Register scratch,
+                              Heap::RootListIndex index,
+                              Label* fail,
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
+  }
+  ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  LoadRoot(at, index);
+  Branch(fail, ne, scratch, Operand(at));
+}
+
+
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+  if (IsMipsSoftFloatABI) {
+    Move(dst, v0, v1);
+  } else {
+    Move(dst, f0);  // Reg f0 is o32 ABI FP return value.
+  }
+}
+
+
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+  if (IsMipsSoftFloatABI) {
+    Move(dst, a0, a1);
+  } else {
+    Move(dst, f12);  // Reg f12 is o32 ABI FP first argument value.
+  }
+}
+
+
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
+  if (!IsMipsSoftFloatABI) {
+    Move(f12, src);
+  } else {
+    Move(a0, a1, src);
+  }
+}
+
+
+void MacroAssembler::MovToFloatResult(DoubleRegister src) {
+  if (!IsMipsSoftFloatABI) {
+    Move(f0, src);
+  } else {
+    Move(v0, v1, src);
+  }
+}
+
+
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+                                          DoubleRegister src2) {
+  if (!IsMipsSoftFloatABI) {
+    const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+    if (src2.is(f12)) {
+      DCHECK(!src1.is(fparg2));
+      Move(fparg2, src2);
+      Move(f12, src1);
+    } else {
+      Move(f12, src1);
+      Move(fparg2, src2);
+    }
+  } else {
+    Move(a0, a1, src1);
+    Move(a2, a3, src2);
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// JavaScript invokes.
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_reg,
+                                    Label* done,
+                                    bool* definitely_mismatches,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  bool definitely_matches = false;
+  *definitely_mismatches = false;
+  Label regular_invoke;
+
+  // Check whether the expected and actual arguments count match. If not,
+  // setup registers according to contract with ArgumentsAdaptorTrampoline:
+  //  a0: actual arguments count
+  //  a1: function (passed through to callee)
+  //  a2: expected arguments count
+
+  // The code below is made a lot easier because the calling code already sets
+  // up actual and expected registers according to the contract if values are
+  // passed in registers.
+  DCHECK(actual.is_immediate() || actual.reg().is(a0));
+  DCHECK(expected.is_immediate() || expected.reg().is(a2));
+  DCHECK((!code_constant.is_null() && code_reg.is(no_reg)) || code_reg.is(a3));
+
+  if (expected.is_immediate()) {
+    DCHECK(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      li(a0, Operand(actual.immediate()));
+      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+      if (expected.immediate() == sentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip the adaptation code by making it look
+        // like we have a match between expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        *definitely_mismatches = true;
+        li(a2, Operand(expected.immediate()));
+      }
+    }
+  } else if (actual.is_immediate()) {
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
+    li(a0, Operand(actual.immediate()));
+  } else {
+    Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
+  }
+
+  if (!definitely_matches) {
+    if (!code_constant.is_null()) {
+      li(a3, Operand(code_constant));
+      daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
+    }
+
+    Handle<Code> adaptor =
+        isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      Call(adaptor);
+      call_wrapper.AfterCall();
+      if (!*definitely_mismatches) {
+        Branch(done);
+      }
+    } else {
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&regular_invoke);
+  }
+}
+
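InvokePrologue above reduces to a three-way decision: the counts match (call directly), the callee does not want adaptation (the kDontAdaptArgumentsSentinel case), or the counts differ (route through the arguments adaptor). A distilled C++ decision table (hypothetical names; the sentinel value is a stand-in):

enum class InvokePath { kDirect, kAdaptor };

const int kDontAdaptArgumentsSentinel = -1;  // Stand-in for the real constant.

InvokePath ChooseInvokePath(int expected, int actual) {
  if (expected == actual) return InvokePath::kDirect;
  if (expected == kDontAdaptArgumentsSentinel) {
    // A builtin that handles any argument count itself: skip adaptation.
    return InvokePath::kDirect;
  }
  return InvokePath::kAdaptor;  // ArgumentsAdaptorTrampoline fixes the frame.
}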
+
+void MacroAssembler::InvokeCode(Register code,
+                                const ParameterCount& expected,
+                                const ParameterCount& actual,
+                                InvokeFlag flag,
+                                const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  Label done;
+
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code,
+                 &done, &definitely_mismatches, flag,
+                 call_wrapper);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      Call(code);
+      call_wrapper.AfterCall();
+    } else {
+      DCHECK(flag == JUMP_FUNCTION);
+      Jump(code);
+    }
+    // Continue here if InvokePrologue does handle the invocation due to
+    // mismatched parameter counts.
+    bind(&done);
+  }
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  // Contract with called JS functions requires that function is passed in a1.
+  DCHECK(function.is(a1));
+  Register expected_reg = a2;
+  Register code_reg = a3;
+  ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
+  ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+  // The argument count is stored as int32_t on 64-bit platforms.
+  // TODO(plind): Smi on 32-bit platforms.
+  lw(expected_reg,
+      FieldMemOperand(code_reg,
+                      SharedFunctionInfo::kFormalParameterCountOffset));
+  ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  ParameterCount expected(expected_reg);
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  // Contract with called JS functions requires that function is passed in a1.
+  DCHECK(function.is(a1));
+
+  // Get the function and setup the context.
+  ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
+
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+  InvokeCode(a3, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  li(a1, function);
+  InvokeFunction(a1, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object,
+                                          Register map,
+                                          Register scratch,
+                                          Label* fail) {
+  ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+  IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map,
+                                            Register scratch,
+                                            Label* fail) {
+  lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object,
+                                          Register scratch,
+                                          Label* fail) {
+  DCHECK(kNotStringTag != 0);
+
+  ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  And(scratch, scratch, Operand(kIsNotStringMask));
+  Branch(fail, ne, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object,
+                                      Register scratch,
+                                      Label* fail) {
+  ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
+}
+
+
+// ---------------------------------------------------------------------------
+// Support functions.
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function,
+                                             Register result,
+                                             Register scratch,
+                                             Label* miss,
+                                             bool miss_on_bound_function) {
+  Label non_instance;
+  if (miss_on_bound_function) {
+    // Check that the receiver isn't a smi.
+    JumpIfSmi(function, miss);
+
+    // Check that the function really is a function.  Load map into result reg.
+    GetObjectType(function, result, scratch);
+    Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
+
+    ld(scratch,
+       FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    lwu(scratch,
+        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    And(scratch, scratch,
+        Operand(1 << SharedFunctionInfo::kBoundFunction));
+    Branch(miss, ne, scratch, Operand(zero_reg));
+
+    // Make sure that the function has an instance prototype.
+    lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+    And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+    Branch(&non_instance, ne, scratch, Operand(zero_reg));
+  }
+
+  // Get the prototype or initial map from the function.
+  ld(result,
+     FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  LoadRoot(t8, Heap::kTheHoleValueRootIndex);
+  Branch(miss, eq, result, Operand(t8));
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  GetObjectType(result, scratch, scratch);
+  Branch(&done, ne, scratch, Operand(MAP_TYPE));
+
+  // Get the prototype from the initial map.
+  ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  if (miss_on_bound_function) {
+    jmp(&done);
+
+    // Non-instance prototype: Fetch prototype from constructor field
+    // in initial map.
+    bind(&non_instance);
+    ld(result, FieldMemOperand(result, Map::kConstructorOffset));
+  }
+
+  // All done.
+  bind(&done);
+}
+
+
+void MacroAssembler::GetObjectType(Register object,
+                                   Register map,
+                                   Register type_reg) {
+  ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+}
+
+
+// -----------------------------------------------------------------------------
+// Runtime calls.
+
+void MacroAssembler::CallStub(CodeStub* stub,
+                              TypeFeedbackId ast_id,
+                              Condition cond,
+                              Register r1,
+                              const Operand& r2,
+                              BranchDelaySlot bd) {
+  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
+       cond, r1, r2, bd);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub,
+                                  Condition cond,
+                                  Register r1,
+                                  const Operand& r2,
+                                  BranchDelaySlot bd) {
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  int64_t offset = (ref0.address() - ref1.address());
+  DCHECK(static_cast<int>(offset) == offset);
+  return static_cast<int>(offset);
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+    Register function_address,
+    ExternalReference thunk_ref,
+    int stack_space,
+    MemOperand return_value_operand,
+    MemOperand* context_restore_operand) {
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address(isolate());
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(isolate()),
+      next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(isolate()),
+      next_address);
+
+  DCHECK(function_address.is(a1) || function_address.is(a2));
+
+  Label profiler_disabled;
+  Label end_profiler_check;
+  li(t9, Operand(ExternalReference::is_profiling_address(isolate())));
+  lb(t9, MemOperand(t9, 0));
+  Branch(&profiler_disabled, eq, t9, Operand(zero_reg));
+
+  // Additional parameter is the address of the actual callback.
+  li(t9, Operand(thunk_ref));
+  jmp(&end_profiler_check);
+
+  bind(&profiler_disabled);
+  mov(t9, function_address);
+  bind(&end_profiler_check);
+
+  // Allocate HandleScope in callee-save registers.
+  li(s3, Operand(next_address));
+  ld(s0, MemOperand(s3, kNextOffset));
+  ld(s1, MemOperand(s3, kLimitOffset));
+  ld(s2, MemOperand(s3, kLevelOffset));
+  Daddu(s2, s2, Operand(1));
+  sd(s2, MemOperand(s3, kLevelOffset));
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(this, StackFrame::MANUAL);
+    PushSafepointRegisters();
+    PrepareCallCFunction(1, a0);
+    li(a0, Operand(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+    PopSafepointRegisters();
+  }
+
+  // The native call returns to the DirectCEntry stub, which redirects to the
+  // return address pushed on the stack (it could have moved after GC).
+  // The DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub(isolate());
+  stub.GenerateCall(this, t9);
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(this, StackFrame::MANUAL);
+    PushSafepointRegisters();
+    PrepareCallCFunction(1, a0);
+    li(a0, Operand(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+    PopSafepointRegisters();
+  }
+
+  Label promote_scheduled_exception;
+  Label exception_handled;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+  Label return_value_loaded;
+
+  // Load value from ReturnValue.
+  ld(v0, return_value_operand);
+  bind(&return_value_loaded);
+
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  sd(s0, MemOperand(s3, kNextOffset));
+  if (emit_debug_code()) {
+    ld(a1, MemOperand(s3, kLevelOffset));
+    Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
+  }
+  Dsubu(s2, s2, Operand(1));
+  sd(s2, MemOperand(s3, kLevelOffset));
+  ld(at, MemOperand(s3, kLimitOffset));
+  Branch(&delete_allocated_handles, ne, s1, Operand(at));
+
+  // Check if the function scheduled an exception.
+  bind(&leave_exit_frame);
+  LoadRoot(a4, Heap::kTheHoleValueRootIndex);
+  li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
+  ld(a5, MemOperand(at));
+  Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
+  bind(&exception_handled);
+
+  bool restore_context = context_restore_operand != NULL;
+  if (restore_context) {
+    ld(cp, *context_restore_operand);
+  }
+  li(s0, Operand(stack_space));
+  LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
+
+  bind(&promote_scheduled_exception);
+  {
+    FrameScope frame(this, StackFrame::INTERNAL);
+    CallExternalReference(
+        ExternalReference(Runtime::kPromoteScheduledException, isolate()),
+        0);
+  }
+  jmp(&exception_handled);
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  bind(&delete_allocated_handles);
+  sd(s1, MemOperand(s3, kLimitOffset));
+  mov(s0, v0);
+  mov(a0, v0);
+  PrepareCallCFunction(1, s1);
+  li(a0, Operand(ExternalReference::isolate_address(isolate())));
+  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+      1);
+  mov(v0, s0);
+  jmp(&leave_exit_frame);
+}
+
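The prologue and epilogue around the API call above mirror what a HandleScope does in C++: save next and limit, bump the nesting level, and on exit restore next, decrement the level, and delete any extension blocks the callee allocated (the limit-changed check). A schematic RAII version of that bookkeeping (hypothetical HandleScopeData, not the real v8 type):

struct HandleScopeData {
  void** next;
  void** limit;
  int level;
};

class ScopedHandles {
 public:
  explicit ScopedHandles(HandleScopeData* data)
      : data_(data), saved_next_(data->next), saved_limit_(data->limit) {
    ++data_->level;  // The ld/Daddu/sd on kLevelOffset in the prologue.
  }
  ~ScopedHandles() {
    data_->next = saved_next_;  // sd(s0, MemOperand(s3, kNextOffset)).
    --data_->level;
    if (data_->limit != saved_limit_) {
      // The delete_allocated_handles path: restore the limit and free the
      // extension blocks in the real runtime.
      data_->limit = saved_limit_;
    }
  }

 private:
  HandleScopeData* data_;
  void** saved_next_;
  void** saved_limit_;
};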
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+  // If the hash field contains an array index, pick it out. The assert checks
+  // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it do not
+  // conflict.
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
+}
+
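DecodeFieldToSmi above is a shift-and-mask bitfield extraction over the hash word, followed by re-tagging the result as a smi. The plain decode step, with illustrative shift and width values rather than String's real constants:

#include <cstdint>

const int kIndexShift = 2;   // Illustrative, not String's real layout.
const int kIndexBits = 24;

uint32_t DecodeArrayIndex(uint32_t hash) {
  return (hash >> kIndexShift) & ((1u << kIndexBits) - 1);
}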
+
+void MacroAssembler::ObjectToDoubleFPURegister(Register object,
+                                               FPURegister result,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Register heap_number_map,
+                                               Label* not_number,
+                                               ObjectToDoubleFlags flags) {
+  Label done;
+  if ((flags & OBJECT_NOT_SMI) == 0) {
+    Label not_smi;
+    JumpIfNotSmi(object, &not_smi);
+    // Remove smi tag and convert to double.
+    // dsra(scratch1, object, kSmiTagSize);
+    dsra32(scratch1, object, 0);
+    mtc1(scratch1, result);
+    cvt_d_w(result, result);
+    Branch(&done);
+    bind(&not_smi);
+  }
+  // Check for heap number and load double value from it.
+  ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
+  Branch(not_number, ne, scratch1, Operand(heap_number_map));
+
+  if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
+    // If exponent is all ones the number is either a NaN or +/-Infinity.
+    Register exponent = scratch1;
+    Register mask_reg = scratch2;
+    lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
+    li(mask_reg, HeapNumber::kExponentMask);
+
+    And(exponent, exponent, mask_reg);
+    Branch(not_number, eq, exponent, Operand(mask_reg));
+  }
+  ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
+  bind(&done);
+}
+
+
+void MacroAssembler::SmiToDoubleFPURegister(Register smi,
+                                            FPURegister value,
+                                            Register scratch1) {
+  // dsra(scratch1, smi, kSmiTagSize);
+  dsra32(scratch1, smi, 0);
+  mtc1(scratch1, value);
+  cvt_d_w(value, value);
+}
+
+
+void MacroAssembler::AdduAndCheckForOverflow(Register dst,
+                                             Register left,
+                                             Register right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
+
+  if (left.is(right) && dst.is(left)) {
+    DCHECK(!dst.is(t9));
+    DCHECK(!scratch.is(t9));
+    DCHECK(!left.is(t9));
+    DCHECK(!right.is(t9));
+    DCHECK(!overflow_dst.is(t9));
+    mov(t9, right);
+    right = t9;
+  }
+
+  if (dst.is(left)) {
+    mov(scratch, left);  // Preserve left.
+    daddu(dst, left, right);  // Left is overwritten.
+    xor_(scratch, dst, scratch);  // Original left.
+    xor_(overflow_dst, dst, right);
+    and_(overflow_dst, overflow_dst, scratch);
+  } else if (dst.is(right)) {
+    mov(scratch, right);  // Preserve right.
+    daddu(dst, left, right);  // Right is overwritten.
+    xor_(scratch, dst, scratch);  // Original right.
+    xor_(overflow_dst, dst, left);
+    and_(overflow_dst, overflow_dst, scratch);
+  } else {
+    daddu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, dst, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+}
+
+
+void MacroAssembler::SubuAndCheckForOverflow(Register dst,
+                                             Register left,
+                                             Register right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
+  DCHECK(!scratch.is(left));
+  DCHECK(!scratch.is(right));
+
+  // This happens with some Crankshaft code. Since Subu works fine if
+  // left == right, let's not make that restriction here.
+  if (left.is(right)) {
+    mov(dst, zero_reg);
+    mov(overflow_dst, zero_reg);
+    return;
+  }
+
+  if (dst.is(left)) {
+    mov(scratch, left);  // Preserve left.
+    dsubu(dst, left, right);  // Left is overwritten.
+    xor_(overflow_dst, dst, scratch);  // scratch is original left.
+    xor_(scratch, scratch, right);  // scratch is original left.
+    and_(overflow_dst, scratch, overflow_dst);
+  } else if (dst.is(right)) {
+    mov(scratch, right);  // Preserve right.
+    dsubu(dst, left, right);  // Right is overwritten.
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, scratch);  // Original right.
+    and_(overflow_dst, scratch, overflow_dst);
+  } else {
+    dsubu(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, right);
+    and_(overflow_dst, scratch, overflow_dst);
+  }
+}
+
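Both overflow checkers above use the sign-bit trick: for addition, signed overflow occurred iff the operands share a sign and the result's sign differs, i.e. (dst ^ left) & (dst ^ right) has its sign bit set; for subtraction the predicate is (dst ^ left) & (left ^ right). A C++ check of both identities on 64-bit values (hypothetical helpers; unsigned arithmetic avoids undefined behavior on wrap):

#include <cstdint>

// Matches AdduAndCheckForOverflow: negative overflow_dst => signed overflow.
bool AddOverflows(int64_t left, int64_t right) {
  uint64_t dst = static_cast<uint64_t>(left) + static_cast<uint64_t>(right);
  uint64_t ovf = (dst ^ static_cast<uint64_t>(left)) &
                 (dst ^ static_cast<uint64_t>(right));
  return static_cast<int64_t>(ovf) < 0;  // Sign bit set iff overflow.
}

// Matches SubuAndCheckForOverflow: operands of differing sign whose result
// sign differs from the left operand.
bool SubOverflows(int64_t left, int64_t right) {
  uint64_t dst = static_cast<uint64_t>(left) - static_cast<uint64_t>(right);
  uint64_t ovf = (dst ^ static_cast<uint64_t>(left)) &
                 (static_cast<uint64_t>(left) ^ static_cast<uint64_t>(right));
  return static_cast<int64_t>(ovf) < 0;
}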
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f,
+                                 int num_arguments,
+                                 SaveFPRegsMode save_doubles) {
+  // All parameters are on the stack. v0 has the return value after call.
+
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments matches the
+  // expectation.
+  CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  PrepareCEntryArgs(num_arguments);
+  PrepareCEntryFunction(ExternalReference(f, isolate()));
+  CEntryStub stub(isolate(), 1, save_doubles);
+  CallStub(&stub);
+}
+
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments,
+                                           BranchDelaySlot bd) {
+  PrepareCEntryArgs(num_arguments);
+  PrepareCEntryFunction(ext);
+
+  CEntryStub stub(isolate(), 1);
+  CallStub(&stub, TypeFeedbackId::None(), al, zero_reg, Operand(zero_reg), bd);
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+                                               int num_arguments,
+                                               int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  PrepareCEntryArgs(num_arguments);
+  JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
+                                     int num_arguments,
+                                     int result_size) {
+  TailCallExternalReference(ExternalReference(fid, isolate()),
+                            num_arguments,
+                            result_size);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin,
+                                             BranchDelaySlot bd) {
+  PrepareCEntryFunction(builtin);
+  CEntryStub stub(isolate(), 1);
+  Jump(stub.GetCode(),
+       RelocInfo::CODE_TARGET,
+       al,
+       zero_reg,
+       Operand(zero_reg),
+       bd);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
+                                   InvokeFlag flag,
+                                   const CallWrapper& call_wrapper) {
+  // You can't call a builtin without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  GetBuiltinEntry(t9, id);
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(t9));
+    Call(t9);
+    call_wrapper.AfterCall();
+  } else {
+    DCHECK(flag == JUMP_FUNCTION);
+    Jump(t9);
+  }
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+                                        Builtins::JavaScript id) {
+  // Load the builtins object into target register.
+  ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+  // Load the JavaScript builtin function from the builtins object.
+  ld(target, FieldMemOperand(target,
+                          JSBuiltinsObject::OffsetOfFunctionWithId(id)));
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  DCHECK(!target.is(a1));
+  GetBuiltinFunction(a1, id);
+  // Load the code entry point from the builtins object.
+  ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+                                Register scratch1, Register scratch2) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch1, Operand(value));
+    li(scratch2, Operand(ExternalReference(counter)));
+    sd(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  DCHECK(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch2, Operand(ExternalReference(counter)));
+    ld(scratch1, MemOperand(scratch2));
+    Daddu(scratch1, scratch1, Operand(value));
+    sd(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  DCHECK(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    li(scratch2, Operand(ExternalReference(counter)));
+    ld(scratch1, MemOperand(scratch2));
+    Dsubu(scratch1, scratch1, Operand(value));
+    sd(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Debugging.
+
+void MacroAssembler::Assert(Condition cc, BailoutReason reason,
+                            Register rs, Operand rt) {
+  if (emit_debug_code())
+    Check(cc, reason, rs, rt);
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+  if (emit_debug_code()) {
+    DCHECK(!elements.is(at));
+    Label ok;
+    push(elements);
+    ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+    LoadRoot(at, Heap::kFixedArrayMapRootIndex);
+    Branch(&ok, eq, elements, Operand(at));
+    LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
+    Branch(&ok, eq, elements, Operand(at));
+    LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
+    Branch(&ok, eq, elements, Operand(at));
+    Abort(kJSObjectWithFastElementsMapHasSlowElements);
+    bind(&ok);
+    pop(elements);
+  }
+}
+
+
+void MacroAssembler::Check(Condition cc, BailoutReason reason,
+                           Register rs, Operand rt) {
+  Label L;
+  Branch(&L, cc, rs, rt);
+  Abort(reason);
+  // Will not return here.
+  bind(&L);
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+  Label abort_start;
+  bind(&abort_start);
+#ifdef DEBUG
+  const char* msg = GetBailoutReason(reason);
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+
+  if (FLAG_trap_on_abort) {
+    stop(msg);
+    return;
+  }
+#endif
+
+  li(a0, Operand(Smi::FromInt(reason)));
+  push(a0);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 1);
+  } else {
+    CallRuntime(Runtime::kAbort, 1);
+  }
+  // Will not return here.
+  if (is_trampoline_pool_blocked()) {
+    // If the calling code cares about the exact number of
+    // instructions generated, we insert padding here to keep the size
+    // of the Abort macro constant.
+    // Currently in debug mode with debug_code enabled the number of
+    // generated instructions is 10, so we use this as a maximum value.
+    static const int kExpectedAbortInstructions = 10;
+    int abort_instructions = InstructionsGeneratedSince(&abort_start);
+    DCHECK(abort_instructions <= kExpectedAbortInstructions);
+    while (abort_instructions++ < kExpectedAbortInstructions) {
+      nop();
+    }
+  }
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+  if (context_chain_length > 0) {
+    // Move up the chain of contexts to the context containing the slot.
+    ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+    for (int i = 1; i < context_chain_length; i++) {
+      ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+    }
+  } else {
+    // Slot is in the current function context.  Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in esi).
+    Move(dst, cp);
+  }
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+    ElementsKind expected_kind,
+    ElementsKind transitioned_kind,
+    Register map_in_out,
+    Register scratch,
+    Label* no_map_match) {
+  // Load the global or builtins object from the current context.
+  ld(scratch,
+     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+  // Check that the function's map is the same as the expected cached map.
+  ld(scratch,
+     MemOperand(scratch,
+                Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+  size_t offset = expected_kind * kPointerSize +
+      FixedArrayBase::kHeaderSize;
+  ld(at, FieldMemOperand(scratch, offset));
+  Branch(no_map_match, ne, map_in_out, Operand(at));
+
+  // Use the transitioned cached map.
+  offset = transitioned_kind * kPointerSize +
+      FixedArrayBase::kHeaderSize;
+  ld(map_in_out, FieldMemOperand(scratch, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+  // Load the global or builtins object from the current context.
+  ld(function,
+     MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  // Load the native context from the global or builtins object.
+  ld(function, FieldMemOperand(function,
+                               GlobalObject::kNativeContextOffset));
+  // Load the function from the native context.
+  ld(function, MemOperand(function, Context::SlotOffset(index)));
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+                                                  Register map,
+                                                  Register scratch) {
+  // Load the initial map. The global functions all have initial maps.
+  ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  if (emit_debug_code()) {
+    Label ok, fail;
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+    Branch(&ok);
+    bind(&fail);
+    Abort(kGlobalFunctionsMustHaveInitialMap);
+    bind(&ok);
+  }
+}
+
+
+void MacroAssembler::StubPrologue() {
+  Push(ra, fp, cp);
+  Push(Smi::FromInt(StackFrame::STUB));
+  // Adjust FP to point to saved FP.
+  Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging) {
+  PredictableCodeSizeScope predictable_code_size_scope(
+      this, kNoCodeAgeSequenceLength);
+  // The following three instructions must remain together and unmodified
+  // for code aging to work properly.
+  if (code_pre_aging) {
+    // Pre-age the code.
+    Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+    nop(Assembler::CODE_AGE_MARKER_NOP);
+    // Load the stub address to t9 and call it,
+    // GetCodeAgeAndParity() extracts the stub address from this instruction.
+    li(t9,
+       Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
+       ADDRESS_LOAD);
+    nop();  // Prevent jalr to jal optimization.
+    jalr(t9, a0);
+    nop();  // Branch delay slot nop.
+    nop();  // Pad the empty space.
+  } else {
+    Push(ra, fp, cp, a1);
+    nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+    nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+    nop(Assembler::CODE_AGE_SEQUENCE_NOP);
+    // Adjust fp to point to caller's fp.
+    Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  }
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type) {
+  daddiu(sp, sp, -5 * kPointerSize);
+  li(t8, Operand(Smi::FromInt(type)));
+  li(t9, Operand(CodeObject()), CONSTANT_SIZE);
+  sd(ra, MemOperand(sp, 4 * kPointerSize));
+  sd(fp, MemOperand(sp, 3 * kPointerSize));
+  sd(cp, MemOperand(sp, 2 * kPointerSize));
+  sd(t8, MemOperand(sp, 1 * kPointerSize));
+  sd(t9, MemOperand(sp, 0 * kPointerSize));
+  // Adjust FP to point to saved FP.
+  Daddu(fp, sp,
+       Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
+}
+
+
+void MacroAssembler::LeaveFrame(StackFrame::Type type) {
+  mov(sp, fp);
+  ld(fp, MemOperand(sp, 0 * kPointerSize));
+  ld(ra, MemOperand(sp, 1 * kPointerSize));
+  daddiu(sp, sp, 2 * kPointerSize);
+}
+
+
+void MacroAssembler::EnterExitFrame(bool save_doubles,
+                                    int stack_space) {
+  // Set up the frame structure on the stack.
+  STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
+  STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
+  STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
+
+  // This is how the stack will look:
+  // fp + 2 (==kCallerSPDisplacement) - old stack's end
+  // [fp + 1 (==kCallerPCOffset)] - saved old ra
+  // [fp + 0 (==kCallerFPOffset)] - saved old fp
+  // [fp - 1 (==kSPOffset)] - sp of the called function
+  // [fp - 2 (==kCodeOffset)] - CodeObject
+  // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
+  //   new stack (will contain saved ra)
+
+  // Save registers.
+  daddiu(sp, sp, -4 * kPointerSize);
+  sd(ra, MemOperand(sp, 3 * kPointerSize));
+  sd(fp, MemOperand(sp, 2 * kPointerSize));
+  daddiu(fp, sp, 2 * kPointerSize);  // Set up new frame pointer.
+
+  if (emit_debug_code()) {
+    sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
+  }
+
+  // Accessed from ExitFrame::code_slot.
+  li(t8, Operand(CodeObject()), CONSTANT_SIZE);
+  sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+
+  // Save the frame pointer and the context in top.
+  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  sd(fp, MemOperand(t8));
+  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+  sd(cp, MemOperand(t8));
+
+  const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
+  if (save_doubles) {
+    // The stack is already aligned to an 8-byte boundary, as required for
+    // stores with sdc1.
+    int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
+    int space = kNumOfSavedRegisters * kDoubleSize;
+    Dsubu(sp, sp, Operand(space));
+    // Remember: we only need to save every 2nd double FPU value.
+    for (int i = 0; i < kNumOfSavedRegisters; i++) {
+      FPURegister reg = FPURegister::from_code(2 * i);
+      sdc1(reg, MemOperand(sp, i * kDoubleSize));
+    }
+  }
+
+  // Reserve space for the return address, stack space and an optional slot
+  // (used by the DirectCEntryStub to hold the return value if a struct is
+  // returned) and align the frame preparing for calling the runtime function.
+  DCHECK(stack_space >= 0);
+  Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
+  if (frame_alignment > 0) {
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    And(sp, sp, Operand(-frame_alignment));  // Align stack.
+  }
+
+  // Set the exit frame sp value to point just before the return address
+  // location.
+  daddiu(at, sp, kPointerSize);
+  sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles,
+                                    Register argument_count,
+                                    bool restore_context,
+                                    bool do_return) {
+  // Optionally restore all double registers.
+  if (save_doubles) {
+    // Remember: we only need to restore every 2nd double FPU value.
+    int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
+    Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
+        kNumOfSavedRegisters * kDoubleSize));
+    for (int i = 0; i < kNumOfSavedRegisters; i++) {
+      FPURegister reg = FPURegister::from_code(2 * i);
+      ldc1(reg, MemOperand(t8, i * kDoubleSize));
+    }
+  }
+
+  // Clear top frame.
+  li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  sd(zero_reg, MemOperand(t8));
+
+  // Restore current context from top and clear it in debug mode.
+  if (restore_context) {
+    li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+    ld(cp, MemOperand(t8));
+  }
+#ifdef DEBUG
+  li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+  sd(a3, MemOperand(t8));
+#endif
+
+  // Pop the arguments, restore registers, and return.
+  mov(sp, fp);  // Respect ABI stack constraint.
+  ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
+  ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
+
+  if (argument_count.is_valid()) {
+    dsll(t8, argument_count, kPointerSizeLog2);
+    daddu(sp, sp, t8);
+  }
+
+  if (do_return) {
+    Ret(USE_DELAY_SLOT);
+    // If returning, the instruction in the delay slot is the daddiu below.
+  }
+  daddiu(sp, sp, 2 * kPointerSize);
+}
+
+
+void MacroAssembler::InitializeNewString(Register string,
+                                         Register length,
+                                         Heap::RootListIndex map_index,
+                                         Register scratch1,
+                                         Register scratch2) {
+  // dsll(scratch1, length, kSmiTagSize);
+  dsll32(scratch1, length, 0);
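+  // Note (added): with 32-bit smi payloads the tag shift is a full 32 bits,
+  // so dsll32 replaces the 1-bit dsll used on mips32 (commented out above).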
+  LoadRoot(scratch2, map_index);
+  sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
+  li(scratch1, Operand(String::kEmptyHashField));
+  sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
+  sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one Mips
+  // platform for another Mips platform with a different alignment.
+  return base::OS::ActivationFrameAlignment();
+#else  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so this is controlled from a
+  // flag.
+  return FLAG_sim_stack_alignment;
+#endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+}
+
+
+void MacroAssembler::AssertStackIsAligned() {
+  if (emit_debug_code()) {
+    const int frame_alignment = ActivationFrameAlignment();
+    const int frame_alignment_mask = frame_alignment - 1;
+
+    if (frame_alignment > kPointerSize) {
+      Label alignment_as_expected;
+      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+      andi(at, sp, frame_alignment_mask);
+      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+      // Don't use Check here, as it will call Runtime_Abort, re-entering
+      // this code.
+      stop("Unexpected stack alignment");
+      bind(&alignment_as_expected);
+    }
+  }
+}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+    Register reg,
+    Register scratch,
+    Label* not_power_of_two_or_zero) {
+  Dsubu(scratch, reg, Operand(1));
+  Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
+         scratch, Operand(zero_reg));
+  and_(at, scratch, reg);  // In the delay slot.
+  Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
+  DCHECK(!reg.is(overflow));
+  mov(overflow, reg);  // Save original value.
+  SmiTag(reg);
+  xor_(overflow, overflow, reg);  // Overflow if (value ^ 2 * value) < 0.
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst,
+                                         Register src,
+                                         Register overflow) {
+  if (dst.is(src)) {
+    // Fall back to slower case.
+    SmiTagCheckOverflow(dst, overflow);
+  } else {
+    DCHECK(!dst.is(src));
+    DCHECK(!dst.is(overflow));
+    DCHECK(!src.is(overflow));
+    SmiTag(dst, src);
+    xor_(overflow, dst, src);  // Overflow if (value ^ 2 * value) < 0.
+  }
+}
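+
+// Illustrative note (added): in the 31-bit-smi configuration SmiTag computes
+// 2 * value, so the xor flags overflow exactly when the sign bit changes.
+// E.g. value = 0x40000000: tagged = 0x80000000, value ^ tagged = 0xC0000000,
+// which is negative, so overflow is signaled; value = 5: 5 ^ 10 = 0xF >= 0.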
+
+
+void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
+  if (SmiValuesAre32Bits()) {
+    lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+  } else {
+    lw(dst, src);
+    SmiUntag(dst);
+  }
+}
+
+
+void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
+  if (SmiValuesAre32Bits()) {
+    // TODO(plind): not clear if lw or ld faster here, need micro-benchmark.
+    lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
+    dsll(dst, dst, scale);
+  } else {
+    lw(dst, src);
+    DCHECK(scale >= kSmiTagSize);
+    sll(dst, dst, scale - kSmiTagSize);
+  }
+}
+
+
+// Returns 2 values: the Smi and a scaled version of the int within the Smi.
+void MacroAssembler::SmiLoadWithScale(Register d_smi,
+                                      Register d_scaled,
+                                      MemOperand src,
+                                      int scale) {
+  if (SmiValuesAre32Bits()) {
+    ld(d_smi, src);
+    dsra(d_scaled, d_smi, kSmiShift - scale);
+  } else {
+    lw(d_smi, src);
+    DCHECK(scale >= kSmiTagSize);
+    sll(d_scaled, d_smi, scale - kSmiTagSize);
+  }
+}
+
+
+// Returns 2 values: the untagged Smi (int32) and scaled version of that int.
+void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
+                                           Register d_scaled,
+                                           MemOperand src,
+                                           int scale) {
+  if (SmiValuesAre32Bits()) {
+    lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
+    dsll(d_scaled, d_int, scale);
+  } else {
+    lw(d_int, src);
+    // Need both the int and the scaled value, so use two instructions.
+    SmiUntag(d_int);
+    sll(d_scaled, d_int, scale);
+  }
+}
+
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst,
+                                       Register src,
+                                       Label* smi_case) {
+  // DCHECK(!dst.is(src));
+  JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
+  SmiUntag(dst, src);
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
+                                          Register src,
+                                          Label* non_smi_case) {
+  // DCHECK(!dst.is(src));
+  JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
+  SmiUntag(dst, src);
+}
+
+void MacroAssembler::JumpIfSmi(Register value,
+                               Label* smi_label,
+                               Register scratch,
+                               BranchDelaySlot bd) {
+  DCHECK_EQ(0, kSmiTag);
+  andi(scratch, value, kSmiTagMask);
+  Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
+}
+
+void MacroAssembler::JumpIfNotSmi(Register value,
+                                  Label* not_smi_label,
+                                  Register scratch,
+                                  BranchDelaySlot bd) {
+  DCHECK_EQ(0, kSmiTag);
+  andi(scratch, value, kSmiTagMask);
+  Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1,
+                                      Register reg2,
+                                      Label* on_not_both_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  // TODO(plind): Find some better way to fix this assert issue.
+#if defined(__APPLE__)
+  DCHECK_EQ(1, kSmiTagMask);
+#else
+  DCHECK_EQ((uint64_t)1, kSmiTagMask);
+#endif
+  or_(at, reg1, reg2);
+  JumpIfNotSmi(at, on_not_both_smi);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1,
+                                     Register reg2,
+                                     Label* on_either_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  // TODO(plind): Find some better way to fix this assert issue.
+#if defined(__APPLE__)
+  DCHECK_EQ(1, kSmiTagMask);
+#else
+  DCHECK_EQ((uint64_t)1, kSmiTagMask);
+#endif
+  // Both Smi tags must be 1 (not Smi).
+  and_(at, reg1, reg2);
+  JumpIfSmi(at, on_either_smi);
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    andi(at, object, kSmiTagMask);
+    Check(ne, kOperandIsASmi, at, Operand(zero_reg));
+  }
+}
+
+
+void MacroAssembler::AssertSmi(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    andi(at, object, kSmiTagMask);
+    Check(eq, kOperandIsASmi, at, Operand(zero_reg));
+  }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    SmiTst(object, a4);
+    Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
+    push(object);
+    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
+    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+    Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
+    pop(object);
+  }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    SmiTst(object, a4);
+    Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
+    push(object);
+    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
+    lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
+    Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
+    pop(object);
+  }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+                                                     Register scratch) {
+  if (emit_debug_code()) {
+    Label done_checking;
+    AssertNotSmi(object);
+    LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+    Branch(&done_checking, eq, object, Operand(scratch));
+    push(object);
+    ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
+    LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+    Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
+    pop(object);
+    bind(&done_checking);
+  }
+}
+
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+  if (emit_debug_code()) {
+    DCHECK(!reg.is(at));
+    LoadRoot(at, index);
+    Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
+  }
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object,
+                                             Register result,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Register scratch3,
+                                             Label* not_found) {
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch3;
+
+  // Load the number string cache.
+  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide length by two (length is a smi).
+  // dsra(mask, mask, kSmiTagSize + 1);
+  dsra32(mask, mask, 1);
+  Daddu(mask, mask, -1);  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label is_smi;
+  Label load_result_from_cache;
+  JumpIfSmi(object, &is_smi);
+  CheckMap(object,
+           scratch1,
+           Heap::kHeapNumberMapRootIndex,
+           not_found,
+           DONT_DO_SMI_CHECK);
+
+  STATIC_ASSERT(8 == kDoubleSize);
+  Daddu(scratch1,
+       object,
+       Operand(HeapNumber::kValueOffset - kHeapObjectTag));
+  ld(scratch2, MemOperand(scratch1, kPointerSize));
+  ld(scratch1, MemOperand(scratch1, 0));
+  Xor(scratch1, scratch1, Operand(scratch2));
+  And(scratch1, scratch1, Operand(mask));
+
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
+  Daddu(scratch1, number_string_cache, scratch1);
+
+  Register probe = mask;
+  ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  JumpIfSmi(probe, not_found);
+  ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
+  ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
+  BranchF(&load_result_from_cache, NULL, eq, f12, f14);
+  Branch(not_found);
+
+  bind(&is_smi);
+  Register scratch = scratch1;
+  // dsra(scratch, object, 1);   // Shift away the tag.
+  dsra32(scratch, object, 0);
+  And(scratch, mask, Operand(scratch));
+
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  dsll(scratch, scratch, kPointerSizeLog2 + 1);
+  Daddu(scratch, number_string_cache, scratch);
+
+  // Check if the entry is the smi we are looking for.
+  ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  Branch(not_found, ne, object, Operand(probe));
+
+  // Get the result from the cache.
+  bind(&load_result_from_cache);
+  ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+
+  IncrementCounter(isolate()->counters()->number_to_string_native(),
+                   1,
+                   scratch1,
+                   scratch2);
+}
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+    Register first, Register second, Register scratch1, Register scratch2,
+    Label* failure) {
+  // Test that both first and second are sequential one-byte strings.
+  // Assume that they are non-smis.
+  ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+  ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+  lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+                                                 scratch2, failure);
+}
+
+
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+                                                           Register second,
+                                                           Register scratch1,
+                                                           Register scratch2,
+                                                           Label* failure) {
+  // Check that neither is a smi.
+  STATIC_ASSERT(kSmiTag == 0);
+  And(scratch1, first, Operand(second));
+  JumpIfSmi(scratch1, failure);
+  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+                                               scratch2, failure);
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+    Register first, Register second, Register scratch1, Register scratch2,
+    Label* failure) {
+  const int kFlatOneByteStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  const int kFlatOneByteStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
+  DCHECK(kFlatOneByteStringTag <= 0xffff);  // Ensure this fits 16-bit immed.
+  andi(scratch1, first, kFlatOneByteStringMask);
+  Branch(failure, ne, scratch1, Operand(kFlatOneByteStringTag));
+  andi(scratch2, second, kFlatOneByteStringMask);
+  Branch(failure, ne, scratch2, Operand(kFlatOneByteStringTag));
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+                                                              Register scratch,
+                                                              Label* failure) {
+  const int kFlatOneByteStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  const int kFlatOneByteStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
+  And(scratch, type, Operand(kFlatOneByteStringMask));
+  Branch(failure, ne, scratch, Operand(kFlatOneByteStringTag));
+}
+
+
+static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
+  num_reg_arguments += 2 * num_double_arguments;
+
+  // O32: Up to four simple arguments are passed in registers a0..a3.
+  // N64: Up to eight simple arguments are passed in registers a0..a7.
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  stack_passed_words += kCArgSlotCount;
+  return stack_passed_words;
+}
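+
+// Worked example (added; assumes kCArgSlotCount is 0 under N64 and 4 under
+// O32): 3 integer and 2 double arguments give num_reg_arguments = 7. Under
+// N64 all 7 fit in registers, so 0 stack words are needed; under O32 only 4
+// fit, so 3 spill to the stack plus 4 reserved slots: 7 stack words in all.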
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
+                                               Register index,
+                                               Register value,
+                                               Register scratch,
+                                               uint32_t encoding_mask) {
+  SmiTst(string, at);
+  Check(ne, kNonObject, at, Operand(zero_reg));
+
+  ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
+  lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
+
+  andi(at, at, kStringRepresentationMask | kStringEncodingMask);
+  li(scratch, Operand(encoding_mask));
+  Check(eq, kUnexpectedStringType, at, Operand(scratch));
+
+  // TODO(plind): requires Smi size check code for mips32.
+
+  ld(at, FieldMemOperand(string, String::kLengthOffset));
+  Check(lt, kIndexIsTooLarge, index, Operand(at));
+
+  DCHECK(Smi::FromInt(0) == 0);
+  Check(ge, kIndexIsNegative, index, Operand(zero_reg));
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
+  int frame_alignment = ActivationFrameAlignment();
+
+  // N64: Up to eight simple arguments are passed in registers a0..a7; no
+  // argument slots are used.
+  // O32: Up to four simple arguments are passed in registers a0..a3.
+  // Those four arguments must have reserved argument slots on the stack for
+  // mips, even though those argument slots are not normally used.
+  // Both ABIs: Remaining arguments are pushed on the stack, above (higher
+  // address than) the (O32) argument slots. (arg slot calculation handled by
+  // CalculateStackPassedWords()).
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
+  if (frame_alignment > kPointerSize) {
+    // Align the stack and make room for the stack-passed arguments plus one
+    // extra slot for the original value of sp.
+    mov(scratch, sp);
+    Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    And(sp, sp, Operand(-frame_alignment));
+    sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+  }
+}
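+
+// Example (added): with 16-byte frame alignment and 7 stack-passed words,
+// sp drops by (7 + 1) * 8 = 64 bytes, is then rounded down to a 16-byte
+// boundary, and the original sp is saved in the highest of those slots.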
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  li(t8, Operand(function));
+  CallCFunctionHelper(t8, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_arguments) {
+  CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunction(Register function,
+                                   int num_arguments) {
+  CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
+  DCHECK(has_frame());
+  // Make sure that the stack is aligned before calling a C function unless
+  // running in the simulator. The simulator has its own alignment check which
+  // provides more information.
+  // The argument slots are presumed to have been set up by
+  // PrepareCallCFunction. Per the MIPS ABI, the C function must be called
+  // via t9.
+
+#if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+  if (emit_debug_code()) {
+    int frame_alignment = base::OS::ActivationFrameAlignment();
+    int frame_alignment_mask = frame_alignment - 1;
+    if (frame_alignment > kPointerSize) {
+      DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+      Label alignment_as_expected;
+      And(at, sp, Operand(frame_alignment_mask));
+      Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
+      // Don't use Check here, as it will call Runtime_Abort possibly
+      // re-entering here.
+      stop("Unexpected alignment in CallCFunction");
+      bind(&alignment_as_expected);
+    }
+  }
+#endif  // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
+
+  // Just call directly. The function called cannot cause a GC, or
+  // allow preemption, so the return address in the link register
+  // stays correct.
+
+  if (!function.is(t9)) {
+    mov(t9, function);
+    function = t9;
+  }
+
+  Call(function);
+
+  int stack_passed_arguments = CalculateStackPassedWords(
+      num_reg_arguments, num_double_arguments);
+
+  if (base::OS::ActivationFrameAlignment() > kPointerSize) {
+    ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
+  }
+}
+
+
+#undef BRANCH_ARGS_CHECK
+
+
+void MacroAssembler::PatchRelocatedValue(Register li_location,
+                                         Register scratch,
+                                         Register new_value) {
+  lwu(scratch, MemOperand(li_location));
+  // At this point scratch is a lui(at, ...) instruction.
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, kTheInstructionToPatchShouldBeALui,
+        scratch, Operand(LUI));
+    lwu(scratch, MemOperand(li_location));
+  }
+  dsrl32(t9, new_value, 0);
+  Ins(scratch, t9, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location));
+
+  lwu(scratch, MemOperand(li_location, kInstrSize));
+  // scratch is now ori(at, ...).
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, kTheInstructionToPatchShouldBeAnOri,
+        scratch, Operand(ORI));
+    lwu(scratch, MemOperand(li_location, kInstrSize));
+  }
+  dsrl(t9, new_value, kImm16Bits);
+  Ins(scratch, t9, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location, kInstrSize));
+
+  lwu(scratch, MemOperand(li_location, kInstrSize * 3));
+  // scratch is now ori(at, ...).
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, kTheInstructionToPatchShouldBeAnOri,
+        scratch, Operand(ORI));
+    lwu(scratch, MemOperand(li_location, kInstrSize * 3));
+  }
+
+  Ins(scratch, new_value, 0, kImm16Bits);
+  sw(scratch, MemOperand(li_location, kInstrSize * 3));
+
+  // Update the I-cache so the new lui and ori can be executed.
+  FlushICache(li_location, 4);
+}
+
+void MacroAssembler::GetRelocatedValue(Register li_location,
+                                       Register value,
+                                       Register scratch) {
+  lwu(value, MemOperand(li_location));
+  if (emit_debug_code()) {
+    And(value, value, kOpcodeMask);
+    Check(eq, kTheInstructionShouldBeALui,
+        value, Operand(LUI));
+    lwu(value, MemOperand(li_location));
+  }
+
+  // value now holds a lui instruction. Extract the immediate.
+  andi(value, value, kImm16Mask);
+  dsll32(value, value, kImm16Bits);
+
+  lwu(scratch, MemOperand(li_location, kInstrSize));
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, kTheInstructionShouldBeAnOri,
+        scratch, Operand(ORI));
+    lwu(scratch, MemOperand(li_location, kInstrSize));
+  }
+  // "scratch" now holds an ori instruction. Extract the immediate.
+  andi(scratch, scratch, kImm16Mask);
+  dsll32(scratch, scratch, 0);
+
+  or_(value, value, scratch);
+
+  lwu(scratch, MemOperand(li_location, kInstrSize * 3));
+  if (emit_debug_code()) {
+    And(scratch, scratch, kOpcodeMask);
+    Check(eq, kTheInstructionShouldBeAnOri,
+        scratch, Operand(ORI));
+    lwu(scratch, MemOperand(li_location, kInstrSize * 3));
+  }
+  // "scratch" now holds an ori instruction. Extract the immediate.
+  andi(scratch, scratch, kImm16Mask);
+  dsll(scratch, scratch, kImm16Bits);
+
+  or_(value, value, scratch);
+  // Sign extend extracted address.
+  dsra(value, value, kImm16Bits);
+}
+
+
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,
+    int mask,
+    Condition cc,
+    Label* condition_met) {
+  And(scratch, object, Operand(~Page::kPageAlignmentMask));
+  ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+  And(scratch, scratch, Operand(mask));
+  Branch(condition_met, cc, scratch, Operand(zero_reg));
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
+                                        Register scratch,
+                                        Label* if_deprecated) {
+  if (map->CanBeDeprecated()) {
+    li(scratch, Operand(map));
+    ld(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+    And(scratch, scratch, Operand(Map::Deprecated::kMask));
+    Branch(if_deprecated, ne, scratch, Operand(zero_reg));
+  }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object,
+                                 Register scratch0,
+                                 Register scratch1,
+                                 Label* on_black) {
+  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object,
+                              Register bitmap_scratch,
+                              Register mask_scratch,
+                              Label* has_color,
+                              int first_bit,
+                              int second_bit) {
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color;
+  // Note that we are using a 4-byte aligned 8-byte load.
+  Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  And(t8, t9, Operand(mask_scratch));
+  Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
+  // Shift left 1 by adding.
+  Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
+  And(t8, t9, Operand(mask_scratch));
+  Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
+
+  bind(&other_color);
+}
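+
+// Example: JumpIfBlack above passes first_bit = 1, second_bit = 0, matching
+// Marking::kBlackBitPattern "10": branch to has_color only if the first mark
+// bit is set and the second is clear.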
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value,
+                                      Register scratch,
+                                      Label* not_data_object) {
+  DCHECK(!AreAliased(value, scratch, t8, no_reg));
+  Label is_data_object;
+  ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+  Branch(&is_data_object, eq, t8, Operand(scratch));
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  Branch(not_data_object, ne, t8, Operand(zero_reg));
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg,
+                                 Register bitmap_reg,
+                                 Register mask_reg) {
+  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+  // addr_reg is divided into fields:
+  // |63        page base        20|19    high      8|7   shift   3|2  0|
+  // 'high' gives the index of the cell holding color bits for the object.
+  // 'shift' gives the offset in the cell for this object's color.
+  And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
+  Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
+  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+  Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
+  dsll(t8, t8, Bitmap::kBytesPerCellLog2);
+  Daddu(bitmap_reg, bitmap_reg, t8);
+  li(t8, Operand(1));
+  dsllv(mask_reg, t8, mask_reg);
+}
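+
+// Illustrative decomposition (added; assumes 64-bit pointers, 32-bit mark
+// cells and 1MB pages, i.e. kPointerSizeLog2 = 3, Bitmap::kBitsPerCellLog2
+// = 5, kPageSizeBits = 20): for an address with low 20 bits 0x12345, bits
+// [7:3] = 8 select the bit within the cell (mask_reg = 1 << 8) and bits
+// [19:8] = 0x123 select the cell, giving a cell byte offset of 0x123 * 4.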
+
+
+void MacroAssembler::EnsureNotWhite(
+    Register value,
+    Register bitmap_scratch,
+    Register mask_scratch,
+    Register load_scratch,
+    Label* value_is_white_and_not_data) {
+  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there we only need to check one bit.
+  // Note that we are using a 4-byte aligned 8-byte load.
+  Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  And(t8, mask_scratch, load_scratch);
+  Branch(&done, ne, t8, Operand(zero_reg));
+
+  if (emit_debug_code()) {
+    // Check for impossible bit pattern.
+    Label ok;
+    // sll may overflow, making the check conservative.
+    dsll(t8, mask_scratch, 1);
+    And(t8, load_scratch, t8);
+    Branch(&ok, eq, t8, Operand(zero_reg));
+    stop("Impossible marking bit pattern");
+    bind(&ok);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = load_scratch;  // Holds map while checking type.
+  Register length = load_scratch;  // Holds length of object after testing type.
+  Label is_data_object;
+
+  // Check for heap-number
+  ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
+  {
+    Label skip;
+    Branch(&skip, ne, t8, Operand(map));
+    li(length, HeapNumber::kSize);
+    Branch(&is_data_object);
+    bind(&skip);
+  }
+
+  // Check for strings.
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = load_scratch;
+  lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg));
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
+  And(t8, instance_type, Operand(kExternalStringTag));
+  {
+    Label skip;
+    Branch(&skip, eq, t8, Operand(zero_reg));
+    li(length, ExternalString::kSize);
+    Branch(&is_data_object);
+    bind(&skip);
+  }
+
+  // Sequential string, either Latin1 or UC16.
+  // For Latin1 (char-size of 1) we shift the smi tag away to get the length.
+  // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
+  // getting the length multiplied by 2.
+  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+  DCHECK(kSmiTag == 0 && kSmiTagSize == 1);
+  lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
+  And(t8, instance_type, Operand(kStringEncodingMask));
+  {
+    Label skip;
+    Branch(&skip, ne, t8, Operand(zero_reg));
+    // Adjust length for UC16.
+    dsll(t9, t9, 1);
+    bind(&skip);
+  }
+  Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+  DCHECK(!length.is(t8));
+  And(length, length, Operand(~kObjectAlignmentMask));
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  Or(t8, t8, Operand(mask_scratch));
+  Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+  And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
+  Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  Daddu(t8, t8, Operand(length));
+  Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+  bind(&done);
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+  ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+  ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  And(dst, dst, Operand(Map::EnumLengthBits::kMask));
+  SmiTag(dst);
+}
+
+
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+  Register empty_fixed_array_value = a6;
+  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Label next, start;
+  mov(a2, a0);
+
+  // Check if the enum length field is properly initialized, indicating that
+  // there is an enum cache.
+  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+
+  EnumLength(a3, a1);
+  Branch(
+      call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
+
+  jmp(&start);
+
+  bind(&next);
+  ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
+
+  // For all objects but the receiver, check that the cache is empty.
+  EnumLength(a3, a1);
+  Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
+
+  bind(&start);
+
+  // Check that there are no elements. Register a2 contains the current JS
+  // object we've reached through the prototype chain.
+  Label no_elements;
+  ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
+  Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
+
+  // Second chance, the object may be using the empty slow element dictionary.
+  LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
+  Branch(call_runtime, ne, a2, Operand(at));
+
+  bind(&no_elements);
+  ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
+  Branch(&next, ne, a2, Operand(null_value));
+}
+
+
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  DCHECK(!output_reg.is(input_reg));
+  Label done;
+  li(output_reg, Operand(255));
+  // Normal branch: nop in delay slot.
+  Branch(&done, gt, input_reg, Operand(output_reg));
+  // Use delay slot in this branch.
+  Branch(USE_DELAY_SLOT, &done, lt, input_reg, Operand(zero_reg));
+  mov(output_reg, zero_reg);  // In delay slot.
+  mov(output_reg, input_reg);  // Value is in range 0..255.
+  bind(&done);
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister temp_double_reg) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  Move(temp_double_reg, 0.0);
+  BranchF(&above_zero, NULL, gt, input_reg, temp_double_reg);
+
+  // Double value is less than zero, NaN or Inf, return 0.
+  mov(result_reg, zero_reg);
+  Branch(&done);
+
+  // Double value is >= 255, return 255.
+  bind(&above_zero);
+  Move(temp_double_reg, 255.0);
+  BranchF(&in_bounds, NULL, le, input_reg, temp_double_reg);
+  li(result_reg, Operand(255));
+  Branch(&done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+  cvt_w_d(temp_double_reg, input_reg);
+  mfc1(result_reg, temp_double_reg);
+  bind(&done);
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(
+    Register receiver_reg,
+    Register scratch_reg,
+    Label* no_memento_found,
+    Condition cond,
+    Label* allocation_memento_present) {
+  ExternalReference new_space_start =
+      ExternalReference::new_space_start(isolate());
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+  Daddu(scratch_reg, receiver_reg,
+       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+  Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
+  li(at, Operand(new_space_allocation_top));
+  ld(at, MemOperand(at));
+  Branch(no_memento_found, gt, scratch_reg, Operand(at));
+  ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+  if (allocation_memento_present) {
+    Branch(allocation_memento_present, cond, scratch_reg,
+           Operand(isolate()->factory()->allocation_memento_map()));
+  }
+}
+
+
+Register GetRegisterThatIsNotOneOf(Register reg1,
+                                   Register reg2,
+                                   Register reg3,
+                                   Register reg4,
+                                   Register reg5,
+                                   Register reg6) {
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+
+  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+    Register candidate = Register::FromAllocationIndex(i);
+    if (regs & candidate.bit()) continue;
+    return candidate;
+  }
+  UNREACHABLE();
+  return no_reg;
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(
+    Register object,
+    Register scratch0,
+    Register scratch1,
+    Label* found) {
+  DCHECK(!scratch1.is(scratch0));
+  Factory* factory = isolate()->factory();
+  Register current = scratch0;
+  Label loop_again;
+
+  // Start the walk at the object itself.
+  Move(current, object);
+
+  // Loop based on the map going up the prototype chain.
+  bind(&loop_again);
+  ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
+  lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+  DecodeField<Map::ElementsKindBits>(scratch1);
+  Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
+  ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
+  Branch(&loop_again, ne, current, Operand(factory->null_value()));
+}
+
+
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6,
+                Register reg7,
+                Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
+}
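+
+// Example (added): AreAliased(a0, a1, a0) counts 3 valid registers but only
+// 2 distinct bits in the RegList, so it returns true.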
+
+
+CodePatcher::CodePatcher(byte* address,
+                         int instructions,
+                         FlushICache flush_cache)
+    : address_(address),
+      size_(instructions * Assembler::kInstrSize),
+      masm_(NULL, address, size_ + Assembler::kGap),
+      flush_cache_(flush_cache) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate
+  // size bytes of instructions without failing with buffer size constraints.
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  if (flush_cache_ == FLUSH) {
+    CpuFeatures::FlushICache(address_, size_);
+  }
+  // Check that the code was patched as expected.
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr instr) {
+  masm()->emit(instr);
+}
+
+
+void CodePatcher::Emit(Address addr) {
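+  // Left unimplemented (assumption: a 64-bit Address cannot be emitted as a
+  // single 32-bit instruction word on mips64).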
+  // masm()->emit(reinterpret_cast<Instr>(addr));
+}
+
+
+void CodePatcher::ChangeBranchCondition(Condition cond) {
+  Instr instr = Assembler::instr_at(masm_.pc_);
+  DCHECK(Assembler::IsBranch(instr));
+  uint32_t opcode = Assembler::GetOpcodeField(instr);
+  // Currently only the 'eq' and 'ne' cond values are supported and the simple
+  // branch instructions (with opcode being the branch type).
+  // There are some special cases (see Assembler::IsBranch()) so extending this
+  // would be tricky.
+  DCHECK(opcode == BEQ || opcode == BNE || opcode == BLEZ ||
+         opcode == BGTZ || opcode == BEQL || opcode == BNEL ||
+         opcode == BLEZL || opcode == BGTZL);
+  opcode = (cond == eq) ? BEQ : BNE;
+  instr = (instr & ~kOpcodeMask) | opcode;
+  masm_.emit(instr);
+}
+
+
+void MacroAssembler::TruncatingDiv(Register result,
+                                   Register dividend,
+                                   int32_t divisor) {
+  DCHECK(!dividend.is(result));
+  DCHECK(!dividend.is(at));
+  DCHECK(!result.is(at));
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+  li(at, Operand(mag.multiplier));
+  Mulh(result, dividend, Operand(at));
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) {
+    Addu(result, result, Operand(dividend));
+  }
+  if (divisor < 0 && !neg && mag.multiplier > 0) {
+    Subu(result, result, Operand(dividend));
+  }
+  if (mag.shift > 0) sra(result, result, mag.shift);
+  srl(at, dividend, 31);
+  Addu(result, result, Operand(at));
+}
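+
+// Worked example (added; the constants are the classic magic numbers for
+// signed division by 3, per Hacker's Delight): divisor 3 gives multiplier
+// 0x55555556 and shift 0. For dividend 7, Mulh yields 2 and srl(7, 31) = 0,
+// so result = 2 = trunc(7 / 3). For dividend -7, Mulh yields -3 and the
+// correction adds srl(-7, 31) = 1, so result = -2 = trunc(-7 / 3).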
+
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/macro-assembler-mips64.h b/src/mips64/macro-assembler-mips64.h
new file mode 100644
index 0000000..2da48fb
--- /dev/null
+++ b/src/mips64/macro-assembler-mips64.h
@@ -0,0 +1,1781 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
+#define V8_MIPS64_MACRO_ASSEMBLER_MIPS64_H_
+
+#include "src/assembler.h"
+#include "src/globals.h"
+#include "src/mips64/assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declaration.
+class JumpTarget;
+
+// Reserved Register Usage Summary.
+//
+// Registers t8, t9, and at are reserved for use by the MacroAssembler.
+//
+// The programmer should know that the MacroAssembler may clobber these three,
+// but won't touch other registers except in special cases.
+//
+// Per the MIPS ABI, register t9 must be used for indirect function calls
+// via 'jalr t9' or 'jr t9' instructions. This is relied upon by gcc when
+// trying to update the gp register for position-independent code. Whenever
+// MIPS generated code calls C code, it must be via t9 register.
+
+
+// Flags used for LeaveExitFrame function.
+enum LeaveExitFrameMode {
+  EMIT_RETURN = true,
+  NO_EMIT_RETURN = false
+};
+
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+  // Tag the result.
+  TAG_RESULT,
+  // Don't tag the result.
+  DONT_TAG_RESULT
+};
+
+// Flags used for the ObjectToDoubleFPURegister function.
+enum ObjectToDoubleFlags {
+  // No special flags.
+  NO_OBJECT_TO_DOUBLE_FLAGS = 0,
+  // Object is known to be a non smi.
+  OBJECT_NOT_SMI = 1 << 0,
+  // Don't load NaNs or infinities, branch to the non number case instead.
+  AVOID_NANS_AND_INFINITIES = 1 << 1
+};
+
+// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
+enum BranchDelaySlot {
+  USE_DELAY_SLOT,
+  PROTECT
+};
+
+// Flags used for the li macro-assembler function.
+enum LiFlags {
+  // If the constant value can be represented in just 16 bits, then
+  // optimize the li to use a single instruction, rather than a lui/ori/dsll
+  // sequence.
+  OPTIMIZE_SIZE = 0,
+  // Always use 6 instructions (lui/ori/dsll sequence), even if the constant
+  // could be loaded with just one, so that this value is patchable later.
+  CONSTANT_SIZE = 1,
+  // For address loads only 4 instructions are required. Used to mark a
+  // constant load that will be used as an address without relocation
+  // information. It ensures predictable code size, so specific sites
+  // in code are patchable.
+  ADDRESS_LOAD  = 2
+};
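+
+// Illustrative usage (assumed call sites, not from the original source):
+// a site that is patched later must keep a predictable size, so it loads
+// with CONSTANT_SIZE or ADDRESS_LOAD instead of the default:
+//
+//   __ li(t9, Operand(imm64), CONSTANT_SIZE);   // always 6 instructions
+//   __ li(at, Operand(addr64), ADDRESS_LOAD);   // always 4 instructions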
+
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+  kPointersToHereMaybeInteresting,
+  kPointersToHereAreAlwaysInteresting
+};
+enum RAStatus { kRAHasNotBeenSaved, kRAHasBeenSaved };
+
+Register GetRegisterThatIsNotOneOf(Register reg1,
+                                   Register reg2 = no_reg,
+                                   Register reg3 = no_reg,
+                                   Register reg4 = no_reg,
+                                   Register reg5 = no_reg,
+                                   Register reg6 = no_reg);
+
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3 = no_reg,
+                Register reg4 = no_reg,
+                Register reg5 = no_reg,
+                Register reg6 = no_reg,
+                Register reg7 = no_reg,
+                Register reg8 = no_reg);
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+inline MemOperand GlobalObjectOperand() {
+  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+// Generate a MemOperand for loading a field from an object.
+inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
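+
+// Illustrative usage (assumed registers): loading an object's map. The
+// kHeapObjectTag bias is folded into the operand, so the load sees the
+// untagged address:
+//
+//   __ ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));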
+
+
+inline MemOperand UntagSmiMemOperand(Register rm, int offset) {
+  // Assumes that Smis are shifted by 32 bits and little endianness.
+  STATIC_ASSERT(kSmiShift == 32);
+  return MemOperand(rm, offset + (kSmiShift / kBitsPerByte));
+}
+
+
+inline MemOperand UntagSmiFieldMemOperand(Register rm, int offset) {
+  return UntagSmiMemOperand(rm, offset - kHeapObjectTag);
+}
+
+
+// Generate a MemOperand for storing arguments 5..N on the stack
+// when calling CallCFunction().
+// TODO(plind): Currently ONLY used for O32. Should be fixed for
+//              n64, and used in RegExp code, and other places
+//              with more than 8 arguments.
+inline MemOperand CFunctionArgumentOperand(int index) {
+  DCHECK(index > kCArgSlotCount);
+  // Argument 5 takes the slot just past the four Arg-slots.
+  int offset = (index - 5) * kPointerSize + kCArgsSlotsSize;
+  return MemOperand(sp, offset);
+}
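+
+// Worked example (illustrative only): for index == 5 the offset is
+// (5 - 5) * kPointerSize + kCArgsSlotsSize, i.e. the first slot above the
+// four reserved argument slots, matching the usage comment later in this
+// header:
+//
+//   __ sw(a4, CFunctionArgumentOperand(5));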
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler: public Assembler {
+ public:
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller never to invoke such a function on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+  // Arguments macros.
+#define COND_TYPED_ARGS Condition cond, Register r1, const Operand& r2
+#define COND_ARGS cond, r1, r2
+
+  // Cases when relocation is not needed.
+#define DECLARE_NORELOC_PROTOTYPE(Name, target_type) \
+  void Name(target_type target, BranchDelaySlot bd = PROTECT); \
+  inline void Name(BranchDelaySlot bd, target_type target) { \
+    Name(target, bd); \
+  } \
+  void Name(target_type target, \
+            COND_TYPED_ARGS, \
+            BranchDelaySlot bd = PROTECT); \
+  inline void Name(BranchDelaySlot bd, \
+                   target_type target, \
+                   COND_TYPED_ARGS) { \
+    Name(target, COND_ARGS, bd); \
+  }
+
+#define DECLARE_BRANCH_PROTOTYPES(Name) \
+  DECLARE_NORELOC_PROTOTYPE(Name, Label*) \
+  DECLARE_NORELOC_PROTOTYPE(Name, int16_t)
+
+  DECLARE_BRANCH_PROTOTYPES(Branch)
+  DECLARE_BRANCH_PROTOTYPES(BranchAndLink)
+  DECLARE_BRANCH_PROTOTYPES(BranchShort)
+
+#undef DECLARE_BRANCH_PROTOTYPES
+#undef COND_TYPED_ARGS
+#undef COND_ARGS
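+
+  // Illustrative usage of the generated overloads (assumed registers and
+  // labels, not from the original source):
+  //
+  //   __ Branch(&done);                                        // plain form
+  //   __ Branch(&miss, ne, scratch, Operand(zero_reg), USE_DELAY_SLOT);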
+
+
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+  const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
+  void Jump(Register target, COND_ARGS);
+  void Jump(intptr_t target, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, COND_ARGS);
+  static int CallSize(Register target, COND_ARGS);
+  void Call(Register target, COND_ARGS);
+  static int CallSize(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  void Call(Address target, RelocInfo::Mode rmode, COND_ARGS);
+  int CallSize(Handle<Code> code,
+               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+               TypeFeedbackId ast_id = TypeFeedbackId::None(),
+               COND_ARGS);
+  void Call(Handle<Code> code,
+            RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            TypeFeedbackId ast_id = TypeFeedbackId::None(),
+            COND_ARGS);
+  void Ret(COND_ARGS);
+  inline void Ret(BranchDelaySlot bd, Condition cond = al,
+                  Register rs = zero_reg,
+                  const Operand& rt = Operand(zero_reg)) {
+    Ret(cond, rs, rt, bd);
+  }
+
+  void Branch(Label* L,
+              Condition cond,
+              Register rs,
+              Heap::RootListIndex index,
+              BranchDelaySlot bdslot = PROTECT);
+
+#undef COND_ARGS
+
+  // Emit code to discard a non-negative number of pointer-sized elements
+  // from the stack, clobbering only the sp register.
+  void Drop(int count,
+            Condition cond = cc_always,
+            Register reg = no_reg,
+            const Operand& op = Operand(no_reg));
+
+  // Trivial case of DropAndRet that utilizes the delay slot and only emits
+  // 2 instructions.
+  void DropAndRet(int drop);
+
+  void DropAndRet(int drop,
+                  Condition cond,
+                  Register reg,
+                  const Operand& op);
+
+  // Swap two registers.  If the scratch register is omitted then a slightly
+  // less efficient form using xor instead of mov is emitted.
+  void Swap(Register reg1, Register reg2, Register scratch = no_reg);
+
+  void Call(Label* target);
+
+  inline void Move(Register dst, Register src) {
+    if (!dst.is(src)) {
+      mov(dst, src);
+    }
+  }
+
+  inline void Move(FPURegister dst, FPURegister src) {
+    if (!dst.is(src)) {
+      mov_d(dst, src);
+    }
+  }
+
+  inline void Move(Register dst_low, Register dst_high, FPURegister src) {
+    mfc1(dst_low, src);
+    mfhc1(dst_high, src);
+  }
+
+  inline void FmoveHigh(Register dst_high, FPURegister src) {
+    mfhc1(dst_high, src);
+  }
+
+  inline void FmoveLow(Register dst_low, FPURegister src) {
+    mfc1(dst_low, src);
+  }
+
+  inline void Move(FPURegister dst, Register src_low, Register src_high) {
+    mtc1(src_low, dst);
+    mthc1(src_high, dst);
+  }
+
+  // Conditional move.
+  void Move(FPURegister dst, double imm);
+  void Movz(Register rd, Register rs, Register rt);
+  void Movn(Register rd, Register rs, Register rt);
+  void Movt(Register rd, Register rs, uint16_t cc = 0);
+  void Movf(Register rd, Register rs, uint16_t cc = 0);
+
+  void Clz(Register rd, Register rs);
+
+  // Jump unconditionally to given label.
+  // We NEED a nop in the branch delay slot, as it is used by v8, for
+  // example in CodeGenerator::ProcessDeferred().
+  // Currently the branch delay slot is filled by the MacroAssembler.
+  // Rather, use b(Label) for code generation.
+  void jmp(Label* L) {
+    Branch(L);
+  }
+
+  void Load(Register dst, const MemOperand& src, Representation r);
+  void Store(Register src, const MemOperand& dst, Representation r);
+
+  // Load an object from the root table.
+  void LoadRoot(Register destination,
+                Heap::RootListIndex index);
+  void LoadRoot(Register destination,
+                Heap::RootListIndex index,
+                Condition cond, Register src1, const Operand& src2);
+
+  // Store an object to the root table.
+  void StoreRoot(Register source,
+                 Heap::RootListIndex index);
+  void StoreRoot(Register source,
+                 Heap::RootListIndex index,
+                 Condition cond, Register src1, const Operand& src2);
+
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  void IncrementalMarkingRecordWriteHelper(Register object,
+                                           Register value,
+                                           Register address);
+
+  enum RememberedSetFinalAction {
+    kReturnAtEnd,
+    kFallThroughAtEnd
+  };
+
+
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr,
+                           Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
+
+  void CheckPageFlag(Register object,
+                     Register scratch,
+                     int mask,
+                     Condition cc,
+                     Label* condition_met);
+
+  void CheckMapDeprecated(Handle<Map> map,
+                          Register scratch,
+                          Label* if_deprecated);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfNotInNewSpace(Register object,
+                           Register scratch,
+                           Label* branch) {
+    InNewSpace(object, scratch, ne, branch);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfInNewSpace(Register object,
+                        Register scratch,
+                        Label* branch) {
+    InNewSpace(object, scratch, eq, branch);
+  }
+
+  // Check if an object has a given incremental marking color.
+  void HasColor(Register object,
+                Register scratch0,
+                Register scratch1,
+                Label* has_color,
+                int first_bit,
+                int second_bit);
+
+  void JumpIfBlack(Register object,
+                   Register scratch0,
+                   Register scratch1,
+                   Label* on_black);
+
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object,
+                      Register scratch1,
+                      Register scratch2,
+                      Register scratch3,
+                      Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value,
+                        Register scratch,
+                        Label* not_data_object);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
+  void RecordWriteField(
+      Register object,
+      int offset,
+      Register value,
+      Register scratch,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context,
+      int offset,
+      Register value,
+      Register scratch,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting) {
+    RecordWriteField(context,
+                     offset + kHeapObjectTag,
+                     value,
+                     scratch,
+                     ra_status,
+                     save_fp,
+                     remembered_set_action,
+                     smi_check,
+                     pointers_to_here_check_for_value);
+  }
+
+  void RecordWriteForMap(
+      Register object,
+      Register map,
+      Register dst,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp);
+
+  // For a given |object| notify the garbage collector that the slot |address|
+  // has been written.  |value| is the object being stored. The value and
+  // address registers are clobbered by the operation.
+  void RecordWrite(
+      Register object,
+      Register address,
+      Register value,
+      RAStatus ra_status,
+      SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting);
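+
+  // Illustrative usage (assumed registers, not from the original source):
+  // a field store followed by its write barrier; RecordWrite clobbers the
+  // value and address registers:
+  //
+  //   __ sd(value, MemOperand(address));
+  //   __ RecordWrite(object, address, value, kRAHasBeenSaved,
+  //                  kDontSaveFPRegs);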
+
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support.
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, whereas both scratch registers are clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg,
+                              Register scratch,
+                              Label* miss);
+
+  void GetNumberHash(Register reg0, Register scratch);
+
+  void LoadFromNumberDictionary(Label* miss,
+                                Register elements,
+                                Register key,
+                                Register result,
+                                Register reg0,
+                                Register reg1,
+                                Register reg2);
+
+
+  inline void MarkCode(NopMarkerTypes type) {
+    nop(type);
+  }
+
+  // Check if the given instruction is a 'type' marker.
+  // i.e. check if it is a sll zero_reg, zero_reg, <type> (referenced as
+  // nop(type)). These instructions are generated to mark special locations in
+  // the code, like some special IC code.
+  static inline bool IsMarkedCode(Instr instr, int type) {
+    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+    return IsNop(instr, type);
+  }
+
+
+  static inline int GetCodeMarker(Instr instr) {
+    uint32_t opcode = ((instr & kOpcodeMask));
+    uint32_t rt = ((instr & kRtFieldMask) >> kRtShift);
+    uint32_t rs = ((instr & kRsFieldMask) >> kRsShift);
+    uint32_t sa = ((instr & kSaFieldMask) >> kSaShift);
+
+    // Return <n> if we have a sll zero_reg, zero_reg, n
+    // else return -1.
+    bool sllzz = (opcode == SLL &&
+                  rt == static_cast<uint32_t>(ToNumber(zero_reg)) &&
+                  rs == static_cast<uint32_t>(ToNumber(zero_reg)));
+    int type =
+        (sllzz && FIRST_IC_MARKER <= sa && sa < LAST_CODE_MARKER) ? sa : -1;
+    DCHECK((type == -1) ||
+           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+    return type;
+  }
+
+
+
+  // ---------------------------------------------------------------------------
+  // Allocation support.
+
+  // Allocate an object in new space or old pointer space. The object_size is
+  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+  // is passed. If the space is exhausted control continues at the gc_required
+  // label. The allocated object is returned in result. If the flag
+  // tag_allocated_object is true the result is tagged as a heap object.
+  // All registers are clobbered also when control continues at the gc_required
+  // label.
+  void Allocate(int object_size,
+                Register result,
+                Register scratch1,
+                Register scratch2,
+                Label* gc_required,
+                AllocationFlags flags);
+
+  void Allocate(Register object_size,
+                Register result,
+                Register scratch1,
+                Register scratch2,
+                Label* gc_required,
+                AllocationFlags flags);
+
+  // Undo allocation in new space. The object passed and objects allocated after
+  // it will no longer be allocated. The caller must make sure that no pointers
+  // are left to the object(s) no longer allocated as they would be invalid when
+  // allocation is undone.
+  void UndoAllocationInNewSpace(Register object, Register scratch);
+
+
+  void AllocateTwoByteString(Register result,
+                             Register length,
+                             Register scratch1,
+                             Register scratch2,
+                             Register scratch3,
+                             Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
+  void AllocateTwoByteConsString(Register result,
+                                 Register length,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register length,
+                                 Register scratch1, Register scratch2,
+                                 Label* gc_required);
+  void AllocateTwoByteSlicedString(Register result,
+                                   Register length,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register length,
+                                   Register scratch1, Register scratch2,
+                                   Label* gc_required);
+
+  // Allocates a heap number or jumps to the gc_required label if the young
+  // space is full and a scavenge is needed. All registers are clobbered also
+  // when control continues at the gc_required label.
+  void AllocateHeapNumber(Register result,
+                          Register scratch1,
+                          Register scratch2,
+                          Register heap_number_map,
+                          Label* gc_required,
+                          TaggingMode tagging_mode = TAG_RESULT,
+                          MutableMode mode = IMMUTABLE);
+
+  void AllocateHeapNumberWithValue(Register result,
+                                   FPURegister value,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Label* gc_required);
+
+  // ---------------------------------------------------------------------------
+  // Instruction macros.
+
+#define DEFINE_INSTRUCTION(instr)                                              \
+  void instr(Register rd, Register rs, const Operand& rt);                     \
+  void instr(Register rd, Register rs, Register rt) {                          \
+    instr(rd, rs, Operand(rt));                                                \
+  }                                                                            \
+  void instr(Register rs, Register rt, int32_t j) {                            \
+    instr(rs, rt, Operand(j));                                                 \
+  }
+
+#define DEFINE_INSTRUCTION2(instr)                                             \
+  void instr(Register rs, const Operand& rt);                                  \
+  void instr(Register rs, Register rt) {                                       \
+    instr(rs, Operand(rt));                                                    \
+  }                                                                            \
+  void instr(Register rs, int32_t j) {                                         \
+    instr(rs, Operand(j));                                                     \
+  }
+
+  DEFINE_INSTRUCTION(Addu);
+  DEFINE_INSTRUCTION(Daddu);
+  DEFINE_INSTRUCTION(Ddiv);
+  DEFINE_INSTRUCTION(Subu);
+  DEFINE_INSTRUCTION(Dsubu);
+  DEFINE_INSTRUCTION(Dmod);
+  DEFINE_INSTRUCTION(Mul);
+  DEFINE_INSTRUCTION(Mulh);
+  DEFINE_INSTRUCTION(Dmul);
+  DEFINE_INSTRUCTION(Dmulh);
+  DEFINE_INSTRUCTION2(Mult);
+  DEFINE_INSTRUCTION2(Dmult);
+  DEFINE_INSTRUCTION2(Multu);
+  DEFINE_INSTRUCTION2(Dmultu);
+  DEFINE_INSTRUCTION2(Div);
+  DEFINE_INSTRUCTION2(Ddiv);
+  DEFINE_INSTRUCTION2(Divu);
+  DEFINE_INSTRUCTION2(Ddivu);
+
+  DEFINE_INSTRUCTION(And);
+  DEFINE_INSTRUCTION(Or);
+  DEFINE_INSTRUCTION(Xor);
+  DEFINE_INSTRUCTION(Nor);
+  DEFINE_INSTRUCTION2(Neg);
+
+  DEFINE_INSTRUCTION(Slt);
+  DEFINE_INSTRUCTION(Sltu);
+
+  // MIPS64 R2 instruction macros.
+  DEFINE_INSTRUCTION(Ror);
+  DEFINE_INSTRUCTION(Dror);
+
+#undef DEFINE_INSTRUCTION
+#undef DEFINE_INSTRUCTION2
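+
+  // Illustrative usage (assumed registers): each macro above defines
+  // overloads for register, Operand, and plain-immediate right-hand sides,
+  // so the following all expand to the same Daddu macro instruction:
+  //
+  //   __ Daddu(v0, a0, a1);           // register form
+  //   __ Daddu(v0, a0, Operand(a1));  // explicit Operand form
+  //   __ Daddu(v0, a0, 8);            // immediate form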
+
+  void Pref(int32_t hint, const MemOperand& rs);
+
+
+  // ---------------------------------------------------------------------------
+  // Pseudo-instructions.
+
+  void mov(Register rd, Register rt) { or_(rd, rt, zero_reg); }
+
+  void Ulw(Register rd, const MemOperand& rs);
+  void Usw(Register rd, const MemOperand& rs);
+  void Uld(Register rd, const MemOperand& rs, Register scratch = at);
+  void Usd(Register rd, const MemOperand& rs, Register scratch = at);
+
+  // Load an immediate (up to 64 bits) into the rd register.
+  void li(Register rd, Operand j, LiFlags mode = OPTIMIZE_SIZE);
+  inline void li(Register rd, int64_t j, LiFlags mode = OPTIMIZE_SIZE) {
+    li(rd, Operand(j), mode);
+  }
+  void li(Register dst, Handle<Object> value, LiFlags mode = OPTIMIZE_SIZE);
+
+  // Push multiple registers on the stack.
+  // Registers are saved in numerical order, with higher numbered registers
+  // saved in higher memory addresses.
+  void MultiPush(RegList regs);
+  void MultiPushReversed(RegList regs);
+
+  void MultiPushFPU(RegList regs);
+  void MultiPushReversedFPU(RegList regs);
+
+  void push(Register src) {
+    Daddu(sp, sp, Operand(-kPointerSize));
+    sd(src, MemOperand(sp, 0));
+  }
+  void Push(Register src) { push(src); }
+
+  // Push a handle.
+  void Push(Handle<Object> handle);
+  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
+  // Push two registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2) {
+    Dsubu(sp, sp, Operand(2 * kPointerSize));
+    sd(src1, MemOperand(sp, 1 * kPointerSize));
+    sd(src2, MemOperand(sp, 0 * kPointerSize));
+  }
+
+  // Push three registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3) {
+    Dsubu(sp, sp, Operand(3 * kPointerSize));
+    sd(src1, MemOperand(sp, 2 * kPointerSize));
+    sd(src2, MemOperand(sp, 1 * kPointerSize));
+    sd(src3, MemOperand(sp, 0 * kPointerSize));
+  }
+
+  // Push four registers. Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Register src4) {
+    Dsubu(sp, sp, Operand(4 * kPointerSize));
+    sd(src1, MemOperand(sp, 3 * kPointerSize));
+    sd(src2, MemOperand(sp, 2 * kPointerSize));
+    sd(src3, MemOperand(sp, 1 * kPointerSize));
+    sd(src4, MemOperand(sp, 0 * kPointerSize));
+  }
+
+  void Push(Register src, Condition cond, Register tst1, Register tst2) {
+    // Since we don't have conditional execution we use a Branch.
+    Branch(3, cond, tst1, Operand(tst2));
+    Dsubu(sp, sp, Operand(kPointerSize));
+    sd(src, MemOperand(sp, 0));
+  }
+
+  void PushRegisterAsTwoSmis(Register src, Register scratch = at);
+  void PopRegisterAsTwoSmis(Register dst, Register scratch = at);
+
+  // Pops multiple values from the stack and loads them into the
+  // registers specified in regs. Pop order is the opposite of MultiPush.
+  void MultiPop(RegList regs);
+  void MultiPopReversed(RegList regs);
+
+  void MultiPopFPU(RegList regs);
+  void MultiPopReversedFPU(RegList regs);
+
+  void pop(Register dst) {
+    ld(dst, MemOperand(sp, 0));
+    Daddu(sp, sp, Operand(kPointerSize));
+  }
+  void Pop(Register dst) { pop(dst); }
+
+  // Pop two registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2) {
+    DCHECK(!src1.is(src2));
+    ld(src2, MemOperand(sp, 0 * kPointerSize));
+    ld(src1, MemOperand(sp, 1 * kPointerSize));
+    Daddu(sp, sp, 2 * kPointerSize);
+  }
+
+  // Pop three registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3) {
+    ld(src3, MemOperand(sp, 0 * kPointerSize));
+    ld(src2, MemOperand(sp, 1 * kPointerSize));
+    ld(src1, MemOperand(sp, 2 * kPointerSize));
+    Daddu(sp, sp, 3 * kPointerSize);
+  }
+
+  void Pop(uint32_t count = 1) {
+    Daddu(sp, sp, Operand(count * kPointerSize));
+  }
+
+  // Push and pop the registers that can hold pointers, as defined by the
+  // RegList constant kSafepointSavedRegisters.
+  void PushSafepointRegisters();
+  void PopSafepointRegisters();
+  // Store value in register src in the safepoint stack slot for
+  // register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst);
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
+  // from C.
+  // Does not handle errors.
+  void FlushICache(Register address, unsigned instructions);
+
+  // MIPS64 R2 instruction macro.
+  void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
+  void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+
+  // ---------------------------------------------------------------------------
+  // FPU macros. These do not handle special cases like NaN or +- inf.
+
+  // Convert unsigned word to double.
+  void Cvt_d_uw(FPURegister fd, FPURegister fs, FPURegister scratch);
+  void Cvt_d_uw(FPURegister fd, Register rs, FPURegister scratch);
+
+  // Convert double to unsigned long.
+  void Trunc_l_ud(FPURegister fd, FPURegister fs, FPURegister scratch);
+
+  void Trunc_l_d(FPURegister fd, FPURegister fs);
+  void Round_l_d(FPURegister fd, FPURegister fs);
+  void Floor_l_d(FPURegister fd, FPURegister fs);
+  void Ceil_l_d(FPURegister fd, FPURegister fs);
+
+  // Convert double to unsigned word.
+  void Trunc_uw_d(FPURegister fd, FPURegister fs, FPURegister scratch);
+  void Trunc_uw_d(FPURegister fd, Register rs, FPURegister scratch);
+
+  void Trunc_w_d(FPURegister fd, FPURegister fs);
+  void Round_w_d(FPURegister fd, FPURegister fs);
+  void Floor_w_d(FPURegister fd, FPURegister fs);
+  void Ceil_w_d(FPURegister fd, FPURegister fs);
+
+  void Madd_d(FPURegister fd,
+              FPURegister fr,
+              FPURegister fs,
+              FPURegister ft,
+              FPURegister scratch);
+
+  // Wrapper function for the different cmp/branch types.
+  void BranchF(Label* target,
+               Label* nan,
+               Condition cc,
+               FPURegister cmp1,
+               FPURegister cmp2,
+               BranchDelaySlot bd = PROTECT);
+
+  // Alternate (inline) version for better readability with USE_DELAY_SLOT.
+  inline void BranchF(BranchDelaySlot bd,
+                      Label* target,
+                      Label* nan,
+                      Condition cc,
+                      FPURegister cmp1,
+                      FPURegister cmp2) {
+    BranchF(target, nan, cc, cmp1, cmp2, bd);
+  }
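+
+  // Illustrative usage (assumed labels and registers): compare two doubles
+  // and branch, routing unordered (NaN) comparisons to a separate label:
+  //
+  //   __ BranchF(&smaller, &is_nan, lt, f0, f2);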
+
+  // Truncates a double using a specific rounding mode, and writes the value
+  // to the result register.
+  // The except_flag will contain any exceptions caused by the instruction.
+  // If check_inexact is kDontCheckForInexactConversion, then the inexact
+  // exception is masked.
+  void EmitFPUTruncate(FPURoundingMode rounding_mode,
+                       Register result,
+                       DoubleRegister double_input,
+                       Register scratch,
+                       DoubleRegister double_scratch,
+                       Register except_flag,
+                       CheckForInexactConversion check_inexact
+                           = kDontCheckForInexactConversion);
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if
+  // it succeeds, otherwise falls through if the result is saturated. On
+  // return 'result' either holds the answer, or is clobbered on fall through.
+  //
+  // Only public for the test code in test-code-stubs-mips64.cc.
+  void TryInlineTruncateDoubleToI(Register result,
+                                  DoubleRegister input,
+                                  Label* done);
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+  // Exits with 'result' holding the answer.
+  void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+  // Performs a truncating conversion of a heap number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'input'
+  // must be different registers. Exits with 'result' holding the answer.
+  void TruncateHeapNumberToI(Register result, Register object);
+
+  // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'input' must be
+  // different registers.
+  void TruncateNumberToI(Register object,
+                         Register result,
+                         Register heap_number_map,
+                         Register scratch,
+                         Label* not_int32);
+
+  // Loads the number from object into dst register.
+  // If |object| is neither smi nor heap number, |not_number| is jumped to
+  // with |object| still intact.
+  void LoadNumber(Register object,
+                  FPURegister dst,
+                  Register heap_number_map,
+                  Register scratch,
+                  Label* not_number);
+
+  // Loads the number from object into double_dst in the double format.
+  // Control will jump to not_int32 if the value cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be loaded.
+  void LoadNumberAsInt32Double(Register object,
+                               DoubleRegister double_dst,
+                               Register heap_number_map,
+                               Register scratch1,
+                               Register scratch2,
+                               FPURegister double_scratch,
+                               Label* not_int32);
+
+  // Loads the number from object into dst as a 32-bit integer.
+  // Control will jump to not_int32 if the object cannot be exactly represented
+  // by a 32-bit integer.
+  // Floating point values in the 32-bit integer range that are not exact
+  // integers won't be converted.
+  void LoadNumberAsInt32(Register object,
+                         Register dst,
+                         Register heap_number_map,
+                         Register scratch1,
+                         Register scratch2,
+                         FPURegister double_scratch0,
+                         FPURegister double_scratch1,
+                         Label* not_int32);
+
+  // Enter exit frame.
+  // argc - argument count to be dropped by LeaveExitFrame.
+  // save_doubles - saves FPU registers on stack, currently disabled.
+  // stack_space - extra stack space.
+  void EnterExitFrame(bool save_doubles,
+                      int stack_space = 0);
+
+  // Leave the current exit frame.
+  void LeaveExitFrame(bool save_doubles,
+                      Register arg_count,
+                      bool restore_context,
+                      bool do_return = NO_EMIT_RETURN);
+
+  // Get the actual activation frame alignment for target environment.
+  static int ActivationFrameAlignment();
+
+  // Make sure the stack is aligned. Only emits code in debug mode.
+  void AssertStackIsAligned();
+
+  void LoadContext(Register dst, int context_chain_length);
+
+  // Conditionally load the cached Array transitioned map of type
+  // transitioned_kind from the native context if the map in register
+  // map_in_out is the cached Array map in the native context of
+  // expected_kind.
+  void LoadTransitionedArrayMapConditional(
+      ElementsKind expected_kind,
+      ElementsKind transitioned_kind,
+      Register map_in_out,
+      Register scratch,
+      Label* no_map_match);
+
+  void LoadGlobalFunction(int index, Register function);
+
+  // Load the initial map from the global function. The registers
+  // function and map can be the same, function is then overwritten.
+  void LoadGlobalFunctionInitialMap(Register function,
+                                    Register map,
+                                    Register scratch);
+
+  void InitializeRootRegister() {
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(isolate());
+    li(kRootRegister, Operand(roots_array_start));
+  }
+
+  // -------------------------------------------------------------------------
+  // JavaScript invokes.
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code,
+                  const ParameterCount& expected,
+                  const ParameterCount& actual,
+                  InvokeFlag flag,
+                  const CallWrapper& call_wrapper);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function,
+                      const ParameterCount& actual,
+                      InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void InvokeFunction(Register function,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void InvokeFunction(Handle<JSFunction> function,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+
+  void IsObjectJSObjectType(Register heap_object,
+                            Register map,
+                            Register scratch,
+                            Label* fail);
+
+  void IsInstanceJSObjectType(Register map,
+                              Register scratch,
+                              Label* fail);
+
+  void IsObjectJSStringType(Register object,
+                            Register scratch,
+                            Label* fail);
+
+  void IsObjectNameType(Register object,
+                        Register scratch,
+                        Label* fail);
+
+  // -------------------------------------------------------------------------
+  // Debugger Support.
+
+  void DebugBreak();
+
+  // -------------------------------------------------------------------------
+  // Exception handling.
+
+  // Push a new try handler and link into try handler chain.
+  void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  // Must preserve the result register.
+  void PopTryHandler();
+
+  // Passes thrown value to the handler of top of the try handler chain.
+  void Throw(Register value);
+
+  // Propagates an uncatchable exception to the top of the current JS stack's
+  // handler chain.
+  void ThrowUncatchable(Register value);
+
+  // Copies a fixed number of fields of heap objects from src to dst.
+  void CopyFields(Register dst, Register src, RegList temps, int field_count);
+
+  // Copies a number of bytes from src to dst. All registers are clobbered. On
+  // exit src and dst will point to the place just after where the last byte was
+  // read or written and length will be zero.
+  void CopyBytes(Register src,
+                 Register dst,
+                 Register length,
+                 Register scratch);
+
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // up to but not including |end_offset| are overwritten with the value in
+  // |filler|.  At the end of the loop, |start_offset| takes the value of
+  // |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset,
+                                  Register end_offset,
+                                  Register filler);
+
+  // -------------------------------------------------------------------------
+  // Support functions.
+
+  // Try to get the function prototype of a function and put the value in
+  // the result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other registers may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function,
+                               Register result,
+                               Register scratch,
+                               Label* miss,
+                               bool miss_on_bound_function = false);
+
+  void GetObjectType(Register function,
+                     Register map,
+                     Register type_reg);
+
+  // Check if a map for a JSObject indicates that the object has fast elements.
+  // Jump to the specified label if it does not.
+  void CheckFastElements(Register map,
+                         Register scratch,
+                         Label* fail);
+
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map,
+                               Register scratch,
+                               Label* fail);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiElements(Register map,
+                            Register scratch,
+                            Label* fail);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements. Otherwise jump to fail.
+  void StoreNumberToDoubleElements(Register value_reg,
+                                   Register key_reg,
+                                   Register elements_reg,
+                                   Register scratch1,
+                                   Register scratch2,
+                                   Register scratch3,
+                                   Label* fail,
+                                   int elements_offset = 0);
+
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Jumps to
+  // "branch_to" if the result of the comparison is "cond". If multiple map
+  // compares are required, the compare sequence branches to early_success.
+  void CompareMapAndBranch(Register obj,
+                           Register scratch,
+                           Handle<Map> map,
+                           Label* early_success,
+                           Condition cond,
+                           Label* branch_to);
+
+  // As above, but the map of the object is already loaded into the register
+  // which is preserved by the code generated.
+  void CompareMapAndBranch(Register obj_map,
+                           Handle<Map> map,
+                           Label* early_success,
+                           Condition cond,
+                           Label* branch_to);
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // label if not. Skip the smi check if not required (object is known to be a
+  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+  // against maps that are ElementsKind transition maps of the specified map.
+  void CheckMap(Register obj,
+                Register scratch,
+                Handle<Map> map,
+                Label* fail,
+                SmiCheckType smi_check_type);
+
+
+  void CheckMap(Register obj,
+                Register scratch,
+                Heap::RootListIndex index,
+                Label* fail,
+                SmiCheckType smi_check_type);
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object)
+  void DispatchMap(Register obj,
+                   Register scratch,
+                   Handle<Map> map,
+                   Handle<Code> success,
+                   SmiCheckType smi_check_type);
+
+
+  // Load and check the instance type of an object for being a string.
+  // Loads the type into the second argument register.
+  // Returns a condition that will be enabled if the object was a string.
+  Condition IsObjectStringType(Register obj,
+                               Register type,
+                               Register result) {
+    ld(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    lbu(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    And(type, type, Operand(kIsNotStringMask));
+    DCHECK_EQ(0, kStringTag);
+    return eq;
+  }
+
+
+  // Picks out an array index from the hash field.
+  // Register use:
+  //   hash - holds the index's hash. Clobbered.
+  //   index - holds the overwritten index on exit.
+  void IndexFromHash(Register hash, Register index);
+
+  // Get the number of least significant bits from a register.
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
+  // Load the value of a number object into a FPU double register. If the
+  // object is not a number a jump to the label not_number is performed
+  // and the FPU double register is unchanged.
+  void ObjectToDoubleFPURegister(
+      Register object,
+      FPURegister value,
+      Register scratch1,
+      Register scratch2,
+      Register heap_number_map,
+      Label* not_number,
+      ObjectToDoubleFlags flags = NO_OBJECT_TO_DOUBLE_FLAGS);
+
+  // Load the value of a smi object into a FPU double register. The register
+  // scratch1 can be the same register as smi in which case smi will hold the
+  // untagged value afterwards.
+  void SmiToDoubleFPURegister(Register smi,
+                              FPURegister value,
+                              Register scratch1);
+
+  // -------------------------------------------------------------------------
+  // Overflow handling functions.
+  // Usage: first call the appropriate arithmetic function, then call one of the
+  // jump functions with the overflow_dst register as the second parameter.
+
+  void AdduAndCheckForOverflow(Register dst,
+                               Register left,
+                               Register right,
+                               Register overflow_dst,
+                               Register scratch = at);
+
+  void SubuAndCheckForOverflow(Register dst,
+                               Register left,
+                               Register right,
+                               Register overflow_dst,
+                               Register scratch = at);
+
+  void BranchOnOverflow(Label* label,
+                        Register overflow_check,
+                        BranchDelaySlot bd = PROTECT) {
+    Branch(label, lt, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void BranchOnNoOverflow(Label* label,
+                          Register overflow_check,
+                          BranchDelaySlot bd = PROTECT) {
+    Branch(label, ge, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void RetOnOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+    Ret(lt, overflow_check, Operand(zero_reg), bd);
+  }
+
+  void RetOnNoOverflow(Register overflow_check, BranchDelaySlot bd = PROTECT) {
+    Ret(ge, overflow_check, Operand(zero_reg), bd);
+  }
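+
+  // Illustrative usage of the pattern described above (assumed registers):
+  //
+  //   __ AdduAndCheckForOverflow(v0, a0, a1, t0);
+  //   __ BranchOnOverflow(&overflowed, t0);  // taken when t0 < 0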
+
+  // -------------------------------------------------------------------------
+  // Runtime calls.
+
+  // See comments at the beginning of CEntryStub::Generate.
+  inline void PrepareCEntryArgs(int num_args) {
+    li(s0, num_args);
+    li(s1, (num_args - 1) * kPointerSize);
+  }
+
+  inline void PrepareCEntryFunction(const ExternalReference& ref) {
+    li(s2, Operand(ref));
+  }
+
+#define COND_ARGS Condition cond = al, Register rs = zero_reg, \
+const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub,
+                TypeFeedbackId ast_id = TypeFeedbackId::None(),
+                COND_ARGS);
+
+  // Tail call a code stub (jump).
+  void TailCallStub(CodeStub* stub, COND_ARGS);
+
+#undef COND_ARGS
+
+  void CallJSExitStub(CodeStub* stub);
+
+  // Call a runtime routine.
+  void CallRuntime(const Runtime::Function* f,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, kSaveFPRegs);
+  }
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id,
+                   int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+  }
+
+  // Convenience function: call an external reference.
+  void CallExternalReference(const ExternalReference& ext,
+                             int num_arguments,
+                             BranchDelaySlot bd = PROTECT);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToExternalReference, but also takes care of passing the number
+  // of parameters.
+  void TailCallExternalReference(const ExternalReference& ext,
+                                 int num_arguments,
+                                 int result_size);
+
+  // Convenience function: tail call a runtime routine (jump).
+  void TailCallRuntime(Runtime::FunctionId fid,
+                       int num_arguments,
+                       int result_size);
+
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
+  // Before calling a C-function from generated code, align arguments on stack
+  // and add space for the four mips argument slots.
+  // After aligning the frame, non-register arguments must be stored on the
+  // stack, after the argument-slots using helper: CFunctionArgumentOperand().
+  // The argument count assumes all arguments are word sized.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  void PrepareCallCFunction(int num_reg_arguments,
+                            int num_double_registers,
+                            Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments,
+                            Register scratch);
+
+  // Arguments 1-4 are placed in registers a0 through a3 respectively.
+  // Arguments 5..n are stored on the stack using the following:
+  //  sw(a4, CFunctionArgumentOperand(5));
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by PrepareCallCFunction. The called function is not allowed to trigger a
+  // garbage collection, since that might move the code and invalidate the
+  // return address (unless this is somehow accounted for by the called
+  // function).
+  void CallCFunction(ExternalReference function, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
+  void CallCFunction(ExternalReference function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
+  void CallCFunction(Register function,
+                     int num_reg_arguments,
+                     int num_double_arguments);
+  void MovFromFloatResult(DoubleRegister dst);
+  void MovFromFloatParameter(DoubleRegister dst);
+
+  // There are two ways of passing double arguments on MIPS, depending on
+  // whether soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three different ways we call
+  // C functions from generated code.
+  void MovToFloatParameter(DoubleRegister src);
+  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+  void MovToFloatResult(DoubleRegister src);
+
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Restores context.  stack_space
+  // - space to be unwound on exit (includes the call JS arguments space and
+  // the additional space allocated for the fast call).
+  void CallApiFunctionAndReturn(Register function_address,
+                                ExternalReference thunk_ref,
+                                int stack_space,
+                                MemOperand return_value_operand,
+                                MemOperand* context_restore_operand);
+
+  // Jump to the builtin routine.
+  void JumpToExternalReference(const ExternalReference& builtin,
+                               BranchDelaySlot bd = PROTECT);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id,
+                     InvokeFlag flag,
+                     const CallWrapper& call_wrapper = NullCallWrapper());
+
+  // Store the code object for the given builtin in the target register and
+  // setup the function in a1.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+  // Store the function for the given builtin in the target register.
+  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+  struct Unresolved {
+    int pc;
+    uint32_t flags;  // See Bootstrapper::FixupFlags decoders/encoders.
+    const char* name;
+  };
+
+  Handle<Object> CodeObject() {
+    DCHECK(!code_object_.is_null());
+    return code_object_;
+  }
+
+  // Emit code for a truncating division by a constant. The dividend register is
+  // unchanged and 'at' gets clobbered. Dividend and result must be different.
+  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
+  // -------------------------------------------------------------------------
+  // StatsCounter support.
+
+  void SetCounter(StatsCounter* counter, int value,
+                  Register scratch1, Register scratch2);
+  void IncrementCounter(StatsCounter* counter, int value,
+                        Register scratch1, Register scratch2);
+  void DecrementCounter(StatsCounter* counter, int value,
+                        Register scratch1, Register scratch2);
+
+
+  // -------------------------------------------------------------------------
+  // Debugging.
+
+  // Calls Abort(msg) if the condition cc is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cc, BailoutReason reason, Register rs, Operand rt);
+  void AssertFastElements(Register elements);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cc, BailoutReason reason, Register rs, Operand rt);
+
+  // Print a message to stdout and abort execution.
+  void Abort(BailoutReason msg);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
+
+  // ---------------------------------------------------------------------------
+  // Number utilities.
+
+  // Check whether the value of reg is a power of two and not zero. If not
+  // control continues at the label not_power_of_two_or_zero. If reg is a
+  // power of two the register scratch contains the value of (reg - 1) when
+  // control falls through.
+  void JumpIfNotPowerOfTwoOrZero(Register reg,
+                                 Register scratch,
+                                 Label* not_power_of_two_or_zero);
+
+  // -------------------------------------------------------------------------
+  // Smi utilities.
+
+  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+  void SmiTagCheckOverflow(Register reg, Register overflow);
+  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
+
+  void SmiTag(Register dst, Register src) {
+    STATIC_ASSERT(kSmiTag == 0);
+    if (SmiValuesAre32Bits()) {
+      STATIC_ASSERT(kSmiShift == 32);
+      dsll32(dst, src, 0);
+    } else {
+      Addu(dst, src, src);
+    }
+  }
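+
+  // Worked example (illustrative only): with 32-bit smis, tagging the
+  // value 5 yields 0x0000000500000000 (the payload lives in the upper
+  // word); with 31-bit smis, Addu doubles the value, i.e. a one-bit left
+  // shift with the tag bit 0 in the least significant position.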
+
+  void SmiTag(Register reg) {
+    SmiTag(reg, reg);
+  }
+
+  // Try to convert int32 to smi. If the value is too large, preserve
+  // the original value and jump to not_a_smi. Destroys scratch and
+  // sets flags.
+  void TrySmiTag(Register reg, Register scratch, Label* not_a_smi) {
+    TrySmiTag(reg, reg, scratch, not_a_smi);
+  }
+
+  void TrySmiTag(Register dst,
+                 Register src,
+                 Register scratch,
+                 Label* not_a_smi) {
+    if (SmiValuesAre32Bits()) {
+      SmiTag(dst, src);
+    } else {
+      SmiTagCheckOverflow(at, src, scratch);
+      BranchOnOverflow(not_a_smi, scratch);
+      mov(dst, at);
+    }
+  }
+
+  void SmiUntag(Register dst, Register src) {
+    if (SmiValuesAre32Bits()) {
+      STATIC_ASSERT(kSmiShift == 32);
+      dsra32(dst, src, 0);
+    } else {
+      sra(dst, src, kSmiTagSize);
+    }
+  }
+
+  void SmiUntag(Register reg) {
+    SmiUntag(reg, reg);
+  }
+
+  // Produce the int32 equivalent of the Smi, left-shifted by 'scale' bits.
+  void SmiScale(Register dst, Register src, int scale) {
+    if (SmiValuesAre32Bits()) {
+      // The int portion is in the upper 32 bits of the 64-bit word.
+      dsra(dst, src, kSmiShift - scale);
+    } else {
+      DCHECK(scale >= kSmiTagSize);
+      sll(dst, src, scale - kSmiTagSize);
+    }
+  }
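+
+  // Editorial note (illustrative sketch, not part of the original patch):
+  // on plain integers, the two smi encodings above behave as follows
+  // (assuming kSmiShift == 32 for 64-bit smis and a one-bit tag otherwise):
+  //
+  //   int64_t DemoSmiTag64(int32_t v) {
+  //     return static_cast<int64_t>(v) << 32;  // dsll32 dst, src, 0
+  //   }
+  //   int32_t DemoSmiUntag64(int64_t s) {
+  //     return static_cast<int32_t>(s >> 32);  // dsra32 dst, src, 0
+  //   }
+  //   int32_t DemoSmiTag32(int32_t v) {
+  //     return v + v;                          // Addu dst, src, src == v << 1
+  //   }
+  //
+  // SmiScale relies on the same layout: arithmetic-shifting a 64-bit smi
+  // right by (kSmiShift - scale) yields the untagged value already
+  // multiplied by (1 << scale).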
+
+  // Combine load with untagging or scaling.
+  void SmiLoadUntag(Register dst, MemOperand src);
+
+  void SmiLoadScale(Register dst, MemOperand src, int scale);
+
+  // Returns 2 values: the Smi and a scaled version of the int within the Smi.
+  void SmiLoadWithScale(Register d_smi,
+                        Register d_scaled,
+                        MemOperand src,
+                        int scale);
+
+  // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
+  void SmiLoadUntagWithScale(Register d_int,
+                             Register d_scaled,
+                             MemOperand src,
+                             int scale);
+
+
+  // Test if the register contains a smi.
+  inline void SmiTst(Register value, Register scratch) {
+    And(scratch, value, Operand(kSmiTagMask));
+  }
+  inline void NonNegativeSmiTst(Register value, Register scratch) {
+    And(scratch, value, Operand(kSmiTagMask | kSmiSignMask));
+  }
+
+  // Untag the source value into destination and jump if source is a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+  // Untag the source value into destination and jump if source is not a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
+  // Jump if the register contains a smi.
+  void JumpIfSmi(Register value,
+                 Label* smi_label,
+                 Register scratch = at,
+                 BranchDelaySlot bd = PROTECT);
+
+  // Jump if the register contains a non-smi.
+  void JumpIfNotSmi(Register value,
+                    Label* not_smi_label,
+                    Register scratch = at,
+                    BranchDelaySlot bd = PROTECT);
+
+  // Jump if either of the registers contains a non-smi.
+  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+  // Jump if either of the registers contains a smi.
+  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+  // Abort execution if argument is a smi, enabled via --debug-code.
+  void AssertNotSmi(Register object);
+  void AssertSmi(Register object);
+
+  // Abort execution if argument is not a string, enabled via --debug-code.
+  void AssertString(Register object);
+
+  // Abort execution if argument is not a name, enabled via --debug-code.
+  void AssertName(Register object);
+
+  // Abort execution if argument is not undefined or an AllocationSite, enabled
+  // via --debug-code.
+  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+  // Abort execution if reg is not the root value with the given index,
+  // enabled via --debug-code.
+  void AssertIsRoot(Register reg, Heap::RootListIndex index);
+
+  // ---------------------------------------------------------------------------
+  // HeapNumber utilities.
+
+  void JumpIfNotHeapNumber(Register object,
+                           Register heap_number_map,
+                           Register scratch,
+                           Label* on_not_heap_number);
+
+  // -------------------------------------------------------------------------
+  // String utilities.
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache, the generated code falls
+  // through with the result in the result register. The object and the result
+  // register can be the same. If the number is not found in the cache, the
+  // code jumps to the label not_found; only the content of the object
+  // register is guaranteed to be unchanged.
+  void LookupNumberStringCache(Register object,
+                               Register result,
+                               Register scratch1,
+                               Register scratch2,
+                               Register scratch3,
+                               Label* not_found);
+
+  // Checks if both instance types are sequential one-byte strings and jumps to
+  // label if either is not.
+  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+      Register first_object_instance_type, Register second_object_instance_type,
+      Register scratch1, Register scratch2, Label* failure);
+
+  // Check if instance type is sequential one-byte string and jump to label if
+  // it is not.
+  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+                                                Label* failure);
+
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
+
+  void EmitSeqStringSetCharCheck(Register string,
+                                 Register index,
+                                 Register value,
+                                 Register scratch,
+                                 uint32_t encoding_mask);
+
+  // Checks if both objects are sequential one-byte strings and jumps to label
+  // if either is not. Assumes that neither object is a smi.
+  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register first,
+                                                    Register second,
+                                                    Register scratch1,
+                                                    Register scratch2,
+                                                    Label* failure);
+
+  // Checks if both objects are sequential one-byte strings and jumps to label
+  // if either is not.
+  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* not_flat_one_byte_strings);
+
+  void ClampUint8(Register output_reg, Register input_reg);
+
+  void ClampDoubleToUint8(Register result_reg,
+                          DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
+
+
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+  void EnumLength(Register dst, Register map);
+  void NumberOfOwnDescriptors(Register dst, Register map);
+
+  template<typename Field>
+  void DecodeField(Register dst, Register src) {
+    Ext(dst, src, Field::kShift, Field::kSize);
+  }
+
+  template<typename Field>
+  void DecodeField(Register reg) {
+    DecodeField<Field>(reg, reg);
+  }
+
+  template<typename Field>
+  void DecodeFieldToSmi(Register dst, Register src) {
+    static const int shift = Field::kShift;
+    static const int mask = Field::kMask >> shift;
+    dsrl(dst, src, shift);
+    And(dst, dst, Operand(mask));
+    dsll32(dst, dst, 0);
+  }
+
+  template<typename Field>
+  void DecodeFieldToSmi(Register reg) {
+    DecodeFieldToSmi<Field>(reg, reg);
+  }
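+
+  // Editorial note (illustrative sketch, not part of the original patch):
+  // DecodeField extracts Field::kSize bits starting at Field::kShift (the
+  // Ext instruction); on plain integers:
+  //
+  //   template <typename Field>
+  //   uint32_t DemoDecodeField(uint32_t word) {
+  //     return (word >> Field::kShift) & ((1u << Field::kSize) - 1u);
+  //   }
+  //
+  // DecodeFieldToSmi additionally re-tags the extracted value as a smi by
+  // shifting it into the upper 32 bits (the dsll32 above).
+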
+  // Generates function and stub prologue code.
+  void StubPrologue();
+  void Prologue(bool code_pre_aging);
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type);
+  void LeaveFrame(StackFrame::Type type);
+
+  // Patch the relocated value (lui/ori pair).
+  void PatchRelocatedValue(Register li_location,
+                           Register scratch,
+                           Register new_value);
+  // Get the relocated value (loaded data) from the lui/ori pair.
+  void GetRelocatedValue(Register li_location,
+                         Register value,
+                         Register scratch);
+
+  // Expects object in a0 and returns map with validated enum cache
+  // in a0.  Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Register null_value, Label* call_runtime);
+
+  // AllocationMemento support. Arrays may have an associated
+  // AllocationMemento object that can be checked for in order to pretransition
+  // to another type.
+  // On entry, receiver_reg should point to the array object.
+  // scratch_reg gets clobbered.
+  // If allocation info is present, jump to allocation_memento_present.
+  void TestJSArrayForAllocationMemento(
+      Register receiver_reg,
+      Register scratch_reg,
+      Label* no_memento_found,
+      Condition cond = al,
+      Label* allocation_memento_present = NULL);
+
+  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+                                         Register scratch_reg,
+                                         Label* memento_found) {
+    Label no_memento_found;
+    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+                                    &no_memento_found, eq, memento_found);
+    bind(&no_memento_found);
+  }
+
+  // Jumps to found label if a prototype map has dictionary elements.
+  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+                                        Register scratch1, Label* found);
+
+ private:
+  void CallCFunctionHelper(Register function,
+                           int num_reg_arguments,
+                           int num_double_arguments);
+
+  void BranchAndLinkShort(int16_t offset, BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(int16_t offset, Condition cond, Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(Label* L, BranchDelaySlot bdslot = PROTECT);
+  void BranchAndLinkShort(Label* L, Condition cond, Register rs,
+                          const Operand& rt,
+                          BranchDelaySlot bdslot = PROTECT);
+  void J(Label* L, BranchDelaySlot bdslot);
+  void Jr(Label* L, BranchDelaySlot bdslot);
+  void Jalr(Label* L, BranchDelaySlot bdslot);
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual,
+                      Handle<Code> code_constant,
+                      Register code_reg,
+                      Label* done,
+                      bool* definitely_mismatches,
+                      InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  // Get the code for the given builtin. Indicates via the 'resolved' flag
+  // whether the function could be resolved.
+  Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
+
+  void InitializeNewString(Register string,
+                           Register length,
+                           Heap::RootListIndex map_index,
+                           Register scratch1,
+                           Register scratch2);
+
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object,
+                  Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit.  Leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg,
+                          Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
+
+  // Compute memory operands for safepoint stack slots.
+  static int SafepointRegisterStackIndex(int reg_code);
+  MemOperand SafepointRegisterSlot(Register reg);
+  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+  bool generating_stub_;
+  bool has_frame_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
+
+  // Needs access to SafepointRegisterStackIndex for compiled frame
+  // traversal.
+  friend class StandardFrame;
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+  enum FlushICache {
+    FLUSH,
+    DONT_FLUSH
+  };
+
+  CodePatcher(byte* address,
+              int instructions,
+              FlushICache flush_cache = FLUSH);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+  // Emit an instruction directly.
+  void Emit(Instr instr);
+
+  // Emit an address directly.
+  void Emit(Address addr);
+
+  // Change the condition part of an instruction leaving the rest of the current
+  // instruction unchanged.
+  void ChangeBranchCondition(Condition cond);
+
+ private:
+  byte* address_;  // The address of the code being patched.
+  int size_;  // Number of bytes of the expected patch size.
+  MacroAssembler masm_;  // Macro assembler used to generate the code.
+  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
+};
+
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+
+} }  // namespace v8::internal
+
+#endif  // V8_MIPS_MACRO_ASSEMBLER_MIPS_H_
diff --git a/src/mips64/regexp-macro-assembler-mips64.cc b/src/mips64/regexp-macro-assembler-mips64.cc
new file mode 100644
index 0000000..b20cab9
--- /dev/null
+++ b/src/mips64/regexp-macro-assembler-mips64.cc
@@ -0,0 +1,1371 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/code-stubs.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
+#include "src/mips64/regexp-macro-assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - t3 : Temporarily stores the index of capture start after a matching pass
+ *        for a global regexp.
+ * - a5 : Pointer to current code object (Code*) including heap object tag.
+ * - a6 : Current position in input, as negative offset from end of string.
+ *        Please notice that this is the byte offset, not the character offset!
+ * - a7 : Currently loaded character. Must be loaded using
+ *        LoadCurrentCharacter before using any of the dispatch methods.
+ * - t0 : Points to tip of backtrack stack
+ * - t1 : Unused.
+ * - t2 : End of input (points to byte after last character in input).
+ * - fp : Frame pointer. Used to access arguments, local variables and
+ *         RegExp registers.
+ * - sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * TODO(plind): O32 documented here with intent of having single 32/64 codebase
+ *              in the future.
+ *
+ * The O32 stack will have the following structure:
+ *
+ *  - fp[76]  Isolate* isolate   (address of the current isolate)
+ *  - fp[72]  direct_call  (if 1, direct call from JavaScript code,
+ *                          if 0, call through the runtime system).
+ *  - fp[68]  stack_area_base (High end of the memory area to use as
+ *                             backtracking stack).
+ *  - fp[64]  capture array size (may fit multiple sets of matches)
+ *  - fp[60]  int* capture_array (int[num_saved_registers_], for output).
+ *  - fp[44..59]  MIPS O32 four argument slots
+ *  - fp[40]  secondary link/return address used by native call.
+ *  --- sp when called ---
+ *  - fp[36]  return address      (lr).
+ *  - fp[32]  old frame pointer   (r11).
+ *  - fp[0..31]  backup of registers s0..s7.
+ *  --- frame pointer ----
+ *  - fp[-4]  end of input       (address of end of string).
+ *  - fp[-8]  start of input     (address of first character in string).
+ *  - fp[-12] start index        (character index of start).
+ *  - fp[-16] void* input_string (location of a handle containing the string).
+ *  - fp[-20] success counter    (only for global regexps to count matches).
+ *  - fp[-24] Offset of location before start of input (effectively character
+ *            position -1). Used to initialize capture registers to a
+ *            non-position.
+ *  - fp[-28] At start (if 1, we are starting at the start of the
+ *    string, otherwise 0)
+ *  - fp[-32] register 0         (Only positions must be stored in the first
+ *  -         register 1          num_saved_registers_ registers)
+ *  -         ...
+ *  -         register num_registers-1
+ *  --- sp ---
+ *
+ *
+ * The N64 stack will have the following structure:
+ *
+ *  - fp[88]  Isolate* isolate   (address of the current isolate)               kIsolate
+ *  - fp[80]  secondary link/return address used by exit frame on native call.  kSecondaryReturnAddress
+ *                                                                              kStackFrameHeader
+ *  --- sp when called ---
+ *  - fp[72]  ra                 Return from RegExp code (ra).                  kReturnAddress
+ *  - fp[64]  s9, old-fp         Old fp, callee saved(s9).
+ *  - fp[0..63]  s0..s7          Callee-saved registers s0..s7.
+ *  --- frame pointer ----
+ *  - fp[-8]  direct_call        (1 = direct call from JS, 0 = from runtime)    kDirectCall
+ *  - fp[-16] stack_base         (Top of backtracking stack).                   kStackHighEnd
+ *  - fp[-24] capture array size (may fit multiple sets of matches)             kNumOutputRegisters
+ *  - fp[-32] int* capture_array (int[num_saved_registers_], for output).       kRegisterOutput
+ *  - fp[-40] end of input       (address of end of string).                    kInputEnd
+ *  - fp[-48] start of input     (address of first character in string).        kInputStart
+ *  - fp[-56] start index        (character index of start).                    kStartIndex
+ *  - fp[-64] void* input_string (location of a handle containing the string).  kInputString
+ *  - fp[-72] success counter    (only for global regexps to count matches).    kSuccessfulCaptures
+ *  - fp[-80] Offset of location before start of input (effectively character   kInputStartMinusOne
+ *            position -1). Used to initialize capture registers to a
+ *            non-position.
+ *  --------- The following output registers are 32-bit values. ---------
+ *  - fp[-88] register 0         (Only positions must be stored in the first    kRegisterZero
+ *  -         register 1          num_saved_registers_ registers)
+ *  -         ...
+ *  -         register num_registers-1
+ *  --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ *              int start_index,
+ *              Address start,
+ *              Address end,
+ *              Address secondary_return_address,  // Only used by native call.
+ *              int* capture_output_array,
+ *              byte* stack_area_base,
+ *              bool direct_call = false,
+ *              void* return_address,
+ *              Isolate* isolate);
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in mips/simulator-mips.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the ra register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
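+
+// Editorial note (illustrative sketch, not part of the original patch):
+// conceptually, invoking the generated code amounts to casting its entry
+// address to the signature documented above and calling it. A simplified,
+// hypothetical illustration (the real call goes through
+// NativeRegExpMacroAssembler::Execute and CALL_GENERATED_REGEXP_CODE, and
+// also passes the secondary return address):
+//
+//   typedef int (*RegExpEntry)(String* input_string, int start_index,
+//                              Address start, Address end,
+//                              int* capture_output_array,
+//                              byte* stack_area_base, bool direct_call,
+//                              Isolate* isolate);
+//   int result = reinterpret_cast<RegExpEntry>(entry_address)(
+//       *subject, 0, input_start, input_end, output_array, stack_base,
+//       true, isolate);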
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerMIPS::RegExpMacroAssemblerMIPS(
+    Mode mode,
+    int registers_to_save,
+    Zone* zone)
+    : NativeRegExpMacroAssembler(zone),
+      masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_(),
+      internal_failure_label_() {
+  DCHECK_EQ(0, registers_to_save % 2);
+  __ jmp(&entry_label_);   // We'll write the entry code later.
+  // If the code gets too big or corrupted, an internal exception will be
+  // raised, and we will exit right away.
+  __ bind(&internal_failure_label_);
+  __ li(v0, Operand(FAILURE));
+  __ Ret();
+  __ bind(&start_label_);  // And then continue from here.
+}
+
+
+RegExpMacroAssemblerMIPS::~RegExpMacroAssemblerMIPS() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+  internal_failure_label_.Unuse();
+}
+
+
+int RegExpMacroAssemblerMIPS::stack_limit_slack()  {
+  return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceCurrentPosition(int by) {
+  if (by != 0) {
+    __ Daddu(current_input_offset(),
+            current_input_offset(), Operand(by * char_size()));
+  }
+}
+
+
+void RegExpMacroAssemblerMIPS::AdvanceRegister(int reg, int by) {
+  DCHECK(reg >= 0);
+  DCHECK(reg < num_registers_);
+  if (by != 0) {
+    __ ld(a0, register_location(reg));
+    __ Daddu(a0, a0, Operand(by));
+    __ sd(a0, register_location(reg));
+  }
+}
+
+
+void RegExpMacroAssemblerMIPS::Backtrack() {
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(a0);
+  __ Daddu(a0, a0, code_pointer());
+  __ Jump(a0);
+}
+
+
+void RegExpMacroAssemblerMIPS::Bind(Label* label) {
+  __ bind(label);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacter(uint32_t c, Label* on_equal) {
+  BranchOrBacktrack(on_equal, eq, current_character(), Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  BranchOrBacktrack(on_greater, gt, current_character(), Operand(limit));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckAtStart(Label* on_at_start) {
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+  BranchOrBacktrack(&not_at_start, ne, a0, Operand(zero_reg));
+
+  // If we did, are we still at the start of the input?
+  __ ld(a1, MemOperand(frame_pointer(), kInputStart));
+  __ Daddu(a0, end_of_input_address(), Operand(current_input_offset()));
+  BranchOrBacktrack(on_at_start, eq, a0, Operand(a1));
+  __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotAtStart(Label* on_not_at_start) {
+  // Did we start the match at the start of the string at all?
+  __ lw(a0, MemOperand(frame_pointer(), kStartIndex));
+  BranchOrBacktrack(on_not_at_start, ne, a0, Operand(zero_reg));
+  // If we did, are we still at the start of the input?
+  __ ld(a1, MemOperand(frame_pointer(), kInputStart));
+  __ Daddu(a0, end_of_input_address(), Operand(current_input_offset()));
+  BranchOrBacktrack(on_not_at_start, ne, a0, Operand(a1));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterLT(uc16 limit, Label* on_less) {
+  BranchOrBacktrack(on_less, lt, current_character(), Operand(limit));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckGreedyLoop(Label* on_equal) {
+  Label backtrack_non_equal;
+  __ lw(a0, MemOperand(backtrack_stackpointer(), 0));
+  __ Branch(&backtrack_non_equal, ne, current_input_offset(), Operand(a0));
+  __ Daddu(backtrack_stackpointer(),
+          backtrack_stackpointer(),
+          Operand(kIntSize));
+  __ bind(&backtrack_non_equal);
+  BranchOrBacktrack(on_equal, eq, current_input_offset(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReferenceIgnoreCase(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  __ ld(a0, register_location(start_reg));  // Index of start of capture.
+  __ ld(a1, register_location(start_reg + 1));  // Index of end of capture.
+  __ Dsubu(a1, a1, a0);  // Length of capture.
+
+  // If length is zero, either the capture is empty or it is not participating.
+  // In either case succeed immediately.
+  __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+  __ Daddu(t1, a1, current_input_offset());
+  // Check that there are enough characters left in the input.
+  BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+
+  if (mode_ == LATIN1) {
+    Label success;
+    Label fail;
+    Label loop_check;
+
+    // a0 - offset of start of capture.
+    // a1 - length of capture.
+    __ Daddu(a0, a0, Operand(end_of_input_address()));
+    __ Daddu(a2, end_of_input_address(), Operand(current_input_offset()));
+    __ Daddu(a1, a0, Operand(a1));
+
+    // a0 - Address of start of capture.
+    // a1 - Address of end of capture.
+    // a2 - Address of current input position.
+
+    Label loop;
+    __ bind(&loop);
+    __ lbu(a3, MemOperand(a0, 0));
+    __ daddiu(a0, a0, char_size());
+    __ lbu(a4, MemOperand(a2, 0));
+    __ daddiu(a2, a2, char_size());
+
+    __ Branch(&loop_check, eq, a4, Operand(a3));
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ Or(a3, a3, Operand(0x20));  // Convert capture character to lower-case.
+    __ Or(a4, a4, Operand(0x20));  // Also convert input character.
+    __ Branch(&fail, ne, a4, Operand(a3));
+    __ Dsubu(a3, a3, Operand('a'));
+    __ Branch(&loop_check, ls, a3, Operand('z' - 'a'));
+    // Latin-1: Check for values in range [224,254] but not 247.
+    __ Dsubu(a3, a3, Operand(224 - 'a'));
+    // Weren't Latin-1 letters.
+    __ Branch(&fail, hi, a3, Operand(254 - 224));
+    // Check for 247.
+    __ Branch(&fail, eq, a3, Operand(247 - 224));
+
+    __ bind(&loop_check);
+    __ Branch(&loop, lt, a0, Operand(a1));
+    __ jmp(&success);
+
+    __ bind(&fail);
+    GoTo(on_no_match);
+
+    __ bind(&success);
+    // Compute new value of character position after the matched part.
+    __ Dsubu(current_input_offset(), a2, end_of_input_address());
+  } else {
+    DCHECK(mode_ == UC16);
+    // Put regexp engine registers on stack.
+    RegList regexp_registers_to_retain = current_input_offset().bit() |
+        current_character().bit() | backtrack_stackpointer().bit();
+    __ MultiPush(regexp_registers_to_retain);
+
+    int argument_count = 4;
+    __ PrepareCallCFunction(argument_count, a2);
+
+    // a0 - offset of start of capture.
+    // a1 - length of capture.
+
+    // Put arguments into arguments registers.
+    // Parameters are
+    //   a0: Address byte_offset1 - Address captured substring's start.
+    //   a1: Address byte_offset2 - Address of current character position.
+    //   a2: size_t byte_length - length of capture in bytes(!).
+    //   a3: Isolate* isolate.
+
+    // Address of start of capture.
+    __ Daddu(a0, a0, Operand(end_of_input_address()));
+    // Length of capture.
+    __ mov(a2, a1);
+    // Save length in callee-save register for use on return.
+    __ mov(s3, a1);
+    // Address of current input position.
+    __ Daddu(a1, current_input_offset(), Operand(end_of_input_address()));
+    // Isolate.
+    __ li(a3, Operand(ExternalReference::isolate_address(masm_->isolate())));
+
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+      __ CallCFunction(function, argument_count);
+    }
+
+    // Restore regexp engine registers.
+    __ MultiPop(regexp_registers_to_retain);
+    __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+    __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+
+    // Check if function returned non-zero for success or zero for failure.
+    BranchOrBacktrack(on_no_match, eq, v0, Operand(zero_reg));
+    // On success, increment position by length of capture.
+    __ Daddu(current_input_offset(), current_input_offset(), Operand(s3));
+  }
+
+  __ bind(&fallthrough);
+}
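+
+// Editorial note (illustrative sketch, not part of the original patch): the
+// LATIN1 branch above is the assembly form of the following scalar
+// comparison. OR-ing with 0x20 folds ASCII case; the second range check
+// accepts the accented Latin-1 letters 224..254 while excluding 247, so that
+// the multiply/divide signs (0xD7/0xF7), which also differ by 0x20, do not
+// compare equal.
+//
+//   bool DemoCharsEqualIgnoreCaseLatin1(unsigned c1, unsigned c2) {
+//     if (c1 == c2) return true;
+//     c1 |= 0x20;                       // Fold case of both characters.
+//     c2 |= 0x20;
+//     if (c1 != c2) return false;
+//     if (c1 - 'a' <= unsigned('z' - 'a')) return true;  // ASCII letter.
+//     return c1 - 224 <= 254u - 224 && c1 != 247;        // Latin-1 letter.
+//   }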
+
+
+void RegExpMacroAssemblerMIPS::CheckNotBackReference(
+    int start_reg,
+    Label* on_no_match) {
+  Label fallthrough;
+  Label success;
+
+  // Find length of back-referenced capture.
+  __ ld(a0, register_location(start_reg));
+  __ ld(a1, register_location(start_reg + 1));
+  __ Dsubu(a1, a1, a0);  // Length to check.
+  // Succeed on empty capture (including no capture).
+  __ Branch(&fallthrough, eq, a1, Operand(zero_reg));
+
+  __ Daddu(t1, a1, current_input_offset());
+  // Check that there are enough characters left in the input.
+  BranchOrBacktrack(on_no_match, gt, t1, Operand(zero_reg));
+
+  // Compute pointers to match string and capture string.
+  __ Daddu(a0, a0, Operand(end_of_input_address()));
+  __ Daddu(a2, end_of_input_address(), Operand(current_input_offset()));
+  __ Daddu(a1, a1, Operand(a0));
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == LATIN1) {
+    __ lbu(a3, MemOperand(a0, 0));
+    __ daddiu(a0, a0, char_size());
+    __ lbu(a4, MemOperand(a2, 0));
+    __ daddiu(a2, a2, char_size());
+  } else {
+    DCHECK(mode_ == UC16);
+    __ lhu(a3, MemOperand(a0, 0));
+    __ daddiu(a0, a0, char_size());
+    __ lhu(a4, MemOperand(a2, 0));
+    __ daddiu(a2, a2, char_size());
+  }
+  BranchOrBacktrack(on_no_match, ne, a3, Operand(a4));
+  __ Branch(&loop, lt, a0, Operand(a1));
+
+  // Move current character position to position after match.
+  __ Dsubu(current_input_offset(), a2, end_of_input_address());
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacter(uint32_t c,
+                                                 Label* on_not_equal) {
+  BranchOrBacktrack(on_not_equal, ne, current_character(), Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterAfterAnd(uint32_t c,
+                                                      uint32_t mask,
+                                                      Label* on_equal) {
+  __ And(a0, current_character(), Operand(mask));
+  Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+  BranchOrBacktrack(on_equal, eq, a0, rhs);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterAnd(uint32_t c,
+                                                         uint32_t mask,
+                                                         Label* on_not_equal) {
+  __ And(a0, current_character(), Operand(mask));
+  Operand rhs = (c == 0) ? Operand(zero_reg) : Operand(c);
+  BranchOrBacktrack(on_not_equal, ne, a0, rhs);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckNotCharacterAfterMinusAnd(
+    uc16 c,
+    uc16 minus,
+    uc16 mask,
+    Label* on_not_equal) {
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
+  __ Dsubu(a0, current_character(), Operand(minus));
+  __ And(a0, a0, Operand(mask));
+  BranchOrBacktrack(on_not_equal, ne, a0, Operand(c));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_in_range) {
+  __ Dsubu(a0, current_character(), Operand(from));
+  // Unsigned lower-or-same condition.
+  BranchOrBacktrack(on_in_range, ls, a0, Operand(to - from));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckCharacterNotInRange(
+    uc16 from,
+    uc16 to,
+    Label* on_not_in_range) {
+  __ Dsubu(a0, current_character(), Operand(from));
+  // Unsigned higher condition.
+  BranchOrBacktrack(on_not_in_range, hi, a0, Operand(to - from));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckBitInTable(
+    Handle<ByteArray> table,
+    Label* on_bit_set) {
+  __ li(a0, Operand(table));
+  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+    __ And(a1, current_character(), Operand(kTableSize - 1));
+    __ Daddu(a0, a0, a1);
+  } else {
+    __ Daddu(a0, a0, current_character());
+  }
+
+  __ lbu(a0, FieldMemOperand(a0, ByteArray::kHeaderSize));
+  BranchOrBacktrack(on_bit_set, ne, a0, Operand(zero_reg));
+}
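+
+// Editorial note (illustrative sketch, not part of the original patch):
+// despite its name, the table holds one byte per entry, indexed by the low
+// bits of the current character; on plain integers:
+//
+//   bool DemoBitInTable(const unsigned char* table, unsigned c) {
+//     return table[c & (kTableSize - 1)] != 0;
+//   }
+//
+// For LATIN1 input with a table covering the whole one-byte range, the
+// masking step can be skipped, as in the else-branch above.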
+
+
+bool RegExpMacroAssemblerMIPS::CheckSpecialCharacterClass(uc16 type,
+                                                          Label* on_no_match) {
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check.
+  switch (type) {
+  case 's':
+    // Match space-characters.
+    if (mode_ == LATIN1) {
+      // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+      Label success;
+      __ Branch(&success, eq, current_character(), Operand(' '));
+      // Check range 0x09..0x0d.
+      __ Dsubu(a0, current_character(), Operand('\t'));
+      __ Branch(&success, ls, a0, Operand('\r' - '\t'));
+      // \u00a0 (NBSP).
+      BranchOrBacktrack(on_no_match, ne, a0, Operand(0x00a0 - '\t'));
+      __ bind(&success);
+      return true;
+    }
+    return false;
+  case 'S':
+    // The emitted code for generic character classes is good enough.
+    return false;
+  case 'd':
+    // Match Latin1 digits ('0'..'9').
+    __ Dsubu(a0, current_character(), Operand('0'));
+    BranchOrBacktrack(on_no_match, hi, a0, Operand('9' - '0'));
+    return true;
+  case 'D':
+    // Match anything other than Latin1 digits ('0'..'9').
+    __ Dsubu(a0, current_character(), Operand('0'));
+    BranchOrBacktrack(on_no_match, ls, a0, Operand('9' - '0'));
+    return true;
+  case '.': {
+    // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+    __ Xor(a0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+    __ Dsubu(a0, a0, Operand(0x0b));
+    BranchOrBacktrack(on_no_match, ls, a0, Operand(0x0c - 0x0b));
+    if (mode_ == UC16) {
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ Dsubu(a0, a0, Operand(0x2028 - 0x0b));
+      BranchOrBacktrack(on_no_match, ls, a0, Operand(1));
+    }
+    return true;
+  }
+  case 'n': {
+    // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029).
+    __ Xor(a0, current_character(), Operand(0x01));
+    // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c.
+    __ Dsubu(a0, a0, Operand(0x0b));
+    if (mode_ == LATIN1) {
+      BranchOrBacktrack(on_no_match, hi, a0, Operand(0x0c - 0x0b));
+    } else {
+      Label done;
+      BranchOrBacktrack(&done, ls, a0, Operand(0x0c - 0x0b));
+      // Compare original value to 0x2028 and 0x2029, using the already
+      // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+      // 0x201d (0x2028 - 0x0b) or 0x201e.
+      __ Dsubu(a0, a0, Operand(0x2028 - 0x0b));
+      BranchOrBacktrack(on_no_match, hi, a0, Operand(1));
+      __ bind(&done);
+    }
+    return true;
+  }
+  case 'w': {
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
+      BranchOrBacktrack(on_no_match, hi, current_character(), Operand('z'));
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ li(a0, Operand(map));
+    __ Daddu(a0, a0, current_character());
+    __ lbu(a0, MemOperand(a0, 0));
+    BranchOrBacktrack(on_no_match, eq, a0, Operand(zero_reg));
+    return true;
+  }
+  case 'W': {
+    Label done;
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
+      __ Branch(&done, hi, current_character(), Operand('z'));
+    }
+    ExternalReference map = ExternalReference::re_word_character_map();
+    __ li(a0, Operand(map));
+    __ Daddu(a0, a0, current_character());
+    __ lbu(a0, MemOperand(a0, 0));
+    BranchOrBacktrack(on_no_match, ne, a0, Operand(zero_reg));
+    if (mode_ != LATIN1) {
+      __ bind(&done);
+    }
+    return true;
+  }
+  case '*':
+    // Match any character.
+    return true;
+  // No custom implementation (yet): s(UC16), S(UC16).
+  default:
+    return false;
+  }
+}
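+
+// Editorial note (illustrative sketch, not part of the original patch): two
+// idioms used repeatedly above. First, the unsigned range check mentioned at
+// the top of CheckSpecialCharacterClass:
+//
+//   bool DemoInRange(unsigned c, unsigned min, unsigned max) {
+//     return c - min <= max - min;  // One compare; wraps around below min.
+//   }
+//
+// Second, the '.' and 'n' cases exploit that '\n' (0x0a) and '\r' (0x0d)
+// become the adjacent pair 0x0b/0x0c when XOR-ed with 0x01, so a single
+// range check covers both:
+//
+//   bool DemoIsNewlineLatin1(unsigned c) {
+//     return DemoInRange(c ^ 0x01, 0x0b, 0x0c);
+//   }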
+
+
+void RegExpMacroAssemblerMIPS::Fail() {
+  __ li(v0, Operand(FAILURE));
+  __ jmp(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerMIPS::GetCode(Handle<String> source) {
+  Label return_v0;
+  if (masm_->has_exception()) {
+    // If the code gets corrupted due to long regular expressions and lack of
+    // space on trampolines, an internal exception flag is set. If this case
+    // is detected, we will jump into the exit sequence right away.
+    __ bind_to(&entry_label_, internal_failure_label_.pos());
+  } else {
+    // Finalize code - write the entry point code now that we know how many
+    // registers we need.
+
+    // Entry code:
+    __ bind(&entry_label_);
+
+    // Tell the system that we have a stack frame.  Because the type is MANUAL,
+    // no code is generated.
+    FrameScope scope(masm_, StackFrame::MANUAL);
+
+    // Actually emit code to start a new stack frame.
+    // Push arguments
+    // Save callee-save registers.
+    // Start new stack frame.
+    // Store link register in existing stack-cell.
+    // Order here should correspond to order of offset constants in header file.
+    // TODO(plind): we save s0..s7, but ONLY use s3 here - use the regs
+    // or don't save.
+    RegList registers_to_retain = s0.bit() | s1.bit() | s2.bit() |
+        s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | fp.bit();
+    RegList argument_registers = a0.bit() | a1.bit() | a2.bit() | a3.bit();
+
+    if (kMipsAbi == kN64) {
+      // TODO(plind): Should probably alias a4-a7, for clarity.
+      argument_registers |= a4.bit() | a5.bit() | a6.bit() | a7.bit();
+    }
+
+    __ MultiPush(argument_registers | registers_to_retain | ra.bit());
+    // Set frame pointer in space for it if this is not a direct call
+    // from generated code.
+    // TODO(plind): this 8 is the # of argument regs, should have definition.
+    __ Daddu(frame_pointer(), sp, Operand(8 * kPointerSize));
+    __ mov(a0, zero_reg);
+    __ push(a0);  // Make room for success counter and initialize it to 0.
+    __ push(a0);  // Make room for "position - 1" constant (value irrelevant).
+
+    // Check if we have space on the stack for registers.
+    Label stack_limit_hit;
+    Label stack_ok;
+
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(masm_->isolate());
+    __ li(a0, Operand(stack_limit));
+    __ ld(a0, MemOperand(a0));
+    __ Dsubu(a0, sp, a0);
+    // Handle it if the stack pointer is already below the stack limit.
+    __ Branch(&stack_limit_hit, le, a0, Operand(zero_reg));
+    // Check if there is room for the variable number of registers above
+    // the stack limit.
+    __ Branch(&stack_ok, hs, a0, Operand(num_registers_ * kPointerSize));
+    // Exit with OutOfMemory exception. There is not enough space on the stack
+    // for our working registers.
+    __ li(v0, Operand(EXCEPTION));
+    __ jmp(&return_v0);
+
+    __ bind(&stack_limit_hit);
+    CallCheckStackGuardState(a0);
+    // If returned value is non-zero, we exit with the returned value as result.
+    __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+
+    __ bind(&stack_ok);
+    // Allocate space on stack for registers.
+    __ Dsubu(sp, sp, Operand(num_registers_ * kPointerSize));
+    // Load string end.
+    __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+    // Load input start.
+    __ ld(a0, MemOperand(frame_pointer(), kInputStart));
+    // Find negative length (offset of start relative to end).
+    __ Dsubu(current_input_offset(), a0, end_of_input_address());
+    // Set a0 to address of char before start of the input string
+    // (effectively string position -1).
+    __ ld(a1, MemOperand(frame_pointer(), kStartIndex));
+    __ Dsubu(a0, current_input_offset(), Operand(char_size()));
+    __ dsll(t1, a1, (mode_ == UC16) ? 1 : 0);
+    __ Dsubu(a0, a0, t1);
+    // Store this value in a local variable, for use when clearing
+    // position registers.
+    __ sd(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+    // Initialize code pointer register
+    __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+    Label load_char_start_regexp, start_regexp;
+    // Load newline if index is at start, previous character otherwise.
+    __ Branch(&load_char_start_regexp, ne, a1, Operand(zero_reg));
+    __ li(current_character(), Operand('\n'));
+    __ jmp(&start_regexp);
+
+    // Global regexp restarts matching here.
+    __ bind(&load_char_start_regexp);
+    // Load previous char as initial value of current character register.
+    LoadCurrentCharacterUnchecked(-1, 1);
+    __ bind(&start_regexp);
+
+    // Initialize on-stack registers.
+    if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+      // Fill saved registers with initial value = start offset - 1.
+      if (num_saved_registers_ > 8) {
+        // Address of register 0.
+        __ Daddu(a1, frame_pointer(), Operand(kRegisterZero));
+        __ li(a2, Operand(num_saved_registers_));
+        Label init_loop;
+        __ bind(&init_loop);
+        __ sd(a0, MemOperand(a1));
+        __ Daddu(a1, a1, Operand(-kPointerSize));
+        __ Dsubu(a2, a2, Operand(1));
+        __ Branch(&init_loop, ne, a2, Operand(zero_reg));
+      } else {
+        for (int i = 0; i < num_saved_registers_; i++) {
+          __ sd(a0, register_location(i));
+        }
+      }
+    }
+
+    // Initialize backtrack stack pointer.
+    __ ld(backtrack_stackpointer(), MemOperand(frame_pointer(), kStackHighEnd));
+
+    __ jmp(&start_label_);
+
+
+    // Exit code:
+    if (success_label_.is_linked()) {
+      // Save captures when successful.
+      __ bind(&success_label_);
+      if (num_saved_registers_ > 0) {
+        // Copy captures to output.
+        __ ld(a1, MemOperand(frame_pointer(), kInputStart));
+        __ ld(a0, MemOperand(frame_pointer(), kRegisterOutput));
+        __ ld(a2, MemOperand(frame_pointer(), kStartIndex));
+        __ Dsubu(a1, end_of_input_address(), a1);
+        // a1 is length of input in bytes.
+        if (mode_ == UC16) {
+          __ dsrl(a1, a1, 1);
+        }
+        // a1 is length of input in characters.
+        __ Daddu(a1, a1, Operand(a2));
+        // a1 is length of string in characters.
+
+        DCHECK_EQ(0, num_saved_registers_ % 2);
+        // Always an even number of capture registers. This allows us to
+        // unroll the loop once to add an operation between a load of a register
+        // and the following use of that register.
+        for (int i = 0; i < num_saved_registers_; i += 2) {
+          __ ld(a2, register_location(i));
+          __ ld(a3, register_location(i + 1));
+          if (i == 0 && global_with_zero_length_check()) {
+            // Keep capture start in t3 for the zero-length check later.
+            __ mov(t3, a2);
+          }
+          if (mode_ == UC16) {
+            __ dsra(a2, a2, 1);
+            __ Daddu(a2, a2, a1);
+            __ dsra(a3, a3, 1);
+            __ Daddu(a3, a3, a1);
+          } else {
+            __ Daddu(a2, a1, Operand(a2));
+            __ Daddu(a3, a1, Operand(a3));
+          }
+          // V8 expects the output to be an int32_t array.
+          __ sw(a2, MemOperand(a0));
+          __ Daddu(a0, a0, kIntSize);
+          __ sw(a3, MemOperand(a0));
+          __ Daddu(a0, a0, kIntSize);
+        }
+      }
+
+      if (global()) {
+        // Restart matching if the regular expression is flagged as global.
+        __ ld(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+        __ lw(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+        __ ld(a2, MemOperand(frame_pointer(), kRegisterOutput));
+        // Increment success counter.
+        __ Daddu(a0, a0, 1);
+        __ sd(a0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+        // Capture results have been stored, so the number of remaining global
+        // output registers is reduced by the number of stored captures.
+        __ Dsubu(a1, a1, num_saved_registers_);
+        // Check whether we have enough room for another set of capture results.
+        __ mov(v0, a0);
+        __ Branch(&return_v0, lt, a1, Operand(num_saved_registers_));
+
+        __ sd(a1, MemOperand(frame_pointer(), kNumOutputRegisters));
+        // Advance the location for output.
+        __ Daddu(a2, a2, num_saved_registers_ * kIntSize);
+        __ sd(a2, MemOperand(frame_pointer(), kRegisterOutput));
+
+        // Prepare a0 to initialize registers with its value in the next run.
+        __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+        if (global_with_zero_length_check()) {
+          // Special case for zero-length matches.
+          // t3: capture start index
+          // Not a zero-length match, restart.
+          __ Branch(
+              &load_char_start_regexp, ne, current_input_offset(), Operand(t3));
+          // Offset from the end is zero if we already reached the end.
+          __ Branch(&exit_label_, eq, current_input_offset(),
+                    Operand(zero_reg));
+          // Advance current position after a zero-length match.
+          __ Daddu(current_input_offset(),
+                  current_input_offset(),
+                  Operand((mode_ == UC16) ? 2 : 1));
+        }
+
+        __ Branch(&load_char_start_regexp);
+      } else {
+        __ li(v0, Operand(SUCCESS));
+      }
+    }
+    // Exit and return v0.
+    __ bind(&exit_label_);
+    if (global()) {
+      __ ld(v0, MemOperand(frame_pointer(), kSuccessfulCaptures));
+    }
+
+    __ bind(&return_v0);
+    // Skip sp past regexp registers and local variables.
+    __ mov(sp, frame_pointer());
+    // Restore registers s0..s7 and return (restoring ra to pc).
+    __ MultiPop(registers_to_retain | ra.bit());
+    __ Ret();
+
+    // Backtrack code (branch target for conditional backtracks).
+    if (backtrack_label_.is_linked()) {
+      __ bind(&backtrack_label_);
+      Backtrack();
+    }
+
+    Label exit_with_exception;
+
+    // Preempt-code.
+    if (check_preempt_label_.is_linked()) {
+      SafeCallTarget(&check_preempt_label_);
+      // Put regexp engine registers on stack.
+      RegList regexp_registers_to_retain = current_input_offset().bit() |
+          current_character().bit() | backtrack_stackpointer().bit();
+      __ MultiPush(regexp_registers_to_retain);
+      CallCheckStackGuardState(a0);
+      __ MultiPop(regexp_registers_to_retain);
+      // If returning non-zero, we should end execution with the given
+      // result as return value.
+      __ Branch(&return_v0, ne, v0, Operand(zero_reg));
+
+      // String might have moved: Reload end of string from frame.
+      __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+      SafeReturn();
+    }
+
+    // Backtrack stack overflow code.
+    if (stack_overflow_label_.is_linked()) {
+      SafeCallTarget(&stack_overflow_label_);
+      // Reached if the backtrack-stack limit has been hit.
+      // Put regexp engine registers on stack first.
+      RegList regexp_registers = current_input_offset().bit() |
+          current_character().bit();
+      __ MultiPush(regexp_registers);
+      Label grow_failed;
+      // Call GrowStack(backtrack_stackpointer(), &stack_base)
+      static const int num_arguments = 3;
+      __ PrepareCallCFunction(num_arguments, a0);
+      __ mov(a0, backtrack_stackpointer());
+      __ Daddu(a1, frame_pointer(), Operand(kStackHighEnd));
+      __ li(a2, Operand(ExternalReference::isolate_address(masm_->isolate())));
+      ExternalReference grow_stack =
+          ExternalReference::re_grow_stack(masm_->isolate());
+      __ CallCFunction(grow_stack, num_arguments);
+      // Restore regexp registers.
+      __ MultiPop(regexp_registers);
+      // If it returns NULL, we have failed to grow the stack, and
+      // must exit with a stack-overflow exception.
+      __ Branch(&exit_with_exception, eq, v0, Operand(zero_reg));
+      // Otherwise use return value as new stack pointer.
+      __ mov(backtrack_stackpointer(), v0);
+      // Restore saved registers and continue.
+      __ li(code_pointer(), Operand(masm_->CodeObject()), CONSTANT_SIZE);
+      __ ld(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      SafeReturn();
+    }
+
+    if (exit_with_exception.is_linked()) {
+      // If any of the code above needed to exit with an exception.
+      __ bind(&exit_with_exception);
+      // Exit with Result EXCEPTION(-1) to signal thrown exception.
+      __ li(v0, Operand(EXCEPTION));
+      __ jmp(&return_v0);
+    }
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = isolate()->factory()->NewCode(
+      code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+  LOG(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  return Handle<HeapObject>::cast(code);
+}
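+
+// Editorial note (illustrative sketch, not part of the original patch): the
+// capture registers hold negative byte offsets from the end of the input, so
+// the copy loop in GetCode above converts each one to a character index from
+// the start of the string as:
+//
+//   int DemoCaptureToCharIndex(int64_t offset_from_end_bytes,  // <= 0
+//                              int64_t input_length_bytes,
+//                              int start_index, int char_size) {
+//     return start_index + static_cast<int>(
+//         (input_length_bytes + offset_from_end_bytes) / char_size);
+//   }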
+
+
+void RegExpMacroAssemblerMIPS::GoTo(Label* to) {
+  if (to == NULL) {
+    Backtrack();
+    return;
+  }
+  __ jmp(to);
+  return;
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterGE(int reg,
+                                            int comparand,
+                                            Label* if_ge) {
+  __ ld(a0, register_location(reg));
+  BranchOrBacktrack(if_ge, ge, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterLT(int reg,
+                                            int comparand,
+                                            Label* if_lt) {
+  __ ld(a0, register_location(reg));
+  BranchOrBacktrack(if_lt, lt, a0, Operand(comparand));
+}
+
+
+void RegExpMacroAssemblerMIPS::IfRegisterEqPos(int reg,
+                                               Label* if_eq) {
+  __ ld(a0, register_location(reg));
+  BranchOrBacktrack(if_eq, eq, a0, Operand(current_input_offset()));
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+    RegExpMacroAssemblerMIPS::Implementation() {
+  return kMIPSImplementation;
+}
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacter(int cp_offset,
+                                                    Label* on_end_of_input,
+                                                    bool check_bounds,
+                                                    int characters) {
+  DCHECK(cp_offset >= -1);      // ^ and \b can look behind one character.
+  DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works).
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerMIPS::PopCurrentPosition() {
+  Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PopRegister(int register_index) {
+  Pop(a0);
+  __ sd(a0, register_location(register_index));
+}
+
+
+void RegExpMacroAssemblerMIPS::PushBacktrack(Label* label) {
+  if (label->is_bound()) {
+    int target = label->pos();
+    __ li(a0, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    Label after_constant;
+    __ Branch(&after_constant);
+    int offset = masm_->pc_offset();
+    int cp_offset = offset + Code::kHeaderSize - kHeapObjectTag;
+    __ emit(0);
+    masm_->label_at_put(label, offset);
+    __ bind(&after_constant);
+    if (is_int16(cp_offset)) {
+      __ lwu(a0, MemOperand(code_pointer(), cp_offset));
+    } else {
+      __ Daddu(a0, code_pointer(), cp_offset);
+      __ lwu(a0, MemOperand(a0, 0));
+    }
+  }
+  Push(a0);
+  CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerMIPS::PushCurrentPosition() {
+  Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerMIPS::PushRegister(int register_index,
+                                            StackCheckFlag check_stack_limit) {
+  __ ld(a0, register_location(register_index));
+  Push(a0);
+  if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadCurrentPositionFromRegister(int reg) {
+  __ ld(current_input_offset(), register_location(reg));
+}
+
+
+void RegExpMacroAssemblerMIPS::ReadStackPointerFromRegister(int reg) {
+  __ ld(backtrack_stackpointer(), register_location(reg));
+  __ ld(a0, MemOperand(frame_pointer(), kStackHighEnd));
+  __ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::SetCurrentPositionFromEnd(int by) {
+  Label after_position;
+  __ Branch(&after_position,
+            ge,
+            current_input_offset(),
+            Operand(-by * char_size()));
+  __ li(current_input_offset(), -by * char_size());
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerMIPS::SetRegister(int register_index, int to) {
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ li(a0, Operand(to));
+  __ sd(a0, register_location(register_index));
+}
+
+
+bool RegExpMacroAssemblerMIPS::Succeed() {
+  __ jmp(&success_label_);
+  return global();
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteCurrentPositionToRegister(int reg,
+                                                              int cp_offset) {
+  if (cp_offset == 0) {
+    __ sd(current_input_offset(), register_location(reg));
+  } else {
+    __ Daddu(a0, current_input_offset(), Operand(cp_offset * char_size()));
+    __ sd(a0, register_location(reg));
+  }
+}
+
+
+void RegExpMacroAssemblerMIPS::ClearRegisters(int reg_from, int reg_to) {
+  DCHECK(reg_from <= reg_to);
+  __ ld(a0, MemOperand(frame_pointer(), kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ sd(a0, register_location(reg));
+  }
+}
+
+
+void RegExpMacroAssemblerMIPS::WriteStackPointerToRegister(int reg) {
+  __ ld(a1, MemOperand(frame_pointer(), kStackHighEnd));
+  __ Dsubu(a0, backtrack_stackpointer(), a1);
+  __ sd(a0, register_location(reg));
+}
+
+
+bool RegExpMacroAssemblerMIPS::CanReadUnaligned() {
+  return false;
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerMIPS::CallCheckStackGuardState(Register scratch) {
+  int stack_alignment = base::OS::ActivationFrameAlignment();
+
+  // Align the stack pointer and save the original sp value on the stack.
+  __ mov(scratch, sp);
+  __ Dsubu(sp, sp, Operand(kPointerSize));
+  DCHECK(base::bits::IsPowerOfTwo32(stack_alignment));
+  __ And(sp, sp, Operand(-stack_alignment));
+  __ sd(scratch, MemOperand(sp));
+
+  __ mov(a2, frame_pointer());
+  // Code* of self.
+  __ li(a1, Operand(masm_->CodeObject()), CONSTANT_SIZE);
+
+  // We need to make room for the return address on the stack.
+  DCHECK(IsAligned(stack_alignment, kPointerSize));
+  __ Dsubu(sp, sp, Operand(stack_alignment));
+
+  // Stack pointer now points to cell where return address is to be written.
+  // Arguments are in registers, meaning we treat the return address as
+  // argument 5. Since DirectCEntryStub will handle allocating space for the C
+  // argument slots, we don't need to care about that here. This is how the
+  // stack will look (sp meaning the value of sp at this moment):
+  // [sp + 3] - empty slot if needed for alignment.
+  // [sp + 2] - saved sp.
+  // [sp + 1] - second word reserved for return value.
+  // [sp + 0] - first word reserved for return value.
+
+  // a0 will point to the return address, placed by DirectCEntry.
+  __ mov(a0, sp);
+
+  ExternalReference stack_guard_check =
+      ExternalReference::re_check_stack_guard_state(masm_->isolate());
+  __ li(t9, Operand(stack_guard_check));
+  DirectCEntryStub stub(isolate());
+  stub.GenerateCall(masm_, t9);
+
+  // DirectCEntryStub allocated space for the C argument slots, so we have to
+  // drop them, together with the return address, from the stack by loading the
+  // saved sp. At this point the stack must look like this:
+  // [sp + 7] - empty slot if needed for alignment.
+  // [sp + 6] - saved sp.
+  // [sp + 5] - second word reserved for return value.
+  // [sp + 4] - first word reserved for return value.
+  // [sp + 3] - C argument slot.
+  // [sp + 2] - C argument slot.
+  // [sp + 1] - C argument slot.
+  // [sp + 0] - C argument slot.
+  __ ld(sp, MemOperand(sp, stack_alignment + kCArgsSlotsSize));
+
+  __ li(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+  return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
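+
+// Note that frame_entry<T>(re_frame, offset) is just a typed view of the
+// slot at re_frame + offset, equivalent to *reinterpret_cast<T*>(re_frame +
+// offset), and is usable on both sides of an assignment (see
+// CheckStackGuardState below).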
+
+
+int RegExpMacroAssemblerMIPS::CheckStackGuardState(Address* return_address,
+                                                   Code* re_code,
+                                                   Address re_frame) {
+  Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+  StackLimitCheck check(isolate);
+  if (check.JsHasOverflowed()) {
+    isolate->StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If this is not a real stack overflow, the stack guard was used to
+  // interrupt execution for another purpose.
+
+  // If this is a direct call from JavaScript retry the RegExp forcing the call
+  // through the runtime system. Currently the direct call cannot handle a GC.
+  if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+    return RETRY;
+  }
+
+  // Prepare for possible GC.
+  HandleScope handles(isolate);
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+  // Current string.
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
+
+  DCHECK(re_code->instruction_start() <= *return_address);
+  DCHECK(*return_address <=
+      re_code->instruction_start() + re_code->instruction_size());
+
+  Object* result = isolate->stack_guard()->HandleInterrupts();
+
+  if (*code_handle != re_code) {  // Return address no longer valid.
+    int delta = code_handle->address() - re_code->address();
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
+  // String might have changed.
+  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a Latin1 and a UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  DCHECK(StringShape(*subject_tmp).IsSequential() ||
+      StringShape(*subject_tmp).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<int>(re_frame, kStartIndex);
+  const byte* new_address = StringCharacterPosition(*subject_tmp,
+                                                    start_index + slice_offset);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte*>(re_frame, kInputEnd);
+    int byte_length = static_cast<int>(end_address - start_address);
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+  }
+
+  return 0;
+}
+
+
+MemOperand RegExpMacroAssemblerMIPS::register_location(int register_index) {
+  DCHECK(register_index < (1<<30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return MemOperand(frame_pointer(),
+                    kRegisterZero - register_index * kPointerSize);
+}
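+
+// For example, with kPointerSize == 8 on mips64, register_location(0)
+// addresses fp + kRegisterZero and register_location(3) the slot three words
+// further down, at fp + kRegisterZero - 24.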
+
+
+void RegExpMacroAssemblerMIPS::CheckPosition(int cp_offset,
+                                             Label* on_outside_input) {
+  BranchOrBacktrack(on_outside_input,
+                    ge,
+                    current_input_offset(),
+                    Operand(-cp_offset * char_size()));
+}
+
+
+void RegExpMacroAssemblerMIPS::BranchOrBacktrack(Label* to,
+                                                 Condition condition,
+                                                 Register rs,
+                                                 const Operand& rt) {
+  if (condition == al) {  // Unconditional.
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ jmp(to);
+    return;
+  }
+  if (to == NULL) {
+    __ Branch(&backtrack_label_, condition, rs, rt);
+    return;
+  }
+  __ Branch(to, condition, rs, rt);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCall(Label* to,
+                                        Condition cond,
+                                        Register rs,
+                                        const Operand& rt) {
+  __ BranchAndLink(to, cond, rs, rt);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeReturn() {
+  __ pop(ra);
+  __ Daddu(t1, ra, Operand(masm_->CodeObject()));
+  __ Jump(t1);
+}
+
+
+void RegExpMacroAssemblerMIPS::SafeCallTarget(Label* name) {
+  __ bind(name);
+  __ Dsubu(ra, ra, Operand(masm_->CodeObject()));
+  __ push(ra);
+}
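+
+// Together, SafeCall, SafeCallTarget and SafeReturn implement GC-safe
+// internal calls: the target label rebases ra to a code-object-relative
+// offset before pushing it, and SafeReturn adds the code object back before
+// jumping, so no absolute code address ever lives on the stack while a GC
+// could move the code object.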
+
+
+void RegExpMacroAssemblerMIPS::Push(Register source) {
+  DCHECK(!source.is(backtrack_stackpointer()));
+  __ Daddu(backtrack_stackpointer(),
+           backtrack_stackpointer(),
+           Operand(-kIntSize));
+  __ sw(source, MemOperand(backtrack_stackpointer()));
+}
+
+
+void RegExpMacroAssemblerMIPS::Pop(Register target) {
+  DCHECK(!target.is(backtrack_stackpointer()));
+  __ lw(target, MemOperand(backtrack_stackpointer()));
+  __ Daddu(backtrack_stackpointer(), backtrack_stackpointer(), kIntSize);
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckPreemption() {
+  // Check for preemption.
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(masm_->isolate());
+  __ li(a0, Operand(stack_limit));
+  __ ld(a0, MemOperand(a0));
+  SafeCall(&check_preempt_label_, ls, sp, Operand(a0));
+}
+
+
+void RegExpMacroAssemblerMIPS::CheckStackLimit() {
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+
+  __ li(a0, Operand(stack_limit));
+  __ ld(a0, MemOperand(a0));
+  SafeCall(&stack_overflow_label_, ls, backtrack_stackpointer(), Operand(a0));
+}
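+
+// Note the division of labor between the two checks above: CheckPreemption
+// compares sp against the isolate's JS stack limit (which interrupt requests
+// lower), while CheckStackLimit compares the backtrack stack pointer against
+// the separate regexp stack limit.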
+
+
+void RegExpMacroAssemblerMIPS::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                             int characters) {
+  Register offset = current_input_offset();
+  if (cp_offset != 0) {
+    // t3 is not being used to store the capture start index at this point.
+    __ Daddu(t3, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = t3;
+  }
+  // We assume that we cannot do unaligned loads on MIPS, so this function
+  // must only be used to load a single character at a time.
+  DCHECK(characters == 1);
+  __ Daddu(t1, end_of_input_address(), Operand(offset));
+  if (mode_ == LATIN1) {
+    __ lbu(current_character(), MemOperand(t1, 0));
+  } else {
+    DCHECK(mode_ == UC16);
+    __ lhu(current_character(), MemOperand(t1, 0));
+  }
+}
+
+#undef __
+
+#endif  // V8_INTERPRETED_REGEXP
+
+}}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/regexp-macro-assembler-mips64.h b/src/mips64/regexp-macro-assembler-mips64.h
new file mode 100644
index 0000000..dd4e8a9
--- /dev/null
+++ b/src/mips64/regexp-macro-assembler-mips64.h
@@ -0,0 +1,269 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#ifndef V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+#define V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
+
+#include "src/macro-assembler.h"
+#include "src/mips64/assembler-mips64-inl.h"
+#include "src/mips64/assembler-mips64.h"
+#include "src/mips64/macro-assembler-mips64.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerMIPS: public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerMIPS(Mode mode, int registers_to_save, Zone* zone);
+  virtual ~RegExpMacroAssemblerMIPS();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(uint32_t c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(uint32_t c,
+                                      uint32_t mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  // A "greedy loop" is a loop that is both greedy and with a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotCharacter(uint32_t c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(uint32_t c,
+                                         uint32_t mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c,
+                                              uc16 minus,
+                                              uc16 mask,
+                                              Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from,
+                                     uc16 to,
+                                     Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from,
+                                        uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type,
+                                          Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<HeapObject> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset,
+                                    Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
+  virtual void SetRegister(int register_index, int to);
+  virtual bool Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+  virtual bool CanReadUnaligned();
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address,
+                                  Code* re_code,
+                                  Address re_frame);
+
+  void print_regexp_frame_constants();
+
+ private:
+#if defined(MIPS_ABI_N64)
+  // Offsets from frame_pointer() of function parameters and stored registers.
+  static const int kFramePointer = 0;
+
+  // Above the frame pointer - Stored registers and stack passed parameters.
+  // Registers s0 to s7, fp, and ra.
+  static const int kStoredRegisters = kFramePointer;
+  // Return address (stored from link register, read into pc on return).
+  // TODO(plind): This 9 is the 8 s-regs (s0..s7) plus fp.
+  static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+  static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+  // Stack frame header.
+  static const int kStackFrameHeader = kSecondaryReturnAddress;
+  // Stack parameters placed by caller.
+  static const int kIsolate = kStackFrameHeader + kPointerSize;
+
+  // Below the frame pointer.
+  // Register parameters stored by setup code.
+  static const int kDirectCall = kFramePointer - kPointerSize;
+  static const int kStackHighEnd = kDirectCall - kPointerSize;
+  static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+  static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+  static const int kInputEnd = kRegisterOutput - kPointerSize;
+  static const int kInputStart = kInputEnd - kPointerSize;
+  static const int kStartIndex = kInputStart - kPointerSize;
+  static const int kInputString = kStartIndex - kPointerSize;
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kSuccessfulCaptures = kInputString - kPointerSize;
+  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
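+
+  // Worked example of this n64 layout, assuming kPointerSize == 8:
+  // kReturnAddress is fp + 72 and kIsolate fp + 88, while kDirectCall is
+  // fp - 8, kInputString fp - 64 and kRegisterZero fp - 88, so regexp
+  // register n lives at fp - 88 - 8 * n.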
+
+#elif defined(MIPS_ABI_O32)
+  // Offsets from frame_pointer() of function parameters and stored registers.
+  static const int kFramePointer = 0;
+
+  // Above the frame pointer - Stored registers and stack passed parameters.
+  // Registers s0 to s7, fp, and ra.
+  static const int kStoredRegisters = kFramePointer;
+  // Return address (stored from link register, read into pc on return).
+  static const int kReturnAddress = kStoredRegisters + 9 * kPointerSize;
+  static const int kSecondaryReturnAddress = kReturnAddress + kPointerSize;
+  // Stack frame header.
+  static const int kStackFrameHeader = kReturnAddress + kPointerSize;
+  // Stack parameters placed by caller.
+  static const int kRegisterOutput =
+      kStackFrameHeader + 4 * kPointerSize + kPointerSize;
+  static const int kNumOutputRegisters = kRegisterOutput + kPointerSize;
+  static const int kStackHighEnd = kNumOutputRegisters + kPointerSize;
+  static const int kDirectCall = kStackHighEnd + kPointerSize;
+  static const int kIsolate = kDirectCall + kPointerSize;
+
+  // Below the frame pointer.
+  // Register parameters stored by setup code.
+  static const int kInputEnd = kFramePointer - kPointerSize;
+  static const int kInputStart = kInputEnd - kPointerSize;
+  static const int kStartIndex = kInputStart - kPointerSize;
+  static const int kInputString = kStartIndex - kPointerSize;
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kSuccessfulCaptures = kInputString - kPointerSize;
+  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+#else
+# error "undefined MIPS ABI"
+#endif
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The fp-relative location of a regexp register.
+  MemOperand register_location(int register_index);
+
+  // Register holding the current input position as negative offset from
+  // the end of the string.
+  inline Register current_input_offset() { return a6; }
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return a7; }
+
+  // Register holding address of the end of the input string.
+  inline Register end_of_input_address() { return t2; }
+
+  // Register holding the frame address. Local variables, parameters and
+  // regexp registers are addressed relative to this.
+  inline Register frame_pointer() { return fp; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return t0; }
+
+  // Register holding pointer to the current code object.
+  inline Register code_pointer() { return a5; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument).
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Label* to,
+                         Condition condition,
+                         Register rs,
+                         const Operand& rt);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+  inline void SafeCall(Label* to,
+                       Condition cond,
+                       Register rs,
+                       const Operand& rt);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer by an int size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pops a value from the backtrack stack. Reads the int at the stack
+  // pointer and increments it by an int size.
+  inline void Pop(Register target);
+
+  Isolate* isolate() const { return masm_->isolate(); }
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (Latin1 or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1).
+  int num_saved_registers_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+  Label internal_failure_label_;
+};
+
+#endif  // V8_INTERPRETED_REGEXP
+
+
+}}  // namespace v8::internal
+
+#endif  // V8_MIPS_REGEXP_MACRO_ASSEMBLER_MIPS_H_
diff --git a/src/mips64/simulator-mips64.cc b/src/mips64/simulator-mips64.cc
new file mode 100644
index 0000000..4c74939
--- /dev/null
+++ b/src/mips64/simulator-mips64.cc
@@ -0,0 +1,3446 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <cmath>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_MIPS64
+
+#include "src/assembler.h"
+#include "src/base/bits.h"
+#include "src/disasm.h"
+#include "src/mips64/constants-mips64.h"
+#include "src/mips64/simulator-mips64.h"
+#include "src/ostreams.h"
+
+// Only build the simulator if not compiling for real MIPS hardware.
+#if defined(USE_SIMULATOR)
+
+namespace v8 {
+namespace internal {
+
+// Utility functions.
+bool HaveSameSign(int64_t a, int64_t b) {
+  return ((a ^ b) >= 0);
+}
+
+
+uint32_t get_fcsr_condition_bit(uint32_t cc) {
+  if (cc == 0) {
+    return 23;
+  } else {
+    return 24 + cc;
+  }
+}
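+
+// The FCSR condition bits are not contiguous: cc 0 maps to bit 23 and
+// cc 1..7 map to bits 25..31, because bit 24 holds the unrelated
+// flush-to-zero (FS) flag.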
+
+
+static int64_t MultiplyHighSigned(int64_t u, int64_t v) {
+  uint64_t u0, v0, w0;
+  int64_t u1, v1, w1, w2, t;
+
+  u0 = u & 0xffffffffL;
+  u1 = u >> 32;
+  v0 = v & 0xffffffffL;
+  v1 = v >> 32;
+
+  w0 = u0 * v0;
+  t = u1 * v0 + (w0 >> 32);
+  w1 = t & 0xffffffffL;
+  w2 = t >> 32;
+  w1 = u0 * v1 + w1;
+
+  return u1 * v1 + w2 + (w1 >> 32);
+}
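+
+// The function above is the classic schoolbook decomposition of a signed
+// 64x64->128 multiply, keeping only the high word. A sanity sketch, assuming
+// a host compiler with __int128 (illustrative only):
+//
+//   int64_t u = 0x123456789abcdef0, v = -42;
+//   DCHECK_EQ(static_cast<int64_t>((static_cast<__int128>(u) * v) >> 64),
+//             MultiplyHighSigned(u, v));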
+
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS, in the same way as SNPrintF, is that the Windows
+// C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf  // NOLINT
+
+// The MipsDebugger class is used by the simulator while debugging simulated
+// code.
+class MipsDebugger {
+ public:
+  explicit MipsDebugger(Simulator* sim) : sim_(sim) { }
+  ~MipsDebugger();
+
+  void Stop(Instruction* instr);
+  void Debug();
+  // Print all registers with a nice formatting.
+  void PrintAllRegs();
+  void PrintAllRegsIncludingFPU();
+
+ private:
+  // We set the breakpoint code to 0xfffff to easily recognize it.
+  static const Instr kBreakpointInstr = SPECIAL | BREAK | 0xfffff << 6;
+  static const Instr kNopInstr =  0x0;
+
+  Simulator* sim_;
+
+  int64_t GetRegisterValue(int regnum);
+  int64_t GetFPURegisterValue(int regnum);
+  float GetFPURegisterValueFloat(int regnum);
+  double GetFPURegisterValueDouble(int regnum);
+  bool GetValue(const char* desc, int64_t* value);
+
+  // Set or delete a breakpoint. Returns true if successful.
+  bool SetBreakpoint(Instruction* breakpc);
+  bool DeleteBreakpoint(Instruction* breakpc);
+
+  // Undo and redo all breakpoints. This is needed to bracket disassembly and
+  // execution to skip past breakpoints when run from the debugger.
+  void UndoBreakpoints();
+  void RedoBreakpoints();
+};
+
+
+MipsDebugger::~MipsDebugger() {
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "aw+");
+  }
+}
+
+
+void MipsDebugger::Stop(Instruction* instr) {
+  // Get the stop code.
+  uint32_t code = instr->Bits(25, 6);
+  // Retrieve the encoded address, which comes just after this stop.
+  char** msg_address =
+    reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+  char* msg = *msg_address;
+  DCHECK(msg != NULL);
+
+  // Update this stop description.
+  if (!sim_->watched_stops_[code].desc) {
+    sim_->watched_stops_[code].desc = msg;
+  }
+
+  if (strlen(msg) > 0) {
+    if (coverage_log != NULL) {
+      fprintf(coverage_log, "%s\n", msg);
+      fflush(coverage_log);
+    }
+    // Overwrite the instruction and address with nops.
+    instr->SetInstructionBits(kNopInstr);
+    reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
+  }
+  // TODO(yuyin): 2 -> 3?
+  sim_->set_pc(sim_->get_pc() + 3 * Instruction::kInstrSize);
+}
+
+
+#else  // GENERATED_CODE_COVERAGE
+
+#define UNSUPPORTED() printf("Unsupported instruction.\n");
+
+static void InitializeCoverage() {}
+
+
+void MipsDebugger::Stop(Instruction* instr) {
+  // Get the stop code.
+  uint32_t code = instr->Bits(25, 6);
+  // Retrieve the encoded address, which comes just after this stop.
+  char* msg = *reinterpret_cast<char**>(sim_->get_pc() +
+      Instruction::kInstrSize);
+  // Update this stop description.
+  if (!sim_->watched_stops_[code].desc) {
+    sim_->watched_stops_[code].desc = msg;
+  }
+  PrintF("Simulator hit %s (%u)\n", msg, code);
+  // TODO(yuyin): 2 -> 3?
+  sim_->set_pc(sim_->get_pc() + 3 * Instruction::kInstrSize);
+  Debug();
+}
+#endif  // GENERATED_CODE_COVERAGE
+
+
+int64_t MipsDebugger::GetRegisterValue(int regnum) {
+  if (regnum == kNumSimuRegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_register(regnum);
+  }
+}
+
+
+int64_t MipsDebugger::GetFPURegisterValue(int regnum) {
+  if (regnum == kNumFPURegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_fpu_register(regnum);
+  }
+}
+
+
+float MipsDebugger::GetFPURegisterValueFloat(int regnum) {
+  if (regnum == kNumFPURegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_fpu_register_float(regnum);
+  }
+}
+
+
+double MipsDebugger::GetFPURegisterValueDouble(int regnum) {
+  if (regnum == kNumFPURegisters) {
+    return sim_->get_pc();
+  } else {
+    return sim_->get_fpu_register_double(regnum);
+  }
+}
+
+
+bool MipsDebugger::GetValue(const char* desc, int64_t* value) {
+  int regnum = Registers::Number(desc);
+  int fpuregnum = FPURegisters::Number(desc);
+
+  if (regnum != kInvalidRegister) {
+    *value = GetRegisterValue(regnum);
+    return true;
+  } else if (fpuregnum != kInvalidFPURegister) {
+    *value = GetFPURegisterValue(fpuregnum);
+    return true;
+  } else if (strncmp(desc, "0x", 2) == 0) {
+    return SScanF(desc + 2, "%" SCNx64,
+                  reinterpret_cast<uint64_t*>(value)) == 1;
+  } else {
+    return SScanF(desc, "%" SCNu64, reinterpret_cast<uint64_t*>(value)) == 1;
+  }
+}
+
+
+bool MipsDebugger::SetBreakpoint(Instruction* breakpc) {
+  // Check if a breakpoint can be set. If not return without any side-effects.
+  if (sim_->break_pc_ != NULL) {
+    return false;
+  }
+
+  // Set the breakpoint.
+  sim_->break_pc_ = breakpc;
+  sim_->break_instr_ = breakpc->InstructionBits();
+  // Not setting the breakpoint instruction in the code itself. It will be set
+  // when the debugger shell continues.
+  return true;
+}
+
+
+bool MipsDebugger::DeleteBreakpoint(Instruction* breakpc) {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+
+  sim_->break_pc_ = NULL;
+  sim_->break_instr_ = 0;
+  return true;
+}
+
+
+void MipsDebugger::UndoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+}
+
+
+void MipsDebugger::RedoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+  }
+}
+
+
+void MipsDebugger::PrintAllRegs() {
+#define REG_INFO(n) Registers::Name(n), GetRegisterValue(n), GetRegisterValue(n)
+
+  PrintF("\n");
+  // at, v0, a0.
+  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+         REG_INFO(1), REG_INFO(2), REG_INFO(4));
+  // v1, a1.
+  PrintF("%34s\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+         "", REG_INFO(3), REG_INFO(5));
+  // a2.
+  PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(6));
+  // a3.
+  PrintF("%34s\t%34s\t%3s: 0x%016lx %14ld\n", "", "", REG_INFO(7));
+  PrintF("\n");
+  // a4-t3, s0-s7
+  for (int i = 0; i < 8; i++) {
+    PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+           REG_INFO(8+i), REG_INFO(16+i));
+  }
+  PrintF("\n");
+  // t8, k0, LO.
+  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+         REG_INFO(24), REG_INFO(26), REG_INFO(32));
+  // t9, k1, HI.
+  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+         REG_INFO(25), REG_INFO(27), REG_INFO(33));
+  // sp, fp, gp.
+  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+         REG_INFO(29), REG_INFO(30), REG_INFO(28));
+  // pc.
+  PrintF("%3s: 0x%016lx %14ld\t%3s: 0x%016lx %14ld\n",
+         REG_INFO(31), REG_INFO(34));
+
+#undef REG_INFO
+}
+
+
+void MipsDebugger::PrintAllRegsIncludingFPU() {
+#define FPU_REG_INFO(n) FPURegisters::Name(n), \
+        GetFPURegisterValue(n), \
+        GetFPURegisterValueDouble(n)
+
+  PrintAllRegs();
+
+  PrintF("\n\n");
+  // f0, f1, f2, ... f31.
+  // TODO(plind): consider printing 2 columns for space efficiency.
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(0) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(1) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(2) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(3) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(4) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(5) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(6) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(7) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(8) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(9) );
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(10));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(11));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(12));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(13));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(14));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(15));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(16));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(17));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(18));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(19));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(20));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(21));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(22));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(23));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(24));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(25));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(26));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(27));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(28));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(29));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(30));
+  PrintF("%3s: 0x%016lx %16.4e\n", FPU_REG_INFO(31));
+
+#undef FPU_REG_INFO
+}
+
+
+void MipsDebugger::Debug() {
+  intptr_t last_pc = -1;
+  bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+  char cmd[COMMAND_SIZE + 1];
+  char arg1[ARG_SIZE + 1];
+  char arg2[ARG_SIZE + 1];
+  char* argv[3] = { cmd, arg1, arg2 };
+
+  // Make sure to have a proper terminating character if reaching the limit.
+  cmd[COMMAND_SIZE] = 0;
+  arg1[ARG_SIZE] = 0;
+  arg2[ARG_SIZE] = 0;
+
+  // Undo all set breakpoints while running in the debugger shell. This will
+  // make them invisible to all commands.
+  UndoBreakpoints();
+
+  while (!done && (sim_->get_pc() != Simulator::end_sim_pc)) {
+    if (last_pc != sim_->get_pc()) {
+      disasm::NameConverter converter;
+      disasm::Disassembler dasm(converter);
+      // Use a reasonably large buffer.
+      v8::internal::EmbeddedVector<char, 256> buffer;
+      dasm.InstructionDecode(buffer,
+                             reinterpret_cast<byte*>(sim_->get_pc()));
+      PrintF("  0x%016lx  %s\n", sim_->get_pc(), buffer.start());
+      last_pc = sim_->get_pc();
+    }
+    char* line = ReadLine("sim> ");
+    if (line == NULL) {
+      break;
+    } else {
+      char* last_input = sim_->last_debugger_input();
+      if (strcmp(line, "\n") == 0 && last_input != NULL) {
+        line = last_input;
+      } else {
+        // Ownership is transferred to sim_.
+        sim_->set_last_debugger_input(line);
+      }
+      // Use sscanf to parse the individual parts of the command line. At the
+      // moment no command expects more than two parameters.
+      int argc = SScanF(line,
+                        "%" XSTR(COMMAND_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s",
+                        cmd, arg1, arg2);
+      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        Instruction* instr = reinterpret_cast<Instruction*>(sim_->get_pc());
+        if (!(instr->IsTrap()) ||
+            instr->InstructionBits() == rtCallRedirInstr) {
+          sim_->InstructionDecode(
+              reinterpret_cast<Instruction*>(sim_->get_pc()));
+        } else {
+          // Allow si to jump over generated breakpoints.
+          PrintF("/!\\ Jumping over generated breakpoint.\n");
+          sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+        }
+      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+        // Execute the one instruction we broke at with breakpoints disabled.
+        sim_->InstructionDecode(reinterpret_cast<Instruction*>(sim_->get_pc()));
+        // Leave the debugger shell.
+        done = true;
+      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+        if (argc == 2) {
+          int64_t value;
+          double dvalue;
+          if (strcmp(arg1, "all") == 0) {
+            PrintAllRegs();
+          } else if (strcmp(arg1, "allf") == 0) {
+            PrintAllRegsIncludingFPU();
+          } else {
+            int regnum = Registers::Number(arg1);
+            int fpuregnum = FPURegisters::Number(arg1);
+
+            if (regnum != kInvalidRegister) {
+              value = GetRegisterValue(regnum);
+              PrintF("%s: 0x%08lx %ld \n", arg1, value, value);
+            } else if (fpuregnum != kInvalidFPURegister) {
+              value = GetFPURegisterValue(fpuregnum);
+              dvalue = GetFPURegisterValueDouble(fpuregnum);
+              PrintF("%3s: 0x%016lx %16.4e\n",
+                     FPURegisters::Name(fpuregnum), value, dvalue);
+            } else {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          }
+        } else {
+          if (argc == 3) {
+            if (strcmp(arg2, "single") == 0) {
+              int64_t value;
+              float fvalue;
+              int fpuregnum = FPURegisters::Number(arg1);
+
+              if (fpuregnum != kInvalidFPURegister) {
+                value = GetFPURegisterValue(fpuregnum);
+                value &= 0xffffffffUL;
+                fvalue = GetFPURegisterValueFloat(fpuregnum);
+                PrintF("%s: 0x%08lx %11.4e\n", arg1, value, fvalue);
+              } else {
+                PrintF("%s unrecognized\n", arg1);
+              }
+            } else {
+              PrintF("print <fpu register> single\n");
+            }
+          } else {
+            PrintF("print <register> or print <fpu register> single\n");
+          }
+        }
+      } else if ((strcmp(cmd, "po") == 0)
+                 || (strcmp(cmd, "printobject") == 0)) {
+        if (argc == 2) {
+          int64_t value;
+          OFStream os(stdout);
+          if (GetValue(arg1, &value)) {
+            Object* obj = reinterpret_cast<Object*>(value);
+            os << arg1 << ": \n";
+#ifdef DEBUG
+            obj->Print(os);
+            os << "\n";
+#else
+            os << Brief(obj) << "\n";
+#endif
+          } else {
+            os << arg1 << " unrecognized\n";
+          }
+        } else {
+          PrintF("printobject <value>\n");
+        }
+      } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+        int64_t* cur = NULL;
+        int64_t* end = NULL;
+        int next_arg = 1;
+
+        if (strcmp(cmd, "stack") == 0) {
+          cur = reinterpret_cast<int64_t*>(sim_->get_register(Simulator::sp));
+        } else {  // Command "mem".
+          int64_t value;
+          if (!GetValue(arg1, &value)) {
+            PrintF("%s unrecognized\n", arg1);
+            continue;
+          }
+          cur = reinterpret_cast<int64_t*>(value);
+          next_arg++;
+        }
+
+        int64_t words;
+        if (argc == next_arg) {
+          words = 10;
+        } else {
+          if (!GetValue(argv[next_arg], &words)) {
+            words = 10;
+          }
+        }
+        end = cur + words;
+
+        while (cur < end) {
+          PrintF("  0x%012lx:  0x%016lx %14ld",
+                 reinterpret_cast<intptr_t>(cur), *cur, *cur);
+          HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+          int64_t value = *cur;
+          Heap* current_heap = v8::internal::Isolate::Current()->heap();
+          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+            PrintF(" (");
+            if ((value & 1) == 0) {
+              PrintF("smi %d", static_cast<int>(value >> 32));
+            } else {
+              obj->ShortPrint();
+            }
+            PrintF(")");
+          }
+          PrintF("\n");
+          cur++;
+        }
+
+      } else if ((strcmp(cmd, "disasm") == 0) ||
+                 (strcmp(cmd, "dpc") == 0) ||
+                 (strcmp(cmd, "di") == 0)) {
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // Use a reasonably large buffer.
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte* cur = NULL;
+        byte* end = NULL;
+
+        if (argc == 1) {
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
+          end = cur + (10 * Instruction::kInstrSize);
+        } else if (argc == 2) {
+          int regnum = Registers::Number(arg1);
+          if (regnum != kInvalidRegister || strncmp(arg1, "0x", 2) == 0) {
+            // The argument is an address or a register name.
+            int64_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(value);
+              // Disassemble 10 instructions at <arg1>.
+              end = cur + (10 * Instruction::kInstrSize);
+            }
+          } else {
+            // The argument is the number of instructions.
+            int64_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(sim_->get_pc());
+              // Disassemble <arg1> instructions.
+              end = cur + (value * Instruction::kInstrSize);
+            }
+          }
+        } else {
+          int64_t value1;
+          int64_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte*>(value1);
+            end = cur + (value2 * Instruction::kInstrSize);
+          }
+        }
+
+        while (cur < end) {
+          dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08lx  %s\n",
+              reinterpret_cast<intptr_t>(cur), buffer.start());
+          cur += Instruction::kInstrSize;
+        }
+      } else if (strcmp(cmd, "gdb") == 0) {
+        PrintF("relinquishing control to gdb\n");
+        v8::base::OS::DebugBreak();
+        PrintF("regaining control from gdb\n");
+      } else if (strcmp(cmd, "break") == 0) {
+        if (argc == 2) {
+          int64_t value;
+          if (GetValue(arg1, &value)) {
+            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+              PrintF("setting breakpoint failed\n");
+            }
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("break <address>\n");
+        }
+      } else if (strcmp(cmd, "del") == 0) {
+        if (!DeleteBreakpoint(NULL)) {
+          PrintF("deleting breakpoint failed\n");
+        }
+      } else if (strcmp(cmd, "flags") == 0) {
+        PrintF("No flags on MIPS !\n");
+      } else if (strcmp(cmd, "stop") == 0) {
+        int64_t value;
+        intptr_t stop_pc = sim_->get_pc() -
+            2 * Instruction::kInstrSize;
+        Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+        Instruction* msg_address =
+          reinterpret_cast<Instruction*>(stop_pc +
+              Instruction::kInstrSize);
+        if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+          // Remove the current stop.
+          if (sim_->IsStopInstruction(stop_instr)) {
+            stop_instr->SetInstructionBits(kNopInstr);
+            msg_address->SetInstructionBits(kNopInstr);
+          } else {
+            PrintF("Not at debugger stop.\n");
+          }
+        } else if (argc == 3) {
+          // Print information about all/the specified breakpoint(s).
+          if (strcmp(arg1, "info") == 0) {
+            if (strcmp(arg2, "all") == 0) {
+              PrintF("Stop information:\n");
+              for (uint32_t i = kMaxWatchpointCode + 1;
+                   i <= kMaxStopCode;
+                   i++) {
+                sim_->PrintStopInfo(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->PrintStopInfo(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          } else if (strcmp(arg1, "enable") == 0) {
+            // Enable all/the specified breakpoint(s).
+            if (strcmp(arg2, "all") == 0) {
+              for (uint32_t i = kMaxWatchpointCode + 1;
+                   i <= kMaxStopCode;
+                   i++) {
+                sim_->EnableStop(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->EnableStop(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          } else if (strcmp(arg1, "disable") == 0) {
+            // Disable all/the specified breakpoint(s).
+            if (strcmp(arg2, "all") == 0) {
+              for (uint32_t i = kMaxWatchpointCode + 1;
+                   i <= kMaxStopCode;
+                   i++) {
+                sim_->DisableStop(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->DisableStop(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          }
+        } else {
+          PrintF("Wrong usage. Use help command for more information.\n");
+        }
+      } else if ((strcmp(cmd, "stat") == 0) || (strcmp(cmd, "st") == 0)) {
+        // Print registers and disassemble.
+        PrintAllRegs();
+        PrintF("\n");
+
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // Use a reasonably large buffer.
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte* cur = NULL;
+        byte* end = NULL;
+
+        if (argc == 1) {
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
+          end = cur + (10 * Instruction::kInstrSize);
+        } else if (argc == 2) {
+          int64_t value;
+          if (GetValue(arg1, &value)) {
+            cur = reinterpret_cast<byte*>(value);
+            // No length parameter passed; assume 10 instructions.
+            end = cur + (10 * Instruction::kInstrSize);
+          }
+        } else {
+          int64_t value1;
+          int64_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte*>(value1);
+            end = cur + (value2 * Instruction::kInstrSize);
+          }
+        }
+
+        while (cur < end) {
+          dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08lx  %s\n",
+                 reinterpret_cast<intptr_t>(cur), buffer.start());
+          cur += Instruction::kInstrSize;
+        }
+      } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+        PrintF("cont\n");
+        PrintF("  continue execution (alias 'c')\n");
+        PrintF("stepi\n");
+        PrintF("  step one instruction (alias 'si')\n");
+        PrintF("print <register>\n");
+        PrintF("  print register content (alias 'p')\n");
+        PrintF("  use register name 'all' to print all registers\n");
+        PrintF("printobject <register>\n");
+        PrintF("  print an object from a register (alias 'po')\n");
+        PrintF("stack [<words>]\n");
+        PrintF("  dump stack content, default dump 10 words)\n");
+        PrintF("mem <address> [<words>]\n");
+        PrintF("  dump memory content, default dump 10 words)\n");
+        PrintF("flags\n");
+        PrintF("  print flags\n");
+        PrintF("disasm [<instructions>]\n");
+        PrintF("disasm [<address/register>]\n");
+        PrintF("disasm [[<address/register>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions\n");
+        PrintF("  from pc (alias 'di')\n");
+        PrintF("gdb\n");
+        PrintF("  enter gdb\n");
+        PrintF("break <address>\n");
+        PrintF("  set a break point on the address\n");
+        PrintF("del\n");
+        PrintF("  delete the breakpoint\n");
+        PrintF("stop feature:\n");
+        PrintF("  Description:\n");
+        PrintF("    Stops are debug instructions inserted by\n");
+        PrintF("    the Assembler::stop() function.\n");
+        PrintF("    When hitting a stop, the Simulator will\n");
+        PrintF("    stop and and give control to the Debugger.\n");
+        PrintF("    All stop codes are watched:\n");
+        PrintF("    - They can be enabled / disabled: the Simulator\n");
+        PrintF("       will / won't stop when hitting them.\n");
+        PrintF("    - The Simulator keeps track of how many times they \n");
+        PrintF("      are met. (See the info command.) Going over a\n");
+        PrintF("      disabled stop still increases its counter. \n");
+        PrintF("  Commands:\n");
+        PrintF("    stop info all/<code> : print infos about number <code>\n");
+        PrintF("      or all stop(s).\n");
+        PrintF("    stop enable/disable all/<code> : enables / disables\n");
+        PrintF("      all or number <code> stop(s)\n");
+        PrintF("    stop unstop\n");
+        PrintF("      ignore the stop instruction at the current location\n");
+        PrintF("      from now on\n");
+      } else {
+        PrintF("Unknown command: %s\n", cmd);
+      }
+    }
+  }
+
+  // Add all the breakpoints back to stop execution and enter the debugger
+  // shell when hit.
+  RedoBreakpoints();
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+static bool ICacheMatch(void* one, void* two) {
+  DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+  return one == two;
+}
+
+
+static uint32_t ICacheHash(void* key) {
+  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
+static bool AllOnOnePage(uintptr_t start, int size) {
+  intptr_t start_page = (start & ~CachePage::kPageMask);
+  intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+  return start_page == end_page;
+}
+
+
+void Simulator::set_last_debugger_input(char* input) {
+  DeleteArray(last_debugger_input_);
+  last_debugger_input_ = input;
+}
+
+
+void Simulator::FlushICache(v8::internal::HashMap* i_cache,
+                            void* start_addr,
+                            size_t size) {
+  int64_t start = reinterpret_cast<int64_t>(start_addr);
+  int64_t intra_line = (start & CachePage::kLineMask);
+  start -= intra_line;
+  size += intra_line;
+  size = ((size - 1) | CachePage::kLineMask) + 1;
+  int offset = (start & CachePage::kPageMask);
+  while (!AllOnOnePage(start, size - 1)) {
+    int bytes_to_flush = CachePage::kPageSize - offset;
+    FlushOnePage(i_cache, start, bytes_to_flush);
+    start += bytes_to_flush;
+    size -= bytes_to_flush;
+    DCHECK_EQ((uint64_t)0, start & CachePage::kPageMask);
+    offset = 0;
+  }
+  if (size != 0) {
+    FlushOnePage(i_cache, start, size);
+  }
+}
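+
+// Worked example of the alignment arithmetic above, assuming CachePage's
+// 4-byte lines and 4KB pages: for start_addr 0x1002 and size 9, intra_line
+// is 2, so the range grows to start 0x1000, size 11, which is then rounded
+// up to 12 bytes (three whole cache lines) before flushing.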
+
+
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+  v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+                                                        ICacheHash(page),
+                                                        true);
+  if (entry->value == NULL) {
+    CachePage* new_page = new CachePage();
+    entry->value = new_page;
+  }
+  return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
+                             intptr_t start,
+                             int size) {
+  DCHECK(size <= CachePage::kPageSize);
+  DCHECK(AllOnOnePage(start, size - 1));
+  DCHECK((start & CachePage::kLineMask) == 0);
+  DCHECK((size & CachePage::kLineMask) == 0);
+  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+  int offset = (start & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(i_cache, page);
+  char* valid_bytemap = cache_page->ValidityByte(offset);
+  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+                            Instruction* instr) {
+  int64_t address = reinterpret_cast<int64_t>(instr);
+  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+  int offset = (address & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(i_cache, page);
+  char* cache_valid_byte = cache_page->ValidityByte(offset);
+  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+  char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+  if (cache_hit) {
+    // Check that the data in memory matches the contents of the I-cache.
+    CHECK_EQ(0, memcmp(reinterpret_cast<void*>(instr),
+                       cache_page->CachedData(offset),
+                       Instruction::kInstrSize));
+  } else {
+    // Cache miss.  Load memory into the cache.
+    memcpy(cached_line, line, CachePage::kLineLength);
+    *cache_valid_byte = CachePage::LINE_VALID;
+  }
+}
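+
+// CheckICache gives the simulator the same visible behavior as real
+// hardware: executing from a cached line that was modified without an
+// intervening FlushICache trips the CHECK_EQ above instead of silently
+// running stale code.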
+
+
+void Simulator::Initialize(Isolate* isolate) {
+  if (isolate->simulator_initialized()) return;
+  isolate->set_simulator_initialized(true);
+  ::v8::internal::ExternalReference::set_redirector(isolate,
+                                                    &RedirectExternalReference);
+}
+
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+  i_cache_ = isolate_->simulator_i_cache();
+  if (i_cache_ == NULL) {
+    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    isolate_->set_simulator_i_cache(i_cache_);
+  }
+  Initialize(isolate);
+  // Set up simulator support first. Some of this information is needed to
+  // setup the architecture state.
+  stack_size_ = FLAG_sim_stack_size * KB;
+  stack_ = reinterpret_cast<char*>(malloc(stack_size_));
+  pc_modified_ = false;
+  icount_ = 0;
+  break_count_ = 0;
+  break_pc_ = NULL;
+  break_instr_ = 0;
+
+  // Set up architecture state.
+  // All registers are initialized to zero to start with.
+  for (int i = 0; i < kNumSimuRegisters; i++) {
+    registers_[i] = 0;
+  }
+  for (int i = 0; i < kNumFPURegisters; i++) {
+    FPUregisters_[i] = 0;
+  }
+  FCSR_ = 0;
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To be safe against potential stack underflows we
+  // leave some buffer below.
+  registers_[sp] = reinterpret_cast<int64_t>(stack_) + stack_size_ - 64;
+  // The ra and pc are initialized to a known bad value that will cause an
+  // access violation if the simulator ever tries to execute it.
+  registers_[pc] = bad_ra;
+  registers_[ra] = bad_ra;
+  InitializeCoverage();
+  for (int i = 0; i < kNumExceptions; i++) {
+    exceptions[i] = 0;
+  }
+
+  last_debugger_input_ = NULL;
+}
+
+
+Simulator::~Simulator() {
+}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator.  The external reference will be a function compiled for the
+// host architecture.  We need to call that function instead of trying to
+// execute it with the simulator.  We do that by redirecting the external
+// reference to a swi (software-interrupt) instruction that is handled by
+// the simulator.  We write the original destination of the jump just at a known
+// offset from the swi instruction so the simulator knows what to call.
+class Redirection {
+ public:
+  Redirection(void* external_function, ExternalReference::Type type)
+      : external_function_(external_function),
+        swi_instruction_(rtCallRedirInstr),
+        type_(type),
+        next_(NULL) {
+    Isolate* isolate = Isolate::Current();
+    next_ = isolate->simulator_redirection();
+    Simulator::current(isolate)->
+        FlushICache(isolate->simulator_i_cache(),
+                    reinterpret_cast<void*>(&swi_instruction_),
+                    Instruction::kInstrSize);
+    isolate->set_simulator_redirection(this);
+  }
+
+  void* address_of_swi_instruction() {
+    return reinterpret_cast<void*>(&swi_instruction_);
+  }
+
+  void* external_function() { return external_function_; }
+  ExternalReference::Type type() { return type_; }
+
+  static Redirection* Get(void* external_function,
+                          ExternalReference::Type type) {
+    Isolate* isolate = Isolate::Current();
+    Redirection* current = isolate->simulator_redirection();
+    for (; current != NULL; current = current->next_) {
+      if (current->external_function_ == external_function) return current;
+    }
+    return new Redirection(external_function, type);
+  }
+
+  static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+    char* addr_of_redirection =
+        addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+  static void* ReverseRedirection(int64_t reg) {
+    Redirection* redirection = FromSwiInstruction(
+        reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+    return redirection->external_function();
+  }
+
+ private:
+  void* external_function_;
+  uint32_t swi_instruction_;
+  ExternalReference::Type type_;
+  Redirection* next_;
+};
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+                                           ExternalReference::Type type) {
+  Redirection* redirection = Redirection::Get(external_function, type);
+  return redirection->address_of_swi_instruction();
+}
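+
+// Illustrative round trip, where SomeCFunction stands in for any host
+// function the generated code needs to call:
+//
+//   void* stub = RedirectExternalReference(
+//       reinterpret_cast<void*>(&SomeCFunction), type);
+//   // Generated code jumps to 'stub' and traps on the embedded swi;
+//   // the simulator then recovers the real target:
+//   void* target = Redirection::ReverseRedirection(
+//       reinterpret_cast<int64_t>(stub));  // == &SomeCFunction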
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+  v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+       isolate->FindOrAllocatePerThreadDataForThisThread();
+  DCHECK(isolate_data != NULL);
+
+  Simulator* sim = isolate_data->simulator();
+  if (sim == NULL) {
+    // TODO(146): delete the simulator object when a thread/isolate goes away.
+    sim = new Simulator(isolate);
+    isolate_data->set_simulator(sim);
+  }
+  return sim;
+}
+
+
+// Sets the register in the architecture state. It will also deal with updating
+// Simulator internal state for special registers such as PC.
+void Simulator::set_register(int reg, int64_t value) {
+  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+  if (reg == pc) {
+    pc_modified_ = true;
+  }
+
+  // Zero register always holds 0.
+  registers_[reg] = (reg == 0) ? 0 : value;
+}
+
+
+void Simulator::set_dw_register(int reg, const int* dbl) {
+  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+  registers_[reg] = dbl[1];
+  registers_[reg] = registers_[reg] << 32;
+  registers_[reg] += dbl[0];
+}
+
+
+void Simulator::set_fpu_register(int fpureg, int64_t value) {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  FPUregisters_[fpureg] = value;
+}
+
+
+void Simulator::set_fpu_register_word(int fpureg, int32_t value) {
+  // Set ONLY lower 32-bits, leaving upper bits untouched.
+  // TODO(plind): big endian issue.
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  int32_t *pword = reinterpret_cast<int32_t*>(&FPUregisters_[fpureg]);
+  *pword = value;
+}
+
+
+void Simulator::set_fpu_register_hi_word(int fpureg, int32_t value) {
+  // Set ONLY upper 32-bits, leaving lower bits untouched.
+  // TODO(plind): big endian issue.
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  int32_t *phiword = (reinterpret_cast<int32_t*>(&FPUregisters_[fpureg])) + 1;
+  *phiword = value;
+}
+
+
+void Simulator::set_fpu_register_float(int fpureg, float value) {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  *bit_cast<float*>(&FPUregisters_[fpureg]) = value;
+}
+
+
+void Simulator::set_fpu_register_double(int fpureg, double value) {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  *bit_cast<double*>(&FPUregisters_[fpureg]) = value;
+}
+
+
+// Get the register from the architecture state. This function does handle
+// the special case of accessing the PC register.
+int64_t Simulator::get_register(int reg) const {
+  DCHECK((reg >= 0) && (reg < kNumSimuRegisters));
+  if (reg == 0)
+    return 0;
+  else
+    return registers_[reg] + ((reg == pc) ? Instruction::kPCReadOffset : 0);
+}
+
+
+double Simulator::get_double_from_register_pair(int reg) {
+  // TODO(plind): bad ABI stuff, refactor or remove.
+  DCHECK((reg >= 0) && (reg < kNumSimuRegisters) && ((reg % 2) == 0));
+
+  double dm_val = 0.0;
+  // Read the bits from the unsigned integer register_[] array
+  // into the double precision floating point value and return it.
+  char buffer[sizeof(registers_[0])];
+  memcpy(buffer, &registers_[reg], sizeof(registers_[0]));
+  memcpy(&dm_val, buffer, sizeof(registers_[0]));
+  return(dm_val);
+}
+
+
+int64_t Simulator::get_fpu_register(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return FPUregisters_[fpureg];
+}
+
+
+int32_t Simulator::get_fpu_register_word(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+int32_t Simulator::get_fpu_register_signed_word(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return static_cast<int32_t>(FPUregisters_[fpureg] & 0xffffffff);
+}
+
+
+int32_t Simulator::get_fpu_register_hi_word(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return static_cast<int32_t>((FPUregisters_[fpureg] >> 32) & 0xffffffff);
+}
+
+
+float Simulator::get_fpu_register_float(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return *bit_cast<float*>(const_cast<int64_t*>(&FPUregisters_[fpureg]));
+}
+
+
+double Simulator::get_fpu_register_double(int fpureg) const {
+  DCHECK((fpureg >= 0) && (fpureg < kNumFPURegisters));
+  return *bit_cast<double*>(&FPUregisters_[fpureg]);
+}
+
+
+// Runtime FP routines take up to two double arguments and zero or one
+// integer argument. All are constructed here, from a0-a3, or from f12 and
+// f13 (n64) or f12 and f14 (O32).
+void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
+  if (!IsMipsSoftFloatABI) {
+    const int fparg2 = (kMipsAbi == kN64) ? 13 : 14;
+    *x = get_fpu_register_double(12);
+    *y = get_fpu_register_double(fparg2);
+    *z = get_register(a2);
+  } else {
+    // TODO(plind): bad ABI stuff, refactor or remove.
+    // We use a char buffer to get around the strict-aliasing rules which
+    // otherwise allow the compiler to optimize away the copy.
+    char buffer[sizeof(*x)];
+    int32_t* reg_buffer = reinterpret_cast<int32_t*>(buffer);
+
+    // Registers a0 and a1 -> x.
+    reg_buffer[0] = get_register(a0);
+    reg_buffer[1] = get_register(a1);
+    memcpy(x, buffer, sizeof(buffer));
+    // Registers a2 and a3 -> y.
+    reg_buffer[0] = get_register(a2);
+    reg_buffer[1] = get_register(a3);
+    memcpy(y, buffer, sizeof(buffer));
+    // Register 2 -> z.
+    reg_buffer[0] = get_register(a2);
+    memcpy(z, buffer, sizeof(*z));
+  }
+}
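+
+// Soft-float sketch (illustrative): for a hypothetical builtin taking
+// (double x, double y), x is reassembled from the 32-bit halves in (a0, a1)
+// and y from (a2, a3) via the buffer copies above; hard-float calls instead
+// read f12 and f13 (n64) or f12 and f14 (O32) directly.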
+
+
+// The return value is either in v0/v1 or f0.
+void Simulator::SetFpResult(const double& result) {
+  if (!IsMipsSoftFloatABI) {
+    set_fpu_register_double(0, result);
+  } else {
+    char buffer[2 * sizeof(registers_[0])];
+    int64_t* reg_buffer = reinterpret_cast<int64_t*>(buffer);
+    memcpy(buffer, &result, sizeof(buffer));
+    // Copy result to v0 and v1.
+    set_register(v0, reg_buffer[0]);
+    set_register(v1, reg_buffer[1]);
+  }
+}
+
+
+// Helper functions for setting and testing the FCSR register's bits.
+void Simulator::set_fcsr_bit(uint32_t cc, bool value) {
+  if (value) {
+    FCSR_ |= (1 << cc);
+  } else {
+    FCSR_ &= ~(1 << cc);
+  }
+}
+
+
+bool Simulator::test_fcsr_bit(uint32_t cc) {
+  return FCSR_ & (1 << cc);
+}
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round_error(double original, double rounded) {
+  bool ret = false;
+  double max_int32 = std::numeric_limits<int32_t>::max();
+  double min_int32 = std::numeric_limits<int32_t>::min();
+
+  if (!std::isfinite(original) || !std::isfinite(rounded)) {
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  if (original != rounded) {
+    set_fcsr_bit(kFCSRInexactFlagBit, true);
+  }
+
+  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+    ret = true;
+  }
+
+  if (rounded > max_int32 || rounded < min_int32) {
+    set_fcsr_bit(kFCSROverflowFlagBit, true);
+    // The reference is not really clear but it seems this is required:
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  return ret;
+}
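+
+// Example: converting 2147483648.0 (INT32_MAX + 1) sets both the overflow
+// and invalid-operation flags and returns true, so the caller substitutes
+// kFPUInvalidResult; rounding 1.5 to 2 merely sets the inexact flag.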
+
+
+// Sets the rounding error codes in FCSR based on the result of the rounding.
+// Returns true if the operation was invalid.
+bool Simulator::set_fcsr_round64_error(double original, double rounded) {
+  bool ret = false;
+  double max_int64 = std::numeric_limits<int64_t>::max();
+  double min_int64 = std::numeric_limits<int64_t>::min();
+
+  if (!std::isfinite(original) || !std::isfinite(rounded)) {
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  if (original != rounded) {
+    set_fcsr_bit(kFCSRInexactFlagBit, true);
+  }
+
+  if (rounded < DBL_MIN && rounded > -DBL_MIN && rounded != 0) {
+    set_fcsr_bit(kFCSRUnderflowFlagBit, true);
+    ret = true;
+  }
+
+  if (rounded > max_int64 || rounded < min_int64) {
+    set_fcsr_bit(kFCSROverflowFlagBit, true);
+    // The reference is not really clear but it seems this is required:
+    set_fcsr_bit(kFCSRInvalidOpFlagBit, true);
+    ret = true;
+  }
+
+  return ret;
+}
+
+
+// Raw access to the PC register.
+void Simulator::set_pc(int64_t value) {
+  pc_modified_ = true;
+  registers_[pc] = value;
+}
+
+
+bool Simulator::has_bad_pc() const {
+  return ((registers_[pc] == bad_ra) || (registers_[pc] == end_sim_pc));
+}
+
+
+// Raw access to the PC register without the special adjustment when reading.
+int64_t Simulator::get_pc() const {
+  return registers_[pc];
+}
+
+
+// The MIPS cannot do unaligned reads and writes.  On some MIPS platforms an
+// interrupt is caused.  On others it does a funky rotation thing.  For now we
+// simply disallow unaligned reads, but at some point we may want to move to
+// emulating the rotate behaviour.  Note that simulator runs have the runtime
+// system running directly on the host system and only generated code is
+// executed in the simulator.  Since the host is typically IA32 we will not
+// get the correct MIPS-like behaviour on unaligned accesses.
+
+// TODO(plind): refactor this messy debug code when we do unaligned access.
+void Simulator::DieOrDebug() {
+  if (1) {  // Flag for this was removed.
+    MipsDebugger dbg(this);
+    dbg.Debug();
+  } else {
+    base::OS::Abort();
+  }
+}
+
+
+void Simulator::TraceRegWr(int64_t value) {
+  if (::v8::internal::FLAG_trace_sim) {
+    SNPrintF(trace_buf_, "%016lx", value);
+  }
+}
+
+
+// TODO(plind): consider making icount_ printing a flag option.
+void Simulator::TraceMemRd(int64_t addr, int64_t value) {
+  if (::v8::internal::FLAG_trace_sim) {
+    SNPrintF(trace_buf_, "%016lx <-- [%016lx]    (%ld)",
+             value, addr, icount_);
+  }
+}
+
+
+void Simulator::TraceMemWr(int64_t addr, int64_t value, TraceType t) {
+  if (::v8::internal::FLAG_trace_sim) {
+    switch (t) {
+      case BYTE:
+        SNPrintF(trace_buf_, "               %02x --> [%016lx]",
+                 static_cast<int8_t>(value), addr);
+        break;
+      case HALF:
+        SNPrintF(trace_buf_, "            %04x --> [%016lx]",
+                 static_cast<int16_t>(value), addr);
+        break;
+      case WORD:
+        SNPrintF(trace_buf_, "        %08x --> [%016lx]",
+                 static_cast<int32_t>(value), addr);
+        break;
+      case DWORD:
+        SNPrintF(trace_buf_, "%016lx --> [%016lx]    (%ld)",
+                 value, addr, icount_);
+        break;
+    }
+  }
+}
+
+
+// TODO(plind): sign-extend and zero-extend are not implemented properly
+// on all the ReadXX functions; I don't think reinterpret_cast does it.
+int32_t Simulator::ReadW(int64_t addr, Instruction* instr) {
+  if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference, drop into debugger.
+    PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    DieOrDebug();
+  }
+  if ((addr & 0x3) == 0) {
+    int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+    TraceMemRd(addr, static_cast<int64_t>(*ptr));
+    return *ptr;
+  }
+  PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+  return 0;
+}
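+
+// Alignment sketch: word accesses require the low two bits of the address to
+// be clear, so e.g. 0x1004 passes (0x1004 & 0x3 == 0) while 0x1006 falls
+// through to the unaligned-read path and DieOrDebug().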
+
+
+uint32_t Simulator::ReadWU(int64_t addr, Instruction* instr) {
+  if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference, drop into debugger.
+    PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    DieOrDebug();
+  }
+  if ((addr & 0x3) == 0) {
+    uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+    TraceMemRd(addr, static_cast<int64_t>(*ptr));
+    return *ptr;
+  }
+  PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+  return 0;
+}
+
+
+void Simulator::WriteW(int64_t addr, int value, Instruction* instr) {
+  if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference, drop into debugger.
+    PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    DieOrDebug();
+  }
+  if ((addr & 0x3) == 0) {
+    TraceMemWr(addr, value, WORD);
+    int* ptr = reinterpret_cast<int*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+}
+
+
+int64_t Simulator::Read2W(int64_t addr, Instruction* instr) {
+  if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference, drop into debugger.
+    PrintF("Memory read from bad address: 0x%08lx, pc=0x%08lx\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    DieOrDebug();
+  }
+  if ((addr & kPointerAlignmentMask) == 0) {
+    int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+    TraceMemRd(addr, *ptr);
+    return *ptr;
+  }
+  PrintF("Unaligned read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+  return 0;
+}
+
+
+void Simulator::Write2W(int64_t addr, int64_t value, Instruction* instr) {
+  if (addr >= 0 && addr < 0x400) {
+    // This has to be a NULL-dereference, drop into debugger.
+    PrintF("Memory write to bad address: 0x%08lx, pc=0x%08lx\n",
+           addr, reinterpret_cast<intptr_t>(instr));
+    DieOrDebug();
+  }
+  if ((addr & kPointerAlignmentMask) == 0) {
+    TraceMemWr(addr, value, DWORD);
+    int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+}
+
+
+double Simulator::ReadD(int64_t addr, Instruction* instr) {
+  if ((addr & kDoubleAlignmentMask) == 0) {
+    double* ptr = reinterpret_cast<double*>(addr);
+    return *ptr;
+  }
+  PrintF("Unaligned (double) read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  base::OS::Abort();
+  return 0;
+}
+
+
+void Simulator::WriteD(int64_t addr, double value, Instruction* instr) {
+  if ((addr & kDoubleAlignmentMask) == 0) {
+    double* ptr = reinterpret_cast<double*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned (double) write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+}
+
+
+uint16_t Simulator::ReadHU(int64_t addr, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    TraceMemRd(addr, static_cast<int64_t>(*ptr));
+    return *ptr;
+  }
+  PrintF("Unaligned unsigned halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+  return 0;
+}
+
+
+int16_t Simulator::ReadH(int64_t addr, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+    TraceMemRd(addr, static_cast<int64_t>(*ptr));
+    return *ptr;
+  }
+  PrintF("Unaligned signed halfword read at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+  return 0;
+}
+
+
+void Simulator::WriteH(int64_t addr, uint16_t value, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    TraceMemWr(addr, value, HALF);
+    uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF(
+      "Unaligned unsigned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+      addr,
+      reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+}
+
+
+void Simulator::WriteH(int64_t addr, int16_t value, Instruction* instr) {
+  if ((addr & 1) == 0) {
+    TraceMemWr(addr, value, HALF);
+    int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+    *ptr = value;
+    return;
+  }
+  PrintF("Unaligned halfword write at 0x%08lx, pc=0x%08" V8PRIxPTR "\n",
+         addr,
+         reinterpret_cast<intptr_t>(instr));
+  DieOrDebug();
+}
+
+
+uint32_t Simulator::ReadBU(int64_t addr) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  TraceMemRd(addr, static_cast<int64_t>(*ptr));
+  return *ptr & 0xff;
+}
+
+
+int32_t Simulator::ReadB(int64_t addr) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  TraceMemRd(addr, static_cast<int64_t>(*ptr));
+  return *ptr;
+}
+
+
+void Simulator::WriteB(int64_t addr, uint8_t value) {
+  TraceMemWr(addr, value, BYTE);
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  *ptr = value;
+}
+
+
+void Simulator::WriteB(int64_t addr, int8_t value) {
+  TraceMemWr(addr, value, BYTE);
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  *ptr = value;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+  // pushing values.
+  return reinterpret_cast<uintptr_t>(stack_) + 1024;
+}
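+
+// Usage sketch (illustrative only): since the stack grows downward, callers
+// treat any sp at or below this limit as an overflow, roughly
+//   if (static_cast<uintptr_t>(get_register(sp)) <= StackLimit()) { /* bail */ }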
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+  PrintF("Simulator found unsupported instruction:\n 0x%08lx: %s\n",
+         reinterpret_cast<intptr_t>(instr), format);
+  UNIMPLEMENTED_MIPS();
+}
+
+
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair, which on mips64 is essentially two pointer-sized values
+// returned in the v0/v1 register pair. With the code below we assume that all
+// runtime calls return an ObjectPair's worth of result. If they don't, the v1
+// result register contains a bogus value, which is fine because it is
+// caller-saved.
+
+struct ObjectPair {
+  Object* x;
+  Object* y;
+};
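+
+// Register mapping sketch: for a runtime call returning an ObjectPair,
+// result.x is written to v0 and result.y to v1 (see SoftwareInterrupt
+// below); single-value calls simply leave a bogus value in v1.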
+
+typedef ObjectPair (*SimulatorRuntimeCall)(int64_t arg0,
+                                        int64_t arg1,
+                                        int64_t arg2,
+                                        int64_t arg3,
+                                        int64_t arg4,
+                                        int64_t arg5);
+
+
+// These prototypes handle the four types of FP calls.
+typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
+
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(int64_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(int64_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(int64_t arg0, int64_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(
+    int64_t arg0, int64_t arg1, void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime. They are also used for debugging with the simulator.
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+  // There are several instructions that could get us here: the break_
+  // instruction, or several variants of traps. All are "SPECIAL" class
+  // opcodes, distinguished by the function field.
+  int32_t func = instr->FunctionFieldRaw();
+  uint32_t code = (func == BREAK) ? instr->Bits(25, 6) : -1;
+  // First, check whether this is a redirected runtime call (call_rt_redirected).
+  if (instr->InstructionBits() == rtCallRedirInstr) {
+    Redirection* redirection = Redirection::FromSwiInstruction(instr);
+    int64_t arg0 = get_register(a0);
+    int64_t arg1 = get_register(a1);
+    int64_t arg2 = get_register(a2);
+    int64_t arg3 = get_register(a3);
+    int64_t arg4, arg5;
+
+    if (kMipsAbi == kN64) {
+      arg4 = get_register(a4);  // Abi n64 register a4.
+      arg5 = get_register(a5);  // Abi n64 register a5.
+    } else {  // Abi O32.
+      int64_t* stack_pointer = reinterpret_cast<int64_t*>(get_register(sp));
+      // Args 4 and 5 are on the stack after the reserved space for args 0..3.
+      arg4 = stack_pointer[4];
+      arg5 = stack_pointer[5];
+    }
+    bool fp_call =
+         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+         (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+
+    if (!IsMipsSoftFloatABI) {
+      // With the hard floating point calling convention, double
+      // arguments are passed in FPU registers. Fetch the arguments
+      // from there and call the builtin using soft floating point
+      // convention.
+      switch (redirection->type()) {
+      case ExternalReference::BUILTIN_FP_FP_CALL:
+      case ExternalReference::BUILTIN_COMPARE_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        arg2 = get_fpu_register(f14);
+        arg3 = get_fpu_register(f15);
+        break;
+      case ExternalReference::BUILTIN_FP_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        break;
+      case ExternalReference::BUILTIN_FP_INT_CALL:
+        arg0 = get_fpu_register(f12);
+        arg1 = get_fpu_register(f13);
+        arg2 = get_register(a2);
+        break;
+      default:
+        break;
+      }
+    }
+
+    // This is dodgy but it works because the C entry stubs are never moved.
+    // See comment in codegen-arm.cc and bug 1242173.
+    int64_t saved_ra = get_register(ra);
+
+    intptr_t external =
+          reinterpret_cast<intptr_t>(redirection->external_function());
+
+    // Based on CpuFeatures::IsSupported(FPU), Mips will use either hardware
+    // FPU, or gcc soft-float routines. Hardware FPU is simulated in this
+    // simulator. Soft-float has additional abstraction of ExternalReference,
+    // to support serialization.
+    if (fp_call) {
+      double dval0, dval1;  // one or two double parameters
+      int32_t ival;         // zero or one integer parameters
+      int64_t iresult = 0;  // integer return value
+      double dresult = 0;   // double return value
+      GetFpArgs(&dval0, &dval1, &ival);
+      SimulatorRuntimeCall generic_target =
+          reinterpret_cast<SimulatorRuntimeCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        switch (redirection->type()) {
+          case ExternalReference::BUILTIN_FP_FP_CALL:
+          case ExternalReference::BUILTIN_COMPARE_CALL:
+            PrintF("Call to host function at %p with args %f, %f",
+                   FUNCTION_ADDR(generic_target), dval0, dval1);
+            break;
+          case ExternalReference::BUILTIN_FP_CALL:
+            PrintF("Call to host function at %p with arg %f",
+                FUNCTION_ADDR(generic_target), dval0);
+            break;
+          case ExternalReference::BUILTIN_FP_INT_CALL:
+            PrintF("Call to host function at %p with args %f, %d",
+                   FUNCTION_ADDR(generic_target), dval0, ival);
+            break;
+          default:
+            UNREACHABLE();
+            break;
+        }
+      }
+      switch (redirection->type()) {
+      case ExternalReference::BUILTIN_COMPARE_CALL: {
+        SimulatorRuntimeCompareCall target =
+          reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+        iresult = target(dval0, dval1);
+        set_register(v0, static_cast<int64_t>(iresult));
+      //  set_register(v1, static_cast<int64_t>(iresult >> 32));
+        break;
+      }
+      case ExternalReference::BUILTIN_FP_FP_CALL: {
+        SimulatorRuntimeFPFPCall target =
+          reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+        dresult = target(dval0, dval1);
+        SetFpResult(dresult);
+        break;
+      }
+      case ExternalReference::BUILTIN_FP_CALL: {
+        SimulatorRuntimeFPCall target =
+          reinterpret_cast<SimulatorRuntimeFPCall>(external);
+        dresult = target(dval0);
+        SetFpResult(dresult);
+        break;
+      }
+      case ExternalReference::BUILTIN_FP_INT_CALL: {
+        SimulatorRuntimeFPIntCall target =
+          reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+        dresult = target(dval0, ival);
+        SetFpResult(dresult);
+        break;
+      }
+      default:
+        UNREACHABLE();
+        break;
+      }
+      if (::v8::internal::FLAG_trace_sim) {
+        switch (redirection->type()) {
+        case ExternalReference::BUILTIN_COMPARE_CALL:
+          PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
+          break;
+        case ExternalReference::BUILTIN_FP_FP_CALL:
+        case ExternalReference::BUILTIN_FP_CALL:
+        case ExternalReference::BUILTIN_FP_INT_CALL:
+          PrintF("Returned %f\n", dresult);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+        }
+      }
+    } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08lx\n",
+            reinterpret_cast<void*>(external), arg0);
+      }
+      SimulatorRuntimeDirectApiCall target =
+          reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+      target(arg0);
+    } else if (
+        redirection->type() == ExternalReference::PROFILING_API_CALL) {
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08lx %08lx\n",
+            reinterpret_cast<void*>(external), arg0, arg1);
+      }
+      SimulatorRuntimeProfilingApiCall target =
+          reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+      target(arg0, Redirection::ReverseRedirection(arg1));
+    } else if (
+        redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08lx %08lx\n",
+            reinterpret_cast<void*>(external), arg0, arg1);
+      }
+      SimulatorRuntimeDirectGetterCall target =
+          reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+      target(arg0, arg1);
+    } else if (
+        redirection->type() == ExternalReference::PROFILING_GETTER_CALL) {
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF("Call to host function at %p args %08lx %08lx %08lx\n",
+            reinterpret_cast<void*>(external), arg0, arg1, arg2);
+      }
+      SimulatorRuntimeProfilingGetterCall target =
+          reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+      target(arg0, arg1, Redirection::ReverseRedirection(arg2));
+    } else {
+      SimulatorRuntimeCall target =
+                  reinterpret_cast<SimulatorRuntimeCall>(external);
+      if (::v8::internal::FLAG_trace_sim) {
+        PrintF(
+            "Call to host function at %p "
+            "args %08lx, %08lx, %08lx, %08lx, %08lx, %08lx\n",
+            FUNCTION_ADDR(target),
+            arg0,
+            arg1,
+            arg2,
+            arg3,
+            arg4,
+            arg5);
+      }
+      // int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+      // set_register(v0, static_cast<int32_t>(result));
+      // set_register(v1, static_cast<int32_t>(result >> 32));
+      ObjectPair result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+      set_register(v0, (int64_t)(result.x));
+      set_register(v1, (int64_t)(result.y));
+    }
+    if (::v8::internal::FLAG_trace_sim) {
+      PrintF("Returned %08lx : %08lx\n", get_register(v1), get_register(v0));
+    }
+    set_register(ra, saved_ra);
+    set_pc(get_register(ra));
+
+  } else if (func == BREAK && code <= kMaxStopCode) {
+    if (IsWatchpoint(code)) {
+      PrintWatchpoint(code);
+    } else {
+      IncreaseStopCounter(code);
+      HandleStop(code, instr);
+    }
+  } else {
+    // All remaining break_ codes, and all traps are handled here.
+    MipsDebugger dbg(this);
+    dbg.Debug();
+  }
+}
+
+
+// Stop helper functions.
+bool Simulator::IsWatchpoint(uint64_t code) {
+  return (code <= kMaxWatchpointCode);
+}
+
+
+void Simulator::PrintWatchpoint(uint64_t code) {
+  MipsDebugger dbg(this);
+  ++break_count_;
+  PrintF("\n---- break %ld marker: %3d  (instr count: %8ld) ----------"
+         "----------------------------------",
+         code, break_count_, icount_);
+  dbg.PrintAllRegs();  // Print registers and continue running.
+}
+
+
+void Simulator::HandleStop(uint64_t code, Instruction* instr) {
+  // Stop if it is enabled; otherwise continue, jumping over the stop
+  // instruction and the message address.
+  if (IsEnabledStop(code)) {
+    MipsDebugger dbg(this);
+    dbg.Stop(instr);
+  } else {
+    set_pc(get_pc() + 2 * Instruction::kInstrSize);
+  }
+}
+
+
+bool Simulator::IsStopInstruction(Instruction* instr) {
+  int32_t func = instr->FunctionFieldRaw();
+  uint32_t code = static_cast<uint32_t>(instr->Bits(25, 6));
+  return (func == BREAK) && code > kMaxWatchpointCode && code <= kMaxStopCode;
+}
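+
+// Encoding example: a break instruction whose code field (bits 25..6) is in
+// (kMaxWatchpointCode, kMaxStopCode] is a stop; codes at or below
+// kMaxWatchpointCode are watchpoints handled by PrintWatchpoint().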
+
+
+bool Simulator::IsEnabledStop(uint64_t code) {
+  DCHECK(code <= kMaxStopCode);
+  DCHECK(code > kMaxWatchpointCode);
+  return !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+
+void Simulator::EnableStop(uint64_t code) {
+  if (!IsEnabledStop(code)) {
+    watched_stops_[code].count &= ~kStopDisabledBit;
+  }
+}
+
+
+void Simulator::DisableStop(uint64_t code) {
+  if (IsEnabledStop(code)) {
+    watched_stops_[code].count |= kStopDisabledBit;
+  }
+}
+
+
+void Simulator::IncreaseStopCounter(uint64_t code) {
+  DCHECK(code <= kMaxStopCode);
+  if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+    PrintF("Stop counter for code %ld has overflowed.\n"
+           "Enabling this code and reseting the counter to 0.\n", code);
+    watched_stops_[code].count = 0;
+    EnableStop(code);
+  } else {
+    watched_stops_[code].count++;
+  }
+}
+
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint64_t code) {
+  if (code <= kMaxWatchpointCode) {
+    PrintF("That is a watchpoint, not a stop.\n");
+    return;
+  } else if (code > kMaxStopCode) {
+    PrintF("Code too large, only %u stops can be used\n", kMaxStopCode + 1);
+    return;
+  }
+  const char* state = IsEnabledStop(code) ? "Enabled" : "Disabled";
+  int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+  // Don't print the state of unused breakpoints.
+  if (count != 0) {
+    if (watched_stops_[code].desc) {
+      PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i, \t%s\n",
+             code, code, state, count, watched_stops_[code].desc);
+    } else {
+      PrintF("stop %ld - 0x%lx: \t%s, \tcounter = %i\n",
+             code, code, state, count);
+    }
+  }
+}
+
+
+void Simulator::SignalExceptions() {
+  for (int i = 1; i < kNumExceptions; i++) {
+    if (exceptions[i] != 0) {
+      V8_Fatal(__FILE__, __LINE__, "Error: Exception %i raised.", i);
+    }
+  }
+}
+
+
+// Handle execution based on instruction types.
+
+void Simulator::ConfigureTypeRegister(Instruction* instr,
+                                      int64_t* alu_out,
+                                      int64_t* i64hilo,
+                                      uint64_t* u64hilo,
+                                      int64_t* next_pc,
+                                      int64_t* return_addr_reg,
+                                      bool* do_interrupt,
+                                      int64_t* i128resultH,
+                                      int64_t* i128resultL) {
+  // Every local variable declared here needs to be const; values that must
+  // be sent back to DecodeTypeRegister are passed only through the output
+  // pointer parameters.
+
+  // Instruction fields.
+  const Opcode   op     = instr->OpcodeFieldRaw();
+  const int64_t  rs_reg = instr->RsValue();
+  const int64_t  rs     = get_register(rs_reg);
+  const uint64_t rs_u   = static_cast<uint64_t>(rs);
+  const int64_t  rt_reg = instr->RtValue();
+  const int64_t  rt     = get_register(rt_reg);
+  const uint64_t rt_u   = static_cast<uint64_t>(rt);
+  const int64_t  rd_reg = instr->RdValue();
+  const uint64_t sa     = instr->SaValue();
+
+  const int32_t  fs_reg = instr->FsValue();
+
+  // ---------- Configuration.
+  switch (op) {
+    case COP1:    // Coprocessor instructions.
+      switch (instr->RsFieldRaw()) {
+        case CFC1:
+          // At the moment only FCSR is supported.
+          DCHECK(fs_reg == kFCSRRegister);
+          *alu_out = FCSR_;
+          break;
+        case MFC1:
+          *alu_out = static_cast<int64_t>(get_fpu_register_word(fs_reg));
+          break;
+        case DMFC1:
+          *alu_out = get_fpu_register(fs_reg);
+          break;
+        case MFHC1:
+          *alu_out = get_fpu_register_hi_word(fs_reg);
+          break;
+        case CTC1:
+        case MTC1:
+        case DMTC1:
+        case MTHC1:
+        case S:
+        case D:
+        case W:
+        case L:
+        case PS:
+          // Do everything in the execution step.
+          break;
+        default:
+          // BC1, BC1EQZ and BC1NEZ are handled in DecodeTypeImmediate and
+          // should never come here.
+          UNREACHABLE();
+      }
+      break;
+    case COP1X:
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR:
+        case JALR:
+          *next_pc = get_register(instr->RsValue());
+          *return_addr_reg = instr->RdValue();
+          break;
+        case SLL:
+          *alu_out = (int32_t)rt << sa;
+          break;
+        case DSLL:
+          *alu_out = rt << sa;
+          break;
+        case DSLL32:
+          *alu_out = rt << sa << 32;
+          break;
+        case SRL:
+          if (rs_reg == 0) {
+            // Regular logical right shift of a word by a fixed number of
+            // bits. The RS field is always equal to 0.
+            *alu_out = (uint32_t)rt_u >> sa;
+          } else {
+            // Logical right-rotate of a word by a fixed number of bits. This
+            // is a special case of the SRL instruction, added in MIPS32
+            // Release 2. The RS field is equal to 00001.
+            *alu_out = ((uint32_t)rt_u >> sa) | ((uint32_t)rt_u << (32 - sa));
+          }
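+          // Rotate example: rt_u = 0x80000001 with sa = 1 yields 0xC0000000
+          // (bit 0 wraps around into bit 31).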
+          break;
+        case DSRL:
+          *alu_out = rt_u >> sa;
+          break;
+        case DSRL32:
+          *alu_out = rt_u >> sa >> 32;
+          break;
+        case SRA:
+          *alu_out = (int32_t)rt >> sa;
+          break;
+        case DSRA:
+          *alu_out = rt >> sa;
+          break;
+        case DSRA32:
+          *alu_out = rt >> sa >> 32;
+          break;
+        case SLLV:
+          *alu_out = (int32_t)rt << rs;
+          break;
+        case DSLLV:
+          *alu_out = rt << rs;
+          break;
+        case SRLV:
+          if (sa == 0) {
+            // Regular logical right-shift of a word by a variable number of
+            // bits. The SA field is always equal to 0.
+            *alu_out = (uint32_t)rt_u >> rs;
+          } else {
+            // Logical right-rotate of a word by a variable number of bits.
+            // This is a special case of the SRLV instruction, added in MIPS32
+            // Release 2. The SA field is equal to 00001.
+            *alu_out =
+                ((uint32_t)rt_u >> rs_u) | ((uint32_t)rt_u << (32 - rs_u));
+          }
+          break;
+        case DSRLV:
+          if (sa == 0) {
+            // Regular logical right-shift of a doubleword by a variable
+            // number of bits. The SA field is always equal to 0.
+            *alu_out = rt_u >> rs;
+          } else {
+            // Logical right-rotate of a doubleword by a variable number of
+            // bits. This is a special case of the DSRLV instruction, added in
+            // MIPS64 Release 2. The SA field is equal to 00001.
+            *alu_out = (rt_u >> rs_u) | (rt_u << (32 - rs_u));
+          }
+          break;
+        case SRAV:
+          *alu_out = (int32_t)rt >> rs;
+          break;
+        case DSRAV:
+          *alu_out = rt >> rs;
+          break;
+        case MFHI:  // MFHI == CLZ on R6.
+          if (kArchVariant != kMips64r6) {
+            DCHECK(instr->SaValue() == 0);
+            *alu_out = get_register(HI);
+          } else {
+            // MIPS spec: If no bits were set in GPR rs, the result written to
+            // GPR rd is 32.
+            DCHECK(instr->SaValue() == 1);
+            *alu_out = base::bits::CountLeadingZeros32(rs_u);
+          }
+          break;
+        case MFLO:
+          *alu_out = get_register(LO);
+          break;
+        case MULT:  // MULT == D_MUL_MUH.
+          // TODO(plind) - Unify MULT/DMULT with single set of 64-bit HI/Lo
+          // regs.
+          // TODO(plind) - make the 32-bit MULT ops conform to spec regarding
+          //   checking of 32-bit input values, and the operations the
+          //   hardware leaves undefined.
+          *i64hilo = static_cast<int64_t>((int32_t)rs) *
+              static_cast<int64_t>((int32_t)rt);
+          break;
+        case MULTU:
+          *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+          break;
+        case DMULT:  // DMULT == D_MUL_MUH.
+          if (kArchVariant != kMips64r6) {
+            *i128resultH = MultiplyHighSigned(rs, rt);
+            *i128resultL = rs * rt;
+          } else {
+            switch (instr->SaValue()) {
+              case MUL_OP:
+                *i128resultL = rs * rt;
+                break;
+              case MUH_OP:
+                *i128resultH = MultiplyHighSigned(rs, rt);
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          }
+          break;
+        case DMULTU:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case ADD:
+        case DADD:
+          if (HaveSameSign(rs, rt)) {
+            if (rs > 0) {
+              exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - rt);
+            } else if (rs < 0) {
+              exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue - rt);
+            }
+          }
+          *alu_out = rs + rt;
+          break;
+        case ADDU: {
+            int32_t alu32_out = rs + rt;
+            // Sign-extend result of the 32-bit operation into 64-bit register.
+            *alu_out = static_cast<int64_t>(alu32_out);
+          }
+          break;
+        case DADDU:
+          *alu_out = rs + rt;
+          break;
+        case SUB:
+        case DSUB:
+          if (!HaveSameSign(rs, rt)) {
+            if (rs > 0) {
+              exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue + rt);
+            } else if (rs < 0) {
+              exceptions[kIntegerUnderflow] = rs < (Registers::kMinValue + rt);
+            }
+          }
+          *alu_out = rs - rt;
+          break;
+        case SUBU: {
+            int32_t alu32_out = rs - rt;
+            // Sign-extend result of the 32-bit operation into 64-bit register.
+            *alu_out = static_cast<int64_t>(alu32_out);
+          }
+          break;
+        case DSUBU:
+          *alu_out = rs - rt;
+          break;
+        case AND:
+          *alu_out = rs & rt;
+          break;
+        case OR:
+          *alu_out = rs | rt;
+          break;
+        case XOR:
+          *alu_out = rs ^ rt;
+          break;
+        case NOR:
+          *alu_out = ~(rs | rt);
+          break;
+        case SLT:
+          *alu_out = rs < rt ? 1 : 0;
+          break;
+        case SLTU:
+          *alu_out = rs_u < rt_u ? 1 : 0;
+          break;
+        // Break and trap instructions.
+        case BREAK:
+          *do_interrupt = true;
+          break;
+        case TGE:
+          *do_interrupt = rs >= rt;
+          break;
+        case TGEU:
+          *do_interrupt = rs_u >= rt_u;
+          break;
+        case TLT:
+          *do_interrupt = rs < rt;
+          break;
+        case TLTU:
+          *do_interrupt = rs_u < rt_u;
+          break;
+        case TEQ:
+          *do_interrupt = rs == rt;
+          break;
+        case TNE:
+          *do_interrupt = rs != rt;
+          break;
+        case MOVN:
+        case MOVZ:
+        case MOVCI:
+          // No action taken on decode.
+          break;
+        case DIV:
+        case DIVU:
+        case DDIV:
+        case DDIVU:
+          // div and divu never raise exceptions.
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          // Only the lower 32 bits are kept.
+          *alu_out = (int32_t)rs_u * (int32_t)rt_u;
+          break;
+        case CLZ:
+          // MIPS32 spec: If no bits were set in GPR rs, the result written to
+          // GPR rd is 32.
+          *alu_out = base::bits::CountLeadingZeros32(rs_u);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case SPECIAL3:
+      switch (instr->FunctionFieldRaw()) {
+        case INS: {   // Mips32r2 instruction.
+          // Interpret rd field as 5-bit msb of insert.
+          uint16_t msb = rd_reg;
+          // Interpret sa field as 5-bit lsb of insert.
+          uint16_t lsb = sa;
+          uint16_t size = msb - lsb + 1;
+          uint32_t mask = (1 << size) - 1;
+          *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
+          break;
+        }
+        case EXT: {   // Mips32r2 instruction.
+          // Interpret rd field as 5-bit msb of extract.
+          uint16_t msb = rd_reg;
+          // Interpret sa field as 5-bit lsb of extract.
+          uint16_t lsb = sa;
+          uint16_t size = msb + 1;
+          uint32_t mask = (1 << size) - 1;
+          *alu_out = (rs_u & (mask << lsb)) >> lsb;
+          break;
+        }
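+        // Bitfield example: INS with msb = 7 and lsb = 4 gives size = 4 and
+        // mask = 0xF, inserting the low four bits of rs into bits 7..4 of rt;
+        // the matching EXT with msb field 3 and lsb 4 extracts them again.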
+        default:
+          UNREACHABLE();
+      }
+      break;
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void Simulator::DecodeTypeRegister(Instruction* instr) {
+  // Instruction fields.
+  const Opcode   op     = instr->OpcodeFieldRaw();
+  const int64_t  rs_reg = instr->RsValue();
+  const int64_t  rs     = get_register(rs_reg);
+  const uint64_t rs_u   = static_cast<uint32_t>(rs);
+  const int64_t  rt_reg = instr->RtValue();
+  const int64_t  rt     = get_register(rt_reg);
+  const uint64_t rt_u   = static_cast<uint32_t>(rt);
+  const int64_t  rd_reg = instr->RdValue();
+
+  const int32_t  fr_reg = instr->FrValue();
+  const int32_t  fs_reg = instr->FsValue();
+  const int32_t  ft_reg = instr->FtValue();
+  const int64_t  fd_reg = instr->FdValue();
+  int64_t  i64hilo = 0;
+  uint64_t u64hilo = 0;
+
+  // ALU output.
+  // It should not be used as-is; instructions using it should always
+  // initialize it first.
+  int64_t alu_out = 0x12345678;
+
+  // For break and trap instructions.
+  bool do_interrupt = false;
+
+  // For jr and jalr.
+  // Get current pc.
+  int64_t current_pc = get_pc();
+  // Next pc
+  int64_t next_pc = 0;
+  int64_t return_addr_reg = 31;
+
+  int64_t i128resultH;
+  int64_t i128resultL;
+
+  // Set up the variables if needed before executing the instruction.
+  ConfigureTypeRegister(instr,
+                        &alu_out,
+                        &i64hilo,
+                        &u64hilo,
+                        &next_pc,
+                        &return_addr_reg,
+                        &do_interrupt,
+                        &i128resultH,
+                        &i128resultL);
+
+  // ---------- Raise exceptions triggered.
+  SignalExceptions();
+
+  // ---------- Execution.
+  switch (op) {
+    case COP1:
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // Branch on coprocessor condition.
+        case BC1EQZ:
+        case BC1NEZ:
+          UNREACHABLE();
+          break;
+        case CFC1:
+          set_register(rt_reg, alu_out);
+          break;
+        case MFC1:
+        case DMFC1:
+        case MFHC1:
+          set_register(rt_reg, alu_out);
+          break;
+        case CTC1:
+          // At the moment only FCSR is supported.
+          DCHECK(fs_reg == kFCSRRegister);
+          FCSR_ = registers_[rt_reg];
+          break;
+        case MTC1:
+          // Hardware writes upper 32-bits to zero on mtc1.
+          set_fpu_register_hi_word(fs_reg, 0);
+          set_fpu_register_word(fs_reg, registers_[rt_reg]);
+          break;
+        case DMTC1:
+          set_fpu_register(fs_reg, registers_[rt_reg]);
+          break;
+        case MTHC1:
+          set_fpu_register_hi_word(fs_reg, registers_[rt_reg]);
+          break;
+        case S:
+          float f;
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_D_S:
+              f = get_fpu_register_float(fs_reg);
+              set_fpu_register_double(fd_reg, static_cast<double>(f));
+              break;
+            default:
+            // CVT_W_S CVT_L_S TRUNC_W_S ROUND_W_S ROUND_L_S FLOOR_W_S FLOOR_L_S
+            // CEIL_W_S CEIL_L_S CVT_PS_S are unimplemented.
+              UNREACHABLE();
+          }
+          break;
+        case D:
+          double ft, fs;
+          uint32_t cc, fcsr_cc;
+          int64_t  i64;
+          fs = get_fpu_register_double(fs_reg);
+          ft = get_fpu_register_double(ft_reg);
+          cc = instr->FCccValue();
+          fcsr_cc = get_fcsr_condition_bit(cc);
+          switch (instr->FunctionFieldRaw()) {
+            case ADD_D:
+              set_fpu_register_double(fd_reg, fs + ft);
+              break;
+            case SUB_D:
+              set_fpu_register_double(fd_reg, fs - ft);
+              break;
+            case MUL_D:
+              set_fpu_register_double(fd_reg, fs * ft);
+              break;
+            case DIV_D:
+              set_fpu_register_double(fd_reg, fs / ft);
+              break;
+            case ABS_D:
+              set_fpu_register_double(fd_reg, fabs(fs));
+              break;
+            case MOV_D:
+              set_fpu_register_double(fd_reg, fs);
+              break;
+            case NEG_D:
+              set_fpu_register_double(fd_reg, -fs);
+              break;
+            case SQRT_D:
+              set_fpu_register_double(fd_reg, sqrt(fs));
+              break;
+            case C_UN_D:
+              set_fcsr_bit(fcsr_cc, std::isnan(fs) || std::isnan(ft));
+              break;
+            case C_EQ_D:
+              set_fcsr_bit(fcsr_cc, (fs == ft));
+              break;
+            case C_UEQ_D:
+              set_fcsr_bit(fcsr_cc,
+                           (fs == ft) || (std::isnan(fs) || std::isnan(ft)));
+              break;
+            case C_OLT_D:
+              set_fcsr_bit(fcsr_cc, (fs < ft));
+              break;
+            case C_ULT_D:
+              set_fcsr_bit(fcsr_cc,
+                           (fs < ft) || (std::isnan(fs) || std::isnan(ft)));
+              break;
+            case C_OLE_D:
+              set_fcsr_bit(fcsr_cc, (fs <= ft));
+              break;
+            case C_ULE_D:
+              set_fcsr_bit(fcsr_cc,
+                           (fs <= ft) || (std::isnan(fs) || std::isnan(ft)));
+              break;
+            case CVT_W_D:   // Convert double to word.
+              // Rounding modes are not yet supported.
+              DCHECK((FCSR_ & 3) == 0);
+              // In rounding mode 0 it should behave like ROUND.
+              // No break.
+            case ROUND_W_D:  // Round double to word (round half to even).
+              {
+                double rounded = std::floor(fs + 0.5);
+                int32_t result = static_cast<int32_t>(rounded);
+                if ((result & 1) != 0 && result - fs == 0.5) {
+                  // If the number is halfway between two integers,
+                  // round to the even one.
+                  result--;
+                }
+                set_fpu_register_word(fd_reg, result);
+                if (set_fcsr_round_error(fs, rounded)) {
+                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                }
+              }
+              break;
+            case TRUNC_W_D:  // Truncate double to word (round towards 0).
+              {
+                double rounded = trunc(fs);
+                int32_t result = static_cast<int32_t>(rounded);
+                set_fpu_register_word(fd_reg, result);
+                if (set_fcsr_round_error(fs, rounded)) {
+                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                }
+              }
+              break;
+            case FLOOR_W_D:  // Round double to word towards negative infinity.
+              {
+                double rounded = std::floor(fs);
+                int32_t result = static_cast<int32_t>(rounded);
+                set_fpu_register_word(fd_reg, result);
+                if (set_fcsr_round_error(fs, rounded)) {
+                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                }
+              }
+              break;
+            case CEIL_W_D:  // Round double to word towards positive infinity.
+              {
+                double rounded = std::ceil(fs);
+                int32_t result = static_cast<int32_t>(rounded);
+                set_fpu_register_word(fd_reg, result);
+                if (set_fcsr_round_error(fs, rounded)) {
+                  set_fpu_register(fd_reg, kFPUInvalidResult);
+                }
+              }
+              break;
+            case CVT_S_D:  // Convert double to float (single).
+              set_fpu_register_float(fd_reg, static_cast<float>(fs));
+              break;
+            case CVT_L_D:   // Mips64r2: Truncate double to 64-bit long-word.
+              // Rounding modes are not yet supported.
+              DCHECK((FCSR_ & 3) == 0);
+              // In rounding mode 0 it should behave like ROUND.
+              // No break.
+            case ROUND_L_D: {  // Mips64r2 instruction.
+              // check error cases
+              double rounded = fs > 0 ? floor(fs + 0.5) : ceil(fs - 0.5);
+              int64_t result = static_cast<int64_t>(rounded);
+              set_fpu_register(fd_reg, result);
+              if (set_fcsr_round64_error(fs, rounded)) {
+                set_fpu_register(fd_reg, kFPU64InvalidResult);
+              }
+              break;
+            }
+            case TRUNC_L_D: {  // Mips64r2 instruction.
+              double rounded = trunc(fs);
+              int64_t result = static_cast<int64_t>(rounded);
+              set_fpu_register(fd_reg, result);
+              if (set_fcsr_round64_error(fs, rounded)) {
+                set_fpu_register(fd_reg, kFPU64InvalidResult);
+              }
+              break;
+            }
+            case FLOOR_L_D: {  // Mips64r2 instruction.
+              double rounded = floor(fs);
+              int64_t result = static_cast<int64_t>(rounded);
+              set_fpu_register(fd_reg, result);
+              if (set_fcsr_round64_error(fs, rounded)) {
+                set_fpu_register(fd_reg, kFPU64InvalidResult);
+              }
+              break;
+            }
+            case CEIL_L_D: {  // Mips64r2 instruction.
+              double rounded = ceil(fs);
+              int64_t result = static_cast<int64_t>(rounded);
+              set_fpu_register(fd_reg, result);
+              if (set_fcsr_round64_error(fs, rounded)) {
+                set_fpu_register(fd_reg, kFPU64InvalidResult);
+              }
+              break;
+            }
+            case C_F_D:
+              UNIMPLEMENTED_MIPS();
+              break;
+            default:
+              UNREACHABLE();
+          }
+          break;
+        case W:
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_S_W:   // Convert word to float (single).
+              alu_out = get_fpu_register_signed_word(fs_reg);
+              set_fpu_register_float(fd_reg, static_cast<float>(alu_out));
+              break;
+            case CVT_D_W:   // Convert word to double.
+              alu_out = get_fpu_register_signed_word(fs_reg);
+              set_fpu_register_double(fd_reg, static_cast<double>(alu_out));
+              break;
+            default:  // Mips64r6 CMP.S instructions unimplemented.
+              UNREACHABLE();
+          }
+          break;
+        case L:
+          fs = get_fpu_register_double(fs_reg);
+          ft = get_fpu_register_double(ft_reg);
+          switch (instr->FunctionFieldRaw()) {
+            case CVT_D_L:  // Mips32r2 instruction.
+              i64 = get_fpu_register(fs_reg);
+              set_fpu_register_double(fd_reg, static_cast<double>(i64));
+              break;
+            case CVT_S_L:
+              UNIMPLEMENTED_MIPS();
+              break;
+            case CMP_AF:  // Mips64r6 CMP.D instructions.
+              UNIMPLEMENTED_MIPS();
+              break;
+            case CMP_UN:
+              if (std::isnan(fs) || std::isnan(ft)) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_EQ:
+              if (fs == ft) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_UEQ:
+              if ((fs == ft) || (std::isnan(fs) || std::isnan(ft))) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_LT:
+              if (fs < ft) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_ULT:
+              if ((fs < ft) || (std::isnan(fs) || std::isnan(ft))) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_LE:
+              if (fs <= ft) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            case CMP_ULE:
+              if ((fs <= ft) || (std::isnan(fs) || std::isnan(ft))) {
+                set_fpu_register(fd_reg, -1);
+              } else {
+                set_fpu_register(fd_reg, 0);
+              }
+              break;
+            default:  // CMP_OR CMP_UNE CMP_NE UNIMPLEMENTED
+              UNREACHABLE();
+          }
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case COP1X:
+      switch (instr->FunctionFieldRaw()) {
+        case MADD_D:
+          double fr, ft, fs;
+          fr = get_fpu_register_double(fr_reg);
+          fs = get_fpu_register_double(fs_reg);
+          ft = get_fpu_register_double(ft_reg);
+          set_fpu_register_double(fd_reg, fs * ft + fr);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    case SPECIAL:
+      switch (instr->FunctionFieldRaw()) {
+        case JR: {
+          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+              current_pc + Instruction::kInstrSize);
+          BranchDelayInstructionDecode(branch_delay_instr);
+          set_pc(next_pc);
+          pc_modified_ = true;
+          break;
+        }
+        case JALR: {
+          Instruction* branch_delay_instr = reinterpret_cast<Instruction*>(
+              current_pc + Instruction::kInstrSize);
+          BranchDelayInstructionDecode(branch_delay_instr);
+          set_register(return_addr_reg,
+                       current_pc + 2 * Instruction::kInstrSize);
+          set_pc(next_pc);
+          pc_modified_ = true;
+          break;
+        }
+        // Instructions using HI and LO registers.
+        case MULT:
+          if (kArchVariant != kMips64r6) {
+            set_register(LO, static_cast<int32_t>(i64hilo & 0xffffffff));
+            set_register(HI, static_cast<int32_t>(i64hilo >> 32));
+          } else {
+            switch (instr->SaValue()) {
+              case MUL_OP:
+                set_register(rd_reg,
+                    static_cast<int32_t>(i64hilo & 0xffffffff));
+                break;
+              case MUH_OP:
+                set_register(rd_reg, static_cast<int32_t>(i64hilo >> 32));
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          }
+          break;
+        case MULTU:
+          set_register(LO, static_cast<int32_t>(u64hilo & 0xffffffff));
+          set_register(HI, static_cast<int32_t>(u64hilo >> 32));
+          break;
+        case DMULT:  // DMULT == D_MUL_MUH.
+          if (kArchVariant != kMips64r6) {
+            set_register(LO, static_cast<int64_t>(i128resultL));
+            set_register(HI, static_cast<int64_t>(i128resultH));
+          } else {
+            switch (instr->SaValue()) {
+              case MUL_OP:
+                set_register(rd_reg, static_cast<int64_t>(i128resultL));
+                break;
+              case MUH_OP:
+                set_register(rd_reg, static_cast<int64_t>(i128resultH));
+                break;
+              default:
+                UNIMPLEMENTED_MIPS();
+                break;
+            }
+          }
+          break;
+        case DMULTU:
+          UNIMPLEMENTED_MIPS();
+          break;
+        case DSLL:
+          set_register(rd_reg, alu_out);
+          break;
+        case DIV:
+        case DDIV:
+          switch (kArchVariant) {
+            case kMips64r2:
+              // Divide by zero and overflow was not checked in the
+              // configuration step - div and divu do not raise exceptions. On
+              // division by 0 the result will be UNPREDICTABLE. On overflow
+              // (INT_MIN/-1), return INT_MIN which is what the hardware does.
+              if (rs == INT_MIN && rt == -1) {
+                set_register(LO, INT_MIN);
+                set_register(HI, 0);
+              } else if (rt != 0) {
+                set_register(LO, rs / rt);
+                set_register(HI, rs % rt);
+              }
+              break;
+            case kMips64r6:
+              switch (instr->SaValue()) {
+                case DIV_OP:
+                  if (rs == INT_MIN && rt == -1) {
+                    set_register(rd_reg, INT_MIN);
+                  } else if (rt != 0) {
+                    set_register(rd_reg, rs / rt);
+                  }
+                  break;
+                case MOD_OP:
+                  if (rs == INT_MIN && rt == -1) {
+                    set_register(rd_reg, 0);
+                  } else if (rt != 0) {
+                    set_register(rd_reg, rs % rt);
+                  }
+                  break;
+                default:
+                  UNIMPLEMENTED_MIPS();
+                  break;
+              }
+              break;
+            default:
+              break;
+          }
+          break;
+        case DIVU:
+          if (rt_u != 0) {
+            set_register(LO, rs_u / rt_u);
+            set_register(HI, rs_u % rt_u);
+          }
+          break;
+        // Break and trap instructions.
+        case BREAK:
+        case TGE:
+        case TGEU:
+        case TLT:
+        case TLTU:
+        case TEQ:
+        case TNE:
+          if (do_interrupt) {
+            SoftwareInterrupt(instr);
+          }
+          break;
+        // Conditional moves.
+        case MOVN:
+          if (rt) {
+            set_register(rd_reg, rs);
+            TraceRegWr(rs);
+          }
+          break;
+        case MOVCI: {
+          uint32_t cc = instr->FBccValue();
+          uint32_t fcsr_cc = get_fcsr_condition_bit(cc);
+          if (instr->Bit(16)) {  // Read Tf bit.
+            if (test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+          } else {
+            if (!test_fcsr_bit(fcsr_cc)) set_register(rd_reg, rs);
+          }
+          break;
+        }
+        case MOVZ:
+          if (!rt) {
+            set_register(rd_reg, rs);
+            TraceRegWr(rs);
+          }
+          break;
+        default:  // For other special opcodes we do the default operation.
+          set_register(rd_reg, alu_out);
+          TraceRegWr(alu_out);
+      }
+      break;
+    case SPECIAL2:
+      switch (instr->FunctionFieldRaw()) {
+        case MUL:
+          set_register(rd_reg, alu_out);
+          TraceRegWr(alu_out);
+          // HI and LO are UNPREDICTABLE after the operation.
+          set_register(LO, Unpredictable);
+          set_register(HI, Unpredictable);
+          break;
+        default:  // For other special2 opcodes we do the default operation.
+          set_register(rd_reg, alu_out);
+      }
+      break;
+    case SPECIAL3:
+      switch (instr->FunctionFieldRaw()) {
+        case INS:
+          // Ins instr leaves result in Rt, rather than Rd.
+          set_register(rt_reg, alu_out);
+          TraceRegWr(alu_out);
+          break;
+        case EXT:
+          // Ext instr leaves result in Rt, rather than Rd.
+          set_register(rt_reg, alu_out);
+          TraceRegWr(alu_out);
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    // Unimplemented opcodes raised an error in the configuration step before,
+    // so we can use the default here to set the destination register in common
+    // cases.
+    default:
+      set_register(rd_reg, alu_out);
+      TraceRegWr(alu_out);
+  }
+}
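Aside: the r6 arms of the MULT/DMULT/DDIV handling above encode two hardware-defined corner cases: division by zero leaves the destination register unwritten (the result is UNPREDICTABLE), and INT_MIN / -1 returns INT_MIN for DIV and 0 for MOD rather than trapping. A minimal standalone sketch of that rule, assuming 32-bit operands (the helper name is illustrative, not simulator API):

#include <climits>
#include <cstdint>
#include <cstdio>

// Returns false when the destination must stay untouched (divide by zero).
static bool DivModR6(int32_t rs, int32_t rt, bool is_mod, int32_t* rd) {
  if (rt == 0) return false;        // UNPREDICTABLE: skip the register write.
  if (rs == INT_MIN && rt == -1) {  // Overflow case defined by the hardware.
    *rd = is_mod ? 0 : INT_MIN;
    return true;
  }
  *rd = is_mod ? rs % rt : rs / rt;
  return true;
}

int main() {
  int32_t rd = 0;
  DivModR6(INT_MIN, -1, false, &rd);
  std::printf("%d\n", rd);  // Prints INT_MIN; native division would raise SIGFPE here.
  return 0;
}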
+
+
+// Type 2: instructions using a 16-bit immediate (e.g. addi, beq).
+void Simulator::DecodeTypeImmediate(Instruction* instr) {
+  // Instruction fields.
+  Opcode   op     = instr->OpcodeFieldRaw();
+  int64_t  rs     = get_register(instr->RsValue());
+  uint64_t rs_u   = static_cast<uint64_t>(rs);
+  int64_t  rt_reg = instr->RtValue();  // Destination register.
+  int64_t  rt     = get_register(rt_reg);
+  int16_t  imm16  = instr->Imm16Value();
+
+  int32_t  ft_reg = instr->FtValue();  // Destination register.
+  int64_t  ft     = get_fpu_register(ft_reg);
+
+  // Zero extended immediate.
+  uint32_t  oe_imm16 = 0xffff & imm16;
+  // Sign extended immediate.
+  int32_t   se_imm16 = imm16;
+
+  // Get current pc.
+  int64_t current_pc = get_pc();
+  // Next pc.
+  int64_t next_pc = bad_ra;
+
+  // Used for conditional branch instructions.
+  bool do_branch = false;
+  bool execute_branch_delay_instruction = false;
+
+  // Used for arithmetic instructions.
+  int64_t alu_out = 0;
+  // Floating point.
+  double fp_out = 0.0;
+  uint32_t cc, cc_value, fcsr_cc;
+
+  // Used for memory instructions.
+  int64_t addr = 0x0;
+  // Value to be written in memory.
+  uint64_t mem_value = 0x0;
+  // Alignment for 32-bit integers used in LWL, LWR, etc.
+  const int kInt32AlignmentMask = sizeof(uint32_t) - 1;
+
+  // ---------- Configuration (and execution for REGIMM).
+  switch (op) {
+    // ------------- COP1. Coprocessor instructions.
+    case COP1:
+      switch (instr->RsFieldRaw()) {
+        case BC1:   // Branch on coprocessor condition.
+          cc = instr->FBccValue();
+          fcsr_cc = get_fcsr_condition_bit(cc);
+          cc_value = test_fcsr_bit(fcsr_cc);
+          do_branch = (instr->FBtrueValue()) ? cc_value : !cc_value;
+          execute_branch_delay_instruction = true;
+          // Set next_pc.
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
+          break;
+        case BC1EQZ:
+          do_branch = (ft & 0x1) ? false : true;
+          execute_branch_delay_instruction = true;
+          // Set next_pc.
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
+          break;
+        case BC1NEZ:
+          do_branch = (ft & 0x1) ? true : false;
+          execute_branch_delay_instruction = true;
+          // Set next_pc.
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
+          break;
+        default:
+          UNREACHABLE();
+      }
+      break;
+    // ------------- REGIMM class.
+    case REGIMM:
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+          do_branch = rs < 0;
+          break;
+        case BLTZAL:
+          do_branch = rs < 0;
+          break;
+        case BGEZ:
+          do_branch = rs >= 0;
+          break;
+        case BGEZAL:
+          do_branch = rs >= 0;
+          break;
+        default:
+          UNREACHABLE();
+      }
+      switch (instr->RtFieldRaw()) {
+        case BLTZ:
+        case BLTZAL:
+        case BGEZ:
+        case BGEZAL:
+          // Branch instructions common part.
+          execute_branch_delay_instruction = true;
+          // Set next_pc.
+          if (do_branch) {
+            next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+            if (instr->IsLinkingInstruction()) {
+              set_register(31, current_pc + kBranchReturnOffset);
+            }
+          } else {
+            next_pc = current_pc + kBranchReturnOffset;
+          }
+        default:
+          break;
+      }
+      break;  // case REGIMM.
+    // ------------- Branch instructions.
+    // When comparing to zero, the encoding of the rt field is always 0, so we
+    // don't need to replace rt with zero.
+    case BEQ:
+      do_branch = (rs == rt);
+      break;
+    case BNE:
+      do_branch = rs != rt;
+      break;
+    case BLEZ:
+      do_branch = rs <= 0;
+      break;
+    case BGTZ:
+      do_branch = rs > 0;
+      break;
+    // ------------- Arithmetic instructions.
+    case ADDI:
+    case DADDI:
+      if (HaveSameSign(rs, se_imm16)) {
+        if (rs > 0) {
+          exceptions[kIntegerOverflow] = rs > (Registers::kMaxValue - se_imm16);
+        } else if (rs < 0) {
+          exceptions[kIntegerUnderflow] =
+              rs < (Registers::kMinValue - se_imm16);
+        }
+      }
+      alu_out = rs + se_imm16;
+      break;
+    case ADDIU: {
+        int32_t alu32_out = rs + se_imm16;
+        // Sign-extend result of 32bit operation into 64bit register.
+        alu_out = static_cast<int64_t>(alu32_out);
+      }
+      break;
+    case DADDIU:
+      alu_out = rs + se_imm16;
+      break;
+    case SLTI:
+      alu_out = (rs < se_imm16) ? 1 : 0;
+      break;
+    case SLTIU:
+      alu_out = (rs_u < static_cast<uint32_t>(se_imm16)) ? 1 : 0;
+      break;
+    case ANDI:
+      alu_out = rs & oe_imm16;
+      break;
+    case ORI:
+      alu_out = rs | oe_imm16;
+      break;
+    case XORI:
+      alu_out = rs ^ oe_imm16;
+      break;
+    case LUI: {
+        int32_t alu32_out = (oe_imm16 << 16);
+        // Sign-extend result of 32bit operation into 64bit register.
+        alu_out = static_cast<int64_t>(alu32_out);
+      }
+      break;
+    // ------------- Memory instructions.
+    case LB:
+      addr = rs + se_imm16;
+      alu_out = ReadB(addr);
+      break;
+    case LH:
+      addr = rs + se_imm16;
+      alu_out = ReadH(addr, instr);
+      break;
+    case LWL: {
+      // al_offset is offset of the effective address within an aligned word.
+      uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
+      uint8_t byte_shift = kInt32AlignmentMask - al_offset;
+      uint32_t mask = (1 << byte_shift * 8) - 1;
+      addr = rs + se_imm16 - al_offset;
+      alu_out = ReadW(addr, instr);
+      alu_out <<= byte_shift * 8;
+      alu_out |= rt & mask;
+      break;
+    }
+    case LW:
+      addr = rs + se_imm16;
+      alu_out = ReadW(addr, instr);
+      break;
+    case LWU:
+      addr = rs + se_imm16;
+      alu_out = ReadWU(addr, instr);
+      break;
+    case LD:
+      addr = rs + se_imm16;
+      alu_out = Read2W(addr, instr);
+      break;
+    case LBU:
+      addr = rs + se_imm16;
+      alu_out = ReadBU(addr);
+      break;
+    case LHU:
+      addr = rs + se_imm16;
+      alu_out = ReadHU(addr, instr);
+      break;
+    case LWR: {
+      // al_offset is offset of the effective address within an aligned word.
+      uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
+      uint8_t byte_shift = kInt32AlignmentMask - al_offset;
+      uint32_t mask = al_offset ? (~0 << (byte_shift + 1) * 8) : 0;
+      addr = rs + se_imm16 - al_offset;
+      alu_out = ReadW(addr, instr);
+      alu_out = static_cast<uint32_t>(alu_out) >> al_offset * 8;
+      alu_out |= rt & mask;
+      break;
+    }
+    case SB:
+      addr = rs + se_imm16;
+      break;
+    case SH:
+      addr = rs + se_imm16;
+      break;
+    case SWL: {
+      uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
+      uint8_t byte_shift = kInt32AlignmentMask - al_offset;
+      uint32_t mask = byte_shift ? (~0 << (al_offset + 1) * 8) : 0;
+      addr = rs + se_imm16 - al_offset;
+      mem_value = ReadW(addr, instr) & mask;
+      mem_value |= static_cast<uint32_t>(rt) >> byte_shift * 8;
+      break;
+    }
+    case SW:
+    case SD:
+      addr = rs + se_imm16;
+      break;
+    case SWR: {
+      uint8_t al_offset = (rs + se_imm16) & kInt32AlignmentMask;
+      uint32_t mask = (1 << al_offset * 8) - 1;
+      addr = rs + se_imm16 - al_offset;
+      mem_value = ReadW(addr, instr);
+      mem_value = (rt << al_offset * 8) | (mem_value & mask);
+      break;
+    }
+    case LWC1:
+      addr = rs + se_imm16;
+      alu_out = ReadW(addr, instr);
+      break;
+    case LDC1:
+      addr = rs + se_imm16;
+      fp_out = ReadD(addr, instr);
+      break;
+    case SWC1:
+    case SDC1:
+      addr = rs + se_imm16;
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  // ---------- Raise exceptions triggered.
+  SignalExceptions();
+
+  // ---------- Execution.
+  switch (op) {
+    // ------------- Branch instructions.
+    case BEQ:
+    case BNE:
+    case BLEZ:
+    case BGTZ:
+      // Branch instructions common part.
+      execute_branch_delay_instruction = true;
+      // Set next_pc.
+      if (do_branch) {
+        next_pc = current_pc + (imm16 << 2) + Instruction::kInstrSize;
+        if (instr->IsLinkingInstruction()) {
+          set_register(31, current_pc + 2 * Instruction::kInstrSize);
+        }
+      } else {
+        next_pc = current_pc + 2 * Instruction::kInstrSize;
+      }
+      break;
+    // ------------- Arithmetic instructions.
+    case ADDI:
+    case DADDI:
+    case ADDIU:
+    case DADDIU:
+    case SLTI:
+    case SLTIU:
+    case ANDI:
+    case ORI:
+    case XORI:
+    case LUI:
+      set_register(rt_reg, alu_out);
+      TraceRegWr(alu_out);
+      break;
+    // ------------- Memory instructions.
+    case LB:
+    case LH:
+    case LWL:
+    case LW:
+    case LWU:
+    case LD:
+    case LBU:
+    case LHU:
+    case LWR:
+      set_register(rt_reg, alu_out);
+      break;
+    case SB:
+      WriteB(addr, static_cast<int8_t>(rt));
+      break;
+    case SH:
+      WriteH(addr, static_cast<uint16_t>(rt), instr);
+      break;
+    case SWL:
+      WriteW(addr, mem_value, instr);
+      break;
+    case SW:
+      WriteW(addr, rt, instr);
+      break;
+    case SD:
+      Write2W(addr, rt, instr);
+      break;
+    case SWR:
+      WriteW(addr, mem_value, instr);
+      break;
+    case LWC1:
+      set_fpu_register(ft_reg, kFPUInvalidResult);  // Trash upper 32 bits.
+      set_fpu_register_word(ft_reg, static_cast<int32_t>(alu_out));
+      break;
+    case LDC1:
+      set_fpu_register_double(ft_reg, fp_out);
+      break;
+    case SWC1:
+      addr = rs + se_imm16;
+      WriteW(addr, get_fpu_register(ft_reg), instr);
+      break;
+    case SDC1:
+      addr = rs + se_imm16;
+      WriteD(addr, get_fpu_register_double(ft_reg), instr);
+      break;
+    default:
+      break;
+  }
+
+
+  if (execute_branch_delay_instruction) {
+    // Execute branch delay slot.
+    // We don't check for end_sim_pc. First it should not be met as the current
+    // pc is valid. Secondly a jump should always execute its branch delay slot.
+    Instruction* branch_delay_instr =
+        reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+    BranchDelayInstructionDecode(branch_delay_instr);
+  }
+
+  // If needed update pc after the branch delay execution.
+  if (next_pc != bad_ra) {
+    set_pc(next_pc);
+  }
+}
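Aside: the LWL/LWR cases above assemble an unaligned 32-bit load from two aligned reads, each merging part of the word with the old contents of rt. A little-endian sketch of the LWL merge using the same mask arithmetic (names are illustrative; addr is treated as an offset into mem):

#include <cstdint>
#include <cstring>

// LWL: bytes from the containing aligned word move to the high end of the
// register; the low bytes of the previous rt value are preserved.
int64_t SimulateLwl(const uint8_t* mem, uint64_t addr, int64_t old_rt) {
  const uint64_t kMask = sizeof(uint32_t) - 1;   // kInt32AlignmentMask.
  uint32_t al_offset = addr & kMask;             // Offset within the word.
  uint32_t byte_shift = kMask - al_offset;
  uint32_t keep = (1u << byte_shift * 8) - 1;    // Low bytes kept from rt.
  int32_t word;
  std::memcpy(&word, mem + (addr - al_offset), sizeof(word));  // Aligned read.
  int64_t out = static_cast<int64_t>(word) << byte_shift * 8;
  return out | (old_rt & keep);
}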
+
+
+// Type 3: instructions using a 26-bit immediate (e.g. j, jal).
+void Simulator::DecodeTypeJump(Instruction* instr) {
+  // Get current pc. The pc is 64 bits wide here; truncating it to 32 bits
+  // would corrupt jump targets for code mapped above 4GB.
+  int64_t current_pc = get_pc();
+  // Get unchanged bits of pc.
+  int64_t pc_high_bits = current_pc & 0xfffffffff0000000;
+  // Next pc.
+  int64_t next_pc = pc_high_bits | (instr->Imm26Value() << 2);
+
+  // Execute branch delay slot.
+  // We don't check for end_sim_pc. First it should not be met as the current pc
+  // is valid. Secondly a jump should always execute its branch delay slot.
+  Instruction* branch_delay_instr =
+      reinterpret_cast<Instruction*>(current_pc + Instruction::kInstrSize);
+  BranchDelayInstructionDecode(branch_delay_instr);
+
+  // Update pc and ra if necessary.
+  // Do this after the branch delay execution.
+  if (instr->IsLinkingInstruction()) {
+    set_register(31, current_pc + 2 * Instruction::kInstrSize);
+  }
+  set_pc(next_pc);
+  pc_modified_ = true;
+}
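Aside: a J-type target replaces the low 28 bits of the PC with the 26-bit immediate shifted into a byte offset; the upper bits pass through unchanged, so a jump cannot leave its 256MB region. The arithmetic in isolation (illustrative helper, not simulator API):

#include <cstdint>

uint64_t JumpTarget(uint64_t jump_pc, uint32_t imm26) {
  // Keep everything above bit 27 and splice in the 28-bit byte offset.
  return (jump_pc & ~UINT64_C(0x0fffffff)) | (uint64_t{imm26} << 2);
}
// JumpTarget(0x12345678, 0x4) == 0x10000010.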
+
+
+// Executes the current instruction.
+void Simulator::InstructionDecode(Instruction* instr) {
+  if (v8::internal::FLAG_check_icache) {
+    CheckICache(isolate_->simulator_i_cache(), instr);
+  }
+  pc_modified_ = false;
+
+  v8::internal::EmbeddedVector<char, 256> buffer;
+
+  if (::v8::internal::FLAG_trace_sim) {
+    SNPrintF(trace_buf_, " ");
+    disasm::NameConverter converter;
+    disasm::Disassembler dasm(converter);
+    // Use a reasonably large buffer.
+    dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+  }
+
+  switch (instr->InstructionType()) {
+    case Instruction::kRegisterType:
+      DecodeTypeRegister(instr);
+      break;
+    case Instruction::kImmediateType:
+      DecodeTypeImmediate(instr);
+      break;
+    case Instruction::kJumpType:
+      DecodeTypeJump(instr);
+      break;
+    default:
+      UNSUPPORTED();
+  }
+
+  if (::v8::internal::FLAG_trace_sim) {
+    PrintF("  0x%08lx  %-44s   %s\n", reinterpret_cast<intptr_t>(instr),
+        buffer.start(), trace_buf_.start());
+  }
+
+  if (!pc_modified_) {
+    set_register(pc, reinterpret_cast<int64_t>(instr) +
+                 Instruction::kInstrSize);
+  }
+}
+
+
+void Simulator::Execute() {
+  // Get the PC to simulate. Cannot use the accessor here as we need the
+  // raw PC value and not the one used as input to arithmetic instructions.
+  int64_t program_counter = get_pc();
+  if (::v8::internal::FLAG_stop_sim_at == 0) {
+    // Fast version of the dispatch loop without checking whether the simulator
+    // should be stopping at a particular executed instruction.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      InstructionDecode(instr);
+      program_counter = get_pc();
+    }
+  } else {
+    // FLAG_stop_sim_at is at the non-default value. Stop in the debugger when
+    // we reach the particular instruction count.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      if (icount_ == static_cast<int64_t>(::v8::internal::FLAG_stop_sim_at)) {
+        MipsDebugger dbg(this);
+        dbg.Debug();
+      } else {
+        InstructionDecode(instr);
+      }
+      program_counter = get_pc();
+    }
+  }
+}
+
+
+void Simulator::CallInternal(byte* entry) {
+  // Prepare to execute the code at entry.
+  set_register(pc, reinterpret_cast<int64_t>(entry));
+  // Put down marker for end of simulation. The simulator will stop simulation
+  // when the PC reaches this value. By saving the "end simulation" value into
+  // ra, the simulation stops when returning to this call point.
+  set_register(ra, end_sim_pc);
+
+  // Remember the values of callee-saved registers (s0-s7, gp, sp and fp).
+  int64_t s0_val = get_register(s0);
+  int64_t s1_val = get_register(s1);
+  int64_t s2_val = get_register(s2);
+  int64_t s3_val = get_register(s3);
+  int64_t s4_val = get_register(s4);
+  int64_t s5_val = get_register(s5);
+  int64_t s6_val = get_register(s6);
+  int64_t s7_val = get_register(s7);
+  int64_t gp_val = get_register(gp);
+  int64_t sp_val = get_register(sp);
+  int64_t fp_val = get_register(fp);
+
+  // Set up the callee-saved registers with a known value. To be able to check
+  // that they are preserved properly across JS execution.
+  int64_t callee_saved_value = icount_;
+  set_register(s0, callee_saved_value);
+  set_register(s1, callee_saved_value);
+  set_register(s2, callee_saved_value);
+  set_register(s3, callee_saved_value);
+  set_register(s4, callee_saved_value);
+  set_register(s5, callee_saved_value);
+  set_register(s6, callee_saved_value);
+  set_register(s7, callee_saved_value);
+  set_register(gp, callee_saved_value);
+  set_register(fp, callee_saved_value);
+
+  // Start the simulation.
+  Execute();
+
+  // Check that the callee-saved registers have been preserved.
+  CHECK_EQ(callee_saved_value, get_register(s0));
+  CHECK_EQ(callee_saved_value, get_register(s1));
+  CHECK_EQ(callee_saved_value, get_register(s2));
+  CHECK_EQ(callee_saved_value, get_register(s3));
+  CHECK_EQ(callee_saved_value, get_register(s4));
+  CHECK_EQ(callee_saved_value, get_register(s5));
+  CHECK_EQ(callee_saved_value, get_register(s6));
+  CHECK_EQ(callee_saved_value, get_register(s7));
+  CHECK_EQ(callee_saved_value, get_register(gp));
+  CHECK_EQ(callee_saved_value, get_register(fp));
+
+  // Restore callee-saved registers with the original value.
+  set_register(s0, s0_val);
+  set_register(s1, s1_val);
+  set_register(s2, s2_val);
+  set_register(s3, s3_val);
+  set_register(s4, s4_val);
+  set_register(s5, s5_val);
+  set_register(s6, s6_val);
+  set_register(s7, s7_val);
+  set_register(gp, gp_val);
+  set_register(sp, sp_val);
+  set_register(fp, fp_val);
+}
+
+
+int64_t Simulator::Call(byte* entry, int argument_count, ...) {
+  const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
+  va_list parameters;
+  va_start(parameters, argument_count);
+  // Set up arguments.
+
+  // The first four arguments are passed in registers in both ABIs.
+  DCHECK(argument_count >= 4);
+  set_register(a0, va_arg(parameters, int64_t));
+  set_register(a1, va_arg(parameters, int64_t));
+  set_register(a2, va_arg(parameters, int64_t));
+  set_register(a3, va_arg(parameters, int64_t));
+
+  if (kMipsAbi == kN64) {
+    // Up to eight arguments passed in registers in N64 ABI.
+    // TODO(plind): N64 ABI calls these regs a4 - a7. Clarify this.
+    if (argument_count >= 5) set_register(a4, va_arg(parameters, int64_t));
+    if (argument_count >= 6) set_register(a5, va_arg(parameters, int64_t));
+    if (argument_count >= 7) set_register(a6, va_arg(parameters, int64_t));
+    if (argument_count >= 8) set_register(a7, va_arg(parameters, int64_t));
+  }
+
+  // Remaining arguments passed on stack.
+  int64_t original_stack = get_register(sp);
+  // Compute position of stack on entry to generated code.
+  int stack_args_count = (argument_count > kRegisterPassedArguments) ?
+                         (argument_count - kRegisterPassedArguments) : 0;
+  int stack_args_size = stack_args_count * sizeof(int64_t) + kCArgsSlotsSize;
+  int64_t entry_stack = original_stack - stack_args_size;
+
+  if (base::OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -base::OS::ActivationFrameAlignment();
+  }
+  // Store remaining arguments on stack, from low to high memory.
+  intptr_t* stack_argument = reinterpret_cast<intptr_t*>(entry_stack);
+  for (int i = kRegisterPassedArguments; i < argument_count; i++) {
+    int stack_index = i - kRegisterPassedArguments + kCArgSlotCount;
+    stack_argument[stack_index] = va_arg(parameters, int64_t);
+  }
+  va_end(parameters);
+  set_register(sp, entry_stack);
+
+  CallInternal(entry);
+
+  // Pop stack passed arguments.
+  CHECK_EQ(entry_stack, get_register(sp));
+  set_register(sp, original_stack);
+
+  int64_t result = get_register(v0);
+  return result;
+}
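Aside: Call() above aligns the entry stack with entry_stack &= -ActivationFrameAlignment(). For a power-of-two alignment, the negated value is a mask with the low bits clear, so the AND rounds the pointer down, toward lower addresses and away from the caller's frame. Worked with hypothetical values:

#include <cstdint>
#include <cstdio>

int main() {
  int64_t entry_stack = 0x7fff0018;  // Hypothetical unaligned stack pointer.
  int64_t alignment = 16;            // Hypothetical frame alignment.
  entry_stack &= -alignment;         // -16 is 0x...fff0: clears the low 4 bits.
  std::printf("0x%llx\n", static_cast<unsigned long long>(entry_stack));
  return 0;                          // Prints 0x7fff0010.
}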
+
+
+double Simulator::CallFP(byte* entry, double d0, double d1) {
+  if (!IsMipsSoftFloatABI) {
+    const FPURegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
+    set_fpu_register_double(f12, d0);
+    set_fpu_register_double(fparg2, d1);
+  } else {
+    int buffer[2];
+    DCHECK(sizeof(buffer[0]) * 2 == sizeof(d0));
+    memcpy(buffer, &d0, sizeof(d0));
+    set_dw_register(a0, buffer);
+    memcpy(buffer, &d1, sizeof(d1));
+    set_dw_register(a2, buffer);
+  }
+  CallInternal(entry);
+  if (!IsMipsSoftFloatABI) {
+    return get_fpu_register_double(f0);
+  } else {
+    return get_double_from_register_pair(v0);
+  }
+}
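Aside: in the soft-float branch of CallFP above, each double travels in a general register pair as a raw bit pattern; memcpy performs the reinterpretation without any int/double value conversion. A minimal sketch of the split (helper name is illustrative):

#include <cstdint>
#include <cstring>

// Copy the 64-bit pattern of a double into the two 32-bit halves that
// set_dw_register() consumes.
void SplitDouble(double d, int32_t halves[2]) {
  static_assert(sizeof(double) == 2 * sizeof(int32_t), "need a 64-bit double");
  std::memcpy(halves, &d, sizeof(d));
}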
+
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+  int64_t new_sp = get_register(sp) - sizeof(uintptr_t);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+  *stack_slot = address;
+  set_register(sp, new_sp);
+  return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+  int64_t current_sp = get_register(sp);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+  uintptr_t address = *stack_slot;
+  set_register(sp, current_sp + sizeof(uintptr_t));
+  return address;
+}
+
+
+#undef UNSUPPORTED
+
+} }  // namespace v8::internal
+
+#endif  // USE_SIMULATOR
+
+#endif  // V8_TARGET_ARCH_MIPS64
diff --git a/src/mips64/simulator-mips64.h b/src/mips64/simulator-mips64.h
new file mode 100644
index 0000000..5241554
--- /dev/null
+++ b/src/mips64/simulator-mips64.h
@@ -0,0 +1,479 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Declares a Simulator for MIPS instructions if we are not generating a native
+// MIPS binary. This Simulator allows us to run and debug MIPS code generation
+// on regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will start execution in the Simulator or forward to the real entry
+// on a MIPS HW platform.
+
+#ifndef V8_MIPS_SIMULATOR_MIPS_H_
+#define V8_MIPS_SIMULATOR_MIPS_H_
+
+#include "src/allocation.h"
+#include "src/mips64/constants-mips64.h"
+
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native mips platform.
+
+namespace v8 {
+namespace internal {
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  entry(p0, p1, p2, p3, p4)
+
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type arm_regexp_matcher.
+// The fifth (or ninth) argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#ifdef MIPS_ABI_N64
+typedef int (*mips_regexp_matcher)(String* input,
+                                   int64_t start_offset,
+                                   const byte* input_start,
+                                   const byte* input_end,
+                                   int* output,
+                                   int64_t output_size,
+                                   Address stack_base,
+                                   int64_t direct_call,
+                                   void* return_address,
+                                   Isolate* isolate);
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+  (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
+      p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8))
+
+#else  // O32 Abi.
+
+typedef int (*mips_regexp_matcher)(String* input,
+                                   int32_t start_offset,
+                                   const byte* input_start,
+                                   const byte* input_end,
+                                   void* return_address,
+                                   int* output,
+                                   int32_t output_size,
+                                   Address stack_base,
+                                   int32_t direct_call,
+                                   Isolate* isolate);
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+  (FUNCTION_CAST<mips_regexp_matcher>(entry)( \
+      p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8))
+
+#endif  // MIPS_ABI_N64
+
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on mips uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return c_limit;
+  }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    return try_catch_address;
+  }
+
+  static inline void UnregisterCTryCatch() { }
+};
+
+} }  // namespace v8::internal
+
+// Calculates the stack limit beyond which we will throw stack overflow errors.
+// This macro must be called from a C++ method. It relies on being able to take
+// the address of "this" to get a value on the current execution stack and then
+// calculates the stack limit based on that value.
+// NOTE: The check for overflow is not safe as there is no guarantee that the
+// running thread has its stack in all memory up to address 0x00000000.
+#define GENERATED_CODE_STACK_LIMIT(limit) \
+  (reinterpret_cast<uintptr_t>(this) >= limit ? \
+      reinterpret_cast<uintptr_t>(this) - limit : 0)
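Aside: spelled out as a function, the macro computes an address `limit` bytes below the current stack position (approximated by the address of `this`), clamping at zero for exactly the unsafe case the note above describes (illustrative equivalent, not part of the source):

#include <cstdint>

uintptr_t StackLimitBelow(const void* stack_pos, uintptr_t limit) {
  uintptr_t pos = reinterpret_cast<uintptr_t>(stack_pos);
  return pos >= limit ? pos - limit : 0;  // Guard against unsigned wrap-around.
}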
+
+#else  // !defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/assembler.h"
+#include "src/hashmap.h"
+
+namespace v8 {
+namespace internal {
+
+// -----------------------------------------------------------------------------
+// Utility functions
+
+class CachePage {
+ public:
+  static const int LINE_VALID = 0;
+  static const int LINE_INVALID = 1;
+
+  static const int kPageShift = 12;
+  static const int kPageSize = 1 << kPageShift;
+  static const int kPageMask = kPageSize - 1;
+  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
+  static const int kLineLength = 1 << kLineShift;
+  static const int kLineMask = kLineLength - 1;
+
+  CachePage() {
+    memset(&validity_map_, LINE_INVALID, sizeof(validity_map_));
+  }
+
+  char* ValidityByte(int offset) {
+    return &validity_map_[offset >> kLineShift];
+  }
+
+  char* CachedData(int offset) {
+    return &data_[offset];
+  }
+
+ private:
+  char data_[kPageSize];   // The cached data.
+  static const int kValidityMapSize = kPageSize >> kLineShift;
+  char validity_map_[kValidityMapSize];  // One byte per line.
+};
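Aside: the i-cache simulation keys its hash map by page-aligned address and tracks validity per 4-byte line inside each 4KB page. Decomposing an instruction address into those coordinates looks roughly like this sketch (struct and function are illustrative, reusing the constants from CachePage above):

#include <cstdint>

struct CacheCoords {
  uintptr_t page;  // Page-aligned address, used as the hash-map key.
  int offset;      // Byte offset within the page.
  int line;        // Index into the per-line validity map.
};

CacheCoords Decompose(uintptr_t addr) {
  const uintptr_t kPageMask = (1 << 12) - 1;  // CachePage::kPageMask.
  const int kLineShift = 2;                   // CachePage::kLineShift.
  CacheCoords c;
  c.page = addr & ~kPageMask;
  c.offset = static_cast<int>(addr & kPageMask);
  c.line = c.offset >> kLineShift;
  return c;
}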
+
+class Simulator {
+ public:
+  friend class MipsDebugger;
+
+  // Registers are declared in order. See SMRL chapter 2.
+  enum Register {
+    no_reg = -1,
+    zero_reg = 0,
+    at,
+    v0, v1,
+    a0, a1, a2, a3, a4, a5, a6, a7,
+    t0, t1, t2, t3,
+    s0, s1, s2, s3, s4, s5, s6, s7,
+    t8, t9,
+    k0, k1,
+    gp,
+    sp,
+    s8,
+    ra,
+    // LO, HI, and pc.
+    LO,
+    HI,
+    pc,   // pc must be the last register.
+    kNumSimuRegisters,
+    // aliases
+    fp = s8
+  };
+
+  // Coprocessor registers.
+  // Generated code will always use doubles. So we will only use even registers.
+  enum FPURegister {
+    f0, f1, f2, f3, f4, f5, f6, f7, f8, f9, f10, f11,
+    f12, f13, f14, f15,   // f12 and f14 are arguments FPURegisters.
+    f16, f17, f18, f19, f20, f21, f22, f23, f24, f25,
+    f26, f27, f28, f29, f30, f31,
+    kNumFPURegisters
+  };
+
+  explicit Simulator(Isolate* isolate);
+  ~Simulator();
+
+  // The currently executing Simulator instance. Potentially there can be one
+  // for each native thread.
+  static Simulator* current(v8::internal::Isolate* isolate);
+
+  // Accessors for register state. Reading the pc value adheres to the MIPS
+  // architecture specification and is off by a 8 from the currently executing
+  // instruction.
+  void set_register(int reg, int64_t value);
+  void set_register_word(int reg, int32_t value);
+  void set_dw_register(int dreg, const int* dbl);
+  int64_t get_register(int reg) const;
+  double get_double_from_register_pair(int reg);
+  // Same for FPURegisters.
+  void set_fpu_register(int fpureg, int64_t value);
+  void set_fpu_register_word(int fpureg, int32_t value);
+  void set_fpu_register_hi_word(int fpureg, int32_t value);
+  void set_fpu_register_float(int fpureg, float value);
+  void set_fpu_register_double(int fpureg, double value);
+  int64_t get_fpu_register(int fpureg) const;
+  int32_t get_fpu_register_word(int fpureg) const;
+  int32_t get_fpu_register_signed_word(int fpureg) const;
+  int32_t get_fpu_register_hi_word(int fpureg) const;
+  float get_fpu_register_float(int fpureg) const;
+  double get_fpu_register_double(int fpureg) const;
+  void set_fcsr_bit(uint32_t cc, bool value);
+  bool test_fcsr_bit(uint32_t cc);
+  bool set_fcsr_round_error(double original, double rounded);
+  bool set_fcsr_round64_error(double original, double rounded);
+
+  // Special case of set_register and get_register to access the raw PC value.
+  void set_pc(int64_t value);
+  int64_t get_pc() const;
+
+  Address get_sp() {
+    return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+  }
+
+  // Accessor to the internal simulator stack area.
+  uintptr_t StackLimit() const;
+
+  // Executes MIPS instructions until the PC reaches end_sim_pc.
+  void Execute();
+
+  // Call on program start.
+  static void Initialize(Isolate* isolate);
+
+  // V8 generally calls into generated JS code with 5 parameters and into
+  // generated RegExp code with 7 parameters. This is a convenience function,
+  // which sets up the simulator state and grabs the result on return.
+  int64_t Call(byte* entry, int argument_count, ...);
+  // Alternative: call a 2-argument double function.
+  double CallFP(byte* entry, double d0, double d1);
+
+  // Push an address onto the JS stack.
+  uintptr_t PushAddress(uintptr_t address);
+
+  // Pop an address from the JS stack.
+  uintptr_t PopAddress();
+
+  // Debugger input.
+  void set_last_debugger_input(char* input);
+  char* last_debugger_input() { return last_debugger_input_; }
+
+  // ICache checking.
+  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+                          size_t size);
+
+  // Returns true if pc register contains one of the 'special_values' defined
+  // below (bad_ra, end_sim_pc).
+  bool has_bad_pc() const;
+
+ private:
+  enum special_values {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly set up.
+    bad_ra = -1,
+    // A pc value used to signal the simulator to stop execution.  Generally
+    // the ra is set to this value on transition from native C code to
+    // simulated execution, so that the simulator can "return" to the native
+    // C code.
+    end_sim_pc = -2,
+    // Unpredictable value.
+    Unpredictable = 0xbadbeaf
+  };
+
+  // Unsupported instructions use Format to print an error and stop execution.
+  void Format(Instruction* instr, const char* format);
+
+  // Read and write memory.
+  inline uint32_t ReadBU(int64_t addr);
+  inline int32_t ReadB(int64_t addr);
+  inline void WriteB(int64_t addr, uint8_t value);
+  inline void WriteB(int64_t addr, int8_t value);
+
+  inline uint16_t ReadHU(int64_t addr, Instruction* instr);
+  inline int16_t ReadH(int64_t addr, Instruction* instr);
+  // Note: Overloaded on the sign of the value.
+  inline void WriteH(int64_t addr, uint16_t value, Instruction* instr);
+  inline void WriteH(int64_t addr, int16_t value, Instruction* instr);
+
+  inline uint32_t ReadWU(int64_t addr, Instruction* instr);
+  inline int32_t ReadW(int64_t addr, Instruction* instr);
+  inline void WriteW(int64_t addr, int32_t value, Instruction* instr);
+  inline int64_t Read2W(int64_t addr, Instruction* instr);
+  inline void Write2W(int64_t addr, int64_t value, Instruction* instr);
+
+  inline double ReadD(int64_t addr, Instruction* instr);
+  inline void WriteD(int64_t addr, double value, Instruction* instr);
+
+  // Helper for debugging memory access.
+  inline void DieOrDebug();
+
+  // Helpers for data value tracing.
+    enum TraceType {
+    BYTE,
+    HALF,
+    WORD,
+    DWORD
+    // DFLOAT - Floats may have printing issues due to paired lwc1's
+  };
+
+  void TraceRegWr(int64_t value);
+  void TraceMemWr(int64_t addr, int64_t value, TraceType t);
+  void TraceMemRd(int64_t addr, int64_t value);
+
+  // Operations depending on endianness.
+  // Get Double Higher / Lower word.
+  inline int32_t GetDoubleHIW(double* addr);
+  inline int32_t GetDoubleLOW(double* addr);
+  // Set Double Higher / Lower word.
+  inline int32_t SetDoubleHIW(double* addr);
+  inline int32_t SetDoubleLOW(double* addr);
+
+  // Executing is handled based on the instruction type.
+  void DecodeTypeRegister(Instruction* instr);
+
+  // Helper function for DecodeTypeRegister.
+  void ConfigureTypeRegister(Instruction* instr,
+                             int64_t* alu_out,
+                             int64_t* i64hilo,
+                             uint64_t* u64hilo,
+                             int64_t* next_pc,
+                             int64_t* return_addr_reg,
+                             bool* do_interrupt,
+                             int64_t* result128H,
+                             int64_t* result128L);
+
+  void DecodeTypeImmediate(Instruction* instr);
+  void DecodeTypeJump(Instruction* instr);
+
+  // Used for breakpoints and traps.
+  void SoftwareInterrupt(Instruction* instr);
+
+  // Stop helper functions.
+  bool IsWatchpoint(uint64_t code);
+  void PrintWatchpoint(uint64_t code);
+  void HandleStop(uint64_t code, Instruction* instr);
+  bool IsStopInstruction(Instruction* instr);
+  bool IsEnabledStop(uint64_t code);
+  void EnableStop(uint64_t code);
+  void DisableStop(uint64_t code);
+  void IncreaseStopCounter(uint64_t code);
+  void PrintStopInfo(uint64_t code);
+
+
+  // Executes one instruction.
+  void InstructionDecode(Instruction* instr);
+  // Execute one instruction placed in a branch delay slot.
+  void BranchDelayInstructionDecode(Instruction* instr) {
+    if (instr->InstructionBits() == nopInstr) {
+      // Short-cut generic nop instructions. They are always valid and they
+      // never change the simulator state.
+      return;
+    }
+
+    if (instr->IsForbiddenInBranchDelay()) {
+      V8_Fatal(__FILE__, __LINE__,
+               "Error: Unexpected %i opcode in a branch delay slot.",
+               instr->OpcodeValue());
+    }
+    InstructionDecode(instr);
+  }
+
+  // ICache.
+  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+                           int size);
+  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+  enum Exception {
+    none,
+    kIntegerOverflow,
+    kIntegerUnderflow,
+    kDivideByZero,
+    kNumExceptions
+  };
+  int16_t exceptions[kNumExceptions];
+
+  // Exceptions.
+  void SignalExceptions();
+
+  // Runtime call support.
+  static void* RedirectExternalReference(void* external_function,
+                                         ExternalReference::Type type);
+
+  // Handle arguments and return value for runtime FP functions.
+  void GetFpArgs(double* x, double* y, int32_t* z);
+  void SetFpResult(const double& result);
+
+  void CallInternal(byte* entry);
+
+  // Architecture state.
+  // Registers.
+  int64_t registers_[kNumSimuRegisters];
+  // Coprocessor Registers.
+  int64_t FPUregisters_[kNumFPURegisters];
+  // FPU control register.
+  uint32_t FCSR_;
+
+  // Simulator support.
+  // Allocate 1MB for stack.
+  size_t stack_size_;
+  char* stack_;
+  bool pc_modified_;
+  int64_t icount_;
+  int break_count_;
+  EmbeddedVector<char, 128> trace_buf_;
+
+  // Debugger input.
+  char* last_debugger_input_;
+
+  // Icache simulation.
+  v8::internal::HashMap* i_cache_;
+
+  v8::internal::Isolate* isolate_;
+
+  // Registered breakpoints.
+  Instruction* break_pc_;
+  Instr break_instr_;
+
+  // Stop is disabled if bit 31 is set.
+  static const uint32_t kStopDisabledBit = 1 << 31;
+
+  // A stop is enabled, meaning the simulator will stop when meeting the
+  // instruction, if bit 31 of watched_stops_[code].count is unset.
+  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+  // the breakpoint was hit or gone through.
+  struct StopCountAndDesc {
+    uint32_t count;
+    char* desc;
+  };
+  StopCountAndDesc watched_stops_[kMaxStopCode + 1];
+};
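Aside: the watched_stops_ table packs two facts into one 32-bit count, as the comment above notes: bit 31 disables the stop, and the remaining 31 bits count hits. The encoding in isolation (free functions for illustration; the simulator implements these as private methods):

#include <cstdint>

const uint32_t kStopDisabledBit = 1u << 31;

bool StopIsEnabled(uint32_t count) { return (count & kStopDisabledBit) == 0; }
uint32_t DisableStop(uint32_t count) { return count | kStopDisabledBit; }
uint32_t StopHitCount(uint32_t count) { return count & ~kStopDisabledBit; }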
+
+
+// When running with the simulator, transition into simulated execution at
+// this point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+    reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+      FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
+
+#ifdef MIPS_ABI_N64
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+    Simulator::current(Isolate::Current())->Call( \
+        entry, 10, p0, p1, p2, p3, p4, p5, p6, p7, NULL, p8)
+#else  // Must be O32 Abi.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+    Simulator::current(Isolate::Current())->Call( \
+        entry, 10, p0, p1, p2, p3, NULL, p4, p5, p6, p7, p8)
+#endif  // MIPS_ABI_N64
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code.  Setting the c_limit to indicate a very small
+// stack cause stack overflow errors, since the simulator ignores the input.
+// This is unlikely to be an issue in practice, though it might cause testing
+// trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit();
+  }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    Simulator* sim = Simulator::current(Isolate::Current());
+    return sim->PushAddress(try_catch_address);
+  }
+
+  static inline void UnregisterCTryCatch() {
+    Simulator::current(Isolate::Current())->PopAddress();
+  }
+};
+
+} }  // namespace v8::internal
+
+#endif  // !defined(USE_SIMULATOR)
+#endif  // V8_MIPS_SIMULATOR_MIPS_H_
diff --git a/src/mirror-debugger.js b/src/mirror-debugger.js
index 897413c..c36d6fd 100644
--- a/src/mirror-debugger.js
+++ b/src/mirror-debugger.js
@@ -81,8 +81,14 @@
     mirror = new ErrorMirror(value);
   } else if (IS_SCRIPT(value)) {
     mirror = new ScriptMirror(value);
+  } else if (IS_MAP(value) || IS_WEAKMAP(value)) {
+    mirror = new MapMirror(value);
+  } else if (IS_SET(value) || IS_WEAKSET(value)) {
+    mirror = new SetMirror(value);
   } else if (ObjectIsPromise(value)) {
     mirror = new PromiseMirror(value);
+  } else if (IS_GENERATOR(value)) {
+    mirror = new GeneratorMirror(value);
   } else {
     mirror = new ObjectMirror(value, OBJECT_TYPE, opt_transient);
   }
@@ -155,6 +161,9 @@
 var CONTEXT_TYPE = 'context';
 var SCOPE_TYPE = 'scope';
 var PROMISE_TYPE = 'promise';
+var MAP_TYPE = 'map';
+var SET_TYPE = 'set';
+var GENERATOR_TYPE = 'generator';
 
 // Maximum length when sending strings through the JSON protocol.
 var kMaxProtocolStringLength = 80;
@@ -165,16 +174,12 @@
 PropertyKind.Indexed = 2;
 
 
-// A copy of the PropertyType enum from global.h
+// A copy of the PropertyType enum from property-details.h
 var PropertyType = {};
 PropertyType.Normal                  = 0;
 PropertyType.Field                   = 1;
 PropertyType.Constant                = 2;
 PropertyType.Callbacks               = 3;
-PropertyType.Handler                 = 4;
-PropertyType.Interceptor             = 5;
-PropertyType.Transition              = 6;
-PropertyType.Nonexistent             = 7;
 
 
 // Different attributes for a property.
@@ -210,6 +215,9 @@
 //         - RegExpMirror
 //         - ErrorMirror
 //         - PromiseMirror
+//         - MapMirror
+//         - SetMirror
+//         - GeneratorMirror
 //     - PropertyMirror
 //     - InternalPropertyMirror
 //     - FrameMirror
@@ -367,6 +375,15 @@
 
 
 /**
+ * Check whether the mirror reflects a generator object.
+ * @returns {boolean} True if the mirror reflects a generator object
+ */
+Mirror.prototype.isGenerator = function() {
+  return this instanceof GeneratorMirror;
+};
+
+
+/**
  * Check whether the mirror reflects a property.
  * @returns {boolean} True if the mirror reflects a property
  */
@@ -421,6 +438,24 @@
 
 
 /**
+ * Check whether the mirror reflects a map.
+ * @returns {boolean} True if the mirror reflects a map
+ */
+Mirror.prototype.isMap = function() {
+  return this instanceof MapMirror;
+};
+
+
+/**
+ * Check whether the mirror reflects a set.
+ * @returns {boolean} True if the mirror reflects a set
+ */
+Mirror.prototype.isSet = function() {
+  return this instanceof SetMirror;
+};
+
+
+/**
  * Allocate a handle id for this object.
  */
 Mirror.prototype.allocateHandle_ = function() {
@@ -658,6 +693,19 @@
 };
 
 
+// Get all own property names except for private symbols.
+function TryGetPropertyNames(object) {
+  try {
+    // TODO(yangguo): Should there be a special debugger implementation of
+    // %GetOwnPropertyNames that doesn't perform access checks?
+    return %GetOwnPropertyNames(object, PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL);
+  } catch (e) {
+    // Might have hit a failed access check.
+    return [];
+  }
+}
+
+
 /**
  * Return the property names for this object.
  * @param {number} kind Indicate whether named, indexed or both kinds of
@@ -676,9 +724,7 @@
 
   // Find all the named properties.
   if (kind & PropertyKind.Named) {
-    // Get all own property names except for private symbols.
-    propertyNames =
-        %GetOwnPropertyNames(this.value_, PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL);
+    propertyNames = TryGetPropertyNames(this.value_);
     total += propertyNames.length;
 
     // Get names for named interceptor properties if any.
@@ -761,7 +807,7 @@
 
 
 ObjectMirror.prototype.property = function(name) {
-  var details = %DebugGetPropertyDetails(this.value_, %ToString(name));
+  var details = %DebugGetPropertyDetails(this.value_, %ToName(name));
   if (details) {
     return new PropertyMirror(this, name, details);
   }
@@ -953,8 +999,8 @@
  * @return {Number or undefined} in-script position for the function
  */
 FunctionMirror.prototype.sourcePosition_ = function() {
-  // Return script if function is resolved. Otherwise just fall through
-  // to return undefined.
+  // Return position if function is resolved. Otherwise just fall
+  // through to return undefined.
   if (this.resolved()) {
     return %FunctionGetScriptSourcePosition(this.value_);
   }
@@ -1253,6 +1299,131 @@
 };
 
 
+function MapMirror(value) {
+  %_CallFunction(this, value, MAP_TYPE, ObjectMirror);
+}
+inherits(MapMirror, ObjectMirror);
+
+
+/**
+ * Returns an array of key/value pairs of a map.
+ * This will keep keys alive for WeakMaps.
+ *
+ * @returns {Array.<Object>} Array of key/value pairs of a map.
+ */
+MapMirror.prototype.entries = function() {
+  var result = [];
+
+  if (IS_WEAKMAP(this.value_)) {
+    var entries = %GetWeakMapEntries(this.value_);
+    for (var i = 0; i < entries.length; i += 2) {
+      result.push({
+        key: entries[i],
+        value: entries[i + 1]
+      });
+    }
+    return result;
+  }
+
+  var iter = %_CallFunction(this.value_, builtins.MapEntries);
+  var next;
+  while (!(next = iter.next()).done) {
+    result.push({
+      key: next.value[0],
+      value: next.value[1]
+    });
+  }
+  return result;
+};
+
+
+function SetMirror(value) {
+  %_CallFunction(this, value, SET_TYPE, ObjectMirror);
+}
+inherits(SetMirror, ObjectMirror);
+
+
+/**
+ * Returns an array of elements of a set.
+ * This will keep elements alive for WeakSets.
+ *
+ * @returns {Array.<Object>} Array of elements of a set.
+ */
+SetMirror.prototype.values = function() {
+  if (IS_WEAKSET(this.value_)) {
+    return %GetWeakSetValues(this.value_);
+  }
+
+  var result = [];
+  var iter = %_CallFunction(this.value_, builtins.SetValues);
+  var next;
+  while (!(next = iter.next()).done) {
+    result.push(next.value);
+  }
+  return result;
+};
+
+
+/**
+ * Mirror object for a Generator object.
+ * @param {Object} value The Generator object
+ * @constructor
+ * @extends ObjectMirror
+ */
+function GeneratorMirror(value) {
+  %_CallFunction(this, value, GENERATOR_TYPE, ObjectMirror);
+}
+inherits(GeneratorMirror, ObjectMirror);
+
+
+GeneratorMirror.prototype.status = function() {
+  var continuation = %GeneratorGetContinuation(this.value_);
+  if (continuation < 0) return "running";
+  if (continuation == 0) return "closed";
+  return "suspended";
+};
+
+
+GeneratorMirror.prototype.sourcePosition_ = function() {
+  return %GeneratorGetSourcePosition(this.value_);
+};
+
+
+GeneratorMirror.prototype.sourceLocation = function() {
+  var pos = this.sourcePosition_();
+  if (!IS_UNDEFINED(pos)) {
+    var script = this.func().script();
+    if (script) {
+      return script.locationFromPosition(pos, true);
+    }
+  }
+};
+
+
+GeneratorMirror.prototype.func = function() {
+  if (!this.func_) {
+    this.func_ = MakeMirror(%GeneratorGetFunction(this.value_));
+  }
+  return this.func_;
+};
+
+
+GeneratorMirror.prototype.context = function() {
+  if (!this.context_) {
+    this.context_ = new ContextMirror(%GeneratorGetContext(this.value_));
+  }
+  return this.context_;
+};
+
+
+GeneratorMirror.prototype.receiver = function() {
+  if (!this.receiver_) {
+    this.receiver_ = MakeMirror(%GeneratorGetReceiver(this.value_));
+  }
+  return this.receiver_;
+};
+
+
 /**
  * Base mirror object for properties.
  * @param {ObjectMirror} mirror The mirror object having this property
@@ -1267,10 +1438,11 @@
   this.name_ = name;
   this.value_ = details[0];
   this.details_ = details[1];
-  if (details.length > 2) {
-    this.exception_ = details[2];
-    this.getter_ = details[3];
-    this.setter_ = details[4];
+  this.is_interceptor_ = details[2];
+  if (details.length > 3) {
+    this.exception_ = details[3];
+    this.getter_ = details[4];
+    this.setter_ = details[5];
   }
 }
 inherits(PropertyMirror, Mirror);
@@ -1388,7 +1560,7 @@
  *     UndefinedMirror if there is no setter for this property
  */
 PropertyMirror.prototype.isNative = function() {
-  return (this.propertyType() == PropertyType.Interceptor) ||
+  return this.is_interceptor_ ||
          ((this.propertyType() == PropertyType.Callbacks) &&
           !this.hasGetter() && !this.hasSetter());
 };
@@ -2440,6 +2612,7 @@
     case ERROR_TYPE:
     case REGEXP_TYPE:
     case PROMISE_TYPE:
+    case GENERATOR_TYPE:
       // Add object representation.
       this.serializeObject_(mirror, content, details);
       break;
@@ -2569,6 +2742,21 @@
     }
   }
 
+  if (mirror.isGenerator()) {
+    // Add generator specific properties.
+
+    // Either 'running', 'closed', or 'suspended'.
+    content.status = mirror.status();
+
+    content.func = this.serializeReference(mirror.func());
+    content.receiver = this.serializeReference(mirror.receiver());
+
+    // If the generator is suspended, add line/column properties to the content.
+    serializeLocationFields(mirror.sourceLocation(), content);
+
+    // TODO(wingo): Also serialize a reference to the context (scope chain).
+  }
+
   if (mirror.isDate()) {
     // Add date specific properties.
     content.value = mirror.value();
diff --git a/src/mksnapshot.cc b/src/mksnapshot.cc
index d4262c4..b4a4018 100644
--- a/src/mksnapshot.cc
+++ b/src/mksnapshot.cc
@@ -11,13 +11,14 @@
 
 #include "src/v8.h"
 
+#include "include/libplatform/libplatform.h"
 #include "src/assembler.h"
+#include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
 #include "src/flags.h"
-#include "src/natives.h"
-#include "src/platform.h"
-#include "src/serialize.h"
 #include "src/list.h"
+#include "src/natives.h"
+#include "src/serialize.h"
 
 
 using namespace v8;
@@ -26,19 +27,8 @@
 class Compressor {
  public:
   virtual ~Compressor() {}
-  virtual bool Compress(i::Vector<char> input) = 0;
-  virtual i::Vector<char>* output() = 0;
-};
-
-
-class ListSnapshotSink : public i::SnapshotByteSink {
- public:
-  explicit ListSnapshotSink(i::List<char>* data) : data_(data) { }
-  virtual ~ListSnapshotSink() {}
-  virtual void Put(int byte, const char* description) { data_->Add(byte); }
-  virtual int Position() { return data_->length(); }
- private:
-  i::List<char>* data_;
+  virtual bool Compress(i::Vector<i::byte> input) = 0;
+  virtual i::Vector<i::byte>* output() = 0;
 };
 
 
@@ -48,33 +38,80 @@
       : fp_(GetFileDescriptorOrDie(snapshot_file))
       , raw_file_(NULL)
       , raw_context_file_(NULL)
-      , compressor_(NULL)
-      , omit_(false) {
+      , startup_blob_file_(NULL)
+      , compressor_(NULL) {
   }
 
   ~SnapshotWriter() {
     fclose(fp_);
     if (raw_file_) fclose(raw_file_);
     if (raw_context_file_) fclose(raw_context_file_);
+    if (startup_blob_file_) fclose(startup_blob_file_);
   }
 
   void SetCompressor(Compressor* compressor) {
     compressor_ = compressor;
   }
 
-  void SetOmit(bool omit) {
-    omit_ = omit;
-  }
-
   void SetRawFiles(const char* raw_file, const char* raw_context_file) {
     raw_file_ = GetFileDescriptorOrDie(raw_file);
     raw_context_file_ = GetFileDescriptorOrDie(raw_context_file);
   }
 
-  void WriteSnapshot(const i::List<char>& snapshot_data,
+  void SetStartupBlobFile(const char* startup_blob_file) {
+    if (startup_blob_file != NULL)
+      startup_blob_file_ = GetFileDescriptorOrDie(startup_blob_file);
+  }
+
+  void WriteSnapshot(const i::List<i::byte>& snapshot_data,
                      const i::Serializer& serializer,
-                     const i::List<char>& context_snapshot_data,
+                     const i::List<i::byte>& context_snapshot_data,
                      const i::Serializer& context_serializer) const {
+    WriteSnapshotFile(snapshot_data, serializer,
+                      context_snapshot_data, context_serializer);
+    MaybeWriteStartupBlob(snapshot_data, serializer,
+                          context_snapshot_data, context_serializer);
+  }
+
+ private:
+  void MaybeWriteStartupBlob(const i::List<i::byte>& snapshot_data,
+                             const i::Serializer& serializer,
+                             const i::List<i::byte>& context_snapshot_data,
+                             const i::Serializer& context_serializer) const {
+    if (!startup_blob_file_)
+      return;
+
+    i::List<i::byte> startup_blob;
+    i::ListSnapshotSink sink(&startup_blob);
+
+    int spaces[] = {
+        i::NEW_SPACE, i::OLD_POINTER_SPACE, i::OLD_DATA_SPACE, i::CODE_SPACE,
+        i::MAP_SPACE, i::CELL_SPACE, i::PROPERTY_CELL_SPACE
+    };
+
+    i::byte* snapshot_bytes = snapshot_data.begin();
+    sink.PutBlob(snapshot_bytes, snapshot_data.length(), "snapshot");
+    for (size_t i = 0; i < arraysize(spaces); ++i)
+      sink.PutInt(serializer.CurrentAllocationAddress(spaces[i]), "spaces");
+
+    i::byte* context_bytes = context_snapshot_data.begin();
+    sink.PutBlob(context_bytes, context_snapshot_data.length(), "context");
+    for (size_t i = 0; i < arraysize(spaces); ++i)
+      sink.PutInt(context_serializer.CurrentAllocationAddress(spaces[i]),
+                  "spaces");
+
+    size_t written = fwrite(startup_blob.begin(), 1, startup_blob.length(),
+                            startup_blob_file_);
+    if (written != (size_t)startup_blob.length()) {
+      i::PrintF("Writing snapshot file failed.. Aborting.\n");
+      exit(1);
+    }
+  }
+
+  void WriteSnapshotFile(const i::List<i::byte>& snapshot_data,
+                         const i::Serializer& serializer,
+                         const i::List<i::byte>& context_snapshot_data,
+                         const i::Serializer& context_serializer) const {
     WriteFilePrefix();
     WriteData("", snapshot_data, raw_file_);
     WriteData("context_", context_snapshot_data, raw_context_file_);
@@ -83,11 +120,10 @@
     WriteFileSuffix();
   }
 
- private:
   void WriteFilePrefix() const {
     fprintf(fp_, "// Autogenerated snapshot file. Do not edit.\n\n");
     fprintf(fp_, "#include \"src/v8.h\"\n");
-    fprintf(fp_, "#include \"src/platform.h\"\n\n");
+    fprintf(fp_, "#include \"src/base/platform/platform.h\"\n\n");
     fprintf(fp_, "#include \"src/snapshot.h\"\n\n");
     fprintf(fp_, "namespace v8 {\n");
     fprintf(fp_, "namespace internal {\n\n");
@@ -98,11 +134,10 @@
     fprintf(fp_, "}  // namespace v8\n");
   }
 
-  void WriteData(const char* prefix,
-                 const i::List<char>& source_data,
+  void WriteData(const char* prefix, const i::List<i::byte>& source_data,
                  FILE* raw_file) const {
-    const i::List <char>* data_to_be_written = NULL;
-    i::List<char> compressed_data;
+    const i::List<i::byte>* data_to_be_written = NULL;
+    i::List<i::byte> compressed_data;
     if (!compressor_) {
       data_to_be_written = &source_data;
     } else if (compressor_->Compress(source_data.ToVector())) {
@@ -113,18 +148,18 @@
       exit(1);
     }
 
-    ASSERT(data_to_be_written);
+    DCHECK(data_to_be_written);
     MaybeWriteRawFile(data_to_be_written, raw_file);
     WriteData(prefix, source_data, data_to_be_written);
   }
 
-  void MaybeWriteRawFile(const i::List<char>* data, FILE* raw_file) const {
+  void MaybeWriteRawFile(const i::List<i::byte>* data, FILE* raw_file) const {
     if (!data || !raw_file)
       return;
 
     // Sanity check, whether i::List iterators truly return pointers to an
     // internal array.
-    ASSERT(data->end() - data->begin() == data->length());
+    DCHECK(data->end() - data->begin() == data->length());
 
     size_t written = fwrite(data->begin(), 1, data->length(), raw_file);
     if (written != (size_t)data->length()) {
@@ -133,17 +168,15 @@
     }
   }
 
-  void WriteData(const char* prefix,
-                 const i::List<char>& source_data,
-                 const i::List<char>* data_to_be_written) const {
+  void WriteData(const char* prefix, const i::List<i::byte>& source_data,
+                 const i::List<i::byte>* data_to_be_written) const {
     fprintf(fp_, "const byte Snapshot::%sdata_[] = {\n", prefix);
-    if (!omit_)
-      WriteSnapshotData(data_to_be_written);
+    WriteSnapshotData(data_to_be_written);
     fprintf(fp_, "};\n");
     fprintf(fp_, "const int Snapshot::%ssize_ = %d;\n", prefix,
             data_to_be_written->length());
 
-    if (data_to_be_written == &source_data && !omit_) {
+    if (data_to_be_written == &source_data) {
       fprintf(fp_, "const byte* Snapshot::%sraw_data_ = Snapshot::%sdata_;\n",
               prefix, prefix);
       fprintf(fp_, "const int Snapshot::%sraw_size_ = Snapshot::%ssize_;\n",
@@ -173,7 +206,7 @@
             prefix, name, ser.CurrentAllocationAddress(space));
   }
 
-  void WriteSnapshotData(const i::List<char>* data) const {
+  void WriteSnapshotData(const i::List<i::byte>* data) const {
     for (int i = 0; i < data->length(); i++) {
       if ((i & 0x1f) == 0x1f)
         fprintf(fp_, "\n");
@@ -185,7 +218,7 @@
   }
 
   FILE* GetFileDescriptorOrDie(const char* filename) {
-    FILE* fp = i::OS::FOpen(filename, "wb");
+    FILE* fp = base::OS::FOpen(filename, "wb");
     if (fp == NULL) {
       i::PrintF("Unable to open file \"%s\" for writing.\n", filename);
       exit(1);
@@ -196,8 +229,8 @@
   FILE* fp_;
   FILE* raw_file_;
   FILE* raw_context_file_;
+  FILE* startup_blob_file_;
   Compressor* compressor_;
-  bool omit_;
 };
 
 
@@ -239,7 +272,7 @@
                              int* raw_data_size,
                              const char* compressed_data,
                              int compressed_data_size) {
-    ASSERT_EQ(StartupData::kBZip2,
+    DCHECK_EQ(StartupData::kBZip2,
               V8::GetCompressedStartupDataAlgorithm());
     unsigned int decompressed_size = *raw_data_size;
     int result =
@@ -270,10 +303,6 @@
 
 
 int main(int argc, char** argv) {
-  V8::InitializeICU();
-  i::Isolate::SetCrashIfDefaultIsolateInitialized();
-  i::CpuFeatures::Probe(true);
-
   // By default, log code create information in the snapshot.
   i::FLAG_log_code = true;
 
@@ -285,6 +314,13 @@
     i::FlagList::PrintHelp();
     return !i::FLAG_help;
   }
+
+  i::CpuFeatures::Probe(true);
+  V8::InitializeICU();
+  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+  v8::V8::InitializePlatform(platform);
+  v8::V8::Initialize();
+
 #ifdef COMPRESS_STARTUP_DATA_BZ2
   BZip2Decompressor natives_decompressor;
   int bz2_result = natives_decompressor.Decompress();
@@ -295,10 +331,11 @@
 #endif
   i::FLAG_logfile_per_isolate = false;
 
-  Isolate* isolate = v8::Isolate::New();
+  Isolate::CreateParams params;
+  params.enable_serializer = true;
+  Isolate* isolate = v8::Isolate::New(params);
   { Isolate::Scope isolate_scope(isolate);
     i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-    internal_isolate->enable_serializer();
 
     Persistent<Context> context;
     {
@@ -317,7 +354,7 @@
       HandleScope scope(isolate);
       v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context));
       const char* name = i::FLAG_extra_code;
-      FILE* file = i::OS::FOpen(name, "rb");
+      FILE* file = base::OS::FOpen(name, "rb");
       if (file == NULL) {
         fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
         exit(1);
@@ -368,22 +405,23 @@
 
     // This results in a somewhat smaller snapshot, probably because it gets
     // rid of some things that are cached between garbage collections.
-    i::List<char> snapshot_data;
-    ListSnapshotSink snapshot_sink(&snapshot_data);
+    i::List<i::byte> snapshot_data;
+    i::ListSnapshotSink snapshot_sink(&snapshot_data);
     i::StartupSerializer ser(internal_isolate, &snapshot_sink);
     ser.SerializeStrongReferences();
 
-    i::List<char> context_data;
-    ListSnapshotSink contex_sink(&context_data);
+    i::List<i::byte> context_data;
+    i::ListSnapshotSink contex_sink(&context_data);
     i::PartialSerializer context_ser(internal_isolate, &ser, &contex_sink);
     context_ser.Serialize(&raw_context);
     ser.SerializeWeakReferences();
 
     {
       SnapshotWriter writer(argv[1]);
-      writer.SetOmit(i::FLAG_omit);
       if (i::FLAG_raw_file && i::FLAG_raw_context_file)
         writer.SetRawFiles(i::FLAG_raw_file, i::FLAG_raw_context_file);
+      if (i::FLAG_startup_blob)
+        writer.SetStartupBlobFile(i::FLAG_startup_blob);
   #ifdef COMPRESS_STARTUP_DATA_BZ2
       BZip2Compressor bzip2;
       writer.SetCompressor(&bzip2);
@@ -394,5 +432,7 @@
 
   isolate->Dispose();
   V8::Dispose();
+  V8::ShutdownPlatform();
+  delete platform;
   return 0;
 }
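
For context, the reordered startup in main() above is the initialization sequence every embedder of this V8 revision now follows: parse flags first, then probe CPU features, bring up ICU, install a v8::Platform, and only then initialize V8 itself; teardown mirrors it in reverse. A minimal standalone sketch (assuming the default-platform helpers from include/libplatform, as used by this tree):

    #include "include/libplatform/libplatform.h"
    #include "include/v8.h"

    int main() {
      v8::V8::InitializeICU();
      // The platform must be installed before V8::Initialize().
      v8::Platform* platform = v8::platform::CreateDefaultPlatform();
      v8::V8::InitializePlatform(platform);
      v8::V8::Initialize();

      // Serialization is now requested at isolate creation time; the
      // internal enable_serializer() call on the isolate is gone.
      v8::Isolate::CreateParams params;
      params.enable_serializer = true;  // only for snapshot builders
      v8::Isolate* isolate = v8::Isolate::New(params);

      // ... create a context and serialize it, as mksnapshot does ...

      isolate->Dispose();
      v8::V8::Dispose();
      v8::V8::ShutdownPlatform();
      delete platform;
      return 0;
    }
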
diff --git a/src/msan.h b/src/msan.h
index 4130d22..f099595 100644
--- a/src/msan.h
+++ b/src/msan.h
@@ -17,12 +17,17 @@
 # define MEMORY_SANITIZER
 #endif
 
-#if defined(MEMORY_SANITIZER) && !defined(USE_SIMULATOR)
+#if defined(MEMORY_SANITIZER)
 # include <sanitizer/msan_interface.h>  // NOLINT
-// Marks a memory range as fully initialized.
-# define MSAN_MEMORY_IS_INITIALIZED_IN_JIT(p, s) __msan_unpoison((p), (s))
+
+// Marks a memory range as uninitialized, as if it was allocated here.
+# define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, s) \
+    __msan_allocated_memory((p), (s))
+// Marks a memory range as initialized.
+#define MSAN_MEMORY_IS_INITIALIZED(p, s) __msan_unpoison((p), (s))
 #else
-# define MSAN_MEMORY_IS_INITIALIZED_IN_JIT(p, s)
+# define MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, s)
+#define MSAN_MEMORY_IS_INITIALIZED(p, s)
 #endif
 
 #endif  // V8_MSAN_H_
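
Splitting the old JIT-only macro into an allocate/initialize pair lets call sites express both MSan state transitions. A rough illustration of the intended usage, with a hypothetical allocator standing in for V8's:

    #include <cstdlib>
    #include <cstring>
    #include "src/msan.h"

    // Hypothetical allocator: tell MSan the block is uninitialized, as
    // if freshly allocated here, even if the underlying memory was
    // recycled from an initialized allocation.
    char* AllocateRaw(size_t size) {
      char* p = static_cast<char*>(std::malloc(size));
      MSAN_ALLOCATED_UNINITIALIZED_MEMORY(p, size);
      return p;
    }

    // Once the range has genuinely been written (e.g. by generated
    // code), mark it initialized so later reads are not flagged.
    void Publish(char* p, size_t size) {
      std::memset(p, 0, size);
      MSAN_MEMORY_IS_INITIALIZED(p, size);
    }
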
diff --git a/src/natives-external.cc b/src/natives-external.cc
new file mode 100644
index 0000000..fc66149
--- /dev/null
+++ b/src/natives-external.cc
@@ -0,0 +1,198 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/natives.h"
+
+#include "src/base/logging.h"
+#include "src/list.h"
+#include "src/list-inl.h"
+#include "src/snapshot-source-sink.h"
+#include "src/vector.h"
+
+namespace v8 {
+namespace internal {
+
+
+/**
+ * NativesStore stores the 'native' (builtin) JS libraries.
+ *
+ * NativesStore needs to be initialized before using V8, usually by the
+ * embedder calling v8::SetNativesDataBlob, which calls SetNativesFromFile
+ * below.
+ */
+class NativesStore {
+ public:
+  ~NativesStore() {}
+
+  int GetBuiltinsCount() { return native_names_.length(); }
+  int GetDebuggerCount() { return debugger_count_; }
+  Vector<const char> GetScriptName(int index) { return native_names_[index]; }
+  Vector<const char> GetRawScriptSource(int index) {
+    return native_source_[index];
+  }
+
+  int GetIndex(const char* name) {
+    for (int i = 0; i < native_names_.length(); ++i) {
+      int native_name_length = native_names_[i].length();
+      if ((static_cast<int>(strlen(name)) == native_name_length) &&
+          (strncmp(name, native_names_[i].start(), native_name_length) == 0)) {
+        return i;
+      }
+    }
+    DCHECK(false);
+    return -1;
+  }
+
+  int GetRawScriptsSize() {
+    DCHECK(false);  // Used for compression. Doesn't really make sense here.
+    return 0;
+  }
+
+  Vector<const byte> GetScriptsSource() {
+    DCHECK(false);  // Used for compression. Doesn't really make sense here.
+    return Vector<const byte>();
+  }
+
+  static NativesStore* MakeFromScriptsSource(SnapshotByteSource* source) {
+    NativesStore* store = new NativesStore;
+
+    // We expect the libraries in the following format:
+    //   int: # of debugger sources.
+    //   2N blobs: N pairs of source name + actual source.
+    //   then, repeat for non-debugger sources.
+    int debugger_count = source->GetInt();
+    for (int i = 0; i < debugger_count; ++i)
+      store->ReadNameAndContentPair(source);
+    int library_count = source->GetInt();
+    for (int i = 0; i < library_count; ++i)
+      store->ReadNameAndContentPair(source);
+
+    store->debugger_count_ = debugger_count;
+    return store;
+  }
+
+ private:
+  NativesStore() : debugger_count_(0) {}
+
+  bool ReadNameAndContentPair(SnapshotByteSource* bytes) {
+    const byte* name;
+    int name_length;
+    const byte* source;
+    int source_length;
+    bool success = bytes->GetBlob(&name, &name_length) &&
+                   bytes->GetBlob(&source, &source_length);
+    if (success) {
+      Vector<const char> name_vector(
+          reinterpret_cast<const char*>(name), name_length);
+      Vector<const char> source_vector(
+          reinterpret_cast<const char*>(source), source_length);
+      native_names_.Add(name_vector);
+      native_source_.Add(source_vector);
+    }
+    return success;
+  }
+
+  List<Vector<const char> > native_names_;
+  List<Vector<const char> > native_source_;
+  int debugger_count_;
+
+  DISALLOW_COPY_AND_ASSIGN(NativesStore);
+};
+
+
+template<NativeType type>
+class NativesHolder {
+ public:
+  static NativesStore* get() {
+    DCHECK(holder_);
+    return holder_;
+  }
+  static void set(NativesStore* store) {
+    DCHECK(store);
+    holder_ = store;
+  }
+
+ private:
+  static NativesStore* holder_;
+};
+
+template<NativeType type>
+NativesStore* NativesHolder<type>::holder_ = NULL;
+
+
+/**
+ * Read the Natives (library sources) blob, as generated by js2c + the build
+ * system.
+ */
+void SetNativesFromFile(StartupData* natives_blob) {
+  DCHECK(natives_blob);
+  DCHECK(natives_blob->data);
+  DCHECK(natives_blob->raw_size > 0);
+
+  SnapshotByteSource bytes(
+      reinterpret_cast<const byte*>(natives_blob->data),
+      natives_blob->raw_size);
+  NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes));
+  NativesHolder<EXPERIMENTAL>::set(NativesStore::MakeFromScriptsSource(&bytes));
+  DCHECK(!bytes.HasMore());
+}
+
+
+// Implement NativesCollection<T> bsaed on NativesHolder + NativesStore.
+//
+// (The callers expect a purely static interface, since this is how the
+//  natives are usually compiled in. Since we implement them based on
+//  runtime content, we have to implement this indirection to offer
+//  a static interface.)
+template<NativeType type>
+int NativesCollection<type>::GetBuiltinsCount() {
+  return NativesHolder<type>::get()->GetBuiltinsCount();
+}
+
+template<NativeType type>
+int NativesCollection<type>::GetDebuggerCount() {
+  return NativesHolder<type>::get()->GetDebuggerCount();
+}
+
+template<NativeType type>
+int NativesCollection<type>::GetIndex(const char* name) {
+  return NativesHolder<type>::get()->GetIndex(name);
+}
+
+template<NativeType type>
+int NativesCollection<type>::GetRawScriptsSize() {
+  return NativesHolder<type>::get()->GetRawScriptsSize();
+}
+
+template<NativeType type>
+Vector<const char> NativesCollection<type>::GetRawScriptSource(int index) {
+  return NativesHolder<type>::get()->GetRawScriptSource(index);
+}
+
+template<NativeType type>
+Vector<const char> NativesCollection<type>::GetScriptName(int index) {
+  return NativesHolder<type>::get()->GetScriptName(index);
+}
+
+template<NativeType type>
+Vector<const byte> NativesCollection<type>::GetScriptsSource() {
+  return NativesHolder<type>::get()->GetScriptsSource();
+}
+
+template<NativeType type>
+void NativesCollection<type>::SetRawScriptsSource(
+    Vector<const char> raw_source) {
+  CHECK(false);  // Use SetNativesFromFile for this implementation.
+}
+
+
+// The compiler can't 'see' all uses of the static methods and hence
+// may choose to elide them. Thus we explicitly instantiate these.
+template class NativesCollection<CORE>;
+template class NativesCollection<EXPERIMENTAL>;
+template class NativesCollection<D8>;
+template class NativesCollection<TEST>;
+
+}  // namespace v8::internal
+}  // namespace v8
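
MakeFromScriptsSource above fixes the blob's logical layout: a debugger-source count, that many name/source blob pairs, then a library count and its pairs. A hypothetical writer in that shape (plain length-prefixed encoding assumed here purely for illustration; the real byte format is whatever SnapshotByteSink/SnapshotByteSource in src/snapshot-source-sink.h implement):

    #include <cstddef>
    #include <string>
    #include <vector>

    struct Source { std::string name, body; };

    static void PutInt(std::vector<char>* out, int value) {
      const char* p = reinterpret_cast<const char*>(&value);
      out->insert(out->end(), p, p + sizeof(value));  // assumed encoding
    }

    static void PutBlob(std::vector<char>* out, const std::string& bytes) {
      PutInt(out, static_cast<int>(bytes.size()));
      out->insert(out->end(), bytes.begin(), bytes.end());
    }

    std::vector<char> BuildNativesBlob(const std::vector<Source>& debugger,
                                       const std::vector<Source>& libs) {
      std::vector<char> blob;
      PutInt(&blob, static_cast<int>(debugger.size()));
      for (size_t i = 0; i < debugger.size(); ++i) {  // debugger first
        PutBlob(&blob, debugger[i].name);
        PutBlob(&blob, debugger[i].body);
      }
      PutInt(&blob, static_cast<int>(libs.size()));
      for (size_t i = 0; i < libs.size(); ++i) {      // then the rest
        PutBlob(&blob, libs[i].name);
        PutBlob(&blob, libs[i].body);
      }
      return blob;
    }
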
diff --git a/src/natives.h b/src/natives.h
index 2f930dc..6ddedf0 100644
--- a/src/natives.h
+++ b/src/natives.h
@@ -5,6 +5,10 @@
 #ifndef V8_NATIVES_H_
 #define V8_NATIVES_H_
 
+#include "src/vector.h"
+
+namespace v8 { class StartupData; }  // Forward declaration.
+
 namespace v8 {
 namespace internal {
 
@@ -39,6 +43,11 @@
 typedef NativesCollection<CORE> Natives;
 typedef NativesCollection<EXPERIMENTAL> ExperimentalNatives;
 
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+// Used for reading the natives at runtime. Implementation in
+// natives-external.cc.
+void SetNativesFromFile(StartupData* natives_blob);
+#endif
+
 } }  // namespace v8::internal
 
 #endif  // V8_NATIVES_H_
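
Per the NativesStore comment in natives-external.cc, the embedder is expected to hand this blob to V8 through v8::V8::SetNativesDataBlob before initialization, which routes to SetNativesFromFile. A sketch of that call site, assuming the blob was built as natives_blob.bin (file name and error handling illustrative):

    #include <cstdio>
    #include <vector>
    #include "include/v8.h"

    static std::vector<char> g_natives;  // backing store must stay alive

    bool LoadNativesBlob(const char* path) {
      std::FILE* f = std::fopen(path, "rb");
      if (f == NULL) return false;
      std::fseek(f, 0, SEEK_END);
      long size = std::ftell(f);
      std::rewind(f);
      if (size <= 0) { std::fclose(f); return false; }
      g_natives.resize(static_cast<size_t>(size));
      size_t read = std::fread(&g_natives[0], 1, g_natives.size(), f);
      std::fclose(f);
      if (read != g_natives.size()) return false;

      v8::StartupData blob;
      blob.data = &g_natives[0];
      blob.raw_size = static_cast<int>(g_natives.size());
      // Must happen before v8::V8::Initialize(); NativesStore keeps
      // pointers into the blob, hence the long-lived buffer above.
      v8::V8::SetNativesDataBlob(&blob);
      return true;
    }
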
diff --git a/src/object-observe.js b/src/object-observe.js
index 2dfc752..76f3915 100644
--- a/src/object-observe.js
+++ b/src/object-observe.js
@@ -45,6 +45,7 @@
     observationState.notifierObjectInfoMap = %ObservationWeakMapCreate();
     observationState.pendingObservers = null;
     observationState.nextCallbackPriority = 0;
+    observationState.lastMicrotaskId = 0;
   }
 
   return observationState;
@@ -421,7 +422,18 @@
   var callbackInfo = CallbackInfoNormalize(callback);
   if (IS_NULL(GetPendingObservers())) {
     SetPendingObservers(nullProtoObject());
-    %EnqueueMicrotask(ObserveMicrotaskRunner);
+    if (DEBUG_IS_ACTIVE) {
+      var id = ++GetObservationStateJS().lastMicrotaskId;
+      var name = "Object.observe";
+      %EnqueueMicrotask(function() {
+        %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
+        ObserveMicrotaskRunner();
+        %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
+      });
+      %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+    } else {
+      %EnqueueMicrotask(ObserveMicrotaskRunner);
+    }
   }
   GetPendingObservers()[callbackInfo.priority] = callback;
   callbackInfo.push(changeRecord);
@@ -438,8 +450,8 @@
 
   for (var prop in changeRecord) {
     if (prop === 'object' || (hasType && prop === 'type')) continue;
-    %DefineOrRedefineDataProperty(newRecord, prop, changeRecord[prop],
-        READ_ONLY + DONT_DELETE);
+    %DefineDataPropertyUnchecked(
+        newRecord, prop, changeRecord[prop], READ_ONLY + DONT_DELETE);
   }
   ObjectFreezeJS(newRecord);
 
diff --git a/src/objects-debug.cc b/src/objects-debug.cc
index 877a9d5..a2395de 100644
--- a/src/objects-debug.cc
+++ b/src/objects-debug.cc
@@ -4,11 +4,12 @@
 
 #include "src/v8.h"
 
-#include "src/disassembler.h"
 #include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/heap/objects-visiting.h"
 #include "src/jsregexp.h"
 #include "src/macro-assembler.h"
-#include "src/objects-visiting.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -54,6 +55,7 @@
       Map::cast(this)->MapVerify();
       break;
     case HEAP_NUMBER_TYPE:
+    case MUTABLE_HEAP_NUMBER_TYPE:
       HeapNumber::cast(this)->HeapNumberVerify();
       break;
     case FIXED_ARRAY_TYPE:
@@ -205,7 +207,7 @@
 
 
 void HeapNumber::HeapNumberVerify() {
-  CHECK(IsHeapNumber());
+  CHECK(IsHeapNumber() || IsMutableHeapNumber());
 }
 
 
@@ -263,10 +265,10 @@
         Representation r = descriptors->GetDetails(i).representation();
         FieldIndex index = FieldIndex::ForDescriptor(map(), i);
         Object* value = RawFastPropertyAt(index);
-        if (r.IsDouble()) ASSERT(value->IsHeapNumber());
+        if (r.IsDouble()) DCHECK(value->IsMutableHeapNumber());
         if (value->IsUninitialized()) continue;
-        if (r.IsSmi()) ASSERT(value->IsSmi());
-        if (r.IsHeapObject()) ASSERT(value->IsHeapObject());
+        if (r.IsSmi()) DCHECK(value->IsSmi());
+        if (r.IsHeapObject()) DCHECK(value->IsHeapObject());
         HeapType* field_type = descriptors->GetFieldType(i);
         if (r.IsNone()) {
           CHECK(field_type->Is(HeapType::None()));
@@ -298,17 +300,17 @@
           instance_size() < heap->Capacity()));
   VerifyHeapPointer(prototype());
   VerifyHeapPointer(instance_descriptors());
-  SLOW_ASSERT(instance_descriptors()->IsSortedNoDuplicates());
+  SLOW_DCHECK(instance_descriptors()->IsSortedNoDuplicates());
   if (HasTransitionArray()) {
-    SLOW_ASSERT(transitions()->IsSortedNoDuplicates());
-    SLOW_ASSERT(transitions()->IsConsistentWithBackPointers(this));
+    SLOW_DCHECK(transitions()->IsSortedNoDuplicates());
+    SLOW_DCHECK(transitions()->IsConsistentWithBackPointers(this));
   }
 }
 
 
-void Map::SharedMapVerify() {
+void Map::DictionaryMapVerify() {
   MapVerify();
-  CHECK(is_shared());
+  CHECK(is_dictionary_map());
   CHECK(instance_descriptors()->IsEmpty());
   CHECK_EQ(0, pre_allocated_property_fields());
   CHECK_EQ(0, unused_property_fields());
@@ -347,6 +349,7 @@
 void TypeFeedbackInfo::TypeFeedbackInfoVerify() {
   VerifyObjectField(kStorage1Offset);
   VerifyObjectField(kStorage2Offset);
+  VerifyObjectField(kStorage3Offset);
 }
 
 
@@ -368,9 +371,9 @@
     if (!is_the_hole(i)) {
       double value = get_scalar(i);
       CHECK(!std::isnan(value) ||
-             (BitCast<uint64_t>(value) ==
-              BitCast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
-             ((BitCast<uint64_t>(value) & Double::kSignMask) != 0));
+            (bit_cast<uint64_t>(value) ==
+             bit_cast<uint64_t>(canonical_not_the_hole_nan_as_double())) ||
+            ((bit_cast<uint64_t>(value) & Double::kSignMask) != 0));
     }
   }
 }
@@ -638,6 +641,8 @@
       last_gc_pc = it.rinfo()->pc();
     }
   }
+  CHECK(raw_type_feedback_info() == Smi::FromInt(0) ||
+        raw_type_feedback_info()->IsSmi() == IsCodeStubOrIC());
 }
 
 
@@ -747,19 +752,21 @@
       bool is_native = RegExpImpl::UsesNativeRegExp();
 
       FixedArray* arr = FixedArray::cast(data());
-      Object* ascii_data = arr->get(JSRegExp::kIrregexpASCIICodeIndex);
+      Object* one_byte_data = arr->get(JSRegExp::kIrregexpLatin1CodeIndex);
       // Smi : Not compiled yet (-1) or code prepared for flushing.
       // JSObject: Compilation error.
       // Code/ByteArray: Compiled code.
-      CHECK(ascii_data->IsSmi() ||
-             (is_native ? ascii_data->IsCode() : ascii_data->IsByteArray()));
+      CHECK(
+          one_byte_data->IsSmi() ||
+          (is_native ? one_byte_data->IsCode() : one_byte_data->IsByteArray()));
       Object* uc16_data = arr->get(JSRegExp::kIrregexpUC16CodeIndex);
       CHECK(uc16_data->IsSmi() ||
              (is_native ? uc16_data->IsCode() : uc16_data->IsByteArray()));
 
-      Object* ascii_saved = arr->get(JSRegExp::kIrregexpASCIICodeSavedIndex);
-      CHECK(ascii_saved->IsSmi() || ascii_saved->IsString() ||
-             ascii_saved->IsCode());
+      Object* one_byte_saved =
+          arr->get(JSRegExp::kIrregexpLatin1CodeSavedIndex);
+      CHECK(one_byte_saved->IsSmi() || one_byte_saved->IsString() ||
+            one_byte_saved->IsCode());
       Object* uc16_saved = arr->get(JSRegExp::kIrregexpUC16CodeSavedIndex);
       CHECK(uc16_saved->IsSmi() || uc16_saved->IsString() ||
              uc16_saved->IsCode());
@@ -878,7 +885,6 @@
   CHECK(IsAccessorPair());
   VerifyPointer(getter());
   VerifyPointer(setter());
-  VerifySmiField(kAccessFlagsOffset);
 }
 
 
@@ -1008,7 +1014,7 @@
     for (int i = 0; i < length(); i++) {
       Object* e = FixedArray::get(i);
       if (e->IsMap()) {
-        Map::cast(e)->SharedMapVerify();
+        Map::cast(e)->DictionaryMapVerify();
       } else {
         CHECK(e->IsUndefined());
       }
@@ -1140,13 +1146,15 @@
   for (int i = 0; i < number_of_descriptors(); i++) {
     Name* key = GetSortedKey(i);
     if (key == current_key) {
-      PrintDescriptors();
+      OFStream os(stdout);
+      PrintDescriptors(os);
       return false;
     }
     current_key = key;
     uint32_t hash = GetSortedKey(i)->Hash();
     if (hash < current) {
-      PrintDescriptors();
+      OFStream os(stdout);
+      PrintDescriptors(os);
       return false;
     }
     current = hash;
@@ -1156,19 +1164,21 @@
 
 
 bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
-  ASSERT(valid_entries == -1);
+  DCHECK(valid_entries == -1);
   Name* current_key = NULL;
   uint32_t current = 0;
   for (int i = 0; i < number_of_transitions(); i++) {
     Name* key = GetSortedKey(i);
     if (key == current_key) {
-      PrintTransitions();
+      OFStream os(stdout);
+      PrintTransitions(os);
       return false;
     }
     current_key = key;
     uint32_t hash = GetSortedKey(i)->Hash();
     if (hash < current) {
-      PrintTransitions();
+      OFStream os(stdout);
+      PrintTransitions(os);
       return false;
     }
     current = hash;
diff --git a/src/objects-inl.h b/src/objects-inl.h
index 4848fad..e46dd8e 100644
--- a/src/objects-inl.h
+++ b/src/objects-inl.h
@@ -13,23 +13,26 @@
 #define V8_OBJECTS_INL_H_
 
 #include "src/base/atomicops.h"
-#include "src/elements.h"
-#include "src/objects.h"
+#include "src/base/bits.h"
 #include "src/contexts.h"
 #include "src/conversions-inl.h"
-#include "src/field-index-inl.h"
-#include "src/heap.h"
-#include "src/isolate.h"
-#include "src/heap-inl.h"
-#include "src/property.h"
-#include "src/spaces.h"
-#include "src/store-buffer.h"
-#include "src/v8memory.h"
+#include "src/elements.h"
 #include "src/factory.h"
-#include "src/incremental-marking.h"
-#include "src/transitions-inl.h"
-#include "src/objects-visiting.h"
+#include "src/field-index-inl.h"
+#include "src/heap/heap-inl.h"
+#include "src/heap/heap.h"
+#include "src/heap/incremental-marking.h"
+#include "src/heap/objects-visiting.h"
+#include "src/heap/spaces.h"
+#include "src/heap/store-buffer.h"
+#include "src/isolate.h"
 #include "src/lookup.h"
+#include "src/objects.h"
+#include "src/property.h"
+#include "src/prototype.h"
+#include "src/transitions-inl.h"
+#include "src/type-feedback-vector-inl.h"
+#include "src/v8memory.h"
 
 namespace v8 {
 namespace internal {
@@ -54,43 +57,47 @@
 
 
 #define TYPE_CHECKER(type, instancetype)                                \
-  bool Object::Is##type() {                                             \
+  bool Object::Is##type() const {                                       \
   return Object::IsHeapObject() &&                                      \
       HeapObject::cast(this)->map()->instance_type() == instancetype;   \
   }
 
 
-#define CAST_ACCESSOR(type)                     \
-  type* type::cast(Object* object) {            \
-    SLOW_ASSERT(object->Is##type());            \
-    return reinterpret_cast<type*>(object);     \
+#define CAST_ACCESSOR(type)                       \
+  type* type::cast(Object* object) {              \
+    SLOW_DCHECK(object->Is##type());              \
+    return reinterpret_cast<type*>(object);       \
+  }                                               \
+  const type* type::cast(const Object* object) {  \
+    SLOW_DCHECK(object->Is##type());              \
+    return reinterpret_cast<const type*>(object); \
   }
 
 
-#define INT_ACCESSORS(holder, name, offset)                             \
-  int holder::name() { return READ_INT_FIELD(this, offset); }           \
+#define INT_ACCESSORS(holder, name, offset)                                   \
+  int holder::name() const { return READ_INT_FIELD(this, offset); }           \
   void holder::set_##name(int value) { WRITE_INT_FIELD(this, offset, value); }
 
 
-#define ACCESSORS(holder, name, type, offset)                           \
-  type* holder::name() { return type::cast(READ_FIELD(this, offset)); } \
-  void holder::set_##name(type* value, WriteBarrierMode mode) {         \
-    WRITE_FIELD(this, offset, value);                                   \
-    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);    \
+#define ACCESSORS(holder, name, type, offset)                                 \
+  type* holder::name() const { return type::cast(READ_FIELD(this, offset)); } \
+  void holder::set_##name(type* value, WriteBarrierMode mode) {               \
+    WRITE_FIELD(this, offset, value);                                         \
+    CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);          \
   }
 
 
 // Getter that returns a tagged Smi and setter that writes a tagged Smi.
-#define ACCESSORS_TO_SMI(holder, name, offset)                          \
-  Smi* holder::name() { return Smi::cast(READ_FIELD(this, offset)); }   \
-  void holder::set_##name(Smi* value, WriteBarrierMode mode) {          \
-    WRITE_FIELD(this, offset, value);                                   \
+#define ACCESSORS_TO_SMI(holder, name, offset)                              \
+  Smi* holder::name() const { return Smi::cast(READ_FIELD(this, offset)); } \
+  void holder::set_##name(Smi* value, WriteBarrierMode mode) {              \
+    WRITE_FIELD(this, offset, value);                                       \
   }
 
 
 // Getter that returns a Smi as an int and writes an int as a Smi.
 #define SMI_ACCESSORS(holder, name, offset)             \
-  int holder::name() {                                  \
+  int holder::name() const {                            \
     Object* value = READ_FIELD(this, offset);           \
     return Smi::cast(value)->value();                   \
   }                                                     \
@@ -99,7 +106,7 @@
   }
 
 #define SYNCHRONIZED_SMI_ACCESSORS(holder, name, offset)    \
-  int holder::synchronized_##name() {                       \
+  int holder::synchronized_##name() const {                 \
     Object* value = ACQUIRE_READ_FIELD(this, offset);       \
     return Smi::cast(value)->value();                       \
   }                                                         \
@@ -108,7 +115,7 @@
   }
 
 #define NOBARRIER_SMI_ACCESSORS(holder, name, offset)          \
-  int holder::nobarrier_##name() {                             \
+  int holder::nobarrier_##name() const {                       \
     Object* value = NOBARRIER_READ_FIELD(this, offset);        \
     return Smi::cast(value)->value();                          \
   }                                                            \
@@ -117,13 +124,13 @@
   }
 
 #define BOOL_GETTER(holder, field, name, offset)           \
-  bool holder::name() {                                    \
+  bool holder::name() const {                              \
     return BooleanBit::get(field(), offset);               \
   }                                                        \
 
 
 #define BOOL_ACCESSORS(holder, field, name, offset)        \
-  bool holder::name() {                                    \
+  bool holder::name() const {                              \
     return BooleanBit::get(field(), offset);               \
   }                                                        \
   void holder::set_##name(bool value) {                    \
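
The rest of this hunk is one mechanical pattern: every read-only accessor gains a const qualifier, and CAST_ACCESSOR now also emits a const overload so a const Object* can be downcast without shedding the qualifier. Reduced to a toy (not V8's macro output):

    class Object {};
    class Smi;  // layout-compatible view over Object, as in V8

    // Mutable and const casts generated side by side; the const
    // overload is what the new const accessors call through.
    inline Smi* CastToSmi(Object* object) {
      return reinterpret_cast<Smi*>(object);
    }
    inline const Smi* CastToSmi(const Object* object) {
      return reinterpret_cast<const Smi*>(object);
    }
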
@@ -131,74 +138,75 @@
   }
 
 
-bool Object::IsFixedArrayBase() {
+bool Object::IsFixedArrayBase() const {
   return IsFixedArray() || IsFixedDoubleArray() || IsConstantPoolArray() ||
          IsFixedTypedArrayBase() || IsExternalArray();
 }
 
 
 // External objects are not extensible, so the map check is enough.
-bool Object::IsExternal() {
+bool Object::IsExternal() const {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
       HeapObject::cast(this)->GetHeap()->external_map();
 }
 
 
-bool Object::IsAccessorInfo() {
+bool Object::IsAccessorInfo() const {
   return IsExecutableAccessorInfo() || IsDeclaredAccessorInfo();
 }
 
 
-bool Object::IsSmi() {
+bool Object::IsSmi() const {
   return HAS_SMI_TAG(this);
 }
 
 
-bool Object::IsHeapObject() {
+bool Object::IsHeapObject() const {
   return Internals::HasHeapObjectTag(this);
 }
 
 
 TYPE_CHECKER(HeapNumber, HEAP_NUMBER_TYPE)
+TYPE_CHECKER(MutableHeapNumber, MUTABLE_HEAP_NUMBER_TYPE)
 TYPE_CHECKER(Symbol, SYMBOL_TYPE)
 
 
-bool Object::IsString() {
+bool Object::IsString() const {
   return Object::IsHeapObject()
     && HeapObject::cast(this)->map()->instance_type() < FIRST_NONSTRING_TYPE;
 }
 
 
-bool Object::IsName() {
+bool Object::IsName() const {
   return IsString() || IsSymbol();
 }
 
 
-bool Object::IsUniqueName() {
+bool Object::IsUniqueName() const {
   return IsInternalizedString() || IsSymbol();
 }
 
 
-bool Object::IsSpecObject() {
+bool Object::IsSpecObject() const {
   return Object::IsHeapObject()
     && HeapObject::cast(this)->map()->instance_type() >= FIRST_SPEC_OBJECT_TYPE;
 }
 
 
-bool Object::IsSpecFunction() {
+bool Object::IsSpecFunction() const {
   if (!Object::IsHeapObject()) return false;
   InstanceType type = HeapObject::cast(this)->map()->instance_type();
   return type == JS_FUNCTION_TYPE || type == JS_FUNCTION_PROXY_TYPE;
 }
 
 
-bool Object::IsTemplateInfo() {
+bool Object::IsTemplateInfo() const {
   return IsObjectTemplateInfo() || IsFunctionTemplateInfo();
 }
 
 
-bool Object::IsInternalizedString() {
+bool Object::IsInternalizedString() const {
   if (!this->IsHeapObject()) return false;
   uint32_t type = HeapObject::cast(this)->map()->instance_type();
   STATIC_ASSERT(kNotInternalizedTag != 0);
@@ -207,52 +215,52 @@
 }
 
 
-bool Object::IsConsString() {
+bool Object::IsConsString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsCons();
 }
 
 
-bool Object::IsSlicedString() {
+bool Object::IsSlicedString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSliced();
 }
 
 
-bool Object::IsSeqString() {
+bool Object::IsSeqString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSequential();
 }
 
 
-bool Object::IsSeqOneByteString() {
+bool Object::IsSeqOneByteString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSequential() &&
          String::cast(this)->IsOneByteRepresentation();
 }
 
 
-bool Object::IsSeqTwoByteString() {
+bool Object::IsSeqTwoByteString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsSequential() &&
          String::cast(this)->IsTwoByteRepresentation();
 }
 
 
-bool Object::IsExternalString() {
+bool Object::IsExternalString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsExternal();
 }
 
 
-bool Object::IsExternalAsciiString() {
+bool Object::IsExternalOneByteString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsExternal() &&
          String::cast(this)->IsOneByteRepresentation();
 }
 
 
-bool Object::IsExternalTwoByteString() {
+bool Object::IsExternalTwoByteString() const {
   if (!IsString()) return false;
   return StringShape(String::cast(this)).IsExternal() &&
          String::cast(this)->IsTwoByteRepresentation();
@@ -273,49 +281,66 @@
     return handle(Smi::FromInt(0), isolate);
   }
   if (!representation.IsDouble()) return object;
+  double value;
   if (object->IsUninitialized()) {
-    return isolate->factory()->NewHeapNumber(0);
+    value = 0;
+  } else if (object->IsMutableHeapNumber()) {
+    value = HeapNumber::cast(*object)->value();
+  } else {
+    value = object->Number();
   }
-  return isolate->factory()->NewHeapNumber(object->Number());
+  return isolate->factory()->NewHeapNumber(value, MUTABLE);
 }
 
 
-StringShape::StringShape(String* str)
+Handle<Object> Object::WrapForRead(Isolate* isolate,
+                                   Handle<Object> object,
+                                   Representation representation) {
+  DCHECK(!object->IsUninitialized());
+  if (!representation.IsDouble()) {
+    DCHECK(object->FitsRepresentation(representation));
+    return object;
+  }
+  return isolate->factory()->NewHeapNumber(HeapNumber::cast(*object)->value());
+}
+
+
+StringShape::StringShape(const String* str)
   : type_(str->map()->instance_type()) {
   set_valid();
-  ASSERT((type_ & kIsNotStringMask) == kStringTag);
+  DCHECK((type_ & kIsNotStringMask) == kStringTag);
 }
 
 
 StringShape::StringShape(Map* map)
   : type_(map->instance_type()) {
   set_valid();
-  ASSERT((type_ & kIsNotStringMask) == kStringTag);
+  DCHECK((type_ & kIsNotStringMask) == kStringTag);
 }
 
 
 StringShape::StringShape(InstanceType t)
   : type_(static_cast<uint32_t>(t)) {
   set_valid();
-  ASSERT((type_ & kIsNotStringMask) == kStringTag);
+  DCHECK((type_ & kIsNotStringMask) == kStringTag);
 }
 
 
 bool StringShape::IsInternalized() {
-  ASSERT(valid());
+  DCHECK(valid());
   STATIC_ASSERT(kNotInternalizedTag != 0);
   return (type_ & (kIsNotStringMask | kIsNotInternalizedMask)) ==
       (kStringTag | kInternalizedTag);
 }
 
 
-bool String::IsOneByteRepresentation() {
+bool String::IsOneByteRepresentation() const {
   uint32_t type = map()->instance_type();
   return (type & kStringEncodingMask) == kOneByteStringTag;
 }
 
 
-bool String::IsTwoByteRepresentation() {
+bool String::IsTwoByteRepresentation() const {
   uint32_t type = map()->instance_type();
   return (type & kStringEncodingMask) == kTwoByteStringTag;
 }
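
The WrapForRead/PrepareForDataProperty pair above encodes the new MUTABLE heap-number discipline: double-typed fields live in a mutable box that is updated in place, while reads hand out an immutable copy so callers never alias storage that may change under them. An analogy in plain C++ (illustrative only, not V8's representation):

    // Mutable box updated in place; reads copy the value out.
    struct MutableBox { double value; };

    inline double WrapForRead(const MutableBox& box) {
      return box.value;  // immutable snapshot, as NewHeapNumber(value)
    }

    inline void StoreDouble(MutableBox* box, double v) {
      box->value = v;  // in-place update, no fresh allocation per store
    }
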
@@ -325,7 +350,7 @@
   uint32_t type = map()->instance_type();
   STATIC_ASSERT(kIsIndirectStringTag != 0);
   STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
-  ASSERT(IsFlat());
+  DCHECK(IsFlat());
   switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
     case kOneByteStringTag:
       return true;
@@ -341,7 +366,7 @@
   uint32_t type = map()->instance_type();
   STATIC_ASSERT(kIsIndirectStringTag != 0);
   STATIC_ASSERT((kIsIndirectStringMask & kStringEncodingMask) == 0);
-  ASSERT(IsFlat());
+  DCHECK(IsFlat());
   switch (type & (kIsIndirectStringMask | kStringEncodingMask)) {
     case kOneByteStringTag:
       return false;
@@ -408,7 +433,7 @@
              Internals::kStringEncodingMask);
 
 
-bool StringShape::IsSequentialAscii() {
+bool StringShape::IsSequentialOneByte() {
   return full_representation_tag() == (kSeqStringTag | kOneByteStringTag);
 }
 
@@ -418,15 +443,15 @@
 }
 
 
-bool StringShape::IsExternalAscii() {
+bool StringShape::IsExternalOneByte() {
   return full_representation_tag() == (kExternalStringTag | kOneByteStringTag);
 }
 
 
 STATIC_ASSERT((kExternalStringTag | kOneByteStringTag) ==
-             Internals::kExternalAsciiRepresentationTag);
+              Internals::kExternalOneByteRepresentationTag);
 
-STATIC_ASSERT(v8::String::ASCII_ENCODING == kOneByteStringTag);
+STATIC_ASSERT(v8::String::ONE_BYTE_ENCODING == kOneByteStringTag);
 
 
 bool StringShape::IsExternalTwoByte() {
@@ -440,8 +465,8 @@
 STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
 
 uc32 FlatStringReader::Get(int index) {
-  ASSERT(0 <= index && index <= length_);
-  if (is_ascii_) {
+  DCHECK(0 <= index && index <= length_);
+  if (is_one_byte_) {
     return static_cast<const byte*>(start_)[index];
   } else {
     return static_cast<const uc16*>(start_)[index];
@@ -476,18 +501,18 @@
   explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
       : string_(string), hash_field_(0), seed_(seed) { }
 
-  virtual uint32_t Hash() V8_OVERRIDE {
+  virtual uint32_t Hash() OVERRIDE {
     hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
                                                            string_.length(),
                                                            seed_);
 
     uint32_t result = hash_field_ >> String::kHashShift;
-    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
+    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
     return result;
   }
 
 
-  virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+  virtual uint32_t HashForObject(Object* other) OVERRIDE {
     return String::cast(other)->Hash();
   }
 
@@ -502,55 +527,41 @@
   OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
       : SequentialStringKey<uint8_t>(str, seed) { }
 
-  virtual bool IsMatch(Object* string) V8_OVERRIDE {
+  virtual bool IsMatch(Object* string) OVERRIDE {
     return String::cast(string)->IsOneByteEqualTo(string_);
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
 };
 
 
-template<class Char>
-class SubStringKey : public HashTableKey {
+class SeqOneByteSubStringKey : public HashTableKey {
  public:
-  SubStringKey(Handle<String> string, int from, int length)
+  SeqOneByteSubStringKey(Handle<SeqOneByteString> string, int from, int length)
       : string_(string), from_(from), length_(length) {
-    if (string_->IsSlicedString()) {
-      string_ = Handle<String>(Unslice(*string_, &from_));
-    }
-    ASSERT(string_->IsSeqString() || string->IsExternalString());
+    DCHECK(string_->IsSeqOneByteString());
   }
 
-  virtual uint32_t Hash() V8_OVERRIDE {
-    ASSERT(length_ >= 0);
-    ASSERT(from_ + length_ <= string_->length());
-    const Char* chars = GetChars() + from_;
+  virtual uint32_t Hash() OVERRIDE {
+    DCHECK(length_ >= 0);
+    DCHECK(from_ + length_ <= string_->length());
+    const uint8_t* chars = string_->GetChars() + from_;
     hash_field_ = StringHasher::HashSequentialString(
         chars, length_, string_->GetHeap()->HashSeed());
     uint32_t result = hash_field_ >> String::kHashShift;
-    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
+    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
     return result;
   }
 
-  virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+  virtual uint32_t HashForObject(Object* other) OVERRIDE {
     return String::cast(other)->Hash();
   }
 
-  virtual bool IsMatch(Object* string) V8_OVERRIDE;
-  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+  virtual bool IsMatch(Object* string) OVERRIDE;
+  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
 
  private:
-  const Char* GetChars();
-  String* Unslice(String* string, int* offset) {
-    while (string->IsSlicedString()) {
-      SlicedString* sliced = SlicedString::cast(string);
-      *offset += sliced->offset();
-      string = sliced->parent();
-    }
-    return string;
-  }
-
-  Handle<String> string_;
+  Handle<SeqOneByteString> string_;
   int from_;
   int length_;
   uint32_t hash_field_;
@@ -562,11 +573,11 @@
   explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
       : SequentialStringKey<uc16>(str, seed) { }
 
-  virtual bool IsMatch(Object* string) V8_OVERRIDE {
+  virtual bool IsMatch(Object* string) OVERRIDE {
     return String::cast(string)->IsTwoByteEqualTo(string_);
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE;
+  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
 };
 
 
@@ -576,23 +587,23 @@
   explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
       : string_(string), hash_field_(0), seed_(seed) { }
 
-  virtual bool IsMatch(Object* string) V8_OVERRIDE {
+  virtual bool IsMatch(Object* string) OVERRIDE {
     return String::cast(string)->IsUtf8EqualTo(string_);
   }
 
-  virtual uint32_t Hash() V8_OVERRIDE {
+  virtual uint32_t Hash() OVERRIDE {
     if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
     hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
     uint32_t result = hash_field_ >> String::kHashShift;
-    ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
+    DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
     return result;
   }
 
-  virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+  virtual uint32_t HashForObject(Object* other) OVERRIDE {
     return String::cast(other)->Hash();
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     if (hash_field_ == 0) Hash();
     return isolate->factory()->NewInternalizedStringFromUtf8(
         string_, chars_, hash_field_);
@@ -605,7 +616,7 @@
 };
 
 
-bool Object::IsNumber() {
+bool Object::IsNumber() const {
   return IsSmi() || IsHeapNumber();
 }
 
@@ -614,14 +625,14 @@
 TYPE_CHECKER(FreeSpace, FREE_SPACE_TYPE)
 
 
-bool Object::IsFiller() {
+bool Object::IsFiller() const {
   if (!Object::IsHeapObject()) return false;
   InstanceType instance_type = HeapObject::cast(this)->map()->instance_type();
   return instance_type == FREE_SPACE_TYPE || instance_type == FILLER_TYPE;
 }
 
 
-bool Object::IsExternalArray() {
+bool Object::IsExternalArray() const {
   if (!Object::IsHeapObject())
     return false;
   InstanceType instance_type =
@@ -639,7 +650,7 @@
 #undef TYPED_ARRAY_TYPE_CHECKER
 
 
-bool Object::IsFixedTypedArrayBase() {
+bool Object::IsFixedTypedArrayBase() const {
   if (!Object::IsHeapObject()) return false;
 
   InstanceType instance_type =
@@ -649,21 +660,21 @@
 }
 
 
-bool Object::IsJSReceiver() {
+bool Object::IsJSReceiver() const {
   STATIC_ASSERT(LAST_JS_RECEIVER_TYPE == LAST_TYPE);
   return IsHeapObject() &&
       HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_RECEIVER_TYPE;
 }
 
 
-bool Object::IsJSObject() {
+bool Object::IsJSObject() const {
   STATIC_ASSERT(LAST_JS_OBJECT_TYPE == LAST_TYPE);
   return IsHeapObject() &&
       HeapObject::cast(this)->map()->instance_type() >= FIRST_JS_OBJECT_TYPE;
 }
 
 
-bool Object::IsJSProxy() {
+bool Object::IsJSProxy() const {
   if (!Object::IsHeapObject()) return false;
   return  HeapObject::cast(this)->map()->IsJSProxyMap();
 }
@@ -683,22 +694,25 @@
 TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)
 
 
-bool Object::IsJSWeakCollection() {
+bool Object::IsJSWeakCollection() const {
   return IsJSWeakMap() || IsJSWeakSet();
 }
 
 
-bool Object::IsDescriptorArray() {
+bool Object::IsDescriptorArray() const {
   return IsFixedArray();
 }
 
 
-bool Object::IsTransitionArray() {
+bool Object::IsTransitionArray() const {
   return IsFixedArray();
 }
 
 
-bool Object::IsDeoptimizationInputData() {
+bool Object::IsTypeFeedbackVector() const { return IsFixedArray(); }
+
+
+bool Object::IsDeoptimizationInputData() const {
   // Must be a fixed array.
   if (!IsFixedArray()) return false;
 
@@ -710,12 +724,11 @@
   if (length == 0) return true;
 
   length -= DeoptimizationInputData::kFirstDeoptEntryIndex;
-  return length >= 0 &&
-      length % DeoptimizationInputData::kDeoptEntrySize == 0;
+  return length >= 0 && length % DeoptimizationInputData::kDeoptEntrySize == 0;
 }
 
 
-bool Object::IsDeoptimizationOutputData() {
+bool Object::IsDeoptimizationOutputData() const {
   if (!IsFixedArray()) return false;
   // There's actually no way to see the difference between a fixed array and
   // a deoptimization data array.  Since this is used for asserts we can check
@@ -725,7 +738,7 @@
 }
 
 
-bool Object::IsDependentCode() {
+bool Object::IsDependentCode() const {
   if (!IsFixedArray()) return false;
   // There's actually no way to see the difference between a fixed array and
   // a dependent codes array.
@@ -733,7 +746,7 @@
 }
 
 
-bool Object::IsContext() {
+bool Object::IsContext() const {
   if (!Object::IsHeapObject()) return false;
   Map* map = HeapObject::cast(this)->map();
   Heap* heap = map->GetHeap();
@@ -747,14 +760,14 @@
 }
 
 
-bool Object::IsNativeContext() {
+bool Object::IsNativeContext() const {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
       HeapObject::cast(this)->GetHeap()->native_context_map();
 }
 
 
-bool Object::IsScopeInfo() {
+bool Object::IsScopeInfo() const {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
       HeapObject::cast(this)->GetHeap()->scope_info_map();
@@ -781,7 +794,7 @@
 TYPE_CHECKER(JSMessageObject, JS_MESSAGE_OBJECT_TYPE)
 
 
-bool Object::IsStringWrapper() {
+bool Object::IsStringWrapper() const {
   return IsJSValue() && JSValue::cast(this)->value()->IsString();
 }
 
@@ -789,7 +802,7 @@
 TYPE_CHECKER(Foreign, FOREIGN_TYPE)
 
 
-bool Object::IsBoolean() {
+bool Object::IsBoolean() const {
   return IsOddball() &&
       ((Oddball::cast(this)->kind() & Oddball::kNotBooleanMask) == 0);
 }
@@ -801,7 +814,7 @@
 TYPE_CHECKER(JSDataView, JS_DATA_VIEW_TYPE)
 
 
-bool Object::IsJSArrayBufferView() {
+bool Object::IsJSArrayBufferView() const {
   return IsJSDataView() || IsJSTypedArray();
 }
 
@@ -814,27 +827,47 @@
 }
 
 
-bool Object::IsHashTable() {
+bool Object::IsHashTable() const {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
       HeapObject::cast(this)->GetHeap()->hash_table_map();
 }
 
 
-bool Object::IsDictionary() {
+bool Object::IsWeakHashTable() const {
+  return IsHashTable();
+}
+
+
+bool Object::IsDictionary() const {
   return IsHashTable() &&
       this != HeapObject::cast(this)->GetHeap()->string_table();
 }
 
 
-bool Object::IsStringTable() {
+bool Object::IsNameDictionary() const {
+  return IsDictionary();
+}
+
+
+bool Object::IsSeededNumberDictionary() const {
+  return IsDictionary();
+}
+
+
+bool Object::IsUnseededNumberDictionary() const {
+  return IsDictionary();
+}
+
+
+bool Object::IsStringTable() const {
   return IsHashTable();
 }
 
 
-bool Object::IsJSFunctionResultCache() {
+bool Object::IsJSFunctionResultCache() const {
   if (!IsFixedArray()) return false;
-  FixedArray* self = FixedArray::cast(this);
+  const FixedArray* self = FixedArray::cast(this);
   int length = self->length();
   if (length < JSFunctionResultCache::kEntriesIndex) return false;
   if ((length - JSFunctionResultCache::kEntriesIndex)
@@ -843,7 +876,10 @@
   }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
-    reinterpret_cast<JSFunctionResultCache*>(this)->
+    // TODO(svenpanne) We use const_cast here and below to break our dependency
+    // cycle between the predicates and the verifiers. This can be removed when
+    // the verifiers are const-correct, too.
+    reinterpret_cast<JSFunctionResultCache*>(const_cast<Object*>(this))->
         JSFunctionResultCacheVerify();
   }
 #endif
@@ -851,7 +887,7 @@
 }
 
 
-bool Object::IsNormalizedMapCache() {
+bool Object::IsNormalizedMapCache() const {
   return NormalizedMapCache::IsNormalizedMapCache(this);
 }
 
@@ -861,68 +897,79 @@
 }
 
 
-bool NormalizedMapCache::IsNormalizedMapCache(Object* obj) {
+bool NormalizedMapCache::IsNormalizedMapCache(const Object* obj) {
   if (!obj->IsFixedArray()) return false;
   if (FixedArray::cast(obj)->length() != NormalizedMapCache::kEntries) {
     return false;
   }
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
-    reinterpret_cast<NormalizedMapCache*>(obj)->NormalizedMapCacheVerify();
+    reinterpret_cast<NormalizedMapCache*>(const_cast<Object*>(obj))->
+        NormalizedMapCacheVerify();
   }
 #endif
   return true;
 }
 
 
-bool Object::IsCompilationCacheTable() {
+bool Object::IsCompilationCacheTable() const {
   return IsHashTable();
 }
 
 
-bool Object::IsCodeCacheHashTable() {
+bool Object::IsCodeCacheHashTable() const {
   return IsHashTable();
 }
 
 
-bool Object::IsPolymorphicCodeCacheHashTable() {
+bool Object::IsPolymorphicCodeCacheHashTable() const {
   return IsHashTable();
 }
 
 
-bool Object::IsMapCache() {
+bool Object::IsMapCache() const {
   return IsHashTable();
 }
 
 
-bool Object::IsObjectHashTable() {
+bool Object::IsObjectHashTable() const {
   return IsHashTable();
 }
 
 
-bool Object::IsOrderedHashTable() {
+bool Object::IsOrderedHashTable() const {
   return IsHeapObject() &&
       HeapObject::cast(this)->map() ==
       HeapObject::cast(this)->GetHeap()->ordered_hash_table_map();
 }
 
 
-bool Object::IsPrimitive() {
+bool Object::IsOrderedHashSet() const {
+  return IsOrderedHashTable();
+}
+
+
+bool Object::IsOrderedHashMap() const {
+  return IsOrderedHashTable();
+}
+
+
+bool Object::IsPrimitive() const {
   return IsOddball() || IsNumber() || IsString();
 }
 
 
-bool Object::IsJSGlobalProxy() {
+bool Object::IsJSGlobalProxy() const {
   bool result = IsHeapObject() &&
                 (HeapObject::cast(this)->map()->instance_type() ==
                  JS_GLOBAL_PROXY_TYPE);
-  ASSERT(!result ||
+  DCHECK(!result ||
          HeapObject::cast(this)->map()->is_access_check_needed());
   return result;
 }
 
 
-bool Object::IsGlobalObject() {
+bool Object::IsGlobalObject() const {
   if (!IsHeapObject()) return false;
 
   InstanceType type = HeapObject::cast(this)->map()->instance_type();
@@ -935,25 +982,24 @@
 TYPE_CHECKER(JSBuiltinsObject, JS_BUILTINS_OBJECT_TYPE)
 
 
-bool Object::IsUndetectableObject() {
+bool Object::IsUndetectableObject() const {
   return IsHeapObject()
     && HeapObject::cast(this)->map()->is_undetectable();
 }
 
 
-bool Object::IsAccessCheckNeeded() {
+bool Object::IsAccessCheckNeeded() const {
   if (!IsHeapObject()) return false;
   if (IsJSGlobalProxy()) {
-    JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
-    GlobalObject* global =
-        proxy->GetIsolate()->context()->global_object();
+    const JSGlobalProxy* proxy = JSGlobalProxy::cast(this);
+    GlobalObject* global = proxy->GetIsolate()->context()->global_object();
     return proxy->IsDetachedFrom(global);
   }
   return HeapObject::cast(this)->map()->is_access_check_needed();
 }
 
 
-bool Object::IsStruct() {
+bool Object::IsStruct() const {
   if (!IsHeapObject()) return false;
   switch (HeapObject::cast(this)->map()->instance_type()) {
 #define MAKE_STRUCT_CASE(NAME, Name, name) case NAME##_TYPE: return true;
@@ -964,68 +1010,74 @@
 }
 
 
-#define MAKE_STRUCT_PREDICATE(NAME, Name, name)                  \
-  bool Object::Is##Name() {                                      \
-    return Object::IsHeapObject()                                \
+#define MAKE_STRUCT_PREDICATE(NAME, Name, name)                         \
+  bool Object::Is##Name() const {                                       \
+    return Object::IsHeapObject()                                       \
       && HeapObject::cast(this)->map()->instance_type() == NAME##_TYPE; \
   }
   STRUCT_LIST(MAKE_STRUCT_PREDICATE)
 #undef MAKE_STRUCT_PREDICATE
 
 
-bool Object::IsUndefined() {
+bool Object::IsUndefined() const {
   return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUndefined;
 }
 
 
-bool Object::IsNull() {
+bool Object::IsNull() const {
   return IsOddball() && Oddball::cast(this)->kind() == Oddball::kNull;
 }
 
 
-bool Object::IsTheHole() {
+bool Object::IsTheHole() const {
   return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTheHole;
 }
 
 
-bool Object::IsException() {
+bool Object::IsException() const {
   return IsOddball() && Oddball::cast(this)->kind() == Oddball::kException;
 }
 
 
-bool Object::IsUninitialized() {
+bool Object::IsUninitialized() const {
   return IsOddball() && Oddball::cast(this)->kind() == Oddball::kUninitialized;
 }
 
 
-bool Object::IsTrue() {
+bool Object::IsTrue() const {
   return IsOddball() && Oddball::cast(this)->kind() == Oddball::kTrue;
 }
 
 
-bool Object::IsFalse() {
+bool Object::IsFalse() const {
   return IsOddball() && Oddball::cast(this)->kind() == Oddball::kFalse;
 }
 
 
-bool Object::IsArgumentsMarker() {
+bool Object::IsArgumentsMarker() const {
   return IsOddball() && Oddball::cast(this)->kind() == Oddball::kArgumentMarker;
 }
 
 
 double Object::Number() {
-  ASSERT(IsNumber());
+  DCHECK(IsNumber());
   return IsSmi()
     ? static_cast<double>(reinterpret_cast<Smi*>(this)->value())
     : reinterpret_cast<HeapNumber*>(this)->value();
 }
 
 
-bool Object::IsNaN() {
+bool Object::IsNaN() const {
   return this->IsHeapNumber() && std::isnan(HeapNumber::cast(this)->value());
 }
 
 
+bool Object::IsMinusZero() const {
+  return this->IsHeapNumber() &&
+         i::IsMinusZero(HeapNumber::cast(this)->value());
+}
+
+
 MaybeHandle<Smi> Object::ToSmi(Isolate* isolate, Handle<Object> object) {
   if (object->IsSmi()) return Handle<Smi>::cast(object);
   if (object->IsHeapNumber()) {
@@ -1062,9 +1114,9 @@
                                        Handle<Object> object,
                                        uint32_t index) {
   // GetElement can trigger a getter which can cause allocation.
-  // This was not always the case. This ASSERT is here to catch
+  // This was not always the case. This DCHECK is here to catch
   // leftover incorrect uses.
-  ASSERT(AllowHeapAllocation::IsAllowed());
+  DCHECK(AllowHeapAllocation::IsAllowed());
   return Object::GetElementWithReceiver(isolate, object, object, index);
 }
 
@@ -1082,10 +1134,10 @@
                                         Handle<Object> object,
                                         const char* name) {
   Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
-  ASSERT(!str.is_null());
+  DCHECK(!str.is_null());
 #ifdef DEBUG
   uint32_t index;  // Assert that the name is not an array index.
-  ASSERT(!str->AsArrayIndex(&index));
+  DCHECK(!str->AsArrayIndex(&index));
 #endif  // DEBUG
   return GetProperty(object, str);
 }
@@ -1106,12 +1158,12 @@
                                                    StrictMode strict_mode) {
   Isolate* isolate = proxy->GetIsolate();
   Handle<String> name = isolate->factory()->Uint32ToString(index);
-  return SetPropertyWithHandler(
-      proxy, receiver, name, value, NONE, strict_mode);
+  return SetPropertyWithHandler(proxy, receiver, name, value, strict_mode);
 }
 
 
-bool JSProxy::HasElementWithHandler(Handle<JSProxy> proxy, uint32_t index) {
+Maybe<bool> JSProxy::HasElementWithHandler(Handle<JSProxy> proxy,
+                                           uint32_t index) {
   Isolate* isolate = proxy->GetIsolate();
   Handle<String> name = isolate->factory()->Uint32ToString(index);
   return HasPropertyWithHandler(proxy, name);
@@ -1121,16 +1173,19 @@
 #define FIELD_ADDR(p, offset) \
   (reinterpret_cast<byte*>(p) + offset - kHeapObjectTag)
 
+#define FIELD_ADDR_CONST(p, offset) \
+  (reinterpret_cast<const byte*>(p) + offset - kHeapObjectTag)
+
 #define READ_FIELD(p, offset) \
-  (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)))
+  (*reinterpret_cast<Object* const*>(FIELD_ADDR_CONST(p, offset)))
 
 #define ACQUIRE_READ_FIELD(p, offset)           \
   reinterpret_cast<Object*>(base::Acquire_Load( \
-      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))
+      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
 
 #define NOBARRIER_READ_FIELD(p, offset)           \
   reinterpret_cast<Object*>(base::NoBarrier_Load( \
-      reinterpret_cast<base::AtomicWord*>(FIELD_ADDR(p, offset))))
+      reinterpret_cast<const base::AtomicWord*>(FIELD_ADDR_CONST(p, offset))))
 
 #define WRITE_FIELD(p, offset, value) \
   (*reinterpret_cast<Object**>(FIELD_ADDR(p, offset)) = value)
@@ -1163,17 +1218,19 @@
 
 #ifndef V8_TARGET_ARCH_MIPS
   #define READ_DOUBLE_FIELD(p, offset) \
-    (*reinterpret_cast<double*>(FIELD_ADDR(p, offset)))
+    (*reinterpret_cast<const double*>(FIELD_ADDR_CONST(p, offset)))
 #else  // V8_TARGET_ARCH_MIPS
   // Prevent gcc from using load-double (mips ldc1) on (possibly)
   // non-64-bit aligned HeapNumber::value.
-  static inline double read_double_field(void* p, int offset) {
+  static inline double read_double_field(const void* p, int offset) {
     union conversion {
       double d;
       uint32_t u[2];
     } c;
-    c.u[0] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)));
-    c.u[1] = (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset + 4)));
+    c.u[0] = (*reinterpret_cast<const uint32_t*>(
+        FIELD_ADDR_CONST(p, offset)));
+    c.u[1] = (*reinterpret_cast<const uint32_t*>(
+        FIELD_ADDR_CONST(p, offset + 4)));
     return c.d;
   }
   #define READ_DOUBLE_FIELD(p, offset) read_double_field(p, offset)
@@ -1201,43 +1258,43 @@
 
 
 #define READ_INT_FIELD(p, offset) \
-  (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)))
+  (*reinterpret_cast<const int*>(FIELD_ADDR_CONST(p, offset)))
 
 #define WRITE_INT_FIELD(p, offset, value) \
   (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
 
 #define READ_INTPTR_FIELD(p, offset) \
-  (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
+  (*reinterpret_cast<const intptr_t*>(FIELD_ADDR_CONST(p, offset)))
 
 #define WRITE_INTPTR_FIELD(p, offset, value) \
   (*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
 
 #define READ_UINT32_FIELD(p, offset) \
-  (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
+  (*reinterpret_cast<const uint32_t*>(FIELD_ADDR_CONST(p, offset)))
 
 #define WRITE_UINT32_FIELD(p, offset, value) \
   (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)) = value)
 
 #define READ_INT32_FIELD(p, offset) \
-  (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)))
+  (*reinterpret_cast<const int32_t*>(FIELD_ADDR_CONST(p, offset)))
 
 #define WRITE_INT32_FIELD(p, offset, value) \
   (*reinterpret_cast<int32_t*>(FIELD_ADDR(p, offset)) = value)
 
 #define READ_INT64_FIELD(p, offset) \
-  (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)))
+  (*reinterpret_cast<const int64_t*>(FIELD_ADDR_CONST(p, offset)))
 
 #define WRITE_INT64_FIELD(p, offset, value) \
   (*reinterpret_cast<int64_t*>(FIELD_ADDR(p, offset)) = value)
 
 #define READ_SHORT_FIELD(p, offset) \
-  (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)))
+  (*reinterpret_cast<const uint16_t*>(FIELD_ADDR_CONST(p, offset)))
 
 #define WRITE_SHORT_FIELD(p, offset, value) \
   (*reinterpret_cast<uint16_t*>(FIELD_ADDR(p, offset)) = value)
 
 #define READ_BYTE_FIELD(p, offset) \
-  (*reinterpret_cast<byte*>(FIELD_ADDR(p, offset)))
+  (*reinterpret_cast<const byte*>(FIELD_ADDR_CONST(p, offset)))
 
 #define NOBARRIER_READ_BYTE_FIELD(p, offset) \
   static_cast<byte>(base::NoBarrier_Load(    \
@@ -1252,23 +1309,23 @@
       static_cast<base::Atomic8>(value));
 
 Object** HeapObject::RawField(HeapObject* obj, int byte_offset) {
-  return &READ_FIELD(obj, byte_offset);
+  return reinterpret_cast<Object**>(FIELD_ADDR(obj, byte_offset));
 }
 
 
-int Smi::value() {
+int Smi::value() const {
   return Internals::SmiValue(this);
 }
 
 
 Smi* Smi::FromInt(int value) {
-  ASSERT(Smi::IsValid(value));
+  DCHECK(Smi::IsValid(value));
   return reinterpret_cast<Smi*>(Internals::IntToSmi(value));
 }
 
 
 Smi* Smi::FromIntptr(intptr_t value) {
-  ASSERT(Smi::IsValid(value));
+  DCHECK(Smi::IsValid(value));
   int smi_shift_bits = kSmiTagSize + kSmiShiftSize;
   return reinterpret_cast<Smi*>((value << smi_shift_bits) | kSmiTag);
 }
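
To make the shift-and-tag arithmetic in FromIntptr concrete, a sketch with assumed 32-bit constants (kSmiTagSize == 1, kSmiShiftSize == 0, kSmiTag == 0; the helper names are hypothetical):

#include <cassert>
#include <cstdint>

const intptr_t kTag = 0;   // assumed kSmiTag
const int kShiftBits = 1;  // assumed kSmiTagSize + kSmiShiftSize

// An integer n is stored as (n << 1) | 0, so bit 0 distinguishes
// smis (0) from tagged heap pointers (1).
intptr_t EncodeSmi(intptr_t value) { return (value << kShiftBits) | kTag; }
intptr_t DecodeSmi(intptr_t word) { return word >> kShiftBits; }

int main() {
  assert(DecodeSmi(EncodeSmi(42)) == 42);
  assert((EncodeSmi(42) & 1) == 0);  // low bit clear: this word is a smi
}
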
@@ -1276,12 +1333,12 @@
 
 bool Smi::IsValid(intptr_t value) {
   bool result = Internals::IsValidSmi(value);
-  ASSERT_EQ(result, value >= kMinValue && value <= kMaxValue);
+  DCHECK_EQ(result, value >= kMinValue && value <= kMaxValue);
   return result;
 }
 
 
-MapWord MapWord::FromMap(Map* map) {
+MapWord MapWord::FromMap(const Map* map) {
   return MapWord(reinterpret_cast<uintptr_t>(map));
 }
 
@@ -1303,7 +1360,7 @@
 
 
 HeapObject* MapWord::ToForwardingAddress() {
-  ASSERT(IsForwardingAddress());
+  DCHECK(IsForwardingAddress());
   return HeapObject::FromAddress(reinterpret_cast<Address>(value_));
 }
 
@@ -1319,20 +1376,20 @@
 #endif
 
 
-Heap* HeapObject::GetHeap() {
+Heap* HeapObject::GetHeap() const {
   Heap* heap =
-      MemoryChunk::FromAddress(reinterpret_cast<Address>(this))->heap();
-  SLOW_ASSERT(heap != NULL);
+      MemoryChunk::FromAddress(reinterpret_cast<const byte*>(this))->heap();
+  SLOW_DCHECK(heap != NULL);
   return heap;
 }
 
 
-Isolate* HeapObject::GetIsolate() {
+Isolate* HeapObject::GetIsolate() const {
   return GetHeap()->isolate();
 }
 
 
-Map* HeapObject::map() {
+Map* HeapObject::map() const {
 #ifdef DEBUG
   // Clear mark potentially added by PathTracer.
   uintptr_t raw_value =
@@ -1380,7 +1437,7 @@
 }
 
 
-MapWord HeapObject::map_word() {
+MapWord HeapObject::map_word() const {
   return MapWord(
       reinterpret_cast<uintptr_t>(NOBARRIER_READ_FIELD(this, kMapOffset)));
 }
@@ -1392,7 +1449,7 @@
 }
 
 
-MapWord HeapObject::synchronized_map_word() {
+MapWord HeapObject::synchronized_map_word() const {
   return MapWord(
       reinterpret_cast<uintptr_t>(ACQUIRE_READ_FIELD(this, kMapOffset)));
 }
@@ -1405,7 +1462,7 @@
 
 
 HeapObject* HeapObject::FromAddress(Address address) {
-  ASSERT_TAG_ALIGNED(address);
+  DCHECK_TAG_ALIGNED(address);
   return reinterpret_cast<HeapObject*>(address + kHeapObjectTag);
 }
 
@@ -1420,6 +1477,25 @@
 }
 
 
+bool HeapObject::MayContainRawValues() {
+  InstanceType type = map()->instance_type();
+  if (type <= LAST_NAME_TYPE) {
+    if (type == SYMBOL_TYPE) {
+      return false;
+    }
+    DCHECK(type < FIRST_NONSTRING_TYPE);
+    // There are four string representations: sequential strings, external
+    // strings, cons strings, and sliced strings.
+    // Only the former two contain raw values and no heap pointers (besides the
+    // map-word).
+    return ((type & kIsIndirectStringMask) != kIsIndirectStringTag);
+  }
+  // The ConstantPoolArray contains heap pointers, but also raw values.
+  if (type == CONSTANT_POOL_ARRAY_TYPE) return true;
+  return (type <= LAST_DATA_TYPE);
+}
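
The single mask-and-compare in MayContainRawValues classifies all four string representations at once; a sketch with made-up bit values standing in for kIsIndirectStringMask and kIsIndirectStringTag:

#include <cassert>

const int kIsIndirectMask = 0x1;  // assumed stand-in values
const int kIsIndirectTag = 0x1;
enum FakeStringType {
  kSeqString = 0x0,       // raw characters stored inline
  kConsString = 0x1,      // indirect: points at two substrings
  kExternalString = 0x2,  // raw pointer to out-of-heap data
  kSlicedString = 0x3     // indirect: points at its parent string
};

bool HoldsRawDataOnly(int type) {
  return (type & kIsIndirectMask) != kIsIndirectTag;
}

int main() {
  assert(HoldsRawDataOnly(kSeqString) && HoldsRawDataOnly(kExternalString));
  assert(!HoldsRawDataOnly(kConsString) && !HoldsRawDataOnly(kSlicedString));
}
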
+
+
 void HeapObject::IteratePointers(ObjectVisitor* v, int start, int end) {
   v->VisitPointers(reinterpret_cast<Object**>(FIELD_ADDR(this, start)),
                    reinterpret_cast<Object**>(FIELD_ADDR(this, end)));
@@ -1436,7 +1512,7 @@
 }
 
 
-double HeapNumber::value() {
+double HeapNumber::value() const {
   return READ_DOUBLE_FIELD(this, kValueOffset);
 }
 
@@ -1476,14 +1552,14 @@
 }
 
 
-FixedArrayBase* JSObject::elements() {
+FixedArrayBase* JSObject::elements() const {
   Object* array = READ_FIELD(this, kElementsOffset);
   return static_cast<FixedArrayBase*>(array);
 }
 
 
 void JSObject::ValidateElements(Handle<JSObject> object) {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   if (FLAG_enable_slow_asserts) {
     ElementsAccessor* accessor = object->GetElementsAccessor();
     accessor->Validate(object);
@@ -1504,7 +1580,7 @@
 
 
 void AllocationSite::MarkZombie() {
-  ASSERT(!IsZombie());
+  DCHECK(!IsZombie());
   Initialize();
   set_pretenure_decision(kZombie);
 }
@@ -1564,10 +1640,10 @@
   int value = pretenure_data()->value();
   // Verify that we can count more mementos than we can possibly find in one
   // new space collection.
-  ASSERT((GetHeap()->MaxSemiSpaceSize() /
+  DCHECK((GetHeap()->MaxSemiSpaceSize() /
           (StaticVisitorBase::kMinObjectSizeInWords * kPointerSize +
            AllocationMemento::kSize)) < MementoFoundCountBits::kMax);
-  ASSERT(count < MementoFoundCountBits::kMax);
+  DCHECK(count < MementoFoundCountBits::kMax);
   set_pretenure_data(
       Smi::FromInt(MementoFoundCountBits::update(value, count)),
       SKIP_WRITE_BARRIER);
@@ -1583,7 +1659,7 @@
 
 
 inline void AllocationSite::IncrementMementoCreateCount() {
-  ASSERT(FLAG_allocation_site_pretenuring);
+  DCHECK(FLAG_allocation_site_pretenuring);
   int value = memento_create_count();
   set_memento_create_count(value + 1);
 }
@@ -1667,7 +1743,7 @@
   ElementsKind target_kind = current_kind;
   {
     DisallowHeapAllocation no_allocation;
-    ASSERT(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
+    DCHECK(mode != ALLOW_COPIED_DOUBLE_ELEMENTS);
     bool is_holey = IsFastHoleyElementsKind(current_kind);
     if (current_kind == FAST_HOLEY_ELEMENTS) return;
     Heap* heap = object->GetHeap();
@@ -1707,7 +1783,7 @@
                                         EnsureElementsMode mode) {
   Heap* heap = object->GetHeap();
   if (elements->map() != heap->fixed_double_array_map()) {
-    ASSERT(elements->map() == heap->fixed_array_map() ||
+    DCHECK(elements->map() == heap->fixed_array_map() ||
            elements->map() == heap->fixed_cow_array_map());
     if (mode == ALLOW_COPIED_DOUBLE_ELEMENTS) {
       mode = DONT_ALLOW_DOUBLE_ELEMENTS;
@@ -1718,7 +1794,7 @@
     return;
   }
 
-  ASSERT(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
+  DCHECK(mode == ALLOW_COPIED_DOUBLE_ELEMENTS);
   if (object->GetElementsKind() == FAST_HOLEY_SMI_ELEMENTS) {
     TransitionElementsKind(object, FAST_HOLEY_DOUBLE_ELEMENTS);
   } else if (object->GetElementsKind() == FAST_SMI_ELEMENTS) {
@@ -1739,11 +1815,11 @@
                                  Handle<Map> new_map,
                                  Handle<FixedArrayBase> value) {
   JSObject::MigrateToMap(object, new_map);
-  ASSERT((object->map()->has_fast_smi_or_object_elements() ||
+  DCHECK((object->map()->has_fast_smi_or_object_elements() ||
           (*value == object->GetHeap()->empty_fixed_array())) ==
          (value->map() == object->GetHeap()->fixed_array_map() ||
           value->map() == object->GetHeap()->fixed_cow_array_map()));
-  ASSERT((*value == object->GetHeap()->empty_fixed_array()) ||
+  DCHECK((*value == object->GetHeap()->empty_fixed_array()) ||
          (object->map()->has_fast_double_elements() ==
           value->IsFixedDoubleArray()));
   object->set_elements(*value);
@@ -1757,7 +1833,7 @@
 
 
 void JSObject::initialize_properties() {
-  ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+  DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
   WRITE_FIELD(this, kPropertiesOffset, GetHeap()->empty_fixed_array());
 }
 
@@ -1768,7 +1844,7 @@
 }
 
 
-Handle<String> JSObject::ExpectedTransitionKey(Handle<Map> map) {
+Handle<String> Map::ExpectedTransitionKey(Handle<Map> map) {
   DisallowHeapAllocation no_gc;
   if (!map->HasTransitionArray()) return Handle<String>::null();
   TransitionArray* transitions = map->transitions();
@@ -1783,14 +1859,14 @@
 }
 
 
-Handle<Map> JSObject::ExpectedTransitionTarget(Handle<Map> map) {
-  ASSERT(!ExpectedTransitionKey(map).is_null());
+Handle<Map> Map::ExpectedTransitionTarget(Handle<Map> map) {
+  DCHECK(!ExpectedTransitionKey(map).is_null());
   return Handle<Map>(map->transitions()->GetTarget(
       TransitionArray::kSimpleTransitionIndex));
 }
 
 
-Handle<Map> JSObject::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
+Handle<Map> Map::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
   DisallowHeapAllocation no_allocation;
   if (!map->HasTransitionArray()) return Handle<Map>::null();
   TransitionArray* transitions = map->transitions();
@@ -1807,7 +1883,7 @@
 ACCESSORS(Oddball, to_number, Object, kToNumberOffset)
 
 
-byte Oddball::kind() {
+byte Oddball::kind() const {
   return Smi::cast(READ_FIELD(this, kKindOffset))->value();
 }
 
@@ -1817,20 +1893,20 @@
 }
 
 
-Object* Cell::value() {
+Object* Cell::value() const {
   return READ_FIELD(this, kValueOffset);
 }
 
 
 void Cell::set_value(Object* val, WriteBarrierMode ignored) {
   // The write barrier is not used for global property cells.
-  ASSERT(!val->IsPropertyCell() && !val->IsCell());
+  DCHECK(!val->IsPropertyCell() && !val->IsCell());
   WRITE_FIELD(this, kValueOffset, val);
 }
 
 ACCESSORS(PropertyCell, dependent_code, DependentCode, kDependentCodeOffset)
 
-Object* PropertyCell::type_raw() {
+Object* PropertyCell::type_raw() const {
   return READ_FIELD(this, kTypeOffset);
 }
 
@@ -1899,7 +1975,7 @@
 
 
 int JSObject::GetInternalFieldCount() {
-  ASSERT(1 << kPointerSizeLog2 == kPointerSize);
+  DCHECK(1 << kPointerSizeLog2 == kPointerSize);
   // Make sure to adjust for the number of in-object properties. These
   // properties do contribute to the size, but are not internal fields.
   return ((Size() - GetHeaderSize()) >> kPointerSizeLog2) -
@@ -1908,13 +1984,13 @@
 
 
 int JSObject::GetInternalFieldOffset(int index) {
-  ASSERT(index < GetInternalFieldCount() && index >= 0);
+  DCHECK(index < GetInternalFieldCount() && index >= 0);
   return GetHeaderSize() + (kPointerSize * index);
 }
 
 
 Object* JSObject::GetInternalField(int index) {
-  ASSERT(index < GetInternalFieldCount() && index >= 0);
+  DCHECK(index < GetInternalFieldCount() && index >= 0);
   // Internal objects do follow immediately after the header, whereas in-object
   // properties are at the end of the object. Therefore there is no need
   // to adjust the index here.
@@ -1923,7 +1999,7 @@
 
 
 void JSObject::SetInternalField(int index, Object* value) {
-  ASSERT(index < GetInternalFieldCount() && index >= 0);
+  DCHECK(index < GetInternalFieldCount() && index >= 0);
   // Internal objects do follow immediately after the header, whereas in-object
   // properties are at the end of the object. Therefore there is no need
   // to adjust the index here.
@@ -1934,7 +2010,7 @@
 
 
 void JSObject::SetInternalField(int index, Smi* value) {
-  ASSERT(index < GetInternalFieldCount() && index >= 0);
+  DCHECK(index < GetInternalFieldCount() && index >= 0);
   // Internal objects do follow immediately after the header, whereas in-object
   // properties are at the end of the object. Therefore there is no need
   // to adjust the index here.
@@ -1992,15 +2068,15 @@
 void JSObject::InitializeBody(Map* map,
                               Object* pre_allocated_value,
                               Object* filler_value) {
-  ASSERT(!filler_value->IsHeapObject() ||
+  DCHECK(!filler_value->IsHeapObject() ||
          !GetHeap()->InNewSpace(filler_value));
-  ASSERT(!pre_allocated_value->IsHeapObject() ||
+  DCHECK(!pre_allocated_value->IsHeapObject() ||
          !GetHeap()->InNewSpace(pre_allocated_value));
   int size = map->instance_size();
   int offset = kHeaderSize;
   if (filler_value != pre_allocated_value) {
     int pre_allocated = map->pre_allocated_property_fields();
-    ASSERT(pre_allocated * kPointerSize + kHeaderSize <= size);
+    DCHECK(pre_allocated * kPointerSize + kHeaderSize <= size);
     for (int i = 0; i < pre_allocated; i++) {
       WRITE_FIELD(this, offset, pre_allocated_value);
       offset += kPointerSize;
@@ -2014,28 +2090,18 @@
 
 
 bool JSObject::HasFastProperties() {
-  ASSERT(properties()->IsDictionary() == map()->is_dictionary_map());
+  DCHECK(properties()->IsDictionary() == map()->is_dictionary_map());
   return !properties()->IsDictionary();
 }
 
 
-bool JSObject::TooManyFastProperties(StoreFromKeyed store_mode) {
-  // Allow extra fast properties if the object has more than
-  // kFastPropertiesSoftLimit in-object properties. When this is the case, it is
-  // very unlikely that the object is being used as a dictionary and there is a
-  // good chance that allowing more map transitions will be worth it.
-  Map* map = this->map();
-  if (map->unused_property_fields() != 0) return false;
-
-  int inobject = map->inobject_properties();
-
-  int limit;
-  if (store_mode == CERTAINLY_NOT_STORE_FROM_KEYED) {
-    limit = Max(inobject, kMaxFastProperties);
-  } else {
-    limit = Max(inobject, kFastPropertiesSoftLimit);
-  }
-  return properties()->length() > limit;
+bool Map::TooManyFastProperties(StoreFromKeyed store_mode) {
+  if (unused_property_fields() != 0) return false;
+  if (is_prototype_map()) return false;
+  int minimum = store_mode == CERTAINLY_NOT_STORE_FROM_KEYED ? 128 : 12;
+  int limit = Max(minimum, inobject_properties());
+  int external = NumberOfFields() - inobject_properties();
+  return external > limit;
 }
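
Stated standalone, the new heuristic reads as below; the parameter names are hypothetical, only the 128/12 constants and the comparison come from the code above. For example, with 0 unused fields, 4 in-object properties and 20 fields total on a possibly-keyed store, external is 16 > 12 and the sketch returns true.

#include <algorithm>

bool TooManyFastPropertiesSketch(int unused_fields, bool is_prototype,
                                 bool certainly_not_keyed, int inobject,
                                 int total_fields) {
  if (unused_fields != 0) return false;  // room left: stay fast
  if (is_prototype) return false;        // prototype maps stay fast
  int minimum = certainly_not_keyed ? 128 : 12;
  int limit = std::max(minimum, inobject);
  int external = total_fields - inobject;  // fields stored out-of-object
  return external > limit;
}
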
 
 
@@ -2096,14 +2162,8 @@
 }
 
 
-FixedArrayBase* FixedArrayBase::cast(Object* object) {
-  ASSERT(object->IsFixedArrayBase());
-  return reinterpret_cast<FixedArrayBase*>(object);
-}
-
-
 Object* FixedArray::get(int index) {
-  SLOW_ASSERT(index >= 0 && index < this->length());
+  SLOW_DCHECK(index >= 0 && index < this->length());
   return READ_FIELD(this, kHeaderSize + index * kPointerSize);
 }
 
@@ -2119,17 +2179,18 @@
 
 
 void FixedArray::set(int index, Smi* value) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map());
-  ASSERT(index >= 0 && index < this->length());
-  ASSERT(reinterpret_cast<Object*>(value)->IsSmi());
+  DCHECK(map() != GetHeap()->fixed_cow_array_map());
+  DCHECK(index >= 0 && index < this->length());
+  DCHECK(reinterpret_cast<Object*>(value)->IsSmi());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
 }
 
 
 void FixedArray::set(int index, Object* value) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map());
-  ASSERT(index >= 0 && index < this->length());
+  DCHECK_NE(GetHeap()->fixed_cow_array_map(), map());
+  DCHECK_EQ(FIXED_ARRAY_TYPE, map()->instance_type());
+  DCHECK(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
   WRITE_BARRIER(GetHeap(), this, offset, value);
@@ -2137,35 +2198,35 @@
 
 
 inline bool FixedDoubleArray::is_the_hole_nan(double value) {
-  return BitCast<uint64_t, double>(value) == kHoleNanInt64;
+  return bit_cast<uint64_t, double>(value) == kHoleNanInt64;
 }
 
 
 inline double FixedDoubleArray::hole_nan_as_double() {
-  return BitCast<double, uint64_t>(kHoleNanInt64);
+  return bit_cast<double, uint64_t>(kHoleNanInt64);
 }
 
 
 inline double FixedDoubleArray::canonical_not_the_hole_nan_as_double() {
-  ASSERT(BitCast<uint64_t>(OS::nan_value()) != kHoleNanInt64);
-  ASSERT((BitCast<uint64_t>(OS::nan_value()) >> 32) != kHoleNanUpper32);
-  return OS::nan_value();
+  DCHECK(bit_cast<uint64_t>(base::OS::nan_value()) != kHoleNanInt64);
+  DCHECK((bit_cast<uint64_t>(base::OS::nan_value()) >> 32) != kHoleNanUpper32);
+  return base::OS::nan_value();
 }
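
The bit_cast calls above reinterpret a value's object representation without aliasing violations; a common memcpy-based sketch (the hole constant below is an assumed placeholder, not V8's actual kHoleNanInt64):

#include <cstdint>
#include <cstring>

template <class Dest, class Source>
Dest bit_cast_sketch(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));  // copy the raw bytes
  return dest;
}

// NaN never compares equal to itself with ==, so "is this the hole?"
// must compare bit patterns, exactly as is_the_hole_nan does above.
const uint64_t kHoleBitsSketch = 0x7FF7FFFFFFFFFFFFull;  // assumed value
bool is_hole_sketch(double value) {
  return bit_cast_sketch<uint64_t>(value) == kHoleBitsSketch;
}
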
 
 
 double FixedDoubleArray::get_scalar(int index) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+  DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
          map() != GetHeap()->fixed_array_map());
-  ASSERT(index >= 0 && index < this->length());
+  DCHECK(index >= 0 && index < this->length());
   double result = READ_DOUBLE_FIELD(this, kHeaderSize + index * kDoubleSize);
-  ASSERT(!is_the_hole_nan(result));
+  DCHECK(!is_the_hole_nan(result));
   return result;
 }
 
 int64_t FixedDoubleArray::get_representation(int index) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+  DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
          map() != GetHeap()->fixed_array_map());
-  ASSERT(index >= 0 && index < this->length());
+  DCHECK(index >= 0 && index < this->length());
   return READ_INT64_FIELD(this, kHeaderSize + index * kDoubleSize);
 }
 
@@ -2181,7 +2242,7 @@
 
 
 void FixedDoubleArray::set(int index, double value) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+  DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
          map() != GetHeap()->fixed_array_map());
   int offset = kHeaderSize + index * kDoubleSize;
   if (std::isnan(value)) value = canonical_not_the_hole_nan_as_double();
@@ -2190,7 +2251,7 @@
 
 
 void FixedDoubleArray::set_the_hole(int index) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map() &&
+  DCHECK(map() != GetHeap()->fixed_cow_array_map() &&
          map() != GetHeap()->fixed_array_map());
   int offset = kHeaderSize + index * kDoubleSize;
   WRITE_DOUBLE_FIELD(this, offset, hole_nan_as_double());
@@ -2215,6 +2276,84 @@
 }
 
 
+void ConstantPoolArray::NumberOfEntries::increment(Type type) {
+  DCHECK(type < NUMBER_OF_TYPES);
+  element_counts_[type]++;
+}
+
+
+bool ConstantPoolArray::NumberOfEntries::equals(

+    const ConstantPoolArray::NumberOfEntries& other) const {
+  for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+    if (element_counts_[i] != other.element_counts_[i]) return false;
+  }
+  return true;
+}
+
+
+bool ConstantPoolArray::NumberOfEntries::is_empty() const {
+  return total_count() == 0;
+}
+
+
+int ConstantPoolArray::NumberOfEntries::count_of(Type type) const {
+  DCHECK(type < NUMBER_OF_TYPES);
+  return element_counts_[type];
+}
+
+
+int ConstantPoolArray::NumberOfEntries::base_of(Type type) const {
+  int base = 0;
+  DCHECK(type < NUMBER_OF_TYPES);
+  for (int i = 0; i < type; i++) {
+    base += element_counts_[i];
+  }
+  return base;
+}
+
+
+int ConstantPoolArray::NumberOfEntries::total_count() const {
+  int count = 0;
+  for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+    count += element_counts_[i];
+  }
+  return count;
+}
+
+
+bool ConstantPoolArray::NumberOfEntries::are_in_range(int min, int max) const {
+  for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) {
+    if (element_counts_[i] < min || element_counts_[i] > max) {
+      return false;
+    }
+  }
+  return true;
+}
+
+
+int ConstantPoolArray::Iterator::next_index() {
+  DCHECK(!is_finished());
+  int ret = next_index_++;
+  update_section();
+  return ret;
+}
+
+
+bool ConstantPoolArray::Iterator::is_finished() {
+  return next_index_ > array_->last_index(type_, final_section_);
+}
+
+
+void ConstantPoolArray::Iterator::update_section() {
+  if (next_index_ > array_->last_index(type_, current_section_) &&
+      current_section_ != final_section_) {
+    DCHECK(final_section_ == EXTENDED_SECTION);
+    current_section_ = EXTENDED_SECTION;
+    next_index_ = array_->first_index(type_, EXTENDED_SECTION);
+  }
+}
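
The iterator's section hop in update_section is easiest to see in a standalone analogue that walks a small index range and then jumps to an extended one (all names here are hypothetical):

#include <cassert>

struct TwoSectionIterator {
  int next, small_last, ext_first, ext_last;
  bool is_finished() const { return next > ext_last; }
  int next_index() {
    assert(!is_finished());
    int ret = next++;
    // Mirror of update_section(): once past the small section, re-seat
    // the cursor at the first extended-section index.
    if (next > small_last && next < ext_first) next = ext_first;
    return ret;
  }
};

int main() {
  TwoSectionIterator it{0, 2, 10, 12};  // small: [0,2], extended: [10,12]
  int visited = 0;
  while (!it.is_finished()) { it.next_index(); ++visited; }
  assert(visited == 6);  // three small entries plus three extended entries
}
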
+
+
 bool ConstantPoolArray::is_extended_layout() {
   uint32_t small_layout_1 = READ_UINT32_FIELD(this, kSmallLayout1Offset);
   return IsExtendedField::decode(small_layout_1);
@@ -2227,7 +2366,7 @@
 
 
 int ConstantPoolArray::first_extended_section_index() {
-  ASSERT(is_extended_layout());
+  DCHECK(is_extended_layout());
   uint32_t small_layout_2 = READ_UINT32_FIELD(this, kSmallLayout2Offset);
   return TotalCountField::decode(small_layout_2);
 }
@@ -2255,7 +2394,7 @@
 int ConstantPoolArray::first_index(Type type, LayoutSection section) {
   int index = 0;
   if (section == EXTENDED_SECTION) {
-    ASSERT(is_extended_layout());
+    DCHECK(is_extended_layout());
     index += first_extended_section_index();
   }
 
@@ -2291,7 +2430,7 @@
         return 0;
     }
   } else {
-    ASSERT(section == EXTENDED_SECTION && is_extended_layout());
+    DCHECK(section == EXTENDED_SECTION && is_extended_layout());
     int offset = get_extended_section_header_offset();
     switch (type) {
       case INT64:
@@ -2314,6 +2453,15 @@
 }
 
 
+bool ConstantPoolArray::offset_is_type(int offset, Type type) {
+  return (offset >= OffsetOfElementAt(first_index(type, SMALL_SECTION)) &&
+          offset <= OffsetOfElementAt(last_index(type, SMALL_SECTION))) ||
+         (is_extended_layout() &&
+          offset >= OffsetOfElementAt(first_index(type, EXTENDED_SECTION)) &&
+          offset <= OffsetOfElementAt(last_index(type, EXTENDED_SECTION)));
+}
+
+
 ConstantPoolArray::Type ConstantPoolArray::get_type(int index) {
   LayoutSection section;
   if (is_extended_layout() && index >= first_extended_section_index()) {
@@ -2326,84 +2474,123 @@
   while (index > last_index(type, section)) {
     type = next_type(type);
   }
-  ASSERT(type <= LAST_TYPE);
+  DCHECK(type <= LAST_TYPE);
   return type;
 }
 
 
 int64_t ConstantPoolArray::get_int64_entry(int index) {
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == INT64);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == INT64);
   return READ_INT64_FIELD(this, OffsetOfElementAt(index));
 }
 
 
 double ConstantPoolArray::get_int64_entry_as_double(int index) {
   STATIC_ASSERT(kDoubleSize == kInt64Size);
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == INT64);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == INT64);
   return READ_DOUBLE_FIELD(this, OffsetOfElementAt(index));
 }
 
 
 Address ConstantPoolArray::get_code_ptr_entry(int index) {
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == CODE_PTR);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == CODE_PTR);
   return reinterpret_cast<Address>(READ_FIELD(this, OffsetOfElementAt(index)));
 }
 
 
 Object* ConstantPoolArray::get_heap_ptr_entry(int index) {
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == HEAP_PTR);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == HEAP_PTR);
   return READ_FIELD(this, OffsetOfElementAt(index));
 }
 
 
 int32_t ConstantPoolArray::get_int32_entry(int index) {
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == INT32);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == INT32);
   return READ_INT32_FIELD(this, OffsetOfElementAt(index));
 }
 
 
 void ConstantPoolArray::set(int index, int64_t value) {
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == INT64);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == INT64);
   WRITE_INT64_FIELD(this, OffsetOfElementAt(index), value);
 }
 
 
 void ConstantPoolArray::set(int index, double value) {
   STATIC_ASSERT(kDoubleSize == kInt64Size);
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == INT64);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == INT64);
   WRITE_DOUBLE_FIELD(this, OffsetOfElementAt(index), value);
 }
 
 
 void ConstantPoolArray::set(int index, Address value) {
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == CODE_PTR);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == CODE_PTR);
   WRITE_FIELD(this, OffsetOfElementAt(index), reinterpret_cast<Object*>(value));
 }
 
 
 void ConstantPoolArray::set(int index, Object* value) {
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == HEAP_PTR);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(!GetHeap()->InNewSpace(value));
+  DCHECK(get_type(index) == HEAP_PTR);
   WRITE_FIELD(this, OffsetOfElementAt(index), value);
   WRITE_BARRIER(GetHeap(), this, OffsetOfElementAt(index), value);
 }
 
 
 void ConstantPoolArray::set(int index, int32_t value) {
-  ASSERT(map() == GetHeap()->constant_pool_array_map());
-  ASSERT(get_type(index) == INT32);
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(get_type(index) == INT32);
   WRITE_INT32_FIELD(this, OffsetOfElementAt(index), value);
 }
 
 
+void ConstantPoolArray::set_at_offset(int offset, int32_t value) {
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(offset_is_type(offset, INT32));
+  WRITE_INT32_FIELD(this, offset, value);
+}
+
+
+void ConstantPoolArray::set_at_offset(int offset, int64_t value) {
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(offset_is_type(offset, INT64));
+  WRITE_INT64_FIELD(this, offset, value);
+}
+
+
+void ConstantPoolArray::set_at_offset(int offset, double value) {
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(offset_is_type(offset, INT64));
+  WRITE_DOUBLE_FIELD(this, offset, value);
+}
+
+
+void ConstantPoolArray::set_at_offset(int offset, Address value) {
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(offset_is_type(offset, CODE_PTR));
+  WRITE_FIELD(this, offset, reinterpret_cast<Object*>(value));
+  WRITE_BARRIER(GetHeap(), this, offset, reinterpret_cast<Object*>(value));
+}
+
+
+void ConstantPoolArray::set_at_offset(int offset, Object* value) {
+  DCHECK(map() == GetHeap()->constant_pool_array_map());
+  DCHECK(!GetHeap()->InNewSpace(value));
+  DCHECK(offset_is_type(offset, HEAP_PTR));
+  WRITE_FIELD(this, offset, value);
+  WRITE_BARRIER(GetHeap(), this, offset, value);
+}
+
+
 void ConstantPoolArray::Init(const NumberOfEntries& small) {
   uint32_t small_layout_1 =
       Int64CountField::encode(small.count_of(INT64)) |
@@ -2417,7 +2604,7 @@
   WRITE_UINT32_FIELD(this, kSmallLayout1Offset, small_layout_1);
   WRITE_UINT32_FIELD(this, kSmallLayout2Offset, small_layout_2);
   if (kHeaderSize != kFirstEntryOffset) {
-    ASSERT(kFirstEntryOffset - kHeaderSize == kInt32Size);
+    DCHECK(kFirstEntryOffset - kHeaderSize == kInt32Size);
     WRITE_UINT32_FIELD(this, kHeaderSize, 0);  // Zero out header padding.
   }
 }
@@ -2470,29 +2657,6 @@
 }
 
 
-int ConstantPoolArray::Iterator::next_index() {
-  ASSERT(!is_finished());
-  int ret = next_index_++;
-  update_section();
-  return ret;
-}
-
-
-bool ConstantPoolArray::Iterator::is_finished() {
-  return next_index_ > array_->last_index(type_, final_section_);
-}
-
-
-void ConstantPoolArray::Iterator::update_section() {
-  if (next_index_ > array_->last_index(type_, current_section_) &&
-      current_section_ != final_section_) {
-    ASSERT(final_section_ == EXTENDED_SECTION);
-    current_section_ = EXTENDED_SECTION;
-    next_index_ = array_->first_index(type_, EXTENDED_SECTION);
-  }
-}
-
-
 WriteBarrierMode HeapObject::GetWriteBarrierMode(
     const DisallowHeapAllocation& promise) {
   Heap* heap = GetHeap();
@@ -2505,8 +2669,8 @@
 void FixedArray::set(int index,
                      Object* value,
                      WriteBarrierMode mode) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map());
-  ASSERT(index >= 0 && index < this->length());
+  DCHECK(map() != GetHeap()->fixed_cow_array_map());
+  DCHECK(index >= 0 && index < this->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(this, offset, value);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, offset, value, mode);
@@ -2516,8 +2680,8 @@
 void FixedArray::NoIncrementalWriteBarrierSet(FixedArray* array,
                                               int index,
                                               Object* value) {
-  ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
-  ASSERT(index >= 0 && index < array->length());
+  DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
+  DCHECK(index >= 0 && index < array->length());
   int offset = kHeaderSize + index * kPointerSize;
   WRITE_FIELD(array, offset, value);
   Heap* heap = array->GetHeap();
@@ -2530,17 +2694,17 @@
 void FixedArray::NoWriteBarrierSet(FixedArray* array,
                                    int index,
                                    Object* value) {
-  ASSERT(array->map() != array->GetHeap()->fixed_cow_array_map());
-  ASSERT(index >= 0 && index < array->length());
-  ASSERT(!array->GetHeap()->InNewSpace(value));
+  DCHECK(array->map() != array->GetHeap()->fixed_cow_array_map());
+  DCHECK(index >= 0 && index < array->length());
+  DCHECK(!array->GetHeap()->InNewSpace(value));
   WRITE_FIELD(array, kHeaderSize + index * kPointerSize, value);
 }
 
 
 void FixedArray::set_undefined(int index) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map());
-  ASSERT(index >= 0 && index < this->length());
-  ASSERT(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
+  DCHECK(map() != GetHeap()->fixed_cow_array_map());
+  DCHECK(index >= 0 && index < this->length());
+  DCHECK(!GetHeap()->InNewSpace(GetHeap()->undefined_value()));
   WRITE_FIELD(this,
               kHeaderSize + index * kPointerSize,
               GetHeap()->undefined_value());
@@ -2548,8 +2712,8 @@
 
 
 void FixedArray::set_null(int index) {
-  ASSERT(index >= 0 && index < this->length());
-  ASSERT(!GetHeap()->InNewSpace(GetHeap()->null_value()));
+  DCHECK(index >= 0 && index < this->length());
+  DCHECK(!GetHeap()->InNewSpace(GetHeap()->null_value()));
   WRITE_FIELD(this,
               kHeaderSize + index * kPointerSize,
               GetHeap()->null_value());
@@ -2557,9 +2721,9 @@
 
 
 void FixedArray::set_the_hole(int index) {
-  ASSERT(map() != GetHeap()->fixed_cow_array_map());
-  ASSERT(index >= 0 && index < this->length());
-  ASSERT(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
+  DCHECK(map() != GetHeap()->fixed_cow_array_map());
+  DCHECK(index >= 0 && index < this->length());
+  DCHECK(!GetHeap()->InNewSpace(GetHeap()->the_hole_value()));
   WRITE_FIELD(this,
               kHeaderSize + index * kPointerSize,
               GetHeap()->the_hole_value());
@@ -2579,7 +2743,7 @@
 
 
 bool DescriptorArray::IsEmpty() {
-  ASSERT(length() >= kFirstIndex ||
+  DCHECK(length() >= kFirstIndex ||
          this == GetHeap()->empty_descriptor_array());
   return length() < kFirstIndex;
 }
@@ -2599,7 +2763,7 @@
   uint32_t hash = name->Hash();
   int limit = high;
 
-  ASSERT(low <= high);
+  DCHECK(low <= high);
 
   while (low != high) {
     int mid = (low + high) / 2;
@@ -2643,7 +2807,7 @@
       if (current_hash == hash && entry->Equals(name)) return sorted_index;
     }
   } else {
-    ASSERT(len >= valid_entries);
+    DCHECK(len >= valid_entries);
     for (int number = 0; number < valid_entries; number++) {
       Name* entry = array->GetKey(number);
       uint32_t current_hash = entry->Hash();
@@ -2657,9 +2821,9 @@
 template<SearchMode search_mode, typename T>
 int Search(T* array, Name* name, int valid_entries) {
   if (search_mode == VALID_ENTRIES) {
-    SLOW_ASSERT(array->IsSortedNoDuplicates(valid_entries));
+    SLOW_DCHECK(array->IsSortedNoDuplicates(valid_entries));
   } else {
-    SLOW_ASSERT(array->IsSortedNoDuplicates());
+    SLOW_DCHECK(array->IsSortedNoDuplicates());
   }
 
   int nof = array->number_of_entries();
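
Both hunks above implement the same idea: descriptor keys are kept sorted by hash, so a lookup binary-searches on the hash and then scans the collision run for the exact name. A plain-int sketch of the hash phase (names hypothetical):

#include <cassert>
#include <vector>

int BinarySearchByHash(const std::vector<unsigned>& hashes, unsigned hash) {
  int low = 0, high = static_cast<int>(hashes.size());
  while (low != high) {
    int mid = low + (high - low) / 2;
    if (hashes[mid] < hash) low = mid + 1; else high = mid;
  }
  return low;  // first index whose hash is >= the probe hash
}

int main() {
  std::vector<unsigned> hashes = {3, 7, 7, 9};
  assert(BinarySearchByHash(hashes, 7) == 1);  // start of the 7-collision run
  assert(BinarySearchByHash(hashes, 8) == 3);
}
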
@@ -2727,20 +2891,17 @@
 FixedArrayBase* Map::GetInitialElements() {
   if (has_fast_smi_or_object_elements() ||
       has_fast_double_elements()) {
-    ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
+    DCHECK(!GetHeap()->InNewSpace(GetHeap()->empty_fixed_array()));
     return GetHeap()->empty_fixed_array();
   } else if (has_external_array_elements()) {
     ExternalArray* empty_array = GetHeap()->EmptyExternalArrayForMap(this);
-    ASSERT(!GetHeap()->InNewSpace(empty_array));
+    DCHECK(!GetHeap()->InNewSpace(empty_array));
     return empty_array;
   } else if (has_fixed_typed_array_elements()) {
     FixedTypedArrayBase* empty_array =
       GetHeap()->EmptyFixedTypedArrayForMap(this);
-    ASSERT(!GetHeap()->InNewSpace(empty_array));
+    DCHECK(!GetHeap()->InNewSpace(empty_array));
     return empty_array;
-  } else if (has_dictionary_elements()) {
-    ASSERT(!GetHeap()->InNewSpace(GetHeap()->empty_slow_element_dictionary()));
-    return GetHeap()->empty_slow_element_dictionary();
   } else {
     UNREACHABLE();
   }
@@ -2749,7 +2910,7 @@
 
 
 Object** DescriptorArray::GetKeySlot(int descriptor_number) {
-  ASSERT(descriptor_number < number_of_descriptors());
+  DCHECK(descriptor_number < number_of_descriptors());
   return RawFieldOfElementAt(ToKeyIndex(descriptor_number));
 }
 
@@ -2765,7 +2926,7 @@
 
 
 Name* DescriptorArray::GetKey(int descriptor_number) {
-  ASSERT(descriptor_number < number_of_descriptors());
+  DCHECK(descriptor_number < number_of_descriptors());
   return Name::cast(get(ToKeyIndex(descriptor_number)));
 }
 
@@ -2788,7 +2949,7 @@
 
 void DescriptorArray::SetRepresentation(int descriptor_index,
                                         Representation representation) {
-  ASSERT(!representation.IsNone());
+  DCHECK(!representation.IsNone());
   PropertyDetails details = GetDetails(descriptor_index);
   set(ToDetailsIndex(descriptor_index),
       details.CopyWithRepresentation(representation).AsSmi());
@@ -2796,13 +2957,18 @@
 
 
 Object** DescriptorArray::GetValueSlot(int descriptor_number) {
-  ASSERT(descriptor_number < number_of_descriptors());
+  DCHECK(descriptor_number < number_of_descriptors());
   return RawFieldOfElementAt(ToValueIndex(descriptor_number));
 }
 
 
+int DescriptorArray::GetValueOffset(int descriptor_number) {
+  return OffsetOfElementAt(ToValueIndex(descriptor_number));
+}
+
+
 Object* DescriptorArray::GetValue(int descriptor_number) {
-  ASSERT(descriptor_number < number_of_descriptors());
+  DCHECK(descriptor_number < number_of_descriptors());
   return get(ToValueIndex(descriptor_number));
 }
 
@@ -2813,7 +2979,7 @@
 
 
 PropertyDetails DescriptorArray::GetDetails(int descriptor_number) {
-  ASSERT(descriptor_number < number_of_descriptors());
+  DCHECK(descriptor_number < number_of_descriptors());
   Object* details = get(ToDetailsIndex(descriptor_number));
   return PropertyDetails(Smi::cast(details));
 }
@@ -2825,13 +2991,13 @@
 
 
 int DescriptorArray::GetFieldIndex(int descriptor_number) {
-  ASSERT(GetDetails(descriptor_number).type() == FIELD);
+  DCHECK(GetDetails(descriptor_number).type() == FIELD);
   return GetDetails(descriptor_number).field_index();
 }
 
 
 HeapType* DescriptorArray::GetFieldType(int descriptor_number) {
-  ASSERT(GetDetails(descriptor_number).type() == FIELD);
+  DCHECK(GetDetails(descriptor_number).type() == FIELD);
   return HeapType::cast(GetValue(descriptor_number));
 }
 
@@ -2842,13 +3008,13 @@
 
 
 Object* DescriptorArray::GetCallbacksObject(int descriptor_number) {
-  ASSERT(GetType(descriptor_number) == CALLBACKS);
+  DCHECK(GetType(descriptor_number) == CALLBACKS);
   return GetValue(descriptor_number);
 }
 
 
 AccessorDescriptor* DescriptorArray::GetCallbacks(int descriptor_number) {
-  ASSERT(GetType(descriptor_number) == CALLBACKS);
+  DCHECK(GetType(descriptor_number) == CALLBACKS);
   Foreign* p = Foreign::cast(GetCallbacksObject(descriptor_number));
   return reinterpret_cast<AccessorDescriptor*>(p->foreign_address());
 }
@@ -2865,7 +3031,7 @@
                           Descriptor* desc,
                           const WhitenessWitness&) {
   // Range check.
-  ASSERT(descriptor_number < number_of_descriptors());
+  DCHECK(descriptor_number < number_of_descriptors());
 
   NoIncrementalWriteBarrierSet(this,
                                ToKeyIndex(descriptor_number),
@@ -2881,7 +3047,7 @@
 
 void DescriptorArray::Set(int descriptor_number, Descriptor* desc) {
   // Range check.
-  ASSERT(descriptor_number < number_of_descriptors());
+  DCHECK(descriptor_number < number_of_descriptors());
 
   set(ToKeyIndex(descriptor_number), *desc->GetKey());
   set(ToValueIndex(descriptor_number), *desc->GetValue());
@@ -2889,27 +3055,6 @@
 }
 
 
-void DescriptorArray::Append(Descriptor* desc,
-                             const WhitenessWitness& witness) {
-  DisallowHeapAllocation no_gc;
-  int descriptor_number = number_of_descriptors();
-  SetNumberOfDescriptors(descriptor_number + 1);
-  Set(descriptor_number, desc, witness);
-
-  uint32_t hash = desc->GetKey()->Hash();
-
-  int insertion;
-
-  for (insertion = descriptor_number; insertion > 0; --insertion) {
-    Name* key = GetSortedKey(insertion - 1);
-    if (key->Hash() <= hash) break;
-    SetSortedKey(insertion, GetSortedKeyIndex(insertion - 1));
-  }
-
-  SetSortedKey(insertion, descriptor_number);
-}
-
-
 void DescriptorArray::Append(Descriptor* desc) {
   DisallowHeapAllocation no_gc;
   int descriptor_number = number_of_descriptors();
@@ -2940,7 +3085,7 @@
 DescriptorArray::WhitenessWitness::WhitenessWitness(DescriptorArray* array)
     : marking_(array->GetHeap()->incremental_marking()) {
   marking_->EnterNoMarkingScope();
-  ASSERT(!marking_->IsMarking() ||
+  DCHECK(!marking_->IsMarking() ||
          Marking::Color(array) == Marking::WHITE_OBJECT);
 }
 
@@ -2953,7 +3098,7 @@
 template<typename Derived, typename Shape, typename Key>
 int HashTable<Derived, Shape, Key>::ComputeCapacity(int at_least_space_for) {
   const int kMinCapacity = 32;
-  int capacity = RoundUpToPowerOf2(at_least_space_for * 2);
+  int capacity = base::bits::RoundUpToPowerOfTwo32(at_least_space_for * 2);
   if (capacity < kMinCapacity) {
     capacity = kMinCapacity;  // Guarantee min capacity.
   }
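
base::bits::RoundUpToPowerOfTwo32 is the classic smear-the-top-bit trick; a sketch for reference (valid for inputs up to 2^31; 0 maps to 0):

#include <cassert>
#include <cstdint>

uint32_t RoundUpToPowerOfTwo32Sketch(uint32_t value) {
  value--;                // so exact powers of two map to themselves
  value |= value >> 1;    // smear the highest set bit downward...
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  return value + 1;       // ...then step up to the next power of two
}

int main() {
  assert(RoundUpToPowerOfTwo32Sketch(33) == 64);
  assert(RoundUpToPowerOfTwo32Sketch(64) == 64);
}
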
@@ -2995,7 +3140,7 @@
 }
 
 uint32_t SeededNumberDictionary::max_number_key() {
-  ASSERT(!requires_slow_elements());
+  DCHECK(!requires_slow_elements());
   Object* max_index_object = get(kMaxNumberKeyIndex);
   if (!max_index_object->IsSmi()) return 0;
   uint32_t value = static_cast<uint32_t>(Smi::cast(max_index_object)->value());
@@ -3011,84 +3156,108 @@
 // Cast operations
 
 
-CAST_ACCESSOR(FixedArray)
-CAST_ACCESSOR(FixedDoubleArray)
-CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(AccessorInfo)
+CAST_ACCESSOR(ByteArray)
+CAST_ACCESSOR(Cell)
+CAST_ACCESSOR(Code)
+CAST_ACCESSOR(CodeCacheHashTable)
+CAST_ACCESSOR(CompilationCacheTable)
+CAST_ACCESSOR(ConsString)
 CAST_ACCESSOR(ConstantPoolArray)
-CAST_ACCESSOR(DescriptorArray)
 CAST_ACCESSOR(DeoptimizationInputData)
 CAST_ACCESSOR(DeoptimizationOutputData)
 CAST_ACCESSOR(DependentCode)
-CAST_ACCESSOR(StringTable)
-CAST_ACCESSOR(JSFunctionResultCache)
-CAST_ACCESSOR(NormalizedMapCache)
-CAST_ACCESSOR(ScopeInfo)
-CAST_ACCESSOR(CompilationCacheTable)
-CAST_ACCESSOR(CodeCacheHashTable)
-CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
-CAST_ACCESSOR(MapCache)
-CAST_ACCESSOR(String)
-CAST_ACCESSOR(SeqString)
-CAST_ACCESSOR(SeqOneByteString)
-CAST_ACCESSOR(SeqTwoByteString)
-CAST_ACCESSOR(SlicedString)
-CAST_ACCESSOR(ConsString)
+CAST_ACCESSOR(DescriptorArray)
+CAST_ACCESSOR(ExternalArray)
+CAST_ACCESSOR(ExternalOneByteString)
+CAST_ACCESSOR(ExternalFloat32Array)
+CAST_ACCESSOR(ExternalFloat64Array)
+CAST_ACCESSOR(ExternalInt16Array)
+CAST_ACCESSOR(ExternalInt32Array)
+CAST_ACCESSOR(ExternalInt8Array)
 CAST_ACCESSOR(ExternalString)
-CAST_ACCESSOR(ExternalAsciiString)
 CAST_ACCESSOR(ExternalTwoByteString)
-CAST_ACCESSOR(Symbol)
-CAST_ACCESSOR(Name)
-CAST_ACCESSOR(JSReceiver)
-CAST_ACCESSOR(JSObject)
-CAST_ACCESSOR(Smi)
-CAST_ACCESSOR(HeapObject)
-CAST_ACCESSOR(HeapNumber)
-CAST_ACCESSOR(Oddball)
-CAST_ACCESSOR(Cell)
-CAST_ACCESSOR(PropertyCell)
-CAST_ACCESSOR(SharedFunctionInfo)
-CAST_ACCESSOR(Map)
-CAST_ACCESSOR(JSFunction)
+CAST_ACCESSOR(ExternalUint16Array)
+CAST_ACCESSOR(ExternalUint32Array)
+CAST_ACCESSOR(ExternalUint8Array)
+CAST_ACCESSOR(ExternalUint8ClampedArray)
+CAST_ACCESSOR(FixedArray)
+CAST_ACCESSOR(FixedArrayBase)
+CAST_ACCESSOR(FixedDoubleArray)
+CAST_ACCESSOR(FixedTypedArrayBase)
+CAST_ACCESSOR(Foreign)
+CAST_ACCESSOR(FreeSpace)
 CAST_ACCESSOR(GlobalObject)
-CAST_ACCESSOR(JSGlobalProxy)
-CAST_ACCESSOR(JSGlobalObject)
-CAST_ACCESSOR(JSBuiltinsObject)
-CAST_ACCESSOR(Code)
+CAST_ACCESSOR(HeapObject)
 CAST_ACCESSOR(JSArray)
 CAST_ACCESSOR(JSArrayBuffer)
 CAST_ACCESSOR(JSArrayBufferView)
-CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSBuiltinsObject)
 CAST_ACCESSOR(JSDataView)
-CAST_ACCESSOR(JSRegExp)
-CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSDate)
+CAST_ACCESSOR(JSFunction)
 CAST_ACCESSOR(JSFunctionProxy)
-CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSFunctionResultCache)
+CAST_ACCESSOR(JSGeneratorObject)
+CAST_ACCESSOR(JSGlobalObject)
+CAST_ACCESSOR(JSGlobalProxy)
 CAST_ACCESSOR(JSMap)
-CAST_ACCESSOR(JSSetIterator)
 CAST_ACCESSOR(JSMapIterator)
+CAST_ACCESSOR(JSMessageObject)
+CAST_ACCESSOR(JSModule)
+CAST_ACCESSOR(JSObject)
+CAST_ACCESSOR(JSProxy)
+CAST_ACCESSOR(JSReceiver)
+CAST_ACCESSOR(JSRegExp)
+CAST_ACCESSOR(JSSet)
+CAST_ACCESSOR(JSSetIterator)
+CAST_ACCESSOR(JSTypedArray)
+CAST_ACCESSOR(JSValue)
 CAST_ACCESSOR(JSWeakMap)
 CAST_ACCESSOR(JSWeakSet)
-CAST_ACCESSOR(Foreign)
-CAST_ACCESSOR(ByteArray)
-CAST_ACCESSOR(FreeSpace)
-CAST_ACCESSOR(ExternalArray)
-CAST_ACCESSOR(ExternalInt8Array)
-CAST_ACCESSOR(ExternalUint8Array)
-CAST_ACCESSOR(ExternalInt16Array)
-CAST_ACCESSOR(ExternalUint16Array)
-CAST_ACCESSOR(ExternalInt32Array)
-CAST_ACCESSOR(ExternalUint32Array)
-CAST_ACCESSOR(ExternalFloat32Array)
-CAST_ACCESSOR(ExternalFloat64Array)
-CAST_ACCESSOR(ExternalUint8ClampedArray)
+CAST_ACCESSOR(Map)
+CAST_ACCESSOR(MapCache)
+CAST_ACCESSOR(Name)
+CAST_ACCESSOR(NameDictionary)
+CAST_ACCESSOR(NormalizedMapCache)
+CAST_ACCESSOR(Object)
+CAST_ACCESSOR(ObjectHashTable)
+CAST_ACCESSOR(Oddball)
+CAST_ACCESSOR(OrderedHashMap)
+CAST_ACCESSOR(OrderedHashSet)
+CAST_ACCESSOR(PolymorphicCodeCacheHashTable)
+CAST_ACCESSOR(PropertyCell)
+CAST_ACCESSOR(ScopeInfo)
+CAST_ACCESSOR(SeededNumberDictionary)
+CAST_ACCESSOR(SeqOneByteString)
+CAST_ACCESSOR(SeqString)
+CAST_ACCESSOR(SeqTwoByteString)
+CAST_ACCESSOR(SharedFunctionInfo)
+CAST_ACCESSOR(SlicedString)
+CAST_ACCESSOR(Smi)
+CAST_ACCESSOR(String)
+CAST_ACCESSOR(StringTable)
 CAST_ACCESSOR(Struct)
-CAST_ACCESSOR(AccessorInfo)
+CAST_ACCESSOR(Symbol)
+CAST_ACCESSOR(UnseededNumberDictionary)
+CAST_ACCESSOR(WeakHashTable)
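
For readers new to the macro, a plausible expansion of CAST_ACCESSOR consistent with the const overloads added elsewhere in this diff; the real definition lives earlier in objects-inl.h, so treat this as a sketch rather than the verbatim macro:

#define CAST_ACCESSOR_SKETCH(type)                \
  type* type::cast(Object* object) {              \
    SLOW_DCHECK(object->Is##type());              \
    return reinterpret_cast<type*>(object);       \
  }                                               \
  const type* type::cast(const Object* object) {  \
    SLOW_DCHECK(object->Is##type());              \
    return reinterpret_cast<const type*>(object); \
  }
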
+
 
 template <class Traits>
 FixedTypedArray<Traits>* FixedTypedArray<Traits>::cast(Object* object) {
-  SLOW_ASSERT(object->IsHeapObject() &&
-      HeapObject::cast(object)->map()->instance_type() ==
-          Traits::kInstanceType);
+  SLOW_DCHECK(object->IsHeapObject() &&
+              HeapObject::cast(object)->map()->instance_type() ==
+              Traits::kInstanceType);
+  return reinterpret_cast<FixedTypedArray<Traits>*>(object);
+}
+
+
+template <class Traits>
+const FixedTypedArray<Traits>*
+FixedTypedArray<Traits>::cast(const Object* object) {
+  SLOW_DCHECK(object->IsHeapObject() &&
+              HeapObject::cast(object)->map()->instance_type() ==
+              Traits::kInstanceType);
+  return reinterpret_cast<const FixedTypedArray<Traits>*>(object);
 }
 
@@ -3101,11 +3270,19 @@
 template <typename Derived, typename Shape, typename Key>
 HashTable<Derived, Shape, Key>*
 HashTable<Derived, Shape, Key>::cast(Object* obj) {
-  ASSERT(obj->IsHashTable());
+  SLOW_DCHECK(obj->IsHashTable());
   return reinterpret_cast<HashTable*>(obj);
 }
 
 
+template <typename Derived, typename Shape, typename Key>
+const HashTable<Derived, Shape, Key>*
+HashTable<Derived, Shape, Key>::cast(const Object* obj) {
+  SLOW_DCHECK(obj->IsHashTable());
+  return reinterpret_cast<const HashTable*>(obj);
+}
+
+
 SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
 SYNCHRONIZED_SMI_ACCESSORS(FixedArrayBase, length, kLengthOffset)
 
@@ -3153,6 +3330,7 @@
 ACCESSORS(Symbol, name, Object, kNameOffset)
 ACCESSORS(Symbol, flags, Smi, kFlagsOffset)
 BOOL_ACCESSORS(Symbol, flags, is_private, kPrivateBit)
+BOOL_ACCESSORS(Symbol, flags, is_own, kOwnBit)
 
 
 bool String::Equals(String* other) {
@@ -3182,7 +3360,7 @@
 
 
 uint16_t String::Get(int index) {
-  ASSERT(index >= 0 && index < length());
+  DCHECK(index >= 0 && index < length());
   switch (StringShape(this).full_representation_tag()) {
     case kSeqStringTag | kOneByteStringTag:
       return SeqOneByteString::cast(this)->SeqOneByteStringGet(index);
@@ -3192,7 +3370,7 @@
     case kConsStringTag | kTwoByteStringTag:
       return ConsString::cast(this)->ConsStringGet(index);
     case kExternalStringTag | kOneByteStringTag:
-      return ExternalAsciiString::cast(this)->ExternalAsciiStringGet(index);
+      return ExternalOneByteString::cast(this)->ExternalOneByteStringGet(index);
     case kExternalStringTag | kTwoByteStringTag:
       return ExternalTwoByteString::cast(this)->ExternalTwoByteStringGet(index);
     case kSlicedStringTag | kOneByteStringTag:
@@ -3208,8 +3386,8 @@
 
 
 void String::Set(int index, uint16_t value) {
-  ASSERT(index >= 0 && index < length());
-  ASSERT(StringShape(this).IsSequential());
+  DCHECK(index >= 0 && index < length());
+  DCHECK(StringShape(this).IsSequential());
 
   return this->IsOneByteRepresentation()
       ? SeqOneByteString::cast(this)->SeqOneByteStringSet(index, value)
@@ -3226,8 +3404,8 @@
 String* String::GetUnderlying() {
   // Giving direct access to underlying string only makes sense if the
   // wrapping string is already flattened.
-  ASSERT(this->IsFlat());
-  ASSERT(StringShape(this).IsIndirect());
+  DCHECK(this->IsFlat());
+  DCHECK(StringShape(this).IsIndirect());
   STATIC_ASSERT(ConsString::kFirstOffset == SlicedString::kParentOffset);
   const int kUnderlyingOffset = SlicedString::kParentOffset;
   return String::cast(READ_FIELD(this, kUnderlyingOffset));
@@ -3240,7 +3418,7 @@
                               const int offset) {
   int slice_offset = offset;
   const int length = string->length();
-  ASSERT(offset <= length);
+  DCHECK(offset <= length);
   while (true) {
     int32_t type = string->map()->instance_type();
     switch (type & (kStringRepresentationMask | kStringEncodingMask)) {
@@ -3258,7 +3436,7 @@
 
       case kExternalStringTag | kOneByteStringTag:
         visitor->VisitOneByteString(
-            ExternalAsciiString::cast(string)->GetChars() + slice_offset,
+            ExternalOneByteString::cast(string)->GetChars() + slice_offset,
             length - offset);
         return NULL;
 
@@ -3289,13 +3467,13 @@
 
 
 uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
+  DCHECK(index >= 0 && index < length());
   return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
 }
 
 
 void SeqOneByteString::SeqOneByteStringSet(int index, uint16_t value) {
-  ASSERT(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
+  DCHECK(index >= 0 && index < length() && value <= kMaxOneByteCharCode);
   WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize,
                    static_cast<byte>(value));
 }
@@ -3322,13 +3500,13 @@
 
 
 uint16_t SeqTwoByteString::SeqTwoByteStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
+  DCHECK(index >= 0 && index < length());
   return READ_SHORT_FIELD(this, kHeaderSize + index * kShortSize);
 }
 
 
 void SeqTwoByteString::SeqTwoByteStringSet(int index, uint16_t value) {
-  ASSERT(index >= 0 && index < length());
+  DCHECK(index >= 0 && index < length());
   WRITE_SHORT_FIELD(this, kHeaderSize + index * kShortSize, value);
 }
 
@@ -3349,7 +3527,7 @@
 
 
 void SlicedString::set_parent(String* parent, WriteBarrierMode mode) {
-  ASSERT(parent->IsSeqString() || parent->IsExternalString());
+  DCHECK(parent->IsSeqString() || parent->IsExternalString());
   WRITE_FIELD(this, kParentOffset, parent);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kParentOffset, parent, mode);
 }
@@ -3396,12 +3574,12 @@
 }
 
 
-const ExternalAsciiString::Resource* ExternalAsciiString::resource() {
+const ExternalOneByteString::Resource* ExternalOneByteString::resource() {
   return *reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset));
 }
 
 
-void ExternalAsciiString::update_data_cache() {
+void ExternalOneByteString::update_data_cache() {
   if (is_short()) return;
   const char** data_field =
       reinterpret_cast<const char**>(FIELD_ADDR(this, kResourceDataOffset));
@@ -3409,22 +3587,22 @@
 }
 
 
-void ExternalAsciiString::set_resource(
-    const ExternalAsciiString::Resource* resource) {
-  ASSERT(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
+void ExternalOneByteString::set_resource(
+    const ExternalOneByteString::Resource* resource) {
+  DCHECK(IsAligned(reinterpret_cast<intptr_t>(resource), kPointerSize));
   *reinterpret_cast<const Resource**>(
       FIELD_ADDR(this, kResourceOffset)) = resource;
   if (resource != NULL) update_data_cache();
 }
 
 
-const uint8_t* ExternalAsciiString::GetChars() {
+const uint8_t* ExternalOneByteString::GetChars() {
   return reinterpret_cast<const uint8_t*>(resource()->data());
 }
 
 
-uint16_t ExternalAsciiString::ExternalAsciiStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
+uint16_t ExternalOneByteString::ExternalOneByteStringGet(int index) {
+  DCHECK(index >= 0 && index < length());
   return GetChars()[index];
 }
 
@@ -3456,7 +3634,7 @@
 
 
 uint16_t ExternalTwoByteString::ExternalTwoByteStringGet(int index) {
-  ASSERT(index >= 0 && index < length());
+  DCHECK(index >= 0 && index < length());
   return GetChars()[index];
 }
 
@@ -3489,17 +3667,17 @@
 
 
 void ConsStringIteratorOp::Pop() {
-  ASSERT(depth_ > 0);
-  ASSERT(depth_ <= maximum_depth_);
+  DCHECK(depth_ > 0);
+  DCHECK(depth_ <= maximum_depth_);
   depth_--;
 }
 
 
 uint16_t StringCharacterStream::GetNext() {
-  ASSERT(buffer8_ != NULL && end_ != NULL);
+  DCHECK(buffer8_ != NULL && end_ != NULL);
   // Advance cursor if needed.
   if (buffer8_ == end_) HasMore();
-  ASSERT(buffer8_ < end_);
+  DCHECK(buffer8_ < end_);
   return is_one_byte_ ? *buffer8_++ : *buffer16_++;
 }
 
@@ -3529,10 +3707,10 @@
   if (buffer8_ != end_) return true;
   int offset;
   String* string = op_->Next(&offset);
-  ASSERT_EQ(offset, 0);
+  DCHECK_EQ(offset, 0);
   if (string == NULL) return false;
   String::VisitFlat(this, string);
-  ASSERT(buffer8_ != end_);
+  DCHECK(buffer8_ != end_);
   return true;
 }
 
@@ -3590,25 +3768,25 @@
 
 
 byte ByteArray::get(int index) {
-  ASSERT(index >= 0 && index < this->length());
+  DCHECK(index >= 0 && index < this->length());
   return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
 }
 
 
 void ByteArray::set(int index, byte value) {
-  ASSERT(index >= 0 && index < this->length());
+  DCHECK(index >= 0 && index < this->length());
   WRITE_BYTE_FIELD(this, kHeaderSize + index * kCharSize, value);
 }
 
 
 int ByteArray::get_int(int index) {
-  ASSERT(index >= 0 && (index * kIntSize) < this->length());
+  DCHECK(index >= 0 && (index * kIntSize) < this->length());
   return READ_INT_FIELD(this, kHeaderSize + index * kIntSize);
 }
 
 
 ByteArray* ByteArray::FromDataStartAddress(Address address) {
-  ASSERT_TAG_ALIGNED(address);
+  DCHECK_TAG_ALIGNED(address);
   return reinterpret_cast<ByteArray*>(address - kHeaderSize + kHeapObjectTag);
 }
 
@@ -3624,7 +3802,7 @@
 
 
 uint8_t ExternalUint8ClampedArray::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   uint8_t* ptr = external_uint8_clamped_pointer();
   return ptr[index];
 }
@@ -3639,13 +3817,13 @@
 
 
 void ExternalUint8ClampedArray::set(int index, uint8_t value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   uint8_t* ptr = external_uint8_clamped_pointer();
   ptr[index] = value;
 }
 
 
-void* ExternalArray::external_pointer() {
+void* ExternalArray::external_pointer() const {
   intptr_t ptr = READ_INTPTR_FIELD(this, kExternalPointerOffset);
   return reinterpret_cast<void*>(ptr);
 }
@@ -3658,7 +3836,7 @@
 
 
 int8_t ExternalInt8Array::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   int8_t* ptr = static_cast<int8_t*>(external_pointer());
   return ptr[index];
 }
@@ -3672,14 +3850,14 @@
 
 
 void ExternalInt8Array::set(int index, int8_t value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   int8_t* ptr = static_cast<int8_t*>(external_pointer());
   ptr[index] = value;
 }
 
 
 uint8_t ExternalUint8Array::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
   return ptr[index];
 }
@@ -3693,14 +3871,14 @@
 
 
 void ExternalUint8Array::set(int index, uint8_t value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   uint8_t* ptr = static_cast<uint8_t*>(external_pointer());
   ptr[index] = value;
 }
 
 
 int16_t ExternalInt16Array::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   int16_t* ptr = static_cast<int16_t*>(external_pointer());
   return ptr[index];
 }
@@ -3714,14 +3892,14 @@
 
 
 void ExternalInt16Array::set(int index, int16_t value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   int16_t* ptr = static_cast<int16_t*>(external_pointer());
   ptr[index] = value;
 }
 
 
 uint16_t ExternalUint16Array::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
   return ptr[index];
 }
@@ -3735,14 +3913,14 @@
 
 
 void ExternalUint16Array::set(int index, uint16_t value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   uint16_t* ptr = static_cast<uint16_t*>(external_pointer());
   ptr[index] = value;
 }
 
 
 int32_t ExternalInt32Array::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   int32_t* ptr = static_cast<int32_t*>(external_pointer());
   return ptr[index];
 }
@@ -3756,14 +3934,14 @@
 
 
 void ExternalInt32Array::set(int index, int32_t value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   int32_t* ptr = static_cast<int32_t*>(external_pointer());
   ptr[index] = value;
 }
 
 
 uint32_t ExternalUint32Array::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
   return ptr[index];
 }
@@ -3777,14 +3955,14 @@
 
 
 void ExternalUint32Array::set(int index, uint32_t value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   uint32_t* ptr = static_cast<uint32_t*>(external_pointer());
   ptr[index] = value;
 }
 
 
 float ExternalFloat32Array::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   float* ptr = static_cast<float*>(external_pointer());
   return ptr[index];
 }
@@ -3797,14 +3975,14 @@
 
 
 void ExternalFloat32Array::set(int index, float value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   float* ptr = static_cast<float*>(external_pointer());
   ptr[index] = value;
 }
 
 
 double ExternalFloat64Array::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   double* ptr = static_cast<double*>(external_pointer());
   return ptr[index];
 }
@@ -3817,7 +3995,7 @@
 
 
 void ExternalFloat64Array::set(int index, double value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   double* ptr = static_cast<double*>(external_pointer());
   ptr[index] = value;
 }
@@ -3883,16 +4061,16 @@
 
 
 float Float32ArrayTraits::defaultValue() {
-  return static_cast<float>(OS::nan_value());
+  return static_cast<float>(base::OS::nan_value());
 }
 
 
-double Float64ArrayTraits::defaultValue() { return OS::nan_value(); }
+double Float64ArrayTraits::defaultValue() { return base::OS::nan_value(); }
 
 
 template <class Traits>
 typename Traits::ElementType FixedTypedArray<Traits>::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   ElementType* ptr = reinterpret_cast<ElementType*>(
       FIELD_ADDR(this, kDataOffset));
   return ptr[index];
@@ -3902,14 +4080,14 @@
 template<> inline
 FixedTypedArray<Float64ArrayTraits>::ElementType
     FixedTypedArray<Float64ArrayTraits>::get_scalar(int index) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   return READ_DOUBLE_FIELD(this, ElementOffset(index));
 }
 
 
 template <class Traits>
 void FixedTypedArray<Traits>::set(int index, ElementType value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   ElementType* ptr = reinterpret_cast<ElementType*>(
       FIELD_ADDR(this, kDataOffset));
   ptr[index] = value;
@@ -3919,7 +4097,7 @@
 template<> inline
 void FixedTypedArray<Float64ArrayTraits>::set(
     int index, Float64ArrayTraits::ElementType value) {
-  ASSERT((index >= 0) && (index < this->length()));
+  DCHECK((index >= 0) && (index < this->length()));
   WRITE_DOUBLE_FIELD(this, ElementOffset(index), value);
 }
 
@@ -3989,7 +4167,7 @@
     } else {
       // Clamp undefined to the default value. All other types have been
       // converted to a number type further up in the call chain.
-      ASSERT(value->IsUndefined());
+      DCHECK(value->IsUndefined());
     }
     array->set(index, cast_value);
   }
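// For context: the setter above clamps a non-number only when it is
// undefined; every other input was already converted by the caller. A
// hypothetical standalone helper showing the same policy (StoreOrClamp is
// illustrative and not part of this patch):
template <class Traits>
void StoreOrClamp(FixedTypedArray<Traits>* array, int index, Object* value) {
  typename Traits::ElementType cast_value = Traits::defaultValue();
  if (value->IsNumber()) {
    cast_value = static_cast<typename Traits::ElementType>(value->Number());
  } else {
    DCHECK(value->IsUndefined());  // Everything else was converted earlier.
  }
  array->set(index, cast_value);
}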
@@ -4049,7 +4227,7 @@
 
 
 void Map::set_visitor_id(int id) {
-  ASSERT(0 <= id && id < 256);
+  DCHECK(0 <= id && id < 256);
   WRITE_BYTE_FIELD(this, kVisitorIdOffset, static_cast<byte>(id));
 }
 
@@ -4073,7 +4251,7 @@
 int Map::GetInObjectPropertyOffset(int index) {
   // Adjust for the number of properties stored in the object.
   index -= inobject_properties();
-  ASSERT(index <= 0);
+  DCHECK(index <= 0);
   return instance_size() + (index * kPointerSize);
 }
 
@@ -4086,8 +4264,8 @@
   if (instance_type == FIXED_ARRAY_TYPE) {
     return FixedArray::BodyDescriptor::SizeOf(map, this);
   }
-  if (instance_type == ASCII_STRING_TYPE ||
-      instance_type == ASCII_INTERNALIZED_STRING_TYPE) {
+  if (instance_type == ONE_BYTE_STRING_TYPE ||
+      instance_type == ONE_BYTE_INTERNALIZED_STRING_TYPE) {
     return SeqOneByteString::SizeFor(
         reinterpret_cast<SeqOneByteString*>(this)->length());
   }
@@ -4114,28 +4292,28 @@
     return reinterpret_cast<FixedTypedArrayBase*>(
         this)->TypedArraySize(instance_type);
   }
-  ASSERT(instance_type == CODE_TYPE);
+  DCHECK(instance_type == CODE_TYPE);
   return reinterpret_cast<Code*>(this)->CodeSize();
 }
 
 
 void Map::set_instance_size(int value) {
-  ASSERT_EQ(0, value & (kPointerSize - 1));
+  DCHECK_EQ(0, value & (kPointerSize - 1));
   value >>= kPointerSizeLog2;
-  ASSERT(0 <= value && value < 256);
+  DCHECK(0 <= value && value < 256);
   NOBARRIER_WRITE_BYTE_FIELD(
       this, kInstanceSizeOffset, static_cast<byte>(value));
 }
 
 
 void Map::set_inobject_properties(int value) {
-  ASSERT(0 <= value && value < 256);
+  DCHECK(0 <= value && value < 256);
   WRITE_BYTE_FIELD(this, kInObjectPropertiesOffset, static_cast<byte>(value));
 }
 
 
 void Map::set_pre_allocated_property_fields(int value) {
-  ASSERT(0 <= value && value < 256);
+  DCHECK(0 <= value && value < 256);
   WRITE_BYTE_FIELD(this,
                    kPreAllocatedPropertyFieldsOffset,
                    static_cast<byte>(value));
@@ -4233,13 +4411,13 @@
 }
 
 
-void Map::set_is_shared(bool value) {
-  set_bit_field3(IsShared::update(bit_field3(), value));
+void Map::set_is_prototype_map(bool value) {
+  set_bit_field2(IsPrototypeMapBits::update(bit_field2(), value));
 }
 
-
-bool Map::is_shared() {
-  return IsShared::decode(bit_field3()); }
+bool Map::is_prototype_map() {
+  return IsPrototypeMapBits::decode(bit_field2());
+}
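// is_prototype_map moves from bit_field3 to bit_field2 but keeps the usual
// BitField accessor shape. A sketch of that helper, close to the template
// in src/utils.h:
template <class T, int shift, int size>
class BitField {
 public:
  static const uint32_t kMask = ((1U << size) - 1U) << shift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> shift);
  }
  static uint32_t update(uint32_t previous, T value) {
    return (previous & ~kMask) | encode(value);
  }
};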
 
 
 void Map::set_dictionary_map(bool value) {
@@ -4259,8 +4437,8 @@
 }
 
 
-void Map::set_owns_descriptors(bool is_shared) {
-  set_bit_field3(OwnsDescriptors::update(bit_field3(), is_shared));
+void Map::set_owns_descriptors(bool owns_descriptors) {
+  set_bit_field3(OwnsDescriptors::update(bit_field3(), owns_descriptors));
 }
 
 
@@ -4445,12 +4623,21 @@
 }
 
 
+bool Code::IsCodeStubOrIC() {
+  return kind() == STUB || kind() == HANDLER || kind() == LOAD_IC ||
+         kind() == KEYED_LOAD_IC || kind() == CALL_IC || kind() == STORE_IC ||
+         kind() == KEYED_STORE_IC || kind() == BINARY_OP_IC ||
+         kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
+         kind() == TO_BOOLEAN_IC;
+}
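// This predicate names exactly the kinds that keep a stub key in the
// type-feedback slot (see stub_key()/set_stub_key() later in this file).
// Usage sketch:
if (code->IsCodeStubOrIC()) {
  uint32_t key = code->stub_key();  // would DCHECK-fail for other kinds
  // ... look the stub up in the stub cache by key ...
}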
+
+
 InlineCacheState Code::ic_state() {
   InlineCacheState result = ExtractICStateFromFlags(flags());
   // Only allow uninitialized or debugger states for non-IC code
   // objects. This is used in the debugger to determine whether or not
   // a call to code object has been replaced with a debug break call.
-  ASSERT(is_inline_cache_stub() ||
+  DCHECK(is_inline_cache_stub() ||
          result == UNINITIALIZED ||
          result == DEBUG_STUB);
   return result;
@@ -4458,7 +4645,7 @@
 
 
 ExtraICState Code::extra_ic_state() {
-  ASSERT(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
+  DCHECK(is_inline_cache_stub() || ic_state() == DEBUG_STUB);
   return ExtractExtraICStateFromFlags(flags());
 }
 
@@ -4485,6 +4672,11 @@
 }
 
 
+inline bool Code::is_hydrogen_stub() {
+  return is_crankshafted() && kind() != OPTIMIZED_FUNCTION;
+}
+
+
 inline void Code::set_is_crankshafted(bool value) {
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = IsCrankshaftedField::update(previous, value);
@@ -4492,58 +4684,42 @@
 }
 
 
-int Code::major_key() {
-  ASSERT(has_major_key());
-  return StubMajorKeyField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+inline bool Code::is_turbofanned() {
+  DCHECK(kind() == OPTIMIZED_FUNCTION || kind() == STUB);
+  return IsTurbofannedField::decode(
+      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
-void Code::set_major_key(int major) {
-  ASSERT(has_major_key());
-  ASSERT(0 <= major && major < 256);
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
-  int updated = StubMajorKeyField::update(previous, major);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-
-bool Code::has_major_key() {
-  return kind() == STUB ||
-      kind() == HANDLER ||
-      kind() == BINARY_OP_IC ||
-      kind() == COMPARE_IC ||
-      kind() == COMPARE_NIL_IC ||
-      kind() == LOAD_IC ||
-      kind() == KEYED_LOAD_IC ||
-      kind() == STORE_IC ||
-      kind() == CALL_IC ||
-      kind() == KEYED_STORE_IC ||
-      kind() == TO_BOOLEAN_IC;
+inline void Code::set_is_turbofanned(bool value) {
+  DCHECK(kind() == OPTIMIZED_FUNCTION || kind() == STUB);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
+  int updated = IsTurbofannedField::update(previous, value);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
 }
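// The bits freed by dropping the per-Code major key now record whether a
// STUB or OPTIMIZED_FUNCTION came out of TurboFan. All kind-specific flag
// accessors share one read-modify-write idiom over a packed uint32 word;
// spelled out with a hypothetical FlagField standing in for any BitField:
uint32_t bits = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
bits = FlagField::update(bits, value);  // FlagField is illustrative
WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, bits);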
 
 
 bool Code::optimizable() {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   return READ_BYTE_FIELD(this, kOptimizableOffset) == 1;
 }
 
 
 void Code::set_optimizable(bool value) {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   WRITE_BYTE_FIELD(this, kOptimizableOffset, value ? 1 : 0);
 }
 
 
 bool Code::has_deoptimization_support() {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
   return FullCodeFlagsHasDeoptimizationSupportField::decode(flags);
 }
 
 
 void Code::set_has_deoptimization_support(bool value) {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
   flags = FullCodeFlagsHasDeoptimizationSupportField::update(flags, value);
   WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
@@ -4551,14 +4727,14 @@
 
 
 bool Code::has_debug_break_slots() {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
   return FullCodeFlagsHasDebugBreakSlotsField::decode(flags);
 }
 
 
 void Code::set_has_debug_break_slots(bool value) {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
   flags = FullCodeFlagsHasDebugBreakSlotsField::update(flags, value);
   WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
@@ -4566,14 +4742,14 @@
 
 
 bool Code::is_compiled_optimizable() {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
   return FullCodeFlagsIsCompiledOptimizable::decode(flags);
 }
 
 
 void Code::set_compiled_optimizable(bool value) {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
   flags = FullCodeFlagsIsCompiledOptimizable::update(flags, value);
   WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
@@ -4581,33 +4757,49 @@
 
 
 int Code::allow_osr_at_loop_nesting_level() {
-  ASSERT_EQ(FUNCTION, kind());
-  return READ_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset);
+  DCHECK_EQ(FUNCTION, kind());
+  int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  return AllowOSRAtLoopNestingLevelField::decode(fields);
 }
 
 
 void Code::set_allow_osr_at_loop_nesting_level(int level) {
-  ASSERT_EQ(FUNCTION, kind());
-  ASSERT(level >= 0 && level <= kMaxLoopNestingMarker);
-  WRITE_BYTE_FIELD(this, kAllowOSRAtLoopNestingLevelOffset, level);
+  DCHECK_EQ(FUNCTION, kind());
+  DCHECK(level >= 0 && level <= kMaxLoopNestingMarker);
+  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
+  int updated = AllowOSRAtLoopNestingLevelField::update(previous, level);
+  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
 }
 
 
 int Code::profiler_ticks() {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   return READ_BYTE_FIELD(this, kProfilerTicksOffset);
 }
 
 
 void Code::set_profiler_ticks(int ticks) {
-  ASSERT_EQ(FUNCTION, kind());
-  ASSERT(ticks < 256);
-  WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+  DCHECK(ticks < 256);
+  if (kind() == FUNCTION) {
+    WRITE_BYTE_FIELD(this, kProfilerTicksOffset, ticks);
+  }
+}
+
+
+int Code::builtin_index() {
+  DCHECK_EQ(BUILTIN, kind());
+  return READ_INT32_FIELD(this, kKindSpecificFlags1Offset);
+}
+
+
+void Code::set_builtin_index(int index) {
+  DCHECK_EQ(BUILTIN, kind());
+  WRITE_INT32_FIELD(this, kKindSpecificFlags1Offset, index);
 }
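// BUILTIN code objects do not use the packed flag words, so the whole
// 32-bit slot can hold the builtin's table index directly. Usage sketch
// (kArrayPush is an illustrative index):
code->set_builtin_index(Builtins::kArrayPush);
DCHECK_EQ(Builtins::kArrayPush, code->builtin_index());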
 
 
 unsigned Code::stack_slots() {
-  ASSERT(is_crankshafted());
+  DCHECK(is_crankshafted());
   return StackSlotsField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
@@ -4615,7 +4807,7 @@
 
 void Code::set_stack_slots(unsigned slots) {
   CHECK(slots <= (1 << kStackSlotsBitCount));
-  ASSERT(is_crankshafted());
+  DCHECK(is_crankshafted());
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = StackSlotsField::update(previous, slots);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -4623,7 +4815,7 @@
 
 
 unsigned Code::safepoint_table_offset() {
-  ASSERT(is_crankshafted());
+  DCHECK(is_crankshafted());
   return SafepointTableOffsetField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
 }
@@ -4631,8 +4823,8 @@
 
 void Code::set_safepoint_table_offset(unsigned offset) {
   CHECK(offset <= (1 << kSafepointTableOffsetBitCount));
-  ASSERT(is_crankshafted());
-  ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+  DCHECK(is_crankshafted());
+  DCHECK(IsAligned(offset, static_cast<unsigned>(kIntSize)));
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = SafepointTableOffsetField::update(previous, offset);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
@@ -4640,15 +4832,16 @@
 
 
 unsigned Code::back_edge_table_offset() {
-  ASSERT_EQ(FUNCTION, kind());
+  DCHECK_EQ(FUNCTION, kind());
   return BackEdgeTableOffsetField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset)) << kPointerSizeLog2;
 }
 
 
 void Code::set_back_edge_table_offset(unsigned offset) {
-  ASSERT_EQ(FUNCTION, kind());
-  ASSERT(IsAligned(offset, static_cast<unsigned>(kIntSize)));
+  DCHECK_EQ(FUNCTION, kind());
+  DCHECK(IsAligned(offset, static_cast<unsigned>(kPointerSize)));
+  offset = offset >> kPointerSizeLog2;
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
   int updated = BackEdgeTableOffsetField::update(previous, offset);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
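// Storing the back-edge table offset in pointer-size units instead of bytes
// lets the same bitfield span a kPointerSize-times larger code object, and
// the tightened alignment check (kPointerSize rather than kIntSize) is what
// makes the right shift lossless. Worked example assuming 64-bit pointers
// (kPointerSizeLog2 == 3):
code->set_back_edge_table_offset(0x1000);  // field holds 0x1000 >> 3 == 0x200
unsigned offset = code->back_edge_table_offset();  // 0x200 << 3 == 0x1000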
@@ -4656,35 +4849,25 @@
 
 
 bool Code::back_edges_patched_for_osr() {
-  ASSERT_EQ(FUNCTION, kind());
-  return BackEdgesPatchedForOSRField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags2Offset));
+  DCHECK_EQ(FUNCTION, kind());
+  return allow_osr_at_loop_nesting_level() > 0;
 }
 
 
-void Code::set_back_edges_patched_for_osr(bool value) {
-  ASSERT_EQ(FUNCTION, kind());
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
-  int updated = BackEdgesPatchedForOSRField::update(previous, value);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
-}
-
-
-
 byte Code::to_boolean_state() {
   return extra_ic_state();
 }
 
 
 bool Code::has_function_cache() {
-  ASSERT(kind() == STUB);
+  DCHECK(kind() == STUB);
   return HasFunctionCacheField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_has_function_cache(bool flag) {
-  ASSERT(kind() == STUB);
+  DCHECK(kind() == STUB);
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = HasFunctionCacheField::update(previous, flag);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -4692,15 +4875,15 @@
 
 
 bool Code::marked_for_deoptimization() {
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
+  DCHECK(kind() == OPTIMIZED_FUNCTION);
   return MarkedForDeoptimizationField::decode(
       READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
 }
 
 
 void Code::set_marked_for_deoptimization(bool flag) {
-  ASSERT(kind() == OPTIMIZED_FUNCTION);
-  ASSERT(!flag || AllowDeoptimization::IsAllowed(GetIsolate()));
+  DCHECK(kind() == OPTIMIZED_FUNCTION);
+  DCHECK(!flag || AllowDeoptimization::IsAllowed(GetIsolate()));
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = MarkedForDeoptimizationField::update(previous, flag);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -4714,7 +4897,7 @@
 
 
 void Code::mark_as_weak_stub() {
-  ASSERT(CanBeWeakStub());
+  DCHECK(CanBeWeakStub());
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = WeakStubField::update(previous, true);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -4728,7 +4911,7 @@
 
 
 void Code::mark_as_invalidated_weak_stub() {
-  ASSERT(is_inline_cache_stub());
+  DCHECK(is_inline_cache_stub());
   int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
   int updated = InvalidatedWeakStubField::update(previous, true);
   WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
@@ -4762,17 +4945,15 @@
 
 
 void Code::set_constant_pool(Object* value) {
-  ASSERT(value->IsConstantPoolArray());
+  DCHECK(value->IsConstantPoolArray());
   WRITE_FIELD(this, kConstantPoolOffset, value);
   WRITE_BARRIER(GetHeap(), this, kConstantPoolOffset, value);
 }
 
 
-Code::Flags Code::ComputeFlags(Kind kind,
-                               InlineCacheState ic_state,
-                               ExtraICState extra_ic_state,
-                               StubType type,
-                               InlineCacheHolderFlag holder) {
+Code::Flags Code::ComputeFlags(Kind kind, InlineCacheState ic_state,
+                               ExtraICState extra_ic_state, StubType type,
+                               CacheHolderFlag holder) {
   // Compute the bit mask.
   unsigned int bits = KindField::encode(kind)
       | ICStateField::encode(ic_state)
@@ -4785,15 +4966,14 @@
 
 Code::Flags Code::ComputeMonomorphicFlags(Kind kind,
                                           ExtraICState extra_ic_state,
-                                          InlineCacheHolderFlag holder,
+                                          CacheHolderFlag holder,
                                           StubType type) {
   return ComputeFlags(kind, MONOMORPHIC, extra_ic_state, type, holder);
 }
 
 
-Code::Flags Code::ComputeHandlerFlags(Kind handler_kind,
-                                      StubType type,
-                                      InlineCacheHolderFlag holder) {
+Code::Flags Code::ComputeHandlerFlags(Kind handler_kind, StubType type,
+                                      CacheHolderFlag holder) {
   return ComputeFlags(Code::HANDLER, MONOMORPHIC, handler_kind, type, holder);
 }
 
@@ -4818,7 +4998,7 @@
 }
 
 
-InlineCacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
+CacheHolderFlag Code::ExtractCacheHolderFromFlags(Flags flags) {
   return CacheHolderField::decode(flags);
 }
 
@@ -4829,6 +5009,12 @@
 }
 
 
+Code::Flags Code::RemoveTypeAndHolderFromFlags(Flags flags) {
+  int bits = flags & ~TypeField::kMask & ~CacheHolderField::kMask;
+  return static_cast<Flags>(bits);
+}
+
+
 Code* Code::GetCodeFromTargetAddress(Address address) {
   HeapObject* code = HeapObject::FromAddress(address - Code::kHeaderSize);
   // GetCodeFromTargetAddress might be called when marking objects during mark
@@ -4864,7 +5050,7 @@
  public:
   FindAndReplacePattern() : count_(0) { }
   void Add(Handle<Map> map_to_find, Handle<Object> obj_to_replace) {
-    ASSERT(count_ < kMaxCount);
+    DCHECK(count_ < kMaxCount);
     find_[count_] = map_to_find;
     replace_[count_] = obj_to_replace;
     ++count_;
@@ -4885,13 +5071,13 @@
 }
 
 
-Object* Map::prototype() {
+Object* Map::prototype() const {
   return READ_FIELD(this, kPrototypeOffset);
 }
 
 
 void Map::set_prototype(Object* value, WriteBarrierMode mode) {
-  ASSERT(value->IsNull() || value->IsJSReceiver());
+  DCHECK(value->IsNull() || value->IsJSReceiver());
   WRITE_FIELD(this, kPrototypeOffset, value);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kPrototypeOffset, value, mode);
 }
@@ -4939,7 +5125,7 @@
 void Map::AppendDescriptor(Descriptor* desc) {
   DescriptorArray* descriptors = instance_descriptors();
   int number_of_own_descriptors = NumberOfOwnDescriptors();
-  ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
+  DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
   descriptors->Append(desc);
   SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
 }
@@ -4950,7 +5136,7 @@
   if (object->IsDescriptorArray()) {
     return TransitionArray::cast(object)->back_pointer_storage();
   } else {
-    ASSERT(object->IsMap() || object->IsUndefined());
+    DCHECK(object->IsMap() || object->IsUndefined());
     return object;
   }
 }
@@ -4961,7 +5147,7 @@
 }
 
 
-bool Map::HasTransitionArray() {
+bool Map::HasTransitionArray() const {
   Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
   return object->IsTransitionArray();
 }
@@ -5007,7 +5193,7 @@
   int old_number_of_transitions = map->NumberOfProtoTransitions();
 #ifdef DEBUG
   if (map->HasPrototypeTransitions()) {
-    ASSERT(map->GetPrototypeTransitions() != *proto_transitions);
+    DCHECK(map->GetPrototypeTransitions() != *proto_transitions);
     map->ZapPrototypeTransitions();
   }
 #endif
@@ -5021,8 +5207,8 @@
 }
 
 
-TransitionArray* Map::transitions() {
-  ASSERT(HasTransitionArray());
+TransitionArray* Map::transitions() const {
+  DCHECK(HasTransitionArray());
   Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
   return TransitionArray::cast(object);
 }
@@ -5041,12 +5227,12 @@
       if (target->instance_descriptors() == instance_descriptors()) {
         Name* key = transitions()->GetKey(i);
         int new_target_index = transition_array->Search(key);
-        ASSERT(new_target_index != TransitionArray::kNotFound);
-        ASSERT(transition_array->GetTarget(new_target_index) == target);
+        DCHECK(new_target_index != TransitionArray::kNotFound);
+        DCHECK(transition_array->GetTarget(new_target_index) == target);
       }
     }
 #endif
-    ASSERT(transitions() != transition_array);
+    DCHECK(transitions() != transition_array);
     ZapTransitions();
   }
 
@@ -5057,14 +5243,14 @@
 
 
 void Map::init_back_pointer(Object* undefined) {
-  ASSERT(undefined->IsUndefined());
+  DCHECK(undefined->IsUndefined());
   WRITE_FIELD(this, kTransitionsOrBackPointerOffset, undefined);
 }
 
 
 void Map::SetBackPointer(Object* value, WriteBarrierMode mode) {
-  ASSERT(instance_type() >= FIRST_JS_RECEIVER_TYPE);
-  ASSERT((value->IsUndefined() && GetBackPointer()->IsMap()) ||
+  DCHECK(instance_type() >= FIRST_JS_RECEIVER_TYPE);
+  DCHECK((value->IsUndefined() && GetBackPointer()->IsMap()) ||
          (value->IsMap() && GetBackPointer()->IsUndefined()));
   Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
   if (object->IsTransitionArray()) {
@@ -5088,7 +5274,7 @@
 ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
 ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset)
 ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
-ACCESSORS(GlobalObject, global_receiver, JSObject, kGlobalReceiverOffset)
+ACCESSORS(GlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
 
 ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
 ACCESSORS(JSGlobalProxy, hash, Object, kHashOffset)
@@ -5112,7 +5298,6 @@
 
 ACCESSORS(AccessorPair, getter, Object, kGetterOffset)
 ACCESSORS(AccessorPair, setter, Object, kSetterOffset)
-ACCESSORS_TO_SMI(AccessorPair, access_flags, kAccessFlagsOffset)
 
 ACCESSORS(AccessCheckInfo, named_callback, Object, kNamedCallbackOffset)
 ACCESSORS(AccessCheckInfo, indexed_callback, Object, kIndexedCallbackOffset)
@@ -5184,6 +5369,8 @@
                  kEvalFrominstructionsOffsetOffset)
 ACCESSORS_TO_SMI(Script, flags, kFlagsOffset)
 BOOL_ACCESSORS(Script, flags, is_shared_cross_origin, kIsSharedCrossOriginBit)
+ACCESSORS(Script, source_url, Object, kSourceUrlOffset)
+ACCESSORS(Script, source_mapping_url, Object, kSourceMappingUrlOffset)
 
 Script::CompilationType Script::compilation_type() {
   return BooleanBit::get(flags(), kCompilationTypeBit) ?
@@ -5217,7 +5404,7 @@
 ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
                  kOptimizedCodeMapOffset)
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
-ACCESSORS(SharedFunctionInfo, feedback_vector, FixedArray,
+ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
           kFeedbackVectorOffset)
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
@@ -5260,6 +5447,7 @@
                compiler_hints,
                has_duplicate_parameters,
                kHasDuplicateParameters)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, asm_function, kIsAsmFunction)
 
 
 #if V8_HOST_ARCH_32_BIT
@@ -5286,15 +5474,15 @@
 
 #define PSEUDO_SMI_ACCESSORS_LO(holder, name, offset)             \
   STATIC_ASSERT(holder::offset % kPointerSize == 0);              \
-  int holder::name() {                                            \
+  int holder::name() const {                                      \
     int value = READ_INT_FIELD(this, offset);                     \
-    ASSERT(kHeapObjectTag == 1);                                  \
-    ASSERT((value & kHeapObjectTag) == 0);                        \
+    DCHECK(kHeapObjectTag == 1);                                  \
+    DCHECK((value & kHeapObjectTag) == 0);                        \
     return value >> 1;                                            \
   }                                                               \
   void holder::set_##name(int value) {                            \
-    ASSERT(kHeapObjectTag == 1);                                  \
-    ASSERT((value & 0xC0000000) == 0xC0000000 ||                  \
+    DCHECK(kHeapObjectTag == 1);                                  \
+    DCHECK((value & 0xC0000000) == 0xC0000000 ||                  \
            (value & 0xC0000000) == 0x0);                          \
     WRITE_INT_FIELD(this,                                         \
                     offset,                                       \
@@ -5369,13 +5557,26 @@
 
 void SharedFunctionInfo::set_strict_mode(StrictMode strict_mode) {
   // We only allow mode transitions from sloppy to strict.
-  ASSERT(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
+  DCHECK(this->strict_mode() == SLOPPY || this->strict_mode() == strict_mode);
   int hints = compiler_hints();
   hints = BooleanBit::set(hints, kStrictModeFunction, strict_mode == STRICT);
   set_compiler_hints(hints);
 }
 
 
+FunctionKind SharedFunctionInfo::kind() {
+  return FunctionKindBits::decode(compiler_hints());
+}
+
+
+void SharedFunctionInfo::set_kind(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  int hints = compiler_hints();
+  hints = FunctionKindBits::update(hints, kind);
+  set_compiler_hints(hints);
+}
+
+
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
                kInlineBuiltin)
@@ -5385,12 +5586,12 @@
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, bound, kBoundFunction)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_anonymous, kIsAnonymous)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_function, kIsFunction)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_optimize,
-               kDontOptimize)
-BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_inline, kDontInline)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_cache, kDontCache)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, dont_flush, kDontFlush)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
+               kIsConciseMethod)
 
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
@@ -5403,7 +5604,7 @@
   String* src_str = String::cast(src);
   if (!StringShape(src_str).IsExternal()) return true;
   if (src_str->IsOneByteRepresentation()) {
-    return ExternalAsciiString::cast(src)->resource() != NULL;
+    return ExternalOneByteString::cast(src)->resource() != NULL;
   } else if (src_str->IsTwoByteRepresentation()) {
     return ExternalTwoByteString::cast(src)->resource() != NULL;
   }
@@ -5412,12 +5613,12 @@
 
 
 void SharedFunctionInfo::DontAdaptArguments() {
-  ASSERT(code()->kind() == Code::BUILTIN);
+  DCHECK(code()->kind() == Code::BUILTIN);
   set_formal_parameter_count(kDontAdaptArgumentsSentinel);
 }
 
 
-int SharedFunctionInfo::start_position() {
+int SharedFunctionInfo::start_position() const {
   return start_position_and_type() >> kStartPositionShift;
 }
 
@@ -5428,13 +5629,13 @@
 }
 
 
-Code* SharedFunctionInfo::code() {
+Code* SharedFunctionInfo::code() const {
   return Code::cast(READ_FIELD(this, kCodeOffset));
 }
 
 
 void SharedFunctionInfo::set_code(Code* value, WriteBarrierMode mode) {
-  ASSERT(value->kind() != Code::OPTIMIZED_FUNCTION);
+  DCHECK(value->kind() != Code::OPTIMIZED_FUNCTION);
   WRITE_FIELD(this, kCodeOffset, value);
   CONDITIONAL_WRITE_BARRIER(value->GetHeap(), this, kCodeOffset, value, mode);
 }
@@ -5448,13 +5649,13 @@
     flusher->EvictCandidate(this);
   }
 
-  ASSERT(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
+  DCHECK(code()->gc_metadata() == NULL && value->gc_metadata() == NULL);
 
   set_code(value);
 }
 
 
-ScopeInfo* SharedFunctionInfo::scope_info() {
+ScopeInfo* SharedFunctionInfo::scope_info() const {
   return reinterpret_cast<ScopeInfo*>(READ_FIELD(this, kScopeInfoOffset));
 }
 
@@ -5471,8 +5672,7 @@
 
 
 bool SharedFunctionInfo::is_compiled() {
-  return code() !=
-      GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
+  return code() != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy);
 }
 
 
@@ -5482,7 +5682,7 @@
 
 
 FunctionTemplateInfo* SharedFunctionInfo::get_api_func_data() {
-  ASSERT(IsApiFunction());
+  DCHECK(IsApiFunction());
   return FunctionTemplateInfo::cast(function_data());
 }
 
@@ -5493,7 +5693,7 @@
 
 
 BuiltinFunctionId SharedFunctionInfo::builtin_function_id() {
-  ASSERT(HasBuiltinFunctionId());
+  DCHECK(HasBuiltinFunctionId());
   return static_cast<BuiltinFunctionId>(Smi::cast(function_data())->value());
 }
 
@@ -5579,15 +5779,22 @@
 }
 
 
-bool JSFunction::IsNative() {
+bool JSFunction::IsFromNativeScript() {
   Object* script = shared()->script();
   bool native = script->IsScript() &&
                 Script::cast(script)->type()->value() == Script::TYPE_NATIVE;
-  ASSERT(!IsBuiltin() || native);  // All builtins are also native.
+  DCHECK(!IsBuiltin() || native);  // All builtins are also native.
   return native;
 }
 
 
+bool JSFunction::IsFromExtensionScript() {
+  Object* script = shared()->script();
+  return script->IsScript() &&
+         Script::cast(script)->type()->value() == Script::TYPE_EXTENSION;
+}
+
+
 bool JSFunction::NeedsArgumentsAdaption() {
   return shared()->formal_parameter_count() !=
       SharedFunctionInfo::kDontAdaptArgumentsSentinel;
@@ -5635,7 +5842,7 @@
 
 
 void JSFunction::set_code(Code* value) {
-  ASSERT(!GetHeap()->InNewSpace(value));
+  DCHECK(!GetHeap()->InNewSpace(value));
   Address entry = value->entry();
   WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
   GetHeap()->incremental_marking()->RecordWriteOfCodeEntry(
@@ -5646,7 +5853,7 @@
 
 
 void JSFunction::set_code_no_write_barrier(Code* value) {
-  ASSERT(!GetHeap()->InNewSpace(value));
+  DCHECK(!GetHeap()->InNewSpace(value));
   Address entry = value->entry();
   WRITE_INTPTR_FIELD(this, kCodeEntryOffset, reinterpret_cast<intptr_t>(entry));
 }
@@ -5680,8 +5887,13 @@
 }
 
 
+JSObject* JSFunction::global_proxy() {
+  return context()->global_proxy();
+}
+
+
 void JSFunction::set_context(Object* value) {
-  ASSERT(value->IsUndefined() || value->IsContext());
+  DCHECK(value->IsUndefined() || value->IsContext());
   WRITE_FIELD(this, kContextOffset, value);
   WRITE_BARRIER(GetHeap(), this, kContextOffset, value);
 }
@@ -5695,11 +5907,6 @@
 }
 
 
-void JSFunction::set_initial_map(Map* value) {
-  set_prototype_or_initial_map(value);
-}
-
-
 bool JSFunction::has_initial_map() {
   return prototype_or_initial_map()->IsMap();
 }
@@ -5716,7 +5923,7 @@
 
 
 Object* JSFunction::instance_prototype() {
-  ASSERT(has_instance_prototype());
+  DCHECK(has_instance_prototype());
   if (has_initial_map()) return initial_map()->prototype();
   // When there is no initial map and the prototype is a JSObject, the
   // initial map field is used for the prototype field.
@@ -5725,7 +5932,7 @@
 
 
 Object* JSFunction::prototype() {
-  ASSERT(has_prototype());
+  DCHECK(has_prototype());
   // If the function's prototype property has been set to a non-JSObject
   // value, that value is stored in the constructor field of the map.
   if (map()->has_non_instance_prototype()) return map()->constructor();
@@ -5739,70 +5946,69 @@
 
 
 bool JSFunction::is_compiled() {
-  return code() !=
-      GetIsolate()->builtins()->builtin(Builtins::kCompileUnoptimized);
+  return code() != GetIsolate()->builtins()->builtin(Builtins::kCompileLazy);
 }
 
 
 FixedArray* JSFunction::literals() {
-  ASSERT(!shared()->bound());
+  DCHECK(!shared()->bound());
   return literals_or_bindings();
 }
 
 
 void JSFunction::set_literals(FixedArray* literals) {
-  ASSERT(!shared()->bound());
+  DCHECK(!shared()->bound());
   set_literals_or_bindings(literals);
 }
 
 
 FixedArray* JSFunction::function_bindings() {
-  ASSERT(shared()->bound());
+  DCHECK(shared()->bound());
   return literals_or_bindings();
 }
 
 
 void JSFunction::set_function_bindings(FixedArray* bindings) {
-  ASSERT(shared()->bound());
+  DCHECK(shared()->bound());
   // Bound function literal may be initialized to the empty fixed array
   // before the bindings are set.
-  ASSERT(bindings == GetHeap()->empty_fixed_array() ||
+  DCHECK(bindings == GetHeap()->empty_fixed_array() ||
          bindings->map() == GetHeap()->fixed_cow_array_map());
   set_literals_or_bindings(bindings);
 }
 
 
 int JSFunction::NumberOfLiterals() {
-  ASSERT(!shared()->bound());
+  DCHECK(!shared()->bound());
   return literals()->length();
 }
 
 
 Object* JSBuiltinsObject::javascript_builtin(Builtins::JavaScript id) {
-  ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
+  DCHECK(id < kJSBuiltinsCount);  // id is unsigned.
   return READ_FIELD(this, OffsetOfFunctionWithId(id));
 }
 
 
 void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
                                               Object* value) {
-  ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
+  DCHECK(id < kJSBuiltinsCount);  // id is unsigned.
   WRITE_FIELD(this, OffsetOfFunctionWithId(id), value);
   WRITE_BARRIER(GetHeap(), this, OffsetOfFunctionWithId(id), value);
 }
 
 
 Code* JSBuiltinsObject::javascript_builtin_code(Builtins::JavaScript id) {
-  ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
+  DCHECK(id < kJSBuiltinsCount);  // id is unsigned.
   return Code::cast(READ_FIELD(this, OffsetOfCodeWithId(id)));
 }
 
 
 void JSBuiltinsObject::set_javascript_builtin_code(Builtins::JavaScript id,
                                                    Code* value) {
-  ASSERT(id < kJSBuiltinsCount);  // id is unsigned.
+  DCHECK(id < kJSBuiltinsCount);  // id is unsigned.
   WRITE_FIELD(this, OffsetOfCodeWithId(id), value);
-  ASSERT(!GetHeap()->InNewSpace(value));
+  DCHECK(!GetHeap()->InNewSpace(value));
 }
 
 
@@ -5813,20 +6019,19 @@
 
 
 void JSProxy::InitializeBody(int object_size, Object* value) {
-  ASSERT(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
+  DCHECK(!value->IsHeapObject() || !GetHeap()->InNewSpace(value));
   for (int offset = kHeaderSize; offset < object_size; offset += kPointerSize) {
     WRITE_FIELD(this, offset, value);
   }
 }
 
 
-ACCESSORS(JSSet, table, Object, kTableOffset)
-ACCESSORS(JSMap, table, Object, kTableOffset)
+ACCESSORS(JSCollection, table, Object, kTableOffset)
 
 
 #define ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(name, type, offset)    \
   template<class Derived, class TableType>                           \
-  type* OrderedHashTableIterator<Derived, TableType>::name() {       \
+  type* OrderedHashTableIterator<Derived, TableType>::name() const { \
     return type::cast(READ_FIELD(this, offset));                     \
   }                                                                  \
   template<class Derived, class TableType>                           \
@@ -5837,8 +6042,8 @@
   }
 
 ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(table, Object, kTableOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Smi, kIndexOffset)
-ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Smi, kKindOffset)
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(index, Object, kIndexOffset)
+ORDERED_HASH_TABLE_ITERATOR_ACCESSORS(kind, Object, kKindOffset)
 
 #undef ORDERED_HASH_TABLE_ITERATOR_ACCESSORS
 
@@ -5865,8 +6070,8 @@
 SMI_ACCESSORS(JSGeneratorObject, stack_handler_index, kStackHandlerIndexOffset)
 
 bool JSGeneratorObject::is_suspended() {
-  ASSERT_LT(kGeneratorExecuting, kGeneratorClosed);
-  ASSERT_EQ(kGeneratorClosed, 0);
+  DCHECK_LT(kGeneratorExecuting, kGeneratorClosed);
+  DCHECK_EQ(kGeneratorClosed, 0);
   return continuation() > 0;
 }
 
@@ -5878,31 +6083,22 @@
   return continuation() == kGeneratorExecuting;
 }
 
-JSGeneratorObject* JSGeneratorObject::cast(Object* obj) {
-  ASSERT(obj->IsJSGeneratorObject());
-  ASSERT(HeapObject::cast(obj)->Size() == JSGeneratorObject::kSize);
-  return reinterpret_cast<JSGeneratorObject*>(obj);
-}
-
-
 ACCESSORS(JSModule, context, Object, kContextOffset)
 ACCESSORS(JSModule, scope_info, ScopeInfo, kScopeInfoOffset)
 
 
-JSModule* JSModule::cast(Object* obj) {
-  ASSERT(obj->IsJSModule());
-  ASSERT(HeapObject::cast(obj)->Size() == JSModule::kSize);
-  return reinterpret_cast<JSModule*>(obj);
-}
-
-
 ACCESSORS(JSValue, value, Object, kValueOffset)
 
 
-JSValue* JSValue::cast(Object* obj) {
-  ASSERT(obj->IsJSValue());
-  ASSERT(HeapObject::cast(obj)->Size() == JSValue::kSize);
-  return reinterpret_cast<JSValue*>(obj);
+HeapNumber* HeapNumber::cast(Object* object) {
+  SLOW_DCHECK(object->IsHeapNumber() || object->IsMutableHeapNumber());
+  return reinterpret_cast<HeapNumber*>(object);
+}
+
+
+const HeapNumber* HeapNumber::cast(const Object* object) {
+  SLOW_DCHECK(object->IsHeapNumber() || object->IsMutableHeapNumber());
+  return reinterpret_cast<const HeapNumber*>(object);
 }
 
 
@@ -5917,13 +6113,6 @@
 ACCESSORS(JSDate, sec, Object, kSecOffset)
 
 
-JSDate* JSDate::cast(Object* obj) {
-  ASSERT(obj->IsJSDate());
-  ASSERT(HeapObject::cast(obj)->Size() == JSDate::kSize);
-  return reinterpret_cast<JSDate*>(obj);
-}
-
-
 ACCESSORS(JSMessageObject, type, String, kTypeOffset)
 ACCESSORS(JSMessageObject, arguments, JSArray, kArgumentsOffset)
 ACCESSORS(JSMessageObject, script, Object, kScriptOffset)
@@ -5932,13 +6121,6 @@
 SMI_ACCESSORS(JSMessageObject, end_position, kEndPositionOffset)
 
 
-JSMessageObject* JSMessageObject::cast(Object* obj) {
-  ASSERT(obj->IsJSMessageObject());
-  ASSERT(HeapObject::cast(obj)->Size() == JSMessageObject::kSize);
-  return reinterpret_cast<JSMessageObject*>(obj);
-}
-
-
 INT_ACCESSORS(Code, instruction_size, kInstructionSizeOffset)
 INT_ACCESSORS(Code, prologue_offset, kPrologueOffset)
 ACCESSORS(Code, relocation_info, ByteArray, kRelocationInfoOffset)
@@ -5953,7 +6135,7 @@
   WRITE_FIELD(this, kHandlerTableOffset, NULL);
   WRITE_FIELD(this, kDeoptimizationDataOffset, NULL);
   WRITE_FIELD(this, kConstantPoolOffset, NULL);
-  // Do not wipe out e.g. a minor key.
+  // Do not wipe out major/minor keys on a code stub or IC.
   if (!READ_FIELD(this, kTypeFeedbackInfoOffset)->IsSmi()) {
     WRITE_FIELD(this, kTypeFeedbackInfoOffset, NULL);
   }
@@ -5961,37 +6143,29 @@
 
 
 Object* Code::type_feedback_info() {
-  ASSERT(kind() == FUNCTION);
+  DCHECK(kind() == FUNCTION);
   return raw_type_feedback_info();
 }
 
 
 void Code::set_type_feedback_info(Object* value, WriteBarrierMode mode) {
-  ASSERT(kind() == FUNCTION);
+  DCHECK(kind() == FUNCTION);
   set_raw_type_feedback_info(value, mode);
   CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kTypeFeedbackInfoOffset,
                             value, mode);
 }
 
 
-int Code::stub_info() {
-  ASSERT(kind() == COMPARE_IC || kind() == COMPARE_NIL_IC ||
-         kind() == BINARY_OP_IC || kind() == LOAD_IC || kind() == CALL_IC);
-  return Smi::cast(raw_type_feedback_info())->value();
+uint32_t Code::stub_key() {
+  DCHECK(IsCodeStubOrIC());
+  Smi* smi_key = Smi::cast(raw_type_feedback_info());
+  return static_cast<uint32_t>(smi_key->value());
 }
 
 
-void Code::set_stub_info(int value) {
-  ASSERT(kind() == COMPARE_IC ||
-         kind() == COMPARE_NIL_IC ||
-         kind() == BINARY_OP_IC ||
-         kind() == STUB ||
-         kind() == LOAD_IC ||
-         kind() == CALL_IC ||
-         kind() == KEYED_LOAD_IC ||
-         kind() == STORE_IC ||
-         kind() == KEYED_STORE_IC);
-  set_raw_type_feedback_info(Smi::FromInt(value));
+void Code::set_stub_key(uint32_t key) {
+  DCHECK(IsCodeStubOrIC());
+  set_raw_type_feedback_info(Smi::FromInt(key));
 }
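// The old major/minor stub-key pair collapses into one 32-bit key kept as a
// Smi in the type-feedback slot, which stubs and ICs never use for feedback.
// Round-trip sketch, assuming the key fits in a Smi on the target platform:
uint32_t key = stub->GetKey();  // packed key from a CodeStub (illustrative)
code->set_stub_key(key);
DCHECK_EQ(key, code->stub_key());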
 
 
@@ -6042,7 +6216,7 @@
 ACCESSORS(JSArray, length, Object, kLengthOffset)
 
 
-void* JSArrayBuffer::backing_store() {
+void* JSArrayBuffer::backing_store() const {
   intptr_t ptr = READ_INTPTR_FIELD(this, kBackingStoreOffset);
   return reinterpret_cast<void*>(ptr);
 }
@@ -6113,7 +6287,7 @@
 
 
 JSRegExp::Flags JSRegExp::GetFlags() {
-  ASSERT(this->data()->IsFixedArray());
+  DCHECK(this->data()->IsFixedArray());
   Object* data = this->data();
   Smi* smi = Smi::cast(FixedArray::cast(data)->get(kFlagsIndex));
   return Flags(smi->value());
@@ -6121,7 +6295,7 @@
 
 
 String* JSRegExp::Pattern() {
-  ASSERT(this->data()->IsFixedArray());
+  DCHECK(this->data()->IsFixedArray());
   Object* data = this->data();
   String* pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
   return pattern;
@@ -6129,14 +6303,14 @@
 
 
 Object* JSRegExp::DataAt(int index) {
-  ASSERT(TypeTag() != NOT_COMPILED);
+  DCHECK(TypeTag() != NOT_COMPILED);
   return FixedArray::cast(data())->get(index);
 }
 
 
 void JSRegExp::SetDataAt(int index, Object* value) {
-  ASSERT(TypeTag() != NOT_COMPILED);
-  ASSERT(index >= kDataIndex);  // Only implementation data can be set this way.
+  DCHECK(TypeTag() != NOT_COMPILED);
+  DCHECK(index >= kDataIndex);  // Only implementation data can be set this way.
   FixedArray::cast(data())->set(index, value);
 }
 
@@ -6151,7 +6325,7 @@
   // pointer may point to a one pointer filler map.
   if (ElementsAreSafeToExamine()) {
     Map* map = fixed_array->map();
-    ASSERT((IsFastSmiOrObjectElementsKind(kind) &&
+    DCHECK((IsFastSmiOrObjectElementsKind(kind) &&
             (map == GetHeap()->fixed_array_map() ||
              map == GetHeap()->fixed_cow_array_map())) ||
            (IsFastDoubleElementsKind(kind) &&
@@ -6161,7 +6335,7 @@
             fixed_array->IsFixedArray() &&
             fixed_array->IsDictionary()) ||
            (kind > DICTIONARY_ELEMENTS));
-    ASSERT((kind != SLOPPY_ARGUMENTS_ELEMENTS) ||
+    DCHECK((kind != SLOPPY_ARGUMENTS_ELEMENTS) ||
            (elements()->IsFixedArray() && elements()->length() >= 2));
   }
 #endif
@@ -6216,7 +6390,7 @@
 
 bool JSObject::HasExternalArrayElements() {
   HeapObject* array = elements();
-  ASSERT(array != NULL);
+  DCHECK(array != NULL);
   return array->IsExternalArray();
 }
 
@@ -6224,7 +6398,7 @@
 #define EXTERNAL_ELEMENTS_CHECK(Type, type, TYPE, ctype, size)          \
 bool JSObject::HasExternal##Type##Elements() {                          \
   HeapObject* array = elements();                                       \
-  ASSERT(array != NULL);                                                \
+  DCHECK(array != NULL);                                                \
   if (!array->IsHeapObject())                                           \
     return false;                                                       \
   return array->map()->instance_type() == EXTERNAL_##TYPE##_ARRAY_TYPE; \
@@ -6237,7 +6411,7 @@
 
 bool JSObject::HasFixedTypedArrayElements() {
   HeapObject* array = elements();
-  ASSERT(array != NULL);
+  DCHECK(array != NULL);
   return array->IsFixedTypedArrayBase();
 }
 
@@ -6245,7 +6419,7 @@
 #define FIXED_TYPED_ELEMENTS_CHECK(Type, type, TYPE, ctype, size)         \
 bool JSObject::HasFixed##Type##Elements() {                               \
   HeapObject* array = elements();                                         \
-  ASSERT(array != NULL);                                                  \
+  DCHECK(array != NULL);                                                  \
   if (!array->IsHeapObject())                                             \
     return false;                                                         \
   return array->map()->instance_type() == FIXED_##TYPE##_ARRAY_TYPE;      \
@@ -6267,13 +6441,13 @@
 
 
 NameDictionary* JSObject::property_dictionary() {
-  ASSERT(!HasFastProperties());
+  DCHECK(!HasFastProperties());
   return NameDictionary::cast(properties());
 }
 
 
 SeededNumberDictionary* JSObject::element_dictionary() {
-  ASSERT(HasDictionaryElements());
+  DCHECK(HasDictionaryElements());
   return SeededNumberDictionary::cast(elements());
 }
 
@@ -6296,6 +6470,10 @@
   return String::cast(this)->ComputeAndSetHash();
 }
 
+bool Name::IsOwn() {
+  return this->IsSymbol() && Symbol::cast(this)->is_own();
+}
+
 
 StringHasher::StringHasher(int length, uint32_t seed)
   : length_(length),
@@ -6303,7 +6481,7 @@
     array_index_(0),
     is_array_index_(0 < length_ && length_ <= String::kMaxArrayIndexSize),
     is_first_char_(true) {
-  ASSERT(FLAG_randomize_hashes || raw_running_hash_ == 0);
+  DCHECK(FLAG_randomize_hashes || raw_running_hash_ == 0);
 }
 
 
@@ -6339,7 +6517,7 @@
 
 
 bool StringHasher::UpdateIndex(uint16_t c) {
-  ASSERT(is_array_index_);
+  DCHECK(is_array_index_);
   if (c < '0' || c > '9') {
     is_array_index_ = false;
     return false;
@@ -6363,7 +6541,7 @@
 
 template<typename Char>
 inline void StringHasher::AddCharacters(const Char* chars, int length) {
-  ASSERT(sizeof(Char) == 1 || sizeof(Char) == 2);
+  DCHECK(sizeof(Char) == 1 || sizeof(Char) == 2);
   int i = 0;
   if (is_array_index_) {
     for (; i < length; i++) {
@@ -6375,7 +6553,7 @@
     }
   }
   for (; i < length; i++) {
-    ASSERT(!is_array_index_);
+    DCHECK(!is_array_index_);
     AddCharacter(chars[i]);
   }
 }
@@ -6391,6 +6569,35 @@
 }
 
 
+uint32_t IteratingStringHasher::Hash(String* string, uint32_t seed) {
+  IteratingStringHasher hasher(string->length(), seed);
+  // Nothing to do.
+  if (hasher.has_trivial_hash()) return hasher.GetHashField();
+  ConsString* cons_string = String::VisitFlat(&hasher, string);
+  // The string was flat.
+  if (cons_string == NULL) return hasher.GetHashField();
+  // This is a ConsString, iterate across it.
+  ConsStringIteratorOp op(cons_string);
+  int offset;
+  while (NULL != (string = op.Next(&offset))) {
+    String::VisitFlat(&hasher, string, offset);
+  }
+  return hasher.GetHashField();
+}
+
+
+void IteratingStringHasher::VisitOneByteString(const uint8_t* chars,
+                                               int length) {
+  AddCharacters(chars, length);
+}
+
+
+void IteratingStringHasher::VisitTwoByteString(const uint16_t* chars,
+                                               int length) {
+  AddCharacters(chars, length);
+}
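// IteratingStringHasher hashes a string without flattening it: VisitFlat
// consumes flat content in place and returns the ConsString to walk
// otherwise. Usage sketch (the seed comes from the heap in real callers):
uint32_t hash_field = IteratingStringHasher::Hash(string, heap->HashSeed());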
+
+
 bool Name::AsArrayIndex(uint32_t* index) {
   return IsString() && String::cast(this)->AsArrayIndex(index);
 }
@@ -6405,8 +6612,29 @@
 }
 
 
-Object* JSReceiver::GetPrototype() {
-  return map()->prototype();
+void String::SetForwardedInternalizedString(String* canonical) {
+  DCHECK(IsInternalizedString());
+  DCHECK(HasHashCode());
+  if (canonical == this) return;  // No need to forward.
+  DCHECK(SlowEquals(canonical));
+  DCHECK(canonical->IsInternalizedString());
+  DCHECK(canonical->HasHashCode());
+  WRITE_FIELD(this, kHashFieldOffset, canonical);
+  // Setting the hash field to a tagged value sets the LSB, causing the hash
+  // code to be interpreted as uninitialized.  We use this fact to recognize
+  // that we have a forwarded string.
+  DCHECK(!HasHashCode());
+}
+
+
+String* String::GetForwardedInternalizedString() {
+  DCHECK(IsInternalizedString());
+  if (HasHashCode()) return this;
+  String* canonical = String::cast(READ_FIELD(this, kHashFieldOffset));
+  DCHECK(canonical->IsInternalizedString());
+  DCHECK(SlowEquals(canonical));
+  DCHECK(canonical->HasHashCode());
+  return canonical;
 }
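// The forwarding trick leans on pointer tagging: writing a heap pointer
// into the hash field sets its low bit, which the hash code treats as
// "not yet computed", so HasHashCode() doubles as the forwarded test.
// Sketch of the resulting canonicalization fast path (Canonical is a
// hypothetical helper):
String* Canonical(String* s) {
  DCHECK(s->IsInternalizedString());
  return s->HasHashCode() ? s : s->GetForwardedInternalizedString();
}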
 
 
@@ -6415,27 +6643,32 @@
 }
 
 
-bool JSReceiver::HasProperty(Handle<JSReceiver> object,
-                             Handle<Name> name) {
+Maybe<bool> JSReceiver::HasProperty(Handle<JSReceiver> object,
+                                    Handle<Name> name) {
   if (object->IsJSProxy()) {
     Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
     return JSProxy::HasPropertyWithHandler(proxy, name);
   }
-  return GetPropertyAttributes(object, name) != ABSENT;
+  Maybe<PropertyAttributes> result = GetPropertyAttributes(object, name);
+  if (!result.has_value) return Maybe<bool>();
+  return maybe(result.value != ABSENT);
 }
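// HasProperty and friends now return Maybe<bool> so an exception thrown by
// a proxy trap or interceptor surfaces as an empty Maybe instead of a
// fabricated attribute. Typical caller sketch (the runtime-function framing
// is illustrative):
Maybe<bool> result = JSReceiver::HasProperty(object, name);
if (!result.has_value) return isolate->heap()->exception();  // re-throw
return isolate->heap()->ToBoolean(result.value);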
 
 
-bool JSReceiver::HasOwnProperty(Handle<JSReceiver> object, Handle<Name> name) {
+Maybe<bool> JSReceiver::HasOwnProperty(Handle<JSReceiver> object,
+                                       Handle<Name> name) {
   if (object->IsJSProxy()) {
     Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
     return JSProxy::HasPropertyWithHandler(proxy, name);
   }
-  return GetOwnPropertyAttributes(object, name) != ABSENT;
+  Maybe<PropertyAttributes> result = GetOwnPropertyAttributes(object, name);
+  if (!result.has_value) return Maybe<bool>();
+  return maybe(result.value != ABSENT);
 }
 
 
-PropertyAttributes JSReceiver::GetPropertyAttributes(Handle<JSReceiver> object,
-                                                     Handle<Name> key) {
+Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
+    Handle<JSReceiver> object, Handle<Name> key) {
   uint32_t index;
   if (object->IsJSObject() && key->AsArrayIndex(&index)) {
     return GetElementAttribute(object, index);
@@ -6445,8 +6678,8 @@
 }
 
 
-PropertyAttributes JSReceiver::GetElementAttribute(Handle<JSReceiver> object,
-                                                   uint32_t index) {
+Maybe<PropertyAttributes> JSReceiver::GetElementAttribute(
+    Handle<JSReceiver> object, uint32_t index) {
   if (object->IsJSProxy()) {
     return JSProxy::GetElementAttributeWithHandler(
         Handle<JSProxy>::cast(object), object, index);
@@ -6457,12 +6690,14 @@
 
 
 bool JSGlobalObject::IsDetached() {
-  return JSGlobalProxy::cast(global_receiver())->IsDetachedFrom(this);
+  return JSGlobalProxy::cast(global_proxy())->IsDetachedFrom(this);
 }
 
 
-bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) {
-  return GetPrototype() != global;
+bool JSGlobalProxy::IsDetachedFrom(GlobalObject* global) const {
+  const PrototypeIterator iter(this->GetIsolate(),
+                               const_cast<JSGlobalProxy*>(this));
+  return iter.GetCurrent() != global;
 }
 
 
@@ -6480,27 +6715,32 @@
 }
 
 
-bool JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
+Maybe<bool> JSReceiver::HasElement(Handle<JSReceiver> object, uint32_t index) {
   if (object->IsJSProxy()) {
     Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
     return JSProxy::HasElementWithHandler(proxy, index);
   }
-  return JSObject::GetElementAttributeWithReceiver(
-      Handle<JSObject>::cast(object), object, index, true) != ABSENT;
+  Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver(
+      Handle<JSObject>::cast(object), object, index, true);
+  if (!result.has_value) return Maybe<bool>();
+  return maybe(result.value != ABSENT);
 }
 
 
-bool JSReceiver::HasOwnElement(Handle<JSReceiver> object, uint32_t index) {
+Maybe<bool> JSReceiver::HasOwnElement(Handle<JSReceiver> object,
+                                      uint32_t index) {
   if (object->IsJSProxy()) {
     Handle<JSProxy> proxy = Handle<JSProxy>::cast(object);
     return JSProxy::HasElementWithHandler(proxy, index);
   }
-  return JSObject::GetElementAttributeWithReceiver(
-      Handle<JSObject>::cast(object), object, index, false) != ABSENT;
+  Maybe<PropertyAttributes> result = JSObject::GetElementAttributeWithReceiver(
+      Handle<JSObject>::cast(object), object, index, false);
+  if (!result.has_value) return Maybe<bool>();
+  return maybe(result.value != ABSENT);
 }
 
 
-PropertyAttributes JSReceiver::GetOwnElementAttribute(
+Maybe<PropertyAttributes> JSReceiver::GetOwnElementAttribute(
     Handle<JSReceiver> object, uint32_t index) {
   if (object->IsJSProxy()) {
     return JSProxy::GetElementAttributeWithHandler(
@@ -6542,9 +6782,10 @@
 
 
 bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
-  Object* function_template = expected_receiver_type();
-  if (!function_template->IsFunctionTemplateInfo()) return true;
-  return FunctionTemplateInfo::cast(function_template)->IsTemplateFor(receiver);
+  if (!HasExpectedReceiverType()) return true;
+  if (!receiver->IsJSObject()) return false;
+  return FunctionTemplateInfo::cast(expected_receiver_type())
+      ->IsTemplateFor(JSObject::cast(receiver)->map());
 }
 
 
@@ -6553,28 +6794,6 @@
 }
 
 
-void AccessorPair::set_access_flags(v8::AccessControl access_control) {
-  int current = access_flags()->value();
-  current = BooleanBit::set(current,
-                            kAllCanReadBit,
-                            access_control & ALL_CAN_READ);
-  current = BooleanBit::set(current,
-                            kAllCanWriteBit,
-                            access_control & ALL_CAN_WRITE);
-  set_access_flags(Smi::FromInt(current));
-}
-
-
-bool AccessorPair::all_can_read() {
-  return BooleanBit::get(access_flags(), kAllCanReadBit);
-}
-
-
-bool AccessorPair::all_can_write() {
-  return BooleanBit::get(access_flags(), kAllCanWriteBit);
-}
-
-
 template<typename Derived, typename Shape, typename Key>
 void Dictionary<Derived, Shape, Key>::SetEntry(int entry,
                                                Handle<Object> key,
@@ -6588,7 +6807,7 @@
                                                Handle<Object> key,
                                                Handle<Object> value,
                                                PropertyDetails details) {
-  ASSERT(!key->IsName() ||
+  DCHECK(!key->IsName() ||
          details.IsDeleted() ||
          details.dictionary_index() > 0);
   int index = DerivedHashTable::EntryToIndex(entry);
@@ -6601,7 +6820,7 @@
 
 
 bool NumberDictionaryShape::IsMatch(uint32_t key, Object* other) {
-  ASSERT(other->IsNumber());
+  DCHECK(other->IsNumber());
   return key == static_cast<uint32_t>(other->Number());
 }
 
@@ -6613,7 +6832,7 @@
 
 uint32_t UnseededNumberDictionaryShape::HashForObject(uint32_t key,
                                                       Object* other) {
-  ASSERT(other->IsNumber());
+  DCHECK(other->IsNumber());
   return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), 0);
 }
 
@@ -6626,7 +6845,7 @@
 uint32_t SeededNumberDictionaryShape::SeededHashForObject(uint32_t key,
                                                           uint32_t seed,
                                                           Object* other) {
-  ASSERT(other->IsNumber());
+  DCHECK(other->IsNumber());
   return ComputeIntegerHash(static_cast<uint32_t>(other->Number()), seed);
 }
 
@@ -6656,7 +6875,7 @@
 
 Handle<Object> NameDictionaryShape::AsHandle(Isolate* isolate,
                                              Handle<Name> key) {
-  ASSERT(key->IsUniqueName());
+  DCHECK(key->IsUniqueName());
   return key;
 }
 
@@ -6728,13 +6947,13 @@
   // Please note this function is used during marking:
   //  - MarkCompactCollector::MarkUnmarkedObject
   //  - IncrementalMarking::Step
-  ASSERT(!heap->InNewSpace(heap->empty_fixed_array()));
+  DCHECK(!heap->InNewSpace(heap->empty_fixed_array()));
   WRITE_FIELD(this, kCodeCacheOffset, heap->empty_fixed_array());
 }
 
 
 void JSArray::EnsureSize(Handle<JSArray> array, int required_size) {
-  ASSERT(array->HasFastSmiOrObjectElements());
+  DCHECK(array->HasFastSmiOrObjectElements());
   Handle<FixedArray> elts = handle(FixedArray::cast(array->elements()));
   const int kArraySizeThatFitsComfortablyInNewSpace = 128;
   if (elts->length() < required_size) {
@@ -6759,7 +6978,7 @@
 
 bool JSArray::AllowsSetElementsLength() {
   bool result = elements()->IsFixedArray() || elements()->IsFixedDoubleArray();
-  ASSERT(result == !HasExternalArrayElements());
+  DCHECK(result == !HasExternalArrayElements());
   return result;
 }
 
@@ -6769,7 +6988,7 @@
   EnsureCanContainElements(array, storage, storage->length(),
                            ALLOW_COPIED_DOUBLE_ELEMENTS);
 
-  ASSERT((storage->map() == array->GetHeap()->fixed_double_array_map() &&
+  DCHECK((storage->map() == array->GetHeap()->fixed_double_array_map() &&
           IsFastDoubleElementsKind(array->GetElementsKind())) ||
          ((storage->map() != array->GetHeap()->fixed_double_array_map()) &&
           (IsFastObjectElementsKind(array->GetElementsKind()) ||
@@ -6780,27 +6999,6 @@
 }
 
 
-Handle<Object> TypeFeedbackInfo::UninitializedSentinel(Isolate* isolate) {
-  return isolate->factory()->uninitialized_symbol();
-}
-
-
-Handle<Object> TypeFeedbackInfo::MegamorphicSentinel(Isolate* isolate) {
-  return isolate->factory()->megamorphic_symbol();
-}
-
-
-Handle<Object> TypeFeedbackInfo::MonomorphicArraySentinel(Isolate* isolate,
-    ElementsKind elements_kind) {
-  return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
-}
-
-
-Object* TypeFeedbackInfo::RawUninitializedSentinel(Heap* heap) {
-  return heap->uninitialized_symbol();
-}
-
-
 int TypeFeedbackInfo::ic_total_count() {
   int current = Smi::cast(READ_FIELD(this, kStorage1Offset))->value();
   return ICTotalCountField::decode(current);
@@ -6822,6 +7020,7 @@
 
 
 void TypeFeedbackInfo::change_ic_with_type_info_count(int delta) {
+  if (delta == 0) return;
   int value = Smi::cast(READ_FIELD(this, kStorage2Offset))->value();
   int new_count = ICsWithTypeInfoCountField::decode(value) + delta;
   // We can get negative count here when the type-feedback info is
@@ -6837,9 +7036,25 @@
 }
 
 
+int TypeFeedbackInfo::ic_generic_count() {
+  return Smi::cast(READ_FIELD(this, kStorage3Offset))->value();
+}
+
+
+void TypeFeedbackInfo::change_ic_generic_count(int delta) {
+  if (delta == 0) return;
+  int new_count = ic_generic_count() + delta;
+  if (new_count >= 0) {
+    new_count &= ~Smi::kMinValue;
+    WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(new_count));
+  }
+}
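// Generic (megamorphic) ICs get their own counter in a third Smi slot: the
// delta == 0 early return skips a pointless read-modify-write, and masking
// with ~Smi::kMinValue keeps an overflowed counter non-negative. Example:
info->change_ic_generic_count(+1);  // one more IC went generic
int generic = info->ic_generic_count();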
+
+
 void TypeFeedbackInfo::initialize_storage() {
   WRITE_FIELD(this, kStorage1Offset, Smi::FromInt(0));
   WRITE_FIELD(this, kStorage2Offset, Smi::FromInt(0));
+  WRITE_FIELD(this, kStorage3Offset, Smi::FromInt(0));
 }
 
 
@@ -6890,7 +7105,7 @@
 
 
 Relocatable::~Relocatable() {
-  ASSERT_EQ(isolate_->relocatable_top(), this);
+  DCHECK_EQ(isolate_->relocatable_top(), this);
   isolate_->set_relocatable_top(prev_);
 }
 
@@ -6913,17 +7128,17 @@
 }
 
 
-void ExternalAsciiString::ExternalAsciiStringIterateBody(ObjectVisitor* v) {
-  typedef v8::String::ExternalAsciiStringResource Resource;
-  v->VisitExternalAsciiString(
+void ExternalOneByteString::ExternalOneByteStringIterateBody(ObjectVisitor* v) {
+  typedef v8::String::ExternalOneByteStringResource Resource;
+  v->VisitExternalOneByteString(
       reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
 }
 
 
-template<typename StaticVisitor>
-void ExternalAsciiString::ExternalAsciiStringIterateBody() {
-  typedef v8::String::ExternalAsciiStringResource Resource;
-  StaticVisitor::VisitExternalAsciiString(
+template <typename StaticVisitor>
+void ExternalOneByteString::ExternalOneByteStringIterateBody() {
+  typedef v8::String::ExternalOneByteStringResource Resource;
+  StaticVisitor::VisitExternalOneByteString(
       reinterpret_cast<Resource**>(FIELD_ADDR(this, kResourceOffset)));
 }
 
@@ -6961,6 +7176,36 @@
 }
 
 
+template<class Derived, class TableType>
+Object* OrderedHashTableIterator<Derived, TableType>::CurrentKey() {
+  TableType* table(TableType::cast(this->table()));
+  int index = Smi::cast(this->index())->value();
+  Object* key = table->KeyAt(index);
+  DCHECK(!key->IsTheHole());
+  return key;
+}
+
+
+void JSSetIterator::PopulateValueArray(FixedArray* array) {
+  array->set(0, CurrentKey());
+}
+
+
+void JSMapIterator::PopulateValueArray(FixedArray* array) {
+  array->set(0, CurrentKey());
+  array->set(1, CurrentValue());
+}
+
+
+Object* JSMapIterator::CurrentValue() {
+  OrderedHashMap* table(OrderedHashMap::cast(this->table()));
+  int index = Smi::cast(this->index())->value();
+  Object* value = table->ValueAt(index);
+  DCHECK(!value->IsTheHole());
+  return value;
+}
+
+
 #undef TYPE_CHECKER
 #undef CAST_ACCESSOR
 #undef INT_ACCESSORS
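
The new iterator accessors above follow a small protocol: CurrentKey()/CurrentValue() read the entry at the saved table index, and PopulateValueArray() fills one slot for set iterators and two for map iterators. A toy analogue of that shape (types hypothetical, not V8's):

    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    struct MapIterator {
      const std::vector<std::pair<int, int>>* table;
      std::size_t index;
      int CurrentKey() const { return (*table)[index].first; }
      int CurrentValue() const { return (*table)[index].second; }
      void PopulateValueArray(std::vector<int>* array) const {
        *array = {CurrentKey(), CurrentValue()};  // map entries fill two slots
      }
    };

    int main() {
      std::vector<std::pair<int, int>> t = {{1, 10}};
      MapIterator it{&t, 0};
      std::vector<int> out;
      it.PopulateValueArray(&out);
      assert(out.size() == 2 && out[0] == 1 && out[1] == 10);
      return 0;
    }
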
@@ -6972,6 +7217,7 @@
 #undef BOOL_GETTER
 #undef BOOL_ACCESSORS
 #undef FIELD_ADDR
+#undef FIELD_ADDR_CONST
 #undef READ_FIELD
 #undef NOBARRIER_READ_FIELD
 #undef WRITE_FIELD
diff --git a/src/objects-printer.cc b/src/objects-printer.cc
index 54a7b55..d709a20 100644
--- a/src/objects-printer.cc
+++ b/src/objects-printer.cc
@@ -4,10 +4,11 @@
 
 #include "src/v8.h"
 
-#include "src/disassembler.h"
 #include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/heap/objects-visiting.h"
 #include "src/jsregexp.h"
-#include "src/objects-visiting.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -15,201 +16,196 @@
 #ifdef OBJECT_PRINT
 
 void Object::Print() {
-  Print(stdout);
+  OFStream os(stdout);
+  this->Print(os);
+  os << flush;
 }
 
 
-void Object::Print(FILE* out) {
+void Object::Print(OStream& os) {  // NOLINT
   if (IsSmi()) {
-    Smi::cast(this)->SmiPrint(out);
+    Smi::cast(this)->SmiPrint(os);
   } else {
-    HeapObject::cast(this)->HeapObjectPrint(out);
+    HeapObject::cast(this)->HeapObjectPrint(os);
   }
-  Flush(out);
 }
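
This is the core of the printer migration: every FILE*-based Print(FILE*) overload becomes Print(OStream&), and the zero-argument Object::Print() turns into a thin wrapper that streams to stdout and flushes. Judging by its use in this patch, OStream/OFStream mirror std::ostream/std::ofstream; a minimal standalone analogue of the pattern:

    #include <iostream>

    struct Obj {
      void Print(std::ostream& os) const {  // was: Print(FILE* out)
        os << "Obj@" << this << "\n";
      }
      void Print() const {        // convenience overload, like Object::Print()
        Print(std::cout);
        std::cout << std::flush;  // mirrors the "os << flush" above
      }
    };

    int main() { Obj().Print(); }

One stream-taking overload now serves stdout, files, and in-memory capture alike, which is why the separate PrintLn variants below can simply be deleted.
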
 
 
-void Object::PrintLn() {
-  PrintLn(stdout);
+void HeapObject::PrintHeader(OStream& os, const char* id) {  // NOLINT
+  os << "" << reinterpret_cast<void*>(this) << ": [" << id << "]\n";
 }
 
 
-void Object::PrintLn(FILE* out) {
-  Print(out);
-  PrintF(out, "\n");
-}
-
-
-void HeapObject::PrintHeader(FILE* out, const char* id) {
-  PrintF(out, "%p: [%s]\n", reinterpret_cast<void*>(this), id);
-}
-
-
-void HeapObject::HeapObjectPrint(FILE* out) {
+void HeapObject::HeapObjectPrint(OStream& os) {  // NOLINT
   InstanceType instance_type = map()->instance_type();
 
   HandleScope scope(GetIsolate());
   if (instance_type < FIRST_NONSTRING_TYPE) {
-    String::cast(this)->StringPrint(out);
+    String::cast(this)->StringPrint(os);
     return;
   }
 
   switch (instance_type) {
     case SYMBOL_TYPE:
-      Symbol::cast(this)->SymbolPrint(out);
+      Symbol::cast(this)->SymbolPrint(os);
       break;
     case MAP_TYPE:
-      Map::cast(this)->MapPrint(out);
+      Map::cast(this)->MapPrint(os);
       break;
     case HEAP_NUMBER_TYPE:
-      HeapNumber::cast(this)->HeapNumberPrint(out);
+      HeapNumber::cast(this)->HeapNumberPrint(os);
+      break;
+    case MUTABLE_HEAP_NUMBER_TYPE:
+      os << "<mutable ";
+      HeapNumber::cast(this)->HeapNumberPrint(os);
+      os << ">";
       break;
     case FIXED_DOUBLE_ARRAY_TYPE:
-      FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(out);
+      FixedDoubleArray::cast(this)->FixedDoubleArrayPrint(os);
       break;
     case CONSTANT_POOL_ARRAY_TYPE:
-      ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(out);
+      ConstantPoolArray::cast(this)->ConstantPoolArrayPrint(os);
       break;
     case FIXED_ARRAY_TYPE:
-      FixedArray::cast(this)->FixedArrayPrint(out);
+      FixedArray::cast(this)->FixedArrayPrint(os);
       break;
     case BYTE_ARRAY_TYPE:
-      ByteArray::cast(this)->ByteArrayPrint(out);
+      ByteArray::cast(this)->ByteArrayPrint(os);
       break;
     case FREE_SPACE_TYPE:
-      FreeSpace::cast(this)->FreeSpacePrint(out);
+      FreeSpace::cast(this)->FreeSpacePrint(os);
       break;
 
-#define PRINT_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)                    \
-    case EXTERNAL_##TYPE##_ARRAY_TYPE:                                         \
-      External##Type##Array::cast(this)->External##Type##ArrayPrint(out);      \
-      break;
+#define PRINT_EXTERNAL_ARRAY(Type, type, TYPE, ctype, size)            \
+  case EXTERNAL_##TYPE##_ARRAY_TYPE:                                   \
+    External##Type##Array::cast(this)->External##Type##ArrayPrint(os); \
+    break;
 
      TYPED_ARRAYS(PRINT_EXTERNAL_ARRAY)
 #undef PRINT_EXTERNAL_ARRAY
 
-#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size)                 \
-    case Fixed##Type##Array::kInstanceType:                                    \
-      Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(out);               \
-      break;
+#define PRINT_FIXED_TYPED_ARRAY(Type, type, TYPE, ctype, size) \
+  case Fixed##Type##Array::kInstanceType:                      \
+    Fixed##Type##Array::cast(this)->FixedTypedArrayPrint(os);  \
+    break;
 
     TYPED_ARRAYS(PRINT_FIXED_TYPED_ARRAY)
 #undef PRINT_FIXED_TYPED_ARRAY
 
     case FILLER_TYPE:
-      PrintF(out, "filler");
+      os << "filler";
       break;
     case JS_OBJECT_TYPE:  // fall through
     case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
     case JS_ARRAY_TYPE:
     case JS_GENERATOR_OBJECT_TYPE:
     case JS_REGEXP_TYPE:
-      JSObject::cast(this)->JSObjectPrint(out);
+      JSObject::cast(this)->JSObjectPrint(os);
       break;
     case ODDBALL_TYPE:
-      Oddball::cast(this)->to_string()->Print(out);
+      Oddball::cast(this)->to_string()->Print(os);
       break;
     case JS_MODULE_TYPE:
-      JSModule::cast(this)->JSModulePrint(out);
+      JSModule::cast(this)->JSModulePrint(os);
       break;
     case JS_FUNCTION_TYPE:
-      JSFunction::cast(this)->JSFunctionPrint(out);
+      JSFunction::cast(this)->JSFunctionPrint(os);
       break;
     case JS_GLOBAL_PROXY_TYPE:
-      JSGlobalProxy::cast(this)->JSGlobalProxyPrint(out);
+      JSGlobalProxy::cast(this)->JSGlobalProxyPrint(os);
       break;
     case JS_GLOBAL_OBJECT_TYPE:
-      JSGlobalObject::cast(this)->JSGlobalObjectPrint(out);
+      JSGlobalObject::cast(this)->JSGlobalObjectPrint(os);
       break;
     case JS_BUILTINS_OBJECT_TYPE:
-      JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(out);
+      JSBuiltinsObject::cast(this)->JSBuiltinsObjectPrint(os);
       break;
     case JS_VALUE_TYPE:
-      PrintF(out, "Value wrapper around:");
-      JSValue::cast(this)->value()->Print(out);
+      os << "Value wrapper around:";
+      JSValue::cast(this)->value()->Print(os);
       break;
     case JS_DATE_TYPE:
-      JSDate::cast(this)->JSDatePrint(out);
+      JSDate::cast(this)->JSDatePrint(os);
       break;
     case CODE_TYPE:
-      Code::cast(this)->CodePrint(out);
+      Code::cast(this)->CodePrint(os);
       break;
     case JS_PROXY_TYPE:
-      JSProxy::cast(this)->JSProxyPrint(out);
+      JSProxy::cast(this)->JSProxyPrint(os);
       break;
     case JS_FUNCTION_PROXY_TYPE:
-      JSFunctionProxy::cast(this)->JSFunctionProxyPrint(out);
+      JSFunctionProxy::cast(this)->JSFunctionProxyPrint(os);
       break;
     case JS_SET_TYPE:
-      JSSet::cast(this)->JSSetPrint(out);
+      JSSet::cast(this)->JSSetPrint(os);
       break;
     case JS_MAP_TYPE:
-      JSMap::cast(this)->JSMapPrint(out);
+      JSMap::cast(this)->JSMapPrint(os);
       break;
     case JS_SET_ITERATOR_TYPE:
-      JSSetIterator::cast(this)->JSSetIteratorPrint(out);
+      JSSetIterator::cast(this)->JSSetIteratorPrint(os);
       break;
     case JS_MAP_ITERATOR_TYPE:
-      JSMapIterator::cast(this)->JSMapIteratorPrint(out);
+      JSMapIterator::cast(this)->JSMapIteratorPrint(os);
       break;
     case JS_WEAK_MAP_TYPE:
-      JSWeakMap::cast(this)->JSWeakMapPrint(out);
+      JSWeakMap::cast(this)->JSWeakMapPrint(os);
       break;
     case JS_WEAK_SET_TYPE:
-      JSWeakSet::cast(this)->JSWeakSetPrint(out);
+      JSWeakSet::cast(this)->JSWeakSetPrint(os);
       break;
     case FOREIGN_TYPE:
-      Foreign::cast(this)->ForeignPrint(out);
+      Foreign::cast(this)->ForeignPrint(os);
       break;
     case SHARED_FUNCTION_INFO_TYPE:
-      SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(out);
+      SharedFunctionInfo::cast(this)->SharedFunctionInfoPrint(os);
       break;
     case JS_MESSAGE_OBJECT_TYPE:
-      JSMessageObject::cast(this)->JSMessageObjectPrint(out);
+      JSMessageObject::cast(this)->JSMessageObjectPrint(os);
       break;
     case CELL_TYPE:
-      Cell::cast(this)->CellPrint(out);
+      Cell::cast(this)->CellPrint(os);
       break;
     case PROPERTY_CELL_TYPE:
-      PropertyCell::cast(this)->PropertyCellPrint(out);
+      PropertyCell::cast(this)->PropertyCellPrint(os);
       break;
     case JS_ARRAY_BUFFER_TYPE:
-      JSArrayBuffer::cast(this)->JSArrayBufferPrint(out);
+      JSArrayBuffer::cast(this)->JSArrayBufferPrint(os);
       break;
     case JS_TYPED_ARRAY_TYPE:
-      JSTypedArray::cast(this)->JSTypedArrayPrint(out);
+      JSTypedArray::cast(this)->JSTypedArrayPrint(os);
       break;
     case JS_DATA_VIEW_TYPE:
-      JSDataView::cast(this)->JSDataViewPrint(out);
+      JSDataView::cast(this)->JSDataViewPrint(os);
       break;
 #define MAKE_STRUCT_CASE(NAME, Name, name) \
   case NAME##_TYPE:                        \
-    Name::cast(this)->Name##Print(out);    \
+    Name::cast(this)->Name##Print(os);     \
     break;
   STRUCT_LIST(MAKE_STRUCT_CASE)
 #undef MAKE_STRUCT_CASE
 
     default:
-      PrintF(out, "UNKNOWN TYPE %d", map()->instance_type());
+      os << "UNKNOWN TYPE " << map()->instance_type();
       UNREACHABLE();
       break;
   }
 }
 
 
-void ByteArray::ByteArrayPrint(FILE* out) {
-  PrintF(out, "byte array, data starts at %p", GetDataStartAddress());
+void ByteArray::ByteArrayPrint(OStream& os) {  // NOLINT
+  os << "byte array, data starts at " << GetDataStartAddress();
 }
 
 
-void FreeSpace::FreeSpacePrint(FILE* out) {
-  PrintF(out, "free space, size %d", Size());
+void FreeSpace::FreeSpacePrint(OStream& os) {  // NOLINT
+  os << "free space, size " << Size();
 }
 
 
-#define EXTERNAL_ARRAY_PRINTER(Type, type, TYPE, ctype, size)                 \
-  void External##Type##Array::External##Type##ArrayPrint(FILE* out) {         \
-    PrintF(out, "external " #type " array");                                  \
+#define EXTERNAL_ARRAY_PRINTER(Type, type, TYPE, ctype, size)           \
+  void External##Type##Array::External##Type##ArrayPrint(OStream& os) { \
+    os << "external " #type " array";                                   \
   }
 
 TYPED_ARRAYS(EXTERNAL_ARRAY_PRINTER)
@@ -218,67 +214,52 @@
 
 
 template <class Traits>
-void FixedTypedArray<Traits>::FixedTypedArrayPrint(FILE* out) {
-  PrintF(out, "fixed %s", Traits::Designator());
+void FixedTypedArray<Traits>::FixedTypedArrayPrint(OStream& os) {  // NOLINT
+  os << "fixed " << Traits::Designator();
 }
 
 
-void JSObject::PrintProperties(FILE* out) {
+void JSObject::PrintProperties(OStream& os) {  // NOLINT
   if (HasFastProperties()) {
     DescriptorArray* descs = map()->instance_descriptors();
     for (int i = 0; i < map()->NumberOfOwnDescriptors(); i++) {
-      PrintF(out, "   ");
-      descs->GetKey(i)->NamePrint(out);
-      PrintF(out, ": ");
+      os << "   ";
+      descs->GetKey(i)->NamePrint(os);
+      os << ": ";
       switch (descs->GetType(i)) {
         case FIELD: {
           FieldIndex index = FieldIndex::ForDescriptor(map(), i);
-          RawFastPropertyAt(index)->ShortPrint(out);
-          PrintF(out, " (field at offset %d)\n", index.property_index());
+          os << Brief(RawFastPropertyAt(index)) << " (field at offset "
+             << index.property_index() << ")\n";
           break;
         }
         case CONSTANT:
-          descs->GetConstant(i)->ShortPrint(out);
-          PrintF(out, " (constant)\n");
+          os << Brief(descs->GetConstant(i)) << " (constant)\n";
           break;
         case CALLBACKS:
-          descs->GetCallbacksObject(i)->ShortPrint(out);
-          PrintF(out, " (callback)\n");
+          os << Brief(descs->GetCallbacksObject(i)) << " (callback)\n";
           break;
         case NORMAL:  // only in slow mode
-        case HANDLER:  // only in lookup results, not in descriptors
-        case INTERCEPTOR:  // only in lookup results, not in descriptors
-        // There are no transitions in the descriptor array.
-        case NONEXISTENT:
           UNREACHABLE();
           break;
       }
     }
   } else {
-    property_dictionary()->Print(out);
+    property_dictionary()->Print(os);
   }
 }
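
Throughout this file, the old two-step "ShortPrint(out) then PrintF" sequences collapse into single insertions via Brief(...), which wraps an object so that operator<< emits its short form. A minimal analogue of that manipulator idiom (names hypothetical; the real Brief wraps V8 Objects):

    #include <iostream>

    struct Thing { int id; };

    struct Brief {
      explicit Brief(const Thing& t) : thing(t) {}
      const Thing& thing;
    };

    inline std::ostream& operator<<(std::ostream& os, const Brief& b) {
      return os << "<Thing " << b.thing.id << ">";  // short form only
    }

    int main() { std::cout << Brief(Thing{7}) << "\n"; }
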
 
 
-template<class T>
-static void DoPrintElements(FILE *out, Object* object) {
+template <class T>
+static void DoPrintElements(OStream& os, Object* object) {  // NOLINT
   T* p = T::cast(object);
   for (int i = 0; i < p->length(); i++) {
-    PrintF(out, "   %d: %d\n", i, p->get_scalar(i));
+    os << "   " << i << ": " << p->get_scalar(i) << "\n";
   }
 }
 
 
-template<class T>
-static void DoPrintDoubleElements(FILE* out, Object* object) {
-  T* p = T::cast(object);
-  for (int i = 0; i < p->length(); i++) {
-    PrintF(out, "   %d: %f\n", i, p->get_scalar(i));
-  }
-}
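
The deleted DoPrintDoubleElements above is the payoff of the stream switch: printf needed distinct %d and %g format strings per element type, but operator<< resolves the right overload from T, so one template now covers integral and floating-point arrays (and the PRINT_DOUBLE_ELEMENTS macro further down folds into PRINT_ELEMENTS). A sketch of why one template suffices:

    #include <cstddef>
    #include <iostream>
    #include <vector>

    template <class T>
    void DoPrintElements(std::ostream& os, const std::vector<T>& v) {
      for (std::size_t i = 0; i < v.size(); i++)
        os << "   " << i << ": " << v[i] << "\n";  // works for int and double
    }

    int main() {
      DoPrintElements(std::cout, std::vector<int>{1, 2});
      DoPrintElements(std::cout, std::vector<double>{0.5, 2.25});
    }
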
-
-
-void JSObject::PrintElements(FILE* out) {
+void JSObject::PrintElements(OStream& os) {  // NOLINT
   // Don't call GetElementsKind, its validation code can cause the printer to
   // fail when debugging.
   switch (map()->elements_kind()) {
@@ -289,9 +270,7 @@
       // Print in array notation for non-sparse arrays.
       FixedArray* p = FixedArray::cast(elements());
       for (int i = 0; i < p->length(); i++) {
-        PrintF(out, "   %d: ", i);
-        p->get(i)->ShortPrint(out);
-        PrintF(out, "\n");
+        os << "   " << i << ": " << Brief(p->get(i)) << "\n";
       }
       break;
     }
@@ -301,29 +280,24 @@
       if (elements()->length() > 0) {
         FixedDoubleArray* p = FixedDoubleArray::cast(elements());
         for (int i = 0; i < p->length(); i++) {
+          os << "   " << i << ": ";
           if (p->is_the_hole(i)) {
-            PrintF(out, "   %d: <the hole>", i);
+            os << "<the hole>";
           } else {
-            PrintF(out, "   %d: %g", i, p->get_scalar(i));
+            os << p->get_scalar(i);
           }
-          PrintF(out, "\n");
+          os << "\n";
         }
       }
       break;
     }
 
 
-#define PRINT_ELEMENTS(Kind, Type)                                          \
-    case Kind: {                                                            \
-      DoPrintElements<Type>(out, elements());                               \
-      break;                                                                \
-    }
-
-#define PRINT_DOUBLE_ELEMENTS(Kind, Type)                                   \
-    case Kind: {                                                            \
-      DoPrintDoubleElements<Type>(out, elements());                         \
-      break;                                                                \
-    }
+#define PRINT_ELEMENTS(Kind, Type)         \
+  case Kind: {                             \
+    DoPrintElements<Type>(os, elements()); \
+    break;                                 \
+  }
 
     PRINT_ELEMENTS(EXTERNAL_UINT8_CLAMPED_ELEMENTS, ExternalUint8ClampedArray)
     PRINT_ELEMENTS(EXTERNAL_INT8_ELEMENTS, ExternalInt8Array)
@@ -335,9 +309,8 @@
     PRINT_ELEMENTS(EXTERNAL_INT32_ELEMENTS, ExternalInt32Array)
     PRINT_ELEMENTS(EXTERNAL_UINT32_ELEMENTS,
         ExternalUint32Array)
-    PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array)
-    PRINT_DOUBLE_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array)
-
+    PRINT_ELEMENTS(EXTERNAL_FLOAT32_ELEMENTS, ExternalFloat32Array)
+    PRINT_ELEMENTS(EXTERNAL_FLOAT64_ELEMENTS, ExternalFloat64Array)
 
     PRINT_ELEMENTS(UINT8_ELEMENTS, FixedUint8Array)
     PRINT_ELEMENTS(UINT8_CLAMPED_ELEMENTS, FixedUint8ClampedArray)
@@ -346,66 +319,58 @@
     PRINT_ELEMENTS(INT16_ELEMENTS, FixedInt16Array)
     PRINT_ELEMENTS(UINT32_ELEMENTS, FixedUint32Array)
     PRINT_ELEMENTS(INT32_ELEMENTS, FixedInt32Array)
-    PRINT_DOUBLE_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
-    PRINT_DOUBLE_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
+    PRINT_ELEMENTS(FLOAT32_ELEMENTS, FixedFloat32Array)
+    PRINT_ELEMENTS(FLOAT64_ELEMENTS, FixedFloat64Array)
 
-#undef PRINT_DOUBLE_ELEMENTS
 #undef PRINT_ELEMENTS
 
     case DICTIONARY_ELEMENTS:
-      elements()->Print(out);
+      elements()->Print(os);
       break;
     case SLOPPY_ARGUMENTS_ELEMENTS: {
       FixedArray* p = FixedArray::cast(elements());
-      PrintF(out, "   parameter map:");
+      os << "   parameter map:";
       for (int i = 2; i < p->length(); i++) {
-        PrintF(out, " %d:", i - 2);
-        p->get(i)->ShortPrint(out);
+        os << " " << (i - 2) << ":" << Brief(p->get(i));
       }
-      PrintF(out, "\n   context: ");
-      p->get(0)->ShortPrint(out);
-      PrintF(out, "\n   arguments: ");
-      p->get(1)->ShortPrint(out);
-      PrintF(out, "\n");
+      os << "\n   context: " << Brief(p->get(0))
+         << "\n   arguments: " << Brief(p->get(1)) << "\n";
       break;
     }
   }
 }
 
 
-void JSObject::PrintTransitions(FILE* out) {
+void JSObject::PrintTransitions(OStream& os) {  // NOLINT
   if (!map()->HasTransitionArray()) return;
   TransitionArray* transitions = map()->transitions();
   for (int i = 0; i < transitions->number_of_transitions(); i++) {
     Name* key = transitions->GetKey(i);
-    PrintF(out, "   ");
-    key->NamePrint(out);
-    PrintF(out, ": ");
+    os << "   ";
+    key->NamePrint(os);
+    os << ": ";
     if (key == GetHeap()->frozen_symbol()) {
-      PrintF(out, " (transition to frozen)\n");
+      os << " (transition to frozen)\n";
     } else if (key == GetHeap()->elements_transition_symbol()) {
-      PrintF(out, " (transition to ");
-      PrintElementsKind(out, transitions->GetTarget(i)->elements_kind());
-      PrintF(out, ")\n");
+      os << " (transition to "
+         << ElementsKindToString(transitions->GetTarget(i)->elements_kind())
+         << ")\n";
     } else if (key == GetHeap()->observed_symbol()) {
-      PrintF(out, " (transition to Object.observe)\n");
+      os << " (transition to Object.observe)\n";
     } else {
       switch (transitions->GetTargetDetails(i).type()) {
         case FIELD: {
-          PrintF(out, " (transition to field)\n");
+          os << " (transition to field)\n";
           break;
         }
         case CONSTANT:
-          PrintF(out, " (transition to constant)\n");
+          os << " (transition to constant)\n";
           break;
         case CALLBACKS:
-          PrintF(out, " (transition to callback)\n");
+          os << " (transition to callback)\n";
           break;
         // Values below are never in the target descriptor array.
         case NORMAL:
-        case HANDLER:
-        case INTERCEPTOR:
-        case NONEXISTENT:
           UNREACHABLE();
           break;
       }
@@ -414,35 +379,32 @@
 }
 
 
-void JSObject::JSObjectPrint(FILE* out) {
-  PrintF(out, "%p: [JSObject]\n", reinterpret_cast<void*>(this));
-  PrintF(out, " - map = %p [", reinterpret_cast<void*>(map()));
+void JSObject::JSObjectPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSObject");
   // Don't call GetElementsKind, its validation code can cause the printer to
   // fail when debugging.
-  PrintElementsKind(out, this->map()->elements_kind());
-  PrintF(out,
-         "]\n - prototype = %p\n",
-         reinterpret_cast<void*>(GetPrototype()));
-  PrintF(out, " {\n");
-  PrintProperties(out);
-  PrintTransitions(out);
-  PrintElements(out);
-  PrintF(out, " }\n");
+  PrototypeIterator iter(GetIsolate(), this);
+  os << " - map = " << reinterpret_cast<void*>(map()) << " ["
+     << ElementsKindToString(this->map()->elements_kind())
+     << "]\n - prototype = " << reinterpret_cast<void*>(iter.GetCurrent())
+     << "\n {\n";
+  PrintProperties(os);
+  PrintTransitions(os);
+  PrintElements(os);
+  os << " }\n";
 }
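
JSObjectPrint now obtains the prototype through a PrototypeIterator positioned at the receiver instead of calling GetPrototype() directly, matching the iterator-based prototype walking introduced elsewhere in this merge. A toy illustration of walking a prototype chain (types hypothetical):

    #include <iostream>

    struct Obj { const Obj* proto; };

    int main() {
      Obj root{nullptr}, mid{&root}, leaf{&mid};
      // iter.GetCurrent() above corresponds to the first step of this walk.
      for (const Obj* p = leaf.proto; p != nullptr; p = p->proto)
        std::cout << "prototype: " << p << "\n";
    }
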
 
 
-void JSModule::JSModulePrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSModule");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - context = ");
-  context()->Print(out);
-  PrintF(out, " - scope_info = ");
-  scope_info()->ShortPrint(out);
-  PrintElementsKind(out, this->map()->elements_kind());
-  PrintF(out, " {\n");
-  PrintProperties(out);
-  PrintElements(out);
-  PrintF(out, " }\n");
+void JSModule::JSModulePrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSModule");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n"
+     << " - context = ";
+  context()->Print(os);
+  os << " - scope_info = " << Brief(scope_info())
+     << ElementsKindToString(this->map()->elements_kind()) << " {\n";
+  PrintProperties(os);
+  PrintElements(os);
+  os << " }\n";
 }
 
 
@@ -457,191 +419,165 @@
 }
 
 
-void Symbol::SymbolPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "Symbol");
-  PrintF(out, " - hash: %d\n", Hash());
-  PrintF(out, " - name: ");
-  name()->ShortPrint();
-  PrintF(out, " - private: %d\n", is_private());
-  PrintF(out, "\n");
+void Symbol::SymbolPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Symbol");
+  os << " - hash: " << Hash();
+  os << "\n - name: " << Brief(name());
+  os << "\n - private: " << is_private();
+  os << "\n - own: " << is_own();
+  os << "\n";
 }
 
 
-void Map::MapPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "Map");
-  PrintF(out, " - type: %s\n", TypeToString(instance_type()));
-  PrintF(out, " - instance size: %d\n", instance_size());
-  PrintF(out, " - inobject properties: %d\n", inobject_properties());
-  PrintF(out, " - elements kind: ");
-  PrintElementsKind(out, elements_kind());
-  PrintF(out, "\n - pre-allocated property fields: %d\n",
-      pre_allocated_property_fields());
-  PrintF(out, " - unused property fields: %d\n", unused_property_fields());
-  if (is_hidden_prototype()) {
-    PrintF(out, " - hidden_prototype\n");
-  }
-  if (has_named_interceptor()) {
-    PrintF(out, " - named_interceptor\n");
-  }
-  if (has_indexed_interceptor()) {
-    PrintF(out, " - indexed_interceptor\n");
-  }
-  if (is_undetectable()) {
-    PrintF(out, " - undetectable\n");
-  }
-  if (has_instance_call_handler()) {
-    PrintF(out, " - instance_call_handler\n");
-  }
-  if (is_access_check_needed()) {
-    PrintF(out, " - access_check_needed\n");
-  }
+void Map::MapPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Map");
+  os << " - type: " << TypeToString(instance_type()) << "\n";
+  os << " - instance size: " << instance_size() << "\n";
+  os << " - inobject properties: " << inobject_properties() << "\n";
+  os << " - elements kind: " << ElementsKindToString(elements_kind());
+  os << "\n - pre-allocated property fields: "
+     << pre_allocated_property_fields() << "\n";
+  os << " - unused property fields: " << unused_property_fields() << "\n";
+  if (is_hidden_prototype()) os << " - hidden_prototype\n";
+  if (has_named_interceptor()) os << " - named_interceptor\n";
+  if (has_indexed_interceptor()) os << " - indexed_interceptor\n";
+  if (is_undetectable()) os << " - undetectable\n";
+  if (has_instance_call_handler()) os << " - instance_call_handler\n";
+  if (is_access_check_needed()) os << " - access_check_needed\n";
   if (is_frozen()) {
-    PrintF(out, " - frozen\n");
+    os << " - frozen\n";
   } else if (!is_extensible()) {
-    PrintF(out, " - sealed\n");
+    os << " - sealed\n";
   }
-  PrintF(out, " - back pointer: ");
-  GetBackPointer()->ShortPrint(out);
-  PrintF(out, "\n - instance descriptors %s#%i: ",
-         owns_descriptors() ? "(own) " : "",
-         NumberOfOwnDescriptors());
-  instance_descriptors()->ShortPrint(out);
+  os << " - back pointer: " << Brief(GetBackPointer());
+  os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
+     << "#" << NumberOfOwnDescriptors() << ": "
+     << Brief(instance_descriptors());
   if (HasTransitionArray()) {
-    PrintF(out, "\n - transitions: ");
-    transitions()->ShortPrint(out);
+    os << "\n - transitions: " << Brief(transitions());
   }
-  PrintF(out, "\n - prototype: ");
-  prototype()->ShortPrint(out);
-  PrintF(out, "\n - constructor: ");
-  constructor()->ShortPrint(out);
-  PrintF(out, "\n - code cache: ");
-  code_cache()->ShortPrint(out);
-  PrintF(out, "\n - dependent code: ");
-  dependent_code()->ShortPrint(out);
-  PrintF(out, "\n");
+  os << "\n - prototype: " << Brief(prototype());
+  os << "\n - constructor: " << Brief(constructor());
+  os << "\n - code cache: " << Brief(code_cache());
+  os << "\n - dependent code: " << Brief(dependent_code());
+  os << "\n";
 }
 
 
-void CodeCache::CodeCachePrint(FILE* out) {
-  HeapObject::PrintHeader(out, "CodeCache");
-  PrintF(out, "\n - default_cache: ");
-  default_cache()->ShortPrint(out);
-  PrintF(out, "\n - normal_type_cache: ");
-  normal_type_cache()->ShortPrint(out);
+void CodeCache::CodeCachePrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "CodeCache");
+  os << "\n - default_cache: " << Brief(default_cache());
+  os << "\n - normal_type_cache: " << Brief(normal_type_cache());
 }
 
 
-void PolymorphicCodeCache::PolymorphicCodeCachePrint(FILE* out) {
-  HeapObject::PrintHeader(out, "PolymorphicCodeCache");
-  PrintF(out, "\n - cache: ");
-  cache()->ShortPrint(out);
+void PolymorphicCodeCache::PolymorphicCodeCachePrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "PolymorphicCodeCache");
+  os << "\n - cache: " << Brief(cache());
 }
 
 
-void TypeFeedbackInfo::TypeFeedbackInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "TypeFeedbackInfo");
-  PrintF(out, " - ic_total_count: %d, ic_with_type_info_count: %d\n",
-         ic_total_count(), ic_with_type_info_count());
+void TypeFeedbackInfo::TypeFeedbackInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "TypeFeedbackInfo");
+  os << " - ic_total_count: " << ic_total_count()
+     << ", ic_with_type_info_count: " << ic_with_type_info_count()
+     << ", ic_generic_count: " << ic_generic_count() << "\n";
 }
 
 
-void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "AliasedArgumentsEntry");
-  PrintF(out, "\n - aliased_context_slot: %d", aliased_context_slot());
+void AliasedArgumentsEntry::AliasedArgumentsEntryPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "AliasedArgumentsEntry");
+  os << "\n - aliased_context_slot: " << aliased_context_slot();
 }
 
 
-void FixedArray::FixedArrayPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "FixedArray");
-  PrintF(out, " - length: %d", length());
+void FixedArray::FixedArrayPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "FixedArray");
+  os << " - length: " << length();
   for (int i = 0; i < length(); i++) {
-    PrintF(out, "\n  [%d]: ", i);
-    get(i)->ShortPrint(out);
+    os << "\n  [" << i << "]: " << Brief(get(i));
   }
-  PrintF(out, "\n");
+  os << "\n";
 }
 
 
-void FixedDoubleArray::FixedDoubleArrayPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "FixedDoubleArray");
-  PrintF(out, " - length: %d", length());
+void FixedDoubleArray::FixedDoubleArrayPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "FixedDoubleArray");
+  os << " - length: " << length();
   for (int i = 0; i < length(); i++) {
+    os << "\n  [" << i << "]: ";
     if (is_the_hole(i)) {
-      PrintF(out, "\n  [%d]: <the hole>", i);
+      os << "<the hole>";
     } else {
-      PrintF(out, "\n  [%d]: %g", i, get_scalar(i));
+      os << get_scalar(i);
     }
   }
-  PrintF(out, "\n");
+  os << "\n";
 }
 
 
-void ConstantPoolArray::ConstantPoolArrayPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "ConstantPoolArray");
-  PrintF(out, " - length: %d", length());
+void ConstantPoolArray::ConstantPoolArrayPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "ConstantPoolArray");
+  os << " - length: " << length();
   for (int i = 0; i <= last_index(INT32, SMALL_SECTION); i++) {
     if (i < last_index(INT64, SMALL_SECTION)) {
-      PrintF(out, "\n  [%d]: double: %g", i, get_int64_entry_as_double(i));
+      os << "\n  [" << i << "]: double: " << get_int64_entry_as_double(i);
     } else if (i <= last_index(CODE_PTR, SMALL_SECTION)) {
-      PrintF(out, "\n  [%d]: code target pointer: %p", i,
-             reinterpret_cast<void*>(get_code_ptr_entry(i)));
+      os << "\n  [" << i << "]: code target pointer: "
+         << reinterpret_cast<void*>(get_code_ptr_entry(i));
     } else if (i <= last_index(HEAP_PTR, SMALL_SECTION)) {
-      PrintF(out, "\n  [%d]: heap pointer: %p", i,
-             reinterpret_cast<void*>(get_heap_ptr_entry(i)));
+      os << "\n  [" << i << "]: heap pointer: "
+         << reinterpret_cast<void*>(get_heap_ptr_entry(i));
     } else if (i <= last_index(INT32, SMALL_SECTION)) {
-      PrintF(out, "\n  [%d]: int32: %d", i, get_int32_entry(i));
+      os << "\n  [" << i << "]: int32: " << get_int32_entry(i);
     }
   }
   if (is_extended_layout()) {
-    PrintF(out, "\n  Extended section:");
+    os << "\n  Extended section:";
     for (int i = first_extended_section_index();
          i <= last_index(INT32, EXTENDED_SECTION); i++) {
-    if (i < last_index(INT64, EXTENDED_SECTION)) {
-      PrintF(out, "\n  [%d]: double: %g", i, get_int64_entry_as_double(i));
-    } else if (i <= last_index(CODE_PTR, EXTENDED_SECTION)) {
-      PrintF(out, "\n  [%d]: code target pointer: %p", i,
-             reinterpret_cast<void*>(get_code_ptr_entry(i)));
-    } else if (i <= last_index(HEAP_PTR, EXTENDED_SECTION)) {
-      PrintF(out, "\n  [%d]: heap pointer: %p", i,
-             reinterpret_cast<void*>(get_heap_ptr_entry(i)));
-    } else if (i <= last_index(INT32, EXTENDED_SECTION)) {
-      PrintF(out, "\n  [%d]: int32: %d", i, get_int32_entry(i));
+      if (i < last_index(INT64, EXTENDED_SECTION)) {
+        os << "\n  [" << i << "]: double: " << get_int64_entry_as_double(i);
+      } else if (i <= last_index(CODE_PTR, EXTENDED_SECTION)) {
+        os << "\n  [" << i << "]: code target pointer: "
+           << reinterpret_cast<void*>(get_code_ptr_entry(i));
+      } else if (i <= last_index(HEAP_PTR, EXTENDED_SECTION)) {
+        os << "\n  [" << i << "]: heap pointer: "
+           << reinterpret_cast<void*>(get_heap_ptr_entry(i));
+      } else if (i <= last_index(INT32, EXTENDED_SECTION)) {
+        os << "\n  [" << i << "]: int32: " << get_int32_entry(i);
+      }
     }
   }
-  }
-  PrintF(out, "\n");
+  os << "\n";
 }
 
 
-void JSValue::JSValuePrint(FILE* out) {
-  HeapObject::PrintHeader(out, "ValueObject");
-  value()->Print(out);
+void JSValue::JSValuePrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "ValueObject");
+  value()->Print(os);
 }
 
 
-void JSMessageObject::JSMessageObjectPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSMessageObject");
-  PrintF(out, " - type: ");
-  type()->ShortPrint(out);
-  PrintF(out, "\n - arguments: ");
-  arguments()->ShortPrint(out);
-  PrintF(out, "\n - start_position: %d", start_position());
-  PrintF(out, "\n - end_position: %d", end_position());
-  PrintF(out, "\n - script: ");
-  script()->ShortPrint(out);
-  PrintF(out, "\n - stack_frames: ");
-  stack_frames()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSMessageObject::JSMessageObjectPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSMessageObject");
+  os << " - type: " << Brief(type());
+  os << "\n - arguments: " << Brief(arguments());
+  os << "\n - start_position: " << start_position();
+  os << "\n - end_position: " << end_position();
+  os << "\n - script: " << Brief(script());
+  os << "\n - stack_frames: " << Brief(stack_frames());
+  os << "\n";
 }
 
 
-void String::StringPrint(FILE* out) {
+void String::StringPrint(OStream& os) {  // NOLINT
   if (StringShape(this).IsInternalized()) {
-    PrintF(out, "#");
+    os << "#";
   } else if (StringShape(this).IsCons()) {
-    PrintF(out, "c\"");
+    os << "c\"";
   } else {
-    PrintF(out, "\"");
+    os << "\"";
   }
 
   const char truncated_epilogue[] = "...<truncated>";
@@ -652,26 +588,26 @@
     }
   }
   for (int i = 0; i < len; i++) {
-    PrintF(out, "%c", Get(i));
+    os << AsUC16(Get(i));
   }
   if (len != length()) {
-    PrintF(out, "%s", truncated_epilogue);
+    os << truncated_epilogue;
   }
 
-  if (!StringShape(this).IsInternalized()) PrintF(out, "\"");
+  if (!StringShape(this).IsInternalized()) os << "\"";
 }
 
 
-void Name::NamePrint(FILE* out) {
+void Name::NamePrint(OStream& os) {  // NOLINT
   if (IsString())
-    String::cast(this)->StringPrint(out);
+    String::cast(this)->StringPrint(os);
   else
-    ShortPrint();
+    os << Brief(this);
 }
 
 
 // This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-ASCII characters
+// Since the string can also be in two-byte encoding, non-Latin1 characters
 // will be ignored in the output.
 char* String::ToAsciiArray() {
   // Static so that subsequent calls frees previously allocated space.
@@ -690,204 +626,181 @@
 };
 
 
-void JSDate::JSDatePrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSDate");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - value = ");
-  value()->Print(out);
+void JSDate::JSDatePrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSDate");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - value = ";
+  value()->Print(os);
   if (!year()->IsSmi()) {
-    PrintF(out, " - time = NaN\n");
+    os << " - time = NaN\n";
   } else {
-    PrintF(out, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
-           weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
-           year()->IsSmi() ? Smi::cast(year())->value() : -1,
-           month()->IsSmi() ? Smi::cast(month())->value() : -1,
-           day()->IsSmi() ? Smi::cast(day())->value() : -1,
-           hour()->IsSmi() ? Smi::cast(hour())->value() : -1,
-           min()->IsSmi() ? Smi::cast(min())->value() : -1,
-           sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
+    // TODO(svenpanne) Add some basic formatting to our streams.
+    Vector<char> buf = Vector<char>::New(100);
+    SNPrintF(
+        buf, " - time = %s %04d/%02d/%02d %02d:%02d:%02d\n",
+        weekdays[weekday()->IsSmi() ? Smi::cast(weekday())->value() + 1 : 0],
+        year()->IsSmi() ? Smi::cast(year())->value() : -1,
+        month()->IsSmi() ? Smi::cast(month())->value() : -1,
+        day()->IsSmi() ? Smi::cast(day())->value() : -1,
+        hour()->IsSmi() ? Smi::cast(hour())->value() : -1,
+        min()->IsSmi() ? Smi::cast(min())->value() : -1,
+        sec()->IsSmi() ? Smi::cast(sec())->value() : -1);
+    os << buf.start();
   }
 }
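
Since the stream layer has no printf-style formatting yet (hence the TODO above), the date line is formatted with SNPrintF into a scratch buffer and the result is then streamed. The same shape with the C library's snprintf (sample values, not real date fields):

    #include <cstdio>
    #include <iostream>

    int main() {
      char buf[100];
      std::snprintf(buf, sizeof(buf), " - time = %s %04d/%02d/%02d\n",
                    "Mon", 2014, 5, 12);
      std::cout << buf;  // hand the formatted text to the stream
    }
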
 
 
-void JSProxy::JSProxyPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSProxy");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - handler = ");
-  handler()->Print(out);
-  PrintF(out, "\n - hash = ");
-  hash()->Print(out);
-  PrintF(out, "\n");
+void JSProxy::JSProxyPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSProxy");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - handler = ";
+  handler()->Print(os);
+  os << "\n - hash = ";
+  hash()->Print(os);
+  os << "\n";
 }
 
 
-void JSFunctionProxy::JSFunctionProxyPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSFunctionProxy");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - handler = ");
-  handler()->Print(out);
-  PrintF(out, "\n - call_trap = ");
-  call_trap()->Print(out);
-  PrintF(out, "\n - construct_trap = ");
-  construct_trap()->Print(out);
-  PrintF(out, "\n");
+void JSFunctionProxy::JSFunctionProxyPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSFunctionProxy");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - handler = ";
+  handler()->Print(os);
+  os << "\n - call_trap = ";
+  call_trap()->Print(os);
+  os << "\n - construct_trap = ";
+  construct_trap()->Print(os);
+  os << "\n";
 }
 
 
-void JSSet::JSSetPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSSet");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - table = ");
-  table()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSSet::JSSetPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSSet");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - table = " << Brief(table());
+  os << "\n";
 }
 
 
-void JSMap::JSMapPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSMap");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - table = ");
-  table()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSMap::JSMapPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSMap");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - table = " << Brief(table());
+  os << "\n";
 }
 
 
-template<class Derived, class TableType>
-void OrderedHashTableIterator<Derived, TableType>::
-    OrderedHashTableIteratorPrint(FILE* out) {
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - table = ");
-  table()->ShortPrint(out);
-  PrintF(out, "\n - index = ");
-  index()->ShortPrint(out);
-  PrintF(out, "\n - kind = ");
-  kind()->ShortPrint(out);
-  PrintF(out, "\n");
+template <class Derived, class TableType>
+void OrderedHashTableIterator<
+    Derived, TableType>::OrderedHashTableIteratorPrint(OStream& os) {  // NOLINT
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - table = " << Brief(table());
+  os << "\n - index = " << Brief(index());
+  os << "\n - kind = " << Brief(kind());
+  os << "\n";
 }
 
 
-template void
-OrderedHashTableIterator<JSSetIterator,
-    OrderedHashSet>::OrderedHashTableIteratorPrint(FILE* out);
+template void OrderedHashTableIterator<
+    JSSetIterator,
+    OrderedHashSet>::OrderedHashTableIteratorPrint(OStream& os);  // NOLINT
 
 
-template void
-OrderedHashTableIterator<JSMapIterator,
-    OrderedHashMap>::OrderedHashTableIteratorPrint(FILE* out);
+template void OrderedHashTableIterator<
+    JSMapIterator,
+    OrderedHashMap>::OrderedHashTableIteratorPrint(OStream& os);  // NOLINT
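
The two "template void OrderedHashTableIterator<...>::OrderedHashTableIteratorPrint(...)" statements above are explicit instantiations: the template body lives in this .cc file, so the compiler is told to emit exactly these two specializations here for other translation units to link against. The same idiom in miniature:

    #include <iostream>

    template <class T>
    struct Printer {
      void Print(std::ostream& os) { os << sizeof(T) << "\n"; }
    };

    // Emit Printer<int>'s members in this translation unit.
    template struct Printer<int>;

    int main() {
      Printer<int>().Print(std::cout);
      return 0;
    }
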
 
 
-void JSSetIterator::JSSetIteratorPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSSetIterator");
-  OrderedHashTableIteratorPrint(out);
+void JSSetIterator::JSSetIteratorPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSSetIterator");
+  OrderedHashTableIteratorPrint(os);
 }
 
 
-void JSMapIterator::JSMapIteratorPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSMapIterator");
-  OrderedHashTableIteratorPrint(out);
+void JSMapIterator::JSMapIteratorPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSMapIterator");
+  OrderedHashTableIteratorPrint(os);
 }
 
 
-void JSWeakMap::JSWeakMapPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSWeakMap");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - table = ");
-  table()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSWeakMap::JSWeakMapPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSWeakMap");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - table = " << Brief(table());
+  os << "\n";
 }
 
 
-void JSWeakSet::JSWeakSetPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSWeakSet");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - table = ");
-  table()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSWeakSet::JSWeakSetPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSWeakSet");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - table = " << Brief(table());
+  os << "\n";
 }
 
 
-void JSArrayBuffer::JSArrayBufferPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSArrayBuffer");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - backing_store = %p\n", backing_store());
-  PrintF(out, " - byte_length = ");
-  byte_length()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSArrayBuffer::JSArrayBufferPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSArrayBuffer");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - backing_store = " << backing_store() << "\n";
+  os << " - byte_length = " << Brief(byte_length());
+  os << "\n";
 }
 
 
-void JSTypedArray::JSTypedArrayPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSTypedArray");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - buffer =");
-  buffer()->ShortPrint(out);
-  PrintF(out, "\n - byte_offset = ");
-  byte_offset()->ShortPrint(out);
-  PrintF(out, "\n - byte_length = ");
-  byte_length()->ShortPrint(out);
-  PrintF(out, "\n - length = ");
-  length()->ShortPrint(out);
-  PrintF(out, "\n");
-  PrintElements(out);
+void JSTypedArray::JSTypedArrayPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSTypedArray");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - buffer = " << Brief(buffer());
+  os << "\n - byte_offset = " << Brief(byte_offset());
+  os << "\n - byte_length = " << Brief(byte_length());
+  os << "\n - length = " << Brief(length());
+  os << "\n";
+  PrintElements(os);
 }
 
 
-void JSDataView::JSDataViewPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "JSDataView");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - buffer =");
-  buffer()->ShortPrint(out);
-  PrintF(out, "\n - byte_offset = ");
-  byte_offset()->ShortPrint(out);
-  PrintF(out, "\n - byte_length = ");
-  byte_length()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSDataView::JSDataViewPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "JSDataView");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - buffer =" << Brief(buffer());
+  os << "\n - byte_offset = " << Brief(byte_offset());
+  os << "\n - byte_length = " << Brief(byte_length());
+  os << "\n";
 }
 
 
-void JSFunction::JSFunctionPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "Function");
-  PrintF(out, " - map = %p\n", reinterpret_cast<void*>(map()));
-  PrintF(out, " - initial_map = ");
-  if (has_initial_map()) {
-    initial_map()->ShortPrint(out);
-  }
-  PrintF(out, "\n - shared_info = ");
-  shared()->ShortPrint(out);
-  PrintF(out, "\n   - name = ");
-  shared()->name()->Print(out);
-  PrintF(out, "\n - context = ");
-  context()->ShortPrint(out);
+void JSFunction::JSFunctionPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Function");
+  os << " - map = " << reinterpret_cast<void*>(map()) << "\n";
+  os << " - initial_map = ";
+  if (has_initial_map()) os << Brief(initial_map());
+  os << "\n - shared_info = " << Brief(shared());
+  os << "\n   - name = " << Brief(shared()->name());
+  os << "\n - context = " << Brief(context());
   if (shared()->bound()) {
-    PrintF(out, "\n - bindings = ");
-    function_bindings()->ShortPrint(out);
+    os << "\n - bindings = " << Brief(function_bindings());
   } else {
-    PrintF(out, "\n - literals = ");
-    literals()->ShortPrint(out);
+    os << "\n - literals = " << Brief(literals());
   }
-  PrintF(out, "\n - code = ");
-  code()->ShortPrint(out);
-  PrintF(out, "\n");
-
-  PrintProperties(out);
-  PrintElements(out);
-
-  PrintF(out, "\n");
+  os << "\n - code = " << Brief(code());
+  os << "\n";
+  PrintProperties(os);
+  PrintElements(os);
+  os << "\n";
 }
 
 
-void SharedFunctionInfo::SharedFunctionInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "SharedFunctionInfo");
-  PrintF(out, " - name: ");
-  name()->ShortPrint(out);
-  PrintF(out, "\n - expected_nof_properties: %d", expected_nof_properties());
-  PrintF(out, "\n - ast_node_count: %d", ast_node_count());
-  PrintF(out, "\n - instance class name = ");
-  instance_class_name()->Print(out);
-  PrintF(out, "\n - code = ");
-  code()->ShortPrint(out);
+void SharedFunctionInfo::SharedFunctionInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "SharedFunctionInfo");
+  os << " - name: " << Brief(name());
+  os << "\n - expected_nof_properties: " << expected_nof_properties();
+  os << "\n - ast_node_count: " << ast_node_count();
+  os << "\n - instance class name = ";
+  instance_class_name()->Print(os);
+  os << "\n - code = " << Brief(code());
   if (HasSourceCode()) {
-    PrintF(out, "\n - source code = ");
+    os << "\n - source code = ";
     String* source = String::cast(Script::cast(script())->source());
     int start = start_position();
     int length = end_position() - start;
@@ -895,368 +808,301 @@
         source->ToCString(DISALLOW_NULLS,
                           FAST_STRING_TRAVERSAL,
                           start, length, NULL);
-    PrintF(out, "%s", source_string.get());
+    os << source_string.get();
   }
   // Script files are often large, hard to read.
-  // PrintF(out, "\n - script =");
-  // script()->Print(out);
-  PrintF(out, "\n - function token position = %d", function_token_position());
-  PrintF(out, "\n - start position = %d", start_position());
-  PrintF(out, "\n - end position = %d", end_position());
-  PrintF(out, "\n - is expression = %d", is_expression());
-  PrintF(out, "\n - debug info = ");
-  debug_info()->ShortPrint(out);
-  PrintF(out, "\n - length = %d", length());
-  PrintF(out, "\n - optimized_code_map = ");
-  optimized_code_map()->ShortPrint(out);
-  PrintF(out, "\n - feedback_vector = ");
-  feedback_vector()->FixedArrayPrint(out);
-  PrintF(out, "\n");
+  // os << "\n - script =";
+  // script()->Print(os);
+  os << "\n - function token position = " << function_token_position();
+  os << "\n - start position = " << start_position();
+  os << "\n - end position = " << end_position();
+  os << "\n - is expression = " << is_expression();
+  os << "\n - debug info = " << Brief(debug_info());
+  os << "\n - length = " << length();
+  os << "\n - optimized_code_map = " << Brief(optimized_code_map());
+  os << "\n - feedback_vector = ";
+  feedback_vector()->FixedArrayPrint(os);
+  os << "\n";
 }
 
 
-void JSGlobalProxy::JSGlobalProxyPrint(FILE* out) {
-  PrintF(out, "global_proxy ");
-  JSObjectPrint(out);
-  PrintF(out, "native context : ");
-  native_context()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSGlobalProxy::JSGlobalProxyPrint(OStream& os) {  // NOLINT
+  os << "global_proxy ";
+  JSObjectPrint(os);
+  os << "native context : " << Brief(native_context());
+  os << "\n";
 }
 
 
-void JSGlobalObject::JSGlobalObjectPrint(FILE* out) {
-  PrintF(out, "global ");
-  JSObjectPrint(out);
-  PrintF(out, "native context : ");
-  native_context()->ShortPrint(out);
-  PrintF(out, "\n");
+void JSGlobalObject::JSGlobalObjectPrint(OStream& os) {  // NOLINT
+  os << "global ";
+  JSObjectPrint(os);
+  os << "native context : " << Brief(native_context());
+  os << "\n";
 }
 
 
-void JSBuiltinsObject::JSBuiltinsObjectPrint(FILE* out) {
-  PrintF(out, "builtins ");
-  JSObjectPrint(out);
+void JSBuiltinsObject::JSBuiltinsObjectPrint(OStream& os) {  // NOLINT
+  os << "builtins ";
+  JSObjectPrint(os);
 }
 
 
-void Cell::CellPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "Cell");
+void Cell::CellPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Cell");
 }
 
 
-void PropertyCell::PropertyCellPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "PropertyCell");
+void PropertyCell::PropertyCellPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "PropertyCell");
 }
 
 
-void Code::CodePrint(FILE* out) {
-  HeapObject::PrintHeader(out, "Code");
+void Code::CodePrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Code");
 #ifdef ENABLE_DISASSEMBLER
   if (FLAG_use_verbose_printer) {
-    Disassemble(NULL, out);
+    Disassemble(NULL, os);
   }
 #endif
 }
 
 
-void Foreign::ForeignPrint(FILE* out) {
-  PrintF(out, "foreign address : %p", foreign_address());
+void Foreign::ForeignPrint(OStream& os) {  // NOLINT
+  os << "foreign address : " << foreign_address();
 }
 
 
-void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "ExecutableAccessorInfo");
-  PrintF(out, "\n - name: ");
-  name()->ShortPrint(out);
-  PrintF(out, "\n - flag: ");
-  flag()->ShortPrint(out);
-  PrintF(out, "\n - getter: ");
-  getter()->ShortPrint(out);
-  PrintF(out, "\n - setter: ");
-  setter()->ShortPrint(out);
-  PrintF(out, "\n - data: ");
-  data()->ShortPrint(out);
+void ExecutableAccessorInfo::ExecutableAccessorInfoPrint(
+    OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "ExecutableAccessorInfo");
+  os << "\n - name: " << Brief(name());
+  os << "\n - flag: " << Brief(flag());
+  os << "\n - getter: " << Brief(getter());
+  os << "\n - setter: " << Brief(setter());
+  os << "\n - data: " << Brief(data());
+  os << "\n";
 }
 
 
-void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "DeclaredAccessorInfo");
-  PrintF(out, "\n - name: ");
-  name()->ShortPrint(out);
-  PrintF(out, "\n - flag: ");
-  flag()->ShortPrint(out);
-  PrintF(out, "\n - descriptor: ");
-  descriptor()->ShortPrint(out);
+void DeclaredAccessorInfo::DeclaredAccessorInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "DeclaredAccessorInfo");
+  os << "\n - name: " << Brief(name());
+  os << "\n - flag: " << Brief(flag());
+  os << "\n - descriptor: " << Brief(descriptor());
+  os << "\n";
 }
 
 
-void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "DeclaredAccessorDescriptor");
-  PrintF(out, "\n - internal field: ");
-  serialized_data()->ShortPrint(out);
+void DeclaredAccessorDescriptor::DeclaredAccessorDescriptorPrint(
+    OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "DeclaredAccessorDescriptor");
+  os << "\n - internal field: " << Brief(serialized_data());
+  os << "\n";
 }
 
 
-void Box::BoxPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "Box");
-  PrintF(out, "\n - value: ");
-  value()->ShortPrint(out);
+void Box::BoxPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Box");
+  os << "\n - value: " << Brief(value());
+  os << "\n";
 }
 
 
-void AccessorPair::AccessorPairPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "AccessorPair");
-  PrintF(out, "\n - getter: ");
-  getter()->ShortPrint(out);
-  PrintF(out, "\n - setter: ");
-  setter()->ShortPrint(out);
-  PrintF(out, "\n - flag: ");
-  access_flags()->ShortPrint(out);
+void AccessorPair::AccessorPairPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "AccessorPair");
+  os << "\n - getter: " << Brief(getter());
+  os << "\n - setter: " << Brief(setter());
+  os << "\n";
 }
 
 
-void AccessCheckInfo::AccessCheckInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "AccessCheckInfo");
-  PrintF(out, "\n - named_callback: ");
-  named_callback()->ShortPrint(out);
-  PrintF(out, "\n - indexed_callback: ");
-  indexed_callback()->ShortPrint(out);
-  PrintF(out, "\n - data: ");
-  data()->ShortPrint(out);
+void AccessCheckInfo::AccessCheckInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "AccessCheckInfo");
+  os << "\n - named_callback: " << Brief(named_callback());
+  os << "\n - indexed_callback: " << Brief(indexed_callback());
+  os << "\n - data: " << Brief(data());
+  os << "\n";
 }
 
 
-void InterceptorInfo::InterceptorInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "InterceptorInfo");
-  PrintF(out, "\n - getter: ");
-  getter()->ShortPrint(out);
-  PrintF(out, "\n - setter: ");
-  setter()->ShortPrint(out);
-  PrintF(out, "\n - query: ");
-  query()->ShortPrint(out);
-  PrintF(out, "\n - deleter: ");
-  deleter()->ShortPrint(out);
-  PrintF(out, "\n - enumerator: ");
-  enumerator()->ShortPrint(out);
-  PrintF(out, "\n - data: ");
-  data()->ShortPrint(out);
+void InterceptorInfo::InterceptorInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "InterceptorInfo");
+  os << "\n - getter: " << Brief(getter());
+  os << "\n - setter: " << Brief(setter());
+  os << "\n - query: " << Brief(query());
+  os << "\n - deleter: " << Brief(deleter());
+  os << "\n - enumerator: " << Brief(enumerator());
+  os << "\n - data: " << Brief(data());
+  os << "\n";
 }
 
 
-void CallHandlerInfo::CallHandlerInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "CallHandlerInfo");
-  PrintF(out, "\n - callback: ");
-  callback()->ShortPrint(out);
-  PrintF(out, "\n - data: ");
-  data()->ShortPrint(out);
-  PrintF(out, "\n - call_stub_cache: ");
+void CallHandlerInfo::CallHandlerInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "CallHandlerInfo");
+  os << "\n - callback: " << Brief(callback());
+  os << "\n - data: " << Brief(data());
+  os << "\n";
 }
 
 
-void FunctionTemplateInfo::FunctionTemplateInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "FunctionTemplateInfo");
-  PrintF(out, "\n - class name: ");
-  class_name()->ShortPrint(out);
-  PrintF(out, "\n - tag: ");
-  tag()->ShortPrint(out);
-  PrintF(out, "\n - property_list: ");
-  property_list()->ShortPrint(out);
-  PrintF(out, "\n - serial_number: ");
-  serial_number()->ShortPrint(out);
-  PrintF(out, "\n - call_code: ");
-  call_code()->ShortPrint(out);
-  PrintF(out, "\n - property_accessors: ");
-  property_accessors()->ShortPrint(out);
-  PrintF(out, "\n - prototype_template: ");
-  prototype_template()->ShortPrint(out);
-  PrintF(out, "\n - parent_template: ");
-  parent_template()->ShortPrint(out);
-  PrintF(out, "\n - named_property_handler: ");
-  named_property_handler()->ShortPrint(out);
-  PrintF(out, "\n - indexed_property_handler: ");
-  indexed_property_handler()->ShortPrint(out);
-  PrintF(out, "\n - instance_template: ");
-  instance_template()->ShortPrint(out);
-  PrintF(out, "\n - signature: ");
-  signature()->ShortPrint(out);
-  PrintF(out, "\n - access_check_info: ");
-  access_check_info()->ShortPrint(out);
-  PrintF(out, "\n - hidden_prototype: %s",
-         hidden_prototype() ? "true" : "false");
-  PrintF(out, "\n - undetectable: %s", undetectable() ? "true" : "false");
-  PrintF(out, "\n - need_access_check: %s",
-         needs_access_check() ? "true" : "false");
+void FunctionTemplateInfo::FunctionTemplateInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "FunctionTemplateInfo");
+  os << "\n - class name: " << Brief(class_name());
+  os << "\n - tag: " << Brief(tag());
+  os << "\n - property_list: " << Brief(property_list());
+  os << "\n - serial_number: " << Brief(serial_number());
+  os << "\n - call_code: " << Brief(call_code());
+  os << "\n - property_accessors: " << Brief(property_accessors());
+  os << "\n - prototype_template: " << Brief(prototype_template());
+  os << "\n - parent_template: " << Brief(parent_template());
+  os << "\n - named_property_handler: " << Brief(named_property_handler());
+  os << "\n - indexed_property_handler: " << Brief(indexed_property_handler());
+  os << "\n - instance_template: " << Brief(instance_template());
+  os << "\n - signature: " << Brief(signature());
+  os << "\n - access_check_info: " << Brief(access_check_info());
+  os << "\n - hidden_prototype: " << (hidden_prototype() ? "true" : "false");
+  os << "\n - undetectable: " << (undetectable() ? "true" : "false");
+  os << "\n - need_access_check: " << (needs_access_check() ? "true" : "false");
+  os << "\n";
 }
 
 
-void ObjectTemplateInfo::ObjectTemplateInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "ObjectTemplateInfo");
-  PrintF(out, " - tag: ");
-  tag()->ShortPrint(out);
-  PrintF(out, "\n - property_list: ");
-  property_list()->ShortPrint(out);
-  PrintF(out, "\n - property_accessors: ");
-  property_accessors()->ShortPrint(out);
-  PrintF(out, "\n - constructor: ");
-  constructor()->ShortPrint(out);
-  PrintF(out, "\n - internal_field_count: ");
-  internal_field_count()->ShortPrint(out);
-  PrintF(out, "\n");
+void ObjectTemplateInfo::ObjectTemplateInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "ObjectTemplateInfo");
+  os << " - tag: " << Brief(tag());
+  os << "\n - property_list: " << Brief(property_list());
+  os << "\n - property_accessors: " << Brief(property_accessors());
+  os << "\n - constructor: " << Brief(constructor());
+  os << "\n - internal_field_count: " << Brief(internal_field_count());
+  os << "\n";
 }
 
 
-void SignatureInfo::SignatureInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "SignatureInfo");
-  PrintF(out, "\n - receiver: ");
-  receiver()->ShortPrint(out);
-  PrintF(out, "\n - args: ");
-  args()->ShortPrint(out);
+void SignatureInfo::SignatureInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "SignatureInfo");
+  os << "\n - receiver: " << Brief(receiver());
+  os << "\n - args: " << Brief(args());
+  os << "\n";
 }
 
 
-void TypeSwitchInfo::TypeSwitchInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "TypeSwitchInfo");
-  PrintF(out, "\n - types: ");
-  types()->ShortPrint(out);
+void TypeSwitchInfo::TypeSwitchInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "TypeSwitchInfo");
+  os << "\n - types: " << Brief(types());
+  os << "\n";
 }
 
 
-void AllocationSite::AllocationSitePrint(FILE* out) {
-  HeapObject::PrintHeader(out, "AllocationSite");
-  PrintF(out, " - weak_next: ");
-  weak_next()->ShortPrint(out);
-  PrintF(out, "\n - dependent code: ");
-  dependent_code()->ShortPrint(out);
-  PrintF(out, "\n - nested site: ");
-  nested_site()->ShortPrint(out);
-  PrintF(out, "\n - memento found count: ");
-  Smi::FromInt(memento_found_count())->ShortPrint(out);
-  PrintF(out, "\n - memento create count: ");
-  Smi::FromInt(memento_create_count())->ShortPrint(out);
-  PrintF(out, "\n - pretenure decision: ");
-  Smi::FromInt(pretenure_decision())->ShortPrint(out);
-  PrintF(out, "\n - transition_info: ");
+void AllocationSite::AllocationSitePrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "AllocationSite");
+  os << " - weak_next: " << Brief(weak_next());
+  os << "\n - dependent code: " << Brief(dependent_code());
+  os << "\n - nested site: " << Brief(nested_site());
+  os << "\n - memento found count: "
+     << Brief(Smi::FromInt(memento_found_count()));
+  os << "\n - memento create count: "
+     << Brief(Smi::FromInt(memento_create_count()));
+  os << "\n - pretenure decision: "
+     << Brief(Smi::FromInt(pretenure_decision()));
+  os << "\n - transition_info: ";
   if (transition_info()->IsSmi()) {
     ElementsKind kind = GetElementsKind();
-    PrintF(out, "Array allocation with ElementsKind ");
-    PrintElementsKind(out, kind);
-    PrintF(out, "\n");
-    return;
+    os << "Array allocation with ElementsKind " << ElementsKindToString(kind);
   } else if (transition_info()->IsJSArray()) {
-    PrintF(out, "Array literal ");
-    transition_info()->ShortPrint(out);
-    PrintF(out, "\n");
-    return;
-  }
-
-  PrintF(out, "unknown transition_info");
-  transition_info()->ShortPrint(out);
-  PrintF(out, "\n");
-}
-
-
-void AllocationMemento::AllocationMementoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "AllocationMemento");
-  PrintF(out, " - allocation site: ");
-  if (IsValid()) {
-    GetAllocationSite()->Print();
+    os << "Array literal " << Brief(transition_info());
   } else {
-    PrintF(out, "<invalid>\n");
+    os << "unknown transition_info" << Brief(transition_info());
+  }
+  os << "\n";
+}
+
+
+void AllocationMemento::AllocationMementoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "AllocationMemento");
+  os << " - allocation site: ";
+  if (IsValid()) {
+    GetAllocationSite()->Print(os);
+  } else {
+    os << "<invalid>\n";
   }
 }
 
 
-void Script::ScriptPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "Script");
-  PrintF(out, "\n - source: ");
-  source()->ShortPrint(out);
-  PrintF(out, "\n - name: ");
-  name()->ShortPrint(out);
-  PrintF(out, "\n - line_offset: ");
-  line_offset()->ShortPrint(out);
-  PrintF(out, "\n - column_offset: ");
-  column_offset()->ShortPrint(out);
-  PrintF(out, "\n - type: ");
-  type()->ShortPrint(out);
-  PrintF(out, "\n - id: ");
-  id()->ShortPrint(out);
-  PrintF(out, "\n - context data: ");
-  context_data()->ShortPrint(out);
-  PrintF(out, "\n - wrapper: ");
-  wrapper()->ShortPrint(out);
-  PrintF(out, "\n - compilation type: %d", compilation_type());
-  PrintF(out, "\n - line ends: ");
-  line_ends()->ShortPrint(out);
-  PrintF(out, "\n - eval from shared: ");
-  eval_from_shared()->ShortPrint(out);
-  PrintF(out, "\n - eval from instructions offset: ");
-  eval_from_instructions_offset()->ShortPrint(out);
-  PrintF(out, "\n");
+void Script::ScriptPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "Script");
+  os << "\n - source: " << Brief(source());
+  os << "\n - name: " << Brief(name());
+  os << "\n - line_offset: " << Brief(line_offset());
+  os << "\n - column_offset: " << Brief(column_offset());
+  os << "\n - type: " << Brief(type());
+  os << "\n - id: " << Brief(id());
+  os << "\n - context data: " << Brief(context_data());
+  os << "\n - wrapper: " << Brief(wrapper());
+  os << "\n - compilation type: " << compilation_type();
+  os << "\n - line ends: " << Brief(line_ends());
+  os << "\n - eval from shared: " << Brief(eval_from_shared());
+  os << "\n - eval from instructions offset: "
+     << Brief(eval_from_instructions_offset());
+  os << "\n";
 }
 
 
-void DebugInfo::DebugInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "DebugInfo");
-  PrintF(out, "\n - shared: ");
-  shared()->ShortPrint(out);
-  PrintF(out, "\n - original_code: ");
-  original_code()->ShortPrint(out);
-  PrintF(out, "\n - code: ");
-  code()->ShortPrint(out);
-  PrintF(out, "\n - break_points: ");
-  break_points()->Print(out);
+void DebugInfo::DebugInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "DebugInfo");
+  os << "\n - shared: " << Brief(shared());
+  os << "\n - original_code: " << Brief(original_code());
+  os << "\n - code: " << Brief(code());
+  os << "\n - break_points: ";
+  break_points()->Print(os);
 }
 
 
-void BreakPointInfo::BreakPointInfoPrint(FILE* out) {
-  HeapObject::PrintHeader(out, "BreakPointInfo");
-  PrintF(out, "\n - code_position: %d", code_position()->value());
-  PrintF(out, "\n - source_position: %d", source_position()->value());
-  PrintF(out, "\n - statement_position: %d", statement_position()->value());
-  PrintF(out, "\n - break_point_objects: ");
-  break_point_objects()->ShortPrint(out);
+void BreakPointInfo::BreakPointInfoPrint(OStream& os) {  // NOLINT
+  HeapObject::PrintHeader(os, "BreakPointInfo");
+  os << "\n - code_position: " << code_position()->value();
+  os << "\n - source_position: " << source_position()->value();
+  os << "\n - statement_position: " << statement_position()->value();
+  os << "\n - break_point_objects: " << Brief(break_point_objects());
+  os << "\n";
 }
 
 
-void DescriptorArray::PrintDescriptors(FILE* out) {
-  PrintF(out, "Descriptor array  %d\n", number_of_descriptors());
+void DescriptorArray::PrintDescriptors(OStream& os) {  // NOLINT
+  os << "Descriptor array  " << number_of_descriptors() << "\n";
   for (int i = 0; i < number_of_descriptors(); i++) {
-    PrintF(out, " %d: ", i);
     Descriptor desc;
     Get(i, &desc);
-    desc.Print(out);
+    os << " " << i << ": " << desc;
   }
-  PrintF(out, "\n");
+  os << "\n";
 }
 
 
-void TransitionArray::PrintTransitions(FILE* out) {
-  PrintF(out, "Transition array  %d\n", number_of_transitions());
+void TransitionArray::PrintTransitions(OStream& os) {  // NOLINT
+  os << "Transition array  %d\n", number_of_transitions();
   for (int i = 0; i < number_of_transitions(); i++) {
-    PrintF(out, " %d: ", i);
-    GetKey(i)->NamePrint(out);
-    PrintF(out, ": ");
+    os << " " << i << ": ";
+    GetKey(i)->NamePrint(os);
+    os << ": ";
     switch (GetTargetDetails(i).type()) {
       case FIELD: {
-        PrintF(out, " (transition to field)\n");
+        os << " (transition to field)\n";
         break;
       }
       case CONSTANT:
-        PrintF(out, " (transition to constant)\n");
+        os << " (transition to constant)\n";
         break;
       case CALLBACKS:
-        PrintF(out, " (transition to callback)\n");
+        os << " (transition to callback)\n";
         break;
       // Values below are never in the target descriptor array.
       case NORMAL:
-      case HANDLER:
-      case INTERCEPTOR:
-      case NONEXISTENT:
         UNREACHABLE();
         break;
     }
   }
-  PrintF(out, "\n");
+  os << "\n";
 }
 
 
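The hunk above replaces the FILE*-based PrintF printers with stream insertion into an OStream, using a Brief() adapter to emit one-line object summaries. A minimal standalone sketch of that adapter pattern follows — it uses std::ostream and hypothetical names (Thing, Brief), not V8's actual types:

#include <iostream>

struct Thing { int id; };

// Wrapper so that "os << Brief(t)" prints a short, one-line form of t.
struct Brief {
  explicit Brief(const Thing& t) : thing(t) {}
  const Thing& thing;
};

std::ostream& operator<<(std::ostream& os, const Brief& b) {
  return os << "<Thing " << b.thing.id << ">";
}

int main() {
  Thing t = {42};
  std::cout << "\n - thing: " << Brief(t) << "\n";  // " - thing: <Thing 42>"
  return 0;
}

The advantage over the old PrintF style is that each field line becomes a single chained expression, and the short-print behavior lives in one operator<< rather than being repeated at every call site.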
diff --git a/src/objects-visiting-inl.h b/src/objects-visiting-inl.h
deleted file mode 100644
index 887a3de..0000000
--- a/src/objects-visiting-inl.h
+++ /dev/null
@@ -1,969 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_VISITING_INL_H_
-#define V8_OBJECTS_VISITING_INL_H_
-
-
-namespace v8 {
-namespace internal {
-
-template<typename StaticVisitor>
-void StaticNewSpaceVisitor<StaticVisitor>::Initialize() {
-  table_.Register(kVisitShortcutCandidate,
-                  &FixedBodyVisitor<StaticVisitor,
-                  ConsString::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitConsString,
-                  &FixedBodyVisitor<StaticVisitor,
-                  ConsString::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitSlicedString,
-                  &FixedBodyVisitor<StaticVisitor,
-                  SlicedString::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitSymbol,
-                  &FixedBodyVisitor<StaticVisitor,
-                  Symbol::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitFixedArray,
-                  &FlexibleBodyVisitor<StaticVisitor,
-                  FixedArray::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitFixedDoubleArray, &VisitFixedDoubleArray);
-  table_.Register(kVisitFixedTypedArray, &VisitFixedTypedArray);
-  table_.Register(kVisitFixedFloat64Array, &VisitFixedTypedArray);
-
-  table_.Register(kVisitNativeContext,
-                  &FixedBodyVisitor<StaticVisitor,
-                  Context::ScavengeBodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitByteArray, &VisitByteArray);
-
-  table_.Register(kVisitSharedFunctionInfo,
-                  &FixedBodyVisitor<StaticVisitor,
-                  SharedFunctionInfo::BodyDescriptor,
-                  int>::Visit);
-
-  table_.Register(kVisitSeqOneByteString, &VisitSeqOneByteString);
-
-  table_.Register(kVisitSeqTwoByteString, &VisitSeqTwoByteString);
-
-  table_.Register(kVisitJSFunction, &VisitJSFunction);
-
-  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
-
-  table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
-
-  table_.Register(kVisitJSDataView, &VisitJSDataView);
-
-  table_.Register(kVisitFreeSpace, &VisitFreeSpace);
-
-  table_.Register(kVisitJSWeakCollection, &JSObjectVisitor::Visit);
-
-  table_.Register(kVisitJSRegExp, &JSObjectVisitor::Visit);
-
-  table_.template RegisterSpecializations<DataObjectVisitor,
-                                          kVisitDataObject,
-                                          kVisitDataObjectGeneric>();
-
-  table_.template RegisterSpecializations<JSObjectVisitor,
-                                          kVisitJSObject,
-                                          kVisitJSObjectGeneric>();
-  table_.template RegisterSpecializations<StructVisitor,
-                                          kVisitStruct,
-                                          kVisitStructGeneric>();
-}
-
-
-template<typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSArrayBuffer(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-
-  STATIC_ASSERT(
-      JSArrayBuffer::kWeakFirstViewOffset ==
-      JSArrayBuffer::kWeakNextOffset + kPointerSize);
-  VisitPointers(
-      heap,
-      HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
-  VisitPointers(
-      heap,
-      HeapObject::RawField(object,
-          JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
-      HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
-  return JSArrayBuffer::kSizeWithInternalFields;
-}
-
-
-template<typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSTypedArray(
-    Map* map, HeapObject* object) {
-  VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
-  VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object,
-          JSTypedArray::kWeakNextOffset + kPointerSize),
-      HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
-  return JSTypedArray::kSizeWithInternalFields;
-}
-
-
-template<typename StaticVisitor>
-int StaticNewSpaceVisitor<StaticVisitor>::VisitJSDataView(
-    Map* map, HeapObject* object) {
-  VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
-  VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object,
-          JSDataView::kWeakNextOffset + kPointerSize),
-      HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
-  return JSDataView::kSizeWithInternalFields;
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::Initialize() {
-  table_.Register(kVisitShortcutCandidate,
-                  &FixedBodyVisitor<StaticVisitor,
-                  ConsString::BodyDescriptor,
-                  void>::Visit);
-
-  table_.Register(kVisitConsString,
-                  &FixedBodyVisitor<StaticVisitor,
-                  ConsString::BodyDescriptor,
-                  void>::Visit);
-
-  table_.Register(kVisitSlicedString,
-                  &FixedBodyVisitor<StaticVisitor,
-                  SlicedString::BodyDescriptor,
-                  void>::Visit);
-
-  table_.Register(kVisitSymbol,
-                  &FixedBodyVisitor<StaticVisitor,
-                  Symbol::BodyDescriptor,
-                  void>::Visit);
-
-  table_.Register(kVisitFixedArray, &FixedArrayVisitor::Visit);
-
-  table_.Register(kVisitFixedDoubleArray, &DataObjectVisitor::Visit);
-
-  table_.Register(kVisitFixedTypedArray, &DataObjectVisitor::Visit);
-
-  table_.Register(kVisitFixedFloat64Array, &DataObjectVisitor::Visit);
-
-  table_.Register(kVisitConstantPoolArray, &VisitConstantPoolArray);
-
-  table_.Register(kVisitNativeContext, &VisitNativeContext);
-
-  table_.Register(kVisitAllocationSite, &VisitAllocationSite);
-
-  table_.Register(kVisitByteArray, &DataObjectVisitor::Visit);
-
-  table_.Register(kVisitFreeSpace, &DataObjectVisitor::Visit);
-
-  table_.Register(kVisitSeqOneByteString, &DataObjectVisitor::Visit);
-
-  table_.Register(kVisitSeqTwoByteString, &DataObjectVisitor::Visit);
-
-  table_.Register(kVisitJSWeakCollection, &VisitWeakCollection);
-
-  table_.Register(kVisitOddball,
-                  &FixedBodyVisitor<StaticVisitor,
-                  Oddball::BodyDescriptor,
-                  void>::Visit);
-
-  table_.Register(kVisitMap, &VisitMap);
-
-  table_.Register(kVisitCode, &VisitCode);
-
-  table_.Register(kVisitSharedFunctionInfo, &VisitSharedFunctionInfo);
-
-  table_.Register(kVisitJSFunction, &VisitJSFunction);
-
-  table_.Register(kVisitJSArrayBuffer, &VisitJSArrayBuffer);
-
-  table_.Register(kVisitJSTypedArray, &VisitJSTypedArray);
-
-  table_.Register(kVisitJSDataView, &VisitJSDataView);
-
-  // Registration for kVisitJSRegExp is done by StaticVisitor.
-
-  table_.Register(kVisitCell,
-                  &FixedBodyVisitor<StaticVisitor,
-                  Cell::BodyDescriptor,
-                  void>::Visit);
-
-  table_.Register(kVisitPropertyCell, &VisitPropertyCell);
-
-  table_.template RegisterSpecializations<DataObjectVisitor,
-                                          kVisitDataObject,
-                                          kVisitDataObjectGeneric>();
-
-  table_.template RegisterSpecializations<JSObjectVisitor,
-                                          kVisitJSObject,
-                                          kVisitJSObjectGeneric>();
-
-  table_.template RegisterSpecializations<StructObjectVisitor,
-                                          kVisitStruct,
-                                          kVisitStructGeneric>();
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeEntry(
-    Heap* heap, Address entry_address) {
-  Code* code = Code::cast(Code::GetObjectFromEntryAddress(entry_address));
-  heap->mark_compact_collector()->RecordCodeEntrySlot(entry_address, code);
-  StaticVisitor::MarkObject(heap, code);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitEmbeddedPointer(
-    Heap* heap, RelocInfo* rinfo) {
-  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
-  ASSERT(!rinfo->target_object()->IsConsString());
-  HeapObject* object = HeapObject::cast(rinfo->target_object());
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, object);
-  // TODO(ulan): It could be better to record slots only for strongly embedded
-  // objects here and record slots for weakly embedded object during clearing
-  // of non-live references in mark-compact.
-  if (!rinfo->host()->IsWeakObject(object)) {
-    StaticVisitor::MarkObject(heap, object);
-  }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCell(
-    Heap* heap, RelocInfo* rinfo) {
-  ASSERT(rinfo->rmode() == RelocInfo::CELL);
-  Cell* cell = rinfo->target_cell();
-  // No need to record slots because the cell space is not compacted during GC.
-  if (!rinfo->host()->IsWeakObject(cell)) {
-    StaticVisitor::MarkObject(heap, cell);
-  }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitDebugTarget(
-    Heap* heap, RelocInfo* rinfo) {
-  ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
-          rinfo->IsPatchedReturnSequence()) ||
-         (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
-          rinfo->IsPatchedDebugBreakSlotSequence()));
-  Code* target = Code::GetCodeFromTargetAddress(rinfo->call_address());
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
-  StaticVisitor::MarkObject(heap, target);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(
-    Heap* heap, RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
-  Code* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-  // Monomorphic ICs are preserved when possible, but need to be flushed
-  // when they might be keeping a Context alive, or when the heap is about
-  // to be serialized.
-  if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub()
-      && (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
-          target->ic_state() == POLYMORPHIC || heap->flush_monomorphic_ics() ||
-          heap->isolate()->serializer_enabled() ||
-          target->ic_age() != heap->global_ic_age() ||
-          target->is_invalidated_weak_stub())) {
-    IC::Clear(heap->isolate(), rinfo->pc(), rinfo->host()->constant_pool());
-    target = Code::GetCodeFromTargetAddress(rinfo->target_address());
-  }
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
-  StaticVisitor::MarkObject(heap, target);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCodeAgeSequence(
-    Heap* heap, RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
-  Code* target = rinfo->code_age_stub();
-  ASSERT(target != NULL);
-  heap->mark_compact_collector()->RecordRelocSlot(rinfo, target);
-  StaticVisitor::MarkObject(heap, target);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitNativeContext(
-    Map* map, HeapObject* object) {
-  FixedBodyVisitor<StaticVisitor,
-                   Context::MarkCompactBodyDescriptor,
-                   void>::Visit(map, object);
-
-  MarkCompactCollector* collector = map->GetHeap()->mark_compact_collector();
-  for (int idx = Context::FIRST_WEAK_SLOT;
-       idx < Context::NATIVE_CONTEXT_SLOTS;
-       ++idx) {
-    Object** slot = Context::cast(object)->RawFieldOfElementAt(idx);
-    collector->RecordSlot(slot, slot, *slot);
-  }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitMap(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-  Map* map_object = Map::cast(object);
-
-  // Clears the cache of ICs related to this map.
-  if (FLAG_cleanup_code_caches_at_gc) {
-    map_object->ClearCodeCache(heap);
-  }
-
-  // When map collection is enabled we have to mark through map's transitions
-  // and back pointers in a special way to make these links weak.
-  if (FLAG_collect_maps && map_object->CanTransition()) {
-    MarkMapContents(heap, map_object);
-  } else {
-    StaticVisitor::VisitPointers(heap,
-        HeapObject::RawField(object, Map::kPointerFieldsBeginOffset),
-        HeapObject::RawField(object, Map::kPointerFieldsEndOffset));
-  }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitPropertyCell(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-
-  Object** slot =
-      HeapObject::RawField(object, PropertyCell::kDependentCodeOffset);
-  if (FLAG_collect_maps) {
-    // Mark the property cell's dependent code array but do not push it onto
-    // the marking stack; this will make references from it weak. We will
-    // clear dead code when we iterate over property cells in
-    // ClearNonLiveReferences.
-    HeapObject* obj = HeapObject::cast(*slot);
-    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-    StaticVisitor::MarkObjectWithoutPush(heap, obj);
-  } else {
-    StaticVisitor::VisitPointer(heap, slot);
-  }
-
-  StaticVisitor::VisitPointers(heap,
-      HeapObject::RawField(object, PropertyCell::kPointerFieldsBeginOffset),
-      HeapObject::RawField(object, PropertyCell::kPointerFieldsEndOffset));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitAllocationSite(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-
-  Object** slot =
-      HeapObject::RawField(object, AllocationSite::kDependentCodeOffset);
-  if (FLAG_collect_maps) {
-    // Mark the allocation site's dependent code array but do not push it onto
-    // the marking stack; this will make references from it weak. We will
-    // clear dead code when we iterate over allocation sites in
-    // ClearNonLiveReferences.
-    HeapObject* obj = HeapObject::cast(*slot);
-    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-    StaticVisitor::MarkObjectWithoutPush(heap, obj);
-  } else {
-    StaticVisitor::VisitPointer(heap, slot);
-  }
-
-  StaticVisitor::VisitPointers(heap,
-      HeapObject::RawField(object, AllocationSite::kPointerFieldsBeginOffset),
-      HeapObject::RawField(object, AllocationSite::kPointerFieldsEndOffset));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitWeakCollection(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-  JSWeakCollection* weak_collection =
-      reinterpret_cast<JSWeakCollection*>(object);
-
-  // Enqueue weak collection in linked list of encountered weak collections.
-  if (weak_collection->next() == heap->undefined_value()) {
-    weak_collection->set_next(heap->encountered_weak_collections());
-    heap->set_encountered_weak_collections(weak_collection);
-  }
-
-  // Skip visiting the backing hash table containing the mappings and the
-  // pointer to the other enqueued weak collections; both are post-processed.
-  StaticVisitor::VisitPointers(heap,
-      HeapObject::RawField(object, JSWeakCollection::kPropertiesOffset),
-      HeapObject::RawField(object, JSWeakCollection::kTableOffset));
-  STATIC_ASSERT(JSWeakCollection::kTableOffset + kPointerSize ==
-      JSWeakCollection::kNextOffset);
-  STATIC_ASSERT(JSWeakCollection::kNextOffset + kPointerSize ==
-      JSWeakCollection::kSize);
-
-  // Partially initialized weak collection is enqueued, but table is ignored.
-  if (!weak_collection->table()->IsHashTable()) return;
-
-  // Mark the backing hash table without pushing it on the marking stack.
-  Object** slot = HeapObject::RawField(object, JSWeakCollection::kTableOffset);
-  HeapObject* obj = HeapObject::cast(*slot);
-  heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-  StaticVisitor::MarkObjectWithoutPush(heap, obj);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitCode(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-  Code* code = Code::cast(object);
-  if (FLAG_age_code && !heap->isolate()->serializer_enabled()) {
-    code->MakeOlder(heap->mark_compact_collector()->marking_parity());
-  }
-  code->CodeIterateBody<StaticVisitor>(heap);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfo(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-  SharedFunctionInfo* shared = SharedFunctionInfo::cast(object);
-  if (shared->ic_age() != heap->global_ic_age()) {
-    shared->ResetForNewContext(heap->global_ic_age());
-  }
-  if (FLAG_cleanup_code_caches_at_gc) {
-    shared->ClearTypeFeedbackInfo();
-  }
-  if (FLAG_cache_optimized_code &&
-      FLAG_flush_optimized_code_cache &&
-      !shared->optimized_code_map()->IsSmi()) {
-    // Always flush the optimized code map if requested by flag.
-    shared->ClearOptimizedCodeMap();
-  }
-  MarkCompactCollector* collector = heap->mark_compact_collector();
-  if (collector->is_code_flushing_enabled()) {
-    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
-      // Add the shared function info holding an optimized code map to
-      // the code flusher for processing of code maps after marking.
-      collector->code_flusher()->AddOptimizedCodeMap(shared);
-      // Treat all references within the code map weakly by marking the
-      // code map itself but not pushing it onto the marking deque.
-      FixedArray* code_map = FixedArray::cast(shared->optimized_code_map());
-      StaticVisitor::MarkObjectWithoutPush(heap, code_map);
-    }
-    if (IsFlushable(heap, shared)) {
-      // This function's code looks flushable. But we have to postpone
-      // the decision until we see all functions that point to the same
-      // SharedFunctionInfo because some of them might be optimized.
-      // That would also make the non-optimized version of the code
-      // non-flushable, because it is required for bailing out from
-      // optimized code.
-      collector->code_flusher()->AddCandidate(shared);
-      // Treat the reference to the code object weakly.
-      VisitSharedFunctionInfoWeakCode(heap, object);
-      return;
-    }
-  } else {
-    if (FLAG_cache_optimized_code && !shared->optimized_code_map()->IsSmi()) {
-      // Flush optimized code map on major GCs without code flushing,
-      // needed because cached code doesn't contain breakpoints.
-      shared->ClearOptimizedCodeMap();
-    }
-  }
-  VisitSharedFunctionInfoStrongCode(heap, object);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-  ConstantPoolArray* array = ConstantPoolArray::cast(object);
-  ConstantPoolArray::Iterator code_iter(array, ConstantPoolArray::CODE_PTR);
-  while (!code_iter.is_finished()) {
-    Address code_entry = reinterpret_cast<Address>(
-        array->RawFieldOfElementAt(code_iter.next_index()));
-    StaticVisitor::VisitCodeEntry(heap, code_entry);
-  }
-
-  ConstantPoolArray::Iterator heap_iter(array, ConstantPoolArray::HEAP_PTR);
-  while (!heap_iter.is_finished()) {
-    Object** slot = array->RawFieldOfElementAt(heap_iter.next_index());
-    HeapObject* object = HeapObject::cast(*slot);
-    heap->mark_compact_collector()->RecordSlot(slot, slot, object);
-    bool is_weak_object =
-        (array->get_weak_object_state() ==
-              ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE &&
-         Code::IsWeakObjectInOptimizedCode(object)) ||
-        (array->get_weak_object_state() ==
-              ConstantPoolArray::WEAK_OBJECTS_IN_IC &&
-         Code::IsWeakObjectInIC(object));
-    if (!is_weak_object) {
-      StaticVisitor::MarkObject(heap, object);
-    }
-  }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunction(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-  JSFunction* function = JSFunction::cast(object);
-  MarkCompactCollector* collector = heap->mark_compact_collector();
-  if (collector->is_code_flushing_enabled()) {
-    if (IsFlushable(heap, function)) {
-      // This function's code looks flushable. But we have to postpone
-      // the decision until we see all functions that point to the same
-      // SharedFunctionInfo because some of them might be optimized.
-      // That would also make the non-optimized version of the code
-      // non-flushable, because it is required for bailing out from
-      // optimized code.
-      collector->code_flusher()->AddCandidate(function);
-      // Visit shared function info immediately to avoid double checking
-      // of its flushability later. This is just an optimization because
-      // the shared function info would eventually be visited.
-      SharedFunctionInfo* shared = function->shared();
-      if (StaticVisitor::MarkObjectWithoutPush(heap, shared)) {
-        StaticVisitor::MarkObject(heap, shared->map());
-        VisitSharedFunctionInfoWeakCode(heap, shared);
-      }
-      // Treat the reference to the code object weakly.
-      VisitJSFunctionWeakCode(heap, object);
-      return;
-    } else {
-      // Visit all unoptimized code objects to prevent flushing them.
-      StaticVisitor::MarkObject(heap, function->shared()->code());
-      if (function->code()->kind() == Code::OPTIMIZED_FUNCTION) {
-        MarkInlinedFunctionsCode(heap, function->code());
-      }
-    }
-  }
-  VisitJSFunctionStrongCode(heap, object);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSRegExp(
-    Map* map, HeapObject* object) {
-  int last_property_offset =
-      JSRegExp::kSize + kPointerSize * map->inobject_properties();
-  StaticVisitor::VisitPointers(map->GetHeap(),
-      HeapObject::RawField(object, JSRegExp::kPropertiesOffset),
-      HeapObject::RawField(object, last_property_offset));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSArrayBuffer(
-    Map* map, HeapObject* object) {
-  Heap* heap = map->GetHeap();
-
-  STATIC_ASSERT(
-      JSArrayBuffer::kWeakFirstViewOffset ==
-      JSArrayBuffer::kWeakNextOffset + kPointerSize);
-  StaticVisitor::VisitPointers(
-      heap,
-      HeapObject::RawField(object, JSArrayBuffer::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSArrayBuffer::kWeakNextOffset));
-  StaticVisitor::VisitPointers(
-      heap,
-      HeapObject::RawField(object,
-          JSArrayBuffer::kWeakNextOffset + 2 * kPointerSize),
-      HeapObject::RawField(object, JSArrayBuffer::kSizeWithInternalFields));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSTypedArray(
-    Map* map, HeapObject* object) {
-  StaticVisitor::VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSTypedArray::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSTypedArray::kWeakNextOffset));
-  StaticVisitor::VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object,
-        JSTypedArray::kWeakNextOffset + kPointerSize),
-      HeapObject::RawField(object, JSTypedArray::kSizeWithInternalFields));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSDataView(
-    Map* map, HeapObject* object) {
-  StaticVisitor::VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object, JSDataView::BodyDescriptor::kStartOffset),
-      HeapObject::RawField(object, JSDataView::kWeakNextOffset));
-  StaticVisitor::VisitPointers(
-      map->GetHeap(),
-      HeapObject::RawField(object,
-        JSDataView::kWeakNextOffset + kPointerSize),
-      HeapObject::RawField(object, JSDataView::kSizeWithInternalFields));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkMapContents(
-    Heap* heap, Map* map) {
-  // Make sure that the back pointer stored either in the map itself or
-  // inside its transitions array is marked. Skip recording the back
-  // pointer slot since map space is not compacted.
-  StaticVisitor::MarkObject(heap, HeapObject::cast(map->GetBackPointer()));
-
-  // Treat pointers in the transitions array as weak and also mark that
-  // array to prevent visiting it later. Skip recording the transition
-  // array slot, since it will be implicitly recorded when the pointer
-  // fields of this map are visited.
-  if (map->HasTransitionArray()) {
-    TransitionArray* transitions = map->transitions();
-    MarkTransitionArray(heap, transitions);
-  }
-
-  // Since descriptor arrays are potentially shared, ensure that only the
-  // descriptors that belong to this map are marked. The first time a
-  // non-empty descriptor array is marked, its header is also visited. The slot
-  // holding the descriptor array will be implicitly recorded when the pointer
-  // fields of this map are visited.
-  DescriptorArray* descriptors = map->instance_descriptors();
-  if (StaticVisitor::MarkObjectWithoutPush(heap, descriptors) &&
-      descriptors->length() > 0) {
-    StaticVisitor::VisitPointers(heap,
-        descriptors->GetFirstElementAddress(),
-        descriptors->GetDescriptorEndSlot(0));
-  }
-  int start = 0;
-  int end = map->NumberOfOwnDescriptors();
-  if (start < end) {
-    StaticVisitor::VisitPointers(heap,
-        descriptors->GetDescriptorStartSlot(start),
-        descriptors->GetDescriptorEndSlot(end));
-  }
-
-  // Mark the prototype's dependent code array but do not push it onto the
-  // marking stack; this will make references from it weak. We will clear
-  // dead code when we iterate over maps in ClearNonLiveTransitions.
-  Object** slot = HeapObject::RawField(map, Map::kDependentCodeOffset);
-  HeapObject* obj = HeapObject::cast(*slot);
-  heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-  StaticVisitor::MarkObjectWithoutPush(heap, obj);
-
-  // Mark the pointer fields of the Map. Since the transitions array has
-  // been marked already, it is fine that one of these fields contains a
-  // pointer to it.
-  StaticVisitor::VisitPointers(heap,
-      HeapObject::RawField(map, Map::kPointerFieldsBeginOffset),
-      HeapObject::RawField(map, Map::kPointerFieldsEndOffset));
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkTransitionArray(
-    Heap* heap, TransitionArray* transitions) {
-  if (!StaticVisitor::MarkObjectWithoutPush(heap, transitions)) return;
-
-  // Simple transitions do not have keys nor prototype transitions.
-  if (transitions->IsSimpleTransition()) return;
-
-  if (transitions->HasPrototypeTransitions()) {
-    // Mark the prototype transitions array but do not push it onto the
-    // marking stack; this will make references from it weak. We will clear
-    // dead prototype transitions in ClearNonLiveTransitions.
-    Object** slot = transitions->GetPrototypeTransitionsSlot();
-    HeapObject* obj = HeapObject::cast(*slot);
-    heap->mark_compact_collector()->RecordSlot(slot, slot, obj);
-    StaticVisitor::MarkObjectWithoutPush(heap, obj);
-  }
-
-  for (int i = 0; i < transitions->number_of_transitions(); ++i) {
-    StaticVisitor::VisitPointer(heap, transitions->GetKeySlot(i));
-  }
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::MarkInlinedFunctionsCode(
-    Heap* heap, Code* code) {
-  // For an optimized function we should retain both the non-optimized
-  // version of its code and the non-optimized versions of all inlined
-  // functions. This is required to support bailing out from inlined code.
-  DeoptimizationInputData* data =
-      DeoptimizationInputData::cast(code->deoptimization_data());
-  FixedArray* literals = data->LiteralArray();
-  for (int i = 0, count = data->InlinedFunctionCount()->value();
-       i < count;
-       i++) {
-    JSFunction* inlined = JSFunction::cast(literals->get(i));
-    StaticVisitor::MarkObject(heap, inlined->shared()->code());
-  }
-}
-
-
-inline static bool IsValidNonBuiltinContext(Object* context) {
-  return context->IsContext() &&
-      !Context::cast(context)->global_object()->IsJSBuiltinsObject();
-}
-
-
-inline static bool HasSourceCode(Heap* heap, SharedFunctionInfo* info) {
-  Object* undefined = heap->undefined_value();
-  return (info->script() != undefined) &&
-      (reinterpret_cast<Script*>(info->script())->source() != undefined);
-}
-
-
-template<typename StaticVisitor>
-bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
-    Heap* heap, JSFunction* function) {
-  SharedFunctionInfo* shared_info = function->shared();
-
-  // The code is either on the stack, in the compilation cache, or
-  // referenced by an optimized version of the function.
-  MarkBit code_mark = Marking::MarkBitFrom(function->code());
-  if (code_mark.Get()) {
-    return false;
-  }
-
-  // The function must have a valid context and not be a builtin.
-  if (!IsValidNonBuiltinContext(function->context())) {
-    return false;
-  }
-
-  // We do not (yet) flush code for optimized functions.
-  if (function->code() != shared_info->code()) {
-    return false;
-  }
-
-  // Check age of optimized code.
-  if (FLAG_age_code && !function->code()->IsOld()) {
-    return false;
-  }
-
-  return IsFlushable(heap, shared_info);
-}
-
-
-template<typename StaticVisitor>
-bool StaticMarkingVisitor<StaticVisitor>::IsFlushable(
-    Heap* heap, SharedFunctionInfo* shared_info) {
-  // The code is either on the stack, in the compilation cache, or
-  // referenced by an optimized version of the function.
-  MarkBit code_mark = Marking::MarkBitFrom(shared_info->code());
-  if (code_mark.Get()) {
-    return false;
-  }
-
-  // The function must be compiled and have the source code available,
-  // to be able to recompile it in case we need the function again.
-  if (!(shared_info->is_compiled() && HasSourceCode(heap, shared_info))) {
-    return false;
-  }
-
-  // We never flush code for API functions.
-  Object* function_data = shared_info->function_data();
-  if (function_data->IsFunctionTemplateInfo()) {
-    return false;
-  }
-
-  // Only flush code for functions.
-  if (shared_info->code()->kind() != Code::FUNCTION) {
-    return false;
-  }
-
-  // Function must be lazy compilable.
-  if (!shared_info->allows_lazy_compilation()) {
-    return false;
-  }
-
-  // We do not (yet?) flush code for generator functions, because we don't know
-  // if there are still live activations (generator objects) on the heap.
-  if (shared_info->is_generator()) {
-    return false;
-  }
-
-  // If this is a full script wrapped in a function we do not flush the code.
-  if (shared_info->is_toplevel()) {
-    return false;
-  }
-
-  // If this is a function initialized with %SetCode then the one-to-one
-  // relation between SharedFunctionInfo and Code is broken.
-  if (shared_info->dont_flush()) {
-    return false;
-  }
-
-  // Check age of code. If code aging is disabled we never flush.
-  if (!FLAG_age_code || !shared_info->code()->IsOld()) {
-    return false;
-  }
-
-  return true;
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoStrongCode(
-    Heap* heap, HeapObject* object) {
-  Object** start_slot =
-      HeapObject::RawField(object,
-                           SharedFunctionInfo::BodyDescriptor::kStartOffset);
-  Object** end_slot =
-      HeapObject::RawField(object,
-                           SharedFunctionInfo::BodyDescriptor::kEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitSharedFunctionInfoWeakCode(
-    Heap* heap, HeapObject* object) {
-  Object** name_slot =
-      HeapObject::RawField(object, SharedFunctionInfo::kNameOffset);
-  StaticVisitor::VisitPointer(heap, name_slot);
-
-  // Skip visiting kCodeOffset as it is treated weakly here.
-  STATIC_ASSERT(SharedFunctionInfo::kNameOffset + kPointerSize ==
-      SharedFunctionInfo::kCodeOffset);
-  STATIC_ASSERT(SharedFunctionInfo::kCodeOffset + kPointerSize ==
-      SharedFunctionInfo::kOptimizedCodeMapOffset);
-
-  Object** start_slot =
-      HeapObject::RawField(object,
-                           SharedFunctionInfo::kOptimizedCodeMapOffset);
-  Object** end_slot =
-      HeapObject::RawField(object,
-                           SharedFunctionInfo::BodyDescriptor::kEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionStrongCode(
-    Heap* heap, HeapObject* object) {
-  Object** start_slot =
-      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
-  Object** end_slot =
-      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-
-  VisitCodeEntry(heap, object->address() + JSFunction::kCodeEntryOffset);
-  STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
-      JSFunction::kPrototypeOrInitialMapOffset);
-
-  start_slot =
-      HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
-  end_slot =
-      HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-}
-
-
-template<typename StaticVisitor>
-void StaticMarkingVisitor<StaticVisitor>::VisitJSFunctionWeakCode(
-    Heap* heap, HeapObject* object) {
-  Object** start_slot =
-      HeapObject::RawField(object, JSFunction::kPropertiesOffset);
-  Object** end_slot =
-      HeapObject::RawField(object, JSFunction::kCodeEntryOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-
-  // Skip visiting kCodeEntryOffset as it is treated weakly here.
-  STATIC_ASSERT(JSFunction::kCodeEntryOffset + kPointerSize ==
-      JSFunction::kPrototypeOrInitialMapOffset);
-
-  start_slot =
-      HeapObject::RawField(object, JSFunction::kPrototypeOrInitialMapOffset);
-  end_slot =
-      HeapObject::RawField(object, JSFunction::kNonWeakFieldsEndOffset);
-  StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-}
-
-
-void Code::CodeIterateBody(ObjectVisitor* v) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // There are two places where we iterate code bodies: here and the
-  // templated CodeIterateBody (below). They should be kept in sync.
-  IteratePointer(v, kRelocationInfoOffset);
-  IteratePointer(v, kHandlerTableOffset);
-  IteratePointer(v, kDeoptimizationDataOffset);
-  IteratePointer(v, kTypeFeedbackInfoOffset);
-  IterateNextCodeLink(v, kNextCodeLinkOffset);
-  IteratePointer(v, kConstantPoolOffset);
-
-  RelocIterator it(this, mode_mask);
-  Isolate* isolate = this->GetIsolate();
-  for (; !it.done(); it.next()) {
-    it.rinfo()->Visit(isolate, v);
-  }
-}
-
-
-template<typename StaticVisitor>
-void Code::CodeIterateBody(Heap* heap) {
-  int mode_mask = RelocInfo::kCodeTargetMask |
-                  RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT) |
-                  RelocInfo::ModeMask(RelocInfo::CELL) |
-                  RelocInfo::ModeMask(RelocInfo::EXTERNAL_REFERENCE) |
-                  RelocInfo::ModeMask(RelocInfo::JS_RETURN) |
-                  RelocInfo::ModeMask(RelocInfo::DEBUG_BREAK_SLOT) |
-                  RelocInfo::ModeMask(RelocInfo::RUNTIME_ENTRY);
-
-  // There are two places where we iterate code bodies: here and the non-
-  // templated CodeIterateBody (above). They should be kept in sync.
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kRelocationInfoOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kHandlerTableOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kDeoptimizationDataOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kTypeFeedbackInfoOffset));
-  StaticVisitor::VisitNextCodeLink(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kNextCodeLinkOffset));
-  StaticVisitor::VisitPointer(
-      heap,
-      reinterpret_cast<Object**>(this->address() + kConstantPoolOffset));
-
-
-  RelocIterator it(this, mode_mask);
-  for (; !it.done(); it.next()) {
-    it.rinfo()->template Visit<StaticVisitor>(heap);
-  }
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_OBJECTS_VISITING_INL_H_
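The deleted objects-visiting-inl.h above populates a callback table keyed by visitor id (the table_.Register(...) calls) so that GC visitation dispatches through an array index instead of virtual calls. A minimal sketch of that table-driven dispatch, with hypothetical names (VisitorId, VisitorTable) rather than V8's real ones:

#include <cstdio>

enum VisitorId { kVisitString, kVisitArray, kVisitorIdCount };

// Each callback returns the size of the visited body, as in the scavenger.
typedef int (*Callback)(void* object);

struct VisitorTable {
  Callback callbacks_[kVisitorIdCount];
  void Register(VisitorId id, Callback cb) { callbacks_[id] = cb; }
  int Visit(VisitorId id, void* object) { return callbacks_[id](object); }
};

static int VisitString(void*) { std::puts("string body"); return 16; }
static int VisitArray(void*)  { std::puts("array body");  return 32; }

int main() {
  VisitorTable table;
  table.Register(kVisitString, &VisitString);
  table.Register(kVisitArray, &VisitArray);
  table.Visit(kVisitArray, 0);  // one indexed load + indirect call, no vtables
  return 0;
}

The id is derived once from an object's instance type and size, after which every visit is a single table lookup — cheap enough to sit on the GC's hot path.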
diff --git a/src/objects-visiting.cc b/src/objects-visiting.cc
deleted file mode 100644
index f2f47b0..0000000
--- a/src/objects-visiting.cc
+++ /dev/null
@@ -1,459 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/ic-inl.h"
-#include "src/objects-visiting.h"
-
-namespace v8 {
-namespace internal {
-
-
-static inline bool IsShortcutCandidate(int type) {
-  return ((type & kShortcutTypeMask) == kShortcutTypeTag);
-}
-
-
-StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
-    int instance_type,
-    int instance_size) {
-  if (instance_type < FIRST_NONSTRING_TYPE) {
-    switch (instance_type & kStringRepresentationMask) {
-      case kSeqStringTag:
-        if ((instance_type & kStringEncodingMask) == kOneByteStringTag) {
-          return kVisitSeqOneByteString;
-        } else {
-          return kVisitSeqTwoByteString;
-        }
-
-      case kConsStringTag:
-        if (IsShortcutCandidate(instance_type)) {
-          return kVisitShortcutCandidate;
-        } else {
-          return kVisitConsString;
-        }
-
-      case kSlicedStringTag:
-        return kVisitSlicedString;
-
-      case kExternalStringTag:
-        return GetVisitorIdForSize(kVisitDataObject,
-                                   kVisitDataObjectGeneric,
-                                   instance_size);
-    }
-    UNREACHABLE();
-  }
-
-  switch (instance_type) {
-    case BYTE_ARRAY_TYPE:
-      return kVisitByteArray;
-
-    case FREE_SPACE_TYPE:
-      return kVisitFreeSpace;
-
-    case FIXED_ARRAY_TYPE:
-      return kVisitFixedArray;
-
-    case FIXED_DOUBLE_ARRAY_TYPE:
-      return kVisitFixedDoubleArray;
-
-    case CONSTANT_POOL_ARRAY_TYPE:
-      return kVisitConstantPoolArray;
-
-    case ODDBALL_TYPE:
-      return kVisitOddball;
-
-    case MAP_TYPE:
-      return kVisitMap;
-
-    case CODE_TYPE:
-      return kVisitCode;
-
-    case CELL_TYPE:
-      return kVisitCell;
-
-    case PROPERTY_CELL_TYPE:
-      return kVisitPropertyCell;
-
-    case JS_SET_TYPE:
-      return GetVisitorIdForSize(kVisitStruct,
-                                 kVisitStructGeneric,
-                                 JSSet::kSize);
-
-    case JS_MAP_TYPE:
-      return GetVisitorIdForSize(kVisitStruct,
-                                 kVisitStructGeneric,
-                                 JSMap::kSize);
-
-    case JS_WEAK_MAP_TYPE:
-    case JS_WEAK_SET_TYPE:
-      return kVisitJSWeakCollection;
-
-    case JS_REGEXP_TYPE:
-      return kVisitJSRegExp;
-
-    case SHARED_FUNCTION_INFO_TYPE:
-      return kVisitSharedFunctionInfo;
-
-    case JS_PROXY_TYPE:
-      return GetVisitorIdForSize(kVisitStruct,
-                                 kVisitStructGeneric,
-                                 JSProxy::kSize);
-
-    case JS_FUNCTION_PROXY_TYPE:
-      return GetVisitorIdForSize(kVisitStruct,
-                                 kVisitStructGeneric,
-                                 JSFunctionProxy::kSize);
-
-    case FOREIGN_TYPE:
-      return GetVisitorIdForSize(kVisitDataObject,
-                                 kVisitDataObjectGeneric,
-                                 Foreign::kSize);
-
-    case SYMBOL_TYPE:
-      return kVisitSymbol;
-
-    case FILLER_TYPE:
-      return kVisitDataObjectGeneric;
-
-    case JS_ARRAY_BUFFER_TYPE:
-      return kVisitJSArrayBuffer;
-
-    case JS_TYPED_ARRAY_TYPE:
-      return kVisitJSTypedArray;
-
-    case JS_DATA_VIEW_TYPE:
-      return kVisitJSDataView;
-
-    case JS_OBJECT_TYPE:
-    case JS_CONTEXT_EXTENSION_OBJECT_TYPE:
-    case JS_GENERATOR_OBJECT_TYPE:
-    case JS_MODULE_TYPE:
-    case JS_VALUE_TYPE:
-    case JS_DATE_TYPE:
-    case JS_ARRAY_TYPE:
-    case JS_GLOBAL_PROXY_TYPE:
-    case JS_GLOBAL_OBJECT_TYPE:
-    case JS_BUILTINS_OBJECT_TYPE:
-    case JS_MESSAGE_OBJECT_TYPE:
-    case JS_SET_ITERATOR_TYPE:
-    case JS_MAP_ITERATOR_TYPE:
-      return GetVisitorIdForSize(kVisitJSObject,
-                                 kVisitJSObjectGeneric,
-                                 instance_size);
-
-    case JS_FUNCTION_TYPE:
-      return kVisitJSFunction;
-
-    case HEAP_NUMBER_TYPE:
-#define EXTERNAL_ARRAY_CASE(Type, type, TYPE, ctype, size)                     \
-    case EXTERNAL_##TYPE##_ARRAY_TYPE:
-
-    TYPED_ARRAYS(EXTERNAL_ARRAY_CASE)
-      return GetVisitorIdForSize(kVisitDataObject,
-                                 kVisitDataObjectGeneric,
-                                 instance_size);
-#undef EXTERNAL_ARRAY_CASE
-
-    case FIXED_UINT8_ARRAY_TYPE:
-    case FIXED_INT8_ARRAY_TYPE:
-    case FIXED_UINT16_ARRAY_TYPE:
-    case FIXED_INT16_ARRAY_TYPE:
-    case FIXED_UINT32_ARRAY_TYPE:
-    case FIXED_INT32_ARRAY_TYPE:
-    case FIXED_FLOAT32_ARRAY_TYPE:
-    case FIXED_UINT8_CLAMPED_ARRAY_TYPE:
-      return kVisitFixedTypedArray;
-
-    case FIXED_FLOAT64_ARRAY_TYPE:
-      return kVisitFixedFloat64Array;
-
-#define MAKE_STRUCT_CASE(NAME, Name, name) \
-        case NAME##_TYPE:
-      STRUCT_LIST(MAKE_STRUCT_CASE)
-#undef MAKE_STRUCT_CASE
-          if (instance_type == ALLOCATION_SITE_TYPE) {
-            return kVisitAllocationSite;
-          }
-
-          return GetVisitorIdForSize(kVisitStruct,
-                                     kVisitStructGeneric,
-                                     instance_size);
-
-    default:
-      UNREACHABLE();
-      return kVisitorIdCount;
-  }
-}
-
-
-// We don't record weak slots during marking or scavenges. Instead we do it
-// once when we complete the mark-compact cycle. Note that the write barrier
-// has no effect if we are already in the middle of a compacting mark-sweep
-// cycle, in which case we have to record slots manually.
-static bool MustRecordSlots(Heap* heap) {
-  return heap->gc_state() == Heap::MARK_COMPACT &&
-      heap->mark_compact_collector()->is_compacting();
-}
-
-
-template <class T>
-struct WeakListVisitor;
-
-
-template <class T>
-Object* VisitWeakList(Heap* heap,
-                      Object* list,
-                      WeakObjectRetainer* retainer) {
-  Object* undefined = heap->undefined_value();
-  Object* head = undefined;
-  T* tail = NULL;
-  MarkCompactCollector* collector = heap->mark_compact_collector();
-  bool record_slots = MustRecordSlots(heap);
-  while (list != undefined) {
-    // Check whether to keep the candidate in the list.
-    T* candidate = reinterpret_cast<T*>(list);
-    Object* retained = retainer->RetainAs(list);
-    if (retained != NULL) {
-      if (head == undefined) {
-        // First element in the list.
-        head = retained;
-      } else {
-        // Subsequent elements in the list.
-        ASSERT(tail != NULL);
-        WeakListVisitor<T>::SetWeakNext(tail, retained);
-        if (record_slots) {
-          Object** next_slot =
-            HeapObject::RawField(tail, WeakListVisitor<T>::WeakNextOffset());
-          collector->RecordSlot(next_slot, next_slot, retained);
-        }
-      }
-      // Retained object is new tail.
-      ASSERT(!retained->IsUndefined());
-      candidate = reinterpret_cast<T*>(retained);
-      tail = candidate;
-
-
-      // tail is a live object, visit it.
-      WeakListVisitor<T>::VisitLiveObject(heap, tail, retainer);
-    } else {
-      WeakListVisitor<T>::VisitPhantomObject(heap, candidate);
-    }
-
-    // Move to next element in the list.
-    list = WeakListVisitor<T>::WeakNext(candidate);
-  }
-
-  // Terminate the list if it contains one or more elements.
-  if (tail != NULL) {
-    WeakListVisitor<T>::SetWeakNext(tail, undefined);
-  }
-  return head;
-}
-
-
-template <class T>
-static void ClearWeakList(Heap* heap,
-                          Object* list) {
-  Object* undefined = heap->undefined_value();
-  while (list != undefined) {
-    T* candidate = reinterpret_cast<T*>(list);
-    list = WeakListVisitor<T>::WeakNext(candidate);
-    WeakListVisitor<T>::SetWeakNext(candidate, undefined);
-  }
-}
-
-
-template<>
-struct WeakListVisitor<JSFunction> {
-  static void SetWeakNext(JSFunction* function, Object* next) {
-    function->set_next_function_link(next);
-  }
-
-  static Object* WeakNext(JSFunction* function) {
-    return function->next_function_link();
-  }
-
-  static int WeakNextOffset() {
-    return JSFunction::kNextFunctionLinkOffset;
-  }
-
-  static void VisitLiveObject(Heap*, JSFunction*, WeakObjectRetainer*) {}
-
-  static void VisitPhantomObject(Heap*, JSFunction*) {}
-};
-
-
-template<>
-struct WeakListVisitor<Code> {
-  static void SetWeakNext(Code* code, Object* next) {
-    code->set_next_code_link(next);
-  }
-
-  static Object* WeakNext(Code* code) {
-    return code->next_code_link();
-  }
-
-  static int WeakNextOffset() {
-    return Code::kNextCodeLinkOffset;
-  }
-
-  static void VisitLiveObject(Heap*, Code*, WeakObjectRetainer*) {}
-
-  static void VisitPhantomObject(Heap*, Code*) {}
-};
-
-
-template<>
-struct WeakListVisitor<Context> {
-  static void SetWeakNext(Context* context, Object* next) {
-    context->set(Context::NEXT_CONTEXT_LINK,
-                 next,
-                 UPDATE_WRITE_BARRIER);
-  }
-
-  static Object* WeakNext(Context* context) {
-    return context->get(Context::NEXT_CONTEXT_LINK);
-  }
-
-  static int WeakNextOffset() {
-    return FixedArray::SizeFor(Context::NEXT_CONTEXT_LINK);
-  }
-
-  static void VisitLiveObject(Heap* heap,
-                              Context* context,
-                              WeakObjectRetainer* retainer) {
-    // Process the three weak lists linked off the context.
-    DoWeakList<JSFunction>(heap, context, retainer,
-        Context::OPTIMIZED_FUNCTIONS_LIST);
-    DoWeakList<Code>(heap, context, retainer, Context::OPTIMIZED_CODE_LIST);
-    DoWeakList<Code>(heap, context, retainer, Context::DEOPTIMIZED_CODE_LIST);
-  }
-
-  template<class T>
-  static void DoWeakList(Heap* heap,
-                         Context* context,
-                         WeakObjectRetainer* retainer,
-                         int index) {
-    // Visit the weak list, removing dead intermediate elements.
-    Object* list_head = VisitWeakList<T>(heap, context->get(index), retainer);
-
-    // Update the list head.
-    context->set(index, list_head, UPDATE_WRITE_BARRIER);
-
-    if (MustRecordSlots(heap)) {
-      // Record the updated slot if necessary.
-      Object** head_slot = HeapObject::RawField(
-          context, FixedArray::SizeFor(index));
-      heap->mark_compact_collector()->RecordSlot(
-          head_slot, head_slot, list_head);
-    }
-  }
-
-  static void VisitPhantomObject(Heap* heap, Context* context) {
-    ClearWeakList<JSFunction>(heap,
-        context->get(Context::OPTIMIZED_FUNCTIONS_LIST));
-    ClearWeakList<Code>(heap, context->get(Context::OPTIMIZED_CODE_LIST));
-    ClearWeakList<Code>(heap, context->get(Context::DEOPTIMIZED_CODE_LIST));
-  }
-};
-
-
-template<>
-struct WeakListVisitor<JSArrayBufferView> {
-  static void SetWeakNext(JSArrayBufferView* obj, Object* next) {
-    obj->set_weak_next(next);
-  }
-
-  static Object* WeakNext(JSArrayBufferView* obj) {
-    return obj->weak_next();
-  }
-
-  static int WeakNextOffset() {
-    return JSArrayBufferView::kWeakNextOffset;
-  }
-
-  static void VisitLiveObject(Heap*, JSArrayBufferView*, WeakObjectRetainer*) {}
-
-  static void VisitPhantomObject(Heap*, JSArrayBufferView*) {}
-};
-
-
-template<>
-struct WeakListVisitor<JSArrayBuffer> {
-  static void SetWeakNext(JSArrayBuffer* obj, Object* next) {
-    obj->set_weak_next(next);
-  }
-
-  static Object* WeakNext(JSArrayBuffer* obj) {
-    return obj->weak_next();
-  }
-
-  static int WeakNextOffset() {
-    return JSArrayBuffer::kWeakNextOffset;
-  }
-
-  static void VisitLiveObject(Heap* heap,
-                              JSArrayBuffer* array_buffer,
-                              WeakObjectRetainer* retainer) {
-    Object* typed_array_obj =
-        VisitWeakList<JSArrayBufferView>(
-            heap,
-            array_buffer->weak_first_view(),
-            retainer);
-    array_buffer->set_weak_first_view(typed_array_obj);
-    if (typed_array_obj != heap->undefined_value() && MustRecordSlots(heap)) {
-      Object** slot = HeapObject::RawField(
-          array_buffer, JSArrayBuffer::kWeakFirstViewOffset);
-      heap->mark_compact_collector()->RecordSlot(slot, slot, typed_array_obj);
-    }
-  }
-
-  static void VisitPhantomObject(Heap* heap, JSArrayBuffer* phantom) {
-    Runtime::FreeArrayBuffer(heap->isolate(), phantom);
-  }
-};
-
-
-template<>
-struct WeakListVisitor<AllocationSite> {
-  static void SetWeakNext(AllocationSite* obj, Object* next) {
-    obj->set_weak_next(next);
-  }
-
-  static Object* WeakNext(AllocationSite* obj) {
-    return obj->weak_next();
-  }
-
-  static int WeakNextOffset() {
-    return AllocationSite::kWeakNextOffset;
-  }
-
-  static void VisitLiveObject(Heap*, AllocationSite*, WeakObjectRetainer*) {}
-
-  static void VisitPhantomObject(Heap*, AllocationSite*) {}
-};
-
-
-template Object* VisitWeakList<Code>(
-    Heap* heap, Object* list, WeakObjectRetainer* retainer);
-
-
-template Object* VisitWeakList<JSFunction>(
-    Heap* heap, Object* list, WeakObjectRetainer* retainer);
-
-
-template Object* VisitWeakList<Context>(
-    Heap* heap, Object* list, WeakObjectRetainer* retainer);
-
-
-template Object* VisitWeakList<JSArrayBuffer>(
-    Heap* heap, Object* list, WeakObjectRetainer* retainer);
-
-
-template Object* VisitWeakList<AllocationSite>(
-    Heap* heap, Object* list, WeakObjectRetainer* retainer);
-
-} }  // namespace v8::internal
diff --git a/src/objects-visiting.h b/src/objects-visiting.h
deleted file mode 100644
index f6fda9d..0000000
--- a/src/objects-visiting.h
+++ /dev/null
@@ -1,476 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_OBJECTS_VISITING_H_
-#define V8_OBJECTS_VISITING_H_
-
-#include "src/allocation.h"
-
-// This file provides base classes and auxiliary methods for defining
-// static object visitors used during GC.
-// Visiting a HeapObject body with a normal ObjectVisitor requires performing
-// two switches on the object's instance type to determine object size and
-// layout, plus one or more virtual method calls on the visitor itself.
-// A static visitor is different: it provides a dispatch table containing
-// pointers to specialized visit functions. Each map has a visitor_id
-// field holding the index of the specialized visitor to use.
-
-namespace v8 {
-namespace internal {
-
-
-// Base class for all static visitors.
-class StaticVisitorBase : public AllStatic {
- public:
-#define VISITOR_ID_LIST(V)    \
-  V(SeqOneByteString)         \
-  V(SeqTwoByteString)         \
-  V(ShortcutCandidate)        \
-  V(ByteArray)                \
-  V(FreeSpace)                \
-  V(FixedArray)               \
-  V(FixedDoubleArray)         \
-  V(FixedTypedArray)          \
-  V(FixedFloat64Array)        \
-  V(ConstantPoolArray)        \
-  V(NativeContext)            \
-  V(AllocationSite)           \
-  V(DataObject2)              \
-  V(DataObject3)              \
-  V(DataObject4)              \
-  V(DataObject5)              \
-  V(DataObject6)              \
-  V(DataObject7)              \
-  V(DataObject8)              \
-  V(DataObject9)              \
-  V(DataObjectGeneric)        \
-  V(JSObject2)                \
-  V(JSObject3)                \
-  V(JSObject4)                \
-  V(JSObject5)                \
-  V(JSObject6)                \
-  V(JSObject7)                \
-  V(JSObject8)                \
-  V(JSObject9)                \
-  V(JSObjectGeneric)          \
-  V(Struct2)                  \
-  V(Struct3)                  \
-  V(Struct4)                  \
-  V(Struct5)                  \
-  V(Struct6)                  \
-  V(Struct7)                  \
-  V(Struct8)                  \
-  V(Struct9)                  \
-  V(StructGeneric)            \
-  V(ConsString)               \
-  V(SlicedString)             \
-  V(Symbol)                   \
-  V(Oddball)                  \
-  V(Code)                     \
-  V(Map)                      \
-  V(Cell)                     \
-  V(PropertyCell)             \
-  V(SharedFunctionInfo)       \
-  V(JSFunction)               \
-  V(JSWeakCollection)         \
-  V(JSArrayBuffer)            \
-  V(JSTypedArray)             \
-  V(JSDataView)               \
-  V(JSRegExp)
-
-  // For data objects, JS objects and structs we provide visitors specialized
-  // by object size in words, along with a generic visitor that can visit an
-  // object of any size.
-  // Ids of the specialized visitors are declared in linear order (without
-  // holes), starting from the id of the visitor specialized for 2-word
-  // objects (the base visitor id) and ending with the id of the generic
-  // visitor. GetVisitorIdForSize depends on this ordering to calculate the
-  // id of a specialized visitor from a given instance size, the base
-  // visitor id, and the generic visitor's id.
-  enum VisitorId {
-#define VISITOR_ID_ENUM_DECL(id)  kVisit##id,
-    VISITOR_ID_LIST(VISITOR_ID_ENUM_DECL)
-#undef VISITOR_ID_ENUM_DECL
-    kVisitorIdCount,
-    kVisitDataObject = kVisitDataObject2,
-    kVisitJSObject = kVisitJSObject2,
-    kVisitStruct = kVisitStruct2,
-    kMinObjectSizeInWords = 2
-  };
-
-  // Visitor ID should fit in one byte.
-  STATIC_ASSERT(kVisitorIdCount <= 256);
-
-  // Determine which specialized visitor should be used for a given instance
-  // type and instance size.
-  static VisitorId GetVisitorId(int instance_type, int instance_size);
-
-  static VisitorId GetVisitorId(Map* map) {
-    return GetVisitorId(map->instance_type(), map->instance_size());
-  }
-
-  // For visitors that allow specialization by size, calculate the VisitorId
-  // based on the object size, the base visitor id and the generic visitor id.
-  static VisitorId GetVisitorIdForSize(VisitorId base,
-                                       VisitorId generic,
-                                       int object_size) {
-    ASSERT((base == kVisitDataObject) ||
-           (base == kVisitStruct) ||
-           (base == kVisitJSObject));
-    ASSERT(IsAligned(object_size, kPointerSize));
-    ASSERT(kMinObjectSizeInWords * kPointerSize <= object_size);
-    ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
-
-    const VisitorId specialization = static_cast<VisitorId>(
-        base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
-
-    return Min(specialization, generic);
-  }
-};
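
The id arithmetic in GetVisitorIdForSize amounts to: specialized id = base + size-in-words - kMinObjectSizeInWords, clamped at the generic id. A small self-checking sketch with made-up enum values (the real ids come from the VISITOR_ID_LIST ordering):

    #include <algorithm>
    #include <cassert>

    // Made-up ids; in V8 these come from VISITOR_ID_LIST.
    enum VisitorId {
      kVisitDataObject2 = 0,  // base: visitor specialized for 2-word objects
      kVisitDataObject3 = 1,
      kVisitDataObject4 = 2,
      // ...specializations for 5..9 words would follow without holes...
      kVisitDataObjectGeneric = 9  // generic visitor terminates the range
    };

    const int kPointerSizeLog2 = 3;  // assuming a 64-bit target here
    const int kMinObjectSizeInWords = 2;

    VisitorId GetVisitorIdForSize(VisitorId base, VisitorId generic,
                                  int object_size) {
      int id =
          base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords;
      return static_cast<VisitorId>(std::min(id, static_cast<int>(generic)));
    }

    int main() {
      // A 24-byte object is 3 words: it maps to the 3-word specialization.
      assert(GetVisitorIdForSize(kVisitDataObject2, kVisitDataObjectGeneric,
                                 24) == kVisitDataObject3);
      // A 200-byte object exceeds the specialized range: generic visitor.
      assert(GetVisitorIdForSize(kVisitDataObject2, kVisitDataObjectGeneric,
                                 200) == kVisitDataObjectGeneric);
      return 0;
    }
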
-
-
-template<typename Callback>
-class VisitorDispatchTable {
- public:
-  void CopyFrom(VisitorDispatchTable* other) {
-    // We do not use memcpy here so that, during the update, every element
-    // of the callbacks_ array remains a valid pointer (memcpy might be
-    // implemented as a byte-copying loop).
-    for (int i = 0; i < StaticVisitorBase::kVisitorIdCount; i++) {
-      base::NoBarrier_Store(&callbacks_[i], other->callbacks_[i]);
-    }
-  }
-
-  inline Callback GetVisitorById(StaticVisitorBase::VisitorId id) {
-    return reinterpret_cast<Callback>(callbacks_[id]);
-  }
-
-  inline Callback GetVisitor(Map* map) {
-    return reinterpret_cast<Callback>(callbacks_[map->visitor_id()]);
-  }
-
-  void Register(StaticVisitorBase::VisitorId id, Callback callback) {
-    ASSERT(id < StaticVisitorBase::kVisitorIdCount);  // id is unsigned.
-    callbacks_[id] = reinterpret_cast<base::AtomicWord>(callback);
-  }
-
-  template<typename Visitor,
-           StaticVisitorBase::VisitorId base,
-           StaticVisitorBase::VisitorId generic,
-           int object_size_in_words>
-  void RegisterSpecialization() {
-    static const int size = object_size_in_words * kPointerSize;
-    Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
-             &Visitor::template VisitSpecialized<size>);
-  }
-
-
-  template<typename Visitor,
-           StaticVisitorBase::VisitorId base,
-           StaticVisitorBase::VisitorId generic>
-  void RegisterSpecializations() {
-    STATIC_ASSERT(
-        (generic - base + StaticVisitorBase::kMinObjectSizeInWords) == 10);
-    RegisterSpecialization<Visitor, base, generic, 2>();
-    RegisterSpecialization<Visitor, base, generic, 3>();
-    RegisterSpecialization<Visitor, base, generic, 4>();
-    RegisterSpecialization<Visitor, base, generic, 5>();
-    RegisterSpecialization<Visitor, base, generic, 6>();
-    RegisterSpecialization<Visitor, base, generic, 7>();
-    RegisterSpecialization<Visitor, base, generic, 8>();
-    RegisterSpecialization<Visitor, base, generic, 9>();
-    Register(generic, &Visitor::Visit);
-  }
-
- private:
-  base::AtomicWord callbacks_[StaticVisitorBase::kVisitorIdCount];
-};
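
VisitorDispatchTable is essentially an array of function pointers indexed by visitor id, with atomic stores so a concurrent reader never observes a torn entry. A simplified sketch of the register/dispatch flow, with std::atomic standing in for base::NoBarrier_Store and invented visitor names:

    #include <atomic>
    #include <cstdio>

    enum { kVisitorIdCount = 4 };
    struct HeapObj { int id; };  // hypothetical object carrying its visitor id

    using Callback = int (*)(HeapObj*);

    // Table of callbacks; atomic so an update never exposes a torn pointer.
    std::atomic<Callback> table[kVisitorIdCount];

    void Register(int id, Callback cb) {
      table[id].store(cb, std::memory_order_relaxed);
    }

    int Dispatch(HeapObj* obj) {
      // One indexed load replaces a switch over instance types.
      return table[obj->id].load(std::memory_order_relaxed)(obj);
    }

    int VisitSmall(HeapObj*) { return 16; }    // pretend: fixed 2-word size
    int VisitGeneric(HeapObj*) { return 64; }  // pretend: computed size

    int main() {
      Register(0, VisitSmall);
      Register(1, VisitGeneric);
      HeapObj a = {0}, b = {1};
      std::printf("%d %d\n", Dispatch(&a), Dispatch(&b));  // prints: 16 64
      return 0;
    }
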
-
-
-template<typename StaticVisitor>
-class BodyVisitorBase : public AllStatic {
- public:
-  INLINE(static void IteratePointers(Heap* heap,
-                                     HeapObject* object,
-                                     int start_offset,
-                                     int end_offset)) {
-    Object** start_slot = reinterpret_cast<Object**>(object->address() +
-                                                     start_offset);
-    Object** end_slot = reinterpret_cast<Object**>(object->address() +
-                                                   end_offset);
-    StaticVisitor::VisitPointers(heap, start_slot, end_slot);
-  }
-};
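
IteratePointers simply reinterprets a byte range [start_offset, end_offset) of the object body as a span of slots and visits each one. A standalone sketch of that slot walk over a raw buffer (types are invented):

    #include <cstdio>

    struct Obj { int dummy; };

    // Visit every pointer-sized slot in [start_offset, end_offset) of a raw
    // object body, the way BodyVisitorBase::IteratePointers carves out slots.
    void IteratePointers(char* body, int start_offset, int end_offset,
                         void (*visit)(Obj**)) {
      Obj** start = reinterpret_cast<Obj**>(body + start_offset);
      Obj** end = reinterpret_cast<Obj**>(body + end_offset);
      for (Obj** slot = start; slot < end; ++slot) visit(slot);
    }

    int main() {
      Obj a, b;
      Obj* fields[2] = {&a, &b};  // pretend object body holding two pointers
      IteratePointers(reinterpret_cast<char*>(fields), 0, sizeof(fields),
                      [](Obj** slot) {
                        std::printf("slot -> %p\n", static_cast<void*>(*slot));
                      });
      return 0;
    }
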
-
-
-template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
- public:
-  INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
-    int object_size = BodyDescriptor::SizeOf(map, object);
-    BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->GetHeap(),
-        object,
-        BodyDescriptor::kStartOffset,
-        object_size);
-    return static_cast<ReturnType>(object_size);
-  }
-
-  template<int object_size>
-  static inline ReturnType VisitSpecialized(Map* map, HeapObject* object) {
-    ASSERT(BodyDescriptor::SizeOf(map, object) == object_size);
-    BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->GetHeap(),
-        object,
-        BodyDescriptor::kStartOffset,
-        object_size);
-    return static_cast<ReturnType>(object_size);
-  }
-};
-
-
-template<typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
-class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
- public:
-  INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
-    BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->GetHeap(),
-        object,
-        BodyDescriptor::kStartOffset,
-        BodyDescriptor::kEndOffset);
-    return static_cast<ReturnType>(BodyDescriptor::kSize);
-  }
-};
-
-
-// Base class for visitors used for a linear new space iteration.
-// IterateBody returns the size of the visited object.
-// Certain types of objects (e.g. Code objects) are not handled
-// by the dispatch table of this visitor because they cannot appear
-// in the new space.
-//
-// This class is intended to be used in the following way:
-//
-//   class SomeVisitor : public StaticNewSpaceVisitor<SomeVisitor> {
-//     ...
-//   }
-//
-// This is an example of the Curiously Recurring Template Pattern
-// (see http://en.wikipedia.org/wiki/Curiously_recurring_template_pattern).
-// We use CRTP to guarantee aggressive compile-time optimizations (i.e.
-// inlining and specialization of StaticVisitor::VisitPointers methods).
-template<typename StaticVisitor>
-class StaticNewSpaceVisitor : public StaticVisitorBase {
- public:
-  static void Initialize();
-
-  INLINE(static int IterateBody(Map* map, HeapObject* obj)) {
-    return table_.GetVisitor(map)(map, obj);
-  }
-
-  INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) {
-    for (Object** p = start; p < end; p++) StaticVisitor::VisitPointer(heap, p);
-  }
-
- private:
-  INLINE(static int VisitJSFunction(Map* map, HeapObject* object)) {
-    Heap* heap = map->GetHeap();
-    VisitPointers(heap,
-                  HeapObject::RawField(object, JSFunction::kPropertiesOffset),
-                  HeapObject::RawField(object, JSFunction::kCodeEntryOffset));
-
-    // Don't visit code entry. We are using this visitor only during scavenges.
-
-    VisitPointers(
-        heap,
-        HeapObject::RawField(object,
-                             JSFunction::kCodeEntryOffset + kPointerSize),
-        HeapObject::RawField(object,
-                             JSFunction::kNonWeakFieldsEndOffset));
-    return JSFunction::kSize;
-  }
-
-  INLINE(static int VisitByteArray(Map* map, HeapObject* object)) {
-    return reinterpret_cast<ByteArray*>(object)->ByteArraySize();
-  }
-
-  INLINE(static int VisitFixedDoubleArray(Map* map, HeapObject* object)) {
-    int length = reinterpret_cast<FixedDoubleArray*>(object)->length();
-    return FixedDoubleArray::SizeFor(length);
-  }
-
-  INLINE(static int VisitFixedTypedArray(Map* map, HeapObject* object)) {
-    return reinterpret_cast<FixedTypedArrayBase*>(object)->size();
-  }
-
-  INLINE(static int VisitJSObject(Map* map, HeapObject* object)) {
-    return JSObjectVisitor::Visit(map, object);
-  }
-
-  INLINE(static int VisitSeqOneByteString(Map* map, HeapObject* object)) {
-    return SeqOneByteString::cast(object)->
-        SeqOneByteStringSize(map->instance_type());
-  }
-
-  INLINE(static int VisitSeqTwoByteString(Map* map, HeapObject* object)) {
-    return SeqTwoByteString::cast(object)->
-        SeqTwoByteStringSize(map->instance_type());
-  }
-
-  INLINE(static int VisitFreeSpace(Map* map, HeapObject* object)) {
-    return FreeSpace::cast(object)->Size();
-  }
-
-  INLINE(static int VisitJSArrayBuffer(Map* map, HeapObject* object));
-  INLINE(static int VisitJSTypedArray(Map* map, HeapObject* object));
-  INLINE(static int VisitJSDataView(Map* map, HeapObject* object));
-
-  class DataObjectVisitor {
-   public:
-    template<int object_size>
-    static inline int VisitSpecialized(Map* map, HeapObject* object) {
-      return object_size;
-    }
-
-    INLINE(static int Visit(Map* map, HeapObject* object)) {
-      return map->instance_size();
-    }
-  };
-
-  typedef FlexibleBodyVisitor<StaticVisitor,
-                              StructBodyDescriptor,
-                              int> StructVisitor;
-
-  typedef FlexibleBodyVisitor<StaticVisitor,
-                              JSObject::BodyDescriptor,
-                              int> JSObjectVisitor;
-
-  typedef int (*Callback)(Map* map, HeapObject* object);
-
-  static VisitorDispatchTable<Callback> table_;
-};
-
-
-template<typename StaticVisitor>
-VisitorDispatchTable<typename StaticNewSpaceVisitor<StaticVisitor>::Callback>
-    StaticNewSpaceVisitor<StaticVisitor>::table_;
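
As the comment above notes, the CRTP base calls StaticVisitor's methods statically, so each instantiation gets its own fully inlined visit loop with no virtual dispatch. A minimal sketch of the idiom outside V8 (all names invented):

    #include <cstdio>

    // Base resolves calls through the derived type at compile time (CRTP),
    // so VisitAll inlines Derived::VisitOne with no vtable involved.
    template <typename Derived>
    struct StaticVisitorSketch {
      static int VisitAll(const int* begin, const int* end) {
        int total = 0;
        for (const int* p = begin; p != end; ++p) {
          total += Derived::VisitOne(*p);
        }
        return total;
      }
    };

    struct CountingVisitor : StaticVisitorSketch<CountingVisitor> {
      static int VisitOne(int) { return 1; }  // count elements
    };

    struct SummingVisitor : StaticVisitorSketch<SummingVisitor> {
      static int VisitOne(int v) { return v; }  // sum elements
    };

    int main() {
      int data[] = {1, 2, 3};
      std::printf("%d %d\n", CountingVisitor::VisitAll(data, data + 3),
                  SummingVisitor::VisitAll(data, data + 3));  // prints: 3 6
      return 0;
    }
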
-
-
-// Base class for visitors used to transitively mark the entire heap.
-// IterateBody returns nothing.
-// Certain types of objects might not be handled by this base class and
-// no visitor function is registered by the generic initialization. A
-// specialized visitor function needs to be provided by the inheriting
-// class itself for those cases.
-//
-// This class is intended to be used in the following way:
-//
-//   class SomeVisitor : public StaticMarkingVisitor<SomeVisitor> {
-//     ...
-//   }
-//
-// This is another example of the Curiously Recurring Template Pattern.
-template<typename StaticVisitor>
-class StaticMarkingVisitor : public StaticVisitorBase {
- public:
-  static void Initialize();
-
-  INLINE(static void IterateBody(Map* map, HeapObject* obj)) {
-    table_.GetVisitor(map)(map, obj);
-  }
-
-  INLINE(static void VisitPropertyCell(Map* map, HeapObject* object));
-  INLINE(static void VisitCodeEntry(Heap* heap, Address entry_address));
-  INLINE(static void VisitEmbeddedPointer(Heap* heap, RelocInfo* rinfo));
-  INLINE(static void VisitCell(Heap* heap, RelocInfo* rinfo));
-  INLINE(static void VisitDebugTarget(Heap* heap, RelocInfo* rinfo));
-  INLINE(static void VisitCodeTarget(Heap* heap, RelocInfo* rinfo));
-  INLINE(static void VisitCodeAgeSequence(Heap* heap, RelocInfo* rinfo));
-  INLINE(static void VisitExternalReference(RelocInfo* rinfo)) { }
-  INLINE(static void VisitRuntimeEntry(RelocInfo* rinfo)) { }
-  // Skip the weak next code link in a code object.
-  INLINE(static void VisitNextCodeLink(Heap* heap, Object** slot)) { }
-
-  // TODO(mstarzinger): This should be made protected once refactoring is done.
-  // Mark non-optimized code for functions inlined into the given optimized
-  // code. This will prevent it from being flushed.
-  static void MarkInlinedFunctionsCode(Heap* heap, Code* code);
-
- protected:
-  INLINE(static void VisitMap(Map* map, HeapObject* object));
-  INLINE(static void VisitCode(Map* map, HeapObject* object));
-  INLINE(static void VisitSharedFunctionInfo(Map* map, HeapObject* object));
-  INLINE(static void VisitConstantPoolArray(Map* map, HeapObject* object));
-  INLINE(static void VisitAllocationSite(Map* map, HeapObject* object));
-  INLINE(static void VisitWeakCollection(Map* map, HeapObject* object));
-  INLINE(static void VisitJSFunction(Map* map, HeapObject* object));
-  INLINE(static void VisitJSRegExp(Map* map, HeapObject* object));
-  INLINE(static void VisitJSArrayBuffer(Map* map, HeapObject* object));
-  INLINE(static void VisitJSTypedArray(Map* map, HeapObject* object));
-  INLINE(static void VisitJSDataView(Map* map, HeapObject* object));
-  INLINE(static void VisitNativeContext(Map* map, HeapObject* object));
-
-  // Mark pointers in a Map and its TransitionArray together, possibly
-  // treating transitions or back pointers as weak.
-  static void MarkMapContents(Heap* heap, Map* map);
-  static void MarkTransitionArray(Heap* heap, TransitionArray* transitions);
-
-  // Code flushing support.
-  INLINE(static bool IsFlushable(Heap* heap, JSFunction* function));
-  INLINE(static bool IsFlushable(Heap* heap, SharedFunctionInfo* shared_info));
-
-  // Helpers used by code flushing support that visit pointer fields and treat
-  // references to code objects either strongly or weakly.
-  static void VisitSharedFunctionInfoStrongCode(Heap* heap, HeapObject* object);
-  static void VisitSharedFunctionInfoWeakCode(Heap* heap, HeapObject* object);
-  static void VisitJSFunctionStrongCode(Heap* heap, HeapObject* object);
-  static void VisitJSFunctionWeakCode(Heap* heap, HeapObject* object);
-
-  class DataObjectVisitor {
-   public:
-    template<int size>
-    static inline void VisitSpecialized(Map* map, HeapObject* object) {
-    }
-
-    INLINE(static void Visit(Map* map, HeapObject* object)) {
-    }
-  };
-
-  typedef FlexibleBodyVisitor<StaticVisitor,
-                              FixedArray::BodyDescriptor,
-                              void> FixedArrayVisitor;
-
-  typedef FlexibleBodyVisitor<StaticVisitor,
-                              JSObject::BodyDescriptor,
-                              void> JSObjectVisitor;
-
-  typedef FlexibleBodyVisitor<StaticVisitor,
-                              StructBodyDescriptor,
-                              void> StructObjectVisitor;
-
-  typedef void (*Callback)(Map* map, HeapObject* object);
-
-  static VisitorDispatchTable<Callback> table_;
-};
-
-
-template<typename StaticVisitor>
-VisitorDispatchTable<typename StaticMarkingVisitor<StaticVisitor>::Callback>
-    StaticMarkingVisitor<StaticVisitor>::table_;
-
-
-class WeakObjectRetainer;
-
-
-// A weak list is a singly linked list where each element has a weak pointer
-// to the next element. Given the head of the list, this function removes
-// dead elements from the list and, if requested, records slots for the
-// next-element pointers. The template parameter T is a WeakListVisitor that
-// defines how to access the next-element pointers.
-template <class T>
-Object* VisitWeakList(Heap* heap, Object* list, WeakObjectRetainer* retainer);
-
-} }  // namespace v8::internal
-
-#endif  // V8_OBJECTS_VISITING_H_
diff --git a/src/objects.cc b/src/objects.cc
index 1d3b022..39d3d7c 100644
--- a/src/objects.cc
+++ b/src/objects.cc
@@ -8,26 +8,29 @@
 #include "src/allocation-site-scopes.h"
 #include "src/api.h"
 #include "src/arguments.h"
+#include "src/base/bits.h"
 #include "src/bootstrapper.h"
-#include "src/codegen.h"
 #include "src/code-stubs.h"
+#include "src/codegen.h"
 #include "src/cpu-profiler.h"
+#include "src/date.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
-#include "src/date.h"
 #include "src/elements.h"
 #include "src/execution.h"
-#include "src/field-index.h"
 #include "src/field-index-inl.h"
+#include "src/field-index.h"
 #include "src/full-codegen.h"
+#include "src/heap/mark-compact.h"
+#include "src/heap/objects-visiting-inl.h"
 #include "src/hydrogen.h"
+#include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 #include "src/log.h"
 #include "src/lookup.h"
-#include "src/objects-inl.h"
-#include "src/objects-visiting-inl.h"
 #include "src/macro-assembler.h"
-#include "src/mark-compact.h"
+#include "src/objects-inl.h"
+#include "src/prototype.h"
 #include "src/safepoint-table.h"
 #include "src/string-search.h"
 #include "src/string-stream.h"
@@ -92,8 +95,8 @@
 }
 
 
-bool Object::IsCallable() {
-  Object* fun = this;
+bool Object::IsCallable() const {
+  const Object* fun = this;
   while (fun->IsJSFunctionProxy()) {
     fun = JSFunctionProxy::cast(fun)->call_trap();
   }
@@ -103,42 +106,18 @@
 }
 
 
-void Object::Lookup(Handle<Name> name, LookupResult* result) {
-  DisallowHeapAllocation no_gc;
-  Object* holder = NULL;
-  if (IsJSReceiver()) {
-    holder = this;
-  } else {
-    Context* native_context = result->isolate()->context()->native_context();
-    if (IsNumber()) {
-      holder = native_context->number_function()->instance_prototype();
-    } else if (IsString()) {
-      holder = native_context->string_function()->instance_prototype();
-    } else if (IsSymbol()) {
-      holder = native_context->symbol_function()->instance_prototype();
-    } else if (IsBoolean()) {
-      holder = native_context->boolean_function()->instance_prototype();
-    } else {
-      result->isolate()->PushStackTraceAndDie(
-          0xDEAD0000, this, JSReceiver::cast(this)->map(), 0xDEAD0001);
-    }
-  }
-  ASSERT(holder != NULL);  // Cannot handle null or undefined.
-  JSReceiver::cast(holder)->Lookup(name, result);
-}
-
-
 MaybeHandle<Object> Object::GetProperty(LookupIterator* it) {
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
       case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
         UNREACHABLE();
       case LookupIterator::JSPROXY:
-        return JSProxy::GetPropertyWithHandler(
-            it->GetJSProxy(), it->GetReceiver(), it->name());
+        return JSProxy::GetPropertyWithHandler(it->GetHolder<JSProxy>(),
+                                               it->GetReceiver(), it->name());
       case LookupIterator::INTERCEPTOR: {
         MaybeHandle<Object> maybe_result = JSObject::GetPropertyWithInterceptor(
-            it->GetHolder(), it->GetReceiver(), it->name());
+            it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
         if (!maybe_result.is_null()) return maybe_result;
         if (it->isolate()->has_pending_exception()) return maybe_result;
         break;
@@ -146,24 +125,53 @@
       case LookupIterator::ACCESS_CHECK:
         if (it->HasAccess(v8::ACCESS_GET)) break;
         return JSObject::GetPropertyWithFailedAccessCheck(it);
-      case LookupIterator::PROPERTY:
-        if (it->HasProperty()) {
-          switch (it->property_kind()) {
-            case LookupIterator::ACCESSOR:
-              return GetPropertyWithAccessor(
-                  it->GetReceiver(), it->name(),
-                  it->GetHolder(), it->GetAccessors());
-            case LookupIterator::DATA:
-              return it->GetDataValue();
-          }
-        }
-        break;
+      case LookupIterator::ACCESSOR:
+        return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
+                                       it->GetHolder<JSObject>(),
+                                       it->GetAccessors());
+      case LookupIterator::DATA:
+        return it->GetDataValue();
     }
   }
   return it->factory()->undefined_value();
 }
 
 
+Handle<Object> JSObject::GetDataProperty(Handle<JSObject> object,
+                                         Handle<Name> key) {
+  LookupIterator it(object, key,
+                    LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+  return GetDataProperty(&it);
+}
+
+
+Handle<Object> JSObject::GetDataProperty(LookupIterator* it) {
+  for (; it->IsFound(); it->Next()) {
+    switch (it->state()) {
+      case LookupIterator::INTERCEPTOR:
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
+      case LookupIterator::ACCESS_CHECK:
+        if (it->HasAccess(v8::ACCESS_GET)) continue;
+      // Fall through.
+      case LookupIterator::JSPROXY:
+        it->NotFound();
+        return it->isolate()->factory()->undefined_value();
+      case LookupIterator::ACCESSOR:
+        // TODO(verwaest): For now this doesn't call into
+        // ExecutableAccessorInfo, since clients don't need it. Update once
+        // relevant.
+        it->NotFound();
+        return it->isolate()->factory()->undefined_value();
+      case LookupIterator::DATA:
+        return it->GetDataValue();
+    }
+  }
+  return it->isolate()->factory()->undefined_value();
+}
+
+
 bool Object::ToInt32(int32_t* value) {
   if (IsSmi()) {
     *value = Smi::cast(this)->value();
@@ -227,7 +235,7 @@
 template<typename To>
 static inline To* CheckedCast(void *from) {
   uintptr_t temp = reinterpret_cast<uintptr_t>(from);
-  ASSERT(temp % sizeof(To) == 0);
+  DCHECK(temp % sizeof(To) == 0);
   return reinterpret_cast<To*>(temp);
 }
 
@@ -329,39 +337,39 @@
     const DeclaredAccessorDescriptorData* data = iterator.Next();
     switch (data->type) {
       case kDescriptorReturnObject: {
-        ASSERT(iterator.Complete());
+        DCHECK(iterator.Complete());
         current = *CheckedCast<char*>(current);
         return handle(*CheckedCast<Object*>(current), isolate);
       }
       case kDescriptorPointerDereference:
-        ASSERT(!iterator.Complete());
+        DCHECK(!iterator.Complete());
         current = *reinterpret_cast<char**>(current);
         break;
       case kDescriptorPointerShift:
-        ASSERT(!iterator.Complete());
+        DCHECK(!iterator.Complete());
         current += data->pointer_shift_descriptor.byte_offset;
         break;
       case kDescriptorObjectDereference: {
-        ASSERT(!iterator.Complete());
+        DCHECK(!iterator.Complete());
         Object* object = CheckedCast<Object>(current);
         int field = data->object_dereference_descriptor.internal_field;
         Object* smi = JSObject::cast(object)->GetInternalField(field);
-        ASSERT(smi->IsSmi());
+        DCHECK(smi->IsSmi());
         current = reinterpret_cast<char*>(smi);
         break;
       }
       case kDescriptorBitmaskCompare:
-        ASSERT(iterator.Complete());
+        DCHECK(iterator.Complete());
         return PerformCompare(data->bitmask_compare_descriptor,
                               current,
                               isolate);
       case kDescriptorPointerCompare:
-        ASSERT(iterator.Complete());
+        DCHECK(iterator.Complete());
         return PerformCompare(data->pointer_compare_descriptor,
                               current,
                               isolate);
       case kDescriptorPrimitiveValue:
-        ASSERT(iterator.Complete());
+        DCHECK(iterator.Complete());
         return GetPrimitiveValue(data->primitive_value_descriptor,
                                  current,
                                  isolate);
@@ -374,7 +382,7 @@
 
 Handle<FixedArray> JSObject::EnsureWritableFastElements(
     Handle<JSObject> object) {
-  ASSERT(object->HasFastSmiOrObjectElements());
+  DCHECK(object->HasFastSmiOrObjectElements());
   Isolate* isolate = object->GetIsolate();
   Handle<FixedArray> elems(FixedArray::cast(object->elements()), isolate);
   if (elems->map() != isolate->heap()->fixed_cow_array_map()) return elems;
@@ -396,7 +404,7 @@
 
   Handle<Object> args[] = { receiver, name };
   return CallTrap(
-      proxy, "get",  isolate->derived_get_trap(), ARRAY_SIZE(args), args);
+      proxy, "get",  isolate->derived_get_trap(), arraysize(args), args);
 }
 
 
@@ -405,21 +413,17 @@
                                                     Handle<JSObject> holder,
                                                     Handle<Object> structure) {
   Isolate* isolate = name->GetIsolate();
-  ASSERT(!structure->IsForeign());
+  DCHECK(!structure->IsForeign());
   // api style callbacks.
   if (structure->IsAccessorInfo()) {
-    Handle<AccessorInfo> accessor_info = Handle<AccessorInfo>::cast(structure);
-    if (!accessor_info->IsCompatibleReceiver(*receiver)) {
+    Handle<AccessorInfo> info = Handle<AccessorInfo>::cast(structure);
+    if (!info->IsCompatibleReceiver(*receiver)) {
       Handle<Object> args[2] = { name, receiver };
-      Handle<Object> error =
-          isolate->factory()->NewTypeError("incompatible_method_receiver",
-                                           HandleVector(args,
-                                                        ARRAY_SIZE(args)));
-      return isolate->Throw<Object>(error);
+      THROW_NEW_ERROR(isolate,
+                      NewTypeError("incompatible_method_receiver",
+                                   HandleVector(args, arraysize(args))),
+                      Object);
     }
-    // TODO(rossberg): Handling symbols in the API requires changing the API,
-    // so we do not support it for now.
-    if (name->IsSymbol()) return isolate->factory()->undefined_value();
     if (structure->IsDeclaredAccessorInfo()) {
       return GetDeclaredAccessorProperty(
           receiver,
@@ -429,15 +433,14 @@
 
     Handle<ExecutableAccessorInfo> data =
         Handle<ExecutableAccessorInfo>::cast(structure);
-    v8::AccessorGetterCallback call_fun =
-        v8::ToCData<v8::AccessorGetterCallback>(data->getter());
+    v8::AccessorNameGetterCallback call_fun =
+        v8::ToCData<v8::AccessorNameGetterCallback>(data->getter());
     if (call_fun == NULL) return isolate->factory()->undefined_value();
 
-    Handle<String> key = Handle<String>::cast(name);
     LOG(isolate, ApiNamedPropertyAccess("load", *holder, *name));
     PropertyCallbackArguments args(isolate, data->data(), *receiver, *holder);
     v8::Handle<v8::Value> result =
-        args.Call(call_fun, v8::Utils::ToLocal(key));
+        args.Call(call_fun, v8::Utils::ToLocal(name));
     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
     if (result.IsEmpty()) {
       return isolate->factory()->undefined_value();
@@ -461,40 +464,45 @@
 }
 
 
-MaybeHandle<Object> Object::SetPropertyWithCallback(Handle<Object> receiver,
-                                                    Handle<Name> name,
-                                                    Handle<Object> value,
-                                                    Handle<JSObject> holder,
-                                                    Handle<Object> structure,
-                                                    StrictMode strict_mode) {
+bool AccessorInfo::IsCompatibleReceiverType(Isolate* isolate,
+                                            Handle<AccessorInfo> info,
+                                            Handle<HeapType> type) {
+  if (!info->HasExpectedReceiverType()) return true;
+  Handle<Map> map = IC::TypeToMap(*type, isolate);
+  if (!map->IsJSObjectMap()) return false;
+  return FunctionTemplateInfo::cast(info->expected_receiver_type())
+      ->IsTemplateFor(*map);
+}
+
+
+MaybeHandle<Object> Object::SetPropertyWithAccessor(
+    Handle<Object> receiver, Handle<Name> name, Handle<Object> value,
+    Handle<JSObject> holder, Handle<Object> structure, StrictMode strict_mode) {
   Isolate* isolate = name->GetIsolate();
 
   // We should never get here to initialize a const with the hole
   // value since a const declaration would conflict with the setter.
-  ASSERT(!value->IsTheHole());
-  ASSERT(!structure->IsForeign());
+  DCHECK(!structure->IsForeign());
   if (structure->IsExecutableAccessorInfo()) {
+    // Don't call executable accessor setters with non-JSObject receivers.
+    if (!receiver->IsJSObject()) return value;
     // api style callbacks
-    ExecutableAccessorInfo* data = ExecutableAccessorInfo::cast(*structure);
-    if (!data->IsCompatibleReceiver(*receiver)) {
+    ExecutableAccessorInfo* info = ExecutableAccessorInfo::cast(*structure);
+    if (!info->IsCompatibleReceiver(*receiver)) {
       Handle<Object> args[2] = { name, receiver };
-      Handle<Object> error =
-          isolate->factory()->NewTypeError("incompatible_method_receiver",
-                                           HandleVector(args,
-                                                        ARRAY_SIZE(args)));
-      return isolate->Throw<Object>(error);
+      THROW_NEW_ERROR(isolate,
+                      NewTypeError("incompatible_method_receiver",
+                                   HandleVector(args, arraysize(args))),
+                      Object);
     }
-    // TODO(rossberg): Support symbols in the API.
-    if (name->IsSymbol()) return value;
-    Object* call_obj = data->setter();
-    v8::AccessorSetterCallback call_fun =
-        v8::ToCData<v8::AccessorSetterCallback>(call_obj);
+    Object* call_obj = info->setter();
+    v8::AccessorNameSetterCallback call_fun =
+        v8::ToCData<v8::AccessorNameSetterCallback>(call_obj);
     if (call_fun == NULL) return value;
-    Handle<String> key = Handle<String>::cast(name);
     LOG(isolate, ApiNamedPropertyAccess("store", *holder, *name));
-    PropertyCallbackArguments args(isolate, data->data(), *receiver, *holder);
+    PropertyCallbackArguments args(isolate, info->data(), *receiver, *holder);
     args.Call(call_fun,
-              v8::Utils::ToLocal(key),
+              v8::Utils::ToLocal(name),
               v8::Utils::ToLocal(value));
     RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
     return value;
@@ -509,10 +517,9 @@
     } else {
       if (strict_mode == SLOPPY) return value;
       Handle<Object> args[2] = { name, holder };
-      Handle<Object> error =
-          isolate->factory()->NewTypeError("no_setter_in_callback",
-                                           HandleVector(args, 2));
-      return isolate->Throw<Object>(error);
+      THROW_NEW_ERROR(
+          isolate, NewTypeError("no_setter_in_callback", HandleVector(args, 2)),
+          Object);
     }
   }
 
@@ -557,26 +564,19 @@
   }
 
   Handle<Object> argv[] = { value };
-  RETURN_ON_EXCEPTION(
-      isolate,
-      Execution::Call(isolate, setter, receiver, ARRAY_SIZE(argv), argv),
-      Object);
+  RETURN_ON_EXCEPTION(isolate, Execution::Call(isolate, setter, receiver,
+                                               arraysize(argv), argv, true),
+                      Object);
   return value;
 }
 
 
 static bool FindAllCanReadHolder(LookupIterator* it) {
-  it->skip_interceptor();
-  it->skip_access_check();
   for (; it->IsFound(); it->Next()) {
-    if (it->state() == LookupIterator::PROPERTY &&
-        it->HasProperty() &&
-        it->property_kind() == LookupIterator::ACCESSOR) {
+    if (it->state() == LookupIterator::ACCESSOR) {
       Handle<Object> accessors = it->GetAccessors();
       if (accessors->IsAccessorInfo()) {
         if (AccessorInfo::cast(*accessors)->all_can_read()) return true;
-      } else if (accessors->IsAccessorPair()) {
-        if (AccessorPair::cast(*accessors)->all_can_read()) return true;
       }
     }
   }
@@ -586,10 +586,11 @@
 
 MaybeHandle<Object> JSObject::GetPropertyWithFailedAccessCheck(
     LookupIterator* it) {
-  Handle<JSObject> checked = Handle<JSObject>::cast(it->GetHolder());
+  Handle<JSObject> checked = it->GetHolder<JSObject>();
   if (FindAllCanReadHolder(it)) {
-    return GetPropertyWithAccessor(
-        it->GetReceiver(), it->name(), it->GetHolder(), it->GetAccessors());
+    return GetPropertyWithAccessor(it->GetReceiver(), it->name(),
+                                   it->GetHolder<JSObject>(),
+                                   it->GetAccessors());
   }
   it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_GET);
   RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
@@ -597,109 +598,51 @@
 }
 
 
-PropertyAttributes JSObject::GetPropertyAttributesWithFailedAccessCheck(
+Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithFailedAccessCheck(
     LookupIterator* it) {
-  Handle<JSObject> checked = Handle<JSObject>::cast(it->GetHolder());
-  if (FindAllCanReadHolder(it)) return it->property_details().attributes();
+  Handle<JSObject> checked = it->GetHolder<JSObject>();
+  if (FindAllCanReadHolder(it))
+    return maybe(it->property_details().attributes());
   it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_HAS);
-  // TODO(yangguo): Issue 3269, check for scheduled exception missing?
-  return ABSENT;
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(it->isolate(),
+                                      Maybe<PropertyAttributes>());
+  return maybe(ABSENT);
 }
 
 
-static bool FindAllCanWriteHolder(LookupResult* result,
-                                  Handle<Name> name,
-                                  bool check_prototype) {
-  if (result->IsInterceptor()) {
-    result->holder()->LookupOwnRealNamedProperty(name, result);
-  }
-
-  while (result->IsProperty()) {
-    if (result->type() == CALLBACKS) {
-      Object* callback_obj = result->GetCallbackObject();
-      if (callback_obj->IsAccessorInfo()) {
-        if (AccessorInfo::cast(callback_obj)->all_can_write()) return true;
-      } else if (callback_obj->IsAccessorPair()) {
-        if (AccessorPair::cast(callback_obj)->all_can_write()) return true;
+static bool FindAllCanWriteHolder(LookupIterator* it) {
+  for (; it->IsFound(); it->Next()) {
+    if (it->state() == LookupIterator::ACCESSOR) {
+      Handle<Object> accessors = it->GetAccessors();
+      if (accessors->IsAccessorInfo()) {
+        if (AccessorInfo::cast(*accessors)->all_can_write()) return true;
       }
     }
-    if (!check_prototype) break;
-    result->holder()->LookupRealNamedPropertyInPrototypes(name, result);
   }
   return false;
 }
 
 
 MaybeHandle<Object> JSObject::SetPropertyWithFailedAccessCheck(
-    Handle<JSObject> object,
-    LookupResult* result,
-    Handle<Name> name,
-    Handle<Object> value,
-    bool check_prototype,
-    StrictMode strict_mode) {
-  if (check_prototype && !result->IsProperty()) {
-    object->LookupRealNamedPropertyInPrototypes(name, result);
+    LookupIterator* it, Handle<Object> value, StrictMode strict_mode) {
+  Handle<JSObject> checked = it->GetHolder<JSObject>();
+  if (FindAllCanWriteHolder(it)) {
+    return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value,
+                                   it->GetHolder<JSObject>(),
+                                   it->GetAccessors(), strict_mode);
   }
 
-  if (FindAllCanWriteHolder(result, name, check_prototype)) {
-    Handle<JSObject> holder(result->holder());
-    Handle<Object> callbacks(result->GetCallbackObject(), result->isolate());
-    return SetPropertyWithCallback(
-        object, name, value, holder, callbacks, strict_mode);
-  }
-
-  Isolate* isolate = object->GetIsolate();
-  isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
-  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+  it->isolate()->ReportFailedAccessCheck(checked, v8::ACCESS_SET);
+  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
   return value;
 }
 
 
-Object* JSObject::GetNormalizedProperty(const LookupResult* result) {
-  ASSERT(!HasFastProperties());
-  Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
-  if (IsGlobalObject()) {
-    value = PropertyCell::cast(value)->value();
-  }
-  ASSERT(!value->IsPropertyCell() && !value->IsCell());
-  return value;
-}
-
-
-Handle<Object> JSObject::GetNormalizedProperty(Handle<JSObject> object,
-                                               const LookupResult* result) {
-  ASSERT(!object->HasFastProperties());
-  Isolate* isolate = object->GetIsolate();
-  Handle<Object> value(object->property_dictionary()->ValueAt(
-      result->GetDictionaryEntry()), isolate);
-  if (object->IsGlobalObject()) {
-    value = Handle<Object>(Handle<PropertyCell>::cast(value)->value(), isolate);
-  }
-  ASSERT(!value->IsPropertyCell() && !value->IsCell());
-  return value;
-}
-
-
-void JSObject::SetNormalizedProperty(Handle<JSObject> object,
-                                     const LookupResult* result,
-                                     Handle<Object> value) {
-  ASSERT(!object->HasFastProperties());
-  NameDictionary* property_dictionary = object->property_dictionary();
-  if (object->IsGlobalObject()) {
-    Handle<PropertyCell> cell(PropertyCell::cast(
-        property_dictionary->ValueAt(result->GetDictionaryEntry())));
-    PropertyCell::SetValueInferType(cell, value);
-  } else {
-    property_dictionary->ValueAtPut(result->GetDictionaryEntry(), *value);
-  }
-}
-
-
 void JSObject::SetNormalizedProperty(Handle<JSObject> object,
                                      Handle<Name> name,
                                      Handle<Object> value,
                                      PropertyDetails details) {
-  ASSERT(!object->HasFastProperties());
+  DCHECK(!object->HasFastProperties());
   Handle<NameDictionary> property_dictionary(object->property_dictionary());
 
   if (!name->IsUniqueName()) {
@@ -728,7 +671,7 @@
     property_dictionary->SetNextEnumerationIndex(enumeration_index + 1);
   } else {
     enumeration_index = original_details.dictionary_index();
-    ASSERT(enumeration_index > 0);
+    DCHECK(enumeration_index > 0);
   }
 
   details = PropertyDetails(
@@ -749,7 +692,7 @@
 Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
                                                   Handle<Name> name,
                                                   DeleteMode mode) {
-  ASSERT(!object->HasFastProperties());
+  DCHECK(!object->HasFastProperties());
   Isolate* isolate = object->GetIsolate();
   Handle<NameDictionary> dictionary(object->property_dictionary());
   int entry = dictionary->FindEntry(name);
@@ -757,15 +700,15 @@
     // If we have a global object set the cell to the hole.
     if (object->IsGlobalObject()) {
       PropertyDetails details = dictionary->DetailsAt(entry);
-      if (details.IsDontDelete()) {
+      if (!details.IsConfigurable()) {
         if (mode != FORCE_DELETION) return isolate->factory()->false_value();
         // When forced to delete global properties, we have to make a
         // map change to invalidate any ICs that think they can load
-        // from the DontDelete cell without checking if it contains
+        // from the non-configurable cell without checking if it contains
         // the hole value.
         Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
-        ASSERT(new_map->is_dictionary_map());
-        object->set_map(*new_map);
+        DCHECK(new_map->is_dictionary_map());
+        JSObject::MigrateToMap(object, new_map);
       }
       Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
       Handle<Object> value = isolate->factory()->the_hole_value();
@@ -805,41 +748,30 @@
                                                    Handle<Object> object,
                                                    Handle<Object> receiver,
                                                    uint32_t index) {
-  Handle<Object> holder;
+  if (object->IsUndefined()) {
+    // TODO(verwaest): Why is this check here?
+    UNREACHABLE();
+    return isolate->factory()->undefined_value();
+  }
 
   // Iterate up the prototype chain until an element is found or the null
   // prototype is encountered.
-  for (holder = object;
-       !holder->IsNull();
-       holder = Handle<Object>(holder->GetPrototype(isolate), isolate)) {
-    if (!holder->IsJSObject()) {
-      Context* native_context = isolate->context()->native_context();
-      if (holder->IsNumber()) {
-        holder = Handle<Object>(
-            native_context->number_function()->instance_prototype(), isolate);
-      } else if (holder->IsString()) {
-        holder = Handle<Object>(
-            native_context->string_function()->instance_prototype(), isolate);
-      } else if (holder->IsSymbol()) {
-        holder = Handle<Object>(
-            native_context->symbol_function()->instance_prototype(), isolate);
-      } else if (holder->IsBoolean()) {
-        holder = Handle<Object>(
-            native_context->boolean_function()->instance_prototype(), isolate);
-      } else if (holder->IsJSProxy()) {
-        return JSProxy::GetElementWithHandler(
-            Handle<JSProxy>::cast(holder), receiver, index);
-      } else {
-        // Undefined and null have no indexed properties.
-        ASSERT(holder->IsUndefined() || holder->IsNull());
-        return isolate->factory()->undefined_value();
-      }
+  for (PrototypeIterator iter(isolate, object,
+                              object->IsJSProxy() || object->IsJSObject()
+                                  ? PrototypeIterator::START_AT_RECEIVER
+                                  : PrototypeIterator::START_AT_PROTOTYPE);
+       !iter.IsAtEnd(); iter.Advance()) {
+    if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
+      return JSProxy::GetElementWithHandler(
+          Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
+          index);
     }
 
     // Inline the case for JSObjects. Doing so significantly improves the
     // performance of fetching elements where checking the prototype chain is
     // necessary.
-    Handle<JSObject> js_object = Handle<JSObject>::cast(holder);
+    Handle<JSObject> js_object =
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
 
     // Check access rights if needed.
     if (js_object->IsAccessCheckNeeded()) {
@@ -868,11 +800,11 @@
 }
 
 
-Object* Object::GetPrototype(Isolate* isolate) {
+Map* Object::GetRootMap(Isolate* isolate) {
   DisallowHeapAllocation no_alloc;
   if (IsSmi()) {
     Context* context = isolate->context()->native_context();
-    return context->number_function()->instance_prototype();
+    return context->number_function()->initial_map();
   }
 
   HeapObject* heap_object = HeapObject::cast(this);
@@ -880,30 +812,23 @@
   // The object is either a number, a string, a boolean,
   // a real JS object, or a Harmony proxy.
   if (heap_object->IsJSReceiver()) {
-    return heap_object->map()->prototype();
+    return heap_object->map();
   }
   Context* context = isolate->context()->native_context();
 
   if (heap_object->IsHeapNumber()) {
-    return context->number_function()->instance_prototype();
+    return context->number_function()->initial_map();
   }
   if (heap_object->IsString()) {
-    return context->string_function()->instance_prototype();
+    return context->string_function()->initial_map();
   }
   if (heap_object->IsSymbol()) {
-    return context->symbol_function()->instance_prototype();
+    return context->symbol_function()->initial_map();
   }
   if (heap_object->IsBoolean()) {
-    return context->boolean_function()->instance_prototype();
-  } else {
-    return isolate->heap()->null_value();
+    return context->boolean_function()->initial_map();
   }
-}
-
-
-Handle<Object> Object::GetPrototype(Isolate* isolate,
-                                    Handle<Object> object) {
-  return handle(object->GetPrototype(isolate), isolate);
+  return isolate->heap()->null_value()->map();
 }
 
 
@@ -923,7 +848,7 @@
     return Smi::FromInt(hash);
   }
 
-  ASSERT(IsJSReceiver());
+  DCHECK(IsJSReceiver());
   return JSReceiver::cast(this)->GetIdentityHash();
 }
 
@@ -932,7 +857,7 @@
   Handle<Object> hash(object->GetHash(), isolate);
   if (hash->IsSmi()) return Handle<Smi>::cast(hash);
 
-  ASSERT(object->IsJSReceiver());
+  DCHECK(object->IsJSReceiver());
   return JSReceiver::GetOrCreateIdentityHash(Handle<JSReceiver>::cast(object));
 }
 
@@ -978,29 +903,32 @@
 
 
 void Object::ShortPrint(FILE* out) {
-  HeapStringAllocator allocator;
-  StringStream accumulator(&allocator);
-  ShortPrint(&accumulator);
-  accumulator.OutputToFile(out);
+  OFStream os(out);
+  os << Brief(this);
 }
 
 
 void Object::ShortPrint(StringStream* accumulator) {
-  if (IsSmi()) {
-    Smi::cast(this)->SmiPrint(accumulator);
+  OStringStream os;
+  os << Brief(this);
+  accumulator->Add(os.c_str());
+}
+
+
+OStream& operator<<(OStream& os, const Brief& v) {
+  if (v.value->IsSmi()) {
+    Smi::cast(v.value)->SmiPrint(os);
   } else {
-    HeapObject::cast(this)->HeapObjectShortPrint(accumulator);
+    // TODO(svenpanne) Const-correct HeapObjectShortPrint!
+    HeapObject* obj = const_cast<HeapObject*>(HeapObject::cast(v.value));
+    obj->HeapObjectShortPrint(os);
   }
+  return os;
 }
 
 
-void Smi::SmiPrint(FILE* out) {
-  PrintF(out, "%d", value());
-}
-
-
-void Smi::SmiPrint(StringStream* accumulator) {
-  accumulator->Add("%d", value());
+void Smi::SmiPrint(OStream& os) const {  // NOLINT
+  os << value();
 }
 
 
@@ -1030,8 +958,8 @@
 
 Handle<String> String::SlowFlatten(Handle<ConsString> cons,
                                    PretenureFlag pretenure) {
-  ASSERT(AllowHeapAllocation::IsAllowed());
-  ASSERT(cons->second()->length() != 0);
+  DCHECK(AllowHeapAllocation::IsAllowed());
+  DCHECK(cons->second()->length() != 0);
   Isolate* isolate = cons->GetIsolate();
   int length = cons->length();
   PretenureFlag tenure = isolate->heap()->InNewSpace(*cons) ? pretenure
@@ -1052,7 +980,7 @@
   }
   cons->set_first(*result);
   cons->set_second(isolate->heap()->empty_string());
-  ASSERT(result->IsFlat());
+  DCHECK(result->IsFlat());
   return result;
 }
 
@@ -1061,55 +989,46 @@
 bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   // Externalizing twice leaks the external resource, so it's
   // prohibited by the API.
-  ASSERT(!this->IsExternalString());
-#ifdef ENABLE_SLOW_ASSERTS
+  DCHECK(!this->IsExternalString());
+#ifdef ENABLE_SLOW_DCHECKS
   if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
-    ASSERT(static_cast<size_t>(this->length()) == resource->length());
+    DCHECK(static_cast<size_t>(this->length()) == resource->length());
     ScopedVector<uc16> smart_chars(this->length());
     String::WriteToFlat(this, smart_chars.start(), 0, this->length());
-    ASSERT(memcmp(smart_chars.start(),
+    DCHECK(memcmp(smart_chars.start(),
                   resource->data(),
                   resource->length() * sizeof(smart_chars[0])) == 0);
   }
 #endif  // DEBUG
-  Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
-  if (size < ExternalString::kShortSize) {
-    return false;
-  }
-  bool is_ascii = this->IsOneByteRepresentation();
+  // Abort if size does not allow in-place conversion.
+  if (size < ExternalString::kShortSize) return false;
+  Heap* heap = GetHeap();
+  bool is_one_byte = this->IsOneByteRepresentation();
   bool is_internalized = this->IsInternalizedString();
 
   // Morph the string to an external string by replacing the map and
-  // reinitializing the fields.  This won't work if
-  // - the space the existing string occupies is too small for a regular
-  //   external string.
-  // - the existing string is in old pointer space and the backing store of
-  //   the external string is not aligned.  The GC cannot deal with a field
-  //   containing a possibly unaligned address to outside of V8's heap.
-  // In either case we resort to a short external string instead, omitting
+  // reinitializing the fields.  This won't work if the space the existing
+  // string occupies is too small for a regular external string.
+  // In that case we resort to a short external string instead, omitting
   // the field caching the address of the backing store.  When we encounter
   // short external strings in generated code, we need to bailout to runtime.
   Map* new_map;
-  if (size < ExternalString::kSize ||
-      heap->old_pointer_space()->Contains(this)) {
+  if (size < ExternalString::kSize) {
     new_map = is_internalized
-        ? (is_ascii
-            ? heap->
-                short_external_internalized_string_with_one_byte_data_map()
-            : heap->short_external_internalized_string_map())
-        : (is_ascii
-            ? heap->short_external_string_with_one_byte_data_map()
-            : heap->short_external_string_map());
+        ? (is_one_byte
+           ? heap->short_external_internalized_string_with_one_byte_data_map()
+           : heap->short_external_internalized_string_map())
+        : (is_one_byte ? heap->short_external_string_with_one_byte_data_map()
+                       : heap->short_external_string_map());
   } else {
     new_map = is_internalized
-        ? (is_ascii
-            ? heap->external_internalized_string_with_one_byte_data_map()
-            : heap->external_internalized_string_map())
-        : (is_ascii
-            ? heap->external_string_with_one_byte_data_map()
-            : heap->external_string_map());
+        ? (is_one_byte
+           ? heap->external_internalized_string_with_one_byte_data_map()
+           : heap->external_internalized_string_map())
+        : (is_one_byte ? heap->external_string_with_one_byte_data_map()
+                       : heap->external_string_map());
   }
 
   // Byte size of the external String object.
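
The morph described in the comments is an in-place re-typing: the object keeps its address, only its map (layout descriptor) is swapped and the payload fields are reinitialized, which is why the old footprint must be at least as large as the external representation. A loose sketch under invented types (this is not V8's object layout):

    #include <cstdio>

    // Hypothetical object header: the first word identifies the layout
    // ("map"); the remaining fields are reinterpreted after the morph.
    struct Map { const char* name; };
    Map sequential_map = {"seq-string"};
    Map external_map = {"external-string"};

    struct StringObj {
      Map* map;              // replaced in place during externalization
      const char* resource;  // meaningful only under external_map
      char inline_data[24];  // meaningful only under sequential_map
    };

    bool MakeExternal(StringObj* s, const char* resource) {
      // The object stays at the same address; only its map and fields
      // change, mirroring "replace the map and reinitialize the fields".
      s->map = &external_map;
      s->resource = resource;
      return true;
    }

    int main() {
      StringObj s = {&sequential_map, nullptr, "abc"};
      MakeExternal(&s, "bytes owned outside the managed heap");
      std::printf("%s -> %s\n", s.map->name, s.resource);
      return 0;
    }
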
@@ -1129,50 +1048,47 @@
 }
 
 
-bool String::MakeExternal(v8::String::ExternalAsciiStringResource* resource) {
-#ifdef ENABLE_SLOW_ASSERTS
+bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
+  // Externalizing twice leaks the external resource, so it's
+  // prohibited by the API.
+  DCHECK(!this->IsExternalString());
+#ifdef ENABLE_SLOW_DCHECKS
   if (FLAG_enable_slow_asserts) {
     // Assert that the resource and the string are equivalent.
-    ASSERT(static_cast<size_t>(this->length()) == resource->length());
+    DCHECK(static_cast<size_t>(this->length()) == resource->length());
     if (this->IsTwoByteRepresentation()) {
       ScopedVector<uint16_t> smart_chars(this->length());
       String::WriteToFlat(this, smart_chars.start(), 0, this->length());
-      ASSERT(String::IsOneByte(smart_chars.start(), this->length()));
+      DCHECK(String::IsOneByte(smart_chars.start(), this->length()));
     }
     ScopedVector<char> smart_chars(this->length());
     String::WriteToFlat(this, smart_chars.start(), 0, this->length());
-    ASSERT(memcmp(smart_chars.start(),
+    DCHECK(memcmp(smart_chars.start(),
                   resource->data(),
                   resource->length() * sizeof(smart_chars[0])) == 0);
   }
 #endif  // DEBUG
-  Heap* heap = GetHeap();
   int size = this->Size();  // Byte size of the original string.
-  if (size < ExternalString::kShortSize) {
-    return false;
-  }
+  // Abort if size does not allow in-place conversion.
+  if (size < ExternalString::kShortSize) return false;
+  Heap* heap = GetHeap();
   bool is_internalized = this->IsInternalizedString();
 
   // Morph the string to an external string by replacing the map and
-  // reinitializing the fields.  This won't work if
-  // - the space the existing string occupies is too small for a regular
-  //   external string.
-  // - the existing string is in old pointer space and the backing store of
-  //   the external string is not aligned.  The GC cannot deal with a field
-  //   containing a possibly unaligned address to outside of V8's heap.
-  // In either case we resort to a short external string instead, omitting
+  // reinitializing the fields.  This won't work if the space the existing
+  // string occupies is too small for a regular external string.  In that
+  // case we resort to a short external string, omitting
   // the field caching the address of the backing store.  When we encounter
   // short external strings in generated code, we need to bailout to runtime.
   Map* new_map;
-  if (size < ExternalString::kSize ||
-      heap->old_pointer_space()->Contains(this)) {
+  if (size < ExternalString::kSize) {
     new_map = is_internalized
-        ? heap->short_external_ascii_internalized_string_map()
-        : heap->short_external_ascii_string_map();
+                  ? heap->short_external_one_byte_internalized_string_map()
+                  : heap->short_external_one_byte_string_map();
   } else {
     new_map = is_internalized
-        ? heap->external_ascii_internalized_string_map()
-        : heap->external_ascii_string_map();
+                  ? heap->external_one_byte_internalized_string_map()
+                  : heap->external_one_byte_string_map();
   }
 
   // Byte size of the external String object.
@@ -1183,7 +1099,7 @@
   // the left-over space to avoid races with the sweeper thread.
   this->synchronized_set_map(new_map);
 
-  ExternalAsciiString* self = ExternalAsciiString::cast(this);
+  ExternalOneByteString* self = ExternalOneByteString::cast(this);
   self->set_resource(resource);
   if (is_internalized) self->Hash();  // Force regeneration of the hash value.
 
@@ -1212,16 +1128,16 @@
     len = kMaxShortPrintLength;
     truncated = true;
   }
-  bool ascii = true;
+  bool one_byte = true;
   for (int i = 0; i < len; i++) {
     uint16_t c = stream.GetNext();
 
     if (c < 32 || c >= 127) {
-      ascii = false;
+      one_byte = false;
     }
   }
   stream.Reset(this);
-  if (ascii) {
+  if (one_byte) {
     accumulator->Add("<String[%u]: ", length());
     for (int i = 0; i < len; i++) {
       accumulator->Put(static_cast<char>(stream.GetNext()));
@@ -1256,6 +1172,16 @@
 }
 
 
+void String::PrintUC16(OStream& os, int start, int end) {  // NOLINT
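+  // Prints characters [start, end) as UC16 values; a negative |end| means
+  // "through the end of the string".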
+  if (end < 0) end = length();
+  ConsStringIteratorOp op;
+  StringCharacterStream stream(this, &op, start);
+  for (int i = start; i < end && stream.HasMore(); i++) {
+    os << AsUC16(stream.GetNext());
+  }
+}
+
+
 void JSObject::JSObjectShortPrint(StringStream* accumulator) {
   switch (map()->instance_type()) {
     case JS_ARRAY_TYPE: {
@@ -1359,11 +1285,9 @@
     ElementsKind from_kind, Handle<FixedArrayBase> from_elements,
     ElementsKind to_kind, Handle<FixedArrayBase> to_elements) {
   if (from_kind != to_kind) {
-    PrintF(file, "elements transition [");
-    PrintElementsKind(file, from_kind);
-    PrintF(file, " -> ");
-    PrintElementsKind(file, to_kind);
-    PrintF(file, "] in ");
+    OFStream os(file);
+    os << "elements transition [" << ElementsKindToString(from_kind) << " -> "
+       << ElementsKindToString(to_kind) << "] in ";
     JavaScriptFrame::PrintTop(object->GetIsolate(), file, false, true);
     PrintF(file, " for ");
     object->ShortPrint(file);
@@ -1386,37 +1310,35 @@
                               Representation new_representation,
                               HeapType* old_field_type,
                               HeapType* new_field_type) {
-  PrintF(file, "[generalizing ");
+  OFStream os(file);
+  os << "[generalizing ";
   constructor_name()->PrintOn(file);
-  PrintF(file, "] ");
+  os << "] ";
   Name* name = instance_descriptors()->GetKey(modify_index);
   if (name->IsString()) {
     String::cast(name)->PrintOn(file);
   } else {
-    PrintF(file, "{symbol %p}", static_cast<void*>(name));
+    os << "{symbol " << static_cast<void*>(name) << "}";
   }
-  PrintF(file, ":");
+  os << ":";
   if (constant_to_field) {
-    PrintF(file, "c");
+    os << "c";
   } else {
-    PrintF(file, "%s", old_representation.Mnemonic());
-    PrintF(file, "{");
-    old_field_type->TypePrint(file, HeapType::SEMANTIC_DIM);
-    PrintF(file, "}");
+    os << old_representation.Mnemonic() << "{";
+    old_field_type->PrintTo(os, HeapType::SEMANTIC_DIM);
+    os << "}";
   }
-  PrintF(file, "->%s", new_representation.Mnemonic());
-  PrintF(file, "{");
-  new_field_type->TypePrint(file, HeapType::SEMANTIC_DIM);
-  PrintF(file, "}");
-  PrintF(file, " (");
+  os << "->" << new_representation.Mnemonic() << "{";
+  new_field_type->PrintTo(os, HeapType::SEMANTIC_DIM);
+  os << "} (";
   if (strlen(reason) > 0) {
-    PrintF(file, "%s", reason);
+    os << reason;
   } else {
-    PrintF(file, "+%i maps", descriptors - split);
+    os << "+" << (descriptors - split) << " maps";
   }
-  PrintF(file, ") [");
+  os << ") [";
   JavaScriptFrame::PrintTop(GetIsolate(), file, false, true);
-  PrintF(file, "]\n");
+  os << "]\n";
 }
 
 
@@ -1449,53 +1371,59 @@
 }
 
 
-void HeapObject::HeapObjectShortPrint(StringStream* accumulator) {
+void HeapObject::HeapObjectShortPrint(OStream& os) {  // NOLINT
   Heap* heap = GetHeap();
   if (!heap->Contains(this)) {
-    accumulator->Add("!!!INVALID POINTER!!!");
+    os << "!!!INVALID POINTER!!!";
     return;
   }
   if (!heap->Contains(map())) {
-    accumulator->Add("!!!INVALID MAP!!!");
+    os << "!!!INVALID MAP!!!";
     return;
   }
 
-  accumulator->Add("%p ", this);
+  os << this << " ";
 
   if (IsString()) {
-    String::cast(this)->StringShortPrint(accumulator);
+    HeapStringAllocator allocator;
+    StringStream accumulator(&allocator);
+    String::cast(this)->StringShortPrint(&accumulator);
+    os << accumulator.ToCString().get();
     return;
   }
   if (IsJSObject()) {
-    JSObject::cast(this)->JSObjectShortPrint(accumulator);
+    HeapStringAllocator allocator;
+    StringStream accumulator(&allocator);
+    JSObject::cast(this)->JSObjectShortPrint(&accumulator);
+    os << accumulator.ToCString().get();
     return;
   }
   switch (map()->instance_type()) {
     case MAP_TYPE:
-      accumulator->Add("<Map(elements=%u)>", Map::cast(this)->elements_kind());
+      os << "<Map(elements=" << Map::cast(this)->elements_kind() << ")>";
       break;
     case FIXED_ARRAY_TYPE:
-      accumulator->Add("<FixedArray[%u]>", FixedArray::cast(this)->length());
+      os << "<FixedArray[" << FixedArray::cast(this)->length() << "]>";
       break;
     case FIXED_DOUBLE_ARRAY_TYPE:
-      accumulator->Add("<FixedDoubleArray[%u]>",
-                       FixedDoubleArray::cast(this)->length());
+      os << "<FixedDoubleArray[" << FixedDoubleArray::cast(this)->length()
+         << "]>";
       break;
     case BYTE_ARRAY_TYPE:
-      accumulator->Add("<ByteArray[%u]>", ByteArray::cast(this)->length());
+      os << "<ByteArray[" << ByteArray::cast(this)->length() << "]>";
       break;
     case FREE_SPACE_TYPE:
-      accumulator->Add("<FreeSpace[%u]>", FreeSpace::cast(this)->Size());
+      os << "<FreeSpace[" << FreeSpace::cast(this)->Size() << "]>";
       break;
-#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size)                 \
-    case EXTERNAL_##TYPE##_ARRAY_TYPE:                                         \
-      accumulator->Add("<External" #Type "Array[%u]>",                         \
-                       External##Type##Array::cast(this)->length());           \
-      break;                                                                   \
-    case FIXED_##TYPE##_ARRAY_TYPE:                                            \
-      accumulator->Add("<Fixed" #Type "Array[%u]>",                            \
-                       Fixed##Type##Array::cast(this)->length());              \
-      break;
+#define TYPED_ARRAY_SHORT_PRINT(Type, type, TYPE, ctype, size)                \
+  case EXTERNAL_##TYPE##_ARRAY_TYPE:                                          \
+    os << "<External" #Type "Array["                                          \
+       << External##Type##Array::cast(this)->length() << "]>";                \
+    break;                                                                    \
+  case FIXED_##TYPE##_ARRAY_TYPE:                                             \
+    os << "<Fixed" #Type "Array[" << Fixed##Type##Array::cast(this)->length() \
+       << "]>";                                                               \
+    break;
 
     TYPED_ARRAYS(TYPED_ARRAY_SHORT_PRINT)
 #undef TYPED_ARRAY_SHORT_PRINT
@@ -1505,75 +1433,94 @@
       SmartArrayPointer<char> debug_name =
           shared->DebugName()->ToCString();
       if (debug_name[0] != 0) {
-        accumulator->Add("<SharedFunctionInfo %s>", debug_name.get());
+        os << "<SharedFunctionInfo " << debug_name.get() << ">";
       } else {
-        accumulator->Add("<SharedFunctionInfo>");
+        os << "<SharedFunctionInfo>";
       }
       break;
     }
     case JS_MESSAGE_OBJECT_TYPE:
-      accumulator->Add("<JSMessageObject>");
+      os << "<JSMessageObject>";
       break;
 #define MAKE_STRUCT_CASE(NAME, Name, name) \
   case NAME##_TYPE:                        \
-    accumulator->Put('<');                 \
-    accumulator->Add(#Name);               \
-    accumulator->Put('>');                 \
+    os << "<" #Name ">";                   \
     break;
   STRUCT_LIST(MAKE_STRUCT_CASE)
 #undef MAKE_STRUCT_CASE
-    case CODE_TYPE:
-      accumulator->Add("<Code>");
+    case CODE_TYPE: {
+      Code* code = Code::cast(this);
+      os << "<Code: " << Code::Kind2String(code->kind()) << ">";
       break;
+    }
     case ODDBALL_TYPE: {
-      if (IsUndefined())
-        accumulator->Add("<undefined>");
-      else if (IsTheHole())
-        accumulator->Add("<the hole>");
-      else if (IsNull())
-        accumulator->Add("<null>");
-      else if (IsTrue())
-        accumulator->Add("<true>");
-      else if (IsFalse())
-        accumulator->Add("<false>");
-      else
-        accumulator->Add("<Odd Oddball>");
+      if (IsUndefined()) {
+        os << "<undefined>";
+      } else if (IsTheHole()) {
+        os << "<the hole>";
+      } else if (IsNull()) {
+        os << "<null>";
+      } else if (IsTrue()) {
+        os << "<true>";
+      } else if (IsFalse()) {
+        os << "<false>";
+      } else {
+        os << "<Odd Oddball>";
+      }
       break;
     }
     case SYMBOL_TYPE: {
       Symbol* symbol = Symbol::cast(this);
-      accumulator->Add("<Symbol: %d", symbol->Hash());
+      os << "<Symbol: " << symbol->Hash();
       if (!symbol->name()->IsUndefined()) {
-        accumulator->Add(" ");
-        String::cast(symbol->name())->StringShortPrint(accumulator);
+        os << " ";
+        HeapStringAllocator allocator;
+        StringStream accumulator(&allocator);
+        String::cast(symbol->name())->StringShortPrint(&accumulator);
+        os << accumulator.ToCString().get();
       }
-      accumulator->Add(">");
+      os << ">";
       break;
     }
-    case HEAP_NUMBER_TYPE:
-      accumulator->Add("<Number: ");
-      HeapNumber::cast(this)->HeapNumberPrint(accumulator);
-      accumulator->Put('>');
+    case HEAP_NUMBER_TYPE: {
+      os << "<Number: ";
+      HeapNumber::cast(this)->HeapNumberPrint(os);
+      os << ">";
       break;
+    }
+    case MUTABLE_HEAP_NUMBER_TYPE: {
+      os << "<MutableNumber: ";
+      HeapNumber::cast(this)->HeapNumberPrint(os);
+      os << '>';
+      break;
+    }
     case JS_PROXY_TYPE:
-      accumulator->Add("<JSProxy>");
+      os << "<JSProxy>";
       break;
     case JS_FUNCTION_PROXY_TYPE:
-      accumulator->Add("<JSFunctionProxy>");
+      os << "<JSFunctionProxy>";
       break;
     case FOREIGN_TYPE:
-      accumulator->Add("<Foreign>");
+      os << "<Foreign>";
       break;
-    case CELL_TYPE:
-      accumulator->Add("Cell for ");
-      Cell::cast(this)->value()->ShortPrint(accumulator);
+    case CELL_TYPE: {
+      os << "Cell for ";
+      HeapStringAllocator allocator;
+      StringStream accumulator(&allocator);
+      Cell::cast(this)->value()->ShortPrint(&accumulator);
+      os << accumulator.ToCString().get();
       break;
-    case PROPERTY_CELL_TYPE:
-      accumulator->Add("PropertyCell for ");
-      PropertyCell::cast(this)->value()->ShortPrint(accumulator);
+    }
+    case PROPERTY_CELL_TYPE: {
+      os << "PropertyCell for ";
+      HeapStringAllocator allocator;
+      StringStream accumulator(&allocator);
+      PropertyCell::cast(this)->value()->ShortPrint(&accumulator);
+      os << accumulator.ToCString().get();
       break;
+    }
     default:
-      accumulator->Add("<Other heap object (%d)>", map()->instance_type());
+      os << "<Other heap object (" << map()->instance_type() << ")>";
       break;
   }
 }
@@ -1604,8 +1551,8 @@
         break;
       case kExternalStringTag:
         if ((type & kStringEncodingMask) == kOneByteStringTag) {
-          reinterpret_cast<ExternalAsciiString*>(this)->
-              ExternalAsciiStringIterateBody(v);
+          reinterpret_cast<ExternalOneByteString*>(this)
+              ->ExternalOneByteStringIterateBody(v);
         } else {
           reinterpret_cast<ExternalTwoByteString*>(this)->
               ExternalTwoByteStringIterateBody(v);
@@ -1680,6 +1627,7 @@
       break;
 
     case HEAP_NUMBER_TYPE:
+    case MUTABLE_HEAP_NUMBER_TYPE:
     case FILLER_TYPE:
     case BYTE_ARRAY_TYPE:
     case FREE_SPACE_TYPE:
@@ -1716,46 +1664,18 @@
 
 
 bool HeapNumber::HeapNumberBooleanValue() {
-  // NaN, +0, and -0 should return the false object
-#if __BYTE_ORDER == __LITTLE_ENDIAN
-  union IeeeDoubleLittleEndianArchType u;
-#elif __BYTE_ORDER == __BIG_ENDIAN
-  union IeeeDoubleBigEndianArchType u;
-#endif
-  u.d = value();
-  if (u.bits.exp == 2047) {
-    // Detect NaN for IEEE double precision floating point.
-    if ((u.bits.man_low | u.bits.man_high) != 0) return false;
-  }
-  if (u.bits.exp == 0) {
-    // Detect +0, and -0 for IEEE double precision floating point.
-    if ((u.bits.man_low | u.bits.man_high) == 0) return false;
-  }
-  return true;
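+  // DoubleToBoolean returns false for NaN, +0 and -0 (the cases the removed
+  // bit-pattern check above handled explicitly) and true otherwise.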
+  return DoubleToBoolean(value());
 }
 
 
-void HeapNumber::HeapNumberPrint(FILE* out) {
-  PrintF(out, "%.16g", Number());
-}
-
-
-void HeapNumber::HeapNumberPrint(StringStream* accumulator) {
-  // The Windows version of vsnprintf can allocate when printing a %g string
-  // into a buffer that may not be big enough.  We don't want random memory
-  // allocation when producing post-crash stack traces, so we print into a
-  // buffer that is plenty big enough for any floating point number, then
-  // print that using vsnprintf (which may truncate but never allocate if
-  // there is no more space in the buffer).
-  EmbeddedVector<char, 100> buffer;
-  SNPrintF(buffer, "%.16g", Number());
-  accumulator->Add("%s", buffer.start());
+void HeapNumber::HeapNumberPrint(OStream& os) {  // NOLINT
+  os << value();
 }
 
 
 String* JSReceiver::class_name() {
-  if (IsJSFunction() && IsJSFunctionProxy()) {
-    return GetHeap()->function_class_string();
+  if (IsJSFunction() || IsJSFunctionProxy()) {
+    return GetHeap()->Function_string();
   }
   if (map()->constructor()->IsJSFunction()) {
     JSFunction* constructor = JSFunction::cast(map()->constructor());
@@ -1793,7 +1713,7 @@
                                     PropertyAttributes attributes,
                                     Representation representation,
                                     TransitionFlag flag) {
-  ASSERT(DescriptorArray::kNotFound ==
+  DCHECK(DescriptorArray::kNotFound ==
          map->instance_descriptors()->Search(
              *name, map->NumberOfOwnDescriptors()));
 
@@ -1839,43 +1759,11 @@
 }
 
 
-void JSObject::AddFastProperty(Handle<JSObject> object,
-                               Handle<Name> name,
-                               Handle<Object> value,
-                               PropertyAttributes attributes,
-                               StoreFromKeyed store_mode,
-                               ValueType value_type,
-                               TransitionFlag flag) {
-  ASSERT(!object->IsJSGlobalProxy());
-
-  MaybeHandle<Map> maybe_map;
-  if (value->IsJSFunction()) {
-    maybe_map = Map::CopyWithConstant(
-        handle(object->map()), name, value, attributes, flag);
-  } else if (!object->TooManyFastProperties(store_mode)) {
-    Isolate* isolate = object->GetIsolate();
-    Representation representation = value->OptimalRepresentation(value_type);
-    maybe_map = Map::CopyWithField(
-        handle(object->map(), isolate), name,
-        value->OptimalType(isolate, representation),
-        attributes, representation, flag);
-  }
-
-  Handle<Map> new_map;
-  if (!maybe_map.ToHandle(&new_map)) {
-    NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
-    return;
-  }
-
-  JSObject::MigrateToNewProperty(object, new_map, value);
-}
-
-
 void JSObject::AddSlowProperty(Handle<JSObject> object,
                                Handle<Name> name,
                                Handle<Object> value,
                                PropertyAttributes attributes) {
-  ASSERT(!object->HasFastProperties());
+  DCHECK(!object->HasFastProperties());
   Isolate* isolate = object->GetIsolate();
   Handle<NameDictionary> dict(object->property_dictionary());
   if (object->IsGlobalObject()) {
@@ -1903,56 +1791,6 @@
 }
 
 
-MaybeHandle<Object> JSObject::AddProperty(
-    Handle<JSObject> object,
-    Handle<Name> name,
-    Handle<Object> value,
-    PropertyAttributes attributes,
-    StrictMode strict_mode,
-    JSReceiver::StoreFromKeyed store_mode,
-    ExtensibilityCheck extensibility_check,
-    ValueType value_type,
-    StoreMode mode,
-    TransitionFlag transition_flag) {
-  ASSERT(!object->IsJSGlobalProxy());
-  Isolate* isolate = object->GetIsolate();
-
-  if (!name->IsUniqueName()) {
-    name = isolate->factory()->InternalizeString(
-        Handle<String>::cast(name));
-  }
-
-  if (extensibility_check == PERFORM_EXTENSIBILITY_CHECK &&
-      !object->map()->is_extensible()) {
-    if (strict_mode == SLOPPY) {
-      return value;
-    } else {
-      Handle<Object> args[1] = { name };
-      Handle<Object> error = isolate->factory()->NewTypeError(
-          "object_not_extensible", HandleVector(args, ARRAY_SIZE(args)));
-      return isolate->Throw<Object>(error);
-    }
-  }
-
-  if (object->HasFastProperties()) {
-    AddFastProperty(object, name, value, attributes, store_mode,
-                    value_type, transition_flag);
-  }
-
-  if (!object->HasFastProperties()) {
-    AddSlowProperty(object, name, value, attributes);
-  }
-
-  if (object->map()->is_observed() &&
-      *name != isolate->heap()->hidden_string()) {
-    Handle<Object> old_value = isolate->factory()->the_hole_value();
-    EnqueueChangeRecord(object, "add", name, old_value);
-  }
-
-  return value;
-}
-
-
 Context* JSObject::GetCreationContext() {
   Object* constructor = this->map()->constructor();
   JSFunction* function;
@@ -1972,8 +1810,8 @@
                                    const char* type_str,
                                    Handle<Name> name,
                                    Handle<Object> old_value) {
-  ASSERT(!object->IsJSGlobalProxy());
-  ASSERT(!object->IsJSGlobalObject());
+  DCHECK(!object->IsJSGlobalProxy());
+  DCHECK(!object->IsJSGlobalObject());
   Isolate* isolate = object->GetIsolate();
   HandleScope scope(isolate);
   Handle<String> type = isolate->factory()->InternalizeUtf8String(type_str);
@@ -1987,41 +1825,6 @@
 }
 
 
-MaybeHandle<Object> JSObject::SetPropertyPostInterceptor(
-    Handle<JSObject> object,
-    Handle<Name> name,
-    Handle<Object> value,
-    PropertyAttributes attributes,
-    StrictMode strict_mode) {
-  // Check own property, ignore interceptor.
-  Isolate* isolate = object->GetIsolate();
-  LookupResult result(isolate);
-  object->LookupOwnRealNamedProperty(name, &result);
-  if (!result.IsFound()) {
-    object->map()->LookupTransition(*object, *name, &result);
-  }
-  return SetPropertyForResult(object, &result, name, value, attributes,
-                              strict_mode, MAY_BE_STORE_FROM_KEYED);
-}
-
-
-static void ReplaceSlowProperty(Handle<JSObject> object,
-                                Handle<Name> name,
-                                Handle<Object> value,
-                                PropertyAttributes attributes) {
-  NameDictionary* dictionary = object->property_dictionary();
-  int old_index = dictionary->FindEntry(name);
-  int new_enumeration_index = 0;  // 0 means "Use the next available index."
-  if (old_index != -1) {
-    // All calls to ReplaceSlowProperty have had all transitions removed.
-    new_enumeration_index = dictionary->DetailsAt(old_index).dictionary_index();
-  }
-
-  PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
-  JSObject::SetNormalizedProperty(object, name, value, new_details);
-}
-
-
 const char* Representation::Mnemonic() const {
   switch (kind_) {
     case kNone: return "v";
@@ -2038,70 +1841,21 @@
 }
 
 
-static void ZapEndOfFixedArray(Address new_end, int to_trim) {
-  // If we are doing a big trim in old space then we zap the space.
-  Object** zap = reinterpret_cast<Object**>(new_end);
-  zap++;  // Header of filler must be at least one word so skip that.
-  for (int i = 1; i < to_trim; i++) {
-    *zap++ = Smi::FromInt(0);
-  }
-}
-
-
-template<Heap::InvocationMode mode>
-static void RightTrimFixedArray(Heap* heap, FixedArray* elms, int to_trim) {
-  ASSERT(elms->map() != heap->fixed_cow_array_map());
-  // For now this trick is only applied to fixed arrays in new and paged space.
-  ASSERT(!heap->lo_space()->Contains(elms));
-
-  const int len = elms->length();
-
-  ASSERT(to_trim < len);
-
-  Address new_end = elms->address() + FixedArray::SizeFor(len - to_trim);
-
-  if (mode != Heap::FROM_GC || Heap::ShouldZapGarbage()) {
-    ZapEndOfFixedArray(new_end, to_trim);
-  }
-
-  int size_delta = to_trim * kPointerSize;
-
-  // Technically in new space this write might be omitted (except for
-  // debug mode which iterates through the heap), but to play safer
-  // we still do it.
-  heap->CreateFillerObjectAt(new_end, size_delta);
-
-  // We are storing the new length using release store after creating a filler
-  // for the left-over space to avoid races with the sweeper thread.
-  elms->synchronized_set_length(len - to_trim);
-
-  heap->AdjustLiveBytes(elms->address(), -size_delta, mode);
-
-  // The array may not be moved during GC,
-  // and size has to be adjusted nevertheless.
-  HeapProfiler* profiler = heap->isolate()->heap_profiler();
-  if (profiler->is_tracking_allocations()) {
-    profiler->UpdateObjectSizeEvent(elms->address(), elms->Size());
-  }
-}
-
-
-bool Map::InstancesNeedRewriting(Map* target,
-                                 int target_number_of_fields,
-                                 int target_inobject,
-                                 int target_unused) {
+bool Map::InstancesNeedRewriting(Map* target, int target_number_of_fields,
+                                 int target_inobject, int target_unused,
+                                 int* old_number_of_fields) {
   // If fields were added (or removed), rewrite the instance.
-  int number_of_fields = NumberOfFields();
-  ASSERT(target_number_of_fields >= number_of_fields);
-  if (target_number_of_fields != number_of_fields) return true;
+  *old_number_of_fields = NumberOfFields();
+  DCHECK(target_number_of_fields >= *old_number_of_fields);
+  if (target_number_of_fields != *old_number_of_fields) return true;
 
   // If a smi descriptor was replaced by a double descriptor or vice versa,
   // rewrite.
   DescriptorArray* old_desc = instance_descriptors();
   DescriptorArray* new_desc = target->instance_descriptors();
   int limit = NumberOfOwnDescriptors();
   for (int i = 0; i < limit; i++) {
-    if (new_desc->GetDetails(i).representation().IsDouble() &&
-        !old_desc->GetDetails(i).representation().IsDouble()) {
+    if (new_desc->GetDetails(i).representation().IsDouble() !=
+        old_desc->GetDetails(i).representation().IsDouble()) {
       return true;
     }
   }
@@ -2112,9 +1866,9 @@
   // In-object slack tracking may have reduced the object size of the new map.
   // In that case, succeed if all existing fields were inobject, and they still
   // fit within the new inobject size.
-  ASSERT(target_inobject < inobject_properties());
+  DCHECK(target_inobject < inobject_properties());
   if (target_number_of_fields <= target_inobject) {
-    ASSERT(target_number_of_fields + target_unused == target_inobject);
+    DCHECK(target_number_of_fields + target_unused == target_inobject);
     return false;
   }
   // Otherwise, properties will need to be moved to the backing store.
@@ -2122,19 +1876,43 @@
 }
 
 
-Handle<TransitionArray> Map::SetElementsTransitionMap(
-    Handle<Map> map, Handle<Map> transitioned_map) {
-  Handle<TransitionArray> transitions = TransitionArray::CopyInsert(
-      map,
-      map->GetIsolate()->factory()->elements_transition_symbol(),
-      transitioned_map,
-      FULL_TRANSITION);
-  map->set_transitions(*transitions);
-  return transitions;
+void Map::ConnectElementsTransition(Handle<Map> parent, Handle<Map> child) {
+  Isolate* isolate = parent->GetIsolate();
+  Handle<Name> name = isolate->factory()->elements_transition_symbol();
+  ConnectTransition(parent, child, name, FULL_TRANSITION);
 }
 
 
-// To migrate an instance to a map:
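+// Dispatches to MigrateFastToFast or MigrateFastToSlow depending on whether
+// the target map uses fast or dictionary properties; slow-to-slow migration
+// is a plain map switch.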
+void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
+  if (object->map() == *new_map) return;
+  if (object->HasFastProperties()) {
+    if (!new_map->is_dictionary_map()) {
+      Handle<Map> old_map(object->map());
+      MigrateFastToFast(object, new_map);
+      if (old_map->is_prototype_map()) {
+        // Clear out the old descriptor array to avoid problems with sharing
+        // the descriptor array without an explicit transition.
+        old_map->InitializeDescriptors(
+            old_map->GetHeap()->empty_descriptor_array());
+        // Ensure that no transition was inserted for prototype migrations.
+        DCHECK(!old_map->HasTransitionArray());
+        DCHECK(new_map->GetBackPointer()->IsUndefined());
+      }
+    } else {
+      MigrateFastToSlow(object, new_map, 0);
+    }
+  } else {
+    // For slow-to-fast migrations JSObject::TransformToFastProperties()
+    // must be used instead.
+    CHECK(new_map->is_dictionary_map());
+
+    // Slow-to-slow migration is trivial.
+    object->set_map(*new_map);
+  }
+}
+
+
+// To migrate a fast instance to a fast map:
 // - First check whether the instance needs to be rewritten. If not, simply
 //   change the map.
 // - Otherwise, allocate a fixed array large enough to hold all fields, in
@@ -2149,25 +1927,67 @@
 //     to temporarily store the inobject properties.
 //   * If there are properties left in the backing store, install the backing
 //     store.
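+// In addition, the code below special-cases a migration that merely appends
+// one field to the last-added descriptor, which can reuse the existing
+// layout or extend just the backing store.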
-void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
+void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
   Isolate* isolate = object->GetIsolate();
   Handle<Map> old_map(object->map());
+  int old_number_of_fields;
   int number_of_fields = new_map->NumberOfFields();
   int inobject = new_map->inobject_properties();
   int unused = new_map->unused_property_fields();
 
   // Nothing to do if no functions were converted to fields and no smis were
   // converted to doubles.
-  if (!old_map->InstancesNeedRewriting(
-          *new_map, number_of_fields, inobject, unused)) {
-    // Writing the new map here does not require synchronization since it does
-    // not change the actual object size.
+  if (!old_map->InstancesNeedRewriting(*new_map, number_of_fields, inobject,
+                                       unused, &old_number_of_fields)) {
     object->synchronized_set_map(*new_map);
     return;
   }
 
   int total_size = number_of_fields + unused;
   int external = total_size - inobject;
+
+  if (number_of_fields != old_number_of_fields &&
+      new_map->GetBackPointer() == *old_map) {
+    PropertyDetails details = new_map->GetLastDescriptorDetails();
+
+    if (old_map->unused_property_fields() > 0) {
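+      // The new field fits into the existing layout; only a double field
+      // needs a freshly allocated mutable HeapNumber as its backing storage.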
+      if (details.representation().IsDouble()) {
+        Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+        FieldIndex index =
+            FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
+        object->FastPropertyAtPut(index, *value);
+      }
+      object->synchronized_set_map(*new_map);
+      return;
+    }
+
+    DCHECK(number_of_fields == old_number_of_fields + 1);
+    // This migration is a transition from a map that has run out of property
+    // space.  Therefore it can be done by extending the backing store.
+    Handle<FixedArray> old_storage = handle(object->properties(), isolate);
+    Handle<FixedArray> new_storage =
+        FixedArray::CopySize(old_storage, external);
+
+    // Properly initialize newly added property.
+    Handle<Object> value;
+    if (details.representation().IsDouble()) {
+      value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+    } else {
+      value = isolate->factory()->uninitialized_value();
+    }
+    DCHECK(details.type() == FIELD);
+    int target_index = details.field_index() - inobject;
+    DCHECK(target_index >= 0);  // Must be a backing store index.
+    new_storage->set(target_index, *value);
+
+    // From here on we cannot fail and we shouldn't GC anymore.
+    DisallowHeapAllocation no_allocation;
+
+    // Set the new property value and do the map transition.
+    object->set_properties(*new_storage);
+    object->synchronized_set_map(*new_map);
+    return;
+  }
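+  // General case: allocate a fresh property array and copy/convert every
+  // existing field value into it, following the steps described above.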
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(total_size);
 
   Handle<DescriptorArray> old_descriptors(old_map->instance_descriptors());
@@ -2177,17 +1997,17 @@
 
   // This method only supports generalizing instances to at least the same
   // number of properties.
-  ASSERT(old_nof <= new_nof);
+  DCHECK(old_nof <= new_nof);
 
   for (int i = 0; i < old_nof; i++) {
     PropertyDetails details = new_descriptors->GetDetails(i);
     if (details.type() != FIELD) continue;
     PropertyDetails old_details = old_descriptors->GetDetails(i);
     if (old_details.type() == CALLBACKS) {
-      ASSERT(details.representation().IsTagged());
+      DCHECK(details.representation().IsTagged());
       continue;
     }
-    ASSERT(old_details.type() == CONSTANT ||
+    DCHECK(old_details.type() == CONSTANT ||
            old_details.type() == FIELD);
     Object* raw_value = old_details.type() == CONSTANT
         ? old_descriptors->GetValue(i)
@@ -2199,8 +2019,11 @@
         value = handle(Smi::FromInt(0), isolate);
       }
       value = Object::NewStorageFor(isolate, value, details.representation());
+    } else if (old_details.representation().IsDouble() &&
+               !details.representation().IsDouble()) {
+      value = Object::WrapForRead(isolate, value, old_details.representation());
     }
-    ASSERT(!(details.representation().IsDouble() && value->IsSmi()));
+    DCHECK(!(details.representation().IsDouble() && value->IsSmi()));
     int target_index = new_descriptors->GetFieldIndex(i) - inobject;
     if (target_index < 0) target_index += total_size;
     array->set(target_index, *value);
@@ -2211,7 +2034,7 @@
     if (details.type() != FIELD) continue;
     Handle<Object> value;
     if (details.representation().IsDouble()) {
-      value = isolate->factory()->NewHeapNumber(0);
+      value = isolate->factory()->NewHeapNumber(0, MUTABLE);
     } else {
       value = isolate->factory()->uninitialized_value();
     }
@@ -2231,43 +2054,41 @@
     object->FastPropertyAtPut(index, array->get(external + i));
   }
 
-  // Create filler object past the new instance size.
-  int new_instance_size = new_map->instance_size();
-  int instance_size_delta = old_map->instance_size() - new_instance_size;
-  ASSERT(instance_size_delta >= 0);
-  Address address = object->address() + new_instance_size;
-
-  // The trimming is performed on a newly allocated object, which is on a
-  // fresly allocated page or on an already swept page. Hence, the sweeper
-  // thread can not get confused with the filler creation. No synchronization
-  // needed.
-  isolate->heap()->CreateFillerObjectAt(address, instance_size_delta);
+  Heap* heap = isolate->heap();
 
   // If there are properties in the new backing store, trim it to the correct
   // size and install the backing store into the object.
   if (external > 0) {
-    RightTrimFixedArray<Heap::FROM_MUTATOR>(isolate->heap(), *array, inobject);
+    heap->RightTrimFixedArray<Heap::FROM_MUTATOR>(*array, inobject);
     object->set_properties(*array);
   }
 
-  // The trimming is performed on a newly allocated object, which is on a
-  // fresly allocated page or on an already swept page. Hence, the sweeper
-  // thread can not get confused with the filler creation. No synchronization
-  // needed.
-  object->set_map(*new_map);
+  // Create filler object past the new instance size.
+  int new_instance_size = new_map->instance_size();
+  int instance_size_delta = old_map->instance_size() - new_instance_size;
+  DCHECK(instance_size_delta >= 0);
+
+  if (instance_size_delta > 0) {
+    Address address = object->address();
+    heap->CreateFillerObjectAt(
+        address + new_instance_size, instance_size_delta);
+    heap->AdjustLiveBytes(address, -instance_size_delta, Heap::FROM_MUTATOR);
+  }
+
+  // We are storing the new map using release store after creating a filler for
+  // the left-over space to avoid races with the sweeper thread.
+  object->synchronized_set_map(*new_map);
 }
 
 
 void JSObject::GeneralizeFieldRepresentation(Handle<JSObject> object,
                                              int modify_index,
                                              Representation new_representation,
-                                             Handle<HeapType> new_field_type,
-                                             StoreMode store_mode) {
+                                             Handle<HeapType> new_field_type) {
   Handle<Map> new_map = Map::GeneralizeRepresentation(
-      handle(object->map()), modify_index, new_representation,
-      new_field_type, store_mode);
-  if (object->map() == *new_map) return;
-  return MigrateToMap(object, new_map);
+      handle(object->map()), modify_index, new_representation, new_field_type,
+      FORCE_FIELD);
+  MigrateToMap(object, new_map);
 }
 
 
@@ -2300,17 +2121,22 @@
 
   // Unless the instance is being migrated, ensure that modify_index is a field.
   PropertyDetails details = descriptors->GetDetails(modify_index);
-  if (store_mode == FORCE_FIELD && details.type() != FIELD) {
+  if (store_mode == FORCE_FIELD &&
+      (details.type() != FIELD || details.attributes() != attributes)) {
+    int field_index = details.type() == FIELD ? details.field_index()
+                                              : new_map->NumberOfFields();
     FieldDescriptor d(handle(descriptors->GetKey(modify_index), isolate),
-                      new_map->NumberOfFields(),
-                      attributes,
-                      Representation::Tagged());
+                      field_index, attributes, Representation::Tagged());
     descriptors->Replace(modify_index, &d);
-    int unused_property_fields = new_map->unused_property_fields() - 1;
-    if (unused_property_fields < 0) {
-      unused_property_fields += JSObject::kFieldsAdded;
+    if (details.type() != FIELD) {
+      int unused_property_fields = new_map->unused_property_fields() - 1;
+      if (unused_property_fields < 0) {
+        unused_property_fields += JSObject::kFieldsAdded;
+      }
+      new_map->set_unused_property_fields(unused_property_fields);
     }
-    new_map->set_unused_property_fields(unused_property_fields);
+  } else {
+    DCHECK(details.attributes() == attributes);
   }
 
   if (FLAG_trace_generalization) {
@@ -2401,7 +2227,7 @@
   DisallowHeapAllocation no_allocation;
 
   // This can only be called on roots of transition trees.
-  ASSERT(GetBackPointer()->IsUndefined());
+  DCHECK(GetBackPointer()->IsUndefined());
 
   Map* current = this;
 
@@ -2435,7 +2261,7 @@
 
 Map* Map::FindFieldOwner(int descriptor) {
   DisallowHeapAllocation no_allocation;
-  ASSERT_EQ(FIELD, instance_descriptors()->GetDetails(descriptor).type());
+  DCHECK_EQ(FIELD, instance_descriptors()->GetDetails(descriptor).type());
   Map* result = this;
   while (true) {
     Object* back = result->GetBackPointer();
@@ -2448,15 +2274,22 @@
 }
 
 
-void Map::UpdateDescriptor(int descriptor_number, Descriptor* desc) {
+void Map::UpdateFieldType(int descriptor, Handle<Name> name,
+                          Handle<HeapType> new_type) {
   DisallowHeapAllocation no_allocation;
+  PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
+  if (details.type() != FIELD) return;
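+  // Propagate the new type through the whole transition tree first; maps in
+  // one tree can share descriptor arrays, hence the "already updated" check
+  // below.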
   if (HasTransitionArray()) {
     TransitionArray* transitions = this->transitions();
     for (int i = 0; i < transitions->number_of_transitions(); ++i) {
-      transitions->GetTarget(i)->UpdateDescriptor(descriptor_number, desc);
+      transitions->GetTarget(i)->UpdateFieldType(descriptor, name, new_type);
     }
   }
-  instance_descriptors()->Replace(descriptor_number, desc);;
+  // Skip if already updated the shared descriptor.
+  if (instance_descriptors()->GetFieldType(descriptor) == *new_type) return;
+  FieldDescriptor d(name, instance_descriptors()->GetFieldIndex(descriptor),
+                    new_type, details.attributes(), details.representation());
+  instance_descriptors()->Replace(descriptor, &d);
 }
 
 
@@ -2470,9 +2303,9 @@
   if (type1->NowStable() && type2->NowStable()) {
     Handle<HeapType> type = HeapType::Union(type1, type2, isolate);
     if (type->NumClasses() <= kMaxClassesPerFieldType) {
-      ASSERT(type->NowStable());
-      ASSERT(type1->NowIs(type));
-      ASSERT(type2->NowIs(type));
+      DCHECK(type->NowStable());
+      DCHECK(type1->NowIs(type));
+      DCHECK(type2->NowIs(type));
       return type;
     }
   }
@@ -2490,7 +2323,7 @@
   Handle<HeapType> old_field_type(
       map->instance_descriptors()->GetFieldType(modify_index), isolate);
   if (new_field_type->NowIs(old_field_type)) {
-    ASSERT(Map::GeneralizeFieldType(old_field_type,
+    DCHECK(Map::GeneralizeFieldType(old_field_type,
                                     new_field_type,
                                     isolate)->NowIs(old_field_type));
     return;
@@ -2500,19 +2333,15 @@
   Handle<Map> field_owner(map->FindFieldOwner(modify_index), isolate);
   Handle<DescriptorArray> descriptors(
       field_owner->instance_descriptors(), isolate);
-  ASSERT_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
+  DCHECK_EQ(*old_field_type, descriptors->GetFieldType(modify_index));
 
   // Determine the generalized new field type.
   new_field_type = Map::GeneralizeFieldType(
       old_field_type, new_field_type, isolate);
 
   PropertyDetails details = descriptors->GetDetails(modify_index);
-  FieldDescriptor d(handle(descriptors->GetKey(modify_index), isolate),
-                    descriptors->GetFieldIndex(modify_index),
-                    new_field_type,
-                    details.attributes(),
-                    details.representation());
-  field_owner->UpdateDescriptor(modify_index, &d);
+  Handle<Name> name(descriptors->GetKey(modify_index));
+  field_owner->UpdateFieldType(modify_index, name, new_field_type);
   field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
       isolate, DependentCode::kFieldTypeGroup);
 
@@ -2566,8 +2395,8 @@
   if (old_representation.IsNone() &&
       !new_representation.IsNone() &&
       !new_representation.IsDouble()) {
-    ASSERT(old_details.type() == FIELD);
-    ASSERT(old_descriptors->GetFieldType(modify_index)->NowIs(
+    DCHECK(old_details.type() == FIELD);
+    DCHECK(old_descriptors->GetFieldType(modify_index)->NowIs(
             HeapType::None()));
     if (FLAG_trace_generalization) {
       old_map->PrintGeneralization(
@@ -2644,8 +2473,8 @@
         break;
       }
     } else {
-      ASSERT_EQ(tmp_type, old_type);
-      ASSERT_EQ(tmp_descriptors->GetValue(i), old_descriptors->GetValue(i));
+      DCHECK_EQ(tmp_type, old_type);
+      DCHECK_EQ(tmp_descriptors->GetValue(i), old_descriptors->GetValue(i));
     }
     target_map = tmp_map;
   }
@@ -2657,10 +2486,10 @@
   if (target_nof == old_nof &&
       (store_mode != FORCE_FIELD ||
        target_descriptors->GetDetails(modify_index).type() == FIELD)) {
-    ASSERT(modify_index < target_nof);
-    ASSERT(new_representation.fits_into(
+    DCHECK(modify_index < target_nof);
+    DCHECK(new_representation.fits_into(
             target_descriptors->GetDetails(modify_index).representation()));
-    ASSERT(target_descriptors->GetDetails(modify_index).type() != FIELD ||
+    DCHECK(target_descriptors->GetDetails(modify_index).type() != FIELD ||
            new_field_type->NowIs(
                target_descriptors->GetFieldType(modify_index)));
     return target_map;
@@ -2696,11 +2525,11 @@
       old_nof, old_descriptors->number_of_descriptors()) - old_nof;
   Handle<DescriptorArray> new_descriptors = DescriptorArray::Allocate(
       isolate, old_nof, new_slack);
-  ASSERT(new_descriptors->length() > target_descriptors->length() ||
+  DCHECK(new_descriptors->length() > target_descriptors->length() ||
          new_descriptors->NumberOfSlackDescriptors() > 0 ||
          new_descriptors->number_of_descriptors() ==
          old_descriptors->number_of_descriptors());
-  ASSERT(new_descriptors->number_of_descriptors() == old_nof);
+  DCHECK(new_descriptors->number_of_descriptors() == old_nof);
 
   // 0 -> |root_nof|
   int current_offset = 0;
@@ -2725,7 +2554,7 @@
       target_details = target_details.CopyWithRepresentation(
           new_representation.generalize(target_details.representation()));
     }
-    ASSERT_EQ(old_details.attributes(), target_details.attributes());
+    DCHECK_EQ(old_details.attributes(), target_details.attributes());
     if (old_details.type() == FIELD ||
         target_details.type() == FIELD ||
         (modify_index == i && store_mode == FORCE_FIELD) ||
@@ -2751,7 +2580,7 @@
                         target_details.representation());
       new_descriptors->Set(i, &d);
     } else {
-      ASSERT_NE(FIELD, target_details.type());
+      DCHECK_NE(FIELD, target_details.type());
       Descriptor d(target_key,
                    handle(target_descriptors->GetValue(i), isolate),
                    target_details);
@@ -2781,7 +2610,7 @@
                         old_details.representation());
       new_descriptors->Set(i, &d);
     } else {
-      ASSERT(old_details.type() == CONSTANT || old_details.type() == CALLBACKS);
+      DCHECK(old_details.type() == CONSTANT || old_details.type() == CALLBACKS);
       if (modify_index == i && store_mode == FORCE_FIELD) {
         FieldDescriptor d(old_key,
                           current_offset++,
@@ -2793,7 +2622,7 @@
                           old_details.representation());
         new_descriptors->Set(i, &d);
       } else {
-        ASSERT_NE(FIELD, old_details.type());
+        DCHECK_NE(FIELD, old_details.type());
         Descriptor d(old_key,
                      handle(old_descriptors->GetValue(i), isolate),
                      old_details);
@@ -2804,13 +2633,13 @@
 
   new_descriptors->Sort();
 
-  ASSERT(store_mode != FORCE_FIELD ||
+  DCHECK(store_mode != FORCE_FIELD ||
          new_descriptors->GetDetails(modify_index).type() == FIELD);
 
   Handle<Map> split_map(root_map->FindLastMatchMap(
           root_nof, old_nof, *new_descriptors), isolate);
   int split_nof = split_map->NumberOfOwnDescriptors();
-  ASSERT_NE(old_nof, split_nof);
+  DCHECK_NE(old_nof, split_nof);
 
   split_map->DeprecateTarget(
       old_descriptors->GetKey(split_nof), *new_descriptors);
@@ -2859,7 +2688,7 @@
 
 
 // static
-MaybeHandle<Map> Map::CurrentMapForDeprecated(Handle<Map> map) {
+MaybeHandle<Map> Map::TryUpdate(Handle<Map> map) {
   Handle<Map> proto_map(map);
   while (proto_map->prototype()->IsJSObject()) {
     Handle<JSObject> holder(JSObject::cast(proto_map->prototype()));
@@ -2868,12 +2697,21 @@
       proto_map = Handle<Map>(holder->map());
     }
   }
-  return CurrentMapForDeprecatedInternal(map);
+  return TryUpdateInternal(map);
 }
 
 
 // static
-MaybeHandle<Map> Map::CurrentMapForDeprecatedInternal(Handle<Map> old_map) {
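+// Returns an up-to-date, non-deprecated version of |map|, reconstructing it
+// via representation generalization when the input is deprecated.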
+Handle<Map> Map::Update(Handle<Map> map) {
+  if (!map->is_deprecated()) return map;
+  return GeneralizeRepresentation(map, 0, Representation::None(),
+                                  HeapType::None(map->GetIsolate()),
+                                  ALLOW_AS_CONSTANT);
+}
+
+
+// static
+MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
   DisallowHeapAllocation no_allocation;
   DisallowDeoptimization no_deoptimization(old_map->GetIsolate());
 
@@ -2924,9 +2762,6 @@
         break;
 
       case NORMAL:
-      case HANDLER:
-      case INTERCEPTOR:
-      case NONEXISTENT:
         UNREACHABLE();
     }
   }
@@ -2935,50 +2770,239 @@
 }
 
 
-MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(
-    Handle<JSObject> object,
-    Handle<Name> name,
-    Handle<Object> value,
-    PropertyAttributes attributes,
-    StrictMode strict_mode) {
+MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
+                                                         Handle<Object> value) {
   // TODO(rossberg): Support symbols in the API.
-  if (name->IsSymbol()) return value;
-  Isolate* isolate = object->GetIsolate();
-  Handle<String> name_string = Handle<String>::cast(name);
-  Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
-  if (!interceptor->setter()->IsUndefined()) {
-    LOG(isolate,
-        ApiNamedPropertyAccess("interceptor-named-set", *object, *name));
-    PropertyCallbackArguments args(
-        isolate, interceptor->data(), *object, *object);
-    v8::NamedPropertySetterCallback setter =
-        v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
-    Handle<Object> value_unhole = value->IsTheHole()
-        ? Handle<Object>(isolate->factory()->undefined_value()) : value;
-    v8::Handle<v8::Value> result = args.Call(setter,
-                                             v8::Utils::ToLocal(name_string),
-                                             v8::Utils::ToLocal(value_unhole));
-    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    if (!result.IsEmpty()) return value;
-  }
-  return SetPropertyPostInterceptor(
-      object, name, value, attributes, strict_mode);
+  if (it->name()->IsSymbol()) return value;
+
+  Handle<String> name_string = Handle<String>::cast(it->name());
+  Handle<JSObject> holder = it->GetHolder<JSObject>();
+  Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
+  if (interceptor->setter()->IsUndefined()) return MaybeHandle<Object>();
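+  // An empty MaybeHandle (with no pending exception) means "the interceptor
+  // did not handle the store"; callers then continue the regular lookup.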
+
+  LOG(it->isolate(),
+      ApiNamedPropertyAccess("interceptor-named-set", *holder, *name_string));
+  PropertyCallbackArguments args(it->isolate(), interceptor->data(), *holder,
+                                 *holder);
+  v8::NamedPropertySetterCallback setter =
+      v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
+  v8::Handle<v8::Value> result = args.Call(
+      setter, v8::Utils::ToLocal(name_string), v8::Utils::ToLocal(value));
+  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
+  if (!result.IsEmpty()) return value;
+
+  return MaybeHandle<Object>();
 }
 
 
-MaybeHandle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
-                                            Handle<Name> name,
+MaybeHandle<Object> Object::SetProperty(Handle<Object> object,
+                                        Handle<Name> name, Handle<Object> value,
+                                        StrictMode strict_mode,
+                                        StoreFromKeyed store_mode) {
+  LookupIterator it(object, name);
+  return SetProperty(&it, value, strict_mode, store_mode);
+}
+
+
+MaybeHandle<Object> Object::SetProperty(LookupIterator* it,
+                                        Handle<Object> value,
+                                        StrictMode strict_mode,
+                                        StoreFromKeyed store_mode) {
+  // Make sure that the top context does not change when doing callbacks or
+  // interceptor calls.
+  AssertNoContextChange ncc(it->isolate());
+
+  bool done = false;
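+  // Walk the receiver and its prototype chain via the LookupIterator. |done|
+  // marks a definitive outcome short of a store, after which we fall through
+  // to AddDataProperty below.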
+  for (; it->IsFound(); it->Next()) {
+    switch (it->state()) {
+      case LookupIterator::NOT_FOUND:
+        UNREACHABLE();
+
+      case LookupIterator::ACCESS_CHECK:
+        // TODO(verwaest): Remove the distinction. This is mostly bogus since we
+        // don't know whether we'll want to fetch attributes or call a setter
+        // until we find the property.
+        if (it->HasAccess(v8::ACCESS_SET)) break;
+        return JSObject::SetPropertyWithFailedAccessCheck(it, value,
+                                                          strict_mode);
+
+      case LookupIterator::JSPROXY:
+        if (it->HolderIsReceiverOrHiddenPrototype()) {
+          return JSProxy::SetPropertyWithHandler(it->GetHolder<JSProxy>(),
+                                                 it->GetReceiver(), it->name(),
+                                                 value, strict_mode);
+        } else {
+          // TODO(verwaest): Use the MaybeHandle to indicate result.
+          bool has_result = false;
+          MaybeHandle<Object> maybe_result =
+              JSProxy::SetPropertyViaPrototypesWithHandler(
+                  it->GetHolder<JSProxy>(), it->GetReceiver(), it->name(),
+                  value, strict_mode, &has_result);
+          if (has_result) return maybe_result;
+          done = true;
+        }
+        break;
+
+      case LookupIterator::INTERCEPTOR:
+        if (it->HolderIsReceiverOrHiddenPrototype()) {
+          MaybeHandle<Object> maybe_result =
+              JSObject::SetPropertyWithInterceptor(it, value);
+          if (!maybe_result.is_null()) return maybe_result;
+          if (it->isolate()->has_pending_exception()) return maybe_result;
+        } else {
+          Maybe<PropertyAttributes> maybe_attributes =
+              JSObject::GetPropertyAttributesWithInterceptor(
+                  it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
+          if (!maybe_attributes.has_value) return MaybeHandle<Object>();
+          done = maybe_attributes.value != ABSENT;
+          if (done && (maybe_attributes.value & READ_ONLY) != 0) {
+            return WriteToReadOnlyProperty(it, value, strict_mode);
+          }
+        }
+        break;
+
+      case LookupIterator::ACCESSOR:
+        if (it->property_details().IsReadOnly()) {
+          return WriteToReadOnlyProperty(it, value, strict_mode);
+        }
+        if (it->HolderIsReceiverOrHiddenPrototype() ||
+            !it->GetAccessors()->IsDeclaredAccessorInfo()) {
+          return SetPropertyWithAccessor(it->GetReceiver(), it->name(), value,
+                                         it->GetHolder<JSObject>(),
+                                         it->GetAccessors(), strict_mode);
+        }
+        done = true;
+        break;
+
+      case LookupIterator::DATA:
+        if (it->property_details().IsReadOnly()) {
+          return WriteToReadOnlyProperty(it, value, strict_mode);
+        }
+        if (it->HolderIsReceiverOrHiddenPrototype()) {
+          return SetDataProperty(it, value);
+        }
+        done = true;
+        break;
+
+      case LookupIterator::TRANSITION:
+        done = true;
+        break;
+    }
+
+    if (done) break;
+  }
+
+  // If the receiver is the JSGlobalObject, the store was contextual. In case
+  // the property did not exist yet on the global object itself, we have to
+  // throw a reference error in strict mode.
+  if (it->GetReceiver()->IsJSGlobalObject() && strict_mode == STRICT) {
+    Handle<Object> args[1] = {it->name()};
+    THROW_NEW_ERROR(it->isolate(),
+                    NewReferenceError("not_defined", HandleVector(args, 1)),
+                    Object);
+  }
+
+  return AddDataProperty(it, value, NONE, strict_mode, store_mode);
+}
+
+
+MaybeHandle<Object> Object::WriteToReadOnlyProperty(LookupIterator* it,
+                                                    Handle<Object> value,
+                                                    StrictMode strict_mode) {
+  if (strict_mode != STRICT) return value;
+
+  Handle<Object> args[] = {it->name(), it->GetReceiver()};
+  THROW_NEW_ERROR(it->isolate(),
+                  NewTypeError("strict_read_only_property",
+                               HandleVector(args, arraysize(args))),
+                  Object);
+}
+
+
+Handle<Object> Object::SetDataProperty(LookupIterator* it,
+                                       Handle<Object> value) {
+  // Proxies are handled on the WithHandler path. Other non-JSObjects cannot
+  // have own properties.
+  Handle<JSObject> receiver = Handle<JSObject>::cast(it->GetReceiver());
+
+  // Store on the holder which may be hidden behind the receiver.
+  DCHECK(it->HolderIsReceiverOrHiddenPrototype());
+
+  // Old value for the observation change record.
+  // Fetch before transforming the object since the encoding may become
+  // incompatible with what's cached in |it|.
+  bool is_observed =
+      receiver->map()->is_observed() &&
+      !it->name().is_identical_to(it->factory()->hidden_string());
+  MaybeHandle<Object> maybe_old;
+  if (is_observed) maybe_old = it->GetDataValue();
+
+  // Possibly migrate to the most up-to-date map that will be able to store
+  // |value| under it->name().
+  it->PrepareForDataProperty(value);
+
+  // Write the property value.
+  it->WriteDataValue(value);
+
+  // Send the change record if there are observers.
+  if (is_observed && !value->SameValue(*maybe_old.ToHandleChecked())) {
+    JSObject::EnqueueChangeRecord(receiver, "update", it->name(),
+                                  maybe_old.ToHandleChecked());
+  }
+
+  return value;
+}
+
+
+MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
                                             Handle<Object> value,
                                             PropertyAttributes attributes,
                                             StrictMode strict_mode,
                                             StoreFromKeyed store_mode) {
-  LookupResult result(object->GetIsolate());
-  object->LookupOwn(name, &result, true);
-  if (!result.IsFound()) {
-    object->map()->LookupTransition(JSObject::cast(*object), *name, &result);
+  DCHECK(!it->GetReceiver()->IsJSProxy());
+  if (!it->GetReceiver()->IsJSObject()) {
+    // TODO(verwaest): Throw a TypeError with a more specific message.
+    return WriteToReadOnlyProperty(it, value, strict_mode);
   }
-  return SetProperty(object, &result, name, value, attributes, strict_mode,
-                     store_mode);
+
+  Handle<JSObject> receiver = it->GetStoreTarget();
+
+  // If the receiver is a JSGlobalProxy, store on the prototype (JSGlobalObject)
+  // instead. If the prototype is Null, the proxy is detached.
+  if (receiver->IsJSGlobalProxy()) return value;
+
+  // Possibly migrate to the most up-to-date map that will be able to store
+  // |value| under it->name() with |attributes|.
+  it->PrepareTransitionToDataProperty(value, attributes, store_mode);
+  if (it->state() != LookupIterator::TRANSITION) {
+    if (strict_mode == SLOPPY) return value;
+
+    Handle<Object> args[1] = {it->name()};
+    THROW_NEW_ERROR(it->isolate(),
+                    NewTypeError("object_not_extensible",
+                                 HandleVector(args, arraysize(args))),
+                    Object);
+  }
+  it->ApplyTransitionToDataProperty();
+
+  // TODO(verwaest): Encapsulate dictionary handling better.
+  if (receiver->map()->is_dictionary_map()) {
+    // TODO(verwaest): Probably should ensure this is done beforehand.
+    it->InternalizeName();
+    JSObject::AddSlowProperty(receiver, it->name(), value, attributes);
+  } else {
+    // Write the property value.
+    it->WriteDataValue(value);
+  }
+
+  // Send the change record if there are observers.
+  if (receiver->map()->is_observed() &&
+      !it->name().is_identical_to(it->factory()->hidden_string())) {
+    JSObject::EnqueueChangeRecord(receiver, "add", it->name(),
+                                  it->factory()->the_hole_value());
+  }
+
+  return value;
 }
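
The strict/sloppy split above (a failed add on a non-extensible receiver is silently dropped in sloppy mode but throws object_not_extensible in strict mode) can be modeled the same way. A hedged sketch under that reading; SimpleObject and Mode are illustrative names only:

#include <map>
#include <stdexcept>
#include <string>

enum class Mode { kSloppy, kStrict };  // stand-in for SLOPPY/STRICT

struct SimpleObject {
  std::map<std::string, std::string> properties;
  bool extensible = true;
};

// Mirrors the shape of Object::AddDataProperty: adding to a non-extensible
// object is a silent no-op in sloppy mode and a TypeError in strict mode.
void AddDataProperty(SimpleObject& o, const std::string& name,
                     const std::string& value, Mode mode) {
  if (!o.extensible) {
    if (mode == Mode::kSloppy) return;  // silently drop the store
    throw std::runtime_error("TypeError: object_not_extensible");
  }
  o.properties.emplace(name, value);  // the "add" itself
}

int main() {
  SimpleObject o;
  o.extensible = false;
  AddDataProperty(o, "x", "1", Mode::kSloppy);  // ignored
  try {
    AddDataProperty(o, "x", "1", Mode::kStrict);
  } catch (const std::exception&) {
    return 0;  // the strict-mode TypeError surfaced as expected
  }
  return 1;
}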
 
 
@@ -2989,20 +3013,16 @@
     bool* found,
     StrictMode strict_mode) {
   Isolate *isolate = object->GetIsolate();
-  for (Handle<Object> proto = handle(object->GetPrototype(), isolate);
-       !proto->IsNull();
-       proto = handle(proto->GetPrototype(isolate), isolate)) {
-    if (proto->IsJSProxy()) {
+  for (PrototypeIterator iter(isolate, object); !iter.IsAtEnd();
+       iter.Advance()) {
+    if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
       return JSProxy::SetPropertyViaPrototypesWithHandler(
-          Handle<JSProxy>::cast(proto),
-          object,
+          Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), object,
           isolate->factory()->Uint32ToString(index),  // name
-          value,
-          NONE,
-          strict_mode,
-          found);
+          value, strict_mode, found);
     }
-    Handle<JSObject> js_proto = Handle<JSObject>::cast(proto);
+    Handle<JSObject> js_proto =
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
     if (!js_proto->HasDictionaryElements()) {
       continue;
     }
@@ -3023,70 +3043,9 @@
 }
 
 
-MaybeHandle<Object> JSObject::SetPropertyViaPrototypes(
-    Handle<JSObject> object,
-    Handle<Name> name,
-    Handle<Object> value,
-    PropertyAttributes attributes,
-    StrictMode strict_mode,
-    bool* done) {
-  Isolate* isolate = object->GetIsolate();
-
-  *done = false;
-  // We could not find an own property, so let's check whether there is an
-  // accessor that wants to handle the property, or whether the property is
-  // read-only on the prototype chain.
-  LookupResult result(isolate);
-  object->LookupRealNamedPropertyInPrototypes(name, &result);
-  if (result.IsFound()) {
-    switch (result.type()) {
-      case NORMAL:
-      case FIELD:
-      case CONSTANT:
-        *done = result.IsReadOnly();
-        break;
-      case INTERCEPTOR: {
-        LookupIterator it(object, name, handle(result.holder()));
-        PropertyAttributes attr = GetPropertyAttributes(&it);
-        *done = !!(attr & READ_ONLY);
-        break;
-      }
-      case CALLBACKS: {
-        *done = true;
-        if (!result.IsReadOnly()) {
-          Handle<Object> callback_object(result.GetCallbackObject(), isolate);
-          return SetPropertyWithCallback(object, name, value,
-                                         handle(result.holder()),
-                                         callback_object, strict_mode);
-        }
-        break;
-      }
-      case HANDLER: {
-        Handle<JSProxy> proxy(result.proxy());
-        return JSProxy::SetPropertyViaPrototypesWithHandler(
-            proxy, object, name, value, attributes, strict_mode, done);
-      }
-      case NONEXISTENT:
-        UNREACHABLE();
-        break;
-    }
-  }
-
-  // If we get here with *done true, we have encountered a read-only property.
-  if (*done) {
-    if (strict_mode == SLOPPY) return value;
-    Handle<Object> args[] = { name, object };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
-    return isolate->Throw<Object>(error);
-  }
-  return isolate->factory()->the_hole_value();
-}
-
-
 void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
   // Only supports adding slack to owned descriptors.
-  ASSERT(map->owns_descriptors());
+  DCHECK(map->owns_descriptors());
 
   Handle<DescriptorArray> descriptors(map->instance_descriptors());
   int old_size = map->NumberOfOwnDescriptors();
@@ -3206,7 +3165,7 @@
   int nof = map->NumberOfOwnDescriptors();
   Handle<DescriptorArray> array(map->instance_descriptors());
   NeanderArray callbacks(descriptors);
-  ASSERT(array->NumberOfSlackDescriptors() >= callbacks.length());
+  DCHECK(array->NumberOfSlackDescriptors() >= callbacks.length());
   nof = AppendUniqueCallbacks<DescriptorArrayAppender>(&callbacks, array, nof);
   map->SetNumberOfOwnDescriptors(nof);
 }
@@ -3216,7 +3175,7 @@
                                Handle<FixedArray> array,
                                int valid_descriptors) {
   NeanderArray callbacks(descriptors);
-  ASSERT(array->length() >= callbacks.length() + valid_descriptors);
+  DCHECK(array->length() >= callbacks.length() + valid_descriptors);
   return AppendUniqueCallbacks<FixedArrayAppender>(&callbacks,
                                                    array,
                                                    valid_descriptors);
@@ -3224,7 +3183,7 @@
 
 
 static bool ContainsMap(MapHandleList* maps, Handle<Map> map) {
-  ASSERT(!map.is_null());
+  DCHECK(!map.is_null());
   for (int i = 0; i < maps->length(); ++i) {
     if (!maps->at(i).is_null() && maps->at(i).is_identical_to(map)) return true;
   }
@@ -3290,12 +3249,12 @@
   }
 
   if (to_kind != kind && current_map->HasElementsTransition()) {
-    ASSERT(to_kind == DICTIONARY_ELEMENTS);
+    DCHECK(to_kind == DICTIONARY_ELEMENTS);
     Map* next_map = current_map->elements_transition_map();
     if (next_map->elements_kind() == to_kind) return next_map;
   }
 
-  ASSERT(current_map->elements_kind() == target_kind);
+  DCHECK(current_map->elements_kind() == target_kind);
   return current_map;
 }
 
@@ -3323,15 +3282,17 @@
 
 static Handle<Map> AddMissingElementsTransitions(Handle<Map> map,
                                                  ElementsKind to_kind) {
-  ASSERT(IsTransitionElementsKind(map->elements_kind()));
+  DCHECK(IsTransitionElementsKind(map->elements_kind()));
 
   Handle<Map> current_map = map;
 
   ElementsKind kind = map->elements_kind();
-  while (kind != to_kind && !IsTerminalElementsKind(kind)) {
-    kind = GetNextTransitionElementsKind(kind);
-    current_map = Map::CopyAsElementsKind(
-        current_map, kind, INSERT_TRANSITION);
+  if (!map->is_prototype_map()) {
+    while (kind != to_kind && !IsTerminalElementsKind(kind)) {
+      kind = GetNextTransitionElementsKind(kind);
+      current_map =
+          Map::CopyAsElementsKind(current_map, kind, INSERT_TRANSITION);
+    }
   }
 
   // In case we are exiting the fast elements kind system, just add the map in
@@ -3341,7 +3302,7 @@
         current_map, to_kind, INSERT_TRANSITION);
   }
 
-  ASSERT(current_map->elements_kind() == to_kind);
+  DCHECK(current_map->elements_kind() == to_kind);
   return current_map;
 }
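
The loop above generalizes the elements kind one step at a time until it reaches the target or a terminal kind. A toy version over a simplified smi -> double -> object lattice (the Kind enum and Next() are illustrative, far smaller than V8's real ElementsKind set):

#include <cassert>

// Simplified stand-in for V8's ElementsKind lattice; the real set is larger.
enum class Kind { kSmi, kDouble, kObject /* terminal */ };

Kind Next(Kind k) {  // one generalization step along the chain
  return k == Kind::kSmi ? Kind::kDouble : Kind::kObject;
}

bool IsTerminal(Kind k) { return k == Kind::kObject; }

// Mirrors the loop in AddMissingElementsTransitions: advance one kind at a
// time until we reach the target or run off the end of the fast-kind chain.
Kind WalkTransitions(Kind from, Kind to) {
  Kind k = from;
  while (k != to && !IsTerminal(k)) k = Next(k);
  return k;
}

int main() {
  assert(WalkTransitions(Kind::kSmi, Kind::kObject) == Kind::kObject);
  assert(WalkTransitions(Kind::kDouble, Kind::kDouble) == Kind::kDouble);
  return 0;
}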
 
@@ -3380,7 +3341,7 @@
   bool allow_store_transition =
       // Only remember the map transition if there is no pre-existing,
       // non-matching elements transition.
-      !map->IsUndefined() && !map->is_shared() &&
+      !map->IsUndefined() && !map->is_dictionary_map() &&
       IsTransitionElementsKind(from_kind);
 
   // Only store fast element maps in ascending generality.
@@ -3417,129 +3378,29 @@
 }
 
 
-void JSObject::LookupOwnRealNamedProperty(Handle<Name> name,
-                                          LookupResult* result) {
-  DisallowHeapAllocation no_gc;
-  if (IsJSGlobalProxy()) {
-    Object* proto = GetPrototype();
-    if (proto->IsNull()) return result->NotFound();
-    ASSERT(proto->IsJSGlobalObject());
-    return JSObject::cast(proto)->LookupOwnRealNamedProperty(name, result);
-  }
-
-  if (HasFastProperties()) {
-    map()->LookupDescriptor(this, *name, result);
-    // A property or a map transition was found. We return all of these result
-    // types because LookupOwnRealNamedProperty is used when setting
-    // properties where map transitions are handled.
-    ASSERT(!result->IsFound() ||
-           (result->holder() == this && result->IsFastPropertyType()));
-    // Disallow caching for uninitialized constants. These can only
-    // occur as fields.
-    if (result->IsField() &&
-        result->IsReadOnly() &&
-        RawFastPropertyAt(result->GetFieldIndex())->IsTheHole()) {
-      result->DisallowCaching();
-    }
-    return;
-  }
-
-  int entry = property_dictionary()->FindEntry(name);
-  if (entry != NameDictionary::kNotFound) {
-    Object* value = property_dictionary()->ValueAt(entry);
-    if (IsGlobalObject()) {
-      PropertyDetails d = property_dictionary()->DetailsAt(entry);
-      if (d.IsDeleted()) {
-        result->NotFound();
-        return;
-      }
-      value = PropertyCell::cast(value)->value();
-    }
-    // Make sure to disallow caching for uninitialized constants
-    // found in the dictionary-mode objects.
-    if (value->IsTheHole()) result->DisallowCaching();
-    result->DictionaryResult(this, entry);
-    return;
-  }
-
-  result->NotFound();
-}
-
-
-void JSObject::LookupRealNamedProperty(Handle<Name> name,
-                                       LookupResult* result) {
-  DisallowHeapAllocation no_gc;
-  LookupOwnRealNamedProperty(name, result);
-  if (result->IsFound()) return;
-
-  LookupRealNamedPropertyInPrototypes(name, result);
-}
-
-
-void JSObject::LookupRealNamedPropertyInPrototypes(Handle<Name> name,
-                                                   LookupResult* result) {
-  DisallowHeapAllocation no_gc;
-  Isolate* isolate = GetIsolate();
-  Heap* heap = isolate->heap();
-  for (Object* pt = GetPrototype();
-       pt != heap->null_value();
-       pt = pt->GetPrototype(isolate)) {
-    if (pt->IsJSProxy()) {
-      return result->HandlerResult(JSProxy::cast(pt));
-    }
-    JSObject::cast(pt)->LookupOwnRealNamedProperty(name, result);
-    ASSERT(!(result->IsFound() && result->type() == INTERCEPTOR));
-    if (result->IsFound()) return;
-  }
-  result->NotFound();
-}
-
-
-MaybeHandle<Object> JSReceiver::SetProperty(Handle<JSReceiver> object,
-                                            LookupResult* result,
-                                            Handle<Name> key,
-                                            Handle<Object> value,
-                                            PropertyAttributes attributes,
-                                            StrictMode strict_mode,
-                                            StoreFromKeyed store_mode) {
-  if (result->IsHandler()) {
-    return JSProxy::SetPropertyWithHandler(handle(result->proxy()),
-        object, key, value, attributes, strict_mode);
-  } else {
-    return JSObject::SetPropertyForResult(Handle<JSObject>::cast(object),
-        result, key, value, attributes, strict_mode, store_mode);
-  }
-}
-
-
-bool JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name) {
+Maybe<bool> JSProxy::HasPropertyWithHandler(Handle<JSProxy> proxy,
+                                            Handle<Name> name) {
   Isolate* isolate = proxy->GetIsolate();
 
   // TODO(rossberg): adjust once there is a story for symbols vs proxies.
-  if (name->IsSymbol()) return false;
+  if (name->IsSymbol()) return maybe(false);
 
   Handle<Object> args[] = { name };
   Handle<Object> result;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, result,
-      CallTrap(proxy,
-               "has",
-               isolate->derived_has_trap(),
-               ARRAY_SIZE(args),
-               args),
-      false);
+      isolate, result, CallTrap(proxy, "has", isolate->derived_has_trap(),
+                                arraysize(args), args),
+      Maybe<bool>());
 
-  return result->BooleanValue();
+  return maybe(result->BooleanValue());
 }
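
The recurring change in these hunks, bool and PropertyAttributes returns becoming Maybe<bool> and Maybe<PropertyAttributes>, lets callers distinguish "the trap answered false" from "the trap threw". A minimal model of that idea (a simplified Maybe, not V8's actual template):

#include <cassert>

// Simplified model of the Maybe<T> used throughout this patch: a value plus
// a has_value flag. An empty Maybe means "an exception is pending", which a
// bare bool return can never express, since false is itself a valid answer.
template <typename T>
struct Maybe {
  bool has_value;
  T value;
};

template <typename T>
Maybe<T> maybe(T value) { return {true, value}; }  // analogue of the helper

Maybe<bool> HasProperty(bool trap_threw, bool trap_result) {
  if (trap_threw) return Maybe<bool>{false, false};  // exception pending
  return maybe(trap_result);                         // a real answer
}

int main() {
  assert(!HasProperty(true, false).has_value);       // caller must propagate
  assert(HasProperty(false, false).value == false);  // "no" is still a value
  return 0;
}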
 
 
-MaybeHandle<Object> JSProxy::SetPropertyWithHandler(
-    Handle<JSProxy> proxy,
-    Handle<JSReceiver> receiver,
-    Handle<Name> name,
-    Handle<Object> value,
-    PropertyAttributes attributes,
-    StrictMode strict_mode) {
+MaybeHandle<Object> JSProxy::SetPropertyWithHandler(Handle<JSProxy> proxy,
+                                                    Handle<Object> receiver,
+                                                    Handle<Name> name,
+                                                    Handle<Object> value,
+                                                    StrictMode strict_mode) {
   Isolate* isolate = proxy->GetIsolate();
 
   // TODO(rossberg): adjust once there is a story for symbols vs proxies.
@@ -3551,7 +3412,7 @@
       CallTrap(proxy,
                "set",
                isolate->derived_set_trap(),
-               ARRAY_SIZE(args),
+               arraysize(args),
                args),
       Object);
 
@@ -3560,13 +3421,8 @@
 
 
 MaybeHandle<Object> JSProxy::SetPropertyViaPrototypesWithHandler(
-    Handle<JSProxy> proxy,
-    Handle<JSReceiver> receiver,
-    Handle<Name> name,
-    Handle<Object> value,
-    PropertyAttributes attributes,
-    StrictMode strict_mode,
-    bool* done) {
+    Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
+    Handle<Object> value, StrictMode strict_mode, bool* done) {
   Isolate* isolate = proxy->GetIsolate();
   Handle<Object> handler(proxy->handler(), isolate);  // Trap might morph proxy.
 
@@ -3584,7 +3440,7 @@
       CallTrap(proxy,
                "getPropertyDescriptor",
                Handle<Object>(),
-               ARRAY_SIZE(args),
+               arraysize(args),
                args),
       Object);
 
@@ -3601,54 +3457,52 @@
       Execution::Call(isolate,
                       isolate->to_complete_property_descriptor(),
                       result,
-                      ARRAY_SIZE(argv),
+                      arraysize(argv),
                       argv),
       Object);
 
   // [[GetProperty]] requires checking that all properties are configurable.
   Handle<String> configurable_name =
       isolate->factory()->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("configurable_"));
+          STATIC_CHAR_VECTOR("configurable_"));
   Handle<Object> configurable =
       Object::GetProperty(desc, configurable_name).ToHandleChecked();
-  ASSERT(configurable->IsBoolean());
+  DCHECK(configurable->IsBoolean());
   if (configurable->IsFalse()) {
-    Handle<String> trap =
-        isolate->factory()->InternalizeOneByteString(
-            STATIC_ASCII_VECTOR("getPropertyDescriptor"));
+    Handle<String> trap = isolate->factory()->InternalizeOneByteString(
+        STATIC_CHAR_VECTOR("getPropertyDescriptor"));
     Handle<Object> args[] = { handler, trap, name };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
-    return isolate->Throw<Object>(error);
+    THROW_NEW_ERROR(isolate, NewTypeError("proxy_prop_not_configurable",
+                                          HandleVector(args, arraysize(args))),
+                    Object);
   }
-  ASSERT(configurable->IsTrue());
+  DCHECK(configurable->IsTrue());
 
   // Check for DataDescriptor.
   Handle<String> hasWritable_name =
       isolate->factory()->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("hasWritable_"));
+          STATIC_CHAR_VECTOR("hasWritable_"));
   Handle<Object> hasWritable =
       Object::GetProperty(desc, hasWritable_name).ToHandleChecked();
-  ASSERT(hasWritable->IsBoolean());
+  DCHECK(hasWritable->IsBoolean());
   if (hasWritable->IsTrue()) {
-    Handle<String> writable_name =
-        isolate->factory()->InternalizeOneByteString(
-            STATIC_ASCII_VECTOR("writable_"));
+    Handle<String> writable_name = isolate->factory()->InternalizeOneByteString(
+        STATIC_CHAR_VECTOR("writable_"));
     Handle<Object> writable =
         Object::GetProperty(desc, writable_name).ToHandleChecked();
-    ASSERT(writable->IsBoolean());
+    DCHECK(writable->IsBoolean());
     *done = writable->IsFalse();
     if (!*done) return isolate->factory()->the_hole_value();
     if (strict_mode == SLOPPY) return value;
     Handle<Object> args[] = { name, receiver };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
-    return isolate->Throw<Object>(error);
+    THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
+                                          HandleVector(args, arraysize(args))),
+                    Object);
   }
 
   // We have an AccessorDescriptor.
-  Handle<String> set_name = isolate->factory()->InternalizeOneByteString(
-      STATIC_ASCII_VECTOR("set_"));
+  Handle<String> set_name =
+      isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("set_"));
   Handle<Object> setter = Object::GetProperty(desc, set_name).ToHandleChecked();
   if (!setter->IsUndefined()) {
     // TODO(rossberg): nicer would be to cast to some JSCallable here...
@@ -3658,9 +3512,9 @@
 
   if (strict_mode == SLOPPY) return value;
   Handle<Object> args2[] = { name, proxy };
-  Handle<Object> error = isolate->factory()->NewTypeError(
-      "no_setter_in_callback", HandleVector(args2, ARRAY_SIZE(args2)));
-  return isolate->Throw<Object>(error);
+  THROW_NEW_ERROR(isolate, NewTypeError("no_setter_in_callback",
+                                        HandleVector(args2, arraysize(args2))),
+                  Object);
 }
 
 
@@ -3678,7 +3532,7 @@
       CallTrap(proxy,
                "delete",
                Handle<Object>(),
-               ARRAY_SIZE(args),
+               arraysize(args),
                args),
       Object);
 
@@ -3686,11 +3540,11 @@
   if (mode == STRICT_DELETION && !result_bool) {
     Handle<Object> handler(proxy->handler(), isolate);
     Handle<String> trap_name = isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("delete"));
+        STATIC_CHAR_VECTOR("delete"));
     Handle<Object> args[] = { handler, trap_name };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_failed", HandleVector(args, ARRAY_SIZE(args)));
-    return isolate->Throw<Object>(error);
+    THROW_NEW_ERROR(isolate, NewTypeError("handler_failed",
+                                          HandleVector(args, arraysize(args))),
+                    Object);
   }
   return isolate->factory()->ToBoolean(result_bool);
 }
@@ -3704,88 +3558,83 @@
 }
 
 
-PropertyAttributes JSProxy::GetPropertyAttributesWithHandler(
-    Handle<JSProxy> proxy,
-    Handle<Object> receiver,
-    Handle<Name> name) {
+Maybe<PropertyAttributes> JSProxy::GetPropertyAttributesWithHandler(
+    Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name) {
   Isolate* isolate = proxy->GetIsolate();
   HandleScope scope(isolate);
 
   // TODO(rossberg): adjust once there is a story for symbols vs proxies.
-  if (name->IsSymbol()) return ABSENT;
+  if (name->IsSymbol()) return maybe(ABSENT);
 
   Handle<Object> args[] = { name };
   Handle<Object> result;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, result,
-      proxy->CallTrap(proxy,
-                      "getPropertyDescriptor",
-                      Handle<Object>(),
-                      ARRAY_SIZE(args),
-                      args),
-      NONE);
+      proxy->CallTrap(proxy, "getPropertyDescriptor", Handle<Object>(),
+                      arraysize(args), args),
+      Maybe<PropertyAttributes>());
 
-  if (result->IsUndefined()) return ABSENT;
+  if (result->IsUndefined()) return maybe(ABSENT);
 
   Handle<Object> argv[] = { result };
   Handle<Object> desc;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, desc,
-      Execution::Call(isolate,
-                      isolate->to_complete_property_descriptor(),
-                      result,
-                      ARRAY_SIZE(argv),
-                      argv),
-      NONE);
+      Execution::Call(isolate, isolate->to_complete_property_descriptor(),
+                      result, arraysize(argv), argv),
+      Maybe<PropertyAttributes>());
 
   // Convert result to PropertyAttributes.
   Handle<String> enum_n = isolate->factory()->InternalizeOneByteString(
-      STATIC_ASCII_VECTOR("enumerable_"));
+      STATIC_CHAR_VECTOR("enumerable_"));
   Handle<Object> enumerable;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, enumerable, Object::GetProperty(desc, enum_n), NONE);
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, enumerable,
+                                   Object::GetProperty(desc, enum_n),
+                                   Maybe<PropertyAttributes>());
   Handle<String> conf_n = isolate->factory()->InternalizeOneByteString(
-      STATIC_ASCII_VECTOR("configurable_"));
+      STATIC_CHAR_VECTOR("configurable_"));
   Handle<Object> configurable;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, configurable, Object::GetProperty(desc, conf_n), NONE);
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, configurable,
+                                   Object::GetProperty(desc, conf_n),
+                                   Maybe<PropertyAttributes>());
   Handle<String> writ_n = isolate->factory()->InternalizeOneByteString(
-      STATIC_ASCII_VECTOR("writable_"));
+      STATIC_CHAR_VECTOR("writable_"));
   Handle<Object> writable;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, writable, Object::GetProperty(desc, writ_n), NONE);
+  ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, writable,
+                                   Object::GetProperty(desc, writ_n),
+                                   Maybe<PropertyAttributes>());
   if (!writable->BooleanValue()) {
     Handle<String> set_n = isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("set_"));
+        STATIC_CHAR_VECTOR("set_"));
     Handle<Object> setter;
-    ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-        isolate, setter, Object::GetProperty(desc, set_n), NONE);
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, setter,
+                                     Object::GetProperty(desc, set_n),
+                                     Maybe<PropertyAttributes>());
     writable = isolate->factory()->ToBoolean(!setter->IsUndefined());
   }
 
   if (configurable->IsFalse()) {
     Handle<Object> handler(proxy->handler(), isolate);
     Handle<String> trap = isolate->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR("getPropertyDescriptor"));
+        STATIC_CHAR_VECTOR("getPropertyDescriptor"));
     Handle<Object> args[] = { handler, trap, name };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "proxy_prop_not_configurable", HandleVector(args, ARRAY_SIZE(args)));
-    isolate->Throw(*error);
-    return NONE;
+    Handle<Object> error;
+    MaybeHandle<Object> maybe_error = isolate->factory()->NewTypeError(
+        "proxy_prop_not_configurable", HandleVector(args, arraysize(args)));
+    if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+    return maybe(NONE);
   }
 
   int attributes = NONE;
   if (!enumerable->BooleanValue()) attributes |= DONT_ENUM;
   if (!configurable->BooleanValue()) attributes |= DONT_DELETE;
   if (!writable->BooleanValue()) attributes |= READ_ONLY;
-  return static_cast<PropertyAttributes>(attributes);
+  return maybe(static_cast<PropertyAttributes>(attributes));
 }
 
 
-PropertyAttributes JSProxy::GetElementAttributeWithHandler(
-    Handle<JSProxy> proxy,
-    Handle<JSReceiver> receiver,
-    uint32_t index) {
+Maybe<PropertyAttributes> JSProxy::GetElementAttributeWithHandler(
+    Handle<JSProxy> proxy, Handle<JSReceiver> receiver, uint32_t index) {
   Isolate* isolate = proxy->GetIsolate();
   Handle<String> name = isolate->factory()->Uint32ToString(index);
   return GetPropertyAttributesWithHandler(proxy, receiver, name);
@@ -3804,7 +3653,7 @@
   } else {
     isolate->factory()->BecomeJSObject(proxy);
   }
-  ASSERT(proxy->IsJSObject());
+  DCHECK(proxy->IsJSObject());
 
   // Inherit identity, if it was present.
   if (hash->IsSmi()) {
@@ -3832,9 +3681,10 @@
   if (trap->IsUndefined()) {
     if (derived.is_null()) {
       Handle<Object> args[] = { handler, trap_name };
-      Handle<Object> error = isolate->factory()->NewTypeError(
-        "handler_trap_missing", HandleVector(args, ARRAY_SIZE(args)));
-      return isolate->Throw<Object>(error);
+      THROW_NEW_ERROR(isolate,
+                      NewTypeError("handler_trap_missing",
+                                   HandleVector(args, arraysize(args))),
+                      Object);
     }
     trap = Handle<Object>(derived);
   }
@@ -3844,7 +3694,7 @@
 
 
 void JSObject::AllocateStorageForMap(Handle<JSObject> object, Handle<Map> map) {
-  ASSERT(object->map()->inobject_properties() == map->inobject_properties());
+  DCHECK(object->map()->inobject_properties() == map->inobject_properties());
   ElementsKind obj_kind = object->map()->elements_kind();
   ElementsKind map_kind = map->elements_kind();
   if (map_kind != obj_kind) {
@@ -3865,17 +3715,12 @@
 
 
 void JSObject::MigrateInstance(Handle<JSObject> object) {
-  // Converting any field to the most specific type will cause the
-  // GeneralizeFieldRepresentation algorithm to create the most general existing
-  // transition that matches the object. This achieves what is needed.
   Handle<Map> original_map(object->map());
-  GeneralizeFieldRepresentation(
-      object, 0, Representation::None(),
-      HeapType::None(object->GetIsolate()),
-      ALLOW_AS_CONSTANT);
-  object->map()->set_migration_target(true);
+  Handle<Map> map = Map::Update(original_map);
+  map->set_migration_target(true);
+  MigrateToMap(object, map);
   if (FLAG_trace_migration) {
-    object->PrintInstanceMigration(stdout, *original_map, object->map());
+    object->PrintInstanceMigration(stdout, *original_map, *map);
   }
 }
 
@@ -3886,7 +3731,7 @@
   DisallowDeoptimization no_deoptimization(isolate);
   Handle<Map> original_map(object->map(), isolate);
   Handle<Map> new_map;
-  if (!Map::CurrentMapForDeprecatedInternal(original_map).ToHandle(&new_map)) {
+  if (!Map::TryUpdate(original_map).ToHandle(&new_map)) {
     return false;
   }
   JSObject::MigrateToMap(object, new_map);
@@ -3897,46 +3742,6 @@
 }
 
 
-MaybeHandle<Object> JSObject::SetPropertyUsingTransition(
-    Handle<JSObject> object,
-    LookupResult* lookup,
-    Handle<Name> name,
-    Handle<Object> value,
-    PropertyAttributes attributes) {
-  Handle<Map> transition_map(lookup->GetTransitionTarget());
-  int descriptor = transition_map->LastAdded();
-
-  Handle<DescriptorArray> descriptors(transition_map->instance_descriptors());
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-
-  if (details.type() == CALLBACKS || attributes != details.attributes()) {
-    // AddProperty will either normalize the object, or create a new fast copy
-    // of the map. If we get a fast copy of the map, all field representations
-    // will be tagged since the transition is omitted.
-    return JSObject::AddProperty(
-        object, name, value, attributes, SLOPPY,
-        JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED,
-        JSReceiver::OMIT_EXTENSIBILITY_CHECK,
-        JSObject::FORCE_TAGGED, FORCE_FIELD, OMIT_TRANSITION);
-  }
-
-  // Keep the target CONSTANT if the same value is stored.
-  // TODO(verwaest): Also support keeping the placeholder
-  // (value->IsUninitialized) as constant.
-  if (!lookup->CanHoldValue(value)) {
-    Representation field_representation = value->OptimalRepresentation();
-    Handle<HeapType> field_type = value->OptimalType(
-        lookup->isolate(), field_representation);
-    transition_map = Map::GeneralizeRepresentation(
-        transition_map, descriptor,
-        field_representation, field_type, FORCE_FIELD);
-  }
-
-  JSObject::MigrateToNewProperty(object, transition_map, value);
-  return value;
-}
-
-
 void JSObject::MigrateToNewProperty(Handle<JSObject> object,
                                     Handle<Map> map,
                                     Handle<Object> value) {
@@ -3952,13 +3757,14 @@
   DescriptorArray* desc = map()->instance_descriptors();
   PropertyDetails details = desc->GetDetails(descriptor);
 
-  ASSERT(details.type() == FIELD);
+  DCHECK(details.type() == FIELD);
 
   FieldIndex index = FieldIndex::ForDescriptor(map(), descriptor);
   if (details.representation().IsDouble()) {
     // Nothing more to be done.
     if (value->IsUninitialized()) return;
     HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
+    DCHECK(box->IsMutableHeapNumber());
     box->set_value(value->Number());
   } else {
     FastPropertyAtPut(index, value);
@@ -3966,367 +3772,157 @@
 }
 
 
-static void SetPropertyToField(LookupResult* lookup,
-                               Handle<Object> value) {
-  if (lookup->type() == CONSTANT || !lookup->CanHoldValue(value)) {
-    Representation field_representation = value->OptimalRepresentation();
-    Handle<HeapType> field_type = value->OptimalType(
-        lookup->isolate(), field_representation);
-    JSObject::GeneralizeFieldRepresentation(handle(lookup->holder()),
-                                            lookup->GetDescriptorIndex(),
-                                            field_representation, field_type,
-                                            FORCE_FIELD);
-  }
-  lookup->holder()->WriteToField(lookup->GetDescriptorIndex(), *value);
+void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
+                           Handle<Object> value,
+                           PropertyAttributes attributes) {
+  LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+#ifdef DEBUG
+  uint32_t index;
+  DCHECK(!object->IsJSProxy());
+  DCHECK(!name->AsArrayIndex(&index));
+  Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
+  DCHECK(maybe.has_value);
+  DCHECK(!it.IsFound());
+  DCHECK(object->map()->is_extensible() ||
+         name.is_identical_to(it.isolate()->factory()->hidden_string()));
+#endif
+  AddDataProperty(&it, value, attributes, STRICT,
+                  CERTAINLY_NOT_STORE_FROM_KEYED).Check();
 }
 
 
-static void ConvertAndSetOwnProperty(LookupResult* lookup,
-                                     Handle<Name> name,
-                                     Handle<Object> value,
-                                     PropertyAttributes attributes) {
-  Handle<JSObject> object(lookup->holder());
-  if (object->TooManyFastProperties()) {
-    JSObject::NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
-  }
-
-  if (!object->HasFastProperties()) {
-    ReplaceSlowProperty(object, name, value, attributes);
-    return;
-  }
-
-  int descriptor_index = lookup->GetDescriptorIndex();
-  if (lookup->GetAttributes() == attributes) {
-    JSObject::GeneralizeFieldRepresentation(
-        object, descriptor_index, Representation::Tagged(),
-        HeapType::Any(lookup->isolate()), FORCE_FIELD);
-  } else {
-    Handle<Map> old_map(object->map());
-    Handle<Map> new_map = Map::CopyGeneralizeAllRepresentations(old_map,
-        descriptor_index, FORCE_FIELD, attributes, "attributes mismatch");
-    JSObject::MigrateToMap(object, new_map);
-  }
-
-  object->WriteToField(descriptor_index, *value);
-}
-
-
-static void SetPropertyToFieldWithAttributes(LookupResult* lookup,
-                                             Handle<Name> name,
-                                             Handle<Object> value,
-                                             PropertyAttributes attributes) {
-  if (lookup->GetAttributes() == attributes) {
-    if (value->IsUninitialized()) return;
-    SetPropertyToField(lookup, value);
-  } else {
-    ConvertAndSetOwnProperty(lookup, name, value, attributes);
-  }
-}
-
-
-MaybeHandle<Object> JSObject::SetPropertyForResult(
-    Handle<JSObject> object,
-    LookupResult* lookup,
-    Handle<Name> name,
-    Handle<Object> value,
-    PropertyAttributes attributes,
-    StrictMode strict_mode,
-    StoreFromKeyed store_mode) {
-  Isolate* isolate = object->GetIsolate();
-
-  // Make sure that the top context does not change when doing callbacks or
-  // interceptor calls.
-  AssertNoContextChange ncc(isolate);
-
-  // Optimization for 2-byte strings often used as keys in a decompression
-  // dictionary.  We internalize these short keys to avoid constantly
-  // reallocating them.
-  if (name->IsString() && !name->IsInternalizedString() &&
-      Handle<String>::cast(name)->length() <= 2) {
-    name = isolate->factory()->InternalizeString(Handle<String>::cast(name));
-  }
-
-  // Check access rights if needed.
-  if (object->IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
-      return SetPropertyWithFailedAccessCheck(object, lookup, name, value,
-                                              true, strict_mode);
-    }
-  }
-
-  if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return value;
-    ASSERT(proto->IsJSGlobalObject());
-    return SetPropertyForResult(Handle<JSObject>::cast(proto),
-        lookup, name, value, attributes, strict_mode, store_mode);
-  }
-
-  ASSERT(!lookup->IsFound() || lookup->holder() == *object ||
-         lookup->holder()->map()->is_hidden_prototype());
-
-  if (!lookup->IsProperty() && !object->IsJSContextExtensionObject()) {
-    bool done = false;
-    Handle<Object> result_object;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, result_object,
-        SetPropertyViaPrototypes(
-            object, name, value, attributes, strict_mode, &done),
-        Object);
-    if (done) return result_object;
-  }
-
-  if (!lookup->IsFound()) {
-    // Neither properties nor transitions found.
-    return AddProperty(
-        object, name, value, attributes, strict_mode, store_mode);
-  }
-
-  if (lookup->IsProperty() && lookup->IsReadOnly()) {
-    if (strict_mode == STRICT) {
-      Handle<Object> args[] = { name, object };
-      Handle<Object> error = isolate->factory()->NewTypeError(
-          "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
-      return isolate->Throw<Object>(error);
-    } else {
-      return value;
-    }
-  }
-
-  Handle<Object> old_value = isolate->factory()->the_hole_value();
-  bool is_observed = object->map()->is_observed() &&
-                     *name != isolate->heap()->hidden_string();
-  if (is_observed && lookup->IsDataProperty()) {
-    old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked();
-  }
-
-  // This is a real property that is not read-only, or it is a
-  // transition or null descriptor and there are no setters in the prototypes.
-  MaybeHandle<Object> maybe_result = value;
-  if (lookup->IsTransition()) {
-    maybe_result = SetPropertyUsingTransition(handle(lookup->holder()), lookup,
-                                              name, value, attributes);
-  } else {
-    switch (lookup->type()) {
-      case NORMAL:
-        SetNormalizedProperty(handle(lookup->holder()), lookup, value);
-        break;
-      case FIELD:
-        SetPropertyToField(lookup, value);
-        break;
-      case CONSTANT:
-        // Only replace the constant if necessary.
-        if (*value == lookup->GetConstant()) return value;
-        SetPropertyToField(lookup, value);
-        break;
-      case CALLBACKS: {
-        Handle<Object> callback_object(lookup->GetCallbackObject(), isolate);
-        return SetPropertyWithCallback(object, name, value,
-                                       handle(lookup->holder()),
-                                       callback_object, strict_mode);
-      }
-      case INTERCEPTOR:
-        maybe_result = SetPropertyWithInterceptor(
-            handle(lookup->holder()), name, value, attributes, strict_mode);
-        break;
-      case HANDLER:
-      case NONEXISTENT:
-        UNREACHABLE();
-    }
-  }
-
-  Handle<Object> result;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, result, maybe_result, Object);
-
-  if (is_observed) {
-    if (lookup->IsTransition()) {
-      EnqueueChangeRecord(object, "add", name, old_value);
-    } else {
-      LookupResult new_lookup(isolate);
-      object->LookupOwn(name, &new_lookup, true);
-      if (new_lookup.IsDataProperty()) {
-        Handle<Object> new_value =
-            Object::GetPropertyOrElement(object, name).ToHandleChecked();
-        if (!new_value->SameValue(*old_value)) {
-          EnqueueChangeRecord(object, "update", name, old_value);
-        }
-      }
-    }
-  }
-
-  return result;
-}
-
-
-// Set a real own property, even if it is READ_ONLY.  If the property is not
-// present, add it with attributes NONE.  This code is an exact clone of
-// SetProperty, with the check for IsReadOnly and the check for a
-// callback setter removed.  The two lines looking up the LookupResult
-// result are also added.  If one of the functions is changed, the other
-// should be.
+// Reconfigures a property to a data property with the given attributes, even
+// if the property is not configurable.
 MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
     Handle<JSObject> object,
     Handle<Name> name,
     Handle<Object> value,
     PropertyAttributes attributes,
-    ValueType value_type,
-    StoreMode mode,
-    ExtensibilityCheck extensibility_check,
-    StoreFromKeyed store_from_keyed,
     ExecutableAccessorInfoHandling handling) {
-  Isolate* isolate = object->GetIsolate();
-
-  // Make sure that the top context does not change when doing callbacks or
-  // interceptor calls.
-  AssertNoContextChange ncc(isolate);
-
-  LookupResult lookup(isolate);
-  object->LookupOwn(name, &lookup, true);
-  if (!lookup.IsFound()) {
-    object->map()->LookupTransition(*object, *name, &lookup);
-  }
-
-  // Check access rights if needed.
-  if (object->IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
-      return SetPropertyWithFailedAccessCheck(object, &lookup, name, value,
-                                              false, SLOPPY);
-    }
-  }
-
-  if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return value;
-    ASSERT(proto->IsJSGlobalObject());
-    return SetOwnPropertyIgnoreAttributes(Handle<JSObject>::cast(proto),
-        name, value, attributes, value_type, mode, extensibility_check);
-  }
-
-  if (lookup.IsInterceptor() ||
-      (lookup.IsDescriptorOrDictionary() && lookup.type() == CALLBACKS)) {
-    object->LookupOwnRealNamedProperty(name, &lookup);
-  }
-
-  // Check for accessor in prototype chain removed here in clone.
-  if (!lookup.IsFound()) {
-    object->map()->LookupTransition(*object, *name, &lookup);
-    TransitionFlag flag = lookup.IsFound()
-        ? OMIT_TRANSITION : INSERT_TRANSITION;
-    // Neither properties nor transitions found.
-    return AddProperty(object, name, value, attributes, SLOPPY,
-        store_from_keyed, extensibility_check, value_type, mode, flag);
-  }
-
-  Handle<Object> old_value = isolate->factory()->the_hole_value();
-  PropertyAttributes old_attributes = ABSENT;
+  DCHECK(!value->IsTheHole());
+  LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
   bool is_observed = object->map()->is_observed() &&
-                     *name != isolate->heap()->hidden_string();
-  if (is_observed && lookup.IsProperty()) {
-    if (lookup.IsDataProperty()) {
-      old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked();
-    }
-    old_attributes = lookup.GetAttributes();
-  }
+                     *name != it.isolate()->heap()->hidden_string();
+  for (; it.IsFound(); it.Next()) {
+    switch (it.state()) {
+      case LookupIterator::INTERCEPTOR:
+      case LookupIterator::JSPROXY:
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
 
-  bool executed_set_prototype = false;
-
-  // Check of IsReadOnly removed from here in clone.
-  if (lookup.IsTransition()) {
-    Handle<Object> result;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, result,
-        SetPropertyUsingTransition(
-            handle(lookup.holder()), &lookup, name, value, attributes),
-        Object);
-  } else {
-    switch (lookup.type()) {
-      case NORMAL:
-        ReplaceSlowProperty(object, name, value, attributes);
-        break;
-      case FIELD:
-        SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
-        break;
-      case CONSTANT:
-        // Only replace the constant if necessary.
-        if (lookup.GetAttributes() != attributes ||
-            *value != lookup.GetConstant()) {
-          SetPropertyToFieldWithAttributes(&lookup, name, value, attributes);
+      case LookupIterator::ACCESS_CHECK:
+        if (!it.isolate()->MayNamedAccess(object, name, v8::ACCESS_SET)) {
+          return SetPropertyWithFailedAccessCheck(&it, value, SLOPPY);
         }
         break;
-      case CALLBACKS:
-      {
-        Handle<Object> callback(lookup.GetCallbackObject(), isolate);
-        if (callback->IsExecutableAccessorInfo() &&
-            handling == DONT_FORCE_FIELD) {
+
+      case LookupIterator::ACCESSOR: {
+        PropertyDetails details = it.property_details();
+        Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
+        // Ensure the context isn't changed after calling into accessors.
+        AssertNoContextChange ncc(it.isolate());
+
+        Handle<Object> accessors = it.GetAccessors();
+
+        if (is_observed && accessors->IsAccessorInfo()) {
+          ASSIGN_RETURN_ON_EXCEPTION(
+              it.isolate(), old_value,
+              GetPropertyWithAccessor(it.GetReceiver(), it.name(),
+                                      it.GetHolder<JSObject>(), accessors),
+              Object);
+        }
+
+        // Special handling for ExecutableAccessorInfo, which behaves like a
+        // data property.
+        if (handling == DONT_FORCE_FIELD &&
+            accessors->IsExecutableAccessorInfo()) {
           Handle<Object> result;
           ASSIGN_RETURN_ON_EXCEPTION(
-              isolate, result,
-              JSObject::SetPropertyWithCallback(object,
-                                                name,
-                                                value,
-                                                handle(lookup.holder()),
-                                                callback,
-                                                STRICT),
+              it.isolate(), result,
+              JSObject::SetPropertyWithAccessor(it.GetReceiver(), it.name(),
+                                                value, it.GetHolder<JSObject>(),
+                                                accessors, STRICT),
               Object);
+          DCHECK(result->SameValue(*value));
 
-          if (attributes != lookup.GetAttributes()) {
-            Handle<ExecutableAccessorInfo> new_data =
-                Accessors::CloneAccessor(
-                    isolate, Handle<ExecutableAccessorInfo>::cast(callback));
-            new_data->set_property_attributes(attributes);
-            if (attributes & READ_ONLY) {
-              // This way we don't have to introduce a lookup to the setter,
-              // simply make it unavailable to reflect the attributes.
-              new_data->clear_setter();
+          if (details.attributes() == attributes) {
+            // Regular property update if the attributes match.
+            if (is_observed && !old_value->SameValue(*value)) {
+              // If we are setting the prototype of a function and the
+              // object is observed, don't send change records, because
+              // the prototype setter handles that itself.
+              if (!object->IsJSFunction() ||
+                  !Name::Equals(it.isolate()->factory()->prototype_string(),
+                                name) ||
+                  !Handle<JSFunction>::cast(object)->should_have_prototype()) {
+                EnqueueChangeRecord(object, "update", name, old_value);
+              }
             }
-
-            SetPropertyCallback(object, name, new_data, attributes);
+            return value;
           }
+
+          // Reconfigure the accessor if attributes mismatch.
+          Handle<ExecutableAccessorInfo> new_data = Accessors::CloneAccessor(
+              it.isolate(), Handle<ExecutableAccessorInfo>::cast(accessors));
+          new_data->set_property_attributes(attributes);
+          // Clearing the setter avoids introducing a lookup for it: the
+          // setter simply becomes unavailable, which reflects the
+          // READ_ONLY attribute.
+          if (attributes & READ_ONLY) new_data->clear_setter();
+          SetPropertyCallback(object, name, new_data, attributes);
           if (is_observed) {
-            // If we are setting the prototype of a function and are observed,
-            // don't send change records because the prototype handles that
-            // itself.
-            executed_set_prototype = object->IsJSFunction() &&
-                String::Equals(isolate->factory()->prototype_string(),
-                               Handle<String>::cast(name)) &&
-                Handle<JSFunction>::cast(object)->should_have_prototype();
+            if (old_value->SameValue(*value)) {
+              old_value = it.isolate()->factory()->the_hole_value();
+            }
+            EnqueueChangeRecord(object, "reconfigure", name, old_value);
           }
-        } else {
-          ConvertAndSetOwnProperty(&lookup, name, value, attributes);
+          return value;
         }
-        break;
-      }
-      case NONEXISTENT:
-      case HANDLER:
-      case INTERCEPTOR:
-        UNREACHABLE();
-    }
-  }
 
-  if (is_observed && !executed_set_prototype) {
-    if (lookup.IsTransition()) {
-      EnqueueChangeRecord(object, "add", name, old_value);
-    } else if (old_value->IsTheHole()) {
-      EnqueueChangeRecord(object, "reconfigure", name, old_value);
-    } else {
-      LookupResult new_lookup(isolate);
-      object->LookupOwn(name, &new_lookup, true);
-      bool value_changed = false;
-      if (new_lookup.IsDataProperty()) {
-        Handle<Object> new_value =
-            Object::GetPropertyOrElement(object, name).ToHandleChecked();
-        value_changed = !old_value->SameValue(*new_value);
+        it.ReconfigureDataProperty(value, attributes);
+        it.PrepareForDataProperty(value);
+        it.WriteDataValue(value);
+
+        if (is_observed) {
+          if (old_value->SameValue(*value)) {
+            old_value = it.isolate()->factory()->the_hole_value();
+          }
+          EnqueueChangeRecord(object, "reconfigure", name, old_value);
+        }
+
+        return value;
       }
-      if (new_lookup.GetAttributes() != old_attributes) {
-        if (!value_changed) old_value = isolate->factory()->the_hole_value();
-        EnqueueChangeRecord(object, "reconfigure", name, old_value);
-      } else if (value_changed) {
-        EnqueueChangeRecord(object, "update", name, old_value);
+
+      case LookupIterator::DATA: {
+        PropertyDetails details = it.property_details();
+        Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
+        // Regular property update if the attributes match.
+        if (details.attributes() == attributes) {
+          return SetDataProperty(&it, value);
+        }
+        // Reconfigure the data property if the attributes mismatch.
+        if (is_observed) old_value = it.GetDataValue();
+
+        it.ReconfigureDataProperty(value, attributes);
+        it.PrepareForDataProperty(value);
+        it.WriteDataValue(value);
+
+        if (is_observed) {
+          if (old_value->SameValue(*value)) {
+            old_value = it.isolate()->factory()->the_hole_value();
+          }
+          EnqueueChangeRecord(object, "reconfigure", name, old_value);
+        }
+
+        return value;
       }
     }
   }
 
-  return value;
+  return AddDataProperty(&it, value, attributes, STRICT,
+                         CERTAINLY_NOT_STORE_FROM_KEYED);
 }
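
The DATA arm above dispatches on an attribute comparison: matching attributes make this an ordinary value update, mismatching ones a reconfiguration, and the reported old value collapses to the hole when the value itself did not change. A compact model of just that dispatch (attribute bits and record strings are stand-ins):

#include <iostream>
#include <string>

// Stand-in attribute bits; V8's PropertyAttributes carries more flags.
enum PropertyAttr { kNone = 0, kReadOnly = 1 };

// Mirrors the DATA case: matching attributes mean a plain value update;
// mismatching attributes mean a reconfiguration, and the change record's
// old value is reported only when the value itself also changed.
std::string RecordFor(int cur_attrs, int new_attrs,
                      const std::string& old_v, const std::string& new_v) {
  if (cur_attrs == new_attrs) return old_v == new_v ? "none" : "update";
  return old_v == new_v ? "reconfigure (oldValue omitted)" : "reconfigure";
}

int main() {
  std::cout << RecordFor(kNone, kNone, "1", "2") << "\n";      // update
  std::cout << RecordFor(kNone, kReadOnly, "1", "1") << "\n";  // reconfigure
  return 0;
}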
 
 
@@ -4335,7 +3931,7 @@
     Handle<Object> receiver,
     Handle<Name> name) {
   // TODO(rossberg): Support symbols in the API.
-  if (name->IsSymbol()) return Maybe<PropertyAttributes>(ABSENT);
+  if (name->IsSymbol()) return maybe(ABSENT);
 
   Isolate* isolate = holder->GetIsolate();
   HandleScope scope(isolate);
@@ -4355,9 +3951,8 @@
     v8::Handle<v8::Integer> result =
         args.Call(query, v8::Utils::ToLocal(Handle<String>::cast(name)));
     if (!result.IsEmpty()) {
-      ASSERT(result->IsInt32());
-      return Maybe<PropertyAttributes>(
-          static_cast<PropertyAttributes>(result->Int32Value()));
+      DCHECK(result->IsInt32());
+      return maybe(static_cast<PropertyAttributes>(result->Int32Value()));
     }
   } else if (!interceptor->getter()->IsUndefined()) {
     v8::NamedPropertyGetterCallback getter =
@@ -4366,55 +3961,58 @@
         ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name));
     v8::Handle<v8::Value> result =
         args.Call(getter, v8::Utils::ToLocal(Handle<String>::cast(name)));
-    if (!result.IsEmpty()) return Maybe<PropertyAttributes>(DONT_ENUM);
+    if (!result.IsEmpty()) return maybe(DONT_ENUM);
   }
-  return Maybe<PropertyAttributes>();
+
+  RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>());
+  return maybe(ABSENT);
 }
 
 
-PropertyAttributes JSReceiver::GetOwnPropertyAttributes(
+Maybe<PropertyAttributes> JSReceiver::GetOwnPropertyAttributes(
     Handle<JSReceiver> object, Handle<Name> name) {
   // Check whether the name is an array index.
   uint32_t index = 0;
   if (object->IsJSObject() && name->AsArrayIndex(&index)) {
     return GetOwnElementAttribute(object, index);
   }
-  LookupIterator it(object, name, LookupIterator::CHECK_OWN);
+  LookupIterator it(object, name, LookupIterator::HIDDEN);
   return GetPropertyAttributes(&it);
 }
 
 
-PropertyAttributes JSReceiver::GetPropertyAttributes(LookupIterator* it) {
+Maybe<PropertyAttributes> JSReceiver::GetPropertyAttributes(
+    LookupIterator* it) {
   for (; it->IsFound(); it->Next()) {
     switch (it->state()) {
       case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
         UNREACHABLE();
       case LookupIterator::JSPROXY:
         return JSProxy::GetPropertyAttributesWithHandler(
-            it->GetJSProxy(), it->GetReceiver(), it->name());
+            it->GetHolder<JSProxy>(), it->GetReceiver(), it->name());
       case LookupIterator::INTERCEPTOR: {
         Maybe<PropertyAttributes> result =
             JSObject::GetPropertyAttributesWithInterceptor(
-                it->GetHolder(), it->GetReceiver(), it->name());
-        if (result.has_value) return result.value;
+                it->GetHolder<JSObject>(), it->GetReceiver(), it->name());
+        if (!result.has_value) return result;
+        if (result.value != ABSENT) return result;
         break;
       }
       case LookupIterator::ACCESS_CHECK:
         if (it->HasAccess(v8::ACCESS_HAS)) break;
         return JSObject::GetPropertyAttributesWithFailedAccessCheck(it);
-      case LookupIterator::PROPERTY:
-        if (it->HasProperty()) return it->property_details().attributes();
-        break;
+      case LookupIterator::ACCESSOR:
+      case LookupIterator::DATA:
+        return maybe(it->property_details().attributes());
     }
   }
-  return ABSENT;
+  return maybe(ABSENT);
 }
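
The attribute lookup above is a small state machine driven by LookupIterator: access checks and declining interceptors keep the walk going, while ACCESSOR/DATA terminate it with the holder's attributes. A toy loop with the same shape (the State enum and Step() are illustrative only):

#include <optional>
#include <vector>

// Illustrative states only; LookupIterator has more (JSPROXY, TRANSITION...).
enum class State { kAccessCheck, kInterceptor, kData };

// One lookup step: some states answer immediately, others defer to the next
// state, mirroring the switch inside JSReceiver::GetPropertyAttributes.
std::optional<int> Step(State s, bool interceptor_answers) {
  switch (s) {
    case State::kAccessCheck:
      return std::nullopt;  // access allowed: keep walking
    case State::kInterceptor:
      // An interceptor that reports ABSENT no longer ends the lookup.
      return interceptor_answers ? std::optional<int>(1) : std::nullopt;
    case State::kData:
      return 0;  // the property's attributes (stand-in value)
  }
  return -1;  // unreachable; -1 plays the role of ABSENT
}

int Lookup(const std::vector<State>& chain, bool interceptor_answers) {
  for (State s : chain) {
    if (auto attrs = Step(s, interceptor_answers)) return *attrs;
  }
  return -1;  // ran off the chain: ABSENT
}

int main() {
  // The interceptor declines, so the data property further on answers.
  return Lookup({State::kAccessCheck, State::kInterceptor, State::kData},
                /*interceptor_answers=*/false);
}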
 
 
-PropertyAttributes JSObject::GetElementAttributeWithReceiver(
-    Handle<JSObject> object,
-    Handle<JSReceiver> receiver,
-    uint32_t index,
+Maybe<PropertyAttributes> JSObject::GetElementAttributeWithReceiver(
+    Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index,
     bool check_prototype) {
   Isolate* isolate = object->GetIsolate();
 
@@ -4422,17 +4020,18 @@
   if (object->IsAccessCheckNeeded()) {
     if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) {
       isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
-      // TODO(yangguo): Issue 3269, check for scheduled exception missing?
-      return ABSENT;
+      RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<PropertyAttributes>());
+      return maybe(ABSENT);
     }
   }
 
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return ABSENT;
-    ASSERT(proto->IsJSGlobalObject());
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return maybe(ABSENT);
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
     return JSObject::GetElementAttributeWithReceiver(
-        Handle<JSObject>::cast(proto), receiver, index, check_prototype);
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
+        index, check_prototype);
   }
 
   // Check for lookup interceptor except when bootstrapping.
@@ -4446,10 +4045,8 @@
 }
 
 
-PropertyAttributes JSObject::GetElementAttributeWithInterceptor(
-    Handle<JSObject> object,
-    Handle<JSReceiver> receiver,
-    uint32_t index,
+Maybe<PropertyAttributes> JSObject::GetElementAttributeWithInterceptor(
+    Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index,
     bool check_prototype) {
   Isolate* isolate = object->GetIsolate();
   HandleScope scope(isolate);
@@ -4468,7 +4065,7 @@
         ApiIndexedPropertyAccess("interceptor-indexed-has", *object, index));
     v8::Handle<v8::Integer> result = args.Call(query, index);
     if (!result.IsEmpty())
-      return static_cast<PropertyAttributes>(result->Int32Value());
+      return maybe(static_cast<PropertyAttributes>(result->Int32Value()));
   } else if (!interceptor->getter()->IsUndefined()) {
     v8::IndexedPropertyGetterCallback getter =
         v8::ToCData<v8::IndexedPropertyGetterCallback>(interceptor->getter());
@@ -4476,7 +4073,7 @@
         ApiIndexedPropertyAccess(
             "interceptor-indexed-get-has", *object, index));
     v8::Handle<v8::Value> result = args.Call(getter, index);
-    if (!result.IsEmpty()) return NONE;
+    if (!result.IsEmpty()) return maybe(NONE);
   }
 
   return GetElementAttributeWithoutInterceptor(
@@ -4484,31 +4081,31 @@
 }
 
 
-PropertyAttributes JSObject::GetElementAttributeWithoutInterceptor(
-    Handle<JSObject> object,
-    Handle<JSReceiver> receiver,
-    uint32_t index,
+Maybe<PropertyAttributes> JSObject::GetElementAttributeWithoutInterceptor(
+    Handle<JSObject> object, Handle<JSReceiver> receiver, uint32_t index,
     bool check_prototype) {
   PropertyAttributes attr = object->GetElementsAccessor()->GetAttributes(
       receiver, object, index);
-  if (attr != ABSENT) return attr;
+  if (attr != ABSENT) return maybe(attr);
 
   // Handle [] on String objects.
   if (object->IsStringObjectWithCharacterAt(index)) {
-    return static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+    return maybe(static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE));
   }
 
-  if (!check_prototype) return ABSENT;
+  if (!check_prototype) return maybe(ABSENT);
 
-  Handle<Object> proto(object->GetPrototype(), object->GetIsolate());
-  if (proto->IsJSProxy()) {
+  PrototypeIterator iter(object->GetIsolate(), object);
+  if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
     // We need to follow the spec and simulate a call to [[GetOwnProperty]].
     return JSProxy::GetElementAttributeWithHandler(
-        Handle<JSProxy>::cast(proto), receiver, index);
+        Handle<JSProxy>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
+        index);
   }
-  if (proto->IsNull()) return ABSENT;
+  if (iter.IsAtEnd()) return maybe(ABSENT);
   return GetElementAttributeWithReceiver(
-      Handle<JSObject>::cast(proto), receiver, index, true);
+      Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), receiver,
+      index, true);
 }
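
Several hunks in this file replace hand-rolled GetPrototype() loops with PrototypeIterator. The pattern is an iterator over the prototype chain with an explicit end test; a self-contained analogue under that assumption (ProtoChain is a made-up stand-in, not the real iterator):

// Made-up stand-in for a heap object with a prototype link; not a V8 type.
struct Obj {
  Obj* prototype = nullptr;
};

// Minimal analogue of PrototypeIterator's IsAtEnd()/Advance()/GetCurrent().
class ProtoChain {
 public:
  explicit ProtoChain(Obj* o) : current_(o->prototype) {}
  bool IsAtEnd() const { return current_ == nullptr; }  // null prototype
  void Advance() { current_ = current_->prototype; }
  Obj* GetCurrent() const { return current_; }

 private:
  Obj* current_;
};

int main() {
  Obj root, mid, leaf;
  mid.prototype = &root;
  leaf.prototype = &mid;
  int depth = 0;
  for (ProtoChain it(&leaf); !it.IsAtEnd(); it.Advance()) ++depth;
  return depth == 2 ? 0 : 1;  // visited mid and root, then hit the end
}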
 
 
@@ -4534,7 +4131,7 @@
 void NormalizedMapCache::Set(Handle<Map> fast_map,
                              Handle<Map> normalized_map) {
   DisallowHeapAllocation no_gc;
-  ASSERT(normalized_map->is_dictionary_map());
+  DCHECK(normalized_map->is_dictionary_map());
   FixedArray::set(GetIndex(fast_map), *normalized_map);
 }
 
@@ -4560,15 +4157,24 @@
                                    int expected_additional_properties) {
   if (!object->HasFastProperties()) return;
 
+  Handle<Map> map(object->map());
+  Handle<Map> new_map = Map::Normalize(map, mode);
+
+  MigrateFastToSlow(object, new_map, expected_additional_properties);
+}
+
+
+void JSObject::MigrateFastToSlow(Handle<JSObject> object,
+                                 Handle<Map> new_map,
+                                 int expected_additional_properties) {
   // The global object is always normalized.
-  ASSERT(!object->IsGlobalObject());
+  DCHECK(!object->IsGlobalObject());
   // JSGlobalProxy must never be normalized
-  ASSERT(!object->IsJSGlobalProxy());
+  DCHECK(!object->IsJSGlobalProxy());
 
   Isolate* isolate = object->GetIsolate();
   HandleScope scope(isolate);
   Handle<Map> map(object->map());
-  Handle<Map> new_map = Map::Normalize(map, mode);
 
   // Allocate new content.
   int real_size = map->NumberOfOwnDescriptors();
@@ -4598,6 +4204,11 @@
         FieldIndex index = FieldIndex::ForDescriptor(*map, i);
         Handle<Object> value(
             object->RawFastPropertyAt(index), isolate);
+        if (details.representation().IsDouble()) {
+          DCHECK(value->IsMutableHeapNumber());
+          Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
+          value = isolate->factory()->NewHeapNumber(old->value());
+        }
         PropertyDetails d =
             PropertyDetails(details.attributes(), NORMAL, i + 1);
         dictionary = NameDictionary::Add(dictionary, key, value, d);
@@ -4611,11 +4222,7 @@
         dictionary = NameDictionary::Add(dictionary, key, value, d);
         break;
       }
-      case INTERCEPTOR:
-        break;
-      case HANDLER:
       case NORMAL:
-      case NONEXISTENT:
         UNREACHABLE();
         break;
     }
@@ -4630,13 +4237,15 @@
   // Resize the object in the heap if necessary.
   int new_instance_size = new_map->instance_size();
   int instance_size_delta = map->instance_size() - new_instance_size;
-  ASSERT(instance_size_delta >= 0);
-  Heap* heap = isolate->heap();
-  heap->CreateFillerObjectAt(object->address() + new_instance_size,
-                             instance_size_delta);
-  heap->AdjustLiveBytes(object->address(),
-                        -instance_size_delta,
-                        Heap::FROM_MUTATOR);
+  DCHECK(instance_size_delta >= 0);
+
+  if (instance_size_delta > 0) {
+    Heap* heap = isolate->heap();
+    heap->CreateFillerObjectAt(object->address() + new_instance_size,
+                               instance_size_delta);
+    heap->AdjustLiveBytes(object->address(), -instance_size_delta,
+                          Heap::FROM_MUTATOR);
+  }
 
   // We are storing the new map using release store after creating a filler for
   // the left-over space to avoid races with the sweeper thread.
@@ -4648,17 +4257,18 @@
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
-    PrintF("Object properties have been normalized:\n");
-    object->Print();
+    OFStream os(stdout);
+    os << "Object properties have been normalized:\n";
+    object->Print(os);
   }
 #endif
 }
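
The normalization trace above switches from printf-style PrintF to an output stream (OFStream in the diff) so objects can print themselves through operator<< overloads. A small sketch of that style, with a hypothetical stub type standing in for a printable heap object:

    #include <iostream>

    struct JSObjectStub {
      int property_count = 2;
    };

    // Printing goes through operator<<, so any ostream works as a sink.
    std::ostream& operator<<(std::ostream& os, const JSObjectStub& object) {
      return os << "JSObject with " << object.property_count << " properties";
    }

    int main() {
      JSObjectStub object;
      std::cout << "Object properties have been normalized:\n" << object << "\n";
      return 0;
    }
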
 
 
-void JSObject::TransformToFastProperties(Handle<JSObject> object,
-                                         int unused_property_fields) {
+void JSObject::MigrateSlowToFast(Handle<JSObject> object,
+                                 int unused_property_fields) {
   if (object->HasFastProperties()) return;
-  ASSERT(!object->IsGlobalObject());
+  DCHECK(!object->IsGlobalObject());
   Isolate* isolate = object->GetIsolate();
   Factory* factory = isolate->factory();
   Handle<NameDictionary> dictionary(object->property_dictionary());
@@ -4682,7 +4292,7 @@
     if (dictionary->IsKey(k)) {
       Object* value = dictionary->ValueAt(i);
       PropertyType type = dictionary->DetailsAt(i).type();
-      ASSERT(type != FIELD);
+      DCHECK(type != FIELD);
       instance_descriptor_length++;
       if (type == NORMAL && !value->IsJSFunction()) {
         number_of_fields += 1;
@@ -4698,13 +4308,13 @@
 
   if (instance_descriptor_length == 0) {
     DisallowHeapAllocation no_gc;
-    ASSERT_LE(unused_property_fields, inobject_props);
+    DCHECK_LE(unused_property_fields, inobject_props);
     // Transform the object.
     new_map->set_unused_property_fields(inobject_props);
-    object->set_map(*new_map);
+    object->synchronized_set_map(*new_map);
     object->set_properties(isolate->heap()->empty_fixed_array());
     // Check that it really works.
-    ASSERT(object->HasFastProperties());
+    DCHECK(object->HasFastProperties());
     return;
   }
 
@@ -4773,7 +4383,7 @@
       }
     }
   }
-  ASSERT(current_offset == number_of_fields);
+  DCHECK(current_offset == number_of_fields);
 
   descriptors->Sort();
 
@@ -4782,20 +4392,26 @@
   new_map->set_unused_property_fields(unused_property_fields);
 
   // Transform the object.
-  object->set_map(*new_map);
+  object->synchronized_set_map(*new_map);
 
   object->set_properties(*fields);
-  ASSERT(object->IsJSObject());
+  DCHECK(object->IsJSObject());
 
   // Check that it really works.
-  ASSERT(object->HasFastProperties());
+  DCHECK(object->HasFastProperties());
 }
 
 
 void JSObject::ResetElements(Handle<JSObject> object) {
-  Heap* heap = object->GetIsolate()->heap();
-  CHECK(object->map() != heap->sloppy_arguments_elements_map());
-  object->set_elements(object->map()->GetInitialElements());
+  Isolate* isolate = object->GetIsolate();
+  CHECK(object->map() != isolate->heap()->sloppy_arguments_elements_map());
+  if (object->map()->has_dictionary_elements()) {
+    Handle<SeededNumberDictionary> new_elements =
+        SeededNumberDictionary::New(isolate, 0);
+    object->set_elements(*new_elements);
+  } else {
+    object->set_elements(object->map()->GetInitialElements());
+  }
 }
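
ResetElements now keeps dictionary-mode objects in dictionary mode, installing a fresh empty dictionary rather than snapping back to the map's initial fast-mode backing store. A toy model of that branch using standard containers, purely illustrative:

    #include <cstdio>
    #include <map>
    #include <variant>
    #include <vector>

    using FastElements = std::vector<int>;         // fast-mode backing store
    using DictElements = std::map<unsigned, int>;  // dictionary (slow) mode
    using Elements = std::variant<FastElements, DictElements>;

    // Reset preserves the representation: dictionary-mode objects get a
    // fresh empty dictionary instead of reverting to the initial fast store.
    void ResetElements(Elements& elements) {
      if (std::holds_alternative<DictElements>(elements)) {
        elements = DictElements{};
      } else {
        elements = FastElements{};
      }
    }

    int main() {
      Elements elements = DictElements{{7, 42}};
      ResetElements(elements);
      std::printf("still in dictionary mode: %d\n",
                  std::holds_alternative<DictElements>(elements) ? 1 : 0);  // 1
      return 0;
    }
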
 
 
@@ -4831,7 +4447,7 @@
 
 Handle<SeededNumberDictionary> JSObject::NormalizeElements(
     Handle<JSObject> object) {
-  ASSERT(!object->HasExternalArrayElements() &&
+  DCHECK(!object->HasExternalArrayElements() &&
          !object->HasFixedTypedArrayElements());
   Isolate* isolate = object->GetIsolate();
 
@@ -4845,7 +4461,7 @@
   }
   if (array->IsDictionary()) return Handle<SeededNumberDictionary>::cast(array);
 
-  ASSERT(object->HasFastSmiOrObjectElements() ||
+  DCHECK(object->HasFastSmiOrObjectElements() ||
          object->HasFastDoubleElements() ||
          object->HasFastArgumentsElements());
   // Compute the effective length and allocate a new backing store.
@@ -4877,12 +4493,13 @@
 
 #ifdef DEBUG
   if (FLAG_trace_normalization) {
-    PrintF("Object elements have been normalized:\n");
-    object->Print();
+    OFStream os(stdout);
+    os << "Object elements have been normalized:\n";
+    object->Print(os);
   }
 #endif
 
-  ASSERT(object->HasDictionaryElements() ||
+  DCHECK(object->HasDictionaryElements() ||
          object->HasDictionaryArgumentsElements());
   return dictionary;
 }
@@ -4904,7 +4521,7 @@
 
 
 void JSObject::SetIdentityHash(Handle<JSObject> object, Handle<Smi> hash) {
-  ASSERT(!object->IsJSGlobalProxy());
+  DCHECK(!object->IsJSGlobalProxy());
   Isolate* isolate = object->GetIsolate();
   SetHiddenProperty(object, isolate->factory()->identity_hash_string(), hash);
 }
@@ -4965,18 +4582,18 @@
 
 Object* JSObject::GetHiddenProperty(Handle<Name> key) {
   DisallowHeapAllocation no_gc;
-  ASSERT(key->IsUniqueName());
+  DCHECK(key->IsUniqueName());
   if (IsJSGlobalProxy()) {
     // JSGlobalProxies store their hash internally.
-    ASSERT(*key != GetHeap()->identity_hash_string());
+    DCHECK(*key != GetHeap()->identity_hash_string());
     // For a proxy, use the prototype as target object.
-    Object* proxy_parent = GetPrototype();
+    PrototypeIterator iter(GetIsolate(), this);
     // If the proxy is detached, return undefined.
-    if (proxy_parent->IsNull()) return GetHeap()->the_hole_value();
-    ASSERT(proxy_parent->IsJSGlobalObject());
-    return JSObject::cast(proxy_parent)->GetHiddenProperty(key);
+    if (iter.IsAtEnd()) return GetHeap()->the_hole_value();
+    DCHECK(iter.GetCurrent()->IsJSGlobalObject());
+    return JSObject::cast(iter.GetCurrent())->GetHiddenProperty(key);
   }
-  ASSERT(!IsJSGlobalProxy());
+  DCHECK(!IsJSGlobalProxy());
   Object* inline_value = GetHiddenPropertiesHashTable();
 
   if (inline_value->IsSmi()) {
@@ -5001,18 +4618,20 @@
                                            Handle<Object> value) {
   Isolate* isolate = object->GetIsolate();
 
-  ASSERT(key->IsUniqueName());
+  DCHECK(key->IsUniqueName());
   if (object->IsJSGlobalProxy()) {
     // JSGlobalProxies store their hash internally.
-    ASSERT(*key != *isolate->factory()->identity_hash_string());
+    DCHECK(*key != *isolate->factory()->identity_hash_string());
     // For a proxy, use the prototype as target object.
-    Handle<Object> proxy_parent(object->GetPrototype(), isolate);
+    PrototypeIterator iter(isolate, object);
     // If the proxy is detached, return undefined.
-    if (proxy_parent->IsNull()) return isolate->factory()->undefined_value();
-    ASSERT(proxy_parent->IsJSGlobalObject());
-    return SetHiddenProperty(Handle<JSObject>::cast(proxy_parent), key, value);
+    if (iter.IsAtEnd()) return isolate->factory()->undefined_value();
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return SetHiddenProperty(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), key,
+        value);
   }
-  ASSERT(!object->IsJSGlobalProxy());
+  DCHECK(!object->IsJSGlobalProxy());
 
   Handle<Object> inline_value(object->GetHiddenPropertiesHashTable(), isolate);
 
@@ -5042,19 +4661,20 @@
 
 void JSObject::DeleteHiddenProperty(Handle<JSObject> object, Handle<Name> key) {
   Isolate* isolate = object->GetIsolate();
-  ASSERT(key->IsUniqueName());
+  DCHECK(key->IsUniqueName());
 
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return;
-    ASSERT(proto->IsJSGlobalObject());
-    return DeleteHiddenProperty(Handle<JSObject>::cast(proto), key);
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return;
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return DeleteHiddenProperty(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), key);
   }
 
   Object* inline_value = object->GetHiddenPropertiesHashTable();
 
   // We never delete (inline-stored) identity hashes.
-  ASSERT(*key != *isolate->factory()->identity_hash_string());
+  DCHECK(*key != *isolate->factory()->identity_hash_string());
   if (inline_value->IsUndefined() || inline_value->IsSmi()) return;
 
   Handle<ObjectHashTable> hashtable(ObjectHashTable::cast(inline_value));
@@ -5065,13 +4685,16 @@
 
 bool JSObject::HasHiddenProperties(Handle<JSObject> object) {
   Handle<Name> hidden = object->GetIsolate()->factory()->hidden_string();
-  LookupIterator it(object, hidden, LookupIterator::CHECK_OWN_REAL);
-  return GetPropertyAttributes(&it) != ABSENT;
+  LookupIterator it(object, hidden, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  Maybe<PropertyAttributes> maybe = GetPropertyAttributes(&it);
+  // Cannot get an exception since the hidden_string isn't accessible to JS.
+  DCHECK(maybe.has_value);
+  return maybe.value != ABSENT;
 }
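
The hidden-property hunks above (GetHiddenProperty, SetHiddenProperty, DeleteHiddenProperty) all replace hand-written GetPrototype() walks with PrototypeIterator, trading null comparisons for an explicit IsAtEnd()/Advance() protocol. An illustrative reimplementation over a toy object graph, not V8's real class:

    #include <cstdio>

    struct Obj {
      const char* name;
      Obj* prototype;  // null terminates the chain
    };

    // Start at the receiver's prototype; stop when the chain runs out.
    class PrototypeIterator {
     public:
      explicit PrototypeIterator(Obj* receiver) : current_(receiver->prototype) {}
      bool IsAtEnd() const { return current_ == nullptr; }
      void Advance() { current_ = current_->prototype; }
      Obj* GetCurrent() const { return current_; }

     private:
      Obj* current_;
    };

    int main() {
      Obj object_proto{"Object.prototype", nullptr};
      Obj array_proto{"Array.prototype", &object_proto};
      Obj receiver{"receiver", &array_proto};
      for (PrototypeIterator iter(&receiver); !iter.IsAtEnd(); iter.Advance()) {
        std::printf("%s\n", iter.GetCurrent()->name);
      }
      return 0;
    }
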
 
 
 Object* JSObject::GetHiddenPropertiesHashTable() {
-  ASSERT(!IsJSGlobalProxy());
+  DCHECK(!IsJSGlobalProxy());
   if (HasFastProperties()) {
     // If the object has fast properties, check whether the first slot
     // in the descriptor array matches the hidden string. Since the
@@ -5082,8 +4705,8 @@
       int sorted_index = descriptors->GetSortedKeyIndex(0);
       if (descriptors->GetKey(sorted_index) == GetHeap()->hidden_string() &&
           sorted_index < map()->NumberOfOwnDescriptors()) {
-        ASSERT(descriptors->GetType(sorted_index) == FIELD);
-        ASSERT(descriptors->GetDetails(sorted_index).representation().
+        DCHECK(descriptors->GetType(sorted_index) == FIELD);
+        DCHECK(descriptors->GetDetails(sorted_index).representation().
                IsCompatibleForLoad(Representation::Tagged()));
         FieldIndex index = FieldIndex::ForDescriptor(this->map(),
                                                      sorted_index);
@@ -5096,15 +4719,10 @@
     }
   } else {
     Isolate* isolate = GetIsolate();
-    LookupResult result(isolate);
-    LookupOwnRealNamedProperty(isolate->factory()->hidden_string(), &result);
-    if (result.IsFound()) {
-      ASSERT(result.IsNormal());
-      ASSERT(result.holder() == this);
-      Object* value = GetNormalizedProperty(&result);
-      if (!value->IsTheHole()) return value;
-    }
-    return GetHeap()->undefined_value();
+    LookupIterator it(handle(this), isolate->factory()->hidden_string(),
+                      LookupIterator::OWN_SKIP_INTERCEPTOR);
+    // Access check is always skipped for the hidden string anyways.
+    return *GetDataProperty(&it);
   }
 }
 
@@ -5129,100 +4747,47 @@
                                      inline_value);
   }
 
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      object,
-      isolate->factory()->hidden_string(),
-      hashtable,
-      DONT_ENUM,
-      OPTIMAL_REPRESENTATION,
-      ALLOW_AS_CONSTANT,
-      OMIT_EXTENSIBILITY_CHECK).Assert();
-
+  SetHiddenPropertiesHashTable(object, hashtable);
   return hashtable;
 }
 
 
 Handle<Object> JSObject::SetHiddenPropertiesHashTable(Handle<JSObject> object,
                                                       Handle<Object> value) {
-  ASSERT(!object->IsJSGlobalProxy());
-
+  DCHECK(!object->IsJSGlobalProxy());
   Isolate* isolate = object->GetIsolate();
-
-  // We can store the identity hash inline iff there is no backing store
-  // for hidden properties yet.
-  ASSERT(JSObject::HasHiddenProperties(object) != value->IsSmi());
-  if (object->HasFastProperties()) {
-    // If the object has fast properties, check whether the first slot
-    // in the descriptor array matches the hidden string. Since the
-    // hidden strings hash code is zero (and no other name has hash
-    // code zero) it will always occupy the first entry if present.
-    DescriptorArray* descriptors = object->map()->instance_descriptors();
-    if (descriptors->number_of_descriptors() > 0) {
-      int sorted_index = descriptors->GetSortedKeyIndex(0);
-      if (descriptors->GetKey(sorted_index) == isolate->heap()->hidden_string()
-          && sorted_index < object->map()->NumberOfOwnDescriptors()) {
-        object->WriteToField(sorted_index, *value);
-        return object;
-      }
-    }
-  }
-
-  SetOwnPropertyIgnoreAttributes(object,
-                                 isolate->factory()->hidden_string(),
-                                 value,
-                                 DONT_ENUM,
-                                 OPTIMAL_REPRESENTATION,
-                                 ALLOW_AS_CONSTANT,
-                                 OMIT_EXTENSIBILITY_CHECK).Assert();
+  Handle<Name> name = isolate->factory()->hidden_string();
+  SetOwnPropertyIgnoreAttributes(object, name, value, DONT_ENUM).Assert();
   return object;
 }
 
 
-Handle<Object> JSObject::DeletePropertyPostInterceptor(Handle<JSObject> object,
-                                                       Handle<Name> name,
-                                                       DeleteMode mode) {
-  // Check own property, ignore interceptor.
-  Isolate* isolate = object->GetIsolate();
-  LookupResult result(isolate);
-  object->LookupOwnRealNamedProperty(name, &result);
-  if (!result.IsFound()) return isolate->factory()->true_value();
-
-  // Normalize object if needed.
-  NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
-
-  return DeleteNormalizedProperty(object, name, mode);
-}
-
-
 MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
-    Handle<JSObject> object, Handle<Name> name) {
-  Isolate* isolate = object->GetIsolate();
+    Handle<JSObject> holder, Handle<JSObject> receiver, Handle<Name> name) {
+  Isolate* isolate = holder->GetIsolate();
 
   // TODO(rossberg): Support symbols in the API.
-  if (name->IsSymbol()) return isolate->factory()->false_value();
+  if (name->IsSymbol()) return MaybeHandle<Object>();
 
-  Handle<InterceptorInfo> interceptor(object->GetNamedInterceptor());
-  if (!interceptor->deleter()->IsUndefined()) {
-    v8::NamedPropertyDeleterCallback deleter =
-        v8::ToCData<v8::NamedPropertyDeleterCallback>(interceptor->deleter());
-    LOG(isolate,
-        ApiNamedPropertyAccess("interceptor-named-delete", *object, *name));
-    PropertyCallbackArguments args(
-        isolate, interceptor->data(), *object, *object);
-    v8::Handle<v8::Boolean> result =
-        args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name)));
-    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    if (!result.IsEmpty()) {
-      ASSERT(result->IsBoolean());
-      Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
-      result_internal->VerifyApiCallResultType();
-      // Rebox CustomArguments::kReturnValueOffset before returning.
-      return handle(*result_internal, isolate);
-    }
-  }
-  Handle<Object> result =
-      DeletePropertyPostInterceptor(object, name, NORMAL_DELETION);
-  return result;
+  Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
+  if (interceptor->deleter()->IsUndefined()) return MaybeHandle<Object>();
+
+  v8::NamedPropertyDeleterCallback deleter =
+      v8::ToCData<v8::NamedPropertyDeleterCallback>(interceptor->deleter());
+  LOG(isolate,
+      ApiNamedPropertyAccess("interceptor-named-delete", *holder, *name));
+  PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
+                                 *holder);
+  v8::Handle<v8::Boolean> result =
+      args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name)));
+  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+  if (result.IsEmpty()) return MaybeHandle<Object>();
+
+  DCHECK(result->IsBoolean());
+  Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
+  result_internal->VerifyApiCallResultType();
+  // Rebox CustomArguments::kReturnValueOffset before returning.
+  return handle(*result_internal, isolate);
 }
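
The rewritten interceptor path returns an empty MaybeHandle both when the interceptor declines the deletion and when it throws; the caller disambiguates through the isolate's pending-exception state (see the DeleteProperty loop below). A sketch of that convention with stand-in types, assuming nothing beyond what the diff shows:

    #include <cstdio>

    struct Isolate {
      bool pending_exception = false;
    };

    // Stand-in for MaybeHandle: empty unless a result was produced.
    template <typename T>
    struct MaybeHandle {
      bool empty = true;
      T value{};
      bool is_null() const { return empty; }
    };

    MaybeHandle<bool> DeleteWithInterceptor(Isolate* isolate, bool handled,
                                            bool throws) {
      if (throws) {
        isolate->pending_exception = true;
        return {};  // empty, with an exception recorded on the isolate
      }
      if (!handled) return {};  // empty: caller falls through to the default
      return MaybeHandle<bool>{false, true};
    }

    int main() {
      Isolate isolate;
      MaybeHandle<bool> result =
          DeleteWithInterceptor(&isolate, /*handled=*/false, /*throws=*/false);
      if (!result.is_null()) std::printf("interceptor handled the delete\n");
      else if (isolate.pending_exception) std::printf("propagate the exception\n");
      else std::printf("continue with the default deletion\n");
      return 0;
    }
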
 
 
@@ -5247,7 +4812,7 @@
   v8::Handle<v8::Boolean> result = args.Call(deleter, index);
   RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
   if (!result.IsEmpty()) {
-    ASSERT(result->IsBoolean());
+    DCHECK(result->IsBoolean());
     Handle<Object> result_internal = v8::Utils::OpenHandle(*result);
     result_internal->VerifyApiCallResultType();
     // Rebox CustomArguments::kReturnValueOffset before returning.
@@ -5278,26 +4843,28 @@
       // Deleting a non-configurable property in strict mode.
       Handle<Object> name = factory->NewNumberFromUint(index);
       Handle<Object> args[2] = { name, object };
-      Handle<Object> error =
-          factory->NewTypeError("strict_delete_property",
-                                HandleVector(args, 2));
-      isolate->Throw(*error);
-      return Handle<Object>();
+      THROW_NEW_ERROR(isolate, NewTypeError("strict_delete_property",
+                                            HandleVector(args, 2)),
+                      Object);
     }
     return factory->false_value();
   }
 
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return factory->false_value();
-    ASSERT(proto->IsJSGlobalObject());
-    return DeleteElement(Handle<JSObject>::cast(proto), index, mode);
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return factory->false_value();
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return DeleteElement(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index,
+        mode);
   }
 
   Handle<Object> old_value;
   bool should_enqueue_change_record = false;
   if (object->map()->is_observed()) {
-    should_enqueue_change_record = HasOwnElement(object, index);
+    Maybe<bool> maybe = HasOwnElement(object, index);
+    if (!maybe.has_value) return MaybeHandle<Object>();
+    should_enqueue_change_record = maybe.value;
     if (should_enqueue_change_record) {
       if (!GetOwnElementAccessorPair(object, index).is_null()) {
         old_value = Handle<Object>::cast(factory->the_hole_value());
@@ -5318,9 +4885,13 @@
   Handle<Object> result;
   ASSIGN_RETURN_ON_EXCEPTION(isolate, result, maybe_result, Object);
 
-  if (should_enqueue_change_record && !HasOwnElement(object, index)) {
-    Handle<String> name = factory->Uint32ToString(index);
-    EnqueueChangeRecord(object, "delete", name, old_value);
+  if (should_enqueue_change_record) {
+    Maybe<bool> maybe = HasOwnElement(object, index);
+    if (!maybe.has_value) return MaybeHandle<Object>();
+    if (!maybe.value) {
+      Handle<String> name = factory->Uint32ToString(index);
+      EnqueueChangeRecord(object, "delete", name, old_value);
+    }
   }
 
   return result;
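
DeleteElement above now reports strict-mode failures through THROW_NEW_ERROR instead of building the error, calling isolate->Throw, and returning an empty handle by hand. An assumed-shape sketch of such a macro; the real definition lives elsewhere in V8 and may differ:

    #include <cstdio>
    #include <string>

    struct Isolate {
      std::string pending;  // stand-in for the scheduled exception
    };

    template <typename T>
    struct MaybeHandle {
      bool empty = true;
      T value{};
    };

    // Record the error on the isolate and return an empty handle in one step.
    #define THROW_NEW_ERROR(isolate, message, T)   \
      do {                                         \
        (isolate)->pending = (message);            \
        return MaybeHandle<T>{};                   \
      } while (false)

    MaybeHandle<bool> DeleteElement(Isolate* isolate, bool configurable,
                                    bool strict) {
      if (!configurable && strict) {
        THROW_NEW_ERROR(isolate, "strict_delete_property", bool);
      }
      return MaybeHandle<bool>{false, configurable};
    }

    int main() {
      Isolate isolate;
      MaybeHandle<bool> result = DeleteElement(&isolate, false, true);
      if (result.empty) std::printf("threw: %s\n", isolate.pending.c_str());
      return 0;
    }
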
@@ -5329,79 +4900,91 @@
 
 MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
                                              Handle<Name> name,
-                                             DeleteMode mode) {
-  Isolate* isolate = object->GetIsolate();
+                                             DeleteMode delete_mode) {
   // ECMA-262, 3rd, 8.6.2.5
-  ASSERT(name->IsName());
-
-  // Check access rights if needed.
-  if (object->IsAccessCheckNeeded() &&
-      !isolate->MayNamedAccess(object, name, v8::ACCESS_DELETE)) {
-    isolate->ReportFailedAccessCheck(object, v8::ACCESS_DELETE);
-    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    return isolate->factory()->false_value();
-  }
-
-  if (object->IsJSGlobalProxy()) {
-    Object* proto = object->GetPrototype();
-    if (proto->IsNull()) return isolate->factory()->false_value();
-    ASSERT(proto->IsJSGlobalObject());
-    return JSGlobalObject::DeleteProperty(
-        handle(JSGlobalObject::cast(proto)), name, mode);
-  }
+  DCHECK(name->IsName());
 
   uint32_t index = 0;
   if (name->AsArrayIndex(&index)) {
-    return DeleteElement(object, index, mode);
+    return DeleteElement(object, index, delete_mode);
   }
 
-  LookupResult lookup(isolate);
-  object->LookupOwn(name, &lookup, true);
-  if (!lookup.IsFound()) return isolate->factory()->true_value();
-  // Ignore attributes if forcing a deletion.
-  if (lookup.IsDontDelete() && mode != FORCE_DELETION) {
-    if (mode == STRICT_DELETION) {
-      // Deleting a non-configurable property in strict mode.
-      Handle<Object> args[2] = { name, object };
-      Handle<Object> error = isolate->factory()->NewTypeError(
-          "strict_delete_property", HandleVector(args, ARRAY_SIZE(args)));
-      isolate->Throw(*error);
-      return Handle<Object>();
-    }
-    return isolate->factory()->false_value();
-  }
+  // Skip interceptors on FORCE_DELETION.
+  LookupIterator::Configuration config =
+      delete_mode == FORCE_DELETION ? LookupIterator::HIDDEN_SKIP_INTERCEPTOR
+                                    : LookupIterator::HIDDEN;
 
-  Handle<Object> old_value = isolate->factory()->the_hole_value();
+  LookupIterator it(object, name, config);
+
   bool is_observed = object->map()->is_observed() &&
-                     *name != isolate->heap()->hidden_string();
-  if (is_observed && lookup.IsDataProperty()) {
-    old_value = Object::GetPropertyOrElement(object, name).ToHandleChecked();
-  }
-  Handle<Object> result;
+                     *name != it.isolate()->heap()->hidden_string();
+  Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
 
-  // Check for interceptor.
-  if (lookup.IsInterceptor()) {
-    // Skip interceptor if forcing a deletion.
-    if (mode == FORCE_DELETION) {
-      result = DeletePropertyPostInterceptor(object, name, mode);
-    } else {
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, result,
-          DeletePropertyWithInterceptor(object, name),
-          Object);
+  for (; it.IsFound(); it.Next()) {
+    switch (it.state()) {
+      case LookupIterator::JSPROXY:
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
+      case LookupIterator::ACCESS_CHECK:
+        if (it.HasAccess(v8::ACCESS_DELETE)) break;
+        it.isolate()->ReportFailedAccessCheck(it.GetHolder<JSObject>(),
+                                              v8::ACCESS_DELETE);
+        RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it.isolate(), Object);
+        return it.isolate()->factory()->false_value();
+      case LookupIterator::INTERCEPTOR: {
+        MaybeHandle<Object> maybe_result =
+            JSObject::DeletePropertyWithInterceptor(it.GetHolder<JSObject>(),
+                                                    object, it.name());
+        // Delete with interceptor succeeded. Return result.
+        if (!maybe_result.is_null()) return maybe_result;
+        // An exception was thrown in the interceptor. Propagate.
+        if (it.isolate()->has_pending_exception()) return maybe_result;
+        break;
+      }
+      case LookupIterator::DATA:
+        if (is_observed) {
+          old_value = it.GetDataValue();
+        }
+      // Fall through.
+      case LookupIterator::ACCESSOR: {
+        if (delete_mode != FORCE_DELETION && !it.IsConfigurable()) {
+          // Fail if the property is not configurable.
+          if (delete_mode == STRICT_DELETION) {
+            Handle<Object> args[2] = {name, object};
+            THROW_NEW_ERROR(it.isolate(),
+                            NewTypeError("strict_delete_property",
+                                         HandleVector(args, arraysize(args))),
+                            Object);
+          }
+          return it.isolate()->factory()->false_value();
+        }
+
+        PropertyNormalizationMode mode = object->map()->is_prototype_map()
+                                             ? KEEP_INOBJECT_PROPERTIES
+                                             : CLEAR_INOBJECT_PROPERTIES;
+        Handle<JSObject> holder = it.GetHolder<JSObject>();
+        // TODO(verwaest): Remove this temporary compatibility hack when blink
+        // tests are updated.
+        if (!holder.is_identical_to(object) &&
+            !(object->IsJSGlobalProxy() && holder->IsJSGlobalObject())) {
+          return it.isolate()->factory()->true_value();
+        }
+        NormalizeProperties(holder, mode, 0);
+        Handle<Object> result =
+            DeleteNormalizedProperty(holder, name, delete_mode);
+        ReoptimizeIfPrototype(holder);
+
+        if (is_observed) {
+          EnqueueChangeRecord(object, "delete", name, old_value);
+        }
+
+        return result;
+      }
     }
-  } else {
-    // Normalize object if needed.
-    NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
-    // Make sure the properties are normalized before removing the entry.
-    result = DeleteNormalizedProperty(object, name, mode);
   }
 
-  if (is_observed && !HasOwnProperty(object, name)) {
-    EnqueueChangeRecord(object, "delete", name, old_value);
-  }
-
-  return result;
+  return it.isolate()->factory()->true_value();
 }
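
DeleteProperty is restructured around the LookupIterator state machine: each step of the lookup surfaces as a state, and deletion dispatches on it. A toy rendering with assumed state names mirroring the switch above, not a real API:

    #include <cstdio>
    #include <vector>

    enum State { ACCESS_CHECK, INTERCEPTOR, DATA, ACCESSOR, NOT_FOUND };

    int main() {
      // Pretend the lookup passes an access check, offers the deletion to an
      // interceptor that declines, then lands on a data property.
      std::vector<State> lookup = {ACCESS_CHECK, INTERCEPTOR, DATA};
      for (State state : lookup) {
        switch (state) {
          case ACCESS_CHECK:
            std::printf("check access, then keep walking\n");
            break;
          case INTERCEPTOR:
            std::printf("offer the deletion to the interceptor\n");
            break;
          case DATA:
          case ACCESSOR:
            std::printf("check configurability, then delete\n");
            return 0;
          case NOT_FOUND:
            break;
        }
      }
      std::printf("property absent: delete succeeds vacuously\n");
      return 0;
    }
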
 
 
@@ -5430,7 +5013,7 @@
 bool JSObject::ReferencesObjectFromElements(FixedArray* elements,
                                             ElementsKind kind,
                                             Object* object) {
-  ASSERT(IsFastObjectElementsKind(kind) ||
+  DCHECK(IsFastObjectElementsKind(kind) ||
          kind == DICTIONARY_ELEMENTS);
   if (IsFastObjectElementsKind(kind)) {
     int length = IsJSArray()
@@ -5517,11 +5100,10 @@
   // For functions check the context.
   if (IsJSFunction()) {
     // Get the constructor function for arguments array.
-    JSObject* arguments_boilerplate =
-        heap->isolate()->context()->native_context()->
-            sloppy_arguments_boilerplate();
+    Map* arguments_map =
+        heap->isolate()->context()->native_context()->sloppy_arguments_map();
     JSFunction* arguments_function =
-        JSFunction::cast(arguments_boilerplate->map()->constructor());
+        JSFunction::cast(arguments_map->constructor());
 
     // Get the context and don't check if it is the native context.
     JSFunction* f = JSFunction::cast(this);
@@ -5577,25 +5159,25 @@
   }
 
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return object;
-    ASSERT(proto->IsJSGlobalObject());
-    return PreventExtensions(Handle<JSObject>::cast(proto));
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return object;
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return PreventExtensions(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
   }
 
   // It's not possible to seal objects with external array elements
   if (object->HasExternalArrayElements() ||
       object->HasFixedTypedArrayElements()) {
-    Handle<Object> error  =
-        isolate->factory()->NewTypeError(
-            "cant_prevent_ext_external_array_elements",
-            HandleVector(&object, 1));
-    return isolate->Throw<Object>(error);
+    THROW_NEW_ERROR(isolate,
+                    NewTypeError("cant_prevent_ext_external_array_elements",
+                                 HandleVector(&object, 1)),
+                    Object);
   }
 
   // If there are fast elements we normalize.
   Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
-  ASSERT(object->HasDictionaryElements() ||
+  DCHECK(object->HasDictionaryElements() ||
          object->HasDictionaryArgumentsElements());
 
   // Make sure that we never go back to fast case.
@@ -5608,7 +5190,7 @@
 
   new_map->set_is_extensible(false);
   JSObject::MigrateToMap(object, new_map);
-  ASSERT(!object->map()->is_extensible());
+  DCHECK(!object->map()->is_extensible());
 
   if (object->map()->is_observed()) {
     EnqueueChangeRecord(object, "preventExtensions", Handle<Name>(),
@@ -5623,7 +5205,8 @@
   int capacity = dictionary->Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = dictionary->KeyAt(i);
-    if (dictionary->IsKey(k)) {
+    if (dictionary->IsKey(k) &&
+        !(k->IsSymbol() && Symbol::cast(k)->is_private())) {
       PropertyDetails details = dictionary->DetailsAt(i);
       int attrs = DONT_DELETE;
       // READ_ONLY is an invalid attribute for JS setters/getters.
@@ -5644,8 +5227,8 @@
 
 MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
   // Freezing sloppy arguments should be handled elsewhere.
-  ASSERT(!object->HasSloppyArgumentsElements());
-  ASSERT(!object->map()->is_observed());
+  DCHECK(!object->HasSloppyArgumentsElements());
+  DCHECK(!object->map()->is_observed());
 
   if (object->map()->is_frozen()) return object;
 
@@ -5659,20 +5242,19 @@
   }
 
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return object;
-    ASSERT(proto->IsJSGlobalObject());
-    return Freeze(Handle<JSObject>::cast(proto));
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return object;
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return Freeze(Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
   }
 
   // It's not possible to freeze objects with external array elements
   if (object->HasExternalArrayElements() ||
       object->HasFixedTypedArrayElements()) {
-    Handle<Object> error  =
-        isolate->factory()->NewTypeError(
-            "cant_prevent_ext_external_array_elements",
-            HandleVector(&object, 1));
-    return isolate->Throw<Object>(error);
+    THROW_NEW_ERROR(isolate,
+                    NewTypeError("cant_prevent_ext_external_array_elements",
+                                 HandleVector(&object, 1)),
+                    Object);
   }
 
   Handle<SeededNumberDictionary> new_element_dictionary;
@@ -5702,15 +5284,16 @@
       isolate->heap()->frozen_symbol());
   if (transition_index != TransitionArray::kNotFound) {
     Handle<Map> transition_map(old_map->GetTransition(transition_index));
-    ASSERT(transition_map->has_dictionary_elements());
-    ASSERT(transition_map->is_frozen());
-    ASSERT(!transition_map->is_extensible());
+    DCHECK(transition_map->has_dictionary_elements());
+    DCHECK(transition_map->is_frozen());
+    DCHECK(!transition_map->is_extensible());
     JSObject::MigrateToMap(object, transition_map);
   } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
     // Create a new descriptor array with fully-frozen properties
     Handle<Map> new_map = Map::CopyForFreeze(old_map);
     JSObject::MigrateToMap(object, new_map);
   } else {
+    DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map());
     // Slow path: need to normalize properties for safety
     NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
 
@@ -5726,7 +5309,7 @@
     FreezeDictionary(object->property_dictionary());
   }
 
-  ASSERT(object->map()->has_dictionary_elements());
+  DCHECK(object->map()->has_dictionary_elements());
   if (!new_element_dictionary.is_null()) {
     object->set_elements(*new_element_dictionary);
   }
@@ -5744,17 +5327,17 @@
 
 
 void JSObject::SetObserved(Handle<JSObject> object) {
-  ASSERT(!object->IsJSGlobalProxy());
-  ASSERT(!object->IsJSGlobalObject());
+  DCHECK(!object->IsJSGlobalProxy());
+  DCHECK(!object->IsJSGlobalObject());
   Isolate* isolate = object->GetIsolate();
   Handle<Map> new_map;
   Handle<Map> old_map(object->map(), isolate);
-  ASSERT(!old_map->is_observed());
+  DCHECK(!old_map->is_observed());
   int transition_index = old_map->SearchTransition(
       isolate->heap()->observed_symbol());
   if (transition_index != TransitionArray::kNotFound) {
     new_map = handle(old_map->GetTransition(transition_index), isolate);
-    ASSERT(new_map->is_observed());
+    DCHECK(new_map->is_observed());
   } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
     new_map = Map::CopyForObserved(old_map);
   } else {
@@ -5770,7 +5353,7 @@
                                         FieldIndex index) {
   Isolate* isolate = object->GetIsolate();
   Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate);
-  return Object::NewStorageFor(isolate, raw_value, representation);
+  return Object::WrapForRead(isolate, raw_value, representation);
 }
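
FastPropertyAt now wraps double fields via WrapForRead; together with the normalization hunk earlier that copies mutable heap numbers, the theme is that in-object doubles are mutable storage, so reads must hand out a fresh box. A hedged sketch of that reasoning with hypothetical types:

    #include <cstdio>
    #include <memory>

    struct HeapNumber {
      double value;
    };

    struct Field {
      HeapNumber mutable_box;  // in-place, mutable storage for a double field
    };

    // Hand out a fresh box so later in-place writes to the field are not
    // observable through the value the caller received.
    std::unique_ptr<HeapNumber> WrapForRead(const Field& field) {
      return std::make_unique<HeapNumber>(field.mutable_box);
    }

    int main() {
      Field field{HeapNumber{1.5}};
      auto snapshot = WrapForRead(field);
      field.mutable_box.value = 2.5;         // the field is mutated in place
      std::printf("%g\n", snapshot->value);  // still 1.5
      return 0;
    }
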
 
 
@@ -5812,7 +5395,7 @@
     Handle<JSObject> object) {
   Isolate* isolate = this->isolate();
   bool copying = this->copying();
-  bool shallow = hints_ == JSObject::kObjectIsShallowArray;
+  bool shallow = hints_ == JSObject::kObjectIsShallow;
 
   if (!shallow) {
     StackLimitCheck check(isolate);
@@ -5839,7 +5422,7 @@
     copy = object;
   }
 
-  ASSERT(copying || copy.is_identical_to(object));
+  DCHECK(copying || copy.is_identical_to(object));
 
   ElementsKind kind = copy->GetElementsKind();
   if (copying && IsFastSmiOrObjectElementsKind(kind) &&
@@ -5878,10 +5461,12 @@
           isolate->factory()->NewFixedArray(copy->NumberOfOwnProperties());
       copy->GetOwnPropertyNames(*names, 0);
       for (int i = 0; i < names->length(); i++) {
-        ASSERT(names->get(i)->IsString());
+        DCHECK(names->get(i)->IsString());
         Handle<String> key_string(String::cast(names->get(i)));
-        PropertyAttributes attributes =
+        Maybe<PropertyAttributes> maybe =
             JSReceiver::GetOwnPropertyAttributes(copy, key_string);
+        DCHECK(maybe.has_value);
+        PropertyAttributes attributes = maybe.value;
         // Only deep copy fields from the object literal expression.
         // In particular, don't try to copy the length attribute of
         // an array.
@@ -5896,8 +5481,7 @@
               JSObject);
           if (copying) {
             // Creating object copy for literals. No strict mode needed.
-            JSObject::SetProperty(
-                copy, key_string, result, NONE, SLOPPY).Assert();
+            JSObject::SetProperty(copy, key_string, result, SLOPPY).Assert();
           }
         }
       }
@@ -5905,7 +5489,7 @@
 
     // Deep copy own elements.
     // Pixel elements cannot be created using an object literal.
-    ASSERT(!copy->HasExternalArrayElements());
+    DCHECK(!copy->HasExternalArrayElements());
     switch (kind) {
       case FAST_SMI_ELEMENTS:
       case FAST_ELEMENTS:
@@ -5915,13 +5499,13 @@
         if (elements->map() == isolate->heap()->fixed_cow_array_map()) {
 #ifdef DEBUG
           for (int i = 0; i < elements->length(); i++) {
-            ASSERT(!elements->get(i)->IsJSObject());
+            DCHECK(!elements->get(i)->IsJSObject());
           }
 #endif
         } else {
           for (int i = 0; i < elements->length(); i++) {
             Handle<Object> value(elements->get(i), isolate);
-            ASSERT(value->IsSmi() ||
+            DCHECK(value->IsSmi() ||
                    value->IsTheHole() ||
                    (IsFastObjectElementsKind(copy->GetElementsKind())));
             if (value->IsJSObject()) {
@@ -5990,7 +5574,7 @@
                                                        kNoHints);
   MaybeHandle<JSObject> result = v.StructureWalk(object);
   Handle<JSObject> for_assert;
-  ASSERT(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
+  DCHECK(!result.ToHandle(&for_assert) || for_assert.is_identical_to(object));
   return result;
 }
 
@@ -6002,63 +5586,27 @@
   JSObjectWalkVisitor<AllocationSiteUsageContext> v(site_context, true, hints);
   MaybeHandle<JSObject> copy = v.StructureWalk(object);
   Handle<JSObject> for_assert;
-  ASSERT(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
+  DCHECK(!copy.ToHandle(&for_assert) || !for_assert.is_identical_to(object));
   return copy;
 }
 
 
-Handle<Object> JSObject::GetDataProperty(Handle<JSObject> object,
-                                         Handle<Name> key) {
-  Isolate* isolate = object->GetIsolate();
-  LookupResult lookup(isolate);
-  {
-    DisallowHeapAllocation no_allocation;
-    object->LookupRealNamedProperty(key, &lookup);
-  }
-  Handle<Object> result = isolate->factory()->undefined_value();
-  if (lookup.IsFound() && !lookup.IsTransition()) {
-    switch (lookup.type()) {
-      case NORMAL:
-        result = GetNormalizedProperty(
-            Handle<JSObject>(lookup.holder(), isolate), &lookup);
-        break;
-      case FIELD:
-        result = FastPropertyAt(Handle<JSObject>(lookup.holder(), isolate),
-                                lookup.representation(),
-                                lookup.GetFieldIndex());
-        break;
-      case CONSTANT:
-        result = Handle<Object>(lookup.GetConstant(), isolate);
-        break;
-      case CALLBACKS:
-      case HANDLER:
-      case INTERCEPTOR:
-        break;
-      case NONEXISTENT:
-        UNREACHABLE();
-    }
-  }
-  return result;
-}
-
-
 // Tests for the fast common case for property enumeration:
 // - This object and all prototypes has an enum cache (which means that
 //   it is no proxy, has no interceptors and needs no access checks).
 // - This object has no elements.
 // - No prototype has enumerable properties/elements.
 bool JSReceiver::IsSimpleEnum() {
-  Heap* heap = GetHeap();
-  for (Object* o = this;
-       o != heap->null_value();
-       o = JSObject::cast(o)->GetPrototype()) {
-    if (!o->IsJSObject()) return false;
-    JSObject* curr = JSObject::cast(o);
+  for (PrototypeIterator iter(GetIsolate(), this,
+                              PrototypeIterator::START_AT_RECEIVER);
+       !iter.IsAtEnd(); iter.Advance()) {
+    if (!iter.GetCurrent()->IsJSObject()) return false;
+    JSObject* curr = JSObject::cast(iter.GetCurrent());
     int enum_length = curr->map()->EnumLength();
     if (enum_length == kInvalidEnumCacheSentinel) return false;
     if (curr->IsAccessCheckNeeded()) return false;
-    ASSERT(!curr->HasNamedInterceptor());
-    ASSERT(!curr->HasIndexedInterceptor());
+    DCHECK(!curr->HasNamedInterceptor());
+    DCHECK(!curr->HasIndexedInterceptor());
     if (curr->NumberOfEnumElements() > 0) return false;
     if (curr != this && enum_length != 0) return false;
   }
@@ -6115,65 +5663,6 @@
 }
 
 
-void JSReceiver::LookupOwn(
-    Handle<Name> name, LookupResult* result, bool search_hidden_prototypes) {
-  DisallowHeapAllocation no_gc;
-  ASSERT(name->IsName());
-
-  if (IsJSGlobalProxy()) {
-    Object* proto = GetPrototype();
-    if (proto->IsNull()) return result->NotFound();
-    ASSERT(proto->IsJSGlobalObject());
-    return JSReceiver::cast(proto)->LookupOwn(
-        name, result, search_hidden_prototypes);
-  }
-
-  if (IsJSProxy()) {
-    result->HandlerResult(JSProxy::cast(this));
-    return;
-  }
-
-  // Do not use inline caching if the object is a non-global object
-  // that requires access checks.
-  if (IsAccessCheckNeeded()) {
-    result->DisallowCaching();
-  }
-
-  JSObject* js_object = JSObject::cast(this);
-
-  // Check for lookup interceptor except when bootstrapping.
-  if (js_object->HasNamedInterceptor() &&
-      !GetIsolate()->bootstrapper()->IsActive()) {
-    result->InterceptorResult(js_object);
-    return;
-  }
-
-  js_object->LookupOwnRealNamedProperty(name, result);
-  if (result->IsFound() || !search_hidden_prototypes) return;
-
-  Object* proto = js_object->GetPrototype();
-  if (!proto->IsJSReceiver()) return;
-  JSReceiver* receiver = JSReceiver::cast(proto);
-  if (receiver->map()->is_hidden_prototype()) {
-    receiver->LookupOwn(name, result, search_hidden_prototypes);
-  }
-}
-
-
-void JSReceiver::Lookup(Handle<Name> name, LookupResult* result) {
-  DisallowHeapAllocation no_gc;
-  // Ecma-262 3rd 8.6.2.4
-  Handle<Object> null_value = GetIsolate()->factory()->null_value();
-  for (Object* current = this;
-       current != *null_value;
-       current = JSObject::cast(current)->GetPrototype()) {
-    JSReceiver::cast(current)->LookupOwn(name, result, false);
-    if (result->IsFound()) return;
-  }
-  result->NotFound();
-}
-
-
 static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
   int len = array->length();
   for (int i = 0; i < len; i++) {
@@ -6186,7 +5675,7 @@
 
 static Handle<FixedArray> ReduceFixedArrayTo(
     Handle<FixedArray> array, int length) {
-  ASSERT(array->length() >= length);
+  DCHECK(array->length() >= length);
   if (array->length() == length) return array;
 
   Handle<FixedArray> new_array =
@@ -6210,7 +5699,7 @@
       own_property_count = object->map()->NumberOfDescribedProperties(
           OWN_DESCRIPTORS, DONT_SHOW);
     } else {
-      ASSERT(own_property_count == object->map()->NumberOfDescribedProperties(
+      DCHECK(own_property_count == object->map()->NumberOfDescribedProperties(
           OWN_DESCRIPTORS, DONT_SHOW));
     }
 
@@ -6267,7 +5756,7 @@
         index++;
       }
     }
-    ASSERT(index == storage->length());
+    DCHECK(index == storage->length());
 
     Handle<FixedArray> bridge_storage =
         isolate->factory()->NewFixedArray(
@@ -6299,19 +5788,16 @@
   USE(ContainsOnlyValidKeys);
   Isolate* isolate = object->GetIsolate();
   Handle<FixedArray> content = isolate->factory()->empty_fixed_array();
-  Handle<JSObject> arguments_boilerplate = Handle<JSObject>(
-      isolate->context()->native_context()->sloppy_arguments_boilerplate(),
-      isolate);
-  Handle<JSFunction> arguments_function = Handle<JSFunction>(
-      JSFunction::cast(arguments_boilerplate->map()->constructor()),
-      isolate);
+  Handle<JSFunction> arguments_function(
+      JSFunction::cast(isolate->sloppy_arguments_map()->constructor()));
 
   // Only collect keys if access is permitted.
-  for (Handle<Object> p = object;
-       *p != isolate->heap()->null_value();
-       p = Handle<Object>(p->GetPrototype(isolate), isolate)) {
-    if (p->IsJSProxy()) {
-      Handle<JSProxy> proxy(JSProxy::cast(*p), isolate);
+  for (PrototypeIterator iter(isolate, object,
+                              PrototypeIterator::START_AT_RECEIVER);
+       !iter.IsAtEnd(); iter.Advance()) {
+    if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
+      Handle<JSProxy> proxy(JSProxy::cast(*PrototypeIterator::GetCurrent(iter)),
+                            isolate);
       Handle<Object> args[] = { proxy };
       Handle<Object> names;
       ASSIGN_RETURN_ON_EXCEPTION(
@@ -6319,7 +5805,7 @@
           Execution::Call(isolate,
                           isolate->proxy_enumerate(),
                           object,
-                          ARRAY_SIZE(args),
+                          arraysize(args),
                           args),
           FixedArray);
       ASSIGN_RETURN_ON_EXCEPTION(
@@ -6330,7 +5816,8 @@
       break;
     }
 
-    Handle<JSObject> current(JSObject::cast(*p), isolate);
+    Handle<JSObject> current =
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
 
     // Check access rights if required.
     if (current->IsAccessCheckNeeded() &&
@@ -6349,7 +5836,7 @@
         isolate, content,
         FixedArray::UnionOfKeys(content, element_keys),
         FixedArray);
-    ASSERT(ContainsOnlyValidKeys(content));
+    DCHECK(ContainsOnlyValidKeys(content));
 
     // Add the element keys from the interceptor.
     if (current->HasIndexedInterceptor()) {
@@ -6361,7 +5848,7 @@
             FixedArray::AddKeysFromArrayLike(content, result),
             FixedArray);
       }
-      ASSERT(ContainsOnlyValidKeys(content));
+      DCHECK(ContainsOnlyValidKeys(content));
     }
 
     // We can cache the computed property keys if access checks are
@@ -6386,7 +5873,7 @@
         FixedArray::UnionOfKeys(
             content, GetEnumPropertyKeys(current, cache_enum_keys)),
         FixedArray);
-    ASSERT(ContainsOnlyValidKeys(content));
+    DCHECK(ContainsOnlyValidKeys(content));
 
     // Add the property keys from the interceptor.
     if (current->HasNamedInterceptor()) {
@@ -6398,7 +5885,7 @@
             FixedArray::AddKeysFromArrayLike(content, result),
             FixedArray);
       }
-      ASSERT(ContainsOnlyValidKeys(content));
+      DCHECK(ContainsOnlyValidKeys(content));
     }
 
     // If we only want own properties we bail out after the first
@@ -6422,7 +5909,7 @@
     Object* result = dictionary->ValueAt(entry);
     PropertyDetails details = dictionary->DetailsAt(entry);
     if (details.type() == CALLBACKS && result->IsAccessorPair()) {
-      ASSERT(!details.IsDontDelete());
+      DCHECK(details.IsConfigurable());
       if (details.attributes() != attributes) {
         dictionary->DetailsAtPut(
             entry,
@@ -6440,8 +5927,7 @@
                                      uint32_t index,
                                      Handle<Object> getter,
                                      Handle<Object> setter,
-                                     PropertyAttributes attributes,
-                                     v8::AccessControl access_control) {
+                                     PropertyAttributes attributes) {
   switch (object->GetElementsKind()) {
     case FAST_SMI_ELEMENTS:
     case FAST_ELEMENTS:
@@ -6498,77 +5984,24 @@
   Isolate* isolate = object->GetIsolate();
   Handle<AccessorPair> accessors = isolate->factory()->NewAccessorPair();
   accessors->SetComponents(*getter, *setter);
-  accessors->set_access_flags(access_control);
 
   SetElementCallback(object, index, accessors, attributes);
 }
 
 
-Handle<AccessorPair> JSObject::CreateAccessorPairFor(Handle<JSObject> object,
-                                                     Handle<Name> name) {
-  Isolate* isolate = object->GetIsolate();
-  LookupResult result(isolate);
-  object->LookupOwnRealNamedProperty(name, &result);
-  if (result.IsPropertyCallbacks()) {
-    // Note that the result can actually have IsDontDelete() == true when we
-    // e.g. have to fall back to the slow case while adding a setter after
-    // successfully reusing a map transition for a getter. Nevertheless, this is
-    // OK, because the assertion only holds for the whole addition of both
-    // accessors, not for the addition of each part. See first comment in
-    // DefinePropertyAccessor below.
-    Object* obj = result.GetCallbackObject();
-    if (obj->IsAccessorPair()) {
-      return AccessorPair::Copy(handle(AccessorPair::cast(obj), isolate));
-    }
-  }
-  return isolate->factory()->NewAccessorPair();
-}
-
-
-void JSObject::DefinePropertyAccessor(Handle<JSObject> object,
-                                      Handle<Name> name,
-                                      Handle<Object> getter,
-                                      Handle<Object> setter,
-                                      PropertyAttributes attributes,
-                                      v8::AccessControl access_control) {
-  // We could assert that the property is configurable here, but we would need
-  // to do a lookup, which seems to be a bit of overkill.
-  bool only_attribute_changes = getter->IsNull() && setter->IsNull();
-  if (object->HasFastProperties() && !only_attribute_changes &&
-      access_control == v8::DEFAULT &&
-      (object->map()->NumberOfOwnDescriptors() <= kMaxNumberOfDescriptors)) {
-    bool getterOk = getter->IsNull() ||
-        DefineFastAccessor(object, name, ACCESSOR_GETTER, getter, attributes);
-    bool setterOk = !getterOk || setter->IsNull() ||
-        DefineFastAccessor(object, name, ACCESSOR_SETTER, setter, attributes);
-    if (getterOk && setterOk) return;
-  }
-
-  Handle<AccessorPair> accessors = CreateAccessorPairFor(object, name);
-  accessors->SetComponents(*getter, *setter);
-  accessors->set_access_flags(access_control);
-
-  SetPropertyCallback(object, name, accessors, attributes);
-}
-
-
 bool Map::DictionaryElementsInPrototypeChainOnly() {
-  Heap* heap = GetHeap();
-
   if (IsDictionaryElementsKind(elements_kind())) {
     return false;
   }
 
-  for (Object* prototype = this->prototype();
-       prototype != heap->null_value();
-       prototype = prototype->GetPrototype(GetIsolate())) {
-    if (prototype->IsJSProxy()) {
+  for (PrototypeIterator iter(this); !iter.IsAtEnd(); iter.Advance()) {
+    if (iter.GetCurrent()->IsJSProxy()) {
       // Be conservative, don't walk into proxies.
       return true;
     }
 
     if (IsDictionaryElementsKind(
-            JSObject::cast(prototype)->map()->elements_kind())) {
+            JSObject::cast(iter.GetCurrent())->map()->elements_kind())) {
       return true;
     }
   }
@@ -6587,7 +6020,7 @@
   // Normalize elements to make this operation simple.
   bool had_dictionary_elements = object->HasDictionaryElements();
   Handle<SeededNumberDictionary> dictionary = NormalizeElements(object);
-  ASSERT(object->HasDictionaryElements() ||
+  DCHECK(object->HasDictionaryElements() ||
          object->HasDictionaryArgumentsElements());
   // Update the dictionary with the new CALLBACKS property.
   dictionary = SeededNumberDictionary::Set(dictionary, index, structure,
@@ -6621,15 +6054,18 @@
                                    Handle<Name> name,
                                    Handle<Object> structure,
                                    PropertyAttributes attributes) {
+  PropertyNormalizationMode mode = object->map()->is_prototype_map()
+                                       ? KEEP_INOBJECT_PROPERTIES
+                                       : CLEAR_INOBJECT_PROPERTIES;
   // Normalize object to make this operation simple.
-  NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+  NormalizeProperties(object, mode, 0);
 
   // For the global object allocate a new map to invalidate the global inline
   // caches which have a global property cell reference directly in the code.
   if (object->IsGlobalObject()) {
     Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
-    ASSERT(new_map->is_dictionary_map());
-    object->set_map(*new_map);
+    DCHECK(new_map->is_dictionary_map());
+    JSObject::MigrateToMap(object, new_map);
 
     // When running crankshaft, changing the map is not enough. We
     // need to deoptimize all functions that rely on this global
@@ -6640,35 +6076,32 @@
   // Update the dictionary with the new CALLBACKS property.
   PropertyDetails details = PropertyDetails(attributes, CALLBACKS, 0);
   SetNormalizedProperty(object, name, structure, details);
+
+  ReoptimizeIfPrototype(object);
 }
 
 
-void JSObject::DefineAccessor(Handle<JSObject> object,
-                              Handle<Name> name,
-                              Handle<Object> getter,
-                              Handle<Object> setter,
-                              PropertyAttributes attributes,
-                              v8::AccessControl access_control) {
+MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
+                                             Handle<Name> name,
+                                             Handle<Object> getter,
+                                             Handle<Object> setter,
+                                             PropertyAttributes attributes) {
   Isolate* isolate = object->GetIsolate();
   // Check access rights if needed.
   if (object->IsAccessCheckNeeded() &&
       !isolate->MayNamedAccess(object, name, v8::ACCESS_SET)) {
     isolate->ReportFailedAccessCheck(object, v8::ACCESS_SET);
-    // TODO(yangguo): Issue 3269, check for scheduled exception missing?
-    return;
+    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+    return isolate->factory()->undefined_value();
   }
 
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return;
-    ASSERT(proto->IsJSGlobalObject());
-    DefineAccessor(Handle<JSObject>::cast(proto),
-                   name,
-                   getter,
-                   setter,
-                   attributes,
-                   access_control);
-    return;
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return isolate->factory()->undefined_value();
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    DefineAccessor(Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
+                   name, getter, setter, attributes);
+    return isolate->factory()->undefined_value();
   }
 
   // Make sure that the top context does not change when doing callbacks or
@@ -6687,137 +6120,57 @@
   bool preexists = false;
   if (is_observed) {
     if (is_element) {
-      preexists = HasOwnElement(object, index);
+      Maybe<bool> maybe = HasOwnElement(object, index);
+      // Workaround for a GCC 4.4.3 bug which leads to "‘preexists’ may be used
+      // uninitialized in this function".
+      if (!maybe.has_value) {
+        DCHECK(false);
+        return isolate->factory()->undefined_value();
+      }
+      preexists = maybe.value;
       if (preexists && GetOwnElementAccessorPair(object, index).is_null()) {
         old_value =
             Object::GetElement(isolate, object, index).ToHandleChecked();
       }
     } else {
-      LookupResult lookup(isolate);
-      object->LookupOwn(name, &lookup, true);
-      preexists = lookup.IsProperty();
-      if (preexists && lookup.IsDataProperty()) {
-        old_value =
-            Object::GetPropertyOrElement(object, name).ToHandleChecked();
+      LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+      CHECK(GetPropertyAttributes(&it).has_value);
+      preexists = it.IsFound();
+      if (preexists && (it.state() == LookupIterator::DATA ||
+                        it.GetAccessors()->IsAccessorInfo())) {
+        old_value = GetProperty(&it).ToHandleChecked();
       }
     }
   }
 
   if (is_element) {
-    DefineElementAccessor(
-        object, index, getter, setter, attributes, access_control);
+    DefineElementAccessor(object, index, getter, setter, attributes);
   } else {
-    DefinePropertyAccessor(
-        object, name, getter, setter, attributes, access_control);
+    DCHECK(getter->IsSpecFunction() || getter->IsUndefined() ||
+           getter->IsNull());
+    DCHECK(setter->IsSpecFunction() || setter->IsUndefined() ||
+           setter->IsNull());
+    // At least one of the accessors needs to be a new value.
+    DCHECK(!getter->IsNull() || !setter->IsNull());
+    LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+    if (it.state() == LookupIterator::ACCESS_CHECK) {
+      // We already did an access check before. We do have access.
+      it.Next();
+    }
+    if (!getter->IsNull()) {
+      it.TransitionToAccessorProperty(ACCESSOR_GETTER, getter, attributes);
+    }
+    if (!setter->IsNull()) {
+      it.TransitionToAccessorProperty(ACCESSOR_SETTER, setter, attributes);
+    }
   }
 
   if (is_observed) {
     const char* type = preexists ? "reconfigure" : "add";
     EnqueueChangeRecord(object, type, name, old_value);
   }
-}
 
-
-static bool TryAccessorTransition(Handle<JSObject> self,
-                                  Handle<Map> transitioned_map,
-                                  int target_descriptor,
-                                  AccessorComponent component,
-                                  Handle<Object> accessor,
-                                  PropertyAttributes attributes) {
-  DescriptorArray* descs = transitioned_map->instance_descriptors();
-  PropertyDetails details = descs->GetDetails(target_descriptor);
-
-  // If the transition target was not callbacks, fall back to the slow case.
-  if (details.type() != CALLBACKS) return false;
-  Object* descriptor = descs->GetCallbacksObject(target_descriptor);
-  if (!descriptor->IsAccessorPair()) return false;
-
-  Object* target_accessor = AccessorPair::cast(descriptor)->get(component);
-  PropertyAttributes target_attributes = details.attributes();
-
-  // Reuse transition if adding same accessor with same attributes.
-  if (target_accessor == *accessor && target_attributes == attributes) {
-    JSObject::MigrateToMap(self, transitioned_map);
-    return true;
-  }
-
-  // If either not the same accessor, or not the same attributes, fall back to
-  // the slow case.
-  return false;
-}
-
-
-bool JSObject::DefineFastAccessor(Handle<JSObject> object,
-                                  Handle<Name> name,
-                                  AccessorComponent component,
-                                  Handle<Object> accessor,
-                                  PropertyAttributes attributes) {
-  ASSERT(accessor->IsSpecFunction() || accessor->IsUndefined());
-  Isolate* isolate = object->GetIsolate();
-  LookupResult result(isolate);
-  object->LookupOwn(name, &result);
-
-  if (result.IsFound() && !result.IsPropertyCallbacks()) {
-    return false;
-  }
-
-  // Return success if the same accessor with the same attributes already exist.
-  AccessorPair* source_accessors = NULL;
-  if (result.IsPropertyCallbacks()) {
-    Object* callback_value = result.GetCallbackObject();
-    if (callback_value->IsAccessorPair()) {
-      source_accessors = AccessorPair::cast(callback_value);
-      Object* entry = source_accessors->get(component);
-      if (entry == *accessor && result.GetAttributes() == attributes) {
-        return true;
-      }
-    } else {
-      return false;
-    }
-
-    int descriptor_number = result.GetDescriptorIndex();
-
-    object->map()->LookupTransition(*object, *name, &result);
-
-    if (result.IsFound()) {
-      Handle<Map> target(result.GetTransitionTarget());
-      ASSERT(target->NumberOfOwnDescriptors() ==
-             object->map()->NumberOfOwnDescriptors());
-      // This works since descriptors are sorted in order of addition.
-      ASSERT(Name::Equals(handle(object->map()->instance_descriptors()->
-             GetKey(descriptor_number)), name));
-      return TryAccessorTransition(object, target, descriptor_number,
-                                   component, accessor, attributes);
-    }
-  } else {
-    // If not, lookup a transition.
-    object->map()->LookupTransition(*object, *name, &result);
-
-    // If there is a transition, try to follow it.
-    if (result.IsFound()) {
-      Handle<Map> target(result.GetTransitionTarget());
-      int descriptor_number = target->LastAdded();
-      ASSERT(Name::Equals(name,
-          handle(target->instance_descriptors()->GetKey(descriptor_number))));
-      return TryAccessorTransition(object, target, descriptor_number,
-                                   component, accessor, attributes);
-    }
-  }
-
-  // If there is no transition yet, add a transition to a new accessor pair
-  // containing the accessor.  Allocate a new pair if there were no source
-  // accessors.  Otherwise, copy the pair and modify the accessor.
-  Handle<AccessorPair> accessors = source_accessors != NULL
-      ? AccessorPair::Copy(Handle<AccessorPair>(source_accessors))
-      : isolate->factory()->NewAccessorPair();
-  accessors->set(component, *accessor);
-
-  CallbacksDescriptor new_accessors_desc(name, accessors, attributes);
-  Handle<Map> new_map = Map::CopyInsertDescriptor(
-      handle(object->map()), &new_accessors_desc, INSERT_TRANSITION);
-
-  JSObject::MigrateToMap(object, new_map);
-  return true;
+  return isolate->factory()->undefined_value();
 }
 
 
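The hunk above replaces a raw bool return from HasOwnElement with a Maybe<bool>, so the caller checks has_value before reading value instead of branching on a possibly uninitialized flag (the GCC 4.4.3 warning the comment mentions). A minimal standalone sketch of that calling convention, using a simplified stand-in rather than V8's actual Maybe template:

    #include <cassert>

    // Simplified stand-in for Maybe<T>: a value plus a validity flag that
    // the caller must consult before reading the value.
    template <typename T>
    struct Maybe {
      bool has_value;
      T value;
    };

    template <typename T>
    Maybe<T> Just(T value) { return Maybe<T>{true, value}; }

    template <typename T>
    Maybe<T> Nothing() { return Maybe<T>{false, T()}; }

    // Mirrors the shape used in the hunk: bail out instead of reading an
    // indeterminate bool when the lookup could not complete.
    bool HasElementChecked(bool lookup_ok, bool found) {
      Maybe<bool> maybe = lookup_ok ? Just(found) : Nothing<bool>();
      if (!maybe.has_value) {
        assert(false);  // corresponds to DCHECK(false) above
        return false;
      }
      return maybe.value;
    }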
@@ -6836,10 +6189,11 @@
   }
 
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return object;
-    ASSERT(proto->IsJSGlobalObject());
-    return SetAccessor(Handle<JSObject>::cast(proto), info);
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return object;
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return SetAccessor(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), info);
   }
 
   // Make sure that the top context does not change when doing callbacks or
@@ -6885,11 +6239,11 @@
     SetElementCallback(object, index, info, info->property_attributes());
   } else {
     // Lookup the name.
-    LookupResult result(isolate);
-    object->LookupOwn(name, &result, true);
+    LookupIterator it(object, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+    CHECK(GetPropertyAttributes(&it).has_value);
     // ES5 forbids turning a property into an accessor if it's not
-    // configurable (that is IsDontDelete in ES3 and v8), see 8.6.1 (Table 5).
-    if (result.IsFound() && (result.IsReadOnly() || result.IsDontDelete())) {
+    // configurable. See 8.6.1 (Table 5).
+    if (it.IsFound() && (it.IsReadOnly() || !it.IsConfigurable())) {
       return factory->undefined_value();
     }
 
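The LookupIterator-based check above enforces the ES5 8.6.1 rule: an existing own property that is read-only or non-configurable blocks redefinition as an accessor. A reduced sketch of just that guard, where PropertyInfo is a hypothetical stand-in for the iterator's answers:

    // Hypothetical summary of what the lookup reports for a name.
    struct PropertyInfo {
      bool found;
      bool read_only;
      bool configurable;
    };

    // Returns whether SetAccessor may proceed for this property.
    bool MayRedefineAsAccessor(const PropertyInfo& p) {
      if (!p.found) return true;  // absent properties can always be added
      return !p.read_only && p.configurable;
    }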
@@ -6912,20 +6266,23 @@
   // Make the lookup and include prototypes.
   uint32_t index = 0;
   if (name->AsArrayIndex(&index)) {
-    for (Handle<Object> obj = object;
-         !obj->IsNull();
-         obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
-      if (obj->IsAccessCheckNeeded() &&
-          !isolate->MayNamedAccess(Handle<JSObject>::cast(obj), name,
+    for (PrototypeIterator iter(isolate, object,
+                                PrototypeIterator::START_AT_RECEIVER);
+         !iter.IsAtEnd(); iter.Advance()) {
+      Handle<Object> current = PrototypeIterator::GetCurrent(iter);
+      // Check access rights if needed.
+      if (current->IsAccessCheckNeeded() &&
+          !isolate->MayNamedAccess(Handle<JSObject>::cast(current), name,
                                    v8::ACCESS_HAS)) {
-        isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(obj),
+        isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(current),
                                          v8::ACCESS_HAS);
         RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
         return isolate->factory()->undefined_value();
       }
 
-      if (obj->IsJSObject() && JSObject::cast(*obj)->HasDictionaryElements()) {
-        JSObject* js_object = JSObject::cast(*obj);
+      if (current->IsJSObject() &&
+          Handle<JSObject>::cast(current)->HasDictionaryElements()) {
+        JSObject* js_object = JSObject::cast(*current);
         SeededNumberDictionary* dictionary = js_object->element_dictionary();
         int entry = dictionary->FindEntry(index);
         if (entry != SeededNumberDictionary::kNotFound) {
@@ -6939,26 +6296,33 @@
       }
     }
   } else {
-    for (Handle<Object> obj = object;
-         !obj->IsNull();
-         obj = handle(JSReceiver::cast(*obj)->GetPrototype(), isolate)) {
-      if (obj->IsAccessCheckNeeded() &&
-          !isolate->MayNamedAccess(Handle<JSObject>::cast(obj), name,
-                                   v8::ACCESS_HAS)) {
-        isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(obj),
-                                         v8::ACCESS_HAS);
-        RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-        return isolate->factory()->undefined_value();
-      }
-      LookupResult result(isolate);
-      JSReceiver::cast(*obj)->LookupOwn(name, &result);
-      if (result.IsFound()) {
-        if (result.IsReadOnly()) return isolate->factory()->undefined_value();
-        if (result.IsPropertyCallbacks()) {
-          Object* obj = result.GetCallbackObject();
-          if (obj->IsAccessorPair()) {
-            return handle(AccessorPair::cast(obj)->GetComponent(component),
-                          isolate);
+    LookupIterator it(object, name,
+                      LookupIterator::PROTOTYPE_CHAIN_SKIP_INTERCEPTOR);
+    for (; it.IsFound(); it.Next()) {
+      switch (it.state()) {
+        case LookupIterator::INTERCEPTOR:
+        case LookupIterator::NOT_FOUND:
+        case LookupIterator::TRANSITION:
+          UNREACHABLE();
+
+        case LookupIterator::ACCESS_CHECK:
+          if (it.HasAccess(v8::ACCESS_HAS)) continue;
+          isolate->ReportFailedAccessCheck(it.GetHolder<JSObject>(),
+                                           v8::ACCESS_HAS);
+          RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+          return isolate->factory()->undefined_value();
+
+        case LookupIterator::JSPROXY:
+          return isolate->factory()->undefined_value();
+
+        case LookupIterator::DATA:
+          continue;
+        case LookupIterator::ACCESSOR: {
+          Handle<Object> maybe_pair = it.GetAccessors();
+          if (maybe_pair->IsAccessorPair()) {
+            return handle(
+                AccessorPair::cast(*maybe_pair)->GetComponent(component),
+                isolate);
           }
         }
       }
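The rewritten accessor lookup above walks the prototype chain as a small state machine: access checks either pass or abort, proxies end the search, data properties are skipped, and only an AccessorPair produces a result. A toy reduction of that dispatch, with placeholder states instead of V8's LookupIterator:

    #include <vector>

    enum class State { ACCESS_CHECK, JSPROXY, DATA, ACCESSOR };

    // Returns true if an accessor is reached before a proxy ends the walk;
    // assumes every access check succeeds, as in it.HasAccess(...) above.
    bool FindAccessor(const std::vector<State>& chain) {
      for (State state : chain) {
        switch (state) {
          case State::ACCESS_CHECK: continue;     // access granted, go on
          case State::JSPROXY:      return false; // proxy: give up
          case State::DATA:         continue;     // plain data, skip it
          case State::ACCESSOR:     return true;  // found an accessor
        }
      }
      return false;  // chain exhausted
    }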
@@ -6977,7 +6341,7 @@
         Object* property =
             RawFastPropertyAt(FieldIndex::ForDescriptor(map(), i));
         if (descs->GetDetails(i).representation().IsDouble()) {
-          ASSERT(property->IsHeapNumber());
+          DCHECK(property->IsMutableHeapNumber());
           if (value->IsNumber() && property->Number() == value->Number()) {
             return descs->GetKey(i);
           }
@@ -7022,42 +6386,44 @@
 
 Handle<Map> Map::Normalize(Handle<Map> fast_map,
                            PropertyNormalizationMode mode) {
-  ASSERT(!fast_map->is_dictionary_map());
+  DCHECK(!fast_map->is_dictionary_map());
 
   Isolate* isolate = fast_map->GetIsolate();
-  Handle<NormalizedMapCache> cache(
-      isolate->context()->native_context()->normalized_map_cache());
+  Handle<Object> maybe_cache(isolate->native_context()->normalized_map_cache(),
+                             isolate);
+  bool use_cache = !maybe_cache->IsUndefined();
+  Handle<NormalizedMapCache> cache;
+  if (use_cache) cache = Handle<NormalizedMapCache>::cast(maybe_cache);
 
   Handle<Map> new_map;
-  if (cache->Get(fast_map, mode).ToHandle(&new_map)) {
+  if (use_cache && cache->Get(fast_map, mode).ToHandle(&new_map)) {
 #ifdef VERIFY_HEAP
-    if (FLAG_verify_heap) {
-      new_map->SharedMapVerify();
-    }
+    if (FLAG_verify_heap) new_map->DictionaryMapVerify();
 #endif
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
     if (FLAG_enable_slow_asserts) {
       // The cached map should match the newly created normalized map
       // bit-by-bit, except for the code cache, which can contain some ICs
       // that can be applied to the shared map.
-      Handle<Map> fresh = Map::CopyNormalized(
-          fast_map, mode, SHARED_NORMALIZED_MAP);
+      Handle<Map> fresh = Map::CopyNormalized(fast_map, mode);
 
-      ASSERT(memcmp(fresh->address(),
+      DCHECK(memcmp(fresh->address(),
                     new_map->address(),
                     Map::kCodeCacheOffset) == 0);
       STATIC_ASSERT(Map::kDependentCodeOffset ==
                     Map::kCodeCacheOffset + kPointerSize);
       int offset = Map::kDependentCodeOffset + kPointerSize;
-      ASSERT(memcmp(fresh->address() + offset,
+      DCHECK(memcmp(fresh->address() + offset,
                     new_map->address() + offset,
                     Map::kSize - offset) == 0);
     }
 #endif
   } else {
-    new_map = Map::CopyNormalized(fast_map, mode, SHARED_NORMALIZED_MAP);
-    cache->Set(fast_map, new_map);
-    isolate->counters()->normalized_maps()->Increment();
+    new_map = Map::CopyNormalized(fast_map, mode);
+    if (use_cache) {
+      cache->Set(fast_map, new_map);
+      isolate->counters()->normalized_maps()->Increment();
+    }
   }
   fast_map->NotifyLeafMapLayoutChange();
   return new_map;
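Map::Normalize now tolerates a missing normalized-map cache: it reads and writes the cache only when the native context actually provides one, and otherwise just calls CopyNormalized. The shape of that optional-cache memoization, sketched with placeholder types:

    #include <map>
    #include <string>

    using Cache = std::map<std::string, int>;

    // 'cache' may be null, mirroring the use_cache flag above; the
    // computation stands in for Map::CopyNormalized.
    int NormalizeWithOptionalCache(Cache* cache, const std::string& key) {
      if (cache != nullptr) {
        auto it = cache->find(key);
        if (it != cache->end()) return it->second;  // cache hit
      }
      int result = static_cast<int>(key.size());     // recompute
      if (cache != nullptr) (*cache)[key] = result;  // populate if cached
      return result;
    }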
@@ -7065,8 +6431,7 @@
 
 
 Handle<Map> Map::CopyNormalized(Handle<Map> map,
-                                PropertyNormalizationMode mode,
-                                NormalizedMapSharingMode sharing) {
+                                PropertyNormalizationMode mode) {
   int new_instance_size = map->instance_size();
   if (mode == CLEAR_INOBJECT_PROPERTIES) {
     new_instance_size -= map->inobject_properties() * kPointerSize;
@@ -7078,14 +6443,11 @@
     result->set_inobject_properties(map->inobject_properties());
   }
 
-  result->set_is_shared(sharing == SHARED_NORMALIZED_MAP);
   result->set_dictionary_map(true);
   result->set_migration_target(false);
 
 #ifdef VERIFY_HEAP
-  if (FLAG_verify_heap && result->is_shared()) {
-    result->SharedMapVerify();
-  }
+  if (FLAG_verify_heap) result->DictionaryMapVerify();
 #endif
 
   return result;
@@ -7101,7 +6463,6 @@
 
   result->set_pre_allocated_property_fields(
       map->pre_allocated_property_fields());
-  result->set_is_shared(false);
   result->ClearCodeCache(map->GetHeap());
   map->NotifyLeafMapLayoutChange();
   return result;
@@ -7114,13 +6475,11 @@
   // Sanity check. This path is only to be taken if the map owns its descriptor
   // array, implying that its NumberOfOwnDescriptors equals the number of
   // descriptors in the descriptor array.
-  ASSERT(map->NumberOfOwnDescriptors() ==
+  DCHECK(map->NumberOfOwnDescriptors() ==
          map->instance_descriptors()->number_of_descriptors());
 
   Handle<Map> result = CopyDropDescriptors(map);
   Handle<Name> name = descriptor->GetKey();
-  Handle<TransitionArray> transitions =
-      TransitionArray::CopyInsert(map, name, result, SIMPLE_TRANSITION);
 
   // Ensure there's space for the new descriptor in the shared descriptor array.
   if (descriptors->NumberOfSlackDescriptors() == 0) {
@@ -7133,45 +6492,55 @@
     }
   }
 
-  // Commit the state atomically.
-  DisallowHeapAllocation no_gc;
+  {
+    DisallowHeapAllocation no_gc;
+    descriptors->Append(descriptor);
+    result->InitializeDescriptors(*descriptors);
+  }
 
-  descriptors->Append(descriptor);
-  result->SetBackPointer(*map);
-  result->InitializeDescriptors(*descriptors);
-
-  ASSERT(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
-
-  map->set_transitions(*transitions);
-  map->set_owns_descriptors(false);
+  DCHECK(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
+  ConnectTransition(map, result, name, SIMPLE_TRANSITION);
 
   return result;
 }
 
 
+void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
+                            Handle<Name> name, SimpleTransitionFlag flag) {
+  parent->set_owns_descriptors(false);
+  if (parent->is_prototype_map()) {
+    DCHECK(child->is_prototype_map());
+  } else {
+    Handle<TransitionArray> transitions =
+        TransitionArray::CopyInsert(parent, name, child, flag);
+    parent->set_transitions(*transitions);
+    child->SetBackPointer(*parent);
+  }
+}
+
+
 Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
                                         Handle<DescriptorArray> descriptors,
                                         TransitionFlag flag,
                                         MaybeHandle<Name> maybe_name,
                                         SimpleTransitionFlag simple_flag) {
-  ASSERT(descriptors->IsSortedNoDuplicates());
+  DCHECK(descriptors->IsSortedNoDuplicates());
 
   Handle<Map> result = CopyDropDescriptors(map);
   result->InitializeDescriptors(*descriptors);
 
-  if (flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()) {
-    Handle<Name> name;
-    CHECK(maybe_name.ToHandle(&name));
-    Handle<TransitionArray> transitions = TransitionArray::CopyInsert(
-        map, name, result, simple_flag);
-    map->set_transitions(*transitions);
-    result->SetBackPointer(*map);
-  } else {
-    int length = descriptors->number_of_descriptors();
-    for (int i = 0; i < length; i++) {
-      descriptors->SetRepresentation(i, Representation::Tagged());
-      if (descriptors->GetDetails(i).type() == FIELD) {
-        descriptors->SetValue(i, HeapType::Any());
+  if (!map->is_prototype_map()) {
+    if (flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()) {
+      Handle<Name> name;
+      CHECK(maybe_name.ToHandle(&name));
+      ConnectTransition(map, result, name, simple_flag);
+    } else {
+      int length = descriptors->number_of_descriptors();
+      for (int i = 0; i < length; i++) {
+        descriptors->SetRepresentation(i, Representation::Tagged());
+        if (descriptors->GetDetails(i).type() == FIELD) {
+          descriptors->SetValue(i, HeapType::Any());
+        }
       }
     }
   }
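The duplicated transition bookkeeping (clearing owns_descriptors, inserting into the parent's transition array, setting the child's back pointer) is now funneled through Map::ConnectTransition, which additionally records no transitions for prototype maps. A minimal stand-in showing the extraction, with illustrative field names:

    // Illustrative-only map with just the linkage fields.
    struct FakeMap {
      bool is_prototype_map = false;
      bool owns_descriptors = true;
      FakeMap* back_pointer = nullptr;
      FakeMap* transition = nullptr;
    };

    // One helper replaces the copy-pasted linking at every call site.
    void ConnectTransitionSketch(FakeMap* parent, FakeMap* child) {
      parent->owns_descriptors = false;
      if (!parent->is_prototype_map) {  // prototype maps keep no transitions
        parent->transition = child;
        child->back_pointer = parent;
      }
    }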
@@ -7185,7 +6554,7 @@
 Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
                                         int new_descriptor,
                                         Handle<DescriptorArray> descriptors) {
-  ASSERT(descriptors->IsSortedNoDuplicates());
+  DCHECK(descriptors->IsSortedNoDuplicates());
 
   Handle<Map> result = CopyDropDescriptors(map);
 
@@ -7201,14 +6570,9 @@
   }
 
   result->set_unused_property_fields(unused_property_fields);
-  result->set_owns_descriptors(false);
 
   Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
-  Handle<TransitionArray> transitions = TransitionArray::CopyInsert(
-      map, name, result, SIMPLE_TRANSITION);
-
-  map->set_transitions(*transitions);
-  result->SetBackPointer(*map);
+  ConnectTransition(map, result, name, SIMPLE_TRANSITION);
 
   return result;
 }
@@ -7217,16 +6581,16 @@
 Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
                                     TransitionFlag flag) {
   if (flag == INSERT_TRANSITION) {
-    ASSERT(!map->HasElementsTransition() ||
+    DCHECK(!map->HasElementsTransition() ||
         ((map->elements_transition_map()->elements_kind() ==
           DICTIONARY_ELEMENTS ||
           IsExternalArrayElementsKind(
               map->elements_transition_map()->elements_kind())) &&
          (kind == DICTIONARY_ELEMENTS ||
           IsExternalArrayElementsKind(kind))));
-    ASSERT(!IsFastElementsKind(kind) ||
+    DCHECK(!IsFastElementsKind(kind) ||
            IsMoreGeneralElementsKindTransition(map->elements_kind(), kind));
-    ASSERT(kind != map->elements_kind());
+    DCHECK(kind != map->elements_kind());
   }
 
   bool insert_transition =
@@ -7237,12 +6601,10 @@
     // transfer ownership to the new map.
     Handle<Map> new_map = CopyDropDescriptors(map);
 
-    SetElementsTransitionMap(map, new_map);
+    ConnectElementsTransition(map, new_map);
 
     new_map->set_elements_kind(kind);
     new_map->InitializeDescriptors(map->instance_descriptors());
-    new_map->SetBackPointer(*map);
-    map->set_owns_descriptors(false);
     return new_map;
   }
 
@@ -7254,8 +6616,7 @@
   new_map->set_elements_kind(kind);
 
   if (insert_transition) {
-    SetElementsTransitionMap(map, new_map);
-    new_map->SetBackPointer(*map);
+    ConnectElementsTransition(map, new_map);
   }
 
   return new_map;
@@ -7263,7 +6624,7 @@
 
 
 Handle<Map> Map::CopyForObserved(Handle<Map> map) {
-  ASSERT(!map->is_observed());
+  DCHECK(!map->is_observed());
 
   Isolate* isolate = map->GetIsolate();
 
@@ -7273,22 +6634,18 @@
   if (map->owns_descriptors()) {
     new_map = CopyDropDescriptors(map);
   } else {
+    DCHECK(!map->is_prototype_map());
     new_map = Copy(map);
   }
 
-  Handle<TransitionArray> transitions = TransitionArray::CopyInsert(
-      map, isolate->factory()->observed_symbol(), new_map, FULL_TRANSITION);
-
-  map->set_transitions(*transitions);
-
   new_map->set_is_observed();
-
   if (map->owns_descriptors()) {
     new_map->InitializeDescriptors(map->instance_descriptors());
-    map->set_owns_descriptors(false);
   }
 
-  new_map->SetBackPointer(*map);
+  Handle<Name> name = isolate->factory()->observed_symbol();
+  ConnectTransition(map, new_map, name, FULL_TRANSITION);
+
   return new_map;
 }
 
@@ -7303,30 +6660,26 @@
 }
 
 
-Handle<Map> Map::Create(Handle<JSFunction> constructor,
-                        int extra_inobject_properties) {
-  Handle<Map> copy = Copy(handle(constructor->initial_map()));
+Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
+  Handle<Map> copy = Copy(handle(isolate->object_function()->initial_map()));
 
-  // Check that we do not overflow the instance size when adding the
-  // extra inobject properties.
-  int instance_size_delta = extra_inobject_properties * kPointerSize;
-  int max_instance_size_delta =
-      JSObject::kMaxInstanceSize - copy->instance_size();
-  int max_extra_properties = max_instance_size_delta >> kPointerSizeLog2;
+  // Check that we do not overflow the instance size when adding the extra
+  // inobject properties. If the instance size would overflow, allocate as
+  // many inobject properties as fit instead.
+  int max_extra_properties =
+      (JSObject::kMaxInstanceSize - JSObject::kHeaderSize) >> kPointerSizeLog2;
 
-  // If the instance size overflows, we allocate as many properties as we can as
-  // inobject properties.
-  if (extra_inobject_properties > max_extra_properties) {
-    instance_size_delta = max_instance_size_delta;
-    extra_inobject_properties = max_extra_properties;
+  if (inobject_properties > max_extra_properties) {
+    inobject_properties = max_extra_properties;
   }
 
+  int new_instance_size =
+      JSObject::kHeaderSize + kPointerSize * inobject_properties;
+
   // Adjust the map with the extra inobject properties.
-  int inobject_properties =
-      copy->inobject_properties() + extra_inobject_properties;
   copy->set_inobject_properties(inobject_properties);
   copy->set_unused_property_fields(inobject_properties);
-  copy->set_instance_size(copy->instance_size() + instance_size_delta);
+  copy->set_instance_size(new_instance_size);
   copy->set_visitor_id(StaticVisitorBase::GetVisitorId(*copy));
   return copy;
 }
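Map::Create now derives the instance size from the object header alone and clamps the requested in-object property count so the result cannot exceed JSObject::kMaxInstanceSize. A worked version of that arithmetic under assumed constants (the real kMaxInstanceSize and kHeaderSize depend on the build configuration):

    // Returns the clamped instance size; writes the clamped property
    // count through 'clamped_out'. The constants below are assumptions.
    int ClampedInstanceSize(int requested_inobject, int* clamped_out) {
      const int kPointerSize = 8;                       // 64-bit build
      const int kHeaderSize = 3 * kPointerSize;         // illustrative
      const int kMaxInstanceSize = 255 * kPointerSize;  // illustrative
      int max_extra = (kMaxInstanceSize - kHeaderSize) / kPointerSize;
      if (requested_inobject > max_extra) requested_inobject = max_extra;
      *clamped_out = requested_inobject;
      return kHeaderSize + kPointerSize * requested_inobject;
    }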
@@ -7346,6 +6699,204 @@
 }
 
 
+bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
+  PropertyDetails details = GetDetails(descriptor);
+  switch (details.type()) {
+    case FIELD:
+      return value->FitsRepresentation(details.representation()) &&
+             GetFieldType(descriptor)->NowContains(value);
+
+    case CONSTANT:
+      DCHECK(GetConstant(descriptor) != value ||
+             value->FitsRepresentation(details.representation()));
+      return GetConstant(descriptor) == value;
+
+    case CALLBACKS:
+      return false;
+
+    case NORMAL:
+      UNREACHABLE();
+      break;
+  }
+
+  UNREACHABLE();
+  return false;
+}
+
+
+Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
+                                        Handle<Object> value) {
+  // Dictionaries can store any property value.
+  if (map->is_dictionary_map()) return map;
+
+  // Migrate to the newest map before storing the property.
+  map = Update(map);
+
+  Handle<DescriptorArray> descriptors(map->instance_descriptors());
+
+  if (descriptors->CanHoldValue(descriptor, *value)) return map;
+
+  Isolate* isolate = map->GetIsolate();
+  Representation representation = value->OptimalRepresentation();
+  Handle<HeapType> type = value->OptimalType(isolate, representation);
+
+  return GeneralizeRepresentation(map, descriptor, representation, type,
+                                  FORCE_FIELD);
+}
+
+
+Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
+                                          Handle<Object> value,
+                                          PropertyAttributes attributes,
+                                          StoreFromKeyed store_mode) {
+  // Dictionary maps can always have additional data properties.
+  if (map->is_dictionary_map()) return map;
+
+  // Migrate to the newest map before storing the property.
+  map = Update(map);
+
+  int index = map->SearchTransition(*name);
+  if (index != TransitionArray::kNotFound) {
+    Handle<Map> transition(map->GetTransition(index));
+    int descriptor = transition->LastAdded();
+
+    // TODO(verwaest): Handle attributes better.
+    DescriptorArray* descriptors = transition->instance_descriptors();
+    if (descriptors->GetDetails(descriptor).attributes() != attributes) {
+      return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
+    }
+
+    return Map::PrepareForDataProperty(transition, descriptor, value);
+  }
+
+  TransitionFlag flag = INSERT_TRANSITION;
+  MaybeHandle<Map> maybe_map;
+  if (value->IsJSFunction()) {
+    maybe_map = Map::CopyWithConstant(map, name, value, attributes, flag);
+  } else if (!map->TooManyFastProperties(store_mode)) {
+    Isolate* isolate = name->GetIsolate();
+    Representation representation = value->OptimalRepresentation();
+    Handle<HeapType> type = value->OptimalType(isolate, representation);
+    maybe_map =
+        Map::CopyWithField(map, name, type, attributes, representation, flag);
+  }
+
+  Handle<Map> result;
+  if (!maybe_map.ToHandle(&result)) {
+    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
+  }
+
+  return result;
+}
+
+
+Handle<Map> Map::ReconfigureDataProperty(Handle<Map> map, int descriptor,
+                                         PropertyAttributes attributes) {
+  // Dictionaries have to be reconfigured in-place.
+  DCHECK(!map->is_dictionary_map());
+
+  // For now, give up on transitioning and just create a unique map.
+  // TODO(verwaest/ishell): Cache transitions with different attributes.
+  return CopyGeneralizeAllRepresentations(map, descriptor, FORCE_FIELD,
+                                          attributes, "attributes mismatch");
+}
+
+
+Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
+                                              Handle<Name> name,
+                                              AccessorComponent component,
+                                              Handle<Object> accessor,
+                                              PropertyAttributes attributes) {
+  Isolate* isolate = name->GetIsolate();
+
+  // Dictionary maps can always have additional accessor properties.
+  if (map->is_dictionary_map()) {
+    // For global objects, property cells are inlined. We need to change the
+    // map.
+    if (map->IsGlobalObjectMap()) return Copy(map);
+    return map;
+  }
+
+  // Migrate to the newest map before transitioning to the new property.
+  map = Update(map);
+
+  PropertyNormalizationMode mode = map->is_prototype_map()
+                                       ? KEEP_INOBJECT_PROPERTIES
+                                       : CLEAR_INOBJECT_PROPERTIES;
+
+  int index = map->SearchTransition(*name);
+  if (index != TransitionArray::kNotFound) {
+    Handle<Map> transition(map->GetTransition(index));
+    DescriptorArray* descriptors = transition->instance_descriptors();
+    // Fast path: assume that we're modifying the last added descriptor.
+    int descriptor = transition->LastAdded();
+    if (descriptors->GetKey(descriptor) != *name) {
+      // If not, search for the descriptor.
+      descriptor = descriptors->SearchWithCache(*name, *transition);
+    }
+
+    if (descriptors->GetDetails(descriptor).type() != CALLBACKS) {
+      return Map::Normalize(map, mode);
+    }
+
+    // TODO(verwaest): Handle attributes better.
+    if (descriptors->GetDetails(descriptor).attributes() != attributes) {
+      return Map::Normalize(map, mode);
+    }
+
+    Handle<Object> maybe_pair(descriptors->GetValue(descriptor), isolate);
+    if (!maybe_pair->IsAccessorPair()) {
+      return Map::Normalize(map, mode);
+    }
+
+    Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
+    if (pair->get(component) != *accessor) {
+      return Map::Normalize(map, mode);
+    }
+
+    return transition;
+  }
+
+  Handle<AccessorPair> pair;
+  DescriptorArray* old_descriptors = map->instance_descriptors();
+  int descriptor = old_descriptors->SearchWithCache(*name, *map);
+  if (descriptor != DescriptorArray::kNotFound) {
+    PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
+    if (old_details.type() != CALLBACKS) {
+      return Map::Normalize(map, mode);
+    }
+
+    if (old_details.attributes() != attributes) {
+      return Map::Normalize(map, mode);
+    }
+
+    Handle<Object> maybe_pair(old_descriptors->GetValue(descriptor), isolate);
+    if (!maybe_pair->IsAccessorPair()) {
+      return Map::Normalize(map, mode);
+    }
+
+    Object* current = Handle<AccessorPair>::cast(maybe_pair)->get(component);
+    if (current == *accessor) return map;
+
+    if (!current->IsTheHole()) {
+      return Map::Normalize(map, mode);
+    }
+
+    pair = AccessorPair::Copy(Handle<AccessorPair>::cast(maybe_pair));
+  } else if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors ||
+             map->TooManyFastProperties(CERTAINLY_NOT_STORE_FROM_KEYED)) {
+    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
+  } else {
+    pair = isolate->factory()->NewAccessorPair();
+  }
+
+  pair->set(component, *accessor);
+  TransitionFlag flag = INSERT_TRANSITION;
+  CallbacksDescriptor new_desc(name, pair, attributes);
+  return Map::CopyInsertDescriptor(map, &new_desc, flag);
+}
+
+
 Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
                                    Descriptor* descriptor,
                                    TransitionFlag flag) {
@@ -7413,17 +6964,20 @@
   if (attributes != NONE) {
     for (int i = 0; i < size; ++i) {
       Object* value = desc->GetValue(i);
+      Name* key = desc->GetKey(i);
       PropertyDetails details = desc->GetDetails(i);
-      int mask = DONT_DELETE | DONT_ENUM;
-      // READ_ONLY is an invalid attribute for JS setters/getters.
-      if (details.type() != CALLBACKS || !value->IsAccessorPair()) {
-        mask |= READ_ONLY;
+      // Bulk attribute changes never affect private properties.
+      if (!key->IsSymbol() || !Symbol::cast(key)->is_private()) {
+        int mask = DONT_DELETE | DONT_ENUM;
+        // READ_ONLY is an invalid attribute for JS setters/getters.
+        if (details.type() != CALLBACKS || !value->IsAccessorPair()) {
+          mask |= READ_ONLY;
+        }
+        details = details.CopyAddAttributes(
+            static_cast<PropertyAttributes>(attributes & mask));
       }
-      details = details.CopyAddAttributes(
-          static_cast<PropertyAttributes>(attributes & mask));
-      Descriptor inner_desc(handle(desc->GetKey(i)),
-                            handle(value, desc->GetIsolate()),
-                            details);
+      Descriptor inner_desc(
+          handle(key), handle(value, desc->GetIsolate()), details);
       descriptors->Set(i, &inner_desc, witness);
     }
   } else {
@@ -7447,7 +7001,7 @@
   descriptor->KeyToUniqueName();
 
   Handle<Name> key = descriptor->GetKey();
-  ASSERT(*key == descriptors->GetKey(insertion_index));
+  DCHECK(*key == descriptors->GetKey(insertion_index));
 
   Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
       descriptors, map->NumberOfOwnDescriptors());
@@ -7501,7 +7055,7 @@
 void Map::RemoveFromCodeCache(Name* name, Code* code, int index) {
   // No GC is supposed to happen between a call to IndexInCodeCache and
   // RemoveFromCodeCache, so the code cache must be there.
-  ASSERT(!code_cache()->IsFixedArray());
+  DCHECK(!code_cache()->IsFixedArray());
   CodeCache::cast(code_cache())->RemoveByIndex(name, code, index);
 }
 
@@ -7519,9 +7073,9 @@
         constructor_(constructor) { }
 
   void StartIfNotStarted() {
-    ASSERT(!(*IteratorField())->IsSmi() || IsIterating());
+    DCHECK(!(*IteratorField())->IsSmi() || IsIterating());
     if (!(*IteratorField())->IsSmi()) {
-      ASSERT(*IteratorField() == constructor_);
+      DCHECK(*IteratorField() == constructor_);
       *IteratorField() = Smi::FromInt(-1);
     }
   }
@@ -7532,7 +7086,7 @@
   }
 
   Map* Next() {
-    ASSERT(IsIterating());
+    DCHECK(IsIterating());
     int value = Smi::cast(*IteratorField())->value();
     int index = -value - 1;
     int number_of_transitions = transition_array_->number_of_transitions();
@@ -7568,7 +7122,7 @@
 
   void StartIfNotStarted() {
     if (!(*IteratorField())->IsSmi()) {
-      ASSERT(*IteratorField() == constructor_);
+      DCHECK(*IteratorField() == constructor_);
       *IteratorField() = Smi::FromInt(0);
     }
   }
@@ -7579,7 +7133,7 @@
   }
 
   Map* Next() {
-    ASSERT(IsIterating());
+    DCHECK(IsIterating());
     int transitionNumber = Smi::cast(*IteratorField())->value();
     if (transitionNumber < NumberOfTransitions()) {
       *IteratorField() = Smi::FromInt(transitionNumber + 1);
@@ -7727,7 +7281,7 @@
     }
     UpdateNormalTypeCache(code_cache, name, code);
   } else {
-    ASSERT(code_cache->default_cache()->IsFixedArray());
+    DCHECK(code_cache->default_cache()->IsFixedArray());
     UpdateDefaultCache(code_cache, name, code);
   }
 }
@@ -7782,7 +7336,7 @@
   // multiple of the entry size.
   int new_length = length + ((length >> 1)) + kCodeCacheEntrySize;
   new_length = new_length - new_length % kCodeCacheEntrySize;
-  ASSERT((new_length % kCodeCacheEntrySize) == 0);
+  DCHECK((new_length % kCodeCacheEntrySize) == 0);
   cache = FixedArray::CopySize(cache, new_length);
 
   // Add the (name, code) pair to the new cache.
@@ -7859,17 +7413,17 @@
 
 void CodeCache::RemoveByIndex(Object* name, Code* code, int index) {
   if (code->type() == Code::NORMAL) {
-    ASSERT(!normal_type_cache()->IsUndefined());
+    DCHECK(!normal_type_cache()->IsUndefined());
     CodeCacheHashTable* cache = CodeCacheHashTable::cast(normal_type_cache());
-    ASSERT(cache->GetIndex(Name::cast(name), code->flags()) == index);
+    DCHECK(cache->GetIndex(Name::cast(name), code->flags()) == index);
     cache->RemoveByIndex(index);
   } else {
     FixedArray* array = default_cache();
-    ASSERT(array->length() >= index && array->get(index)->IsCode());
+    DCHECK(array->length() >= index && array->get(index)->IsCode());
     // Use null instead of undefined for deleted elements to distinguish
     // deleted elements from unused elements.  This distinction is used
     // when looking up in the cache and when updating the cache.
-    ASSERT_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset);
+    DCHECK_EQ(1, kCodeCacheEntryCodeOffset - kCodeCacheEntryNameOffset);
     array->set_null(index - 1);  // Name.
     array->set_null(index);  // Code.
   }
@@ -7888,7 +7442,7 @@
   CodeCacheHashTableKey(Handle<Name> name, Handle<Code> code)
       : name_(name), flags_(code->flags()), code_(code) { }
 
-  bool IsMatch(Object* other) V8_OVERRIDE {
+  bool IsMatch(Object* other) OVERRIDE {
     if (!other->IsFixedArray()) return false;
     FixedArray* pair = FixedArray::cast(other);
     Name* name = Name::cast(pair->get(0));
@@ -7903,16 +7457,16 @@
     return name->Hash() ^ flags;
   }
 
-  uint32_t Hash() V8_OVERRIDE { return NameFlagsHashHelper(*name_, flags_); }
+  uint32_t Hash() OVERRIDE { return NameFlagsHashHelper(*name_, flags_); }
 
-  uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+  uint32_t HashForObject(Object* obj) OVERRIDE {
     FixedArray* pair = FixedArray::cast(obj);
     Name* name = Name::cast(pair->get(0));
     Code* code = Code::cast(pair->get(1));
     return NameFlagsHashHelper(name, code->flags());
   }
 
-  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     Handle<Code> code = code_.ToHandleChecked();
     Handle<FixedArray> pair = isolate->factory()->NewFixedArray(2);
     pair->set(0, *name_);
@@ -7962,7 +7516,7 @@
 
 
 void CodeCacheHashTable::RemoveByIndex(int index) {
-  ASSERT(index >= 0);
+  DCHECK(index >= 0);
   Heap* heap = GetHeap();
   set(EntryToIndex(index), heap->the_hole_value());
   set(EntryToIndex(index) + 1, heap->the_hole_value());
@@ -7983,7 +7537,7 @@
     code_cache->set_cache(*result);
   } else {
     // This entry shouldn't be contained in the cache yet.
-    ASSERT(PolymorphicCodeCacheHashTable::cast(code_cache->cache())
+    DCHECK(PolymorphicCodeCacheHashTable::cast(code_cache->cache())
                ->Lookup(maps, flags)->IsUndefined());
   }
   Handle<PolymorphicCodeCacheHashTable> hash_table =
@@ -8016,7 +7570,7 @@
       : maps_(maps),
         code_flags_(code_flags) {}
 
-  bool IsMatch(Object* other) V8_OVERRIDE {
+  bool IsMatch(Object* other) OVERRIDE {
     MapHandleList other_maps(kDefaultListAllocationSize);
     int other_flags;
     FromObject(other, &other_flags, &other_maps);
@@ -8051,18 +7605,18 @@
     return hash;
   }
 
-  uint32_t Hash() V8_OVERRIDE {
+  uint32_t Hash() OVERRIDE {
     return MapsHashHelper(maps_, code_flags_);
   }
 
-  uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+  uint32_t HashForObject(Object* obj) OVERRIDE {
     MapHandleList other_maps(kDefaultListAllocationSize);
     int other_flags;
     FromObject(obj, &other_flags, &other_maps);
     return MapsHashHelper(&other_maps, other_flags);
   }
 
-  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+  MUST_USE_RESULT Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     // The maps in |maps_| must be copied to a newly allocated FixedArray,
     // both because the referenced MapList is short-lived, and because C++
     // objects can't be stored in the heap anyway.
@@ -8123,10 +7677,10 @@
 
 
 void FixedArray::Shrink(int new_length) {
-  ASSERT(0 <= new_length && new_length <= length());
+  DCHECK(0 <= new_length && new_length <= length());
   if (new_length < length()) {
-    RightTrimFixedArray<Heap::FROM_MUTATOR>(
-        GetHeap(), this, length() - new_length);
+    GetHeap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(
+        this, length() - new_length);
   }
 }
 
@@ -8134,7 +7688,7 @@
 MaybeHandle<FixedArray> FixedArray::AddKeysFromArrayLike(
     Handle<FixedArray> content,
     Handle<JSObject> array) {
-  ASSERT(array->IsJSArray() || array->HasSloppyArgumentsElements());
+  DCHECK(array->IsJSArray() || array->HasSloppyArgumentsElements());
   ElementsAccessor* accessor = array->GetElementsAccessor();
   Handle<FixedArray> result;
   ASSIGN_RETURN_ON_EXCEPTION(
@@ -8142,12 +7696,12 @@
       accessor->AddElementsToFixedArray(array, array, content),
       FixedArray);
 
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   if (FLAG_enable_slow_asserts) {
     DisallowHeapAllocation no_allocation;
     for (int i = 0; i < result->length(); i++) {
       Object* current = result->get(i);
-      ASSERT(current->IsNumber() || current->IsName());
+      DCHECK(current->IsNumber() || current->IsName());
     }
   }
 #endif
@@ -8168,12 +7722,12 @@
           Handle<FixedArrayBase>::cast(second)),
       FixedArray);
 
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   if (FLAG_enable_slow_asserts) {
     DisallowHeapAllocation no_allocation;
     for (int i = 0; i < result->length(); i++) {
       Object* current = result->get(i);
-      ASSERT(current->IsNumber() || current->IsName());
+      DCHECK(current->IsNumber() || current->IsName());
     }
   }
 #endif
@@ -8225,7 +7779,7 @@
 Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
                                                   int number_of_descriptors,
                                                   int slack) {
-  ASSERT(0 <= number_of_descriptors);
+  DCHECK(0 <= number_of_descriptors);
   Factory* factory = isolate->factory();
   // Do not use DescriptorArray::cast on incomplete object.
   int size = number_of_descriptors + slack;
@@ -8253,10 +7807,10 @@
 void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
                                    FixedArray* new_cache,
                                    Object* new_index_cache) {
-  ASSERT(bridge_storage->length() >= kEnumCacheBridgeLength);
-  ASSERT(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
-  ASSERT(!IsEmpty());
-  ASSERT(!HasEnumCache() || new_cache->length() > GetEnumCache()->length());
+  DCHECK(bridge_storage->length() >= kEnumCacheBridgeLength);
+  DCHECK(new_index_cache->IsSmi() || new_index_cache->IsFixedArray());
+  DCHECK(!IsEmpty());
+  DCHECK(!HasEnumCache() || new_cache->length() > GetEnumCache()->length());
   FixedArray::cast(bridge_storage)->
     set(kEnumCacheBridgeCacheIndex, new_cache);
   FixedArray::cast(bridge_storage)->
@@ -8332,7 +7886,7 @@
       parent_index = child_index;
     }
   }
-  ASSERT(IsSortedNoDuplicates());
+  DCHECK(IsSortedNoDuplicates());
 }
 
 
@@ -8351,13 +7905,11 @@
 
 
 Handle<DeoptimizationInputData> DeoptimizationInputData::New(
-    Isolate* isolate,
-    int deopt_entry_count,
-    PretenureFlag pretenure) {
-  ASSERT(deopt_entry_count > 0);
+    Isolate* isolate, int deopt_entry_count, PretenureFlag pretenure) {
+  DCHECK(deopt_entry_count > 0);
   return Handle<DeoptimizationInputData>::cast(
-      isolate->factory()->NewFixedArray(
-          LengthFor(deopt_entry_count), pretenure));
+      isolate->factory()->NewFixedArray(LengthFor(deopt_entry_count),
+                                        pretenure));
 }
 
 
@@ -8396,7 +7948,7 @@
 
 
 String::FlatContent String::GetFlatContent() {
-  ASSERT(!AllowHeapAllocation::IsAllowed());
+  DCHECK(!AllowHeapAllocation::IsAllowed());
   int length = this->length();
   StringShape shape(this);
   String* string = this;
@@ -8414,7 +7966,7 @@
     offset = slice->offset();
     string = slice->parent();
     shape = StringShape(string);
-    ASSERT(shape.representation_tag() != kConsStringTag &&
+    DCHECK(shape.representation_tag() != kConsStringTag &&
            shape.representation_tag() != kSlicedStringTag);
   }
   if (shape.encoding_tag() == kOneByteStringTag) {
@@ -8422,11 +7974,11 @@
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqOneByteString::cast(string)->GetChars();
     } else {
-      start = ExternalAsciiString::cast(string)->GetChars();
+      start = ExternalOneByteString::cast(string)->GetChars();
     }
     return FlatContent(start + offset, length);
   } else {
-    ASSERT(shape.encoding_tag() == kTwoByteStringTag);
+    DCHECK(shape.encoding_tag() == kTwoByteStringTag);
     const uc16* start;
     if (shape.representation_tag() == kSeqStringTag) {
       start = SeqTwoByteString::cast(string)->GetChars();
@@ -8497,7 +8049,7 @@
 
 
 const uc16* String::GetTwoByteData(unsigned start) {
-  ASSERT(!IsOneByteRepresentationUnderneath());
+  DCHECK(!IsOneByteRepresentationUnderneath());
   switch (StringShape(this).representation_tag()) {
     case kSeqStringTag:
       return SeqTwoByteString::cast(this)->SeqTwoByteStringGetData(start);
@@ -8607,21 +8159,21 @@
 FlatStringReader::FlatStringReader(Isolate* isolate, Vector<const char> input)
     : Relocatable(isolate),
       str_(0),
-      is_ascii_(true),
+      is_one_byte_(true),
       length_(input.length()),
-      start_(input.start()) { }
+      start_(input.start()) {}
 
 
 void FlatStringReader::PostGarbageCollection() {
   if (str_ == NULL) return;
   Handle<String> str(str_);
-  ASSERT(str->IsFlat());
+  DCHECK(str->IsFlat());
   DisallowHeapAllocation no_gc;
   // This does not actually prevent the vector from being relocated later.
   String::FlatContent content = str->GetFlatContent();
-  ASSERT(content.IsFlat());
-  is_ascii_ = content.IsAscii();
-  if (is_ascii_) {
+  DCHECK(content.IsFlat());
+  is_one_byte_ = content.IsOneByte();
+  if (is_one_byte_) {
     start_ = content.ToOneByteVector().start();
   } else {
     start_ = content.ToUC16Vector().start();
@@ -8630,26 +8182,26 @@
 
 
 void ConsStringIteratorOp::Initialize(ConsString* cons_string, int offset) {
-  ASSERT(cons_string != NULL);
+  DCHECK(cons_string != NULL);
   root_ = cons_string;
   consumed_ = offset;
   // Force stack blown condition to trigger restart.
   depth_ = 1;
   maximum_depth_ = kStackSize + depth_;
-  ASSERT(StackBlown());
+  DCHECK(StackBlown());
 }
 
 
 String* ConsStringIteratorOp::Continue(int* offset_out) {
-  ASSERT(depth_ != 0);
-  ASSERT_EQ(0, *offset_out);
+  DCHECK(depth_ != 0);
+  DCHECK_EQ(0, *offset_out);
   bool blew_stack = StackBlown();
   String* string = NULL;
   // Get the next leaf if there is one.
   if (!blew_stack) string = NextLeaf(&blew_stack);
   // Restart search from root.
   if (blew_stack) {
-    ASSERT(string == NULL);
+    DCHECK(string == NULL);
     string = Search(offset_out);
   }
   // Ensure future calls return null immediately.
@@ -8708,7 +8260,7 @@
       // Pop stack so next iteration is in correct place.
       Pop();
     }
-    ASSERT(length != 0);
+    DCHECK(length != 0);
     // Adjust return values and exit.
     consumed_ = offset + length;
     *offset_out = consumed - offset;
@@ -8754,7 +8306,7 @@
       if ((type & kStringRepresentationMask) != kConsStringTag) {
         AdjustMaximumDepth();
         int length = string->length();
-        ASSERT(length != 0);
+        DCHECK(length != 0);
         consumed_ += length;
         return string;
       }
@@ -8768,7 +8320,7 @@
 
 
 uint16_t ConsString::ConsStringGet(int index) {
-  ASSERT(index >= 0 && index < this->length());
+  DCHECK(index >= 0 && index < this->length());
 
   // Check for a flattened cons string
   if (second()->length() == 0) {
@@ -8812,11 +8364,10 @@
   int from = f;
   int to = t;
   while (true) {
-    ASSERT(0 <= from && from <= to && to <= source->length());
+    DCHECK(0 <= from && from <= to && to <= source->length());
     switch (StringShape(source).full_representation_tag()) {
       case kOneByteStringTag | kExternalStringTag: {
-        CopyChars(sink,
-                  ExternalAsciiString::cast(source)->GetChars() + from,
+        CopyChars(sink, ExternalOneByteString::cast(source)->GetChars() + from,
                   to - from);
         return;
       }
@@ -8862,7 +8413,7 @@
             String* second = cons_string->second();
             // When repeatedly appending to a string, we get a cons string that
             // is unbalanced to the left, a list, essentially.  We inline the
-            // common case of sequential ascii right child.
+            // common case of sequential one-byte right child.
             if (to - boundary == 1) {
               sink[boundary - from] = static_cast<sinkchar>(second->Get(0));
             } else if (second->IsSeqOneByteString()) {
@@ -8900,7 +8451,7 @@
                                   Vector<const SourceChar> src,
                                   bool include_ending_line) {
   const int src_len = src.length();
-  StringSearch<uint8_t, SourceChar> search(isolate, STATIC_ASCII_VECTOR("\n"));
+  StringSearch<uint8_t, SourceChar> search(isolate, STATIC_CHAR_VECTOR("\n"));
 
   // Find and record line ends.
   int position = 0;
@@ -8929,8 +8480,8 @@
   { DisallowHeapAllocation no_allocation;  // ensure vectors stay valid.
     // Dispatch on type of strings.
     String::FlatContent content = src->GetFlatContent();
-    ASSERT(content.IsFlat());
-    if (content.IsAscii()) {
+    DCHECK(content.IsFlat());
+    if (content.IsOneByte()) {
       CalculateLineEndsImpl(isolate,
                             &line_ends,
                             content.ToOneByteVector(),
@@ -8957,36 +8508,7 @@
 static inline bool CompareRawStringContents(const Char* const a,
                                             const Char* const b,
                                             int length) {
-  int i = 0;
-#ifndef V8_HOST_CAN_READ_UNALIGNED
-  // If this architecture isn't comfortable reading unaligned ints
-  // then we have to check that the strings are aligned before
-  // comparing them blockwise.
-  const int kAlignmentMask = sizeof(uint32_t) - 1;  // NOLINT
-  uint32_t pa_addr = reinterpret_cast<uint32_t>(a);
-  uint32_t pb_addr = reinterpret_cast<uint32_t>(b);
-  if (((pa_addr & kAlignmentMask) | (pb_addr & kAlignmentMask)) == 0) {
-#endif
-    const int kStepSize = sizeof(int) / sizeof(Char);  // NOLINT
-    int endpoint = length - kStepSize;
-    // Compare blocks until we reach near the end of the string.
-    for (; i <= endpoint; i += kStepSize) {
-      uint32_t wa = *reinterpret_cast<const uint32_t*>(a + i);
-      uint32_t wb = *reinterpret_cast<const uint32_t*>(b + i);
-      if (wa != wb) {
-        return false;
-      }
-    }
-#ifndef V8_HOST_CAN_READ_UNALIGNED
-  }
-#endif
-  // Compare the remaining characters that didn't fit into a block.
-  for (; i < length; i++) {
-    if (a[i] != b[i]) {
-      return false;
-    }
-  }
-  return true;
+  return CompareChars(a, b, length) == 0;
 }
 
 
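The hand-rolled blockwise comparison above, along with its unaligned-access guard, collapses into one CompareChars call. An element-by-element sketch with memcmp-style semantics (zero means equal), which is also what the deleted fast path computed:

    #include <cstddef>

    // Works for both one-byte and two-byte character types.
    template <typename Char>
    int CompareCharsSketch(const Char* a, const Char* b, size_t length) {
      for (size_t i = 0; i < length; i++) {
        if (a[i] != b[i]) return a[i] < b[i] ? -1 : 1;
      }
      return 0;  // identical over the full range
    }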
@@ -8994,7 +8516,7 @@
 class RawStringComparator : public AllStatic {
  public:
   static inline bool compare(const Chars1* a, const Chars2* b, int len) {
-    ASSERT(sizeof(Chars1) != sizeof(Chars2));
+    DCHECK(sizeof(Chars1) != sizeof(Chars2));
     for (int i = 0; i < len; i++) {
       if (a[i] != b[i]) {
         return false;
@@ -9052,7 +8574,7 @@
     }
 
     void Advance(int consumed) {
-      ASSERT(consumed <= length_);
+      DCHECK(consumed <= length_);
       // Still in buffer.
       if (length_ != consumed) {
         if (is_one_byte_) {
@@ -9066,8 +8588,8 @@
       // Advance state.
       int offset;
       String* next = op_->Next(&offset);
-      ASSERT_EQ(0, offset);
-      ASSERT(next != NULL);
+      DCHECK_EQ(0, offset);
+      DCHECK(next != NULL);
       String::VisitFlat(this, next);
     }
 
@@ -9103,7 +8625,7 @@
     state_2_.Init(string_2);
     while (true) {
       int to_check = Min(state_1_.length_, state_2_.length_);
-      ASSERT(to_check > 0 && to_check <= length);
+      DCHECK(to_check > 0 && to_check <= length);
       bool is_equal;
       if (state_1_.is_one_byte_) {
         if (state_2_.is_one_byte_) {
@@ -9145,7 +8667,7 @@
   // Fast check: if hash code is computed for both strings
   // a fast negative check can be performed.
   if (HasHashCode() && other->HasHashCode()) {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
     if (FLAG_enable_slow_asserts) {
       if (Hash() != other->Hash()) {
         bool found_difference = false;
@@ -9155,7 +8677,7 @@
             break;
           }
         }
-        ASSERT(found_difference);
+        DCHECK(found_difference);
       }
     }
 #endif
@@ -9189,7 +8711,7 @@
   // Fast check: if hash code is computed for both strings
   // a fast negative check can be performed.
   if (one->HasHashCode() && two->HasHashCode()) {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
     if (FLAG_enable_slow_asserts) {
       if (one->Hash() != two->Hash()) {
         bool found_difference = false;
@@ -9199,7 +8721,7 @@
             break;
           }
         }
-        ASSERT(found_difference);
+        DCHECK(found_difference);
       }
     }
 #endif
@@ -9217,7 +8739,7 @@
   String::FlatContent flat1 = one->GetFlatContent();
   String::FlatContent flat2 = two->GetFlatContent();
 
-  if (flat1.IsAscii() && flat2.IsAscii()) {
+  if (flat1.IsOneByte() && flat2.IsOneByte()) {
       return CompareRawStringContents(flat1.ToOneByteVector().start(),
                                       flat2.ToOneByteVector().start(),
                                       one_length);
@@ -9238,8 +8760,8 @@
   if (map == heap->string_map()) {
     this->set_map(heap->undetectable_string_map());
     return true;
-  } else if (map == heap->ascii_string_map()) {
-    this->set_map(heap->undetectable_ascii_string_map());
+  } else if (map == heap->one_byte_string_map()) {
+    this->set_map(heap->undetectable_one_byte_string_map());
     return true;
   }
   // Rest cannot be marked as undetectable
@@ -9262,7 +8784,7 @@
   for (i = 0; i < slen && remaining_in_str > 0; i++) {
     unsigned cursor = 0;
     uint32_t r = unibrow::Utf8::ValueOf(utf8_data, remaining_in_str, &cursor);
-    ASSERT(cursor > 0 && cursor <= remaining_in_str);
+    DCHECK(cursor > 0 && cursor <= remaining_in_str);
     if (r > unibrow::Utf16::kMaxNonSurrogateCharCode) {
       if (i > slen - 1) return false;
       if (Get(i++) != unibrow::Utf16::LeadSurrogate(r)) return false;
@@ -9282,7 +8804,7 @@
   if (str.length() != slen) return false;
   DisallowHeapAllocation no_gc;
   FlatContent content = GetFlatContent();
-  if (content.IsAscii()) {
+  if (content.IsOneByte()) {
     return CompareChars(content.ToOneByteVector().start(),
                         str.start(), slen) == 0;
   }
@@ -9308,50 +8830,18 @@
 }
 
 
-class IteratingStringHasher: public StringHasher {
- public:
-  static inline uint32_t Hash(String* string, uint32_t seed) {
-    IteratingStringHasher hasher(string->length(), seed);
-    // Nothing to do.
-    if (hasher.has_trivial_hash()) return hasher.GetHashField();
-    ConsString* cons_string = String::VisitFlat(&hasher, string);
-    // The string was flat.
-    if (cons_string == NULL) return hasher.GetHashField();
-    // This is a ConsString, iterate across it.
-    ConsStringIteratorOp op(cons_string);
-    int offset;
-    while (NULL != (string = op.Next(&offset))) {
-      String::VisitFlat(&hasher, string, offset);
-    }
-    return hasher.GetHashField();
-  }
-  inline void VisitOneByteString(const uint8_t* chars, int length) {
-    AddCharacters(chars, length);
-  }
-  inline void VisitTwoByteString(const uint16_t* chars, int length) {
-    AddCharacters(chars, length);
-  }
-
- private:
-  inline IteratingStringHasher(int len, uint32_t seed)
-      : StringHasher(len, seed) {
-  }
-  DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
-};
-
-
 uint32_t String::ComputeAndSetHash() {
   // Should only be called if hash code has not yet been computed.
-  ASSERT(!HasHashCode());
+  DCHECK(!HasHashCode());
 
   // Store the hash code in the object.
   uint32_t field = IteratingStringHasher::Hash(this, GetHeap()->HashSeed());
   set_hash_field(field);
 
   // Check the hash code is there.
-  ASSERT(HasHashCode());
+  DCHECK(HasHashCode());
   uint32_t result = field >> kHashShift;
-  ASSERT(result != 0);  // Ensure that the hash value of 0 is never computed.
+  DCHECK(result != 0);  // Ensure that the hash value of 0 is never computed.
   return result;
 }
 
@@ -9361,29 +8851,7 @@
   if (length == 0 || length > kMaxArrayIndexSize) return false;
   ConsStringIteratorOp op;
   StringCharacterStream stream(this, &op);
-  uint16_t ch = stream.GetNext();
-
-  // If the string begins with a '0' character, it must only consist
-  // of it to be a legal array index.
-  if (ch == '0') {
-    *index = 0;
-    return length == 1;
-  }
-
-  // Convert string to uint32 array index; character by character.
-  int d = ch - '0';
-  if (d < 0 || d > 9) return false;
-  uint32_t result = d;
-  while (stream.HasMore()) {
-    d = stream.GetNext() - '0';
-    if (d < 0 || d > 9) return false;
-    // Check that the new result is below the 32 bit limit.
-    if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
-    result = (result * 10) + d;
-  }
-
-  *index = result;
-  return true;
+  return StringToArrayIndex(&stream, index);
 }
 
 
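The digit loop deleted above moves into a shared StringToArrayIndex helper. This standalone sketch mirrors the deleted logic: a leading '0' is only legal for the string "0" itself, and each step guards against overflowing 32 bits (the caller's separate length bound is omitted here):

    #include <cstdint>
    #include <string>

    bool ParseArrayIndex(const std::string& s, uint32_t* index) {
      if (s.empty()) return false;
      if (s[0] == '0') {  // "0" is valid, "01" is not
        *index = 0;
        return s.size() == 1;
      }
      uint32_t result = 0;
      for (char ch : s) {
        int d = ch - '0';
        if (d < 0 || d > 9) return false;
        // 429496729 == 0xFFFFFFFF / 10; reject values that would overflow.
        if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
        result = result * 10 + d;
      }
      *index = result;
      return true;
    }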
@@ -9410,7 +8878,7 @@
     old_size = SeqOneByteString::SizeFor(old_length);
     new_size = SeqOneByteString::SizeFor(new_length);
   } else {
-    ASSERT(string->IsSeqTwoByteString());
+    DCHECK(string->IsSeqTwoByteString());
     old_size = SeqTwoByteString::SizeFor(old_length);
     new_size = SeqTwoByteString::SizeFor(new_length);
   }
@@ -9418,8 +8886,8 @@
   int delta = old_size - new_size;
 
   Address start_of_string = string->address();
-  ASSERT_OBJECT_ALIGNED(start_of_string);
-  ASSERT_OBJECT_ALIGNED(start_of_string + new_size);
+  DCHECK_OBJECT_ALIGNED(start_of_string);
+  DCHECK_OBJECT_ALIGNED(start_of_string + new_size);
 
   Heap* heap = string->GetHeap();
   NewSpace* newspace = heap->new_space();
@@ -9446,16 +8914,16 @@
 uint32_t StringHasher::MakeArrayIndexHash(uint32_t value, int length) {
   // For array indexes, mix the length into the hash, since an array index
   // could be zero.
-  ASSERT(length > 0);
-  ASSERT(length <= String::kMaxArrayIndexSize);
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+  DCHECK(length > 0);
+  DCHECK(length <= String::kMaxArrayIndexSize);
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
 
   value <<= String::ArrayIndexValueBits::kShift;
   value |= length << String::ArrayIndexLengthBits::kShift;
 
-  ASSERT((value & String::kIsNotArrayIndexMask) == 0);
-  ASSERT((length > String::kMaxCachedArrayIndexLength) ||
+  DCHECK((value & String::kIsNotArrayIndexMask) == 0);
+  DCHECK((length > String::kMaxCachedArrayIndexLength) ||
          (value & String::kContainsCachedArrayIndexMask) == 0);
   return value;
 }
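MakeArrayIndexHash packs the index value and its length into disjoint bitfields of one hash word; mixing in the length is what keeps a zero index from hashing like an empty field. A compact illustration of the packing with an assumed field width (the real shifts come from String's ArrayIndexValueBits/ArrayIndexLengthBits layout):

    #include <cassert>
    #include <cstdint>

    uint32_t PackArrayIndexHash(uint32_t value, int length) {
      const int kLengthBits = 5;  // assumption: wide enough for the length
      assert(length > 0 && length < (1 << kLengthBits));
      return (value << kLengthBits) | static_cast<uint32_t>(length);
    }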
@@ -9480,7 +8948,7 @@
   int vector_length = chars.length();
   // Handle some edge cases
   if (vector_length <= 1) {
-    ASSERT(vector_length == 0 ||
+    DCHECK(vector_length == 0 ||
            static_cast<uint8_t>(chars.start()[0]) <=
                unibrow::Utf8::kMaxOneByteChar);
     *utf16_length_out = vector_length;
@@ -9493,11 +8961,11 @@
   const uint8_t* stream = reinterpret_cast<const uint8_t*>(chars.start());
   int utf16_length = 0;
   bool is_index = true;
-  ASSERT(hasher.is_array_index_);
+  DCHECK(hasher.is_array_index_);
   while (remaining > 0) {
     unsigned consumed = 0;
     uint32_t c = unibrow::Utf8::ValueOf(stream, remaining, &consumed);
-    ASSERT(consumed > 0 && consumed <= remaining);
+    DCHECK(consumed > 0 && consumed <= remaining);
     stream += consumed;
     remaining -= consumed;
     bool is_two_characters = c > unibrow::Utf16::kMaxNonSurrogateCharCode;
@@ -9531,119 +8999,6 @@
 }
 
 
-static void TrimEnumCache(Heap* heap, Map* map, DescriptorArray* descriptors) {
-  int live_enum = map->EnumLength();
-  if (live_enum == kInvalidEnumCacheSentinel) {
-    live_enum = map->NumberOfDescribedProperties(OWN_DESCRIPTORS, DONT_ENUM);
-  }
-  if (live_enum == 0) return descriptors->ClearEnumCache();
-
-  FixedArray* enum_cache = descriptors->GetEnumCache();
-
-  int to_trim = enum_cache->length() - live_enum;
-  if (to_trim <= 0) return;
-  RightTrimFixedArray<Heap::FROM_GC>(
-      heap, descriptors->GetEnumCache(), to_trim);
-
-  if (!descriptors->HasEnumIndicesCache()) return;
-  FixedArray* enum_indices_cache = descriptors->GetEnumIndicesCache();
-  RightTrimFixedArray<Heap::FROM_GC>(heap, enum_indices_cache, to_trim);
-}
-
-
-static void TrimDescriptorArray(Heap* heap,
-                                Map* map,
-                                DescriptorArray* descriptors,
-                                int number_of_own_descriptors) {
-  int number_of_descriptors = descriptors->number_of_descriptors_storage();
-  int to_trim = number_of_descriptors - number_of_own_descriptors;
-  if (to_trim == 0) return;
-
-  RightTrimFixedArray<Heap::FROM_GC>(
-      heap, descriptors, to_trim * DescriptorArray::kDescriptorSize);
-  descriptors->SetNumberOfDescriptors(number_of_own_descriptors);
-
-  if (descriptors->HasEnumCache()) TrimEnumCache(heap, map, descriptors);
-  descriptors->Sort();
-}
-
-
-// Clear a possible back pointer in case the transition leads to a dead map.
-// Return true in case a back pointer has been cleared and false otherwise.
-static bool ClearBackPointer(Heap* heap, Map* target) {
-  if (Marking::MarkBitFrom(target).Get()) return false;
-  target->SetBackPointer(heap->undefined_value(), SKIP_WRITE_BARRIER);
-  return true;
-}
-
-
-// TODO(mstarzinger): This method should be moved into MarkCompactCollector,
-// because it cannot be called from outside the GC and we already have methods
-// depending on the transitions layout in the GC anyways.
-void Map::ClearNonLiveTransitions(Heap* heap) {
-  // If there are no transitions to be cleared, return.
-  // TODO(verwaest) Should be an assert, otherwise back pointers are not
-  // properly cleared.
-  if (!HasTransitionArray()) return;
-
-  TransitionArray* t = transitions();
-  MarkCompactCollector* collector = heap->mark_compact_collector();
-
-  int transition_index = 0;
-
-  DescriptorArray* descriptors = instance_descriptors();
-  bool descriptors_owner_died = false;
-
-  // Compact all live descriptors to the left.
-  for (int i = 0; i < t->number_of_transitions(); ++i) {
-    Map* target = t->GetTarget(i);
-    if (ClearBackPointer(heap, target)) {
-      if (target->instance_descriptors() == descriptors) {
-        descriptors_owner_died = true;
-      }
-    } else {
-      if (i != transition_index) {
-        Name* key = t->GetKey(i);
-        t->SetKey(transition_index, key);
-        Object** key_slot = t->GetKeySlot(transition_index);
-        collector->RecordSlot(key_slot, key_slot, key);
-        // Target slots do not need to be recorded since maps are not compacted.
-        t->SetTarget(transition_index, t->GetTarget(i));
-      }
-      transition_index++;
-    }
-  }
-
-  // If there are no transitions to be cleared, return.
-  // TODO(verwaest) Should be an assert, otherwise back pointers are not
-  // properly cleared.
-  if (transition_index == t->number_of_transitions()) return;
-
-  int number_of_own_descriptors = NumberOfOwnDescriptors();
-
-  if (descriptors_owner_died) {
-    if (number_of_own_descriptors > 0) {
-      TrimDescriptorArray(heap, this, descriptors, number_of_own_descriptors);
-      ASSERT(descriptors->number_of_descriptors() == number_of_own_descriptors);
-      set_owns_descriptors(true);
-    } else {
-      ASSERT(descriptors == GetHeap()->empty_descriptor_array());
-    }
-  }
-
-  // Note that we never eliminate a transition array, though we might right-trim
-  // such that number_of_transitions() == 0. If this assumption changes,
-  // TransitionArray::CopyInsert() will need to deal with the case that a
-  // transition array disappeared during GC.
-  int trim = t->number_of_transitions() - transition_index;
-  if (trim > 0) {
-    RightTrimFixedArray<Heap::FROM_GC>(heap, t, t->IsSimpleTransition()
-        ? trim : trim * TransitionArray::kTransitionSize);
-  }
-  ASSERT(HasTransitionArray());
-}
-
-
 int Map::Hash() {
   // For performance reasons we only hash the 3 most variable fields of a map:
   // constructor, prototype and bit_field2.
@@ -9688,15 +9043,23 @@
 
 
 void ConstantPoolArray::ConstantPoolIterateBody(ObjectVisitor* v) {
-  ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR);
-  while (!code_iter.is_finished()) {
-    v->VisitCodeEntry(reinterpret_cast<Address>(
-        RawFieldOfElementAt(code_iter.next_index())));
-  }
+  // Unfortunately the serializer relies on pointers within an object being
+  // visited in-order, so we have to iterate both the code and heap pointers in
+  // the small section before doing so in the extended section.
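+  // Resulting visit order when an extended section exists: small CODE_PTR,
+  // small HEAP_PTR, extended CODE_PTR, extended HEAP_PTR.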
+  for (int s = 0; s <= final_section(); ++s) {
+    LayoutSection section = static_cast<LayoutSection>(s);
+    ConstantPoolArray::Iterator code_iter(this, ConstantPoolArray::CODE_PTR,
+                                          section);
+    while (!code_iter.is_finished()) {
+      v->VisitCodeEntry(reinterpret_cast<Address>(
+          RawFieldOfElementAt(code_iter.next_index())));
+    }
 
-  ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR);
-  while (!heap_iter.is_finished()) {
-    v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index()));
+    ConstantPoolArray::Iterator heap_iter(this, ConstantPoolArray::HEAP_PTR,
+                                          section);
+    while (!heap_iter.is_finished()) {
+      v->VisitPointer(RawFieldOfElementAt(heap_iter.next_index()));
+    }
   }
 }
 
@@ -9732,11 +9095,10 @@
 
 
 void JSFunction::MarkForOptimization() {
-  ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
-  ASSERT(!IsOptimized());
-  ASSERT(shared()->allows_lazy_compilation() ||
+  DCHECK(!IsOptimized());
+  DCHECK(shared()->allows_lazy_compilation() ||
          code()->optimizable());
-  ASSERT(!shared()->is_generator());
+  DCHECK(!shared()->is_generator());
   set_code_no_write_barrier(
       GetIsolate()->builtins()->builtin(Builtins::kCompileOptimized));
   // No write barrier required, since the builtin is part of the root set.
@@ -9744,14 +9106,14 @@
 
 
 void JSFunction::MarkForConcurrentOptimization() {
-  ASSERT(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
-  ASSERT(!IsOptimized());
-  ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
-  ASSERT(!shared()->is_generator());
-  ASSERT(GetIsolate()->concurrent_recompilation_enabled());
+  DCHECK(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
+  DCHECK(!IsOptimized());
+  DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
+  DCHECK(!shared()->is_generator());
+  DCHECK(GetIsolate()->concurrent_recompilation_enabled());
   if (FLAG_trace_concurrent_recompilation) {
     PrintF("  ** Marking ");
-    PrintName();
+    ShortPrint();
     PrintF(" for concurrent recompilation.\n");
   }
   set_code_no_write_barrier(
@@ -9763,13 +9125,13 @@
 void JSFunction::MarkInOptimizationQueue() {
   // We can only arrive here via the concurrent-recompilation builtin.  If
   // break points were set, the code would point to the lazy-compile builtin.
-  ASSERT(!GetIsolate()->DebuggerHasBreakPoints());
-  ASSERT(IsMarkedForConcurrentOptimization() && !IsOptimized());
-  ASSERT(shared()->allows_lazy_compilation() || code()->optimizable());
-  ASSERT(GetIsolate()->concurrent_recompilation_enabled());
+  DCHECK(!GetIsolate()->DebuggerHasBreakPoints());
+  DCHECK(IsMarkedForConcurrentOptimization() && !IsOptimized());
+  DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
+  DCHECK(GetIsolate()->concurrent_recompilation_enabled());
   if (FLAG_trace_concurrent_recompilation) {
     PrintF("  ** Queueing ");
-    PrintName();
+    ShortPrint();
     PrintF(" for concurrent recompilation.\n");
   }
   set_code_no_write_barrier(
@@ -9778,6 +9140,30 @@
 }
 
 
+Handle<JSFunction> JSFunction::CloneClosure(Handle<JSFunction> function) {
+  Isolate* isolate = function->GetIsolate();
+  Handle<Map> map(function->map());
+  Handle<SharedFunctionInfo> shared(function->shared());
+  Handle<Context> context(function->context());
+  Handle<JSFunction> clone =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context);
+
+  if (shared->bound()) {
+    clone->set_function_bindings(function->function_bindings());
+  }
+
+  // In the typical case, __proto__ of ``function`` is the default Function
+  // prototype, which means that SetPrototype below is a no-op.
+  // In rare cases when that is not true, we mutate the clone's __proto__.
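+  // Example (names illustrative): after `f.__proto__ = myProto`, a clone of
+  // f must also end up with myProto as its prototype.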
+  Handle<Object> original_prototype(map->prototype(), isolate);
+  if (*original_prototype != clone->map()->prototype()) {
+    JSObject::SetPrototype(clone, original_prototype, false).Assert();
+  }
+
+  return clone;
+}
+
+
 void SharedFunctionInfo::AddToOptimizedCodeMap(
     Handle<SharedFunctionInfo> shared,
     Handle<Context> native_context,
@@ -9785,22 +9171,22 @@
     Handle<FixedArray> literals,
     BailoutId osr_ast_id) {
   Isolate* isolate = shared->GetIsolate();
-  ASSERT(code->kind() == Code::OPTIMIZED_FUNCTION);
-  ASSERT(native_context->IsNativeContext());
+  DCHECK(code->kind() == Code::OPTIMIZED_FUNCTION);
+  DCHECK(native_context->IsNativeContext());
   STATIC_ASSERT(kEntryLength == 4);
   Handle<FixedArray> new_code_map;
   Handle<Object> value(shared->optimized_code_map(), isolate);
   int old_length;
   if (value->IsSmi()) {
     // No optimized code map.
-    ASSERT_EQ(0, Smi::cast(*value)->value());
+    DCHECK_EQ(0, Smi::cast(*value)->value());
     // Create 4 entries per context {context, code, literals, osr ast id}.
     new_code_map = isolate->factory()->NewFixedArray(kInitialLength);
     old_length = kEntriesStart;
   } else {
     // Copy old map and append one new entry.
     Handle<FixedArray> old_code_map = Handle<FixedArray>::cast(value);
-    ASSERT_EQ(-1, shared->SearchOptimizedCodeMap(*native_context, osr_ast_id));
+    DCHECK_EQ(-1, shared->SearchOptimizedCodeMap(*native_context, osr_ast_id));
     old_length = old_code_map->length();
     new_code_map = FixedArray::CopySize(
         old_code_map, old_length + kEntryLength);
@@ -9818,12 +9204,12 @@
 
 #ifdef DEBUG
   for (int i = kEntriesStart; i < new_code_map->length(); i += kEntryLength) {
-    ASSERT(new_code_map->get(i + kContextOffset)->IsNativeContext());
-    ASSERT(new_code_map->get(i + kCachedCodeOffset)->IsCode());
-    ASSERT(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
+    DCHECK(new_code_map->get(i + kContextOffset)->IsNativeContext());
+    DCHECK(new_code_map->get(i + kCachedCodeOffset)->IsCode());
+    DCHECK(Code::cast(new_code_map->get(i + kCachedCodeOffset))->kind() ==
            Code::OPTIMIZED_FUNCTION);
-    ASSERT(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
-    ASSERT(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
+    DCHECK(new_code_map->get(i + kLiteralsOffset)->IsFixedArray());
+    DCHECK(new_code_map->get(i + kOsrAstIdOffset)->IsSmi());
   }
 #endif
   shared->set_optimized_code_map(*new_code_map);
@@ -9831,11 +9217,11 @@
 
 
 FixedArray* SharedFunctionInfo::GetLiteralsFromOptimizedCodeMap(int index) {
-  ASSERT(index > kEntriesStart);
+  DCHECK(index > kEntriesStart);
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
   if (!bound()) {
     FixedArray* cached_literals = FixedArray::cast(code_map->get(index + 1));
-    ASSERT_NE(NULL, cached_literals);
+    DCHECK_NE(NULL, cached_literals);
     return cached_literals;
   }
   return NULL;
@@ -9843,10 +9229,10 @@
 
 
 Code* SharedFunctionInfo::GetCodeFromOptimizedCodeMap(int index) {
-  ASSERT(index > kEntriesStart);
+  DCHECK(index > kEntriesStart);
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
   Code* code = Code::cast(code_map->get(index));
-  ASSERT_NE(NULL, code);
+  DCHECK_NE(NULL, code);
   return code;
 }
 
@@ -9861,7 +9247,7 @@
     flusher->EvictOptimizedCodeMap(this);
   }
 
-  ASSERT(code_map->get(kNextMapIndex)->IsUndefined());
+  DCHECK(code_map->get(kNextMapIndex)->IsUndefined());
   set_optimized_code_map(Smi::FromInt(0));
 }
 
@@ -9875,7 +9261,7 @@
   int dst = kEntriesStart;
   int length = code_map->length();
   for (int src = kEntriesStart; src < length; src += kEntryLength) {
-    ASSERT(code_map->get(src)->IsNativeContext());
+    DCHECK(code_map->get(src)->IsNativeContext());
     if (Code::cast(code_map->get(src + kCachedCodeOffset)) == optimized_code) {
       // Evict the src entry by not copying it to the dst entry.
       if (FLAG_trace_opt) {
@@ -9905,7 +9291,7 @@
   }
   if (dst != length) {
     // Always trim even when array is cleared because of heap verifier.
-    RightTrimFixedArray<Heap::FROM_MUTATOR>(GetHeap(), code_map, length - dst);
+    GetHeap()->RightTrimFixedArray<Heap::FROM_MUTATOR>(code_map, length - dst);
     if (code_map->length() == kEntriesStart) ClearOptimizedCodeMap();
   }
 }
@@ -9913,24 +9299,39 @@
 
 void SharedFunctionInfo::TrimOptimizedCodeMap(int shrink_by) {
   FixedArray* code_map = FixedArray::cast(optimized_code_map());
-  ASSERT(shrink_by % kEntryLength == 0);
-  ASSERT(shrink_by <= code_map->length() - kEntriesStart);
+  DCHECK(shrink_by % kEntryLength == 0);
+  DCHECK(shrink_by <= code_map->length() - kEntriesStart);
   // Always trim even when array is cleared because of heap verifier.
-  RightTrimFixedArray<Heap::FROM_GC>(GetHeap(), code_map, shrink_by);
+  GetHeap()->RightTrimFixedArray<Heap::FROM_GC>(code_map, shrink_by);
   if (code_map->length() == kEntriesStart) {
     ClearOptimizedCodeMap();
   }
 }
 
 
-void JSObject::OptimizeAsPrototype(Handle<JSObject> object) {
+void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
+                                   PrototypeOptimizationMode mode) {
   if (object->IsGlobalObject()) return;
-
-  // Make sure prototypes are fast objects and their maps have the bit set
-  // so they remain fast.
-  if (!object->HasFastProperties()) {
-    TransformToFastProperties(object, 0);
+  if (object->IsJSGlobalProxy()) return;
+  if (mode == FAST_PROTOTYPE && !object->map()->is_prototype_map()) {
+    // First normalize to ensure all JSFunctions are CONSTANT.
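+    // (Presumably the MigrateSlowToFast below then rebuilds the descriptors,
+    // so function-valued properties come back as CONSTANT.)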
+    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0);
   }
+  if (!object->HasFastProperties()) {
+    JSObject::MigrateSlowToFast(object, 0);
+  }
+  if (mode == FAST_PROTOTYPE && object->HasFastProperties() &&
+      !object->map()->is_prototype_map()) {
+    Handle<Map> new_map = Map::Copy(handle(object->map()));
+    JSObject::MigrateToMap(object, new_map);
+    object->map()->set_is_prototype_map(true);
+  }
+}
+
+
+void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
+  if (!object->map()->is_prototype_map()) return;
+  OptimizeAsPrototype(object, FAST_PROTOTYPE);
 }
 
 
@@ -9944,7 +9345,7 @@
 
   Handle<Map> current_map = initial_map;
   ElementsKind kind = current_map->elements_kind();
-  ASSERT(kind == GetInitialFastElementsKind());
+  DCHECK(kind == GetInitialFastElementsKind());
   maps->set(kind, *current_map);
   for (int i = GetSequenceIndexFromFastElementsKind(kind) + 1;
        i < kFastElementsKindCount; ++i) {
@@ -9952,7 +9353,7 @@
     ElementsKind next_kind = GetFastElementsKindFromSequenceIndex(i);
     if (current_map->HasElementsTransition()) {
       new_map = handle(current_map->elements_transition_map());
-      ASSERT(new_map->elements_kind() == next_kind);
+      DCHECK(new_map->elements_kind() == next_kind);
     } else {
       new_map = Map::CopyAsElementsKind(
           current_map, next_kind, INSERT_TRANSITION);
@@ -9969,13 +9370,7 @@
                                       Handle<Object> value) {
   Isolate* isolate = function->GetIsolate();
 
-  ASSERT(value->IsJSReceiver());
-
-  // First some logic for the map of the prototype to make sure it is in fast
-  // mode.
-  if (value->IsJSObject()) {
-    JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
-  }
+  DCHECK(value->IsJSReceiver());
 
   // Now some logic for the maps of the objects that are created by using this
   // function as a constructor.
@@ -9987,21 +9382,30 @@
     if (function->IsInobjectSlackTrackingInProgress()) {
       function->CompleteInobjectSlackTracking();
     }
+
     Handle<Map> initial_map(function->initial_map(), isolate);
-    Handle<Map> new_map = Map::Copy(initial_map);
-    new_map->set_prototype(*value);
 
-    // If the function is used as the global Array function, cache the
-    // initial map (and transitioned versions) in the native context.
-    Context* native_context = function->context()->native_context();
-    Object* array_function = native_context->get(Context::ARRAY_FUNCTION_INDEX);
-    if (array_function->IsJSFunction() &&
-        *function == JSFunction::cast(array_function)) {
-      CacheInitialJSArrayMaps(handle(native_context, isolate), new_map);
+    if (!initial_map->GetIsolate()->bootstrapper()->IsActive() &&
+        initial_map->instance_type() == JS_OBJECT_TYPE) {
+      // Put the value in the initial map field until an initial map is needed.
+      // At that point, a new initial map is created and the prototype is put
+      // into the initial map where it belongs.
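+      // (The prototype_or_initial_map slot is overloaded: it holds a Map once
+      // an initial map exists, and the raw prototype object until then.)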
+      function->set_prototype_or_initial_map(*value);
+    } else {
+      Handle<Map> new_map = Map::Copy(initial_map);
+      JSFunction::SetInitialMap(function, new_map, value);
+
+      // If the function is used as the global Array function, cache the
+      // initial map (and transitioned versions) in the native context.
+      Context* native_context = function->context()->native_context();
+      Object* array_function =
+          native_context->get(Context::ARRAY_FUNCTION_INDEX);
+      if (array_function->IsJSFunction() &&
+          *function == JSFunction::cast(array_function)) {
+        CacheInitialJSArrayMaps(handle(native_context, isolate), new_map);
+      }
     }
 
-    function->set_initial_map(*new_map);
-
     // Deoptimize all code that embeds the previous initial map.
     initial_map->dependent_code()->DeoptimizeDependentCodeGroup(
         isolate, DependentCode::kInitialMapChangedGroup);
@@ -10017,7 +9421,7 @@
 
 void JSFunction::SetPrototype(Handle<JSFunction> function,
                               Handle<Object> value) {
-  ASSERT(function->should_have_prototype());
+  DCHECK(function->should_have_prototype());
   Handle<Object> construct_prototype = value;
 
   // If the value is not a JSReceiver, store the value in the map's
@@ -10067,6 +9471,18 @@
 }
 
 
+void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
+                               Handle<Object> prototype) {
+  if (prototype->IsJSObject()) {
+    Handle<JSObject> js_proto = Handle<JSObject>::cast(prototype);
+    JSObject::OptimizeAsPrototype(js_proto, FAST_PROTOTYPE);
+  }
+  map->set_prototype(*prototype);
+  function->set_prototype_or_initial_map(*map);
+  map->set_constructor(*function);
+}
+
+
 void JSFunction::EnsureHasInitialMap(Handle<JSFunction> function) {
   if (function->has_initial_map()) return;
   Isolate* isolate = function->GetIsolate();
@@ -10091,21 +9507,15 @@
   Handle<Object> prototype;
   if (function->has_instance_prototype()) {
     prototype = handle(function->instance_prototype(), isolate);
-    for (Handle<Object> p = prototype; !p->IsNull() && !p->IsJSProxy();
-         p = Object::GetPrototype(isolate, p)) {
-      JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(p));
-    }
   } else {
     prototype = isolate->factory()->NewFunctionPrototype(function);
   }
   map->set_inobject_properties(in_object_properties);
   map->set_unused_property_fields(in_object_properties);
-  map->set_prototype(*prototype);
-  ASSERT(map->has_fast_object_elements());
+  DCHECK(map->has_fast_object_elements());
 
   // Finally link initial map and constructor function.
-  function->set_initial_map(*map);
-  map->set_constructor(*function);
+  JSFunction::SetInitialMap(function, map, Handle<JSReceiver>::cast(prototype));
 
   if (!function->shared()->is_generator()) {
     function->StartInobjectSlackTracking();
@@ -10136,6 +9546,7 @@
 //   ""       only the top-level function
 //   "name"   only the function "name"
 //   "name*"  only functions starting with "name"
+//   "~"      none; the tilde is not an identifier
 bool JSFunction::PassesFilter(const char* raw_filter) {
   if (*raw_filter == '*') return true;
   String* name = shared()->DebugName();
@@ -10184,10 +9595,10 @@
   Isolate* isolate = script->GetIsolate();
 
   if (!script->source()->IsString()) {
-    ASSERT(script->source()->IsUndefined());
+    DCHECK(script->source()->IsUndefined());
     Handle<FixedArray> empty = isolate->factory()->NewFixedArray(0);
     script->set_line_ends(*empty);
-    ASSERT(script->line_ends()->IsFixedArray());
+    DCHECK(script->line_ends()->IsFixedArray());
     return;
   }
 
@@ -10200,7 +9611,7 @@
   }
 
   script->set_line_ends(*array);
-  ASSERT(script->line_ends()->IsFixedArray());
+  DCHECK(script->line_ends()->IsFixedArray());
 }
 
 
@@ -10220,7 +9631,7 @@
 
 int Script::GetLineNumberWithArray(int code_pos) {
   DisallowHeapAllocation no_allocation;
-  ASSERT(line_ends()->IsFixedArray());
+  DCHECK(line_ends()->IsFixedArray());
   FixedArray* line_ends_array = FixedArray::cast(line_ends());
   int line_ends_len = line_ends_array->length();
   if (line_ends_len == 0) return -1;
@@ -10270,11 +9681,11 @@
   Isolate* isolate = script->GetIsolate();
   Handle<String> name_or_source_url_key =
       isolate->factory()->InternalizeOneByteString(
-          STATIC_ASCII_VECTOR("nameOrSourceURL"));
+          STATIC_CHAR_VECTOR("nameOrSourceURL"));
   Handle<JSObject> script_wrapper = Script::GetWrapper(script);
   Handle<Object> property = Object::GetProperty(
       script_wrapper, name_or_source_url_key).ToHandleChecked();
-  ASSERT(property->IsJSFunction());
+  DCHECK(property->IsJSFunction());
   Handle<JSFunction> method = Handle<JSFunction>::cast(property);
   Handle<Object> result;
   // Do not check against pending exception, since this function may be called
@@ -10292,16 +9703,21 @@
 // collector will call the weak callback on the global handle
 // associated with the wrapper and get rid of both the wrapper and the
 // handle.
-static void ClearWrapperCache(
+static void ClearWrapperCacheWeakCallback(
     const v8::WeakCallbackData<v8::Value, void>& data) {
   Object** location = reinterpret_cast<Object**>(data.GetParameter());
   JSValue* wrapper = JSValue::cast(*location);
-  Foreign* foreign = Script::cast(wrapper->value())->wrapper();
-  ASSERT_EQ(foreign->foreign_address(), reinterpret_cast<Address>(location));
+  Script::cast(wrapper->value())->ClearWrapperCache();
+}
+
+
+void Script::ClearWrapperCache() {
+  Foreign* foreign = wrapper();
+  Object** location = reinterpret_cast<Object**>(foreign->foreign_address());
+  DCHECK_EQ(foreign->foreign_address(), reinterpret_cast<Address>(location));
   foreign->set_foreign_address(0);
   GlobalHandles::Destroy(location);
-  Isolate* isolate = reinterpret_cast<Isolate*>(data.GetIsolate());
-  isolate->counters()->script_wrappers()->Decrement();
+  GetIsolate()->counters()->script_wrappers()->Decrement();
 }
 
 
@@ -10326,7 +9742,7 @@
   Handle<Object> handle = isolate->global_handles()->Create(*result);
   GlobalHandles::MakeWeak(handle.location(),
                           reinterpret_cast<void*>(handle.location()),
-                          &ClearWrapperCache);
+                          &ClearWrapperCacheWeakCallback);
   script->wrapper()->set_foreign_address(
       reinterpret_cast<Address>(handle.location()));
   return result;
@@ -10340,7 +9756,7 @@
 }
 
 
-bool SharedFunctionInfo::HasSourceCode() {
+bool SharedFunctionInfo::HasSourceCode() const {
   return !script()->IsUndefined() &&
          !reinterpret_cast<Script*>(script())->source()->IsUndefined();
 }
@@ -10385,43 +9801,36 @@
 }
 
 
-// Support function for printing the source code to a StringStream
-// without any allocation in the heap.
-void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
-                                         int max_length) {
+// Output the source code without any allocation in the heap.
+OStream& operator<<(OStream& os, const SourceCodeOf& v) {
+  const SharedFunctionInfo* s = v.value;
   // For some native functions there is no source.
-  if (!HasSourceCode()) {
-    accumulator->Add("<No Source>");
-    return;
-  }
+  if (!s->HasSourceCode()) return os << "<No Source>";
 
   // Get the source for the script which this function came from.
   // Don't use String::cast because we don't want more assertion errors while
   // we are already creating a stack dump.
   String* script_source =
-      reinterpret_cast<String*>(Script::cast(script())->source());
+      reinterpret_cast<String*>(Script::cast(s->script())->source());
 
-  if (!script_source->LooksValid()) {
-    accumulator->Add("<Invalid Source>");
-    return;
-  }
+  if (!script_source->LooksValid()) return os << "<Invalid Source>";
 
-  if (!is_toplevel()) {
-    accumulator->Add("function ");
-    Object* name = this->name();
+  if (!s->is_toplevel()) {
+    os << "function ";
+    Object* name = s->name();
     if (name->IsString() && String::cast(name)->length() > 0) {
-      accumulator->PrintName(name);
+      String::cast(name)->PrintUC16(os);
     }
   }
 
-  int len = end_position() - start_position();
-  if (len <= max_length || max_length < 0) {
-    accumulator->Put(script_source, start_position(), end_position());
+  int len = s->end_position() - s->start_position();
+  if (len <= v.max_length || v.max_length < 0) {
+    script_source->PrintUC16(os, s->start_position(), s->end_position());
+    return os;
   } else {
-    accumulator->Put(script_source,
-                     start_position(),
-                     start_position() + max_length);
-    accumulator->Add("...\n");
+    script_source->PrintUC16(os, s->start_position(),
+                             s->start_position() + v.max_length);
+    return os << "...\n";
   }
 }
 
@@ -10440,7 +9849,7 @@
 
 
 void SharedFunctionInfo::EnableDeoptimizationSupport(Code* recompiled) {
-  ASSERT(!has_deoptimization_support());
+  DCHECK(!has_deoptimization_support());
   DisallowHeapAllocation no_allocation;
   Code* code = this->code();
   if (IsCodeEquivalent(code, recompiled)) {
@@ -10454,7 +9863,7 @@
     // effectively resetting all IC state.
     ReplaceCode(recompiled);
   }
-  ASSERT(has_deoptimization_support());
+  DCHECK(has_deoptimization_support());
 }
 
 
@@ -10470,7 +9879,7 @@
   set_bailout_reason(reason);
   // Code should be the lazy compilation stub or else unoptimized.  If the
   // latter, disable optimization for the code too.
-  ASSERT(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
+  DCHECK(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
   if (code()->kind() == Code::FUNCTION) {
     code()->set_optimizable(false);
   }
@@ -10484,18 +9893,18 @@
 
 
 bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
-  ASSERT(!id.IsNone());
+  DCHECK(!id.IsNone());
   Code* unoptimized = code();
   DeoptimizationOutputData* data =
       DeoptimizationOutputData::cast(unoptimized->deoptimization_data());
   unsigned ignore = Deoptimizer::GetOutputInfo(data, id, this);
   USE(ignore);
-  return true;  // Return true if there was no ASSERT.
+  return true;  // Return true if there was no DCHECK.
 }
 
 
 void JSFunction::StartInobjectSlackTracking() {
-  ASSERT(has_initial_map() && !IsInobjectSlackTrackingInProgress());
+  DCHECK(has_initial_map() && !IsInobjectSlackTrackingInProgress());
 
   if (!FLAG_clever_optimizations) return;
   Map* map = initial_map();
@@ -10554,10 +9963,10 @@
 
 
 void JSFunction::CompleteInobjectSlackTracking() {
-  ASSERT(has_initial_map());
+  DCHECK(has_initial_map());
   Map* map = initial_map();
 
-  ASSERT(map->done_inobject_slack_tracking());
+  DCHECK(map->done_inobject_slack_tracking());
   map->set_construction_count(kNoSlackTracking);
 
   int slack = map->unused_property_fields();
@@ -10572,7 +9981,7 @@
 int SharedFunctionInfo::SearchOptimizedCodeMap(Context* native_context,
                                                BailoutId osr_ast_id) {
   DisallowHeapAllocation no_gc;
-  ASSERT(native_context->IsNativeContext());
+  DCHECK(native_context->IsNativeContext());
   if (!FLAG_cache_optimized_code) return -1;
   Object* value = optimized_code_map();
   if (!value->IsSmi()) {
@@ -10612,7 +10021,7 @@
 
 
 void ObjectVisitor::VisitCodeTarget(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsCodeTarget(rinfo->rmode()));
+  DCHECK(RelocInfo::IsCodeTarget(rinfo->rmode()));
   Object* target = Code::GetCodeFromTargetAddress(rinfo->target_address());
   Object* old_target = target;
   VisitPointer(&target);
@@ -10621,7 +10030,7 @@
 
 
 void ObjectVisitor::VisitCodeAgeSequence(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
+  DCHECK(RelocInfo::IsCodeAgeSequence(rinfo->rmode()));
   Object* stub = rinfo->code_age_stub();
   if (stub) {
     VisitPointer(&stub);
@@ -10640,7 +10049,7 @@
 
 
 void ObjectVisitor::VisitCell(RelocInfo* rinfo) {
-  ASSERT(rinfo->rmode() == RelocInfo::CELL);
+  DCHECK(rinfo->rmode() == RelocInfo::CELL);
   Object* cell = rinfo->target_cell();
   Object* old_cell = cell;
   VisitPointer(&cell);
@@ -10651,7 +10060,7 @@
 
 
 void ObjectVisitor::VisitDebugTarget(RelocInfo* rinfo) {
-  ASSERT((RelocInfo::IsJSReturn(rinfo->rmode()) &&
+  DCHECK((RelocInfo::IsJSReturn(rinfo->rmode()) &&
           rinfo->IsPatchedReturnSequence()) ||
          (RelocInfo::IsDebugBreakSlot(rinfo->rmode()) &&
           rinfo->IsPatchedDebugBreakSlotSequence()));
@@ -10663,7 +10072,7 @@
 
 
 void ObjectVisitor::VisitEmbeddedPointer(RelocInfo* rinfo) {
-  ASSERT(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
+  DCHECK(rinfo->rmode() == RelocInfo::EMBEDDED_OBJECT);
   Object* p = rinfo->target_object();
   VisitPointer(&p);
 }
@@ -10676,6 +10085,7 @@
 
 
 void Code::InvalidateRelocation() {
+  InvalidateEmbeddedObjects();
   set_relocation_info(GetHeap()->empty_byte_array());
 }
 
@@ -10700,12 +10110,12 @@
   for (RelocIterator it(this, RelocInfo::kApplyMask); !it.done(); it.next()) {
     it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH);
   }
-  CPU::FlushICache(instruction_start(), instruction_size());
+  CpuFeatures::FlushICache(instruction_start(), instruction_size());
 }
 
 
 void Code::CopyFrom(const CodeDesc& desc) {
-  ASSERT(Marking::Color(this) == Marking::WHITE_OBJECT);
+  DCHECK(Marking::Color(this) == Marking::WHITE_OBJECT);
 
   // copy code
   CopyBytes(instruction_start(), desc.buffer,
@@ -10754,7 +10164,7 @@
       it.rinfo()->apply(delta, SKIP_ICACHE_FLUSH);
     }
   }
-  CPU::FlushICache(instruction_start(), instruction_size());
+  CpuFeatures::FlushICache(instruction_start(), instruction_size());
 }
 
 
@@ -10821,7 +10231,7 @@
 
 
 Object* Code::FindNthObject(int n, Map* match_map) {
-  ASSERT(is_inline_cache_stub());
+  DCHECK(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
   int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
@@ -10850,7 +10260,7 @@
 
 
 void Code::FindAndReplace(const FindAndReplacePattern& pattern) {
-  ASSERT(is_inline_cache_stub() || is_handler());
+  DCHECK(is_inline_cache_stub() || is_handler());
   DisallowHeapAllocation no_allocation;
   int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   STATIC_ASSERT(FindAndReplacePattern::kMaxCount < 32);
@@ -10871,7 +10281,7 @@
 
 
 void Code::FindAllMaps(MapHandleList* maps) {
-  ASSERT(is_inline_cache_stub());
+  DCHECK(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
   int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
@@ -10883,7 +10293,7 @@
 
 
 Code* Code::FindFirstHandler() {
-  ASSERT(is_inline_cache_stub());
+  DCHECK(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
   int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
@@ -10896,7 +10306,7 @@
 
 
 bool Code::FindHandlers(CodeHandleList* code_list, int length) {
-  ASSERT(is_inline_cache_stub());
+  DCHECK(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
   int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
   int i = 0;
@@ -10914,8 +10324,28 @@
 }
 
 
+MaybeHandle<Code> Code::FindHandlerForMap(Map* map) {
+  DCHECK(is_inline_cache_stub());
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+             RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  bool return_next = false;
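+  // Assumption: the IC emits each map and its handler as adjacent reloc
+  // entries, so the first CODE_TARGET after the matching map is its handler.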
+  for (RelocIterator it(this, mask); !it.done(); it.next()) {
+    RelocInfo* info = it.rinfo();
+    if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
+      Object* object = info->target_object();
+      if (object == map) return_next = true;
+    } else if (return_next) {
+      Code* code = Code::GetCodeFromTargetAddress(info->target_address());
+      DCHECK(code->kind() == Code::HANDLER);
+      return handle(code);
+    }
+  }
+  return MaybeHandle<Code>();
+}
+
+
 Name* Code::FindFirstName() {
-  ASSERT(is_inline_cache_stub());
+  DCHECK(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
   int mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
@@ -10955,7 +10385,7 @@
 
 
 void SharedFunctionInfo::ClearTypeFeedbackInfo() {
-  FixedArray* vector = feedback_vector();
+  TypeFeedbackVector* vector = feedback_vector();
   Heap* heap = GetHeap();
   int length = vector->length();
 
@@ -10971,7 +10401,7 @@
           break;
           // Fall through...
         default:
-          vector->set(i, TypeFeedbackInfo::RawUninitializedSentinel(heap),
+          vector->set(i, TypeFeedbackVector::RawUninitializedSentinel(heap),
                       SKIP_WRITE_BARRIER);
       }
     }
@@ -10981,7 +10411,7 @@
 
 BailoutId Code::TranslatePcOffsetToAstId(uint32_t pc_offset) {
   DisallowHeapAllocation no_gc;
-  ASSERT(kind() == FUNCTION);
+  DCHECK(kind() == FUNCTION);
   BackEdgeTable back_edges(this, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
     if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
@@ -10992,7 +10422,7 @@
 
 uint32_t Code::TranslateAstIdToPcOffset(BailoutId ast_id) {
   DisallowHeapAllocation no_gc;
-  ASSERT(kind() == FUNCTION);
+  DCHECK(kind() == FUNCTION);
   BackEdgeTable back_edges(this, &no_gc);
   for (uint32_t i = 0; i < back_edges.length(); i++) {
     if (back_edges.ast_id(i) == ast_id) return back_edges.pc_offset(i);
@@ -11124,11 +10554,11 @@
     CODE_AGE_LIST(HANDLE_CODE_AGE)
 #undef HANDLE_CODE_AGE
     case kNotExecutedCodeAge: {
-      ASSERT(parity == NO_MARKING_PARITY);
+      DCHECK(parity == NO_MARKING_PARITY);
       return *builtins->MarkCodeAsExecutedOnce();
     }
     case kExecutedOnceCodeAge: {
-      ASSERT(parity == NO_MARKING_PARITY);
+      DCHECK(parity == NO_MARKING_PARITY);
       return *builtins->MarkCodeAsExecutedTwice();
     }
     default:
@@ -11151,7 +10581,9 @@
       if ((bailout_id == Deoptimizer::GetDeoptimizationId(
               GetIsolate(), info->target_address(), Deoptimizer::EAGER)) ||
           (bailout_id == Deoptimizer::GetDeoptimizationId(
-              GetIsolate(), info->target_address(), Deoptimizer::SOFT))) {
+              GetIsolate(), info->target_address(), Deoptimizer::SOFT)) ||
+          (bailout_id == Deoptimizer::GetDeoptimizationId(
+              GetIsolate(), info->target_address(), Deoptimizer::LAZY))) {
         CHECK(RelocInfo::IsRuntimeEntry(info->rmode()));
         PrintF(out, "            %s\n", last_comment);
         return;
@@ -11189,23 +10621,25 @@
 
 #ifdef ENABLE_DISASSEMBLER
 
-void DeoptimizationInputData::DeoptimizationInputDataPrint(FILE* out) {
+void DeoptimizationInputData::DeoptimizationInputDataPrint(
+    OStream& os) {  // NOLINT
   disasm::NameConverter converter;
   int deopt_count = DeoptCount();
-  PrintF(out, "Deoptimization Input Data (deopt points = %d)\n", deopt_count);
-  if (0 == deopt_count) return;
-
-  PrintF(out, "%6s  %6s  %6s %6s %12s\n", "index", "ast id", "argc", "pc",
-         FLAG_print_code_verbose ? "commands" : "");
+  os << "Deoptimization Input Data (deopt points = " << deopt_count << ")\n";
+  if (0 != deopt_count) {
+    os << " index  ast id    argc     pc";
+    if (FLAG_print_code_verbose) os << "  commands";
+    os << "\n";
+  }
   for (int i = 0; i < deopt_count; i++) {
-    PrintF(out, "%6d  %6d  %6d %6d",
-           i,
-           AstId(i).ToInt(),
-           ArgumentsStackHeight(i)->value(),
-           Pc(i)->value());
+    // TODO(svenpanne) Add some basic formatting to our streams.
+    Vector<char> buf1 = Vector<char>::New(128);
+    SNPrintF(buf1, "%6d  %6d  %6d %6d", i, AstId(i).ToInt(),
+             ArgumentsStackHeight(i)->value(), Pc(i)->value());
+    os << buf1.start();
 
     if (!FLAG_print_code_verbose) {
-      PrintF(out, "\n");
+      os << "\n";
       continue;
     }
     // Print details of the frame translation.
@@ -11213,18 +10647,19 @@
     TranslationIterator iterator(TranslationByteArray(), translation_index);
     Translation::Opcode opcode =
         static_cast<Translation::Opcode>(iterator.Next());
-    ASSERT(Translation::BEGIN == opcode);
+    DCHECK(Translation::BEGIN == opcode);
     int frame_count = iterator.Next();
     int jsframe_count = iterator.Next();
-    PrintF(out, "  %s {frame count=%d, js frame count=%d}\n",
-           Translation::StringFor(opcode),
-           frame_count,
-           jsframe_count);
+    os << "  " << Translation::StringFor(opcode)
+       << " {frame count=" << frame_count
+       << ", js frame count=" << jsframe_count << "}\n";
 
     while (iterator.HasNext() &&
            Translation::BEGIN !=
            (opcode = static_cast<Translation::Opcode>(iterator.Next()))) {
-      PrintF(out, "%24s    %s ", "", Translation::StringFor(opcode));
+      Vector<char> buf2 = Vector<char>::New(128);
+      SNPrintF(buf2, "%27s    %s ", "", Translation::StringFor(opcode));
+      os << buf2.start();
 
       switch (opcode) {
         case Translation::BEGIN:
@@ -11235,20 +10670,20 @@
           int ast_id = iterator.Next();
           int function_id = iterator.Next();
           unsigned height = iterator.Next();
-          PrintF(out, "{ast_id=%d, function=", ast_id);
+          os << "{ast_id=" << ast_id << ", function=";
           if (function_id != Translation::kSelfLiteralId) {
             Object* function = LiteralArray()->get(function_id);
-            JSFunction::cast(function)->PrintName(out);
+            os << Brief(JSFunction::cast(function)->shared()->DebugName());
           } else {
-            PrintF(out, "<self>");
+            os << "<self>";
           }
-          PrintF(out, ", height=%u}", height);
+          os << ", height=" << height << "}";
           break;
         }
 
         case Translation::COMPILED_STUB_FRAME: {
           Code::Kind stub_kind = static_cast<Code::Kind>(iterator.Next());
-          PrintF(out, "{kind=%d}", stub_kind);
+          os << "{kind=" << stub_kind << "}";
           break;
         }
 
@@ -11258,9 +10693,8 @@
           JSFunction* function =
               JSFunction::cast(LiteralArray()->get(function_id));
           unsigned height = iterator.Next();
-          PrintF(out, "{function=");
-          function->PrintName(out);
-          PrintF(out, ", height=%u}", height);
+          os << "{function=" << Brief(function->shared()->DebugName())
+             << ", height=" << height << "}";
           break;
         }
 
@@ -11269,100 +10703,101 @@
           int function_id = iterator.Next();
           JSFunction* function =
               JSFunction::cast(LiteralArray()->get(function_id));
-          PrintF(out, "{function=");
-          function->PrintName(out);
-          PrintF(out, "}");
+          os << "{function=" << Brief(function->shared()->DebugName()) << "}";
           break;
         }
 
         case Translation::REGISTER: {
           int reg_code = iterator.Next();
-            PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+          os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
           break;
         }
 
         case Translation::INT32_REGISTER: {
           int reg_code = iterator.Next();
-          PrintF(out, "{input=%s}", converter.NameOfCPURegister(reg_code));
+          os << "{input=" << converter.NameOfCPURegister(reg_code) << "}";
           break;
         }
 
         case Translation::UINT32_REGISTER: {
           int reg_code = iterator.Next();
-          PrintF(out, "{input=%s (unsigned)}",
-                 converter.NameOfCPURegister(reg_code));
+          os << "{input=" << converter.NameOfCPURegister(reg_code)
+             << " (unsigned)}";
           break;
         }
 
         case Translation::DOUBLE_REGISTER: {
           int reg_code = iterator.Next();
-          PrintF(out, "{input=%s}",
-                 DoubleRegister::AllocationIndexToString(reg_code));
+          os << "{input=" << DoubleRegister::AllocationIndexToString(reg_code)
+             << "}";
           break;
         }
 
         case Translation::STACK_SLOT: {
           int input_slot_index = iterator.Next();
-          PrintF(out, "{input=%d}", input_slot_index);
+          os << "{input=" << input_slot_index << "}";
           break;
         }
 
         case Translation::INT32_STACK_SLOT: {
           int input_slot_index = iterator.Next();
-          PrintF(out, "{input=%d}", input_slot_index);
+          os << "{input=" << input_slot_index << "}";
           break;
         }
 
         case Translation::UINT32_STACK_SLOT: {
           int input_slot_index = iterator.Next();
-          PrintF(out, "{input=%d (unsigned)}", input_slot_index);
+          os << "{input=" << input_slot_index << " (unsigned)}";
           break;
         }
 
         case Translation::DOUBLE_STACK_SLOT: {
           int input_slot_index = iterator.Next();
-          PrintF(out, "{input=%d}", input_slot_index);
+          os << "{input=" << input_slot_index << "}";
           break;
         }
 
         case Translation::LITERAL: {
           unsigned literal_index = iterator.Next();
-          PrintF(out, "{literal_id=%u}", literal_index);
+          os << "{literal_id=" << literal_index << "}";
           break;
         }
 
         case Translation::DUPLICATED_OBJECT: {
           int object_index = iterator.Next();
-          PrintF(out, "{object_index=%d}", object_index);
+          os << "{object_index=" << object_index << "}";
           break;
         }
 
         case Translation::ARGUMENTS_OBJECT:
         case Translation::CAPTURED_OBJECT: {
           int args_length = iterator.Next();
-          PrintF(out, "{length=%d}", args_length);
+          os << "{length=" << args_length << "}";
           break;
         }
       }
-      PrintF(out, "\n");
+      os << "\n";
     }
   }
 }
 
 
-void DeoptimizationOutputData::DeoptimizationOutputDataPrint(FILE* out) {
-  PrintF(out, "Deoptimization Output Data (deopt points = %d)\n",
-         this->DeoptPoints());
+void DeoptimizationOutputData::DeoptimizationOutputDataPrint(
+    OStream& os) {  // NOLINT
+  os << "Deoptimization Output Data (deopt points = " << this->DeoptPoints()
+     << ")\n";
   if (this->DeoptPoints() == 0) return;
 
-  PrintF(out, "%6s  %8s  %s\n", "ast id", "pc", "state");
+  os << "ast id        pc  state\n";
   for (int i = 0; i < this->DeoptPoints(); i++) {
     int pc_and_state = this->PcAndState(i)->value();
-    PrintF(out, "%6d  %8d  %s\n",
-           this->AstId(i).ToInt(),
-           FullCodeGenerator::PcField::decode(pc_and_state),
-           FullCodeGenerator::State2String(
-               FullCodeGenerator::StateField::decode(pc_and_state)));
+    // TODO(svenpanne) Add some basic formatting to our streams.
+    Vector<char> buf = Vector<char>::New(100);
+    SNPrintF(buf, "%6d  %8d  %s\n", this->AstId(i).ToInt(),
+             FullCodeGenerator::PcField::decode(pc_and_state),
+             FullCodeGenerator::State2String(
+                 FullCodeGenerator::StateField::decode(pc_and_state)));
+    os << buf.start();
   }
 }
 
@@ -11372,11 +10807,14 @@
     case UNINITIALIZED: return "UNINITIALIZED";
     case PREMONOMORPHIC: return "PREMONOMORPHIC";
     case MONOMORPHIC: return "MONOMORPHIC";
-    case MONOMORPHIC_PROTOTYPE_FAILURE: return "MONOMORPHIC_PROTOTYPE_FAILURE";
+    case PROTOTYPE_FAILURE:
+      return "PROTOTYPE_FAILURE";
     case POLYMORPHIC: return "POLYMORPHIC";
     case MEGAMORPHIC: return "MEGAMORPHIC";
     case GENERIC: return "GENERIC";
     case DEBUG_STUB: return "DEBUG_STUB";
+    case DEFAULT:
+      return "DEFAULT";
   }
   UNREACHABLE();
   return NULL;
@@ -11393,92 +10831,90 @@
 }
 
 
-void Code::PrintExtraICState(FILE* out, Kind kind, ExtraICState extra) {
-  PrintF(out, "extra_ic_state = ");
-  const char* name = NULL;
-  switch (kind) {
-    case STORE_IC:
-    case KEYED_STORE_IC:
-      if (extra == STRICT) name = "STRICT";
-      break;
-    default:
-      break;
-  }
-  if (name != NULL) {
-    PrintF(out, "%s\n", name);
+void Code::PrintExtraICState(OStream& os,  // NOLINT
+                             Kind kind, ExtraICState extra) {
+  os << "extra_ic_state = ";
+  if ((kind == STORE_IC || kind == KEYED_STORE_IC) && (extra == STRICT)) {
+    os << "STRICT\n";
   } else {
-    PrintF(out, "%d\n", extra);
+    os << extra << "\n";
   }
 }
 
 
-void Code::Disassemble(const char* name, FILE* out) {
-  PrintF(out, "kind = %s\n", Kind2String(kind()));
-  if (has_major_key()) {
-    PrintF(out, "major_key = %s\n",
-           CodeStub::MajorName(CodeStub::GetMajorKey(this), true));
+void Code::Disassemble(const char* name, OStream& os) {  // NOLINT
+  os << "kind = " << Kind2String(kind()) << "\n";
+  if (IsCodeStubOrIC()) {
+    const char* n = CodeStub::MajorName(CodeStub::GetMajorKey(this), true);
+    os << "major_key = " << (n == NULL ? "null" : n) << "\n";
   }
   if (is_inline_cache_stub()) {
-    PrintF(out, "ic_state = %s\n", ICState2String(ic_state()));
-    PrintExtraICState(out, kind(), extra_ic_state());
+    os << "ic_state = " << ICState2String(ic_state()) << "\n";
+    PrintExtraICState(os, kind(), extra_ic_state());
     if (ic_state() == MONOMORPHIC) {
-      PrintF(out, "type = %s\n", StubType2String(type()));
+      os << "type = " << StubType2String(type()) << "\n";
     }
     if (is_compare_ic_stub()) {
-      ASSERT(major_key() == CodeStub::CompareIC);
-      CompareIC::State left_state, right_state, handler_state;
-      Token::Value op;
-      ICCompareStub::DecodeMinorKey(stub_info(), &left_state, &right_state,
-                                    &handler_state, &op);
-      PrintF(out, "compare_state = %s*%s -> %s\n",
-             CompareIC::GetStateName(left_state),
-             CompareIC::GetStateName(right_state),
-             CompareIC::GetStateName(handler_state));
-      PrintF(out, "compare_operation = %s\n", Token::Name(op));
+      DCHECK(CodeStub::GetMajorKey(this) == CodeStub::CompareIC);
+      CompareICStub stub(stub_key(), GetIsolate());
+      os << "compare_state = " << CompareICState::GetStateName(stub.left())
+         << "*" << CompareICState::GetStateName(stub.right()) << " -> "
+         << CompareICState::GetStateName(stub.state()) << "\n";
+      os << "compare_operation = " << Token::Name(stub.op()) << "\n";
     }
   }
   if ((name != NULL) && (name[0] != '\0')) {
-    PrintF(out, "name = %s\n", name);
+    os << "name = " << name << "\n";
   }
   if (kind() == OPTIMIZED_FUNCTION) {
-    PrintF(out, "stack_slots = %d\n", stack_slots());
+    os << "stack_slots = " << stack_slots() << "\n";
   }
 
-  PrintF(out, "Instructions (size = %d)\n", instruction_size());
-  Disassembler::Decode(out, this);
-  PrintF(out, "\n");
+  os << "Instructions (size = " << instruction_size() << ")\n";
+  // TODO(svenpanne) The Disassembler should use streams, too!
+  {
+    CodeTracer::Scope trace_scope(GetIsolate()->GetCodeTracer());
+    Disassembler::Decode(trace_scope.file(), this);
+  }
+  os << "\n";
 
   if (kind() == FUNCTION) {
     DeoptimizationOutputData* data =
         DeoptimizationOutputData::cast(this->deoptimization_data());
-    data->DeoptimizationOutputDataPrint(out);
+    data->DeoptimizationOutputDataPrint(os);
   } else if (kind() == OPTIMIZED_FUNCTION) {
     DeoptimizationInputData* data =
         DeoptimizationInputData::cast(this->deoptimization_data());
-    data->DeoptimizationInputDataPrint(out);
+    data->DeoptimizationInputDataPrint(os);
   }
-  PrintF(out, "\n");
+  os << "\n";
 
   if (is_crankshafted()) {
     SafepointTable table(this);
-    PrintF(out, "Safepoints (size = %u)\n", table.size());
+    os << "Safepoints (size = " << table.size() << ")\n";
     for (unsigned i = 0; i < table.length(); i++) {
       unsigned pc_offset = table.GetPcOffset(i);
-      PrintF(out, "%p  %4d  ", (instruction_start() + pc_offset), pc_offset);
-      table.PrintEntry(i, out);
-      PrintF(out, " (sp -> fp)");
+      os << (instruction_start() + pc_offset) << "  ";
+      // TODO(svenpanne) Add some basic formatting to our streams.
+      Vector<char> buf1 = Vector<char>::New(30);
+      SNPrintF(buf1, "%4d", pc_offset);
+      os << buf1.start() << "  ";
+      table.PrintEntry(i, os);
+      os << " (sp -> fp)  ";
       SafepointEntry entry = table.GetEntry(i);
       if (entry.deoptimization_index() != Safepoint::kNoDeoptimizationIndex) {
-        PrintF(out, "  %6d", entry.deoptimization_index());
+        Vector<char> buf2 = Vector<char>::New(30);
+        SNPrintF(buf2, "%6d", entry.deoptimization_index());
+        os << buf2.start();
       } else {
-        PrintF(out, "  <none>");
+        os << "<none>";
       }
       if (entry.argument_count() > 0) {
-        PrintF(out, " argc: %d", entry.argument_count());
+        os << " argc: " << entry.argument_count();
       }
-      PrintF(out, "\n");
+      os << "\n";
     }
-    PrintF(out, "\n");
+    os << "\n";
   } else if (kind() == FUNCTION) {
     unsigned offset = back_edge_table_offset();
     // If there is no back edge table, the "table start" will be at or after
@@ -11487,30 +10923,32 @@
       DisallowHeapAllocation no_gc;
       BackEdgeTable back_edges(this, &no_gc);
 
-      PrintF(out, "Back edges (size = %u)\n", back_edges.length());
-      PrintF(out, "ast_id  pc_offset  loop_depth\n");
+      os << "Back edges (size = " << back_edges.length() << ")\n";
+      os << "ast_id  pc_offset  loop_depth\n";
 
       for (uint32_t i = 0; i < back_edges.length(); i++) {
-        PrintF(out, "%6d  %9u  %10u\n", back_edges.ast_id(i).ToInt(),
-                                        back_edges.pc_offset(i),
-                                        back_edges.loop_depth(i));
+        Vector<char> buf = Vector<char>::New(100);
+        SNPrintF(buf, "%6d  %9u  %10u\n", back_edges.ast_id(i).ToInt(),
+                 back_edges.pc_offset(i), back_edges.loop_depth(i));
+        os << buf.start();
       }
 
-      PrintF(out, "\n");
+      os << "\n";
     }
 #ifdef OBJECT_PRINT
     if (!type_feedback_info()->IsUndefined()) {
-      TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(out);
-      PrintF(out, "\n");
+      OFStream os(stdout);
+      TypeFeedbackInfo::cast(type_feedback_info())->TypeFeedbackInfoPrint(os);
+      os << "\n";
     }
 #endif
   }
 
-  PrintF(out, "RelocInfo (size = %d)\n", relocation_size());
+  os << "RelocInfo (size = " << relocation_size() << ")\n";
   for (RelocIterator it(this); !it.done(); it.next()) {
-    it.rinfo()->Print(GetIsolate(), out);
+    it.rinfo()->Print(GetIsolate(), os);
   }
-  PrintF(out, "\n");
+  os << "\n";
 }
 #endif  // ENABLE_DISASSEMBLER
 
@@ -11521,7 +10959,7 @@
     int length,
     SetFastElementsCapacitySmiMode smi_mode) {
   // We should never end up here with a pixel or external array.
-  ASSERT(!object->HasExternalArrayElements());
+  DCHECK(!object->HasExternalArrayElements());
 
   // Allocate a new fast elements backing store.
   Handle<FixedArray> new_elements =
@@ -11581,7 +11019,7 @@
                                                       int capacity,
                                                       int length) {
   // We should never end up here with a pixel or external array.
-  ASSERT(!object->HasExternalArrayElements());
+  DCHECK(!object->HasExternalArrayElements());
 
   Handle<FixedArrayBase> elems =
       object->GetIsolate()->factory()->NewFixedDoubleArray(capacity);
@@ -11617,7 +11055,7 @@
 
 // static
 void JSArray::Initialize(Handle<JSArray> array, int capacity, int length) {
-  ASSERT(capacity >= 0);
+  DCHECK(capacity >= 0);
   array->GetIsolate()->factory()->NewJSArrayStorage(
       array, length, capacity, INITIALIZE_ARRAY_ELEMENTS_WITH_HOLE);
 }
@@ -11637,10 +11075,11 @@
                         uint32_t index,
                         List<Handle<Object> >* old_values,
                         List<uint32_t>* indices) {
-  PropertyAttributes attributes =
+  Maybe<PropertyAttributes> maybe =
       JSReceiver::GetOwnElementAttribute(object, index);
-  ASSERT(attributes != ABSENT);
-  if (attributes == DONT_DELETE) return false;
+  DCHECK(maybe.has_value);
+  DCHECK(maybe.value != ABSENT);
+  if (maybe.value == DONT_DELETE) return false;
   Handle<Object> value;
   if (!JSObject::GetOwnElementAccessorPair(object, index).is_null()) {
     value = Handle<Object>::cast(isolate->factory()->the_hole_value());
@@ -11668,7 +11107,7 @@
   Execution::Call(isolate,
                   Handle<JSFunction>(isolate->observers_enqueue_splice()),
                   isolate->factory()->undefined_value(),
-                  ARRAY_SIZE(args),
+                  arraysize(args),
                   args).Assert();
 }
 
@@ -11681,7 +11120,7 @@
   Execution::Call(isolate,
                   Handle<JSFunction>(isolate->observers_begin_perform_splice()),
                   isolate->factory()->undefined_value(),
-                  ARRAY_SIZE(args),
+                  arraysize(args),
                   args).Assert();
 }
 
@@ -11694,7 +11133,7 @@
   Execution::Call(isolate,
                   Handle<JSFunction>(isolate->observers_end_perform_splice()),
                   isolate->factory()->undefined_value(),
-                  ARRAY_SIZE(args),
+                  arraysize(args),
                   args).Assert();
 }
 
@@ -11702,8 +11141,19 @@
 MaybeHandle<Object> JSArray::SetElementsLength(
     Handle<JSArray> array,
     Handle<Object> new_length_handle) {
+  if (array->HasFastElements()) {
+    // If the new array won't fit in some non-trivial fraction of the max old
+    // space size, then force it to go dictionary mode.
+    int max_fast_array_size = static_cast<int>(
+        (array->GetHeap()->MaxOldGenerationSize() / kDoubleSize) / 4);
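+    // Illustrative: with a 512 MB max old generation this is
+    // 512 MB / 8 / 4 = 16M elements.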
+    if (new_length_handle->IsNumber() &&
+        NumberToInt32(*new_length_handle) >= max_fast_array_size) {
+      NormalizeElements(array);
+    }
+  }
+
   // We should never end up here with a pixel or external array.
-  ASSERT(array->AllowsSetElementsLength());
+  DCHECK(array->AllowsSetElementsLength());
   if (!array->map()->is_observed()) {
     return array->GetElementsAccessor()->SetLength(array, new_length_handle);
   }
@@ -11779,7 +11229,7 @@
 
     SetProperty(deleted, isolate->factory()->length_string(),
                 isolate->factory()->NewNumberFromUint(delete_count),
-                NONE, SLOPPY).Assert();
+                STRICT).Assert();
   }
 
   EnqueueSpliceRecord(array, index, deleted, add_count);
@@ -11809,10 +11259,12 @@
 Handle<Map> Map::PutPrototypeTransition(Handle<Map> map,
                                         Handle<Object> prototype,
                                         Handle<Map> target_map) {
-  ASSERT(target_map->IsMap());
-  ASSERT(HeapObject::cast(*prototype)->map()->IsMap());
-  // Don't cache prototype transition if this map is shared.
-  if (map->is_shared() || !FLAG_cache_prototype_transitions) return map;
+  DCHECK(target_map->IsMap());
+  DCHECK(HeapObject::cast(*prototype)->map()->IsMap());
+  // Don't cache the prototype transition if this map is a dictionary (shared)
+  // map or the map of a prototype.
+  if (map->is_prototype_map()) return map;
+  if (map->is_dictionary_map() || !FLAG_cache_prototype_transitions) return map;
 
   const int step = kProtoTransitionElementsPerEntry;
   const int header = kProtoTransitionHeaderSize;
@@ -11888,7 +11340,7 @@
 // static
 void Map::AddDependentIC(Handle<Map> map,
                          Handle<Code> stub) {
-  ASSERT(stub->next_code_link()->IsUndefined());
+  DCHECK(stub->next_code_link()->IsUndefined());
   int n = map->dependent_code()->number_of_entries(DependentCode::kWeakICGroup);
   if (n == 0) {
     // Slow path: insert the head of the list with possible heap allocation.
@@ -11896,7 +11348,7 @@
   } else {
     // Fast path: link the stub to the existing head of the list without any
     // heap allocation.
-    ASSERT(n == 1);
+    DCHECK(n == 1);
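+    // The stubs are chained through their next_code_link fields, so linking
+    // into the existing list requires no extra storage.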
     map->dependent_code()->AddToDependentICList(stub);
   }
 }
@@ -11987,7 +11439,7 @@
 
 #ifdef DEBUG
   for (int i = start; i < end; i++) {
-    ASSERT(is_code_at(i) || compilation_info_at(i) != info);
+    DCHECK(is_code_at(i) || compilation_info_at(i) != info);
   }
 #endif
 }
@@ -12014,18 +11466,18 @@
   // Use the last of each group to fill the gap in the previous group.
   for (int i = group; i < kGroupCount; i++) {
     int last_of_group = starts.at(i + 1) - 1;
-    ASSERT(last_of_group >= gap);
+    DCHECK(last_of_group >= gap);
     if (last_of_group == gap) continue;
     copy(last_of_group, gap);
     gap = last_of_group;
   }
-  ASSERT(gap == starts.number_of_entries() - 1);
+  DCHECK(gap == starts.number_of_entries() - 1);
   clear_at(gap);  // Clear last gap.
   set_number_of_entries(group, end - start - 1);
 
 #ifdef DEBUG
   for (int i = start; i < end - 1; i++) {
-    ASSERT(is_code_at(i) || compilation_info_at(i) != info);
+    DCHECK(is_code_at(i) || compilation_info_at(i) != info);
   }
 #endif
 }
@@ -12070,7 +11522,7 @@
     if (is_code_at(i)) {
       Code* code = code_at(i);
       if (!code->marked_for_deoptimization()) {
-        code->set_marked_for_deoptimization(true);
+        SetMarkedForDeoptimization(code, group);
         marked = true;
       }
     } else {
@@ -12095,7 +11547,7 @@
 void DependentCode::DeoptimizeDependentCodeGroup(
     Isolate* isolate,
     DependentCode::DependencyGroup group) {
-  ASSERT(AllowCodeDependencyChange::IsAllowed());
+  DCHECK(AllowCodeDependencyChange::IsAllowed());
   DisallowHeapAllocation no_allocation_scope;
   bool marked = MarkCodeForDeoptimization(isolate, group);
 
@@ -12121,6 +11573,50 @@
 }
 
 
+void DependentCode::SetMarkedForDeoptimization(Code* code,
+                                               DependencyGroup group) {
+  code->set_marked_for_deoptimization(true);
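+  // Under --trace-deopt, report which dependency group invalidated the code.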
+  if (FLAG_trace_deopt &&
+      (code->deoptimization_data() != code->GetHeap()->empty_fixed_array())) {
+    DeoptimizationInputData* deopt_data =
+        DeoptimizationInputData::cast(code->deoptimization_data());
+    CodeTracer::Scope scope(code->GetHeap()->isolate()->GetCodeTracer());
+    PrintF(scope.file(), "[marking dependent code 0x%08" V8PRIxPTR
+                         " (opt #%d) for deoptimization, reason: %s]\n",
+           reinterpret_cast<intptr_t>(code),
+           deopt_data->OptimizationId()->value(), DependencyGroupName(group));
+  }
+}
+
+
+const char* DependentCode::DependencyGroupName(DependencyGroup group) {
+  switch (group) {
+    case kWeakICGroup:
+      return "weak-ic";
+    case kWeakCodeGroup:
+      return "weak-code";
+    case kTransitionGroup:
+      return "transition";
+    case kPrototypeCheckGroup:
+      return "prototype-check";
+    case kElementsCantBeAddedGroup:
+      return "elements-cant-be-added";
+    case kPropertyCellChangedGroup:
+      return "property-cell-changed";
+    case kFieldTypeGroup:
+      return "field-type";
+    case kInitialMapChangedGroup:
+      return "initial-map-changed";
+    case kAllocationSiteTenuringChangedGroup:
+      return "allocation-site-tenuring-changed";
+    case kAllocationSiteTransitionChangedGroup:
+      return "allocation-site-transition-changed";
+  }
+  UNREACHABLE();
+  return "?";
+}
+
+
 Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
                                        Handle<Object> prototype) {
   Handle<Map> new_map = GetPrototypeTransition(map, prototype);
@@ -12135,7 +11631,7 @@
 
 MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
                                            Handle<Object> value,
-                                           bool skip_hidden_prototypes) {
+                                           bool from_javascript) {
 #ifdef DEBUG
   int size = object->Size();
 #endif
@@ -12156,23 +11652,23 @@
   // paragraph.
   if (!object->map()->is_extensible()) {
     Handle<Object> args[] = { object };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "non_extensible_proto", HandleVector(args, ARRAY_SIZE(args)));
-    return isolate->Throw<Object>(error);
+    THROW_NEW_ERROR(isolate, NewTypeError("non_extensible_proto",
+                                          HandleVector(args, arraysize(args))),
+                    Object);
   }
 
   // Before we can set the prototype we need to be sure
   // prototype cycles are prevented.
   // It is sufficient to validate that the receiver is not in the new prototype
   // chain.
-  for (Object* pt = *value;
-       pt != heap->null_value();
-       pt = pt->GetPrototype(isolate)) {
-    if (JSReceiver::cast(pt) == *object) {
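+  // Walk the proposed prototype chain, starting at the new prototype itself,
+  // and reject the assignment if the receiver already occurs in it.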
+  for (PrototypeIterator iter(isolate, *value,
+                              PrototypeIterator::START_AT_RECEIVER);
+       !iter.IsAtEnd(); iter.Advance()) {
+    if (JSReceiver::cast(iter.GetCurrent()) == *object) {
       // Cycle detected.
-      Handle<Object> error = isolate->factory()->NewError(
-          "cyclic_proto", HandleVector<Object>(NULL, 0));
-      return isolate->Throw<Object>(error);
+      THROW_NEW_ERROR(isolate,
+                      NewError("cyclic_proto", HandleVector<Object>(NULL, 0)),
+                      Object);
     }
   }
 
@@ -12180,14 +11676,14 @@
       object->map()->DictionaryElementsInPrototypeChainOnly();
   Handle<JSObject> real_receiver = object;
 
-  if (skip_hidden_prototypes) {
+  if (from_javascript) {
     // Find the first object in the chain whose prototype object is not
     // hidden and set the new prototype on that object.
-    Object* current_proto = real_receiver->GetPrototype();
-    while (current_proto->IsJSObject() &&
-          JSObject::cast(current_proto)->map()->is_hidden_prototype()) {
-      real_receiver = handle(JSObject::cast(current_proto), isolate);
-      current_proto = current_proto->GetPrototype(isolate);
+    PrototypeIterator iter(isolate, real_receiver);
+    while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+      real_receiver =
+          Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+      iter.Advance();
     }
   }
 
@@ -12198,11 +11694,13 @@
   if (map->prototype() == *value) return value;
 
   if (value->IsJSObject()) {
-    JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value));
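+    // Prototypes installed from JavaScript stay in REGULAR_PROTOTYPE mode;
+    // runtime-installed ones are eagerly optimized as FAST_PROTOTYPE.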
+    PrototypeOptimizationMode mode =
+        from_javascript ? REGULAR_PROTOTYPE : FAST_PROTOTYPE;
+    JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value), mode);
   }
 
   Handle<Map> new_map = Map::TransitionToPrototype(map, value);
-  ASSERT(new_map->prototype() == *value);
+  DCHECK(new_map->prototype() == *value);
   JSObject::MigrateToMap(real_receiver, new_map);
 
   if (!dictionary_elements_in_chain &&
@@ -12214,7 +11712,7 @@
   }
 
   heap->ClearInstanceofCache();
-  ASSERT(size == object->Size());
+  DCHECK(size == object->Size());
   return value;
 }
 
@@ -12232,34 +11730,15 @@
 }
 
 
-MaybeHandle<AccessorPair> JSObject::GetOwnPropertyAccessorPair(
-    Handle<JSObject> object,
-    Handle<Name> name) {
-  uint32_t index = 0;
-  if (name->AsArrayIndex(&index)) {
-    return GetOwnElementAccessorPair(object, index);
-  }
-
-  Isolate* isolate = object->GetIsolate();
-  LookupResult lookup(isolate);
-  object->LookupOwnRealNamedProperty(name, &lookup);
-
-  if (lookup.IsPropertyCallbacks() &&
-      lookup.GetCallbackObject()->IsAccessorPair()) {
-    return handle(AccessorPair::cast(lookup.GetCallbackObject()), isolate);
-  }
-  return MaybeHandle<AccessorPair>();
-}
-
-
 MaybeHandle<AccessorPair> JSObject::GetOwnElementAccessorPair(
     Handle<JSObject> object,
     uint32_t index) {
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), object->GetIsolate());
-    if (proto->IsNull()) return MaybeHandle<AccessorPair>();
-    ASSERT(proto->IsJSGlobalObject());
-    return GetOwnElementAccessorPair(Handle<JSObject>::cast(proto), index);
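+    // A global proxy has no own elements; forward the query to the hidden
+    // global object it stands in for.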
+    PrototypeIterator iter(object->GetIsolate(), object);
+    if (iter.IsAtEnd()) return MaybeHandle<AccessorPair>();
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return GetOwnElementAccessorPair(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index);
   }
 
   // Check for lookup interceptor.
@@ -12311,14 +11790,14 @@
     uint32_t index,
     Handle<Object> holder) {
   Isolate* isolate = object->GetIsolate();
-  ASSERT(!structure->IsForeign());
+  DCHECK(!structure->IsForeign());
   // api style callbacks.
   if (structure->IsExecutableAccessorInfo()) {
     Handle<ExecutableAccessorInfo> data =
         Handle<ExecutableAccessorInfo>::cast(structure);
     Object* fun_obj = data->getter();
-    v8::AccessorGetterCallback call_fun =
-        v8::ToCData<v8::AccessorGetterCallback>(fun_obj);
+    v8::AccessorNameGetterCallback call_fun =
+        v8::ToCData<v8::AccessorNameGetterCallback>(fun_obj);
     if (call_fun == NULL) return isolate->factory()->undefined_value();
     Handle<JSObject> holder_handle = Handle<JSObject>::cast(holder);
     Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
@@ -12368,15 +11847,15 @@
 
   // We should never get here to initialize a const with the hole
   // value since a const declaration would conflict with the setter.
-  ASSERT(!value->IsTheHole());
-  ASSERT(!structure->IsForeign());
+  DCHECK(!value->IsTheHole());
+  DCHECK(!structure->IsForeign());
   if (structure->IsExecutableAccessorInfo()) {
     // api style callbacks
     Handle<ExecutableAccessorInfo> data =
         Handle<ExecutableAccessorInfo>::cast(structure);
     Object* call_obj = data->setter();
-    v8::AccessorSetterCallback call_fun =
-        v8::ToCData<v8::AccessorSetterCallback>(call_obj);
+    v8::AccessorNameSetterCallback call_fun =
+        v8::ToCData<v8::AccessorNameSetterCallback>(call_obj);
     if (call_fun == NULL) return value;
     Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
     Handle<String> key(isolate->factory()->NumberToString(number));
@@ -12400,9 +11879,9 @@
       if (strict_mode == SLOPPY) return value;
       Handle<Object> key(isolate->factory()->NewNumberFromUint(index));
       Handle<Object> args[2] = { key, holder };
-      Handle<Object> error = isolate->factory()->NewTypeError(
-          "no_setter_in_callback", HandleVector(args, 2));
-      return isolate->Throw<Object>(error);
+      THROW_NEW_ERROR(
+          isolate, NewTypeError("no_setter_in_callback", HandleVector(args, 2)),
+          Object);
     }
   }
 
@@ -12446,7 +11925,7 @@
                                              Handle<Object> value,
                                              StrictMode strict_mode,
                                              bool check_prototype) {
-  ASSERT(object->HasFastSmiOrObjectElements() ||
+  DCHECK(object->HasFastSmiOrObjectElements() ||
          object->HasFastArgumentsElements());
 
   Isolate* isolate = object->GetIsolate();
@@ -12509,7 +11988,7 @@
     bool convert_to_slow = true;
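+    // Growing more than kMaxGap slots past the current capacity would leave
+    // a long run of holes; prefer dictionary elements in that case.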
     if ((index - capacity) < kMaxGap) {
       new_capacity = NewElementsCapacity(index + 1);
-      ASSERT(new_capacity > index);
+      DCHECK(new_capacity > index);
       if (!object->ShouldConvertToSlowElements(new_capacity)) {
         convert_to_slow = false;
       }
@@ -12543,7 +12022,7 @@
     UpdateAllocationSite(object, kind);
     Handle<Map> new_map = GetElementsTransitionMap(object, kind);
     JSObject::MigrateToMap(object, new_map);
-    ASSERT(IsFastObjectElementsKind(object->GetElementsKind()));
+    DCHECK(IsFastObjectElementsKind(object->GetElementsKind()));
   }
   // Increase backing store capacity if that's been decided previously.
   if (new_capacity != capacity) {
@@ -12560,7 +12039,7 @@
   }
 
   // Finally, set the new element and length.
-  ASSERT(object->elements()->IsFixedArray());
+  DCHECK(object->elements()->IsFixedArray());
   backing_store->set(index, *value);
   if (must_update_array_length) {
     Handle<JSArray>::cast(object)->set_length(Smi::FromInt(array_length));
@@ -12577,7 +12056,7 @@
     StrictMode strict_mode,
     bool check_prototype,
     SetPropertyMode set_mode) {
-  ASSERT(object->HasDictionaryElements() ||
+  DCHECK(object->HasDictionaryElements() ||
          object->HasDictionaryArgumentsElements());
   Isolate* isolate = object->GetIsolate();
 
@@ -12611,10 +12090,9 @@
         } else {
           Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
           Handle<Object> args[2] = { number, object };
-          Handle<Object> error =
-              isolate->factory()->NewTypeError("strict_read_only_property",
-                                               HandleVector(args, 2));
-          return isolate->Throw<Object>(error);
+          THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
+                                                HandleVector(args, 2)),
+                          Object);
         }
       }
       // Elements of the arguments object in slow mode might be slow aliases.
@@ -12623,7 +12101,7 @@
             Handle<AliasedArgumentsEntry>::cast(element);
         Handle<Context> context(Context::cast(elements->get(0)));
         int context_index = entry->aliased_context_slot();
-        ASSERT(!context->get(context_index)->IsTheHole());
+        DCHECK(!context->get(context_index)->IsTheHole());
         context->set(context_index, *value);
         // For elements that are still writable we keep slow aliasing.
         if (!details.IsReadOnly()) value = element;
@@ -12649,10 +12127,9 @@
         Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
         Handle<String> name = isolate->factory()->NumberToString(number);
         Handle<Object> args[1] = { name };
-        Handle<Object> error =
-            isolate->factory()->NewTypeError("object_not_extensible",
-                                             HandleVector(args, 1));
-        return isolate->Throw<Object>(error);
+        THROW_NEW_ERROR(isolate, NewTypeError("object_not_extensible",
+                                              HandleVector(args, 1)),
+                        Object);
       }
     }
 
@@ -12684,15 +12161,11 @@
     } else {
       new_length = dictionary->max_number_key() + 1;
     }
-    SetFastElementsCapacitySmiMode smi_mode = FLAG_smi_only_arrays
-        ? kAllowSmiElements
-        : kDontAllowSmiElements;
     bool has_smi_only_elements = false;
     bool should_convert_to_fast_double_elements =
         object->ShouldConvertToFastDoubleElements(&has_smi_only_elements);
-    if (has_smi_only_elements) {
-      smi_mode = kForceSmiElements;
-    }
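+    // Smi-only arrays are no longer behind a flag: force the smi
+    // representation when the dictionary holds only smis, otherwise allow it.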
+    SetFastElementsCapacitySmiMode smi_mode =
+        has_smi_only_elements ? kForceSmiElements : kAllowSmiElements;
 
     if (should_convert_to_fast_double_elements) {
       SetFastDoubleElementsCapacityAndLength(object, new_length, new_length);
@@ -12703,8 +12176,9 @@
     JSObject::ValidateElements(object);
 #ifdef DEBUG
     if (FLAG_trace_normalization) {
-      PrintF("Object elements are fast case again:\n");
-      object->Print();
+      OFStream os(stdout);
+      os << "Object elements are fast case again:\n";
+      object->Print(os);
     }
 #endif
   }
@@ -12717,7 +12191,7 @@
     Handle<Object> value,
     StrictMode strict_mode,
     bool check_prototype) {
-  ASSERT(object->HasFastDoubleElements());
+  DCHECK(object->HasFastDoubleElements());
 
   Handle<FixedArrayBase> base_elms(FixedArrayBase::cast(object->elements()));
   uint32_t elms_length = static_cast<uint32_t>(base_elms->length());
@@ -12790,7 +12264,7 @@
     // Try allocating extra space.
     int new_capacity = NewElementsCapacity(index + 1);
     if (!object->ShouldConvertToSlowElements(new_capacity)) {
-      ASSERT(static_cast<uint32_t>(new_capacity) > index);
+      DCHECK(static_cast<uint32_t>(new_capacity) > index);
       SetFastDoubleElementsCapacityAndLength(object, new_capacity, index + 1);
       FixedDoubleArray::cast(object->elements())->set(index, double_value);
       JSObject::ValidateElements(object);
@@ -12799,13 +12273,13 @@
   }
 
   // Otherwise default to slow case.
-  ASSERT(object->HasFastDoubleElements());
-  ASSERT(object->map()->has_fast_double_elements());
-  ASSERT(object->elements()->IsFixedDoubleArray() ||
+  DCHECK(object->HasFastDoubleElements());
+  DCHECK(object->map()->has_fast_double_elements());
+  DCHECK(object->elements()->IsFixedDoubleArray() ||
          object->elements()->length() == 0);
 
   NormalizeElements(object);
-  ASSERT(object->HasDictionaryElements());
+  DCHECK(object->HasDictionaryElements());
   return SetElement(object, index, value, NONE, strict_mode, check_prototype);
 }
 
@@ -12828,7 +12302,7 @@
                                             uint32_t index,
                                             Handle<Object> value,
                                             StrictMode strict_mode) {
-  ASSERT(!object->HasExternalArrayElements());
+  DCHECK(!object->HasExternalArrayElements());
   return JSObject::SetElement(object, index, value, NONE, strict_mode, false);
 }
 
@@ -12861,13 +12335,12 @@
   }
 
   if (object->IsJSGlobalProxy()) {
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return value;
-    ASSERT(proto->IsJSGlobalObject());
-    return SetElement(Handle<JSObject>::cast(proto), index, value, attributes,
-                      strict_mode,
-                      check_prototype,
-                      set_mode);
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return value;
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return SetElement(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index,
+        value, attributes, strict_mode, check_prototype, set_mode);
   }
 
   // Don't allow element properties to be redefined for external arrays.
@@ -12876,9 +12349,9 @@
       set_mode == DEFINE_PROPERTY) {
     Handle<Object> number = isolate->factory()->NewNumberFromUint(index);
     Handle<Object> args[] = { object, number };
-    Handle<Object> error = isolate->factory()->NewTypeError(
-        "redef_external_array_element", HandleVector(args, ARRAY_SIZE(args)));
-    return isolate->Throw<Object>(error);
+    THROW_NEW_ERROR(isolate, NewTypeError("redef_external_array_element",
+                                          HandleVector(args, arraysize(args))),
+                    Object);
   }
 
   // Normalize the elements to enable attributes on the property.
@@ -12896,8 +12369,11 @@
                                      strict_mode, check_prototype, set_mode);
   }
 
-  PropertyAttributes old_attributes =
+  Maybe<PropertyAttributes> maybe =
       JSReceiver::GetOwnElementAttribute(object, index);
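+  // An empty Maybe signals a pending exception; propagate it to the caller.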
+  if (!maybe.has_value) return MaybeHandle<Object>();
+  PropertyAttributes old_attributes = maybe.value;
+
   Handle<Object> old_value = isolate->factory()->the_hole_value();
   Handle<Object> old_length_handle;
   Handle<Object> new_length_handle;
@@ -12926,7 +12402,10 @@
       Object);
 
   Handle<String> name = isolate->factory()->Uint32ToString(index);
-  PropertyAttributes new_attributes = GetOwnElementAttribute(object, index);
+  maybe = GetOwnElementAttribute(object, index);
+  if (!maybe.has_value) return MaybeHandle<Object>();
+  PropertyAttributes new_attributes = maybe.value;
+
   if (old_attributes == ABSENT) {
     if (object->IsJSArray() &&
         !old_length_handle->SameValue(
@@ -12975,7 +12454,7 @@
     StrictMode strict_mode,
     bool check_prototype,
     SetPropertyMode set_mode) {
-  ASSERT(object->HasDictionaryElements() ||
+  DCHECK(object->HasDictionaryElements() ||
          object->HasDictionaryArgumentsElements() ||
          (attributes & (DONT_DELETE | DONT_ENUM | READ_ONLY)) == 0);
   Isolate* isolate = object->GetIsolate();
@@ -13037,7 +12516,7 @@
       if (!probe.is_null() && !probe->IsTheHole()) {
         Handle<Context> context(Context::cast(parameter_map->get(0)));
         int context_index = Handle<Smi>::cast(probe)->value();
-        ASSERT(!context->get(context_index)->IsTheHole());
+        DCHECK(!context->get(context_index)->IsTheHole());
         context->set(context_index, *value);
         // Redefining attributes of an aliased element destroys fast aliasing.
         if (set_mode == SET_PROPERTY || attributes == NONE) return value;
@@ -13085,7 +12564,7 @@
 
 
 bool AllocationSite::IsNestedSite() {
-  ASSERT(FLAG_trace_track_allocation_sites);
+  DCHECK(FLAG_trace_track_allocation_sites);
   Object* current = GetHeap()->allocation_sites_list();
   while (current->IsAllocationSite()) {
     AllocationSite* current_site = AllocationSite::cast(current);
@@ -13218,7 +12697,7 @@
        IsFastSmiOrObjectElementsKind(to_kind)) ||
       (from_kind == FAST_DOUBLE_ELEMENTS &&
        to_kind == FAST_HOLEY_DOUBLE_ELEMENTS)) {
-    ASSERT(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
+    DCHECK(from_kind != TERMINAL_FAST_ELEMENTS_KIND);
     // No change is needed to the elements() buffer, the transition
     // only requires a map change.
     Handle<Map> new_map = GetElementsTransitionMap(object, to_kind);
@@ -13296,12 +12775,12 @@
 
 
 bool JSArray::IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
-    Isolate* isolate = jsarray_map->GetIsolate();
-    ASSERT(!jsarray_map->is_dictionary_map());
-    LookupResult lookup(isolate);
-    Handle<Name> length_string = isolate->factory()->length_string();
-    jsarray_map->LookupDescriptor(NULL, *length_string, &lookup);
-    return lookup.IsReadOnly();
+  Isolate* isolate = jsarray_map->GetIsolate();
+  DCHECK(!jsarray_map->is_dictionary_map());
+  LookupResult lookup(isolate);
+  Handle<Name> length_string = isolate->factory()->length_string();
+  jsarray_map->LookupDescriptor(NULL, *length_string, &lookup);
+  return lookup.IsReadOnly();
 }
 
 
@@ -13310,11 +12789,12 @@
   uint32_t length = 0;
   CHECK(array->length()->ToArrayIndex(&length));
   if (length <= index) {
-    Isolate* isolate = array->GetIsolate();
-    LookupResult lookup(isolate);
-    Handle<Name> length_string = isolate->factory()->length_string();
-    array->LookupOwnRealNamedProperty(length_string, &lookup);
-    return lookup.IsReadOnly();
+    LookupIterator it(array, array->GetIsolate()->factory()->length_string(),
+                      LookupIterator::OWN_SKIP_INTERCEPTOR);
+    CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+    CHECK(it.IsFound());
+    CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+    return it.IsReadOnly();
   }
   return false;
 }
@@ -13324,9 +12804,9 @@
   Isolate* isolate = array->GetIsolate();
   Handle<Name> length = isolate->factory()->length_string();
   Handle<Object> args[2] = { length, array };
-  Handle<Object> error = isolate->factory()->NewTypeError(
-      "strict_read_only_property", HandleVector(args, ARRAY_SIZE(args)));
-  return isolate->Throw<Object>(error);
+  THROW_NEW_ERROR(isolate, NewTypeError("strict_read_only_property",
+                                        HandleVector(args, arraysize(args))),
+                  Object);
 }
 
 
@@ -13365,9 +12845,10 @@
       Object);
   if (!result->IsTheHole()) return result;
 
-  Handle<Object> proto(object->GetPrototype(), isolate);
-  if (proto->IsNull()) return isolate->factory()->undefined_value();
-  return Object::GetElementWithReceiver(isolate, proto, receiver, index);
+  PrototypeIterator iter(isolate, object);
+  if (iter.IsAtEnd()) return isolate->factory()->undefined_value();
+  return Object::GetElementWithReceiver(
+      isolate, PrototypeIterator::GetCurrent(iter), receiver, index);
 }
 
 
@@ -13490,7 +12971,7 @@
 
 
 bool JSObject::ShouldConvertToFastElements() {
-  ASSERT(HasDictionaryElements() || HasDictionaryArgumentsElements());
+  DCHECK(HasDictionaryElements() || HasDictionaryArgumentsElements());
   // If the elements are sparse, we should not go back to fast case.
   if (!HasDenseElements()) return false;
   // An object requiring access checks is never allowed to have fast
@@ -13530,7 +13011,7 @@
   *has_smi_only_elements = false;
   if (HasSloppyArgumentsElements()) return false;
   if (FLAG_unbox_double_arrays) {
-    ASSERT(HasDictionaryElements());
+    DCHECK(HasDictionaryElements());
     SeededNumberDictionary* dictionary = element_dictionary();
     bool found_double = false;
     for (int i = 0; i < dictionary->Capacity(); i++) {
@@ -13557,21 +13038,19 @@
 // together, so even though this function belongs in objects-debug.cc,
 // we keep it here instead to satisfy certain compilers.
 #ifdef OBJECT_PRINT
-template<typename Derived, typename Shape, typename Key>
-void Dictionary<Derived, Shape, Key>::Print(FILE* out) {
+template <typename Derived, typename Shape, typename Key>
+void Dictionary<Derived, Shape, Key>::Print(OStream& os) {  // NOLINT
   int capacity = DerivedHashTable::Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = DerivedHashTable::KeyAt(i);
     if (DerivedHashTable::IsKey(k)) {
-      PrintF(out, " ");
+      os << " ";
       if (k->IsString()) {
-        String::cast(k)->StringPrint(out);
+        String::cast(k)->StringPrint(os);
       } else {
-        k->ShortPrint(out);
+        os << Brief(k);
       }
-      PrintF(out, ": ");
-      ValueAt(i)->ShortPrint(out);
-      PrintF(out, "\n");
+      os << ": " << Brief(ValueAt(i)) << "\n";
     }
   }
 }
@@ -13590,14 +13069,14 @@
       elements->set(pos++, ValueAt(i), mode);
     }
   }
-  ASSERT(pos == elements->length());
+  DCHECK(pos == elements->length());
 }
 
 
 InterceptorInfo* JSObject::GetNamedInterceptor() {
-  ASSERT(map()->has_named_interceptor());
+  DCHECK(map()->has_named_interceptor());
   JSFunction* constructor = JSFunction::cast(map()->constructor());
-  ASSERT(constructor->shared()->IsApiFunction());
+  DCHECK(constructor->shared()->IsApiFunction());
   Object* result =
       constructor->shared()->get_api_func_data()->named_property_handler();
   return InterceptorInfo::cast(result);
@@ -13605,9 +13084,9 @@
 
 
 InterceptorInfo* JSObject::GetIndexedInterceptor() {
-  ASSERT(map()->has_indexed_interceptor());
+  DCHECK(map()->has_indexed_interceptor());
   JSFunction* constructor = JSFunction::cast(map()->constructor());
-  ASSERT(constructor->shared()->IsApiFunction());
+  DCHECK(constructor->shared()->IsApiFunction());
   Object* result =
       constructor->shared()->get_api_func_data()->indexed_property_handler();
   return InterceptorInfo::cast(result);
@@ -13697,66 +13176,50 @@
 }
 
 
-bool JSObject::HasRealNamedProperty(Handle<JSObject> object,
-                                    Handle<Name> key) {
-  Isolate* isolate = object->GetIsolate();
-  SealHandleScope shs(isolate);
-  // Check access rights if needed.
-  if (object->IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(object, key, v8::ACCESS_HAS)) {
-      isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
-      // TODO(yangguo): Issue 3269, check for scheduled exception missing?
-      return false;
-    }
-  }
-
-  LookupResult result(isolate);
-  object->LookupOwnRealNamedProperty(key, &result);
-  return result.IsFound() && !result.IsInterceptor();
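+// With the LookupIterator-based implementation, a failed access check is
+// expected to surface as an empty Maybe with the exception already scheduled.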
+Maybe<bool> JSObject::HasRealNamedProperty(Handle<JSObject> object,
+                                           Handle<Name> key) {
+  LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
+  if (!maybe_result.has_value) return Maybe<bool>();
+  return maybe(it.IsFound());
 }
 
 
-bool JSObject::HasRealElementProperty(Handle<JSObject> object, uint32_t index) {
+Maybe<bool> JSObject::HasRealElementProperty(Handle<JSObject> object,
+                                             uint32_t index) {
   Isolate* isolate = object->GetIsolate();
   HandleScope scope(isolate);
   // Check access rights if needed.
   if (object->IsAccessCheckNeeded()) {
     if (!isolate->MayIndexedAccess(object, index, v8::ACCESS_HAS)) {
       isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
-      // TODO(yangguo): Issue 3269, check for scheduled exception missing?
-      return false;
+      RETURN_VALUE_IF_SCHEDULED_EXCEPTION(isolate, Maybe<bool>());
+      return maybe(false);
     }
   }
 
   if (object->IsJSGlobalProxy()) {
     HandleScope scope(isolate);
-    Handle<Object> proto(object->GetPrototype(), isolate);
-    if (proto->IsNull()) return false;
-    ASSERT(proto->IsJSGlobalObject());
-    return HasRealElementProperty(Handle<JSObject>::cast(proto), index);
+    PrototypeIterator iter(isolate, object);
+    if (iter.IsAtEnd()) return maybe(false);
+    DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
+    return HasRealElementProperty(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), index);
   }
 
-  return GetElementAttributeWithoutInterceptor(
-             object, object, index, false) != ABSENT;
+  Maybe<PropertyAttributes> result =
+      GetElementAttributeWithoutInterceptor(object, object, index, false);
+  if (!result.has_value) return Maybe<bool>();
+  return maybe(result.value != ABSENT);
 }
 
 
-bool JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
-                                            Handle<Name> key) {
-  Isolate* isolate = object->GetIsolate();
-  SealHandleScope shs(isolate);
-  // Check access rights if needed.
-  if (object->IsAccessCheckNeeded()) {
-    if (!isolate->MayNamedAccess(object, key, v8::ACCESS_HAS)) {
-      isolate->ReportFailedAccessCheck(object, v8::ACCESS_HAS);
-      // TODO(yangguo): Issue 3269, check for scheduled exception missing?
-      return false;
-    }
-  }
-
-  LookupResult result(isolate);
-  object->LookupOwnRealNamedProperty(key, &result);
-  return result.IsPropertyCallbacks();
+Maybe<bool> JSObject::HasRealNamedCallbackProperty(Handle<JSObject> object,
+                                                   Handle<Name> key) {
+  LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  Maybe<PropertyAttributes> maybe_result = GetPropertyAttributes(&it);
+  if (!maybe_result.has_value) return Maybe<bool>();
+  return maybe(it.state() == LookupIterator::ACCESSOR);
 }
 
 
@@ -13803,7 +13266,7 @@
 
 void HeapSortPairs(FixedArray* content, FixedArray* numbers, int len) {
   // In-place heap sort.
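+  // Sorts 'content' and 'numbers' as parallel arrays, ordered by 'numbers'.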
-  ASSERT(content->length() == numbers->length());
+  DCHECK(content->length() == numbers->length());
 
   // Bottom-up max-heap construction.
   for (int i = 1; i < len; ++i) {
@@ -13849,7 +13312,7 @@
 
 // Sort this array and the numbers as pairs wrt. the (distinct) numbers.
 void FixedArray::SortPairs(FixedArray* numbers, uint32_t len) {
-  ASSERT(this->length() == numbers->length());
+  DCHECK(this->length() == numbers->length());
   // For small arrays, simply use insertion sort.
   if (len <= 10) {
     InsertionSortPairs(this, numbers, len);
@@ -13892,7 +13355,7 @@
 // mirrors.
 void JSObject::GetOwnPropertyNames(
     FixedArray* storage, int index, PropertyAttributes filter) {
-  ASSERT(storage->length() >= (NumberOfOwnProperties(filter) - index));
+  DCHECK(storage->length() >= (NumberOfOwnProperties(filter) - index));
   if (HasFastProperties()) {
     int real_size = map()->NumberOfOwnDescriptors();
     DescriptorArray* descs = map()->instance_descriptors();
@@ -13949,7 +13412,7 @@
           counter++;
         }
       }
-      ASSERT(!storage || storage->length() >= counter);
+      DCHECK(!storage || storage->length() >= counter);
       break;
     }
     case FAST_DOUBLE_ELEMENTS:
@@ -13965,7 +13428,7 @@
           counter++;
         }
       }
-      ASSERT(!storage || storage->length() >= counter);
+      DCHECK(!storage || storage->length() >= counter);
       break;
     }
 
@@ -13983,7 +13446,7 @@
         }
         counter++;
       }
-      ASSERT(!storage || storage->length() >= counter);
+      DCHECK(!storage || storage->length() >= counter);
       break;
     }
 
@@ -14051,7 +13514,7 @@
       counter += str->length();
     }
   }
-  ASSERT(!storage || storage->length() == counter);
+  DCHECK(!storage || storage->length() == counter);
   return counter;
 }
 
@@ -14061,33 +13524,6 @@
 }
 
 
-// StringKey simply carries a string object as key.
-class StringKey : public HashTableKey {
- public:
-  explicit StringKey(String* string) :
-      string_(string),
-      hash_(HashForObject(string)) { }
-
-  bool IsMatch(Object* string) {
-    // We know that all entries in a hash table had their hash keys created.
-    // Use that knowledge to have fast failure.
-    if (hash_ != HashForObject(string)) {
-      return false;
-    }
-    return string_->Equals(String::cast(string));
-  }
-
-  uint32_t Hash() { return hash_; }
-
-  uint32_t HashForObject(Object* other) { return String::cast(other)->Hash(); }
-
-  Object* AsObject(Heap* heap) { return string_; }
-
-  String* string_;
-  uint32_t hash_;
-};
-
-
 // StringSharedKeys are used as keys in the eval cache.
 class StringSharedKey : public HashTableKey {
  public:
@@ -14100,14 +13536,14 @@
         strict_mode_(strict_mode),
         scope_position_(scope_position) { }
 
-  bool IsMatch(Object* other) V8_OVERRIDE {
+  bool IsMatch(Object* other) OVERRIDE {
     DisallowHeapAllocation no_allocation;
     if (!other->IsFixedArray()) return false;
     FixedArray* other_array = FixedArray::cast(other);
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
     if (shared != *shared_) return false;
     int strict_unchecked = Smi::cast(other_array->get(2))->value();
-    ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
+    DCHECK(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
     StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
     if (strict_mode != strict_mode_) return false;
     int scope_position = Smi::cast(other_array->get(3))->value();
@@ -14135,18 +13571,18 @@
     return hash;
   }
 
-  uint32_t Hash() V8_OVERRIDE {
+  uint32_t Hash() OVERRIDE {
     return StringSharedHashHelper(*source_, *shared_, strict_mode_,
                                   scope_position_);
   }
 
-  uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+  uint32_t HashForObject(Object* obj) OVERRIDE {
     DisallowHeapAllocation no_allocation;
     FixedArray* other_array = FixedArray::cast(obj);
     SharedFunctionInfo* shared = SharedFunctionInfo::cast(other_array->get(0));
     String* source = String::cast(other_array->get(1));
     int strict_unchecked = Smi::cast(other_array->get(2))->value();
-    ASSERT(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
+    DCHECK(strict_unchecked == SLOPPY || strict_unchecked == STRICT);
     StrictMode strict_mode = static_cast<StrictMode>(strict_unchecked);
     int scope_position = Smi::cast(other_array->get(3))->value();
     return StringSharedHashHelper(
@@ -14154,7 +13590,7 @@
   }
 
 
-  Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     Handle<FixedArray> array = isolate->factory()->NewFixedArray(4);
     array->set(0, *shared_);
     array->set(1, *source_);
@@ -14182,22 +13618,22 @@
   // stored value is stored where the key should be.  IsMatch then
   // compares the search key to the found object, rather than comparing
   // a key to a key.
-  bool IsMatch(Object* obj) V8_OVERRIDE {
+  bool IsMatch(Object* obj) OVERRIDE {
     FixedArray* val = FixedArray::cast(obj);
     return string_->Equals(String::cast(val->get(JSRegExp::kSourceIndex)))
         && (flags_ == val->get(JSRegExp::kFlagsIndex));
   }
 
-  uint32_t Hash() V8_OVERRIDE { return RegExpHash(*string_, flags_); }
+  uint32_t Hash() OVERRIDE { return RegExpHash(*string_, flags_); }
 
-  Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     // Plain hash maps, which is where regexp keys are used, don't
     // use this function.
     UNREACHABLE();
     return MaybeHandle<Object>().ToHandleChecked();
   }
 
-  uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+  uint32_t HashForObject(Object* obj) OVERRIDE {
     FixedArray* val = FixedArray::cast(obj);
     return RegExpHash(String::cast(val->get(JSRegExp::kSourceIndex)),
                       Smi::cast(val->get(JSRegExp::kFlagsIndex)));
@@ -14224,80 +13660,43 @@
 }
 
 
-template<>
-const uint8_t* SubStringKey<uint8_t>::GetChars() {
-  return string_->IsSeqOneByteString()
-      ? SeqOneByteString::cast(*string_)->GetChars()
-      : ExternalAsciiString::cast(*string_)->GetChars();
-}
-
-
-template<>
-const uint16_t* SubStringKey<uint16_t>::GetChars() {
-  return string_->IsSeqTwoByteString()
-      ? SeqTwoByteString::cast(*string_)->GetChars()
-      : ExternalTwoByteString::cast(*string_)->GetChars();
-}
-
-
-template<>
-Handle<Object> SubStringKey<uint8_t>::AsHandle(Isolate* isolate) {
+Handle<Object> SeqOneByteSubStringKey::AsHandle(Isolate* isolate) {
   if (hash_field_ == 0) Hash();
-  Vector<const uint8_t> chars(GetChars() + from_, length_);
-  return isolate->factory()->NewOneByteInternalizedString(chars, hash_field_);
+  return isolate->factory()->NewOneByteInternalizedSubString(
+      string_, from_, length_, hash_field_);
 }
 
 
-template<>
-Handle<Object> SubStringKey<uint16_t>::AsHandle(Isolate* isolate) {
-  if (hash_field_ == 0) Hash();
-  Vector<const uint16_t> chars(GetChars() + from_, length_);
-  return isolate->factory()->NewTwoByteInternalizedString(chars, hash_field_);
-}
-
-
-template<>
-bool SubStringKey<uint8_t>::IsMatch(Object* string) {
-  Vector<const uint8_t> chars(GetChars() + from_, length_);
+bool SeqOneByteSubStringKey::IsMatch(Object* string) {
+  Vector<const uint8_t> chars(string_->GetChars() + from_, length_);
   return String::cast(string)->IsOneByteEqualTo(chars);
 }
 
 
-template<>
-bool SubStringKey<uint16_t>::IsMatch(Object* string) {
-  Vector<const uint16_t> chars(GetChars() + from_, length_);
-  return String::cast(string)->IsTwoByteEqualTo(chars);
-}
-
-
-template class SubStringKey<uint8_t>;
-template class SubStringKey<uint16_t>;
-
-
 // InternalizedStringKey carries a string/internalized-string object as key.
 class InternalizedStringKey : public HashTableKey {
  public:
   explicit InternalizedStringKey(Handle<String> string)
       : string_(string) { }
 
-  virtual bool IsMatch(Object* string) V8_OVERRIDE {
+  virtual bool IsMatch(Object* string) OVERRIDE {
     return String::cast(string)->Equals(*string_);
   }
 
-  virtual uint32_t Hash() V8_OVERRIDE { return string_->Hash(); }
+  virtual uint32_t Hash() OVERRIDE { return string_->Hash(); }
 
-  virtual uint32_t HashForObject(Object* other) V8_OVERRIDE {
+  virtual uint32_t HashForObject(Object* other) OVERRIDE {
     return String::cast(other)->Hash();
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     // Internalize the string if possible.
     MaybeHandle<Map> maybe_map =
         isolate->factory()->InternalizedStringMapForString(string_);
     Handle<Map> map;
     if (maybe_map.ToHandle(&map)) {
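+      // A string of the right shape is internalized in place simply by
+      // switching its map; the character data is untouched.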
       string_->set_map_no_write_barrier(*map);
-      ASSERT(string_->IsInternalizedString());
+      DCHECK(string_->IsInternalizedString());
       return string_;
     }
     // Otherwise allocate a new internalized string.
@@ -14333,8 +13732,8 @@
     int at_least_space_for,
     MinimumCapacity capacity_option,
     PretenureFlag pretenure) {
-  ASSERT(0 <= at_least_space_for);
-  ASSERT(!capacity_option || IsPowerOf2(at_least_space_for));
+  DCHECK(0 <= at_least_space_for);
+  DCHECK(!capacity_option || base::bits::IsPowerOfTwo32(at_least_space_for));
   int capacity = (capacity_option == USE_CUSTOM_MINIMUM_CAPACITY)
                      ? at_least_space_for
                      : ComputeCapacity(at_least_space_for);
@@ -14388,7 +13787,7 @@
       set(index, *key);
       return entry;
     }
-    ASSERT(element->IsTheHole() || !Name::cast(element)->Equals(*key));
+    DCHECK(element->IsTheHole() || !Name::cast(element)->Equals(*key));
     entry = NextProbe(entry, count++, capacity);
   }
   return kNotFound;
@@ -14399,7 +13798,7 @@
 void HashTable<Derived, Shape, Key>::Rehash(
     Handle<Derived> new_table,
     Key key) {
-  ASSERT(NumberOfElements() < new_table->Capacity());
+  DCHECK(NumberOfElements() < new_table->Capacity());
 
   DisallowHeapAllocation no_gc;
   WriteBarrierMode mode = new_table->GetWriteBarrierMode(no_gc);
@@ -14631,10 +14030,6 @@
     SlowReverseLookup(Object* value);
 
 template Object*
-Dictionary<UnseededNumberDictionary, UnseededNumberDictionaryShape, uint32_t>::
-    SlowReverseLookup(Object* value);
-
-template Object*
 Dictionary<NameDictionary, NameDictionaryShape, Handle<Name> >::
     SlowReverseLookup(Object* value);
 
@@ -14732,7 +14127,7 @@
 
 Handle<Object> JSObject::PrepareSlowElementsForSort(
     Handle<JSObject> object, uint32_t limit) {
-  ASSERT(object->HasDictionaryElements());
+  DCHECK(object->HasDictionaryElements());
   Isolate* isolate = object->GetIsolate();
   // Must stay in dictionary mode, either because of requires_slow_elements,
   // or because we are not going to sort (and therefore compact) all of the
@@ -14752,10 +14147,10 @@
     Object* k = dict->KeyAt(i);
     if (!dict->IsKey(k)) continue;
 
-    ASSERT(k->IsNumber());
-    ASSERT(!k->IsSmi() || Smi::cast(k)->value() >= 0);
-    ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
-    ASSERT(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
+    DCHECK(k->IsNumber());
+    DCHECK(!k->IsSmi() || Smi::cast(k)->value() >= 0);
+    DCHECK(!k->IsHeapNumber() || HeapNumber::cast(k)->value() >= 0);
+    DCHECK(!k->IsHeapNumber() || HeapNumber::cast(k)->value() <= kMaxUInt32);
 
     HandleScope scope(isolate);
     Handle<Object> value(dict->ValueAt(i), isolate);
@@ -14777,7 +14172,7 @@
       } else {
         Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
             new_dict, pos, value, details);
-        ASSERT(result.is_identical_to(new_dict));
+        DCHECK(result.is_identical_to(new_dict));
         USE(result);
         pos++;
       }
@@ -14788,7 +14183,7 @@
     } else {
       Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
           new_dict, key, value, details);
-      ASSERT(result.is_identical_to(new_dict));
+      DCHECK(result.is_identical_to(new_dict));
       USE(result);
     }
   }
@@ -14804,7 +14199,7 @@
     HandleScope scope(isolate);
     Handle<Object> result = SeededNumberDictionary::AddNumberEntry(
         new_dict, pos, isolate->factory()->undefined_value(), no_details);
-    ASSERT(result.is_identical_to(new_dict));
+    DCHECK(result.is_identical_to(new_dict));
     USE(result);
     pos++;
     undefs--;
@@ -14858,7 +14253,7 @@
   } else if (!object->HasFastDoubleElements()) {
     EnsureWritableFastElements(object);
   }
-  ASSERT(object->HasFastSmiOrObjectElements() ||
+  DCHECK(object->HasFastSmiOrObjectElements() ||
          object->HasFastDoubleElements());
 
   // Collect holes at the end, undefined before that and the rest at the
@@ -15014,7 +14409,7 @@
     } else {
       // Clamp undefined to zero (default). All other types have been
       // converted to a number type further up in the call chain.
-      ASSERT(value->IsUndefined());
+      DCHECK(value->IsUndefined());
     }
     array->set(index, clamped_value);
   }
@@ -15039,7 +14434,7 @@
     } else {
       // Clamp undefined to zero (default). All other types have been
       // converted to a number type further up in the call chain.
-      ASSERT(value->IsUndefined());
+      DCHECK(value->IsUndefined());
     }
     receiver->set(index, cast_value);
   }
@@ -15102,7 +14497,7 @@
     } else {
       // Clamp undefined to zero (default). All other types have been
       // converted to a number type further up in the call chain.
-      ASSERT(value->IsUndefined());
+      DCHECK(value->IsUndefined());
     }
     array->set(index, cast_value);
   }
@@ -15114,7 +14509,7 @@
     Handle<ExternalFloat32Array> array,
     uint32_t index,
     Handle<Object> value) {
-  float cast_value = static_cast<float>(OS::nan_value());
+  float cast_value = static_cast<float>(base::OS::nan_value());
   if (index < static_cast<uint32_t>(array->length())) {
     if (value->IsSmi()) {
       int int_value = Handle<Smi>::cast(value)->value();
@@ -15125,7 +14520,7 @@
     } else {
       // Clamp undefined to NaN (default). All other types have been
       // converted to a number type further up in the call chain.
-      ASSERT(value->IsUndefined());
+      DCHECK(value->IsUndefined());
     }
     array->set(index, cast_value);
   }
@@ -15137,14 +14532,14 @@
     Handle<ExternalFloat64Array> array,
     uint32_t index,
     Handle<Object> value) {
-  double double_value = OS::nan_value();
+  double double_value = base::OS::nan_value();
   if (index < static_cast<uint32_t>(array->length())) {
     if (value->IsNumber()) {
       double_value = value->Number();
     } else {
       // Clamp undefined to NaN (default). All other types have been
       // converted to a number type further up in the call chain.
-      ASSERT(value->IsUndefined());
+      DCHECK(value->IsUndefined());
     }
     array->set(index, double_value);
   }
@@ -15152,17 +14547,10 @@
 }
 
 
-PropertyCell* GlobalObject::GetPropertyCell(LookupResult* result) {
-  ASSERT(!HasFastProperties());
-  Object* value = property_dictionary()->ValueAt(result->GetDictionaryEntry());
-  return PropertyCell::cast(value);
-}
-
-
 Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
     Handle<JSGlobalObject> global,
     Handle<Name> name) {
-  ASSERT(!global->HasFastProperties());
+  DCHECK(!global->HasFastProperties());
   int entry = global->property_dictionary()->FindEntry(name);
   if (entry == NameDictionary::kNotFound) {
     Isolate* isolate = global->GetIsolate();
@@ -15176,7 +14564,7 @@
     return cell;
   } else {
     Object* value = global->property_dictionary()->ValueAt(entry);
-    ASSERT(value->IsPropertyCell());
+    DCHECK(value->IsPropertyCell());
     return handle(PropertyCell::cast(value));
   }
 }
@@ -15214,11 +14602,11 @@
     uint16_t chars[2] = {c1, c2};
     uint32_t check_hash = StringHasher::HashSequentialString(chars, 2, seed);
     hash = (hash << String::kHashShift) | String::kIsNotArrayIndexMask;
-    ASSERT_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash));
+    DCHECK_EQ(static_cast<int32_t>(hash), static_cast<int32_t>(check_hash));
 #endif
   }
 
-  bool IsMatch(Object* o) V8_OVERRIDE {
+  bool IsMatch(Object* o) OVERRIDE {
     if (!o->IsString()) return false;
     String* other = String::cast(o);
     if (other->length() != 2) return false;
@@ -15226,13 +14614,13 @@
     return other->Get(1) == c2_;
   }
 
-  uint32_t Hash() V8_OVERRIDE { return hash_; }
-  uint32_t HashForObject(Object* key) V8_OVERRIDE {
+  uint32_t Hash() OVERRIDE { return hash_; }
+  uint32_t HashForObject(Object* key) OVERRIDE {
     if (!key->IsString()) return 0;
     return String::cast(key)->Hash();
   }
 
-  Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE {
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     // The TwoCharHashTableKey is only used for lookups in the string
     // table, not for adding to it.
     UNREACHABLE();
@@ -15266,7 +14654,7 @@
     return MaybeHandle<String>();
   } else {
     Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
-    ASSERT(StringShape(*result).IsInternalized());
+    DCHECK(StringShape(*result).IsInternalized());
     return result;
   }
 }
@@ -15283,7 +14671,7 @@
     return MaybeHandle<String>();
   } else {
     Handle<String> result(String::cast(string_table->KeyAt(entry)), isolate);
-    ASSERT(StringShape(*result).IsInternalized());
+    DCHECK(StringShape(*result).IsInternalized());
     return result;
   }
 }
@@ -15336,13 +14724,13 @@
 }
 
 
-Handle<Object> CompilationCacheTable::LookupEval(Handle<String> src,
-                                                 Handle<Context> context,
-                                                 StrictMode strict_mode,
-                                                 int scope_position) {
+Handle<Object> CompilationCacheTable::LookupEval(
+    Handle<String> src, Handle<SharedFunctionInfo> outer_info,
+    StrictMode strict_mode, int scope_position) {
   Isolate* isolate = GetIsolate();
-  Handle<SharedFunctionInfo> shared(context->closure()->shared());
-  StringSharedKey key(src, shared, strict_mode, scope_position);
+  // The cache key is the tuple (source, outer shared function info,
+  // strict mode, scope position); it unambiguously identifies the context
+  // chain that the cached eval code assumes.
+  StringSharedKey key(src, outer_info, strict_mode, scope_position);
   int entry = FindEntry(&key);
   if (entry == kNotFound) return isolate->factory()->undefined_value();
   return Handle<Object>(get(EntryToIndex(entry) + 1), isolate);
@@ -15379,11 +14767,10 @@
 
 Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
     Handle<CompilationCacheTable> cache, Handle<String> src,
-    Handle<Context> context, Handle<SharedFunctionInfo> value,
+    Handle<SharedFunctionInfo> outer_info, Handle<SharedFunctionInfo> value,
     int scope_position) {
   Isolate* isolate = cache->GetIsolate();
-  Handle<SharedFunctionInfo> shared(context->closure()->shared());
-  StringSharedKey key(src, shared, value->strict_mode(), scope_position);
+  StringSharedKey key(src, outer_info, value->strict_mode(), scope_position);
   cache = EnsureCapacity(cache, 1, &key);
   Handle<Object> k = key.AsHandle(isolate);
   int entry = cache->FindInsertionEntry(key.Hash());
@@ -15430,7 +14817,7 @@
  public:
   explicit StringsKey(Handle<FixedArray> strings) : strings_(strings) { }
 
-  bool IsMatch(Object* strings) V8_OVERRIDE {
+  bool IsMatch(Object* strings) OVERRIDE {
     FixedArray* o = FixedArray::cast(strings);
     int len = strings_->length();
     if (o->length() != len) return false;
@@ -15440,9 +14827,9 @@
     return true;
   }
 
-  uint32_t Hash() V8_OVERRIDE { return HashForObject(*strings_); }
+  uint32_t Hash() OVERRIDE { return HashForObject(*strings_); }
 
-  uint32_t HashForObject(Object* obj) V8_OVERRIDE {
+  uint32_t HashForObject(Object* obj) OVERRIDE {
     FixedArray* strings = FixedArray::cast(obj);
     int len = strings->length();
     uint32_t hash = 0;
@@ -15452,7 +14839,7 @@
     return hash;
   }
 
-  Handle<Object> AsHandle(Isolate* isolate) V8_OVERRIDE { return strings_; }
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE { return strings_; }
 
  private:
   Handle<FixedArray> strings_;
@@ -15486,7 +14873,7 @@
     Isolate* isolate,
     int at_least_space_for,
     PretenureFlag pretenure) {
-  ASSERT(0 <= at_least_space_for);
+  DCHECK(0 <= at_least_space_for);
   Handle<Derived> dict = DerivedHashTable::New(isolate,
                                                at_least_space_for,
                                                USE_DEFAULT_MINIMUM_CAPACITY,
@@ -15572,7 +14959,7 @@
   Factory* factory = dictionary->GetIsolate()->factory();
   PropertyDetails details = dictionary->DetailsAt(entry);
   // Ignore attributes if forcing a deletion.
-  if (details.IsDontDelete() && mode != JSReceiver::FORCE_DELETION) {
+  if (!details.IsConfigurable() && mode != JSReceiver::FORCE_DELETION) {
     return factory->false_value();
   }
 
@@ -15613,7 +15000,7 @@
     Handle<Object> value,
     PropertyDetails details) {
   // Validate that the key is absent.
-  SLOW_ASSERT((dictionary->FindEntry(key) == Dictionary::kNotFound));
+  SLOW_DCHECK((dictionary->FindEntry(key) == Dictionary::kNotFound));
   // Check whether the dictionary should be extended.
   dictionary = EnsureCapacity(dictionary, 1, key);
 
@@ -15645,7 +15032,7 @@
     dictionary->SetNextEnumerationIndex(index + 1);
   }
   dictionary->SetEntry(entry, k, value, details);
-  ASSERT((dictionary->KeyAt(entry)->IsNumber() ||
+  DCHECK((dictionary->KeyAt(entry)->IsNumber() ||
           dictionary->KeyAt(entry)->IsName()));
   dictionary->ElementAdded();
 }
@@ -15677,7 +15064,7 @@
     Handle<Object> value,
     PropertyDetails details) {
   dictionary->UpdateMaxNumberKey(key);
-  SLOW_ASSERT(dictionary->FindEntry(key) == kNotFound);
+  SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
   return Add(dictionary, key, value, details);
 }
 
@@ -15686,7 +15073,7 @@
     Handle<UnseededNumberDictionary> dictionary,
     uint32_t key,
     Handle<Object> value) {
-  SLOW_ASSERT(dictionary->FindEntry(key) == kNotFound);
+  SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
   return Add(dictionary, key, value, PropertyDetails(NONE, NORMAL, 0));
 }
 
@@ -15772,7 +15159,7 @@
     FixedArray* storage,
     PropertyAttributes filter,
     typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
-  ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter));
+  DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
   int capacity = DerivedHashTable::Capacity();
   int index = 0;
   for (int i = 0; i < capacity; i++) {
@@ -15787,7 +15174,7 @@
   if (sort_mode == Dictionary::SORTED) {
     storage->SortPairs(storage, index);
   }
-  ASSERT(storage->length() >= index);
+  DCHECK(storage->length() >= index);
 }
 
 
@@ -15833,7 +15220,7 @@
     int index,
     PropertyAttributes filter,
     typename Dictionary<Derived, Shape, Key>::SortMode sort_mode) {
-  ASSERT(storage->length() >= NumberOfElementsFilterAttributes(filter));
+  DCHECK(storage->length() >= NumberOfElementsFilterAttributes(filter));
   int capacity = DerivedHashTable::Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = DerivedHashTable::KeyAt(i);
@@ -15847,7 +15234,7 @@
   if (sort_mode == Dictionary::SORTED) {
     storage->SortPairs(storage, index);
   }
-  ASSERT(storage->length() >= index);
+  DCHECK(storage->length() >= index);
 }
 
 
@@ -15872,7 +15259,7 @@
 
 Object* ObjectHashTable::Lookup(Handle<Object> key) {
   DisallowHeapAllocation no_gc;
-  ASSERT(IsKey(*key));
+  DCHECK(IsKey(*key));
 
   // If the object does not have an identity hash, it was never used as a key.
   Object* hash = key->GetHash();
@@ -15888,8 +15275,8 @@
 Handle<ObjectHashTable> ObjectHashTable::Put(Handle<ObjectHashTable> table,
                                              Handle<Object> key,
                                              Handle<Object> value) {
-  ASSERT(table->IsKey(*key));
-  ASSERT(!value->IsTheHole());
+  DCHECK(table->IsKey(*key));
+  DCHECK(!value->IsTheHole());
 
   Isolate* isolate = table->GetIsolate();
 
@@ -15916,7 +15303,7 @@
 Handle<ObjectHashTable> ObjectHashTable::Remove(Handle<ObjectHashTable> table,
                                                 Handle<Object> key,
                                                 bool* was_present) {
-  ASSERT(table->IsKey(*key));
+  DCHECK(table->IsKey(*key));
 
   Object* hash = key->GetHash();
   if (hash->IsUndefined()) {
@@ -15952,7 +15339,7 @@
 
 Object* WeakHashTable::Lookup(Handle<Object> key) {
   DisallowHeapAllocation no_gc;
-  ASSERT(IsKey(*key));
+  DCHECK(IsKey(*key));
   int entry = FindEntry(key);
   if (entry == kNotFound) return GetHeap()->the_hole_value();
   return get(EntryToValueIndex(entry));
@@ -15962,7 +15349,7 @@
 Handle<WeakHashTable> WeakHashTable::Put(Handle<WeakHashTable> table,
                                          Handle<Object> key,
                                          Handle<Object> value) {
-  ASSERT(table->IsKey(*key));
+  DCHECK(table->IsKey(*key));
   int entry = table->FindEntry(key);
   // Key is already in table, just overwrite value.
   if (entry != kNotFound) {
@@ -16002,7 +15389,7 @@
   // from number of buckets. If we decide to change kLoadFactor
   // to something other than 2, capacity should be stored as another
   // field of this object.
-  capacity = RoundUpToPowerOf2(Max(kMinCapacity, capacity));
+  capacity = base::bits::RoundUpToPowerOfTwo32(Max(kMinCapacity, capacity));
   if (capacity > kMaxCapacity) {
     v8::internal::Heap::FatalProcessOutOfMemory("invalid table size", true);
   }
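
The switch to base::bits::RoundUpToPowerOfTwo32 preserves the invariant that the table capacity is a power of two, so a hash can be reduced to a bucket with a mask instead of a modulo. A minimal standalone sketch of that round-up (not V8's exact implementation; the value >= 1 precondition is an assumption here):

#include <cassert>
#include <cstdint>

// Standalone sketch of the classic round-up-to-power-of-two bit trick that
// base::bits::RoundUpToPowerOfTwo32 is assumed to implement.
uint32_t RoundUpToPowerOfTwo32(uint32_t value) {
  assert(value <= (1u << 31));  // the result must be representable
  value--;                      // assumption: callers pass value >= 1
  value |= value >> 1;          // smear the highest set bit downwards...
  value |= value >> 2;
  value |= value >> 4;
  value |= value >> 8;
  value |= value >> 16;
  return value + 1;             // ...then step up to the next power of two
}

int main() {
  assert(RoundUpToPowerOfTwo32(3) == 4);
  assert(RoundUpToPowerOfTwo32(8) == 8);
  // A power-of-two capacity turns bucket selection into a mask:
  uint32_t capacity = RoundUpToPowerOfTwo32(5);  // 8
  assert((0x9e3779b9u & (capacity - 1)) == 0x9e3779b9u % capacity);
  return 0;
}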
@@ -16025,7 +15412,7 @@
 template<class Derived, class Iterator, int entrysize>
 Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::EnsureGrowable(
     Handle<Derived> table) {
-  ASSERT(!table->IsObsolete());
+  DCHECK(!table->IsObsolete());
 
   int nof = table->NumberOfElements();
   int nod = table->NumberOfDeletedElements();
@@ -16041,7 +15428,7 @@
 template<class Derived, class Iterator, int entrysize>
 Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Shrink(
     Handle<Derived> table) {
-  ASSERT(!table->IsObsolete());
+  DCHECK(!table->IsObsolete());
 
   int nof = table->NumberOfElements();
   int capacity = table->Capacity();
@@ -16053,7 +15440,7 @@
 template<class Derived, class Iterator, int entrysize>
 Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Clear(
     Handle<Derived> table) {
-  ASSERT(!table->IsObsolete());
+  DCHECK(!table->IsObsolete());
 
   Handle<Derived> new_table =
       Allocate(table->GetIsolate(),
@@ -16084,7 +15471,7 @@
 template<class Derived, class Iterator, int entrysize>
 Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Rehash(
     Handle<Derived> table, int new_capacity) {
-  ASSERT(!table->IsObsolete());
+  DCHECK(!table->IsObsolete());
 
   Handle<Derived> new_table =
       Allocate(table->GetIsolate(),
@@ -16117,7 +15504,7 @@
     ++new_entry;
   }
 
-  ASSERT_EQ(nod, removed_holes_index);
+  DCHECK_EQ(nod, removed_holes_index);
 
   new_table->SetNumberOfElements(nof);
   table->SetNextTable(*new_table);
@@ -16126,17 +15513,14 @@
 }
 
 
-template<class Derived, class Iterator, int entrysize>
+template <class Derived, class Iterator, int entrysize>
 int OrderedHashTable<Derived, Iterator, entrysize>::FindEntry(
-    Handle<Object> key) {
-  ASSERT(!IsObsolete());
+    Handle<Object> key, int hash) {
+  DCHECK(!IsObsolete());
 
   DisallowHeapAllocation no_gc;
-  ASSERT(!key->IsTheHole());
-  Object* hash = key->GetHash();
-  if (hash->IsUndefined()) return kNotFound;
-  for (int entry = HashToEntry(Smi::cast(hash)->value());
-       entry != kNotFound;
+  DCHECK(!key->IsTheHole());
+  for (int entry = HashToEntry(hash); entry != kNotFound;
        entry = ChainAt(entry)) {
     Object* candidate = KeyAt(entry);
     if (candidate->SameValueZero(*key))
@@ -16146,9 +15530,19 @@
 }
 
 
-template<class Derived, class Iterator, int entrysize>
+template <class Derived, class Iterator, int entrysize>
+int OrderedHashTable<Derived, Iterator, entrysize>::FindEntry(
+    Handle<Object> key) {
+  DisallowHeapAllocation no_gc;
+  Object* hash = key->GetHash();
+  if (!hash->IsSmi()) return kNotFound;
+  return FindEntry(key, Smi::cast(hash)->value());
+}
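
The lookup is now split in two: FindEntry(key, hash) walks the chain of the bucket selected by a caller-supplied hash, while FindEntry(key) only fetches the object's identity hash (returning kNotFound if one was never assigned) and delegates. A toy model of the bucket/chain layout, with hypothetical names standing in for the real accessors:

#include <vector>

const int kNotFound = -1;

// Toy model (not V8 code) of the layout FindEntry(key, hash) walks:
// buckets_[b] holds the first entry index of bucket b, chains_[e] the next
// entry in the same bucket; ints stand in for heap objects.
struct ToyOrderedHashTable {
  std::vector<int> buckets_;
  std::vector<int> chains_;
  std::vector<int> keys_;

  int FindEntry(int key, int hash) const {
    int bucket = hash & (static_cast<int>(buckets_.size()) - 1);
    for (int entry = buckets_[bucket]; entry != kNotFound;
         entry = chains_[entry]) {
      if (keys_[entry] == key) return entry;  // SameValueZero in the real code
    }
    return kNotFound;
  }
};

int main() {
  ToyOrderedHashTable t;
  t.buckets_ = {0, kNotFound};  // two buckets; bucket 0 starts at entry 0
  t.chains_ = {kNotFound};      // entry 0 terminates its chain
  t.keys_ = {42};
  return t.FindEntry(42, /*hash=*/0) == 0 ? 0 : 1;  // found at entry 0
}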
+
+
+template <class Derived, class Iterator, int entrysize>
 int OrderedHashTable<Derived, Iterator, entrysize>::AddEntry(int hash) {
-  ASSERT(!IsObsolete());
+  DCHECK(!IsObsolete());
 
   int entry = UsedCapacity();
   int bucket = HashToBucket(hash);
@@ -16163,7 +15557,7 @@
 
 template<class Derived, class Iterator, int entrysize>
 void OrderedHashTable<Derived, Iterator, entrysize>::RemoveEntry(int entry) {
-  ASSERT(!IsObsolete());
+  DCHECK(!IsObsolete());
 
   int index = EntryToIndex(entry);
   for (int i = 0; i < entrysize; ++i) {
@@ -16194,8 +15588,9 @@
 OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::Remove(
     Handle<OrderedHashSet> table, Handle<Object> key, bool* was_present);
 
-template int
-OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::FindEntry(
+template int OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::FindEntry(
+    Handle<Object> key, int hash);
+template int OrderedHashTable<OrderedHashSet, JSSetIterator, 1>::FindEntry(
     Handle<Object> key);
 
 template int
@@ -16225,8 +15620,9 @@
 OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::Remove(
     Handle<OrderedHashMap> table, Handle<Object> key, bool* was_present);
 
-template int
-OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::FindEntry(
+template int OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::FindEntry(
+    Handle<Object> key, int hash);
+template int OrderedHashTable<OrderedHashMap, JSMapIterator, 2>::FindEntry(
     Handle<Object> key);
 
 template int
@@ -16243,12 +15639,12 @@
 
 Handle<OrderedHashSet> OrderedHashSet::Add(Handle<OrderedHashSet> table,
                                            Handle<Object> key) {
-  if (table->FindEntry(key) != kNotFound) return table;
+  int hash = GetOrCreateHash(table->GetIsolate(), key)->value();
+  if (table->FindEntry(key, hash) != kNotFound) return table;
 
   table = EnsureGrowable(table);
 
-  Handle<Smi> hash = GetOrCreateHash(table->GetIsolate(), key);
-  int index = table->AddEntry(hash->value());
+  int index = table->AddEntry(hash);
   table->set(index, *key);
   return table;
 }
@@ -16265,9 +15661,10 @@
 Handle<OrderedHashMap> OrderedHashMap::Put(Handle<OrderedHashMap> table,
                                            Handle<Object> key,
                                            Handle<Object> value) {
-  ASSERT(!key->IsTheHole());
+  DCHECK(!key->IsTheHole());
 
-  int entry = table->FindEntry(key);
+  int hash = GetOrCreateHash(table->GetIsolate(), key)->value();
+  int entry = table->FindEntry(key, hash);
 
   if (entry != kNotFound) {
     table->set(table->EntryToIndex(entry) + kValueOffset, *value);
@@ -16276,8 +15673,7 @@
 
   table = EnsureGrowable(table);
 
-  Handle<Smi> hash = GetOrCreateHash(table->GetIsolate(), key);
-  int index = table->AddEntry(hash->value());
+  int index = table->AddEntry(hash);
   table->set(index, *key);
   table->set(index + kValueOffset, *value);
   return table;
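
Add and Put now follow the same shape: compute the identity hash once via GetOrCreateHash, reuse it for the lookup, and only on a miss grow the table and append. A control-flow sketch using a standard map as a stand-in (toy types, not the V8 API; std::unordered_map hides the explicit hash threading):

#include <unordered_map>

// Control-flow sketch of OrderedHashMap::Put with toy types. In V8 the hash
// computed by GetOrCreateHash is passed explicitly to FindEntry and
// AddEntry; std::unordered_map hides that detail.
void Put(std::unordered_map<int, int>& table, int key, int value) {
  auto it = table.find(key);   // FindEntry(key, hash)
  if (it != table.end()) {     // entry != kNotFound:
    it->second = value;        // overwrite in place, no growth check needed
    return;
  }
  table.emplace(key, value);   // EnsureGrowable, AddEntry, store key/value
}

int main() {
  std::unordered_map<int, int> m;
  Put(m, 1, 10);
  Put(m, 1, 20);  // the second call overwrites; size stays 1
  return (m.size() == 1 && m[1] == 20) ? 0 : 1;
}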
@@ -16285,47 +15681,14 @@
 
 
 template<class Derived, class TableType>
-Handle<JSObject> OrderedHashTableIterator<Derived, TableType>::Next(
-    Handle<Derived> iterator) {
-  Isolate* isolate = iterator->GetIsolate();
-  Factory* factory = isolate->factory();
-
-  Handle<Object> maybe_table(iterator->table(), isolate);
-  if (!maybe_table->IsUndefined()) {
-    iterator->Transition();
-
-    Handle<TableType> table(TableType::cast(iterator->table()), isolate);
-    int index = Smi::cast(iterator->index())->value();
-    int used_capacity = table->UsedCapacity();
-
-    while (index < used_capacity && table->KeyAt(index)->IsTheHole()) {
-      index++;
-    }
-
-    if (index < used_capacity) {
-      int entry_index = table->EntryToIndex(index);
-      Handle<Object> value =
-          Derived::ValueForKind(iterator, entry_index);
-      iterator->set_index(Smi::FromInt(index + 1));
-      return factory->NewIteratorResultObject(value, false);
-    }
-
-    iterator->set_table(iterator->GetHeap()->undefined_value());
-  }
-
-  return factory->NewIteratorResultObject(factory->undefined_value(), true);
-}
-
-
-template<class Derived, class TableType>
 void OrderedHashTableIterator<Derived, TableType>::Transition() {
-  Isolate* isolate = GetIsolate();
-  Handle<TableType> table(TableType::cast(this->table()), isolate);
+  DisallowHeapAllocation no_allocation;
+  TableType* table = TableType::cast(this->table());
   if (!table->IsObsolete()) return;
 
   int index = Smi::cast(this->index())->value();
   while (table->IsObsolete()) {
-    Handle<TableType> next_table(table->NextTable(), isolate);
+    TableType* next_table = table->NextTable();
 
     if (index > 0) {
       int nod = table->NumberOfDeletedElements();
@@ -16346,84 +15709,82 @@
     table = next_table;
   }
 
-  set_table(*table);
+  set_table(table);
   set_index(Smi::FromInt(index));
 }
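
The hunk elides the middle of the loop, but the visible shape is: while the table is obsolete, follow NextTable() and rebase the iterator's index by the number of entries that were removed ahead of it, since the successor table is compacted. A toy model of that rebasing (hypothetical names; the real code reads the removed indices off the obsolete table):

#include <vector>

// Toy model of the index fix-up done while chasing obsolete tables: every
// entry removed ahead of the iterator's position shifts that position down
// by one in the compacted successor table.
int RebaseIndex(int index, const std::vector<int>& removed_entries) {
  int new_index = index;
  for (int removed : removed_entries) {
    if (removed < index) new_index--;
  }
  return new_index;
}

int main() {
  // Entries 1 and 3 were removed below position 4, so position 4 becomes 2.
  return RebaseIndex(4, {1, 3}) == 2 ? 0 : 1;
}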
 
 
-template Handle<JSObject>
+template<class Derived, class TableType>
+bool OrderedHashTableIterator<Derived, TableType>::HasMore() {
+  DisallowHeapAllocation no_allocation;
+  if (this->table()->IsUndefined()) return false;
+
+  Transition();
+
+  TableType* table = TableType::cast(this->table());
+  int index = Smi::cast(this->index())->value();
+  int used_capacity = table->UsedCapacity();
+
+  while (index < used_capacity && table->KeyAt(index)->IsTheHole()) {
+    index++;
+  }
+
+  set_index(Smi::FromInt(index));
+
+  if (index < used_capacity) return true;
+
+  set_table(GetHeap()->undefined_value());
+  return false;
+}
+
+
+template<class Derived, class TableType>
+Smi* OrderedHashTableIterator<Derived, TableType>::Next(JSArray* value_array) {
+  DisallowHeapAllocation no_allocation;
+  if (HasMore()) {
+    FixedArray* array = FixedArray::cast(value_array->elements());
+    static_cast<Derived*>(this)->PopulateValueArray(array);
+    MoveNext();
+    return Smi::cast(kind());
+  }
+  return Smi::FromInt(0);
+}
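
The old Next() allocated a fresh iterator result object per step; the new protocol is allocation-free: HasMore() transitions and skips holes, and Next(JSArray*) writes the current entry into a caller-provided array, advances, and returns the iteration kind as a Smi, with 0 meaning exhausted. A hypothetical caller-side sketch of that contract (toy types, not the actual runtime glue):

#include <cstdio>
#include <vector>

// Hypothetical stand-in for the protocol: Next() fills a caller-provided
// buffer and returns a nonzero iteration kind, or 0 once exhausted.
struct ToyIterator {
  std::vector<int> data;
  size_t pos = 0;

  int Next(int* value_array) {
    if (pos >= data.size()) return 0;  // Smi::FromInt(0): done
    value_array[0] = data[pos++];      // PopulateValueArray + MoveNext
    return 1;                          // kind(): e.g. a values iteration
  }
};

int main() {
  ToyIterator it{{10, 20, 30}};
  int buf[2];  // reused every step: the loop allocates nothing
  while (int kind = it.Next(buf)) {
    std::printf("kind=%d value=%d\n", kind, buf[0]);
  }
  return 0;
}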
+
+
+template Smi*
 OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::Next(
-    Handle<JSSetIterator> iterator);
+    JSArray* value_array);
+
+template bool
+OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::HasMore();
+
+template void
+OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::MoveNext();
+
+template Object*
+OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::CurrentKey();
 
 template void
 OrderedHashTableIterator<JSSetIterator, OrderedHashSet>::Transition();
 
 
-template Handle<JSObject>
+template Smi*
 OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Next(
-    Handle<JSMapIterator> iterator);
+    JSArray* value_array);
+
+template bool
+OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::HasMore();
+
+template void
+OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::MoveNext();
+
+template Object*
+OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::CurrentKey();
 
 template void
 OrderedHashTableIterator<JSMapIterator, OrderedHashMap>::Transition();
 
 
-Handle<Object> JSSetIterator::ValueForKind(
-    Handle<JSSetIterator> iterator, int entry_index) {
-  int kind = iterator->kind()->value();
-  // Set.prototype only has values and entries.
-  ASSERT(kind == kKindValues || kind == kKindEntries);
-
-  Isolate* isolate = iterator->GetIsolate();
-  Factory* factory = isolate->factory();
-
-  Handle<OrderedHashSet> table(
-      OrderedHashSet::cast(iterator->table()), isolate);
-  Handle<Object> value = Handle<Object>(table->get(entry_index), isolate);
-
-  if (kind == kKindEntries) {
-    Handle<FixedArray> array = factory->NewFixedArray(2);
-    array->set(0, *value);
-    array->set(1, *value);
-    return factory->NewJSArrayWithElements(array);
-  }
-
-  return value;
-}
-
-
-Handle<Object> JSMapIterator::ValueForKind(
-    Handle<JSMapIterator> iterator, int entry_index) {
-  int kind = iterator->kind()->value();
-  ASSERT(kind == kKindKeys || kind == kKindValues || kind == kKindEntries);
-
-  Isolate* isolate = iterator->GetIsolate();
-  Factory* factory = isolate->factory();
-
-  Handle<OrderedHashMap> table(
-      OrderedHashMap::cast(iterator->table()), isolate);
-
-  switch (kind) {
-    case kKindKeys:
-      return Handle<Object>(table->get(entry_index), isolate);
-
-    case kKindValues:
-      return Handle<Object>(table->get(entry_index + 1), isolate);
-
-    case kKindEntries: {
-      Handle<Object> key(table->get(entry_index), isolate);
-      Handle<Object> value(table->get(entry_index + 1), isolate);
-      Handle<FixedArray> array = factory->NewFixedArray(2);
-      array->set(0, *key);
-      array->set(1, *value);
-      return factory->NewJSArrayWithElements(array);
-    }
-  }
-
-  UNREACHABLE();
-  return factory->undefined_value();
-}
-
-
 DeclaredAccessorDescriptorIterator::DeclaredAccessorDescriptorIterator(
     DeclaredAccessorDescriptor* descriptor)
     : array_(descriptor->serialized_data()->GetDataStartAddress()),
@@ -16434,13 +15795,13 @@
 
 const DeclaredAccessorDescriptorData*
   DeclaredAccessorDescriptorIterator::Next() {
-  ASSERT(offset_ < length_);
+  DCHECK(offset_ < length_);
   uint8_t* ptr = &array_[offset_];
-  ASSERT(reinterpret_cast<uintptr_t>(ptr) % sizeof(uintptr_t) == 0);
+  DCHECK(reinterpret_cast<uintptr_t>(ptr) % sizeof(uintptr_t) == 0);
   const DeclaredAccessorDescriptorData* data =
       reinterpret_cast<const DeclaredAccessorDescriptorData*>(ptr);
   offset_ += sizeof(*data);
-  ASSERT(offset_ <= length_);
+  DCHECK(offset_ <= length_);
   return data;
 }
 
@@ -16467,7 +15828,7 @@
       MemCopy(array, previous_array, previous_length);
       array += previous_length;
     }
-    ASSERT(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0);
+    DCHECK(reinterpret_cast<uintptr_t>(array) % sizeof(uintptr_t) == 0);
     DeclaredAccessorDescriptorData* data =
         reinterpret_cast<DeclaredAccessorDescriptorData*>(array);
     *data = descriptor;
@@ -16551,7 +15912,7 @@
     }
     index = old_break_points->length();
   }
-  ASSERT(index != kNoBreakPointInfo);
+  DCHECK(index != kNoBreakPointInfo);
 
   // Allocate new BreakPointInfo object and set the break point.
   Handle<BreakPointInfo> new_break_point_info = Handle<BreakPointInfo>::cast(
@@ -16643,7 +16004,7 @@
     return;
   }
   // If there are multiple break points, shrink the array
-  ASSERT(break_point_info->break_point_objects()->IsFixedArray());
+  DCHECK(break_point_info->break_point_objects()->IsFixedArray());
   Handle<FixedArray> old_array =
       Handle<FixedArray>(
           FixedArray::cast(break_point_info->break_point_objects()));
@@ -16652,7 +16013,7 @@
   int found_count = 0;
   for (int i = 0; i < old_array->length(); i++) {
     if (old_array->get(i) == *break_point_object) {
-      ASSERT(found_count == 0);
+      DCHECK(found_count == 0);
       found_count++;
     } else {
       new_array->set(i - found_count, old_array->get(i));
@@ -16738,7 +16099,7 @@
 
 
 Object* JSDate::DoGetField(FieldIndex index) {
-  ASSERT(index != kDateValue);
+  DCHECK(index != kDateValue);
 
   DateCache* date_cache = GetIsolate()->date_cache();
 
@@ -16776,7 +16137,7 @@
 
   int time_in_day_ms = DateCache::TimeInDay(local_time_ms, days);
   if (index == kMillisecond) return Smi::FromInt(time_in_day_ms % 1000);
-  ASSERT(index == kTimeInDay);
+  DCHECK(index == kTimeInDay);
   return Smi::FromInt(time_in_day_ms);
 }
 
@@ -16784,7 +16145,7 @@
 Object* JSDate::GetUTCField(FieldIndex index,
                             double value,
                             DateCache* date_cache) {
-  ASSERT(index >= kFirstUTCField);
+  DCHECK(index >= kFirstUTCField);
 
   if (std::isnan(value)) return GetIsolate()->heap()->nan_value();
 
@@ -16803,7 +16164,7 @@
     date_cache->YearMonthDayFromDays(days, &year, &month, &day);
     if (index == kYearUTC) return Smi::FromInt(year);
     if (index == kMonthUTC) return Smi::FromInt(month);
-    ASSERT(index == kDayUTC);
+    DCHECK(index == kDayUTC);
     return Smi::FromInt(day);
   }
 
@@ -16862,7 +16223,7 @@
 
 
 void JSArrayBuffer::Neuter() {
-  ASSERT(is_external());
+  DCHECK(is_external());
   set_backing_store(NULL);
   set_byte_length(Smi::FromInt(0));
 }
@@ -16907,7 +16268,7 @@
   Handle<Map> map(typed_array->map());
   Isolate* isolate = typed_array->GetIsolate();
 
-  ASSERT(IsFixedTypedArrayElementsKind(map->elements_kind()));
+  DCHECK(IsFixedTypedArrayElementsKind(map->elements_kind()));
 
   Handle<Map> new_map = Map::TransitionElementsTo(
           map,
@@ -16927,7 +16288,7 @@
           static_cast<uint8_t*>(buffer->backing_store()));
 
   buffer->set_weak_first_view(*typed_array);
-  ASSERT(typed_array->weak_next() == isolate->heap()->undefined_value());
+  DCHECK(typed_array->weak_next() == isolate->heap()->undefined_value());
   typed_array->set_buffer(*buffer);
   JSObject::SetMapAndElements(typed_array, new_map, new_elements);
 
@@ -16938,7 +16299,7 @@
 Handle<JSArrayBuffer> JSTypedArray::GetBuffer() {
   Handle<Object> result(buffer(), GetIsolate());
   if (*result != Smi::FromInt(0)) {
-    ASSERT(IsExternalArrayElementsKind(map()->elements_kind()));
+    DCHECK(IsExternalArrayElementsKind(map()->elements_kind()));
     return Handle<JSArrayBuffer>::cast(result);
   }
   Handle<JSTypedArray> self(this);
@@ -16952,7 +16313,7 @@
 
 
 void PropertyCell::set_type(HeapType* type, WriteBarrierMode ignored) {
-  ASSERT(IsPropertyCell());
+  DCHECK(IsPropertyCell());
   set_type_raw(type, ignored);
 }
 
@@ -16961,14 +16322,9 @@
                                            Handle<Object> value) {
   Isolate* isolate = cell->GetIsolate();
   Handle<HeapType> old_type(cell->type(), isolate);
-  // TODO(2803): Do not track ConsString as constant because they cannot be
-  // embedded into code.
-  Handle<HeapType> new_type = value->IsConsString() || value->IsTheHole()
-      ? HeapType::Any(isolate) : HeapType::Constant(value, isolate);
+  Handle<HeapType> new_type = HeapType::Constant(value, isolate);
 
-  if (new_type->Is(old_type)) {
-    return old_type;
-  }
+  if (new_type->Is(old_type)) return old_type;
 
   cell->dependent_code()->DeoptimizeDependentCodeGroup(
       isolate, DependentCode::kPropertyCellChangedGroup);
@@ -17003,16 +16359,4 @@
       cell, info->zone());
 }
 
-
-const char* GetBailoutReason(BailoutReason reason) {
-  ASSERT(reason < kLastErrorMessage);
-#define ERROR_MESSAGES_TEXTS(C, T) T,
-  static const char* error_messages_[] = {
-      ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)
-  };
-#undef ERROR_MESSAGES_TEXTS
-  return error_messages_[reason];
-}
-
-
 } }  // namespace v8::internal
diff --git a/src/objects.h b/src/objects.h
index 73566d8..3340350 100644
--- a/src/objects.h
+++ b/src/objects.h
@@ -7,7 +7,10 @@
 
 #include "src/allocation.h"
 #include "src/assert-scope.h"
+#include "src/bailout-reason.h"
+#include "src/base/bits.h"
 #include "src/builtins.h"
+#include "src/checks.h"
 #include "src/elements-kind.h"
 #include "src/field-index.h"
 #include "src/flags.h"
@@ -15,16 +18,18 @@
 #include "src/property-details.h"
 #include "src/smart-pointers.h"
 #include "src/unicode-inl.h"
-#if V8_TARGET_ARCH_ARM64
-#include "src/arm64/constants-arm64.h"
-#elif V8_TARGET_ARCH_ARM
-#include "src/arm/constants-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "src/mips/constants-mips.h"
-#endif
-#include "src/v8checks.h"
 #include "src/zone.h"
 
+#if V8_TARGET_ARCH_ARM
+#include "src/arm/constants-arm.h"  // NOLINT
+#elif V8_TARGET_ARCH_ARM64
+#include "src/arm64/constants-arm64.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS
+#include "src/mips/constants-mips.h"  // NOLINT
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/constants-mips64.h"  // NOLINT
+#endif
+
 
 //
 // Most object types in the V8 JavaScript are described in this file.
@@ -40,8 +45,9 @@
 //         - JSArrayBufferView
 //           - JSTypedArray
 //           - JSDataView
-//         - JSSet
-//         - JSMap
+//         - JSCollection
+//           - JSSet
+//           - JSMap
 //         - JSSetIterator
 //         - JSMapIterator
 //         - JSWeakCollection
@@ -74,6 +80,7 @@
 //           - OrderedHashSet
 //           - OrderedHashMap
 //         - Context
+//         - TypeFeedbackVector
 //         - JSFunctionResultCache
 //         - ScopeInfo
 //         - TransitionArray
@@ -95,7 +102,7 @@
 //         - SlicedString
 //         - ConsString
 //         - ExternalString
-//           - ExternalAsciiString
+//           - ExternalOneByteString
 //           - ExternalTwoByteString
 //         - InternalizedString
 //           - SeqInternalizedString
@@ -103,7 +110,7 @@
 //             - SeqTwoByteInternalizedString
 //           - ConsInternalizedString
 //           - ExternalInternalizedString
-//             - ExternalAsciiInternalizedString
+//             - ExternalOneByteInternalizedString
 //             - ExternalTwoByteInternalizedString
 //       - Symbol
 //     - HeapNumber
@@ -141,6 +148,8 @@
 namespace v8 {
 namespace internal {
 
+class OStream;
+
 enum KeyedAccessStoreMode {
   STANDARD_STORE,
   STORE_TRANSITION_SMI_TO_OBJECT,
@@ -167,6 +176,12 @@
 };
 
 
+enum MutableMode {
+  MUTABLE,
+  IMMUTABLE
+};
+
+
 static const int kGrowICDelta = STORE_AND_GROW_NO_TRANSITION -
     STANDARD_STORE;
 STATIC_ASSERT(STANDARD_STORE == 0);
@@ -235,12 +250,12 @@
 };
 
 
-// NormalizedMapSharingMode is used to specify whether a map may be shared
-// by different objects with normalized properties.
-enum NormalizedMapSharingMode {
-  UNIQUE_NORMALIZED_MAP,
-  SHARED_NORMALIZED_MAP
-};
+// Indicates how aggressively the prototype should be optimized. FAST_PROTOTYPE
+// will give the fastest result by tailoring the map to the prototype, but that
+// will cause polymorphism with other objects. REGULAR_PROTOTYPE is to be used
+// (at least for now) when dynamically modifying the prototype chain of an
+// object using __proto__ or Object.setPrototypeOf.
+enum PrototypeOptimizationMode { REGULAR_PROTOTYPE, FAST_PROTOTYPE };
 
 
 // Indicates whether transitions can be added to a source map or not.
@@ -293,8 +308,10 @@
 // Instance size sentinel for objects of variable size.
 const int kVariableSizeSentinel = 0;
 
+// The stub key bit field is logically unsigned but may be stored as a
+// signed Smi value, so the Smi sign bit must stay unused.
 const int kStubMajorKeyBits = 7;
-const int kStubMinorKeyBits = kBitsPerInt - kSmiTagSize - kStubMajorKeyBits;
+const int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;
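
Restating the arithmetic: with a 32-bit Smi layout the payload is 31 bits, one of which is reserved so the stored major/minor key never sets the sign bit, leaving 31 - 7 - 1 = 23 minor-key bits. A compile-time sketch under that assumed layout (kSmiValueSize differs on 64-bit configurations):

// Assumed 32-bit layout: a Smi carries 31 payload bits (one bit is the tag).
constexpr int kSmiValueSize = 31;
constexpr int kStubMajorKeyBits = 7;
// One payload bit is reserved so the stored value stays non-negative.
constexpr int kStubMinorKeyBits = kSmiValueSize - kStubMajorKeyBits - 1;

static_assert(kStubMinorKeyBits == 23, "23 minor-key bits under this layout");
static_assert(kStubMajorKeyBits + kStubMinorKeyBits < kSmiValueSize,
              "the Smi sign bit must stay clear");

int main() { return 0; }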
 
 // All Maps have a field instance_type containing an InstanceType.
 // It describes the type of the instances.
@@ -304,7 +321,7 @@
 //
 // The names of the string instance types are intended to systematically
 // mirror their encoding in the instance_type field of the map.  The default
-// encoding is considered TWO_BYTE.  It is not mentioned in the name.  ASCII
+// encoding is considered TWO_BYTE.  It is not mentioned in the name.  ONE_BYTE
 // encoding is mentioned explicitly in the name.  Likewise, the default
 // representation is considered sequential.  It is not mentioned in the
 // name.  The other representations (e.g. CONS, EXTERNAL) are explicitly
@@ -319,205 +336,172 @@
 // NOTE: Everything following JS_VALUE_TYPE is considered a
 // JSObject for GC purposes. The first four entries here have typeof
 // 'object', whereas JS_FUNCTION_TYPE has typeof 'function'.
-#define INSTANCE_TYPE_LIST(V)                                                  \
-  V(STRING_TYPE)                                                               \
-  V(ASCII_STRING_TYPE)                                                         \
-  V(CONS_STRING_TYPE)                                                          \
-  V(CONS_ASCII_STRING_TYPE)                                                    \
-  V(SLICED_STRING_TYPE)                                                        \
-  V(SLICED_ASCII_STRING_TYPE)                                                  \
-  V(EXTERNAL_STRING_TYPE)                                                      \
-  V(EXTERNAL_ASCII_STRING_TYPE)                                                \
-  V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE)                                   \
-  V(SHORT_EXTERNAL_STRING_TYPE)                                                \
-  V(SHORT_EXTERNAL_ASCII_STRING_TYPE)                                          \
-  V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE)                             \
-                                                                               \
-  V(INTERNALIZED_STRING_TYPE)                                                  \
-  V(ASCII_INTERNALIZED_STRING_TYPE)                                            \
-  V(EXTERNAL_INTERNALIZED_STRING_TYPE)                                         \
-  V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE)                                   \
-  V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE)                      \
-  V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE)                                   \
-  V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE)                             \
-  V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE)                \
-                                                                               \
-  V(SYMBOL_TYPE)                                                               \
-                                                                               \
-  V(MAP_TYPE)                                                                  \
-  V(CODE_TYPE)                                                                 \
-  V(ODDBALL_TYPE)                                                              \
-  V(CELL_TYPE)                                                                 \
-  V(PROPERTY_CELL_TYPE)                                                        \
-                                                                               \
-  V(HEAP_NUMBER_TYPE)                                                          \
-  V(FOREIGN_TYPE)                                                              \
-  V(BYTE_ARRAY_TYPE)                                                           \
-  V(FREE_SPACE_TYPE)                                                           \
-  /* Note: the order of these external array */                                \
-  /* types is relied upon in */                                                \
-  /* Object::IsExternalArray(). */                                             \
-  V(EXTERNAL_INT8_ARRAY_TYPE)                                                  \
-  V(EXTERNAL_UINT8_ARRAY_TYPE)                                                 \
-  V(EXTERNAL_INT16_ARRAY_TYPE)                                                 \
-  V(EXTERNAL_UINT16_ARRAY_TYPE)                                                \
-  V(EXTERNAL_INT32_ARRAY_TYPE)                                                 \
-  V(EXTERNAL_UINT32_ARRAY_TYPE)                                                \
-  V(EXTERNAL_FLOAT32_ARRAY_TYPE)                                               \
-  V(EXTERNAL_FLOAT64_ARRAY_TYPE)                                               \
-  V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE)                                         \
-                                                                               \
-  V(FIXED_INT8_ARRAY_TYPE)                                                     \
-  V(FIXED_UINT8_ARRAY_TYPE)                                                    \
-  V(FIXED_INT16_ARRAY_TYPE)                                                    \
-  V(FIXED_UINT16_ARRAY_TYPE)                                                   \
-  V(FIXED_INT32_ARRAY_TYPE)                                                    \
-  V(FIXED_UINT32_ARRAY_TYPE)                                                   \
-  V(FIXED_FLOAT32_ARRAY_TYPE)                                                  \
-  V(FIXED_FLOAT64_ARRAY_TYPE)                                                  \
-  V(FIXED_UINT8_CLAMPED_ARRAY_TYPE)                                            \
-                                                                               \
-  V(FILLER_TYPE)                                                               \
-                                                                               \
-  V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE)                                         \
-  V(DECLARED_ACCESSOR_INFO_TYPE)                                               \
-  V(EXECUTABLE_ACCESSOR_INFO_TYPE)                                             \
-  V(ACCESSOR_PAIR_TYPE)                                                        \
-  V(ACCESS_CHECK_INFO_TYPE)                                                    \
-  V(INTERCEPTOR_INFO_TYPE)                                                     \
-  V(CALL_HANDLER_INFO_TYPE)                                                    \
-  V(FUNCTION_TEMPLATE_INFO_TYPE)                                               \
-  V(OBJECT_TEMPLATE_INFO_TYPE)                                                 \
-  V(SIGNATURE_INFO_TYPE)                                                       \
-  V(TYPE_SWITCH_INFO_TYPE)                                                     \
-  V(ALLOCATION_MEMENTO_TYPE)                                                   \
-  V(ALLOCATION_SITE_TYPE)                                                      \
-  V(SCRIPT_TYPE)                                                               \
-  V(CODE_CACHE_TYPE)                                                           \
-  V(POLYMORPHIC_CODE_CACHE_TYPE)                                               \
-  V(TYPE_FEEDBACK_INFO_TYPE)                                                   \
-  V(ALIASED_ARGUMENTS_ENTRY_TYPE)                                              \
-  V(BOX_TYPE)                                                                  \
-                                                                               \
-  V(FIXED_ARRAY_TYPE)                                                          \
-  V(FIXED_DOUBLE_ARRAY_TYPE)                                                   \
-  V(CONSTANT_POOL_ARRAY_TYPE)                                                  \
-  V(SHARED_FUNCTION_INFO_TYPE)                                                 \
-                                                                               \
-  V(JS_MESSAGE_OBJECT_TYPE)                                                    \
-                                                                               \
-  V(JS_VALUE_TYPE)                                                             \
-  V(JS_DATE_TYPE)                                                              \
-  V(JS_OBJECT_TYPE)                                                            \
-  V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                                          \
-  V(JS_GENERATOR_OBJECT_TYPE)                                                  \
-  V(JS_MODULE_TYPE)                                                            \
-  V(JS_GLOBAL_OBJECT_TYPE)                                                     \
-  V(JS_BUILTINS_OBJECT_TYPE)                                                   \
-  V(JS_GLOBAL_PROXY_TYPE)                                                      \
-  V(JS_ARRAY_TYPE)                                                             \
-  V(JS_ARRAY_BUFFER_TYPE)                                                      \
-  V(JS_TYPED_ARRAY_TYPE)                                                       \
-  V(JS_DATA_VIEW_TYPE)                                                         \
-  V(JS_PROXY_TYPE)                                                             \
-  V(JS_SET_TYPE)                                                               \
-  V(JS_MAP_TYPE)                                                               \
-  V(JS_SET_ITERATOR_TYPE)                                                      \
-  V(JS_MAP_ITERATOR_TYPE)                                                      \
-  V(JS_WEAK_MAP_TYPE)                                                          \
-  V(JS_WEAK_SET_TYPE)                                                          \
-  V(JS_REGEXP_TYPE)                                                            \
-                                                                               \
-  V(JS_FUNCTION_TYPE)                                                          \
-  V(JS_FUNCTION_PROXY_TYPE)                                                    \
-  V(DEBUG_INFO_TYPE)                                                           \
+#define INSTANCE_TYPE_LIST(V)                                   \
+  V(STRING_TYPE)                                                \
+  V(ONE_BYTE_STRING_TYPE)                                       \
+  V(CONS_STRING_TYPE)                                           \
+  V(CONS_ONE_BYTE_STRING_TYPE)                                  \
+  V(SLICED_STRING_TYPE)                                         \
+  V(SLICED_ONE_BYTE_STRING_TYPE)                                \
+  V(EXTERNAL_STRING_TYPE)                                       \
+  V(EXTERNAL_ONE_BYTE_STRING_TYPE)                              \
+  V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE)                    \
+  V(SHORT_EXTERNAL_STRING_TYPE)                                 \
+  V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE)                        \
+  V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE)              \
+                                                                \
+  V(INTERNALIZED_STRING_TYPE)                                   \
+  V(ONE_BYTE_INTERNALIZED_STRING_TYPE)                          \
+  V(EXTERNAL_INTERNALIZED_STRING_TYPE)                          \
+  V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE)                 \
+  V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE)       \
+  V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE)                    \
+  V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE)           \
+  V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE) \
+                                                                \
+  V(SYMBOL_TYPE)                                                \
+                                                                \
+  V(MAP_TYPE)                                                   \
+  V(CODE_TYPE)                                                  \
+  V(ODDBALL_TYPE)                                               \
+  V(CELL_TYPE)                                                  \
+  V(PROPERTY_CELL_TYPE)                                         \
+                                                                \
+  V(HEAP_NUMBER_TYPE)                                           \
+  V(MUTABLE_HEAP_NUMBER_TYPE)                                   \
+  V(FOREIGN_TYPE)                                               \
+  V(BYTE_ARRAY_TYPE)                                            \
+  V(FREE_SPACE_TYPE)                                            \
+  /* Note: the order of these external array */                 \
+  /* types is relied upon in */                                 \
+  /* Object::IsExternalArray(). */                              \
+  V(EXTERNAL_INT8_ARRAY_TYPE)                                   \
+  V(EXTERNAL_UINT8_ARRAY_TYPE)                                  \
+  V(EXTERNAL_INT16_ARRAY_TYPE)                                  \
+  V(EXTERNAL_UINT16_ARRAY_TYPE)                                 \
+  V(EXTERNAL_INT32_ARRAY_TYPE)                                  \
+  V(EXTERNAL_UINT32_ARRAY_TYPE)                                 \
+  V(EXTERNAL_FLOAT32_ARRAY_TYPE)                                \
+  V(EXTERNAL_FLOAT64_ARRAY_TYPE)                                \
+  V(EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE)                          \
+                                                                \
+  V(FIXED_INT8_ARRAY_TYPE)                                      \
+  V(FIXED_UINT8_ARRAY_TYPE)                                     \
+  V(FIXED_INT16_ARRAY_TYPE)                                     \
+  V(FIXED_UINT16_ARRAY_TYPE)                                    \
+  V(FIXED_INT32_ARRAY_TYPE)                                     \
+  V(FIXED_UINT32_ARRAY_TYPE)                                    \
+  V(FIXED_FLOAT32_ARRAY_TYPE)                                   \
+  V(FIXED_FLOAT64_ARRAY_TYPE)                                   \
+  V(FIXED_UINT8_CLAMPED_ARRAY_TYPE)                             \
+                                                                \
+  V(FILLER_TYPE)                                                \
+                                                                \
+  V(DECLARED_ACCESSOR_DESCRIPTOR_TYPE)                          \
+  V(DECLARED_ACCESSOR_INFO_TYPE)                                \
+  V(EXECUTABLE_ACCESSOR_INFO_TYPE)                              \
+  V(ACCESSOR_PAIR_TYPE)                                         \
+  V(ACCESS_CHECK_INFO_TYPE)                                     \
+  V(INTERCEPTOR_INFO_TYPE)                                      \
+  V(CALL_HANDLER_INFO_TYPE)                                     \
+  V(FUNCTION_TEMPLATE_INFO_TYPE)                                \
+  V(OBJECT_TEMPLATE_INFO_TYPE)                                  \
+  V(SIGNATURE_INFO_TYPE)                                        \
+  V(TYPE_SWITCH_INFO_TYPE)                                      \
+  V(ALLOCATION_MEMENTO_TYPE)                                    \
+  V(ALLOCATION_SITE_TYPE)                                       \
+  V(SCRIPT_TYPE)                                                \
+  V(CODE_CACHE_TYPE)                                            \
+  V(POLYMORPHIC_CODE_CACHE_TYPE)                                \
+  V(TYPE_FEEDBACK_INFO_TYPE)                                    \
+  V(ALIASED_ARGUMENTS_ENTRY_TYPE)                               \
+  V(BOX_TYPE)                                                   \
+                                                                \
+  V(FIXED_ARRAY_TYPE)                                           \
+  V(FIXED_DOUBLE_ARRAY_TYPE)                                    \
+  V(CONSTANT_POOL_ARRAY_TYPE)                                   \
+  V(SHARED_FUNCTION_INFO_TYPE)                                  \
+                                                                \
+  V(JS_MESSAGE_OBJECT_TYPE)                                     \
+                                                                \
+  V(JS_VALUE_TYPE)                                              \
+  V(JS_DATE_TYPE)                                               \
+  V(JS_OBJECT_TYPE)                                             \
+  V(JS_CONTEXT_EXTENSION_OBJECT_TYPE)                           \
+  V(JS_GENERATOR_OBJECT_TYPE)                                   \
+  V(JS_MODULE_TYPE)                                             \
+  V(JS_GLOBAL_OBJECT_TYPE)                                      \
+  V(JS_BUILTINS_OBJECT_TYPE)                                    \
+  V(JS_GLOBAL_PROXY_TYPE)                                       \
+  V(JS_ARRAY_TYPE)                                              \
+  V(JS_ARRAY_BUFFER_TYPE)                                       \
+  V(JS_TYPED_ARRAY_TYPE)                                        \
+  V(JS_DATA_VIEW_TYPE)                                          \
+  V(JS_PROXY_TYPE)                                              \
+  V(JS_SET_TYPE)                                                \
+  V(JS_MAP_TYPE)                                                \
+  V(JS_SET_ITERATOR_TYPE)                                       \
+  V(JS_MAP_ITERATOR_TYPE)                                       \
+  V(JS_WEAK_MAP_TYPE)                                           \
+  V(JS_WEAK_SET_TYPE)                                           \
+  V(JS_REGEXP_TYPE)                                             \
+                                                                \
+  V(JS_FUNCTION_TYPE)                                           \
+  V(JS_FUNCTION_PROXY_TYPE)                                     \
+  V(DEBUG_INFO_TYPE)                                            \
   V(BREAK_POINT_INFO_TYPE)
 
 
 // Since string types are not consecutive, this macro is used to
 // iterate over them.
-#define STRING_TYPE_LIST(V)                                                    \
-  V(STRING_TYPE,                                                               \
-    kVariableSizeSentinel,                                                     \
-    string,                                                                    \
-    String)                                                                    \
-  V(ASCII_STRING_TYPE,                                                         \
-    kVariableSizeSentinel,                                                     \
-    ascii_string,                                                              \
-    AsciiString)                                                               \
-  V(CONS_STRING_TYPE,                                                          \
-    ConsString::kSize,                                                         \
-    cons_string,                                                               \
-    ConsString)                                                                \
-  V(CONS_ASCII_STRING_TYPE,                                                    \
-    ConsString::kSize,                                                         \
-    cons_ascii_string,                                                         \
-    ConsAsciiString)                                                           \
-  V(SLICED_STRING_TYPE,                                                        \
-    SlicedString::kSize,                                                       \
-    sliced_string,                                                             \
-    SlicedString)                                                              \
-  V(SLICED_ASCII_STRING_TYPE,                                                  \
-    SlicedString::kSize,                                                       \
-    sliced_ascii_string,                                                       \
-    SlicedAsciiString)                                                         \
-  V(EXTERNAL_STRING_TYPE,                                                      \
-    ExternalTwoByteString::kSize,                                              \
-    external_string,                                                           \
-    ExternalString)                                                            \
-  V(EXTERNAL_ASCII_STRING_TYPE,                                                \
-    ExternalAsciiString::kSize,                                                \
-    external_ascii_string,                                                     \
-    ExternalAsciiString)                                                       \
-  V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE,                                   \
-    ExternalTwoByteString::kSize,                                              \
-    external_string_with_one_byte_data,                                        \
-    ExternalStringWithOneByteData)                                             \
-  V(SHORT_EXTERNAL_STRING_TYPE,                                                \
-    ExternalTwoByteString::kShortSize,                                         \
-    short_external_string,                                                     \
-    ShortExternalString)                                                       \
-  V(SHORT_EXTERNAL_ASCII_STRING_TYPE,                                          \
-    ExternalAsciiString::kShortSize,                                           \
-    short_external_ascii_string,                                               \
-    ShortExternalAsciiString)                                                  \
-  V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE,                             \
-    ExternalTwoByteString::kShortSize,                                         \
-    short_external_string_with_one_byte_data,                                  \
-    ShortExternalStringWithOneByteData)                                        \
-                                                                               \
-  V(INTERNALIZED_STRING_TYPE,                                                  \
-    kVariableSizeSentinel,                                                     \
-    internalized_string,                                                       \
-    InternalizedString)                                                        \
-  V(ASCII_INTERNALIZED_STRING_TYPE,                                            \
-    kVariableSizeSentinel,                                                     \
-    ascii_internalized_string,                                                 \
-    AsciiInternalizedString)                                                   \
-  V(EXTERNAL_INTERNALIZED_STRING_TYPE,                                         \
-    ExternalTwoByteString::kSize,                                              \
-    external_internalized_string,                                              \
-    ExternalInternalizedString)                                                \
-  V(EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE,                                   \
-    ExternalAsciiString::kSize,                                                \
-    external_ascii_internalized_string,                                        \
-    ExternalAsciiInternalizedString)                                           \
-  V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE,                      \
-    ExternalTwoByteString::kSize,                                              \
-    external_internalized_string_with_one_byte_data,                           \
-    ExternalInternalizedStringWithOneByteData)                                 \
-  V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE,                                   \
-    ExternalTwoByteString::kShortSize,                                         \
-    short_external_internalized_string,                                        \
-    ShortExternalInternalizedString)                                           \
-  V(SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE,                             \
-    ExternalAsciiString::kShortSize,                                           \
-    short_external_ascii_internalized_string,                                  \
-    ShortExternalAsciiInternalizedString)                                      \
-  V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE,                \
-    ExternalTwoByteString::kShortSize,                                         \
-    short_external_internalized_string_with_one_byte_data,                     \
-    ShortExternalInternalizedStringWithOneByteData)                            \
+#define STRING_TYPE_LIST(V)                                                   \
+  V(STRING_TYPE, kVariableSizeSentinel, string, String)                       \
+  V(ONE_BYTE_STRING_TYPE, kVariableSizeSentinel, one_byte_string,             \
+    OneByteString)                                                            \
+  V(CONS_STRING_TYPE, ConsString::kSize, cons_string, ConsString)             \
+  V(CONS_ONE_BYTE_STRING_TYPE, ConsString::kSize, cons_one_byte_string,       \
+    ConsOneByteString)                                                        \
+  V(SLICED_STRING_TYPE, SlicedString::kSize, sliced_string, SlicedString)     \
+  V(SLICED_ONE_BYTE_STRING_TYPE, SlicedString::kSize, sliced_one_byte_string, \
+    SlicedOneByteString)                                                      \
+  V(EXTERNAL_STRING_TYPE, ExternalTwoByteString::kSize, external_string,      \
+    ExternalString)                                                           \
+  V(EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kSize,              \
+    external_one_byte_string, ExternalOneByteString)                          \
+  V(EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE, ExternalTwoByteString::kSize,    \
+    external_string_with_one_byte_data, ExternalStringWithOneByteData)        \
+  V(SHORT_EXTERNAL_STRING_TYPE, ExternalTwoByteString::kShortSize,            \
+    short_external_string, ShortExternalString)                               \
+  V(SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE, ExternalOneByteString::kShortSize,   \
+    short_external_one_byte_string, ShortExternalOneByteString)               \
+  V(SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE,                            \
+    ExternalTwoByteString::kShortSize,                                        \
+    short_external_string_with_one_byte_data,                                 \
+    ShortExternalStringWithOneByteData)                                       \
+                                                                              \
+  V(INTERNALIZED_STRING_TYPE, kVariableSizeSentinel, internalized_string,     \
+    InternalizedString)                                                       \
+  V(ONE_BYTE_INTERNALIZED_STRING_TYPE, kVariableSizeSentinel,                 \
+    one_byte_internalized_string, OneByteInternalizedString)                  \
+  V(EXTERNAL_INTERNALIZED_STRING_TYPE, ExternalTwoByteString::kSize,          \
+    external_internalized_string, ExternalInternalizedString)                 \
+  V(EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE, ExternalOneByteString::kSize, \
+    external_one_byte_internalized_string, ExternalOneByteInternalizedString) \
+  V(EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE,                     \
+    ExternalTwoByteString::kSize,                                             \
+    external_internalized_string_with_one_byte_data,                          \
+    ExternalInternalizedStringWithOneByteData)                                \
+  V(SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE,                                  \
+    ExternalTwoByteString::kShortSize, short_external_internalized_string,    \
+    ShortExternalInternalizedString)                                          \
+  V(SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE,                         \
+    ExternalOneByteString::kShortSize,                                        \
+    short_external_one_byte_internalized_string,                              \
+    ShortExternalOneByteInternalizedString)                                   \
+  V(SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE,               \
+    ExternalTwoByteString::kShortSize,                                        \
+    short_external_internalized_string_with_one_byte_data,                    \
+    ShortExternalInternalizedStringWithOneByteData)
 
 // A struct is a simple object with a set of object-valued fields.  Including an
 // object type in this causes the compiler to generate most of the boilerplate
@@ -607,64 +591,68 @@
 
 
 // A ConsString with an empty string as the right side is a candidate
-// for being shortcut by the garbage collector unless it is internalized.
-// It's not common to have non-flat internalized strings, so we do not
-// shortcut them thereby avoiding turning internalized strings into strings.
-// See heap.cc and mark-compact.cc.
+// for being shortcut by the garbage collector. We don't allocate any
+// non-flat internalized strings, so we do not shortcut them, thereby
+// avoiding turning internalized strings into non-internalized strings.
+// The bit masks below include the internalized bit as additional safety.
+// See heap.cc, mark-compact.cc and objects-visiting.cc.
 const uint32_t kShortcutTypeMask =
     kIsNotStringMask |
     kIsNotInternalizedMask |
     kStringRepresentationMask;
 const uint32_t kShortcutTypeTag = kConsStringTag | kNotInternalizedTag;
 
+static inline bool IsShortcutCandidate(int type) {
+  return ((type & kShortcutTypeMask) == kShortcutTypeTag);
+}
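
IsShortcutCandidate folds three membership tests, is-a-string, not-internalized, and cons-representation, into one AND-and-compare by ORing the relevant group masks and required tags. A standalone sketch with hypothetical tag values:

#include <cassert>

// Standalone sketch with hypothetical tag values (the real masks appear just
// above): OR the masks of every group that must match, OR the required tags,
// and the three tests collapse into one AND-and-compare.
const unsigned kIsNotStringMask = 0x80;        // 0 means "is a string"
const unsigned kIsNotInternalizedMask = 0x40;  // 0x40 means "not internalized"
const unsigned kNotInternalizedTag = 0x40;
const unsigned kStringRepresentationMask = 0x3;
const unsigned kConsStringTag = 0x1;

const unsigned kShortcutTypeMask =
    kIsNotStringMask | kIsNotInternalizedMask | kStringRepresentationMask;
const unsigned kShortcutTypeTag = kConsStringTag | kNotInternalizedTag;

bool IsShortcutCandidate(unsigned type) {
  return (type & kShortcutTypeMask) == kShortcutTypeTag;
}

int main() {
  assert(IsShortcutCandidate(0x41));   // non-internalized cons string
  assert(!IsShortcutCandidate(0x01));  // internalized cons string is excluded
  assert(!IsShortcutCandidate(0x42));  // non-internalized external string
  return 0;
}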
+
 
 enum InstanceType {
   // String types.
-  INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kSeqStringTag
-      | kInternalizedTag,
-  ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag | kSeqStringTag
-      | kInternalizedTag,
-  EXTERNAL_INTERNALIZED_STRING_TYPE = kTwoByteStringTag | kExternalStringTag
-      | kInternalizedTag,
-  EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE = kOneByteStringTag
-      | kExternalStringTag | kInternalizedTag,
+  INTERNALIZED_STRING_TYPE =
+      kTwoByteStringTag | kSeqStringTag | kInternalizedTag,
+  ONE_BYTE_INTERNALIZED_STRING_TYPE =
+      kOneByteStringTag | kSeqStringTag | kInternalizedTag,
+  EXTERNAL_INTERNALIZED_STRING_TYPE =
+      kTwoByteStringTag | kExternalStringTag | kInternalizedTag,
+  EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
+      kOneByteStringTag | kExternalStringTag | kInternalizedTag,
   EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
-      EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag
-      | kInternalizedTag,
-  SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE =
-      EXTERNAL_INTERNALIZED_STRING_TYPE | kShortExternalStringTag
-      | kInternalizedTag,
-  SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE =
-      EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kShortExternalStringTag
-      | kInternalizedTag,
+      EXTERNAL_INTERNALIZED_STRING_TYPE | kOneByteDataHintTag |
+      kInternalizedTag,
+  SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE = EXTERNAL_INTERNALIZED_STRING_TYPE |
+                                            kShortExternalStringTag |
+                                            kInternalizedTag,
+  SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE =
+      EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kShortExternalStringTag |
+      kInternalizedTag,
   SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE =
-      EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
-      | kShortExternalStringTag | kInternalizedTag,
-
+      EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+      kShortExternalStringTag | kInternalizedTag,
   STRING_TYPE = INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
-  ASCII_STRING_TYPE = ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+  ONE_BYTE_STRING_TYPE =
+      ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
   CONS_STRING_TYPE = kTwoByteStringTag | kConsStringTag | kNotInternalizedTag,
-  CONS_ASCII_STRING_TYPE =
+  CONS_ONE_BYTE_STRING_TYPE =
       kOneByteStringTag | kConsStringTag | kNotInternalizedTag,
-
   SLICED_STRING_TYPE =
       kTwoByteStringTag | kSlicedStringTag | kNotInternalizedTag,
-  SLICED_ASCII_STRING_TYPE =
+  SLICED_ONE_BYTE_STRING_TYPE =
       kOneByteStringTag | kSlicedStringTag | kNotInternalizedTag,
   EXTERNAL_STRING_TYPE =
-  EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
-  EXTERNAL_ASCII_STRING_TYPE =
-  EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+      EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+  EXTERNAL_ONE_BYTE_STRING_TYPE =
+      EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
   EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
-      EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
-      | kNotInternalizedTag,
+      EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+      kNotInternalizedTag,
   SHORT_EXTERNAL_STRING_TYPE =
       SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
-  SHORT_EXTERNAL_ASCII_STRING_TYPE =
-      SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
+  SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE =
+      SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE | kNotInternalizedTag,
   SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE =
-      SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE
-      | kNotInternalizedTag,
+      SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE |
+      kNotInternalizedTag,
 
   // Non-string names
   SYMBOL_TYPE = kNotStringTag,  // FIRST_NONSTRING_TYPE, LAST_NAME_TYPE
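
Each string InstanceType above is an OR of independent tag groups (encoding, representation, internalization), so a group can be read back with a single mask. A sketch with hypothetical tag values, which need not match the real constants defined elsewhere in this header:

// Hypothetical tag values; each group occupies disjoint bits, so types are
// composed with | and decomposed with &.
enum Tags : unsigned {
  kTwoByteStringTag = 0x0, kOneByteStringTag = 0x4,    // encoding group
  kSeqStringTag = 0x0, kConsStringTag = 0x1,           // representation group
  kInternalizedTag = 0x0, kNotInternalizedTag = 0x40,  // internalization group
  kStringEncodingMask = 0x4,
  kStringRepresentationMask = 0x3
};

constexpr unsigned kToyConsOneByteStringType =
    kOneByteStringTag | kConsStringTag | kNotInternalizedTag;

static_assert((kToyConsOneByteStringType & kStringEncodingMask) ==
                  kOneByteStringTag, "encoding decodes with one mask");
static_assert((kToyConsOneByteStringType & kStringRepresentationMask) ==
                  kConsStringTag, "so does the representation");

int main() { return 0; }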
@@ -679,10 +667,10 @@
   // "Data", objects that cannot contain non-map-word pointers to heap
   // objects.
   HEAP_NUMBER_TYPE,
+  MUTABLE_HEAP_NUMBER_TYPE,
   FOREIGN_TYPE,
   BYTE_ARRAY_TYPE,
   FREE_SPACE_TYPE,
-
   EXTERNAL_INT8_ARRAY_TYPE,  // FIRST_EXTERNAL_ARRAY_TYPE
   EXTERNAL_UINT8_ARRAY_TYPE,
   EXTERNAL_INT16_ARRAY_TYPE,
@@ -692,8 +680,7 @@
   EXTERNAL_FLOAT32_ARRAY_TYPE,
   EXTERNAL_FLOAT64_ARRAY_TYPE,
   EXTERNAL_UINT8_CLAMPED_ARRAY_TYPE,  // LAST_EXTERNAL_ARRAY_TYPE
-
-  FIXED_INT8_ARRAY_TYPE,  // FIRST_FIXED_TYPED_ARRAY_TYPE
+  FIXED_INT8_ARRAY_TYPE,              // FIRST_FIXED_TYPED_ARRAY_TYPE
   FIXED_UINT8_ARRAY_TYPE,
   FIXED_INT16_ARRAY_TYPE,
   FIXED_UINT16_ARRAY_TYPE,
@@ -702,7 +689,6 @@
   FIXED_FLOAT32_ARRAY_TYPE,
   FIXED_FLOAT64_ARRAY_TYPE,
   FIXED_UINT8_CLAMPED_ARRAY_TYPE,  // LAST_FIXED_TYPED_ARRAY_TYPE
-
   FIXED_DOUBLE_ARRAY_TYPE,
   FILLER_TYPE,  // LAST_DATA_TYPE
 
@@ -728,7 +714,6 @@
   BOX_TYPE,
   DEBUG_INFO_TYPE,
   BREAK_POINT_INFO_TYPE,
-
   FIXED_ARRAY_TYPE,
   CONSTANT_POOL_ARRAY_TYPE,
   SHARED_FUNCTION_INFO_TYPE,
@@ -739,9 +724,8 @@
   // compares for checking the JS_RECEIVER/SPEC_OBJECT range and the
   // NONCALLABLE_JS_OBJECT range.
   JS_FUNCTION_PROXY_TYPE,  // FIRST_JS_RECEIVER_TYPE, FIRST_JS_PROXY_TYPE
-  JS_PROXY_TYPE,  // LAST_JS_PROXY_TYPE
-
-  JS_VALUE_TYPE,  // FIRST_JS_OBJECT_TYPE
+  JS_PROXY_TYPE,           // LAST_JS_PROXY_TYPE
+  JS_VALUE_TYPE,           // FIRST_JS_OBJECT_TYPE
   JS_MESSAGE_OBJECT_TYPE,
   JS_DATE_TYPE,
   JS_OBJECT_TYPE,
@@ -761,9 +745,7 @@
   JS_MAP_ITERATOR_TYPE,
   JS_WEAK_MAP_TYPE,
   JS_WEAK_SET_TYPE,
-
   JS_REGEXP_TYPE,
-
   JS_FUNCTION_TYPE,  // LAST_JS_OBJECT_TYPE, LAST_JS_RECEIVER_TYPE
 
   // Pseudo-types
@@ -844,15 +826,21 @@
 
 
 #define DECL_BOOLEAN_ACCESSORS(name)   \
-  inline bool name();                  \
+  inline bool name() const;            \
   inline void set_##name(bool value);  \
 
 
 #define DECL_ACCESSORS(name, type)                                      \
-  inline type* name();                                                  \
+  inline type* name() const;                                            \
   inline void set_##name(type* value,                                   \
                          WriteBarrierMode mode = UPDATE_WRITE_BARRIER); \
 
+
+#define DECLARE_CAST(type)                              \
+  INLINE(static type* cast(Object* object));            \
+  INLINE(static const type* cast(const Object* object));
+
+
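
To make the macro changes concrete: the getters merely gain a const qualifier, and DECLARE_CAST replaces the hand-written per-class cast() with a const-correct pair. A sketch of the expansion for a hypothetical FixedArray-typed field named elements:

    // DECL_ACCESSORS(elements, FixedArray) now expands, roughly, to:
    inline FixedArray* elements() const;
    inline void set_elements(FixedArray* value,
                             WriteBarrierMode mode = UPDATE_WRITE_BARRIER);

    // DECLARE_CAST(JSObject) declares a mutable and a const overload:
    INLINE(static JSObject* cast(Object* object));
    INLINE(static const JSObject* cast(const Object* object));
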
 class AccessorPair;
 class AllocationSite;
 class AllocationSiteCreationContext;
@@ -864,6 +852,7 @@
 class ObjectVisitor;
 class LookupIterator;
 class StringStream;
+class TypeFeedbackVector;
 // We cannot just say "class HeapType;" if it is created from a template... =8-?
 template<class> class TypeImpl;
 struct HeapTypeConfig;
@@ -880,468 +869,121 @@
 #endif
 
 #ifdef OBJECT_PRINT
-#define DECLARE_PRINTER(Name) void Name##Print(FILE* out = stdout);
+#define DECLARE_PRINTER(Name) void Name##Print(OStream& os);  // NOLINT
 #else
 #define DECLARE_PRINTER(Name)
 #endif
 
 
-#define OBJECT_TYPE_LIST(V)                    \
-  V(Smi)                                       \
-  V(HeapObject)                                \
-  V(Number)                                    \
+#define OBJECT_TYPE_LIST(V) \
+  V(Smi)                    \
+  V(HeapObject)             \
+  V(Number)
 
-#define HEAP_OBJECT_TYPE_LIST(V)               \
-  V(HeapNumber)                                \
-  V(Name)                                      \
-  V(UniqueName)                                \
-  V(String)                                    \
-  V(SeqString)                                 \
-  V(ExternalString)                            \
-  V(ConsString)                                \
-  V(SlicedString)                              \
-  V(ExternalTwoByteString)                     \
-  V(ExternalAsciiString)                       \
-  V(SeqTwoByteString)                          \
-  V(SeqOneByteString)                          \
-  V(InternalizedString)                        \
-  V(Symbol)                                    \
-                                               \
-  V(ExternalArray)                             \
-  V(ExternalInt8Array)                         \
-  V(ExternalUint8Array)                        \
-  V(ExternalInt16Array)                        \
-  V(ExternalUint16Array)                       \
-  V(ExternalInt32Array)                        \
-  V(ExternalUint32Array)                       \
-  V(ExternalFloat32Array)                      \
-  V(ExternalFloat64Array)                      \
-  V(ExternalUint8ClampedArray)                 \
-  V(FixedTypedArrayBase)                       \
-  V(FixedUint8Array)                           \
-  V(FixedInt8Array)                            \
-  V(FixedUint16Array)                          \
-  V(FixedInt16Array)                           \
-  V(FixedUint32Array)                          \
-  V(FixedInt32Array)                           \
-  V(FixedFloat32Array)                         \
-  V(FixedFloat64Array)                         \
-  V(FixedUint8ClampedArray)                    \
-  V(ByteArray)                                 \
-  V(FreeSpace)                                 \
-  V(JSReceiver)                                \
-  V(JSObject)                                  \
-  V(JSContextExtensionObject)                  \
-  V(JSGeneratorObject)                         \
-  V(JSModule)                                  \
-  V(Map)                                       \
-  V(DescriptorArray)                           \
-  V(TransitionArray)                           \
-  V(DeoptimizationInputData)                   \
-  V(DeoptimizationOutputData)                  \
-  V(DependentCode)                             \
-  V(FixedArray)                                \
-  V(FixedDoubleArray)                          \
-  V(ConstantPoolArray)                         \
-  V(Context)                                   \
-  V(NativeContext)                             \
-  V(ScopeInfo)                                 \
-  V(JSFunction)                                \
-  V(Code)                                      \
-  V(Oddball)                                   \
-  V(SharedFunctionInfo)                        \
-  V(JSValue)                                   \
-  V(JSDate)                                    \
-  V(JSMessageObject)                           \
-  V(StringWrapper)                             \
-  V(Foreign)                                   \
-  V(Boolean)                                   \
-  V(JSArray)                                   \
-  V(JSArrayBuffer)                             \
-  V(JSArrayBufferView)                         \
-  V(JSTypedArray)                              \
-  V(JSDataView)                                \
-  V(JSProxy)                                   \
-  V(JSFunctionProxy)                           \
-  V(JSSet)                                     \
-  V(JSMap)                                     \
-  V(JSSetIterator)                             \
-  V(JSMapIterator)                             \
-  V(JSWeakCollection)                          \
-  V(JSWeakMap)                                 \
-  V(JSWeakSet)                                 \
-  V(JSRegExp)                                  \
-  V(HashTable)                                 \
-  V(Dictionary)                                \
-  V(StringTable)                               \
-  V(JSFunctionResultCache)                     \
-  V(NormalizedMapCache)                        \
-  V(CompilationCacheTable)                     \
-  V(CodeCacheHashTable)                        \
-  V(PolymorphicCodeCacheHashTable)             \
-  V(MapCache)                                  \
-  V(Primitive)                                 \
-  V(GlobalObject)                              \
-  V(JSGlobalObject)                            \
-  V(JSBuiltinsObject)                          \
-  V(JSGlobalProxy)                             \
-  V(UndetectableObject)                        \
-  V(AccessCheckNeeded)                         \
-  V(Cell)                                      \
-  V(PropertyCell)                              \
-  V(ObjectHashTable)                           \
-  V(WeakHashTable)                             \
+#define HEAP_OBJECT_TYPE_LIST(V)   \
+  V(HeapNumber)                    \
+  V(MutableHeapNumber)             \
+  V(Name)                          \
+  V(UniqueName)                    \
+  V(String)                        \
+  V(SeqString)                     \
+  V(ExternalString)                \
+  V(ConsString)                    \
+  V(SlicedString)                  \
+  V(ExternalTwoByteString)         \
+  V(ExternalOneByteString)         \
+  V(SeqTwoByteString)              \
+  V(SeqOneByteString)              \
+  V(InternalizedString)            \
+  V(Symbol)                        \
+                                   \
+  V(ExternalArray)                 \
+  V(ExternalInt8Array)             \
+  V(ExternalUint8Array)            \
+  V(ExternalInt16Array)            \
+  V(ExternalUint16Array)           \
+  V(ExternalInt32Array)            \
+  V(ExternalUint32Array)           \
+  V(ExternalFloat32Array)          \
+  V(ExternalFloat64Array)          \
+  V(ExternalUint8ClampedArray)     \
+  V(FixedTypedArrayBase)           \
+  V(FixedUint8Array)               \
+  V(FixedInt8Array)                \
+  V(FixedUint16Array)              \
+  V(FixedInt16Array)               \
+  V(FixedUint32Array)              \
+  V(FixedInt32Array)               \
+  V(FixedFloat32Array)             \
+  V(FixedFloat64Array)             \
+  V(FixedUint8ClampedArray)        \
+  V(ByteArray)                     \
+  V(FreeSpace)                     \
+  V(JSReceiver)                    \
+  V(JSObject)                      \
+  V(JSContextExtensionObject)      \
+  V(JSGeneratorObject)             \
+  V(JSModule)                      \
+  V(Map)                           \
+  V(DescriptorArray)               \
+  V(TransitionArray)               \
+  V(TypeFeedbackVector)            \
+  V(DeoptimizationInputData)       \
+  V(DeoptimizationOutputData)      \
+  V(DependentCode)                 \
+  V(FixedArray)                    \
+  V(FixedDoubleArray)              \
+  V(ConstantPoolArray)             \
+  V(Context)                       \
+  V(NativeContext)                 \
+  V(ScopeInfo)                     \
+  V(JSFunction)                    \
+  V(Code)                          \
+  V(Oddball)                       \
+  V(SharedFunctionInfo)            \
+  V(JSValue)                       \
+  V(JSDate)                        \
+  V(JSMessageObject)               \
+  V(StringWrapper)                 \
+  V(Foreign)                       \
+  V(Boolean)                       \
+  V(JSArray)                       \
+  V(JSArrayBuffer)                 \
+  V(JSArrayBufferView)             \
+  V(JSTypedArray)                  \
+  V(JSDataView)                    \
+  V(JSProxy)                       \
+  V(JSFunctionProxy)               \
+  V(JSSet)                         \
+  V(JSMap)                         \
+  V(JSSetIterator)                 \
+  V(JSMapIterator)                 \
+  V(JSWeakCollection)              \
+  V(JSWeakMap)                     \
+  V(JSWeakSet)                     \
+  V(JSRegExp)                      \
+  V(HashTable)                     \
+  V(Dictionary)                    \
+  V(StringTable)                   \
+  V(JSFunctionResultCache)         \
+  V(NormalizedMapCache)            \
+  V(CompilationCacheTable)         \
+  V(CodeCacheHashTable)            \
+  V(PolymorphicCodeCacheHashTable) \
+  V(MapCache)                      \
+  V(Primitive)                     \
+  V(GlobalObject)                  \
+  V(JSGlobalObject)                \
+  V(JSBuiltinsObject)              \
+  V(JSGlobalProxy)                 \
+  V(UndetectableObject)            \
+  V(AccessCheckNeeded)             \
+  V(Cell)                          \
+  V(PropertyCell)                  \
+  V(ObjectHashTable)               \
+  V(WeakHashTable)                 \
   V(OrderedHashTable)
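
Both type lists are X-macros: a client defines V to stamp out one declaration or statement per entry and then invokes the list. This is exactly how class Object declares its type predicates further down in this file:

    #define IS_TYPE_FUNCTION_DECL(type_) INLINE(bool Is##type_() const);
    OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
    HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
    #undef IS_TYPE_FUNCTION_DECL
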
 
-
-#define ERROR_MESSAGES_LIST(V) \
-  V(kNoReason, "no reason")                                                   \
-                                                                              \
-  V(k32BitValueInRegisterIsNotZeroExtended,                                   \
-    "32 bit value in register is not zero-extended")                          \
-  V(kAlignmentMarkerExpected, "Alignment marker expected")                    \
-  V(kAllocationIsNotDoubleAligned, "Allocation is not double aligned")        \
-  V(kAPICallReturnedInvalidObject, "API call returned invalid object")        \
-  V(kArgumentsObjectValueInATestContext,                                      \
-    "Arguments object value in a test context")                               \
-  V(kArrayBoilerplateCreationFailed, "Array boilerplate creation failed")     \
-  V(kArrayIndexConstantValueTooBig, "Array index constant value too big")     \
-  V(kAssignmentToArguments, "Assignment to arguments")                        \
-  V(kAssignmentToLetVariableBeforeInitialization,                             \
-    "Assignment to let variable before initialization")                       \
-  V(kAssignmentToLOOKUPVariable, "Assignment to LOOKUP variable")             \
-  V(kAssignmentToParameterFunctionUsesArgumentsObject,                        \
-    "Assignment to parameter, function uses arguments object")                \
-  V(kAssignmentToParameterInArgumentsObject,                                  \
-    "Assignment to parameter in arguments object")                            \
-  V(kAttemptToUseUndefinedCache, "Attempt to use undefined cache")            \
-  V(kBadValueContextForArgumentsObjectValue,                                  \
-    "Bad value context for arguments object value")                           \
-  V(kBadValueContextForArgumentsValue,                                        \
-    "Bad value context for arguments value")                                  \
-  V(kBailedOutDueToDependencyChange, "Bailed out due to dependency change")   \
-  V(kBailoutWasNotPrepared, "Bailout was not prepared")                       \
-  V(kBinaryStubGenerateFloatingPointCode,                                     \
-    "BinaryStub_GenerateFloatingPointCode")                                   \
-  V(kBothRegistersWereSmisInSelectNonSmi,                                     \
-    "Both registers were smis in SelectNonSmi")                               \
-  V(kCallToAJavaScriptRuntimeFunction,                                        \
-    "Call to a JavaScript runtime function")                                  \
-  V(kCannotTranslatePositionInChangedArea,                                    \
-    "Cannot translate position in changed area")                              \
-  V(kCodeGenerationFailed, "Code generation failed")                          \
-  V(kCodeObjectNotProperlyPatched, "Code object not properly patched")        \
-  V(kCompoundAssignmentToLookupSlot, "Compound assignment to lookup slot")    \
-  V(kContextAllocatedArguments, "Context-allocated arguments")                \
-  V(kCopyBuffersOverlap, "Copy buffers overlap")                              \
-  V(kCouldNotGenerateZero, "Could not generate +0.0")                         \
-  V(kCouldNotGenerateNegativeZero, "Could not generate -0.0")                 \
-  V(kDebuggerHasBreakPoints, "Debugger has break points")                     \
-  V(kDebuggerStatement, "DebuggerStatement")                                  \
-  V(kDeclarationInCatchContext, "Declaration in catch context")               \
-  V(kDeclarationInWithContext, "Declaration in with context")                 \
-  V(kDefaultNaNModeNotSet, "Default NaN mode not set")                        \
-  V(kDeleteWithGlobalVariable, "Delete with global variable")                 \
-  V(kDeleteWithNonGlobalVariable, "Delete with non-global variable")          \
-  V(kDestinationOfCopyNotAligned, "Destination of copy not aligned")          \
-  V(kDontDeleteCellsCannotContainTheHole,                                     \
-    "DontDelete cells can't contain the hole")                                \
-  V(kDoPushArgumentNotImplementedForDoubleType,                               \
-    "DoPushArgument not implemented for double type")                         \
-  V(kEliminatedBoundsCheckFailed, "Eliminated bounds check failed")           \
-  V(kEmitLoadRegisterUnsupportedDoubleImmediate,                              \
-    "EmitLoadRegister: Unsupported double immediate")                         \
-  V(kEval, "eval")                                                            \
-  V(kExpected0AsASmiSentinel, "Expected 0 as a Smi sentinel")                 \
-  V(kExpectedAlignmentMarker, "Expected alignment marker")                    \
-  V(kExpectedAllocationSite, "Expected allocation site")                      \
-  V(kExpectedFunctionObject, "Expected function object in register")          \
-  V(kExpectedHeapNumber, "Expected HeapNumber")                               \
-  V(kExpectedNativeContext, "Expected native context")                        \
-  V(kExpectedNonIdenticalObjects, "Expected non-identical objects")           \
-  V(kExpectedNonNullContext, "Expected non-null context")                     \
-  V(kExpectedPositiveZero, "Expected +0.0")                                   \
-  V(kExpectedAllocationSiteInCell,                                            \
-    "Expected AllocationSite in property cell")                               \
-  V(kExpectedFixedArrayInFeedbackVector,                                      \
-    "Expected fixed array in feedback vector")                                \
-  V(kExpectedFixedArrayInRegisterA2,                                          \
-    "Expected fixed array in register a2")                                    \
-  V(kExpectedFixedArrayInRegisterEbx,                                         \
-    "Expected fixed array in register ebx")                                   \
-  V(kExpectedFixedArrayInRegisterR2,                                          \
-    "Expected fixed array in register r2")                                    \
-  V(kExpectedFixedArrayInRegisterRbx,                                         \
-    "Expected fixed array in register rbx")                                   \
-  V(kExpectedNewSpaceObject, "Expected new space object")                     \
-  V(kExpectedSmiOrHeapNumber, "Expected smi or HeapNumber")                   \
-  V(kExpectedUndefinedOrCell,                                                 \
-    "Expected undefined or cell in register")                                 \
-  V(kExpectingAlignmentForCopyBytes,                                          \
-    "Expecting alignment for CopyBytes")                                      \
-  V(kExportDeclaration, "Export declaration")                                 \
-  V(kExternalStringExpectedButNotFound,                                       \
-    "External string expected, but not found")                                \
-  V(kFailedBailedOutLastTime, "Failed/bailed out last time")                  \
-  V(kForInStatementIsNotFastCase, "ForInStatement is not fast case")          \
-  V(kForInStatementOptimizationIsDisabled,                                    \
-    "ForInStatement optimization is disabled")                                \
-  V(kForInStatementWithNonLocalEachVariable,                                  \
-    "ForInStatement with non-local each variable")                            \
-  V(kForOfStatement, "ForOfStatement")                                        \
-  V(kFrameIsExpectedToBeAligned, "Frame is expected to be aligned")           \
-  V(kFunctionCallsEval, "Function calls eval")                                \
-  V(kFunctionIsAGenerator, "Function is a generator")                         \
-  V(kFunctionWithIllegalRedeclaration, "Function with illegal redeclaration") \
-  V(kGeneratedCodeIsTooLarge, "Generated code is too large")                  \
-  V(kGeneratorFailedToResume, "Generator failed to resume")                   \
-  V(kGenerator, "Generator")                                                  \
-  V(kGlobalFunctionsMustHaveInitialMap,                                       \
-    "Global functions must have initial map")                                 \
-  V(kHeapNumberMapRegisterClobbered, "HeapNumberMap register clobbered")      \
-  V(kHydrogenFilter, "Optimization disabled by filter")                       \
-  V(kImportDeclaration, "Import declaration")                                 \
-  V(kImproperObjectOnPrototypeChainForStore,                                  \
-    "Improper object on prototype chain for store")                           \
-  V(kIndexIsNegative, "Index is negative")                                    \
-  V(kIndexIsTooLarge, "Index is too large")                                   \
-  V(kInlinedRuntimeFunctionClassOf, "Inlined runtime function: ClassOf")      \
-  V(kInlinedRuntimeFunctionFastAsciiArrayJoin,                                \
-    "Inlined runtime function: FastAsciiArrayJoin")                           \
-  V(kInlinedRuntimeFunctionGeneratorNext,                                     \
-    "Inlined runtime function: GeneratorNext")                                \
-  V(kInlinedRuntimeFunctionGeneratorThrow,                                    \
-    "Inlined runtime function: GeneratorThrow")                               \
-  V(kInlinedRuntimeFunctionGetFromCache,                                      \
-    "Inlined runtime function: GetFromCache")                                 \
-  V(kInlinedRuntimeFunctionIsNonNegativeSmi,                                  \
-    "Inlined runtime function: IsNonNegativeSmi")                             \
-  V(kInlinedRuntimeFunctionIsStringWrapperSafeForDefaultValueOf,              \
-    "Inlined runtime function: IsStringWrapperSafeForDefaultValueOf")         \
-  V(kInliningBailedOut, "Inlining bailed out")                                \
-  V(kInputGPRIsExpectedToHaveUpper32Cleared,                                  \
-    "Input GPR is expected to have upper32 cleared")                          \
-  V(kInputStringTooLong, "Input string too long")                             \
-  V(kInstanceofStubUnexpectedCallSiteCacheCheck,                              \
-    "InstanceofStub unexpected call site cache (check)")                      \
-  V(kInstanceofStubUnexpectedCallSiteCacheCmp1,                               \
-    "InstanceofStub unexpected call site cache (cmp 1)")                      \
-  V(kInstanceofStubUnexpectedCallSiteCacheCmp2,                               \
-    "InstanceofStub unexpected call site cache (cmp 2)")                      \
-  V(kInstanceofStubUnexpectedCallSiteCacheMov,                                \
-    "InstanceofStub unexpected call site cache (mov)")                        \
-  V(kInteger32ToSmiFieldWritingToNonSmiLocation,                              \
-    "Integer32ToSmiField writing to non-smi location")                        \
-  V(kInvalidCaptureReferenced, "Invalid capture referenced")                  \
-  V(kInvalidElementsKindForInternalArrayOrInternalPackedArray,                \
-    "Invalid ElementsKind for InternalArray or InternalPackedArray")          \
-  V(kInvalidFullCodegenState, "invalid full-codegen state")                   \
-  V(kInvalidHandleScopeLevel, "Invalid HandleScope level")                    \
-  V(kInvalidLeftHandSideInAssignment, "Invalid left-hand side in assignment") \
-  V(kInvalidLhsInCompoundAssignment, "Invalid lhs in compound assignment")    \
-  V(kInvalidLhsInCountOperation, "Invalid lhs in count operation")            \
-  V(kInvalidMinLength, "Invalid min_length")                                  \
-  V(kJSGlobalObjectNativeContextShouldBeANativeContext,                       \
-    "JSGlobalObject::native_context should be a native context")              \
-  V(kJSGlobalProxyContextShouldNotBeNull,                                     \
-    "JSGlobalProxy::context() should not be null")                            \
-  V(kJSObjectWithFastElementsMapHasSlowElements,                              \
-    "JSObject with fast elements map has slow elements")                      \
-  V(kLetBindingReInitialization, "Let binding re-initialization")             \
-  V(kLhsHasBeenClobbered, "lhs has been clobbered")                           \
-  V(kLiveBytesCountOverflowChunkSize, "Live Bytes Count overflow chunk size") \
-  V(kLiveEditFrameDroppingIsNotSupportedOnARM64,                              \
-    "LiveEdit frame dropping is not supported on arm64")                      \
-  V(kLiveEditFrameDroppingIsNotSupportedOnArm,                                \
-    "LiveEdit frame dropping is not supported on arm")                        \
-  V(kLiveEditFrameDroppingIsNotSupportedOnMips,                               \
-    "LiveEdit frame dropping is not supported on mips")                       \
-  V(kLiveEdit, "LiveEdit")                                                    \
-  V(kLookupVariableInCountOperation,                                          \
-    "Lookup variable in count operation")                                     \
-  V(kMapBecameDeprecated, "Map became deprecated")                            \
-  V(kMapBecameUnstable, "Map became unstable")                                \
-  V(kMapIsNoLongerInEax, "Map is no longer in eax")                           \
-  V(kModuleDeclaration, "Module declaration")                                 \
-  V(kModuleLiteral, "Module literal")                                         \
-  V(kModulePath, "Module path")                                               \
-  V(kModuleStatement, "Module statement")                                     \
-  V(kModuleVariable, "Module variable")                                       \
-  V(kModuleUrl, "Module url")                                                 \
-  V(kNativeFunctionLiteral, "Native function literal")                        \
-  V(kNeedSmiLiteral, "Need a Smi literal here")                               \
-  V(kNoCasesLeft, "No cases left")                                            \
-  V(kNoEmptyArraysHereInEmitFastAsciiArrayJoin,                               \
-    "No empty arrays here in EmitFastAsciiArrayJoin")                         \
-  V(kNonInitializerAssignmentToConst,                                         \
-    "Non-initializer assignment to const")                                    \
-  V(kNonSmiIndex, "Non-smi index")                                            \
-  V(kNonSmiKeyInArrayLiteral, "Non-smi key in array literal")                 \
-  V(kNonSmiValue, "Non-smi value")                                            \
-  V(kNonObject, "Non-object value")                                           \
-  V(kNotEnoughVirtualRegistersForValues,                                      \
-    "Not enough virtual registers for values")                                \
-  V(kNotEnoughSpillSlotsForOsr,                                               \
-    "Not enough spill slots for OSR")                                         \
-  V(kNotEnoughVirtualRegistersRegalloc,                                       \
-    "Not enough virtual registers (regalloc)")                                \
-  V(kObjectFoundInSmiOnlyArray, "Object found in smi-only array")             \
-  V(kObjectLiteralWithComplexProperty,                                        \
-    "Object literal with complex property")                                   \
-  V(kOddballInStringTableIsNotUndefinedOrTheHole,                             \
-    "Oddball in string table is not undefined or the hole")                   \
-  V(kOffsetOutOfRange, "Offset out of range")                                 \
-  V(kOperandIsASmiAndNotAName, "Operand is a smi and not a name")             \
-  V(kOperandIsASmiAndNotAString, "Operand is a smi and not a string")         \
-  V(kOperandIsASmi, "Operand is a smi")                                       \
-  V(kOperandIsNotAName, "Operand is not a name")                              \
-  V(kOperandIsNotANumber, "Operand is not a number")                          \
-  V(kOperandIsNotASmi, "Operand is not a smi")                                \
-  V(kOperandIsNotAString, "Operand is not a string")                          \
-  V(kOperandIsNotSmi, "Operand is not smi")                                   \
-  V(kOperandNotANumber, "Operand not a number")                               \
-  V(kObjectTagged, "The object is tagged")                                    \
-  V(kObjectNotTagged, "The object is not tagged")                             \
-  V(kOptimizationDisabled, "Optimization is disabled")                        \
-  V(kOptimizedTooManyTimes, "Optimized too many times")                       \
-  V(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister,                  \
-    "Out of virtual registers while trying to allocate temp register")        \
-  V(kParseScopeError, "Parse/scope error")                                    \
-  V(kPossibleDirectCallToEval, "Possible direct call to eval")                \
-  V(kPreconditionsWereNotMet, "Preconditions were not met")                   \
-  V(kPropertyAllocationCountFailed, "Property allocation count failed")       \
-  V(kReceivedInvalidReturnAddress, "Received invalid return address")         \
-  V(kReferenceToAVariableWhichRequiresDynamicLookup,                          \
-    "Reference to a variable which requires dynamic lookup")                  \
-  V(kReferenceToGlobalLexicalVariable,                                        \
-    "Reference to global lexical variable")                                   \
-  V(kReferenceToUninitializedVariable, "Reference to uninitialized variable") \
-  V(kRegisterDidNotMatchExpectedRoot, "Register did not match expected root") \
-  V(kRegisterWasClobbered, "Register was clobbered")                          \
-  V(kRememberedSetPointerInNewSpace, "Remembered set pointer is in new space") \
-  V(kReturnAddressNotFoundInFrame, "Return address not found in frame")       \
-  V(kRhsHasBeenClobbered, "Rhs has been clobbered")                           \
-  V(kScopedBlock, "ScopedBlock")                                              \
-  V(kSmiAdditionOverflow, "Smi addition overflow")                            \
-  V(kSmiSubtractionOverflow, "Smi subtraction overflow")                      \
-  V(kStackAccessBelowStackPointer, "Stack access below stack pointer")        \
-  V(kStackFrameTypesMustMatch, "Stack frame types must match")                \
-  V(kSwitchStatementMixedOrNonLiteralSwitchLabels,                            \
-    "SwitchStatement: mixed or non-literal switch labels")                    \
-  V(kSwitchStatementTooManyClauses, "SwitchStatement: too many clauses")      \
-  V(kTheCurrentStackPointerIsBelowCsp,                                        \
-    "The current stack pointer is below csp")                                 \
-  V(kTheInstructionShouldBeALui, "The instruction should be a lui")           \
-  V(kTheInstructionShouldBeAnOri, "The instruction should be an ori")         \
-  V(kTheInstructionToPatchShouldBeALoadFromPc,                                \
-    "The instruction to patch should be a load from pc")                      \
-  V(kTheInstructionToPatchShouldBeALoadFromPp,                                \
-    "The instruction to patch should be a load from pp")                      \
-  V(kTheInstructionToPatchShouldBeAnLdrLiteral,                               \
-    "The instruction to patch should be a ldr literal")                       \
-  V(kTheInstructionToPatchShouldBeALui,                                       \
-    "The instruction to patch should be a lui")                               \
-  V(kTheInstructionToPatchShouldBeAnOri,                                      \
-    "The instruction to patch should be an ori")                              \
-  V(kTheSourceAndDestinationAreTheSame,                                       \
-    "The source and destination are the same")                                \
-  V(kTheStackPointerIsNotAligned, "The stack pointer is not aligned.")        \
-  V(kTheStackWasCorruptedByMacroAssemblerCall,                                \
-    "The stack was corrupted by MacroAssembler::Call()")                      \
-  V(kTooManyParametersLocals, "Too many parameters/locals")                   \
-  V(kTooManyParameters, "Too many parameters")                                \
-  V(kTooManySpillSlotsNeededForOSR, "Too many spill slots needed for OSR")    \
-  V(kToOperand32UnsupportedImmediate, "ToOperand32 unsupported immediate.")   \
-  V(kToOperandIsDoubleRegisterUnimplemented,                                  \
-    "ToOperand IsDoubleRegister unimplemented")                               \
-  V(kToOperandUnsupportedDoubleImmediate,                                     \
-    "ToOperand Unsupported double immediate")                                 \
-  V(kTryCatchStatement, "TryCatchStatement")                                  \
-  V(kTryFinallyStatement, "TryFinallyStatement")                              \
-  V(kUnableToEncodeValueAsSmi, "Unable to encode value as smi")               \
-  V(kUnalignedAllocationInNewSpace, "Unaligned allocation in new space")      \
-  V(kUnalignedCellInWriteBarrier, "Unaligned cell in write barrier")          \
-  V(kUndefinedValueNotLoaded, "Undefined value not loaded")                   \
-  V(kUndoAllocationOfNonAllocatedMemory,                                      \
-    "Undo allocation of non allocated memory")                                \
-  V(kUnexpectedAllocationTop, "Unexpected allocation top")                    \
-  V(kUnexpectedColorFound, "Unexpected color bit pattern found")              \
-  V(kUnexpectedElementsKindInArrayConstructor,                                \
-    "Unexpected ElementsKind in array constructor")                           \
-  V(kUnexpectedFallthroughFromCharCodeAtSlowCase,                             \
-    "Unexpected fallthrough from CharCodeAt slow case")                       \
-  V(kUnexpectedFallthroughFromCharFromCodeSlowCase,                           \
-    "Unexpected fallthrough from CharFromCode slow case")                     \
-  V(kUnexpectedFallThroughFromStringComparison,                               \
-    "Unexpected fall-through from string comparison")                         \
-  V(kUnexpectedFallThroughInBinaryStubGenerateFloatingPointCode,              \
-    "Unexpected fall-through in BinaryStub_GenerateFloatingPointCode")        \
-  V(kUnexpectedFallthroughToCharCodeAtSlowCase,                               \
-    "Unexpected fallthrough to CharCodeAt slow case")                         \
-  V(kUnexpectedFallthroughToCharFromCodeSlowCase,                             \
-    "Unexpected fallthrough to CharFromCode slow case")                       \
-  V(kUnexpectedFPUStackDepthAfterInstruction,                                 \
-    "Unexpected FPU stack depth after instruction")                           \
-  V(kUnexpectedInitialMapForArrayFunction1,                                   \
-    "Unexpected initial map for Array function (1)")                          \
-  V(kUnexpectedInitialMapForArrayFunction2,                                   \
-    "Unexpected initial map for Array function (2)")                          \
-  V(kUnexpectedInitialMapForArrayFunction,                                    \
-    "Unexpected initial map for Array function")                              \
-  V(kUnexpectedInitialMapForInternalArrayFunction,                            \
-    "Unexpected initial map for InternalArray function")                      \
-  V(kUnexpectedLevelAfterReturnFromApiCall,                                   \
-    "Unexpected level after return from api call")                            \
-  V(kUnexpectedNegativeValue, "Unexpected negative value")                    \
-  V(kUnexpectedNumberOfPreAllocatedPropertyFields,                            \
-    "Unexpected number of pre-allocated property fields")                     \
-  V(kUnexpectedFPCRMode, "Unexpected FPCR mode.")                             \
-  V(kUnexpectedSmi, "Unexpected smi value")                                   \
-  V(kUnexpectedStringFunction, "Unexpected String function")                  \
-  V(kUnexpectedStringType, "Unexpected string type")                          \
-  V(kUnexpectedStringWrapperInstanceSize,                                     \
-    "Unexpected string wrapper instance size")                                \
-  V(kUnexpectedTypeForRegExpDataFixedArrayExpected,                           \
-    "Unexpected type for RegExp data, FixedArray expected")                   \
-  V(kUnexpectedValue, "Unexpected value")                                     \
-  V(kUnexpectedUnusedPropertiesOfStringWrapper,                               \
-    "Unexpected unused properties of string wrapper")                         \
-  V(kUnimplemented, "unimplemented")                                          \
-  V(kUninitializedKSmiConstantRegister, "Uninitialized kSmiConstantRegister") \
-  V(kUnknown, "Unknown")                                                      \
-  V(kUnsupportedConstCompoundAssignment,                                      \
-    "Unsupported const compound assignment")                                  \
-  V(kUnsupportedCountOperationWithConst,                                      \
-    "Unsupported count operation with const")                                 \
-  V(kUnsupportedDoubleImmediate, "Unsupported double immediate")              \
-  V(kUnsupportedLetCompoundAssignment, "Unsupported let compound assignment") \
-  V(kUnsupportedLookupSlotInDeclaration,                                      \
-    "Unsupported lookup slot in declaration")                                 \
-  V(kUnsupportedNonPrimitiveCompare, "Unsupported non-primitive compare")     \
-  V(kUnsupportedPhiUseOfArguments, "Unsupported phi use of arguments")        \
-  V(kUnsupportedPhiUseOfConstVariable,                                        \
-    "Unsupported phi use of const variable")                                  \
-  V(kUnsupportedTaggedImmediate, "Unsupported tagged immediate")              \
-  V(kVariableResolvedToWithContext, "Variable resolved to with context")      \
-  V(kWeShouldNotHaveAnEmptyLexicalContext,                                    \
-    "We should not have an empty lexical context")                            \
-  V(kWithStatement, "WithStatement")                                          \
-  V(kWrongAddressOrValuePassedToRecordWrite,                                  \
-    "Wrong address or value passed to RecordWrite")                           \
-  V(kYield, "Yield")
-
-
-#define ERROR_MESSAGES_CONSTANTS(C, T) C,
-enum BailoutReason {
-  ERROR_MESSAGES_LIST(ERROR_MESSAGES_CONSTANTS)
-  kLastErrorMessage
-};
-#undef ERROR_MESSAGES_CONSTANTS
-
-
-const char* GetBailoutReason(BailoutReason reason);
-
-
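
The deleted ERROR_MESSAGES_LIST is a two-argument X-macro: C names an enum constant and T carries its human-readable message, and the BailoutReason enum above kept only the C column. GetBailoutReason can then be implemented by expanding the same list a second time into a parallel string table; a sketch of that companion, assuming the list itself survives elsewhere in the tree (this hunk only shows the deletion from objects.h):

    #define ERROR_MESSAGES_TEXTS(C, T) T,
    static const char* const kErrorMessages[] = {
        ERROR_MESSAGES_LIST(ERROR_MESSAGES_TEXTS)};
    #undef ERROR_MESSAGES_TEXTS

    const char* GetBailoutReason(BailoutReason reason) {
      return kErrorMessages[reason];  // the enum value doubles as an index
    }
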
 // Object is the abstract superclass for all classes in the
 // object hierarchy.
 // Object does not use any virtual functions to avoid the
@@ -1351,57 +993,62 @@
 class Object {
  public:
   // Type testing.
-  bool IsObject() { return true; }
+  bool IsObject() const { return true; }
 
-#define IS_TYPE_FUNCTION_DECL(type_)  inline bool Is##type_();
+#define IS_TYPE_FUNCTION_DECL(type_)  INLINE(bool Is##type_() const);
   OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
   HEAP_OBJECT_TYPE_LIST(IS_TYPE_FUNCTION_DECL)
 #undef IS_TYPE_FUNCTION_DECL
 
-  inline bool IsFixedArrayBase();
-  inline bool IsExternal();
-  inline bool IsAccessorInfo();
+  // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
+  // a keyed store is of the form a[expression] = foo.
+  enum StoreFromKeyed {
+    MAY_BE_STORE_FROM_KEYED,
+    CERTAINLY_NOT_STORE_FROM_KEYED
+  };
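
In JavaScript terms, the distinction the comment draws looks like this (illustrative only):

    // o.x = 1;       // non-keyed: the property name is statically known
    // o["x"] = 1;    // also non-keyed: the key is a string literal
    // o[f()] = 1;    // keyed: callers must pass MAY_BE_STORE_FROM_KEYED
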
 
-  inline bool IsStruct();
-#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) inline bool Is##Name();
+  INLINE(bool IsFixedArrayBase() const);
+  INLINE(bool IsExternal() const);
+  INLINE(bool IsAccessorInfo() const);
+
+  INLINE(bool IsStruct() const);
+#define DECLARE_STRUCT_PREDICATE(NAME, Name, name) \
+  INLINE(bool Is##Name() const);
   STRUCT_LIST(DECLARE_STRUCT_PREDICATE)
 #undef DECLARE_STRUCT_PREDICATE
 
-  INLINE(bool IsSpecObject());
-  INLINE(bool IsSpecFunction());
-  INLINE(bool IsTemplateInfo());
-  bool IsCallable();
+  INLINE(bool IsSpecObject() const);
+  INLINE(bool IsSpecFunction() const);
+  INLINE(bool IsTemplateInfo() const);
+  INLINE(bool IsNameDictionary() const);
+  INLINE(bool IsSeededNumberDictionary() const);
+  INLINE(bool IsUnseededNumberDictionary() const);
+  INLINE(bool IsOrderedHashSet() const);
+  INLINE(bool IsOrderedHashMap() const);
+  bool IsCallable() const;
 
   // Oddball testing.
-  INLINE(bool IsUndefined());
-  INLINE(bool IsNull());
-  INLINE(bool IsTheHole());
-  INLINE(bool IsException());
-  INLINE(bool IsUninitialized());
-  INLINE(bool IsTrue());
-  INLINE(bool IsFalse());
-  inline bool IsArgumentsMarker();
+  INLINE(bool IsUndefined() const);
+  INLINE(bool IsNull() const);
+  INLINE(bool IsTheHole() const);
+  INLINE(bool IsException() const);
+  INLINE(bool IsUninitialized() const);
+  INLINE(bool IsTrue() const);
+  INLINE(bool IsFalse() const);
+  INLINE(bool IsArgumentsMarker() const);
 
   // Filler objects (fillers and free space objects).
-  inline bool IsFiller();
+  INLINE(bool IsFiller() const);
 
   // Extract the number.
   inline double Number();
-  inline bool IsNaN();
+  INLINE(bool IsNaN() const);
+  INLINE(bool IsMinusZero() const);
   bool ToInt32(int32_t* value);
   bool ToUint32(uint32_t* value);
 
-  // Indicates whether OptimalRepresentation can do its work, or whether it
-  // always has to return Representation::Tagged().
-  enum ValueType {
-    OPTIMAL_REPRESENTATION,
-    FORCE_TAGGED
-  };
-
-  inline Representation OptimalRepresentation(
-      ValueType type = OPTIMAL_REPRESENTATION) {
+  inline Representation OptimalRepresentation() {
     if (!FLAG_track_fields) return Representation::Tagged();
-    if (type == FORCE_TAGGED) return Representation::Tagged();
     if (IsSmi()) {
       return Representation::Smi();
     } else if (FLAG_track_double_fields && IsHeapNumber()) {
@@ -1409,7 +1056,7 @@
     } else if (FLAG_track_computed_fields && IsUninitialized()) {
       return Representation::None();
     } else if (FLAG_track_heap_object_fields) {
-      ASSERT(IsHeapObject());
+      DCHECK(IsHeapObject());
       return Representation::HeapObject();
     } else {
       return Representation::Tagged();
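
Worked through with the FLAG_track_* flags in their default (enabled) state, the chain above resolves as follows (comment-only recap of the code):

    // OptimalRepresentation():
    //   Smi 42                     -> Representation::Smi()
    //   HeapNumber 0.5             -> Representation::Double()
    //   the uninitialized sentinel -> Representation::None()
    //   any other heap object      -> Representation::HeapObject()
    //   tracking disabled          -> Representation::Tagged()
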
@@ -1422,7 +1069,7 @@
     } else if (FLAG_track_fields && representation.IsSmi()) {
       return IsSmi();
     } else if (FLAG_track_double_fields && representation.IsDouble()) {
-      return IsNumber();
+      return IsMutableHeapNumber() || IsNumber();
     } else if (FLAG_track_heap_object_fields && representation.IsHeapObject()) {
       return IsHeapObject();
     }
@@ -1435,6 +1082,10 @@
                                              Handle<Object> object,
                                              Representation representation);
 
+  inline static Handle<Object> WrapForRead(Isolate* isolate,
+                                           Handle<Object> object,
+                                           Representation representation);
+
   // Returns true if the object is of the correct type to be used as an
   // implementation of a JSObject's elements.
   inline bool HasValidElements();
@@ -1455,9 +1106,24 @@
   static MUST_USE_RESULT inline MaybeHandle<Smi> ToSmi(Isolate* isolate,
                                                        Handle<Object> object);
 
-  void Lookup(Handle<Name> name, LookupResult* result);
-
   MUST_USE_RESULT static MaybeHandle<Object> GetProperty(LookupIterator* it);
+
+  // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
+  MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
+      Handle<Object> object, Handle<Name> key, Handle<Object> value,
+      StrictMode strict_mode,
+      StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+
+  MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
+      LookupIterator* it, Handle<Object> value, StrictMode strict_mode,
+      StoreFromKeyed store_mode);
+  MUST_USE_RESULT static MaybeHandle<Object> WriteToReadOnlyProperty(
+      LookupIterator* it, Handle<Object> value, StrictMode strict_mode);
+  static Handle<Object> SetDataProperty(LookupIterator* it,
+                                        Handle<Object> value);
+  MUST_USE_RESULT static MaybeHandle<Object> AddDataProperty(
+      LookupIterator* it, Handle<Object> value, PropertyAttributes attributes,
+      StrictMode strict_mode, StoreFromKeyed store_mode);
   MUST_USE_RESULT static inline MaybeHandle<Object> GetPropertyOrElement(
       Handle<Object> object,
       Handle<Name> key);
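
A call-site sketch tying the relocated SetProperty entry point to the MUST_USE_RESULT / MaybeHandle protocol (STRICT is the StrictMode enumerator; the handles are the caller's):

    MaybeHandle<Object> StoreSketch(Handle<Object> receiver, Handle<Name> name,
                                    Handle<Object> value) {
      MaybeHandle<Object> maybe =
          Object::SetProperty(receiver, name, value, STRICT);
      Handle<Object> result;
      if (!maybe.ToHandle(&result)) {
        return MaybeHandle<Object>();  // empty: an exception is pending
      }
      return result;
    }
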
@@ -1474,12 +1140,9 @@
       Handle<Name> name,
       Handle<JSObject> holder,
       Handle<Object> structure);
-  MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithCallback(
-      Handle<Object> receiver,
-      Handle<Name> name,
-      Handle<Object> value,
-      Handle<JSObject> holder,
-      Handle<Object> structure,
+  MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithAccessor(
+      Handle<Object> receiver, Handle<Name> name, Handle<Object> value,
+      Handle<JSObject> holder, Handle<Object> structure,
       StrictMode strict_mode);
 
   MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithDefinedGetter(
@@ -1501,10 +1164,6 @@
       Handle<Object> receiver,
       uint32_t index);
 
-  // Return the object's prototype (might be Heap::null_value()).
-  Object* GetPrototype(Isolate* isolate);
-  static Handle<Object> GetPrototype(Isolate* isolate, Handle<Object> object);
-
   // Returns the permanent hash code associated with this object. May return
   // undefined if not yet created.
   Object* GetHash();
@@ -1547,25 +1206,39 @@
   // Prints this object without details to a message accumulator.
   void ShortPrint(StringStream* accumulator);
 
-  // Casting: This cast is only needed to satisfy macros in objects-inl.h.
-  static Object* cast(Object* value) { return value; }
+  DECLARE_CAST(Object)
 
   // Layout description.
   static const int kHeaderSize = 0;  // Object does not take up any space.
 
 #ifdef OBJECT_PRINT
-  // Prints this object with details.
+  // For our gdb macros, we should perhaps change these in the future.
   void Print();
-  void Print(FILE* out);
-  void PrintLn();
-  void PrintLn(FILE* out);
+
+  // Prints this object with details.
+  void Print(OStream& os);  // NOLINT
 #endif
 
  private:
+  friend class LookupIterator;
+  friend class PrototypeIterator;
+
+  // Return the map of the root of object's prototype chain.
+  Map* GetRootMap(Isolate* isolate);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Object);
 };
 
 
+struct Brief {
+  explicit Brief(const Object* const v) : value(v) {}
+  const Object* value;
+};
+
+
+OStream& operator<<(OStream& os, const Brief& v);
+
+
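
Brief is a manipulator-style wrapper: streaming Brief(obj) prints an object's one-line short form where printing the object directly would dump full details. A usage sketch (the helper is hypothetical):

    void LogReceiver(OStream& os, Object* receiver) {
      os << "receiver: " << Brief(receiver) << "\n";
    }
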
 // Smi represents integer Numbers that can be stored in 31 bits.
 // Smis are immediate which means they are NOT allocated in the heap.
 // The this pointer has the following format: [31 bit signed int] 0
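
That layout comment implies the whole tagging scheme: the low bit distinguishes Smis (tag 0) from heap object pointers (tag 1), so encoding and decoding are a shift pair. A self-contained 32-bit sketch, independent of the FromInt()/value() helpers declared below:

    // [31-bit signed payload][tag bit = 0]
    static inline intptr_t EncodeSmi(int value) {
      return static_cast<intptr_t>(value) << 1;  // tag bit stays zero
    }
    static inline int DecodeSmi(intptr_t raw) {
      return static_cast<int>(raw >> 1);  // arithmetic shift preserves sign
    }
    static inline bool HasSmiTag(intptr_t raw) { return (raw & 1) == 0; }
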
@@ -1575,7 +1248,7 @@
 class Smi: public Object {
  public:
   // Returns the integer value.
-  inline int value();
+  inline int value() const;
 
   // Convert a value to a Smi object.
   static inline Smi* FromInt(int value);
@@ -1585,13 +1258,10 @@
   // Returns whether value can be represented in a Smi.
   static inline bool IsValid(intptr_t value);
 
-  // Casting.
-  static inline Smi* cast(Object* object);
+  DECLARE_CAST(Smi)
 
   // Dispatched behavior.
-  void SmiPrint(FILE* out = stdout);
-  void SmiPrint(StringStream* accumulator);
-
+  void SmiPrint(OStream& os) const;  // NOLINT
   DECLARE_VERIFIER(Smi)
 
   static const int kMinValue =
@@ -1612,7 +1282,7 @@
   // Normal state: the map word contains a map pointer.
 
   // Create a map word from a map pointer.
-  static inline MapWord FromMap(Map* map);
+  static inline MapWord FromMap(const Map* map);
 
   // View this map word as a map pointer.
   inline Map* ToMap();
@@ -1656,7 +1326,7 @@
  public:
   // [map]: Contains a map which contains the object's reflective
   // information.
-  inline Map* map();
+  inline Map* map() const;
   inline void set_map(Map* value);
   // The no-write-barrier version.  This is OK if the object is white and in
   // new space, or if the value is an immortal immutable object, like the maps
@@ -1665,7 +1335,7 @@
 
   // Get the map using acquire load.
   inline Map* synchronized_map();
-  inline MapWord synchronized_map_word();
+  inline MapWord synchronized_map_word() const;
 
   // Set the map using release store
   inline void synchronized_set_map(Map* value);
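
The acquire/release pairing matters when another thread reads the map while the mutator installs it: a release store followed by an acquire load guarantees that a reader observing the new map also observes every field written before it. The idea in portable C++11 atomics (a sketch; the real accessors use V8's own primitives):

    #include <atomic>
    class Map;
    std::atomic<Map*> map_slot;

    void PublishMap(Map* m) {   // cf. synchronized_set_map
      map_slot.store(m, std::memory_order_release);
    }
    Map* AcquireMap() {         // cf. synchronized_map
      return map_slot.load(std::memory_order_acquire);
    }
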
@@ -1674,14 +1344,14 @@
 
   // During garbage collection, the map word of a heap object does not
   // necessarily contain a map pointer.
-  inline MapWord map_word();
+  inline MapWord map_word() const;
   inline void set_map_word(MapWord map_word);
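
map_word() exists alongside map() because a moving collector overloads the slot: once an object has been evacuated, the word holds a forwarding address rather than a Map*. A sketch using MapWord queries that live in the fuller class outside this hunk (IsForwardingAddress / ToForwardingAddress):

    HeapObject* ResolveForwarding(HeapObject* object) {  // hypothetical helper
      MapWord w = object->map_word();
      return w.IsForwardingAddress() ? w.ToForwardingAddress() : object;
    }
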
 
   // The Heap the object was allocated in. Used also to access Isolate.
-  inline Heap* GetHeap();
+  inline Heap* GetHeap() const;
 
   // Convenience method to get current isolate.
-  inline Isolate* GetIsolate();
+  inline Isolate* GetIsolate() const;
 
   // Converts an address to a HeapObject pointer.
   static inline HeapObject* FromAddress(Address address);
@@ -1701,6 +1371,10 @@
   // Returns the heap object's size in bytes
   inline int Size();
 
+  // Returns true if this heap object may contain raw values, i.e., values that
+  // look like pointers to heap objects.
+  inline bool MayContainRawValues();
+
   // Given a heap object's map pointer, returns the heap size in bytes
   // Useful when the map pointer field is used for other purposes.
   // GC internal.
@@ -1719,8 +1393,7 @@
                                  Handle<Name> name,
                                  Handle<Code> code);
 
-  // Casting.
-  static inline HeapObject* cast(Object* obj);
+  DECLARE_CAST(HeapObject)
 
   // Return the write barrier mode for this. Callers of this function
   // must be able to present a reference to a DisallowHeapAllocation
@@ -1731,9 +1404,9 @@
       const DisallowHeapAllocation& promise);
 
   // Dispatched behavior.
-  void HeapObjectShortPrint(StringStream* accumulator);
+  void HeapObjectShortPrint(OStream& os);  // NOLINT
 #ifdef OBJECT_PRINT
-  void PrintHeader(FILE* out, const char* id);
+  void PrintHeader(OStream& os, const char* id);  // NOLINT
 #endif
   DECLARE_PRINTER(HeapObject)
   DECLARE_VERIFIER(HeapObject)
@@ -1812,17 +1485,15 @@
 class HeapNumber: public HeapObject {
  public:
   // [value]: number value.
-  inline double value();
+  inline double value() const;
   inline void set_value(double value);
 
-  // Casting.
-  static inline HeapNumber* cast(Object* obj);
+  DECLARE_CAST(HeapNumber)
 
   // Dispatched behavior.
   bool HeapNumberBooleanValue();
 
-  void HeapNumberPrint(FILE* out = stdout);
-  void HeapNumberPrint(StringStream* accumulator);
+  void HeapNumberPrint(OStream& os);  // NOLINT
   DECLARE_VERIFIER(HeapNumber)
 
   inline int get_exponent();
@@ -1896,31 +1567,8 @@
     FORCE_DELETION
   };
 
-  // A non-keyed store is of the form a.x = foo or a["x"] = foo whereas
-  // a keyed store is of the form a[expression] = foo.
-  enum StoreFromKeyed {
-    MAY_BE_STORE_FROM_KEYED,
-    CERTAINLY_NOT_STORE_FROM_KEYED
-  };
+  DECLARE_CAST(JSReceiver)
 
-  // Internal properties (e.g. the hidden properties dictionary) might
-  // be added even though the receiver is non-extensible.
-  enum ExtensibilityCheck {
-    PERFORM_EXTENSIBILITY_CHECK,
-    OMIT_EXTENSIBILITY_CHECK
-  };
-
-  // Casting.
-  static inline JSReceiver* cast(Object* obj);
-
-  // Implementation of [[Put]], ECMA-262 5th edition, section 8.12.5.
-  MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
-      Handle<JSReceiver> object,
-      Handle<Name> key,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode,
-      StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
   MUST_USE_RESULT static MaybeHandle<Object> SetElement(
       Handle<JSReceiver> object,
       uint32_t index,
@@ -1929,10 +1577,14 @@
       StrictMode strict_mode);
 
   // Implementation of [[HasProperty]], ECMA-262 5th edition, section 8.12.6.
-  static inline bool HasProperty(Handle<JSReceiver> object, Handle<Name> name);
-  static inline bool HasOwnProperty(Handle<JSReceiver>, Handle<Name> name);
-  static inline bool HasElement(Handle<JSReceiver> object, uint32_t index);
-  static inline bool HasOwnElement(Handle<JSReceiver> object, uint32_t index);
+  MUST_USE_RESULT static inline Maybe<bool> HasProperty(
+      Handle<JSReceiver> object, Handle<Name> name);
+  MUST_USE_RESULT static inline Maybe<bool> HasOwnProperty(Handle<JSReceiver>,
+                                                           Handle<Name> name);
+  MUST_USE_RESULT static inline Maybe<bool> HasElement(
+      Handle<JSReceiver> object, uint32_t index);
+  MUST_USE_RESULT static inline Maybe<bool> HasOwnElement(
+      Handle<JSReceiver> object, uint32_t index);
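
The bool to Maybe<bool> change makes the third state representable: the check itself can throw (an interceptor or proxy trap), which the old bool result forced callers to conflate with "not found". A call-site sketch, assuming the struct-style Maybe of this era with public has_value/value fields:

    Maybe<bool> maybe = JSReceiver::HasProperty(object, name);
    if (!maybe.has_value) return MaybeHandle<Object>();  // exception pending
    if (maybe.value) {
      // the property exists; proceed with the fast path
    }
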
 
   // Implementation of [[Delete]], ECMA-262 5th edition, section 8.12.7.
   MUST_USE_RESULT static MaybeHandle<Object> DeleteProperty(
@@ -1954,23 +1606,17 @@
   // function that was used to instantiate the object).
   String* constructor_name();
 
-  static inline PropertyAttributes GetPropertyAttributes(
-      Handle<JSReceiver> object,
-      Handle<Name> name);
-  static PropertyAttributes GetPropertyAttributes(LookupIterator* it);
-  static PropertyAttributes GetOwnPropertyAttributes(
-      Handle<JSReceiver> object,
-      Handle<Name> name);
+  MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetPropertyAttributes(
+      Handle<JSReceiver> object, Handle<Name> name);
+  MUST_USE_RESULT static Maybe<PropertyAttributes> GetPropertyAttributes(
+      LookupIterator* it);
+  MUST_USE_RESULT static Maybe<PropertyAttributes> GetOwnPropertyAttributes(
+      Handle<JSReceiver> object, Handle<Name> name);
 
-  static inline PropertyAttributes GetElementAttribute(
-      Handle<JSReceiver> object,
-      uint32_t index);
-  static inline PropertyAttributes GetOwnElementAttribute(
-      Handle<JSReceiver> object,
-      uint32_t index);
-
-  // Return the object's prototype (might be Heap::null_value()).
-  inline Object* GetPrototype();
+  MUST_USE_RESULT static inline Maybe<PropertyAttributes> GetElementAttribute(
+      Handle<JSReceiver> object, uint32_t index);
+  MUST_USE_RESULT static inline Maybe<PropertyAttributes>
+      GetOwnElementAttribute(Handle<JSReceiver> object, uint32_t index);
 
   // Return the constructor function (may be Heap::null_value()).
   inline Object* GetConstructor();
@@ -1984,12 +1630,6 @@
   inline static Handle<Smi> GetOrCreateIdentityHash(
       Handle<JSReceiver> object);
 
-  // Lookup a property.  If found, the result is valid and has
-  // detailed information.
-  void LookupOwn(Handle<Name> name, LookupResult* result,
-                 bool search_hidden_prototypes = false);
-  void Lookup(Handle<Name> name, LookupResult* result);
-
   enum KeyCollectionType { OWN_ONLY, INCLUDE_PROTOS };
 
   // Computes the enumerable keys for a JSObject. Used for implementing
@@ -1999,15 +1639,6 @@
       KeyCollectionType type);
 
  private:
-  MUST_USE_RESULT static MaybeHandle<Object> SetProperty(
-      Handle<JSReceiver> receiver,
-      LookupResult* result,
-      Handle<Name> key,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode,
-      StoreFromKeyed store_from_keyed);
-
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSReceiver);
 };
 
@@ -2120,20 +1751,7 @@
                                                    uint32_t limit);
 
   MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithInterceptor(
-      Handle<JSObject> object,
-      Handle<Name> name,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode);
-
-  MUST_USE_RESULT static MaybeHandle<Object> SetPropertyForResult(
-      Handle<JSObject> object,
-      LookupResult* result,
-      Handle<Name> name,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode,
-      StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
+      LookupIterator* it, Handle<Object> value);
 
   // SetLocalPropertyIgnoreAttributes converts callbacks to fields. We need to
   // grant an exemption to ExecutableAccessor callbacks in some cases.
@@ -2147,19 +1765,10 @@
       Handle<Name> key,
       Handle<Object> value,
       PropertyAttributes attributes,
-      ValueType value_type = OPTIMAL_REPRESENTATION,
-      StoreMode mode = ALLOW_AS_CONSTANT,
-      ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
-      StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
       ExecutableAccessorInfoHandling handling = DEFAULT_HANDLING);
 
-  static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
-  static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
-
-  // Try to follow an existing transition to a field with attributes NONE. The
-  // return value indicates whether the transition was successful.
-  static inline Handle<Map> FindTransitionToField(Handle<Map> map,
-                                                  Handle<Name> key);
+  static void AddProperty(Handle<JSObject> object, Handle<Name> key,
+                          Handle<Object> value, PropertyAttributes attributes);
 
   // Extend the receiver with a single fast property appeared first in the
   // passed map. This also extends the property backing store if necessary.
@@ -2173,18 +1782,6 @@
   // or returns false if such a map is not yet available.
   static bool TryMigrateInstance(Handle<JSObject> instance);
 
-  // Retrieve a value in a normalized object given a lookup result.
-  // Handles the special representation of JS global objects.
-  Object* GetNormalizedProperty(const LookupResult* result);
-  static Handle<Object> GetNormalizedProperty(Handle<JSObject> object,
-                                              const LookupResult* result);
-
-  // Sets the property value in a normalized object given a lookup result.
-  // Handles the special representation of JS global objects.
-  static void SetNormalizedProperty(Handle<JSObject> object,
-                                    const LookupResult* result,
-                                    Handle<Object> value);
-
   // Sets the property value in a normalized object given (key, value, details).
   // Handles the special representation of JS global objects.
   static void SetNormalizedProperty(Handle<JSObject> object,
@@ -2192,24 +1789,25 @@
                                     Handle<Object> value,
                                     PropertyDetails details);
 
-  static void OptimizeAsPrototype(Handle<JSObject> object);
+  static void OptimizeAsPrototype(Handle<JSObject> object,
+                                  PrototypeOptimizationMode mode);
+  static void ReoptimizeIfPrototype(Handle<JSObject> object);
 
   // Retrieve interceptors.
   InterceptorInfo* GetNamedInterceptor();
   InterceptorInfo* GetIndexedInterceptor();
 
   // Used from JSReceiver.
-  static Maybe<PropertyAttributes> GetPropertyAttributesWithInterceptor(
-      Handle<JSObject> holder,
-      Handle<Object> receiver,
-      Handle<Name> name);
-  static PropertyAttributes GetPropertyAttributesWithFailedAccessCheck(
-      LookupIterator* it);
-  static PropertyAttributes GetElementAttributeWithReceiver(
-      Handle<JSObject> object,
-      Handle<JSReceiver> receiver,
-      uint32_t index,
-      bool check_prototype);
+  MUST_USE_RESULT static Maybe<PropertyAttributes>
+      GetPropertyAttributesWithInterceptor(Handle<JSObject> holder,
+                                           Handle<Object> receiver,
+                                           Handle<Name> name);
+  MUST_USE_RESULT static Maybe<PropertyAttributes>
+      GetPropertyAttributesWithFailedAccessCheck(LookupIterator* it);
+  MUST_USE_RESULT static Maybe<PropertyAttributes>
+      GetElementAttributeWithReceiver(Handle<JSObject> object,
+                                      Handle<JSReceiver> receiver,
+                                      uint32_t index, bool check_prototype);
 
   // Retrieves an AccessorPair property from the given object. Might return
   // undefined if the property doesn't exist or is of a different kind.
@@ -2219,14 +1817,12 @@
       AccessorComponent component);
 
   // Defines an AccessorPair property on the given object.
-  // TODO(mstarzinger): Rename to SetAccessor() and return empty handle on
-  // exception instead of letting callers check for scheduled exception.
-  static void DefineAccessor(Handle<JSObject> object,
-                             Handle<Name> name,
-                             Handle<Object> getter,
-                             Handle<Object> setter,
-                             PropertyAttributes attributes,
-                             v8::AccessControl access_control = v8::DEFAULT);
+  // TODO(mstarzinger): Rename to SetAccessor().
+  static MaybeHandle<Object> DefineAccessor(Handle<JSObject> object,
+                                            Handle<Name> name,
+                                            Handle<Object> getter,
+                                            Handle<Object> setter,
+                                            PropertyAttributes attributes);
 
   // Defines an AccessorInfo property on the given object.
   MUST_USE_RESULT static MaybeHandle<Object> SetAccessor(
@@ -2315,9 +1911,6 @@
   }
 
   // These methods do not perform access checks!
-  MUST_USE_RESULT static MaybeHandle<AccessorPair> GetOwnPropertyAccessorPair(
-      Handle<JSObject> object,
-      Handle<Name> name);
   MUST_USE_RESULT static MaybeHandle<AccessorPair> GetOwnElementAccessorPair(
       Handle<JSObject> object,
       uint32_t index);
@@ -2386,11 +1979,12 @@
       Handle<JSReceiver> receiver);
 
   // Support functions for the V8 API (needed for correct interceptor behavior).
-  static bool HasRealNamedProperty(Handle<JSObject> object,
-                                   Handle<Name> key);
-  static bool HasRealElementProperty(Handle<JSObject> object, uint32_t index);
-  static bool HasRealNamedCallbackProperty(Handle<JSObject> object,
-                                           Handle<Name> key);
+  MUST_USE_RESULT static Maybe<bool> HasRealNamedProperty(
+      Handle<JSObject> object, Handle<Name> key);
+  MUST_USE_RESULT static Maybe<bool> HasRealElementProperty(
+      Handle<JSObject> object, uint32_t index);
+  MUST_USE_RESULT static Maybe<bool> HasRealNamedCallbackProperty(
+      Handle<JSObject> object, Handle<Name> key);
 
   // Get the header size for a JSObject.  Used to compute the index of
   // internal fields as well as the number of internal fields.
@@ -2402,12 +1996,6 @@
   inline void SetInternalField(int index, Object* value);
   inline void SetInternalField(int index, Smi* value);
 
-  // The following lookup functions skip interceptors.
-  void LookupOwnRealNamedProperty(Handle<Name> name, LookupResult* result);
-  void LookupRealNamedProperty(Handle<Name> name, LookupResult* result);
-  void LookupRealNamedPropertyInPrototypes(Handle<Name> name,
-                                           LookupResult* result);
-
   // Returns the number of properties on this object filtering out properties
   // with the specified attributes (ignoring interceptors).
   int NumberOfOwnProperties(PropertyAttributes filter = NONE);
@@ -2438,13 +2026,7 @@
   static void TransitionElementsKind(Handle<JSObject> object,
                                      ElementsKind to_kind);
 
-  // TODO(mstarzinger): Both public because of ConvertAndSetOwnProperty().
   static void MigrateToMap(Handle<JSObject> object, Handle<Map> new_map);
-  static void GeneralizeFieldRepresentation(Handle<JSObject> object,
-                                            int modify_index,
-                                            Representation new_representation,
-                                            Handle<HeapType> new_field_type,
-                                            StoreMode store_mode);
 
   // Convert the object to use the canonical dictionary
   // representation. If the object is expected to have additional properties
@@ -2460,8 +2042,8 @@
       Handle<JSObject> object);
 
   // Transform slow named properties to fast variants.
-  static void TransformToFastProperties(Handle<JSObject> object,
-                                        int unused_property_fields);
+  static void MigrateSlowToFast(Handle<JSObject> object,
+                                int unused_property_fields);
 
   // Access fast-case object properties at index.
   static Handle<Object> FastPropertyAt(Handle<JSObject> object,
@@ -2481,9 +2063,7 @@
 
   // Set the object's prototype (only JSReceiver and null are allowed values).
   MUST_USE_RESULT static MaybeHandle<Object> SetPrototype(
-      Handle<JSObject> object,
-      Handle<Object> value,
-      bool skip_hidden_prototypes = false);
+      Handle<JSObject> object, Handle<Object> value, bool from_javascript);
 
   // Initializes the body after the properties slot; the properties slot is
   // initialized by set_properties.  Fill the pre-allocated fields with
@@ -2508,10 +2088,7 @@
   static void SetObserved(Handle<JSObject> object);
 
   // Copy object.
-  enum DeepCopyHints {
-    kNoHints = 0,
-    kObjectIsShallowArray = 1
-  };
+  enum DeepCopyHints { kNoHints = 0, kObjectIsShallow = 1 };
 
   static Handle<JSObject> Copy(Handle<JSObject> object);
   MUST_USE_RESULT static MaybeHandle<JSObject> DeepCopy(
@@ -2524,18 +2101,18 @@
 
   static Handle<Object> GetDataProperty(Handle<JSObject> object,
                                         Handle<Name> key);
+  static Handle<Object> GetDataProperty(LookupIterator* it);
 
-  // Casting.
-  static inline JSObject* cast(Object* obj);
+  DECLARE_CAST(JSObject)
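
DECLARE_CAST replaces the hand-written "static inline T* cast(Object*)" boilerplate removed throughout this file. A simplified, self-contained stand-in (the exact V8 expansion may differ, and V8 keeps the checked definitions in objects-inl.h):

#include <cassert>

struct Object {
  virtual ~Object() = default;
  virtual bool IsFixedArray() const { return false; }
};

// One line per class stamps out the checked downcast.
#define DECLARE_CAST_SKETCH(type)                          \
  static type* cast(Object* object) {                      \
    assert(object->Is##type()); /* V8 uses DCHECK here */  \
    return static_cast<type*>(object);                     \
  }

struct FixedArray : Object {
  bool IsFixedArray() const override { return true; }
  DECLARE_CAST_SKETCH(FixedArray)
};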
 
   // Dispatched behavior.
   void JSObjectShortPrint(StringStream* accumulator);
   DECLARE_PRINTER(JSObject)
   DECLARE_VERIFIER(JSObject)
 #ifdef OBJECT_PRINT
-  void PrintProperties(FILE* out = stdout);
-  void PrintElements(FILE* out = stdout);
-  void PrintTransitions(FILE* out = stdout);
+  void PrintProperties(OStream& os);   // NOLINT
+  void PrintElements(OStream& os);     // NOLINT
+  void PrintTransitions(OStream& os);  // NOLINT
 #endif
 
   static void PrintElementsTransition(
@@ -2576,12 +2153,6 @@
 
   Object* SlowReverseLookup(Object* value);
 
-  // Maximal number of fast properties for the JSObject. Used to
-  // restrict the number of map transitions to avoid an explosion in
-  // the number of maps for objects used as dictionaries.
-  inline bool TooManyFastProperties(
-      StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED);
-
   // Maximal number of elements (numbered 0 .. kMaxElementCount - 1).
   // Also maximal value of JSArray's length property.
   static const uint32_t kMaxElementCount = 0xffffffffu;
@@ -2602,15 +2173,13 @@
   static const int kMaxUncheckedOldFastElementsLength = 500;
 
   // Note that Page::kMaxRegularHeapObjectSize puts a limit on
-  // permissible values (see the ASSERT in heap.cc).
+  // permissible values (see the DCHECK in heap.cc).
   static const int kInitialMaxFastElementArray = 100000;
 
   // This constant applies only to the initial map of "$Object" aka
   // "global.Object" and not to arbitrary other JSObject maps.
   static const int kInitialGlobalObjectUnusedPropertiesCount = 4;
 
-  static const int kFastPropertiesSoftLimit = 12;
-  static const int kMaxFastProperties = 128;
   static const int kMaxInstanceSize = 255 * kPointerSize;
   // When extending the backing storage for property values, we increase
   // its size by more than the 1 entry necessary, so sequentially adding fields
@@ -2637,11 +2206,25 @@
                                   Handle<Name> name,
                                   Handle<Object> old_value);
 
+  static void MigrateToNewProperty(Handle<JSObject> object,
+                                   Handle<Map> transition,
+                                   Handle<Object> value);
+
  private:
   friend class DictionaryElementsAccessor;
   friend class JSReceiver;
   friend class Object;
 
+  static void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map);
+  static void MigrateFastToSlow(Handle<JSObject> object,
+                                Handle<Map> new_map,
+                                int expected_additional_properties);
+
+  static void GeneralizeFieldRepresentation(Handle<JSObject> object,
+                                            int modify_index,
+                                            Representation new_representation,
+                                            Handle<HeapType> new_field_type);
+
   static void UpdateAllocationSite(Handle<JSObject> object,
                                    ElementsKind to_kind);
 
@@ -2656,16 +2239,15 @@
       uint32_t index,
       Handle<Object> holder);
 
-  static PropertyAttributes GetElementAttributeWithInterceptor(
-      Handle<JSObject> object,
-      Handle<JSReceiver> receiver,
-      uint32_t index,
-      bool continue_search);
-  static PropertyAttributes GetElementAttributeWithoutInterceptor(
-      Handle<JSObject> object,
-      Handle<JSReceiver> receiver,
-      uint32_t index,
-      bool continue_search);
+  MUST_USE_RESULT static Maybe<PropertyAttributes>
+      GetElementAttributeWithInterceptor(Handle<JSObject> object,
+                                         Handle<JSReceiver> receiver,
+                                         uint32_t index, bool continue_search);
+  MUST_USE_RESULT static Maybe<PropertyAttributes>
+      GetElementAttributeWithoutInterceptor(Handle<JSObject> object,
+                                            Handle<JSReceiver> receiver,
+                                            uint32_t index,
+                                            bool continue_search);
   MUST_USE_RESULT static MaybeHandle<Object> SetElementWithCallback(
       Handle<JSObject> object,
       Handle<Object> structure,
@@ -2711,62 +2293,8 @@
       StrictMode strict_mode,
       bool check_prototype = true);
 
-  // Searches the prototype chain for property 'name'. If it is found and
-  // has a setter, invoke it and set '*done' to true. If it is found and is
-  // read-only, reject and set '*done' to true. Otherwise, set '*done' to
-  // false. Can throw and return an empty handle with '*done==true'.
-  MUST_USE_RESULT static MaybeHandle<Object> SetPropertyViaPrototypes(
-      Handle<JSObject> object,
-      Handle<Name> name,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode,
-      bool* done);
-  MUST_USE_RESULT static MaybeHandle<Object> SetPropertyPostInterceptor(
-      Handle<JSObject> object,
-      Handle<Name> name,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode);
-  MUST_USE_RESULT static MaybeHandle<Object> SetPropertyUsingTransition(
-      Handle<JSObject> object,
-      LookupResult* lookup,
-      Handle<Name> name,
-      Handle<Object> value,
-      PropertyAttributes attributes);
   MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithFailedAccessCheck(
-      Handle<JSObject> object,
-      LookupResult* result,
-      Handle<Name> name,
-      Handle<Object> value,
-      bool check_prototype,
-      StrictMode strict_mode);
-
-  // Add a property to an object.
-  MUST_USE_RESULT static MaybeHandle<Object> AddProperty(
-      Handle<JSObject> object,
-      Handle<Name> name,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode,
-      StoreFromKeyed store_mode = MAY_BE_STORE_FROM_KEYED,
-      ExtensibilityCheck extensibility_check = PERFORM_EXTENSIBILITY_CHECK,
-      ValueType value_type = OPTIMAL_REPRESENTATION,
-      StoreMode mode = ALLOW_AS_CONSTANT,
-      TransitionFlag flag = INSERT_TRANSITION);
-
-  // Add a property to a fast-case object.
-  static void AddFastProperty(Handle<JSObject> object,
-                              Handle<Name> name,
-                              Handle<Object> value,
-                              PropertyAttributes attributes,
-                              StoreFromKeyed store_mode,
-                              ValueType value_type,
-                              TransitionFlag flag);
-
-  static void MigrateToNewProperty(Handle<JSObject> object,
-                                   Handle<Map> transition,
-                                   Handle<Object> value);
+      LookupIterator* it, Handle<Object> value, StrictMode strict_mode);
 
   // Add a property to a slow-case object.
   static void AddSlowProperty(Handle<JSObject> object,
@@ -2778,12 +2306,8 @@
       Handle<JSObject> object,
       Handle<Name> name,
       DeleteMode mode);
-  static Handle<Object> DeletePropertyPostInterceptor(Handle<JSObject> object,
-                                                      Handle<Name> name,
-                                                      DeleteMode mode);
   MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithInterceptor(
-      Handle<JSObject> object,
-      Handle<Name> name);
+      Handle<JSObject> holder, Handle<JSObject> receiver, Handle<Name> name);
 
   // Deletes the named property in a normalized object.
   static Handle<Object> DeleteNormalizedProperty(Handle<JSObject> object,
@@ -2821,25 +2345,7 @@
                                     uint32_t index,
                                     Handle<Object> getter,
                                     Handle<Object> setter,
-                                    PropertyAttributes attributes,
-                                    v8::AccessControl access_control);
-  static Handle<AccessorPair> CreateAccessorPairFor(Handle<JSObject> object,
-                                                    Handle<Name> name);
-  static void DefinePropertyAccessor(Handle<JSObject> object,
-                                     Handle<Name> name,
-                                     Handle<Object> getter,
-                                     Handle<Object> setter,
-                                     PropertyAttributes attributes,
-                                     v8::AccessControl access_control);
-
-  // Try to define a single accessor paying attention to map transitions.
-  // Returns false if this was not possible and we have to use the slow case.
-  static bool DefineFastAccessor(Handle<JSObject> object,
-                                 Handle<Name> name,
-                                 AccessorComponent component,
-                                 Handle<Object> accessor,
-                                 PropertyAttributes attributes);
-
+                                    PropertyAttributes attributes);
 
   // Return the hash table backing store or the inline stored identity hash,
   // whichever is found.
@@ -2869,14 +2375,14 @@
 class FixedArrayBase: public HeapObject {
  public:
   // [length]: length of the array.
-  inline int length();
+  inline int length() const;
   inline void set_length(int value);
 
   // Get and set the length using acquire loads and release stores.
-  inline int synchronized_length();
+  inline int synchronized_length() const;
   inline void synchronized_set_length(int value);
 
-  inline static FixedArrayBase* cast(Object* object);
+  DECLARE_CAST(FixedArrayBase)
 
   // Layout description.
   // Length is smi tagged when it is stored.
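
The synchronized accessors above pair an acquire load with a release store, so a reader that observes the new length also observes the writes that preceded it. A self-contained sketch of that contract using std::atomic (V8 implements it with its own barriered accessors on the Smi-tagged length field):

#include <atomic>

struct ArraySketch {
  std::atomic<int> length{0};

  // Release store: writes sequenced before this call become visible to any
  // thread whose acquire load below sees the stored value.
  void synchronized_set_length(int value) {
    length.store(value, std::memory_order_release);
  }

  // Acquire load: pairs with the release store above.
  int synchronized_length() const {
    return length.load(std::memory_order_acquire);
  }
};
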
@@ -2950,8 +2456,7 @@
     return HeapObject::RawField(this, OffsetOfElementAt(index));
   }
 
-  // Casting.
-  static inline FixedArray* cast(Object* obj);
+  DECLARE_CAST(FixedArray)
 
   // Maximal allowed size, in bytes, of a single FixedArray.
   // Prevents overflowing size computations, as well as extreme memory
@@ -3036,8 +2541,7 @@
   inline static double hole_nan_as_double();
   inline static double canonical_not_the_hole_nan_as_double();
 
-  // Casting.
-  static inline FixedDoubleArray* cast(Object* obj);
+  DECLARE_CAST(FixedDoubleArray)
 
   // Maximal allowed size, in bytes, of a single FixedDoubleArray.
   // Prevents overflowing size computations, as well as extreme memory
@@ -3111,11 +2615,18 @@
 
   enum LayoutSection {
     SMALL_SECTION = 0,
-    EXTENDED_SECTION
+    EXTENDED_SECTION,
+    NUMBER_OF_LAYOUT_SECTIONS
   };
 
   class NumberOfEntries BASE_EMBEDDED {
    public:
+    inline NumberOfEntries() {
+      for (int i = 0; i < NUMBER_OF_TYPES; i++) {
+        element_counts_[i] = 0;
+      }
+    }
+
     inline NumberOfEntries(int int64_count, int code_ptr_count,
                            int heap_ptr_count, int int32_count) {
       element_counts_[INT64] = int64_count;
@@ -3131,27 +2642,13 @@
       element_counts_[INT32] = array->number_of_entries(INT32, section);
     }
 
-    inline int count_of(Type type) const {
-      ASSERT(type < NUMBER_OF_TYPES);
-      return element_counts_[type];
-    }
-
-    inline int total_count() const {
-      int count = 0;
-      for (int i = 0; i < NUMBER_OF_TYPES; i++) {
-        count += element_counts_[i];
-      }
-      return count;
-    }
-
-    inline int are_in_range(int min, int max) const {
-      for (int i = FIRST_TYPE; i < NUMBER_OF_TYPES; i++) {
-        if (element_counts_[i] < min || element_counts_[i] > max) {
-          return false;
-        }
-      }
-      return true;
-    }
+    inline void increment(Type type);
+    inline bool equals(const NumberOfEntries& other) const;
+    inline bool is_empty() const;
+    inline int count_of(Type type) const;
+    inline int base_of(Type type) const;
+    inline int total_count() const;
+    inline bool are_in_range(int min, int max) const;
 
    private:
     int element_counts_[NUMBER_OF_TYPES];
@@ -3160,14 +2657,26 @@
   class Iterator BASE_EMBEDDED {
    public:
     inline Iterator(ConstantPoolArray* array, Type type)
-        : array_(array), type_(type), final_section_(array->final_section()) {
-      current_section_ = SMALL_SECTION;
-      next_index_ = array->first_index(type, SMALL_SECTION);
+        : array_(array),
+          type_(type),
+          final_section_(array->final_section()),
+          current_section_(SMALL_SECTION),
+          next_index_(array->first_index(type, SMALL_SECTION)) {
+      update_section();
+    }
+
+    inline Iterator(ConstantPoolArray* array, Type type, LayoutSection section)
+        : array_(array),
+          type_(type),
+          final_section_(section),
+          current_section_(section),
+          next_index_(array->first_index(type, section)) {
       update_section();
     }
 
     inline int next_index();
     inline bool is_finished();
+
    private:
     inline void update_section();
     ConstantPoolArray* array_;
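
Based only on the declarations above, iterating one entry type now looks roughly like the sketch below; the second constructor restricts the walk to a single layout section. The get_int64_entry accessor is an assumption inferred from the setter family in this class:

// Usage sketch, not verbatim V8 code.
ConstantPoolArray::Iterator it(pool, ConstantPoolArray::INT64);
while (!it.is_finished()) {
  int index = it.next_index();
  int64_t value = pool->get_int64_entry(index);  // accessor assumed
  // ... use value ...
}
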
@@ -3186,6 +2695,7 @@
 
   // Returns the type of the entry at the given index.
   inline Type get_type(int index);
+  inline bool offset_is_type(int offset, Type type);
 
   // Setter and getter for pool elements.
   inline Address get_code_ptr_entry(int index);
@@ -3200,6 +2710,13 @@
   inline void set(int index, double value);
   inline void set(int index, int32_t value);
 
+  // Setters which take a raw offset rather than an index (for code generation).
+  inline void set_at_offset(int offset, int32_t value);
+  inline void set_at_offset(int offset, int64_t value);
+  inline void set_at_offset(int offset, double value);
+  inline void set_at_offset(int offset, Address value);
+  inline void set_at_offset(int offset, Object* value);
+
   // Setter and getter for weak objects state
   inline void set_weak_object_state(WeakObjectState state);
   inline WeakObjectState get_weak_object_state();
@@ -3227,6 +2744,11 @@
   // Garbage collection support.
   inline int size();
 
+
+  inline static int MaxInt64Offset(int number_of_int64) {
+    return kFirstEntryOffset + (number_of_int64 * kInt64Size);
+  }
+
   inline static int SizeFor(const NumberOfEntries& small) {
     int size = kFirstEntryOffset +
         (small.count_of(INT64)  * kInt64Size) +
@@ -3276,7 +2798,7 @@
     }
 
     // Add offsets for the preceding type sections.
-    ASSERT(index <= last_index(LAST_TYPE, section));
+    DCHECK(index <= last_index(LAST_TYPE, section));
     for (Type type = FIRST_TYPE; index > last_index(type, section);
          type = next_type(type)) {
       offset += entry_size(type) * number_of_entries(type, section);
@@ -3288,8 +2810,7 @@
     return offset;
   }
 
-  // Casting.
-  static inline ConstantPoolArray* cast(Object* obj);
+  DECLARE_CAST(ConstantPoolArray)
 
   // Garbage collection support.
   Object** RawFieldOfElementAt(int index) {
@@ -3339,7 +2860,7 @@
   inline int get_extended_section_header_offset();
 
   inline static Type next_type(Type type) {
-    ASSERT(type >= FIRST_TYPE && type < NUMBER_OF_TYPES);
+    DCHECK(type >= FIRST_TYPE && type < NUMBER_OF_TYPES);
     int type_int = static_cast<int>(type);
     return static_cast<Type>(++type_int);
   }
@@ -3365,7 +2886,7 @@
 
   // Returns the number of descriptors in the array.
   int number_of_descriptors() {
-    ASSERT(length() >= kFirstIndex || IsEmpty());
+    DCHECK(length() >= kFirstIndex || IsEmpty());
     int len = length();
     return len == 0 ? 0 : Smi::cast(get(kDescriptorLengthIndex))->value();
   }
@@ -3391,7 +2912,7 @@
   }
 
   FixedArray* GetEnumCache() {
-    ASSERT(HasEnumCache());
+    DCHECK(HasEnumCache());
     FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
     return FixedArray::cast(bridge->get(kEnumCacheBridgeCacheIndex));
   }
@@ -3405,13 +2926,13 @@
   }
 
   FixedArray* GetEnumIndicesCache() {
-    ASSERT(HasEnumIndicesCache());
+    DCHECK(HasEnumIndicesCache());
     FixedArray* bridge = FixedArray::cast(get(kEnumCacheIndex));
     return FixedArray::cast(bridge->get(kEnumCacheBridgeIndicesCacheIndex));
   }
 
   Object** GetEnumCacheSlot() {
-    ASSERT(HasEnumCache());
+    DCHECK(HasEnumCache());
     return HeapObject::RawField(reinterpret_cast<HeapObject*>(this),
                                 kEnumCacheOffset);
   }
@@ -3424,12 +2945,15 @@
                     FixedArray* new_cache,
                     Object* new_index_cache);
 
+  bool CanHoldValue(int descriptor, Object* value);
+
   // Accessors for fetching instance descriptor at descriptor number.
   inline Name* GetKey(int descriptor_number);
   inline Object** GetKeySlot(int descriptor_number);
   inline Object* GetValue(int descriptor_number);
   inline void SetValue(int descriptor_number, Object* value);
   inline Object** GetValueSlot(int descriptor_number);
+  static inline int GetValueOffset(int descriptor_number);
   inline Object** GetDescriptorStartSlot(int descriptor_number);
   inline Object** GetDescriptorEndSlot(int descriptor_number);
   inline PropertyDetails GetDetails(int descriptor_number);
@@ -3482,8 +3006,7 @@
                                           int number_of_descriptors,
                                           int slack = 0);
 
-  // Casting.
-  static inline DescriptorArray* cast(Object* obj);
+  DECLARE_CAST(DescriptorArray)
 
   // Constant for denoting key was not found.
   static const int kNotFound = -1;
@@ -3513,7 +3036,7 @@
 
 #ifdef OBJECT_PRINT
   // Print all the descriptors.
-  void PrintDescriptors(FILE* out = stdout);
+  void PrintDescriptors(OStream& os);  // NOLINT
 #endif
 
 #ifdef DEBUG
@@ -3594,8 +3117,6 @@
                   Descriptor* desc,
                   const WhitenessWitness&);
 
-  inline void Append(Descriptor* desc, const WhitenessWitness&);
-
   // Swap first and second descriptor.
   inline void SwapSortedKeys(int first, int second);
 
@@ -3652,12 +3173,12 @@
   static const bool UsesSeed = false;
   static uint32_t Hash(Key key) { return 0; }
   static uint32_t SeededHash(Key key, uint32_t seed) {
-    ASSERT(UsesSeed);
+    DCHECK(UsesSeed);
     return Hash(key);
   }
   static uint32_t HashForObject(Key key, Object* object) { return 0; }
   static uint32_t SeededHashForObject(Key key, uint32_t seed, Object* object) {
-    ASSERT(UsesSeed);
+    DCHECK(UsesSeed);
     return HashForObject(key, object);
   }
 };
@@ -3736,8 +3257,7 @@
   void IteratePrefix(ObjectVisitor* visitor);
   void IterateElements(ObjectVisitor* visitor);
 
-  // Casting.
-  static inline HashTable* cast(Object* obj);
+  DECLARE_CAST(HashTable)
 
   // Compute the probe offset (quadratic probing).
   INLINE(static uint32_t GetProbeOffset(uint32_t n)) {
@@ -3799,15 +3319,15 @@
     // To scale a computed hash code to fit within the hash table, we
     // use bit-wise AND with a mask, so the capacity must be positive
     // and non-zero.
-    ASSERT(capacity > 0);
-    ASSERT(capacity <= kMaxCapacity);
+    DCHECK(capacity > 0);
+    DCHECK(capacity <= kMaxCapacity);
     set(kCapacityIndex, Smi::FromInt(capacity));
   }
 
 
   // Returns probe entry.
   static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
-    ASSERT(IsPowerOf2(size));
+    DCHECK(base::bits::IsPowerOfTwo32(size));
     return (hash + GetProbeOffset(number)) & (size - 1);
   }
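
GetProbe above depends on the capacity being a power of two, so the bitwise AND with (size - 1) substitutes exactly for a modulo. With triangular probe offsets (assumed here as (n + n*n)/2, the classic choice for GetProbeOffset) the sequence visits every slot before repeating. A self-contained worked example:

#include <cstdint>
#include <cstdio>

static uint32_t GetProbeOffset(uint32_t n) { return (n + n * n) >> 1; }

static uint32_t GetProbe(uint32_t hash, uint32_t number, uint32_t size) {
  return (hash + GetProbeOffset(number)) & (size - 1);  // size must be 2^k
}

int main() {
  // For hash 7 in a table of 8 this prints 7 0 2 5 1 6 4 3: all 8 slots.
  for (uint32_t n = 0; n < 8; n++) printf("%u ", GetProbe(7, n, 8));
}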
 
@@ -3910,11 +3430,11 @@
       uint16_t c1,
       uint16_t c2);
 
-  // Casting.
-  static inline StringTable* cast(Object* obj);
+  DECLARE_CAST(StringTable)
 
  private:
-  template <bool seq_ascii> friend class JsonParser;
+  template <bool seq_one_byte>
+  friend class JsonParser;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringTable);
 };
@@ -3951,7 +3471,7 @@
   Object* Lookup(FixedArray* key);
   static Handle<MapCache> Put(
       Handle<MapCache> map_cache, Handle<FixedArray> key, Handle<Map> value);
-  static inline MapCache* cast(Object* obj);
+  DECLARE_CAST(MapCache)
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(MapCache);
@@ -3964,10 +3484,6 @@
   typedef HashTable<Derived, Shape, Key> DerivedHashTable;
 
  public:
-  static inline Dictionary* cast(Object* obj) {
-    return reinterpret_cast<Dictionary*>(obj);
-  }
-
   // Returns the value at entry.
   Object* ValueAt(int entry) {
     return this->get(DerivedHashTable::EntryToIndex(entry) + 1);
@@ -3980,7 +3496,7 @@
 
   // Returns the property details for the property at entry.
   PropertyDetails DetailsAt(int entry) {
-    ASSERT(entry >= 0);  // Not found is -1, which is not caught by get().
+    DCHECK(entry >= 0);  // Not found is -1, which is not caught by get().
     return PropertyDetails(
         Smi::cast(this->get(DerivedHashTable::EntryToIndex(entry) + 2)));
   }
@@ -4026,7 +3542,7 @@
 
   // Accessors for next enumeration index.
   void SetNextEnumerationIndex(int index) {
-    ASSERT(index != 0);
+    DCHECK(index != 0);
     this->set(kNextEnumerationIndexIndex, Smi::FromInt(index));
   }
 
@@ -4044,7 +3560,7 @@
   static Handle<Derived> EnsureCapacity(Handle<Derived> obj, int n, Key key);
 
 #ifdef OBJECT_PRINT
-  void Print(FILE* out = stdout);
+  void Print(OStream& os);  // NOLINT
 #endif
   // Returns the key (slow).
   Object* SlowReverseLookup(Object* value);
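
The Print(OStream&) signatures replacing Print(FILE*) throughout this header let callers target any stream rather than a raw file handle. A self-contained sketch of the pattern, with std::ostream standing in for V8's OStream abstraction:

#include <iostream>
#include <sstream>

void PrintSketch(std::ostream& os) { os << "key: value\n"; }

int main() {
  PrintSketch(std::cout);     // was: Print(stdout)
  std::ostringstream buffer;  // now possible: capture into a string
  PrintSketch(buffer);
}
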
@@ -4105,10 +3621,7 @@
       NameDictionary, NameDictionaryShape, Handle<Name> > DerivedDictionary;
 
  public:
-  static inline NameDictionary* cast(Object* obj) {
-    ASSERT(obj->IsDictionary());
-    return reinterpret_cast<NameDictionary*>(obj);
-  }
+  DECLARE_CAST(NameDictionary)
 
   // Copies enumerable keys to preallocated fixed array.
   void CopyEnumKeysTo(FixedArray* storage);
@@ -4156,10 +3669,7 @@
                         SeededNumberDictionaryShape,
                         uint32_t> {
  public:
-  static SeededNumberDictionary* cast(Object* obj) {
-    ASSERT(obj->IsDictionary());
-    return reinterpret_cast<SeededNumberDictionary*>(obj);
-  }
+  DECLARE_CAST(SeededNumberDictionary)
 
   // Type specific at put (default NONE attributes is used when adding).
   MUST_USE_RESULT static Handle<SeededNumberDictionary> AtNumberPut(
@@ -4207,10 +3717,7 @@
                         UnseededNumberDictionaryShape,
                         uint32_t> {
  public:
-  static UnseededNumberDictionary* cast(Object* obj) {
-    ASSERT(obj->IsDictionary());
-    return reinterpret_cast<UnseededNumberDictionary*>(obj);
-  }
+  DECLARE_CAST(UnseededNumberDictionary)
 
   // Type specific at put (default NONE attributes is used when adding).
   MUST_USE_RESULT static Handle<UnseededNumberDictionary> AtNumberPut(
@@ -4250,10 +3757,7 @@
   typedef HashTable<
       ObjectHashTable, ObjectHashTableShape, Handle<Object> > DerivedHashTable;
  public:
-  static inline ObjectHashTable* cast(Object* obj) {
-    ASSERT(obj->IsHashTable());
-    return reinterpret_cast<ObjectHashTable*>(obj);
-  }
+  DECLARE_CAST(ObjectHashTable)
 
   // Attempt to shrink hash table after removal of key.
   MUST_USE_RESULT static inline Handle<ObjectHashTable> Shrink(
@@ -4348,6 +3852,9 @@
       bool* was_present);
 
   // Returns kNotFound if the key isn't present.
+  int FindEntry(Handle<Object> key, int hash);
+
+  // Like the above, but doesn't require the caller to provide a hash.
   int FindEntry(Handle<Object> key);
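
A hedged usage sketch of the two-argument overload above: a caller that has already computed the key's hash passes it through instead of paying for a second hash computation.

// Usage sketch, not verbatim V8 code; `hash` was computed by the caller.
int entry = table->FindEntry(key, hash);
if (entry == ObjectHashTable::kNotFound) {
  // Key absent. Prefer the one-argument overload only when no hash is
  // already at hand.
}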
 
   int NumberOfElements() {
@@ -4462,10 +3969,7 @@
 class OrderedHashSet: public OrderedHashTable<
     OrderedHashSet, JSSetIterator, 1> {
  public:
-  static OrderedHashSet* cast(Object* obj) {
-    ASSERT(obj->IsOrderedHashTable());
-    return reinterpret_cast<OrderedHashSet*>(obj);
-  }
+  DECLARE_CAST(OrderedHashSet)
 
   bool Contains(Handle<Object> key);
   static Handle<OrderedHashSet> Add(
@@ -4479,10 +3983,7 @@
 class OrderedHashMap:public OrderedHashTable<
     OrderedHashMap, JSMapIterator, 2> {
  public:
-  static OrderedHashMap* cast(Object* obj) {
-    ASSERT(obj->IsOrderedHashTable());
-    return reinterpret_cast<OrderedHashMap*>(obj);
-  }
+  DECLARE_CAST(OrderedHashMap)
 
   Object* Lookup(Handle<Object> key);
   static Handle<OrderedHashMap> Put(
@@ -4490,11 +3991,11 @@
       Handle<Object> key,
       Handle<Object> value);
 
- private:
   Object* ValueAt(int entry) {
     return get(EntryToIndex(entry) + kValueOffset);
   }
 
+ private:
   static const int kValueOffset = 1;
 };
 
@@ -4520,10 +4021,7 @@
   typedef HashTable<
       WeakHashTable, WeakHashTableShape<2>, Handle<Object> > DerivedHashTable;
  public:
-  static inline WeakHashTable* cast(Object* obj) {
-    ASSERT(obj->IsHashTable());
-    return reinterpret_cast<WeakHashTable*>(obj);
-  }
+  DECLARE_CAST(WeakHashTable)
 
   // Looks up the value associated with the given key. The hole value is
   // returned in case the key is not present.
@@ -4585,8 +4083,7 @@
   inline int finger_index();
   inline void set_finger_index(int finger_index);
 
-  // Casting
-  static inline JSFunctionResultCache* cast(Object* obj);
+  DECLARE_CAST(JSFunctionResultCache)
 
   DECLARE_VERIFIER(JSFunctionResultCache)
 };
@@ -4601,7 +4098,7 @@
 // routines.
 class ScopeInfo : public FixedArray {
  public:
-  static inline ScopeInfo* cast(Object* object);
+  DECLARE_CAST(ScopeInfo)
 
   // Return the type of this scope.
   ScopeType scope_type();
@@ -4643,6 +4140,12 @@
   // Return if contexts are allocated for this scope.
   bool HasContext();
 
+  // Return if this is a function scope with "use asm".
+  bool IsAsmModule() { return AsmModuleField::decode(Flags()); }
+
+  // Return if this is a nested function within an asm module scope.
+  bool IsAsmFunction() { return AsmFunctionField::decode(Flags()); }
+
   // Return the function_name if present.
   String* FunctionName();
 
@@ -4664,6 +4167,9 @@
   // Return the initialization flag of the given context local.
   InitializationFlag ContextLocalInitFlag(int var);
 
+  // Return the maybe-assigned flag of the given context local.
+  MaybeAssignedFlag ContextLocalMaybeAssignedFlag(int var);
+
   // Return true if this local was introduced by the compiler, and should not be
   // exposed to the user in a debugger.
   bool LocalIsSynthetic(int var);
@@ -4679,10 +4185,9 @@
   // returns a value < 0. The name must be an internalized string.
   // If the slot is present and mode != NULL, sets *mode to the corresponding
   // mode for that variable.
-  static int ContextSlotIndex(Handle<ScopeInfo> scope_info,
-                              Handle<String> name,
-                              VariableMode* mode,
-                              InitializationFlag* init_flag);
+  static int ContextSlotIndex(Handle<ScopeInfo> scope_info, Handle<String> name,
+                              VariableMode* mode, InitializationFlag* init_flag,
+                              MaybeAssignedFlag* maybe_assigned_flag);
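
A hedged usage sketch of the widened lookup above (variable names illustrative): the new out-parameter reports whether the context local may have been assigned after initialization, and a negative return still means the slot is absent.

VariableMode mode;
InitializationFlag init_flag;
MaybeAssignedFlag maybe_assigned;
int slot = ScopeInfo::ContextSlotIndex(scope_info, name, &mode, &init_flag,
                                       &maybe_assigned);
if (slot < 0) {
  // Not a context-allocated local in this scope.
}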
 
   // Lookup support for serialized scope info. Returns the
   // parameter index for a given parameter name if the parameter is present;
@@ -4795,11 +4300,15 @@
   class StrictModeField:       public BitField<StrictMode,           4, 1> {};
   class FunctionVariableField: public BitField<FunctionVariableInfo, 5, 2> {};
   class FunctionVariableMode:  public BitField<VariableMode,         7, 3> {};
+  class AsmModuleField : public BitField<bool, 10, 1> {};
+  class AsmFunctionField : public BitField<bool, 11, 1> {};
 
   // BitFields representing the encoded information for context locals in the
   // ContextLocalInfoEntries part.
   class ContextLocalMode:      public BitField<VariableMode,         0, 3> {};
   class ContextLocalInitFlag:  public BitField<InitializationFlag,   3, 1> {};
+  class ContextLocalMaybeAssignedFlag
+      : public BitField<MaybeAssignedFlag, 4, 1> {};
 };
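
The flag words above are carved up with V8's BitField template. A self-contained, simplified sketch of how encode/decode/update work, mirroring the two new ScopeInfo bits (AsmModuleField at bit 10, AsmFunctionField at bit 11):

#include <cassert>
#include <cstdint>

template <class T, int shift, int size>
struct BitFieldSketch {
  static const uint32_t kMask = ((1u << size) - 1) << shift;
  static const uint32_t kMax = (1u << size) - 1;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t flags) {
    return static_cast<T>((flags & kMask) >> shift);
  }
  static uint32_t update(uint32_t flags, T value) {
    return (flags & ~kMask) | encode(value);
  }
};

using AsmModuleField = BitFieldSketch<bool, 10, 1>;
using AsmFunctionField = BitFieldSketch<bool, 11, 1>;

int main() {
  uint32_t flags = 0;
  flags = AsmModuleField::update(flags, true);
  assert(AsmModuleField::decode(flags));
  assert(!AsmFunctionField::decode(flags));
}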
 
 
@@ -4816,9 +4325,9 @@
 
   void Clear();
 
-  // Casting
-  static inline NormalizedMapCache* cast(Object* obj);
-  static inline bool IsNormalizedMapCache(Object* obj);
+  DECLARE_CAST(NormalizedMapCache)
+
+  static inline bool IsNormalizedMapCache(const Object* obj);
 
   DECLARE_VERIFIER(NormalizedMapCache)
  private:
@@ -4853,8 +4362,8 @@
   // array, this function returns the number of elements a byte array should
   // have.
   static int LengthFor(int size_in_bytes) {
-    ASSERT(IsAligned(size_in_bytes, kPointerSize));
-    ASSERT(size_in_bytes >= kHeaderSize);
+    DCHECK(IsAligned(size_in_bytes, kPointerSize));
+    DCHECK(size_in_bytes >= kHeaderSize);
     return size_in_bytes - kHeaderSize;
   }
 
@@ -4864,8 +4373,7 @@
   // Returns a pointer to the ByteArray object for a given data start address.
   static inline ByteArray* FromDataStartAddress(Address address);
 
-  // Casting.
-  static inline ByteArray* cast(Object* obj);
+  DECLARE_CAST(ByteArray)
 
   // Dispatched behavior.
   inline int ByteArraySize() {
@@ -4892,16 +4400,15 @@
 class FreeSpace: public HeapObject {
  public:
   // [size]: size of the free space including the header.
-  inline int size();
+  inline int size() const;
   inline void set_size(int value);
 
-  inline int nobarrier_size();
+  inline int nobarrier_size() const;
   inline void nobarrier_set_size(int value);
 
   inline int Size() { return size(); }
 
-  // Casting.
-  static inline FreeSpace* cast(Object* obj);
+  DECLARE_CAST(FreeSpace)
 
   // Dispatched behavior.
   DECLARE_PRINTER(FreeSpace)
@@ -4952,8 +4459,7 @@
   // external array.
   DECL_ACCESSORS(external_pointer, void)  // Pointer to the data store.
 
-  // Casting.
-  static inline ExternalArray* cast(Object* obj);
+  DECLARE_CAST(ExternalArray)
 
   // Maximal acceptable length for an external array.
   static const int kMaxLength = 0x3fffffff;
@@ -4993,8 +4499,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalUint8ClampedArray* cast(Object* obj);
+  DECLARE_CAST(ExternalUint8ClampedArray)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalUint8ClampedArray)
@@ -5018,8 +4523,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalInt8Array* cast(Object* obj);
+  DECLARE_CAST(ExternalInt8Array)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalInt8Array)
@@ -5043,8 +4547,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalUint8Array* cast(Object* obj);
+  DECLARE_CAST(ExternalUint8Array)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalUint8Array)
@@ -5068,8 +4571,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalInt16Array* cast(Object* obj);
+  DECLARE_CAST(ExternalInt16Array)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalInt16Array)
@@ -5094,8 +4596,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalUint16Array* cast(Object* obj);
+  DECLARE_CAST(ExternalUint16Array)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalUint16Array)
@@ -5119,8 +4620,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalInt32Array* cast(Object* obj);
+  DECLARE_CAST(ExternalInt32Array)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalInt32Array)
@@ -5145,8 +4645,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalUint32Array* cast(Object* obj);
+  DECLARE_CAST(ExternalUint32Array)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalUint32Array)
@@ -5171,8 +4670,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalFloat32Array* cast(Object* obj);
+  DECLARE_CAST(ExternalFloat32Array)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalFloat32Array)
@@ -5197,8 +4695,7 @@
                                  uint32_t index,
                                  Handle<Object> value);
 
-  // Casting.
-  static inline ExternalFloat64Array* cast(Object* obj);
+  DECLARE_CAST(ExternalFloat64Array)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExternalFloat64Array)
@@ -5211,8 +4708,7 @@
 
 class FixedTypedArrayBase: public FixedArrayBase {
  public:
-  // Casting:
-  static inline FixedTypedArrayBase* cast(Object* obj);
+  DECLARE_CAST(FixedTypedArrayBase)
 
   static const int kDataOffset = kHeaderSize;
 
@@ -5238,8 +4734,7 @@
   typedef typename Traits::ElementType ElementType;
   static const InstanceType kInstanceType = Traits::kInstanceType;
 
-  // Casting:
-  static inline FixedTypedArray<Traits>* cast(Object* obj);
+  DECLARE_CAST(FixedTypedArray<Traits>)
 
   static inline int ElementOffset(int index) {
     return kDataOffset + index * sizeof(ElementType);
@@ -5332,12 +4827,12 @@
 #undef DEFINE_ELEMENT_ACCESSORS
 
   // Accessors for elements of the ith deoptimization entry.
-#define DEFINE_ENTRY_ACCESSORS(name, type)                       \
-  type* name(int i) {                                            \
-    return type::cast(get(IndexForEntry(i) + k##name##Offset));  \
-  }                                                              \
-  void Set##name(int i, type* value) {                           \
-    set(IndexForEntry(i) + k##name##Offset, value);              \
+#define DEFINE_ENTRY_ACCESSORS(name, type)                      \
+  type* name(int i) {                                           \
+    return type::cast(get(IndexForEntry(i) + k##name##Offset)); \
+  }                                                             \
+  void Set##name(int i, type* value) {                          \
+    set(IndexForEntry(i) + k##name##Offset, value);             \
   }
 
   DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi)
@@ -5345,7 +4840,7 @@
   DEFINE_ENTRY_ACCESSORS(ArgumentsStackHeight, Smi)
   DEFINE_ENTRY_ACCESSORS(Pc, Smi)
 
 #undef DEFINE_ENTRY_ACCESSORS
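
For reference, the macro expands mechanically; DEFINE_ENTRY_ACCESSORS(AstIdRaw, Smi), used just above, generates (whitespace aside):

Smi* AstIdRaw(int i) {
  return Smi::cast(get(IndexForEntry(i) + kAstIdRawOffset));
}
void SetAstIdRaw(int i, Smi* value) {
  set(IndexForEntry(i) + kAstIdRawOffset, value);
}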
 
   BailoutId AstId(int i) {
     return BailoutId(AstIdRaw(i)->value());
@@ -5364,11 +4859,10 @@
                                              int deopt_entry_count,
                                              PretenureFlag pretenure);
 
-  // Casting.
-  static inline DeoptimizationInputData* cast(Object* obj);
+  DECLARE_CAST(DeoptimizationInputData)
 
 #ifdef ENABLE_DISASSEMBLER
-  void DeoptimizationInputDataPrint(FILE* out);
+  void DeoptimizationInputDataPrint(OStream& os);  // NOLINT
 #endif
 
  private:
@@ -5376,9 +4870,8 @@
     return kFirstDeoptEntryIndex + (i * kDeoptEntrySize);
   }
 
-  static int LengthFor(int entry_count) {
-    return IndexForEntry(entry_count);
-  }
+
+  static int LengthFor(int entry_count) { return IndexForEntry(entry_count); }
 };
 
 
@@ -5411,11 +4904,10 @@
                                               int number_of_deopt_points,
                                               PretenureFlag pretenure);
 
-  // Casting.
-  static inline DeoptimizationOutputData* cast(Object* obj);
+  DECLARE_CAST(DeoptimizationOutputData)
 
 #if defined(OBJECT_PRINT) || defined(ENABLE_DISASSEMBLER)
-  void DeoptimizationOutputDataPrint(FILE* out);
+  void DeoptimizationOutputDataPrint(OStream& os);  // NOLINT
 #endif
 };
 
@@ -5481,12 +4973,13 @@
   // Printing
   static const char* ICState2String(InlineCacheState state);
   static const char* StubType2String(StubType type);
-  static void PrintExtraICState(FILE* out, Kind kind, ExtraICState extra);
-  void Disassemble(const char* name, FILE* out = stdout);
+  static void PrintExtraICState(OStream& os,  // NOLINT
+                                Kind kind, ExtraICState extra);
+  void Disassemble(const char* name, OStream& os);  // NOLINT
 #endif  // ENABLE_DISASSEMBLER
 
   // [instruction_size]: Size of the native instructions
-  inline int instruction_size();
+  inline int instruction_size() const;
   inline void set_instruction_size(int value);
 
   // [relocation_info]: Code relocation information
@@ -5503,13 +4996,13 @@
   // [raw_type_feedback_info]: This field stores various things, depending on
   // the kind of the code object.
   //   FUNCTION           => type feedback information.
-  //   STUB               => various things, e.g. a SMI
+  //   STUB and ICs       => major/minor key as Smi.
   DECL_ACCESSORS(raw_type_feedback_info, Object)
   inline Object* type_feedback_info();
   inline void set_type_feedback_info(
       Object* value, WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-  inline int stub_info();
-  inline void set_stub_info(int info);
+  inline uint32_t stub_key();
+  inline void set_stub_key(uint32_t key);
 
   // [next_code_link]: Link for lists of optimized or deoptimized code.
   // Note that storage for this field is overlapped with type_feedback_info.
@@ -5523,11 +5016,11 @@
   // [ic_age]: Inline caching age: the value of the Heap::global_ic_age
   // at the moment when this object was created.
   inline void set_ic_age(int count);
-  inline int ic_age();
+  inline int ic_age() const;
 
   // [prologue_offset]: Offset of the function prologue, used for aging
   // FUNCTIONs and OPTIMIZED_FUNCTIONs.
-  inline int prologue_offset();
+  inline int prologue_offset() const;
   inline void set_prologue_offset(int offset);
 
   // Unchecked accessors to be used during GC.
@@ -5573,19 +5066,23 @@
            ic_state() == MONOMORPHIC;
   }
 
+  inline bool IsCodeStubOrIC();
+
   inline void set_raw_kind_specific_flags1(int value);
   inline void set_raw_kind_specific_flags2(int value);
 
-  // [major_key]: For kind STUB or BINARY_OP_IC, the major key.
-  inline int major_key();
-  inline void set_major_key(int value);
-  inline bool has_major_key();
-
-  // For kind STUB or ICs, tells whether or not a code object was generated by
-  // the optimizing compiler (but it may not be an optimized function).
-  bool is_crankshafted();
+  // [is_crankshafted]: For kind STUB or ICs, tells whether or not a code
+  // object was generated by either the Hydrogen or the TurboFan optimizing
+  // compiler (but it may not be an optimized function).
+  inline bool is_crankshafted();
+  inline bool is_hydrogen_stub();  // Crankshafted, but not a function.
   inline void set_is_crankshafted(bool value);
 
+  // [is_turbofanned]: For kind STUB or OPTIMIZED_FUNCTION, tells whether the
+  // code object was generated by the TurboFan optimizing compiler.
+  inline bool is_turbofanned();
+  inline void set_is_turbofanned(bool value);
+
   // [optimizable]: For FUNCTION kind, tells if it is optimizable.
   inline bool optimizable();
   inline void set_optimizable(bool value);
@@ -5617,12 +5114,16 @@
   inline int profiler_ticks();
   inline void set_profiler_ticks(int ticks);
 
+  // [builtin_index]: For BUILTIN kind, tells which builtin index it has.
+  inline int builtin_index();
+  inline void set_builtin_index(int id);
+
   // [stack_slots]: For kind OPTIMIZED_FUNCTION, the number of stack slots
   // reserved in the code prologue.
   inline unsigned stack_slots();
   inline void set_stack_slots(unsigned slots);
 
-  // [safepoint_table_start]: For kind OPTIMIZED_CODE, the offset in
+  // [safepoint_table_start]: For kind OPTIMIZED_FUNCTION, the offset in
   // the instruction stream where the safepoint table starts.
   inline unsigned safepoint_table_offset();
   inline void set_safepoint_table_offset(unsigned offset);
@@ -5633,7 +5134,6 @@
   inline void set_back_edge_table_offset(unsigned offset);
 
   inline bool back_edges_patched_for_osr();
-  inline void set_back_edges_patched_for_osr(bool value);
 
   // [to_boolean_foo]: For kind TO_BOOLEAN_IC tells what state the stub is in.
   inline byte to_boolean_state();
@@ -5673,6 +5173,9 @@
   // enough handlers can be found.
   bool FindHandlers(CodeHandleList* code_list, int length = -1);
 
+  // Find the handler for |map|.
+  MaybeHandle<Code> FindHandlerForMap(Map* map);
+
   // Find the first name in an IC stub.
   Name* FindFirstName();
 
@@ -5694,30 +5197,26 @@
 
   // Flags operations.
   static inline Flags ComputeFlags(
-      Kind kind,
-      InlineCacheState ic_state = UNINITIALIZED,
-      ExtraICState extra_ic_state = kNoExtraICState,
-      StubType type = NORMAL,
-      InlineCacheHolderFlag holder = OWN_MAP);
+      Kind kind, InlineCacheState ic_state = UNINITIALIZED,
+      ExtraICState extra_ic_state = kNoExtraICState, StubType type = NORMAL,
+      CacheHolderFlag holder = kCacheOnReceiver);
 
   static inline Flags ComputeMonomorphicFlags(
-      Kind kind,
-      ExtraICState extra_ic_state = kNoExtraICState,
-      InlineCacheHolderFlag holder = OWN_MAP,
-      StubType type = NORMAL);
+      Kind kind, ExtraICState extra_ic_state = kNoExtraICState,
+      CacheHolderFlag holder = kCacheOnReceiver, StubType type = NORMAL);
 
   static inline Flags ComputeHandlerFlags(
-      Kind handler_kind,
-      StubType type = NORMAL,
-      InlineCacheHolderFlag holder = OWN_MAP);
+      Kind handler_kind, StubType type = NORMAL,
+      CacheHolderFlag holder = kCacheOnReceiver);
 
   static inline InlineCacheState ExtractICStateFromFlags(Flags flags);
   static inline StubType ExtractTypeFromFlags(Flags flags);
+  static inline CacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
   static inline Kind ExtractKindFromFlags(Flags flags);
-  static inline InlineCacheHolderFlag ExtractCacheHolderFromFlags(Flags flags);
   static inline ExtraICState ExtractExtraICStateFromFlags(Flags flags);
 
   static inline Flags RemoveTypeFromFlags(Flags flags);
+  static inline Flags RemoveTypeAndHolderFromFlags(Flags flags);
 
   // Convert a target address into a code object.
   static inline Code* GetCodeFromTargetAddress(Address address);
@@ -5752,7 +5251,7 @@
 
   // Returns the object size for a given body (used for allocation).
   static int SizeFor(int body_size) {
-    ASSERT_SIZE_TAG_ALIGNED(body_size);
+    DCHECK_SIZE_TAG_ALIGNED(body_size);
     return RoundUp(kHeaderSize + body_size, kCodeAlignment);
   }
 
@@ -5760,7 +5259,7 @@
   // the layout of the code object into account.
   int ExecutableSize() {
     // Check that the assumptions about the layout of the code object holds.
-    ASSERT_EQ(static_cast<int>(instruction_start() - address()),
+    DCHECK_EQ(static_cast<int>(instruction_start() - address()),
               Code::kHeaderSize);
     return instruction_size() + Code::kHeaderSize;
   }
@@ -5769,8 +5268,7 @@
   int SourcePosition(Address pc);
   int SourceStatementPosition(Address pc);
 
-  // Casting.
-  static inline Code* cast(Object* obj);
+  DECLARE_CAST(Code)
 
   // Dispatched behavior.
   int CodeSize() { return SizeFor(body_size()); }
@@ -5832,7 +5330,8 @@
   }
 
   inline bool IsWeakObject(Object* object) {
-    return (is_optimized_code() && IsWeakObjectInOptimizedCode(object)) ||
+    return (is_optimized_code() && !is_turbofanned() &&
+            IsWeakObjectInOptimizedCode(object)) ||
            (is_weak_stub() && IsWeakObjectInIC(object));
   }
 
@@ -5849,6 +5348,7 @@
   static const int kHandlerTableOffset = kRelocationInfoOffset + kPointerSize;
   static const int kDeoptimizationDataOffset =
       kHandlerTableOffset + kPointerSize;
+  // For FUNCTION kind, we store the type feedback info here.
   static const int kTypeFeedbackInfoOffset =
       kDeoptimizationDataOffset + kPointerSize;
   static const int kNextCodeLinkOffset = kTypeFeedbackInfoOffset + kPointerSize;
@@ -5879,52 +5379,40 @@
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
   class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
 
-  static const int kAllowOSRAtLoopNestingLevelOffset = kFullCodeFlags + 1;
-  static const int kProfilerTicksOffset = kAllowOSRAtLoopNestingLevelOffset + 1;
+  static const int kProfilerTicksOffset = kFullCodeFlags + 1;
 
   // Flags layout.  BitField<type, shift, size>.
-  class ICStateField: public BitField<InlineCacheState, 0, 3> {};
-  class TypeField: public BitField<StubType, 3, 1> {};
-  class CacheHolderField: public BitField<InlineCacheHolderFlag, 5, 1> {};
-  class KindField: public BitField<Kind, 6, 4> {};
-  // TODO(bmeurer): Bit 10 is available for free use. :-)
+  class ICStateField : public BitField<InlineCacheState, 0, 4> {};
+  class TypeField : public BitField<StubType, 4, 1> {};
+  class CacheHolderField : public BitField<CacheHolderFlag, 5, 2> {};
+  class KindField : public BitField<Kind, 7, 4> {};
   class ExtraICStateField: public BitField<ExtraICState, 11,
       PlatformSmiTagging::kSmiValueSize - 11 + 1> {};  // NOLINT
 
   // KindSpecificFlags1 layout (STUB and OPTIMIZED_FUNCTION)
   static const int kStackSlotsFirstBit = 0;
   static const int kStackSlotsBitCount = 24;
-  static const int kHasFunctionCacheFirstBit =
+  static const int kHasFunctionCacheBit =
       kStackSlotsFirstBit + kStackSlotsBitCount;
-  static const int kHasFunctionCacheBitCount = 1;
-  static const int kMarkedForDeoptimizationFirstBit =
-      kStackSlotsFirstBit + kStackSlotsBitCount + 1;
-  static const int kMarkedForDeoptimizationBitCount = 1;
-  static const int kWeakStubFirstBit =
-      kMarkedForDeoptimizationFirstBit + kMarkedForDeoptimizationBitCount;
-  static const int kWeakStubBitCount = 1;
-  static const int kInvalidatedWeakStubFirstBit =
-      kWeakStubFirstBit + kWeakStubBitCount;
-  static const int kInvalidatedWeakStubBitCount = 1;
+  static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1;
+  static const int kWeakStubBit = kMarkedForDeoptimizationBit + 1;
+  static const int kInvalidatedWeakStubBit = kWeakStubBit + 1;
+  static const int kIsTurbofannedBit = kInvalidatedWeakStubBit + 1;
 
   STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
-  STATIC_ASSERT(kHasFunctionCacheFirstBit + kHasFunctionCacheBitCount <= 32);
-  STATIC_ASSERT(kInvalidatedWeakStubFirstBit +
-                kInvalidatedWeakStubBitCount <= 32);
+  STATIC_ASSERT(kIsTurbofannedBit + 1 <= 32);
 
   class StackSlotsField: public BitField<int,
       kStackSlotsFirstBit, kStackSlotsBitCount> {};  // NOLINT
-  class HasFunctionCacheField: public BitField<bool,
-      kHasFunctionCacheFirstBit, kHasFunctionCacheBitCount> {};  // NOLINT
-  class MarkedForDeoptimizationField: public BitField<bool,
-      kMarkedForDeoptimizationFirstBit,
-      kMarkedForDeoptimizationBitCount> {};  // NOLINT
-  class WeakStubField: public BitField<bool,
-      kWeakStubFirstBit,
-      kWeakStubBitCount> {};  // NOLINT
-  class InvalidatedWeakStubField: public BitField<bool,
-      kInvalidatedWeakStubFirstBit,
-      kInvalidatedWeakStubBitCount> {};  // NOLINT
+  class HasFunctionCacheField : public BitField<bool, kHasFunctionCacheBit, 1> {
+  };  // NOLINT
+  class MarkedForDeoptimizationField
+      : public BitField<bool, kMarkedForDeoptimizationBit, 1> {};   // NOLINT
+  class WeakStubField : public BitField<bool, kWeakStubBit, 1> {};  // NOLINT
+  class InvalidatedWeakStubField
+      : public BitField<bool, kInvalidatedWeakStubBit, 1> {};  // NOLINT
+  class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> {
+  };  // NOLINT
 
   // KindSpecificFlags2 layout (ALL)
   static const int kIsCrankshaftedBit = 0;
@@ -5932,28 +5420,23 @@
       kIsCrankshaftedBit, 1> {};  // NOLINT
 
   // KindSpecificFlags2 layout (STUB and OPTIMIZED_FUNCTION)
-  static const int kStubMajorKeyFirstBit = kIsCrankshaftedBit + 1;
-  static const int kSafepointTableOffsetFirstBit =
-      kStubMajorKeyFirstBit + kStubMajorKeyBits;
+  static const int kSafepointTableOffsetFirstBit = kIsCrankshaftedBit + 1;
   static const int kSafepointTableOffsetBitCount = 24;
 
-  STATIC_ASSERT(kStubMajorKeyFirstBit + kStubMajorKeyBits <= 32);
   STATIC_ASSERT(kSafepointTableOffsetFirstBit +
                 kSafepointTableOffsetBitCount <= 32);
-  STATIC_ASSERT(1 + kStubMajorKeyBits +
-                kSafepointTableOffsetBitCount <= 32);
+  STATIC_ASSERT(1 + kSafepointTableOffsetBitCount <= 32);
 
   class SafepointTableOffsetField: public BitField<int,
       kSafepointTableOffsetFirstBit,
       kSafepointTableOffsetBitCount> {};  // NOLINT
-  class StubMajorKeyField: public BitField<int,
-      kStubMajorKeyFirstBit, kStubMajorKeyBits> {};  // NOLINT
 
   // KindSpecificFlags2 layout (FUNCTION)
   class BackEdgeTableOffsetField: public BitField<int,
-      kIsCrankshaftedBit + 1, 29> {};  // NOLINT
-  class BackEdgesPatchedForOSRField: public BitField<bool,
-      kIsCrankshaftedBit + 1 + 29, 1> {};  // NOLINT
+      kIsCrankshaftedBit + 1, 27> {};  // NOLINT
+  class AllowOSRAtLoopNestingLevelField: public BitField<int,
+      kIsCrankshaftedBit + 1 + 27, 4> {};  // NOLINT
+  STATIC_ASSERT(AllowOSRAtLoopNestingLevelField::kMax >= kMaxLoopNestingMarker);
 
   static const int kArgumentsBits = 16;
   static const int kMaxArguments = (1 << kArgumentsBits) - 1;
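
A self-contained check of the bit budget above: 16 argument bits cap kMaxArguments at 2^16 - 1, and the FUNCTION layout spends 1 + 27 + 4 = 32 bits exactly, the 4 OSR bits holding values up to 15 (which the STATIC_ASSERT requires to cover kMaxLoopNestingMarker):

#include <cassert>

int main() {
  const int kArgumentsBits = 16;
  const int kMaxArguments = (1 << kArgumentsBits) - 1;
  assert(kMaxArguments == 65535);

  // kIsCrankshaftedBit (1) + BackEdgeTableOffsetField (27)
  // + AllowOSRAtLoopNestingLevelField (4) fill the 32-bit word.
  assert(1 + 27 + 4 == 32);
  assert(((1 << 4) - 1) == 15);  // AllowOSRAtLoopNestingLevelField::kMax
}
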
@@ -6042,10 +5525,11 @@
     kAllocationSiteTenuringChangedGroup,
     // Group of code that depends on element transition information in
     // AllocationSites not being changed.
-    kAllocationSiteTransitionChangedGroup,
-    kGroupCount = kAllocationSiteTransitionChangedGroup + 1
+    kAllocationSiteTransitionChangedGroup
   };
 
+  static const int kGroupCount = kAllocationSiteTransitionChangedGroup + 1;
+
   // Array for holding the index of the first code object of each group.
   // The last element stores the total number of code objects.
   class GroupStartIndexes {
@@ -6087,11 +5571,14 @@
   inline Object* object_at(int i);
   inline void clear_at(int i);
   inline void copy(int from, int to);
-  static inline DependentCode* cast(Object* object);
+  DECLARE_CAST(DependentCode)
 
   static DependentCode* ForObject(Handle<HeapObject> object,
                                   DependencyGroup group);
 
+  static const char* DependencyGroupName(DependencyGroup group);
+  static void SetMarkedForDeoptimization(Code* code, DependencyGroup group);
+
  private:
   // Make room at the end of the given group by moving out the first
   // code objects of the subsequent groups.
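A note on the DECLARE_CAST(DependentCode) line above, since the same substitution recurs throughout this patch: it replaces the hand-written static cast() declaration with a macro, and call sites are unchanged. Roughly (a sketch, not the verbatim macro, whose definition lives elsewhere in this header):

// DECLARE_CAST(type) is expected to declare checked-cast helpers of the form
//   static inline type* cast(Object* object);
//   static inline const type* cast(const Object* object);
// so existing call sites keep compiling unchanged, e.g.:
//   DependentCode* codes = DependentCode::cast(some_object);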
@@ -6146,15 +5633,16 @@
   class NumberOfOwnDescriptorsBits: public BitField<int,
       kDescriptorIndexBitCount, kDescriptorIndexBitCount> {};  // NOLINT
   STATIC_ASSERT(kDescriptorIndexBitCount + kDescriptorIndexBitCount == 20);
-  class IsShared:                   public BitField<bool, 20,  1> {};
-  class DictionaryMap:              public BitField<bool, 21,  1> {};
-  class OwnsDescriptors:            public BitField<bool, 22,  1> {};
-  class HasInstanceCallHandler:     public BitField<bool, 23,  1> {};
-  class Deprecated:                 public BitField<bool, 24,  1> {};
-  class IsFrozen:                   public BitField<bool, 25,  1> {};
-  class IsUnstable:                 public BitField<bool, 26,  1> {};
-  class IsMigrationTarget:          public BitField<bool, 27,  1> {};
-  class DoneInobjectSlackTracking:  public BitField<bool, 28,  1> {};
+  class DictionaryMap : public BitField<bool, 20, 1> {};
+  class OwnsDescriptors : public BitField<bool, 21, 1> {};
+  class HasInstanceCallHandler : public BitField<bool, 22, 1> {};
+  class Deprecated : public BitField<bool, 23, 1> {};
+  class IsFrozen : public BitField<bool, 24, 1> {};
+  class IsUnstable : public BitField<bool, 25, 1> {};
+  class IsMigrationTarget : public BitField<bool, 26, 1> {};
+  class DoneInobjectSlackTracking : public BitField<bool, 27, 1> {};
+  // Bit 28 is free.
+
   // Keep this bit field at the very end for better code in
   // Builtins::kJSConstructStubGeneric stub.
   class ConstructionCount:          public BitField<int, 29, 3> {};
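A quick audit of the bit_field3 layout above: bits 0..19 hold the two 10-bit descriptor counts, bits 20..27 the eight boolean flags, bit 28 is explicitly left free, and ConstructionCount takes bits 29..31. An illustrative standalone check of that budget (not part of the patch):

static_assert(10 + 10 + 8 + 1 + 3 == 32,
              "two descriptor counts, eight flag bits, one free bit and "
              "ConstructionCount exactly fill the 32-bit bit_field3");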
@@ -6226,12 +5714,14 @@
 
   inline void set_is_extensible(bool value);
   inline bool is_extensible();
+  inline void set_is_prototype_map(bool value);
+  inline bool is_prototype_map();
 
   inline void set_elements_kind(ElementsKind elements_kind) {
-    ASSERT(elements_kind < kElementsKindCount);
-    ASSERT(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize));
+    DCHECK(elements_kind < kElementsKindCount);
+    DCHECK(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize));
     set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
-    ASSERT(this->elements_kind() == elements_kind);
+    DCHECK(this->elements_kind() == elements_kind);
   }
 
   inline ElementsKind elements_kind() {
@@ -6288,17 +5778,24 @@
   // map with DICTIONARY_ELEMENTS was found in the prototype chain.
   bool DictionaryElementsInPrototypeChainOnly();
 
-  inline bool HasTransitionArray();
+  inline bool HasTransitionArray() const;
   inline bool HasElementsTransition();
   inline Map* elements_transition_map();
-  static Handle<TransitionArray> SetElementsTransitionMap(
-      Handle<Map> map, Handle<Map> transitioned_map);
+
   inline Map* GetTransition(int transition_index);
   inline int SearchTransition(Name* name);
   inline FixedArrayBase* GetInitialElements();
 
   DECL_ACCESSORS(transitions, TransitionArray)
 
+  static inline Handle<String> ExpectedTransitionKey(Handle<Map> map);
+  static inline Handle<Map> ExpectedTransitionTarget(Handle<Map> map);
+
+  // Try to follow an existing transition to a field with attributes NONE. The
+  // return value indicates whether the transition was successful.
+  static inline Handle<Map> FindTransitionToField(Handle<Map> map,
+                                                  Handle<Name> key);
+
   Map* FindRootMap();
   Map* FindFieldOwner(int descriptor);
 
@@ -6306,15 +5803,16 @@
 
   int NumberOfFields();
 
-  bool InstancesNeedRewriting(Map* target,
-                              int target_number_of_fields,
-                              int target_inobject,
-                              int target_unused);
+  // TODO(ishell): candidate with JSObject::MigrateToMap().
+  bool InstancesNeedRewriting(Map* target, int target_number_of_fields,
+                              int target_inobject, int target_unused,
+                              int* old_number_of_fields);
+  // TODO(ishell): moveit!
   static Handle<Map> GeneralizeAllFieldRepresentations(Handle<Map> map);
-  static Handle<HeapType> GeneralizeFieldType(Handle<HeapType> type1,
-                                              Handle<HeapType> type2,
-                                              Isolate* isolate)
-      V8_WARN_UNUSED_RESULT;
+  MUST_USE_RESULT static Handle<HeapType> GeneralizeFieldType(
+      Handle<HeapType> type1,
+      Handle<HeapType> type2,
+      Isolate* isolate);
   static void GeneralizeFieldType(Handle<Map> map,
                                   int modify_index,
                                   Handle<HeapType> new_field_type);
@@ -6336,18 +5834,16 @@
       StoreMode store_mode,
       const char* reason);
 
+  static Handle<Map> PrepareForDataProperty(Handle<Map> old_map,
+                                            int descriptor_number,
+                                            Handle<Object> value);
+
   static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode);
 
   // Returns the constructor name (the name, possibly inferred, of the
   // function that was used to instantiate the object).
   String* constructor_name();
 
-  // Tells whether the map is shared between objects that may have different
-  // behavior. If true, the map should never be modified, instead a clone
-  // should be created and modified.
-  inline void set_is_shared(bool value);
-  inline bool is_shared();
-
   // Tells whether the map is used for JSObjects in dictionary mode (i.e.
   // normalized objects, i.e. objects for which HasFastProperties returns false).
   // A map can never be used for both dictionary mode and fast mode JSObjects.
@@ -6414,7 +5910,7 @@
 
   inline void SetNumberOfProtoTransitions(int value) {
     FixedArray* cache = GetPrototypeTransitions();
-    ASSERT(cache->length() != 0);
+    DCHECK(cache->length() != 0);
     cache->set(kProtoTransitionNumberOfEntriesOffset, Smi::FromInt(value));
   }
 
@@ -6438,7 +5934,7 @@
 
   int LastAdded() {
     int number_of_own_descriptors = NumberOfOwnDescriptors();
-    ASSERT(number_of_own_descriptors > 0);
+    DCHECK(number_of_own_descriptors > 0);
     return number_of_own_descriptors - 1;
   }
 
@@ -6447,7 +5943,7 @@
   }
 
   void SetNumberOfOwnDescriptors(int number) {
-    ASSERT(number <= instance_descriptors()->number_of_descriptors());
+    DCHECK(number <= instance_descriptors()->number_of_descriptors());
     set_bit_field3(NumberOfOwnDescriptorsBits::update(bit_field3(), number));
   }
 
@@ -6459,15 +5955,15 @@
 
   void SetEnumLength(int length) {
     if (length != kInvalidEnumCacheSentinel) {
-      ASSERT(length >= 0);
-      ASSERT(length == 0 || instance_descriptors()->HasEnumCache());
-      ASSERT(length <= NumberOfOwnDescriptors());
+      DCHECK(length >= 0);
+      DCHECK(length == 0 || instance_descriptors()->HasEnumCache());
+      DCHECK(length <= NumberOfOwnDescriptors());
     }
     set_bit_field3(EnumLengthBits::update(bit_field3(), length));
   }
 
   inline bool owns_descriptors();
-  inline void set_owns_descriptors(bool is_shared);
+  inline void set_owns_descriptors(bool owns_descriptors);
   inline bool has_instance_call_handler();
   inline void set_has_instance_call_handler();
   inline void freeze();
@@ -6488,11 +5984,15 @@
   // is found by re-transitioning from the root of the transition tree using the
   // descriptor array of the map. Returns NULL if no updated map is found.
   // This method also applies any pending migrations along the prototype chain.
-  static MaybeHandle<Map> CurrentMapForDeprecated(Handle<Map> map)
-      V8_WARN_UNUSED_RESULT;
+  static MaybeHandle<Map> TryUpdate(Handle<Map> map) WARN_UNUSED_RESULT;
   // Same as above, but does not touch the prototype chain.
-  static MaybeHandle<Map> CurrentMapForDeprecatedInternal(Handle<Map> map)
-      V8_WARN_UNUSED_RESULT;
+  static MaybeHandle<Map> TryUpdateInternal(Handle<Map> map)
+      WARN_UNUSED_RESULT;
+
+  // Returns a non-deprecated version of the input. This method may deprecate
+  // existing maps along the way if encodings conflict. Not for use while
+  // gathering type feedback. Use TryUpdate in those cases instead.
+  static Handle<Map> Update(Handle<Map> map);
 
   static Handle<Map> CopyDropDescriptors(Handle<Map> map);
   static Handle<Map> CopyInsertDescriptor(Handle<Map> map,
@@ -6528,14 +6028,27 @@
   static Handle<Map> CopyForObserved(Handle<Map> map);
 
   static Handle<Map> CopyForFreeze(Handle<Map> map);
+  // Maximal number of fast properties. Used to restrict the number of map
+  // transitions to avoid an explosion in the number of maps for objects used as
+  // dictionaries.
+  inline bool TooManyFastProperties(StoreFromKeyed store_mode);
+  static Handle<Map> TransitionToDataProperty(Handle<Map> map,
+                                              Handle<Name> name,
+                                              Handle<Object> value,
+                                              PropertyAttributes attributes,
+                                              StoreFromKeyed store_mode);
+  static Handle<Map> TransitionToAccessorProperty(
+      Handle<Map> map, Handle<Name> name, AccessorComponent component,
+      Handle<Object> accessor, PropertyAttributes attributes);
+  static Handle<Map> ReconfigureDataProperty(Handle<Map> map, int descriptor,
+                                             PropertyAttributes attributes);
 
   inline void AppendDescriptor(Descriptor* desc);
 
   // Returns a copy of the map, with all transitions dropped from the
   // instance descriptors.
   static Handle<Map> Copy(Handle<Map> map);
-  static Handle<Map> Create(Handle<JSFunction> constructor,
-                            int extra_inobject_properties);
+  static Handle<Map> Create(Isolate* isolate, int inobject_properties);
 
   // Returns the next free property index (only valid for FAST MODE).
   int NextFreePropertyIndex();
@@ -6552,8 +6065,7 @@
         inobject_properties();
   }
 
-  // Casting.
-  static inline Map* cast(Object* obj);
+  DECLARE_CAST(Map)
 
   // Code cache operations.
 
@@ -6600,7 +6112,6 @@
   // elements_kind that's found in |candidates|, or null handle if no match is
   // found at all.
   Handle<Map> FindTransitionedMap(MapHandleList* candidates);
-  Map* FindTransitionedMap(MapList* candidates);
 
   bool CanTransition() {
     // Only JSObject and subtypes have map transitions and back pointers.
@@ -6645,7 +6156,7 @@
   DECLARE_VERIFIER(Map)
 
 #ifdef VERIFY_HEAP
-  void SharedMapVerify();
+  void DictionaryMapVerify();
   void VerifyOmittedMapChecks();
 #endif
 
@@ -6707,17 +6218,20 @@
 #if V8_TARGET_LITTLE_ENDIAN
   // Order instance type and bit field together such that they can be loaded
   // together as a 16-bit word with instance type in the lower 8 bits regardless
-  // of endianess.
+  // of endianness. Also provide endian-independent offset to that 16-bit word.
   static const int kInstanceTypeOffset = kInstanceAttributesOffset + 0;
   static const int kBitFieldOffset = kInstanceAttributesOffset + 1;
 #else
   static const int kBitFieldOffset = kInstanceAttributesOffset + 0;
   static const int kInstanceTypeOffset = kInstanceAttributesOffset + 1;
 #endif
+  static const int kInstanceTypeAndBitFieldOffset =
+      kInstanceAttributesOffset + 0;
   static const int kBitField2Offset = kInstanceAttributesOffset + 2;
   static const int kUnusedPropertyFieldsOffset = kInstanceAttributesOffset + 3;
 
-  STATIC_ASSERT(kInstanceTypeOffset == Internals::kMapInstanceTypeOffset);
+  STATIC_ASSERT(kInstanceTypeAndBitFieldOffset ==
+                Internals::kMapInstanceTypeAndBitFieldOffset);
 
   // Bit positions for bit field.
   static const int kHasNonInstancePrototype = 0;
@@ -6732,7 +6246,7 @@
   // Bit positions for bit field 2
   static const int kIsExtensible = 0;
   static const int kStringWrapperSafeForDefaultValueOf = 1;
-  // Currently bit 2 is not used.
+  class IsPrototypeMapBits : public BitField<bool, 2, 1> {};
   class ElementsKindBits: public BitField<ElementsKind, 3, 5> {};
 
   // Derived values from bit field 2
@@ -6759,6 +6273,10 @@
   bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
 
  private:
+  static void ConnectElementsTransition(Handle<Map> parent, Handle<Map> child);
+  static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
+                                Handle<Name> name, SimpleTransitionFlag flag);
+
   bool EquivalentToForTransition(Map* other);
   static Handle<Map> RawCopy(Handle<Map> map, int instance_size);
   static Handle<Map> ShareDescriptor(Handle<Map> map,
@@ -6784,8 +6302,7 @@
                                            TransitionFlag flag);
 
   static Handle<Map> CopyNormalized(Handle<Map> map,
-                                    PropertyNormalizationMode mode,
-                                    NormalizedMapSharingMode sharing);
+                                    PropertyNormalizationMode mode);
 
   // Fires when the layout of an object with a leaf map changes.
   // This includes adding transitions to the leaf map or changing
@@ -6809,7 +6326,8 @@
 
   Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
 
-  void UpdateDescriptor(int descriptor_number, Descriptor* desc);
+  void UpdateFieldType(int descriptor_number, Handle<Name> name,
+                       Handle<HeapType> new_type);
 
   void PrintGeneralization(FILE* file,
                            const char* reason,
@@ -6832,6 +6350,9 @@
                                             Handle<Object> prototype,
                                             Handle<Map> target_map);
 
+  static const int kFastPropertiesSoftLimit = 12;
+  static const int kMaxFastProperties = 128;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Map);
 };
 
@@ -6842,7 +6363,7 @@
 class Struct: public HeapObject {
  public:
   inline void InitializeBody(int object_size);
-  static inline Struct* cast(Object* that);
+  DECLARE_CAST(Struct)
 };
 
 
@@ -6852,7 +6373,7 @@
   // [value]: the boxed contents.
   DECL_ACCESSORS(value, Object)
 
-  static inline Box* cast(Object* obj);
+  DECLARE_CAST(Box)
 
   // Dispatched behavior.
   DECLARE_PRINTER(Box)
@@ -6927,6 +6448,12 @@
   // [flags]: Holds an exciting bitfield.
   DECL_ACCESSORS(flags, Smi)
 
+  // [source_url]: sourceURL from magic comment
+  DECL_ACCESSORS(source_url, Object)
+
+  // [source_mapping_url]: sourceMappingURL from magic comment
+  DECL_ACCESSORS(source_mapping_url, Object)
+
   // [compilation_type]: how the script was compiled. Encoded in the
   // 'flags' field.
   inline CompilationType compilation_type();
@@ -6943,7 +6470,7 @@
   // the 'flags' field.
   DECL_BOOLEAN_ACCESSORS(is_shared_cross_origin)
 
-  static inline Script* cast(Object* obj);
+  DECLARE_CAST(Script)
 
   // If script source is an external string, check that the underlying
   // resource is accessible. Otherwise, always return true.
@@ -6964,6 +6491,7 @@
 
   // Get the JS object wrapping the given script; create it if none exists.
   static Handle<JSObject> GetWrapper(Handle<Script> script);
+  void ClearWrapperCache();
 
   // Dispatched behavior.
   DECLARE_PRINTER(Script)
@@ -6983,7 +6511,9 @@
       kEvalFromSharedOffset + kPointerSize;
   static const int kFlagsOffset =
       kEvalFrominstructionsOffsetOffset + kPointerSize;
-  static const int kSize = kFlagsOffset + kPointerSize;
+  static const int kSourceUrlOffset = kFlagsOffset + kPointerSize;
+  static const int kSourceMappingUrlOffset = kSourceUrlOffset + kPointerSize;
+  static const int kSize = kSourceMappingUrlOffset + kPointerSize;
 
  private:
   int GetLineNumberWithArray(int code_pos);
@@ -7006,27 +6536,29 @@
 //
 // Installation of ids for the selected builtin functions is handled
 // by the bootstrapper.
-#define FUNCTIONS_WITH_ID_LIST(V)                     \
-  V(Array.prototype, indexOf, ArrayIndexOf)           \
-  V(Array.prototype, lastIndexOf, ArrayLastIndexOf)   \
-  V(Array.prototype, push, ArrayPush)                 \
-  V(Array.prototype, pop, ArrayPop)                   \
-  V(Array.prototype, shift, ArrayShift)               \
-  V(Function.prototype, apply, FunctionApply)         \
-  V(String.prototype, charCodeAt, StringCharCodeAt)   \
-  V(String.prototype, charAt, StringCharAt)           \
-  V(String, fromCharCode, StringFromCharCode)         \
-  V(Math, floor, MathFloor)                           \
-  V(Math, round, MathRound)                           \
-  V(Math, ceil, MathCeil)                             \
-  V(Math, abs, MathAbs)                               \
-  V(Math, log, MathLog)                               \
-  V(Math, exp, MathExp)                               \
-  V(Math, sqrt, MathSqrt)                             \
-  V(Math, pow, MathPow)                               \
-  V(Math, max, MathMax)                               \
-  V(Math, min, MathMin)                               \
-  V(Math, imul, MathImul)
+#define FUNCTIONS_WITH_ID_LIST(V)                   \
+  V(Array.prototype, indexOf, ArrayIndexOf)         \
+  V(Array.prototype, lastIndexOf, ArrayLastIndexOf) \
+  V(Array.prototype, push, ArrayPush)               \
+  V(Array.prototype, pop, ArrayPop)                 \
+  V(Array.prototype, shift, ArrayShift)             \
+  V(Function.prototype, apply, FunctionApply)       \
+  V(String.prototype, charCodeAt, StringCharCodeAt) \
+  V(String.prototype, charAt, StringCharAt)         \
+  V(String, fromCharCode, StringFromCharCode)       \
+  V(Math, floor, MathFloor)                         \
+  V(Math, round, MathRound)                         \
+  V(Math, ceil, MathCeil)                           \
+  V(Math, abs, MathAbs)                             \
+  V(Math, log, MathLog)                             \
+  V(Math, exp, MathExp)                             \
+  V(Math, sqrt, MathSqrt)                           \
+  V(Math, pow, MathPow)                             \
+  V(Math, max, MathMax)                             \
+  V(Math, min, MathMin)                             \
+  V(Math, imul, MathImul)                           \
+  V(Math, clz32, MathClz32)                         \
+  V(Math, fround, MathFround)
 
 enum BuiltinFunctionId {
   kArrayCode,
@@ -7036,9 +6568,7 @@
 #undef DECLARE_FUNCTION_ID
   // Fake id for a special case of Math.pow. Note that it continues the
   // list of math functions.
-  kMathPowHalf,
-  // Installed only on --harmony-maths.
-  kMathClz32
+  kMathPowHalf
 };
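FUNCTIONS_WITH_ID_LIST is an X-macro: it is expanded once here (via DECLARE_FUNCTION_ID) to mint the enum values, and expanded again elsewhere so the bootstrapper can install ids on the matching builtins. A self-contained sketch of the pattern, with hypothetical names:

#define DEMO_FUNCTIONS(V)   \
  V(Math, floor, MathFloor) \
  V(Math, imul, MathImul)

enum DemoFunctionId {
#define DEMO_DECLARE_ID(holder, fun, name) k##name,
  DEMO_FUNCTIONS(DEMO_DECLARE_ID)
#undef DEMO_DECLARE_ID
  kDemoFunctionCount  // Pseudo entry.
};
// DemoFunctionId now defines kMathFloor == 0 and kMathImul == 1.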
 
 
@@ -7107,11 +6637,11 @@
 
   // [length]: The function length - usually the number of declared parameters.
   // Use up to 2^30 parameters.
-  inline int length();
+  inline int length() const;
   inline void set_length(int value);
 
   // [formal parameter count]: The declared number of parameters.
-  inline int formal_parameter_count();
+  inline int formal_parameter_count() const;
   inline void set_formal_parameter_count(int value);
 
   // Set the formal parameter count so the function code will be
@@ -7119,14 +6649,13 @@
   inline void DontAdaptArguments();
 
   // [expected_nof_properties]: Expected number of properties for the function.
-  inline int expected_nof_properties();
+  inline int expected_nof_properties() const;
   inline void set_expected_nof_properties(int value);
 
   // [feedback_vector] - accumulates ast node feedback from full-codegen and
   // (increasingly) from crankshafted code where sufficient feedback isn't
-  // available. Currently the field is duplicated in
-  // TypeFeedbackInfo::feedback_vector, but the allocation is done here.
-  DECL_ACCESSORS(feedback_vector, FixedArray)
+  // available.
+  DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)
 
   // [instance class name]: class name for instances.
   DECL_ACCESSORS(instance_class_name, Object)
@@ -7148,7 +6677,7 @@
   DECL_ACCESSORS(script, Object)
 
   // [num_literals]: Number of literals used by this function.
-  inline int num_literals();
+  inline int num_literals() const;
   inline void set_num_literals(int value);
 
   // [start_position_and_type]: Field used to store both the source code
@@ -7156,7 +6685,7 @@
   // and whether or not the function is a toplevel function. The two
   // least significant bits indicate whether the function is an
   // expression and the rest contains the source code position.
-  inline int start_position_and_type();
+  inline int start_position_and_type() const;
   inline void set_start_position_and_type(int value);
 
   // [debug info]: Debug information.
@@ -7173,15 +6702,15 @@
   String* DebugName();
 
   // Position of the 'function' token in the script source.
-  inline int function_token_position();
+  inline int function_token_position() const;
   inline void set_function_token_position(int function_token_position);
 
   // Position of this function in the script source.
-  inline int start_position();
+  inline int start_position() const;
   inline void set_start_position(int start_position);
 
   // End position of this function in the script source.
-  inline int end_position();
+  inline int end_position() const;
   inline void set_end_position(int end_position);
 
   // Is this function a function expression in the source code.
@@ -7192,13 +6721,13 @@
 
   // Bit field containing various information collected by the compiler to
   // drive optimization.
-  inline int compiler_hints();
+  inline int compiler_hints() const;
   inline void set_compiler_hints(int value);
 
-  inline int ast_node_count();
+  inline int ast_node_count() const;
   inline void set_ast_node_count(int count);
 
-  inline int profiler_ticks();
+  inline int profiler_ticks() const;
   inline void set_profiler_ticks(int ticks);
 
   // Inline cache age is used to infer whether the function survived a context
@@ -7259,12 +6788,6 @@
   // Is this a function or top-level/eval code.
   DECL_BOOLEAN_ACCESSORS(is_function)
 
-  // Indicates that the function cannot be optimized.
-  DECL_BOOLEAN_ACCESSORS(dont_optimize)
-
-  // Indicates that the function cannot be inlined.
-  DECL_BOOLEAN_ACCESSORS(dont_inline)
-
   // Indicates that code for this function cannot be cached.
   DECL_BOOLEAN_ACCESSORS(dont_cache)
 
@@ -7274,6 +6797,18 @@
   // Indicates that this function is a generator.
   DECL_BOOLEAN_ACCESSORS(is_generator)
 
+  // Indicates that this function is an arrow function.
+  DECL_BOOLEAN_ACCESSORS(is_arrow)
+
+  // Indicates that this function is a concise method.
+  DECL_BOOLEAN_ACCESSORS(is_concise_method)
+
+  // Indicates that this function is an asm function.
+  DECL_BOOLEAN_ACCESSORS(asm_function)
+
+  inline FunctionKind kind();
+  inline void set_kind(FunctionKind kind);
+
   // Indicates whether or not the code in the shared function support
   // deoptimization.
   inline bool has_deoptimization_support();
@@ -7287,13 +6822,13 @@
 
   inline BailoutReason DisableOptimizationReason();
 
-  // Lookup the bailout ID and ASSERT that it exists in the non-optimized
+  // Look up the bailout ID and DCHECK that it exists in the non-optimized
   // code, returns whether it asserted (i.e., always true if assertions are
   // disabled).
   bool VerifyBailoutId(BailoutId id);
 
   // [source code]: Source code for the function.
-  bool HasSourceCode();
+  bool HasSourceCode() const;
   Handle<Object> GetSourceCode();
 
   // Number of times the function was optimized.
@@ -7314,11 +6849,11 @@
 
   // Stores deopt_count, opt_reenable_tries and ic_age as bit-fields.
   inline void set_counters(int value);
-  inline int counters();
+  inline int counters() const;
 
   // Stores opt_count and bailout_reason as bit-fields.
   inline void set_opt_count_and_bailout_reason(int value);
-  inline int opt_count_and_bailout_reason();
+  inline int opt_count_and_bailout_reason() const;
 
   void set_bailout_reason(BailoutReason reason) {
     set_opt_count_and_bailout_reason(
@@ -7326,11 +6861,6 @@
                                                reason));
   }
 
-  void set_dont_optimize_reason(BailoutReason reason) {
-    set_bailout_reason(reason);
-    set_dont_optimize(reason != kNoReason);
-  }
-
   // Check whether or not this function is inlineable.
   bool IsInlineable();
 
@@ -7344,15 +6874,12 @@
   int CalculateInObjectProperties();
 
   // Dispatched behavior.
-  // Set max_length to -1 for unlimited length.
-  void SourceCodePrint(StringStream* accumulator, int max_length);
   DECLARE_PRINTER(SharedFunctionInfo)
   DECLARE_VERIFIER(SharedFunctionInfo)
 
   void ResetForNewContext(int new_ic_age);
 
-  // Casting.
-  static inline SharedFunctionInfo* cast(Object* obj);
+  DECLARE_CAST(SharedFunctionInfo)
 
   // Constants.
   static const int kDontAdaptArgumentsSentinel = -1;
@@ -7474,20 +7001,23 @@
     kIsAnonymous,
     kNameShouldPrintAsAnonymous,
     kIsFunction,
-    kDontOptimize,
-    kDontInline,
     kDontCache,
     kDontFlush,
+    kIsArrow,
     kIsGenerator,
+    kIsConciseMethod,
+    kIsAsmFunction,
     kCompilerHintsCount  // Pseudo entry
   };
 
-  class DeoptCountBits: public BitField<int, 0, 4> {};
-  class OptReenableTriesBits: public BitField<int, 4, 18> {};
-  class ICAgeBits: public BitField<int, 22, 8> {};
+  class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 3> {};
 
-  class OptCountBits: public BitField<int, 0, 22> {};
-  class DisabledOptimizationReasonBits: public BitField<int, 22, 8> {};
+  class DeoptCountBits : public BitField<int, 0, 4> {};
+  class OptReenableTriesBits : public BitField<int, 4, 18> {};
+  class ICAgeBits : public BitField<int, 22, 8> {};
+
+  class OptCountBits : public BitField<int, 0, 22> {};
+  class DisabledOptimizationReasonBits : public BitField<int, 22, 8> {};
 
  private:
 #if V8_HOST_ARCH_32_BIT
@@ -7534,6 +7064,18 @@
 };
 
 
+// Printing support.
+struct SourceCodeOf {
+  explicit SourceCodeOf(SharedFunctionInfo* v, int max = -1)
+      : value(v), max_length(max) {}
+  const SharedFunctionInfo* value;
+  int max_length;
+};
+
+
+OStream& operator<<(OStream& os, const SourceCodeOf& v);
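SourceCodeOf bundles a function with a length limit so that the operator<< overload can print a truncated source listing; judging by the SourceCodePrint comment it replaces, -1 (the default) means unlimited. A hedged usage sketch (stream setup elided):

//   SharedFunctionInfo* info = ...;
//   os << SourceCodeOf(info);       // entire source
//   os << SourceCodeOf(info, 80);   // at most ~80 characters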
+
+
 class JSGeneratorObject: public JSObject {
  public:
   // [function]: The function corresponding to this generator object.
@@ -7550,7 +7092,7 @@
   // A positive offset indicates a suspended generator.  The special
   // kGeneratorExecuting and kGeneratorClosed values indicate that a generator
   // cannot be resumed.
-  inline int continuation();
+  inline int continuation() const;
   inline void set_continuation(int continuation);
   inline bool is_closed();
   inline bool is_executing();
@@ -7561,11 +7103,10 @@
 
   // [stack_handler_index]: Index of first stack handler in operand_stack, or -1
   // if the captured activation had no stack handler.
-  inline int stack_handler_index();
+  inline int stack_handler_index() const;
   inline void set_stack_handler_index(int stack_handler_index);
 
-  // Casting.
-  static inline JSGeneratorObject* cast(Object* obj);
+  DECLARE_CAST(JSGeneratorObject)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSGeneratorObject)
@@ -7613,8 +7154,7 @@
   // [scope_info]: Scope info.
   DECL_ACCESSORS(scope_info, ScopeInfo)
 
-  // Casting.
-  static inline JSModule* cast(Object* obj);
+  DECLARE_CAST(JSModule)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSModule)
@@ -7643,6 +7183,7 @@
   // [context]: The context for this function.
   inline Context* context();
   inline void set_context(Object* context);
+  inline JSObject* global_proxy();
 
   // [code]: The generated code object for this function.  Executed
   // when the function is invoked, e.g. foo() or new foo(). See
@@ -7657,7 +7198,10 @@
   inline bool IsBuiltin();
 
   // Tells whether this function is defined in a native script.
-  inline bool IsNative();
+  inline bool IsFromNativeScript();
+
+  // Tells whether this function is defined in an extension script.
+  inline bool IsFromExtensionScript();
 
   // Tells whether or not the function needs arguments adaption.
   inline bool NeedsArgumentsAdaption();
@@ -7754,7 +7298,8 @@
 
   // The initial map for an object created by this constructor.
   inline Map* initial_map();
-  inline void set_initial_map(Map* value);
+  static void SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
+                            Handle<Object> prototype);
   inline bool has_initial_map();
   static void EnsureHasInitialMap(Handle<JSFunction> function);
 
@@ -7771,6 +7316,11 @@
   static void SetInstancePrototype(Handle<JSFunction> function,
                                    Handle<Object> value);
 
+  // Creates a new closure for the function with the same bindings,
+  // bound values, and prototype. An equivalent of spec operations
+  // ``CloneMethod`` and ``CloneBoundFunction``.
+  static Handle<JSFunction> CloneClosure(Handle<JSFunction> function);
+
   // After prototype is removed, it will not be created when accessed, and
   // [[Construct]] from this function will not be allowed.
   bool RemovePrototype();
@@ -7799,8 +7349,7 @@
   // Prints the name of the function using PrintF.
   void PrintName(FILE* out = stdout);
 
-  // Casting.
-  static inline JSFunction* cast(Object* obj);
+  DECLARE_CAST(JSFunction)
 
   // Iterates the objects, including code objects indirectly referenced
   // through pointers to the first instruction in the code object.
@@ -7863,10 +7412,9 @@
   // [hash]: The hash code property (undefined if not initialized yet).
   DECL_ACCESSORS(hash, Object)
 
-  // Casting.
-  static inline JSGlobalProxy* cast(Object* obj);
+  DECLARE_CAST(JSGlobalProxy)
 
-  inline bool IsDetachedFrom(GlobalObject* global);
+  inline bool IsDetachedFrom(GlobalObject* global) const;
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSGlobalProxy)
@@ -7898,21 +7446,17 @@
   // [global context]: the most recent (i.e. innermost) global context.
   DECL_ACCESSORS(global_context, Context)
 
-  // [global receiver]: the global receiver object of the context
-  DECL_ACCESSORS(global_receiver, JSObject)
+  // [global proxy]: the global proxy object of the context
+  DECL_ACCESSORS(global_proxy, JSObject)
 
-  // Retrieve the property cell used to store a property.
-  PropertyCell* GetPropertyCell(LookupResult* result);
-
-  // Casting.
-  static inline GlobalObject* cast(Object* obj);
+  DECLARE_CAST(GlobalObject)
 
   // Layout description.
   static const int kBuiltinsOffset = JSObject::kHeaderSize;
   static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize;
   static const int kGlobalContextOffset = kNativeContextOffset + kPointerSize;
-  static const int kGlobalReceiverOffset = kGlobalContextOffset + kPointerSize;
-  static const int kHeaderSize = kGlobalReceiverOffset + kPointerSize;
+  static const int kGlobalProxyOffset = kGlobalContextOffset + kPointerSize;
+  static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(GlobalObject);
@@ -7922,8 +7466,7 @@
 // JavaScript global object.
 class JSGlobalObject: public GlobalObject {
  public:
-  // Casting.
-  static inline JSGlobalObject* cast(Object* obj);
+  DECLARE_CAST(JSGlobalObject)
 
   // Ensure that the global object has a cell for the given property name.
   static Handle<PropertyCell> EnsurePropertyCell(Handle<JSGlobalObject> global,
@@ -7955,8 +7498,7 @@
   inline Code* javascript_builtin_code(Builtins::JavaScript id);
   inline void set_javascript_builtin_code(Builtins::JavaScript id, Code* value);
 
-  // Casting.
-  static inline JSBuiltinsObject* cast(Object* obj);
+  DECLARE_CAST(JSBuiltinsObject)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSBuiltinsObject)
@@ -7991,8 +7533,7 @@
   // [value]: the object being wrapped.
   DECL_ACCESSORS(value, Object)
 
-  // Casting.
-  static inline JSValue* cast(Object* obj);
+  DECLARE_CAST(JSValue)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSValue)
@@ -8033,8 +7574,7 @@
   // moment when cached fields were cached.
   DECL_ACCESSORS(cache_stamp, Object)
 
-  // Casting.
-  static inline JSDate* cast(Object* obj);
+  DECLARE_CAST(JSDate)
 
   // Returns the date field with the specified index.
   // See FieldIndex for the list of date fields.
@@ -8122,15 +7662,14 @@
   DECL_ACCESSORS(stack_frames, Object)
 
   // [start_position]: the start position in the script for the error message.
-  inline int start_position();
+  inline int start_position() const;
   inline void set_start_position(int value);
 
   // [end_position]: the end position in the script for the error message.
-  inline int end_position();
+  inline int end_position() const;
   inline void set_end_position(int value);
 
-  // Casting.
-  static inline JSMessageObject* cast(Object* obj);
+  DECLARE_CAST(JSMessageObject)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSMessageObject)
@@ -8161,7 +7700,7 @@
 // If it is an atom regexp
 // - a reference to a literal string to search for
 // If it is an irregexp regexp:
-// - a reference to code for ASCII inputs (bytecode or compiled), or a smi
+// - a reference to code for Latin1 inputs (bytecode or compiled), or a smi
 // used for tracking the last usage (used for code flushing).
 // - a reference to code for UC16 inputs (bytecode or compiled), or a smi
 // used for tracking the last usage (used for code flushing).
@@ -8175,7 +7714,13 @@
   // IRREGEXP: Compiled with Irregexp.
   // IRREGEXP_NATIVE: Compiled to native code with Irregexp.
   enum Type { NOT_COMPILED, ATOM, IRREGEXP };
-  enum Flag { NONE = 0, GLOBAL = 1, IGNORE_CASE = 2, MULTILINE = 4 };
+  enum Flag {
+    NONE = 0,
+    GLOBAL = 1,
+    IGNORE_CASE = 2,
+    MULTILINE = 4,
+    STICKY = 8
+  };
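The new STICKY value is a fourth independent bit (ES6's /y flag), so a parsed flag string is simply the bitwise OR of its members, which the Flags wrapper below decodes. Illustrative, assuming the usual explicit Flags(uint32_t) constructor:

//   JSRegExp::Flags flags(JSRegExp::GLOBAL | JSRegExp::STICKY);  // e.g. /foo/gy
//   flags.is_global();     // true
//   flags.is_sticky();     // true
//   flags.is_multiline();  // false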
 
   class Flags {
    public:
@@ -8183,6 +7728,7 @@
     bool is_global() { return (value_ & GLOBAL) != 0; }
     bool is_ignore_case() { return (value_ & IGNORE_CASE) != 0; }
     bool is_multiline() { return (value_ & MULTILINE) != 0; }
+    bool is_sticky() { return (value_ & STICKY) != 0; }
     uint32_t value() { return value_; }
    private:
     uint32_t value_;
@@ -8198,23 +7744,23 @@
   // Set implementation data after the object has been prepared.
   inline void SetDataAt(int index, Object* value);
 
-  static int code_index(bool is_ascii) {
-    if (is_ascii) {
-      return kIrregexpASCIICodeIndex;
+  static int code_index(bool is_latin1) {
+    if (is_latin1) {
+      return kIrregexpLatin1CodeIndex;
     } else {
       return kIrregexpUC16CodeIndex;
     }
   }
 
-  static int saved_code_index(bool is_ascii) {
-    if (is_ascii) {
-      return kIrregexpASCIICodeSavedIndex;
+  static int saved_code_index(bool is_latin1) {
+    if (is_latin1) {
+      return kIrregexpLatin1CodeSavedIndex;
     } else {
       return kIrregexpUC16CodeSavedIndex;
     }
   }
 
-  static inline JSRegExp* cast(Object* obj);
+  DECLARE_CAST(JSRegExp)
 
   // Dispatched behavior.
   DECLARE_VERIFIER(JSRegExp)
@@ -8234,23 +7780,23 @@
 
   static const int kAtomDataSize = kAtomPatternIndex + 1;
 
-  // Irregexp compiled code or bytecode for ASCII. If compilation
+  // Irregexp compiled code or bytecode for Latin1. If compilation
   // fails, this field holds an exception object that should be
   // thrown if the regexp is used again.
-  static const int kIrregexpASCIICodeIndex = kDataIndex;
+  static const int kIrregexpLatin1CodeIndex = kDataIndex;
   // Irregexp compiled code or bytecode for UC16.  If compilation
   // fails, this field holds an exception object that should be
   // thrown if the regexp is used again.
   static const int kIrregexpUC16CodeIndex = kDataIndex + 1;
 
-  // Saved instance of Irregexp compiled code or bytecode for ASCII that
+  // Saved instance of Irregexp compiled code or bytecode for Latin1 that
   // is a potential candidate for flushing.
-  static const int kIrregexpASCIICodeSavedIndex = kDataIndex + 2;
+  static const int kIrregexpLatin1CodeSavedIndex = kDataIndex + 2;
   // Saved instance of Irregexp compiled code or bytecode for UC16 that is
   // a potential candidate for flushing.
   static const int kIrregexpUC16CodeSavedIndex = kDataIndex + 3;
 
-  // Maximal number of registers used by either ASCII or UC16.
+  // Maximal number of registers used by either Latin1 or UC16.
   // Only used to check that there is enough stack space
   static const int kIrregexpMaxRegisterCountIndex = kDataIndex + 4;
   // Number of captures in the compiled regexp.
@@ -8261,8 +7807,8 @@
   // Offsets directly into the data fixed array.
   static const int kDataTagOffset =
       FixedArray::kHeaderSize + kTagIndex * kPointerSize;
-  static const int kDataAsciiCodeOffset =
-      FixedArray::kHeaderSize + kIrregexpASCIICodeIndex * kPointerSize;
+  static const int kDataOneByteCodeOffset =
+      FixedArray::kHeaderSize + kIrregexpLatin1CodeIndex * kPointerSize;
   static const int kDataUC16CodeOffset =
       FixedArray::kHeaderSize + kIrregexpUC16CodeIndex * kPointerSize;
   static const int kIrregexpCaptureCountOffset =
@@ -8317,22 +7863,23 @@
  public:
   // Find cached value for a string key, otherwise return null.
   Handle<Object> Lookup(Handle<String> src, Handle<Context> context);
-  Handle<Object> LookupEval(Handle<String> src, Handle<Context> context,
-                     StrictMode strict_mode, int scope_position);
+  Handle<Object> LookupEval(Handle<String> src,
+                            Handle<SharedFunctionInfo> shared,
+                            StrictMode strict_mode, int scope_position);
   Handle<Object> LookupRegExp(Handle<String> source, JSRegExp::Flags flags);
   static Handle<CompilationCacheTable> Put(
       Handle<CompilationCacheTable> cache, Handle<String> src,
       Handle<Context> context, Handle<Object> value);
   static Handle<CompilationCacheTable> PutEval(
       Handle<CompilationCacheTable> cache, Handle<String> src,
-      Handle<Context> context, Handle<SharedFunctionInfo> value,
+      Handle<SharedFunctionInfo> context, Handle<SharedFunctionInfo> value,
       int scope_position);
   static Handle<CompilationCacheTable> PutRegExp(
       Handle<CompilationCacheTable> cache, Handle<String> src,
       JSRegExp::Flags flags, Handle<FixedArray> value);
   void Remove(Object* value);
 
-  static inline CompilationCacheTable* cast(Object* obj);
+  DECLARE_CAST(CompilationCacheTable)
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheTable);
@@ -8361,7 +7908,7 @@
   // Remove an object from the cache with the provided internal index.
   void RemoveByIndex(Object* name, Code* code, int index);
 
-  static inline CodeCache* cast(Object* obj);
+  DECLARE_CAST(CodeCache)
 
   // Dispatched behavior.
   DECLARE_PRINTER(CodeCache)
@@ -8424,7 +7971,7 @@
   int GetIndex(Name* name, Code::Flags flags);
   void RemoveByIndex(int index);
 
-  static inline CodeCacheHashTable* cast(Object* obj);
+  DECLARE_CAST(CodeCacheHashTable)
 
   // Initial size of the fixed array backing the hash table.
   static const int kInitialSize = 64;
@@ -8447,7 +7994,7 @@
   // Returns an undefined value if the entry is not found.
   Handle<Object> Lookup(MapHandleList* maps, Code::Flags flags);
 
-  static inline PolymorphicCodeCache* cast(Object* obj);
+  DECLARE_CAST(PolymorphicCodeCache)
 
   // Dispatched behavior.
   DECLARE_PRINTER(PolymorphicCodeCache)
@@ -8474,7 +8021,7 @@
       int code_kind,
       Handle<Code> code);
 
-  static inline PolymorphicCodeCacheHashTable* cast(Object* obj);
+  DECLARE_CAST(PolymorphicCodeCacheHashTable)
 
   static const int kInitialSize = 64;
  private:
@@ -8488,7 +8035,10 @@
   inline void set_ic_total_count(int count);
 
   inline int ic_with_type_info_count();
-  inline void change_ic_with_type_info_count(int count);
+  inline void change_ic_with_type_info_count(int delta);
+
+  inline int ic_generic_count();
+  inline void change_ic_generic_count(int delta);
 
   inline void initialize_storage();
 
@@ -8499,7 +8049,7 @@
   inline bool matches_inlined_type_change_checksum(int checksum);
 
 
-  static inline TypeFeedbackInfo* cast(Object* obj);
+  DECLARE_CAST(TypeFeedbackInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(TypeFeedbackInfo)
@@ -8507,23 +8057,8 @@
 
   static const int kStorage1Offset = HeapObject::kHeaderSize;
   static const int kStorage2Offset = kStorage1Offset + kPointerSize;
-  static const int kSize = kStorage2Offset + kPointerSize;
-
-  // TODO(mvstanton): move these sentinel declarations to shared function info.
-  // The object that indicates an uninitialized cache.
-  static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
-
-  // The object that indicates a megamorphic state.
-  static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
-
-  // The object that indicates a monomorphic state of Array with
-  // ElementsKind
-  static inline Handle<Object> MonomorphicArraySentinel(Isolate* isolate,
-      ElementsKind elements_kind);
-
-  // A raw version of the uninitialized sentinel that's safe to read during
-  // garbage collection (e.g., for patching the cache).
-  static inline Object* RawUninitializedSentinel(Heap* heap);
+  static const int kStorage3Offset = kStorage2Offset + kPointerSize;
+  static const int kSize = kStorage3Offset + kPointerSize;
 
  private:
   static const int kTypeChangeChecksumBits = 7;
@@ -8664,7 +8199,7 @@
   inline bool DigestPretenuringFeedback(bool maximum_size_scavenge);
 
   ElementsKind GetElementsKind() {
-    ASSERT(!SitePointsToLiteral());
+    DCHECK(!SitePointsToLiteral());
     int value = Smi::cast(transition_info())->value();
     return ElementsKindBits::decode(value);
   }
@@ -8708,7 +8243,7 @@
   DECLARE_PRINTER(AllocationSite)
   DECLARE_VERIFIER(AllocationSite)
 
-  static inline AllocationSite* cast(Object* obj);
+  DECLARE_CAST(AllocationSite)
   static inline AllocationSiteMode GetMode(
       ElementsKind boilerplate_elements_kind);
   static inline AllocationSiteMode GetMode(ElementsKind from, ElementsKind to);
@@ -8756,14 +8291,14 @@
         !AllocationSite::cast(allocation_site())->IsZombie();
   }
   AllocationSite* GetAllocationSite() {
-    ASSERT(IsValid());
+    DCHECK(IsValid());
     return AllocationSite::cast(allocation_site());
   }
 
   DECLARE_PRINTER(AllocationMemento)
   DECLARE_VERIFIER(AllocationMemento)
 
-  static inline AllocationMemento* cast(Object* obj);
+  DECLARE_CAST(AllocationMemento)
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(AllocationMemento);
@@ -8780,10 +8315,10 @@
 // - all attributes are available as part of the property details
 class AliasedArgumentsEntry: public Struct {
  public:
-  inline int aliased_context_slot();
+  inline int aliased_context_slot() const;
   inline void set_aliased_context_slot(int count);
 
-  static inline AliasedArgumentsEntry* cast(Object* obj);
+  DECLARE_CAST(AliasedArgumentsEntry)
 
   // Dispatched behavior.
   DECLARE_PRINTER(AliasedArgumentsEntry)
@@ -8855,6 +8390,19 @@
 };
 
 
+class IteratingStringHasher : public StringHasher {
+ public:
+  static inline uint32_t Hash(String* string, uint32_t seed);
+  inline void VisitOneByteString(const uint8_t* chars, int length);
+  inline void VisitTwoByteString(const uint16_t* chars, int length);
+
+ private:
+  inline IteratingStringHasher(int len, uint32_t seed)
+      : StringHasher(len, seed) {}
+  DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
+};
+
+
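IteratingStringHasher hashes a string by visiting its flat character chunks in place, one-byte or two-byte, instead of flattening to a temporary buffer first; the private constructor forces all use through the static Hash() entry point. A hedged sketch of the calling shape (the seed is whatever hash seed the heap supplies):

//   uint32_t hash = IteratingStringHasher::Hash(string, seed);
// Hash() constructs the hasher with the string's length and the seed, walks
// the possibly cons/sliced string, and routes each flat chunk to
// VisitOneByteString or VisitTwoByteString, which feed the incremental
// StringHasher state inherited from the base class.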
 // The characteristics of a string are stored in its map.  Retrieving these
 // few bits of information is moderately expensive, involving two memory
 // loads where the second is dependent on the first.  To improve efficiency
@@ -8868,7 +8416,7 @@
 // concrete performance benefit at that particular point in the code.
 class StringShape BASE_EMBEDDED {
  public:
-  inline explicit StringShape(String* s);
+  inline explicit StringShape(const String* s);
   inline explicit StringShape(Map* s);
   inline explicit StringShape(InstanceType t);
   inline bool IsSequential();
@@ -8876,9 +8424,9 @@
   inline bool IsCons();
   inline bool IsSliced();
   inline bool IsIndirect();
-  inline bool IsExternalAscii();
+  inline bool IsExternalOneByte();
   inline bool IsExternalTwoByte();
-  inline bool IsSequentialAscii();
+  inline bool IsSequentialOneByte();
   inline bool IsSequentialTwoByte();
   inline bool IsInternalized();
   inline StringRepresentationTag representation_tag();
@@ -8925,8 +8473,10 @@
   // Conversion.
   inline bool AsArrayIndex(uint32_t* index);
 
-  // Casting.
-  static inline Name* cast(Object* obj);
+  // Whether name can only name own properties.
+  inline bool IsOwn();
+
+  DECLARE_CAST(Name)
 
   DECLARE_PRINTER(Name)
 
@@ -8974,7 +8524,8 @@
   STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
 
   static const unsigned int kContainsCachedArrayIndexMask =
-      (~kMaxCachedArrayIndexLength << ArrayIndexLengthBits::kShift) |
+      (~static_cast<unsigned>(kMaxCachedArrayIndexLength)
+       << ArrayIndexLengthBits::kShift) |
       kIsNotArrayIndexMask;
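The static_cast<unsigned> added above (and in the identical String constant further down) is a correctness fix rather than a style change: kMaxCachedArrayIndexLength is a signed int, ~ turns it into a negative value, and left-shifting a negative int is undefined behavior in C++; performed on an unsigned operand the expression is well defined. A standalone check with an illustrative value of 7:

static_assert((~static_cast<unsigned>(7) << 2) == 0xFFFFFFE0u,
              "complement-then-shift is well defined in unsigned arithmetic");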
 
   // Value of empty hash field indicating that the hash is not computed.
@@ -9000,8 +8551,11 @@
   // [is_private]: whether this is a private symbol.
   DECL_BOOLEAN_ACCESSORS(is_private)
 
-  // Casting.
-  static inline Symbol* cast(Object* obj);
+  // [is_own]: whether this is an own symbol, that is, only used to designate
+  // own properties of objects.
+  DECL_BOOLEAN_ACCESSORS(is_own)
+
+  DECLARE_CAST(Symbol)
 
   // Dispatched behavior.
   DECLARE_PRINTER(Symbol)
@@ -9016,6 +8570,7 @@
 
  private:
   static const int kPrivateBit = 0;
+  static const int kOwnBit = 1;
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
 };
@@ -9059,49 +8614,50 @@
   STATIC_ASSERT(IS_POWER_OF_TWO(kMaxCachedArrayIndexLength + 1));
 
   static const unsigned int kContainsCachedArrayIndexMask =
-      (~kMaxCachedArrayIndexLength << ArrayIndexLengthBits::kShift) |
+      (~static_cast<unsigned>(kMaxCachedArrayIndexLength)
+       << ArrayIndexLengthBits::kShift) |
       kIsNotArrayIndexMask;
 
   // Representation of the flat content of a String.
   // A non-flat string doesn't have flat content.
   // A flat string has content that's encoded as a sequence of either
-  // ASCII chars or two-byte UC16.
+  // one-byte chars or two-byte UC16.
   // Returned by String::GetFlatContent().
   class FlatContent {
    public:
     // Returns true if the string is flat and this structure contains content.
     bool IsFlat() { return state_ != NON_FLAT; }
-    // Returns true if the structure contains ASCII content.
-    bool IsAscii() { return state_ == ASCII; }
+    // Returns true if the structure contains one-byte content.
+    bool IsOneByte() { return state_ == ONE_BYTE; }
     // Returns true if the structure contains two-byte content.
     bool IsTwoByte() { return state_ == TWO_BYTE; }
 
-    // Return the one byte content of the string. Only use if IsAscii() returns
-    // true.
+    // Return the one byte content of the string. Only use if IsOneByte()
+    // returns true.
     Vector<const uint8_t> ToOneByteVector() {
-      ASSERT_EQ(ASCII, state_);
+      DCHECK_EQ(ONE_BYTE, state_);
       return Vector<const uint8_t>(onebyte_start, length_);
     }
     // Return the two-byte content of the string. Only use if IsTwoByte()
     // returns true.
     Vector<const uc16> ToUC16Vector() {
-      ASSERT_EQ(TWO_BYTE, state_);
+      DCHECK_EQ(TWO_BYTE, state_);
       return Vector<const uc16>(twobyte_start, length_);
     }
 
     uc16 Get(int i) {
-      ASSERT(i < length_);
-      ASSERT(state_ != NON_FLAT);
-      if (state_ == ASCII) return onebyte_start[i];
+      DCHECK(i < length_);
+      DCHECK(state_ != NON_FLAT);
+      if (state_ == ONE_BYTE) return onebyte_start[i];
       return twobyte_start[i];
     }
 
    private:
-    enum State { NON_FLAT, ASCII, TWO_BYTE };
+    enum State { NON_FLAT, ONE_BYTE, TWO_BYTE };
 
     // Constructors only used by String::GetFlatContent().
     explicit FlatContent(const uint8_t* start, int length)
-        : onebyte_start(start), length_(length), state_(ASCII) { }
+        : onebyte_start(start), length_(length), state_(ONE_BYTE) {}
     explicit FlatContent(const uc16* start, int length)
         : twobyte_start(start), length_(length), state_(TWO_BYTE) { }
     FlatContent() : onebyte_start(NULL), length_(0), state_(NON_FLAT) { }
@@ -9117,20 +8673,20 @@
   };
 
   // Get and set the length of the string.
-  inline int length();
+  inline int length() const;
   inline void set_length(int value);
 
   // Get and set the length of the string using acquire loads and release
   // stores.
-  inline int synchronized_length();
+  inline int synchronized_length() const;
   inline void synchronized_set_length(int value);
 
-  // Returns whether this string has only ASCII chars, i.e. all of them can
-  // be ASCII encoded.  This might be the case even if the string is
+  // Returns whether this string has only one-byte chars, i.e. all of them can
+  // be one-byte encoded.  This might be the case even if the string is
   // two-byte.  Such strings may appear when the embedder prefers
-  // two-byte external representations even for ASCII data.
-  inline bool IsOneByteRepresentation();
-  inline bool IsTwoByteRepresentation();
+  // two-byte external representations even for one-byte data.
+  inline bool IsOneByteRepresentation() const;
+  inline bool IsTwoByteRepresentation() const;
 
   // Cons and slices have an encoding flag that may not represent the actual
   // encoding of the underlying string.  This is taken into account here.
@@ -9176,7 +8732,7 @@
   inline String* GetUnderlying();
 
   // Mark the string as an undetectable object. It only applies to
-  // ASCII and two byte string types.
+  // one-byte and two-byte string types.
   bool MarkAsUndetectable();
 
   // String equality operations.
@@ -9217,13 +8773,12 @@
 
   // Externalization.
   bool MakeExternal(v8::String::ExternalStringResource* resource);
-  bool MakeExternal(v8::String::ExternalAsciiStringResource* resource);
+  bool MakeExternal(v8::String::ExternalOneByteStringResource* resource);
 
   // Conversion.
   inline bool AsArrayIndex(uint32_t* index);
 
-  // Casting.
-  static inline String* cast(Object* obj);
+  DECLARE_CAST(String)
 
   void PrintOn(FILE* out);
 
@@ -9232,6 +8787,7 @@
 
   // Dispatched behavior.
   void StringShortPrint(StringStream* accumulator);
+  void PrintUC16(OStream& os, int start = 0, int end = -1);  // NOLINT
 #ifdef OBJECT_PRINT
   char* ToAsciiArray();
 #endif
@@ -9278,28 +8834,40 @@
                           int from,
                           int to);
 
-  // The return value may point to the first aligned word containing the
-  // first non-ascii character, rather than directly to the non-ascii character.
-  // If the return value is >= the passed length, the entire string was ASCII.
+  // The return value may point to the first aligned word containing the first
+  // non-one-byte character, rather than directly to the non-one-byte character.
+  // If the return value is >= the passed length, the entire string was
+  // one-byte.
   static inline int NonAsciiStart(const char* chars, int length) {
     const char* start = chars;
     const char* limit = chars + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
-    ASSERT(unibrow::Utf8::kMaxOneByteChar == 0x7F);
-    const uintptr_t non_ascii_mask = kUintptrAllBitsSet / 0xFF * 0x80;
-    while (chars + sizeof(uintptr_t) <= limit) {
-      if (*reinterpret_cast<const uintptr_t*>(chars) & non_ascii_mask) {
-        return static_cast<int>(chars - start);
+
+    if (length >= kIntptrSize) {
+      // Check unaligned bytes.
+      while (!IsAligned(reinterpret_cast<intptr_t>(chars), sizeof(uintptr_t))) {
+        if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
+          return static_cast<int>(chars - start);
+        }
+        ++chars;
       }
-      chars += sizeof(uintptr_t);
+      // Check aligned words.
+      DCHECK(unibrow::Utf8::kMaxOneByteChar == 0x7F);
+      const uintptr_t non_one_byte_mask = kUintptrAllBitsSet / 0xFF * 0x80;
+      while (chars + sizeof(uintptr_t) <= limit) {
+        if (*reinterpret_cast<const uintptr_t*>(chars) & non_one_byte_mask) {
+          return static_cast<int>(chars - start);
+        }
+        chars += sizeof(uintptr_t);
+      }
     }
-#endif
+    // Check remaining unaligned bytes.
     while (chars < limit) {
       if (static_cast<uint8_t>(*chars) > unibrow::Utf8::kMaxOneByteChar) {
         return static_cast<int>(chars - start);
       }
       ++chars;
     }
+
     return static_cast<int>(chars - start);
   }
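The aligned-word loop above is a SWAR scan: kUintptrAllBitsSet / 0xFF replicates 0x01 into every byte of the word, and multiplying by 0x80 turns that into a mask of each byte's top bit, so a single AND reports whether any byte exceeds 0x7F (unibrow::Utf8::kMaxOneByteChar). A standalone check of the 64-bit case:

#include <stdint.h>
static_assert(UINT64_C(0xFFFFFFFFFFFFFFFF) / 0xFF * 0x80 ==
                  UINT64_C(0x8080808080808080),
              "per-byte top-bit mask derived from the all-ones word");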
 
@@ -9334,8 +8902,14 @@
   static Handle<FixedArray> CalculateLineEnds(Handle<String> string,
                                               bool include_ending_line);
 
+  // Use the hash field to forward to the canonical internalized string
+  // when deserializing an internalized string.
+  inline void SetForwardedInternalizedString(String* string);
+  inline String* GetForwardedInternalizedString();
+
  private:
   friend class Name;
+  friend class StringTableInsertionKey;
 
   static Handle<String> SlowFlatten(Handle<ConsString> cons,
                                     PretenureFlag tenure);
@@ -9359,8 +8933,7 @@
 // The SeqString abstract class captures sequential string values.
 class SeqString: public String {
  public:
-  // Casting.
-  static inline SeqString* cast(Object* obj);
+  DECLARE_CAST(SeqString)
 
   // Layout description.
   static const int kHeaderSize = String::kSize;
@@ -9375,11 +8948,11 @@
 };
 
 
-// The AsciiString class captures sequential ASCII string objects.
-// Each character in the AsciiString is an ASCII character.
+// The OneByteString class captures sequential one-byte string objects.
+// Each character in the OneByteString is a one-byte character.
 class SeqOneByteString: public SeqString {
  public:
-  static const bool kHasAsciiEncoding = true;
+  static const bool kHasOneByteEncoding = true;
 
   // Dispatched behavior.
   inline uint16_t SeqOneByteStringGet(int index);
@@ -9390,20 +8963,19 @@
 
   inline uint8_t* GetChars();
 
-  // Casting
-  static inline SeqOneByteString* cast(Object* obj);
+  DECLARE_CAST(SeqOneByteString)
 
   // Garbage collection support.  This method is called by the
-  // garbage collector to compute the actual size of an AsciiString
+  // garbage collector to compute the actual size of a OneByteString
   // instance.
   inline int SeqOneByteStringSize(InstanceType instance_type);
 
-  // Computes the size for an AsciiString instance of a given length.
+  // Computes the size for a OneByteString instance of a given length.
   static int SizeFor(int length) {
     return OBJECT_POINTER_ALIGN(kHeaderSize + length * kCharSize);
   }
 
-  // Maximal memory usage for a single sequential ASCII string.
+  // Maximal memory usage for a single sequential one-byte string.
   static const int kMaxSize = 512 * MB - 1;
   STATIC_ASSERT((kMaxSize - kHeaderSize) >= String::kMaxLength);
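SizeFor charges one byte per character on top of the fixed header and rounds up to pointer alignment, and kMaxSize caps a single sequential one-byte string just under 512 MB, which the STATIC_ASSERT above ties back to String::kMaxLength. A worked example, assuming 8-byte pointers and a pointer-aligned kHeaderSize:

//   SizeFor(5)  == OBJECT_POINTER_ALIGN(kHeaderSize + 5)  == kHeaderSize + 8
//   SizeFor(16) == OBJECT_POINTER_ALIGN(kHeaderSize + 16) == kHeaderSize + 16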
 
@@ -9416,7 +8988,7 @@
 // Each character in the TwoByteString is a two-byte uint16_t.
 class SeqTwoByteString: public SeqString {
  public:
-  static const bool kHasAsciiEncoding = false;
+  static const bool kHasOneByteEncoding = false;
 
   // Dispatched behavior.
   inline uint16_t SeqTwoByteStringGet(int index);
@@ -9430,8 +9002,7 @@
   // For regexp code.
   const uint16_t* SeqTwoByteStringGetData(unsigned start);
 
-  // Casting
-  static inline SeqTwoByteString* cast(Object* obj);
+  DECLARE_CAST(SeqTwoByteString)
 
   // Garbage collection support.  This method is called by the
   // garbage collector to compute the actual size of a TwoByteString
@@ -9482,8 +9053,7 @@
   // Dispatched behavior.
   uint16_t ConsStringGet(int index);
 
-  // Casting.
-  static inline ConsString* cast(Object* obj);
+  DECLARE_CAST(ConsString)
 
   // Layout description.
   static const int kFirstOffset = POINTER_SIZE_ALIGN(String::kSize);
@@ -9520,14 +9090,13 @@
   inline String* parent();
   inline void set_parent(String* parent,
                          WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
-  inline int offset();
+  inline int offset() const;
   inline void set_offset(int offset);
 
   // Dispatched behavior.
   uint16_t SlicedStringGet(int index);
 
-  // Casting.
-  static inline SlicedString* cast(Object* obj);
+  DECLARE_CAST(SlicedString)
 
   // Layout description.
   static const int kParentOffset = POINTER_SIZE_ALIGN(String::kSize);
@@ -9559,8 +9128,7 @@
 // API.  Therefore, ExternalStrings should not be used internally.
 class ExternalString: public String {
  public:
-  // Casting
-  static inline ExternalString* cast(Object* obj);
+  DECLARE_CAST(ExternalString)
 
   // Layout description.
   static const int kResourceOffset = POINTER_SIZE_ALIGN(String::kSize);
@@ -9581,13 +9149,13 @@
 };
 
 
-// The ExternalAsciiString class is an external string backed by an
-// ASCII string.
-class ExternalAsciiString: public ExternalString {
+// The ExternalOneByteString class is an external string backed by a
+// one-byte string.
+class ExternalOneByteString : public ExternalString {
  public:
-  static const bool kHasAsciiEncoding = true;
+  static const bool kHasOneByteEncoding = true;
 
-  typedef v8::String::ExternalAsciiStringResource Resource;
+  typedef v8::String::ExternalOneByteStringResource Resource;
 
   // The underlying resource.
   inline const Resource* resource();
@@ -9602,19 +9170,18 @@
   inline const uint8_t* GetChars();
 
   // Dispatched behavior.
-  inline uint16_t ExternalAsciiStringGet(int index);
+  inline uint16_t ExternalOneByteStringGet(int index);
 
-  // Casting.
-  static inline ExternalAsciiString* cast(Object* obj);
+  DECLARE_CAST(ExternalOneByteString)
 
   // Garbage collection support.
-  inline void ExternalAsciiStringIterateBody(ObjectVisitor* v);
+  inline void ExternalOneByteStringIterateBody(ObjectVisitor* v);
 
-  template<typename StaticVisitor>
-  inline void ExternalAsciiStringIterateBody();
+  template <typename StaticVisitor>
+  inline void ExternalOneByteStringIterateBody();
 
  private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalAsciiString);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ExternalOneByteString);
 };
 
 
@@ -9622,7 +9189,7 @@
 // encoded string.
 class ExternalTwoByteString: public ExternalString {
  public:
-  static const bool kHasAsciiEncoding = false;
+  static const bool kHasOneByteEncoding = false;
 
   typedef v8::String::ExternalStringResource Resource;
 
@@ -9644,8 +9211,7 @@
   // For regexp code.
   inline const uint16_t* ExternalTwoByteStringGetData(unsigned start);
 
-  // Casting.
-  static inline ExternalTwoByteString* cast(Object* obj);
+  DECLARE_CAST(ExternalTwoByteString)
 
   // Garbage collection support.
   inline void ExternalTwoByteStringIterateBody(ObjectVisitor* v);
@@ -9694,7 +9260,7 @@
   int length() { return length_; }
  private:
   String** str_;
-  bool is_ascii_;
+  bool is_one_byte_;
   int length_;
   const void* start_;
 };
@@ -9718,7 +9284,8 @@
 class ConsStringIteratorOp {
  public:
   inline ConsStringIteratorOp() {}
-  inline ConsStringIteratorOp(ConsString* cons_string, int offset = 0) {
+  inline explicit ConsStringIteratorOp(ConsString* cons_string,
+                                       int offset = 0) {
     Reset(cons_string, offset);
   }
   inline void Reset(ConsString* cons_string, int offset = 0) {
@@ -9807,11 +9374,10 @@
   // [to_number]: Cached to_number computed at startup.
   DECL_ACCESSORS(to_number, Object)
 
-  inline byte kind();
+  inline byte kind() const;
   inline void set_kind(byte kind);
 
-  // Casting.
-  static inline Oddball* cast(Object* obj);
+  DECLARE_CAST(Oddball)
 
   // Dispatched behavior.
   DECLARE_VERIFIER(Oddball)
@@ -9858,12 +9424,11 @@
   // [value]: value of the global property.
   DECL_ACCESSORS(value, Object)
 
-  // Casting.
-  static inline Cell* cast(Object* obj);
+  DECLARE_CAST(Cell)
 
   static inline Cell* FromValueAddress(Address value) {
     Object* result = FromAddress(value - kValueOffset);
-    ASSERT(result->IsCell() || result->IsPropertyCell());
+    DCHECK(result->IsCell() || result->IsPropertyCell());
     return static_cast<Cell*>(result);
   }
 
@@ -9913,8 +9478,7 @@
   static void AddDependentCompilationInfo(Handle<PropertyCell> cell,
                                           CompilationInfo* info);
 
-  // Casting.
-  static inline PropertyCell* cast(Object* obj);
+  DECLARE_CAST(PropertyCell)
 
   inline Address TypeAddress() {
     return address() + kTypeOffset;
@@ -9951,8 +9515,7 @@
   // [hash]: The hash code property (undefined if not initialized yet).
   DECL_ACCESSORS(hash, Object)
 
-  // Casting.
-  static inline JSProxy* cast(Object* obj);
+  DECLARE_CAST(JSProxy)
 
   MUST_USE_RESULT static MaybeHandle<Object> GetPropertyWithHandler(
       Handle<JSProxy> proxy,
@@ -9969,22 +9532,20 @@
   // otherwise set it to false.
   MUST_USE_RESULT
   static MaybeHandle<Object> SetPropertyViaPrototypesWithHandler(
-      Handle<JSProxy> proxy,
-      Handle<JSReceiver> receiver,
-      Handle<Name> name,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode,
-      bool* done);
+      Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
+      Handle<Object> value, StrictMode strict_mode, bool* done);
 
-  static PropertyAttributes GetPropertyAttributesWithHandler(
-      Handle<JSProxy> proxy,
-      Handle<Object> receiver,
-      Handle<Name> name);
-  static PropertyAttributes GetElementAttributeWithHandler(
-      Handle<JSProxy> proxy,
-      Handle<JSReceiver> receiver,
-      uint32_t index);
+  MUST_USE_RESULT static Maybe<PropertyAttributes>
+      GetPropertyAttributesWithHandler(Handle<JSProxy> proxy,
+                                       Handle<Object> receiver,
+                                       Handle<Name> name);
+  MUST_USE_RESULT static Maybe<PropertyAttributes>
+      GetElementAttributeWithHandler(Handle<JSProxy> proxy,
+                                     Handle<JSReceiver> receiver,
+                                     uint32_t index);
+  MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithHandler(
+      Handle<JSProxy> proxy, Handle<Object> receiver, Handle<Name> name,
+      Handle<Object> value, StrictMode strict_mode);
 
   // Turn the proxy into an (empty) JSObject.
   static void Fix(Handle<JSProxy> proxy);
@@ -10024,13 +9585,6 @@
  private:
   friend class JSReceiver;
 
-  MUST_USE_RESULT static MaybeHandle<Object> SetPropertyWithHandler(
-      Handle<JSProxy> proxy,
-      Handle<JSReceiver> receiver,
-      Handle<Name> name,
-      Handle<Object> value,
-      PropertyAttributes attributes,
-      StrictMode strict_mode);
   MUST_USE_RESULT static inline MaybeHandle<Object> SetElementWithHandler(
       Handle<JSProxy> proxy,
       Handle<JSReceiver> receiver,
@@ -10038,9 +9592,10 @@
       Handle<Object> value,
       StrictMode strict_mode);
 
-  static bool HasPropertyWithHandler(Handle<JSProxy> proxy, Handle<Name> name);
-  static inline bool HasElementWithHandler(Handle<JSProxy> proxy,
-                                           uint32_t index);
+  MUST_USE_RESULT static Maybe<bool> HasPropertyWithHandler(
+      Handle<JSProxy> proxy, Handle<Name> name);
+  MUST_USE_RESULT static inline Maybe<bool> HasElementWithHandler(
+      Handle<JSProxy> proxy, uint32_t index);
 
   MUST_USE_RESULT static MaybeHandle<Object> DeletePropertyWithHandler(
       Handle<JSProxy> proxy,
@@ -10067,8 +9622,7 @@
   // [construct_trap]: The construct trap.
   DECL_ACCESSORS(construct_trap, Object)
 
-  // Casting.
-  static inline JSFunctionProxy* cast(Object* obj);
+  DECLARE_CAST(JSFunctionProxy)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSFunctionProxy)
@@ -10092,43 +9646,42 @@
 };
 
 
-// The JSSet describes EcmaScript Harmony sets
-class JSSet: public JSObject {
+class JSCollection : public JSObject {
  public:
-  // [set]: the backing hash set containing keys.
+  // [table]: the backing hash table.
   DECL_ACCESSORS(table, Object)
 
-  // Casting.
-  static inline JSSet* cast(Object* obj);
+  static const int kTableOffset = JSObject::kHeaderSize;
+  static const int kSize = kTableOffset + kPointerSize;
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(JSCollection);
+};
+
+
+// The JSSet describes EcmaScript Harmony sets
+class JSSet : public JSCollection {
+ public:
+  DECLARE_CAST(JSSet)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSSet)
   DECLARE_VERIFIER(JSSet)
 
-  static const int kTableOffset = JSObject::kHeaderSize;
-  static const int kSize = kTableOffset + kPointerSize;
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSSet);
 };
 
 
 // The JSMap describes EcmaScript Harmony maps
-class JSMap: public JSObject {
+class JSMap : public JSCollection {
  public:
-  // [table]: the backing hash table mapping keys to values.
-  DECL_ACCESSORS(table, Object)
-
-  // Casting.
-  static inline JSMap* cast(Object* obj);
+  DECLARE_CAST(JSMap)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSMap)
   DECLARE_VERIFIER(JSMap)
 
-  static const int kTableOffset = JSObject::kHeaderSize;
-  static const int kSize = kTableOffset + kPointerSize;
-
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSMap);
 };
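The hunk above hoists the shared [table] slot and its layout constants out of JSSet and JSMap into the new JSCollection base, so the two collections can no longer drift apart. A standalone sketch of the pattern (illustrative only, not V8 code; 8 stands in for kPointerSize):

#include <cassert>

struct JSObjectLike { enum { kHeaderSize = 16 }; };

struct JSCollectionLike : JSObjectLike {
  enum { kTableOffset = kHeaderSize, kSize = kTableOffset + 8 };
};

struct JSSetLike : JSCollectionLike {};  // layout inherited, not duplicated
struct JSMapLike : JSCollectionLike {};

int main() {
  // Both collections agree on the table slot by construction.
  assert(int(JSSetLike::kTableOffset) == int(JSMapLike::kTableOffset));
}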
@@ -10153,13 +9706,13 @@
   DECL_ACCESSORS(table, Object)
 
   // [index]: The index into the data table.
-  DECL_ACCESSORS(index, Smi)
+  DECL_ACCESSORS(index, Object)
 
   // [kind]: The kind of iteration this is. One of the [Kind] enum values.
-  DECL_ACCESSORS(kind, Smi)
+  DECL_ACCESSORS(kind, Object)
 
 #ifdef OBJECT_PRINT
-  void OrderedHashTableIteratorPrint(FILE* out);
+  void OrderedHashTableIteratorPrint(OStream& os);  // NOLINT
 #endif
 
   static const int kTableOffset = JSObject::kHeaderSize;
@@ -10173,13 +9726,26 @@
     kKindEntries = 3
   };
 
-  // Returns an iterator result object: {value: any, done: boolean} and moves
-  // the index to the next valid entry. Closes the iterator if moving past the
-  // end.
-  static Handle<JSObject> Next(Handle<Derived> iterator);
+  // Whether the iterator has more elements. This needs to be called before
+  // calling |CurrentKey| and/or |CurrentValue|.
+  bool HasMore();
+
+  // Move the index forward one.
+  void MoveNext() {
+    set_index(Smi::FromInt(Smi::cast(index())->value() + 1));
+  }
+
+  // Populates the array with the next key and value, then moves the iterator
+  // forward.
+  // Returns the |kind|, or 0 if the iterator is already at the end.
+  Smi* Next(JSArray* value_array);
+
+  // Returns the current key of the iterator. This should only be called when
+  // |HasMore| returns true.
+  inline Object* CurrentKey();
 
  private:
-  // Transitions the iterator to the non obsolote backing store. This is a NOP
+  // Transitions the iterator to the non-obsolete backing store. This is a NOP
   // if the [table] is not obsolete.
   void Transition();
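Taken together, HasMore/CurrentKey/MoveNext replace the old Next() that returned a fresh {value: any, done: boolean} object per step; the caller now drives the iteration and reuses a value array. A hedged usage sketch (hypothetical caller, not from this patch):

while (iterator->HasMore()) {
  Object* key = iterator->CurrentKey();  // valid only while HasMore() is true
  Process(key);                          // hypothetical consumer
  iterator->MoveNext();
}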
 
@@ -10194,12 +9760,11 @@
   DECLARE_PRINTER(JSSetIterator)
   DECLARE_VERIFIER(JSSetIterator)
 
-  // Casting.
-  static inline JSSetIterator* cast(Object* obj);
+  DECLARE_CAST(JSSetIterator)
 
-  static Handle<Object> ValueForKind(
-      Handle<JSSetIterator> iterator,
-      int entry_index);
+  // Called by |Next| to populate the array. This allows the subclasses to
+  // populate the array differently.
+  inline void PopulateValueArray(FixedArray* array);
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSSetIterator);
@@ -10213,14 +9778,17 @@
   DECLARE_PRINTER(JSMapIterator)
   DECLARE_VERIFIER(JSMapIterator)
 
-  // Casting.
-  static inline JSMapIterator* cast(Object* obj);
+  DECLARE_CAST(JSMapIterator)
 
-  static Handle<Object> ValueForKind(
-      Handle<JSMapIterator> iterator,
-      int entry_index);
+  // Called by |Next| to populate the array. This allows the subclasses to
+  // populate the array differently.
+  inline void PopulateValueArray(FixedArray* array);
 
  private:
+  // Returns the current value of the iterator. This should only be called when
+  // |HasMore| returns true.
+  inline Object* CurrentValue();
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSMapIterator);
 };
 
@@ -10246,8 +9814,7 @@
 // The JSWeakMap describes EcmaScript Harmony weak maps
 class JSWeakMap: public JSWeakCollection {
  public:
-  // Casting.
-  static inline JSWeakMap* cast(Object* obj);
+  DECLARE_CAST(JSWeakMap)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSWeakMap)
@@ -10261,8 +9828,7 @@
 // The JSWeakSet describes EcmaScript Harmony weak sets
 class JSWeakSet: public JSWeakCollection {
  public:
-  // Casting.
-  static inline JSWeakSet* cast(Object* obj);
+  DECLARE_CAST(JSWeakSet)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSWeakSet)
@@ -10296,8 +9862,7 @@
   // [weak_first_array]: weak linked list of views.
   DECL_ACCESSORS(weak_first_view, Object)
 
-  // Casting.
-  static inline JSArrayBuffer* cast(Object* obj);
+  DECLARE_CAST(JSArrayBuffer)
 
   // Neutering. Only neuters the buffer, not associated typed arrays.
   void Neuter();
@@ -10339,8 +9904,7 @@
   // [weak_next]: linked list of typed arrays over the same array buffer.
   DECL_ACCESSORS(weak_next, Object)
 
-  // Casting.
-  static inline JSArrayBufferView* cast(Object* obj);
+  DECLARE_CAST(JSArrayBufferView)
 
   DECLARE_VERIFIER(JSArrayBufferView)
 
@@ -10366,8 +9930,7 @@
   // Neutering. Only neuters this typed array.
   void Neuter();
 
-  // Casting.
-  static inline JSTypedArray* cast(Object* obj);
+  DECLARE_CAST(JSTypedArray)
 
   ExternalArrayType type();
   size_t element_size();
@@ -10397,8 +9960,7 @@
   // Only neuters this DataView
   void Neuter();
 
-  // Casting.
-  static inline JSDataView* cast(Object* obj);
+  DECLARE_CAST(JSDataView)
 
   // Dispatched behavior.
   DECLARE_PRINTER(JSDataView)
@@ -10423,8 +9985,7 @@
   inline Address foreign_address();
   inline void set_foreign_address(Address value);
 
-  // Casting.
-  static inline Foreign* cast(Object* obj);
+  DECLARE_CAST(Foreign)
 
   // Dispatched behavior.
   inline void ForeignIterateBody(ObjectVisitor* v);
@@ -10486,8 +10047,7 @@
   static inline void SetContent(Handle<JSArray> array,
                                 Handle<FixedArrayBase> storage);
 
-  // Casting.
-  static inline JSArray* cast(Object* obj);
+  DECLARE_CAST(JSArray)
 
   // Ensures that the fixed array backing the JSArray has at
   // least the stated size.
@@ -10555,9 +10115,12 @@
   inline void set_property_attributes(PropertyAttributes attributes);
 
   // Checks whether the given receiver is compatible with this accessor.
+  static bool IsCompatibleReceiverType(Isolate* isolate,
+                                       Handle<AccessorInfo> info,
+                                       Handle<HeapType> type);
   inline bool IsCompatibleReceiver(Object* receiver);
 
-  static inline AccessorInfo* cast(Object* obj);
+  DECLARE_CAST(AccessorInfo)
 
   // Dispatched behavior.
   DECLARE_VERIFIER(AccessorInfo)
@@ -10574,6 +10137,9 @@
   static const int kSize = kExpectedReceiverTypeOffset + kPointerSize;
 
  private:
+  inline bool HasExpectedReceiverType() {
+    return expected_receiver_type()->IsFunctionTemplateInfo();
+  }
   // Bit positions in flag.
   static const int kAllCanReadBit = 0;
   static const int kAllCanWriteBit = 1;
@@ -10655,7 +10221,7 @@
  public:
   DECL_ACCESSORS(serialized_data, ByteArray)
 
-  static inline DeclaredAccessorDescriptor* cast(Object* obj);
+  DECLARE_CAST(DeclaredAccessorDescriptor)
 
   static Handle<DeclaredAccessorDescriptor> Create(
       Isolate* isolate,
@@ -10678,7 +10244,7 @@
  public:
   DECL_ACCESSORS(descriptor, DeclaredAccessorDescriptor)
 
-  static inline DeclaredAccessorInfo* cast(Object* obj);
+  DECLARE_CAST(DeclaredAccessorInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(DeclaredAccessorInfo)
@@ -10707,7 +10273,7 @@
   DECL_ACCESSORS(setter, Object)
   DECL_ACCESSORS(data, Object)
 
-  static inline ExecutableAccessorInfo* cast(Object* obj);
+  DECLARE_CAST(ExecutableAccessorInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ExecutableAccessorInfo)
@@ -10731,19 +10297,12 @@
 //   * undefined: considered an accessor by the spec, too, strangely enough
 //   * the hole: an accessor which has not been set
 //   * a pointer to a map: a transition used to ensure map sharing
-// access_flags provides the ability to override access checks on access check
-// failure.
 class AccessorPair: public Struct {
  public:
   DECL_ACCESSORS(getter, Object)
   DECL_ACCESSORS(setter, Object)
-  DECL_ACCESSORS(access_flags, Smi)
 
-  inline void set_access_flags(v8::AccessControl access_control);
-  inline bool all_can_read();
-  inline bool all_can_write();
-
-  static inline AccessorPair* cast(Object* obj);
+  DECLARE_CAST(AccessorPair)
 
   static Handle<AccessorPair> Copy(Handle<AccessorPair> pair);
 
@@ -10778,13 +10337,9 @@
 
   static const int kGetterOffset = HeapObject::kHeaderSize;
   static const int kSetterOffset = kGetterOffset + kPointerSize;
-  static const int kAccessFlagsOffset = kSetterOffset + kPointerSize;
-  static const int kSize = kAccessFlagsOffset + kPointerSize;
+  static const int kSize = kSetterOffset + kPointerSize;
 
  private:
-  static const int kAllCanReadBit = 0;
-  static const int kAllCanWriteBit = 1;
-
   // Strangely enough, in addition to functions and harmony proxies, the spec
   // requires us to consider undefined as a kind of accessor, too:
   //    var obj = {};
@@ -10804,7 +10359,7 @@
   DECL_ACCESSORS(indexed_callback, Object)
   DECL_ACCESSORS(data, Object)
 
-  static inline AccessCheckInfo* cast(Object* obj);
+  DECLARE_CAST(AccessCheckInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(AccessCheckInfo)
@@ -10829,7 +10384,7 @@
   DECL_ACCESSORS(enumerator, Object)
   DECL_ACCESSORS(data, Object)
 
-  static inline InterceptorInfo* cast(Object* obj);
+  DECLARE_CAST(InterceptorInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(InterceptorInfo)
@@ -10853,7 +10408,7 @@
   DECL_ACCESSORS(callback, Object)
   DECL_ACCESSORS(data, Object)
 
-  static inline CallHandlerInfo* cast(Object* obj);
+  DECLARE_CAST(CallHandlerInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(CallHandlerInfo)
@@ -10902,7 +10457,7 @@
   DECL_ACCESSORS(access_check_info, Object)
   DECL_ACCESSORS(flag, Smi)
 
-  inline int length();
+  inline int length() const;
   inline void set_length(int value);
 
   // Following properties use flag bits.
@@ -10915,7 +10470,7 @@
   DECL_BOOLEAN_ACCESSORS(remove_prototype)
   DECL_BOOLEAN_ACCESSORS(do_not_cache)
 
-  static inline FunctionTemplateInfo* cast(Object* obj);
+  DECLARE_CAST(FunctionTemplateInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(FunctionTemplateInfo)
@@ -10964,7 +10519,7 @@
   DECL_ACCESSORS(constructor, Object)
   DECL_ACCESSORS(internal_field_count, Object)
 
-  static inline ObjectTemplateInfo* cast(Object* obj);
+  DECLARE_CAST(ObjectTemplateInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(ObjectTemplateInfo)
@@ -10982,7 +10537,7 @@
   DECL_ACCESSORS(receiver, Object)
   DECL_ACCESSORS(args, Object)
 
-  static inline SignatureInfo* cast(Object* obj);
+  DECLARE_CAST(SignatureInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(SignatureInfo)
@@ -11001,7 +10556,7 @@
  public:
   DECL_ACCESSORS(types, Object)
 
-  static inline TypeSwitchInfo* cast(Object* obj);
+  DECLARE_CAST(TypeSwitchInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(TypeSwitchInfo)
@@ -11046,7 +10601,7 @@
   // Get the number of break points for this function.
   int GetBreakPointCount();
 
-  static inline DebugInfo* cast(Object* obj);
+  DECLARE_CAST(DebugInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(DebugInfo)
@@ -11100,7 +10655,7 @@
   // Get the number of break points for this code position.
   int GetBreakPointCount();
 
-  static inline BreakPointInfo* cast(Object* obj);
+  DECLARE_CAST(BreakPointInfo)
 
   // Dispatched behavior.
   DECLARE_PRINTER(BreakPointInfo)
@@ -11121,6 +10676,7 @@
 
 #undef DECL_BOOLEAN_ACCESSORS
 #undef DECL_ACCESSORS
+#undef DECLARE_CAST
 #undef DECLARE_VERIFIER
 
 #define VISITOR_SYNCHRONIZATION_TAGS_LIST(V)                            \
@@ -11185,9 +10741,9 @@
   // Visits a runtime entry in the instruction stream.
   virtual void VisitRuntimeEntry(RelocInfo* rinfo) {}
 
-  // Visits the resource of an ASCII or two-byte string.
-  virtual void VisitExternalAsciiString(
-      v8::String::ExternalAsciiStringResource** resource) {}
+  // Visits the resource of a one-byte or two-byte string.
+  virtual void VisitExternalOneByteString(
+      v8::String::ExternalOneByteStringResource** resource) {}
   virtual void VisitExternalTwoByteString(
       v8::String::ExternalStringResource** resource) {}
 
diff --git a/src/optimizing-compiler-thread.cc b/src/optimizing-compiler-thread.cc
index 987bac2..387e9c0 100644
--- a/src/optimizing-compiler-thread.cc
+++ b/src/optimizing-compiler-thread.cc
@@ -16,7 +16,7 @@
 namespace internal {
 
 OptimizingCompilerThread::~OptimizingCompilerThread() {
-  ASSERT_EQ(0, input_queue_length_);
+  DCHECK_EQ(0, input_queue_length_);
   DeleteArray(input_queue_);
   if (FLAG_concurrent_osr) {
 #ifdef DEBUG
@@ -31,7 +31,7 @@
 
 void OptimizingCompilerThread::Run() {
 #ifdef DEBUG
-  { LockGuard<Mutex> lock_guard(&thread_id_mutex_);
+  { base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
     thread_id_ = ThreadId::Current().ToInteger();
   }
 #endif
@@ -40,16 +40,15 @@
   DisallowHandleAllocation no_handles;
   DisallowHandleDereference no_deref;
 
-  ElapsedTimer total_timer;
+  base::ElapsedTimer total_timer;
   if (FLAG_trace_concurrent_recompilation) total_timer.Start();
 
   while (true) {
     input_queue_semaphore_.Wait();
-    Logger::TimerEventScope timer(
-        isolate_, Logger::TimerEventScope::v8_recompile_concurrent);
+    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
 
     if (FLAG_concurrent_recompilation_delay != 0) {
-      OS::Sleep(FLAG_concurrent_recompilation_delay);
+      base::OS::Sleep(FLAG_concurrent_recompilation_delay);
     }
 
     switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
@@ -73,7 +72,7 @@
         continue;
     }
 
-    ElapsedTimer compiling_timer;
+    base::ElapsedTimer compiling_timer;
     if (FLAG_trace_concurrent_recompilation) compiling_timer.Start();
 
     CompileNext();
@@ -86,10 +85,10 @@
 
 
 OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
-  LockGuard<Mutex> access_input_queue_(&input_queue_mutex_);
+  base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
   if (input_queue_length_ == 0) return NULL;
   OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
-  ASSERT_NE(NULL, job);
+  DCHECK_NE(NULL, job);
   input_queue_shift_ = InputQueueIndex(1);
   input_queue_length_--;
   return job;
@@ -98,12 +97,12 @@
 
 void OptimizingCompilerThread::CompileNext() {
   OptimizedCompileJob* job = NextInput();
-  ASSERT_NE(NULL, job);
+  DCHECK_NE(NULL, job);
 
   // The function may have already been optimized by OSR.  Simply continue.
   OptimizedCompileJob::Status status = job->OptimizeGraph();
   USE(status);   // Prevent an unused-variable error in release mode.
-  ASSERT(status != OptimizedCompileJob::FAILED);
+  DCHECK(status != OptimizedCompileJob::FAILED);
 
   // The function may have already been optimized by OSR.  Simply continue.
   // Use a mutex to make sure that functions marked for install
@@ -170,7 +169,7 @@
 
 
 void OptimizingCompilerThread::Flush() {
-  ASSERT(!IsOptimizerThread());
+  DCHECK(!IsOptimizerThread());
   base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
   if (FLAG_block_concurrent_recompilation) Unblock();
   input_queue_semaphore_.Signal();
@@ -184,7 +183,7 @@
 
 
 void OptimizingCompilerThread::Stop() {
-  ASSERT(!IsOptimizerThread());
+  DCHECK(!IsOptimizerThread());
   base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
   if (FLAG_block_concurrent_recompilation) Unblock();
   input_queue_semaphore_.Signal();
@@ -217,7 +216,7 @@
 
 
 void OptimizingCompilerThread::InstallOptimizedFunctions() {
-  ASSERT(!IsOptimizerThread());
+  DCHECK(!IsOptimizerThread());
   HandleScope handle_scope(isolate_);
 
   OptimizedCompileJob* job;
@@ -227,7 +226,7 @@
     if (info->is_osr()) {
       if (FLAG_trace_osr) {
         PrintF("[COSR - ");
-        info->closure()->PrintName();
+        function->ShortPrint();
         PrintF(" is ready for install and entry at AST id %d]\n",
                info->osr_ast_id().ToInt());
       }
@@ -238,6 +237,11 @@
       BackEdgeTable::RemoveStackCheck(code, offset);
     } else {
       if (function->IsOptimized()) {
+        if (FLAG_trace_concurrent_recompilation) {
+          PrintF("  ** Aborting compilation for ");
+          function->ShortPrint();
+          PrintF(" as it has already been optimized.\n");
+        }
         DisposeOptimizedCompileJob(job, false);
       } else {
         Handle<Code> code = Compiler::GetConcurrentlyOptimizedCode(job);
@@ -250,23 +254,23 @@
 
 
 void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
-  ASSERT(IsQueueAvailable());
-  ASSERT(!IsOptimizerThread());
+  DCHECK(IsQueueAvailable());
+  DCHECK(!IsOptimizerThread());
   CompilationInfo* info = job->info();
   if (info->is_osr()) {
     osr_attempts_++;
     AddToOsrBuffer(job);
     // Add job to the front of the input queue.
-    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
-    ASSERT_LT(input_queue_length_, input_queue_capacity_);
+    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
+    DCHECK_LT(input_queue_length_, input_queue_capacity_);
     // Move shift_ back by one.
     input_queue_shift_ = InputQueueIndex(input_queue_capacity_ - 1);
     input_queue_[InputQueueIndex(0)] = job;
     input_queue_length_++;
   } else {
     // Add job to the back of the input queue.
-    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
-    ASSERT_LT(input_queue_length_, input_queue_capacity_);
+    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
+    DCHECK_LT(input_queue_length_, input_queue_capacity_);
     input_queue_[InputQueueIndex(input_queue_length_)] = job;
     input_queue_length_++;
   }
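QueueForOptimization treats the circular input queue as a deque: OSR jobs jump the line by stepping input_queue_shift_ back one slot, while everything else appends at the back. A standalone model of the indexing (not V8 code; capacity of 4 assumed):

#include <cassert>

const int kCapacity = 4;
int shift_ = 0, length_ = 0;
int queue_[kCapacity];

int Index(int i) { return (i + shift_) % kCapacity; }

void PushFront(int job) {
  shift_ = Index(kCapacity - 1);  // move the logical front back one slot
  queue_[Index(0)] = job;
  length_++;
}

void PushBack(int job) {
  queue_[Index(length_)] = job;
  length_++;
}

int main() {
  PushBack(1); PushBack(2); PushFront(3);  // the OSR-style push wins the head
  assert(queue_[Index(0)] == 3 && queue_[Index(1)] == 1 && queue_[Index(2)] == 2);
}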
@@ -279,7 +283,7 @@
 
 
 void OptimizingCompilerThread::Unblock() {
-  ASSERT(!IsOptimizerThread());
+  DCHECK(!IsOptimizerThread());
   while (blocked_jobs_ > 0) {
     input_queue_semaphore_.Signal();
     blocked_jobs_--;
@@ -289,7 +293,7 @@
 
 OptimizedCompileJob* OptimizingCompilerThread::FindReadyOSRCandidate(
     Handle<JSFunction> function, BailoutId osr_ast_id) {
-  ASSERT(!IsOptimizerThread());
+  DCHECK(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
     OptimizedCompileJob* current = osr_buffer_[i];
     if (current != NULL &&
@@ -306,7 +310,7 @@
 
 bool OptimizingCompilerThread::IsQueuedForOSR(Handle<JSFunction> function,
                                               BailoutId osr_ast_id) {
-  ASSERT(!IsOptimizerThread());
+  DCHECK(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
     OptimizedCompileJob* current = osr_buffer_[i];
     if (current != NULL &&
@@ -319,7 +323,7 @@
 
 
 bool OptimizingCompilerThread::IsQueuedForOSR(JSFunction* function) {
-  ASSERT(!IsOptimizerThread());
+  DCHECK(!IsOptimizerThread());
   for (int i = 0; i < osr_buffer_capacity_; i++) {
     OptimizedCompileJob* current = osr_buffer_[i];
     if (current != NULL && *current->info()->closure() == function) {
@@ -331,7 +335,7 @@
 
 
 void OptimizingCompilerThread::AddToOsrBuffer(OptimizedCompileJob* job) {
-  ASSERT(!IsOptimizerThread());
+  DCHECK(!IsOptimizerThread());
   // Find the next slot that is empty or has a stale job.
   OptimizedCompileJob* stale = NULL;
   while (true) {
@@ -342,7 +346,7 @@
 
   // Add to found slot and dispose the evicted job.
   if (stale != NULL) {
-    ASSERT(stale->IsWaitingForInstall());
+    DCHECK(stale->IsWaitingForInstall());
     CompilationInfo* info = stale->info();
     if (FLAG_trace_osr) {
       PrintF("[COSR - Discarded ");
@@ -364,7 +368,7 @@
 
 
 bool OptimizingCompilerThread::IsOptimizerThread() {
-  LockGuard<Mutex> lock_guard(&thread_id_mutex_);
+  base::LockGuard<base::Mutex> lock_guard(&thread_id_mutex_);
   return ThreadId::Current().ToInteger() == thread_id_;
 }
 #endif
diff --git a/src/optimizing-compiler-thread.h b/src/optimizing-compiler-thread.h
index a6bcbed..6ff4f2a 100644
--- a/src/optimizing-compiler-thread.h
+++ b/src/optimizing-compiler-thread.h
@@ -6,11 +6,11 @@
 #define V8_OPTIMIZING_COMPILER_THREAD_H_
 
 #include "src/base/atomicops.h"
+#include "src/base/platform/mutex.h"
+#include "src/base/platform/platform.h"
+#include "src/base/platform/time.h"
 #include "src/flags.h"
 #include "src/list.h"
-#include "src/platform.h"
-#include "src/platform/mutex.h"
-#include "src/platform/time.h"
 #include "src/unbound-queue-inl.h"
 
 namespace v8 {
@@ -20,24 +20,24 @@
 class OptimizedCompileJob;
 class SharedFunctionInfo;
 
-class OptimizingCompilerThread : public Thread {
+class OptimizingCompilerThread : public base::Thread {
  public:
-  explicit OptimizingCompilerThread(Isolate *isolate) :
-      Thread("OptimizingCompilerThread"),
+  explicit OptimizingCompilerThread(Isolate* isolate)
+      : Thread(Options("OptimizingCompilerThread")),
 #ifdef DEBUG
-      thread_id_(0),
+        thread_id_(0),
 #endif
-      isolate_(isolate),
-      stop_semaphore_(0),
-      input_queue_semaphore_(0),
-      input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
-      input_queue_length_(0),
-      input_queue_shift_(0),
-      osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
-      osr_buffer_cursor_(0),
-      osr_hits_(0),
-      osr_attempts_(0),
-      blocked_jobs_(0) {
+        isolate_(isolate),
+        stop_semaphore_(0),
+        input_queue_semaphore_(0),
+        input_queue_capacity_(FLAG_concurrent_recompilation_queue_length),
+        input_queue_length_(0),
+        input_queue_shift_(0),
+        osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
+        osr_buffer_cursor_(0),
+        osr_hits_(0),
+        osr_attempts_(0),
+        blocked_jobs_(0) {
     base::NoBarrier_Store(&stop_thread_,
                           static_cast<base::AtomicWord>(CONTINUE));
     input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
@@ -63,7 +63,7 @@
   bool IsQueuedForOSR(JSFunction* function);
 
   inline bool IsQueueAvailable() {
-    LockGuard<Mutex> access_input_queue(&input_queue_mutex_);
+    base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
     return input_queue_length_ < input_queue_capacity_;
   }
 
@@ -98,26 +98,26 @@
 
   inline int InputQueueIndex(int i) {
     int result = (i + input_queue_shift_) % input_queue_capacity_;
-    ASSERT_LE(0, result);
-    ASSERT_LT(result, input_queue_capacity_);
+    DCHECK_LE(0, result);
+    DCHECK_LT(result, input_queue_capacity_);
     return result;
   }
 
 #ifdef DEBUG
   int thread_id_;
-  Mutex thread_id_mutex_;
+  base::Mutex thread_id_mutex_;
 #endif
 
   Isolate* isolate_;
-  Semaphore stop_semaphore_;
-  Semaphore input_queue_semaphore_;
+  base::Semaphore stop_semaphore_;
+  base::Semaphore input_queue_semaphore_;
 
   // Circular queue of incoming recompilation tasks (including OSR).
   OptimizedCompileJob** input_queue_;
   int input_queue_capacity_;
   int input_queue_length_;
   int input_queue_shift_;
-  Mutex input_queue_mutex_;
+  base::Mutex input_queue_mutex_;
 
   // Queue of recompilation tasks ready to be installed (excluding OSR).
   UnboundQueue<OptimizedCompileJob*> output_queue_;
@@ -128,8 +128,8 @@
   int osr_buffer_cursor_;
 
   volatile base::AtomicWord stop_thread_;
-  TimeDelta time_spent_compiling_;
-  TimeDelta time_spent_total_;
+  base::TimeDelta time_spent_compiling_;
+  base::TimeDelta time_spent_total_;
 
   int osr_hits_;
   int osr_attempts_;
diff --git a/src/ostreams.cc b/src/ostreams.cc
new file mode 100644
index 0000000..e927e6b
--- /dev/null
+++ b/src/ostreams.cc
@@ -0,0 +1,189 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ostreams.h"
+
+#include <algorithm>
+#include <cmath>
+
+#include "src/base/platform/platform.h"  // For isinf/isnan with MSVC
+
+#if V8_OS_WIN
+#define snprintf sprintf_s
+#endif
+
+namespace v8 {
+namespace internal {
+
+// Be lazy and delegate the value=>char conversion to snprintf.
+template<class T>
+OStream& OStream::print(const char* format, T x) {
+  char buf[32];
+  int n = snprintf(buf, sizeof(buf), format, x);
+  return (n < 0) ? *this : write(buf, n);
+}
+
+
+OStream& OStream::operator<<(short x) {  // NOLINT(runtime/int)
+  return print(hex_ ? "%hx" : "%hd", x);
+}
+
+
+OStream& OStream::operator<<(unsigned short x) {  // NOLINT(runtime/int)
+  return print(hex_ ? "%hx" : "%hu", x);
+}
+
+
+OStream& OStream::operator<<(int x) {
+  return print(hex_ ? "%x" : "%d", x);
+}
+
+
+OStream& OStream::operator<<(unsigned int x) {
+  return print(hex_ ? "%x" : "%u", x);
+}
+
+
+OStream& OStream::operator<<(long x) {  // NOLINT(runtime/int)
+  return print(hex_ ? "%lx" : "%ld", x);
+}
+
+
+OStream& OStream::operator<<(unsigned long x) {  // NOLINT(runtime/int)
+  return print(hex_ ? "%lx" : "%lu", x);
+}
+
+
+OStream& OStream::operator<<(long long x) {  // NOLINT(runtime/int)
+  return print(hex_ ? "%llx" : "%lld", x);
+}
+
+
+OStream& OStream::operator<<(unsigned long long x) {  // NOLINT(runtime/int)
+  return print(hex_ ? "%llx" : "%llu", x);
+}
+
+
+OStream& OStream::operator<<(double x) {
+  if (std::isinf(x)) return *this << (x < 0 ? "-inf" : "inf");
+  if (std::isnan(x)) return *this << "nan";
+  return print("%g", x);
+}
+
+
+OStream& OStream::operator<<(void* x) {
+  return print("%p", x);
+}
+
+
+OStream& OStream::operator<<(char x) {
+  return put(x);
+}
+
+
+OStream& OStream::operator<<(signed char x) {
+  return put(x);
+}
+
+
+OStream& OStream::operator<<(unsigned char x) {
+  return put(x);
+}
+
+
+OStream& OStream::dec() {
+  hex_ = false;
+  return *this;
+}
+
+
+OStream& OStream::hex() {
+  hex_ = true;
+  return *this;
+}
+
+
+OStream& flush(OStream& os) {  // NOLINT(runtime/references)
+  return os.flush();
+}
+
+
+OStream& endl(OStream& os) {  // NOLINT(runtime/references)
+  return flush(os.put('\n'));
+}
+
+
+OStream& hex(OStream& os) {  // NOLINT(runtime/references)
+  return os.hex();
+}
+
+
+OStream& dec(OStream& os) {  // NOLINT(runtime/references)
+  return os.dec();
+}
+
+
+OStringStream& OStringStream::write(const char* s, size_t n) {
+  size_t new_size = size_ + n;
+  if (new_size < size_) return *this;  // Overflow => no-op.
+  reserve(new_size + 1);
+  memcpy(data_ + size_, s, n);
+  size_ = new_size;
+  data_[size_] = '\0';
+  return *this;
+}
+
+
+OStringStream& OStringStream::flush() {
+  return *this;
+}
+
+
+void OStringStream::reserve(size_t requested_capacity) {
+  if (requested_capacity <= capacity_) return;
+  size_t new_capacity =  // Handle possible overflow by not doubling.
+      std::max(std::max(capacity_ * 2, capacity_), requested_capacity);
+  char* new_data = allocate(new_capacity);
+  memcpy(new_data, data_, size_);
+  deallocate(data_, capacity_);
+  capacity_ = new_capacity;
+  data_ = new_data;
+}
+
+
+OFStream& OFStream::write(const char* s, size_t n) {
+  if (f_) fwrite(s, n, 1, f_);
+  return *this;
+}
+
+
+OFStream& OFStream::flush() {
+  if (f_) fflush(f_);
+  return *this;
+}
+
+
+// Locale-independent predicates.
+static bool IsPrint(uint16_t c) { return 0x20 <= c && c <= 0x7e; }
+static bool IsSpace(uint16_t c) { return (0x9 <= c && c <= 0xd) || c == 0x20; }
+static bool IsOK(uint16_t c) { return (IsPrint(c) || IsSpace(c)) && c != '\\'; }
+
+
+static OStream& PrintUC16(OStream& os, uint16_t c, bool (*pred)(uint16_t)) {
+  char buf[10];
+  const char* format = pred(c) ? "%c" : (c <= 0xff) ? "\\x%02x" : "\\u%04x";
+  snprintf(buf, sizeof(buf), format, c);
+  return os << buf;
+}
+
+
+OStream& operator<<(OStream& os, const AsReversiblyEscapedUC16& c) {
+  return PrintUC16(os, c.value, IsOK);
+}
+
+
+OStream& operator<<(OStream& os, const AsUC16& c) {
+  return PrintUC16(os, c.value, IsPrint);
+}
+} }  // namespace v8::internal
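The AsUC16 and AsReversiblyEscapedUC16 wrappers (declared in ostreams.h below) select which predicate PrintUC16 applies. A hedged usage sketch, assuming v8::internal is in scope (illustrative, not part of the patch):

OStringStream os;
os << AsReversiblyEscapedUC16('\\') << ' ' << AsUC16(0x263a);
// os.c_str() now holds "\x5c \u263a": the backslash is hex-escaped so the
// encoding stays reversible; the character above 0xff gets a \u escape.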
diff --git a/src/ostreams.h b/src/ostreams.h
new file mode 100644
index 0000000..508a88d
--- /dev/null
+++ b/src/ostreams.h
@@ -0,0 +1,143 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_OSTREAMS_H_
+#define V8_OSTREAMS_H_
+
+#include <stddef.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "include/v8config.h"
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+
+// An abstract base class for output streams with a cut-down standard interface.
+class OStream {
+ public:
+  OStream() : hex_(false) { }
+  virtual ~OStream() { }
+
+  // For manipulators like 'os << endl' or 'os << flush', etc.
+  OStream& operator<<(OStream& (*manipulator)(OStream& os)) {
+    return manipulator(*this);
+  }
+
+  // Numeric conversions.
+  OStream& operator<<(short x);  // NOLINT(runtime/int)
+  OStream& operator<<(unsigned short x);  // NOLINT(runtime/int)
+  OStream& operator<<(int x);
+  OStream& operator<<(unsigned int x);
+  OStream& operator<<(long x);  // NOLINT(runtime/int)
+  OStream& operator<<(unsigned long x);  // NOLINT(runtime/int)
+  OStream& operator<<(long long x);  // NOLINT(runtime/int)
+  OStream& operator<<(unsigned long long x);  // NOLINT(runtime/int)
+  OStream& operator<<(double x);
+  OStream& operator<<(void* x);
+
+  // Character output.
+  OStream& operator<<(char x);
+  OStream& operator<<(signed char x);
+  OStream& operator<<(unsigned char x);
+  OStream& operator<<(const char* s) { return write(s, strlen(s)); }
+  OStream& put(char c) { return write(&c, 1); }
+
+  // Primitive format flag handling; can be extended if needed.
+  OStream& dec();
+  OStream& hex();
+
+  virtual OStream& write(const char* s, size_t n) = 0;
+  virtual OStream& flush() = 0;
+
+ private:
+  template<class T> OStream& print(const char* format, T x);
+
+  bool hex_;
+
+  DISALLOW_COPY_AND_ASSIGN(OStream);
+};
+
+
+// Some manipulators.
+OStream& flush(OStream& os);  // NOLINT(runtime/references)
+OStream& endl(OStream& os);  // NOLINT(runtime/references)
+OStream& dec(OStream& os);  // NOLINT(runtime/references)
+OStream& hex(OStream& os);  // NOLINT(runtime/references)
+
+
+// An output stream writing to a character buffer.
+class OStringStream: public OStream {
+ public:
+  OStringStream() : size_(0), capacity_(32), data_(allocate(capacity_)) {
+    data_[0] = '\0';
+  }
+  ~OStringStream() { deallocate(data_, capacity_); }
+
+  size_t size() const { return size_; }
+  size_t capacity() const { return capacity_; }
+  const char* data() const { return data_; }
+
+  // Internally, our character data is always 0-terminated.
+  const char* c_str() const { return data(); }
+
+  virtual OStringStream& write(const char* s, size_t n) OVERRIDE;
+  virtual OStringStream& flush() OVERRIDE;
+
+ private:
+  // Primitive allocator interface; can be extracted if needed.
+  static char* allocate(size_t n) { return new char[n]; }
+  static void deallocate(char* s, size_t n) { delete[] s; }
+
+  void reserve(size_t requested_capacity);
+
+  size_t size_;
+  size_t capacity_;
+  char* data_;
+
+  DISALLOW_COPY_AND_ASSIGN(OStringStream);
+};
+
+
+// An output stream writing to a file.
+class OFStream: public OStream {
+ public:
+  explicit OFStream(FILE* f) : f_(f) { }
+  virtual ~OFStream() { }
+
+  virtual OFStream& write(const char* s, size_t n) OVERRIDE;
+  virtual OFStream& flush() OVERRIDE;
+
+ private:
+  FILE* const f_;
+
+  DISALLOW_COPY_AND_ASSIGN(OFStream);
+};
+
+
+// Wrappers to disambiguate uint16_t and uc16.
+struct AsUC16 {
+  explicit AsUC16(uint16_t v) : value(v) {}
+  uint16_t value;
+};
+
+
+struct AsReversiblyEscapedUC16 {
+  explicit AsReversiblyEscapedUC16(uint16_t v) : value(v) {}
+  uint16_t value;
+};
+
+
+// Writes the given character to the output, escaping everything outside the
+// printable/space ASCII range. Additionally escapes '\', making the escaping
+// reversible.
+OStream& operator<<(OStream& os, const AsReversiblyEscapedUC16& c);
+
+// Writes the given character to the output, escaping everything outside the
+// printable ASCII range.
+OStream& operator<<(OStream& os, const AsUC16& c);
+} }  // namespace v8::internal
+
+#endif  // V8_OSTREAMS_H_
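A hedged end-to-end sketch of the new stream interface (hypothetical caller; only names declared above are used):

#include <stdio.h>
#include "src/ostreams.h"

void Demo() {
  v8::internal::OStringStream os;
  os << "code: " << v8::internal::hex << 255 << v8::internal::dec << " ("
     << 255 << ")" << v8::internal::endl;
  // os.c_str() now holds "code: ff (255)\n", 0-terminated.
  v8::internal::OFStream out(stdout);
  out << os.c_str() << v8::internal::flush;
}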
diff --git a/src/parser.cc b/src/parser.cc
index fd0dd29..7cef210 100644
--- a/src/parser.cc
+++ b/src/parser.cc
@@ -6,13 +6,14 @@
 
 #include "src/api.h"
 #include "src/ast.h"
+#include "src/bailout-reason.h"
+#include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
 #include "src/char-predicates-inl.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
 #include "src/messages.h"
 #include "src/parser.h"
-#include "src/platform.h"
 #include "src/preparser.h"
 #include "src/runtime.h"
 #include "src/scanner-character-streams.h"
@@ -143,7 +144,7 @@
   }
   RegExpTree* atom;
   if (characters_ != NULL) {
-    ASSERT(last_added_ == ADD_CHAR);
+    DCHECK(last_added_ == ADD_CHAR);
     // Last atom was character.
     Vector<const uc16> char_vector = characters_->ToConstVector();
     int num_chars = char_vector.length();
@@ -156,11 +157,11 @@
     atom = new(zone()) RegExpAtom(char_vector);
     FlushText();
   } else if (text_.length() > 0) {
-    ASSERT(last_added_ == ADD_ATOM);
+    DCHECK(last_added_ == ADD_ATOM);
     atom = text_.RemoveLast();
     FlushText();
   } else if (terms_.length() > 0) {
-    ASSERT(last_added_ == ADD_ATOM);
+    DCHECK(last_added_ == ADD_ATOM);
     atom = terms_.RemoveLast();
     if (atom->max_match() == 0) {
       // Guaranteed to only match an empty string.
@@ -182,151 +183,93 @@
 }
 
 
-ScriptData* ScriptData::New(const char* data, int length) {
-  // The length is obviously invalid.
-  if (length % sizeof(unsigned) != 0) {
-    return NULL;
-  }
-
-  int deserialized_data_length = length / sizeof(unsigned);
-  unsigned* deserialized_data;
-  bool owns_store = reinterpret_cast<intptr_t>(data) % sizeof(unsigned) != 0;
-  if (owns_store) {
-    // Copy the data to align it.
-    deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
-    i::CopyBytes(reinterpret_cast<char*>(deserialized_data),
-                 data, static_cast<size_t>(length));
-  } else {
-    // If aligned, don't create a copy of the data.
-    deserialized_data = reinterpret_cast<unsigned*>(const_cast<char*>(data));
-  }
-  return new ScriptData(
-      Vector<unsigned>(deserialized_data, deserialized_data_length),
-      owns_store);
-}
-
-
-FunctionEntry ScriptData::GetFunctionEntry(int start) {
+FunctionEntry ParseData::GetFunctionEntry(int start) {
   // The current pre-data entry must be a FunctionEntry with the given
   // start position.
-  if ((function_index_ + FunctionEntry::kSize <= store_.length())
-      && (static_cast<int>(store_[function_index_]) == start)) {
+  if ((function_index_ + FunctionEntry::kSize <= Length()) &&
+      (static_cast<int>(Data()[function_index_]) == start)) {
     int index = function_index_;
     function_index_ += FunctionEntry::kSize;
-    return FunctionEntry(store_.SubVector(index,
-                                          index + FunctionEntry::kSize));
+    Vector<unsigned> subvector(&(Data()[index]), FunctionEntry::kSize);
+    return FunctionEntry(subvector);
   }
   return FunctionEntry();
 }
 
 
-int ScriptData::GetSymbolIdentifier() {
-  return ReadNumber(&symbol_data_);
+int ParseData::FunctionCount() {
+  int functions_size = FunctionsSize();
+  if (functions_size < 0) return 0;
+  if (functions_size % FunctionEntry::kSize != 0) return 0;
+  return functions_size / FunctionEntry::kSize;
 }
 
 
-bool ScriptData::SanityCheck() {
+bool ParseData::IsSane() {
   // Check that the header data is valid and doesn't point to
   // positions outside the store.
-  if (store_.length() < PreparseDataConstants::kHeaderSize) return false;
-  if (magic() != PreparseDataConstants::kMagicNumber) return false;
-  if (version() != PreparseDataConstants::kCurrentVersion) return false;
-  if (has_error()) {
-    // Extra sane sanity check for error message encoding.
-    if (store_.length() <= PreparseDataConstants::kHeaderSize
-                         + PreparseDataConstants::kMessageTextPos) {
-      return false;
-    }
-    if (Read(PreparseDataConstants::kMessageStartPos) >
-        Read(PreparseDataConstants::kMessageEndPos)) {
-      return false;
-    }
-    unsigned arg_count = Read(PreparseDataConstants::kMessageArgCountPos);
-    int pos = PreparseDataConstants::kMessageTextPos;
-    for (unsigned int i = 0; i <= arg_count; i++) {
-      if (store_.length() <= PreparseDataConstants::kHeaderSize + pos) {
-        return false;
-      }
-      int length = static_cast<int>(Read(pos));
-      if (length < 0) return false;
-      pos += 1 + length;
-    }
-    if (store_.length() < PreparseDataConstants::kHeaderSize + pos) {
-      return false;
-    }
-    return true;
-  }
+  int data_length = Length();
+  if (data_length < PreparseDataConstants::kHeaderSize) return false;
+  if (Magic() != PreparseDataConstants::kMagicNumber) return false;
+  if (Version() != PreparseDataConstants::kCurrentVersion) return false;
+  if (HasError()) return false;
   // Check that the space allocated for function entries is sane.
-  int functions_size =
-      static_cast<int>(store_[PreparseDataConstants::kFunctionsSizeOffset]);
+  int functions_size = FunctionsSize();
   if (functions_size < 0) return false;
   if (functions_size % FunctionEntry::kSize != 0) return false;
   // Check that the total size has room for header and function entries.
   int minimum_size =
       PreparseDataConstants::kHeaderSize + functions_size;
-  if (store_.length() < minimum_size) return false;
+  if (data_length < minimum_size) return false;
   return true;
 }
 
 
-
-const char* ScriptData::ReadString(unsigned* start, int* chars) {
-  int length = start[0];
-  char* result = NewArray<char>(length + 1);
-  for (int i = 0; i < length; i++) {
-    result[i] = start[i + 1];
+void ParseData::Initialize() {
+  // Prepares state for use.
+  int data_length = Length();
+  if (data_length >= PreparseDataConstants::kHeaderSize) {
+    function_index_ = PreparseDataConstants::kHeaderSize;
   }
-  result[length] = '\0';
-  if (chars != NULL) *chars = length;
-  return result;
 }
 
 
-Scanner::Location ScriptData::MessageLocation() const {
-  int beg_pos = Read(PreparseDataConstants::kMessageStartPos);
-  int end_pos = Read(PreparseDataConstants::kMessageEndPos);
-  return Scanner::Location(beg_pos, end_pos);
+bool ParseData::HasError() {
+  return Data()[PreparseDataConstants::kHasErrorOffset];
 }
 
 
-bool ScriptData::IsReferenceError() const {
-  return Read(PreparseDataConstants::kIsReferenceErrorPos);
+unsigned ParseData::Magic() {
+  return Data()[PreparseDataConstants::kMagicOffset];
 }
 
 
-const char* ScriptData::BuildMessage() const {
-  unsigned* start = ReadAddress(PreparseDataConstants::kMessageTextPos);
-  return ReadString(start, NULL);
+unsigned ParseData::Version() {
+  return Data()[PreparseDataConstants::kVersionOffset];
 }
 
 
-const char* ScriptData::BuildArg() const {
-  int arg_count = Read(PreparseDataConstants::kMessageArgCountPos);
-  ASSERT(arg_count == 0 || arg_count == 1);
-  if (arg_count == 0) {
-    return NULL;
+int ParseData::FunctionsSize() {
+  return static_cast<int>(Data()[PreparseDataConstants::kFunctionsSizeOffset]);
+}
+
+
+void Parser::SetCachedData() {
+  if (compile_options() == ScriptCompiler::kNoCompileOptions) {
+    cached_parse_data_ = NULL;
+  } else {
+    DCHECK(info_->cached_data() != NULL);
+    if (compile_options() == ScriptCompiler::kConsumeParserCache) {
+      cached_parse_data_ = new ParseData(*info_->cached_data());
+    }
   }
-  // Position after text found by skipping past length field and
-  // length field content words.
-  int pos = PreparseDataConstants::kMessageTextPos + 1
-      + Read(PreparseDataConstants::kMessageTextPos);
-  int count = 0;
-  return ReadString(ReadAddress(pos), &count);
-}
-
-
-unsigned ScriptData::Read(int position) const {
-  return store_[PreparseDataConstants::kHeaderSize + position];
-}
-
-
-unsigned* ScriptData::ReadAddress(int position) const {
-  return &store_[PreparseDataConstants::kHeaderSize + position];
 }
 
 
 Scope* Parser::NewScope(Scope* parent, ScopeType scope_type) {
-  Scope* result = new(zone()) Scope(parent, scope_type, zone());
+  DCHECK(ast_value_factory());
+  Scope* result =
+      new (zone()) Scope(parent, scope_type, ast_value_factory(), zone());
   result->Initialize();
   return result;
 }
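IsSane() above drops the old error-message decoding entirely: cached parse data recorded with an error is simply rejected. A standalone model of the remaining header checks (not V8 code; the offsets, magic, and entry size are assumed stand-ins for PreparseDataConstants):

#include <cassert>
#include <vector>

enum { kMagicOffset, kVersionOffset, kHasErrorOffset, kFunctionsSizeOffset };
const int kHeaderSize = 4;
const unsigned kMagic = 0xC0DE, kVersion = 8;  // assumed stand-in values
const int kFunctionEntrySize = 3;              // words per entry (assumed)

bool IsSane(const std::vector<unsigned>& d) {
  if (static_cast<int>(d.size()) < kHeaderSize) return false;
  if (d[kMagicOffset] != kMagic || d[kVersionOffset] != kVersion) return false;
  if (d[kHasErrorOffset]) return false;  // reject data recorded with errors
  int functions_size = static_cast<int>(d[kFunctionsSizeOffset]);
  if (functions_size < 0) return false;
  if (functions_size % kFunctionEntrySize != 0) return false;
  return static_cast<int>(d.size()) >= kHeaderSize + functions_size;
}

int main() {
  std::vector<unsigned> ok = {kMagic, kVersion, 0, 3, 10, 20, 30};
  std::vector<unsigned> bad = {kMagic, kVersion, 1, 0};  // has_error set
  assert(IsSane(ok) && !IsSane(bad));
}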
@@ -399,15 +342,43 @@
 // ----------------------------------------------------------------------------
 // Implementation of Parser
 
-bool ParserTraits::IsEvalOrArguments(Handle<String> identifier) const {
-  Factory* factory = parser_->isolate()->factory();
-  return identifier.is_identical_to(factory->eval_string())
-      || identifier.is_identical_to(factory->arguments_string());
+class ParserTraits::Checkpoint
+    : public ParserBase<ParserTraits>::CheckpointBase {
+ public:
+  explicit Checkpoint(ParserBase<ParserTraits>* parser)
+      : CheckpointBase(parser), parser_(parser) {
+    saved_ast_node_id_gen_ = *parser_->ast_node_id_gen_;
+  }
+
+  void Restore() {
+    CheckpointBase::Restore();
+    *parser_->ast_node_id_gen_ = saved_ast_node_id_gen_;
+  }
+
+ private:
+  ParserBase<ParserTraits>* parser_;
+  AstNode::IdGen saved_ast_node_id_gen_;
+};
+
+
+bool ParserTraits::IsEvalOrArguments(const AstRawString* identifier) const {
+  return identifier == parser_->ast_value_factory()->eval_string() ||
+         identifier == parser_->ast_value_factory()->arguments_string();
+}
+
+
+bool ParserTraits::IsPrototype(const AstRawString* identifier) const {
+  return identifier == parser_->ast_value_factory()->prototype_string();
+}
+
+
+bool ParserTraits::IsConstructor(const AstRawString* identifier) const {
+  return identifier == parser_->ast_value_factory()->constructor_string();
 }
 
 
 bool ParserTraits::IsThisProperty(Expression* expression) {
-  ASSERT(expression != NULL);
+  DCHECK(expression != NULL);
   Property* property = expression->AsProperty();
   return property != NULL &&
       property->obj()->AsVariableProxy() != NULL &&
@@ -424,17 +395,17 @@
 void ParserTraits::PushPropertyName(FuncNameInferrer* fni,
                                     Expression* expression) {
   if (expression->IsPropertyName()) {
-    fni->PushLiteralName(expression->AsLiteral()->AsPropertyName());
+    fni->PushLiteralName(expression->AsLiteral()->AsRawPropertyName());
   } else {
     fni->PushLiteralName(
-        parser_->isolate()->factory()->anonymous_function_string());
+        parser_->ast_value_factory()->anonymous_function_string());
   }
 }
 
 
 void ParserTraits::CheckAssigningFunctionLiteralToProperty(Expression* left,
                                                            Expression* right) {
-  ASSERT(left != NULL);
+  DCHECK(left != NULL);
   if (left->AsProperty() != NULL &&
       right->AsFunctionLiteral() != NULL) {
     right->AsFunctionLiteral()->set_pretenure();
@@ -446,17 +417,16 @@
                                          Scope* scope) {
   VariableProxy* callee = expression->AsVariableProxy();
   if (callee != NULL &&
-      callee->IsVariable(parser_->isolate()->factory()->eval_string())) {
+      callee->raw_name() == parser_->ast_value_factory()->eval_string()) {
     scope->DeclarationScope()->RecordEvalCall();
   }
 }
 
 
-Expression* ParserTraits::MarkExpressionAsLValue(Expression* expression) {
-  VariableProxy* proxy = expression != NULL
-      ? expression->AsVariableProxy()
-      : NULL;
-  if (proxy != NULL) proxy->MarkAsLValue();
+Expression* ParserTraits::MarkExpressionAsAssigned(Expression* expression) {
+  VariableProxy* proxy =
+      expression != NULL ? expression->AsVariableProxy() : NULL;
+  if (proxy != NULL) proxy->set_is_assigned();
   return expression;
 }
 
@@ -464,10 +434,10 @@
 bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
     Expression** x, Expression* y, Token::Value op, int pos,
     AstNodeFactory<AstConstructionVisitor>* factory) {
-  if ((*x)->AsLiteral() && (*x)->AsLiteral()->value()->IsNumber() &&
-      y->AsLiteral() && y->AsLiteral()->value()->IsNumber()) {
-    double x_val = (*x)->AsLiteral()->value()->Number();
-    double y_val = y->AsLiteral()->value()->Number();
+  if ((*x)->AsLiteral() && (*x)->AsLiteral()->raw_value()->IsNumber() &&
+      y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) {
+    double x_val = (*x)->AsLiteral()->raw_value()->AsNumber();
+    double y_val = y->AsLiteral()->raw_value()->AsNumber();
     switch (op) {
       case Token::ADD:
         *x = factory->NewNumberLiteral(x_val + y_val, pos);
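The rewrite above keeps the parse-time constant folding but reads operands through the new AstValue layer instead of heap Handles. A standalone model of the shortcut for ADD (not V8 code):

#include <cassert>

// Fold lhs+rhs at parse time when both operands are number literals;
// otherwise the caller falls back to building a real BinaryOperation node.
bool TryFoldAdd(double* x, bool x_is_number, double y, bool y_is_number) {
  if (!x_is_number || !y_is_number) return false;
  *x += y;  // replace the left literal with the folded value
  return true;
}

int main() {
  double x = 1;
  assert(TryFoldAdd(&x, true, 2, true) && x == 3);
}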
@@ -524,18 +494,16 @@
 Expression* ParserTraits::BuildUnaryExpression(
     Expression* expression, Token::Value op, int pos,
     AstNodeFactory<AstConstructionVisitor>* factory) {
-  ASSERT(expression != NULL);
+  DCHECK(expression != NULL);
   if (expression->IsLiteral()) {
-    Handle<Object> literal = expression->AsLiteral()->value();
+    const AstValue* literal = expression->AsLiteral()->raw_value();
     if (op == Token::NOT) {
       // Convert the literal to a boolean condition and negate it.
       bool condition = literal->BooleanValue();
-      Handle<Object> result =
-          parser_->isolate()->factory()->ToBoolean(!condition);
-      return factory->NewLiteral(result, pos);
+      return factory->NewBooleanLiteral(!condition, pos);
     } else if (literal->IsNumber()) {
       // Compute some expressions involving only number literals.
-      double value = literal->Number();
+      double value = literal->AsNumber();
       switch (op) {
         case Token::ADD:
           return expression;
@@ -569,51 +537,40 @@
 
 Expression* ParserTraits::NewThrowReferenceError(const char* message, int pos) {
   return NewThrowError(
-      parser_->isolate()->factory()->MakeReferenceError_string(),
-      message, HandleVector<Object>(NULL, 0), pos);
+      parser_->ast_value_factory()->make_reference_error_string(), message,
+      NULL, pos);
 }
 
 
 Expression* ParserTraits::NewThrowSyntaxError(
-    const char* message, Handle<Object> arg, int pos) {
-  int argc = arg.is_null() ? 0 : 1;
-  Vector< Handle<Object> > arguments = HandleVector<Object>(&arg, argc);
-  return NewThrowError(
-      parser_->isolate()->factory()->MakeSyntaxError_string(),
-      message, arguments, pos);
+    const char* message, const AstRawString* arg, int pos) {
+  return NewThrowError(parser_->ast_value_factory()->make_syntax_error_string(),
+                       message, arg, pos);
 }
 
 
 Expression* ParserTraits::NewThrowTypeError(
-    const char* message, Handle<Object> arg, int pos) {
-  int argc = arg.is_null() ? 0 : 1;
-  Vector< Handle<Object> > arguments = HandleVector<Object>(&arg, argc);
-  return NewThrowError(
-      parser_->isolate()->factory()->MakeTypeError_string(),
-      message, arguments, pos);
+    const char* message, const AstRawString* arg, int pos) {
+  return NewThrowError(parser_->ast_value_factory()->make_type_error_string(),
+                       message, arg, pos);
 }
 
 
 Expression* ParserTraits::NewThrowError(
-    Handle<String> constructor, const char* message,
-    Vector<Handle<Object> > arguments, int pos) {
+    const AstRawString* constructor, const char* message,
+    const AstRawString* arg, int pos) {
   Zone* zone = parser_->zone();
-  Factory* factory = parser_->isolate()->factory();
-  int argc = arguments.length();
-  Handle<FixedArray> elements = factory->NewFixedArray(argc, TENURED);
-  for (int i = 0; i < argc; i++) {
-    Handle<Object> element = arguments[i];
-    if (!element.is_null()) {
-      elements->set(i, *element);
-    }
+  int argc = arg != NULL ? 1 : 0;
+  const AstRawString* type =
+      parser_->ast_value_factory()->GetOneByteString(message);
+  ZoneList<const AstRawString*>* array =
+      new (zone) ZoneList<const AstRawString*>(argc, zone);
+  if (arg != NULL) {
+    array->Add(arg, zone);
   }
-  Handle<JSArray> array =
-      factory->NewJSArrayWithElements(elements, FAST_ELEMENTS, TENURED);
-
-  ZoneList<Expression*>* args = new(zone) ZoneList<Expression*>(2, zone);
-  Handle<String> type = factory->InternalizeUtf8String(message);
-  args->Add(parser_->factory()->NewLiteral(type, pos), zone);
-  args->Add(parser_->factory()->NewLiteral(array, pos), zone);
+  ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(2, zone);
+  args->Add(parser_->factory()->NewStringLiteral(type, pos), zone);
+  args->Add(parser_->factory()->NewStringListLiteral(array, pos), zone);
   CallRuntime* call_constructor =
       parser_->factory()->NewCallRuntime(constructor, NULL, args, pos);
   return parser_->factory()->NewThrow(call_constructor, pos);
@@ -634,13 +591,21 @@
   parser_->pending_error_location_ = source_location;
   parser_->pending_error_message_ = message;
   parser_->pending_error_char_arg_ = arg;
-  parser_->pending_error_arg_ = Handle<String>();
+  parser_->pending_error_arg_ = NULL;
   parser_->pending_error_is_reference_error_ = is_reference_error;
 }
 
 
 void ParserTraits::ReportMessage(const char* message,
-                                 MaybeHandle<String> arg,
+                                 const char* arg,
+                                 bool is_reference_error) {
+  Scanner::Location source_location = parser_->scanner()->location();
+  ReportMessageAt(source_location, message, arg, is_reference_error);
+}
+
+
+void ParserTraits::ReportMessage(const char* message,
+                                 const AstRawString* arg,
                                  bool is_reference_error) {
   Scanner::Location source_location = parser_->scanner()->location();
   ReportMessageAt(source_location, message, arg, is_reference_error);
@@ -649,7 +614,7 @@
 
 void ParserTraits::ReportMessageAt(Scanner::Location source_location,
                                    const char* message,
-                                   MaybeHandle<String> arg,
+                                   const AstRawString* arg,
                                    bool is_reference_error) {
   if (parser_->stack_overflow()) {
     // Suppress the error message (syntax error or such) in the presence of a
@@ -666,58 +631,77 @@
 }
 
 
-Handle<String> ParserTraits::GetSymbol(Scanner* scanner) {
-  Handle<String> result =
-      parser_->scanner()->AllocateInternalizedString(parser_->isolate());
-  ASSERT(!result.is_null());
+const AstRawString* ParserTraits::GetSymbol(Scanner* scanner) {
+  const AstRawString* result =
+      parser_->scanner()->CurrentSymbol(parser_->ast_value_factory());
+  DCHECK(result != NULL);
   return result;
 }
 
 
-Handle<String> ParserTraits::NextLiteralString(Scanner* scanner,
-                                               PretenureFlag tenured) {
-  return scanner->AllocateNextLiteralString(parser_->isolate(), tenured);
+const AstRawString* ParserTraits::GetNumberAsSymbol(Scanner* scanner) {
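+  // Renders the scanned numeric literal back into its canonical decimal
+  // string (e.g. both 1e3 and 1000 yield "1000") so the number can stand in
+  // where a string symbol is expected, such as a numeric property name.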
+  double double_value = parser_->scanner()->DoubleValue();
+  char array[100];
+  const char* string =
+      DoubleToCString(double_value, Vector<char>(array, arraysize(array)));
+  return ast_value_factory()->GetOneByteString(string);
+}
+
+
+const AstRawString* ParserTraits::GetNextSymbol(Scanner* scanner) {
+  return parser_->scanner()->NextSymbol(parser_->ast_value_factory());
 }
 
 
 Expression* ParserTraits::ThisExpression(
-    Scope* scope,
-    AstNodeFactory<AstConstructionVisitor>* factory) {
-  return factory->NewVariableProxy(scope->receiver());
+    Scope* scope, AstNodeFactory<AstConstructionVisitor>* factory, int pos) {
+  return factory->NewVariableProxy(scope->receiver(), pos);
 }
 
+Expression* ParserTraits::SuperReference(
+    Scope* scope, AstNodeFactory<AstConstructionVisitor>* factory, int pos) {
+  return factory->NewSuperReference(
+      ThisExpression(scope, factory, pos)->AsVariableProxy(),
+      pos);
+}
+
+Expression* ParserTraits::ClassLiteral(
+    const AstRawString* name, Expression* extends, Expression* constructor,
+    ZoneList<ObjectLiteral::Property*>* properties, int pos,
+    AstNodeFactory<AstConstructionVisitor>* factory) {
+  return factory->NewClassLiteral(name, extends, constructor, properties, pos);
+}
 
 Literal* ParserTraits::ExpressionFromLiteral(
     Token::Value token, int pos,
     Scanner* scanner,
     AstNodeFactory<AstConstructionVisitor>* factory) {
-  Factory* isolate_factory = parser_->isolate()->factory();
   switch (token) {
     case Token::NULL_LITERAL:
-      return factory->NewLiteral(isolate_factory->null_value(), pos);
+      return factory->NewNullLiteral(pos);
     case Token::TRUE_LITERAL:
-      return factory->NewLiteral(isolate_factory->true_value(), pos);
+      return factory->NewBooleanLiteral(true, pos);
     case Token::FALSE_LITERAL:
-      return factory->NewLiteral(isolate_factory->false_value(), pos);
+      return factory->NewBooleanLiteral(false, pos);
     case Token::NUMBER: {
       double value = scanner->DoubleValue();
       return factory->NewNumberLiteral(value, pos);
     }
     default:
-      ASSERT(false);
+      DCHECK(false);
   }
   return NULL;
 }
 
 
 Expression* ParserTraits::ExpressionFromIdentifier(
-    Handle<String> name, int pos, Scope* scope,
+    const AstRawString* name, int pos, Scope* scope,
     AstNodeFactory<AstConstructionVisitor>* factory) {
   if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
   // The name may refer to a module instance object, so its type is unknown.
 #ifdef DEBUG
   if (FLAG_print_interface_details)
-    PrintF("# Variable %s ", name->ToAsciiArray());
+    PrintF("# Variable %.*s ", name->length(), name->raw_data());
 #endif
   Interface* interface = Interface::NewUnknown(parser_->zone());
   return scope->NewUnresolved(factory, name, interface, pos);
@@ -727,16 +711,28 @@
 Expression* ParserTraits::ExpressionFromString(
     int pos, Scanner* scanner,
     AstNodeFactory<AstConstructionVisitor>* factory) {
-  Handle<String> symbol = GetSymbol(scanner);
+  const AstRawString* symbol = GetSymbol(scanner);
   if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol);
-  return factory->NewLiteral(symbol, pos);
+  return factory->NewStringLiteral(symbol, pos);
+}
+
+
+Expression* ParserTraits::GetIterator(
+    Expression* iterable, AstNodeFactory<AstConstructionVisitor>* factory) {
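+  // Desugars to (roughly) `iterable[Symbol.iterator]()`: a property load of
+  // the iterator symbol, represented here by the internal "symbolIterator"
+  // literal, followed by a zero-argument call.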
+  Expression* iterator_symbol_literal =
+      factory->NewSymbolLiteral("symbolIterator", RelocInfo::kNoPosition);
+  int pos = iterable->position();
+  Expression* prop =
+      factory->NewProperty(iterable, iterator_symbol_literal, pos);
+  Zone* zone = parser_->zone();
+  ZoneList<Expression*>* args = new (zone) ZoneList<Expression*>(0, zone);
+  return factory->NewCall(prop, args, pos);
 }
 
 
 Literal* ParserTraits::GetLiteralTheHole(
     int position, AstNodeFactory<AstConstructionVisitor>* factory) {
-  return factory->NewLiteral(parser_->isolate()->factory()->the_hole_value(),
-                             RelocInfo::kNoPosition);
+  return factory->NewTheHoleLiteral(RelocInfo::kNoPosition);
 }
 
 
@@ -746,74 +742,82 @@
 
 
 FunctionLiteral* ParserTraits::ParseFunctionLiteral(
-    Handle<String> name,
-    Scanner::Location function_name_location,
-    bool name_is_strict_reserved,
-    bool is_generator,
-    int function_token_position,
-    FunctionLiteral::FunctionType type,
-    FunctionLiteral::ArityRestriction arity_restriction,
-    bool* ok) {
-  return parser_->ParseFunctionLiteral(name, function_name_location,
-                                       name_is_strict_reserved, is_generator,
-                                       function_token_position, type,
-                                       arity_restriction, ok);
+    const AstRawString* name, Scanner::Location function_name_location,
+    bool name_is_strict_reserved, FunctionKind kind,
+    int function_token_position, FunctionLiteral::FunctionType type,
+    FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
+  return parser_->ParseFunctionLiteral(
+      name, function_name_location, name_is_strict_reserved, kind,
+      function_token_position, type, arity_restriction, ok);
 }
 
 
-Parser::Parser(CompilationInfo* info)
-    : ParserBase<ParserTraits>(&scanner_,
-                               info->isolate()->stack_guard()->real_climit(),
-                               info->extension(),
-                               NULL,
-                               info->zone(),
-                               this),
-      isolate_(info->isolate()),
-      script_(info->script()),
-      scanner_(isolate_->unicode_cache()),
+Parser::Parser(CompilationInfo* info, ParseInfo* parse_info)
+    : ParserBase<ParserTraits>(&scanner_, parse_info->stack_limit,
+                               info->extension(), NULL, info->zone(),
+                               info->ast_node_id_gen(), this),
+      scanner_(parse_info->unicode_cache),
       reusable_preparser_(NULL),
       original_scope_(NULL),
       target_stack_(NULL),
-      cached_data_(NULL),
-      cached_data_mode_(NO_CACHED_DATA),
+      cached_parse_data_(NULL),
       info_(info),
       has_pending_error_(false),
       pending_error_message_(NULL),
-      pending_error_char_arg_(NULL) {
-  ASSERT(!script_.is_null());
-  isolate_->set_ast_node_id(0);
+      pending_error_arg_(NULL),
+      pending_error_char_arg_(NULL),
+      total_preparse_skipped_(0),
+      pre_parse_timer_(NULL) {
+  DCHECK(!script().is_null() || info->source_stream() != NULL);
   set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
   set_allow_modules(!info->is_native() && FLAG_harmony_modules);
   set_allow_natives_syntax(FLAG_allow_natives_syntax || info->is_native());
   set_allow_lazy(false);  // Must be explicitly enabled.
-  set_allow_generators(FLAG_harmony_generators);
-  set_allow_for_of(FLAG_harmony_iteration);
+  set_allow_arrow_functions(FLAG_harmony_arrow_functions);
   set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
+  set_allow_classes(FLAG_harmony_classes);
+  set_allow_harmony_object_literals(FLAG_harmony_object_literals);
+  for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
+       ++feature) {
+    use_counts_[feature] = 0;
+  }
+  if (info->ast_value_factory() == NULL) {
+    // info takes ownership of AstValueFactory.
+    info->SetAstValueFactory(
+        new AstValueFactory(zone(), parse_info->hash_seed));
+  }
 }
 
 
 FunctionLiteral* Parser::ParseProgram() {
   // TODO(bmeurer): We temporarily need to pass allow_nesting = true here,
   // see comment for HistogramTimerScope class.
+
+  // It's OK to use the counters here, since this function is only called in
+  // the main thread.
   HistogramTimerScope timer_scope(isolate()->counters()->parse(), true);
-  Handle<String> source(String::cast(script_->source()));
+  Handle<String> source(String::cast(script()->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
-  ElapsedTimer timer;
+  base::ElapsedTimer timer;
   if (FLAG_trace_parse) {
     timer.Start();
   }
-  fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
+  fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
 
   // Initialize parser state.
   CompleteParserRecorder recorder;
-  if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
+
+  if (compile_options() == ScriptCompiler::kProduceParserCache) {
     log_ = &recorder;
-  } else if (cached_data_mode_ == CONSUME_CACHED_DATA) {
-    (*cached_data_)->Initialize();
+  } else if (compile_options() == ScriptCompiler::kConsumeParserCache) {
+    cached_parse_data_->Initialize();
   }
 
   source = String::Flatten(source);
   FunctionLiteral* result;
+
+  Scope* top_scope = NULL;
+  Scope* eval_scope = NULL;
   if (source->IsExternalTwoByteString()) {
     // Notice that the stream is destroyed at the end of the branch block.
     // The last line of the blocks can't be moved outside, even though they're
@@ -821,12 +825,17 @@
     ExternalTwoByteStringUtf16CharacterStream stream(
         Handle<ExternalTwoByteString>::cast(source), 0, source->length());
     scanner_.Initialize(&stream);
-    result = DoParseProgram(info(), source);
+    result = DoParseProgram(info(), &top_scope, &eval_scope);
   } else {
     GenericStringUtf16CharacterStream stream(source, 0, source->length());
     scanner_.Initialize(&stream);
-    result = DoParseProgram(info(), source);
+    result = DoParseProgram(info(), &top_scope, &eval_scope);
   }
+  top_scope->set_end_position(source->length());
+  if (eval_scope != NULL) {
+    eval_scope->set_end_position(source->length());
+  }
+  HandleSourceURLComments();
 
   if (FLAG_trace_parse && result != NULL) {
     double ms = timer.Elapsed().InMillisecondsF();
@@ -841,58 +850,61 @@
     }
     PrintF(" - took %0.3f ms]\n", ms);
   }
-  if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
-    if (result != NULL) {
-      Vector<unsigned> store = recorder.ExtractData();
-      *cached_data_ = new ScriptData(store);
-    }
+  if (compile_options() == ScriptCompiler::kProduceParserCache) {
+    if (result != NULL) *info_->cached_data() = recorder.GetScriptData();
     log_ = NULL;
   }
   return result;
 }
 
 
-FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info,
-                                        Handle<String> source) {
-  ASSERT(scope_ == NULL);
-  ASSERT(target_stack_ == NULL);
-
-  Handle<String> no_name = isolate()->factory()->empty_string();
+FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
+                                        Scope** eval_scope) {
+  DCHECK(scope_ == NULL);
+  DCHECK(target_stack_ == NULL);
 
   FunctionLiteral* result = NULL;
-  { Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
-    info->SetGlobalScope(scope);
-    if (!info->context().is_null()) {
-      scope = Scope::DeserializeScopeChain(*info->context(), scope, zone());
+  {
+    *scope = NewScope(scope_, GLOBAL_SCOPE);
+    info->SetGlobalScope(*scope);
+    if (!info->context().is_null() && !info->context()->IsNativeContext()) {
+      *scope = Scope::DeserializeScopeChain(*info->context(), *scope, zone());
+      // The Scope is backed by ScopeInfo (which is in the V8 heap); this
+      // means the Parser cannot operate independently of the V8 heap. Tell the
+      // string table to internalize strings and values right after they're
+      // created.
+      ast_value_factory()->Internalize(isolate());
     }
-    original_scope_ = scope;
+    original_scope_ = *scope;
     if (info->is_eval()) {
-      if (!scope->is_global_scope() || info->strict_mode() == STRICT) {
-        scope = NewScope(scope, EVAL_SCOPE);
+      if (!(*scope)->is_global_scope() || info->strict_mode() == STRICT) {
+        *scope = NewScope(*scope, EVAL_SCOPE);
       }
     } else if (info->is_global()) {
-      scope = NewScope(scope, GLOBAL_SCOPE);
+      *scope = NewScope(*scope, GLOBAL_SCOPE);
     }
-    scope->set_start_position(0);
-    scope->set_end_position(source->length());
+    (*scope)->set_start_position(0);
+    // End position will be set by the caller.
 
     // Compute the parsing mode.
     Mode mode = (FLAG_lazy && allow_lazy()) ? PARSE_LAZILY : PARSE_EAGERLY;
-    if (allow_natives_syntax() ||
-        extension_ != NULL ||
-        scope->is_eval_scope()) {
+    if (allow_natives_syntax() || extension_ != NULL ||
+        (*scope)->is_eval_scope()) {
       mode = PARSE_EAGERLY;
     }
     ParsingModeScope parsing_mode(this, mode);
 
     // Enters 'scope'.
-    FunctionState function_state(&function_state_, &scope_, scope, zone());
+    FunctionState function_state(&function_state_, &scope_, *scope, zone(),
+                                 ast_value_factory(), info->ast_node_id_gen());
 
     scope_->SetStrictMode(info->strict_mode());
     ZoneList<Statement*>* body = new(zone()) ZoneList<Statement*>(16, zone());
     bool ok = true;
     int beg_pos = scanner()->location().beg_pos;
-    ParseSourceElements(body, Token::EOS, info->is_eval(), true, &ok);
+    ParseSourceElements(body, Token::EOS, info->is_eval(), true, eval_scope,
+                        &ok);
+
     if (ok && strict_mode() == STRICT) {
       CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
     }
@@ -913,41 +925,33 @@
 
     if (ok) {
       result = factory()->NewFunctionLiteral(
-          no_name,
-          scope_,
-          body,
-          function_state.materialized_literal_count(),
+          ast_value_factory()->empty_string(), ast_value_factory(), scope_,
+          body, function_state.materialized_literal_count(),
           function_state.expected_property_count(),
-          function_state.handler_count(),
-          0,
+          function_state.handler_count(), 0,
           FunctionLiteral::kNoDuplicateParameters,
-          FunctionLiteral::ANONYMOUS_EXPRESSION,
-          FunctionLiteral::kGlobalOrEval,
-          FunctionLiteral::kNotParenthesized,
-          FunctionLiteral::kNotGenerator,
-          0);
+          FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kGlobalOrEval,
+          FunctionLiteral::kNotParenthesized, FunctionKind::kNormalFunction, 0);
       result->set_ast_properties(factory()->visitor()->ast_properties());
       result->set_dont_optimize_reason(
           factory()->visitor()->dont_optimize_reason());
-    } else if (stack_overflow()) {
-      isolate()->StackOverflow();
-    } else {
-      ThrowPendingError();
     }
   }
 
   // Make sure the target stack is empty.
-  ASSERT(target_stack_ == NULL);
+  DCHECK(target_stack_ == NULL);
 
   return result;
 }
 
 
 FunctionLiteral* Parser::ParseLazy() {
+  // It's OK to use the counters here, since this function is only called in
+  // the main thread.
   HistogramTimerScope timer_scope(isolate()->counters()->parse_lazy());
-  Handle<String> source(String::cast(script_->source()));
+  Handle<String> source(String::cast(script()->source()));
   isolate()->counters()->total_parse_size()->Increment(source->length());
-  ElapsedTimer timer;
+  base::ElapsedTimer timer;
   if (FLAG_trace_parse) {
     timer.Start();
   }
@@ -981,12 +985,14 @@
 FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
   Handle<SharedFunctionInfo> shared_info = info()->shared_info();
   scanner_.Initialize(source);
-  ASSERT(scope_ == NULL);
-  ASSERT(target_stack_ == NULL);
+  DCHECK(scope_ == NULL);
+  DCHECK(target_stack_ == NULL);
 
   Handle<String> name(String::cast(shared_info->name()));
-  fni_ = new(zone()) FuncNameInferrer(isolate(), zone());
-  fni_->PushEnclosingName(name);
+  DCHECK(ast_value_factory());
+  fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
+  const AstRawString* raw_name = ast_value_factory()->GetString(name);
+  fni_->PushEnclosingName(raw_name);
 
   ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
 
@@ -1002,9 +1008,11 @@
                                            zone());
     }
     original_scope_ = scope;
-    FunctionState function_state(&function_state_, &scope_, scope, zone());
-    ASSERT(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT);
-    ASSERT(info()->strict_mode() == shared_info->strict_mode());
+    FunctionState function_state(&function_state_, &scope_, scope, zone(),
+                                 ast_value_factory(),
+                                 info()->ast_node_id_gen());
+    DCHECK(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT);
+    DCHECK(info()->strict_mode() == shared_info->strict_mode());
     scope->SetStrictMode(shared_info->strict_mode());
     FunctionLiteral::FunctionType function_type = shared_info->is_expression()
         ? (shared_info->is_anonymous()
@@ -1012,28 +1020,26 @@
               : FunctionLiteral::NAMED_EXPRESSION)
         : FunctionLiteral::DECLARATION;
     bool ok = true;
-    result = ParseFunctionLiteral(name,
-                                  Scanner::Location::invalid(),
-                                  false,  // Strict mode name already checked.
-                                  shared_info->is_generator(),
-                                  RelocInfo::kNoPosition,
-                                  function_type,
-                                  FunctionLiteral::NORMAL_ARITY,
-                                  &ok);
+
+    if (shared_info->is_arrow()) {
+      Expression* expression = ParseExpression(false, &ok);
+      DCHECK(expression->IsFunctionLiteral());
+      result = expression->AsFunctionLiteral();
+    } else {
+      result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
+                                    false,  // Strict mode name already checked.
+                                    shared_info->kind(), RelocInfo::kNoPosition,
+                                    function_type,
+                                    FunctionLiteral::NORMAL_ARITY, &ok);
+    }
     // Make sure the results agree.
-    ASSERT(ok == (result != NULL));
+    DCHECK(ok == (result != NULL));
   }
 
   // Make sure the target stack is empty.
-  ASSERT(target_stack_ == NULL);
+  DCHECK(target_stack_ == NULL);
 
-  if (result == NULL) {
-    if (stack_overflow()) {
-      isolate()->StackOverflow();
-    } else {
-      ThrowPendingError();
-    }
-  } else {
+  if (result != NULL) {
     Handle<String> inferred_name(shared_info->inferred_name());
     result->set_inferred_name(inferred_name);
   }
@@ -1042,10 +1048,8 @@
 
 
 void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
-                                  int end_token,
-                                  bool is_eval,
-                                  bool is_global,
-                                  bool* ok) {
+                                  int end_token, bool is_eval, bool is_global,
+                                  Scope** eval_scope, bool* ok) {
   // SourceElements ::
   //   (ModuleElement)* <end_token>
 
@@ -1055,7 +1059,7 @@
   // functions.
   TargetScope scope(&this->target_stack_);
 
-  ASSERT(processor != NULL);
+  DCHECK(processor != NULL);
   bool directive_prologue = true;     // Parsing directive prologue.
 
   while (peek() != end_token) {
@@ -1082,31 +1086,42 @@
       // Still processing directive prologue?
       if ((e_stat = stat->AsExpressionStatement()) != NULL &&
           (literal = e_stat->expression()->AsLiteral()) != NULL &&
-          literal->value()->IsString()) {
-        Handle<String> directive = Handle<String>::cast(literal->value());
-
-        // Check "use strict" directive (ES5 14.1).
+          literal->raw_value()->IsString()) {
+        // Check "use strict" directive (ES5 14.1) and "use asm" directive. Only
+        // one can be present.
         if (strict_mode() == SLOPPY &&
-            String::Equals(isolate()->factory()->use_strict_string(),
-                           directive) &&
+            literal->raw_value()->AsString() ==
+                ast_value_factory()->use_strict_string() &&
             token_loc.end_pos - token_loc.beg_pos ==
-              isolate()->heap()->use_strict_string()->length() + 2) {
+                ast_value_factory()->use_strict_string()->length() + 2) {
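+          // The directive matches only if the token spans the string's
+          // length plus the two quote characters, which rules out spellings
+          // that use escape sequences.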
           // TODO(mstarzinger): Global strict eval calls need their own scope
           // as specified in ES5 10.4.2(3). The correct fix would be to always
           // add this scope in DoParseProgram(), but that requires adaptations
           // all over the code base, so we go with a quick-fix for now.
           // In the same manner, we have to patch the parsing mode.
           if (is_eval && !scope_->is_eval_scope()) {
-            ASSERT(scope_->is_global_scope());
+            DCHECK(scope_->is_global_scope());
             Scope* scope = NewScope(scope_, EVAL_SCOPE);
             scope->set_start_position(scope_->start_position());
             scope->set_end_position(scope_->end_position());
             scope_ = scope;
+            if (eval_scope != NULL) {
+              // Caller will correct the positions of the ad hoc eval scope.
+              *eval_scope = scope;
+            }
             mode_ = PARSE_EAGERLY;
           }
           scope_->SetStrictMode(STRICT);
           // "use strict" is the only directive for now.
           directive_prologue = false;
+        } else if (literal->raw_value()->AsString() ==
+                       ast_value_factory()->use_asm_string() &&
+                   token_loc.end_pos - token_loc.beg_pos ==
+                       ast_value_factory()->use_asm_string()->length() + 2) {
+          // Store the usage count; the actual use counter on the isolate is
+          // incremented after parsing is done.
+          ++use_counts_[v8::Isolate::kUseAsm];
+          scope_->SetAsmModule();
         }
       } else {
         // End of the directive prologue.
@@ -1121,7 +1136,7 @@
 }
 
 
-Statement* Parser::ParseModuleElement(ZoneStringList* labels,
+Statement* Parser::ParseModuleElement(ZoneList<const AstRawString*>* labels,
                                       bool* ok) {
   // (Ecma 262 5th Edition, clause 14):
   // SourceElement:
@@ -1140,13 +1155,20 @@
   switch (peek()) {
     case Token::FUNCTION:
       return ParseFunctionDeclaration(NULL, ok);
-    case Token::LET:
-    case Token::CONST:
-      return ParseVariableStatement(kModuleElement, NULL, ok);
+    case Token::CLASS:
+      return ParseClassDeclaration(NULL, ok);
     case Token::IMPORT:
       return ParseImportDeclaration(ok);
     case Token::EXPORT:
       return ParseExportDeclaration(ok);
+    case Token::CONST:
+      return ParseVariableStatement(kModuleElement, NULL, ok);
+    case Token::LET:
+      DCHECK(allow_harmony_scoping());
+      if (strict_mode() == STRICT) {
+        return ParseVariableStatement(kModuleElement, NULL, ok);
+      }
+      // Fall through.
     default: {
       Statement* stmt = ParseStatement(labels, CHECK_OK);
       // Handle 'module' as a context-sensitive keyword.
@@ -1155,10 +1177,9 @@
           !scanner()->HasAnyLineTerminatorBeforeNext() &&
           stmt != NULL) {
         ExpressionStatement* estmt = stmt->AsExpressionStatement();
-        if (estmt != NULL &&
-            estmt->expression()->AsVariableProxy() != NULL &&
-            String::Equals(isolate()->factory()->module_string(),
-                           estmt->expression()->AsVariableProxy()->name()) &&
+        if (estmt != NULL && estmt->expression()->AsVariableProxy() != NULL &&
+            estmt->expression()->AsVariableProxy()->raw_name() ==
+                ast_value_factory()->module_string() &&
             !scanner()->literal_contains_escapes()) {
           return ParseModuleDeclaration(NULL, ok);
         }
@@ -1169,16 +1190,18 @@
 }
 
 
-Statement* Parser::ParseModuleDeclaration(ZoneStringList* names, bool* ok) {
+Statement* Parser::ParseModuleDeclaration(ZoneList<const AstRawString*>* names,
+                                          bool* ok) {
   // ModuleDeclaration:
   //    'module' Identifier Module
 
   int pos = peek_position();
-  Handle<String> name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
+  const AstRawString* name =
+      ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
 
 #ifdef DEBUG
   if (FLAG_print_interface_details)
-    PrintF("# Module %s...\n", name->ToAsciiArray());
+    PrintF("# Module %.*s ", name->length(), name->raw_data());
 #endif
 
   Module* module = ParseModule(CHECK_OK);
@@ -1189,10 +1212,9 @@
 
 #ifdef DEBUG
   if (FLAG_print_interface_details)
-    PrintF("# Module %s.\n", name->ToAsciiArray());
-
+    PrintF("# Module %.*s ", name->length(), name->raw_data());
   if (FLAG_print_interfaces) {
-    PrintF("module %s : ", name->ToAsciiArray());
+    PrintF("module %.*s: ", name->length(), name->raw_data());
     module->interface()->Print();
   }
 #endif
@@ -1278,9 +1300,9 @@
   }
 
   interface->MakeModule(ok);
-  ASSERT(*ok);
+  DCHECK(*ok);
   interface->Freeze(ok);
-  ASSERT(*ok);
+  DCHECK(*ok);
   return factory()->NewModuleLiteral(body, interface, pos);
 }
 
@@ -1293,17 +1315,17 @@
   int pos = peek_position();
   Module* result = ParseModuleVariable(CHECK_OK);
   while (Check(Token::PERIOD)) {
-    Handle<String> name = ParseIdentifierName(CHECK_OK);
+    const AstRawString* name = ParseIdentifierName(CHECK_OK);
 #ifdef DEBUG
     if (FLAG_print_interface_details)
-      PrintF("# Path .%s ", name->ToAsciiArray());
+      PrintF("# Path .%.*s ", name->length(), name->raw_data());
 #endif
     Module* member = factory()->NewModulePath(result, name, pos);
     result->interface()->Add(name, member->interface(), zone(), ok);
     if (!*ok) {
 #ifdef DEBUG
       if (FLAG_print_interfaces) {
-        PrintF("PATH TYPE ERROR at '%s'\n", name->ToAsciiArray());
+        PrintF("PATH TYPE ERROR at '%.*s'\n", name->length(), name->raw_data());
         PrintF("result: ");
         result->interface()->Print();
         PrintF("member: ");
@@ -1325,10 +1347,11 @@
   //    Identifier
 
   int pos = peek_position();
-  Handle<String> name = ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
+  const AstRawString* name =
+      ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
 #ifdef DEBUG
   if (FLAG_print_interface_details)
-    PrintF("# Module variable %s ", name->ToAsciiArray());
+    PrintF("# Module variable %.*s ", name->length(), name->raw_data());
 #endif
   VariableProxy* proxy = scope_->NewUnresolved(
       factory(), name, Interface::NewModule(zone()),
@@ -1344,7 +1367,7 @@
 
   int pos = peek_position();
   Expect(Token::STRING, CHECK_OK);
-  Handle<String> symbol = GetSymbol();
+  const AstRawString* symbol = GetSymbol(scanner());
 
   // TODO(ES6): Request JS resource from environment...
 
@@ -1360,9 +1383,9 @@
   Interface* interface = scope->interface();
   Module* result = factory()->NewModuleLiteral(body, interface, pos);
   interface->Freeze(ok);
-  ASSERT(*ok);
+  DCHECK(*ok);
   interface->Unify(scope->interface(), zone(), ok);
-  ASSERT(*ok);
+  DCHECK(*ok);
   return result;
 }
 
@@ -1388,9 +1411,9 @@
 
   int pos = peek_position();
   Expect(Token::IMPORT, CHECK_OK);
-  ZoneStringList names(1, zone());
+  ZoneList<const AstRawString*> names(1, zone());
 
-  Handle<String> name = ParseIdentifierName(CHECK_OK);
+  const AstRawString* name = ParseIdentifierName(CHECK_OK);
   names.Add(name, zone());
   while (peek() == Token::COMMA) {
     Consume(Token::COMMA);
@@ -1408,14 +1431,15 @@
   for (int i = 0; i < names.length(); ++i) {
 #ifdef DEBUG
     if (FLAG_print_interface_details)
-      PrintF("# Import %s ", names[i]->ToAsciiArray());
+      PrintF("# Import %.*s ", name->length(), name->raw_data());
 #endif
     Interface* interface = Interface::NewUnknown(zone());
     module->interface()->Add(names[i], interface, zone(), ok);
     if (!*ok) {
 #ifdef DEBUG
       if (FLAG_print_interfaces) {
-        PrintF("IMPORT TYPE ERROR at '%s'\n", names[i]->ToAsciiArray());
+        PrintF("IMPORT TYPE ERROR at '%.*s'\n", name->length(),
+               name->raw_data());
         PrintF("module: ");
         module->interface()->Print();
       }
@@ -1446,14 +1470,14 @@
   Expect(Token::EXPORT, CHECK_OK);
 
   Statement* result = NULL;
-  ZoneStringList names(1, zone());
+  ZoneList<const AstRawString*> names(1, zone());
   switch (peek()) {
     case Token::IDENTIFIER: {
       int pos = position();
-      Handle<String> name =
+      const AstRawString* name =
           ParseIdentifier(kDontAllowEvalOrArguments, CHECK_OK);
       // Handle 'module' as a context-sensitive keyword.
-      if (!name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("module"))) {
+      if (name != ast_value_factory()->module_string()) {
         names.Add(name, zone());
         while (peek() == Token::COMMA) {
           Consume(Token::COMMA);
@@ -1472,6 +1496,10 @@
       result = ParseFunctionDeclaration(&names, CHECK_OK);
       break;
 
+    case Token::CLASS:
+      result = ParseClassDeclaration(&names, CHECK_OK);
+      break;
+
     case Token::VAR:
     case Token::LET:
     case Token::CONST:
@@ -1484,12 +1512,25 @@
       return NULL;
   }
 
+  // Every export of a module may be assigned.
+  for (int i = 0; i < names.length(); ++i) {
+    Variable* var = scope_->Lookup(names[i]);
+    if (var == NULL) {
+      // TODO(sigurds) This is an export that has no definition yet;
+      // it is not clear what to do in this case.
+      continue;
+    }
+    if (!IsImmutableVariableMode(var->mode())) {
+      var->set_maybe_assigned();
+    }
+  }
+
   // Extract declared names into export declarations and interface.
   Interface* interface = scope_->interface();
   for (int i = 0; i < names.length(); ++i) {
 #ifdef DEBUG
     if (FLAG_print_interface_details)
-      PrintF("# Export %s ", names[i]->ToAsciiArray());
+      PrintF("# Export %.*s ", names[i]->length(), names[i]->raw_data());
 #endif
     Interface* inner = Interface::NewUnknown(zone());
     interface->Add(names[i], inner, zone(), CHECK_OK);
@@ -1504,12 +1545,12 @@
     // scope_->AddDeclaration(declaration);
   }
 
-  ASSERT(result != NULL);
+  DCHECK(result != NULL);
   return result;
 }
 
 
-Statement* Parser::ParseBlockElement(ZoneStringList* labels,
+Statement* Parser::ParseBlockElement(ZoneList<const AstRawString*>* labels,
                                      bool* ok) {
   // (Ecma 262 5th Edition, clause 14):
   // SourceElement:
@@ -1521,20 +1562,29 @@
   //    LetDeclaration
   //    ConstDeclaration
   //    GeneratorDeclaration
+  //    ClassDeclaration
 
   switch (peek()) {
     case Token::FUNCTION:
       return ParseFunctionDeclaration(NULL, ok);
-    case Token::LET:
+    case Token::CLASS:
+      return ParseClassDeclaration(NULL, ok);
     case Token::CONST:
       return ParseVariableStatement(kModuleElement, NULL, ok);
+    case Token::LET:
+      DCHECK(allow_harmony_scoping());
+      if (strict_mode() == STRICT) {
+        return ParseVariableStatement(kModuleElement, NULL, ok);
+      }
+      // Fall through.
     default:
       return ParseStatement(labels, ok);
   }
 }
 
 
-Statement* Parser::ParseStatement(ZoneStringList* labels, bool* ok) {
+Statement* Parser::ParseStatement(ZoneList<const AstRawString*>* labels,
+                                  bool* ok) {
   // Statement ::
   //   Block
   //   VariableStatement
@@ -1562,11 +1612,6 @@
     case Token::LBRACE:
       return ParseBlock(labels, ok);
 
-    case Token::CONST:  // fall through
-    case Token::LET:
-    case Token::VAR:
-      return ParseVariableStatement(kStatement, NULL, ok);
-
     case Token::SEMICOLON:
       Next();
       return factory()->NewEmptyStatement(RelocInfo::kNoPosition);
@@ -1635,17 +1680,30 @@
       return ParseFunctionDeclaration(NULL, ok);
     }
 
+    case Token::CLASS:
+      return ParseClassDeclaration(NULL, ok);
+
     case Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
 
+    case Token::VAR:
+    case Token::CONST:
+      return ParseVariableStatement(kStatement, NULL, ok);
+
+    case Token::LET:
+      DCHECK(allow_harmony_scoping());
+      if (strict_mode() == STRICT) {
+        return ParseVariableStatement(kStatement, NULL, ok);
+      }
+      // Fall through.
     default:
       return ParseExpressionOrLabelledStatement(labels, ok);
   }
 }
 
 
-VariableProxy* Parser::NewUnresolved(
-    Handle<String> name, VariableMode mode, Interface* interface) {
+VariableProxy* Parser::NewUnresolved(const AstRawString* name,
+                                     VariableMode mode, Interface* interface) {
   // If we are inside a function, a declaration of a var/const variable is a
   // truly local variable, and the scope of the variable is always the function
   // scope.
@@ -1658,7 +1716,8 @@
 
 void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
   VariableProxy* proxy = declaration->proxy();
-  Handle<String> name = proxy->name();
+  DCHECK(proxy->raw_name() != NULL);
+  const AstRawString* name = proxy->raw_name();
   VariableMode mode = declaration->mode();
   Scope* declaration_scope = DeclarationScope(mode);
   Variable* var = NULL;
@@ -1685,17 +1744,17 @@
         : declaration_scope->LookupLocal(name);
     if (var == NULL) {
       // Declare the name.
-      var = declaration_scope->DeclareLocal(
-          name, mode, declaration->initialization(), proxy->interface());
-    } else if ((mode != VAR || var->mode() != VAR) &&
-               (!declaration_scope->is_global_scope() ||
-                IsLexicalVariableMode(mode) ||
-                IsLexicalVariableMode(var->mode()))) {
+      var = declaration_scope->DeclareLocal(name, mode,
+                                            declaration->initialization(),
+                                            kNotAssigned, proxy->interface());
+    } else if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode())
+               || ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
+                   !declaration_scope->is_global_scope())) {
       // The name was declared in this scope before; check for conflicting
       // re-declarations. We have a conflict if either of the declarations is
       // not a var (in the global scope, we also have to ignore legacy const for
       // compatibility). There is similar code in runtime.cc in the Declare
-      // functions. The function CheckNonConflictingScope checks for conflicting
+      // functions. The function CheckConflictingVarDeclarations checks for
       // var and let bindings from different scopes whereas this is a check for
       // conflicting declarations within the same scope. This check also covers
       // the special case
@@ -1704,7 +1763,7 @@
       //
       // because the var declaration is hoisted to the function scope where 'x'
       // is already bound.
-      ASSERT(IsDeclaredVariableMode(var->mode()));
+      DCHECK(IsDeclaredVariableMode(var->mode()));
       if (allow_harmony_scoping() && strict_mode() == STRICT) {
         // In harmony we treat re-declarations as early errors. See
         // ES5 16 for a definition of early errors.
@@ -1715,6 +1774,8 @@
       Expression* expression = NewThrowTypeError(
           "var_redeclaration", name, declaration->position());
       declaration_scope->SetIllegalRedeclaration(expression);
+    } else if (mode == VAR) {
+      var->set_maybe_assigned();
     }
   }
 
@@ -1733,25 +1794,26 @@
   // same variable if it is declared several times. This is not a
   // semantic issue as long as we keep the source order, but it may be
   // a performance issue since it may lead to repeated
-  // RuntimeHidden_DeclareContextSlot calls.
+  // RuntimeHidden_DeclareLookupSlot calls.
   declaration_scope->AddDeclaration(declaration);
 
   if (mode == CONST_LEGACY && declaration_scope->is_global_scope()) {
     // For global const variables we bind the proxy to a variable.
-    ASSERT(resolve);  // should be set by all callers
+    DCHECK(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
-    var = new(zone()) Variable(
-        declaration_scope, name, mode, true, kind,
-        kNeedsInitialization, proxy->interface());
+    var = new (zone())
+        Variable(declaration_scope, name, mode, true, kind,
+                 kNeedsInitialization, kNotAssigned, proxy->interface());
   } else if (declaration_scope->is_eval_scope() &&
              declaration_scope->strict_mode() == SLOPPY) {
     // For variable declarations in a sloppy eval scope the proxy is bound
     // to a lookup variable to force a dynamic declaration using the
-    // DeclareContextSlot runtime function.
+    // DeclareLookupSlot runtime function.
     Variable::Kind kind = Variable::NORMAL;
-    var = new(zone()) Variable(
-        declaration_scope, name, mode, true, kind,
-        declaration->initialization(), proxy->interface());
+    // TODO(sigurds) figure out if kNotAssigned is OK here
+    var = new (zone()) Variable(declaration_scope, name, mode, true, kind,
+                                declaration->initialization(), kNotAssigned,
+                                proxy->interface());
     var->AllocateTo(Variable::LOOKUP, -1);
     resolve = true;
   }
@@ -1786,8 +1848,10 @@
     if (FLAG_harmony_modules) {
       bool ok;
 #ifdef DEBUG
-      if (FLAG_print_interface_details)
-        PrintF("# Declare %s\n", var->name()->ToAsciiArray());
+      if (FLAG_print_interface_details) {
+        PrintF("# Declare %.*s ", var->raw_name()->length(),
+               var->raw_name()->raw_data());
+      }
 #endif
       proxy->interface()->Unify(var->interface(), zone(), &ok);
       if (!ok) {
@@ -1815,7 +1879,7 @@
   int pos = peek_position();
   Expect(Token::FUNCTION, CHECK_OK);
   // Allow "eval" or "arguments" for backward compatibility.
-  Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
+  const AstRawString* name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   bool done = (peek() == Token::RPAREN);
   while (!done) {
@@ -1850,7 +1914,8 @@
 }
 
 
-Statement* Parser::ParseFunctionDeclaration(ZoneStringList* names, bool* ok) {
+Statement* Parser::ParseFunctionDeclaration(
+    ZoneList<const AstRawString*>* names, bool* ok) {
   // FunctionDeclaration ::
   //   'function' Identifier '(' FormalParameterListopt ')' '{' FunctionBody '}'
   // GeneratorDeclaration ::
@@ -1858,26 +1923,25 @@
   //      '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
   int pos = position();
-  bool is_generator = allow_generators() && Check(Token::MUL);
+  bool is_generator = Check(Token::MUL);
   bool is_strict_reserved = false;
-  Handle<String> name = ParseIdentifierOrStrictReservedWord(
+  const AstRawString* name = ParseIdentifierOrStrictReservedWord(
       &is_strict_reserved, CHECK_OK);
-  FunctionLiteral* fun = ParseFunctionLiteral(name,
-                                              scanner()->location(),
-                                              is_strict_reserved,
-                                              is_generator,
-                                              pos,
-                                              FunctionLiteral::DECLARATION,
-                                              FunctionLiteral::NORMAL_ARITY,
-                                              CHECK_OK);
+  FunctionLiteral* fun =
+      ParseFunctionLiteral(name, scanner()->location(), is_strict_reserved,
+                           is_generator ? FunctionKind::kGeneratorFunction
+                                        : FunctionKind::kNormalFunction,
+                           pos, FunctionLiteral::DECLARATION,
+                           FunctionLiteral::NORMAL_ARITY, CHECK_OK);
   // Even if we're not at the top-level of the global or a function
   // scope, we treat it as such and introduce the function with its
   // initial value upon entering the corresponding scope.
-  // In extended mode, a function behaves as a lexical binding, except in the
-  // global scope.
+  // In ES6, a function behaves as a lexical binding, except in the
+  // global scope, or the initial scope of eval or another function.
   VariableMode mode =
-      allow_harmony_scoping() &&
-      strict_mode() == STRICT && !scope_->is_global_scope() ? LET : VAR;
+      allow_harmony_scoping() && strict_mode() == STRICT &&
+      !(scope_->is_global_scope() || scope_->is_eval_scope() ||
+          scope_->is_function_scope()) ? LET : VAR;
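+  // Under harmony scoping in strict mode this gives a function declared
+  // directly inside a block a block-scoped LET binding, while declarations
+  // at the top level of a script, eval, or another function keep the classic
+  // hoisted VAR semantics.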
   VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
   Declaration* declaration =
       factory()->NewFunctionDeclaration(proxy, mode, fun, scope_, pos);
@@ -1887,7 +1951,48 @@
 }
 
 
-Block* Parser::ParseBlock(ZoneStringList* labels, bool* ok) {
+Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
+                                         bool* ok) {
+  // ClassDeclaration ::
+  //   'class' Identifier ('extends' LeftHandExpression)? '{' ClassBody '}'
+  //
+  // A ClassDeclaration
+  //
+  //   class C { ... }
+  //
+  // has the same semantics as:
+  //
+  //   let C = class C { ... };
+  //
+  // so rewrite it as such.
+
+  Expect(Token::CLASS, CHECK_OK);
+  int pos = position();
+  bool is_strict_reserved = false;
+  const AstRawString* name =
+      ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+  Expression* value = ParseClassLiteral(name, scanner()->location(),
+                                        is_strict_reserved, pos, CHECK_OK);
+
+  Block* block = factory()->NewBlock(NULL, 1, true, pos);
+  VariableMode mode = LET;
+  VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
+  Declaration* declaration =
+      factory()->NewVariableDeclaration(proxy, mode, scope_, pos);
+  Declare(declaration, true, CHECK_OK);
+
+  Token::Value init_op = Token::INIT_LET;
+  Assignment* assignment = factory()->NewAssignment(init_op, proxy, value, pos);
+  block->AddStatement(
+      factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition),
+      zone());
+
+  if (names) names->Add(name, zone());
+  return block;
+}
+
+
+Block* Parser::ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok) {
   if (allow_harmony_scoping() && strict_mode() == STRICT) {
     return ParseScopedBlock(labels, ok);
   }
@@ -1914,7 +2019,8 @@
 }
 
 
-Block* Parser::ParseScopedBlock(ZoneStringList* labels, bool* ok) {
+Block* Parser::ParseScopedBlock(ZoneList<const AstRawString*>* labels,
+                                bool* ok) {
   // The harmony mode uses block elements instead of statements.
   //
   // Block ::
@@ -1949,12 +2055,12 @@
 
 
 Block* Parser::ParseVariableStatement(VariableDeclarationContext var_context,
-                                      ZoneStringList* names,
+                                      ZoneList<const AstRawString*>* names,
                                       bool* ok) {
   // VariableStatement ::
   //   VariableDeclarations ';'
 
-  Handle<String> ignore;
+  const AstRawString* ignore;
   Block* result =
       ParseVariableDeclarations(var_context, NULL, names, &ignore, CHECK_OK);
   ExpectSemicolon(CHECK_OK);
@@ -1970,8 +2076,8 @@
 Block* Parser::ParseVariableDeclarations(
     VariableDeclarationContext var_context,
     VariableDeclarationProperties* decl_props,
-    ZoneStringList* names,
-    Handle<String>* out,
+    ZoneList<const AstRawString*>* names,
+    const AstRawString** out,
     bool* ok) {
   // VariableDeclarations ::
   //   ('var' | 'const' | 'let') (Identifier ('=' AssignmentExpression)?)+[',']
@@ -2034,20 +2140,8 @@
     }
     is_const = true;
     needs_init = true;
-  } else if (peek() == Token::LET) {
-    // ES6 Draft Rev4 section 12.2.1:
-    //
-    // LetDeclaration : let LetBindingList ;
-    //
-    // * It is a Syntax Error if the code that matches this production is not
-    //   contained in extended code.
-    //
-    // TODO(rossberg): make 'let' a legal identifier in sloppy mode.
-    if (!allow_harmony_scoping() || strict_mode() == SLOPPY) {
-      ReportMessage("illegal_let");
-      *ok = false;
-      return NULL;
-    }
+  } else if (peek() == Token::LET && strict_mode() == STRICT) {
+    DCHECK(allow_harmony_scoping());
     Consume(Token::LET);
     if (var_context == kStatement) {
       // Let declarations are only allowed in source element positions.
@@ -2079,7 +2173,7 @@
   // Create new block with one expected declaration.
   Block* block = factory()->NewBlock(NULL, 1, true, pos);
   int nvars = 0;  // the number of variables declared
-  Handle<String> name;
+  const AstRawString* name = NULL;
   do {
     if (fni_ != NULL) fni_->Enter();
 
@@ -2185,9 +2279,8 @@
     // executed.
     //
     // Executing the variable declaration statement will always
-    // guarantee to give the global object a "local" variable; a
-    // variable defined in the global object and not in any
-    // prototype. This way, global variable declarations can shadow
+    // guarantee to give the global object an own property.
+    // This way, global variable declarations can shadow
     // properties in the prototype chain, but only after the variable
     // declaration statement has been executed. This is important in
     // browsers where the global object (window) has lots of
@@ -2198,7 +2291,7 @@
       ZoneList<Expression*>* arguments =
           new(zone()) ZoneList<Expression*>(3, zone());
       // We have at least 1 parameter.
-      arguments->Add(factory()->NewLiteral(name, pos), zone());
+      arguments->Add(factory()->NewStringLiteral(name, pos), zone());
       CallRuntime* initialize;
 
       if (is_const) {
@@ -2210,9 +2303,9 @@
         // Note that the function does different things depending on
         // the number of arguments (1 or 2).
         initialize = factory()->NewCallRuntime(
-            isolate()->factory()->InitializeConstGlobal_string(),
-            Runtime::FunctionForId(Runtime::kHiddenInitializeConstGlobal),
-            arguments, pos);
+            ast_value_factory()->initialize_const_global_string(),
+            Runtime::FunctionForId(Runtime::kInitializeConstGlobal), arguments,
+            pos);
       } else {
         // Add strict mode.
         // We may want to pass singleton to avoid Literal allocations.
@@ -2226,21 +2319,22 @@
         if (value != NULL && !inside_with()) {
           arguments->Add(value, zone());
           value = NULL;  // zap the value to avoid the unnecessary assignment
+          // Construct the call to Runtime_InitializeVarGlobal
+          // and add it to the initialization statement block.
+          initialize = factory()->NewCallRuntime(
+              ast_value_factory()->initialize_var_global_string(),
+              Runtime::FunctionForId(Runtime::kInitializeVarGlobal), arguments,
+              pos);
+        } else {
+          initialize = NULL;
         }
-
-        // Construct the call to Runtime_InitializeVarGlobal
-        // and add it to the initialization statement block.
-        // Note that the function does different things depending on
-        // the number of arguments (2 or 3).
-        initialize = factory()->NewCallRuntime(
-            isolate()->factory()->InitializeVarGlobal_string(),
-            Runtime::FunctionForId(Runtime::kInitializeVarGlobal),
-            arguments, pos);
       }
 
-      block->AddStatement(
-          factory()->NewExpressionStatement(initialize, RelocInfo::kNoPosition),
-          zone());
+      if (initialize != NULL) {
+        block->AddStatement(factory()->NewExpressionStatement(
+                                initialize, RelocInfo::kNoPosition),
+                            zone());
+      }
     } else if (needs_init) {
       // Constant initializations always assign to the declared constant which
       // is always at the function scope level. This is only relevant for
@@ -2249,9 +2343,9 @@
       // context for var declared variables). Sigh...
       // For 'let' and 'const' declared variables in harmony mode the
       // initialization also always assigns to the declared variable.
-      ASSERT(proxy != NULL);
-      ASSERT(proxy->var() != NULL);
-      ASSERT(value != NULL);
+      DCHECK(proxy != NULL);
+      DCHECK(proxy->var() != NULL);
+      DCHECK(value != NULL);
       Assignment* assignment =
           factory()->NewAssignment(init_op, proxy, value, pos);
       block->AddStatement(
@@ -2263,7 +2357,7 @@
     // Add an assignment node to the initialization statement block if we still
     // have a pending initialization value.
     if (value != NULL) {
-      ASSERT(mode == VAR);
+      DCHECK(mode == VAR);
       // 'var' initializations are simply assignments (with all the consequences
       // if they are inside a 'with' statement - they may change a 'with' object
       // property).
@@ -2289,11 +2383,12 @@
 }
 
 
-static bool ContainsLabel(ZoneStringList* labels, Handle<String> label) {
-  ASSERT(!label.is_null());
+static bool ContainsLabel(ZoneList<const AstRawString*>* labels,
+                          const AstRawString* label) {
+  DCHECK(label != NULL);
   if (labels != NULL) {
     for (int i = labels->length(); i-- > 0; ) {
-      if (labels->at(i).is_identical_to(label)) {
+      if (labels->at(i) == label) {
         return true;
       }
     }
@@ -2302,8 +2397,8 @@
 }
 
 
-Statement* Parser::ParseExpressionOrLabelledStatement(ZoneStringList* labels,
-                                                      bool* ok) {
+Statement* Parser::ParseExpressionOrLabelledStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
   // ExpressionStatement | LabelledStatement ::
   //   Expression ';'
   //   Identifier ':' Statement
@@ -2316,7 +2411,7 @@
     // Expression is a single identifier, and not, e.g., a parenthesized
     // identifier.
     VariableProxy* var = expr->AsVariableProxy();
-    Handle<String> label = var->name();
+    const AstRawString* label = var->raw_name();
     // TODO(1240780): We don't check for redeclaration of labels
     // during preparsing since keeping track of the set of active
     // labels requires nontrivial changes to the way scopes are
@@ -2328,7 +2423,7 @@
       return NULL;
     }
     if (labels == NULL) {
-      labels = new(zone()) ZoneStringList(4, zone());
+      labels = new(zone()) ZoneList<const AstRawString*>(4, zone());
     }
     labels->Add(label, zone());
     // Remove the "ghost" variable that turned out to be a label
@@ -2342,25 +2437,22 @@
   // If we have an extension, we allow a native function declaration.
   // A native function declaration starts with "native function" with
   // no line-terminator between the two words.
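   // For example (assuming an extension is installed):
   //
   //   native function f();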
-  if (extension_ != NULL &&
-      peek() == Token::FUNCTION &&
-      !scanner()->HasAnyLineTerminatorBeforeNext() &&
-      expr != NULL &&
+  if (extension_ != NULL && peek() == Token::FUNCTION &&
+      !scanner()->HasAnyLineTerminatorBeforeNext() && expr != NULL &&
       expr->AsVariableProxy() != NULL &&
-      String::Equals(isolate()->factory()->native_string(),
-                     expr->AsVariableProxy()->name()) &&
+      expr->AsVariableProxy()->raw_name() ==
+          ast_value_factory()->native_string() &&
       !scanner()->literal_contains_escapes()) {
     return ParseNativeDeclaration(ok);
   }
 
   // Parsed expression statement, or the context-sensitive 'module' keyword.
   // Only expect semicolon in the former case.
-  if (!FLAG_harmony_modules ||
-      peek() != Token::IDENTIFIER ||
+  if (!FLAG_harmony_modules || peek() != Token::IDENTIFIER ||
       scanner()->HasAnyLineTerminatorBeforeNext() ||
       expr->AsVariableProxy() == NULL ||
-      !String::Equals(isolate()->factory()->module_string(),
-                      expr->AsVariableProxy()->name()) ||
+      expr->AsVariableProxy()->raw_name() !=
+          ast_value_factory()->module_string() ||
       scanner()->literal_contains_escapes()) {
     ExpectSemicolon(CHECK_OK);
   }
@@ -2368,7 +2460,8 @@
 }
 
 
-IfStatement* Parser::ParseIfStatement(ZoneStringList* labels, bool* ok) {
+IfStatement* Parser::ParseIfStatement(ZoneList<const AstRawString*>* labels,
+                                      bool* ok) {
   // IfStatement ::
   //   'if' '(' Expression ')' Statement ('else' Statement)?
 
@@ -2396,19 +2489,18 @@
 
   int pos = peek_position();
   Expect(Token::CONTINUE, CHECK_OK);
-  Handle<String> label = Handle<String>::null();
+  const AstRawString* label = NULL;
   Token::Value tok = peek();
   if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
       tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
     // ECMA allows "eval" or "arguments" as labels even in strict mode.
     label = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
   }
-  IterationStatement* target = NULL;
-  target = LookupContinueTarget(label, CHECK_OK);
+  IterationStatement* target = LookupContinueTarget(label, CHECK_OK);
   if (target == NULL) {
     // Illegal continue statement.
     const char* message = "illegal_continue";
-    if (!label.is_null()) {
+    if (label != NULL) {
       message = "unknown_label";
     }
     ParserTraits::ReportMessage(message, label);
@@ -2420,13 +2512,14 @@
 }
 
 
-Statement* Parser::ParseBreakStatement(ZoneStringList* labels, bool* ok) {
+Statement* Parser::ParseBreakStatement(ZoneList<const AstRawString*>* labels,
+                                       bool* ok) {
   // BreakStatement ::
   //   'break' Identifier? ';'
 
   int pos = peek_position();
   Expect(Token::BREAK, CHECK_OK);
-  Handle<String> label;
+  const AstRawString* label = NULL;
   Token::Value tok = peek();
   if (!scanner()->HasAnyLineTerminatorBeforeNext() &&
       tok != Token::SEMICOLON && tok != Token::RBRACE && tok != Token::EOS) {
@@ -2435,7 +2528,7 @@
   }
   // Parse labeled break statements that target themselves into
   // empty statements, e.g. 'l1: l2: l3: break l2;'
-  if (!label.is_null() && ContainsLabel(labels, label)) {
+  if (label != NULL && ContainsLabel(labels, label)) {
     ExpectSemicolon(CHECK_OK);
     return factory()->NewEmptyStatement(pos);
   }
@@ -2444,7 +2537,7 @@
   if (target == NULL) {
     // Illegal break statement.
     const char* message = "illegal_break";
-    if (!label.is_null()) {
+    if (label != NULL) {
       message = "unknown_label";
     }
     ParserTraits::ReportMessage(message, label);
@@ -2482,7 +2575,7 @@
     Expression* generator = factory()->NewVariableProxy(
         function_state_->generator_object_variable());
     Expression* yield = factory()->NewYield(
-        generator, return_value, Yield::FINAL, loc.beg_pos);
+        generator, return_value, Yield::kFinal, loc.beg_pos);
     result = factory()->NewExpressionStatement(yield, loc.beg_pos);
   } else {
     result = factory()->NewReturnStatement(return_value, loc.beg_pos);
@@ -2498,7 +2591,8 @@
 }
 
 
-Statement* Parser::ParseWithStatement(ZoneStringList* labels, bool* ok) {
+Statement* Parser::ParseWithStatement(ZoneList<const AstRawString*>* labels,
+                                      bool* ok) {
   // WithStatement ::
   //   'with' '(' Expression ')' Statement
 
@@ -2560,8 +2654,8 @@
 }
 
 
-SwitchStatement* Parser::ParseSwitchStatement(ZoneStringList* labels,
-                                              bool* ok) {
+SwitchStatement* Parser::ParseSwitchStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
   // SwitchStatement ::
   //   'switch' '(' Expression ')' '{' CaseClause* '}'
 
@@ -2644,7 +2738,7 @@
   Scope* catch_scope = NULL;
   Variable* catch_variable = NULL;
   Block* catch_block = NULL;
-  Handle<String> name;
+  const AstRawString* name = NULL;
   if (tok == Token::CATCH) {
     Consume(Token::CATCH);
 
@@ -2658,9 +2752,7 @@
     Target target(&this->target_stack_, &catch_collector);
     VariableMode mode =
         allow_harmony_scoping() && strict_mode() == STRICT ? LET : VAR;
-    catch_variable =
-        catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
-
+    catch_variable = catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
     BlockState block_state(&scope_, catch_scope);
     catch_block = ParseBlock(NULL, CHECK_OK);
 
@@ -2669,7 +2761,7 @@
   }
 
   Block* finally_block = NULL;
-  ASSERT(tok == Token::FINALLY || catch_block != NULL);
+  DCHECK(tok == Token::FINALLY || catch_block != NULL);
   if (tok == Token::FINALLY) {
     Consume(Token::FINALLY);
     finally_block = ParseBlock(NULL, CHECK_OK);
@@ -2682,7 +2774,7 @@
 
   if (catch_block != NULL && finally_block != NULL) {
     // If we have both, create an inner try/catch.
-    ASSERT(catch_scope != NULL && catch_variable != NULL);
+    DCHECK(catch_scope != NULL && catch_variable != NULL);
     int index = function_state_->NextHandlerIndex();
     TryCatchStatement* statement = factory()->NewTryCatchStatement(
         index, try_block, catch_scope, catch_variable, catch_block,
@@ -2695,13 +2787,13 @@
 
   TryStatement* result = NULL;
   if (catch_block != NULL) {
-    ASSERT(finally_block == NULL);
-    ASSERT(catch_scope != NULL && catch_variable != NULL);
+    DCHECK(finally_block == NULL);
+    DCHECK(catch_scope != NULL && catch_variable != NULL);
     int index = function_state_->NextHandlerIndex();
     result = factory()->NewTryCatchStatement(
         index, try_block, catch_scope, catch_variable, catch_block, pos);
   } else {
-    ASSERT(finally_block != NULL);
+    DCHECK(finally_block != NULL);
     int index = function_state_->NextHandlerIndex();
     result = factory()->NewTryFinallyStatement(
         index, try_block, finally_block, pos);
@@ -2714,8 +2806,8 @@
 }
 
 
-DoWhileStatement* Parser::ParseDoWhileStatement(ZoneStringList* labels,
-                                                bool* ok) {
+DoWhileStatement* Parser::ParseDoWhileStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
   // DoStatement ::
   //   'do' Statement 'while' '(' Expression ')' ';'
 
@@ -2742,7 +2834,8 @@
 }
 
 
-WhileStatement* Parser::ParseWhileStatement(ZoneStringList* labels, bool* ok) {
+WhileStatement* Parser::ParseWhileStatement(
+    ZoneList<const AstRawString*>* labels, bool* ok) {
   // WhileStatement ::
   //   'while' '(' Expression ')' Statement
 
@@ -2765,8 +2858,7 @@
   if (Check(Token::IN)) {
     *visit_mode = ForEachStatement::ENUMERATE;
     return true;
-  } else if (allow_for_of() && accept_OF &&
-             CheckContextualKeyword(CStrVector("of"))) {
+  } else if (accept_OF && CheckContextualKeyword(CStrVector("of"))) {
     *visit_mode = ForEachStatement::ITERATE;
     return true;
   }
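
With the allow_for_of() gate removed above, "of" is now recognized
unconditionally whenever the grammar permits it (accept_OF). Illustrative
JavaScript for the two visit modes:

    var obj = {a: 1}, arr = [1, 2];
    for (var x in obj) {}   // ForEachStatement::ENUMERATE
    for (var x of arr) {}   // ForEachStatement::ITERATE
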
@@ -2781,54 +2873,26 @@
   ForOfStatement* for_of = stmt->AsForOfStatement();
 
   if (for_of != NULL) {
-    Factory* heap_factory = isolate()->factory();
-    Variable* iterable = scope_->DeclarationScope()->NewTemporary(
-        heap_factory->dot_iterable_string());
     Variable* iterator = scope_->DeclarationScope()->NewTemporary(
-        heap_factory->dot_iterator_string());
+        ast_value_factory()->dot_iterator_string());
     Variable* result = scope_->DeclarationScope()->NewTemporary(
-        heap_factory->dot_result_string());
+        ast_value_factory()->dot_result_string());
 
-    Expression* assign_iterable;
     Expression* assign_iterator;
     Expression* next_result;
     Expression* result_done;
     Expression* assign_each;
 
-    // var iterable = subject;
-    {
-      Expression* iterable_proxy = factory()->NewVariableProxy(iterable);
-      assign_iterable = factory()->NewAssignment(
-          Token::ASSIGN, iterable_proxy, subject, subject->position());
-    }
-
-    // var iterator = iterable[Symbol.iterator]();
-    {
-      Expression* iterable_proxy = factory()->NewVariableProxy(iterable);
-      Handle<Symbol> iterator_symbol(
-          isolate()->native_context()->iterator_symbol(), isolate());
-      Expression* iterator_symbol_literal = factory()->NewLiteral(
-          iterator_symbol, RelocInfo::kNoPosition);
-      // FIXME(wingo): Unhappily, it will be a common error that the RHS of a
-      // for-of doesn't have a Symbol.iterator property.  We should do better
-      // than informing the user that "undefined is not a function".
-      int pos = subject->position();
-      Expression* iterator_property = factory()->NewProperty(
-          iterable_proxy, iterator_symbol_literal, pos);
-      ZoneList<Expression*>* iterator_arguments =
-          new(zone()) ZoneList<Expression*>(0, zone());
-      Expression* iterator_call = factory()->NewCall(
-          iterator_property, iterator_arguments, pos);
-      Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
-      assign_iterator = factory()->NewAssignment(
-          Token::ASSIGN, iterator_proxy, iterator_call, RelocInfo::kNoPosition);
-    }
+    // var iterator = subject[Symbol.iterator]();
+    assign_iterator = factory()->NewAssignment(
+        Token::ASSIGN, factory()->NewVariableProxy(iterator),
+        GetIterator(subject, factory()), RelocInfo::kNoPosition);
 
     // var result = iterator.next();
     {
       Expression* iterator_proxy = factory()->NewVariableProxy(iterator);
-      Expression* next_literal = factory()->NewLiteral(
-          heap_factory->next_string(), RelocInfo::kNoPosition);
+      Expression* next_literal = factory()->NewStringLiteral(
+          ast_value_factory()->next_string(), RelocInfo::kNoPosition);
       Expression* next_property = factory()->NewProperty(
           iterator_proxy, next_literal, RelocInfo::kNoPosition);
       ZoneList<Expression*>* next_arguments =
@@ -2842,8 +2906,8 @@
 
     // result.done
     {
-      Expression* done_literal = factory()->NewLiteral(
-          heap_factory->done_string(), RelocInfo::kNoPosition);
+      Expression* done_literal = factory()->NewStringLiteral(
+          ast_value_factory()->done_string(), RelocInfo::kNoPosition);
       Expression* result_proxy = factory()->NewVariableProxy(result);
       result_done = factory()->NewProperty(
           result_proxy, done_literal, RelocInfo::kNoPosition);
@@ -2851,8 +2915,8 @@
 
     // each = result.value
     {
-      Expression* value_literal = factory()->NewLiteral(
-          heap_factory->value_string(), RelocInfo::kNoPosition);
+      Expression* value_literal = factory()->NewStringLiteral(
+          ast_value_factory()->value_string(), RelocInfo::kNoPosition);
       Expression* result_proxy = factory()->NewVariableProxy(result);
       Expression* result_value = factory()->NewProperty(
           result_proxy, value_literal, RelocInfo::kNoPosition);
@@ -2861,7 +2925,6 @@
     }
 
     for_of->Initialize(each, subject, body,
-                       assign_iterable,
                        assign_iterator,
                        next_result,
                        result_done,
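
Read with the comments above: the rewritten desugaring fetches the iterator
directly from the subject via GetIterator(), dropping the old .iterable
temporary. A rough sketch of the loop the parser builds, in illustrative
JavaScript (.iterator and .result stand for internal temporaries, not
user-visible names):

    var iterator = subject[Symbol.iterator]();
    var result;
    for (;;) {
      result = iterator.next();
      if (result.done) break;
      each = result.value;
      // body
    }
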
@@ -2873,9 +2936,9 @@
 
 
 Statement* Parser::DesugarLetBindingsInForStatement(
-    Scope* inner_scope, ZoneStringList* names, ForStatement* loop,
-    Statement* init, Expression* cond, Statement* next, Statement* body,
-    bool* ok) {
+    Scope* inner_scope, ZoneList<const AstRawString*>* names,
+    ForStatement* loop, Statement* init, Expression* cond, Statement* next,
+    Statement* body, bool* ok) {
   // ES6 13.6.3.4 specifies that on each loop iteration the let variables are
   // copied into a new environment. After copying, the "next" statement of the
   // loop is executed to update the loop variables. The loop condition is
@@ -2908,7 +2971,7 @@
   //     }
   //  }
 
-  ASSERT(names->length() > 0);
+  DCHECK(names->length() > 0);
   Scope* for_scope = scope_;
   ZoneList<Variable*> temps(names->length(), zone());
 
@@ -2916,10 +2979,7 @@
                                            RelocInfo::kNoPosition);
   outer_block->AddStatement(init, zone());
 
-  Handle<String> temp_name = isolate()->factory()->dot_for_string();
-  Handle<Smi> smi0 = handle(Smi::FromInt(0), isolate());
-  Handle<Smi> smi1 = handle(Smi::FromInt(1), isolate());
-
+  const AstRawString* temp_name = ast_value_factory()->dot_for_string();
 
   // For each let variable x:
   //   make statement: temp_x = x.
@@ -2940,7 +3000,7 @@
   // Make statement: flag = 1.
   {
     VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
-    Expression* const1 = factory()->NewLiteral(smi1, RelocInfo::kNoPosition);
+    Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
     Assignment* assignment = factory()->NewAssignment(
         Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
     Statement* assignment_statement = factory()->NewExpressionStatement(
@@ -2976,11 +3036,11 @@
   }
 
   // Make statement: if (flag == 1) { flag = 0; } else { next; }.
-  {
+  if (next) {
     Expression* compare = NULL;
     // Make compare expression: flag == 1.
     {
-      Expression* const1 = factory()->NewLiteral(smi1, RelocInfo::kNoPosition);
+      Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
       VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
       compare = factory()->NewCompareOperation(
           Token::EQ, flag_proxy, const1, pos);
@@ -2989,7 +3049,7 @@
     // Make statement: flag = 0.
     {
       VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
-      Expression* const0 = factory()->NewLiteral(smi0, RelocInfo::kNoPosition);
+      Expression* const0 = factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
       Assignment* assignment = factory()->NewAssignment(
           Token::ASSIGN, flag_proxy, const0, RelocInfo::kNoPosition);
       clear_flag = factory()->NewExpressionStatement(assignment, pos);
@@ -3001,9 +3061,9 @@
 
 
   // Make statement: if (cond) { } else { break; }.
-  {
+  if (cond) {
     Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
-    BreakableStatement* t = LookupBreakTarget(Handle<String>(), CHECK_OK);
+    BreakableStatement* t = LookupBreakTarget(NULL, CHECK_OK);
     Statement* stop = factory()->NewBreakStatement(t, RelocInfo::kNoPosition);
     Statement* if_not_cond_break = factory()->NewIfStatement(
         cond, empty, stop, cond->position());
@@ -3034,13 +3094,14 @@
 }
 
 
-Statement* Parser::ParseForStatement(ZoneStringList* labels, bool* ok) {
+Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
+                                     bool* ok) {
   // ForStatement ::
   //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
 
   int pos = peek_position();
   Statement* init = NULL;
-  ZoneStringList let_bindings(1, zone());
+  ZoneList<const AstRawString*> let_bindings(1, zone());
 
   // Create an in-between scope for let-bound iteration variables.
   Scope* saved_scope = scope_;
@@ -3053,7 +3114,7 @@
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR || peek() == Token::CONST) {
       bool is_const = peek() == Token::CONST;
-      Handle<String> name;
+      const AstRawString* name = NULL;
       VariableDeclarationProperties decl_props = kHasNoInitializers;
       Block* variable_statement =
           ParseVariableDeclarations(kForStatement, &decl_props, NULL, &name,
@@ -3061,7 +3122,7 @@
       bool accept_OF = decl_props == kHasNoInitializers;
       ForEachStatement::VisitMode mode;
 
-      if (!name.is_null() && CheckInOrOf(accept_OF, &mode)) {
+      if (name != NULL && CheckInOrOf(accept_OF, &mode)) {
         Interface* interface =
             is_const ? Interface::NewConst() : Interface::NewValue();
         ForEachStatement* loop =
@@ -3082,19 +3143,20 @@
         scope_ = saved_scope;
         for_scope->set_end_position(scanner()->location().end_pos);
         for_scope = for_scope->FinalizeBlockScope();
-        ASSERT(for_scope == NULL);
+        DCHECK(for_scope == NULL);
         // Parsed for-in loop w/ variable/const declaration.
         return result;
       } else {
         init = variable_statement;
       }
-    } else if (peek() == Token::LET) {
-      Handle<String> name;
+    } else if (peek() == Token::LET && strict_mode() == STRICT) {
+      DCHECK(allow_harmony_scoping());
+      const AstRawString* name = NULL;
       VariableDeclarationProperties decl_props = kHasNoInitializers;
       Block* variable_statement =
-         ParseVariableDeclarations(kForStatement, &decl_props, &let_bindings,
-                                   &name, CHECK_OK);
-      bool accept_IN = !name.is_null() && decl_props != kHasInitializers;
+          ParseVariableDeclarations(kForStatement, &decl_props, &let_bindings,
+                                    &name, CHECK_OK);
+      bool accept_IN = name != NULL && decl_props != kHasInitializers;
       bool accept_OF = decl_props == kHasNoInitializers;
       ForEachStatement::VisitMode mode;
 
@@ -3114,14 +3176,8 @@
 
         // TODO(keuchel): Move the temporary variable to the block scope, after
         // implementing stack allocated block scoped variables.
-        Factory* heap_factory = isolate()->factory();
-        Handle<String> tempstr;
-        ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-            isolate(), tempstr,
-            heap_factory->NewConsString(heap_factory->dot_for_string(), name),
-            0);
-        Handle<String> tempname = heap_factory->InternalizeString(tempstr);
-        Variable* temp = scope_->DeclarationScope()->NewTemporary(tempname);
+        Variable* temp = scope_->DeclarationScope()->NewTemporary(
+            ast_value_factory()->dot_for_string());
         VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
         ForEachStatement* loop =
             factory()->NewForEachStatement(mode, labels, pos);
@@ -3178,7 +3234,7 @@
         scope_ = saved_scope;
         for_scope->set_end_position(scanner()->location().end_pos);
         for_scope = for_scope->FinalizeBlockScope();
-        ASSERT(for_scope == NULL);
+        DCHECK(for_scope == NULL);
         // Parsed for-in loop.
         return loop;
 
@@ -3228,11 +3284,31 @@
     scope_ = saved_scope;
     for_scope->set_end_position(scanner()->location().end_pos);
   } else {
-    loop->Initialize(init, cond, next, body);
-    result = loop;
     scope_ = saved_scope;
     for_scope->set_end_position(scanner()->location().end_pos);
-    for_scope->FinalizeBlockScope();
+    for_scope = for_scope->FinalizeBlockScope();
+    if (for_scope) {
+      // Rewrite a for statement of the form
+      //   for (const x = i; c; n) b
+      //
+      // into
+      //
+      //   {
+      //     const x = i;
+      //     for (; c; n) b
+      //   }
+      DCHECK(init != NULL);
+      Block* block =
+          factory()->NewBlock(NULL, 2, false, RelocInfo::kNoPosition);
+      block->AddStatement(init, zone());
+      block->AddStatement(loop, zone());
+      block->set_scope(for_scope);
+      loop->Initialize(NULL, cond, next, body);
+      result = block;
+    } else {
+      loop->Initialize(init, cond, next, body);
+      result = loop;
+    }
   }
   return result;
 }
@@ -3252,12 +3328,6 @@
 }
 
 
-void Parser::ReportInvalidCachedData(Handle<String> name, bool* ok) {
-  ParserTraits::ReportMessage("invalid_cached_data_function", name);
-  *ok = false;
-}
-
-
 bool CompileTimeValue::IsCompileTimeValue(Expression* expression) {
   if (expression->IsLiteral()) return true;
   MaterializedLiteral* lit = expression->AsMaterializedLiteral();
@@ -3268,11 +3338,11 @@
 Handle<FixedArray> CompileTimeValue::GetValue(Isolate* isolate,
                                               Expression* expression) {
   Factory* factory = isolate->factory();
-  ASSERT(IsCompileTimeValue(expression));
+  DCHECK(IsCompileTimeValue(expression));
   Handle<FixedArray> result = factory->NewFixedArray(2, TENURED);
   ObjectLiteral* object_literal = expression->AsObjectLiteral();
   if (object_literal != NULL) {
-    ASSERT(object_literal->is_simple());
+    DCHECK(object_literal->is_simple());
     if (object_literal->fast_elements()) {
       result->set(kLiteralTypeSlot, Smi::FromInt(OBJECT_LITERAL_FAST_ELEMENTS));
     } else {
@@ -3281,7 +3351,7 @@
     result->set(kElementsSlot, *object_literal->constant_properties());
   } else {
     ArrayLiteral* array_literal = expression->AsArrayLiteral();
-    ASSERT(array_literal != NULL && array_literal->is_simple());
+    DCHECK(array_literal != NULL && array_literal->is_simple());
     result->set(kLiteralTypeSlot, Smi::FromInt(ARRAY_LITERAL));
     result->set(kElementsSlot, *array_literal->constant_elements());
   }
@@ -3301,15 +3371,73 @@
 }
 
 
-FunctionLiteral* Parser::ParseFunctionLiteral(
-    Handle<String> function_name,
-    Scanner::Location function_name_location,
-    bool name_is_strict_reserved,
-    bool is_generator,
-    int function_token_pos,
-    FunctionLiteral::FunctionType function_type,
-    FunctionLiteral::ArityRestriction arity_restriction,
+bool CheckAndDeclareArrowParameter(ParserTraits* traits, Expression* expression,
+                                   Scope* scope, int* num_params,
+                                   Scanner::Location* dupe_loc) {
+  // Case for empty parameter lists:
+  //   () => ...
+  if (expression == NULL) return true;
+
+  // Too many parentheses around expression:
+  //   (( ... )) => ...
+  if (expression->parenthesization_level() > 1) return false;
+
+  // Case for a single parameter:
+  //   (foo) => ...
+  //   foo => ...
+  if (expression->IsVariableProxy()) {
+    if (expression->AsVariableProxy()->is_this()) return false;
+
+    const AstRawString* raw_name = expression->AsVariableProxy()->raw_name();
+    if (traits->IsEvalOrArguments(raw_name) ||
+        traits->IsFutureStrictReserved(raw_name))
+      return false;
+
+    if (scope->IsDeclared(raw_name)) {
+      *dupe_loc = Scanner::Location(
+          expression->position(), expression->position() + raw_name->length());
+      return false;
+    }
+
+    scope->DeclareParameter(raw_name, VAR);
+    ++(*num_params);
+    return true;
+  }
+
+  // Case for more than one parameter:
+  //   (foo, bar [, ...]) => ...
+  if (expression->IsBinaryOperation()) {
+    BinaryOperation* binop = expression->AsBinaryOperation();
+    if (binop->op() != Token::COMMA || binop->left()->is_parenthesized() ||
+        binop->right()->is_parenthesized())
+      return false;
+
+    return CheckAndDeclareArrowParameter(traits, binop->left(), scope,
+                                         num_params, dupe_loc) &&
+           CheckAndDeclareArrowParameter(traits, binop->right(), scope,
+                                         num_params, dupe_loc);
+  }
+
+  // Any other kind of expression is not a valid parameter list.
+  return false;
+}
+
+
+int ParserTraits::DeclareArrowParametersFromExpression(
+    Expression* expression, Scope* scope, Scanner::Location* dupe_loc,
     bool* ok) {
+  int num_params = 0;
+  *ok = CheckAndDeclareArrowParameter(this, expression, scope, &num_params,
+                                      dupe_loc);
+  return num_params;
+}
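
Per the cases above, a (possibly parenthesized) expression qualifies as an
arrow parameter list only if it is empty, a single plain identifier, or a
comma chain of plain identifiers with no extra parentheses. For instance:

    var f = () => 0;          // accepted: empty list
    var g = x => x;           // accepted: single identifier
    var h = (a, b, c) => a;   // accepted: comma-separated identifiers

    // Rejected by CheckAndDeclareArrowParameter:
    //   ((a)) => a      parenthesization level > 1
    //   (a, (b)) => b   parenthesized comma operand
    //   (this) => 0     'this' is not a binding identifier
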
+
+
+FunctionLiteral* Parser::ParseFunctionLiteral(
+    const AstRawString* function_name, Scanner::Location function_name_location,
+    bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
+    FunctionLiteral::FunctionType function_type,
+    FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
   //
@@ -3322,14 +3450,16 @@
   int pos = function_token_pos == RelocInfo::kNoPosition
       ? peek_position() : function_token_pos;
 
+  bool is_generator = IsGeneratorFunction(kind);
+
   // Anonymous functions were passed either the empty symbol or a null
   // handle as the function name.  Remember if we were passed a non-empty
   // handle to decide whether to invoke function name inference.
-  bool should_infer_name = function_name.is_null();
+  bool should_infer_name = function_name == NULL;
 
   // We want a non-null handle as the function name.
   if (should_infer_name) {
-    function_name = isolate()->factory()->empty_string();
+    function_name = ast_value_factory()->empty_string();
   }
 
   int num_parameters = 0;
@@ -3381,7 +3511,10 @@
   AstProperties ast_properties;
   BailoutReason dont_optimize_reason = kNoReason;
   // Parse function body.
-  { FunctionState function_state(&function_state_, &scope_, scope, zone());
+  {
+    FunctionState function_state(&function_state_, &scope_, scope, zone(),
+                                 ast_value_factory(),
+                                 info()->ast_node_id_gen());
     scope_->SetScopeName(function_name);
 
     if (is_generator) {
@@ -3394,7 +3527,7 @@
       // in a temporary variable, a definition that is used by "yield"
       // expressions. This also marks the FunctionState as a generator.
       Variable* temp = scope_->DeclarationScope()->NewTemporary(
-          isolate()->factory()->dot_generator_object_string());
+          ast_value_factory()->dot_generator_object_string());
       function_state.set_generator_object_variable(temp);
     }
 
@@ -3415,7 +3548,7 @@
          arity_restriction != FunctionLiteral::SETTER_ARITY);
     while (!done) {
       bool is_strict_reserved = false;
-      Handle<String> param_name =
+      const AstRawString* param_name =
           ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
 
       // Store locations for possible future error reports.
@@ -3430,7 +3563,14 @@
         dupe_error_loc = scanner()->location();
       }
 
-      scope_->DeclareParameter(param_name, VAR);
+      Variable* var = scope_->DeclareParameter(param_name, VAR);
+      if (scope->strict_mode() == SLOPPY) {
+        // TODO(sigurds) Mark every parameter as maybe assigned. This is a
+        // conservative approximation necessary to account for parameters
+        // that are assigned via the arguments array.
+        var->set_maybe_assigned();
+      }
+
       num_parameters++;
       if (num_parameters > Code::kMaxArguments) {
         ReportMessage("too_many_parameters");
@@ -3458,11 +3598,13 @@
         fvar_init_op = Token::INIT_CONST;
       }
       VariableMode fvar_mode =
-          allow_harmony_scoping() && strict_mode() == STRICT ? CONST
-                                                             : CONST_LEGACY;
-      fvar = new(zone()) Variable(scope_,
-         function_name, fvar_mode, true /* is valid LHS */,
-         Variable::NORMAL, kCreatedInitialized, Interface::NewConst());
+          allow_harmony_scoping() && strict_mode() == STRICT
+              ? CONST : CONST_LEGACY;
+      DCHECK(function_name != NULL);
+      fvar = new (zone())
+          Variable(scope_, function_name, fvar_mode, true /* is valid LHS */,
+                   Variable::NORMAL, kCreatedInitialized, kNotAssigned,
+                   Interface::NewConst());
       VariableProxy* proxy = factory()->NewVariableProxy(fvar);
       VariableDeclaration* fvar_declaration = factory()->NewVariableDeclaration(
           proxy, fvar_mode, scope_, RelocInfo::kNoPosition);
@@ -3518,63 +3660,35 @@
       handler_count = function_state.handler_count();
     }
 
-    // Validate strict mode. We can do this only after parsing the function,
-    // since the function can declare itself strict.
+    // Validate strict mode.
+    // Concise methods use StrictFormalParameters.
+    if (strict_mode() == STRICT || IsConciseMethod(kind)) {
+      CheckStrictFunctionNameAndParameters(function_name,
+                                           name_is_strict_reserved,
+                                           function_name_location,
+                                           eval_args_error_log,
+                                           dupe_error_loc,
+                                           reserved_loc,
+                                           CHECK_OK);
+    }
     if (strict_mode() == STRICT) {
-      if (IsEvalOrArguments(function_name)) {
-        ReportMessageAt(function_name_location, "strict_eval_arguments");
-        *ok = false;
-        return NULL;
-      }
-      if (name_is_strict_reserved) {
-        ReportMessageAt(function_name_location, "unexpected_strict_reserved");
-        *ok = false;
-        return NULL;
-      }
-      if (eval_args_error_log.IsValid()) {
-        ReportMessageAt(eval_args_error_log, "strict_eval_arguments");
-        *ok = false;
-        return NULL;
-      }
-      if (dupe_error_loc.IsValid()) {
-        ReportMessageAt(dupe_error_loc, "strict_param_dupe");
-        *ok = false;
-        return NULL;
-      }
-      if (reserved_loc.IsValid()) {
-        ReportMessageAt(reserved_loc, "unexpected_strict_reserved");
-        *ok = false;
-        return NULL;
-      }
       CheckOctalLiteral(scope->start_position(),
                         scope->end_position(),
                         CHECK_OK);
     }
     ast_properties = *factory()->visitor()->ast_properties();
     dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
+
+    if (allow_harmony_scoping() && strict_mode() == STRICT) {
+      CheckConflictingVarDeclarations(scope, CHECK_OK);
+    }
   }
 
-  if (allow_harmony_scoping() && strict_mode() == STRICT) {
-    CheckConflictingVarDeclarations(scope, CHECK_OK);
-  }
-
-  FunctionLiteral::IsGeneratorFlag generator = is_generator
-      ? FunctionLiteral::kIsGenerator
-      : FunctionLiteral::kNotGenerator;
-  FunctionLiteral* function_literal =
-      factory()->NewFunctionLiteral(function_name,
-                                    scope,
-                                    body,
-                                    materialized_literal_count,
-                                    expected_property_count,
-                                    handler_count,
-                                    num_parameters,
-                                    duplicate_parameters,
-                                    function_type,
-                                    FunctionLiteral::kIsFunction,
-                                    parenthesized,
-                                    generator,
-                                    pos);
+  FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
+      function_name, ast_value_factory(), scope, body,
+      materialized_literal_count, expected_property_count, handler_count,
+      num_parameters, duplicate_parameters, function_type,
+      FunctionLiteral::kIsFunction, parenthesized, kind, pos);
   function_literal->set_function_token_position(function_token_pos);
   function_literal->set_ast_properties(&ast_properties);
   function_literal->set_dont_optimize_reason(dont_optimize_reason);
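
The consolidated CheckStrictFunctionNameAndParameters call above replaces the
five inlined checks, and now also runs for concise methods. Illustrative
strict-mode sources that trigger the reported errors:

    "use strict";
    function eval() {}       // strict_eval_arguments
    function interface() {}  // unexpected_strict_reserved
    function f(a, a) {}      // strict_param_dupe
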
@@ -3584,42 +3698,31 @@
 }
 
 
-void Parser::SkipLazyFunctionBody(Handle<String> function_name,
+void Parser::SkipLazyFunctionBody(const AstRawString* function_name,
                                   int* materialized_literal_count,
                                   int* expected_property_count,
                                   bool* ok) {
   int function_block_pos = position();
-  if (cached_data_mode_ == CONSUME_CACHED_DATA) {
+  if (compile_options() == ScriptCompiler::kConsumeParserCache) {
     // If we have cached data, we use it to skip parsing the function body. The
     // data contains the information we need to construct the lazy function.
     FunctionEntry entry =
-        (*cached_data())->GetFunctionEntry(function_block_pos);
-    if (entry.is_valid()) {
-      if (entry.end_pos() <= function_block_pos) {
-        // End position greater than end of stream is safe, and hard to check.
-        ReportInvalidCachedData(function_name, ok);
-        if (!*ok) {
-          return;
-        }
-      }
-      scanner()->SeekForward(entry.end_pos() - 1);
+        cached_parse_data_->GetFunctionEntry(function_block_pos);
+    // Check that cached data is valid.
+    CHECK(entry.is_valid());
+    // End position greater than end of stream is safe, and hard to check.
+    CHECK(entry.end_pos() > function_block_pos);
+    scanner()->SeekForward(entry.end_pos() - 1);
 
-      scope_->set_end_position(entry.end_pos());
-      Expect(Token::RBRACE, ok);
-      if (!*ok) {
-        return;
-      }
-      isolate()->counters()->total_preparse_skipped()->Increment(
-          scope_->end_position() - function_block_pos);
-      *materialized_literal_count = entry.literal_count();
-      *expected_property_count = entry.property_count();
-      scope_->SetStrictMode(entry.strict_mode());
-    } else {
-      // This case happens when we have preparse data but it doesn't contain an
-      // entry for the function. Fail the compilation.
-      ReportInvalidCachedData(function_name, ok);
+    scope_->set_end_position(entry.end_pos());
+    Expect(Token::RBRACE, ok);
+    if (!*ok) {
       return;
     }
+    total_preparse_skipped_ += scope_->end_position() - function_block_pos;
+    *materialized_literal_count = entry.literal_count();
+    *expected_property_count = entry.property_count();
+    scope_->SetStrictMode(entry.strict_mode());
   } else {
     // With no cached data, we partially parse the function, without building an
     // AST. This gathers the data needed to build a lazy function.
@@ -3644,13 +3747,12 @@
     if (!*ok) {
       return;
     }
-    isolate()->counters()->total_preparse_skipped()->Increment(
-        scope_->end_position() - function_block_pos);
+    total_preparse_skipped_ += scope_->end_position() - function_block_pos;
     *materialized_literal_count = logger.literals();
     *expected_property_count = logger.properties();
     scope_->SetStrictMode(logger.strict_mode());
-    if (cached_data_mode_ == PRODUCE_CACHED_DATA) {
-      ASSERT(log_);
+    if (compile_options() == ScriptCompiler::kProduceParserCache) {
+      DCHECK(log_);
       // Position right after terminal '}'.
       int body_end = scanner()->location().end_pos;
       log_->LogFunction(function_block_pos, body_end,
@@ -3663,7 +3765,7 @@
 
 
 ZoneList<Statement*>* Parser::ParseEagerFunctionBody(
-    Handle<String> function_name, int pos, Variable* fvar,
+    const AstRawString* function_name, int pos, Variable* fvar,
     Token::Value fvar_init_op, bool is_generator, bool* ok) {
   // Everything inside an eagerly parsed function will be parsed eagerly
   // (see comment above).
@@ -3686,9 +3788,9 @@
     ZoneList<Expression*>* arguments =
         new(zone()) ZoneList<Expression*>(0, zone());
     CallRuntime* allocation = factory()->NewCallRuntime(
-        isolate()->factory()->empty_string(),
-        Runtime::FunctionForId(Runtime::kHiddenCreateJSGeneratorObject),
-        arguments, pos);
+        ast_value_factory()->empty_string(),
+        Runtime::FunctionForId(Runtime::kCreateJSGeneratorObject), arguments,
+        pos);
     VariableProxy* init_proxy = factory()->NewVariableProxy(
         function_state_->generator_object_variable());
     Assignment* assignment = factory()->NewAssignment(
@@ -3696,20 +3798,20 @@
     VariableProxy* get_proxy = factory()->NewVariableProxy(
         function_state_->generator_object_variable());
     Yield* yield = factory()->NewYield(
-        get_proxy, assignment, Yield::INITIAL, RelocInfo::kNoPosition);
+        get_proxy, assignment, Yield::kInitial, RelocInfo::kNoPosition);
     body->Add(factory()->NewExpressionStatement(
         yield, RelocInfo::kNoPosition), zone());
   }
 
-  ParseSourceElements(body, Token::RBRACE, false, false, CHECK_OK);
+  ParseSourceElements(body, Token::RBRACE, false, false, NULL, CHECK_OK);
 
   if (is_generator) {
     VariableProxy* get_proxy = factory()->NewVariableProxy(
         function_state_->generator_object_variable());
-    Expression *undefined = factory()->NewLiteral(
-        isolate()->factory()->undefined_value(), RelocInfo::kNoPosition);
-    Yield* yield = factory()->NewYield(
-        get_proxy, undefined, Yield::FINAL, RelocInfo::kNoPosition);
+    Expression* undefined =
+        factory()->NewUndefinedLiteral(RelocInfo::kNoPosition);
+    Yield* yield = factory()->NewYield(get_proxy, undefined, Yield::kFinal,
+                                       RelocInfo::kNoPosition);
     body->Add(factory()->NewExpressionStatement(
         yield, RelocInfo::kNoPosition), zone());
   }
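
So an eagerly parsed generator body remains bracketed by two synthetic yields,
now spelled Yield::kInitial and Yield::kFinal. A non-runnable sketch
(.generator_object and %CreateJSGeneratorObject are internal names):

    function* g() { /* body */ }
    // is built roughly as:
    //   .generator_object = %CreateJSGeneratorObject();
    //   yield .generator_object;   // Yield::kInitial
    //   /* body */
    //   yield undefined;           // Yield::kFinal
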
@@ -3723,25 +3825,33 @@
 
 PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
     SingletonLogger* logger) {
-  HistogramTimerScope preparse_scope(isolate()->counters()->pre_parse());
-  ASSERT_EQ(Token::LBRACE, scanner()->current_token());
+  // This function may be called on a background thread too; record only the
+  // main thread preparse times.
+  if (pre_parse_timer_ != NULL) {
+    pre_parse_timer_->Start();
+  }
+  DCHECK_EQ(Token::LBRACE, scanner()->current_token());
 
   if (reusable_preparser_ == NULL) {
-    intptr_t stack_limit = isolate()->stack_guard()->real_climit();
-    reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit);
+    reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit_);
     reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
     reusable_preparser_->set_allow_modules(allow_modules());
     reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
     reusable_preparser_->set_allow_lazy(true);
-    reusable_preparser_->set_allow_generators(allow_generators());
-    reusable_preparser_->set_allow_for_of(allow_for_of());
+    reusable_preparser_->set_allow_arrow_functions(allow_arrow_functions());
     reusable_preparser_->set_allow_harmony_numeric_literals(
         allow_harmony_numeric_literals());
+    reusable_preparser_->set_allow_classes(allow_classes());
+    reusable_preparser_->set_allow_harmony_object_literals(
+        allow_harmony_object_literals());
   }
   PreParser::PreParseResult result =
       reusable_preparser_->PreParseLazyFunction(strict_mode(),
                                                 is_generator(),
                                                 logger);
+  if (pre_parse_timer_ != NULL) {
+    pre_parse_timer_->Stop();
+  }
   return result;
 }
 
@@ -3753,7 +3863,7 @@
   int pos = peek_position();
   Expect(Token::MOD, CHECK_OK);
   // Allow "eval" or "arguments" for backward compatibility.
-  Handle<String> name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
+  const AstRawString* name = ParseIdentifier(kAllowEvalOrArguments, CHECK_OK);
   ZoneList<Expression*>* args = ParseArguments(CHECK_OK);
 
   if (extension_ != NULL) {
@@ -3762,7 +3872,7 @@
     scope_->DeclarationScope()->ForceEagerCompilation();
   }
 
-  const Runtime::Function* function = Runtime::FunctionForName(name);
+  const Runtime::Function* function = Runtime::FunctionForName(name->string());
 
   // Check for built-in IS_VAR macro.
   if (function != NULL &&
@@ -3790,7 +3900,7 @@
   }
 
   // Check that the function is defined if it's an inline runtime call.
-  if (function == NULL && name->Get(0) == '_') {
+  if (function == NULL && name->FirstCharacter() == '_') {
     ParserTraits::ReportMessage("not_defined", name);
     *ok = false;
     return NULL;
@@ -3802,8 +3912,7 @@
 
 
 Literal* Parser::GetLiteralUndefined(int position) {
-  return factory()->NewLiteral(
-      isolate()->factory()->undefined_value(), position);
+  return factory()->NewUndefinedLiteral(position);
 }
 
 
@@ -3812,7 +3921,7 @@
   if (decl != NULL) {
    // In harmony mode we treat conflicting variable bindings as early
     // errors. See ES5 16 for a definition of early errors.
-    Handle<String> name = decl->proxy()->name();
+    const AstRawString* name = decl->proxy()->raw_name();
     int position = decl->proxy()->position();
     Scanner::Location location = position == RelocInfo::kNoPosition
         ? Scanner::Location::invalid()
@@ -3827,7 +3936,7 @@
 // Parser support
 
 
-bool Parser::TargetStackContainsLabel(Handle<String> label) {
+bool Parser::TargetStackContainsLabel(const AstRawString* label) {
   for (Target* t = target_stack_; t != NULL; t = t->previous()) {
     BreakableStatement* stat = t->node()->AsBreakableStatement();
     if (stat != NULL && ContainsLabel(stat->labels(), label))
@@ -3837,8 +3946,9 @@
 }
 
 
-BreakableStatement* Parser::LookupBreakTarget(Handle<String> label, bool* ok) {
-  bool anonymous = label.is_null();
+BreakableStatement* Parser::LookupBreakTarget(const AstRawString* label,
+                                              bool* ok) {
+  bool anonymous = label == NULL;
   for (Target* t = target_stack_; t != NULL; t = t->previous()) {
     BreakableStatement* stat = t->node()->AsBreakableStatement();
     if (stat == NULL) continue;
@@ -3852,14 +3962,14 @@
 }
 
 
-IterationStatement* Parser::LookupContinueTarget(Handle<String> label,
+IterationStatement* Parser::LookupContinueTarget(const AstRawString* label,
                                                  bool* ok) {
-  bool anonymous = label.is_null();
+  bool anonymous = label == NULL;
   for (Target* t = target_stack_; t != NULL; t = t->previous()) {
     IterationStatement* stat = t->node()->AsIterationStatement();
     if (stat == NULL) continue;
 
-    ASSERT(stat->is_target_for_anonymous());
+    DCHECK(stat->is_target_for_anonymous());
     if (anonymous || ContainsLabel(stat->labels(), label)) {
       RegisterTargetUse(stat->continue_target(), t->previous());
       return stat;
@@ -3880,32 +3990,75 @@
 }
 
 
+void Parser::HandleSourceURLComments() {
+  if (scanner_.source_url()->length() > 0) {
+    Handle<String> source_url = scanner_.source_url()->Internalize(isolate());
+    info_->script()->set_source_url(*source_url);
+  }
+  if (scanner_.source_mapping_url()->length() > 0) {
+    Handle<String> source_mapping_url =
+        scanner_.source_mapping_url()->Internalize(isolate());
+    info_->script()->set_source_mapping_url(*source_mapping_url);
+  }
+}
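
These scanner fields are populated from the standard magic comments when a
script contains them (file names below are illustrative):

    //# sourceURL=myScript.js
    //# sourceMappingURL=myScript.js.map
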
+
+
 void Parser::ThrowPendingError() {
+  DCHECK(ast_value_factory()->IsInternalized());
   if (has_pending_error_) {
-    MessageLocation location(script_,
-                             pending_error_location_.beg_pos,
+    MessageLocation location(script(), pending_error_location_.beg_pos,
                              pending_error_location_.end_pos);
     Factory* factory = isolate()->factory();
     bool has_arg =
-        !pending_error_arg_.is_null() || pending_error_char_arg_ != NULL;
+        pending_error_arg_ != NULL || pending_error_char_arg_ != NULL;
     Handle<FixedArray> elements = factory->NewFixedArray(has_arg ? 1 : 0);
-    if (!pending_error_arg_.is_null()) {
-      elements->set(0, *(pending_error_arg_.ToHandleChecked()));
+    if (pending_error_arg_ != NULL) {
+      Handle<String> arg_string = pending_error_arg_->string();
+      elements->set(0, *arg_string);
     } else if (pending_error_char_arg_ != NULL) {
       Handle<String> arg_string =
           factory->NewStringFromUtf8(CStrVector(pending_error_char_arg_))
           .ToHandleChecked();
       elements->set(0, *arg_string);
     }
+    isolate()->debug()->OnCompileError(script());
+
     Handle<JSArray> array = factory->NewJSArrayWithElements(elements);
-    Handle<Object> result = pending_error_is_reference_error_
-        ? factory->NewReferenceError(pending_error_message_, array)
-        : factory->NewSyntaxError(pending_error_message_, array);
-    isolate()->Throw(*result, &location);
+    Handle<Object> error;
+    MaybeHandle<Object> maybe_error =
+        pending_error_is_reference_error_
+            ? factory->NewReferenceError(pending_error_message_, array)
+            : factory->NewSyntaxError(pending_error_message_, array);
+    if (maybe_error.ToHandle(&error)) isolate()->Throw(*error, &location);
   }
 }
 
 
+void Parser::Internalize() {
+  // Internalize strings.
+  ast_value_factory()->Internalize(isolate());
+
+  // Error processing.
+  if (info()->function() == NULL) {
+    if (stack_overflow()) {
+      isolate()->StackOverflow();
+    } else {
+      ThrowPendingError();
+    }
+  }
+
+  // Move statistics to Isolate.
+  for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
+       ++feature) {
+    for (int i = 0; i < use_counts_[feature]; ++i) {
+      isolate()->CountUsage(v8::Isolate::UseCounterFeature(feature));
+    }
+  }
+  isolate()->counters()->total_preparse_skipped()->Increment(
+      total_preparse_skipped_);
+}
+
+
 // ----------------------------------------------------------------------------
 // Regular expressions
 
@@ -3991,7 +4144,7 @@
 //   Disjunction
 RegExpTree* RegExpParser::ParsePattern() {
   RegExpTree* result = ParseDisjunction(CHECK_FAILED);
-  ASSERT(!has_more());
+  DCHECK(!has_more());
   // If the result of parsing is a literal string atom, and it has the
   // same length as the input, then the atom is identical to the input.
   if (result->IsAtom() && result->AsAtom()->length() == in()->length()) {
@@ -4024,14 +4177,14 @@
         // Inside a parenthesized group when hitting end of input.
         ReportError(CStrVector("Unterminated group") CHECK_FAILED);
       }
-      ASSERT_EQ(INITIAL, stored_state->group_type());
+      DCHECK_EQ(INITIAL, stored_state->group_type());
       // Parsing completed successfully.
       return builder->ToRegExp();
     case ')': {
       if (!stored_state->IsSubexpression()) {
         ReportError(CStrVector("Unmatched ')'") CHECK_FAILED);
       }
-      ASSERT_NE(INITIAL, stored_state->group_type());
+      DCHECK_NE(INITIAL, stored_state->group_type());
 
       Advance();
       // End disjunction parsing and convert builder content to new single
@@ -4053,7 +4206,7 @@
         captures_->at(capture_index - 1) = capture;
         body = capture;
       } else if (group_type != GROUPING) {
-        ASSERT(group_type == POSITIVE_LOOKAHEAD ||
+        DCHECK(group_type == POSITIVE_LOOKAHEAD ||
                group_type == NEGATIVE_LOOKAHEAD);
         bool is_positive = (group_type == POSITIVE_LOOKAHEAD);
         body = new(zone()) RegExpLookahead(body,
@@ -4337,7 +4490,7 @@
 
 
 #ifdef DEBUG
-// Currently only used in an ASSERT.
+// Currently only used in a DCHECK.
 static bool IsSpecialClassEscape(uc32 c) {
   switch (c) {
     case 'd': case 'D':
@@ -4391,8 +4544,8 @@
 
 
 bool RegExpParser::ParseBackReferenceIndex(int* index_out) {
-  ASSERT_EQ('\\', current());
-  ASSERT('1' <= Next() && Next() <= '9');
+  DCHECK_EQ('\\', current());
+  DCHECK('1' <= Next() && Next() <= '9');
   // Try to parse a decimal literal that is no greater than the total number
   // of left capturing parentheses in the input.
   int start = position();
@@ -4435,7 +4588,7 @@
 // Returns true if parsing succeeds, and sets the min_out and max_out
 // values. Values are truncated to RegExpTree::kInfinity if they overflow.
 bool RegExpParser::ParseIntervalQuantifier(int* min_out, int* max_out) {
-  ASSERT_EQ(current(), '{');
+  DCHECK_EQ(current(), '{');
   int start = position();
   Advance();
   int min = 0;
@@ -4495,7 +4648,7 @@
 
 
 uc32 RegExpParser::ParseOctalLiteral() {
-  ASSERT(('0' <= current() && current() <= '7') || current() == kEndMarker);
+  DCHECK(('0' <= current() && current() <= '7') || current() == kEndMarker);
   // For compatibility with some other browsers (not all), we parse
   // up to three octal digits with a value below 256.
   uc32 value = current() - '0';
@@ -4535,8 +4688,8 @@
 
 
 uc32 RegExpParser::ParseClassCharacterEscape() {
-  ASSERT(current() == '\\');
-  ASSERT(has_next() && !IsSpecialClassEscape(Next()));
+  DCHECK(current() == '\\');
+  DCHECK(has_next() && !IsSpecialClassEscape(Next()));
   Advance();
   switch (current()) {
     case 'b':
@@ -4616,7 +4769,7 @@
 
 
 CharacterRange RegExpParser::ParseClassAtom(uc16* char_class) {
-  ASSERT_EQ(0, *char_class);
+  DCHECK_EQ(0, *char_class);
   uc32 first = current();
   if (first == '\\') {
     switch (Next()) {
@@ -4659,7 +4812,7 @@
   static const char* kUnterminated = "Unterminated character class";
   static const char* kRangeOutOfOrder = "Range out of order in character class";
 
-  ASSERT_EQ(current(), '[');
+  DCHECK_EQ(current(), '[');
   Advance();
   bool is_negated = false;
   if (current() == '^') {
@@ -4714,83 +4867,19 @@
 // ----------------------------------------------------------------------------
 // The Parser interface.
 
-ScriptData::~ScriptData() {
-  if (owns_store_) store_.Dispose();
-}
-
-
-int ScriptData::Length() {
-  return store_.length() * sizeof(unsigned);
-}
-
-
-const char* ScriptData::Data() {
-  return reinterpret_cast<const char*>(store_.start());
-}
-
-
-bool ScriptData::HasError() {
-  return has_error();
-}
-
-
-void ScriptData::Initialize() {
-  // Prepares state for use.
-  if (store_.length() >= PreparseDataConstants::kHeaderSize) {
-    function_index_ = PreparseDataConstants::kHeaderSize;
-    int symbol_data_offset = PreparseDataConstants::kHeaderSize
-        + store_[PreparseDataConstants::kFunctionsSizeOffset];
-    if (store_.length() > symbol_data_offset) {
-      symbol_data_ = reinterpret_cast<byte*>(&store_[symbol_data_offset]);
-    } else {
-      // Partial preparse causes no symbol information.
-      symbol_data_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
-    }
-    symbol_data_end_ = reinterpret_cast<byte*>(&store_[0] + store_.length());
-  }
-}
-
-
-int ScriptData::ReadNumber(byte** source) {
-  // Reads a number from symbol_data_ in base 128. The most significant
-  // bit marks that there are more digits.
-  // If the first byte is 0x80 (kNumberTerminator), it would normally
-  // represent a leading zero. Since that is useless, and therefore won't
-  // appear as the first digit of any actual value, it is used to
-  // mark the end of the input stream.
-  byte* data = *source;
-  if (data >= symbol_data_end_) return -1;
-  byte input = *data;
-  if (input == PreparseDataConstants::kNumberTerminator) {
-    // End of stream marker.
-    return -1;
-  }
-  int result = input & 0x7f;
-  data++;
-  while ((input & 0x80u) != 0) {
-    if (data >= symbol_data_end_) return -1;
-    input = *data;
-    result = (result << 7) | (input & 0x7f);
-    data++;
-  }
-  *source = data;
-  return result;
-}
-
-
 bool RegExpParser::ParseRegExp(FlatStringReader* input,
                                bool multiline,
                                RegExpCompileData* result,
                                Zone* zone) {
-  ASSERT(result != NULL);
+  DCHECK(result != NULL);
   RegExpParser parser(input, &result->error, multiline, zone);
   RegExpTree* tree = parser.ParsePattern();
   if (parser.failed()) {
-    ASSERT(tree == NULL);
-    ASSERT(!result->error.is_null());
+    DCHECK(tree == NULL);
+    DCHECK(!result->error.is_null());
   } else {
-    ASSERT(tree != NULL);
-    ASSERT(result->error.is_null());
+    DCHECK(tree != NULL);
+    DCHECK(result->error.is_null());
     result->tree = tree;
     int capture_count = parser.captures_started();
     result->simple = tree->IsAtom() && parser.simple() && capture_count == 0;
@@ -4802,34 +4891,74 @@
 
 
 bool Parser::Parse() {
-  ASSERT(info()->function() == NULL);
+  DCHECK(info()->function() == NULL);
   FunctionLiteral* result = NULL;
+  pre_parse_timer_ = isolate()->counters()->pre_parse();
+  if (FLAG_trace_parse || allow_natives_syntax() || extension_ != NULL) {
+    // If intrinsics are allowed, the Parser cannot operate independently of
+    // the V8 heap because of Runtime. Tell the string table to internalize
+    // strings and values right after they're created.
+    ast_value_factory()->Internalize(isolate());
+  }
+
   if (info()->is_lazy()) {
-    ASSERT(!info()->is_eval());
+    DCHECK(!info()->is_eval());
     if (info()->shared_info()->is_function()) {
       result = ParseLazy();
     } else {
       result = ParseProgram();
     }
   } else {
-    SetCachedData(info()->cached_data(), info()->cached_data_mode());
-    if (info()->cached_data_mode() == CONSUME_CACHED_DATA &&
-        (*info()->cached_data())->has_error()) {
-      ScriptData* cached_data = *(info()->cached_data());
-      Scanner::Location loc = cached_data->MessageLocation();
-      const char* message = cached_data->BuildMessage();
-      const char* arg = cached_data->BuildArg();
-      ParserTraits::ReportMessageAt(loc, message, arg,
-                                    cached_data->IsReferenceError());
-      DeleteArray(message);
-      DeleteArray(arg);
-      ASSERT(info()->isolate()->has_pending_exception());
-    } else {
-      result = ParseProgram();
-    }
+    SetCachedData();
+    result = ParseProgram();
   }
   info()->SetFunction(result);
+
+  Internalize();
+  DCHECK(ast_value_factory()->IsInternalized());
   return (result != NULL);
 }
 
+
+void Parser::ParseOnBackground() {
+  DCHECK(info()->function() == NULL);
+  FunctionLiteral* result = NULL;
+  fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
+
+  CompleteParserRecorder recorder;
+  if (compile_options() == ScriptCompiler::kProduceParserCache) {
+    log_ = &recorder;
+  }
+
+  DCHECK(info()->source_stream() != NULL);
+  ExternalStreamingStream stream(info()->source_stream(),
+                                 info()->source_stream_encoding());
+  scanner_.Initialize(&stream);
+  DCHECK(info()->context().is_null() || info()->context()->IsNativeContext());
+
+  // When streaming, we don't know the length of the source until we have parsed
+  // it. The raw data can be UTF-8, so we wouldn't know the source length until
+  // we have decoded it anyway even if we knew the raw data length (which we
+  // don't). We work around this by storing all the scopes which need their end
+  // position set at the end of the script (the top scope and possible eval
+  // scopes) and set their end position after we know the script length.
+  Scope* top_scope = NULL;
+  Scope* eval_scope = NULL;
+  result = DoParseProgram(info(), &top_scope, &eval_scope);
+
+  top_scope->set_end_position(scanner()->location().end_pos);
+  if (eval_scope != NULL) {
+    eval_scope->set_end_position(scanner()->location().end_pos);
+  }
+
+  info()->SetFunction(result);
+
+  // We cannot internalize on a background thread; a foreground task will take
+  // care of calling Parser::Internalize just before compilation.
+
+  if (compile_options() == ScriptCompiler::kProduceParserCache) {
+    if (result != NULL) *info_->cached_data() = recorder.GetScriptData();
+    log_ = NULL;
+  }
+}
 } }  // namespace v8::internal
diff --git a/src/parser.h b/src/parser.h
index 7cb364b..40886f6 100644
--- a/src/parser.h
+++ b/src/parser.h
@@ -8,10 +8,10 @@
 #include "src/allocation.h"
 #include "src/ast.h"
 #include "src/compiler.h"  // For CachedDataMode
-#include "src/preparse-data-format.h"
 #include "src/preparse-data.h"
-#include "src/scopes.h"
+#include "src/preparse-data-format.h"
 #include "src/preparser.h"
+#include "src/scopes.h"
 
 namespace v8 {
 class ScriptCompiler;
@@ -47,7 +47,7 @@
   int literal_count() { return backing_[kLiteralCountIndex]; }
   int property_count() { return backing_[kPropertyCountIndex]; }
   StrictMode strict_mode() {
-    ASSERT(backing_[kStrictModeIndex] == SLOPPY ||
+    DCHECK(backing_[kStrictModeIndex] == SLOPPY ||
            backing_[kStrictModeIndex] == STRICT);
     return static_cast<StrictMode>(backing_[kStrictModeIndex]);
   }
@@ -59,73 +59,39 @@
 };
 
 
-class ScriptData {
+// Wrapper around ScriptData to provide parser-specific functionality.
+class ParseData {
  public:
-  explicit ScriptData(Vector<unsigned> store)
-      : store_(store),
-        owns_store_(true) { }
-
-  ScriptData(Vector<unsigned> store, bool owns_store)
-      : store_(store),
-        owns_store_(owns_store) { }
-
-  // The created ScriptData won't take ownership of the data. If the alignment
-  // is not correct, this will copy the data (and the created ScriptData will
-  // take ownership of the copy).
-  static ScriptData* New(const char* data, int length);
-
-  virtual ~ScriptData();
-  virtual int Length();
-  virtual const char* Data();
-  virtual bool HasError();
-
-  void Initialize();
-  void ReadNextSymbolPosition();
-
-  FunctionEntry GetFunctionEntry(int start);
-  int GetSymbolIdentifier();
-  bool SanityCheck();
-
-  Scanner::Location MessageLocation() const;
-  bool IsReferenceError() const;
-  const char* BuildMessage() const;
-  const char* BuildArg() const;
-
-  int function_count() {
-    int functions_size =
-        static_cast<int>(store_[PreparseDataConstants::kFunctionsSizeOffset]);
-    if (functions_size < 0) return 0;
-    if (functions_size % FunctionEntry::kSize != 0) return 0;
-    return functions_size / FunctionEntry::kSize;
+  explicit ParseData(ScriptData* script_data) : script_data_(script_data) {
+    CHECK(IsAligned(script_data->length(), sizeof(unsigned)));
+    CHECK(IsSane());
   }
-  // The following functions should only be called if SanityCheck has
-  // returned true.
-  bool has_error() { return store_[PreparseDataConstants::kHasErrorOffset]; }
-  unsigned magic() { return store_[PreparseDataConstants::kMagicOffset]; }
-  unsigned version() { return store_[PreparseDataConstants::kVersionOffset]; }
+  void Initialize();
+  FunctionEntry GetFunctionEntry(int start);
+  int FunctionCount();
+
+  bool HasError();
+
+  unsigned* Data() {  // Writable data as unsigned int array.
+    return reinterpret_cast<unsigned*>(const_cast<byte*>(script_data_->data()));
+  }
 
  private:
-  // Disable copying and assigning; because of owns_store they won't be correct.
-  ScriptData(const ScriptData&);
-  ScriptData& operator=(const ScriptData&);
+  bool IsSane();
+  unsigned Magic();
+  unsigned Version();
+  int FunctionsSize();
+  int Length() const {
+    // Script data length is already checked to be a multiple of unsigned size.
+    return script_data_->length() / sizeof(unsigned);
+  }
 
-  friend class v8::ScriptCompiler;
-  Vector<unsigned> store_;
-  unsigned char* symbol_data_;
-  unsigned char* symbol_data_end_;
+  ScriptData* script_data_;
   int function_index_;
-  bool owns_store_;
 
-  unsigned Read(int position) const;
-  unsigned* ReadAddress(int position) const;
-  // Reads a number from the current symbols
-  int ReadNumber(byte** source);
-
-  // Read strings written by ParserRecorder::WriteString.
-  static const char* ReadString(unsigned* start, int* chars);
+  DISALLOW_COPY_AND_ASSIGN(ParseData);
 };
 
-
 // ----------------------------------------------------------------------------
 // REGEXP PARSING
 
@@ -154,12 +120,12 @@
   }
 
   T* last() {
-    ASSERT(last_ != NULL);
+    DCHECK(last_ != NULL);
     return last_;
   }
 
   T* RemoveLast() {
-    ASSERT(last_ != NULL);
+    DCHECK(last_ != NULL);
     T* result = last_;
     if ((list_ != NULL) && (list_->length() > 0))
       last_ = list_->RemoveLast();
@@ -169,13 +135,13 @@
   }
 
   T* Get(int i) {
-    ASSERT((0 <= i) && (i < length()));
+    DCHECK((0 <= i) && (i < length()));
     if (list_ == NULL) {
-      ASSERT_EQ(0, i);
+      DCHECK_EQ(0, i);
       return last_;
     } else {
       if (i == list_->length()) {
-        ASSERT(last_ != NULL);
+        DCHECK(last_ != NULL);
         return last_;
       } else {
         return list_->at(i);
@@ -385,14 +351,19 @@
 
     // Used by FunctionState and BlockState.
     typedef v8::internal::Scope Scope;
+    typedef v8::internal::Scope* ScopePtr;
     typedef Variable GeneratorVariable;
     typedef v8::internal::Zone Zone;
 
+    typedef v8::internal::AstProperties AstProperties;
+    typedef Vector<VariableProxy*> ParameterIdentifierVector;
+
     // Return types for traversing functions.
-    typedef Handle<String> Identifier;
+    typedef const AstRawString* Identifier;
     typedef v8::internal::Expression* Expression;
     typedef Yield* YieldExpression;
     typedef v8::internal::FunctionLiteral* FunctionLiteral;
+    typedef v8::internal::ClassLiteral* ClassLiteral;
     typedef v8::internal::Literal* Literal;
     typedef ObjectLiteral::Property* ObjectLiteralProperty;
     typedef ZoneList<v8::internal::Expression*>* ExpressionList;
@@ -403,53 +374,65 @@
     typedef AstNodeFactory<AstConstructionVisitor> Factory;
   };
 
+  class Checkpoint;
+
   explicit ParserTraits(Parser* parser) : parser_(parser) {}
 
   // Custom operations executed when FunctionStates are created and destructed.
-  template<typename FunctionState>
-  static void SetUpFunctionState(FunctionState* function_state, Zone* zone) {
-    Isolate* isolate = zone->isolate();
-    function_state->saved_ast_node_id_ = isolate->ast_node_id();
-    isolate->set_ast_node_id(BailoutId::FirstUsable().ToInt());
+  template <typename FunctionState>
+  static void SetUpFunctionState(FunctionState* function_state) {
+    function_state->saved_id_gen_ = *function_state->ast_node_id_gen_;
+    *function_state->ast_node_id_gen_ =
+        AstNode::IdGen(BailoutId::FirstUsable().ToInt());
   }
 
-  template<typename FunctionState>
-  static void TearDownFunctionState(FunctionState* function_state, Zone* zone) {
+  template <typename FunctionState>
+  static void TearDownFunctionState(FunctionState* function_state) {
     if (function_state->outer_function_state_ != NULL) {
-      zone->isolate()->set_ast_node_id(function_state->saved_ast_node_id_);
+      *function_state->ast_node_id_gen_ = function_state->saved_id_gen_;
     }
   }
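SetUpFunctionState and TearDownFunctionState bracket every nested function literal: the current AST node id generator is stashed, restarted at BailoutId::FirstUsable(), and restored when the nested function is finished (the outermost state is deliberately not restored). A hedged RAII sketch of the same save/restore discipline, with deliberately simplified types:

// Simplified guard mirroring SetUp/TearDownFunctionState; the real code
// additionally skips the restore for the outermost function state.
class ScopedIdGen {
 public:
  ScopedIdGen(int* gen, int first_usable) : gen_(gen), saved_(*gen) {
    *gen_ = first_usable;  // ids restart inside the nested function
  }
  ~ScopedIdGen() { *gen_ = saved_; }  // outer numbering resumes

 private:
  int* gen_;
  int saved_;
};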
 
   // Helper functions for recursive descent.
-  bool IsEvalOrArguments(Handle<String> identifier) const;
+  bool IsEvalOrArguments(const AstRawString* identifier) const;
+  V8_INLINE bool IsFutureStrictReserved(const AstRawString* identifier) const;
 
   // Returns true if the expression is of type "this.foo".
   static bool IsThisProperty(Expression* expression);
 
   static bool IsIdentifier(Expression* expression);
 
-  static Handle<String> AsIdentifier(Expression* expression) {
-    ASSERT(IsIdentifier(expression));
-    return expression->AsVariableProxy()->name();
+  bool IsPrototype(const AstRawString* identifier) const;
+
+  bool IsConstructor(const AstRawString* identifier) const;
+
+  static const AstRawString* AsIdentifier(Expression* expression) {
+    DCHECK(IsIdentifier(expression));
+    return expression->AsVariableProxy()->raw_name();
   }
 
   static bool IsBoilerplateProperty(ObjectLiteral::Property* property) {
     return ObjectLiteral::IsBoilerplateProperty(property);
   }
 
-  static bool IsArrayIndex(Handle<String> string, uint32_t* index) {
-    return !string.is_null() && string->AsArrayIndex(index);
+  static bool IsArrayIndex(const AstRawString* string, uint32_t* index) {
+    return string->AsArrayIndex(index);
   }
 
   // Functions for encapsulating the differences between parsing and preparsing;
   // operations interleaved with the recursive descent.
-  static void PushLiteralName(FuncNameInferrer* fni, Handle<String> id) {
+  static void PushLiteralName(FuncNameInferrer* fni, const AstRawString* id) {
     fni->PushLiteralName(id);
   }
   void PushPropertyName(FuncNameInferrer* fni, Expression* expression);
+  static void InferFunctionName(FuncNameInferrer* fni,
+                                FunctionLiteral* func_to_infer) {
+    fni->AddFunction(func_to_infer);
+  }
 
   static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
-      Scope* scope, Expression* value, bool* has_function) {
+      Scope* scope, ObjectLiteralProperty* property, bool* has_function) {
+    Expression* value = property->value();
     if (scope->DeclarationScope()->is_global_scope() &&
         value->AsFunctionLiteral() != NULL) {
       *has_function = true;
@@ -468,9 +451,8 @@
   void CheckPossibleEvalCall(Expression* expression, Scope* scope);
 
   // Determine if the expression is a variable proxy and mark it as being used
-  // in an assignment or with a increment/decrement operator. This is currently
-  // used on for the statically checking assignments to harmony const bindings.
-  static Expression* MarkExpressionAsLValue(Expression* expression);
+  // in an assignment or with an increment/decrement operator.
+  static Expression* MarkExpressionAsAssigned(Expression* expression);
 
   // Returns true if we have a binary expression between two numeric
   // literals. In that case, *x will be changed to an expression which is the
@@ -501,64 +483,88 @@
   // type. The first argument may be null (in the handle sense) in
   // which case no arguments are passed to the constructor.
   Expression* NewThrowSyntaxError(
-      const char* type, Handle<Object> arg, int pos);
+      const char* type, const AstRawString* arg, int pos);
 
   // Generate AST node that throws a TypeError with the given
   // type. Both arguments must be non-null (in the handle sense).
-  Expression* NewThrowTypeError(const char* type, Handle<Object> arg, int pos);
+  Expression* NewThrowTypeError(const char* type, const AstRawString* arg,
+                                int pos);
 
   // Generic AST generator for throwing errors from compiled code.
   Expression* NewThrowError(
-      Handle<String> constructor, const char* type,
-      Vector<Handle<Object> > arguments, int pos);
+      const AstRawString* constructor, const char* type,
+      const AstRawString* arg, int pos);
 
   // Reporting errors.
   void ReportMessageAt(Scanner::Location source_location,
                        const char* message,
-                       const char* arg,
+                       const char* arg = NULL,
                        bool is_reference_error = false);
   void ReportMessage(const char* message,
-                     MaybeHandle<String> arg,
+                     const char* arg = NULL,
+                     bool is_reference_error = false);
+  void ReportMessage(const char* message,
+                     const AstRawString* arg,
                      bool is_reference_error = false);
   void ReportMessageAt(Scanner::Location source_location,
                        const char* message,
-                       MaybeHandle<String> arg,
+                       const AstRawString* arg,
                        bool is_reference_error = false);
 
   // "null" return type creators.
-  static Handle<String> EmptyIdentifier() {
-    return Handle<String>();
+  static const AstRawString* EmptyIdentifier() {
+    return NULL;
   }
   static Expression* EmptyExpression() {
     return NULL;
   }
+  static Expression* EmptyArrowParamList() { return NULL; }
   static Literal* EmptyLiteral() {
     return NULL;
   }
+  static ObjectLiteralProperty* EmptyObjectLiteralProperty() { return NULL; }
+  static FunctionLiteral* EmptyFunctionLiteral() { return NULL; }
+
   // Used in error return values.
   static ZoneList<Expression*>* NullExpressionList() {
     return NULL;
   }
 
+  // Non-NULL empty string.
+  V8_INLINE const AstRawString* EmptyIdentifierString();
+
   // Odd-ball literal creators.
   Literal* GetLiteralTheHole(int position,
                              AstNodeFactory<AstConstructionVisitor>* factory);
 
   // Producing data during the recursive descent.
-  Handle<String> GetSymbol(Scanner* scanner = NULL);
-  Handle<String> NextLiteralString(Scanner* scanner,
-                                   PretenureFlag tenured);
+  const AstRawString* GetSymbol(Scanner* scanner);
+  const AstRawString* GetNextSymbol(Scanner* scanner);
+  const AstRawString* GetNumberAsSymbol(Scanner* scanner);
+
   Expression* ThisExpression(Scope* scope,
-                             AstNodeFactory<AstConstructionVisitor>* factory);
+                             AstNodeFactory<AstConstructionVisitor>* factory,
+                             int pos = RelocInfo::kNoPosition);
+  Expression* SuperReference(Scope* scope,
+                             AstNodeFactory<AstConstructionVisitor>* factory,
+                             int pos = RelocInfo::kNoPosition);
+  Expression* ClassLiteral(const AstRawString* name, Expression* extends,
+                           Expression* constructor,
+                           ZoneList<ObjectLiteral::Property*>* properties,
+                           int pos,
+                           AstNodeFactory<AstConstructionVisitor>* factory);
+
   Literal* ExpressionFromLiteral(
       Token::Value token, int pos, Scanner* scanner,
       AstNodeFactory<AstConstructionVisitor>* factory);
   Expression* ExpressionFromIdentifier(
-      Handle<String> name, int pos, Scope* scope,
+      const AstRawString* name, int pos, Scope* scope,
       AstNodeFactory<AstConstructionVisitor>* factory);
   Expression* ExpressionFromString(
       int pos, Scanner* scanner,
       AstNodeFactory<AstConstructionVisitor>* factory);
+  Expression* GetIterator(Expression* iterable,
+                          AstNodeFactory<AstConstructionVisitor>* factory);
   ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) {
     return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
   }
@@ -568,18 +574,29 @@
   ZoneList<v8::internal::Statement*>* NewStatementList(int size, Zone* zone) {
     return new(zone) ZoneList<v8::internal::Statement*>(size, zone);
   }
+  V8_INLINE Scope* NewScope(Scope* parent_scope, ScopeType scope_type);
+
+  // Utility functions
+  int DeclareArrowParametersFromExpression(Expression* expression, Scope* scope,
+                                           Scanner::Location* dupe_loc,
+                                           bool* ok);
+  V8_INLINE AstValueFactory* ast_value_factory();
 
   // Temporary glue; these functions will move to ParserBase.
   Expression* ParseV8Intrinsic(bool* ok);
   FunctionLiteral* ParseFunctionLiteral(
-      Handle<String> name,
-      Scanner::Location function_name_location,
-      bool name_is_strict_reserved,
-      bool is_generator,
-      int function_token_position,
-      FunctionLiteral::FunctionType type,
-      FunctionLiteral::ArityRestriction arity_restriction,
-      bool* ok);
+      const AstRawString* name, Scanner::Location function_name_location,
+      bool name_is_strict_reserved, FunctionKind kind,
+      int function_token_position, FunctionLiteral::FunctionType type,
+      FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
+  V8_INLINE void SkipLazyFunctionBody(const AstRawString* name,
+                                      int* materialized_literal_count,
+                                      int* expected_property_count, bool* ok);
+  V8_INLINE ZoneList<Statement*>* ParseEagerFunctionBody(
+      const AstRawString* name, int pos, Variable* fvar,
+      Token::Value fvar_init_op, bool is_generator, bool* ok);
+  V8_INLINE void CheckConflictingVarDeclarations(v8::internal::Scope* scope,
+                                                 bool* ok);
 
  private:
   Parser* parser_;
@@ -588,10 +605,21 @@
 
 class Parser : public ParserBase<ParserTraits> {
  public:
-  explicit Parser(CompilationInfo* info);
+  // Note that the hash seed in ParseInfo must be the hash seed from the
+  // Isolate's heap, otherwise the heap will be in an inconsistent state once
+  // the strings created by the Parser are internalized.
+  struct ParseInfo {
+    uintptr_t stack_limit;
+    uint32_t hash_seed;
+    UnicodeCache* unicode_cache;
+  };
+
+  Parser(CompilationInfo* info, ParseInfo* parse_info);
   ~Parser() {
     delete reusable_preparser_;
     reusable_preparser_ = NULL;
+    delete cached_parse_data_;
+    cached_parse_data_ = NULL;
   }
 
   // Parses the source code represented by the compilation info and sets its
@@ -599,11 +627,23 @@
   // nodes) if parsing failed.
   static bool Parse(CompilationInfo* info,
                     bool allow_lazy = false) {
-    Parser parser(info);
+    ParseInfo parse_info = {info->isolate()->stack_guard()->real_climit(),
+                            info->isolate()->heap()->HashSeed(),
+                            info->isolate()->unicode_cache()};
+    Parser parser(info, &parse_info);
     parser.set_allow_lazy(allow_lazy);
-    return parser.Parse();
+    if (parser.Parse()) {
+      info->SetStrictMode(info->function()->strict_mode());
+      return true;
+    }
+    return false;
   }
   bool Parse();
+  void ParseOnBackground();
+
+  // Handle errors detected during parsing, move statistics to Isolate,
+  // internalize strings (move them to the heap).
+  void Internalize();
 
  private:
   friend class ParserTraits;
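The static Parse wrapper above shows what ParseInfo is for: the stack limit, hash seed, and unicode cache are copied out of the Isolate before parsing starts, which is what makes ParseOnBackground possible, and the hash seed must match the heap's so that strings created off-thread can later be internalized consistently. A hedged sketch of that snapshot pattern with stand-in types (not V8's API):

#include <cstdint>

// Stand-in for the Isolate-owned inputs (illustration only).
struct FakeIsolate {
  uintptr_t real_climit;
  uint32_t heap_hash_seed;
};

// Mirrors Parser::ParseInfo: plain values with no pointer back into the
// isolate, so a background parse never races the main thread.
struct Snapshot {
  uintptr_t stack_limit;
  uint32_t hash_seed;
};

static Snapshot TakeSnapshot(const FakeIsolate& isolate) {
  return Snapshot{isolate.real_climit, isolate.heap_hash_seed};
}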
@@ -636,30 +676,23 @@
   FunctionLiteral* ParseLazy();
   FunctionLiteral* ParseLazy(Utf16CharacterStream* source);
 
-  Isolate* isolate() { return isolate_; }
+  Isolate* isolate() { return info_->isolate(); }
   CompilationInfo* info() const { return info_; }
-
-  // Called by ParseProgram after setting up the scanner.
-  FunctionLiteral* DoParseProgram(CompilationInfo* info,
-                                  Handle<String> source);
-
-  // Report syntax error
-  void ReportInvalidCachedData(Handle<String> name, bool* ok);
-
-  void SetCachedData(ScriptData** data,
-                     CachedDataMode cached_data_mode) {
-    cached_data_mode_ = cached_data_mode;
-    if (cached_data_mode == NO_CACHED_DATA) {
-      cached_data_ = NULL;
-    } else {
-      ASSERT(data != NULL);
-      cached_data_ = data;
-    }
+  Handle<Script> script() const { return info_->script(); }
+  AstValueFactory* ast_value_factory() const {
+    return info_->ast_value_factory();
   }
 
+  // Called by ParseProgram after setting up the scanner.
+  FunctionLiteral* DoParseProgram(CompilationInfo* info, Scope** scope,
+                                  Scope** ad_hoc_eval_scope);
+
+  void SetCachedData();
+
   bool inside_with() const { return scope_->inside_with(); }
-  ScriptData** cached_data() const { return cached_data_; }
-  CachedDataMode cached_data_mode() const { return cached_data_mode_; }
+  ScriptCompiler::CompileOptions compile_options() const {
+    return info_->compile_options();
+  }
   Scope* DeclarationScope(VariableMode mode) {
     return IsLexicalVariableMode(mode)
         ? scope_ : scope_->DeclarationScope();
@@ -670,9 +703,12 @@
   // By making the 'exception handling' explicit, we are forced to check
   // for failure at the call sites.
   void* ParseSourceElements(ZoneList<Statement*>* processor, int end_token,
-                            bool is_eval, bool is_global, bool* ok);
-  Statement* ParseModuleElement(ZoneStringList* labels, bool* ok);
-  Statement* ParseModuleDeclaration(ZoneStringList* names, bool* ok);
+                            bool is_eval, bool is_global,
+                            Scope** ad_hoc_eval_scope, bool* ok);
+  Statement* ParseModuleElement(ZoneList<const AstRawString*>* labels,
+                                bool* ok);
+  Statement* ParseModuleDeclaration(ZoneList<const AstRawString*>* names,
+                                    bool* ok);
   Module* ParseModule(bool* ok);
   Module* ParseModuleLiteral(bool* ok);
   Module* ParseModulePath(bool* ok);
@@ -681,38 +717,47 @@
   Module* ParseModuleSpecifier(bool* ok);
   Block* ParseImportDeclaration(bool* ok);
   Statement* ParseExportDeclaration(bool* ok);
-  Statement* ParseBlockElement(ZoneStringList* labels, bool* ok);
-  Statement* ParseStatement(ZoneStringList* labels, bool* ok);
-  Statement* ParseFunctionDeclaration(ZoneStringList* names, bool* ok);
+  Statement* ParseBlockElement(ZoneList<const AstRawString*>* labels, bool* ok);
+  Statement* ParseStatement(ZoneList<const AstRawString*>* labels, bool* ok);
+  Statement* ParseFunctionDeclaration(ZoneList<const AstRawString*>* names,
+                                      bool* ok);
+  Statement* ParseClassDeclaration(ZoneList<const AstRawString*>* names,
+                                   bool* ok);
   Statement* ParseNativeDeclaration(bool* ok);
-  Block* ParseBlock(ZoneStringList* labels, bool* ok);
+  Block* ParseBlock(ZoneList<const AstRawString*>* labels, bool* ok);
   Block* ParseVariableStatement(VariableDeclarationContext var_context,
-                                ZoneStringList* names,
+                                ZoneList<const AstRawString*>* names,
                                 bool* ok);
   Block* ParseVariableDeclarations(VariableDeclarationContext var_context,
                                    VariableDeclarationProperties* decl_props,
-                                   ZoneStringList* names,
-                                   Handle<String>* out,
+                                   ZoneList<const AstRawString*>* names,
+                                   const AstRawString** out,
                                    bool* ok);
-  Statement* ParseExpressionOrLabelledStatement(ZoneStringList* labels,
-                                                bool* ok);
-  IfStatement* ParseIfStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseExpressionOrLabelledStatement(
+      ZoneList<const AstRawString*>* labels, bool* ok);
+  IfStatement* ParseIfStatement(ZoneList<const AstRawString*>* labels,
+                                bool* ok);
   Statement* ParseContinueStatement(bool* ok);
-  Statement* ParseBreakStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseBreakStatement(ZoneList<const AstRawString*>* labels,
+                                 bool* ok);
   Statement* ParseReturnStatement(bool* ok);
-  Statement* ParseWithStatement(ZoneStringList* labels, bool* ok);
+  Statement* ParseWithStatement(ZoneList<const AstRawString*>* labels,
+                                bool* ok);
   CaseClause* ParseCaseClause(bool* default_seen_ptr, bool* ok);
-  SwitchStatement* ParseSwitchStatement(ZoneStringList* labels, bool* ok);
-  DoWhileStatement* ParseDoWhileStatement(ZoneStringList* labels, bool* ok);
-  WhileStatement* ParseWhileStatement(ZoneStringList* labels, bool* ok);
-  Statement* ParseForStatement(ZoneStringList* labels, bool* ok);
+  SwitchStatement* ParseSwitchStatement(ZoneList<const AstRawString*>* labels,
+                                        bool* ok);
+  DoWhileStatement* ParseDoWhileStatement(ZoneList<const AstRawString*>* labels,
+                                          bool* ok);
+  WhileStatement* ParseWhileStatement(ZoneList<const AstRawString*>* labels,
+                                      bool* ok);
+  Statement* ParseForStatement(ZoneList<const AstRawString*>* labels, bool* ok);
   Statement* ParseThrowStatement(bool* ok);
   Expression* MakeCatchContext(Handle<String> id, VariableProxy* value);
   TryStatement* ParseTryStatement(bool* ok);
   DebuggerStatement* ParseDebuggerStatement(bool* ok);
 
   // Support for harmony block-scoped bindings.
-  Block* ParseScopedBlock(ZoneStringList* labels, bool* ok);
+  Block* ParseScopedBlock(ZoneList<const AstRawString*>* labels, bool* ok);
 
   // Initialize the components of a for-in / for-of statement.
   void InitializeForEachStatement(ForEachStatement* stmt,
@@ -720,19 +765,15 @@
                                   Expression* subject,
                                   Statement* body);
   Statement* DesugarLetBindingsInForStatement(
-      Scope* inner_scope, ZoneStringList* names, ForStatement* loop,
-      Statement* init, Expression* cond, Statement* next, Statement* body,
-      bool* ok);
+      Scope* inner_scope, ZoneList<const AstRawString*>* names,
+      ForStatement* loop, Statement* init, Expression* cond, Statement* next,
+      Statement* body, bool* ok);
 
   FunctionLiteral* ParseFunctionLiteral(
-      Handle<String> name,
-      Scanner::Location function_name_location,
-      bool name_is_strict_reserved,
-      bool is_generator,
-      int function_token_position,
-      FunctionLiteral::FunctionType type,
-      FunctionLiteral::ArityRestriction arity_restriction,
-      bool* ok);
+      const AstRawString* name, Scanner::Location function_name_location,
+      bool name_is_strict_reserved, FunctionKind kind,
+      int function_token_position, FunctionLiteral::FunctionType type,
+      FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
 
   // Magical syntax support.
   Expression* ParseV8Intrinsic(bool* ok);
@@ -754,14 +795,14 @@
   void CheckConflictingVarDeclarations(Scope* scope, bool* ok);
 
   // Parser support
-  VariableProxy* NewUnresolved(Handle<String> name,
+  VariableProxy* NewUnresolved(const AstRawString* name,
                                VariableMode mode,
                                Interface* interface);
   void Declare(Declaration* declaration, bool resolve, bool* ok);
 
-  bool TargetStackContainsLabel(Handle<String> label);
-  BreakableStatement* LookupBreakTarget(Handle<String> label, bool* ok);
-  IterationStatement* LookupContinueTarget(Handle<String> label, bool* ok);
+  bool TargetStackContainsLabel(const AstRawString* label);
+  BreakableStatement* LookupBreakTarget(const AstRawString* label, bool* ok);
+  IterationStatement* LookupContinueTarget(const AstRawString* label, bool* ok);
 
   void RegisterTargetUse(Label* target, Target* stop);
 
@@ -771,7 +812,7 @@
 
   // Skip over a lazy function, either using cached data if we have it, or
   // by parsing the function with PreParser. Consumes the ending }.
-  void SkipLazyFunctionBody(Handle<String> function_name,
+  void SkipLazyFunctionBody(const AstRawString* function_name,
                             int* materialized_literal_count,
                             int* expected_property_count,
                             bool* ok);
@@ -780,24 +821,19 @@
       SingletonLogger* logger);
 
   // Consumes the ending }.
-  ZoneList<Statement*>* ParseEagerFunctionBody(Handle<String> function_name,
-                                               int pos,
-                                               Variable* fvar,
-                                               Token::Value fvar_init_op,
-                                               bool is_generator,
-                                               bool* ok);
+  ZoneList<Statement*>* ParseEagerFunctionBody(
+      const AstRawString* function_name, int pos, Variable* fvar,
+      Token::Value fvar_init_op, bool is_generator, bool* ok);
+
+  void HandleSourceURLComments();
 
   void ThrowPendingError();
 
-  Isolate* isolate_;
-
-  Handle<Script> script_;
   Scanner scanner_;
   PreParser* reusable_preparser_;
   Scope* original_scope_;  // for ES5 function declarations in sloppy eval
   Target* target_stack_;  // for break, continue statements
-  ScriptData** cached_data_;
-  CachedDataMode cached_data_mode_;
+  ParseData* cached_parse_data_;
 
   CompilationInfo* info_;
 
@@ -805,12 +841,62 @@
   bool has_pending_error_;
   Scanner::Location pending_error_location_;
   const char* pending_error_message_;
-  MaybeHandle<String> pending_error_arg_;
+  const AstRawString* pending_error_arg_;
   const char* pending_error_char_arg_;
   bool pending_error_is_reference_error_;
+
+  // Other information which will be stored in Parser and moved to Isolate after
+  // parsing.
+  int use_counts_[v8::Isolate::kUseCounterFeatureCount];
+  int total_preparse_skipped_;
+  HistogramTimer* pre_parse_timer_;
 };
 
 
+bool ParserTraits::IsFutureStrictReserved(
+    const AstRawString* identifier) const {
+  return identifier->IsOneByteEqualTo("yield") ||
+         parser_->scanner()->IdentifierIsFutureStrictReserved(identifier);
+}
+
+
+Scope* ParserTraits::NewScope(Scope* parent_scope, ScopeType scope_type) {
+  return parser_->NewScope(parent_scope, scope_type);
+}
+
+
+const AstRawString* ParserTraits::EmptyIdentifierString() {
+  return parser_->ast_value_factory()->empty_string();
+}
+
+
+void ParserTraits::SkipLazyFunctionBody(const AstRawString* function_name,
+                                        int* materialized_literal_count,
+                                        int* expected_property_count,
+                                        bool* ok) {
+  return parser_->SkipLazyFunctionBody(
+      function_name, materialized_literal_count, expected_property_count, ok);
+}
+
+
+ZoneList<Statement*>* ParserTraits::ParseEagerFunctionBody(
+    const AstRawString* name, int pos, Variable* fvar,
+    Token::Value fvar_init_op, bool is_generator, bool* ok) {
+  return parser_->ParseEagerFunctionBody(name, pos, fvar, fvar_init_op,
+                                         is_generator, ok);
+}
+
+void ParserTraits::CheckConflictingVarDeclarations(v8::internal::Scope* scope,
+                                                   bool* ok) {
+  parser_->CheckConflictingVarDeclarations(scope, ok);
+}
+
+
+AstValueFactory* ParserTraits::ast_value_factory() {
+  return parser_->ast_value_factory();
+}
+
+
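The one-line ParserTraits bodies above all just forward to the Parser; that is the traits-delegation shape ParserBase uses so the same recursive-descent code can drive both Parser and PreParser. A miniature of the pattern with hypothetical names:

// Shared algorithms live in the base template; each traits method
// forwards to the concrete parser. All names here are hypothetical.
template <typename Traits>
class BaseParser {
 public:
  explicit BaseParser(typename Traits::ParserType* parser)
      : traits_(parser) {}
  bool SharedCheck() { return traits_.IsReservedWord(); }

 private:
  Traits traits_;
};

class MiniParser;

struct MiniTraits {
  typedef MiniParser ParserType;
  explicit MiniTraits(MiniParser* parser) : parser_(parser) {}
  bool IsReservedWord();  // forwards to parser_, defined below
  MiniParser* parser_;
};

class MiniParser : public BaseParser<MiniTraits> {
 public:
  MiniParser() : BaseParser<MiniTraits>(this) {}
  bool is_reserved_word() const { return false; }
};

inline bool MiniTraits::IsReservedWord() {
  return parser_->is_reserved_word();
}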
 // Support for handling complex values (array and object literals) that
 // can be fully handled at compile time.
 class CompileTimeValue: public AllStatic {
diff --git a/src/perf-jit.cc b/src/perf-jit.cc
new file mode 100644
index 0000000..3f30e38
--- /dev/null
+++ b/src/perf-jit.cc
@@ -0,0 +1,148 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/perf-jit.h"
+
+#if V8_OS_LINUX
+#include <fcntl.h>
+#include <unistd.h>
+#include "src/third_party/kernel/tools/perf/util/jitdump.h"
+#endif  // V8_OS_LINUX
+
+namespace v8 {
+namespace internal {
+
+#if V8_OS_LINUX
+
+const char PerfJitLogger::kFilenameFormatString[] = "perfjit-%d.dump";
+
+// Extra padding for the PID in the filename
+const int PerfJitLogger::kFilenameBufferPadding = 16;
+
+
+PerfJitLogger::PerfJitLogger() : perf_output_handle_(NULL), code_index_(0) {
+  if (!base::TimeTicks::KernelTimestampAvailable()) {
+    FATAL("Cannot profile with perf JIT - kernel timestamps not available.");
+  }
+
+  // Open the perf JIT dump file.
+  int buffer_size = sizeof(kFilenameFormatString) + kFilenameBufferPadding;
+  ScopedVector<char> perf_dump_name(buffer_size);
+  int size = SNPrintF(perf_dump_name, kFilenameFormatString,
+                      base::OS::GetCurrentProcessId());
+  CHECK_NE(size, -1);
+  perf_output_handle_ =
+      base::OS::FOpen(perf_dump_name.start(), base::OS::LogFileOpenMode);
+  CHECK_NE(perf_output_handle_, NULL);
+  setvbuf(perf_output_handle_, NULL, _IOFBF, kLogBufferSize);
+
+  LogWriteHeader();
+}
+
+
+PerfJitLogger::~PerfJitLogger() {
+  fclose(perf_output_handle_);
+  perf_output_handle_ = NULL;
+}
+
+
+uint64_t PerfJitLogger::GetTimestamp() {
+  return static_cast<uint64_t>(
+      base::TimeTicks::KernelTimestampNow().ToInternalValue());
+}
+
+
+void PerfJitLogger::LogRecordedBuffer(Code* code, SharedFunctionInfo*,
+                                      const char* name, int length) {
+  DCHECK(code->instruction_start() == code->address() + Code::kHeaderSize);
+  DCHECK(perf_output_handle_ != NULL);
+
+  const char* code_name = name;
+  uint8_t* code_pointer = reinterpret_cast<uint8_t*>(code->instruction_start());
+  uint32_t code_size = code->is_crankshafted() ? code->safepoint_table_offset()
+                                               : code->instruction_size();
+
+  static const char string_terminator[] = "\0";
+
+  jr_code_load code_load;
+  code_load.p.id = JIT_CODE_LOAD;
+  code_load.p.total_size = sizeof(code_load) + length + 1 + code_size;
+  code_load.p.timestamp = GetTimestamp();
+  code_load.pid = static_cast<uint32_t>(base::OS::GetCurrentProcessId());
+  code_load.tid = static_cast<uint32_t>(base::OS::GetCurrentThreadId());
+  code_load.vma = 0x0;  //  Our addresses are absolute.
+  code_load.code_addr = reinterpret_cast<uint64_t>(code_pointer);
+  code_load.code_size = code_size;
+  code_load.code_index = code_index_;
+
+  code_index_++;
+
+  LogWriteBytes(reinterpret_cast<const char*>(&code_load), sizeof(code_load));
+  LogWriteBytes(code_name, length);
+  LogWriteBytes(string_terminator, 1);
+  LogWriteBytes(reinterpret_cast<const char*>(code_pointer), code_size);
+}
+
+
+void PerfJitLogger::CodeMoveEvent(Address from, Address to) {
+  // Code relocation not supported.
+  UNREACHABLE();
+}
+
+
+void PerfJitLogger::CodeDeleteEvent(Address from) {
+  // V8 does not send notifications on code unload.
+}
+
+
+void PerfJitLogger::SnapshotPositionEvent(Address addr, int pos) {}
+
+
+void PerfJitLogger::LogWriteBytes(const char* bytes, int size) {
+  size_t rv = fwrite(bytes, 1, size, perf_output_handle_);
+  DCHECK(static_cast<size_t>(size) == rv);
+  USE(rv);
+}
+
+
+void PerfJitLogger::LogWriteHeader() {
+  DCHECK(perf_output_handle_ != NULL);
+  jitheader header;
+  header.magic = JITHEADER_MAGIC;
+  header.version = JITHEADER_VERSION;
+  header.total_size = sizeof(jitheader);
+  header.pad1 = 0xdeadbeef;
+  header.elf_mach = GetElfMach();
+  header.pid = base::OS::GetCurrentProcessId();
+  header.timestamp =
+      static_cast<uint64_t>(base::OS::TimeCurrentMillis() * 1000.0);
+  LogWriteBytes(reinterpret_cast<const char*>(&header), sizeof(header));
+}
+
+#endif  // V8_OS_LINUX
+}  // namespace internal
+}  // namespace v8
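LogRecordedBuffer above emits one JIT_CODE_LOAD record per compiled function: a fixed-size header whose total_size covers everything, then the NUL-terminated name, then the raw machine code. A hedged sketch of that framing with a simplified stand-in struct (the real layout comes from jitdump.h):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Simplified stand-in for the record header; the real fields come from
// src/third_party/kernel/tools/perf/util/jitdump.h.
struct FakeCodeLoad {
  uint32_t id;          // record type tag (JIT_CODE_LOAD in the real file)
  uint32_t total_size;  // header + name + NUL terminator + code bytes
  uint64_t timestamp;
};

// Mirrors the write order in PerfJitLogger::LogRecordedBuffer.
static void WriteLoadRecord(FILE* out, const char* name,
                            const uint8_t* code, uint32_t code_size) {
  FakeCodeLoad record;
  record.id = 0;  // stand-in value for JIT_CODE_LOAD
  record.total_size = static_cast<uint32_t>(sizeof(record) +
                                            strlen(name) + 1 + code_size);
  record.timestamp = 0;  // the real logger uses a kernel timestamp
  fwrite(&record, sizeof(record), 1, out);
  fwrite(name, 1, strlen(name) + 1, out);  // name plus terminator
  fwrite(code, 1, code_size, out);
}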
diff --git a/src/perf-jit.h b/src/perf-jit.h
new file mode 100644
index 0000000..7872910
--- /dev/null
+++ b/src/perf-jit.h
@@ -0,0 +1,120 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#ifndef V8_PERF_JIT_H_
+#define V8_PERF_JIT_H_
+
+#include "src/v8.h"
+
+namespace v8 {
+namespace internal {
+
+// TODO(jarin) For now, we disable perf integration on Android because of a
+// build problem - when building the snapshot with AOSP, librt is not
+// available, so we cannot use the clock_gettime function. To fix this, we
+// should thread through the V8_LIBRT_NOT_AVAILABLE flag here and only disable
+// the perf integration when this flag is present (the perf integration is not
+// needed when generating snapshot, so it is fine to ifdef it away).
+
+#if V8_OS_LINUX
+
+// Linux perf tool logging support
+class PerfJitLogger : public CodeEventLogger {
+ public:
+  PerfJitLogger();
+  virtual ~PerfJitLogger();
+
+  virtual void CodeMoveEvent(Address from, Address to);
+  virtual void CodeDeleteEvent(Address from);
+  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {}
+  virtual void SnapshotPositionEvent(Address addr, int pos);
+
+ private:
+  uint64_t GetTimestamp();
+  virtual void LogRecordedBuffer(Code* code, SharedFunctionInfo* shared,
+                                 const char* name, int length);
+
+  // Extension added to V8 log file name to get the low-level log name.
+  static const char kFilenameFormatString[];
+  static const int kFilenameBufferPadding;
+
+  // File buffer size of the low-level log. We don't use the default to
+  // minimize the associated overhead.
+  static const int kLogBufferSize = 2 * MB;
+
+  void LogWriteBytes(const char* bytes, int size);
+  void LogWriteHeader();
+
+  static const uint32_t kElfMachIA32 = 3;
+  static const uint32_t kElfMachX64 = 62;
+  static const uint32_t kElfMachARM = 40;
+  static const uint32_t kElfMachMIPS = 10;
+
+  uint32_t GetElfMach() {
+#if V8_TARGET_ARCH_IA32
+    return kElfMachIA32;
+#elif V8_TARGET_ARCH_X64
+    return kElfMachX64;
+#elif V8_TARGET_ARCH_ARM
+    return kElfMachARM;
+#elif V8_TARGET_ARCH_MIPS
+    return kElfMachMIPS;
+#else
+    UNIMPLEMENTED();
+    return 0;
+#endif
+  }
+
+  FILE* perf_output_handle_;
+  uint64_t code_index_;
+};
+
+#else
+
+// PerfJitLogger is only implemented on Linux
+class PerfJitLogger : public CodeEventLogger {
+ public:
+  virtual void CodeMoveEvent(Address from, Address to) { UNIMPLEMENTED(); }
+
+  virtual void CodeDeleteEvent(Address from) { UNIMPLEMENTED(); }
+
+  virtual void CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
+    UNIMPLEMENTED();
+  }
+
+  virtual void SnapshotPositionEvent(Address addr, int pos) { UNIMPLEMENTED(); }
+
+  virtual void LogRecordedBuffer(Code* code, SharedFunctionInfo* shared,
+                                 const char* name, int length) {
+    UNIMPLEMENTED();
+  }
+};
+
+#endif  // V8_OS_LINUX
+}  // namespace internal
+}  // namespace v8
+#endif  // V8_PERF_JIT_H_
diff --git a/src/platform-cygwin.cc b/src/platform-cygwin.cc
deleted file mode 100644
index 91235cf..0000000
--- a/src/platform-cygwin.cc
+++ /dev/null
@@ -1,330 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for Cygwin goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#include <errno.h>
-#include <pthread.h>
-#include <semaphore.h>
-#include <stdarg.h>
-#include <strings.h>    // index
-#include <sys/time.h>
-#include <sys/mman.h>   // mmap & munmap
-#include <unistd.h>     // sysconf
-
-#undef MAP_TYPE
-
-#include "src/v8.h"
-
-#include "src/base/win32-headers.h"
-#include "src/platform.h"
-
-namespace v8 {
-namespace internal {
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return tzname[0];  // The location of the timezone string on Cygwin.
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  // On Cygwin, struct tm does not contain a tm_gmtoff field.
-  time_t utc = time(NULL);
-  ASSERT(utc != -1);
-  struct tm* loc = localtime(&utc);
-  ASSERT(loc != NULL);
-  // time - localtime includes any daylight savings offset, so subtract it.
-  return static_cast<double>((mktime(loc) - utc) * msPerSecond -
-                             (loc->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, sysconf(_SC_PAGESIZE));
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  PosixMemoryMappedFile(FILE* file, void* memory, int size)
-    : file_(file), memory_(memory), size_(size) { }
-  virtual ~PosixMemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  FILE* file_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  FILE* file = fopen(name, "r+");
-  if (file == NULL) return NULL;
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-
-  void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  FILE* file = fopen(name, "w+");
-  if (file == NULL) return NULL;
-  int result = fwrite(initial, size, 1, file);
-  if (result < 1) {
-    fclose(file);
-    return NULL;
-  }
-  void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
-  fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  // This function assumes that the layout of the file is as follows:
-  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
-  // If we encounter an unexpected situation we abort scanning further entries.
-  FILE* fp = fopen("/proc/self/maps", "r");
-  if (fp == NULL) return result;
-
-  // Allocate enough room to be able to store a full file name.
-  const int kLibNameLen = FILENAME_MAX + 1;
-  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
-  // This loop will terminate once the scanning hits an EOF.
-  while (true) {
-    uintptr_t start, end;
-    char attr_r, attr_w, attr_x, attr_p;
-    // Parse the addresses and permission bits at the beginning of the line.
-    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
-    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
-    int c;
-    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
-      // Found a read-only executable entry. Skip characters until we reach
-      // the beginning of the filename or the end of the line.
-      do {
-        c = getc(fp);
-      } while ((c != EOF) && (c != '\n') && (c != '/'));
-      if (c == EOF) break;  // EOF: Was unexpected, just exit.
-
-      // Process the filename if found.
-      if (c == '/') {
-        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
-
-        // Read to the end of the line. Exit if the read fails.
-        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
-        // Drop the newline character read by fgets. We do not need to check
-        // for a zero-length string because we know that we at least read the
-        // '/' character.
-        lib_name[strlen(lib_name) - 1] = '\0';
-      } else {
-        // No library name found, just record the raw address range.
-        snprintf(lib_name, kLibNameLen,
-                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
-      }
-      result.push_back(SharedLibraryAddress(lib_name, start, end));
-    } else {
-      // Entry not describing executable data. Skip to end of line to set up
-      // reading the next entry.
-      do {
-        c = getc(fp);
-      } while ((c != EOF) && (c != '\n'));
-      if (c == EOF) break;
-    }
-  }
-  free(lib_name);
-  fclose(fp);
-  return result;
-}
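The removed scanner keys off the documented /proc/self/maps line shape; the same extraction can be sketched more compactly with sscanf. A hedged, self-contained example (field handling and buffer sizes are simplified):

#include <cinttypes>
#include <cstdio>

// Parses one /proc/self/maps line of the documented shape:
//   hex_start-hex_end rwxp offset dev inode [path]
// Returns true for executable mappings; illustration only.
static bool ParseMapsLine(const char* line, uintptr_t* start,
                          uintptr_t* end, char* path /* >= 256 bytes */) {
  char perms[5] = {0};
  path[0] = '\0';
  int fields =
      sscanf(line, "%" SCNxPTR "-%" SCNxPTR " %4s %*x %*s %*d %255s",
             start, end, perms, path);
  if (fields < 3) return false;  // malformed line
  return perms[2] == 'x';        // keep executable mappings only
}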
-
-
-void OS::SignalCodeMovingGC() {
-  // Nothing to do on Cygwin.
-}
-
-
-// The VirtualMemory implementation is taken from platform-win32.cc.
-// The mmap-based virtual memory implementation as it is used on most posix
-// platforms does not work well because Cygwin does not support MAP_FIXED.
-// This causes VirtualMemory::Commit to not always commit the memory region
-// specified.
-
-static void* GetRandomAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-    // The address range used to randomize RWX allocations in OS::Allocate
-    // Try not to map pages into the default range where Windows loads DLLs.
-    // Use a multiple of 64k to prevent committing unused memory.
-    // Note: This does not guarantee RWX regions will be within the
-    // range kAllocationRandomAddressMin to kAllocationRandomAddressMax
-#ifdef V8_HOST_ARCH_64_BIT
-    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
-    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
-    uintptr_t address =
-        (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
-        kAllocationRandomAddressMin;
-    address &= kAllocationRandomAddressMax;
-    return reinterpret_cast<void *>(address);
-  }
-  return NULL;
-}
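The removed GetRandomAddr relies on two bit tricks: shifting the random value left by kPageSizeBits zeroes the low bits, so the hint falls on an allocation-granularity boundary, and the OR/AND pair pulls it toward the configured window (though, as the comment notes, not strictly inside it). A standalone sketch with illustrative constants, not V8's actual values:

#include <cstdint>
#include <cstdlib>

// Illustrative window and granularity (assumptions, not V8's values).
static const uintptr_t kMin = 0x04000000;
static const uintptr_t kMax = 0x3FFF0000;
static const int kGranularityBits = 16;  // 64 KB steps

static void* RandomHint() {
  // The shift zeroes the low bits, so after masking the hint is still
  // 64 KB-aligned; OR-ing kMin biases it above the window floor.
  uintptr_t address =
      (static_cast<uintptr_t>(rand()) << kGranularityBits) | kMin;
  address &= kMax;
  return reinterpret_cast<void*>(address);
}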
-
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
-  LPVOID base = NULL;
-
-  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
-    // For executable pages, try to randomize the allocation address.
-    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
-      base = VirtualAlloc(GetRandomAddr(), size, action, protection);
-    }
-  }
-
-  // After three attempts give up and let the OS find an address to use.
-  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
-  return base;
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size);
-  if (address == NULL) return;
-  Address base = RoundUp(static_cast<Address>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  ASSERT(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != NULL) {
-    request_size = size;
-    ASSERT(base == static_cast<Address>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size);
-    if (address == NULL) return;
-  }
-  address_ = address;
-  size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address_, size_);
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  ASSERT(IsReserved());
-  return UncommitRegion(address, size);
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  if (NULL == VirtualAlloc(address,
-                           OS::CommitPageSize(),
-                           MEM_COMMIT,
-                           PAGE_NOACCESS)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
-} }  // namespace v8::internal
diff --git a/src/platform-freebsd.cc b/src/platform-freebsd.cc
deleted file mode 100644
index a1a0739..0000000
--- a/src/platform-freebsd.cc
+++ /dev/null
@@ -1,306 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for FreeBSD goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <sys/ucontext.h>
-#include <stdlib.h>
-
-#include <sys/types.h>  // mmap & munmap
-#include <sys/mman.h>   // mmap & munmap
-#include <sys/stat.h>   // open
-#include <sys/fcntl.h>  // open
-#include <unistd.h>     // getpagesize
-// If you don't have execinfo.h then you need devel/libexecinfo from ports.
-#include <strings.h>    // index
-#include <errno.h>
-#include <stdarg.h>
-#include <limits.h>
-
-#undef MAP_TYPE
-
-#include "src/v8.h"
-
-#include "src/platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  time_t tv = time(NULL);
-  struct tm* t = localtime(&tv);
-  // tm_gmtoff includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t->tm_gmtoff * msPerSecond -
-                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool executable) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  PosixMemoryMappedFile(FILE* file, void* memory, int size)
-    : file_(file), memory_(memory), size_(size) { }
-  virtual ~PosixMemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  FILE* file_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  FILE* file = fopen(name, "r+");
-  if (file == NULL) return NULL;
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-
-  void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  FILE* file = fopen(name, "w+");
-  if (file == NULL) return NULL;
-  int result = fwrite(initial, size, 1, file);
-  if (result < 1) {
-    fclose(file);
-    return NULL;
-  }
-  void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
-  fclose(file_);
-}
-
-
-static unsigned StringToLong(char* buffer) {
-  return static_cast<unsigned>(strtol(buffer, NULL, 16));  // NOLINT
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  static const int MAP_LENGTH = 1024;
-  int fd = open("/proc/self/maps", O_RDONLY);
-  if (fd < 0) return result;
-  while (true) {
-    char addr_buffer[11];
-    addr_buffer[0] = '0';
-    addr_buffer[1] = 'x';
-    addr_buffer[10] = 0;
-    int result = read(fd, addr_buffer + 2, 8);
-    if (result < 8) break;
-    unsigned start = StringToLong(addr_buffer);
-    result = read(fd, addr_buffer + 2, 1);
-    if (result < 1) break;
-    if (addr_buffer[2] != '-') break;
-    result = read(fd, addr_buffer + 2, 8);
-    if (result < 8) break;
-    unsigned end = StringToLong(addr_buffer);
-    char buffer[MAP_LENGTH];
-    int bytes_read = -1;
-    do {
-      bytes_read++;
-      if (bytes_read >= MAP_LENGTH - 1)
-        break;
-      result = read(fd, buffer + bytes_read, 1);
-      if (result < 1) break;
-    } while (buffer[bytes_read] != '\n');
-    buffer[bytes_read] = 0;
-    // Ignore mappings that are not executable.
-    if (buffer[3] != 'x') continue;
-    char* start_of_path = index(buffer, '/');
-    // There may be no filename in this line.  Skip to next.
-    if (start_of_path == NULL) continue;
-    buffer[bytes_read] = 0;
-    result.push_back(SharedLibraryAddress(start_of_path, start, end));
-  }
-  close(fd);
-  return result;
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
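The removed aligned constructor is the classic over-reserve-and-trim technique: reserve size + alignment bytes, round the base up to the requested alignment, then give the unused prefix and suffix back to the OS. A worked sketch of just the arithmetic (no mmap calls), with illustrative values:

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t alignment = 0x10000;         // 64 KB requested
  const uintptr_t size = 0x30000;              // 192 KB payload
  const uintptr_t request = size + alignment;  // reserve 256 KB
  const uintptr_t base = 0x12345000;           // what mmap might return
  const uintptr_t aligned = (base + alignment - 1) & ~(alignment - 1);
  assert(aligned >= base && aligned % alignment == 0);
  const uintptr_t prefix = aligned - base;           // unmapped before
  const uintptr_t suffix = request - prefix - size;  // unmapped after
  assert(prefix + size + suffix == request);         // nothing leaks
  return 0;
}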
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
-} }  // namespace v8::internal
diff --git a/src/platform-linux.cc b/src/platform-linux.cc
deleted file mode 100644
index 3cbf4da..0000000
--- a/src/platform-linux.cc
+++ /dev/null
@@ -1,431 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for Linux goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/prctl.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <stdlib.h>
-
-// Ubuntu Dapper requires memory pages to be marked as
-// executable. Otherwise, the OS raises an exception when executing code
-// in that page.
-#include <sys/types.h>  // mmap & munmap
-#include <sys/mman.h>   // mmap & munmap
-#include <sys/stat.h>   // open
-#include <fcntl.h>      // open
-#include <unistd.h>     // sysconf
-#include <strings.h>    // index
-#include <errno.h>
-#include <stdarg.h>
-
-// GLibc on ARM defines mcontext_t as a typedef for 'struct sigcontext'.
-// Old versions of the C library <signal.h> didn't define the type.
-#if defined(__ANDROID__) && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
-    (defined(__arm__) || defined(__aarch64__)) && \
-    !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h>
-#endif
-
-#if defined(LEAK_SANITIZER)
-#include <sanitizer/lsan_interface.h>
-#endif
-
-#undef MAP_TYPE
-
-#include "src/v8.h"
-
-#include "src/platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-#ifdef __arm__
-
-bool OS::ArmUsingHardFloat() {
-  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
-  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
-  // We use these as well as a couple of other defines to statically determine
-  // what FP ABI is used.
-  // GCC versions 4.4 and below don't support hard-fp.
-  // GCC versions 4.5 may support hard-fp without defining __ARM_PCS or
-  // __ARM_PCS_VFP.
-
-#define GCC_VERSION (__GNUC__ * 10000                                          \
-                     + __GNUC_MINOR__ * 100                                    \
-                     + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40600
-#if defined(__ARM_PCS_VFP)
-  return true;
-#else
-  return false;
-#endif
-
-#elif GCC_VERSION < 40500
-  return false;
-
-#else
-#if defined(__ARM_PCS_VFP)
-  return true;
-#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
-      !defined(__VFP_FP__)
-  return false;
-#else
-#error "Your version of GCC does not report the FP ABI compiled for."          \
-       "Please report it on this issue"                                        \
-       "http://code.google.com/p/v8/issues/detail?id=2140"
-
-#endif
-#endif
-#undef GCC_VERSION
-}
-
-#endif  // def __arm__
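-
-// For illustration: the GCC_VERSION macro above linearizes the version
-// triple, so GCC 4.6.3 yields 4 * 10000 + 6 * 100 + 3 = 40603, which is the
-// encoding the >= 40600 and < 40500 comparisons test against.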
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  time_t tv = time(NULL);
-  struct tm* t = localtime(&tv);
-  // tm_gmtoff includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t->tm_gmtoff * msPerSecond -
-                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, AllocateAlignment());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = OS::GetRandomMmapAddr();
-  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  PosixMemoryMappedFile(FILE* file, void* memory, int size)
-    : file_(file), memory_(memory), size_(size) { }
-  virtual ~PosixMemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  FILE* file_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  FILE* file = fopen(name, "r+");
-  if (file == NULL) return NULL;
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-
-  void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  FILE* file = fopen(name, "w+");
-  if (file == NULL) return NULL;
-  int result = fwrite(initial, size, 1, file);
-  if (result < 1) {
-    fclose(file);
-    return NULL;
-  }
-  void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
-  fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  // This function assumes that the layout of the file is as follows:
-  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
-  // If we encounter an unexpected situation we abort scanning further entries.
-  FILE* fp = fopen("/proc/self/maps", "r");
-  if (fp == NULL) return result;
-
-  // Allocate enough room to be able to store a full file name.
-  const int kLibNameLen = FILENAME_MAX + 1;
-  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
-  // This loop will terminate once the scanning hits an EOF.
-  while (true) {
-    uintptr_t start, end;
-    char attr_r, attr_w, attr_x, attr_p;
-    // Parse the addresses and permission bits at the beginning of the line.
-    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
-    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
-    int c;
-    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
-      // Found a read-only executable entry. Skip characters until we reach
-      // the beginning of the filename or the end of the line.
-      do {
-        c = getc(fp);
-      } while ((c != EOF) && (c != '\n') && (c != '/') && (c != '['));
-      if (c == EOF) break;  // Unexpected EOF; just exit.
-
-      // Process the filename if found.
-      if ((c == '/') || (c == '[')) {
-        // Push the '/' or '[' back into the stream to be read below.
-        ungetc(c, fp);
-
-        // Read to the end of the line. Exit if the read fails.
-        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
-        // Drop the newline character read by fgets. We do not need to check
-        // for a zero-length string because we know that we at least read the
-        // '/' or '[' character.
-        lib_name[strlen(lib_name) - 1] = '\0';
-      } else {
-        // No library name found, just record the raw address range.
-        snprintf(lib_name, kLibNameLen,
-                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
-      }
-      result.push_back(SharedLibraryAddress(lib_name, start, end));
-    } else {
-      // Entry not describing executable data. Skip to end of line to set up
-      // reading the next entry.
-      do {
-        c = getc(fp);
-      } while ((c != EOF) && (c != '\n'));
-      if (c == EOF) break;
-    }
-  }
-  free(lib_name);
-  fclose(fp);
-  return result;
-}
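-
-// For illustration, a typical /proc/self/maps line the loop above accepts
-// (r-x permissions with a path) looks like:
-//   7f0e4b200000-7f0e4b3c0000 r-xp 00000000 08:01 131213 /usr/lib/libc.so.6
-// Writable or non-executable mappings fall through to the skip branch.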
-
-
-void OS::SignalCodeMovingGC() {
-  // Support for ll_prof.py.
-  //
-  // The Linux profiler built into the kernel logs all mmap's with
-  // PROT_EXEC so that analysis tools can properly attribute ticks. We
-  // do a mmap with a name known by ll_prof.py and immediately munmap
-  // it. This injects a GC marker into the stream of events generated
-  // by the kernel and allows us to synchronize V8 code log and the
-  // kernel log.
-  int size = sysconf(_SC_PAGESIZE);
-  FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
-  if (f == NULL) {
-    OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
-    OS::Abort();
-  }
-  void* addr = mmap(OS::GetRandomMmapAddr(),
-                    size,
-#if defined(__native_client__)
-                    // The Native Client port of V8 uses an interpreter,
-                    // so code pages don't need PROT_EXEC.
-                    PROT_READ,
-#else
-                    PROT_READ | PROT_EXEC,
-#endif
-                    MAP_PRIVATE,
-                    fileno(f),
-                    0);
-  ASSERT(addr != MAP_FAILED);
-  OS::Free(addr, size);
-  fclose(f);
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-#if defined(LEAK_SANITIZER)
-  __lsan_register_root_region(address_, size_);
-#endif
-}
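-
-// A worked example of the trimming above, assuming a 4 KB page size: for
-// size = 24 KB and alignment = 16 KB, request_size starts at 40 KB. If mmap
-// returns base = ...3000, aligned_base rounds up to ...4000, so a 4 KB prefix
-// and a 12 KB suffix are unmapped, leaving exactly the 24 KB aligned block.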
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-#if defined(LEAK_SANITIZER)
-  __lsan_register_root_region(result, size);
-#endif
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-#if defined(__native_client__)
-  // The Native Client port of V8 uses an interpreter,
-  // so code pages don't need PROT_EXEC.
-  int prot = PROT_READ | PROT_WRITE;
-#else
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-#endif
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-#if defined(LEAK_SANITIZER)
-  __lsan_unregister_root_region(base, size);
-#endif
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  return true;
-}
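-
-// For reference, Linux commits pages lazily (demand paging): pages mapped
-// read-write above are not backed by physical memory until first touched,
-// which is why HasLazyCommits() returns true here.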
-
-} }  // namespace v8::internal
diff --git a/src/platform-macos.cc b/src/platform-macos.cc
deleted file mode 100644
index 4301875..0000000
--- a/src/platform-macos.cc
+++ /dev/null
@@ -1,309 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for MacOS goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#include <dlfcn.h>
-#include <unistd.h>
-#include <sys/mman.h>
-#include <mach/mach_init.h>
-#include <mach-o/dyld.h>
-#include <mach-o/getsect.h>
-
-#include <AvailabilityMacros.h>
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <libkern/OSAtomic.h>
-#include <mach/mach.h>
-#include <mach/semaphore.h>
-#include <mach/task.h>
-#include <mach/vm_statistics.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <sys/sysctl.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <string.h>
-#include <errno.h>
-
-#undef MAP_TYPE
-
-#include "src/v8.h"
-
-#include "src/platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-// Constants used for mmap.
-// kMmapFd is used to pass vm_alloc flags to tag the region with the
-// user-defined tag 255. This helps identify V8-allocated regions in memory
-// analysis tools like vmmap(1).
-static const int kMmapFd = VM_MAKE_TAG(255);
-static const off_t kMmapFdOffset = 0;
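-
-// For reference (an assumption from the Darwin headers): VM_MAKE_TAG(255)
-// expands to (255 << 24), placing the tag in the top byte of the fd argument,
-// which the BSD mmap layer forwards as allocation flags so vmmap(1) can
-// attribute the region to tag 255.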
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(OS::GetRandomMmapAddr(),
-                     msize,
-                     prot,
-                     MAP_PRIVATE | MAP_ANON,
-                     kMmapFd,
-                     kMmapFdOffset);
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  PosixMemoryMappedFile(FILE* file, void* memory, int size)
-    : file_(file), memory_(memory), size_(size) { }
-  virtual ~PosixMemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  FILE* file_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  FILE* file = fopen(name, "r+");
-  if (file == NULL) return NULL;
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-
-  void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  FILE* file = fopen(name, "w+");
-  if (file == NULL) return NULL;
-  int result = fwrite(initial, size, 1, file);
-  if (result < 1) {
-    fclose(file);
-    return NULL;
-  }
-  void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
-  fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  unsigned int images_count = _dyld_image_count();
-  for (unsigned int i = 0; i < images_count; ++i) {
-    const mach_header* header = _dyld_get_image_header(i);
-    if (header == NULL) continue;
-#if V8_HOST_ARCH_X64
-    uint64_t size;
-    char* code_ptr = getsectdatafromheader_64(
-        reinterpret_cast<const mach_header_64*>(header),
-        SEG_TEXT,
-        SECT_TEXT,
-        &size);
-#else
-    unsigned int size;
-    char* code_ptr = getsectdatafromheader(header, SEG_TEXT, SECT_TEXT, &size);
-#endif
-    if (code_ptr == NULL) continue;
-    const uintptr_t slide = _dyld_get_image_vmaddr_slide(i);
-    const uintptr_t start = reinterpret_cast<uintptr_t>(code_ptr) + slide;
-    result.push_back(
-        SharedLibraryAddress(_dyld_get_image_name(i), start, start + size));
-  }
-  return result;
-}
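-
-// For reference, _dyld_get_image_vmaddr_slide(i) is the ASLR offset between
-// an image's link-time addresses and where dyld actually loaded it, so adding
-// it to the __TEXT,__text pointer above yields the runtime code range.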
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  time_t tv = time(NULL);
-  struct tm* t = localtime(&tv);
-  // tm_gmtoff includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t->tm_gmtoff * msPerSecond -
-                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* address,
-                                 size_t size,
-                                 bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(address,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* address, size_t size) {
-  return mmap(address,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* address, size_t size) {
-  return munmap(address, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  return false;
-}
-
-} }  // namespace v8::internal
diff --git a/src/platform-openbsd.cc b/src/platform-openbsd.cc
deleted file mode 100644
index 1f8e239..0000000
--- a/src/platform-openbsd.cc
+++ /dev/null
@@ -1,337 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for OpenBSD and NetBSD goes here. For the
-// POSIX-compatible parts, the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/syscall.h>
-#include <sys/types.h>
-#include <stdlib.h>
-
-#include <sys/types.h>  // mmap & munmap
-#include <sys/mman.h>   // mmap & munmap
-#include <sys/stat.h>   // open
-#include <fcntl.h>      // open
-#include <unistd.h>     // sysconf
-#include <strings.h>    // index
-#include <errno.h>
-#include <stdarg.h>
-
-#undef MAP_TYPE
-
-#include "src/v8.h"
-
-#include "src/platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  time_t tv = time(NULL);
-  struct tm* t = localtime(&tv);
-  // tm_gmtoff includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t->tm_gmtoff * msPerSecond -
-                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, AllocateAlignment());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = OS::GetRandomMmapAddr();
-  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  PosixMemoryMappedFile(FILE* file, void* memory, int size)
-    : file_(file), memory_(memory), size_(size) { }
-  virtual ~PosixMemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  FILE* file_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  FILE* file = fopen(name, "r+");
-  if (file == NULL) return NULL;
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-
-  void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  FILE* file = fopen(name, "w+");
-  if (file == NULL) return NULL;
-  int result = fwrite(initial, size, 1, file);
-  if (result < 1) {
-    fclose(file);
-    return NULL;
-  }
-  void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
-  fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  // This function assumes that the layout of the file is as follows:
-  // hex_start_addr-hex_end_addr rwxp <unused data> [binary_file_name]
-  // If we encounter an unexpected situation we abort scanning further entries.
-  FILE* fp = fopen("/proc/self/maps", "r");
-  if (fp == NULL) return result;
-
-  // Allocate enough room to be able to store a full file name.
-  const int kLibNameLen = FILENAME_MAX + 1;
-  char* lib_name = reinterpret_cast<char*>(malloc(kLibNameLen));
-
-  // This loop will terminate once the scanning hits an EOF.
-  while (true) {
-    uintptr_t start, end;
-    char attr_r, attr_w, attr_x, attr_p;
-    // Parse the addresses and permission bits at the beginning of the line.
-    if (fscanf(fp, "%" V8PRIxPTR "-%" V8PRIxPTR, &start, &end) != 2) break;
-    if (fscanf(fp, " %c%c%c%c", &attr_r, &attr_w, &attr_x, &attr_p) != 4) break;
-
-    int c;
-    if (attr_r == 'r' && attr_w != 'w' && attr_x == 'x') {
-      // Found a read-only executable entry. Skip characters until we reach
-      // the beginning of the filename or the end of the line.
-      do {
-        c = getc(fp);
-      } while ((c != EOF) && (c != '\n') && (c != '/'));
-      if (c == EOF) break;  // Unexpected EOF; just exit.
-
-      // Process the filename if found.
-      if (c == '/') {
-        ungetc(c, fp);  // Push the '/' back into the stream to be read below.
-
-        // Read to the end of the line. Exit if the read fails.
-        if (fgets(lib_name, kLibNameLen, fp) == NULL) break;
-
-        // Drop the newline character read by fgets. We do not need to check
-        // for a zero-length string because we know that we at least read the
-        // '/' character.
-        lib_name[strlen(lib_name) - 1] = '\0';
-      } else {
-        // No library name found, just record the raw address range.
-        snprintf(lib_name, kLibNameLen,
-                 "%08" V8PRIxPTR "-%08" V8PRIxPTR, start, end);
-      }
-      result.push_back(SharedLibraryAddress(lib_name, start, end));
-    } else {
-      // Entry not describing executable data. Skip to end of line to set up
-      // reading the next entry.
-      do {
-        c = getc(fp);
-      } while ((c != EOF) && (c != '\n'));
-      if (c == EOF) break;
-    }
-  }
-  free(lib_name);
-  fclose(fp);
-  return result;
-}
-
-
-void OS::SignalCodeMovingGC() {
-  // Support for ll_prof.py.
-  //
-  // The Linux profiler built into the kernel logs all mmap's with
-  // PROT_EXEC so that analysis tools can properly attribute ticks. We
-  // do a mmap with a name known by ll_prof.py and immediately munmap
-  // it. This injects a GC marker into the stream of events generated
-  // by the kernel and allows us to synchronize V8 code log and the
-  // kernel log.
-  int size = sysconf(_SC_PAGESIZE);
-  FILE* f = fopen(FLAG_gc_fake_mmap, "w+");
-  if (f == NULL) {
-    OS::PrintError("Failed to open %s\n", FLAG_gc_fake_mmap);
-    OS::Abort();
-  }
-  void* addr = mmap(NULL, size, PROT_READ | PROT_EXEC, MAP_PRIVATE,
-                    fileno(f), 0);
-  ASSERT(addr != MAP_FAILED);
-  OS::Free(addr, size);
-  fclose(f);
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANON | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANON | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANON | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
-} }  // namespace v8::internal
diff --git a/src/platform-posix.cc b/src/platform-posix.cc
deleted file mode 100644
index c963fb1..0000000
--- a/src/platform-posix.cc
+++ /dev/null
@@ -1,699 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for POSIX goes here. This is not a platform on its
-// own, but contains the parts which are the same across the POSIX platforms
-// Linux, MacOS, FreeBSD, OpenBSD, NetBSD and QNX.
-
-#include <dlfcn.h>
-#include <pthread.h>
-#if defined(__DragonFly__) || defined(__FreeBSD__) || defined(__OpenBSD__)
-#include <pthread_np.h>  // for pthread_set_name_np
-#endif
-#include <sched.h>  // for sched_yield
-#include <unistd.h>
-#include <errno.h>
-#include <time.h>
-
-#include <sys/mman.h>
-#include <sys/resource.h>
-#include <sys/time.h>
-#include <sys/types.h>
-#include <sys/stat.h>
-#if defined(__linux__)
-#include <sys/prctl.h>  // for prctl
-#endif
-#if defined(__APPLE__) || defined(__DragonFly__) || defined(__FreeBSD__) || \
-    defined(__NetBSD__) || defined(__OpenBSD__)
-#include <sys/sysctl.h>  // for sysctl
-#endif
-
-#include <arpa/inet.h>
-#include <netinet/in.h>
-#include <netdb.h>
-
-#undef MAP_TYPE
-
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-#define LOG_TAG "v8"
-#include <android/log.h>
-#endif
-
-#include "src/v8.h"
-
-#include "src/isolate-inl.h"
-#include "src/platform.h"
-
-#ifdef V8_FAST_TLS_SUPPORTED
-#include "src/base/atomicops.h"
-#endif
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id.
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-int OS::NumberOfProcessorsOnline() {
-  return static_cast<int>(sysconf(_SC_NPROCESSORS_ONLN));
-}
-
-
-// Maximum size of the virtual memory.  0 means there is no artificial
-// limit.
-
-intptr_t OS::MaxVirtualMemory() {
-  struct rlimit limit;
-  int result = getrlimit(RLIMIT_DATA, &limit);
-  if (result != 0) return 0;
-#if V8_OS_NACL
-  // The NaCl compiler doesn't like resource.h constants.
-  if (static_cast<int>(limit.rlim_cur) == -1) return 0;
-#else
-  if (limit.rlim_cur == RLIM_INFINITY) return 0;
-#endif
-  return limit.rlim_cur;
-}
-
-
-uint64_t OS::TotalPhysicalMemory() {
-#if V8_OS_MACOSX
-  int mib[2];
-  mib[0] = CTL_HW;
-  mib[1] = HW_MEMSIZE;
-  int64_t size = 0;
-  size_t len = sizeof(size);
-  if (sysctl(mib, 2, &size, &len, NULL, 0) != 0) {
-    UNREACHABLE();
-    return 0;
-  }
-  return static_cast<uint64_t>(size);
-#elif V8_OS_FREEBSD
-  int pages, page_size;
-  size_t size = sizeof(pages);
-  sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
-  sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
-  if (pages == -1 || page_size == -1) {
-    UNREACHABLE();
-    return 0;
-  }
-  return static_cast<uint64_t>(pages) * page_size;
-#elif V8_OS_CYGWIN
-  MEMORYSTATUS memory_info;
-  memory_info.dwLength = sizeof(memory_info);
-  if (!GlobalMemoryStatus(&memory_info)) {
-    UNREACHABLE();
-    return 0;
-  }
-  return static_cast<uint64_t>(memory_info.dwTotalPhys);
-#elif V8_OS_QNX
-  struct stat stat_buf;
-  if (stat("/proc", &stat_buf) != 0) {
-    UNREACHABLE();
-    return 0;
-  }
-  return static_cast<uint64_t>(stat_buf.st_size);
-#else
-  intptr_t pages = sysconf(_SC_PHYS_PAGES);
-  intptr_t page_size = sysconf(_SC_PAGESIZE);
-  if (pages == -1 || page_size == -1) {
-    UNREACHABLE();
-    return 0;
-  }
-  return static_cast<uint64_t>(pages) * page_size;
-#endif
-}
-
-
-int OS::ActivationFrameAlignment() {
-#if V8_TARGET_ARCH_ARM
-  // On EABI ARM targets this is required for fp correctness in the
-  // runtime system.
-  return 8;
-#elif V8_TARGET_ARCH_MIPS
-  return 8;
-#else
-  // Otherwise we just assume 16 byte alignment, i.e.:
-  // - With gcc 4.4 the tree vectorization optimizer can generate code
-  //   that requires 16 byte alignment such as movdqa on x86.
-  // - Mac OS X and Solaris (64-bit) activation frames must be 16-byte aligned;
-  //   see "Mac OS X ABI Function Call Guide"
-  return 16;
-#endif
-}
-
-
-intptr_t OS::CommitPageSize() {
-  static intptr_t page_size = getpagesize();
-  return page_size;
-}
-
-
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): munmap has a return value which is ignored here.
-  int result = munmap(address, size);
-  USE(result);
-  ASSERT(result == 0);
-}
-
-
-// Get rid of writable permission on code allocations.
-void OS::ProtectCode(void* address, const size_t size) {
-#if V8_OS_CYGWIN
-  DWORD old_protect;
-  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-#elif V8_OS_NACL
-  // The Native Client port of V8 uses an interpreter, so
-  // code pages don't need PROT_EXEC.
-  mprotect(address, size, PROT_READ);
-#else
-  mprotect(address, size, PROT_READ | PROT_EXEC);
-#endif
-}
-
-
-// Create guard pages.
-void OS::Guard(void* address, const size_t size) {
-#if V8_OS_CYGWIN
-  DWORD oldprotect;
-  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-#else
-  mprotect(address, size, PROT_NONE);
-#endif
-}
-
-
-void* OS::GetRandomMmapAddr() {
-#if V8_OS_NACL
-  // TODO(bradchen): restore randomization once Native Client gets
-  // smarter about using mmap address hints.
-  // See http://code.google.com/p/nativeclient/issues/3341
-  return NULL;
-#endif
-#if defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER) || \
-    defined(THREAD_SANITIZER)
-  // Dynamic tools do not support custom mmap addresses.
-  return NULL;
-#endif
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-    uintptr_t raw_addr;
-    isolate->random_number_generator()->NextBytes(&raw_addr, sizeof(raw_addr));
-#if V8_TARGET_ARCH_X64
-    // Currently available CPUs have 48 bits of virtual addressing.  Truncate
-    // the hint address to 46 bits to give the kernel a fighting chance of
-    // fulfilling our placement request.
-    raw_addr &= V8_UINT64_C(0x3ffffffff000);
-#else
-    raw_addr &= 0x3ffff000;
-
-# ifdef __sun
-    // For our Solaris/illumos mmap hint, we pick a random address in the bottom
-    // half of the top half of the address space (that is, the third quarter).
-    // Because we do not MAP_FIXED, this will be treated only as a hint -- the
-    // system will not fail to mmap() because something else happens to already
-    // be mapped at our random address. We deliberately set the hint high enough
-    // to get well above the system's break (that is, the heap); Solaris and
-    // illumos will try the hint and if that fails allocate as if there were
-    // no hint at all. The high hint prevents the break from getting hemmed in
-    // at low values, ceding half of the address space to the system heap.
-    raw_addr += 0x80000000;
-# else
-    // The range 0x20000000 - 0x60000000 is relatively unpopulated across a
-    // variety of ASLR modes (PAE kernel, NX compat mode, etc) and on macos
-    // 10.6 and 10.7.
-    raw_addr += 0x20000000;
-# endif
-#endif
-    return reinterpret_cast<void*>(raw_addr);
-  }
-  return NULL;
-}
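-
-// For illustration, the x64 mask 0x3ffffffff000 keeps bits 12 through 45, so
-// the hint is page-aligned and below 64 TB; the 32-bit mask 0x3ffff000
-// likewise yields a page-aligned hint below 1 GB before the platform offset
-// (0x20000000 or 0x80000000) is added.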
-
-
-size_t OS::AllocateAlignment() {
-  return static_cast<size_t>(sysconf(_SC_PAGESIZE));
-}
-
-
-void OS::Sleep(int milliseconds) {
-  useconds_t ms = static_cast<useconds_t>(milliseconds);
-  usleep(1000 * ms);
-}
-
-
-void OS::Abort() {
-  if (FLAG_hard_abort) {
-    V8_IMMEDIATE_CRASH();
-  }
-  // Redirect to std abort to signal abnormal program termination.
-  abort();
-}
-
-
-void OS::DebugBreak() {
-#if V8_HOST_ARCH_ARM
-  asm("bkpt 0");
-#elif V8_HOST_ARCH_ARM64
-  asm("brk 0");
-#elif V8_HOST_ARCH_MIPS
-  asm("break");
-#elif V8_HOST_ARCH_IA32
-#if defined(__native_client__)
-  asm("hlt");
-#else
-  asm("int $3");
-#endif  // __native_client__
-#elif V8_HOST_ARCH_X64
-  asm("int $3");
-#else
-#error Unsupported host architecture.
-#endif
-}
-
-
-// ----------------------------------------------------------------------------
-// Math functions
-
-double OS::nan_value() {
-  // NAN from math.h is defined in C99 and not in POSIX.
-  return NAN;
-}
-
-
-int OS::GetCurrentProcessId() {
-  return static_cast<int>(getpid());
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX date/time support.
-//
-
-int OS::GetUserTime(uint32_t* secs, uint32_t* usecs) {
-  struct rusage usage;
-
-  if (getrusage(RUSAGE_SELF, &usage) < 0) return -1;
-  *secs = usage.ru_utime.tv_sec;
-  *usecs = usage.ru_utime.tv_usec;
-  return 0;
-}
-
-
-double OS::TimeCurrentMillis() {
-  return Time::Now().ToJsTime();
-}
-
-
-class TimezoneCache {};
-
-
-TimezoneCache* OS::CreateTimezoneCache() {
-  return NULL;
-}
-
-
-void OS::DisposeTimezoneCache(TimezoneCache* cache) {
-  ASSERT(cache == NULL);
-}
-
-
-void OS::ClearTimezoneCache(TimezoneCache* cache) {
-  ASSERT(cache == NULL);
-}
-
-
-double OS::DaylightSavingsOffset(double time, TimezoneCache*) {
-  if (std::isnan(time)) return nan_value();
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return nan_value();
-  return t->tm_isdst > 0 ? 3600 * msPerSecond : 0;
-}
-
-
-int OS::GetLastError() {
-  return errno;
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX stdio support.
-//
-
-FILE* OS::FOpen(const char* path, const char* mode) {
-  FILE* file = fopen(path, mode);
-  if (file == NULL) return NULL;
-  struct stat file_stat;
-  if (fstat(fileno(file), &file_stat) != 0) return NULL;
-  bool is_regular_file = ((file_stat.st_mode & S_IFREG) != 0);
-  if (is_regular_file) return file;
-  fclose(file);
-  return NULL;
-}
-
-
-bool OS::Remove(const char* path) {
-  return (remove(path) == 0);
-}
-
-
-FILE* OS::OpenTemporaryFile() {
-  return tmpfile();
-}
-
-
-const char* const OS::LogFileOpenMode = "w";
-
-
-void OS::Print(const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  VPrint(format, args);
-  va_end(args);
-}
-
-
-void OS::VPrint(const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
-#else
-  vprintf(format, args);
-#endif
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  VFPrint(out, format, args);
-  va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  __android_log_vprint(ANDROID_LOG_INFO, LOG_TAG, format, args);
-#else
-  vfprintf(out, format, args);
-#endif
-}
-
-
-void OS::PrintError(const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  VPrintError(format, args);
-  va_end(args);
-}
-
-
-void OS::VPrintError(const char* format, va_list args) {
-#if defined(ANDROID) && !defined(V8_ANDROID_LOG_STDOUT)
-  __android_log_vprint(ANDROID_LOG_ERROR, LOG_TAG, format, args);
-#else
-  vfprintf(stderr, format, args);
-#endif
-}
-
-
-int OS::SNPrintF(char* str, int length, const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  int result = VSNPrintF(str, length, format, args);
-  va_end(args);
-  return result;
-}
-
-
-int OS::VSNPrintF(char* str,
-                  int length,
-                  const char* format,
-                  va_list args) {
-  int n = vsnprintf(str, length, format, args);
-  if (n < 0 || n >= length) {
-    // NUL-terminate the (truncated) output; when length is zero there is
-    // no byte to write, so skip the assignment.
-    if (length > 0)
-      str[length - 1] = '\0';
-    return -1;
-  } else {
-    return n;
-  }
-}
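-
-// A minimal usage sketch of the contract above: an undersized buffer is still
-// NUL-terminated and the call reports the truncation with -1.
-//
-//   char buf[4];
-//   int n = OS::SNPrintF(buf, sizeof(buf), "%d", 12345);
-//   // n == -1; buf now holds "123" plus the terminating '\0'.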
-
-
-// ----------------------------------------------------------------------------
-// POSIX string support.
-//
-
-char* OS::StrChr(char* str, int c) {
-  return strchr(str, c);
-}
-
-
-void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
-  strncpy(dest, src, n);
-}
-
-
-// ----------------------------------------------------------------------------
-// POSIX thread support.
-//
-
-class Thread::PlatformData : public Malloced {
- public:
-  PlatformData() : thread_(kNoThread) {}
-  pthread_t thread_;  // Thread handle for pthread.
-  // Synchronizes thread creation
-  Mutex thread_creation_mutex_;
-};
-
-Thread::Thread(const Options& options)
-    : data_(new PlatformData),
-      stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  if (stack_size_ > 0 && stack_size_ < PTHREAD_STACK_MIN) {
-    stack_size_ = PTHREAD_STACK_MIN;
-  }
-  set_name(options.name());
-}
-
-
-Thread::~Thread() {
-  delete data_;
-}
-
-
-static void SetThreadName(const char* name) {
-#if V8_OS_DRAGONFLYBSD || V8_OS_FREEBSD || V8_OS_OPENBSD
-  pthread_set_name_np(pthread_self(), name);
-#elif V8_OS_NETBSD
-  STATIC_ASSERT(Thread::kMaxThreadNameLength <= PTHREAD_MAX_NAMELEN_NP);
-  pthread_setname_np(pthread_self(), "%s", name);
-#elif V8_OS_MACOSX
-  // pthread_setname_np is only available in 10.6 or later, so test
-  // for it at runtime.
-  int (*dynamic_pthread_setname_np)(const char*);
-  *reinterpret_cast<void**>(&dynamic_pthread_setname_np) =
-    dlsym(RTLD_DEFAULT, "pthread_setname_np");
-  if (dynamic_pthread_setname_np == NULL)
-    return;
-
-  // Mac OS X does not expose the length limit of the name, so hardcode it.
-  static const int kMaxNameLength = 63;
-  STATIC_ASSERT(Thread::kMaxThreadNameLength <= kMaxNameLength);
-  dynamic_pthread_setname_np(name);
-#elif defined(PR_SET_NAME)
-  prctl(PR_SET_NAME,
-        reinterpret_cast<unsigned long>(name),  // NOLINT
-        0, 0, 0);
-#endif
-}
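-
-// Note that on Linux, PR_SET_NAME truncates the name to 16 bytes (including
-// the terminating NUL), so longer V8 thread names are silently shortened.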
-
-
-static void* ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  // We take the lock here to make sure that pthread_create has finished,
-  // since we don't know which thread will run first (the original thread or
-  // the new one).
-  { LockGuard<Mutex> lock_guard(&thread->data()->thread_creation_mutex_); }
-  SetThreadName(thread->name());
-  ASSERT(thread->data()->thread_ != kNoThread);
-  thread->NotifyStartedAndRun();
-  return NULL;
-}
-
-
-void Thread::set_name(const char* name) {
-  strncpy(name_, name, sizeof(name_));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-void Thread::Start() {
-  int result;
-  pthread_attr_t attr;
-  memset(&attr, 0, sizeof(attr));
-  result = pthread_attr_init(&attr);
-  ASSERT_EQ(0, result);
-  // Native client uses default stack size.
-#if !V8_OS_NACL
-  if (stack_size_ > 0) {
-    result = pthread_attr_setstacksize(&attr, static_cast<size_t>(stack_size_));
-    ASSERT_EQ(0, result);
-  }
-#endif
-  {
-    LockGuard<Mutex> lock_guard(&data_->thread_creation_mutex_);
-    result = pthread_create(&data_->thread_, &attr, ThreadEntry, this);
-  }
-  ASSERT_EQ(0, result);
-  result = pthread_attr_destroy(&attr);
-  ASSERT_EQ(0, result);
-  ASSERT(data_->thread_ != kNoThread);
-  USE(result);
-}
-
-
-void Thread::Join() {
-  pthread_join(data_->thread_, NULL);
-}
-
-
-void Thread::YieldCPU() {
-  int result = sched_yield();
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-static Thread::LocalStorageKey PthreadKeyToLocalKey(pthread_key_t pthread_key) {
-#if V8_OS_CYGWIN
-  // We need to cast pthread_key_t to Thread::LocalStorageKey in two steps
-  // because pthread_key_t is a pointer type on Cygwin. This will probably not
-  // work on 64-bit platforms, but Cygwin doesn't support 64-bit anyway.
-  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
-  intptr_t ptr_key = reinterpret_cast<intptr_t>(pthread_key);
-  return static_cast<Thread::LocalStorageKey>(ptr_key);
-#else
-  return static_cast<Thread::LocalStorageKey>(pthread_key);
-#endif
-}
-
-
-static pthread_key_t LocalKeyToPthreadKey(Thread::LocalStorageKey local_key) {
-#if V8_OS_CYGWIN
-  STATIC_ASSERT(sizeof(Thread::LocalStorageKey) == sizeof(pthread_key_t));
-  intptr_t ptr_key = static_cast<intptr_t>(local_key);
-  return reinterpret_cast<pthread_key_t>(ptr_key);
-#else
-  return static_cast<pthread_key_t>(local_key);
-#endif
-}
-
-
-#ifdef V8_FAST_TLS_SUPPORTED
-
-static base::Atomic32 tls_base_offset_initialized = 0;
-intptr_t kMacTlsBaseOffset = 0;
-
-// It's safe to do the initialization more than once, but it has to be
-// done at least once.
-static void InitializeTlsBaseOffset() {
-  const size_t kBufferSize = 128;
-  char buffer[kBufferSize];
-  size_t buffer_size = kBufferSize;
-  int ctl_name[] = { CTL_KERN , KERN_OSRELEASE };
-  if (sysctl(ctl_name, 2, buffer, &buffer_size, NULL, 0) != 0) {
-    V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version");
-  }
-  // The buffer now contains a string of the form XX.YY.ZZ, where
-  // XX is the major kernel version component.
-  // Make sure the buffer is 0-terminated.
-  buffer[kBufferSize - 1] = '\0';
-  char* period_pos = strchr(buffer, '.');
-  *period_pos = '\0';
-  int kernel_version_major =
-      static_cast<int>(strtol(buffer, NULL, 10));  // NOLINT
-  // The constants below are taken from pthreads.s from the XNU kernel
-  // sources archive at www.opensource.apple.com.
-  if (kernel_version_major < 11) {
-    // 8.x.x (Tiger), 9.x.x (Leopard), 10.x.x (Snow Leopard) have the
-    // same offsets.
-#if V8_HOST_ARCH_IA32
-    kMacTlsBaseOffset = 0x48;
-#else
-    kMacTlsBaseOffset = 0x60;
-#endif
-  } else {
-    // 11.x.x (Lion) changed the offset.
-    kMacTlsBaseOffset = 0;
-  }
-
-  base::Release_Store(&tls_base_offset_initialized, 1);
-}
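-
-// For illustration, KERN_OSRELEASE yields the Darwin kernel version string,
-// e.g. "10.8.0" on Snow Leopard (major 10, so the 0x48/0x60 offsets apply) or
-// "11.4.2" on Lion (major 11, so the offset is 0).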
-
-
-static void CheckFastTls(Thread::LocalStorageKey key) {
-  void* expected = reinterpret_cast<void*>(0x1234CAFE);
-  Thread::SetThreadLocal(key, expected);
-  void* actual = Thread::GetExistingThreadLocal(key);
-  if (expected != actual) {
-    V8_Fatal(__FILE__, __LINE__,
-             "V8 failed to initialize fast TLS on current kernel");
-  }
-  Thread::SetThreadLocal(key, NULL);
-}
-
-#endif  // V8_FAST_TLS_SUPPORTED
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-#ifdef V8_FAST_TLS_SUPPORTED
-  bool check_fast_tls = false;
-  if (tls_base_offset_initialized == 0) {
-    check_fast_tls = true;
-    InitializeTlsBaseOffset();
-  }
-#endif
-  pthread_key_t key;
-  int result = pthread_key_create(&key, NULL);
-  ASSERT_EQ(0, result);
-  USE(result);
-  LocalStorageKey local_key = PthreadKeyToLocalKey(key);
-#ifdef V8_FAST_TLS_SUPPORTED
-  // If we just initialized fast TLS support, make sure it works.
-  if (check_fast_tls) CheckFastTls(local_key);
-#endif
-  return local_key;
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
-  int result = pthread_key_delete(pthread_key);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
-  return pthread_getspecific(pthread_key);
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  pthread_key_t pthread_key = LocalKeyToPthreadKey(key);
-  int result = pthread_setspecific(pthread_key, value);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/platform-qnx.cc b/src/platform-qnx.cc
deleted file mode 100644
index 3c95650..0000000
--- a/src/platform-qnx.cc
+++ /dev/null
@@ -1,373 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for QNX goes here. For the POSIX-compatible
-// parts the implementation is in platform-posix.cc.
-
-#include <pthread.h>
-#include <semaphore.h>
-#include <signal.h>
-#include <sys/time.h>
-#include <sys/resource.h>
-#include <sys/types.h>
-#include <stdlib.h>
-#include <ucontext.h>
-#include <backtrace.h>
-
-// QNX requires memory pages to be marked as executable.
-// Otherwise, the OS raises an exception when executing code in that page.
-#include <sys/types.h>  // mmap & munmap
-#include <sys/mman.h>   // mmap & munmap
-#include <sys/stat.h>   // open
-#include <fcntl.h>      // open
-#include <unistd.h>     // sysconf
-#include <strings.h>    // index
-#include <errno.h>
-#include <stdarg.h>
-#include <sys/procfs.h>
-
-#undef MAP_TYPE
-
-#include "src/v8.h"
-
-#include "src/platform.h"
-
-
-namespace v8 {
-namespace internal {
-
-// 0 is never a valid thread id on QNX since tids and pids share a
-// namespace and pid 0 is reserved (see man 2 kill).
-static const pthread_t kNoThread = (pthread_t) 0;
-
-
-#ifdef __arm__
-
-bool OS::ArmUsingHardFloat() {
-  // GCC versions 4.6 and above define __ARM_PCS or __ARM_PCS_VFP to specify
-  // the Floating Point ABI used (PCS stands for Procedure Call Standard).
-  // We use these as well as a couple of other defines to statically determine
-  // what FP ABI is used.
-  // GCC versions 4.4 and below don't support hard-fp.
-  // GCC version 4.5 may support hard-fp without defining __ARM_PCS or
-  // __ARM_PCS_VFP.
-
-#define GCC_VERSION (__GNUC__ * 10000                                          \
-                     + __GNUC_MINOR__ * 100                                    \
-                     + __GNUC_PATCHLEVEL__)
-#if GCC_VERSION >= 40600
-#if defined(__ARM_PCS_VFP)
-  return true;
-#else
-  return false;
-#endif
-
-#elif GCC_VERSION < 40500
-  return false;
-
-#else
-#if defined(__ARM_PCS_VFP)
-  return true;
-#elif defined(__ARM_PCS) || defined(__SOFTFP__) || defined(__SOFTFP) || \
-      !defined(__VFP_FP__)
-  return false;
-#else
-#error "Your version of GCC does not report the FP ABI compiled for."          \
-       "Please report it on this issue"                                        \
-       "http://code.google.com/p/v8/issues/detail?id=2140"
-
-#endif
-#endif
-#undef GCC_VERSION
-}
-
-#endif  // __arm__
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return t->tm_zone;
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  time_t tv = time(NULL);
-  struct tm* t = localtime(&tv);
-  // tm_gmtoff includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t->tm_gmtoff * msPerSecond -
-                             (t->tm_isdst > 0 ? 3600 * msPerSecond : 0));
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, AllocateAlignment());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* addr = OS::GetRandomMmapAddr();
-  void* mbase = mmap(addr, msize, prot, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  PosixMemoryMappedFile(FILE* file, void* memory, int size)
-    : file_(file), memory_(memory), size_(size) { }
-  virtual ~PosixMemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  FILE* file_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  FILE* file = fopen(name, "r+");
-  if (file == NULL) return NULL;
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-
-  void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  FILE* file = fopen(name, "w+");
-  if (file == NULL) return NULL;
-  int result = fwrite(initial, size, 1, file);
-  if (result < 1) {
-    fclose(file);
-    return NULL;
-  }
-  void* memory =
-      mmap(OS::GetRandomMmapAddr(),
-           size,
-           PROT_READ | PROT_WRITE,
-           MAP_SHARED,
-           fileno(file),
-           0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) OS::Free(memory_, size_);
-  fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  std::vector<SharedLibraryAddress> result;
-  procfs_mapinfo *mapinfos = NULL, *mapinfo;
-  int proc_fd, num, i;
-
-  struct {
-    procfs_debuginfo info;
-    char buff[PATH_MAX];
-  } map;
-
-  char buf[PATH_MAX + 1];
-  snprintf(buf, PATH_MAX + 1, "/proc/%d/as", getpid());
-
-  if ((proc_fd = open(buf, O_RDONLY)) == -1) {
-    // Nothing was opened, so there is no descriptor to close.
-    return result;
-  }
-
-  /* Get the number of map entries.  */
-  if (devctl(proc_fd, DCMD_PROC_MAPINFO, NULL, 0, &num) != EOK) {
-    close(proc_fd);
-    return result;
-  }
-
-  mapinfos = reinterpret_cast<procfs_mapinfo *>(
-      malloc(num * sizeof(procfs_mapinfo)));
-  if (mapinfos == NULL) {
-    close(proc_fd);
-    return result;
-  }
-
-  /* Fill the map entries.  */
-  if (devctl(proc_fd, DCMD_PROC_PAGEDATA,
-      mapinfos, num * sizeof(procfs_mapinfo), &num) != EOK) {
-    free(mapinfos);
-    close(proc_fd);
-    return result;
-  }
-
-  for (i = 0; i < num; i++) {
-    mapinfo = mapinfos + i;
-    if (mapinfo->flags & MAP_ELF) {
-      map.info.vaddr = mapinfo->vaddr;
-      if (devctl(proc_fd, DCMD_PROC_MAPDEBUG, &map, sizeof(map), 0) != EOK) {
-        continue;
-      }
-      result.push_back(SharedLibraryAddress(
-          map.info.path, mapinfo->vaddr, mapinfo->vaddr + mapinfo->size));
-    }
-  }
-  free(mapinfos);
-  close(proc_fd);
-  return result;
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
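// A standalone sketch of the trimming arithmetic above, assuming power-of-two
// page size and alignment: an over-sized reservation is carved down to an
// aligned block by releasing the misaligned prefix and the unused suffix.
// RoundUpTo is a stand-in for the RoundUp helper used in the real code.
#include <cassert>
#include <cstddef>
#include <cstdint>

inline uintptr_t RoundUpTo(uintptr_t value, size_t boundary) {
  return (value + boundary - 1) & ~(uintptr_t(boundary) - 1);
}

int main() {
  const size_t kPage = 0x1000, kAlignment = 0x10000, kSize = 0x5000;
  uintptr_t raw = 0x20003000;  // pretend mmap returned this base
  size_t request_size = RoundUpTo(kSize + kAlignment, kPage);
  uintptr_t aligned_base = RoundUpTo(raw, kAlignment);
  size_t prefix = aligned_base - raw;                    // freed before block
  size_t aligned_size = RoundUpTo(kSize, kPage);
  size_t suffix = request_size - prefix - aligned_size;  // freed after block
  assert(aligned_base % kAlignment == 0);
  assert(aligned_base + aligned_size <= raw + request_size);
  assert(prefix + aligned_size + suffix == request_size);
  return 0;
}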
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_LAZY,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED | MAP_LAZY,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  return false;
-}
-
-} }  // namespace v8::internal
diff --git a/src/platform-solaris.cc b/src/platform-solaris.cc
deleted file mode 100644
index fc8cb72..0000000
--- a/src/platform-solaris.cc
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for Solaris 10 goes here. For the POSIX-compatible
-// parts, the implementation is in platform-posix.cc.
-
-#ifdef __sparc
-# error "V8 does not support the SPARC CPU architecture."
-#endif
-
-#include <sys/stack.h>  // for stack alignment
-#include <unistd.h>  // getpagesize(), usleep()
-#include <sys/mman.h>  // mmap()
-#include <ucontext.h>  // walkstack(), getcontext()
-#include <dlfcn.h>     // dladdr
-#include <pthread.h>
-#include <semaphore.h>
-#include <time.h>
-#include <sys/time.h>  // gettimeofday(), timeradd()
-#include <errno.h>
-#include <ieeefp.h>  // finite()
-#include <signal.h>  // sigemptyset(), etc
-#include <sys/regset.h>
-
-
-#undef MAP_TYPE
-
-#include "src/v8.h"
-
-#include "src/platform.h"
-
-
-// It seems there is a bug in some Solaris distributions (experienced in
-// SunOS 5.10 Generic_141445-09) which makes it difficult or impossible to
-// access signbit() despite the availability of other C99 math functions.
-#ifndef signbit
-namespace std {
-// Test sign - usually defined in math.h
-int signbit(double x) {
-  // We need to take care of the special case of both positive and negative
-  // versions of zero.
-  if (x == 0) {
-    return fpclass(x) & FP_NZERO;
-  } else {
-    // This won't detect negative NaN but that should be okay since we don't
-    // assume that behavior.
-    return x < 0;
-  }
-}
-}  // namespace std
-#endif  // signbit
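// A standalone behaviour sketch of the fallback above: the fpclass() test is
// what lets it report the sign of -0.0, which the plain "x < 0" branch
// cannot. std::copysign() is used here as a portable stand-in for
// fpclass(x) & FP_NZERO.
#include <cassert>
#include <cmath>

int signbit_fallback(double x) {
  if (x == 0) return std::copysign(1.0, x) < 0;  // distinguishes -0.0 from +0.0
  return x < 0;
}

int main() {
  assert(signbit_fallback(-0.0) == 1);
  assert(signbit_fallback(0.0) == 0);
  assert(signbit_fallback(-2.5) == 1);
  assert(signbit_fallback(2.5) == 0);
  return 0;
}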
-
-namespace v8 {
-namespace internal {
-
-
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  if (std::isnan(time)) return "";
-  time_t tv = static_cast<time_t>(std::floor(time/msPerSecond));
-  struct tm* t = localtime(&tv);
-  if (NULL == t) return "";
-  return tzname[0];  // The location of the timezone string on Solaris.
-}
-
-
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  tzset();
-  return -static_cast<double>(timezone * msPerSecond);
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  const size_t msize = RoundUp(requested, getpagesize());
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  void* mbase = mmap(NULL, msize, prot, MAP_PRIVATE | MAP_ANON, -1, 0);
-
-  if (mbase == MAP_FAILED) return NULL;
-  *allocated = msize;
-  return mbase;
-}
-
-
-class PosixMemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  PosixMemoryMappedFile(FILE* file, void* memory, int size)
-    : file_(file), memory_(memory), size_(size) { }
-  virtual ~PosixMemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  FILE* file_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  FILE* file = fopen(name, "r+");
-  if (file == NULL) return NULL;
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-
-  void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  FILE* file = fopen(name, "w+");
-  if (file == NULL) return NULL;
-  int result = fwrite(initial, size, 1, file);
-  if (result < 1) {
-    fclose(file);
-    return NULL;
-  }
-  void* memory =
-      mmap(0, size, PROT_READ | PROT_WRITE, MAP_SHARED, fileno(file), 0);
-  return new PosixMemoryMappedFile(file, memory, size);
-}
-
-
-PosixMemoryMappedFile::~PosixMemoryMappedFile() {
-  if (memory_) munmap(memory_, size_);
-  fclose(file_);
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  return std::vector<SharedLibraryAddress>();
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-// Constants used for mmap.
-static const int kMmapFd = -1;
-static const int kMmapFdOffset = 0;
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* reservation = mmap(OS::GetRandomMmapAddr(),
-                           request_size,
-                           PROT_NONE,
-                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                           kMmapFd,
-                           kMmapFdOffset);
-  if (reservation == MAP_FAILED) return;
-
-  Address base = static_cast<Address>(reservation);
-  Address aligned_base = RoundUp(base, alignment);
-  ASSERT_LE(base, aligned_base);
-
-  // Unmap extra memory reserved before and after the desired block.
-  if (aligned_base != base) {
-    size_t prefix_size = static_cast<size_t>(aligned_base - base);
-    OS::Free(base, prefix_size);
-    request_size -= prefix_size;
-  }
-
-  size_t aligned_size = RoundUp(size, OS::AllocateAlignment());
-  ASSERT_LE(aligned_size, request_size);
-
-  if (aligned_size != request_size) {
-    size_t suffix_size = request_size - aligned_size;
-    OS::Free(aligned_base + aligned_size, suffix_size);
-    request_size -= suffix_size;
-  }
-
-  ASSERT(aligned_size == request_size);
-
-  address_ = static_cast<void*>(aligned_base);
-  size_ = aligned_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  OS::Guard(address, OS::CommitPageSize());
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  void* result = mmap(OS::GetRandomMmapAddr(),
-                      size,
-                      PROT_NONE,
-                      MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE,
-                      kMmapFd,
-                      kMmapFdOffset);
-
-  if (result == MAP_FAILED) return NULL;
-
-  return result;
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = PROT_READ | PROT_WRITE | (is_executable ? PROT_EXEC : 0);
-  if (MAP_FAILED == mmap(base,
-                         size,
-                         prot,
-                         MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED,
-                         kMmapFd,
-                         kMmapFdOffset)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return mmap(base,
-              size,
-              PROT_NONE,
-              MAP_PRIVATE | MAP_ANONYMOUS | MAP_NORESERVE | MAP_FIXED,
-              kMmapFd,
-              kMmapFdOffset) != MAP_FAILED;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return munmap(base, size) == 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
-} }  // namespace v8::internal
diff --git a/src/platform-win32.cc b/src/platform-win32.cc
deleted file mode 100644
index b1e6478..0000000
--- a/src/platform-win32.cc
+++ /dev/null
@@ -1,1410 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Platform-specific code for Win32.
-
-// Secure API functions are not available using MinGW with msvcrt.dll
-// on Windows XP. Make sure MINGW_HAS_SECURE_API is not defined, so that
-// the standard headers do not declare secure API functions that would
-// conflict with our own implementations.
-#ifdef __MINGW32__
-#include <_mingw.h>
-#ifdef MINGW_HAS_SECURE_API
-#undef MINGW_HAS_SECURE_API
-#endif  // MINGW_HAS_SECURE_API
-#endif  // __MINGW32__
-
-#include "src/base/win32-headers.h"
-
-#include "src/v8.h"
-
-#include "src/isolate-inl.h"
-#include "src/platform.h"
-
-#ifdef _MSC_VER
-
-// Case-insensitive bounded string comparisons. Use _strnicmp() on Win32.
-// Usually defined in strings.h.
-int strncasecmp(const char* s1, const char* s2, int n) {
-  return _strnicmp(s1, s2, n);
-}
-
-#endif  // _MSC_VER
-
-
-// Extra functions for MinGW. Most of these are the _s functions which are in
-// the Microsoft Visual Studio C++ CRT.
-#ifdef __MINGW32__
-
-
-#ifndef __MINGW64_VERSION_MAJOR
-
-#define _TRUNCATE 0
-#define STRUNCATE 80
-
-inline void MemoryBarrier() {
-  int barrier = 0;
-  __asm__ __volatile__("xchgl %%eax,%0 ":"=r" (barrier));
-}
-
-#endif  // __MINGW64_VERSION_MAJOR
-
-
-int localtime_s(tm* out_tm, const time_t* time) {
-  tm* posix_local_time_struct = localtime(time);
-  if (posix_local_time_struct == NULL) return 1;
-  *out_tm = *posix_local_time_struct;
-  return 0;
-}
-
-
-int fopen_s(FILE** pFile, const char* filename, const char* mode) {
-  *pFile = fopen(filename, mode);
-  return *pFile != NULL ? 0 : 1;
-}
-
-int _vsnprintf_s(char* buffer, size_t sizeOfBuffer, size_t count,
-                 const char* format, va_list argptr) {
-  ASSERT(count == _TRUNCATE);
-  return _vsnprintf(buffer, sizeOfBuffer, format, argptr);
-}
-
-
-int strncpy_s(char* dest, size_t dest_size, const char* source, size_t count) {
-  CHECK(source != NULL);
-  CHECK(dest != NULL);
-  CHECK_GT(dest_size, 0);
-
-  if (count == _TRUNCATE) {
-    while (dest_size > 0 && *source != 0) {
-      *(dest++) = *(source++);
-      --dest_size;
-    }
-    if (dest_size == 0) {
-      *(dest - 1) = 0;
-      return STRUNCATE;
-    }
-  } else {
-    while (dest_size > 0 && count > 0 && *source != 0) {
-      *(dest++) = *(source++);
-      --dest_size;
-      --count;
-    }
-  }
-  CHECK_GT(dest_size, 0);
-  *dest = 0;
-  return 0;
-}
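// Usage sketch for the shim above (assumes the shim is in scope): with
// count == _TRUNCATE the copy stops at the buffer edge, always
// zero-terminates, and reports STRUNCATE instead of failing.
void StrncpySDemo() {
  char buf[4];
  int rc = strncpy_s(buf, sizeof(buf), "abcdef", _TRUNCATE);
  // rc == STRUNCATE; buf now holds "abc\0".
  (void)rc;
}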
-
-#endif  // __MINGW32__
-
-namespace v8 {
-namespace internal {
-
-intptr_t OS::MaxVirtualMemory() {
-  return 0;
-}
-
-
-class TimezoneCache {
- public:
-  TimezoneCache() : initialized_(false) { }
-
-  void Clear() {
-    initialized_ = false;
-  }
-
-  // Initialize timezone information. The timezone information is obtained from
-  // Windows. If we cannot get the timezone information we fall back to CET.
-  void InitializeIfNeeded() {
-    // Just return if timezone information has already been initialized.
-    if (initialized_) return;
-
-    // Initialize POSIX time zone data.
-    _tzset();
-    // Obtain timezone information from operating system.
-    memset(&tzinfo_, 0, sizeof(tzinfo_));
-    if (GetTimeZoneInformation(&tzinfo_) == TIME_ZONE_ID_INVALID) {
-      // If we cannot get timezone information we fall back to CET.
-      tzinfo_.Bias = -60;
-      tzinfo_.StandardDate.wMonth = 10;
-      tzinfo_.StandardDate.wDay = 5;
-      tzinfo_.StandardDate.wHour = 3;
-      tzinfo_.StandardBias = 0;
-      tzinfo_.DaylightDate.wMonth = 3;
-      tzinfo_.DaylightDate.wDay = 5;
-      tzinfo_.DaylightDate.wHour = 2;
-      tzinfo_.DaylightBias = -60;
-    }
-
-    // Make standard and DST timezone names.
-    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.StandardName, -1,
-                        std_tz_name_, kTzNameSize, NULL, NULL);
-    std_tz_name_[kTzNameSize - 1] = '\0';
-    WideCharToMultiByte(CP_UTF8, 0, tzinfo_.DaylightName, -1,
-                        dst_tz_name_, kTzNameSize, NULL, NULL);
-    dst_tz_name_[kTzNameSize - 1] = '\0';
-
-    // If the OS returned an empty string or a resource id (like
-    // "@tzres.dll,-211"), simply guess the name from the UTC bias of the
-    // timezone. Properly resolving the resource identifier would require a
-    // library load, which is not possible in a sandbox.
-    if (std_tz_name_[0] == '\0' || std_tz_name_[0] == '@') {
-      OS::SNPrintF(std_tz_name_, kTzNameSize - 1,
-                   "%s Standard Time",
-                   GuessTimezoneNameFromBias(tzinfo_.Bias));
-    }
-    if (dst_tz_name_[0] == '\0' || dst_tz_name_[0] == '@') {
-      OS::SNPrintF(dst_tz_name_, kTzNameSize - 1,
-                   "%s Daylight Time",
-                   GuessTimezoneNameFromBias(tzinfo_.Bias));
-    }
-    // Timezone information initialized.
-    initialized_ = true;
-  }
-
-  // Guess the name of the timezone from the bias.
-  // The guess is very biased towards the northern hemisphere.
-  const char* GuessTimezoneNameFromBias(int bias) {
-    static const int kHour = 60;
-    switch (-bias) {
-      case -9*kHour: return "Alaska";
-      case -8*kHour: return "Pacific";
-      case -7*kHour: return "Mountain";
-      case -6*kHour: return "Central";
-      case -5*kHour: return "Eastern";
-      case -4*kHour: return "Atlantic";
-      case  0*kHour: return "GMT";
-      case +1*kHour: return "Central Europe";
-      case +2*kHour: return "Eastern Europe";
-      case +3*kHour: return "Russia";
-      case +5*kHour + 30: return "India";
-      case +8*kHour: return "China";
-      case +9*kHour: return "Japan";
-      case +12*kHour: return "New Zealand";
-      default: return "Local";
-    }
-  }
-
-
- private:
-  static const int kTzNameSize = 128;
-  bool initialized_;
-  char std_tz_name_[kTzNameSize];
-  char dst_tz_name_[kTzNameSize];
-  TIME_ZONE_INFORMATION tzinfo_;
-  friend class Win32Time;
-};
-
-
-// ----------------------------------------------------------------------------
-// The Time class represents time on win32. A timestamp is represented as
-// a 64-bit integer in units of 100 nanoseconds since January 1, 1601 (UTC).
-// JavaScript timestamps are represented as doubles in milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-
-class Win32Time {
- public:
-  // Constructors.
-  Win32Time();
-  explicit Win32Time(double jstime);
-  Win32Time(int year, int mon, int day, int hour, int min, int sec);
-
-  // Convert timestamp to JavaScript representation.
-  double ToJSTime();
-
-  // Set timestamp to current time.
-  void SetToCurrentTime();
-
-  // Returns the local timezone offset in milliseconds east of UTC. This is
-  // the number of milliseconds you must add to UTC to get local time, i.e.
-  // LocalOffset(CET) = 3600000 and LocalOffset(PST) = -28800000. This
-  // routine also takes into account whether daylight saving is in effect
-  // at the time.
-  int64_t LocalOffset(TimezoneCache* cache);
-
-  // Returns the daylight savings time offset for the time in milliseconds.
-  int64_t DaylightSavingsOffset(TimezoneCache* cache);
-
-  // Returns a string identifying the current timezone for the
-  // timestamp taking into account daylight saving.
-  char* LocalTimezone(TimezoneCache* cache);
-
- private:
-  // Constants for time conversion.
-  static const int64_t kTimeEpoc = 116444736000000000LL;
-  static const int64_t kTimeScaler = 10000;
-  static const int64_t kMsPerMinute = 60000;
-
-  // Constants for timezone information.
-  static const bool kShortTzNames = false;
-
-  // Return whether or not daylight savings time is in effect at this time.
-  bool InDST(TimezoneCache* cache);
-
-  // Accessor for FILETIME representation.
-  FILETIME& ft() { return time_.ft_; }
-
-  // Accessor for integer representation.
-  int64_t& t() { return time_.t_; }
-
-  // Although win32 uses 64-bit integers for representing timestamps,
-  // these are packed into a FILETIME structure. The FILETIME structure
-  // is just a struct representing a 64-bit integer. The TimeStamp union
-  // allows access to both a FILETIME and an integer representation of
-  // the timestamp.
-  union TimeStamp {
-    FILETIME ft_;
-    int64_t t_;
-  };
-
-  TimeStamp time_;
-};
-
-
-// Initialize timestamp to start of epoch.
-Win32Time::Win32Time() {
-  t() = 0;
-}
-
-
-// Initialize timestamp from a JavaScript timestamp.
-Win32Time::Win32Time(double jstime) {
-  t() = static_cast<int64_t>(jstime) * kTimeScaler + kTimeEpoc;
-}
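// A standalone numeric check of the kTimeEpoc/kTimeScaler conversion above:
// kTimeEpoc is the 1601->1970 offset in 100 ns units and kTimeScaler is the
// number of 100 ns units per millisecond, so the mapping round-trips at
// millisecond granularity.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t kTimeEpoc = 116444736000000000LL;
  const int64_t kTimeScaler = 10000;
  const int64_t js = 1234567890123LL;  // some JS time in ms
  int64_t filetime = js * kTimeScaler + kTimeEpoc;
  assert((filetime - kTimeEpoc) / kTimeScaler == js);
  assert((0 * kTimeScaler + kTimeEpoc) == kTimeEpoc);  // JS epoch -> offset
  return 0;
}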
-
-
-// Initialize timestamp from date/time components.
-Win32Time::Win32Time(int year, int mon, int day, int hour, int min, int sec) {
-  SYSTEMTIME st;
-  st.wYear = year;
-  st.wMonth = mon;
-  st.wDay = day;
-  st.wHour = hour;
-  st.wMinute = min;
-  st.wSecond = sec;
-  st.wMilliseconds = 0;
-  SystemTimeToFileTime(&st, &ft());
-}
-
-
-// Convert timestamp to JavaScript timestamp.
-double Win32Time::ToJSTime() {
-  return static_cast<double>((t() - kTimeEpoc) / kTimeScaler);
-}
-
-
-// Set timestamp to current time.
-void Win32Time::SetToCurrentTime() {
-  // The default GetSystemTimeAsFileTime has a ~15.5ms resolution.
-  // Because we're fast, we like fast timers which have at least a
-  // 1ms resolution.
-  //
-  // timeGetTime() provides 1ms granularity when combined with
-  // timeBeginPeriod().  If the host application for v8 wants fast
-  // timers, it can use timeBeginPeriod to increase the resolution.
-  //
-  // Using timeGetTime() has a drawback because it is a 32-bit value
-  // and hence rolls over every ~49 days.
-  //
-  // To use the clock, we use GetSystemTimeAsFileTime as our base;
-  // and then use timeGetTime to extrapolate current time from the
-  // start time.  To deal with rollovers, we resync the clock
-  // any time when more than kMaxClockElapsedTime has passed or
-  // whenever timeGetTime creates a rollover.
-
-  static bool initialized = false;
-  static TimeStamp init_time;
-  static DWORD init_ticks;
-  static const int64_t kHundredNanosecondsPerSecond = 10000000;
-  static const int64_t kMaxClockElapsedTime =
-      60*kHundredNanosecondsPerSecond;  // 1 minute
-
-  // If we are uninitialized, we need to resync the clock.
-  bool needs_resync = !initialized;
-
-  // Get the current time.
-  TimeStamp time_now;
-  GetSystemTimeAsFileTime(&time_now.ft_);
-  DWORD ticks_now = timeGetTime();
-
-  // Check if we need to resync due to clock rollover.
-  needs_resync |= ticks_now < init_ticks;
-
-  // Check if we need to resync due to elapsed time.
-  needs_resync |= (time_now.t_ - init_time.t_) > kMaxClockElapsedTime;
-
-  // Check if we need to resync due to backwards time change.
-  needs_resync |= time_now.t_ < init_time.t_;
-
-  // Resync the clock if necessary.
-  if (needs_resync) {
-    GetSystemTimeAsFileTime(&init_time.ft_);
-    init_ticks = ticks_now = timeGetTime();
-    initialized = true;
-  }
-
-  // Finally, compute the actual time.  Why is this so hard.
-  DWORD elapsed = ticks_now - init_ticks;
-  this->time_.t_ = init_time.t_ + (static_cast<int64_t>(elapsed) * 10000);
-}
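// A standalone check of the final extrapolation step above: the millisecond
// delta from timeGetTime() is scaled by 10000 back into 100 ns FILETIME
// units and added to the resync base.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t init_time = 130000000000000000LL;  // arbitrary FILETIME base
  const uint32_t init_ticks = 1000;
  const uint32_t ticks_now = 1250;                 // 250 ms later
  uint32_t elapsed = ticks_now - init_ticks;
  int64_t now = init_time + static_cast<int64_t>(elapsed) * 10000;
  assert(now - init_time == 250 * 10000);          // 250 ms in 100 ns units
  return 0;
}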
-
-
-// Return the local timezone offset in milliseconds east of UTC. This
-// takes into account whether daylight saving is in effect at the time.
-// Only times in the 32-bit Unix range may be passed to this function.
-// Also, adding the time-zone offset to the input must not overflow.
-// The function EquivalentTime() in date.js guarantees this.
-int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
-  cache->InitializeIfNeeded();
-
-  Win32Time rounded_to_second(*this);
-  rounded_to_second.t() = rounded_to_second.t() / 1000 / kTimeScaler *
-      1000 * kTimeScaler;
-  // Convert to local time using POSIX localtime function.
-  // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
-  // very slow.  Other browsers use localtime().
-
-  // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
-  // POSIX seconds past 1/1/1970 0:00:00.
-  double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
-  if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
-    return 0;
-  }
-  // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
-  time_t posix_time = static_cast<time_t>(unchecked_posix_time);
-
-  // Convert to local time, as struct with fields for day, hour, year, etc.
-  tm posix_local_time_struct;
-  if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
-
-  if (posix_local_time_struct.tm_isdst > 0) {
-    return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
-  } else if (posix_local_time_struct.tm_isdst == 0) {
-    return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
-  } else {
-    return cache->tzinfo_.Bias * -kMsPerMinute;
-  }
-}
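// A worked example of the bias arithmetic above: the Win32 Bias fields are
// minutes west of UTC, so for Pacific time (Bias = 480, DaylightBias = -60)
// the DST branch yields -25200000 ms, i.e. UTC-7.
#include <cassert>
#include <cstdint>

int main() {
  const int64_t kMsPerMinute = 60000;
  const int64_t bias = 480;           // Pacific: 8 hours west of UTC
  const int64_t daylight_bias = -60;  // DST moves the clock one hour east
  int64_t offset = (bias + daylight_bias) * -kMsPerMinute;
  assert(offset == -25200000);        // -7 hours in milliseconds
  return 0;
}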
-
-
-// Return whether or not daylight savings time is in effect at this time.
-bool Win32Time::InDST(TimezoneCache* cache) {
-  cache->InitializeIfNeeded();
-
-  // Determine if DST is in effect at the specified time.
-  bool in_dst = false;
-  if (cache->tzinfo_.StandardDate.wMonth != 0 ||
-      cache->tzinfo_.DaylightDate.wMonth != 0) {
-    // Get the local timezone offset for the timestamp in milliseconds.
-    int64_t offset = LocalOffset(cache);
-
-    // Compute the offset for DST. The bias parameters in the timezone info
-    // are specified in minutes. These must be converted to milliseconds.
-    int64_t dstofs =
-        -(cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * kMsPerMinute;
-
-    // If the local time offset equals the timezone bias plus the daylight
-    // bias then DST is in effect.
-    in_dst = offset == dstofs;
-  }
-
-  return in_dst;
-}
-
-
-// Return the daylight savings time offset for this time.
-int64_t Win32Time::DaylightSavingsOffset(TimezoneCache* cache) {
-  return InDST(cache) ? 60 * kMsPerMinute : 0;
-}
-
-
-// Returns a string identifying the current timezone for the
-// timestamp taking into account daylight saving.
-char* Win32Time::LocalTimezone(TimezoneCache* cache) {
-  // Return the standard or DST time zone name based on whether daylight
-  // saving is in effect at the given time.
-  return InDST(cache) ? cache->dst_tz_name_ : cache->std_tz_name_;
-}
-
-
-// Returns the accumulated user time for the thread.
-int OS::GetUserTime(uint32_t* secs,  uint32_t* usecs) {
-  FILETIME dummy;
-  uint64_t usertime;
-
-  // Get the amount of time that the thread has executed in user mode.
-  if (!GetThreadTimes(GetCurrentThread(), &dummy, &dummy, &dummy,
-                      reinterpret_cast<FILETIME*>(&usertime))) return -1;
-
-  // Adjust the resolution to micro-seconds.
-  usertime /= 10;
-
-  // Convert to seconds and microseconds
-  *secs = static_cast<uint32_t>(usertime / 1000000);
-  *usecs = static_cast<uint32_t>(usertime % 1000000);
-  return 0;
-}
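// A standalone check of the unit conversion above: GetThreadTimes() reports
// 100 ns ticks, dividing by 10 gives microseconds, which then split into
// whole seconds and the microsecond remainder.
#include <cassert>
#include <cstdint>

int main() {
  uint64_t usertime = 12345678901ULL;  // 100 ns ticks (~1234.57 s)
  usertime /= 10;                      // now microseconds
  uint32_t secs = static_cast<uint32_t>(usertime / 1000000);
  uint32_t usecs = static_cast<uint32_t>(usertime % 1000000);
  assert(secs == 1234 && usecs == 567890);
  return 0;
}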
-
-
-// Returns current time as the number of milliseconds since
-// 00:00:00 UTC, January 1, 1970.
-double OS::TimeCurrentMillis() {
-  return Time::Now().ToJsTime();
-}
-
-
-TimezoneCache* OS::CreateTimezoneCache() {
-  return new TimezoneCache();
-}
-
-
-void OS::DisposeTimezoneCache(TimezoneCache* cache) {
-  delete cache;
-}
-
-
-void OS::ClearTimezoneCache(TimezoneCache* cache) {
-  cache->Clear();
-}
-
-
-// Returns a string identifying the current timezone taking into
-// account daylight saving.
-const char* OS::LocalTimezone(double time, TimezoneCache* cache) {
-  return Win32Time(time).LocalTimezone(cache);
-}
-
-
-// Returns the local time offset in milliseconds east of UTC without
-// taking daylight savings time into account.
-double OS::LocalTimeOffset(TimezoneCache* cache) {
-  // Use current time, rounded to the millisecond.
-  Win32Time t(TimeCurrentMillis());
-  // Time::LocalOffset includes any daylight savings offset, so subtract it.
-  return static_cast<double>(t.LocalOffset(cache) -
-                             t.DaylightSavingsOffset(cache));
-}
-
-
-// Returns the daylight savings offset in milliseconds for the given
-// time.
-double OS::DaylightSavingsOffset(double time, TimezoneCache* cache) {
-  int64_t offset = Win32Time(time).DaylightSavingsOffset(cache);
-  return static_cast<double>(offset);
-}
-
-
-int OS::GetLastError() {
-  return ::GetLastError();
-}
-
-
-int OS::GetCurrentProcessId() {
-  return static_cast<int>(::GetCurrentProcessId());
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 console output.
-//
-// If a Win32 application is linked as a console application it has a normal
-// standard output and standard error. In this case normal printf works fine
-// for output. However, if the application is linked as a GUI application,
-// the process doesn't have a console, and therefore (debugging) output is lost.
-// This is the case if we are embedded in a windows program (like a browser).
-// In order to get debug output in this case we use the debugging facility
-// OutputDebugString. This output goes to the active debugger for the process
-// (if any). Otherwise the output can be monitored using DBMON.EXE.
-
-enum OutputMode {
-  UNKNOWN,  // Output method has not yet been determined.
-  CONSOLE,  // Output is written to stdout.
-  ODS       // Output is written to debug facility.
-};
-
-static OutputMode output_mode = UNKNOWN;  // Current output mode.
-
-
-// Determine if the process has a console for output.
-static bool HasConsole() {
-  // Only check the first time. Possible race conditions are not a problem,
-  // because all threads will eventually determine the same mode.
-  if (output_mode == UNKNOWN) {
-    // We cannot just check that the standard output is attached to a console
-    // because this would fail if output is redirected to a file. Therefore we
-    // say that a process does not have an output console if either the
-    // standard output handle is invalid or its file type is unknown.
-    if (GetStdHandle(STD_OUTPUT_HANDLE) != INVALID_HANDLE_VALUE &&
-        GetFileType(GetStdHandle(STD_OUTPUT_HANDLE)) != FILE_TYPE_UNKNOWN)
-      output_mode = CONSOLE;
-    else
-      output_mode = ODS;
-  }
-  return output_mode == CONSOLE;
-}
-
-
-static void VPrintHelper(FILE* stream, const char* format, va_list args) {
-  if ((stream == stdout || stream == stderr) && !HasConsole()) {
-    // It is important to use safe print here in order to avoid
-    // overflowing the buffer. We might truncate the output, but this
-    // does not crash.
-    char buffer[4096];
-    OS::VSNPrintF(buffer, sizeof(buffer), format, args);
-    OutputDebugStringA(buffer);
-  } else {
-    vfprintf(stream, format, args);
-  }
-}
-
-
-FILE* OS::FOpen(const char* path, const char* mode) {
-  FILE* result;
-  if (fopen_s(&result, path, mode) == 0) {
-    return result;
-  } else {
-    return NULL;
-  }
-}
-
-
-bool OS::Remove(const char* path) {
-  return (DeleteFileA(path) != 0);
-}
-
-
-FILE* OS::OpenTemporaryFile() {
-  // tmpfile_s tries to use the root dir; don't use it.
-  char tempPathBuffer[MAX_PATH];
-  DWORD path_result = 0;
-  path_result = GetTempPathA(MAX_PATH, tempPathBuffer);
-  if (path_result > MAX_PATH || path_result == 0) return NULL;
-  UINT name_result = 0;
-  char tempNameBuffer[MAX_PATH];
-  name_result = GetTempFileNameA(tempPathBuffer, "", 0, tempNameBuffer);
-  if (name_result == 0) return NULL;
-  FILE* result = FOpen(tempNameBuffer, "w+");  // Same mode as tmpfile uses.
-  if (result != NULL) {
-    Remove(tempNameBuffer);  // Delete on close.
-  }
-  return result;
-}
-
-
-// Open log file in binary mode to avoid \n -> \r\n conversion.
-const char* const OS::LogFileOpenMode = "wb";
-
-
-// Print (debug) message to console.
-void OS::Print(const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  VPrint(format, args);
-  va_end(args);
-}
-
-
-void OS::VPrint(const char* format, va_list args) {
-  VPrintHelper(stdout, format, args);
-}
-
-
-void OS::FPrint(FILE* out, const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  VFPrint(out, format, args);
-  va_end(args);
-}
-
-
-void OS::VFPrint(FILE* out, const char* format, va_list args) {
-  VPrintHelper(out, format, args);
-}
-
-
-// Print error message to console.
-void OS::PrintError(const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  VPrintError(format, args);
-  va_end(args);
-}
-
-
-void OS::VPrintError(const char* format, va_list args) {
-  VPrintHelper(stderr, format, args);
-}
-
-
-int OS::SNPrintF(char* str, int length, const char* format, ...) {
-  va_list args;
-  va_start(args, format);
-  int result = VSNPrintF(str, length, format, args);
-  va_end(args);
-  return result;
-}
-
-
-int OS::VSNPrintF(char* str, int length, const char* format, va_list args) {
-  int n = _vsnprintf_s(str, length, _TRUNCATE, format, args);
-  // Make sure to zero-terminate the string if the output was
-  // truncated or if there was an error.
-  if (n < 0 || n >= length) {
-    if (length > 0)
-      str[length - 1] = '\0';
-    return -1;
-  } else {
-    return n;
-  }
-}
-
-
-char* OS::StrChr(char* str, int c) {
-  return const_cast<char*>(strchr(str, c));
-}
-
-
-void OS::StrNCpy(char* dest, int length, const char* src, size_t n) {
-  // Use _TRUNCATE or strncpy_s crashes (by design) if buffer is too small.
-  size_t buffer_size = static_cast<size_t>(length);
-  if (n + 1 > buffer_size)  // account for the trailing '\0'
-    n = _TRUNCATE;
-  int result = strncpy_s(dest, length, src, n);
-  USE(result);
-  ASSERT(result == 0 || (n == _TRUNCATE && result == STRUNCATE));
-}
-
-
-#undef _TRUNCATE
-#undef STRUNCATE
-
-
-// Get the system's page size used by VirtualAlloc(), rounded up to the
-// next power of two if necessary. The reason for always returning a power
-// of two is that the rounding up in OS::Allocate expects that.
-static size_t GetPageSize() {
-  static size_t page_size = 0;
-  if (page_size == 0) {
-    SYSTEM_INFO info;
-    GetSystemInfo(&info);
-    page_size = RoundUpToPowerOf2(info.dwPageSize);
-  }
-  return page_size;
-}
-
-
-// The allocation alignment is the guaranteed alignment for
-// VirtualAlloc'ed blocks of memory.
-size_t OS::AllocateAlignment() {
-  static size_t allocate_alignment = 0;
-  if (allocate_alignment == 0) {
-    SYSTEM_INFO info;
-    GetSystemInfo(&info);
-    allocate_alignment = info.dwAllocationGranularity;
-  }
-  return allocate_alignment;
-}
-
-
-void* OS::GetRandomMmapAddr() {
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  // Note that the current isolate isn't set up in a call path via
-  // CpuFeatures::Probe. We don't care about randomization in this case because
-  // the code page is immediately freed.
-  if (isolate != NULL) {
-    // The address range used to randomize RWX allocations in OS::Allocate.
-    // Try not to map pages into the default range where Windows loads DLLs.
-    // Use a multiple of 64k to prevent committing unused memory.
-    // Note: This does not guarantee RWX regions will be within the range
-    // kAllocationRandomAddressMin to kAllocationRandomAddressMax.
-#ifdef V8_HOST_ARCH_64_BIT
-    static const intptr_t kAllocationRandomAddressMin = 0x0000000080000000;
-    static const intptr_t kAllocationRandomAddressMax = 0x000003FFFFFF0000;
-#else
-    static const intptr_t kAllocationRandomAddressMin = 0x04000000;
-    static const intptr_t kAllocationRandomAddressMax = 0x3FFF0000;
-#endif
-    uintptr_t address =
-        (isolate->random_number_generator()->NextInt() << kPageSizeBits) |
-        kAllocationRandomAddressMin;
-    address &= kAllocationRandomAddressMax;
-    return reinterpret_cast<void *>(address);
-  }
-  return NULL;
-}
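// A standalone check of the 32-bit address shaping above: the shift
// page-aligns the random value, the OR pins it above the minimum, and the
// AND clamps it below the maximum while zeroing the low 16 bits (so the
// result is a multiple of 64 KB). kPageSizeBits is assumed to be 12 here.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kMin = 0x04000000;
  const uintptr_t kMax = 0x3FFF0000;
  const int kPageSizeBits = 12;  // assumed 4 KB pages
  uintptr_t random = 0x12345;    // stand-in for NextInt()
  uintptr_t address = ((random << kPageSizeBits) | kMin) & kMax;
  assert(address >= kMin && address <= kMax);
  assert(address % 0x10000 == 0);  // 64 KB granularity
  return 0;
}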
-
-
-static void* RandomizedVirtualAlloc(size_t size, int action, int protection) {
-  LPVOID base = NULL;
-
-  if (protection == PAGE_EXECUTE_READWRITE || protection == PAGE_NOACCESS) {
-    // For executable pages, try to randomize the allocation address.
-    for (size_t attempts = 0; base == NULL && attempts < 3; ++attempts) {
-      base = VirtualAlloc(OS::GetRandomMmapAddr(), size, action, protection);
-    }
-  }
-
-  // After three attempts give up and let the OS find an address to use.
-  if (base == NULL) base = VirtualAlloc(NULL, size, action, protection);
-
-  return base;
-}
-
-
-void* OS::Allocate(const size_t requested,
-                   size_t* allocated,
-                   bool is_executable) {
-  // VirtualAlloc rounds allocated size to page size automatically.
-  size_t msize = RoundUp(requested, static_cast<int>(GetPageSize()));
-
-  // Windows XP SP2 allows Data Execution Prevention (DEP).
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-
-  LPVOID mbase = RandomizedVirtualAlloc(msize,
-                                        MEM_COMMIT | MEM_RESERVE,
-                                        prot);
-
-  if (mbase == NULL) return NULL;
-
-  ASSERT(IsAligned(reinterpret_cast<size_t>(mbase), OS::AllocateAlignment()));
-
-  *allocated = msize;
-  return mbase;
-}
-
-
-void OS::Free(void* address, const size_t size) {
-  // TODO(1240712): VirtualFree has a return value which is ignored here.
-  VirtualFree(address, 0, MEM_RELEASE);
-  USE(size);
-}
-
-
-intptr_t OS::CommitPageSize() {
-  return 4096;
-}
-
-
-void OS::ProtectCode(void* address, const size_t size) {
-  DWORD old_protect;
-  VirtualProtect(address, size, PAGE_EXECUTE_READ, &old_protect);
-}
-
-
-void OS::Guard(void* address, const size_t size) {
-  DWORD oldprotect;
-  VirtualProtect(address, size, PAGE_NOACCESS, &oldprotect);
-}
-
-
-void OS::Sleep(int milliseconds) {
-  ::Sleep(milliseconds);
-}
-
-
-void OS::Abort() {
-  if (FLAG_hard_abort) {
-    V8_IMMEDIATE_CRASH();
-  }
-  // Make the MSVCRT do a silent abort.
-  raise(SIGABRT);
-}
-
-
-void OS::DebugBreak() {
-#ifdef _MSC_VER
-  // To avoid requiring Visual Studio runtime support, the following code
-  // can be used instead:
-  // __asm { int 3 }
-  __debugbreak();
-#else
-  ::DebugBreak();
-#endif
-}
-
-
-class Win32MemoryMappedFile : public OS::MemoryMappedFile {
- public:
-  Win32MemoryMappedFile(HANDLE file,
-                        HANDLE file_mapping,
-                        void* memory,
-                        int size)
-      : file_(file),
-        file_mapping_(file_mapping),
-        memory_(memory),
-        size_(size) { }
-  virtual ~Win32MemoryMappedFile();
-  virtual void* memory() { return memory_; }
-  virtual int size() { return size_; }
- private:
-  HANDLE file_;
-  HANDLE file_mapping_;
-  void* memory_;
-  int size_;
-};
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::open(const char* name) {
-  // Open a physical file
-  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
-      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0, NULL);
-  if (file == INVALID_HANDLE_VALUE) return NULL;
-
-  int size = static_cast<int>(GetFileSize(file, NULL));
-
-  // Create a file mapping for the physical file
-  HANDLE file_mapping = CreateFileMapping(file, NULL,
-      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
-  if (file_mapping == NULL) return NULL;
-
-  // Map a view of the file into memory
-  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
-  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
-}
-
-
-OS::MemoryMappedFile* OS::MemoryMappedFile::create(const char* name, int size,
-    void* initial) {
-  // Open a physical file
-  HANDLE file = CreateFileA(name, GENERIC_READ | GENERIC_WRITE,
-      FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_ALWAYS, 0, NULL);
-  if (file == INVALID_HANDLE_VALUE) return NULL;
-  // Create a file mapping for the physical file
-  HANDLE file_mapping = CreateFileMapping(file, NULL,
-      PAGE_READWRITE, 0, static_cast<DWORD>(size), NULL);
-  if (file_mapping == NULL) return NULL;
-  // Map a view of the file into memory
-  void* memory = MapViewOfFile(file_mapping, FILE_MAP_ALL_ACCESS, 0, 0, size);
-  if (memory) MemMove(memory, initial, size);
-  return new Win32MemoryMappedFile(file, file_mapping, memory, size);
-}
-
-
-Win32MemoryMappedFile::~Win32MemoryMappedFile() {
-  if (memory_ != NULL)
-    UnmapViewOfFile(memory_);
-  CloseHandle(file_mapping_);
-  CloseHandle(file_);
-}
-
-
-// The following code loads functions defined in DbgHelp.h and TlHelp32.h
-// dynamically. This is to avoid depending on dbghelp.dll and tlhelp32.dll
-// when running (the functions in tlhelp32.dll have been moved to kernel32.dll
-// at some point, so loading the functions defined in TlHelp32.h dynamically
-// might no longer be necessary - for some versions of Windows?).
-
-// Function pointers to functions dynamically loaded from dbghelp.dll.
-#define DBGHELP_FUNCTION_LIST(V)  \
-  V(SymInitialize)                \
-  V(SymGetOptions)                \
-  V(SymSetOptions)                \
-  V(SymGetSearchPath)             \
-  V(SymLoadModule64)              \
-  V(StackWalk64)                  \
-  V(SymGetSymFromAddr64)          \
-  V(SymGetLineFromAddr64)         \
-  V(SymFunctionTableAccess64)     \
-  V(SymGetModuleBase64)
-
-// Function pointers to functions dynamically loaded from kernel32.dll
-// (declared in TlHelp32.h).
-#define TLHELP32_FUNCTION_LIST(V)  \
-  V(CreateToolhelp32Snapshot)      \
-  V(Module32FirstW)                \
-  V(Module32NextW)
-
-// Define the decoration to use for the type and variable name used for
-// dynamically loaded DLL functions.
-#define DLL_FUNC_TYPE(name) _##name##_
-#define DLL_FUNC_VAR(name) _##name
-
-// Define the type for each dynamically loaded DLL function. The function
-// definitions are copied from DbgHelp.h and TlHelp32.h. The IN and VOID macros
-// from the Windows include files are redefined here to have the function
-// definitions to be as close to the ones in the original .h files as possible.
-#ifndef IN
-#define IN
-#endif
-#ifndef VOID
-#define VOID void
-#endif
-
-// DbgHelp isn't supported on MinGW yet
-#ifndef __MINGW32__
-// DbgHelp.h functions.
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymInitialize))(IN HANDLE hProcess,
-                                                       IN PSTR UserSearchPath,
-                                                       IN BOOL fInvadeProcess);
-typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymGetOptions))(VOID);
-typedef DWORD (__stdcall *DLL_FUNC_TYPE(SymSetOptions))(IN DWORD SymOptions);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSearchPath))(
-    IN HANDLE hProcess,
-    OUT PSTR SearchPath,
-    IN DWORD SearchPathLength);
-typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymLoadModule64))(
-    IN HANDLE hProcess,
-    IN HANDLE hFile,
-    IN PSTR ImageName,
-    IN PSTR ModuleName,
-    IN DWORD64 BaseOfDll,
-    IN DWORD SizeOfDll);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(StackWalk64))(
-    DWORD MachineType,
-    HANDLE hProcess,
-    HANDLE hThread,
-    LPSTACKFRAME64 StackFrame,
-    PVOID ContextRecord,
-    PREAD_PROCESS_MEMORY_ROUTINE64 ReadMemoryRoutine,
-    PFUNCTION_TABLE_ACCESS_ROUTINE64 FunctionTableAccessRoutine,
-    PGET_MODULE_BASE_ROUTINE64 GetModuleBaseRoutine,
-    PTRANSLATE_ADDRESS_ROUTINE64 TranslateAddress);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetSymFromAddr64))(
-    IN HANDLE hProcess,
-    IN DWORD64 qwAddr,
-    OUT PDWORD64 pdwDisplacement,
-    OUT PIMAGEHLP_SYMBOL64 Symbol);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(SymGetLineFromAddr64))(
-    IN HANDLE hProcess,
-    IN DWORD64 qwAddr,
-    OUT PDWORD pdwDisplacement,
-    OUT PIMAGEHLP_LINE64 Line64);
-// DbgHelp.h typedefs. Implementation found in dbghelp.dll.
-typedef PVOID (__stdcall *DLL_FUNC_TYPE(SymFunctionTableAccess64))(
-    HANDLE hProcess,
-    DWORD64 AddrBase);  // DbgHelp.h typedef PFUNCTION_TABLE_ACCESS_ROUTINE64
-typedef DWORD64 (__stdcall *DLL_FUNC_TYPE(SymGetModuleBase64))(
-    HANDLE hProcess,
-    DWORD64 AddrBase);  // DbgHelp.h typedef PGET_MODULE_BASE_ROUTINE64
-
-// TlHelp32.h functions.
-typedef HANDLE (__stdcall *DLL_FUNC_TYPE(CreateToolhelp32Snapshot))(
-    DWORD dwFlags,
-    DWORD th32ProcessID);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32FirstW))(HANDLE hSnapshot,
-                                                        LPMODULEENTRY32W lpme);
-typedef BOOL (__stdcall *DLL_FUNC_TYPE(Module32NextW))(HANDLE hSnapshot,
-                                                       LPMODULEENTRY32W lpme);
-
-#undef IN
-#undef VOID
-
-// Declare a variable for each dynamically loaded DLL function.
-#define DEF_DLL_FUNCTION(name) DLL_FUNC_TYPE(name) DLL_FUNC_VAR(name) = NULL;
-DBGHELP_FUNCTION_LIST(DEF_DLL_FUNCTION)
-TLHELP32_FUNCTION_LIST(DEF_DLL_FUNCTION)
-#undef DEF_DLL_FUNCTION
-
-// Load the functions. This function has a lot of "ugly" macros in order to
-// keep down code duplication.
-
-static bool LoadDbgHelpAndTlHelp32() {
-  static bool dbghelp_loaded = false;
-
-  if (dbghelp_loaded) return true;
-
-  HMODULE module;
-
-  // Load functions from the dbghelp.dll module.
-  module = LoadLibrary(TEXT("dbghelp.dll"));
-  if (module == NULL) {
-    return false;
-  }
-
-#define LOAD_DLL_FUNC(name)                                                 \
-  DLL_FUNC_VAR(name) =                                                      \
-      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
-
-DBGHELP_FUNCTION_LIST(LOAD_DLL_FUNC)
-
-#undef LOAD_DLL_FUNC
-
-  // Load functions from the kernel32.dll module (the TlHelp32.h functions
-  // used to be in tlhelp32.dll but have now moved to kernel32.dll).
-  module = LoadLibrary(TEXT("kernel32.dll"));
-  if (module == NULL) {
-    return false;
-  }
-
-#define LOAD_DLL_FUNC(name)                                                 \
-  DLL_FUNC_VAR(name) =                                                      \
-      reinterpret_cast<DLL_FUNC_TYPE(name)>(GetProcAddress(module, #name));
-
-TLHELP32_FUNCTION_LIST(LOAD_DLL_FUNC)
-
-#undef LOAD_DLL_FUNC
-
-  // Check that all functions were loaded.
-  bool result =
-#define DLL_FUNC_LOADED(name) (DLL_FUNC_VAR(name) != NULL) &&
-
-DBGHELP_FUNCTION_LIST(DLL_FUNC_LOADED)
-TLHELP32_FUNCTION_LIST(DLL_FUNC_LOADED)
-
-#undef DLL_FUNC_LOADED
-  true;
-
-  dbghelp_loaded = result;
-  return result;
-  // NOTE: The modules are never unloaded and will stay around until the
-  // application is closed.
-}
-
-#undef DBGHELP_FUNCTION_LIST
-#undef TLHELP32_FUNCTION_LIST
-#undef DLL_FUNC_VAR
-#undef DLL_FUNC_TYPE
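// The X-macro pattern used above, reduced to a standalone sketch: a single
// list macro stamps out the pointer variables, the binding code, and the
// all-loaded check, including the same trailing-"&&"-then-"true" trick as
// DLL_FUNC_LOADED. Local stub functions stand in for GetProcAddress lookups.
static double StubSin(double x) { return x; }
static double StubCos(double x) { return 1.0 - x; }

#define DEMO_FUNCTION_LIST(V) \
  V(StubSin)                  \
  V(StubCos)

#define DEF_FUNC_PTR(name) static double (*p_##name)(double) = nullptr;
DEMO_FUNCTION_LIST(DEF_FUNC_PTR)
#undef DEF_FUNC_PTR

static bool BindAll() {
#define BIND_FUNC(name) p_##name = &name;
  DEMO_FUNCTION_LIST(BIND_FUNC)
#undef BIND_FUNC
  bool result =
#define FUNC_BOUND(name) (p_##name != nullptr) &&
      DEMO_FUNCTION_LIST(FUNC_BOUND)
#undef FUNC_BOUND
      true;
  return result;
}

int main() { return BindAll() ? 0 : 1; }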
-
-
-// Load the symbols for generating stack traces.
-static std::vector<OS::SharedLibraryAddress> LoadSymbols(
-    HANDLE process_handle) {
-  static std::vector<OS::SharedLibraryAddress> result;
-
-  static bool symbols_loaded = false;
-
-  if (symbols_loaded) return result;
-
-  BOOL ok;
-
-  // Initialize the symbol engine.
-  ok = _SymInitialize(process_handle,  // hProcess
-                      NULL,            // UserSearchPath
-                      false);          // fInvadeProcess
-  if (!ok) return result;
-
-  DWORD options = _SymGetOptions();
-  options |= SYMOPT_LOAD_LINES;
-  options |= SYMOPT_FAIL_CRITICAL_ERRORS;
-  options = _SymSetOptions(options);
-
-  char buf[OS::kStackWalkMaxNameLen] = {0};
-  ok = _SymGetSearchPath(process_handle, buf, OS::kStackWalkMaxNameLen);
-  if (!ok) {
-    int err = GetLastError();
-    PrintF("%d\n", err);
-    return result;
-  }
-
-  HANDLE snapshot = _CreateToolhelp32Snapshot(
-      TH32CS_SNAPMODULE,       // dwFlags
-      GetCurrentProcessId());  // th32ProcessId
-  if (snapshot == INVALID_HANDLE_VALUE) return result;
-  MODULEENTRY32W module_entry;
-  module_entry.dwSize = sizeof(module_entry);  // Set the size of the structure.
-  BOOL cont = _Module32FirstW(snapshot, &module_entry);
-  while (cont) {
-    DWORD64 base;
-    // NOTE: the SymLoadModule64 function has the peculiarity of accepting
-    // both Unicode and ASCII strings even though the parameter is PSTR.
-    base = _SymLoadModule64(
-        process_handle,                                       // hProcess
-        0,                                                    // hFile
-        reinterpret_cast<PSTR>(module_entry.szExePath),       // ImageName
-        reinterpret_cast<PSTR>(module_entry.szModule),        // ModuleName
-        reinterpret_cast<DWORD64>(module_entry.modBaseAddr),  // BaseOfDll
-        module_entry.modBaseSize);                            // SizeOfDll
-    if (base == 0) {
-      int err = GetLastError();
-      if (err != ERROR_MOD_NOT_FOUND &&
-          err != ERROR_INVALID_HANDLE) {
-        result.clear();
-        return result;
-      }
-    }
-    int lib_name_length = WideCharToMultiByte(
-        CP_UTF8, 0, module_entry.szExePath, -1, NULL, 0, NULL, NULL);
-    std::string lib_name(lib_name_length, 0);
-    WideCharToMultiByte(CP_UTF8, 0, module_entry.szExePath, -1, &lib_name[0],
-                        lib_name_length, NULL, NULL);
-    result.push_back(OS::SharedLibraryAddress(
-        lib_name, reinterpret_cast<unsigned int>(module_entry.modBaseAddr),
-        reinterpret_cast<unsigned int>(module_entry.modBaseAddr +
-                                       module_entry.modBaseSize)));
-    cont = _Module32NextW(snapshot, &module_entry);
-  }
-  CloseHandle(snapshot);
-
-  symbols_loaded = true;
-  return result;
-}
-
-
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  // SharedLibraryEvents are logged when loading symbol information.
-  // Only the shared libraries loaded at the time of the call to
-  // GetSharedLibraryAddresses are logged.  DLLs loaded after
-  // initialization are not accounted for.
-  if (!LoadDbgHelpAndTlHelp32()) return std::vector<OS::SharedLibraryAddress>();
-  HANDLE process_handle = GetCurrentProcess();
-  return LoadSymbols(process_handle);
-}
-
-
-void OS::SignalCodeMovingGC() {
-}
-
-
-uint64_t OS::TotalPhysicalMemory() {
-  MEMORYSTATUSEX memory_info;
-  memory_info.dwLength = sizeof(memory_info);
-  if (!GlobalMemoryStatusEx(&memory_info)) {
-    UNREACHABLE();
-    return 0;
-  }
-
-  return static_cast<uint64_t>(memory_info.ullTotalPhys);
-}
-
-
-#else  // __MINGW32__
-std::vector<OS::SharedLibraryAddress> OS::GetSharedLibraryAddresses() {
-  return std::vector<OS::SharedLibraryAddress>();
-}
-
-
-void OS::SignalCodeMovingGC() { }
-#endif  // __MINGW32__
-
-
-int OS::NumberOfProcessorsOnline() {
-  SYSTEM_INFO info;
-  GetSystemInfo(&info);
-  return info.dwNumberOfProcessors;
-}
-
-
-double OS::nan_value() {
-#ifdef _MSC_VER
-  // Positive Quiet NaN with no payload (aka. Indeterminate) has all bits
-  // in mask set, so value equals mask.
-  static const __int64 nanval = kQuietNaNMask;
-  return *reinterpret_cast<const double*>(&nanval);
-#else  // _MSC_VER
-  return NAN;
-#endif  // _MSC_VER
-}
-
-
-int OS::ActivationFrameAlignment() {
-#ifdef _WIN64
-  return 16;  // Windows 64-bit ABI requires the stack to be 16-byte aligned.
-#elif defined(__MINGW32__)
-  // With gcc 4.4 the tree vectorization optimizer can generate code
-  // that requires 16 byte alignment such as movdqa on x86.
-  return 16;
-#else
-  return 8;  // Floating-point math runs faster with 8-byte alignment.
-#endif
-}
-
-
-VirtualMemory::VirtualMemory() : address_(NULL), size_(0) { }
-
-
-VirtualMemory::VirtualMemory(size_t size)
-    : address_(ReserveRegion(size)), size_(size) { }
-
-
-VirtualMemory::VirtualMemory(size_t size, size_t alignment)
-    : address_(NULL), size_(0) {
-  ASSERT(IsAligned(alignment, static_cast<intptr_t>(OS::AllocateAlignment())));
-  size_t request_size = RoundUp(size + alignment,
-                                static_cast<intptr_t>(OS::AllocateAlignment()));
-  void* address = ReserveRegion(request_size);
-  if (address == NULL) return;
-  Address base = RoundUp(static_cast<Address>(address), alignment);
-  // Try reducing the size by freeing and then reallocating a specific area.
-  bool result = ReleaseRegion(address, request_size);
-  USE(result);
-  ASSERT(result);
-  address = VirtualAlloc(base, size, MEM_RESERVE, PAGE_NOACCESS);
-  if (address != NULL) {
-    request_size = size;
-    ASSERT(base == static_cast<Address>(address));
-  } else {
-    // Resizing failed, just go with a bigger area.
-    address = ReserveRegion(request_size);
-    if (address == NULL) return;
-  }
-  address_ = address;
-  size_ = request_size;
-}
-
-
-VirtualMemory::~VirtualMemory() {
-  if (IsReserved()) {
-    bool result = ReleaseRegion(address(), size());
-    ASSERT(result);
-    USE(result);
-  }
-}
-
-
-bool VirtualMemory::IsReserved() {
-  return address_ != NULL;
-}
-
-
-void VirtualMemory::Reset() {
-  address_ = NULL;
-  size_ = 0;
-}
-
-
-bool VirtualMemory::Commit(void* address, size_t size, bool is_executable) {
-  return CommitRegion(address, size, is_executable);
-}
-
-
-bool VirtualMemory::Uncommit(void* address, size_t size) {
-  ASSERT(IsReserved());
-  return UncommitRegion(address, size);
-}
-
-
-bool VirtualMemory::Guard(void* address) {
-  if (NULL == VirtualAlloc(address,
-                           OS::CommitPageSize(),
-                           MEM_COMMIT,
-                           PAGE_NOACCESS)) {
-    return false;
-  }
-  return true;
-}
-
-
-void* VirtualMemory::ReserveRegion(size_t size) {
-  return RandomizedVirtualAlloc(size, MEM_RESERVE, PAGE_NOACCESS);
-}
-
-
-bool VirtualMemory::CommitRegion(void* base, size_t size, bool is_executable) {
-  int prot = is_executable ? PAGE_EXECUTE_READWRITE : PAGE_READWRITE;
-  if (NULL == VirtualAlloc(base, size, MEM_COMMIT, prot)) {
-    return false;
-  }
-  return true;
-}
-
-
-bool VirtualMemory::UncommitRegion(void* base, size_t size) {
-  return VirtualFree(base, size, MEM_DECOMMIT) != 0;
-}
-
-
-bool VirtualMemory::ReleaseRegion(void* base, size_t size) {
-  return VirtualFree(base, 0, MEM_RELEASE) != 0;
-}
-
-
-bool VirtualMemory::HasLazyCommits() {
-  // TODO(alph): implement for the platform.
-  return false;
-}
-
-
-// ----------------------------------------------------------------------------
-// Win32 thread support.
-
-// Definition of invalid thread handle and id.
-static const HANDLE kNoThread = INVALID_HANDLE_VALUE;
-
-// Entry point for threads. The supplied argument is a pointer to the thread
-// object. The entry function dispatches to the run method in the thread
-// object. It is important that this function has the __stdcall calling
-// convention.
-static unsigned int __stdcall ThreadEntry(void* arg) {
-  Thread* thread = reinterpret_cast<Thread*>(arg);
-  thread->NotifyStartedAndRun();
-  return 0;
-}
-
-
-class Thread::PlatformData : public Malloced {
- public:
-  explicit PlatformData(HANDLE thread) : thread_(thread) {}
-  HANDLE thread_;
-  unsigned thread_id_;
-};
-
-
-// Initialize a Win32 thread object. The thread has an invalid thread
-// handle until it is started.
-
-Thread::Thread(const Options& options)
-    : stack_size_(options.stack_size()),
-      start_semaphore_(NULL) {
-  data_ = new PlatformData(kNoThread);
-  set_name(options.name());
-}
-
-
-void Thread::set_name(const char* name) {
-  OS::StrNCpy(name_, sizeof(name_), name, strlen(name));
-  name_[sizeof(name_) - 1] = '\0';
-}
-
-
-// Close our own handle for the thread.
-Thread::~Thread() {
-  if (data_->thread_ != kNoThread) CloseHandle(data_->thread_);
-  delete data_;
-}
-
-
-// Create a new thread. It is important to use _beginthreadex() instead of
-// the Win32 function CreateThread(), because CreateThread() does not
-// initialize thread-specific structures in the C runtime library.
-void Thread::Start() {
-  data_->thread_ = reinterpret_cast<HANDLE>(
-      _beginthreadex(NULL,
-                     static_cast<unsigned>(stack_size_),
-                     ThreadEntry,
-                     this,
-                     0,
-                     &data_->thread_id_));
-}
-
-
-// Wait for thread to terminate.
-void Thread::Join() {
-  if (data_->thread_id_ != GetCurrentThreadId()) {
-    WaitForSingleObject(data_->thread_, INFINITE);
-  }
-}
-
-
-Thread::LocalStorageKey Thread::CreateThreadLocalKey() {
-  DWORD result = TlsAlloc();
-  ASSERT(result != TLS_OUT_OF_INDEXES);
-  return static_cast<LocalStorageKey>(result);
-}
-
-
-void Thread::DeleteThreadLocalKey(LocalStorageKey key) {
-  BOOL result = TlsFree(static_cast<DWORD>(key));
-  USE(result);
-  ASSERT(result);
-}
-
-
-void* Thread::GetThreadLocal(LocalStorageKey key) {
-  return TlsGetValue(static_cast<DWORD>(key));
-}
-
-
-void Thread::SetThreadLocal(LocalStorageKey key, void* value) {
-  BOOL result = TlsSetValue(static_cast<DWORD>(key), value);
-  USE(result);
-  ASSERT(result);
-}
-
-
-
-void Thread::YieldCPU() {
-  Sleep(0);
-}
-
-} }  // namespace v8::internal
diff --git a/src/platform.h b/src/platform.h
deleted file mode 100644
index 497e3a8..0000000
--- a/src/platform.h
+++ /dev/null
@@ -1,512 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// This module contains the platform-specific code. This makes the rest of the
-// code less dependent on operating systems, compilers and runtime libraries.
-// This module specifically does not deal with differences between
-// processor architectures.
-// The platform classes have the same definition for all platforms. The
-// implementation for a particular platform is put in platform_<os>.cc.
-// The build system then uses the implementation for the target platform.
-//
-// This design has been chosen because it is simple and fast. Alternatively,
-// the platform-dependent classes could have been implemented using abstract
-// superclasses with virtual methods and specializations for each
-// platform. This design was rejected because it was more complicated and
-// slower. It would require factory methods for selecting the right
-// implementation and would add the overhead of virtual methods for
-// performance-sensitive operations like mutex locking/unlocking.
-
-#ifndef V8_PLATFORM_H_
-#define V8_PLATFORM_H_
-
-#include <stdarg.h>
-#include <string>
-#include <vector>
-
-#include "src/base/build_config.h"
-#include "src/platform/mutex.h"
-#include "src/platform/semaphore.h"
-
-#ifdef __sun
-# ifndef signbit
-namespace std {
-int signbit(double x);
-}
-# endif
-#endif
-
-#if V8_OS_QNX
-#include "src/qnx-math.h"
-#endif
-
-// Microsoft Visual C++ specific stuff.
-#if V8_LIBC_MSVCRT
-
-#include "src/base/win32-headers.h"
-#include "src/win32-math.h"
-
-int strncasecmp(const char* s1, const char* s2, int n);
-
-// Visual C++ 2013 and higher implement this function.
-#if (_MSC_VER < 1800)
-inline int lrint(double flt) {
-  int intgr;
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
-  __asm {
-    fld flt
-    fistp intgr
-  };
-#else
-  intgr = static_cast<int>(flt + 0.5);
-  if ((intgr & 1) != 0 && intgr - flt == 0.5) {
-    // If the number is halfway between two integers, round to the even one.
-    intgr--;
-  }
-#endif
-  return intgr;
-}
-#endif  // _MSC_VER < 1800
-
-#endif  // V8_LIBC_MSVCRT
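// A standalone check of the non-x87 lrint fallback above (positive inputs
// shown): exact halfway cases are rounded to the even neighbour, matching
// the default fistp rounding mode that the assembly branch relies on.
#include <cassert>

static int LrintFallback(double flt) {
  int intgr = static_cast<int>(flt + 0.5);
  if ((intgr & 1) != 0 && intgr - flt == 0.5) {
    intgr--;  // halfway case landed on an odd value: round to even
  }
  return intgr;
}

int main() {
  assert(LrintFallback(2.5) == 2);  // halfway: rounds down to even
  assert(LrintFallback(3.5) == 4);  // halfway: rounds up to even
  assert(LrintFallback(2.4) == 2);
  assert(LrintFallback(2.6) == 3);
  return 0;
}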
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Fast TLS support
-
-#ifndef V8_NO_FAST_TLS
-
-#if defined(_MSC_VER) && (V8_HOST_ARCH_IA32)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
-  const intptr_t kTibInlineTlsOffset = 0xE10;
-  const intptr_t kTibExtraTlsOffset = 0xF94;
-  const intptr_t kMaxInlineSlots = 64;
-  const intptr_t kMaxSlots = kMaxInlineSlots + 1024;
-  const intptr_t kPointerSize = sizeof(void*);
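-  // Slots 0..63 live inline in the TEB (the TlsSlots array at offset 0xE10
-  // on x86); higher indices are reached through the TlsExpansionSlots
-  // pointer stored at offset 0xF94, matching the constants above.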
-  ASSERT(0 <= index && index < kMaxSlots);
-  if (index < kMaxInlineSlots) {
-    return static_cast<intptr_t>(__readfsdword(kTibInlineTlsOffset +
-                                               kPointerSize * index));
-  }
-  intptr_t extra = static_cast<intptr_t>(__readfsdword(kTibExtraTlsOffset));
-  ASSERT(extra != 0);
-  return *reinterpret_cast<intptr_t*>(extra +
-                                      kPointerSize * (index - kMaxInlineSlots));
-}
-
-#elif defined(__APPLE__) && (V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-extern intptr_t kMacTlsBaseOffset;
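-
-// On OS X, pthread thread-specific values live in an array addressable at a
-// fixed offset from the %gs segment base; kMacTlsBaseOffset (defined in the
-// platform-specific code) holds that offset.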
-
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
-  intptr_t result;
-#if V8_HOST_ARCH_IA32
-  asm("movl %%gs:(%1,%2,4), %0;"
-      :"=r"(result)  // Output must be a writable register.
-      :"r"(kMacTlsBaseOffset), "r"(index));
-#else
-  asm("movq %%gs:(%1,%2,8), %0;"
-      :"=r"(result)
-      :"r"(kMacTlsBaseOffset), "r"(index));
-#endif
-  return result;
-}
-
-#endif
-
-#endif  // V8_NO_FAST_TLS
-
-
-class TimezoneCache;
-
-
-// ----------------------------------------------------------------------------
-// OS
-//
-// This class has static methods for the different platform-specific
-// functions. Add methods here to cope with differences between the
-// supported platforms.
-
-class OS {
- public:
-  // Returns the accumulated user time for the thread. This routine
-  // can be used for profiling. The implementation should
-  // strive for high-precision timer resolution, preferably
-  // micro-second resolution.
-  static int GetUserTime(uint32_t* secs,  uint32_t* usecs);
-
-  // Returns current time as the number of milliseconds since
-  // 00:00:00 UTC, January 1, 1970.
-  static double TimeCurrentMillis();
-
-  static TimezoneCache* CreateTimezoneCache();
-  static void DisposeTimezoneCache(TimezoneCache* cache);
-  static void ClearTimezoneCache(TimezoneCache* cache);
-
-  // Returns a string identifying the current time zone. The
-  // timestamp is used for determining if DST is in effect.
-  static const char* LocalTimezone(double time, TimezoneCache* cache);
-
-  // Returns the local time offset in milliseconds east of UTC without
-  // taking daylight savings time into account.
-  static double LocalTimeOffset(TimezoneCache* cache);
-
-  // Returns the daylight savings offset for the given time.
-  static double DaylightSavingsOffset(double time, TimezoneCache* cache);
-
-  // Returns last OS error.
-  static int GetLastError();
-
-  static FILE* FOpen(const char* path, const char* mode);
-  static bool Remove(const char* path);
-
-  // Opens a temporary file; the file is automatically removed on close.
-  static FILE* OpenTemporaryFile();
-
-  // Log file open mode is platform-dependent due to line-ending issues.
-  static const char* const LogFileOpenMode;
-
-  // Print output to console. This is mostly used for debugging output.
-  // On platforms that have standard terminal output, the output
-  // should go to stdout.
-  static void Print(const char* format, ...);
-  static void VPrint(const char* format, va_list args);
-
-  // Print output to a file. This is mostly used for debugging output.
-  static void FPrint(FILE* out, const char* format, ...);
-  static void VFPrint(FILE* out, const char* format, va_list args);
-
-  // Print error output to console. This is mostly used for error message
-  // output. On platforms that have standard terminal output, the output
-  // should go to stderr.
-  static void PrintError(const char* format, ...);
-  static void VPrintError(const char* format, va_list args);
-
-  // Allocate/Free memory used by JS heap. Pages are readable/writable, but
-  // they are not guaranteed to be executable unless 'is_executable' is true.
-  // Returns the address of the allocated memory, or NULL on failure.
-  static void* Allocate(const size_t requested,
-                        size_t* allocated,
-                        bool is_executable);
-  static void Free(void* address, const size_t size);
-
-  // This is the granularity at which the ProtectCode(...) call can set page
-  // permissions.
-  static intptr_t CommitPageSize();
-
-  // Mark code segments non-writable.
-  static void ProtectCode(void* address, const size_t size);
-
-  // Assign memory as a guard page so that access will cause an exception.
-  static void Guard(void* address, const size_t size);
-
-  // Generate a random address to be used for hinting mmap().
-  static void* GetRandomMmapAddr();
-
-  // Get the alignment guaranteed by Allocate().
-  static size_t AllocateAlignment();
-
-  // Sleep for a number of milliseconds.
-  static void Sleep(const int milliseconds);
-
-  // Abort the current process.
-  static void Abort();
-
-  // Debug break.
-  static void DebugBreak();
-
-  // Walk the stack.
-  static const int kStackWalkError = -1;
-  static const int kStackWalkMaxNameLen = 256;
-  static const int kStackWalkMaxTextLen = 256;
-  struct StackFrame {
-    void* address;
-    char text[kStackWalkMaxTextLen];
-  };
-
-  class MemoryMappedFile {
-   public:
-    static MemoryMappedFile* open(const char* name);
-    static MemoryMappedFile* create(const char* name, int size, void* initial);
-    virtual ~MemoryMappedFile() { }
-    virtual void* memory() = 0;
-    virtual int size() = 0;
-  };
-
-  // Safe formatting print. Ensures that str is always null-terminated.
-  // Returns the number of chars written, or -1 if output was truncated.
-  static int SNPrintF(char* str, int length, const char* format, ...);
-  static int VSNPrintF(char* str,
-                       int length,
-                       const char* format,
-                       va_list args);
-
-  static char* StrChr(char* str, int c);
-  static void StrNCpy(char* dest, int length, const char* src, size_t n);
-
-  // Support for the profiler.  Can do nothing, in which case ticks
-  // occurring in shared libraries will not be properly accounted for.
-  struct SharedLibraryAddress {
-    SharedLibraryAddress(
-        const std::string& library_path, uintptr_t start, uintptr_t end)
-        : library_path(library_path), start(start), end(end) {}
-
-    std::string library_path;
-    uintptr_t start;
-    uintptr_t end;
-  };
-
-  static std::vector<SharedLibraryAddress> GetSharedLibraryAddresses();
-
-  // Support for the profiler.  Notifies the external profiling
-  // process that a code-moving garbage collection starts.  Can do
-  // nothing, in which case the code objects must not move (e.g., by
-  // using --never-compact) if accurate profiling is desired.
-  static void SignalCodeMovingGC();
-
-  // Returns the number of processors online.
-  static int NumberOfProcessorsOnline();
-
-  // The total amount of physical memory available on the current system.
-  static uint64_t TotalPhysicalMemory();
-
-  // Maximum size of the virtual memory.  0 means there is no artificial
-  // limit.
-  static intptr_t MaxVirtualMemory();
-
-  // Returns the double constant NAN.
-  static double nan_value();
-
-  // Support runtime detection of whether the hard float option of the
-  // EABI is used.
-  static bool ArmUsingHardFloat();
-
-  // Returns the activation frame alignment constraint or zero if
-  // the platform doesn't care. Guaranteed to be a power of two.
-  static int ActivationFrameAlignment();
-
-  static int GetCurrentProcessId();
-
- private:
-  static const int msPerSecond = 1000;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(OS);
-};
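-
-// Usage sketch (illustrative, not part of the original file):
-//   char buffer[128];
-//   int written = OS::SNPrintF(buffer, sizeof(buffer), "pid=%d",
-//                              OS::GetCurrentProcessId());
-//   if (written >= 0) OS::Print("%s\n", buffer);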
-
-// Represents and controls an area of reserved memory.
-// Control of the reserved memory can be assigned to another VirtualMemory
-// object by assignment or copy-construction. This removes the reserved memory
-// from the original object.
-class VirtualMemory {
- public:
-  // Empty VirtualMemory object, controlling no reserved memory.
-  VirtualMemory();
-
-  // Reserves virtual memory with size.
-  explicit VirtualMemory(size_t size);
-
-  // Reserves virtual memory containing an area of the given size that
-  // is aligned to the given alignment. This may not be at the position
-  // returned by address().
-  VirtualMemory(size_t size, size_t alignment);
-
-  // Releases the reserved memory, if any, controlled by this VirtualMemory
-  // object.
-  ~VirtualMemory();
-
-  // Returns whether the memory has been reserved.
-  bool IsReserved();
-
-  // Initializes or resets an embedded VirtualMemory object.
-  void Reset();
-
-  // Returns the start address of the reserved memory.
-  // If the memory was reserved with an alignment, this address is not
-  // necessarily aligned. The user might need to round it up to a multiple of
-  // the alignment to get the start of the aligned block.
-  void* address() {
-    ASSERT(IsReserved());
-    return address_;
-  }
-
-  // Returns the size of the reserved memory. The returned value is only
-  // meaningful when IsReserved() returns true.
-  // If the memory was reserved with an alignment, this size may be larger
-  // than the requested size.
-  size_t size() { return size_; }
-
-  // Commits real memory. Returns whether the operation succeeded.
-  bool Commit(void* address, size_t size, bool is_executable);
-
-  // Uncommit real memory.  Returns whether the operation succeeded.
-  bool Uncommit(void* address, size_t size);
-
-  // Creates a single guard page at the given address.
-  bool Guard(void* address);
-
-  void Release() {
-    ASSERT(IsReserved());
-    // Notice: Order is important here. The VirtualMemory object might live
-    // inside the allocated region.
-    void* address = address_;
-    size_t size = size_;
-    Reset();
-    bool result = ReleaseRegion(address, size);
-    USE(result);
-    ASSERT(result);
-  }
-
-  // Assign control of the reserved region to a different VirtualMemory object.
-  // The old object is no longer functional (IsReserved() returns false).
-  void TakeControl(VirtualMemory* from) {
-    ASSERT(!IsReserved());
-    address_ = from->address_;
-    size_ = from->size_;
-    from->Reset();
-  }
-
-  static void* ReserveRegion(size_t size);
-
-  static bool CommitRegion(void* base, size_t size, bool is_executable);
-
-  static bool UncommitRegion(void* base, size_t size);
-
-  // Must be called with a base pointer that has been returned by ReserveRegion
-  // and the same size it was reserved with.
-  static bool ReleaseRegion(void* base, size_t size);
-
-  // Returns true if OS performs lazy commits, i.e. the memory allocation call
-  // defers actual physical memory allocation till the first memory access.
-  // Otherwise returns false.
-  static bool HasLazyCommits();
-
- private:
-  void* address_;  // Start address of the virtual memory.
-  size_t size_;  // Size of the virtual memory.
-};
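-
-// Usage sketch (illustrative, not part of the original file; assumes a 4 KB
-// commit granularity):
-//   VirtualMemory reservation(1024 * 1024);
-//   if (reservation.IsReserved() &&
-//       reservation.Commit(reservation.address(), 4096, false)) {
-//     // ... use the committed page ...
-//     reservation.Uncommit(reservation.address(), 4096);
-//   }
-//   // The remaining reservation is released by the destructor.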
-
-
-// ----------------------------------------------------------------------------
-// Thread
-//
-// Thread objects are used for creating and running threads. When the Start()
-// method is called, the new thread starts running the Run() method in the new
-// thread. The Thread object should not be deallocated before the thread has
-// terminated.
-
-class Thread {
- public:
-  // Opaque data type for thread-local storage keys.
-  typedef int32_t LocalStorageKey;
-
-  class Options {
-   public:
-    Options() : name_("v8:<unknown>"), stack_size_(0) {}
-    Options(const char* name, int stack_size = 0)
-        : name_(name), stack_size_(stack_size) {}
-
-    const char* name() const { return name_; }
-    int stack_size() const { return stack_size_; }
-
-   private:
-    const char* name_;
-    int stack_size_;
-  };
-
-  // Create new thread.
-  explicit Thread(const Options& options);
-  virtual ~Thread();
-
-  // Start new thread by calling the Run() method on the new thread.
-  void Start();
-
-  // Start new thread and wait until Run() method is called on the new thread.
-  void StartSynchronously() {
-    start_semaphore_ = new Semaphore(0);
-    Start();
-    start_semaphore_->Wait();
-    delete start_semaphore_;
-    start_semaphore_ = NULL;
-  }
-
-  // Wait until thread terminates.
-  void Join();
-
-  inline const char* name() const {
-    return name_;
-  }
-
-  // Abstract method for run handler.
-  virtual void Run() = 0;
-
-  // Thread-local storage.
-  static LocalStorageKey CreateThreadLocalKey();
-  static void DeleteThreadLocalKey(LocalStorageKey key);
-  static void* GetThreadLocal(LocalStorageKey key);
-  static int GetThreadLocalInt(LocalStorageKey key) {
-    return static_cast<int>(reinterpret_cast<intptr_t>(GetThreadLocal(key)));
-  }
-  static void SetThreadLocal(LocalStorageKey key, void* value);
-  static void SetThreadLocalInt(LocalStorageKey key, int value) {
-    SetThreadLocal(key, reinterpret_cast<void*>(static_cast<intptr_t>(value)));
-  }
-  static bool HasThreadLocal(LocalStorageKey key) {
-    return GetThreadLocal(key) != NULL;
-  }
-
-#ifdef V8_FAST_TLS_SUPPORTED
-  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
-    void* result = reinterpret_cast<void*>(
-        InternalGetExistingThreadLocal(static_cast<intptr_t>(key)));
-    ASSERT(result == GetThreadLocal(key));
-    return result;
-  }
-#else
-  static inline void* GetExistingThreadLocal(LocalStorageKey key) {
-    return GetThreadLocal(key);
-  }
-#endif
-
-  // A hint to the scheduler to let another thread run.
-  static void YieldCPU();
-
-
-  // The thread name length is limited to 16 based on Linux's implementation of
-  // prctl().
-  static const int kMaxThreadNameLength = 16;
-
-  class PlatformData;
-  PlatformData* data() { return data_; }
-
-  void NotifyStartedAndRun() {
-    if (start_semaphore_) start_semaphore_->Signal();
-    Run();
-  }
-
- private:
-  void set_name(const char* name);
-
-  PlatformData* data_;
-
-  char name_[kMaxThreadNameLength];
-  int stack_size_;
-  Semaphore* start_semaphore_;
-
-  DISALLOW_COPY_AND_ASSIGN(Thread);
-};
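-
-// Usage sketch (illustrative, not part of the original file):
-//   class WorkerThread : public Thread {
-//    public:
-//     WorkerThread() : Thread(Options("v8:Worker")) {}
-//     virtual void Run() { /* ... do the work ... */ }
-//   };
-//
-//   WorkerThread worker;
-//   worker.Start();  // Run() begins executing on the new thread.
-//   worker.Join();   // Blocks until Run() returns.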
-
-} }  // namespace v8::internal
-
-#endif  // V8_PLATFORM_H_
diff --git a/src/platform/condition-variable.cc b/src/platform/condition-variable.cc
deleted file mode 100644
index e180acd..0000000
--- a/src/platform/condition-variable.cc
+++ /dev/null
@@ -1,322 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/platform/condition-variable.h"
-
-#include <errno.h>
-#include <time.h>
-
-#include "src/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_POSIX
-
-ConditionVariable::ConditionVariable() {
-  // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
-  // hack to support cross-compiling Chrome for Android in AOSP. Remove
-  // this once AOSP is fixed.
-#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
-     (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
-  // On Free/Net/OpenBSD and Linux with glibc we can change the time
-  // source for pthread_cond_timedwait() to use the monotonic clock.
-  pthread_condattr_t attr;
-  int result = pthread_condattr_init(&attr);
-  ASSERT_EQ(0, result);
-  result = pthread_condattr_setclock(&attr, CLOCK_MONOTONIC);
-  ASSERT_EQ(0, result);
-  result = pthread_cond_init(&native_handle_, &attr);
-  ASSERT_EQ(0, result);
-  result = pthread_condattr_destroy(&attr);
-#else
-  int result = pthread_cond_init(&native_handle_, NULL);
-#endif
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-ConditionVariable::~ConditionVariable() {
-  int result = pthread_cond_destroy(&native_handle_);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-void ConditionVariable::NotifyOne() {
-  int result = pthread_cond_signal(&native_handle_);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-void ConditionVariable::NotifyAll() {
-  int result = pthread_cond_broadcast(&native_handle_);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-void ConditionVariable::Wait(Mutex* mutex) {
-  mutex->AssertHeldAndUnmark();
-  int result = pthread_cond_wait(&native_handle_, &mutex->native_handle());
-  ASSERT_EQ(0, result);
-  USE(result);
-  mutex->AssertUnheldAndMark();
-}
-
-
-bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
-  struct timespec ts;
-  int result;
-  mutex->AssertHeldAndUnmark();
-#if V8_OS_MACOSX
-  // Mac OS X provides pthread_cond_timedwait_relative_np(), which does
-  // not depend on the real-time clock, which is exactly what we want here.
-  ts = rel_time.ToTimespec();
-  ASSERT_GE(ts.tv_sec, 0);
-  ASSERT_GE(ts.tv_nsec, 0);
-  result = pthread_cond_timedwait_relative_np(
-      &native_handle_, &mutex->native_handle(), &ts);
-#else
-  // TODO(bmeurer): The test for V8_LIBRT_NOT_AVAILABLE is a temporary
-  // hack to support cross-compiling Chrome for Android in AOSP. Remove
-  // this once AOSP is fixed.
-#if (V8_OS_FREEBSD || V8_OS_NETBSD || V8_OS_OPENBSD || \
-     (V8_OS_LINUX && V8_LIBC_GLIBC)) && !V8_LIBRT_NOT_AVAILABLE
-  // On Free/Net/OpenBSD and Linux with glibc we can change the time
-  // source for pthread_cond_timedwait() to use the monotonic clock.
-  result = clock_gettime(CLOCK_MONOTONIC, &ts);
-  ASSERT_EQ(0, result);
-  Time now = Time::FromTimespec(ts);
-#else
-  // The timeout argument to pthread_cond_timedwait() is in absolute time.
-  Time now = Time::NowFromSystemTime();
-#endif
-  Time end_time = now + rel_time;
-  ASSERT_GE(end_time, now);
-  ts = end_time.ToTimespec();
-  result = pthread_cond_timedwait(
-      &native_handle_, &mutex->native_handle(), &ts);
-#endif  // V8_OS_MACOSX
-  mutex->AssertUnheldAndMark();
-  if (result == ETIMEDOUT) {
-    return false;
-  }
-  ASSERT_EQ(0, result);
-  return true;
-}
-
-#elif V8_OS_WIN
-
-struct ConditionVariable::Event {
-  Event() : handle_(::CreateEventA(NULL, true, false, NULL)) {
-    ASSERT(handle_ != NULL);
-  }
-
-  ~Event() {
-    BOOL ok = ::CloseHandle(handle_);
-    ASSERT(ok);
-    USE(ok);
-  }
-
-  bool WaitFor(DWORD timeout_ms) {
-    DWORD result = ::WaitForSingleObject(handle_, timeout_ms);
-    if (result == WAIT_OBJECT_0) {
-      return true;
-    }
-    ASSERT(result == WAIT_TIMEOUT);
-    return false;
-  }
-
-  HANDLE handle_;
-  Event* next_;
-  HANDLE thread_;
-  volatile bool notified_;
-};
-
-
-ConditionVariable::NativeHandle::~NativeHandle() {
-  ASSERT(waitlist_ == NULL);
-
-  while (freelist_ != NULL) {
-    Event* event = freelist_;
-    freelist_ = event->next_;
-    delete event;
-  }
-}
-
-
-ConditionVariable::Event* ConditionVariable::NativeHandle::Pre() {
-  LockGuard<Mutex> lock_guard(&mutex_);
-
-  // Grab an event from the free list or create a new one.
-  Event* event = freelist_;
-  if (event != NULL) {
-    freelist_ = event->next_;
-  } else {
-    event = new Event;
-  }
-  event->thread_ = GetCurrentThread();
-  event->notified_ = false;
-
-#ifdef DEBUG
-  // The event must not be on the wait list.
-  for (Event* we = waitlist_; we != NULL; we = we->next_) {
-    ASSERT_NE(event, we);
-  }
-#endif
-
-  // Prepend the event to the wait list.
-  event->next_ = waitlist_;
-  waitlist_ = event;
-
-  return event;
-}
-
-
-void ConditionVariable::NativeHandle::Post(Event* event, bool result) {
-  LockGuard<Mutex> lock_guard(&mutex_);
-
-  // Remove the event from the wait list.
-  for (Event** wep = &waitlist_;; wep = &(*wep)->next_) {
-    ASSERT_NE(NULL, *wep);
-    if (*wep == event) {
-      *wep = event->next_;
-      break;
-    }
-  }
-
-#ifdef DEBUG
-  // The event must not be on the free list.
-  for (Event* fe = freelist_; fe != NULL; fe = fe->next_) {
-    ASSERT_NE(event, fe);
-  }
-#endif
-
-  // Reset the event.
-  BOOL ok = ::ResetEvent(event->handle_);
-  ASSERT(ok);
-  USE(ok);
-
-  // Insert the event into the free list.
-  event->next_ = freelist_;
-  freelist_ = event;
-
-  // Forward signals delivered after the timeout to the next waiting event.
-  if (!result && event->notified_ && waitlist_ != NULL) {
-    ok = ::SetEvent(waitlist_->handle_);
-    ASSERT(ok);
-    USE(ok);
-    waitlist_->notified_ = true;
-  }
-}
-
-
-ConditionVariable::ConditionVariable() {}
-
-
-ConditionVariable::~ConditionVariable() {}
-
-
-void ConditionVariable::NotifyOne() {
-  // Notify the thread with the highest priority in the waitlist
-  // that was not already signalled.
-  LockGuard<Mutex> lock_guard(native_handle_.mutex());
-  Event* highest_event = NULL;
-  int highest_priority = std::numeric_limits<int>::min();
-  for (Event* event = native_handle().waitlist();
-       event != NULL;
-       event = event->next_) {
-    if (event->notified_) {
-      continue;
-    }
-    int priority = GetThreadPriority(event->thread_);
-    ASSERT_NE(THREAD_PRIORITY_ERROR_RETURN, priority);
-    if (priority >= highest_priority) {
-      highest_priority = priority;
-      highest_event = event;
-    }
-  }
-  if (highest_event != NULL) {
-    ASSERT(!highest_event->notified_);
-    ::SetEvent(highest_event->handle_);
-    highest_event->notified_ = true;
-  }
-}
-
-
-void ConditionVariable::NotifyAll() {
-  // Notify all threads on the waitlist.
-  LockGuard<Mutex> lock_guard(native_handle_.mutex());
-  for (Event* event = native_handle().waitlist();
-       event != NULL;
-       event = event->next_) {
-    if (!event->notified_) {
-      ::SetEvent(event->handle_);
-      event->notified_ = true;
-    }
-  }
-}
-
-
-void ConditionVariable::Wait(Mutex* mutex) {
-  // Create and setup the wait event.
-  Event* event = native_handle_.Pre();
-
-  // Release the user mutex.
-  mutex->Unlock();
-
-  // Wait on the wait event.
-  while (!event->WaitFor(INFINITE))
-    ;
-
-  // Reacquire the user mutex.
-  mutex->Lock();
-
-  // Release the wait event (we must have been notified).
-  ASSERT(event->notified_);
-  native_handle_.Post(event, true);
-}
-
-
-bool ConditionVariable::WaitFor(Mutex* mutex, const TimeDelta& rel_time) {
-  // Create and setup the wait event.
-  Event* event = native_handle_.Pre();
-
-  // Release the user mutex.
-  mutex->Unlock();
-
-  // Wait on the wait event.
-  TimeTicks now = TimeTicks::Now();
-  TimeTicks end = now + rel_time;
-  bool result = false;
-  while (true) {
-    int64_t msec = (end - now).InMilliseconds();
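-    // WaitForSingleObject() interprets INFINITE (0xFFFFFFFF) as "wait
-    // forever", so longer timeouts are waited out in chunks of INFINITE - 1
-    // ms, recomputing the remaining time after each chunk.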
-    if (msec >= static_cast<int64_t>(INFINITE)) {
-      result = event->WaitFor(INFINITE - 1);
-      if (result) {
-        break;
-      }
-      now = TimeTicks::Now();
-    } else {
-      result = event->WaitFor((msec < 0) ? 0 : static_cast<DWORD>(msec));
-      break;
-    }
-  }
-
-  // Reacquire the user mutex.
-  mutex->Lock();
-
-  // Release the wait event.
-  ASSERT(!result || event->notified_);
-  native_handle_.Post(event, result);
-
-  return result;
-}
-
-#endif  // V8_OS_POSIX
-
-} }  // namespace v8::internal
diff --git a/src/platform/condition-variable.h b/src/platform/condition-variable.h
deleted file mode 100644
index 4e8724c..0000000
--- a/src/platform/condition-variable.h
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_CONDITION_VARIABLE_H_
-#define V8_PLATFORM_CONDITION_VARIABLE_H_
-
-#include "src/base/lazy-instance.h"
-#include "src/platform/mutex.h"
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class ConditionVariableEvent;
-class TimeDelta;
-
-// -----------------------------------------------------------------------------
-// ConditionVariable
-//
-// This class is a synchronization primitive that can be used to block a thread,
-// or multiple threads at the same time, until:
-// - a notification is received from another thread,
-// - a timeout expires, or
-// - a spurious wakeup occurs.
-// Any thread that intends to wait on a ConditionVariable has to acquire a lock
-// on a Mutex first. The |Wait()| and |WaitFor()| operations atomically release
-// the mutex and suspend the execution of the calling thread. When the condition
-// variable is notified, the thread is awakened, and the mutex is reacquired.
-
-class ConditionVariable V8_FINAL {
- public:
-  ConditionVariable();
-  ~ConditionVariable();
-
-  // If any threads are waiting on this condition variable, calling
-  // |NotifyOne()| unblocks one of the waiting threads.
-  void NotifyOne();
-
-  // Unblocks all threads currently waiting for this condition variable.
-  void NotifyAll();
-
-  // |Wait()| causes the calling thread to block until the condition variable is
-  // notified or a spurious wakeup occurs. Atomically releases the mutex, blocks
-// the currently executing thread, and adds it to the list of threads waiting on
-  // this condition variable. The thread will be unblocked when |NotifyAll()| or
-  // |NotifyOne()| is executed. It may also be unblocked spuriously. When
-  // unblocked, regardless of the reason, the lock on the mutex is reacquired
-  // and |Wait()| exits.
-  void Wait(Mutex* mutex);
-
-  // Atomically releases the mutex, blocks the currently executing thread, and
-  // adds it to the list of threads waiting on this condition variable. The
-  // thread will be unblocked when |NotifyAll()| or |NotifyOne()| is executed,
-  // or when the relative timeout |rel_time| expires. It may also be unblocked
-  // spuriously. When unblocked, regardless of the reason, the lock on the mutex
-  // is reacquired and |WaitFor()| exits. Returns true if the condition variable
-  // was notified prior to the timeout.
-  bool WaitFor(Mutex* mutex, const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
-
-  // The implementation-defined native handle type.
-#if V8_OS_POSIX
-  typedef pthread_cond_t NativeHandle;
-#elif V8_OS_WIN
-  struct Event;
-  class NativeHandle V8_FINAL {
-   public:
-    NativeHandle() : waitlist_(NULL), freelist_(NULL) {}
-    ~NativeHandle();
-
-    Event* Pre() V8_WARN_UNUSED_RESULT;
-    void Post(Event* event, bool result);
-
-    Mutex* mutex() { return &mutex_; }
-    Event* waitlist() { return waitlist_; }
-
-   private:
-    Event* waitlist_;
-    Event* freelist_;
-    Mutex mutex_;
-
-    DISALLOW_COPY_AND_ASSIGN(NativeHandle);
-  };
-#endif
-
-  NativeHandle& native_handle() {
-    return native_handle_;
-  }
-  const NativeHandle& native_handle() const {
-    return native_handle_;
-  }
-
- private:
-  NativeHandle native_handle_;
-
-  DISALLOW_COPY_AND_ASSIGN(ConditionVariable);
-};
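-
-// Usage sketch (illustrative, not part of the original file; |queue|,
-// |queue_mutex| and |queue_cv| are hypothetical):
-//   queue_mutex.Lock();
-//   // Loop, because the wait may be interrupted by a spurious wakeup.
-//   while (queue.empty()) {
-//     queue_cv.Wait(&queue_mutex);
-//   }
-//   // ... consume an item while still holding |queue_mutex| ...
-//   queue_mutex.Unlock();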
-
-
-// POD ConditionVariable initialized lazily (i.e. the first time Pointer() is
-// called).
-// Usage:
-//   static LazyConditionVariable my_condvar =
-//       LAZY_CONDITION_VARIABLE_INITIALIZER;
-//
-//   void my_function() {
-//     LockGuard<Mutex> lock_guard(&my_mutex);
-//     my_condvar.Pointer()->Wait(&my_mutex);
-//   }
-typedef base::LazyStaticInstance<
-    ConditionVariable, base::DefaultConstructTrait<ConditionVariable>,
-    base::ThreadSafeInitOnceTrait>::type LazyConditionVariable;
-
-#define LAZY_CONDITION_VARIABLE_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
-} }  // namespace v8::internal
-
-#endif  // V8_PLATFORM_CONDITION_VARIABLE_H_
diff --git a/src/platform/elapsed-timer.h b/src/platform/elapsed-timer.h
deleted file mode 100644
index 9955c3e..0000000
--- a/src/platform/elapsed-timer.h
+++ /dev/null
@@ -1,97 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_ELAPSED_TIMER_H_
-#define V8_PLATFORM_ELAPSED_TIMER_H_
-
-#include "src/checks.h"
-#include "src/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-class ElapsedTimer V8_FINAL BASE_EMBEDDED {
- public:
-#ifdef DEBUG
-  ElapsedTimer() : started_(false) {}
-#endif
-
-  // Starts this timer. Once started a timer can be checked with
-  // |Elapsed()| or |HasExpired()|, and may be restarted using |Restart()|.
-  // This method must not be called on an already started timer.
-  void Start() {
-    ASSERT(!IsStarted());
-    start_ticks_ = Now();
-#ifdef DEBUG
-    started_ = true;
-#endif
-    ASSERT(IsStarted());
-  }
-
-  // Stops this timer. Must not be called on a timer that was not
-  // started before.
-  void Stop() {
-    ASSERT(IsStarted());
-    start_ticks_ = TimeTicks();
-#ifdef DEBUG
-    started_ = false;
-#endif
-    ASSERT(!IsStarted());
-  }
-
-  // Returns |true| if this timer was started previously.
-  bool IsStarted() const {
-    ASSERT(started_ || start_ticks_.IsNull());
-    ASSERT(!started_ || !start_ticks_.IsNull());
-    return !start_ticks_.IsNull();
-  }
-
-  // Restarts the timer and returns the time elapsed since the previous start.
-  // This method is equivalent to obtaining the elapsed time with |Elapsed()|
-  // and then starting the timer again, but does so in one single operation,
-  // avoiding the need to obtain the clock value twice. It may only be called
-  // on a previously started timer.
-  TimeDelta Restart() {
-    ASSERT(IsStarted());
-    TimeTicks ticks = Now();
-    TimeDelta elapsed = ticks - start_ticks_;
-    ASSERT(elapsed.InMicroseconds() >= 0);
-    start_ticks_ = ticks;
-    ASSERT(IsStarted());
-    return elapsed;
-  }
-
-  // Returns the time elapsed since the previous start. This method may only
-  // be called on a previously started timer.
-  TimeDelta Elapsed() const {
-    ASSERT(IsStarted());
-    TimeDelta elapsed = Now() - start_ticks_;
-    ASSERT(elapsed.InMicroseconds() >= 0);
-    return elapsed;
-  }
-
-  // Returns |true| if the specified |time_delta| has elapsed since the
-  // previous start, or |false| if not. This method may only be called on
-  // a previously started timer.
-  bool HasExpired(TimeDelta time_delta) const {
-    ASSERT(IsStarted());
-    return Elapsed() >= time_delta;
-  }
-
- private:
-  static V8_INLINE TimeTicks Now() {
-    TimeTicks now = TimeTicks::HighResolutionNow();
-    ASSERT(!now.IsNull());
-    return now;
-  }
-
-  TimeTicks start_ticks_;
-#ifdef DEBUG
-  bool started_;
-#endif
-};
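-
-// Usage sketch (illustrative, not part of the original file):
-//   ElapsedTimer timer;
-//   timer.Start();
-//   // ... do some work ...
-//   if (timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
-//     // The work took at least 100 ms.
-//   }
-//   TimeDelta spent = timer.Elapsed();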
-
-} }  // namespace v8::internal
-
-#endif  // V8_PLATFORM_ELAPSED_TIMER_H_
diff --git a/src/platform/mutex.cc b/src/platform/mutex.cc
deleted file mode 100644
index 014b41a..0000000
--- a/src/platform/mutex.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/platform/mutex.h"
-
-#include <errno.h>
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_POSIX
-
-static V8_INLINE void InitializeNativeHandle(pthread_mutex_t* mutex) {
-  int result;
-#if defined(DEBUG)
-  // Use an error checking mutex in debug mode.
-  pthread_mutexattr_t attr;
-  result = pthread_mutexattr_init(&attr);
-  ASSERT_EQ(0, result);
-  result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
-  ASSERT_EQ(0, result);
-  result = pthread_mutex_init(mutex, &attr);
-  ASSERT_EQ(0, result);
-  result = pthread_mutexattr_destroy(&attr);
-#else
-  // Use a fast mutex (default attributes).
-  result = pthread_mutex_init(mutex, NULL);
-#endif  // defined(DEBUG)
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-static V8_INLINE void InitializeRecursiveNativeHandle(pthread_mutex_t* mutex) {
-  pthread_mutexattr_t attr;
-  int result = pthread_mutexattr_init(&attr);
-  ASSERT_EQ(0, result);
-  result = pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
-  ASSERT_EQ(0, result);
-  result = pthread_mutex_init(mutex, &attr);
-  ASSERT_EQ(0, result);
-  result = pthread_mutexattr_destroy(&attr);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-static V8_INLINE void DestroyNativeHandle(pthread_mutex_t* mutex) {
-  int result = pthread_mutex_destroy(mutex);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-static V8_INLINE void LockNativeHandle(pthread_mutex_t* mutex) {
-  int result = pthread_mutex_lock(mutex);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-static V8_INLINE void UnlockNativeHandle(pthread_mutex_t* mutex) {
-  int result = pthread_mutex_unlock(mutex);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-static V8_INLINE bool TryLockNativeHandle(pthread_mutex_t* mutex) {
-  int result = pthread_mutex_trylock(mutex);
-  if (result == EBUSY) {
-    return false;
-  }
-  ASSERT_EQ(0, result);
-  return true;
-}
-
-#elif V8_OS_WIN
-
-static V8_INLINE void InitializeNativeHandle(PCRITICAL_SECTION cs) {
-  InitializeCriticalSection(cs);
-}
-
-
-static V8_INLINE void InitializeRecursiveNativeHandle(PCRITICAL_SECTION cs) {
-  InitializeCriticalSection(cs);
-}
-
-
-static V8_INLINE void DestroyNativeHandle(PCRITICAL_SECTION cs) {
-  DeleteCriticalSection(cs);
-}
-
-
-static V8_INLINE void LockNativeHandle(PCRITICAL_SECTION cs) {
-  EnterCriticalSection(cs);
-}
-
-
-static V8_INLINE void UnlockNativeHandle(PCRITICAL_SECTION cs) {
-  LeaveCriticalSection(cs);
-}
-
-
-static V8_INLINE bool TryLockNativeHandle(PCRITICAL_SECTION cs) {
-  return TryEnterCriticalSection(cs);
-}
-
-#endif  // V8_OS_POSIX
-
-
-Mutex::Mutex() {
-  InitializeNativeHandle(&native_handle_);
-#ifdef DEBUG
-  level_ = 0;
-#endif
-}
-
-
-Mutex::~Mutex() {
-  DestroyNativeHandle(&native_handle_);
-  ASSERT_EQ(0, level_);
-}
-
-
-void Mutex::Lock() {
-  LockNativeHandle(&native_handle_);
-  AssertUnheldAndMark();
-}
-
-
-void Mutex::Unlock() {
-  AssertHeldAndUnmark();
-  UnlockNativeHandle(&native_handle_);
-}
-
-
-bool Mutex::TryLock() {
-  if (!TryLockNativeHandle(&native_handle_)) {
-    return false;
-  }
-  AssertUnheldAndMark();
-  return true;
-}
-
-
-RecursiveMutex::RecursiveMutex() {
-  InitializeRecursiveNativeHandle(&native_handle_);
-#ifdef DEBUG
-  level_ = 0;
-#endif
-}
-
-
-RecursiveMutex::~RecursiveMutex() {
-  DestroyNativeHandle(&native_handle_);
-  ASSERT_EQ(0, level_);
-}
-
-
-void RecursiveMutex::Lock() {
-  LockNativeHandle(&native_handle_);
-#ifdef DEBUG
-  ASSERT_LE(0, level_);
-  level_++;
-#endif
-}
-
-
-void RecursiveMutex::Unlock() {
-#ifdef DEBUG
-  ASSERT_LT(0, level_);
-  level_--;
-#endif
-  UnlockNativeHandle(&native_handle_);
-}
-
-
-bool RecursiveMutex::TryLock() {
-  if (!TryLockNativeHandle(&native_handle_)) {
-    return false;
-  }
-#ifdef DEBUG
-  ASSERT_LE(0, level_);
-  level_++;
-#endif
-  return true;
-}
-
-} }  // namespace v8::internal
diff --git a/src/platform/mutex.h b/src/platform/mutex.h
deleted file mode 100644
index 1e93468..0000000
--- a/src/platform/mutex.h
+++ /dev/null
@@ -1,216 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_MUTEX_H_
-#define V8_PLATFORM_MUTEX_H_
-
-#include "src/base/lazy-instance.h"
-#if V8_OS_WIN
-#include "src/base/win32-headers.h"
-#endif
-#include "src/checks.h"
-
-#if V8_OS_POSIX
-#include <pthread.h>  // NOLINT
-#endif
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Mutex
-//
-// This class is a synchronization primitive that can be used to protect shared
-// data from being simultaneously accessed by multiple threads. A mutex offers
-// exclusive, non-recursive ownership semantics:
-// - A calling thread owns a mutex from the time that it successfully calls
-//   either |Lock()| or |TryLock()| until it calls |Unlock()|.
-// - When a thread owns a mutex, all other threads will block (for calls to
-//   |Lock()|) or receive a |false| return value (for |TryLock()|) if they
-//   attempt to claim ownership of the mutex.
-// A calling thread must not own the mutex prior to calling |Lock()| or
-// |TryLock()|. The behavior of a program is undefined if a mutex is destroyed
-// while still owned by some thread. The Mutex class is non-copyable.
-
-class Mutex V8_FINAL {
- public:
-  Mutex();
-  ~Mutex();
-
-  // Locks the given mutex. If the mutex is currently unlocked, it becomes
-  // locked and owned by the calling thread, and the call returns immediately.
-  // If the mutex is already locked by another thread, suspends the calling
-  // thread until the mutex is unlocked.
-  void Lock();
-
-  // Unlocks the given mutex. The mutex is assumed to be locked and owned by
-  // the calling thread on entrance.
-  void Unlock();
-
-  // Tries to lock the given mutex. Returns whether the mutex was
-  // successfully locked.
-  bool TryLock() V8_WARN_UNUSED_RESULT;
-
-  // The implementation-defined native handle type.
-#if V8_OS_POSIX
-  typedef pthread_mutex_t NativeHandle;
-#elif V8_OS_WIN
-  typedef CRITICAL_SECTION NativeHandle;
-#endif
-
-  NativeHandle& native_handle() {
-    return native_handle_;
-  }
-  const NativeHandle& native_handle() const {
-    return native_handle_;
-  }
-
- private:
-  NativeHandle native_handle_;
-#ifdef DEBUG
-  int level_;
-#endif
-
-  V8_INLINE void AssertHeldAndUnmark() {
-#ifdef DEBUG
-    ASSERT_EQ(1, level_);
-    level_--;
-#endif
-  }
-
-  V8_INLINE void AssertUnheldAndMark() {
-#ifdef DEBUG
-    ASSERT_EQ(0, level_);
-    level_++;
-#endif
-  }
-
-  friend class ConditionVariable;
-
-  DISALLOW_COPY_AND_ASSIGN(Mutex);
-};
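-
-// Usage sketch (illustrative, not part of the original file):
-//   Mutex mutex;
-//   if (mutex.TryLock()) {
-//     // ... critical section ...
-//     mutex.Unlock();
-//   } else {
-//     // Another thread owns the mutex; do something else instead.
-//   }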
-
-
-// POD Mutex initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-//   static LazyMutex my_mutex = LAZY_MUTEX_INITIALIZER;
-//
-//   void my_function() {
-//     LockGuard<Mutex> guard(my_mutex.Pointer());
-//     // Do something.
-//   }
-//
-typedef v8::base::LazyStaticInstance<
-    Mutex, v8::base::DefaultConstructTrait<Mutex>,
-    v8::base::ThreadSafeInitOnceTrait>::type LazyMutex;
-
-#define LAZY_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
-
-// -----------------------------------------------------------------------------
-// RecursiveMutex
-//
-// This class is a synchronization primitive that can be used to protect shared
-// data from being simultaneously accessed by multiple threads. A recursive
-// mutex offers exclusive, recursive ownership semantics:
-// - A calling thread owns a recursive mutex for a period of time that starts
-//   when it successfully calls either |Lock()| or |TryLock()|. During this
-//   period, the thread may make additional calls to |Lock()| or |TryLock()|.
-//   The period of ownership ends when the thread makes a matching number of
-//   calls to |Unlock()|.
-// - When a thread owns a recursive mutex, all other threads will block (for
-//   calls to |Lock()|) or receive a |false| return value (for |TryLock()|) if
-//   they attempt to claim ownership of the recursive mutex.
-// - The maximum number of times that a recursive mutex may be locked is
-//   unspecified, but after that number is reached, calls to |Lock()| will
-//   probably abort the process, and calls to |TryLock()| will return false.
-// The behavior of a program is undefined if a recursive mutex is destroyed
-// while still owned by some thread. The RecursiveMutex class is non-copyable.
-
-class RecursiveMutex V8_FINAL {
- public:
-  RecursiveMutex();
-  ~RecursiveMutex();
-
-  // Locks the mutex. If another thread has already locked the mutex, a call to
-  // |Lock()| will block execution until the lock is acquired. A thread may call
-  // |Lock()| on a recursive mutex repeatedly. Ownership will only be released
-  // after the thread makes a matching number of calls to |Unlock()|.
-  // The behavior is undefined if the mutex is not unlocked before being
-  // destroyed, i.e. some thread still owns it.
-  void Lock();
-
-  // Unlocks the mutex if its level of ownership is 1 (there was exactly one
-  // more call to |Lock()| than there were calls to |Unlock()| made by this
-  // thread), reduces the level of ownership by 1 otherwise. The mutex must be
-  // locked by the current thread of execution, otherwise, the behavior is
-  // undefined.
-  void Unlock();
-
-  // Tries to lock the given mutex. Returns whether the mutex was
-  // successfully locked.
-  bool TryLock() V8_WARN_UNUSED_RESULT;
-
-  // The implementation-defined native handle type.
-  typedef Mutex::NativeHandle NativeHandle;
-
-  NativeHandle& native_handle() {
-    return native_handle_;
-  }
-  const NativeHandle& native_handle() const {
-    return native_handle_;
-  }
-
- private:
-  NativeHandle native_handle_;
-#ifdef DEBUG
-  int level_;
-#endif
-
-  DISALLOW_COPY_AND_ASSIGN(RecursiveMutex);
-};
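-
-// Usage sketch (illustrative, not part of the original file):
-//   RecursiveMutex mutex;
-//   mutex.Lock();    // Level of ownership becomes 1.
-//   mutex.Lock();    // Same thread may lock again; level becomes 2.
-//   mutex.Unlock();  // Level drops to 1; still owned.
-//   mutex.Unlock();  // Level drops to 0; ownership released.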
-
-
-// POD RecursiveMutex initialized lazily (i.e. the first time Pointer() is
-// called).
-// Usage:
-//   static LazyRecursiveMutex my_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
-//
-//   void my_function() {
-//     LockGuard<RecursiveMutex> guard(my_mutex.Pointer());
-//     // Do something.
-//   }
-//
-typedef v8::base::LazyStaticInstance<
-    RecursiveMutex, v8::base::DefaultConstructTrait<RecursiveMutex>,
-    v8::base::ThreadSafeInitOnceTrait>::type LazyRecursiveMutex;
-
-#define LAZY_RECURSIVE_MUTEX_INITIALIZER LAZY_STATIC_INSTANCE_INITIALIZER
-
-
-// -----------------------------------------------------------------------------
-// LockGuard
-//
-// This class is a mutex wrapper that provides a convenient RAII-style mechanism
-// for owning a mutex for the duration of a scoped block.
-// When a LockGuard object is created, it attempts to take ownership of the
-// mutex it is given. When control leaves the scope in which the LockGuard
-// object was created, the LockGuard is destructed and the mutex is released.
-// The LockGuard class is non-copyable.
-
-template <typename Mutex>
-class LockGuard V8_FINAL {
- public:
-  explicit LockGuard(Mutex* mutex) : mutex_(mutex) { mutex_->Lock(); }
-  ~LockGuard() { mutex_->Unlock(); }
-
- private:
-  Mutex* mutex_;
-
-  DISALLOW_COPY_AND_ASSIGN(LockGuard);
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_PLATFORM_MUTEX_H_
diff --git a/src/platform/semaphore.cc b/src/platform/semaphore.cc
deleted file mode 100644
index 18264f4..0000000
--- a/src/platform/semaphore.cc
+++ /dev/null
@@ -1,191 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/platform/semaphore.h"
-
-#if V8_OS_MACOSX
-#include <mach/mach_init.h>
-#include <mach/task.h>
-#endif
-
-#include <errno.h>
-
-#include "src/checks.h"
-#include "src/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-#if V8_OS_MACOSX
-
-Semaphore::Semaphore(int count) {
-  kern_return_t result = semaphore_create(
-      mach_task_self(), &native_handle_, SYNC_POLICY_FIFO, count);
-  ASSERT_EQ(KERN_SUCCESS, result);
-  USE(result);
-}
-
-
-Semaphore::~Semaphore() {
-  kern_return_t result = semaphore_destroy(mach_task_self(), native_handle_);
-  ASSERT_EQ(KERN_SUCCESS, result);
-  USE(result);
-}
-
-
-void Semaphore::Signal() {
-  kern_return_t result = semaphore_signal(native_handle_);
-  ASSERT_EQ(KERN_SUCCESS, result);
-  USE(result);
-}
-
-
-void Semaphore::Wait() {
-  while (true) {
-    kern_return_t result = semaphore_wait(native_handle_);
-    if (result == KERN_SUCCESS) return;  // Semaphore was signalled.
-    ASSERT_EQ(KERN_ABORTED, result);
-  }
-}
-
-
-bool Semaphore::WaitFor(const TimeDelta& rel_time) {
-  TimeTicks now = TimeTicks::Now();
-  TimeTicks end = now + rel_time;
-  while (true) {
-    mach_timespec_t ts;
-    if (now >= end) {
-      // Return immediately if semaphore was not signalled.
-      ts.tv_sec = 0;
-      ts.tv_nsec = 0;
-    } else {
-      ts = (end - now).ToMachTimespec();
-    }
-    kern_return_t result = semaphore_timedwait(native_handle_, ts);
-    if (result == KERN_SUCCESS) return true;  // Semaphore was signalled.
-    if (result == KERN_OPERATION_TIMED_OUT) return false;  // Timeout.
-    ASSERT_EQ(KERN_ABORTED, result);
-    now = TimeTicks::Now();
-  }
-}
-
-#elif V8_OS_POSIX
-
-Semaphore::Semaphore(int count) {
-  ASSERT(count >= 0);
-  int result = sem_init(&native_handle_, 0, count);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-Semaphore::~Semaphore() {
-  int result = sem_destroy(&native_handle_);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-void Semaphore::Signal() {
-  int result = sem_post(&native_handle_);
-  ASSERT_EQ(0, result);
-  USE(result);
-}
-
-
-void Semaphore::Wait() {
-  while (true) {
-    int result = sem_wait(&native_handle_);
-    if (result == 0) return;  // Semaphore was signalled.
-    // Signal caused spurious wakeup.
-    ASSERT_EQ(-1, result);
-    ASSERT_EQ(EINTR, errno);
-  }
-}
-
-
-bool Semaphore::WaitFor(const TimeDelta& rel_time) {
-  // Compute the time for end of timeout.
-  const Time time = Time::NowFromSystemTime() + rel_time;
-  const struct timespec ts = time.ToTimespec();
-
-  // Wait for semaphore signalled or timeout.
-  while (true) {
-    int result = sem_timedwait(&native_handle_, &ts);
-    if (result == 0) return true;  // Semaphore was signalled.
-#if V8_LIBC_GLIBC && !V8_GLIBC_PREREQ(2, 4)
-    if (result > 0) {
-      // sem_timedwait in glibc prior to 2.3.4 returns the errno instead of -1.
-      errno = result;
-      result = -1;
-    }
-#endif
-    if (result == -1 && errno == ETIMEDOUT) {
-      // Timed out while waiting for semaphore.
-      return false;
-    }
-    // Signal caused spurious wakeup.
-    ASSERT_EQ(-1, result);
-    ASSERT_EQ(EINTR, errno);
-  }
-}
-
-#elif V8_OS_WIN
-
-Semaphore::Semaphore(int count) {
-  ASSERT(count >= 0);
-  native_handle_ = ::CreateSemaphoreA(NULL, count, 0x7fffffff, NULL);
-  ASSERT(native_handle_ != NULL);
-}
-
-
-Semaphore::~Semaphore() {
-  BOOL result = CloseHandle(native_handle_);
-  ASSERT(result);
-  USE(result);
-}
-
-
-void Semaphore::Signal() {
-  LONG dummy;
-  BOOL result = ReleaseSemaphore(native_handle_, 1, &dummy);
-  ASSERT(result);
-  USE(result);
-}
-
-
-void Semaphore::Wait() {
-  DWORD result = WaitForSingleObject(native_handle_, INFINITE);
-  ASSERT(result == WAIT_OBJECT_0);
-  USE(result);
-}
-
-
-bool Semaphore::WaitFor(const TimeDelta& rel_time) {
-  TimeTicks now = TimeTicks::Now();
-  TimeTicks end = now + rel_time;
-  while (true) {
-    int64_t msec = (end - now).InMilliseconds();
-    if (msec >= static_cast<int64_t>(INFINITE)) {
-      DWORD result = WaitForSingleObject(native_handle_, INFINITE - 1);
-      if (result == WAIT_OBJECT_0) {
-        return true;
-      }
-      ASSERT(result == WAIT_TIMEOUT);
-      now = TimeTicks::Now();
-    } else {
-      DWORD result = WaitForSingleObject(
-          native_handle_, (msec < 0) ? 0 : static_cast<DWORD>(msec));
-      if (result == WAIT_TIMEOUT) {
-        return false;
-      }
-      ASSERT(result == WAIT_OBJECT_0);
-      return true;
-    }
-  }
-}
-
-#endif  // V8_OS_MACOSX
-
-} }  // namespace v8::internal
diff --git a/src/platform/semaphore.h b/src/platform/semaphore.h
deleted file mode 100644
index 028af92..0000000
--- a/src/platform/semaphore.h
+++ /dev/null
@@ -1,103 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_SEMAPHORE_H_
-#define V8_PLATFORM_SEMAPHORE_H_
-
-#include "src/base/lazy-instance.h"
-#if V8_OS_WIN
-#include "src/base/win32-headers.h"
-#endif
-
-#if V8_OS_MACOSX
-#include <mach/semaphore.h>  // NOLINT
-#elif V8_OS_POSIX
-#include <semaphore.h>  // NOLINT
-#endif
-
-namespace v8 {
-namespace internal {
-
-// Forward declarations.
-class TimeDelta;
-
-// ----------------------------------------------------------------------------
-// Semaphore
-//
-// A semaphore object is a synchronization object that maintains a count. The
-// count is decremented each time a thread completes a wait for the semaphore
-// object and incremented each time a thread signals the semaphore. When the
-// count reaches zero, threads waiting for the semaphore block until the
-// count becomes non-zero.
-
-class Semaphore V8_FINAL {
- public:
-  explicit Semaphore(int count);
-  ~Semaphore();
-
-  // Increments the semaphore counter.
-  void Signal();
-
-  // Suspends the calling thread until the semaphore counter is non-zero
-  // and then decrements the semaphore counter.
-  void Wait();
-
-  // Suspends the calling thread until the counter is non-zero or the timeout
-  // has passed. If the timeout happens, the return value is false and the
-  // counter is unchanged. Otherwise the semaphore counter is decremented and
-  // true is returned.
-  bool WaitFor(const TimeDelta& rel_time) V8_WARN_UNUSED_RESULT;
-
-#if V8_OS_MACOSX
-  typedef semaphore_t NativeHandle;
-#elif V8_OS_POSIX
-  typedef sem_t NativeHandle;
-#elif V8_OS_WIN
-  typedef HANDLE NativeHandle;
-#endif
-
-  NativeHandle& native_handle() {
-    return native_handle_;
-  }
-  const NativeHandle& native_handle() const {
-    return native_handle_;
-  }
-
- private:
-  NativeHandle native_handle_;
-
-  DISALLOW_COPY_AND_ASSIGN(Semaphore);
-};
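-
-// Usage sketch (illustrative, not part of the original file; |enqueue| and
-// |dequeue| are hypothetical):
-//   Semaphore work_available(0);
-//
-//   // Producer thread:
-//   enqueue(item);
-//   work_available.Signal();
-//
-//   // Consumer thread:
-//   work_available.Wait();  // Blocks until at least one Signal().
-//   Item item = dequeue();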
-
-
-// POD Semaphore initialized lazily (i.e. the first time Pointer() is called).
-// Usage:
-//   // The following semaphore starts at 0.
-//   static LazySemaphore<0>::type my_semaphore = LAZY_SEMAPHORE_INITIALIZER;
-//
-//   void my_function() {
-//     // Do something with my_semaphore.Pointer().
-//   }
-//
-
-template <int N>
-struct CreateSemaphoreTrait {
-  static Semaphore* Create() {
-    return new Semaphore(N);
-  }
-};
-
-template <int N>
-struct LazySemaphore {
-  typedef typename v8::base::LazyDynamicInstance<
-      Semaphore,
-      CreateSemaphoreTrait<N>,
-      v8::base::ThreadSafeInitOnceTrait>::type type;
-};
-
-#define LAZY_SEMAPHORE_INITIALIZER LAZY_DYNAMIC_INSTANCE_INITIALIZER
-
-} }  // namespace v8::internal
-
-#endif  // V8_PLATFORM_SEMAPHORE_H_
diff --git a/src/platform/time.cc b/src/platform/time.cc
deleted file mode 100644
index 09b6f8a..0000000
--- a/src/platform/time.cc
+++ /dev/null
@@ -1,570 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/platform/time.h"
-
-#if V8_OS_POSIX
-#include <sys/time.h>
-#endif
-#if V8_OS_MACOSX
-#include <mach/mach_time.h>
-#endif
-
-#include <string.h>
-
-#if V8_OS_WIN
-#include "src/base/lazy-instance.h"
-#include "src/base/win32-headers.h"
-#endif
-#include "src/checks.h"
-#include "src/cpu.h"
-#include "src/platform.h"
-
-namespace v8 {
-namespace internal {
-
-TimeDelta TimeDelta::FromDays(int days) {
-  return TimeDelta(days * Time::kMicrosecondsPerDay);
-}
-
-
-TimeDelta TimeDelta::FromHours(int hours) {
-  return TimeDelta(hours * Time::kMicrosecondsPerHour);
-}
-
-
-TimeDelta TimeDelta::FromMinutes(int minutes) {
-  return TimeDelta(minutes * Time::kMicrosecondsPerMinute);
-}
-
-
-TimeDelta TimeDelta::FromSeconds(int64_t seconds) {
-  return TimeDelta(seconds * Time::kMicrosecondsPerSecond);
-}
-
-
-TimeDelta TimeDelta::FromMilliseconds(int64_t milliseconds) {
-  return TimeDelta(milliseconds * Time::kMicrosecondsPerMillisecond);
-}
-
-
-TimeDelta TimeDelta::FromNanoseconds(int64_t nanoseconds) {
-  return TimeDelta(nanoseconds / Time::kNanosecondsPerMicrosecond);
-}
-
-
-int TimeDelta::InDays() const {
-  return static_cast<int>(delta_ / Time::kMicrosecondsPerDay);
-}
-
-
-int TimeDelta::InHours() const {
-  return static_cast<int>(delta_ / Time::kMicrosecondsPerHour);
-}
-
-
-int TimeDelta::InMinutes() const {
-  return static_cast<int>(delta_ / Time::kMicrosecondsPerMinute);
-}
-
-
-double TimeDelta::InSecondsF() const {
-  return static_cast<double>(delta_) / Time::kMicrosecondsPerSecond;
-}
-
-
-int64_t TimeDelta::InSeconds() const {
-  return delta_ / Time::kMicrosecondsPerSecond;
-}
-
-
-double TimeDelta::InMillisecondsF() const {
-  return static_cast<double>(delta_) / Time::kMicrosecondsPerMillisecond;
-}
-
-
-int64_t TimeDelta::InMilliseconds() const {
-  return delta_ / Time::kMicrosecondsPerMillisecond;
-}
-
-
-int64_t TimeDelta::InNanoseconds() const {
-  return delta_ * Time::kNanosecondsPerMicrosecond;
-}
-
-
-#if V8_OS_MACOSX
-
-TimeDelta TimeDelta::FromMachTimespec(struct mach_timespec ts) {
-  ASSERT_GE(ts.tv_nsec, 0);
-  ASSERT_LT(ts.tv_nsec,
-            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
-  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
-                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-}
-
-
-struct mach_timespec TimeDelta::ToMachTimespec() const {
-  struct mach_timespec ts;
-  ASSERT(delta_ >= 0);
-  ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
-  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
-      Time::kNanosecondsPerMicrosecond;
-  return ts;
-}
-
-#endif  // V8_OS_MACOSX
-
-
-#if V8_OS_POSIX
-
-TimeDelta TimeDelta::FromTimespec(struct timespec ts) {
-  ASSERT_GE(ts.tv_nsec, 0);
-  ASSERT_LT(ts.tv_nsec,
-            static_cast<long>(Time::kNanosecondsPerSecond));  // NOLINT
-  return TimeDelta(ts.tv_sec * Time::kMicrosecondsPerSecond +
-                   ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-}
-
-
-struct timespec TimeDelta::ToTimespec() const {
-  struct timespec ts;
-  ts.tv_sec = delta_ / Time::kMicrosecondsPerSecond;
-  ts.tv_nsec = (delta_ % Time::kMicrosecondsPerSecond) *
-      Time::kNanosecondsPerMicrosecond;
-  return ts;
-}
-
-#endif  // V8_OS_POSIX
-
-
-#if V8_OS_WIN
-
-// We implement time using the high-resolution timers so that we can get
-// timeouts which are smaller than 10-15ms. To avoid any drift, we
-// periodically resync the internal clock to the system clock.
-class Clock V8_FINAL {
- public:
-  Clock() : initial_ticks_(GetSystemTicks()), initial_time_(GetSystemTime()) {}
-
-  Time Now() {
-    // Time between resampling the un-granular clock for this API (1 minute).
-    const TimeDelta kMaxElapsedTime = TimeDelta::FromMinutes(1);
-
-    LockGuard<Mutex> lock_guard(&mutex_);
-
-    // Determine current time and ticks.
-    TimeTicks ticks = GetSystemTicks();
-    Time time = GetSystemTime();
-
-    // Check if we need to synchronize with the system clock due to a backwards
-    // time change or the amount of time elapsed.
-    TimeDelta elapsed = ticks - initial_ticks_;
-    if (time < initial_time_ || elapsed > kMaxElapsedTime) {
-      initial_ticks_ = ticks;
-      initial_time_ = time;
-      return time;
-    }
-
-    return initial_time_ + elapsed;
-  }
-
-  Time NowFromSystemTime() {
-    LockGuard<Mutex> lock_guard(&mutex_);
-    initial_ticks_ = GetSystemTicks();
-    initial_time_ = GetSystemTime();
-    return initial_time_;
-  }
-
- private:
-  static TimeTicks GetSystemTicks() {
-    return TimeTicks::Now();
-  }
-
-  static Time GetSystemTime() {
-    FILETIME ft;
-    ::GetSystemTimeAsFileTime(&ft);
-    return Time::FromFiletime(ft);
-  }
-
-  TimeTicks initial_ticks_;
-  Time initial_time_;
-  Mutex mutex_;
-};
-
-
-static base::LazyStaticInstance<Clock, base::DefaultConstructTrait<Clock>,
-                                base::ThreadSafeInitOnceTrait>::type clock =
-    LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-Time Time::Now() {
-  return clock.Pointer()->Now();
-}
-
-
-Time Time::NowFromSystemTime() {
-  return clock.Pointer()->NowFromSystemTime();
-}
-
-
-// Time between the Windows epoch and the standard (Unix) epoch.
-static const int64_t kTimeToEpochInMicroseconds = V8_INT64_C(11644473600000000);
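-// (134774 days between 1601-01-01 and 1970-01-01, times 86400 seconds per
-// day, times 10^6 microseconds per second.)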
-
-
-Time Time::FromFiletime(FILETIME ft) {
-  if (ft.dwLowDateTime == 0 && ft.dwHighDateTime == 0) {
-    return Time();
-  }
-  if (ft.dwLowDateTime == std::numeric_limits<DWORD>::max() &&
-      ft.dwHighDateTime == std::numeric_limits<DWORD>::max()) {
-    return Max();
-  }
-  int64_t us = (static_cast<uint64_t>(ft.dwLowDateTime) +
-                (static_cast<uint64_t>(ft.dwHighDateTime) << 32)) / 10;
-  return Time(us - kTimeToEpochInMicroseconds);
-}
-
-
-FILETIME Time::ToFiletime() const {
-  ASSERT(us_ >= 0);
-  FILETIME ft;
-  if (IsNull()) {
-    ft.dwLowDateTime = 0;
-    ft.dwHighDateTime = 0;
-    return ft;
-  }
-  if (IsMax()) {
-    ft.dwLowDateTime = std::numeric_limits<DWORD>::max();
-    ft.dwHighDateTime = std::numeric_limits<DWORD>::max();
-    return ft;
-  }
-  uint64_t us = static_cast<uint64_t>(us_ + kTimeToEpochInMicroseconds) * 10;
-  ft.dwLowDateTime = static_cast<DWORD>(us);
-  ft.dwHighDateTime = static_cast<DWORD>(us >> 32);
-  return ft;
-}
-
-#elif V8_OS_POSIX
-
-Time Time::Now() {
-  struct timeval tv;
-  int result = gettimeofday(&tv, NULL);
-  ASSERT_EQ(0, result);
-  USE(result);
-  return FromTimeval(tv);
-}
-
-
-Time Time::NowFromSystemTime() {
-  return Now();
-}
-
-
-Time Time::FromTimespec(struct timespec ts) {
-  ASSERT(ts.tv_nsec >= 0);
-  ASSERT(ts.tv_nsec < static_cast<long>(kNanosecondsPerSecond));  // NOLINT
-  if (ts.tv_nsec == 0 && ts.tv_sec == 0) {
-    return Time();
-  }
-  if (ts.tv_nsec == static_cast<long>(kNanosecondsPerSecond - 1) &&  // NOLINT
-      ts.tv_sec == std::numeric_limits<time_t>::max()) {
-    return Max();
-  }
-  return Time(ts.tv_sec * kMicrosecondsPerSecond +
-              ts.tv_nsec / kNanosecondsPerMicrosecond);
-}
-
-
-struct timespec Time::ToTimespec() const {
-  struct timespec ts;
-  if (IsNull()) {
-    ts.tv_sec = 0;
-    ts.tv_nsec = 0;
-    return ts;
-  }
-  if (IsMax()) {
-    ts.tv_sec = std::numeric_limits<time_t>::max();
-    ts.tv_nsec = static_cast<long>(kNanosecondsPerSecond - 1);  // NOLINT
-    return ts;
-  }
-  ts.tv_sec = us_ / kMicrosecondsPerSecond;
-  ts.tv_nsec = (us_ % kMicrosecondsPerSecond) * kNanosecondsPerMicrosecond;
-  return ts;
-}
-
-
-Time Time::FromTimeval(struct timeval tv) {
-  ASSERT(tv.tv_usec >= 0);
-  ASSERT(tv.tv_usec < static_cast<suseconds_t>(kMicrosecondsPerSecond));
-  if (tv.tv_usec == 0 && tv.tv_sec == 0) {
-    return Time();
-  }
-  if (tv.tv_usec == static_cast<suseconds_t>(kMicrosecondsPerSecond - 1) &&
-      tv.tv_sec == std::numeric_limits<time_t>::max()) {
-    return Max();
-  }
-  return Time(tv.tv_sec * kMicrosecondsPerSecond + tv.tv_usec);
-}
-
-
-struct timeval Time::ToTimeval() const {
-  struct timeval tv;
-  if (IsNull()) {
-    tv.tv_sec = 0;
-    tv.tv_usec = 0;
-    return tv;
-  }
-  if (IsMax()) {
-    tv.tv_sec = std::numeric_limits<time_t>::max();
-    tv.tv_usec = static_cast<suseconds_t>(kMicrosecondsPerSecond - 1);
-    return tv;
-  }
-  tv.tv_sec = us_ / kMicrosecondsPerSecond;
-  tv.tv_usec = us_ % kMicrosecondsPerSecond;
-  return tv;
-}
-
-#endif  // V8_OS_WIN
-
-
-Time Time::FromJsTime(double ms_since_epoch) {
-  // The epoch is a valid time, so this constructor doesn't interpret
-  // 0 as the null time.
-  if (ms_since_epoch == std::numeric_limits<double>::max()) {
-    return Max();
-  }
-  return Time(
-      static_cast<int64_t>(ms_since_epoch * kMicrosecondsPerMillisecond));
-}
-
-
-double Time::ToJsTime() const {
-  if (IsNull()) {
-    // Preserve 0 so the invalid result doesn't depend on the platform.
-    return 0;
-  }
-  if (IsMax()) {
-    // Preserve max without offset to prevent overflow.
-    return std::numeric_limits<double>::max();
-  }
-  return static_cast<double>(us_) / kMicrosecondsPerMillisecond;
-}
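-// Illustrative sketch (editorial addition, not part of the original file):
-// JS times are doubles holding milliseconds since the epoch, so conversion
-// is a factor of kMicrosecondsPerMillisecond (1000) in each direction.
-//
-//   Time t = Time::FromJsTime(1388534400000.0);  // 2014-01-01T00:00:00Z.
-//   double ms = t.ToJsTime();                    // 1388534400000.0 again.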
-
-
-#if V8_OS_WIN
-
-class TickClock {
- public:
-  virtual ~TickClock() {}
-  virtual int64_t Now() = 0;
-  virtual bool IsHighResolution() = 0;
-};
-
-
-// Overview of time counters:
-// (1) CPU cycle counter (retrieved via RDTSC).
-// The CPU counter provides the highest-resolution time stamp and is the least
-// expensive to retrieve. However, the CPU counter is unreliable and should not
-// be used in production. Its biggest issue is that it is per processor and is
-// not synchronized between processors. Also, on some computers, the counters
-// will change frequency due to thermal and power changes, and can stop in some
-// states.
-//
-// (2) QueryPerformanceCounter (QPC). The QPC counter provides a high-
-// resolution (100 nanoseconds) time stamp but is comparatively more expensive
-// to retrieve. What QueryPerformanceCounter actually does is up to the HAL
-// (with some help from ACPI). According to
-// http://blogs.msdn.com/oldnewthing/archive/2005/09/02/459952.aspx
-// in the worst case, it gets the counter from the rollover interrupt on the
-// programmable interrupt timer. In the best case, the HAL may conclude that
-// the RDTSC counter runs at a constant frequency and use that instead. On
-// multiprocessor machines, it will try to verify that the values returned
-// from RDTSC on each processor are consistent with each other, and apply a
-// handful of workarounds for known buggy hardware. In other words, QPC is
-// supposed to give consistent results on a multiprocessor computer, but it is
-// unreliable in reality due to bugs in the BIOS or HAL on some machines,
-// especially older ones. With recent HAL updates and newer BIOSes, QPC is
-// getting more reliable, but it should still be used with caution.
-//
-// (3) System time. The system time provides a low-resolution (typically 10 to
-// 55 milliseconds) time stamp but is comparatively less expensive to retrieve
-// and more reliable.
-class HighResolutionTickClock V8_FINAL : public TickClock {
- public:
-  explicit HighResolutionTickClock(int64_t ticks_per_second)
-      : ticks_per_second_(ticks_per_second) {
-    ASSERT_LT(0, ticks_per_second);
-  }
-  virtual ~HighResolutionTickClock() {}
-
-  virtual int64_t Now() V8_OVERRIDE {
-    LARGE_INTEGER now;
-    BOOL result = QueryPerformanceCounter(&now);
-    ASSERT(result);
-    USE(result);
-
-    // Intentionally calculate microseconds in a roundabout manner to avoid
-    // overflow and precision issues. Think twice before simplifying!
-    int64_t whole_seconds = now.QuadPart / ticks_per_second_;
-    int64_t leftover_ticks = now.QuadPart % ticks_per_second_;
-    int64_t ticks = (whole_seconds * Time::kMicrosecondsPerSecond) +
-        ((leftover_ticks * Time::kMicrosecondsPerSecond) / ticks_per_second_);
-
-    // Make sure we never return 0 here, so that TimeTicks::HighResolutionNow()
-    // will never return 0.
-    return ticks + 1;
-  }
-
-  virtual bool IsHighResolution() V8_OVERRIDE {
-    return true;
-  }
-
- private:
-  int64_t ticks_per_second_;
-};
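-// Worked example (editorial addition, not part of the original file) of why
-// Now() above splits the computation: evaluating
-//   now.QuadPart * Time::kMicrosecondsPerSecond / ticks_per_second_
-// directly overflows int64 once QuadPart exceeds 2^63 / 10^6 ~= 9.2e12 ticks,
-// which at a 10 MHz QPC frequency is only ~10.7 days of uptime. Computing
-// whole seconds and leftover ticks separately keeps every intermediate
-// product within range.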
-
-
-class RolloverProtectedTickClock V8_FINAL : public TickClock {
- public:
-  // We initialize rollover_ms_ to 1 to ensure that we will never
-  // return 0 from TimeTicks::HighResolutionNow() and TimeTicks::Now() below.
-  RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
-  virtual ~RolloverProtectedTickClock() {}
-
-  virtual int64_t Now() V8_OVERRIDE {
-    LockGuard<Mutex> lock_guard(&mutex_);
-    // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
-    // every ~49.7 days. We try to track rollover ourselves, which works if
-    // TimeTicks::Now() is called at least every 49 days.
-    // Note that we do not use GetTickCount() here, since timeGetTime() gives
-    // more predictable delta values, as described here:
-    // http://blogs.msdn.com/b/larryosterman/archive/2009/09/02/what-s-the-difference-between-gettickcount-and-timegettime.aspx
-    // timeGetTime() provides 1ms granularity when combined with
-    // timeBeginPeriod(). If the host application for V8 wants fast timers, it
-    // can use timeBeginPeriod() to increase the resolution.
-    DWORD now = timeGetTime();
-    if (now < last_seen_now_) {
-      rollover_ms_ += V8_INT64_C(0x100000000);  // ~49.7 days.
-    }
-    last_seen_now_ = now;
-    return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
-  }
-
-  virtual bool IsHighResolution() V8_OVERRIDE {
-    return false;
-  }
-
- private:
-  Mutex mutex_;
-  DWORD last_seen_now_;
-  int64_t rollover_ms_;
-};
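-// Worked example (editorial addition, not part of the original file) of the
-// rollover handling above: timeGetTime() returns a DWORD of milliseconds,
-// which wraps at 2^32 ms ~= 49.7 days. If the previous sample was 0xFFFFFFF0
-// and the next is 0x00000010, then now < last_seen_now_, so 0x100000000 ms is
-// added to rollover_ms_ and (now + rollover_ms_) keeps increasing across the
-// wrap.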
-
-
-static base::LazyStaticInstance<
-    RolloverProtectedTickClock,
-    base::DefaultConstructTrait<RolloverProtectedTickClock>,
-    base::ThreadSafeInitOnceTrait>::type tick_clock =
-    LAZY_STATIC_INSTANCE_INITIALIZER;
-
-
-struct CreateHighResTickClockTrait {
-  static TickClock* Create() {
-    // Check if the installed hardware supports a high-resolution performance
-    // counter, and if not, fall back to the low-resolution tick clock.
-    LARGE_INTEGER ticks_per_second;
-    if (!QueryPerformanceFrequency(&ticks_per_second)) {
-      return tick_clock.Pointer();
-    }
-
-    // On Athlon X2 CPUs (e.g. model 15), QueryPerformanceCounter is
-    // unreliable, so fall back to the low-resolution tick clock.
-    CPU cpu;
-    if (strcmp(cpu.vendor(), "AuthenticAMD") == 0 && cpu.family() == 15) {
-      return tick_clock.Pointer();
-    }
-
-    return new HighResolutionTickClock(ticks_per_second.QuadPart);
-  }
-};
-
-
-static base::LazyDynamicInstance<TickClock,
-    CreateHighResTickClockTrait,
-    base::ThreadSafeInitOnceTrait>::type high_res_tick_clock =
-        LAZY_DYNAMIC_INSTANCE_INITIALIZER;
-
-
-TimeTicks TimeTicks::Now() {
-  // Make sure we never return 0 here.
-  TimeTicks ticks(tick_clock.Pointer()->Now());
-  ASSERT(!ticks.IsNull());
-  return ticks;
-}
-
-
-TimeTicks TimeTicks::HighResolutionNow() {
-  // Make sure we never return 0 here.
-  TimeTicks ticks(high_res_tick_clock.Pointer()->Now());
-  ASSERT(!ticks.IsNull());
-  return ticks;
-}
-
-
-// static
-bool TimeTicks::IsHighResolutionClockWorking() {
-  return high_res_tick_clock.Pointer()->IsHighResolution();
-}
-
-#else  // V8_OS_WIN
-
-TimeTicks TimeTicks::Now() {
-  return HighResolutionNow();
-}
-
-
-TimeTicks TimeTicks::HighResolutionNow() {
-  int64_t ticks;
-#if V8_OS_MACOSX
-  static struct mach_timebase_info info;
-  if (info.denom == 0) {
-    kern_return_t result = mach_timebase_info(&info);
-    ASSERT_EQ(KERN_SUCCESS, result);
-    USE(result);
-  }
-  ticks = (mach_absolute_time() / Time::kNanosecondsPerMicrosecond *
-           info.numer / info.denom);
-#elif V8_OS_SOLARIS
-  ticks = (gethrtime() / Time::kNanosecondsPerMicrosecond);
-#elif V8_LIBRT_NOT_AVAILABLE
-  // TODO(bmeurer): This is a temporary hack to support cross-compiling
-  // Chrome for Android in AOSP. Remove this once AOSP is fixed, also
-  // cleanup the tools/gyp/v8.gyp file.
-  struct timeval tv;
-  int result = gettimeofday(&tv, NULL);
-  ASSERT_EQ(0, result);
-  USE(result);
-  ticks = (tv.tv_sec * Time::kMicrosecondsPerSecond + tv.tv_usec);
-#elif V8_OS_POSIX
-  struct timespec ts;
-  int result = clock_gettime(CLOCK_MONOTONIC, &ts);
-  ASSERT_EQ(0, result);
-  USE(result);
-  ticks = (ts.tv_sec * Time::kMicrosecondsPerSecond +
-           ts.tv_nsec / Time::kNanosecondsPerMicrosecond);
-#endif  // V8_OS_MACOSX
-  // Make sure we never return 0 here.
-  return TimeTicks(ticks + 1);
-}
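-// Worked example (editorial addition, not part of the original file) of the
-// POSIX branch above: for ts = {tv_sec = 12, tv_nsec = 345678901},
-//   12 * 1000000 + 345678901 / 1000 = 12345678 us
-// (the sub-microsecond remainder is truncated), and the final +1 guarantees
-// a non-null TimeTicks.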
-
-
-// static
-bool TimeTicks::IsHighResolutionClockWorking() {
-  return true;
-}
-
-#endif  // V8_OS_WIN
-
-} }  // namespace v8::internal
diff --git a/src/platform/time.h b/src/platform/time.h
deleted file mode 100644
index 0cd234c..0000000
--- a/src/platform/time.h
+++ /dev/null
@@ -1,393 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_PLATFORM_TIME_H_
-#define V8_PLATFORM_TIME_H_
-
-#include <time.h>
-#include <limits>
-
-#include "src/allocation.h"
-
-// Forward declarations.
-extern "C" {
-struct _FILETIME;
-struct mach_timespec;
-struct timespec;
-struct timeval;
-}
-
-namespace v8 {
-namespace internal {
-
-class Time;
-class TimeTicks;
-
-// -----------------------------------------------------------------------------
-// TimeDelta
-//
-// This class represents a duration of time, internally represented in
-// microseconds.
-
-class TimeDelta V8_FINAL BASE_EMBEDDED {
- public:
-  TimeDelta() : delta_(0) {}
-
-  // Converts units of time to TimeDeltas.
-  static TimeDelta FromDays(int days);
-  static TimeDelta FromHours(int hours);
-  static TimeDelta FromMinutes(int minutes);
-  static TimeDelta FromSeconds(int64_t seconds);
-  static TimeDelta FromMilliseconds(int64_t milliseconds);
-  static TimeDelta FromMicroseconds(int64_t microseconds) {
-    return TimeDelta(microseconds);
-  }
-  static TimeDelta FromNanoseconds(int64_t nanoseconds);
-
-  // Returns the time delta in some unit. The F versions return a
-  // floating-point value; the "regular" versions return a rounded-down value.
-  //
-  // InMillisecondsRoundedUp() instead returns an integer that is rounded up
-  // to the next full millisecond.
-  int InDays() const;
-  int InHours() const;
-  int InMinutes() const;
-  double InSecondsF() const;
-  int64_t InSeconds() const;
-  double InMillisecondsF() const;
-  int64_t InMilliseconds() const;
-  int64_t InMillisecondsRoundedUp() const;
-  int64_t InMicroseconds() const { return delta_; }
-  int64_t InNanoseconds() const;
-
-  // Converts to/from Mach time specs.
-  static TimeDelta FromMachTimespec(struct mach_timespec ts);
-  struct mach_timespec ToMachTimespec() const;
-
-  // Converts to/from POSIX time specs.
-  static TimeDelta FromTimespec(struct timespec ts);
-  struct timespec ToTimespec() const;
-
-  TimeDelta& operator=(const TimeDelta& other) {
-    delta_ = other.delta_;
-    return *this;
-  }
-
-  // Computations with other deltas.
-  TimeDelta operator+(const TimeDelta& other) const {
-    return TimeDelta(delta_ + other.delta_);
-  }
-  TimeDelta operator-(const TimeDelta& other) const {
-    return TimeDelta(delta_ - other.delta_);
-  }
-
-  TimeDelta& operator+=(const TimeDelta& other) {
-    delta_ += other.delta_;
-    return *this;
-  }
-  TimeDelta& operator-=(const TimeDelta& other) {
-    delta_ -= other.delta_;
-    return *this;
-  }
-  TimeDelta operator-() const {
-    return TimeDelta(-delta_);
-  }
-
-  double TimesOf(const TimeDelta& other) const {
-    return static_cast<double>(delta_) / static_cast<double>(other.delta_);
-  }
-  double PercentOf(const TimeDelta& other) const {
-    return TimesOf(other) * 100.0;
-  }
-
-  // Computations with ints. Note that we only allow multiplicative operations
-  // with ints, and additive operations with other deltas.
-  TimeDelta operator*(int64_t a) const {
-    return TimeDelta(delta_ * a);
-  }
-  TimeDelta operator/(int64_t a) const {
-    return TimeDelta(delta_ / a);
-  }
-  TimeDelta& operator*=(int64_t a) {
-    delta_ *= a;
-    return *this;
-  }
-  TimeDelta& operator/=(int64_t a) {
-    delta_ /= a;
-    return *this;
-  }
-  int64_t operator/(const TimeDelta& other) const {
-    return delta_ / other.delta_;
-  }
-
-  // Comparison operators.
-  bool operator==(const TimeDelta& other) const {
-    return delta_ == other.delta_;
-  }
-  bool operator!=(const TimeDelta& other) const {
-    return delta_ != other.delta_;
-  }
-  bool operator<(const TimeDelta& other) const {
-    return delta_ < other.delta_;
-  }
-  bool operator<=(const TimeDelta& other) const {
-    return delta_ <= other.delta_;
-  }
-  bool operator>(const TimeDelta& other) const {
-    return delta_ > other.delta_;
-  }
-  bool operator>=(const TimeDelta& other) const {
-    return delta_ >= other.delta_;
-  }
-
- private:
-  // Constructs a delta given the duration in microseconds. This is private
-  // to keep callers from accidentally passing a bare integer. Use
-  // FromSeconds, FromMilliseconds, etc. instead.
-  explicit TimeDelta(int64_t delta) : delta_(delta) {}
-
-  // Delta in microseconds.
-  int64_t delta_;
-};
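-// Illustrative usage sketch (editorial addition, not part of the original
-// file):
-//   TimeDelta d = TimeDelta::FromSeconds(90);
-//   d += TimeDelta::FromMilliseconds(500);
-//   d.InMinutes();     // 1, rounded down from 90.5 seconds.
-//   d.InSecondsF();    // 90.5
-//   d.PercentOf(TimeDelta::FromMinutes(3));  // ~50.3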
-
-
-// -----------------------------------------------------------------------------
-// Time
-//
-// This class represents an absolute point in time, internally represented as
-// microseconds (1/1,000,000 of a second) since 00:00:00 UTC, January 1, 1970.
-
-class Time V8_FINAL BASE_EMBEDDED {
- public:
-  static const int64_t kMillisecondsPerSecond = 1000;
-  static const int64_t kMicrosecondsPerMillisecond = 1000;
-  static const int64_t kMicrosecondsPerSecond = kMicrosecondsPerMillisecond *
-                                                kMillisecondsPerSecond;
-  static const int64_t kMicrosecondsPerMinute = kMicrosecondsPerSecond * 60;
-  static const int64_t kMicrosecondsPerHour = kMicrosecondsPerMinute * 60;
-  static const int64_t kMicrosecondsPerDay = kMicrosecondsPerHour * 24;
-  static const int64_t kMicrosecondsPerWeek = kMicrosecondsPerDay * 7;
-  static const int64_t kNanosecondsPerMicrosecond = 1000;
-  static const int64_t kNanosecondsPerSecond = kNanosecondsPerMicrosecond *
-                                               kMicrosecondsPerSecond;
-
-  // Contains the NULL time. Use Time::Now() to get the current time.
-  Time() : us_(0) {}
-
-  // Returns true if the time object has not been initialized.
-  bool IsNull() const { return us_ == 0; }
-
-  // Returns true if the time object is the maximum time.
-  bool IsMax() const { return us_ == std::numeric_limits<int64_t>::max(); }
-
-  // Returns the current time. Watch out: the system might adjust its clock,
-  // in which case time will actually go backwards. We don't guarantee that
-  // times are increasing, or that two calls to Now() won't return the same
-  // value.
-  static Time Now();
-
-  // Returns the current time. Same as Now() except that this function always
-  // uses system time, so that there are no discrepancies between the returned
-  // time and system time, even in virtual environments such as our test bots.
-  // Timing-sensitive unit tests should use this function.
-  static Time NowFromSystemTime();
-
-  // Returns the time of the epoch in Unix-like systems (Jan 1, 1970).
-  static Time UnixEpoch() { return Time(0); }
-
-  // Returns the maximum time, which should be greater than any reasonable time
-  // with which we might compare it.
-  static Time Max() { return Time(std::numeric_limits<int64_t>::max()); }
-
-  // Converts to/from internal values. The meaning of the "internal value" is
-  // completely up to the implementation, so it should be treated as opaque.
-  static Time FromInternalValue(int64_t value) {
-    return Time(value);
-  }
-  int64_t ToInternalValue() const {
-    return us_;
-  }
-
-  // Converts to/from POSIX time specs.
-  static Time FromTimespec(struct timespec ts);
-  struct timespec ToTimespec() const;
-
-  // Converts to/from POSIX time values.
-  static Time FromTimeval(struct timeval tv);
-  struct timeval ToTimeval() const;
-
-  // Converts to/from Windows file times.
-  static Time FromFiletime(struct _FILETIME ft);
-  struct _FILETIME ToFiletime() const;
-
-  // Converts to/from the JavaScript convention for times: a number of
-  // milliseconds since the epoch.
-  static Time FromJsTime(double ms_since_epoch);
-  double ToJsTime() const;
-
-  Time& operator=(const Time& other) {
-    us_ = other.us_;
-    return *this;
-  }
-
-  // Compute the difference between two times.
-  TimeDelta operator-(const Time& other) const {
-    return TimeDelta::FromMicroseconds(us_ - other.us_);
-  }
-
-  // Modify by some time delta.
-  Time& operator+=(const TimeDelta& delta) {
-    us_ += delta.InMicroseconds();
-    return *this;
-  }
-  Time& operator-=(const TimeDelta& delta) {
-    us_ -= delta.InMicroseconds();
-    return *this;
-  }
-
-  // Return a new time modified by some delta.
-  Time operator+(const TimeDelta& delta) const {
-    return Time(us_ + delta.InMicroseconds());
-  }
-  Time operator-(const TimeDelta& delta) const {
-    return Time(us_ - delta.InMicroseconds());
-  }
-
-  // Comparison operators
-  bool operator==(const Time& other) const {
-    return us_ == other.us_;
-  }
-  bool operator!=(const Time& other) const {
-    return us_ != other.us_;
-  }
-  bool operator<(const Time& other) const {
-    return us_ < other.us_;
-  }
-  bool operator<=(const Time& other) const {
-    return us_ <= other.us_;
-  }
-  bool operator>(const Time& other) const {
-    return us_ > other.us_;
-  }
-  bool operator>=(const Time& other) const {
-    return us_ >= other.us_;
-  }
-
- private:
-  explicit Time(int64_t us) : us_(us) {}
-
-  // Time in microseconds in UTC.
-  int64_t us_;
-};
-
-inline Time operator+(const TimeDelta& delta, const Time& time) {
-  return time + delta;
-}
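-// Illustrative usage sketch (editorial addition, not part of the original
-// file):
-//   Time start = Time::Now();
-//   Time deadline = start + TimeDelta::FromMilliseconds(250);
-//   if (Time::Now() >= deadline) { /* Timed out. */ }
-//   TimeDelta waited = Time::Now() - start;  // Elapsed wall-clock time.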
-
-
-// -----------------------------------------------------------------------------
-// TimeTicks
-//
-// This class represents an abstract time that is almost always incrementing,
-// for use in measuring time durations. It is internally represented in
-// microseconds. It cannot be converted to a human-readable time, but is
-// guaranteed not to decrease (whereas, if the user changes the computer
-// clock, Time::Now() may actually decrease or jump). But note that TimeTicks
-// may "stand still", for example if the computer is suspended.
-
-class TimeTicks V8_FINAL BASE_EMBEDDED {
- public:
-  TimeTicks() : ticks_(0) {}
-
-  // Platform-dependent tick count representing "right now."
-  // The resolution of this clock is ~1-15ms.  Resolution varies depending
-  // on hardware/operating system configuration.
-  // This method never returns a null TimeTicks.
-  static TimeTicks Now();
-
-  // Returns a platform-dependent high-resolution tick count. Implementation
-  // is hardware dependent and may or may not return sub-millisecond
-  // resolution.  THIS CALL IS GENERALLY MUCH MORE EXPENSIVE THAN Now() AND
-  // SHOULD ONLY BE USED WHEN IT IS REALLY NEEDED.
-  // This method never returns a null TimeTicks.
-  static TimeTicks HighResolutionNow();
-
-  // Returns true if the high-resolution clock is working on this system.
-  static bool IsHighResolutionClockWorking();
-
-  // Returns true if this object has not been initialized.
-  bool IsNull() const { return ticks_ == 0; }
-
-  // Converts to/from internal values. The meaning of the "internal value" is
-  // completely up to the implementation, so it should be treated as opaque.
-  static TimeTicks FromInternalValue(int64_t value) {
-    return TimeTicks(value);
-  }
-  int64_t ToInternalValue() const {
-    return ticks_;
-  }
-
-  TimeTicks& operator=(const TimeTicks other) {
-    ticks_ = other.ticks_;
-    return *this;
-  }
-
-  // Compute the difference between two times.
-  TimeDelta operator-(const TimeTicks other) const {
-    return TimeDelta::FromMicroseconds(ticks_ - other.ticks_);
-  }
-
-  // Modify by some time delta.
-  TimeTicks& operator+=(const TimeDelta& delta) {
-    ticks_ += delta.InMicroseconds();
-    return *this;
-  }
-  TimeTicks& operator-=(const TimeDelta& delta) {
-    ticks_ -= delta.InMicroseconds();
-    return *this;
-  }
-
-  // Return a new TimeTicks modified by some delta.
-  TimeTicks operator+(const TimeDelta& delta) const {
-    return TimeTicks(ticks_ + delta.InMicroseconds());
-  }
-  TimeTicks operator-(const TimeDelta& delta) const {
-    return TimeTicks(ticks_ - delta.InMicroseconds());
-  }
-
-  // Comparison operators
-  bool operator==(const TimeTicks& other) const {
-    return ticks_ == other.ticks_;
-  }
-  bool operator!=(const TimeTicks& other) const {
-    return ticks_ != other.ticks_;
-  }
-  bool operator<(const TimeTicks& other) const {
-    return ticks_ < other.ticks_;
-  }
-  bool operator<=(const TimeTicks& other) const {
-    return ticks_ <= other.ticks_;
-  }
-  bool operator>(const TimeTicks& other) const {
-    return ticks_ > other.ticks_;
-  }
-  bool operator>=(const TimeTicks& other) const {
-    return ticks_ >= other.ticks_;
-  }
-
- private:
-  // Please use Now() to create a new object. This is for internal use
-  // and testing. Ticks are in microseconds.
-  explicit TimeTicks(int64_t ticks) : ticks_(ticks) {}
-
-  // Tick count in microseconds.
-  int64_t ticks_;
-};
-
-inline TimeTicks operator+(const TimeDelta& delta, const TimeTicks& ticks) {
-  return ticks + delta;
-}
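-// Illustrative usage sketch (editorial addition, not part of the original
-// file): TimeTicks, unlike Time, never goes backwards, so it is the right
-// clock for measuring durations:
-//   TimeTicks start = TimeTicks::HighResolutionNow();
-//   DoWork();  // Placeholder for the code being measured.
-//   TimeDelta elapsed = TimeTicks::HighResolutionNow() - start;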
-
-} }  // namespace v8::internal
-
-#endif  // V8_PLATFORM_TIME_H_
diff --git a/src/preparse-data.cc b/src/preparse-data.cc
index 5860e41..15509c0 100644
--- a/src/preparse-data.cc
+++ b/src/preparse-data.cc
@@ -3,13 +3,12 @@
 // found in the LICENSE file.
 
 #include "include/v8stdint.h"
-
-#include "src/preparse-data-format.h"
-#include "src/preparse-data.h"
-
-#include "src/checks.h"
+#include "src/base/logging.h"
+#include "src/compiler.h"
 #include "src/globals.h"
 #include "src/hashmap.h"
+#include "src/preparse-data.h"
+#include "src/preparse-data-format.h"
 
 namespace v8 {
 namespace internal {
@@ -24,7 +23,7 @@
   preamble_[PreparseDataConstants::kHasErrorOffset] = false;
   preamble_[PreparseDataConstants::kFunctionsSizeOffset] = 0;
   preamble_[PreparseDataConstants::kSizeOffset] = 0;
-  ASSERT_EQ(5, PreparseDataConstants::kHeaderSize);
+  DCHECK_EQ(5, PreparseDataConstants::kHeaderSize);
 #ifdef DEBUG
   prev_start_ = -1;
 #endif
@@ -36,7 +35,7 @@
                                         const char* message,
                                         const char* arg_opt,
                                         bool is_reference_error) {
-  if (has_error()) return;
+  if (HasError()) return;
   preamble_[PreparseDataConstants::kHasErrorOffset] = true;
   function_store_.Reset();
   STATIC_ASSERT(PreparseDataConstants::kMessageStartPos == 0);
@@ -61,17 +60,21 @@
 }
 
 
-Vector<unsigned> CompleteParserRecorder::ExtractData() {
+ScriptData* CompleteParserRecorder::GetScriptData() {
   int function_size = function_store_.size();
   int total_size = PreparseDataConstants::kHeaderSize + function_size;
-  Vector<unsigned> data = Vector<unsigned>::New(total_size);
+  unsigned* data = NewArray<unsigned>(total_size);
   preamble_[PreparseDataConstants::kFunctionsSizeOffset] = function_size;
-  MemCopy(data.start(), preamble_, sizeof(preamble_));
+  MemCopy(data, preamble_, sizeof(preamble_));
   if (function_size > 0) {
-    function_store_.WriteTo(data.SubVector(PreparseDataConstants::kHeaderSize,
-                                           total_size));
+    function_store_.WriteTo(Vector<unsigned>(
+        data + PreparseDataConstants::kHeaderSize, function_size));
   }
-  return data;
+  DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment));
+  ScriptData* result = new ScriptData(reinterpret_cast<byte*>(data),
+                                      total_size * sizeof(unsigned));
+  result->AcquireDataOwnership();
+  return result;
 }
 
 
diff --git a/src/preparse-data.h b/src/preparse-data.h
index 8978098..c1331d0 100644
--- a/src/preparse-data.h
+++ b/src/preparse-data.h
@@ -7,11 +7,14 @@
 
 #include "src/allocation.h"
 #include "src/hashmap.h"
+#include "src/preparse-data-format.h"
 #include "src/utils-inl.h"
 
 namespace v8 {
 namespace internal {
 
+class ScriptData;
+
 
 // Abstract interface for preparse data recorder.
 class ParserRecorder {
@@ -52,7 +55,7 @@
                            int literals,
                            int properties,
                            StrictMode strict_mode) {
-    ASSERT(!has_error_);
+    DCHECK(!has_error_);
     start_ = start;
     end_ = end;
     literals_ = literals;
@@ -82,24 +85,24 @@
   int start() const { return start_; }
   int end() const { return end_; }
   int literals() const {
-    ASSERT(!has_error_);
+    DCHECK(!has_error_);
     return literals_;
   }
   int properties() const {
-    ASSERT(!has_error_);
+    DCHECK(!has_error_);
     return properties_;
   }
   StrictMode strict_mode() const {
-    ASSERT(!has_error_);
+    DCHECK(!has_error_);
     return strict_mode_;
   }
   int is_reference_error() const { return is_reference_error_; }
   const char* message() {
-    ASSERT(has_error_);
+    DCHECK(has_error_);
     return message_;
   }
   const char* argument_opt() const {
-    ASSERT(has_error_);
+    DCHECK(has_error_);
     return argument_opt_;
   }
 
@@ -148,13 +151,17 @@
                           const char* message,
                           const char* argument_opt,
                           bool is_reference_error_);
-  Vector<unsigned> ExtractData();
+  ScriptData* GetScriptData();
 
- private:
-  bool has_error() {
+  bool HasError() {
     return static_cast<bool>(preamble_[PreparseDataConstants::kHasErrorOffset]);
   }
+  Vector<unsigned> ErrorMessageData() {
+    DCHECK(HasError());
+    return function_store_.ToVector();
+  }
 
+ private:
   void WriteString(Vector<const char> str);
 
   // Write a non-negative number to the symbol store.
diff --git a/src/preparser.cc b/src/preparser.cc
index 63462c8..3173cc0 100644
--- a/src/preparser.cc
+++ b/src/preparser.cc
@@ -7,14 +7,14 @@
 #include "include/v8stdint.h"
 
 #include "src/allocation.h"
-#include "src/checks.h"
-#include "src/conversions.h"
+#include "src/base/logging.h"
 #include "src/conversions-inl.h"
+#include "src/conversions.h"
 #include "src/globals.h"
 #include "src/hashmap.h"
 #include "src/list.h"
-#include "src/preparse-data-format.h"
 #include "src/preparse-data.h"
+#include "src/preparse-data-format.h"
 #include "src/preparser.h"
 #include "src/unicode.h"
 #include "src/utils.h"
@@ -32,6 +32,12 @@
 namespace v8 {
 namespace internal {
 
+class PreParserTraits::Checkpoint
+    : public ParserBase<PreParserTraits>::CheckpointBase {
+ public:
+  explicit Checkpoint(ParserBase<PreParserTraits>* parser)
+      : ParserBase<PreParserTraits>::CheckpointBase(parser) {}
+};
 
 void PreParserTraits::ReportMessageAt(Scanner::Location location,
                                       const char* message,
@@ -61,6 +67,8 @@
   } else if (scanner->current_token() ==
              Token::FUTURE_STRICT_RESERVED_WORD) {
     return PreParserIdentifier::FutureStrictReserved();
+  } else if (scanner->current_token() == Token::LET) {
+    return PreParserIdentifier::Let();
   } else if (scanner->current_token() == Token::YIELD) {
     return PreParserIdentifier::Yield();
   }
@@ -70,6 +78,17 @@
   if (scanner->UnescapedLiteralMatches("arguments", 9)) {
     return PreParserIdentifier::Arguments();
   }
+  if (scanner->UnescapedLiteralMatches("prototype", 9)) {
+    return PreParserIdentifier::Prototype();
+  }
+  if (scanner->UnescapedLiteralMatches("constructor", 11)) {
+    return PreParserIdentifier::Constructor();
+  }
+  return PreParserIdentifier::Default();
+}
+
+
+PreParserIdentifier PreParserTraits::GetNumberAsSymbol(Scanner* scanner) {
   return PreParserIdentifier::Default();
 }
 
@@ -89,16 +108,12 @@
 
 
 PreParserExpression PreParserTraits::ParseFunctionLiteral(
-    PreParserIdentifier name,
-    Scanner::Location function_name_location,
-    bool name_is_strict_reserved,
-    bool is_generator,
-    int function_token_position,
-    FunctionLiteral::FunctionType type,
-    FunctionLiteral::ArityRestriction arity_restriction,
-    bool* ok) {
+    PreParserIdentifier name, Scanner::Location function_name_location,
+    bool name_is_strict_reserved, FunctionKind kind,
+    int function_token_position, FunctionLiteral::FunctionType type,
+    FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
   return pre_parser_->ParseFunctionLiteral(
-      name, function_name_location, name_is_strict_reserved, is_generator,
+      name, function_name_location, name_is_strict_reserved, kind,
       function_token_position, type, arity_restriction, ok);
 }
 
@@ -108,12 +123,14 @@
   log_ = log;
   // Lazy functions always have trivial outer scopes (no with/catch scopes).
   PreParserScope top_scope(scope_, GLOBAL_SCOPE);
-  FunctionState top_state(&function_state_, &scope_, &top_scope);
+  FunctionState top_state(&function_state_, &scope_, &top_scope, NULL,
+                          this->ast_value_factory());
   scope_->SetStrictMode(strict_mode);
   PreParserScope function_scope(scope_, FUNCTION_SCOPE);
-  FunctionState function_state(&function_state_, &scope_, &function_scope);
+  FunctionState function_state(&function_state_, &scope_, &function_scope, NULL,
+                               this->ast_value_factory());
   function_state.set_is_generator(is_generator);
-  ASSERT_EQ(Token::LBRACE, scanner()->current_token());
+  DCHECK_EQ(Token::LBRACE, scanner()->current_token());
   bool ok = true;
   int start_position = peek_position();
   ParseLazyFunctionLiteralBody(&ok);
@@ -121,7 +138,7 @@
   if (!ok) {
     ReportUnexpectedToken(scanner()->current_token());
   } else {
-    ASSERT_EQ(Token::RBRACE, scanner()->peek());
+    DCHECK_EQ(Token::RBRACE, scanner()->peek());
     if (scope_->strict_mode() == STRICT) {
       int end_pos = scanner()->location().end_pos;
       CheckOctalLiteral(start_position, end_pos, &ok);
@@ -167,9 +184,16 @@
   switch (peek()) {
     case Token::FUNCTION:
       return ParseFunctionDeclaration(ok);
-    case Token::LET:
+    case Token::CLASS:
+      return ParseClassDeclaration(ok);
     case Token::CONST:
       return ParseVariableStatement(kSourceElement, ok);
+    case Token::LET:
+      DCHECK(allow_harmony_scoping());
+      if (strict_mode() == STRICT) {
+        return ParseVariableStatement(kSourceElement, ok);
+      }
+      // Fall through.
     default:
       return ParseStatement(ok);
   }
@@ -237,11 +261,6 @@
     case Token::LBRACE:
       return ParseBlock(ok);
 
-    case Token::CONST:
-    case Token::LET:
-    case Token::VAR:
-      return ParseVariableStatement(kStatement, ok);
-
     case Token::SEMICOLON:
       Next();
       return Statement::Default();
@@ -294,9 +313,22 @@
       }
     }
 
+    case Token::CLASS:
+      return ParseClassDeclaration(CHECK_OK);
+
     case Token::DEBUGGER:
       return ParseDebuggerStatement(ok);
 
+    case Token::VAR:
+    case Token::CONST:
+      return ParseVariableStatement(kStatement, ok);
+
+    case Token::LET:
+      DCHECK(allow_harmony_scoping());
+      if (strict_mode() == STRICT) {
+        return ParseVariableStatement(kStatement, ok);
+      }
+      // Fall through.
     default:
       return ParseExpressionOrLabelledStatement(ok);
   }
@@ -311,22 +343,31 @@
   //      '{' FunctionBody '}'
   Expect(Token::FUNCTION, CHECK_OK);
   int pos = position();
-  bool is_generator = allow_generators() && Check(Token::MUL);
+  bool is_generator = Check(Token::MUL);
   bool is_strict_reserved = false;
   Identifier name = ParseIdentifierOrStrictReservedWord(
       &is_strict_reserved, CHECK_OK);
-  ParseFunctionLiteral(name,
-                       scanner()->location(),
-                       is_strict_reserved,
-                       is_generator,
-                       pos,
-                       FunctionLiteral::DECLARATION,
-                       FunctionLiteral::NORMAL_ARITY,
-                       CHECK_OK);
+  ParseFunctionLiteral(name, scanner()->location(), is_strict_reserved,
+                       is_generator ? FunctionKind::kGeneratorFunction
+                                    : FunctionKind::kNormalFunction,
+                       pos, FunctionLiteral::DECLARATION,
+                       FunctionLiteral::NORMAL_ARITY, CHECK_OK);
   return Statement::FunctionDeclaration();
 }
 
 
+PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
+  Expect(Token::CLASS, CHECK_OK);
+  int pos = position();
+  bool is_strict_reserved = false;
+  Identifier name =
+      ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
+  ParseClassLiteral(name, scanner()->location(), is_strict_reserved, pos,
+                    CHECK_OK);
+  return Statement::Default();
+}
+
+
 PreParser::Statement PreParser::ParseBlock(bool* ok) {
   // Block ::
   //   '{' Statement* '}'
@@ -415,23 +456,9 @@
         return Statement::Default();
       }
     }
-  } else if (peek() == Token::LET) {
-    // ES6 Draft Rev4 section 12.2.1:
-    //
-    // LetDeclaration : let LetBindingList ;
-    //
-    // * It is a Syntax Error if the code that matches this production is not
-    //   contained in extended code.
-    //
-    // TODO(rossberg): make 'let' a legal identifier in sloppy mode.
-    if (!allow_harmony_scoping() || strict_mode() == SLOPPY) {
-      ReportMessageAt(scanner()->peek_location(), "illegal_let");
-      *ok = false;
-      return Statement::Default();
-    }
+  } else if (peek() == Token::LET && strict_mode() == STRICT) {
     Consume(Token::LET);
-    if (var_context != kSourceElement &&
-        var_context != kForStatement) {
+    if (var_context != kSourceElement && var_context != kForStatement) {
       ReportMessageAt(scanner()->peek_location(), "unprotected_let");
       *ok = false;
       return Statement::Default();
@@ -476,8 +503,8 @@
   if (starts_with_identifier && expr.IsIdentifier() && peek() == Token::COLON) {
     // Expression is a single identifier, and not, e.g., a parenthesized
     // identifier.
-    ASSERT(!expr.AsIdentifier().IsFutureReserved());
-    ASSERT(strict_mode() == SLOPPY ||
+    DCHECK(!expr.AsIdentifier().IsFutureReserved());
+    DCHECK(strict_mode() == SLOPPY ||
            (!expr.AsIdentifier().IsFutureStrictReserved() &&
             !expr.AsIdentifier().IsYield()));
     Consume(Token::COLON);
@@ -653,8 +680,7 @@
 
 bool PreParser::CheckInOrOf(bool accept_OF) {
   if (Check(Token::IN) ||
-      (allow_for_of() && accept_OF &&
-       CheckContextualKeyword(CStrVector("of")))) {
+      (accept_OF && CheckContextualKeyword(CStrVector("of")))) {
     return true;
   }
   return false;
@@ -669,7 +695,7 @@
   Expect(Token::LPAREN, CHECK_OK);
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR || peek() == Token::CONST ||
-        peek() == Token::LET) {
+        (peek() == Token::LET && strict_mode() == STRICT)) {
       bool is_let = peek() == Token::LET;
       int decl_count;
       VariableDeclarationProperties decl_props = kHasNoInitializers;
@@ -795,22 +821,19 @@
 
 
 PreParser::Expression PreParser::ParseFunctionLiteral(
-    Identifier function_name,
-    Scanner::Location function_name_location,
-    bool name_is_strict_reserved,
-    bool is_generator,
-    int function_token_pos,
+    Identifier function_name, Scanner::Location function_name_location,
+    bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
     FunctionLiteral::FunctionType function_type,
-    FunctionLiteral::ArityRestriction arity_restriction,
-    bool* ok) {
+    FunctionLiteral::ArityRestriction arity_restriction, bool* ok) {
   // Function ::
   //   '(' FormalParameterList? ')' '{' FunctionBody '}'
 
   // Parse function body.
   ScopeType outer_scope_type = scope_->type();
   PreParserScope function_scope(scope_, FUNCTION_SCOPE);
-  FunctionState function_state(&function_state_, &scope_, &function_scope);
-  function_state.set_is_generator(is_generator);
+  FunctionState function_state(&function_state_, &scope_, &function_scope, NULL,
+                               this->ast_value_factory());
+  function_state.set_is_generator(IsGeneratorFunction(kind));
   //  FormalParameterList ::
   //    '(' (Identifier)*[','] ')'
   Expect(Token::LPAREN, CHECK_OK);
@@ -865,7 +888,8 @@
 
   // Validate strict mode. We can do this only after parsing the function,
   // since the function can declare itself strict.
-  if (strict_mode() == STRICT) {
+  // Concise methods use StrictFormalParameters.
+  if (strict_mode() == STRICT || IsConciseMethod(kind)) {
     if (function_name.IsEvalOrArguments()) {
       ReportMessageAt(function_name_location, "strict_eval_arguments");
       *ok = false;
@@ -906,7 +930,7 @@
   if (!*ok) return;
 
   // Position right after terminal '}'.
-  ASSERT_EQ(Token::RBRACE, scanner()->peek());
+  DCHECK_EQ(Token::RBRACE, scanner()->peek());
   int body_end = scanner()->peek_location().end_pos;
   log_->LogFunction(body_start, body_end,
                     function_state_->materialized_literal_count(),
diff --git a/src/preparser.h b/src/preparser.h
index 94f4279..78f6a26 100644
--- a/src/preparser.h
+++ b/src/preparser.h
@@ -5,12 +5,14 @@
 #ifndef V8_PREPARSER_H
 #define V8_PREPARSER_H
 
+#include "src/v8.h"
+
+#include "src/bailout-reason.h"
 #include "src/func-name-inferrer.h"
 #include "src/hashmap.h"
+#include "src/scanner.h"
 #include "src/scopes.h"
 #include "src/token.h"
-#include "src/scanner.h"
-#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
@@ -29,7 +31,7 @@
 // interface as AstNodeFactory, so ParserBase doesn't need to care which one is
 // used.
 
-// - Miscellanous other tasks interleaved with the recursive descent. For
+// - Miscellaneous other tasks interleaved with the recursive descent. For
 // example, Parser keeps track of which function literals should be marked as
 // pretenured, and PreParser doesn't care.
 
@@ -45,6 +47,7 @@
 //     typedef Identifier;
 //     typedef Expression;
 //     typedef FunctionLiteral;
+//     typedef ClassLiteral;
 //     typedef ObjectLiteralProperty;
 //     typedef Literal;
 //     typedef ExpressionList;
@@ -61,11 +64,13 @@
   // Shorten type names defined by Traits.
   typedef typename Traits::Type::Expression ExpressionT;
   typedef typename Traits::Type::Identifier IdentifierT;
+  typedef typename Traits::Type::FunctionLiteral FunctionLiteralT;
+  typedef typename Traits::Type::Literal LiteralT;
+  typedef typename Traits::Type::ObjectLiteralProperty ObjectLiteralPropertyT;
 
-  ParserBase(Scanner* scanner, uintptr_t stack_limit,
-             v8::Extension* extension,
-             ParserRecorder* log,
-             typename Traits::Type::Zone* zone,
+  ParserBase(Scanner* scanner, uintptr_t stack_limit, v8::Extension* extension,
+             ParserRecorder* log, typename Traits::Type::Zone* zone,
+             AstNode::IdGen* ast_node_id_gen,
              typename Traits::Type::Parser this_object)
       : Traits(this_object),
         parenthesized_function_(false),
@@ -75,33 +80,36 @@
         fni_(NULL),
         log_(log),
         mode_(PARSE_EAGERLY),  // Lazy mode must be set explicitly.
-        scanner_(scanner),
         stack_limit_(stack_limit),
+        scanner_(scanner),
         stack_overflow_(false),
         allow_lazy_(false),
         allow_natives_syntax_(false),
-        allow_generators_(false),
-        allow_for_of_(false),
-        zone_(zone) { }
+        allow_arrow_functions_(false),
+        allow_harmony_object_literals_(false),
+        zone_(zone),
+        ast_node_id_gen_(ast_node_id_gen) {}
 
   // Getters that indicate whether certain syntactical constructs are
   // allowed to be parsed by this instance of the parser.
   bool allow_lazy() const { return allow_lazy_; }
   bool allow_natives_syntax() const { return allow_natives_syntax_; }
-  bool allow_generators() const { return allow_generators_; }
-  bool allow_for_of() const { return allow_for_of_; }
+  bool allow_arrow_functions() const { return allow_arrow_functions_; }
   bool allow_modules() const { return scanner()->HarmonyModules(); }
   bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); }
   bool allow_harmony_numeric_literals() const {
     return scanner()->HarmonyNumericLiterals();
   }
+  bool allow_classes() const { return scanner()->HarmonyClasses(); }
+  bool allow_harmony_object_literals() const {
+    return allow_harmony_object_literals_;
+  }
 
   // Setters that determine whether certain syntactical constructs are
   // allowed to be parsed by this instance of the parser.
   void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
   void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
-  void set_allow_generators(bool allow) { allow_generators_ = allow; }
-  void set_allow_for_of(bool allow) { allow_for_of_ = allow; }
+  void set_allow_arrow_functions(bool allow) { allow_arrow_functions_ = allow; }
   void set_allow_modules(bool allow) { scanner()->SetHarmonyModules(allow); }
   void set_allow_harmony_scoping(bool allow) {
     scanner()->SetHarmonyScoping(allow);
@@ -109,8 +117,14 @@
   void set_allow_harmony_numeric_literals(bool allow) {
     scanner()->SetHarmonyNumericLiterals(allow);
   }
+  void set_allow_classes(bool allow) { scanner()->SetHarmonyClasses(allow); }
+  void set_allow_harmony_object_literals(bool allow) {
+    allow_harmony_object_literals_ = allow;
+  }
 
  protected:
+  friend class Traits::Checkpoint;
+
   enum AllowEvalOrArgumentsAsIdentifier {
     kAllowEvalOrArguments,
     kDontAllowEvalOrArguments
@@ -121,6 +135,9 @@
     PARSE_EAGERLY
   };
 
+  class CheckpointBase;
+  class ObjectLiteralChecker;
+
   // ---------------------------------------------------------------------------
   // FunctionState and BlockState together implement the parser's scope stack.
   // The parser's current scope is in scope_. BlockState and FunctionState
@@ -145,11 +162,18 @@
 
   class FunctionState BASE_EMBEDDED {
    public:
-    FunctionState(
-        FunctionState** function_state_stack,
-        typename Traits::Type::Scope** scope_stack,
-        typename Traits::Type::Scope* scope,
-        typename Traits::Type::Zone* zone = NULL);
+    FunctionState(FunctionState** function_state_stack,
+                  typename Traits::Type::Scope** scope_stack,
+                  typename Traits::Type::Scope* scope,
+                  typename Traits::Type::Zone* zone = NULL,
+                  AstValueFactory* ast_value_factory = NULL,
+                  AstNode::IdGen* ast_node_id_gen = NULL);
+    FunctionState(FunctionState** function_state_stack,
+                  typename Traits::Type::Scope** scope_stack,
+                  typename Traits::Type::Scope** scope,
+                  typename Traits::Type::Zone* zone = NULL,
+                  AstValueFactory* ast_value_factory = NULL,
+                  AstNode::IdGen* ast_node_id_gen = NULL);
     ~FunctionState();
 
     int NextMaterializedLiteralIndex() {
@@ -170,8 +194,8 @@
 
     void set_generator_object_variable(
         typename Traits::Type::GeneratorVariable* variable) {
-      ASSERT(variable != NULL);
-      ASSERT(!is_generator());
+      DCHECK(variable != NULL);
+      DCHECK(!is_generator());
       generator_object_variable_ = variable;
       is_generator_ = true;
     }
@@ -205,11 +229,41 @@
     FunctionState* outer_function_state_;
     typename Traits::Type::Scope** scope_stack_;
     typename Traits::Type::Scope* outer_scope_;
-    int saved_ast_node_id_;  // Only used by ParserTraits.
+    AstNode::IdGen* ast_node_id_gen_;  // Only used by ParserTraits.
+    AstNode::IdGen saved_id_gen_;      // Ditto.
     typename Traits::Type::Zone* extra_param_;
     typename Traits::Type::Factory factory_;
 
     friend class ParserTraits;
+    friend class CheckpointBase;
+  };
+
+  // Annoyingly, arrow functions first parse as comma expressions, and then,
+  // when we see the =>, we have to go back and reinterpret the arguments as
+  // formal parameters. To do so we need to reset some of the parser state
+  // back to what it was before the arguments were first seen.
+  class CheckpointBase BASE_EMBEDDED {
+   public:
+    explicit CheckpointBase(ParserBase* parser) {
+      function_state_ = parser->function_state_;
+      next_materialized_literal_index_ =
+          function_state_->next_materialized_literal_index_;
+      next_handler_index_ = function_state_->next_handler_index_;
+      expected_property_count_ = function_state_->expected_property_count_;
+    }
+
+    void Restore() {
+      function_state_->next_materialized_literal_index_ =
+          next_materialized_literal_index_;
+      function_state_->next_handler_index_ = next_handler_index_;
+      function_state_->expected_property_count_ = expected_property_count_;
+    }
+
+   private:
+    FunctionState* function_state_;
+    int next_materialized_literal_index_;
+    int next_handler_index_;
+    int expected_property_count_;
   };
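+  // Illustrative usage sketch (editorial addition, not part of the original
+  // change): a caller saves a checkpoint before parsing what may turn out to
+  // be an arrow parameter list, and restores the literal/handler counters if
+  // the expression must be reinterpreted as formal parameters:
+  //   CheckpointBase checkpoint(this);
+  //   ExpressionT expr = this->ParseExpression(true, ok);
+  //   if (peek() == Token::ARROW) checkpoint.Restore();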
 
   class ParsingModeScope BASE_EMBEDDED {
@@ -235,6 +289,7 @@
   void set_stack_overflow() { stack_overflow_ = true; }
   Mode mode() const { return mode_; }
   typename Traits::Type::Zone* zone() const { return zone_; }
+  AstNode::IdGen* ast_node_id_gen() const { return ast_node_id_gen_; }
 
   INLINE(Token::Value peek()) {
     if (stack_overflow_) return Token::ILLEGAL;
@@ -244,8 +299,7 @@
   INLINE(Token::Value Next()) {
     if (stack_overflow_) return Token::ILLEGAL;
     {
-      int marker;
-      if (reinterpret_cast<uintptr_t>(&marker) < stack_limit_) {
+      if (GetCurrentStackPosition() < stack_limit_) {
         // Any further calls to Next or peek will return the illegal token.
         // The current call must return the next token, which might already
         // have been peek'ed.
@@ -259,7 +313,7 @@
     Token::Value next = Next();
     USE(next);
     USE(token);
-    ASSERT(next == token);
+    DCHECK(next == token);
   }
 
   bool Check(Token::Value token) {
@@ -300,6 +354,7 @@
     return next == Token::IDENTIFIER ||
         next == Token::FUTURE_RESERVED_WORD ||
         next == Token::FUTURE_STRICT_RESERVED_WORD ||
+        next == Token::LET ||
         next == Token::YIELD;
   }
 
@@ -333,6 +388,44 @@
     }
   }
 
+  // Validates strict mode for function parameter lists. This has to be
+  // done after parsing the function, since the function can declare
+  // itself strict.
+  void CheckStrictFunctionNameAndParameters(
+      IdentifierT function_name,
+      bool function_name_is_strict_reserved,
+      const Scanner::Location& function_name_loc,
+      const Scanner::Location& eval_args_error_loc,
+      const Scanner::Location& dupe_error_loc,
+      const Scanner::Location& reserved_loc,
+      bool* ok) {
+    if (this->IsEvalOrArguments(function_name)) {
+      Traits::ReportMessageAt(function_name_loc, "strict_eval_arguments");
+      *ok = false;
+      return;
+    }
+    if (function_name_is_strict_reserved) {
+      Traits::ReportMessageAt(function_name_loc, "unexpected_strict_reserved");
+      *ok = false;
+      return;
+    }
+    if (eval_args_error_loc.IsValid()) {
+      Traits::ReportMessageAt(eval_args_error_loc, "strict_eval_arguments");
+      *ok = false;
+      return;
+    }
+    if (dupe_error_loc.IsValid()) {
+      Traits::ReportMessageAt(dupe_error_loc, "strict_param_dupe");
+      *ok = false;
+      return;
+    }
+    if (reserved_loc.IsValid()) {
+      Traits::ReportMessageAt(reserved_loc, "unexpected_strict_reserved");
+      *ok = false;
+      return;
+    }
+  }
+
   // Determine precedence of given token.
   static int Precedence(Token::Value token, bool accept_IN) {
     if (token == Token::IN && !accept_IN)
@@ -356,7 +449,9 @@
 
   void ReportMessageAt(Scanner::Location location, const char* message,
                        bool is_reference_error = false) {
-    Traits::ReportMessageAt(location, message, NULL, is_reference_error);
+    Traits::ReportMessageAt(location, message,
+                            reinterpret_cast<const char*>(NULL),
+                            is_reference_error);
   }
 
   void ReportUnexpectedToken(Token::Value token);
@@ -387,7 +482,12 @@
   ExpressionT ParsePrimaryExpression(bool* ok);
   ExpressionT ParseExpression(bool accept_IN, bool* ok);
   ExpressionT ParseArrayLiteral(bool* ok);
+  IdentifierT ParsePropertyName(bool* is_get, bool* is_set, bool* is_static,
+                                bool* ok);
   ExpressionT ParseObjectLiteral(bool* ok);
+  ObjectLiteralPropertyT ParsePropertyDefinition(ObjectLiteralChecker* checker,
+                                                 bool in_class, bool is_static,
+                                                 bool* ok);
   typename Traits::Type::ExpressionList ParseArguments(bool* ok);
   ExpressionT ParseAssignmentExpression(bool accept_IN, bool* ok);
   ExpressionT ParseYieldExpression(bool* ok);
@@ -400,6 +500,12 @@
   ExpressionT ParseMemberExpression(bool* ok);
   ExpressionT ParseMemberExpressionContinuation(ExpressionT expression,
                                                 bool* ok);
+  ExpressionT ParseArrowFunctionLiteral(int start_pos, ExpressionT params_ast,
+                                        bool* ok);
+  ExpressionT ParseClassLiteral(IdentifierT name,
+                                Scanner::Location function_name_location,
+                                bool name_is_strict_reserved, int pos,
+                                bool* ok);
 
   // Checks if the expression is a valid reference expression (e.g., on the
   // left-hand side of assignments). Although ruled out by ECMA as early errors,
@@ -429,13 +535,13 @@
     kValueFlag = 4
   };
 
-  // Validation per ECMA 262 - 11.1.5 "Object Initialiser".
+  // Validation per ECMA 262 - 11.1.5 "Object Initializer".
   class ObjectLiteralChecker {
    public:
     ObjectLiteralChecker(ParserBase* parser, StrictMode strict_mode)
         : parser_(parser),
           finder_(scanner()->unicode_cache()),
-          strict_mode_(strict_mode) { }
+          strict_mode_(strict_mode) {}
 
     void CheckProperty(Token::Value property, PropertyKind type, bool* ok);
 
@@ -474,18 +580,19 @@
   FuncNameInferrer* fni_;
   ParserRecorder* log_;
   Mode mode_;
+  uintptr_t stack_limit_;
 
  private:
   Scanner* scanner_;
-  uintptr_t stack_limit_;
   bool stack_overflow_;
 
   bool allow_lazy_;
   bool allow_natives_syntax_;
-  bool allow_generators_;
-  bool allow_for_of_;
+  bool allow_arrow_functions_;
+  bool allow_harmony_object_literals_;
 
   typename Traits::Type::Zone* zone_;  // Only used by Parser.
+  AstNode::IdGen* ast_node_id_gen_;
 };
 
 
@@ -507,32 +614,57 @@
   static PreParserIdentifier FutureStrictReserved() {
     return PreParserIdentifier(kFutureStrictReservedIdentifier);
   }
+  static PreParserIdentifier Let() {
+    return PreParserIdentifier(kLetIdentifier);
+  }
   static PreParserIdentifier Yield() {
     return PreParserIdentifier(kYieldIdentifier);
   }
-  bool IsEval() { return type_ == kEvalIdentifier; }
-  bool IsArguments() { return type_ == kArgumentsIdentifier; }
-  bool IsEvalOrArguments() { return type_ >= kEvalIdentifier; }
-  bool IsYield() { return type_ == kYieldIdentifier; }
-  bool IsFutureReserved() { return type_ == kFutureReservedIdentifier; }
-  bool IsFutureStrictReserved() {
+  static PreParserIdentifier Prototype() {
+    return PreParserIdentifier(kPrototypeIdentifier);
+  }
+  static PreParserIdentifier Constructor() {
+    return PreParserIdentifier(kConstructorIdentifier);
+  }
+  bool IsEval() const { return type_ == kEvalIdentifier; }
+  bool IsArguments() const { return type_ == kArgumentsIdentifier; }
+  bool IsYield() const { return type_ == kYieldIdentifier; }
+  bool IsPrototype() const { return type_ == kPrototypeIdentifier; }
+  bool IsConstructor() const { return type_ == kConstructorIdentifier; }
+  bool IsEvalOrArguments() const {
+    return type_ == kEvalIdentifier || type_ == kArgumentsIdentifier;
+  }
+  bool IsFutureReserved() const { return type_ == kFutureReservedIdentifier; }
+  bool IsFutureStrictReserved() const {
     return type_ == kFutureStrictReservedIdentifier;
   }
-  bool IsValidStrictVariable() { return type_ == kUnknownIdentifier; }
+  bool IsValidStrictVariable() const { return type_ == kUnknownIdentifier; }
+
+  // Allow identifier->name()[->length()] to work. The preparser
+  // does not need the actual positions/lengths of the identifiers.
+  const PreParserIdentifier* operator->() const { return this; }
+  const PreParserIdentifier raw_name() const { return *this; }
+
+  int position() const { return 0; }
+  int length() const { return 0; }
 
  private:
   enum Type {
     kUnknownIdentifier,
     kFutureReservedIdentifier,
     kFutureStrictReservedIdentifier,
+    kLetIdentifier,
     kYieldIdentifier,
     kEvalIdentifier,
-    kArgumentsIdentifier
+    kArgumentsIdentifier,
+    kPrototypeIdentifier,
+    kConstructorIdentifier
   };
   explicit PreParserIdentifier(Type type) : type_(type) {}
   Type type_;
 
   friend class PreParserExpression;
+  friend class PreParserScope;
 };
 
 
@@ -548,10 +680,26 @@
   }
 
   static PreParserExpression FromIdentifier(PreParserIdentifier id) {
-    return PreParserExpression(kIdentifierFlag |
+    return PreParserExpression(kTypeIdentifier |
                                (id.type_ << kIdentifierShift));
   }
 
+  static PreParserExpression BinaryOperation(PreParserExpression left,
+                                             Token::Value op,
+                                             PreParserExpression right) {
+    int code = ((op == Token::COMMA) && !left.is_parenthesized() &&
+                !right.is_parenthesized())
+                   ? left.ArrowParamListBit() & right.ArrowParamListBit()
+                   : 0;
+    return PreParserExpression(kTypeBinaryOperation | code);
+  }
+
+  static PreParserExpression EmptyArrowParamList() {
+    // Any expression for which IsValidArrowParamList() returns true
+    // will work here.
+    return FromIdentifier(PreParserIdentifier::Default());
+  }
+
   static PreParserExpression StringLiteral() {
     return PreParserExpression(kUnknownStringLiteral);
   }
@@ -564,6 +712,10 @@
     return PreParserExpression(kThisExpression);
   }
 
+  static PreParserExpression Super() {
+    return PreParserExpression(kSuperExpression);
+  }
+
   static PreParserExpression ThisProperty() {
     return PreParserExpression(kThisPropertyExpression);
   }
@@ -576,40 +728,63 @@
     return PreParserExpression(kCallExpression);
   }
 
-  bool IsIdentifier() { return (code_ & kIdentifierFlag) != 0; }
+  bool IsIdentifier() const { return (code_ & kTypeMask) == kTypeIdentifier; }
 
-  PreParserIdentifier AsIdentifier() {
-    ASSERT(IsIdentifier());
+  PreParserIdentifier AsIdentifier() const {
+    DCHECK(IsIdentifier());
     return PreParserIdentifier(
         static_cast<PreParserIdentifier::Type>(code_ >> kIdentifierShift));
   }
 
-  bool IsStringLiteral() { return (code_ & kStringLiteralFlag) != 0; }
-
-  bool IsUseStrictLiteral() {
-    return (code_ & kStringLiteralMask) == kUseStrictString;
+  bool IsStringLiteral() const {
+    return (code_ & kTypeMask) == kTypeStringLiteral;
   }
 
-  bool IsThis() { return code_ == kThisExpression; }
-
-  bool IsThisProperty() { return code_ == kThisPropertyExpression; }
-
-  bool IsProperty() {
-    return code_ == kPropertyExpression || code_ == kThisPropertyExpression;
+  bool IsUseStrictLiteral() const {
+    return (code_ & kUseStrictString) == kUseStrictString;
   }
 
-  bool IsCall() { return code_ == kCallExpression; }
+  bool IsThis() const { return (code_ & kThisExpression) == kThisExpression; }
 
-  bool IsValidReferenceExpression() {
+  bool IsThisProperty() const {
+    return (code_ & kThisPropertyExpression) == kThisPropertyExpression;
+  }
+
+  bool IsProperty() const {
+    return (code_ & kPropertyExpression) == kPropertyExpression ||
+           (code_ & kThisPropertyExpression) == kThisPropertyExpression;
+  }
+
+  bool IsCall() const { return (code_ & kCallExpression) == kCallExpression; }
+
+  bool IsValidReferenceExpression() const {
     return IsIdentifier() || IsProperty();
   }
 
+  bool IsValidArrowParamList() const {
+    return (ArrowParamListBit() & kBinaryOperationArrowParamList) != 0 &&
+           (code_ & kMultiParenthesizedExpression) == 0;
+  }
+
   // At the moment PreParser doesn't track these expression types.
   bool IsFunctionLiteral() const { return false; }
   bool IsCallNew() const { return false; }
 
   PreParserExpression AsFunctionLiteral() { return *this; }
 
+  bool IsBinaryOperation() const {
+    return (code_ & kTypeMask) == kTypeBinaryOperation;
+  }
+
+  bool is_parenthesized() const {
+    return (code_ & kParenthesizedExpression) != 0;
+  }
+
+  void increase_parenthesization_level() {
+    code_ |= is_parenthesized() ? kMultiParenthesizedExpression
+                                : kParenthesizedExpression;
+  }
+
   // Dummy implementation for making expression->somefunc() work in both Parser
   // and PreParser.
   PreParserExpression* operator->() { return this; }
@@ -618,33 +793,70 @@
   void set_index(int index) {}  // For YieldExpressions
   void set_parenthesized() {}
 
+  int position() const { return RelocInfo::kNoPosition; }
+  void set_function_token_position(int position) {}
+  void set_ast_properties(int* ast_properties) {}
+  void set_dont_optimize_reason(BailoutReason dont_optimize_reason) {}
+
+  bool operator==(const PreParserExpression& other) const {
+    return code_ == other.code_;
+  }
+  bool operator!=(const PreParserExpression& other) const {
+    return code_ != other.code_;
+  }
+
  private:
-  // Least significant 2 bits are used as flags. Bits 0 and 1 represent
-  // identifiers or strings literals, and are mutually exclusive, but can both
-  // be absent. If the expression is an identifier or a string literal, the
-  // other bits describe the type (see PreParserIdentifier::Type and string
-  // literal constants below).
+  // Least significant 2 bits are used as expression type. The third least
+  // significant bit tracks whether an expression is parenthesized. If the
+  // expression is an identifier or a string literal, the other bits
+  // describe the type (see PreParserIdentifier::Type and string literal
+  // constants below). For binary operations, the other bits are flags
+  // which further describe the contents of the expression.
   enum {
     kUnknownExpression = 0,
-    // Identifiers
-    kIdentifierFlag = 1,  // Used to detect labels.
-    kIdentifierShift = 3,
+    kTypeMask = 1 | 2,
+    kParenthesizedExpression = (1 << 2),
+    kMultiParenthesizedExpression = (1 << 3),
 
-    kStringLiteralFlag = 2,  // Used to detect directive prologue.
-    kUnknownStringLiteral = kStringLiteralFlag,
-    kUseStrictString = kStringLiteralFlag | 8,
+    // Identifiers
+    kTypeIdentifier = 1,  // Used to detect labels.
+    kIdentifierShift = 5,
+    kTypeStringLiteral = 2,  // Used to detect directive prologue.
+    kUnknownStringLiteral = kTypeStringLiteral,
+    kUseStrictString = kTypeStringLiteral | 32,
     kStringLiteralMask = kUseStrictString,
 
+    // Binary operations. These are needed to detect certain keywords and
+    // duplicated identifiers in parameter lists for arrow functions, because
+    // they are initially parsed as comma-separated expressions.
+    kTypeBinaryOperation = 3,
+    kBinaryOperationArrowParamList = (1 << 4),
+
     // Below here applies if neither identifier nor string literal. Reserve the
     // 2 least significant bits for flags.
-    kThisExpression = 1 << 2,
-    kThisPropertyExpression = 2 << 2,
-    kPropertyExpression = 3 << 2,
-    kCallExpression = 4 << 2
+    kThisExpression = (1 << 4),
+    kThisPropertyExpression = (2 << 4),
+    kPropertyExpression = (3 << 4),
+    kCallExpression = (4 << 4),
+    kSuperExpression = (5 << 4)
   };
 
   explicit PreParserExpression(int expression_code) : code_(expression_code) {}
 
+  V8_INLINE int ArrowParamListBit() const {
+    if (IsBinaryOperation()) return code_ & kBinaryOperationArrowParamList;
+    if (IsIdentifier()) {
+      const PreParserIdentifier ident = AsIdentifier();
+      // A valid identifier can be an arrow function parameter list
+      // except for eval, arguments, yield, and reserved keywords.
+      if (ident.IsEval() || ident.IsArguments() || ident.IsYield() ||
+          ident.IsFutureStrictReserved())
+        return 0;
+      return kBinaryOperationArrowParamList;
+    }
+    return 0;
+  }
+
   int code_;
 };
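
A hedged sketch of the bit layout this enum describes, and of how BinaryOperation() propagates the arrow-parameter-list bit through comma expressions. The constants mirror the enum above but are illustrative, not the actual V8 definitions:

```cpp
#include <cassert>
#include <cstdint>

enum : uint32_t {
  kTypeMask = 1 | 2,  // low two bits: expression type
  kParenthesizedExpression = 1u << 2,
  kMultiParenthesizedExpression = 1u << 3,
  kTypeIdentifier = 1,
  kTypeBinaryOperation = 3,
  kBinaryOperationArrowParamList = 1u << 4
};

// Mirrors PreParserExpression::BinaryOperation for the comma operator: the
// result stays a parameter-list candidate only if both operands are
// candidates and neither side was parenthesized.
uint32_t CommaExpression(uint32_t left, uint32_t right) {
  uint32_t paren = (left | right) &
                   (kParenthesizedExpression | kMultiParenthesizedExpression);
  uint32_t bit =
      paren ? 0u : (left & right & kBinaryOperationArrowParamList);
  return kTypeBinaryOperation | bit;
}

int main() {
  // A plain identifier such as "a" qualifies as a one-element param list.
  uint32_t ident = kTypeIdentifier | kBinaryOperationArrowParamList;
  uint32_t call = 4u << 4;  // e.g. kCallExpression: never a param list

  assert(CommaExpression(ident, ident) & kBinaryOperationArrowParamList);
  assert(!(CommaExpression(ident, call) & kBinaryOperationArrowParamList));

  // "((a), b)" is rejected: the parenthesization bit poisons the result.
  uint32_t paren_ident = ident | kParenthesizedExpression;
  assert(!(CommaExpression(paren_ident, ident) &
           kBinaryOperationArrowParamList));
}
```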
 
@@ -726,7 +938,8 @@
 
 class PreParserScope {
  public:
-  explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type)
+  explicit PreParserScope(PreParserScope* outer_scope, ScopeType scope_type,
+                          void* = NULL)
       : scope_type_(scope_type) {
     strict_mode_ = outer_scope ? outer_scope->strict_mode() : SLOPPY;
   }
@@ -734,6 +947,20 @@
   ScopeType type() { return scope_type_; }
   StrictMode strict_mode() const { return strict_mode_; }
   void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
+  void SetScopeName(PreParserIdentifier name) {}
+
+  // When the PreParser is in use, lazy compilation is already being done;
+  // things cannot get lazier than that.
+  bool AllowsLazyCompilation() const { return false; }
+
+  void set_start_position(int position) {}
+  void set_end_position(int position) {}
+
+  bool IsDeclared(const PreParserIdentifier& identifier) const { return false; }
+  void DeclareParameter(const PreParserIdentifier& identifier, VariableMode) {}
+
+  // Allow scope->Foo() to work.
+  PreParserScope* operator->() { return this; }
 
  private:
   ScopeType scope_type_;
@@ -743,9 +970,9 @@
 
 class PreParserFactory {
  public:
-  explicit PreParserFactory(void* extra_param) {}
-  PreParserExpression NewLiteral(PreParserIdentifier identifier,
-                                 int pos) {
+  PreParserFactory(void*, void*, void*) {}
+  PreParserExpression NewStringLiteral(PreParserIdentifier identifier,
+                                       int pos) {
     return PreParserExpression::Default();
   }
   PreParserExpression NewNumberLiteral(double number,
@@ -765,11 +992,12 @@
   }
   PreParserExpression NewObjectLiteralProperty(bool is_getter,
                                                PreParserExpression value,
-                                               int pos) {
+                                               int pos, bool is_static) {
     return PreParserExpression::Default();
   }
   PreParserExpression NewObjectLiteralProperty(PreParserExpression key,
-                                               PreParserExpression value) {
+                                               PreParserExpression value,
+                                               bool is_static) {
     return PreParserExpression::Default();
   }
   PreParserExpression NewObjectLiteral(PreParserExpressionList properties,
@@ -779,7 +1007,7 @@
                                        int pos) {
     return PreParserExpression::Default();
   }
-  PreParserExpression NewVariableProxy(void* generator_variable) {
+  PreParserExpression NewVariableProxy(void* variable) {
     return PreParserExpression::Default();
   }
   PreParserExpression NewProperty(PreParserExpression obj,
@@ -798,7 +1026,7 @@
   PreParserExpression NewBinaryOperation(Token::Value op,
                                          PreParserExpression left,
                                          PreParserExpression right, int pos) {
-    return PreParserExpression::Default();
+    return PreParserExpression::BinaryOperation(left, op, right);
   }
   PreParserExpression NewCompareOperation(Token::Value op,
                                           PreParserExpression left,
@@ -839,6 +1067,38 @@
                                  int pos) {
     return PreParserExpression::Default();
   }
+  PreParserStatement NewReturnStatement(PreParserExpression expression,
+                                        int pos) {
+    return PreParserStatement::Default();
+  }
+  PreParserExpression NewFunctionLiteral(
+      PreParserIdentifier name, AstValueFactory* ast_value_factory,
+      const PreParserScope& scope, PreParserStatementList body,
+      int materialized_literal_count, int expected_property_count,
+      int handler_count, int parameter_count,
+      FunctionLiteral::ParameterFlag has_duplicate_parameters,
+      FunctionLiteral::FunctionType function_type,
+      FunctionLiteral::IsFunctionFlag is_function,
+      FunctionLiteral::IsParenthesizedFlag is_parenthesized, FunctionKind kind,
+      int position) {
+    return PreParserExpression::Default();
+  }
+  PreParserExpression NewClassLiteral(PreParserIdentifier name,
+                                      PreParserExpression extends,
+                                      PreParserExpression constructor,
+                                      PreParserExpressionList properties,
+                                      int position) {
+    return PreParserExpression::Default();
+  }
+
+  // Return the object itself as AstVisitor and implement the needed
+  // dummy methods right in this class.
+  PreParserFactory* visitor() { return this; }
+  BailoutReason dont_optimize_reason() { return kNoReason; }
+  int* ast_properties() {
+    static int dummy = 42;
+    return &dummy;
+  }
 };
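
Every factory method above returns PreParserExpression::Default() (or a similar throwaway), which is the null-object half of a pattern worth spelling out: ParserBase is templated over Traits, so identical parsing code either builds a real AST or discards everything. A sketch under hypothetical names; only the shape of the trick is V8's:

```cpp
#include <cassert>
#include <string>

struct RealFactory {  // builds data the parser keeps
  std::string NewStringLiteral(const std::string& s, int /*pos*/) { return s; }
};

struct NullFactory {  // preparser analogue: everything is Default()
  int NewStringLiteral(const std::string& /*s*/, int /*pos*/) { return 0; }
};

// The same "parsing" code instantiates against either factory.
template <typename Factory>
auto ParseStringLiteral(Factory* factory)
    -> decltype(factory->NewStringLiteral(std::string(), 0)) {
  return factory->NewStringLiteral("hello", 7);
}

int main() {
  RealFactory real;
  NullFactory null;
  assert(ParseStringLiteral(&real) == "hello");  // structure retained
  assert(ParseStringLiteral(&null) == 0);        // structure discarded
}
```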
 
 
@@ -853,16 +1113,22 @@
 
     // Used by FunctionState and BlockState.
     typedef PreParserScope Scope;
+    typedef PreParserScope ScopePtr;
+
     // PreParser doesn't need to store generator variables.
     typedef void GeneratorVariable;
     // No interaction with Zones.
     typedef void Zone;
 
+    typedef int AstProperties;
+    typedef Vector<PreParserIdentifier> ParameterIdentifierVector;
+
     // Return types for traversing functions.
     typedef PreParserIdentifier Identifier;
     typedef PreParserExpression Expression;
     typedef PreParserExpression YieldExpression;
     typedef PreParserExpression FunctionLiteral;
+    typedef PreParserExpression ClassLiteral;
     typedef PreParserExpression ObjectLiteralProperty;
     typedef PreParserExpression Literal;
     typedef PreParserExpressionList ExpressionList;
@@ -873,20 +1139,30 @@
     typedef PreParserFactory Factory;
   };
 
+  class Checkpoint;
+
   explicit PreParserTraits(PreParser* pre_parser) : pre_parser_(pre_parser) {}
 
   // Custom operations executed when FunctionStates are created and
   // destructed. (The PreParser doesn't need to do anything.)
-  template<typename FunctionState>
-  static void SetUpFunctionState(FunctionState* function_state, void*) {}
-  template<typename FunctionState>
-  static void TearDownFunctionState(FunctionState* function_state, void*) {}
+  template <typename FunctionState>
+  static void SetUpFunctionState(FunctionState* function_state) {}
+  template <typename FunctionState>
+  static void TearDownFunctionState(FunctionState* function_state) {}
 
   // Helper functions for recursive descent.
   static bool IsEvalOrArguments(PreParserIdentifier identifier) {
     return identifier.IsEvalOrArguments();
   }
 
+  static bool IsPrototype(PreParserIdentifier identifier) {
+    return identifier.IsPrototype();
+  }
+
+  static bool IsConstructor(PreParserIdentifier identifier) {
+    return identifier.IsConstructor();
+  }
+
   // Returns true if the expression is of type "this.foo".
   static bool IsThisProperty(PreParserExpression expression) {
     return expression.IsThisProperty();
@@ -900,6 +1176,10 @@
     return expression.AsIdentifier();
   }
 
+  static bool IsFutureStrictReserved(PreParserIdentifier identifier) {
+    return identifier.IsYield() || identifier.IsFutureStrictReserved();
+  }
+
   static bool IsBoilerplateProperty(PreParserExpression property) {
     // PreParser doesn't count boilerplate properties.
     return false;
@@ -920,9 +1200,15 @@
     // PreParser should not use FuncNameInferrer.
     UNREACHABLE();
   }
+  static void InferFunctionName(FuncNameInferrer* fni,
+                                PreParserExpression expression) {
+    // PreParser should not use FuncNameInferrer.
+    UNREACHABLE();
+  }
 
   static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
-      PreParserScope* scope, PreParserExpression value, bool* has_function) {}
+      PreParserScope* scope, PreParserExpression property, bool* has_function) {
+  }
 
   static void CheckAssigningFunctionLiteralToProperty(
       PreParserExpression left, PreParserExpression right) {}
@@ -931,10 +1217,10 @@
   static void CheckPossibleEvalCall(PreParserExpression expression,
                                     PreParserScope* scope) {}
 
-  static PreParserExpression MarkExpressionAsLValue(
+  static PreParserExpression MarkExpressionAsAssigned(
       PreParserExpression expression) {
     // TODO(marja): To be able to produce the same errors, the preparser needs
-    // to start tracking which expressions are variables and which are lvalues.
+    // to start tracking which expressions are variables and which are assigned.
     return expression;
   }
 
@@ -963,6 +1249,9 @@
       const char* type, Handle<Object> arg, int pos) {
     return PreParserExpression::Default();
   }
+  PreParserScope NewScope(PreParserScope* outer_scope, ScopeType scope_type) {
+    return PreParserScope(outer_scope, scope_type);
+  }
 
   // Reporting errors.
   void ReportMessageAt(Scanner::Location location,
@@ -979,12 +1268,24 @@
   static PreParserIdentifier EmptyIdentifier() {
     return PreParserIdentifier::Default();
   }
+  static PreParserIdentifier EmptyIdentifierString() {
+    return PreParserIdentifier::Default();
+  }
   static PreParserExpression EmptyExpression() {
     return PreParserExpression::Default();
   }
+  static PreParserExpression EmptyArrowParamList() {
+    return PreParserExpression::EmptyArrowParamList();
+  }
   static PreParserExpression EmptyLiteral() {
     return PreParserExpression::Default();
   }
+  static PreParserExpression EmptyObjectLiteralProperty() {
+    return PreParserExpression::Default();
+  }
+  static PreParserExpression EmptyFunctionLiteral() {
+    return PreParserExpression::Default();
+  }
   static PreParserExpressionList NullExpressionList() {
     return PreParserExpressionList();
   }
@@ -997,8 +1298,9 @@
 
   // Producing data during the recursive descent.
   PreParserIdentifier GetSymbol(Scanner* scanner);
-  static PreParserIdentifier NextLiteralString(Scanner* scanner,
-                                               PretenureFlag tenured) {
+  PreParserIdentifier GetNumberAsSymbol(Scanner* scanner);
+
+  static PreParserIdentifier GetNextSymbol(Scanner* scanner) {
     return PreParserIdentifier::Default();
   }
 
@@ -1007,6 +1309,20 @@
     return PreParserExpression::This();
   }
 
+  static PreParserExpression SuperReference(PreParserScope* scope,
+                                            PreParserFactory* factory) {
+    return PreParserExpression::Super();
+  }
+
+  static PreParserExpression ClassLiteral(PreParserIdentifier name,
+                                          PreParserExpression extends,
+                                          PreParserExpression constructor,
+                                          PreParserExpressionList properties,
+                                          int position,
+                                          PreParserFactory* factory) {
+    return PreParserExpression::Default();
+  }
+
   static PreParserExpression ExpressionFromLiteral(
       Token::Value token, int pos, Scanner* scanner,
       PreParserFactory* factory) {
@@ -1023,6 +1339,11 @@
                                            Scanner* scanner,
                                            PreParserFactory* factory = NULL);
 
+  PreParserExpression GetIterator(PreParserExpression iterable,
+                                  PreParserFactory* factory) {
+    return PreParserExpression::Default();
+  }
+
   static PreParserExpressionList NewExpressionList(int size, void* zone) {
     return PreParserExpressionList();
   }
@@ -1035,17 +1356,38 @@
     return PreParserExpressionList();
   }
 
+  V8_INLINE void SkipLazyFunctionBody(PreParserIdentifier function_name,
+                                      int* materialized_literal_count,
+                                      int* expected_property_count, bool* ok) {
+    UNREACHABLE();
+  }
+
+  V8_INLINE PreParserStatementList
+      ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
+                             Variable* fvar, Token::Value fvar_init_op,
+                             bool is_generator, bool* ok);
+
+  // Utility functions
+  int DeclareArrowParametersFromExpression(PreParserExpression expression,
+                                           PreParserScope* scope,
+                                           Scanner::Location* dupe_loc,
+                                           bool* ok) {
+    // TODO(aperez): Detect duplicated identifiers in paramlists.
+    *ok = expression.IsValidArrowParamList();
+    return 0;
+  }
+
+  static AstValueFactory* ast_value_factory() { return NULL; }
+
+  void CheckConflictingVarDeclarations(PreParserScope scope, bool* ok) {}
+
   // Temporary glue; these functions will move to ParserBase.
   PreParserExpression ParseV8Intrinsic(bool* ok);
   PreParserExpression ParseFunctionLiteral(
-      PreParserIdentifier name,
-      Scanner::Location function_name_location,
-      bool name_is_strict_reserved,
-      bool is_generator,
-      int function_token_position,
-      FunctionLiteral::FunctionType type,
-      FunctionLiteral::ArityRestriction arity_restriction,
-      bool* ok);
+      PreParserIdentifier name, Scanner::Location function_name_location,
+      bool name_is_strict_reserved, FunctionKind kind,
+      int function_token_position, FunctionLiteral::FunctionType type,
+      FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
 
  private:
   PreParser* pre_parser_;
@@ -1076,7 +1418,7 @@
   };
 
   PreParser(Scanner* scanner, ParserRecorder* log, uintptr_t stack_limit)
-      : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, log, NULL,
+      : ParserBase<PreParserTraits>(scanner, stack_limit, NULL, log, NULL, NULL,
                                     this) {}
 
   // Pre-parse the program from the character stream; returns true on
@@ -1085,7 +1427,7 @@
   // during parsing.
   PreParseResult PreParseProgram() {
     PreParserScope scope(scope_, GLOBAL_SCOPE);
-    FunctionState top_scope(&function_state_, &scope_, &scope, NULL);
+    FunctionState top_scope(&function_state_, &scope_, &scope);
     bool ok = true;
     int start_position = scanner()->peek_location().beg_pos;
     ParseSourceElements(Token::EOS, &ok);
@@ -1143,6 +1485,7 @@
   SourceElements ParseSourceElements(int end_token, bool* ok);
   Statement ParseStatement(bool* ok);
   Statement ParseFunctionDeclaration(bool* ok);
+  Statement ParseClassDeclaration(bool* ok);
   Statement ParseBlock(bool* ok);
   Statement ParseVariableStatement(VariableDeclarationContext var_context,
                                    bool* ok);
@@ -1167,26 +1510,52 @@
   Expression ParseObjectLiteral(bool* ok);
   Expression ParseV8Intrinsic(bool* ok);
 
+  V8_INLINE void SkipLazyFunctionBody(PreParserIdentifier function_name,
+                                      int* materialized_literal_count,
+                                      int* expected_property_count, bool* ok);
+  V8_INLINE PreParserStatementList
+      ParseEagerFunctionBody(PreParserIdentifier function_name, int pos,
+                             Variable* fvar, Token::Value fvar_init_op,
+                             bool is_generator, bool* ok);
+
   Expression ParseFunctionLiteral(
-      Identifier name,
-      Scanner::Location function_name_location,
-      bool name_is_strict_reserved,
-      bool is_generator,
-      int function_token_pos,
+      Identifier name, Scanner::Location function_name_location,
+      bool name_is_strict_reserved, FunctionKind kind, int function_token_pos,
       FunctionLiteral::FunctionType function_type,
-      FunctionLiteral::ArityRestriction arity_restriction,
-      bool* ok);
+      FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
   void ParseLazyFunctionLiteralBody(bool* ok);
 
   bool CheckInOrOf(bool accept_OF);
 };
 
-template<class Traits>
+
+PreParserStatementList PreParser::ParseEagerFunctionBody(
+    PreParserIdentifier function_name, int pos, Variable* fvar,
+    Token::Value fvar_init_op, bool is_generator, bool* ok) {
+  ParsingModeScope parsing_mode(this, PARSE_EAGERLY);
+
+  ParseSourceElements(Token::RBRACE, ok);
+  if (!*ok) return PreParserStatementList();
+
+  Expect(Token::RBRACE, ok);
+  return PreParserStatementList();
+}
+
+
+PreParserStatementList PreParserTraits::ParseEagerFunctionBody(
+    PreParserIdentifier function_name, int pos, Variable* fvar,
+    Token::Value fvar_init_op, bool is_generator, bool* ok) {
+  return pre_parser_->ParseEagerFunctionBody(function_name, pos, fvar,
+                                             fvar_init_op, is_generator, ok);
+}
+
+
+template <class Traits>
 ParserBase<Traits>::FunctionState::FunctionState(
     FunctionState** function_state_stack,
     typename Traits::Type::Scope** scope_stack,
-    typename Traits::Type::Scope* scope,
-    typename Traits::Type::Zone* extra_param)
+    typename Traits::Type::Scope* scope, typename Traits::Type::Zone* zone,
+    AstValueFactory* ast_value_factory, AstNode::IdGen* ast_node_id_gen)
     : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
       next_handler_index_(0),
       expected_property_count_(0),
@@ -1196,20 +1565,42 @@
       outer_function_state_(*function_state_stack),
       scope_stack_(scope_stack),
       outer_scope_(*scope_stack),
-      saved_ast_node_id_(0),
-      extra_param_(extra_param),
-      factory_(extra_param) {
+      ast_node_id_gen_(ast_node_id_gen),
+      factory_(zone, ast_value_factory, ast_node_id_gen) {
   *scope_stack_ = scope;
   *function_state_stack = this;
-  Traits::SetUpFunctionState(this, extra_param);
+  Traits::SetUpFunctionState(this);
 }
 
 
-template<class Traits>
+template <class Traits>
+ParserBase<Traits>::FunctionState::FunctionState(
+    FunctionState** function_state_stack,
+    typename Traits::Type::Scope** scope_stack,
+    typename Traits::Type::Scope** scope, typename Traits::Type::Zone* zone,
+    AstValueFactory* ast_value_factory, AstNode::IdGen* ast_node_id_gen)
+    : next_materialized_literal_index_(JSFunction::kLiteralsPrefixSize),
+      next_handler_index_(0),
+      expected_property_count_(0),
+      is_generator_(false),
+      generator_object_variable_(NULL),
+      function_state_stack_(function_state_stack),
+      outer_function_state_(*function_state_stack),
+      scope_stack_(scope_stack),
+      outer_scope_(*scope_stack),
+      ast_node_id_gen_(ast_node_id_gen),
+      factory_(zone, ast_value_factory, ast_node_id_gen) {
+  *scope_stack_ = *scope;
+  *function_state_stack = this;
+  Traits::SetUpFunctionState(this);
+}
+
+
+template <class Traits>
 ParserBase<Traits>::FunctionState::~FunctionState() {
   *scope_stack_ = outer_scope_;
   *function_state_stack_ = outer_function_state_;
-  Traits::TearDownFunctionState(this, extra_param_);
+  Traits::TearDownFunctionState(this);
 }
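
The constructor/destructor pair above implements a small RAII discipline: entering a function literal pushes the new state onto intrusive stacks, and unwinding (including on early returns) restores the outer state. A stripped-down sketch of the mechanism, not the V8 class:

```cpp
#include <cassert>

struct State {
  explicit State(State** stack) : stack_(stack), outer_(*stack) {
    *stack = this;  // push: this state becomes the innermost one
  }
  ~State() { *stack_ = outer_; }  // pop: restore the enclosing state

  State** stack_;
  State* outer_;
};

int main() {
  State* top = nullptr;
  {
    State outer_function(&top);
    assert(top == &outer_function);
    {
      State nested_literal(&top);  // entering a nested function literal
      assert(top == &nested_literal);
    }
    assert(top == &outer_function);  // destructor restored the outer state
  }
  assert(top == nullptr);
}
```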
 
 
@@ -1229,13 +1620,14 @@
       return ReportMessageAt(source_location, "unexpected_token_identifier");
     case Token::FUTURE_RESERVED_WORD:
       return ReportMessageAt(source_location, "unexpected_reserved");
+    case Token::LET:
     case Token::YIELD:
     case Token::FUTURE_STRICT_RESERVED_WORD:
       return ReportMessageAt(source_location, strict_mode() == SLOPPY
           ? "unexpected_token_identifier" : "unexpected_strict_reserved");
     default:
       const char* name = Token::String(token);
-      ASSERT(name != NULL);
+      DCHECK(name != NULL);
       Traits::ReportMessageAt(source_location, "unexpected_token", name);
   }
 }
@@ -1256,6 +1648,7 @@
     return name;
   } else if (strict_mode() == SLOPPY &&
              (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+             (next == Token::LET) ||
              (next == Token::YIELD && !is_generator()))) {
     return this->GetSymbol(scanner());
   } else {
@@ -1274,6 +1667,7 @@
   if (next == Token::IDENTIFIER) {
     *is_strict_reserved = false;
   } else if (next == Token::FUTURE_STRICT_RESERVED_WORD ||
+             next == Token::LET ||
              (next == Token::YIELD && !this->is_generator())) {
     *is_strict_reserved = true;
   } else {
@@ -1290,6 +1684,7 @@
 ParserBase<Traits>::ParseIdentifierName(bool* ok) {
   Token::Value next = Next();
   if (next != Token::IDENTIFIER && next != Token::FUTURE_RESERVED_WORD &&
+      next != Token::LET && next != Token::YIELD &&
       next != Token::FUTURE_STRICT_RESERVED_WORD && !Token::IsKeyword(next)) {
     this->ReportUnexpectedToken(next);
     *ok = false;
@@ -1324,14 +1719,14 @@
 
   int literal_index = function_state_->NextMaterializedLiteralIndex();
 
-  IdentifierT js_pattern = this->NextLiteralString(scanner(), TENURED);
+  IdentifierT js_pattern = this->GetNextSymbol(scanner());
   if (!scanner()->ScanRegExpFlags()) {
     Next();
     ReportMessage("invalid_regexp_flags");
     *ok = false;
     return Traits::EmptyExpression();
   }
-  IdentifierT js_flags = this->NextLiteralString(scanner(), TENURED);
+  IdentifierT js_flags = this->GetNextSymbol(scanner());
   Next();
   return factory()->NewRegExpLiteral(js_pattern, js_flags, literal_index, pos);
 }
@@ -1364,6 +1759,7 @@
   //   ArrayLiteral
   //   ObjectLiteral
   //   RegExpLiteral
+  //   ClassLiteral
   //   '(' Expression ')'
 
   int pos = peek_position();
@@ -1385,6 +1781,7 @@
       break;
 
     case Token::IDENTIFIER:
+    case Token::LET:
     case Token::YIELD:
     case Token::FUTURE_STRICT_RESERVED_WORD: {
       // Using eval or arguments in this context is OK even in strict mode.
@@ -1417,13 +1814,39 @@
 
     case Token::LPAREN:
       Consume(Token::LPAREN);
-      // Heuristically try to detect immediately called functions before
-      // seeing the call parentheses.
-      parenthesized_function_ = (peek() == Token::FUNCTION);
-      result = this->ParseExpression(true, CHECK_OK);
-      Expect(Token::RPAREN, CHECK_OK);
+      if (allow_arrow_functions() && peek() == Token::RPAREN) {
+        // Arrow functions are the only expression type constructions
+        // for which an empty parameter list "()" is valid input.
+        Consume(Token::RPAREN);
+        result = this->ParseArrowFunctionLiteral(
+            pos, this->EmptyArrowParamList(), CHECK_OK);
+      } else {
+        // Heuristically try to detect immediately called functions before
+        // seeing the call parentheses.
+        parenthesized_function_ = (peek() == Token::FUNCTION);
+        result = this->ParseExpression(true, CHECK_OK);
+        result->increase_parenthesization_level();
+        Expect(Token::RPAREN, CHECK_OK);
+      }
       break;
 
+    case Token::CLASS: {
+      Consume(Token::CLASS);
+      int class_token_position = position();
+      IdentifierT name = this->EmptyIdentifier();
+      bool is_strict_reserved_name = false;
+      Scanner::Location class_name_location = Scanner::Location::invalid();
+      if (peek_any_identifier()) {
+        name = ParseIdentifierOrStrictReservedWord(&is_strict_reserved_name,
+                                                   CHECK_OK);
+        class_name_location = scanner()->location();
+      }
+      result = this->ParseClassLiteral(name, class_name_location,
+                                       is_strict_reserved_name,
+                                       class_token_position, CHECK_OK);
+      break;
+    }
+
     case Token::MOD:
       if (allow_natives_syntax() || extension_ != NULL) {
         result = this->ParseV8Intrinsic(CHECK_OK);
@@ -1493,14 +1916,138 @@
 
 
 template <class Traits>
+typename ParserBase<Traits>::IdentifierT ParserBase<Traits>::ParsePropertyName(
+    bool* is_get, bool* is_set, bool* is_static, bool* ok) {
+  Token::Value next = peek();
+  switch (next) {
+    case Token::STRING:
+      Consume(Token::STRING);
+      return this->GetSymbol(scanner_);
+    case Token::NUMBER:
+      Consume(Token::NUMBER);
+      return this->GetNumberAsSymbol(scanner_);
+    case Token::STATIC:
+      *is_static = true;
+      // Fall through.
+    default:
+      return ParseIdentifierNameOrGetOrSet(is_get, is_set, ok);
+  }
+  UNREACHABLE();
+  return this->EmptyIdentifier();
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ObjectLiteralPropertyT ParserBase<
+    Traits>::ParsePropertyDefinition(ObjectLiteralChecker* checker,
+                                     bool in_class, bool is_static, bool* ok) {
+  ExpressionT value = this->EmptyExpression();
+  bool is_get = false;
+  bool is_set = false;
+  bool name_is_static = false;
+  bool is_generator = allow_harmony_object_literals_ && Check(Token::MUL);
+
+  Token::Value name_token = peek();
+  int next_pos = peek_position();
+  IdentifierT name =
+      ParsePropertyName(&is_get, &is_set, &name_is_static,
+                        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+  if (fni_ != NULL) this->PushLiteralName(fni_, name);
+
+  if (!in_class && !is_generator && peek() == Token::COLON) {
+    // PropertyDefinition : PropertyName ':' AssignmentExpression
+    checker->CheckProperty(name_token, kValueProperty,
+                           CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+    Consume(Token::COLON);
+    value = this->ParseAssignmentExpression(
+        true, CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+  } else if (is_generator ||
+             (allow_harmony_object_literals_ && peek() == Token::LPAREN)) {
+    // Concise Method
+
+    if (is_static && this->IsPrototype(name)) {
+      ReportMessageAt(scanner()->location(), "static_prototype");
+      *ok = false;
+      return this->EmptyObjectLiteralProperty();
+    }
+    if (is_generator && in_class && !is_static && this->IsConstructor(name)) {
+      ReportMessageAt(scanner()->location(), "constructor_special_method");
+      *ok = false;
+      return this->EmptyObjectLiteralProperty();
+    }
+
+    checker->CheckProperty(name_token, kValueProperty,
+                           CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+    FunctionKind kind = is_generator ? FunctionKind::kConciseGeneratorMethod
+                                     : FunctionKind::kConciseMethod;
+
+    value = this->ParseFunctionLiteral(
+        name, scanner()->location(),
+        false,  // reserved words are allowed here
+        kind, RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
+        FunctionLiteral::NORMAL_ARITY,
+        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+  } else if (in_class && name_is_static && !is_static) {
+    // static MethodDefinition
+    return ParsePropertyDefinition(checker, true, true, ok);
+
+  } else if (is_get || is_set) {
+    // Accessor
+    bool dont_care = false;
+    name_token = peek();
+    name = ParsePropertyName(&dont_care, &dont_care, &dont_care,
+                             CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+    // Validate the property.
+    if (is_static && this->IsPrototype(name)) {
+      ReportMessageAt(scanner()->location(), "static_prototype");
+      *ok = false;
+      return this->EmptyObjectLiteralProperty();
+    } else if (in_class && !is_static && this->IsConstructor(name)) {
+      // ES6, spec draft rev 27, treats static get constructor as an error too.
+      // https://bugs.ecmascript.org/show_bug.cgi?id=3223
+      // TODO(arv): Update when bug is resolved.
+      ReportMessageAt(scanner()->location(), "constructor_special_method");
+      *ok = false;
+      return this->EmptyObjectLiteralProperty();
+    }
+    checker->CheckProperty(name_token,
+                           is_get ? kGetterProperty : kSetterProperty,
+                           CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+
+    typename Traits::Type::FunctionLiteral value = this->ParseFunctionLiteral(
+        name, scanner()->location(),
+        false,  // reserved words are allowed here
+        FunctionKind::kNormalFunction, RelocInfo::kNoPosition,
+        FunctionLiteral::ANONYMOUS_EXPRESSION,
+        is_get ? FunctionLiteral::GETTER_ARITY : FunctionLiteral::SETTER_ARITY,
+        CHECK_OK_CUSTOM(EmptyObjectLiteralProperty));
+    return factory()->NewObjectLiteralProperty(is_get, value, next_pos,
+                                               is_static);
+  } else {
+    Token::Value next = Next();
+    ReportUnexpectedToken(next);
+    *ok = false;
+    return this->EmptyObjectLiteralProperty();
+  }
+
+  uint32_t index;
+  LiteralT key = this->IsArrayIndex(name, &index)
+                     ? factory()->NewNumberLiteral(index, next_pos)
+                     : factory()->NewStringLiteral(name, next_pos);
+
+  return factory()->NewObjectLiteralProperty(key, value, is_static);
+}
+
+
+template <class Traits>
 typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseObjectLiteral(
     bool* ok) {
   // ObjectLiteral ::
-  // '{' ((
-  //       ((IdentifierName | String | Number) ':' AssignmentExpression) |
-  //       (('get' | 'set') (IdentifierName | String | Number) FunctionLiteral)
-  //      ) ',')* '}'
-  // (Except that trailing comma is not required and not allowed.)
+  // '{' (PropertyDefinition (',' PropertyDefinition)* ','? )? '}'
 
   int pos = peek_position();
   typename Traits::Type::PropertyList properties =
@@ -1515,114 +2062,15 @@
   while (peek() != Token::RBRACE) {
     if (fni_ != NULL) fni_->Enter();
 
-    typename Traits::Type::Literal key = this->EmptyLiteral();
-    Token::Value next = peek();
-    int next_pos = peek_position();
-
-    switch (next) {
-      case Token::FUTURE_RESERVED_WORD:
-      case Token::FUTURE_STRICT_RESERVED_WORD:
-      case Token::IDENTIFIER: {
-        bool is_getter = false;
-        bool is_setter = false;
-        IdentifierT id =
-            ParseIdentifierNameOrGetOrSet(&is_getter, &is_setter, CHECK_OK);
-        if (fni_ != NULL) this->PushLiteralName(fni_, id);
-
-        if ((is_getter || is_setter) && peek() != Token::COLON) {
-          // Special handling of getter and setter syntax:
-          // { ... , get foo() { ... }, ... , set foo(v) { ... v ... } , ... }
-          // We have already read the "get" or "set" keyword.
-          Token::Value next = Next();
-          if (next != i::Token::IDENTIFIER &&
-              next != i::Token::FUTURE_RESERVED_WORD &&
-              next != i::Token::FUTURE_STRICT_RESERVED_WORD &&
-              next != i::Token::NUMBER &&
-              next != i::Token::STRING &&
-              !Token::IsKeyword(next)) {
-            ReportUnexpectedToken(next);
-            *ok = false;
-            return this->EmptyLiteral();
-          }
-          // Validate the property.
-          PropertyKind type = is_getter ? kGetterProperty : kSetterProperty;
-          checker.CheckProperty(next, type, CHECK_OK);
-          IdentifierT name = this->GetSymbol(scanner_);
-          typename Traits::Type::FunctionLiteral value =
-              this->ParseFunctionLiteral(
-                  name, scanner()->location(),
-                  false,  // reserved words are allowed here
-                  false,  // not a generator
-                  RelocInfo::kNoPosition, FunctionLiteral::ANONYMOUS_EXPRESSION,
-                  is_getter ? FunctionLiteral::GETTER_ARITY
-                            : FunctionLiteral::SETTER_ARITY,
-                  CHECK_OK);
-          typename Traits::Type::ObjectLiteralProperty property =
-              factory()->NewObjectLiteralProperty(is_getter, value, next_pos);
-          if (this->IsBoilerplateProperty(property)) {
-            number_of_boilerplate_properties++;
-          }
-          properties->Add(property, zone());
-          if (peek() != Token::RBRACE) {
-            // Need {} because of the CHECK_OK macro.
-            Expect(Token::COMMA, CHECK_OK);
-          }
-
-          if (fni_ != NULL) {
-            fni_->Infer();
-            fni_->Leave();
-          }
-          continue;  // restart the while
-        }
-        // Failed to parse as get/set property, so it's just a normal property
-        // (which might be called "get" or "set" or something else).
-        key = factory()->NewLiteral(id, next_pos);
-        break;
-      }
-      case Token::STRING: {
-        Consume(Token::STRING);
-        IdentifierT string = this->GetSymbol(scanner_);
-        if (fni_ != NULL) this->PushLiteralName(fni_, string);
-        uint32_t index;
-        if (this->IsArrayIndex(string, &index)) {
-          key = factory()->NewNumberLiteral(index, next_pos);
-          break;
-        }
-        key = factory()->NewLiteral(string, next_pos);
-        break;
-      }
-      case Token::NUMBER: {
-        Consume(Token::NUMBER);
-        key = this->ExpressionFromLiteral(Token::NUMBER, next_pos, scanner_,
-                                          factory());
-        break;
-      }
-      default:
-        if (Token::IsKeyword(next)) {
-          Consume(next);
-          IdentifierT string = this->GetSymbol(scanner_);
-          key = factory()->NewLiteral(string, next_pos);
-        } else {
-          Token::Value next = Next();
-          ReportUnexpectedToken(next);
-          *ok = false;
-          return this->EmptyLiteral();
-        }
-    }
-
-    // Validate the property
-    checker.CheckProperty(next, kValueProperty, CHECK_OK);
-
-    Expect(Token::COLON, CHECK_OK);
-    ExpressionT value = this->ParseAssignmentExpression(true, CHECK_OK);
-
-    typename Traits::Type::ObjectLiteralProperty property =
-        factory()->NewObjectLiteralProperty(key, value);
+    const bool in_class = false;
+    const bool is_static = false;
+    ObjectLiteralPropertyT property =
+        this->ParsePropertyDefinition(&checker, in_class, is_static, CHECK_OK);
 
     // Mark top-level object literals that contain function literals and
     // pretenure the literal so it can be added as a constant function
     // property. (Parser only.)
-    this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, value,
+    this->CheckFunctionLiteralInsideTopLevelObjectLiteral(scope_, property,
                                                           &has_function);
 
     // Count CONSTANT or COMPUTED properties to maintain the enumeration order.
@@ -1631,7 +2079,6 @@
     }
     properties->Add(property, zone());
 
-    // TODO(1240767): Consider allowing trailing comma.
     if (peek() != Token::RBRACE) {
       // Need {} because of the CHECK_OK macro.
       Expect(Token::COMMA, CHECK_OK);
@@ -1690,6 +2137,7 @@
 ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
   // AssignmentExpression ::
   //   ConditionalExpression
+  //   ArrowFunction
   //   YieldExpression
   //   LeftHandSideExpression AssignmentOperator AssignmentExpression
 
@@ -1700,9 +2148,17 @@
   }
 
   if (fni_ != NULL) fni_->Enter();
+  typename Traits::Checkpoint checkpoint(this);
   ExpressionT expression =
       this->ParseConditionalExpression(accept_IN, CHECK_OK);
 
+  if (allow_arrow_functions() && peek() == Token::ARROW) {
+    checkpoint.Restore();
+    expression = this->ParseArrowFunctionLiteral(lhs_location.beg_pos,
+                                                 expression, CHECK_OK);
+    return expression;
+  }
+
   if (!Token::IsAssignmentOp(peek())) {
     if (fni_ != NULL) fni_->Leave();
     // Parsed conditional expression only (no assignment).
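
The Checkpoint used above is the backtracking half of arrow-function parsing: "(a, b)" is first parsed speculatively as a comma expression, and only when "=>" is peeked does the parser roll its bookkeeping back and re-read the expression as a parameter list. A minimal sketch of the save/restore idea (types here are hypothetical):

```cpp
#include <cassert>

// Stand-in for the per-function counters a speculative parse mutates
// (materialized literals, handlers, ...).
struct Bookkeeping {
  int literal_count = 0;
};

// Save-on-construct, restore-on-demand, mirroring Traits::Checkpoint.
class Checkpoint {
 public:
  explicit Checkpoint(Bookkeeping* book) : book_(book), saved_(*book) {}
  void Restore() { *book_ = saved_; }

 private:
  Bookkeeping* book_;
  Bookkeeping saved_;
};

int main() {
  Bookkeeping book;
  Checkpoint checkpoint(&book);
  book.literal_count += 2;  // side effects of parsing "(a, b)" as expression
  // peek() == Token::ARROW: it was really a parameter list, so undo.
  checkpoint.Restore();
  assert(book.literal_count == 0);
}
```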
@@ -1711,7 +2167,7 @@
 
   expression = this->CheckAndRewriteReferenceExpression(
       expression, lhs_location, "invalid_lhs_in_assignment", CHECK_OK);
-  expression = this->MarkExpressionAsLValue(expression);
+  expression = this->MarkExpressionAsAssigned(expression);
 
   Token::Value op = Next();  // Get assignment operator.
   int pos = position();
@@ -1750,18 +2206,43 @@
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseYieldExpression(bool* ok) {
   // YieldExpression ::
-  //   'yield' '*'? AssignmentExpression
+  //   'yield' ([no line terminator] '*'? AssignmentExpression)?
   int pos = peek_position();
   Expect(Token::YIELD, CHECK_OK);
-  Yield::Kind kind =
-      Check(Token::MUL) ? Yield::DELEGATING : Yield::SUSPEND;
   ExpressionT generator_object =
       factory()->NewVariableProxy(function_state_->generator_object_variable());
-  ExpressionT expression =
-      ParseAssignmentExpression(false, CHECK_OK);
+  ExpressionT expression = Traits::EmptyExpression();
+  Yield::Kind kind = Yield::kSuspend;
+  if (!scanner()->HasAnyLineTerminatorBeforeNext()) {
+    if (Check(Token::MUL)) kind = Yield::kDelegating;
+    switch (peek()) {
+      case Token::EOS:
+      case Token::SEMICOLON:
+      case Token::RBRACE:
+      case Token::RBRACK:
+      case Token::RPAREN:
+      case Token::COLON:
+      case Token::COMMA:
+        // The above set of tokens is the complete set of tokens that can appear
+        // after an AssignmentExpression, and none of them can start an
+        // AssignmentExpression.  This allows us to avoid looking for an RHS for
+        // a Yield::kSuspend operation, given only one look-ahead token.
+        if (kind == Yield::kSuspend)
+          break;
+        DCHECK_EQ(Yield::kDelegating, kind);
+        // Delegating yields require an RHS; fall through.
+      default:
+        expression = ParseAssignmentExpression(false, CHECK_OK);
+        break;
+    }
+  }
+  if (kind == Yield::kDelegating) {
+    // var iterator = subject[Symbol.iterator]();
+    expression = this->GetIterator(expression, factory());
+  }
   typename Traits::Type::YieldExpression yield =
       factory()->NewYield(generator_object, expression, kind, pos);
-  if (kind == Yield::DELEGATING) {
+  if (kind == Yield::kDelegating) {
     yield->set_index(function_state_->NextHandlerIndex());
   }
   return yield;
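
The switch above encodes a one-token decision procedure that is easy to test in isolation: every token that may legally follow an AssignmentExpression is also a token that cannot start one, so plain "yield" can be finalized without backtracking. A sketch with tokens as strings for brevity (the real code switches on Token::Value):

```cpp
#include <cassert>
#include <string>

bool YieldNeedsRhs(bool delegating, const std::string& next) {
  bool follows_assignment_expr = next == ";" || next == "}" || next == ")" ||
                                 next == "]" || next == ":" || next == "," ||
                                 next == "<eos>";
  // "yield*" always consumes an operand; plain "yield" may stand alone.
  return delegating || !follows_assignment_expr;
}

int main() {
  assert(!YieldNeedsRhs(false, ";"));  // "yield;"  -> suspend, no operand
  assert(YieldNeedsRhs(false, "x"));   // "yield x" -> parse the RHS
  assert(YieldNeedsRhs(true, ";"));    // "yield*;" -> RHS parsed, errors
}
```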
@@ -1795,7 +2276,7 @@
 template <class Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
-  ASSERT(prec >= 4);
+  DCHECK(prec >= 4);
   ExpressionT x = this->ParseUnaryExpression(CHECK_OK);
   for (int prec1 = Precedence(peek(), accept_IN); prec1 >= prec; prec1--) {
     // prec1 >= 4
@@ -1873,7 +2354,7 @@
     ExpressionT expression = this->ParseUnaryExpression(CHECK_OK);
     expression = this->CheckAndRewriteReferenceExpression(
         expression, lhs_location, "invalid_lhs_in_prefix_op", CHECK_OK);
-    this->MarkExpressionAsLValue(expression);
+    this->MarkExpressionAsAssigned(expression);
 
     return factory()->NewCountOperation(op,
                                         true /* prefix */,
@@ -1898,7 +2379,7 @@
       Token::IsCountOp(peek())) {
     expression = this->CheckAndRewriteReferenceExpression(
         expression, lhs_location, "invalid_lhs_in_postfix_op", CHECK_OK);
-    expression = this->MarkExpressionAsLValue(expression);
+    expression = this->MarkExpressionAsAssigned(expression);
 
     Token::Value next = Next();
     expression =
@@ -1970,7 +2451,7 @@
         int pos = position();
         IdentifierT name = ParseIdentifierName(CHECK_OK);
         result = factory()->NewProperty(
-            result, factory()->NewLiteral(name, pos), pos);
+            result, factory()->NewStringLiteral(name, pos), pos);
         if (fni_ != NULL) this->PushLiteralName(fni_, name);
         break;
       }
@@ -2005,7 +2486,12 @@
   if (peek() == Token::NEW) {
     Consume(Token::NEW);
     int new_pos = position();
-    ExpressionT result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+    ExpressionT result = this->EmptyExpression();
+    if (Check(Token::SUPER)) {
+      result = this->SuperReference(scope_, factory());
+    } else {
+      result = this->ParseMemberWithNewPrefixesExpression(CHECK_OK);
+    }
     if (peek() == Token::LPAREN) {
       // NewExpression with arguments.
       typename Traits::Type::ExpressionList args =
@@ -2019,7 +2505,7 @@
     return factory()->NewCallNew(result, this->NewExpressionList(0, zone_),
                                  new_pos);
   }
-  // No 'new' keyword.
+  // No 'new' or 'super' keyword.
   return this->ParseMemberExpression(ok);
 }
 
@@ -2028,7 +2514,7 @@
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::ParseMemberExpression(bool* ok) {
   // MemberExpression ::
-  //   (PrimaryExpression | FunctionLiteral)
+  //   (PrimaryExpression | FunctionLiteral | ClassLiteral)
   //     ('[' Expression ']' | '.' Identifier | Arguments)*
 
   // The '[' Expression ']' and '.' Identifier parts are parsed by
@@ -2040,7 +2526,7 @@
   if (peek() == Token::FUNCTION) {
     Consume(Token::FUNCTION);
     int function_token_position = position();
-    bool is_generator = allow_generators() && Check(Token::MUL);
+    bool is_generator = Check(Token::MUL);
     IdentifierT name = this->EmptyIdentifier();
     bool is_strict_reserved_name = false;
     Scanner::Location function_name_location = Scanner::Location::invalid();
@@ -2052,14 +2538,25 @@
       function_name_location = scanner()->location();
       function_type = FunctionLiteral::NAMED_EXPRESSION;
     }
-    result = this->ParseFunctionLiteral(name,
-                                        function_name_location,
-                                        is_strict_reserved_name,
-                                        is_generator,
-                                        function_token_position,
-                                        function_type,
-                                        FunctionLiteral::NORMAL_ARITY,
-                                        CHECK_OK);
+    result = this->ParseFunctionLiteral(
+        name, function_name_location, is_strict_reserved_name,
+        is_generator ? FunctionKind::kGeneratorFunction
+                     : FunctionKind::kNormalFunction,
+        function_token_position, function_type, FunctionLiteral::NORMAL_ARITY,
+        CHECK_OK);
+  } else if (peek() == Token::SUPER) {
+    int beg_pos = position();
+    Consume(Token::SUPER);
+    Token::Value next = peek();
+    if (next == Token::PERIOD || next == Token::LBRACK ||
+        next == Token::LPAREN) {
+      result = this->SuperReference(scope_, factory());
+    } else {
+      ReportMessageAt(Scanner::Location(beg_pos, position()),
+                      "unexpected_super");
+      *ok = false;
+      return this->EmptyExpression();
+    }
   } else {
     result = ParsePrimaryExpression(CHECK_OK);
   }
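
The admissibility check above is deliberately narrow: "super" must be immediately followed by ".", "[", or "(" to form a SuperReference; anything else reports unexpected_super. A trivial sketch of the predicate (tokens as strings for brevity):

```cpp
#include <cassert>
#include <string>

bool SuperIsAdmissible(const std::string& next) {
  return next == "." || next == "[" || next == "(";
}

int main() {
  assert(SuperIsAdmissible("."));   // super.method()
  assert(SuperIsAdmissible("["));   // super[key]
  assert(SuperIsAdmissible("("));   // super(args) in a constructor
  assert(!SuperIsAdmissible(";"));  // "var x = super;" -> unexpected_super
}
```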
@@ -2093,7 +2590,7 @@
         int pos = position();
         IdentifierT name = ParseIdentifierName(CHECK_OK);
         expression = factory()->NewProperty(
-            expression, factory()->NewLiteral(name, pos), pos);
+            expression, factory()->NewStringLiteral(name, pos), pos);
         if (fni_ != NULL) {
           this->PushLiteralName(fni_, name);
         }
@@ -2103,11 +2600,183 @@
         return expression;
     }
   }
-  ASSERT(false);
+  DCHECK(false);
   return this->EmptyExpression();
 }
 
 
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<
+    Traits>::ParseArrowFunctionLiteral(int start_pos, ExpressionT params_ast,
+                                       bool* ok) {
+  // TODO(aperez): Change this to use ARROW_SCOPE
+  typename Traits::Type::ScopePtr scope =
+      this->NewScope(scope_, FUNCTION_SCOPE);
+  typename Traits::Type::StatementList body;
+  typename Traits::Type::AstProperties ast_properties;
+  BailoutReason dont_optimize_reason = kNoReason;
+  int num_parameters = -1;
+  int materialized_literal_count = -1;
+  int expected_property_count = -1;
+  int handler_count = 0;
+
+  {
+    FunctionState function_state(&function_state_, &scope_, &scope, zone(),
+                                 this->ast_value_factory(), ast_node_id_gen_);
+    Scanner::Location dupe_error_loc = Scanner::Location::invalid();
+    num_parameters = Traits::DeclareArrowParametersFromExpression(
+        params_ast, scope_, &dupe_error_loc, ok);
+    if (!*ok) {
+      ReportMessageAt(
+          Scanner::Location(start_pos, scanner()->location().beg_pos),
+          "malformed_arrow_function_parameter_list");
+      return this->EmptyExpression();
+    }
+
+    if (num_parameters > Code::kMaxArguments) {
+      ReportMessageAt(Scanner::Location(params_ast->position(), position()),
+                      "too_many_parameters");
+      *ok = false;
+      return this->EmptyExpression();
+    }
+
+    Expect(Token::ARROW, CHECK_OK);
+
+    if (peek() == Token::LBRACE) {
+      // Multiple-statement body
+      Consume(Token::LBRACE);
+      bool is_lazily_parsed =
+          (mode() == PARSE_LAZILY && scope_->AllowsLazyCompilation());
+      if (is_lazily_parsed) {
+        body = this->NewStatementList(0, zone());
+        this->SkipLazyFunctionBody(this->EmptyIdentifier(),
+                                   &materialized_literal_count,
+                                   &expected_property_count, CHECK_OK);
+      } else {
+        body = this->ParseEagerFunctionBody(
+            this->EmptyIdentifier(), RelocInfo::kNoPosition, NULL,
+            Token::INIT_VAR, false,  // Not a generator.
+            CHECK_OK);
+        materialized_literal_count =
+            function_state.materialized_literal_count();
+        expected_property_count = function_state.expected_property_count();
+        handler_count = function_state.handler_count();
+      }
+    } else {
+      // Single-expression body
+      int pos = position();
+      parenthesized_function_ = false;
+      ExpressionT expression = ParseAssignmentExpression(true, CHECK_OK);
+      body = this->NewStatementList(1, zone());
+      body->Add(factory()->NewReturnStatement(expression, pos), zone());
+      materialized_literal_count = function_state.materialized_literal_count();
+      expected_property_count = function_state.expected_property_count();
+      handler_count = function_state.handler_count();
+    }
+
+    scope->set_start_position(start_pos);
+    scope->set_end_position(scanner()->location().end_pos);
+
+    // Arrow function *parameter lists* are always checked as in strict mode.
+    bool function_name_is_strict_reserved = false;
+    Scanner::Location function_name_loc = Scanner::Location::invalid();
+    Scanner::Location eval_args_error_loc = Scanner::Location::invalid();
+    Scanner::Location reserved_loc = Scanner::Location::invalid();
+    this->CheckStrictFunctionNameAndParameters(
+        this->EmptyIdentifier(), function_name_is_strict_reserved,
+        function_name_loc, eval_args_error_loc, dupe_error_loc, reserved_loc,
+        CHECK_OK);
+
+    // Validate strict mode.
+    if (strict_mode() == STRICT) {
+      CheckOctalLiteral(start_pos, scanner()->location().end_pos, CHECK_OK);
+    }
+
+    if (allow_harmony_scoping() && strict_mode() == STRICT)
+      this->CheckConflictingVarDeclarations(scope, CHECK_OK);
+
+    ast_properties = *factory()->visitor()->ast_properties();
+    dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
+  }
+
+  FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
+      this->EmptyIdentifierString(), this->ast_value_factory(), scope, body,
+      materialized_literal_count, expected_property_count, handler_count,
+      num_parameters, FunctionLiteral::kNoDuplicateParameters,
+      FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
+      FunctionLiteral::kNotParenthesized, FunctionKind::kArrowFunction,
+      start_pos);
+
+  function_literal->set_function_token_position(start_pos);
+  function_literal->set_ast_properties(&ast_properties);
+  function_literal->set_dont_optimize_reason(dont_optimize_reason);
+
+  if (fni_ != NULL) this->InferFunctionName(fni_, function_literal);
+
+  return function_literal;
+}
+
+
+template <class Traits>
+typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseClassLiteral(
+    IdentifierT name, Scanner::Location class_name_location,
+    bool name_is_strict_reserved, int pos, bool* ok) {
+  // All parts of a ClassDeclaration or a ClassExpression are strict code.
+  if (name_is_strict_reserved) {
+    ReportMessageAt(class_name_location, "unexpected_strict_reserved");
+    *ok = false;
+    return this->EmptyExpression();
+  }
+  if (this->IsEvalOrArguments(name)) {
+    ReportMessageAt(class_name_location, "strict_eval_arguments");
+    *ok = false;
+    return this->EmptyExpression();
+  }
+
+  // TODO(arv): Implement scopes and name binding in class body only.
+  // TODO(arv): Maybe add CLASS_SCOPE?
+  typename Traits::Type::ScopePtr extends_scope =
+      this->NewScope(scope_, BLOCK_SCOPE);
+  FunctionState extends_function_state(
+      &function_state_, &scope_, &extends_scope, zone(),
+      this->ast_value_factory(), ast_node_id_gen_);
+  scope_->SetStrictMode(STRICT);
+  scope_->SetScopeName(name);
+
+  ExpressionT extends = this->EmptyExpression();
+  if (Check(Token::EXTENDS)) {
+    extends = this->ParseLeftHandSideExpression(CHECK_OK);
+  }
+
+  ObjectLiteralChecker checker(this, STRICT);
+  typename Traits::Type::PropertyList properties =
+      this->NewPropertyList(4, zone_);
+  FunctionLiteralT constructor = this->EmptyFunctionLiteral();
+
+  Expect(Token::LBRACE, CHECK_OK);
+  while (peek() != Token::RBRACE) {
+    if (Check(Token::SEMICOLON)) continue;
+    if (fni_ != NULL) fni_->Enter();
+
+    const bool in_class = true;
+    const bool is_static = false;
+    ObjectLiteralPropertyT property =
+        this->ParsePropertyDefinition(&checker, in_class, is_static, CHECK_OK);
+
+    properties->Add(property, zone());
+
+    if (fni_ != NULL) {
+      fni_->Infer();
+      fni_->Leave();
+    }
+  }
+  Expect(Token::RBRACE, CHECK_OK);
+
+  return this->ClassLiteral(name, extends, constructor, properties, pos,
+                            factory());
+}
+
+
 template <typename Traits>
 typename ParserBase<Traits>::ExpressionT
 ParserBase<Traits>::CheckAndRewriteReferenceExpression(
@@ -2140,9 +2809,7 @@
 
 template <typename Traits>
 void ParserBase<Traits>::ObjectLiteralChecker::CheckProperty(
-    Token::Value property,
-    PropertyKind type,
-    bool* ok) {
+    Token::Value property, PropertyKind type, bool* ok) {
   int old;
   if (property == Token::NUMBER) {
     old = scanner()->FindNumber(&finder_, type);
@@ -2159,15 +2826,13 @@
       // Both a data and an accessor property with the same name.
       parser()->ReportMessage("accessor_data_property");
     } else {
-      ASSERT(IsAccessorAccessorConflict(old_type, type));
+      DCHECK(IsAccessorAccessorConflict(old_type, type));
       // Both accessors of the same type.
       parser()->ReportMessage("accessor_get_set");
     }
     *ok = false;
   }
 }
-
-
 } }  // v8::internal
 
 #endif  // V8_PREPARSER_H
diff --git a/src/prettyprinter.cc b/src/prettyprinter.cc
index f46f6f1..1ff2edd 100644
--- a/src/prettyprinter.cc
+++ b/src/prettyprinter.cc
@@ -6,9 +6,10 @@
 
 #include "src/v8.h"
 
+#include "src/ast-value-factory.h"
+#include "src/base/platform/platform.h"
 #include "src/prettyprinter.h"
 #include "src/scopes.h"
-#include "src/platform.h"
 
 namespace v8 {
 namespace internal {
@@ -133,10 +134,10 @@
 
 void PrettyPrinter::VisitContinueStatement(ContinueStatement* node) {
   Print("continue");
-  ZoneStringList* labels = node->target()->labels();
+  ZoneList<const AstRawString*>* labels = node->target()->labels();
   if (labels != NULL) {
     Print(" ");
-    ASSERT(labels->length() > 0);  // guaranteed to have at least one entry
+    DCHECK(labels->length() > 0);  // guaranteed to have at least one entry
     PrintLiteral(labels->at(0), false);  // any label from the list is fine
   }
   Print(";");
@@ -145,10 +146,10 @@
 
 void PrettyPrinter::VisitBreakStatement(BreakStatement* node) {
   Print("break");
-  ZoneStringList* labels = node->target()->labels();
+  ZoneList<const AstRawString*>* labels = node->target()->labels();
   if (labels != NULL) {
     Print(" ");
-    ASSERT(labels->length() > 0);  // guaranteed to have at least one entry
+    DCHECK(labels->length() > 0);  // guaranteed to have at least one entry
     PrintLiteral(labels->at(0), false);  // any label from the list is fine
   }
   Print(";");
@@ -288,6 +289,21 @@
 }
 
 
+void PrettyPrinter::VisitClassLiteral(ClassLiteral* node) {
+  Print("(class ");
+  PrintLiteral(node->name(), false);
+  if (node->extends()) {
+    Print(" extends ");
+    Visit(node->extends());
+  }
+  Print(" { ");
+  for (int i = 0; i < node->properties()->length(); i++) {
+    PrintObjectLiteralProperty(node->properties()->at(i));
+  }
+  Print(" })");
+}
+
+
 void PrettyPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
   Print("(");
   PrintLiteral(node->name(), false);
@@ -322,16 +338,22 @@
   Print("{ ");
   for (int i = 0; i < node->properties()->length(); i++) {
     if (i != 0) Print(",");
-    ObjectLiteral::Property* property = node->properties()->at(i);
-    Print(" ");
-    Visit(property->key());
-    Print(": ");
-    Visit(property->value());
+    PrintObjectLiteralProperty(node->properties()->at(i));
   }
   Print(" }");
 }
 
 
+void PrettyPrinter::PrintObjectLiteralProperty(
+    ObjectLiteralProperty* property) {
+  // TODO(arv): Better printing of methods etc.
+  Print(" ");
+  Visit(property->key());
+  Print(": ");
+  Visit(property->value());
+}
+
+
 void PrettyPrinter::VisitArrayLiteral(ArrayLiteral* node) {
   Print("[ ");
   for (int i = 0; i < node->values()->length(); i++) {
@@ -446,6 +468,11 @@
 }
 
 
+void PrettyPrinter::VisitSuperReference(SuperReference* node) {
+  Print("<super-reference>");
+}
+
+
 const char* PrettyPrinter::Print(AstNode* node) {
   Init();
   Visit(node);
@@ -478,7 +505,7 @@
 
 void PrettyPrinter::Init() {
   if (size_ == 0) {
-    ASSERT(output_ == NULL);
+    DCHECK(output_ == NULL);
     const int initial_size = 256;
     output_ = NewArray<char>(initial_size);
     size_ = initial_size;
@@ -524,7 +551,7 @@
 }
 
 
-void PrettyPrinter::PrintLabels(ZoneStringList* labels) {
+void PrettyPrinter::PrintLabels(ZoneList<const AstRawString*>* labels) {
   if (labels != NULL) {
     for (int i = 0; i < labels->length(); i++) {
       PrintLiteral(labels->at(i), false);
@@ -582,6 +609,11 @@
 }
 
 
+void PrettyPrinter::PrintLiteral(const AstRawString* value, bool quote) {
+  PrintLiteral(value->string(), quote);
+}
+
+
 void PrettyPrinter::PrintParameters(Scope* scope) {
   Print("(");
   for (int i = 0; i < scope->num_parameters(); i++) {
@@ -639,7 +671,7 @@
 
 
 AstPrinter::~AstPrinter() {
-  ASSERT(indent_ == 0);
+  DCHECK(indent_ == 0);
 }
 
 
@@ -676,7 +708,7 @@
 }
 
 
-void AstPrinter::PrintLabelsIndented(ZoneStringList* labels) {
+void AstPrinter::PrintLabelsIndented(ZoneList<const AstRawString*>* labels) {
   if (labels == NULL || labels->length() == 0) return;
   PrintIndented("LABELS ");
   PrintLabels(labels);
@@ -958,6 +990,12 @@
 }
 
 
+void AstPrinter::VisitClassLiteral(ClassLiteral* node) {
+  IndentedScope indent(this, "CLASS LITERAL");
+  PrintLiteralIndented("NAME", node->name(), false);
+}
+
+
 void AstPrinter::VisitNativeFunctionLiteral(NativeFunctionLiteral* node) {
   IndentedScope indent(this, "NATIVE FUNC LITERAL");
   PrintLiteralIndented("NAME", node->name(), false);
@@ -1139,6 +1177,11 @@
   IndentedScope indent(this, "THIS-FUNCTION");
 }
 
+
+void AstPrinter::VisitSuperReference(SuperReference* node) {
+  IndentedScope indent(this, "SUPER-REFERENCE");
+}
+
 #endif  // DEBUG
 
 } }  // namespace v8::internal
diff --git a/src/prettyprinter.h b/src/prettyprinter.h
index 585734e..d300d9a 100644
--- a/src/prettyprinter.h
+++ b/src/prettyprinter.h
@@ -44,13 +44,15 @@
   const char* Output() const { return output_; }
 
   virtual void PrintStatements(ZoneList<Statement*>* statements);
-  void PrintLabels(ZoneStringList* labels);
+  void PrintLabels(ZoneList<const AstRawString*>* labels);
   virtual void PrintArguments(ZoneList<Expression*>* arguments);
   void PrintLiteral(Handle<Object> value, bool quote);
+  void PrintLiteral(const AstRawString* value, bool quote);
   void PrintParameters(Scope* scope);
   void PrintDeclarations(ZoneList<Declaration*>* declarations);
   void PrintFunctionLiteral(FunctionLiteral* function);
   void PrintCaseClause(CaseClause* clause);
+  void PrintObjectLiteralProperty(ObjectLiteralProperty* property);
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
 };
@@ -83,7 +85,7 @@
   void PrintLiteralWithModeIndented(const char* info,
                                     Variable* var,
                                     Handle<Object> value);
-  void PrintLabelsIndented(ZoneStringList* labels);
+  void PrintLabelsIndented(ZoneList<const AstRawString*>* labels);
 
   void inc_indent() { indent_++; }
   void dec_indent() { indent_--; }
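Review note: adding ClassLiteral and SuperReference to the AST means every visitor (PrettyPrinter, AstPrinter, and the rest) must grow a matching Visit method, which is what the hunks above do. A minimal sketch of that pattern in plain C++ (hypothetical node names, not V8's AST classes):

    #include <iostream>

    struct ClassLiteral;  // forward declarations for the visitor interface
    struct SuperReference;

    struct AstVisitor {
      virtual ~AstVisitor() {}
      virtual void VisitClassLiteral(ClassLiteral* node) = 0;
      virtual void VisitSuperReference(SuperReference* node) = 0;
    };

    struct ClassLiteral {
      void Accept(AstVisitor* v) { v->VisitClassLiteral(this); }
    };
    struct SuperReference {
      void Accept(AstVisitor* v) { v->VisitSuperReference(this); }
    };

    // Each concrete visitor supplies its own handling, mirroring how
    // PrettyPrinter prints "(class ...)" while AstPrinter prints an
    // indented "CLASS LITERAL" node.
    struct Printer : AstVisitor {
      void VisitClassLiteral(ClassLiteral*) override {
        std::cout << "(class ...)\n";
      }
      void VisitSuperReference(SuperReference*) override {
        std::cout << "<super-reference>\n";
      }
    };

    int main() {
      ClassLiteral c;
      SuperReference s;
      Printer p;
      c.Accept(&p);
      s.Accept(&p);
    }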
diff --git a/src/profile-generator.cc b/src/profile-generator.cc
index 5c17792..6017f12 100644
--- a/src/profile-generator.cc
+++ b/src/profile-generator.cc
@@ -8,8 +8,8 @@
 
 #include "src/compiler.h"
 #include "src/debug.h"
-#include "src/sampler.h"
 #include "src/global-handles.h"
+#include "src/sampler.h"
 #include "src/scopeinfo.h"
 #include "src/unicode.h"
 #include "src/zone-inl.h"
@@ -107,17 +107,12 @@
 
 
 const char* StringsStorage::GetFunctionName(Name* name) {
-  return BeautifyFunctionName(GetName(name));
+  return GetName(name);
 }
 
 
 const char* StringsStorage::GetFunctionName(const char* name) {
-  return BeautifyFunctionName(GetCopy(name));
-}
-
-
-const char* StringsStorage::BeautifyFunctionName(const char* name) {
-  return (*name == 0) ? ProfileGenerator::kAnonymousFunctionName : name;
+  return GetCopy(name);
 }
 
 
@@ -208,17 +203,12 @@
 
 
 void ProfileNode::Print(int indent) {
-  OS::Print("%5u %*s %s%s %d #%d %s",
-            self_ticks_,
-            indent, "",
-            entry_->name_prefix(),
-            entry_->name(),
-            entry_->script_id(),
-            id(),
-            entry_->bailout_reason());
+  base::OS::Print("%5u %*s %s%s %d #%d %s", self_ticks_, indent, "",
+                  entry_->name_prefix(), entry_->name(), entry_->script_id(),
+                  id(), entry_->bailout_reason());
   if (entry_->resource_name()[0] != '\0')
-    OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
-  OS::Print("\n");
+    base::OS::Print(" %s:%d", entry_->resource_name(), entry_->line_number());
+  base::OS::Print("\n");
   for (HashMap::Entry* p = children_.Start();
        p != NULL;
        p = children_.Next(p)) {
@@ -332,11 +322,12 @@
 CpuProfile::CpuProfile(const char* title, bool record_samples)
     : title_(title),
       record_samples_(record_samples),
-      start_time_(TimeTicks::HighResolutionNow()) {
+      start_time_(base::TimeTicks::HighResolutionNow()) {
 }
 
 
-void CpuProfile::AddPath(TimeTicks timestamp, const Vector<CodeEntry*>& path) {
+void CpuProfile::AddPath(base::TimeTicks timestamp,
+                         const Vector<CodeEntry*>& path) {
   ProfileNode* top_frame_node = top_down_.AddPathFromEnd(path);
   if (record_samples_) {
     timestamps_.Add(timestamp);
@@ -346,12 +337,12 @@
 
 
 void CpuProfile::CalculateTotalTicksAndSamplingRate() {
-  end_time_ = TimeTicks::HighResolutionNow();
+  end_time_ = base::TimeTicks::HighResolutionNow();
 }
 
 
 void CpuProfile::Print() {
-  OS::Print("[Top down]:\n");
+  base::OS::Print("[Top down]:\n");
   top_down_.Print();
 }
 
@@ -403,7 +394,7 @@
   // For shared function entries, 'size' field is used to store their IDs.
   if (tree_.Find(addr, &locator)) {
     const CodeEntryInfo& entry = locator.value();
-    ASSERT(entry.entry == kSharedFunctionCodeEntry);
+    DCHECK(entry.entry == kSharedFunctionCodeEntry);
     return entry.size;
   } else {
     tree_.Insert(addr, &locator);
@@ -428,9 +419,9 @@
     const Address& key, const CodeMap::CodeEntryInfo& value) {
   // For shared function entries, 'size' field is used to store their IDs.
   if (value.entry == kSharedFunctionCodeEntry) {
-    OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
+    base::OS::Print("%p SharedFunctionInfo %d\n", key, value.size);
   } else {
-    OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
+    base::OS::Print("%p %5d %s\n", key, value.size, value.entry->name());
   }
 }
 
@@ -473,9 +464,10 @@
   }
   for (int i = 0; i < current_profiles_.length(); ++i) {
     if (strcmp(current_profiles_[i]->title(), title) == 0) {
-      // Ignore attempts to start profile with the same title.
+      // Ignore attempts to start a profile with the same title...
       current_profiles_semaphore_.Signal();
-      return false;
+      // ... though return true to force it to collect a sample.
+      return true;
     }
   }
   current_profiles_.Add(new CpuProfile(title, record_samples));
@@ -525,7 +517,7 @@
 
 
 void CpuProfilesCollection::AddPathToCurrentProfiles(
-    TimeTicks timestamp, const Vector<CodeEntry*>& path) {
+    base::TimeTicks timestamp, const Vector<CodeEntry*>& path) {
   // As starting / stopping profiles is rare relative to this
   // method, we don't bother minimizing the duration of lock holding,
   // e.g. copying contents of the list to a local vector.
@@ -555,8 +547,6 @@
 }
 
 
-const char* const ProfileGenerator::kAnonymousFunctionName =
-    "(anonymous function)";
 const char* const ProfileGenerator::kProgramEntryName =
     "(program)";
 const char* const ProfileGenerator::kIdleEntryName =
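Review note: the StartProfiling hunk above changes the duplicate-title contract: starting a profile whose title is already live used to return false, and now returns true so the caller still collects samples for the running profile. A toy model of the new semantics (plain C++, not the real CpuProfilesCollection):

    #include <cassert>
    #include <string>
    #include <vector>

    // Returns true both for a newly started profile and for a duplicate
    // title, matching the behavior after this patch.
    bool StartProfiling(std::vector<std::string>* live, const std::string& title) {
      for (const std::string& t : *live) {
        if (t == title) return true;  // already running: keep sampling
      }
      live->push_back(title);
      return true;
    }

    int main() {
      std::vector<std::string> live;
      assert(StartProfiling(&live, "startup"));
      assert(StartProfiling(&live, "startup"));  // duplicate now reports success
      assert(live.size() == 1);                  // but is only registered once
    }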
diff --git a/src/profile-generator.h b/src/profile-generator.h
index c89b41a..5ebb92b 100644
--- a/src/profile-generator.h
+++ b/src/profile-generator.h
@@ -5,9 +5,9 @@
 #ifndef V8_PROFILE_GENERATOR_H_
 #define V8_PROFILE_GENERATOR_H_
 
+#include "include/v8-profiler.h"
 #include "src/allocation.h"
 #include "src/hashmap.h"
-#include "include/v8-profiler.h"
 
 namespace v8 {
 namespace internal {
@@ -34,7 +34,6 @@
   static const int kMaxNameSize = 1024;
 
   static bool StringsMatch(void* key1, void* key2);
-  const char* BeautifyFunctionName(const char* name);
   const char* AddOrDisposeString(char* str, int len);
   HashMap::Entry* GetEntry(const char* str, int len);
 
@@ -176,7 +175,7 @@
   CpuProfile(const char* title, bool record_samples);
 
   // Add pc -> ... -> main() call path to the profile.
-  void AddPath(TimeTicks timestamp, const Vector<CodeEntry*>& path);
+  void AddPath(base::TimeTicks timestamp, const Vector<CodeEntry*>& path);
   void CalculateTotalTicksAndSamplingRate();
 
   const char* title() const { return title_; }
@@ -184,10 +183,12 @@
 
   int samples_count() const { return samples_.length(); }
   ProfileNode* sample(int index) const { return samples_.at(index); }
-  TimeTicks sample_timestamp(int index) const { return timestamps_.at(index); }
+  base::TimeTicks sample_timestamp(int index) const {
+    return timestamps_.at(index);
+  }
 
-  TimeTicks start_time() const { return start_time_; }
-  TimeTicks end_time() const { return end_time_; }
+  base::TimeTicks start_time() const { return start_time_; }
+  base::TimeTicks end_time() const { return end_time_; }
 
   void UpdateTicksScale();
 
@@ -196,10 +197,10 @@
  private:
   const char* title_;
   bool record_samples_;
-  TimeTicks start_time_;
-  TimeTicks end_time_;
+  base::TimeTicks start_time_;
+  base::TimeTicks end_time_;
   List<ProfileNode*> samples_;
-  List<TimeTicks> timestamps_;
+  List<base::TimeTicks> timestamps_;
   ProfileTree top_down_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuProfile);
@@ -285,7 +286,7 @@
 
   // Called from profile generator thread.
   void AddPathToCurrentProfiles(
-      TimeTicks timestamp, const Vector<CodeEntry*>& path);
+      base::TimeTicks timestamp, const Vector<CodeEntry*>& path);
 
   // Limits the number of profiles that can be simultaneously collected.
   static const int kMaxSimultaneousProfiles = 100;
@@ -297,7 +298,7 @@
 
   // Accessed by VM thread and profile generator thread.
   List<CpuProfile*> current_profiles_;
-  Semaphore current_profiles_semaphore_;
+  base::Semaphore current_profiles_semaphore_;
 
   DISALLOW_COPY_AND_ASSIGN(CpuProfilesCollection);
 };
@@ -311,7 +312,6 @@
 
   CodeMap* code_map() { return &code_map_; }
 
-  static const char* const kAnonymousFunctionName;
   static const char* const kProgramEntryName;
   static const char* const kIdleEntryName;
   static const char* const kGarbageCollectorEntryName;
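Review note: base::TimeTicks::HighResolutionNow(), now used for start_time_/end_time_, is a monotonic timestamp bracketing the profile's lifetime. The same bracketing pattern with the standard library's monotonic clock (an analogy, not the V8 type):

    #include <chrono>
    #include <iostream>

    int main() {
      // Monotonic, like base::TimeTicks: unaffected by wall-clock changes.
      auto start_time = std::chrono::steady_clock::now();
      volatile long sink = 0;
      for (long i = 0; i < 1000000; ++i) sink = sink + i;  // stand-in for work
      auto end_time = std::chrono::steady_clock::now();
      auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                    end_time - start_time).count();
      std::cout << "elapsed: " << us << " us\n";
    }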
diff --git a/src/promise.js b/src/promise.js
index 710abad..37c10ec 100644
--- a/src/promise.js
+++ b/src/promise.js
@@ -18,6 +18,7 @@
 var PromiseChain;
 var PromiseCatch;
 var PromiseThen;
+var PromiseHasRejectHandler;
 
 // mirror-debugger.js currently uses builtins.promiseStatus. It would be nice
 // if we could move these property names into the closure below.
@@ -29,6 +30,8 @@
 var promiseOnResolve = GLOBAL_PRIVATE("Promise#onResolve");
 var promiseOnReject = GLOBAL_PRIVATE("Promise#onReject");
 var promiseRaw = GLOBAL_PRIVATE("Promise#raw");
+var promiseDebug = GLOBAL_PRIVATE("Promise#debug");
+var lastMicrotaskId = 0;
 
 (function() {
 
@@ -39,13 +42,13 @@
       throw MakeTypeError('resolver_not_a_function', [resolver]);
     var promise = PromiseInit(this);
     try {
-      %DebugPromiseHandlePrologue(function() { return promise });
+      %DebugPushPromise(promise);
       resolver(function(x) { PromiseResolve(promise, x) },
                function(r) { PromiseReject(promise, r) });
     } catch (e) {
       PromiseReject(promise, e);
     } finally {
-      %DebugPromiseHandleEpilogue();
+      %DebugPopPromise();
     }
   }
 
@@ -56,6 +59,9 @@
     SET_PRIVATE(promise, promiseValue, value);
     SET_PRIVATE(promise, promiseOnResolve, onResolve);
     SET_PRIVATE(promise, promiseOnReject, onReject);
+    if (DEBUG_IS_ACTIVE) {
+      %DebugPromiseEvent({ promise: promise, status: status, value: value });
+    }
     return promise;
   }
 
@@ -66,7 +72,7 @@
 
   function PromiseDone(promise, status, value, promiseQueue) {
     if (GET_PRIVATE(promise, promiseStatus) === 0) {
-      PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue));
+      PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue), status);
       PromiseSet(promise, status, value);
     }
   }
@@ -94,11 +100,7 @@
 
   function PromiseHandle(value, handler, deferred) {
     try {
-      %DebugPromiseHandlePrologue(
-          function() {
-            var queue = GET_PRIVATE(deferred.promise, promiseOnReject);
-            return (queue && queue.length == 0) ? deferred.promise : UNDEFINED;
-          });
+      %DebugPushPromise(deferred.promise);
       var result = handler(value);
       if (result === deferred.promise)
         throw MakeTypeError('promise_cyclic', [result]);
@@ -107,23 +109,30 @@
       else
         deferred.resolve(result);
     } catch (exception) {
-      try {
-        %DebugPromiseHandlePrologue(function() { return deferred.promise });
-        deferred.reject(exception);
-      } catch (e) { } finally {
-        %DebugPromiseHandleEpilogue();
-      }
+      try { deferred.reject(exception); } catch (e) { }
     } finally {
-      %DebugPromiseHandleEpilogue();
+      %DebugPopPromise();
     }
   }
 
-  function PromiseEnqueue(value, tasks) {
+  function PromiseEnqueue(value, tasks, status) {
+    var id, name, instrumenting = DEBUG_IS_ACTIVE;
     %EnqueueMicrotask(function() {
+      if (instrumenting) {
+        %DebugAsyncTaskEvent({ type: "willHandle", id: id, name: name });
+      }
       for (var i = 0; i < tasks.length; i += 2) {
         PromiseHandle(value, tasks[i], tasks[i + 1])
       }
+      if (instrumenting) {
+        %DebugAsyncTaskEvent({ type: "didHandle", id: id, name: name });
+      }
     });
+    if (instrumenting) {
+      id = ++lastMicrotaskId;
+      name = status > 0 ? "Promise.resolve" : "Promise.reject";
+      %DebugAsyncTaskEvent({ type: "enqueue", id: id, name: name });
+    }
   }
 
   function PromiseIdResolveHandler(x) { return x }
@@ -137,7 +146,7 @@
   // For bootstrapper.
 
   IsPromise = function IsPromise(x) {
-    return IS_SPEC_OBJECT(x) && HAS_PRIVATE(x, promiseStatus);
+    return IS_SPEC_OBJECT(x) && HAS_DEFINED_PRIVATE(x, promiseStatus);
   }
 
   PromiseCreate = function PromiseCreate() {
@@ -149,6 +158,13 @@
   }
 
   PromiseReject = function PromiseReject(promise, r) {
+    // Check promise status to confirm that this reject has an effect.
+    // Check promiseDebug property to avoid duplicate event.
+    if (DEBUG_IS_ACTIVE &&
+        GET_PRIVATE(promise, promiseStatus) == 0 &&
+        !HAS_DEFINED_PRIVATE(promise, promiseDebug)) {
+      %DebugPromiseRejectEvent(promise, r);
+    }
     PromiseDone(promise, -1, r, promiseOnReject)
   }
 
@@ -194,7 +210,7 @@
   // Simple chaining.
 
   PromiseChain = function PromiseChain(onResolve, onReject) {  // a.k.a.
-                                                                // flatMap
+                                                               // flatMap
     onResolve = IS_UNDEFINED(onResolve) ? PromiseIdResolveHandler : onResolve;
     onReject = IS_UNDEFINED(onReject) ? PromiseIdRejectHandler : onReject;
     var deferred = %_CallFunction(this.constructor, PromiseDeferred);
@@ -206,12 +222,19 @@
         GET_PRIVATE(this, promiseOnReject).push(onReject, deferred);
         break;
       case +1:  // Resolved
-        PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onResolve, deferred]);
+        PromiseEnqueue(GET_PRIVATE(this, promiseValue),
+                       [onResolve, deferred],
+                       +1);
         break;
       case -1:  // Rejected
-        PromiseEnqueue(GET_PRIVATE(this, promiseValue), [onReject, deferred]);
+        PromiseEnqueue(GET_PRIVATE(this, promiseValue),
+                       [onReject, deferred],
+                       -1);
         break;
     }
+    if (DEBUG_IS_ACTIVE) {
+      %DebugPromiseEvent({ promise: deferred.promise, parentPromise: this });
+    }
     return deferred.promise;
   }
 
@@ -261,11 +284,15 @@
       } else {
         for (var i = 0; i < values.length; ++i) {
           this.resolve(values[i]).then(
-            function(i, x) {
-              resolutions[i] = x;
-              if (--count === 0) deferred.resolve(resolutions);
-            }.bind(UNDEFINED, i),  // TODO(rossberg): use let loop once
-                                    // available
+            (function() {
+              // Nested scope to get closure over current i (and avoid .bind).
+              // TODO(rossberg): Use for-let instead once available.
+              var i_captured = i;
+              return function(x) {
+                resolutions[i_captured] = x;
+                if (--count === 0) deferred.resolve(resolutions);
+              };
+            })(),
             function(r) { deferred.reject(r) }
           );
         }
@@ -295,11 +322,32 @@
     return deferred.promise;
   }
 
+
+  // Utility for debugger
+
+  function PromiseHasRejectHandlerRecursive(promise) {
+    var queue = GET_PRIVATE(promise, promiseOnReject);
+    if (IS_UNDEFINED(queue)) return false;
+    // Do a depth-first search for a reject handler that's not
+    // the default PromiseIdRejectHandler.
+    for (var i = 0; i < queue.length; i += 2) {
+      if (queue[i] != PromiseIdRejectHandler) return true;
+      if (PromiseHasRejectHandlerRecursive(queue[i + 1].promise)) return true;
+    }
+    return false;
+  }
+
+  PromiseHasRejectHandler = function PromiseHasRejectHandler() {
+    // Mark promise as already having triggered a reject event.
+    SET_PRIVATE(this, promiseDebug, true);
+    return PromiseHasRejectHandlerRecursive(this);
+  };
+
   // -------------------------------------------------------------------
   // Install exported functions.
 
   %CheckIsBootstrapping();
-  %SetProperty(global, 'Promise', $Promise, DONT_ENUM);
+  %AddNamedProperty(global, 'Promise', $Promise, DONT_ENUM);
   InstallFunctions($Promise, DONT_ENUM, [
     "defer", PromiseDeferred,
     "accept", PromiseResolved,
diff --git a/src/property-details-inl.h b/src/property-details-inl.h
index eaa596f..efb27b3 100644
--- a/src/property-details-inl.h
+++ b/src/property-details-inl.h
@@ -13,18 +13,6 @@
 namespace v8 {
 namespace internal {
 
-inline bool Representation::CanContainDouble(double value) {
-  if (IsDouble() || is_more_general_than(Representation::Double())) {
-    return true;
-  }
-  if (IsInt32Double(value)) {
-    if (IsInteger32()) return true;
-    if (IsSmi()) return Smi::IsValid(static_cast<int32_t>(value));
-  }
-  return false;
-}
-
-
 Representation Representation::FromType(Type* type) {
   DisallowHeapAllocation no_allocation;
   if (type->Is(Type::None())) return Representation::None();
diff --git a/src/property-details.h b/src/property-details.h
index cfe257e..f75bcff 100644
--- a/src/property-details.h
+++ b/src/property-details.h
@@ -46,16 +46,11 @@
 // A copy of this is in mirror-debugger.js.
 enum PropertyType {
   // Only in slow mode.
-  NORMAL                    = 0,
+  NORMAL = 0,
   // Only in fast mode.
-  FIELD                     = 1,
-  CONSTANT                  = 2,
-  CALLBACKS                 = 3,
-  // Only in lookup results, not in descriptors.
-  HANDLER                   = 4,
-  INTERCEPTOR               = 5,
-  // Only used as a marker in LookupResult.
-  NONEXISTENT               = 6
+  FIELD = 1,
+  CONSTANT = 2,
+  CALLBACKS = 3
 };
 
 
@@ -112,8 +107,8 @@
     if (kind_ == kExternal && other.kind_ == kExternal) return false;
     if (kind_ == kNone && other.kind_ == kExternal) return false;
 
-    ASSERT(kind_ != kExternal);
-    ASSERT(other.kind_ != kExternal);
+    DCHECK(kind_ != kExternal);
+    DCHECK(other.kind_ != kExternal);
     if (IsHeapObject()) return other.IsNone();
     if (kind_ == kUInteger8 && other.kind_ == kInteger8) return false;
     if (kind_ == kUInteger16 && other.kind_ == kInteger16) return false;
@@ -124,8 +119,6 @@
     return other.is_more_general_than(*this) || other.Equals(*this);
   }
 
-  bool CanContainDouble(double value);
-
   Representation generalize(Representation other) {
     if (other.fits_into(*this)) return *this;
     if (other.is_more_general_than(*this)) return other;
@@ -133,7 +126,7 @@
   }
 
   int size() const {
-    ASSERT(!IsNone());
+    DCHECK(!IsNone());
     if (IsInteger8() || IsUInteger8()) {
       return sizeof(uint8_t);
     }
@@ -197,8 +190,8 @@
         | AttributesField::encode(attributes)
         | DictionaryStorageField::encode(index);
 
-    ASSERT(type == this->type());
-    ASSERT(attributes == this->attributes());
+    DCHECK(type == this->type());
+    DCHECK(attributes == this->attributes());
   }
 
   PropertyDetails(PropertyAttributes attributes,
@@ -247,7 +240,7 @@
   }
 
   Representation representation() const {
-    ASSERT(type() != NORMAL);
+    DCHECK(type() != NORMAL);
     return DecodeRepresentation(RepresentationField::decode(value_));
   }
 
@@ -262,28 +255,28 @@
   }
 
   bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
-  bool IsDontDelete() const { return (attributes() & DONT_DELETE) != 0; }
+  bool IsConfigurable() const { return (attributes() & DONT_DELETE) == 0; }
   bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
   bool IsDeleted() const { return DeletedField::decode(value_) != 0;}
 
   // Bit fields in value_ (type, shift, size). Must be public so the
   // constants can be embedded in generated code.
-  class TypeField:                public BitField<PropertyType,       0,  3> {};
-  class AttributesField:          public BitField<PropertyAttributes, 3,  3> {};
+  class TypeField : public BitField<PropertyType, 0, 2> {};
+  class AttributesField : public BitField<PropertyAttributes, 2, 3> {};
 
   // Bit fields for normalized objects.
-  class DeletedField:             public BitField<uint32_t,           6,  1> {};
-  class DictionaryStorageField:   public BitField<uint32_t,           7, 24> {};
+  class DeletedField : public BitField<uint32_t, 5, 1> {};
+  class DictionaryStorageField : public BitField<uint32_t, 6, 24> {};
 
   // Bit fields for fast objects.
-  class RepresentationField:      public BitField<uint32_t,           6,  4> {};
-  class DescriptorPointer:        public BitField<uint32_t, 10,
-      kDescriptorIndexBitCount> {};  // NOLINT
-  class FieldIndexField:          public BitField<uint32_t,
-      10 + kDescriptorIndexBitCount,
-      kDescriptorIndexBitCount> {};  // NOLINT
+  class RepresentationField : public BitField<uint32_t, 5, 4> {};
+  class DescriptorPointer
+      : public BitField<uint32_t, 9, kDescriptorIndexBitCount> {};  // NOLINT
+  class FieldIndexField
+      : public BitField<uint32_t, 9 + kDescriptorIndexBitCount,
+                        kDescriptorIndexBitCount> {};  // NOLINT
+  // All bits for fast objects must fit in a smi.
-  STATIC_ASSERT(10 + kDescriptorIndexBitCount + kDescriptorIndexBitCount <= 31);
+  STATIC_ASSERT(9 + kDescriptorIndexBitCount + kDescriptorIndexBitCount <= 31);
 
   static const int kInitialIndex = 1;
 
diff --git a/src/property.cc b/src/property.cc
index 24b39a9..f0ff95c 100644
--- a/src/property.cc
+++ b/src/property.cc
@@ -5,6 +5,7 @@
 #include "src/property.h"
 
 #include "src/handles-inl.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -12,70 +13,27 @@
 void LookupResult::Iterate(ObjectVisitor* visitor) {
   LookupResult* current = this;  // Could be NULL.
   while (current != NULL) {
-    visitor->VisitPointer(BitCast<Object**>(&current->holder_));
-    visitor->VisitPointer(BitCast<Object**>(&current->transition_));
+    visitor->VisitPointer(bit_cast<Object**>(&current->holder_));
+    visitor->VisitPointer(bit_cast<Object**>(&current->transition_));
     current = current->next_;
   }
 }
 
 
-#ifdef OBJECT_PRINT
-void LookupResult::Print(FILE* out) {
-  if (!IsFound()) {
-    PrintF(out, "Not Found\n");
-    return;
-  }
+OStream& operator<<(OStream& os, const LookupResult& r) {
+  if (!r.IsFound()) return os << "Not Found\n";
 
-  PrintF(out, "LookupResult:\n");
-  PrintF(out, " -cacheable = %s\n", IsCacheable() ? "true" : "false");
-  PrintF(out, " -attributes = %x\n", GetAttributes());
-  if (IsTransition()) {
-    PrintF(out, " -transition target:\n");
-    GetTransitionTarget()->Print(out);
-    PrintF(out, "\n");
+  os << "LookupResult:\n";
+  if (r.IsTransition()) {
+    os << " -transition target:\n" << Brief(r.GetTransitionTarget()) << "\n";
   }
-  switch (type()) {
-    case NORMAL:
-      PrintF(out, " -type = normal\n");
-      PrintF(out, " -entry = %d", GetDictionaryEntry());
-      break;
-    case CONSTANT:
-      PrintF(out, " -type = constant\n");
-      PrintF(out, " -value:\n");
-      GetConstant()->Print(out);
-      PrintF(out, "\n");
-      break;
-    case FIELD:
-      PrintF(out, " -type = field\n");
-      PrintF(out, " -index = %d\n",
-             GetFieldIndex().property_index());
-      PrintF(out, " -field type:\n");
-      GetFieldType()->TypePrint(out);
-      break;
-    case CALLBACKS:
-      PrintF(out, " -type = call backs\n");
-      PrintF(out, " -callback object:\n");
-      GetCallbackObject()->Print(out);
-      break;
-    case HANDLER:
-      PrintF(out, " -type = lookup proxy\n");
-      break;
-    case INTERCEPTOR:
-      PrintF(out, " -type = lookup interceptor\n");
-      break;
-    case NONEXISTENT:
-      UNREACHABLE();
-      break;
-  }
+  return os;
 }
 
 
-void Descriptor::Print(FILE* out) {
-  PrintF(out, "Descriptor ");
-  GetKey()->ShortPrint(out);
-  PrintF(out, " @ ");
-  GetValue()->ShortPrint(out);
+OStream& operator<<(OStream& os, const Descriptor& d) {
+  return os << "Descriptor " << Brief(*d.GetKey()) << " @ "
+            << Brief(*d.GetValue());
 }
-#endif
 
 } }  // namespace v8::internal
diff --git a/src/property.h b/src/property.h
index ebb4343..779d9fc 100644
--- a/src/property.h
+++ b/src/property.h
@@ -5,15 +5,17 @@
 #ifndef V8_PROPERTY_H_
 #define V8_PROPERTY_H_
 
-#include "src/isolate.h"
 #include "src/factory.h"
 #include "src/field-index.h"
 #include "src/field-index-inl.h"
+#include "src/isolate.h"
 #include "src/types.h"
 
 namespace v8 {
 namespace internal {
 
+class OStream;
+
 // Abstraction for elements in instance-descriptor arrays.
 //
 // Each descriptor has a key, property attributes, property type,
@@ -28,13 +30,9 @@
     }
   }
 
-  Handle<Name> GetKey() { return key_; }
-  Handle<Object> GetValue() { return value_; }
-  PropertyDetails GetDetails() { return details_; }
-
-#ifdef OBJECT_PRINT
-  void Print(FILE* out);
-#endif
+  Handle<Name> GetKey() const { return key_; }
+  Handle<Object> GetValue() const { return value_; }
+  PropertyDetails GetDetails() const { return details_; }
 
   void SetSortedKeyIndex(int index) { details_ = details_.set_pointer(index); }
 
@@ -72,7 +70,10 @@
 };
 
 
-class FieldDescriptor V8_FINAL : public Descriptor {
+OStream& operator<<(OStream& os, const Descriptor& d);
+
+
+class FieldDescriptor FINAL : public Descriptor {
  public:
   FieldDescriptor(Handle<Name> key,
                   int field_index,
@@ -90,7 +91,7 @@
 };
 
 
-class ConstantDescriptor V8_FINAL : public Descriptor {
+class ConstantDescriptor FINAL : public Descriptor {
  public:
   ConstantDescriptor(Handle<Name> key,
                      Handle<Object> value,
@@ -100,7 +101,7 @@
 };
 
 
-class CallbacksDescriptor V8_FINAL : public Descriptor {
+class CallbacksDescriptor FINAL : public Descriptor {
  public:
   CallbacksDescriptor(Handle<Name> key,
                       Handle<Object> foreign,
@@ -110,7 +111,7 @@
 };
 
 
-class LookupResult V8_FINAL BASE_EMBEDDED {
+class LookupResult FINAL BASE_EMBEDDED {
  public:
   explicit LookupResult(Isolate* isolate)
       : isolate_(isolate),
@@ -118,13 +119,12 @@
         lookup_type_(NOT_FOUND),
         holder_(NULL),
         transition_(NULL),
-        cacheable_(true),
-        details_(NONE, NONEXISTENT, Representation::None()) {
+        details_(NONE, NORMAL, Representation::None()) {
     isolate->set_top_lookup_result(this);
   }
 
   ~LookupResult() {
-    ASSERT(isolate()->top_lookup_result() == this);
+    DCHECK(isolate()->top_lookup_result() == this);
     isolate()->set_top_lookup_result(next_);
   }
 
@@ -138,28 +138,6 @@
     number_ = number;
   }
 
-  bool CanHoldValue(Handle<Object> value) const {
-    switch (type()) {
-      case NORMAL:
-        return true;
-      case FIELD:
-        return value->FitsRepresentation(representation()) &&
-            GetFieldType()->NowContains(value);
-      case CONSTANT:
-        ASSERT(GetConstant() != *value ||
-               value->FitsRepresentation(representation()));
-        return GetConstant() == *value;
-      case CALLBACKS:
-      case HANDLER:
-      case INTERCEPTOR:
-        return true;
-      case NONEXISTENT:
-        UNREACHABLE();
-    }
-    UNREACHABLE();
-    return true;
-  }
-
   void TransitionResult(JSObject* holder, Map* target) {
     lookup_type_ = TRANSITION_TYPE;
     number_ = target->LastAdded();
@@ -168,190 +146,50 @@
     transition_ = target;
   }
 
-  void DictionaryResult(JSObject* holder, int entry) {
-    lookup_type_ = DICTIONARY_TYPE;
-    holder_ = holder;
-    transition_ = NULL;
-    details_ = holder->property_dictionary()->DetailsAt(entry);
-    number_ = entry;
-  }
-
-  void HandlerResult(JSProxy* proxy) {
-    lookup_type_ = HANDLER_TYPE;
-    holder_ = proxy;
-    transition_ = NULL;
-    details_ = PropertyDetails(NONE, HANDLER, Representation::Tagged());
-    cacheable_ = false;
-  }
-
-  void InterceptorResult(JSObject* holder) {
-    lookup_type_ = INTERCEPTOR_TYPE;
-    holder_ = holder;
-    transition_ = NULL;
-    details_ = PropertyDetails(NONE, INTERCEPTOR, Representation::Tagged());
-  }
-
   void NotFound() {
     lookup_type_ = NOT_FOUND;
-    details_ = PropertyDetails(NONE, NONEXISTENT, Representation::None());
+    details_ = PropertyDetails(NONE, NORMAL, Representation::None());
     holder_ = NULL;
     transition_ = NULL;
   }
 
-  JSObject* holder() const {
-    ASSERT(IsFound());
-    return JSObject::cast(holder_);
-  }
-
-  JSProxy* proxy() const {
-    ASSERT(IsHandler());
-    return JSProxy::cast(holder_);
-  }
-
-  PropertyType type() const {
-    ASSERT(IsFound());
-    return details_.type();
-  }
-
   Representation representation() const {
-    ASSERT(IsFound());
-    ASSERT(details_.type() != NONEXISTENT);
+    DCHECK(IsFound());
     return details_.representation();
   }
 
-  PropertyAttributes GetAttributes() const {
-    ASSERT(IsFound());
-    ASSERT(details_.type() != NONEXISTENT);
-    return details_.attributes();
-  }
-
-  PropertyDetails GetPropertyDetails() const {
-    return details_;
-  }
-
-  bool IsFastPropertyType() const {
-    ASSERT(IsFound());
-    return IsTransition() || type() != NORMAL;
-  }
-
   // Property callbacks do not include transitions to callbacks.
   bool IsPropertyCallbacks() const {
-    ASSERT(!(details_.type() == CALLBACKS && !IsFound()));
+    DCHECK(!(details_.type() == CALLBACKS && !IsFound()));
     return !IsTransition() && details_.type() == CALLBACKS;
   }
 
   bool IsReadOnly() const {
-    ASSERT(IsFound());
-    ASSERT(details_.type() != NONEXISTENT);
+    DCHECK(IsFound());
     return details_.IsReadOnly();
   }
 
   bool IsField() const {
-    ASSERT(!(details_.type() == FIELD && !IsFound()));
-    return IsDescriptorOrDictionary() && type() == FIELD;
-  }
-
-  bool IsNormal() const {
-    ASSERT(!(details_.type() == NORMAL && !IsFound()));
-    return IsDescriptorOrDictionary() && type() == NORMAL;
+    DCHECK(!(details_.type() == FIELD && !IsFound()));
+    return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == FIELD;
   }
 
   bool IsConstant() const {
-    ASSERT(!(details_.type() == CONSTANT && !IsFound()));
-    return IsDescriptorOrDictionary() && type() == CONSTANT;
+    DCHECK(!(details_.type() == CONSTANT && !IsFound()));
+    return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == CONSTANT;
   }
 
-  bool IsConstantFunction() const {
-    return IsConstant() && GetConstant()->IsJSFunction();
-  }
-
-  bool IsDontDelete() const { return details_.IsDontDelete(); }
-  bool IsDontEnum() const { return details_.IsDontEnum(); }
+  bool IsConfigurable() const { return details_.IsConfigurable(); }
   bool IsFound() const { return lookup_type_ != NOT_FOUND; }
-  bool IsDescriptorOrDictionary() const {
-    return lookup_type_ == DESCRIPTOR_TYPE || lookup_type_ == DICTIONARY_TYPE;
-  }
   bool IsTransition() const { return lookup_type_ == TRANSITION_TYPE; }
-  bool IsHandler() const { return lookup_type_ == HANDLER_TYPE; }
-  bool IsInterceptor() const { return lookup_type_ == INTERCEPTOR_TYPE; }
 
   // Is the result a property, excluding transitions and the null descriptor?
   bool IsProperty() const {
     return IsFound() && !IsTransition();
   }
 
-  bool IsDataProperty() const {
-    switch (lookup_type_) {
-      case NOT_FOUND:
-      case TRANSITION_TYPE:
-      case HANDLER_TYPE:
-      case INTERCEPTOR_TYPE:
-        return false;
-
-      case DESCRIPTOR_TYPE:
-      case DICTIONARY_TYPE:
-        switch (type()) {
-          case FIELD:
-          case NORMAL:
-          case CONSTANT:
-            return true;
-          case CALLBACKS: {
-            Object* callback = GetCallbackObject();
-            ASSERT(!callback->IsForeign());
-            return callback->IsAccessorInfo();
-          }
-          case HANDLER:
-          case INTERCEPTOR:
-          case NONEXISTENT:
-            UNREACHABLE();
-            return false;
-        }
-    }
-    UNREACHABLE();
-    return false;
-  }
-
-  bool IsCacheable() const { return cacheable_; }
-  void DisallowCaching() { cacheable_ = false; }
-
-  Object* GetLazyValue() const {
-    switch (lookup_type_) {
-      case NOT_FOUND:
-      case TRANSITION_TYPE:
-      case HANDLER_TYPE:
-      case INTERCEPTOR_TYPE:
-        return isolate()->heap()->the_hole_value();
-
-      case DESCRIPTOR_TYPE:
-      case DICTIONARY_TYPE:
-        switch (type()) {
-          case FIELD:
-            return holder()->RawFastPropertyAt(GetFieldIndex());
-          case NORMAL: {
-            Object* value = holder()->property_dictionary()->ValueAt(
-                GetDictionaryEntry());
-            if (holder()->IsGlobalObject()) {
-              value = PropertyCell::cast(value)->value();
-            }
-            return value;
-          }
-          case CONSTANT:
-            return GetConstant();
-          case CALLBACKS:
-            return isolate()->heap()->the_hole_value();
-          case HANDLER:
-          case INTERCEPTOR:
-          case NONEXISTENT:
-            UNREACHABLE();
-            return NULL;
-        }
-    }
-    UNREACHABLE();
-    return NULL;
-  }
-
   Map* GetTransitionTarget() const {
-    ASSERT(IsTransition());
+    DCHECK(IsTransition());
     return transition_;
   }
 
@@ -359,108 +197,39 @@
     return IsTransition() && details_.type() == FIELD;
   }
 
-  bool IsTransitionToConstant() const {
-    return IsTransition() && details_.type() == CONSTANT;
-  }
-
-  int GetDescriptorIndex() const {
-    ASSERT(lookup_type_ == DESCRIPTOR_TYPE);
-    return number_;
-  }
-
-  FieldIndex GetFieldIndex() const {
-    ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
-           lookup_type_ == TRANSITION_TYPE);
-    return FieldIndex::ForLookupResult(this);
-  }
-
   int GetLocalFieldIndexFromMap(Map* map) const {
     return GetFieldIndexFromMap(map) - map->inobject_properties();
   }
 
-  int GetDictionaryEntry() const {
-    ASSERT(lookup_type_ == DICTIONARY_TYPE);
-    return number_;
-  }
-
-  JSFunction* GetConstantFunction() const {
-    ASSERT(type() == CONSTANT);
-    return JSFunction::cast(GetValue());
-  }
-
   Object* GetConstantFromMap(Map* map) const {
-    ASSERT(type() == CONSTANT);
+    DCHECK(details_.type() == CONSTANT);
     return GetValueFromMap(map);
   }
 
-  JSFunction* GetConstantFunctionFromMap(Map* map) const {
-    return JSFunction::cast(GetConstantFromMap(map));
-  }
-
-  Object* GetConstant() const {
-    ASSERT(type() == CONSTANT);
-    return GetValue();
-  }
-
-  Object* GetCallbackObject() const {
-    ASSERT(!IsTransition());
-    ASSERT(type() == CALLBACKS);
-    return GetValue();
-  }
-
-#ifdef OBJECT_PRINT
-  void Print(FILE* out);
-#endif
-
-  Object* GetValue() const {
-    if (lookup_type_ == DESCRIPTOR_TYPE) {
-      return GetValueFromMap(holder()->map());
-    } else if (lookup_type_ == TRANSITION_TYPE) {
-      return GetValueFromMap(transition_);
-    }
-    // In the dictionary case, the data is held in the value field.
-    ASSERT(lookup_type_ == DICTIONARY_TYPE);
-    return holder()->GetNormalizedProperty(this);
-  }
-
   Object* GetValueFromMap(Map* map) const {
-    ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
+    DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
            lookup_type_ == TRANSITION_TYPE);
-    ASSERT(number_ < map->NumberOfOwnDescriptors());
+    DCHECK(number_ < map->NumberOfOwnDescriptors());
     return map->instance_descriptors()->GetValue(number_);
   }
 
   int GetFieldIndexFromMap(Map* map) const {
-    ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
+    DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
            lookup_type_ == TRANSITION_TYPE);
-    ASSERT(number_ < map->NumberOfOwnDescriptors());
+    DCHECK(number_ < map->NumberOfOwnDescriptors());
     return map->instance_descriptors()->GetFieldIndex(number_);
   }
 
-  HeapType* GetFieldType() const {
-    ASSERT(type() == FIELD);
-    if (lookup_type_ == DESCRIPTOR_TYPE) {
-      return GetFieldTypeFromMap(holder()->map());
-    }
-    ASSERT(lookup_type_ == TRANSITION_TYPE);
-    return GetFieldTypeFromMap(transition_);
-  }
-
   HeapType* GetFieldTypeFromMap(Map* map) const {
-    ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
-           lookup_type_ == TRANSITION_TYPE);
-    ASSERT(number_ < map->NumberOfOwnDescriptors());
+    DCHECK_NE(NOT_FOUND, lookup_type_);
+    DCHECK(number_ < map->NumberOfOwnDescriptors());
     return map->instance_descriptors()->GetFieldType(number_);
   }
 
-  Map* GetFieldOwner() const {
-    return GetFieldOwnerFromMap(holder()->map());
-  }
-
   Map* GetFieldOwnerFromMap(Map* map) const {
-    ASSERT(lookup_type_ == DESCRIPTOR_TYPE ||
+    DCHECK(lookup_type_ == DESCRIPTOR_TYPE ||
            lookup_type_ == TRANSITION_TYPE);
-    ASSERT(number_ < map->NumberOfOwnDescriptors());
+    DCHECK(number_ < map->NumberOfOwnDescriptors());
     return map->FindFieldOwner(number_);
   }
 
@@ -471,22 +240,16 @@
   LookupResult* next_;
 
   // Where did we find the result?
-  enum {
-    NOT_FOUND,
-    DESCRIPTOR_TYPE,
-    TRANSITION_TYPE,
-    DICTIONARY_TYPE,
-    HANDLER_TYPE,
-    INTERCEPTOR_TYPE
-  } lookup_type_;
+  enum { NOT_FOUND, DESCRIPTOR_TYPE, TRANSITION_TYPE } lookup_type_;
 
   JSReceiver* holder_;
   Map* transition_;
   int number_;
-  bool cacheable_;
   PropertyDetails details_;
 };
 
+
+OStream& operator<<(OStream& os, const LookupResult& r);
 } }  // namespace v8::internal
 
 #endif  // V8_PROPERTY_H_
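Review note: the property.cc/h hunks replace FILE*-based Print methods with stream insertion operators, so diagnostics compose with other OStream output. The standard-library shape of that migration (std::ostream standing in for V8's OStream, with a stand-in struct):

    #include <iostream>
    #include <string>

    struct Descriptor {
      std::string key;
      std::string value;
    };

    // Before: void Print(FILE* out) driven by PrintF calls.
    // After: a free operator<< usable in any stream expression.
    std::ostream& operator<<(std::ostream& os, const Descriptor& d) {
      return os << "Descriptor " << d.key << " @ " << d.value;
    }

    int main() {
      Descriptor d = {"name", "\"v8\""};
      std::cout << d << "\n";  // composes: std::cout << d << ... just works
    }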
diff --git a/src/prototype.h b/src/prototype.h
new file mode 100644
index 0000000..4df1114
--- /dev/null
+++ b/src/prototype.h
@@ -0,0 +1,135 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PROTOTYPE_H_
+#define V8_PROTOTYPE_H_
+
+#include "src/isolate.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+/**
+ * A class to uniformly access the prototype of any Object and walk its
+ * prototype chain.
+ *
+ * The PrototypeIterator can either start at the prototype (default), or
+ * include the receiver itself. If a PrototypeIterator is constructed for a
+ * Map, it will always start at the prototype.
+ *
+ * The PrototypeIterator can run to the null_value(), the first non-hidden
+ * prototype, or a given object.
+ */
+class PrototypeIterator {
+ public:
+  enum WhereToStart { START_AT_RECEIVER, START_AT_PROTOTYPE };
+
+  enum WhereToEnd { END_AT_NULL, END_AT_NON_HIDDEN };
+
+  PrototypeIterator(Isolate* isolate, Handle<Object> receiver,
+                    WhereToStart where_to_start = START_AT_PROTOTYPE)
+      : did_jump_to_prototype_chain_(false),
+        object_(NULL),
+        handle_(receiver),
+        isolate_(isolate) {
+    CHECK(!handle_.is_null());
+    if (where_to_start == START_AT_PROTOTYPE) {
+      Advance();
+    }
+  }
+  PrototypeIterator(Isolate* isolate, Object* receiver,
+                    WhereToStart where_to_start = START_AT_PROTOTYPE)
+      : did_jump_to_prototype_chain_(false),
+        object_(receiver),
+        isolate_(isolate) {
+    if (where_to_start == START_AT_PROTOTYPE) {
+      Advance();
+    }
+  }
+  explicit PrototypeIterator(Map* receiver_map)
+      : did_jump_to_prototype_chain_(true),
+        object_(receiver_map->prototype()),
+        isolate_(receiver_map->GetIsolate()) {}
+  explicit PrototypeIterator(Handle<Map> receiver_map)
+      : did_jump_to_prototype_chain_(true),
+        object_(NULL),
+        handle_(handle(receiver_map->prototype(), receiver_map->GetIsolate())),
+        isolate_(receiver_map->GetIsolate()) {}
+  ~PrototypeIterator() {}
+
+  Object* GetCurrent() const {
+    DCHECK(handle_.is_null());
+    return object_;
+  }
+  static Handle<Object> GetCurrent(const PrototypeIterator& iterator) {
+    DCHECK(!iterator.handle_.is_null());
+    return iterator.handle_;
+  }
+  void Advance() {
+    if (handle_.is_null() && object_->IsJSProxy()) {
+      did_jump_to_prototype_chain_ = true;
+      object_ = isolate_->heap()->null_value();
+      return;
+    } else if (!handle_.is_null() && handle_->IsJSProxy()) {
+      did_jump_to_prototype_chain_ = true;
+      handle_ = handle(isolate_->heap()->null_value(), isolate_);
+      return;
+    }
+    AdvanceIgnoringProxies();
+  }
+  void AdvanceIgnoringProxies() {
+    if (!did_jump_to_prototype_chain_) {
+      did_jump_to_prototype_chain_ = true;
+      if (handle_.is_null()) {
+        object_ = object_->GetRootMap(isolate_)->prototype();
+      } else {
+        handle_ = handle(handle_->GetRootMap(isolate_)->prototype(), isolate_);
+      }
+    } else {
+      if (handle_.is_null()) {
+        object_ = HeapObject::cast(object_)->map()->prototype();
+      } else {
+        handle_ =
+            handle(HeapObject::cast(*handle_)->map()->prototype(), isolate_);
+      }
+    }
+  }
+  bool IsAtEnd(WhereToEnd where_to_end = END_AT_NULL) const {
+    if (handle_.is_null()) {
+      return object_->IsNull() ||
+             (did_jump_to_prototype_chain_ &&
+              where_to_end == END_AT_NON_HIDDEN &&
+              !HeapObject::cast(object_)->map()->is_hidden_prototype());
+    } else {
+      return handle_->IsNull() ||
+             (did_jump_to_prototype_chain_ &&
+              where_to_end == END_AT_NON_HIDDEN &&
+              !Handle<HeapObject>::cast(handle_)->map()->is_hidden_prototype());
+    }
+  }
+  bool IsAtEnd(Object* final_object) {
+    DCHECK(handle_.is_null());
+    return object_->IsNull() || object_ == final_object;
+  }
+  bool IsAtEnd(Handle<Object> final_object) {
+    DCHECK(!handle_.is_null());
+    return handle_->IsNull() || *handle_ == *final_object;
+  }
+
+ private:
+  bool did_jump_to_prototype_chain_;
+  Object* object_;
+  Handle<Object> handle_;
+  Isolate* isolate_;
+
+  DISALLOW_COPY_AND_ASSIGN(PrototypeIterator);
+};
+
+
+}  // namespace internal
+
+}  // namespace v8
+
+#endif  // V8_PROTOTYPE_H_
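Review note: a typical walk with the new header, using only the API declared above (a sketch for V8 internals; isolate and a Handle<Object> receiver are assumed to be in scope):

    // Visit every prototype of |receiver|, stopping at null, as described
    // in the class comment above.
    for (PrototypeIterator iter(isolate, receiver); !iter.IsAtEnd();
         iter.Advance()) {
      Handle<Object> current = PrototypeIterator::GetCurrent(iter);
      // ... inspect |current| ...
    }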
diff --git a/src/proxy.js b/src/proxy.js
index 99f9dab..0776eea 100644
--- a/src/proxy.js
+++ b/src/proxy.js
@@ -49,8 +49,8 @@
 function SetUpProxy() {
   %CheckIsBootstrapping()
 
-  var global_receiver = %GlobalReceiver(global);
-  global_receiver.Proxy = $Proxy;
+  var global_proxy = %GlobalProxy(global);
+  global_proxy.Proxy = $Proxy;
 
   // Set up non-enumerable properties of the Proxy object.
   InstallFunctions($Proxy, DONT_ENUM, [
diff --git a/src/qnx-math.h b/src/qnx-math.h
deleted file mode 100644
index 8cf65d2..0000000
--- a/src/qnx-math.h
+++ /dev/null
@@ -1,19 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_QNX_MATH_H_
-#define V8_QNX_MATH_H_
-
-#include <cmath>
-
-#undef fpclassify
-#undef isfinite
-#undef isinf
-#undef isnan
-#undef isnormal
-#undef signbit
-
-using std::lrint;
-
-#endif  // V8_QNX_MATH_H_
diff --git a/src/regexp-macro-assembler-irregexp-inl.h b/src/regexp-macro-assembler-irregexp-inl.h
index a2359ba..942cf57 100644
--- a/src/regexp-macro-assembler-irregexp-inl.h
+++ b/src/regexp-macro-assembler-irregexp-inl.h
@@ -6,6 +6,7 @@
 
 
 #include "src/v8.h"
+
 #include "src/ast.h"
 #include "src/bytecodes-irregexp.h"
 
@@ -20,7 +21,7 @@
 void RegExpMacroAssemblerIrregexp::Emit(uint32_t byte,
                                         uint32_t twenty_four_bits) {
   uint32_t word = ((twenty_four_bits << BYTECODE_SHIFT) | byte);
-  ASSERT(pc_ <= buffer_.length());
+  DCHECK(pc_ <= buffer_.length());
   if (pc_  + 3 >= buffer_.length()) {
     Expand();
   }
@@ -30,7 +31,7 @@
 
 
 void RegExpMacroAssemblerIrregexp::Emit16(uint32_t word) {
-  ASSERT(pc_ <= buffer_.length());
+  DCHECK(pc_ <= buffer_.length());
   if (pc_ + 1 >= buffer_.length()) {
     Expand();
   }
@@ -40,7 +41,7 @@
 
 
 void RegExpMacroAssemblerIrregexp::Emit8(uint32_t word) {
-  ASSERT(pc_ <= buffer_.length());
+  DCHECK(pc_ <= buffer_.length());
   if (pc_ == buffer_.length()) {
     Expand();
   }
@@ -50,7 +51,7 @@
 
 
 void RegExpMacroAssemblerIrregexp::Emit32(uint32_t word) {
-  ASSERT(pc_ <= buffer_.length());
+  DCHECK(pc_ <= buffer_.length());
   if (pc_ + 3 >= buffer_.length()) {
     Expand();
   }
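Review note: Emit packs an 8-bit bytecode with a 24-bit operand into one 32-bit word, word = (operand << BYTECODE_SHIFT) | byte. A standalone pack/unpack sketch assuming a shift of 8 (a stand-in constant; the real BYTECODE_SHIFT lives in bytecodes-irregexp.h):

    #include <cassert>
    #include <cstdint>

    const int kBytecodeShift = 8;  // assumed stand-in for BYTECODE_SHIFT

    uint32_t PackInstruction(uint32_t bytecode, uint32_t twenty_four_bits) {
      assert(bytecode < (1u << kBytecodeShift));  // fits in 8 bits
      assert(twenty_four_bits < (1u << 24));      // fits in 24 bits
      return (twenty_four_bits << kBytecodeShift) | bytecode;
    }

    int main() {
      uint32_t word = PackInstruction(0x2A, 0x00ABCD);
      assert((word & 0xFF) == 0x2A);              // low byte: the bytecode
      assert((word >> kBytecodeShift) == 0x00ABCD);  // high 24 bits: operand
    }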
diff --git a/src/regexp-macro-assembler-irregexp.cc b/src/regexp-macro-assembler-irregexp.cc
index 92d7dee..469fb8c 100644
--- a/src/regexp-macro-assembler-irregexp.cc
+++ b/src/regexp-macro-assembler-irregexp.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
+
 #include "src/ast.h"
 #include "src/bytecodes-irregexp.h"
 #include "src/regexp-macro-assembler.h"
@@ -39,7 +40,7 @@
 
 void RegExpMacroAssemblerIrregexp::Bind(Label* l) {
   advance_current_end_ = kInvalidPC;
-  ASSERT(!l->is_bound());
+  DCHECK(!l->is_bound());
   if (l->is_linked()) {
     int pos = l->pos();
     while (pos != 0) {
@@ -68,8 +69,8 @@
 
 
 void RegExpMacroAssemblerIrregexp::PopRegister(int register_index) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_POP_REGISTER, register_index);
 }
 
@@ -77,23 +78,23 @@
 void RegExpMacroAssemblerIrregexp::PushRegister(
     int register_index,
     StackCheckFlag check_stack_limit) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_PUSH_REGISTER, register_index);
 }
 
 
 void RegExpMacroAssemblerIrregexp::WriteCurrentPositionToRegister(
     int register_index, int cp_offset) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_SET_REGISTER_TO_CP, register_index);
   Emit32(cp_offset);  // Current position offset.
 }
 
 
 void RegExpMacroAssemblerIrregexp::ClearRegisters(int reg_from, int reg_to) {
-  ASSERT(reg_from <= reg_to);
+  DCHECK(reg_from <= reg_to);
   for (int reg = reg_from; reg <= reg_to; reg++) {
     SetRegister(reg, -1);
   }
@@ -102,45 +103,45 @@
 
 void RegExpMacroAssemblerIrregexp::ReadCurrentPositionFromRegister(
     int register_index) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_SET_CP_TO_REGISTER, register_index);
 }
 
 
 void RegExpMacroAssemblerIrregexp::WriteStackPointerToRegister(
     int register_index) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_SET_REGISTER_TO_SP, register_index);
 }
 
 
 void RegExpMacroAssemblerIrregexp::ReadStackPointerFromRegister(
     int register_index) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_SET_SP_TO_REGISTER, register_index);
 }
 
 
 void RegExpMacroAssemblerIrregexp::SetCurrentPositionFromEnd(int by) {
-  ASSERT(is_uint24(by));
+  DCHECK(is_uint24(by));
   Emit(BC_SET_CURRENT_POSITION_FROM_END, by);
 }
 
 
 void RegExpMacroAssemblerIrregexp::SetRegister(int register_index, int to) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_SET_REGISTER, register_index);
   Emit32(to);
 }
 
 
 void RegExpMacroAssemblerIrregexp::AdvanceRegister(int register_index, int by) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_ADVANCE_REGISTER, register_index);
   Emit32(by);
 }
@@ -194,8 +195,8 @@
 
 
 void RegExpMacroAssemblerIrregexp::AdvanceCurrentPosition(int by) {
-  ASSERT(by >= kMinCPOffset);
-  ASSERT(by <= kMaxCPOffset);
+  DCHECK(by >= kMinCPOffset);
+  DCHECK(by <= kMaxCPOffset);
   advance_current_start_ = pc_;
   advance_current_offset_ = by;
   Emit(BC_ADVANCE_CP, by);
@@ -214,8 +215,8 @@
                                                         Label* on_failure,
                                                         bool check_bounds,
                                                         int characters) {
-  ASSERT(cp_offset >= kMinCPOffset);
-  ASSERT(cp_offset <= kMaxCPOffset);
+  DCHECK(cp_offset >= kMinCPOffset);
+  DCHECK(cp_offset <= kMaxCPOffset);
   int bytecode;
   if (check_bounds) {
     if (characters == 4) {
@@ -223,7 +224,7 @@
     } else if (characters == 2) {
       bytecode = BC_LOAD_2_CURRENT_CHARS;
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       bytecode = BC_LOAD_CURRENT_CHAR;
     }
   } else {
@@ -232,7 +233,7 @@
     } else if (characters == 2) {
       bytecode = BC_LOAD_2_CURRENT_CHARS_UNCHECKED;
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       bytecode = BC_LOAD_CURRENT_CHAR_UNCHECKED;
     }
   }
@@ -370,8 +371,8 @@
 
 void RegExpMacroAssemblerIrregexp::CheckNotBackReference(int start_reg,
                                                          Label* on_not_equal) {
-  ASSERT(start_reg >= 0);
-  ASSERT(start_reg <= kMaxRegister);
+  DCHECK(start_reg >= 0);
+  DCHECK(start_reg <= kMaxRegister);
   Emit(BC_CHECK_NOT_BACK_REF, start_reg);
   EmitOrLink(on_not_equal);
 }
@@ -380,8 +381,8 @@
 void RegExpMacroAssemblerIrregexp::CheckNotBackReferenceIgnoreCase(
     int start_reg,
     Label* on_not_equal) {
-  ASSERT(start_reg >= 0);
-  ASSERT(start_reg <= kMaxRegister);
+  DCHECK(start_reg >= 0);
+  DCHECK(start_reg <= kMaxRegister);
   Emit(BC_CHECK_NOT_BACK_REF_NO_CASE, start_reg);
   EmitOrLink(on_not_equal);
 }
@@ -390,8 +391,8 @@
 void RegExpMacroAssemblerIrregexp::IfRegisterLT(int register_index,
                                                 int comparand,
                                                 Label* on_less_than) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_CHECK_REGISTER_LT, register_index);
   Emit32(comparand);
   EmitOrLink(on_less_than);
@@ -401,8 +402,8 @@
 void RegExpMacroAssemblerIrregexp::IfRegisterGE(int register_index,
                                                 int comparand,
                                                 Label* on_greater_or_equal) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_CHECK_REGISTER_GE, register_index);
   Emit32(comparand);
   EmitOrLink(on_greater_or_equal);
@@ -411,8 +412,8 @@
 
 void RegExpMacroAssemblerIrregexp::IfRegisterEqPos(int register_index,
                                                    Label* on_eq) {
-  ASSERT(register_index >= 0);
-  ASSERT(register_index <= kMaxRegister);
+  DCHECK(register_index >= 0);
+  DCHECK(register_index <= kMaxRegister);
   Emit(BC_CHECK_REGISTER_EQ_POS, register_index);
   EmitOrLink(on_eq);
 }
diff --git a/src/regexp-macro-assembler-irregexp.h b/src/regexp-macro-assembler-irregexp.h
index cdfb46a..b192c22 100644
--- a/src/regexp-macro-assembler-irregexp.h
+++ b/src/regexp-macro-assembler-irregexp.h
@@ -31,6 +31,7 @@
   virtual ~RegExpMacroAssemblerIrregexp();
   // The byte-code interpreter checks on each push anyway.
   virtual int stack_limit_slack() { return 1; }
+  virtual bool CanReadUnaligned() { return false; }
   virtual void Bind(Label* label);
   virtual void AdvanceCurrentPosition(int by);  // Signed cp change.
   virtual void PopCurrentPosition();
diff --git a/src/regexp-macro-assembler-tracer.cc b/src/regexp-macro-assembler-tracer.cc
index 1e745d9..14da2da 100644
--- a/src/regexp-macro-assembler-tracer.cc
+++ b/src/regexp-macro-assembler-tracer.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
+
 #include "src/ast.h"
 #include "src/regexp-macro-assembler.h"
 #include "src/regexp-macro-assembler-tracer.h"
@@ -15,7 +16,7 @@
   RegExpMacroAssembler(assembler->zone()),
   assembler_(assembler) {
   unsigned int type = assembler->Implementation();
-  ASSERT(type < 6);
+  DCHECK(type < 6);
   const char* impl_names[] = {"IA32", "ARM", "ARM64",
                               "MIPS", "X64", "X87", "Bytecode"};
   PrintF("RegExpMacroAssembler%s();\n", impl_names[type]);
diff --git a/src/regexp-macro-assembler.cc b/src/regexp-macro-assembler.cc
index 88adf97..52df648 100644
--- a/src/regexp-macro-assembler.cc
+++ b/src/regexp-macro-assembler.cc
@@ -3,10 +3,11 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
-#include "src/ast.h"
+
 #include "src/assembler.h"
-#include "src/regexp-stack.h"
+#include "src/ast.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
 #include "src/simulator.h"
 
 namespace v8 {
@@ -23,15 +24,6 @@
 }
 
 
-bool RegExpMacroAssembler::CanReadUnaligned() {
-#ifdef V8_HOST_CAN_READ_UNALIGNED
-  return true;
-#else
-  return false;
-#endif
-}
-
-
 #ifndef V8_INTERPRETED_REGEXP  // Avoid unused code, e.g., on ARM.
 
 NativeRegExpMacroAssembler::NativeRegExpMacroAssembler(Zone* zone)
@@ -51,16 +43,16 @@
     String* subject,
     int start_index) {
   // Not just flat, but ultra flat.
-  ASSERT(subject->IsExternalString() || subject->IsSeqString());
-  ASSERT(start_index >= 0);
-  ASSERT(start_index <= subject->length());
+  DCHECK(subject->IsExternalString() || subject->IsSeqString());
+  DCHECK(start_index >= 0);
+  DCHECK(start_index <= subject->length());
   if (subject->IsOneByteRepresentation()) {
     const byte* address;
     if (StringShape(subject).IsExternal()) {
-      const uint8_t* data = ExternalAsciiString::cast(subject)->GetChars();
+      const uint8_t* data = ExternalOneByteString::cast(subject)->GetChars();
       address = reinterpret_cast<const byte*>(data);
     } else {
-      ASSERT(subject->IsSeqOneByteString());
+      DCHECK(subject->IsSeqOneByteString());
       const uint8_t* data = SeqOneByteString::cast(subject)->GetChars();
       address = reinterpret_cast<const byte*>(data);
     }
@@ -70,7 +62,7 @@
   if (StringShape(subject).IsExternal()) {
     data = ExternalTwoByteString::cast(subject)->GetChars();
   } else {
-    ASSERT(subject->IsSeqTwoByteString());
+    DCHECK(subject->IsSeqTwoByteString());
     data = SeqTwoByteString::cast(subject)->GetChars();
   }
   return reinterpret_cast<const byte*>(data + start_index);
@@ -85,9 +77,9 @@
     int previous_index,
     Isolate* isolate) {
 
-  ASSERT(subject->IsFlat());
-  ASSERT(previous_index >= 0);
-  ASSERT(previous_index <= subject->length());
+  DCHECK(subject->IsFlat());
+  DCHECK(previous_index >= 0);
+  DCHECK(previous_index <= subject->length());
 
   // No allocations before calling the regexp, but we can't use
   // DisallowHeapAllocation, since regexps might be preempted, and another
@@ -102,18 +94,18 @@
   // The string has been flattened, so if it is a cons string it contains the
   // full string in the first part.
   if (StringShape(subject_ptr).IsCons()) {
-    ASSERT_EQ(0, ConsString::cast(subject_ptr)->second()->length());
+    DCHECK_EQ(0, ConsString::cast(subject_ptr)->second()->length());
     subject_ptr = ConsString::cast(subject_ptr)->first();
   } else if (StringShape(subject_ptr).IsSliced()) {
     SlicedString* slice = SlicedString::cast(subject_ptr);
     subject_ptr = slice->parent();
     slice_offset = slice->offset();
   }
-  // Ensure that an underlying string has the same ASCII-ness.
-  bool is_ascii = subject_ptr->IsOneByteRepresentation();
-  ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
+  // Ensure that an underlying string has the same representation.
+  bool is_one_byte = subject_ptr->IsOneByteRepresentation();
+  DCHECK(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
   // String is now either Sequential or External
-  int char_size_shift = is_ascii ? 0 : 1;
+  int char_size_shift = is_one_byte ? 0 : 1;
 
   const byte* input_start =
       StringCharacterPosition(subject_ptr, start_offset + slice_offset);
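
The hunk above is part of the ASCII-to-one-byte terminology cleanup (see also the Mode { LATIN1, UC16 } rename later in this diff): one-byte strings are Latin-1, not just ASCII, and char_size_shift turns a character index into a byte offset. A standalone sketch of that addressing math, with names of our choosing:

```cpp
#include <cstddef>
#include <cstdint>

// One-byte (Latin-1) strings store 1 byte per character (shift 0);
// two-byte (UC16) strings store 2 bytes per character (shift 1).
const uint8_t* CharacterAddress(const uint8_t* data, size_t index,
                                bool is_one_byte) {
  int char_size_shift = is_one_byte ? 0 : 1;
  return data + (index << char_size_shift);
}
```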
@@ -155,7 +147,7 @@
                                           stack_base,
                                           direct_call,
                                           isolate);
-  ASSERT(result >= RETRY);
+  DCHECK(result >= RETRY);
 
   if (result == EXCEPTION && !isolate->has_pending_exception()) {
     // We detected a stack overflow (on the backtrack stack) in RegExp code,
@@ -219,7 +211,7 @@
   // This function is not allowed to cause a garbage collection.
   // A GC might move the calling generated code and invalidate the
   // return address on the stack.
-  ASSERT(byte_length % 2 == 0);
+  DCHECK(byte_length % 2 == 0);
   uc16* substring1 = reinterpret_cast<uc16*>(byte_offset1);
   uc16* substring2 = reinterpret_cast<uc16*>(byte_offset2);
   size_t length = byte_length >> 1;
@@ -249,9 +241,9 @@
   RegExpStack* regexp_stack = isolate->regexp_stack();
   size_t size = regexp_stack->stack_capacity();
   Address old_stack_base = regexp_stack->stack_base();
-  ASSERT(old_stack_base == *stack_base);
-  ASSERT(stack_pointer <= old_stack_base);
-  ASSERT(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
+  DCHECK(old_stack_base == *stack_base);
+  DCHECK(stack_pointer <= old_stack_base);
+  DCHECK(static_cast<size_t>(old_stack_base - stack_pointer) <= size);
   Address new_stack_base = regexp_stack->EnsureCapacity(size * 2);
   if (new_stack_base == NULL) {
     return NULL;
diff --git a/src/regexp-macro-assembler.h b/src/regexp-macro-assembler.h
index f0cfc46..f72cc4d 100644
--- a/src/regexp-macro-assembler.h
+++ b/src/regexp-macro-assembler.h
@@ -48,7 +48,7 @@
   // kCheckStackLimit flag to push operations (instead of kNoStackLimitCheck)
   // at least once for every stack_limit() pushes that are executed.
   virtual int stack_limit_slack() = 0;
-  virtual bool CanReadUnaligned();
+  virtual bool CanReadUnaligned() = 0;
   virtual void AdvanceCurrentPosition(int by) = 0;  // Signed cp change.
   virtual void AdvanceRegister(int reg, int by) = 0;  // r[reg] += by.
   // Continues execution from the position pushed on the top of the backtrack
@@ -171,7 +171,7 @@
 class NativeRegExpMacroAssembler: public RegExpMacroAssembler {
  public:
   // Type of input string to generate code for.
-  enum Mode { ASCII = 1, UC16 = 2 };
+  enum Mode { LATIN1 = 1, UC16 = 2 };
 
   // Result of calling generated native RegExp code.
   // RETRY: Something significant changed during execution, and the matching
diff --git a/src/regexp-stack.cc b/src/regexp-stack.cc
index 97835cc..f114ae4 100644
--- a/src/regexp-stack.cc
+++ b/src/regexp-stack.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
+
 #include "src/regexp-stack.h"
 
 namespace v8 {
diff --git a/src/regexp-stack.h b/src/regexp-stack.h
index 745782d..d18ce70 100644
--- a/src/regexp-stack.h
+++ b/src/regexp-stack.h
@@ -41,7 +41,7 @@
 
   // Gives the top of the memory used as stack.
   Address stack_base() {
-    ASSERT(thread_local_.memory_size_ != 0);
+    DCHECK(thread_local_.memory_size_ != 0);
     return thread_local_.memory_ + thread_local_.memory_size_;
   }
 
diff --git a/src/regexp.js b/src/regexp.js
index 8a805b0..0f3dbb6 100644
--- a/src/regexp.js
+++ b/src/regexp.js
@@ -22,6 +22,8 @@
     flags = (pattern.global ? 'g' : '')
         + (pattern.ignoreCase ? 'i' : '')
         + (pattern.multiline ? 'm' : '');
+    if (harmony_regexps)
+        flags += (pattern.sticky ? 'y' : '');
     pattern = pattern.source;
   }
 
@@ -31,6 +33,7 @@
   var global = false;
   var ignoreCase = false;
   var multiline = false;
+  var sticky = false;
   for (var i = 0; i < flags.length; i++) {
     var c = %_CallFunction(flags, i, StringCharAt);
     switch (c) {
@@ -52,12 +55,18 @@
         }
         multiline = true;
         break;
+      case 'y':
+        if (!harmony_regexps || sticky) {
+          throw MakeSyntaxError("invalid_regexp_flags", [flags]);
+        }
+        sticky = true;
+        break;
       default:
         throw MakeSyntaxError("invalid_regexp_flags", [flags]);
     }
   }
 
-  %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline);
+  %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline, sticky);
 
   // Call internal function to compile the pattern.
   %RegExpCompile(object, pattern, flags);
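
The flag-parsing hunk above adds 'y' behind the harmony_regexps staging flag, with the same duplicate-flag rejection as 'g', 'i', and 'm'. A standalone C++ mirror of that loop (the JS original throws MakeSyntaxError; the names here are ours):

```cpp
#include <stdexcept>
#include <string>

// Each flag may appear at most once, and 'y' is only legal when the
// harmony_regexps staging flag is on.
struct RegExpFlags {
  bool global = false;
  bool ignore_case = false;
  bool multiline = false;
  bool sticky = false;
};

RegExpFlags ParseRegExpFlags(const std::string& flags, bool harmony_regexps) {
  RegExpFlags f;
  for (char c : flags) {
    switch (c) {
      case 'g':
        if (f.global) throw std::invalid_argument("invalid_regexp_flags");
        f.global = true;
        break;
      case 'i':
        if (f.ignore_case) throw std::invalid_argument("invalid_regexp_flags");
        f.ignore_case = true;
        break;
      case 'm':
        if (f.multiline) throw std::invalid_argument("invalid_regexp_flags");
        f.multiline = true;
        break;
      case 'y':  // the new case, gated on harmony_regexps
        if (!harmony_regexps || f.sticky) {
          throw std::invalid_argument("invalid_regexp_flags");
        }
        f.sticky = true;
        break;
      default:
        throw std::invalid_argument("invalid_regexp_flags");
    }
  }
  return f;
}
```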
@@ -159,8 +168,8 @@
   // algorithm, step 5) even if the value is discarded for non-global RegExps.
   var i = TO_INTEGER(lastIndex);
 
-  var global = this.global;
-  if (global) {
+  var updateLastIndex = this.global || (harmony_regexps && this.sticky);
+  if (updateLastIndex) {
     if (i < 0 || i > string.length) {
       this.lastIndex = 0;
       return null;
@@ -179,7 +188,7 @@
 
   // Successful match.
   lastMatchInfoOverride = null;
-  if (global) {
+  if (updateLastIndex) {
     this.lastIndex = lastMatchInfo[CAPTURE1];
   }
   RETURN_NEW_RESULT_FROM_MATCH_INFO(matchIndices, string);
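
The exec hunks above (and the matching test hunk below) widen the lastIndex protocol from global-only to global-or-sticky. A small C++ restatement of the two rules, with illustrative names:

```cpp
#include <cstddef>

// Global and (behind --harmony-regexps) sticky regexps read and write
// lastIndex; all other regexps ignore it.
bool UpdatesLastIndex(bool global, bool sticky, bool harmony_regexps) {
  return global || (harmony_regexps && sticky);
}

// The clamping rule the hunk keeps: an out-of-range lastIndex resets to 0
// and reports no match before any matching is attempted.
bool LastIndexInRange(double last_index, size_t subject_length) {
  return last_index >= 0 && last_index <= static_cast<double>(subject_length);
}
```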
@@ -207,7 +216,7 @@
   // algorithm, step 5) even if the value is discarded for non-global RegExps.
   var i = TO_INTEGER(lastIndex);
 
-  if (this.global) {
+  if (this.global || (harmony_regexps && this.sticky)) {
     if (i < 0 || i > string.length) {
       this.lastIndex = 0;
       return false;
@@ -222,12 +231,13 @@
     this.lastIndex = lastMatchInfo[CAPTURE1];
     return true;
   } else {
-    // Non-global regexp.
-    // Remove irrelevant preceeding '.*' in a non-global test regexp.
-    // The expression checks whether this.source starts with '.*' and
-    // that the third char is not a '?'.
+    // Non-global, non-sticky regexp.
+    // Remove irrelevant preceding '.*' in a test regexp.  The expression
+    // checks whether this.source starts with '.*' and that the third char is
+    // not a '?'.  But see https://code.google.com/p/v8/issues/detail?id=3560
     var regexp = this;
-    if (%_StringCharCodeAt(regexp.source, 0) == 46 &&  // '.'
+    if (regexp.source.length >= 3 &&
+        %_StringCharCodeAt(regexp.source, 0) == 46 &&  // '.'
         %_StringCharCodeAt(regexp.source, 1) == 42 &&  // '*'
         %_StringCharCodeAt(regexp.source, 2) != 63) {  // '?'
       regexp = TrimRegExp(regexp);
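
The guard added above only inspects source[0..2] when the pattern really has three characters, avoiding the out-of-bounds reads tracked as v8 issue 3560. Restated standalone:

```cpp
#include <string>

// True when the pattern starts with a trimmable '.*' prefix: at least three
// characters, beginning ".*" and not ".*?".
bool HasTrimmableDotStarPrefix(const std::string& source) {
  return source.size() >= 3 &&
         source[0] == '.' && source[1] == '*' && source[2] != '?';
}
```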
@@ -264,6 +274,7 @@
   if (this.global) result += 'g';
   if (this.ignoreCase) result += 'i';
   if (this.multiline) result += 'm';
+  if (harmony_regexps && this.sticky) result += 'y';
   return result;
 }
 
@@ -381,7 +392,7 @@
 function SetUpRegExp() {
   %CheckIsBootstrapping();
   %FunctionSetInstanceClassName($RegExp, 'RegExp');
-  %SetProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
+  %AddNamedProperty($RegExp.prototype, 'constructor', $RegExp, DONT_ENUM);
   %SetCode($RegExp, RegExpConstructor);
 
   InstallFunctions($RegExp.prototype, DONT_ENUM, $Array(
@@ -394,7 +405,7 @@
   // The length of compile is 1 in SpiderMonkey.
   %FunctionSetLength($RegExp.prototype.compile, 1);
 
-  // The properties input, $input, and $_ are aliases for each other.  When this
+  // The properties `input` and `$_` are aliases for each other.  When this
   // value is set the value it is set to is coerced to a string.
   // Getter and setter for the input.
   var RegExpGetInput = function() {
@@ -406,12 +417,10 @@
   };
 
   %OptimizeObjectForAddingMultipleProperties($RegExp, 22);
-  %DefineOrRedefineAccessorProperty($RegExp, 'input', RegExpGetInput,
-                                    RegExpSetInput, DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$_', RegExpGetInput,
-                                    RegExpSetInput, DONT_ENUM | DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$input', RegExpGetInput,
-                                    RegExpSetInput, DONT_ENUM | DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, 'input', RegExpGetInput,
+                                   RegExpSetInput, DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, '$_', RegExpGetInput,
+                                   RegExpSetInput, DONT_ENUM | DONT_DELETE);
 
   // The properties multiline and $* are aliases for each other.  When this
   // value is set in SpiderMonkey, the value it is set to is coerced to a
@@ -425,40 +434,40 @@
   var RegExpGetMultiline = function() { return multiline; };
   var RegExpSetMultiline = function(flag) { multiline = flag ? true : false; };
 
-  %DefineOrRedefineAccessorProperty($RegExp, 'multiline', RegExpGetMultiline,
-                                    RegExpSetMultiline, DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$*', RegExpGetMultiline,
-                                    RegExpSetMultiline,
-                                    DONT_ENUM | DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, 'multiline', RegExpGetMultiline,
+                                   RegExpSetMultiline, DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, '$*', RegExpGetMultiline,
+                                   RegExpSetMultiline,
+                                   DONT_ENUM | DONT_DELETE);
 
 
   var NoOpSetter = function(ignored) {};
 
 
   // Static properties set by a successful match.
-  %DefineOrRedefineAccessorProperty($RegExp, 'lastMatch', RegExpGetLastMatch,
-                                    NoOpSetter, DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$&', RegExpGetLastMatch,
-                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, 'lastParen', RegExpGetLastParen,
-                                    NoOpSetter, DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$+', RegExpGetLastParen,
-                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, 'leftContext',
-                                    RegExpGetLeftContext, NoOpSetter,
-                                    DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, '$`', RegExpGetLeftContext,
-                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, 'rightContext',
-                                    RegExpGetRightContext, NoOpSetter,
-                                    DONT_DELETE);
-  %DefineOrRedefineAccessorProperty($RegExp, "$'", RegExpGetRightContext,
-                                    NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, 'lastMatch', RegExpGetLastMatch,
+                                   NoOpSetter, DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, '$&', RegExpGetLastMatch,
+                                   NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, 'lastParen', RegExpGetLastParen,
+                                   NoOpSetter, DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, '$+', RegExpGetLastParen,
+                                   NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, 'leftContext',
+                                   RegExpGetLeftContext, NoOpSetter,
+                                   DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, '$`', RegExpGetLeftContext,
+                                   NoOpSetter, DONT_ENUM | DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, 'rightContext',
+                                   RegExpGetRightContext, NoOpSetter,
+                                   DONT_DELETE);
+  %DefineAccessorPropertyUnchecked($RegExp, "$'", RegExpGetRightContext,
+                                   NoOpSetter, DONT_ENUM | DONT_DELETE);
 
   for (var i = 1; i < 10; ++i) {
-    %DefineOrRedefineAccessorProperty($RegExp, '$' + i,
-                                      RegExpMakeCaptureGetter(i), NoOpSetter,
-                                      DONT_DELETE);
+    %DefineAccessorPropertyUnchecked($RegExp, '$' + i,
+                                     RegExpMakeCaptureGetter(i), NoOpSetter,
+                                     DONT_DELETE);
   }
   %ToFastProperties($RegExp);
 }
diff --git a/src/rewriter.cc b/src/rewriter.cc
index c92ccda..867229a 100644
--- a/src/rewriter.cc
+++ b/src/rewriter.cc
@@ -15,12 +15,14 @@
 
 class Processor: public AstVisitor {
  public:
-  Processor(Variable* result, Zone* zone)
+  Processor(Variable* result, Zone* zone, AstNode::IdGen* ast_node_id_gen)
       : result_(result),
         result_assigned_(false),
         is_set_(false),
         in_try_(false),
-        factory_(zone) {
+        // Passing a null AstValueFactory is fine, because Processor doesn't
+        // need to create strings or literals.
+        factory_(zone, NULL, ast_node_id_gen) {
     InitializeAstVisitor(zone);
   }
 
@@ -227,21 +229,23 @@
 // continue to be used in the case of failure.
 bool Rewriter::Rewrite(CompilationInfo* info) {
   FunctionLiteral* function = info->function();
-  ASSERT(function != NULL);
+  DCHECK(function != NULL);
   Scope* scope = function->scope();
-  ASSERT(scope != NULL);
+  DCHECK(scope != NULL);
   if (!scope->is_global_scope() && !scope->is_eval_scope()) return true;
 
   ZoneList<Statement*>* body = function->body();
   if (!body->is_empty()) {
-    Variable* result = scope->NewTemporary(
-        info->isolate()->factory()->dot_result_string());
-    Processor processor(result, info->zone());
+    Variable* result =
+        scope->NewTemporary(info->ast_value_factory()->dot_result_string());
+    // The name string must be internalized at this point.
+    DCHECK(!result->name().is_null());
+    Processor processor(result, info->zone(), info->ast_node_id_gen());
     processor.Process(body);
     if (processor.HasStackOverflow()) return false;
 
     if (processor.result_assigned()) {
-      ASSERT(function->end_position() != RelocInfo::kNoPosition);
+      DCHECK(function->end_position() != RelocInfo::kNoPosition);
       // Set the position of the assignment statement one character past the
       // source code, such that it definitely is not in the source code range
       // of an immediate inner scope. For example in
@@ -250,7 +254,7 @@
       // coincides with the end of the with scope which is the position of '1'.
       int pos = function->end_position();
       VariableProxy* result_proxy = processor.factory()->NewVariableProxy(
-          result->name(), false, result->interface(), pos);
+          result->raw_name(), false, result->interface(), pos);
       result_proxy->BindTo(result);
       Statement* result_statement =
           processor.factory()->NewReturnStatement(result_proxy, pos);
diff --git a/src/runtime-profiler.cc b/src/runtime-profiler.cc
index dddcad0..d6099d4 100644
--- a/src/runtime-profiler.cc
+++ b/src/runtime-profiler.cc
@@ -7,15 +7,15 @@
 #include "src/runtime-profiler.h"
 
 #include "src/assembler.h"
+#include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
 #include "src/execution.h"
 #include "src/full-codegen.h"
 #include "src/global-handles.h"
+#include "src/heap/mark-compact.h"
 #include "src/isolate-inl.h"
-#include "src/mark-compact.h"
-#include "src/platform.h"
 #include "src/scopeinfo.h"
 
 namespace v8 {
@@ -57,35 +57,43 @@
 }
 
 
-static void GetICCounts(Code* shared_code,
-                        int* ic_with_type_info_count,
-                        int* ic_total_count,
-                        int* percentage) {
+static void GetICCounts(Code* shared_code, int* ic_with_type_info_count,
+                        int* ic_generic_count, int* ic_total_count,
+                        int* type_info_percentage, int* generic_percentage) {
   *ic_total_count = 0;
+  *ic_generic_count = 0;
   *ic_with_type_info_count = 0;
   Object* raw_info = shared_code->type_feedback_info();
   if (raw_info->IsTypeFeedbackInfo()) {
     TypeFeedbackInfo* info = TypeFeedbackInfo::cast(raw_info);
     *ic_with_type_info_count = info->ic_with_type_info_count();
+    *ic_generic_count = info->ic_generic_count();
     *ic_total_count = info->ic_total_count();
   }
-  *percentage = *ic_total_count > 0
-      ? 100 * *ic_with_type_info_count / *ic_total_count
-      : 100;
+  if (*ic_total_count > 0) {
+    *type_info_percentage = 100 * *ic_with_type_info_count / *ic_total_count;
+    *generic_percentage = 100 * *ic_generic_count / *ic_total_count;
+  } else {
+    *type_info_percentage = 100;  // Compared against lower bound.
+    *generic_percentage = 0;      // Compared against upper bound.
+  }
 }
 
 
 void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
-  ASSERT(function->IsOptimizable());
+  DCHECK(function->IsOptimizable());
 
   if (FLAG_trace_opt && function->PassesFilter(FLAG_hydrogen_filter)) {
     PrintF("[marking ");
     function->ShortPrint();
     PrintF(" for recompilation, reason: %s", reason);
     if (FLAG_type_info_threshold > 0) {
-      int typeinfo, total, percentage;
-      GetICCounts(function->shared()->code(), &typeinfo, &total, &percentage);
-      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total, percentage);
+      int typeinfo, generic, total, type_percentage, generic_percentage;
+      GetICCounts(function->shared()->code(), &typeinfo, &generic, &total,
+                  &type_percentage, &generic_percentage);
+      PrintF(", ICs with typeinfo: %d/%d (%d%%)", typeinfo, total,
+             type_percentage);
+      PrintF(", generic ICs: %d/%d (%d%%)", generic, total, generic_percentage);
     }
     PrintF("]\n");
   }
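
GetICCounts above now splits IC feedback into a type-info share and a generic share, with deliberate defaults when a function has no ICs at all. A standalone restatement of that arithmetic:

```cpp
// With no ICs at all, type-info coverage defaults to 100% (so it passes its
// lower bound) and the generic share to 0% (so it passes its upper bound);
// an IC-free function therefore stays eligible for optimization.
void ComputeIcPercentages(int with_type_info, int generic, int total,
                          int* type_info_percentage, int* generic_percentage) {
  if (total > 0) {
    *type_info_percentage = 100 * with_type_info / total;
    *generic_percentage = 100 * generic / total;
  } else {
    *type_info_percentage = 100;  // compared against a lower bound
    *generic_percentage = 0;      // compared against an upper bound
  }
}
```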
@@ -101,7 +109,7 @@
       // recompilation race.  This goes away as soon as OSR becomes one-shot.
       return;
     }
-    ASSERT(!function->IsInOptimizationQueue());
+    DCHECK(!function->IsInOptimizationQueue());
     function->MarkForConcurrentOptimization();
   } else {
     // The next call to the function will trigger optimization.
@@ -110,7 +118,9 @@
 }
 
 
-void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function) {
+void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
+                                                int loop_nesting_levels) {
+  SharedFunctionInfo* shared = function->shared();
   // See AlwaysFullCompiler (in compiler.cc) comment on why we need
   // Debug::has_break_points().
   if (!FLAG_use_osr ||
@@ -119,7 +129,6 @@
     return;
   }
 
-  SharedFunctionInfo* shared = function->shared();
   // If the code is not optimizable, don't try OSR.
   if (!shared->code()->optimizable()) return;
 
@@ -137,7 +146,9 @@
     PrintF("]\n");
   }
 
-  BackEdgeTable::Patch(isolate_, shared->code());
+  for (int i = 0; i < loop_nesting_levels; i++) {
+    BackEdgeTable::Patch(isolate_, shared->code());
+  }
 }
 
 
@@ -175,14 +186,8 @@
     if (shared_code->kind() != Code::FUNCTION) continue;
     if (function->IsInOptimizationQueue()) continue;
 
-    if (FLAG_always_osr &&
-        shared_code->allow_osr_at_loop_nesting_level() == 0) {
-      // Testing mode: always try an OSR compile for every function.
-      for (int i = 0; i < Code::kMaxLoopNestingMarker; i++) {
-        // TODO(titzer): fix AttemptOnStackReplacement to avoid this dumb loop.
-        shared_code->set_allow_osr_at_loop_nesting_level(i);
-        AttemptOnStackReplacement(function);
-      }
+    if (FLAG_always_osr) {
+      AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
       // Fall through and do a normal optimized compile as well.
     } else if (!frame->is_optimized() &&
         (function->IsMarkedForOptimization() ||
@@ -196,12 +201,7 @@
       if (shared_code->CodeSize() > allowance) {
         if (ticks < 255) shared_code->set_profiler_ticks(ticks + 1);
       } else {
-        int nesting = shared_code->allow_osr_at_loop_nesting_level();
-        if (nesting < Code::kMaxLoopNestingMarker) {
-          int new_nesting = nesting + 1;
-          shared_code->set_allow_osr_at_loop_nesting_level(new_nesting);
-          AttemptOnStackReplacement(function);
-        }
+        AttemptOnStackReplacement(function);
       }
       continue;
     }
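
The two hunks above fold the --always-osr caller loop into AttemptOnStackReplacement itself via the new loop_nesting_levels parameter (default 1, declared in runtime-profiler.h below). A hedged sketch of the resulting shape; PatchBackEdges is a hypothetical stand-in for BackEdgeTable::Patch, assumed to arm one more nesting level per call, which is how the removed caller-side loop read:

```cpp
// Hypothetical stand-in for BackEdgeTable::Patch.
void PatchBackEdges() { /* arm OSR at one more loop-nesting level */ }

// Sketch of the consolidated entry point: --always-osr now simply requests
// Code::kMaxLoopNestingMarker levels instead of looping at the call site.
void AttemptOnStackReplacement(int loop_nesting_levels = 1) {
  for (int i = 0; i < loop_nesting_levels; i++) {
    PatchBackEdges();
  }
}
```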
@@ -235,9 +235,11 @@
     int ticks = shared_code->profiler_ticks();
 
     if (ticks >= kProfilerTicksBeforeOptimization) {
-      int typeinfo, total, percentage;
-      GetICCounts(shared_code, &typeinfo, &total, &percentage);
-      if (percentage >= FLAG_type_info_threshold) {
+      int typeinfo, generic, total, type_percentage, generic_percentage;
+      GetICCounts(shared_code, &typeinfo, &generic, &total, &type_percentage,
+                  &generic_percentage);
+      if (type_percentage >= FLAG_type_info_threshold &&
+          generic_percentage <= FLAG_generic_ic_threshold) {
         // If this particular function hasn't had any ICs patched for enough
         // ticks, optimize it now.
         Optimize(function, "hot and stable");
@@ -248,15 +250,23 @@
         if (FLAG_trace_opt_verbose) {
           PrintF("[not yet optimizing ");
           function->PrintName();
-          PrintF(", not enough type info: %d/%d (%d%%)]\n",
-                 typeinfo, total, percentage);
+          PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
+                 type_percentage);
         }
       }
     } else if (!any_ic_changed_ &&
                shared_code->instruction_size() < kMaxSizeEarlyOpt) {
       // If no IC was patched since the last tick and this function is very
       // small, optimistically optimize it now.
-      Optimize(function, "small function");
+      int typeinfo, generic, total, type_percentage, generic_percentage;
+      GetICCounts(shared_code, &typeinfo, &generic, &total, &type_percentage,
+                  &generic_percentage);
+      if (type_percentage >= FLAG_type_info_threshold &&
+          generic_percentage <= FLAG_generic_ic_threshold) {
+        Optimize(function, "small function");
+      } else {
+        shared_code->set_profiler_ticks(ticks + 1);
+      }
     } else {
       shared_code->set_profiler_ticks(ticks + 1);
     }
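
With the hunk above, the "small function" shortcut stops being unconditional: it now runs through the same feedback-quality gate as "hot and stable", with FLAG_generic_ic_threshold as the new upper bound on generic ICs. The combined predicate, restated standalone:

```cpp
// Both optimization paths now require enough type feedback and a low
// generic-IC share before marking a function for recompilation.
bool FeedbackAllowsOptimization(int type_percentage, int generic_percentage,
                                int type_info_threshold,
                                int generic_ic_threshold) {
  return type_percentage >= type_info_threshold &&
         generic_percentage <= generic_ic_threshold;
}
```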
diff --git a/src/runtime-profiler.h b/src/runtime-profiler.h
index fa8352d..eff443d 100644
--- a/src/runtime-profiler.h
+++ b/src/runtime-profiler.h
@@ -8,12 +8,16 @@
 #include "src/allocation.h"
 
 namespace v8 {
+
+namespace base {
+class Semaphore;
+}
+
 namespace internal {
 
 class Isolate;
 class JSFunction;
 class Object;
-class Semaphore;
 
 class RuntimeProfiler {
  public:
@@ -23,7 +27,7 @@
 
   void NotifyICChanged() { any_ic_changed_ = true; }
 
-  void AttemptOnStackReplacement(JSFunction* function);
+  void AttemptOnStackReplacement(JSFunction* function, int nesting_levels = 1);
 
  private:
   void Optimize(JSFunction* function, const char* reason);
diff --git a/src/runtime.cc b/src/runtime.cc
index 36b3177..cfef8c2 100644
--- a/src/runtime.cc
+++ b/src/runtime.cc
@@ -11,38 +11,41 @@
 #include "src/allocation-site-scopes.h"
 #include "src/api.h"
 #include "src/arguments.h"
+#include "src/bailout-reason.h"
+#include "src/base/cpu.h"
+#include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
 #include "src/compiler.h"
 #include "src/conversions.h"
-#include "src/cpu.h"
 #include "src/cpu-profiler.h"
+#include "src/date.h"
 #include "src/dateparser-inl.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
-#include "src/date.h"
 #include "src/execution.h"
 #include "src/full-codegen.h"
 #include "src/global-handles.h"
 #include "src/isolate-inl.h"
-#include "src/jsregexp.h"
-#include "src/jsregexp-inl.h"
 #include "src/json-parser.h"
 #include "src/json-stringifier.h"
+#include "src/jsregexp-inl.h"
+#include "src/jsregexp.h"
 #include "src/liveedit.h"
 #include "src/misc-intrinsics.h"
 #include "src/parser.h"
-#include "src/platform.h"
-#include "src/runtime-profiler.h"
+#include "src/prototype.h"
 #include "src/runtime.h"
+#include "src/runtime-profiler.h"
 #include "src/scopeinfo.h"
 #include "src/smart-pointers.h"
 #include "src/string-search.h"
-#include "src/stub-cache.h"
 #include "src/uri.h"
+#include "src/utils.h"
 #include "src/v8threads.h"
 #include "src/vm-state-inl.h"
+#include "third_party/fdlibm/fdlibm.h"
 
 #ifdef V8_I18N_SUPPORT
 #include "src/i18n.h"
@@ -149,6 +152,15 @@
   StrictMode name = static_cast<StrictMode>(args.smi_at(index));
 
 
+// Assert that the given argument is a number within the Int32 range
+// and convert it to int32_t.  If the argument is not an Int32 call
+// IllegalOperation and return.
+#define CONVERT_INT32_ARG_CHECKED(name, index)                       \
+  RUNTIME_ASSERT(args[index]->IsNumber());                           \
+  int32_t name = 0;                                                  \
+  RUNTIME_ASSERT(args[index]->ToInt32(&name));
+
+
 static Handle<Map> ComputeObjectLiteralMap(
     Handle<Context> context,
     Handle<FixedArray> constant_properties,
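
The new CONVERT_INT32_ARG_CHECKED macro above insists the argument is exactly representable as an int32 before converting; otherwise the RUNTIME_ASSERT rejects the call. A standalone sketch of that representability test (the real check lives behind the ToInt32 call the macro makes; this is our restatement, not V8's code):

```cpp
#include <cstdint>

// NaN, infinities, values outside [INT32_MIN, INT32_MAX], and fractional
// values all fail, so the runtime call is rejected rather than silently
// truncated.
bool ToInt32Checked(double number, int32_t* out) {
  if (!(number >= INT32_MIN && number <= INT32_MAX)) return false;
  int32_t candidate = static_cast<int32_t>(number);
  if (static_cast<double>(candidate) != number) return false;
  *out = candidate;
  return true;
}
```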
@@ -169,8 +181,8 @@
     } else {
       // Bail out as a non-internalized-string non-index key makes caching
       // impossible.
-      // ASSERT to make sure that the if condition after the loop is false.
-      ASSERT(number_of_string_keys != number_of_properties);
+      // DCHECK to make sure that the if condition after the loop is false.
+      DCHECK(number_of_string_keys != number_of_properties);
       break;
     }
   }
@@ -190,13 +202,13 @@
           keys->set(index++, key);
         }
       }
-      ASSERT(index == number_of_string_keys);
+      DCHECK(index == number_of_string_keys);
     }
     *is_result_from_cache = true;
     return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
   }
   *is_result_from_cache = false;
-  return Map::Create(handle(context->object_function()), number_of_properties);
+  return Map::Create(isolate, number_of_properties);
 }
 
 
@@ -251,9 +263,6 @@
     JSObject::NormalizeProperties(
         boilerplate, KEEP_INOBJECT_PROPERTIES, length / 2);
   }
-  Object::ValueType value_type = should_normalize
-      ? Object::FORCE_TAGGED : Object::OPTIMAL_REPRESENTATION;
-
   // TODO(verwaest): Support tracking representations in the boilerplate.
   for (int index = 0; index < length; index +=2) {
     Handle<Object> key(constant_properties->get(index+0), isolate);
@@ -269,34 +278,33 @@
     }
     MaybeHandle<Object> maybe_result;
     uint32_t element_index = 0;
-    StoreMode mode = value->IsJSObject() ? FORCE_FIELD : ALLOW_AS_CONSTANT;
     if (key->IsInternalizedString()) {
       if (Handle<String>::cast(key)->AsArrayIndex(&element_index)) {
         // Array index as string (uint32).
-        maybe_result = JSObject::SetOwnElement(
-            boilerplate, element_index, value, SLOPPY);
+        if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
+        maybe_result =
+            JSObject::SetOwnElement(boilerplate, element_index, value, SLOPPY);
       } else {
         Handle<String> name(String::cast(*key));
-        ASSERT(!name->AsArrayIndex(&element_index));
+        DCHECK(!name->AsArrayIndex(&element_index));
         maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(
-            boilerplate, name, value, NONE,
-            value_type, mode);
+            boilerplate, name, value, NONE);
       }
     } else if (key->ToArrayIndex(&element_index)) {
       // Array index (uint32).
-      maybe_result = JSObject::SetOwnElement(
-          boilerplate, element_index, value, SLOPPY);
+      if (value->IsUninitialized()) value = handle(Smi::FromInt(0), isolate);
+      maybe_result =
+          JSObject::SetOwnElement(boilerplate, element_index, value, SLOPPY);
     } else {
       // Non-uint32 number.
-      ASSERT(key->IsNumber());
+      DCHECK(key->IsNumber());
       double num = key->Number();
       char arr[100];
-      Vector<char> buffer(arr, ARRAY_SIZE(arr));
+      Vector<char> buffer(arr, arraysize(arr));
       const char* str = DoubleToCString(num, buffer);
       Handle<String> name = isolate->factory()->NewStringFromAsciiChecked(str);
-      maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(
-          boilerplate, name, value, NONE,
-          value_type, mode);
+      maybe_result = JSObject::SetOwnPropertyIgnoreAttributes(boilerplate, name,
+                                                              value, NONE);
     }
     // If setting the property on the boilerplate throws an
     // exception, the exception is converted to an empty handle in
@@ -310,7 +318,7 @@
   // computed properties have been assigned so that we can generate
   // constant function properties.
   if (should_transform && !has_function_literal) {
-    JSObject::TransformToFastProperties(
+    JSObject::MigrateSlowToFast(
         boilerplate, boilerplate->map()->unused_property_fields());
   }
 
@@ -338,9 +346,6 @@
 }
 
 
-static const int kSmiLiteralMinimumLength = 1024;
-
-
 MaybeHandle<Object> Runtime::CreateArrayLiteralBoilerplate(
     Isolate* isolate,
     Handle<FixedArray> literals,
@@ -361,21 +366,20 @@
       FixedArrayBase::cast(elements->get(1)));
 
   { DisallowHeapAllocation no_gc;
-    ASSERT(IsFastElementsKind(constant_elements_kind));
+    DCHECK(IsFastElementsKind(constant_elements_kind));
     Context* native_context = isolate->context()->native_context();
     Object* maps_array = native_context->js_array_maps();
-    ASSERT(!maps_array->IsUndefined());
+    DCHECK(!maps_array->IsUndefined());
     Object* map = FixedArray::cast(maps_array)->get(constant_elements_kind);
     object->set_map(Map::cast(map));
   }
 
   Handle<FixedArrayBase> copied_elements_values;
   if (IsFastDoubleElementsKind(constant_elements_kind)) {
-    ASSERT(FLAG_smi_only_arrays);
     copied_elements_values = isolate->factory()->CopyFixedDoubleArray(
         Handle<FixedDoubleArray>::cast(constant_elements_values));
   } else {
-    ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind));
+    DCHECK(IsFastSmiOrObjectElementsKind(constant_elements_kind));
     const bool is_cow =
         (constant_elements_values->map() ==
          isolate->heap()->fixed_cow_array_map());
@@ -385,7 +389,7 @@
       Handle<FixedArray> fixed_array_values =
           Handle<FixedArray>::cast(copied_elements_values);
       for (int i = 0; i < fixed_array_values->length(); i++) {
-        ASSERT(!fixed_array_values->get(i)->IsFixedArray());
+        DCHECK(!fixed_array_values->get(i)->IsFixedArray());
       }
 #endif
     } else {
@@ -412,20 +416,6 @@
   object->set_elements(*copied_elements_values);
   object->set_length(Smi::FromInt(copied_elements_values->length()));
 
-  //  Ensure that the boilerplate object has FAST_*_ELEMENTS, unless the flag is
-  //  on or the object is larger than the threshold.
-  if (!FLAG_smi_only_arrays &&
-      constant_elements_values->length() < kSmiLiteralMinimumLength) {
-    ElementsKind elements_kind = object->GetElementsKind();
-    if (!IsFastObjectElementsKind(elements_kind)) {
-      if (IsFastHoleyElementsKind(elements_kind)) {
-        TransitionElements(object, FAST_HOLEY_ELEMENTS, isolate).Check();
-      } else {
-        TransitionElements(object, FAST_ELEMENTS, isolate).Check();
-      }
-    }
-  }
-
   JSObject::ValidateElements(object);
   return object;
 }
@@ -460,9 +450,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_CreateObjectLiteral) {
+RUNTIME_FUNCTION(Runtime_CreateObjectLiteral) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, constant_properties, 2);
@@ -523,7 +513,7 @@
   Handle<Object> literal_site(literals->get(literals_index), isolate);
   Handle<AllocationSite> site;
   if (*literal_site == isolate->heap()->undefined_value()) {
-    ASSERT(*elements != isolate->heap()->empty_fixed_array());
+    DCHECK(*elements != isolate->heap()->empty_fixed_array());
     Handle<Object> boilerplate;
     ASSIGN_RETURN_ON_EXCEPTION(
         isolate, boilerplate,
@@ -565,8 +555,8 @@
   AllocationSiteUsageContext usage_context(isolate, site, enable_mementos);
   usage_context.EnterNewScope();
   JSObject::DeepCopyHints hints = (flags & ArrayLiteral::kShallowElements) == 0
-      ? JSObject::kNoHints
-      : JSObject::kObjectIsShallowArray;
+                                      ? JSObject::kNoHints
+                                      : JSObject::kObjectIsShallow;
   MaybeHandle<JSObject> copy = JSObject::DeepCopy(boilerplate, &usage_context,
                                                   hints);
   usage_context.ExitScope(site, boilerplate);
@@ -574,9 +564,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_CreateArrayLiteral) {
+RUNTIME_FUNCTION(Runtime_CreateArrayLiteral) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
@@ -590,9 +580,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_CreateArrayLiteralStubBailout) {
+RUNTIME_FUNCTION(Runtime_CreateArrayLiteralStubBailout) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(literals_index, 1);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, elements, 2);
@@ -607,7 +597,7 @@
 
 RUNTIME_FUNCTION(Runtime_CreateSymbol) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
   RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
   Handle<Symbol> symbol = isolate->factory()->NewSymbol();
@@ -618,7 +608,7 @@
 
 RUNTIME_FUNCTION(Runtime_CreatePrivateSymbol) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
   RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
   Handle<Symbol> symbol = isolate->factory()->NewPrivateSymbol();
@@ -627,9 +617,20 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateSymbol) {
+RUNTIME_FUNCTION(Runtime_CreatePrivateOwnSymbol) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
+  RUNTIME_ASSERT(name->IsString() || name->IsUndefined());
+  Handle<Symbol> symbol = isolate->factory()->NewPrivateOwnSymbol();
+  if (name->IsString()) symbol->set_name(*name);
+  return *symbol;
+}
+
+
+RUNTIME_FUNCTION(Runtime_CreateGlobalPrivateOwnSymbol) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   Handle<JSObject> registry = isolate->GetSymbolRegistry();
   Handle<String> part = isolate->factory()->private_intern_string();
@@ -640,11 +641,12 @@
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, symbol, Object::GetPropertyOrElement(privates, name));
   if (!symbol->IsSymbol()) {
-    ASSERT(symbol->IsUndefined());
+    DCHECK(symbol->IsUndefined());
     symbol = isolate->factory()->NewPrivateSymbol();
     Handle<Symbol>::cast(symbol)->set_name(*name);
-    JSObject::SetProperty(Handle<JSObject>::cast(privates),
-                          name, symbol, NONE, STRICT).Assert();
+    Handle<Symbol>::cast(symbol)->set_is_own(true);
+    JSObject::SetProperty(Handle<JSObject>::cast(privates), name, symbol,
+                          STRICT).Assert();
   }
   return *symbol;
 }
@@ -652,7 +654,7 @@
 
 RUNTIME_FUNCTION(Runtime_NewSymbolWrapper) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Symbol, symbol, 0);
   return *Object::ToObject(isolate, symbol).ToHandleChecked();
 }
@@ -660,7 +662,7 @@
 
 RUNTIME_FUNCTION(Runtime_SymbolDescription) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Symbol, symbol, 0);
   return symbol->name();
 }
@@ -668,14 +670,14 @@
 
 RUNTIME_FUNCTION(Runtime_SymbolRegistry) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   return *isolate->GetSymbolRegistry();
 }
 
 
 RUNTIME_FUNCTION(Runtime_SymbolIsPrivate) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Symbol, symbol, 0);
   return isolate->heap()->ToBoolean(symbol->is_private());
 }
@@ -683,7 +685,7 @@
 
 RUNTIME_FUNCTION(Runtime_CreateJSProxy) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
   if (!prototype->IsJSReceiver()) prototype = isolate->factory()->null_value();
@@ -693,7 +695,7 @@
 
 RUNTIME_FUNCTION(Runtime_CreateJSFunctionProxy) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, handler, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, call_trap, 1);
   RUNTIME_ASSERT(call_trap->IsJSFunction() || call_trap->IsJSFunctionProxy());
@@ -707,7 +709,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsJSProxy) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSProxy());
 }
@@ -715,7 +717,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsJSFunctionProxy) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSFunctionProxy());
 }
@@ -723,7 +725,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetHandler) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSProxy, proxy, 0);
   return proxy->handler();
 }
@@ -731,7 +733,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetCallTrap) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
   return proxy->call_trap();
 }
@@ -739,7 +741,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetConstructTrap) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunctionProxy, proxy, 0);
   return proxy->construct_trap();
 }
@@ -747,7 +749,7 @@
 
 RUNTIME_FUNCTION(Runtime_Fix) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSProxy, proxy, 0);
   JSProxy::Fix(proxy);
   return isolate->heap()->undefined_value();
@@ -757,7 +759,7 @@
 void Runtime::FreeArrayBuffer(Isolate* isolate,
                               JSArrayBuffer* phantom_array_buffer) {
   if (phantom_array_buffer->should_be_freed()) {
-    ASSERT(phantom_array_buffer->is_external());
+    DCHECK(phantom_array_buffer->is_external());
     free(phantom_array_buffer->backing_store());
   }
   if (phantom_array_buffer->is_external()) return;
@@ -780,7 +782,7 @@
                                bool is_external,
                                void* data,
                                size_t allocated_length) {
-  ASSERT(array_buffer->GetInternalFieldCount() ==
+  DCHECK(array_buffer->GetInternalFieldCount() ==
       v8::ArrayBuffer::kInternalFieldCount);
   for (int i = 0; i < v8::ArrayBuffer::kInternalFieldCount; i++) {
     array_buffer->SetInternalField(i, Smi::FromInt(0));
@@ -848,7 +850,7 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayBufferInitialize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, holder, 0);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(byteLength, 1);
   if (!holder->byte_length()->IsUndefined()) {
@@ -857,15 +859,15 @@
   }
   size_t allocated_length = 0;
   if (!TryNumberToSize(isolate, *byteLength, &allocated_length)) {
-    return isolate->Throw(
-        *isolate->factory()->NewRangeError("invalid_array_buffer_length",
-                                           HandleVector<Object>(NULL, 0)));
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError("invalid_array_buffer_length",
+                               HandleVector<Object>(NULL, 0)));
   }
   if (!Runtime::SetupArrayBufferAllocatingData(isolate,
                                                holder, allocated_length)) {
-    return isolate->Throw(
-        *isolate->factory()->NewRangeError("invalid_array_buffer_length",
-                                           HandleVector<Object>(NULL, 0)));
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError("invalid_array_buffer_length",
+                               HandleVector<Object>(NULL, 0)));
   }
   return *holder;
 }
@@ -873,7 +875,7 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayBufferGetByteLength) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSArrayBuffer, holder, 0);
   return holder->byte_length();
 }
@@ -881,7 +883,7 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayBufferSliceImpl) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, source, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, target, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(first, 2);
@@ -904,7 +906,7 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayBufferIsView) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, object, 0);
   return isolate->heap()->ToBoolean(object->IsJSArrayBufferView());
 }
@@ -912,13 +914,13 @@
 
 RUNTIME_FUNCTION(Runtime_ArrayBufferNeuter) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, array_buffer, 0);
   if (array_buffer->backing_store() == NULL) {
     CHECK(Smi::FromInt(0) == array_buffer->byte_length());
     return isolate->heap()->undefined_value();
   }
-  ASSERT(!array_buffer->is_external());
+  DCHECK(!array_buffer->is_external());
   void* backing_store = array_buffer->backing_store();
   size_t byte_length = NumberToSize(isolate, array_buffer->byte_length());
   array_buffer->set_is_external(true);
@@ -954,7 +956,7 @@
 
 RUNTIME_FUNCTION(Runtime_TypedArrayInitialize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 5);
+  DCHECK(args.length() == 5);
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
   CONVERT_SMI_ARG_CHECKED(arrayId, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, maybe_buffer, 2);
@@ -995,14 +997,14 @@
   size_t length = byte_length / element_size;
 
   if (length > static_cast<unsigned>(Smi::kMaxValue)) {
-    return isolate->Throw(
-        *isolate->factory()->NewRangeError("invalid_typed_array_length",
-                                           HandleVector<Object>(NULL, 0)));
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError("invalid_typed_array_length",
+                               HandleVector<Object>(NULL, 0)));
   }
 
   // All checks are done, now we can modify objects.
 
-  ASSERT(holder->GetInternalFieldCount() ==
+  DCHECK(holder->GetInternalFieldCount() ==
       v8::ArrayBufferView::kInternalFieldCount);
   for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
     holder->SetInternalField(i, Smi::FromInt(0));
@@ -1025,7 +1027,7 @@
     Handle<Map> map =
         JSObject::GetElementsTransitionMap(holder, external_elements_kind);
     JSObject::SetMapAndElements(holder, map, elements);
-    ASSERT(IsExternalArrayElementsKind(holder->map()->elements_kind()));
+    DCHECK(IsExternalArrayElementsKind(holder->map()->elements_kind()));
   } else {
     holder->set_buffer(Smi::FromInt(0));
     holder->set_weak_next(isolate->heap()->undefined_value());
@@ -1045,7 +1047,7 @@
 // Returns true if backing store was initialized or false otherwise.
 RUNTIME_FUNCTION(Runtime_TypedArrayInitializeFromArrayLike) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
   CONVERT_SMI_ARG_CHECKED(arrayId, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, source, 2);
@@ -1077,13 +1079,13 @@
 
   if ((length > static_cast<unsigned>(Smi::kMaxValue)) ||
       (length > (kMaxInt / element_size))) {
-    return isolate->Throw(*isolate->factory()->
-          NewRangeError("invalid_typed_array_length",
-            HandleVector<Object>(NULL, 0)));
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError("invalid_typed_array_length",
+                               HandleVector<Object>(NULL, 0)));
   }
   size_t byte_length = length * element_size;
 
-  ASSERT(holder->GetInternalFieldCount() ==
+  DCHECK(holder->GetInternalFieldCount() ==
       v8::ArrayBufferView::kInternalFieldCount);
   for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
     holder->SetInternalField(i, Smi::FromInt(0));
@@ -1107,9 +1109,9 @@
 
   if (!Runtime::SetupArrayBufferAllocatingData(
         isolate, buffer, byte_length, false)) {
-    return isolate->Throw(*isolate->factory()->
-          NewRangeError("invalid_array_buffer_length",
-            HandleVector<Object>(NULL, 0)));
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError("invalid_array_buffer_length",
+                               HandleVector<Object>(NULL, 0)));
   }
 
   holder->set_buffer(*buffer);
@@ -1153,7 +1155,7 @@
 #define BUFFER_VIEW_GETTER(Type, getter, accessor) \
   RUNTIME_FUNCTION(Runtime_##Type##Get##getter) {                    \
     HandleScope scope(isolate);                                               \
-    ASSERT(args.length() == 1);                                               \
+    DCHECK(args.length() == 1);                                               \
     CONVERT_ARG_HANDLE_CHECKED(JS##Type, holder, 0);                          \
     return holder->accessor();                                                \
   }
@@ -1167,7 +1169,7 @@
 
 RUNTIME_FUNCTION(Runtime_TypedArrayGetBuffer) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSTypedArray, holder, 0);
   return *holder->GetBuffer();
 }
@@ -1190,10 +1192,12 @@
 
 RUNTIME_FUNCTION(Runtime_TypedArraySetFastCases) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  if (!args[0]->IsJSTypedArray())
-    return isolate->Throw(*isolate->factory()->NewTypeError(
-        "not_typed_array", HandleVector<Object>(NULL, 0)));
+  DCHECK(args.length() == 3);
+  if (!args[0]->IsJSTypedArray()) {
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewTypeError("not_typed_array", HandleVector<Object>(NULL, 0)));
+  }
 
   if (!args[1]->IsJSTypedArray())
     return Smi::FromInt(TYPED_ARRAY_SET_NON_TYPED_ARRAY);
@@ -1210,11 +1214,12 @@
   size_t source_length = NumberToSize(isolate, source->length());
   size_t target_byte_length = NumberToSize(isolate, target->byte_length());
   size_t source_byte_length = NumberToSize(isolate, source->byte_length());
-  if (offset > target_length ||
-      offset + source_length > target_length ||
-      offset + source_length < offset)  // overflow
-    return isolate->Throw(*isolate->factory()->NewRangeError(
-          "typed_array_set_source_too_large", HandleVector<Object>(NULL, 0)));
+  if (offset > target_length || offset + source_length > target_length ||
+      offset + source_length < offset) {  // overflow
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewRangeError("typed_array_set_source_too_large",
+                               HandleVector<Object>(NULL, 0)));
+  }
 
   size_t target_offset = NumberToSize(isolate, target->byte_offset());
   size_t source_offset = NumberToSize(isolate, source->byte_offset());
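
The reformatted condition above keeps three tests, and the last one is the easy-to-miss one: offset + source_length can wrap around in size_t and slip past the plain comparisons. Restated standalone:

```cpp
#include <cstddef>

// True when [offset, offset + source_length) fits inside the target,
// rejecting size_t wraparound explicitly.
bool SetRangeFits(size_t offset, size_t source_length, size_t target_length) {
  if (offset > target_length) return false;
  if (offset + source_length < offset) return false;  // size_t wraparound
  return offset + source_length <= target_length;
}
```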
@@ -1238,7 +1243,7 @@
       (target_base <= source_base &&
         target_base + target_byte_length > source_base)) {
     // We do not support overlapping ArrayBuffers
-    ASSERT(
+    DCHECK(
       target->GetBuffer()->backing_store() ==
       source->GetBuffer()->backing_store());
     return Smi::FromInt(TYPED_ARRAY_SET_TYPED_ARRAY_OVERLAPPING);
@@ -1249,8 +1254,8 @@
 
 
 RUNTIME_FUNCTION(Runtime_TypedArrayMaxSizeInHeap) {
-  ASSERT(args.length() == 0);
-  ASSERT_OBJECT_SIZE(
+  DCHECK(args.length() == 0);
+  DCHECK_OBJECT_SIZE(
       FLAG_typed_array_max_size_in_heap + FixedTypedArrayBase::kDataOffset);
   return Smi::FromInt(FLAG_typed_array_max_size_in_heap);
 }
@@ -1258,13 +1263,13 @@
 
 RUNTIME_FUNCTION(Runtime_DataViewInitialize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArrayBuffer, buffer, 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_offset, 2);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(byte_length, 3);
 
-  ASSERT(holder->GetInternalFieldCount() ==
+  DCHECK(holder->GetInternalFieldCount() ==
       v8::ArrayBufferView::kInternalFieldCount);
   for (int i = 0; i < v8::ArrayBufferView::kInternalFieldCount; i++) {
     holder->SetInternalField(i, Smi::FromInt(0));
@@ -1350,7 +1355,7 @@
 
   Value value;
   size_t buffer_offset = data_view_byte_offset + byte_offset;
-  ASSERT(
+  DCHECK(
       NumberToSize(isolate, buffer->byte_length())
       >= buffer_offset + sizeof(T));
   uint8_t* source =
@@ -1395,7 +1400,7 @@
   Value value;
   value.data = data;
   size_t buffer_offset = data_view_byte_offset + byte_offset;
-  ASSERT(
+  DCHECK(
       NumberToSize(isolate, buffer->byte_length())
       >= buffer_offset + sizeof(T));
   uint8_t* target =
@@ -1409,22 +1414,22 @@
 }
 
 
-#define DATA_VIEW_GETTER(TypeName, Type, Converter)                           \
-  RUNTIME_FUNCTION(Runtime_DataViewGet##TypeName) {                           \
-    HandleScope scope(isolate);                                               \
-    ASSERT(args.length() == 3);                                               \
-    CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);                        \
-    CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1);                             \
-    CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2);                         \
-    Type result;                                                              \
-    if (DataViewGetValue(                                                     \
-          isolate, holder, offset, is_little_endian, &result)) {              \
-      return *isolate->factory()->Converter(result);                          \
-    } else {                                                                  \
-      return isolate->Throw(*isolate->factory()->NewRangeError(               \
-          "invalid_data_view_accessor_offset",                                \
-          HandleVector<Object>(NULL, 0)));                                    \
-    }                                                                         \
+#define DATA_VIEW_GETTER(TypeName, Type, Converter)                   \
+  RUNTIME_FUNCTION(Runtime_DataViewGet##TypeName) {                   \
+    HandleScope scope(isolate);                                       \
+    DCHECK(args.length() == 3);                                       \
+    CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);                \
+    CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1);                     \
+    CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 2);                 \
+    Type result;                                                      \
+    if (DataViewGetValue(isolate, holder, offset, is_little_endian,   \
+                         &result)) {                                  \
+      return *isolate->factory()->Converter(result);                  \
+    } else {                                                          \
+      THROW_NEW_ERROR_RETURN_FAILURE(                                 \
+          isolate, NewRangeError("invalid_data_view_accessor_offset", \
+                                 HandleVector<Object>(NULL, 0)));     \
+    }                                                                 \
   }
 
 DATA_VIEW_GETTER(Uint8, uint8_t, NewNumberFromUint)
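
The getter macro above funnels into DataViewGetValue, whose job (per the earlier buffer_offset hunks) is a bounds-checked, endianness-aware read of sizeof(T) bytes. A standalone sketch of the byte handling; for brevity it assumes a little-endian host, whereas V8's real helper detects the host order:

```cpp
#include <cstddef>
#include <cstdint>
#include <cstring>

// Copy sizeof(T) bytes out of the backing store, reversing them when the
// requested endianness differs from the (assumed little-endian) host.
template <typename T>
T ReadLittleOrBig(const uint8_t* store, size_t offset, bool is_little_endian) {
  uint8_t bytes[sizeof(T)];
  std::memcpy(bytes, store + offset, sizeof(T));
  if (!is_little_endian) {  // big-endian request on a little-endian host
    for (size_t i = 0; i < sizeof(T) / 2; ++i) {
      uint8_t tmp = bytes[i];
      bytes[i] = bytes[sizeof(T) - 1 - i];
      bytes[sizeof(T) - 1 - i] = tmp;
    }
  }
  T result;
  std::memcpy(&result, bytes, sizeof(T));
  return result;
}
```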
@@ -1491,23 +1496,22 @@
 }
 
 
-#define DATA_VIEW_SETTER(TypeName, Type)                                      \
-  RUNTIME_FUNCTION(Runtime_DataViewSet##TypeName) {                           \
-    HandleScope scope(isolate);                                               \
-    ASSERT(args.length() == 4);                                               \
-    CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);                        \
-    CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1);                             \
-    CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);                              \
-    CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3);                         \
-    Type v = DataViewConvertValue<Type>(value->Number());                     \
-    if (DataViewSetValue(                                                     \
-          isolate, holder, offset, is_little_endian, v)) {                    \
-      return isolate->heap()->undefined_value();                              \
-    } else {                                                                  \
-      return isolate->Throw(*isolate->factory()->NewRangeError(               \
-          "invalid_data_view_accessor_offset",                                \
-          HandleVector<Object>(NULL, 0)));                                    \
-    }                                                                         \
+#define DATA_VIEW_SETTER(TypeName, Type)                                  \
+  RUNTIME_FUNCTION(Runtime_DataViewSet##TypeName) {                       \
+    HandleScope scope(isolate);                                           \
+    DCHECK(args.length() == 4);                                           \
+    CONVERT_ARG_HANDLE_CHECKED(JSDataView, holder, 0);                    \
+    CONVERT_NUMBER_ARG_HANDLE_CHECKED(offset, 1);                         \
+    CONVERT_NUMBER_ARG_HANDLE_CHECKED(value, 2);                          \
+    CONVERT_BOOLEAN_ARG_CHECKED(is_little_endian, 3);                     \
+    Type v = DataViewConvertValue<Type>(value->Number());                 \
+    if (DataViewSetValue(isolate, holder, offset, is_little_endian, v)) { \
+      return isolate->heap()->undefined_value();                          \
+    } else {                                                              \
+      THROW_NEW_ERROR_RETURN_FAILURE(                                     \
+          isolate, NewRangeError("invalid_data_view_accessor_offset",     \
+                                 HandleVector<Object>(NULL, 0)));         \
+    }                                                                     \
   }
 
 DATA_VIEW_SETTER(Uint8, uint8_t)
@@ -1524,7 +1528,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetInitialize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   Handle<OrderedHashSet> table = isolate->factory()->NewOrderedHashSet();
   holder->set_table(*table);
@@ -1534,19 +1538,19 @@
 
 RUNTIME_FUNCTION(Runtime_SetAdd) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
   table = OrderedHashSet::Add(table, key);
   holder->set_table(*table);
-  return isolate->heap()->undefined_value();
+  return *holder;
 }
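
// Runtime_SetAdd now returns the receiver instead of undefined, which is what
// the ES6 spec requires Set.prototype.add to return and what makes
// s.add(1).add(2) chain. The same fluent shape in a standalone C++ sketch
// (hypothetical type, not V8 API):

#include <set>

struct IntSet {
  std::set<int> rep;
  IntSet& Add(int v) {
    rep.insert(v);
    return *this;  // return the receiver, mirroring `return *holder;`
  }
};

int main() {
  IntSet s;
  s.Add(1).Add(2).Add(2);  // chains because Add returns the receiver
  return s.rep.size() == 2 ? 0 : 1;
}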
 
 
 RUNTIME_FUNCTION(Runtime_SetHas) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
@@ -1556,7 +1560,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetDelete) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
@@ -1569,7 +1573,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetClear) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
   table = OrderedHashSet::Clear(table);
@@ -1580,7 +1584,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetGetSize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, holder, 0);
   Handle<OrderedHashSet> table(OrderedHashSet::cast(holder->table()));
   return Smi::FromInt(table->NumberOfElements());
@@ -1589,7 +1593,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetIteratorInitialize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSSet, set, 1);
   CONVERT_SMI_ARG_CHECKED(kind, 2)
@@ -1604,16 +1608,17 @@
 
 
 RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
-  return *JSSetIterator::Next(holder);
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSSetIterator, holder, 0);
+  CONVERT_ARG_CHECKED(JSArray, value_array, 1);
+  return holder->Next(value_array);
 }
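
// Two things changed here: the scope is sealed (SealHandleScope asserts that
// no new handles are allocated on this hot path), and Next() now writes its
// result into a caller-provided array instead of allocating a fresh
// {value, done} object per step. A standalone sketch of that out-parameter
// protocol, with hypothetical types:

#include <cstddef>

struct IntCursor {
  const int* data;
  size_t length;
  size_t pos = 0;
  // Fills the caller-owned two-slot buffer and reports whether a value was
  // produced; nothing is allocated per iteration step.
  bool Next(int out[2]) {
    if (pos == length) return false;  // exhausted: buffer left untouched
    out[0] = data[pos++];             // value slot
    out[1] = 0;                       // kind/extra slot, as in value_array
    return true;
  }
};

int main() {
  const int data[] = {1, 2, 3};
  IntCursor c{data, 3};
  int slots[2];
  int sum = 0;
  while (c.Next(slots)) sum += slots[0];
  return sum == 6 ? 0 : 1;
}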
 
 
 RUNTIME_FUNCTION(Runtime_MapInitialize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   Handle<OrderedHashMap> table = isolate->factory()->NewOrderedHashMap();
   holder->set_table(*table);
@@ -1623,7 +1628,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapGet) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
@@ -1634,7 +1639,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapHas) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
@@ -1645,7 +1650,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapDelete) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
@@ -1659,7 +1664,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapClear) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
   table = OrderedHashMap::Clear(table);
@@ -1670,20 +1675,20 @@
 
 RUNTIME_FUNCTION(Runtime_MapSet) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
   Handle<OrderedHashMap> new_table = OrderedHashMap::Put(table, key, value);
   holder->set_table(*new_table);
-  return isolate->heap()->undefined_value();
+  return *holder;
 }
 
 
 RUNTIME_FUNCTION(Runtime_MapGetSize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, holder, 0);
   Handle<OrderedHashMap> table(OrderedHashMap::cast(holder->table()));
   return Smi::FromInt(table->NumberOfElements());
@@ -1692,7 +1697,7 @@
 
 RUNTIME_FUNCTION(Runtime_MapIteratorInitialize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSMap, map, 1);
   CONVERT_SMI_ARG_CHECKED(kind, 2)
@@ -1707,18 +1712,43 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
+RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
-  return *JSMapIterator::Next(holder);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
+  Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+  Handle<FixedArray> entries =
+      isolate->factory()->NewFixedArray(table->NumberOfElements() * 2);
+  {
+    DisallowHeapAllocation no_gc;
+    int number_of_non_hole_elements = 0;
+    for (int i = 0; i < table->Capacity(); i++) {
+      Handle<Object> key(table->KeyAt(i), isolate);
+      if (table->IsKey(*key)) {
+        entries->set(number_of_non_hole_elements++, *key);
+        Object* value = table->Lookup(key);
+        entries->set(number_of_non_hole_elements++, value);
+      }
+    }
+    DCHECK_EQ(table->NumberOfElements() * 2, number_of_non_hole_elements);
+  }
+  return *isolate->factory()->NewJSArrayWithElements(entries);
+}
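
// The loop above scans every slot up to Capacity(), skips the holes left by
// deleted entries, and packs live key/value pairs densely, which is why the
// final count must equal NumberOfElements() * 2. The same compaction in a
// standalone sketch over a hypothetical simplified table:

#include <optional>
#include <utility>
#include <vector>

template <typename K, typename V>
std::vector<std::pair<K, V>> PackEntries(
    const std::vector<std::optional<std::pair<K, V>>>& slots) {
  std::vector<std::pair<K, V>> packed;
  for (const auto& slot : slots) {
    if (slot.has_value()) packed.push_back(*slot);  // IsKey(*key) analogue
  }
  return packed;
}

int main() {
  std::vector<std::optional<std::pair<int, int>>> slots = {
      std::make_pair(1, 10), std::nullopt, std::make_pair(2, 20)};
  return PackEntries(slots).size() == 2 ? 0 : 1;
}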
+
+
+RUNTIME_FUNCTION(Runtime_MapIteratorNext) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(JSMapIterator, holder, 0);
+  CONVERT_ARG_CHECKED(JSArray, value_array, 1);
+  return holder->Next(value_array);
 }
 
 
 static Handle<JSWeakCollection> WeakCollectionInitialize(
     Isolate* isolate,
     Handle<JSWeakCollection> weak_collection) {
-  ASSERT(weak_collection->map()->inobject_properties() == 0);
+  DCHECK(weak_collection->map()->inobject_properties() == 0);
   Handle<ObjectHashTable> table = ObjectHashTable::New(isolate, 0);
   weak_collection->set_table(*table);
   return weak_collection;
@@ -1727,7 +1757,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionInitialize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   return *WeakCollectionInitialize(isolate, weak_collection);
 }
@@ -1735,7 +1765,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionGet) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
@@ -1749,7 +1779,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionHas) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
@@ -1763,7 +1793,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionDelete) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
@@ -1780,7 +1810,7 @@
 
 RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, weak_collection, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   RUNTIME_ASSERT(key->IsJSReceiver() || key->IsSymbol());
@@ -1790,56 +1820,89 @@
   RUNTIME_ASSERT(table->IsKey(*key));
   Handle<ObjectHashTable> new_table = ObjectHashTable::Put(table, key, value);
   weak_collection->set_table(*new_table);
-  return isolate->heap()->undefined_value();
+  return *weak_collection;
 }
 
 
-RUNTIME_FUNCTION(Runtime_ClassOf) {
-  SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(Object, obj, 0);
-  if (!obj->IsJSObject()) return isolate->heap()->null_value();
-  return JSObject::cast(obj)->class_name();
+RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
+  Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+  Handle<FixedArray> values =
+      isolate->factory()->NewFixedArray(table->NumberOfElements());
+  {
+    DisallowHeapAllocation no_gc;
+    int number_of_non_hole_elements = 0;
+    for (int i = 0; i < table->Capacity(); i++) {
+      Handle<Object> key(table->KeyAt(i), isolate);
+      if (table->IsKey(*key)) {
+        values->set(number_of_non_hole_elements++, *key);
+      }
+    }
+    DCHECK_EQ(table->NumberOfElements(), number_of_non_hole_elements);
+  }
+  return *isolate->factory()->NewJSArrayWithElements(values);
 }
 
 
 RUNTIME_FUNCTION(Runtime_GetPrototype) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
   // We don't expect access checks to be needed on JSProxy objects.
-  ASSERT(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
+  DCHECK(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
+  PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
   do {
-    if (obj->IsAccessCheckNeeded() &&
-        !isolate->MayNamedAccess(Handle<JSObject>::cast(obj),
-                                 isolate->factory()->proto_string(),
-                                 v8::ACCESS_GET)) {
-      isolate->ReportFailedAccessCheck(Handle<JSObject>::cast(obj),
-                                       v8::ACCESS_GET);
+    if (PrototypeIterator::GetCurrent(iter)->IsAccessCheckNeeded() &&
+        !isolate->MayNamedAccess(
+            Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
+            isolate->factory()->proto_string(), v8::ACCESS_GET)) {
+      isolate->ReportFailedAccessCheck(
+          Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
+          v8::ACCESS_GET);
       RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
       return isolate->heap()->undefined_value();
     }
-    obj = Object::GetPrototype(isolate, obj);
-  } while (obj->IsJSObject() &&
-           JSObject::cast(*obj)->map()->is_hidden_prototype());
-  return *obj;
+    iter.AdvanceIgnoringProxies();
+    if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
+      return *PrototypeIterator::GetCurrent(iter);
+    }
+  } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
+  return *PrototypeIterator::GetCurrent(iter);
 }
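
// The PrototypeIterator rewrite replaces the hand-rolled GetPrototype() loops
// with one walker: start at the receiver, step past it, and keep advancing
// while the current object is a hidden prototype (END_AT_NON_HIDDEN). The
// walk in isolation, with a hypothetical node type:

struct Link {
  const Link* next;  // prototype, or nullptr at the end of the chain
  bool hidden;       // map()->is_hidden_prototype() analogue
};

const Link* FirstVisiblePrototype(const Link& receiver) {
  const Link* current = receiver.next;             // step off the receiver
  while (current != nullptr && current->hidden) {  // skip hidden prototypes
    current = current->next;
  }
  return current;  // first non-hidden prototype, or nullptr for "null"
}

int main() {
  Link visible{nullptr, false};
  Link hidden{&visible, true};
  Link receiver{&hidden, false};
  return FirstVisiblePrototype(receiver) == &visible ? 0 : 1;
}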
 
 
 static inline Handle<Object> GetPrototypeSkipHiddenPrototypes(
     Isolate* isolate, Handle<Object> receiver) {
-  Handle<Object> current = Object::GetPrototype(isolate, receiver);
-  while (current->IsJSObject() &&
-         JSObject::cast(*current)->map()->is_hidden_prototype()) {
-    current = Object::GetPrototype(isolate, current);
+  PrototypeIterator iter(isolate, receiver);
+  while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN)) {
+    if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
+      return PrototypeIterator::GetCurrent(iter);
+    }
+    iter.Advance();
   }
-  return current;
+  return PrototypeIterator::GetCurrent(iter);
+}
+
+
+RUNTIME_FUNCTION(Runtime_InternalSetPrototype) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
+  DCHECK(!obj->IsAccessCheckNeeded());
+  DCHECK(!obj->map()->is_observed());
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, JSObject::SetPrototype(obj, prototype, false));
+  return *result;
 }
 
 
 RUNTIME_FUNCTION(Runtime_SetPrototype) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
   if (obj->IsAccessCheckNeeded() &&
@@ -1874,126 +1937,19 @@
 
 RUNTIME_FUNCTION(Runtime_IsInPrototypeChain) {
   HandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   // See ECMA-262, section 15.3.5.3, page 88 (steps 5 - 8).
   CONVERT_ARG_HANDLE_CHECKED(Object, O, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, V, 1);
+  PrototypeIterator iter(isolate, V, PrototypeIterator::START_AT_RECEIVER);
   while (true) {
-    Handle<Object> prototype = Object::GetPrototype(isolate, V);
-    if (prototype->IsNull()) return isolate->heap()->false_value();
-    if (*O == *prototype) return isolate->heap()->true_value();
-    V = prototype;
+    iter.AdvanceIgnoringProxies();
+    if (iter.IsAtEnd()) return isolate->heap()->false_value();
+    if (iter.IsAtEnd(O)) return isolate->heap()->true_value();
   }
 }
 
 
-static bool CheckAccessException(Object* callback,
-                                 v8::AccessType access_type) {
-  DisallowHeapAllocation no_gc;
-  ASSERT(!callback->IsForeign());
-  if (callback->IsAccessorInfo()) {
-    AccessorInfo* info = AccessorInfo::cast(callback);
-    return
-        (access_type == v8::ACCESS_HAS &&
-           (info->all_can_read() || info->all_can_write())) ||
-        (access_type == v8::ACCESS_GET && info->all_can_read()) ||
-        (access_type == v8::ACCESS_SET && info->all_can_write());
-  }
-  if (callback->IsAccessorPair()) {
-    AccessorPair* info = AccessorPair::cast(callback);
-    return
-        (access_type == v8::ACCESS_HAS &&
-           (info->all_can_read() || info->all_can_write())) ||
-        (access_type == v8::ACCESS_GET && info->all_can_read()) ||
-        (access_type == v8::ACCESS_SET && info->all_can_write());
-  }
-  return false;
-}
-
-
-template<class Key>
-static bool CheckGenericAccess(
-    Handle<JSObject> receiver,
-    Handle<JSObject> holder,
-    Key key,
-    v8::AccessType access_type,
-    bool (Isolate::*mayAccess)(Handle<JSObject>, Key, v8::AccessType)) {
-  Isolate* isolate = receiver->GetIsolate();
-  for (Handle<JSObject> current = receiver;
-       true;
-       current = handle(JSObject::cast(current->GetPrototype()), isolate)) {
-    if (current->IsAccessCheckNeeded() &&
-        !(isolate->*mayAccess)(current, key, access_type)) {
-      return false;
-    }
-    if (current.is_identical_to(holder)) break;
-  }
-  return true;
-}
-
-
-enum AccessCheckResult {
-  ACCESS_FORBIDDEN,
-  ACCESS_ALLOWED,
-  ACCESS_ABSENT
-};
-
-
-static AccessCheckResult CheckPropertyAccess(Handle<JSObject> obj,
-                                             Handle<Name> name,
-                                             v8::AccessType access_type) {
-  uint32_t index;
-  if (name->AsArrayIndex(&index)) {
-    // TODO(1095): we should traverse hidden prototype hierachy as well.
-    if (CheckGenericAccess(
-            obj, obj, index, access_type, &Isolate::MayIndexedAccess)) {
-      return ACCESS_ALLOWED;
-    }
-
-    obj->GetIsolate()->ReportFailedAccessCheck(obj, access_type);
-    return ACCESS_FORBIDDEN;
-  }
-
-  Isolate* isolate = obj->GetIsolate();
-  LookupResult lookup(isolate);
-  obj->LookupOwn(name, &lookup, true);
-
-  if (!lookup.IsProperty()) return ACCESS_ABSENT;
-  Handle<JSObject> holder(lookup.holder(), isolate);
-  if (CheckGenericAccess<Handle<Object> >(
-          obj, holder, name, access_type, &Isolate::MayNamedAccess)) {
-    return ACCESS_ALLOWED;
-  }
-
-  // Access check callback denied the access, but some properties
-  // can have a special permissions which override callbacks descision
-  // (currently see v8::AccessControl).
-  // API callbacks can have per callback access exceptions.
-  switch (lookup.type()) {
-    case CALLBACKS:
-      if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
-        return ACCESS_ALLOWED;
-      }
-      break;
-    case INTERCEPTOR:
-      // If the object has an interceptor, try real named properties.
-      // Overwrite the result to fetch the correct property later.
-      holder->LookupRealNamedProperty(name, &lookup);
-      if (lookup.IsProperty() && lookup.IsPropertyCallbacks()) {
-        if (CheckAccessException(lookup.GetCallbackObject(), access_type)) {
-          return ACCESS_ALLOWED;
-        }
-      }
-      break;
-    default:
-      break;
-  }
-
-  isolate->ReportFailedAccessCheck(obj, access_type);
-  return ACCESS_FORBIDDEN;
-}
-
-
 // Enumerator used as indices into the array returned from GetOwnProperty
 enum PropertyDescriptorIndices {
   IS_ACCESSOR_INDEX,
@@ -2012,61 +1968,67 @@
                                                           Handle<Name> name) {
   Heap* heap = isolate->heap();
   Factory* factory = isolate->factory();
-  // Due to some WebKit tests, we want to make sure that we do not log
-  // more than one access failure here.
-  AccessCheckResult access_check_result =
-      CheckPropertyAccess(obj, name, v8::ACCESS_HAS);
-  RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-  switch (access_check_result) {
-    case ACCESS_FORBIDDEN: return factory->false_value();
-    case ACCESS_ALLOWED: break;
-    case ACCESS_ABSENT: return factory->undefined_value();
-  }
 
-  PropertyAttributes attrs = JSReceiver::GetOwnPropertyAttributes(obj, name);
-  if (attrs == ABSENT) {
-    RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    return factory->undefined_value();
+  PropertyAttributes attrs;
+  uint32_t index = 0;
+  Handle<Object> value;
+  MaybeHandle<AccessorPair> maybe_accessors;
+  // TODO(verwaest): Unify once indexed properties can be handled by the
+  // LookupIterator.
+  if (name->AsArrayIndex(&index)) {
+    // Get attributes.
+    Maybe<PropertyAttributes> maybe =
+        JSReceiver::GetOwnElementAttribute(obj, index);
+    if (!maybe.has_value) return MaybeHandle<Object>();
+    attrs = maybe.value;
+    if (attrs == ABSENT) return factory->undefined_value();
+
+    // Get AccessorPair if present.
+    maybe_accessors = JSObject::GetOwnElementAccessorPair(obj, index);
+
+    // Get value if not an AccessorPair.
+    if (maybe_accessors.is_null()) {
+      ASSIGN_RETURN_ON_EXCEPTION(isolate, value,
+          Runtime::GetElementOrCharAt(isolate, obj, index), Object);
+    }
+  } else {
+    // Get attributes.
+    LookupIterator it(obj, name, LookupIterator::HIDDEN);
+    Maybe<PropertyAttributes> maybe = JSObject::GetPropertyAttributes(&it);
+    if (!maybe.has_value) return MaybeHandle<Object>();
+    attrs = maybe.value;
+    if (attrs == ABSENT) return factory->undefined_value();
+
+    // Get AccessorPair if present.
+    if (it.state() == LookupIterator::ACCESSOR &&
+        it.GetAccessors()->IsAccessorPair()) {
+      maybe_accessors = Handle<AccessorPair>::cast(it.GetAccessors());
+    }
+
+    // Get value if not an AccessorPair.
+    if (maybe_accessors.is_null()) {
+      ASSIGN_RETURN_ON_EXCEPTION(
+          isolate, value, Object::GetProperty(&it), Object);
+    }
   }
-  ASSERT(!isolate->has_scheduled_exception());
-  Handle<AccessorPair> accessors;
-  bool has_accessors =
-      JSObject::GetOwnPropertyAccessorPair(obj, name).ToHandle(&accessors);
-  Handle<FixedArray> elms = isolate->factory()->NewFixedArray(DESCRIPTOR_SIZE);
+  DCHECK(!isolate->has_pending_exception());
+  Handle<FixedArray> elms = factory->NewFixedArray(DESCRIPTOR_SIZE);
   elms->set(ENUMERABLE_INDEX, heap->ToBoolean((attrs & DONT_ENUM) == 0));
   elms->set(CONFIGURABLE_INDEX, heap->ToBoolean((attrs & DONT_DELETE) == 0));
-  elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(has_accessors));
+  elms->set(IS_ACCESSOR_INDEX, heap->ToBoolean(!maybe_accessors.is_null()));
 
-  if (!has_accessors) {
-    elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
-    // Runtime::GetObjectProperty does access check.
-    Handle<Object> value;
-    ASSIGN_RETURN_ON_EXCEPTION(
-        isolate, value, Runtime::GetObjectProperty(isolate, obj, name),
-        Object);
-    elms->set(VALUE_INDEX, *value);
-  } else {
-    // Access checks are performed for both accessors separately.
-    // When they fail, the respective field is not set in the descriptor.
+  Handle<AccessorPair> accessors;
+  if (maybe_accessors.ToHandle(&accessors)) {
     Handle<Object> getter(accessors->GetComponent(ACCESSOR_GETTER), isolate);
     Handle<Object> setter(accessors->GetComponent(ACCESSOR_SETTER), isolate);
-
-    if (!getter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_GET)) {
-      ASSERT(!isolate->has_scheduled_exception());
-      elms->set(GETTER_INDEX, *getter);
-    } else {
-      RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    }
-
-    if (!setter->IsMap() && CheckPropertyAccess(obj, name, v8::ACCESS_SET)) {
-      ASSERT(!isolate->has_scheduled_exception());
-      elms->set(SETTER_INDEX, *setter);
-    } else {
-      RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
-    }
+    elms->set(GETTER_INDEX, *getter);
+    elms->set(SETTER_INDEX, *setter);
+  } else {
+    elms->set(WRITABLE_INDEX, heap->ToBoolean((attrs & READ_ONLY) == 0));
+    elms->set(VALUE_INDEX, *value);
   }
 
-  return isolate->factory()->NewJSArrayWithElements(elms);
+  return factory->NewJSArrayWithElements(elms);
 }
 
 
@@ -2079,7 +2041,7 @@
 //         [true, GetFunction, SetFunction, Enumerable, Configurable]
 RUNTIME_FUNCTION(Runtime_GetOwnProperty) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
   Handle<Object> result;
@@ -2091,7 +2053,7 @@
 
 RUNTIME_FUNCTION(Runtime_PreventExtensions) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -2100,15 +2062,58 @@
 }
 
 
+RUNTIME_FUNCTION(Runtime_ToMethod) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 1);
+  Handle<JSFunction> clone = JSFunction::CloneClosure(fun);
+  Handle<Symbol> home_object_symbol(isolate->heap()->home_object_symbol());
+  JSObject::SetOwnPropertyIgnoreAttributes(clone, home_object_symbol,
+                                           home_object, DONT_ENUM).Assert();
+  return *clone;
+}
+
+
+RUNTIME_FUNCTION(Runtime_HomeObjectSymbol) {
+  DCHECK(args.length() == 0);
+  return isolate->heap()->home_object_symbol();
+}
+
+
+RUNTIME_FUNCTION(Runtime_LoadFromSuper) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, home_object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Name, name, 2);
+
+  if (home_object->IsAccessCheckNeeded() &&
+      !isolate->MayNamedAccess(home_object, name, v8::ACCESS_GET)) {
+    isolate->ReportFailedAccessCheck(home_object, v8::ACCESS_GET);
+    RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+  }
+
+  PrototypeIterator iter(isolate, home_object);
+  Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
+  if (!proto->IsJSReceiver()) return isolate->heap()->undefined_value();
+
+  LookupIterator it(receiver, name, Handle<JSReceiver>::cast(proto));
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, Object::GetProperty(&it));
+  return *result;
+}
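
// The essential point of Runtime_LoadFromSuper: the property search starts at
// the [[HomeObject]]'s prototype rather than at the receiver, while the
// receiver is still passed to the LookupIterator so accessors observe the
// right `this`. The chain walk alone, as a standalone sketch with
// hypothetical types:

#include <map>
#include <string>

struct SuperObj {
  std::map<std::string, int> props;
  const SuperObj* proto = nullptr;
};

// Looks up `name` starting one level above the home object, as super.name
// does; returns nullptr for "undefined".
const int* LoadFromSuper(const SuperObj& home_object, const std::string& name) {
  for (const SuperObj* o = home_object.proto; o != nullptr; o = o->proto) {
    auto it = o->props.find(name);
    if (it != o->props.end()) return &it->second;
  }
  return nullptr;
}

int main() {
  SuperObj base{{{"m", 1}}, nullptr};
  SuperObj home{{{"m", 2}}, &base};  // own "m" must be ignored by super
  const int* found = LoadFromSuper(home, "m");
  return (found && *found == 1) ? 0 : 1;
}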
+
+
 RUNTIME_FUNCTION(Runtime_IsExtensible) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, obj, 0);
   if (obj->IsJSGlobalProxy()) {
-    Object* proto = obj->GetPrototype();
-    if (proto->IsNull()) return isolate->heap()->false_value();
-    ASSERT(proto->IsJSGlobalObject());
-    obj = JSObject::cast(proto);
+    PrototypeIterator iter(isolate, obj);
+    if (iter.IsAtEnd()) return isolate->heap()->false_value();
+    DCHECK(iter.GetCurrent()->IsJSGlobalObject());
+    obj = JSObject::cast(iter.GetCurrent());
   }
   return isolate->heap()->ToBoolean(obj->map()->is_extensible());
 }
@@ -2116,7 +2121,7 @@
 
 RUNTIME_FUNCTION(Runtime_RegExpCompile) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
@@ -2129,7 +2134,7 @@
 
 RUNTIME_FUNCTION(Runtime_CreateApiFunction) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(FunctionTemplateInfo, data, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, prototype, 1);
   return *isolate->factory()->CreateApiFunction(data, prototype);
@@ -2138,7 +2143,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsTemplate) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, arg, 0);
   bool result = arg->IsObjectTemplateInfo() || arg->IsFunctionTemplateInfo();
   return isolate->heap()->ToBoolean(result);
@@ -2147,7 +2152,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetTemplateField) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_CHECKED(HeapObject, templ, 0);
   CONVERT_SMI_ARG_CHECKED(index, 1);
   int offset = index * kPointerSize + HeapObject::kHeaderSize;
@@ -2166,7 +2171,7 @@
 
 RUNTIME_FUNCTION(Runtime_DisableAccessChecks) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
   Handle<Map> old_map(object->map());
   bool needs_access_checks = old_map->is_access_check_needed();
@@ -2174,11 +2179,7 @@
     // Copy map so it won't interfere with the constructor's initial map.
     Handle<Map> new_map = Map::Copy(old_map);
     new_map->set_is_access_check_needed(false);
-    if (object->IsJSObject()) {
-      JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
-    } else {
-      object->set_map(*new_map);
-    }
+    JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
   }
   return isolate->heap()->ToBoolean(needs_access_checks);
 }
@@ -2186,52 +2187,14 @@
 
 RUNTIME_FUNCTION(Runtime_EnableAccessChecks) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(HeapObject, object, 0);
-  Handle<Map> old_map(object->map());
-  if (!old_map->is_access_check_needed()) {
-    // Copy map so it won't interfere constructor's initial map.
-    Handle<Map> new_map = Map::Copy(old_map);
-    new_map->set_is_access_check_needed(true);
-    if (object->IsJSObject()) {
-      JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
-    } else {
-      object->set_map(*new_map);
-    }
-  }
-  return isolate->heap()->undefined_value();
-}
-
-
-// Transform getter or setter into something DefineAccessor can handle.
-static Handle<Object> InstantiateAccessorComponent(Isolate* isolate,
-                                                   Handle<Object> component) {
-  if (component->IsUndefined()) return isolate->factory()->null_value();
-  Handle<FunctionTemplateInfo> info =
-      Handle<FunctionTemplateInfo>::cast(component);
-  return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction());
-}
-
-
-RUNTIME_FUNCTION(Runtime_SetAccessorProperty) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 6);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
-  CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
-  CONVERT_SMI_ARG_CHECKED(attribute, 4);
-  CONVERT_SMI_ARG_CHECKED(access_control, 5);
-  RUNTIME_ASSERT(getter->IsUndefined() || getter->IsFunctionTemplateInfo());
-  RUNTIME_ASSERT(setter->IsUndefined() || setter->IsFunctionTemplateInfo());
-  RUNTIME_ASSERT(PropertyDetails::AttributesField::is_valid(
-      static_cast<PropertyAttributes>(attribute)));
-  JSObject::DefineAccessor(object,
-                           name,
-                           InstantiateAccessorComponent(isolate, getter),
-                           InstantiateAccessorComponent(isolate, setter),
-                           static_cast<PropertyAttributes>(attribute),
-                           static_cast<v8::AccessControl>(access_control));
+  Handle<Map> old_map(object->map());
+  RUNTIME_ASSERT(!old_map->is_access_check_needed());
+  // Copy map so it won't interfere with the constructor's initial map.
+  Handle<Map> new_map = Map::Copy(old_map);
+  new_map->set_is_access_check_needed(true);
+  JSObject::MigrateToMap(object, new_map);
   return isolate->heap()->undefined_value();
 }
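
// Both access-check toggles follow the same copy-on-write discipline: a map
// may be shared with other objects (notably via the constructor's initial
// map), so the flag is never flipped in place; the map is copied, the copy is
// mutated, and only this object is migrated. The discipline in a standalone
// sketch with hypothetical simplified types:

#include <memory>
#include <utility>

struct Shape {
  bool access_checks = false;
};

struct Obj {
  std::shared_ptr<const Shape> shape;  // potentially shared metadata
  void EnableAccessChecks() {
    auto copy = std::make_shared<Shape>(*shape);  // Map::Copy analogue
    copy->access_checks = true;
    shape = std::move(copy);  // MigrateToMap analogue: repoint this object only
  }
};

int main() {
  auto initial = std::make_shared<const Shape>();
  Obj a{initial}, b{initial};  // both objects share the initial shape
  a.EnableAccessChecks();      // must not affect b
  return (a.shape->access_checks && !b.shape->access_checks) ? 0 : 1;
}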
 
@@ -2239,17 +2202,61 @@
 static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name) {
   HandleScope scope(isolate);
   Handle<Object> args[1] = { name };
-  Handle<Object> error = isolate->factory()->NewTypeError(
-      "var_redeclaration", HandleVector(args, 1));
-  return isolate->Throw(*error);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError("var_redeclaration", HandleVector(args, 1)));
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_DeclareGlobals) {
+// May throw a RedeclarationError.
+static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
+                              Handle<String> name, Handle<Object> value,
+                              PropertyAttributes attr, bool is_var,
+                              bool is_const, bool is_function) {
+  // Do the lookup on own properties only; see ES5 erratum.
+  LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+  Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+  if (!maybe.has_value) return isolate->heap()->exception();
+
+  if (it.IsFound()) {
+    PropertyAttributes old_attributes = maybe.value;
+    // The name was declared before; check for conflicting re-declarations.
+    if (is_const) return ThrowRedeclarationError(isolate, name);
+
+    // Skip var re-declarations.
+    if (is_var) return isolate->heap()->undefined_value();
+
+    DCHECK(is_function);
+    if ((old_attributes & DONT_DELETE) != 0) {
+      // Only allow reconfiguring globals to functions in user code (no
+      // natives, which are marked as read-only).
+      DCHECK((attr & READ_ONLY) == 0);
+
+      // Check whether we can reconfigure the existing property into a
+      // function.
+      PropertyDetails old_details = it.property_details();
+      // TODO(verwaest): CALLBACKS invalidly includes ExecutableAccessInfo,
+      // which are actually data properties, not accessor properties.
+      if (old_details.IsReadOnly() || old_details.IsDontEnum() ||
+          old_details.type() == CALLBACKS) {
+        return ThrowRedeclarationError(isolate, name);
+      }
+      // If the existing property is not configurable, keep its attributes.
+      attr = old_attributes;
+    }
+  }
+
+  // Define or redefine own property.
+  RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                                           global, name, value, attr));
+
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_DeclareGlobals) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  Handle<GlobalObject> global = Handle<GlobalObject>(
-      isolate->context()->global_object());
+  DCHECK(args.length() == 3);
+  Handle<GlobalObject> global(isolate->global_object());
 
   CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, pairs, 1);
@@ -2260,181 +2267,42 @@
   for (int i = 0; i < length; i += 2) {
     HandleScope scope(isolate);
     Handle<String> name(String::cast(pairs->get(i)));
-    Handle<Object> value(pairs->get(i + 1), isolate);
+    Handle<Object> initial_value(pairs->get(i + 1), isolate);
 
     // We have to declare a global const property. To capture it, we only
     // assign to it when evaluating the assignment for "const x = <expr>";
     // until then the initial value is the hole.
-    bool is_var = value->IsUndefined();
-    bool is_const = value->IsTheHole();
-    bool is_function = value->IsSharedFunctionInfo();
-    ASSERT(is_var + is_const + is_function == 1);
+    bool is_var = initial_value->IsUndefined();
+    bool is_const = initial_value->IsTheHole();
+    bool is_function = initial_value->IsSharedFunctionInfo();
+    DCHECK(is_var + is_const + is_function == 1);
 
-    if (is_var || is_const) {
-      // Lookup the property in the global object, and don't set the
-      // value of the variable if the property is already there.
-      // Do the lookup own properties only, see ES5 erratum.
-      LookupResult lookup(isolate);
-      global->LookupOwn(name, &lookup, true);
-      if (lookup.IsFound()) {
-        // We found an existing property. Unless it was an interceptor
-        // that claims the property is absent, skip this declaration.
-        if (!lookup.IsInterceptor()) continue;
-        if (JSReceiver::GetPropertyAttributes(global, name) != ABSENT) continue;
-        // Fall-through and introduce the absent property by using
-        // SetProperty.
-      }
-    } else if (is_function) {
+    Handle<Object> value;
+    if (is_function) {
       // Copy the function and update its context. Use it as value.
       Handle<SharedFunctionInfo> shared =
-          Handle<SharedFunctionInfo>::cast(value);
+          Handle<SharedFunctionInfo>::cast(initial_value);
       Handle<JSFunction> function =
-          isolate->factory()->NewFunctionFromSharedFunctionInfo(
-              shared, context, TENURED);
+          isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
+                                                                TENURED);
       value = function;
+    } else {
+      value = isolate->factory()->undefined_value();
     }
 
-    LookupResult lookup(isolate);
-    global->LookupOwn(name, &lookup, true);
-
     // Compute the property attributes. According to ECMA-262,
     // the property must be non-configurable except in eval.
-    int attr = NONE;
-    bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
-    if (!is_eval) {
-      attr |= DONT_DELETE;
-    }
     bool is_native = DeclareGlobalsNativeFlag::decode(flags);
-    if (is_const || (is_native && is_function)) {
-      attr |= READ_ONLY;
-    }
+    bool is_eval = DeclareGlobalsEvalFlag::decode(flags);
+    int attr = NONE;
+    if (is_const) attr |= READ_ONLY;
+    if (is_function && is_native) attr |= READ_ONLY;
+    if (!is_const && !is_eval) attr |= DONT_DELETE;
 
-    StrictMode strict_mode = DeclareGlobalsStrictMode::decode(flags);
-
-    if (!lookup.IsFound() || is_function) {
-      // If the own property exists, check that we can reconfigure it
-      // as required for function declarations.
-      if (lookup.IsFound() && lookup.IsDontDelete()) {
-        if (lookup.IsReadOnly() || lookup.IsDontEnum() ||
-            lookup.IsPropertyCallbacks()) {
-          return ThrowRedeclarationError(isolate, name);
-        }
-        // If the existing property is not configurable, keep its attributes.
-        attr = lookup.GetAttributes();
-      }
-      // Define or redefine own property.
-      RETURN_FAILURE_ON_EXCEPTION(isolate,
-          JSObject::SetOwnPropertyIgnoreAttributes(
-              global, name, value, static_cast<PropertyAttributes>(attr)));
-    } else {
-      // Do a [[Put]] on the existing (own) property.
-      RETURN_FAILURE_ON_EXCEPTION(
-          isolate,
-          JSObject::SetProperty(
-              global, name, value, static_cast<PropertyAttributes>(attr),
-              strict_mode));
-    }
-  }
-
-  ASSERT(!isolate->has_pending_exception());
-  return isolate->heap()->undefined_value();
-}
-
-
-RUNTIME_FUNCTION(RuntimeHidden_DeclareContextSlot) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
-
-  // Declarations are always made in a function or native context.  In the
-  // case of eval code, the context passed is the context of the caller,
-  // which may be some nested context and not the declaration context.
-  CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 0);
-  Handle<Context> context(context_arg->declaration_context());
-  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
-  CONVERT_SMI_ARG_CHECKED(mode_arg, 2);
-  PropertyAttributes mode = static_cast<PropertyAttributes>(mode_arg);
-  RUNTIME_ASSERT(mode == READ_ONLY || mode == NONE);
-  CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 3);
-
-  int index;
-  PropertyAttributes attributes;
-  ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
-  BindingFlags binding_flags;
-  Handle<Object> holder =
-      context->Lookup(name, flags, &index, &attributes, &binding_flags);
-
-  if (attributes != ABSENT) {
-    // The name was declared before; check for conflicting re-declarations.
-    // Note: this is actually inconsistent with what happens for globals (where
-    // we silently ignore such declarations).
-    if (((attributes & READ_ONLY) != 0) || (mode == READ_ONLY)) {
-      // Functions are not read-only.
-      ASSERT(mode != READ_ONLY || initial_value->IsTheHole());
-      return ThrowRedeclarationError(isolate, name);
-    }
-
-    // Initialize it if necessary.
-    if (*initial_value != NULL) {
-      if (index >= 0) {
-        ASSERT(holder.is_identical_to(context));
-        if (((attributes & READ_ONLY) == 0) ||
-            context->get(index)->IsTheHole()) {
-          context->set(index, *initial_value);
-        }
-      } else {
-        // Slow case: The property is in the context extension object of a
-        // function context or the global object of a native context.
-        Handle<JSObject> object = Handle<JSObject>::cast(holder);
-        RETURN_FAILURE_ON_EXCEPTION(
-            isolate,
-            JSReceiver::SetProperty(object, name, initial_value, mode, SLOPPY));
-      }
-    }
-
-  } else {
-    // The property is not in the function context. It needs to be
-    // "declared" in the function context's extension context or as a
-    // property of the the global object.
-    Handle<JSObject> object;
-    if (context->has_extension()) {
-      object = Handle<JSObject>(JSObject::cast(context->extension()));
-    } else {
-      // Context extension objects are allocated lazily.
-      ASSERT(context->IsFunctionContext());
-      object = isolate->factory()->NewJSObject(
-          isolate->context_extension_function());
-      context->set_extension(*object);
-    }
-    ASSERT(*object != NULL);
-
-    // Declare the property by setting it to the initial value if provided,
-    // or undefined, and use the correct mode (e.g. READ_ONLY attribute for
-    // constant declarations).
-    ASSERT(!JSReceiver::HasOwnProperty(object, name));
-    Handle<Object> value(isolate->heap()->undefined_value(), isolate);
-    if (*initial_value != NULL) value = initial_value;
-    // Declaring a const context slot is a conflicting declaration if
-    // there is a callback with that name in a prototype. It is
-    // allowed to introduce const variables in
-    // JSContextExtensionObjects. They are treated specially in
-    // SetProperty and no setters are invoked for those since they are
-    // not real JSObjects.
-    if (initial_value->IsTheHole() &&
-        !object->IsJSContextExtensionObject()) {
-      LookupResult lookup(isolate);
-      object->Lookup(name, &lookup);
-      if (lookup.IsPropertyCallbacks()) {
-        return ThrowRedeclarationError(isolate, name);
-      }
-    }
-    if (object->IsJSGlobalObject()) {
-      // Define own property on the global object.
-      RETURN_FAILURE_ON_EXCEPTION(isolate,
-         JSObject::SetOwnPropertyIgnoreAttributes(object, name, value, mode));
-    } else {
-      RETURN_FAILURE_ON_EXCEPTION(isolate,
-         JSReceiver::SetProperty(object, name, value, mode, SLOPPY));
-    }
+    Object* result = DeclareGlobals(isolate, global, name, value,
+                                    static_cast<PropertyAttributes>(attr),
+                                    is_var, is_const, is_function);
+    if (isolate->has_pending_exception()) return result;
   }
 
   return isolate->heap()->undefined_value();
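
// The attribute computation above is the heart of the new declaration logic;
// pulled out as a pure function with a couple of checks (hypothetical enum
// values mirroring the PropertyAttributes bits used here):

#include <cassert>

enum : int { kNone = 0, kReadOnly = 1, kDontDelete = 4 };

int GlobalDeclarationAttributes(bool is_const, bool is_function,
                                bool is_native, bool is_eval) {
  int attr = kNone;
  if (is_const) attr |= kReadOnly;
  if (is_function && is_native) attr |= kReadOnly;
  if (!is_const && !is_eval) attr |= kDontDelete;
  return attr;
}

int main() {
  // A script-level `var` is non-deletable; the same `var` in eval is not.
  assert(GlobalDeclarationAttributes(false, false, false, false) ==
         kDontDelete);
  assert(GlobalDeclarationAttributes(false, false, false, true) == kNone);
  // A native function declaration is both read-only and non-deletable.
  assert(GlobalDeclarationAttributes(false, true, true, false) ==
         (kReadOnly | kDontDelete));
  return 0;
}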
@@ -2449,60 +2317,22 @@
 
   // Determine if we need to assign to the variable if it already
   // exists (based on the number of arguments).
-  RUNTIME_ASSERT(args.length() == 2 || args.length() == 3);
-  bool assign = args.length() == 3;
+  RUNTIME_ASSERT(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
 
-  // According to ECMA-262, section 12.2, page 62, the property must
-  // not be deletable.
-  PropertyAttributes attributes = DONT_DELETE;
-
-  // Lookup the property as own on the global object. If it isn't
-  // there, there is a property with this name in the prototype chain.
-  // We follow Safari and Firefox behavior and only set the property
-  // if there is an explicit initialization value that we have
-  // to assign to the property.
-  // Note that objects can have hidden prototypes, so we need to traverse
-  // the whole chain of hidden prototypes to do an 'own' lookup.
-  LookupResult lookup(isolate);
-  isolate->context()->global_object()->LookupOwn(name, &lookup, true);
-  if (lookup.IsInterceptor()) {
-    Handle<JSObject> holder(lookup.holder());
-    PropertyAttributes intercepted =
-        JSReceiver::GetPropertyAttributes(holder, name);
-    if (intercepted != ABSENT && (intercepted & READ_ONLY) == 0) {
-      // Found an interceptor that's not read only.
-      if (assign) {
-        CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-        Handle<Object> result;
-        ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-            isolate, result,
-            JSObject::SetPropertyForResult(
-                holder, &lookup, name, value, attributes, strict_mode));
-        return *result;
-      } else {
-        return isolate->heap()->undefined_value();
-      }
-    }
-  }
-
-  if (assign) {
-    CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-    Handle<GlobalObject> global(isolate->context()->global_object());
-    Handle<Object> result;
-    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-        isolate, result,
-        JSReceiver::SetProperty(global, name, value, attributes, strict_mode));
-    return *result;
-  }
-  return isolate->heap()->undefined_value();
+  Handle<GlobalObject> global(isolate->context()->global_object());
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, Object::SetProperty(global, name, value, strict_mode));
+  return *result;
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_InitializeConstGlobal) {
-  SealHandleScope shs(isolate);
+RUNTIME_FUNCTION(Runtime_InitializeConstGlobal) {
+  HandleScope handle_scope(isolate);
   // All constants are declared with an initial value. The name
   // of the constant is the first argument and the initial value
   // is the second.
@@ -2510,79 +2340,119 @@
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
 
-  // Get the current global object from top.
-  GlobalObject* global = isolate->context()->global_object();
+  Handle<GlobalObject> global = isolate->global_object();
 
-  // According to ECMA-262, section 12.2, page 62, the property must
-  // not be deletable. Since it's a const, it must be READ_ONLY too.
-  PropertyAttributes attributes =
+  // Lookup the property as own on the global object.
+  LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+  Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+  DCHECK(maybe.has_value);
+  PropertyAttributes old_attributes = maybe.value;
+
+  PropertyAttributes attr =
       static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
-
-  // Lookup the property as own on the global object. If it isn't
-  // there, we add the property and take special precautions to always
-  // add it even in case of callbacks in the prototype chain (this rules
-  // out using SetProperty). We use SetOwnPropertyIgnoreAttributes instead
-  LookupResult lookup(isolate);
-  global->LookupOwn(name, &lookup);
-  if (!lookup.IsFound()) {
-    HandleScope handle_scope(isolate);
-    Handle<GlobalObject> global(isolate->context()->global_object());
-    RETURN_FAILURE_ON_EXCEPTION(
-        isolate,
-        JSObject::SetOwnPropertyIgnoreAttributes(global, name, value,
-                                                 attributes));
-    return *value;
-  }
-
-  if (!lookup.IsReadOnly()) {
-    // Restore global object from context (in case of GC) and continue
-    // with setting the value.
-    HandleScope handle_scope(isolate);
-    Handle<GlobalObject> global(isolate->context()->global_object());
-
-    // BUG 1213575: Handle the case where we have to set a read-only
-    // property through an interceptor and only do it if it's
-    // uninitialized, e.g. the hole. Nirk...
-    // Passing sloppy mode because the property is writable.
-    RETURN_FAILURE_ON_EXCEPTION(
-        isolate,
-        JSReceiver::SetProperty(global, name, value, attributes, SLOPPY));
-    return *value;
-  }
-
-  // Set the value, but only if we're assigning the initial value to a
-  // constant. For now, we determine this by checking if the
-  // current value is the hole.
-  // Strict mode handling not needed (const is disallowed in strict mode).
-  if (lookup.IsField()) {
-    FixedArray* properties = global->properties();
-    int index = lookup.GetFieldIndex().outobject_array_index();
-    if (properties->get(index)->IsTheHole() || !lookup.IsReadOnly()) {
-      properties->set(index, *value);
+  // Set the value if the property is either missing, or the property attributes
+  // allow setting the value without invoking an accessor.
+  if (it.IsFound()) {
+    // Ignore if we can't reconfigure the value.
+    if ((old_attributes & DONT_DELETE) != 0) {
+      if ((old_attributes & READ_ONLY) != 0 ||
+          it.state() == LookupIterator::ACCESSOR) {
+        return *value;
+      }
+      attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY);
     }
-  } else if (lookup.IsNormal()) {
-    if (global->GetNormalizedProperty(&lookup)->IsTheHole() ||
-        !lookup.IsReadOnly()) {
-      HandleScope scope(isolate);
-      JSObject::SetNormalizedProperty(Handle<JSObject>(global), &lookup, value);
-    }
-  } else {
-    // Ignore re-initialization of constants that have already been
-    // assigned a constant value.
-    ASSERT(lookup.IsReadOnly() && lookup.IsConstant());
   }
 
-  // Use the set value as the result of the operation.
+  RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                                           global, name, value, attr));
+
   return *value;
 }
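
// The reconfiguration rule above, in isolation: an existing non-configurable
// slot may be overwritten by the const initializer only while it is still a
// writable data property, and the write then keeps the old attributes plus
// READ_ONLY. A standalone sketch with hypothetical flag values:

#include <cassert>

inline bool ConstInitAllowed(int old_attributes, bool is_accessor,
                             int* attr_out) {
  const int kReadOnly = 1, kDontDelete = 4;
  *attr_out = kDontDelete | kReadOnly;  // default for a fresh const slot
  if ((old_attributes & kDontDelete) != 0) {
    // Non-configurable: bail out if already read-only or an accessor.
    if ((old_attributes & kReadOnly) != 0 || is_accessor) return false;
    *attr_out = old_attributes | kReadOnly;
  }
  return true;
}

int main() {
  int attr = 0;
  assert(!ConstInitAllowed(/*old=*/1 | 4, /*is_accessor=*/false, &attr));
  assert(ConstInitAllowed(/*old=*/4, /*is_accessor=*/false, &attr) &&
         attr == 5);
  return 0;
}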
 
 
-RUNTIME_FUNCTION(RuntimeHidden_InitializeConstContextSlot) {
+RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 4);
+
+  // Declarations are always made in a function, native, or global context. In
+  // the case of eval code, the context passed is the context of the caller,
+  // which may be some nested context and not the declaration context.
+  CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 0);
+  Handle<Context> context(context_arg->declaration_context());
+  CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
+  CONVERT_SMI_ARG_CHECKED(attr_arg, 2);
+  PropertyAttributes attr = static_cast<PropertyAttributes>(attr_arg);
+  RUNTIME_ASSERT(attr == READ_ONLY || attr == NONE);
+  CONVERT_ARG_HANDLE_CHECKED(Object, initial_value, 3);
+
+  // TODO(verwaest): Unify the encoding indicating "var" with DeclareGlobals.
+  bool is_var = *initial_value == NULL;
+  bool is_const = initial_value->IsTheHole();
+  bool is_function = initial_value->IsJSFunction();
+  DCHECK(is_var + is_const + is_function == 1);
+
+  int index;
+  PropertyAttributes attributes;
+  ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
+  BindingFlags binding_flags;
+  Handle<Object> holder =
+      context->Lookup(name, flags, &index, &attributes, &binding_flags);
+
+  Handle<JSObject> object;
+  Handle<Object> value =
+      is_function ? initial_value
+                  : Handle<Object>::cast(isolate->factory()->undefined_value());
+
+  // TODO(verwaest): This case should probably not be covered by this function,
+  // but by DeclareGlobals instead.
+  if ((attributes != ABSENT && holder->IsJSGlobalObject()) ||
+      (context_arg->has_extension() &&
+       context_arg->extension()->IsJSGlobalObject())) {
+    return DeclareGlobals(isolate, Handle<JSGlobalObject>::cast(holder), name,
+                          value, attr, is_var, is_const, is_function);
+  }
+
+  if (attributes != ABSENT) {
+    // The name was declared before; check for conflicting re-declarations.
+    if (is_const || (attributes & READ_ONLY) != 0) {
+      return ThrowRedeclarationError(isolate, name);
+    }
+
+    // Skip var re-declarations.
+    if (is_var) return isolate->heap()->undefined_value();
+
+    DCHECK(is_function);
+    if (index >= 0) {
+      DCHECK(holder.is_identical_to(context));
+      context->set(index, *initial_value);
+      return isolate->heap()->undefined_value();
+    }
+
+    object = Handle<JSObject>::cast(holder);
+
+  } else if (context->has_extension()) {
+    object = handle(JSObject::cast(context->extension()));
+    DCHECK(object->IsJSContextExtensionObject() || object->IsJSGlobalObject());
+  } else {
+    DCHECK(context->IsFunctionContext());
+    object =
+        isolate->factory()->NewJSObject(isolate->context_extension_function());
+    context->set_extension(*object);
+  }
+
+  RETURN_FAILURE_ON_EXCEPTION(isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                                           object, name, value, attr));
+
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
-  ASSERT(!value->IsTheHole());
+  DCHECK(!value->IsTheHole());
   // Initializations are always done in a function or native context.
   CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 1);
   Handle<Context> context(context_arg->declaration_context());
@@ -2590,93 +2460,65 @@
 
   int index;
   PropertyAttributes attributes;
-  ContextLookupFlags flags = FOLLOW_CHAINS;
+  ContextLookupFlags flags = DONT_FOLLOW_CHAINS;
   BindingFlags binding_flags;
   Handle<Object> holder =
       context->Lookup(name, flags, &index, &attributes, &binding_flags);
 
   if (index >= 0) {
-    ASSERT(holder->IsContext());
-    // Property was found in a context.  Perform the assignment if we
-    // found some non-constant or an uninitialized constant.
+    DCHECK(holder->IsContext());
+    // Property was found in a context.  Perform the assignment if the constant
+    // was uninitialized.
     Handle<Context> context = Handle<Context>::cast(holder);
-    if ((attributes & READ_ONLY) == 0 || context->get(index)->IsTheHole()) {
-      context->set(index, *value);
-    }
+    DCHECK((attributes & READ_ONLY) != 0);
+    if (context->get(index)->IsTheHole()) context->set(index, *value);
     return *value;
   }
 
-  // The property could not be found, we introduce it as a property of the
-  // global object.
+  PropertyAttributes attr =
+      static_cast<PropertyAttributes>(DONT_DELETE | READ_ONLY);
+
+  // Strict mode handling not needed (legacy const is disallowed in strict
+  // mode).
+
+  // The declared const was configurable, and may have been deleted in the
+  // meantime. If so, re-introduce the variable in the context extension.
+  DCHECK(context_arg->has_extension());
   if (attributes == ABSENT) {
-    Handle<JSObject> global = Handle<JSObject>(
-        isolate->context()->global_object());
-    // Strict mode not needed (const disallowed in strict mode).
-    RETURN_FAILURE_ON_EXCEPTION(
-        isolate,
-        JSReceiver::SetProperty(global, name, value, NONE, SLOPPY));
-    return *value;
-  }
-
-  // The property was present in some function's context extension object,
-  // as a property on the subject of a with, or as a property of the global
-  // object.
-  //
-  // In most situations, eval-introduced consts should still be present in
-  // the context extension object.  However, because declaration and
-  // initialization are separate, the property might have been deleted
-  // before we reach the initialization point.
-  //
-  // Example:
-  //
-  //    function f() { eval("delete x; const x;"); }
-  //
-  // In that case, the initialization behaves like a normal assignment.
-  Handle<JSObject> object = Handle<JSObject>::cast(holder);
-
-  if (*object == context->extension()) {
-    // This is the property that was introduced by the const declaration.
-    // Set it if it hasn't been set before.  NOTE: We cannot use
-    // GetProperty() to get the current value as it 'unholes' the value.
-    LookupResult lookup(isolate);
-    object->LookupOwnRealNamedProperty(name, &lookup);
-    ASSERT(lookup.IsFound());  // the property was declared
-    ASSERT(lookup.IsReadOnly());  // and it was declared as read-only
-
-    if (lookup.IsField()) {
-      FixedArray* properties = object->properties();
-      FieldIndex index = lookup.GetFieldIndex();
-      ASSERT(!index.is_inobject());
-      if (properties->get(index.outobject_array_index())->IsTheHole()) {
-        properties->set(index.outobject_array_index(), *value);
-      }
-    } else if (lookup.IsNormal()) {
-      if (object->GetNormalizedProperty(&lookup)->IsTheHole()) {
-        JSObject::SetNormalizedProperty(object, &lookup, value);
-      }
-    } else {
-      // We should not reach here. Any real, named property should be
-      // either a field or a dictionary slot.
-      UNREACHABLE();
-    }
+    holder = handle(context_arg->extension(), isolate);
   } else {
-    // The property was found on some other object.  Set it if it is not a
-    // read-only property.
-    if ((attributes & READ_ONLY) == 0) {
-      // Strict mode not needed (const disallowed in strict mode).
-      RETURN_FAILURE_ON_EXCEPTION(
-          isolate,
-          JSReceiver::SetProperty(object, name, value, attributes, SLOPPY));
+    // For JSContextExtensionObjects, the initializer can run multiple times,
+    // e.g. in a for loop: for (var i = 0; i < 2; i++) { const x = i; }. Only
+    // the first assignment should go through. For JSGlobalObjects, arbitrary
+    // code can additionally run in between and modify the declared property.
+    DCHECK(holder->IsJSGlobalObject() || holder->IsJSContextExtensionObject());
+
+    LookupIterator it(holder, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+    Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+    if (!maybe.has_value) return isolate->heap()->exception();
+    PropertyAttributes old_attributes = maybe.value;
+
+    // Ignore if we can't reconfigure the value.
+    if ((old_attributes & DONT_DELETE) != 0) {
+      if ((old_attributes & READ_ONLY) != 0 ||
+          it.state() == LookupIterator::ACCESSOR) {
+        return *value;
+      }
+      attr = static_cast<PropertyAttributes>(old_attributes | READ_ONLY);
     }
   }
 
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                   Handle<JSObject>::cast(holder), name, value, attr));
+
   return *value;
 }
 
 
 RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_SMI_ARG_CHECKED(properties, 1);
   // Conservative upper limit to prevent fuzz tests from going OOM.
@@ -2688,15 +2530,15 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_RegExpExec) {
+RUNTIME_FUNCTION(Runtime_RegExpExecRT) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
+  CONVERT_INT32_ARG_CHECKED(index, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
   // Due to the way the JS calls are constructed this must be less than the
   // length of a string, i.e. it is always a Smi.  We check anyway for security.
-  CONVERT_SMI_ARG_CHECKED(index, 2);
-  CONVERT_ARG_HANDLE_CHECKED(JSArray, last_match_info, 3);
   RUNTIME_ASSERT(index >= 0);
   RUNTIME_ASSERT(index <= subject->length());
   isolate->counters()->regexp_entry_runtime()->Increment();
@@ -2708,9 +2550,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_RegExpConstructResult) {
+RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_SMI_ARG_CHECKED(size, 0);
   RUNTIME_ASSERT(size >= 0 && size <= FixedArray::kMaxLength);
   CONVERT_ARG_HANDLE_CHECKED(Object, index, 1);
@@ -2731,7 +2573,7 @@
 
 RUNTIME_FUNCTION(Runtime_RegExpInitializeObject) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 5);
+  DCHECK(args.length() == 6);
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
   // If source is the empty string we set it to "(?:)" instead as
@@ -2747,9 +2589,13 @@
   CONVERT_ARG_HANDLE_CHECKED(Object, multiline, 4);
   if (!multiline->IsTrue()) multiline = isolate->factory()->false_value();
 
+  CONVERT_ARG_HANDLE_CHECKED(Object, sticky, 5);
+  if (!sticky->IsTrue()) sticky = isolate->factory()->false_value();
+
   Map* map = regexp->map();
   Object* constructor = map->constructor();
-  if (constructor->IsJSFunction() &&
+  if (!FLAG_harmony_regexps &&
+      constructor->IsJSFunction() &&
       JSFunction::cast(constructor)->initial_map() == map) {
     // If we still have the original map, set in-object properties directly.
     regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *source);
@@ -2766,7 +2612,11 @@
     return *regexp;
   }
 
-  // Map has changed, so use generic, but slower, method.
+  // Map has changed, so use generic, but slower, method.  We also end here if
+  // the --harmony-regexps flag is set, because the initial map does not have
+  // space for the 'sticky' flag, since it is from the snapshot, but must work
+  // both with and without --harmony-regexps.  When sticky comes out from under
+  // the flag, we will be able to use the fast initial map.
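+  // (In JavaScript source, 'sticky' corresponds to the /y regexp flag.)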
   PropertyAttributes final =
       static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
   PropertyAttributes writable =
@@ -2781,6 +2631,10 @@
       regexp, factory->ignore_case_string(), ignoreCase, final).Check();
   JSObject::SetOwnPropertyIgnoreAttributes(
       regexp, factory->multiline_string(), multiline, final).Check();
+  if (FLAG_harmony_regexps) {
+    JSObject::SetOwnPropertyIgnoreAttributes(
+        regexp, factory->sticky_string(), sticky, final).Check();
+  }
   JSObject::SetOwnPropertyIgnoreAttributes(
       regexp, factory->last_index_string(), zero, writable).Check();
   return *regexp;
@@ -2789,7 +2643,7 @@
 
 RUNTIME_FUNCTION(Runtime_FinishArrayPrototypeSetup) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, prototype, 0);
   Object* length = prototype->length();
   RUNTIME_ASSERT(length->IsSmi() && Smi::cast(length)->value() == 0);
@@ -2810,13 +2664,13 @@
   Handle<JSFunction> optimized =
       isolate->factory()->NewFunctionWithoutPrototype(key, code);
   optimized->shared()->DontAdaptArguments();
-  JSReceiver::SetProperty(holder, key, optimized, NONE, STRICT).Assert();
+  JSObject::AddProperty(holder, key, optimized, NONE);
 }
 
 
 RUNTIME_FUNCTION(Runtime_SpecialArrayFunctions) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   Handle<JSObject> holder =
       isolate->factory()->NewJSObject(isolate->object_function());
 
@@ -2834,7 +2688,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsSloppyModeFunction) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
   if (!callable->IsJSFunction()) {
     HandleScope scope(isolate);
@@ -2853,7 +2707,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetDefaultReceiver) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSReceiver, callable, 0);
 
   if (!callable->IsJSFunction()) {
@@ -2874,15 +2728,13 @@
   // Returns undefined for strict or native functions, or
   // the associated global receiver for "normal" functions.
 
-  Context* native_context =
-      function->context()->global_object()->native_context();
-  return native_context->global_object()->global_receiver();
+  return function->global_proxy();
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_MaterializeRegExpLiteral) {
+RUNTIME_FUNCTION(Runtime_MaterializeRegExpLiteral) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, literals, 0);
   CONVERT_SMI_ARG_CHECKED(index, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, pattern, 2);
@@ -2908,7 +2760,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetName) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return f->shared()->name();
@@ -2917,7 +2769,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionSetName) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   CONVERT_ARG_CHECKED(String, name, 1);
@@ -2928,7 +2780,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionNameShouldPrintAsAnonymous) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(
       f->shared()->name_should_print_as_anonymous());
@@ -2937,7 +2789,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionMarkNameShouldPrintAsAnonymous) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   f->shared()->set_name_should_print_as_anonymous(true);
   return isolate->heap()->undefined_value();
@@ -2946,15 +2798,31 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionIsGenerator) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(f->shared()->is_generator());
 }
 
 
+RUNTIME_FUNCTION(Runtime_FunctionIsArrow) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  return isolate->heap()->ToBoolean(f->shared()->is_arrow());
+}
+
+
+RUNTIME_FUNCTION(Runtime_FunctionIsConciseMethod) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(JSFunction, f, 0);
+  return isolate->heap()->ToBoolean(f->shared()->is_concise_method());
+}
+
+
 RUNTIME_FUNCTION(Runtime_FunctionRemovePrototype) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   RUNTIME_ASSERT(f->RemovePrototype());
@@ -2965,7 +2833,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetScript) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   Handle<Object> script = Handle<Object>(fun->shared()->script(), isolate);
@@ -2977,7 +2845,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetSourceCode) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, f, 0);
   Handle<SharedFunctionInfo> shared(f->shared());
@@ -2987,7 +2855,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetScriptSourcePosition) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   int pos = fun->shared()->start_position();
@@ -2997,7 +2865,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetPositionForOffset) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_CHECKED(Code, code, 0);
   CONVERT_NUMBER_CHECKED(int, offset, Int32, args[1]);
@@ -3011,7 +2879,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionSetInstanceClassName) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   CONVERT_ARG_CHECKED(String, name, 1);
@@ -3022,7 +2890,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionSetLength) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_CHECKED(JSFunction, fun, 0);
   CONVERT_SMI_ARG_CHECKED(length, 1);
@@ -3035,7 +2903,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionSetPrototype) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
@@ -3047,7 +2915,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionIsAPIFunction) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(f->shared()->IsApiFunction());
@@ -3056,7 +2924,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionIsBuiltin) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return isolate->heap()->ToBoolean(f->IsBuiltin());
@@ -3065,7 +2933,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetCode) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, target, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, source, 1);
@@ -3080,8 +2948,8 @@
 
   // Mark both the source and the target as un-flushable because the
   // shared unoptimized code makes them impossible to enqueue in a list.
-  ASSERT(target_shared->code()->gc_metadata() == NULL);
-  ASSERT(source_shared->code()->gc_metadata() == NULL);
+  DCHECK(target_shared->code()->gc_metadata() == NULL);
+  DCHECK(source_shared->code()->gc_metadata() == NULL);
   target_shared->set_dont_flush(true);
   source_shared->set_dont_flush(true);
 
@@ -3104,7 +2972,7 @@
 
   // Set the code of the target function.
   target->ReplaceCode(source_shared->code());
-  ASSERT(target->next_function_link()->IsUndefined());
+  DCHECK(target->next_function_link()->IsUndefined());
 
   // Make sure we get a fresh copy of the literal vector to avoid cross
   // context contamination.
@@ -3129,9 +2997,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_CreateJSGeneratorObject) {
+RUNTIME_FUNCTION(Runtime_CreateJSGeneratorObject) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 
   JavaScriptFrameIterator it(isolate);
   JavaScriptFrame* frame = it.frame();
@@ -3155,36 +3023,36 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_SuspendJSGeneratorObject) {
+RUNTIME_FUNCTION(Runtime_SuspendJSGeneratorObject) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator_object, 0);
 
   JavaScriptFrameIterator stack_iterator(isolate);
   JavaScriptFrame* frame = stack_iterator.frame();
   RUNTIME_ASSERT(frame->function()->shared()->is_generator());
-  ASSERT_EQ(frame->function(), generator_object->function());
+  DCHECK_EQ(frame->function(), generator_object->function());
 
   // The caller should have saved the context and continuation already.
-  ASSERT_EQ(generator_object->context(), Context::cast(frame->context()));
-  ASSERT_LT(0, generator_object->continuation());
+  DCHECK_EQ(generator_object->context(), Context::cast(frame->context()));
+  DCHECK_LT(0, generator_object->continuation());
 
   // We expect there to be at least two values on the operand stack: the return
   // value of the yield expression, and the argument to this runtime call.
   // Neither of those should be saved.
   int operands_count = frame->ComputeOperandsCount();
-  ASSERT_GE(operands_count, 2);
+  DCHECK_GE(operands_count, 2);
   operands_count -= 2;
 
   if (operands_count == 0) {
     // Although it's semantically harmless to call this function with an
     // operands_count of zero, it is also unnecessary.
-    ASSERT_EQ(generator_object->operand_stack(),
+    DCHECK_EQ(generator_object->operand_stack(),
               isolate->heap()->empty_fixed_array());
-    ASSERT_EQ(generator_object->stack_handler_index(), -1);
+    DCHECK_EQ(generator_object->stack_handler_index(), -1);
     // If there are no operands on the stack, there shouldn't be a handler
     // active either.
-    ASSERT(!frame->HasHandler());
+    DCHECK(!frame->HasHandler());
   } else {
     int stack_handler_index = -1;
     Handle<FixedArray> operand_stack =
@@ -3205,24 +3073,24 @@
 // inlined into GeneratorNext and GeneratorThrow.  EmitGeneratorResume is
 // called in any case, as it needs to reconstruct the stack frame and make space
 // for arguments and operands.
-RUNTIME_FUNCTION(RuntimeHidden_ResumeJSGeneratorObject) {
+RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_CHECKED(JSGeneratorObject, generator_object, 0);
   CONVERT_ARG_CHECKED(Object, value, 1);
   CONVERT_SMI_ARG_CHECKED(resume_mode_int, 2);
   JavaScriptFrameIterator stack_iterator(isolate);
   JavaScriptFrame* frame = stack_iterator.frame();
 
-  ASSERT_EQ(frame->function(), generator_object->function());
-  ASSERT(frame->function()->is_compiled());
+  DCHECK_EQ(frame->function(), generator_object->function());
+  DCHECK(frame->function()->is_compiled());
 
   STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
   STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
 
   Address pc = generator_object->function()->code()->instruction_start();
   int offset = generator_object->continuation();
-  ASSERT(offset > 0);
+  DCHECK(offset > 0);
   frame->set_pc(pc + offset);
   if (FLAG_enable_ool_constant_pool) {
     frame->set_constant_pool(
@@ -3253,22 +3121,21 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_ThrowGeneratorStateError) {
+RUNTIME_FUNCTION(Runtime_ThrowGeneratorStateError) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
   int continuation = generator->continuation();
   const char* message = continuation == JSGeneratorObject::kGeneratorClosed ?
       "generator_finished" : "generator_running";
   Vector< Handle<Object> > argv = HandleVector<Object>(NULL, 0);
-  Handle<Object> error = isolate->factory()->NewError(message, argv);
-  return isolate->Throw(*error);
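+  // THROW_NEW_ERROR_RETURN_FAILURE schedules the exception on the isolate and
+  // returns the exception sentinel from this runtime function.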
+  THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewError(message, argv));
 }
 
 
 RUNTIME_FUNCTION(Runtime_ObjectFreeze) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
 
   // %ObjectFreeze is a fast path and these cases are handled elsewhere.
@@ -3282,9 +3149,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_StringCharCodeAt) {
+RUNTIME_FUNCTION(Runtime_StringCharCodeAtRT) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, i, Uint32, args[1]);
@@ -3304,7 +3171,7 @@
 
 RUNTIME_FUNCTION(Runtime_CharFromCode) {
   HandleScope handlescope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   if (args[0]->IsNumber()) {
     CONVERT_NUMBER_CHECKED(uint32_t, code, Uint32, args[0]);
     code &= 0xffff;
@@ -3322,7 +3189,7 @@
         has_non_smi_elements_(false) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
-    ASSERT(initial_capacity > 0);
+    DCHECK(initial_capacity > 0);
   }
 
   explicit FixedArrayBuilder(Handle<FixedArray> backing_store)
@@ -3331,7 +3198,7 @@
         has_non_smi_elements_(false) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
-    ASSERT(backing_store->length() > 0);
+    DCHECK(backing_store->length() > 0);
   }
 
   bool HasCapacity(int elements) {
@@ -3356,16 +3223,16 @@
   }
 
   void Add(Object* value) {
-    ASSERT(!value->IsSmi());
-    ASSERT(length_ < capacity());
+    DCHECK(!value->IsSmi());
+    DCHECK(length_ < capacity());
     array_->set(length_, value);
     length_++;
     has_non_smi_elements_ = true;
   }
 
   void Add(Smi* value) {
-    ASSERT(value->IsSmi());
-    ASSERT(length_ < capacity());
+    DCHECK(value->IsSmi());
+    DCHECK(length_ < capacity());
     array_->set(length_, value);
     length_++;
   }
@@ -3416,25 +3283,24 @@
 
 class ReplacementStringBuilder {
  public:
-  ReplacementStringBuilder(Heap* heap,
-                           Handle<String> subject,
+  ReplacementStringBuilder(Heap* heap, Handle<String> subject,
                            int estimated_part_count)
       : heap_(heap),
         array_builder_(heap->isolate(), estimated_part_count),
         subject_(subject),
         character_count_(0),
-        is_ascii_(subject->IsOneByteRepresentation()) {
+        is_one_byte_(subject->IsOneByteRepresentation()) {
     // Require a non-zero initial size. Ensures that doubling the size to
     // extend the array will work.
-    ASSERT(estimated_part_count > 0);
+    DCHECK(estimated_part_count > 0);
   }
 
   static inline void AddSubjectSlice(FixedArrayBuilder* builder,
                                      int from,
                                      int to) {
-    ASSERT(from >= 0);
+    DCHECK(from >= 0);
     int length = to - from;
-    ASSERT(length > 0);
+    DCHECK(length > 0);
     if (StringBuilderSubstringLength::is_valid(length) &&
         StringBuilderSubstringPosition::is_valid(from)) {
       int encoded_slice = StringBuilderSubstringLength::encode(length) |
@@ -3461,10 +3327,10 @@
 
   void AddString(Handle<String> string) {
     int length = string->length();
-    ASSERT(length > 0);
+    DCHECK(length > 0);
     AddElement(*string);
     if (!string->IsOneByteRepresentation()) {
-      is_ascii_ = false;
+      is_one_byte_ = false;
     }
     IncrementCharacterCount(length);
   }
@@ -3477,7 +3343,7 @@
     }
 
     Handle<String> joined_string;
-    if (is_ascii_) {
+    if (is_one_byte_) {
       Handle<SeqOneByteString> seq;
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, seq,
@@ -3492,7 +3358,7 @@
                                 array_builder_.length());
       joined_string = Handle<String>::cast(seq);
     } else {
-      // Non-ASCII.
+      // Two-byte.
       Handle<SeqTwoByteString> seq;
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, seq,
@@ -3522,8 +3388,8 @@
 
  private:
   void AddElement(Object* element) {
-    ASSERT(element->IsSmi() || element->IsString());
-    ASSERT(array_builder_.capacity() > array_builder_.length());
+    DCHECK(element->IsSmi() || element->IsString());
+    DCHECK(array_builder_.capacity() > array_builder_.length());
     array_builder_.Add(element);
   }
 
@@ -3531,7 +3397,7 @@
   FixedArrayBuilder array_builder_;
   Handle<String> subject_;
   int character_count_;
-  bool is_ascii_;
+  bool is_one_byte_;
 };
 
 
@@ -3586,8 +3452,8 @@
       return ReplacementPart(REPLACEMENT_STRING, 0);
     }
     static inline ReplacementPart ReplacementSubString(int from, int to) {
-      ASSERT(from >= 0);
-      ASSERT(to > from);
+      DCHECK(from >= 0);
+      DCHECK(to > from);
       return ReplacementPart(-from, to);
     }
 
@@ -3596,7 +3462,7 @@
     ReplacementPart(int tag, int data)
         : tag(tag), data(data) {
       // Must be non-positive or a PartType value.
-      ASSERT(tag < NUMBER_OF_PART_TYPES);
+      DCHECK(tag < NUMBER_OF_PART_TYPES);
     }
     // Either a value of PartType or a non-positive number that is
     // the negation of an index into the replacement string.
@@ -3699,7 +3565,7 @@
             if (i > last) {
               parts->Add(ReplacementPart::ReplacementSubString(last, i), zone);
             }
-            ASSERT(capture_ref <= capture_count);
+            DCHECK(capture_ref <= capture_count);
             parts->Add(ReplacementPart::SubjectCapture(capture_ref), zone);
             last = next_index + 1;
           }
@@ -3735,16 +3601,16 @@
   {
     DisallowHeapAllocation no_gc;
     String::FlatContent content = replacement->GetFlatContent();
-    ASSERT(content.IsFlat());
+    DCHECK(content.IsFlat());
     bool simple = false;
-    if (content.IsAscii()) {
+    if (content.IsOneByte()) {
       simple = ParseReplacementPattern(&parts_,
                                        content.ToOneByteVector(),
                                        capture_count,
                                        subject_length,
                                        zone());
     } else {
-      ASSERT(content.IsTwoByte());
+      DCHECK(content.IsTwoByte());
       simple = ParseReplacementPattern(&parts_,
                                        content.ToUC16Vector(),
                                        capture_count,
@@ -3781,7 +3647,7 @@
                                 int match_from,
                                 int match_to,
                                 int32_t* match) {
-  ASSERT_LT(0, parts_.length());
+  DCHECK_LT(0, parts_.length());
   for (int i = 0, n = parts_.length(); i < n; i++) {
     ReplacementPart part = parts_[i];
     switch (part.tag) {
@@ -3815,12 +3681,10 @@
 }
 
 
-void FindAsciiStringIndices(Vector<const uint8_t> subject,
-                            char pattern,
-                            ZoneList<int>* indices,
-                            unsigned int limit,
-                            Zone* zone) {
-  ASSERT(limit > 0);
+void FindOneByteStringIndices(Vector<const uint8_t> subject, char pattern,
+                              ZoneList<int>* indices, unsigned int limit,
+                              Zone* zone) {
+  DCHECK(limit > 0);
   // Collect indices of pattern in subject using memchr.
   // Stop after finding at most limit values.
   const uint8_t* subject_start = subject.start();
@@ -3842,7 +3706,7 @@
                               ZoneList<int>* indices,
                               unsigned int limit,
                               Zone* zone) {
-  ASSERT(limit > 0);
+  DCHECK(limit > 0);
   const uc16* subject_start = subject.start();
   const uc16* subject_end = subject_start + subject.length();
   for (const uc16* pos = subject_start; pos < subject_end && limit > 0; pos++) {
@@ -3861,7 +3725,7 @@
                        ZoneList<int>* indices,
                        unsigned int limit,
                        Zone* zone) {
-  ASSERT(limit > 0);
+  DCHECK(limit > 0);
   // Collect indices of pattern in subject.
   // Stop after finding at most limit values.
   int pattern_length = pattern.length();
@@ -3887,19 +3751,16 @@
     DisallowHeapAllocation no_gc;
     String::FlatContent subject_content = subject->GetFlatContent();
     String::FlatContent pattern_content = pattern->GetFlatContent();
-    ASSERT(subject_content.IsFlat());
-    ASSERT(pattern_content.IsFlat());
-    if (subject_content.IsAscii()) {
+    DCHECK(subject_content.IsFlat());
+    DCHECK(pattern_content.IsFlat());
+    if (subject_content.IsOneByte()) {
       Vector<const uint8_t> subject_vector = subject_content.ToOneByteVector();
-      if (pattern_content.IsAscii()) {
+      if (pattern_content.IsOneByte()) {
         Vector<const uint8_t> pattern_vector =
             pattern_content.ToOneByteVector();
         if (pattern_vector.length() == 1) {
-          FindAsciiStringIndices(subject_vector,
-                                 pattern_vector[0],
-                                 indices,
-                                 limit,
-                                 zone);
+          FindOneByteStringIndices(subject_vector, pattern_vector[0], indices,
+                                   limit, zone);
         } else {
           FindStringIndices(isolate,
                             subject_vector,
@@ -3918,7 +3779,7 @@
       }
     } else {
       Vector<const uc16> subject_vector = subject_content.ToUC16Vector();
-      if (pattern_content.IsAscii()) {
+      if (pattern_content.IsOneByte()) {
         Vector<const uint8_t> pattern_vector =
             pattern_content.ToOneByteVector();
         if (pattern_vector.length() == 1) {
@@ -3964,12 +3825,12 @@
     Handle<JSRegExp> pattern_regexp,
     Handle<String> replacement,
     Handle<JSArray> last_match_info) {
-  ASSERT(subject->IsFlat());
-  ASSERT(replacement->IsFlat());
+  DCHECK(subject->IsFlat());
+  DCHECK(replacement->IsFlat());
 
   ZoneScope zone_scope(isolate->runtime_zone());
   ZoneList<int> indices(8, zone_scope.zone());
-  ASSERT_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
+  DCHECK_EQ(JSRegExp::ATOM, pattern_regexp->TypeTag());
   String* pattern =
       String::cast(pattern_regexp->DataAt(JSRegExp::kAtomPatternIndex));
   int subject_len = subject->length();
@@ -4000,7 +3861,7 @@
   int result_pos = 0;
 
   MaybeHandle<SeqString> maybe_res;
-  if (ResultSeqString::kHasAsciiEncoding) {
+  if (ResultSeqString::kHasOneByteEncoding) {
     maybe_res = isolate->factory()->NewRawOneByteString(result_len);
   } else {
     maybe_res = isolate->factory()->NewRawTwoByteString(result_len);
@@ -4052,8 +3913,8 @@
     Handle<JSRegExp> regexp,
     Handle<String> replacement,
     Handle<JSArray> last_match_info) {
-  ASSERT(subject->IsFlat());
-  ASSERT(replacement->IsFlat());
+  DCHECK(subject->IsFlat());
+  DCHECK(replacement->IsFlat());
 
   int capture_count = regexp->CaptureCount();
   int subject_length = subject->length();
@@ -4148,7 +4009,7 @@
     Handle<String> subject,
     Handle<JSRegExp> regexp,
     Handle<JSArray> last_match_info) {
-  ASSERT(subject->IsFlat());
+  DCHECK(subject->IsFlat());
 
   // Shortcut for simple non-regexp global replacements
   if (regexp->TypeTag() == JSRegExp::ATOM) {
@@ -4180,7 +4041,7 @@
   if (new_length == 0) return isolate->heap()->empty_string();
 
   Handle<ResultSeqString> answer;
-  if (ResultSeqString::kHasAsciiEncoding) {
+  if (ResultSeqString::kHasOneByteEncoding) {
     answer = Handle<ResultSeqString>::cast(
         isolate->factory()->NewRawOneByteString(new_length).ToHandleChecked());
   } else {
@@ -4243,7 +4104,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringReplaceGlobalRegExpWithString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
 
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, replacement, 2);
@@ -4325,7 +4186,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringReplaceOneCharWithString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, search, 1);
   CONVERT_ARG_HANDLE_CHECKED(String, replace, 2);
@@ -4358,8 +4219,8 @@
                          Handle<String> sub,
                          Handle<String> pat,
                          int start_index) {
-  ASSERT(0 <= start_index);
-  ASSERT(start_index <= sub->length());
+  DCHECK(0 <= start_index);
+  DCHECK(start_index <= sub->length());
 
   int pattern_length = pat->length();
   if (pattern_length == 0) return start_index;
@@ -4371,14 +4232,14 @@
   pat = String::Flatten(pat);
 
   DisallowHeapAllocation no_gc;  // ensure vectors stay valid
-  // Extract flattened substrings of cons strings before determining asciiness.
+  // Extract flattened substrings of cons strings before getting encoding.
   String::FlatContent seq_sub = sub->GetFlatContent();
   String::FlatContent seq_pat = pat->GetFlatContent();
 
   // dispatch on type of strings
-  if (seq_pat.IsAscii()) {
+  if (seq_pat.IsOneByte()) {
     Vector<const uint8_t> pat_vector = seq_pat.ToOneByteVector();
-    if (seq_sub.IsAscii()) {
+    if (seq_sub.IsOneByte()) {
       return SearchString(isolate,
                           seq_sub.ToOneByteVector(),
                           pat_vector,
@@ -4390,7 +4251,7 @@
                         start_index);
   }
   Vector<const uc16> pat_vector = seq_pat.ToUC16Vector();
-  if (seq_sub.IsAscii()) {
+  if (seq_sub.IsOneByte()) {
     return SearchString(isolate,
                         seq_sub.ToOneByteVector(),
                         pat_vector,
@@ -4405,7 +4266,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringIndexOf) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
@@ -4425,8 +4286,8 @@
                                 Vector<const pchar> pattern,
                                 int idx) {
   int pattern_length = pattern.length();
-  ASSERT(pattern_length >= 1);
-  ASSERT(idx + pattern_length <= subject.length());
+  DCHECK(pattern_length >= 1);
+  DCHECK(idx + pattern_length <= subject.length());
 
   if (sizeof(schar) == 1 && sizeof(pchar) > 1) {
     for (int i = 0; i < pattern_length; i++) {
@@ -4457,7 +4318,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringLastIndexOf) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, sub, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, pat, 1);
@@ -4486,9 +4347,9 @@
   String::FlatContent sub_content = sub->GetFlatContent();
   String::FlatContent pat_content = pat->GetFlatContent();
 
-  if (pat_content.IsAscii()) {
+  if (pat_content.IsOneByte()) {
     Vector<const uint8_t> pat_vector = pat_content.ToOneByteVector();
-    if (sub_content.IsAscii()) {
+    if (sub_content.IsOneByte()) {
       position = StringMatchBackwards(sub_content.ToOneByteVector(),
                                       pat_vector,
                                       start_index);
@@ -4499,7 +4360,7 @@
     }
   } else {
     Vector<const uc16> pat_vector = pat_content.ToUC16Vector();
-    if (sub_content.IsAscii()) {
+    if (sub_content.IsOneByte()) {
       position = StringMatchBackwards(sub_content.ToOneByteVector(),
                                       pat_vector,
                                       start_index);
@@ -4516,7 +4377,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringLocaleCompare) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
@@ -4558,9 +4419,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_SubString) {
+RUNTIME_FUNCTION(Runtime_SubString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
   int start, end;
@@ -4586,9 +4447,17 @@
 }
 
 
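+// Internalized strings are deduplicated in the isolate's string table, so
+// equal strings share a single object and can be compared by identity.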
+RUNTIME_FUNCTION(Runtime_InternalizeString) {
+  HandleScope handles(isolate);
+  RUNTIME_ASSERT(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
+  return *isolate->factory()->InternalizeString(string);
+}
+
+
 RUNTIME_FUNCTION(Runtime_StringMatch) {
   HandleScope handles(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 1);
@@ -4651,8 +4520,8 @@
     Handle<JSRegExp> regexp,
     Handle<JSArray> last_match_array,
     Handle<JSArray> result_array) {
-  ASSERT(subject->IsFlat());
-  ASSERT_NE(has_capture, regexp->CaptureCount() == 0);
+  DCHECK(subject->IsFlat());
+  DCHECK_NE(has_capture, regexp->CaptureCount() == 0);
 
   int capture_count = regexp->CaptureCount();
   int subject_length = subject->length();
@@ -4686,7 +4555,7 @@
   if (global_cache.HasException()) return isolate->heap()->exception();
 
   // Ensured in Runtime_RegExpExecMultiple.
-  ASSERT(result_array->HasFastObjectElements());
+  DCHECK(result_array->HasFastObjectElements());
   Handle<FixedArray> result_elements(
       FixedArray::cast(result_array->elements()));
   if (result_elements->length() < 16) {
@@ -4740,12 +4609,12 @@
           int start = current_match[i * 2];
           if (start >= 0) {
             int end = current_match[i * 2 + 1];
-            ASSERT(start <= end);
+            DCHECK(start <= end);
             Handle<String> substring =
                 isolate->factory()->NewSubString(subject, start, end);
             elements->set(i, *substring);
           } else {
-            ASSERT(current_match[i * 2 + 1] < 0);
+            DCHECK(current_match[i * 2 + 1] < 0);
             elements->set(i, isolate->heap()->undefined_value());
           }
         }
@@ -4797,7 +4666,7 @@
 // set any other last match array info.
 RUNTIME_FUNCTION(Runtime_RegExpExecMultiple) {
   HandleScope handles(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
 
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
@@ -4821,7 +4690,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToRadixString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_SMI_ARG_CHECKED(radix, 1);
   RUNTIME_ASSERT(2 <= radix && radix <= 36);
 
@@ -4856,7 +4725,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToFixed) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
@@ -4873,7 +4742,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToExponential) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
@@ -4889,7 +4758,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToPrecision) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(value, 0);
   CONVERT_DOUBLE_ARG_CHECKED(f_number, 1);
@@ -4905,7 +4774,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsValidSmi) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_NUMBER_CHECKED(int32_t, number, Int32, args[0]);
   return isolate->heap()->ToBoolean(Smi::IsValid(number));
@@ -4943,8 +4812,9 @@
 
   Handle<Object> result;
   if (object->IsString() || object->IsNumber() || object->IsBoolean()) {
-    Handle<Object> proto(object->GetPrototype(isolate), isolate);
-    return Object::GetElement(isolate, proto, index);
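+    // Primitives have no elements of their own at this point; continue the
+    // lookup on their prototype (e.g. String.prototype for a string value).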
+    PrototypeIterator iter(isolate, object);
+    return Object::GetElement(isolate, PrototypeIterator::GetCurrent(iter),
+                              index);
   } else {
     return Object::GetElement(isolate, object, index);
   }
@@ -4967,17 +4837,21 @@
 MaybeHandle<Object> Runtime::HasObjectProperty(Isolate* isolate,
                                                Handle<JSReceiver> object,
                                                Handle<Object> key) {
+  Maybe<bool> maybe;
   // Check if the given key is an array index.
   uint32_t index;
   if (key->ToArrayIndex(&index)) {
-    return isolate->factory()->ToBoolean(JSReceiver::HasElement(object, index));
+    maybe = JSReceiver::HasElement(object, index);
+  } else {
+    // Convert the key to a name - possibly by calling back into JavaScript.
+    Handle<Name> name;
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
+
+    maybe = JSReceiver::HasProperty(object, name);
   }
 
-  // Convert the key to a name - possibly by calling back into JavaScript.
-  Handle<Name> name;
-  ASSIGN_RETURN_ON_EXCEPTION(isolate, name, ToName(isolate, key), Object);
-
-  return isolate->factory()->ToBoolean(JSReceiver::HasProperty(object, name));
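+  // A Maybe without a value signals a pending exception; propagate it by
+  // returning an empty handle.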
+  if (!maybe.has_value) return MaybeHandle<Object>();
+  return isolate->factory()->ToBoolean(maybe.value);
 }
 
 
@@ -4986,9 +4860,9 @@
                                                Handle<Object> key) {
   if (object->IsUndefined() || object->IsNull()) {
     Handle<Object> args[2] = { key, object };
-    return isolate->Throw<Object>(
-        isolate->factory()->NewTypeError("non_object_property_load",
-                                         HandleVector(args, 2)));
+    THROW_NEW_ERROR(isolate, NewTypeError("non_object_property_load",
+                                          HandleVector(args, 2)),
+                    Object);
   }
 
   // Check if the given key is an array index.
@@ -5013,7 +4887,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetProperty) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
@@ -5028,7 +4902,7 @@
 // KeyedGetProperty is called from KeyedLoadIC::GenerateGeneric.
 RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver_obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key_obj, 1);
@@ -5058,26 +4932,23 @@
         int index = keyed_lookup_cache->Lookup(receiver_map, key);
         if (index != -1) {
           // Doubles are not cached, so raw read the value.
-          Object* value = receiver->RawFastPropertyAt(
+          return receiver->RawFastPropertyAt(
               FieldIndex::ForKeyedLookupCacheIndex(*receiver_map, index));
-          return value->IsTheHole()
-              ? isolate->heap()->undefined_value()
-              : value;
         }
         // Lookup cache miss.  Perform lookup and update the cache if
         // appropriate.
-        LookupResult result(isolate);
-        receiver->LookupOwn(key, &result);
-        if (result.IsField()) {
-          FieldIndex field_index = result.GetFieldIndex();
+        LookupIterator it(receiver, key, LookupIterator::OWN);
+        if (it.state() == LookupIterator::DATA &&
+            it.property_details().type() == FIELD) {
+          FieldIndex field_index = it.GetFieldIndex();
           // Do not track double fields in the keyed lookup cache. Reading
           // double values requires boxing.
-          if (!result.representation().IsDouble()) {
+          if (!it.representation().IsDouble()) {
             keyed_lookup_cache->Update(receiver_map, key,
                 field_index.GetKeyedLookupCacheIndex());
           }
           AllowHeapAllocation allow_allocation;
-          return *JSObject::FastPropertyAt(receiver, result.representation(),
+          return *JSObject::FastPropertyAt(receiver, it.representation(),
                                            field_index);
         }
       } else {
@@ -5090,10 +4961,10 @@
           if (!receiver->IsGlobalObject()) return value;
           value = PropertyCell::cast(value)->value();
           if (!value->IsTheHole()) return value;
-          // If value is the hole do the general lookup.
+          // If value is the hole (meaning, absent) do the general lookup.
         }
       }
-    } else if (FLAG_smi_only_arrays && key_obj->IsSmi()) {
+    } else if (key_obj->IsSmi()) {
       // JSObject without a name key. If the key is a Smi, check for a
       // definite out-of-bounds access to elements, which is a strong indicator
       // that subsequent accesses will also call the runtime. Proactively
@@ -5114,7 +4985,7 @@
               isolate, TransitionElements(js_object, elements_kind, isolate));
         }
       } else {
-        ASSERT(IsFastSmiOrObjectElementsKind(elements_kind) ||
+        DCHECK(IsFastSmiOrObjectElementsKind(elements_kind) ||
                !IsFastElementsKind(elements_kind));
       }
     }
@@ -5141,15 +5012,46 @@
 }
 
 
+// Transform getter or setter into something DefineAccessor can handle.
+static Handle<Object> InstantiateAccessorComponent(Isolate* isolate,
+                                                   Handle<Object> component) {
+  if (component->IsUndefined()) return isolate->factory()->undefined_value();
+  Handle<FunctionTemplateInfo> info =
+      Handle<FunctionTemplateInfo>::cast(component);
+  return Utils::OpenHandle(*Utils::ToLocal(info)->GetFunction());
+}
+
+
+RUNTIME_FUNCTION(Runtime_DefineApiAccessorProperty) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 5);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, getter, 2);
+  CONVERT_ARG_HANDLE_CHECKED(Object, setter, 3);
+  CONVERT_SMI_ARG_CHECKED(attribute, 4);
+  RUNTIME_ASSERT(getter->IsUndefined() || getter->IsFunctionTemplateInfo());
+  RUNTIME_ASSERT(setter->IsUndefined() || setter->IsFunctionTemplateInfo());
+  RUNTIME_ASSERT(PropertyDetails::AttributesField::is_valid(
+      static_cast<PropertyAttributes>(attribute)));
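+  // The getter and setter arrive as FunctionTemplateInfo and are instantiated
+  // into real JSFunctions before the accessor pair is installed.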
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, JSObject::DefineAccessor(
+                   object, name, InstantiateAccessorComponent(isolate, getter),
+                   InstantiateAccessorComponent(isolate, setter),
+                   static_cast<PropertyAttributes>(attribute)));
+  return isolate->heap()->undefined_value();
+}
+
+
 // Implements part of 8.12.9 DefineOwnProperty.
 // There are 3 cases that lead here:
 // Step 4b - define a new accessor property.
 // Steps 9c & 12 - replace an existing data property with an accessor property.
 // Step 12 - update an existing accessor property with an accessor or generic
 //           descriptor.
-RUNTIME_FUNCTION(Runtime_DefineOrRedefineAccessorProperty) {
+RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 5);
+  DCHECK(args.length() == 5);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(!obj->IsNull());
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
@@ -5162,10 +5064,9 @@
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
 
   bool fast = obj->HasFastProperties();
-  // DefineAccessor checks access rights.
-  JSObject::DefineAccessor(obj, name, getter, setter, attr);
-  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-  if (fast) JSObject::TransformToFastProperties(obj, 0);
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, JSObject::DefineAccessor(obj, name, getter, setter, attr));
+  if (fast) JSObject::MigrateSlowToFast(obj, 0);
   return isolate->heap()->undefined_value();
 }
 
@@ -5176,9 +5077,9 @@
 // Steps 9b & 12 - replace an existing accessor property with a data property.
 // Step 12 - update an existing data property with a data or generic
 //           descriptor.
-RUNTIME_FUNCTION(Runtime_DefineOrRedefineDataProperty) {
+RUNTIME_FUNCTION(Runtime_DefineDataPropertyUnchecked) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, js_object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, obj_value, 2);
@@ -5186,36 +5087,17 @@
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
 
-  // Check access rights if needed.
-  if (js_object->IsAccessCheckNeeded() &&
-      !isolate->MayNamedAccess(js_object, name, v8::ACCESS_SET)) {
-    return isolate->heap()->undefined_value();
+  LookupIterator it(js_object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  if (it.IsFound() && it.state() == LookupIterator::ACCESS_CHECK) {
+    if (!isolate->MayNamedAccess(js_object, name, v8::ACCESS_SET)) {
+      return isolate->heap()->undefined_value();
+    }
+    it.Next();
   }
 
-  LookupResult lookup(isolate);
-  js_object->LookupOwnRealNamedProperty(name, &lookup);
-
   // Take special care when attributes are different and there is already
-  // a property. For simplicity we normalize the property which enables us
-  // to not worry about changing the instance_descriptor and creating a new
-  // map. The current version of SetObjectProperty does not handle attributes
-  // correctly in the case where a property is a field and is reset with
-  // new attributes.
-  if (lookup.IsFound() &&
-      (attr != lookup.GetAttributes() || lookup.IsPropertyCallbacks())) {
-    // New attributes - normalize to avoid writing to instance descriptor
-    if (js_object->IsJSGlobalProxy()) {
-      // Since the result is a property, the prototype will exist so
-      // we don't have to check for null.
-      js_object = Handle<JSObject>(JSObject::cast(js_object->GetPrototype()));
-    }
-
-    if (attr != lookup.GetAttributes() ||
-        (lookup.IsPropertyCallbacks() &&
-         !lookup.GetCallbackObject()->IsAccessorInfo())) {
-      JSObject::NormalizeProperties(js_object, CLEAR_INOBJECT_PROPERTIES, 0);
-    }
-
+  // a property.
+  if (it.state() == LookupIterator::ACCESSOR) {
     // Use IgnoreAttributes version since a readonly property may be
     // overridden and SetProperty does not allow this.
     Handle<Object> result;
@@ -5223,10 +5105,6 @@
         isolate, result,
         JSObject::SetOwnPropertyIgnoreAttributes(
             js_object, name, obj_value, attr,
-            Object::OPTIMAL_REPRESENTATION,
-            ALLOW_AS_CONSTANT,
-            JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
-            JSReceiver::MAY_BE_STORE_FROM_KEYED,
             JSObject::DONT_FORCE_FIELD));
     return *result;
   }
@@ -5234,9 +5112,7 @@
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
-      Runtime::ForceSetObjectProperty(
-          js_object, name, obj_value, attr,
-          JSReceiver::CERTAINLY_NOT_STORE_FROM_KEYED));
+      Runtime::DefineObjectProperty(js_object, name, obj_value, attr));
   return *result;
 }
 
@@ -5244,7 +5120,7 @@
 // Return property without being observable by accessors or interceptors.
 RUNTIME_FUNCTION(Runtime_GetDataProperty) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
   return *JSObject::GetDataProperty(object, key);
@@ -5255,16 +5131,12 @@
                                                Handle<Object> object,
                                                Handle<Object> key,
                                                Handle<Object> value,
-                                               PropertyAttributes attr,
                                                StrictMode strict_mode) {
-  SetPropertyMode set_mode = attr == NONE ? SET_PROPERTY : DEFINE_PROPERTY;
-
   if (object->IsUndefined() || object->IsNull()) {
     Handle<Object> args[2] = { key, object };
-    Handle<Object> error =
-        isolate->factory()->NewTypeError("non_object_property_store",
-                                         HandleVector(args, 2));
-    return isolate->Throw<Object>(error);
+    THROW_NEW_ERROR(isolate, NewTypeError("non_object_property_store",
+                                          HandleVector(args, 2)),
+                    Object);
   }
 
   if (object->IsJSProxy()) {
@@ -5276,19 +5148,17 @@
           isolate, name_object, Execution::ToString(isolate, key), Object);
     }
     Handle<Name> name = Handle<Name>::cast(name_object);
-    return JSReceiver::SetProperty(Handle<JSProxy>::cast(object), name, value,
-                                   attr,
-                                   strict_mode);
+    return Object::SetProperty(Handle<JSProxy>::cast(object), name, value,
+                               strict_mode);
   }
 
-  // If the object isn't a JavaScript object, we ignore the store.
-  if (!object->IsJSObject()) return value;
-
-  Handle<JSObject> js_object = Handle<JSObject>::cast(object);
-
   // Check if the given key is an array index.
   uint32_t index;
   if (key->ToArrayIndex(&index)) {
+    // TODO(verwaest): Support non-JSObject receivers.
+    if (!object->IsJSObject()) return value;
+    Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+
     // In Firefox/SpiderMonkey, Safari and Opera you can access the characters
     // of a string using [] notation.  We need to support this too in
     // JavaScript.
@@ -5310,7 +5180,7 @@
     }
 
     MaybeHandle<Object> result = JSObject::SetElement(
-        js_object, index, value, attr, strict_mode, true, set_mode);
+        js_object, index, value, NONE, strict_mode, true, SET_PROPERTY);
     JSObject::ValidateElements(js_object);
 
     return result.is_null() ? result : value;
@@ -5319,17 +5189,20 @@
   if (key->IsName()) {
     Handle<Name> name = Handle<Name>::cast(key);
     if (name->AsArrayIndex(&index)) {
+      // TODO(verwaest): Support non-JSObject receivers.
+      if (!object->IsJSObject()) return value;
+      Handle<JSObject> js_object = Handle<JSObject>::cast(object);
       if (js_object->HasExternalArrayElements()) {
         if (!value->IsNumber() && !value->IsUndefined()) {
           ASSIGN_RETURN_ON_EXCEPTION(
               isolate, value, Execution::ToNumber(isolate, value), Object);
         }
       }
-      return JSObject::SetElement(js_object, index, value, attr,
-                                  strict_mode, true, set_mode);
+      return JSObject::SetElement(js_object, index, value, NONE, strict_mode,
+                                  true, SET_PROPERTY);
     } else {
       if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
-      return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
+      return Object::SetProperty(object, name, value, strict_mode);
     }
   }
 
@@ -5340,20 +5213,20 @@
   Handle<String> name = Handle<String>::cast(converted);
 
   if (name->AsArrayIndex(&index)) {
-    return JSObject::SetElement(js_object, index, value, attr,
-                                strict_mode, true, set_mode);
-  } else {
-    return JSReceiver::SetProperty(js_object, name, value, attr, strict_mode);
+    // TODO(verwaest): Support non-JSObject receivers.
+    if (!object->IsJSObject()) return value;
+    Handle<JSObject> js_object = Handle<JSObject>::cast(object);
+    return JSObject::SetElement(js_object, index, value, NONE, strict_mode,
+                                true, SET_PROPERTY);
   }
+  return Object::SetProperty(object, name, value, strict_mode);
 }
 
 
-MaybeHandle<Object> Runtime::ForceSetObjectProperty(
-    Handle<JSObject> js_object,
-    Handle<Object> key,
-    Handle<Object> value,
-    PropertyAttributes attr,
-    JSReceiver::StoreFromKeyed store_from_keyed) {
+MaybeHandle<Object> Runtime::DefineObjectProperty(Handle<JSObject> js_object,
+                                                  Handle<Object> key,
+                                                  Handle<Object> value,
+                                                  PropertyAttributes attr) {
   Isolate* isolate = js_object->GetIsolate();
   // Check if the given key is an array index.
   uint32_t index;
@@ -5380,10 +5253,8 @@
                                   SLOPPY, false, DEFINE_PROPERTY);
     } else {
       if (name->IsString()) name = String::Flatten(Handle<String>::cast(name));
-      return JSObject::SetOwnPropertyIgnoreAttributes(
-          js_object, name, value, attr, Object::OPTIMAL_REPRESENTATION,
-          ALLOW_AS_CONSTANT, JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
-          store_from_keyed);
+      return JSObject::SetOwnPropertyIgnoreAttributes(js_object, name, value,
+                                                      attr);
     }
   }
 
@@ -5397,10 +5268,8 @@
     return JSObject::SetElement(js_object, index, value, attr,
                                 SLOPPY, false, DEFINE_PROPERTY);
   } else {
-    return JSObject::SetOwnPropertyIgnoreAttributes(
-        js_object, name, value, attr, Object::OPTIMAL_REPRESENTATION,
-        ALLOW_AS_CONSTANT, JSReceiver::PERFORM_EXTENSIBILITY_CHECK,
-        store_from_keyed);
+    return JSObject::SetOwnPropertyIgnoreAttributes(js_object, name, value,
+                                                    attr);
   }
 }
 
@@ -5453,11 +5322,42 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_SetProperty) {
+RUNTIME_FUNCTION(Runtime_AddNamedProperty) {
   HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 4 || args.length() == 5);
+  RUNTIME_ASSERT(args.length() == 4);
 
-  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+  CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
+  RUNTIME_ASSERT(
+      (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+  // Compute attributes.
+  PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(unchecked_attributes);
+
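+  // In debug builds, verify that the property is genuinely new:
+  // AddNamedProperty must not overwrite an existing own property.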
+#ifdef DEBUG
+  uint32_t index = 0;
+  DCHECK(!key->ToArrayIndex(&index));
+  LookupIterator it(object, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
+  Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+  if (!maybe.has_value) return isolate->heap()->exception();
+  RUNTIME_ASSERT(!it.IsFound());
+#endif
+
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      JSObject::SetOwnPropertyIgnoreAttributes(object, key, value, attributes));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_AddPropertyForTemplate) {
+  HandleScope scope(isolate);
+  RUNTIME_ASSERT(args.length() == 4);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
   CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
@@ -5467,17 +5367,78 @@
   PropertyAttributes attributes =
       static_cast<PropertyAttributes>(unchecked_attributes);
 
-  StrictMode strict_mode = SLOPPY;
-  if (args.length() == 5) {
-    CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_arg, 4);
-    strict_mode = strict_mode_arg;
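+  // In debug builds, reject duplicate template properties so that broken
+  // templates are caught during development.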
+#ifdef DEBUG
+  bool duplicate;
+  if (key->IsName()) {
+    LookupIterator it(object, Handle<Name>::cast(key),
+                      LookupIterator::OWN_SKIP_INTERCEPTOR);
+    Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+    DCHECK(maybe.has_value);
+    duplicate = it.IsFound();
+  } else {
+    uint32_t index = 0;
+    RUNTIME_ASSERT(key->ToArrayIndex(&index));
+    Maybe<bool> maybe = JSReceiver::HasOwnElement(object, index);
+    if (!maybe.has_value) return isolate->heap()->exception();
+    duplicate = maybe.value;
   }
+  if (duplicate) {
+    Handle<Object> args[1] = { key };
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewTypeError("duplicate_template_property", HandleVector(args, 1)));
+  }
+#endif
 
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
-      Runtime::SetObjectProperty(
-          isolate, object, key, value, attributes, strict_mode));
+      Runtime::DefineObjectProperty(object, key, value, attributes));
+  return *result;
+}
+
+
+RUNTIME_FUNCTION(Runtime_SetProperty) {
+  HandleScope scope(isolate);
+  RUNTIME_ASSERT(args.length() == 4);
+
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+  CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode_arg, 3);
+  StrictMode strict_mode = strict_mode_arg;
+
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      Runtime::SetObjectProperty(isolate, object, key, value, strict_mode));
+  return *result;
+}
+
+
+// Adds an element to an array.
+// This is used to create an indexed data property in an array.
+RUNTIME_FUNCTION(Runtime_AddElement) {
+  HandleScope scope(isolate);
+  RUNTIME_ASSERT(args.length() == 4);
+
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
+  CONVERT_SMI_ARG_CHECKED(unchecked_attributes, 3);
+  RUNTIME_ASSERT(
+      (unchecked_attributes & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
+  // Compute attributes.
+  PropertyAttributes attributes =
+      static_cast<PropertyAttributes>(unchecked_attributes);
+
+  uint32_t index = 0;
+  key->ToArrayIndex(&index);
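+  // Note: if the key is not a valid array index, |index| stays 0 and the
+  // element is defined at index 0.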
+
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, JSObject::SetElement(object, index, value, attributes,
+                                            SLOPPY, false, DEFINE_PROPERTY));
   return *result;
 }
 
@@ -5541,12 +5502,12 @@
   }
   Handle<JSArray> boilerplate_object(boilerplate);
   ElementsKind elements_kind = object->GetElementsKind();
-  ASSERT(IsFastElementsKind(elements_kind));
+  DCHECK(IsFastElementsKind(elements_kind));
   // Smis should never trigger transitions.
-  ASSERT(!value->IsSmi());
+  DCHECK(!value->IsSmi());
 
   if (value->IsNumber()) {
-    ASSERT(IsFastSmiElementsKind(elements_kind));
+    DCHECK(IsFastSmiElementsKind(elements_kind));
     ElementsKind transitioned_kind = IsFastHoleyElementsKind(elements_kind)
         ? FAST_HOLEY_DOUBLE_ELEMENTS
         : FAST_DOUBLE_ELEMENTS;
@@ -5556,7 +5517,7 @@
       JSObject::TransitionElementsKind(boilerplate_object, transitioned_kind);
     }
     JSObject::TransitionElementsKind(object, transitioned_kind);
-    ASSERT(IsFastDoubleElementsKind(object->GetElementsKind()));
+    DCHECK(IsFastDoubleElementsKind(object->GetElementsKind()));
     FixedDoubleArray* double_array = FixedDoubleArray::cast(object->elements());
     HeapNumber* number = HeapNumber::cast(*value);
     double_array->set(store_index, number->Number());
@@ -5583,7 +5544,7 @@
 // Check whether the debugger is active and about to step into the
 // callback that is passed to a built-in function such as Array.forEach.
 RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   if (!isolate->debug()->is_active() || !isolate->debug()->StepInActive()) {
     return isolate->heap()->false_value();
   }
@@ -5597,69 +5558,77 @@
 // Set one shot breakpoints for the callback function that is passed to a
 // built-in function such as Array.forEach to enable stepping into the callback.
 RUNTIME_FUNCTION(Runtime_DebugPrepareStepInIfStepping) {
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   Debug* debug = isolate->debug();
   if (!debug->IsStepping()) return isolate->heap()->undefined_value();
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callback, 0);
+
   HandleScope scope(isolate);
-  // When leaving the callback, step out has been activated, but not performed
-  // if we do not leave the builtin.  To be able to step into the callback
+  CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
+  RUNTIME_ASSERT(object->IsJSFunction() || object->IsJSGeneratorObject());
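+  // Stepping may target a plain function or a suspended generator; for a
+  // generator, flood its underlying function with one-shot breakpoints.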
+  Handle<JSFunction> fun;
+  if (object->IsJSFunction()) {
+    fun = Handle<JSFunction>::cast(object);
+  } else {
+    fun = Handle<JSFunction>(
+        Handle<JSGeneratorObject>::cast(object)->function(), isolate);
+  }
+  // When leaving the function, step out has been activated, but not performed
+  // if we do not leave the builtin.  To be able to step into the function
   // again, we need to clear the step out at this point.
   debug->ClearStepOut();
-  debug->FloodWithOneShot(callback);
+  debug->FloodWithOneShot(fun);
   return isolate->heap()->undefined_value();
 }
 
 
-// The argument is a closure that is kept until the epilogue is called.
-// On exception, the closure is called, which returns the promise if the
-// exception is considered uncaught, or undefined otherwise.
-RUNTIME_FUNCTION(Runtime_DebugPromiseHandlePrologue) {
-  ASSERT(args.length() == 1);
+RUNTIME_FUNCTION(Runtime_DebugPushPromise) {
+  DCHECK(args.length() == 1);
   HandleScope scope(isolate);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, promise_getter, 0);
-  isolate->debug()->PromiseHandlePrologue(promise_getter);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  isolate->PushPromise(promise);
   return isolate->heap()->undefined_value();
 }
 
 
-RUNTIME_FUNCTION(Runtime_DebugPromiseHandleEpilogue) {
-  ASSERT(args.length() == 0);
+RUNTIME_FUNCTION(Runtime_DebugPopPromise) {
+  DCHECK(args.length() == 0);
   SealHandleScope shs(isolate);
-  isolate->debug()->PromiseHandleEpilogue();
+  isolate->PopPromise();
   return isolate->heap()->undefined_value();
 }
 
 
-// Set an own property, even if it is READ_ONLY.  If the property does not
-// exist, it will be added with attributes NONE.
-RUNTIME_FUNCTION(Runtime_IgnoreAttributesAndSetProperty) {
+RUNTIME_FUNCTION(Runtime_DebugPromiseEvent) {
+  DCHECK(args.length() == 1);
   HandleScope scope(isolate);
-  RUNTIME_ASSERT(args.length() == 3 || args.length() == 4);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
-  CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, value, 2);
-  // Compute attributes.
-  PropertyAttributes attributes = NONE;
-  if (args.length() == 4) {
-    CONVERT_SMI_ARG_CHECKED(unchecked_value, 3);
-    // Only attribute bits should be set.
-    RUNTIME_ASSERT(
-        (unchecked_value & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
-    attributes = static_cast<PropertyAttributes>(unchecked_value);
-  }
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          object, name, value, attributes));
-  return *result;
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0);
+  isolate->debug()->OnPromiseEvent(data);
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_DebugPromiseRejectEvent) {
+  DCHECK(args.length() == 2);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, promise, 0);
+  CONVERT_ARG_HANDLE_CHECKED(Object, value, 1);
+  isolate->debug()->OnPromiseReject(promise, value);
+  return isolate->heap()->undefined_value();
+}
+
+
+RUNTIME_FUNCTION(Runtime_DebugAsyncTaskEvent) {
+  DCHECK(args.length() == 1);
+  HandleScope scope(isolate);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, data, 0);
+  isolate->debug()->OnAsyncTaskEvent(data);
+  return isolate->heap()->undefined_value();
 }
 
 
 RUNTIME_FUNCTION(Runtime_DeleteProperty) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
   CONVERT_STRICT_MODE_ARG_CHECKED(strict_mode, 2);
@@ -5676,18 +5645,22 @@
 static Object* HasOwnPropertyImplementation(Isolate* isolate,
                                             Handle<JSObject> object,
                                             Handle<Name> key) {
-  if (JSReceiver::HasOwnProperty(object, key)) {
-    return isolate->heap()->true_value();
-  }
+  Maybe<bool> maybe = JSReceiver::HasOwnProperty(object, key);
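+  // A Maybe without a value signals a pending exception; propagate it.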
+  if (!maybe.has_value) return isolate->heap()->exception();
+  if (maybe.value) return isolate->heap()->true_value();
   // Handle hidden prototypes.  If there's a hidden prototype above this thing
   // then we have to check it for properties, because they are supposed to
   // look like they are on this object.
-  Handle<Object> proto(object->GetPrototype(), isolate);
-  if (proto->IsJSObject() &&
-      Handle<JSObject>::cast(proto)->map()->is_hidden_prototype()) {
-    return HasOwnPropertyImplementation(isolate,
-                                        Handle<JSObject>::cast(proto),
-                                        key);
+  PrototypeIterator iter(isolate, object);
+  if (!iter.IsAtEnd() &&
+      Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter))
+          ->map()
+          ->is_hidden_prototype()) {
+    // TODO(verwaest): The recursion is not necessary for keys that are array
+    // indices. Remove this.
+    return HasOwnPropertyImplementation(
+        isolate, Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
+        key);
   }
   RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
   return isolate->heap()->false_value();
@@ -5696,7 +5669,7 @@
 
 RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0)
   CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
 
@@ -5709,11 +5682,11 @@
     // Fast case: either the key is a real named property or it is not
     // an array index and there are no interceptors or hidden
     // prototypes.
-    if (JSObject::HasRealNamedProperty(js_obj, key)) {
-      ASSERT(!isolate->has_scheduled_exception());
+    Maybe<bool> maybe = JSObject::HasRealNamedProperty(js_obj, key);
+    if (!maybe.has_value) return isolate->heap()->exception();
+    DCHECK(!isolate->has_pending_exception());
+    if (maybe.value) {
       return isolate->heap()->true_value();
-    } else {
-      RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
     }
     Map* map = js_obj->map();
     if (!key_is_array_index &&
@@ -5738,49 +5711,46 @@
 
 RUNTIME_FUNCTION(Runtime_HasProperty) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
 
-  bool result = JSReceiver::HasProperty(receiver, key);
-  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-  if (isolate->has_pending_exception()) return isolate->heap()->exception();
-  return isolate->heap()->ToBoolean(result);
+  Maybe<bool> maybe = JSReceiver::HasProperty(receiver, key);
+  if (!maybe.has_value) return isolate->heap()->exception();
+  return isolate->heap()->ToBoolean(maybe.value);
 }
 
 
 RUNTIME_FUNCTION(Runtime_HasElement) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
   CONVERT_SMI_ARG_CHECKED(index, 1);
 
-  bool result = JSReceiver::HasElement(receiver, index);
-  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-  return isolate->heap()->ToBoolean(result);
+  Maybe<bool> maybe = JSReceiver::HasElement(receiver, index);
+  if (!maybe.has_value) return isolate->heap()->exception();
+  return isolate->heap()->ToBoolean(maybe.value);
 }
 
 
 RUNTIME_FUNCTION(Runtime_IsPropertyEnumerable) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, key, 1);
 
-  PropertyAttributes att = JSReceiver::GetOwnPropertyAttributes(object, key);
-  if (att == ABSENT || (att & DONT_ENUM) != 0) {
-    RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-    return isolate->heap()->false_value();
-  }
-  ASSERT(!isolate->has_scheduled_exception());
-  return isolate->heap()->true_value();
+  Maybe<PropertyAttributes> maybe =
+      JSReceiver::GetOwnPropertyAttributes(object, key);
+  if (!maybe.has_value) return isolate->heap()->exception();
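+  // An absent property is never enumerable; folding ABSENT into DONT_ENUM
+  // lets the single bit test below answer both cases.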
+  if (maybe.value == ABSENT) maybe.value = DONT_ENUM;
+  return isolate->heap()->ToBoolean((maybe.value & DONT_ENUM) == 0);
 }
 
 
 RUNTIME_FUNCTION(Runtime_GetPropertyNames) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, object, 0);
   Handle<JSArray> result;
 
@@ -5800,7 +5770,7 @@
 // the check for deletions during a for-in.
 RUNTIME_FUNCTION(Runtime_GetPropertyNamesFast) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSReceiver, raw_object, 0);
 
@@ -5825,11 +5795,9 @@
 // is prototype for.
 static int OwnPrototypeChainLength(JSObject* obj) {
   int count = 1;
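+  // Start at 1 for the receiver itself; each hidden prototype behind it
+  // adds one.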
-  Object* proto = obj->GetPrototype();
-  while (proto->IsJSObject() &&
-         JSObject::cast(proto)->map()->is_hidden_prototype()) {
+  for (PrototypeIterator iter(obj->GetIsolate(), obj);
+       !iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN); iter.Advance()) {
     count++;
-    proto = JSObject::cast(proto)->GetPrototype();
   }
   return count;
 }
@@ -5840,7 +5808,7 @@
 // args[1]: PropertyAttributes as int
 RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   if (!args[0]->IsJSObject()) {
     return isolate->heap()->undefined_value();
   }
@@ -5859,7 +5827,8 @@
       RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
       return *isolate->factory()->NewJSArray(0);
     }
-    obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
+    PrototypeIterator iter(isolate, obj);
+    obj = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
   }
 
   // Find the number of objects making up this.
@@ -5868,22 +5837,26 @@
   // Find the number of own properties for each of the objects.
   ScopedVector<int> own_property_count(length);
   int total_property_count = 0;
-  Handle<JSObject> jsproto = obj;
-  for (int i = 0; i < length; i++) {
-    // Only collect names if access is permitted.
-    if (jsproto->IsAccessCheckNeeded() &&
-        !isolate->MayNamedAccess(
-            jsproto, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
-      isolate->ReportFailedAccessCheck(jsproto, v8::ACCESS_KEYS);
-      RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-      return *isolate->factory()->NewJSArray(0);
-    }
-    int n;
-    n = jsproto->NumberOfOwnProperties(filter);
-    own_property_count[i] = n;
-    total_property_count += n;
-    if (i < length - 1) {
-      jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+  {
+    PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
+    for (int i = 0; i < length; i++) {
+      DCHECK(!iter.IsAtEnd());
+      Handle<JSObject> jsproto =
+          Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+      // Only collect names if access is permitted.
+      if (jsproto->IsAccessCheckNeeded() &&
+          !isolate->MayNamedAccess(jsproto,
+                                   isolate->factory()->undefined_value(),
+                                   v8::ACCESS_KEYS)) {
+        isolate->ReportFailedAccessCheck(jsproto, v8::ACCESS_KEYS);
+        RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
+        return *isolate->factory()->NewJSArray(0);
+      }
+      int n;
+      n = jsproto->NumberOfOwnProperties(filter);
+      own_property_count[i] = n;
+      total_property_count += n;
+      iter.Advance();
     }
   }
 
@@ -5892,39 +5865,41 @@
       isolate->factory()->NewFixedArray(total_property_count);
 
   // Get the property names.
-  jsproto = obj;
   int next_copy_index = 0;
   int hidden_strings = 0;
-  for (int i = 0; i < length; i++) {
-    jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
-    if (i > 0) {
-      // Names from hidden prototypes may already have been added
-      // for inherited function template instances. Count the duplicates
-      // and stub them out; the final copy pass at the end ignores holes.
-      for (int j = next_copy_index;
-           j < next_copy_index + own_property_count[i];
-           j++) {
-        Object* name_from_hidden_proto = names->get(j);
-        for (int k = 0; k < next_copy_index; k++) {
-          if (names->get(k) != isolate->heap()->hidden_string()) {
-            Object* name = names->get(k);
-            if (name_from_hidden_proto == name) {
-              names->set(j, isolate->heap()->hidden_string());
-              hidden_strings++;
-              break;
+  {
+    PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
+    for (int i = 0; i < length; i++) {
+      DCHECK(!iter.IsAtEnd());
+      Handle<JSObject> jsproto =
+          Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
+      jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
+      if (i > 0) {
+        // Names from hidden prototypes may already have been added
+        // for inherited function template instances. Count the duplicates
+        // and stub them out; the final copy pass at the end ignores holes.
+        for (int j = next_copy_index;
+             j < next_copy_index + own_property_count[i]; j++) {
+          Object* name_from_hidden_proto = names->get(j);
+          for (int k = 0; k < next_copy_index; k++) {
+            if (names->get(k) != isolate->heap()->hidden_string()) {
+              Object* name = names->get(k);
+              if (name_from_hidden_proto == name) {
+                names->set(j, isolate->heap()->hidden_string());
+                hidden_strings++;
+                break;
+              }
             }
           }
         }
       }
-    }
-    next_copy_index += own_property_count[i];
+      next_copy_index += own_property_count[i];
 
-    // Hidden properties only show up if the filter does not skip strings.
-    if ((filter & STRING) == 0 && JSObject::HasHiddenProperties(jsproto)) {
-      hidden_strings++;
-    }
-    if (i < length - 1) {
-      jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
+      // Hidden properties only show up if the filter does not skip strings.
+      if ((filter & STRING) == 0 && JSObject::HasHiddenProperties(jsproto)) {
+        hidden_strings++;
+      }
+      iter.Advance();
     }
   }
 
@@ -5943,7 +5918,7 @@
       }
       names->set(dest_pos++, name);
     }
-    ASSERT_EQ(0, hidden_strings);
+    DCHECK_EQ(0, hidden_strings);
   }
 
   return *isolate->factory()->NewJSArrayWithElements(names);
@@ -5954,7 +5929,7 @@
 // args[0]: object
 RUNTIME_FUNCTION(Runtime_GetOwnElementNames) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   if (!args[0]->IsJSObject()) {
     return isolate->heap()->undefined_value();
   }
@@ -5971,7 +5946,7 @@
 // args[0]: object
 RUNTIME_FUNCTION(Runtime_GetInterceptorInfo) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   if (!args[0]->IsJSObject()) {
     return Smi::FromInt(0);
   }
@@ -5989,7 +5964,7 @@
 // args[0]: object
 RUNTIME_FUNCTION(Runtime_GetNamedInterceptorPropertyNames) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
 
   if (obj->HasNamedInterceptor()) {
@@ -6006,7 +5981,7 @@
 // args[0]: object
 RUNTIME_FUNCTION(Runtime_GetIndexedInterceptorElementNames) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
 
   if (obj->HasIndexedInterceptor()) {
@@ -6021,7 +5996,7 @@
 
 RUNTIME_FUNCTION(Runtime_OwnKeys) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSObject, raw_object, 0);
   Handle<JSObject> object(raw_object);
 
@@ -6035,10 +6010,10 @@
       return *isolate->factory()->NewJSArray(0);
     }
 
-    Handle<Object> proto(object->GetPrototype(), isolate);
+    PrototypeIterator iter(isolate, object);
     // If the proxy is detached, we simply return an empty array.
-    if (proto->IsNull()) return *isolate->factory()->NewJSArray(0);
-    object = Handle<JSObject>::cast(proto);
+    if (iter.IsAtEnd()) return *isolate->factory()->NewJSArray(0);
+    object = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
   }
 
   Handle<FixedArray> contents;
@@ -6056,7 +6031,7 @@
     if (entry->IsString()) {
       copy->set(i, entry);
     } else {
-      ASSERT(entry->IsNumber());
+      DCHECK(entry->IsNumber());
       HandleScope scope(isolate);
       Handle<Object> entry_handle(entry, isolate);
       Handle<Object> entry_str =
@@ -6070,7 +6045,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, raw_key, 0);
 
   // Compute the frame holding the arguments.
@@ -6090,6 +6065,10 @@
 
   HandleScope scope(isolate);
   if (raw_key->IsSymbol()) {
+    Handle<Symbol> symbol = Handle<Symbol>::cast(raw_key);
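+    // An arguments object answers Symbol.iterator with the native context's
+    // array values iterator, bypassing the prototype lookup below.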
+    if (symbol->Equals(isolate->native_context()->iterator_symbol())) {
+      return isolate->native_context()->array_values_iterator();
+    }
     // Lookup in the initial Object.prototype object.
     Handle<Object> result;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -6126,8 +6105,9 @@
   if (String::Equals(isolate->factory()->callee_string(), key)) {
     JSFunction* function = frame->function();
     if (function->shared()->strict_mode() == STRICT) {
-      return isolate->Throw(*isolate->factory()->NewTypeError(
-          "strict_arguments_callee", HandleVector<Object>(NULL, 0)));
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewTypeError("strict_arguments_callee",
+                                HandleVector<Object>(NULL, 0)));
     }
     return function;
   }
@@ -6143,10 +6123,10 @@
 
 RUNTIME_FUNCTION(Runtime_ToFastProperties) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   if (object->IsJSObject() && !object->IsGlobalObject()) {
-    JSObject::TransformToFastProperties(Handle<JSObject>::cast(object), 0);
+    JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0);
   }
   return *object;
 }
@@ -6154,7 +6134,7 @@
 
 RUNTIME_FUNCTION(Runtime_ToBool) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, object, 0);
 
   return isolate->heap()->ToBoolean(object->BooleanValue());
@@ -6165,7 +6145,7 @@
 // Possible optimizations: put the type string into the oddballs.
 RUNTIME_FUNCTION(Runtime_Typeof) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, obj, 0);
   if (obj->IsNumber()) return isolate->heap()->number_string();
   HeapObject* heap_obj = HeapObject::cast(obj);
@@ -6186,11 +6166,9 @@
         return isolate->heap()->boolean_string();
       }
       if (heap_obj->IsNull()) {
-        return FLAG_harmony_typeof
-            ? isolate->heap()->null_string()
-            : isolate->heap()->object_string();
+        return isolate->heap()->object_string();
       }
-      ASSERT(heap_obj->IsUndefined());
+      DCHECK(heap_obj->IsUndefined());
       return isolate->heap()->undefined_string();
     case SYMBOL_TYPE:
       return isolate->heap()->symbol_string();
@@ -6205,6 +6183,35 @@
 }
 
 
+RUNTIME_FUNCTION(Runtime_Booleanize) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(Object, value_raw, 0);
+  CONVERT_SMI_ARG_CHECKED(token_raw, 1);
+  intptr_t value = reinterpret_cast<intptr_t>(value_raw);
+  Token::Value token = static_cast<Token::Value>(token_raw);
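+  // |value_raw| is a Smi-encoded comparison result; Smi tagging preserves
+  // sign and zero, so comparing the raw word against 0 is equivalent to
+  // comparing the untagged value.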
+  switch (token) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      return isolate->heap()->ToBoolean(value == 0);
+    case Token::NE:
+    case Token::NE_STRICT:
+      return isolate->heap()->ToBoolean(value != 0);
+    case Token::LT:
+      return isolate->heap()->ToBoolean(value < 0);
+    case Token::GT:
+      return isolate->heap()->ToBoolean(value > 0);
+    case Token::LTE:
+      return isolate->heap()->ToBoolean(value <= 0);
+    case Token::GTE:
+      return isolate->heap()->ToBoolean(value >= 0);
+    default:
+      // This should only happen during natives fuzzing.
+      return isolate->heap()->undefined_value();
+  }
+}
+
+
 static bool AreDigits(const uint8_t* s, int from, int to) {
   for (int i = from; i < to; i++) {
     if (s[i] < '0' || s[i] > '9') return false;
@@ -6215,8 +6222,8 @@
 
 
 static int ParseDecimalInteger(const uint8_t* s, int from, int to) {
-  ASSERT(to - from < 10);  // Overflow is not possible.
-  ASSERT(from < to);
+  DCHECK(to - from < 10);  // At most 9 digits, so overflow is not possible.
+  DCHECK(from < to);
   int d = s[from] - '0';
 
   for (int i = from + 1; i < to; i++) {
@@ -6229,7 +6236,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringToNumber) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   subject = String::Flatten(subject);
 
@@ -6268,7 +6275,7 @@
         uint32_t hash = StringHasher::MakeArrayIndexHash(d, len);
 #ifdef DEBUG
         subject->Hash();  // Force hash calculation.
-        ASSERT_EQ(static_cast<int>(subject->hash_field()),
+        DCHECK_EQ(static_cast<int>(subject->hash_field()),
                   static_cast<int>(hash));
 #endif
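+        // Cache the computed array-index hash on the string so it never
+        // needs to be hashed again.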
         subject->set_hash_field(hash);
@@ -6292,8 +6299,8 @@
 
 RUNTIME_FUNCTION(Runtime_NewString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
-  CONVERT_SMI_ARG_CHECKED(length, 0);
+  DCHECK(args.length() == 2);
+  CONVERT_INT32_ARG_CHECKED(length, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(is_one_byte, 1);
   if (length == 0) return isolate->heap()->empty_string();
   Handle<String> result;
@@ -6310,9 +6317,9 @@
 
 RUNTIME_FUNCTION(Runtime_TruncateString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(SeqString, string, 0);
-  CONVERT_SMI_ARG_CHECKED(new_length, 1);
+  CONVERT_INT32_ARG_CHECKED(new_length, 1);
   RUNTIME_ASSERT(new_length >= 0);
   return *SeqString::Truncate(string, new_length);
 }
@@ -6320,10 +6327,10 @@
 
 RUNTIME_FUNCTION(Runtime_URIEscape) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
   Handle<String> string = String::Flatten(source);
-  ASSERT(string->IsFlat());
+  DCHECK(string->IsFlat());
   Handle<String> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
@@ -6336,10 +6343,10 @@
 
 RUNTIME_FUNCTION(Runtime_URIUnescape) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
   Handle<String> string = String::Flatten(source);
-  ASSERT(string->IsFlat());
+  DCHECK(string->IsFlat());
   Handle<String> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
@@ -6353,7 +6360,7 @@
 RUNTIME_FUNCTION(Runtime_QuoteJSONString) {
   HandleScope scope(isolate);
   CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result, BasicJsonStringifier::StringifyString(isolate, string));
@@ -6363,7 +6370,7 @@
 
 RUNTIME_FUNCTION(Runtime_BasicJSONStringify) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   BasicJsonStringifier stringifier(isolate);
   Handle<Object> result;
@@ -6375,7 +6382,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringParseInt) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_NUMBER_CHECKED(int, radix, Int32, args[1]);
   RUNTIME_ASSERT(radix == 0 || (2 <= radix && radix <= 36));
@@ -6387,7 +6394,7 @@
     String::FlatContent flat = subject->GetFlatContent();
 
     // ECMA-262 section 15.1.2.3, empty string is NaN
-    if (flat.IsAscii()) {
+    if (flat.IsOneByte()) {
       value = StringToInt(
           isolate->unicode_cache(), flat.ToOneByteVector(), radix);
     } else {
@@ -6402,12 +6409,12 @@
 
 RUNTIME_FUNCTION(Runtime_StringParseFloat) {
   HandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
 
   subject = String::Flatten(subject);
-  double value = StringToDouble(
-      isolate->unicode_cache(), *subject, ALLOW_TRAILING_JUNK, OS::nan_value());
+  double value = StringToDouble(isolate->unicode_cache(), *subject,
+                                ALLOW_TRAILING_JUNK, base::OS::nan_value());
 
   return *isolate->factory()->NewNumber(value);
 }
@@ -6461,7 +6468,7 @@
     } else if (char_length == 1 &&
                (ignore_overflow || !ToUpperOverflows(current))) {
       // Common case: converting the letter resulted in one character.
-      ASSERT(static_cast<uc32>(chars[0]) != current);
+      DCHECK(static_cast<uc32>(chars[0]) != current);
       result->Set(i, chars[0]);
       has_changed_character = true;
       i++;
@@ -6496,7 +6503,8 @@
         current_length += char_length;
         if (current_length > String::kMaxLength) {
           AllowHeapAllocation allocate_error_and_return;
-          return isolate->ThrowInvalidStringLength();
+          THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                         NewInvalidStringLengthError());
         }
       }
       // Try again with the real length.  Return signed if we need
@@ -6539,7 +6547,7 @@
 static inline uintptr_t AsciiRangeMask(uintptr_t w, char m, char n) {
   // Use strict inequalities since in edge cases the function could be
   // further simplified.
-  ASSERT(0 < m && m < n);
+  DCHECK(0 < m && m < n);
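+  // Per-byte trick, valid for ASCII bytes (callers reject non-ASCII input
+  // via or_acc): ((0x7F + n) - b) has its high bit set exactly when b < n,
+  // and (b + (0x7F - m)) has its high bit set exactly when b > m.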
   // Has high bit set in every w byte less than n.
   uintptr_t tmp1 = kOneInEveryByte * (0x7F + n) - w;
   // Has high bit set in every w byte greater than m.
@@ -6559,11 +6567,11 @@
     if (dst[i] == src[i]) continue;
     expected_changed = true;
     if (is_to_lower) {
-      ASSERT('A' <= src[i] && src[i] <= 'Z');
-      ASSERT(dst[i] == src[i] + ('a' - 'A'));
+      DCHECK('A' <= src[i] && src[i] <= 'Z');
+      DCHECK(dst[i] == src[i] + ('a' - 'A'));
     } else {
-      ASSERT('a' <= src[i] && src[i] <= 'z');
-      ASSERT(dst[i] == src[i] - ('a' - 'A'));
+      DCHECK('a' <= src[i] && src[i] <= 'z');
+      DCHECK(dst[i] == src[i] - ('a' - 'A'));
     }
   }
   return (expected_changed == changed);
@@ -6583,41 +6591,45 @@
   DisallowHeapAllocation no_gc;
   // We rely on the distance between upper and lower case letters
   // being a known power of 2.
-  ASSERT('a' - 'A' == (1 << 5));
+  DCHECK('a' - 'A' == (1 << 5));
   // Boundaries for the range of input characters that require conversion.
   static const char lo = Converter::kIsToLower ? 'A' - 1 : 'a' - 1;
   static const char hi = Converter::kIsToLower ? 'Z' + 1 : 'z' + 1;
   bool changed = false;
   uintptr_t or_acc = 0;
   const char* const limit = src + length;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
-  // Process the prefix of the input that requires no conversion one
-  // (machine) word at a time.
-  while (src <= limit - sizeof(uintptr_t)) {
-    const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
-    or_acc |= w;
-    if (AsciiRangeMask(w, lo, hi) != 0) {
-      changed = true;
-      break;
+
+  // dst is newly allocated and always aligned.
+  DCHECK(IsAligned(reinterpret_cast<intptr_t>(dst), sizeof(uintptr_t)));
+  // Only attempt processing one word at a time if src is also aligned.
+  if (IsAligned(reinterpret_cast<intptr_t>(src), sizeof(uintptr_t))) {
+    // Process the prefix of the input that requires no conversion one aligned
+    // (machine) word at a time.
+    while (src <= limit - sizeof(uintptr_t)) {
+      const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+      or_acc |= w;
+      if (AsciiRangeMask(w, lo, hi) != 0) {
+        changed = true;
+        break;
+      }
+      *reinterpret_cast<uintptr_t*>(dst) = w;
+      src += sizeof(uintptr_t);
+      dst += sizeof(uintptr_t);
     }
-    *reinterpret_cast<uintptr_t*>(dst) = w;
-    src += sizeof(uintptr_t);
-    dst += sizeof(uintptr_t);
+    // Process the remainder of the input, performing conversion when
+    // required, one word at a time.
+    while (src <= limit - sizeof(uintptr_t)) {
+      const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
+      or_acc |= w;
+      uintptr_t m = AsciiRangeMask(w, lo, hi);
+      // The mask has high (7th) bit set in every byte that needs
+      // conversion and we know that the distance between cases is
+      // 1 << 5.
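+      // Shifting the mask right by 2 moves the marker from bit 7 to bit 5,
+      // so the XOR below flips exactly the 0x20 case bit of those bytes.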
+      *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
+      src += sizeof(uintptr_t);
+      dst += sizeof(uintptr_t);
+    }
   }
-  // Process the remainder of the input performing conversion when
-  // required one word at a time.
-  while (src <= limit - sizeof(uintptr_t)) {
-    const uintptr_t w = *reinterpret_cast<const uintptr_t*>(src);
-    or_acc |= w;
-    uintptr_t m = AsciiRangeMask(w, lo, hi);
-    // The mask has high (7th) bit set in every byte that needs
-    // conversion and we know that the distance between cases is
-    // 1 << 5.
-    *reinterpret_cast<uintptr_t*>(dst) = w ^ (m >> 2);
-    src += sizeof(uintptr_t);
-    dst += sizeof(uintptr_t);
-  }
-#endif
   // Process the last few bytes of the input (or the whole input if
   // src was not word-aligned).
   while (src < limit) {
@@ -6631,11 +6643,10 @@
     ++src;
     ++dst;
   }
-  if ((or_acc & kAsciiMask) != 0) {
-    return false;
-  }
 
-  ASSERT(CheckFastAsciiConvert(
+  if ((or_acc & kAsciiMask) != 0) return false;
+
+  DCHECK(CheckFastAsciiConvert(
              saved_dst, saved_src, length, changed, Converter::kIsToLower));
 
   *changed_out = changed;
@@ -6667,7 +6678,7 @@
         isolate->factory()->NewRawOneByteString(length).ToHandleChecked();
     DisallowHeapAllocation no_gc;
     String::FlatContent flat_content = s->GetFlatContent();
-    ASSERT(flat_content.IsFlat());
+    DCHECK(flat_content.IsFlat());
     bool has_changed_character = false;
     bool is_ascii = FastAsciiConvert<Converter>(
         reinterpret_cast<char*>(result->GetChars()),
@@ -6688,7 +6699,7 @@
   Object* answer = ConvertCaseHelper(isolate, *s, *result, length, mapping);
   if (answer->IsException() || answer->IsString()) return answer;
 
-  ASSERT(answer->IsSmi());
+  DCHECK(answer->IsSmi());
   length = Smi::cast(answer)->value();
   if (s->IsOneByteRepresentation() && length > 0) {
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -6704,7 +6715,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringToLowerCase) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   return ConvertCase(
       s, isolate, isolate->runtime_state()->to_lower_mapping());
@@ -6713,7 +6724,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringToUpperCase) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   return ConvertCase(
       s, isolate, isolate->runtime_state()->to_upper_mapping());
@@ -6722,7 +6733,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringTrim) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, string, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(trimLeft, 1);
@@ -6755,7 +6766,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringSplit) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(String, subject, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[2]);
@@ -6812,7 +6823,7 @@
   JSObject::EnsureCanContainHeapObjectElements(result);
   result->set_length(Smi::FromInt(part_count));
 
-  ASSERT(result->HasFastObjectElements());
+  DCHECK(result->HasFastObjectElements());
 
   if (part_count == 1 && indices.at(0) == subject_length) {
     FixedArray::cast(result->elements())->set(0, *subject);
@@ -6844,32 +6855,30 @@
 }
 
 
-// Copies ASCII characters to the given fixed array looking up
+// Copies Latin1 characters to the given fixed array looking up
 // one-char strings in the cache. Gives up on the first char that is
 // not in the cache and fills the remainder with smi zeros. Returns
 // the length of the successfully copied prefix.
-static int CopyCachedAsciiCharsToArray(Heap* heap,
-                                       const uint8_t* chars,
-                                       FixedArray* elements,
-                                       int length) {
+static int CopyCachedOneByteCharsToArray(Heap* heap, const uint8_t* chars,
+                                         FixedArray* elements, int length) {
   DisallowHeapAllocation no_gc;
-  FixedArray* ascii_cache = heap->single_character_string_cache();
+  FixedArray* one_byte_cache = heap->single_character_string_cache();
   Object* undefined = heap->undefined_value();
   int i;
   WriteBarrierMode mode = elements->GetWriteBarrierMode(no_gc);
   for (i = 0; i < length; ++i) {
-    Object* value = ascii_cache->get(chars[i]);
+    Object* value = one_byte_cache->get(chars[i]);
     if (value == undefined) break;
     elements->set(i, value, mode);
   }
   if (i < length) {
-    ASSERT(Smi::FromInt(0) == 0);
+    DCHECK(Smi::FromInt(0) == 0);
     memset(elements->data_start() + i, 0, kPointerSize * (length - i));
   }
 #ifdef DEBUG
   for (int j = 0; j < length; ++j) {
     Object* element = elements->get(j);
-    ASSERT(element == Smi::FromInt(0) ||
+    DCHECK(element == Smi::FromInt(0) ||
            (element->IsString() && String::cast(element)->LooksValid()));
   }
 #endif
@@ -6881,7 +6890,7 @@
 // For example, "foo" => ["f", "o", "o"].
 RUNTIME_FUNCTION(Runtime_StringToArray) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(String, s, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
 
@@ -6896,14 +6905,12 @@
 
     DisallowHeapAllocation no_gc;
     String::FlatContent content = s->GetFlatContent();
-    if (content.IsAscii()) {
+    if (content.IsOneByte()) {
       Vector<const uint8_t> chars = content.ToOneByteVector();
       // Note, this will initialize all elements (not only the prefix)
       // to prevent GC from seeing a partially initialized array.
-      position = CopyCachedAsciiCharsToArray(isolate->heap(),
-                                             chars.start(),
-                                             *elements,
-                                             length);
+      position = CopyCachedOneByteCharsToArray(isolate->heap(), chars.start(),
+                                               *elements, length);
     } else {
       MemsetPointer(elements->data_start(),
                     isolate->heap()->undefined_value(),
@@ -6920,7 +6927,7 @@
 
 #ifdef DEBUG
   for (int i = 0; i < length; ++i) {
-    ASSERT(String::cast(elements->get(i))->length() == 1);
+    DCHECK(String::cast(elements->get(i))->length() == 1);
   }
 #endif
 
@@ -6930,7 +6937,7 @@
 
 RUNTIME_FUNCTION(Runtime_NewStringWrapper) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, value, 0);
   return *Object::ToObject(isolate, value).ToHandleChecked();
 }
@@ -6943,18 +6950,18 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NumberToString) {
+RUNTIME_FUNCTION(Runtime_NumberToStringRT) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
 
   return *isolate->factory()->NumberToString(number);
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NumberToStringSkipCache) {
+RUNTIME_FUNCTION(Runtime_NumberToStringSkipCache) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(number, 0);
 
   return *isolate->factory()->NumberToString(number, false);
@@ -6963,7 +6970,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToInteger) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(number, 0);
   return *isolate->factory()->NewNumber(DoubleToInteger(number));
@@ -6972,7 +6979,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToIntegerMapMinusZero) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(number, 0);
   double double_value = DoubleToInteger(number);
@@ -6985,7 +6992,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToJSUint32) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_NUMBER_CHECKED(int32_t, number, Uint32, args[0]);
   return *isolate->factory()->NewNumberFromUint(number);
@@ -6994,7 +7001,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberToJSInt32) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(number, 0);
   return *isolate->factory()->NewNumberFromInt(DoubleToInt32(number));
@@ -7003,9 +7010,9 @@
 
 // Converts a Number to a Smi, if possible. Returns NaN if the number is not
 // a small integer.
-RUNTIME_FUNCTION(RuntimeHidden_NumberToSmi) {
+RUNTIME_FUNCTION(Runtime_NumberToSmi) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, obj, 0);
   if (obj->IsSmi()) {
     return obj;
@@ -7021,16 +7028,16 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_AllocateHeapNumber) {
+RUNTIME_FUNCTION(Runtime_AllocateHeapNumber) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   return *isolate->factory()->NewHeapNumber(0);
 }
 
 
 RUNTIME_FUNCTION(Runtime_NumberAdd) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -7040,7 +7047,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberSub) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -7050,7 +7057,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberMul) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -7060,7 +7067,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberUnaryMinus) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return *isolate->factory()->NewNumber(-x);
@@ -7069,7 +7076,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberDiv) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -7079,7 +7086,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberMod) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -7089,17 +7096,20 @@
 
 RUNTIME_FUNCTION(Runtime_NumberImul) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
-  CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
-  CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
-  return *isolate->factory()->NewNumberFromInt(x * y);
+  // We rely on implementation-defined behavior below, but at least not on
+  // undefined behavior.
+  CONVERT_NUMBER_CHECKED(uint32_t, x, Int32, args[0]);
+  CONVERT_NUMBER_CHECKED(uint32_t, y, Int32, args[1]);
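+  // Unsigned multiplication wraps modulo 2^32; casting the truncated
+  // product back to int32_t gives the Math.imul result on two's-complement
+  // platforms.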
+  int32_t product = static_cast<int32_t>(x * y);
+  return *isolate->factory()->NewNumberFromInt(product);
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_StringAdd) {
+RUNTIME_FUNCTION(Runtime_StringAdd) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(String, str1, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, str2, 1);
   isolate->counters()->string_add_runtime()->Increment();
@@ -7131,7 +7141,7 @@
       } else {
         // Position and length encoded in two smis.
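+        // A negative encoded slice stores the length as its negation; the
+        // position is read from the following array slot.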
         Object* obj = fixed_array->get(++i);
-        ASSERT(obj->IsSmi());
+        DCHECK(obj->IsSmi());
         pos = Smi::cast(obj)->value();
         len = -encoded_slice;
       }
@@ -7181,8 +7191,8 @@
         pos = Smi::cast(next_smi)->value();
         if (pos < 0) return -1;
       }
-      ASSERT(pos >= 0);
-      ASSERT(len >= 0);
+      DCHECK(pos >= 0);
+      DCHECK(len >= 0);
       if (pos > special_length || len > special_length - pos) return -1;
       increment = len;
     } else if (elt->IsString()) {
@@ -7206,10 +7216,12 @@
 
 RUNTIME_FUNCTION(Runtime_StringBuilderConcat) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
-  if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
-  CONVERT_SMI_ARG_CHECKED(array_length, 1);
+  int32_t array_length;
+  if (!args[1]->ToInt32(&array_length)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
+  }
   CONVERT_ARG_HANDLE_CHECKED(String, special, 2);
 
   size_t actual_array_length = 0;
@@ -7219,7 +7231,7 @@
   RUNTIME_ASSERT(static_cast<size_t>(array_length) <= actual_array_length);
 
   // This assumption is used by the slice encoding in one or two smis.
-  ASSERT(Smi::kMaxValue >= String::kMaxLength);
+  DCHECK(Smi::kMaxValue >= String::kMaxLength);
 
   RUNTIME_ASSERT(array->HasFastElements());
   JSObject::EnsureCanContainHeapObjectElements(array);
@@ -7278,10 +7290,12 @@
 
 RUNTIME_FUNCTION(Runtime_StringBuilderJoin) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
-  if (!args[1]->IsSmi()) return isolate->ThrowInvalidStringLength();
-  CONVERT_SMI_ARG_CHECKED(array_length, 1);
+  int32_t array_length;
+  if (!args[1]->ToInt32(&array_length)) {
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
+  }
   CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
   RUNTIME_ASSERT(array->HasFastObjectElements());
   RUNTIME_ASSERT(array_length >= 0);
@@ -7304,7 +7318,7 @@
   int max_nof_separators =
       (String::kMaxLength + separator_length - 1) / separator_length;
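+  // Ceiling division: the largest number of separators whose combined
+  // length still fits within String::kMaxLength.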
   if (max_nof_separators < (array_length - 1)) {
-    return isolate->ThrowInvalidStringLength();
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
   }
   int length = (array_length - 1) * separator_length;
   for (int i = 0; i < array_length; i++) {
@@ -7340,21 +7354,21 @@
   sink += first_length;
 
   for (int i = 1; i < array_length; i++) {
-    ASSERT(sink + separator_length <= end);
+    DCHECK(sink + separator_length <= end);
     String::WriteToFlat(separator_raw, sink, 0, separator_length);
     sink += separator_length;
 
     RUNTIME_ASSERT(fixed_array->get(i)->IsString());
     String* element = String::cast(fixed_array->get(i));
     int element_length = element->length();
-    ASSERT(sink + element_length <= end);
+    DCHECK(sink + element_length <= end);
     String::WriteToFlat(element, sink, 0, element_length);
     sink += element_length;
   }
-  ASSERT(sink == end);
+  DCHECK(sink == end);
 
-  // Use %_FastAsciiArrayJoin instead.
-  ASSERT(!answer->IsOneByteRepresentation());
+  // Use %_FastOneByteArrayJoin instead.
+  DCHECK(!answer->IsOneByteRepresentation());
   return *answer;
 }
 
@@ -7387,7 +7401,7 @@
   if (separator_length > 0) {
     // Array length must be representable as a signed 32-bit number,
     // otherwise the total string length would have been too large.
-    ASSERT(array_length <= 0x7fffffff);  // Is int32_t.
+    DCHECK(array_length <= 0x7fffffff);  // Is int32_t.
     int last_array_index = static_cast<int>(array_length - 1);
     while (previous_separator_position < last_array_index) {
       String::WriteToFlat<Char>(separator, &buffer[cursor],
@@ -7396,13 +7410,13 @@
       previous_separator_position++;
     }
   }
-  ASSERT(cursor <= buffer.length());
+  DCHECK(cursor <= buffer.length());
 }
 
 
 RUNTIME_FUNCTION(Runtime_SparseJoinWithSeparator) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, elements_array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, array_length, Uint32, args[1]);
   CONVERT_ARG_HANDLE_CHECKED(String, separator, 2);
@@ -7415,7 +7429,7 @@
 
   // Find total length of join result.
   int string_length = 0;
-  bool is_ascii = separator->IsOneByteRepresentation();
+  bool is_one_byte = separator->IsOneByteRepresentation();
   bool overflow = false;
   CONVERT_NUMBER_CHECKED(int, elements_length, Int32, elements_array->length());
   RUNTIME_ASSERT(elements_length <= elements_array->elements()->length());
@@ -7432,8 +7446,8 @@
     for (int i = 0; i < elements_length; i += 2) {
       String* string = String::cast(elements->get(i + 1));
       int length = string->length();
-      if (is_ascii && !string->IsOneByteRepresentation()) {
-        is_ascii = false;
+      if (is_one_byte && !string->IsOneByteRepresentation()) {
+        is_one_byte = false;
       }
       if (length > String::kMaxLength ||
           String::kMaxLength - length < string_length) {
@@ -7466,10 +7480,10 @@
     // Throw an exception if the resulting string is too large. See
     // https://code.google.com/p/chromium/issues/detail?id=336820
     // for details.
-    return isolate->ThrowInvalidStringLength();
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewInvalidStringLengthError());
   }
 
-  if (is_ascii) {
+  if (is_one_byte) {
     Handle<SeqOneByteString> result = isolate->factory()->NewRawOneByteString(
         string_length).ToHandleChecked();
     JoinSparseArrayWithSeparator<uint8_t>(
@@ -7495,7 +7509,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberOr) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
   CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
@@ -7505,7 +7519,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberAnd) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
   CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
@@ -7515,7 +7529,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberXor) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
   CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
@@ -7525,7 +7539,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberShl) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
   CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
@@ -7535,7 +7549,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberShr) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_NUMBER_CHECKED(uint32_t, x, Uint32, args[0]);
   CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
@@ -7545,7 +7559,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberSar) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_NUMBER_CHECKED(int32_t, x, Int32, args[0]);
   CONVERT_NUMBER_CHECKED(int32_t, y, Int32, args[1]);
@@ -7556,7 +7570,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberEquals) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -7575,7 +7589,7 @@
 
 RUNTIME_FUNCTION(Runtime_StringEquals) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
@@ -7584,7 +7598,7 @@
   // This is slightly convoluted because the value that signifies
   // equality is 0 and inequality is 1, so we have to negate the result
   // from String::Equals.
-  ASSERT(not_equal == 0 || not_equal == 1);
+  DCHECK(not_equal == 0 || not_equal == 1);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(NOT_EQUAL == 1);
   return Smi::FromInt(not_equal);
@@ -7593,7 +7607,7 @@
 
 RUNTIME_FUNCTION(Runtime_NumberCompare) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   CONVERT_DOUBLE_ARG_CHECKED(y, 1);
@@ -7609,7 +7623,7 @@
 // compared lexicographically.
 RUNTIME_FUNCTION(Runtime_SmiLexicographicCompare) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_SMI_ARG_CHECKED(x_value, 0);
   CONVERT_SMI_ARG_CHECKED(y_value, 1);
 
@@ -7682,9 +7696,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_StringCompare) {
+RUNTIME_FUNCTION(Runtime_StringCompare) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(String, x, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, y, 1);
@@ -7720,9 +7734,9 @@
   int r;
   String::FlatContent x_content = x->GetFlatContent();
   String::FlatContent y_content = y->GetFlatContent();
-  if (x_content.IsAscii()) {
+  if (x_content.IsOneByte()) {
     Vector<const uint8_t> x_chars = x_content.ToOneByteVector();
-    if (y_content.IsAscii()) {
+    if (y_content.IsOneByte()) {
       Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
       r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
     } else {
@@ -7731,7 +7745,7 @@
     }
   } else {
     Vector<const uc16> x_chars = x_content.ToUC16Vector();
-    if (y_content.IsAscii()) {
+    if (y_content.IsOneByte()) {
       Vector<const uint8_t> y_chars = y_content.ToOneByteVector();
       r = CompareChars(x_chars.start(), y_chars.start(), prefix_length);
     } else {
@@ -7752,7 +7766,7 @@
 #define RUNTIME_UNARY_MATH(Name, name)                                         \
 RUNTIME_FUNCTION(Runtime_Math##Name) {                           \
   HandleScope scope(isolate);                                                  \
-  ASSERT(args.length() == 1);                                                  \
+  DCHECK(args.length() == 1);                                                  \
   isolate->counters()->math_##name()->Increment();                             \
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);                                            \
   return *isolate->factory()->NewHeapNumber(std::name(x));                     \
@@ -7767,7 +7781,7 @@
 
 RUNTIME_FUNCTION(Runtime_DoubleHi) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   uint64_t integer = double_to_uint64(x);
   integer = (integer >> 32) & 0xFFFFFFFFu;
@@ -7777,7 +7791,7 @@
 
 RUNTIME_FUNCTION(Runtime_DoubleLo) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   return *isolate->factory()->NewNumber(
       static_cast<int32_t>(double_to_uint64(x) & 0xFFFFFFFFu));
@@ -7786,7 +7800,7 @@
 
 RUNTIME_FUNCTION(Runtime_ConstructDouble) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_NUMBER_CHECKED(uint32_t, hi, Uint32, args[0]);
   CONVERT_NUMBER_CHECKED(uint32_t, lo, Uint32, args[1]);
   uint64_t result = (static_cast<uint64_t>(hi) << 32) | lo;
@@ -7794,12 +7808,29 @@
 }
 
 
+RUNTIME_FUNCTION(Runtime_RemPiO2) {
+  HandleScope handle_scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_DOUBLE_ARG_CHECKED(x, 0);
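+  // fdlibm::rempio2 reduces x modulo pi/2: it computes n and a double-double
+  // remainder y[0] + y[1] such that x ~= n * (pi/2) + y[0] + y[1]. The
+  // triple is returned to JS as the array [n, y0, y1].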
+  Factory* factory = isolate->factory();
+  double y[2] = {0.0, 0.0};
+  int n = fdlibm::rempio2(x, y);
+  Handle<FixedArray> array = factory->NewFixedArray(3);
+  Handle<HeapNumber> y0 = factory->NewHeapNumber(y[0]);
+  Handle<HeapNumber> y1 = factory->NewHeapNumber(y[1]);
+  array->set(0, Smi::FromInt(n));
+  array->set(1, *y0);
+  array->set(2, *y1);
+  return *factory->NewJSArrayWithElements(array);
+}
+
+
 static const double kPiDividedBy4 = 0.78539816339744830962;
 
 
 RUNTIME_FUNCTION(Runtime_MathAtan2) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   isolate->counters()->math_atan2()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -7822,7 +7853,7 @@
 
 RUNTIME_FUNCTION(Runtime_MathExpRT) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   isolate->counters()->math_exp()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -7833,19 +7864,19 @@
 
 RUNTIME_FUNCTION(Runtime_MathFloorRT) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   isolate->counters()->math_floor()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  return *isolate->factory()->NewNumber(std::floor(x));
+  return *isolate->factory()->NewNumber(Floor(x));
 }
 
 
 // Slow version of Math.pow.  We check for fast paths for special cases.
 // Used if VFP3 is not available.
-RUNTIME_FUNCTION(RuntimeHidden_MathPowSlow) {
+RUNTIME_FUNCTION(Runtime_MathPowSlow) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   isolate->counters()->math_pow()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -7866,9 +7897,9 @@
 
 // Fast version of Math.pow if we know that y is not an integer and y is not
 // -0.5 or 0.5.  Used as slow case from full codegen.
-RUNTIME_FUNCTION(RuntimeHidden_MathPow) {
+RUNTIME_FUNCTION(Runtime_MathPowRT) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   isolate->counters()->math_pow()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -7885,12 +7916,12 @@
 
 RUNTIME_FUNCTION(Runtime_RoundNumber) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_NUMBER_ARG_HANDLE_CHECKED(input, 0);
   isolate->counters()->math_round()->Increment();
 
   if (!input->IsHeapNumber()) {
-    ASSERT(input->IsSmi());
+    DCHECK(input->IsSmi());
     return *input;
   }
 
@@ -7922,13 +7953,13 @@
   if (sign && value >= -0.5) return isolate->heap()->minus_zero_value();
 
   // Do not call NumberFromDouble() to avoid extra checks.
-  return *isolate->factory()->NewNumber(std::floor(value + 0.5));
+  return *isolate->factory()->NewNumber(Floor(value + 0.5));
 }
 
 
 RUNTIME_FUNCTION(Runtime_MathSqrtRT) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   isolate->counters()->math_sqrt()->Increment();
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
@@ -7938,17 +7969,17 @@
 
 RUNTIME_FUNCTION(Runtime_MathFround) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
-  float xf = static_cast<float>(x);
+  float xf = DoubleToFloat32(x);
   return *isolate->factory()->NewNumber(xf);
 }
 
 
 RUNTIME_FUNCTION(Runtime_DateMakeDay) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_SMI_ARG_CHECKED(year, 0);
   CONVERT_SMI_ARG_CHECKED(month, 1);
@@ -7961,7 +7992,7 @@
 
 RUNTIME_FUNCTION(Runtime_DateSetValue) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 0);
   CONVERT_DOUBLE_ARG_CHECKED(time, 1);
@@ -7994,16 +8025,13 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NewSloppyArguments) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
-  Object** parameters = reinterpret_cast<Object**>(args[1]);
-  CONVERT_SMI_ARG_CHECKED(argument_count, 2);
-
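+// Helper that materializes a sloppy-mode arguments object, including the
+// parameter map that aliases mapped formals to their context slots.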
+static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
+                                           Handle<JSFunction> callee,
+                                           Object** parameters,
+                                           int argument_count) {
   Handle<JSObject> result =
       isolate->factory()->NewArgumentsObject(callee, argument_count);
+
   // Allocate the elements if needed.
   int parameter_count = callee->shared()->formal_parameter_count();
   if (argument_count > 0) {
@@ -8065,7 +8093,7 @@
               break;
             }
           }
-          ASSERT(context_index >= 0);
+          DCHECK(context_index >= 0);
           arguments->set_the_hole(index);
           parameter_map->set(index + 2, Smi::FromInt(
               Context::MIN_CONTEXT_SLOTS + context_index));
@@ -8084,48 +8112,84 @@
       }
     }
   }
-  return *result;
+  return result;
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NewStrictArguments) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0)
-  Object** parameters = reinterpret_cast<Object**>(args[1]);
-  CONVERT_SMI_ARG_CHECKED(length, 2);
-
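+// Helper for strict-mode arguments objects: a plain copy of the actual
+// arguments, with no aliasing of formal parameters.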
+static Handle<JSObject> NewStrictArguments(Isolate* isolate,
+                                           Handle<JSFunction> callee,
+                                           Object** parameters,
+                                           int argument_count) {
   Handle<JSObject> result =
-        isolate->factory()->NewArgumentsObject(callee, length);
+      isolate->factory()->NewArgumentsObject(callee, argument_count);
 
-  if (length > 0) {
+  if (argument_count > 0) {
     Handle<FixedArray> array =
-        isolate->factory()->NewUninitializedFixedArray(length);
+        isolate->factory()->NewUninitializedFixedArray(argument_count);
     DisallowHeapAllocation no_gc;
     WriteBarrierMode mode = array->GetWriteBarrierMode(no_gc);
-    for (int i = 0; i < length; i++) {
+    for (int i = 0; i < argument_count; i++) {
       array->set(i, *--parameters, mode);
     }
     result->set_elements(*array);
   }
-  return *result;
+  return result;
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NewClosureFromStubFailure) {
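+// Materializes an arguments object for the given callee by locating the
+// frame that holds the actual arguments, then dispatching on language mode.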
+RUNTIME_FUNCTION(Runtime_NewArguments) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+  JavaScriptFrameIterator it(isolate);
+
+  // Find the frame that holds the actual arguments passed to the function.
+  it.AdvanceToArgumentsFrame();
+  JavaScriptFrame* frame = it.frame();
+
+  // Determine parameter location on the stack and dispatch on language mode.
+  int argument_count = frame->GetArgumentsLength();
+  Object** parameters = reinterpret_cast<Object**>(frame->GetParameterSlot(-1));
+  return callee->shared()->strict_mode() == STRICT
+             ? *NewStrictArguments(isolate, callee, parameters, argument_count)
+             : *NewSloppyArguments(isolate, callee, parameters, argument_count);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewSloppyArguments) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+  Object** parameters = reinterpret_cast<Object**>(args[1]);
+  CONVERT_SMI_ARG_CHECKED(argument_count, 2);
+  return *NewSloppyArguments(isolate, callee, parameters, argument_count);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewStrictArguments) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 3);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, callee, 0);
+  Object** parameters = reinterpret_cast<Object**>(args[1]);
+  CONVERT_SMI_ARG_CHECKED(argument_count, 2);
+  return *NewStrictArguments(isolate, callee, parameters, argument_count);
+}
+
+
+RUNTIME_FUNCTION(Runtime_NewClosureFromStubFailure) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 0);
   Handle<Context> context(isolate->context());
   PretenureFlag pretenure_flag = NOT_TENURED;
-  return *isolate->factory()->NewFunctionFromSharedFunctionInfo(
-      shared,  context, pretenure_flag);
+  return *isolate->factory()->NewFunctionFromSharedFunctionInfo(shared, context,
+                                                                pretenure_flag);
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NewClosure) {
+RUNTIME_FUNCTION(Runtime_NewClosure) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
   CONVERT_ARG_HANDLE_CHECKED(SharedFunctionInfo, shared, 1);
   CONVERT_BOOLEAN_ARG_CHECKED(pretenure, 2);
@@ -8190,7 +8254,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionBindArguments) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, bound_function, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, bindee, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, this_object, 2);
@@ -8251,15 +8315,15 @@
       static_cast<PropertyAttributes>(DONT_DELETE | DONT_ENUM | READ_ONLY);
   RETURN_FAILURE_ON_EXCEPTION(
       isolate,
-      JSObject::SetOwnPropertyIgnoreAttributes(bound_function, length_string,
-                                               new_length, attr));
+      JSObject::SetOwnPropertyIgnoreAttributes(
+          bound_function, length_string, new_length, attr));
   return *bound_function;
 }
 
 
 RUNTIME_FUNCTION(Runtime_BoundFunctionGetBindings) {
   HandleScope handles(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, callable, 0);
   if (callable->IsJSFunction()) {
     Handle<JSFunction> function = Handle<JSFunction>::cast(callable);
@@ -8275,7 +8339,7 @@
 
 RUNTIME_FUNCTION(Runtime_NewObjectFromBound) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   // First argument is a function to use as a constructor.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   RUNTIME_ASSERT(function->shared()->bound());
@@ -8288,7 +8352,7 @@
   Handle<Object> bound_function(
       JSReceiver::cast(bound_args->get(JSFunction::kBoundFunctionIndex)),
       isolate);
-  ASSERT(!bound_function->IsJSFunction() ||
+  DCHECK(!bound_function->IsJSFunction() ||
          !Handle<JSFunction>::cast(bound_function)->shared()->bound());
 
   int total_argc = 0;
@@ -8304,7 +8368,7 @@
         isolate, bound_function,
         Execution::TryGetConstructorDelegate(isolate, bound_function));
   }
-  ASSERT(bound_function->IsJSFunction());
+  DCHECK(bound_function->IsJSFunction());
 
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -8321,9 +8385,8 @@
   // If the constructor isn't a proper function we throw a type error.
   if (!constructor->IsJSFunction()) {
     Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
-    Handle<Object> type_error =
-        isolate->factory()->NewTypeError("not_constructor", arguments);
-    return isolate->Throw(*type_error);
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                   NewTypeError("not_constructor", arguments));
   }
 
   Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
@@ -8332,9 +8395,8 @@
   // case generated code bailouts here, since function has no initial_map.
   if (!function->should_have_prototype() && !function->shared()->bound()) {
     Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
-    Handle<Object> type_error =
-        isolate->factory()->NewTypeError("not_constructor", arguments);
-    return isolate->Throw(*type_error);
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                   NewTypeError("not_constructor", arguments));
   }
 
   Debug* debug = isolate->debug();
@@ -8356,7 +8418,7 @@
       // instead of a new JSFunction object. This way, errors are
       // reported the same way whether or not 'Function' is called
       // using 'new'.
-      return isolate->context()->global_object();
+      return isolate->global_proxy();
     }
   }
 
@@ -8378,9 +8440,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NewObject) {
+RUNTIME_FUNCTION(Runtime_NewObject) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 0);
   return Runtime_NewObjectHelper(isolate,
                                  constructor,
@@ -8388,9 +8450,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NewObjectWithAllocationSite) {
+RUNTIME_FUNCTION(Runtime_NewObjectWithAllocationSite) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, feedback, 0);
   Handle<AllocationSite> site;
@@ -8402,9 +8464,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_FinalizeInstanceSize) {
+RUNTIME_FUNCTION(Runtime_FinalizeInstanceSize) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   function->CompleteInobjectSlackTracking();
@@ -8413,9 +8475,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_CompileUnoptimized) {
+RUNTIME_FUNCTION(Runtime_CompileLazy) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
 #ifdef DEBUG
   if (FLAG_trace_lazy && !function->shared()->is_compiled()) {
@@ -8426,38 +8488,28 @@
 #endif
 
   // Compile the target function.
-  ASSERT(function->shared()->allows_lazy_compilation());
+  DCHECK(function->shared()->allows_lazy_compilation());
 
   Handle<Code> code;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, code,
-                                     Compiler::GetUnoptimizedCode(function));
+                                     Compiler::GetLazyCode(function));
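+  // GetLazyCode may legitimately return optimized code, e.g. under
+  // --always-opt, hence both code kinds are accepted below.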
+  DCHECK(code->kind() == Code::FUNCTION ||
+         code->kind() == Code::OPTIMIZED_FUNCTION);
   function->ReplaceCode(*code);
-
-  // All done. Return the compiled code.
-  ASSERT(function->is_compiled());
-  ASSERT(function->code()->kind() == Code::FUNCTION ||
-         (FLAG_always_opt &&
-          function->code()->kind() == Code::OPTIMIZED_FUNCTION));
   return *code;
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_CompileOptimized) {
+RUNTIME_FUNCTION(Runtime_CompileOptimized) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
 
   Handle<Code> unoptimized(function->shared()->code());
-  if (!function->shared()->is_compiled()) {
-    // If the function is not compiled, do not optimize.
-    // This can happen if the debugger is activated and
-    // the function is returned to the not compiled state.
-    // TODO(yangguo): reconsider this.
-    function->ReplaceCode(function->shared()->code());
-  } else if (!isolate->use_crankshaft() ||
-             function->shared()->optimization_disabled() ||
-             isolate->DebuggerHasBreakPoints()) {
+  if (!isolate->use_crankshaft() ||
+      function->shared()->optimization_disabled() ||
+      isolate->DebuggerHasBreakPoints()) {
     // If the function is not optimizable or debugger is active continue
     // using the code from the full compiler.
     if (FLAG_trace_opt) {
@@ -8468,19 +8520,19 @@
           isolate->DebuggerHasBreakPoints() ? "T" : "F");
     }
     function->ReplaceCode(*unoptimized);
-  } else {
-    Compiler::ConcurrencyMode mode = concurrent ? Compiler::CONCURRENT
-                                                : Compiler::NOT_CONCURRENT;
-    Handle<Code> code;
-    if (Compiler::GetOptimizedCode(
-            function, unoptimized, mode).ToHandle(&code)) {
-      function->ReplaceCode(*code);
-    } else {
-      function->ReplaceCode(*unoptimized);
-    }
+    return function->code();
   }
 
-  ASSERT(function->code()->kind() == Code::FUNCTION ||
+  Compiler::ConcurrencyMode mode =
+      concurrent ? Compiler::CONCURRENT : Compiler::NOT_CONCURRENT;
+  Handle<Code> code;
+  if (Compiler::GetOptimizedCode(function, unoptimized, mode).ToHandle(&code)) {
+    function->ReplaceCode(*code);
+  } else {
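+    // Optimization failed or was bailed out of; fall back to the shared
+    // (unoptimized) code so the function remains executable.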
+    function->ReplaceCode(function->shared()->code());
+  }
+
+  DCHECK(function->code()->kind() == Code::FUNCTION ||
          function->code()->kind() == Code::OPTIMIZED_FUNCTION ||
          function->IsInOptimizationQueue());
   return function->code();
@@ -8510,30 +8562,30 @@
 };
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NotifyStubFailure) {
+RUNTIME_FUNCTION(Runtime_NotifyStubFailure) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
-  ASSERT(AllowHeapAllocation::IsAllowed());
+  DCHECK(AllowHeapAllocation::IsAllowed());
   delete deoptimizer;
   return isolate->heap()->undefined_value();
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NotifyDeoptimized) {
+RUNTIME_FUNCTION(Runtime_NotifyDeoptimized) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_SMI_ARG_CHECKED(type_arg, 0);
   Deoptimizer::BailoutType type =
       static_cast<Deoptimizer::BailoutType>(type_arg);
   Deoptimizer* deoptimizer = Deoptimizer::Grab(isolate);
-  ASSERT(AllowHeapAllocation::IsAllowed());
+  DCHECK(AllowHeapAllocation::IsAllowed());
 
   Handle<JSFunction> function = deoptimizer->function();
   Handle<Code> optimized_code = deoptimizer->compiled_code();
 
-  ASSERT(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
-  ASSERT(type == deoptimizer->bailout_type());
+  DCHECK(optimized_code->kind() == Code::OPTIMIZED_FUNCTION);
+  DCHECK(type == deoptimizer->bailout_type());
 
   // Make sure to materialize objects before causing any allocation.
   JavaScriptFrameIterator it(isolate);
@@ -8542,7 +8594,7 @@
 
   JavaScriptFrame* frame = it.frame();
   RUNTIME_ASSERT(frame->function()->IsJSFunction());
-  ASSERT(frame->function() == *function);
+  DCHECK(frame->function() == *function);
 
   // Avoid doing too much work when running with --always-opt and keep
   // the optimized code around.
@@ -8581,10 +8633,15 @@
 
 RUNTIME_FUNCTION(Runtime_DeoptimizeFunction) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   if (!function->IsOptimized()) return isolate->heap()->undefined_value();
 
+  // TODO(turbofan): Deoptimization is not supported yet.
+  if (function->code()->is_turbofanned() && !FLAG_turbo_deoptimization) {
+    return isolate->heap()->undefined_value();
+  }
+
   Deoptimizer::DeoptimizeFunction(*function);
 
   return isolate->heap()->undefined_value();
@@ -8593,7 +8650,7 @@
 
 RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   function->shared()->ClearTypeFeedbackInfo();
   Code* unoptimized = function->shared()->code();
@@ -8606,7 +8663,7 @@
 
 RUNTIME_FUNCTION(Runtime_RunningInSimulator) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 #if defined(USE_SIMULATOR)
   return isolate->heap()->true_value();
 #else
@@ -8617,7 +8674,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsConcurrentRecompilationSupported) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   return isolate->heap()->ToBoolean(
       isolate->concurrent_recompilation_enabled());
 }
@@ -8627,12 +8684,15 @@
   HandleScope scope(isolate);
   RUNTIME_ASSERT(args.length() == 1 || args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+  // The following two assertions are lifted from the DCHECKs inside
+  // JSFunction::MarkForOptimization().
+  RUNTIME_ASSERT(!function->shared()->is_generator());
+  RUNTIME_ASSERT(function->shared()->allows_lazy_compilation() ||
+                 (function->code()->kind() == Code::FUNCTION &&
+                  function->code()->optimizable()));
 
-  if (!function->IsOptimizable() &&
-      !function->IsMarkedForConcurrentOptimization() &&
-      !function->IsInOptimizationQueue()) {
-    return isolate->heap()->undefined_value();
-  }
+  // If the function is optimized, just return.
+  if (function->IsOptimized()) return isolate->heap()->undefined_value();
 
   function->MarkForOptimization();
 
@@ -8640,17 +8700,12 @@
   if (args.length() == 2 &&
       unoptimized->kind() == Code::FUNCTION) {
     CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
-    if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("osr"))) {
+    if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("osr")) && FLAG_use_osr) {
       // Start patching from the currently patched loop nesting level.
-      int current_level = unoptimized->allow_osr_at_loop_nesting_level();
-      ASSERT(BackEdgeTable::Verify(isolate, unoptimized, current_level));
-      if (FLAG_use_osr) {
-        for (int i = current_level + 1; i <= Code::kMaxLoopNestingMarker; i++) {
-          unoptimized->set_allow_osr_at_loop_nesting_level(i);
-          isolate->runtime_profiler()->AttemptOnStackReplacement(*function);
-        }
-      }
-    } else if (type->IsOneByteEqualTo(STATIC_ASCII_VECTOR("concurrent")) &&
+      DCHECK(BackEdgeTable::Verify(isolate, unoptimized));
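+      // Arm OSR for every loop nesting depth at once; this replaces the
+      // old loop that patched one nesting level at a time.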
+      isolate->runtime_profiler()->AttemptOnStackReplacement(
+          *function, Code::kMaxLoopNestingMarker);
+    } else if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
                isolate->concurrent_recompilation_enabled()) {
       function->MarkForConcurrentOptimization();
     }
@@ -8662,7 +8717,7 @@
 
 RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
   function->shared()->set_optimization_disabled(true);
   return isolate->heap()->undefined_value();
@@ -8678,7 +8733,7 @@
   bool sync_with_compiler_thread = true;
   if (args.length() == 2) {
     CONVERT_ARG_HANDLE_CHECKED(String, sync, 1);
-    if (sync->IsOneByteEqualTo(STATIC_ASCII_VECTOR("no sync"))) {
+    if (sync->IsOneByteEqualTo(STATIC_CHAR_VECTOR("no sync"))) {
       sync_with_compiler_thread = false;
     }
   }
@@ -8687,7 +8742,7 @@
       sync_with_compiler_thread) {
     while (function->IsInOptimizationQueue()) {
       isolate->optimizing_compiler_thread()->InstallOptimizedFunctions();
-      OS::Sleep(50);
+      base::OS::Sleep(50);
     }
   }
   if (FLAG_always_opt) {
@@ -8699,13 +8754,16 @@
   if (FLAG_deopt_every_n_times) {
     return Smi::FromInt(6);  // 6 == "maybe deopted".
   }
+  if (function->IsOptimized() && function->code()->is_turbofanned()) {
+    return Smi::FromInt(7);  // 7 == "TurboFan compiler".
+  }
   return function->IsOptimized() ? Smi::FromInt(1)   // 1 == "yes".
                                  : Smi::FromInt(2);  // 2 == "no".
 }
 
 
 RUNTIME_FUNCTION(Runtime_UnblockConcurrentRecompilation) {
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   RUNTIME_ASSERT(FLAG_block_concurrent_recompilation);
   RUNTIME_ASSERT(isolate->concurrent_recompilation_enabled());
   isolate->optimizing_compiler_thread()->Unblock();
@@ -8715,7 +8773,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetOptimizationCount) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   return Smi::FromInt(function->shared()->opt_count());
 }
@@ -8742,12 +8800,12 @@
 
 RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   Handle<Code> caller_code(function->shared()->code());
 
   // We're not prepared to handle a function with arguments object.
-  ASSERT(!function->shared()->uses_arguments());
+  DCHECK(!function->shared()->uses_arguments());
 
   RUNTIME_ASSERT(FLAG_use_osr);
 
@@ -8765,14 +8823,14 @@
       frame->pc() - caller_code->instruction_start());
 
 #ifdef DEBUG
-  ASSERT_EQ(frame->function(), *function);
-  ASSERT_EQ(frame->LookupCode(), *caller_code);
-  ASSERT(caller_code->contains(frame->pc()));
+  DCHECK_EQ(frame->function(), *function);
+  DCHECK_EQ(frame->LookupCode(), *caller_code);
+  DCHECK(caller_code->contains(frame->pc()));
 #endif  // DEBUG
 
 
   BailoutId ast_id = caller_code->TranslatePcOffsetToAstId(pc_offset);
-  ASSERT(!ast_id.IsNone());
+  DCHECK(!ast_id.IsNone());
 
   Compiler::ConcurrencyMode mode =
       isolate->concurrent_osr_enabled() &&
@@ -8829,7 +8887,7 @@
         DeoptimizationInputData::cast(result->deoptimization_data());
 
     if (data->OsrPcOffset()->value() >= 0) {
-      ASSERT(BailoutId(data->OsrAstId()->value()) == ast_id);
+      DCHECK(BailoutId(data->OsrAstId()->value()) == ast_id);
       if (FLAG_trace_osr) {
         PrintF("[OSR - Entry at AST id %d, offset %d in optimized code]\n",
                ast_id.ToInt(), data->OsrPcOffset()->value());
@@ -8860,7 +8918,7 @@
 
 RUNTIME_FUNCTION(Runtime_SetAllocationTimeout) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2 || args.length() == 3);
+  DCHECK(args.length() == 2 || args.length() == 3);
 #ifdef DEBUG
   CONVERT_SMI_ARG_CHECKED(interval, 0);
   CONVERT_SMI_ARG_CHECKED(timeout, 1);
@@ -8882,7 +8940,7 @@
 
 RUNTIME_FUNCTION(Runtime_CheckIsBootstrapping) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
   return isolate->heap()->undefined_value();
 }
@@ -8890,7 +8948,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetRootNaN) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   RUNTIME_ASSERT(isolate->bootstrapper()->IsActive());
   return isolate->heap()->nan_value();
 }
@@ -8898,7 +8956,7 @@
 
 RUNTIME_FUNCTION(Runtime_Call) {
   HandleScope scope(isolate);
-  ASSERT(args.length() >= 2);
+  DCHECK(args.length() >= 2);
   int argc = args.length() - 2;
   CONVERT_ARG_CHECKED(JSReceiver, fun, argc + 1);
   Object* receiver = args[0];
@@ -8930,12 +8988,12 @@
 
 RUNTIME_FUNCTION(Runtime_Apply) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 5);
+  DCHECK(args.length() == 5);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, fun, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, receiver, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, arguments, 2);
-  CONVERT_SMI_ARG_CHECKED(offset, 3);
-  CONVERT_SMI_ARG_CHECKED(argc, 4);
+  CONVERT_INT32_ARG_CHECKED(offset, 3);
+  CONVERT_INT32_ARG_CHECKED(argc, 4);
   RUNTIME_ASSERT(offset >= 0);
   // Loose upper bound to allow fuzzing. We'll most likely run out of
   // stack space before hitting this limit.
@@ -8969,7 +9027,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetFunctionDelegate) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   RUNTIME_ASSERT(!object->IsJSFunction());
   return *Execution::GetFunctionDelegate(isolate, object);
@@ -8978,42 +9036,44 @@
 
 RUNTIME_FUNCTION(Runtime_GetConstructorDelegate) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   RUNTIME_ASSERT(!object->IsJSFunction());
   return *Execution::GetConstructorDelegate(isolate, object);
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NewGlobalContext) {
+RUNTIME_FUNCTION(Runtime_NewGlobalContext) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
   Handle<Context> result =
       isolate->factory()->NewGlobalContext(function, scope_info);
 
-  ASSERT(function->context() == isolate->context());
-  ASSERT(function->context()->global_object() == result->global_object());
+  DCHECK(function->context() == isolate->context());
+  DCHECK(function->context()->global_object() == result->global_object());
   result->global_object()->set_global_context(*result);
   return *result;
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_NewFunctionContext) {
+RUNTIME_FUNCTION(Runtime_NewFunctionContext) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
+
+  DCHECK(function->context() == isolate->context());
   int length = function->shared()->scope_info()->ContextLength();
   return *isolate->factory()->NewFunctionContext(length, function);
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_PushWithContext) {
+RUNTIME_FUNCTION(Runtime_PushWithContext) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   Handle<JSReceiver> extension_object;
   if (args[0]->IsJSReceiver()) {
     extension_object = args.at<JSReceiver>(0);
@@ -9023,10 +9083,8 @@
         Object::ToObject(isolate, args.at<Object>(0));
     if (!maybe_object.ToHandle(&extension_object)) {
       Handle<Object> handle = args.at<Object>(0);
-      Handle<Object> result =
-          isolate->factory()->NewTypeError("with_expression",
-                                           HandleVector(&handle, 1));
-      return isolate->Throw(*result);
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate, NewTypeError("with_expression", HandleVector(&handle, 1)));
     }
   }
 
@@ -9035,7 +9093,7 @@
     // A smi sentinel indicates a context nested inside global code rather
     // than some function.  There is a canonical empty function that can be
     // gotten from the native context.
-    function = handle(isolate->context()->native_context()->closure());
+    function = handle(isolate->native_context()->closure());
   } else {
     function = args.at<JSFunction>(1);
   }
@@ -9048,9 +9106,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_PushCatchContext) {
+RUNTIME_FUNCTION(Runtime_PushCatchContext) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(String, name, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, thrown_object, 1);
   Handle<JSFunction> function;
@@ -9058,7 +9116,7 @@
     // A smi sentinel indicates a context nested inside global code rather
     // than some function.  There is a canonical empty function that can be
     // gotten from the native context.
-    function = handle(isolate->context()->native_context()->closure());
+    function = handle(isolate->native_context()->closure());
   } else {
     function = args.at<JSFunction>(2);
   }
@@ -9070,16 +9128,16 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_PushBlockContext) {
+RUNTIME_FUNCTION(Runtime_PushBlockContext) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 0);
   Handle<JSFunction> function;
   if (args[1]->IsSmi()) {
     // A smi sentinel indicates a context nested inside global code rather
     // than some function.  There is a canonical empty function that can be
     // gotten from the native context.
-    function = handle(isolate->context()->native_context()->closure());
+    function = handle(isolate->native_context()->closure());
   } else {
     function = args.at<JSFunction>(1);
   }
@@ -9093,22 +9151,22 @@
 
 RUNTIME_FUNCTION(Runtime_IsJSModule) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSModule());
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_PushModuleContext) {
+RUNTIME_FUNCTION(Runtime_PushModuleContext) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_SMI_ARG_CHECKED(index, 0);
 
   if (!args[1]->IsScopeInfo()) {
     // Module already initialized. Find hosting context and retrieve context.
     Context* host = Context::cast(isolate->context())->global_context();
     Context* context = Context::cast(host->get(index));
-    ASSERT(context->previous() == isolate->context());
+    DCHECK(context->previous() == isolate->context());
     isolate->set_context(context);
     return context;
   }
@@ -9134,9 +9192,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_DeclareModules) {
+RUNTIME_FUNCTION(Runtime_DeclareModules) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(FixedArray, descriptions, 0);
   Context* host_context = isolate->context();
 
@@ -9161,14 +9219,15 @@
               Accessors::MakeModuleExport(name, index, attr);
           Handle<Object> result =
               JSObject::SetAccessor(module, info).ToHandleChecked();
-          ASSERT(!result->IsUndefined());
+          DCHECK(!result->IsUndefined());
           USE(result);
           break;
         }
         case MODULE: {
           Object* referenced_context = Context::cast(host_context)->get(index);
           Handle<JSModule> value(Context::cast(referenced_context)->module());
-          JSReceiver::SetProperty(module, name, value, FROZEN, STRICT).Assert();
+          JSObject::SetOwnPropertyIgnoreAttributes(module, name, value, FROZEN)
+              .Assert();
           break;
         }
         case INTERNAL:
@@ -9183,14 +9242,14 @@
     JSObject::PreventExtensions(module).Assert();
   }
 
-  ASSERT(!isolate->has_pending_exception());
+  DCHECK(!isolate->has_pending_exception());
   return isolate->heap()->undefined_value();
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_DeleteContextSlot) {
+RUNTIME_FUNCTION(Runtime_DeleteLookupSlot) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(Context, context, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, name, 1);
@@ -9248,6 +9307,23 @@
   // In Win64 they are assigned to a hidden first argument.
   return result;
 }
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
+// For x32 a 128-bit struct return is used because the full codegen and
+// Crankshaft compiler read the two ObjectPair values out of rax and rdx.
+// An alternative would be to use uint64_t and modify both compilers.
+struct ObjectPair {
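+  // Pointers are 32 bits on x32; the *_upper fields pad each entry to
+  // 64 bits so the struct fills the rax:rdx return registers exactly.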
+  Object* x;
+  uint32_t x_upper;
+  Object* y;
+  uint32_t y_upper;
+};
+
+
+static inline ObjectPair MakePair(Object* x, Object* y) {
+  ObjectPair result = {x, 0, y, 0};
+  // Pointers x and y are returned in rax and rdx under the x32 ABI.
+  return result;
+}
 #else
 typedef uint64_t ObjectPair;
 static inline ObjectPair MakePair(Object* x, Object* y) {
@@ -9266,7 +9342,7 @@
 
 static Object* ComputeReceiverForNonGlobal(Isolate* isolate,
                                            JSObject* holder) {
-  ASSERT(!holder->IsGlobalObject());
+  DCHECK(!holder->IsGlobalObject());
   Context* top = isolate->context();
   // Get the context extension function.
   JSFunction* context_extension_function =
@@ -9284,11 +9360,10 @@
 }
 
 
-static ObjectPair LoadContextSlotHelper(Arguments args,
-                                        Isolate* isolate,
-                                        bool throw_error) {
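+// Returns a (value, receiver) pair. On failure the value is the exception
+// sentinel and the receiver slot is NULL.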
+static ObjectPair LoadLookupSlotHelper(Arguments args, Isolate* isolate,
+                                       bool throw_error) {
   HandleScope scope(isolate);
-  ASSERT_EQ(2, args.length());
+  DCHECK_EQ(2, args.length());
 
   if (!args[0]->IsContext() || !args[1]->IsString()) {
     return MakePair(isolate->ThrowIllegalOperation(), NULL);
@@ -9311,7 +9386,7 @@
 
   // If the index is non-negative, the slot has been found in a context.
   if (index >= 0) {
-    ASSERT(holder->IsContext());
+    DCHECK(holder->IsContext());
     // If the "property" we were looking for is a local variable, the
     // receiver is the global object; see ECMA-262, 3rd., 10.1.6 and 10.2.3.
     Handle<Object> receiver = isolate->factory()->undefined_value();
@@ -9321,20 +9396,22 @@
       case MUTABLE_CHECK_INITIALIZED:
       case IMMUTABLE_CHECK_INITIALIZED_HARMONY:
         if (value->IsTheHole()) {
-          Handle<Object> reference_error =
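+          // Allocating the error object can itself fail; throw only if it
+          // materialized, then signal failure via the exception sentinel.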
+          Handle<Object> error;
+          MaybeHandle<Object> maybe_error =
               isolate->factory()->NewReferenceError("not_defined",
                                                     HandleVector(&name, 1));
-          return MakePair(isolate->Throw(*reference_error), NULL);
+          if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+          return MakePair(isolate->heap()->exception(), NULL);
         }
         // FALLTHROUGH
       case MUTABLE_IS_INITIALIZED:
       case IMMUTABLE_IS_INITIALIZED:
       case IMMUTABLE_IS_INITIALIZED_HARMONY:
-        ASSERT(!value->IsTheHole());
+        DCHECK(!value->IsTheHole());
         return MakePair(value, *receiver);
       case IMMUTABLE_CHECK_INITIALIZED:
         if (value->IsTheHole()) {
-          ASSERT((attributes & READ_ONLY) != 0);
+          DCHECK((attributes & READ_ONLY) != 0);
           value = isolate->heap()->undefined_value();
         }
         return MakePair(value, *receiver);
@@ -9349,7 +9426,13 @@
   // property from it.
   if (!holder.is_null()) {
     Handle<JSReceiver> object = Handle<JSReceiver>::cast(holder);
-    ASSERT(object->IsJSProxy() || JSReceiver::HasProperty(object, name));
+#ifdef DEBUG
+    if (!object->IsJSProxy()) {
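+      // HasProperty now reports Maybe<bool>; assert that the lookup
+      // completed and that the property was found.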
+      Maybe<bool> maybe = JSReceiver::HasProperty(object, name);
+      DCHECK(maybe.has_value);
+      DCHECK(maybe.value);
+    }
+#endif
     // GetProperty below can cause GC.
     Handle<Object> receiver_handle(
         object->IsGlobalObject()
@@ -9370,10 +9453,11 @@
 
   if (throw_error) {
     // The property doesn't exist - throw exception.
-    Handle<Object> reference_error =
-        isolate->factory()->NewReferenceError("not_defined",
-                                              HandleVector(&name, 1));
-    return MakePair(isolate->Throw(*reference_error), NULL);
+    Handle<Object> error;
+    MaybeHandle<Object> maybe_error = isolate->factory()->NewReferenceError(
+        "not_defined", HandleVector(&name, 1));
+    if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
+    return MakePair(isolate->heap()->exception(), NULL);
   } else {
     // The property doesn't exist - return undefined.
     return MakePair(isolate->heap()->undefined_value(),
@@ -9382,19 +9466,19 @@
 }
 
 
-RUNTIME_FUNCTION_RETURN_PAIR(RuntimeHidden_LoadContextSlot) {
-  return LoadContextSlotHelper(args, isolate, true);
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlot) {
+  return LoadLookupSlotHelper(args, isolate, true);
 }
 
 
-RUNTIME_FUNCTION_RETURN_PAIR(RuntimeHidden_LoadContextSlotNoReferenceError) {
-  return LoadContextSlotHelper(args, isolate, false);
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_LoadLookupSlotNoReferenceError) {
+  return LoadLookupSlotHelper(args, isolate, false);
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_StoreContextSlot) {
+RUNTIME_FUNCTION(Runtime_StoreLookupSlot) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
 
   CONVERT_ARG_HANDLE_CHECKED(Object, value, 0);
   CONVERT_ARG_HANDLE_CHECKED(Context, context, 1);
@@ -9410,28 +9494,18 @@
                                           &index,
                                           &attributes,
                                           &binding_flags);
+  // In case of JSProxy, an exception might have been thrown.
   if (isolate->has_pending_exception()) return isolate->heap()->exception();
 
+  // The property was found in a context slot.
   if (index >= 0) {
-    // The property was found in a context slot.
-    Handle<Context> context = Handle<Context>::cast(holder);
-    if (binding_flags == MUTABLE_CHECK_INITIALIZED &&
-        context->get(index)->IsTheHole()) {
-      Handle<Object> error =
-          isolate->factory()->NewReferenceError("not_defined",
-                                                HandleVector(&name, 1));
-      return isolate->Throw(*error);
-    }
-    // Ignore if read_only variable.
     if ((attributes & READ_ONLY) == 0) {
-      // Context is a fixed array and set cannot fail.
-      context->set(index, *value);
+      Handle<Context>::cast(holder)->set(index, *value);
     } else if (strict_mode == STRICT) {
       // Setting read only property in strict mode.
-      Handle<Object> error =
-          isolate->factory()->NewTypeError("strict_cannot_assign",
-                                           HandleVector(&name, 1));
-      return isolate->Throw(*error);
+      THROW_NEW_ERROR_RETURN_FAILURE(
+          isolate,
+          NewTypeError("strict_cannot_assign", HandleVector(&name, 1)));
     }
     return *value;
   }
@@ -9440,88 +9514,85 @@
   // context extension object, a property of the subject of a with, or a
   // property of the global object.
   Handle<JSReceiver> object;
-
-  if (!holder.is_null()) {
+  if (attributes != ABSENT) {
     // The property exists on the holder.
     object = Handle<JSReceiver>::cast(holder);
+  } else if (strict_mode == STRICT) {
+    // If absent in strict mode: throw.
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewReferenceError("not_defined", HandleVector(&name, 1)));
   } else {
-    // The property was not found.
-    ASSERT(attributes == ABSENT);
-
-    if (strict_mode == STRICT) {
-      // Throw in strict mode (assignment to undefined variable).
-      Handle<Object> error =
-          isolate->factory()->NewReferenceError(
-              "not_defined", HandleVector(&name, 1));
-      return isolate->Throw(*error);
-    }
-    // In sloppy mode, the property is added to the global object.
-    attributes = NONE;
-    object = Handle<JSReceiver>(isolate->context()->global_object());
+    // If absent in sloppy mode: add the property to the global object.
+    object = Handle<JSReceiver>(context->global_object());
   }
 
-  // Set the property if it's not read only or doesn't yet exist.
-  if ((attributes & READ_ONLY) == 0 ||
-      (JSReceiver::GetOwnPropertyAttributes(object, name) == ABSENT)) {
-    RETURN_FAILURE_ON_EXCEPTION(
-        isolate,
-        JSReceiver::SetProperty(object, name, value, NONE, strict_mode));
-  } else if (strict_mode == STRICT && (attributes & READ_ONLY) != 0) {
-    // Setting read only property in strict mode.
-    Handle<Object> error =
-      isolate->factory()->NewTypeError(
-          "strict_cannot_assign", HandleVector(&name, 1));
-    return isolate->Throw(*error);
-  }
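+  // The remaining cases funnel through the generic store, which performs
+  // the read-only and strict-mode checks that used to be open-coded here.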
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, Object::SetProperty(object, name, value, strict_mode));
+
   return *value;
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_Throw) {
+RUNTIME_FUNCTION(Runtime_Throw) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   return isolate->Throw(args[0]);
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_ReThrow) {
+RUNTIME_FUNCTION(Runtime_ReThrow) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   return isolate->ReThrow(args[0]);
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_PromoteScheduledException) {
+RUNTIME_FUNCTION(Runtime_PromoteScheduledException) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   return isolate->PromoteScheduledException();
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_ThrowReferenceError) {
+RUNTIME_FUNCTION(Runtime_ThrowReferenceError) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
-  Handle<Object> reference_error =
-    isolate->factory()->NewReferenceError("not_defined",
-                                          HandleVector(&name, 1));
-  return isolate->Throw(*reference_error);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewReferenceError("not_defined", HandleVector(&name, 1)));
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_ThrowNotDateError) {
+RUNTIME_FUNCTION(Runtime_ThrowNonMethodError) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
-  return isolate->Throw(*isolate->factory()->NewTypeError(
-      "not_date_object", HandleVector<Object>(NULL, 0)));
+  DCHECK(args.length() == 0);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewReferenceError("non_method", HandleVector<Object>(NULL, 0)));
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_StackGuard) {
+RUNTIME_FUNCTION(Runtime_ThrowUnsupportedSuperError) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 0);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewReferenceError("unsupported_super", HandleVector<Object>(NULL, 0)));
+}
+
+
+RUNTIME_FUNCTION(Runtime_ThrowNotDateError) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 0);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate, NewTypeError("not_date_object", HandleVector<Object>(NULL, 0)));
+}
+
+
+RUNTIME_FUNCTION(Runtime_StackGuard) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 
   // First check if this is a real stack overflow.
   StackLimitCheck check(isolate);
@@ -9533,9 +9604,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_TryInstallOptimizedCode) {
+RUNTIME_FUNCTION(Runtime_TryInstallOptimizedCode) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
 
   // First check if this is a real stack overflow.
@@ -9551,9 +9622,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_Interrupt) {
+RUNTIME_FUNCTION(Runtime_Interrupt) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   return isolate->stack_guard()->HandleInterrupts();
 }
 
@@ -9589,7 +9660,7 @@
 
 RUNTIME_FUNCTION(Runtime_TraceEnter) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   PrintTransition(isolate, NULL);
   return isolate->heap()->undefined_value();
 }
@@ -9597,7 +9668,7 @@
 
 RUNTIME_FUNCTION(Runtime_TraceExit) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, obj, 0);
   PrintTransition(isolate, obj);
   return obj;  // return TOS
@@ -9606,30 +9677,30 @@
 
 RUNTIME_FUNCTION(Runtime_DebugPrint) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
+  OFStream os(stdout);
 #ifdef DEBUG
   if (args[0]->IsString()) {
     // If we have a string, assume it's a code "marker"
     // and print some interesting cpu debugging info.
     JavaScriptFrameIterator it(isolate);
     JavaScriptFrame* frame = it.frame();
-    PrintF("fp = %p, sp = %p, caller_sp = %p: ",
-           frame->fp(), frame->sp(), frame->caller_sp());
+    os << "fp = " << frame->fp() << ", sp = " << frame->sp()
+       << ", caller_sp = " << frame->caller_sp() << ": ";
   } else {
-    PrintF("DebugPrint: ");
+    os << "DebugPrint: ";
   }
-  args[0]->Print();
+  args[0]->Print(os);
   if (args[0]->IsHeapObject()) {
-    PrintF("\n");
-    HeapObject::cast(args[0])->map()->Print();
+    os << "\n";
+    HeapObject::cast(args[0])->map()->Print(os);
   }
 #else
   // ShortPrint is available in release mode. Print is not.
-  args[0]->ShortPrint();
+  os << Brief(args[0]);
 #endif
-  PrintF("\n");
-  Flush();
+  os << endl;
 
   return args[0];  // return TOS
 }
@@ -9637,7 +9708,7 @@
 
 RUNTIME_FUNCTION(Runtime_DebugTrace) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   isolate->PrintStack(stdout);
   return isolate->heap()->undefined_value();
 }
@@ -9645,21 +9716,27 @@
 
 RUNTIME_FUNCTION(Runtime_DateCurrentTime) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   if (FLAG_log_timer_events) LOG(isolate, CurrentTimeEvent());
 
   // According to ECMA-262, section 15.9.1, page 117, the precision of
   // the number in a Date object representing a particular instant in
   // time is milliseconds. Therefore, we floor the result of getting
   // the OS time.
-  double millis = std::floor(OS::TimeCurrentMillis());
+  double millis;
+  if (FLAG_verify_predictable) {
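+    // Predictable mode substitutes a deterministic clock that advances
+    // with the heap's synthetic time instead of reading the OS time.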
+    millis = 1388534400000.0;  // Jan 1 2014 00:00:00 GMT+0000
+    millis += Floor(isolate->heap()->synthetic_time());
+  } else {
+    millis = Floor(base::OS::TimeCurrentMillis());
+  }
   return *isolate->factory()->NewNumber(millis);
 }
 
 
 RUNTIME_FUNCTION(Runtime_DateParseString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, output, 1);
 
@@ -9674,12 +9751,12 @@
 
   bool result;
   String::FlatContent str_content = str->GetFlatContent();
-  if (str_content.IsAscii()) {
+  if (str_content.IsOneByte()) {
     result = DateParser::Parse(str_content.ToOneByteVector(),
                                *output_array,
                                isolate->unicode_cache());
   } else {
-    ASSERT(str_content.IsTwoByte());
+    DCHECK(str_content.IsTwoByte());
     result = DateParser::Parse(str_content.ToUC16Vector(),
                                *output_array,
                                isolate->unicode_cache());
@@ -9695,7 +9772,7 @@
 
 RUNTIME_FUNCTION(Runtime_DateLocalTimezone) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs &&
@@ -9710,7 +9787,7 @@
 
 RUNTIME_FUNCTION(Runtime_DateToUTC) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_DOUBLE_ARG_CHECKED(x, 0);
   RUNTIME_ASSERT(x >= -DateCache::kMaxTimeBeforeUTCInMs &&
@@ -9723,7 +9800,7 @@
 
 RUNTIME_FUNCTION(Runtime_DateCacheVersion) {
   HandleScope hs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   if (!isolate->eternal_handles()->Exists(EternalHandles::DATE_CACHE_VERSION)) {
     Handle<FixedArray> date_cache_version =
         isolate->factory()->NewFixedArray(1, TENURED);
@@ -9742,18 +9819,18 @@
 }
 
 
-RUNTIME_FUNCTION(Runtime_GlobalReceiver) {
+RUNTIME_FUNCTION(Runtime_GlobalProxy) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, global, 0);
   if (!global->IsJSGlobalObject()) return isolate->heap()->null_value();
-  return JSGlobalObject::cast(global)->global_receiver();
+  return JSGlobalObject::cast(global)->global_proxy();
 }
 
 
 RUNTIME_FUNCTION(Runtime_IsAttachedGlobal) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, global, 0);
   if (!global->IsJSGlobalObject()) return isolate->heap()->false_value();
   return isolate->heap()->ToBoolean(
@@ -9763,11 +9840,11 @@
 
 RUNTIME_FUNCTION(Runtime_ParseJson) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
 
   source = String::Flatten(source);
-  // Optimized fast case where we only have ASCII characters.
+  // Optimized fast case where we only have Latin1 characters.
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
@@ -9779,7 +9856,7 @@
 
 bool CodeGenerationFromStringsAllowed(Isolate* isolate,
                                       Handle<Context> context) {
-  ASSERT(context->allow_code_gen_from_strings()->IsFalse());
+  DCHECK(context->allow_code_gen_from_strings()->IsFalse());
   // Check with callback if set.
   AllowCodeGenerationFromStringsCallback callback =
       isolate->allow_code_gen_callback();
@@ -9794,72 +9871,14 @@
 }
 
 
-// Walk up the stack expecting:
-//  - Runtime_CompileString
-//  - JSFunction callee (eval, Function constructor, etc)
-//  - call() (maybe)
-//  - apply() (maybe)
-//  - bind() (maybe)
-// - JSFunction caller (maybe)
-//
-// return true if the caller has the same security token as the callee
-// or if an exit frame was hit, in which case allow it through, as it could
-// have come through the api.
-static bool TokensMatchForCompileString(Isolate* isolate) {
-  MaybeHandle<JSFunction> callee;
-  bool exit_handled = true;
-  bool tokens_match = true;
-  bool done = false;
-  for (StackFrameIterator it(isolate); !it.done() && !done; it.Advance()) {
-    StackFrame* raw_frame = it.frame();
-    if (!raw_frame->is_java_script()) {
-      if (raw_frame->is_exit()) exit_handled = false;
-      continue;
-    }
-    JavaScriptFrame* outer_frame = JavaScriptFrame::cast(raw_frame);
-    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
-    outer_frame->Summarize(&frames);
-    for (int i = frames.length() - 1; i >= 0 && !done; --i) {
-      FrameSummary& frame = frames[i];
-      Handle<JSFunction> fun = frame.function();
-      // Capture the callee function.
-      if (callee.is_null()) {
-        callee = fun;
-        exit_handled = true;
-        continue;
-      }
-      // Exit condition.
-      Handle<Context> context(callee.ToHandleChecked()->context());
-      if (!fun->context()->HasSameSecurityTokenAs(*context)) {
-        tokens_match = false;
-        done = true;
-        continue;
-      }
-      // Skip bound functions in correct origin.
-      if (fun->shared()->bound()) {
-        exit_handled = true;
-        continue;
-      }
-      done = true;
-    }
-  }
-  return !exit_handled || tokens_match;
-}
-
-
 RUNTIME_FUNCTION(Runtime_CompileString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1);
 
   // Extract native context.
-  Handle<Context> context(isolate->context()->native_context());
-
-  // Filter cross security context calls.
-  if (!TokensMatchForCompileString(isolate)) {
-    return isolate->heap()->undefined_value();
-  }
+  Handle<Context> context(isolate->native_context());
 
   // Check if native context allows code generation from
   // strings. Throw an exception if it doesn't.
@@ -9867,24 +9886,28 @@
       !CodeGenerationFromStringsAllowed(isolate, context)) {
     Handle<Object> error_message =
         context->ErrorMessageForCodeGenerationFromStrings();
-    return isolate->Throw(*isolate->factory()->NewEvalError(
-        "code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewEvalError("code_gen_from_strings",
+                              HandleVector<Object>(&error_message, 1)));
   }
 
   // Compile source string in the native context.
   ParseRestriction restriction = function_literal_only
       ? ONLY_SINGLE_FUNCTION_LITERAL : NO_PARSE_RESTRICTION;
+  Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
   Handle<JSFunction> fun;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, fun,
       Compiler::GetFunctionFromEval(
-          source, context, SLOPPY, restriction, RelocInfo::kNoPosition));
+          source, outer_info,
+          context, SLOPPY, restriction, RelocInfo::kNoPosition));
   return *fun;
 }
 
 
 static ObjectPair CompileGlobalEval(Isolate* isolate,
                                     Handle<String> source,
+                                    Handle<SharedFunctionInfo> outer_info,
                                     Handle<Object> receiver,
                                     StrictMode strict_mode,
                                     int scope_position) {
@@ -9897,8 +9920,10 @@
       !CodeGenerationFromStringsAllowed(isolate, native_context)) {
     Handle<Object> error_message =
         native_context->ErrorMessageForCodeGenerationFromStrings();
-    isolate->Throw(*isolate->factory()->NewEvalError(
-        "code_gen_from_strings", HandleVector<Object>(&error_message, 1)));
+    Handle<Object> error;
+    MaybeHandle<Object> maybe_error = isolate->factory()->NewEvalError(
+        "code_gen_from_strings", HandleVector<Object>(&error_message, 1));
+    if (maybe_error.ToHandle(&error)) isolate->Throw(*error);
     return MakePair(isolate->heap()->exception(), NULL);
   }
 
@@ -9909,15 +9934,16 @@
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
       isolate, compiled,
       Compiler::GetFunctionFromEval(
-          source, context, strict_mode, restriction, scope_position),
+          source, outer_info,
+          context, strict_mode, restriction, scope_position),
       MakePair(isolate->heap()->exception(), NULL));
   return MakePair(*compiled, *receiver);
 }
 
 
-RUNTIME_FUNCTION_RETURN_PAIR(RuntimeHidden_ResolvePossiblyDirectEval) {
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ResolvePossiblyDirectEval) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 5);
+  DCHECK(args.length() == 6);
 
   Handle<Object> callee = args.at<Object>(0);
 
@@ -9931,21 +9957,24 @@
     return MakePair(*callee, isolate->heap()->undefined_value());
   }
 
-  ASSERT(args[3]->IsSmi());
-  ASSERT(args.smi_at(3) == SLOPPY || args.smi_at(3) == STRICT);
-  StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(3));
-  ASSERT(args[4]->IsSmi());
+  DCHECK(args[4]->IsSmi());
+  DCHECK(args.smi_at(4) == SLOPPY || args.smi_at(4) == STRICT);
+  StrictMode strict_mode = static_cast<StrictMode>(args.smi_at(4));
+  DCHECK(args[5]->IsSmi());
+  Handle<SharedFunctionInfo> outer_info(args.at<JSFunction>(2)->shared(),
+                                        isolate);
   return CompileGlobalEval(isolate,
                            args.at<String>(1),
-                           args.at<Object>(2),
+                           outer_info,
+                           args.at<Object>(3),
                            strict_mode,
-                           args.smi_at(4));
+                           args.smi_at(5));
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_AllocateInNewSpace) {
+RUNTIME_FUNCTION(Runtime_AllocateInNewSpace) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_SMI_ARG_CHECKED(size, 0);
   RUNTIME_ASSERT(IsAligned(size, kPointerSize));
   RUNTIME_ASSERT(size > 0);
@@ -9954,9 +9983,9 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_AllocateInTargetSpace) {
+RUNTIME_FUNCTION(Runtime_AllocateInTargetSpace) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_SMI_ARG_CHECKED(size, 0);
   CONVERT_SMI_ARG_CHECKED(flags, 1);
   RUNTIME_ASSERT(IsAligned(size, kPointerSize));
@@ -9973,7 +10002,7 @@
 // false otherwise.
 RUNTIME_FUNCTION(Runtime_PushIfAbsent) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, element, 1);
   RUNTIME_ASSERT(array->HasFastSmiOrObjectElements());
@@ -10037,7 +10066,7 @@
       SetDictionaryMode();
       // Fall-through to dictionary mode.
     }
-    ASSERT(!fast_elements_);
+    DCHECK(!fast_elements_);
     Handle<SeededNumberDictionary> dict(
         SeededNumberDictionary::cast(*storage_));
     Handle<SeededNumberDictionary> result =
@@ -10085,7 +10114,7 @@
  private:
   // Convert storage to dictionary mode.
   void SetDictionaryMode() {
-    ASSERT(fast_elements_);
+    DCHECK(fast_elements_);
     Handle<FixedArray> current_storage(*storage_);
     Handle<SeededNumberDictionary> slow_storage(
         SeededNumberDictionary::New(isolate_, current_storage->length()));
@@ -10135,7 +10164,7 @@
     case FAST_HOLEY_ELEMENTS: {
       // Fast elements can't have lengths that are not representable by
       // a 32-bit signed integer.
-      ASSERT(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
+      DCHECK(static_cast<int32_t>(FixedArray::kMaxLength) >= 0);
       int fast_length = static_cast<int>(length);
       Handle<FixedArray> elements(FixedArray::cast(array->elements()));
       for (int i = 0; i < fast_length; i++) {
@@ -10147,10 +10176,10 @@
     case FAST_HOLEY_DOUBLE_ELEMENTS: {
       // Fast elements can't have lengths that are not representable by
       // a 32-bit signed integer.
-      ASSERT(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
+      DCHECK(static_cast<int32_t>(FixedDoubleArray::kMaxLength) >= 0);
       int fast_length = static_cast<int>(length);
       if (array->elements()->IsFixedArray()) {
-        ASSERT(FixedArray::cast(array->elements())->length() == 0);
+        DCHECK(FixedArray::cast(array->elements())->length() == 0);
         break;
       }
       Handle<FixedDoubleArray> elements(
@@ -10199,7 +10228,7 @@
       ExternalArrayClass::cast(receiver->elements()));
   uint32_t len = static_cast<uint32_t>(array->length());
 
-  ASSERT(visitor != NULL);
+  DCHECK(visitor != NULL);
   if (elements_are_ints) {
     if (elements_are_guaranteed_smis) {
       for (uint32_t j = 0; j < len; j++) {
@@ -10262,8 +10291,19 @@
     }
     case FAST_HOLEY_DOUBLE_ELEMENTS:
     case FAST_DOUBLE_ELEMENTS: {
-      // TODO(1810): Decide if it's worthwhile to implement this.
-      UNREACHABLE();
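+      // An empty FixedArray here means the double array's backing store has
+      // not been allocated yet, so there are no indices to collect.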
+      if (object->elements()->IsFixedArray()) {
+        DCHECK(object->elements()->length() == 0);
+        break;
+      }
+      Handle<FixedDoubleArray> elements(
+          FixedDoubleArray::cast(object->elements()));
+      uint32_t length = static_cast<uint32_t>(elements->length());
+      if (range < length) length = range;
+      for (uint32_t i = 0; i < length; i++) {
+        if (!elements->is_the_hole(i)) {
+          indices->Add(i);
+        }
+      }
       break;
     }
     case DICTIONARY_ELEMENTS: {
@@ -10274,7 +10314,7 @@
         HandleScope loop_scope(isolate);
         Handle<Object> k(dict->KeyAt(j), isolate);
         if (dict->IsKey(*k)) {
-          ASSERT(k->IsNumber());
+          DCHECK(k->IsNumber());
           uint32_t index = static_cast<uint32_t>(k->Number());
           if (index < range) {
             indices->Add(index);
@@ -10283,25 +10323,15 @@
       }
       break;
     }
-    default: {
-      int dense_elements_length;
-      switch (kind) {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size)                        \
-        case EXTERNAL_##TYPE##_ELEMENTS: {                                     \
-          dense_elements_length =                                              \
-              External##Type##Array::cast(object->elements())->length();       \
-          break;                                                               \
-        }
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+    case TYPE##_ELEMENTS:                               \
+    case EXTERNAL_##TYPE##_ELEMENTS:
 
-        TYPED_ARRAYS(TYPED_ARRAY_CASE)
+      TYPED_ARRAYS(TYPED_ARRAY_CASE)
 #undef TYPED_ARRAY_CASE
-
-        default:
-          UNREACHABLE();
-          dense_elements_length = 0;
-          break;
-      }
-      uint32_t length = static_cast<uint32_t>(dense_elements_length);
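+    // All typed-array kinds share one body: every index below the
+    // backing-store length is in range and present.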
+    {
+      uint32_t length = static_cast<uint32_t>(
+          FixedArrayBase::cast(object->elements())->length());
       if (range <= length) {
         length = range;
         // We will add all indices, so we might as well clear it first
@@ -10314,13 +10344,28 @@
       if (length == range) return;  // All indices accounted for already.
       break;
     }
+    case SLOPPY_ARGUMENTS_ELEMENTS: {
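+      // Sloppy arguments objects are backed by a parameter map rather than
+      // a plain dense store: read the "length" property (assumed present,
+      // hence ToHandleChecked) and probe each index via the accessor.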
+      MaybeHandle<Object> length_obj =
+          Object::GetProperty(object, isolate->factory()->length_string());
+      double length_num = length_obj.ToHandleChecked()->Number();
+      uint32_t length = static_cast<uint32_t>(DoubleToInt32(length_num));
+      ElementsAccessor* accessor = object->GetElementsAccessor();
+      for (uint32_t i = 0; i < length; i++) {
+        if (accessor->HasElement(object, object, i)) {
+          indices->Add(i);
+        }
+      }
+      break;
+    }
   }
 
-  Handle<Object> prototype(object->GetPrototype(), isolate);
-  if (prototype->IsJSObject()) {
+  PrototypeIterator iter(isolate, object);
+  if (!iter.IsAtEnd()) {
     // The prototype will usually have no inherited element indices,
     // but we have to check.
-    CollectElementIndices(Handle<JSObject>::cast(prototype), range, indices);
+    CollectElementIndices(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)), range,
+        indices);
   }
 }
 
@@ -10348,20 +10393,23 @@
       // to check the prototype for missing elements.
       Handle<FixedArray> elements(FixedArray::cast(receiver->elements()));
       int fast_length = static_cast<int>(length);
-      ASSERT(fast_length <= elements->length());
+      DCHECK(fast_length <= elements->length());
       for (int j = 0; j < fast_length; j++) {
         HandleScope loop_scope(isolate);
         Handle<Object> element_value(elements->get(j), isolate);
         if (!element_value->IsTheHole()) {
           visitor->visit(j, element_value);
-        } else if (JSReceiver::HasElement(receiver, j)) {
-          // Call GetElement on receiver, not its prototype, or getters won't
-          // have the correct receiver.
-          ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-              isolate, element_value,
-              Object::GetElement(isolate, receiver, j),
-              false);
-          visitor->visit(j, element_value);
+        } else {
+          Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
+          if (!maybe.has_value) return false;
+          if (maybe.value) {
+            // Call GetElement on receiver, not its prototype, or getters won't
+            // have the correct receiver.
+            ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+                isolate, element_value,
+                Object::GetElement(isolate, receiver, j), false);
+            visitor->visit(j, element_value);
+          }
         }
       }
       break;
@@ -10373,13 +10421,13 @@
       // Run through the elements FixedArray and use HasElement and GetElement
       // to check the prototype for missing elements.
       if (receiver->elements()->IsFixedArray()) {
-        ASSERT(receiver->elements()->length() == 0);
+        DCHECK(receiver->elements()->length() == 0);
         break;
       }
       Handle<FixedDoubleArray> elements(
           FixedDoubleArray::cast(receiver->elements()));
       int fast_length = static_cast<int>(length);
-      ASSERT(fast_length <= elements->length());
+      DCHECK(fast_length <= elements->length());
       for (int j = 0; j < fast_length; j++) {
         HandleScope loop_scope(isolate);
         if (!elements->is_the_hole(j)) {
@@ -10387,15 +10435,18 @@
           Handle<Object> element_value =
               isolate->factory()->NewNumber(double_value);
           visitor->visit(j, element_value);
-        } else if (JSReceiver::HasElement(receiver, j)) {
-          // Call GetElement on receiver, not its prototype, or getters won't
-          // have the correct receiver.
-          Handle<Object> element_value;
-          ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-              isolate, element_value,
-              Object::GetElement(isolate, receiver, j),
-              false);
-          visitor->visit(j, element_value);
+        } else {
+          Maybe<bool> maybe = JSReceiver::HasElement(receiver, j);
+          if (!maybe.has_value) return false;
+          if (maybe.value) {
+            // Call GetElement on receiver, not its prototype, or getters won't
+            // have the correct receiver.
+            Handle<Object> element_value;
+            ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+                isolate, element_value,
+                Object::GetElement(isolate, receiver, j), false);
+            visitor->visit(j, element_value);
+          }
         }
       }
       break;
@@ -10491,7 +10542,7 @@
  */
 RUNTIME_FUNCTION(Runtime_ArrayConcat) {
   HandleScope handle_scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSArray, arguments, 0);
   int argument_count = static_cast<int>(arguments->length()->Number());
@@ -10560,10 +10611,10 @@
     Handle<FixedArrayBase> storage =
         isolate->factory()->NewFixedDoubleArray(estimate_result_length);
     int j = 0;
+    bool failure = false;
     if (estimate_result_length > 0) {
       Handle<FixedDoubleArray> double_storage =
           Handle<FixedDoubleArray>::cast(storage);
-      bool failure = false;
       for (int i = 0; i < argument_count; i++) {
         Handle<Object> obj(elements->get(i), isolate);
         if (obj->IsSmi()) {
@@ -10584,6 +10635,11 @@
                   FixedDoubleArray::cast(array->elements());
               for (uint32_t i = 0; i < length; i++) {
                 if (elements->is_the_hole(i)) {
+                  // TODO(jkummerow/verwaest): We could be a bit more clever
+                  // here: Check if there are no elements/getters on the
+                  // prototype chain, and if so, allow creation of a holey
+                  // result array.
+                  // Same thing below (holey smi case).
                   failure = true;
                   break;
                 }
@@ -10610,7 +10666,8 @@
               break;
             }
             case FAST_HOLEY_ELEMENTS:
-              ASSERT_EQ(0, length);
+            case FAST_ELEMENTS:
+              DCHECK_EQ(0, length);
               break;
             default:
               UNREACHABLE();
@@ -10619,14 +10676,17 @@
         if (failure) break;
       }
     }
-    Handle<JSArray> array = isolate->factory()->NewJSArray(0);
-    Smi* length = Smi::FromInt(j);
-    Handle<Map> map;
-    map = JSObject::GetElementsTransitionMap(array, kind);
-    array->set_map(*map);
-    array->set_length(length);
-    array->set_elements(*storage);
-    return *array;
+    if (!failure) {
+      Handle<JSArray> array = isolate->factory()->NewJSArray(0);
+      Smi* length = Smi::FromInt(j);
+      Handle<Map> map;
+      map = JSObject::GetElementsTransitionMap(array, kind);
+      array->set_map(*map);
+      array->set_length(length);
+      array->set_elements(*storage);
+      return *array;
+    }
+    // In case of failure, fall through.
   }
 
   Handle<FixedArray> storage;
@@ -10659,9 +10719,9 @@
   }
 
   if (visitor.exceeds_array_limit()) {
-    return isolate->Throw(
-        *isolate->factory()->NewRangeError("invalid_array_length",
-                                           HandleVector<Object>(NULL, 0)));
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewRangeError("invalid_array_length", HandleVector<Object>(NULL, 0)));
   }
   return *visitor.ToArray();
 }
@@ -10671,7 +10731,7 @@
 // very slowly for very deeply nested ConsStrings.  For debugging use only.
 RUNTIME_FUNCTION(Runtime_GlobalPrint) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(String, string, 0);
   ConsStringIteratorOp op;
@@ -10692,7 +10752,7 @@
 // Returns -1 if hole removal is not supported by this method.
 RUNTIME_FUNCTION(Runtime_RemoveArrayHoles) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, limit, Uint32, args[1]);
   return *JSObject::PrepareElementsForSort(object, limit);
@@ -10702,7 +10762,7 @@
 // Move contents of argument 0 (an array) to argument 1 (an array)
 RUNTIME_FUNCTION(Runtime_MoveArrayContents) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, from, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, to, 1);
   JSObject::ValidateElements(from);
@@ -10724,15 +10784,39 @@
 
 // How many elements does this object/array have?
 RUNTIME_FUNCTION(Runtime_EstimateNumberOfElements) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0);
+  Handle<FixedArrayBase> elements(array->elements(), isolate);
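+  // All handles needed below have been created; seal the scope so the
+  // sampling loop cannot allocate further handles.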
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_CHECKED(JSArray, object, 0);
-  HeapObject* elements = object->elements();
   if (elements->IsDictionary()) {
-    int result = SeededNumberDictionary::cast(elements)->NumberOfElements();
+    int result =
+        Handle<SeededNumberDictionary>::cast(elements)->NumberOfElements();
     return Smi::FromInt(result);
   } else {
-    return object->length();
+    DCHECK(array->length()->IsSmi());
+    // For packed elements, we know the exact number of elements.
+    int length = elements->length();
+    ElementsKind kind = array->GetElementsKind();
+    if (IsFastPackedElementsKind(kind)) {
+      return Smi::FromInt(length);
+    }
+    // For holey elements, take samples from the buffer checking for holes
+    // to generate the estimate.
+    const int kNumberOfHoleCheckSamples = 97;
+    int increment = (length < kNumberOfHoleCheckSamples)
+                        ? 1
+                        : static_cast<int>(length / kNumberOfHoleCheckSamples);
+    ElementsAccessor* accessor = array->GetElementsAccessor();
+    int holes = 0;
+    for (int i = 0; i < length; i += increment) {
+      if (!accessor->HasElement(array, array, i, elements)) {
+        ++holes;
+      }
+    }
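+    // Extrapolate: scale the sampled non-hole fraction up to the full
+    // backing-store length (floating-point, to avoid integer truncation).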
+    int estimate = static_cast<int>(
+        static_cast<double>(kNumberOfHoleCheckSamples - holes) /
+        kNumberOfHoleCheckSamples * length);
+    return Smi::FromInt(estimate);
   }
 }
 
@@ -10744,20 +10828,23 @@
 // Intervals can span over some keys that are not in the object.
 RUNTIME_FUNCTION(Runtime_GetArrayKeys) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
   CONVERT_NUMBER_CHECKED(uint32_t, length, Uint32, args[1]);
   if (array->elements()->IsDictionary()) {
     Handle<FixedArray> keys = isolate->factory()->empty_fixed_array();
-    for (Handle<Object> p = array;
-         !p->IsNull();
-         p = Handle<Object>(p->GetPrototype(isolate), isolate)) {
-      if (p->IsJSProxy() || JSObject::cast(*p)->HasIndexedInterceptor()) {
+    for (PrototypeIterator iter(isolate, array,
+                                PrototypeIterator::START_AT_RECEIVER);
+         !iter.IsAtEnd(); iter.Advance()) {
+      if (PrototypeIterator::GetCurrent(iter)->IsJSProxy() ||
+          JSObject::cast(*PrototypeIterator::GetCurrent(iter))
+              ->HasIndexedInterceptor()) {
         // Bail out if we find a proxy or interceptor, likely not worth
         // collecting keys in that case.
         return *isolate->factory()->NewNumberFromUint(length);
       }
-      Handle<JSObject> current = Handle<JSObject>::cast(p);
+      Handle<JSObject> current =
+          Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
       Handle<FixedArray> current_keys =
           isolate->factory()->NewFixedArray(current->NumberOfOwnElements(NONE));
       current->GetOwnElementKeys(*current_keys, NONE);
@@ -10782,7 +10869,7 @@
 
 RUNTIME_FUNCTION(Runtime_LookupAccessor) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, receiver, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
   CONVERT_SMI_ARG_CHECKED(flag, 2);
@@ -10798,7 +10885,7 @@
 
 RUNTIME_FUNCTION(Runtime_DebugBreak) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   isolate->debug()->HandleDebugBreak();
   return isolate->heap()->undefined_value();
 }
@@ -10806,7 +10893,7 @@
 
 // Helper functions for wrapping and unwrapping stack frame ids.
 static Smi* WrapFrameId(StackFrame::Id id) {
-  ASSERT(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
+  DCHECK(IsAligned(OffsetFrom(id), static_cast<intptr_t>(4)));
   return Smi::FromInt(id >> 2);
 }
 
@@ -10822,7 +10909,7 @@
 // args[1]: object supplied during callback
 RUNTIME_FUNCTION(Runtime_SetDebugEventListener) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   RUNTIME_ASSERT(args[0]->IsJSFunction() ||
                  args[0]->IsUndefined() ||
                  args[0]->IsNull());
@@ -10836,75 +10923,62 @@
 
 RUNTIME_FUNCTION(Runtime_Break) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   isolate->stack_guard()->RequestDebugBreak();
   return isolate->heap()->undefined_value();
 }
 
 
-static Handle<Object> DebugLookupResultValue(Isolate* isolate,
-                                             Handle<Object> receiver,
-                                             Handle<Name> name,
-                                             LookupResult* result,
-                                             bool* has_caught = NULL) {
-  Handle<Object> value = isolate->factory()->undefined_value();
-  if  (!result->IsFound()) return value;
-  switch (result->type()) {
-    case NORMAL:
-      value = JSObject::GetNormalizedProperty(
-          handle(result->holder(), isolate), result);
-      break;
-    case FIELD:
-      value = JSObject::FastPropertyAt(handle(result->holder(), isolate),
-                                       result->representation(),
-                                       result->GetFieldIndex());
-      break;
-    case CONSTANT:
-      return handle(result->GetConstant(), isolate);
-    case CALLBACKS: {
-      Handle<Object> structure(result->GetCallbackObject(), isolate);
-      ASSERT(!structure->IsForeign());
-      if (structure->IsAccessorInfo()) {
-        MaybeHandle<Object> obj = JSObject::GetPropertyWithAccessor(
-            receiver, name, handle(result->holder(), isolate), structure);
-        if (!obj.ToHandle(&value)) {
-          value = handle(isolate->pending_exception(), isolate);
-          isolate->clear_pending_exception();
-          if (has_caught != NULL) *has_caught = true;
-          return value;
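+// Walks the property lookup chain with a LookupIterator and returns the
+// value for debugging. If a getter throws, the exception is caught and
+// cleared, the exception object is returned as the value, and |has_caught|
+// is set.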
+static Handle<Object> DebugGetProperty(LookupIterator* it,
+                                       bool* has_caught = NULL) {
+  for (; it->IsFound(); it->Next()) {
+    switch (it->state()) {
+      case LookupIterator::NOT_FOUND:
+      case LookupIterator::TRANSITION:
+        UNREACHABLE();
+      case LookupIterator::ACCESS_CHECK:
+        // Ignore access checks.
+        break;
+      case LookupIterator::INTERCEPTOR:
+      case LookupIterator::JSPROXY:
+        return it->isolate()->factory()->undefined_value();
+      case LookupIterator::ACCESSOR: {
+        Handle<Object> accessors = it->GetAccessors();
+        if (!accessors->IsAccessorInfo()) {
+          return it->isolate()->factory()->undefined_value();
         }
+        MaybeHandle<Object> maybe_result = JSObject::GetPropertyWithAccessor(
+            it->GetReceiver(), it->name(), it->GetHolder<JSObject>(),
+            accessors);
+        Handle<Object> result;
+        if (!maybe_result.ToHandle(&result)) {
+          result = handle(it->isolate()->pending_exception(), it->isolate());
+          it->isolate()->clear_pending_exception();
+          if (has_caught != NULL) *has_caught = true;
+        }
+        return result;
       }
-      break;
+
+      case LookupIterator::DATA:
+        return it->GetDataValue();
     }
-    case INTERCEPTOR:
-    case HANDLER:
-      break;
-    case NONEXISTENT:
-      UNREACHABLE();
-      break;
   }
-  ASSERT(!value->IsTheHole() || result->IsReadOnly());
-  return value->IsTheHole()
-      ? Handle<Object>::cast(isolate->factory()->undefined_value()) : value;
+
+  return it->isolate()->factory()->undefined_value();
 }
 
 
-// Get debugger related details for an object property.
-// args[0]: object holding property
-// args[1]: name of the property
-//
-// The array returned contains the following information:
+// Get debugger-related details for an object property, in the following
+// format:
 // 0: Property value
 // 1: Property details
-// 2: Property value is exception
-// 3: Getter function if defined
-// 4: Setter function if defined
-// Items 2-4 are only filled if the property has either a getter or a setter
-// defined through __defineGetter__ and/or __defineSetter__.
+// 2: Property value came from an interceptor
+// 3: Exception caught while fetching the value
+// 4: Getter function if defined
+// 5: Setter function if defined
+// Items 3-5 are only filled if the property has either a getter or a setter.
 RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
@@ -10920,13 +10994,6 @@
     isolate->set_context(*isolate->debug()->debugger_entry()->GetContext());
   }
 
-  // Skip the global proxy as it has no properties and always delegates to the
-  // real global object.
-  if (obj->IsJSGlobalProxy()) {
-    obj = Handle<JSObject>(JSObject::cast(obj->GetPrototype()));
-  }
-
-
   // Check if the name is trivially convertible to an index and get the element
   // if so.
   uint32_t index;
@@ -10942,66 +11009,50 @@
     return *isolate->factory()->NewJSArrayWithElements(details);
   }
 
-  // Find the number of objects making up this.
-  int length = OwnPrototypeChainLength(*obj);
+  LookupIterator it(obj, name, LookupIterator::HIDDEN);
+  bool has_caught = false;
+  Handle<Object> value = DebugGetProperty(&it, &has_caught);
+  if (!it.IsFound()) return isolate->heap()->undefined_value();
 
-  // Try own lookup on each of the objects.
-  Handle<JSObject> jsproto = obj;
-  for (int i = 0; i < length; i++) {
-    LookupResult result(isolate);
-    jsproto->LookupOwn(name, &result);
-    if (result.IsFound()) {
-      // LookupResult is not GC safe as it holds raw object pointers.
-      // GC can happen later in this code so put the required fields into
-      // local variables using handles when required for later use.
-      Handle<Object> result_callback_obj;
-      if (result.IsPropertyCallbacks()) {
-        result_callback_obj = Handle<Object>(result.GetCallbackObject(),
-                                             isolate);
-      }
-
-
-      bool has_caught = false;
-      Handle<Object> value = DebugLookupResultValue(
-          isolate, obj, name, &result, &has_caught);
-
-      // If the callback object is a fixed array then it contains JavaScript
-      // getter and/or setter.
-      bool has_js_accessors = result.IsPropertyCallbacks() &&
-                              result_callback_obj->IsAccessorPair();
-      Handle<FixedArray> details =
-          isolate->factory()->NewFixedArray(has_js_accessors ? 5 : 2);
-      details->set(0, *value);
-      details->set(1, result.GetPropertyDetails().AsSmi());
-      if (has_js_accessors) {
-        AccessorPair* accessors = AccessorPair::cast(*result_callback_obj);
-        details->set(2, isolate->heap()->ToBoolean(has_caught));
-        details->set(3, accessors->GetComponent(ACCESSOR_GETTER));
-        details->set(4, accessors->GetComponent(ACCESSOR_SETTER));
-      }
-
-      return *isolate->factory()->NewJSArrayWithElements(details);
-    }
-    if (i < length - 1) {
-      jsproto = Handle<JSObject>(JSObject::cast(jsproto->GetPrototype()));
-    }
+  Handle<Object> maybe_pair;
+  if (it.state() == LookupIterator::ACCESSOR) {
+    maybe_pair = it.GetAccessors();
   }
 
-  return isolate->heap()->undefined_value();
+  // If the callback object is an AccessorPair, it contains a JavaScript
+  // getter and/or setter.
+  bool has_js_accessors = !maybe_pair.is_null() && maybe_pair->IsAccessorPair();
+  Handle<FixedArray> details =
+      isolate->factory()->NewFixedArray(has_js_accessors ? 6 : 3);
+  details->set(0, *value);
+  // TODO(verwaest): Get rid of this random way of handling interceptors.
+  PropertyDetails d = it.state() == LookupIterator::INTERCEPTOR
+                          ? PropertyDetails(NONE, NORMAL, 0)
+                          : it.property_details();
+  details->set(1, d.AsSmi());
+  details->set(
+      2, isolate->heap()->ToBoolean(it.state() == LookupIterator::INTERCEPTOR));
+  if (has_js_accessors) {
+    AccessorPair* accessors = AccessorPair::cast(*maybe_pair);
+    details->set(3, isolate->heap()->ToBoolean(has_caught));
+    details->set(4, accessors->GetComponent(ACCESSOR_GETTER));
+    details->set(5, accessors->GetComponent(ACCESSOR_SETTER));
+  }
+
+  return *isolate->factory()->NewJSArrayWithElements(details);
 }
 
 
 RUNTIME_FUNCTION(Runtime_DebugGetProperty) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
 
-  LookupResult result(isolate);
-  obj->Lookup(name, &result);
-  return *DebugLookupResultValue(isolate, obj, name, &result);
+  LookupIterator it(obj, name);
+  return *DebugGetProperty(&it);
 }
 
 
@@ -11009,7 +11060,7 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(Runtime_DebugPropertyTypeFromDetails) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
   return Smi::FromInt(static_cast<int>(details.type()));
 }
@@ -11019,7 +11070,7 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(Runtime_DebugPropertyAttributesFromDetails) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
   return Smi::FromInt(static_cast<int>(details.attributes()));
 }
@@ -11029,7 +11080,7 @@
 // args[0]: smi with property details.
 RUNTIME_FUNCTION(Runtime_DebugPropertyIndexFromDetails) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
   // TODO(verwaest): Depends on the type of details.
   return Smi::FromInt(details.dictionary_index());
@@ -11041,15 +11092,14 @@
 // args[1]: property name
 RUNTIME_FUNCTION(Runtime_DebugNamedInterceptorPropertyValue) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(obj->HasNamedInterceptor());
   CONVERT_ARG_HANDLE_CHECKED(Name, name, 1);
 
   Handle<Object> result;
-  LookupIterator it(obj, name, obj);
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, JSObject::GetProperty(&it));
+      isolate, result, JSObject::GetProperty(obj, name));
   return *result;
 }
 
@@ -11059,7 +11109,7 @@
 // args[1]: index
 RUNTIME_FUNCTION(Runtime_DebugIndexedInterceptorElementValue) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   RUNTIME_ASSERT(obj->HasIndexedInterceptor());
   CONVERT_NUMBER_CHECKED(uint32_t, index, Uint32, args[1]);
@@ -11079,7 +11129,7 @@
 
 RUNTIME_FUNCTION(Runtime_CheckExecutionState) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
   return isolate->heap()->true_value();
@@ -11088,7 +11138,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetFrameCount) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -11101,7 +11151,12 @@
   }
 
   for (JavaScriptFrameIterator it(isolate, id); !it.done(); it.Advance()) {
-    n += it.frame()->GetInlineCount();
+    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+    it.frame()->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0; i--) {
+      // Omit functions from native scripts.
+      if (!frames[i].function()->IsFromNativeScript()) n++;
+    }
   }
   return Smi::FromInt(n);
 }
@@ -11162,14 +11217,17 @@
         ? deoptimized_frame_->HasConstructStub()
         : frame_->IsConstructor();
   }
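+  // Optimized frames read the context from the materialized deoptimized
+  // frame, since the physical frame's context may not match the inlined
+  // function being inspected.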
+  Object* GetContext() {
+    return is_optimized_ ? deoptimized_frame_->GetContext() : frame_->context();
+  }
 
   // To inspect all the provided arguments the frame might need to be
   // replaced with the arguments frame.
   void SetArgumentsFrame(JavaScriptFrame* frame) {
-    ASSERT(has_adapted_arguments_);
+    DCHECK(has_adapted_arguments_);
     frame_ = frame;
     is_optimized_ = frame_->is_optimized();
-    ASSERT(!is_optimized_);
+    DCHECK(!is_optimized_);
   }
 
  private:
@@ -11202,11 +11260,28 @@
   while (save != NULL && !save->IsBelowFrame(frame)) {
     save = save->prev();
   }
-  ASSERT(save != NULL);
+  DCHECK(save != NULL);
   return save;
 }
 
 
+// Advances the iterator to the frame that matches the index and returns the
+// inlined frame index, or -1 if not found.  Skips native JS functions.
+static int FindIndexedNonNativeFrame(JavaScriptFrameIterator* it, int index) {
+  int count = -1;
+  for (; !it->done(); it->Advance()) {
+    List<FrameSummary> frames(FLAG_max_inlining_levels + 1);
+    it->frame()->Summarize(&frames);
+    for (int i = frames.length() - 1; i >= 0; i--) {
+      // Omit functions from native scripts.
+      if (frames[i].function()->IsFromNativeScript()) continue;
+      if (++count == index) return i;
+    }
+  }
+  return -1;
+}
+
+
 // Return an array with frame details
 // args[0]: number: break id
 // args[1]: number: frame index
@@ -11226,7 +11301,7 @@
 // Return value if any
 RUNTIME_FUNCTION(Runtime_GetFrameDetails) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -11240,22 +11315,13 @@
     return heap->undefined_value();
   }
 
-  int count = 0;
   JavaScriptFrameIterator it(isolate, id);
-  for (; !it.done(); it.Advance()) {
-    if (index < count + it.frame()->GetInlineCount()) break;
-    count += it.frame()->GetInlineCount();
-  }
-  if (it.done()) return heap->undefined_value();
+  // Inlined frame index in optimized frame, starting from outer function.
+  int inlined_jsframe_index = FindIndexedNonNativeFrame(&it, index);
+  if (inlined_jsframe_index == -1) return heap->undefined_value();
 
-  bool is_optimized = it.frame()->is_optimized();
-
-  int inlined_jsframe_index = 0;  // Inlined frame index in optimized frame.
-  if (is_optimized) {
-    inlined_jsframe_index =
-        it.frame()->GetInlineCount() - (index - count) - 1;
-  }
   FrameInspector frame_inspector(it.frame(), inlined_jsframe_index, isolate);
+  bool is_optimized = it.frame()->is_optimized();
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
@@ -11274,7 +11340,7 @@
   Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
   Handle<SharedFunctionInfo> shared(function->shared());
   Handle<ScopeInfo> scope_info(shared->scope_info());
-  ASSERT(*scope_info != ScopeInfo::Empty(isolate));
+  DCHECK(*scope_info != ScopeInfo::Empty(isolate));
 
   // Get the locals names and values into a temporary array.
   int local_count = scope_info->LocalCount();
@@ -11302,16 +11368,17 @@
   if (local < local_count) {
     // Get the context containing declarations.
     Handle<Context> context(
-        Context::cast(it.frame()->context())->declaration_context());
+        Context::cast(frame_inspector.GetContext())->declaration_context());
     for (; i < scope_info->LocalCount(); ++i) {
       if (scope_info->LocalIsSynthetic(i))
         continue;
       Handle<String> name(scope_info->LocalName(i));
       VariableMode mode;
       InitializationFlag init_flag;
+      MaybeAssignedFlag maybe_assigned_flag;
       locals->set(local * 2, *name);
-      int context_slot_index =
-          ScopeInfo::ContextSlotIndex(scope_info, name, &mode, &init_flag);
+      int context_slot_index = ScopeInfo::ContextSlotIndex(
+          scope_info, name, &mode, &init_flag, &maybe_assigned_flag);
       Object* value = context->get(context_slot_index);
       locals->set(local * 2 + 1, value);
       local++;
@@ -11465,19 +11532,20 @@
     // native context.
     it.Advance();
     if (receiver->IsUndefined()) {
-      Context* context = function->context();
-      receiver = handle(context->global_object()->global_receiver());
+      receiver = handle(function->global_proxy());
     } else {
-      ASSERT(!receiver->IsNull());
       Context* context = Context::cast(it.frame()->context());
       Handle<Context> native_context(Context::cast(context->native_context()));
-      receiver = Object::ToObject(
-          isolate, receiver, native_context).ToHandleChecked();
+      if (!Object::ToObject(isolate, receiver, native_context)
+               .ToHandle(&receiver)) {
+        // This only happens if the receiver is forcibly set in %_CallFunction.
+        return heap->undefined_value();
+      }
     }
   }
   details->set(kFrameDetailsReceiverIndex, *receiver);
 
-  ASSERT_EQ(details_size, details_index);
+  DCHECK_EQ(details_size, details_index);
   return *isolate->factory()->NewJSArrayWithElements(details);
 }
 
@@ -11485,8 +11553,10 @@
 static bool ParameterIsShadowedByContextLocal(Handle<ScopeInfo> info,
                                               Handle<String> parameter_name) {
   VariableMode mode;
-  InitializationFlag flag;
-  return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &flag) != -1;
+  InitializationFlag init_flag;
+  MaybeAssignedFlag maybe_assigned_flag;
+  return ScopeInfo::ContextSlotIndex(info, parameter_name, &mode, &init_flag,
+                                     &maybe_assigned_flag) != -1;
 }
 
 
@@ -11512,11 +11582,11 @@
                              ? frame_inspector->GetParameter(i)
                              : isolate->heap()->undefined_value(),
                          isolate);
-    ASSERT(!value->IsTheHole());
+    DCHECK(!value->IsTheHole());
 
     RETURN_ON_EXCEPTION(
         isolate,
-        Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY),
+        Runtime::SetObjectProperty(isolate, target, name, value, SLOPPY),
         JSObject);
   }
 
@@ -11529,7 +11599,7 @@
 
     RETURN_ON_EXCEPTION(
         isolate,
-        Runtime::SetObjectProperty(isolate, target, name, value, NONE, SLOPPY),
+        Runtime::SetObjectProperty(isolate, target, name, value, SLOPPY),
         JSObject);
   }
 
@@ -11558,7 +11628,7 @@
     Handle<String> name(scope_info->ParameterName(i));
     if (ParameterIsShadowedByContextLocal(scope_info, name)) continue;
 
-    ASSERT(!frame->GetParameter(i)->IsTheHole());
+    DCHECK(!frame->GetParameter(i)->IsTheHole());
     HandleScope scope(isolate);
     Handle<Object> value =
         Object::GetPropertyOrElement(target, name).ToHandleChecked();
@@ -11611,15 +11681,14 @@
 
       for (int i = 0; i < keys->length(); i++) {
         // Names of variables introduced by eval are strings.
-        ASSERT(keys->get(i)->IsString());
+        DCHECK(keys->get(i)->IsString());
         Handle<String> key(String::cast(keys->get(i)));
         Handle<Object> value;
         ASSIGN_RETURN_ON_EXCEPTION(
             isolate, value, Object::GetPropertyOrElement(ext, key), JSObject);
         RETURN_ON_EXCEPTION(
             isolate,
-            Runtime::SetObjectProperty(
-                isolate, target, key, value, NONE, SLOPPY),
+            Runtime::SetObjectProperty(isolate, target, key, value, SLOPPY),
             JSObject);
       }
     }
@@ -11659,8 +11728,9 @@
     if (String::Equals(variable_name, next_name)) {
       VariableMode mode;
       InitializationFlag init_flag;
-      int context_index =
-          ScopeInfo::ContextSlotIndex(scope_info, next_name, &mode, &init_flag);
+      MaybeAssignedFlag maybe_assigned_flag;
+      int context_index = ScopeInfo::ContextSlotIndex(
+          scope_info, next_name, &mode, &init_flag, &maybe_assigned_flag);
       context->set(context_index, *new_value);
       return true;
     }
@@ -11720,11 +11790,13 @@
           !function_context->IsNativeContext()) {
         Handle<JSObject> ext(JSObject::cast(function_context->extension()));
 
-        if (JSReceiver::HasProperty(ext, variable_name)) {
+        Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
+        DCHECK(maybe.has_value);
+        if (maybe.value) {
           // We don't expect this to do anything except replacing
           // property value.
           Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
-                                     NONE, SLOPPY).Assert();
+                                     SLOPPY).Assert();
           return true;
         }
       }
@@ -11740,7 +11812,7 @@
 MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeClosure(
     Isolate* isolate,
     Handle<Context> context) {
-  ASSERT(context->IsFunctionContext());
+  DCHECK(context->IsFunctionContext());
 
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
   Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -11768,15 +11840,14 @@
     for (int i = 0; i < keys->length(); i++) {
       HandleScope scope(isolate);
       // Names of variables introduced by eval are strings.
-      ASSERT(keys->get(i)->IsString());
+      DCHECK(keys->get(i)->IsString());
       Handle<String> key(String::cast(keys->get(i)));
       Handle<Object> value;
       ASSIGN_RETURN_ON_EXCEPTION(
           isolate, value, Object::GetPropertyOrElement(ext, key), JSObject);
       RETURN_ON_EXCEPTION(
           isolate,
-          Runtime::SetObjectProperty(
-              isolate, closure_scope, key, value, NONE, SLOPPY),
+          Runtime::DefineObjectProperty(closure_scope, key, value, NONE),
           JSObject);
     }
   }
@@ -11790,7 +11861,7 @@
                                     Handle<Context> context,
                                     Handle<String> variable_name,
                                     Handle<Object> new_value) {
-  ASSERT(context->IsFunctionContext());
+  DCHECK(context->IsFunctionContext());
 
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
   Handle<ScopeInfo> scope_info(shared->scope_info());
@@ -11805,10 +11876,12 @@
   // be variables introduced by eval.
   if (context->has_extension()) {
     Handle<JSObject> ext(JSObject::cast(context->extension()));
-    if (JSReceiver::HasProperty(ext, variable_name)) {
+    Maybe<bool> maybe = JSReceiver::HasProperty(ext, variable_name);
+    DCHECK(maybe.has_value);
+    if (maybe.value) {
       // We don't expect this to do anything except replacing property value.
-      Runtime::SetObjectProperty(isolate, ext, variable_name, new_value,
-                                 NONE, SLOPPY).Assert();
+      Runtime::DefineObjectProperty(
+          ext, variable_name, new_value, NONE).Assert();
       return true;
     }
   }
@@ -11822,7 +11895,7 @@
 MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeCatchScope(
     Isolate* isolate,
     Handle<Context> context) {
-  ASSERT(context->IsCatchContext());
+  DCHECK(context->IsCatchContext());
   Handle<String> name(String::cast(context->extension()));
   Handle<Object> thrown_object(context->get(Context::THROWN_OBJECT_INDEX),
                                isolate);
@@ -11830,8 +11903,7 @@
       isolate->factory()->NewJSObject(isolate->object_function());
   RETURN_ON_EXCEPTION(
       isolate,
-      Runtime::SetObjectProperty(isolate, catch_scope, name, thrown_object,
-                                 NONE, SLOPPY),
+      Runtime::DefineObjectProperty(catch_scope, name, thrown_object, NONE),
       JSObject);
   return catch_scope;
 }
@@ -11841,7 +11913,7 @@
                                   Handle<Context> context,
                                   Handle<String> variable_name,
                                   Handle<Object> new_value) {
-  ASSERT(context->IsCatchContext());
+  DCHECK(context->IsCatchContext());
   Handle<String> name(String::cast(context->extension()));
   if (!String::Equals(name, variable_name)) {
     return false;
@@ -11856,7 +11928,7 @@
 MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeBlockScope(
     Isolate* isolate,
     Handle<Context> context) {
-  ASSERT(context->IsBlockContext());
+  DCHECK(context->IsBlockContext());
   Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
 
   // Allocate and initialize a JSObject with all the arguments, stack locals
@@ -11879,7 +11951,7 @@
 MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeModuleScope(
     Isolate* isolate,
     Handle<Context> context) {
-  ASSERT(context->IsModuleContext());
+  DCHECK(context->IsModuleContext());
   Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
 
   // Allocate and initialize a JSObject with all the members of the debugged
@@ -11988,7 +12060,7 @@
         if (scope_info->scope_type() == GLOBAL_SCOPE) {
           info.MarkAsGlobal();
         } else {
-          ASSERT(scope_info->scope_type() == EVAL_SCOPE);
+          DCHECK(scope_info->scope_type() == EVAL_SCOPE);
           info.MarkAsEval();
           info.SetContext(Handle<Context>(function_->context()));
         }
@@ -12022,7 +12094,7 @@
 
   // More scopes?
   bool Done() {
-    ASSERT(!failed_);
+    DCHECK(!failed_);
     return context_.is_null();
   }
 
@@ -12030,11 +12102,11 @@
 
   // Move to the next scope.
   void Next() {
-    ASSERT(!failed_);
+    DCHECK(!failed_);
     ScopeType scope_type = Type();
     if (scope_type == ScopeTypeGlobal) {
       // The global scope is always the last in the chain.
-      ASSERT(context_->IsNativeContext());
+      DCHECK(context_->IsNativeContext());
       context_ = Handle<Context>();
       return;
     }
@@ -12042,7 +12114,7 @@
       context_ = Handle<Context>(context_->previous(), isolate_);
     } else {
       if (nested_scope_chain_.last()->HasContext()) {
-        ASSERT(context_->previous() != NULL);
+        DCHECK(context_->previous() != NULL);
         context_ = Handle<Context>(context_->previous(), isolate_);
       }
       nested_scope_chain_.RemoveLast();
@@ -12051,28 +12123,28 @@
 
   // Return the type of the current scope.
   ScopeType Type() {
-    ASSERT(!failed_);
+    DCHECK(!failed_);
     if (!nested_scope_chain_.is_empty()) {
       Handle<ScopeInfo> scope_info = nested_scope_chain_.last();
       switch (scope_info->scope_type()) {
         case FUNCTION_SCOPE:
-          ASSERT(context_->IsFunctionContext() ||
+          DCHECK(context_->IsFunctionContext() ||
                  !scope_info->HasContext());
           return ScopeTypeLocal;
         case MODULE_SCOPE:
-          ASSERT(context_->IsModuleContext());
+          DCHECK(context_->IsModuleContext());
           return ScopeTypeModule;
         case GLOBAL_SCOPE:
-          ASSERT(context_->IsNativeContext());
+          DCHECK(context_->IsNativeContext());
           return ScopeTypeGlobal;
         case WITH_SCOPE:
-          ASSERT(context_->IsWithContext());
+          DCHECK(context_->IsWithContext());
           return ScopeTypeWith;
         case CATCH_SCOPE:
-          ASSERT(context_->IsCatchContext());
+          DCHECK(context_->IsCatchContext());
           return ScopeTypeCatch;
         case BLOCK_SCOPE:
-          ASSERT(!scope_info->HasContext() ||
+          DCHECK(!scope_info->HasContext() ||
                  context_->IsBlockContext());
           return ScopeTypeBlock;
         case EVAL_SCOPE:
@@ -12080,7 +12152,7 @@
       }
     }
     if (context_->IsNativeContext()) {
-      ASSERT(context_->global_object()->IsGlobalObject());
+      DCHECK(context_->global_object()->IsGlobalObject());
       return ScopeTypeGlobal;
     }
     if (context_->IsFunctionContext()) {
@@ -12095,19 +12167,19 @@
     if (context_->IsModuleContext()) {
       return ScopeTypeModule;
     }
-    ASSERT(context_->IsWithContext());
+    DCHECK(context_->IsWithContext());
     return ScopeTypeWith;
   }
 
   // Return the JavaScript object with the content of the current scope.
   MaybeHandle<JSObject> ScopeObject() {
-    ASSERT(!failed_);
+    DCHECK(!failed_);
     switch (Type()) {
       case ScopeIterator::ScopeTypeGlobal:
         return Handle<JSObject>(CurrentContext()->global_object());
       case ScopeIterator::ScopeTypeLocal:
         // Materialize the content of the local scope into a JSObject.
-        ASSERT(nested_scope_chain_.length() == 1);
+        DCHECK(nested_scope_chain_.length() == 1);
         return MaterializeLocalScope(isolate_, frame_, inlined_jsframe_index_);
       case ScopeIterator::ScopeTypeWith:
         // Return the with object.
@@ -12128,7 +12200,7 @@
 
   bool SetVariableValue(Handle<String> variable_name,
                         Handle<Object> new_value) {
-    ASSERT(!failed_);
+    DCHECK(!failed_);
     switch (Type()) {
       case ScopeIterator::ScopeTypeGlobal:
         break;
@@ -12154,7 +12226,7 @@
   }
 
   Handle<ScopeInfo> CurrentScopeInfo() {
-    ASSERT(!failed_);
+    DCHECK(!failed_);
     if (!nested_scope_chain_.is_empty()) {
       return nested_scope_chain_.last();
     } else if (context_->IsBlockContext()) {
@@ -12168,7 +12240,7 @@
   // Return the context for this scope. For the local context there might not
   // be an actual context.
   Handle<Context> CurrentContext() {
-    ASSERT(!failed_);
+    DCHECK(!failed_);
     if (Type() == ScopeTypeGlobal ||
         nested_scope_chain_.is_empty()) {
       return context_;
@@ -12182,22 +12254,23 @@
 #ifdef DEBUG
   // Debug print of the content of the current scope.
   void DebugPrint() {
-    ASSERT(!failed_);
+    OFStream os(stdout);
+    DCHECK(!failed_);
     switch (Type()) {
       case ScopeIterator::ScopeTypeGlobal:
-        PrintF("Global:\n");
-        CurrentContext()->Print();
+        os << "Global:\n";
+        CurrentContext()->Print(os);
         break;
 
       case ScopeIterator::ScopeTypeLocal: {
-        PrintF("Local:\n");
+        os << "Local:\n";
         function_->shared()->scope_info()->Print();
         if (!CurrentContext().is_null()) {
-          CurrentContext()->Print();
+          CurrentContext()->Print(os);
           if (CurrentContext()->has_extension()) {
             Handle<Object> extension(CurrentContext()->extension(), isolate_);
             if (extension->IsJSContextExtensionObject()) {
-              extension->Print();
+              extension->Print(os);
             }
           }
         }
@@ -12205,23 +12278,23 @@
       }
 
       case ScopeIterator::ScopeTypeWith:
-        PrintF("With:\n");
-        CurrentContext()->extension()->Print();
+        os << "With:\n";
+        CurrentContext()->extension()->Print(os);
         break;
 
       case ScopeIterator::ScopeTypeCatch:
-        PrintF("Catch:\n");
-        CurrentContext()->extension()->Print();
-        CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print();
+        os << "Catch:\n";
+        CurrentContext()->extension()->Print(os);
+        CurrentContext()->get(Context::THROWN_OBJECT_INDEX)->Print(os);
         break;
 
       case ScopeIterator::ScopeTypeClosure:
-        PrintF("Closure:\n");
-        CurrentContext()->Print();
+        os << "Closure:\n";
+        CurrentContext()->Print(os);
         if (CurrentContext()->has_extension()) {
           Handle<Object> extension(CurrentContext()->extension(), isolate_);
           if (extension->IsJSContextExtensionObject()) {
-            extension->Print();
+            extension->Print(os);
           }
         }
         break;
@@ -12254,7 +12327,7 @@
       // information we get from the context chain but nothing about
       // completely stack allocated scopes or stack allocated locals.
       // Or it could be due to stack overflow.
-      ASSERT(isolate_->has_pending_exception());
+      DCHECK(isolate_->has_pending_exception());
       failed_ = true;
     }
   }
@@ -12265,7 +12338,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetScopeCount) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -12293,7 +12366,7 @@
 // of the corresponding statement.
 RUNTIME_FUNCTION(Runtime_GetStepInPositions) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -12400,7 +12473,7 @@
 // 1: Scope object
 RUNTIME_FUNCTION(Runtime_GetScopeDetails) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -12440,7 +12513,7 @@
 // 1: Scope object
 RUNTIME_FUNCTION(Runtime_GetAllScopesDetails) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3 || args.length() == 4);
+  DCHECK(args.length() == 3 || args.length() == 4);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -12477,7 +12550,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetFunctionScopeCount) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   // Check arguments.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
@@ -12494,7 +12567,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetFunctionScopeDetails) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   // Check arguments.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
@@ -12541,7 +12614,7 @@
 // Return true if success and false otherwise
 RUNTIME_FUNCTION(Runtime_SetScopeVariableValue) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 6);
+  DCHECK(args.length() == 6);
 
   // Check arguments.
   CONVERT_NUMBER_CHECKED(int, index, Int32, args[3]);
@@ -12575,7 +12648,7 @@
 
 RUNTIME_FUNCTION(Runtime_DebugPrintScopes) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 
 #ifdef DEBUG
   // Print the scopes for the top frame.
@@ -12593,7 +12666,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetThreadCount) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -12624,7 +12697,7 @@
 // 1: Thread id
 RUNTIME_FUNCTION(Runtime_GetThreadDetails) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -12670,7 +12743,7 @@
 // args[0]: disable break state
 RUNTIME_FUNCTION(Runtime_SetDisableBreak) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_BOOLEAN_ARG_CHECKED(disable_break, 0);
   isolate->debug()->set_disable_break(disable_break);
   return isolate->heap()->undefined_value();
@@ -12684,7 +12757,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetBreakLocations) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, fun, 0);
   CONVERT_NUMBER_CHECKED(int32_t, statement_aligned_code, Int32, args[1]);
@@ -12712,7 +12785,7 @@
 // args[2]: number: break point object
 RUNTIME_FUNCTION(Runtime_SetFunctionBreakPoint) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
   RUNTIME_ASSERT(source_position >= function->shared()->start_position() &&
@@ -12736,7 +12809,7 @@
 // args[3]: number: break point object
 RUNTIME_FUNCTION(Runtime_SetScriptBreakPoint) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_ARG_HANDLE_CHECKED(JSValue, wrapper, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
   RUNTIME_ASSERT(source_position >= 0);
@@ -12768,7 +12841,7 @@
 // args[0]: number: break point object
 RUNTIME_FUNCTION(Runtime_ClearBreakPoint) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, break_point_object_arg, 0);
 
   // Clear break point.
@@ -12783,7 +12856,7 @@
 // args[1]: Boolean indicating on/off.
 RUNTIME_FUNCTION(Runtime_ChangeBreakOnException) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
   CONVERT_BOOLEAN_ARG_CHECKED(enable, 1);
 
@@ -12800,7 +12873,7 @@
 // args[0]: boolean indicating uncaught exceptions
 RUNTIME_FUNCTION(Runtime_IsBreakOnException) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_NUMBER_CHECKED(uint32_t, type_arg, Uint32, args[0]);
 
   ExceptionBreakType type = static_cast<ExceptionBreakType>(type_arg);
@@ -12816,7 +12889,7 @@
 //          of frames to step down.
 RUNTIME_FUNCTION(Runtime_PrepareStep) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -12868,7 +12941,7 @@
 // Clear all stepping set by PrepareStep.
 RUNTIME_FUNCTION(Runtime_ClearStepping) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   isolate->debug()->ClearStepping();
   return isolate->heap()->undefined_value();
 }
@@ -12882,11 +12955,11 @@
     Handle<JSFunction> function) {
   // Do not materialize the arguments object for eval or top-level code.
   // Skip if "arguments" is already taken.
-  if (!function->shared()->is_function() ||
-      JSReceiver::HasOwnProperty(
-          target, isolate->factory()->arguments_string())) {
-    return target;
-  }
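+  // HasOwnProperty returns Maybe<bool>; a missing value means an exception
+  // is pending, which is propagated here as an empty handle.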
+  if (!function->shared()->is_function()) return target;
+  Maybe<bool> maybe = JSReceiver::HasOwnProperty(
+      target, isolate->factory()->arguments_string());
+  if (!maybe.has_value) return MaybeHandle<JSObject>();
+  if (maybe.value) return target;
 
   // FunctionGetArguments can't throw an exception.
   Handle<JSObject> arguments = Handle<JSObject>::cast(
@@ -12894,8 +12967,7 @@
   Handle<String> arguments_str = isolate->factory()->arguments_string();
   RETURN_ON_EXCEPTION(
       isolate,
-      Runtime::SetObjectProperty(
-          isolate, target, arguments_str, arguments, ::NONE, SLOPPY),
+      Runtime::DefineObjectProperty(target, arguments_str, arguments, NONE),
       JSObject);
   return target;
 }
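
The rewritten HasOwnProperty check above shows the Maybe<T> discipline this patch adopts: a property lookup can itself throw, so the caller must distinguish "no answer, exception pending" from "answer is false". A rough stand-in with the same field names as the usage above (V8's actual Maybe is declared in include/v8.h):

    // Either a value, or nothing because an exception is pending and the
    // caller has to propagate the failure (here: an empty MaybeHandle).
    template <typename T>
    struct Maybe {
      bool has_value;
      T value;
    };

    template <typename T>
    Maybe<T> Just(T value) { return Maybe<T>{true, value}; }

    template <typename T>
    Maybe<T> Nothing() { return Maybe<T>{false, T()}; }

The call site then branches three ways, exactly as above: bail out when !maybe.has_value, return early when maybe.value, and fall through to materialize otherwise.
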
@@ -12903,6 +12975,7 @@
 
 // Compile and evaluate source for the given context.
 static MaybeHandle<Object> DebugEvaluate(Isolate* isolate,
+                                         Handle<SharedFunctionInfo> outer_info,
                                          Handle<Context> context,
                                          Handle<Object> context_extension,
                                          Handle<Object> receiver,
@@ -12917,6 +12990,7 @@
   ASSIGN_RETURN_ON_EXCEPTION(
       isolate, eval_fun,
       Compiler::GetFunctionFromEval(source,
+                                    outer_info,
                                     context,
                                     SLOPPY,
                                     NO_PARSE_RESTRICTION,
@@ -12932,7 +13006,9 @@
   // Skip the global proxy as it has no properties and always delegates to the
   // real global object.
   if (result->IsJSGlobalProxy()) {
-    result = Handle<JSObject>(JSObject::cast(result->GetPrototype(isolate)));
+    PrototypeIterator iter(isolate, result);
+    // TODO(verwaest): This will crash when the global proxy is detached.
+    result = Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
   }
 
   // Clear the oneshot breakpoints so that the debugger does not step further.
@@ -12941,6 +13017,16 @@
 }
 
 
+static Handle<JSObject> NewJSObjectWithNullProto(Isolate* isolate) {
+  Handle<JSObject> result =
+      isolate->factory()->NewJSObject(isolate->object_function());
+  Handle<Map> new_map = Map::Copy(Handle<Map>(result->map()));
+  new_map->set_prototype(*isolate->factory()->null_value());
+  JSObject::MigrateToMap(result, new_map);
+  return result;
+}
+
+
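
NewJSObjectWithNullProto is the runtime-level cousin of Object.create(null): the materialized scope object used below must not inherit from Object.prototype, or inherited names such as toString could leak into debug-evaluate lookups. A toy model of why cutting the chain helps (hypothetical Obj type; the real helper copies the object's Map and nulls its prototype slot):

    #include <cstring>

    struct Obj {
      const char* own_name;   // pretend each object has one own property
      const Obj* prototype;   // nullptr terminates the chain
    };

    // Property lookup walks the prototype chain; with a null prototype it
    // stops at the object itself, so nothing inherited can shadow locals.
    bool HasProperty(const Obj* o, const char* name) {
      for (; o != nullptr; o = o->prototype) {
        if (std::strcmp(o->own_name, name) == 0) return true;
      }
      return false;
    }
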
 // Evaluate a piece of JavaScript in the context of a stack frame for
 // debugging.  Things that need special attention are:
 // - Parameters and stack-allocated locals need to be materialized.  Altered
@@ -12951,7 +13037,7 @@
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
-  ASSERT(args.length() == 6);
+  DCHECK(args.length() == 6);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -12970,6 +13056,7 @@
   JavaScriptFrame* frame = it.frame();
   FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
   Handle<JSFunction> function(JSFunction::cast(frame_inspector.GetFunction()));
+  Handle<SharedFunctionInfo> outer_info(function->shared());
 
   // Traverse the saved contexts chain to find the active context for the
   // selected frame.
@@ -12979,12 +13066,11 @@
   isolate->set_context(*(save->context()));
 
   // Evaluate on the context of the frame.
-  Handle<Context> context(Context::cast(frame->context()));
-  ASSERT(!context.is_null());
+  Handle<Context> context(Context::cast(frame_inspector.GetContext()));
+  DCHECK(!context.is_null());
 
   // Materialize stack locals and the arguments object.
-  Handle<JSObject> materialized =
-      isolate->factory()->NewJSObject(isolate->object_function());
+  Handle<JSObject> materialized = NewJSObjectWithNullProto(isolate);
 
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, materialized,
@@ -13002,7 +13088,8 @@
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
-      DebugEvaluate(isolate, context, context_extension, receiver, source));
+      DebugEvaluate(isolate, outer_info,
+                    context, context_extension, receiver, source));
 
   // Write back potential changes to materialized stack locals to the stack.
   UpdateStackLocalsFromMaterializedObject(
@@ -13017,7 +13104,7 @@
 
   // Check the execution state and decode arguments frame and source to be
   // evaluated.
-  ASSERT(args.length() == 4);
+  DCHECK(args.length() == 4);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -13041,18 +13128,20 @@
   // Get the native context now set to the top context from before the
   // debugger was invoked.
   Handle<Context> context = isolate->native_context();
-  Handle<Object> receiver = isolate->global_object();
+  Handle<JSObject> receiver(context->global_proxy());
+  Handle<SharedFunctionInfo> outer_info(context->closure()->shared(), isolate);
   Handle<Object> result;
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
-      DebugEvaluate(isolate, context, context_extension, receiver, source));
+      DebugEvaluate(isolate, outer_info,
+                    context, context_extension, receiver, source));
   return *result;
 }
 
 
 RUNTIME_FUNCTION(Runtime_DebugGetLoadedScripts) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 
   // Fill the script objects.
   Handle<FixedArray> instances = isolate->debug()->GetLoadedScripts();
@@ -13108,17 +13197,12 @@
         // Check instance filter if supplied. This is normally used to avoid
         // references from mirror objects (see Runtime_IsInPrototypeChain).
         if (!instance_filter->IsUndefined()) {
-          Object* V = obj;
-          while (true) {
-            Object* prototype = V->GetPrototype(isolate);
-            if (prototype->IsNull()) {
-              break;
-            }
-            if (instance_filter == prototype) {
+          for (PrototypeIterator iter(isolate, obj); !iter.IsAtEnd();
+               iter.Advance()) {
+            if (iter.GetCurrent() == instance_filter) {
               obj = NULL;  // Don't add this object.
               break;
             }
-            V = prototype;
           }
         }
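
The filter loop above trades a hand-rolled GetPrototype walk for PrototypeIterator. A toy version showing just the API shape used here (the real iterator also copes with hidden prototypes and access checks):

    struct Obj { const Obj* prototype; };  // nullptr ends the chain

    class PrototypeIterator {
     public:
      explicit PrototypeIterator(const Obj* receiver)
          : current_(receiver->prototype) {}  // begin at the first prototype
      bool IsAtEnd() const { return current_ == nullptr; }
      void Advance() { current_ = current_->prototype; }
      const Obj* GetCurrent() const { return current_; }

     private:
      const Obj* current_;
    };

With the cursor owned by the iterator, the null check and the manual V = prototype update both disappear from the call site.
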
 
@@ -13154,7 +13238,7 @@
 // args[2]: the maximum number of objects to return
 RUNTIME_FUNCTION(Runtime_DebugReferencedBy) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   // Check parameters.
   CONVERT_ARG_HANDLE_CHECKED(JSObject, target, 0);
@@ -13166,10 +13250,8 @@
 
 
   // Get the constructor function for context extension and arguments array.
-  Handle<JSObject> arguments_boilerplate(
-      isolate->context()->native_context()->sloppy_arguments_boilerplate());
   Handle<JSFunction> arguments_function(
-      JSFunction::cast(arguments_boilerplate->map()->constructor()));
+      JSFunction::cast(isolate->sloppy_arguments_map()->constructor()));
 
   // Get the number of referencing objects.
   int count;
@@ -13196,8 +13278,7 @@
   }
 
   // Return result as JS array.
-  Handle<JSFunction> constructor(
-      isolate->context()->native_context()->array_function());
+  Handle<JSFunction> constructor = isolate->array_function();
 
   Handle<JSObject> result = isolate->factory()->NewJSObject(constructor);
   JSArray::SetContent(Handle<JSArray>::cast(result), instances);
@@ -13242,7 +13323,7 @@
 // args[1]: the maximum number of objects to return
 RUNTIME_FUNCTION(Runtime_DebugConstructedBy) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
 
   // Check parameters.
@@ -13279,8 +13360,7 @@
   }
 
   // Return result as JS array.
-  Handle<JSFunction> array_function(
-      isolate->context()->native_context()->array_function());
+  Handle<JSFunction> array_function = isolate->array_function();
   Handle<JSObject> result = isolate->factory()->NewJSObject(array_function);
   JSArray::SetContent(Handle<JSArray>::cast(result), instances);
   return *result;
@@ -13291,7 +13371,7 @@
 // args[0]: the object to find the prototype for.
 RUNTIME_FUNCTION(Runtime_DebugGetPrototype) {
   HandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, obj, 0);
   return *GetPrototypeSkipHiddenPrototypes(isolate, obj);
 }
@@ -13300,7 +13380,7 @@
 // Patches script source (should be called upon BeforeCompile event).
 RUNTIME_FUNCTION(Runtime_DebugSetScriptSource) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSValue, script_wrapper, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
@@ -13318,8 +13398,8 @@
 
 RUNTIME_FUNCTION(Runtime_SystemBreak) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
-  OS::DebugBreak();
+  DCHECK(args.length() == 0);
+  base::OS::DebugBreak();
   return isolate->heap()->undefined_value();
 }
 
@@ -13327,13 +13407,15 @@
 RUNTIME_FUNCTION(Runtime_DebugDisassembleFunction) {
   HandleScope scope(isolate);
 #ifdef DEBUG
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   // Get the function and make sure it is compiled.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
   if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
     return isolate->heap()->exception();
   }
-  func->code()->PrintLn();
+  OFStream os(stdout);
+  func->code()->Print(os);
+  os << endl;
 #endif  // DEBUG
   return isolate->heap()->undefined_value();
 }
@@ -13342,13 +13424,15 @@
 RUNTIME_FUNCTION(Runtime_DebugDisassembleConstructor) {
   HandleScope scope(isolate);
 #ifdef DEBUG
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   // Get the function and make sure it is compiled.
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, func, 0);
   if (!Compiler::EnsureCompiled(func, KEEP_EXCEPTION)) {
     return isolate->heap()->exception();
   }
-  func->shared()->construct_stub()->PrintLn();
+  OFStream os(stdout);
+  func->shared()->construct_stub()->Print(os);
+  os << endl;
 #endif  // DEBUG
   return isolate->heap()->undefined_value();
 }
@@ -13356,7 +13440,7 @@
 
 RUNTIME_FUNCTION(Runtime_FunctionGetInferredName) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(JSFunction, f, 0);
   return f->shared()->inferred_name();
@@ -13372,7 +13456,7 @@
   for (HeapObject* obj = iterator->next();
        obj != NULL;
        obj = iterator->next()) {
-    ASSERT(obj != NULL);
+    DCHECK(obj != NULL);
     if (!obj->IsSharedFunctionInfo()) {
       continue;
     }
@@ -13395,7 +13479,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditFindSharedFunctionInfosForScript) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSValue, script_value, 0);
 
   RUNTIME_ASSERT(script_value->value()->IsScript());
@@ -13440,7 +13524,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditGatherCompileInfo) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_CHECKED(JSValue, script, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
 
@@ -13460,7 +13544,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditReplaceScript) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_CHECKED(JSValue, original_script_value, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, new_source, 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, old_script_name, 2);
@@ -13483,7 +13567,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditFunctionSourceUpdated) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 0);
   RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_info));
 
@@ -13496,7 +13580,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditReplaceFunctionCode) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, new_compile_info, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_info, 1);
   RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_info));
@@ -13510,7 +13594,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditFunctionSetScript) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(Object, function_object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, script_object, 1);
 
@@ -13537,7 +13621,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditReplaceRefToNestedFunction) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(JSValue, parent_wrapper, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSValue, orig_wrapper, 1);
@@ -13560,7 +13644,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditPatchFunctionPositions) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, position_change_array, 1);
   RUNTIME_ASSERT(SharedInfoWrapper::IsInstance(shared_array));
@@ -13577,10 +13661,11 @@
 RUNTIME_FUNCTION(Runtime_LiveEditCheckAndDropActivations) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSArray, shared_array, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(do_drop, 1);
   RUNTIME_ASSERT(shared_array->length()->IsSmi());
+  RUNTIME_ASSERT(shared_array->HasFastElements());
   int array_length = Smi::cast(shared_array->length())->value();
   for (int i = 0; i < array_length; i++) {
     Handle<Object> element =
@@ -13600,7 +13685,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditCompareStrings) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(String, s1, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, s2, 1);
 
@@ -13613,7 +13698,7 @@
 RUNTIME_FUNCTION(Runtime_LiveEditRestartFrame) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_NUMBER_CHECKED(int, break_id, Int32, args[0]);
   RUNTIME_ASSERT(CheckExecutionState(isolate, break_id));
 
@@ -13627,14 +13712,11 @@
     return heap->undefined_value();
   }
 
-  int count = 0;
   JavaScriptFrameIterator it(isolate, id);
-  for (; !it.done(); it.Advance()) {
-    if (index < count + it.frame()->GetInlineCount()) break;
-    count += it.frame()->GetInlineCount();
-  }
-  if (it.done()) return heap->undefined_value();
-
+  int inlined_jsframe_index = FindIndexedNonNativeFrame(&it, index);
+  if (inlined_jsframe_index == -1) return heap->undefined_value();
+  // We don't really care what the inlined frame index is, since we are
+  // throwing away the entire frame anyway.
   const char* error_message = LiveEdit::RestartFrame(it.frame());
   if (error_message) {
     return *(isolate->factory()->InternalizeUtf8String(error_message));
@@ -13648,7 +13730,7 @@
 RUNTIME_FUNCTION(Runtime_GetFunctionCodePositionFromSource) {
   HandleScope scope(isolate);
   CHECK(isolate->debug()->live_edit_enabled());
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
 
@@ -13685,7 +13767,7 @@
 // to have a stack with C++ frame in the middle.
 RUNTIME_FUNCTION(Runtime_ExecuteInDebugContext) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(without_debugger, 1);
 
@@ -13693,14 +13775,14 @@
   if (without_debugger) {
     maybe_result = Execution::Call(isolate,
                                    function,
-                                   isolate->global_object(),
+                                   handle(function->global_proxy()),
                                    0,
                                    NULL);
   } else {
     DebugScope debug_scope(isolate->debug());
     maybe_result = Execution::Call(isolate,
                                    function,
-                                   isolate->global_object(),
+                                   handle(function->global_proxy()),
                                    0,
                                    NULL);
   }
@@ -13713,7 +13795,7 @@
 // Sets a v8 flag.
 RUNTIME_FUNCTION(Runtime_SetFlags) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(String, arg, 0);
   SmartArrayPointer<char> flags =
       arg->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
@@ -13726,7 +13808,7 @@
 // Presently, it only does a full GC.
 RUNTIME_FUNCTION(Runtime_CollectGarbage) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "%CollectGarbage");
   return isolate->heap()->undefined_value();
 }
@@ -13735,7 +13817,7 @@
 // Gets the current heap usage.
 RUNTIME_FUNCTION(Runtime_GetHeapUsage) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   int usage = static_cast<int>(isolate->heap()->SizeOfObjects());
   if (!Smi::IsValid(usage)) {
     return *isolate->factory()->NewNumberFromInt(usage);
@@ -13749,7 +13831,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, locale_id_str, 0);
 
   v8::String::Utf8Value locale_id(v8::Utils::ToLocal(locale_id_str));
@@ -13784,7 +13866,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, service, 0);
 
   const icu::Locale* available_locales = NULL;
@@ -13832,7 +13914,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 
   icu::Locale default_locale;
 
@@ -13845,7 +13927,7 @@
     return *factory->NewStringFromAsciiChecked(result);
   }
 
-  return *factory->NewStringFromStaticAscii("und");
+  return *factory->NewStringFromStaticChars("und");
 }
 
 
@@ -13853,7 +13935,7 @@
   HandleScope scope(isolate);
   Factory* factory = isolate->factory();
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSArray, input, 0);
 
@@ -13862,8 +13944,8 @@
   // Can be bumped when callers' requirements change.
   RUNTIME_ASSERT(length < 100);
   Handle<FixedArray> output = factory->NewFixedArray(length);
-  Handle<Name> maximized = factory->NewStringFromStaticAscii("maximized");
-  Handle<Name> base = factory->NewStringFromStaticAscii("base");
+  Handle<Name> maximized = factory->NewStringFromStaticChars("maximized");
+  Handle<Name> base = factory->NewStringFromStaticChars("base");
   for (unsigned int i = 0; i < length; ++i) {
     Handle<Object> locale_id;
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
@@ -13921,18 +14003,10 @@
     }
 
     Handle<JSObject> result = factory->NewJSObject(isolate->object_function());
-    RETURN_FAILURE_ON_EXCEPTION(isolate,
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            result,
-            maximized,
-            factory->NewStringFromAsciiChecked(base_max_locale),
-            NONE));
-    RETURN_FAILURE_ON_EXCEPTION(isolate,
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            result,
-            base,
-            factory->NewStringFromAsciiChecked(base_locale),
-            NONE));
+    Handle<String> value = factory->NewStringFromAsciiChecked(base_max_locale);
+    JSObject::AddProperty(result, maximized, value, NONE);
+    value = factory->NewStringFromAsciiChecked(base_locale);
+    JSObject::AddProperty(result, base, value, NONE);
     output->set(i, *result);
   }
 
@@ -13945,7 +14019,7 @@
 RUNTIME_FUNCTION(Runtime_IsInitializedIntlObject) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
 
@@ -13961,7 +14035,7 @@
 RUNTIME_FUNCTION(Runtime_IsInitializedIntlObjectOfType) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, expected_type, 1);
@@ -13979,7 +14053,7 @@
 RUNTIME_FUNCTION(Runtime_MarkAsInitializedIntlObjectOfType) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, input, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, type, 1);
@@ -13998,15 +14072,14 @@
 RUNTIME_FUNCTION(Runtime_GetImplFromInitializedIntlObject) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(Object, input, 0);
 
   if (!input->IsJSObject()) {
     Vector< Handle<Object> > arguments = HandleVector(&input, 1);
-    Handle<Object> type_error =
-        isolate->factory()->NewTypeError("not_intl_object", arguments);
-    return isolate->Throw(*type_error);
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                   NewTypeError("not_intl_object", arguments));
   }
 
   Handle<JSObject> obj = Handle<JSObject>::cast(input);
@@ -14015,9 +14088,8 @@
   Handle<Object> impl(obj->GetHiddenProperty(marker), isolate);
   if (impl->IsTheHole()) {
     Vector< Handle<Object> > arguments = HandleVector(&obj, 1);
-    Handle<Object> type_error =
-        isolate->factory()->NewTypeError("not_intl_object", arguments);
-    return isolate->Throw(*type_error);
+    THROW_NEW_ERROR_RETURN_FAILURE(isolate,
+                                   NewTypeError("not_intl_object", arguments));
   }
   return *impl;
 }
@@ -14026,7 +14098,7 @@
 RUNTIME_FUNCTION(Runtime_CreateDateTimeFormat) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
@@ -14049,12 +14121,10 @@
 
   local_object->SetInternalField(0, reinterpret_cast<Smi*>(date_format));
 
-  RETURN_FAILURE_ON_EXCEPTION(isolate,
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          local_object,
-          isolate->factory()->NewStringFromStaticAscii("dateFormat"),
-          isolate->factory()->NewStringFromStaticAscii("valid"),
-          NONE));
+  Factory* factory = isolate->factory();
+  Handle<String> key = factory->NewStringFromStaticChars("dateFormat");
+  Handle<String> value = factory->NewStringFromStaticChars("valid");
+  JSObject::AddProperty(local_object, key, value, NONE);
 
   // Make object handle weak so we can delete the date format once GC kicks in.
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
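
The weak-handle pattern here (repeated below for the number format, collator, and break iterator) ties the native icu object's lifetime to the JS wrapper: the global handle does not keep the wrapper alive, and the registered callback deletes the icu object once GC collects it. std::weak_ptr gives a rough single-threaded analogue of that non-owning relationship (illustration only; V8 uses GC callbacks, not reference counting):

    #include <cstdio>
    #include <memory>

    struct DateFormat {};  // stand-in for the heap-allocated icu object

    int main() {
      std::weak_ptr<DateFormat> weak_handle;
      {
        auto wrapper = std::make_shared<DateFormat>();
        weak_handle = wrapper;  // weak: does not extend the lifetime
      }  // wrapper destroyed here, as GC would collect the JS object
      std::printf("expired: %s\n", weak_handle.expired() ? "yes" : "no");
      return 0;
    }
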
@@ -14068,7 +14138,7 @@
 RUNTIME_FUNCTION(Runtime_InternalDateFormat) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSDate, date, 1);
@@ -14098,7 +14168,7 @@
 RUNTIME_FUNCTION(Runtime_InternalDateParse) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, date_format_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, date_string, 1);
@@ -14117,7 +14187,7 @@
   ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
       isolate, result,
       Execution::NewDate(isolate, static_cast<double>(date)));
-  ASSERT(result->IsJSDate());
+  DCHECK(result->IsJSDate());
   return *result;
 }
 
@@ -14125,7 +14195,7 @@
 RUNTIME_FUNCTION(Runtime_CreateNumberFormat) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
@@ -14148,12 +14218,10 @@
 
   local_object->SetInternalField(0, reinterpret_cast<Smi*>(number_format));
 
-  RETURN_FAILURE_ON_EXCEPTION(isolate,
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          local_object,
-          isolate->factory()->NewStringFromStaticAscii("numberFormat"),
-          isolate->factory()->NewStringFromStaticAscii("valid"),
-          NONE));
+  Factory* factory = isolate->factory();
+  Handle<String> key = factory->NewStringFromStaticChars("numberFormat");
+  Handle<String> value = factory->NewStringFromStaticChars("valid");
+  JSObject::AddProperty(local_object, key, value, NONE);
 
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
   GlobalHandles::MakeWeak(wrapper.location(),
@@ -14166,7 +14234,7 @@
 RUNTIME_FUNCTION(Runtime_InternalNumberFormat) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, number, 1);
@@ -14196,7 +14264,7 @@
 RUNTIME_FUNCTION(Runtime_InternalNumberParse) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, number_format_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, number_string, 1);
@@ -14235,7 +14303,7 @@
 RUNTIME_FUNCTION(Runtime_CreateCollator) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
@@ -14256,12 +14324,10 @@
 
   local_object->SetInternalField(0, reinterpret_cast<Smi*>(collator));
 
-  RETURN_FAILURE_ON_EXCEPTION(isolate,
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          local_object,
-          isolate->factory()->NewStringFromStaticAscii("collator"),
-          isolate->factory()->NewStringFromStaticAscii("valid"),
-          NONE));
+  Factory* factory = isolate->factory();
+  Handle<String> key = factory->NewStringFromStaticChars("collator");
+  Handle<String> value = factory->NewStringFromStaticChars("valid");
+  JSObject::AddProperty(local_object, key, value, NONE);
 
   Handle<Object> wrapper = isolate->global_handles()->Create(*local_object);
   GlobalHandles::MakeWeak(wrapper.location(),
@@ -14274,7 +14340,7 @@
 RUNTIME_FUNCTION(Runtime_InternalCompare) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, collator_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, string1, 1);
@@ -14304,12 +14370,12 @@
   static const UNormalizationMode normalizationForms[] =
       { UNORM_NFC, UNORM_NFD, UNORM_NFKC, UNORM_NFKD };
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(String, stringValue, 0);
   CONVERT_NUMBER_CHECKED(int, form_id, Int32, args[1]);
   RUNTIME_ASSERT(form_id >= 0 &&
-                 static_cast<size_t>(form_id) < ARRAY_SIZE(normalizationForms));
+                 static_cast<size_t>(form_id) < arraysize(normalizationForms));
 
   v8::String::Value string_value(v8::Utils::ToLocal(stringValue));
   const UChar* u_value = reinterpret_cast<const UChar*>(*string_value);
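
ARRAY_SIZE gives way to arraysize in the hunk above: the function-template idiom that refuses to compile when handed a pointer, where the sizeof-division macro would silently mis-count. A condensed version of the usual definition (V8 keeps its equivalent in src/base/macros.h):

    #include <cstddef>

    // Declared but never defined: only its type matters. For a T[N]
    // argument the return type is char[N], so sizeof(...) yields N.
    template <typename T, std::size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];

    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    static const int kForms[] = {10, 20, 30, 40};
    static_assert(arraysize(kForms) == 4, "counts elements, not bytes");
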
@@ -14337,7 +14403,7 @@
 RUNTIME_FUNCTION(Runtime_CreateBreakIterator) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
 
   CONVERT_ARG_HANDLE_CHECKED(String, locale, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, options, 1);
@@ -14362,12 +14428,10 @@
   // Make sure that the pointer to adopted text is NULL.
   local_object->SetInternalField(1, reinterpret_cast<Smi*>(NULL));
 
-  RETURN_FAILURE_ON_EXCEPTION(isolate,
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          local_object,
-          isolate->factory()->NewStringFromStaticAscii("breakIterator"),
-          isolate->factory()->NewStringFromStaticAscii("valid"),
-          NONE));
+  Factory* factory = isolate->factory();
+  Handle<String> key = factory->NewStringFromStaticChars("breakIterator");
+  Handle<String> value = factory->NewStringFromStaticChars("valid");
+  JSObject::AddProperty(local_object, key, value, NONE);
 
   // Make object handle weak so we can delete the break iterator once GC kicks
   // in.
@@ -14382,7 +14446,7 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorAdoptText) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, text, 1);
@@ -14409,7 +14473,7 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorFirst) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
 
@@ -14424,7 +14488,7 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorNext) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
 
@@ -14439,7 +14503,7 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorCurrent) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
 
@@ -14454,7 +14518,7 @@
 RUNTIME_FUNCTION(Runtime_BreakIteratorBreakType) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_HANDLE_CHECKED(JSObject, break_iterator_holder, 0);
 
@@ -14468,17 +14532,17 @@
   int32_t status = rule_based_iterator->getRuleStatus();
   // Keep return values in sync with JavaScript BreakType enum.
   if (status >= UBRK_WORD_NONE && status < UBRK_WORD_NONE_LIMIT) {
-    return *isolate->factory()->NewStringFromStaticAscii("none");
+    return *isolate->factory()->NewStringFromStaticChars("none");
   } else if (status >= UBRK_WORD_NUMBER && status < UBRK_WORD_NUMBER_LIMIT) {
-    return *isolate->factory()->NewStringFromStaticAscii("number");
+    return *isolate->factory()->number_string();
   } else if (status >= UBRK_WORD_LETTER && status < UBRK_WORD_LETTER_LIMIT) {
-    return *isolate->factory()->NewStringFromStaticAscii("letter");
+    return *isolate->factory()->NewStringFromStaticChars("letter");
   } else if (status >= UBRK_WORD_KANA && status < UBRK_WORD_KANA_LIMIT) {
-    return *isolate->factory()->NewStringFromStaticAscii("kana");
+    return *isolate->factory()->NewStringFromStaticChars("kana");
   } else if (status >= UBRK_WORD_IDEO && status < UBRK_WORD_IDEO_LIMIT) {
-    return *isolate->factory()->NewStringFromStaticAscii("ideo");
+    return *isolate->factory()->NewStringFromStaticChars("ideo");
   } else {
-    return *isolate->factory()->NewStringFromStaticAscii("unknown");
+    return *isolate->factory()->NewStringFromStaticChars("unknown");
   }
 }
 #endif  // V8_I18N_SUPPORT
@@ -14524,7 +14588,7 @@
 RUNTIME_FUNCTION(Runtime_GetScript) {
   HandleScope scope(isolate);
 
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   CONVERT_ARG_CHECKED(String, script_name, 0);
 
@@ -14540,37 +14604,24 @@
 // native code offset.
 RUNTIME_FUNCTION(Runtime_CollectStackTrace) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, caller, 1);
-  CONVERT_NUMBER_CHECKED(int32_t, limit, Int32, args[2]);
 
-  // Optionally capture a more detailed stack trace for the message.
-  isolate->CaptureAndSetDetailedStackTrace(error_object);
-  // Capture a simple stack trace for the stack property.
-  return *isolate->CaptureSimpleStackTrace(error_object, caller, limit);
-}
-
-
-// Retrieve the stack trace.  This is the raw stack trace that yet has to
-// be formatted.  Since we only need this once, clear it afterwards.
-RUNTIME_FUNCTION(Runtime_GetAndClearOverflowedStackTrace) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(JSObject, error_object, 0);
-  Handle<String> key = isolate->factory()->hidden_stack_trace_string();
-  Handle<Object> result(error_object->GetHiddenProperty(key), isolate);
-  if (result->IsTheHole()) return isolate->heap()->undefined_value();
-  RUNTIME_ASSERT(result->IsJSArray() || result->IsUndefined());
-  JSObject::DeleteHiddenProperty(error_object, key);
-  return *result;
+  if (!isolate->bootstrapper()->IsActive()) {
+    // Optionally capture a more detailed stack trace for the message.
+    isolate->CaptureAndSetDetailedStackTrace(error_object);
+    // Capture a simple stack trace for the stack property.
+    isolate->CaptureAndSetSimpleStackTrace(error_object, caller);
+  }
+  return isolate->heap()->undefined_value();
 }
 
 
 // Returns V8 version as a string.
 RUNTIME_FUNCTION(Runtime_GetV8Version) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 
   const char* version_string = v8::V8::GetVersion();
 
@@ -14578,15 +14629,74 @@
 }
 
 
+// Returns function of generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetFunction) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  return generator->function();
+}
+
+
+// Returns context of generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetContext) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  return generator->context();
+}
+
+
+// Returns receiver of generator activation.
+RUNTIME_FUNCTION(Runtime_GeneratorGetReceiver) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  return generator->receiver();
+}
+
+
+// Returns generator continuation as a PC offset, or the magic -1 or 0 values.
+RUNTIME_FUNCTION(Runtime_GeneratorGetContinuation) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  return Smi::FromInt(generator->continuation());
+}
+
+
+RUNTIME_FUNCTION(Runtime_GeneratorGetSourcePosition) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
+
+  if (generator->is_suspended()) {
+    Handle<Code> code(generator->function()->code(), isolate);
+    int offset = generator->continuation();
+
+    RUNTIME_ASSERT(0 <= offset && offset < code->Size());
+    Address pc = code->address() + offset;
+
+    return Smi::FromInt(code->SourcePosition(pc));
+  }
+
+  return isolate->heap()->undefined_value();
+}
+
+
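
GeneratorGetSourcePosition above recovers a script offset from a suspended activation: the stored continuation is a byte offset into the function's code, and code->SourcePosition(pc) consults the code object's recorded position info for the closest entry. A hedged sketch of that reverse lookup, with a hypothetical sorted table standing in for the real relocation info:

    #include <vector>

    struct PositionEntry {
      int code_offset;      // offset into the generated code
      int source_position;  // script offset that produced it
    };

    // Table assumed sorted by code_offset: take the last entry at or
    // before the requested offset, or -1 when nothing was recorded yet.
    int SourcePositionFor(const std::vector<PositionEntry>& table,
                          int offset) {
      int result = -1;
      for (const PositionEntry& entry : table) {
        if (entry.code_offset > offset) break;
        result = entry.source_position;
      }
      return result;
    }
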
 RUNTIME_FUNCTION(Runtime_Abort) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_SMI_ARG_CHECKED(message_id, 0);
   const char* message = GetBailoutReason(
       static_cast<BailoutReason>(message_id));
-  OS::PrintError("abort: %s\n", message);
+  base::OS::PrintError("abort: %s\n", message);
   isolate->PrintStack(stderr);
-  OS::Abort();
+  base::OS::Abort();
   UNREACHABLE();
   return NULL;
 }
@@ -14594,11 +14704,11 @@
 
 RUNTIME_FUNCTION(Runtime_AbortJS) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, message, 0);
-  OS::PrintError("abort: %s\n", message->ToCString().get());
+  base::OS::PrintError("abort: %s\n", message->ToCString().get());
   isolate->PrintStack(stderr);
-  OS::Abort();
+  base::OS::Abort();
   UNREACHABLE();
   return NULL;
 }
@@ -14606,7 +14716,7 @@
 
 RUNTIME_FUNCTION(Runtime_FlattenString) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(String, str, 0);
   return *String::Flatten(str);
 }
@@ -14614,7 +14724,7 @@
 
 RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   isolate->heap()->NotifyContextDisposed();
   return isolate->heap()->undefined_value();
 }
@@ -14622,7 +14732,7 @@
 
 RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
   CONVERT_ARG_HANDLE_CHECKED(Smi, index, 1);
   RUNTIME_ASSERT((index->value() & 1) == 1);
@@ -14636,14 +14746,14 @@
                    object->properties()->length());
   }
   Handle<Object> raw_value(object->RawFastPropertyAt(field_index), isolate);
-  RUNTIME_ASSERT(raw_value->IsNumber() || raw_value->IsUninitialized());
-  return *Object::NewStorageFor(isolate, raw_value, Representation::Double());
+  RUNTIME_ASSERT(raw_value->IsMutableHeapNumber());
+  return *Object::WrapForRead(isolate, raw_value, Representation::Double());
 }
 
 
 RUNTIME_FUNCTION(Runtime_TryMigrateInstance) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   if (!object->IsJSObject()) return Smi::FromInt(0);
   Handle<JSObject> js_object = Handle<JSObject>::cast(object);
@@ -14657,7 +14767,7 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_GetFromCache) {
+RUNTIME_FUNCTION(Runtime_GetFromCache) {
   SealHandleScope shs(isolate);
   // This is only called from codegen, so checks might be more lax.
   CONVERT_ARG_CHECKED(JSFunctionResultCache, cache, 0);
@@ -14684,7 +14794,7 @@
     }
 
     int size = cache->size();
-    ASSERT(size <= cache->length());
+    DCHECK(size <= cache->length());
 
     for (int i = size - 2; i > finger_index; i -= 2) {
       o = cache->get(i);
@@ -14705,13 +14815,12 @@
     Handle<JSFunction> factory(JSFunction::cast(
           cache_handle->get(JSFunctionResultCache::kFactoryIndex)));
     // TODO(antonm): consider passing a receiver when constructing a cache.
-    Handle<Object> receiver(isolate->native_context()->global_object(),
-                            isolate);
+    Handle<JSObject> receiver(isolate->global_proxy());
     // This handle is neither shared nor used later, so it's safe.
     Handle<Object> argv[] = { key_handle };
     ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
         isolate, value,
-        Execution::Call(isolate, factory, receiver, ARRAY_SIZE(argv), argv));
+        Execution::Call(isolate, factory, receiver, arraysize(argv), argv));
   }
 
 #ifdef VERIFY_HEAP
@@ -14737,9 +14846,9 @@
     }
   }
 
-  ASSERT(index % 2 == 0);
-  ASSERT(index >= JSFunctionResultCache::kEntriesIndex);
-  ASSERT(index < cache_handle->length());
+  DCHECK(index % 2 == 0);
+  DCHECK(index >= JSFunctionResultCache::kEntriesIndex);
+  DCHECK(index < cache_handle->length());
 
   cache_handle->set(index, *key_handle);
   cache_handle->set(index + 1, *value);
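
The cache being read and written here is a JSFunctionResultCache: a flat array of key/value pairs plus a "finger" remembering the last hit, so repeat lookups are cheap and misses insert just past the finger. A self-contained sketch of the idea (layout and eviction are illustrative, not V8's exact scheme):

    #include <array>
    #include <cstddef>
    #include <optional>
    #include <utility>

    template <typename K, typename V, std::size_t N>
    class FingerCache {
     public:
      std::optional<V> Get(const K& key) {
        for (std::size_t i = 0; i < N; ++i) {
          std::size_t slot = (finger_ + N - i) % N;  // scan back from finger
          if (entries_[slot] && entries_[slot]->first == key) {
            finger_ = slot;  // remember the hit for the next lookup
            return entries_[slot]->second;
          }
        }
        return std::nullopt;  // caller invokes the factory, then Put()
      }

      void Put(const K& key, V value) {
        finger_ = (finger_ + 1) % N;  // evict the entry just past the finger
        entries_[finger_] = std::make_pair(key, std::move(value));
      }

     private:
      std::array<std::optional<std::pair<K, V>>, N> entries_{};
      std::size_t finger_ = 0;
    };
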
@@ -14757,7 +14866,7 @@
 
 RUNTIME_FUNCTION(Runtime_MessageGetStartPosition) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
   return Smi::FromInt(message->start_position());
 }
@@ -14765,7 +14874,7 @@
 
 RUNTIME_FUNCTION(Runtime_MessageGetScript) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSMessageObject, message, 0);
   return message->script();
 }
@@ -14776,11 +14885,10 @@
 // Exclude the code in release mode.
 RUNTIME_FUNCTION(Runtime_ListNatives) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
 #define COUNT_ENTRY(Name, argc, ressize) + 1
   int entry_count = 0
       RUNTIME_FUNCTION_LIST(COUNT_ENTRY)
-      RUNTIME_HIDDEN_FUNCTION_LIST(COUNT_ENTRY)
       INLINE_FUNCTION_LIST(COUNT_ENTRY)
       INLINE_OPTIMIZED_FUNCTION_LIST(COUNT_ENTRY);
 #undef COUNT_ENTRY
@@ -14788,31 +14896,29 @@
   Handle<FixedArray> elements = factory->NewFixedArray(entry_count);
   int index = 0;
   bool inline_runtime_functions = false;
-#define ADD_ENTRY(Name, argc, ressize)                                       \
-  {                                                                          \
-    HandleScope inner(isolate);                                              \
-    Handle<String> name;                                                     \
-    /* Inline runtime functions have an underscore in front of the name. */  \
-    if (inline_runtime_functions) {                                          \
-      name = factory->NewStringFromStaticAscii("_" #Name);                   \
-    } else {                                                                 \
-      name = factory->NewStringFromStaticAscii(#Name);                       \
-    }                                                                        \
-    Handle<FixedArray> pair_elements = factory->NewFixedArray(2);            \
-    pair_elements->set(0, *name);                                            \
-    pair_elements->set(1, Smi::FromInt(argc));                               \
-    Handle<JSArray> pair = factory->NewJSArrayWithElements(pair_elements);   \
-    elements->set(index++, *pair);                                           \
+#define ADD_ENTRY(Name, argc, ressize)                                      \
+  {                                                                         \
+    HandleScope inner(isolate);                                             \
+    Handle<String> name;                                                    \
+    /* Inline runtime functions have an underscore in front of the name. */ \
+    if (inline_runtime_functions) {                                         \
+      name = factory->NewStringFromStaticChars("_" #Name);                  \
+    } else {                                                                \
+      name = factory->NewStringFromStaticChars(#Name);                      \
+    }                                                                       \
+    Handle<FixedArray> pair_elements = factory->NewFixedArray(2);           \
+    pair_elements->set(0, *name);                                           \
+    pair_elements->set(1, Smi::FromInt(argc));                              \
+    Handle<JSArray> pair = factory->NewJSArrayWithElements(pair_elements);  \
+    elements->set(index++, *pair);                                          \
   }
   inline_runtime_functions = false;
   RUNTIME_FUNCTION_LIST(ADD_ENTRY)
   INLINE_OPTIMIZED_FUNCTION_LIST(ADD_ENTRY)
-  // Calling hidden runtime functions should just throw.
-  RUNTIME_HIDDEN_FUNCTION_LIST(ADD_ENTRY)
   inline_runtime_functions = true;
   INLINE_FUNCTION_LIST(ADD_ENTRY)
 #undef ADD_ENTRY
-  ASSERT_EQ(index, entry_count);
+  DCHECK_EQ(index, entry_count);
   Handle<JSArray> result = factory->NewJSArrayWithElements(elements);
   return *result;
 }
@@ -14869,7 +14975,7 @@
 
 RUNTIME_FUNCTION(Runtime_HaveSameMap) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 2);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_CHECKED(JSObject, obj1, 0);
   CONVERT_ARG_CHECKED(JSObject, obj2, 1);
   return isolate->heap()->ToBoolean(obj1->map() == obj2->map());
@@ -14878,7 +14984,7 @@
 
 RUNTIME_FUNCTION(Runtime_IsJSGlobalProxy) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(Object, obj, 0);
   return isolate->heap()->ToBoolean(obj->IsJSGlobalProxy());
 }
@@ -14886,24 +14992,24 @@
 
 RUNTIME_FUNCTION(Runtime_IsObserved) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
 
   if (!args[0]->IsJSReceiver()) return isolate->heap()->false_value();
   CONVERT_ARG_CHECKED(JSReceiver, obj, 0);
-  ASSERT(!obj->IsJSGlobalProxy() || !obj->map()->is_observed());
+  DCHECK(!obj->IsJSGlobalProxy() || !obj->map()->is_observed());
   return isolate->heap()->ToBoolean(obj->map()->is_observed());
 }
 
 
 RUNTIME_FUNCTION(Runtime_SetIsObserved) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSReceiver, obj, 0);
   RUNTIME_ASSERT(!obj->IsJSGlobalProxy());
   if (obj->IsJSProxy()) return isolate->heap()->undefined_value();
   RUNTIME_ASSERT(!obj->map()->is_observed());
 
-  ASSERT(obj->IsJSObject());
+  DCHECK(obj->IsJSObject());
   JSObject::SetObserved(Handle<JSObject>::cast(obj));
   return isolate->heap()->undefined_value();
 }
@@ -14911,7 +15017,7 @@
 
 RUNTIME_FUNCTION(Runtime_EnqueueMicrotask) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, microtask, 0);
   isolate->EnqueueMicrotask(microtask);
   return isolate->heap()->undefined_value();
@@ -14920,7 +15026,7 @@
 
 RUNTIME_FUNCTION(Runtime_RunMicrotasks) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   isolate->RunMicrotasks();
   return isolate->heap()->undefined_value();
 }
@@ -14928,14 +15034,14 @@
 
 RUNTIME_FUNCTION(Runtime_GetObservationState) {
   SealHandleScope shs(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   return isolate->heap()->observation_state();
 }
 
 
 RUNTIME_FUNCTION(Runtime_ObservationWeakMapCreate) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 0);
+  DCHECK(args.length() == 0);
   // TODO(adamk): Currently this runtime function is only called three times per
   // isolate. If it's called more often, the map should be moved into the
   // strong root list.
@@ -14955,13 +15061,12 @@
 
 RUNTIME_FUNCTION(Runtime_ObserverObjectAndRecordHaveSameOrigin) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, observer, 0);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, record, 2);
 
-  Handle<Context> observer_context(observer->context()->native_context(),
-      isolate);
+  Handle<Context> observer_context(observer->context()->native_context());
   Handle<Context> object_context(object->GetCreationContext());
   Handle<Context> record_context(record->GetCreationContext());
 
@@ -14973,7 +15078,7 @@
 
 RUNTIME_FUNCTION(Runtime_ObjectWasCreatedInCurrentOrigin) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
 
   Handle<Context> creation_context(object->GetCreationContext(), isolate);
@@ -14984,7 +15089,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetObjectContextObjectObserve) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
 
   Handle<Context> context(object->GetCreationContext(), isolate);
@@ -14994,7 +15099,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetObjectContextObjectGetNotifier) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
 
   Handle<Context> context(object->GetCreationContext(), isolate);
@@ -15004,7 +15109,7 @@
 
 RUNTIME_FUNCTION(Runtime_GetObjectContextNotifierPerformChange) {
   HandleScope scope(isolate);
-  ASSERT(args.length() == 1);
+  DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSObject, object_info, 0);
 
   Handle<Context> context(object_info->GetCreationContext(), isolate);
@@ -15090,7 +15195,7 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_ArrayConstructor) {
+RUNTIME_FUNCTION(Runtime_ArrayConstructor) {
   HandleScope scope(isolate);
   // If we get 2 arguments then they are the stub parameters (constructor, type
   // info).  If we get 4, then the first one is a pointer to the arguments
@@ -15099,7 +15204,7 @@
   // with an assert).
   Arguments empty_args(0, NULL);
   bool no_caller_args = args.length() == 2;
-  ASSERT(no_caller_args || args.length() == 4);
+  DCHECK(no_caller_args || args.length() == 4);
   int parameters_start = no_caller_args ? 0 : 1;
   Arguments* caller_args = no_caller_args
       ? &empty_args
@@ -15109,7 +15214,7 @@
 #ifdef DEBUG
   if (!no_caller_args) {
     CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 2);
-    ASSERT(arg_count == caller_args->length());
+    DCHECK(arg_count == caller_args->length());
   }
 #endif
 
@@ -15117,7 +15222,7 @@
   if (!type_info.is_null() &&
       *type_info != isolate->heap()->undefined_value()) {
     site = Handle<AllocationSite>::cast(type_info);
-    ASSERT(!site->SitePointsToLiteral());
+    DCHECK(!site->SitePointsToLiteral());
   }
 
   return ArrayConstructorCommon(isolate,
@@ -15127,11 +15232,11 @@
 }
 
 
-RUNTIME_FUNCTION(RuntimeHidden_InternalArrayConstructor) {
+RUNTIME_FUNCTION(Runtime_InternalArrayConstructor) {
   HandleScope scope(isolate);
   Arguments empty_args(0, NULL);
   bool no_caller_args = args.length() == 1;
-  ASSERT(no_caller_args || args.length() == 3);
+  DCHECK(no_caller_args || args.length() == 3);
   int parameters_start = no_caller_args ? 0 : 1;
   Arguments* caller_args = no_caller_args
       ? &empty_args
@@ -15140,7 +15245,7 @@
 #ifdef DEBUG
   if (!no_caller_args) {
     CONVERT_SMI_ARG_CHECKED(arg_count, parameters_start + 1);
-    ASSERT(arg_count == caller_args->length());
+    DCHECK(arg_count == caller_args->length());
   }
 #endif
   return ArrayConstructorCommon(isolate,
@@ -15150,52 +15255,478 @@
 }
 
 
+RUNTIME_FUNCTION(Runtime_NormalizeElements) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, array, 0);
+  RUNTIME_ASSERT(!array->HasExternalArrayElements() &&
+                 !array->HasFixedTypedArrayElements());
+  JSObject::NormalizeElements(array);
+  return *array;
+}
+
+
 RUNTIME_FUNCTION(Runtime_MaxSmi) {
-  ASSERT(args.length() == 0);
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 0);
   return Smi::FromInt(Smi::kMaxValue);
 }
 
 
+// TODO(dcarney): remove this function when TurboFan supports it.
+// Takes the object to be iterated over and the result of GetPropertyNamesFast
+// Returns pair (cache_array, cache_type).
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInInit) {
+  SealHandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  // This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs.
+  // Not worth creating a macro atm as this function should be removed.
+  if (!args[0]->IsJSReceiver() || !args[1]->IsObject()) {
+    Object* error = isolate->ThrowIllegalOperation();
+    return MakePair(error, isolate->heap()->undefined_value());
+  }
+  Handle<JSReceiver> object = args.at<JSReceiver>(0);
+  Handle<Object> cache_type = args.at<Object>(1);
+  if (cache_type->IsMap()) {
+    // Enum cache case.
+    if (Map::EnumLengthBits::decode(Map::cast(*cache_type)->bit_field3()) ==
+        0) {
+      // 0 length enum.
+      // Can't handle this case in the graph builder,
+      // so transform it into the empty fixed array case.
+      return MakePair(isolate->heap()->empty_fixed_array(), Smi::FromInt(1));
+    }
+    return MakePair(object->map()->instance_descriptors()->GetEnumCache(),
+                    *cache_type);
+  } else {
+    // FixedArray case.
+    Smi* new_cache_type = Smi::FromInt(object->IsJSProxy() ? 0 : 1);
+    return MakePair(*Handle<FixedArray>::cast(cache_type), new_cache_type);
+  }
+}
+
+
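
ForInInit above is declared with RUNTIME_FUNCTION_RETURN_PAIR: unlike ordinary runtime calls it hands two tagged values back to generated code at once. Conceptually, MakePair just bundles the two results for the calling stub to unpack (sketch; the real ObjectPair is packed to match the calling convention, typically two registers):

    struct Object;  // stand-in for a tagged V8 value

    struct ObjectPair {
      Object* x;
      Object* y;
    };

    // Bundle both results of a pair-returning runtime call.
    ObjectPair MakePair(Object* x, Object* y) { return ObjectPair{x, y}; }

This is also why the error path above returns MakePair(error, undefined): even a throwing call must produce a well-formed pair.
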
+// TODO(dcarney): remove this function when TurboFan supports it.
+RUNTIME_FUNCTION(Runtime_ForInCacheArrayLength) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_HANDLE_CHECKED(Object, cache_type, 0);
+  CONVERT_ARG_HANDLE_CHECKED(FixedArray, array, 1);
+  int length = 0;
+  if (cache_type->IsMap()) {
+    length = Map::cast(*cache_type)->EnumLength();
+  } else {
+    DCHECK(cache_type->IsSmi());
+    length = array->length();
+  }
+  return Smi::FromInt(length);
+}
+
+
+// TODO(dcarney): remove this function when TurboFan supports it.
+// Takes (the object to be iterated over,
+//        cache_array from ForInInit,
+//        cache_type from ForInInit,
+//        the current index)
+// Returns pair (array[index], needs_filtering).
+RUNTIME_FUNCTION_RETURN_PAIR(Runtime_ForInNext) {
+  SealHandleScope scope(isolate);
+  DCHECK(args.length() == 4);
+  int32_t index;
+  // This simulates CONVERT_ARG_HANDLE_CHECKED for calls returning pairs.
+  // Not worth creating a macro atm as this function should be removed.
+  if (!args[0]->IsJSReceiver() || !args[1]->IsFixedArray() ||
+      !args[2]->IsObject() || !args[3]->ToInt32(&index)) {
+    Object* error = isolate->ThrowIllegalOperation();
+    return MakePair(error, isolate->heap()->undefined_value());
+  }
+  Handle<JSReceiver> object = args.at<JSReceiver>(0);
+  Handle<FixedArray> array = args.at<FixedArray>(1);
+  Handle<Object> cache_type = args.at<Object>(2);
+  // Figure out first if a slow check is needed for this object.
+  bool slow_check_needed = false;
+  if (cache_type->IsMap()) {
+    if (object->map() != Map::cast(*cache_type)) {
+      // Object transitioned.  Need slow check.
+      slow_check_needed = true;
+    }
+  } else {
+    // No slow check needed for proxies.
+    slow_check_needed = Smi::cast(*cache_type)->value() == 1;
+  }
+  return MakePair(array->get(index),
+                  isolate->heap()->ToBoolean(slow_check_needed));
+}
+
+
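
The heart of ForInNext above is the cache_type check: while it is still the Map captured at loop entry, the receiver has not transitioned, so the cached key list is trustworthy; a Smi cache_type marks the plain FixedArray case (0 for proxies, 1 otherwise, where 1 forces the per-key filtering step and 0 skips it). The map comparison in isolation (hypothetical minimal types):

    struct Map {};
    struct Receiver { const Map* map; };  // hidden class of the object

    // If the receiver's hidden class changed mid-loop (a property was
    // added or deleted), every cached key must be re-validated.
    bool NeedsSlowCheck(const Receiver& receiver, const Map* cache_map) {
      return receiver.map != cache_map;
    }
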
+// ----------------------------------------------------------------------------
+// Reference implementation for inlined runtime functions.  Only used when the
+// compiler does not support a certain intrinsic.  Don't optimize these, but
+// implement the intrinsic in the respective compiler instead.
+
+// TODO(mstarzinger): These are place-holder stubs for TurboFan and will
+// eventually all have a C++ implementation and this macro will be gone.
+#define U(name)                               \
+  RUNTIME_FUNCTION(RuntimeReference_##name) { \
+    UNIMPLEMENTED();                          \
+    return NULL;                              \
+  }
+
+U(IsStringWrapperSafeForDefaultValueOf)
+U(DebugBreakInOptimizedCode)
+
+#undef U
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsSmi) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsSmi());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsNonNegativeSmi) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsSmi() &&
+                                    Smi::cast(obj)->value() >= 0);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsArray) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsJSArray());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsRegExp) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsJSRegExp());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsConstructCall) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 0);
+  JavaScriptFrameIterator it(isolate);
+  JavaScriptFrame* frame = it.frame();
+  return isolate->heap()->ToBoolean(frame->IsConstructor());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_CallFunction) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_Call(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_ArgumentsLength) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 0);
+  JavaScriptFrameIterator it(isolate);
+  JavaScriptFrame* frame = it.frame();
+  return Smi::FromInt(frame->GetArgumentsLength());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_Arguments) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_GetArgumentsProperty(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_ValueOf) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsJSValue()) return obj;
+  return JSValue::cast(obj)->value();
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_SetValueOf) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  CONVERT_ARG_CHECKED(Object, value, 1);
+  if (!obj->IsJSValue()) return value;
+  JSValue::cast(obj)->set_value(value);
+  return value;
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_DateField) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  CONVERT_SMI_ARG_CHECKED(index, 1);
+  if (!obj->IsJSDate()) {
+    HandleScope scope(isolate);
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate,
+        NewTypeError("not_date_object", HandleVector<Object>(NULL, 0)));
+  }
+  JSDate* date = JSDate::cast(obj);
+  if (index == 0) return date->value();
+  return JSDate::GetField(date, Smi::FromInt(index));
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringCharFromCode) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_CharFromCode(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringCharAt) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  if (!args[0]->IsString()) return Smi::FromInt(0);
+  if (!args[1]->IsNumber()) return Smi::FromInt(0);
+  if (std::isinf(args.number_at(1))) return isolate->heap()->empty_string();
+  Object* code = __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
+  if (code->IsNaN()) return isolate->heap()->empty_string();
+  return __RT_impl_Runtime_CharFromCode(Arguments(1, &code), isolate);
+}
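+// The reference StringCharAt composes the CharCodeAt and CharFromCode
+// implementations: mistyped arguments fall back to Smi zero, and an
+// infinite index or NaN char code yields the empty string.  The
+// Arguments(1, &code) wrapper turns the intermediate code unit into a
+// one-element argument list for the CharFromCode call.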
+
+
+RUNTIME_FUNCTION(RuntimeReference_OneByteSeqStringSetChar) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 3);
+  CONVERT_INT32_ARG_CHECKED(index, 0);
+  CONVERT_INT32_ARG_CHECKED(value, 1);
+  CONVERT_ARG_CHECKED(SeqOneByteString, string, 2);
+  string->SeqOneByteStringSet(index, value);
+  return string;
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_TwoByteSeqStringSetChar) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 3);
+  CONVERT_INT32_ARG_CHECKED(index, 0);
+  CONVERT_INT32_ARG_CHECKED(value, 1);
+  CONVERT_ARG_CHECKED(SeqTwoByteString, string, 2);
+  string->SeqTwoByteStringSet(index, value);
+  return string;
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_ObjectEquals) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_ARG_CHECKED(Object, obj1, 0);
+  CONVERT_ARG_CHECKED(Object, obj2, 1);
+  return isolate->heap()->ToBoolean(obj1 == obj2);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsObject) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsHeapObject()) return isolate->heap()->false_value();
+  if (obj->IsNull()) return isolate->heap()->true_value();
+  if (obj->IsUndetectableObject()) return isolate->heap()->false_value();
+  Map* map = HeapObject::cast(obj)->map();
+  bool is_non_callable_spec_object =
+      map->instance_type() >= FIRST_NONCALLABLE_SPEC_OBJECT_TYPE &&
+      map->instance_type() <= LAST_NONCALLABLE_SPEC_OBJECT_TYPE;
+  return isolate->heap()->ToBoolean(is_non_callable_spec_object);
+}
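+// The check above mirrors typeof-style object semantics: null counts as an
+// object, undetectable objects do not, and anything else must have an
+// instance type within the non-callable spec-object range.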
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsFunction) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsJSFunction());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsUndetectableObject) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsUndetectableObject());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsSpecObject) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  return isolate->heap()->ToBoolean(obj->IsSpecObject());
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_MathPow) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_MathPowSlow(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_IsMinusZero) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsHeapNumber()) return isolate->heap()->false_value();
+  HeapNumber* number = HeapNumber::cast(obj);
+  return isolate->heap()->ToBoolean(IsMinusZero(number->value()));
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_HasCachedArrayIndex) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  return isolate->heap()->false_value();
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_GetCachedArrayIndex) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  return isolate->heap()->undefined_value();
+}
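+// The two reference versions above conservatively report that no cached
+// array index exists, so the undefined result from GetCachedArrayIndex is
+// never observed on this path; the real lookup is implemented in the
+// respective compilers.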
+
+
+RUNTIME_FUNCTION(RuntimeReference_FastOneByteArrayJoin) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  return isolate->heap()->undefined_value();
+}
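+// Returning undefined here tells the caller that the fast join did not
+// happen, so it must fall back to the generic array-join path.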
+
+
+RUNTIME_FUNCTION(RuntimeReference_GeneratorNext) {
+  UNREACHABLE();  // Optimization disabled in SetUpGenerators().
+  return NULL;
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_GeneratorThrow) {
+  UNREACHABLE();  // Optimization disabled in SetUpGenerators().
+  return NULL;
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_ClassOf) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(Object, obj, 0);
+  if (!obj->IsJSReceiver()) return isolate->heap()->null_value();
+  return JSReceiver::cast(obj)->class_name();
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringCharCodeAt) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 2);
+  if (!args[0]->IsString()) return isolate->heap()->undefined_value();
+  if (!args[1]->IsNumber()) return isolate->heap()->undefined_value();
+  if (std::isinf(args.number_at(1))) return isolate->heap()->nan_value();
+  return __RT_impl_Runtime_StringCharCodeAtRT(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringAdd) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_StringAdd(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_SubString) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_SubString(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_StringCompare) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_StringCompare(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_RegExpExec) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_RegExpExecRT(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_RegExpConstructResult) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_RegExpConstructResult(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_GetFromCache) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
+  CONVERT_SMI_ARG_CHECKED(id, 0);
+  args[0] = isolate->native_context()->jsfunction_result_caches()->get(id);
+  return __RT_impl_Runtime_GetFromCache(args, isolate);
+}
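+// The inline %_GetFromCache intrinsic passes the cache id as argument 0,
+// while the Runtime_GetFromCache implementation expects the cache array
+// itself, so the argument is rewritten before forwarding.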
+
+
+RUNTIME_FUNCTION(RuntimeReference_NumberToString) {
+  SealHandleScope shs(isolate);
+  return __RT_impl_Runtime_NumberToStringRT(args, isolate);
+}
+
+
+RUNTIME_FUNCTION(RuntimeReference_DebugIsActive) {
+  SealHandleScope shs(isolate);
+  return Smi::FromInt(isolate->debug()->is_active());
+}
+
+
 // ----------------------------------------------------------------------------
 // Implementation of Runtime
 
-#define F(name, number_of_args, result_size)                             \
-  { Runtime::k##name, Runtime::RUNTIME, #name,   \
-    FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+#define F(name, number_of_args, result_size)                                  \
+  {                                                                           \
+    Runtime::k##name, Runtime::RUNTIME, #name, FUNCTION_ADDR(Runtime_##name), \
+        number_of_args, result_size                                           \
+  }                                                                           \
+  ,
 
 
-#define FH(name, number_of_args, result_size)                             \
-  { Runtime::kHidden##name, Runtime::RUNTIME_HIDDEN, NULL,   \
-    FUNCTION_ADDR(RuntimeHidden_##name), number_of_args, result_size },
+#define I(name, number_of_args, result_size)                                \
+  {                                                                         \
+    Runtime::kInline##name, Runtime::INLINE, "_" #name,                     \
+        FUNCTION_ADDR(RuntimeReference_##name), number_of_args, result_size \
+  }                                                                         \
+  ,
 
 
-#define I(name, number_of_args, result_size)                             \
-  { Runtime::kInline##name, Runtime::INLINE,     \
-    "_" #name, NULL, number_of_args, result_size },
-
-
-#define IO(name, number_of_args, result_size) \
-  { Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, \
-    "_" #name, FUNCTION_ADDR(Runtime_##name), number_of_args, result_size },
+#define IO(name, number_of_args, result_size)                              \
+  {                                                                        \
+    Runtime::kInlineOptimized##name, Runtime::INLINE_OPTIMIZED, "_" #name, \
+        FUNCTION_ADDR(Runtime_##name), number_of_args, result_size         \
+  }                                                                        \
+  ,
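+
+// As an illustration, an entry such as F(GetProperty, 2, 1) expands to the
+// table initializer below; I and IO produce analogous entries with a
+// "_"-prefixed name and the INLINE / INLINE_OPTIMIZED kinds:
+//
+//   { Runtime::kGetProperty, Runtime::RUNTIME, "GetProperty",
+//     FUNCTION_ADDR(Runtime_GetProperty), 2, 1 },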
 
 
 static const Runtime::Function kIntrinsicFunctions[] = {
   RUNTIME_FUNCTION_LIST(F)
   INLINE_OPTIMIZED_FUNCTION_LIST(F)
-  RUNTIME_HIDDEN_FUNCTION_LIST(FH)
   INLINE_FUNCTION_LIST(I)
   INLINE_OPTIMIZED_FUNCTION_LIST(IO)
 };
 
 #undef IO
 #undef I
-#undef FH
 #undef F
 
 
 void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate,
                                                Handle<NameDictionary> dict) {
-  ASSERT(dict->NumberOfElements() == 0);
+  DCHECK(dict->NumberOfElements() == 0);
   HandleScope scope(isolate);
   for (int i = 0; i < kNumFunctions; ++i) {
     const char* name = kIntrinsicFunctions[i].name;
@@ -15223,6 +15754,16 @@
 }
 
 
+const Runtime::Function* Runtime::FunctionForEntry(Address entry) {
+  for (size_t i = 0; i < arraysize(kIntrinsicFunctions); ++i) {
+    if (entry == kIntrinsicFunctions[i].entry) {
+      return &(kIntrinsicFunctions[i]);
+    }
+  }
+  return NULL;
+}
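+// FunctionForEntry is the reverse of FunctionForId below: a linear scan
+// mapping a C++ entry address back to its table entry, with NULL returned
+// for addresses that do not belong to any intrinsic.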
+
+
 const Runtime::Function* Runtime::FunctionForId(Runtime::FunctionId id) {
   return &(kIntrinsicFunctions[static_cast<int>(id)]);
 }
diff --git a/src/runtime.h b/src/runtime.h
index d6ed830..e63cd90 100644
--- a/src/runtime.h
+++ b/src/runtime.h
@@ -21,380 +21,497 @@
 // WARNING: RUNTIME_FUNCTION_LIST_ALWAYS_* is a very large macro that caused
 // MSVC Intellisense to crash.  It was broken into two macros to work around
 // this problem. Please avoid large recursive macros whenever possible.
-#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
-  /* Property access */ \
-  F(GetProperty, 2, 1) \
-  F(KeyedGetProperty, 2, 1) \
-  F(DeleteProperty, 3, 1) \
-  F(HasOwnProperty, 2, 1) \
-  F(HasProperty, 2, 1) \
-  F(HasElement, 2, 1) \
-  F(IsPropertyEnumerable, 2, 1) \
-  F(GetPropertyNames, 1, 1) \
-  F(GetPropertyNamesFast, 1, 1) \
-  F(GetOwnPropertyNames, 2, 1) \
-  F(GetOwnElementNames, 1, 1) \
-  F(GetInterceptorInfo, 1, 1) \
-  F(GetNamedInterceptorPropertyNames, 1, 1) \
-  F(GetIndexedInterceptorElementNames, 1, 1) \
-  F(GetArgumentsProperty, 1, 1) \
-  F(ToFastProperties, 1, 1) \
-  F(FinishArrayPrototypeSetup, 1, 1) \
-  F(SpecialArrayFunctions, 0, 1) \
-  F(IsSloppyModeFunction, 1, 1) \
-  F(GetDefaultReceiver, 1, 1) \
-  \
-  F(GetPrototype, 1, 1) \
-  F(SetPrototype, 2, 1) \
-  F(IsInPrototypeChain, 2, 1) \
-  \
-  F(GetOwnProperty, 2, 1) \
-  \
-  F(IsExtensible, 1, 1) \
-  F(PreventExtensions, 1, 1)\
-  \
-  /* Utilities */ \
-  F(CheckIsBootstrapping, 0, 1) \
-  F(GetRootNaN, 0, 1) \
-  F(Call, -1 /* >= 2 */, 1) \
-  F(Apply, 5, 1) \
-  F(GetFunctionDelegate, 1, 1) \
-  F(GetConstructorDelegate, 1, 1) \
-  F(DeoptimizeFunction, 1, 1) \
-  F(ClearFunctionTypeFeedback, 1, 1) \
-  F(RunningInSimulator, 0, 1) \
-  F(IsConcurrentRecompilationSupported, 0, 1) \
-  F(OptimizeFunctionOnNextCall, -1, 1) \
-  F(NeverOptimizeFunction, 1, 1) \
-  F(GetOptimizationStatus, -1, 1) \
-  F(GetOptimizationCount, 1, 1) \
-  F(UnblockConcurrentRecompilation, 0, 1) \
-  F(CompileForOnStackReplacement, 1, 1) \
-  F(SetAllocationTimeout, -1 /* 2 || 3 */, 1) \
-  F(SetNativeFlag, 1, 1) \
-  F(SetInlineBuiltinFlag, 1, 1) \
-  F(StoreArrayLiteralElement, 5, 1) \
-  F(DebugPrepareStepInIfStepping, 1, 1) \
-  F(DebugPromiseHandlePrologue, 1, 1) \
-  F(DebugPromiseHandleEpilogue, 0, 1) \
-  F(FlattenString, 1, 1) \
-  F(LoadMutableDouble, 2, 1) \
-  F(TryMigrateInstance, 1, 1) \
-  F(NotifyContextDisposed, 0, 1) \
-  \
-  /* Array join support */ \
-  F(PushIfAbsent, 2, 1) \
-  F(ArrayConcat, 1, 1) \
-  \
-  /* Conversions */ \
-  F(ToBool, 1, 1) \
-  F(Typeof, 1, 1) \
-  \
-  F(StringToNumber, 1, 1) \
-  F(StringParseInt, 2, 1) \
-  F(StringParseFloat, 1, 1) \
-  F(StringToLowerCase, 1, 1) \
-  F(StringToUpperCase, 1, 1) \
-  F(StringSplit, 3, 1) \
-  F(CharFromCode, 1, 1) \
-  F(URIEscape, 1, 1) \
-  F(URIUnescape, 1, 1) \
-  \
-  F(NumberToInteger, 1, 1) \
-  F(NumberToIntegerMapMinusZero, 1, 1) \
-  F(NumberToJSUint32, 1, 1) \
-  F(NumberToJSInt32, 1, 1) \
-  \
-  /* Arithmetic operations */ \
-  F(NumberAdd, 2, 1) \
-  F(NumberSub, 2, 1) \
-  F(NumberMul, 2, 1) \
-  F(NumberDiv, 2, 1) \
-  F(NumberMod, 2, 1) \
-  F(NumberUnaryMinus, 1, 1) \
-  F(NumberImul, 2, 1) \
-  \
-  F(StringBuilderConcat, 3, 1) \
-  F(StringBuilderJoin, 3, 1) \
-  F(SparseJoinWithSeparator, 3, 1) \
-  \
-  /* Bit operations */ \
-  F(NumberOr, 2, 1) \
-  F(NumberAnd, 2, 1) \
-  F(NumberXor, 2, 1) \
-  \
-  F(NumberShl, 2, 1) \
-  F(NumberShr, 2, 1) \
-  F(NumberSar, 2, 1) \
-  \
-  /* Comparisons */ \
-  F(NumberEquals, 2, 1) \
-  F(StringEquals, 2, 1) \
-  \
-  F(NumberCompare, 3, 1) \
-  F(SmiLexicographicCompare, 2, 1) \
-  \
-  /* Math */ \
-  F(MathAcos, 1, 1) \
-  F(MathAsin, 1, 1) \
-  F(MathAtan, 1, 1) \
-  F(MathFloorRT, 1, 1) \
-  F(MathAtan2, 2, 1) \
-  F(MathExpRT, 1, 1) \
-  F(RoundNumber, 1, 1) \
-  F(MathFround, 1, 1) \
-  \
-  /* Regular expressions */ \
-  F(RegExpCompile, 3, 1) \
-  F(RegExpExecMultiple, 4, 1) \
-  F(RegExpInitializeObject, 5, 1) \
-  \
-  /* JSON */ \
-  F(ParseJson, 1, 1) \
-  F(BasicJSONStringify, 1, 1) \
-  F(QuoteJSONString, 1, 1) \
-  \
-  /* Strings */ \
-  F(StringIndexOf, 3, 1) \
-  F(StringLastIndexOf, 3, 1) \
-  F(StringLocaleCompare, 2, 1) \
-  F(StringReplaceGlobalRegExpWithString, 4, 1) \
-  F(StringReplaceOneCharWithString, 3, 1) \
-  F(StringMatch, 3, 1) \
-  F(StringTrim, 3, 1) \
-  F(StringToArray, 2, 1) \
-  F(NewStringWrapper, 1, 1) \
-  F(NewString, 2, 1) \
-  F(TruncateString, 2, 1) \
-  \
-  /* Numbers */ \
-  F(NumberToRadixString, 2, 1) \
-  F(NumberToFixed, 2, 1) \
-  F(NumberToExponential, 2, 1) \
-  F(NumberToPrecision, 2, 1) \
-  F(IsValidSmi, 1, 1)
+#define RUNTIME_FUNCTION_LIST_ALWAYS_1(F)                  \
+  /* Property access */                                    \
+  F(GetProperty, 2, 1)                                     \
+  F(KeyedGetProperty, 2, 1)                                \
+  F(DeleteProperty, 3, 1)                                  \
+  F(HasOwnProperty, 2, 1)                                  \
+  F(HasProperty, 2, 1)                                     \
+  F(HasElement, 2, 1)                                      \
+  F(IsPropertyEnumerable, 2, 1)                            \
+  F(GetPropertyNames, 1, 1)                                \
+  F(GetPropertyNamesFast, 1, 1)                            \
+  F(GetOwnPropertyNames, 2, 1)                             \
+  F(GetOwnElementNames, 1, 1)                              \
+  F(GetInterceptorInfo, 1, 1)                              \
+  F(GetNamedInterceptorPropertyNames, 1, 1)                \
+  F(GetIndexedInterceptorElementNames, 1, 1)               \
+  F(GetArgumentsProperty, 1, 1)                            \
+  F(ToFastProperties, 1, 1)                                \
+  F(FinishArrayPrototypeSetup, 1, 1)                       \
+  F(SpecialArrayFunctions, 0, 1)                           \
+  F(IsSloppyModeFunction, 1, 1)                            \
+  F(GetDefaultReceiver, 1, 1)                              \
+                                                           \
+  F(GetPrototype, 1, 1)                                    \
+  F(SetPrototype, 2, 1)                                    \
+  F(InternalSetPrototype, 2, 1)                            \
+  F(IsInPrototypeChain, 2, 1)                              \
+                                                           \
+  F(GetOwnProperty, 2, 1)                                  \
+                                                           \
+  F(IsExtensible, 1, 1)                                    \
+  F(PreventExtensions, 1, 1)                               \
+                                                           \
+  /* Utilities */                                          \
+  F(CheckIsBootstrapping, 0, 1)                            \
+  F(GetRootNaN, 0, 1)                                      \
+  F(Call, -1 /* >= 2 */, 1)                                \
+  F(Apply, 5, 1)                                           \
+  F(GetFunctionDelegate, 1, 1)                             \
+  F(GetConstructorDelegate, 1, 1)                          \
+  F(DeoptimizeFunction, 1, 1)                              \
+  F(ClearFunctionTypeFeedback, 1, 1)                       \
+  F(RunningInSimulator, 0, 1)                              \
+  F(IsConcurrentRecompilationSupported, 0, 1)              \
+  F(OptimizeFunctionOnNextCall, -1, 1)                     \
+  F(NeverOptimizeFunction, 1, 1)                           \
+  F(GetOptimizationStatus, -1, 1)                          \
+  F(GetOptimizationCount, 1, 1)                            \
+  F(UnblockConcurrentRecompilation, 0, 1)                  \
+  F(CompileForOnStackReplacement, 1, 1)                    \
+  F(SetAllocationTimeout, -1 /* 2 || 3 */, 1)              \
+  F(SetNativeFlag, 1, 1)                                   \
+  F(SetInlineBuiltinFlag, 1, 1)                            \
+  F(StoreArrayLiteralElement, 5, 1)                        \
+  F(DebugPrepareStepInIfStepping, 1, 1)                    \
+  F(DebugPushPromise, 1, 1)                                \
+  F(DebugPopPromise, 0, 1)                                 \
+  F(DebugPromiseEvent, 1, 1)                               \
+  F(DebugPromiseRejectEvent, 2, 1)                         \
+  F(DebugAsyncTaskEvent, 1, 1)                             \
+  F(FlattenString, 1, 1)                                   \
+  F(LoadMutableDouble, 2, 1)                               \
+  F(TryMigrateInstance, 1, 1)                              \
+  F(NotifyContextDisposed, 0, 1)                           \
+                                                           \
+  /* Array join support */                                 \
+  F(PushIfAbsent, 2, 1)                                    \
+  F(ArrayConcat, 1, 1)                                     \
+                                                           \
+  /* Conversions */                                        \
+  F(ToBool, 1, 1)                                          \
+  F(Typeof, 1, 1)                                          \
+                                                           \
+  F(Booleanize, 2, 1) /* TODO(turbofan): Only temporary */ \
+                                                           \
+  F(StringToNumber, 1, 1)                                  \
+  F(StringParseInt, 2, 1)                                  \
+  F(StringParseFloat, 1, 1)                                \
+  F(StringToLowerCase, 1, 1)                               \
+  F(StringToUpperCase, 1, 1)                               \
+  F(StringSplit, 3, 1)                                     \
+  F(CharFromCode, 1, 1)                                    \
+  F(URIEscape, 1, 1)                                       \
+  F(URIUnescape, 1, 1)                                     \
+                                                           \
+  F(NumberToInteger, 1, 1)                                 \
+  F(NumberToIntegerMapMinusZero, 1, 1)                     \
+  F(NumberToJSUint32, 1, 1)                                \
+  F(NumberToJSInt32, 1, 1)                                 \
+                                                           \
+  /* Arithmetic operations */                              \
+  F(NumberAdd, 2, 1)                                       \
+  F(NumberSub, 2, 1)                                       \
+  F(NumberMul, 2, 1)                                       \
+  F(NumberDiv, 2, 1)                                       \
+  F(NumberMod, 2, 1)                                       \
+  F(NumberUnaryMinus, 1, 1)                                \
+  F(NumberImul, 2, 1)                                      \
+                                                           \
+  F(StringBuilderConcat, 3, 1)                             \
+  F(StringBuilderJoin, 3, 1)                               \
+  F(SparseJoinWithSeparator, 3, 1)                         \
+                                                           \
+  /* Bit operations */                                     \
+  F(NumberOr, 2, 1)                                        \
+  F(NumberAnd, 2, 1)                                       \
+  F(NumberXor, 2, 1)                                       \
+                                                           \
+  F(NumberShl, 2, 1)                                       \
+  F(NumberShr, 2, 1)                                       \
+  F(NumberSar, 2, 1)                                       \
+                                                           \
+  /* Comparisons */                                        \
+  F(NumberEquals, 2, 1)                                    \
+  F(StringEquals, 2, 1)                                    \
+                                                           \
+  F(NumberCompare, 3, 1)                                   \
+  F(SmiLexicographicCompare, 2, 1)                         \
+                                                           \
+  /* Math */                                               \
+  F(MathAcos, 1, 1)                                        \
+  F(MathAsin, 1, 1)                                        \
+  F(MathAtan, 1, 1)                                        \
+  F(MathFloorRT, 1, 1)                                     \
+  F(MathAtan2, 2, 1)                                       \
+  F(MathExpRT, 1, 1)                                       \
+  F(RoundNumber, 1, 1)                                     \
+  F(MathFround, 1, 1)                                      \
+  F(RemPiO2, 1, 1)                                         \
+                                                           \
+  /* Regular expressions */                                \
+  F(RegExpCompile, 3, 1)                                   \
+  F(RegExpExecMultiple, 4, 1)                              \
+  F(RegExpInitializeObject, 6, 1)                          \
+                                                           \
+  /* JSON */                                               \
+  F(ParseJson, 1, 1)                                       \
+  F(BasicJSONStringify, 1, 1)                              \
+  F(QuoteJSONString, 1, 1)                                 \
+                                                           \
+  /* Strings */                                            \
+  F(StringIndexOf, 3, 1)                                   \
+  F(StringLastIndexOf, 3, 1)                               \
+  F(StringLocaleCompare, 2, 1)                             \
+  F(StringReplaceGlobalRegExpWithString, 4, 1)             \
+  F(StringReplaceOneCharWithString, 3, 1)                  \
+  F(StringMatch, 3, 1)                                     \
+  F(StringTrim, 3, 1)                                      \
+  F(StringToArray, 2, 1)                                   \
+  F(NewStringWrapper, 1, 1)                                \
+  F(NewString, 2, 1)                                       \
+  F(TruncateString, 2, 1)                                  \
+                                                           \
+  /* Numbers */                                            \
+  F(NumberToRadixString, 2, 1)                             \
+  F(NumberToFixed, 2, 1)                                   \
+  F(NumberToExponential, 2, 1)                             \
+  F(NumberToPrecision, 2, 1)                               \
+  F(IsValidSmi, 1, 1)                                      \
+                                                           \
+  /* Classes support */                                    \
+  F(ToMethod, 2, 1)                                        \
+  F(HomeObjectSymbol, 0, 1)                                \
+  F(ThrowNonMethodError, 0, 1)                             \
+  F(ThrowUnsupportedSuperError, 0, 1)                      \
+  F(LoadFromSuper, 3, 1)
 
 
-#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
-  /* Reflection */ \
-  F(FunctionSetInstanceClassName, 2, 1) \
-  F(FunctionSetLength, 2, 1) \
-  F(FunctionSetPrototype, 2, 1) \
-  F(FunctionGetName, 1, 1) \
-  F(FunctionSetName, 2, 1) \
-  F(FunctionNameShouldPrintAsAnonymous, 1, 1) \
-  F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1) \
-  F(FunctionIsGenerator, 1, 1) \
-  F(FunctionBindArguments, 4, 1) \
-  F(BoundFunctionGetBindings, 1, 1) \
-  F(FunctionRemovePrototype, 1, 1) \
-  F(FunctionGetSourceCode, 1, 1) \
-  F(FunctionGetScript, 1, 1) \
-  F(FunctionGetScriptSourcePosition, 1, 1) \
-  F(FunctionGetPositionForOffset, 2, 1) \
-  F(FunctionIsAPIFunction, 1, 1) \
-  F(FunctionIsBuiltin, 1, 1) \
-  F(GetScript, 1, 1) \
-  F(CollectStackTrace, 3, 1) \
-  F(GetAndClearOverflowedStackTrace, 1, 1) \
-  F(GetV8Version, 0, 1) \
-  \
-  F(SetCode, 2, 1) \
-  \
-  F(CreateApiFunction, 2, 1) \
-  F(IsTemplate, 1, 1) \
-  F(GetTemplateField, 2, 1) \
-  F(DisableAccessChecks, 1, 1) \
-  F(EnableAccessChecks, 1, 1) \
-  F(SetAccessorProperty, 6, 1) \
-  \
-  /* Dates */ \
-  F(DateCurrentTime, 0, 1) \
-  F(DateParseString, 2, 1) \
-  F(DateLocalTimezone, 1, 1) \
-  F(DateToUTC, 1, 1) \
-  F(DateMakeDay, 2, 1) \
-  F(DateSetValue, 3, 1) \
-  F(DateCacheVersion, 0, 1) \
-  \
-  /* Globals */ \
-  F(CompileString, 2, 1) \
-  \
-  /* Eval */ \
-  F(GlobalReceiver, 1, 1) \
-  F(IsAttachedGlobal, 1, 1) \
-  \
-  F(SetProperty, -1 /* 4 or 5 */, 1) \
-  F(DefineOrRedefineDataProperty, 4, 1) \
-  F(DefineOrRedefineAccessorProperty, 5, 1) \
-  F(IgnoreAttributesAndSetProperty, -1 /* 3 or 4 */, 1) \
-  F(GetDataProperty, 2, 1) \
-  F(SetHiddenProperty, 3, 1) \
-  \
-  /* Arrays */ \
-  F(RemoveArrayHoles, 2, 1) \
-  F(GetArrayKeys, 2, 1) \
-  F(MoveArrayContents, 2, 1) \
-  F(EstimateNumberOfElements, 1, 1) \
-  \
-  /* Getters and Setters */ \
-  F(LookupAccessor, 3, 1) \
-  \
-  /* ES5 */ \
-  F(ObjectFreeze, 1, 1) \
-  \
-  /* Harmony modules */ \
-  F(IsJSModule, 1, 1) \
-  \
-  /* Harmony symbols */ \
-  F(CreateSymbol, 1, 1) \
-  F(CreatePrivateSymbol, 1, 1) \
-  F(CreateGlobalPrivateSymbol, 1, 1) \
-  F(NewSymbolWrapper, 1, 1) \
-  F(SymbolDescription, 1, 1) \
-  F(SymbolRegistry, 0, 1) \
-  F(SymbolIsPrivate, 1, 1) \
-  \
-  /* Harmony proxies */ \
-  F(CreateJSProxy, 2, 1) \
-  F(CreateJSFunctionProxy, 4, 1) \
-  F(IsJSProxy, 1, 1) \
-  F(IsJSFunctionProxy, 1, 1) \
-  F(GetHandler, 1, 1) \
-  F(GetCallTrap, 1, 1) \
-  F(GetConstructTrap, 1, 1) \
-  F(Fix, 1, 1) \
-  \
-  /* Harmony sets */ \
-  F(SetInitialize, 1, 1) \
-  F(SetAdd, 2, 1) \
-  F(SetHas, 2, 1) \
-  F(SetDelete, 2, 1) \
-  F(SetClear, 1, 1) \
-  F(SetGetSize, 1, 1) \
-  \
-  F(SetIteratorInitialize, 3, 1) \
-  F(SetIteratorNext, 1, 1) \
-  \
-  /* Harmony maps */ \
-  F(MapInitialize, 1, 1) \
-  F(MapGet, 2, 1) \
-  F(MapHas, 2, 1) \
-  F(MapDelete, 2, 1) \
-  F(MapClear, 1, 1) \
-  F(MapSet, 3, 1) \
-  F(MapGetSize, 1, 1) \
-  \
-  F(MapIteratorInitialize, 3, 1) \
-  F(MapIteratorNext, 1, 1) \
-  \
-  /* Harmony weak maps and sets */ \
-  F(WeakCollectionInitialize, 1, 1) \
-  F(WeakCollectionGet, 2, 1) \
-  F(WeakCollectionHas, 2, 1) \
-  F(WeakCollectionDelete, 2, 1) \
-  F(WeakCollectionSet, 3, 1) \
-  \
-  /* Harmony events */ \
-  F(EnqueueMicrotask, 1, 1) \
-  F(RunMicrotasks, 0, 1) \
-  \
-  /* Harmony observe */ \
-  F(IsObserved, 1, 1) \
-  F(SetIsObserved, 1, 1) \
-  F(GetObservationState, 0, 1) \
-  F(ObservationWeakMapCreate, 0, 1) \
-  F(ObserverObjectAndRecordHaveSameOrigin, 3, 1) \
-  F(ObjectWasCreatedInCurrentOrigin, 1, 1) \
-  F(GetObjectContextObjectObserve, 1, 1) \
-  F(GetObjectContextObjectGetNotifier, 1, 1) \
-  F(GetObjectContextNotifierPerformChange, 1, 1) \
-  \
-  /* Harmony typed arrays */ \
-  F(ArrayBufferInitialize, 2, 1)\
-  F(ArrayBufferSliceImpl, 3, 1) \
-  F(ArrayBufferIsView, 1, 1) \
-  F(ArrayBufferNeuter, 1, 1) \
-  \
-  F(TypedArrayInitializeFromArrayLike, 4, 1) \
-  F(TypedArrayGetBuffer, 1, 1) \
-  F(TypedArraySetFastCases, 3, 1) \
-  \
-  F(DataViewGetBuffer, 1, 1) \
-  F(DataViewGetInt8, 3, 1) \
-  F(DataViewGetUint8, 3, 1) \
-  F(DataViewGetInt16, 3, 1) \
-  F(DataViewGetUint16, 3, 1) \
-  F(DataViewGetInt32, 3, 1) \
-  F(DataViewGetUint32, 3, 1) \
-  F(DataViewGetFloat32, 3, 1) \
-  F(DataViewGetFloat64, 3, 1) \
-  \
-  F(DataViewSetInt8, 4, 1) \
-  F(DataViewSetUint8, 4, 1) \
-  F(DataViewSetInt16, 4, 1) \
-  F(DataViewSetUint16, 4, 1) \
-  F(DataViewSetInt32, 4, 1) \
-  F(DataViewSetUint32, 4, 1) \
-  F(DataViewSetFloat32, 4, 1) \
-  F(DataViewSetFloat64, 4, 1) \
-  \
-  /* Statements */ \
-  F(NewObjectFromBound, 1, 1) \
-  \
-  /* Declarations and initialization */ \
-  F(InitializeVarGlobal, -1 /* 2 or 3 */, 1) \
-  F(OptimizeObjectForAddingMultipleProperties, 2, 1) \
-  \
-  /* Debugging */ \
-  F(DebugPrint, 1, 1) \
-  F(GlobalPrint, 1, 1) \
-  F(DebugTrace, 0, 1) \
-  F(TraceEnter, 0, 1) \
-  F(TraceExit, 1, 1) \
-  F(Abort, 1, 1) \
-  F(AbortJS, 1, 1) \
-  /* ES5 */ \
-  F(OwnKeys, 1, 1) \
-  \
-  /* Message objects */ \
-  F(MessageGetStartPosition, 1, 1) \
-  F(MessageGetScript, 1, 1) \
-  \
-  /* Pseudo functions - handled as macros by parser */ \
-  F(IS_VAR, 1, 1) \
-  \
-  /* expose boolean functions from objects-inl.h */ \
-  F(HasFastSmiElements, 1, 1) \
-  F(HasFastSmiOrObjectElements, 1, 1) \
-  F(HasFastObjectElements, 1, 1) \
-  F(HasFastDoubleElements, 1, 1) \
-  F(HasFastHoleyElements, 1, 1) \
-  F(HasDictionaryElements, 1, 1) \
-  F(HasSloppyArgumentsElements, 1, 1) \
-  F(HasExternalUint8ClampedElements, 1, 1) \
-  F(HasExternalArrayElements, 1, 1) \
-  F(HasExternalInt8Elements, 1, 1) \
-  F(HasExternalUint8Elements, 1, 1) \
-  F(HasExternalInt16Elements, 1, 1) \
-  F(HasExternalUint16Elements, 1, 1) \
-  F(HasExternalInt32Elements, 1, 1) \
-  F(HasExternalUint32Elements, 1, 1) \
-  F(HasExternalFloat32Elements, 1, 1) \
-  F(HasExternalFloat64Elements, 1, 1) \
-  F(HasFixedUint8ClampedElements, 1, 1) \
-  F(HasFixedInt8Elements, 1, 1) \
-  F(HasFixedUint8Elements, 1, 1) \
-  F(HasFixedInt16Elements, 1, 1) \
-  F(HasFixedUint16Elements, 1, 1) \
-  F(HasFixedInt32Elements, 1, 1) \
-  F(HasFixedUint32Elements, 1, 1) \
-  F(HasFixedFloat32Elements, 1, 1) \
-  F(HasFixedFloat64Elements, 1, 1) \
-  F(HasFastProperties, 1, 1) \
-  F(TransitionElementsKind, 2, 1) \
-  F(HaveSameMap, 2, 1) \
-  F(IsJSGlobalProxy, 1, 1)
+#define RUNTIME_FUNCTION_LIST_ALWAYS_2(F)                             \
+  /* Reflection */                                                    \
+  F(FunctionSetInstanceClassName, 2, 1)                               \
+  F(FunctionSetLength, 2, 1)                                          \
+  F(FunctionSetPrototype, 2, 1)                                       \
+  F(FunctionGetName, 1, 1)                                            \
+  F(FunctionSetName, 2, 1)                                            \
+  F(FunctionNameShouldPrintAsAnonymous, 1, 1)                         \
+  F(FunctionMarkNameShouldPrintAsAnonymous, 1, 1)                     \
+  F(FunctionIsGenerator, 1, 1)                                        \
+  F(FunctionIsArrow, 1, 1)                                            \
+  F(FunctionIsConciseMethod, 1, 1)                                    \
+  F(FunctionBindArguments, 4, 1)                                      \
+  F(BoundFunctionGetBindings, 1, 1)                                   \
+  F(FunctionRemovePrototype, 1, 1)                                    \
+  F(FunctionGetSourceCode, 1, 1)                                      \
+  F(FunctionGetScript, 1, 1)                                          \
+  F(FunctionGetScriptSourcePosition, 1, 1)                            \
+  F(FunctionGetPositionForOffset, 2, 1)                               \
+  F(FunctionIsAPIFunction, 1, 1)                                      \
+  F(FunctionIsBuiltin, 1, 1)                                          \
+  F(GetScript, 1, 1)                                                  \
+  F(CollectStackTrace, 2, 1)                                          \
+  F(GetV8Version, 0, 1)                                               \
+  F(GeneratorGetFunction, 1, 1)                                       \
+  F(GeneratorGetContext, 1, 1)                                        \
+  F(GeneratorGetReceiver, 1, 1)                                       \
+  F(GeneratorGetContinuation, 1, 1)                                   \
+  F(GeneratorGetSourcePosition, 1, 1)                                 \
+                                                                      \
+  F(SetCode, 2, 1)                                                    \
+                                                                      \
+  F(CreateApiFunction, 2, 1)                                          \
+  F(IsTemplate, 1, 1)                                                 \
+  F(GetTemplateField, 2, 1)                                           \
+  F(DisableAccessChecks, 1, 1)                                        \
+  F(EnableAccessChecks, 1, 1)                                         \
+                                                                      \
+  /* Dates */                                                         \
+  F(DateCurrentTime, 0, 1)                                            \
+  F(DateParseString, 2, 1)                                            \
+  F(DateLocalTimezone, 1, 1)                                          \
+  F(DateToUTC, 1, 1)                                                  \
+  F(DateMakeDay, 2, 1)                                                \
+  F(DateSetValue, 3, 1)                                               \
+  F(DateCacheVersion, 0, 1)                                           \
+                                                                      \
+  /* Globals */                                                       \
+  F(CompileString, 2, 1)                                              \
+                                                                      \
+  /* Eval */                                                          \
+  F(GlobalProxy, 1, 1)                                                \
+  F(IsAttachedGlobal, 1, 1)                                           \
+                                                                      \
+  F(AddNamedProperty, 4, 1)                                           \
+  F(AddPropertyForTemplate, 4, 1)                                     \
+  F(SetProperty, 4, 1)                                                \
+  F(AddElement, 4, 1)                                                 \
+  F(DefineApiAccessorProperty, 5, 1)                                  \
+  F(DefineDataPropertyUnchecked, 4, 1)                                \
+  F(DefineAccessorPropertyUnchecked, 5, 1)                            \
+  F(GetDataProperty, 2, 1)                                            \
+  F(SetHiddenProperty, 3, 1)                                          \
+                                                                      \
+  /* Arrays */                                                        \
+  F(RemoveArrayHoles, 2, 1)                                           \
+  F(GetArrayKeys, 2, 1)                                               \
+  F(MoveArrayContents, 2, 1)                                          \
+  F(EstimateNumberOfElements, 1, 1)                                   \
+  F(NormalizeElements, 1, 1)                                          \
+                                                                      \
+  /* Getters and Setters */                                           \
+  F(LookupAccessor, 3, 1)                                             \
+                                                                      \
+  /* ES5 */                                                           \
+  F(ObjectFreeze, 1, 1)                                               \
+                                                                      \
+  /* Harmony modules */                                               \
+  F(IsJSModule, 1, 1)                                                 \
+                                                                      \
+  /* Harmony symbols */                                               \
+  F(CreateSymbol, 1, 1)                                               \
+  F(CreatePrivateSymbol, 1, 1)                                        \
+  F(CreateGlobalPrivateOwnSymbol, 1, 1)                               \
+  F(CreatePrivateOwnSymbol, 1, 1)                                     \
+  F(NewSymbolWrapper, 1, 1)                                           \
+  F(SymbolDescription, 1, 1)                                          \
+  F(SymbolRegistry, 0, 1)                                             \
+  F(SymbolIsPrivate, 1, 1)                                            \
+                                                                      \
+  /* Harmony proxies */                                               \
+  F(CreateJSProxy, 2, 1)                                              \
+  F(CreateJSFunctionProxy, 4, 1)                                      \
+  F(IsJSProxy, 1, 1)                                                  \
+  F(IsJSFunctionProxy, 1, 1)                                          \
+  F(GetHandler, 1, 1)                                                 \
+  F(GetCallTrap, 1, 1)                                                \
+  F(GetConstructTrap, 1, 1)                                           \
+  F(Fix, 1, 1)                                                        \
+                                                                      \
+  /* Harmony sets */                                                  \
+  F(SetInitialize, 1, 1)                                              \
+  F(SetAdd, 2, 1)                                                     \
+  F(SetHas, 2, 1)                                                     \
+  F(SetDelete, 2, 1)                                                  \
+  F(SetClear, 1, 1)                                                   \
+  F(SetGetSize, 1, 1)                                                 \
+                                                                      \
+  F(SetIteratorInitialize, 3, 1)                                      \
+  F(SetIteratorNext, 2, 1)                                            \
+                                                                      \
+  /* Harmony maps */                                                  \
+  F(MapInitialize, 1, 1)                                              \
+  F(MapGet, 2, 1)                                                     \
+  F(MapHas, 2, 1)                                                     \
+  F(MapDelete, 2, 1)                                                  \
+  F(MapClear, 1, 1)                                                   \
+  F(MapSet, 3, 1)                                                     \
+  F(MapGetSize, 1, 1)                                                 \
+                                                                      \
+  F(MapIteratorInitialize, 3, 1)                                      \
+  F(MapIteratorNext, 2, 1)                                            \
+                                                                      \
+  /* Harmony weak maps and sets */                                    \
+  F(WeakCollectionInitialize, 1, 1)                                   \
+  F(WeakCollectionGet, 2, 1)                                          \
+  F(WeakCollectionHas, 2, 1)                                          \
+  F(WeakCollectionDelete, 2, 1)                                       \
+  F(WeakCollectionSet, 3, 1)                                          \
+                                                                      \
+  F(GetWeakMapEntries, 1, 1)                                          \
+  F(GetWeakSetValues, 1, 1)                                           \
+                                                                      \
+  /* Harmony events */                                                \
+  F(EnqueueMicrotask, 1, 1)                                           \
+  F(RunMicrotasks, 0, 1)                                              \
+                                                                      \
+  /* Harmony observe */                                               \
+  F(IsObserved, 1, 1)                                                 \
+  F(SetIsObserved, 1, 1)                                              \
+  F(GetObservationState, 0, 1)                                        \
+  F(ObservationWeakMapCreate, 0, 1)                                   \
+  F(ObserverObjectAndRecordHaveSameOrigin, 3, 1)                      \
+  F(ObjectWasCreatedInCurrentOrigin, 1, 1)                            \
+  F(GetObjectContextObjectObserve, 1, 1)                              \
+  F(GetObjectContextObjectGetNotifier, 1, 1)                          \
+  F(GetObjectContextNotifierPerformChange, 1, 1)                      \
+                                                                      \
+  /* Harmony typed arrays */                                          \
+  F(ArrayBufferInitialize, 2, 1)                                      \
+  F(ArrayBufferSliceImpl, 3, 1)                                       \
+  F(ArrayBufferIsView, 1, 1)                                          \
+  F(ArrayBufferNeuter, 1, 1)                                          \
+                                                                      \
+  F(TypedArrayInitializeFromArrayLike, 4, 1)                          \
+  F(TypedArrayGetBuffer, 1, 1)                                        \
+  F(TypedArraySetFastCases, 3, 1)                                     \
+                                                                      \
+  F(DataViewGetBuffer, 1, 1)                                          \
+  F(DataViewGetInt8, 3, 1)                                            \
+  F(DataViewGetUint8, 3, 1)                                           \
+  F(DataViewGetInt16, 3, 1)                                           \
+  F(DataViewGetUint16, 3, 1)                                          \
+  F(DataViewGetInt32, 3, 1)                                           \
+  F(DataViewGetUint32, 3, 1)                                          \
+  F(DataViewGetFloat32, 3, 1)                                         \
+  F(DataViewGetFloat64, 3, 1)                                         \
+                                                                      \
+  F(DataViewSetInt8, 4, 1)                                            \
+  F(DataViewSetUint8, 4, 1)                                           \
+  F(DataViewSetInt16, 4, 1)                                           \
+  F(DataViewSetUint16, 4, 1)                                          \
+  F(DataViewSetInt32, 4, 1)                                           \
+  F(DataViewSetUint32, 4, 1)                                          \
+  F(DataViewSetFloat32, 4, 1)                                         \
+  F(DataViewSetFloat64, 4, 1)                                         \
+                                                                      \
+  /* Statements */                                                    \
+  F(NewObjectFromBound, 1, 1)                                         \
+                                                                      \
+  /* Declarations and initialization */                               \
+  F(InitializeVarGlobal, 3, 1)                                        \
+  F(OptimizeObjectForAddingMultipleProperties, 2, 1)                  \
+                                                                      \
+  /* Debugging */                                                     \
+  F(DebugPrint, 1, 1)                                                 \
+  F(GlobalPrint, 1, 1)                                                \
+  F(DebugTrace, 0, 1)                                                 \
+  F(TraceEnter, 0, 1)                                                 \
+  F(TraceExit, 1, 1)                                                  \
+  F(Abort, 1, 1)                                                      \
+  F(AbortJS, 1, 1)                                                    \
+  /* ES5 */                                                           \
+  F(OwnKeys, 1, 1)                                                    \
+                                                                      \
+  /* Message objects */                                               \
+  F(MessageGetStartPosition, 1, 1)                                    \
+  F(MessageGetScript, 1, 1)                                           \
+                                                                      \
+  /* Pseudo functions - handled as macros by parser */                \
+  F(IS_VAR, 1, 1)                                                     \
+                                                                      \
+  /* expose boolean functions from objects-inl.h */                   \
+  F(HasFastSmiElements, 1, 1)                                         \
+  F(HasFastSmiOrObjectElements, 1, 1)                                 \
+  F(HasFastObjectElements, 1, 1)                                      \
+  F(HasFastDoubleElements, 1, 1)                                      \
+  F(HasFastHoleyElements, 1, 1)                                       \
+  F(HasDictionaryElements, 1, 1)                                      \
+  F(HasSloppyArgumentsElements, 1, 1)                                 \
+  F(HasExternalUint8ClampedElements, 1, 1)                            \
+  F(HasExternalArrayElements, 1, 1)                                   \
+  F(HasExternalInt8Elements, 1, 1)                                    \
+  F(HasExternalUint8Elements, 1, 1)                                   \
+  F(HasExternalInt16Elements, 1, 1)                                   \
+  F(HasExternalUint16Elements, 1, 1)                                  \
+  F(HasExternalInt32Elements, 1, 1)                                   \
+  F(HasExternalUint32Elements, 1, 1)                                  \
+  F(HasExternalFloat32Elements, 1, 1)                                 \
+  F(HasExternalFloat64Elements, 1, 1)                                 \
+  F(HasFixedUint8ClampedElements, 1, 1)                               \
+  F(HasFixedInt8Elements, 1, 1)                                       \
+  F(HasFixedUint8Elements, 1, 1)                                      \
+  F(HasFixedInt16Elements, 1, 1)                                      \
+  F(HasFixedUint16Elements, 1, 1)                                     \
+  F(HasFixedInt32Elements, 1, 1)                                      \
+  F(HasFixedUint32Elements, 1, 1)                                     \
+  F(HasFixedFloat32Elements, 1, 1)                                    \
+  F(HasFixedFloat64Elements, 1, 1)                                    \
+  F(HasFastProperties, 1, 1)                                          \
+  F(TransitionElementsKind, 2, 1)                                     \
+  F(HaveSameMap, 2, 1)                                                \
+  F(IsJSGlobalProxy, 1, 1)                                            \
+  F(ForInInit, 2, 2)             /* TODO(turbofan): Only temporary */ \
+  F(ForInNext, 4, 2)             /* TODO(turbofan): Only temporary */ \
+  F(ForInCacheArrayLength, 2, 1) /* TODO(turbofan): Only temporary */
+
+
+#define RUNTIME_FUNCTION_LIST_ALWAYS_3(F)                    \
+  /* String and Regexp */                                    \
+  F(NumberToStringRT, 1, 1)                                  \
+  F(RegExpConstructResult, 3, 1)                             \
+  F(RegExpExecRT, 4, 1)                                      \
+  F(StringAdd, 2, 1)                                         \
+  F(SubString, 3, 1)                                         \
+  F(InternalizeString, 1, 1)                                 \
+  F(StringCompare, 2, 1)                                     \
+  F(StringCharCodeAtRT, 2, 1)                                \
+  F(GetFromCache, 2, 1)                                      \
+                                                             \
+  /* Compilation */                                          \
+  F(CompileLazy, 1, 1)                                       \
+  F(CompileOptimized, 2, 1)                                  \
+  F(TryInstallOptimizedCode, 1, 1)                           \
+  F(NotifyDeoptimized, 1, 1)                                 \
+  F(NotifyStubFailure, 0, 1)                                 \
+                                                             \
+  /* Utilities */                                            \
+  F(AllocateInNewSpace, 1, 1)                                \
+  F(AllocateInTargetSpace, 2, 1)                             \
+  F(AllocateHeapNumber, 0, 1)                                \
+  F(NumberToSmi, 1, 1)                                       \
+  F(NumberToStringSkipCache, 1, 1)                           \
+                                                             \
+  F(NewArguments, 1, 1) /* TODO(turbofan): Only temporary */ \
+  F(NewSloppyArguments, 3, 1)                                \
+  F(NewStrictArguments, 3, 1)                                \
+                                                             \
+  /* Harmony generators */                                   \
+  F(CreateJSGeneratorObject, 0, 1)                           \
+  F(SuspendJSGeneratorObject, 1, 1)                          \
+  F(ResumeJSGeneratorObject, 3, 1)                           \
+  F(ThrowGeneratorStateError, 1, 1)                          \
+                                                             \
+  /* Arrays */                                               \
+  F(ArrayConstructor, -1, 1)                                 \
+  F(InternalArrayConstructor, -1, 1)                         \
+                                                             \
+  /* Literals */                                             \
+  F(MaterializeRegExpLiteral, 4, 1)                          \
+  F(CreateObjectLiteral, 4, 1)                               \
+  F(CreateArrayLiteral, 4, 1)                                \
+  F(CreateArrayLiteralStubBailout, 3, 1)                     \
+                                                             \
+  /* Statements */                                           \
+  F(NewClosure, 3, 1)                                        \
+  F(NewClosureFromStubFailure, 1, 1)                         \
+  F(NewObject, 1, 1)                                         \
+  F(NewObjectWithAllocationSite, 2, 1)                       \
+  F(FinalizeInstanceSize, 1, 1)                              \
+  F(Throw, 1, 1)                                             \
+  F(ReThrow, 1, 1)                                           \
+  F(ThrowReferenceError, 1, 1)                               \
+  F(ThrowNotDateError, 0, 1)                                 \
+  F(StackGuard, 0, 1)                                        \
+  F(Interrupt, 0, 1)                                         \
+  F(PromoteScheduledException, 0, 1)                         \
+                                                             \
+  /* Contexts */                                             \
+  F(NewGlobalContext, 2, 1)                                  \
+  F(NewFunctionContext, 1, 1)                                \
+  F(PushWithContext, 2, 1)                                   \
+  F(PushCatchContext, 3, 1)                                  \
+  F(PushBlockContext, 2, 1)                                  \
+  F(PushModuleContext, 2, 1)                                 \
+  F(DeleteLookupSlot, 2, 1)                                  \
+  F(LoadLookupSlot, 2, 2)                                    \
+  F(LoadLookupSlotNoReferenceError, 2, 2)                    \
+  F(StoreLookupSlot, 4, 1)                                   \
+                                                             \
+  /* Declarations and initialization */                      \
+  F(DeclareGlobals, 3, 1)                                    \
+  F(DeclareModules, 1, 1)                                    \
+  F(DeclareLookupSlot, 4, 1)                                 \
+  F(InitializeConstGlobal, 2, 1)                             \
+  F(InitializeLegacyConstLookupSlot, 3, 1)                   \
+                                                             \
+  /* Eval */                                                 \
+  F(ResolvePossiblyDirectEval, 6, 2)                         \
+                                                             \
+  /* Maths */                                                \
+  F(MathPowSlow, 2, 1)                                       \
+  F(MathPowRT, 2, 1)
 
 
 #define RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
@@ -438,6 +555,7 @@
   F(DebugConstructedBy, 2, 1) \
   F(DebugGetPrototype, 1, 1) \
   F(DebugSetScriptSource, 2, 1) \
+  F(DebugCallbackSupportsStepping, 1, 1) \
   F(SystemBreak, 0, 1) \
   F(DebugDisassembleFunction, 1, 1) \
   F(DebugDisassembleConstructor, 1, 1) \
@@ -521,140 +639,55 @@
 #define RUNTIME_FUNCTION_LIST(F) \
   RUNTIME_FUNCTION_LIST_ALWAYS_1(F) \
   RUNTIME_FUNCTION_LIST_ALWAYS_2(F) \
+  RUNTIME_FUNCTION_LIST_ALWAYS_3(F) \
   RUNTIME_FUNCTION_LIST_DEBUG(F) \
   RUNTIME_FUNCTION_LIST_DEBUGGER(F) \
   RUNTIME_FUNCTION_LIST_I18N_SUPPORT(F)
 
-// RUNTIME_HIDDEN_FUNCTION_LIST defines all runtime functions accessed
-// by id from code generator, but not via native call by name.
-// Entries have the form F(name, number of arguments, number of return values).
-#define RUNTIME_HIDDEN_FUNCTION_LIST(F) \
-  /* String and Regexp */ \
-  F(NumberToString, 1, 1) \
-  F(RegExpConstructResult, 3, 1) \
-  F(RegExpExec, 4, 1) \
-  F(StringAdd, 2, 1)  \
-  F(SubString, 3, 1) \
-  F(StringCompare, 2, 1) \
-  F(StringCharCodeAt, 2, 1) \
-  F(GetFromCache, 2, 1) \
-  \
-  /* Compilation */ \
-  F(CompileUnoptimized, 1, 1) \
-  F(CompileOptimized, 2, 1) \
-  F(TryInstallOptimizedCode, 1, 1) \
-  F(NotifyDeoptimized, 1, 1) \
-  F(NotifyStubFailure, 0, 1) \
-  \
-  /* Utilities */ \
-  F(AllocateInNewSpace, 1, 1) \
-  F(AllocateInTargetSpace, 2, 1) \
-  F(AllocateHeapNumber, 0, 1) \
-  F(NumberToSmi, 1, 1) \
-  F(NumberToStringSkipCache, 1, 1) \
-  \
-  F(NewSloppyArguments, 3, 1) \
-  F(NewStrictArguments, 3, 1) \
-  \
-  /* Harmony generators */ \
-  F(CreateJSGeneratorObject, 0, 1) \
-  F(SuspendJSGeneratorObject, 1, 1) \
-  F(ResumeJSGeneratorObject, 3, 1) \
-  F(ThrowGeneratorStateError, 1, 1) \
-  \
-  /* Arrays */ \
-  F(ArrayConstructor, -1, 1) \
-  F(InternalArrayConstructor, -1, 1) \
-  \
-  /* Literals */ \
-  F(MaterializeRegExpLiteral, 4, 1)\
-  F(CreateObjectLiteral, 4, 1) \
-  F(CreateArrayLiteral, 4, 1) \
-  F(CreateArrayLiteralStubBailout, 3, 1) \
-  \
-  /* Statements */ \
-  F(NewClosure, 3, 1) \
-  F(NewClosureFromStubFailure, 1, 1) \
-  F(NewObject, 1, 1) \
-  F(NewObjectWithAllocationSite, 2, 1) \
-  F(FinalizeInstanceSize, 1, 1) \
-  F(Throw, 1, 1) \
-  F(ReThrow, 1, 1) \
-  F(ThrowReferenceError, 1, 1) \
-  F(ThrowNotDateError, 0, 1) \
-  F(StackGuard, 0, 1) \
-  F(Interrupt, 0, 1) \
-  F(PromoteScheduledException, 0, 1) \
-  \
-  /* Contexts */ \
-  F(NewGlobalContext, 2, 1) \
-  F(NewFunctionContext, 1, 1) \
-  F(PushWithContext, 2, 1) \
-  F(PushCatchContext, 3, 1) \
-  F(PushBlockContext, 2, 1) \
-  F(PushModuleContext, 2, 1) \
-  F(DeleteContextSlot, 2, 1) \
-  F(LoadContextSlot, 2, 2) \
-  F(LoadContextSlotNoReferenceError, 2, 2) \
-  F(StoreContextSlot, 4, 1) \
-  \
-  /* Declarations and initialization */ \
-  F(DeclareGlobals, 3, 1) \
-  F(DeclareModules, 1, 1) \
-  F(DeclareContextSlot, 4, 1) \
-  F(InitializeConstGlobal, 2, 1) \
-  F(InitializeConstContextSlot, 3, 1) \
-  \
-  /* Eval */ \
-  F(ResolvePossiblyDirectEval, 5, 2) \
-  \
-  /* Maths */ \
-  F(MathPowSlow, 2, 1) \
-  F(MathPow, 2, 1)
-
 // ----------------------------------------------------------------------------
 // INLINE_FUNCTION_LIST defines all inlined functions accessed
 // with a native call of the form %_name from within JS code.
 // Entries have the form F(name, number of arguments, number of return values).
-#define INLINE_FUNCTION_LIST(F) \
-  F(IsSmi, 1, 1)                                                             \
-  F(IsNonNegativeSmi, 1, 1)                                                  \
-  F(IsArray, 1, 1)                                                           \
-  F(IsRegExp, 1, 1)                                                          \
-  F(IsConstructCall, 0, 1)                                                   \
-  F(CallFunction, -1 /* receiver + n args + function */, 1)                  \
-  F(ArgumentsLength, 0, 1)                                                   \
-  F(Arguments, 1, 1)                                                         \
-  F(ValueOf, 1, 1)                                                           \
-  F(SetValueOf, 2, 1)                                                        \
-  F(DateField, 2 /* date object, field index */, 1)                          \
-  F(StringCharFromCode, 1, 1)                                                \
-  F(StringCharAt, 2, 1)                                                      \
-  F(OneByteSeqStringSetChar, 3, 1)                                           \
-  F(TwoByteSeqStringSetChar, 3, 1)                                           \
-  F(ObjectEquals, 2, 1)                                                      \
-  F(IsObject, 1, 1)                                                          \
-  F(IsFunction, 1, 1)                                                        \
-  F(IsUndetectableObject, 1, 1)                                              \
-  F(IsSpecObject, 1, 1)                                                      \
-  F(IsStringWrapperSafeForDefaultValueOf, 1, 1)                              \
-  F(MathPow, 2, 1)                                                           \
-  F(IsMinusZero, 1, 1)                                                       \
-  F(HasCachedArrayIndex, 1, 1)                                               \
-  F(GetCachedArrayIndex, 1, 1)                                               \
-  F(FastAsciiArrayJoin, 2, 1)                                                \
-  F(GeneratorNext, 2, 1)                                                     \
-  F(GeneratorThrow, 2, 1)                                                    \
-  F(DebugBreakInOptimizedCode, 0, 1)                                         \
-  F(ClassOf, 1, 1)                                                           \
-  F(StringCharCodeAt, 2, 1)                                                  \
-  F(StringAdd, 2, 1)                                                         \
-  F(SubString, 3, 1)                                                         \
-  F(StringCompare, 2, 1)                                                     \
-  F(RegExpExec, 4, 1)                                                        \
-  F(RegExpConstructResult, 3, 1)                                             \
-  F(GetFromCache, 2, 1)                                                      \
-  F(NumberToString, 1, 1)
+#define INLINE_FUNCTION_LIST(F)                             \
+  F(IsSmi, 1, 1)                                            \
+  F(IsNonNegativeSmi, 1, 1)                                 \
+  F(IsArray, 1, 1)                                          \
+  F(IsRegExp, 1, 1)                                         \
+  F(IsConstructCall, 0, 1)                                  \
+  F(CallFunction, -1 /* receiver + n args + function */, 1) \
+  F(ArgumentsLength, 0, 1)                                  \
+  F(Arguments, 1, 1)                                        \
+  F(ValueOf, 1, 1)                                          \
+  F(SetValueOf, 2, 1)                                       \
+  F(DateField, 2 /* date object, field index */, 1)         \
+  F(StringCharFromCode, 1, 1)                               \
+  F(StringCharAt, 2, 1)                                     \
+  F(OneByteSeqStringSetChar, 3, 1)                          \
+  F(TwoByteSeqStringSetChar, 3, 1)                          \
+  F(ObjectEquals, 2, 1)                                     \
+  F(IsObject, 1, 1)                                         \
+  F(IsFunction, 1, 1)                                       \
+  F(IsUndetectableObject, 1, 1)                             \
+  F(IsSpecObject, 1, 1)                                     \
+  F(IsStringWrapperSafeForDefaultValueOf, 1, 1)             \
+  F(MathPow, 2, 1)                                          \
+  F(IsMinusZero, 1, 1)                                      \
+  F(HasCachedArrayIndex, 1, 1)                              \
+  F(GetCachedArrayIndex, 1, 1)                              \
+  F(FastOneByteArrayJoin, 2, 1)                             \
+  F(GeneratorNext, 2, 1)                                    \
+  F(GeneratorThrow, 2, 1)                                   \
+  F(DebugBreakInOptimizedCode, 0, 1)                        \
+  F(ClassOf, 1, 1)                                          \
+  F(StringCharCodeAt, 2, 1)                                 \
+  F(StringAdd, 2, 1)                                        \
+  F(SubString, 3, 1)                                        \
+  F(StringCompare, 2, 1)                                    \
+  F(RegExpExec, 4, 1)                                       \
+  F(RegExpConstructResult, 3, 1)                            \
+  F(GetFromCache, 2, 1)                                     \
+  F(NumberToString, 1, 1)                                   \
+  F(DebugIsActive, 0, 1)
 
 
 // ----------------------------------------------------------------------------
@@ -680,9 +713,7 @@
   F(DoubleHi, 1, 1)                                                          \
   F(DoubleLo, 1, 1)                                                          \
   F(MathSqrtRT, 1, 1)                                                        \
-  F(MathLogRT, 1, 1)                                                         \
-  /* Debugger */                                                             \
-  F(DebugCallbackSupportsStepping, 1, 1)
+  F(MathLogRT, 1, 1)
 
 
 //---------------------------------------------------------------------------
@@ -737,9 +768,6 @@
     RUNTIME_FUNCTION_LIST(F)
     INLINE_OPTIMIZED_FUNCTION_LIST(F)
 #undef F
-#define F(name, nargs, ressize) kHidden##name,
-    RUNTIME_HIDDEN_FUNCTION_LIST(F)
-#undef F
 #define F(name, nargs, ressize) kInline##name,
     INLINE_FUNCTION_LIST(F)
 #undef F
@@ -752,7 +780,6 @@
 
   enum IntrinsicType {
     RUNTIME,
-    RUNTIME_HIDDEN,
     INLINE,
     INLINE_OPTIMIZED
   };
@@ -787,6 +814,9 @@
   // Get the intrinsic function with the given FunctionId.
   static const Function* FunctionForId(FunctionId id);
 
+  // Get the intrinsic function with the given function entry address.
+  static const Function* FunctionForEntry(Address ref);
+
   // General-purpose helper functions for runtime system.
   static int StringMatch(Isolate* isolate,
                          Handle<String> sub,
@@ -806,20 +836,12 @@
       uint32_t index);
 
   MUST_USE_RESULT static MaybeHandle<Object> SetObjectProperty(
-      Isolate* isolate,
-      Handle<Object> object,
-      Handle<Object> key,
-      Handle<Object> value,
-      PropertyAttributes attr,
-      StrictMode strict_mode);
+      Isolate* isolate, Handle<Object> object, Handle<Object> key,
+      Handle<Object> value, StrictMode strict_mode);
 
-  MUST_USE_RESULT static MaybeHandle<Object> ForceSetObjectProperty(
-      Handle<JSObject> object,
-      Handle<Object> key,
-      Handle<Object> value,
-      PropertyAttributes attr,
-      JSReceiver::StoreFromKeyed store_from_keyed
-        = JSReceiver::MAY_BE_STORE_FROM_KEYED);
+  MUST_USE_RESULT static MaybeHandle<Object> DefineObjectProperty(
+      Handle<JSObject> object, Handle<Object> key, Handle<Object> value,
+      PropertyAttributes attr);
 
   MUST_USE_RESULT static MaybeHandle<Object> DeleteObjectProperty(
       Isolate* isolate,
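
The F(name, nargs, ressize) entries above follow the X-macro pattern: the same list macro is expanded several times with different definitions of F, once to generate FunctionId enumerators (the kInline##name expansion appears in the hunk above) and once to build a parallel metadata table. Removing RUNTIME_HIDDEN_FUNCTION_LIST, as this hunk does, just folds one such list into another; the expansion machinery is unchanged. A minimal self-contained sketch of the pattern, with hypothetical Demo* names:

    #include <cstdio>

    #define DEMO_FUNCTION_LIST(F) \
      F(Add, 2, 1)                \
      F(Negate, 1, 1)

    // First expansion: one enumerator per list entry.
    enum DemoFunctionId {
    #define F(name, nargs, ressize) kDemo##name,
      DEMO_FUNCTION_LIST(F)
    #undef F
      kDemoNumFunctions
    };

    // Second expansion: a parallel metadata table, indexable by the enum.
    struct DemoFunction { const char* name; int nargs; int result_size; };
    static const DemoFunction kDemoFunctions[] = {
    #define F(name, nargs, ressize) {#name, nargs, ressize},
      DEMO_FUNCTION_LIST(F)
    #undef F
    };

    int main() {
      const DemoFunction& f = kDemoFunctions[kDemoAdd];
      std::printf("%s: %d args, %d results\n", f.name, f.nargs, f.result_size);
      return 0;
    }
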
diff --git a/src/runtime.js b/src/runtime.js
index 1dee2e0..4d15d20 100644
--- a/src/runtime.js
+++ b/src/runtime.js
@@ -36,6 +36,7 @@
       while (true) {
         if (IS_NUMBER(y)) return %NumberEquals(x, y);
         if (IS_NULL_OR_UNDEFINED(y)) return 1;  // not equal
+        if (IS_SYMBOL(y)) return 1;  // not equal
         if (!IS_SPEC_OBJECT(y)) {
           // String or boolean.
           return %NumberEquals(x, %ToNumber(y));
@@ -501,7 +502,7 @@
   }
   if (IS_BOOLEAN(x)) return x ? 1 : 0;
   if (IS_UNDEFINED(x)) return NAN;
-  if (IS_SYMBOL(x)) return NAN;
+  if (IS_SYMBOL(x)) throw MakeTypeError('symbol_to_number', []);
   return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
 }
 
@@ -512,7 +513,7 @@
   }
   if (IS_BOOLEAN(x)) return x ? 1 : 0;
   if (IS_UNDEFINED(x)) return NAN;
-  if (IS_SYMBOL(x)) return NAN;
+  if (IS_SYMBOL(x)) throw MakeTypeError('symbol_to_number', []);
   return (IS_NULL(x)) ? 0 : ToNumber(%DefaultNumber(x));
 }
 
@@ -562,6 +563,14 @@
 }
 
 
+// ES6, draft 08-24-14, section 7.1.15
+function ToLength(arg) {
+  arg = ToInteger(arg);
+  if (arg < 0) return 0;
+  return arg < $Number.MAX_SAFE_INTEGER ? arg : $Number.MAX_SAFE_INTEGER;
+}
+
+
 // ECMA-262, section 9.6, page 34.
 function ToUint32(x) {
   if (%_IsSmi(x) && x >= 0) return x;
@@ -607,35 +616,37 @@
 
 // ECMA-262, section 8.6.2.6, page 28.
 function DefaultNumber(x) {
-  var valueOf = x.valueOf;
-  if (IS_SPEC_FUNCTION(valueOf)) {
-    var v = %_CallFunction(x, valueOf);
-    if (%IsPrimitive(v)) return v;
-  }
+  if (!IS_SYMBOL_WRAPPER(x)) {
+    var valueOf = x.valueOf;
+    if (IS_SPEC_FUNCTION(valueOf)) {
+      var v = %_CallFunction(x, valueOf);
+      if (%IsPrimitive(v)) return v;
+    }
 
-  var toString = x.toString;
-  if (IS_SPEC_FUNCTION(toString)) {
-    var s = %_CallFunction(x, toString);
-    if (%IsPrimitive(s)) return s;
+    var toString = x.toString;
+    if (IS_SPEC_FUNCTION(toString)) {
+      var s = %_CallFunction(x, toString);
+      if (%IsPrimitive(s)) return s;
+    }
   }
-
   throw %MakeTypeError('cannot_convert_to_primitive', []);
 }
 
 // ECMA-262, section 8.6.2.6, page 28.
 function DefaultString(x) {
-  var toString = x.toString;
-  if (IS_SPEC_FUNCTION(toString)) {
-    var s = %_CallFunction(x, toString);
-    if (%IsPrimitive(s)) return s;
-  }
+  if (!IS_SYMBOL_WRAPPER(x)) {
+    var toString = x.toString;
+    if (IS_SPEC_FUNCTION(toString)) {
+      var s = %_CallFunction(x, toString);
+      if (%IsPrimitive(s)) return s;
+    }
 
-  var valueOf = x.valueOf;
-  if (IS_SPEC_FUNCTION(valueOf)) {
-    var v = %_CallFunction(x, valueOf);
-    if (%IsPrimitive(v)) return v;
+    var valueOf = x.valueOf;
+    if (IS_SPEC_FUNCTION(valueOf)) {
+      var v = %_CallFunction(x, valueOf);
+      if (%IsPrimitive(v)) return v;
+    }
   }
-
   throw %MakeTypeError('cannot_convert_to_primitive', []);
 }
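
The ToLength helper added above clamps any number into the valid array-length range [0, 2^53 - 1]; the same hunk also makes ToNumber throw a TypeError when given a symbol instead of quietly producing NaN. A hedged C++ sketch of the ToLength clamp (the patch itself implements it in the VM's bootstrap JavaScript):

    #include <cmath>

    // Truncate toward zero, then clamp into [0, 2^53 - 1]. ToInteger maps
    // NaN to 0, so NaN also ends up at 0; +Infinity clamps to the maximum.
    double ToLengthSketch(double arg) {
      if (std::isnan(arg)) return 0.0;
      arg = std::trunc(arg);
      if (arg < 0.0) return 0.0;
      const double kMaxSafeInteger = 9007199254740991.0;  // 2^53 - 1
      return arg < kMaxSafeInteger ? arg : kMaxSafeInteger;
    }
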
 
diff --git a/src/safepoint-table.cc b/src/safepoint-table.cc
index e041e17..89500e2 100644
--- a/src/safepoint-table.cc
+++ b/src/safepoint-table.cc
@@ -9,6 +9,7 @@
 #include "src/deoptimizer.h"
 #include "src/disasm.h"
 #include "src/macro-assembler.h"
+#include "src/ostreams.h"
 #include "src/zone-inl.h"
 
 namespace v8 {
@@ -16,8 +17,8 @@
 
 
 bool SafepointEntry::HasRegisters() const {
-  ASSERT(is_valid());
-  ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+  DCHECK(is_valid());
+  DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte));
   const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
   for (int i = 0; i < num_reg_bytes; i++) {
     if (bits_[i] != SafepointTable::kNoRegisters) return true;
@@ -27,8 +28,8 @@
 
 
 bool SafepointEntry::HasRegisterAt(int reg_index) const {
-  ASSERT(is_valid());
-  ASSERT(reg_index >= 0 && reg_index < kNumSafepointRegisters);
+  DCHECK(is_valid());
+  DCHECK(reg_index >= 0 && reg_index < kNumSafepointRegisters);
   int byte_index = reg_index >> kBitsPerByteLog2;
   int bit_index = reg_index & (kBitsPerByte - 1);
   return (bits_[byte_index] & (1 << bit_index)) != 0;
@@ -36,15 +37,15 @@
 
 
 SafepointTable::SafepointTable(Code* code) {
-  ASSERT(code->is_crankshafted());
+  DCHECK(code->is_crankshafted());
   code_ = code;
   Address header = code->instruction_start() + code->safepoint_table_offset();
   length_ = Memory::uint32_at(header + kLengthOffset);
   entry_size_ = Memory::uint32_at(header + kEntrySizeOffset);
   pc_and_deoptimization_indexes_ = header + kHeaderSize;
   entries_ = pc_and_deoptimization_indexes_ +
-            (length_ * kPcAndDeoptimizationIndexSize);
-  ASSERT(entry_size_ > 0);
+             (length_ * kPcAndDeoptimizationIndexSize);
+  DCHECK(entry_size_ > 0);
   STATIC_ASSERT(SafepointEntry::DeoptimizationIndexField::kMax ==
                 Safepoint::kNoDeoptimizationIndex);
 }
@@ -60,35 +61,36 @@
 }
 
 
-void SafepointTable::PrintEntry(unsigned index, FILE* out) const {
+void SafepointTable::PrintEntry(unsigned index, OStream& os) const {  // NOLINT
   disasm::NameConverter converter;
   SafepointEntry entry = GetEntry(index);
   uint8_t* bits = entry.bits();
 
   // Print the stack slot bits.
   if (entry_size_ > 0) {
-    ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+    DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte));
     const int first = kNumSafepointRegisters >> kBitsPerByteLog2;
     int last = entry_size_ - 1;
-    for (int i = first; i < last; i++) PrintBits(out, bits[i], kBitsPerByte);
+    for (int i = first; i < last; i++) PrintBits(os, bits[i], kBitsPerByte);
     int last_bits = code_->stack_slots() - ((last - first) * kBitsPerByte);
-    PrintBits(out, bits[last], last_bits);
+    PrintBits(os, bits[last], last_bits);
 
     // Print the registers (if any).
     if (!entry.HasRegisters()) return;
     for (int j = 0; j < kNumSafepointRegisters; j++) {
       if (entry.HasRegisterAt(j)) {
-        PrintF(out, " | %s", converter.NameOfCPURegister(j));
+        os << " | " << converter.NameOfCPURegister(j);
       }
     }
   }
 }
 
 
-void SafepointTable::PrintBits(FILE* out, uint8_t byte, int digits) {
-  ASSERT(digits >= 0 && digits <= kBitsPerByte);
+void SafepointTable::PrintBits(OStream& os,  // NOLINT
+                               uint8_t byte, int digits) {
+  DCHECK(digits >= 0 && digits <= kBitsPerByte);
   for (int i = 0; i < digits; i++) {
-    PrintF(out, "%c", ((byte & (1 << i)) == 0) ? '0' : '1');
+    os << (((byte & (1 << i)) == 0) ? "0" : "1");
   }
 }
 
@@ -103,7 +105,7 @@
     Safepoint::Kind kind,
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
-  ASSERT(arguments >= 0);
+  DCHECK(arguments >= 0);
   DeoptimizationInfo info;
   info.pc = assembler->pc_offset();
   info.arguments = arguments;
@@ -129,7 +131,7 @@
 }
 
 unsigned SafepointTableBuilder::GetCodeOffset() const {
-  ASSERT(emitted_);
+  DCHECK(emitted_);
   return offset_;
 }
 
@@ -168,7 +170,7 @@
     bits.AddBlock(0, bytes_per_entry, zone_);
 
     // Run through the registers (if any).
-    ASSERT(IsAligned(kNumSafepointRegisters, kBitsPerByte));
+    DCHECK(IsAligned(kNumSafepointRegisters, kBitsPerByte));
     if (registers == NULL) {
       const int num_reg_bytes = kNumSafepointRegisters >> kBitsPerByteLog2;
       for (int j = 0; j < num_reg_bytes; j++) {
@@ -177,7 +179,7 @@
     } else {
       for (int j = 0; j < registers->length(); j++) {
         int index = registers->at(j);
-        ASSERT(index >= 0 && index < kNumSafepointRegisters);
+        DCHECK(index >= 0 && index < kNumSafepointRegisters);
         int byte_index = index >> kBitsPerByteLog2;
         int bit_index = index & (kBitsPerByte - 1);
         bits[byte_index] |= (1 << bit_index);
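
HasRegisterAt above and this builder loop address the same packed register bitmap: register i lives at bit (i & 7) of byte (i >> 3), since kBitsPerByteLog2 is 3 for 8-bit bytes. A self-contained sketch of that addressing:

    #include <cstdint>

    // Set and test a register's bit in a packed safepoint bitmap (8-bit bytes).
    inline void SetRegisterBit(uint8_t* bits, int reg_index) {
      bits[reg_index >> 3] |= static_cast<uint8_t>(1u << (reg_index & 7));
    }

    inline bool TestRegisterBit(const uint8_t* bits, int reg_index) {
      return (bits[reg_index >> 3] & (1u << (reg_index & 7))) != 0;
    }
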
diff --git a/src/safepoint-table.h b/src/safepoint-table.h
index 2fed5a7..5fbfe41 100644
--- a/src/safepoint-table.h
+++ b/src/safepoint-table.h
@@ -6,7 +6,7 @@
 #define V8_SAFEPOINT_TABLE_H_
 
 #include "src/allocation.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/v8memory.h"
 #include "src/zone.h"
 
@@ -20,7 +20,7 @@
   SafepointEntry() : info_(0), bits_(NULL) {}
 
   SafepointEntry(unsigned info, uint8_t* bits) : info_(info), bits_(bits) {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
   }
 
   bool is_valid() const { return bits_ != NULL; }
@@ -35,7 +35,7 @@
   }
 
   int deoptimization_index() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return DeoptimizationIndexField::decode(info_);
   }
 
@@ -55,17 +55,17 @@
                     kSaveDoublesFieldBits> { }; // NOLINT
 
   int argument_count() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return ArgumentsField::decode(info_);
   }
 
   bool has_doubles() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return SaveDoublesField::decode(info_);
   }
 
   uint8_t* bits() {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return bits_;
   }
 
@@ -84,17 +84,18 @@
 
   int size() const {
     return kHeaderSize +
-           (length_ * (kPcAndDeoptimizationIndexSize + entry_size_)); }
+           (length_ * (kPcAndDeoptimizationIndexSize + entry_size_));
+  }
   unsigned length() const { return length_; }
   unsigned entry_size() const { return entry_size_; }
 
   unsigned GetPcOffset(unsigned index) const {
-    ASSERT(index < length_);
+    DCHECK(index < length_);
     return Memory::uint32_at(GetPcOffsetLocation(index));
   }
 
   SafepointEntry GetEntry(unsigned index) const {
-    ASSERT(index < length_);
+    DCHECK(index < length_);
     unsigned info = Memory::uint32_at(GetInfoLocation(index));
     uint8_t* bits = &Memory::uint8_at(entries_ + (index * entry_size_));
     return SafepointEntry(info, bits);
@@ -103,7 +104,7 @@
   // Returns the entry for the given pc.
   SafepointEntry FindEntry(Address pc) const;
 
-  void PrintEntry(unsigned index, FILE* out = stdout) const;
+  void PrintEntry(unsigned index, OStream& os) const;  // NOLINT
 
  private:
   static const uint8_t kNoRegisters = 0xFF;
@@ -126,7 +127,8 @@
     return GetPcOffsetLocation(index) + kPcSize;
   }
 
-  static void PrintBits(FILE* out, uint8_t byte, int digits);
+  static void PrintBits(OStream& os,  // NOLINT
+                        uint8_t byte, int digits);
 
   DisallowHeapAllocation no_allocation_;
   Code* code_;
@@ -164,8 +166,8 @@
   void DefinePointerRegister(Register reg, Zone* zone);
 
  private:
-  Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers) :
-      indexes_(indexes), registers_(registers) { }
+  Safepoint(ZoneList<int>* indexes, ZoneList<int>* registers)
+      : indexes_(indexes), registers_(registers) {}
   ZoneList<int>* indexes_;
   ZoneList<int>* registers_;
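
The size() expression above pins down the table layout: a fixed header (length, entry size), then length (pc offset, info) pairs, then length bit-vector entries of entry_size bytes each. A hedged sketch of the offset arithmetic, assuming 4-byte fields as the Memory::uint32_at reads suggest:

    #include <cstdint>

    // Hypothetical offsets mirroring the layout implied by size()/GetEntry().
    struct SafepointTableLayout {
      static const int kHeaderSize = 8;     // 4-byte length + 4-byte entry size
      static const int kPcAndInfoSize = 8;  // 4-byte pc offset + 4-byte info

      uint32_t length;      // number of safepoints
      uint32_t entry_size;  // bytes of bitmap per safepoint

      int PcOffsetLocation(int i) const { return kHeaderSize + i * kPcAndInfoSize; }
      int InfoLocation(int i) const { return PcOffsetLocation(i) + 4; }
      int EntryLocation(int i) const {
        return kHeaderSize + static_cast<int>(length) * kPcAndInfoSize +
               i * static_cast<int>(entry_size);
      }
      int TotalSize() const {
        return kHeaderSize + static_cast<int>(length) *
                                 (kPcAndInfoSize + static_cast<int>(entry_size));
      }
    };
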
 
diff --git a/src/sampler.cc b/src/sampler.cc
index dcb4be7..394efeb 100644
--- a/src/sampler.cc
+++ b/src/sampler.cc
@@ -13,16 +13,16 @@
 #include <signal.h>
 #include <sys/time.h>
 
-#if !V8_OS_QNX
-#include <sys/syscall.h>
+#if !V8_OS_QNX && !V8_OS_NACL
+#include <sys/syscall.h>  // NOLINT
 #endif
 
 #if V8_OS_MACOSX
 #include <mach/mach.h>
 // OpenBSD doesn't have <ucontext.h>. ucontext_t lives in <signal.h>
 // and is a typedef for struct sigcontext. There is no uc_mcontext.
-#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) \
-    && !V8_OS_OPENBSD
+#elif(!V8_OS_ANDROID || defined(__BIONIC_HAVE_UCONTEXT_T)) && \
+    !V8_OS_OPENBSD && !V8_OS_NACL
 #include <ucontext.h>
 #endif
 
@@ -33,7 +33,7 @@
 #if V8_OS_ANDROID && !defined(__BIONIC_HAVE_UCONTEXT_T) && \
     (defined(__arm__) || defined(__aarch64__)) && \
     !defined(__BIONIC_HAVE_STRUCT_SIGCONTEXT)
-#include <asm/sigcontext.h>
+#include <asm/sigcontext.h>  // NOLINT
 #endif
 
 #elif V8_OS_WIN || V8_OS_CYGWIN
@@ -44,11 +44,11 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/cpu-profiler-inl.h"
 #include "src/flags.h"
 #include "src/frames-inl.h"
 #include "src/log.h"
-#include "src/platform.h"
 #include "src/simulator.h"
 #include "src/v8threads.h"
 #include "src/vm-state-inl.h"
@@ -256,6 +256,12 @@
         Simulator::sp));
     state->fp = reinterpret_cast<Address>(simulator_->get_register(
         Simulator::fp));
+#elif V8_TARGET_ARCH_MIPS64
+    state->pc = reinterpret_cast<Address>(simulator_->get_pc());
+    state->sp = reinterpret_cast<Address>(simulator_->get_register(
+        Simulator::sp));
+    state->fp = reinterpret_cast<Address>(simulator_->get_register(
+        Simulator::fp));
 #endif
   }
 
@@ -269,16 +275,16 @@
 
 class SignalHandler : public AllStatic {
  public:
-  static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
-  static void TearDown() { delete mutex_; }
+  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
+  static void TearDown() { delete mutex_; mutex_ = NULL; }
 
   static void IncreaseSamplerCount() {
-    LockGuard<Mutex> lock_guard(mutex_);
+    base::LockGuard<base::Mutex> lock_guard(mutex_);
     if (++client_count_ == 1) Install();
   }
 
   static void DecreaseSamplerCount() {
-    LockGuard<Mutex> lock_guard(mutex_);
+    base::LockGuard<base::Mutex> lock_guard(mutex_);
     if (--client_count_ == 0) Restore();
   }
 
@@ -288,6 +294,7 @@
 
  private:
   static void Install() {
+#if !V8_OS_NACL
     struct sigaction sa;
     sa.sa_sigaction = &HandleProfilerSignal;
     sigemptyset(&sa.sa_mask);
@@ -298,40 +305,42 @@
 #endif
     signal_handler_installed_ =
         (sigaction(SIGPROF, &sa, &old_signal_handler_) == 0);
+#endif
   }
 
   static void Restore() {
+#if !V8_OS_NACL
     if (signal_handler_installed_) {
       sigaction(SIGPROF, &old_signal_handler_, 0);
       signal_handler_installed_ = false;
     }
+#endif
   }
 
+#if !V8_OS_NACL
   static void HandleProfilerSignal(int signal, siginfo_t* info, void* context);
+#endif
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static base::Mutex* mutex_;
   static int client_count_;
   static bool signal_handler_installed_;
   static struct sigaction old_signal_handler_;
 };
 
 
-Mutex* SignalHandler::mutex_ = NULL;
+base::Mutex* SignalHandler::mutex_ = NULL;
 int SignalHandler::client_count_ = 0;
 struct sigaction SignalHandler::old_signal_handler_;
 bool SignalHandler::signal_handler_installed_ = false;
 
 
+// As Native Client does not support signal handling, profiling is disabled.
+#if !V8_OS_NACL
 void SignalHandler::HandleProfilerSignal(int signal, siginfo_t* info,
                                          void* context) {
-#if V8_OS_NACL
-  // As Native Client does not support signal handling, profiling
-  // is disabled.
-  return;
-#else
   USE(info);
   if (signal != SIGPROF) return;
-  Isolate* isolate = Isolate::UncheckedCurrent();
+  Isolate* isolate = Isolate::UnsafeCurrent();
   if (isolate == NULL || !isolate->IsInitialized() || !isolate->IsInUse()) {
     // We require a fully initialized and entered isolate.
     return;
@@ -393,6 +402,10 @@
   state.pc = reinterpret_cast<Address>(mcontext.pc);
   state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
   state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
+#elif V8_HOST_ARCH_MIPS64
+  state.pc = reinterpret_cast<Address>(mcontext.pc);
+  state.sp = reinterpret_cast<Address>(mcontext.gregs[29]);
+  state.fp = reinterpret_cast<Address>(mcontext.gregs[30]);
 #endif  // V8_HOST_ARCH_*
 #elif V8_OS_MACOSX
 #if V8_HOST_ARCH_X64
@@ -467,26 +480,26 @@
 #endif  // V8_OS_QNX
 #endif  // USE_SIMULATOR
   sampler->SampleStack(state);
-#endif  // V8_OS_NACL
 }
+#endif  // V8_OS_NACL
 
 #endif
 
 
-class SamplerThread : public Thread {
+class SamplerThread : public base::Thread {
  public:
   static const int kSamplerThreadStackSize = 64 * KB;
 
   explicit SamplerThread(int interval)
-      : Thread(Thread::Options("SamplerThread", kSamplerThreadStackSize)),
+      : Thread(base::Thread::Options("SamplerThread", kSamplerThreadStackSize)),
         interval_(interval) {}
 
-  static void SetUp() { if (!mutex_) mutex_ = new Mutex(); }
+  static void SetUp() { if (!mutex_) mutex_ = new base::Mutex(); }
   static void TearDown() { delete mutex_; mutex_ = NULL; }
 
   static void AddActiveSampler(Sampler* sampler) {
     bool need_to_start = false;
-    LockGuard<Mutex> lock_guard(mutex_);
+    base::LockGuard<base::Mutex> lock_guard(mutex_);
     if (instance_ == NULL) {
       // Start a thread that will send SIGPROF signal to VM threads,
       // when CPU profiling will be enabled.
@@ -494,9 +507,9 @@
       need_to_start = true;
     }
 
-    ASSERT(sampler->IsActive());
-    ASSERT(!instance_->active_samplers_.Contains(sampler));
-    ASSERT(instance_->interval_ == sampler->interval());
+    DCHECK(sampler->IsActive());
+    DCHECK(!instance_->active_samplers_.Contains(sampler));
+    DCHECK(instance_->interval_ == sampler->interval());
     instance_->active_samplers_.Add(sampler);
 
     if (need_to_start) instance_->StartSynchronously();
@@ -505,11 +518,11 @@
   static void RemoveActiveSampler(Sampler* sampler) {
     SamplerThread* instance_to_remove = NULL;
     {
-      LockGuard<Mutex> lock_guard(mutex_);
+      base::LockGuard<base::Mutex> lock_guard(mutex_);
 
-      ASSERT(sampler->IsActive());
+      DCHECK(sampler->IsActive());
       bool removed = instance_->active_samplers_.RemoveElement(sampler);
-      ASSERT(removed);
+      DCHECK(removed);
       USE(removed);
 
       // We cannot delete the instance immediately as we need to Join() the
@@ -529,7 +542,7 @@
   virtual void Run() {
     while (true) {
       {
-        LockGuard<Mutex> lock_guard(mutex_);
+        base::LockGuard<base::Mutex> lock_guard(mutex_);
         if (active_samplers_.is_empty()) break;
         // When CPU profiling is enabled both JavaScript and C++ code is
         // profiled. We must not suspend.
@@ -540,13 +553,13 @@
           sampler->DoSample();
         }
       }
-      OS::Sleep(interval_);
+      base::OS::Sleep(interval_);
     }
   }
 
  private:
   // Protects the process wide state below.
-  static Mutex* mutex_;
+  static base::Mutex* mutex_;
   static SamplerThread* instance_;
 
   const int interval_;
@@ -556,7 +569,7 @@
 };
 
 
-Mutex* SamplerThread::mutex_ = NULL;
+base::Mutex* SamplerThread::mutex_ = NULL;
 SamplerThread* SamplerThread::instance_ = NULL;
 
 
@@ -565,8 +578,8 @@
 //
 DISABLE_ASAN void TickSample::Init(Isolate* isolate,
                                    const RegisterState& regs) {
-  ASSERT(isolate->IsInitialized());
-  timestamp = TimeTicks::HighResolutionNow();
+  DCHECK(isolate->IsInitialized());
+  timestamp = base::TimeTicks::HighResolutionNow();
   pc = regs.pc;
   state = isolate->current_vm_state();
 
@@ -596,7 +609,7 @@
 
   SafeStackFrameIterator it(isolate, regs.fp, regs.sp, js_entry_sp);
   top_frame_type = it.top_frame_type();
-  int i = 0;
+  unsigned i = 0;
   while (!it.done() && i < TickSample::kMaxFramesCount) {
     stack[i++] = it.frame()->pc();
     it.Advance();
@@ -634,20 +647,20 @@
 
 
 Sampler::~Sampler() {
-  ASSERT(!IsActive());
+  DCHECK(!IsActive());
   delete data_;
 }
 
 
 void Sampler::Start() {
-  ASSERT(!IsActive());
+  DCHECK(!IsActive());
   SetActive(true);
   SamplerThread::AddActiveSampler(this);
 }
 
 
 void Sampler::Stop() {
-  ASSERT(IsActive());
+  DCHECK(IsActive());
   SamplerThread::RemoveActiveSampler(this);
   SetActive(false);
 }
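
The SignalHandler rewrite above keeps its reference-counted shape: the first active sampler installs a SIGPROF handler via sigaction, the last one restores the previous handler, all under one process-wide mutex. A minimal POSIX sketch of the pattern, with hypothetical names and std::mutex standing in for base::Mutex (the real handler also records pc/sp/fp of the interrupted thread):

    #include <csignal>
    #include <mutex>

    namespace {

    std::mutex g_mutex;  // protects the process-wide state below
    int g_client_count = 0;
    struct sigaction g_old_handler;
    bool g_installed = false;

    void HandleProfilerSignal(int sig, siginfo_t*, void*) {
      if (sig != SIGPROF) return;
      // The real handler samples the interrupted thread's registers here.
    }

    void Install() {
      struct sigaction sa;
      sa.sa_sigaction = &HandleProfilerSignal;
      sigemptyset(&sa.sa_mask);
      sa.sa_flags = SA_RESTART | SA_SIGINFO;  // flags assumed, not shown in the hunk
      g_installed = (sigaction(SIGPROF, &sa, &g_old_handler) == 0);
    }

    void Restore() {
      if (g_installed) {
        sigaction(SIGPROF, &g_old_handler, nullptr);
        g_installed = false;
      }
    }

    }  // namespace

    void IncreaseSamplerCount() {
      std::lock_guard<std::mutex> lock(g_mutex);
      if (++g_client_count == 1) Install();
    }

    void DecreaseSamplerCount() {
      std::lock_guard<std::mutex> lock(g_mutex);
      if (--g_client_count == 0) Restore();
    }
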
diff --git a/src/sampler.h b/src/sampler.h
index fe94a02..c3dce4e 100644
--- a/src/sampler.h
+++ b/src/sampler.h
@@ -44,10 +44,11 @@
     Address tos;   // Top stack value (*sp).
     Address external_callback;
   };
-  static const int kMaxFramesCount = 64;
+  static const unsigned kMaxFramesCountLog2 = 8;
+  static const unsigned kMaxFramesCount = (1 << kMaxFramesCountLog2) - 1;
   Address stack[kMaxFramesCount];  // Call stack.
-  TimeTicks timestamp;
-  int frames_count : 8;  // Number of captured frames.
+  base::TimeTicks timestamp;
+  unsigned frames_count : kMaxFramesCountLog2;  // Number of captured frames.
   bool has_external_callback : 1;
   StackFrame::Type top_frame_type : 4;
 };
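
The new kMaxFramesCount is dictated by the bit-field beside it: an N-bit unsigned field holds 0 through (1 << N) - 1, so with kMaxFramesCountLog2 == 8 the stack array can grow to 255 entries while frames_count still fits its 8-bit field. A compile-time sketch of the constraint:

    #include <cstdint>

    struct TickSampleSketch {
      static const unsigned kMaxFramesCountLog2 = 8;
      static const unsigned kMaxFramesCount = (1u << kMaxFramesCountLog2) - 1;
      uintptr_t stack[kMaxFramesCount];             // call stack, up to 255 pcs
      unsigned frames_count : kMaxFramesCountLog2;  // holds 0..255 exactly
    };

    static_assert(TickSampleSketch::kMaxFramesCount == 255,
                  "an 8-bit field tops out at 255");
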
diff --git a/src/scanner-character-streams.cc b/src/scanner-character-streams.cc
index 23af45f..d06f479 100644
--- a/src/scanner-character-streams.cc
+++ b/src/scanner-character-streams.cc
@@ -6,12 +6,40 @@
 
 #include "src/scanner-character-streams.h"
 
+#include "include/v8.h"
 #include "src/handles.h"
 #include "src/unicode-inl.h"
 
 namespace v8 {
 namespace internal {
 
+namespace {
+
+unsigned CopyCharsHelper(uint16_t* dest, unsigned length, const uint8_t* src,
+                         unsigned* src_pos, unsigned src_length,
+                         ScriptCompiler::StreamedSource::Encoding encoding) {
+  if (encoding == ScriptCompiler::StreamedSource::UTF8) {
+    return v8::internal::Utf8ToUtf16CharacterStream::CopyChars(
+        dest, length, src, src_pos, src_length);
+  }
+
+  unsigned to_fill = length;
+  if (to_fill > src_length - *src_pos) to_fill = src_length - *src_pos;
+
+  if (encoding == ScriptCompiler::StreamedSource::ONE_BYTE) {
+    v8::internal::CopyChars<uint8_t, uint16_t>(dest, src + *src_pos, to_fill);
+  } else {
+    DCHECK(encoding == ScriptCompiler::StreamedSource::TWO_BYTE);
+    v8::internal::CopyChars<uint16_t, uint16_t>(
+        dest, reinterpret_cast<const uint16_t*>(src + *src_pos), to_fill);
+  }
+  *src_pos += to_fill;
+  return to_fill;
+}
+
+}  // namespace
+
+
 // ----------------------------------------------------------------------------
 // BufferedUtf16CharacterStreams
 
@@ -55,8 +83,8 @@
     buffer_cursor_ = buffer_end_;
   }
   // Ensure that there is room for at least one pushback.
-  ASSERT(buffer_cursor_ > buffer_);
-  ASSERT(pos_ > 0);
+  DCHECK(buffer_cursor_ > buffer_);
+  DCHECK(pos_ > 0);
   buffer_[--buffer_cursor_ - buffer_] = character;
   if (buffer_cursor_ == buffer_) {
     pushback_limit_ = NULL;
@@ -78,7 +106,7 @@
     if (buffer_cursor_ < buffer_end_) return true;
     // Otherwise read a new block.
   }
-  unsigned length = FillBuffer(pos_, kBufferSize);
+  unsigned length = FillBuffer(pos_);
   buffer_end_ = buffer_ + length;
   return length > 0;
 }
@@ -102,7 +130,7 @@
     unsigned end_position)
     : string_(data),
       length_(end_position) {
-  ASSERT(end_position >= start_position);
+  DCHECK(end_position >= start_position);
   pos_ = start_position;
 }
 
@@ -118,9 +146,9 @@
 }
 
 
-unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos,
-                                                      unsigned length) {
+unsigned GenericStringUtf16CharacterStream::FillBuffer(unsigned from_pos) {
   if (from_pos >= length_) return 0;
+  unsigned length = kBufferSize;
   if (from_pos + length > length_) {
     length = length_ - from_pos;
   }
@@ -145,6 +173,35 @@
 Utf8ToUtf16CharacterStream::~Utf8ToUtf16CharacterStream() { }
 
 
+unsigned Utf8ToUtf16CharacterStream::CopyChars(uint16_t* dest, unsigned length,
+                                               const byte* src,
+                                               unsigned* src_pos,
+                                               unsigned src_length) {
+  static const unibrow::uchar kMaxUtf16Character = 0xffff;
+  unsigned i = 0;
+  // Because a decoded code point may expand into a UTF-16 lead/trail
+  // surrogate pair, we normally stop filling the buffer one slot early:
+  // at least two free slots must remain so the next character is
+  // guaranteed to fit.
+  while (i < length - 1) {
+    if (*src_pos == src_length) break;
+    unibrow::uchar c = src[*src_pos];
+    if (c <= unibrow::Utf8::kMaxOneByteChar) {
+      *src_pos = *src_pos + 1;
+    } else {
+      c = unibrow::Utf8::CalculateValue(src + *src_pos, src_length - *src_pos,
+                                        src_pos);
+    }
+    if (c > kMaxUtf16Character) {
+      dest[i++] = unibrow::Utf16::LeadSurrogate(c);
+      dest[i++] = unibrow::Utf16::TrailSurrogate(c);
+    } else {
+      dest[i++] = static_cast<uc16>(c);
+    }
+  }
+  return i;
+}
+
+
 unsigned Utf8ToUtf16CharacterStream::BufferSeekForward(unsigned delta) {
   unsigned old_pos = pos_;
   unsigned target_pos = pos_ + delta;
@@ -155,33 +212,15 @@
 }
 
 
-unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position,
-                                                unsigned length) {
-  static const unibrow::uchar kMaxUtf16Character = 0xffff;
+unsigned Utf8ToUtf16CharacterStream::FillBuffer(unsigned char_position) {
   SetRawPosition(char_position);
   if (raw_character_position_ != char_position) {
     // char_position was not a valid position in the stream (hit the end
     // while spooling to it).
     return 0u;
   }
-  unsigned i = 0;
-  while (i < length - 1) {
-    if (raw_data_pos_ == raw_data_length_) break;
-    unibrow::uchar c = raw_data_[raw_data_pos_];
-    if (c <= unibrow::Utf8::kMaxOneByteChar) {
-      raw_data_pos_++;
-    } else {
-      c =  unibrow::Utf8::CalculateValue(raw_data_ + raw_data_pos_,
-                                         raw_data_length_ - raw_data_pos_,
-                                         &raw_data_pos_);
-    }
-    if (c > kMaxUtf16Character) {
-      buffer_[i++] = unibrow::Utf16::LeadSurrogate(c);
-      buffer_[i++] = unibrow::Utf16::TrailSurrogate(c);
-    } else {
-      buffer_[i++] = static_cast<uc16>(c);
-    }
-  }
+  unsigned i = CopyChars(buffer_, kBufferSize, raw_data_, &raw_data_pos_,
+                         raw_data_length_);
   raw_character_position_ = char_position + i;
   return i;
 }
@@ -209,12 +248,12 @@
 static inline void Utf8CharacterBack(const byte* buffer, unsigned* cursor) {
   byte character = buffer[--*cursor];
   if (character > unibrow::Utf8::kMaxOneByteChar) {
-    ASSERT(IsUtf8MultiCharacterFollower(character));
+    DCHECK(IsUtf8MultiCharacterFollower(character));
     // Last byte of a multi-byte character encoding. Step backwards until
     // pointing to the first byte of the encoding, recognized by having the
     // top two bits set.
     while (IsUtf8MultiCharacterFollower(buffer[--*cursor])) { }
-    ASSERT(IsUtf8MultiCharacterStart(buffer[*cursor]));
+    DCHECK(IsUtf8MultiCharacterStart(buffer[*cursor]));
   }
 }
 
@@ -230,7 +269,7 @@
     //  110..... - (0xCx, 0xDx) one additional byte (minimum).
     //  1110.... - (0xEx) two additional bytes.
     //  11110... - (0xFx) three additional bytes (maximum).
-    ASSERT(IsUtf8MultiCharacterStart(character));
+    DCHECK(IsUtf8MultiCharacterStart(character));
     // Additional bytes is:
     // 1 if value in range 0xC0 .. 0xDF.
     // 2 if value in range 0xE0 .. 0xEF.
@@ -239,7 +278,7 @@
     unsigned additional_bytes =
         ((0x3211u) >> (((character - 0xC0) >> 2) & 0xC)) & 0x03;
     *cursor += additional_bytes;
-    ASSERT(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
+    DCHECK(!IsUtf8MultiCharacterFollower(buffer[1 + additional_bytes]));
   }
 }
 
@@ -255,12 +294,12 @@
       int old_pos = raw_data_pos_;
       Utf8CharacterBack(raw_data_, &raw_data_pos_);
       raw_character_position_--;
-      ASSERT(old_pos - raw_data_pos_ <= 4);
+      DCHECK(old_pos - raw_data_pos_ <= 4);
       // Step back over both code units for surrogate pairs.
       if (old_pos - raw_data_pos_ == 4) raw_character_position_--;
     } while (raw_character_position_ > target_position);
     // No surrogate pair splitting.
-    ASSERT(raw_character_position_ == target_position);
+    DCHECK(raw_character_position_ == target_position);
     return;
   }
   // Spool forwards in the utf8 buffer.
@@ -269,11 +308,123 @@
     int old_pos = raw_data_pos_;
     Utf8CharacterForward(raw_data_, &raw_data_pos_);
     raw_character_position_++;
-    ASSERT(raw_data_pos_ - old_pos <= 4);
+    DCHECK(raw_data_pos_ - old_pos <= 4);
     if (raw_data_pos_ - old_pos == 4) raw_character_position_++;
   }
   // No surrogate pair splitting.
-  ASSERT(raw_character_position_ == target_position);
+  DCHECK(raw_character_position_ == target_position);
+}
+
+
+unsigned ExternalStreamingStream::FillBuffer(unsigned position) {
+  // Ignore "position" which is the position in the decoded data. Instead,
+  // ExternalStreamingStream keeps track of the position in the raw data.
+  unsigned data_in_buffer = 0;
+  // Note that the UTF-8 decoder might not be able to fill the buffer
+  // completely; it will typically leave the last slot empty (see
+  // Utf8ToUtf16CharacterStream::CopyChars).
+  while (data_in_buffer < kBufferSize - 1) {
+    if (current_data_ == NULL) {
+      // GetMoreData will wait until the embedder has enough data. This is
+      // the boundary between the API, which uses size_t (the correct type
+      // here), and the internal parts, which use unsigned. TODO(marja):
+      // make the internal parts use size_t too.
+      current_data_length_ =
+          static_cast<unsigned>(source_stream_->GetMoreData(&current_data_));
+      current_data_offset_ = 0;
+      bool data_ends = current_data_length_ == 0;
+
+      // A caveat: a data chunk might end with bytes from an incomplete UTF-8
+      // character (the rest of the bytes will be in the next chunk).
+      if (encoding_ == ScriptCompiler::StreamedSource::UTF8) {
+        HandleUtf8SplitCharacters(&data_in_buffer);
+        if (!data_ends && current_data_offset_ == current_data_length_) {
+          // The data stream didn't end, but we used all the data in the
+          // chunk. This will only happen when the chunk was really small. We
+          // don't handle the case where a UTF-8 character is split over several
+          // chunks; in that case V8 won't crash, but it will be a parse error.
+          delete[] current_data_;
+          current_data_ = NULL;
+          current_data_length_ = 0;
+          current_data_offset_ = 0;
+          continue;  // Request a new chunk.
+        }
+      }
+
+      // Did the data stream end?
+      if (data_ends) {
+        DCHECK(utf8_split_char_buffer_length_ == 0);
+        return data_in_buffer;
+      }
+    }
+
+    // Fill the buffer from current_data_.
+    unsigned new_offset = 0;
+    unsigned new_chars_in_buffer =
+        CopyCharsHelper(buffer_ + data_in_buffer, kBufferSize - data_in_buffer,
+                        current_data_ + current_data_offset_, &new_offset,
+                        current_data_length_ - current_data_offset_, encoding_);
+    data_in_buffer += new_chars_in_buffer;
+    current_data_offset_ += new_offset;
+    DCHECK(data_in_buffer <= kBufferSize);
+
+    // Did we use all the data in the data chunk?
+    if (current_data_offset_ == current_data_length_) {
+      delete[] current_data_;
+      current_data_ = NULL;
+      current_data_length_ = 0;
+      current_data_offset_ = 0;
+    }
+  }
+  return data_in_buffer;
+}
+
+void ExternalStreamingStream::HandleUtf8SplitCharacters(
+    unsigned* data_in_buffer) {
+  // First check if we have leftover data from the last chunk.
+  unibrow::uchar c;
+  if (utf8_split_char_buffer_length_ > 0) {
+    // Move the bytes which are part of the split character (which started in
+    // the previous chunk) into utf8_split_char_buffer_.
+    while (current_data_offset_ < current_data_length_ &&
+           utf8_split_char_buffer_length_ < 4 &&
+           (c = current_data_[current_data_offset_]) >
+               unibrow::Utf8::kMaxOneByteChar) {
+      utf8_split_char_buffer_[utf8_split_char_buffer_length_] = c;
+      ++utf8_split_char_buffer_length_;
+      ++current_data_offset_;
+    }
+
+    // Convert the data in utf8_split_char_buffer_.
+    unsigned new_offset = 0;
+    unsigned new_chars_in_buffer =
+        CopyCharsHelper(buffer_ + *data_in_buffer,
+                        kBufferSize - *data_in_buffer, utf8_split_char_buffer_,
+                        &new_offset, utf8_split_char_buffer_length_, encoding_);
+    *data_in_buffer += new_chars_in_buffer;
+    // Make sure we used all the data.
+    DCHECK(new_offset == utf8_split_char_buffer_length_);
+    DCHECK(*data_in_buffer <= kBufferSize);
+
+    utf8_split_char_buffer_length_ = 0;
+  }
+
+  // Move bytes which are part of an incomplete character from the end of the
+  // current chunk to utf8_split_char_buffer_. They will be converted when the
+  // next data chunk arrives. Note that all valid UTF-8 characters are at most 4
+  // bytes long, but if the data is invalid, we can have character values bigger
+  // than unibrow::Utf8::kMaxOneByteChar for more than 4 consecutive bytes.
+  while (current_data_length_ > current_data_offset_ &&
+         (c = current_data_[current_data_length_ - 1]) >
+             unibrow::Utf8::kMaxOneByteChar &&
+         utf8_split_char_buffer_length_ < 4) {
+    --current_data_length_;
+    ++utf8_split_char_buffer_length_;
+  }
+  CHECK(utf8_split_char_buffer_length_ <= 4);
+  for (unsigned i = 0; i < utf8_split_char_buffer_length_; ++i) {
+    utf8_split_char_buffer_[i] = current_data_[current_data_length_ + i];
+  }
 }
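
CopyChars above emits a lead/trail surrogate pair for any decoded code point above 0xffff, which is why its loop keeps two output slots free. The pair encoding is the standard UTF-16 one, sketched here:

    #include <cstdint>

    // Standard UTF-16 surrogate encoding for code points above 0xFFFF.
    inline uint16_t LeadSurrogate(uint32_t code_point) {
      return static_cast<uint16_t>(0xD800 + (((code_point - 0x10000) >> 10) & 0x3FF));
    }

    inline uint16_t TrailSurrogate(uint32_t code_point) {
      return static_cast<uint16_t>(0xDC00 + ((code_point - 0x10000) & 0x3FF));
    }

    // Example: U+1F600 becomes the pair 0xD83D, 0xDE00.
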
 
 
diff --git a/src/scanner-character-streams.h b/src/scanner-character-streams.h
index a25eb58..afca13f 100644
--- a/src/scanner-character-streams.h
+++ b/src/scanner-character-streams.h
@@ -29,7 +29,7 @@
   virtual void SlowPushBack(uc16 character);
 
   virtual unsigned BufferSeekForward(unsigned delta) = 0;
-  virtual unsigned FillBuffer(unsigned position, unsigned length) = 0;
+  virtual unsigned FillBuffer(unsigned position) = 0;
 
   const uc16* pushback_limit_;
   uc16 buffer_[kBufferSize];
@@ -46,7 +46,7 @@
 
  protected:
   virtual unsigned BufferSeekForward(unsigned delta);
-  virtual unsigned FillBuffer(unsigned position, unsigned length);
+  virtual unsigned FillBuffer(unsigned position);
 
   Handle<String> string_;
   unsigned length_;
@@ -59,9 +59,12 @@
   Utf8ToUtf16CharacterStream(const byte* data, unsigned length);
   virtual ~Utf8ToUtf16CharacterStream();
 
+  static unsigned CopyChars(uint16_t* dest, unsigned length, const byte* src,
+                            unsigned* src_pos, unsigned src_length);
+
  protected:
   virtual unsigned BufferSeekForward(unsigned delta);
-  virtual unsigned FillBuffer(unsigned char_position, unsigned length);
+  virtual unsigned FillBuffer(unsigned char_position);
   void SetRawPosition(unsigned char_position);
 
   const byte* raw_data_;
@@ -73,6 +76,46 @@
 };
 
 
+// ExternalStreamingStream is a wrapper around an ExternalSourceStream (see
+// include/v8.h) subclass implemented by the embedder.
+class ExternalStreamingStream : public BufferedUtf16CharacterStream {
+ public:
+  ExternalStreamingStream(ScriptCompiler::ExternalSourceStream* source_stream,
+                          v8::ScriptCompiler::StreamedSource::Encoding encoding)
+      : source_stream_(source_stream),
+        encoding_(encoding),
+        current_data_(NULL),
+        current_data_offset_(0),
+        current_data_length_(0),
+        utf8_split_char_buffer_length_(0) {}
+
+  virtual ~ExternalStreamingStream() { delete[] current_data_; }
+
+  virtual unsigned BufferSeekForward(unsigned delta) OVERRIDE {
+    // We never need to seek forward when streaming scripts. We only seek
+    // forward when we want to parse a function whose location we already know,
+    // and when streaming, we don't know the locations of anything we haven't
+    // seen yet.
+    UNREACHABLE();
+    return 0;
+  }
+
+  virtual unsigned FillBuffer(unsigned position);
+
+ private:
+  void HandleUtf8SplitCharacters(unsigned* data_in_buffer);
+
+  ScriptCompiler::ExternalSourceStream* source_stream_;
+  v8::ScriptCompiler::StreamedSource::Encoding encoding_;
+  const uint8_t* current_data_;
+  unsigned current_data_offset_;
+  unsigned current_data_length_;
+  // For converting UTF-8 characters which are split across two data chunks.
+  uint8_t utf8_split_char_buffer_[4];
+  unsigned utf8_split_char_buffer_length_;
+};
+
+
 // UTF16 buffer to read characters from an external string.
 class ExternalTwoByteStringUtf16CharacterStream: public Utf16CharacterStream {
  public:
@@ -82,7 +125,7 @@
   virtual ~ExternalTwoByteStringUtf16CharacterStream();
 
   virtual void PushBack(uc32 character) {
-    ASSERT(buffer_cursor_ > raw_data_);
+    DCHECK(buffer_cursor_ > raw_data_);
     buffer_cursor_--;
     pos_--;
   }
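
ExternalStreamingStream pulls raw chunks from an embedder-supplied stream and releases each exhausted chunk with delete[], so the embedder must hand over heap-allocated buffers it no longer owns. A hedged sketch of a single-chunk embedder stream under that convention (the base-class inheritance is left as a comment so the sketch stays self-contained):

    #include <cstdint>
    #include <cstring>

    class OneShotSourceStream /* : public v8::ScriptCompiler::ExternalSourceStream */ {
     public:
      explicit OneShotSourceStream(const char* source)
          : source_(source), done_(false) {}

      // Hands back a heap-allocated chunk; the caller takes ownership and
      // frees it with delete[]. Returning 0 signals end of input.
      size_t GetMoreData(const uint8_t** dest) {
        if (done_) {
          *dest = nullptr;
          return 0;
        }
        done_ = true;
        size_t length = std::strlen(source_);
        uint8_t* chunk = new uint8_t[length];
        std::memcpy(chunk, source_, length);
        *dest = chunk;
        return length;
      }

     private:
      const char* source_;
      bool done_;
    };
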
diff --git a/src/scanner.cc b/src/scanner.cc
index 0265a8f..72874aa 100644
--- a/src/scanner.cc
+++ b/src/scanner.cc
@@ -6,18 +6,28 @@
 
 #include <cmath>
 
-#include "src/scanner.h"
+#include "src/v8.h"
 
 #include "include/v8stdint.h"
+#include "src/ast-value-factory.h"
 #include "src/char-predicates-inl.h"
 #include "src/conversions-inl.h"
 #include "src/list-inl.h"
-#include "src/v8.h"
 #include "src/parser.h"
+#include "src/scanner.h"
 
 namespace v8 {
 namespace internal {
 
+
+Handle<String> LiteralBuffer::Internalize(Isolate* isolate) const {
+  if (is_one_byte()) {
+    return isolate->factory()->InternalizeOneByteString(one_byte_literal());
+  }
+  return isolate->factory()->InternalizeTwoByteString(two_byte_literal());
+}
+
+
 // ----------------------------------------------------------------------------
 // Scanner
 
@@ -26,7 +36,8 @@
       octal_pos_(Location::invalid()),
       harmony_scoping_(false),
       harmony_modules_(false),
-      harmony_numeric_literals_(false) { }
+      harmony_numeric_literals_(false),
+      harmony_classes_(false) { }
 
 
 void Scanner::Initialize(Utf16CharacterStream* source) {
@@ -43,7 +54,7 @@
 
 
 uc32 Scanner::ScanHexNumber(int expected_length) {
-  ASSERT(expected_length <= 4);  // prevent overflow
+  DCHECK(expected_length <= 4);  // prevent overflow
 
   uc32 digits[4] = { 0, 0, 0, 0 };
   uc32 x = 0;
@@ -294,8 +305,70 @@
 }
 
 
+Token::Value Scanner::SkipSourceURLComment() {
+  TryToParseSourceURLComment();
+  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+    Advance();
+  }
+
+  return Token::WHITESPACE;
+}
+
+
+void Scanner::TryToParseSourceURLComment() {
+  // Magic comments have the form //[#@]\s<name>=\s*<value>\s*.*; this
+  // function simply returns if it cannot parse one.
+  if (!unicode_cache_->IsWhiteSpace(c0_))
+    return;
+  Advance();
+  LiteralBuffer name;
+  while (c0_ >= 0 && !unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) &&
+         c0_ != '=') {
+    name.AddChar(c0_);
+    Advance();
+  }
+  if (!name.is_one_byte()) return;
+  Vector<const uint8_t> name_literal = name.one_byte_literal();
+  LiteralBuffer* value;
+  if (name_literal == STATIC_CHAR_VECTOR("sourceURL")) {
+    value = &source_url_;
+  } else if (name_literal == STATIC_CHAR_VECTOR("sourceMappingURL")) {
+    value = &source_mapping_url_;
+  } else {
+    return;
+  }
+  if (c0_ != '=')
+    return;
+  Advance();
+  value->Reset();
+  while (c0_ >= 0 && unicode_cache_->IsWhiteSpace(c0_)) {
+    Advance();
+  }
+  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+    // Disallowed characters.
+    if (c0_ == '"' || c0_ == '\'') {
+      value->Reset();
+      return;
+    }
+    if (unicode_cache_->IsWhiteSpace(c0_)) {
+      break;
+    }
+    value->AddChar(c0_);
+    Advance();
+  }
+  // Allow whitespace at the end.
+  while (c0_ >= 0 && !unicode_cache_->IsLineTerminator(c0_)) {
+    if (!unicode_cache_->IsWhiteSpace(c0_)) {
+      value->Reset();
+      break;
+    }
+    Advance();
+  }
+}
+
+
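
For reference, a standalone sketch of the acceptance rules TryToParseSourceURLComment implements above: one whitespace character must follow '#' or '@', quotes invalidate the value, and only trailing whitespace may follow it. This is a simplified model; it matches a caller-supplied name rather than scanning an arbitrary one:

    #include <cctype>
    #include <string>

    // ParseMagicComment("//# sourceURL=bar.js", "sourceURL") == "bar.js"
    std::string ParseMagicComment(const std::string& line, const std::string& name) {
      if (line.compare(0, 2, "//") != 0) return "";
      size_t i = 2;
      if (i >= line.size() || (line[i] != '#' && line[i] != '@')) return "";
      ++i;
      if (i >= line.size() || !std::isspace(static_cast<unsigned char>(line[i])))
        return "";  // one whitespace character must follow '#' or '@'
      ++i;
      if (line.compare(i, name.size(), name) != 0) return "";
      i += name.size();
      if (i >= line.size() || line[i] != '=') return "";
      ++i;
      while (i < line.size() && std::isspace(static_cast<unsigned char>(line[i]))) ++i;
      std::string value;
      for (; i < line.size(); ++i) {
        char c = line[i];
        if (c == '"' || c == '\'') return "";  // quotes invalidate the value
        if (std::isspace(static_cast<unsigned char>(c))) break;
        value += c;
      }
      for (; i < line.size(); ++i) {  // only whitespace may follow the value
        if (!std::isspace(static_cast<unsigned char>(line[i]))) return "";
      }
      return value;
    }
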
 Token::Value Scanner::SkipMultiLineComment() {
-  ASSERT(c0_ == '*');
+  DCHECK(c0_ == '*');
   Advance();
 
   while (c0_ >= 0) {
@@ -322,7 +395,7 @@
 
 Token::Value Scanner::ScanHtmlComment() {
   // Check for <!-- comments.
-  ASSERT(c0_ == '!');
+  DCHECK(c0_ == '!');
   Advance();
   if (c0_ == '-') {
     Advance();
@@ -330,7 +403,7 @@
     PushBack('-');  // undo Advance()
   }
   PushBack('!');  // undo Advance()
-  ASSERT(c0_ == '!');
+  DCHECK(c0_ == '!');
   return Token::LT;
 }
 
@@ -394,10 +467,12 @@
         break;
 
       case '=':
-        // = == ===
+        // = == === =>
         Advance();
         if (c0_ == '=') {
           token = Select('=', Token::EQ_STRICT, Token::EQ);
+        } else if (c0_ == '>') {
+          token = Select(Token::ARROW);
         } else {
           token = Token::ASSIGN;
         }
@@ -458,7 +533,14 @@
         // /  // /* /=
         Advance();
         if (c0_ == '/') {
-          token = SkipSingleLineComment();
+          Advance();
+          if (c0_ == '@' || c0_ == '#') {
+            Advance();
+            token = SkipSourceURLComment();
+          } else {
+            PushBack(c0_);
+            token = SkipSingleLineComment();
+          }
         } else if (c0_ == '*') {
           token = SkipMultiLineComment();
         } else if (c0_ == '=') {
@@ -580,9 +662,9 @@
   // the "next" token. The "current" token will be invalid.
   if (pos == next_.location.beg_pos) return;
   int current_pos = source_pos();
-  ASSERT_EQ(next_.location.end_pos, current_pos);
+  DCHECK_EQ(next_.location.end_pos, current_pos);
   // Positions inside the lookahead token aren't supported.
-  ASSERT(pos >= current_pos);
+  DCHECK(pos >= current_pos);
   if (pos != current_pos) {
     source_->SeekForward(pos - source_->pos());
     Advance();
@@ -702,7 +784,7 @@
 
 
 Token::Value Scanner::ScanNumber(bool seen_period) {
-  ASSERT(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
+  DCHECK(IsDecimalDigit(c0_));  // the first digit of the number or the fraction
 
   enum { DECIMAL, HEX, OCTAL, IMPLICIT_OCTAL, BINARY } kind = DECIMAL;
 
@@ -781,7 +863,7 @@
 
   // scan exponent, if any
   if (c0_ == 'e' || c0_ == 'E') {
-    ASSERT(kind != HEX);  // 'e'/'E' must be scanned as part of the hex number
+    DCHECK(kind != HEX);  // 'e'/'E' must be scanned as part of the hex number
     if (kind != DECIMAL) return Token::ILLEGAL;
     // scan exponent
     AddLiteralCharAdvance();
@@ -820,77 +902,82 @@
 // ----------------------------------------------------------------------------
 // Keyword Matcher
 
-#define KEYWORDS(KEYWORD_GROUP, KEYWORD)                            \
-  KEYWORD_GROUP('b')                                                \
-  KEYWORD("break", Token::BREAK)                                    \
-  KEYWORD_GROUP('c')                                                \
-  KEYWORD("case", Token::CASE)                                      \
-  KEYWORD("catch", Token::CATCH)                                    \
-  KEYWORD("class", Token::FUTURE_RESERVED_WORD)                     \
-  KEYWORD("const", Token::CONST)                                    \
-  KEYWORD("continue", Token::CONTINUE)                              \
-  KEYWORD_GROUP('d')                                                \
-  KEYWORD("debugger", Token::DEBUGGER)                              \
-  KEYWORD("default", Token::DEFAULT)                                \
-  KEYWORD("delete", Token::DELETE)                                  \
-  KEYWORD("do", Token::DO)                                          \
-  KEYWORD_GROUP('e')                                                \
-  KEYWORD("else", Token::ELSE)                                      \
-  KEYWORD("enum", Token::FUTURE_RESERVED_WORD)                      \
-  KEYWORD("export", harmony_modules                                 \
-                    ? Token::EXPORT : Token::FUTURE_RESERVED_WORD)  \
-  KEYWORD("extends", Token::FUTURE_RESERVED_WORD)                   \
-  KEYWORD_GROUP('f')                                                \
-  KEYWORD("false", Token::FALSE_LITERAL)                            \
-  KEYWORD("finally", Token::FINALLY)                                \
-  KEYWORD("for", Token::FOR)                                        \
-  KEYWORD("function", Token::FUNCTION)                              \
-  KEYWORD_GROUP('i')                                                \
-  KEYWORD("if", Token::IF)                                          \
-  KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD)         \
-  KEYWORD("import", harmony_modules                                 \
-                    ? Token::IMPORT : Token::FUTURE_RESERVED_WORD)  \
-  KEYWORD("in", Token::IN)                                          \
-  KEYWORD("instanceof", Token::INSTANCEOF)                          \
-  KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD)          \
-  KEYWORD_GROUP('l')                                                \
-  KEYWORD("let", harmony_scoping                                    \
-                 ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
-  KEYWORD_GROUP('n')                                                \
-  KEYWORD("new", Token::NEW)                                        \
-  KEYWORD("null", Token::NULL_LITERAL)                              \
-  KEYWORD_GROUP('p')                                                \
-  KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD)            \
-  KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD)            \
-  KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD)          \
-  KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD)             \
-  KEYWORD_GROUP('r')                                                \
-  KEYWORD("return", Token::RETURN)                                  \
-  KEYWORD_GROUP('s')                                                \
-  KEYWORD("static", Token::FUTURE_STRICT_RESERVED_WORD)             \
-  KEYWORD("super", Token::FUTURE_RESERVED_WORD)                     \
-  KEYWORD("switch", Token::SWITCH)                                  \
-  KEYWORD_GROUP('t')                                                \
-  KEYWORD("this", Token::THIS)                                      \
-  KEYWORD("throw", Token::THROW)                                    \
-  KEYWORD("true", Token::TRUE_LITERAL)                              \
-  KEYWORD("try", Token::TRY)                                        \
-  KEYWORD("typeof", Token::TYPEOF)                                  \
-  KEYWORD_GROUP('v')                                                \
-  KEYWORD("var", Token::VAR)                                        \
-  KEYWORD("void", Token::VOID)                                      \
-  KEYWORD_GROUP('w')                                                \
-  KEYWORD("while", Token::WHILE)                                    \
-  KEYWORD("with", Token::WITH)                                      \
-  KEYWORD_GROUP('y')                                                \
+#define KEYWORDS(KEYWORD_GROUP, KEYWORD)                                     \
+  KEYWORD_GROUP('b')                                                         \
+  KEYWORD("break", Token::BREAK)                                             \
+  KEYWORD_GROUP('c')                                                         \
+  KEYWORD("case", Token::CASE)                                               \
+  KEYWORD("catch", Token::CATCH)                                             \
+  KEYWORD("class",                                                           \
+          harmony_classes ? Token::CLASS : Token::FUTURE_RESERVED_WORD)      \
+  KEYWORD("const", Token::CONST)                                             \
+  KEYWORD("continue", Token::CONTINUE)                                       \
+  KEYWORD_GROUP('d')                                                         \
+  KEYWORD("debugger", Token::DEBUGGER)                                       \
+  KEYWORD("default", Token::DEFAULT)                                         \
+  KEYWORD("delete", Token::DELETE)                                           \
+  KEYWORD("do", Token::DO)                                                   \
+  KEYWORD_GROUP('e')                                                         \
+  KEYWORD("else", Token::ELSE)                                               \
+  KEYWORD("enum", Token::FUTURE_RESERVED_WORD)                               \
+  KEYWORD("export",                                                          \
+          harmony_modules ? Token::EXPORT : Token::FUTURE_RESERVED_WORD)     \
+  KEYWORD("extends",                                                         \
+          harmony_classes ? Token::EXTENDS : Token::FUTURE_RESERVED_WORD)    \
+  KEYWORD_GROUP('f')                                                         \
+  KEYWORD("false", Token::FALSE_LITERAL)                                     \
+  KEYWORD("finally", Token::FINALLY)                                         \
+  KEYWORD("for", Token::FOR)                                                 \
+  KEYWORD("function", Token::FUNCTION)                                       \
+  KEYWORD_GROUP('i')                                                         \
+  KEYWORD("if", Token::IF)                                                   \
+  KEYWORD("implements", Token::FUTURE_STRICT_RESERVED_WORD)                  \
+  KEYWORD("import",                                                          \
+          harmony_modules ? Token::IMPORT : Token::FUTURE_RESERVED_WORD)     \
+  KEYWORD("in", Token::IN)                                                   \
+  KEYWORD("instanceof", Token::INSTANCEOF)                                   \
+  KEYWORD("interface", Token::FUTURE_STRICT_RESERVED_WORD)                   \
+  KEYWORD_GROUP('l')                                                         \
+  KEYWORD("let",                                                             \
+          harmony_scoping ? Token::LET : Token::FUTURE_STRICT_RESERVED_WORD) \
+  KEYWORD_GROUP('n')                                                         \
+  KEYWORD("new", Token::NEW)                                                 \
+  KEYWORD("null", Token::NULL_LITERAL)                                       \
+  KEYWORD_GROUP('p')                                                         \
+  KEYWORD("package", Token::FUTURE_STRICT_RESERVED_WORD)                     \
+  KEYWORD("private", Token::FUTURE_STRICT_RESERVED_WORD)                     \
+  KEYWORD("protected", Token::FUTURE_STRICT_RESERVED_WORD)                   \
+  KEYWORD("public", Token::FUTURE_STRICT_RESERVED_WORD)                      \
+  KEYWORD_GROUP('r')                                                         \
+  KEYWORD("return", Token::RETURN)                                           \
+  KEYWORD_GROUP('s')                                                         \
+  KEYWORD("static", harmony_classes ? Token::STATIC                          \
+                                    : Token::FUTURE_STRICT_RESERVED_WORD)    \
+  KEYWORD("super",                                                           \
+          harmony_classes ? Token::SUPER : Token::FUTURE_RESERVED_WORD)      \
+  KEYWORD("switch", Token::SWITCH)                                           \
+  KEYWORD_GROUP('t')                                                         \
+  KEYWORD("this", Token::THIS)                                               \
+  KEYWORD("throw", Token::THROW)                                             \
+  KEYWORD("true", Token::TRUE_LITERAL)                                       \
+  KEYWORD("try", Token::TRY)                                                 \
+  KEYWORD("typeof", Token::TYPEOF)                                           \
+  KEYWORD_GROUP('v')                                                         \
+  KEYWORD("var", Token::VAR)                                                 \
+  KEYWORD("void", Token::VOID)                                               \
+  KEYWORD_GROUP('w')                                                         \
+  KEYWORD("while", Token::WHILE)                                             \
+  KEYWORD("with", Token::WITH)                                               \
+  KEYWORD_GROUP('y')                                                         \
   KEYWORD("yield", Token::YIELD)
 
 
 static Token::Value KeywordOrIdentifierToken(const uint8_t* input,
                                              int input_length,
                                              bool harmony_scoping,
-                                             bool harmony_modules) {
-  ASSERT(input_length >= 1);
+                                             bool harmony_modules,
+                                             bool harmony_classes) {
+  DCHECK(input_length >= 1);
   const int kMinLength = 2;
   const int kMaxLength = 10;
   if (input_length < kMinLength || input_length > kMaxLength) {
@@ -927,8 +1014,19 @@
 }
 
 
+bool Scanner::IdentifierIsFutureStrictReserved(
+    const AstRawString* string) const {
+  // Keywords are always 1-byte strings.
+  return string->is_one_byte() &&
+         Token::FUTURE_STRICT_RESERVED_WORD ==
+             KeywordOrIdentifierToken(string->raw_data(), string->length(),
+                                      harmony_scoping_, harmony_modules_,
+                                      harmony_classes_);
+}
+
+
 Token::Value Scanner::ScanIdentifierOrKeyword() {
-  ASSERT(unicode_cache_->IsIdentifierStart(c0_));
+  DCHECK(unicode_cache_->IsIdentifierStart(c0_));
   LiteralScope literal(this);
   // Scan identifier start character.
   if (c0_ == '\\') {
@@ -966,7 +1064,8 @@
     return KeywordOrIdentifierToken(chars.start(),
                                     chars.length(),
                                     harmony_scoping_,
-                                    harmony_modules_);
+                                    harmony_modules_,
+                                    harmony_classes_);
   }
 
   return Token::IDENTIFIER;
@@ -1044,7 +1143,7 @@
 
 
 bool Scanner::ScanLiteralUnicodeEscape() {
-  ASSERT(c0_ == '\\');
+  DCHECK(c0_ == '\\');
   uc32 chars_read[6] = {'\\', 'u', 0, 0, 0, 0};
   Advance();
   int i = 1;
@@ -1093,31 +1192,24 @@
 }
 
 
-Handle<String> Scanner::AllocateNextLiteralString(Isolate* isolate,
-                                                  PretenureFlag tenured) {
-  if (is_next_literal_one_byte()) {
-    return isolate->factory()->NewStringFromOneByte(
-        next_literal_one_byte_string(), tenured).ToHandleChecked();
-  } else {
-    return isolate->factory()->NewStringFromTwoByte(
-        next_literal_two_byte_string(), tenured).ToHandleChecked();
+const AstRawString* Scanner::CurrentSymbol(AstValueFactory* ast_value_factory) {
+  if (is_literal_one_byte()) {
+    return ast_value_factory->GetOneByteString(literal_one_byte_string());
   }
+  return ast_value_factory->GetTwoByteString(literal_two_byte_string());
 }
 
 
-Handle<String> Scanner::AllocateInternalizedString(Isolate* isolate) {
-  if (is_literal_one_byte()) {
-    return isolate->factory()->InternalizeOneByteString(
-        literal_one_byte_string());
-  } else {
-    return isolate->factory()->InternalizeTwoByteString(
-        literal_two_byte_string());
+const AstRawString* Scanner::NextSymbol(AstValueFactory* ast_value_factory) {
+  if (is_next_literal_one_byte()) {
+    return ast_value_factory->GetOneByteString(next_literal_one_byte_string());
   }
+  return ast_value_factory->GetTwoByteString(next_literal_two_byte_string());
 }
 
 
 double Scanner::DoubleValue() {
-  ASSERT(is_literal_one_byte());
+  DCHECK(is_literal_one_byte());
   return StringToDouble(
       unicode_cache_,
       literal_one_byte_string(),
@@ -1162,7 +1254,7 @@
 
 
 int DuplicateFinder::AddNumber(Vector<const uint8_t> key, int value) {
-  ASSERT(key.length() > 0);
+  DCHECK(key.length() > 0);
   // Quick check for already being in canonical form.
   if (IsNumberCanonical(key)) {
     return AddOneByteSymbol(key, value);
@@ -1216,7 +1308,7 @@
 
 uint32_t DuplicateFinder::Hash(Vector<const uint8_t> key, bool is_one_byte) {
   // Primitive hash function, almost identical to the one used
-  // for strings (except that it's seeded by the length and ASCII-ness).
+  // for strings (except that it's seeded by the length and representation).
   int length = key.length();
   uint32_t hash = (length << 1) | (is_one_byte ? 1 : 0);
   for (int i = 0; i < length; i++) {
@@ -1230,10 +1322,10 @@
 
 bool DuplicateFinder::Match(void* first, void* second) {
   // Decode lengths.
-  // Length + ASCII-bit is encoded as base 128, most significant heptet first,
-  // with a 8th bit being non-zero while there are more heptets.
+  // Length + representation is encoded as base 128, most significant heptet
+  // first, with an 8th bit being non-zero while there are more heptets.
   // The value encodes the number of bytes following, and whether the original
-  // was ASCII.
+  // was Latin1.
   byte* s1 = reinterpret_cast<byte*>(first);
   byte* s2 = reinterpret_cast<byte*>(second);
   uint32_t length_one_byte_field = 0;
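
The scheme the comment above describes is worth a worked example: the key is prefixed by (length << 1) | is_one_byte, split into 7-bit heptets stored most significant first, with the top bit of each byte acting as a continuation flag. A hedged sketch of that encoding (illustrative helper names, not DuplicateFinder's actual ones):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Encode (length << 1) | is_one_byte as 7-bit heptets, most significant
    // first; the 8th bit of each byte is set while more heptets follow.
    static void EncodeLengthField(uint32_t length, bool is_one_byte,
                                  std::vector<uint8_t>* out) {
      uint32_t value = (length << 1) | (is_one_byte ? 1 : 0);
      int shift = 28;  // A 32-bit value needs at most five heptets.
      while (shift > 0 && (value >> shift) == 0) shift -= 7;
      for (; shift > 0; shift -= 7) {
        out->push_back(0x80 | ((value >> shift) & 0x7F));
      }
      out->push_back(value & 0x7F);  // Last heptet: continuation bit clear.
    }

    static uint32_t DecodeLengthField(const uint8_t* p, bool* is_one_byte) {
      uint32_t value = 0;
      uint8_t b;
      do {
        b = *p++;
        value = (value << 7) | (b & 0x7F);
      } while (b & 0x80);
      *is_one_byte = (value & 1) != 0;
      return value >> 1;
    }

    int main() {
      std::vector<uint8_t> buf;
      EncodeLengthField(300, true, &buf);  // 601 = 0b100 1011001: two heptets.
      bool one_byte = false;
      uint32_t length = DecodeLengthField(buf.data(), &one_byte);
      std::printf("bytes=%zu length=%u one_byte=%d\n", buf.size(), length,
                  one_byte ? 1 : 0);
    }
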
diff --git a/src/scanner.h b/src/scanner.h
index 2979082..356c8e4 100644
--- a/src/scanner.h
+++ b/src/scanner.h
@@ -8,8 +8,8 @@
 #define V8_SCANNER_H_
 
 #include "src/allocation.h"
+#include "src/base/logging.h"
 #include "src/char-predicates.h"
-#include "src/checks.h"
 #include "src/globals.h"
 #include "src/hashmap.h"
 #include "src/list.h"
@@ -21,6 +21,8 @@
 namespace internal {
 
 
+class AstRawString;
+class AstValueFactory;
 class ParserRecorder;
 
 
@@ -150,7 +152,7 @@
   int AddTwoByteSymbol(Vector<const uint16_t> key, int value);
   // Add a number literal by converting it (if necessary)
   // to the string that ToString(ToNumber(literal)) would generate,
-  // and then adding that string with AddAsciiSymbol.
+  // and then adding that string with AddOneByteSymbol.
   // This string is the actual value used as key in an object literal,
   // and the one that must be different from the other keys.
   int AddNumber(Vector<const uint8_t> key, int value);
@@ -164,7 +166,7 @@
   uint8_t* BackupKey(Vector<const uint8_t> key, bool is_one_byte);
 
   // Compare two encoded keys (both pointing into the backing store)
-  // for having the same base-127 encoded lengths and ASCII-ness,
+  // for having the same base-128 encoded lengths and representation,
   // and then having the same 'length' bytes following.
   static bool Match(void* first, void* second);
   // Creates a hash from a sequence of bytes.
@@ -209,34 +211,34 @@
       }
       ConvertToTwoByte();
     }
-    ASSERT(code_unit < 0x10000u);
+    DCHECK(code_unit < 0x10000u);
     *reinterpret_cast<uint16_t*>(&backing_store_[position_]) = code_unit;
     position_ += kUC16Size;
   }
 
-  bool is_one_byte() { return is_one_byte_; }
+  bool is_one_byte() const { return is_one_byte_; }
 
-  bool is_contextual_keyword(Vector<const char> keyword) {
+  bool is_contextual_keyword(Vector<const char> keyword) const {
     return is_one_byte() && keyword.length() == position_ &&
         (memcmp(keyword.start(), backing_store_.start(), position_) == 0);
   }
 
-  Vector<const uint16_t> two_byte_literal() {
-    ASSERT(!is_one_byte_);
-    ASSERT((position_ & 0x1) == 0);
+  Vector<const uint16_t> two_byte_literal() const {
+    DCHECK(!is_one_byte_);
+    DCHECK((position_ & 0x1) == 0);
     return Vector<const uint16_t>(
         reinterpret_cast<const uint16_t*>(backing_store_.start()),
         position_ >> 1);
   }
 
-  Vector<const uint8_t> one_byte_literal() {
-    ASSERT(is_one_byte_);
+  Vector<const uint8_t> one_byte_literal() const {
+    DCHECK(is_one_byte_);
     return Vector<const uint8_t>(
         reinterpret_cast<const uint8_t*>(backing_store_.start()),
         position_);
   }
 
-  int length() {
+  int length() const {
     return is_one_byte_ ? position_ : (position_ >> 1);
   }
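
LiteralBuffer starts in the one-byte (Latin-1) representation and only widens when AddChar sees a code unit above the one-byte range, at which point ConvertToTwoByte rewrites the whole backing store. A minimal sketch of the same idea, using std::vector instead of the Vector<byte> backing store (names are illustrative):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Starts one-byte (Latin-1) and widens the whole store to UTF-16 code
    // units the first time a character above 0xFF arrives.
    class TinyLiteralBuffer {
     public:
      void AddChar(uint32_t code_unit) {
        if (is_one_byte_) {
          if (code_unit <= 0xFF) {
            narrow_.push_back(static_cast<uint8_t>(code_unit));
            return;
          }
          wide_.assign(narrow_.begin(), narrow_.end());  // ConvertToTwoByte().
          narrow_.clear();
          is_one_byte_ = false;
        }
        wide_.push_back(static_cast<uint16_t>(code_unit));
      }
      bool is_one_byte() const { return is_one_byte_; }
      size_t length() const {
        return is_one_byte_ ? narrow_.size() : wide_.size();
      }

     private:
      bool is_one_byte_ = true;
      std::vector<uint8_t> narrow_;
      std::vector<uint16_t> wide_;
    };

    int main() {
      TinyLiteralBuffer buf;
      buf.AddChar('h');
      buf.AddChar('i');
      std::printf("one_byte=%d length=%zu\n", buf.is_one_byte(), buf.length());
      buf.AddChar(0x03A9);  // U+03A9 forces the two-byte representation.
      std::printf("one_byte=%d length=%zu\n", buf.is_one_byte(), buf.length());
    }
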
 
@@ -245,6 +247,8 @@
     is_one_byte_ = true;
   }
 
+  Handle<String> Internalize(Isolate* isolate) const;
+
  private:
   static const int kInitialCapacity = 16;
   static const int kGrowthFactory = 4;
@@ -264,7 +268,7 @@
   }
 
   void ConvertToTwoByte() {
-    ASSERT(is_one_byte_);
+    DCHECK(is_one_byte_);
     Vector<byte> new_store;
     int new_content_size = position_ * kUC16Size;
     if (new_content_size >= backing_store_.length()) {
@@ -368,17 +372,16 @@
     return current_.literal_chars->length() != source_length;
   }
   bool is_literal_contextual_keyword(Vector<const char> keyword) {
-    ASSERT_NOT_NULL(current_.literal_chars);
+    DCHECK_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->is_contextual_keyword(keyword);
   }
   bool is_next_contextual_keyword(Vector<const char> keyword) {
-    ASSERT_NOT_NULL(next_.literal_chars);
+    DCHECK_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->is_contextual_keyword(keyword);
   }
 
-  Handle<String> AllocateNextLiteralString(Isolate* isolate,
-                                           PretenureFlag tenured);
-  Handle<String> AllocateInternalizedString(Isolate* isolate);
+  const AstRawString* CurrentSymbol(AstValueFactory* ast_value_factory);
+  const AstRawString* NextSymbol(AstValueFactory* ast_value_factory);
 
   double DoubleValue();
   bool UnescapedLiteralMatches(const char* data, int length) {
@@ -435,6 +438,12 @@
   void SetHarmonyNumericLiterals(bool numeric_literals) {
     harmony_numeric_literals_ = numeric_literals;
   }
+  bool HarmonyClasses() const {
+    return harmony_classes_;
+  }
+  void SetHarmonyClasses(bool classes) {
+    harmony_classes_ = classes;
+  }
 
   // Returns true if there was a line terminator before the peek'ed token,
   // possibly inside a multi-line comment.
@@ -450,6 +459,13 @@
   // be empty).
   bool ScanRegExpFlags();
 
+  const LiteralBuffer* source_url() const { return &source_url_; }
+  const LiteralBuffer* source_mapping_url() const {
+    return &source_mapping_url_;
+  }
+
+  bool IdentifierIsFutureStrictReserved(const AstRawString* string) const;
+
  private:
   // The current and look-ahead token.
   struct TokenDesc {
@@ -481,7 +497,7 @@
   }
 
   INLINE(void AddLiteralChar(uc32 c)) {
-    ASSERT_NOT_NULL(next_.literal_chars);
+    DCHECK_NOT_NULL(next_.literal_chars);
     next_.literal_chars->AddChar(c);
   }
 
@@ -530,37 +546,37 @@
   // These functions only give the correct result if the literal
   // was scanned between calls to StartLiteral() and TerminateLiteral().
   Vector<const uint8_t> literal_one_byte_string() {
-    ASSERT_NOT_NULL(current_.literal_chars);
+    DCHECK_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->one_byte_literal();
   }
   Vector<const uint16_t> literal_two_byte_string() {
-    ASSERT_NOT_NULL(current_.literal_chars);
+    DCHECK_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->two_byte_literal();
   }
   bool is_literal_one_byte() {
-    ASSERT_NOT_NULL(current_.literal_chars);
+    DCHECK_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->is_one_byte();
   }
   int literal_length() const {
-    ASSERT_NOT_NULL(current_.literal_chars);
+    DCHECK_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->length();
   }
   // Returns the literal string for the next token (the token that
   // would be returned if Next() were called).
   Vector<const uint8_t> next_literal_one_byte_string() {
-    ASSERT_NOT_NULL(next_.literal_chars);
+    DCHECK_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->one_byte_literal();
   }
   Vector<const uint16_t> next_literal_two_byte_string() {
-    ASSERT_NOT_NULL(next_.literal_chars);
+    DCHECK_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->two_byte_literal();
   }
   bool is_next_literal_one_byte() {
-    ASSERT_NOT_NULL(next_.literal_chars);
+    DCHECK_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->is_one_byte();
   }
   int next_literal_length() const {
-    ASSERT_NOT_NULL(next_.literal_chars);
+    DCHECK_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->length();
   }
 
@@ -571,6 +587,8 @@
 
   bool SkipWhiteSpace();
   Token::Value SkipSingleLineComment();
+  Token::Value SkipSourceURLComment();
+  void TryToParseSourceURLComment();
   Token::Value SkipMultiLineComment();
   // Scans a possible HTML comment -- begins with '<!'.
   Token::Value ScanHtmlComment();
@@ -605,6 +623,10 @@
   LiteralBuffer literal_buffer1_;
   LiteralBuffer literal_buffer2_;
 
+  // Values parsed from magic comments.
+  LiteralBuffer source_url_;
+  LiteralBuffer source_mapping_url_;
+
   TokenDesc current_;  // desc for current token (as returned by Next())
   TokenDesc next_;     // desc for next token (one token look-ahead)
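
The new source_url_ and source_mapping_url_ buffers hold values recovered from `//# sourceURL=...` and `//# sourceMappingURL=...` magic comments (`//@` is the older, deprecated spelling). A rough sketch of the recognition step on a single-line comment body; this is an illustration, not TryToParseSourceURLComment itself:

    #include <cstdio>
    #include <string>

    // Given the body of a "//"-comment, recover a sourceURL/sourceMappingURL
    // magic comment if present. Names and exact behavior are illustrative.
    static bool TryParseMagicComment(const std::string& comment,
                                     std::string* name, std::string* value) {
      size_t i = 0;
      if (i == comment.size() || (comment[i] != '#' && comment[i] != '@'))
        return false;
      ++i;
      while (i < comment.size() && comment[i] == ' ') ++i;
      size_t eq = comment.find('=', i);
      if (eq == std::string::npos) return false;
      *name = comment.substr(i, eq - i);
      if (*name != "sourceURL" && *name != "sourceMappingURL") return false;
      *value = comment.substr(eq + 1);
      return true;
    }

    int main() {
      std::string name, value;
      if (TryParseMagicComment("# sourceMappingURL=foo.js.map", &name, &value))
        std::printf("%s = %s\n", name.c_str(), value.c_str());
    }
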
 
@@ -631,6 +653,8 @@
   bool harmony_modules_;
   // Whether we scan 0o777 and 0b111 as numbers.
   bool harmony_numeric_literals_;
+  // Whether we scan 'class', 'extends', 'static' and 'super' as keywords.
+  bool harmony_classes_;
 };
 
 } }  // namespace v8::internal
diff --git a/src/scopeinfo.cc b/src/scopeinfo.cc
index d84c5bf..75bf014 100644
--- a/src/scopeinfo.cc
+++ b/src/scopeinfo.cc
@@ -21,8 +21,8 @@
   const int stack_local_count = stack_locals.length();
   const int context_local_count = context_locals.length();
   // Make sure we allocate the correct amount.
-  ASSERT(scope->StackLocalCount() == stack_local_count);
-  ASSERT(scope->ContextLocalCount() == context_local_count);
+  DCHECK(scope->StackLocalCount() == stack_local_count);
+  DCHECK(scope->ContextLocalCount() == context_local_count);
 
   // Determine use and location of the function variable if it is present.
   FunctionVariableInfo function_name_info;
@@ -34,7 +34,7 @@
     } else if (var->IsContextSlot()) {
       function_name_info = CONTEXT;
     } else {
-      ASSERT(var->IsStackLocal());
+      DCHECK(var->IsStackLocal());
       function_name_info = STACK;
     }
     function_variable_mode = var->mode();
@@ -54,10 +54,12 @@
 
   // Encode the flags.
   int flags = ScopeTypeField::encode(scope->scope_type()) |
-      CallsEvalField::encode(scope->calls_eval()) |
-      StrictModeField::encode(scope->strict_mode()) |
-      FunctionVariableField::encode(function_name_info) |
-      FunctionVariableMode::encode(function_variable_mode);
+              CallsEvalField::encode(scope->calls_eval()) |
+              StrictModeField::encode(scope->strict_mode()) |
+              FunctionVariableField::encode(function_name_info) |
+              FunctionVariableMode::encode(function_variable_mode) |
+              AsmModuleField::encode(scope->asm_module()) |
+              AsmFunctionField::encode(scope->asm_function());
   scope_info->SetFlags(flags);
   scope_info->SetParameterCount(parameter_count);
   scope_info->SetStackLocalCount(stack_local_count);
@@ -65,7 +67,7 @@
 
   int index = kVariablePartIndex;
   // Add parameters.
-  ASSERT(index == scope_info->ParameterEntriesIndex());
+  DCHECK(index == scope_info->ParameterEntriesIndex());
   for (int i = 0; i < parameter_count; ++i) {
     scope_info->set(index++, *scope->parameter(i)->name());
   }
@@ -73,9 +75,9 @@
   // Add stack locals' names. We are assuming that the stack locals'
   // slots are allocated in increasing order, so we can simply add
   // them to the ScopeInfo object.
-  ASSERT(index == scope_info->StackLocalEntriesIndex());
+  DCHECK(index == scope_info->StackLocalEntriesIndex());
   for (int i = 0; i < stack_local_count; ++i) {
-    ASSERT(stack_locals[i]->index() == i);
+    DCHECK(stack_locals[i]->index() == i);
     scope_info->set(index++, *stack_locals[i]->name());
   }
 
@@ -88,37 +90,39 @@
   context_locals.Sort(&Variable::CompareIndex);
 
   // Add context locals' names.
-  ASSERT(index == scope_info->ContextLocalNameEntriesIndex());
+  DCHECK(index == scope_info->ContextLocalNameEntriesIndex());
   for (int i = 0; i < context_local_count; ++i) {
     scope_info->set(index++, *context_locals[i]->name());
   }
 
   // Add context locals' info.
-  ASSERT(index == scope_info->ContextLocalInfoEntriesIndex());
+  DCHECK(index == scope_info->ContextLocalInfoEntriesIndex());
   for (int i = 0; i < context_local_count; ++i) {
     Variable* var = context_locals[i];
-    uint32_t value = ContextLocalMode::encode(var->mode()) |
-        ContextLocalInitFlag::encode(var->initialization_flag());
+    uint32_t value =
+        ContextLocalMode::encode(var->mode()) |
+        ContextLocalInitFlag::encode(var->initialization_flag()) |
+        ContextLocalMaybeAssignedFlag::encode(var->maybe_assigned());
     scope_info->set(index++, Smi::FromInt(value));
   }
 
   // If present, add the function variable name and its index.
-  ASSERT(index == scope_info->FunctionNameEntryIndex());
+  DCHECK(index == scope_info->FunctionNameEntryIndex());
   if (has_function_name) {
     int var_index = scope->function()->proxy()->var()->index();
     scope_info->set(index++, *scope->function()->proxy()->name());
     scope_info->set(index++, Smi::FromInt(var_index));
-    ASSERT(function_name_info != STACK ||
+    DCHECK(function_name_info != STACK ||
            (var_index == scope_info->StackLocalCount() &&
             var_index == scope_info->StackSlotCount() - 1));
-    ASSERT(function_name_info != CONTEXT ||
+    DCHECK(function_name_info != CONTEXT ||
            var_index == scope_info->ContextLength() - 1);
   }
 
-  ASSERT(index == scope_info->length());
-  ASSERT(scope->num_parameters() == scope_info->ParameterCount());
-  ASSERT(scope->num_stack_slots() == scope_info->StackSlotCount());
-  ASSERT(scope->num_heap_slots() == scope_info->ContextLength() ||
+  DCHECK(index == scope_info->length());
+  DCHECK(scope->num_parameters() == scope_info->ParameterCount());
+  DCHECK(scope->num_stack_slots() == scope_info->StackSlotCount());
+  DCHECK(scope->num_heap_slots() == scope_info->ContextLength() ||
          (scope->num_heap_slots() == kVariablePartIndex &&
           scope_info->ContextLength() == 0));
   return scope_info;
@@ -131,7 +135,7 @@
 
 
 ScopeType ScopeInfo::scope_type() {
-  ASSERT(length() > 0);
+  DCHECK(length() > 0);
   return ScopeTypeField::decode(Flags());
 }
 
@@ -204,21 +208,21 @@
 
 
 String* ScopeInfo::FunctionName() {
-  ASSERT(HasFunctionName());
+  DCHECK(HasFunctionName());
   return String::cast(get(FunctionNameEntryIndex()));
 }
 
 
 String* ScopeInfo::ParameterName(int var) {
-  ASSERT(0 <= var && var < ParameterCount());
+  DCHECK(0 <= var && var < ParameterCount());
   int info_index = ParameterEntriesIndex() + var;
   return String::cast(get(info_index));
 }
 
 
 String* ScopeInfo::LocalName(int var) {
-  ASSERT(0 <= var && var < LocalCount());
-  ASSERT(StackLocalEntriesIndex() + StackLocalCount() ==
+  DCHECK(0 <= var && var < LocalCount());
+  DCHECK(StackLocalEntriesIndex() + StackLocalCount() ==
          ContextLocalNameEntriesIndex());
   int info_index = StackLocalEntriesIndex() + var;
   return String::cast(get(info_index));
@@ -226,21 +230,21 @@
 
 
 String* ScopeInfo::StackLocalName(int var) {
-  ASSERT(0 <= var && var < StackLocalCount());
+  DCHECK(0 <= var && var < StackLocalCount());
   int info_index = StackLocalEntriesIndex() + var;
   return String::cast(get(info_index));
 }
 
 
 String* ScopeInfo::ContextLocalName(int var) {
-  ASSERT(0 <= var && var < ContextLocalCount());
+  DCHECK(0 <= var && var < ContextLocalCount());
   int info_index = ContextLocalNameEntriesIndex() + var;
   return String::cast(get(info_index));
 }
 
 
 VariableMode ScopeInfo::ContextLocalMode(int var) {
-  ASSERT(0 <= var && var < ContextLocalCount());
+  DCHECK(0 <= var && var < ContextLocalCount());
   int info_index = ContextLocalInfoEntriesIndex() + var;
   int value = Smi::cast(get(info_index))->value();
   return ContextLocalMode::decode(value);
@@ -248,15 +252,23 @@
 
 
 InitializationFlag ScopeInfo::ContextLocalInitFlag(int var) {
-  ASSERT(0 <= var && var < ContextLocalCount());
+  DCHECK(0 <= var && var < ContextLocalCount());
   int info_index = ContextLocalInfoEntriesIndex() + var;
   int value = Smi::cast(get(info_index))->value();
   return ContextLocalInitFlag::decode(value);
 }
 
 
+MaybeAssignedFlag ScopeInfo::ContextLocalMaybeAssignedFlag(int var) {
+  DCHECK(0 <= var && var < ContextLocalCount());
+  int info_index = ContextLocalInfoEntriesIndex() + var;
+  int value = Smi::cast(get(info_index))->value();
+  return ContextLocalMaybeAssignedFlag::decode(value);
+}
+
+
 bool ScopeInfo::LocalIsSynthetic(int var) {
-  ASSERT(0 <= var && var < LocalCount());
+  DCHECK(0 <= var && var < LocalCount());
   // There's currently no flag stored on the ScopeInfo to indicate that a
   // variable is a compiler-introduced temporary. However, to avoid conflict
   // with user declarations, the current temporaries like .generator_object and
@@ -267,7 +279,7 @@
 
 
 int ScopeInfo::StackSlotIndex(String* name) {
-  ASSERT(name->IsInternalizedString());
+  DCHECK(name->IsInternalizedString());
   if (length() > 0) {
     int start = StackLocalEntriesIndex();
     int end = StackLocalEntriesIndex() + StackLocalCount();
@@ -282,19 +294,19 @@
 
 
 int ScopeInfo::ContextSlotIndex(Handle<ScopeInfo> scope_info,
-                                Handle<String> name,
-                                VariableMode* mode,
-                                InitializationFlag* init_flag) {
-  ASSERT(name->IsInternalizedString());
-  ASSERT(mode != NULL);
-  ASSERT(init_flag != NULL);
+                                Handle<String> name, VariableMode* mode,
+                                InitializationFlag* init_flag,
+                                MaybeAssignedFlag* maybe_assigned_flag) {
+  DCHECK(name->IsInternalizedString());
+  DCHECK(mode != NULL);
+  DCHECK(init_flag != NULL);
   if (scope_info->length() > 0) {
     ContextSlotCache* context_slot_cache =
         scope_info->GetIsolate()->context_slot_cache();
-    int result =
-        context_slot_cache->Lookup(*scope_info, *name, mode, init_flag);
+    int result = context_slot_cache->Lookup(*scope_info, *name, mode, init_flag,
+                                            maybe_assigned_flag);
     if (result != ContextSlotCache::kNotFound) {
-      ASSERT(result < scope_info->ContextLength());
+      DCHECK(result < scope_info->ContextLength());
       return result;
     }
 
@@ -306,22 +318,24 @@
         int var = i - start;
         *mode = scope_info->ContextLocalMode(var);
         *init_flag = scope_info->ContextLocalInitFlag(var);
+        *maybe_assigned_flag = scope_info->ContextLocalMaybeAssignedFlag(var);
         result = Context::MIN_CONTEXT_SLOTS + var;
-        context_slot_cache->Update(scope_info, name, *mode, *init_flag, result);
-        ASSERT(result < scope_info->ContextLength());
+        context_slot_cache->Update(scope_info, name, *mode, *init_flag,
+                                   *maybe_assigned_flag, result);
+        DCHECK(result < scope_info->ContextLength());
         return result;
       }
     }
-    // Cache as not found. Mode and init flag don't matter.
-    context_slot_cache->Update(
-        scope_info, name, INTERNAL, kNeedsInitialization, -1);
+    // Cache as not found. Mode, init flag and maybe assigned flag don't matter.
+    context_slot_cache->Update(scope_info, name, INTERNAL, kNeedsInitialization,
+                               kNotAssigned, -1);
   }
   return -1;
 }
 
 
 int ScopeInfo::ParameterIndex(String* name) {
-  ASSERT(name->IsInternalizedString());
+  DCHECK(name->IsInternalizedString());
   if (length() > 0) {
     // We must read parameters from the end since for
     // multiply declared parameters the value of the
@@ -341,8 +355,8 @@
 
 
 int ScopeInfo::FunctionContextSlotIndex(String* name, VariableMode* mode) {
-  ASSERT(name->IsInternalizedString());
-  ASSERT(mode != NULL);
+  DCHECK(name->IsInternalizedString());
+  DCHECK(mode != NULL);
   if (length() > 0) {
     if (FunctionVariableField::decode(Flags()) == CONTEXT &&
         FunctionName() == name) {
@@ -368,13 +382,11 @@
     int context_index = Context::MIN_CONTEXT_SLOTS + i;
     RETURN_ON_EXCEPTION_VALUE(
         isolate,
-        Runtime::SetObjectProperty(
-            isolate,
+        Runtime::DefineObjectProperty(
             scope_object,
             Handle<String>(String::cast(scope_info->get(i + start))),
             Handle<Object>(context->get(context_index), isolate),
-            ::NONE,
-            SLOPPY),
+            ::NONE),
         false);
   }
   return true;
@@ -382,7 +394,7 @@
 
 
 int ScopeInfo::ParameterEntriesIndex() {
-  ASSERT(length() > 0);
+  DCHECK(length() > 0);
   return kVariablePartIndex;
 }
 
@@ -415,30 +427,30 @@
 }
 
 
-int ContextSlotCache::Lookup(Object* data,
-                             String* name,
-                             VariableMode* mode,
-                             InitializationFlag* init_flag) {
+int ContextSlotCache::Lookup(Object* data, String* name, VariableMode* mode,
+                             InitializationFlag* init_flag,
+                             MaybeAssignedFlag* maybe_assigned_flag) {
   int index = Hash(data, name);
   Key& key = keys_[index];
   if ((key.data == data) && key.name->Equals(name)) {
     Value result(values_[index]);
     if (mode != NULL) *mode = result.mode();
     if (init_flag != NULL) *init_flag = result.initialization_flag();
+    if (maybe_assigned_flag != NULL)
+      *maybe_assigned_flag = result.maybe_assigned_flag();
     return result.index() + kNotFound;
   }
   return kNotFound;
 }
 
 
-void ContextSlotCache::Update(Handle<Object> data,
-                              Handle<String> name,
-                              VariableMode mode,
-                              InitializationFlag init_flag,
+void ContextSlotCache::Update(Handle<Object> data, Handle<String> name,
+                              VariableMode mode, InitializationFlag init_flag,
+                              MaybeAssignedFlag maybe_assigned_flag,
                               int slot_index) {
   DisallowHeapAllocation no_gc;
   Handle<String> internalized_name;
-  ASSERT(slot_index > kNotFound);
+  DCHECK(slot_index > kNotFound);
   if (StringTable::InternalizeStringIfExists(name->GetIsolate(), name).
       ToHandle(&internalized_name)) {
     int index = Hash(*data, *internalized_name);
@@ -446,9 +458,10 @@
     key.data = *data;
     key.name = *internalized_name;
     // Note that Value only takes a uint as the index.
-    values_[index] = Value(mode, init_flag, slot_index - kNotFound).raw();
+    values_[index] = Value(mode, init_flag, maybe_assigned_flag,
+                           slot_index - kNotFound).raw();
 #ifdef DEBUG
-    ValidateEntry(data, name, mode, init_flag, slot_index);
+    ValidateEntry(data, name, mode, init_flag, maybe_assigned_flag, slot_index);
 #endif
   }
 }
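
ContextSlotCache is a direct-mapped cache: Hash(data, name) picks exactly one of kLength slots, Update unconditionally overwrites that slot, and Lookup only hits when the stored key matches exactly, so a collision merely evicts the previous entry. A minimal sketch of the shape (the kNotFound bookkeeping is simplified relative to the real class):

    #include <cstdio>
    #include <string>

    // A fixed-size, direct-mapped cache: one candidate slot per key, hits
    // require an exact key match, collisions simply overwrite.
    struct TinySlotCache {
      static const int kLength = 256;   // Must stay a power of two.
      static const int kNotFound = -2;  // A cached -1 means "known absent".

      struct Entry {
        const void* data;
        std::string name;
        int slot_index;
      };
      Entry entries_[kLength];

      TinySlotCache() {
        for (int i = 0; i < kLength; i++) entries_[i].data = nullptr;
      }

      static int Hash(const void* data, const std::string& name) {
        size_t h = reinterpret_cast<size_t>(data) >> 3;
        for (size_t i = 0; i < name.size(); i++) h = h * 31 + name[i];
        return static_cast<int>(h & (kLength - 1));
      }

      int Lookup(const void* data, const std::string& name) const {
        const Entry& e = entries_[Hash(data, name)];
        if (e.data == data && e.name == name) return e.slot_index;
        return kNotFound;
      }

      void Update(const void* data, const std::string& name, int slot_index) {
        Entry& e = entries_[Hash(data, name)];
        e.data = data;
        e.name = name;
        e.slot_index = slot_index;
      }
    };

    int main() {
      TinySlotCache cache;
      int scope_info;  // Stand-in for a ScopeInfo; only its address matters.
      cache.Update(&scope_info, "x", 5);
      cache.Update(&scope_info, "y", -1);  // Negative caching, as above.
      std::printf("x=%d y=%d z=%d\n", cache.Lookup(&scope_info, "x"),
                  cache.Lookup(&scope_info, "y"),
                  cache.Lookup(&scope_info, "z"));
    }
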
@@ -461,10 +474,10 @@
 
 #ifdef DEBUG
 
-void ContextSlotCache::ValidateEntry(Handle<Object> data,
-                                     Handle<String> name,
+void ContextSlotCache::ValidateEntry(Handle<Object> data, Handle<String> name,
                                      VariableMode mode,
                                      InitializationFlag init_flag,
+                                     MaybeAssignedFlag maybe_assigned_flag,
                                      int slot_index) {
   DisallowHeapAllocation no_gc;
   Handle<String> internalized_name;
@@ -472,12 +485,13 @@
       ToHandle(&internalized_name)) {
     int index = Hash(*data, *name);
     Key& key = keys_[index];
-    ASSERT(key.data == *data);
-    ASSERT(key.name->Equals(*name));
+    DCHECK(key.data == *data);
+    DCHECK(key.name->Equals(*name));
     Value result(values_[index]);
-    ASSERT(result.mode() == mode);
-    ASSERT(result.initialization_flag() == init_flag);
-    ASSERT(result.index() + kNotFound == slot_index);
+    DCHECK(result.mode() == mode);
+    DCHECK(result.initialization_flag() == init_flag);
+    DCHECK(result.maybe_assigned_flag() == maybe_assigned_flag);
+    DCHECK(result.index() + kNotFound == slot_index);
   }
 }
 
@@ -540,19 +554,19 @@
   for (Interface::Iterator it = interface->iterator();
        !it.done(); it.Advance(), ++i) {
     Variable* var = scope->LookupLocal(it.name());
-    info->set_name(i, *it.name());
+    info->set_name(i, *(it.name()->string()));
     info->set_mode(i, var->mode());
-    ASSERT((var->mode() == MODULE) == (it.interface()->IsModule()));
+    DCHECK((var->mode() == MODULE) == (it.interface()->IsModule()));
     if (var->mode() == MODULE) {
-      ASSERT(it.interface()->IsFrozen());
-      ASSERT(it.interface()->Index() >= 0);
+      DCHECK(it.interface()->IsFrozen());
+      DCHECK(it.interface()->Index() >= 0);
       info->set_index(i, it.interface()->Index());
     } else {
-      ASSERT(var->index() >= 0);
+      DCHECK(var->index() >= 0);
       info->set_index(i, var->index());
     }
   }
-  ASSERT(i == info->length());
+  DCHECK(i == info->length());
   return info;
 }
 
diff --git a/src/scopeinfo.h b/src/scopeinfo.h
index 7b8ed44..1d9f06f 100644
--- a/src/scopeinfo.h
+++ b/src/scopeinfo.h
@@ -20,17 +20,14 @@
  public:
   // Lookup context slot index for (data, name).
   // If absent, kNotFound is returned.
-  int Lookup(Object* data,
-             String* name,
-             VariableMode* mode,
-             InitializationFlag* init_flag);
+  int Lookup(Object* data, String* name, VariableMode* mode,
+             InitializationFlag* init_flag,
+             MaybeAssignedFlag* maybe_assigned_flag);
 
   // Update an element in the cache.
-  void Update(Handle<Object> data,
-              Handle<String> name,
-              VariableMode mode,
+  void Update(Handle<Object> data, Handle<String> name, VariableMode mode,
               InitializationFlag init_flag,
-              int slot_index);
+              MaybeAssignedFlag maybe_assigned_flag, int slot_index);
 
   // Clear the cache.
   void Clear();
@@ -49,11 +46,9 @@
   inline static int Hash(Object* data, String* name);
 
 #ifdef DEBUG
-  void ValidateEntry(Handle<Object> data,
-                     Handle<String> name,
-                     VariableMode mode,
-                     InitializationFlag init_flag,
-                     int slot_index);
+  void ValidateEntry(Handle<Object> data, Handle<String> name,
+                     VariableMode mode, InitializationFlag init_flag,
+                     MaybeAssignedFlag maybe_assigned_flag, int slot_index);
 #endif
 
   static const int kLength = 256;
@@ -63,18 +58,19 @@
   };
 
   struct Value {
-    Value(VariableMode mode,
-          InitializationFlag init_flag,
-          int index) {
-      ASSERT(ModeField::is_valid(mode));
-      ASSERT(InitField::is_valid(init_flag));
-      ASSERT(IndexField::is_valid(index));
-      value_ = ModeField::encode(mode) |
-          IndexField::encode(index) |
-          InitField::encode(init_flag);
-      ASSERT(mode == this->mode());
-      ASSERT(init_flag == this->initialization_flag());
-      ASSERT(index == this->index());
+    Value(VariableMode mode, InitializationFlag init_flag,
+          MaybeAssignedFlag maybe_assigned_flag, int index) {
+      DCHECK(ModeField::is_valid(mode));
+      DCHECK(InitField::is_valid(init_flag));
+      DCHECK(MaybeAssignedField::is_valid(maybe_assigned_flag));
+      DCHECK(IndexField::is_valid(index));
+      value_ = ModeField::encode(mode) | IndexField::encode(index) |
+               InitField::encode(init_flag) |
+               MaybeAssignedField::encode(maybe_assigned_flag);
+      DCHECK(mode == this->mode());
+      DCHECK(init_flag == this->initialization_flag());
+      DCHECK(maybe_assigned_flag == this->maybe_assigned_flag());
+      DCHECK(index == this->index());
     }
 
     explicit inline Value(uint32_t value) : value_(value) {}
@@ -87,13 +83,18 @@
       return InitField::decode(value_);
     }
 
+    MaybeAssignedFlag maybe_assigned_flag() {
+      return MaybeAssignedField::decode(value_);
+    }
+
     int index() { return IndexField::decode(value_); }
 
     // Bit fields in value_ (type, shift, size). Must be public so the
     // constants can be embedded in generated code.
-    class ModeField:  public BitField<VariableMode,       0, 4> {};
-    class InitField:  public BitField<InitializationFlag, 4, 1> {};
-    class IndexField: public BitField<int,                5, 32-5> {};
+    class ModeField : public BitField<VariableMode, 0, 4> {};
+    class InitField : public BitField<InitializationFlag, 4, 1> {};
+    class MaybeAssignedField : public BitField<MaybeAssignedFlag, 5, 1> {};
+    class IndexField : public BitField<int, 6, 32 - 6> {};
 
    private:
     uint32_t value_;
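
The net effect of the new MaybeAssignedField is a one-bit shift of the index: the packed cache value is now the mode in bits 0-3, the init flag in bit 4, the maybe-assigned flag in bit 5, and the slot index in bits 6-31, which is why IndexField moved from BitField<int, 5, 32-5> to BitField<int, 6, 32-6>. A worked packing with made-up field values:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // Hypothetical field values; the enum numerics are not V8's.
      uint32_t mode = 3;            // VariableMode, bits 0-3
      uint32_t init = 1;            // InitializationFlag, bit 4
      uint32_t maybe_assigned = 1;  // MaybeAssignedFlag, bit 5
      uint32_t index = 42;          // slot index, bits 6-31
      uint32_t value =
          mode | (init << 4) | (maybe_assigned << 5) | (index << 6);
      std::printf("value=0x%X mode=%u init=%u maybe=%u index=%u\n", value,
                  value & 0xF, (value >> 4) & 1, (value >> 5) & 1, value >> 6);
    }
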
diff --git a/src/scopes.cc b/src/scopes.cc
index 497f794..440c7f2 100644
--- a/src/scopes.cc
+++ b/src/scopes.cc
@@ -24,52 +24,40 @@
 //       use. Because a Variable holding a handle with the same location exists,
 //       this is ensured.
 
-static bool Match(void* key1, void* key2) {
-  String* name1 = *reinterpret_cast<String**>(key1);
-  String* name2 = *reinterpret_cast<String**>(key2);
-  ASSERT(name1->IsInternalizedString());
-  ASSERT(name2->IsInternalizedString());
-  return name1 == name2;
-}
-
-
 VariableMap::VariableMap(Zone* zone)
-    : ZoneHashMap(Match, 8, ZoneAllocationPolicy(zone)),
+    : ZoneHashMap(ZoneHashMap::PointersMatch, 8, ZoneAllocationPolicy(zone)),
       zone_(zone) {}
 VariableMap::~VariableMap() {}
 
 
-Variable* VariableMap::Declare(
-    Scope* scope,
-    Handle<String> name,
-    VariableMode mode,
-    bool is_valid_lhs,
-    Variable::Kind kind,
-    InitializationFlag initialization_flag,
-    Interface* interface) {
-  Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), true,
-                                 ZoneAllocationPolicy(zone()));
+Variable* VariableMap::Declare(Scope* scope, const AstRawString* name,
+                               VariableMode mode, bool is_valid_lhs,
+                               Variable::Kind kind,
+                               InitializationFlag initialization_flag,
+                               MaybeAssignedFlag maybe_assigned_flag,
+                               Interface* interface) {
+  // AstRawStrings are unambiguous, i.e., the same string is always represented
+  // by the same AstRawString*.
+  // FIXME(marja): fix the type of Lookup.
+  Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->hash(),
+                                 true, ZoneAllocationPolicy(zone()));
   if (p->value == NULL) {
     // The variable has not been declared yet -> insert it.
-    ASSERT(p->key == name.location());
-    p->value = new(zone()) Variable(scope,
-                                    name,
-                                    mode,
-                                    is_valid_lhs,
-                                    kind,
-                                    initialization_flag,
-                                    interface);
+    DCHECK(p->key == name);
+    p->value = new (zone())
+        Variable(scope, name, mode, is_valid_lhs, kind, initialization_flag,
+                 maybe_assigned_flag, interface);
   }
   return reinterpret_cast<Variable*>(p->value);
 }
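
Keying the map on AstRawString* works because AstValueFactory interns strings: each distinct spelling maps to exactly one AstRawString object, so ZoneHashMap::PointersMatch can replace content comparison. A small sketch of that interning invariant using standard containers (TinyInterner is a made-up stand-in, not AstValueFactory):

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <unordered_set>

    // One canonical object per distinct string content; std::unordered_set
    // nodes are stable, so the returned pointers stay valid across inserts.
    struct TinyInterner {
      const std::string* Intern(const std::string& s) {
        return &*pool_.insert(s).first;
      }
      std::unordered_set<std::string> pool_;
    };

    int main() {
      TinyInterner interner;
      const std::string* a = interner.Intern("x");
      const std::string* b = interner.Intern(std::string("x"));  // Same content.
      std::unordered_map<const std::string*, int> variables;  // Pointer keys.
      variables[a] = 1;
      std::printf("same_pointer=%d found_via_b=%d\n", a == b ? 1 : 0,
                  variables.count(b) ? 1 : 0);
    }
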
 
 
-Variable* VariableMap::Lookup(Handle<String> name) {
-  Entry* p = ZoneHashMap::Lookup(name.location(), name->Hash(), false,
-                                 ZoneAllocationPolicy(NULL));
+Variable* VariableMap::Lookup(const AstRawString* name) {
+  Entry* p = ZoneHashMap::Lookup(const_cast<AstRawString*>(name), name->hash(),
+                                 false, ZoneAllocationPolicy(NULL));
   if (p != NULL) {
-    ASSERT(*reinterpret_cast<String**>(p->key) == *name);
-    ASSERT(p->value != NULL);
+    DCHECK(reinterpret_cast<const AstRawString*>(p->key) == name);
+    DCHECK(p->value != NULL);
     return reinterpret_cast<Variable*>(p->value);
   }
   return NULL;
@@ -79,7 +67,8 @@
 // ----------------------------------------------------------------------------
 // Implementation of Scope
 
-Scope::Scope(Scope* outer_scope, ScopeType scope_type, Zone* zone)
+Scope::Scope(Scope* outer_scope, ScopeType scope_type,
+             AstValueFactory* ast_value_factory, Zone* zone)
     : isolate_(zone->isolate()),
       inner_scopes_(4, zone),
       variables_(zone),
@@ -92,17 +81,19 @@
                  (scope_type == MODULE_SCOPE || scope_type == GLOBAL_SCOPE)
                      ? Interface::NewModule(zone) : NULL),
       already_resolved_(false),
+      ast_value_factory_(ast_value_factory),
       zone_(zone) {
   SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null());
   // The outermost scope must be a global scope.
-  ASSERT(scope_type == GLOBAL_SCOPE || outer_scope != NULL);
-  ASSERT(!HasIllegalRedeclaration());
+  DCHECK(scope_type == GLOBAL_SCOPE || outer_scope != NULL);
+  DCHECK(!HasIllegalRedeclaration());
 }
 
 
 Scope::Scope(Scope* inner_scope,
              ScopeType scope_type,
              Handle<ScopeInfo> scope_info,
+             AstValueFactory* value_factory,
              Zone* zone)
     : isolate_(zone->isolate()),
       inner_scopes_(4, zone),
@@ -114,6 +105,7 @@
       decls_(4, zone),
       interface_(NULL),
       already_resolved_(true),
+      ast_value_factory_(value_factory),
       zone_(zone) {
   SetDefaults(scope_type, NULL, scope_info);
   if (!scope_info.is_null()) {
@@ -126,7 +118,8 @@
 }
 
 
-Scope::Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone)
+Scope::Scope(Scope* inner_scope, const AstRawString* catch_variable_name,
+             AstValueFactory* value_factory, Zone* zone)
     : isolate_(zone->isolate()),
       inner_scopes_(1, zone),
       variables_(zone),
@@ -137,6 +130,7 @@
       decls_(0, zone),
       interface_(NULL),
       already_resolved_(true),
+      ast_value_factory_(value_factory),
       zone_(zone) {
   SetDefaults(CATCH_SCOPE, NULL, Handle<ScopeInfo>::null());
   AddInnerScope(inner_scope);
@@ -157,7 +151,7 @@
                         Handle<ScopeInfo> scope_info) {
   outer_scope_ = outer_scope;
   scope_type_ = scope_type;
-  scope_name_ = isolate_->factory()->empty_string();
+  scope_name_ = ast_value_factory_->empty_string();
   dynamics_ = NULL;
   receiver_ = NULL;
   function_ = NULL;
@@ -166,6 +160,8 @@
   scope_inside_with_ = false;
   scope_contains_with_ = false;
   scope_calls_eval_ = false;
+  asm_module_ = false;
+  asm_function_ = outer_scope != NULL && outer_scope->asm_module_;
   // Inherit the strict mode from the parent scope.
   strict_mode_ = outer_scope != NULL ? outer_scope->strict_mode_ : SLOPPY;
   outer_scope_calls_sloppy_eval_ = false;
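
asm_module_ marks a function scope validated as an asm.js module (presumably set when the parser sees a "use asm" directive), and the initialization above makes every function scope created directly inside such a module start out as an asm function. A tiny sketch of that propagation:

    #include <cstdio>

    // Hedged sketch of the flag propagation above: a scope created inside
    // an asm module starts life as an asm function.
    struct MiniScope {
      explicit MiniScope(const MiniScope* outer)
          : asm_module_(false),
            asm_function_(outer != nullptr && outer->asm_module_) {}
      bool asm_module_;
      bool asm_function_;
    };

    int main() {
      MiniScope module(nullptr);
      module.asm_module_ = true;  // e.g. after a "use asm" directive
      MiniScope inner(&module);
      std::printf("inner.asm_function=%d\n", inner.asm_function_ ? 1 : 0);
    }
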
@@ -199,6 +195,7 @@
       Scope* with_scope = new(zone) Scope(current_scope,
                                           WITH_SCOPE,
                                           Handle<ScopeInfo>::null(),
+                                          global_scope->ast_value_factory_,
                                           zone);
       current_scope = with_scope;
       // All the inner scopes are inside a with.
@@ -211,30 +208,38 @@
       current_scope = new(zone) Scope(current_scope,
                                       GLOBAL_SCOPE,
                                       Handle<ScopeInfo>(scope_info),
+                                      global_scope->ast_value_factory_,
                                       zone);
     } else if (context->IsModuleContext()) {
       ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info());
       current_scope = new(zone) Scope(current_scope,
                                       MODULE_SCOPE,
                                       Handle<ScopeInfo>(scope_info),
+                                      global_scope->ast_value_factory_,
                                       zone);
     } else if (context->IsFunctionContext()) {
       ScopeInfo* scope_info = context->closure()->shared()->scope_info();
       current_scope = new(zone) Scope(current_scope,
                                       FUNCTION_SCOPE,
                                       Handle<ScopeInfo>(scope_info),
+                                      global_scope->ast_value_factory_,
                                       zone);
+      if (scope_info->IsAsmFunction()) current_scope->asm_function_ = true;
+      if (scope_info->IsAsmModule()) current_scope->asm_module_ = true;
     } else if (context->IsBlockContext()) {
       ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
       current_scope = new(zone) Scope(current_scope,
                                       BLOCK_SCOPE,
                                       Handle<ScopeInfo>(scope_info),
+                                      global_scope->ast_value_factory_,
                                       zone);
     } else {
-      ASSERT(context->IsCatchContext());
+      DCHECK(context->IsCatchContext());
       String* name = String::cast(context->extension());
-      current_scope = new(zone) Scope(
-          current_scope, Handle<String>(name), zone);
+      current_scope = new (zone) Scope(
+          current_scope,
+          global_scope->ast_value_factory_->GetString(Handle<String>(name)),
+          global_scope->ast_value_factory_, zone);
     }
     if (contains_with) current_scope->RecordWithStatement();
     if (innermost_scope == NULL) innermost_scope = current_scope;
@@ -253,7 +258,7 @@
 
 
 bool Scope::Analyze(CompilationInfo* info) {
-  ASSERT(info->function() != NULL);
+  DCHECK(info->function() != NULL);
   Scope* scope = info->function()->scope();
   Scope* top = scope;
 
@@ -266,7 +271,8 @@
 
   // Allocate the variables.
   {
-    AstNodeFactory<AstNullVisitor> ast_node_factory(info->zone());
+    AstNodeFactory<AstNullVisitor> ast_node_factory(
+        info->zone(), info->ast_value_factory(), info->ast_node_id_gen());
     if (!top->AllocateVariables(info, &ast_node_factory)) return false;
   }
 
@@ -289,7 +295,7 @@
 
 
 void Scope::Initialize() {
-  ASSERT(!already_resolved());
+  DCHECK(!already_resolved());
 
   // Add this scope as a new inner scope of the outer scope.
   if (outer_scope_ != NULL) {
@@ -310,7 +316,7 @@
   if (is_declaration_scope()) {
     Variable* var =
         variables_.Declare(this,
-                           isolate_->factory()->this_string(),
+                           ast_value_factory_->this_string(),
                            VAR,
                            false,
                            Variable::THIS,
@@ -318,7 +324,7 @@
     var->AllocateTo(Variable::PARAMETER, -1);
     receiver_ = var;
   } else {
-    ASSERT(outer_scope() != NULL);
+    DCHECK(outer_scope() != NULL);
     receiver_ = outer_scope()->receiver();
   }
 
@@ -327,7 +333,7 @@
     // Note that it might never be accessed, in which case it won't be
     // allocated during variable allocation.
     variables_.Declare(this,
-                       isolate_->factory()->arguments_string(),
+                       ast_value_factory_->arguments_string(),
                        VAR,
                        true,
                        Variable::ARGUMENTS,
@@ -337,10 +343,10 @@
 
 
 Scope* Scope::FinalizeBlockScope() {
-  ASSERT(is_block_scope());
-  ASSERT(internals_.is_empty());
-  ASSERT(temps_.is_empty());
-  ASSERT(params_.is_empty());
+  DCHECK(is_block_scope());
+  DCHECK(internals_.is_empty());
+  DCHECK(temps_.is_empty());
+  DCHECK(params_.is_empty());
 
   if (num_var_or_const() > 0) return this;
 
@@ -366,45 +372,55 @@
 }
 
 
-Variable* Scope::LookupLocal(Handle<String> name) {
+Variable* Scope::LookupLocal(const AstRawString* name) {
   Variable* result = variables_.Lookup(name);
   if (result != NULL || scope_info_.is_null()) {
     return result;
   }
+  // The Scope is backed by ScopeInfo. This means it cannot operate in a
+  // heap-independent mode, and all strings must be internalized immediately. So
+  // it's ok to get the Handle<String> here.
+  Handle<String> name_handle = name->string();
   // If we have a serialized scope info, we might find the variable there.
   // There should be no local slot with the given name.
-  ASSERT(scope_info_->StackSlotIndex(*name) < 0);
+  DCHECK(scope_info_->StackSlotIndex(*name_handle) < 0);
 
   // Check context slot lookup.
   VariableMode mode;
   Variable::Location location = Variable::CONTEXT;
   InitializationFlag init_flag;
-  int index = ScopeInfo::ContextSlotIndex(scope_info_, name, &mode, &init_flag);
+  MaybeAssignedFlag maybe_assigned_flag;
+  int index = ScopeInfo::ContextSlotIndex(scope_info_, name_handle, &mode,
+                                          &init_flag, &maybe_assigned_flag);
   if (index < 0) {
     // Check parameters.
-    index = scope_info_->ParameterIndex(*name);
+    index = scope_info_->ParameterIndex(*name_handle);
     if (index < 0) return NULL;
 
     mode = DYNAMIC;
     location = Variable::LOOKUP;
     init_flag = kCreatedInitialized;
+    // Be conservative and flag parameters as maybe assigned. Better information
+    // would require ScopeInfo to serialize the maybe_assigned bit also for
+    // parameters.
+    maybe_assigned_flag = kMaybeAssigned;
   }
 
   Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
-                                     init_flag);
+                                     init_flag, maybe_assigned_flag);
   var->AllocateTo(location, index);
   return var;
 }
 
 
-Variable* Scope::LookupFunctionVar(Handle<String> name,
+Variable* Scope::LookupFunctionVar(const AstRawString* name,
                                    AstNodeFactory<AstNullVisitor>* factory) {
-  if (function_ != NULL && function_->proxy()->name().is_identical_to(name)) {
+  if (function_ != NULL && function_->proxy()->raw_name() == name) {
     return function_->proxy()->var();
   } else if (!scope_info_.is_null()) {
     // If we are backed by a scope info, try to lookup the variable there.
     VariableMode mode;
-    int index = scope_info_->FunctionContextSlotIndex(*name, &mode);
+    int index = scope_info_->FunctionContextSlotIndex(*(name->string()), &mode);
     if (index < 0) return NULL;
     Variable* var = new(zone()) Variable(
         this, name, mode, true /* is valid LHS */,
@@ -421,7 +437,7 @@
 }
 
 
-Variable* Scope::Lookup(Handle<String> name) {
+Variable* Scope::Lookup(const AstRawString* name) {
   for (Scope* scope = this;
        scope != NULL;
        scope = scope->outer_scope()) {
@@ -432,32 +448,33 @@
 }
 
 
-void Scope::DeclareParameter(Handle<String> name, VariableMode mode) {
-  ASSERT(!already_resolved());
-  ASSERT(is_function_scope());
+Variable* Scope::DeclareParameter(const AstRawString* name, VariableMode mode) {
+  DCHECK(!already_resolved());
+  DCHECK(is_function_scope());
   Variable* var = variables_.Declare(this, name, mode, true, Variable::NORMAL,
                                      kCreatedInitialized);
   params_.Add(var, zone());
+  return var;
 }
 
 
-Variable* Scope::DeclareLocal(Handle<String> name,
-                              VariableMode mode,
+Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
                               InitializationFlag init_flag,
+                              MaybeAssignedFlag maybe_assigned_flag,
                               Interface* interface) {
-  ASSERT(!already_resolved());
+  DCHECK(!already_resolved());
   // This function handles VAR, LET, and CONST modes.  DYNAMIC variables are
   // introduced during variable allocation, INTERNAL variables are allocated
   // explicitly, and TEMPORARY variables are allocated via NewTemporary().
-  ASSERT(IsDeclaredVariableMode(mode));
+  DCHECK(IsDeclaredVariableMode(mode));
   ++num_var_or_const_;
-  return variables_.Declare(
-      this, name, mode, true, Variable::NORMAL, init_flag, interface);
+  return variables_.Declare(this, name, mode, true, Variable::NORMAL, init_flag,
+                            maybe_assigned_flag, interface);
 }
 
 
-Variable* Scope::DeclareDynamicGlobal(Handle<String> name) {
-  ASSERT(is_global_scope());
+Variable* Scope::DeclareDynamicGlobal(const AstRawString* name) {
+  DCHECK(is_global_scope());
   return variables_.Declare(this,
                             name,
                             DYNAMIC_GLOBAL,
@@ -479,8 +496,8 @@
 }
 
 
-Variable* Scope::NewInternal(Handle<String> name) {
-  ASSERT(!already_resolved());
+Variable* Scope::NewInternal(const AstRawString* name) {
+  DCHECK(!already_resolved());
   Variable* var = new(zone()) Variable(this,
                                        name,
                                        INTERNAL,
@@ -492,8 +509,8 @@
 }
 
 
-Variable* Scope::NewTemporary(Handle<String> name) {
-  ASSERT(!already_resolved());
+Variable* Scope::NewTemporary(const AstRawString* name) {
+  DCHECK(!already_resolved());
   Variable* var = new(zone()) Variable(this,
                                        name,
                                        TEMPORARY,
@@ -515,12 +532,12 @@
   if (!HasIllegalRedeclaration()) {
     illegal_redecl_ = expression;
   }
-  ASSERT(HasIllegalRedeclaration());
+  DCHECK(HasIllegalRedeclaration());
 }
 
 
 void Scope::VisitIllegalRedeclaration(AstVisitor* visitor) {
-  ASSERT(HasIllegalRedeclaration());
+  DCHECK(HasIllegalRedeclaration());
   illegal_redecl_->Accept(visitor);
 }
 
@@ -530,7 +547,7 @@
   for (int i = 0; i < length; i++) {
     Declaration* decl = decls_[i];
     if (decl->mode() != VAR) continue;
-    Handle<String> name = decl->proxy()->name();
+    const AstRawString* name = decl->proxy()->raw_name();
 
     // Iterate through all scopes until and including the declaration scope.
     Scope* previous = NULL;
@@ -566,14 +583,14 @@
 
 void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
                                          ZoneList<Variable*>* context_locals) {
-  ASSERT(stack_locals != NULL);
-  ASSERT(context_locals != NULL);
+  DCHECK(stack_locals != NULL);
+  DCHECK(context_locals != NULL);
 
   // Collect internals which are always allocated on the heap.
   for (int i = 0; i < internals_.length(); i++) {
     Variable* var = internals_[i];
     if (var->is_used()) {
-      ASSERT(var->IsContextSlot());
+      DCHECK(var->IsContextSlot());
       context_locals->Add(var, zone());
     }
   }
@@ -584,10 +601,10 @@
     Variable* var = temps_[i];
     if (var->is_used()) {
       if (var->IsContextSlot()) {
-        ASSERT(has_forced_context_allocation());
+        DCHECK(has_forced_context_allocation());
         context_locals->Add(var, zone());
       } else {
-        ASSERT(var->IsStackLocal());
+        DCHECK(var->IsStackLocal());
         stack_locals->Add(var, zone());
       }
     }
@@ -629,7 +646,7 @@
 
   // 2) Allocate module instances.
   if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
-    ASSERT(num_modules_ == 0);
+    DCHECK(num_modules_ == 0);
     AllocateModulesRecursively(this);
   }
 
@@ -698,11 +715,11 @@
 int Scope::ContextChainLength(Scope* scope) {
   int n = 0;
   for (Scope* s = this; s != scope; s = s->outer_scope_) {
-    ASSERT(s != NULL);  // scope must be in the scope chain
+    DCHECK(s != NULL);  // scope must be in the scope chain
     if (s->is_with_scope() || s->num_heap_slots() > 0) n++;
     // Catch and module scopes always have heap slots.
-    ASSERT(!s->is_catch_scope() || s->num_heap_slots() > 0);
-    ASSERT(!s->is_module_scope() || s->num_heap_slots() > 0);
+    DCHECK(!s->is_catch_scope() || s->num_heap_slots() > 0);
+    DCHECK(!s->is_module_scope() || s->num_heap_slots() > 0);
   }
   return n;
 }
@@ -743,7 +760,7 @@
     Scope* scope = inner_scopes_[i];
     int beg_pos = scope->start_position();
     int end_pos = scope->end_position();
-    ASSERT(beg_pos >= 0 && end_pos >= 0);
+    DCHECK(beg_pos >= 0 && end_pos >= 0);
     if (beg_pos <= position && position < end_pos) {
       scope->GetNestedScopeChain(chain, position);
       return;
@@ -773,9 +790,8 @@
 }
 
 
-static void PrintName(Handle<String> name) {
-  SmartArrayPointer<char> s = name->ToCString(DISALLOW_NULLS);
-  PrintF("%s", s.get());
+static void PrintName(const AstRawString* name) {
+  PrintF("%.*s", name->length(), name->raw_data());
 }
 
 
@@ -803,12 +819,18 @@
   if (var->is_used() || !var->IsUnallocated()) {
     Indent(indent, Variable::Mode2String(var->mode()));
     PrintF(" ");
-    PrintName(var->name());
+    PrintName(var->raw_name());
     PrintF(";  // ");
     PrintLocation(var);
+    bool comma = !var->IsUnallocated();
     if (var->has_forced_context_allocation()) {
-      if (!var->IsUnallocated()) PrintF(", ");
+      if (comma) PrintF(", ");
       PrintF("forced context allocation");
+      comma = true;
+    }
+    if (var->maybe_assigned() == kMaybeAssigned) {
+      if (comma) PrintF(", ");
+      PrintF("maybe assigned");
     }
     PrintF("\n");
   }
@@ -829,7 +851,7 @@
 
   // Print header.
   Indent(n0, Header(scope_type_));
-  if (scope_name_->length() > 0) {
+  if (!scope_name_->IsEmpty()) {
     PrintF(" ");
     PrintName(scope_name_);
   }
@@ -839,7 +861,7 @@
     PrintF(" (");
     for (int i = 0; i < params_.length(); i++) {
       if (i > 0) PrintF(", ");
-      PrintName(params_[i]->name());
+      PrintName(params_[i]->raw_name());
     }
     PrintF(")");
   }
@@ -849,7 +871,7 @@
   // Function name, if any (named function literals, only).
   if (function_ != NULL) {
     Indent(n1, "// (local) function name: ");
-    PrintName(function_->proxy()->name());
+    PrintName(function_->proxy()->raw_name());
     PrintF("\n");
   }
 
@@ -917,8 +939,8 @@
 #endif  // DEBUG
 
 
-Variable* Scope::NonLocal(Handle<String> name, VariableMode mode) {
-  if (dynamics_ == NULL) dynamics_ = new(zone()) DynamicScopePart(zone());
+Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
+  if (dynamics_ == NULL) dynamics_ = new (zone()) DynamicScopePart(zone());
   VariableMap* map = dynamics_->GetMap(mode);
   Variable* var = map->Lookup(name);
   if (var == NULL) {
@@ -938,10 +960,10 @@
 }
 
 
-Variable* Scope::LookupRecursive(Handle<String> name,
+Variable* Scope::LookupRecursive(VariableProxy* proxy,
                                  BindingKind* binding_kind,
                                  AstNodeFactory<AstNullVisitor>* factory) {
-  ASSERT(binding_kind != NULL);
+  DCHECK(binding_kind != NULL);
   if (already_resolved() && is_with_scope()) {
     // Short-cut: if the scope is deserialized from a scope info, variable
     // allocation is already fixed.  We can simply return with dynamic lookup.
@@ -950,7 +972,7 @@
   }
 
   // Try to find the variable in this scope.
-  Variable* var = LookupLocal(name);
+  Variable* var = LookupLocal(proxy->raw_name());
 
   // We found a variable and we are done. (Even if there is an 'eval' in
   // this scope which introduces the same variable again, the resulting
@@ -964,26 +986,27 @@
   // if any. We can do this for all scopes, since the function variable is
   // only present - if at all - for function scopes.
   *binding_kind = UNBOUND;
-  var = LookupFunctionVar(name, factory);
+  var = LookupFunctionVar(proxy->raw_name(), factory);
   if (var != NULL) {
     *binding_kind = BOUND;
   } else if (outer_scope_ != NULL) {
-    var = outer_scope_->LookupRecursive(name, binding_kind, factory);
+    var = outer_scope_->LookupRecursive(proxy, binding_kind, factory);
     if (*binding_kind == BOUND && (is_function_scope() || is_with_scope())) {
       var->ForceContextAllocation();
     }
   } else {
-    ASSERT(is_global_scope());
+    DCHECK(is_global_scope());
   }
 
   if (is_with_scope()) {
-    ASSERT(!already_resolved());
+    DCHECK(!already_resolved());
     // The current scope is a with scope, so the variable binding cannot be
     // statically resolved. However, note that it was necessary to do a lookup
     // in the outer scope anyway, because if a binding exists in an outer scope,
     // the associated variable has to be marked as potentially being accessed
     // from inside of an inner with scope (the property may not be in the 'with'
     // object).
+    if (var != NULL && proxy->is_assigned()) var->set_maybe_assigned();
     *binding_kind = DYNAMIC_LOOKUP;
     return NULL;
   } else if (calls_sloppy_eval()) {
@@ -1004,7 +1027,7 @@
 bool Scope::ResolveVariable(CompilationInfo* info,
                             VariableProxy* proxy,
                             AstNodeFactory<AstNullVisitor>* factory) {
-  ASSERT(info->global_scope()->is_global_scope());
+  DCHECK(info->global_scope()->is_global_scope());
 
   // If the proxy is already resolved there's nothing to do
   // (functions and consts may be resolved by the parser).
@@ -1012,7 +1035,7 @@
 
   // Otherwise, try to resolve the variable.
   BindingKind binding_kind;
-  Variable* var = LookupRecursive(proxy->name(), &binding_kind, factory);
+  Variable* var = LookupRecursive(proxy, &binding_kind, factory);
   switch (binding_kind) {
     case BOUND:
       // We found a variable binding.
@@ -1024,53 +1047,57 @@
       // scope which was not promoted to a context; this can happen if we use
       // the debugger to evaluate arbitrary expressions at a break point).
       if (var->IsGlobalObjectProperty()) {
-        var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
+        var = NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
       } else if (var->is_dynamic()) {
-        var = NonLocal(proxy->name(), DYNAMIC);
+        var = NonLocal(proxy->raw_name(), DYNAMIC);
       } else {
         Variable* invalidated = var;
-        var = NonLocal(proxy->name(), DYNAMIC_LOCAL);
+        var = NonLocal(proxy->raw_name(), DYNAMIC_LOCAL);
         var->set_local_if_not_shadowed(invalidated);
       }
       break;
 
     case UNBOUND:
       // No binding has been found. Declare a variable on the global object.
-      var = info->global_scope()->DeclareDynamicGlobal(proxy->name());
+      var = info->global_scope()->DeclareDynamicGlobal(proxy->raw_name());
       break;
 
     case UNBOUND_EVAL_SHADOWED:
       // No binding has been found. But some scope makes a sloppy 'eval' call.
-      var = NonLocal(proxy->name(), DYNAMIC_GLOBAL);
+      var = NonLocal(proxy->raw_name(), DYNAMIC_GLOBAL);
       break;
 
     case DYNAMIC_LOOKUP:
       // The variable could not be resolved statically.
-      var = NonLocal(proxy->name(), DYNAMIC);
+      var = NonLocal(proxy->raw_name(), DYNAMIC);
       break;
   }
 
-  ASSERT(var != NULL);
+  DCHECK(var != NULL);
+  if (proxy->is_assigned()) var->set_maybe_assigned();
 
   if (FLAG_harmony_scoping && strict_mode() == STRICT &&
-      var->is_const_mode() && proxy->IsLValue()) {
+      var->is_const_mode() && proxy->is_assigned()) {
     // Assignment to const. Throw a syntax error.
     MessageLocation location(
         info->script(), proxy->position(), proxy->position());
     Isolate* isolate = info->isolate();
     Factory* factory = isolate->factory();
     Handle<JSArray> array = factory->NewJSArray(0);
-    Handle<Object> result =
+    Handle<Object> error;
+    MaybeHandle<Object> maybe_error =
         factory->NewSyntaxError("harmony_const_assign", array);
-    isolate->Throw(*result, &location);
+    if (maybe_error.ToHandle(&error)) isolate->Throw(*error, &location);
     return false;
   }
 
   if (FLAG_harmony_modules) {
     bool ok;
 #ifdef DEBUG
-    if (FLAG_print_interface_details)
-      PrintF("# Resolve %s:\n", var->name()->ToAsciiArray());
+    if (FLAG_print_interface_details) {
+      PrintF("# Resolve %.*s:\n", var->raw_name()->length(),
+             var->raw_name()->raw_data());
+    }
 #endif
     proxy->interface()->Unify(var->interface(), zone(), &ok);
     if (!ok) {
@@ -1092,9 +1119,10 @@
       Factory* factory = isolate->factory();
       Handle<JSArray> array = factory->NewJSArray(1);
       JSObject::SetElement(array, 0, var->name(), NONE, STRICT).Assert();
-      Handle<Object> result =
+      Handle<Object> error;
+      MaybeHandle<Object> maybe_error =
           factory->NewSyntaxError("module_type_error", array);
-      isolate->Throw(*result, &location);
+      if (maybe_error.ToHandle(&error)) isolate->Throw(*error, &location);
       return false;
     }
   }
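
Both syntax-error paths above move from a bare Handle<Object> to MaybeHandle<Object>, throwing only when ToHandle() succeeds, so a failed allocation inside NewSyntaxError is never dereferenced. A sketch of that checked-result idiom, with std::optional standing in for MaybeHandle:

    #include <cstdio>
    #include <optional>
    #include <string>

    // Hypothetical stand-in: a result that may be empty on failure.
    std::optional<std::string> NewSyntaxError(bool can_allocate) {
      if (!can_allocate) return std::nullopt;  // e.g. allocation failure
      return std::string("harmony_const_assign");
    }

    void Report(bool can_allocate) {
      // Mirrors `if (maybe_error.ToHandle(&error)) isolate->Throw(...)`:
      // the value is touched only when extraction succeeds.
      if (auto maybe_error = NewSyntaxError(can_allocate)) {
        std::printf("throw %s\n", maybe_error->c_str());
      }
    }

    int main() {
      Report(true);   // throws
      Report(false);  // skips the throw; no empty result is dereferenced
    }
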
@@ -1108,7 +1136,7 @@
 bool Scope::ResolveVariablesRecursively(
     CompilationInfo* info,
     AstNodeFactory<AstNullVisitor>* factory) {
-  ASSERT(info->global_scope()->is_global_scope());
+  DCHECK(info->global_scope()->is_global_scope());
 
   // Resolve unresolved variables for this scope.
   for (int i = 0; i < unresolved_.length(); i++) {
@@ -1125,7 +1153,7 @@
 }
 
 
-bool Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
+void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval) {
   if (outer_scope_calls_sloppy_eval) {
     outer_scope_calls_sloppy_eval_ = true;
   }
@@ -1133,16 +1161,18 @@
   bool calls_sloppy_eval =
       this->calls_sloppy_eval() || outer_scope_calls_sloppy_eval_;
   for (int i = 0; i < inner_scopes_.length(); i++) {
-    Scope* inner_scope = inner_scopes_[i];
-    if (inner_scope->PropagateScopeInfo(calls_sloppy_eval)) {
+    Scope* inner = inner_scopes_[i];
+    inner->PropagateScopeInfo(calls_sloppy_eval);
+    if (inner->scope_calls_eval_ || inner->inner_scope_calls_eval_) {
       inner_scope_calls_eval_ = true;
     }
-    if (inner_scope->force_eager_compilation_) {
+    if (inner->force_eager_compilation_) {
       force_eager_compilation_ = true;
     }
+    if (asm_module_ && inner->scope_type() == FUNCTION_SCOPE) {
+      inner->asm_function_ = true;
+    }
   }
-
-  return scope_calls_eval_ || inner_scope_calls_eval_;
 }
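
PropagateScopeInfo now returns void: the parent reads the child's eval flags directly after the recursive call, and pushes asm_function_ down into function scopes nested inside an asm module. A minimal sketch of that push-down/read-back pattern on a hypothetical tree (the scope-type check and eager-compilation flag are omitted):

    #include <vector>

    // Hypothetical node carrying only the flags touched in this hunk.
    struct Node {
      std::vector<Node*> inner;
      bool calls_eval = false;
      bool inner_calls_eval = false;
      bool asm_module = false;
      bool asm_function = false;
    };

    void Propagate(Node* n) {
      for (Node* child : n->inner) {
        Propagate(child);
        // Read the child's state directly instead of threading a bool back.
        if (child->calls_eval || child->inner_calls_eval)
          n->inner_calls_eval = true;
        // Push the asm-module property down one level.
        if (n->asm_module) child->asm_function = true;
      }
    }
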
 
 
@@ -1150,7 +1180,7 @@
   // Give var a read/write use if there is a chance it might be accessed
   // via an eval() call.  This is only possible if the variable has a
   // visible name.
-  if ((var->is_this() || var->name()->length() > 0) &&
+  if ((var->is_this() || !var->raw_name()->IsEmpty()) &&
       (var->has_forced_context_allocation() ||
        scope_calls_eval_ ||
        inner_scope_calls_eval_ ||
@@ -1159,7 +1189,8 @@
        is_block_scope() ||
        is_module_scope() ||
        is_global_scope())) {
-    var->set_is_used(true);
+    var->set_is_used();
+    if (scope_calls_eval_ || inner_scope_calls_eval_) var->set_maybe_assigned();
   }
   // Global variables do not need to be allocated.
   return !var->IsGlobalObjectProperty() && var->is_used();
@@ -1210,9 +1241,9 @@
 
 
 void Scope::AllocateParameterLocals() {
-  ASSERT(is_function_scope());
-  Variable* arguments = LookupLocal(isolate_->factory()->arguments_string());
-  ASSERT(arguments != NULL);  // functions have 'arguments' declared implicitly
+  DCHECK(is_function_scope());
+  Variable* arguments = LookupLocal(ast_value_factory_->arguments_string());
+  DCHECK(arguments != NULL);  // functions have 'arguments' declared implicitly
 
   bool uses_sloppy_arguments = false;
 
@@ -1242,7 +1273,7 @@
   // order is relevant!
   for (int i = params_.length() - 1; i >= 0; --i) {
     Variable* var = params_[i];
-    ASSERT(var->scope() == this);
+    DCHECK(var->scope() == this);
     if (uses_sloppy_arguments || has_forced_context_allocation()) {
       // Force context allocation of the parameter.
       var->ForceContextAllocation();
@@ -1250,12 +1281,12 @@
 
     if (MustAllocate(var)) {
       if (MustAllocateInContext(var)) {
-        ASSERT(var->IsUnallocated() || var->IsContextSlot());
+        DCHECK(var->IsUnallocated() || var->IsContextSlot());
         if (var->IsUnallocated()) {
           AllocateHeapSlot(var);
         }
       } else {
-        ASSERT(var->IsUnallocated() || var->IsParameter());
+        DCHECK(var->IsUnallocated() || var->IsParameter());
         if (var->IsUnallocated()) {
           var->AllocateTo(Variable::PARAMETER, i);
         }
@@ -1266,8 +1297,8 @@
 
 
 void Scope::AllocateNonParameterLocal(Variable* var) {
-  ASSERT(var->scope() == this);
-  ASSERT(!var->IsVariable(isolate_->factory()->dot_result_string()) ||
+  DCHECK(var->scope() == this);
+  DCHECK(!var->IsVariable(isolate_->factory()->dot_result_string()) ||
          !var->IsStackLocal());
   if (var->IsUnallocated() && MustAllocate(var)) {
     if (MustAllocateInContext(var)) {
@@ -1344,18 +1375,17 @@
   }
 
   // Allocation done.
-  ASSERT(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
+  DCHECK(num_heap_slots_ == 0 || num_heap_slots_ >= Context::MIN_CONTEXT_SLOTS);
 }
 
 
 void Scope::AllocateModulesRecursively(Scope* host_scope) {
   if (already_resolved()) return;
   if (is_module_scope()) {
-    ASSERT(interface_->IsFrozen());
-    Handle<String> name = isolate_->factory()->InternalizeOneByteString(
-        STATIC_ASCII_VECTOR(".module"));
-    ASSERT(module_var_ == NULL);
-    module_var_ = host_scope->NewInternal(name);
+    DCHECK(interface_->IsFrozen());
+    DCHECK(module_var_ == NULL);
+    module_var_ =
+        host_scope->NewInternal(ast_value_factory_->dot_module_string());
     ++host_scope->num_modules_;
   }
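
AllocateModulesRecursively now fetches ".module" from the AstValueFactory's cached constants instead of internalizing the literal on every call. The general shape of such a factory-held constant, with hypothetical names:

    #include <string>

    // Hypothetical factory caching well-known names, in the spirit of
    // AstValueFactory's dot_module_string() and arguments_string().
    class NameFactory {
     public:
      const std::string* dot_module_string() const { return &dot_module_; }
     private:
      std::string dot_module_ = ".module";
    };

    // Hot path: a pointer read, no per-call allocation or hashing.
    const std::string* ModuleName(const NameFactory& factory) {
      return factory.dot_module_string();
    }
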
 
diff --git a/src/scopes.h b/src/scopes.h
index 4486921..06c6c99 100644
--- a/src/scopes.h
+++ b/src/scopes.h
@@ -21,15 +21,13 @@
 
   virtual ~VariableMap();
 
-  Variable* Declare(Scope* scope,
-                    Handle<String> name,
-                    VariableMode mode,
-                    bool is_valid_lhs,
-                    Variable::Kind kind,
+  Variable* Declare(Scope* scope, const AstRawString* name, VariableMode mode,
+                    bool is_valid_lhs, Variable::Kind kind,
                     InitializationFlag initialization_flag,
+                    MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
                     Interface* interface = Interface::NewValue());
 
-  Variable* Lookup(Handle<String> name);
+  Variable* Lookup(const AstRawString* name);
 
   Zone* zone() const { return zone_; }
 
@@ -51,7 +49,7 @@
 
   VariableMap* GetMap(VariableMode mode) {
     int index = mode - DYNAMIC;
-    ASSERT(index >= 0 && index < 3);
+    DCHECK(index >= 0 && index < 3);
     return maps_[index];
   }
 
@@ -74,7 +72,8 @@
   // ---------------------------------------------------------------------------
   // Construction
 
-  Scope(Scope* outer_scope, ScopeType scope_type, Zone* zone);
+  Scope(Scope* outer_scope, ScopeType scope_type,
+        AstValueFactory* value_factory, Zone* zone);
 
   // Compute top scope and allocate variables. For lazy compilation the top
   // scope only contains the single lazily compiled function, so this
@@ -85,7 +84,9 @@
                                       Zone* zone);
 
   // The scope name is only used for printing/debugging.
-  void SetScopeName(Handle<String> scope_name) { scope_name_ = scope_name; }
+  void SetScopeName(const AstRawString* scope_name) {
+    scope_name_ = scope_name;
+  }
 
   void Initialize();
 
@@ -100,55 +101,55 @@
   // Declarations
 
   // Lookup a variable in this scope. Returns the variable or NULL if not found.
-  Variable* LookupLocal(Handle<String> name);
+  Variable* LookupLocal(const AstRawString* name);
 
   // This lookup corresponds to a lookup in the "intermediate" scope sitting
   // between this scope and the outer scope. (ECMA-262, 3rd ed., requires that
   // the name of named function literal is kept in an intermediate scope
   // in between this scope and the next outer scope.)
-  Variable* LookupFunctionVar(Handle<String> name,
+  Variable* LookupFunctionVar(const AstRawString* name,
                               AstNodeFactory<AstNullVisitor>* factory);
 
   // Lookup a variable in this scope or outer scopes.
   // Returns the variable or NULL if not found.
-  Variable* Lookup(Handle<String> name);
+  Variable* Lookup(const AstRawString* name);
 
   // Declare the function variable for a function literal. This variable
   // is in an intermediate scope between this function scope and the
   // outer scope. Only possible for function scopes; at most one variable.
   void DeclareFunctionVar(VariableDeclaration* declaration) {
-    ASSERT(is_function_scope());
+    DCHECK(is_function_scope());
     function_ = declaration;
   }
 
   // Declare a parameter in this scope.  When there are duplicated
   // parameters the rightmost one 'wins'.  However, the implementation
   // expects all parameters to be declared and from left to right.
-  void DeclareParameter(Handle<String> name, VariableMode mode);
+  Variable* DeclareParameter(const AstRawString* name, VariableMode mode);
 
   // Declare a local variable in this scope. If the variable has been
   // declared before, the previously declared variable is returned.
-  Variable* DeclareLocal(Handle<String> name,
-                         VariableMode mode,
+  Variable* DeclareLocal(const AstRawString* name, VariableMode mode,
                          InitializationFlag init_flag,
+                         MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
                          Interface* interface = Interface::NewValue());
 
   // Declare an implicit global variable in this scope which must be a
   // global scope.  The variable was introduced (possibly from an inner
   // scope) by a reference to an unresolved variable with no intervening
   // with statements or eval calls.
-  Variable* DeclareDynamicGlobal(Handle<String> name);
+  Variable* DeclareDynamicGlobal(const AstRawString* name);
 
   // Create a new unresolved variable.
   template<class Visitor>
   VariableProxy* NewUnresolved(AstNodeFactory<Visitor>* factory,
-                               Handle<String> name,
+                               const AstRawString* name,
                                Interface* interface = Interface::NewValue(),
                                int position = RelocInfo::kNoPosition) {
     // Note that we must not share the unresolved variables with
     // the same name because they may be removed selectively via
     // RemoveUnresolved().
-    ASSERT(!already_resolved());
+    DCHECK(!already_resolved());
     VariableProxy* proxy =
         factory->NewVariableProxy(name, false, interface, position);
     unresolved_.Add(proxy, zone_);
@@ -167,13 +168,13 @@
   // for printing and cannot be used to find the variable.  In particular,
   // the only way to get hold of the temporary is by keeping the Variable*
   // around.
-  Variable* NewInternal(Handle<String> name);
+  Variable* NewInternal(const AstRawString* name);
 
   // Creates a new temporary variable in this scope.  The name is only used
   // for printing and cannot be used to find the variable.  In particular,
   // the only way to get hold of the temporary is by keeping the Variable*
   // around.  The name should not clash with legitimate variable names.
-  Variable* NewTemporary(Handle<String> name);
+  Variable* NewTemporary(const AstRawString* name);
 
   // Adds the specific declaration node to the list of declarations in
   // this scope. The declarations are processed as part of entering
@@ -213,6 +214,9 @@
   // Set the strict mode flag (unless disabled by a global flag).
   void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
 
+  // Set the ASM module flag.
+  void SetAsmModule() { asm_module_ = true; }
+
   // Position in the source where this scope begins and ends.
   //
   // * For the scope of a with statement
@@ -246,7 +250,7 @@
 
   // In some cases we want to force context allocation for a whole scope.
   void ForceContextAllocation() {
-    ASSERT(!already_resolved());
+    DCHECK(!already_resolved());
     force_context_allocation_ = true;
   }
   bool has_forced_context_allocation() const {
@@ -280,6 +284,8 @@
   bool outer_scope_calls_sloppy_eval() const {
     return outer_scope_calls_sloppy_eval_;
   }
+  bool asm_module() const { return asm_module_; }
+  bool asm_function() const { return asm_function_; }
 
   // Is this scope inside a with statement.
   bool inside_with() const { return scope_inside_with_; }
@@ -301,14 +307,14 @@
   // The variable holding the function literal for named function
   // literals, or NULL.  Only valid for function scopes.
   VariableDeclaration* function() const {
-    ASSERT(is_function_scope());
+    DCHECK(is_function_scope());
     return function_;
   }
 
   // Parameters. The left-most parameter has index 0.
   // Only valid for function scopes.
   Variable* parameter(int index) const {
-    ASSERT(is_function_scope());
+    DCHECK(is_function_scope());
     return params_[index];
   }
 
@@ -390,7 +396,7 @@
 
   // ---------------------------------------------------------------------------
   // Strict mode support.
-  bool IsDeclared(Handle<String> name) {
+  bool IsDeclared(const AstRawString* name) {
     // During formal parameter list parsing the scope only contains
     // two variables inserted at initialization: "this" and "arguments".
     // "this" is an invalid parameter name and "arguments" is invalid parameter
@@ -421,7 +427,7 @@
   ScopeType scope_type_;
 
   // Debugging support.
-  Handle<String> scope_name_;
+  const AstRawString* scope_name_;
 
   // The variables declared in this scope:
   //
@@ -462,6 +468,10 @@
   // This scope or a nested catch scope or with scope contains an 'eval' call. At
   // the 'eval' call site this scope is the declaration scope.
   bool scope_calls_eval_;
+  // This scope contains a "use asm" annotation.
+  bool asm_module_;
+  // This scope's outer context is an asm module.
+  bool asm_function_;
   // The strict mode of this scope.
   StrictMode strict_mode_;
   // Source positions.
@@ -497,7 +507,7 @@
 
   // Create a non-local variable with a given name.
   // These variables are looked up dynamically at runtime.
-  Variable* NonLocal(Handle<String> name, VariableMode mode);
+  Variable* NonLocal(const AstRawString* name, VariableMode mode);
 
   // Variable resolution.
   // Possible results of a recursive variable lookup telling if and how a
@@ -548,7 +558,7 @@
   // Lookup a variable reference given by name recursively starting with this
   // scope. If the code is executed because of a call to 'eval', the context
   // parameter should be set to the calling context of 'eval'.
-  Variable* LookupRecursive(Handle<String> name,
+  Variable* LookupRecursive(VariableProxy* proxy,
                             BindingKind* binding_kind,
                             AstNodeFactory<AstNullVisitor>* factory);
   MUST_USE_RESULT
@@ -560,7 +570,7 @@
                                    AstNodeFactory<AstNullVisitor>* factory);
 
   // Scope analysis.
-  bool PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
+  void PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
   bool HasTrivialContext() const;
 
   // Predicates.
@@ -592,10 +602,12 @@
  private:
   // Construct a scope based on the scope info.
   Scope(Scope* inner_scope, ScopeType type, Handle<ScopeInfo> scope_info,
-        Zone* zone);
+        AstValueFactory* value_factory, Zone* zone);
 
   // Construct a catch scope with a binding for the name.
-  Scope(Scope* inner_scope, Handle<String> catch_variable_name, Zone* zone);
+  Scope(Scope* inner_scope,
+        const AstRawString* catch_variable_name,
+        AstValueFactory* value_factory, Zone* zone);
 
   void AddInnerScope(Scope* inner_scope) {
     if (inner_scope != NULL) {
@@ -608,6 +620,7 @@
                    Scope* outer_scope,
                    Handle<ScopeInfo> scope_info);
 
+  AstValueFactory* ast_value_factory_;
   Zone* zone_;
 };
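
The header changes replace Handle<String> keys with const AstRawString* across the scope API. AstRawStrings come from a single AstValueFactory which, to my understanding, deduplicates them, so equal names share one pointer and the variable maps can key on pointer identity. A hedged sketch of that interning pattern with standard containers:

    #include <cstdio>
    #include <string>
    #include <unordered_map>
    #include <unordered_set>

    // Hypothetical stand-in for AstValueFactory / AstRawString.
    class StringInterner {
     public:
      // Equal contents always yield the same canonical pointer.
      const std::string* Intern(const std::string& s) {
        return &*pool_.insert(s).first;
      }
     private:
      std::unordered_set<std::string> pool_;  // node-based; pointers stay valid
    };

    int main() {
      StringInterner factory;
      const std::string* a = factory.Intern("arguments");
      const std::string* b = factory.Intern("arguments");
      // Keying on the pointer works, much as the rewritten VariableMap
      // keys on const AstRawString*.
      std::unordered_map<const std::string*, int> variables;
      variables[a] = 1;
      std::printf("%d %d\n", a == b, variables[b]);  // prints "1 1"
    }
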
 
diff --git a/src/serialize.cc b/src/serialize.cc
index 4e5699c..dce62fe 100644
--- a/src/serialize.cc
+++ b/src/serialize.cc
@@ -6,18 +6,22 @@
 
 #include "src/accessors.h"
 #include "src/api.h"
+#include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
+#include "src/code-stubs.h"
 #include "src/deoptimizer.h"
 #include "src/execution.h"
 #include "src/global-handles.h"
-#include "src/ic-inl.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 #include "src/natives.h"
-#include "src/platform.h"
+#include "src/objects.h"
 #include "src/runtime.h"
 #include "src/serialize.h"
 #include "src/snapshot.h"
-#include "src/stub-cache.h"
+#include "src/snapshot-source-sink.h"
 #include "src/v8threads.h"
+#include "src/version.h"
 
 namespace v8 {
 namespace internal {
@@ -91,12 +95,14 @@
                                  TypeCode type,
                                  uint16_t id,
                                  const char* name) {
-  ASSERT_NE(NULL, address);
+  DCHECK_NE(NULL, address);
   ExternalReferenceEntry entry;
   entry.address = address;
   entry.code = EncodeExternal(type, id);
   entry.name = name;
-  ASSERT_NE(0, entry.code);
+  DCHECK_NE(0, entry.code);
+  // Assert that the code is added in ascending order to rule out duplicates.
+  DCHECK((size() == 0) || (code(size() - 1) < entry.code));
   refs_.Add(entry);
   if (id > max_id_[type]) max_id_[type] = id;
 }
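
The added DCHECK enforces that encoded reference codes arrive in strictly ascending order, which rules out duplicates with one comparison against the previous entry instead of a table scan. The invariant in isolation:

    #include <cassert>
    #include <vector>

    class CodeTable {
     public:
      void Add(unsigned code) {
        // Strictly ascending insertion implies uniqueness, at O(1) per entry.
        assert(codes_.empty() || codes_.back() < code);
        codes_.push_back(code);
      }
     private:
      std::vector<unsigned> codes_;
    };

    int main() {
      CodeTable t;
      t.Add(1); t.Add(2); t.Add(5);
      // t.Add(2);  // would fire the assert: out of order, hence a duplicate
    }
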
@@ -107,6 +113,144 @@
     max_id_[type_code] = 0;
   }
 
+  // Miscellaneous
+  Add(ExternalReference::roots_array_start(isolate).address(),
+      "Heap::roots_array_start()");
+  Add(ExternalReference::address_of_stack_limit(isolate).address(),
+      "StackGuard::address_of_jslimit()");
+  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
+      "StackGuard::address_of_real_jslimit()");
+  Add(ExternalReference::new_space_start(isolate).address(),
+      "Heap::NewSpaceStart()");
+  Add(ExternalReference::new_space_mask(isolate).address(),
+      "Heap::NewSpaceMask()");
+  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
+      "Heap::NewSpaceAllocationLimitAddress()");
+  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
+      "Heap::NewSpaceAllocationTopAddress()");
+  Add(ExternalReference::debug_break(isolate).address(), "Debug::Break()");
+  Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
+      "Debug::step_in_fp_addr()");
+  Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
+      "mod_two_doubles");
+  // Keyed lookup cache.
+  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
+      "KeyedLookupCache::keys()");
+  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
+      "KeyedLookupCache::field_offsets()");
+  Add(ExternalReference::handle_scope_next_address(isolate).address(),
+      "HandleScope::next");
+  Add(ExternalReference::handle_scope_limit_address(isolate).address(),
+      "HandleScope::limit");
+  Add(ExternalReference::handle_scope_level_address(isolate).address(),
+      "HandleScope::level");
+  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
+      "Deoptimizer::New()");
+  Add(ExternalReference::compute_output_frames_function(isolate).address(),
+      "Deoptimizer::ComputeOutputFrames()");
+  Add(ExternalReference::address_of_min_int().address(),
+      "LDoubleConstant::min_int");
+  Add(ExternalReference::address_of_one_half().address(),
+      "LDoubleConstant::one_half");
+  Add(ExternalReference::isolate_address(isolate).address(), "isolate");
+  Add(ExternalReference::address_of_negative_infinity().address(),
+      "LDoubleConstant::negative_infinity");
+  Add(ExternalReference::power_double_double_function(isolate).address(),
+      "power_double_double_function");
+  Add(ExternalReference::power_double_int_function(isolate).address(),
+      "power_double_int_function");
+  Add(ExternalReference::math_log_double_function(isolate).address(),
+      "std::log");
+  Add(ExternalReference::store_buffer_top(isolate).address(),
+      "store_buffer_top");
+  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
+      "canonical_nan");
+  Add(ExternalReference::address_of_the_hole_nan().address(), "the_hole_nan");
+  Add(ExternalReference::get_date_field_function(isolate).address(),
+      "JSDate::GetField");
+  Add(ExternalReference::date_cache_stamp(isolate).address(),
+      "date_cache_stamp");
+  Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
+      "address_of_pending_message_obj");
+  Add(ExternalReference::address_of_has_pending_message(isolate).address(),
+      "address_of_has_pending_message");
+  Add(ExternalReference::address_of_pending_message_script(isolate).address(),
+      "pending_message_script");
+  Add(ExternalReference::get_make_code_young_function(isolate).address(),
+      "Code::MakeCodeYoung");
+  Add(ExternalReference::cpu_features().address(), "cpu_features");
+  Add(ExternalReference(Runtime::kAllocateInNewSpace, isolate).address(),
+      "Runtime::AllocateInNewSpace");
+  Add(ExternalReference(Runtime::kAllocateInTargetSpace, isolate).address(),
+      "Runtime::AllocateInTargetSpace");
+  Add(ExternalReference::old_pointer_space_allocation_top_address(isolate)
+          .address(),
+      "Heap::OldPointerSpaceAllocationTopAddress");
+  Add(ExternalReference::old_pointer_space_allocation_limit_address(isolate)
+          .address(),
+      "Heap::OldPointerSpaceAllocationLimitAddress");
+  Add(ExternalReference::old_data_space_allocation_top_address(isolate)
+          .address(),
+      "Heap::OldDataSpaceAllocationTopAddress");
+  Add(ExternalReference::old_data_space_allocation_limit_address(isolate)
+          .address(),
+      "Heap::OldDataSpaceAllocationLimitAddress");
+  Add(ExternalReference::allocation_sites_list_address(isolate).address(),
+      "Heap::allocation_sites_list_address()");
+  Add(ExternalReference::address_of_uint32_bias().address(), "uint32_bias");
+  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
+      "Code::MarkCodeAsExecuted");
+  Add(ExternalReference::is_profiling_address(isolate).address(),
+      "CpuProfiler::is_profiling");
+  Add(ExternalReference::scheduled_exception_address(isolate).address(),
+      "Isolate::scheduled_exception");
+  Add(ExternalReference::invoke_function_callback(isolate).address(),
+      "InvokeFunctionCallback");
+  Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
+      "InvokeAccessorGetterCallback");
+  Add(ExternalReference::flush_icache_function(isolate).address(),
+      "CpuFeatures::FlushICache");
+  Add(ExternalReference::log_enter_external_function(isolate).address(),
+      "Logger::EnterExternal");
+  Add(ExternalReference::log_leave_external_function(isolate).address(),
+      "Logger::LeaveExternal");
+  Add(ExternalReference::address_of_minus_one_half().address(),
+      "double_constants.minus_one_half");
+  Add(ExternalReference::stress_deopt_count(isolate).address(),
+      "Isolate::stress_deopt_count_address()");
+  Add(ExternalReference::incremental_marking_record_write_function(isolate)
+          .address(),
+      "IncrementalMarking::RecordWriteFromCode");
+
+  // Debug addresses
+  Add(ExternalReference::debug_after_break_target_address(isolate).address(),
+      "Debug::after_break_target_address()");
+  Add(ExternalReference::debug_restarter_frame_function_pointer_address(isolate)
+          .address(),
+      "Debug::restarter_frame_function_pointer_address()");
+  Add(ExternalReference::debug_is_active_address(isolate).address(),
+      "Debug::is_active_address()");
+
+#ifndef V8_INTERPRETED_REGEXP
+  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
+      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
+  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
+      "RegExpMacroAssembler*::CheckStackGuardState()");
+  Add(ExternalReference::re_grow_stack(isolate).address(),
+      "NativeRegExpMacroAssembler::GrowStack()");
+  Add(ExternalReference::re_word_character_map().address(),
+      "NativeRegExpMacroAssembler::word_character_map");
+  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
+      "RegExpStack::limit_address()");
+  Add(ExternalReference::address_of_regexp_stack_memory_address(isolate)
+          .address(),
+      "RegExpStack::memory_address()");
+  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
+      "RegExpStack::memory_size()");
+  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
+      "OffsetsVector::static_offsets_vector");
+#endif  // V8_INTERPRETED_REGEXP
+
   // The following populates all of the different types of external references
   // into the ExternalReferenceTable.
   //
@@ -153,14 +297,6 @@
   INLINE_OPTIMIZED_FUNCTION_LIST(RUNTIME_ENTRY)
 #undef RUNTIME_ENTRY
 
-#define RUNTIME_HIDDEN_ENTRY(name, nargs, ressize) \
-  { RUNTIME_FUNCTION, \
-    Runtime::kHidden##name, \
-    "Runtime::Hidden" #name },
-
-  RUNTIME_HIDDEN_FUNCTION_LIST(RUNTIME_HIDDEN_ENTRY)
-#undef RUNTIME_HIDDEN_ENTRY
-
 #define INLINE_OPTIMIZED_ENTRY(name, nargs, ressize) \
   { RUNTIME_FUNCTION, \
     Runtime::kInlineOptimized##name, \
@@ -179,7 +315,7 @@
 #undef IC_ENTRY
   };  // end of ref_table[].
 
-  for (size_t i = 0; i < ARRAY_SIZE(ref_table); ++i) {
+  for (size_t i = 0; i < arraysize(ref_table); ++i) {
     AddFromId(ref_table[i].type,
               ref_table[i].id,
               ref_table[i].name,
@@ -205,7 +341,7 @@
   };  // end of stats_ref_table[].
 
   Counters* counters = isolate->counters();
-  for (size_t i = 0; i < ARRAY_SIZE(stats_ref_table); ++i) {
+  for (size_t i = 0; i < arraysize(stats_ref_table); ++i) {
     Add(reinterpret_cast<Address>(GetInternalPointer(
             (counters->*(stats_ref_table[i].counter))())),
         STATS_COUNTER,
@@ -245,293 +381,26 @@
 
   // Stub cache tables
   Add(stub_cache->key_reference(StubCache::kPrimary).address(),
-      STUB_CACHE_TABLE,
-      1,
-      "StubCache::primary_->key");
+      STUB_CACHE_TABLE, 1, "StubCache::primary_->key");
   Add(stub_cache->value_reference(StubCache::kPrimary).address(),
-      STUB_CACHE_TABLE,
-      2,
-      "StubCache::primary_->value");
+      STUB_CACHE_TABLE, 2, "StubCache::primary_->value");
   Add(stub_cache->map_reference(StubCache::kPrimary).address(),
-      STUB_CACHE_TABLE,
-      3,
-      "StubCache::primary_->map");
+      STUB_CACHE_TABLE, 3, "StubCache::primary_->map");
   Add(stub_cache->key_reference(StubCache::kSecondary).address(),
-      STUB_CACHE_TABLE,
-      4,
-      "StubCache::secondary_->key");
+      STUB_CACHE_TABLE, 4, "StubCache::secondary_->key");
   Add(stub_cache->value_reference(StubCache::kSecondary).address(),
-      STUB_CACHE_TABLE,
-      5,
-      "StubCache::secondary_->value");
+      STUB_CACHE_TABLE, 5, "StubCache::secondary_->value");
   Add(stub_cache->map_reference(StubCache::kSecondary).address(),
-      STUB_CACHE_TABLE,
-      6,
-      "StubCache::secondary_->map");
+      STUB_CACHE_TABLE, 6, "StubCache::secondary_->map");
 
   // Runtime entries
   Add(ExternalReference::delete_handle_scope_extensions(isolate).address(),
-      RUNTIME_ENTRY,
-      4,
-      "HandleScope::DeleteExtensions");
-  Add(ExternalReference::
-          incremental_marking_record_write_function(isolate).address(),
-      RUNTIME_ENTRY,
-      5,
-      "IncrementalMarking::RecordWrite");
+      RUNTIME_ENTRY, 1, "HandleScope::DeleteExtensions");
+  Add(ExternalReference::incremental_marking_record_write_function(isolate)
+          .address(),
+      RUNTIME_ENTRY, 2, "IncrementalMarking::RecordWrite");
   Add(ExternalReference::store_buffer_overflow_function(isolate).address(),
-      RUNTIME_ENTRY,
-      6,
-      "StoreBuffer::StoreBufferOverflow");
-
-  // Miscellaneous
-  Add(ExternalReference::roots_array_start(isolate).address(),
-      UNCLASSIFIED,
-      3,
-      "Heap::roots_array_start()");
-  Add(ExternalReference::address_of_stack_limit(isolate).address(),
-      UNCLASSIFIED,
-      4,
-      "StackGuard::address_of_jslimit()");
-  Add(ExternalReference::address_of_real_stack_limit(isolate).address(),
-      UNCLASSIFIED,
-      5,
-      "StackGuard::address_of_real_jslimit()");
-#ifndef V8_INTERPRETED_REGEXP
-  Add(ExternalReference::address_of_regexp_stack_limit(isolate).address(),
-      UNCLASSIFIED,
-      6,
-      "RegExpStack::limit_address()");
-  Add(ExternalReference::address_of_regexp_stack_memory_address(
-          isolate).address(),
-      UNCLASSIFIED,
-      7,
-      "RegExpStack::memory_address()");
-  Add(ExternalReference::address_of_regexp_stack_memory_size(isolate).address(),
-      UNCLASSIFIED,
-      8,
-      "RegExpStack::memory_size()");
-  Add(ExternalReference::address_of_static_offsets_vector(isolate).address(),
-      UNCLASSIFIED,
-      9,
-      "OffsetsVector::static_offsets_vector");
-#endif  // V8_INTERPRETED_REGEXP
-  Add(ExternalReference::new_space_start(isolate).address(),
-      UNCLASSIFIED,
-      10,
-      "Heap::NewSpaceStart()");
-  Add(ExternalReference::new_space_mask(isolate).address(),
-      UNCLASSIFIED,
-      11,
-      "Heap::NewSpaceMask()");
-  Add(ExternalReference::new_space_allocation_limit_address(isolate).address(),
-      UNCLASSIFIED,
-      14,
-      "Heap::NewSpaceAllocationLimitAddress()");
-  Add(ExternalReference::new_space_allocation_top_address(isolate).address(),
-      UNCLASSIFIED,
-      15,
-      "Heap::NewSpaceAllocationTopAddress()");
-  Add(ExternalReference::debug_break(isolate).address(),
-      UNCLASSIFIED,
-      16,
-      "Debug::Break()");
-  Add(ExternalReference::debug_step_in_fp_address(isolate).address(),
-      UNCLASSIFIED,
-      17,
-      "Debug::step_in_fp_addr()");
-  Add(ExternalReference::mod_two_doubles_operation(isolate).address(),
-      UNCLASSIFIED,
-      22,
-      "mod_two_doubles");
-#ifndef V8_INTERPRETED_REGEXP
-  Add(ExternalReference::re_case_insensitive_compare_uc16(isolate).address(),
-      UNCLASSIFIED,
-      24,
-      "NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()");
-  Add(ExternalReference::re_check_stack_guard_state(isolate).address(),
-      UNCLASSIFIED,
-      25,
-      "RegExpMacroAssembler*::CheckStackGuardState()");
-  Add(ExternalReference::re_grow_stack(isolate).address(),
-      UNCLASSIFIED,
-      26,
-      "NativeRegExpMacroAssembler::GrowStack()");
-  Add(ExternalReference::re_word_character_map().address(),
-      UNCLASSIFIED,
-      27,
-      "NativeRegExpMacroAssembler::word_character_map");
-#endif  // V8_INTERPRETED_REGEXP
-  // Keyed lookup cache.
-  Add(ExternalReference::keyed_lookup_cache_keys(isolate).address(),
-      UNCLASSIFIED,
-      28,
-      "KeyedLookupCache::keys()");
-  Add(ExternalReference::keyed_lookup_cache_field_offsets(isolate).address(),
-      UNCLASSIFIED,
-      29,
-      "KeyedLookupCache::field_offsets()");
-  Add(ExternalReference::handle_scope_next_address(isolate).address(),
-      UNCLASSIFIED,
-      31,
-      "HandleScope::next");
-  Add(ExternalReference::handle_scope_limit_address(isolate).address(),
-      UNCLASSIFIED,
-      32,
-      "HandleScope::limit");
-  Add(ExternalReference::handle_scope_level_address(isolate).address(),
-      UNCLASSIFIED,
-      33,
-      "HandleScope::level");
-  Add(ExternalReference::new_deoptimizer_function(isolate).address(),
-      UNCLASSIFIED,
-      34,
-      "Deoptimizer::New()");
-  Add(ExternalReference::compute_output_frames_function(isolate).address(),
-      UNCLASSIFIED,
-      35,
-      "Deoptimizer::ComputeOutputFrames()");
-  Add(ExternalReference::address_of_min_int().address(),
-      UNCLASSIFIED,
-      36,
-      "LDoubleConstant::min_int");
-  Add(ExternalReference::address_of_one_half().address(),
-      UNCLASSIFIED,
-      37,
-      "LDoubleConstant::one_half");
-  Add(ExternalReference::isolate_address(isolate).address(),
-      UNCLASSIFIED,
-      38,
-      "isolate");
-  Add(ExternalReference::address_of_minus_zero().address(),
-      UNCLASSIFIED,
-      39,
-      "LDoubleConstant::minus_zero");
-  Add(ExternalReference::address_of_negative_infinity().address(),
-      UNCLASSIFIED,
-      40,
-      "LDoubleConstant::negative_infinity");
-  Add(ExternalReference::power_double_double_function(isolate).address(),
-      UNCLASSIFIED,
-      41,
-      "power_double_double_function");
-  Add(ExternalReference::power_double_int_function(isolate).address(),
-      UNCLASSIFIED,
-      42,
-      "power_double_int_function");
-  Add(ExternalReference::store_buffer_top(isolate).address(),
-      UNCLASSIFIED,
-      43,
-      "store_buffer_top");
-  Add(ExternalReference::address_of_canonical_non_hole_nan().address(),
-      UNCLASSIFIED,
-      44,
-      "canonical_nan");
-  Add(ExternalReference::address_of_the_hole_nan().address(),
-      UNCLASSIFIED,
-      45,
-      "the_hole_nan");
-  Add(ExternalReference::get_date_field_function(isolate).address(),
-      UNCLASSIFIED,
-      46,
-      "JSDate::GetField");
-  Add(ExternalReference::date_cache_stamp(isolate).address(),
-      UNCLASSIFIED,
-      47,
-      "date_cache_stamp");
-  Add(ExternalReference::address_of_pending_message_obj(isolate).address(),
-      UNCLASSIFIED,
-      48,
-      "address_of_pending_message_obj");
-  Add(ExternalReference::address_of_has_pending_message(isolate).address(),
-      UNCLASSIFIED,
-      49,
-      "address_of_has_pending_message");
-  Add(ExternalReference::address_of_pending_message_script(isolate).address(),
-      UNCLASSIFIED,
-      50,
-      "pending_message_script");
-  Add(ExternalReference::get_make_code_young_function(isolate).address(),
-      UNCLASSIFIED,
-      51,
-      "Code::MakeCodeYoung");
-  Add(ExternalReference::cpu_features().address(),
-      UNCLASSIFIED,
-      52,
-      "cpu_features");
-  Add(ExternalReference(Runtime::kHiddenAllocateInNewSpace, isolate).address(),
-      UNCLASSIFIED,
-      53,
-      "Runtime::AllocateInNewSpace");
-  Add(ExternalReference(
-          Runtime::kHiddenAllocateInTargetSpace, isolate).address(),
-      UNCLASSIFIED,
-      54,
-      "Runtime::AllocateInTargetSpace");
-  Add(ExternalReference::old_pointer_space_allocation_top_address(
-      isolate).address(),
-      UNCLASSIFIED,
-      55,
-      "Heap::OldPointerSpaceAllocationTopAddress");
-  Add(ExternalReference::old_pointer_space_allocation_limit_address(
-      isolate).address(),
-      UNCLASSIFIED,
-      56,
-      "Heap::OldPointerSpaceAllocationLimitAddress");
-  Add(ExternalReference::old_data_space_allocation_top_address(
-      isolate).address(),
-      UNCLASSIFIED,
-      57,
-      "Heap::OldDataSpaceAllocationTopAddress");
-  Add(ExternalReference::old_data_space_allocation_limit_address(
-      isolate).address(),
-      UNCLASSIFIED,
-      58,
-      "Heap::OldDataSpaceAllocationLimitAddress");
-  Add(ExternalReference::allocation_sites_list_address(isolate).address(),
-      UNCLASSIFIED,
-      59,
-      "Heap::allocation_sites_list_address()");
-  Add(ExternalReference::address_of_uint32_bias().address(),
-      UNCLASSIFIED,
-      60,
-      "uint32_bias");
-  Add(ExternalReference::get_mark_code_as_executed_function(isolate).address(),
-      UNCLASSIFIED,
-      61,
-      "Code::MarkCodeAsExecuted");
-
-  Add(ExternalReference::is_profiling_address(isolate).address(),
-      UNCLASSIFIED,
-      62,
-      "CpuProfiler::is_profiling");
-
-  Add(ExternalReference::scheduled_exception_address(isolate).address(),
-      UNCLASSIFIED,
-      63,
-      "Isolate::scheduled_exception");
-
-  Add(ExternalReference::invoke_function_callback(isolate).address(),
-      UNCLASSIFIED,
-      64,
-      "InvokeFunctionCallback");
-
-  Add(ExternalReference::invoke_accessor_getter_callback(isolate).address(),
-      UNCLASSIFIED,
-      65,
-      "InvokeAccessorGetterCallback");
-
-  // Debug addresses
-  Add(ExternalReference::debug_after_break_target_address(isolate).address(),
-      UNCLASSIFIED,
-      66,
-      "Debug::after_break_target_address()");
-
-  Add(ExternalReference::debug_restarter_frame_function_pointer_address(
-          isolate).address(),
-      UNCLASSIFIED,
-      67,
-      "Debug::restarter_frame_function_pointer_address()");
+      RUNTIME_ENTRY, 3, "StoreBuffer::StoreBufferOverflow");
 
   // Add a small set of deopt entry addresses to the encoder without generating
   // the deopt table code, which isn't possible at deserialization time.
@@ -560,7 +429,7 @@
 
 uint32_t ExternalReferenceEncoder::Encode(Address key) const {
   int index = IndexOf(key);
-  ASSERT(key == NULL || index >= 0);
+  DCHECK(key == NULL || index >= 0);
   return index >= 0 ?
          ExternalReferenceTable::instance(isolate_)->code(index) : 0;
 }
@@ -568,8 +437,8 @@
 
 const char* ExternalReferenceEncoder::NameOfAddress(Address key) const {
   int index = IndexOf(key);
-  return index >= 0 ?
-      ExternalReferenceTable::instance(isolate_)->name(index) : NULL;
+  return index >= 0 ? ExternalReferenceTable::instance(isolate_)->name(index)
+                    : "<unknown>";
 }
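
NameOfAddress now returns "<unknown>" rather than NULL for unrecognized addresses, so callers can hand the result straight to printf-style logging. A tiny illustration of why the sentinel is the safer contract (the names and bounds are invented):

    #include <cstdio>

    const char* NameOfIndex(int index) {
      static const char* const names[] = {"isolate", "store_buffer_top"};
      // Never hand NULL to callers that format the result with "%s".
      return (index >= 0 && index < 2) ? names[index] : "<unknown>";
    }

    int main() {
      // Passing NULL for "%s" is undefined behavior; "<unknown>" always prints.
      std::printf("%s\n", NameOfIndex(7));  // prints "<unknown>"
    }
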
 
 
@@ -672,11 +541,11 @@
     void Move(Address from, Address to) {
       if (from == to) return;
       HashMap::Entry* from_entry = FindEntry(from);
-      ASSERT(from_entry != NULL);
+      DCHECK(from_entry != NULL);
       void* value = from_entry->value;
       RemoveEntry(from_entry);
       HashMap::Entry* to_entry = FindOrCreateEntry(to);
-      ASSERT(to_entry->value == NULL);
+      DCHECK(to_entry->value == NULL);
       to_entry->value = value;
     }
 
@@ -725,6 +594,7 @@
 
 Deserializer::Deserializer(SnapshotByteSource* source)
     : isolate_(NULL),
+      attached_objects_(NULL),
       source_(source),
       external_reference_decoder_(NULL) {
   for (int i = 0; i < LAST_SPACE + 1; i++) {
@@ -737,20 +607,20 @@
   PageIterator it(isolate_->heap()->code_space());
   while (it.has_next()) {
     Page* p = it.next();
-    CPU::FlushICache(p->area_start(), p->area_end() - p->area_start());
+    CpuFeatures::FlushICache(p->area_start(), p->area_end() - p->area_start());
   }
 }
 
 
 void Deserializer::Deserialize(Isolate* isolate) {
   isolate_ = isolate;
-  ASSERT(isolate_ != NULL);
+  DCHECK(isolate_ != NULL);
   isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
   // No active threads.
-  ASSERT_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
+  DCHECK_EQ(NULL, isolate_->thread_manager()->FirstThreadStateInUse());
   // No active handles.
-  ASSERT(isolate_->handle_scope_implementer()->blocks()->is_empty());
-  ASSERT_EQ(NULL, external_reference_decoder_);
+  DCHECK(isolate_->handle_scope_implementer()->blocks()->is_empty());
+  DCHECK_EQ(NULL, external_reference_decoder_);
   external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
   isolate_->heap()->IterateSmiRoots(this);
   isolate_->heap()->IterateStrongRoots(this, VISIT_ONLY_STRONG);
@@ -775,7 +645,7 @@
   for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
     Object* source = isolate_->heap()->natives_source_cache()->get(i);
     if (!source->IsUndefined()) {
-      ExternalAsciiString::cast(source)->update_data_cache();
+      ExternalOneByteString::cast(source)->update_data_cache();
     }
   }
 
@@ -790,13 +660,15 @@
 void Deserializer::DeserializePartial(Isolate* isolate, Object** root) {
   isolate_ = isolate;
   for (int i = NEW_SPACE; i < kNumberOfSpaces; i++) {
-    ASSERT(reservations_[i] != kUninitializedReservation);
+    DCHECK(reservations_[i] != kUninitializedReservation);
   }
   isolate_->heap()->ReserveSpace(reservations_, &high_water_[0]);
   if (external_reference_decoder_ == NULL) {
     external_reference_decoder_ = new ExternalReferenceDecoder(isolate);
   }
 
+  DisallowHeapAllocation no_gc;
+
   // Keep track of the code space start and end pointers in case new
+  // code objects were deserialized.
   OldSpace* code_space = isolate_->heap()->code_space();
@@ -812,11 +684,12 @@
 
 Deserializer::~Deserializer() {
   // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
-  // ASSERT(source_->AtEOF());
+  // DCHECK(source_->AtEOF());
   if (external_reference_decoder_) {
     delete external_reference_decoder_;
     external_reference_decoder_ = NULL;
   }
+  if (attached_objects_) attached_objects_->Dispose();
 }
 
 
@@ -839,6 +712,64 @@
 }
 
 
+// Used to insert a deserialized internalized string into the string table.
+class StringTableInsertionKey : public HashTableKey {
+ public:
+  explicit StringTableInsertionKey(String* string)
+      : string_(string), hash_(HashForObject(string)) {
+    DCHECK(string->IsInternalizedString());
+  }
+
+  virtual bool IsMatch(Object* string) {
+    // We know that all entries in a hash table had their hash keys created.
+    // Use that knowledge to have fast failure.
+    if (hash_ != HashForObject(string)) return false;
+    // We want to compare the content of two internalized strings here.
+    return string_->SlowEquals(String::cast(string));
+  }
+
+  virtual uint32_t Hash() OVERRIDE { return hash_; }
+
+  virtual uint32_t HashForObject(Object* key) OVERRIDE {
+    return String::cast(key)->Hash();
+  }
+
+  MUST_USE_RESULT virtual Handle<Object> AsHandle(Isolate* isolate)
+      OVERRIDE {
+    return handle(string_, isolate);
+  }
+
+  String* string_;
+  uint32_t hash_;
+};
+
+
+HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
+  if (obj->IsString()) {
+    String* string = String::cast(obj);
+    // Uninitialize hash field as the hash seed may have changed.
+    string->set_hash_field(String::kEmptyHashField);
+    if (string->IsInternalizedString()) {
+      DisallowHeapAllocation no_gc;
+      HandleScope scope(isolate_);
+      StringTableInsertionKey key(string);
+      String* canonical = *StringTable::LookupKey(isolate_, &key);
+      string->SetForwardedInternalizedString(canonical);
+      return canonical;
+    }
+  }
+  return obj;
+}
+
+
+Object* Deserializer::ProcessBackRefInSerializedCode(Object* obj) {
+  if (obj->IsInternalizedString()) {
+    return String::cast(obj)->GetForwardedInternalizedString();
+  }
+  return obj;
+}
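
ProcessNewObjectFromSerializedCode re-internalizes each deserialized string against the live string table and leaves a forwarding pointer behind, so back-references handled by ProcessBackRefInSerializedCode resolve to the canonical copy. A simplified sketch of lookup-or-canonicalize with forwarding, using hypothetical types rather than the V8 heap:

    #include <cstdio>
    #include <string>
    #include <unordered_map>

    struct Str {
      std::string contents;
      Str* forwarded = nullptr;  // set once a canonical copy is known
    };

    class StringTable {
     public:
      // Returns the canonical Str, installing a forwarding pointer when a
      // different canonical copy already exists.
      Str* Canonicalize(Str* s) {
        auto it = table_.find(s->contents);
        if (it == table_.end()) {
          table_[s->contents] = s;
          return s;
        }
        s->forwarded = it->second;
        return it->second;
      }
     private:
      std::unordered_map<std::string, Str*> table_;
    };

    int main() {
      StringTable table;
      Str live{"foo"};
      Str deserialized{"foo"};
      table.Canonicalize(&live);
      // The duplicate now forwards to the live, canonical copy.
      std::printf("%d\n", table.Canonicalize(&deserialized) == &live);  // 1
    }
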
+
+
 // This routine writes the new object into the pointer provided and then
 // returns true if the new object was in young space and false otherwise.
 // The reason for this strange interface is that otherwise the object is
@@ -849,7 +780,7 @@
   int size = source_->GetInt() << kObjectAlignmentBits;
   Address address = Allocate(space_number, size);
   HeapObject* obj = HeapObject::FromAddress(address);
-  *write_back = obj;
+  isolate_->heap()->OnAllocationEvent(obj, size);
   Object** current = reinterpret_cast<Object**>(address);
   Object** limit = current + (size >> kPointerSizeLog2);
   if (FLAG_log_snapshot_positions) {
@@ -860,13 +791,15 @@
   // TODO(mvstanton): consider treating the heap()->allocation_sites_list()
   // as a (weak) root. If this root is relocated correctly,
   // RelinkAllocationSite() isn't necessary.
-  if (obj->IsAllocationSite()) {
-    RelinkAllocationSite(AllocationSite::cast(obj));
-  }
+  if (obj->IsAllocationSite()) RelinkAllocationSite(AllocationSite::cast(obj));
 
+  // Fix up strings from serialized user code.
+  if (deserializing_user_code()) obj = ProcessNewObjectFromSerializedCode(obj);
+
+  *write_back = obj;
 #ifdef DEBUG
   bool is_codespace = (space_number == CODE_SPACE);
-  ASSERT(obj->IsCode() == is_codespace);
+  DCHECK(obj->IsCode() == is_codespace);
 #endif
 }
 
@@ -887,91 +820,107 @@
   while (current < limit) {
     int data = source_->Get();
     switch (data) {
-#define CASE_STATEMENT(where, how, within, space_number)                       \
-      case where + how + within + space_number:                                \
-      ASSERT((where & ~kPointedToMask) == 0);                                  \
-      ASSERT((how & ~kHowToCodeMask) == 0);                                    \
-      ASSERT((within & ~kWhereToPointMask) == 0);                              \
-      ASSERT((space_number & ~kSpaceMask) == 0);
+#define CASE_STATEMENT(where, how, within, space_number) \
+  case where + how + within + space_number:              \
+    STATIC_ASSERT((where & ~kPointedToMask) == 0);       \
+    STATIC_ASSERT((how & ~kHowToCodeMask) == 0);         \
+    STATIC_ASSERT((within & ~kWhereToPointMask) == 0);   \
+    STATIC_ASSERT((space_number & ~kSpaceMask) == 0);
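
CASE_STATEMENT's runtime ASSERTs on the case-constant masks become STATIC_ASSERTs: every operand is a compile-time constant, so a bad encoding now fails the build at zero runtime cost. The equivalent check in standard C++, with invented mask values:

    // Compile-time rejection of an out-of-range case constant.
    constexpr int kPointedToMask = 0x40;  // hypothetical value
    constexpr int kWhere = 0x00;          // hypothetical value
    static_assert((kWhere & ~kPointedToMask) == 0,
                  "case constant out of range");
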
 
 #define CASE_BODY(where, how, within, space_number_if_any)                     \
-      {                                                                        \
-        bool emit_write_barrier = false;                                       \
-        bool current_was_incremented = false;                                  \
-        int space_number =  space_number_if_any == kAnyOldSpace ?              \
-                            (data & kSpaceMask) : space_number_if_any;         \
-        if (where == kNewObject && how == kPlain && within == kStartOfObject) {\
-          ReadObject(space_number, current);                                   \
-          emit_write_barrier = (space_number == NEW_SPACE);                    \
-        } else {                                                               \
-          Object* new_object = NULL;  /* May not be a real Object pointer. */  \
-          if (where == kNewObject) {                                           \
-            ReadObject(space_number, &new_object);                             \
-          } else if (where == kRootArray) {                                    \
-            int root_id = source_->GetInt();                                   \
-            new_object = isolate->heap()->roots_array_start()[root_id];        \
-            emit_write_barrier = isolate->heap()->InNewSpace(new_object);      \
-          } else if (where == kPartialSnapshotCache) {                         \
-            int cache_index = source_->GetInt();                               \
-            new_object = isolate->serialize_partial_snapshot_cache()           \
-                [cache_index];                                                 \
-            emit_write_barrier = isolate->heap()->InNewSpace(new_object);      \
-          } else if (where == kExternalReference) {                            \
-            int skip = source_->GetInt();                                      \
-            current = reinterpret_cast<Object**>(reinterpret_cast<Address>(    \
-                current) + skip);                                              \
-            int reference_id = source_->GetInt();                              \
-            Address address = external_reference_decoder_->                    \
-                Decode(reference_id);                                          \
-            new_object = reinterpret_cast<Object*>(address);                   \
-          } else if (where == kBackref) {                                      \
-            emit_write_barrier = (space_number == NEW_SPACE);                  \
-            new_object = GetAddressFromEnd(data & kSpaceMask);                 \
-          } else {                                                             \
-            ASSERT(where == kBackrefWithSkip);                                 \
-            int skip = source_->GetInt();                                      \
-            current = reinterpret_cast<Object**>(                              \
-                reinterpret_cast<Address>(current) + skip);                    \
-            emit_write_barrier = (space_number == NEW_SPACE);                  \
-            new_object = GetAddressFromEnd(data & kSpaceMask);                 \
-          }                                                                    \
-          if (within == kInnerPointer) {                                       \
-            if (space_number != CODE_SPACE || new_object->IsCode()) {          \
-              Code* new_code_object = reinterpret_cast<Code*>(new_object);     \
-              new_object = reinterpret_cast<Object*>(                          \
-                  new_code_object->instruction_start());                       \
-            } else {                                                           \
-              ASSERT(space_number == CODE_SPACE);                              \
-              Cell* cell = Cell::cast(new_object);                             \
-              new_object = reinterpret_cast<Object*>(                          \
-                  cell->ValueAddress());                                       \
-            }                                                                  \
-          }                                                                    \
-          if (how == kFromCode) {                                              \
-            Address location_of_branch_data =                                  \
-                reinterpret_cast<Address>(current);                            \
-            Assembler::deserialization_set_special_target_at(                  \
-                location_of_branch_data,                                       \
-                Code::cast(HeapObject::FromAddress(current_object_address)),   \
-                reinterpret_cast<Address>(new_object));                        \
-            location_of_branch_data += Assembler::kSpecialTargetSize;          \
-            current = reinterpret_cast<Object**>(location_of_branch_data);     \
-            current_was_incremented = true;                                    \
-          } else {                                                             \
-            *current = new_object;                                             \
-          }                                                                    \
+  {                                                                            \
+    bool emit_write_barrier = false;                                           \
+    bool current_was_incremented = false;                                      \
+    int space_number = space_number_if_any == kAnyOldSpace                     \
+                           ? (data & kSpaceMask)                               \
+                           : space_number_if_any;                              \
+    if (where == kNewObject && how == kPlain && within == kStartOfObject) {    \
+      ReadObject(space_number, current);                                       \
+      emit_write_barrier = (space_number == NEW_SPACE);                        \
+    } else {                                                                   \
+      Object* new_object = NULL; /* May not be a real Object pointer. */       \
+      if (where == kNewObject) {                                               \
+        ReadObject(space_number, &new_object);                                 \
+      } else if (where == kRootArray) {                                        \
+        int root_id = source_->GetInt();                                       \
+        new_object = isolate->heap()->roots_array_start()[root_id];            \
+        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
+      } else if (where == kPartialSnapshotCache) {                             \
+        int cache_index = source_->GetInt();                                   \
+        new_object = isolate->serialize_partial_snapshot_cache()[cache_index]; \
+        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
+      } else if (where == kExternalReference) {                                \
+        int skip = source_->GetInt();                                          \
+        current = reinterpret_cast<Object**>(                                  \
+            reinterpret_cast<Address>(current) + skip);                        \
+        int reference_id = source_->GetInt();                                  \
+        Address address = external_reference_decoder_->Decode(reference_id);   \
+        new_object = reinterpret_cast<Object*>(address);                       \
+      } else if (where == kBackref) {                                          \
+        emit_write_barrier = (space_number == NEW_SPACE);                      \
+        new_object = GetAddressFromEnd(data & kSpaceMask);                     \
+        if (deserializing_user_code()) {                                       \
+          new_object = ProcessBackRefInSerializedCode(new_object);             \
         }                                                                      \
-        if (emit_write_barrier && write_barrier_needed) {                      \
-          Address current_address = reinterpret_cast<Address>(current);        \
-          isolate->heap()->RecordWrite(                                        \
-              current_object_address,                                          \
-              static_cast<int>(current_address - current_object_address));     \
+      } else if (where == kBuiltin) {                                          \
+        DCHECK(deserializing_user_code());                                     \
+        int builtin_id = source_->GetInt();                                    \
+        DCHECK_LE(0, builtin_id);                                              \
+        DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
+        Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
+        new_object = isolate->builtins()->builtin(name);                       \
+        emit_write_barrier = false;                                            \
+      } else if (where == kAttachedReference) {                                \
+        DCHECK(deserializing_user_code());                                     \
+        int index = source_->GetInt();                                         \
+        new_object = *attached_objects_->at(index);                            \
+        emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
+      } else {                                                                 \
+        DCHECK(where == kBackrefWithSkip);                                     \
+        int skip = source_->GetInt();                                          \
+        current = reinterpret_cast<Object**>(                                  \
+            reinterpret_cast<Address>(current) + skip);                        \
+        emit_write_barrier = (space_number == NEW_SPACE);                      \
+        new_object = GetAddressFromEnd(data & kSpaceMask);                     \
+        if (deserializing_user_code()) {                                       \
+          new_object = ProcessBackRefInSerializedCode(new_object);             \
         }                                                                      \
-        if (!current_was_incremented) {                                        \
-          current++;                                                           \
-        }                                                                      \
-        break;                                                                 \
       }                                                                        \
+      if (within == kInnerPointer) {                                           \
+        if (space_number != CODE_SPACE || new_object->IsCode()) {              \
+          Code* new_code_object = reinterpret_cast<Code*>(new_object);         \
+          new_object =                                                         \
+              reinterpret_cast<Object*>(new_code_object->instruction_start()); \
+        } else {                                                               \
+          DCHECK(space_number == CODE_SPACE);                                  \
+          Cell* cell = Cell::cast(new_object);                                 \
+          new_object = reinterpret_cast<Object*>(cell->ValueAddress());        \
+        }                                                                      \
+      }                                                                        \
+      if (how == kFromCode) {                                                  \
+        Address location_of_branch_data = reinterpret_cast<Address>(current);  \
+        Assembler::deserialization_set_special_target_at(                      \
+            location_of_branch_data,                                           \
+            Code::cast(HeapObject::FromAddress(current_object_address)),       \
+            reinterpret_cast<Address>(new_object));                            \
+        location_of_branch_data += Assembler::kSpecialTargetSize;              \
+        current = reinterpret_cast<Object**>(location_of_branch_data);         \
+        current_was_incremented = true;                                        \
+      } else {                                                                 \
+        *current = new_object;                                                 \
+      }                                                                        \
+    }                                                                          \
+    if (emit_write_barrier && write_barrier_needed) {                          \
+      Address current_address = reinterpret_cast<Address>(current);            \
+      isolate->heap()->RecordWrite(                                            \
+          current_object_address,                                              \
+          static_cast<int>(current_address - current_object_address));         \
+    }                                                                          \
+    if (!current_was_incremented) {                                            \
+      current++;                                                               \
+    }                                                                          \
+    break;                                                                     \
+  }
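// A minimal standalone model of the write-barrier decision above: the macro
// sets emit_write_barrier when the freshly written pointer targets new space,
// and the store is recorded only if write_barrier_needed also holds (that
// flag is computed outside this hunk; generationally, it means the host
// object lives outside new space). Space and Obj below are invented
// stand-ins for the V8-internal heap types; this is a sketch, not the real
// barrier.
#include <cassert>

enum Space { NEW_SPACE, OLD_SPACE };
struct Obj { Space space; };

static bool NeedsWriteBarrier(const Obj& host, const Obj& target) {
  // Old-to-young pointers must be recorded so the next scavenge can find
  // them; every other combination needs no extra bookkeeping.
  return host.space != NEW_SPACE && target.space == NEW_SPACE;
}

int main() {
  Obj old_host{OLD_SPACE}, young{NEW_SPACE}, old_target{OLD_SPACE};
  assert(NeedsWriteBarrier(old_host, young));
  assert(!NeedsWriteBarrier(old_host, old_target));
  assert(!NeedsWriteBarrier(young, young));
  return 0;
}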
 
 // This generates a case and a body for the new space (which has to do extra
 // write barrier handling) and handles the other spaces with 8 fall-through
@@ -1058,7 +1007,7 @@
       SIXTEEN_CASES(kRootArrayConstants + kNoSkipDistance + 16) {
         int root_id = RootArrayConstantFromByteCode(data);
         Object* object = isolate->heap()->roots_array_start()[root_id];
-        ASSERT(!isolate->heap()->InNewSpace(object));
+        DCHECK(!isolate->heap()->InNewSpace(object));
         *current++ = object;
         break;
       }
@@ -1070,7 +1019,7 @@
         current = reinterpret_cast<Object**>(
             reinterpret_cast<intptr_t>(current) + skip);
         Object* object = isolate->heap()->roots_array_start()[root_id];
-        ASSERT(!isolate->heap()->InNewSpace(object));
+        DCHECK(!isolate->heap()->InNewSpace(object));
         *current++ = object;
         break;
       }
@@ -1078,7 +1027,7 @@
       case kRepeat: {
         int repeats = source_->GetInt();
         Object* object = current[-1];
-        ASSERT(!isolate->heap()->InNewSpace(object));
+        DCHECK(!isolate->heap()->InNewSpace(object));
         for (int i = 0; i < repeats; i++) current[i] = object;
         current += repeats;
         break;
@@ -1093,7 +1042,7 @@
       FOUR_CASES(kConstantRepeat + 9) {
         int repeats = RepeatsForCode(data);
         Object* object = current[-1];
-        ASSERT(!isolate->heap()->InNewSpace(object));
+        DCHECK(!isolate->heap()->InNewSpace(object));
         for (int i = 0; i < repeats; i++) current[i] = object;
         current += repeats;
         break;
@@ -1114,7 +1063,8 @@
       // allocation point and write a pointer to it to the current object.
       ALL_SPACES(kBackref, kPlain, kStartOfObject)
       ALL_SPACES(kBackrefWithSkip, kPlain, kStartOfObject)
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
+    defined(V8_TARGET_ARCH_MIPS64)
       // Deserialize a new object from pointer found in code and write
       // a pointer to it to the current object. Required only for MIPS or ARM
       // with ool constant pool, and omitted on the other architectures because
@@ -1138,6 +1088,12 @@
       // current object.
       CASE_STATEMENT(kRootArray, kPlain, kStartOfObject, 0)
       CASE_BODY(kRootArray, kPlain, kStartOfObject, 0)
+#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL || \
+    defined(V8_TARGET_ARCH_MIPS64)
+      // Find an object in the roots array and write a pointer to it in code.
+      CASE_STATEMENT(kRootArray, kFromCode, kStartOfObject, 0)
+      CASE_BODY(kRootArray, kFromCode, kStartOfObject, 0)
+#endif
       // Find an object in the partial snapshots cache and write a pointer to it
       // to the current object.
       CASE_STATEMENT(kPartialSnapshotCache, kPlain, kStartOfObject, 0)
@@ -1166,6 +1122,26 @@
                 kFromCode,
                 kStartOfObject,
                 0)
+      // Find a builtin and write a pointer to it to the current object.
+      CASE_STATEMENT(kBuiltin, kPlain, kStartOfObject, 0)
+      CASE_BODY(kBuiltin, kPlain, kStartOfObject, 0)
+#if V8_OOL_CONSTANT_POOL
+      // Find a builtin code entry and write a pointer to it to the current
+      // object.
+      CASE_STATEMENT(kBuiltin, kPlain, kInnerPointer, 0)
+      CASE_BODY(kBuiltin, kPlain, kInnerPointer, 0)
+#endif
+      // Find a builtin and write a pointer to it in the current code object.
+      CASE_STATEMENT(kBuiltin, kFromCode, kInnerPointer, 0)
+      CASE_BODY(kBuiltin, kFromCode, kInnerPointer, 0)
+      // Find an object in the attached references and write a pointer to it to
+      // the current object.
+      CASE_STATEMENT(kAttachedReference, kPlain, kStartOfObject, 0)
+      CASE_BODY(kAttachedReference, kPlain, kStartOfObject, 0)
+      CASE_STATEMENT(kAttachedReference, kPlain, kInnerPointer, 0)
+      CASE_BODY(kAttachedReference, kPlain, kInnerPointer, 0)
+      CASE_STATEMENT(kAttachedReference, kFromCode, kInnerPointer, 0)
+      CASE_BODY(kAttachedReference, kFromCode, kInnerPointer, 0)
 
 #undef CASE_STATEMENT
 #undef CASE_BODY
@@ -1199,20 +1175,7 @@
         UNREACHABLE();
     }
   }
-  ASSERT_EQ(limit, current);
-}
-
-
-void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
-  ASSERT(integer < 1 << 22);
-  integer <<= 2;
-  int bytes = 1;
-  if (integer > 0xff) bytes = 2;
-  if (integer > 0xffff) bytes = 3;
-  integer |= bytes;
-  Put(static_cast<int>(integer & 0xff), "IntPart1");
-  if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
-  if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
+  DCHECK_EQ(limit, current);
 }
 
 
@@ -1334,7 +1297,7 @@
   startup_serializer_->VisitPointer(reinterpret_cast<Object**>(&heap_object));
   // We don't recurse from the startup snapshot generator into the partial
   // snapshot generator.
-  ASSERT(length == isolate->serialize_partial_snapshot_cache_length() - 1);
+  DCHECK(length == isolate->serialize_partial_snapshot_cache_length() - 1);
   return length;
 }
 
@@ -1345,14 +1308,6 @@
   for (int i = 0; i < root_index_wave_front_; i++) {
     Object* root = heap->roots_array_start()[i];
     if (!root->IsSmi() && root == heap_object) {
-#if defined(V8_TARGET_ARCH_MIPS) || V8_OOL_CONSTANT_POOL
-      if (from == kFromCode) {
-        // In order to avoid code bloat in the deserializer we don't have
-        // support for the encoding that specifies a particular root should
-        // be written from within code.
-        return kInvalidRootIndex;
-      }
-#endif
       return i;
     }
   }
@@ -1364,12 +1319,12 @@
 // location into a later object.  We can encode the location as an offset from
 // the start of the deserialized objects or as an offset backwards from the
 // current allocation pointer.
-void Serializer::SerializeReferenceToPreviousObject(
-    int space,
-    int address,
-    HowToCode how_to_code,
-    WhereToPoint where_to_point,
-    int skip) {
+void Serializer::SerializeReferenceToPreviousObject(HeapObject* heap_object,
+                                                    HowToCode how_to_code,
+                                                    WhereToPoint where_to_point,
+                                                    int skip) {
+  int space = SpaceOfObject(heap_object);
+  int address = address_mapper_.MappedTo(heap_object);
   int offset = CurrentAllocationAddress(space) - address;
   // Shift out the bits that are always 0.
   offset >>= kObjectAlignmentBits;
@@ -1391,6 +1346,7 @@
     int skip) {
   CHECK(o->IsHeapObject());
   HeapObject* heap_object = HeapObject::cast(o);
+  DCHECK(!heap_object->IsJSFunction());
 
   int root_index;
   if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
@@ -1399,12 +1355,7 @@
   }
 
   if (address_mapper_.IsMapped(heap_object)) {
-    int space = SpaceOfObject(heap_object);
-    int address = address_mapper_.MappedTo(heap_object);
-    SerializeReferenceToPreviousObject(space,
-                                       address,
-                                       how_to_code,
-                                       where_to_point,
+    SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
                                        skip);
   } else {
     if (skip != 0) {
@@ -1475,7 +1426,7 @@
   if (heap_object->IsMap()) {
     // The code-caches link to context-specific code objects, which
     // the startup and context serializers cannot currently handle.
-    ASSERT(Map::cast(heap_object)->code_cache() ==
+    DCHECK(Map::cast(heap_object)->code_cache() ==
            heap_object->GetHeap()->empty_fixed_array());
   }
 
@@ -1501,18 +1452,13 @@
   // Pointers from the partial snapshot to the objects in the startup snapshot
   // should go through the root array or through the partial snapshot cache.
   // If this is not the case you may have to add something to the root array.
-  ASSERT(!startup_serializer_->address_mapper()->IsMapped(heap_object));
+  DCHECK(!startup_serializer_->address_mapper()->IsMapped(heap_object));
   // All the internalized strings that the partial snapshot needs should be
   // either in the root table or in the partial snapshot cache.
-  ASSERT(!heap_object->IsInternalizedString());
+  DCHECK(!heap_object->IsInternalizedString());
 
   if (address_mapper_.IsMapped(heap_object)) {
-    int space = SpaceOfObject(heap_object);
-    int address = address_mapper_.MappedTo(heap_object);
-    SerializeReferenceToPreviousObject(space,
-                                       address,
-                                       how_to_code,
-                                       where_to_point,
+    SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
                                        skip);
   } else {
     if (skip != 0) {
@@ -1579,9 +1525,10 @@
           root_index != kInvalidRootIndex &&
           root_index < kRootArrayNumberOfConstantEncodings &&
           current_contents == current[-1]) {
-        ASSERT(!serializer_->isolate()->heap()->InNewSpace(current_contents));
+        DCHECK(!serializer_->isolate()->heap()->InNewSpace(current_contents));
         int repeat_count = 1;
-        while (current < end - 1 && current[repeat_count] == current_contents) {
+        while (&current[repeat_count] < end - 1 &&
+               current[repeat_count] == current_contents) {
           repeat_count++;
         }
         current += repeat_count;
@@ -1678,19 +1625,20 @@
   int skip = OutputRawData(rinfo->pc(), kCanReturnSkipInsteadOfSkipping);
   Cell* object = Cell::cast(rinfo->target_cell());
   serializer_->SerializeObject(object, kPlain, kInnerPointer, skip);
+  bytes_processed_so_far_ += kPointerSize;
 }
 
 
-void Serializer::ObjectSerializer::VisitExternalAsciiString(
-    v8::String::ExternalAsciiStringResource** resource_pointer) {
+void Serializer::ObjectSerializer::VisitExternalOneByteString(
+    v8::String::ExternalOneByteStringResource** resource_pointer) {
   Address references_start = reinterpret_cast<Address>(resource_pointer);
   OutputRawData(references_start);
   for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
     Object* source =
         serializer_->isolate()->heap()->natives_source_cache()->get(i);
     if (!source->IsUndefined()) {
-      ExternalAsciiString* string = ExternalAsciiString::cast(source);
-      typedef v8::String::ExternalAsciiStringResource Resource;
+      ExternalOneByteString* string = ExternalOneByteString::cast(source);
+      typedef v8::String::ExternalOneByteStringResource Resource;
       const Resource* resource = string->resource();
       if (resource == *resource_pointer) {
         sink_->Put(kNativesStringResource, "NativesStringResource");
@@ -1734,10 +1682,10 @@
   int up_to_offset = static_cast<int>(up_to - object_start);
   int to_skip = up_to_offset - bytes_processed_so_far_;
   int bytes_to_output = to_skip;
-  bytes_processed_so_far_ +=  to_skip;
+  bytes_processed_so_far_ += to_skip;
   // This assert will fail if the reloc info gives us the target_address_address
   // locations in a non-ascending order.  Luckily that doesn't happen.
-  ASSERT(to_skip >= 0);
+  DCHECK(to_skip >= 0);
   bool outputting_code = false;
   if (to_skip != 0 && code_object_ && !code_has_been_output_) {
     // Output the code all at once and fix later.
@@ -1790,7 +1738,7 @@
   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
     AllocationSpace s = static_cast<AllocationSpace>(i);
     if (object->GetHeap()->InSpace(object, s)) {
-      ASSERT(i < kNumberOfSpaces);
+      DCHECK(i < kNumberOfSpaces);
       return i;
     }
   }
@@ -1831,12 +1779,294 @@
 }
 
 
-bool SnapshotByteSource::AtEOF() {
-  if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false;
-  for (int x = position_; x < length_; x++) {
-    if (data_[x] != SerializerDeserializer::nop()) return false;
+ScriptData* CodeSerializer::Serialize(Isolate* isolate,
+                                      Handle<SharedFunctionInfo> info,
+                                      Handle<String> source) {
+  base::ElapsedTimer timer;
+  if (FLAG_profile_deserialization) timer.Start();
+
+  // Serialize code object.
+  List<byte> payload;
+  ListSnapshotSink list_sink(&payload);
+  DebugSnapshotSink debug_sink(&list_sink);
+  SnapshotByteSink* sink = FLAG_trace_code_serializer
+                               ? static_cast<SnapshotByteSink*>(&debug_sink)
+                               : static_cast<SnapshotByteSink*>(&list_sink);
+  CodeSerializer cs(isolate, sink, *source);
+  DisallowHeapAllocation no_gc;
+  Object** location = Handle<Object>::cast(info).location();
+  cs.VisitPointer(location);
+  cs.Pad();
+
+  SerializedCodeData data(&payload, &cs);
+  ScriptData* script_data = data.GetScriptData();
+
+  if (FLAG_profile_deserialization) {
+    double ms = timer.Elapsed().InMillisecondsF();
+    int length = script_data->length();
+    PrintF("[Serializing to %d bytes took %0.3f ms]\n", length, ms);
   }
-  return true;
+
+  return script_data;
 }
 
+
+void CodeSerializer::SerializeObject(Object* o, HowToCode how_to_code,
+                                     WhereToPoint where_to_point, int skip) {
+  CHECK(o->IsHeapObject());
+  HeapObject* heap_object = HeapObject::cast(o);
+
+  // The code-caches link to context-specific code objects, which
+  // the startup and context serializers cannot currently handle.
+  DCHECK(!heap_object->IsMap() ||
+         Map::cast(heap_object)->code_cache() ==
+             heap_object->GetHeap()->empty_fixed_array());
+
+  int root_index;
+  if ((root_index = RootIndex(heap_object, how_to_code)) != kInvalidRootIndex) {
+    PutRoot(root_index, heap_object, how_to_code, where_to_point, skip);
+    return;
+  }
+
+  // TODO(yangguo) wire up global object.
+  // TODO(yangguo) We cannot deal with different hash seeds yet.
+  DCHECK(!heap_object->IsHashTable());
+
+  if (address_mapper_.IsMapped(heap_object)) {
+    SerializeReferenceToPreviousObject(heap_object, how_to_code, where_to_point,
+                                       skip);
+    return;
+  }
+
+  if (heap_object->IsCode()) {
+    Code* code_object = Code::cast(heap_object);
+    if (code_object->kind() == Code::BUILTIN) {
+      SerializeBuiltin(code_object, how_to_code, where_to_point, skip);
+      return;
+    }
+    if (code_object->IsCodeStubOrIC()) {
+      SerializeCodeStub(code_object, how_to_code, where_to_point, skip);
+      return;
+    }
+    code_object->ClearInlineCaches();
+  }
+
+  if (heap_object == source_) {
+    SerializeSourceObject(how_to_code, where_to_point, skip);
+    return;
+  }
+
+  SerializeHeapObject(heap_object, how_to_code, where_to_point, skip);
+}
+
+
+void CodeSerializer::SerializeHeapObject(HeapObject* heap_object,
+                                         HowToCode how_to_code,
+                                         WhereToPoint where_to_point,
+                                         int skip) {
+  if (heap_object->IsScript()) {
+    // The wrapper cache uses a Foreign object to point to a global handle.
+    // However, the object visitor expects foreign objects to point to external
+    // references.  Clear the cache to avoid this issue.
+    Script::cast(heap_object)->ClearWrapperCache();
+  }
+
+  if (skip != 0) {
+    sink_->Put(kSkip, "SkipFromSerializeObject");
+    sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+  }
+
+  if (FLAG_trace_code_serializer) {
+    PrintF("Encoding heap object: ");
+    heap_object->ShortPrint();
+    PrintF("\n");
+  }
+
+  // Object has not yet been serialized.  Serialize it here.
+  ObjectSerializer serializer(this, heap_object, sink_, how_to_code,
+                              where_to_point);
+  serializer.Serialize();
+}
+
+
+void CodeSerializer::SerializeBuiltin(Code* builtin, HowToCode how_to_code,
+                                      WhereToPoint where_to_point, int skip) {
+  if (skip != 0) {
+    sink_->Put(kSkip, "SkipFromSerializeBuiltin");
+    sink_->PutInt(skip, "SkipDistanceFromSerializeBuiltin");
+  }
+
+  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+         (how_to_code == kFromCode && where_to_point == kInnerPointer));
+  int builtin_index = builtin->builtin_index();
+  DCHECK_LT(builtin_index, Builtins::builtin_count);
+  DCHECK_LE(0, builtin_index);
+
+  if (FLAG_trace_code_serializer) {
+    PrintF("Encoding builtin: %s\n",
+           isolate()->builtins()->name(builtin_index));
+  }
+
+  sink_->Put(kBuiltin + how_to_code + where_to_point, "Builtin");
+  sink_->PutInt(builtin_index, "builtin_index");
+}
+
+
+void CodeSerializer::SerializeCodeStub(Code* code, HowToCode how_to_code,
+                                       WhereToPoint where_to_point, int skip) {
+  DCHECK((how_to_code == kPlain && where_to_point == kStartOfObject) ||
+         (how_to_code == kPlain && where_to_point == kInnerPointer) ||
+         (how_to_code == kFromCode && where_to_point == kInnerPointer));
+  uint32_t stub_key = code->stub_key();
+
+  if (stub_key == CodeStub::NoCacheKey()) {
+    if (FLAG_trace_code_serializer) {
+      PrintF("Encoding uncacheable code stub as heap object\n");
+    }
+    SerializeHeapObject(code, how_to_code, where_to_point, skip);
+    return;
+  }
+
+  if (skip != 0) {
+    sink_->Put(kSkip, "SkipFromSerializeCodeStub");
+    sink_->PutInt(skip, "SkipDistanceFromSerializeCodeStub");
+  }
+
+  int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
+
+  if (FLAG_trace_code_serializer) {
+    PrintF("Encoding code stub %s as %d\n",
+           CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false),
+           index);
+  }
+
+  sink_->Put(kAttachedReference + how_to_code + where_to_point, "CodeStub");
+  sink_->PutInt(index, "CodeStub key");
+}
+
+
+int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
+  // TODO(yangguo) Maybe we need a hash table for a faster lookup than O(n^2).
+  int index = 0;
+  while (index < stub_keys_.length()) {
+    if (stub_keys_[index] == stub_key) return index;
+    index++;
+  }
+  stub_keys_.Add(stub_key);
+  return index;
+}
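// The TODO above points out that the linear scan makes stub-key registration
// quadratic overall. A hedged sketch of the hash-table variant it suggests
// (not part of this patch): the vector keeps the stable indices that the
// deserializer's attached-reference table relies on, while the map turns
// each lookup into O(1).
#include <cstdint>
#include <unordered_map>
#include <vector>

class StubKeyTable {
 public:
  // Returns the existing index for stub_key, or appends it and returns the
  // new index, mirroring AddCodeStubKey above.
  int Add(uint32_t stub_key) {
    auto it = index_of_.find(stub_key);
    if (it != index_of_.end()) return it->second;
    int index = static_cast<int>(keys_.size());
    index_of_.emplace(stub_key, index);
    keys_.push_back(stub_key);
    return index;
  }
  const std::vector<uint32_t>& keys() const { return keys_; }

 private:
  std::vector<uint32_t> keys_;                  // index -> key, in order
  std::unordered_map<uint32_t, int> index_of_;  // key -> index
};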
+
+
+void CodeSerializer::SerializeSourceObject(HowToCode how_to_code,
+                                           WhereToPoint where_to_point,
+                                           int skip) {
+  if (skip != 0) {
+    sink_->Put(kSkip, "SkipFromSerializeSourceObject");
+    sink_->PutInt(skip, "SkipDistanceFromSerializeSourceObject");
+  }
+
+  if (FLAG_trace_code_serializer) {
+    PrintF("Encoding source object\n");
+  }
+
+  DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
+  sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source");
+  sink_->PutInt(kSourceObjectIndex, "kSourceObjectIndex");
+}
+
+
+Handle<SharedFunctionInfo> CodeSerializer::Deserialize(Isolate* isolate,
+                                                       ScriptData* data,
+                                                       Handle<String> source) {
+  base::ElapsedTimer timer;
+  if (FLAG_profile_deserialization) timer.Start();
+
+  Object* root;
+
+  {
+    HandleScope scope(isolate);
+
+    SerializedCodeData scd(data, *source);
+    SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
+    Deserializer deserializer(&payload);
+    STATIC_ASSERT(NEW_SPACE == 0);
+    for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) {
+      deserializer.set_reservation(i, scd.GetReservation(i));
+    }
+
+    // Prepare and register list of attached objects.
+    Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys();
+    Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
+        code_stub_keys.length() + kCodeStubsBaseIndex);
+    attached_objects[kSourceObjectIndex] = source;
+    for (int i = 0; i < code_stub_keys.length(); i++) {
+      attached_objects[i + kCodeStubsBaseIndex] =
+          CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
+    }
+    deserializer.SetAttachedObjects(&attached_objects);
+
+    // Deserialize.
+    deserializer.DeserializePartial(isolate, &root);
+    deserializer.FlushICacheForNewCodeObjects();
+  }
+
+  if (FLAG_profile_deserialization) {
+    double ms = timer.Elapsed().InMillisecondsF();
+    int length = data->length();
+    PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
+  }
+  return Handle<SharedFunctionInfo>(SharedFunctionInfo::cast(root), isolate);
+}
+
+
+SerializedCodeData::SerializedCodeData(List<byte>* payload, CodeSerializer* cs)
+    : owns_script_data_(true) {
+  DisallowHeapAllocation no_gc;
+  List<uint32_t>* stub_keys = cs->stub_keys();
+
+  // Calculate sizes.
+  int num_stub_keys = stub_keys->length();
+  int stub_keys_size = stub_keys->length() * kInt32Size;
+  int data_length = kHeaderSize + stub_keys_size + payload->length();
+
+  // Allocate backing store and create result data.
+  byte* data = NewArray<byte>(data_length);
+  DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment));
+  script_data_ = new ScriptData(data, data_length);
+  script_data_->AcquireDataOwnership();
+
+  // Set header values.
+  SetHeaderValue(kCheckSumOffset, CheckSum(cs->source()));
+  SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
+  SetHeaderValue(kPayloadLengthOffset, payload->length());
+  STATIC_ASSERT(NEW_SPACE == 0);
+  for (int i = NEW_SPACE; i <= PROPERTY_CELL_SPACE; i++) {
+    SetHeaderValue(kReservationsOffset + i, cs->CurrentAllocationAddress(i));
+  }
+
+  // Copy code stub keys.
+  CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(stub_keys->begin()),
+            stub_keys_size);
+
+  // Copy serialized data.
+  CopyBytes(data + kHeaderSize + stub_keys_size, payload->begin(),
+            static_cast<size_t>(payload->length()));
+}
+
+
+bool SerializedCodeData::IsSane(String* source) {
+  return GetHeaderValue(kCheckSumOffset) == CheckSum(source) &&
+         PayloadLength() >= SharedFunctionInfo::kSize;
+}
+
+
+int SerializedCodeData::CheckSum(String* string) {
+  int checksum = Version::Hash();
+#ifdef DEBUG
+  uint32_t seed = static_cast<uint32_t>(checksum);
+  checksum = static_cast<int>(IteratingStringHasher::Hash(string, seed));
+#endif  // DEBUG
+  return checksum;
+}
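// The check above keys cached code to the V8 version hash and, in debug
// builds, additionally to a hash of the source string seeded with that
// version hash. A standalone sketch of the same chaining, with a seeded
// FNV-1a standing in for IteratingStringHasher (an assumption; the real
// hasher is V8-internal):
#include <cstdint>
#include <string>

uint32_t SeededHash(const std::string& s, uint32_t seed) {
  uint32_t h = 2166136261u ^ seed;  // fold the seed into the FNV offset basis
  for (unsigned char c : s) {
    h ^= c;
    h *= 16777619u;
  }
  return h;
}

int CheckSum(const std::string& source, int version_hash, bool debug_build) {
  int checksum = version_hash;
  if (debug_build) {
    // Mixing in the source rejects a cache entry produced for different text.
    checksum = static_cast<int>(
        SeededHash(source, static_cast<uint32_t>(version_hash)));
  }
  return checksum;
}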
 } }  // namespace v8::internal
diff --git a/src/serialize.h b/src/serialize.h
index 9e3cc88..71b274b 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -5,7 +5,11 @@
 #ifndef V8_SERIALIZE_H_
 #define V8_SERIALIZE_H_
 
+#include "src/compiler.h"
 #include "src/hashmap.h"
+#include "src/heap-profiler.h"
+#include "src/isolate.h"
+#include "src/snapshot-source-sink.h"
 
 namespace v8 {
 namespace internal {
@@ -13,17 +17,16 @@
 // A TypeCode is used to distinguish different kinds of external reference.
 // It is a single bit to make testing for types easy.
 enum TypeCode {
-  UNCLASSIFIED,        // One-of-a-kind references.
+  UNCLASSIFIED,  // One-of-a-kind references.
+  C_BUILTIN,
   BUILTIN,
   RUNTIME_FUNCTION,
   IC_UTILITY,
   STATS_COUNTER,
   TOP_ADDRESS,
-  C_BUILTIN,
-  EXTENSION,
   ACCESSOR,
-  RUNTIME_ENTRY,
   STUB_CACHE_TABLE,
+  RUNTIME_ENTRY,
   LAZY_DEOPTIMIZATION
 };
 
@@ -77,8 +80,12 @@
   // For other types of references, the caller will figure out the address.
   void Add(Address address, TypeCode type, uint16_t id, const char* name);
 
+  void Add(Address address, const char* name) {
+    Add(address, UNCLASSIFIED, ++max_id_[UNCLASSIFIED], name);
+  }
+
   List<ExternalReferenceEntry> refs_;
-  int max_id_[kTypeCodeCount];
+  uint16_t max_id_[kTypeCodeCount];
 };
 
 
@@ -119,7 +126,7 @@
 
   Address* Lookup(uint32_t key) const {
     int type = key >> kReferenceTypeShift;
-    ASSERT(kFirstTypeCode <= type && type < kTypeCodeCount);
+    DCHECK(kFirstTypeCode <= type && type < kTypeCodeCount);
     int id = key & kReferenceIdMask;
     return &encodings_[type][id];
   }
@@ -132,49 +139,6 @@
 };
 
 
-class SnapshotByteSource {
- public:
-  SnapshotByteSource(const byte* array, int length)
-    : data_(array), length_(length), position_(0) { }
-
-  bool HasMore() { return position_ < length_; }
-
-  int Get() {
-    ASSERT(position_ < length_);
-    return data_[position_++];
-  }
-
-  int32_t GetUnalignedInt() {
-#if defined(V8_HOST_CAN_READ_UNALIGNED) &&  __BYTE_ORDER == __LITTLE_ENDIAN
-    int32_t answer;
-    ASSERT(position_ + sizeof(answer) <= length_ + 0u);
-    answer = *reinterpret_cast<const int32_t*>(data_ + position_);
-#else
-    int32_t answer = data_[position_];
-    answer |= data_[position_ + 1] << 8;
-    answer |= data_[position_ + 2] << 16;
-    answer |= data_[position_ + 3] << 24;
-#endif
-    return answer;
-  }
-
-  void Advance(int by) { position_ += by; }
-
-  inline void CopyRaw(byte* to, int number_of_bytes);
-
-  inline int GetInt();
-
-  bool AtEOF();
-
-  int position() { return position_; }
-
- private:
-  const byte* data_;
-  int length_;
-  int position_;
-};
-
-
 // The Serializer/Deserializer class is a common superclass for Serializer and
 // Deserializer which is used to store common constants and methods used by
 // both.
@@ -187,17 +151,18 @@
  protected:
   // Where the pointed-to object can be found:
   enum Where {
-    kNewObject = 0,                 // Object is next in snapshot.
+    kNewObject = 0,  // Object is next in snapshot.
     // 1-6                             One per space.
-    kRootArray = 0x9,               // Object is found in root array.
-    kPartialSnapshotCache = 0xa,    // Object is in the cache.
-    kExternalReference = 0xb,       // Pointer to an external reference.
-    kSkip = 0xc,                    // Skip n bytes.
-    kNop = 0xd,                     // Does nothing, used to pad.
-    // 0xe-0xf                         Free.
-    kBackref = 0x10,                // Object is described relative to end.
+    kRootArray = 0x9,             // Object is found in root array.
+    kPartialSnapshotCache = 0xa,  // Object is in the cache.
+    kExternalReference = 0xb,     // Pointer to an external reference.
+    kSkip = 0xc,                  // Skip n bytes.
+    kBuiltin = 0xd,               // Builtin code object.
+    kAttachedReference = 0xe,     // Object is described in an attached list.
+    kNop = 0xf,                   // Does nothing, used to pad.
+    kBackref = 0x10,              // Object is described relative to end.
     // 0x11-0x16                       One per space.
-    kBackrefWithSkip = 0x18,        // Object is described relative to end.
+    kBackrefWithSkip = 0x18,  // Object is described relative to end.
     // 0x19-0x1e                       One per space.
     // 0x20-0x3f                       Used by misc. tags below.
     kPointedToMask = 0x3f
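// A small decoder sketch for the space-parameterized tags above: kNewObject,
// kBackref and kBackrefWithSkip each reserve the following values, one per
// space, so the space index rides in the low bits of the tag (the macro
// earlier extracts it with data & kSpaceMask). kSpaceMask = 7 is an
// assumption consistent with the "one per space" comments covering spaces
// 0-6; its definition is not part of this hunk, and the mask only applies to
// these three tags, not to fixed codes like kRootArray.
#include <cassert>

namespace {
const int kNewObject = 0x00;
const int kBackref = 0x10;
const int kBackrefWithSkip = 0x18;
const int kSpaceMask = 7;  // assumed; see lead-in comment

int WhereOf(int data) { return data & ~kSpaceMask; }
int SpaceOf(int data) { return data & kSpaceMask; }
}  // namespace

int main() {
  assert(WhereOf(kBackref + 3) == kBackref && SpaceOf(kBackref + 3) == 3);
  assert(WhereOf(kBackrefWithSkip + 6) == kBackrefWithSkip);
  assert(WhereOf(kNewObject + 1) == kNewObject && SpaceOf(kNewObject + 1) == 1);
  return 0;
}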
@@ -246,11 +211,11 @@
   // 0x73-0x7f            Repeat last word (subtract 0x72 to get the count).
   static const int kMaxRepeats = 0x7f - 0x72;
   static int CodeForRepeats(int repeats) {
-    ASSERT(repeats >= 1 && repeats <= kMaxRepeats);
+    DCHECK(repeats >= 1 && repeats <= kMaxRepeats);
     return 0x72 + repeats;
   }
   static int RepeatsForCode(int byte_code) {
-    ASSERT(byte_code >= kConstantRepeat && byte_code <= 0x7f);
+    DCHECK(byte_code >= kConstantRepeat && byte_code <= 0x7f);
     return byte_code - 0x72;
   }
   static const int kRootArrayConstants = 0xa0;
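// A round-trip check for the repeat codes above: counts 1..13 map onto the
// byte range 0x73..0x7f and back by adding and subtracting 0x72, exactly as
// CodeForRepeats/RepeatsForCode do (the 0x73 lower bound comes from the
// comment above).
#include <cassert>

namespace {
const int kMaxRepeats = 0x7f - 0x72;  // 13
int CodeForRepeats(int repeats) {
  assert(repeats >= 1 && repeats <= kMaxRepeats);
  return 0x72 + repeats;
}
int RepeatsForCode(int byte_code) {
  assert(byte_code >= 0x73 && byte_code <= 0x7f);
  return byte_code - 0x72;
}
}  // namespace

int main() {
  for (int r = 1; r <= kMaxRepeats; r++) {
    assert(RepeatsForCode(CodeForRepeats(r)) == r);
  }
  return 0;
}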
@@ -268,26 +233,6 @@
 };
 
 
-int SnapshotByteSource::GetInt() {
-  // This way of variable-length encoding integers does not suffer from branch
-  // mispredictions.
-  uint32_t answer = GetUnalignedInt();
-  int bytes = answer & 3;
-  Advance(bytes);
-  uint32_t mask = 0xffffffffu;
-  mask >>= 32 - (bytes << 3);
-  answer &= mask;
-  answer >>= 2;
-  return answer;
-}
-
-
-void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
-  MemCopy(to, data_ + position_, number_of_bytes);
-  position_ += number_of_bytes;
-}
-
-
 // A Deserializer reads a snapshot and reconstructs the Object graph it defines.
 class Deserializer: public SerializerDeserializer {
  public:
@@ -303,11 +248,21 @@
   void DeserializePartial(Isolate* isolate, Object** root);
 
   void set_reservation(int space_number, int reservation) {
-    ASSERT(space_number >= 0);
-    ASSERT(space_number <= LAST_SPACE);
+    DCHECK(space_number >= 0);
+    DCHECK(space_number <= LAST_SPACE);
     reservations_[space_number] = reservation;
   }
 
+  void FlushICacheForNewCodeObjects();
+
+  // Serialized user code references certain objects that are provided in a
+  // list. By calling this method, we assume that we are deserializing user
+  // code.
+  void SetAttachedObjects(Vector<Handle<Object> >* attached_objects) {
+    attached_objects_ = attached_objects;
+  }
+
+  bool deserializing_user_code() { return attached_objects_ != NULL; }
+
  private:
   virtual void VisitPointers(Object** start, Object** end);
 
@@ -328,16 +283,16 @@
       Object** start, Object** end, int space, Address object_address);
   void ReadObject(int space_number, Object** write_back);
 
+  // Special handling for serialized code like hooking up internalized strings.
+  HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
+  Object* ProcessBackRefInSerializedCode(Object* obj);
+
   // This routine both allocates a new object and keeps
   // track of where objects have been allocated so that we can
   // fix back references when deserializing.
   Address Allocate(int space_index, int size) {
     Address address = high_water_[space_index];
     high_water_[space_index] = address + size;
-    HeapProfiler* profiler = isolate_->heap_profiler();
-    if (profiler->is_tracking_allocations()) {
-      profiler->AllocationEvent(address, size);
-    }
     return address;
   }
 
@@ -349,11 +304,12 @@
     return HeapObject::FromAddress(high_water_[space] - offset);
   }
 
-  void FlushICacheForNewCodeObjects();
-
   // Cached current isolate.
   Isolate* isolate_;
 
+  // Objects from the attached object descriptions in the serialized user code.
+  Vector<Handle<Object> >* attached_objects_;
+
   SnapshotByteSource* source_;
   // This is the address of the next object that will be allocated in each
   // space.  It is used to calculate the addresses of back-references.
@@ -368,18 +324,6 @@
 };
 
 
-class SnapshotByteSink {
- public:
-  virtual ~SnapshotByteSink() { }
-  virtual void Put(int byte, const char* description) = 0;
-  virtual void PutSection(int byte, const char* description) {
-    Put(byte, description);
-  }
-  void PutInt(uintptr_t integer, const char* description);
-  virtual int Position() = 0;
-};
-
-
 // Mapping objects to their location after deserialization.
 // This is used during building, but not at runtime by V8.
 class SerializationAddressMapper {
@@ -397,13 +341,13 @@
   }
 
   int MappedTo(HeapObject* obj) {
-    ASSERT(IsMapped(obj));
+    DCHECK(IsMapped(obj));
     return static_cast<int>(reinterpret_cast<intptr_t>(
         serialization_map_->Lookup(Key(obj), Hash(obj), false)->value));
   }
 
   void AddMapping(HeapObject* obj, int to) {
-    ASSERT(!IsMapped(obj));
+    DCHECK(!IsMapped(obj));
     HashMap::Entry* entry =
         serialization_map_->Lookup(Key(obj), Hash(obj), true);
     entry->value = Value(to);
@@ -439,7 +383,7 @@
   // You can call this after serialization to find out how much space was used
   // in each space.
   int CurrentAllocationAddress(int space) const {
-    ASSERT(space < kNumberOfSpaces);
+    DCHECK(space < kNumberOfSpaces);
     return fullness_[space];
   }
 
@@ -458,7 +402,7 @@
   int RootIndex(HeapObject* heap_object, HowToCode from);
   intptr_t root_index_wave_front() { return root_index_wave_front_; }
   void set_root_index_wave_front(intptr_t value) {
-    ASSERT(value >= root_index_wave_front_);
+    DCHECK(value >= root_index_wave_front_);
     root_index_wave_front_ = value;
   }
 
@@ -486,8 +430,8 @@
     void VisitCell(RelocInfo* rinfo);
     void VisitRuntimeEntry(RelocInfo* reloc);
     // Used for serializing the external strings that hold the natives source.
-    void VisitExternalAsciiString(
-        v8::String::ExternalAsciiStringResource** resource);
+    void VisitExternalOneByteString(
+        v8::String::ExternalOneByteStringResource** resource);
     // We can't serialize a heap with external two byte strings.
     void VisitExternalTwoByteString(
         v8::String::ExternalStringResource** resource) {
@@ -515,12 +459,10 @@
                                HowToCode how_to_code,
                                WhereToPoint where_to_point,
                                int skip) = 0;
-  void SerializeReferenceToPreviousObject(
-      int space,
-      int address,
-      HowToCode how_to_code,
-      WhereToPoint where_to_point,
-      int skip);
+  void SerializeReferenceToPreviousObject(HeapObject* heap_object,
+                                          HowToCode how_to_code,
+                                          WhereToPoint where_to_point,
+                                          int skip);
   void InitializeAllocators();
   // This will return the space for an object.
   static int SpaceOfObject(HeapObject* object);
@@ -584,7 +526,7 @@
     // allow them to be part of the partial snapshot because they contain a
    // unique ID, and deserializing several partial snapshots containing scripts
     // would cause dupes.
-    ASSERT(!o->IsScript());
+    DCHECK(!o->IsScript());
     return o->IsName() || o->IsSharedFunctionInfo() ||
            o->IsHeapNumber() || o->IsCode() ||
            o->IsScopeInfo() ||
@@ -624,9 +566,142 @@
     SerializeWeakReferences();
     Pad();
   }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StartupSerializer);
 };
 
 
+class CodeSerializer : public Serializer {
+ public:
+  CodeSerializer(Isolate* isolate, SnapshotByteSink* sink, String* source)
+      : Serializer(isolate, sink), source_(source) {
+    set_root_index_wave_front(Heap::kStrongRootListLength);
+    InitializeCodeAddressMap();
+  }
+
+  static ScriptData* Serialize(Isolate* isolate,
+                               Handle<SharedFunctionInfo> info,
+                               Handle<String> source);
+
+  virtual void SerializeObject(Object* o, HowToCode how_to_code,
+                               WhereToPoint where_to_point, int skip);
+
+  static Handle<SharedFunctionInfo> Deserialize(Isolate* isolate,
+                                                ScriptData* data,
+                                                Handle<String> source);
+
+  static const int kSourceObjectIndex = 0;
+  static const int kCodeStubsBaseIndex = 1;
+
+  String* source() {
+    DCHECK(!AllowHeapAllocation::IsAllowed());
+    return source_;
+  }
+
+  List<uint32_t>* stub_keys() { return &stub_keys_; }
+
+ private:
+  void SerializeBuiltin(Code* builtin, HowToCode how_to_code,
+                        WhereToPoint where_to_point, int skip);
+  void SerializeCodeStub(Code* code, HowToCode how_to_code,
+                         WhereToPoint where_to_point, int skip);
+  void SerializeSourceObject(HowToCode how_to_code, WhereToPoint where_to_point,
+                             int skip);
+  void SerializeHeapObject(HeapObject* heap_object, HowToCode how_to_code,
+                           WhereToPoint where_to_point, int skip);
+  int AddCodeStubKey(uint32_t stub_key);
+
+  DisallowHeapAllocation no_gc_;
+  String* source_;
+  List<uint32_t> stub_keys_;
+  DISALLOW_COPY_AND_ASSIGN(CodeSerializer);
+};
+
+
+// Wrapper around ScriptData to provide code-serializer-specific functionality.
+class SerializedCodeData {
+ public:
+  // Used when consuming.
+  explicit SerializedCodeData(ScriptData* data, String* source)
+      : script_data_(data), owns_script_data_(false) {
+    DisallowHeapAllocation no_gc;
+    CHECK(IsSane(source));
+  }
+
+  // Used when producing.
+  SerializedCodeData(List<byte>* payload, CodeSerializer* cs);
+
+  ~SerializedCodeData() {
+    if (owns_script_data_) delete script_data_;
+  }
+
+  // Return ScriptData object and relinquish ownership over it to the caller.
+  ScriptData* GetScriptData() {
+    ScriptData* result = script_data_;
+    script_data_ = NULL;
+    DCHECK(owns_script_data_);
+    owns_script_data_ = false;
+    return result;
+  }
+
+  Vector<const uint32_t> CodeStubKeys() const {
+    return Vector<const uint32_t>(
+        reinterpret_cast<const uint32_t*>(script_data_->data() + kHeaderSize),
+        GetHeaderValue(kNumCodeStubKeysOffset));
+  }
+
+  const byte* Payload() const {
+    int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
+    return script_data_->data() + kHeaderSize + code_stubs_size;
+  }
+
+  int PayloadLength() const {
+    int payload_length = GetHeaderValue(kPayloadLengthOffset);
+    DCHECK_EQ(script_data_->data() + script_data_->length(),
+              Payload() + payload_length);
+    return payload_length;
+  }
+
+  int GetReservation(int space) const {
+    return GetHeaderValue(kReservationsOffset + space);
+  }
+
+ private:
+  void SetHeaderValue(int offset, int value) {
+    reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] =
+        value;
+  }
+
+  int GetHeaderValue(int offset) const {
+    return reinterpret_cast<const int*>(script_data_->data())[offset];
+  }
+
+  bool IsSane(String* source);
+
+  int CheckSum(String* source);
+
+  // The data header consists of int-sized entries:
+  // [0] version hash
+  // [1] number of code stub keys
+  // [2] payload length
+  // [3..9] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE.
+  static const int kCheckSumOffset = 0;
+  static const int kNumCodeStubKeysOffset = 1;
+  static const int kPayloadLengthOffset = 2;
+  static const int kReservationsOffset = 3;
+
+  static const int kNumSpaces = PROPERTY_CELL_SPACE - NEW_SPACE + 1;
+  static const int kHeaderEntries = kReservationsOffset + kNumSpaces;
+  static const int kHeaderSize = kHeaderEntries * kIntSize;
+
+  // Following the header, we store, in sequential order:
+  // - code stub keys
+  // - serialization payload
+
+  ScriptData* script_data_;
+  bool owns_script_data_;
+};
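// A standalone sketch of the SerializedCodeData layout documented above: ten
// int-sized header slots, then the stub-key table, then the payload. It
// assumes kIntSize == sizeof(int) and that stub keys are also int-sized,
// matching kInt32Size on common platforms.
#include <cassert>
#include <cstring>
#include <vector>

namespace {
const int kCheckSumOffset = 0;
const int kNumCodeStubKeysOffset = 1;
const int kPayloadLengthOffset = 2;
const int kReservationsOffset = 3;
const int kNumSpaces = 7;  // NEW_SPACE .. PROPERTY_CELL_SPACE
const int kHeaderEntries = kReservationsOffset + kNumSpaces;  // 10 slots
const int kHeaderSize = kHeaderEntries * static_cast<int>(sizeof(int));

void SetHeaderValue(std::vector<unsigned char>* data, int offset, int value) {
  std::memcpy(data->data() + offset * sizeof(int), &value, sizeof(int));
}

int GetHeaderValue(const std::vector<unsigned char>& data, int offset) {
  int value;
  std::memcpy(&value, data.data() + offset * sizeof(int), sizeof(int));
  return value;
}
}  // namespace

int main() {
  // A container sized for two stub keys and a 24-byte payload.
  std::vector<unsigned char> data(kHeaderSize + 2 * sizeof(int) + 24);
  SetHeaderValue(&data, kNumCodeStubKeysOffset, 2);
  SetHeaderValue(&data, kPayloadLengthOffset, 24);
  // The payload starts after the header and the stub-key table, and the
  // recorded payload length must account for the rest of the buffer,
  // mirroring the DCHECK in PayloadLength() above.
  int payload_start =
      kHeaderSize + GetHeaderValue(data, kNumCodeStubKeysOffset) *
                        static_cast<int>(sizeof(int));
  assert(payload_start + GetHeaderValue(data, kPayloadLengthOffset) ==
         static_cast<int>(data.size()));
  return 0;
}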
 } }  // namespace v8::internal
 
 #endif  // V8_SERIALIZE_H_
diff --git a/src/simulator.h b/src/simulator.h
index c873907..6dd08f4 100644
--- a/src/simulator.h
+++ b/src/simulator.h
@@ -15,6 +15,8 @@
 #include "src/arm/simulator-arm.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "src/mips/simulator-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/mips64/simulator-mips64.h"
 #elif V8_TARGET_ARCH_X87
 #include "src/x87/simulator-x87.h"
 #else
diff --git a/src/small-pointer-list.h b/src/small-pointer-list.h
index c4f8233..241689e 100644
--- a/src/small-pointer-list.h
+++ b/src/small-pointer-list.h
@@ -5,7 +5,7 @@
 #ifndef V8_SMALL_POINTER_LIST_H_
 #define V8_SMALL_POINTER_LIST_H_
 
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/globals.h"
 #include "src/zone.h"
 
@@ -38,7 +38,7 @@
     if ((data_ & kTagMask) == kSingletonTag) {
       list->Add(single_value(), zone);
     }
-    ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
+    DCHECK(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
     data_ = reinterpret_cast<intptr_t>(list) | kListTag;
   }
 
@@ -61,7 +61,7 @@
   }
 
   void Add(T* pointer, Zone* zone) {
-    ASSERT(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment));
+    DCHECK(IsAligned(reinterpret_cast<intptr_t>(pointer), kPointerAlignment));
     if ((data_ & kTagMask) == kEmptyTag) {
       data_ = reinterpret_cast<intptr_t>(pointer) | kSingletonTag;
       return;
@@ -70,7 +70,7 @@
       PointerList* list = new(zone) PointerList(2, zone);
       list->Add(single_value(), zone);
       list->Add(pointer, zone);
-      ASSERT(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
+      DCHECK(IsAligned(reinterpret_cast<intptr_t>(list), kPointerAlignment));
       data_ = reinterpret_cast<intptr_t>(list) | kListTag;
       return;
     }
@@ -80,9 +80,9 @@
   // Note: returns T* and not T*& (unlike List from list.h).
   // This makes the implementation simpler and more const correct.
   T* at(int i) const {
-    ASSERT((data_ & kTagMask) != kEmptyTag);
+    DCHECK((data_ & kTagMask) != kEmptyTag);
     if ((data_ & kTagMask) == kSingletonTag) {
-      ASSERT(i == 0);
+      DCHECK(i == 0);
       return single_value();
     }
     return list()->at(i);
@@ -104,7 +104,7 @@
   }
 
   T* RemoveLast() {
-    ASSERT((data_ & kTagMask) != kEmptyTag);
+    DCHECK((data_ & kTagMask) != kEmptyTag);
     if ((data_ & kTagMask) == kSingletonTag) {
       T* result = single_value();
       data_ = kEmptyTag;
@@ -115,11 +115,11 @@
 
   void Rewind(int pos) {
     if ((data_ & kTagMask) == kEmptyTag) {
-      ASSERT(pos == 0);
+      DCHECK(pos == 0);
       return;
     }
     if ((data_ & kTagMask) == kSingletonTag) {
-      ASSERT(pos == 0 || pos == 1);
+      DCHECK(pos == 0 || pos == 1);
       if (pos == 0) {
         data_ = kEmptyTag;
       }
@@ -155,13 +155,13 @@
   STATIC_ASSERT(kTagMask + 1 <= kPointerAlignment);
 
   T* single_value() const {
-    ASSERT((data_ & kTagMask) == kSingletonTag);
+    DCHECK((data_ & kTagMask) == kSingletonTag);
     STATIC_ASSERT(kSingletonTag == 0);
     return reinterpret_cast<T*>(data_);
   }
 
   PointerList* list() const {
-    ASSERT((data_ & kTagMask) == kListTag);
+    DCHECK((data_ & kTagMask) == kListTag);
     return reinterpret_cast<PointerList*>(data_ & kValueMask);
   }
 
diff --git a/src/smart-pointers.h b/src/smart-pointers.h
index db2206a..c4bbd0b 100644
--- a/src/smart-pointers.h
+++ b/src/smart-pointers.h
@@ -56,7 +56,7 @@
   }
 
   void Reset(T* new_value) {
-    ASSERT(p_ == NULL || p_ != new_value);
+    DCHECK(p_ == NULL || p_ != new_value);
     if (p_) Deallocator::Delete(p_);
     p_ = new_value;
   }
@@ -66,7 +66,7 @@
   // double freeing.
   SmartPointerBase<Deallocator, T>& operator=(
       const SmartPointerBase<Deallocator, T>& rhs) {
-    ASSERT(is_empty());
+    DCHECK(is_empty());
     T* tmp = rhs.p_;  // swap to handle self-assignment
     const_cast<SmartPointerBase<Deallocator, T>&>(rhs).p_ = NULL;
     p_ = tmp;
diff --git a/src/snapshot-common.cc b/src/snapshot-common.cc
index bef0969..a2d5213 100644
--- a/src/snapshot-common.cc
+++ b/src/snapshot-common.cc
@@ -7,51 +7,13 @@
 #include "src/v8.h"
 
 #include "src/api.h"
+#include "src/base/platform/platform.h"
 #include "src/serialize.h"
 #include "src/snapshot.h"
-#include "src/platform.h"
 
 namespace v8 {
 namespace internal {
 
-
-static void ReserveSpaceForSnapshot(Deserializer* deserializer,
-                                    const char* file_name) {
-  int file_name_length = StrLength(file_name) + 10;
-  Vector<char> name = Vector<char>::New(file_name_length + 1);
-  SNPrintF(name, "%s.size", file_name);
-  FILE* fp = OS::FOpen(name.start(), "r");
-  CHECK_NE(NULL, fp);
-  int new_size, pointer_size, data_size, code_size, map_size, cell_size,
-      property_cell_size;
-#ifdef _MSC_VER
-  // Avoid warning about unsafe fscanf from MSVC.
-  // Please note that this is only fine if %c and %s are not being used.
-#define fscanf fscanf_s
-#endif
-  CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
-  CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
-  CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
-  CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
-  CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
-  CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
-  CHECK_EQ(1, fscanf(fp, "property cell %d\n", &property_cell_size));
-#ifdef _MSC_VER
-#undef fscanf
-#endif
-  fclose(fp);
-  deserializer->set_reservation(NEW_SPACE, new_size);
-  deserializer->set_reservation(OLD_POINTER_SPACE, pointer_size);
-  deserializer->set_reservation(OLD_DATA_SPACE, data_size);
-  deserializer->set_reservation(CODE_SPACE, code_size);
-  deserializer->set_reservation(MAP_SPACE, map_size);
-  deserializer->set_reservation(CELL_SPACE, cell_size);
-  deserializer->set_reservation(PROPERTY_CELL_SPACE,
-                                property_cell_size);
-  name.Dispose();
-}
-
-
 void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
   deserializer->set_reservation(NEW_SPACE, new_space_used_);
   deserializer->set_reservation(OLD_POINTER_SPACE, pointer_space_used_);
@@ -64,29 +26,16 @@
 }
 
 
-bool Snapshot::Initialize(const char* snapshot_file) {
-  if (snapshot_file) {
-    int len;
-    byte* str = ReadBytes(snapshot_file, &len);
-    if (!str) return false;
-    bool success;
-    {
-      SnapshotByteSource source(str, len);
-      Deserializer deserializer(&source);
-      ReserveSpaceForSnapshot(&deserializer, snapshot_file);
-      success = V8::Initialize(&deserializer);
-    }
-    DeleteArray(str);
-    return success;
-  } else if (size_ > 0) {
-    ElapsedTimer timer;
+bool Snapshot::Initialize(Isolate* isolate) {
+  if (size_ > 0) {
+    base::ElapsedTimer timer;
     if (FLAG_profile_deserialization) {
       timer.Start();
     }
     SnapshotByteSource source(raw_data_, raw_size_);
     Deserializer deserializer(&source);
     ReserveSpaceForLinkedInSnapshot(&deserializer);
-    bool success = V8::Initialize(&deserializer);
+    bool success = isolate->Init(&deserializer);
     if (FLAG_profile_deserialization) {
       double ms = timer.Elapsed().InMillisecondsF();
       PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
@@ -123,4 +72,15 @@
   return Handle<Context>(Context::cast(root));
 }
 
+
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+// Dummy implementations of Set*FromFile(..) APIs.
+//
+// These are meant for use with snapshot-external.cc. Should this file be
+// compiled with those options, we simply supply the dummy implementations
+// below. This happens when compiling the mksnapshot utility.
+void SetNativesFromFile(StartupData* data) { CHECK(false); }
+void SetSnapshotFromFile(StartupData* data) { CHECK(false); }
+#endif  // V8_USE_EXTERNAL_STARTUP_DATA
+
 } }  // namespace v8::internal
diff --git a/src/snapshot-external.cc b/src/snapshot-external.cc
new file mode 100644
index 0000000..ee1a8f4
--- /dev/null
+++ b/src/snapshot-external.cc
@@ -0,0 +1,140 @@
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Used for building with external snapshots.
+
+#include "src/snapshot.h"
+
+#include "src/serialize.h"
+#include "src/snapshot-source-sink.h"
+#include "src/v8.h"  // for V8::Initialize
+
+namespace v8 {
+namespace internal {
+
+
+struct SnapshotImpl {
+ public:
+  const byte* data;
+  int size;
+  int new_space_used;
+  int pointer_space_used;
+  int data_space_used;
+  int code_space_used;
+  int map_space_used;
+  int cell_space_used;
+  int property_cell_space_used;
+
+  const byte* context_data;
+  int context_size;
+  int context_new_space_used;
+  int context_pointer_space_used;
+  int context_data_space_used;
+  int context_code_space_used;
+  int context_map_space_used;
+  int context_cell_space_used;
+  int context_property_cell_space_used;
+};
+
+
+static SnapshotImpl* snapshot_impl_ = NULL;
+
+
+bool Snapshot::HaveASnapshotToStartFrom() {
+  return snapshot_impl_ != NULL;
+}
+
+
+bool Snapshot::Initialize(Isolate* isolate) {
+  if (!HaveASnapshotToStartFrom())
+    return false;
+
+  base::ElapsedTimer timer;
+  if (FLAG_profile_deserialization) {
+    timer.Start();
+  }
+  SnapshotByteSource source(snapshot_impl_->data, snapshot_impl_->size);
+  Deserializer deserializer(&source);
+  deserializer.set_reservation(NEW_SPACE, snapshot_impl_->new_space_used);
+  deserializer.set_reservation(OLD_POINTER_SPACE,
+                               snapshot_impl_->pointer_space_used);
+  deserializer.set_reservation(OLD_DATA_SPACE,
+                               snapshot_impl_->data_space_used);
+  deserializer.set_reservation(CODE_SPACE, snapshot_impl_->code_space_used);
+  deserializer.set_reservation(MAP_SPACE, snapshot_impl_->map_space_used);
+  deserializer.set_reservation(CELL_SPACE, snapshot_impl_->cell_space_used);
+  deserializer.set_reservation(PROPERTY_CELL_SPACE,
+                               snapshot_impl_->property_cell_space_used);
+  bool success = isolate->Init(&deserializer);
+  if (FLAG_profile_deserialization) {
+    double ms = timer.Elapsed().InMillisecondsF();
+    PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
+  }
+  return success;
+}
+
+
+Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
+  if (!HaveASnapshotToStartFrom())
+    return Handle<Context>();
+
+  SnapshotByteSource source(snapshot_impl_->context_data,
+                            snapshot_impl_->context_size);
+  Deserializer deserializer(&source);
+  deserializer.set_reservation(NEW_SPACE,
+                               snapshot_impl_->context_new_space_used);
+  deserializer.set_reservation(OLD_POINTER_SPACE,
+                               snapshot_impl_->context_pointer_space_used);
+  deserializer.set_reservation(OLD_DATA_SPACE,
+                               snapshot_impl_->context_data_space_used);
+  deserializer.set_reservation(CODE_SPACE,
+                               snapshot_impl_->context_code_space_used);
+  deserializer.set_reservation(MAP_SPACE,
+                               snapshot_impl_->context_map_space_used);
+  deserializer.set_reservation(CELL_SPACE,
+                               snapshot_impl_->context_cell_space_used);
+  deserializer.set_reservation(PROPERTY_CELL_SPACE,
+                               snapshot_impl_->
+                                   context_property_cell_space_used);
+  Object* root;
+  deserializer.DeserializePartial(isolate, &root);
+  CHECK(root->IsContext());
+  return Handle<Context>(Context::cast(root));
+}
+
+
+void SetSnapshotFromFile(StartupData* snapshot_blob) {
+  DCHECK(snapshot_blob);
+  DCHECK(snapshot_blob->data);
+  DCHECK(snapshot_blob->raw_size > 0);
+  DCHECK(!snapshot_impl_);
+
+  snapshot_impl_ = new SnapshotImpl;
+  SnapshotByteSource source(reinterpret_cast<const byte*>(snapshot_blob->data),
+                            snapshot_blob->raw_size);
+
+  bool success = source.GetBlob(&snapshot_impl_->data,
+                                &snapshot_impl_->size);
+  snapshot_impl_->new_space_used = source.GetInt();
+  snapshot_impl_->pointer_space_used = source.GetInt();
+  snapshot_impl_->data_space_used = source.GetInt();
+  snapshot_impl_->code_space_used = source.GetInt();
+  snapshot_impl_->map_space_used = source.GetInt();
+  snapshot_impl_->cell_space_used = source.GetInt();
+  snapshot_impl_->property_cell_space_used = source.GetInt();
+
+  success &= source.GetBlob(&snapshot_impl_->context_data,
+                            &snapshot_impl_->context_size);
+  snapshot_impl_->context_new_space_used = source.GetInt();
+  snapshot_impl_->context_pointer_space_used = source.GetInt();
+  snapshot_impl_->context_data_space_used = source.GetInt();
+  snapshot_impl_->context_code_space_used = source.GetInt();
+  snapshot_impl_->context_map_space_used = source.GetInt();
+  snapshot_impl_->context_cell_space_used = source.GetInt();
+  snapshot_impl_->context_property_cell_space_used = source.GetInt();
+
+  DCHECK(success);
+}
+
+} }  // namespace v8::internal
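SetSnapshotFromFile above fixes the wire format of the external startup blob: a length-prefixed startup snapshot, seven per-space reservation counts, then the same pair for the partial (context) snapshot. A minimal writer sketch that mirrors that read order; the SerializedBlob struct and WriteStartupBlob helper are illustrative, not part of this change:

    // Sketch only: mirrors the read order in SetSnapshotFromFile. The space
    // counts follow the order of the set_reservation() calls above.
    struct SerializedBlob {
      byte* data;
      int size;
      int space_used[7];  // new, old pointer, old data, code, map, cell,
                          // property cell.
    };

    void WriteStartupBlob(SnapshotByteSink* sink,
                          const SerializedBlob& startup,
                          const SerializedBlob& context) {
      sink->PutBlob(startup.data, startup.size, "snapshot");
      for (int i = 0; i < 7; i++) sink->PutInt(startup.space_used[i], "used");
      sink->PutBlob(context.data, context.size, "context");
      for (int i = 0; i < 7; i++) sink->PutInt(context.space_used[i], "used");
    }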
diff --git a/src/snapshot-source-sink.cc b/src/snapshot-source-sink.cc
new file mode 100644
index 0000000..44f8706
--- /dev/null
+++ b/src/snapshot-source-sink.cc
@@ -0,0 +1,97 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+#include "src/snapshot-source-sink.h"
+
+#include "src/base/logging.h"
+#include "src/handles-inl.h"
+#include "src/serialize.h"  // for SerializerDeserializer::nop() in AtEOF()
+
+
+namespace v8 {
+namespace internal {
+
+
+SnapshotByteSource::SnapshotByteSource(const byte* array, int length)
+    : data_(array), length_(length), position_(0) {
+}
+
+
+SnapshotByteSource::~SnapshotByteSource() { }
+
+
+int32_t SnapshotByteSource::GetUnalignedInt() {
+  DCHECK(position_ < length_);  // Require at least one byte left.
+  int32_t answer = data_[position_];
+  answer |= data_[position_ + 1] << 8;
+  answer |= data_[position_ + 2] << 16;
+  answer |= data_[position_ + 3] << 24;
+  return answer;
+}
+
+
+void SnapshotByteSource::CopyRaw(byte* to, int number_of_bytes) {
+  MemCopy(to, data_ + position_, number_of_bytes);
+  position_ += number_of_bytes;
+}
+
+
+void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
+  DCHECK(integer < 1 << 22);
+  integer <<= 2;
+  int bytes = 1;
+  if (integer > 0xff) bytes = 2;
+  if (integer > 0xffff) bytes = 3;
+  integer |= bytes;
+  Put(static_cast<int>(integer & 0xff), "IntPart1");
+  if (bytes > 1) Put(static_cast<int>((integer >> 8) & 0xff), "IntPart2");
+  if (bytes > 2) Put(static_cast<int>((integer >> 16) & 0xff), "IntPart3");
+}
+
+void SnapshotByteSink::PutRaw(byte* data, int number_of_bytes,
+                              const char* description) {
+  for (int i = 0; i < number_of_bytes; ++i) {
+    Put(data[i], description);
+  }
+}
+
+void SnapshotByteSink::PutBlob(byte* data, int number_of_bytes,
+                               const char* description) {
+  PutInt(number_of_bytes, description);
+  PutRaw(data, number_of_bytes, description);
+}
+
+
+bool SnapshotByteSource::AtEOF() {
+  if (0u + length_ - position_ > 2 * sizeof(uint32_t)) return false;
+  for (int x = position_; x < length_; x++) {
+    if (data_[x] != SerializerDeserializer::nop()) return false;
+  }
+  return true;
+}
+
+
+bool SnapshotByteSource::GetBlob(const byte** data, int* number_of_bytes) {
+  int size = GetInt();
+  *number_of_bytes = size;
+
+  if (position_ + size < length_) {
+    *data = &data_[position_];
+    Advance(size);
+    return true;
+  } else {
+    Advance(length_ - position_);  // Not enough data left; skip to the end.
+    return false;
+  }
+}
+
+
+void DebugSnapshotSink::Put(byte b, const char* description) {
+  PrintF("%24s: %x\n", description, b);
+  sink_->Put(b, description);
+}
+
+}  // namespace internal
+}  // namespace v8
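The PutInt/GetInt pair above defines a little-endian variable-length encoding: the value is shifted left by two and the low two bits carry the byte count. A standalone round-trip of the scheme, using plain stdint types instead of V8's byte typedef:

    // Round-trip sketch of the encoding above, for the value 0x1234.
    #include <assert.h>
    #include <stdint.h>

    int main() {
      uint32_t value = 0x1234;       // Must be < 1 << 22.
      uint32_t tagged = value << 2;  // 0x48D0.
      int bytes = 1;
      if (tagged > 0xff) bytes = 2;  // 0x48D0 > 0xff, so two bytes here.
      if (tagged > 0xffff) bytes = 3;
      tagged |= bytes;               // 0x48D2: length tag in the low two bits.
      uint8_t stream[4] = {uint8_t(tagged & 0xff), uint8_t((tagged >> 8) & 0xff),
                           0, 0};    // Trailing padding for the 4-byte read.
      // Decode: fixed four-byte little-endian read, mask to the encoded length.
      uint32_t raw = stream[0] | stream[1] << 8 | stream[2] << 16 |
                     stream[3] << 24;
      int n = raw & 3;                                 // 2.
      uint32_t mask = 0xffffffffu >> (32 - (n << 3));  // 0xffff.
      assert(((raw & mask) >> 2) == value);
      return 0;
    }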
diff --git a/src/snapshot-source-sink.h b/src/snapshot-source-sink.h
new file mode 100644
index 0000000..3c64bca
--- /dev/null
+++ b/src/snapshot-source-sink.h
@@ -0,0 +1,125 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_SNAPSHOT_SOURCE_SINK_H_
+#define V8_SNAPSHOT_SOURCE_SINK_H_
+
+#include "src/base/logging.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+
+/**
+ * Source to read snapshot and builtins files from.
+ *
+ * Note: The byte array is owned by the caller, not by this class.
+ */
+class SnapshotByteSource FINAL {
+ public:
+  SnapshotByteSource(const byte* array, int length);
+  ~SnapshotByteSource();
+
+  bool HasMore() { return position_ < length_; }
+
+  int Get() {
+    DCHECK(position_ < length_);
+    return data_[position_++];
+  }
+
+  int32_t GetUnalignedInt();
+
+  void Advance(int by) { position_ += by; }
+
+  void CopyRaw(byte* to, int number_of_bytes);
+
+  inline int GetInt() {
+    // Decoding always reads four bytes and masks off the unused ones, so the
+    // variable-length encoding does not suffer from branch mispredictions.
+    uint32_t answer = GetUnalignedInt();
+    int bytes = answer & 3;
+    Advance(bytes);
+    uint32_t mask = 0xffffffffu;
+    mask >>= 32 - (bytes << 3);
+    answer &= mask;
+    answer >>= 2;
+    return answer;
+  }
+
+  bool GetBlob(const byte** data, int* number_of_bytes);
+
+  bool AtEOF();
+
+  int position() { return position_; }
+
+ private:
+  const byte* data_;
+  int length_;
+  int position_;
+
+  DISALLOW_COPY_AND_ASSIGN(SnapshotByteSource);
+};
+
+
+/**
+ * Sink to write snapshot files to.
+ *
+ * Subclasses must implement the actual storage or I/O.
+ */
+class SnapshotByteSink {
+ public:
+  virtual ~SnapshotByteSink() { }
+  virtual void Put(byte b, const char* description) = 0;
+  virtual void PutSection(int b, const char* description) {
+    DCHECK_LE(b, kMaxUInt8);
+    Put(static_cast<byte>(b), description);
+  }
+  void PutInt(uintptr_t integer, const char* description);
+  void PutRaw(byte* data, int number_of_bytes, const char* description);
+  void PutBlob(byte* data, int number_of_bytes, const char* description);
+  virtual int Position() = 0;
+};
+
+
+class DummySnapshotSink : public SnapshotByteSink {
+ public:
+  DummySnapshotSink() : length_(0) {}
+  virtual ~DummySnapshotSink() {}
+  virtual void Put(byte b, const char* description) { length_++; }
+  virtual int Position() { return length_; }
+
+ private:
+  int length_;
+};
+
+
+// Wrap a SnapshotByteSink into a DebugSnapshotSink to get debugging output.
+class DebugSnapshotSink : public SnapshotByteSink {
+ public:
+  explicit DebugSnapshotSink(SnapshotByteSink* chained) : sink_(chained) {}
+  virtual void Put(byte b, const char* description) OVERRIDE;
+  virtual int Position() OVERRIDE { return sink_->Position(); }
+
+ private:
+  SnapshotByteSink* sink_;
+};
+
+
+class ListSnapshotSink : public i::SnapshotByteSink {
+ public:
+  explicit ListSnapshotSink(i::List<byte>* data) : data_(data) {}
+  virtual void Put(byte b, const char* description) OVERRIDE {
+    data_->Add(b);
+  }
+  virtual int Position() OVERRIDE { return data_->length(); }
+
+ private:
+  i::List<byte>* data_;
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_SNAPSHOT_SOURCE_SINK_H_
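The sink classes above compose as a small decorator hierarchy: ListSnapshotSink is the concrete in-memory sink, and DebugSnapshotSink logs each byte before forwarding it to the wrapped sink. A usage sketch (variable names hypothetical, inside namespace v8::internal):

    // Sketch: collect an encoded stream in memory, tracing every byte.
    List<byte> bytes;
    ListSnapshotSink memory_sink(&bytes);
    DebugSnapshotSink traced_sink(&memory_sink);  // Prints, then forwards.
    traced_sink.PutInt(42, "example value");      // Logged as "IntPart1".
    // bytes now holds the encoding; bytes.length() == traced_sink.Position().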
diff --git a/src/snapshot.h b/src/snapshot.h
index 17191f0..3d752a7 100644
--- a/src/snapshot.h
+++ b/src/snapshot.h
@@ -12,23 +12,16 @@
 
 class Snapshot {
  public:
-  // Initialize the VM from the given snapshot file. If snapshot_file is
-  // NULL, use the internal snapshot instead. Returns false if no snapshot
-  // could be found.
-  static bool Initialize(const char* snapshot_file = NULL);
+  // Initialize the Isolate from the internal snapshot. Returns false if no
+  // snapshot could be found.
+  static bool Initialize(Isolate* isolate);
 
   static bool HaveASnapshotToStartFrom();
 
   // Create a new context using the internal partial snapshot.
   static Handle<Context> NewContextFromSnapshot(Isolate* isolate);
 
-  // Returns whether or not the snapshot is enabled.
-  static bool IsEnabled() { return size_ != 0; }
-
-  // Write snapshot to the given file. Returns true if snapshot was written
-  // successfully.
-  static bool WriteToFile(const char* snapshot_file);
-
+  // These methods support COMPRESS_STARTUP_DATA_BZ2.
   static const byte* data() { return data_; }
   static int size() { return size_; }
   static int raw_size() { return raw_size_; }
@@ -72,6 +65,10 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };
 
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+void SetSnapshotFromFile(StartupData* snapshot_blob);
+#endif
+
 } }  // namespace v8::internal
 
 #endif  // V8_SNAPSHOT_H_
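With V8_USE_EXTERNAL_STARTUP_DATA defined, an embedder loads the blob and passes it to SetSnapshotFromFile before the first isolate is created. A hedged sketch: the path and error handling are illustrative, only the data and raw_size fields that SetSnapshotFromFile checks are set, and a production embedder would go through the public v8.h API rather than calling into v8::internal directly.

    // Sketch: read an external snapshot blob from disk and register it.
    #include <stdio.h>
    #include <stdlib.h>

    void LoadSnapshotBlob(const char* path) {
      FILE* f = fopen(path, "rb");
      if (f == NULL) abort();
      fseek(f, 0, SEEK_END);
      long size = ftell(f);
      rewind(f);
      char* data = static_cast<char*>(malloc(size));
      if (fread(data, 1, size, f) != static_cast<size_t>(size)) abort();
      fclose(f);

      v8::StartupData blob;
      blob.data = data;  // Must outlive the isolate; intentionally not freed.
      blob.raw_size = static_cast<int>(size);
      v8::internal::SetSnapshotFromFile(&blob);
    }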
diff --git a/src/spaces-inl.h b/src/spaces-inl.h
deleted file mode 100644
index e863b51..0000000
--- a/src/spaces-inl.h
+++ /dev/null
@@ -1,327 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SPACES_INL_H_
-#define V8_SPACES_INL_H_
-
-#include "src/heap-profiler.h"
-#include "src/isolate.h"
-#include "src/spaces.h"
-#include "src/v8memory.h"
-
-namespace v8 {
-namespace internal {
-
-
-// -----------------------------------------------------------------------------
-// Bitmap
-
-void Bitmap::Clear(MemoryChunk* chunk) {
-  Bitmap* bitmap = chunk->markbits();
-  for (int i = 0; i < bitmap->CellsCount(); i++) bitmap->cells()[i] = 0;
-  chunk->ResetLiveBytes();
-}
-
-
-// -----------------------------------------------------------------------------
-// PageIterator
-
-
-PageIterator::PageIterator(PagedSpace* space)
-    : space_(space),
-      prev_page_(&space->anchor_),
-      next_page_(prev_page_->next_page()) { }
-
-
-bool PageIterator::has_next() {
-  return next_page_ != &space_->anchor_;
-}
-
-
-Page* PageIterator::next() {
-  ASSERT(has_next());
-  prev_page_ = next_page_;
-  next_page_ = next_page_->next_page();
-  return prev_page_;
-}
-
-
-// -----------------------------------------------------------------------------
-// NewSpacePageIterator
-
-
-NewSpacePageIterator::NewSpacePageIterator(NewSpace* space)
-    : prev_page_(NewSpacePage::FromAddress(space->ToSpaceStart())->prev_page()),
-      next_page_(NewSpacePage::FromAddress(space->ToSpaceStart())),
-      last_page_(NewSpacePage::FromLimit(space->ToSpaceEnd())) { }
-
-NewSpacePageIterator::NewSpacePageIterator(SemiSpace* space)
-    : prev_page_(space->anchor()),
-      next_page_(prev_page_->next_page()),
-      last_page_(prev_page_->prev_page()) { }
-
-NewSpacePageIterator::NewSpacePageIterator(Address start, Address limit)
-    : prev_page_(NewSpacePage::FromAddress(start)->prev_page()),
-      next_page_(NewSpacePage::FromAddress(start)),
-      last_page_(NewSpacePage::FromLimit(limit)) {
-  SemiSpace::AssertValidRange(start, limit);
-}
-
-
-bool NewSpacePageIterator::has_next() {
-  return prev_page_ != last_page_;
-}
-
-
-NewSpacePage* NewSpacePageIterator::next() {
-  ASSERT(has_next());
-  prev_page_ = next_page_;
-  next_page_ = next_page_->next_page();
-  return prev_page_;
-}
-
-
-// -----------------------------------------------------------------------------
-// HeapObjectIterator
-HeapObject* HeapObjectIterator::FromCurrentPage() {
-  while (cur_addr_ != cur_end_) {
-    if (cur_addr_ == space_->top() && cur_addr_ != space_->limit()) {
-      cur_addr_ = space_->limit();
-      continue;
-    }
-    HeapObject* obj = HeapObject::FromAddress(cur_addr_);
-    int obj_size = (size_func_ == NULL) ? obj->Size() : size_func_(obj);
-    cur_addr_ += obj_size;
-    ASSERT(cur_addr_ <= cur_end_);
-    if (!obj->IsFiller()) {
-      ASSERT_OBJECT_SIZE(obj_size);
-      return obj;
-    }
-  }
-  return NULL;
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-
-#ifdef ENABLE_HEAP_PROTECTION
-
-void MemoryAllocator::Protect(Address start, size_t size) {
-  OS::Protect(start, size);
-}
-
-
-void MemoryAllocator::Unprotect(Address start,
-                                size_t size,
-                                Executability executable) {
-  OS::Unprotect(start, size, executable);
-}
-
-
-void MemoryAllocator::ProtectChunkFromPage(Page* page) {
-  int id = GetChunkId(page);
-  OS::Protect(chunks_[id].address(), chunks_[id].size());
-}
-
-
-void MemoryAllocator::UnprotectChunkFromPage(Page* page) {
-  int id = GetChunkId(page);
-  OS::Unprotect(chunks_[id].address(), chunks_[id].size(),
-                chunks_[id].owner()->executable() == EXECUTABLE);
-}
-
-#endif
-
-
-// --------------------------------------------------------------------------
-// PagedSpace
-Page* Page::Initialize(Heap* heap,
-                       MemoryChunk* chunk,
-                       Executability executable,
-                       PagedSpace* owner) {
-  Page* page = reinterpret_cast<Page*>(chunk);
-  ASSERT(page->area_size() <= kMaxRegularHeapObjectSize);
-  ASSERT(chunk->owner() == owner);
-  owner->IncreaseCapacity(page->area_size());
-  owner->Free(page->area_start(), page->area_size());
-
-  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-
-  return page;
-}
-
-
-bool PagedSpace::Contains(Address addr) {
-  Page* p = Page::FromAddress(addr);
-  if (!p->is_valid()) return false;
-  return p->owner() == this;
-}
-
-
-void MemoryChunk::set_scan_on_scavenge(bool scan) {
-  if (scan) {
-    if (!scan_on_scavenge()) heap_->increment_scan_on_scavenge_pages();
-    SetFlag(SCAN_ON_SCAVENGE);
-  } else {
-    if (scan_on_scavenge()) heap_->decrement_scan_on_scavenge_pages();
-    ClearFlag(SCAN_ON_SCAVENGE);
-  }
-  heap_->incremental_marking()->SetOldSpacePageFlags(this);
-}
-
-
-MemoryChunk* MemoryChunk::FromAnyPointerAddress(Heap* heap, Address addr) {
-  MemoryChunk* maybe = reinterpret_cast<MemoryChunk*>(
-      OffsetFrom(addr) & ~Page::kPageAlignmentMask);
-  if (maybe->owner() != NULL) return maybe;
-  LargeObjectIterator iterator(heap->lo_space());
-  for (HeapObject* o = iterator.Next(); o != NULL; o = iterator.Next()) {
-    // Fixed arrays are the only pointer-containing objects in large object
-    // space.
-    if (o->IsFixedArray()) {
-      MemoryChunk* chunk = MemoryChunk::FromAddress(o->address());
-      if (chunk->Contains(addr)) {
-        return chunk;
-      }
-    }
-  }
-  UNREACHABLE();
-  return NULL;
-}
-
-
-void MemoryChunk::UpdateHighWaterMark(Address mark) {
-  if (mark == NULL) return;
-  // Need to subtract one from the mark because when a chunk is full the
-  // top points to the next address after the chunk, which effectively belongs
-  // to another chunk. See the comment to Page::FromAllocationTop.
-  MemoryChunk* chunk = MemoryChunk::FromAddress(mark - 1);
-  int new_mark = static_cast<int>(mark - chunk->address());
-  if (new_mark > chunk->high_water_mark_) {
-    chunk->high_water_mark_ = new_mark;
-  }
-}
-
-
-PointerChunkIterator::PointerChunkIterator(Heap* heap)
-    : state_(kOldPointerState),
-      old_pointer_iterator_(heap->old_pointer_space()),
-      map_iterator_(heap->map_space()),
-      lo_iterator_(heap->lo_space()) { }
-
-
-Page* Page::next_page() {
-  ASSERT(next_chunk()->owner() == owner());
-  return static_cast<Page*>(next_chunk());
-}
-
-
-Page* Page::prev_page() {
-  ASSERT(prev_chunk()->owner() == owner());
-  return static_cast<Page*>(prev_chunk());
-}
-
-
-void Page::set_next_page(Page* page) {
-  ASSERT(page->owner() == owner());
-  set_next_chunk(page);
-}
-
-
-void Page::set_prev_page(Page* page) {
-  ASSERT(page->owner() == owner());
-  set_prev_chunk(page);
-}
-
-
-// Try linear allocation in the page of alloc_info's allocation top.  Does
-// not contain slow case logic (e.g. move to the next page or try free list
-// allocation) so it can be used by all the allocation functions and for all
-// the paged spaces.
-HeapObject* PagedSpace::AllocateLinearly(int size_in_bytes) {
-  Address current_top = allocation_info_.top();
-  Address new_top = current_top + size_in_bytes;
-  if (new_top > allocation_info_.limit()) return NULL;
-
-  allocation_info_.set_top(new_top);
-  return HeapObject::FromAddress(current_top);
-}
-
-
-// Raw allocation.
-AllocationResult PagedSpace::AllocateRaw(int size_in_bytes) {
-  HeapObject* object = AllocateLinearly(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
-
-  ASSERT(!heap()->linear_allocation() ||
-         (anchor_.next_chunk() == &anchor_ &&
-          anchor_.prev_chunk() == &anchor_));
-
-  object = free_list_.Allocate(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
-
-  object = SlowAllocateRaw(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
-
-  return AllocationResult::Retry(identity());
-}
-
-
-// -----------------------------------------------------------------------------
-// NewSpace
-
-
-AllocationResult NewSpace::AllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top();
-
-  if (allocation_info_.limit() - old_top < size_in_bytes) {
-    return SlowAllocateRaw(size_in_bytes);
-  }
-
-  HeapObject* obj = HeapObject::FromAddress(old_top);
-  allocation_info_.set_top(allocation_info_.top() + size_in_bytes);
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
-  return obj;
-}
-
-
-LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
-  heap->incremental_marking()->SetOldSpacePageFlags(chunk);
-  return static_cast<LargePage*>(chunk);
-}
-
-
-intptr_t LargeObjectSpace::Available() {
-  return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
-}
-
-
-bool FreeListNode::IsFreeListNode(HeapObject* object) {
-  Map* map = object->map();
-  Heap* heap = object->GetHeap();
-  return map == heap->raw_unchecked_free_space_map()
-      || map == heap->raw_unchecked_one_pointer_filler_map()
-      || map == heap->raw_unchecked_two_pointer_filler_map();
-}
-
-} }  // namespace v8::internal
-
-#endif  // V8_SPACES_INL_H_
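The deleted spaces-inl.h carried the allocation fast path: AllocateLinearly bumps allocation_info_.top() within the current linear area, and PagedSpace::AllocateRaw falls back to the free list and then to SlowAllocateRaw when the bump fails. A type-simplified sketch of that bump-pointer step, with Address reduced to a raw byte pointer:

    // Sketch of the deleted AllocateLinearly fast path, outside V8's types.
    typedef unsigned char* Address;

    struct AllocationInfo {
      Address top;    // Next free byte in the current linear area.
      Address limit;  // One past the end of the linear area.
    };

    // Returns the allocated block, or NULL when the linear area is full and
    // the caller must fall back to the free list / slow path.
    Address AllocateLinearly(AllocationInfo* info, int size_in_bytes) {
      Address current_top = info->top;
      Address new_top = current_top + size_in_bytes;
      if (new_top > info->limit) return NULL;
      info->top = new_top;
      return current_top;
    }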
diff --git a/src/spaces.cc b/src/spaces.cc
deleted file mode 100644
index 69a0145..0000000
--- a/src/spaces.cc
+++ /dev/null
@@ -1,3145 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/full-codegen.h"
-#include "src/macro-assembler.h"
-#include "src/mark-compact.h"
-#include "src/msan.h"
-#include "src/platform.h"
-
-namespace v8 {
-namespace internal {
-
-
-// ----------------------------------------------------------------------------
-// HeapObjectIterator
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
-  // You can't actually iterate over the anchor page.  It is not a real page,
-  // just an anchor for the double linked page list.  Initialize as if we have
-  // reached the end of the anchor page, then the first iteration will move on
-  // to the first page.
-  Initialize(space,
-             NULL,
-             NULL,
-             kAllPagesInSpace,
-             NULL);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
-                                       HeapObjectCallback size_func) {
-  // You can't actually iterate over the anchor page.  It is not a real page,
-  // just an anchor for the double linked page list.  Initialize the current
-  // address and end as NULL, then the first iteration will move on
-  // to the first page.
-  Initialize(space,
-             NULL,
-             NULL,
-             kAllPagesInSpace,
-             size_func);
-}
-
-
-HeapObjectIterator::HeapObjectIterator(Page* page,
-                                       HeapObjectCallback size_func) {
-  Space* owner = page->owner();
-  ASSERT(owner == page->heap()->old_pointer_space() ||
-         owner == page->heap()->old_data_space() ||
-         owner == page->heap()->map_space() ||
-         owner == page->heap()->cell_space() ||
-         owner == page->heap()->property_cell_space() ||
-         owner == page->heap()->code_space());
-  Initialize(reinterpret_cast<PagedSpace*>(owner),
-             page->area_start(),
-             page->area_end(),
-             kOnePageOnly,
-             size_func);
-  ASSERT(page->WasSweptPrecisely());
-}
-
-
-void HeapObjectIterator::Initialize(PagedSpace* space,
-                                    Address cur, Address end,
-                                    HeapObjectIterator::PageMode mode,
-                                    HeapObjectCallback size_f) {
-  // Check that we actually can iterate this space.
-  ASSERT(!space->was_swept_conservatively());
-
-  space_ = space;
-  cur_addr_ = cur;
-  cur_end_ = end;
-  page_mode_ = mode;
-  size_func_ = size_f;
-}
-
-
-// We have hit the end of the page and should advance to the next block of
-// objects.  This happens at the end of the page.
-bool HeapObjectIterator::AdvanceToNextPage() {
-  ASSERT(cur_addr_ == cur_end_);
-  if (page_mode_ == kOnePageOnly) return false;
-  Page* cur_page;
-  if (cur_addr_ == NULL) {
-    cur_page = space_->anchor();
-  } else {
-    cur_page = Page::FromAddress(cur_addr_ - 1);
-    ASSERT(cur_addr_ == cur_page->area_end());
-  }
-  cur_page = cur_page->next_page();
-  if (cur_page == space_->anchor()) return false;
-  cur_addr_ = cur_page->area_start();
-  cur_end_ = cur_page->area_end();
-  ASSERT(cur_page->WasSweptPrecisely());
-  return true;
-}
-
-
-// -----------------------------------------------------------------------------
-// CodeRange
-
-
-CodeRange::CodeRange(Isolate* isolate)
-    : isolate_(isolate),
-      code_range_(NULL),
-      free_list_(0),
-      allocation_list_(0),
-      current_allocation_block_index_(0) {
-}
-
-
-bool CodeRange::SetUp(size_t requested) {
-  ASSERT(code_range_ == NULL);
-
-  if (requested == 0) {
-    // When a target requires the code range feature, we put all code objects
-    // in a kMaximalCodeRangeSize range of virtual address space, so that
-    // they can call each other with near calls.
-    if (kRequiresCodeRange) {
-      requested = kMaximalCodeRangeSize;
-    } else {
-      return true;
-    }
-  }
-
-  ASSERT(!kRequiresCodeRange || requested <= kMaximalCodeRangeSize);
-  code_range_ = new VirtualMemory(requested);
-  CHECK(code_range_ != NULL);
-  if (!code_range_->IsReserved()) {
-    delete code_range_;
-    code_range_ = NULL;
-    return false;
-  }
-
-  // We are sure that we have mapped a block of requested addresses.
-  ASSERT(code_range_->size() == requested);
-  LOG(isolate_,
-      NewEvent("CodeRange", code_range_->address(), requested));
-  Address base = reinterpret_cast<Address>(code_range_->address());
-  Address aligned_base =
-      RoundUp(reinterpret_cast<Address>(code_range_->address()),
-              MemoryChunk::kAlignment);
-  size_t size = code_range_->size() - (aligned_base - base);
-  allocation_list_.Add(FreeBlock(aligned_base, size));
-  current_allocation_block_index_ = 0;
-  return true;
-}
-
-
-int CodeRange::CompareFreeBlockAddress(const FreeBlock* left,
-                                       const FreeBlock* right) {
-  // The entire point of CodeRange is that the difference between two
-  // addresses in the range can be represented as a signed 32-bit int,
-  // so the cast is semantically correct.
-  return static_cast<int>(left->start - right->start);
-}
-
-
-bool CodeRange::GetNextAllocationBlock(size_t requested) {
-  for (current_allocation_block_index_++;
-       current_allocation_block_index_ < allocation_list_.length();
-       current_allocation_block_index_++) {
-    if (requested <= allocation_list_[current_allocation_block_index_].size) {
-      return true;  // Found a large enough allocation block.
-    }
-  }
-
-  // Sort and merge the free blocks on the free list and the allocation list.
-  free_list_.AddAll(allocation_list_);
-  allocation_list_.Clear();
-  free_list_.Sort(&CompareFreeBlockAddress);
-  for (int i = 0; i < free_list_.length();) {
-    FreeBlock merged = free_list_[i];
-    i++;
-    // Add adjacent free blocks to the current merged block.
-    while (i < free_list_.length() &&
-           free_list_[i].start == merged.start + merged.size) {
-      merged.size += free_list_[i].size;
-      i++;
-    }
-    if (merged.size > 0) {
-      allocation_list_.Add(merged);
-    }
-  }
-  free_list_.Clear();
-
-  for (current_allocation_block_index_ = 0;
-       current_allocation_block_index_ < allocation_list_.length();
-       current_allocation_block_index_++) {
-    if (requested <= allocation_list_[current_allocation_block_index_].size) {
-      return true;  // Found a large enough allocation block.
-    }
-  }
-  current_allocation_block_index_ = 0;
-  // Code range is full or too fragmented.
-  return false;
-}
-
-
-Address CodeRange::AllocateRawMemory(const size_t requested_size,
-                                     const size_t commit_size,
-                                     size_t* allocated) {
-  ASSERT(commit_size <= requested_size);
-  ASSERT(current_allocation_block_index_ < allocation_list_.length());
-  if (requested_size > allocation_list_[current_allocation_block_index_].size) {
-    // Find an allocation block large enough.
-    if (!GetNextAllocationBlock(requested_size)) return NULL;
-  }
-  // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
-  FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (aligned_requested >= (current.size - Page::kPageSize)) {
-    // Don't leave a small free block, useless for a large object or chunk.
-    *allocated = current.size;
-  } else {
-    *allocated = aligned_requested;
-  }
-  ASSERT(*allocated <= current.size);
-  ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
-                                                            current.start,
-                                                            commit_size,
-                                                            *allocated)) {
-    *allocated = 0;
-    return NULL;
-  }
-  allocation_list_[current_allocation_block_index_].start += *allocated;
-  allocation_list_[current_allocation_block_index_].size -= *allocated;
-  if (*allocated == current.size) {
-    // This block is used up, get the next one.
-    if (!GetNextAllocationBlock(0)) return NULL;
-  }
-  return current.start;
-}
-
-
-bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
-}
-
-
-bool CodeRange::UncommitRawMemory(Address start, size_t length) {
-  return code_range_->Uncommit(start, length);
-}
-
-
-void CodeRange::FreeRawMemory(Address address, size_t length) {
-  ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
-  free_list_.Add(FreeBlock(address, length));
-  code_range_->Uncommit(address, length);
-}
-
-
-void CodeRange::TearDown() {
-    delete code_range_;  // Frees all memory in the virtual memory range.
-    code_range_ = NULL;
-    free_list_.Free();
-    allocation_list_.Free();
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryAllocator
-//
-
-MemoryAllocator::MemoryAllocator(Isolate* isolate)
-    : isolate_(isolate),
-      capacity_(0),
-      capacity_executable_(0),
-      size_(0),
-      size_executable_(0),
-      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
-      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
-}
-
-
-bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
-  capacity_ = RoundUp(capacity, Page::kPageSize);
-  capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
-  ASSERT_GE(capacity_, capacity_executable_);
-
-  size_ = 0;
-  size_executable_ = 0;
-
-  return true;
-}
-
-
-void MemoryAllocator::TearDown() {
-  // Check that spaces were torn down before MemoryAllocator.
-  ASSERT(size_ == 0);
-  // TODO(gc) this will be true again when we fix FreeMemory.
-  // ASSERT(size_executable_ == 0);
-  capacity_ = 0;
-  capacity_executable_ = 0;
-}
-
-
-bool MemoryAllocator::CommitMemory(Address base,
-                                   size_t size,
-                                   Executability executable) {
-  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
-    return false;
-  }
-  UpdateAllocatedSpaceLimits(base, base + size);
-  return true;
-}
-
-
-void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
-                                 Executability executable) {
-  // TODO(gc) make code_range part of memory allocator?
-  ASSERT(reservation->IsReserved());
-  size_t size = reservation->size();
-  ASSERT(size_ >= size);
-  size_ -= size;
-
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
-  if (executable == EXECUTABLE) {
-    ASSERT(size_executable_ >= size);
-    size_executable_ -= size;
-  }
-  // Code which is part of the code-range does not have its own VirtualMemory.
-  ASSERT(isolate_->code_range() == NULL ||
-         !isolate_->code_range()->contains(
-             static_cast<Address>(reservation->address())));
-  ASSERT(executable == NOT_EXECUTABLE ||
-         isolate_->code_range() == NULL ||
-         !isolate_->code_range()->valid());
-  reservation->Release();
-}
-
-
-void MemoryAllocator::FreeMemory(Address base,
-                                 size_t size,
-                                 Executability executable) {
-  // TODO(gc) make code_range part of memory allocator?
-  ASSERT(size_ >= size);
-  size_ -= size;
-
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-
-  if (executable == EXECUTABLE) {
-    ASSERT(size_executable_ >= size);
-    size_executable_ -= size;
-  }
-  if (isolate_->code_range() != NULL &&
-      isolate_->code_range()->contains(static_cast<Address>(base))) {
-    ASSERT(executable == EXECUTABLE);
-    isolate_->code_range()->FreeRawMemory(base, size);
-  } else {
-    ASSERT(executable == NOT_EXECUTABLE ||
-           isolate_->code_range() == NULL ||
-           !isolate_->code_range()->valid());
-    bool result = VirtualMemory::ReleaseRegion(base, size);
-    USE(result);
-    ASSERT(result);
-  }
-}
-
-
-Address MemoryAllocator::ReserveAlignedMemory(size_t size,
-                                              size_t alignment,
-                                              VirtualMemory* controller) {
-  VirtualMemory reservation(size, alignment);
-
-  if (!reservation.IsReserved()) return NULL;
-  size_ += reservation.size();
-  Address base = RoundUp(static_cast<Address>(reservation.address()),
-                         alignment);
-  controller->TakeControl(&reservation);
-  return base;
-}
-
-
-Address MemoryAllocator::AllocateAlignedMemory(size_t reserve_size,
-                                               size_t commit_size,
-                                               size_t alignment,
-                                               Executability executable,
-                                               VirtualMemory* controller) {
-  ASSERT(commit_size <= reserve_size);
-  VirtualMemory reservation;
-  Address base = ReserveAlignedMemory(reserve_size, alignment, &reservation);
-  if (base == NULL) return NULL;
-
-  if (executable == EXECUTABLE) {
-    if (!CommitExecutableMemory(&reservation,
-                                base,
-                                commit_size,
-                                reserve_size)) {
-      base = NULL;
-    }
-  } else {
-    if (reservation.Commit(base, commit_size, false)) {
-      UpdateAllocatedSpaceLimits(base, base + commit_size);
-    } else {
-      base = NULL;
-    }
-  }
-
-  if (base == NULL) {
-    // Failed to commit the body. Release the mapping and any partially
-    // commited regions inside it.
-    reservation.Release();
-    return NULL;
-  }
-
-  controller->TakeControl(&reservation);
-  return base;
-}
-
-
-void Page::InitializeAsAnchor(PagedSpace* owner) {
-  set_owner(owner);
-  set_prev_page(this);
-  set_next_page(this);
-}
-
-
-NewSpacePage* NewSpacePage::Initialize(Heap* heap,
-                                       Address start,
-                                       SemiSpace* semi_space) {
-  Address area_start = start + NewSpacePage::kObjectStartOffset;
-  Address area_end = start + Page::kPageSize;
-
-  MemoryChunk* chunk = MemoryChunk::Initialize(heap,
-                                               start,
-                                               Page::kPageSize,
-                                               area_start,
-                                               area_end,
-                                               NOT_EXECUTABLE,
-                                               semi_space);
-  chunk->set_next_chunk(NULL);
-  chunk->set_prev_chunk(NULL);
-  chunk->initialize_scan_on_scavenge(true);
-  bool in_to_space = (semi_space->id() != kFromSpace);
-  chunk->SetFlag(in_to_space ? MemoryChunk::IN_TO_SPACE
-                             : MemoryChunk::IN_FROM_SPACE);
-  ASSERT(!chunk->IsFlagSet(in_to_space ? MemoryChunk::IN_FROM_SPACE
-                                       : MemoryChunk::IN_TO_SPACE));
-  NewSpacePage* page = static_cast<NewSpacePage*>(chunk);
-  heap->incremental_marking()->SetNewSpacePageFlags(page);
-  return page;
-}
-
-
-void NewSpacePage::InitializeAsAnchor(SemiSpace* semi_space) {
-  set_owner(semi_space);
-  set_next_chunk(this);
-  set_prev_chunk(this);
-  // Flags marks this invalid page as not being in new-space.
-  // All real new-space pages will be in new-space.
-  SetFlags(0, ~0);
-}
-
-
-MemoryChunk* MemoryChunk::Initialize(Heap* heap,
-                                     Address base,
-                                     size_t size,
-                                     Address area_start,
-                                     Address area_end,
-                                     Executability executable,
-                                     Space* owner) {
-  MemoryChunk* chunk = FromAddress(base);
-
-  ASSERT(base == chunk->address());
-
-  chunk->heap_ = heap;
-  chunk->size_ = size;
-  chunk->area_start_ = area_start;
-  chunk->area_end_ = area_end;
-  chunk->flags_ = 0;
-  chunk->set_owner(owner);
-  chunk->InitializeReservedMemory();
-  chunk->slots_buffer_ = NULL;
-  chunk->skip_list_ = NULL;
-  chunk->write_barrier_counter_ = kWriteBarrierCounterGranularity;
-  chunk->progress_bar_ = 0;
-  chunk->high_water_mark_ = static_cast<int>(area_start - base);
-  chunk->set_parallel_sweeping(PARALLEL_SWEEPING_DONE);
-  chunk->available_in_small_free_list_ = 0;
-  chunk->available_in_medium_free_list_ = 0;
-  chunk->available_in_large_free_list_ = 0;
-  chunk->available_in_huge_free_list_ = 0;
-  chunk->non_available_small_blocks_ = 0;
-  chunk->ResetLiveBytes();
-  Bitmap::Clear(chunk);
-  chunk->initialize_scan_on_scavenge(false);
-  chunk->SetFlag(WAS_SWEPT_PRECISELY);
-
-  ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
-  ASSERT(OFFSET_OF(MemoryChunk, live_byte_count_) == kLiveBytesOffset);
-
-  if (executable == EXECUTABLE) {
-    chunk->SetFlag(IS_EXECUTABLE);
-  }
-
-  if (owner == heap->old_data_space()) {
-    chunk->SetFlag(CONTAINS_ONLY_DATA);
-  }
-
-  return chunk;
-}
-
-
-// Commit MemoryChunk area to the requested size.
-bool MemoryChunk::CommitArea(size_t requested) {
-  size_t guard_size = IsFlagSet(IS_EXECUTABLE) ?
-                      MemoryAllocator::CodePageGuardSize() : 0;
-  size_t header_size = area_start() - address() - guard_size;
-  size_t commit_size = RoundUp(header_size + requested, OS::CommitPageSize());
-  size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
-                                  OS::CommitPageSize());
-
-  if (commit_size > committed_size) {
-    // Commit size should be less or equal than the reserved size.
-    ASSERT(commit_size <= size() - 2 * guard_size);
-    // Append the committed area.
-    Address start = address() + committed_size + guard_size;
-    size_t length = commit_size - committed_size;
-    if (reservation_.IsReserved()) {
-      Executability executable = IsFlagSet(IS_EXECUTABLE)
-          ? EXECUTABLE : NOT_EXECUTABLE;
-      if (!heap()->isolate()->memory_allocator()->CommitMemory(
-              start, length, executable)) {
-        return false;
-      }
-    } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
-      ASSERT(code_range != NULL && code_range->valid() &&
-             IsFlagSet(IS_EXECUTABLE));
-      if (!code_range->CommitRawMemory(start, length)) return false;
-    }
-
-    if (Heap::ShouldZapGarbage()) {
-      heap_->isolate()->memory_allocator()->ZapBlock(start, length);
-    }
-  } else if (commit_size < committed_size) {
-    ASSERT(commit_size > 0);
-    // Shrink the committed area.
-    size_t length = committed_size - commit_size;
-    Address start = address() + committed_size + guard_size - length;
-    if (reservation_.IsReserved()) {
-      if (!reservation_.Uncommit(start, length)) return false;
-    } else {
-      CodeRange* code_range = heap_->isolate()->code_range();
-      ASSERT(code_range != NULL && code_range->valid() &&
-             IsFlagSet(IS_EXECUTABLE));
-      if (!code_range->UncommitRawMemory(start, length)) return false;
-    }
-  }
-
-  area_end_ = area_start_ + requested;
-  return true;
-}
-
-
-void MemoryChunk::InsertAfter(MemoryChunk* other) {
-  MemoryChunk* other_next = other->next_chunk();
-
-  set_next_chunk(other_next);
-  set_prev_chunk(other);
-  other_next->set_prev_chunk(this);
-  other->set_next_chunk(this);
-}
-
-
-void MemoryChunk::Unlink() {
-  MemoryChunk* next_element = next_chunk();
-  MemoryChunk* prev_element = prev_chunk();
-  next_element->set_prev_chunk(prev_element);
-  prev_element->set_next_chunk(next_element);
-  set_prev_chunk(NULL);
-  set_next_chunk(NULL);
-}
-
-
-MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t reserve_area_size,
-                                            intptr_t commit_area_size,
-                                            Executability executable,
-                                            Space* owner) {
-  ASSERT(commit_area_size <= reserve_area_size);
-
-  size_t chunk_size;
-  Heap* heap = isolate_->heap();
-  Address base = NULL;
-  VirtualMemory reservation;
-  Address area_start = NULL;
-  Address area_end = NULL;
-
-  //
-  // MemoryChunk layout:
-  //
-  //             Executable
-  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
-  // |           Header           |
-  // +----------------------------+<- base + CodePageGuardStartOffset
-  // |           Guard            |
-  // +----------------------------+<- area_start_
-  // |           Area             |
-  // +----------------------------+<- area_end_ (area_start + commit_area_size)
-  // |   Committed but not used   |
-  // +----------------------------+<- aligned at OS page boundary
-  // | Reserved but not committed |
-  // +----------------------------+<- aligned at OS page boundary
-  // |           Guard            |
-  // +----------------------------+<- base + chunk_size
-  //
-  //           Non-executable
-  // +----------------------------+<- base aligned with MemoryChunk::kAlignment
-  // |          Header            |
-  // +----------------------------+<- area_start_ (base + kObjectStartOffset)
-  // |           Area             |
-  // +----------------------------+<- area_end_ (area_start + commit_area_size)
-  // |  Committed but not used    |
-  // +----------------------------+<- aligned at OS page boundary
-  // | Reserved but not committed |
-  // +----------------------------+<- base + chunk_size
-  //
-
-  if (executable == EXECUTABLE) {
-    chunk_size = RoundUp(CodePageAreaStartOffset() + reserve_area_size,
-                         OS::CommitPageSize()) + CodePageGuardSize();
-
-    // Check executable memory limit.
-    if (size_executable_ + chunk_size > capacity_executable_) {
-      LOG(isolate_,
-          StringEvent("MemoryAllocator::AllocateRawMemory",
-                      "V8 Executable Allocation capacity exceeded"));
-      return NULL;
-    }
-
-    // Size of header (not executable) plus area (executable).
-    size_t commit_size = RoundUp(CodePageGuardStartOffset() + commit_area_size,
-                                 OS::CommitPageSize());
-    // Allocate executable memory either from code range or from the
-    // OS.
-    if (isolate_->code_range() != NULL && isolate_->code_range()->valid()) {
-      base = isolate_->code_range()->AllocateRawMemory(chunk_size,
-                                                       commit_size,
-                                                       &chunk_size);
-      ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
-                       MemoryChunk::kAlignment));
-      if (base == NULL) return NULL;
-      size_ += chunk_size;
-      // Update executable memory size.
-      size_executable_ += chunk_size;
-    } else {
-      base = AllocateAlignedMemory(chunk_size,
-                                   commit_size,
-                                   MemoryChunk::kAlignment,
-                                   executable,
-                                   &reservation);
-      if (base == NULL) return NULL;
-      // Update executable memory size.
-      size_executable_ += reservation.size();
-    }
-
-    if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, CodePageGuardStartOffset());
-      ZapBlock(base + CodePageAreaStartOffset(), commit_area_size);
-    }
-
-    area_start = base + CodePageAreaStartOffset();
-    area_end = area_start + commit_area_size;
-  } else {
-    chunk_size = RoundUp(MemoryChunk::kObjectStartOffset + reserve_area_size,
-                         OS::CommitPageSize());
-    size_t commit_size = RoundUp(MemoryChunk::kObjectStartOffset +
-                                 commit_area_size, OS::CommitPageSize());
-    base = AllocateAlignedMemory(chunk_size,
-                                 commit_size,
-                                 MemoryChunk::kAlignment,
-                                 executable,
-                                 &reservation);
-
-    if (base == NULL) return NULL;
-
-    if (Heap::ShouldZapGarbage()) {
-      ZapBlock(base, Page::kObjectStartOffset + commit_area_size);
-    }
-
-    area_start = base + Page::kObjectStartOffset;
-    area_end = area_start + commit_area_size;
-  }
-
-  // Use chunk_size for statistics and callbacks because we assume that they
-  // treat reserved but not-yet committed memory regions of chunks as allocated.
-  isolate_->counters()->memory_allocated()->
-      Increment(static_cast<int>(chunk_size));
-
-  LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
-  if (owner != NULL) {
-    ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
-    PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
-  }
-
-  MemoryChunk* result = MemoryChunk::Initialize(heap,
-                                                base,
-                                                chunk_size,
-                                                area_start,
-                                                area_end,
-                                                executable,
-                                                owner);
-  result->set_reserved_memory(&reservation);
-  MSAN_MEMORY_IS_INITIALIZED_IN_JIT(base, chunk_size);
-  return result;
-}
-
-
-void Page::ResetFreeListStatistics() {
-  non_available_small_blocks_ = 0;
-  available_in_small_free_list_ = 0;
-  available_in_medium_free_list_ = 0;
-  available_in_large_free_list_ = 0;
-  available_in_huge_free_list_ = 0;
-}
-
-
-Page* MemoryAllocator::AllocatePage(intptr_t size,
-                                    PagedSpace* owner,
-                                    Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(size, size, executable, owner);
-
-  if (chunk == NULL) return NULL;
-
-  return Page::Initialize(isolate_->heap(), chunk, executable, owner);
-}
-
-
-LargePage* MemoryAllocator::AllocateLargePage(intptr_t object_size,
-                                              Space* owner,
-                                              Executability executable) {
-  MemoryChunk* chunk = AllocateChunk(object_size,
-                                     object_size,
-                                     executable,
-                                     owner);
-  if (chunk == NULL) return NULL;
-  return LargePage::Initialize(isolate_->heap(), chunk);
-}
-
-
-void MemoryAllocator::Free(MemoryChunk* chunk) {
-  LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
-  if (chunk->owner() != NULL) {
-    ObjectSpace space =
-        static_cast<ObjectSpace>(1 << chunk->owner()->identity());
-    PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
-  }
-
-  isolate_->heap()->RememberUnmappedPage(
-      reinterpret_cast<Address>(chunk), chunk->IsEvacuationCandidate());
-
-  delete chunk->slots_buffer();
-  delete chunk->skip_list();
-
-  VirtualMemory* reservation = chunk->reserved_memory();
-  if (reservation->IsReserved()) {
-    FreeMemory(reservation, chunk->executable());
-  } else {
-    FreeMemory(chunk->address(),
-               chunk->size(),
-               chunk->executable());
-  }
-}
-
-
-bool MemoryAllocator::CommitBlock(Address start,
-                                  size_t size,
-                                  Executability executable) {
-  if (!CommitMemory(start, size, executable)) return false;
-
-  if (Heap::ShouldZapGarbage()) {
-    ZapBlock(start, size);
-  }
-
-  isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
-  return true;
-}
-
-
-bool MemoryAllocator::UncommitBlock(Address start, size_t size) {
-  if (!VirtualMemory::UncommitRegion(start, size)) return false;
-  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
-  return true;
-}
-
-
-void MemoryAllocator::ZapBlock(Address start, size_t size) {
-  for (size_t s = 0; s + kPointerSize <= size; s += kPointerSize) {
-    Memory::Address_at(start + s) = kZapValue;
-  }
-}
-
-
-void MemoryAllocator::PerformAllocationCallback(ObjectSpace space,
-                                                AllocationAction action,
-                                                size_t size) {
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    MemoryAllocationCallbackRegistration registration =
-      memory_allocation_callbacks_[i];
-    if ((registration.space & space) == space &&
-        (registration.action & action) == action)
-      registration.callback(space, action, static_cast<int>(size));
-  }
-}
-
-
-bool MemoryAllocator::MemoryAllocationCallbackRegistered(
-    MemoryAllocationCallback callback) {
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    if (memory_allocation_callbacks_[i].callback == callback) return true;
-  }
-  return false;
-}
-
-
-void MemoryAllocator::AddMemoryAllocationCallback(
-    MemoryAllocationCallback callback,
-    ObjectSpace space,
-    AllocationAction action) {
-  ASSERT(callback != NULL);
-  MemoryAllocationCallbackRegistration registration(callback, space, action);
-  ASSERT(!MemoryAllocator::MemoryAllocationCallbackRegistered(callback));
-  return memory_allocation_callbacks_.Add(registration);
-}
-
-
-void MemoryAllocator::RemoveMemoryAllocationCallback(
-     MemoryAllocationCallback callback) {
-  ASSERT(callback != NULL);
-  for (int i = 0; i < memory_allocation_callbacks_.length(); ++i) {
-    if (memory_allocation_callbacks_[i].callback == callback) {
-      memory_allocation_callbacks_.Remove(i);
-      return;
-    }
-  }
-  UNREACHABLE();
-}
-
-
-#ifdef DEBUG
-void MemoryAllocator::ReportStatistics() {
-  float pct = static_cast<float>(capacity_ - size_) / capacity_;
-  PrintF("  capacity: %" V8_PTR_PREFIX "d"
-             ", used: %" V8_PTR_PREFIX "d"
-             ", available: %%%d\n\n",
-         capacity_, size_, static_cast<int>(pct*100));
-}
-#endif
-
-
-int MemoryAllocator::CodePageGuardStartOffset() {
-  // We are guarding code pages: the first OS page after the header
-  // will be protected as non-writable.
-  return RoundUp(Page::kObjectStartOffset, OS::CommitPageSize());
-}
-
-
-int MemoryAllocator::CodePageGuardSize() {
-  return static_cast<int>(OS::CommitPageSize());
-}
-
-
-int MemoryAllocator::CodePageAreaStartOffset() {
-  // We are guarding code pages: the first OS page after the header
-  // will be protected as non-writable.
-  return CodePageGuardStartOffset() + CodePageGuardSize();
-}
-
-
-int MemoryAllocator::CodePageAreaEndOffset() {
-  // We are guarding code pages: the last OS page will be protected as
-  // non-writable.
-  return Page::kPageSize - static_cast<int>(OS::CommitPageSize());
-}
-
-
-bool MemoryAllocator::CommitExecutableMemory(VirtualMemory* vm,
-                                             Address start,
-                                             size_t commit_size,
-                                             size_t reserved_size) {
-  // Commit page header (not executable).
-  if (!vm->Commit(start,
-                  CodePageGuardStartOffset(),
-                  false)) {
-    return false;
-  }
-
-  // Create guard page after the header.
-  if (!vm->Guard(start + CodePageGuardStartOffset())) {
-    return false;
-  }
-
-  // Commit page body (executable).
-  if (!vm->Commit(start + CodePageAreaStartOffset(),
-                  commit_size - CodePageGuardStartOffset(),
-                  true)) {
-    return false;
-  }
-
-  // Create guard page before the end.
-  if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
-    return false;
-  }
-
-  UpdateAllocatedSpaceLimits(start,
-                             start + CodePageAreaStartOffset() +
-                             commit_size - CodePageGuardStartOffset());
-  return true;
-}
-
-
-// -----------------------------------------------------------------------------
-// MemoryChunk implementation
-
-void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
-  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
-  if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
-    static_cast<PagedSpace*>(chunk->owner())->IncrementUnsweptFreeBytes(-by);
-  }
-  chunk->IncrementLiveBytes(by);
-}
-
-
-// -----------------------------------------------------------------------------
-// PagedSpace implementation
-
-PagedSpace::PagedSpace(Heap* heap,
-                       intptr_t max_capacity,
-                       AllocationSpace id,
-                       Executability executable)
-    : Space(heap, id, executable),
-      free_list_(this),
-      was_swept_conservatively_(false),
-      unswept_free_bytes_(0),
-      end_of_unswept_pages_(NULL) {
-  if (id == CODE_SPACE) {
-    area_size_ = heap->isolate()->memory_allocator()->
-        CodePageAreaSize();
-  } else {
-    area_size_ = Page::kPageSize - Page::kObjectStartOffset;
-  }
-  max_capacity_ = (RoundDown(max_capacity, Page::kPageSize) / Page::kPageSize)
-      * AreaSize();
-  accounting_stats_.Clear();
-
-  allocation_info_.set_top(NULL);
-  allocation_info_.set_limit(NULL);
-
-  anchor_.InitializeAsAnchor(this);
-}
-
-
-bool PagedSpace::SetUp() {
-  return true;
-}
-
-
-bool PagedSpace::HasBeenSetUp() {
-  return true;
-}
-
-
-void PagedSpace::TearDown() {
-  PageIterator iterator(this);
-  while (iterator.has_next()) {
-    heap()->isolate()->memory_allocator()->Free(iterator.next());
-  }
-  anchor_.set_next_page(&anchor_);
-  anchor_.set_prev_page(&anchor_);
-  accounting_stats_.Clear();
-}
-
-
-size_t PagedSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  size_t size = 0;
-  PageIterator it(this);
-  while (it.has_next()) {
-    size += it.next()->CommittedPhysicalMemory();
-  }
-  return size;
-}
-
-
-Object* PagedSpace::FindObject(Address addr) {
-  // Note: this function can only be called on precisely swept spaces.
-  ASSERT(!heap()->mark_compact_collector()->in_use());
-
-  if (!Contains(addr)) return Smi::FromInt(0);  // Signaling not found.
-
-  Page* p = Page::FromAddress(addr);
-  HeapObjectIterator it(p, NULL);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    Address cur = obj->address();
-    Address next = cur + obj->Size();
-    if ((cur <= addr) && (addr < next)) return obj;
-  }
-
-  UNREACHABLE();
-  return Smi::FromInt(0);
-}
-
-
-bool PagedSpace::CanExpand() {
-  ASSERT(max_capacity_ % AreaSize() == 0);
-
-  if (Capacity() == max_capacity_) return false;
-
-  ASSERT(Capacity() < max_capacity_);
-
-  // Are we going to exceed capacity for this space?
-  if ((Capacity() + Page::kPageSize) > max_capacity_) return false;
-
-  return true;
-}
-
-
-bool PagedSpace::Expand() {
-  if (!CanExpand()) return false;
-
-  intptr_t size = AreaSize();
-
-  if (anchor_.next_page() == &anchor_) {
-    size = SizeOfFirstPage();
-  }
-
-  Page* p = heap()->isolate()->memory_allocator()->AllocatePage(
-      size, this, executable());
-  if (p == NULL) return false;
-
-  ASSERT(Capacity() <= max_capacity_);
-
-  p->InsertAfter(anchor_.prev_page());
-
-  return true;
-}
-
-
-intptr_t PagedSpace::SizeOfFirstPage() {
-  int size = 0;
-  switch (identity()) {
-    case OLD_POINTER_SPACE:
-      size = 96 * kPointerSize * KB;
-      break;
-    case OLD_DATA_SPACE:
-      size = 192 * KB;
-      break;
-    case MAP_SPACE:
-      size = 16 * kPointerSize * KB;
-      break;
-    case CELL_SPACE:
-      size = 16 * kPointerSize * KB;
-      break;
-    case PROPERTY_CELL_SPACE:
-      size = 8 * kPointerSize * KB;
-      break;
-    case CODE_SPACE: {
-      CodeRange* code_range = heap()->isolate()->code_range();
-      if (code_range != NULL && code_range->valid()) {
-        // When code range exists, code pages are allocated in a special way
-        // (from the reserved code range). That part of the code is not yet
-        // upgraded to handle small pages.
-        size = AreaSize();
-      } else {
-        size = RoundUp(
-            480 * KB * FullCodeGenerator::kBootCodeSizeMultiplier / 100,
-            kPointerSize);
-      }
-      break;
-    }
-    default:
-      UNREACHABLE();
-  }
-  return Min(size, AreaSize());
-}
-
-
-int PagedSpace::CountTotalPages() {
-  PageIterator it(this);
-  int count = 0;
-  while (it.has_next()) {
-    it.next();
-    count++;
-  }
-  return count;
-}
-
-
-void PagedSpace::ObtainFreeListStatistics(Page* page, SizeStats* sizes) {
-  sizes->huge_size_ = page->available_in_huge_free_list();
-  sizes->small_size_ = page->available_in_small_free_list();
-  sizes->medium_size_ = page->available_in_medium_free_list();
-  sizes->large_size_ = page->available_in_large_free_list();
-}
-
-
-void PagedSpace::ResetFreeListStatistics() {
-  PageIterator page_iterator(this);
-  while (page_iterator.has_next()) {
-    Page* page = page_iterator.next();
-    page->ResetFreeListStatistics();
-  }
-}
-
-
-void PagedSpace::IncreaseCapacity(int size) {
-  accounting_stats_.ExpandSpace(size);
-}
-
-
-void PagedSpace::ReleasePage(Page* page) {
-  ASSERT(page->LiveBytes() == 0);
-  ASSERT(AreaSize() == page->area_size());
-
-  if (page->WasSwept()) {
-    intptr_t size = free_list_.EvictFreeListItems(page);
-    accounting_stats_.AllocateBytes(size);
-    ASSERT_EQ(AreaSize(), static_cast<int>(size));
-  } else {
-    DecreaseUnsweptFreeBytes(page);
-  }
-
-  if (page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE)) {
-    heap()->decrement_scan_on_scavenge_pages();
-    page->ClearFlag(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
-  ASSERT(!free_list_.ContainsPageFreeListItems(page));
-
-  if (Page::FromAllocationTop(allocation_info_.top()) == page) {
-    allocation_info_.set_top(NULL);
-    allocation_info_.set_limit(NULL);
-  }
-
-  page->Unlink();
-  if (page->IsFlagSet(MemoryChunk::CONTAINS_ONLY_DATA)) {
-    heap()->isolate()->memory_allocator()->Free(page);
-  } else {
-    heap()->QueueMemoryChunkForFree(page);
-  }
-
-  ASSERT(Capacity() > 0);
-  accounting_stats_.ShrinkSpace(AreaSize());
-}
-
-
-#ifdef DEBUG
-void PagedSpace::Print() { }
-#endif
-
-#ifdef VERIFY_HEAP
-void PagedSpace::Verify(ObjectVisitor* visitor) {
-  // We can only iterate over the pages if they were swept precisely.
-  if (was_swept_conservatively_) return;
-
-  bool allocation_pointer_found_in_space =
-      (allocation_info_.top() == allocation_info_.limit());
-  PageIterator page_iterator(this);
-  while (page_iterator.has_next()) {
-    Page* page = page_iterator.next();
-    CHECK(page->owner() == this);
-    if (page == Page::FromAllocationTop(allocation_info_.top())) {
-      allocation_pointer_found_in_space = true;
-    }
-    CHECK(page->WasSweptPrecisely());
-    HeapObjectIterator it(page, NULL);
-    Address end_of_previous_object = page->area_start();
-    Address top = page->area_end();
-    int black_size = 0;
-    for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-      CHECK(end_of_previous_object <= object->address());
-
-      // The first word should be a map, and we expect all map pointers to
-      // be in map space.
-      Map* map = object->map();
-      CHECK(map->IsMap());
-      CHECK(heap()->map_space()->Contains(map));
-
-      // Perform space-specific object verification.
-      VerifyObject(object);
-
-      // The object itself should look OK.
-      object->ObjectVerify();
-
-      // All the interior pointers should be contained in the heap.
-      int size = object->Size();
-      object->IterateBody(map->instance_type(), size, visitor);
-      if (Marking::IsBlack(Marking::MarkBitFrom(object))) {
-        black_size += size;
-      }
-
-      CHECK(object->address() + size <= top);
-      end_of_previous_object = object->address() + size;
-    }
-    CHECK_LE(black_size, page->LiveBytes());
-  }
-  CHECK(allocation_pointer_found_in_space);
-}
-#endif  // VERIFY_HEAP
-
-// -----------------------------------------------------------------------------
-// NewSpace implementation
-
-
-bool NewSpace::SetUp(int reserved_semispace_capacity,
-                     int maximum_semispace_capacity) {
-  // Set up new space based on the preallocated memory block defined by
-  // start and size. The provided space is divided into two semi-spaces.
-  // To support fast containment testing in the new space, the size of
-  // this chunk must be a power of two and it must be aligned to its size.
-  int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
-
-  size_t size = 2 * reserved_semispace_capacity;
-  Address base =
-      heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
-          size, size, &reservation_);
-  if (base == NULL) return false;
-
-  chunk_base_ = base;
-  chunk_size_ = static_cast<uintptr_t>(size);
-  LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
-
-  ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
-  ASSERT(IsPowerOf2(maximum_semispace_capacity));
-
-  // Allocate and set up the histogram arrays if necessary.
-  allocated_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-  promoted_histogram_ = NewArray<HistogramInfo>(LAST_TYPE + 1);
-
-#define SET_NAME(name) allocated_histogram_[name].set_name(#name); \
-                       promoted_histogram_[name].set_name(#name);
-  INSTANCE_TYPE_LIST(SET_NAME)
-#undef SET_NAME
-
-  ASSERT(reserved_semispace_capacity == heap()->ReservedSemiSpaceSize());
-  ASSERT(static_cast<intptr_t>(chunk_size_) >=
-         2 * heap()->ReservedSemiSpaceSize());
-  ASSERT(IsAddressAligned(chunk_base_, 2 * reserved_semispace_capacity, 0));
-
-  to_space_.SetUp(chunk_base_,
-                  initial_semispace_capacity,
-                  maximum_semispace_capacity);
-  from_space_.SetUp(chunk_base_ + reserved_semispace_capacity,
-                    initial_semispace_capacity,
-                    maximum_semispace_capacity);
-  if (!to_space_.Commit()) {
-    return false;
-  }
-  ASSERT(!from_space_.is_committed());  // No need to use memory yet.
-
-  start_ = chunk_base_;
-  address_mask_ = ~(2 * reserved_semispace_capacity - 1);
-  object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start_) | kHeapObjectTag;
-
-  ResetAllocationInfo();
-
-  return true;
-}
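-
-// Sketch of the fast containment test that the power-of-two alignment above
-// enables (illustrative sizes): with a reserved semispace capacity of 8 MB
-// the chunk is 16 MB and aligned to 16 MB, so address_mask_ is
-// ~(16 MB - 1) and a containment check reduces to a single mask-and-compare,
-//   (reinterpret_cast<uintptr_t>(addr) & address_mask_) == start,
-// instead of two range comparisons.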
-
-
-void NewSpace::TearDown() {
-  if (allocated_histogram_) {
-    DeleteArray(allocated_histogram_);
-    allocated_histogram_ = NULL;
-  }
-  if (promoted_histogram_) {
-    DeleteArray(promoted_histogram_);
-    promoted_histogram_ = NULL;
-  }
-
-  start_ = NULL;
-  allocation_info_.set_top(NULL);
-  allocation_info_.set_limit(NULL);
-
-  to_space_.TearDown();
-  from_space_.TearDown();
-
-  LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
-
-  ASSERT(reservation_.IsReserved());
-  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
-                                                    NOT_EXECUTABLE);
-  chunk_base_ = NULL;
-  chunk_size_ = 0;
-}
-
-
-void NewSpace::Flip() {
-  SemiSpace::Swap(&from_space_, &to_space_);
-}
-
-
-void NewSpace::Grow() {
-  // Double the semispace size but only up to maximum capacity.
-  ASSERT(Capacity() < MaximumCapacity());
-  int new_capacity = Min(MaximumCapacity(), 2 * static_cast<int>(Capacity()));
-  if (to_space_.GrowTo(new_capacity)) {
-    // Only grow from-space if we managed to grow to-space.
-    if (!from_space_.GrowTo(new_capacity)) {
-      // If we managed to grow to-space but couldn't grow from-space,
-      // attempt to shrink to-space.
-      if (!to_space_.ShrinkTo(from_space_.Capacity())) {
-        // We are in an inconsistent state because we could not
-        // commit/uncommit memory from new space.
-        V8::FatalProcessOutOfMemory("Failed to grow new space.");
-      }
-    }
-  }
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::Shrink() {
-  int new_capacity = Max(InitialCapacity(), 2 * SizeAsInt());
-  int rounded_new_capacity = RoundUp(new_capacity, Page::kPageSize);
-  if (rounded_new_capacity < Capacity() &&
-      to_space_.ShrinkTo(rounded_new_capacity))  {
-    // Only shrink from-space if we managed to shrink to-space.
-    from_space_.Reset();
-    if (!from_space_.ShrinkTo(rounded_new_capacity)) {
-      // If we managed to shrink to-space but couldn't shrink from-space,
-      // attempt to grow to-space again.
-      if (!to_space_.GrowTo(from_space_.Capacity())) {
-        // We are in an inconsistent state because we could not
-        // commit/uncommit memory from new space.
-        V8::FatalProcessOutOfMemory("Failed to shrink new space.");
-      }
-    }
-  }
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::UpdateAllocationInfo() {
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  allocation_info_.set_top(to_space_.page_low());
-  allocation_info_.set_limit(to_space_.page_high());
-  UpdateInlineAllocationLimit(0);
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
-
-
-void NewSpace::ResetAllocationInfo() {
-  to_space_.Reset();
-  UpdateAllocationInfo();
-  pages_used_ = 0;
-  // Clear all mark-bits in the to-space.
-  NewSpacePageIterator it(&to_space_);
-  while (it.has_next()) {
-    Bitmap::Clear(it.next());
-  }
-}
-
-
-void NewSpace::UpdateInlineAllocationLimit(int size_in_bytes) {
-  if (heap()->inline_allocation_disabled()) {
-    // Lowest limit when linear allocation was disabled.
-    Address high = to_space_.page_high();
-    Address new_top = allocation_info_.top() + size_in_bytes;
-    allocation_info_.set_limit(Min(new_top, high));
-  } else if (inline_allocation_limit_step() == 0) {
-    // Normal limit is the end of the current page.
-    allocation_info_.set_limit(to_space_.page_high());
-  } else {
-    // Lower limit during incremental marking.
-    Address high = to_space_.page_high();
-    Address new_top = allocation_info_.top() + size_in_bytes;
-    Address new_limit = new_top + inline_allocation_limit_step_;
-    allocation_info_.set_limit(Min(new_limit, high));
-  }
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-}
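-
-// Worked example for the incremental-marking branch above (hypothetical
-// numbers): with top 1 KB into a page whose area ends 32 KB later and an
-// inline_allocation_limit_step_ of 1 KB, the limit is pulled in to roughly
-// top + size_in_bytes + 1 KB. Bump allocation then hits the limit after
-// about 1 KB and re-enters SlowAllocateRaw, which performs a marking step
-// before raising the limit again.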
-
-
-bool NewSpace::AddFreshPage() {
-  Address top = allocation_info_.top();
-  if (NewSpacePage::IsAtStart(top)) {
-    // The current page is already empty. Don't try to make another.
-
-    // We should only get here if someone asks to allocate more
-    // than what can be stored in a single page.
-    // TODO(gc): Change the limit on new-space allocation to prevent this
-    // from happening (all such allocations should go directly to LOSpace).
-    return false;
-  }
-  if (!to_space_.AdvancePage()) {
-    // Failed to get a new page in to-space.
-    return false;
-  }
-
-  // Clear remainder of current page.
-  Address limit = NewSpacePage::FromLimit(top)->area_end();
-  if (heap()->gc_state() == Heap::SCAVENGE) {
-    heap()->promotion_queue()->SetNewLimit(limit);
-    heap()->promotion_queue()->ActivateGuardIfOnTheSamePage();
-  }
-
-  int remaining_in_page = static_cast<int>(limit - top);
-  heap()->CreateFillerObjectAt(top, remaining_in_page);
-  pages_used_++;
-  UpdateAllocationInfo();
-
-  return true;
-}
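-
-// Example: with 40 bytes left on the current page, an 80-byte request
-// cannot be bump-allocated. The 40-byte remainder becomes a filler object
-// (so heap iteration still sees a contiguous stream of objects) and
-// allocation continues at the start of the next to-space page.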
-
-
-AllocationResult NewSpace::SlowAllocateRaw(int size_in_bytes) {
-  Address old_top = allocation_info_.top();
-  Address high = to_space_.page_high();
-  if (allocation_info_.limit() < high) {
-    // Either the limit has been lowered because linear allocation was disabled
-    // or because incremental marking wants to get a chance to do a step. Set
-    // the new limit accordingly.
-    Address new_top = old_top + size_in_bytes;
-    int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(
-        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
-    UpdateInlineAllocationLimit(size_in_bytes);
-    top_on_previous_step_ = new_top;
-    return AllocateRaw(size_in_bytes);
-  } else if (AddFreshPage()) {
-    // Switched to new page. Try allocating again.
-    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(
-        bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
-    top_on_previous_step_ = to_space_.page_low();
-    return AllocateRaw(size_in_bytes);
-  } else {
-    return AllocationResult::Retry();
-  }
-}
-
-
-#ifdef VERIFY_HEAP
-// We do not use the SemiSpaceIterator because verification doesn't assume
-// that it works (it depends on the invariants we are checking).
-void NewSpace::Verify() {
-  // The allocation pointer should be in the space or at the very end.
-  ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
-
-  // There should be objects packed in from the low address up to the
-  // allocation pointer.
-  Address current = to_space_.first_page()->area_start();
-  CHECK_EQ(current, to_space_.space_start());
-
-  while (current != top()) {
-    if (!NewSpacePage::IsAtEnd(current)) {
-      // The allocation pointer should not be in the middle of an object.
-      CHECK(!NewSpacePage::FromLimit(current)->ContainsLimit(top()) ||
-            current < top());
-
-      HeapObject* object = HeapObject::FromAddress(current);
-
-      // The first word should be a map, and we expect all map pointers to
-      // be in map space.
-      Map* map = object->map();
-      CHECK(map->IsMap());
-      CHECK(heap()->map_space()->Contains(map));
-
-      // The object should not be code or a map.
-      CHECK(!object->IsMap());
-      CHECK(!object->IsCode());
-
-      // The object itself should look OK.
-      object->ObjectVerify();
-
-      // All the interior pointers should be contained in the heap.
-      VerifyPointersVisitor visitor;
-      int size = object->Size();
-      object->IterateBody(map->instance_type(), size, &visitor);
-
-      current += size;
-    } else {
-      // At end of page, switch to next page.
-      NewSpacePage* page = NewSpacePage::FromLimit(current)->next_page();
-      // Next page should be valid.
-      CHECK(!page->is_anchor());
-      current = page->area_start();
-    }
-  }
-
-  // Check semi-spaces.
-  CHECK_EQ(from_space_.id(), kFromSpace);
-  CHECK_EQ(to_space_.id(), kToSpace);
-  from_space_.Verify();
-  to_space_.Verify();
-}
-#endif
-
-// -----------------------------------------------------------------------------
-// SemiSpace implementation
-
-void SemiSpace::SetUp(Address start,
-                      int initial_capacity,
-                      int maximum_capacity) {
-  // Creates a space in the young generation. The constructor does not
-  // allocate memory from the OS.  A SemiSpace is given a contiguous chunk of
-  // memory of size 'capacity' when set up, and does not grow or shrink
-  // otherwise.  In the mark-compact collector, the memory region of the from
-  // space is used as the marking stack. It requires contiguous memory
-  // addresses.
-  ASSERT(maximum_capacity >= Page::kPageSize);
-  initial_capacity_ = RoundDown(initial_capacity, Page::kPageSize);
-  capacity_ = initial_capacity;
-  maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize);
-  maximum_committed_ = 0;
-  committed_ = false;
-  start_ = start;
-  address_mask_ = ~(maximum_capacity - 1);
-  object_mask_ = address_mask_ | kHeapObjectTagMask;
-  object_expected_ = reinterpret_cast<uintptr_t>(start) | kHeapObjectTag;
-  age_mark_ = start_;
-}
-
-
-void SemiSpace::TearDown() {
-  start_ = NULL;
-  capacity_ = 0;
-}
-
-
-bool SemiSpace::Commit() {
-  ASSERT(!is_committed());
-  int pages = capacity_ / Page::kPageSize;
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(start_,
-                                                          capacity_,
-                                                          executable())) {
-    return false;
-  }
-
-  NewSpacePage* current = anchor();
-  for (int i = 0; i < pages; i++) {
-    NewSpacePage* new_page =
-      NewSpacePage::Initialize(heap(), start_ + i * Page::kPageSize, this);
-    new_page->InsertAfter(current);
-    current = new_page;
-  }
-
-  SetCapacity(capacity_);
-  committed_ = true;
-  Reset();
-  return true;
-}
-
-
-bool SemiSpace::Uncommit() {
-  ASSERT(is_committed());
-  Address start = start_ + maximum_capacity_ - capacity_;
-  if (!heap()->isolate()->memory_allocator()->UncommitBlock(start, capacity_)) {
-    return false;
-  }
-  anchor()->set_next_page(anchor());
-  anchor()->set_prev_page(anchor());
-
-  committed_ = false;
-  return true;
-}
-
-
-size_t SemiSpace::CommittedPhysicalMemory() {
-  if (!is_committed()) return 0;
-  size_t size = 0;
-  NewSpacePageIterator it(this);
-  while (it.has_next()) {
-    size += it.next()->CommittedPhysicalMemory();
-  }
-  return size;
-}
-
-
-bool SemiSpace::GrowTo(int new_capacity) {
-  if (!is_committed()) {
-    if (!Commit()) return false;
-  }
-  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
-  ASSERT(new_capacity <= maximum_capacity_);
-  ASSERT(new_capacity > capacity_);
-  int pages_before = capacity_ / Page::kPageSize;
-  int pages_after = new_capacity / Page::kPageSize;
-
-  size_t delta = new_capacity - capacity_;
-
-  ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-  if (!heap()->isolate()->memory_allocator()->CommitBlock(
-      start_ + capacity_, delta, executable())) {
-    return false;
-  }
-  SetCapacity(new_capacity);
-  NewSpacePage* last_page = anchor()->prev_page();
-  ASSERT(last_page != anchor());
-  for (int i = pages_before; i < pages_after; i++) {
-    Address page_address = start_ + i * Page::kPageSize;
-    NewSpacePage* new_page = NewSpacePage::Initialize(heap(),
-                                                      page_address,
-                                                      this);
-    new_page->InsertAfter(last_page);
-    Bitmap::Clear(new_page);
-    // Duplicate the flags that were set on the old page.
-    new_page->SetFlags(last_page->GetFlags(),
-                       NewSpacePage::kCopyOnFlipFlagsMask);
-    last_page = new_page;
-  }
-  return true;
-}
-
-
-bool SemiSpace::ShrinkTo(int new_capacity) {
-  ASSERT((new_capacity & Page::kPageAlignmentMask) == 0);
-  ASSERT(new_capacity >= initial_capacity_);
-  ASSERT(new_capacity < capacity_);
-  if (is_committed()) {
-    size_t delta = capacity_ - new_capacity;
-    ASSERT(IsAligned(delta, OS::AllocateAlignment()));
-
-    MemoryAllocator* allocator = heap()->isolate()->memory_allocator();
-    if (!allocator->UncommitBlock(start_ + new_capacity, delta)) {
-      return false;
-    }
-
-    int pages_after = new_capacity / Page::kPageSize;
-    NewSpacePage* new_last_page =
-        NewSpacePage::FromAddress(start_ + (pages_after - 1) * Page::kPageSize);
-    new_last_page->set_next_page(anchor());
-    anchor()->set_prev_page(new_last_page);
-    ASSERT((current_page_ >= first_page()) && (current_page_ <= new_last_page));
-  }
-
-  SetCapacity(new_capacity);
-
-  return true;
-}
-
-
-void SemiSpace::FlipPages(intptr_t flags, intptr_t mask) {
-  anchor_.set_owner(this);
-  // Fixup back-pointers to anchor. Address of anchor changes
-  // when we swap.
-  anchor_.prev_page()->set_next_page(&anchor_);
-  anchor_.next_page()->set_prev_page(&anchor_);
-
-  bool becomes_to_space = (id_ == kFromSpace);
-  id_ = becomes_to_space ? kToSpace : kFromSpace;
-  NewSpacePage* page = anchor_.next_page();
-  while (page != &anchor_) {
-    page->set_owner(this);
-    page->SetFlags(flags, mask);
-    if (becomes_to_space) {
-      page->ClearFlag(MemoryChunk::IN_FROM_SPACE);
-      page->SetFlag(MemoryChunk::IN_TO_SPACE);
-      page->ClearFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
-      page->ResetLiveBytes();
-    } else {
-      page->SetFlag(MemoryChunk::IN_FROM_SPACE);
-      page->ClearFlag(MemoryChunk::IN_TO_SPACE);
-    }
-    ASSERT(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
-    ASSERT(page->IsFlagSet(MemoryChunk::IN_TO_SPACE) ||
-           page->IsFlagSet(MemoryChunk::IN_FROM_SPACE));
-    page = page->next_page();
-  }
-}
-
-
-void SemiSpace::Reset() {
-  ASSERT(anchor_.next_page() != &anchor_);
-  current_page_ = anchor_.next_page();
-}
-
-
-void SemiSpace::Swap(SemiSpace* from, SemiSpace* to) {
-  // We won't be swapping semispaces without data in them.
-  ASSERT(from->anchor_.next_page() != &from->anchor_);
-  ASSERT(to->anchor_.next_page() != &to->anchor_);
-
-  // Swap bits.
-  SemiSpace tmp = *from;
-  *from = *to;
-  *to = tmp;
-
-  // Fixup back-pointers to the page list anchor now that its address
-  // has changed.
-  // Swap to/from-space bits on pages.
-  // Copy GC flags from old active space (from-space) to new (to-space).
-  intptr_t flags = from->current_page()->GetFlags();
-  to->FlipPages(flags, NewSpacePage::kCopyOnFlipFlagsMask);
-
-  from->FlipPages(0, 0);
-}
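-
-// Note on the technique above: the member-wise swap also exchanges the
-// embedded page-list anchors, whose addresses are baked into their
-// neighbours' prev/next pointers. FlipPages re-points those back-pointers
-// and rewrites the IN_FROM_SPACE/IN_TO_SPACE flags, so the two semispaces
-// trade roles without copying a single object.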
-
-
-void SemiSpace::SetCapacity(int new_capacity) {
-  capacity_ = new_capacity;
-  if (capacity_ > maximum_committed_) {
-    maximum_committed_ = capacity_;
-  }
-}
-
-
-void SemiSpace::set_age_mark(Address mark) {
-  ASSERT(NewSpacePage::FromLimit(mark)->semi_space() == this);
-  age_mark_ = mark;
-  // Mark all pages up to the one containing mark.
-  NewSpacePageIterator it(space_start(), mark);
-  while (it.has_next()) {
-    it.next()->SetFlag(MemoryChunk::NEW_SPACE_BELOW_AGE_MARK);
-  }
-}
-
-
-#ifdef DEBUG
-void SemiSpace::Print() { }
-#endif
-
-#ifdef VERIFY_HEAP
-void SemiSpace::Verify() {
-  bool is_from_space = (id_ == kFromSpace);
-  NewSpacePage* page = anchor_.next_page();
-  CHECK(anchor_.semi_space() == this);
-  while (page != &anchor_) {
-    CHECK(page->semi_space() == this);
-    CHECK(page->InNewSpace());
-    CHECK(page->IsFlagSet(is_from_space ? MemoryChunk::IN_FROM_SPACE
-                                        : MemoryChunk::IN_TO_SPACE));
-    CHECK(!page->IsFlagSet(is_from_space ? MemoryChunk::IN_TO_SPACE
-                                         : MemoryChunk::IN_FROM_SPACE));
-    CHECK(page->IsFlagSet(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING));
-    if (!is_from_space) {
-      // The pointers-from-here-are-interesting flag isn't updated dynamically
-      // on from-space pages, so it might be out of sync with the marking state.
-      if (page->heap()->incremental_marking()->IsMarking()) {
-        CHECK(page->IsFlagSet(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
-      } else {
-        CHECK(!page->IsFlagSet(
-            MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING));
-      }
-      // TODO(gc): Check that the live_bytes_count_ field matches the
-      // black marking on the page (if we make it match in new-space).
-    }
-    CHECK(page->IsFlagSet(MemoryChunk::SCAN_ON_SCAVENGE));
-    CHECK(page->prev_page()->next_page() == page);
-    page = page->next_page();
-  }
-}
-#endif
-
-#ifdef DEBUG
-void SemiSpace::AssertValidRange(Address start, Address end) {
-  // Addresses belong to same semi-space
-  NewSpacePage* page = NewSpacePage::FromLimit(start);
-  NewSpacePage* end_page = NewSpacePage::FromLimit(end);
-  SemiSpace* space = page->semi_space();
-  CHECK_EQ(space, end_page->semi_space());
-  // Start address is before end address, either on same page,
-  // or end address is on a later page in the linked list of
-  // semi-space pages.
-  if (page == end_page) {
-    CHECK(start <= end);
-  } else {
-    while (page != end_page) {
-      page = page->next_page();
-      CHECK_NE(page, space->anchor());
-    }
-  }
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// SemiSpaceIterator implementation.
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space) {
-  Initialize(space->bottom(), space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space,
-                                     HeapObjectCallback size_func) {
-  Initialize(space->bottom(), space->top(), size_func);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(NewSpace* space, Address start) {
-  Initialize(start, space->top(), NULL);
-}
-
-
-SemiSpaceIterator::SemiSpaceIterator(Address from, Address to) {
-  Initialize(from, to, NULL);
-}
-
-
-void SemiSpaceIterator::Initialize(Address start,
-                                   Address end,
-                                   HeapObjectCallback size_func) {
-  SemiSpace::AssertValidRange(start, end);
-  current_ = start;
-  limit_ = end;
-  size_func_ = size_func;
-}
-
-
-#ifdef DEBUG
-// heap_histograms is shared, always clear it before using it.
-static void ClearHistograms(Isolate* isolate) {
-  // We reset the name each time, though it hasn't changed.
-#define DEF_TYPE_NAME(name) isolate->heap_histograms()[name].set_name(#name);
-  INSTANCE_TYPE_LIST(DEF_TYPE_NAME)
-#undef DEF_TYPE_NAME
-
-#define CLEAR_HISTOGRAM(name) isolate->heap_histograms()[name].clear();
-  INSTANCE_TYPE_LIST(CLEAR_HISTOGRAM)
-#undef CLEAR_HISTOGRAM
-
-  isolate->js_spill_information()->Clear();
-}
-
-
-static void ClearCodeKindStatistics(int* code_kind_statistics) {
-  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
-    code_kind_statistics[i] = 0;
-  }
-}
-
-
-static void ReportCodeKindStatistics(int* code_kind_statistics) {
-  PrintF("\n   Code kind histograms: \n");
-  for (int i = 0; i < Code::NUMBER_OF_KINDS; i++) {
-    if (code_kind_statistics[i] > 0) {
-      PrintF("     %-20s: %10d bytes\n",
-             Code::Kind2String(static_cast<Code::Kind>(i)),
-             code_kind_statistics[i]);
-    }
-  }
-  PrintF("\n");
-}
-
-
-static int CollectHistogramInfo(HeapObject* obj) {
-  Isolate* isolate = obj->GetIsolate();
-  InstanceType type = obj->map()->instance_type();
-  ASSERT(0 <= type && type <= LAST_TYPE);
-  ASSERT(isolate->heap_histograms()[type].name() != NULL);
-  isolate->heap_histograms()[type].increment_number(1);
-  isolate->heap_histograms()[type].increment_bytes(obj->Size());
-
-  if (FLAG_collect_heap_spill_statistics && obj->IsJSObject()) {
-    JSObject::cast(obj)->IncrementSpillStatistics(
-        isolate->js_spill_information());
-  }
-
-  return obj->Size();
-}
-
-
-static void ReportHistogram(Isolate* isolate, bool print_spill) {
-  PrintF("\n  Object Histogram:\n");
-  for (int i = 0; i <= LAST_TYPE; i++) {
-    if (isolate->heap_histograms()[i].number() > 0) {
-      PrintF("    %-34s%10d (%10d bytes)\n",
-             isolate->heap_histograms()[i].name(),
-             isolate->heap_histograms()[i].number(),
-             isolate->heap_histograms()[i].bytes());
-    }
-  }
-  PrintF("\n");
-
-  // Summarize string types.
-  int string_number = 0;
-  int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name)      \
-    string_number += isolate->heap_histograms()[type].number(); \
-    string_bytes += isolate->heap_histograms()[type].bytes();
-  STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
-  if (string_number > 0) {
-    PrintF("    %-34s%10d (%10d bytes)\n\n", "STRING_TYPE", string_number,
-           string_bytes);
-  }
-
-  if (FLAG_collect_heap_spill_statistics && print_spill) {
-    isolate->js_spill_information()->Print();
-  }
-}
-#endif  // DEBUG
-
-
-// Support for statistics gathering for --heap-stats and --log-gc.
-void NewSpace::ClearHistograms() {
-  for (int i = 0; i <= LAST_TYPE; i++) {
-    allocated_histogram_[i].clear();
-    promoted_histogram_[i].clear();
-  }
-}
-
-
-// Because the copying collector does not touch garbage objects, we iterate
-// the new space before a collection to get a histogram of allocated objects.
-// This only happens when --log-gc flag is set.
-void NewSpace::CollectStatistics() {
-  ClearHistograms();
-  SemiSpaceIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next())
-    RecordAllocation(obj);
-}
-
-
-static void DoReportStatistics(Isolate* isolate,
-                               HistogramInfo* info, const char* description) {
-  LOG(isolate, HeapSampleBeginEvent("NewSpace", description));
-  // Lump all the string types together.
-  int string_number = 0;
-  int string_bytes = 0;
-#define INCREMENT(type, size, name, camel_name)       \
-    string_number += info[type].number();             \
-    string_bytes += info[type].bytes();
-  STRING_TYPE_LIST(INCREMENT)
-#undef INCREMENT
-  if (string_number > 0) {
-    LOG(isolate,
-        HeapSampleItemEvent("STRING_TYPE", string_number, string_bytes));
-  }
-
-  // Then do the other types.
-  for (int i = FIRST_NONSTRING_TYPE; i <= LAST_TYPE; ++i) {
-    if (info[i].number() > 0) {
-      LOG(isolate,
-          HeapSampleItemEvent(info[i].name(), info[i].number(),
-                              info[i].bytes()));
-    }
-  }
-  LOG(isolate, HeapSampleEndEvent("NewSpace", description));
-}
-
-
-void NewSpace::ReportStatistics() {
-#ifdef DEBUG
-  if (FLAG_heap_stats) {
-    float pct = static_cast<float>(Available()) / Capacity();
-    PrintF("  capacity: %" V8_PTR_PREFIX "d"
-               ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-           Capacity(), Available(), static_cast<int>(pct*100));
-    PrintF("\n  Object Histogram:\n");
-    for (int i = 0; i <= LAST_TYPE; i++) {
-      if (allocated_histogram_[i].number() > 0) {
-        PrintF("    %-34s%10d (%10d bytes)\n",
-               allocated_histogram_[i].name(),
-               allocated_histogram_[i].number(),
-               allocated_histogram_[i].bytes());
-      }
-    }
-    PrintF("\n");
-  }
-#endif  // DEBUG
-
-  if (FLAG_log_gc) {
-    Isolate* isolate = heap()->isolate();
-    DoReportStatistics(isolate, allocated_histogram_, "allocated");
-    DoReportStatistics(isolate, promoted_histogram_, "promoted");
-  }
-}
-
-
-void NewSpace::RecordAllocation(HeapObject* obj) {
-  InstanceType type = obj->map()->instance_type();
-  ASSERT(0 <= type && type <= LAST_TYPE);
-  allocated_histogram_[type].increment_number(1);
-  allocated_histogram_[type].increment_bytes(obj->Size());
-}
-
-
-void NewSpace::RecordPromotion(HeapObject* obj) {
-  InstanceType type = obj->map()->instance_type();
-  ASSERT(0 <= type && type <= LAST_TYPE);
-  promoted_histogram_[type].increment_number(1);
-  promoted_histogram_[type].increment_bytes(obj->Size());
-}
-
-
-size_t NewSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-  size_t size = to_space_.CommittedPhysicalMemory();
-  if (from_space_.is_committed()) {
-    size += from_space_.CommittedPhysicalMemory();
-  }
-  return size;
-}
-
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces implementation
-
-void FreeListNode::set_size(Heap* heap, int size_in_bytes) {
-  ASSERT(size_in_bytes > 0);
-  ASSERT(IsAligned(size_in_bytes, kPointerSize));
-
-  // We write a map and possibly size information to the block.  If the block
-  // is big enough to be a FreeSpace with at least one extra word (the next
-  // pointer), we set its map to be the free space map and its size to an
-  // appropriate array length for the desired size from HeapObject::Size().
-  // If the block is too small (e.g., one or two words) to hold both a size
-  // field and a next pointer, we give it a filler map that gives it the
-  // correct size.
-  if (size_in_bytes > FreeSpace::kHeaderSize) {
-    // Can't use FreeSpace::cast because it fails during deserialization.
-    // We have to set the size first with a release store before we store
-    // the map because a concurrent store buffer scan on scavenge must not
-    // observe a map with an invalid size.
-    FreeSpace* this_as_free_space = reinterpret_cast<FreeSpace*>(this);
-    this_as_free_space->nobarrier_set_size(size_in_bytes);
-    synchronized_set_map_no_write_barrier(heap->raw_unchecked_free_space_map());
-  } else if (size_in_bytes == kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_one_pointer_filler_map());
-  } else if (size_in_bytes == 2 * kPointerSize) {
-    set_map_no_write_barrier(heap->raw_unchecked_two_pointer_filler_map());
-  } else {
-    UNREACHABLE();
-  }
-  // We would like to ASSERT(Size() == size_in_bytes) but this would fail during
-  // deserialization because the free space map is not done yet.
-}
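-
-// Encoding examples for the three cases above, assuming kPointerSize == 8
-// and FreeSpace::kHeaderSize == 2 * kPointerSize:
-//   set_size(heap, 8)   -> one_pointer_filler_map, no explicit size field.
-//   set_size(heap, 16)  -> two_pointer_filler_map, no explicit size field.
-//   set_size(heap, 64)  -> free_space_map with an explicit size of 64; the
-//                          word after the header doubles as the free-list
-//                          'next' pointer (see next()/set_next() below).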
-
-
-FreeListNode* FreeListNode::next() {
-  ASSERT(IsFreeListNode(this));
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kNextOffset));
-  } else {
-    return reinterpret_cast<FreeListNode*>(
-        Memory::Address_at(address() + kPointerSize));
-  }
-}
-
-
-FreeListNode** FreeListNode::next_address() {
-  ASSERT(IsFreeListNode(this));
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    ASSERT(Size() >= kNextOffset + kPointerSize);
-    return reinterpret_cast<FreeListNode**>(address() + kNextOffset);
-  } else {
-    return reinterpret_cast<FreeListNode**>(address() + kPointerSize);
-  }
-}
-
-
-void FreeListNode::set_next(FreeListNode* next) {
-  ASSERT(IsFreeListNode(this));
-  // While we are booting the VM the free space map will actually be null.  So
-  // we have to make sure that we don't try to use it for anything at that
-  // stage.
-  if (map() == GetHeap()->raw_unchecked_free_space_map()) {
-    ASSERT(map() == NULL || Size() >= kNextOffset + kPointerSize);
-    base::NoBarrier_Store(
-        reinterpret_cast<base::AtomicWord*>(address() + kNextOffset),
-        reinterpret_cast<base::AtomicWord>(next));
-  } else {
-    base::NoBarrier_Store(
-        reinterpret_cast<base::AtomicWord*>(address() + kPointerSize),
-        reinterpret_cast<base::AtomicWord>(next));
-  }
-}
-
-
-intptr_t FreeListCategory::Concatenate(FreeListCategory* category) {
-  intptr_t free_bytes = 0;
-  if (category->top() != NULL) {
-    // This is safe (no deadlock) because Concatenate operations are never
-    // performed on the same pair of free lists concurrently in reverse
-    // order, so the two locks below are always taken in a consistent order.
-    LockGuard<Mutex> target_lock_guard(mutex());
-    LockGuard<Mutex> source_lock_guard(category->mutex());
-    ASSERT(category->end_ != NULL);
-    free_bytes = category->available();
-    if (end_ == NULL) {
-      end_ = category->end();
-    } else {
-      category->end()->set_next(top());
-    }
-    set_top(category->top());
-    base::NoBarrier_Store(&top_, category->top_);
-    available_ += category->available();
-    category->Reset();
-  }
-  return free_bytes;
-}
-
-
-void FreeListCategory::Reset() {
-  set_top(NULL);
-  set_end(NULL);
-  set_available(0);
-}
-
-
-intptr_t FreeListCategory::EvictFreeListItemsInList(Page* p) {
-  int sum = 0;
-  FreeListNode* t = top();
-  FreeListNode** n = &t;
-  while (*n != NULL) {
-    if (Page::FromAddress((*n)->address()) == p) {
-      FreeSpace* free_space = reinterpret_cast<FreeSpace*>(*n);
-      sum += free_space->Size();
-      *n = (*n)->next();
-    } else {
-      n = (*n)->next_address();
-    }
-  }
-  set_top(t);
-  if (top() == NULL) {
-    set_end(NULL);
-  }
-  available_ -= sum;
-  return sum;
-}
-
-
-bool FreeListCategory::ContainsPageFreeListItemsInList(Page* p) {
-  FreeListNode* node = top();
-  while (node != NULL) {
-    if (Page::FromAddress(node->address()) == p) return true;
-    node = node->next();
-  }
-  return false;
-}
-
-
-FreeListNode* FreeListCategory::PickNodeFromList(int *node_size) {
-  FreeListNode* node = top();
-
-  if (node == NULL) return NULL;
-
-  while (node != NULL &&
-         Page::FromAddress(node->address())->IsEvacuationCandidate()) {
-    available_ -= reinterpret_cast<FreeSpace*>(node)->Size();
-    node = node->next();
-  }
-
-  if (node != NULL) {
-    set_top(node->next());
-    *node_size = reinterpret_cast<FreeSpace*>(node)->Size();
-    available_ -= *node_size;
-  } else {
-    set_top(NULL);
-  }
-
-  if (top() == NULL) {
-    set_end(NULL);
-  }
-
-  return node;
-}
-
-
-FreeListNode* FreeListCategory::PickNodeFromList(int size_in_bytes,
-                                                 int *node_size) {
-  FreeListNode* node = PickNodeFromList(node_size);
-  if (node != NULL && *node_size < size_in_bytes) {
-    Free(node, *node_size);
-    *node_size = 0;
-    return NULL;
-  }
-  return node;
-}
-
-
-void FreeListCategory::Free(FreeListNode* node, int size_in_bytes) {
-  node->set_next(top());
-  set_top(node);
-  if (end_ == NULL) {
-    end_ = node;
-  }
-  available_ += size_in_bytes;
-}
-
-
-void FreeListCategory::RepairFreeList(Heap* heap) {
-  FreeListNode* n = top();
-  while (n != NULL) {
-    Map** map_location = reinterpret_cast<Map**>(n->address());
-    if (*map_location == NULL) {
-      *map_location = heap->free_space_map();
-    } else {
-      ASSERT(*map_location == heap->free_space_map());
-    }
-    n = n->next();
-  }
-}
-
-
-FreeList::FreeList(PagedSpace* owner)
-    : owner_(owner), heap_(owner->heap()) {
-  Reset();
-}
-
-
-intptr_t FreeList::Concatenate(FreeList* free_list) {
-  intptr_t free_bytes = 0;
-  free_bytes += small_list_.Concatenate(free_list->small_list());
-  free_bytes += medium_list_.Concatenate(free_list->medium_list());
-  free_bytes += large_list_.Concatenate(free_list->large_list());
-  free_bytes += huge_list_.Concatenate(free_list->huge_list());
-  return free_bytes;
-}
-
-
-void FreeList::Reset() {
-  small_list_.Reset();
-  medium_list_.Reset();
-  large_list_.Reset();
-  huge_list_.Reset();
-}
-
-
-int FreeList::Free(Address start, int size_in_bytes) {
-  if (size_in_bytes == 0) return 0;
-
-  FreeListNode* node = FreeListNode::FromAddress(start);
-  node->set_size(heap_, size_in_bytes);
-  Page* page = Page::FromAddress(start);
-
-  // Early return to drop too-small blocks on the floor.
-  if (size_in_bytes < kSmallListMin) {
-    page->add_non_available_small_blocks(size_in_bytes);
-    return size_in_bytes;
-  }
-
-  // Insert other blocks at the head of a free list of the appropriate
-  // magnitude.
-  if (size_in_bytes <= kSmallListMax) {
-    small_list_.Free(node, size_in_bytes);
-    page->add_available_in_small_free_list(size_in_bytes);
-  } else if (size_in_bytes <= kMediumListMax) {
-    medium_list_.Free(node, size_in_bytes);
-    page->add_available_in_medium_free_list(size_in_bytes);
-  } else if (size_in_bytes <= kLargeListMax) {
-    large_list_.Free(node, size_in_bytes);
-    page->add_available_in_large_free_list(size_in_bytes);
-  } else {
-    huge_list_.Free(node, size_in_bytes);
-    page->add_available_in_huge_free_list(size_in_bytes);
-  }
-
-  ASSERT(IsVeryLong() || available() == SumFreeLists());
-  return 0;
-}
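-
-// Routing sketch: every freed block is either recorded as non-available
-// (too small to be worth linking, e.g. a 100-byte block if kSmallListMin
-// were 256 bytes) or pushed onto exactly one of the small/medium/large/huge
-// category lists by size. The per-page counters updated above keep a
-// per-page view of where its free bytes live (see ObtainFreeListStatistics
-// above).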
-
-
-FreeListNode* FreeList::FindNodeFor(int size_in_bytes, int* node_size) {
-  FreeListNode* node = NULL;
-  Page* page = NULL;
-
-  if (size_in_bytes <= kSmallAllocationMax) {
-    node = small_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      ASSERT(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_small_free_list(-(*node_size));
-      ASSERT(IsVeryLong() || available() == SumFreeLists());
-      return node;
-    }
-  }
-
-  if (size_in_bytes <= kMediumAllocationMax) {
-    node = medium_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      ASSERT(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_medium_free_list(-(*node_size));
-      ASSERT(IsVeryLong() || available() == SumFreeLists());
-      return node;
-    }
-  }
-
-  if (size_in_bytes <= kLargeAllocationMax) {
-    node = large_list_.PickNodeFromList(node_size);
-    if (node != NULL) {
-      ASSERT(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_large_free_list(-(*node_size));
-      ASSERT(IsVeryLong() || available() == SumFreeLists());
-      return node;
-    }
-  }
-
-  int huge_list_available = huge_list_.available();
-  FreeListNode* top_node = huge_list_.top();
-  for (FreeListNode** cur = &top_node;
-       *cur != NULL;
-       cur = (*cur)->next_address()) {
-    FreeListNode* cur_node = *cur;
-    while (cur_node != NULL &&
-           Page::FromAddress(cur_node->address())->IsEvacuationCandidate()) {
-      int size = reinterpret_cast<FreeSpace*>(cur_node)->Size();
-      huge_list_available -= size;
-      page = Page::FromAddress(cur_node->address());
-      page->add_available_in_huge_free_list(-size);
-      cur_node = cur_node->next();
-    }
-
-    *cur = cur_node;
-    if (cur_node == NULL) {
-      huge_list_.set_end(NULL);
-      break;
-    }
-
-    ASSERT((*cur)->map() == heap_->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(*cur);
-    int size = cur_as_free_space->Size();
-    if (size >= size_in_bytes) {
-      // Large enough node found.  Unlink it from the list.
-      node = *cur;
-      *cur = node->next();
-      *node_size = size;
-      huge_list_available -= size;
-      page = Page::FromAddress(node->address());
-      page->add_available_in_huge_free_list(-size);
-      break;
-    }
-  }
-
-  huge_list_.set_top(top_node);
-  if (huge_list_.top() == NULL) {
-    huge_list_.set_end(NULL);
-  }
-  huge_list_.set_available(huge_list_available);
-
-  if (node != NULL) {
-    ASSERT(IsVeryLong() || available() == SumFreeLists());
-    return node;
-  }
-
-  if (size_in_bytes <= kSmallListMax) {
-    node = small_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      ASSERT(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_small_free_list(-(*node_size));
-    }
-  } else if (size_in_bytes <= kMediumListMax) {
-    node = medium_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      ASSERT(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_medium_free_list(-(*node_size));
-    }
-  } else if (size_in_bytes <= kLargeListMax) {
-    node = large_list_.PickNodeFromList(size_in_bytes, node_size);
-    if (node != NULL) {
-      ASSERT(size_in_bytes <= *node_size);
-      page = Page::FromAddress(node->address());
-      page->add_available_in_large_free_list(-(*node_size));
-    }
-  }
-
-  ASSERT(IsVeryLong() || available() == SumFreeLists());
-  return node;
-}
-
-
-// Allocation on the old space free list.  If it succeeds then a new linear
-// allocation space has been set up with the top and limit of the space.  If
-// the allocation fails then NULL is returned, and the caller can perform a GC
-// or allocate a new page before retrying.
-HeapObject* FreeList::Allocate(int size_in_bytes) {
-  ASSERT(0 < size_in_bytes);
-  ASSERT(size_in_bytes <= kMaxBlockSize);
-  ASSERT(IsAligned(size_in_bytes, kPointerSize));
-  // Don't free list allocate if there is linear space available.
-  ASSERT(owner_->limit() - owner_->top() < size_in_bytes);
-
-  int old_linear_size = static_cast<int>(owner_->limit() - owner_->top());
-  // Mark the old linear allocation area with a free space map so it can be
-  // skipped when scanning the heap.  This also puts it back in the free list
-  // if it is big enough.
-  owner_->Free(owner_->top(), old_linear_size);
-
-  owner_->heap()->incremental_marking()->OldSpaceStep(
-      size_in_bytes - old_linear_size);
-
-  int new_node_size = 0;
-  FreeListNode* new_node = FindNodeFor(size_in_bytes, &new_node_size);
-  if (new_node == NULL) {
-    owner_->SetTopAndLimit(NULL, NULL);
-    return NULL;
-  }
-
-  int bytes_left = new_node_size - size_in_bytes;
-  ASSERT(bytes_left >= 0);
-
-#ifdef DEBUG
-  for (int i = 0; i < size_in_bytes / kPointerSize; i++) {
-    reinterpret_cast<Object**>(new_node->address())[i] =
-        Smi::FromInt(kCodeZapValue);
-  }
-#endif
-
-  // The old-space-step might have finished sweeping and restarted marking.
-  // Verify that it did not turn the page of the new node into an evacuation
-  // candidate.
-  ASSERT(!MarkCompactCollector::IsOnEvacuationCandidate(new_node));
-
-  const int kThreshold = IncrementalMarking::kAllocatedThreshold;
-
-  // Memory in the linear allocation area is counted as allocated.  We may free
-  // a little of this again immediately - see below.
-  owner_->Allocate(new_node_size);
-
-  if (owner_->heap()->inline_allocation_disabled()) {
-    // Keep the linear allocation area empty if requested to do so, just
-    // return area back to the free list instead.
-    owner_->Free(new_node->address() + size_in_bytes, bytes_left);
-    ASSERT(owner_->top() == NULL && owner_->limit() == NULL);
-  } else if (bytes_left > kThreshold &&
-             owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
-             FLAG_incremental_marking_steps) {
-    int linear_size = owner_->RoundSizeDownToObjectAlignment(kThreshold);
-    // We don't want to give too large linear areas to the allocator while
-    // incremental marking is going on, because we won't check again whether
-    // we want to do another increment until the linear area is used up.
-    owner_->Free(new_node->address() + size_in_bytes + linear_size,
-                 new_node_size - size_in_bytes - linear_size);
-    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                           new_node->address() + size_in_bytes + linear_size);
-  } else if (bytes_left > 0) {
-    // Normally we give the rest of the node to the allocator as its new
-    // linear allocation area.
-    owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
-                           new_node->address() + new_node_size);
-  } else {
-    // TODO(gc) Try not to free the linear allocation region when bytes_left
-    // is zero.
-    owner_->SetTopAndLimit(NULL, NULL);
-  }
-
-  return new_node;
-}
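-
-// Carving example for the branches above (hypothetical sizes): a 32-byte
-// request satisfied by a 256-byte node leaves bytes_left == 224. Normally
-// top/limit become node + 32 and node + 256, so subsequent small
-// allocations are pure bump-pointer allocations from the same node. During
-// incremental marking the limit is instead pulled in to about kThreshold
-// bytes and the tail beyond it is returned to the free list immediately.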
-
-
-intptr_t FreeList::EvictFreeListItems(Page* p) {
-  intptr_t sum = huge_list_.EvictFreeListItemsInList(p);
-  p->set_available_in_huge_free_list(0);
-
-  if (sum < p->area_size()) {
-    sum += small_list_.EvictFreeListItemsInList(p) +
-        medium_list_.EvictFreeListItemsInList(p) +
-        large_list_.EvictFreeListItemsInList(p);
-    p->set_available_in_small_free_list(0);
-    p->set_available_in_medium_free_list(0);
-    p->set_available_in_large_free_list(0);
-  }
-
-  return sum;
-}
-
-
-bool FreeList::ContainsPageFreeListItems(Page* p) {
-  return huge_list_.EvictFreeListItemsInList(p) ||
-         small_list_.EvictFreeListItemsInList(p) ||
-         medium_list_.EvictFreeListItemsInList(p) ||
-         large_list_.EvictFreeListItemsInList(p);
-}
-
-
-void FreeList::RepairLists(Heap* heap) {
-  small_list_.RepairFreeList(heap);
-  medium_list_.RepairFreeList(heap);
-  large_list_.RepairFreeList(heap);
-  huge_list_.RepairFreeList(heap);
-}
-
-
-#ifdef DEBUG
-intptr_t FreeListCategory::SumFreeList() {
-  intptr_t sum = 0;
-  FreeListNode* cur = top();
-  while (cur != NULL) {
-    ASSERT(cur->map() == cur->GetHeap()->raw_unchecked_free_space_map());
-    FreeSpace* cur_as_free_space = reinterpret_cast<FreeSpace*>(cur);
-    sum += cur_as_free_space->nobarrier_size();
-    cur = cur->next();
-  }
-  return sum;
-}
-
-
-static const int kVeryLongFreeList = 500;
-
-
-int FreeListCategory::FreeListLength() {
-  int length = 0;
-  FreeListNode* cur = top();
-  while (cur != NULL) {
-    length++;
-    cur = cur->next();
-    if (length == kVeryLongFreeList) return length;
-  }
-  return length;
-}
-
-
-bool FreeList::IsVeryLong() {
-  if (small_list_.FreeListLength() == kVeryLongFreeList) return true;
-  if (medium_list_.FreeListLength() == kVeryLongFreeList) return true;
-  if (large_list_.FreeListLength() == kVeryLongFreeList) return true;
-  if (huge_list_.FreeListLength() == kVeryLongFreeList) return true;
-  return false;
-}
-
-
-// This can take a very long time because it is linear in the number of entries
-// on the free list, so it should not be called if FreeListLength returns
-// kVeryLongFreeList.
-intptr_t FreeList::SumFreeLists() {
-  intptr_t sum = small_list_.SumFreeList();
-  sum += medium_list_.SumFreeList();
-  sum += large_list_.SumFreeList();
-  sum += huge_list_.SumFreeList();
-  return sum;
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// OldSpace implementation
-
-void PagedSpace::PrepareForMarkCompact() {
-  // We don't have a linear allocation area while sweeping.  It will be restored
-  // on the first allocation after the sweep.
-  EmptyAllocationInfo();
-
-  // This counter will be increased for pages which will be swept by the
-  // sweeper threads.
-  unswept_free_bytes_ = 0;
-
-  // Clear the free list before a full GC---it will be rebuilt afterward.
-  free_list_.Reset();
-}
-
-
-intptr_t PagedSpace::SizeOfObjects() {
-  ASSERT(heap()->mark_compact_collector()->IsConcurrentSweepingInProgress() ||
-         (unswept_free_bytes_ == 0));
-  return Size() - unswept_free_bytes_ - (limit() - top());
-}
-
-
-// After we have booted, we have created a map which represents free space
-// on the heap.  If there was already a free list then the elements on it
-// were created with the wrong FreeSpaceMap (normally NULL), so we need to
-// fix them.
-void PagedSpace::RepairFreeListsAfterBoot() {
-  free_list_.RepairLists(heap());
-}
-
-
-void PagedSpace::EvictEvacuationCandidatesFromFreeLists() {
-  if (allocation_info_.top() >= allocation_info_.limit()) return;
-
-  if (Page::FromAllocationTop(allocation_info_.top())->
-      IsEvacuationCandidate()) {
-    // Create filler object to keep page iterable if it was iterable.
-    int remaining =
-        static_cast<int>(allocation_info_.limit() - allocation_info_.top());
-    heap()->CreateFillerObjectAt(allocation_info_.top(), remaining);
-
-    allocation_info_.set_top(NULL);
-    allocation_info_.set_limit(NULL);
-  }
-}
-
-
-HeapObject* PagedSpace::WaitForSweeperThreadsAndRetryAllocation(
-    int size_in_bytes) {
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-
-  // If sweeper threads are still running, wait for them.
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->WaitUntilSweepingCompleted();
-
-    // After waiting for the sweeper threads, there may be new free-list
-    // entries.
-    return free_list_.Allocate(size_in_bytes);
-  }
-  return NULL;
-}
-
-
-HeapObject* PagedSpace::SlowAllocateRaw(int size_in_bytes) {
-  // Allocation in this space has failed.
-
-  // If sweeper threads are active, try to re-fill the free-lists.
-  MarkCompactCollector* collector = heap()->mark_compact_collector();
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->RefillFreeList(this);
-
-    // Retry the free list allocation.
-    HeapObject* object = free_list_.Allocate(size_in_bytes);
-    if (object != NULL) return object;
-  }
-
-  // Free list allocation failed and there is no next page.  Fail if we have
-  // hit the old generation size limit that should cause a garbage
-  // collection.
-  if (!heap()->always_allocate()
-      && heap()->OldGenerationAllocationLimitReached()) {
-    // If sweeper threads are active, wait for them at that point and steal
-    // elements from their free-lists.
-    HeapObject* object = WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
-    if (object != NULL) return object;
-  }
-
-  // Try to expand the space and allocate in the new next page.
-  if (Expand()) {
-    ASSERT(CountTotalPages() > 1 || size_in_bytes <= free_list_.available());
-    return free_list_.Allocate(size_in_bytes);
-  }
-
-  // If sweeper threads are active, wait for them at that point and steal
-  // elements from their free-lists. Allocation may still fail then, which
-  // would indicate that there is not enough memory for the given allocation.
-  return WaitForSweeperThreadsAndRetryAllocation(size_in_bytes);
-}
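-
-// Summary of the fallback order above: (1) refill free lists from pages the
-// concurrent sweepers have finished and retry; (2) if the old-generation
-// allocation limit was reached, wait for the sweepers and retry, so a full
-// GC is only forced when memory is genuinely exhausted; (3) expand the
-// space by a new page; (4) as a last resort, wait for the sweepers once
-// more. A NULL result tells the caller to retry after a GC.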
-
-
-#ifdef DEBUG
-void PagedSpace::ReportCodeStatistics(Isolate* isolate) {
-  CommentStatistic* comments_statistics =
-      isolate->paged_space_comments_statistics();
-  ReportCodeKindStatistics(isolate->code_kind_statistics());
-  PrintF("Code comment statistics (\"   [ comment-txt   :    size/   "
-         "count  (average)\"):\n");
-  for (int i = 0; i <= CommentStatistic::kMaxComments; i++) {
-    const CommentStatistic& cs = comments_statistics[i];
-    if (cs.size > 0) {
-      PrintF("   %-30s: %10d/%6d     (%d)\n", cs.comment, cs.size, cs.count,
-             cs.size/cs.count);
-    }
-  }
-  PrintF("\n");
-}
-
-
-void PagedSpace::ResetCodeStatistics(Isolate* isolate) {
-  CommentStatistic* comments_statistics =
-      isolate->paged_space_comments_statistics();
-  ClearCodeKindStatistics(isolate->code_kind_statistics());
-  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
-    comments_statistics[i].Clear();
-  }
-  comments_statistics[CommentStatistic::kMaxComments].comment = "Unknown";
-  comments_statistics[CommentStatistic::kMaxComments].size = 0;
-  comments_statistics[CommentStatistic::kMaxComments].count = 0;
-}
-
-
-// Adds comment to 'comment_statistics' table. Performance OK as long as
-// 'kMaxComments' is small.
-static void EnterComment(Isolate* isolate, const char* comment, int delta) {
-  CommentStatistic* comments_statistics =
-      isolate->paged_space_comments_statistics();
-  // Do not count empty comments
-  if (delta <= 0) return;
-  CommentStatistic* cs = &comments_statistics[CommentStatistic::kMaxComments];
-  // Search for a free or matching entry in 'comments_statistics': 'cs'
-  // points to result.
-  for (int i = 0; i < CommentStatistic::kMaxComments; i++) {
-    if (comments_statistics[i].comment == NULL) {
-      cs = &comments_statistics[i];
-      cs->comment = comment;
-      break;
-    } else if (strcmp(comments_statistics[i].comment, comment) == 0) {
-      cs = &comments_statistics[i];
-      break;
-    }
-  }
-  // Update entry for 'comment'
-  cs->size += delta;
-  cs->count += 1;
-}
-
-
-// Call for each nested comment start (start marked with '[ xxx', end marked
-// with ']').  RelocIterator 'it' must point to a comment reloc info.
-static void CollectCommentStatistics(Isolate* isolate, RelocIterator* it) {
-  ASSERT(!it->done());
-  ASSERT(it->rinfo()->rmode() == RelocInfo::COMMENT);
-  const char* tmp = reinterpret_cast<const char*>(it->rinfo()->data());
-  if (tmp[0] != '[') {
-    // Not a nested comment; skip
-    return;
-  }
-
-  // Search for end of nested comment or a new nested comment
-  const char* const comment_txt =
-      reinterpret_cast<const char*>(it->rinfo()->data());
-  const byte* prev_pc = it->rinfo()->pc();
-  int flat_delta = 0;
-  it->next();
-  while (true) {
-    // All nested comments must be terminated properly, and therefore we
-    // will eventually exit this loop.
-    ASSERT(!it->done());
-    if (it->rinfo()->rmode() == RelocInfo::COMMENT) {
-      const char* const txt =
-          reinterpret_cast<const char*>(it->rinfo()->data());
-      flat_delta += static_cast<int>(it->rinfo()->pc() - prev_pc);
-      if (txt[0] == ']') break;  // End of nested comment
-      // A new comment
-      CollectCommentStatistics(isolate, it);
-      // Skip code that was covered with previous comment
-      prev_pc = it->rinfo()->pc();
-    }
-    it->next();
-  }
-  EnterComment(isolate, comment_txt, flat_delta);
-}
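-
-// Attribution example for the recursion above (illustrative comment
-// stream): "[ LoadIC" ... "[ CheckMap" ... "]" ... "]". The recursive call
-// charges the inner bytes to "CheckMap", while flat_delta accumulates only
-// the bytes between the outer markers that the inner pair does not cover,
-// so those are charged to "LoadIC".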
-
-
-// Collects code size statistics:
-// - by code kind
-// - by code comment
-void PagedSpace::CollectCodeStatistics() {
-  Isolate* isolate = heap()->isolate();
-  HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsCode()) {
-      Code* code = Code::cast(obj);
-      isolate->code_kind_statistics()[code->kind()] += code->Size();
-      RelocIterator it(code);
-      int delta = 0;
-      const byte* prev_pc = code->instruction_start();
-      while (!it.done()) {
-        if (it.rinfo()->rmode() == RelocInfo::COMMENT) {
-          delta += static_cast<int>(it.rinfo()->pc() - prev_pc);
-          CollectCommentStatistics(isolate, &it);
-          prev_pc = it.rinfo()->pc();
-        }
-        it.next();
-      }
-
-      ASSERT(code->instruction_start() <= prev_pc &&
-             prev_pc <= code->instruction_end());
-      delta += static_cast<int>(code->instruction_end() - prev_pc);
-      EnterComment(isolate, "NoComment", delta);
-    }
-  }
-}
-
-
-void PagedSpace::ReportStatistics() {
-  int pct = static_cast<int>(Available() * 100 / Capacity());
-  PrintF("  capacity: %" V8_PTR_PREFIX "d"
-             ", waste: %" V8_PTR_PREFIX "d"
-             ", available: %" V8_PTR_PREFIX "d, %%%d\n",
-         Capacity(), Waste(), Available(), pct);
-
-  if (was_swept_conservatively_) return;
-  ClearHistograms(heap()->isolate());
-  HeapObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next())
-    CollectHistogramInfo(obj);
-  ReportHistogram(heap()->isolate(), true);
-}
-#endif
-
-
-// -----------------------------------------------------------------------------
-// MapSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
-
-void MapSpace::VerifyObject(HeapObject* object) {
-  CHECK(object->IsMap());
-}
-
-
-// -----------------------------------------------------------------------------
-// CellSpace and PropertyCellSpace implementation
-// TODO(mvstanton): this is weird...the compiler can't make a vtable unless
-// there is at least one non-inlined virtual function. I would prefer to hide
-// the VerifyObject definition behind VERIFY_HEAP.
-
-void CellSpace::VerifyObject(HeapObject* object) {
-  CHECK(object->IsCell());
-}
-
-
-void PropertyCellSpace::VerifyObject(HeapObject* object) {
-  CHECK(object->IsPropertyCell());
-}
-
-
-// -----------------------------------------------------------------------------
-// LargeObjectIterator
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space) {
-  current_ = space->first_page_;
-  size_func_ = NULL;
-}
-
-
-LargeObjectIterator::LargeObjectIterator(LargeObjectSpace* space,
-                                         HeapObjectCallback size_func) {
-  current_ = space->first_page_;
-  size_func_ = size_func;
-}
-
-
-HeapObject* LargeObjectIterator::Next() {
-  if (current_ == NULL) return NULL;
-
-  HeapObject* object = current_->GetObject();
-  current_ = current_->next_page();
-  return object;
-}
-
-
-// -----------------------------------------------------------------------------
-// LargeObjectSpace
-static bool ComparePointers(void* key1, void* key2) {
-    return key1 == key2;
-}
-
-
-LargeObjectSpace::LargeObjectSpace(Heap* heap,
-                                   intptr_t max_capacity,
-                                   AllocationSpace id)
-    : Space(heap, id, NOT_EXECUTABLE),  // Managed on a per-allocation basis
-      max_capacity_(max_capacity),
-      first_page_(NULL),
-      size_(0),
-      page_count_(0),
-      objects_size_(0),
-      chunk_map_(ComparePointers, 1024) {}
-
-
-bool LargeObjectSpace::SetUp() {
-  first_page_ = NULL;
-  size_ = 0;
-  maximum_committed_ = 0;
-  page_count_ = 0;
-  objects_size_ = 0;
-  chunk_map_.Clear();
-  return true;
-}
-
-
-void LargeObjectSpace::TearDown() {
-  while (first_page_ != NULL) {
-    LargePage* page = first_page_;
-    first_page_ = first_page_->next_page();
-    LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", page->address()));
-
-    ObjectSpace space = static_cast<ObjectSpace>(1 << identity());
-    heap()->isolate()->memory_allocator()->PerformAllocationCallback(
-        space, kAllocationActionFree, page->size());
-    heap()->isolate()->memory_allocator()->Free(page);
-  }
-  SetUp();
-}
-
-
-AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
-                                               Executability executable) {
-  // Check if we want to force a GC before growing the old space further.
-  // If so, fail the allocation.
-  if (!heap()->always_allocate() &&
-      heap()->OldGenerationAllocationLimitReached()) {
-    return AllocationResult::Retry(identity());
-  }
-
-  if (Size() + object_size > max_capacity_) {
-    return AllocationResult::Retry(identity());
-  }
-
-  LargePage* page = heap()->isolate()->memory_allocator()->
-      AllocateLargePage(object_size, this, executable);
-  if (page == NULL) return AllocationResult::Retry(identity());
-  ASSERT(page->area_size() >= object_size);
-
-  size_ += static_cast<int>(page->size());
-  objects_size_ += object_size;
-  page_count_++;
-  page->set_next_page(first_page_);
-  first_page_ = page;
-
-  if (size_ > maximum_committed_) {
-    maximum_committed_ = size_;
-  }
-
-  // Register all MemoryChunk::kAlignment-aligned chunks covered by
-  // this large page in the chunk map.
-  uintptr_t base = reinterpret_cast<uintptr_t>(page) / MemoryChunk::kAlignment;
-  uintptr_t limit = base + (page->size() - 1) / MemoryChunk::kAlignment;
-  for (uintptr_t key = base; key <= limit; key++) {
-    HashMap::Entry* entry = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                              static_cast<uint32_t>(key),
-                                              true);
-    ASSERT(entry != NULL);
-    entry->value = page;
-  }
-
-  HeapObject* object = page->GetObject();
-
-  if (Heap::ShouldZapGarbage()) {
-    // Make the object consistent so the heap can be verified in OldSpaceStep.
-    // We only need to do this in debug builds or if verify_heap is on.
-    reinterpret_cast<Object**>(object->address())[0] =
-        heap()->fixed_array_map();
-    reinterpret_cast<Object**>(object->address())[1] = Smi::FromInt(0);
-  }
-
-  heap()->incremental_marking()->OldSpaceStep(object_size);
-  return object;
-}
-
-
-size_t LargeObjectSpace::CommittedPhysicalMemory() {
-  if (!VirtualMemory::HasLazyCommits()) return CommittedMemory();
-  size_t size = 0;
-  LargePage* current = first_page_;
-  while (current != NULL) {
-    size += current->CommittedPhysicalMemory();
-    current = current->next_page();
-  }
-  return size;
-}
-
-
-// GC support
-Object* LargeObjectSpace::FindObject(Address a) {
-  LargePage* page = FindPage(a);
-  if (page != NULL) {
-    return page->GetObject();
-  }
-  return Smi::FromInt(0);  // Signaling not found.
-}
-
-
-LargePage* LargeObjectSpace::FindPage(Address a) {
-  uintptr_t key = reinterpret_cast<uintptr_t>(a) / MemoryChunk::kAlignment;
-  HashMap::Entry* e = chunk_map_.Lookup(reinterpret_cast<void*>(key),
-                                        static_cast<uint32_t>(key),
-                                        false);
-  if (e != NULL) {
-    ASSERT(e->value != NULL);
-    LargePage* page = reinterpret_cast<LargePage*>(e->value);
-    ASSERT(page->is_valid());
-    if (page->Contains(a)) {
-      return page;
-    }
-  }
-  return NULL;
-}
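// A minimal standalone sketch (not V8 code; all names and constants here are
// illustrative) of the covering-key scheme used by AllocateRaw and FindPage
// above: every kAlignment-sized slice of a large page is registered under its
// own key, so any interior address resolves to its page in one hash lookup.
#include <cassert>
#include <cstdint>
#include <map>

int main() {
  const uintptr_t kToyAlignment = 4096;             // stand-in for kAlignment
  const uintptr_t page_start = 16 * kToyAlignment;  // pages are aligned
  const uintptr_t page_size = 3 * kToyAlignment + 100;

  // Registration, as in AllocateRaw: one entry per covered aligned slice.
  std::map<uintptr_t, uintptr_t> chunk_map;
  uintptr_t base = page_start / kToyAlignment;
  uintptr_t limit = base + (page_size - 1) / kToyAlignment;
  for (uintptr_t key = base; key <= limit; key++) chunk_map[key] = page_start;

  // Lookup, as in FindPage: divide the address by the alignment.
  uintptr_t addr = page_start + 2 * kToyAlignment + 17;
  assert(chunk_map.at(addr / kToyAlignment) == page_start);
  return 0;
}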
-
-
-void LargeObjectSpace::FreeUnmarkedObjects() {
-  LargePage* previous = NULL;
-  LargePage* current = first_page_;
-  while (current != NULL) {
-    HeapObject* object = current->GetObject();
-    // Can this large page contain pointers to non-trivial objects?  No other
-    // pointer object is this big.
-    bool is_pointer_object = object->IsFixedArray();
-    MarkBit mark_bit = Marking::MarkBitFrom(object);
-    if (mark_bit.Get()) {
-      mark_bit.Clear();
-      Page::FromAddress(object->address())->ResetProgressBar();
-      Page::FromAddress(object->address())->ResetLiveBytes();
-      previous = current;
-      current = current->next_page();
-    } else {
-      LargePage* page = current;
-      // Cut the chunk out from the chunk list.
-      current = current->next_page();
-      if (previous == NULL) {
-        first_page_ = current;
-      } else {
-        previous->set_next_page(current);
-      }
-
-      // Free the chunk.
-      heap()->mark_compact_collector()->ReportDeleteIfNeeded(
-          object, heap()->isolate());
-      size_ -= static_cast<int>(page->size());
-      objects_size_ -= object->Size();
-      page_count_--;
-
-      // Remove entries belonging to this page.
-      // Use variable alignment to help pass length check (<= 80 characters)
-      // of a single line in tools/presubmit.py.
-      const intptr_t alignment = MemoryChunk::kAlignment;
-      uintptr_t base = reinterpret_cast<uintptr_t>(page)/alignment;
-      uintptr_t limit = base + (page->size()-1)/alignment;
-      for (uintptr_t key = base; key <= limit; key++) {
-        chunk_map_.Remove(reinterpret_cast<void*>(key),
-                          static_cast<uint32_t>(key));
-      }
-
-      if (is_pointer_object) {
-        heap()->QueueMemoryChunkForFree(page);
-      } else {
-        heap()->isolate()->memory_allocator()->Free(page);
-      }
-    }
-  }
-  heap()->FreeQueuedChunks();
-}
-
-
-bool LargeObjectSpace::Contains(HeapObject* object) {
-  Address address = object->address();
-  MemoryChunk* chunk = MemoryChunk::FromAddress(address);
-
-  bool owned = (chunk->owner() == this);
-
-  SLOW_ASSERT(!owned || FindObject(address)->IsHeapObject());
-
-  return owned;
-}
-
-
-#ifdef VERIFY_HEAP
-// We do not assume that the large object iterator works, because it depends
-// on the invariants we are checking during verification.
-void LargeObjectSpace::Verify() {
-  for (LargePage* chunk = first_page_;
-       chunk != NULL;
-       chunk = chunk->next_page()) {
-    // Each chunk contains an object that starts at the large object page's
-    // object area start.
-    HeapObject* object = chunk->GetObject();
-    Page* page = Page::FromAddress(object->address());
-    CHECK(object->address() == page->area_start());
-
-    // The first word should be a map, and we expect all map pointers to be
-    // in map space.
-    Map* map = object->map();
-    CHECK(map->IsMap());
-    CHECK(heap()->map_space()->Contains(map));
-
-    // We have only code, sequential strings, external strings
-    // (sequential strings that have been morphed into external
-    // strings), fixed arrays, and byte arrays in large object space.
-    CHECK(object->IsCode() || object->IsSeqString() ||
-           object->IsExternalString() || object->IsFixedArray() ||
-           object->IsFixedDoubleArray() || object->IsByteArray());
-
-    // The object itself should look OK.
-    object->ObjectVerify();
-
-    // Byte arrays and strings don't have interior pointers.
-    if (object->IsCode()) {
-      VerifyPointersVisitor code_visitor;
-      object->IterateBody(map->instance_type(),
-                          object->Size(),
-                          &code_visitor);
-    } else if (object->IsFixedArray()) {
-      FixedArray* array = FixedArray::cast(object);
-      for (int j = 0; j < array->length(); j++) {
-        Object* element = array->get(j);
-        if (element->IsHeapObject()) {
-          HeapObject* element_object = HeapObject::cast(element);
-          CHECK(heap()->Contains(element_object));
-          CHECK(element_object->map()->IsMap());
-        }
-      }
-    }
-  }
-}
-#endif
-
-
-#ifdef DEBUG
-void LargeObjectSpace::Print() {
-  LargeObjectIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    obj->Print();
-  }
-}
-
-
-void LargeObjectSpace::ReportStatistics() {
-  PrintF("  size: %" V8_PTR_PREFIX "d\n", size_);
-  int num_objects = 0;
-  ClearHistograms(heap()->isolate());
-  LargeObjectIterator it(this);
-  for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
-    num_objects++;
-    CollectHistogramInfo(obj);
-  }
-
-  PrintF("  number of objects %d, "
-         "size of objects %" V8_PTR_PREFIX "d\n", num_objects, objects_size_);
-  if (num_objects > 0) ReportHistogram(heap()->isolate(), false);
-}
-
-
-void LargeObjectSpace::CollectCodeStatistics() {
-  Isolate* isolate = heap()->isolate();
-  LargeObjectIterator obj_it(this);
-  for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
-    if (obj->IsCode()) {
-      Code* code = Code::cast(obj);
-      isolate->code_kind_statistics()[code->kind()] += code->Size();
-    }
-  }
-}
-
-
-void Page::Print() {
-  // Make a best-effort to print the objects in the page.
-  PrintF("Page@%p in %s\n",
-         this->address(),
-         AllocationSpaceName(this->owner()->identity()));
-  printf(" --------------------------------------\n");
-  HeapObjectIterator objects(this, heap()->GcSafeSizeOfOldObjectFunction());
-  unsigned mark_size = 0;
-  for (HeapObject* object = objects.Next();
-       object != NULL;
-       object = objects.Next()) {
-    bool is_marked = Marking::MarkBitFrom(object).Get();
-    PrintF(" %c ", (is_marked ? '!' : ' '));  // Indent a little.
-    if (is_marked) {
-      mark_size += heap()->GcSafeSizeOfOldObjectFunction()(object);
-    }
-    object->ShortPrint();
-    PrintF("\n");
-  }
-  printf(" --------------------------------------\n");
-  printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
-}
-
-#endif  // DEBUG
-
-} }  // namespace v8::internal
diff --git a/src/spaces.h b/src/spaces.h
deleted file mode 100644
index a8c981d..0000000
--- a/src/spaces.h
+++ /dev/null
@@ -1,3012 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SPACES_H_
-#define V8_SPACES_H_
-
-#include "src/allocation.h"
-#include "src/base/atomicops.h"
-#include "src/hashmap.h"
-#include "src/list.h"
-#include "src/log.h"
-#include "src/platform/mutex.h"
-#include "src/utils.h"
-
-namespace v8 {
-namespace internal {
-
-class Isolate;
-
-// -----------------------------------------------------------------------------
-// Heap structures:
-//
-// A JS heap consists of a young generation, an old generation, and a large
-// object space. The young generation is divided into two semispaces. A
-// scavenger implements Cheney's copying algorithm. The old generation is
-// separated into a map space and an old object space. The map space contains
-// all (and only) map objects; the rest of the old objects go into the old
-// space.
-// The old generation is collected by a mark-sweep-compact collector.
-//
-// The semispaces of the young generation are contiguous.  The old and map
-// spaces consist of a list of pages. A page has a page header and an object
-// area.
-//
-// There is a separate large object space for objects larger than
-// Page::kMaxHeapObjectSize, so that they do not have to move during
-// collection. The large object space is paged. Pages in large object space
-// may be larger than the page size.
-//
-// A store-buffer based write barrier is used to keep track of intergenerational
-// references.  See store-buffer.h.
-//
-// During scavenges and mark-sweep collections we sometimes (after a store
-// buffer overflow) iterate intergenerational pointers without decoding heap
-// object maps, so if the page belongs to old pointer space or large object
-// space, it is essential to guarantee that the page does not contain any
-// garbage pointers to new space: every pointer aligned word which satisfies
-// the Heap::InNewSpace() predicate must be a pointer to a live heap object in
-// new space. Thus objects in old pointer and large object spaces should have a
-// special layout (e.g. no bare integer fields). This requirement does not
-// apply to map space which is iterated in a special fashion. However we still
-// require pointer fields of dead maps to be cleaned.
-//
-// To enable lazy cleaning of old space pages we can mark chunks of the page
-// as being garbage.  Garbage sections are marked with a special map.  These
-// sections are skipped when scanning the page, even if we are otherwise
-// scanning without regard for object boundaries.  Garbage sections are chained
-// together to form a free list after a GC.  Garbage sections created outside
-// of GCs by object truncation etc. may not be in the free list chain.  Very
-// small free spaces are ignored, they need only be cleaned of bogus pointers
-// into new space.
-//
-// Each page may have up to one special garbage section.  The start of this
-// section is denoted by the top field in the space.  The end of the section
-// is denoted by the limit field in the space.  This special garbage section
-// is not marked with a free space map in the data.  The point of this section
-// is to enable linear allocation without having to constantly update the byte
-// array every time the top field is updated and a new object is created.  The
-// special garbage section is not in the chain of garbage sections.
-//
-// Since the top and limit fields are in the space, not the page, only one page
-// has a special garbage section, and if the top and limit are equal then there
-// is no special garbage section.
-
-// Some assertion macros used in the debugging mode.
-
-#define ASSERT_PAGE_ALIGNED(address)                                           \
-  ASSERT((OffsetFrom(address) & Page::kPageAlignmentMask) == 0)
-
-#define ASSERT_OBJECT_ALIGNED(address)                                         \
-  ASSERT((OffsetFrom(address) & kObjectAlignmentMask) == 0)
-
-#define ASSERT_OBJECT_SIZE(size)                                               \
-  ASSERT((0 < size) && (size <= Page::kMaxRegularHeapObjectSize))
-
-#define ASSERT_PAGE_OFFSET(offset)                                             \
-  ASSERT((Page::kObjectStartOffset <= offset)                                  \
-      && (offset <= Page::kPageSize))
-
-#define ASSERT_MAP_PAGE_INDEX(index)                                           \
-  ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex))
-
-
-class PagedSpace;
-class MemoryAllocator;
-class AllocationInfo;
-class Space;
-class FreeList;
-class MemoryChunk;
-
-class MarkBit {
- public:
-  typedef uint32_t CellType;
-
-  inline MarkBit(CellType* cell, CellType mask, bool data_only)
-      : cell_(cell), mask_(mask), data_only_(data_only) { }
-
-  inline CellType* cell() { return cell_; }
-  inline CellType mask() { return mask_; }
-
-#ifdef DEBUG
-  bool operator==(const MarkBit& other) {
-    return cell_ == other.cell_ && mask_ == other.mask_;
-  }
-#endif
-
-  inline void Set() { *cell_ |= mask_; }
-  inline bool Get() { return (*cell_ & mask_) != 0; }
-  inline void Clear() { *cell_ &= ~mask_; }
-
-  inline bool data_only() { return data_only_; }
-
-  inline MarkBit Next() {
-    CellType new_mask = mask_ << 1;
-    if (new_mask == 0) {
-      return MarkBit(cell_ + 1, 1, data_only_);
-    } else {
-      return MarkBit(cell_, new_mask, data_only_);
-    }
-  }
-
- private:
-  CellType* cell_;
-  CellType mask_;
-  // This boolean indicates that the object is in a data-only space with no
-  // pointers.  This enables some optimizations when marking.
-  // It is expected that this field is inlined and turned into control flow
-  // at the place where the MarkBit object is created.
-  bool data_only_;
-};
-
-
-// Bitmap is a sequence of cells each containing fixed number of bits.
-class Bitmap {
- public:
-  static const uint32_t kBitsPerCell = 32;
-  static const uint32_t kBitsPerCellLog2 = 5;
-  static const uint32_t kBitIndexMask = kBitsPerCell - 1;
-  static const uint32_t kBytesPerCell = kBitsPerCell / kBitsPerByte;
-  static const uint32_t kBytesPerCellLog2 = kBitsPerCellLog2 - kBitsPerByteLog2;
-
-  static const size_t kLength =
-    (1 << kPageSizeBits) >> (kPointerSizeLog2);
-
-  static const size_t kSize =
-    (1 << kPageSizeBits) >> (kPointerSizeLog2 + kBitsPerByteLog2);
-
-
-  static int CellsForLength(int length) {
-    return (length + kBitsPerCell - 1) >> kBitsPerCellLog2;
-  }
-
-  int CellsCount() {
-    return CellsForLength(kLength);
-  }
-
-  static int SizeFor(int cells_count) {
-    return sizeof(MarkBit::CellType) * cells_count;
-  }
-
-  INLINE(static uint32_t IndexToCell(uint32_t index)) {
-    return index >> kBitsPerCellLog2;
-  }
-
-  INLINE(static uint32_t CellToIndex(uint32_t index)) {
-    return index << kBitsPerCellLog2;
-  }
-
-  INLINE(static uint32_t CellAlignIndex(uint32_t index)) {
-    return (index + kBitIndexMask) & ~kBitIndexMask;
-  }
-
-  INLINE(MarkBit::CellType* cells()) {
-    return reinterpret_cast<MarkBit::CellType*>(this);
-  }
-
-  INLINE(Address address()) {
-    return reinterpret_cast<Address>(this);
-  }
-
-  INLINE(static Bitmap* FromAddress(Address addr)) {
-    return reinterpret_cast<Bitmap*>(addr);
-  }
-
-  inline MarkBit MarkBitFromIndex(uint32_t index, bool data_only = false) {
-    MarkBit::CellType mask = 1 << (index & kBitIndexMask);
-    MarkBit::CellType* cell = this->cells() + (index >> kBitsPerCellLog2);
-    return MarkBit(cell, mask, data_only);
-  }
-
-  static inline void Clear(MemoryChunk* chunk);
-
-  static void PrintWord(uint32_t word, uint32_t himask = 0) {
-    for (uint32_t mask = 1; mask != 0; mask <<= 1) {
-      if ((mask & himask) != 0) PrintF("[");
-      PrintF((mask & word) ? "1" : "0");
-      if ((mask & himask) != 0) PrintF("]");
-    }
-  }
-
-  class CellPrinter {
-   public:
-    CellPrinter() : seq_start(0), seq_type(0), seq_length(0) { }
-
-    void Print(uint32_t pos, uint32_t cell) {
-      if (cell == seq_type) {
-        seq_length++;
-        return;
-      }
-
-      Flush();
-
-      if (IsSeq(cell)) {
-        seq_start = pos;
-        seq_length = 0;
-        seq_type = cell;
-        return;
-      }
-
-      PrintF("%d: ", pos);
-      PrintWord(cell);
-      PrintF("\n");
-    }
-
-    void Flush() {
-      if (seq_length > 0) {
-        PrintF("%d: %dx%d\n",
-               seq_start,
-               seq_type == 0 ? 0 : 1,
-               seq_length * kBitsPerCell);
-        seq_length = 0;
-      }
-    }
-
-    static bool IsSeq(uint32_t cell) { return cell == 0 || cell == 0xFFFFFFFF; }
-
-   private:
-    uint32_t seq_start;
-    uint32_t seq_type;
-    uint32_t seq_length;
-  };
-
-  void Print() {
-    CellPrinter printer;
-    for (int i = 0; i < CellsCount(); i++) {
-      printer.Print(i, cells()[i]);
-    }
-    printer.Flush();
-    PrintF("\n");
-  }
-
-  bool IsClean() {
-    for (int i = 0; i < CellsCount(); i++) {
-      if (cells()[i] != 0) {
-        return false;
-      }
-    }
-    return true;
-  }
-};
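// A minimal standalone sketch (not V8 code) of the index split performed by
// Bitmap::MarkBitFromIndex above, and of the Set/Get/Clear operations on the
// resulting (cell, mask) pair; the 4-cell bitmap is illustrative only.
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kBitsPerCellLog2 = 5;                  // 32 bits per cell
  const uint32_t kBitIndexMask = (1u << kBitsPerCellLog2) - 1;

  uint32_t cells[4] = {0, 0, 0, 0};
  uint32_t index = 70;                                  // toy mark bit number

  uint32_t* cell = &cells[index >> kBitsPerCellLog2];   // high bits: cell 2
  uint32_t mask = 1u << (index & kBitIndexMask);        // low bits: bit 6

  *cell |= mask;                // MarkBit::Set()
  assert((*cell & mask) != 0);  // MarkBit::Get()
  *cell &= ~mask;               // MarkBit::Clear()
  assert(cells[2] == 0);
  return 0;
}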
-
-
-class SkipList;
-class SlotsBuffer;
-
-// MemoryChunk represents a memory region owned by a specific space.
-// It is divided into the header and the body. Chunk start is always
-// 1MB aligned. Start of the body is aligned so it can accommodate
-// any heap object.
-class MemoryChunk {
- public:
-  // Only works if the pointer is in the first kPageSize of the MemoryChunk.
-  static MemoryChunk* FromAddress(Address a) {
-    return reinterpret_cast<MemoryChunk*>(OffsetFrom(a) & ~kAlignmentMask);
-  }
-
-  // Only works for addresses in pointer spaces, not data or code spaces.
-  static inline MemoryChunk* FromAnyPointerAddress(Heap* heap, Address addr);
-
-  Address address() { return reinterpret_cast<Address>(this); }
-
-  bool is_valid() { return address() != NULL; }
-
-  MemoryChunk* next_chunk() const {
-    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&next_chunk_));
-  }
-
-  MemoryChunk* prev_chunk() const {
-    return reinterpret_cast<MemoryChunk*>(base::Acquire_Load(&prev_chunk_));
-  }
-
-  void set_next_chunk(MemoryChunk* next) {
-    base::Release_Store(&next_chunk_, reinterpret_cast<base::AtomicWord>(next));
-  }
-
-  void set_prev_chunk(MemoryChunk* prev) {
-    base::Release_Store(&prev_chunk_, reinterpret_cast<base::AtomicWord>(prev));
-  }
-
-  Space* owner() const {
-    if ((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
-        kFailureTag) {
-      return reinterpret_cast<Space*>(reinterpret_cast<intptr_t>(owner_) -
-                                      kFailureTag);
-    } else {
-      return NULL;
-    }
-  }
-
-  void set_owner(Space* space) {
-    ASSERT((reinterpret_cast<intptr_t>(space) & kFailureTagMask) == 0);
-    owner_ = reinterpret_cast<Address>(space) + kFailureTag;
-    ASSERT((reinterpret_cast<intptr_t>(owner_) & kFailureTagMask) ==
-           kFailureTag);
-  }
-
-  VirtualMemory* reserved_memory() {
-    return &reservation_;
-  }
-
-  void InitializeReservedMemory() {
-    reservation_.Reset();
-  }
-
-  void set_reserved_memory(VirtualMemory* reservation) {
-    ASSERT_NOT_NULL(reservation);
-    reservation_.TakeControl(reservation);
-  }
-
-  bool scan_on_scavenge() { return IsFlagSet(SCAN_ON_SCAVENGE); }
-  void initialize_scan_on_scavenge(bool scan) {
-    if (scan) {
-      SetFlag(SCAN_ON_SCAVENGE);
-    } else {
-      ClearFlag(SCAN_ON_SCAVENGE);
-    }
-  }
-  inline void set_scan_on_scavenge(bool scan);
-
-  int store_buffer_counter() { return store_buffer_counter_; }
-  void set_store_buffer_counter(int counter) {
-    store_buffer_counter_ = counter;
-  }
-
-  bool Contains(Address addr) {
-    return addr >= area_start() && addr < area_end();
-  }
-
-  // Checks whether addr can be a limit of addresses in this page.
-  // It's a limit if it's in the page, or if it's just after the
-  // last byte of the page.
-  bool ContainsLimit(Address addr) {
-    return addr >= area_start() && addr <= area_end();
-  }
-
-  // Every n write barrier invocations we go to runtime even though
-  // we could have handled it in generated code.  This lets us check
-  // whether we have hit the limit and should do some more marking.
-  static const int kWriteBarrierCounterGranularity = 500;
-
-  enum MemoryChunkFlags {
-    IS_EXECUTABLE,
-    ABOUT_TO_BE_FREED,
-    POINTERS_TO_HERE_ARE_INTERESTING,
-    POINTERS_FROM_HERE_ARE_INTERESTING,
-    SCAN_ON_SCAVENGE,
-    IN_FROM_SPACE,  // Mutually exclusive with IN_TO_SPACE.
-    IN_TO_SPACE,    // All pages in new space have one of these two set.
-    NEW_SPACE_BELOW_AGE_MARK,
-    CONTAINS_ONLY_DATA,
-    EVACUATION_CANDIDATE,
-    RESCAN_ON_EVACUATION,
-
-    // Pages swept precisely can be iterated, hitting only the live objects,
-    // whereas those swept conservatively cannot be iterated over. Both flags
-    // indicate that marking bits have been cleared by the sweeper; otherwise
-    // marking bits are still intact.
-    WAS_SWEPT_PRECISELY,
-    WAS_SWEPT_CONSERVATIVELY,
-
-    // Large objects can have a progress bar in their page header. These
-    // objects are scanned in increments and will be kept black while being
-    // scanned. Even if the mutator writes to them, they will be kept black,
-    // and a white-to-grey transition is performed on the value.
-    HAS_PROGRESS_BAR,
-
-    // Last flag, keep at bottom.
-    NUM_MEMORY_CHUNK_FLAGS
-  };
-
-
-  static const int kPointersToHereAreInterestingMask =
-      1 << POINTERS_TO_HERE_ARE_INTERESTING;
-
-  static const int kPointersFromHereAreInterestingMask =
-      1 << POINTERS_FROM_HERE_ARE_INTERESTING;
-
-  static const int kEvacuationCandidateMask =
-      1 << EVACUATION_CANDIDATE;
-
-  static const int kSkipEvacuationSlotsRecordingMask =
-      (1 << EVACUATION_CANDIDATE) |
-      (1 << RESCAN_ON_EVACUATION) |
-      (1 << IN_FROM_SPACE) |
-      (1 << IN_TO_SPACE);
-
-
-  void SetFlag(int flag) {
-    flags_ |= static_cast<uintptr_t>(1) << flag;
-  }
-
-  void ClearFlag(int flag) {
-    flags_ &= ~(static_cast<uintptr_t>(1) << flag);
-  }
-
-  void SetFlagTo(int flag, bool value) {
-    if (value) {
-      SetFlag(flag);
-    } else {
-      ClearFlag(flag);
-    }
-  }
-
-  bool IsFlagSet(int flag) {
-    return (flags_ & (static_cast<uintptr_t>(1) << flag)) != 0;
-  }
-
-  // Set or clear multiple flags at a time. The flags in the mask
-  // are set to the value in "flags"; the rest retain the current value
-  // in flags_.
-  void SetFlags(intptr_t flags, intptr_t mask) {
-    flags_ = (flags_ & ~mask) | (flags & mask);
-  }
-
-  // Return all current flags.
-  intptr_t GetFlags() { return flags_; }
-
-
-  // PARALLEL_SWEEPING_DONE - The page state when sweeping is complete or
-  // sweeping must not be performed on that page.
-  // PARALLEL_SWEEPING_FINALIZE - A sweeper thread is done sweeping this
-  // page and will not touch the page memory anymore.
-  // PARALLEL_SWEEPING_IN_PROGRESS - This page is currently swept by a
-  // sweeper thread.
-  // PARALLEL_SWEEPING_PENDING - This page is ready for parallel sweeping.
-  enum ParallelSweepingState {
-    PARALLEL_SWEEPING_DONE,
-    PARALLEL_SWEEPING_FINALIZE,
-    PARALLEL_SWEEPING_IN_PROGRESS,
-    PARALLEL_SWEEPING_PENDING
-  };
-
-  ParallelSweepingState parallel_sweeping() {
-    return static_cast<ParallelSweepingState>(
-        base::Acquire_Load(&parallel_sweeping_));
-  }
-
-  void set_parallel_sweeping(ParallelSweepingState state) {
-    base::Release_Store(&parallel_sweeping_, state);
-  }
-
-  bool TryParallelSweeping() {
-    return base::Acquire_CompareAndSwap(
-               &parallel_sweeping_, PARALLEL_SWEEPING_PENDING,
-               PARALLEL_SWEEPING_IN_PROGRESS) == PARALLEL_SWEEPING_PENDING;
-  }
-
-  // Manage live byte count (count of bytes known to be live,
-  // because they are marked black).
-  void ResetLiveBytes() {
-    if (FLAG_gc_verbose) {
-      PrintF("ResetLiveBytes:%p:%x->0\n",
-             static_cast<void*>(this), live_byte_count_);
-    }
-    live_byte_count_ = 0;
-  }
-  void IncrementLiveBytes(int by) {
-    if (FLAG_gc_verbose) {
-      printf("UpdateLiveBytes:%p:%x%c=%x->%x\n",
-             static_cast<void*>(this), live_byte_count_,
-             ((by < 0) ? '-' : '+'), ((by < 0) ? -by : by),
-             live_byte_count_ + by);
-    }
-    live_byte_count_ += by;
-    ASSERT_LE(static_cast<unsigned>(live_byte_count_), size_);
-  }
-  int LiveBytes() {
-    ASSERT(static_cast<unsigned>(live_byte_count_) <= size_);
-    return live_byte_count_;
-  }
-
-  int write_barrier_counter() {
-    return static_cast<int>(write_barrier_counter_);
-  }
-
-  void set_write_barrier_counter(int counter) {
-    write_barrier_counter_ = counter;
-  }
-
-  int progress_bar() {
-    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
-    return progress_bar_;
-  }
-
-  void set_progress_bar(int progress_bar) {
-    ASSERT(IsFlagSet(HAS_PROGRESS_BAR));
-    progress_bar_ = progress_bar;
-  }
-
-  void ResetProgressBar() {
-    if (IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) {
-      set_progress_bar(0);
-      ClearFlag(MemoryChunk::HAS_PROGRESS_BAR);
-    }
-  }
-
-  bool IsLeftOfProgressBar(Object** slot) {
-    Address slot_address = reinterpret_cast<Address>(slot);
-    ASSERT(slot_address > this->address());
-    return (slot_address - (this->address() + kObjectStartOffset)) <
-           progress_bar();
-  }
-
-  static void IncrementLiveBytesFromGC(Address address, int by) {
-    MemoryChunk::FromAddress(address)->IncrementLiveBytes(by);
-  }
-
-  static void IncrementLiveBytesFromMutator(Address address, int by);
-
-  static const intptr_t kAlignment =
-      (static_cast<uintptr_t>(1) << kPageSizeBits);
-
-  static const intptr_t kAlignmentMask = kAlignment - 1;
-
-  static const intptr_t kSizeOffset = 0;
-
-  static const intptr_t kLiveBytesOffset =
-     kSizeOffset + kPointerSize + kPointerSize + kPointerSize +
-     kPointerSize + kPointerSize +
-     kPointerSize + kPointerSize + kPointerSize + kIntSize;
-
-  static const size_t kSlotsBufferOffset = kLiveBytesOffset + kIntSize;
-
-  static const size_t kWriteBarrierCounterOffset =
-      kSlotsBufferOffset + kPointerSize + kPointerSize;
-
-  static const size_t kHeaderSize = kWriteBarrierCounterOffset + kPointerSize +
-                                    kIntSize + kIntSize + kPointerSize +
-                                    5 * kPointerSize +
-                                    kPointerSize + kPointerSize;
-
-  static const int kBodyOffset =
-      CODE_POINTER_ALIGN(kHeaderSize + Bitmap::kSize);
-
-  // The start offset of the object area in a page. Aligned to both maps and
-  // code alignment to be suitable for both.  Also aligned to 32 words because
-  // the marking bitmap is arranged in 32 bit chunks.
-  static const int kObjectStartAlignment = 32 * kPointerSize;
-  static const int kObjectStartOffset = kBodyOffset - 1 +
-      (kObjectStartAlignment - (kBodyOffset - 1) % kObjectStartAlignment);
-
-  size_t size() const { return size_; }
-
-  void set_size(size_t size) {
-    size_ = size;
-  }
-
-  void SetArea(Address area_start, Address area_end) {
-    area_start_ = area_start;
-    area_end_ = area_end;
-  }
-
-  Executability executable() {
-    return IsFlagSet(IS_EXECUTABLE) ? EXECUTABLE : NOT_EXECUTABLE;
-  }
-
-  bool ContainsOnlyData() {
-    return IsFlagSet(CONTAINS_ONLY_DATA);
-  }
-
-  bool InNewSpace() {
-    return (flags_ & ((1 << IN_FROM_SPACE) | (1 << IN_TO_SPACE))) != 0;
-  }
-
-  bool InToSpace() {
-    return IsFlagSet(IN_TO_SPACE);
-  }
-
-  bool InFromSpace() {
-    return IsFlagSet(IN_FROM_SPACE);
-  }
-
-  // ---------------------------------------------------------------------
-  // Markbits support
-
-  inline Bitmap* markbits() {
-    return Bitmap::FromAddress(address() + kHeaderSize);
-  }
-
-  void PrintMarkbits() { markbits()->Print(); }
-
-  inline uint32_t AddressToMarkbitIndex(Address addr) {
-    return static_cast<uint32_t>(addr - this->address()) >> kPointerSizeLog2;
-  }
-
-  inline static uint32_t FastAddressToMarkbitIndex(Address addr) {
-    const intptr_t offset =
-        reinterpret_cast<intptr_t>(addr) & kAlignmentMask;
-
-    return static_cast<uint32_t>(offset) >> kPointerSizeLog2;
-  }
-
-  inline Address MarkbitIndexToAddress(uint32_t index) {
-    return this->address() + (index << kPointerSizeLog2);
-  }
-
-  void InsertAfter(MemoryChunk* other);
-  void Unlink();
-
-  inline Heap* heap() { return heap_; }
-
-  static const int kFlagsOffset = kPointerSize;
-
-  bool IsEvacuationCandidate() { return IsFlagSet(EVACUATION_CANDIDATE); }
-
-  bool ShouldSkipEvacuationSlotRecording() {
-    return (flags_ & kSkipEvacuationSlotsRecordingMask) != 0;
-  }
-
-  inline SkipList* skip_list() {
-    return skip_list_;
-  }
-
-  inline void set_skip_list(SkipList* skip_list) {
-    skip_list_ = skip_list;
-  }
-
-  inline SlotsBuffer* slots_buffer() {
-    return slots_buffer_;
-  }
-
-  inline SlotsBuffer** slots_buffer_address() {
-    return &slots_buffer_;
-  }
-
-  void MarkEvacuationCandidate() {
-    ASSERT(slots_buffer_ == NULL);
-    SetFlag(EVACUATION_CANDIDATE);
-  }
-
-  void ClearEvacuationCandidate() {
-    ASSERT(slots_buffer_ == NULL);
-    ClearFlag(EVACUATION_CANDIDATE);
-  }
-
-  Address area_start() { return area_start_; }
-  Address area_end() { return area_end_; }
-  int area_size() {
-    return static_cast<int>(area_end() - area_start());
-  }
-  bool CommitArea(size_t requested);
-
-  // Approximate amount of physical memory committed for this chunk.
-  size_t CommittedPhysicalMemory() {
-    return high_water_mark_;
-  }
-
-  static inline void UpdateHighWaterMark(Address mark);
-
- protected:
-  size_t size_;
-  intptr_t flags_;
-
-  // Start and end of allocatable memory on this chunk.
-  Address area_start_;
-  Address area_end_;
-
-  // If the chunk needs to remember its memory reservation, it is stored here.
-  VirtualMemory reservation_;
-  // The identity of the owning space.  This is tagged as a failure pointer, but
-  // no failure can be in an object, so this can be distinguished from any entry
-  // in a fixed array.
-  Address owner_;
-  Heap* heap_;
-  // Used by the store buffer to keep track of which pages to mark scan-on-
-  // scavenge.
-  int store_buffer_counter_;
-  // Count of bytes marked black on page.
-  int live_byte_count_;
-  SlotsBuffer* slots_buffer_;
-  SkipList* skip_list_;
-  intptr_t write_barrier_counter_;
-  // Used by the incremental marker to keep track of the scanning progress in
-  // large objects that have a progress bar and are scanned in increments.
-  int progress_bar_;
-  // Assuming the initial allocation on a page is sequential, this
-  // counts the highest number of bytes ever allocated on the page.
-  int high_water_mark_;
-
-  base::AtomicWord parallel_sweeping_;
-
-  // PagedSpace free-list statistics.
-  intptr_t available_in_small_free_list_;
-  intptr_t available_in_medium_free_list_;
-  intptr_t available_in_large_free_list_;
-  intptr_t available_in_huge_free_list_;
-  intptr_t non_available_small_blocks_;
-
-  static MemoryChunk* Initialize(Heap* heap,
-                                 Address base,
-                                 size_t size,
-                                 Address area_start,
-                                 Address area_end,
-                                 Executability executable,
-                                 Space* owner);
-
- private:
-  // next_chunk_ holds a pointer of type MemoryChunk
-  base::AtomicWord next_chunk_;
-  // prev_chunk_ holds a pointer of type MemoryChunk
-  base::AtomicWord prev_chunk_;
-
-  friend class MemoryAllocator;
-};
-
-
-STATIC_ASSERT(sizeof(MemoryChunk) <= MemoryChunk::kHeaderSize);
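// A minimal standalone sketch (not V8 code) of the masking trick behind
// MemoryChunk::FromAddress above: since chunks start at kAlignment
// boundaries, clearing the low bits of any interior address recovers the
// chunk header address. The toy 1 MB alignment mirrors the 1MB chunk
// alignment described in the class comment.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kToyAlignment = uintptr_t(1) << 20;  // 1 MB
  const uintptr_t kToyAlignmentMask = kToyAlignment - 1;

  uintptr_t chunk_start = 7 * kToyAlignment;    // any aligned base
  uintptr_t interior = chunk_start + 0x1234;    // address inside the chunk

  assert((interior & ~kToyAlignmentMask) == chunk_start);
  return 0;
}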
-
-
-// -----------------------------------------------------------------------------
-// A page is a memory chunk of size 1MB. Large object pages may be larger.
-//
-// The only way to get a page pointer is by calling factory methods:
-//   Page* p = Page::FromAddress(addr); or
-//   Page* p = Page::FromAllocationTop(top);
-class Page : public MemoryChunk {
- public:
-  // Returns the page containing a given address. The address ranges
-  // from [page_addr .. page_addr + kPageSize[
-  // This only works if the object is in fact in a page.  See also
-  // MemoryChunk::FromAddress() and MemoryChunk::FromAnyPointerAddress().
-  INLINE(static Page* FromAddress(Address a)) {
-    return reinterpret_cast<Page*>(OffsetFrom(a) & ~kPageAlignmentMask);
-  }
-
-  // Returns the page containing an allocation top. Because an allocation
-  // top address can be the upper bound of the page, we need to subtract
-  // kPointerSize from it first. The address ranges from
-  // [page_addr + kObjectStartOffset .. page_addr + kPageSize].
-  INLINE(static Page* FromAllocationTop(Address top)) {
-    Page* p = FromAddress(top - kPointerSize);
-    return p;
-  }
-
-  // Returns the next page in the chain of pages owned by a space.
-  inline Page* next_page();
-  inline Page* prev_page();
-  inline void set_next_page(Page* page);
-  inline void set_prev_page(Page* page);
-
-  // Checks whether an address is page aligned.
-  static bool IsAlignedToPageSize(Address a) {
-    return 0 == (OffsetFrom(a) & kPageAlignmentMask);
-  }
-
-  // Returns the offset of a given address to this page.
-  INLINE(int Offset(Address a)) {
-    int offset = static_cast<int>(a - address());
-    return offset;
-  }
-
-  // Returns the address for a given offset into this page.
-  Address OffsetToAddress(int offset) {
-    ASSERT_PAGE_OFFSET(offset);
-    return address() + offset;
-  }
-
-  // ---------------------------------------------------------------------
-
-  // Page size in bytes.  This must be a multiple of the OS page size.
-  static const int kPageSize = 1 << kPageSizeBits;
-
-  // Maximum object size that fits in a page. Objects larger than that size
-  // are allocated in large object space and are never moved in memory. This
-  // also applies to new space allocation, since objects are never migrated
-  // from new space to large object space.  Takes double alignment into account.
-  static const int kMaxRegularHeapObjectSize = kPageSize - kObjectStartOffset;
-
-  // Page size mask.
-  static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
-
-  inline void ClearGCFields();
-
-  static inline Page* Initialize(Heap* heap,
-                                 MemoryChunk* chunk,
-                                 Executability executable,
-                                 PagedSpace* owner);
-
-  void InitializeAsAnchor(PagedSpace* owner);
-
-  bool WasSweptPrecisely() { return IsFlagSet(WAS_SWEPT_PRECISELY); }
-  bool WasSweptConservatively() { return IsFlagSet(WAS_SWEPT_CONSERVATIVELY); }
-  bool WasSwept() { return WasSweptPrecisely() || WasSweptConservatively(); }
-
-  void MarkSweptPrecisely() { SetFlag(WAS_SWEPT_PRECISELY); }
-  void MarkSweptConservatively() { SetFlag(WAS_SWEPT_CONSERVATIVELY); }
-
-  void ClearSweptPrecisely() { ClearFlag(WAS_SWEPT_PRECISELY); }
-  void ClearSweptConservatively() { ClearFlag(WAS_SWEPT_CONSERVATIVELY); }
-
-  void ResetFreeListStatistics();
-
-#define FRAGMENTATION_STATS_ACCESSORS(type, name) \
-  type name() { return name##_; }                 \
-  void set_##name(type name) { name##_ = name; }  \
-  void add_##name(type name) { name##_ += name; }
-
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, non_available_small_blocks)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_small_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_medium_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_large_free_list)
-  FRAGMENTATION_STATS_ACCESSORS(intptr_t, available_in_huge_free_list)
-
-#undef FRAGMENTATION_STATS_ACCESSORS
-
-#ifdef DEBUG
-  void Print();
-#endif  // DEBUG
-
-  friend class MemoryAllocator;
-};
-
-
-STATIC_ASSERT(sizeof(Page) <= MemoryChunk::kHeaderSize);
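// A minimal standalone sketch (not V8 code) of why Page::FromAllocationTop
// above steps back kPointerSize before masking: a top equal to
// page_addr + kPageSize is a valid allocation top for a full page, but
// masking it directly would attribute it to the following page.
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kToyPageSize = uintptr_t(1) << 20;   // 1 MB pages
  const uintptr_t kToyMask = kToyPageSize - 1;
  const uintptr_t kToyPointerSize = sizeof(void*);

  uintptr_t page_start = 3 * kToyPageSize;
  uintptr_t top = page_start + kToyPageSize;  // full page: top == upper bound

  assert((top & ~kToyMask) != page_start);                      // naive: wrong
  assert(((top - kToyPointerSize) & ~kToyMask) == page_start);  // adjusted: right
  return 0;
}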
-
-
-class LargePage : public MemoryChunk {
- public:
-  HeapObject* GetObject() {
-    return HeapObject::FromAddress(area_start());
-  }
-
-  inline LargePage* next_page() const {
-    return static_cast<LargePage*>(next_chunk());
-  }
-
-  inline void set_next_page(LargePage* page) {
-    set_next_chunk(page);
-  }
- private:
-  static inline LargePage* Initialize(Heap* heap, MemoryChunk* chunk);
-
-  friend class MemoryAllocator;
-};
-
-STATIC_ASSERT(sizeof(LargePage) <= MemoryChunk::kHeaderSize);
-
-// ----------------------------------------------------------------------------
-// Space is the abstract superclass for all allocation spaces.
-class Space : public Malloced {
- public:
-  Space(Heap* heap, AllocationSpace id, Executability executable)
-      : heap_(heap), id_(id), executable_(executable) {}
-
-  virtual ~Space() {}
-
-  Heap* heap() const { return heap_; }
-
-  // Does the space need executable memory?
-  Executability executable() { return executable_; }
-
-  // Identity used in error reporting.
-  AllocationSpace identity() { return id_; }
-
-  // Returns allocated size.
-  virtual intptr_t Size() = 0;
-
-  // Returns size of objects. Can differ from the allocated size
-  // (e.g. see LargeObjectSpace).
-  virtual intptr_t SizeOfObjects() { return Size(); }
-
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (id_ == CODE_SPACE) {
-      return RoundDown(size, kCodeAlignment);
-    } else {
-      return RoundDown(size, kPointerSize);
-    }
-  }
-
-#ifdef DEBUG
-  virtual void Print() = 0;
-#endif
-
- private:
-  Heap* heap_;
-  AllocationSpace id_;
-  Executability executable_;
-};
-
-
-// ----------------------------------------------------------------------------
-// All heap objects containing executable code (code objects) must be allocated
-// from a 2 GB range of memory, so that they can call each other using 32-bit
-// displacements.  This happens automatically on 32-bit platforms, where 32-bit
-// displacements cover the entire 4GB virtual address space.  On 64-bit
-// platforms, we support this using the CodeRange object, which reserves and
-// manages a range of virtual memory.
-class CodeRange {
- public:
-  explicit CodeRange(Isolate* isolate);
-  ~CodeRange() { TearDown(); }
-
-  // Reserves a range of virtual memory, but does not commit any of it.
-  // Can only be called once, at heap initialization time.
-  // Returns false on failure.
-  bool SetUp(size_t requested_size);
-
-  // Frees the range of virtual memory, and frees the data structures used to
-  // manage it.
-  void TearDown();
-
-  bool valid() { return code_range_ != NULL; }
-  Address start() {
-    ASSERT(valid());
-    return static_cast<Address>(code_range_->address());
-  }
-  bool contains(Address address) {
-    if (!valid()) return false;
-    Address start = static_cast<Address>(code_range_->address());
-    return start <= address && address < start + code_range_->size();
-  }
-
-  // Allocates a chunk of memory from the large-object portion of
-  // the code range.  On platforms with no separate code range, should
-  // not be called.
-  MUST_USE_RESULT Address AllocateRawMemory(const size_t requested_size,
-                                            const size_t commit_size,
-                                            size_t* allocated);
-  bool CommitRawMemory(Address start, size_t length);
-  bool UncommitRawMemory(Address start, size_t length);
-  void FreeRawMemory(Address buf, size_t length);
-
- private:
-  Isolate* isolate_;
-
-  // The reserved range of virtual memory that all code objects are put in.
-  VirtualMemory* code_range_;
-  // Plain old data class, just a struct plus a constructor.
-  class FreeBlock {
-   public:
-    FreeBlock(Address start_arg, size_t size_arg)
-        : start(start_arg), size(size_arg) {
-      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
-      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
-    }
-    FreeBlock(void* start_arg, size_t size_arg)
-        : start(static_cast<Address>(start_arg)), size(size_arg) {
-      ASSERT(IsAddressAligned(start, MemoryChunk::kAlignment));
-      ASSERT(size >= static_cast<size_t>(Page::kPageSize));
-    }
-
-    Address start;
-    size_t size;
-  };
-
-  // Freed blocks of memory are added to the free list.  When the allocation
-  // list is exhausted, the free list is sorted and merged to make the new
-  // allocation list.
-  List<FreeBlock> free_list_;
-  // Memory is allocated from the free blocks on the allocation list.
-  // The block at current_allocation_block_index_ is the current block.
-  List<FreeBlock> allocation_list_;
-  int current_allocation_block_index_;
-
-  // Finds a block on the allocation list that contains at least the
-  // requested amount of memory.  If none is found, sorts and merges
-  // the existing free memory blocks, and searches again.
-  // If none can be found, returns false.
-  bool GetNextAllocationBlock(size_t requested);
-  // Compares the start addresses of two free blocks.
-  static int CompareFreeBlockAddress(const FreeBlock* left,
-                                     const FreeBlock* right);
-
-  DISALLOW_COPY_AND_ASSIGN(CodeRange);
-};
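// A minimal standalone sketch (not V8 code) of the constraint motivating
// CodeRange above: when all code objects live inside one 2 GB reservation,
// the distance between any two of them fits in a signed 32-bit displacement,
// so they can call each other pc-relatively.
#include <cassert>
#include <cstdint>

int main() {
  const uint64_t kToyRange = uint64_t(2) * 1024 * 1024 * 1024;  // 2 GB
  uint64_t base = uint64_t(1) << 40;          // some 64-bit reservation base

  uint64_t caller = base;                     // worst case: opposite ends
  uint64_t callee = base + kToyRange - 4;

  int64_t delta = static_cast<int64_t>(callee) - static_cast<int64_t>(caller);
  assert(delta <= INT32_MAX && delta >= INT32_MIN);   // forward call reaches
  assert(-delta >= INT32_MIN);                        // and the way back too
  return 0;
}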
-
-
-class SkipList {
- public:
-  SkipList() {
-    Clear();
-  }
-
-  void Clear() {
-    for (int idx = 0; idx < kSize; idx++) {
-      starts_[idx] = reinterpret_cast<Address>(-1);
-    }
-  }
-
-  Address StartFor(Address addr) {
-    return starts_[RegionNumber(addr)];
-  }
-
-  void AddObject(Address addr, int size) {
-    int start_region = RegionNumber(addr);
-    int end_region = RegionNumber(addr + size - kPointerSize);
-    for (int idx = start_region; idx <= end_region; idx++) {
-      if (starts_[idx] > addr) starts_[idx] = addr;
-    }
-  }
-
-  static inline int RegionNumber(Address addr) {
-    return (OffsetFrom(addr) & Page::kPageAlignmentMask) >> kRegionSizeLog2;
-  }
-
-  static void Update(Address addr, int size) {
-    Page* page = Page::FromAddress(addr);
-    SkipList* list = page->skip_list();
-    if (list == NULL) {
-      list = new SkipList();
-      page->set_skip_list(list);
-    }
-
-    list->AddObject(addr, size);
-  }
-
- private:
-  static const int kRegionSizeLog2 = 13;
-  static const int kRegionSize = 1 << kRegionSizeLog2;
-  static const int kSize = Page::kPageSize / kRegionSize;
-
-  STATIC_ASSERT(Page::kPageSize % kRegionSize == 0);
-
-  Address starts_[kSize];
-};
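// A minimal standalone sketch (not V8 code) of the SkipList idea above: per
// fixed-size region of a page, remember the lowest object start touching it,
// so a scan can begin at a real object boundary instead of at the page start.
// The 4-region page and 8 KB regions are illustrative.
#include <cassert>
#include <cstdint>

int main() {
  const int kToyRegionSizeLog2 = 13;                  // 8 KB regions
  const int kToyRegionSize = 1 << kToyRegionSizeLog2;
  const int kToySize = 4;

  uintptr_t starts[kToySize];
  for (int i = 0; i < kToySize; i++) starts[i] = ~uintptr_t(0);

  // AddObject: an object at offset 100 spanning regions 0 and 1.
  uintptr_t addr = 100;
  int size = kToyRegionSize + 200;
  int first = static_cast<int>(addr >> kToyRegionSizeLog2);
  int last =
      static_cast<int>((addr + size - sizeof(void*)) >> kToyRegionSizeLog2);
  for (int i = first; i <= last; i++)
    if (starts[i] > addr) starts[i] = addr;

  // StartFor: any address in region 1 maps back to the object's start.
  assert(starts[(kToyRegionSize + 50) >> kToyRegionSizeLog2] == 100);
  return 0;
}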
-
-
-// ----------------------------------------------------------------------------
-// A space acquires chunks of memory from the operating system. The memory
-// allocator allocates and deallocates pages for the paged heap spaces and
-// large pages for large object space.
-//
-// Each space has to manage its own pages.
-//
-class MemoryAllocator {
- public:
-  explicit MemoryAllocator(Isolate* isolate);
-
-  // Initializes its internal bookkeeping structures.
-  // Max capacity of the total space and executable memory limit.
-  bool SetUp(intptr_t max_capacity, intptr_t capacity_executable);
-
-  void TearDown();
-
-  Page* AllocatePage(
-      intptr_t size, PagedSpace* owner, Executability executable);
-
-  LargePage* AllocateLargePage(
-      intptr_t object_size, Space* owner, Executability executable);
-
-  void Free(MemoryChunk* chunk);
-
-  // Returns the maximum available bytes of heaps.
-  intptr_t Available() { return capacity_ < size_ ? 0 : capacity_ - size_; }
-
-  // Returns allocated spaces in bytes.
-  intptr_t Size() { return size_; }
-
-  // Returns the maximum available executable bytes of heaps.
-  intptr_t AvailableExecutable() {
-    if (capacity_executable_ < size_executable_) return 0;
-    return capacity_executable_ - size_executable_;
-  }
-
-  // Returns allocated executable spaces in bytes.
-  intptr_t SizeExecutable() { return size_executable_; }
-
-  // Returns maximum available bytes that the old space can have.
-  intptr_t MaxAvailable() {
-    return (Available() / Page::kPageSize) * Page::kMaxRegularHeapObjectSize;
-  }
-
-  // Returns an indication of whether a pointer is in a space that has
-  // been allocated by this MemoryAllocator.
-  V8_INLINE bool IsOutsideAllocatedSpace(const void* address) const {
-    return address < lowest_ever_allocated_ ||
-        address >= highest_ever_allocated_;
-  }
-
-#ifdef DEBUG
-  // Reports statistic info of the space.
-  void ReportStatistics();
-#endif
-
-  // Returns a MemoryChunk in which the memory region from commit_area_size to
-  // reserve_area_size of the chunk area is reserved but not committed; it
-  // can be committed later by calling MemoryChunk::CommitArea.
-  MemoryChunk* AllocateChunk(intptr_t reserve_area_size,
-                             intptr_t commit_area_size,
-                             Executability executable,
-                             Space* space);
-
-  Address ReserveAlignedMemory(size_t requested,
-                               size_t alignment,
-                               VirtualMemory* controller);
-  Address AllocateAlignedMemory(size_t reserve_size,
-                                size_t commit_size,
-                                size_t alignment,
-                                Executability executable,
-                                VirtualMemory* controller);
-
-  bool CommitMemory(Address addr, size_t size, Executability executable);
-
-  void FreeMemory(VirtualMemory* reservation, Executability executable);
-  void FreeMemory(Address addr, size_t size, Executability executable);
-
-  // Commit a contiguous block of memory from the initial chunk.  Assumes that
-  // the address is not NULL, the size is greater than zero, and that the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool CommitBlock(Address start, size_t size, Executability executable);
-
-  // Uncommit a contiguous block of memory [start..(start+size)[.
-  // start is not NULL, the size is greater than zero, and the
-  // block is contained in the initial chunk.  Returns true if it succeeded
-  // and false otherwise.
-  bool UncommitBlock(Address start, size_t size);
-
-  // Zaps a contiguous block of memory [start..(start+size)[ thus
-  // filling it up with a recognizable non-NULL bit pattern.
-  void ZapBlock(Address start, size_t size);
-
-  void PerformAllocationCallback(ObjectSpace space,
-                                 AllocationAction action,
-                                 size_t size);
-
-  void AddMemoryAllocationCallback(MemoryAllocationCallback callback,
-                                   ObjectSpace space,
-                                   AllocationAction action);
-
-  void RemoveMemoryAllocationCallback(
-      MemoryAllocationCallback callback);
-
-  bool MemoryAllocationCallbackRegistered(
-      MemoryAllocationCallback callback);
-
-  static int CodePageGuardStartOffset();
-
-  static int CodePageGuardSize();
-
-  static int CodePageAreaStartOffset();
-
-  static int CodePageAreaEndOffset();
-
-  static int CodePageAreaSize() {
-    return CodePageAreaEndOffset() - CodePageAreaStartOffset();
-  }
-
-  MUST_USE_RESULT bool CommitExecutableMemory(VirtualMemory* vm,
-                                              Address start,
-                                              size_t commit_size,
-                                              size_t reserved_size);
-
- private:
-  Isolate* isolate_;
-
-  // Maximum space size in bytes.
-  size_t capacity_;
-  // Maximum subset of capacity_ that can be executable
-  size_t capacity_executable_;
-
-  // Allocated space size in bytes.
-  size_t size_;
-  // Allocated executable space size in bytes.
-  size_t size_executable_;
-
-  // We keep the lowest and highest addresses allocated as a quick way
-  // of determining that pointers are outside the heap. The estimate is
-  // conservative, i.e. not all addresses in 'allocated' space are allocated
-  // to our heap. The range is [lowest, highest[, inclusive on the low end
-  // and exclusive on the high end.
-  void* lowest_ever_allocated_;
-  void* highest_ever_allocated_;
-
-  struct MemoryAllocationCallbackRegistration {
-    MemoryAllocationCallbackRegistration(MemoryAllocationCallback callback,
-                                         ObjectSpace space,
-                                         AllocationAction action)
-        : callback(callback), space(space), action(action) {
-    }
-    MemoryAllocationCallback callback;
-    ObjectSpace space;
-    AllocationAction action;
-  };
-
-  // A list of callbacks that are triggered when memory is allocated or freed.
-  List<MemoryAllocationCallbackRegistration>
-      memory_allocation_callbacks_;
-
-  // Initializes pages in a chunk. Returns the first page address.
-  // This function and GetChunkId() are provided for the mark-compact
-  // collector to rebuild page headers in the from space, which is
-  // used as a marking stack, destroying its page headers.
-  Page* InitializePagesInChunk(int chunk_id, int pages_in_chunk,
-                               PagedSpace* owner);
-
-  void UpdateAllocatedSpaceLimits(void* low, void* high) {
-    lowest_ever_allocated_ = Min(lowest_ever_allocated_, low);
-    highest_ever_allocated_ = Max(highest_ever_allocated_, high);
-  }
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(MemoryAllocator);
-};
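// A minimal standalone sketch (not V8 code; ToyAllocator is made up) of the
// conservative bounds test behind IsOutsideAllocatedSpace above: track the
// lowest and highest addresses ever handed out, giving a cheap "definitely
// not our heap" answer that may say "maybe" for gaps between allocations.
#include <cassert>
#include <cstdint>

struct ToyAllocator {
  uintptr_t lowest = ~uintptr_t(0);
  uintptr_t highest = 0;

  void OnAllocate(uintptr_t low, uintptr_t high) {
    if (low < lowest) lowest = low;
    if (high > highest) highest = high;
  }
  bool IsOutsideAllocatedSpace(uintptr_t a) const {
    return a < lowest || a >= highest;  // [lowest, highest[ as in the header
  }
};

int main() {
  ToyAllocator alloc;
  alloc.OnAllocate(0x1000, 0x2000);
  alloc.OnAllocate(0x8000, 0x9000);
  assert(alloc.IsOutsideAllocatedSpace(0x0500));   // below everything
  assert(!alloc.IsOutsideAllocatedSpace(0x1800));  // inside a chunk
  assert(!alloc.IsOutsideAllocatedSpace(0x5000));  // gap: conservative "maybe"
  return 0;
}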
-
-
-// -----------------------------------------------------------------------------
-// Interface for heap object iterators, to be implemented by all object space
-// object iterators.
-//
-// NOTE: The space-specific object iterators also implement their own next()
-//       method, which is used to avoid virtual function calls when
-//       iterating a specific space.
-
-class ObjectIterator : public Malloced {
- public:
-  virtual ~ObjectIterator() { }
-
-  virtual HeapObject* next_object() = 0;
-};
-
-
-// -----------------------------------------------------------------------------
-// Heap object iterator in new/old/map spaces.
-//
-// A HeapObjectIterator iterates objects from the bottom of the given space
-// to its top or from the bottom of the given page to its top.
-//
-// If objects are allocated in the page during iteration, the iterator may
-// or may not iterate over those objects.  The caller must create a new
-// iterator in order to be sure to visit these new objects.
-class HeapObjectIterator: public ObjectIterator {
- public:
-  // Creates a new object iterator in a given space.
-  // If the size function is not given, the iterator calls the default
-  // Object::Size().
-  explicit HeapObjectIterator(PagedSpace* space);
-  HeapObjectIterator(PagedSpace* space, HeapObjectCallback size_func);
-  HeapObjectIterator(Page* page, HeapObjectCallback size_func);
-
-  // Advance to the next object, skipping free spaces and other fillers and
-  // skipping the special garbage section of which there is one per space.
-  // Returns NULL when the iteration has ended.
-  inline HeapObject* Next() {
-    do {
-      HeapObject* next_obj = FromCurrentPage();
-      if (next_obj != NULL) return next_obj;
-    } while (AdvanceToNextPage());
-    return NULL;
-  }
-
-  virtual HeapObject* next_object() {
-    return Next();
-  }
-
- private:
-  enum PageMode { kOnePageOnly, kAllPagesInSpace };
-
-  Address cur_addr_;  // Current iteration point.
-  Address cur_end_;   // End iteration point.
-  HeapObjectCallback size_func_;  // Size function or NULL.
-  PagedSpace* space_;
-  PageMode page_mode_;
-
-  // Fast (inlined) path of next().
-  inline HeapObject* FromCurrentPage();
-
-  // Slow path of next(), goes into the next page.  Returns false if the
-  // iteration has ended.
-  bool AdvanceToNextPage();
-
-  // Initializes fields.
-  inline void Initialize(PagedSpace* owner,
-                         Address start,
-                         Address end,
-                         PageMode mode,
-                         HeapObjectCallback size_func);
-};
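// A minimal standalone sketch (not V8 code; ToyPage and ToyIterator are made
// up) of the two-level loop behind HeapObjectIterator::Next() above: an
// inlined per-page fast path plus a slow path that advances to the next page
// when the current one is exhausted.
#include <cstddef>
#include <cstdio>
#include <vector>

struct ToyPage { std::vector<int> objects; };

struct ToyIterator {
  const std::vector<ToyPage>* pages;
  std::size_t page = 0, slot = 0;

  const int* Next() {                       // returns nullptr at the end
    while (page < pages->size()) {
      const ToyPage& p = (*pages)[page];
      if (slot < p.objects.size()) return &p.objects[slot++];  // fast path
      page++;                               // slow path: go to the next page
      slot = 0;
    }
    return nullptr;
  }
};

int main() {
  std::vector<ToyPage> space = {{{1, 2}}, {{}}, {{3}}};  // middle page empty
  ToyIterator it{&space};
  for (const int* obj = it.Next(); obj != nullptr; obj = it.Next())
    std::printf("%d\n", *obj);              // prints 1, 2, 3
  return 0;
}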
-
-
-// -----------------------------------------------------------------------------
-// A PageIterator iterates the pages in a paged space.
-
-class PageIterator BASE_EMBEDDED {
- public:
-  explicit inline PageIterator(PagedSpace* space);
-
-  inline bool has_next();
-  inline Page* next();
-
- private:
-  PagedSpace* space_;
-  Page* prev_page_;  // Previous page returned.
-  // Next page that will be returned.  Cached here so that we can use this
-  // iterator for operations that deallocate pages.
-  Page* next_page_;
-};
-
-
-// -----------------------------------------------------------------------------
-// A space has a circular list of pages. The next page can be accessed via
-// a Page::next_page() call.
-
-// An abstraction of allocation and relocation pointers in a page-structured
-// space.
-class AllocationInfo {
- public:
-  AllocationInfo() : top_(NULL), limit_(NULL) {
-  }
-
-  INLINE(void set_top(Address top)) {
-    SLOW_ASSERT(top == NULL ||
-        (reinterpret_cast<intptr_t>(top) & HeapObjectTagMask()) == 0);
-    top_ = top;
-  }
-
-  INLINE(Address top()) const {
-    SLOW_ASSERT(top_ == NULL ||
-        (reinterpret_cast<intptr_t>(top_) & HeapObjectTagMask()) == 0);
-    return top_;
-  }
-
-  Address* top_address() {
-    return &top_;
-  }
-
-  INLINE(void set_limit(Address limit)) {
-    SLOW_ASSERT(limit == NULL ||
-        (reinterpret_cast<intptr_t>(limit) & HeapObjectTagMask()) == 0);
-    limit_ = limit;
-  }
-
-  INLINE(Address limit()) const {
-    SLOW_ASSERT(limit_ == NULL ||
-        (reinterpret_cast<intptr_t>(limit_) & HeapObjectTagMask()) == 0);
-    return limit_;
-  }
-
-  Address* limit_address() {
-    return &limit_;
-  }
-
-#ifdef DEBUG
-  bool VerifyPagedAllocation() {
-    return (Page::FromAllocationTop(top_) == Page::FromAllocationTop(limit_))
-        && (top_ <= limit_);
-  }
-#endif
-
- private:
-  // Current allocation top.
-  Address top_;
-  // Current allocation limit.
-  Address limit_;
-};
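// A minimal standalone sketch (not V8 code; ToyAllocationInfo is made up) of
// the linear "bump pointer" allocation that the top/limit pair above models:
// allocation just advances top, and fails once top would pass limit.
#include <cassert>
#include <cstdint>

struct ToyAllocationInfo {
  uintptr_t top, limit;

  uintptr_t Allocate(uintptr_t size_in_bytes) {  // 0 means "area exhausted"
    if (limit - top < size_in_bytes) return 0;
    uintptr_t result = top;
    top += size_in_bytes;                        // bump
    return result;
  }
};

int main() {
  ToyAllocationInfo info{0x1000, 0x1040};        // a 64-byte linear area
  assert(info.Allocate(32) == 0x1000);
  assert(info.Allocate(32) == 0x1020);
  assert(info.Allocate(8) == 0);  // a real space would refill top/limit here
  return 0;
}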
-
-
-// An abstraction of the accounting statistics of a page-structured space.
-// The 'capacity' of a space is the number of object-area bytes (i.e., not
-// including page bookkeeping structures) currently in the space. The 'size'
-// of a space is the number of allocated bytes, the 'waste' in the space is
-// the number of bytes that are not allocated and not available to
-// allocation without reorganizing the space via a GC (e.g. small blocks due
-// to internal fragmentation, top of page areas in map space), and the bytes
-// 'available' is the number of unallocated bytes that are not waste.  The
-// capacity is the sum of size, waste, and available.
-//
-// The stats are only set by functions that ensure they stay balanced. These
-// functions increase or decrease one of the non-capacity stats in
-// conjunction with capacity, or else they always balance increases and
-// decreases to the non-capacity stats.
-class AllocationStats BASE_EMBEDDED {
- public:
-  AllocationStats() { Clear(); }
-
-  // Zero out all the allocation statistics (i.e., no capacity).
-  void Clear() {
-    capacity_ = 0;
-    max_capacity_ = 0;
-    size_ = 0;
-    waste_ = 0;
-  }
-
-  void ClearSizeWaste() {
-    size_ = capacity_;
-    waste_ = 0;
-  }
-
-  // Reset the allocation statistics (i.e., available = capacity with no
-  // wasted or allocated bytes).
-  void Reset() {
-    size_ = 0;
-    waste_ = 0;
-  }
-
-  // Accessors for the allocation statistics.
-  intptr_t Capacity() { return capacity_; }
-  intptr_t MaxCapacity() { return max_capacity_; }
-  intptr_t Size() { return size_; }
-  intptr_t Waste() { return waste_; }
-
-  // Grow the space by adding available bytes.  They are initially marked as
-  // being in use (part of the size), but will normally be immediately freed,
-  // putting them on the free list and removing them from size_.
-  void ExpandSpace(int size_in_bytes) {
-    capacity_ += size_in_bytes;
-    size_ += size_in_bytes;
-    if (capacity_ > max_capacity_) {
-      max_capacity_ = capacity_;
-    }
-    ASSERT(size_ >= 0);
-  }
-
-  // Shrink the space by removing available bytes.  Since shrinking is done
-  // during sweeping, bytes have been marked as being in use (part of the size)
-  // and are hereby freed.
-  void ShrinkSpace(int size_in_bytes) {
-    capacity_ -= size_in_bytes;
-    size_ -= size_in_bytes;
-    ASSERT(size_ >= 0);
-  }
-
-  // Allocate from available bytes (available -> size).
-  void AllocateBytes(intptr_t size_in_bytes) {
-    size_ += size_in_bytes;
-    ASSERT(size_ >= 0);
-  }
-
-  // Free allocated bytes, making them available (size -> available).
-  void DeallocateBytes(intptr_t size_in_bytes) {
-    size_ -= size_in_bytes;
-    ASSERT(size_ >= 0);
-  }
-
-  // Waste free bytes (available -> waste).
-  void WasteBytes(int size_in_bytes) {
-    size_ -= size_in_bytes;
-    waste_ += size_in_bytes;
-    ASSERT(size_ >= 0);
-  }
-
- private:
-  intptr_t capacity_;
-  intptr_t max_capacity_;
-  intptr_t size_;
-  intptr_t waste_;
-};
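-
-
-// A worked example of the balancing rules above (figures in bytes),
-// assuming a freshly Clear()ed AllocationStats 'stats':
-//
-//   stats.ExpandSpace(4096);      // capacity = 4096, size = 4096.
-//   stats.DeallocateBytes(4096);  // size = 0: the new page is immediately
-//                                 // freed, so all 4096 bytes are available.
-//   stats.AllocateBytes(1024);    // size = 1024, available = 3072.
-//   stats.WasteBytes(64);         // size = 960, waste = 64.
-//
-// After every step capacity == size + available + waste; here
-// 4096 == 960 + 3072 + 64.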
-
-
-// -----------------------------------------------------------------------------
-// Free lists for old object spaces
-//
-// Free-list nodes are free blocks in the heap.  They look like heap objects
-// (free-list node pointers have the heap object tag, and they have a map like
-// a heap object).  They have a size and a next pointer.  The next pointer is
-// the raw address of the next free list node (or NULL).
-class FreeListNode: public HeapObject {
- public:
-  // Obtain a free-list node from a raw address.  This is not a cast because
-  // it neither checks nor requires that the first word at the address is a
-  // map pointer.
-  static FreeListNode* FromAddress(Address address) {
-    return reinterpret_cast<FreeListNode*>(HeapObject::FromAddress(address));
-  }
-
-  static inline bool IsFreeListNode(HeapObject* object);
-
-  // Set the size in bytes, which can be read with HeapObject::Size().  This
-  // function also writes a map to the first word of the block so that it
-  // looks like a heap object to the garbage collector and heap iteration
-  // functions.
-  void set_size(Heap* heap, int size_in_bytes);
-
-  // Accessors for the next field.
-  inline FreeListNode* next();
-  inline FreeListNode** next_address();
-  inline void set_next(FreeListNode* next);
-
-  inline void Zap();
-
-  static inline FreeListNode* cast(Object* object) {
-    return reinterpret_cast<FreeListNode*>(object);
-  }
-
- private:
-  static const int kNextOffset = POINTER_SIZE_ALIGN(FreeSpace::kHeaderSize);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeListNode);
-};
-
-
-// The free list category holds a pointer to the top element and a pointer to
-// the end element of the linked list of free memory blocks.
-class FreeListCategory {
- public:
-  FreeListCategory() :
-      top_(0),
-      end_(NULL),
-      available_(0) {}
-
-  intptr_t Concatenate(FreeListCategory* category);
-
-  void Reset();
-
-  void Free(FreeListNode* node, int size_in_bytes);
-
-  FreeListNode* PickNodeFromList(int* node_size);
-  FreeListNode* PickNodeFromList(int size_in_bytes, int* node_size);
-
-  intptr_t EvictFreeListItemsInList(Page* p);
-  bool ContainsPageFreeListItemsInList(Page* p);
-
-  void RepairFreeList(Heap* heap);
-
-  FreeListNode* top() const {
-    return reinterpret_cast<FreeListNode*>(base::NoBarrier_Load(&top_));
-  }
-
-  void set_top(FreeListNode* top) {
-    base::NoBarrier_Store(&top_, reinterpret_cast<base::AtomicWord>(top));
-  }
-
-  FreeListNode** GetEndAddress() { return &end_; }
-  FreeListNode* end() const { return end_; }
-  void set_end(FreeListNode* end) { end_ = end; }
-
-  int* GetAvailableAddress() { return &available_; }
-  int available() const { return available_; }
-  void set_available(int available) { available_ = available; }
-
-  Mutex* mutex() { return &mutex_; }
-
-  bool IsEmpty() {
-    return top() == 0;
-  }
-
-#ifdef DEBUG
-  intptr_t SumFreeList();
-  int FreeListLength();
-#endif
-
- private:
-  // top_ points to the top FreeListNode* in the free list category.
-  base::AtomicWord top_;
-  FreeListNode* end_;
-  Mutex mutex_;
-
-  // Total available bytes in all blocks of this free list category.
-  int available_;
-};
-
-
-// The free list for the old space.  The free list is organized in such a way
-// as to encourage objects allocated around the same time to be near each
-// other.  The normal way to allocate is intended to be by bumping a 'top'
-// pointer until it hits a 'limit' pointer.  When the limit is hit we need to
-// find a new space to allocate from.  This is done with the free list, which
-// is divided up into rough categories to cut down on waste.  Having finer
-// categories would scatter allocation more.
-
-// The old space free list is organized in categories.
-// 1-31 words:  Such small free areas are discarded for efficiency reasons.
-//     They can be reclaimed by the compactor.  However the distance between top
-//     and limit may be this small.
-// 32-255 words: There is a list of spaces this large.  It is used for top and
-//     limit when the object we need to allocate is 1-31 words in size.  These
-//     spaces are called small.
-// 256-2047 words: There is a list of spaces this large.  It is used for top and
-//     limit when the object we need to allocate is 32-255 words in size.  These
-//     spaces are called medium.
-// 2048-16383 words: There is a list of spaces this large.  It is used for top
-//     and limit when the object we need to allocate is 256-2047 words in size.
-//     These spaces are called large.
-// At least 16384 words.  This list is for objects of 2048 words or larger.
-//     Empty pages are added to this list.  These spaces are called huge.
-class FreeList {
- public:
-  explicit FreeList(PagedSpace* owner);
-
-  intptr_t Concatenate(FreeList* free_list);
-
-  // Clear the free list.
-  void Reset();
-
-  // Return the number of bytes available on the free list.
-  intptr_t available() {
-    return small_list_.available() + medium_list_.available() +
-           large_list_.available() + huge_list_.available();
-  }
-
-  // Place a node on the free list.  The block of size 'size_in_bytes'
-  // starting at 'start' is placed on the free list.  The return value is the
-  // number of bytes that have been lost due to internal fragmentation by
-  // freeing the block.  Bookkeeping information will be written to the block,
-  // i.e., its contents will be destroyed.  The start address should be word
-  // aligned, and the size should be a non-zero multiple of the word size.
-  int Free(Address start, int size_in_bytes);
-
-  // Allocate a block of size 'size_in_bytes' from the free list.  The block
-  // is uninitialized.  A failure (NULL) is returned if no block is available.
-  // The size should be a non-zero multiple of the word size.
-  MUST_USE_RESULT HeapObject* Allocate(int size_in_bytes);
-
-  bool IsEmpty() {
-    return small_list_.IsEmpty() && medium_list_.IsEmpty() &&
-           large_list_.IsEmpty() && huge_list_.IsEmpty();
-  }
-
-#ifdef DEBUG
-  void Zap();
-  intptr_t SumFreeLists();
-  bool IsVeryLong();
-#endif
-
-  // Used after booting the VM.
-  void RepairLists(Heap* heap);
-
-  intptr_t EvictFreeListItems(Page* p);
-  bool ContainsPageFreeListItems(Page* p);
-
-  FreeListCategory* small_list() { return &small_list_; }
-  FreeListCategory* medium_list() { return &medium_list_; }
-  FreeListCategory* large_list() { return &large_list_; }
-  FreeListCategory* huge_list() { return &huge_list_; }
-
- private:
-  // The size range of blocks, in bytes.
-  static const int kMinBlockSize = 3 * kPointerSize;
-  static const int kMaxBlockSize = Page::kMaxRegularHeapObjectSize;
-
-  FreeListNode* FindNodeFor(int size_in_bytes, int* node_size);
-
-  PagedSpace* owner_;
-  Heap* heap_;
-
-  static const int kSmallListMin = 0x20 * kPointerSize;
-  static const int kSmallListMax = 0xff * kPointerSize;
-  static const int kMediumListMax = 0x7ff * kPointerSize;
-  static const int kLargeListMax = 0x3fff * kPointerSize;
-  static const int kSmallAllocationMax = kSmallListMin - kPointerSize;
-  static const int kMediumAllocationMax = kSmallListMax;
-  static const int kLargeAllocationMax = kMediumListMax;
-  FreeListCategory small_list_;
-  FreeListCategory medium_list_;
-  FreeListCategory large_list_;
-  FreeListCategory huge_list_;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(FreeList);
-};
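-
-
-// How the category bounds above classify a request, as a sketch for a
-// 100-word allocation on a 64-bit build (kPointerSize == 8):
-//
-//   int size_in_bytes = 100 * kPointerSize;  // 800 bytes.
-//   // kSmallAllocationMax is 31 words and kMediumAllocationMax is 255
-//   // words, so the request is too large for the small category but fits
-//   // the medium one; the large and huge lists act as fallbacks when the
-//   // medium list cannot supply a node.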
-
-
-class AllocationResult {
- public:
-  // Implicit constructor from Object*.
-  AllocationResult(Object* object) : object_(object),  // NOLINT
-                                     retry_space_(INVALID_SPACE) { }
-
-  AllocationResult() : object_(NULL),
-                       retry_space_(INVALID_SPACE) { }
-
-  static inline AllocationResult Retry(AllocationSpace space = NEW_SPACE) {
-    return AllocationResult(space);
-  }
-
-  inline bool IsRetry() { return retry_space_ != INVALID_SPACE; }
-
-  template <typename T>
-  bool To(T** obj) {
-    if (IsRetry()) return false;
-    *obj = T::cast(object_);
-    return true;
-  }
-
-  Object* ToObjectChecked() {
-    CHECK(!IsRetry());
-    return object_;
-  }
-
-  AllocationSpace RetrySpace() {
-    ASSERT(IsRetry());
-    return retry_space_;
-  }
-
- private:
-  explicit AllocationResult(AllocationSpace space) : object_(NULL),
-                                                     retry_space_(space) { }
-
-  Object* object_;
-  AllocationSpace retry_space_;
-};
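-
-
-// Typical use of AllocationResult, as a sketch (a PagedSpace* 'space' and
-// an int 'size_in_bytes' are assumed to be in scope):
-//
-//   AllocationResult allocation = space->AllocateRaw(size_in_bytes);
-//   HeapObject* object = NULL;
-//   if (!allocation.To(&object)) {
-//     // Allocation failed; allocation.RetrySpace() names the space that
-//     // needs a GC before the allocation can be retried.
-//   }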
-
-
-class PagedSpace : public Space {
- public:
-  // Creates a space with a maximum capacity, and an id.
-  PagedSpace(Heap* heap,
-             intptr_t max_capacity,
-             AllocationSpace id,
-             Executability executable);
-
-  virtual ~PagedSpace() {}
-
-  // Set up the space using the given address range of virtual memory (from
-  // the memory allocator's initial chunk) if possible.  If the block of
-  // addresses is not big enough to contain a single page-aligned page, a
-  // fresh chunk will be allocated.
-  bool SetUp();
-
-  // Returns true if the space has been successfully set up and not
-  // subsequently torn down.
-  bool HasBeenSetUp();
-
-  // Cleans up the space, frees all pages in this space except those belonging
-  // to the initial chunk, uncommits addresses in the initial chunk.
-  void TearDown();
-
-  // Checks whether an object/address is in this space.
-  inline bool Contains(Address a);
-  bool Contains(HeapObject* o) { return Contains(o->address()); }
-
-  // Given an address occupied by a live object, return that object if it is
-  // in this space, or a Smi if it is not.  The implementation iterates over
-  // objects in the page containing the address; the cost is linear in the
-  // number of objects in the page.  It may be slow.
-  Object* FindObject(Address addr);
-
-  // During boot the free_space_map is created, and afterwards we may need
-  // to write it into the free list nodes that were already created.
-  void RepairFreeListsAfterBoot();
-
-  // Prepares for a mark-compact GC.
-  void PrepareForMarkCompact();
-
-  // Current capacity without growing (Size() + Available()).
-  intptr_t Capacity() { return accounting_stats_.Capacity(); }
-
-  // Total amount of memory committed for this space.  For paged
-  // spaces this equals the capacity.
-  intptr_t CommittedMemory() { return Capacity(); }
-
-  // The maximum amount of memory ever committed for this space.
-  intptr_t MaximumCommittedMemory() { return accounting_stats_.MaxCapacity(); }
-
-  // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory();
-
-  struct SizeStats {
-    intptr_t Total() {
-      return small_size_ + medium_size_ + large_size_ + huge_size_;
-    }
-
-    intptr_t small_size_;
-    intptr_t medium_size_;
-    intptr_t large_size_;
-    intptr_t huge_size_;
-  };
-
-  void ObtainFreeListStatistics(Page* p, SizeStats* sizes);
-  void ResetFreeListStatistics();
-
-  // Sets the capacity, the available space and the wasted space to zero.
-  // The stats are rebuilt during sweeping by adding each page to the
-  // capacity and the size when it is encountered.  As free spaces are
-  // discovered during the sweeping they are subtracted from the size and added
-  // to the available and wasted totals.
-  void ClearStats() {
-    accounting_stats_.ClearSizeWaste();
-    ResetFreeListStatistics();
-  }
-
-  // Increases the number of available bytes of that space.
-  void AddToAccountingStats(intptr_t bytes) {
-    accounting_stats_.DeallocateBytes(bytes);
-  }
-
-  // Available bytes without growing.  These are the bytes on the free list.
-  // The bytes in the linear allocation area are not included in this total
-  // because updating the stats would slow down allocation.  New pages are
-  // immediately added to the free list so they show up here.
-  intptr_t Available() { return free_list_.available(); }
-
-  // Allocated bytes in this space.  Garbage bytes that were not found due to
-  // concurrent sweeping are counted as being allocated!  The bytes in the
-  // current linear allocation area (between top and limit) are also counted
-  // here.
-  virtual intptr_t Size() { return accounting_stats_.Size(); }
-
-  // Like Size(), but the bytes in lazily swept pages are estimated and the
-  // bytes in the current linear allocation area are not included.
-  virtual intptr_t SizeOfObjects();
-
-  // Wasted bytes in this space.  These are just the bytes that were thrown away
-  // due to being too small to use for allocation.  They do not include the
-  // free bytes that were not found at all due to lazy sweeping.
-  virtual intptr_t Waste() { return accounting_stats_.Waste(); }
-
-  // Returns the allocation pointer in this space.
-  Address top() { return allocation_info_.top(); }
-  Address limit() { return allocation_info_.limit(); }
-
-  // The allocation top address.
-  Address* allocation_top_address() {
-    return allocation_info_.top_address();
-  }
-
-  // The allocation limit address.
-  Address* allocation_limit_address() {
-    return allocation_info_.limit_address();
-  }
-
-  // Allocate the requested number of bytes in the space if possible, return a
-  // failure object if not.
-  MUST_USE_RESULT inline AllocationResult AllocateRaw(int size_in_bytes);
-
-  // Give a block of memory to the space's free list.  It might be added to
-  // the free list or accounted as waste.  Returns the number of bytes that
-  // were actually made available (size_in_bytes minus the wasted bytes).
-  int Free(Address start, int size_in_bytes) {
-    int wasted = free_list_.Free(start, size_in_bytes);
-    accounting_stats_.DeallocateBytes(size_in_bytes - wasted);
-    return size_in_bytes - wasted;
-  }
-
-  void ResetFreeList() {
-    free_list_.Reset();
-  }
-
-  // Set space allocation info.
-  void SetTopAndLimit(Address top, Address limit) {
-    ASSERT(top == limit ||
-           Page::FromAddress(top) == Page::FromAddress(limit - 1));
-    MemoryChunk::UpdateHighWaterMark(allocation_info_.top());
-    allocation_info_.set_top(top);
-    allocation_info_.set_limit(limit);
-  }
-
-  // Empty space allocation info, returning unused area to free list.
-  void EmptyAllocationInfo() {
-    // Mark the old linear allocation area with a free space map so it can be
-    // skipped when scanning the heap.
-    int old_linear_size = static_cast<int>(limit() - top());
-    Free(top(), old_linear_size);
-    SetTopAndLimit(NULL, NULL);
-  }
-
-  void Allocate(int bytes) {
-    accounting_stats_.AllocateBytes(bytes);
-  }
-
-  void IncreaseCapacity(int size);
-
-  // Releases an unused page and shrinks the space.
-  void ReleasePage(Page* page);
-
-  // The dummy page that anchors the linked list of pages.
-  Page* anchor() { return &anchor_; }
-
-#ifdef VERIFY_HEAP
-  // Verify integrity of this space.
-  virtual void Verify(ObjectVisitor* visitor);
-
-  // Overridden by subclasses to verify space-specific object
-  // properties (e.g., only maps or free-list nodes are in map space).
-  virtual void VerifyObject(HeapObject* obj) {}
-#endif
-
-#ifdef DEBUG
-  // Print meta info and objects in this space.
-  virtual void Print();
-
-  // Reports statistics for the space
-  void ReportStatistics();
-
-  // Report code object related statistics
-  void CollectCodeStatistics();
-  static void ReportCodeStatistics(Isolate* isolate);
-  static void ResetCodeStatistics(Isolate* isolate);
-#endif
-
-  bool was_swept_conservatively() { return was_swept_conservatively_; }
-  void set_was_swept_conservatively(bool b) { was_swept_conservatively_ = b; }
-
-  // Evacuation candidates are swept by the evacuator.  Needs to return a
-  // valid result before _and_ after evacuation has finished.
-  static bool ShouldBeSweptBySweeperThreads(Page* p) {
-    return !p->IsEvacuationCandidate() &&
-           !p->IsFlagSet(Page::RESCAN_ON_EVACUATION) &&
-           !p->WasSweptPrecisely();
-  }
-
-  void IncrementUnsweptFreeBytes(intptr_t by) {
-    unswept_free_bytes_ += by;
-  }
-
-  void IncreaseUnsweptFreeBytes(Page* p) {
-    ASSERT(ShouldBeSweptBySweeperThreads(p));
-    unswept_free_bytes_ += (p->area_size() - p->LiveBytes());
-  }
-
-  void DecrementUnsweptFreeBytes(intptr_t by) {
-    unswept_free_bytes_ -= by;
-  }
-
-  void DecreaseUnsweptFreeBytes(Page* p) {
-    ASSERT(ShouldBeSweptBySweeperThreads(p));
-    unswept_free_bytes_ -= (p->area_size() - p->LiveBytes());
-  }
-
-  void ResetUnsweptFreeBytes() {
-    unswept_free_bytes_ = 0;
-  }
-
-  // This function tries to steal size_in_bytes of memory from the sweeper
-  // threads' free lists. If it does not succeed in stealing enough memory, it
-  // waits for the sweeper threads to finish sweeping.
-  // It returns true when sweeping is completed and false otherwise.
-  bool EnsureSweeperProgress(intptr_t size_in_bytes);
-
-  void set_end_of_unswept_pages(Page* page) {
-    end_of_unswept_pages_ = page;
-  }
-
-  Page* end_of_unswept_pages() {
-    return end_of_unswept_pages_;
-  }
-
-  Page* FirstPage() { return anchor_.next_page(); }
-  Page* LastPage() { return anchor_.prev_page(); }
-
-  void EvictEvacuationCandidatesFromFreeLists();
-
-  bool CanExpand();
-
-  // Returns the number of total pages in this space.
-  int CountTotalPages();
-
-  // Return size of allocatable area on a page in this space.
-  inline int AreaSize() {
-    return area_size_;
-  }
-
- protected:
-  FreeList* free_list() { return &free_list_; }
-
-  int area_size_;
-
-  // Maximum capacity of this space.
-  intptr_t max_capacity_;
-
-  intptr_t SizeOfFirstPage();
-
-  // Accounting information for this space.
-  AllocationStats accounting_stats_;
-
-  // The dummy page that anchors the double linked list of pages.
-  Page anchor_;
-
-  // The space's free list.
-  FreeList free_list_;
-
-  // Normal allocation information.
-  AllocationInfo allocation_info_;
-
-  bool was_swept_conservatively_;
-
-  // The number of free bytes which could be reclaimed by advancing the
-  // concurrent sweeper threads.  This is only an estimation because concurrent
-  // sweeping is done conservatively.
-  intptr_t unswept_free_bytes_;
-
-  // The sweeper threads iterate over the list of pointer and data space pages
-  // and sweep these pages concurrently. They will stop sweeping after the
-  // end_of_unswept_pages_ page.
-  Page* end_of_unswept_pages_;
-
-  // Expands the space by allocating a fixed number of pages. Returns false if
-  // it cannot allocate the requested number of pages from the OS, or if the
-  // hard heap size limit has been hit.
-  bool Expand();
-
-  // Generic fast case allocation function that tries linear allocation at the
-  // address denoted by top in allocation_info_.
-  inline HeapObject* AllocateLinearly(int size_in_bytes);
-
-  MUST_USE_RESULT HeapObject*
-      WaitForSweeperThreadsAndRetryAllocation(int size_in_bytes);
-
-  // Slow path of AllocateRaw.  This function is space-dependent.
-  MUST_USE_RESULT HeapObject* SlowAllocateRaw(int size_in_bytes);
-
-  friend class PageIterator;
-  friend class MarkCompactCollector;
-};
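-
-
-// What EmptyAllocationInfo() above amounts to, step by step (a sketch):
-//
-//   Address top = space->top();
-//   int unused = static_cast<int>(space->limit() - top);
-//   space->Free(top, unused);           // The unused tail of the linear
-//                                       // area goes back to the free list
-//                                       // (or is accounted as waste).
-//   space->SetTopAndLimit(NULL, NULL);  // No linear area until the next
-//                                       // free-list or page refill.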
-
-
-class NumberAndSizeInfo BASE_EMBEDDED {
- public:
-  NumberAndSizeInfo() : number_(0), bytes_(0) {}
-
-  int number() const { return number_; }
-  void increment_number(int num) { number_ += num; }
-
-  int bytes() const { return bytes_; }
-  void increment_bytes(int size) { bytes_ += size; }
-
-  void clear() {
-    number_ = 0;
-    bytes_ = 0;
-  }
-
- private:
-  int number_;
-  int bytes_;
-};
-
-
-// HistogramInfo class for recording a single "bar" of a histogram.  This
-// class is used for collecting statistics to print to the log file.
-class HistogramInfo: public NumberAndSizeInfo {
- public:
-  HistogramInfo() : NumberAndSizeInfo() {}
-
-  const char* name() { return name_; }
-  void set_name(const char* name) { name_ = name; }
-
- private:
-  const char* name_;
-};
-
-
-enum SemiSpaceId {
-  kFromSpace = 0,
-  kToSpace = 1
-};
-
-
-class SemiSpace;
-
-
-class NewSpacePage : public MemoryChunk {
- public:
-  // GC related flags copied from from-space to to-space when
-  // flipping semispaces.
-  static const intptr_t kCopyOnFlipFlagsMask =
-    (1 << MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING) |
-    (1 << MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING) |
-    (1 << MemoryChunk::SCAN_ON_SCAVENGE);
-
-  static const int kAreaSize = Page::kMaxRegularHeapObjectSize;
-
-  inline NewSpacePage* next_page() const {
-    return static_cast<NewSpacePage*>(next_chunk());
-  }
-
-  inline void set_next_page(NewSpacePage* page) {
-    set_next_chunk(page);
-  }
-
-  inline NewSpacePage* prev_page() const {
-    return static_cast<NewSpacePage*>(prev_chunk());
-  }
-
-  inline void set_prev_page(NewSpacePage* page) {
-    set_prev_chunk(page);
-  }
-
-  SemiSpace* semi_space() {
-    return reinterpret_cast<SemiSpace*>(owner());
-  }
-
-  bool is_anchor() { return !this->InNewSpace(); }
-
-  static bool IsAtStart(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask)
-        == kObjectStartOffset;
-  }
-
-  static bool IsAtEnd(Address addr) {
-    return (reinterpret_cast<intptr_t>(addr) & Page::kPageAlignmentMask) == 0;
-  }
-
-  Address address() {
-    return reinterpret_cast<Address>(this);
-  }
-
-  // Finds the NewSpacePage containing the given address.
-  static inline NewSpacePage* FromAddress(Address address_in_page) {
-    Address page_start =
-        reinterpret_cast<Address>(reinterpret_cast<uintptr_t>(address_in_page) &
-                                  ~Page::kPageAlignmentMask);
-    NewSpacePage* page = reinterpret_cast<NewSpacePage*>(page_start);
-    return page;
-  }
-
-  // Find the page for a limit address. A limit address is either an address
-  // inside a page, or the address right after the last byte of a page.
-  static inline NewSpacePage* FromLimit(Address address_limit) {
-    return NewSpacePage::FromAddress(address_limit - 1);
-  }
-
-  // Checks if address1 and address2 are on the same new space page.
-  static inline bool OnSamePage(Address address1, Address address2) {
-    return NewSpacePage::FromAddress(address1) ==
-           NewSpacePage::FromAddress(address2);
-  }
-
- private:
-  // Create a NewSpacePage object that is only used as an anchor
-  // for the doubly-linked list of real pages.
-  explicit NewSpacePage(SemiSpace* owner) {
-    InitializeAsAnchor(owner);
-  }
-
-  static NewSpacePage* Initialize(Heap* heap,
-                                  Address start,
-                                  SemiSpace* semi_space);
-
-  // Initialize a fake NewSpacePage used as a sentinel at the ends
-  // of a doubly-linked list of real NewSpacePages.
-  // Only uses the prev/next links, and sets flags to not be in new-space.
-  void InitializeAsAnchor(SemiSpace* owner);
-
-  friend class SemiSpace;
-  friend class SemiSpaceIterator;
-};
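-
-
-// The mask arithmetic behind FromAddress()/IsAtStart() above, worked
-// through under the assumption of 1 MB pages (kPageAlignmentMask ==
-// 0xfffff):
-//
-//   uintptr_t a = reinterpret_cast<uintptr_t>(addr);  // e.g. 0x004213a8.
-//   a & ~Page::kPageAlignmentMask;                    // 0x00400000, the
-//                                                     // containing page.
-//   (a & Page::kPageAlignmentMask) == kObjectStartOffset;  // True only at
-//                                                          // the first
-//                                                          // object slot.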
-
-
-// -----------------------------------------------------------------------------
-// SemiSpace in young generation
-//
-// A semispace is a contiguous chunk of memory holding page-like memory
-// chunks. The mark-compact collector uses the memory of the first page in
-// the from space as a marking stack when tracing live objects.
-
-class SemiSpace : public Space {
- public:
-  // Constructor.
-  SemiSpace(Heap* heap, SemiSpaceId semispace)
-    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      start_(NULL),
-      age_mark_(NULL),
-      id_(semispace),
-      anchor_(this),
-      current_page_(NULL) { }
-
-  // Sets up the semispace using the given chunk.
-  void SetUp(Address start, int initial_capacity, int maximum_capacity);
-
-  // Tear down the space.  Heap memory was not allocated by the space, so it
-  // is not deallocated here.
-  void TearDown();
-
-  // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() { return start_ != NULL; }
-
-  // Grow the semispace to the new capacity.  The new capacity
-  // requested must be larger than the current capacity and less than
-  // the maximum capacity.
-  bool GrowTo(int new_capacity);
-
-  // Shrinks the semispace to the new capacity.  The new capacity
-  // requested must be more than the amount of used memory in the
-  // semispace and less than the current capacity.
-  bool ShrinkTo(int new_capacity);
-
-  // Returns the start address of the first page of the space.
-  Address space_start() {
-    ASSERT(anchor_.next_page() != &anchor_);
-    return anchor_.next_page()->area_start();
-  }
-
-  // Returns the start address of the current page of the space.
-  Address page_low() {
-    return current_page_->area_start();
-  }
-
-  // Returns one past the end address of the space.
-  Address space_end() {
-    return anchor_.prev_page()->area_end();
-  }
-
-  // Returns one past the end address of the current page of the space.
-  Address page_high() {
-    return current_page_->area_end();
-  }
-
-  bool AdvancePage() {
-    NewSpacePage* next_page = current_page_->next_page();
-    if (next_page == anchor()) return false;
-    current_page_ = next_page;
-    return true;
-  }
-
-  // Resets the space to using the first page.
-  void Reset();
-
-  // Age mark accessors.
-  Address age_mark() { return age_mark_; }
-  void set_age_mark(Address mark);
-
-  // True if the address is in the address range of this semispace (not
-  // necessarily below the allocation pointer).
-  bool Contains(Address a) {
-    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
-           == reinterpret_cast<uintptr_t>(start_);
-  }
-
-  // True if the object is a heap object in the address range of this
-  // semispace (not necessarily below the allocation pointer).
-  bool Contains(Object* o) {
-    return (reinterpret_cast<uintptr_t>(o) & object_mask_) == object_expected_;
-  }
-
-  // If we don't have these here then SemiSpace will be abstract.  However,
-  // they should never be called.
-  virtual intptr_t Size() {
-    UNREACHABLE();
-    return 0;
-  }
-
-  bool is_committed() { return committed_; }
-  bool Commit();
-  bool Uncommit();
-
-  NewSpacePage* first_page() { return anchor_.next_page(); }
-  NewSpacePage* current_page() { return current_page_; }
-
-#ifdef VERIFY_HEAP
-  virtual void Verify();
-#endif
-
-#ifdef DEBUG
-  virtual void Print();
-  // Validate a range of addresses in a SemiSpace.
-  // The "from" address must be on a page prior to the "to" address,
-  // in the linked page order, or it must be earlier on the same page.
-  static void AssertValidRange(Address from, Address to);
-#else
-  // Do nothing.
-  inline static void AssertValidRange(Address from, Address to) {}
-#endif
-
-  // Returns the current capacity of the semi space.
-  int Capacity() { return capacity_; }
-
-  // Returns the maximum capacity of the semi space.
-  int MaximumCapacity() { return maximum_capacity_; }
-
-  // Returns the initial capacity of the semi space.
-  int InitialCapacity() { return initial_capacity_; }
-
-  SemiSpaceId id() { return id_; }
-
-  static void Swap(SemiSpace* from, SemiSpace* to);
-
-  // Returns the maximum amount of memory ever committed by the semi space.
-  size_t MaximumCommittedMemory() { return maximum_committed_; }
-
-  // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory();
-
- private:
-  // Flips the semispace between being from-space and to-space.
-  // Copies the flags into the masked positions on all pages in the space.
-  void FlipPages(intptr_t flags, intptr_t flag_mask);
-
-  // Updates Capacity and MaximumCommitted based on new capacity.
-  void SetCapacity(int new_capacity);
-
-  NewSpacePage* anchor() { return &anchor_; }
-
-  // The current and maximum capacity of the space.
-  int capacity_;
-  int maximum_capacity_;
-  int initial_capacity_;
-
-  intptr_t maximum_committed_;
-
-  // The start address of the space.
-  Address start_;
-  // Used to govern object promotion during mark-compact collection.
-  Address age_mark_;
-
-  // Masks and comparison values to test for containment in this semispace.
-  uintptr_t address_mask_;
-  uintptr_t object_mask_;
-  uintptr_t object_expected_;
-
-  bool committed_;
-  SemiSpaceId id_;
-
-  NewSpacePage anchor_;
-  NewSpacePage* current_page_;
-
-  friend class SemiSpaceIterator;
-  friend class NewSpacePageIterator;
- public:
-  TRACK_MEMORY("SemiSpace")
-};
-
-
-// A SemiSpaceIterator is an ObjectIterator that iterates over the active
-// semispace of the heap's new space.  It iterates over the objects in the
-// semispace from a given start address (defaulting to the bottom of the
-// semispace) to the top of the semispace.  New objects allocated after the
-// iterator is created are not iterated.
-class SemiSpaceIterator : public ObjectIterator {
- public:
-  // Create an iterator over the objects in the given space.  If no start
-  // address is given, the iterator starts from the bottom of the space.  If
-  // no size function is given, the iterator calls Object::Size().
-
-  // Iterate over all of allocated to-space.
-  explicit SemiSpaceIterator(NewSpace* space);
-  // Iterate over all of allocated to-space, with a custom size function.
-  SemiSpaceIterator(NewSpace* space, HeapObjectCallback size_func);
-  // Iterate over part of allocated to-space, from start to the end
-  // of allocation.
-  SemiSpaceIterator(NewSpace* space, Address start);
-  // Iterate from one address to another in the same semi-space.
-  SemiSpaceIterator(Address from, Address to);
-
-  HeapObject* Next() {
-    if (current_ == limit_) return NULL;
-    if (NewSpacePage::IsAtEnd(current_)) {
-      NewSpacePage* page = NewSpacePage::FromLimit(current_);
-      page = page->next_page();
-      ASSERT(!page->is_anchor());
-      current_ = page->area_start();
-      if (current_ == limit_) return NULL;
-    }
-
-    HeapObject* object = HeapObject::FromAddress(current_);
-    int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
-
-    current_ += size;
-    return object;
-  }
-
-  // Implementation of the ObjectIterator functions.
-  virtual HeapObject* next_object() { return Next(); }
-
- private:
-  void Initialize(Address start,
-                  Address end,
-                  HeapObjectCallback size_func);
-
-  // The current iteration point.
-  Address current_;
-  // The end of iteration.
-  Address limit_;
-  // The callback function.
-  HeapObjectCallback size_func_;
-};
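-
-
-// Typical iteration over the active semispace, as a sketch (a NewSpace*
-// 'new_space' is assumed):
-//
-//   SemiSpaceIterator it(new_space);
-//   for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-//     // Visit 'object'.  Objects allocated after the iterator was created
-//     // are not visited.
-//   }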
-
-
-// -----------------------------------------------------------------------------
-// A NewSpacePageIterator iterates over the pages in a semi-space.
-class NewSpacePageIterator BASE_EMBEDDED {
- public:
-  // Make an iterator that runs over all pages in to-space.
-  explicit inline NewSpacePageIterator(NewSpace* space);
-
-  // Make an iterator that runs over all pages in the given semispace,
-  // even those not used in allocation.
-  explicit inline NewSpacePageIterator(SemiSpace* space);
-
-  // Make iterator that iterates from the page containing start
-  // to the page that contains limit in the same semispace.
-  inline NewSpacePageIterator(Address start, Address limit);
-
-  inline bool has_next();
-  inline NewSpacePage* next();
-
- private:
-  NewSpacePage* prev_page_;  // Previous page returned.
-  // Next page that will be returned.  Cached here so that we can use this
-  // iterator for operations that deallocate pages.
-  NewSpacePage* next_page_;
-  // Last page returned.
-  NewSpacePage* last_page_;
-};
-
-
-// -----------------------------------------------------------------------------
-// The young generation space.
-//
-// The new space consists of a contiguous pair of semispaces.  It simply
-// forwards most functions to the appropriate semispace.
-
-class NewSpace : public Space {
- public:
-  // Constructor.
-  explicit NewSpace(Heap* heap)
-    : Space(heap, NEW_SPACE, NOT_EXECUTABLE),
-      to_space_(heap, kToSpace),
-      from_space_(heap, kFromSpace),
-      reservation_(),
-      inline_allocation_limit_step_(0) {}
-
-  // Sets up the new space using the given chunk.
-  bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
-
-  // Tears down the space.  Heap memory was not allocated by the space, so it
-  // is not deallocated here.
-  void TearDown();
-
-  // True if the space has been set up but not torn down.
-  bool HasBeenSetUp() {
-    return to_space_.HasBeenSetUp() && from_space_.HasBeenSetUp();
-  }
-
-  // Flip the pair of spaces.
-  void Flip();
-
-  // Grow the capacity of the semispaces.  Assumes that they are not at
-  // their maximum capacity.
-  void Grow();
-
-  // Shrink the capacity of the semispaces.
-  void Shrink();
-
-  // True if the address or object lies in the address range of either
-  // semispace (not necessarily below the allocation pointer).
-  bool Contains(Address a) {
-    return (reinterpret_cast<uintptr_t>(a) & address_mask_)
-        == reinterpret_cast<uintptr_t>(start_);
-  }
-
-  bool Contains(Object* o) {
-    Address a = reinterpret_cast<Address>(o);
-    return (reinterpret_cast<uintptr_t>(a) & object_mask_) == object_expected_;
-  }
-
-  // Return the allocated bytes in the active semispace.
-  virtual intptr_t Size() {
-    return pages_used_ * NewSpacePage::kAreaSize +
-        static_cast<int>(top() - to_space_.page_low());
-  }
-
-  // The same, but returning an int.  We have to have the one that returns
-  // intptr_t because it is inherited, but if we know we are dealing with the
-  // new space, which can't get as big as the other spaces, then this is
-  // useful:
-  int SizeAsInt() { return static_cast<int>(Size()); }
-
-  // Return the current capacity of a semispace, counting only the usable
-  // object area of each page (i.e., excluding page headers).
-  intptr_t EffectiveCapacity() {
-    SLOW_ASSERT(to_space_.Capacity() == from_space_.Capacity());
-    return (to_space_.Capacity() / Page::kPageSize) * NewSpacePage::kAreaSize;
-  }
-
-  // Return the current capacity of a semispace.
-  intptr_t Capacity() {
-    ASSERT(to_space_.Capacity() == from_space_.Capacity());
-    return to_space_.Capacity();
-  }
-
-  // Return the total amount of memory committed for new space.
-  intptr_t CommittedMemory() {
-    if (from_space_.is_committed()) return 2 * Capacity();
-    return Capacity();
-  }
-
-  // Return the total amount of memory committed for new space.
-  intptr_t MaximumCommittedMemory() {
-    return to_space_.MaximumCommittedMemory() +
-        from_space_.MaximumCommittedMemory();
-  }
-
-  // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory();
-
-  // Return the available bytes without growing.
-  intptr_t Available() {
-    return Capacity() - Size();
-  }
-
-  // Return the maximum capacity of a semispace.
-  int MaximumCapacity() {
-    ASSERT(to_space_.MaximumCapacity() == from_space_.MaximumCapacity());
-    return to_space_.MaximumCapacity();
-  }
-
-  bool IsAtMaximumCapacity() {
-    return Capacity() == MaximumCapacity();
-  }
-
-  // Returns the initial capacity of a semispace.
-  int InitialCapacity() {
-    ASSERT(to_space_.InitialCapacity() == from_space_.InitialCapacity());
-    return to_space_.InitialCapacity();
-  }
-
-  // Return the address of the allocation pointer in the active semispace.
-  Address top() {
-    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.top()));
-    return allocation_info_.top();
-  }
-
-  void set_top(Address top) {
-    ASSERT(to_space_.current_page()->ContainsLimit(top));
-    allocation_info_.set_top(top);
-  }
-
-  // Return the address of the allocation pointer limit in the active semispace.
-  Address limit() {
-    ASSERT(to_space_.current_page()->ContainsLimit(allocation_info_.limit()));
-    return allocation_info_.limit();
-  }
-
-  // Return the address of the first object in the active semispace.
-  Address bottom() { return to_space_.space_start(); }
-
-  // Get the age mark of the inactive semispace.
-  Address age_mark() { return from_space_.age_mark(); }
-  // Set the age mark in the active semispace.
-  void set_age_mark(Address mark) { to_space_.set_age_mark(mark); }
-
-  // The start address of the space and a bit mask. ANDing an address in the
-  // new space with the mask will result in the start address.
-  Address start() { return start_; }
-  uintptr_t mask() { return address_mask_; }
-
-  INLINE(uint32_t AddressToMarkbitIndex(Address addr)) {
-    ASSERT(Contains(addr));
-    ASSERT(IsAligned(OffsetFrom(addr), kPointerSize) ||
-           IsAligned(OffsetFrom(addr) - 1, kPointerSize));
-    return static_cast<uint32_t>(addr - start_) >> kPointerSizeLog2;
-  }
-
-  INLINE(Address MarkbitIndexToAddress(uint32_t index)) {
-    return reinterpret_cast<Address>(index << kPointerSizeLog2);
-  }
-
-  // The allocation top address.
-  Address* allocation_top_address() {
-    return allocation_info_.top_address();
-  }
-
-  // The allocation limit address.
-  Address* allocation_limit_address() {
-    return allocation_info_.limit_address();
-  }
-
-  MUST_USE_RESULT INLINE(AllocationResult AllocateRaw(int size_in_bytes));
-
-  // Reset the allocation pointer to the beginning of the active semispace.
-  void ResetAllocationInfo();
-
-  void UpdateInlineAllocationLimit(int size_in_bytes);
-  void LowerInlineAllocationLimit(intptr_t step) {
-    inline_allocation_limit_step_ = step;
-    UpdateInlineAllocationLimit(0);
-    top_on_previous_step_ = allocation_info_.top();
-  }
-
-  // Get the extent of the inactive semispace (for use as a marking stack,
-  // or to zap it). Notice: space-addresses are not necessarily on the
-  // same page, so FromSpaceStart() might be above FromSpaceEnd().
-  Address FromSpacePageLow() { return from_space_.page_low(); }
-  Address FromSpacePageHigh() { return from_space_.page_high(); }
-  Address FromSpaceStart() { return from_space_.space_start(); }
-  Address FromSpaceEnd() { return from_space_.space_end(); }
-
-  // Get the extent of the active semispace's pages' memory.
-  Address ToSpaceStart() { return to_space_.space_start(); }
-  Address ToSpaceEnd() { return to_space_.space_end(); }
-
-  inline bool ToSpaceContains(Address address) {
-    return to_space_.Contains(address);
-  }
-  inline bool FromSpaceContains(Address address) {
-    return from_space_.Contains(address);
-  }
-
-  // True if the object is a heap object in the address range of the
-  // respective semispace (not necessarily below the allocation pointer of the
-  // semispace).
-  inline bool ToSpaceContains(Object* o) { return to_space_.Contains(o); }
-  inline bool FromSpaceContains(Object* o) { return from_space_.Contains(o); }
-
-  // Try to switch the active semispace to a new, empty, page.
-  // Returns false if this isn't possible or reasonable (i.e., there
-  // are no pages, or the current page is already empty), or true
-  // if successful.
-  bool AddFreshPage();
-
-#ifdef VERIFY_HEAP
-  // Verify the active semispace.
-  virtual void Verify();
-#endif
-
-#ifdef DEBUG
-  // Print the active semispace.
-  virtual void Print() { to_space_.Print(); }
-#endif
-
-  // Iterates the active semispace to collect statistics.
-  void CollectStatistics();
-  // Reports previously collected statistics of the active semispace.
-  void ReportStatistics();
-  // Clears previously collected statistics.
-  void ClearHistograms();
-
-  // Record the allocation or promotion of a heap object.  Note that we don't
-  // record every single allocation, but only those that happen in the
-  // to space during a scavenge GC.
-  void RecordAllocation(HeapObject* obj);
-  void RecordPromotion(HeapObject* obj);
-
-  // Return whether the operation succeeded.
-  bool CommitFromSpaceIfNeeded() {
-    if (from_space_.is_committed()) return true;
-    return from_space_.Commit();
-  }
-
-  bool UncommitFromSpace() {
-    if (!from_space_.is_committed()) return true;
-    return from_space_.Uncommit();
-  }
-
-  inline intptr_t inline_allocation_limit_step() {
-    return inline_allocation_limit_step_;
-  }
-
-  SemiSpace* active_space() { return &to_space_; }
-
- private:
-  // Update allocation info to match the current to-space page.
-  void UpdateAllocationInfo();
-
-  Address chunk_base_;
-  uintptr_t chunk_size_;
-
-  // The semispaces.
-  SemiSpace to_space_;
-  SemiSpace from_space_;
-  VirtualMemory reservation_;
-  int pages_used_;
-
-  // Start address and bit mask for containment testing.
-  Address start_;
-  uintptr_t address_mask_;
-  uintptr_t object_mask_;
-  uintptr_t object_expected_;
-
-  // Allocation pointer and limit for normal allocation and allocation during
-  // mark-compact collection.
-  AllocationInfo allocation_info_;
-
-  // When incremental marking is active we will set allocation_info_.limit
-  // to be lower than the actual limit and then will gradually increase it
-  // in steps to guarantee that we do incremental marking steps even
-  // when all allocation is performed from inlined generated code.
-  intptr_t inline_allocation_limit_step_;
-
-  Address top_on_previous_step_;
-
-  HistogramInfo* allocated_histogram_;
-  HistogramInfo* promoted_histogram_;
-
-  MUST_USE_RESULT AllocationResult SlowAllocateRaw(int size_in_bytes);
-
-  friend class SemiSpaceIterator;
-
- public:
-  TRACK_MEMORY("NewSpace")
-};
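-
-
-// How the containment tests above are typically combined during a
-// scavenge, as a sketch (a NewSpace* 'new_space' and an Object* 'o' are
-// assumed):
-//
-//   if (new_space->FromSpaceContains(o)) {
-//     // Not yet evacuated in this cycle: copy to to-space or promote.
-//   } else if (new_space->ToSpaceContains(o)) {
-//     // Already evacuated; the from-space copy holds a forwarding pointer.
-//   }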
-
-
-// -----------------------------------------------------------------------------
-// Old object space (excluding map objects)
-
-class OldSpace : public PagedSpace {
- public:
-  // Creates an old space object with a given maximum capacity.
-  // The constructor does not allocate pages from OS.
-  OldSpace(Heap* heap,
-           intptr_t max_capacity,
-           AllocationSpace id,
-           Executability executable)
-      : PagedSpace(heap, max_capacity, id, executable) {
-  }
-
- public:
-  TRACK_MEMORY("OldSpace")
-};
-
-
-// For contiguous spaces, top should be in the space (or at the end) and limit
-// should be the end of the space.
-#define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
-  SLOW_ASSERT((space).page_low() <= (info).top() \
-              && (info).top() <= (space).page_high() \
-              && (info).limit() <= (space).page_high())
-
-
-// -----------------------------------------------------------------------------
-// Old space for all map objects
-
-class MapSpace : public PagedSpace {
- public:
-  // Creates a map space object with a maximum capacity.
-  MapSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE),
-        max_map_space_pages_(kMaxMapPageIndex - 1) {
-  }
-
-  // Given an index, returns the page address.
-  // TODO(1600): this limit is artificial just to keep code compilable
-  static const int kMaxMapPageIndex = 1 << 16;
-
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (IsPowerOf2(Map::kSize)) {
-      return RoundDown(size, Map::kSize);
-    } else {
-      return (size / Map::kSize) * Map::kSize;
-    }
-  }
-
- protected:
-  virtual void VerifyObject(HeapObject* obj);
-
- private:
-  static const int kMapsPerPage = Page::kMaxRegularHeapObjectSize / Map::kSize;
-
-  // Do map space compaction if there is a page gap.
-  int CompactionThreshold() {
-    return kMapsPerPage * (max_map_space_pages_ - 1);
-  }
-
-  const int max_map_space_pages_;
-
- public:
-  TRACK_MEMORY("MapSpace")
-};
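-
-
-// A worked example for RoundSizeDownToObjectAlignment() above: if
-// Map::kSize were 88 bytes (not a power of two), a size of 1000 would
-// round down to (1000 / 88) * 88 == 968, leaving room for exactly 11 maps.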
-
-
-// -----------------------------------------------------------------------------
-// Old space for simple property cell objects
-
-class CellSpace : public PagedSpace {
- public:
-  // Creates a property cell space object with a maximum capacity.
-  CellSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
-  }
-
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (IsPowerOf2(Cell::kSize)) {
-      return RoundDown(size, Cell::kSize);
-    } else {
-      return (size / Cell::kSize) * Cell::kSize;
-    }
-  }
-
- protected:
-  virtual void VerifyObject(HeapObject* obj);
-
- public:
-  TRACK_MEMORY("CellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Old space for all global object property cell objects
-
-class PropertyCellSpace : public PagedSpace {
- public:
-  // Creates a property cell space object with a maximum capacity.
-  PropertyCellSpace(Heap* heap, intptr_t max_capacity,
-                    AllocationSpace id)
-      : PagedSpace(heap, max_capacity, id, NOT_EXECUTABLE) {
-  }
-
-  virtual int RoundSizeDownToObjectAlignment(int size) {
-    if (IsPowerOf2(PropertyCell::kSize)) {
-      return RoundDown(size, PropertyCell::kSize);
-    } else {
-      return (size / PropertyCell::kSize) * PropertyCell::kSize;
-    }
-  }
-
- protected:
-  virtual void VerifyObject(HeapObject* obj);
-
- public:
-  TRACK_MEMORY("PropertyCellSpace")
-};
-
-
-// -----------------------------------------------------------------------------
-// Large objects (> Page::kMaxRegularHeapObjectSize) are allocated and managed
-// by the large object space. A large object is allocated from the OS heap
-// with extra padding bytes (Page::kPageSize + Page::kObjectStartOffset).
-// A large object always starts at offset Page::kObjectStartOffset within a
-// page.
-// Large objects do not move during garbage collections.
-
-class LargeObjectSpace : public Space {
- public:
-  LargeObjectSpace(Heap* heap, intptr_t max_capacity, AllocationSpace id);
-  virtual ~LargeObjectSpace() {}
-
-  // Initializes internal data structures.
-  bool SetUp();
-
-  // Releases internal resources, frees objects in this space.
-  void TearDown();
-
-  static intptr_t ObjectSizeFor(intptr_t chunk_size) {
-    if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0;
-    return chunk_size - Page::kPageSize - Page::kObjectStartOffset;
-  }
-
-  // Allocates a large block of memory in this space; returns a retry
-  // result when no memory is available.
-  MUST_USE_RESULT AllocationResult AllocateRaw(int object_size,
-                                               Executability executable);
-
-  // Available bytes for objects in this space.
-  inline intptr_t Available();
-
-  virtual intptr_t Size() {
-    return size_;
-  }
-
-  virtual intptr_t SizeOfObjects() {
-    return objects_size_;
-  }
-
-  intptr_t MaximumCommittedMemory() {
-    return maximum_committed_;
-  }
-
-  intptr_t CommittedMemory() {
-    return Size();
-  }
-
-  // Approximate amount of physical memory committed for this space.
-  size_t CommittedPhysicalMemory();
-
-  int PageCount() {
-    return page_count_;
-  }
-
-  // Finds an object for a given address; returns a Smi if it is not found.
-  // The function iterates through all objects in this space and may be slow.
-  Object* FindObject(Address a);
-
-  // Finds a large object page containing the given address, returns NULL
-  // if such a page doesn't exist.
-  LargePage* FindPage(Address a);
-
-  // Frees unmarked objects.
-  void FreeUnmarkedObjects();
-
-  // Checks whether a heap object is in this space; O(1).
-  bool Contains(HeapObject* obj);
-
-  // Checks whether the space is empty.
-  bool IsEmpty() { return first_page_ == NULL; }
-
-  LargePage* first_page() { return first_page_; }
-
-#ifdef VERIFY_HEAP
-  virtual void Verify();
-#endif
-
-#ifdef DEBUG
-  virtual void Print();
-  void ReportStatistics();
-  void CollectCodeStatistics();
-#endif
-  // Checks whether an address is in the object area in this space.  It
-  // iterates over all objects in the space and may be slow.
-  bool SlowContains(Address addr) { return FindObject(addr)->IsHeapObject(); }
-
- private:
-  intptr_t max_capacity_;
-  intptr_t maximum_committed_;
-  // The head of the linked list of large object chunks.
-  LargePage* first_page_;
-  intptr_t size_;  // allocated bytes
-  int page_count_;  // number of chunks
-  intptr_t objects_size_;  // size of objects
-  // Map MemoryChunk::kAlignment-aligned chunks to large pages covering them
-  HashMap chunk_map_;
-
-  friend class LargeObjectIterator;
-
- public:
-  TRACK_MEMORY("LargeObjectSpace")
-};
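-
-
-// A worked example for ObjectSizeFor() above, assuming 1 MB pages: a 2 MB
-// chunk can hold an object of at most
-//   2 MB - Page::kPageSize - Page::kObjectStartOffset
-// bytes, and any chunk no larger than one page plus the object start
-// offset yields 0.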
-
-
-class LargeObjectIterator: public ObjectIterator {
- public:
-  explicit LargeObjectIterator(LargeObjectSpace* space);
-  LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func);
-
-  HeapObject* Next();
-
-  // Implementation of ObjectIterator.
-  virtual HeapObject* next_object() { return Next(); }
-
- private:
-  LargePage* current_;
-  HeapObjectCallback size_func_;
-};
-
-
-// Iterates over the chunks (pages and large object pages) that can contain
-// pointers to new space.
-class PointerChunkIterator BASE_EMBEDDED {
- public:
-  inline explicit PointerChunkIterator(Heap* heap);
-
-  // Return NULL when the iterator is done.
-  MemoryChunk* next() {
-    switch (state_) {
-      case kOldPointerState: {
-        if (old_pointer_iterator_.has_next()) {
-          return old_pointer_iterator_.next();
-        }
-        state_ = kMapState;
-        // Fall through.
-      }
-      case kMapState: {
-        if (map_iterator_.has_next()) {
-          return map_iterator_.next();
-        }
-        state_ = kLargeObjectState;
-        // Fall through.
-      }
-      case kLargeObjectState: {
-        HeapObject* heap_object;
-        do {
-          heap_object = lo_iterator_.Next();
-          if (heap_object == NULL) {
-            state_ = kFinishedState;
-            return NULL;
-          }
-          // Fixed arrays are the only pointer-containing objects in large
-          // object space.
-        } while (!heap_object->IsFixedArray());
-        MemoryChunk* answer = MemoryChunk::FromAddress(heap_object->address());
-        return answer;
-      }
-      case kFinishedState:
-        return NULL;
-      default:
-        break;
-    }
-    UNREACHABLE();
-    return NULL;
-  }
-
-
- private:
-  enum State {
-    kOldPointerState,
-    kMapState,
-    kLargeObjectState,
-    kFinishedState
-  };
-  State state_;
-  PageIterator old_pointer_iterator_;
-  PageIterator map_iterator_;
-  LargeObjectIterator lo_iterator_;
-};
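-
-
-// Typical use of PointerChunkIterator, as a sketch (the same loop appears
-// in StoreBuffer::EnsureSpace further below):
-//
-//   PointerChunkIterator it(heap);
-//   MemoryChunk* chunk;
-//   while ((chunk = it.next()) != NULL) {
-//     // 'chunk' may contain pointers into new space.
-//   }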
-
-
-#ifdef DEBUG
-struct CommentStatistic {
-  const char* comment;
-  int size;
-  int count;
-  void Clear() {
-    comment = NULL;
-    size = 0;
-    count = 0;
-  }
-  // Must be small, since an iteration is used for lookup.
-  static const int kMaxComments = 64;
-};
-#endif
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_SPACES_H_
diff --git a/src/splay-tree.h b/src/splay-tree.h
index 5448fcd..30e5d67 100644
--- a/src/splay-tree.h
+++ b/src/splay-tree.h
@@ -35,8 +35,8 @@
 
   class Locator;
 
-  SplayTree(AllocationPolicy allocator = AllocationPolicy())
-      : root_(NULL), allocator_(allocator) { }
+  explicit SplayTree(AllocationPolicy allocator = AllocationPolicy())
+      : root_(NULL), allocator_(allocator) {}
   ~SplayTree();
 
   INLINE(void* operator new(size_t size,
diff --git a/src/store-buffer-inl.h b/src/store-buffer-inl.h
deleted file mode 100644
index fdfe37d..0000000
--- a/src/store-buffer-inl.h
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STORE_BUFFER_INL_H_
-#define V8_STORE_BUFFER_INL_H_
-
-#include "src/store-buffer.h"
-
-namespace v8 {
-namespace internal {
-
-Address StoreBuffer::TopAddress() {
-  return reinterpret_cast<Address>(heap_->store_buffer_top_address());
-}
-
-
-void StoreBuffer::Mark(Address addr) {
-  ASSERT(!heap_->cell_space()->Contains(addr));
-  ASSERT(!heap_->code_space()->Contains(addr));
-  ASSERT(!heap_->old_data_space()->Contains(addr));
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-  *top++ = addr;
-  heap_->public_set_store_buffer_top(top);
-  if ((reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0) {
-    ASSERT(top == limit_);
-    Compact();
-  } else {
-    ASSERT(top < limit_);
-  }
-}
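-
-
-// Why the overflow test above is a single bit test (a sketch): SetUp()
-// below places start_ at a 2 * kStoreBufferSize boundary, so every slot
-// address inside the buffer has the kStoreBufferOverflowBit clear, and the
-// first address past the buffer (limit_) is the only top value with that
-// bit set.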
-
-
-void StoreBuffer::EnterDirectlyIntoStoreBuffer(Address addr) {
-  if (store_buffer_rebuilding_enabled_) {
-    SLOW_ASSERT(!heap_->cell_space()->Contains(addr) &&
-                !heap_->code_space()->Contains(addr) &&
-                !heap_->old_data_space()->Contains(addr) &&
-                !heap_->new_space()->Contains(addr));
-    Address* top = old_top_;
-    *top++ = addr;
-    old_top_ = top;
-    old_buffer_is_sorted_ = false;
-    old_buffer_is_filtered_ = false;
-    if (top >= old_limit_) {
-      ASSERT(callback_ != NULL);
-      (*callback_)(heap_,
-                   MemoryChunk::FromAnyPointerAddress(heap_, addr),
-                   kStoreBufferFullEvent);
-    }
-  }
-}
-
-
-void StoreBuffer::ClearDeadObject(HeapObject* object) {
-  Address& map_field = Memory::Address_at(object->address());
-  if (heap_->map_space()->Contains(map_field)) {
-    map_field = NULL;
-  }
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_STORE_BUFFER_INL_H_
diff --git a/src/store-buffer.cc b/src/store-buffer.cc
deleted file mode 100644
index 5ec3e54..0000000
--- a/src/store-buffer.cc
+++ /dev/null
@@ -1,620 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/store-buffer.h"
-
-#include <algorithm>
-
-#include "src/v8.h"
-
-#include "src/base/atomicops.h"
-#include "src/counters.h"
-#include "src/store-buffer-inl.h"
-
-namespace v8 {
-namespace internal {
-
-StoreBuffer::StoreBuffer(Heap* heap)
-    : heap_(heap),
-      start_(NULL),
-      limit_(NULL),
-      old_start_(NULL),
-      old_limit_(NULL),
-      old_top_(NULL),
-      old_reserved_limit_(NULL),
-      old_buffer_is_sorted_(false),
-      old_buffer_is_filtered_(false),
-      during_gc_(false),
-      store_buffer_rebuilding_enabled_(false),
-      callback_(NULL),
-      may_move_store_buffer_entries_(true),
-      virtual_memory_(NULL),
-      hash_set_1_(NULL),
-      hash_set_2_(NULL),
-      hash_sets_are_empty_(true) {
-}
-
-
-void StoreBuffer::SetUp() {
-  virtual_memory_ = new VirtualMemory(kStoreBufferSize * 3);
-  uintptr_t start_as_int =
-      reinterpret_cast<uintptr_t>(virtual_memory_->address());
-  start_ =
-      reinterpret_cast<Address*>(RoundUp(start_as_int, kStoreBufferSize * 2));
-  limit_ = start_ + (kStoreBufferSize / kPointerSize);
-
-  old_virtual_memory_ =
-      new VirtualMemory(kOldStoreBufferLength * kPointerSize);
-  old_top_ = old_start_ =
-      reinterpret_cast<Address*>(old_virtual_memory_->address());
-  // We don't know the exact alignment requirements of the OS, but the
-  // reservation is certainly at least 4 KB aligned, as asserted below.
-  ASSERT((reinterpret_cast<uintptr_t>(old_start_) & 0xfff) == 0);
-  int initial_length = static_cast<int>(OS::CommitPageSize() / kPointerSize);
-  ASSERT(initial_length > 0);
-  ASSERT(initial_length <= kOldStoreBufferLength);
-  old_limit_ = old_start_ + initial_length;
-  old_reserved_limit_ = old_start_ + kOldStoreBufferLength;
-
-  CHECK(old_virtual_memory_->Commit(
-            reinterpret_cast<void*>(old_start_),
-            (old_limit_ - old_start_) * kPointerSize,
-            false));
-
-  ASSERT(reinterpret_cast<Address>(start_) >= virtual_memory_->address());
-  ASSERT(reinterpret_cast<Address>(limit_) >= virtual_memory_->address());
-  Address* vm_limit = reinterpret_cast<Address*>(
-      reinterpret_cast<char*>(virtual_memory_->address()) +
-          virtual_memory_->size());
-  ASSERT(start_ <= vm_limit);
-  ASSERT(limit_ <= vm_limit);
-  USE(vm_limit);
-  ASSERT((reinterpret_cast<uintptr_t>(limit_) & kStoreBufferOverflowBit) != 0);
-  ASSERT((reinterpret_cast<uintptr_t>(limit_ - 1) & kStoreBufferOverflowBit) ==
-         0);
-
-  CHECK(virtual_memory_->Commit(reinterpret_cast<Address>(start_),
-                                kStoreBufferSize,
-                                false));  // Not executable.
-  heap_->public_set_store_buffer_top(start_);
-
-  hash_set_1_ = new uintptr_t[kHashSetLength];
-  hash_set_2_ = new uintptr_t[kHashSetLength];
-  hash_sets_are_empty_ = false;
-
-  ClearFilteringHashSets();
-}
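
The deleted SetUp() above over-reserves the store buffer's virtual memory at three times its size and rounds the base up to a 2x-size boundary, which is what later lets the limit address carry the overflow bit. A minimal sketch of that round-up trick, with malloc standing in for VirtualMemory and all names illustrative:

#include <cassert>
#include <cstdint>
#include <cstdlib>

// Round x up to the next multiple of a power-of-two alignment.
static uintptr_t RoundUpTo(uintptr_t x, uintptr_t alignment) {
  assert((alignment & (alignment - 1)) == 0);  // must be a power of two
  return (x + alignment - 1) & ~(alignment - 1);
}

int main() {
  const uintptr_t kBufferSize = 1 << 15;  // stand-in for kStoreBufferSize
  // Reserve 3x the needed size so an aligned window of kBufferSize bytes
  // is guaranteed to exist somewhere inside the reservation.
  void* reservation = std::malloc(kBufferSize * 3);
  uintptr_t base = reinterpret_cast<uintptr_t>(reservation);
  uintptr_t start = RoundUpTo(base, kBufferSize * 2);
  assert(start + kBufferSize <= base + kBufferSize * 3);
  std::free(reservation);
  return 0;
}
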
-
-
-void StoreBuffer::TearDown() {
-  delete virtual_memory_;
-  delete old_virtual_memory_;
-  delete[] hash_set_1_;
-  delete[] hash_set_2_;
-  old_start_ = old_top_ = old_limit_ = old_reserved_limit_ = NULL;
-  start_ = limit_ = NULL;
-  heap_->public_set_store_buffer_top(start_);
-}
-
-
-void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
-  isolate->heap()->store_buffer()->Compact();
-  isolate->counters()->store_buffer_overflows()->Increment();
-}
-
-
-void StoreBuffer::Uniq() {
-  // Remove adjacent duplicates and cells that do not point at new space.
-  Address previous = NULL;
-  Address* write = old_start_;
-  ASSERT(may_move_store_buffer_entries_);
-  for (Address* read = old_start_; read < old_top_; read++) {
-    Address current = *read;
-    if (current != previous) {
-      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
-        *write++ = current;
-      }
-    }
-    previous = current;
-  }
-  old_top_ = write;
-}
-
-
-bool StoreBuffer::SpaceAvailable(intptr_t space_needed) {
-  return old_limit_ - old_top_ >= space_needed;
-}
-
-
-void StoreBuffer::EnsureSpace(intptr_t space_needed) {
-  while (old_limit_ - old_top_ < space_needed &&
-         old_limit_ < old_reserved_limit_) {
-    size_t grow = old_limit_ - old_start_;  // Double size.
-    CHECK(old_virtual_memory_->Commit(reinterpret_cast<void*>(old_limit_),
-                                      grow * kPointerSize,
-                                      false));
-    old_limit_ += grow;
-  }
-
-  if (SpaceAvailable(space_needed)) return;
-
-  if (old_buffer_is_filtered_) return;
-  ASSERT(may_move_store_buffer_entries_);
-  Compact();
-
-  old_buffer_is_filtered_ = true;
-  bool page_has_scan_on_scavenge_flag = false;
-
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  while ((chunk = it.next()) != NULL) {
-    if (chunk->scan_on_scavenge()) {
-      page_has_scan_on_scavenge_flag = true;
-      break;
-    }
-  }
-
-  if (page_has_scan_on_scavenge_flag) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
-  if (SpaceAvailable(space_needed)) return;
-
-  // Sample 1 entry in 97 and filter out the pages where we estimate that more
-  // than 1 in 8 pointers are to new space.
-  static const int kSampleFinenesses = 5;
-  static const struct Samples {
-    int prime_sample_step;
-    int threshold;
-  } samples[kSampleFinenesses] =  {
-    { 97, ((Page::kPageSize / kPointerSize) / 97) / 8 },
-    { 23, ((Page::kPageSize / kPointerSize) / 23) / 16 },
-    { 7, ((Page::kPageSize / kPointerSize) / 7) / 32 },
-    { 3, ((Page::kPageSize / kPointerSize) / 3) / 256 },
-    { 1, 0}
-  };
-  for (int i = 0; i < kSampleFinenesses; i++) {
-    ExemptPopularPages(samples[i].prime_sample_step, samples[i].threshold);
-    // As a last resort we mark all pages as being exempt from the store buffer.
-    ASSERT(i != (kSampleFinenesses - 1) || old_top_ == old_start_);
-    if (SpaceAvailable(space_needed)) return;
-  }
-  UNREACHABLE();
-}
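
EnsureSpace's last resort, per the sample table above, probes the old buffer with successively finer prime strides and exempts pages whose estimated share of new-space pointers crosses a threshold. A self-contained sketch of one counting pass, assuming a hypothetical precomputed slot-to-page mapping:

#include <cstddef>
#include <vector>

// One sampling pass: count hits per page at a prime stride; pages that
// reach `threshold` sampled hits are exempted from the store buffer and
// flagged for scan-on-scavenge instead.
std::vector<bool> ExemptPopularPages(const std::vector<size_t>& page_of_slot,
                                     size_t num_pages, size_t stride,
                                     int threshold) {
  std::vector<int> counter(num_pages, 0);
  std::vector<bool> exempt(num_pages, false);
  for (size_t i = 0; i < page_of_slot.size(); i += stride) {
    size_t page = page_of_slot[i];
    if (counter[page] >= threshold) exempt[page] = true;  // too popular
    counter[page]++;
  }
  return exempt;
}
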
-
-
-// Sample the store buffer to see if some pages are taking up a lot of space
-// in the store buffer.
-void StoreBuffer::ExemptPopularPages(int prime_sample_step, int threshold) {
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  while ((chunk = it.next()) != NULL) {
-    chunk->set_store_buffer_counter(0);
-  }
-  bool created_new_scan_on_scavenge_pages = false;
-  MemoryChunk* previous_chunk = NULL;
-  for (Address* p = old_start_; p < old_top_; p += prime_sample_step) {
-    Address addr = *p;
-    MemoryChunk* containing_chunk = NULL;
-    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
-      containing_chunk = previous_chunk;
-    } else {
-      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
-    }
-    int old_counter = containing_chunk->store_buffer_counter();
-    if (old_counter >= threshold) {
-      containing_chunk->set_scan_on_scavenge(true);
-      created_new_scan_on_scavenge_pages = true;
-    }
-    containing_chunk->set_store_buffer_counter(old_counter + 1);
-    previous_chunk = containing_chunk;
-  }
-  if (created_new_scan_on_scavenge_pages) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-  old_buffer_is_filtered_ = true;
-}
-
-
-void StoreBuffer::Filter(int flag) {
-  Address* new_top = old_start_;
-  MemoryChunk* previous_chunk = NULL;
-  for (Address* p = old_start_; p < old_top_; p++) {
-    Address addr = *p;
-    MemoryChunk* containing_chunk = NULL;
-    if (previous_chunk != NULL && previous_chunk->Contains(addr)) {
-      containing_chunk = previous_chunk;
-    } else {
-      containing_chunk = MemoryChunk::FromAnyPointerAddress(heap_, addr);
-      previous_chunk = containing_chunk;
-    }
-    if (!containing_chunk->IsFlagSet(flag)) {
-      *new_top++ = addr;
-    }
-  }
-  old_top_ = new_top;
-
-  // Filtering hash sets are inconsistent with the store buffer after this
-  // operation.
-  ClearFilteringHashSets();
-}
-
-
-void StoreBuffer::SortUniq() {
-  Compact();
-  if (old_buffer_is_sorted_) return;
-  std::sort(old_start_, old_top_);
-  Uniq();
-
-  old_buffer_is_sorted_ = true;
-
-  // Filtering hash sets are inconsistent with the store buffer after this
-  // operation.
-  ClearFilteringHashSets();
-}
-
-
-bool StoreBuffer::PrepareForIteration() {
-  Compact();
-  PointerChunkIterator it(heap_);
-  MemoryChunk* chunk;
-  bool page_has_scan_on_scavenge_flag = false;
-  while ((chunk = it.next()) != NULL) {
-    if (chunk->scan_on_scavenge()) {
-      page_has_scan_on_scavenge_flag = true;
-      break;
-    }
-  }
-
-  if (page_has_scan_on_scavenge_flag) {
-    Filter(MemoryChunk::SCAN_ON_SCAVENGE);
-  }
-
-  // Filtering hash sets are inconsistent with the store buffer after
-  // iteration.
-  ClearFilteringHashSets();
-
-  return page_has_scan_on_scavenge_flag;
-}
-
-
-#ifdef DEBUG
-void StoreBuffer::Clean() {
-  ClearFilteringHashSets();
-  Uniq();  // Also removes things that no longer point to new space.
-  EnsureSpace(kStoreBufferSize / 2);
-}
-
-
-static Address* in_store_buffer_1_element_cache = NULL;
-
-
-bool StoreBuffer::CellIsInStoreBuffer(Address cell_address) {
-  if (!FLAG_enable_slow_asserts) return true;
-  if (in_store_buffer_1_element_cache != NULL &&
-      *in_store_buffer_1_element_cache == cell_address) {
-    return true;
-  }
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-  for (Address* current = top - 1; current >= start_; current--) {
-    if (*current == cell_address) {
-      in_store_buffer_1_element_cache = current;
-      return true;
-    }
-  }
-  for (Address* current = old_top_ - 1; current >= old_start_; current--) {
-    if (*current == cell_address) {
-      in_store_buffer_1_element_cache = current;
-      return true;
-    }
-  }
-  return false;
-}
-#endif
-
-
-void StoreBuffer::ClearFilteringHashSets() {
-  if (!hash_sets_are_empty_) {
-    memset(reinterpret_cast<void*>(hash_set_1_),
-           0,
-           sizeof(uintptr_t) * kHashSetLength);
-    memset(reinterpret_cast<void*>(hash_set_2_),
-           0,
-           sizeof(uintptr_t) * kHashSetLength);
-    hash_sets_are_empty_ = true;
-  }
-}
-
-
-void StoreBuffer::GCPrologue() {
-  ClearFilteringHashSets();
-  during_gc_ = true;
-}
-
-
-#ifdef VERIFY_HEAP
-void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
-  LargeObjectIterator it(space);
-  for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
-    if (object->IsFixedArray()) {
-      Address slot_address = object->address();
-      Address end = object->address() + object->Size();
-
-      while (slot_address < end) {
-        HeapObject** slot = reinterpret_cast<HeapObject**>(slot_address);
-        // When we are not in GC, the Heap::InNewSpace() predicate
-        // checks that pointers which satisfy the predicate point into
-        // the active semispace.
-        Object* object = reinterpret_cast<Object*>(
-            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-        heap_->InNewSpace(object);
-        slot_address += kPointerSize;
-      }
-    }
-  }
-}
-#endif
-
-
-void StoreBuffer::Verify() {
-#ifdef VERIFY_HEAP
-  VerifyPointers(heap_->lo_space());
-#endif
-}
-
-
-void StoreBuffer::GCEpilogue() {
-  during_gc_ = false;
-#ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
-    Verify();
-  }
-#endif
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInRegion(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback,
-    bool clear_maps) {
-  for (Address slot_address = start;
-       slot_address < end;
-       slot_address += kPointerSize) {
-    Object** slot = reinterpret_cast<Object**>(slot_address);
-    Object* object = reinterpret_cast<Object*>(
-        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-    if (heap_->InNewSpace(object)) {
-      HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-      ASSERT(heap_object->IsHeapObject());
-      // The new space object was not promoted if it still contains a map
-      // pointer. Clear the map field now lazily.
-      if (clear_maps) ClearDeadObject(heap_object);
-      slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-      object = reinterpret_cast<Object*>(
-          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-      if (heap_->InNewSpace(object)) {
-        EnterDirectlyIntoStoreBuffer(slot_address);
-      }
-    }
-  }
-}
-
-
-// Compute the start address of the first map following the given addr.
-static inline Address MapStartAlign(Address addr) {
-  Address page = Page::FromAddress(addr)->area_start();
-  return page + (((addr - page) + (Map::kSize - 1)) / Map::kSize * Map::kSize);
-}
-
-
-// Compute the end address of the first map preceding the given addr.
-static inline Address MapEndAlign(Address addr) {
-  Address page = Page::FromAllocationTop(addr)->area_start();
-  return page + ((addr - page) / Map::kSize * Map::kSize);
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInMaps(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback,
-    bool clear_maps) {
-  ASSERT(MapStartAlign(start) == start);
-  ASSERT(MapEndAlign(end) == end);
-
-  Address map_address = start;
-  while (map_address < end) {
-    ASSERT(!heap_->InNewSpace(Memory::Object_at(map_address)));
-    ASSERT(Memory::Object_at(map_address)->IsMap());
-
-    Address pointer_fields_start = map_address + Map::kPointerFieldsBeginOffset;
-    Address pointer_fields_end = map_address + Map::kPointerFieldsEndOffset;
-
-    FindPointersToNewSpaceInRegion(pointer_fields_start,
-                                   pointer_fields_end,
-                                   slot_callback,
-                                   clear_maps);
-    map_address += Map::kSize;
-  }
-}
-
-
-void StoreBuffer::FindPointersToNewSpaceInMapsRegion(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback,
-    bool clear_maps) {
-  Address map_aligned_start = MapStartAlign(start);
-  Address map_aligned_end   = MapEndAlign(end);
-
-  ASSERT(map_aligned_start == start);
-  ASSERT(map_aligned_start <= map_aligned_end && map_aligned_end <= end);
-
-  FindPointersToNewSpaceInMaps(map_aligned_start,
-                               map_aligned_end,
-                               slot_callback,
-                               clear_maps);
-}
-
-
-void StoreBuffer::IteratePointersInStoreBuffer(
-    ObjectSlotCallback slot_callback,
-    bool clear_maps) {
-  Address* limit = old_top_;
-  old_top_ = old_start_;
-  {
-    DontMoveStoreBufferEntriesScope scope(this);
-    for (Address* current = old_start_; current < limit; current++) {
-#ifdef DEBUG
-      Address* saved_top = old_top_;
-#endif
-      Object** slot = reinterpret_cast<Object**>(*current);
-      Object* object = reinterpret_cast<Object*>(
-          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-      if (heap_->InFromSpace(object)) {
-        HeapObject* heap_object = reinterpret_cast<HeapObject*>(object);
-        // The new space object was not promoted if it still contains a map
-        // pointer. Clear the map field now lazily.
-        if (clear_maps) ClearDeadObject(heap_object);
-        slot_callback(reinterpret_cast<HeapObject**>(slot), heap_object);
-        object = reinterpret_cast<Object*>(
-            base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
-        if (heap_->InNewSpace(object)) {
-          EnterDirectlyIntoStoreBuffer(reinterpret_cast<Address>(slot));
-        }
-      }
-      ASSERT(old_top_ == saved_top + 1 || old_top_ == saved_top);
-    }
-  }
-}
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback) {
-  IteratePointersToNewSpace(slot_callback, false);
-}
-
-
-void StoreBuffer::IteratePointersToNewSpaceAndClearMaps(
-    ObjectSlotCallback slot_callback) {
-  IteratePointersToNewSpace(slot_callback, true);
-}
-
-
-void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
-                                            bool clear_maps) {
-  // We do not sort or remove duplicated entries from the store buffer because
-  // we expect that the callback will rebuild the store buffer, thus removing
-  // all duplicates and pointers to old space.
-  bool some_pages_to_scan = PrepareForIteration();
-
-  // TODO(gc): we want to skip slots on evacuation candidates
-  // but we can't simply figure that out from slot address
-  // because slot can belong to a large object.
-  IteratePointersInStoreBuffer(slot_callback, clear_maps);
-
-  // We are done scanning all the pointers that were in the store buffer, but
-  // there may be some pages marked scan_on_scavenge that have pointers to new
-  // space that are not in the store buffer.  We must scan them now.  As we
-  // scan, the surviving pointers to new space will be added to the store
-  // buffer.  If there are still a lot of pointers to new space then we will
-  // keep the scan_on_scavenge flag on the page and discard the pointers that
-  // were added to the store buffer.  If there are not many pointers to new
-  // space left on the page we will keep the pointers in the store buffer and
-  // remove the flag from the page.
-  if (some_pages_to_scan) {
-    if (callback_ != NULL) {
-      (*callback_)(heap_, NULL, kStoreBufferStartScanningPagesEvent);
-    }
-    PointerChunkIterator it(heap_);
-    MemoryChunk* chunk;
-    while ((chunk = it.next()) != NULL) {
-      if (chunk->scan_on_scavenge()) {
-        chunk->set_scan_on_scavenge(false);
-        if (callback_ != NULL) {
-          (*callback_)(heap_, chunk, kStoreBufferScanningPageEvent);
-        }
-        if (chunk->owner() == heap_->lo_space()) {
-          LargePage* large_page = reinterpret_cast<LargePage*>(chunk);
-          HeapObject* array = large_page->GetObject();
-          ASSERT(array->IsFixedArray());
-          Address start = array->address();
-          Address end = start + array->Size();
-          FindPointersToNewSpaceInRegion(start, end, slot_callback, clear_maps);
-        } else {
-          Page* page = reinterpret_cast<Page*>(chunk);
-          PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
-          Address start = page->area_start();
-          Address end = page->area_end();
-          if (owner == heap_->map_space()) {
-            FindPointersToNewSpaceInMapsRegion(
-                start, end, slot_callback, clear_maps);
-          } else {
-            FindPointersToNewSpaceInRegion(
-                start, end, slot_callback, clear_maps);
-          }
-        }
-      }
-    }
-    if (callback_ != NULL) {
-      (*callback_)(heap_, NULL, kStoreBufferScanningPageEvent);
-    }
-  }
-}
-
-
-void StoreBuffer::Compact() {
-  Address* top = reinterpret_cast<Address*>(heap_->store_buffer_top());
-
-  if (top == start_) return;
-
-  // There's no check of the limit in the loop below, so we check here for
-  // the worst case (compaction doesn't eliminate any pointers).
-  ASSERT(top <= limit_);
-  heap_->public_set_store_buffer_top(start_);
-  EnsureSpace(top - start_);
-  ASSERT(may_move_store_buffer_entries_);
-  // Goes through the addresses in the store buffer attempting to remove
-  // duplicates.  In the interest of speed this is a lossy operation.  Some
-  // duplicates will remain.  We have two hash sets with different hash
-  // functions to reduce the number of unnecessary clashes.
-  hash_sets_are_empty_ = false;  // Hash sets are in use.
-  for (Address* current = start_; current < top; current++) {
-    ASSERT(!heap_->cell_space()->Contains(*current));
-    ASSERT(!heap_->code_space()->Contains(*current));
-    ASSERT(!heap_->old_data_space()->Contains(*current));
-    uintptr_t int_addr = reinterpret_cast<uintptr_t>(*current);
-    // Shift out the last bits including any tags.
-    int_addr >>= kPointerSizeLog2;
-    // The upper part of an address is basically random because of ASLR and OS
-    // non-determinism, so we use only the bits within a page for hashing to
-    // make v8's behavior (more) deterministic.
-    uintptr_t hash_addr =
-        int_addr & (Page::kPageAlignmentMask >> kPointerSizeLog2);
-    int hash1 = ((hash_addr ^ (hash_addr >> kHashSetLengthLog2)) &
-                 (kHashSetLength - 1));
-    if (hash_set_1_[hash1] == int_addr) continue;
-    uintptr_t hash2 = (hash_addr - (hash_addr >> kHashSetLengthLog2));
-    hash2 ^= hash2 >> (kHashSetLengthLog2 * 2);
-    hash2 &= (kHashSetLength - 1);
-    if (hash_set_2_[hash2] == int_addr) continue;
-    if (hash_set_1_[hash1] == 0) {
-      hash_set_1_[hash1] = int_addr;
-    } else if (hash_set_2_[hash2] == 0) {
-      hash_set_2_[hash2] = int_addr;
-    } else {
-      // Rather than slowing down we just throw away some entries.  This will
-      // cause some duplicates to remain undetected.
-      hash_set_1_[hash1] = int_addr;
-      hash_set_2_[hash2] = 0;
-    }
-    old_buffer_is_sorted_ = false;
-    old_buffer_is_filtered_ = false;
-    *old_top_++ = reinterpret_cast<Address>(int_addr << kPointerSizeLog2);
-    ASSERT(old_top_ <= old_limit_);
-  }
-  heap_->isolate()->counters()->store_buffer_compactions()->Increment();
-}
-
-} }  // namespace v8::internal
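
Compact(), deleted above, deduplicates lossily: each address is probed against two direct-mapped hash sets with different hash functions, and on a double collision an old entry is evicted rather than probing further. A compact sketch of the same policy over plain integers (the hash mixing here is illustrative, not V8's exact functions; zero doubles as the empty-slot sentinel, as in the original):

#include <cstddef>
#include <cstdint>
#include <vector>

class LossyDedup {
 public:
  // Returns true if `value` was (probably) not seen before.
  bool Insert(uintptr_t value) {
    size_t h1 = (value ^ (value >> 12)) & kMask;
    size_t h2 = ((value - (value >> 12)) ^ (value >> 24)) & kMask;
    if (set1_[h1] == value || set2_[h2] == value) return false;  // duplicate
    if (set1_[h1] == 0) {
      set1_[h1] = value;
    } else if (set2_[h2] == 0) {
      set2_[h2] = value;
    } else {
      // Both slots occupied: evict instead of probing further (lossy).
      // Some duplicates will slip through undetected, which is acceptable.
      set1_[h1] = value;
      set2_[h2] = 0;
    }
    return true;
  }

 private:
  static constexpr size_t kLength = 1 << 12;  // cf. kHashSetLength
  static constexpr size_t kMask = kLength - 1;
  std::vector<uintptr_t> set1_ = std::vector<uintptr_t>(kLength, 0);
  std::vector<uintptr_t> set2_ = std::vector<uintptr_t>(kLength, 0);
};
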
diff --git a/src/store-buffer.h b/src/store-buffer.h
deleted file mode 100644
index d6de2aa..0000000
--- a/src/store-buffer.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STORE_BUFFER_H_
-#define V8_STORE_BUFFER_H_
-
-#include "src/allocation.h"
-#include "src/checks.h"
-#include "src/globals.h"
-#include "src/platform.h"
-
-namespace v8 {
-namespace internal {
-
-class Page;
-class PagedSpace;
-class StoreBuffer;
-
-typedef void (*ObjectSlotCallback)(HeapObject** from, HeapObject* to);
-
-typedef void (StoreBuffer::*RegionCallback)(Address start,
-                                            Address end,
-                                            ObjectSlotCallback slot_callback,
-                                            bool clear_maps);
-
-// Used to implement the write barrier by collecting addresses of pointers
-// between spaces.
-class StoreBuffer {
- public:
-  explicit StoreBuffer(Heap* heap);
-
-  static void StoreBufferOverflow(Isolate* isolate);
-
-  inline Address TopAddress();
-
-  void SetUp();
-  void TearDown();
-
-  // This is used by the mutator to enter addresses into the store buffer.
-  inline void Mark(Address addr);
-
-  // This is used by the heap traversal to enter the addresses into the store
-  // buffer that should still be in the store buffer after GC.  It enters
-  // addresses directly into the old buffer because the GC starts by wiping the
-  // old buffer and thereafter only visits each cell once so there is no need
-  // to attempt to remove any dupes.  During the first part of a GC we
-  // are using the store buffer to access the old spaces and at the same time
-  // we are rebuilding the store buffer using this function.  There is,
-  // however, no issue of overwriting the buffer we are iterating over: this
-  // stage of the scavenge can only reduce the number of addresses in the store
-  // buffer (some objects are promoted so pointers to them do not need to be in
-  // the store buffer).  The later parts of the GC scan the pages that are
-  // exempt from the store buffer and process the promotion queue.  These steps
-  // can overflow this buffer.  We check for this and on overflow we call the
-  // callback set up with the StoreBufferRebuildScope object.
-  inline void EnterDirectlyIntoStoreBuffer(Address addr);
-
-  // Iterates over all pointers that go from old space to new space.  It will
-  // delete the store buffer as it starts so the callback should reenter
-  // surviving old-to-new pointers into the store buffer to rebuild it.
-  void IteratePointersToNewSpace(ObjectSlotCallback callback);
-
-  // Same as IteratePointersToNewSpace but additionally clears maps in objects
-  // referenced from the store buffer that do not contain a forwarding pointer.
-  void IteratePointersToNewSpaceAndClearMaps(ObjectSlotCallback callback);
-
-  static const int kStoreBufferOverflowBit = 1 << (14 + kPointerSizeLog2);
-  static const int kStoreBufferSize = kStoreBufferOverflowBit;
-  static const int kStoreBufferLength = kStoreBufferSize / sizeof(Address);
-  static const int kOldStoreBufferLength = kStoreBufferLength * 16;
-  static const int kHashSetLengthLog2 = 12;
-  static const int kHashSetLength = 1 << kHashSetLengthLog2;
-
-  void Compact();
-
-  void GCPrologue();
-  void GCEpilogue();
-
-  Object*** Limit() { return reinterpret_cast<Object***>(old_limit_); }
-  Object*** Start() { return reinterpret_cast<Object***>(old_start_); }
-  Object*** Top() { return reinterpret_cast<Object***>(old_top_); }
-  void SetTop(Object*** top) {
-    ASSERT(top >= Start());
-    ASSERT(top <= Limit());
-    old_top_ = reinterpret_cast<Address*>(top);
-  }
-
-  bool old_buffer_is_sorted() { return old_buffer_is_sorted_; }
-  bool old_buffer_is_filtered() { return old_buffer_is_filtered_; }
-
-  // Goes through the store buffer removing pointers to things that have
-  // been promoted.  Rebuilds the store buffer completely if it overflowed.
-  void SortUniq();
-
-  void EnsureSpace(intptr_t space_needed);
-  void Verify();
-
-  bool PrepareForIteration();
-
-#ifdef DEBUG
-  void Clean();
-  // Slow, for asserts only.
-  bool CellIsInStoreBuffer(Address cell);
-#endif
-
-  void Filter(int flag);
-
- private:
-  Heap* heap_;
-
-  // The store buffer is divided up into a new buffer that is constantly being
-  // filled by mutator activity and an old buffer that is filled with the data
-  // from the new buffer after compression.
-  Address* start_;
-  Address* limit_;
-
-  Address* old_start_;
-  Address* old_limit_;
-  Address* old_top_;
-  Address* old_reserved_limit_;
-  VirtualMemory* old_virtual_memory_;
-
-  bool old_buffer_is_sorted_;
-  bool old_buffer_is_filtered_;
-  bool during_gc_;
-  // The garbage collector iterates over many pointers to new space that are not
-  // handled by the store buffer.  This flag indicates whether the pointers
-  // found by the callbacks should be added to the store buffer or not.
-  bool store_buffer_rebuilding_enabled_;
-  StoreBufferCallback callback_;
-  bool may_move_store_buffer_entries_;
-
-  VirtualMemory* virtual_memory_;
-
-  // Two hash sets used for filtering.
-  // If an address is in the hash set, then it is guaranteed to be in the
-  // old part of the store buffer.
-  uintptr_t* hash_set_1_;
-  uintptr_t* hash_set_2_;
-  bool hash_sets_are_empty_;
-
-  void ClearFilteringHashSets();
-
-  bool SpaceAvailable(intptr_t space_needed);
-  void Uniq();
-  void ExemptPopularPages(int prime_sample_step, int threshold);
-
-  // Set the map field of the object to NULL if it contains a map.
-  inline void ClearDeadObject(HeapObject *object);
-
-  void IteratePointersToNewSpace(ObjectSlotCallback callback, bool clear_maps);
-
-  void FindPointersToNewSpaceInRegion(Address start,
-                                      Address end,
-                                      ObjectSlotCallback slot_callback,
-                                      bool clear_maps);
-
-  // For each region of pointers on a page in use from an old space, call the
-  // visit_pointer_region callback.
-  // If either visit_pointer_region or callback can cause an allocation in
-  // old space and a change in the allocation watermark, then
-  // can_preallocate_during_iteration should be set to true.
-  void IteratePointersOnPage(
-      PagedSpace* space,
-      Page* page,
-      RegionCallback region_callback,
-      ObjectSlotCallback slot_callback);
-
-  void FindPointersToNewSpaceInMaps(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback,
-    bool clear_maps);
-
-  void FindPointersToNewSpaceInMapsRegion(
-    Address start,
-    Address end,
-    ObjectSlotCallback slot_callback,
-    bool clear_maps);
-
-  void IteratePointersInStoreBuffer(ObjectSlotCallback slot_callback,
-                                    bool clear_maps);
-
-#ifdef VERIFY_HEAP
-  void VerifyPointers(LargeObjectSpace* space);
-#endif
-
-  friend class StoreBufferRebuildScope;
-  friend class DontMoveStoreBufferEntriesScope;
-};
-
-
-class StoreBufferRebuildScope {
- public:
-  explicit StoreBufferRebuildScope(Heap* heap,
-                                   StoreBuffer* store_buffer,
-                                   StoreBufferCallback callback)
-      : store_buffer_(store_buffer),
-        stored_state_(store_buffer->store_buffer_rebuilding_enabled_),
-        stored_callback_(store_buffer->callback_) {
-    store_buffer_->store_buffer_rebuilding_enabled_ = true;
-    store_buffer_->callback_ = callback;
-    (*callback)(heap, NULL, kStoreBufferStartScanningPagesEvent);
-  }
-
-  ~StoreBufferRebuildScope() {
-    store_buffer_->callback_ = stored_callback_;
-    store_buffer_->store_buffer_rebuilding_enabled_ = stored_state_;
-  }
-
- private:
-  StoreBuffer* store_buffer_;
-  bool stored_state_;
-  StoreBufferCallback stored_callback_;
-};
-
-
-class DontMoveStoreBufferEntriesScope {
- public:
-  explicit DontMoveStoreBufferEntriesScope(StoreBuffer* store_buffer)
-      : store_buffer_(store_buffer),
-        stored_state_(store_buffer->may_move_store_buffer_entries_) {
-    store_buffer_->may_move_store_buffer_entries_ = false;
-  }
-
-  ~DontMoveStoreBufferEntriesScope() {
-    store_buffer_->may_move_store_buffer_entries_ = stored_state_;
-  }
-
- private:
-  StoreBuffer* store_buffer_;
-  bool stored_state_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_STORE_BUFFER_H_
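
The kStoreBufferOverflowBit layout above makes the mutator's full-buffer check a single AND: the buffer is aligned so that no interior slot address has the bit set while the one-past-the-end address does. A tiny illustration of that invariant (constants scaled down, not V8's actual values):

#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kOverflowBit = uintptr_t{1} << 15;
  // Suppose the buffer occupies [base, base + kOverflowBit) with base
  // aligned to 2 * kOverflowBit: interior addresses have the bit clear,
  // and only the limit address has it set.
  uintptr_t base = 4 * kOverflowBit;  // some 2*kOverflowBit-aligned base
  uintptr_t limit = base + kOverflowBit;
  for (uintptr_t p = base; p < limit; p += sizeof(void*)) {
    assert((p & kOverflowBit) == 0);  // interior slot: bit clear
  }
  assert((limit & kOverflowBit) != 0);  // one past the end: bit set
  return 0;
}
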
diff --git a/src/string-iterator.js b/src/string-iterator.js
new file mode 100644
index 0000000..cb578e7
--- /dev/null
+++ b/src/string-iterator.js
@@ -0,0 +1,107 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $String = global.String;
+
+
+var stringIteratorIteratedStringSymbol =
+    GLOBAL_PRIVATE("StringIterator#iteratedString");
+var stringIteratorNextIndexSymbol = GLOBAL_PRIVATE("StringIterator#next");
+
+
+function StringIterator() {}
+
+
+// 21.1.5.1 CreateStringIterator Abstract Operation
+function CreateStringIterator(string) {
+  var s = TO_STRING_INLINE(string);
+  var iterator = new StringIterator;
+  SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol, s);
+  SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, 0);
+  return iterator;
+}
+
+
+// 21.1.5.2.2 %StringIteratorPrototype%[@@iterator]
+function StringIteratorIterator() {
+  return this;
+}
+
+
+// 21.1.5.2.1 %StringIteratorPrototype%.next( )
+function StringIteratorNext() {
+  var iterator = ToObject(this);
+
+  if (!HAS_DEFINED_PRIVATE(iterator, stringIteratorNextIndexSymbol)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['String Iterator.prototype.next']);
+  }
+
+  var s = GET_PRIVATE(iterator, stringIteratorIteratedStringSymbol);
+  if (IS_UNDEFINED(s)) {
+    return CreateIteratorResultObject(UNDEFINED, true);
+  }
+
+  var position = GET_PRIVATE(iterator, stringIteratorNextIndexSymbol);
+  var length = TO_UINT32(s.length);
+
+  if (position >= length) {
+    SET_PRIVATE(iterator, stringIteratorIteratedStringSymbol,
+                UNDEFINED);
+    return CreateIteratorResultObject(UNDEFINED, true);
+  }
+
+  var first = %_StringCharCodeAt(s, position);
+  var resultString = %_StringCharFromCode(first);
+  position++;
+
+  if (first >= 0xD800 && first <= 0xDBFF && position < length) {
+    var second = %_StringCharCodeAt(s, position);
+    if (second >= 0xDC00 && second <= 0xDFFF) {
+      resultString += %_StringCharFromCode(second);
+      position++;
+    }
+  }
+
+  SET_PRIVATE(iterator, stringIteratorNextIndexSymbol, position);
+
+  return CreateIteratorResultObject(resultString, false);
+}
+
+
+function SetUpStringIterator() {
+  %CheckIsBootstrapping();
+
+  %FunctionSetPrototype(StringIterator, new $Object());
+  %FunctionSetInstanceClassName(StringIterator, 'String Iterator');
+
+  InstallFunctions(StringIterator.prototype, DONT_ENUM, $Array(
+    'next', StringIteratorNext
+  ));
+  %FunctionSetName(StringIteratorIterator, '[Symbol.iterator]');
+  %AddNamedProperty(StringIterator.prototype, symbolIterator,
+                    StringIteratorIterator, DONT_ENUM);
+}
+SetUpStringIterator();
+
+
+// 21.1.3.27 String.prototype [ @@iterator ]( )
+function StringPrototypeIterator() {
+  return CreateStringIterator(this);
+}
+
+
+function ExtendStringPrototypeWithIterator() {
+  %CheckIsBootstrapping();
+
+  %FunctionSetName(StringPrototypeIterator, '[Symbol.iterator]');
+  %AddNamedProperty($String.prototype, symbolIterator,
+                    StringPrototypeIterator, DONT_ENUM);
+}
+ExtendStringPrototypeWithIterator();
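
StringIteratorNext() above advances by Unicode code points: a lead surrogate (0xD800-0xDBFF) followed by a trail surrogate (0xDC00-0xDFFF) is returned as a single two-unit result. A standalone sketch of the same advance rule over raw UTF-16 code units:

#include <cstddef>
#include <cstdint>
#include <vector>

// Advance `position` past one code point; returns how many UTF-16 units
// (1 or 2) that code point occupied.
int AdvanceOneCodePoint(const std::vector<uint16_t>& units,
                        size_t* position) {
  uint16_t first = units[*position];
  ++*position;
  if (first >= 0xD800 && first <= 0xDBFF && *position < units.size()) {
    uint16_t second = units[*position];
    if (second >= 0xDC00 && second <= 0xDFFF) {
      ++*position;  // well-formed surrogate pair: consume both units
      return 2;
    }
  }
  return 1;  // BMP character or unpaired surrogate
}
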
diff --git a/src/string-search.cc b/src/string-search.cc
index 1f0eb7e..0c18762 100644
--- a/src/string-search.cc
+++ b/src/string-search.cc
@@ -3,6 +3,7 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
+
 #include "src/string-search.h"
 
 namespace v8 {
diff --git a/src/string-search.h b/src/string-search.h
index 09bc36e..bf5ffe6 100644
--- a/src/string-search.h
+++ b/src/string-search.h
@@ -30,7 +30,7 @@
   // a potentially less efficient search, but is a safe approximation.
   // For needles using only characters in the same Unicode 256-code point page,
   // there is no search speed degradation.
-  static const int kAsciiAlphabetSize = 256;
+  static const int kLatin1AlphabetSize = 256;
   static const int kUC16AlphabetSize = Isolate::kUC16AlphabetSize;
 
   // Bad-char shift table stored in the state. Its length is the alphabet size.
@@ -81,10 +81,10 @@
 
   static inline int AlphabetSize() {
     if (sizeof(PatternChar) == 1) {
-      // ASCII needle.
-      return kAsciiAlphabetSize;
+      // Latin1 needle.
+      return kLatin1AlphabetSize;
     } else {
-      ASSERT(sizeof(PatternChar) == 2);
+      DCHECK(sizeof(PatternChar) == 2);
       // UC16 needle.
       return kUC16AlphabetSize;
     }
@@ -196,7 +196,7 @@
     StringSearch<PatternChar, SubjectChar>* search,
     Vector<const SubjectChar> subject,
     int index) {
-  ASSERT_EQ(1, search->pattern_.length());
+  DCHECK_EQ(1, search->pattern_.length());
   PatternChar pattern_first_char = search->pattern_[0];
   int i = index;
   if (sizeof(SubjectChar) == 1 && sizeof(PatternChar) == 1) {
@@ -230,7 +230,7 @@
 inline bool CharCompare(const PatternChar* pattern,
                         const SubjectChar* subject,
                         int length) {
-  ASSERT(length > 0);
+  DCHECK(length > 0);
   int pos = 0;
   do {
     if (pattern[pos] != subject[pos]) {
@@ -249,7 +249,7 @@
     Vector<const SubjectChar> subject,
     int index) {
   Vector<const PatternChar> pattern = search->pattern_;
-  ASSERT(pattern.length() > 1);
+  DCHECK(pattern.length() > 1);
   int pattern_length = pattern.length();
   PatternChar pattern_first_char = pattern[0];
   int i = index;
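
The alphabet-size constants renamed above size the bad-character shift table used by V8's Boyer-Moore family of searchers. A minimal Horspool-style sketch for a one-byte (Latin-1) needle, showing how the table drives the shift on mismatch; this is the general technique, not V8's exact StringSearch code:

#include <cstring>
#include <string>
#include <vector>

// Horspool search: for each byte value, precompute how far the pattern may
// shift when the subject byte aligned with the pattern's last position
// mismatches. Assumes a non-empty pattern.
int HorspoolSearch(const std::string& subject, const std::string& pattern) {
  const int kAlphabetSize = 256;  // one-byte needle, cf. kLatin1AlphabetSize
  int m = static_cast<int>(pattern.size());
  int n = static_cast<int>(subject.size());
  std::vector<int> shift(kAlphabetSize, m);
  for (int i = 0; i < m - 1; i++) {
    shift[static_cast<unsigned char>(pattern[i])] = m - 1 - i;
  }
  for (int pos = 0; pos + m <= n;) {
    if (std::memcmp(subject.data() + pos, pattern.data(), m) == 0) return pos;
    pos += shift[static_cast<unsigned char>(subject[pos + m - 1])];
  }
  return -1;  // not found
}
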
diff --git a/src/string-stream.cc b/src/string-stream.cc
index 930ce3d..42c2af7 100644
--- a/src/string-stream.cc
+++ b/src/string-stream.cc
@@ -5,6 +5,7 @@
 #include "src/string-stream.h"
 
 #include "src/handles-inl.h"
+#include "src/prototype.h"
 
 namespace v8 {
 namespace internal {
@@ -17,16 +18,9 @@
 }
 
 
-NoAllocationStringAllocator::NoAllocationStringAllocator(char* memory,
-                                                         unsigned size) {
-  size_ = size;
-  space_ = memory;
-}
-
-
 bool StringStream::Put(char c) {
   if (full()) return false;
-  ASSERT(length_ < capacity_);
+  DCHECK(length_ < capacity_);
   // Since the trailing '\0' is not accounted for in length_, fullness is
   // indicated by a difference of 1 between length_ and capacity_. Thus when
   // reaching a difference of 2 we need to grow the buffer.
@@ -38,7 +32,7 @@
       buffer_ = new_buffer;
     } else {
       // Reached the end of the available buffer.
-      ASSERT(capacity_ >= 5);
+      DCHECK(capacity_ >= 5);
       length_ = capacity_ - 1;  // Indicate fullness of the stream.
       buffer_[length_ - 4] = '.';
       buffer_[length_ - 3] = '.';
@@ -96,26 +90,26 @@
     FmtElm current = elms[elm++];
     switch (type) {
     case 's': {
-      ASSERT_EQ(FmtElm::C_STR, current.type_);
+      DCHECK_EQ(FmtElm::C_STR, current.type_);
       const char* value = current.data_.u_c_str_;
       Add(value);
       break;
     }
     case 'w': {
-      ASSERT_EQ(FmtElm::LC_STR, current.type_);
+      DCHECK_EQ(FmtElm::LC_STR, current.type_);
       Vector<const uc16> value = *current.data_.u_lc_str_;
       for (int i = 0; i < value.length(); i++)
         Put(static_cast<char>(value[i]));
       break;
     }
     case 'o': {
-      ASSERT_EQ(FmtElm::OBJ, current.type_);
+      DCHECK_EQ(FmtElm::OBJ, current.type_);
       Object* obj = current.data_.u_obj_;
       PrintObject(obj);
       break;
     }
     case 'k': {
-      ASSERT_EQ(FmtElm::INT, current.type_);
+      DCHECK_EQ(FmtElm::INT, current.type_);
       int value = current.data_.u_int_;
       if (0x20 <= value && value <= 0x7F) {
         Put(value);
@@ -135,9 +129,18 @@
     }
     case 'f': case 'g': case 'G': case 'e': case 'E': {
       double value = current.data_.u_double_;
-      EmbeddedVector<char, 28> formatted;
-      SNPrintF(formatted, temp.start(), value);
-      Add(formatted.start());
+      int inf = std::isinf(value);
+      if (inf == -1) {
+        Add("-inf");
+      } else if (inf == 1) {
+        Add("inf");
+      } else if (std::isnan(value)) {
+        Add("nan");
+      } else {
+        EmbeddedVector<char, 28> formatted;
+        SNPrintF(formatted, temp.start(), value);
+        Add(formatted.start());
+      }
       break;
     }
     case 'p': {
@@ -154,7 +157,7 @@
   }
 
   // Verify that the buffer is 0-terminated
-  ASSERT(buffer_[length_] == '\0');
+  DCHECK(buffer_[length_] == '\0');
 }
 
 
@@ -506,11 +509,11 @@
   Object* name = fun->shared()->name();
   bool print_name = false;
   Isolate* isolate = fun->GetIsolate();
-  for (Object* p = receiver;
-       p != isolate->heap()->null_value();
-       p = p->GetPrototype(isolate)) {
-    if (p->IsJSObject()) {
-      Object* key = JSObject::cast(p)->SlowReverseLookup(fun);
+  for (PrototypeIterator iter(isolate, receiver,
+                              PrototypeIterator::START_AT_RECEIVER);
+       !iter.IsAtEnd(); iter.Advance()) {
+    if (iter.GetCurrent()->IsJSObject()) {
+      Object* key = JSObject::cast(iter.GetCurrent())->SlowReverseLookup(fun);
       if (key != isolate->heap()->undefined_value()) {
         if (!name->IsString() ||
             !key->IsString() ||
@@ -555,12 +558,4 @@
 }
 
 
-// Only grow once to the maximum allowable size.
-char* NoAllocationStringAllocator::grow(unsigned* bytes) {
-  ASSERT(size_ >= *bytes);
-  *bytes = size_;
-  return space_;
-}
-
-
 } }  // namespace v8::internal
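
The %f/%g/%e hunk above stops handing non-finite doubles to SNPrintF, whose output for them varies by platform, and prints fixed strings instead. A standalone version of the same guard, assuming plain snprintf:

#include <cmath>
#include <cstddef>
#include <cstdio>

// Format a double through a printf-style format, but special-case
// non-finite values so the output does not depend on the platform's printf.
void FormatDouble(char* out, std::size_t size, const char* format,
                  double value) {
  if (std::isinf(value)) {
    std::snprintf(out, size, "%s", value < 0 ? "-inf" : "inf");
  } else if (std::isnan(value)) {
    std::snprintf(out, size, "%s", "nan");
  } else {
    std::snprintf(out, size, format, value);
  }
}
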
diff --git a/src/string-stream.h b/src/string-stream.h
index d72d5c2..fca1d4b 100644
--- a/src/string-stream.h
+++ b/src/string-stream.h
@@ -24,33 +24,18 @@
 
 
 // Normal allocator uses new[] and delete[].
-class HeapStringAllocator V8_FINAL : public StringAllocator {
+class HeapStringAllocator FINAL : public StringAllocator {
  public:
   ~HeapStringAllocator() { DeleteArray(space_); }
-  virtual char* allocate(unsigned bytes) V8_OVERRIDE;
-  virtual char* grow(unsigned* bytes) V8_OVERRIDE;
+  virtual char* allocate(unsigned bytes) OVERRIDE;
+  virtual char* grow(unsigned* bytes) OVERRIDE;
 
  private:
   char* space_;
 };
 
 
-// Allocator for use when no new C++ heap allocation is allowed.
-// It is given a preallocated buffer up front and does no allocation while
-// building the message.
-class NoAllocationStringAllocator V8_FINAL : public StringAllocator {
- public:
-  NoAllocationStringAllocator(char* memory, unsigned size);
-  virtual char* allocate(unsigned bytes) V8_OVERRIDE { return space_; }
-  virtual char* grow(unsigned* bytes) V8_OVERRIDE;
-
- private:
-  unsigned size_;
-  char* space_;
-};
-
-
-class FmtElm V8_FINAL {
+class FmtElm FINAL {
  public:
   FmtElm(int value) : type_(INT) {  // NOLINT
     data_.u_int_ = value;
@@ -90,7 +75,7 @@
 };
 
 
-class StringStream V8_FINAL {
+class StringStream FINAL {
  public:
   explicit StringStream(StringAllocator* allocator):
     allocator_(allocator),
@@ -168,31 +153,6 @@
   DISALLOW_IMPLICIT_CONSTRUCTORS(StringStream);
 };
 
-
-// Utility class to print a list of items to a stream, divided by a separator.
-class SimpleListPrinter V8_FINAL {
- public:
-  explicit SimpleListPrinter(StringStream* stream, char separator = ',') {
-    separator_ = separator;
-    stream_ = stream;
-    first_ = true;
-  }
-
-  void Add(const char* str) {
-    if (first_) {
-      first_ = false;
-    } else {
-      stream_->Put(separator_);
-    }
-    stream_->Add(str);
-  }
-
- private:
-  bool first_;
-  char separator_;
-  StringStream* stream_;
-};
-
 } }  // namespace v8::internal
 
 #endif  // V8_STRING_STREAM_H_
diff --git a/src/string.js b/src/string.js
index 07cdcc0..ac5cb7f 100644
--- a/src/string.js
+++ b/src/string.js
@@ -9,11 +9,12 @@
 // -------------------------------------------------------------------
 
 function StringConstructor(x) {
-  var value = %_ArgumentsLength() == 0 ? '' : TO_STRING_INLINE(x);
+  if (%_ArgumentsLength() == 0) x = '';
   if (%_IsConstructCall()) {
-    %_SetValueOf(this, value);
+    %_SetValueOf(this, TO_STRING_INLINE(x));
   } else {
-    return value;
+    return IS_SYMBOL(x) ?
+        %_CallFunction(x, SymbolToString) : TO_STRING_INLINE(x);
   }
 }
 
@@ -61,13 +62,13 @@
 
 
 // ECMA-262, section 15.5.4.6
-function StringConcat() {
+function StringConcat(other /* and more */) {  // length == 1
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.concat");
 
   var len = %_ArgumentsLength();
   var this_as_string = TO_STRING_INLINE(this);
   if (len === 1) {
-    return this_as_string + %_Arguments(0);
+    return this_as_string + other;
   }
   var parts = new InternalArray(len + 1);
   parts[0] = this_as_string;
@@ -78,9 +79,6 @@
   return %StringBuilderConcat(parts, len + 1, "");
 }
 
-// Match ES3 and Safari
-%FunctionSetLength(StringConcat, 1);
-
 
 // ECMA-262 section 15.5.4.7
 function StringIndexOfJS(pattern /* position */) {  // length == 1
@@ -709,7 +707,7 @@
 }
 
 
-// This is not a part of ECMA-262.
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.1
 function StringSubstr(start, n) {
   CHECK_OBJECT_COERCIBLE(this, "String.prototype.substr");
 
@@ -815,7 +813,7 @@
     if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
     if (code < 0) code = code & 0xffff;
     if (code > 0xff) break;
-    %_OneByteSeqStringSetChar(one_byte, i, code);
+    %_OneByteSeqStringSetChar(i, code, one_byte);
   }
   if (i == n) return one_byte;
   one_byte = %TruncateString(one_byte, i);
@@ -824,84 +822,105 @@
   for (var j = 0; i < n; i++, j++) {
     var code = %_Arguments(i);
     if (!%_IsSmi(code)) code = ToNumber(code) & 0xffff;
-    %_TwoByteSeqStringSetChar(two_byte, j, code);
+    %_TwoByteSeqStringSetChar(j, code, two_byte);
   }
   return one_byte + two_byte;
 }
 
 
-// Helper function for very basic XSS protection.
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.2.1
 function HtmlEscape(str) {
-  return TO_STRING_INLINE(str).replace(/</g, "&lt;")
-                              .replace(/>/g, "&gt;")
-                              .replace(/"/g, "&quot;")
-                              .replace(/'/g, "&#039;");
+  return TO_STRING_INLINE(str).replace(/"/g, "&quot;");
 }
 
 
-// Compatibility support for KJS.
-// Tested by mozilla/js/tests/js1_5/Regress/regress-276103.js.
-function StringLink(s) {
-  return "<a href=\"" + HtmlEscape(s) + "\">" + this + "</a>";
-}
-
-
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.2
 function StringAnchor(name) {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.anchor");
   return "<a name=\"" + HtmlEscape(name) + "\">" + this + "</a>";
 }
 
 
-function StringFontcolor(color) {
-  return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>";
-}
-
-
-function StringFontsize(size) {
-  return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>";
-}
-
-
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.3
 function StringBig() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.big");
   return "<big>" + this + "</big>";
 }
 
 
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.4
 function StringBlink() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.blink");
   return "<blink>" + this + "</blink>";
 }
 
 
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.5
 function StringBold() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.bold");
   return "<b>" + this + "</b>";
 }
 
 
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.6
 function StringFixed() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.fixed");
   return "<tt>" + this + "</tt>";
 }
 
 
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.7
+function StringFontcolor(color) {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontcolor");
+  return "<font color=\"" + HtmlEscape(color) + "\">" + this + "</font>";
+}
+
+
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.8
+function StringFontsize(size) {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.fontsize");
+  return "<font size=\"" + HtmlEscape(size) + "\">" + this + "</font>";
+}
+
+
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.9
 function StringItalics() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.italics");
   return "<i>" + this + "</i>";
 }
 
 
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.10
+function StringLink(s) {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.link");
+  return "<a href=\"" + HtmlEscape(s) + "\">" + this + "</a>";
+}
+
+
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.11
 function StringSmall() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.small");
   return "<small>" + this + "</small>";
 }
 
 
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.12
 function StringStrike() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.strike");
   return "<strike>" + this + "</strike>";
 }
 
 
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.13
 function StringSub() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.sub");
   return "<sub>" + this + "</sub>";
 }
 
 
+// ES6 draft, revision 26 (2014-07-18), section B.2.3.14
 function StringSup() {
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.sup");
   return "<sup>" + this + "</sup>";
 }
 
@@ -915,7 +934,7 @@
   %FunctionSetPrototype($String, new $String());
 
   // Set up the constructor property on the String prototype object.
-  %SetProperty($String.prototype, "constructor", $String, DONT_ENUM);
+  %AddNamedProperty($String.prototype, "constructor", $String, DONT_ENUM);
 
   // Set up the non-enumerable functions on the String object.
   InstallFunctions($String, DONT_ENUM, $Array(
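
After the reshuffle above, every annex-B String.prototype HTML helper coerces its receiver and escapes only double quotes in attribute values, as ES6 B.2.3.2.1 specifies. A sketch of the shared CreateHTML shape these helpers follow (a C++ transliteration; names are illustrative):

#include <string>

// Escape only '"' as "&quot;", mirroring the new HtmlEscape above.
std::string HtmlEscapeSketch(const std::string& s) {
  std::string out;
  for (char c : s) {
    if (c == '"') {
      out += "&quot;";
    } else {
      out += c;
    }
  }
  return out;
}

// Shared shape of the annex-B helpers: <tag attr="escaped">receiver</tag>.
std::string CreateHTML(const std::string& str, const std::string& tag,
                       const std::string& attr, const std::string& value) {
  std::string open = "<" + tag;
  if (!attr.empty()) {
    open += " " + attr + "=\"" + HtmlEscapeSketch(value) + "\"";
  }
  return open + ">" + str + "</" + tag + ">";
}

// e.g. CreateHTML("v8", "a", "href", "x\"y") == "<a href=\"x&quot;y\">v8</a>"
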
diff --git a/src/strtod.cc b/src/strtod.cc
index 391aebc..2b48af3 100644
--- a/src/strtod.cc
+++ b/src/strtod.cc
@@ -5,12 +5,14 @@
 #include <stdarg.h>
 #include <cmath>
 
-#include "src/globals.h"
-#include "src/utils.h"
-#include "src/strtod.h"
+#include "src/v8.h"
+
 #include "src/bignum.h"
 #include "src/cached-powers.h"
 #include "src/double.h"
+#include "src/globals.h"
+#include "src/strtod.h"
+#include "src/utils.h"
 
 namespace v8 {
 namespace internal {
@@ -61,7 +63,7 @@
   // 10^22 = 0x21e19e0c9bab2400000 = 0x878678326eac9 * 2^22
   10000000000000000000000.0
 };
-static const int kExactPowersOfTenSize = ARRAY_SIZE(exact_powers_of_ten);
+static const int kExactPowersOfTenSize = arraysize(exact_powers_of_ten);
 
 // Maximum number of significant digits in the decimal representation.
 // In fact the value is 772 (see conversions.cc), but to give us some margin
@@ -97,7 +99,7 @@
   }
   // The input buffer has been trimmed. Therefore the last digit must be
   // different from '0'.
-  ASSERT(buffer[buffer.length() - 1] != '0');
+  DCHECK(buffer[buffer.length() - 1] != '0');
   // Set the last digit to be non-zero. This is sufficient to guarantee
   // correct rounding.
   significant_buffer[kMaxSignificantDecimalDigits - 1] = '1';
@@ -117,7 +119,7 @@
   int i = 0;
   while (i < buffer.length() && result <= (kMaxUint64 / 10 - 1)) {
     int digit = buffer[i++] - '0';
-    ASSERT(0 <= digit && digit <= 9);
+    DCHECK(0 <= digit && digit <= 9);
     result = 10 * result + digit;
   }
   *number_of_read_digits = i;
@@ -175,14 +177,14 @@
     if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
       // 10^-exponent fits into a double.
       *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
-      ASSERT(read_digits == trimmed.length());
+      DCHECK(read_digits == trimmed.length());
       *result /= exact_powers_of_ten[-exponent];
       return true;
     }
     if (0 <= exponent && exponent < kExactPowersOfTenSize) {
       // 10^exponent fits into a double.
       *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
-      ASSERT(read_digits == trimmed.length());
+      DCHECK(read_digits == trimmed.length());
       *result *= exact_powers_of_ten[exponent];
       return true;
     }
@@ -194,7 +196,7 @@
       // 10^remaining_digits. As a result the remaining exponent now fits
       // into a double too.
       *result = static_cast<double>(ReadUint64(trimmed, &read_digits));
-      ASSERT(read_digits == trimmed.length());
+      DCHECK(read_digits == trimmed.length());
       *result *= exact_powers_of_ten[remaining_digits];
       *result *= exact_powers_of_ten[exponent - remaining_digits];
       return true;
@@ -207,11 +209,11 @@
 // Returns 10^exponent as an exact DiyFp.
 // The given exponent must be in the range [1; kDecimalExponentDistance[.
 static DiyFp AdjustmentPowerOfTen(int exponent) {
-  ASSERT(0 < exponent);
-  ASSERT(exponent < PowersOfTenCache::kDecimalExponentDistance);
+  DCHECK(0 < exponent);
+  DCHECK(exponent < PowersOfTenCache::kDecimalExponentDistance);
   // Simply hardcode the remaining powers for the given decimal exponent
   // distance.
-  ASSERT(PowersOfTenCache::kDecimalExponentDistance == 8);
+  DCHECK(PowersOfTenCache::kDecimalExponentDistance == 8);
   switch (exponent) {
     case 1: return DiyFp(V8_2PART_UINT64_C(0xa0000000, 00000000), -60);
     case 2: return DiyFp(V8_2PART_UINT64_C(0xc8000000, 00000000), -57);
@@ -245,13 +247,13 @@
   const int kDenominator = 1 << kDenominatorLog;
   // Move the remaining decimals into the exponent.
   exponent += remaining_decimals;
-  int error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
+  int64_t error = (remaining_decimals == 0 ? 0 : kDenominator / 2);
 
   int old_e = input.e();
   input.Normalize();
   error <<= old_e - input.e();
 
-  ASSERT(exponent <= PowersOfTenCache::kMaxDecimalExponent);
+  DCHECK(exponent <= PowersOfTenCache::kMaxDecimalExponent);
   if (exponent < PowersOfTenCache::kMinDecimalExponent) {
     *result = 0.0;
     return true;
@@ -269,7 +271,7 @@
     if (kMaxUint64DecimalDigits - buffer.length() >= adjustment_exponent) {
       // The product of input with the adjustment power fits into a 64 bit
       // integer.
-      ASSERT(DiyFp::kSignificandSize == 64);
+      DCHECK(DiyFp::kSignificandSize == 64);
     } else {
       // The adjustment power is exact. There is hence only an error of 0.5.
       error += kDenominator / 2;
@@ -311,8 +313,8 @@
     precision_digits_count -= shift_amount;
   }
   // We use uint64_ts now. This only works if the DiyFp uses uint64_ts too.
-  ASSERT(DiyFp::kSignificandSize == 64);
-  ASSERT(precision_digits_count < 64);
+  DCHECK(DiyFp::kSignificandSize == 64);
+  DCHECK(precision_digits_count < 64);
   uint64_t one64 = 1;
   uint64_t precision_bits_mask = (one64 << precision_digits_count) - 1;
   uint64_t precision_bits = input.f() & precision_bits_mask;
@@ -356,14 +358,14 @@
 
   DiyFp upper_boundary = Double(guess).UpperBoundary();
 
-  ASSERT(buffer.length() + exponent <= kMaxDecimalPower + 1);
-  ASSERT(buffer.length() + exponent > kMinDecimalPower);
-  ASSERT(buffer.length() <= kMaxSignificantDecimalDigits);
+  DCHECK(buffer.length() + exponent <= kMaxDecimalPower + 1);
+  DCHECK(buffer.length() + exponent > kMinDecimalPower);
+  DCHECK(buffer.length() <= kMaxSignificantDecimalDigits);
   // Make sure that the Bignum will be able to hold all our numbers.
   // Our Bignum implementation has a separate field for exponents. Shifts will
   // consume at most one bigit (< 64 bits).
   // ln(10) == 3.3219...
-  ASSERT(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
+  DCHECK(((kMaxDecimalPower + 1) * 333 / 100) < Bignum::kMaxSignificantBits);
   Bignum input;
   Bignum boundary;
   input.AssignDecimalString(buffer);
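
The strtod hunks above leave the fast path intact: when the significand fits a machine word exactly and the decimal exponent indexes the table of exactly representable powers of ten, a single double multiply or divide gives the correctly rounded answer. A simplified sketch of that path (table truncated here; the real one runs to 10^22, and the significand must stay below 2^53 so the cast is exact):

#include <cstdint>

// Powers of ten that are exactly representable as doubles (truncated).
static const double kExactPowersOfTen[] = {1.0, 10.0, 100.0, 1000.0, 10000.0};
static const int kExactPowersOfTenSize =
    sizeof(kExactPowersOfTen) / sizeof(kExactPowersOfTen[0]);

// Fast path: both the significand and 10^|exponent| are exact doubles, so
// one correctly rounded multiply or divide is the correctly rounded result.
bool FastStrtod(uint64_t significand, int exponent, double* result) {
  if (exponent >= 0 && exponent < kExactPowersOfTenSize) {
    *result = static_cast<double>(significand) * kExactPowersOfTen[exponent];
    return true;
  }
  if (exponent < 0 && -exponent < kExactPowersOfTenSize) {
    *result = static_cast<double>(significand) / kExactPowersOfTen[-exponent];
    return true;
  }
  return false;  // fall back to the DiyFp / Bignum slow paths
}
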
diff --git a/src/stub-cache.cc b/src/stub-cache.cc
deleted file mode 100644
index c15038e..0000000
--- a/src/stub-cache.cc
+++ /dev/null
@@ -1,1435 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#include "src/api.h"
-#include "src/arguments.h"
-#include "src/ast.h"
-#include "src/code-stubs.h"
-#include "src/cpu-profiler.h"
-#include "src/gdb-jit.h"
-#include "src/ic-inl.h"
-#include "src/stub-cache.h"
-#include "src/type-info.h"
-#include "src/vm-state-inl.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------
-// StubCache implementation.
-
-
-StubCache::StubCache(Isolate* isolate)
-    : isolate_(isolate) { }
-
-
-void StubCache::Initialize() {
-  ASSERT(IsPowerOf2(kPrimaryTableSize));
-  ASSERT(IsPowerOf2(kSecondaryTableSize));
-  Clear();
-}
-
-
-Code* StubCache::Set(Name* name, Map* map, Code* code) {
-  // Get the flags from the code.
-  Code::Flags flags = Code::RemoveTypeFromFlags(code->flags());
-
-  // Validate that the name does not move on scavenge, and that we
-  // can use identity checks instead of structural equality checks.
-  ASSERT(!heap()->InNewSpace(name));
-  ASSERT(name->IsUniqueName());
-
-  // The state bits are not important to the hash function because
-  // the stub cache only contains monomorphic stubs. Make sure that
-  // the bits are the least significant so they will be the ones
-  // masked out.
-  ASSERT(Code::ExtractICStateFromFlags(flags) == MONOMORPHIC);
-  STATIC_ASSERT((Code::ICStateField::kMask & 1) == 1);
-
-  // Make sure that the code type is not included in the hash.
-  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Compute the primary entry.
-  int primary_offset = PrimaryOffset(name, flags, map);
-  Entry* primary = entry(primary_, primary_offset);
-  Code* old_code = primary->value;
-
-  // If the primary entry has useful data in it, we retire it to the
-  // secondary cache before overwriting it.
-  if (old_code != isolate_->builtins()->builtin(Builtins::kIllegal)) {
-    Map* old_map = primary->map;
-    Code::Flags old_flags = Code::RemoveTypeFromFlags(old_code->flags());
-    int seed = PrimaryOffset(primary->key, old_flags, old_map);
-    int secondary_offset = SecondaryOffset(primary->key, old_flags, seed);
-    Entry* secondary = entry(secondary_, secondary_offset);
-    *secondary = *primary;
-  }
-
-  // Update primary cache.
-  primary->key = name;
-  primary->value = code;
-  primary->map = map;
-  isolate()->counters()->megamorphic_stub_cache_updates()->Increment();
-  return code;
-}
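
StubCache::Set, deleted above, behaves like a two-level victim cache: a colliding primary entry is rehashed by its own key into the secondary table before being overwritten. A generic sketch of that policy, independent of V8's Code/Map types:

#include <cstddef>

template <typename K, typename V, size_t kPrimarySize, size_t kSecondarySize>
class TwoLevelCache {
 public:
  void Set(K key, V value, size_t key_hash) {
    Entry* primary = &primary_[key_hash % kPrimarySize];
    if (primary->valid) {
      // Demote the occupant, hashed by *its* key, before overwriting it.
      Entry* secondary = &secondary_[primary->hash % kSecondarySize];
      *secondary = *primary;
    }
    *primary = Entry{key, value, key_hash, true};
  }

  bool Get(K key, size_t key_hash, V* out) const {
    const Entry* p = &primary_[key_hash % kPrimarySize];
    if (p->valid && p->key == key) { *out = p->value; return true; }
    const Entry* s = &secondary_[key_hash % kSecondarySize];
    if (s->valid && s->key == key) { *out = s->value; return true; }
    return false;
  }

 private:
  struct Entry { K key; V value; size_t hash; bool valid; };
  Entry primary_[kPrimarySize] = {};
  Entry secondary_[kSecondarySize] = {};
};
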
-
-
-Handle<Code> StubCache::FindIC(Handle<Name> name,
-                               Handle<Map> stub_holder,
-                               Code::Kind kind,
-                               ExtraICState extra_state,
-                               InlineCacheHolderFlag cache_holder) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      kind, extra_state, cache_holder);
-  Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-  return Handle<Code>::null();
-}
-
-
-Handle<Code> StubCache::FindHandler(Handle<Name> name,
-                                    Handle<Map> stub_holder,
-                                    Code::Kind kind,
-                                    InlineCacheHolderFlag cache_holder,
-                                    Code::StubType type) {
-  Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder);
-
-  Handle<Object> probe(stub_holder->FindInCodeCache(*name, flags), isolate_);
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-  return Handle<Code>::null();
-}
-
-
-Handle<Code> StubCache::ComputeMonomorphicIC(
-    Code::Kind kind,
-    Handle<Name> name,
-    Handle<HeapType> type,
-    Handle<Code> handler,
-    ExtraICState extra_ic_state) {
-  InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
-
-  Handle<Map> stub_holder;
-  Handle<Code> ic;
-  // There are multiple string maps that all use the same prototype. That
-  // prototype cannot hold multiple handlers, one for each of the string maps,
-  // for a single name. Hence, turn off caching of the IC.
-  bool can_be_cached = !type->Is(HeapType::String());
-  if (can_be_cached) {
-    stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
-    ic = FindIC(name, stub_holder, kind, extra_ic_state, flag);
-    if (!ic.is_null()) return ic;
-  }
-
-  if (kind == Code::LOAD_IC) {
-    LoadStubCompiler ic_compiler(isolate(), extra_ic_state, flag);
-    ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
-  } else if (kind == Code::KEYED_LOAD_IC) {
-    KeyedLoadStubCompiler ic_compiler(isolate(), extra_ic_state, flag);
-    ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
-  } else if (kind == Code::STORE_IC) {
-    StoreStubCompiler ic_compiler(isolate(), extra_ic_state);
-    ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
-  } else {
-    ASSERT(kind == Code::KEYED_STORE_IC);
-    ASSERT(STANDARD_STORE ==
-           KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
-    KeyedStoreStubCompiler ic_compiler(isolate(), extra_ic_state);
-    ic = ic_compiler.CompileMonomorphicIC(type, handler, name);
-  }
-
-  if (can_be_cached) Map::UpdateCodeCache(stub_holder, name, ic);
-  return ic;
-}
-
-
-Handle<Code> StubCache::ComputeLoadNonexistent(Handle<Name> name,
-                                               Handle<HeapType> type) {
-  InlineCacheHolderFlag flag = IC::GetCodeCacheFlag(*type);
-  Handle<Map> stub_holder = IC::GetCodeCacheHolder(flag, *type, isolate());
-  // If no dictionary mode objects are present in the prototype chain, the load
-  // nonexistent IC stub can be shared for all names for a given map and we use
-  // the empty string for the map cache in that case. If there are dictionary
-  // mode objects involved, we need to do negative lookups in the stub and
-  // therefore the stub will be specific to the name.
-  Handle<Map> current_map = stub_holder;
-  Handle<Name> cache_name = current_map->is_dictionary_map()
-      ? name : Handle<Name>::cast(isolate()->factory()->nonexistent_symbol());
-  Handle<Object> next(current_map->prototype(), isolate());
-  Handle<JSObject> last = Handle<JSObject>::null();
-  while (!next->IsNull()) {
-    last = Handle<JSObject>::cast(next);
-    next = handle(current_map->prototype(), isolate());
-    current_map = handle(Handle<HeapObject>::cast(next)->map());
-    if (current_map->is_dictionary_map()) cache_name = name;
-  }
-
-  // Compile the stub that is either shared for all names or
-  // name-specific if there are dictionary mode objects involved.
-  Handle<Code> handler = FindHandler(
-      cache_name, stub_holder, Code::LOAD_IC, flag, Code::FAST);
-  if (!handler.is_null()) {
-    return handler;
-  }
-
-  LoadStubCompiler compiler(isolate_, kNoExtraICState, flag);
-  handler = compiler.CompileLoadNonexistent(type, last, cache_name);
-  Map::UpdateCodeCache(stub_holder, cache_name, handler);
-  return handler;
-}
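
The key-selection rule above is easy to misread, so here is a minimal standalone sketch of the same idea with toy types (nothing here is V8 API): the stub is keyed by the concrete name only when a dictionary-mode map appears somewhere in the chain, and by a shared sentinel otherwise.

#include <string>

// Toy stand-in for a map with a prototype link; not the real V8 Map.
struct ToyMap { bool is_dictionary; const ToyMap* prototype; };

// Returns the cache key: the concrete name if any map along the chain is in
// dictionary mode (negative lookups are then name-specific), else a shared
// sentinel so that all names reuse one stub per map.
std::string CacheNameFor(const ToyMap* map, const std::string& name) {
  static const std::string kNonexistentSentinel = "<nonexistent>";
  for (const ToyMap* m = map; m != nullptr; m = m->prototype) {
    if (m->is_dictionary) return name;
  }
  return kNonexistentSentinel;
}
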
-
-
-Handle<Code> StubCache::ComputeKeyedLoadElement(Handle<Map> receiver_map) {
-  Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
-  Handle<Name> name =
-      isolate()->factory()->KeyedLoadElementMonomorphic_string();
-
-  Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedLoadStubCompiler compiler(isolate());
-  Handle<Code> code = compiler.CompileLoadElement(receiver_map);
-
-  Map::UpdateCodeCache(receiver_map, name, code);
-  return code;
-}
-
-
-Handle<Code> StubCache::ComputeKeyedStoreElement(
-    Handle<Map> receiver_map,
-    StrictMode strict_mode,
-    KeyedAccessStoreMode store_mode) {
-  ExtraICState extra_state =
-      KeyedStoreIC::ComputeExtraICState(strict_mode, store_mode);
-  Code::Flags flags = Code::ComputeMonomorphicFlags(
-      Code::KEYED_STORE_IC, extra_state);
-
-  ASSERT(store_mode == STANDARD_STORE ||
-         store_mode == STORE_AND_GROW_NO_TRANSITION ||
-         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-
-  Handle<String> name =
-      isolate()->factory()->KeyedStoreElementMonomorphic_string();
-  Handle<Object> probe(receiver_map->FindInCodeCache(*name, flags), isolate_);
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedStoreStubCompiler compiler(isolate(), extra_state);
-  Handle<Code> code = compiler.CompileStoreElement(receiver_map);
-
-  Map::UpdateCodeCache(receiver_map, name, code);
-  ASSERT(KeyedStoreIC::GetKeyedAccessStoreMode(code->extra_ic_state())
-         == store_mode);
-  return code;
-}
-
-
-#define CALL_LOGGER_TAG(kind, type) (Logger::KEYED_##type)
-
-static void FillCache(Isolate* isolate, Handle<Code> code) {
-  Handle<UnseededNumberDictionary> dictionary =
-      UnseededNumberDictionary::Set(isolate->factory()->non_monomorphic_cache(),
-                                    code->flags(),
-                                    code);
-  isolate->heap()->public_set_non_monomorphic_cache(*dictionary);
-}
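
FillCache above and the Compute{Load,Store} helpers below form a classic get-or-compile cache keyed purely by the code's flag word. A toy model of that contract, with assumed stand-in types rather than the real V8 ones:

#include <cstdint>
#include <functional>
#include <unordered_map>

// Toy stand-in for a compiled Code object; not the real V8 type.
struct ToyCode { uint32_t flags; };

class ToyNonMonomorphicCache {
 public:
  // Returns the stub cached for |flags|, compiling and inserting it on the
  // first request (the FillCache step) and reusing it afterwards.
  ToyCode* GetOrCompile(uint32_t flags,
                        const std::function<ToyCode*(uint32_t)>& compile) {
    auto it = cache_.find(flags);
    if (it != cache_.end()) return it->second;  // cache hit
    ToyCode* code = compile(flags);             // first request: compile once
    cache_.emplace(flags, code);                // FillCache equivalent
    return code;
  }

 private:
  std::unordered_map<uint32_t, ToyCode*> cache_;
};

The real cache is an UnseededNumberDictionary on the heap keyed by code->flags(), but the get-or-compile discipline is the same.
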
-
-
-Code* StubCache::FindPreMonomorphicIC(Code::Kind kind, ExtraICState state) {
-  Code::Flags flags = Code::ComputeFlags(kind, PREMONOMORPHIC, state);
-  UnseededNumberDictionary* dictionary =
-      isolate()->heap()->non_monomorphic_cache();
-  int entry = dictionary->FindEntry(isolate(), flags);
-  ASSERT(entry != -1);
-  Object* code = dictionary->ValueAt(entry);
-  // This might be called during the marking phase of the collector,
-  // hence the unchecked cast.
-  return reinterpret_cast<Code*>(code);
-}
-
-
-Handle<Code> StubCache::ComputeLoad(InlineCacheState ic_state,
-                                    ExtraICState extra_state) {
-  Code::Flags flags = Code::ComputeFlags(Code::LOAD_IC, ic_state, extra_state);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code;
-  if (ic_state == UNINITIALIZED) {
-    code = compiler.CompileLoadInitialize(flags);
-  } else if (ic_state == PREMONOMORPHIC) {
-    code = compiler.CompileLoadPreMonomorphic(flags);
-  } else if (ic_state == MEGAMORPHIC) {
-    code = compiler.CompileLoadMegamorphic(flags);
-  } else {
-    UNREACHABLE();
-  }
-  FillCache(isolate_, code);
-  return code;
-}
-
-
-Handle<Code> StubCache::ComputeStore(InlineCacheState ic_state,
-                                     ExtraICState extra_state) {
-  Code::Flags flags = Code::ComputeFlags(Code::STORE_IC, ic_state, extra_state);
-  Handle<UnseededNumberDictionary> cache =
-      isolate_->factory()->non_monomorphic_cache();
-  int entry = cache->FindEntry(isolate_, flags);
-  if (entry != -1) return Handle<Code>(Code::cast(cache->ValueAt(entry)));
-
-  StubCompiler compiler(isolate_);
-  Handle<Code> code;
-  if (ic_state == UNINITIALIZED) {
-    code = compiler.CompileStoreInitialize(flags);
-  } else if (ic_state == PREMONOMORPHIC) {
-    code = compiler.CompileStorePreMonomorphic(flags);
-  } else if (ic_state == GENERIC) {
-    code = compiler.CompileStoreGeneric(flags);
-  } else if (ic_state == MEGAMORPHIC) {
-    code = compiler.CompileStoreMegamorphic(flags);
-  } else {
-    UNREACHABLE();
-  }
-
-  FillCache(isolate_, code);
-  return code;
-}
-
-
-Handle<Code> StubCache::ComputeCompareNil(Handle<Map> receiver_map,
-                                          CompareNilICStub* stub) {
-  Handle<String> name(isolate_->heap()->empty_string());
-  if (!receiver_map->is_shared()) {
-    Handle<Code> cached_ic = FindIC(name, receiver_map, Code::COMPARE_NIL_IC,
-                                    stub->GetExtraICState());
-    if (!cached_ic.is_null()) return cached_ic;
-  }
-
-  Code::FindAndReplacePattern pattern;
-  pattern.Add(isolate_->factory()->meta_map(), receiver_map);
-  Handle<Code> ic = stub->GetCodeCopy(pattern);
-
-  if (!receiver_map->is_shared()) {
-    Map::UpdateCodeCache(receiver_map, name, ic);
-  }
-
-  return ic;
-}
-
-
-// TODO(verwaest): Change this method so it takes in a TypeHandleList.
-Handle<Code> StubCache::ComputeLoadElementPolymorphic(
-    MapHandleList* receiver_maps) {
-  Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
-  Handle<PolymorphicCodeCache> cache =
-      isolate_->factory()->polymorphic_code_cache();
-  Handle<Object> probe = cache->Lookup(receiver_maps, flags);
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  TypeHandleList types(receiver_maps->length());
-  for (int i = 0; i < receiver_maps->length(); i++) {
-    types.Add(HeapType::Class(receiver_maps->at(i), isolate()));
-  }
-  CodeHandleList handlers(receiver_maps->length());
-  KeyedLoadStubCompiler compiler(isolate_);
-  compiler.CompileElementHandlers(receiver_maps, &handlers);
-  Handle<Code> code = compiler.CompilePolymorphicIC(
-      &types, &handlers, factory()->empty_string(), Code::NORMAL, ELEMENT);
-
-  isolate()->counters()->keyed_load_polymorphic_stubs()->Increment();
-
-  PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
-  return code;
-}
-
-
-Handle<Code> StubCache::ComputePolymorphicIC(
-    Code::Kind kind,
-    TypeHandleList* types,
-    CodeHandleList* handlers,
-    int number_of_valid_types,
-    Handle<Name> name,
-    ExtraICState extra_ic_state) {
-  Handle<Code> handler = handlers->at(0);
-  Code::StubType type = number_of_valid_types == 1 ? handler->type()
-                                                   : Code::NORMAL;
-  if (kind == Code::LOAD_IC) {
-    LoadStubCompiler ic_compiler(isolate_, extra_ic_state);
-    return ic_compiler.CompilePolymorphicIC(
-        types, handlers, name, type, PROPERTY);
-  } else {
-    ASSERT(kind == Code::STORE_IC);
-    StoreStubCompiler ic_compiler(isolate_, extra_ic_state);
-    return ic_compiler.CompilePolymorphicIC(
-        types, handlers, name, type, PROPERTY);
-  }
-}
-
-
-Handle<Code> StubCache::ComputeStoreElementPolymorphic(
-    MapHandleList* receiver_maps,
-    KeyedAccessStoreMode store_mode,
-    StrictMode strict_mode) {
-  ASSERT(store_mode == STANDARD_STORE ||
-         store_mode == STORE_AND_GROW_NO_TRANSITION ||
-         store_mode == STORE_NO_TRANSITION_IGNORE_OUT_OF_BOUNDS ||
-         store_mode == STORE_NO_TRANSITION_HANDLE_COW);
-  Handle<PolymorphicCodeCache> cache =
-      isolate_->factory()->polymorphic_code_cache();
-  ExtraICState extra_state = KeyedStoreIC::ComputeExtraICState(
-      strict_mode, store_mode);
-  Code::Flags flags =
-      Code::ComputeFlags(Code::KEYED_STORE_IC, POLYMORPHIC, extra_state);
-  Handle<Object> probe = cache->Lookup(receiver_maps, flags);
-  if (probe->IsCode()) return Handle<Code>::cast(probe);
-
-  KeyedStoreStubCompiler compiler(isolate_, extra_state);
-  Handle<Code> code = compiler.CompileStoreElementPolymorphic(receiver_maps);
-  PolymorphicCodeCache::Update(cache, receiver_maps, flags, code);
-  return code;
-}
-
-
-void StubCache::Clear() {
-  Code* empty = isolate_->builtins()->builtin(Builtins::kIllegal);
-  for (int i = 0; i < kPrimaryTableSize; i++) {
-    primary_[i].key = heap()->empty_string();
-    primary_[i].map = NULL;
-    primary_[i].value = empty;
-  }
-  for (int j = 0; j < kSecondaryTableSize; j++) {
-    secondary_[j].key = heap()->empty_string();
-    secondary_[j].map = NULL;
-    secondary_[j].value = empty;
-  }
-}
-
-
-void StubCache::CollectMatchingMaps(SmallMapList* types,
-                                    Handle<Name> name,
-                                    Code::Flags flags,
-                                    Handle<Context> native_context,
-                                    Zone* zone) {
-  for (int i = 0; i < kPrimaryTableSize; i++) {
-    if (primary_[i].key == *name) {
-      Map* map = primary_[i].map;
-      // Map can be NULL if the stub is a constant-function call
-      // with a primitive receiver.
-      if (map == NULL) continue;
-
-      int offset = PrimaryOffset(*name, flags, map);
-      if (entry(primary_, offset) == &primary_[i] &&
-          !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
-        types->AddMapIfMissing(Handle<Map>(map), zone);
-      }
-    }
-  }
-
-  for (int i = 0; i < kSecondaryTableSize; i++) {
-    if (secondary_[i].key == *name) {
-      Map* map = secondary_[i].map;
-      // Map can be NULL if the stub is a constant-function call
-      // with a primitive receiver.
-      if (map == NULL) continue;
-
-      // Lookup in primary table and skip duplicates.
-      int primary_offset = PrimaryOffset(*name, flags, map);
-
-      // Lookup in secondary table and add matches.
-      int offset = SecondaryOffset(*name, flags, primary_offset);
-      if (entry(secondary_, offset) == &secondary_[i] &&
-          !TypeFeedbackOracle::CanRetainOtherContext(map, *native_context)) {
-        types->AddMapIfMissing(Handle<Map>(map), zone);
-      }
-    }
-  }
-}
-
-
-// ------------------------------------------------------------------------
-// StubCompiler implementation.
-
-
-RUNTIME_FUNCTION(StoreCallbackProperty) {
-  JSObject* receiver = JSObject::cast(args[0]);
-  JSObject* holder = JSObject::cast(args[1]);
-  ExecutableAccessorInfo* callback = ExecutableAccessorInfo::cast(args[2]);
-  Address setter_address = v8::ToCData<Address>(callback->setter());
-  v8::AccessorSetterCallback fun =
-      FUNCTION_CAST<v8::AccessorSetterCallback>(setter_address);
-  ASSERT(fun != NULL);
-  ASSERT(callback->IsCompatibleReceiver(receiver));
-  Handle<Name> name = args.at<Name>(3);
-  Handle<Object> value = args.at<Object>(4);
-  HandleScope scope(isolate);
-
-  // TODO(rossberg): Support symbols in the API.
-  if (name->IsSymbol()) return *value;
-  Handle<String> str = Handle<String>::cast(name);
-
-  LOG(isolate, ApiNamedPropertyAccess("store", receiver, *name));
-  PropertyCallbackArguments
-      custom_args(isolate, callback->data(), receiver, holder);
-  custom_args.Call(fun, v8::Utils::ToLocal(str), v8::Utils::ToLocal(value));
-  RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-  return *value;
-}
-
-
-/**
- * Attempts to load a property with an interceptor (which must be present),
- * but doesn't search the prototype chain.
- *
- * Returns |Heap::no_interceptor_result_sentinel()| if the interceptor doesn't
- * provide any value for the given name.
- */
-RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
-  ASSERT(args.length() == StubCache::kInterceptorArgsLength);
-  Handle<Name> name_handle =
-      args.at<Name>(StubCache::kInterceptorArgsNameIndex);
-  Handle<InterceptorInfo> interceptor_info =
-      args.at<InterceptorInfo>(StubCache::kInterceptorArgsInfoIndex);
-
-  // TODO(rossberg): Support symbols in the API.
-  if (name_handle->IsSymbol())
-    return isolate->heap()->no_interceptor_result_sentinel();
-  Handle<String> name = Handle<String>::cast(name_handle);
-
-  Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
-  v8::NamedPropertyGetterCallback getter =
-      FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
-  ASSERT(getter != NULL);
-
-  Handle<JSObject> receiver =
-      args.at<JSObject>(StubCache::kInterceptorArgsThisIndex);
-  Handle<JSObject> holder =
-      args.at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
-  PropertyCallbackArguments callback_args(
-      isolate, interceptor_info->data(), *receiver, *holder);
-  {
-    // Use the interceptor getter.
-    HandleScope scope(isolate);
-    v8::Handle<v8::Value> r =
-        callback_args.Call(getter, v8::Utils::ToLocal(name));
-    RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-    if (!r.IsEmpty()) {
-      Handle<Object> result = v8::Utils::OpenHandle(*r);
-      result->VerifyApiCallResultType();
-      return *v8::Utils::OpenHandle(*r);
-    }
-  }
-
-  return isolate->heap()->no_interceptor_result_sentinel();
-}
-
-
-static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
-  // If the load is non-contextual, just return the undefined result.
-  // Note that both keyed and non-keyed loads may end up here.
-  HandleScope scope(isolate);
-  LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
-  if (ic.contextual_mode() != CONTEXTUAL) {
-    return isolate->heap()->undefined_value();
-  }
-
-  // Throw a reference error.
-  Handle<Name> name_handle(name);
-  Handle<Object> error =
-      isolate->factory()->NewReferenceError("not_defined",
-                                            HandleVector(&name_handle, 1));
-  return isolate->Throw(*error);
-}
-
-
-/**
- * Loads a property with an interceptor, performing a post-interceptor
- * lookup if the interceptor failed.
- */
-RUNTIME_FUNCTION(LoadPropertyWithInterceptor) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == StubCache::kInterceptorArgsLength);
-  Handle<Name> name =
-      args.at<Name>(StubCache::kInterceptorArgsNameIndex);
-  Handle<JSObject> receiver =
-      args.at<JSObject>(StubCache::kInterceptorArgsThisIndex);
-  Handle<JSObject> holder =
-      args.at<JSObject>(StubCache::kInterceptorArgsHolderIndex);
-
-  Handle<Object> result;
-  LookupIterator it(receiver, name, holder);
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result, JSObject::GetProperty(&it));
-
-  if (it.IsFound()) return *result;
-
-  return ThrowReferenceError(isolate, Name::cast(args[0]));
-}
-
-
-RUNTIME_FUNCTION(StoreInterceptorProperty) {
-  HandleScope scope(isolate);
-  ASSERT(args.length() == 3);
-  StoreIC ic(IC::NO_EXTRA_FRAME, isolate);
-  Handle<JSObject> receiver = args.at<JSObject>(0);
-  Handle<Name> name = args.at<Name>(1);
-  Handle<Object> value = args.at<Object>(2);
-  ASSERT(receiver->HasNamedInterceptor());
-  PropertyAttributes attr = NONE;
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSObject::SetPropertyWithInterceptor(
-          receiver, name, value, attr, ic.strict_mode()));
-  return *result;
-}
-
-
-RUNTIME_FUNCTION(KeyedLoadPropertyWithInterceptor) {
-  HandleScope scope(isolate);
-  Handle<JSObject> receiver = args.at<JSObject>(0);
-  ASSERT(args.smi_at(1) >= 0);
-  uint32_t index = args.smi_at(1);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
-      isolate, result,
-      JSObject::GetElementWithInterceptor(receiver, receiver, index));
-  return *result;
-}
-
-
-Handle<Code> StubCompiler::CompileLoadInitialize(Code::Flags flags) {
-  LoadIC::GenerateInitialize(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadInitialize");
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::LOAD_INITIALIZE_TAG, *code, 0));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
-  return code;
-}
-
-
-Handle<Code> StubCompiler::CompileLoadPreMonomorphic(Code::Flags flags) {
-  LoadIC::GeneratePreMonomorphic(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadPreMonomorphic");
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::LOAD_PREMONOMORPHIC_TAG, *code, 0));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
-  return code;
-}
-
-
-Handle<Code> StubCompiler::CompileLoadMegamorphic(Code::Flags flags) {
-  LoadIC::GenerateMegamorphic(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileLoadMegamorphic");
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::LOAD_MEGAMORPHIC_TAG, *code, 0));
-  GDBJIT(AddCode(GDBJITInterface::LOAD_IC, *code));
-  return code;
-}
-
-
-Handle<Code> StubCompiler::CompileStoreInitialize(Code::Flags flags) {
-  StoreIC::GenerateInitialize(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreInitialize");
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::STORE_INITIALIZE_TAG, *code, 0));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
-  return code;
-}
-
-
-Handle<Code> StubCompiler::CompileStorePreMonomorphic(Code::Flags flags) {
-  StoreIC::GeneratePreMonomorphic(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileStorePreMonomorphic");
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::STORE_PREMONOMORPHIC_TAG, *code, 0));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
-  return code;
-}
-
-
-Handle<Code> StubCompiler::CompileStoreGeneric(Code::Flags flags) {
-  ExtraICState extra_state = Code::ExtractExtraICStateFromFlags(flags);
-  StrictMode strict_mode = StoreIC::GetStrictMode(extra_state);
-  StoreIC::GenerateRuntimeSetProperty(masm(), strict_mode);
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreGeneric");
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::STORE_GENERIC_TAG, *code, 0));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
-  return code;
-}
-
-
-Handle<Code> StubCompiler::CompileStoreMegamorphic(Code::Flags flags) {
-  StoreIC::GenerateMegamorphic(masm());
-  Handle<Code> code = GetCodeWithFlags(flags, "CompileStoreMegamorphic");
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::STORE_MEGAMORPHIC_TAG, *code, 0));
-  GDBJIT(AddCode(GDBJITInterface::STORE_IC, *code));
-  return code;
-}
-
-
-#undef CALL_LOGGER_TAG
-
-
-Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
-                                            const char* name) {
-  // Create code object in the heap.
-  CodeDesc desc;
-  masm_.GetCode(&desc);
-  Handle<Code> code = factory()->NewCode(desc, flags, masm_.CodeObject());
-  if (code->has_major_key()) {
-    code->set_major_key(CodeStub::NoCache);
-  }
-#ifdef ENABLE_DISASSEMBLER
-  if (FLAG_print_code_stubs) code->Disassemble(name);
-#endif
-  return code;
-}
-
-
-Handle<Code> StubCompiler::GetCodeWithFlags(Code::Flags flags,
-                                            Handle<Name> name) {
-  return (FLAG_print_code_stubs && !name.is_null() && name->IsString())
-      ? GetCodeWithFlags(flags, Handle<String>::cast(name)->ToCString().get())
-      : GetCodeWithFlags(flags, NULL);
-}
-
-
-void StubCompiler::LookupPostInterceptor(Handle<JSObject> holder,
-                                         Handle<Name> name,
-                                         LookupResult* lookup) {
-  holder->LookupOwnRealNamedProperty(name, lookup);
-  if (lookup->IsFound()) return;
-  if (holder->GetPrototype()->IsNull()) return;
-  holder->GetPrototype()->Lookup(name, lookup);
-}
-
-
-#define __ ACCESS_MASM(masm())
-
-
-Register LoadStubCompiler::HandlerFrontendHeader(
-    Handle<HeapType> type,
-    Register object_reg,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Label* miss) {
-  PrototypeCheckType check_type = CHECK_ALL_MAPS;
-  int function_index = -1;
-  if (type->Is(HeapType::String())) {
-    function_index = Context::STRING_FUNCTION_INDEX;
-  } else if (type->Is(HeapType::Symbol())) {
-    function_index = Context::SYMBOL_FUNCTION_INDEX;
-  } else if (type->Is(HeapType::Number())) {
-    function_index = Context::NUMBER_FUNCTION_INDEX;
-  } else if (type->Is(HeapType::Boolean())) {
-    function_index = Context::BOOLEAN_FUNCTION_INDEX;
-  } else {
-    check_type = SKIP_RECEIVER;
-  }
-
-  if (check_type == CHECK_ALL_MAPS) {
-    GenerateDirectLoadGlobalFunctionPrototype(
-        masm(), function_index, scratch1(), miss);
-    Object* function = isolate()->native_context()->get(function_index);
-    Object* prototype = JSFunction::cast(function)->instance_prototype();
-    type = IC::CurrentTypeOf(handle(prototype, isolate()), isolate());
-    object_reg = scratch1();
-  }
-
-  // Check that the maps starting from the prototype haven't changed.
-  return CheckPrototypes(
-      type, object_reg, holder, scratch1(), scratch2(), scratch3(),
-      name, miss, check_type);
-}
-
-
-// The HandlerFrontend for stores uses the name register. It has to be
-// restored before a miss.
-Register StoreStubCompiler::HandlerFrontendHeader(
-    Handle<HeapType> type,
-    Register object_reg,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Label* miss) {
-  return CheckPrototypes(type, object_reg, holder, this->name(),
-                         scratch1(), scratch2(), name, miss, SKIP_RECEIVER);
-}
-
-
-bool BaseLoadStoreStubCompiler::IncludesNumberType(TypeHandleList* types) {
-  for (int i = 0; i < types->length(); ++i) {
-    if (types->at(i)->Is(HeapType::Number())) return true;
-  }
-  return false;
-}
-
-
-Register BaseLoadStoreStubCompiler::HandlerFrontend(Handle<HeapType> type,
-                                                    Register object_reg,
-                                                    Handle<JSObject> holder,
-                                                    Handle<Name> name) {
-  Label miss;
-
-  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-
-  HandlerFrontendFooter(name, &miss);
-
-  return reg;
-}
-
-
-void LoadStubCompiler::NonexistentHandlerFrontend(Handle<HeapType> type,
-                                                  Handle<JSObject> last,
-                                                  Handle<Name> name) {
-  Label miss;
-
-  Register holder;
-  Handle<Map> last_map;
-  if (last.is_null()) {
-    holder = receiver();
-    last_map = IC::TypeToMap(*type, isolate());
-    // If |type| has null as its prototype, |last| is Handle<JSObject>::null().
-    ASSERT(last_map->prototype() == isolate()->heap()->null_value());
-  } else {
-    holder = HandlerFrontendHeader(type, receiver(), last, name, &miss);
-    last_map = handle(last->map());
-  }
-
-  if (last_map->is_dictionary_map() &&
-      !last_map->IsJSGlobalObjectMap() &&
-      !last_map->IsJSGlobalProxyMap()) {
-    if (!name->IsUniqueName()) {
-      ASSERT(name->IsString());
-      name = factory()->InternalizeString(Handle<String>::cast(name));
-    }
-    ASSERT(last.is_null() ||
-           last->property_dictionary()->FindEntry(name) ==
-               NameDictionary::kNotFound);
-    GenerateDictionaryNegativeLookup(masm(), &miss, holder, name,
-                                     scratch2(), scratch3());
-  }
-
-  // If the last object in the prototype chain is a global object,
-  // check that the global property cell is empty.
-  if (last_map->IsJSGlobalObjectMap()) {
-    Handle<JSGlobalObject> global = last.is_null()
-        ? Handle<JSGlobalObject>::cast(type->AsConstant()->Value())
-        : Handle<JSGlobalObject>::cast(last);
-    GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
-  }
-
-  HandlerFrontendFooter(name, &miss);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadField(
-    Handle<HeapType> type,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    FieldIndex field,
-    Representation representation) {
-  Register reg = HandlerFrontend(type, receiver(), holder, name);
-  GenerateLoadField(reg, holder, field, representation);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadConstant(
-    Handle<HeapType> type,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<Object> value) {
-  HandlerFrontend(type, receiver(), holder, name);
-  GenerateLoadConstant(value);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
-    Handle<HeapType> type,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
-  Register reg = CallbackHandlerFrontend(
-      type, receiver(), holder, name, callback);
-  GenerateLoadCallback(reg, callback);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadCallback(
-    Handle<HeapType> type,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    const CallOptimization& call_optimization) {
-  ASSERT(call_optimization.is_simple_api_call());
-  Handle<JSFunction> callback = call_optimization.constant_function();
-  CallbackHandlerFrontend(type, receiver(), holder, name, callback);
-  Handle<Map> receiver_map = IC::TypeToMap(*type, isolate());
-  GenerateFastApiCall(
-      masm(), call_optimization, receiver_map,
-      receiver(), scratch1(), false, 0, NULL);
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadInterceptor(
-    Handle<HeapType> type,
-    Handle<JSObject> holder,
-    Handle<Name> name) {
-  LookupResult lookup(isolate());
-  LookupPostInterceptor(holder, name, &lookup);
-
-  Register reg = HandlerFrontend(type, receiver(), holder, name);
-  // TODO(368): Compile in the whole chain: all the interceptors in
-  // prototypes and ultimate answer.
-  GenerateLoadInterceptor(reg, type, holder, &lookup, name);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-void LoadStubCompiler::GenerateLoadPostInterceptor(
-    Register interceptor_reg,
-    Handle<JSObject> interceptor_holder,
-    Handle<Name> name,
-    LookupResult* lookup) {
-  Handle<JSObject> holder(lookup->holder());
-  if (lookup->IsField()) {
-    FieldIndex field = lookup->GetFieldIndex();
-    if (interceptor_holder.is_identical_to(holder)) {
-      GenerateLoadField(
-          interceptor_reg, holder, field, lookup->representation());
-    } else {
-      // We found a FIELD property in the prototype chain of the
-      // interceptor's holder. Retrieve the field from the field's holder.
-      Register reg = HandlerFrontend(
-          IC::CurrentTypeOf(interceptor_holder, isolate()),
-          interceptor_reg, holder, name);
-      GenerateLoadField(
-          reg, holder, field, lookup->representation());
-    }
-  } else {
-    // We found a CALLBACKS property in the prototype chain of the
-    // interceptor's holder.
-    ASSERT(lookup->type() == CALLBACKS);
-    Handle<ExecutableAccessorInfo> callback(
-        ExecutableAccessorInfo::cast(lookup->GetCallbackObject()));
-    ASSERT(callback->getter() != NULL);
-
-    Register reg = CallbackHandlerFrontend(
-        IC::CurrentTypeOf(interceptor_holder, isolate()),
-        interceptor_reg, holder, name, callback);
-    GenerateLoadCallback(reg, callback);
-  }
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::CompileMonomorphicIC(
-    Handle<HeapType> type,
-    Handle<Code> handler,
-    Handle<Name> name) {
-  TypeHandleList types(1);
-  CodeHandleList handlers(1);
-  types.Add(type);
-  handlers.Add(handler);
-  Code::StubType stub_type = handler->type();
-  return CompilePolymorphicIC(&types, &handlers, name, stub_type, PROPERTY);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadViaGetter(
-    Handle<HeapType> type,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<JSFunction> getter) {
-  HandlerFrontend(type, receiver(), holder, name);
-  GenerateLoadViaGetter(masm(), type, receiver(), getter);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreTransition(
-    Handle<JSObject> object,
-    LookupResult* lookup,
-    Handle<Map> transition,
-    Handle<Name> name) {
-  Label miss, slow;
-
-  // Ensure no transitions to deprecated maps are followed.
-  __ CheckMapDeprecated(transition, scratch1(), &miss);
-
-  // Check that we are allowed to write this.
-  if (object->GetPrototype()->IsJSObject()) {
-    Handle<JSObject> holder;
-    // holder == object indicates that no property was found.
-    if (lookup->holder() != *object) {
-      holder = Handle<JSObject>(lookup->holder());
-    } else {
-      // Find the top object.
-      holder = object;
-      do {
-        holder = Handle<JSObject>(JSObject::cast(holder->GetPrototype()));
-      } while (holder->GetPrototype()->IsJSObject());
-    }
-
-    Register holder_reg = HandlerFrontendHeader(
-        IC::CurrentTypeOf(object, isolate()), receiver(), holder, name, &miss);
-
-    // If no property was found, and the holder (the last object in the
-    // prototype chain) is in slow mode, we need to do a negative lookup on the
-    // holder.
-    if (lookup->holder() == *object) {
-      GenerateNegativeHolderLookup(masm(), holder, holder_reg, name, &miss);
-    }
-  }
-
-  GenerateStoreTransition(masm(),
-                          object,
-                          lookup,
-                          transition,
-                          name,
-                          receiver(), this->name(), value(),
-                          scratch1(), scratch2(), scratch3(),
-                          &miss,
-                          &slow);
-
-  // Handle store cache miss.
-  GenerateRestoreName(masm(), &miss, name);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  GenerateRestoreName(masm(), &slow, name);
-  TailCallBuiltin(masm(), SlowBuiltin(kind()));
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreField(Handle<JSObject> object,
-                                                  LookupResult* lookup,
-                                                  Handle<Name> name) {
-  Label miss;
-
-  HandlerFrontendHeader(IC::CurrentTypeOf(object, isolate()),
-                        receiver(), object, name, &miss);
-
-  // Generate store field code.
-  GenerateStoreField(masm(),
-                     object,
-                     lookup,
-                     receiver(), this->name(), value(), scratch1(), scratch2(),
-                     &miss);
-
-  // Handle store cache miss.
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreArrayLength(Handle<JSObject> object,
-                                                        LookupResult* lookup,
-                                                        Handle<Name> name) {
-  // This accepts as a receiver anything JSArray::SetElementsLength accepts
-  // (currently anything except external arrays, which means anything with
-  // elements of FixedArray type). The value must be a number, but only smis
-  // are accepted as they are the most common case.
-  Label miss;
-
-  // Check that value is a smi.
-  __ JumpIfNotSmi(value(), &miss);
-
-  // Generate tail call to StoreIC_ArrayLength.
-  GenerateStoreArrayLength();
-
-  // Handle miss case.
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreViaSetter(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<JSFunction> setter) {
-  Handle<HeapType> type = IC::CurrentTypeOf(object, isolate());
-  HandlerFrontend(type, receiver(), holder, name);
-  GenerateStoreViaSetter(masm(), type, receiver(), setter);
-
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    const CallOptimization& call_optimization) {
-  HandlerFrontend(IC::CurrentTypeOf(object, isolate()),
-                  receiver(), holder, name);
-  Register values[] = { value() };
-  GenerateFastApiCall(
-      masm(), call_optimization, handle(object->map()),
-      receiver(), scratch1(), true, 1, values);
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Handle<Code> KeyedLoadStubCompiler::CompileLoadElement(
-    Handle<Map> receiver_map) {
-  ElementsKind elements_kind = receiver_map->elements_kind();
-  if (receiver_map->has_fast_elements() ||
-      receiver_map->has_external_array_elements() ||
-      receiver_map->has_fixed_typed_array_elements()) {
-    Handle<Code> stub = KeyedLoadFastElementStub(
-        isolate(),
-        receiver_map->instance_type() == JS_ARRAY_TYPE,
-        elements_kind).GetCode();
-    __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
-  } else {
-    Handle<Code> stub = FLAG_compiled_keyed_dictionary_loads
-        ? KeyedLoadDictionaryElementStub(isolate()).GetCode()
-        : KeyedLoadDictionaryElementPlatformStub(isolate()).GetCode();
-    __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
-  }
-
-  TailCallBuiltin(masm(), Builtins::kKeyedLoadIC_Miss);
-
-  // Return the generated code.
-  return GetICCode(kind(), Code::NORMAL, factory()->empty_string());
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElement(
-    Handle<Map> receiver_map) {
-  ElementsKind elements_kind = receiver_map->elements_kind();
-  bool is_jsarray = receiver_map->instance_type() == JS_ARRAY_TYPE;
-  Handle<Code> stub;
-  if (receiver_map->has_fast_elements() ||
-      receiver_map->has_external_array_elements() ||
-      receiver_map->has_fixed_typed_array_elements()) {
-    stub = KeyedStoreFastElementStub(
-        isolate(),
-        is_jsarray,
-        elements_kind,
-        store_mode()).GetCode();
-  } else {
-    stub = KeyedStoreElementStub(isolate(),
-                                 is_jsarray,
-                                 elements_kind,
-                                 store_mode()).GetCode();
-  }
-
-  __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
-
-  TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
-
-  // Return the generated code.
-  return GetICCode(kind(), Code::NORMAL, factory()->empty_string());
-}
-
-
-#undef __
-
-
-void StubCompiler::TailCallBuiltin(MacroAssembler* masm, Builtins::Name name) {
-  Handle<Code> code(masm->isolate()->builtins()->builtin(name));
-  GenerateTailCall(masm, code);
-}
-
-
-void BaseLoadStoreStubCompiler::JitEvent(Handle<Name> name, Handle<Code> code) {
-#ifdef ENABLE_GDB_JIT_INTERFACE
-  GDBJITInterface::CodeTag tag;
-  if (kind_ == Code::LOAD_IC) {
-    tag = GDBJITInterface::LOAD_IC;
-  } else if (kind_ == Code::KEYED_LOAD_IC) {
-    tag = GDBJITInterface::KEYED_LOAD_IC;
-  } else if (kind_ == Code::STORE_IC) {
-    tag = GDBJITInterface::STORE_IC;
-  } else {
-    tag = GDBJITInterface::KEYED_STORE_IC;
-  }
-  GDBJIT(AddCode(tag, *name, *code));
-#endif
-}
-
-
-void BaseLoadStoreStubCompiler::InitializeRegisters() {
-  if (kind_ == Code::LOAD_IC) {
-    registers_ = LoadStubCompiler::registers();
-  } else if (kind_ == Code::KEYED_LOAD_IC) {
-    registers_ = KeyedLoadStubCompiler::registers();
-  } else if (kind_ == Code::STORE_IC) {
-    registers_ = StoreStubCompiler::registers();
-  } else {
-    registers_ = KeyedStoreStubCompiler::registers();
-  }
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::GetICCode(Code::Kind kind,
-                                                  Code::StubType type,
-                                                  Handle<Name> name,
-                                                  InlineCacheState state) {
-  Code::Flags flags = Code::ComputeFlags(kind, state, extra_state(), type);
-  Handle<Code> code = GetCodeWithFlags(flags, name);
-  IC::RegisterWeakMapDependency(code);
-  PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
-  JitEvent(name, code);
-  return code;
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::GetCode(Code::Kind kind,
-                                                Code::StubType type,
-                                                Handle<Name> name) {
-  ASSERT_EQ(kNoExtraICState, extra_state());
-  Code::Flags flags = Code::ComputeHandlerFlags(kind, type, cache_holder_);
-  Handle<Code> code = GetCodeWithFlags(flags, name);
-  PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
-  JitEvent(name, code);
-  return code;
-}
-
-
-void KeyedLoadStubCompiler::CompileElementHandlers(MapHandleList* receiver_maps,
-                                                   CodeHandleList* handlers) {
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    Handle<Map> receiver_map = receiver_maps->at(i);
-    Handle<Code> cached_stub;
-
-    if ((receiver_map->instance_type() & kNotStringTag) == 0) {
-      cached_stub = isolate()->builtins()->KeyedLoadIC_String();
-    } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
-      cached_stub = isolate()->builtins()->KeyedLoadIC_Slow();
-    } else {
-      bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-      ElementsKind elements_kind = receiver_map->elements_kind();
-
-      if (IsFastElementsKind(elements_kind) ||
-          IsExternalArrayElementsKind(elements_kind) ||
-          IsFixedTypedArrayElementsKind(elements_kind)) {
-        cached_stub =
-            KeyedLoadFastElementStub(isolate(),
-                                     is_js_array,
-                                     elements_kind).GetCode();
-      } else if (elements_kind == SLOPPY_ARGUMENTS_ELEMENTS) {
-        cached_stub = isolate()->builtins()->KeyedLoadIC_SloppyArguments();
-      } else {
-        ASSERT(elements_kind == DICTIONARY_ELEMENTS);
-        cached_stub =
-            KeyedLoadDictionaryElementStub(isolate()).GetCode();
-      }
-    }
-
-    handlers->Add(cached_stub);
-  }
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStoreElementPolymorphic(
-    MapHandleList* receiver_maps) {
-  // Collect MONOMORPHIC stubs for all |receiver_maps|.
-  CodeHandleList handlers(receiver_maps->length());
-  MapHandleList transitioned_maps(receiver_maps->length());
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    Handle<Map> receiver_map(receiver_maps->at(i));
-    Handle<Code> cached_stub;
-    Handle<Map> transitioned_map =
-        receiver_map->FindTransitionedMap(receiver_maps);
-
-    // TODO(mvstanton): The code below is doing pessimistic elements
-    // transitions. I would like to stop doing that and rely on Allocation Site
-    // Tracking to do a better job of ensuring the data types are what they need
-    // to be. Until all the pieces are in place, though, pessimistic elements
-    // transitions are still important for performance.
-    bool is_js_array = receiver_map->instance_type() == JS_ARRAY_TYPE;
-    ElementsKind elements_kind = receiver_map->elements_kind();
-    if (!transitioned_map.is_null()) {
-      cached_stub = ElementsTransitionAndStoreStub(
-          isolate(),
-          elements_kind,
-          transitioned_map->elements_kind(),
-          is_js_array,
-          store_mode()).GetCode();
-    } else if (receiver_map->instance_type() < FIRST_JS_RECEIVER_TYPE) {
-      cached_stub = isolate()->builtins()->KeyedStoreIC_Slow();
-    } else {
-      if (receiver_map->has_fast_elements() ||
-          receiver_map->has_external_array_elements() ||
-          receiver_map->has_fixed_typed_array_elements()) {
-        cached_stub = KeyedStoreFastElementStub(
-            isolate(),
-            is_js_array,
-            elements_kind,
-            store_mode()).GetCode();
-      } else {
-        cached_stub = KeyedStoreElementStub(
-            isolate(),
-            is_js_array,
-            elements_kind,
-            store_mode()).GetCode();
-      }
-    }
-    ASSERT(!cached_stub.is_null());
-    handlers.Add(cached_stub);
-    transitioned_maps.Add(transitioned_map);
-  }
-  Handle<Code> code =
-      CompileStorePolymorphic(receiver_maps, &handlers, &transitioned_maps);
-  isolate()->counters()->keyed_store_polymorphic_stubs()->Increment();
-  PROFILE(isolate(),
-          CodeCreateEvent(Logger::KEYED_STORE_POLYMORPHIC_IC_TAG, *code, 0));
-  return code;
-}
-
-
-void KeyedStoreStubCompiler::GenerateStoreDictionaryElement(
-    MacroAssembler* masm) {
-  KeyedStoreIC::GenerateSlow(masm);
-}
-
-
-CallOptimization::CallOptimization(LookupResult* lookup) {
-  if (lookup->IsFound() &&
-      lookup->IsCacheable() &&
-      lookup->IsConstantFunction()) {
-    // We only optimize constant function calls.
-    Initialize(Handle<JSFunction>(lookup->GetConstantFunction()));
-  } else {
-    Initialize(Handle<JSFunction>::null());
-  }
-}
-
-
-CallOptimization::CallOptimization(Handle<JSFunction> function) {
-  Initialize(function);
-}
-
-
-Handle<JSObject> CallOptimization::LookupHolderOfExpectedType(
-    Handle<Map> object_map,
-    HolderLookup* holder_lookup) const {
-  ASSERT(is_simple_api_call());
-  if (!object_map->IsJSObjectMap()) {
-    *holder_lookup = kHolderNotFound;
-    return Handle<JSObject>::null();
-  }
-  if (expected_receiver_type_.is_null() ||
-      expected_receiver_type_->IsTemplateFor(*object_map)) {
-    *holder_lookup = kHolderIsReceiver;
-    return Handle<JSObject>::null();
-  }
-  while (true) {
-    if (!object_map->prototype()->IsJSObject()) break;
-    Handle<JSObject> prototype(JSObject::cast(object_map->prototype()));
-    if (!prototype->map()->is_hidden_prototype()) break;
-    object_map = handle(prototype->map());
-    if (expected_receiver_type_->IsTemplateFor(*object_map)) {
-      *holder_lookup = kHolderFound;
-      return prototype;
-    }
-  }
-  *holder_lookup = kHolderNotFound;
-  return Handle<JSObject>::null();
-}
-
-
-bool CallOptimization::IsCompatibleReceiver(Handle<Object> receiver,
-                                            Handle<JSObject> holder) const {
-  ASSERT(is_simple_api_call());
-  if (!receiver->IsJSObject()) return false;
-  Handle<Map> map(JSObject::cast(*receiver)->map());
-  HolderLookup holder_lookup;
-  Handle<JSObject> api_holder =
-      LookupHolderOfExpectedType(map, &holder_lookup);
-  switch (holder_lookup) {
-    case kHolderNotFound:
-      return false;
-    case kHolderIsReceiver:
-      return true;
-    case kHolderFound:
-      if (api_holder.is_identical_to(holder)) return true;
-      // Check if |holder| is in the prototype chain of |api_holder|.
-      {
-        JSObject* object = *api_holder;
-        while (true) {
-          Object* prototype = object->map()->prototype();
-          if (!prototype->IsJSObject()) return false;
-          if (prototype == *holder) return true;
-          object = JSObject::cast(prototype);
-        }
-      }
-      break;
-  }
-  UNREACHABLE();
-  return false;
-}
-
-
-void CallOptimization::Initialize(Handle<JSFunction> function) {
-  constant_function_ = Handle<JSFunction>::null();
-  is_simple_api_call_ = false;
-  expected_receiver_type_ = Handle<FunctionTemplateInfo>::null();
-  api_call_info_ = Handle<CallHandlerInfo>::null();
-
-  if (function.is_null() || !function->is_compiled()) return;
-
-  constant_function_ = function;
-  AnalyzePossibleApiFunction(function);
-}
-
-
-void CallOptimization::AnalyzePossibleApiFunction(Handle<JSFunction> function) {
-  if (!function->shared()->IsApiFunction()) return;
-  Handle<FunctionTemplateInfo> info(function->shared()->get_api_func_data());
-
-  // Require a C++ callback.
-  if (info->call_code()->IsUndefined()) return;
-  api_call_info_ =
-      Handle<CallHandlerInfo>(CallHandlerInfo::cast(info->call_code()));
-
-  // Accept signatures that either have no restrictions at all or
-  // only have restrictions on the receiver.
-  if (!info->signature()->IsUndefined()) {
-    Handle<SignatureInfo> signature =
-        Handle<SignatureInfo>(SignatureInfo::cast(info->signature()));
-    if (!signature->args()->IsUndefined()) return;
-    if (!signature->receiver()->IsUndefined()) {
-      expected_receiver_type_ =
-          Handle<FunctionTemplateInfo>(
-              FunctionTemplateInfo::cast(signature->receiver()));
-    }
-  }
-
-  is_simple_api_call_ = true;
-}
-
-
-} }  // namespace v8::internal
diff --git a/src/stub-cache.h b/src/stub-cache.h
deleted file mode 100644
index 9f2a87b..0000000
--- a/src/stub-cache.h
+++ /dev/null
@@ -1,834 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_STUB_CACHE_H_
-#define V8_STUB_CACHE_H_
-
-#include "src/allocation.h"
-#include "src/arguments.h"
-#include "src/code-stubs.h"
-#include "src/ic-inl.h"
-#include "src/macro-assembler.h"
-#include "src/objects.h"
-#include "src/zone-inl.h"
-
-namespace v8 {
-namespace internal {
-
-
-// The stub cache is used for megamorphic calls and property accesses.
-// It maps (map, name, type) -> Code*.
-
-// The design of the table uses the inline cache stubs used for
-// monomorphic calls. The beauty of this is that we do not have to
-// invalidate the cache whenever a prototype map is changed.  The stub
-// validates the map chain as in the monomorphic case.
-
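
As a rough sketch of that contract with toy types and precomputed indices (the real probe is emitted as assembler and compares tagged pointers): probe the primary table, fall back to the secondary table, and treat a miss in both as megamorphic.

#include <cstddef>

// Toy stand-in for the cache's Entry struct below; not the real layout.
struct ProbeEntry { const void* key; const void* map; const void* code; };

// Probe the primary slot, then the secondary slot; nullptr means a miss
// that must be handled by the generic (megamorphic) runtime path.
const void* ToyProbe(const ProbeEntry* primary, std::size_t primary_index,
                     const ProbeEntry* secondary, std::size_t secondary_index,
                     const void* key, const void* map) {
  const ProbeEntry& p = primary[primary_index];
  if (p.key == key && p.map == map) return p.code;    // primary hit
  const ProbeEntry& s = secondary[secondary_index];
  if (s.key == key && s.map == map) return s.code;    // secondary hit
  return nullptr;                                     // fall through to miss
}
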
-
-class CallOptimization;
-class SmallMapList;
-class StubCache;
-
-
-class SCTableReference {
- public:
-  Address address() const { return address_; }
-
- private:
-  explicit SCTableReference(Address address) : address_(address) {}
-
-  Address address_;
-
-  friend class StubCache;
-};
-
-
-class StubCache {
- public:
-  struct Entry {
-    Name* key;
-    Code* value;
-    Map* map;
-  };
-
-  void Initialize();
-
-  Handle<JSObject> StubHolder(Handle<JSObject> receiver,
-                              Handle<JSObject> holder);
-
-  Handle<Code> FindIC(Handle<Name> name,
-                      Handle<Map> stub_holder_map,
-                      Code::Kind kind,
-                      ExtraICState extra_state = kNoExtraICState,
-                      InlineCacheHolderFlag cache_holder = OWN_MAP);
-
-  Handle<Code> FindHandler(Handle<Name> name,
-                           Handle<Map> map,
-                           Code::Kind kind,
-                           InlineCacheHolderFlag cache_holder,
-                           Code::StubType type);
-
-  Handle<Code> ComputeMonomorphicIC(Code::Kind kind,
-                                    Handle<Name> name,
-                                    Handle<HeapType> type,
-                                    Handle<Code> handler,
-                                    ExtraICState extra_ic_state);
-
-  Handle<Code> ComputeLoadNonexistent(Handle<Name> name, Handle<HeapType> type);
-
-  Handle<Code> ComputeKeyedLoadElement(Handle<Map> receiver_map);
-
-  Handle<Code> ComputeKeyedStoreElement(Handle<Map> receiver_map,
-                                        StrictMode strict_mode,
-                                        KeyedAccessStoreMode store_mode);
-
-  // ---
-
-  Handle<Code> ComputeLoad(InlineCacheState ic_state, ExtraICState extra_state);
-  Handle<Code> ComputeStore(InlineCacheState ic_state,
-                            ExtraICState extra_state);
-
-  // ---
-
-  Handle<Code> ComputeCompareNil(Handle<Map> receiver_map,
-                                 CompareNilICStub* stub);
-
-  // ---
-
-  Handle<Code> ComputeLoadElementPolymorphic(MapHandleList* receiver_maps);
-  Handle<Code> ComputeStoreElementPolymorphic(MapHandleList* receiver_maps,
-                                              KeyedAccessStoreMode store_mode,
-                                              StrictMode strict_mode);
-
-  Handle<Code> ComputePolymorphicIC(Code::Kind kind,
-                                    TypeHandleList* types,
-                                    CodeHandleList* handlers,
-                                    int number_of_valid_maps,
-                                    Handle<Name> name,
-                                    ExtraICState extra_ic_state);
-
-  // Finds the Code object stored in the Heap::non_monomorphic_cache().
-  Code* FindPreMonomorphicIC(Code::Kind kind, ExtraICState extra_ic_state);
-
-  // Update cache for entry hash(name, map).
-  Code* Set(Name* name, Map* map, Code* code);
-
-  // Clear the lookup table (at mark-compact collection).
-  void Clear();
-
-  // Collect all maps that match the name and flags.
-  void CollectMatchingMaps(SmallMapList* types,
-                           Handle<Name> name,
-                           Code::Flags flags,
-                           Handle<Context> native_context,
-                           Zone* zone);
-
-  // Generate code for probing the stub cache table.
-  // Arguments extra, extra2 and extra3 may be used to pass additional scratch
-  // registers. Set to no_reg if not needed.
-  void GenerateProbe(MacroAssembler* masm,
-                     Code::Flags flags,
-                     Register receiver,
-                     Register name,
-                     Register scratch,
-                     Register extra,
-                     Register extra2 = no_reg,
-                     Register extra3 = no_reg);
-
-  enum Table {
-    kPrimary,
-    kSecondary
-  };
-
-
-  SCTableReference key_reference(StubCache::Table table) {
-    return SCTableReference(
-        reinterpret_cast<Address>(&first_entry(table)->key));
-  }
-
-
-  SCTableReference map_reference(StubCache::Table table) {
-    return SCTableReference(
-        reinterpret_cast<Address>(&first_entry(table)->map));
-  }
-
-
-  SCTableReference value_reference(StubCache::Table table) {
-    return SCTableReference(
-        reinterpret_cast<Address>(&first_entry(table)->value));
-  }
-
-
-  StubCache::Entry* first_entry(StubCache::Table table) {
-    switch (table) {
-      case StubCache::kPrimary: return StubCache::primary_;
-      case StubCache::kSecondary: return StubCache::secondary_;
-    }
-    UNREACHABLE();
-    return NULL;
-  }
-
-  Isolate* isolate() { return isolate_; }
-  Heap* heap() { return isolate()->heap(); }
-  Factory* factory() { return isolate()->factory(); }
-
-  // These constants describe the structure of the interceptor arguments on the
-  // stack. The arguments are pushed by the (platform-specific)
-  // PushInterceptorArguments and read by LoadPropertyWithInterceptorOnly and
-  // LoadWithInterceptor.
-  static const int kInterceptorArgsNameIndex = 0;
-  static const int kInterceptorArgsInfoIndex = 1;
-  static const int kInterceptorArgsThisIndex = 2;
-  static const int kInterceptorArgsHolderIndex = 3;
-  static const int kInterceptorArgsLength = 4;
-
- private:
-  explicit StubCache(Isolate* isolate);
-
-  // The stub cache has a primary and secondary level.  The two levels have
-  // different hashing algorithms in order to avoid simultaneous collisions
-  // in both caches.  Unlike a probing strategy (quadratic or otherwise), the
-  // update strategy is fairly clear and simple:  Any existing entry
-  // in the primary cache is moved to the secondary cache, and secondary cache
-  // entries are overwritten.
-
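
A sketch of that update rule with toy types; the index helpers are hypothetical stand-ins for the PrimaryOffset/SecondaryOffset functions below.

#include <cstddef>
#include <cstdint>

// Toy stand-in for the Entry struct above; not the real layout.
struct ToyEntry { uintptr_t key; uintptr_t map; uintptr_t code; };

// Hypothetical index helpers. Table sizes must be powers of two, as
// kPrimaryTableSize and kSecondaryTableSize below are.
std::size_t ToyPrimaryIndex(uintptr_t key, uintptr_t map, std::size_t size) {
  return (key + map) & (size - 1);
}
std::size_t ToySecondaryIndex(uintptr_t key, std::size_t seed,
                              std::size_t size) {
  return (seed - key) & (size - 1);
}

void ToySet(ToyEntry* primary, std::size_t primary_size,
            ToyEntry* secondary, std::size_t secondary_size,
            ToyEntry incoming) {
  std::size_t p = ToyPrimaryIndex(incoming.key, incoming.map, primary_size);
  ToyEntry displaced = primary[p];
  if (displaced.code != 0) {
    // Demote the displaced primary entry; whatever occupied its secondary
    // slot is simply overwritten.
    std::size_t s = ToySecondaryIndex(displaced.key, p, secondary_size);
    secondary[s] = displaced;
  }
  // The incoming entry always claims its primary slot.
  primary[p] = incoming;
}
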
-  // Hash algorithm for the primary table.  This algorithm is replicated in
-  // assembler for every architecture.  Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
-  static int PrimaryOffset(Name* name, Code::Flags flags, Map* map) {
-    // This works well because the heap object tag size and the hash
-    // shift are equal.  Shifting down the length field to get the
-    // hash code would effectively throw away two bits of the hash
-    // code.
-    STATIC_ASSERT(kHeapObjectTagSize == Name::kHashShift);
-    // Compute the hash of the name (use entire hash field).
-    ASSERT(name->HasHashCode());
-    uint32_t field = name->hash_field();
-    // Using only the low bits in 64-bit mode is unlikely to increase the
-    // risk of collision even if the heap is spread over an area larger than
-    // 4Gb (and not at all if it isn't).
-    uint32_t map_low32bits =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
-    // We always set the in_loop bit to zero when generating the lookup code
-    // so we do it here too so that the hash codes match.
-    uint32_t iflags =
-        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
-    // Base the offset on a simple combination of name, flags, and map.
-    uint32_t key = (map_low32bits + field) ^ iflags;
-    return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  }
-
-  // Hash algorithm for the secondary table.  This algorithm is replicated in
-  // assembler for every architecture.  Returns an index into the table that
-  // is scaled by 1 << kHeapObjectTagSize.
-  static int SecondaryOffset(Name* name, Code::Flags flags, int seed) {
-    // Use the seed from the primary cache in the secondary cache.
-    uint32_t name_low32bits =
-        static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name));
-    // We always set the in_loop bit to zero when generating the lookup code
-    // so we do it here too so that the hash codes match.
-    uint32_t iflags =
-        (static_cast<uint32_t>(flags) & ~Code::kFlagsNotUsedInLookup);
-    uint32_t key = (seed - name_low32bits) + iflags;
-    return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
-  }
-
-  // Compute the entry for a given offset in exactly the same way as
-  // we do in generated code.  We generate a hash code that already
-  // ends in Name::kHashShift 0s.  Then we multiply it so it is a multiple
-  // of sizeof(Entry).  This makes it easier to avoid making mistakes
-  // in the hashed offset computations.
-  static Entry* entry(Entry* table, int offset) {
-    const int multiplier = sizeof(*table) >> Name::kHashShift;
-    return reinterpret_cast<Entry*>(
-        reinterpret_cast<Address>(table) + offset * multiplier);
-  }
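
To make the scaling concrete, here is a compile-time check with toy constants, assuming 64-bit pointers (so an Entry is 24 bytes) and the 2-bit hash shift and heap-object tag size that the STATIC_ASSERT in PrimaryOffset requires to be equal:

#include <cstddef>
#include <cstdint>

// Toy mirror of the three-pointer Entry struct above.
struct ToyTableEntry { uintptr_t key; uintptr_t code; uintptr_t map; };
constexpr int kToyHashShift = 2;  // stands in for Name::kHashShift
constexpr int kToyTagSize = 2;    // stands in for kHeapObjectTagSize
constexpr std::size_t kToyMultiplier =
    sizeof(ToyTableEntry) >> kToyHashShift;  // 24 >> 2 == 6

// An offset is index << kToyTagSize, so offset * multiplier is exactly
// index * sizeof(Entry): the scaled hash lands on an Entry boundary.
static_assert(sizeof(ToyTableEntry) == 24, "sketch assumes 64-bit pointers");
static_assert((5 << kToyTagSize) * kToyMultiplier == 5 * sizeof(ToyTableEntry),
              "offset scaling lands exactly on the fifth Entry");
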
-
-  static const int kPrimaryTableBits = 11;
-  static const int kPrimaryTableSize = (1 << kPrimaryTableBits);
-  static const int kSecondaryTableBits = 9;
-  static const int kSecondaryTableSize = (1 << kSecondaryTableBits);
-
-  Entry primary_[kPrimaryTableSize];
-  Entry secondary_[kSecondaryTableSize];
-  Isolate* isolate_;
-
-  friend class Isolate;
-  friend class SCTableReference;
-
-  DISALLOW_COPY_AND_ASSIGN(StubCache);
-};
-
-
-// ------------------------------------------------------------------------
-
-
-// Support functions for IC stubs for callbacks.
-DECLARE_RUNTIME_FUNCTION(StoreCallbackProperty);
-
-
-// Support functions for IC stubs for interceptors.
-DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly);
-DECLARE_RUNTIME_FUNCTION(LoadPropertyWithInterceptor);
-DECLARE_RUNTIME_FUNCTION(StoreInterceptorProperty);
-DECLARE_RUNTIME_FUNCTION(KeyedLoadPropertyWithInterceptor);
-
-
-enum PrototypeCheckType { CHECK_ALL_MAPS, SKIP_RECEIVER };
-enum IcCheckType { ELEMENT, PROPERTY };
-
-
-// The stub compilers compile stubs for the stub cache.
-class StubCompiler BASE_EMBEDDED {
- public:
-  explicit StubCompiler(Isolate* isolate,
-                        ExtraICState extra_ic_state = kNoExtraICState)
-      : isolate_(isolate), extra_ic_state_(extra_ic_state),
-        masm_(isolate, NULL, 256) { }
-
-  Handle<Code> CompileLoadInitialize(Code::Flags flags);
-  Handle<Code> CompileLoadPreMonomorphic(Code::Flags flags);
-  Handle<Code> CompileLoadMegamorphic(Code::Flags flags);
-
-  Handle<Code> CompileStoreInitialize(Code::Flags flags);
-  Handle<Code> CompileStorePreMonomorphic(Code::Flags flags);
-  Handle<Code> CompileStoreGeneric(Code::Flags flags);
-  Handle<Code> CompileStoreMegamorphic(Code::Flags flags);
-
-  // Static functions for generating parts of stubs.
-  static void GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                  int index,
-                                                  Register prototype);
-
-  // Helper function used to check that the dictionary doesn't contain
-  // the property. This function may return false negatives, so the code at
-  // miss_label must always fall back to a complete backup property check.
-  // This function is safe to call if the receiver has fast properties.
-  // Name must be unique and receiver must be a heap object.
-  static void GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                               Label* miss_label,
-                                               Register receiver,
-                                               Handle<Name> name,
-                                               Register r0,
-                                               Register r1);
-
-  // Generates prototype loading code that uses the objects from the
-  // context we were in when this function was called. If the context
-  // has changed, a jump to miss is performed. This ties the generated
-  // code to a particular context and so must not be used in cases
-  // where the generated code is not allowed to have references to
-  // objects from a context.
-  static void GenerateDirectLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                        int index,
-                                                        Register prototype,
-                                                        Label* miss);
-
-  static void GenerateFastPropertyLoad(MacroAssembler* masm,
-                                       Register dst,
-                                       Register src,
-                                       bool inobject,
-                                       int index,
-                                       Representation representation);
-
-  static void GenerateLoadArrayLength(MacroAssembler* masm,
-                                      Register receiver,
-                                      Register scratch,
-                                      Label* miss_label);
-
-  static void GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                            Register receiver,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* miss_label);
-
-  // Generate code to check that a global property cell is empty. Create
-  // the property cell at compilation time if no cell exists for the
-  // property.
-  static void GenerateCheckPropertyCell(MacroAssembler* masm,
-                                        Handle<JSGlobalObject> global,
-                                        Handle<Name> name,
-                                        Register scratch,
-                                        Label* miss);
-
-  static void TailCallBuiltin(MacroAssembler* masm, Builtins::Name name);
-
-  // Generates code that verifies that the property holder has not changed
-  // (checking maps of objects in the prototype chain for fast and global
-  // objects or doing negative lookup for slow objects, ensures that the
-  // property cells for global objects are still empty) and checks that the map
-  // of the holder has not changed. If necessary, the function also generates
-  // code for a security check in the case of global object holders. This
-  // helps make sure that the current IC is still valid.
-  //
-  // The scratch and holder registers are always clobbered, but the object
-  // register is only clobbered if it is the same as the holder register. The
-  // function returns a register containing the holder - either object_reg or
-  // holder_reg.
-  Register CheckPrototypes(Handle<HeapType> type,
-                           Register object_reg,
-                           Handle<JSObject> holder,
-                           Register holder_reg,
-                           Register scratch1,
-                           Register scratch2,
-                           Handle<Name> name,
-                           Label* miss,
-                           PrototypeCheckType check = CHECK_ALL_MAPS);
-
-  static void GenerateFastApiCall(MacroAssembler* masm,
-                                  const CallOptimization& optimization,
-                                  Handle<Map> receiver_map,
-                                  Register receiver,
-                                  Register scratch,
-                                  bool is_store,
-                                  int argc,
-                                  Register* values);
-
- protected:
-  Handle<Code> GetCodeWithFlags(Code::Flags flags, const char* name);
-  Handle<Code> GetCodeWithFlags(Code::Flags flags, Handle<Name> name);
-
-  ExtraICState extra_state() { return extra_ic_state_; }
-
-  MacroAssembler* masm() { return &masm_; }
-
-  static void LookupPostInterceptor(Handle<JSObject> holder,
-                                    Handle<Name> name,
-                                    LookupResult* lookup);
-
-  Isolate* isolate() { return isolate_; }
-  Heap* heap() { return isolate()->heap(); }
-  Factory* factory() { return isolate()->factory(); }
-
-  static void GenerateTailCall(MacroAssembler* masm, Handle<Code> code);
-
- private:
-  Isolate* isolate_;
-  const ExtraICState extra_ic_state_;
-  MacroAssembler masm_;
-};
-
-
-enum FrontendCheckType { PERFORM_INITIAL_CHECKS, SKIP_INITIAL_CHECKS };
-
-
-class BaseLoadStoreStubCompiler: public StubCompiler {
- public:
-  BaseLoadStoreStubCompiler(Isolate* isolate,
-                            Code::Kind kind,
-                            ExtraICState extra_ic_state = kNoExtraICState,
-                            InlineCacheHolderFlag cache_holder = OWN_MAP)
-      : StubCompiler(isolate, extra_ic_state),
-        kind_(kind),
-        cache_holder_(cache_holder) {
-    InitializeRegisters();
-  }
-  virtual ~BaseLoadStoreStubCompiler() { }
-
-  Handle<Code> CompileMonomorphicIC(Handle<HeapType> type,
-                                    Handle<Code> handler,
-                                    Handle<Name> name);
-
-  Handle<Code> CompilePolymorphicIC(TypeHandleList* types,
-                                    CodeHandleList* handlers,
-                                    Handle<Name> name,
-                                    Code::StubType type,
-                                    IcCheckType check);
-
-  static Builtins::Name MissBuiltin(Code::Kind kind) {
-    switch (kind) {
-      case Code::LOAD_IC: return Builtins::kLoadIC_Miss;
-      case Code::STORE_IC: return Builtins::kStoreIC_Miss;
-      case Code::KEYED_LOAD_IC: return Builtins::kKeyedLoadIC_Miss;
-      case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Miss;
-      default: UNREACHABLE();
-    }
-    return Builtins::kLoadIC_Miss;
-  }
-
- protected:
-  virtual Register HandlerFrontendHeader(Handle<HeapType> type,
-                                         Register object_reg,
-                                         Handle<JSObject> holder,
-                                         Handle<Name> name,
-                                         Label* miss) = 0;
-
-  virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss) = 0;
-
-  Register HandlerFrontend(Handle<HeapType> type,
-                           Register object_reg,
-                           Handle<JSObject> holder,
-                           Handle<Name> name);
-
-  Handle<Code> GetCode(Code::Kind kind,
-                       Code::StubType type,
-                       Handle<Name> name);
-
-  Handle<Code> GetICCode(Code::Kind kind,
-                         Code::StubType type,
-                         Handle<Name> name,
-                         InlineCacheState state = MONOMORPHIC);
-  Code::Kind kind() { return kind_; }
-
-  Logger::LogEventsAndTags log_kind(Handle<Code> code) {
-    if (!code->is_inline_cache_stub()) return Logger::STUB_TAG;
-    if (kind_ == Code::LOAD_IC) {
-      return code->ic_state() == MONOMORPHIC
-          ? Logger::LOAD_IC_TAG : Logger::LOAD_POLYMORPHIC_IC_TAG;
-    } else if (kind_ == Code::KEYED_LOAD_IC) {
-      return code->ic_state() == MONOMORPHIC
-          ? Logger::KEYED_LOAD_IC_TAG : Logger::KEYED_LOAD_POLYMORPHIC_IC_TAG;
-    } else if (kind_ == Code::STORE_IC) {
-      return code->ic_state() == MONOMORPHIC
-          ? Logger::STORE_IC_TAG : Logger::STORE_POLYMORPHIC_IC_TAG;
-    } else {
-      return code->ic_state() == MONOMORPHIC
-          ? Logger::KEYED_STORE_IC_TAG : Logger::KEYED_STORE_POLYMORPHIC_IC_TAG;
-    }
-  }
-  void JitEvent(Handle<Name> name, Handle<Code> code);
-
-  Register receiver() { return registers_[0]; }
-  Register name()     { return registers_[1]; }
-  Register scratch1() { return registers_[2]; }
-  Register scratch2() { return registers_[3]; }
-  Register scratch3() { return registers_[4]; }
-
-  void InitializeRegisters();
-
-  bool IncludesNumberType(TypeHandleList* types);
-
-  Code::Kind kind_;
-  InlineCacheHolderFlag cache_holder_;
-  Register* registers_;
-};
-
-
-class LoadStubCompiler: public BaseLoadStoreStubCompiler {
- public:
-  LoadStubCompiler(Isolate* isolate,
-                   ExtraICState extra_ic_state = kNoExtraICState,
-                   InlineCacheHolderFlag cache_holder = OWN_MAP,
-                   Code::Kind kind = Code::LOAD_IC)
-      : BaseLoadStoreStubCompiler(isolate, kind, extra_ic_state,
-                                  cache_holder) { }
-  virtual ~LoadStubCompiler() { }
-
-  Handle<Code> CompileLoadField(Handle<HeapType> type,
-                                Handle<JSObject> holder,
-                                Handle<Name> name,
-                                FieldIndex index,
-                                Representation representation);
-
-  Handle<Code> CompileLoadCallback(Handle<HeapType> type,
-                                   Handle<JSObject> holder,
-                                   Handle<Name> name,
-                                   Handle<ExecutableAccessorInfo> callback);
-
-  Handle<Code> CompileLoadCallback(Handle<HeapType> type,
-                                   Handle<JSObject> holder,
-                                   Handle<Name> name,
-                                   const CallOptimization& call_optimization);
-
-  Handle<Code> CompileLoadConstant(Handle<HeapType> type,
-                                   Handle<JSObject> holder,
-                                   Handle<Name> name,
-                                   Handle<Object> value);
-
-  Handle<Code> CompileLoadInterceptor(Handle<HeapType> type,
-                                      Handle<JSObject> holder,
-                                      Handle<Name> name);
-
-  Handle<Code> CompileLoadViaGetter(Handle<HeapType> type,
-                                    Handle<JSObject> holder,
-                                    Handle<Name> name,
-                                    Handle<JSFunction> getter);
-
-  static void GenerateLoadViaGetter(MacroAssembler* masm,
-                                    Handle<HeapType> type,
-                                    Register receiver,
-                                    Handle<JSFunction> getter);
-
-  static void GenerateLoadViaGetterForDeopt(MacroAssembler* masm) {
-    GenerateLoadViaGetter(
-        masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>());
-  }
-
-  Handle<Code> CompileLoadNonexistent(Handle<HeapType> type,
-                                      Handle<JSObject> last,
-                                      Handle<Name> name);
-
-  Handle<Code> CompileLoadGlobal(Handle<HeapType> type,
-                                 Handle<GlobalObject> holder,
-                                 Handle<PropertyCell> cell,
-                                 Handle<Name> name,
-                                 bool is_dont_delete);
-
- protected:
-  ContextualMode contextual_mode() {
-    return LoadIC::GetContextualMode(extra_state());
-  }
-
-  virtual Register HandlerFrontendHeader(Handle<HeapType> type,
-                                         Register object_reg,
-                                         Handle<JSObject> holder,
-                                         Handle<Name> name,
-                                         Label* miss);
-
-  virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss);
-
-  Register CallbackHandlerFrontend(Handle<HeapType> type,
-                                   Register object_reg,
-                                   Handle<JSObject> holder,
-                                   Handle<Name> name,
-                                   Handle<Object> callback);
-  void NonexistentHandlerFrontend(Handle<HeapType> type,
-                                  Handle<JSObject> last,
-                                  Handle<Name> name);
-
-  void GenerateLoadField(Register reg,
-                         Handle<JSObject> holder,
-                         FieldIndex field,
-                         Representation representation);
-  void GenerateLoadConstant(Handle<Object> value);
-  void GenerateLoadCallback(Register reg,
-                            Handle<ExecutableAccessorInfo> callback);
-  void GenerateLoadCallback(const CallOptimization& call_optimization,
-                            Handle<Map> receiver_map);
-  void GenerateLoadInterceptor(Register holder_reg,
-                               Handle<Object> object,
-                               Handle<JSObject> holder,
-                               LookupResult* lookup,
-                               Handle<Name> name);
-  void GenerateLoadPostInterceptor(Register reg,
-                                   Handle<JSObject> interceptor_holder,
-                                   Handle<Name> name,
-                                   LookupResult* lookup);
-
- private:
-  static Register* registers();
-  Register scratch4() { return registers_[5]; }
-  friend class BaseLoadStoreStubCompiler;
-};
-
-
-class KeyedLoadStubCompiler: public LoadStubCompiler {
- public:
-  KeyedLoadStubCompiler(Isolate* isolate,
-                        ExtraICState extra_ic_state = kNoExtraICState,
-                        InlineCacheHolderFlag cache_holder = OWN_MAP)
-      : LoadStubCompiler(isolate, extra_ic_state, cache_holder,
-                         Code::KEYED_LOAD_IC) { }
-
-  Handle<Code> CompileLoadElement(Handle<Map> receiver_map);
-
-  void CompileElementHandlers(MapHandleList* receiver_maps,
-                              CodeHandleList* handlers);
-
-  static void GenerateLoadDictionaryElement(MacroAssembler* masm);
-
- private:
-  static Register* registers();
-  friend class BaseLoadStoreStubCompiler;
-};
-
-
-class StoreStubCompiler: public BaseLoadStoreStubCompiler {
- public:
-  StoreStubCompiler(Isolate* isolate,
-                    ExtraICState extra_ic_state,
-                    Code::Kind kind = Code::STORE_IC)
-      : BaseLoadStoreStubCompiler(isolate, kind, extra_ic_state) {}
-
-  virtual ~StoreStubCompiler() { }
-
-  Handle<Code> CompileStoreTransition(Handle<JSObject> object,
-                                      LookupResult* lookup,
-                                      Handle<Map> transition,
-                                      Handle<Name> name);
-
-  Handle<Code> CompileStoreField(Handle<JSObject> object,
-                                 LookupResult* lookup,
-                                 Handle<Name> name);
-
-  Handle<Code> CompileStoreArrayLength(Handle<JSObject> object,
-                                       LookupResult* lookup,
-                                       Handle<Name> name);
-
-  void GenerateStoreArrayLength();
-
-  void GenerateNegativeHolderLookup(MacroAssembler* masm,
-                                    Handle<JSObject> holder,
-                                    Register holder_reg,
-                                    Handle<Name> name,
-                                    Label* miss);
-
-  void GenerateStoreTransition(MacroAssembler* masm,
-                               Handle<JSObject> object,
-                               LookupResult* lookup,
-                               Handle<Map> transition,
-                               Handle<Name> name,
-                               Register receiver_reg,
-                               Register name_reg,
-                               Register value_reg,
-                               Register scratch1,
-                               Register scratch2,
-                               Register scratch3,
-                               Label* miss_label,
-                               Label* slow);
-
-  void GenerateStoreField(MacroAssembler* masm,
-                          Handle<JSObject> object,
-                          LookupResult* lookup,
-                          Register receiver_reg,
-                          Register name_reg,
-                          Register value_reg,
-                          Register scratch1,
-                          Register scratch2,
-                          Label* miss_label);
-
-  Handle<Code> CompileStoreCallback(Handle<JSObject> object,
-                                    Handle<JSObject> holder,
-                                    Handle<Name> name,
-                                    Handle<ExecutableAccessorInfo> callback);
-
-  Handle<Code> CompileStoreCallback(Handle<JSObject> object,
-                                    Handle<JSObject> holder,
-                                    Handle<Name> name,
-                                    const CallOptimization& call_optimization);
-
-  static void GenerateStoreViaSetter(MacroAssembler* masm,
-                                     Handle<HeapType> type,
-                                     Register receiver,
-                                     Handle<JSFunction> setter);
-
-  static void GenerateStoreViaSetterForDeopt(MacroAssembler* masm) {
-    GenerateStoreViaSetter(
-        masm, Handle<HeapType>::null(), no_reg, Handle<JSFunction>());
-  }
-
-  Handle<Code> CompileStoreViaSetter(Handle<JSObject> object,
-                                     Handle<JSObject> holder,
-                                     Handle<Name> name,
-                                     Handle<JSFunction> setter);
-
-  Handle<Code> CompileStoreInterceptor(Handle<JSObject> object,
-                                       Handle<Name> name);
-
-  static Builtins::Name SlowBuiltin(Code::Kind kind) {
-    switch (kind) {
-      case Code::STORE_IC: return Builtins::kStoreIC_Slow;
-      case Code::KEYED_STORE_IC: return Builtins::kKeyedStoreIC_Slow;
-      default: UNREACHABLE();
-    }
-    return Builtins::kStoreIC_Slow;
-  }
-
- protected:
-  virtual Register HandlerFrontendHeader(Handle<HeapType> type,
-                                         Register object_reg,
-                                         Handle<JSObject> holder,
-                                         Handle<Name> name,
-                                         Label* miss);
-
-  virtual void HandlerFrontendFooter(Handle<Name> name, Label* miss);
-  void GenerateRestoreName(MacroAssembler* masm,
-                           Label* label,
-                           Handle<Name> name);
-
- private:
-  static Register* registers();
-  static Register value();
-  friend class BaseLoadStoreStubCompiler;
-};
-
-
-class KeyedStoreStubCompiler: public StoreStubCompiler {
- public:
-  KeyedStoreStubCompiler(Isolate* isolate,
-                         ExtraICState extra_ic_state)
-      : StoreStubCompiler(isolate, extra_ic_state, Code::KEYED_STORE_IC) {}
-
-  Handle<Code> CompileStoreElement(Handle<Map> receiver_map);
-
-  Handle<Code> CompileStorePolymorphic(MapHandleList* receiver_maps,
-                                       CodeHandleList* handler_stubs,
-                                       MapHandleList* transitioned_maps);
-
-  Handle<Code> CompileStoreElementPolymorphic(MapHandleList* receiver_maps);
-
-  static void GenerateStoreDictionaryElement(MacroAssembler* masm);
-
- private:
-  static Register* registers();
-
-  KeyedAccessStoreMode store_mode() {
-    return KeyedStoreIC::GetKeyedAccessStoreMode(extra_state());
-  }
-
-  Register transition_map() { return scratch1(); }
-
-  friend class BaseLoadStoreStubCompiler;
-};
-
-
-// Holds information about possible function call optimizations.
-class CallOptimization BASE_EMBEDDED {
- public:
-  explicit CallOptimization(LookupResult* lookup);
-
-  explicit CallOptimization(Handle<JSFunction> function);
-
-  bool is_constant_call() const {
-    return !constant_function_.is_null();
-  }
-
-  Handle<JSFunction> constant_function() const {
-    ASSERT(is_constant_call());
-    return constant_function_;
-  }
-
-  bool is_simple_api_call() const {
-    return is_simple_api_call_;
-  }
-
-  Handle<FunctionTemplateInfo> expected_receiver_type() const {
-    ASSERT(is_simple_api_call());
-    return expected_receiver_type_;
-  }
-
-  Handle<CallHandlerInfo> api_call_info() const {
-    ASSERT(is_simple_api_call());
-    return api_call_info_;
-  }
-
-  enum HolderLookup {
-    kHolderNotFound,
-    kHolderIsReceiver,
-    kHolderFound
-  };
-  Handle<JSObject> LookupHolderOfExpectedType(
-      Handle<Map> receiver_map,
-      HolderLookup* holder_lookup) const;
-
-  // Check if the api holder is between the receiver and the holder.
-  bool IsCompatibleReceiver(Handle<Object> receiver,
-                            Handle<JSObject> holder) const;
-
- private:
-  void Initialize(Handle<JSFunction> function);
-
-  // Determines whether the given function can be called using the
-  // fast api call builtin.
-  void AnalyzePossibleApiFunction(Handle<JSFunction> function);
-
-  Handle<JSFunction> constant_function_;
-  bool is_simple_api_call_;
-  Handle<FunctionTemplateInfo> expected_receiver_type_;
-  Handle<CallHandlerInfo> api_call_info_;
-};
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_STUB_CACHE_H_
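The two hash functions in the deleted stub-cache.h above are self-contained arithmetic, so they are easy to check in isolation. Below is a minimal standalone sketch: the table-size and tag constants mirror the header, while the map/name "addresses" and flags are made-up uint32_t stand-ins for illustration.

// Standalone sketch of the StubCache offset math from the deleted
// stub-cache.h above; compiles on its own, not V8 code.
#include <cstdint>
#include <cstdio>

namespace {

const int kHeapObjectTagSize = 2;  // mirrors V8's heap-object tag width
const int kPrimaryTableBits = 11;
const int kPrimaryTableSize = 1 << kPrimaryTableBits;
const int kSecondaryTableBits = 9;
const int kSecondaryTableSize = 1 << kSecondaryTableBits;

// Primary table: combine the low map bits, a field derived from the name,
// and the masked code flags, then keep an offset that is scaled by
// 1 << kHeapObjectTagSize (i.e. ends in that many zero bits).
uint32_t PrimaryOffset(uint32_t map_low32bits, uint32_t field,
                       uint32_t iflags) {
  uint32_t key = (map_low32bits + field) ^ iflags;
  return key & ((kPrimaryTableSize - 1) << kHeapObjectTagSize);
}

// Secondary table: reuse the primary offset as the seed, subtract the low
// name bits, and mask down to the smaller secondary table.
uint32_t SecondaryOffset(uint32_t name_low32bits, uint32_t iflags,
                         uint32_t seed) {
  uint32_t key = (seed - name_low32bits) + iflags;
  return key & ((kSecondaryTableSize - 1) << kHeapObjectTagSize);
}

}  // namespace

int main() {
  uint32_t map = 0xdeadbeef, field = 0x1234, name = 0xcafef00d;
  uint32_t iflags = 0x5a5a0000;  // flags with unused-in-lookup bits cleared
  uint32_t primary = PrimaryOffset(map, field, iflags);
  uint32_t secondary = SecondaryOffset(name, iflags, primary);
  // Both offsets end in kHeapObjectTagSize zero bits, so entry() can turn
  // them into byte offsets that are multiples of sizeof(Entry).
  std::printf("primary=0x%x secondary=0x%x\n", primary, secondary);
  return 0;
}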
diff --git a/src/sweeper-thread.cc b/src/sweeper-thread.cc
deleted file mode 100644
index ea2553d..0000000
--- a/src/sweeper-thread.cc
+++ /dev/null
@@ -1,86 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/sweeper-thread.h"
-
-#include "src/v8.h"
-
-#include "src/isolate.h"
-#include "src/v8threads.h"
-
-namespace v8 {
-namespace internal {
-
-static const int kSweeperThreadStackSize = 64 * KB;
-
-SweeperThread::SweeperThread(Isolate* isolate)
-     : Thread(Thread::Options("v8:SweeperThread", kSweeperThreadStackSize)),
-       isolate_(isolate),
-       heap_(isolate->heap()),
-       collector_(heap_->mark_compact_collector()),
-       start_sweeping_semaphore_(0),
-       end_sweeping_semaphore_(0),
-       stop_semaphore_(0) {
-  ASSERT(!FLAG_job_based_sweeping);
-  base::NoBarrier_Store(&stop_thread_, static_cast<base::AtomicWord>(false));
-}
-
-
-void SweeperThread::Run() {
-  Isolate::SetIsolateThreadLocals(isolate_, NULL);
-  DisallowHeapAllocation no_allocation;
-  DisallowHandleAllocation no_handles;
-  DisallowHandleDereference no_deref;
-
-  while (true) {
-    start_sweeping_semaphore_.Wait();
-
-    if (base::Acquire_Load(&stop_thread_)) {
-      stop_semaphore_.Signal();
-      return;
-    }
-
-    collector_->SweepInParallel(heap_->old_data_space());
-    collector_->SweepInParallel(heap_->old_pointer_space());
-    end_sweeping_semaphore_.Signal();
-  }
-}
-
-
-void SweeperThread::Stop() {
-  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(true));
-  start_sweeping_semaphore_.Signal();
-  stop_semaphore_.Wait();
-  Join();
-}
-
-
-void SweeperThread::StartSweeping() {
-  start_sweeping_semaphore_.Signal();
-}
-
-
-void SweeperThread::WaitForSweeperThread() {
-  end_sweeping_semaphore_.Wait();
-}
-
-
-bool SweeperThread::SweepingCompleted() {
-  bool value = end_sweeping_semaphore_.WaitFor(TimeDelta::FromSeconds(0));
-  if (value) {
-    end_sweeping_semaphore_.Signal();
-  }
-  return value;
-}
-
-
-int SweeperThread::NumberOfThreads(int max_available) {
-  if (!FLAG_concurrent_sweeping && !FLAG_parallel_sweeping) return 0;
-  if (FLAG_sweeper_threads > 0) return FLAG_sweeper_threads;
-  if (FLAG_concurrent_sweeping) return max_available - 1;
-  ASSERT(FLAG_parallel_sweeping);
-  return max_available;
-}
-
-} }  // namespace v8::internal
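The start/stop protocol above (three semaphores plus an atomic stop flag) can be illustrated with standard C++ primitives. A simplified, standalone analogue follows, using a condition variable in place of V8's Semaphore; it is illustrative only, not V8 code.

// Simplified analogue of the SweeperThread handshake deleted above.
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

class SweeperLike {
 public:
  SweeperLike() : worker_(&SweeperLike::Run, this) {}

  void StartSweeping() { Signal(&start_requested_); }
  void WaitForSweeper() { Wait(&sweep_done_); }

  void Stop() {
    { std::lock_guard<std::mutex> lock(mutex_); stop_ = true; }
    Signal(&start_requested_);  // wake the worker so it can observe stop_
    worker_.join();
  }

 private:
  void Run() {
    for (;;) {
      Wait(&start_requested_);
      std::unique_lock<std::mutex> lock(mutex_);
      if (stop_) return;
      lock.unlock();
      std::puts("sweeping old spaces...");  // stand-in for SweepInParallel
      Signal(&sweep_done_);
    }
  }

  // Signal/Wait emulate a binary semaphore: Wait consumes the flag.
  void Signal(bool* flag) {
    std::lock_guard<std::mutex> lock(mutex_);
    *flag = true;
    cv_.notify_all();
  }
  void Wait(bool* flag) {
    std::unique_lock<std::mutex> lock(mutex_);
    cv_.wait(lock, [flag] { return *flag; });
    *flag = false;
  }

  std::mutex mutex_;
  std::condition_variable cv_;
  bool start_requested_ = false;
  bool sweep_done_ = false;
  bool stop_ = false;
  std::thread worker_;  // declared last so flags are initialized first
};

int main() {
  SweeperLike sweeper;
  sweeper.StartSweeping();
  sweeper.WaitForSweeper();
  sweeper.Stop();
  return 0;
}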
diff --git a/src/sweeper-thread.h b/src/sweeper-thread.h
deleted file mode 100644
index 02cace6..0000000
--- a/src/sweeper-thread.h
+++ /dev/null
@@ -1,45 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_SWEEPER_THREAD_H_
-#define V8_SWEEPER_THREAD_H_
-
-#include "src/base/atomicops.h"
-#include "src/flags.h"
-#include "src/platform.h"
-#include "src/utils.h"
-
-#include "src/spaces.h"
-
-#include "src/heap.h"
-
-namespace v8 {
-namespace internal {
-
-class SweeperThread : public Thread {
- public:
-  explicit SweeperThread(Isolate* isolate);
-  ~SweeperThread() {}
-
-  void Run();
-  void Stop();
-  void StartSweeping();
-  void WaitForSweeperThread();
-  bool SweepingCompleted();
-
-  static int NumberOfThreads(int max_available);
-
- private:
-  Isolate* isolate_;
-  Heap* heap_;
-  MarkCompactCollector* collector_;
-  Semaphore start_sweeping_semaphore_;
-  Semaphore end_sweeping_semaphore_;
-  Semaphore stop_semaphore_;
-  volatile base::AtomicWord stop_thread_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_SWEEPER_THREAD_H_
diff --git a/src/symbol.js b/src/symbol.js
index 1c48302..ce3327b 100644
--- a/src/symbol.js
+++ b/src/symbol.js
@@ -82,7 +82,6 @@
 
 //-------------------------------------------------------------------
 
-var symbolCreate = InternalSymbol("Symbol.create");
 var symbolHasInstance = InternalSymbol("Symbol.hasInstance");
 var symbolIsConcatSpreadable = InternalSymbol("Symbol.isConcatSpreadable");
 var symbolIsRegExp = InternalSymbol("Symbol.isRegExp");
@@ -100,12 +99,12 @@
   %FunctionSetPrototype($Symbol, new $Object());
 
   InstallConstants($Symbol, $Array(
-    "create", symbolCreate,
-    "hasInstance", symbolHasInstance,
-    "isConcatSpreadable", symbolIsConcatSpreadable,
-    "isRegExp", symbolIsRegExp,
+    // TODO(rossberg): expose when implemented.
+    // "hasInstance", symbolHasInstance,
+    // "isConcatSpreadable", symbolIsConcatSpreadable,
+    // "isRegExp", symbolIsRegExp,
     "iterator", symbolIterator,
-    "toStringTag", symbolToStringTag,
+    // "toStringTag", symbolToStringTag,
     "unscopables", symbolUnscopables
   ));
   InstallFunctions($Symbol, DONT_ENUM, $Array(
@@ -113,7 +112,7 @@
     "keyFor", SymbolKeyFor
   ));
 
-  %SetProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
+  %AddNamedProperty($Symbol.prototype, "constructor", $Symbol, DONT_ENUM);
   InstallFunctions($Symbol.prototype, DONT_ENUM, $Array(
     "toString", SymbolToString,
     "valueOf", SymbolValueOf
diff --git a/src/test/DEPS b/src/test/DEPS
new file mode 100644
index 0000000..13855ec
--- /dev/null
+++ b/src/test/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+include/libplatform/libplatform.h"
+]
diff --git a/src/test/run-all-unittests.cc b/src/test/run-all-unittests.cc
new file mode 100644
index 0000000..8c361dd
--- /dev/null
+++ b/src/test/run-all-unittests.cc
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "include/libplatform/libplatform.h"
+#include "include/v8.h"
+#include "src/base/compiler-specific.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace {
+
+class DefaultPlatformEnvironment FINAL : public ::testing::Environment {
+ public:
+  DefaultPlatformEnvironment() : platform_(NULL) {}
+  ~DefaultPlatformEnvironment() {}
+
+  virtual void SetUp() OVERRIDE {
+    EXPECT_EQ(NULL, platform_);
+    platform_ = v8::platform::CreateDefaultPlatform();
+    ASSERT_TRUE(platform_ != NULL);
+    v8::V8::InitializePlatform(platform_);
+    ASSERT_TRUE(v8::V8::Initialize());
+  }
+
+  virtual void TearDown() OVERRIDE {
+    ASSERT_TRUE(platform_ != NULL);
+    v8::V8::Dispose();
+    v8::V8::ShutdownPlatform();
+    delete platform_;
+    platform_ = NULL;
+  }
+
+ private:
+  v8::Platform* platform_;
+};
+
+}  // namespace
+
+
+int main(int argc, char** argv) {
+  testing::InitGoogleMock(&argc, argv);
+  testing::AddGlobalTestEnvironment(new DefaultPlatformEnvironment);
+  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+  return RUN_ALL_TESTS();
+}
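For illustration, a hypothetical test that relies on the environment above (the test name and gtest include path are assumptions): because the global environment installs the platform and initializes V8 before any test body runs, tests can create isolates directly.

#include "include/v8.h"
#include "testing/gtest/include/gtest/gtest.h"

// Hypothetical test; by the time it runs, SetUp() above has completed.
TEST(DefaultPlatformEnvironmentExample, CanCreateIsolate) {
  v8::Isolate* isolate = v8::Isolate::New();
  ASSERT_TRUE(isolate != NULL);
  isolate->Dispose();
}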
diff --git a/src/test/test-utils.cc b/src/test/test-utils.cc
new file mode 100644
index 0000000..1041465
--- /dev/null
+++ b/src/test/test-utils.cc
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/test/test-utils.h"
+
+#include "src/isolate-inl.h"
+
+namespace v8 {
+
+// static
+Isolate* TestWithIsolate::isolate_ = NULL;
+
+
+TestWithIsolate::TestWithIsolate()
+    : isolate_scope_(isolate()), handle_scope_(isolate()) {}
+
+
+TestWithIsolate::~TestWithIsolate() {}
+
+
+// static
+void TestWithIsolate::SetUpTestCase() {
+  Test::SetUpTestCase();
+  EXPECT_EQ(NULL, isolate_);
+  isolate_ = v8::Isolate::New();
+  EXPECT_TRUE(isolate_ != NULL);
+}
+
+
+// static
+void TestWithIsolate::TearDownTestCase() {
+  ASSERT_TRUE(isolate_ != NULL);
+  isolate_->Dispose();
+  isolate_ = NULL;
+  Test::TearDownTestCase();
+}
+
+
+TestWithContext::TestWithContext()
+    : context_(Context::New(isolate())), context_scope_(context_) {}
+
+
+TestWithContext::~TestWithContext() {}
+
+
+namespace internal {
+
+TestWithIsolate::~TestWithIsolate() {}
+
+
+Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
+
+
+TestWithZone::~TestWithZone() {}
+
+}  // namespace internal
+}  // namespace v8
diff --git a/src/test/test-utils.h b/src/test/test-utils.h
new file mode 100644
index 0000000..05d1ea6
--- /dev/null
+++ b/src/test/test-utils.h
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TEST_TEST_UTILS_H_
+#define V8_TEST_TEST_UTILS_H_
+
+#include "include/v8.h"
+#include "src/base/macros.h"
+#include "src/zone.h"
+#include "testing/gtest-support.h"
+
+namespace v8 {
+
+class TestWithIsolate : public ::testing::Test {
+ public:
+  TestWithIsolate();
+  virtual ~TestWithIsolate();
+
+  Isolate* isolate() const { return isolate_; }
+
+  static void SetUpTestCase();
+  static void TearDownTestCase();
+
+ private:
+  static Isolate* isolate_;
+  Isolate::Scope isolate_scope_;
+  HandleScope handle_scope_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
+};
+
+
+class TestWithContext : public virtual TestWithIsolate {
+ public:
+  TestWithContext();
+  virtual ~TestWithContext();
+
+  const Local<Context>& context() const { return context_; }
+
+ private:
+  Local<Context> context_;
+  Context::Scope context_scope_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestWithContext);
+};
+
+
+namespace internal {
+
+// Forward declarations.
+class Factory;
+
+
+class TestWithIsolate : public virtual ::v8::TestWithIsolate {
+ public:
+  TestWithIsolate() {}
+  virtual ~TestWithIsolate();
+
+  Factory* factory() const;
+  Isolate* isolate() const {
+    return reinterpret_cast<Isolate*>(::v8::TestWithIsolate::isolate());
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
+};
+
+
+class TestWithZone : public TestWithIsolate {
+ public:
+  TestWithZone() : zone_(isolate()) {}
+  virtual ~TestWithZone();
+
+  Zone* zone() { return &zone_; }
+
+ private:
+  Zone zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(TestWithZone);
+};
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TEST_TEST_UTILS_H_
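A hypothetical example of how these fixtures would be used (the suite name and assertions are illustrative, assuming the Local<String> API of this V8 revision): deriving from TestWithContext gives each test an isolate, a handle scope, and an entered context.

#include "src/test/test-utils.h"

namespace v8 {

typedef TestWithContext ExampleWithContextTest;  // hypothetical suite name

TEST_F(ExampleWithContextTest, ContextIsUsable) {
  // isolate() and context() come from the fixtures; the context is
  // already entered via Context::Scope, so handles can be created here.
  Local<String> hello = String::NewFromUtf8(isolate(), "hello");
  EXPECT_EQ(5, hello->Utf8Length());
}

}  // namespace v8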
diff --git a/src/test/test.gyp b/src/test/test.gyp
new file mode 100644
index 0000000..f4c6a5e
--- /dev/null
+++ b/src/test/test.gyp
@@ -0,0 +1,71 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'variables': {
+    'v8_code': 1,
+  },
+  'includes': ['../../build/toolchain.gypi', '../../build/features.gypi'],
+  'targets': [
+    {
+      'target_name': 'run-all-unittests',
+      'type': 'static_library',
+      'variables': {
+        'optimize': 'max',
+      },
+      'dependencies': [
+        '../../testing/gmock.gyp:gmock',
+        '../../testing/gtest.gyp:gtest',
+        '../../tools/gyp/v8.gyp:v8_libplatform',
+      ],
+      'include_dirs': [
+        '../..',
+      ],
+      'sources': [  ### gcmole(all) ###
+        'run-all-unittests.cc',
+        'test-utils.h',
+        'test-utils.cc',
+      ],
+      'export_dependent_settings': [
+        '../../testing/gmock.gyp:gmock',
+        '../../testing/gtest.gyp:gtest',
+      ],
+      'conditions': [
+        ['component=="shared_library"', {
+          # run-all-unittests can't be built against a shared library, so we
+          # need to depend on the underlying static target in that case.
+          'conditions': [
+            ['v8_use_snapshot=="true"', {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
+            },
+            {
+              'dependencies': [
+                '../../tools/gyp/v8.gyp:v8_nosnapshot',
+              ],
+            }],
+          ],
+        }, {
+          'dependencies': ['../../tools/gyp/v8.gyp:v8'],
+        }],
+        ['os_posix == 1', {
+          # TODO(svenpanne): This is a temporary work-around to fix the warnings
+          # that show up because we use -std=gnu++0x instead of -std=c++11.
+          'cflags!': [
+            '-pedantic',
+          ],
+          'direct_dependent_settings': {
+            'cflags!': [
+              '-pedantic',
+            ],
+          },
+        }],
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ],
+    },
+  ],
+}
diff --git a/src/third_party/kernel/tools/perf/util/jitdump.h b/src/third_party/kernel/tools/perf/util/jitdump.h
new file mode 100644
index 0000000..85d51b7
--- /dev/null
+++ b/src/third_party/kernel/tools/perf/util/jitdump.h
@@ -0,0 +1,83 @@
+#ifndef JITDUMP_H
+#define JITDUMP_H
+
+#include <sys/time.h>
+#include <time.h>
+#include <stdint.h>
+
+/* JiTD */
+#define JITHEADER_MAGIC 0x4A695444
+#define JITHEADER_MAGIC_SW 0x4454694A
+
+#define PADDING_8ALIGNED(x) ((((x) + 7) & 7) ^ 7)
+
+#define JITHEADER_VERSION 1
+
+struct jitheader {
+  uint32_t magic;      /* characters "JiTD" */
+  uint32_t version;    /* header version */
+  uint32_t total_size; /* total size of header */
+  uint32_t elf_mach;   /* elf mach target */
+  uint32_t pad1;       /* reserved */
+  uint32_t pid;        /* JIT process id */
+  uint64_t timestamp;  /* timestamp */
+};
+
+enum jit_record_type {
+  JIT_CODE_LOAD = 0,
+  JIT_CODE_MOVE = 1,
+  JIT_CODE_DEBUG_INFO = 2,
+  JIT_CODE_CLOSE = 3,
+  JIT_CODE_MAX
+};
+
+/* record prefix (mandatory in each record) */
+struct jr_prefix {
+  uint32_t id;
+  uint32_t total_size;
+  uint64_t timestamp;
+};
+
+struct jr_code_load {
+  struct jr_prefix p;
+
+  uint32_t pid;
+  uint32_t tid;
+  uint64_t vma;
+  uint64_t code_addr;
+  uint64_t code_size;
+  uint64_t code_index;
+};
+
+struct jr_code_close {
+  struct jr_prefix p;
+};
+
+struct jr_code_move {
+  struct jr_prefix p;
+
+  uint32_t pid;
+  uint32_t tid;
+  uint64_t vma;
+  uint64_t old_code_addr;
+  uint64_t new_code_addr;
+  uint64_t code_size;
+  uint64_t code_index;
+};
+
+struct jr_code_debug_info {
+  struct jr_prefix p;
+
+  uint64_t code_addr;
+  uint64_t nr_entry;
+};
+
+union jr_entry {
+  struct jr_code_debug_info info;
+  struct jr_code_close close;
+  struct jr_code_load load;
+  struct jr_code_move move;
+  struct jr_prefix prefix;
+};
+
+#endif /* !JITDUMP_H */
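A minimal, standalone sketch of emitting this format: one file header followed by a single JIT_CODE_LOAD record whose payload is the function name and the raw code bytes. The ELF machine value, code bytes, and file name are placeholders, and the 8-byte record padding that real perf consumers may expect is noted but omitted for brevity.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <unistd.h>

#include "jitdump.h"  /* the header above */

int main(void) {
  const char name[] = "my_jitted_function";  /* 19 bytes including NUL */
  const unsigned char code[] = {0xc3};       /* placeholder: x86 'ret' */

  /* Note the alignment helper above: PADDING_8ALIGNED(19) == 5, i.e. 5
     pad bytes round the 19-byte name up to 24. Real consumers may
     require that padding; this sketch leaves it out. */
  struct jitheader header;
  memset(&header, 0, sizeof(header));
  header.magic = JITHEADER_MAGIC;
  header.version = JITHEADER_VERSION;
  header.total_size = sizeof(header);
  header.elf_mach = 62;  /* EM_X86_64, a placeholder target */
  header.pid = getpid();
  header.timestamp = (uint64_t)time(NULL);

  struct jr_code_load load;
  memset(&load, 0, sizeof(load));
  load.p.id = JIT_CODE_LOAD;
  load.p.total_size = sizeof(load) + sizeof(name) + sizeof(code);
  load.p.timestamp = header.timestamp;
  load.pid = header.pid;
  load.code_addr = (uint64_t)(uintptr_t)code;
  load.code_size = sizeof(code);
  load.code_index = 0;

  FILE* f = fopen("jit-example.dump", "wb");
  if (f == NULL) return 1;
  fwrite(&header, sizeof(header), 1, f);
  fwrite(&load, sizeof(load), 1, f);
  fwrite(name, sizeof(name), 1, f);  /* payload: NUL-terminated name... */
  fwrite(code, sizeof(code), 1, f);  /* ...followed by the code bytes */
  fclose(f);
  return 0;
}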
diff --git a/src/third_party/vtune/DEPS b/src/third_party/vtune/DEPS
new file mode 100644
index 0000000..adbe86e
--- /dev/null
+++ b/src/third_party/vtune/DEPS
@@ -0,0 +1,3 @@
+include_rules = [
+  "+../../../include",
+]
diff --git a/src/third_party/vtune/v8-vtune.h b/src/third_party/vtune/v8-vtune.h
index c60b303..a7e5116 100644
--- a/src/third_party/vtune/v8-vtune.h
+++ b/src/third_party/vtune/v8-vtune.h
@@ -58,9 +58,11 @@
 #ifndef V8_VTUNE_H_
 #define V8_VTUNE_H_
 
+#include "../../../include/v8.h"
+
 namespace vTune {
 
-void InitializeVtuneForV8();
+void InitializeVtuneForV8(v8::Isolate::CreateParams& params);
 
 }  // namespace vTune
 
diff --git a/src/third_party/vtune/vtune-jit.cc b/src/third_party/vtune/vtune-jit.cc
index 023dd18..e489d6e 100644
--- a/src/third_party/vtune/vtune-jit.cc
+++ b/src/third_party/vtune/vtune-jit.cc
@@ -192,13 +192,12 @@
         jmethod.method_size = static_cast<unsigned int>(event->code_len);
         jmethod.method_name = temp_method_name;
 
-        Handle<Script> script = event->script;
+        Handle<UnboundScript> script = event->script;
 
         if (*script != NULL) {
           // Get the source file name and set it to jmethod.source_file_name
-         if ((*script->GetUnboundScript()->GetScriptName())->IsString()) {
-            Handle<String> script_name =
-                script->GetUnboundScript()->GetScriptName()->ToString();
+          if ((*script->GetScriptName())->IsString()) {
+            Handle<String> script_name = script->GetScriptName()->ToString();
             temp_file_name = new char[script_name->Utf8Length() + 1];
             script_name->WriteUtf8(temp_file_name);
             jmethod.source_file_name = temp_file_name;
@@ -225,7 +224,7 @@
               jmethod.line_number_table[index].Offset =
                   static_cast<unsigned int>(Iter->pc_);
               jmethod.line_number_table[index++].LineNumber =
-                  script->GetUnboundScript()->GetLineNumber(Iter->pos_)+1;
+                  script->GetLineNumber(Iter->pos_) + 1;
             }
             GetEntries()->erase(event->code_start);
           }
@@ -272,13 +271,10 @@
 
 }  // namespace internal
 
-void InitializeVtuneForV8() {
-  if (v8::V8::Initialize()) {
-    v8::V8::SetFlagsFromString("--nocompact_code_space",
-                              (int)strlen("--nocompact_code_space"));
-    v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault,
-        vTune::internal::VTUNEJITInterface::event_handler);
-  }
+void InitializeVtuneForV8(v8::Isolate::CreateParams& params) {
+  v8::V8::SetFlagsFromString("--nocompact_code_space",
+                             (int)strlen("--nocompact_code_space"));
+  params.code_event_handler = vTune::internal::VTUNEJITInterface::event_handler;
 }
 
 }  // namespace vTune
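A hedged sketch of the new embedder flow implied by the vtune changes above: InitializeVtuneForV8() now fills in Isolate::CreateParams so the JIT code event handler is installed at isolate creation, instead of calling V8::SetJitCodeEventHandler() after V8::Initialize(). The include path and the Isolate::New(CreateParams) overload are assumptions about the embedder's setup in this V8 revision.

#include "include/v8.h"
#include "src/third_party/vtune/v8-vtune.h"

v8::Isolate* NewIsolateWithVTune() {
  v8::Isolate::CreateParams params;
  vTune::InitializeVtuneForV8(params);  // sets params.code_event_handler
  return v8::Isolate::New(params);      // assumed overload in this revision
}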
diff --git a/src/token.h b/src/token.h
index 12ae424..9c719b8 100644
--- a/src/token.h
+++ b/src/token.h
@@ -5,7 +5,7 @@
 #ifndef V8_TOKEN_H_
 #define V8_TOKEN_H_
 
-#include "src/checks.h"
+#include "src/base/logging.h"
 
 namespace v8 {
 namespace internal {
@@ -25,138 +25,143 @@
 
 #define IGNORE_TOKEN(name, string, precedence)
 
-#define TOKEN_LIST(T, K)                                                \
-  /* End of source indicator. */                                        \
-  T(EOS, "EOS", 0)                                                      \
-                                                                        \
-  /* Punctuators (ECMA-262, section 7.7, page 15). */                   \
-  T(LPAREN, "(", 0)                                                     \
-  T(RPAREN, ")", 0)                                                     \
-  T(LBRACK, "[", 0)                                                     \
-  T(RBRACK, "]", 0)                                                     \
-  T(LBRACE, "{", 0)                                                     \
-  T(RBRACE, "}", 0)                                                     \
-  T(COLON, ":", 0)                                                      \
-  T(SEMICOLON, ";", 0)                                                  \
-  T(PERIOD, ".", 0)                                                     \
-  T(CONDITIONAL, "?", 3)                                                \
-  T(INC, "++", 0)                                                       \
-  T(DEC, "--", 0)                                                       \
-                                                                        \
-  /* Assignment operators. */                                           \
-  /* IsAssignmentOp() and Assignment::is_compound() relies on */        \
-  /* this block of enum values being contiguous and sorted in the */    \
-  /* same order! */                                                     \
-  T(INIT_VAR, "=init_var", 2)  /* AST-use only. */                      \
-  T(INIT_LET, "=init_let", 2)  /* AST-use only. */                      \
-  T(INIT_CONST, "=init_const", 2)  /* AST-use only. */                  \
-  T(INIT_CONST_LEGACY, "=init_const_legacy", 2)  /* AST-use only. */    \
-  T(ASSIGN, "=", 2)                                                     \
-  T(ASSIGN_BIT_OR, "|=", 2)                                             \
-  T(ASSIGN_BIT_XOR, "^=", 2)                                            \
-  T(ASSIGN_BIT_AND, "&=", 2)                                            \
-  T(ASSIGN_SHL, "<<=", 2)                                               \
-  T(ASSIGN_SAR, ">>=", 2)                                               \
-  T(ASSIGN_SHR, ">>>=", 2)                                              \
-  T(ASSIGN_ADD, "+=", 2)                                                \
-  T(ASSIGN_SUB, "-=", 2)                                                \
-  T(ASSIGN_MUL, "*=", 2)                                                \
-  T(ASSIGN_DIV, "/=", 2)                                                \
-  T(ASSIGN_MOD, "%=", 2)                                                \
-                                                                        \
-  /* Binary operators sorted by precedence. */                          \
-  /* IsBinaryOp() relies on this block of enum values */                \
-  /* being contiguous and sorted in the same order! */                  \
-  T(COMMA, ",", 1)                                                      \
-  T(OR, "||", 4)                                                        \
-  T(AND, "&&", 5)                                                       \
-  T(BIT_OR, "|", 6)                                                     \
-  T(BIT_XOR, "^", 7)                                                    \
-  T(BIT_AND, "&", 8)                                                    \
-  T(SHL, "<<", 11)                                                      \
-  T(SAR, ">>", 11)                                                      \
-  T(SHR, ">>>", 11)                                                     \
-  T(ROR, "rotate right", 11)   /* only used by Crankshaft */            \
-  T(ADD, "+", 12)                                                       \
-  T(SUB, "-", 12)                                                       \
-  T(MUL, "*", 13)                                                       \
-  T(DIV, "/", 13)                                                       \
-  T(MOD, "%", 13)                                                       \
-                                                                        \
-  /* Compare operators sorted by precedence. */                         \
-  /* IsCompareOp() relies on this block of enum values */               \
-  /* being contiguous and sorted in the same order! */                  \
-  T(EQ, "==", 9)                                                        \
-  T(NE, "!=", 9)                                                        \
-  T(EQ_STRICT, "===", 9)                                                \
-  T(NE_STRICT, "!==", 9)                                                \
-  T(LT, "<", 10)                                                        \
-  T(GT, ">", 10)                                                        \
-  T(LTE, "<=", 10)                                                      \
-  T(GTE, ">=", 10)                                                      \
-  K(INSTANCEOF, "instanceof", 10)                                       \
-  K(IN, "in", 10)                                                       \
-                                                                        \
-  /* Unary operators. */                                                \
-  /* IsUnaryOp() relies on this block of enum values */                 \
-  /* being contiguous and sorted in the same order! */                  \
-  T(NOT, "!", 0)                                                        \
-  T(BIT_NOT, "~", 0)                                                    \
-  K(DELETE, "delete", 0)                                                \
-  K(TYPEOF, "typeof", 0)                                                \
-  K(VOID, "void", 0)                                                    \
-                                                                        \
-  /* Keywords (ECMA-262, section 7.5.2, page 13). */                    \
-  K(BREAK, "break", 0)                                                  \
-  K(CASE, "case", 0)                                                    \
-  K(CATCH, "catch", 0)                                                  \
-  K(CONTINUE, "continue", 0)                                            \
-  K(DEBUGGER, "debugger", 0)                                            \
-  K(DEFAULT, "default", 0)                                              \
-  /* DELETE */                                                          \
-  K(DO, "do", 0)                                                        \
-  K(ELSE, "else", 0)                                                    \
-  K(FINALLY, "finally", 0)                                              \
-  K(FOR, "for", 0)                                                      \
-  K(FUNCTION, "function", 0)                                            \
-  K(IF, "if", 0)                                                        \
-  /* IN */                                                              \
-  /* INSTANCEOF */                                                      \
-  K(NEW, "new", 0)                                                      \
-  K(RETURN, "return", 0)                                                \
-  K(SWITCH, "switch", 0)                                                \
-  K(THIS, "this", 0)                                                    \
-  K(THROW, "throw", 0)                                                  \
-  K(TRY, "try", 0)                                                      \
-  /* TYPEOF */                                                          \
-  K(VAR, "var", 0)                                                      \
-  /* VOID */                                                            \
-  K(WHILE, "while", 0)                                                  \
-  K(WITH, "with", 0)                                                    \
-                                                                        \
-  /* Literals (ECMA-262, section 7.8, page 16). */                      \
-  K(NULL_LITERAL, "null", 0)                                            \
-  K(TRUE_LITERAL, "true", 0)                                            \
-  K(FALSE_LITERAL, "false", 0)                                          \
-  T(NUMBER, NULL, 0)                                                    \
-  T(STRING, NULL, 0)                                                    \
-                                                                        \
-  /* Identifiers (not keywords or future reserved words). */            \
-  T(IDENTIFIER, NULL, 0)                                                \
-                                                                        \
-  /* Future reserved words (ECMA-262, section 7.6.1.2). */              \
-  T(FUTURE_RESERVED_WORD, NULL, 0)                                      \
-  T(FUTURE_STRICT_RESERVED_WORD, NULL, 0)                               \
-  K(CONST, "const", 0)                                                  \
-  K(EXPORT, "export", 0)                                                \
-  K(IMPORT, "import", 0)                                                \
-  K(LET, "let", 0)                                                      \
-  K(YIELD, "yield", 0)                                                  \
-                                                                        \
-  /* Illegal token - not able to scan. */                               \
-  T(ILLEGAL, "ILLEGAL", 0)                                              \
-                                                                        \
-  /* Scanner-internal use only. */                                      \
+#define TOKEN_LIST(T, K)                                             \
+  /* End of source indicator. */                                     \
+  T(EOS, "EOS", 0)                                                   \
+                                                                     \
+  /* Punctuators (ECMA-262, section 7.7, page 15). */                \
+  T(LPAREN, "(", 0)                                                  \
+  T(RPAREN, ")", 0)                                                  \
+  T(LBRACK, "[", 0)                                                  \
+  T(RBRACK, "]", 0)                                                  \
+  T(LBRACE, "{", 0)                                                  \
+  T(RBRACE, "}", 0)                                                  \
+  T(COLON, ":", 0)                                                   \
+  T(SEMICOLON, ";", 0)                                               \
+  T(PERIOD, ".", 0)                                                  \
+  T(CONDITIONAL, "?", 3)                                             \
+  T(INC, "++", 0)                                                    \
+  T(DEC, "--", 0)                                                    \
+  T(ARROW, "=>", 0)                                                  \
+                                                                     \
+  /* Assignment operators. */                                        \
+  /* IsAssignmentOp() and Assignment::is_compound() relies on */     \
+  /* this block of enum values being contiguous and sorted in the */ \
+  /* same order! */                                                  \
+  T(INIT_VAR, "=init_var", 2)                   /* AST-use only. */  \
+  T(INIT_LET, "=init_let", 2)                   /* AST-use only. */  \
+  T(INIT_CONST, "=init_const", 2)               /* AST-use only. */  \
+  T(INIT_CONST_LEGACY, "=init_const_legacy", 2) /* AST-use only. */  \
+  T(ASSIGN, "=", 2)                                                  \
+  T(ASSIGN_BIT_OR, "|=", 2)                                          \
+  T(ASSIGN_BIT_XOR, "^=", 2)                                         \
+  T(ASSIGN_BIT_AND, "&=", 2)                                         \
+  T(ASSIGN_SHL, "<<=", 2)                                            \
+  T(ASSIGN_SAR, ">>=", 2)                                            \
+  T(ASSIGN_SHR, ">>>=", 2)                                           \
+  T(ASSIGN_ADD, "+=", 2)                                             \
+  T(ASSIGN_SUB, "-=", 2)                                             \
+  T(ASSIGN_MUL, "*=", 2)                                             \
+  T(ASSIGN_DIV, "/=", 2)                                             \
+  T(ASSIGN_MOD, "%=", 2)                                             \
+                                                                     \
+  /* Binary operators sorted by precedence. */                       \
+  /* IsBinaryOp() relies on this block of enum values */             \
+  /* being contiguous and sorted in the same order! */               \
+  T(COMMA, ",", 1)                                                   \
+  T(OR, "||", 4)                                                     \
+  T(AND, "&&", 5)                                                    \
+  T(BIT_OR, "|", 6)                                                  \
+  T(BIT_XOR, "^", 7)                                                 \
+  T(BIT_AND, "&", 8)                                                 \
+  T(SHL, "<<", 11)                                                   \
+  T(SAR, ">>", 11)                                                   \
+  T(SHR, ">>>", 11)                                                  \
+  T(ROR, "rotate right", 11) /* only used by Crankshaft */           \
+  T(ADD, "+", 12)                                                    \
+  T(SUB, "-", 12)                                                    \
+  T(MUL, "*", 13)                                                    \
+  T(DIV, "/", 13)                                                    \
+  T(MOD, "%", 13)                                                    \
+                                                                     \
+  /* Compare operators sorted by precedence. */                      \
+  /* IsCompareOp() relies on this block of enum values */            \
+  /* being contiguous and sorted in the same order! */               \
+  T(EQ, "==", 9)                                                     \
+  T(NE, "!=", 9)                                                     \
+  T(EQ_STRICT, "===", 9)                                             \
+  T(NE_STRICT, "!==", 9)                                             \
+  T(LT, "<", 10)                                                     \
+  T(GT, ">", 10)                                                     \
+  T(LTE, "<=", 10)                                                   \
+  T(GTE, ">=", 10)                                                   \
+  K(INSTANCEOF, "instanceof", 10)                                    \
+  K(IN, "in", 10)                                                    \
+                                                                     \
+  /* Unary operators. */                                             \
+  /* IsUnaryOp() relies on this block of enum values */              \
+  /* being contiguous and sorted in the same order! */               \
+  T(NOT, "!", 0)                                                     \
+  T(BIT_NOT, "~", 0)                                                 \
+  K(DELETE, "delete", 0)                                             \
+  K(TYPEOF, "typeof", 0)                                             \
+  K(VOID, "void", 0)                                                 \
+                                                                     \
+  /* Keywords (ECMA-262, section 7.5.2, page 13). */                 \
+  K(BREAK, "break", 0)                                               \
+  K(CASE, "case", 0)                                                 \
+  K(CATCH, "catch", 0)                                               \
+  K(CONTINUE, "continue", 0)                                         \
+  K(DEBUGGER, "debugger", 0)                                         \
+  K(DEFAULT, "default", 0)                                           \
+  /* DELETE */                                                       \
+  K(DO, "do", 0)                                                     \
+  K(ELSE, "else", 0)                                                 \
+  K(FINALLY, "finally", 0)                                           \
+  K(FOR, "for", 0)                                                   \
+  K(FUNCTION, "function", 0)                                         \
+  K(IF, "if", 0)                                                     \
+  /* IN */                                                           \
+  /* INSTANCEOF */                                                   \
+  K(NEW, "new", 0)                                                   \
+  K(RETURN, "return", 0)                                             \
+  K(SWITCH, "switch", 0)                                             \
+  K(THIS, "this", 0)                                                 \
+  K(THROW, "throw", 0)                                               \
+  K(TRY, "try", 0)                                                   \
+  /* TYPEOF */                                                       \
+  K(VAR, "var", 0)                                                   \
+  /* VOID */                                                         \
+  K(WHILE, "while", 0)                                               \
+  K(WITH, "with", 0)                                                 \
+                                                                     \
+  /* Literals (ECMA-262, section 7.8, page 16). */                   \
+  K(NULL_LITERAL, "null", 0)                                         \
+  K(TRUE_LITERAL, "true", 0)                                         \
+  K(FALSE_LITERAL, "false", 0)                                       \
+  T(NUMBER, NULL, 0)                                                 \
+  T(STRING, NULL, 0)                                                 \
+                                                                     \
+  /* Identifiers (not keywords or future reserved words). */         \
+  T(IDENTIFIER, NULL, 0)                                             \
+                                                                     \
+  /* Future reserved words (ECMA-262, section 7.6.1.2). */           \
+  T(FUTURE_RESERVED_WORD, NULL, 0)                                   \
+  T(FUTURE_STRICT_RESERVED_WORD, NULL, 0)                            \
+  K(CLASS, "class", 0)                                               \
+  K(CONST, "const", 0)                                               \
+  K(EXPORT, "export", 0)                                             \
+  K(EXTENDS, "extends", 0)                                           \
+  K(IMPORT, "import", 0)                                             \
+  K(LET, "let", 0)                                                   \
+  K(STATIC, "static", 0)                                             \
+  K(YIELD, "yield", 0)                                               \
+  K(SUPER, "super", 0)                                               \
+                                                                     \
+  /* Illegal token - not able to scan. */                            \
+  T(ILLEGAL, "ILLEGAL", 0)                                           \
+                                                                     \
+  /* Scanner-internal use only. */                                   \
   T(WHITESPACE, NULL, 0)
 
 
@@ -173,7 +178,7 @@
   // Returns a string corresponding to the C++ token name
   // (e.g. "LT" for the token LT).
   static const char* Name(Value tok) {
-    ASSERT(tok < NUM_TOKENS);  // tok is unsigned
+    DCHECK(tok < NUM_TOKENS);  // tok is unsigned
     return name_[tok];
   }
 
@@ -216,7 +221,7 @@
   }
 
   static Value NegateCompareOp(Value op) {
-    ASSERT(IsArithmeticCompareOp(op));
+    DCHECK(IsArithmeticCompareOp(op));
     switch (op) {
       case EQ: return NE;
       case NE: return EQ;
@@ -233,7 +238,7 @@
   }
 
   static Value ReverseCompareOp(Value op) {
-    ASSERT(IsArithmeticCompareOp(op));
+    DCHECK(IsArithmeticCompareOp(op));
     switch (op) {
       case EQ: return EQ;
       case NE: return NE;
@@ -269,14 +274,14 @@
   // (e.g. "<" for the token LT) or NULL if the token doesn't
   // have a (unique) string (e.g. an IDENTIFIER).
   static const char* String(Value tok) {
-    ASSERT(tok < NUM_TOKENS);  // tok is unsigned.
+    DCHECK(tok < NUM_TOKENS);  // tok is unsigned.
     return string_[tok];
   }
 
   // Returns the precedence > 0 for binary and compare
   // operators; returns 0 otherwise.
   static int Precedence(Value tok) {
-    ASSERT(tok < NUM_TOKENS);  // tok is unsigned.
+    DCHECK(tok < NUM_TOKENS);  // tok is unsigned.
     return precedence_[tok];
   }
 
diff --git a/src/transitions-inl.h b/src/transitions-inl.h
index 2387803..a16eb44 100644
--- a/src/transitions-inl.h
+++ b/src/transitions-inl.h
@@ -28,7 +28,7 @@
 
 
 TransitionArray* TransitionArray::cast(Object* object) {
-  ASSERT(object->IsTransitionArray());
+  DCHECK(object->IsTransitionArray());
   return reinterpret_cast<TransitionArray*>(object);
 }
 
@@ -59,7 +59,7 @@
 
 
 FixedArray* TransitionArray::GetPrototypeTransitions() {
-  ASSERT(IsFullTransitionArray());
+  DCHECK(IsFullTransitionArray());
   Object* prototype_transitions = get(kPrototypeTransitionsIndex);
   return FixedArray::cast(prototype_transitions);
 }
@@ -67,8 +67,8 @@
 
 void TransitionArray::SetPrototypeTransitions(FixedArray* transitions,
                                               WriteBarrierMode mode) {
-  ASSERT(IsFullTransitionArray());
-  ASSERT(transitions->IsFixedArray());
+  DCHECK(IsFullTransitionArray());
+  DCHECK(transitions->IsFixedArray());
   Heap* heap = GetHeap();
   WRITE_FIELD(this, kPrototypeTransitionsOffset, transitions);
   CONDITIONAL_WRITE_BARRIER(
@@ -83,8 +83,8 @@
 
 
 Object** TransitionArray::GetKeySlot(int transition_number) {
-  ASSERT(!IsSimpleTransition());
-  ASSERT(transition_number < number_of_transitions());
+  DCHECK(!IsSimpleTransition());
+  DCHECK(transition_number < number_of_transitions());
   return RawFieldOfElementAt(ToKeyIndex(transition_number));
 }
 
@@ -96,34 +96,34 @@
     Name* key = target->instance_descriptors()->GetKey(descriptor);
     return key;
   }
-  ASSERT(transition_number < number_of_transitions());
+  DCHECK(transition_number < number_of_transitions());
   return Name::cast(get(ToKeyIndex(transition_number)));
 }
 
 
 void TransitionArray::SetKey(int transition_number, Name* key) {
-  ASSERT(!IsSimpleTransition());
-  ASSERT(transition_number < number_of_transitions());
+  DCHECK(!IsSimpleTransition());
+  DCHECK(transition_number < number_of_transitions());
   set(ToKeyIndex(transition_number), key);
 }
 
 
 Map* TransitionArray::GetTarget(int transition_number) {
   if (IsSimpleTransition()) {
-    ASSERT(transition_number == kSimpleTransitionIndex);
+    DCHECK(transition_number == kSimpleTransitionIndex);
     return Map::cast(get(kSimpleTransitionTarget));
   }
-  ASSERT(transition_number < number_of_transitions());
+  DCHECK(transition_number < number_of_transitions());
   return Map::cast(get(ToTargetIndex(transition_number)));
 }
 
 
 void TransitionArray::SetTarget(int transition_number, Map* value) {
   if (IsSimpleTransition()) {
-    ASSERT(transition_number == kSimpleTransitionIndex);
+    DCHECK(transition_number == kSimpleTransitionIndex);
     return set(kSimpleTransitionTarget, value);
   }
-  ASSERT(transition_number < number_of_transitions());
+  DCHECK(transition_number < number_of_transitions());
   set(ToTargetIndex(transition_number), value);
 }
 
diff --git a/src/transitions.cc b/src/transitions.cc
index 6ac1ab0..96ed870 100644
--- a/src/transitions.cc
+++ b/src/transitions.cc
@@ -64,7 +64,7 @@
 
 Handle<TransitionArray> TransitionArray::ExtendToFullTransitionArray(
     Handle<Map> containing_map) {
-  ASSERT(!containing_map->transitions()->IsFullTransitionArray());
+  DCHECK(!containing_map->transitions()->IsFullTransitionArray());
   int nof = containing_map->transitions()->number_of_transitions();
 
   // A transition array may shrink during GC.
@@ -72,7 +72,7 @@
   DisallowHeapAllocation no_gc;
   int new_nof = containing_map->transitions()->number_of_transitions();
   if (new_nof != nof) {
-    ASSERT(new_nof == 0);
+    DCHECK(new_nof == 0);
     result->Shrink(ToKeyIndex(0));
   } else if (nof == 1) {
     result->NoIncrementalWriteBarrierCopyFrom(
@@ -104,11 +104,11 @@
   // The map's transition array may have grown smaller during the allocation
   // it was weakly traversed, though it is guaranteed not to disappear. Trim the
   // result copy if needed, and recompute variables.
-  ASSERT(map->HasTransitionArray());
+  DCHECK(map->HasTransitionArray());
   DisallowHeapAllocation no_gc;
   TransitionArray* array = map->transitions();
   if (array->number_of_transitions() != number_of_transitions) {
-    ASSERT(array->number_of_transitions() < number_of_transitions);
+    DCHECK(array->number_of_transitions() < number_of_transitions);
 
     number_of_transitions = array->number_of_transitions();
     new_size = number_of_transitions;
diff --git a/src/transitions.h b/src/transitions.h
index ec99c8b..21c02ac 100644
--- a/src/transitions.h
+++ b/src/transitions.h
@@ -5,11 +5,11 @@
 #ifndef V8_TRANSITIONS_H_
 #define V8_TRANSITIONS_H_
 
+#include "src/checks.h"
 #include "src/elements-kind.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/isolate.h"
 #include "src/objects.h"
-#include "src/v8checks.h"
 
 namespace v8 {
 namespace internal {
@@ -140,10 +140,7 @@
 
 #ifdef OBJECT_PRINT
   // Print all the transitions.
-  inline void PrintTransitions() {
-    PrintTransitions(stdout);
-  }
-  void PrintTransitions(FILE* out);
+  void PrintTransitions(OStream& os);  // NOLINT
 #endif
 
 #ifdef DEBUG
diff --git a/src/trig-table.h b/src/trig-table.h
deleted file mode 100644
index 7332152..0000000
--- a/src/trig-table.h
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_TRIG_TABLE_H_
-#define V8_TRIG_TABLE_H_
-
-
-namespace v8 {
-namespace internal {
-
-class TrigonometricLookupTable : public AllStatic {
- public:
-  // Casting away const-ness to use as argument for typed array constructor.
-  static void* sin_table() {
-    return const_cast<double*>(&kSinTable[0]);
-  }
-
-  static void* cos_x_interval_table() {
-    return const_cast<double*>(&kCosXIntervalTable[0]);
-  }
-
-  static double samples_over_pi_half() { return kSamplesOverPiHalf; }
-  static int samples() { return kSamples; }
-  static int table_num_bytes() { return kTableSize * sizeof(*kSinTable); }
-  static int table_size() { return kTableSize; }
-
- private:
-  static const double kSinTable[];
-  static const double kCosXIntervalTable[];
-  static const int kSamples;
-  static const int kTableSize;
-  static const double kSamplesOverPiHalf;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_TRIG_TABLE_H_
diff --git a/src/type-feedback-vector-inl.h b/src/type-feedback-vector-inl.h
new file mode 100644
index 0000000..43e768e
--- /dev/null
+++ b/src/type-feedback-vector-inl.h
@@ -0,0 +1,45 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPE_FEEDBACK_VECTOR_INL_H_
+#define V8_TYPE_FEEDBACK_VECTOR_INL_H_
+
+#include "src/type-feedback-vector.h"
+
+namespace v8 {
+namespace internal {
+
+Handle<Object> TypeFeedbackVector::UninitializedSentinel(Isolate* isolate) {
+  return isolate->factory()->uninitialized_symbol();
+}
+
+
+Handle<Object> TypeFeedbackVector::MegamorphicSentinel(Isolate* isolate) {
+  return isolate->factory()->megamorphic_symbol();
+}
+
+
+Handle<Object> TypeFeedbackVector::PremonomorphicSentinel(Isolate* isolate) {
+  return isolate->factory()->premonomorphic_symbol();
+}
+
+
+Handle<Object> TypeFeedbackVector::GenericSentinel(Isolate* isolate) {
+  return isolate->factory()->generic_symbol();
+}
+
+
+Handle<Object> TypeFeedbackVector::MonomorphicArraySentinel(
+    Isolate* isolate, ElementsKind elements_kind) {
+  return Handle<Object>(Smi::FromInt(static_cast<int>(elements_kind)), isolate);
+}
+
+
+Object* TypeFeedbackVector::RawUninitializedSentinel(Heap* heap) {
+  return heap->uninitialized_symbol();
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TYPE_FEEDBACK_VECTOR_INL_H_
diff --git a/src/type-feedback-vector.cc b/src/type-feedback-vector.cc
new file mode 100644
index 0000000..a3fe070
--- /dev/null
+++ b/src/type-feedback-vector.cc
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/objects.h"
+#include "src/type-feedback-vector-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// static
+Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
+    Isolate* isolate, Handle<TypeFeedbackVector> vector) {
+  Handle<TypeFeedbackVector> result;
+  result = Handle<TypeFeedbackVector>::cast(
+      isolate->factory()->CopyFixedArray(Handle<FixedArray>::cast(vector)));
+  return result;
+}
+}
+}  // namespace v8::internal
diff --git a/src/type-feedback-vector.h b/src/type-feedback-vector.h
new file mode 100644
index 0000000..b6fadba
--- /dev/null
+++ b/src/type-feedback-vector.h
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TYPE_FEEDBACK_VECTOR_H_
+#define V8_TYPE_FEEDBACK_VECTOR_H_
+
+#include "src/checks.h"
+#include "src/elements-kind.h"
+#include "src/heap/heap.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+class TypeFeedbackVector : public FixedArray {
+ public:
+  // Casting.
+  static TypeFeedbackVector* cast(Object* obj) {
+    DCHECK(obj->IsTypeFeedbackVector());
+    return reinterpret_cast<TypeFeedbackVector*>(obj);
+  }
+
+  static Handle<TypeFeedbackVector> Copy(Isolate* isolate,
+                                         Handle<TypeFeedbackVector> vector);
+
+  // The object that indicates an uninitialized cache.
+  static inline Handle<Object> UninitializedSentinel(Isolate* isolate);
+
+  // The object that indicates a megamorphic state.
+  static inline Handle<Object> MegamorphicSentinel(Isolate* isolate);
+
+  // The object that indicates a premonomorphic state.
+  static inline Handle<Object> PremonomorphicSentinel(Isolate* isolate);
+
+  // The object that indicates a generic state.
+  static inline Handle<Object> GenericSentinel(Isolate* isolate);
+
+  // The object that indicates a monomorphic state of an Array with a
+  // particular ElementsKind.
+  static inline Handle<Object> MonomorphicArraySentinel(
+      Isolate* isolate, ElementsKind elements_kind);
+
+  // A raw version of the uninitialized sentinel that's safe to read during
+  // garbage collection (e.g., for patching the cache).
+  static inline Object* RawUninitializedSentinel(Heap* heap);
+
+ private:
+  DISALLOW_IMPLICIT_CONSTRUCTORS(TypeFeedbackVector);
+};
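+
+// Illustrative use (a sketch, not part of this change): IC code can
+// distinguish slot states by identity comparison against the sentinels
+// declared above, e.g.:
+//
+//   Handle<Object> feedback(vector->get(slot), isolate);
+//   if (feedback.is_identical_to(
+//           TypeFeedbackVector::UninitializedSentinel(isolate))) {
+//     // No type feedback has been recorded for this slot yet.
+//   }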
+}
+}  // namespace v8::internal
+
+#endif  // V8_TYPE_FEEDBACK_VECTOR_H_
diff --git a/src/type-info.cc b/src/type-info.cc
index 45ac1a3..cf3950f 100644
--- a/src/type-info.cc
+++ b/src/type-info.cc
@@ -7,31 +7,28 @@
 #include "src/ast.h"
 #include "src/code-stubs.h"
 #include "src/compiler.h"
-#include "src/ic.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
 #include "src/macro-assembler.h"
-#include "src/stub-cache.h"
 #include "src/type-info.h"
 
-#include "src/ic-inl.h"
 #include "src/objects-inl.h"
 
 namespace v8 {
 namespace internal {
 
 
-TypeFeedbackOracle::TypeFeedbackOracle(Handle<Code> code,
-                                       Handle<FixedArray> feedback_vector,
-                                       Handle<Context> native_context,
-                                       Zone* zone)
-    : native_context_(native_context),
-      zone_(zone) {
+TypeFeedbackOracle::TypeFeedbackOracle(
+    Handle<Code> code, Handle<TypeFeedbackVector> feedback_vector,
+    Handle<Context> native_context, Zone* zone)
+    : native_context_(native_context), zone_(zone) {
   BuildDictionary(code);
-  ASSERT(dictionary_->IsDictionary());
+  DCHECK(dictionary_->IsDictionary());
   // We make a copy of the feedback vector because a GC could clear
   // the type feedback info contained therein.
   // TODO(mvstanton): revisit the decision to copy when we weakly
   // traverse the feedback vector at GC time.
-  feedback_vector_ = isolate()->factory()->CopyFixedArray(feedback_vector);
+  feedback_vector_ = TypeFeedbackVector::Copy(isolate(), feedback_vector);
 }
 
 
@@ -56,7 +53,7 @@
 
 
 Handle<Object> TypeFeedbackOracle::GetInfo(int slot) {
-  ASSERT(slot >= 0 && slot < feedback_vector_->length());
+  DCHECK(slot >= 0 && slot < feedback_vector_->length());
   Object* obj = feedback_vector_->get(slot);
   if (!obj->IsJSFunction() ||
       !CanRetainOtherContext(JSFunction::cast(obj), *native_context_)) {
@@ -112,8 +109,9 @@
 byte TypeFeedbackOracle::ForInType(int feedback_vector_slot) {
   Handle<Object> value = GetInfo(feedback_vector_slot);
   return value.is_identical_to(
-      TypeFeedbackInfo::UninitializedSentinel(isolate()))
-      ? ForInStatement::FAST_FOR_IN : ForInStatement::SLOW_FOR_IN;
+             TypeFeedbackVector::UninitializedSentinel(isolate()))
+             ? ForInStatement::FAST_FOR_IN
+             : ForInStatement::SLOW_FOR_IN;
 }
 
 
@@ -146,7 +144,7 @@
     return Handle<JSFunction>::cast(info);
   }
 
-  ASSERT(info->IsAllocationSite());
+  DCHECK(info->IsAllocationSite());
   return Handle<JSFunction>(isolate()->native_context()->array_function());
 }
 
@@ -175,16 +173,6 @@
 }
 
 
-bool TypeFeedbackOracle::LoadIsStub(TypeFeedbackId id, ICStub* stub) {
-  Handle<Object> object = GetInfo(id);
-  if (!object->IsCode()) return false;
-  Handle<Code> code = Handle<Code>::cast(object);
-  if (!code->is_load_stub()) return false;
-  if (code->ic_state() != MONOMORPHIC) return false;
-  return stub->Describes(*code);
-}
-
-
 void TypeFeedbackOracle::CompareType(TypeFeedbackId id,
                                      Type** left_type,
                                      Type** right_type,
@@ -200,16 +188,17 @@
   Handle<Map> map;
   Map* raw_map = code->FindFirstMap();
   if (raw_map != NULL) {
-    if (Map::CurrentMapForDeprecated(handle(raw_map)).ToHandle(&map) &&
+    if (Map::TryUpdate(handle(raw_map)).ToHandle(&map) &&
         CanRetainOtherContext(*map, *native_context_)) {
       map = Handle<Map>::null();
     }
   }
 
   if (code->is_compare_ic_stub()) {
-    int stub_minor_key = code->stub_info();
-    CompareIC::StubInfoToType(
-        stub_minor_key, left_type, right_type, combined_type, map, zone());
+    CompareICStub stub(code->stub_key(), isolate());
+    *left_type = CompareICState::StateToType(zone(), stub.left());
+    *right_type = CompareICState::StateToType(zone(), stub.right());
+    *combined_type = CompareICState::StateToType(zone(), stub.state(), map);
   } else if (code->is_compare_nil_ic_stub()) {
     CompareNilICStub stub(isolate(), code->extra_ic_state());
     *combined_type = stub.GetType(zone(), map);
@@ -229,17 +218,17 @@
   if (!object->IsCode()) {
     // For some binary ops we don't have ICs, e.g. Token::COMMA, but for the
     // operations covered by the BinaryOpIC we should always have them.
-    ASSERT(op < BinaryOpIC::State::FIRST_TOKEN ||
-           op > BinaryOpIC::State::LAST_TOKEN);
+    DCHECK(op < BinaryOpICState::FIRST_TOKEN ||
+           op > BinaryOpICState::LAST_TOKEN);
     *left = *right = *result = Type::None(zone());
     *fixed_right_arg = Maybe<int>();
     *allocation_site = Handle<AllocationSite>::null();
     return;
   }
   Handle<Code> code = Handle<Code>::cast(object);
-  ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
-  BinaryOpIC::State state(isolate(), code->extra_ic_state());
-  ASSERT_EQ(op, state.op());
+  DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
+  BinaryOpICState state(isolate(), code->extra_ic_state());
+  DCHECK_EQ(op, state.op());
 
   *left = state.GetLeftType(zone());
   *right = state.GetRightType(zone());
@@ -259,22 +248,18 @@
   Handle<Object> object = GetInfo(id);
   if (!object->IsCode()) return Type::None(zone());
   Handle<Code> code = Handle<Code>::cast(object);
-  ASSERT_EQ(Code::BINARY_OP_IC, code->kind());
-  BinaryOpIC::State state(isolate(), code->extra_ic_state());
+  DCHECK_EQ(Code::BINARY_OP_IC, code->kind());
+  BinaryOpICState state(isolate(), code->extra_ic_state());
   return state.GetLeftType(zone());
 }
 
 
-void TypeFeedbackOracle::PropertyReceiverTypes(
-    TypeFeedbackId id, Handle<String> name,
-    SmallMapList* receiver_types, bool* is_prototype) {
+void TypeFeedbackOracle::PropertyReceiverTypes(TypeFeedbackId id,
+                                               Handle<String> name,
+                                               SmallMapList* receiver_types) {
   receiver_types->Clear();
-  FunctionPrototypeStub proto_stub(isolate(), Code::LOAD_IC);
-  *is_prototype = LoadIsStub(id, &proto_stub);
-  if (!*is_prototype) {
-    Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
-    CollectReceiverTypes(id, name, flags, receiver_types);
-  }
+  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+  CollectReceiverTypes(id, name, flags, receiver_types);
 }
 
 
@@ -321,7 +306,7 @@
   Handle<Object> object = GetInfo(ast_id);
   if (object->IsUndefined() || object->IsSmi()) return;
 
-  ASSERT(object->IsCode());
+  DCHECK(object->IsCode());
   Handle<Code> code(Handle<Code>::cast(object));
 
   if (FLAG_collect_megamorphic_maps_from_stub_cache &&
@@ -472,7 +457,7 @@
 
 
 void TypeFeedbackOracle::SetInfo(TypeFeedbackId ast_id, Object* target) {
-  ASSERT(dictionary_->FindEntry(IdToKey(ast_id)) ==
+  DCHECK(dictionary_->FindEntry(IdToKey(ast_id)) ==
          UnseededNumberDictionary::kNotFound);
   // Dictionary has been allocated with sufficient size for all elements.
   DisallowHeapAllocation no_need_to_resize_dictionary;
diff --git a/src/type-info.h b/src/type-info.h
index 706921a..434ddd6 100644
--- a/src/type-info.h
+++ b/src/type-info.h
@@ -14,16 +14,14 @@
 namespace internal {
 
 // Forward declarations.
-class ICStub;
 class SmallMapList;
 
 
 class TypeFeedbackOracle: public ZoneObject {
  public:
   TypeFeedbackOracle(Handle<Code> code,
-                     Handle<FixedArray> feedback_vector,
-                     Handle<Context> native_context,
-                     Zone* zone);
+                     Handle<TypeFeedbackVector> feedback_vector,
+                     Handle<Context> native_context, Zone* zone);
 
   bool LoadIsUninitialized(TypeFeedbackId id);
   bool StoreIsUninitialized(TypeFeedbackId id);
@@ -41,10 +39,8 @@
 
   KeyedAccessStoreMode GetStoreMode(TypeFeedbackId id);
 
-  void PropertyReceiverTypes(TypeFeedbackId id,
-                             Handle<String> name,
-                             SmallMapList* receiver_types,
-                             bool* is_prototype);
+  void PropertyReceiverTypes(TypeFeedbackId id, Handle<String> name,
+                             SmallMapList* receiver_types);
   void KeyedPropertyReceiverTypes(TypeFeedbackId id,
                                   SmallMapList* receiver_types,
                                   bool* is_string);
@@ -70,7 +66,6 @@
   Handle<AllocationSite> GetCallNewAllocationSite(int slot);
 
   bool LoadIsBuiltin(TypeFeedbackId id, Builtins::Name builtin_id);
-  bool LoadIsStub(TypeFeedbackId id, ICStub* stub);
 
   // TODO(1571) We can't use ToBooleanStub::Types as the return value because
   // of various cycles in our headers. Death to tons of implementations in
@@ -124,7 +119,7 @@
   Handle<Context> native_context_;
   Zone* zone_;
   Handle<UnseededNumberDictionary> dictionary_;
-  Handle<FixedArray> feedback_vector_;
+  Handle<TypeFeedbackVector> feedback_vector_;
 
   DISALLOW_COPY_AND_ASSIGN(TypeFeedbackOracle);
 };
diff --git a/src/typedarray.js b/src/typedarray.js
index d2f5ae8..c149b35 100644
--- a/src/typedarray.js
+++ b/src/typedarray.js
@@ -299,13 +299,13 @@
   %SetCode(global.NAME, NAMEConstructor);
   %FunctionSetPrototype(global.NAME, new $Object());
 
-  %SetProperty(global.NAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
-               READ_ONLY | DONT_ENUM | DONT_DELETE);
-  %SetProperty(global.NAME.prototype,
-               "constructor", global.NAME, DONT_ENUM);
-  %SetProperty(global.NAME.prototype,
-               "BYTES_PER_ELEMENT", ELEMENT_SIZE,
-               READ_ONLY | DONT_ENUM | DONT_DELETE);
+  %AddNamedProperty(global.NAME, "BYTES_PER_ELEMENT", ELEMENT_SIZE,
+                    READ_ONLY | DONT_ENUM | DONT_DELETE);
+  %AddNamedProperty(global.NAME.prototype,
+                    "constructor", global.NAME, DONT_ENUM);
+  %AddNamedProperty(global.NAME.prototype,
+                    "BYTES_PER_ELEMENT", ELEMENT_SIZE,
+                    READ_ONLY | DONT_ENUM | DONT_DELETE);
   InstallGetter(global.NAME.prototype, "buffer", NAME_GetBuffer);
   InstallGetter(global.NAME.prototype, "byteOffset", NAME_GetByteOffset);
   InstallGetter(global.NAME.prototype, "byteLength", NAME_GetByteLength);
@@ -436,7 +436,7 @@
   %FunctionSetPrototype($DataView, new $Object);
 
   // Set up constructor property on the DataView prototype.
-  %SetProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM);
+  %AddNamedProperty($DataView.prototype, "constructor", $DataView, DONT_ENUM);
 
   InstallGetter($DataView.prototype, "buffer", DataViewGetBufferJS);
   InstallGetter($DataView.prototype, "byteOffset", DataViewGetByteOffset);
diff --git a/src/types-inl.h b/src/types-inl.h
index 0bdd463..162e658 100644
--- a/src/types-inl.h
+++ b/src/types-inl.h
@@ -19,7 +19,7 @@
 template<class Config>
 TypeImpl<Config>* TypeImpl<Config>::cast(typename Config::Base* object) {
   TypeImpl* t = static_cast<TypeImpl*>(object);
-  ASSERT(t->IsBitset() || t->IsClass() || t->IsConstant() ||
+  DCHECK(t->IsBitset() || t->IsClass() || t->IsConstant() || t->IsRange() ||
          t->IsUnion() || t->IsArray() || t->IsFunction() || t->IsContext());
   return t;
 }
@@ -70,7 +70,7 @@
 
 // static
 bool ZoneTypeConfig::is_bitset(Type* type) {
-  return reinterpret_cast<intptr_t>(type) & 1;
+  return reinterpret_cast<uintptr_t>(type) & 1;
 }
 
 
@@ -87,15 +87,15 @@
 
 
 // static
-int ZoneTypeConfig::as_bitset(Type* type) {
-  ASSERT(is_bitset(type));
-  return static_cast<int>(reinterpret_cast<intptr_t>(type) >> 1);
+ZoneTypeConfig::Type::bitset ZoneTypeConfig::as_bitset(Type* type) {
+  DCHECK(is_bitset(type));
+  return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type) ^ 1u);
 }
 
 
 // static
 ZoneTypeConfig::Struct* ZoneTypeConfig::as_struct(Type* type) {
-  ASSERT(!is_bitset(type));
+  DCHECK(!is_bitset(type));
   return reinterpret_cast<Struct*>(type);
 }
 
@@ -108,13 +108,14 @@
 
 
 // static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(int bitset) {
-  return reinterpret_cast<Type*>((bitset << 1) | 1);
+ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(Type::bitset bitset) {
+  return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset | 1u));
 }
 
 
 // static
-ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(int bitset, Zone* Zone) {
+ZoneTypeConfig::Type* ZoneTypeConfig::from_bitset(
+    Type::bitset bitset, Zone* Zone) {
   return from_bitset(bitset);
 }
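+
+// Informal round trip of the tagging scheme above, assuming no bitset
+// constant uses bit 0 itself:
+//
+//   Type* t = from_bitset(b);  // stored as the word (b | 1)
+//   is_bitset(t)               // true: the low tag bit is set
+//   as_bitset(t)               // yields b: the tag bit is XORed away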
 
@@ -145,7 +146,7 @@
 
 // static
 void ZoneTypeConfig::struct_shrink(Struct* structure, int length) {
-  ASSERT(0 <= length && length <= struct_length(structure));
+  DCHECK(0 <= length && length <= struct_length(structure));
   structure[1] = reinterpret_cast<void*>(length);
 }
 
@@ -164,14 +165,14 @@
 
 // static
 Type* ZoneTypeConfig::struct_get(Struct* structure, int i) {
-  ASSERT(0 <= i && i <= struct_length(structure));
+  DCHECK(0 <= i && i <= struct_length(structure));
   return static_cast<Type*>(structure[2 + i]);
 }
 
 
 // static
 void ZoneTypeConfig::struct_set(Struct* structure, int i, Type* x) {
-  ASSERT(0 <= i && i <= struct_length(structure));
+  DCHECK(0 <= i && i <= struct_length(structure));
   structure[2 + i] = x;
 }
 
@@ -179,7 +180,7 @@
 // static
 template<class V>
 i::Handle<V> ZoneTypeConfig::struct_get_value(Struct* structure, int i) {
-  ASSERT(0 <= i && i <= struct_length(structure));
+  DCHECK(0 <= i && i <= struct_length(structure));
   return i::Handle<V>(static_cast<V**>(structure[2 + i]));
 }
 
@@ -188,7 +189,7 @@
 template<class V>
 void ZoneTypeConfig::struct_set_value(
     Struct* structure, int i, i::Handle<V> x) {
-  ASSERT(0 <= i && i <= struct_length(structure));
+  DCHECK(0 <= i && i <= struct_length(structure));
   structure[2 + i] = x.location();
 }
 
@@ -229,8 +230,9 @@
 
 
 // static
-int HeapTypeConfig::as_bitset(Type* type) {
-  return i::Smi::cast(type)->value();
+HeapTypeConfig::Type::bitset HeapTypeConfig::as_bitset(Type* type) {
+  // TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
+  return static_cast<Type::bitset>(reinterpret_cast<uintptr_t>(type));
 }
 
 
@@ -247,14 +249,15 @@
 
 
 // static
-HeapTypeConfig::Type* HeapTypeConfig::from_bitset(int bitset) {
-  return Type::cast(i::Smi::FromInt(bitset));
+HeapTypeConfig::Type* HeapTypeConfig::from_bitset(Type::bitset bitset) {
+  // TODO(rossberg): Breaks the Smi abstraction. Fix once there is a better way.
+  return reinterpret_cast<Type*>(static_cast<uintptr_t>(bitset));
 }
 
 
 // static
 i::Handle<HeapTypeConfig::Type> HeapTypeConfig::from_bitset(
-    int bitset, Isolate* isolate) {
+    Type::bitset bitset, Isolate* isolate) {
   return i::handle(from_bitset(bitset), isolate);
 }
 
diff --git a/src/types.cc b/src/types.cc
index 22694c0..8e96d86 100644
--- a/src/types.cc
+++ b/src/types.cc
@@ -4,43 +4,128 @@
 
 #include "src/types.h"
 
-#include "src/string-stream.h"
+#include "src/ostreams.h"
 #include "src/types-inl.h"
 
 namespace v8 {
 namespace internal {
 
+
+// NOTE: If code is marked as being a "shortcut", this means that removing
+// the code won't affect the semantics of the surrounding function definition.
+
+
+// -----------------------------------------------------------------------------
+// Range-related helper functions.
+
+// The result may be invalid (max < min).
+template<class Config>
+typename TypeImpl<Config>::Limits TypeImpl<Config>::Intersect(
+    Limits lhs, Limits rhs) {
+  DisallowHeapAllocation no_allocation;
+  Limits result(lhs);
+  if (lhs.min->Number() < rhs.min->Number()) result.min = rhs.min;
+  if (lhs.max->Number() > rhs.max->Number()) result.max = rhs.max;
+  return result;
+}
+
+
+template<class Config>
+typename TypeImpl<Config>::Limits TypeImpl<Config>::Union(
+    Limits lhs, Limits rhs) {
+  DisallowHeapAllocation no_allocation;
+  Limits result(lhs);
+  if (lhs.min->Number() > rhs.min->Number()) result.min = rhs.min;
+  if (lhs.max->Number() < rhs.max->Number()) result.max = rhs.max;
+  return result;
+}
+
+
+template<class Config>
+bool TypeImpl<Config>::Overlap(
+    typename TypeImpl<Config>::RangeType* lhs,
+    typename TypeImpl<Config>::RangeType* rhs) {
+  DisallowHeapAllocation no_allocation;
+  typename TypeImpl<Config>::Limits lim = Intersect(Limits(lhs), Limits(rhs));
+  return lim.min->Number() <= lim.max->Number();
+}
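+
+// Worked example (informal): for the ranges [1, 5] and [3, 10], Intersect()
+// above yields the limits (3, 5), so Overlap() holds; for [1, 2] and [5, 6]
+// it yields the invalid limits (5, 2), where max < min, so Overlap() fails.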
+
+
+template<class Config>
+bool TypeImpl<Config>::Contains(
+    typename TypeImpl<Config>::RangeType* lhs,
+    typename TypeImpl<Config>::RangeType* rhs) {
+  DisallowHeapAllocation no_allocation;
+  return lhs->Min()->Number() <= rhs->Min()->Number()
+      && rhs->Max()->Number() <= lhs->Max()->Number();
+}
+
+
+template<class Config>
+bool TypeImpl<Config>::Contains(
+    typename TypeImpl<Config>::RangeType* range, i::Object* val) {
+  DisallowHeapAllocation no_allocation;
+  return IsInteger(val)
+      && range->Min()->Number() <= val->Number()
+      && val->Number() <= range->Max()->Number();
+}
+
+
+// -----------------------------------------------------------------------------
+// Min and Max computation.
+
+template<class Config>
+double TypeImpl<Config>::Min() {
+  DCHECK(this->Is(Number()));
+  if (this->IsBitset()) return BitsetType::Min(this->AsBitset());
+  if (this->IsUnion()) {
+    double min = +V8_INFINITY;
+    for (int i = 0; i < this->AsUnion()->Length(); ++i) {
+      min = std::min(min, this->AsUnion()->Get(i)->Min());
+    }
+    return min;
+  }
+  if (this->IsRange()) return this->AsRange()->Min()->Number();
+  if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+  UNREACHABLE();
+  return 0;
+}
+
+
+template<class Config>
+double TypeImpl<Config>::Max() {
+  DCHECK(this->Is(Number()));
+  if (this->IsBitset()) return BitsetType::Max(this->AsBitset());
+  if (this->IsUnion()) {
+    double max = -V8_INFINITY;
+    for (int i = 0; i < this->AsUnion()->Length(); ++i) {
+      max = std::max(max, this->AsUnion()->Get(i)->Max());
+    }
+    return max;
+  }
+  if (this->IsRange()) return this->AsRange()->Max()->Number();
+  if (this->IsConstant()) return this->AsConstant()->Value()->Number();
+  UNREACHABLE();
+  return 0;
+}
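+
+// Example (informal): for Range(-1, 7), Min() returns -1 and Max() returns 7;
+// for a union, the bounds above are folded over every element of the union.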
+
+
 // -----------------------------------------------------------------------------
 // Glb and lub computation.
 
+
 // The largest bitset subsumed by this type.
 template<class Config>
-int TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Glb(TypeImpl* type) {
   DisallowHeapAllocation no_allocation;
   if (type->IsBitset()) {
     return type->AsBitset();
   } else if (type->IsUnion()) {
-    UnionHandle unioned = handle(type->AsUnion());
-    int bitset = kNone;
-    for (int i = 0; i < unioned->Length(); ++i) {
-      bitset |= unioned->Get(i)->BitsetGlb();
-    }
-    return bitset;
-  } else if (type->IsClass()) {
-    // Little hack to avoid the need for a region for handlification here...
-    return REPRESENTATION(Config::is_class(type)
-        ? Lub(*Config::as_class(type))
-        : type->AsClass()->Bound(NULL)->AsBitset());
-  } else if (type->IsConstant()) {
-    return REPRESENTATION(type->AsConstant()->Bound()->AsBitset());
-  } else if (type->IsContext()) {
-    return REPRESENTATION(type->AsContext()->Bound()->AsBitset());
-  } else if (type->IsArray()) {
-    return REPRESENTATION(type->AsArray()->Bound()->AsBitset());
-  } else if (type->IsFunction()) {
-    return REPRESENTATION(type->AsFunction()->Bound()->AsBitset());
+    SLOW_DCHECK(type->AsUnion()->Wellformed());
+    return type->AsUnion()->Get(0)->BitsetGlb();  // Shortcut.
+    // (The remaining BitsetGlb's are None anyway).
   } else {
-    UNREACHABLE();
     return kNone;
   }
 }
@@ -48,143 +133,68 @@
 
 // The smallest bitset subsuming this type.
 template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
   DisallowHeapAllocation no_allocation;
-  if (type->IsBitset()) {
-    return type->AsBitset();
-  } else if (type->IsUnion()) {
-    UnionHandle unioned = handle(type->AsUnion());
+  if (type->IsBitset()) return type->AsBitset();
+  if (type->IsUnion()) {
     int bitset = kNone;
-    for (int i = 0; i < unioned->Length(); ++i) {
-      bitset |= unioned->Get(i)->BitsetLub();
+    for (int i = 0; i < type->AsUnion()->Length(); ++i) {
+      bitset |= type->AsUnion()->Get(i)->BitsetLub();
     }
     return bitset;
-  } else if (type->IsClass()) {
+  }
+  if (type->IsClass()) {
     // Little hack to avoid the need for a region for handlification here...
     return Config::is_class(type) ? Lub(*Config::as_class(type)) :
         type->AsClass()->Bound(NULL)->AsBitset();
-  } else if (type->IsConstant()) {
-    return type->AsConstant()->Bound()->AsBitset();
-  } else if (type->IsContext()) {
-    return type->AsContext()->Bound()->AsBitset();
-  } else if (type->IsArray()) {
-    return type->AsArray()->Bound()->AsBitset();
-  } else if (type->IsFunction()) {
-    return type->AsFunction()->Bound()->AsBitset();
-  } else {
-    UNREACHABLE();
-    return kNone;
   }
-}
-
-
-// The smallest bitset subsuming this type, ignoring explicit bounds.
-template<class Config>
-int TypeImpl<Config>::BitsetType::InherentLub(TypeImpl* type) {
-  DisallowHeapAllocation no_allocation;
-  if (type->IsBitset()) {
-    return type->AsBitset();
-  } else if (type->IsUnion()) {
-    UnionHandle unioned = handle(type->AsUnion());
-    int bitset = kNone;
-    for (int i = 0; i < unioned->Length(); ++i) {
-      bitset |= unioned->Get(i)->InherentBitsetLub();
-    }
-    return bitset;
-  } else if (type->IsClass()) {
-    return Lub(*type->AsClass()->Map());
-  } else if (type->IsConstant()) {
-    return Lub(*type->AsConstant()->Value());
-  } else if (type->IsContext()) {
-    return kInternal & kTaggedPtr;
-  } else if (type->IsArray()) {
-    return kArray;
-  } else if (type->IsFunction()) {
-    return kFunction;
-  } else {
-    UNREACHABLE();
-    return kNone;
-  }
+  if (type->IsConstant()) return type->AsConstant()->Bound()->AsBitset();
+  if (type->IsRange()) return type->AsRange()->BitsetLub();
+  if (type->IsContext()) return kInternal & kTaggedPtr;
+  if (type->IsArray()) return kArray;
+  if (type->IsFunction()) return kFunction;
+  UNREACHABLE();
+  return kNone;
 }
 
 
 template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
-  DisallowHeapAllocation no_allocation;
-  if (value->IsNumber()) {
-    return Lub(value->Number()) & (value->IsSmi() ? kTaggedInt : kTaggedPtr);
-  }
-  return Lub(i::HeapObject::cast(value)->map());
-}
-
-
-template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(double value) {
-  DisallowHeapAllocation no_allocation;
-  if (i::IsMinusZero(value)) return kMinusZero;
-  if (std::isnan(value)) return kNaN;
-  if (IsUint32Double(value)) return Lub(FastD2UI(value));
-  if (IsInt32Double(value)) return Lub(FastD2I(value));
-  return kOtherNumber;
-}
-
-
-template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(int32_t value) {
-  if (value >= 0x40000000) {
-    return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
-  }
-  if (value >= 0) return kUnsignedSmall;
-  if (value >= -0x40000000) return kOtherSignedSmall;
-  return i::SmiValuesAre31Bits() ? kOtherSigned32 : kOtherSignedSmall;
-}
-
-
-template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(uint32_t value) {
-  DisallowHeapAllocation no_allocation;
-  if (value >= 0x80000000u) return kOtherUnsigned32;
-  if (value >= 0x40000000u) {
-    return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
-  }
-  return kUnsignedSmall;
-}
-
-
-template<class Config>
-int TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
   DisallowHeapAllocation no_allocation;
   switch (map->instance_type()) {
     case STRING_TYPE:
-    case ASCII_STRING_TYPE:
+    case ONE_BYTE_STRING_TYPE:
     case CONS_STRING_TYPE:
-    case CONS_ASCII_STRING_TYPE:
+    case CONS_ONE_BYTE_STRING_TYPE:
     case SLICED_STRING_TYPE:
-    case SLICED_ASCII_STRING_TYPE:
+    case SLICED_ONE_BYTE_STRING_TYPE:
     case EXTERNAL_STRING_TYPE:
-    case EXTERNAL_ASCII_STRING_TYPE:
+    case EXTERNAL_ONE_BYTE_STRING_TYPE:
     case EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
     case SHORT_EXTERNAL_STRING_TYPE:
-    case SHORT_EXTERNAL_ASCII_STRING_TYPE:
+    case SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE:
     case SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE:
+      return kOtherString;
     case INTERNALIZED_STRING_TYPE:
-    case ASCII_INTERNALIZED_STRING_TYPE:
+    case ONE_BYTE_INTERNALIZED_STRING_TYPE:
     case EXTERNAL_INTERNALIZED_STRING_TYPE:
-    case EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+    case EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
     case EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
     case SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE:
-    case SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE:
+    case SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE:
     case SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE:
-      return kString;
+      return kInternalizedString;
     case SYMBOL_TYPE:
       return kSymbol;
     case ODDBALL_TYPE: {
       Heap* heap = map->GetHeap();
       if (map == heap->undefined_map()) return kUndefined;
-      if (map == heap->the_hole_map()) return kAny;  // TODO(rossberg): kNone?
       if (map == heap->null_map()) return kNull;
       if (map == heap->boolean_map()) return kBoolean;
-      ASSERT(map == heap->uninitialized_map() ||
+      DCHECK(map == heap->the_hole_map() ||
+             map == heap->uninitialized_map() ||
              map == heap->no_interceptor_result_sentinel_map() ||
              map == heap->termination_exception_map() ||
              map == heap->arguments_marker_map());
@@ -238,6 +248,7 @@
     case ACCESSOR_PAIR_TYPE:
     case FIXED_ARRAY_TYPE:
     case FOREIGN_TYPE:
+    case CODE_TYPE:
       return kInternal & kTaggedPtr;
     default:
       UNREACHABLE();
@@ -246,63 +257,191 @@
 }
 
 
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
+  DisallowHeapAllocation no_allocation;
+  if (value->IsNumber()) {
+    return Lub(value->Number()) & (value->IsSmi() ? kTaggedInt : kTaggedPtr);
+  }
+  return Lub(i::HeapObject::cast(value)->map());
+}
+
+
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(double value) {
+  DisallowHeapAllocation no_allocation;
+  if (i::IsMinusZero(value)) return kMinusZero;
+  if (std::isnan(value)) return kNaN;
+  if (IsUint32Double(value)) return Lub(FastD2UI(value));
+  if (IsInt32Double(value)) return Lub(FastD2I(value));
+  return kOtherNumber;
+}
+
+
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(int32_t value) {
+  DisallowHeapAllocation no_allocation;
+  if (value >= 0x40000000) {
+    return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
+  }
+  if (value >= 0) return kUnsignedSmall;
+  if (value >= -0x40000000) return kOtherSignedSmall;
+  return i::SmiValuesAre31Bits() ? kOtherSigned32 : kOtherSignedSmall;
+}
+
+
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(uint32_t value) {
+  DisallowHeapAllocation no_allocation;
+  if (value >= 0x80000000u) return kOtherUnsigned32;
+  if (value >= 0x40000000u) {
+    return i::SmiValuesAre31Bits() ? kOtherUnsigned31 : kUnsignedSmall;
+  }
+  return kUnsignedSmall;
+}
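+
+// Examples (informal, assuming 31-bit Smis): Lub(-1) is kOtherSignedSmall
+// via the int32_t overload, Lub(0x40000000u) is kOtherUnsigned31, and
+// Lub(0x80000000u) is kOtherUnsigned32.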
+
+
+// Minimum values of regular numeric bitsets when SmiValuesAre31Bits.
+template<class Config>
+const typename TypeImpl<Config>::BitsetType::BitsetMin
+TypeImpl<Config>::BitsetType::BitsetMins31[] = {
+    {kOtherNumber, -V8_INFINITY},
+    {kOtherSigned32, kMinInt},
+    {kOtherSignedSmall, -0x40000000},
+    {kUnsignedSmall, 0},
+    {kOtherUnsigned31, 0x40000000},
+    {kOtherUnsigned32, 0x80000000},
+    {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}
+};
+
+
+// Minimum values of regular numeric bitsets when SmiValuesAre32Bits.
+// OtherSigned32 and OtherUnsigned31 are empty (see the diagrams in types.h).
+template<class Config>
+const typename TypeImpl<Config>::BitsetType::BitsetMin
+TypeImpl<Config>::BitsetType::BitsetMins32[] = {
+    {kOtherNumber, -V8_INFINITY},
+    {kOtherSignedSmall, kMinInt},
+    {kUnsignedSmall, 0},
+    {kOtherUnsigned32, 0x80000000},
+    {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}
+};
+
+
+template<class Config>
+typename TypeImpl<Config>::bitset
+TypeImpl<Config>::BitsetType::Lub(Limits lim) {
+  DisallowHeapAllocation no_allocation;
+  double min = lim.min->Number();
+  double max = lim.max->Number();
+  int lub = kNone;
+  const BitsetMin* mins = BitsetMins();
+
+  for (size_t i = 1; i < BitsetMinsSize(); ++i) {
+    if (min < mins[i].min) {
+      lub |= mins[i-1].bits;
+      if (max < mins[i].min) return lub;
+    }
+  }
+  return lub |= mins[BitsetMinsSize()-1].bits;
+}
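+
+// Worked example (informal, 31-bit Smis): for the limits [10, 0x50000000]
+// the loop above accumulates kUnsignedSmall and kOtherUnsigned31, then
+// returns as soon as the maximum falls below the next row's minimum
+// (0x80000000), so the lub is kUnsignedSmall | kOtherUnsigned31.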
+
+
+template<class Config>
+double TypeImpl<Config>::BitsetType::Min(bitset bits) {
+  DisallowHeapAllocation no_allocation;
+  DCHECK(Is(bits, kNumber));
+  const BitsetMin* mins = BitsetMins();
+  bool mz = SEMANTIC(bits & kMinusZero);
+  for (size_t i = 0; i < BitsetMinsSize(); ++i) {
+    if (Is(SEMANTIC(mins[i].bits), bits)) {
+      return mz ? std::min(0.0, mins[i].min) : mins[i].min;
+    }
+  }
+  if (mz) return 0;
+  return base::OS::nan_value();
+}
+
+
+template<class Config>
+double TypeImpl<Config>::BitsetType::Max(bitset bits) {
+  DisallowHeapAllocation no_allocation;
+  DCHECK(Is(bits, kNumber));
+  const BitsetMin* mins = BitsetMins();
+  bool mz = SEMANTIC(bits & kMinusZero);
+  if (BitsetType::Is(mins[BitsetMinsSize()-1].bits, bits)) {
+    return +V8_INFINITY;
+  }
+  for (size_t i = BitsetMinsSize()-1; i-- > 0; ) {
+    if (Is(SEMANTIC(mins[i].bits), bits)) {
+      return mz ?
+          std::max(0.0, mins[i+1].min - 1) : mins[i+1].min - 1;
+    }
+  }
+  if (mz) return 0;
+  return base::OS::nan_value();
+}
+
+
 // -----------------------------------------------------------------------------
 // Predicates.
 
-// Check this <= that.
+
 template<class Config>
-bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
+bool TypeImpl<Config>::SimplyEquals(TypeImpl* that) {
   DisallowHeapAllocation no_allocation;
-
-  // Fast path for bitsets.
-  if (this->IsNone()) return true;
-  if (that->IsBitset()) {
-    return (BitsetType::Lub(this) | that->AsBitset()) == that->AsBitset();
+  if (this->IsClass()) {
+    return that->IsClass()
+        && *this->AsClass()->Map() == *that->AsClass()->Map();
   }
-  if (this->IsBitset() && SEMANTIC(this->AsBitset()) == BitsetType::kNone) {
-    // Bitsets only have non-bitset supertypes along the representation axis.
-    int that_bitset = that->BitsetGlb();
-    return (this->AsBitset() | that_bitset) == that_bitset;
+  if (this->IsConstant()) {
+    return that->IsConstant()
+        && *this->AsConstant()->Value() == *that->AsConstant()->Value();
   }
-
-  if (that->IsClass()) {
-    return this->IsClass()
-        && *this->AsClass()->Map() == *that->AsClass()->Map()
-        && ((Config::is_class(that) && Config::is_class(this)) ||
-            BitsetType::New(this->BitsetLub())->Is(
-                BitsetType::New(that->BitsetLub())));
-  }
-  if (that->IsConstant()) {
-    return this->IsConstant()
-        && *this->AsConstant()->Value() == *that->AsConstant()->Value()
-        && this->AsConstant()->Bound()->Is(that->AsConstant()->Bound());
-  }
-  if (that->IsContext()) {
-    return this->IsContext()
+  if (this->IsContext()) {
+    return that->IsContext()
         && this->AsContext()->Outer()->Equals(that->AsContext()->Outer());
   }
-  if (that->IsArray()) {
-    return this->IsArray()
+  if (this->IsArray()) {
+    return that->IsArray()
         && this->AsArray()->Element()->Equals(that->AsArray()->Element());
   }
-  if (that->IsFunction()) {
-    // We currently do not allow for any variance here, in order to keep
-    // Union and Intersect operations simple.
-    if (!this->IsFunction()) return false;
+  if (this->IsFunction()) {
+    if (!that->IsFunction()) return false;
     FunctionType* this_fun = this->AsFunction();
     FunctionType* that_fun = that->AsFunction();
     if (this_fun->Arity() != that_fun->Arity() ||
         !this_fun->Result()->Equals(that_fun->Result()) ||
-        !that_fun->Receiver()->Equals(this_fun->Receiver())) {
+        !this_fun->Receiver()->Equals(that_fun->Receiver())) {
       return false;
     }
     for (int i = 0; i < this_fun->Arity(); ++i) {
-      if (!that_fun->Parameter(i)->Equals(this_fun->Parameter(i))) return false;
+      if (!this_fun->Parameter(i)->Equals(that_fun->Parameter(i))) return false;
     }
     return true;
   }
+  UNREACHABLE();
+  return false;
+}
 
-  // (T1 \/ ... \/ Tn) <= T  <=>  (T1 <= T) /\ ... /\ (Tn <= T)
+
+// Check if [this] <= [that].
+template<class Config>
+bool TypeImpl<Config>::SlowIs(TypeImpl* that) {
+  DisallowHeapAllocation no_allocation;
+
+  if (that->IsBitset()) {
+    return BitsetType::Is(this->BitsetLub(), that->AsBitset());
+  }
+  if (this->IsBitset()) {
+    return BitsetType::Is(this->AsBitset(), that->BitsetGlb());
+  }
+
+  // (T1 \/ ... \/ Tn) <= T  if  (T1 <= T) /\ ... /\ (Tn <= T)
   if (this->IsUnion()) {
     UnionHandle unioned = handle(this->AsUnion());
     for (int i = 0; i < unioned->Length(); ++i) {
@@ -311,19 +450,22 @@
     return true;
   }
 
-  // T <= (T1 \/ ... \/ Tn)  <=>  (T <= T1) \/ ... \/ (T <= Tn)
-  // (iff T is not a union)
-  ASSERT(!this->IsUnion());
+  // T <= (T1 \/ ... \/ Tn)  if  (T <= T1) \/ ... \/ (T <= Tn)
   if (that->IsUnion()) {
-    UnionHandle unioned = handle(that->AsUnion());
-    for (int i = 0; i < unioned->Length(); ++i) {
-      if (this->Is(unioned->Get(i))) return true;
-      if (this->IsBitset()) break;  // Fast fail, only first field is a bitset.
+    for (int i = 0; i < that->AsUnion()->Length(); ++i) {
+      if (this->Is(that->AsUnion()->Get(i))) return true;
+      if (i > 1 && this->IsRange()) return false;  // Shortcut.
     }
     return false;
   }
 
-  return false;
+  if (that->IsRange()) {
+    return (this->IsRange() && Contains(that->AsRange(), this->AsRange()))
+        || (this->IsConstant() &&
+            Contains(that->AsRange(), *this->AsConstant()->Value()));
+  }
+  if (this->IsRange()) return false;
+  return this->SimplyEquals(that);
 }
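+
+// Example (informal): Is(Range(1, 2), Union(String, Range(0, 10))) holds
+// because the range is a subtype of the union's second element; a union on
+// the left-hand side instead requires every element to be a subtype.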
 
 
@@ -347,7 +489,7 @@
 }
 
 
-// Check if this contains only (currently) stable classes.
+// Check if [this] contains only (currently) stable classes.
 template<class Config>
 bool TypeImpl<Config>::NowStable() {
   DisallowHeapAllocation no_allocation;
@@ -358,12 +500,12 @@
 }
 
 
-// Check this overlaps that.
+// Check if [this] and [that] overlap.
 template<class Config>
 bool TypeImpl<Config>::Maybe(TypeImpl* that) {
   DisallowHeapAllocation no_allocation;
 
-  // (T1 \/ ... \/ Tn) overlaps T <=> (T1 overlaps T) \/ ... \/ (Tn overlaps T)
+  // (T1 \/ ... \/ Tn) overlaps T  if  (T1 overlaps T) \/ ... \/ (Tn overlaps T)
   if (this->IsUnion()) {
     UnionHandle unioned = handle(this->AsUnion());
     for (int i = 0; i < unioned->Length(); ++i) {
@@ -372,65 +514,80 @@
     return false;
   }
 
-  // T overlaps (T1 \/ ... \/ Tn) <=> (T overlaps T1) \/ ... \/ (T overlaps Tn)
+  // T overlaps (T1 \/ ... \/ Tn)  if  (T overlaps T1) \/ ... \/ (T overlaps Tn)
   if (that->IsUnion()) {
-    UnionHandle unioned = handle(that->AsUnion());
-    for (int i = 0; i < unioned->Length(); ++i) {
-      if (this->Maybe(unioned->Get(i))) return true;
+    for (int i = 0; i < that->AsUnion()->Length(); ++i) {
+      if (this->Maybe(that->AsUnion()->Get(i))) return true;
     }
     return false;
   }
 
-  ASSERT(!this->IsUnion() && !that->IsUnion());
-  if (this->IsBitset()) {
-    return BitsetType::IsInhabited(this->AsBitset() & that->BitsetLub());
+  if (!BitsetType::IsInhabited(this->BitsetLub() & that->BitsetLub()))
+    return false;
+  if (this->IsBitset() || that->IsBitset()) return true;
+
+  if (this->IsClass() != that->IsClass()) return true;
+
+  if (this->IsRange()) {
+    if (that->IsConstant()) {
+      return Contains(this->AsRange(), *that->AsConstant()->Value());
+    }
+    return that->IsRange() && Overlap(this->AsRange(), that->AsRange());
   }
-  if (that->IsBitset()) {
-    return BitsetType::IsInhabited(this->BitsetLub() & that->AsBitset());
-  }
-  if (this->IsClass()) {
-    return that->IsClass()
-        && *this->AsClass()->Map() == *that->AsClass()->Map();
-  }
-  if (this->IsConstant()) {
-    return that->IsConstant()
-        && *this->AsConstant()->Value() == *that->AsConstant()->Value();
-  }
-  if (this->IsContext()) {
-    return this->Equals(that);
-  }
-  if (this->IsArray()) {
-    // There is no variance!
-    return this->Equals(that);
-  }
-  if (this->IsFunction()) {
-    // There is no variance!
-    return this->Equals(that);
+  if (that->IsRange()) {
+    if (this->IsConstant()) {
+      return Contains(that->AsRange(), *this->AsConstant()->Value());
+    }
+    return this->IsRange() && Overlap(this->AsRange(), that->AsRange());
   }
 
-  return false;
+  return this->SimplyEquals(that);
 }
 
 
-// Check if value is contained in (inhabits) type.
+// Return the range in [this], or [NULL].
+template<class Config>
+typename TypeImpl<Config>::RangeType* TypeImpl<Config>::GetRange() {
+  DisallowHeapAllocation no_allocation;
+  if (this->IsRange()) return this->AsRange();
+  if (this->IsUnion() && this->AsUnion()->Get(1)->IsRange()) {
+    return this->AsUnion()->Get(1)->AsRange();
+  }
+  return NULL;
+}
+
+
 template<class Config>
 bool TypeImpl<Config>::Contains(i::Object* value) {
   DisallowHeapAllocation no_allocation;
   for (Iterator<i::Object> it = this->Constants(); !it.Done(); it.Advance()) {
     if (*it.Current() == value) return true;
   }
+  if (IsInteger(value)) {
+    RangeType* range = this->GetRange();
+    if (range != NULL && Contains(range, value)) return true;
+  }
   return BitsetType::New(BitsetType::Lub(value))->Is(this);
 }
 
 
 template<class Config>
 bool TypeImpl<Config>::UnionType::Wellformed() {
-  ASSERT(this->Length() >= 2);
+  DisallowHeapAllocation no_allocation;
+  // This checks the invariants of the union representation:
+  // 1. There are at least two elements.
+  // 2. At most one element is a bitset, and it must be the first one.
+  // 3. At most one element is a range, and it must be the second one
+  //    (even when the first element is not a bitset).
+  // 4. No element is itself a union.
+  // 5. No element is a subtype of any other.
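+  // For example, (bitset, range, class, constant) in that order can be
+  // well-formed, whereas (range, bitset) or a nested union never is.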
+  DCHECK(this->Length() >= 2);  // (1)
   for (int i = 0; i < this->Length(); ++i) {
-    ASSERT(!this->Get(i)->IsUnion());
-    if (i > 0) ASSERT(!this->Get(i)->IsBitset());
+    if (i != 0) DCHECK(!this->Get(i)->IsBitset());  // (2)
+    if (i != 1) DCHECK(!this->Get(i)->IsRange());  // (3)
+    DCHECK(!this->Get(i)->IsUnion());  // (4)
     for (int j = 0; j < this->Length(); ++j) {
-      if (i != j) ASSERT(!this->Get(i)->Is(this->Get(j)));
+      if (i != j) DCHECK(!this->Get(i)->Is(this->Get(j)));  // (5)
     }
   }
   return true;
@@ -440,153 +597,148 @@
 // -----------------------------------------------------------------------------
 // Union and intersection
 
-template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Narrow(
-    int bitset, Region* region) {
-  TypeHandle bound = BitsetType::New(bitset, region);
-  if (this->IsClass()) {
-    return ClassType::New(this->AsClass()->Map(), bound, region);
-  } else if (this->IsConstant()) {
-    return ConstantType::New(this->AsConstant()->Value(), bound, region);
-  } else if (this->IsContext()) {
-    return ContextType::New(this->AsContext()->Outer(), bound, region);
-  } else if (this->IsArray()) {
-    return ArrayType::New(this->AsArray()->Element(), bound, region);
-  } else if (this->IsFunction()) {
-    FunctionType* function = this->AsFunction();
-    int arity = function->Arity();
-    FunctionHandle type = FunctionType::New(
-        function->Result(), function->Receiver(), bound, arity, region);
-    for (int i = 0; i < arity; ++i) {
-      type->InitParameter(i, function->Parameter(i));
-    }
-    return type;
-  }
-  UNREACHABLE();
-  return TypeHandle();
+
+static bool AddIsSafe(int x, int y) {
+  return x >= 0 ?
+      y <= std::numeric_limits<int>::max() - x :
+      y >= std::numeric_limits<int>::min() - x;
 }
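+
+// For instance (informal): AddIsSafe(INT_MAX, 1) is false, while
+// AddIsSafe(INT_MAX, -1) is true, letting Intersect() below pre-check the
+// union size additions before performing them.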
 
 
 template<class Config>
-int TypeImpl<Config>::BoundBy(TypeImpl* that) {
-  ASSERT(!this->IsUnion());
-  if (that->IsUnion()) {
-    UnionType* unioned = that->AsUnion();
-    int length = unioned->Length();
-    int bitset = BitsetType::kNone;
-    for (int i = 0; i < length; ++i) {
-      bitset |= BoundBy(unioned->Get(i)->unhandle());
-    }
-    return bitset;
-  } else if (that->IsClass() && this->IsClass() &&
-      *this->AsClass()->Map() == *that->AsClass()->Map()) {
-    return that->BitsetLub();
-  } else if (that->IsConstant() && this->IsConstant() &&
-      *this->AsConstant()->Value() == *that->AsConstant()->Value()) {
-    return that->AsConstant()->Bound()->AsBitset();
-  } else if (that->IsContext() && this->IsContext() && this->Is(that)) {
-    return that->AsContext()->Bound()->AsBitset();
-  } else if (that->IsArray() && this->IsArray() && this->Is(that)) {
-    return that->AsArray()->Bound()->AsBitset();
-  } else if (that->IsFunction() && this->IsFunction() && this->Is(that)) {
-    return that->AsFunction()->Bound()->AsBitset();
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
+    TypeHandle type1, TypeHandle type2, Region* region) {
+  bitset bits = type1->BitsetGlb() & type2->BitsetGlb();
+  if (!BitsetType::IsInhabited(bits)) bits = BitsetType::kNone;
+
+  // Fast case: bit sets.
+  if (type1->IsBitset() && type2->IsBitset()) {
+    return BitsetType::New(bits, region);
   }
-  return that->BitsetGlb();
+
+  // Fast case: top or bottom types.
+  if (type1->IsNone() || type2->IsAny()) return type1;  // Shortcut.
+  if (type2->IsNone() || type1->IsAny()) return type2;  // Shortcut.
+
+  // Semi-fast case.
+  if (type1->Is(type2)) return type1;
+  if (type2->Is(type1)) return type2;
+
+  // Slow case: create union.
+  int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
+  int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
+  if (!AddIsSafe(size1, size2)) return Any(region);
+  int size = size1 + size2;
+  if (!AddIsSafe(size, 2)) return Any(region);
+  size += 2;
+  UnionHandle result = UnionType::New(size, region);
+  size = 0;
+
+  // Deal with bitsets.
+  result->Set(size++, BitsetType::New(bits, region));
+
+  // Deal with ranges.
+  TypeHandle range = None(region);
+  RangeType* range1 = type1->GetRange();
+  RangeType* range2 = type2->GetRange();
+  if (range1 != NULL && range2 != NULL) {
+    Limits lim = Intersect(Limits(range1), Limits(range2));
+    if (lim.min->Number() <= lim.max->Number()) {
+      range = RangeType::New(lim, region);
+    }
+  }
+  result->Set(size++, range);
+
+  size = IntersectAux(type1, type2, result, size, region);
+  return NormalizeUnion(result, size);
 }
 
 
 template<class Config>
-int TypeImpl<Config>::IndexInUnion(
-    int bound, UnionHandle unioned, int current_size) {
-  ASSERT(!this->IsUnion());
-  for (int i = 0; i < current_size; ++i) {
-    TypeHandle that = unioned->Get(i);
-    if (that->IsBitset()) {
-      if ((bound | that->AsBitset()) == that->AsBitset()) return i;
-    } else if (that->IsClass() && this->IsClass()) {
-      if (*this->AsClass()->Map() == *that->AsClass()->Map()) return i;
-    } else if (that->IsConstant() && this->IsConstant()) {
-      if (*this->AsConstant()->Value() == *that->AsConstant()->Value())
-        return i;
-    } else if (that->IsContext() && this->IsContext()) {
-      if (this->Is(that)) return i;
-    } else if (that->IsArray() && this->IsArray()) {
-      if (this->Is(that)) return i;
-    } else if (that->IsFunction() && this->IsFunction()) {
-      if (this->Is(that)) return i;
-    }
+int TypeImpl<Config>::UpdateRange(
+    RangeHandle range, UnionHandle result, int size, Region* region) {
+  TypeHandle old_range = result->Get(1);
+  DCHECK(old_range->IsRange() || old_range->IsNone());
+  if (range->Is(old_range)) return size;
+  if (!old_range->Is(range->unhandle())) {
+    range = RangeType::New(
+        Union(Limits(range->AsRange()), Limits(old_range->AsRange())), region);
   }
-  return -1;
-}
+  result->Set(1, range);
 
-
-// Get non-bitsets from type, bounded by upper.
-// Store at result starting at index. Returns updated index.
-template<class Config>
-int TypeImpl<Config>::ExtendUnion(
-    UnionHandle result, int size, TypeHandle type,
-    TypeHandle other, bool is_intersect, Region* region) {
-  int old_size = size;
-  if (type->IsUnion()) {
-    UnionHandle unioned = handle(type->AsUnion());
-    for (int i = 0; i < unioned->Length(); ++i) {
-      TypeHandle type_i = unioned->Get(i);
-      ASSERT(i == 0 || !(type_i->IsBitset() || type_i->Is(unioned->Get(0))));
-      if (!type_i->IsBitset()) {
-        size = ExtendUnion(result, size, type_i, other, is_intersect, region);
-      }
-    }
-  } else if (!type->IsBitset()) {
-    ASSERT(type->IsClass() || type->IsConstant() ||
-           type->IsArray() || type->IsFunction() || type->IsContext());
-    int inherent_bound = type->InherentBitsetLub();
-    int old_bound = type->BitsetLub();
-    int other_bound = type->BoundBy(other->unhandle()) & inherent_bound;
-    int new_bound =
-        is_intersect ? (old_bound & other_bound) : (old_bound | other_bound);
-    if (new_bound != BitsetType::kNone) {
-      int i = type->IndexInUnion(new_bound, result, old_size);
-      if (i == -1) {
-        i = size++;
-      } else if (result->Get(i)->IsBitset()) {
-        return size;  // Already fully subsumed.
-      } else {
-        int type_i_bound = result->Get(i)->BitsetLub();
-        new_bound |= type_i_bound;
-        if (new_bound == type_i_bound) return size;
-      }
-      if (new_bound != old_bound) type = type->Narrow(new_bound, region);
-      result->Set(i, type);
+  // Remove any components that just got subsumed.
+  for (int i = 2; i < size; ) {
+    if (result->Get(i)->Is(range->unhandle())) {
+      result->Set(i, result->Get(--size));
+    } else {
+      ++i;
     }
   }
   return size;
 }
 
 
-// If bitset is subsumed by another entry in the result, remove it.
-// (Only bitsets with empty semantic axis can be subtypes of non-bitsets.)
 template<class Config>
-int TypeImpl<Config>::NormalizeUnion(UnionHandle result, int size, int bitset) {
-  if (bitset != BitsetType::kNone && SEMANTIC(bitset) == BitsetType::kNone) {
-    for (int i = 1; i < size; ++i) {
-      int glb = result->Get(i)->BitsetGlb();
-      if ((bitset | glb) == glb) {
-        for (int j = 1; j < size; ++j) {
-          result->Set(j - 1, result->Get(j));
-        }
-        --size;
-        break;
-      }
+int TypeImpl<Config>::IntersectAux(
+    TypeHandle lhs, TypeHandle rhs,
+    UnionHandle result, int size, Region* region) {
+  if (lhs->IsUnion()) {
+    for (int i = 0; i < lhs->AsUnion()->Length(); ++i) {
+      size = IntersectAux(lhs->AsUnion()->Get(i), rhs, result, size, region);
     }
+    return size;
+  }
+  if (rhs->IsUnion()) {
+    for (int i = 0; i < rhs->AsUnion()->Length(); ++i) {
+      size = IntersectAux(lhs, rhs->AsUnion()->Get(i), result, size, region);
+    }
+    return size;
+  }
+
+  if (!BitsetType::IsInhabited(lhs->BitsetLub() & rhs->BitsetLub())) {
+    return size;
+  }
+
+  if (lhs->IsRange()) {
+    if (rhs->IsBitset() || rhs->IsClass()) {
+      return UpdateRange(
+          Config::template cast<RangeType>(lhs), result, size, region);
+    }
+    if (rhs->IsConstant() &&
+        Contains(lhs->AsRange(), *rhs->AsConstant()->Value())) {
+      return AddToUnion(rhs, result, size, region);
+    }
+    return size;
+  }
+  if (rhs->IsRange()) {
+    if (lhs->IsBitset() || lhs->IsClass()) {
+      return UpdateRange(
+          Config::template cast<RangeType>(rhs), result, size, region);
+    }
+    if (lhs->IsConstant() &&
+        Contains(rhs->AsRange(), *lhs->AsConstant()->Value())) {
+      return AddToUnion(lhs, result, size, region);
+    }
+    return size;
+  }
+
+  if (lhs->IsBitset() || rhs->IsBitset()) {
+    return AddToUnion(lhs->IsBitset() ? rhs : lhs, result, size, region);
+  }
+  if (lhs->IsClass() != rhs->IsClass()) {
+    return AddToUnion(lhs->IsClass() ? rhs : lhs, result, size, region);
+  }
+  if (lhs->SimplyEquals(rhs->unhandle())) {
+    return AddToUnion(lhs, result, size, region);
   }
   return size;
 }
 
 
-// Union is O(1) on simple bitsets, but O(n*m) on structured unions.
 template<class Config>
 typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Union(
     TypeHandle type1, TypeHandle type2, Region* region) {
+
   // Fast case: bit sets.
   if (type1->IsBitset() && type2->IsBitset()) {
     return BitsetType::New(type1->AsBitset() | type2->AsBitset(), region);
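
The rewritten Intersect above reserves slot 0 of the result union for the
bitset part and slot 1 for the range part, and guards each size computation
with AddIsSafe so pathological inputs degrade to Any rather than overflowing
the union length. A minimal standalone sketch of that guard (plain C++,
outside V8):

    #include <cassert>
    #include <limits>

    // Same check as the AddIsSafe helper added above: true iff x + y fits
    // in an int, for any signs of x and y.
    static bool AddIsSafe(int x, int y) {
      return x >= 0 ? y <= std::numeric_limits<int>::max() - x
                    : y >= std::numeric_limits<int>::min() - x;
    }

    int main() {
      assert(AddIsSafe(1, 2));
      assert(!AddIsSafe(std::numeric_limits<int>::max(), 2));  // would wrap
      // Mirrors the call pattern in Intersect/Union: check size1 + size2,
      // then check again before adding the two reserved slots.
      int size1 = 3, size2 = 4;
      if (AddIsSafe(size1, size2) && AddIsSafe(size1 + size2, 2)) {
        assert(size1 + size2 + 2 == 9);  // one slot for bitset, one for range
      }
      return 0;
    }
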
@@ -596,92 +748,80 @@
   if (type1->IsAny() || type2->IsNone()) return type1;
   if (type2->IsAny() || type1->IsNone()) return type2;
 
-  // Semi-fast case: Unioned objects are neither involved nor produced.
-  if (!(type1->IsUnion() || type2->IsUnion())) {
-    if (type1->Is(type2)) return type2;
-    if (type2->Is(type1)) return type1;
-  }
+  // Semi-fast case.
+  if (type1->Is(type2)) return type2;
+  if (type2->Is(type1)) return type1;
 
-  // Slow case: may need to produce a Unioned object.
-  int size = 0;
-  if (!type1->IsBitset()) {
-    size += (type1->IsUnion() ? type1->AsUnion()->Length() : 1);
-  }
-  if (!type2->IsBitset()) {
-    size += (type2->IsUnion() ? type2->AsUnion()->Length() : 1);
-  }
-  int bitset = type1->BitsetGlb() | type2->BitsetGlb();
-  if (bitset != BitsetType::kNone) ++size;
-  ASSERT(size >= 1);
-
-  UnionHandle unioned = UnionType::New(size, region);
+  // Slow case: create union.
+  int size1 = type1->IsUnion() ? type1->AsUnion()->Length() : 1;
+  int size2 = type2->IsUnion() ? type2->AsUnion()->Length() : 1;
+  if (!AddIsSafe(size1, size2)) return Any(region);
+  int size = size1 + size2;
+  if (!AddIsSafe(size, 2)) return Any(region);
+  size += 2;
+  UnionHandle result = UnionType::New(size, region);
   size = 0;
-  if (bitset != BitsetType::kNone) {
-    unioned->Set(size++, BitsetType::New(bitset, region));
-  }
-  size = ExtendUnion(unioned, size, type1, type2, false, region);
-  size = ExtendUnion(unioned, size, type2, type1, false, region);
-  size = NormalizeUnion(unioned, size, bitset);
 
-  if (size == 1) {
-    return unioned->Get(0);
-  } else {
-    unioned->Shrink(size);
-    ASSERT(unioned->Wellformed());
-    return unioned;
+  // Deal with bitsets.
+  TypeHandle bits = BitsetType::New(
+      type1->BitsetGlb() | type2->BitsetGlb(), region);
+  result->Set(size++, bits);
+
+  // Deal with ranges.
+  TypeHandle range = None(region);
+  RangeType* range1 = type1->GetRange();
+  RangeType* range2 = type2->GetRange();
+  if (range1 != NULL && range2 != NULL) {
+    range = RangeType::New(Union(Limits(range1), Limits(range2)), region);
+  } else if (range1 != NULL) {
+    range = handle(range1);
+  } else if (range2 != NULL) {
+    range = handle(range2);
   }
+  result->Set(size++, range);
+
+  size = AddToUnion(type1, result, size, region);
+  size = AddToUnion(type2, result, size, region);
+  return NormalizeUnion(result, size);
 }
 
 
-// Intersection is O(1) on simple bitsets, but O(n*m) on structured unions.
+// Add [type] to [result] unless [type] is a bitset, a range, or already
+// subsumed. Return the new size of [result].
 template<class Config>
-typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::Intersect(
-    TypeHandle type1, TypeHandle type2, Region* region) {
-  // Fast case: bit sets.
-  if (type1->IsBitset() && type2->IsBitset()) {
-    return BitsetType::New(type1->AsBitset() & type2->AsBitset(), region);
+int TypeImpl<Config>::AddToUnion(
+    TypeHandle type, UnionHandle result, int size, Region* region) {
+  if (type->IsBitset() || type->IsRange()) return size;
+  if (type->IsUnion()) {
+    for (int i = 0; i < type->AsUnion()->Length(); ++i) {
+      size = AddToUnion(type->AsUnion()->Get(i), result, size, region);
+    }
+    return size;
   }
+  for (int i = 0; i < size; ++i) {
+    if (type->Is(result->Get(i))) return size;
+  }
+  result->Set(size++, type);
+  return size;
+}
 
-  // Fast case: top or bottom types.
-  if (type1->IsNone() || type2->IsAny()) return type1;
-  if (type2->IsNone() || type1->IsAny()) return type2;
 
-  // Semi-fast case: Unioned objects are neither involved nor produced.
-  if (!(type1->IsUnion() || type2->IsUnion())) {
-    if (type1->Is(type2)) return type1;
-    if (type2->Is(type1)) return type2;
+template<class Config>
+typename TypeImpl<Config>::TypeHandle TypeImpl<Config>::NormalizeUnion(
+    UnionHandle unioned, int size) {
+  DCHECK(size >= 2);
+  // If range is subsumed by bitset, use its place for a different type.
+  if (unioned->Get(1)->Is(unioned->Get(0))) {
+    unioned->Set(1, unioned->Get(--size));
   }
-
-  // Slow case: may need to produce a Unioned object.
-  int size = 0;
-  if (!type1->IsBitset()) {
-    size += (type1->IsUnion() ? type1->AsUnion()->Length() : 1);
+  // If bitset is None, use its place for a different type.
+  if (size >= 2 && unioned->Get(0)->IsNone()) {
+    unioned->Set(0, unioned->Get(--size));
   }
-  if (!type2->IsBitset()) {
-    size += (type2->IsUnion() ? type2->AsUnion()->Length() : 1);
-  }
-  int bitset = type1->BitsetGlb() & type2->BitsetGlb();
-  if (bitset != BitsetType::kNone) ++size;
-  ASSERT(size >= 1);
-
-  UnionHandle unioned = UnionType::New(size, region);
-  size = 0;
-  if (bitset != BitsetType::kNone) {
-    unioned->Set(size++, BitsetType::New(bitset, region));
-  }
-  size = ExtendUnion(unioned, size, type1, type2, true, region);
-  size = ExtendUnion(unioned, size, type2, type1, true, region);
-  size = NormalizeUnion(unioned, size, bitset);
-
-  if (size == 0) {
-    return None(region);
-  } else if (size == 1) {
-    return unioned->Get(0);
-  } else {
-    unioned->Shrink(size);
-    ASSERT(unioned->Wellformed());
-    return unioned;
-  }
+  if (size == 1) return unioned->Get(0);
+  unioned->Shrink(size);
+  SLOW_DCHECK(unioned->Wellformed());
+  return unioned;
 }
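
UpdateRange in the previous hunk and NormalizeUnion just above both reclaim
union slots with a swap-with-last idiom (result->Set(i, result->Get(--size))
and unioned->Set(1, unioned->Get(--size))): a subsumed member is overwritten
by the final element instead of shifting the tail, at the cost of element
order. The same idiom in isolation (plain C++, with the Is() subsumption test
replaced by a simple value test):

    #include <cassert>
    #include <vector>

    // Drop all elements <= bound without preserving order, returning the new
    // logical size, just as UpdateRange returns the shrunken union size.
    int RemoveSubsumed(std::vector<int>& elems, int size, int bound) {
      for (int i = 0; i < size; ) {
        if (elems[i] <= bound) {
          elems[i] = elems[--size];  // swap-with-last; re-examine slot i
        } else {
          ++i;
        }
      }
      return size;
    }

    int main() {
      std::vector<int> v = {1, 7, 2, 9, 3};
      int size = RemoveSubsumed(v, static_cast<int>(v.size()), 3);
      assert(size == 2);  // only 7 and 9 survive, in unspecified order
      return 0;
    }
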
 
 
@@ -727,7 +867,7 @@
 template<class Config> template<class T>
 typename TypeImpl<Config>::TypeHandle
 TypeImpl<Config>::Iterator<T>::get_type() {
-  ASSERT(!Done());
+  DCHECK(!Done());
   return type_->IsUnion() ? type_->AsUnion()->Get(index_) : type_;
 }
 
@@ -777,7 +917,7 @@
   DisallowHeapAllocation no_allocation;
   ++index_;
   if (type_->IsUnion()) {
-    UnionHandle unioned = handle(type_->AsUnion());
+    UnionHandle unioned = Config::template cast<UnionType>(type_);
     for (; index_ < unioned->Length(); ++index_) {
       if (matches(unioned->Get(index_))) return;
     }
@@ -798,13 +938,12 @@
   if (type->IsBitset()) {
     return BitsetType::New(type->AsBitset(), region);
   } else if (type->IsClass()) {
-    return ClassType::New(
-        type->AsClass()->Map(),
-        BitsetType::New(type->BitsetLub(), region), region);
+    return ClassType::New(type->AsClass()->Map(), region);
   } else if (type->IsConstant()) {
-    return ConstantType::New(
-        type->AsConstant()->Value(),
-        Convert<OtherType>(type->AsConstant()->Bound(), region), region);
+    return ConstantType::New(type->AsConstant()->Value(), region);
+  } else if (type->IsRange()) {
+    return RangeType::New(
+        type->AsRange()->Min(), type->AsRange()->Max(), region);
   } else if (type->IsContext()) {
     TypeHandle outer = Convert<OtherType>(type->AsContext()->Outer(), region);
     return ContextType::New(outer, region);
@@ -812,22 +951,22 @@
     int length = type->AsUnion()->Length();
     UnionHandle unioned = UnionType::New(length, region);
     for (int i = 0; i < length; ++i) {
-      unioned->Set(i, Convert<OtherType>(type->AsUnion()->Get(i), region));
+      TypeHandle t = Convert<OtherType>(type->AsUnion()->Get(i), region);
+      unioned->Set(i, t);
     }
     return unioned;
   } else if (type->IsArray()) {
-    return ArrayType::New(
-        Convert<OtherType>(type->AsArray()->Element(), region),
-        Convert<OtherType>(type->AsArray()->Bound(), region), region);
+    TypeHandle element = Convert<OtherType>(type->AsArray()->Element(), region);
+    return ArrayType::New(element, region);
   } else if (type->IsFunction()) {
+    TypeHandle res = Convert<OtherType>(type->AsFunction()->Result(), region);
+    TypeHandle rcv = Convert<OtherType>(type->AsFunction()->Receiver(), region);
     FunctionHandle function = FunctionType::New(
-        Convert<OtherType>(type->AsFunction()->Result(), region),
-        Convert<OtherType>(type->AsFunction()->Receiver(), region),
-        Convert<OtherType>(type->AsFunction()->Bound(), region),
-        type->AsFunction()->Arity(), region);
+        res, rcv, type->AsFunction()->Arity(), region);
     for (int i = 0; i < function->Arity(); ++i) {
-      function->InitParameter(i,
-          Convert<OtherType>(type->AsFunction()->Parameter(i), region));
+      TypeHandle param = Convert<OtherType>(
+          type->AsFunction()->Parameter(i), region);
+      function->InitParameter(i, param);
     }
     return function;
   } else {
@@ -841,8 +980,8 @@
 // Printing.
 
 template<class Config>
-const char* TypeImpl<Config>::BitsetType::Name(int bitset) {
-  switch (bitset) {
+const char* TypeImpl<Config>::BitsetType::Name(bitset bits) {
+  switch (bits) {
     case REPRESENTATION(kAny): return "Any";
     #define RETURN_NAMED_REPRESENTATION_TYPE(type, value) \
     case REPRESENTATION(k##type): return #type;
@@ -860,115 +999,112 @@
 }
 
 
-template<class Config>
-void TypeImpl<Config>::BitsetType::PrintTo(StringStream* stream, int bitset) {
+template <class Config>
+void TypeImpl<Config>::BitsetType::Print(OStream& os,  // NOLINT
+                                         bitset bits) {
   DisallowHeapAllocation no_allocation;
-  const char* name = Name(bitset);
+  const char* name = Name(bits);
   if (name != NULL) {
-    stream->Add("%s", name);
-  } else {
-    static const int named_bitsets[] = {
-      #define BITSET_CONSTANT(type, value) REPRESENTATION(k##type),
-      REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
-      #undef BITSET_CONSTANT
-
-      #define BITSET_CONSTANT(type, value) SEMANTIC(k##type),
-      SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
-      #undef BITSET_CONSTANT
-    };
-
-    bool is_first = true;
-    stream->Add("(");
-    for (int i(ARRAY_SIZE(named_bitsets) - 1); bitset != 0 && i >= 0; --i) {
-      int subset = named_bitsets[i];
-      if ((bitset & subset) == subset) {
-        if (!is_first) stream->Add(" | ");
-        is_first = false;
-        stream->Add("%s", Name(subset));
-        bitset -= subset;
-      }
-    }
-    ASSERT(bitset == 0);
-    stream->Add(")");
+    os << name;
+    return;
   }
+
+  static const bitset named_bitsets[] = {
+#define BITSET_CONSTANT(type, value) REPRESENTATION(k##type),
+      REPRESENTATION_BITSET_TYPE_LIST(BITSET_CONSTANT)
+#undef BITSET_CONSTANT
+
+#define BITSET_CONSTANT(type, value) SEMANTIC(k##type),
+      SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
+#undef BITSET_CONSTANT
+  };
+
+  bool is_first = true;
+  os << "(";
+  for (int i(arraysize(named_bitsets) - 1); bits != 0 && i >= 0; --i) {
+    bitset subset = named_bitsets[i];
+    if ((bits & subset) == subset) {
+      if (!is_first) os << " | ";
+      is_first = false;
+      os << Name(subset);
+      bits -= subset;
+    }
+  }
+  DCHECK(bits == 0);
+  os << ")";
 }
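
BitsetType::Print above decomposes an unnamed bitset by scanning the name
table from the back, so composed masks (which appear later in the table) are
preferred over their atomic pieces. The loop shape in isolation, with a
hypothetical name table:

    #include <cstdio>

    typedef unsigned bitset;

    int main() {
      // Hypothetical named subsets; composed masks come last, as the
      // REPRESENTATION/SEMANTIC lists place compound entries after atoms.
      struct { bitset bits; const char* name; } named[] = {
          {0x1u, "A"}, {0x2u, "B"}, {0x4u, "C"}, {0x3u, "AB"}};
      bitset bits = 0x7u;  // A | B | C
      bool is_first = true;
      std::printf("(");
      for (int i = static_cast<int>(sizeof(named) / sizeof(named[0])) - 1;
           bits != 0 && i >= 0; --i) {
        bitset subset = named[i].bits;
        if ((bits & subset) == subset) {
          if (!is_first) std::printf(" | ");
          is_first = false;
          std::printf("%s", named[i].name);
          bits -= subset;
        }
      }
      std::printf(")\n");  // prints "(AB | C)" rather than "(A | B | C)"
      return 0;
    }
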
 
 
-template<class Config>
-void TypeImpl<Config>::PrintTo(StringStream* stream, PrintDimension dim) {
+template <class Config>
+void TypeImpl<Config>::PrintTo(OStream& os, PrintDimension dim) {  // NOLINT
   DisallowHeapAllocation no_allocation;
   if (dim != REPRESENTATION_DIM) {
     if (this->IsBitset()) {
-      BitsetType::PrintTo(stream, SEMANTIC(this->AsBitset()));
+      BitsetType::Print(os, SEMANTIC(this->AsBitset()));
     } else if (this->IsClass()) {
-      stream->Add("Class(%p < ", static_cast<void*>(*this->AsClass()->Map()));
-      BitsetType::New(BitsetType::Lub(this))->PrintTo(stream, dim);
-      stream->Add(")");
-      return;
+      os << "Class(" << static_cast<void*>(*this->AsClass()->Map()) << " < ";
+      BitsetType::New(BitsetType::Lub(this))->PrintTo(os, dim);
+      os << ")";
     } else if (this->IsConstant()) {
-      stream->Add("Constant(%p : ",
-             static_cast<void*>(*this->AsConstant()->Value()));
-      BitsetType::New(BitsetType::Lub(this))->PrintTo(stream, dim);
-      stream->Add(")");
-      return;
+      os << "Constant(" << static_cast<void*>(*this->AsConstant()->Value())
+         << ")";
+    } else if (this->IsRange()) {
+      os << "Range(" << this->AsRange()->Min()->Number()
+         << ", " << this->AsRange()->Max()->Number() << ")";
     } else if (this->IsContext()) {
-      stream->Add("Context(");
-      this->AsContext()->Outer()->PrintTo(stream, dim);
-      stream->Add(")");
+      os << "Context(";
+      this->AsContext()->Outer()->PrintTo(os, dim);
+      os << ")";
     } else if (this->IsUnion()) {
-      stream->Add("(");
+      os << "(";
       UnionHandle unioned = handle(this->AsUnion());
       for (int i = 0; i < unioned->Length(); ++i) {
         TypeHandle type_i = unioned->Get(i);
-        if (i > 0) stream->Add(" | ");
-        type_i->PrintTo(stream, dim);
+        if (i > 0) os << " | ";
+        type_i->PrintTo(os, dim);
       }
-      stream->Add(")");
-      return;
+      os << ")";
     } else if (this->IsArray()) {
-      stream->Add("Array(");
-      AsArray()->Element()->PrintTo(stream, dim);
-      stream->Add(")");
+      os << "Array(";
+      AsArray()->Element()->PrintTo(os, dim);
+      os << ")";
     } else if (this->IsFunction()) {
       if (!this->AsFunction()->Receiver()->IsAny()) {
-        this->AsFunction()->Receiver()->PrintTo(stream, dim);
-        stream->Add(".");
+        this->AsFunction()->Receiver()->PrintTo(os, dim);
+        os << ".";
       }
-      stream->Add("(");
+      os << "(";
       for (int i = 0; i < this->AsFunction()->Arity(); ++i) {
-        if (i > 0) stream->Add(", ");
-        this->AsFunction()->Parameter(i)->PrintTo(stream, dim);
+        if (i > 0) os << ", ";
+        this->AsFunction()->Parameter(i)->PrintTo(os, dim);
       }
-      stream->Add(")->");
-      this->AsFunction()->Result()->PrintTo(stream, dim);
+      os << ")->";
+      this->AsFunction()->Result()->PrintTo(os, dim);
     } else {
       UNREACHABLE();
     }
   }
-  if (dim == BOTH_DIMS) {
-    stream->Add("/");
-  }
+  if (dim == BOTH_DIMS) os << "/";
   if (dim != SEMANTIC_DIM) {
-    BitsetType::PrintTo(stream, REPRESENTATION(this->BitsetLub()));
+    BitsetType::Print(os, REPRESENTATION(this->BitsetLub()));
   }
 }
 
 
-template<class Config>
-void TypeImpl<Config>::TypePrint(FILE* out, PrintDimension dim) {
-  HeapStringAllocator allocator;
-  StringStream stream(&allocator);
-  PrintTo(&stream, dim);
-  stream.OutputToFile(out);
+#ifdef DEBUG
+template <class Config>
+void TypeImpl<Config>::Print() {
+  OFStream os(stdout);
+  PrintTo(os);
+  os << endl;
 }
-
-
-template<class Config>
-void TypeImpl<Config>::TypePrint(PrintDimension dim) {
-  TypePrint(stdout, dim);
-  PrintF(stdout, "\n");
-  Flush(stdout);
+template <class Config>
+void TypeImpl<Config>::BitsetType::Print(bitset bits) {
+  OFStream os(stdout);
+  Print(os, bits);
+  os << endl;
 }
+#endif
 
 
 // -----------------------------------------------------------------------------
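
The range arithmetic threaded through Intersect and Union above bottoms out in
the Limits helpers declared in types.h below: intersection takes the larger
minimum and smaller maximum (empty when min > max, which is why Intersect
checks lim.min->Number() <= lim.max->Number() before building a range), and
union takes the smaller minimum and larger maximum. A simplified double-based
model of that behavior (V8's Limits holds object handles, not doubles; this is
a sketch of the implied semantics, not the actual implementation):

    #include <algorithm>
    #include <cassert>

    struct Limits { double min, max; };  // simplified stand-in

    Limits Intersect(Limits lhs, Limits rhs) {
      return Limits{std::max(lhs.min, rhs.min), std::min(lhs.max, rhs.max)};
    }

    Limits Union(Limits lhs, Limits rhs) {
      return Limits{std::min(lhs.min, rhs.min), std::max(lhs.max, rhs.max)};
    }

    int main() {
      Limits a{0, 10}, b{5, 20}, c{30, 40};
      Limits i = Intersect(a, b);
      assert(i.min == 5 && i.max == 10);
      assert(Intersect(a, c).min > Intersect(a, c).max);  // empty -> None
      Limits u = Union(a, c);
      assert(u.min == 0 && u.max == 40);  // may cover integers not in a or c
      return 0;
    }
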
diff --git a/src/types.h b/src/types.h
index aaa76e4..e7815ed 100644
--- a/src/types.h
+++ b/src/types.h
@@ -5,7 +5,10 @@
 #ifndef V8_TYPES_H_
 #define V8_TYPES_H_
 
+#include "src/conversions.h"
+#include "src/factory.h"
 #include "src/handles.h"
+#include "src/ostreams.h"
 
 namespace v8 {
 namespace internal {
@@ -21,6 +24,7 @@
 // Types consist of two dimensions: semantic (value range) and representation.
 // Both are related through subtyping.
 //
+//
 // SEMANTIC DIMENSION
 //
 // The following equations and inequations hold for the semantic axis:
@@ -59,6 +63,7 @@
 // However, we also define a 'temporal' variant of the subtyping relation that
 // considers the _current_ state only, i.e., Constant(x) <_now Class(map(x)).
 //
+//
 // REPRESENTATIONAL DIMENSION
 //
 // For the representation axis, the following holds:
@@ -66,11 +71,12 @@
 //   None <= R
 //   R <= Any
 //
-//   UntaggedInt <= UntaggedInt8 \/ UntaggedInt16 \/ UntaggedInt32)
-//   UntaggedFloat <= UntaggedFloat32 \/ UntaggedFloat64
-//   UntaggedNumber <= UntaggedInt \/ UntaggedFloat
-//   Untagged <= UntaggedNumber \/ UntaggedPtr
-//   Tagged <= TaggedInt \/ TaggedPtr
+//   UntaggedInt = UntaggedInt1 \/ UntaggedInt8 \/
+//                 UntaggedInt16 \/ UntaggedInt32
+//   UntaggedFloat = UntaggedFloat32 \/ UntaggedFloat64
+//   UntaggedNumber = UntaggedInt \/ UntaggedFloat
+//   Untagged = UntaggedNumber \/ UntaggedPtr
+//   Tagged = TaggedInt \/ TaggedPtr
 //
 // Subtyping relates the two dimensions, for example:
 //
@@ -85,6 +91,16 @@
 //   SignedSmall /\ TaggedInt       (a 'smi')
 //   Number /\ TaggedPtr            (a heap number)
 //
+//
+// RANGE TYPES
+//
+// A range type represents a contiguous integer interval by its minimum and
+// maximum value.  Either value might be an infinity.
+//
+// Constant(v) is considered a subtype of Range(x..y) if v happens to be an
+// integer between x and y.
+//
+//
 // PREDICATES
 //
 // There are two main functions for testing types:
@@ -106,21 +122,23 @@
 // Any compilation decision based on such temporary properties requires runtime
 // guarding!
 //
+//
 // PROPERTIES
 //
 // Various formal properties hold for constructors, operators, and predicates
-// over types. For example, constructors are injective, subtyping is a complete
-// partial order, union and intersection satisfy the usual algebraic properties.
+// over types. For example, constructors are injective and subtyping is a
+// complete partial order.
 //
 // See test/cctest/test-types.cc for a comprehensive executable specification,
 // especially with respect to the properties of the more exotic 'temporal'
 // constructors and predicates (those prefixed 'Now').
 //
+//
 // IMPLEMENTATION
 //
 // Internally, all 'primitive' types, and their unions, are represented as
-// bitsets. Class is a heap pointer to the respective map. Only Constant's, or
-// unions containing Class'es or Constant's, currently require allocation.
+// bitsets. Bit 0 is reserved for tagging. Class is a heap pointer to the
+// respective map. Only structured types require allocation.
 // Note that the bitset representation is closed under both Union and Intersect.
 //
 // There are two type representations, using different allocation:
@@ -136,23 +154,23 @@
 // Values for bitset types
 
 #define MASK_BITSET_TYPE_LIST(V) \
-  V(Representation, static_cast<int>(0xffc00000)) \
-  V(Semantic,       static_cast<int>(0x003fffff))
+  V(Representation, 0xff800000u) \
+  V(Semantic,       0x007ffffeu)
 
 #define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
 #define SEMANTIC(k)       ((k) & BitsetType::kSemantic)
 
 #define REPRESENTATION_BITSET_TYPE_LIST(V) \
   V(None,             0)                   \
-  V(UntaggedInt1,     1 << 22 | kSemantic) \
-  V(UntaggedInt8,     1 << 23 | kSemantic) \
-  V(UntaggedInt16,    1 << 24 | kSemantic) \
-  V(UntaggedInt32,    1 << 25 | kSemantic) \
-  V(UntaggedFloat32,  1 << 26 | kSemantic) \
-  V(UntaggedFloat64,  1 << 27 | kSemantic) \
-  V(UntaggedPtr,      1 << 28 | kSemantic) \
-  V(TaggedInt,        1 << 29 | kSemantic) \
-  V(TaggedPtr,        -1 << 30 | kSemantic)  /* MSB has to be sign-extended */ \
+  V(UntaggedInt1,     1u << 23 | kSemantic) \
+  V(UntaggedInt8,     1u << 24 | kSemantic) \
+  V(UntaggedInt16,    1u << 25 | kSemantic) \
+  V(UntaggedInt32,    1u << 26 | kSemantic) \
+  V(UntaggedFloat32,  1u << 27 | kSemantic) \
+  V(UntaggedFloat64,  1u << 28 | kSemantic) \
+  V(UntaggedPtr,      1u << 29 | kSemantic) \
+  V(TaggedInt,        1u << 30 | kSemantic) \
+  V(TaggedPtr,        1u << 31 | kSemantic) \
   \
   V(UntaggedInt,      kUntaggedInt1 | kUntaggedInt8 |      \
                       kUntaggedInt16 | kUntaggedInt32)     \
@@ -162,34 +180,35 @@
   V(Tagged,           kTaggedInt | kTaggedPtr)
 
 #define SEMANTIC_BITSET_TYPE_LIST(V) \
-  V(Null,                1 << 0  | REPRESENTATION(kTaggedPtr)) \
-  V(Undefined,           1 << 1  | REPRESENTATION(kTaggedPtr)) \
-  V(Boolean,             1 << 2  | REPRESENTATION(kTaggedPtr)) \
-  V(UnsignedSmall,       1 << 3  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherSignedSmall,    1 << 4  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherUnsigned31,     1 << 5  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherUnsigned32,     1 << 6  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherSigned32,       1 << 7  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(MinusZero,           1 << 8  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(NaN,                 1 << 9  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherNumber,         1 << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(Symbol,              1 << 11 | REPRESENTATION(kTaggedPtr)) \
-  V(InternalizedString,  1 << 12 | REPRESENTATION(kTaggedPtr)) \
-  V(OtherString,         1 << 13 | REPRESENTATION(kTaggedPtr)) \
-  V(Undetectable,        1 << 14 | REPRESENTATION(kTaggedPtr)) \
-  V(Array,               1 << 15 | REPRESENTATION(kTaggedPtr)) \
-  V(Buffer,              1 << 16 | REPRESENTATION(kTaggedPtr)) \
-  V(Function,            1 << 17 | REPRESENTATION(kTaggedPtr)) \
-  V(RegExp,              1 << 18 | REPRESENTATION(kTaggedPtr)) \
-  V(OtherObject,         1 << 19 | REPRESENTATION(kTaggedPtr)) \
-  V(Proxy,               1 << 20 | REPRESENTATION(kTaggedPtr)) \
-  V(Internal,            1 << 21 | REPRESENTATION(kTagged | kUntagged)) \
+  V(Null,                1u << 1  | REPRESENTATION(kTaggedPtr)) \
+  V(Undefined,           1u << 2  | REPRESENTATION(kTaggedPtr)) \
+  V(Boolean,             1u << 3  | REPRESENTATION(kTaggedPtr)) \
+  V(UnsignedSmall,       1u << 4  | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherSignedSmall,    1u << 5  | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherUnsigned31,     1u << 6  | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherUnsigned32,     1u << 7  | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherSigned32,       1u << 8  | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(MinusZero,           1u << 9  | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(NaN,                 1u << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherNumber,         1u << 11 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(Symbol,              1u << 12 | REPRESENTATION(kTaggedPtr)) \
+  V(InternalizedString,  1u << 13 | REPRESENTATION(kTaggedPtr)) \
+  V(OtherString,         1u << 14 | REPRESENTATION(kTaggedPtr)) \
+  V(Undetectable,        1u << 15 | REPRESENTATION(kTaggedPtr)) \
+  V(Array,               1u << 16 | REPRESENTATION(kTaggedPtr)) \
+  V(Buffer,              1u << 17 | REPRESENTATION(kTaggedPtr)) \
+  V(Function,            1u << 18 | REPRESENTATION(kTaggedPtr)) \
+  V(RegExp,              1u << 19 | REPRESENTATION(kTaggedPtr)) \
+  V(OtherObject,         1u << 20 | REPRESENTATION(kTaggedPtr)) \
+  V(Proxy,               1u << 21 | REPRESENTATION(kTaggedPtr)) \
+  V(Internal,            1u << 22 | REPRESENTATION(kTagged | kUntagged)) \
   \
   V(SignedSmall,         kUnsignedSmall | kOtherSignedSmall) \
   V(Signed32,            kSignedSmall | kOtherUnsigned31 | kOtherSigned32) \
   V(Unsigned32,          kUnsignedSmall | kOtherUnsigned31 | kOtherUnsigned32) \
   V(Integral32,          kSigned32 | kUnsigned32) \
-  V(Number,              kIntegral32 | kMinusZero | kNaN | kOtherNumber) \
+  V(OrderedNumber,       kIntegral32 | kMinusZero | kOtherNumber) \
+  V(Number,              kOrderedNumber | kNaN) \
   V(String,              kInternalizedString | kOtherString) \
   V(UniqueName,          kSymbol | kInternalizedString) \
   V(Name,                kSymbol | kString) \
@@ -202,12 +221,36 @@
   V(Receiver,            kObject | kProxy) \
   V(NonNumber,           kBoolean | kName | kNull | kReceiver | \
                          kUndefined | kInternal) \
-  V(Any,                 -1)
+  V(Any,                 0xfffffffeu)
+
+/*
+ * The following diagrams show how integers (in the mathematical sense) are
+ * divided among the different atomic numerical types.
+ *
+ * If SmiValuesAre31Bits():
+ *
+ *   ON    OS32     OSS     US     OU31    OU32     ON
+ * ______[_______[_______[_______[_______[_______[_______
+ *     -2^31   -2^30     0      2^30    2^31    2^32
+ *
+ * Otherwise:
+ *
+ *   ON         OSS             US         OU32     ON
+ * ______[_______________[_______________[_______[_______
+ *     -2^31             0              2^31    2^32
+ *
+ *
+ * E.g., OtherUnsigned32 (OU32) covers all integers from 2^31 to 2^32-1.
+ *
+ */
+
+#define PROPER_BITSET_TYPE_LIST(V) \
+  REPRESENTATION_BITSET_TYPE_LIST(V) \
+  SEMANTIC_BITSET_TYPE_LIST(V)
 
 #define BITSET_TYPE_LIST(V) \
   MASK_BITSET_TYPE_LIST(V) \
-  REPRESENTATION_BITSET_TYPE_LIST(V) \
-  SEMANTIC_BITSET_TYPE_LIST(V)
+  PROPER_BITSET_TYPE_LIST(V)
 
 
 // -----------------------------------------------------------------------------
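
The diagram above pins down, per Smi width, which atomic semantic type owns
each mathematical integer. Reading the 31-bit line directly gives this
illustrative classifier (not V8 code; names spelled out from the legend):

    #include <cassert>
    #include <cstdint>
    #include <string>

    // SmiValuesAre31Bits() case only: ON = OtherNumber, OS32 = OtherSigned32,
    // OSS = OtherSignedSmall, US = UnsignedSmall, OU31/OU32 = OtherUnsigned31/32.
    std::string ClassifySmi31(int64_t n) {
      const int64_t k2_30 = INT64_C(1) << 30;
      const int64_t k2_31 = INT64_C(1) << 31;
      const int64_t k2_32 = INT64_C(1) << 32;
      if (n < -k2_31 || n >= k2_32) return "OtherNumber";
      if (n < -k2_30) return "OtherSigned32";
      if (n < 0) return "OtherSignedSmall";
      if (n < k2_30) return "UnsignedSmall";
      if (n < k2_31) return "OtherUnsigned31";
      return "OtherUnsigned32";
    }

    int main() {
      assert(ClassifySmi31(0) == "UnsignedSmall");
      assert(ClassifySmi31(-1) == "OtherSignedSmall");
      assert(ClassifySmi31(INT64_C(1) << 31) == "OtherUnsigned32");
      assert(ClassifySmi31(INT64_C(1) << 32) == "OtherNumber");
      return 0;
    }
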
@@ -224,11 +267,11 @@
 //   static bool is_bitset(Type*);
 //   static bool is_class(Type*);
 //   static bool is_struct(Type*, int tag);
-//   static int as_bitset(Type*);
+//   static bitset as_bitset(Type*);
 //   static i::Handle<i::Map> as_class(Type*);
 //   static Handle<Struct>::type as_struct(Type*);
-//   static Type* from_bitset(int bitset);
-//   static Handle<Type>::type from_bitset(int bitset, Region*);
+//   static Type* from_bitset(bitset);
+//   static Handle<Type>::type from_bitset(bitset, Region*);
 //   static Handle<Type>::type from_class(i::Handle<Map>, Region*);
 //   static Handle<Type>::type from_struct(Handle<Struct>::type, int tag);
 //   static Handle<Struct>::type struct_create(int tag, int length, Region*);
@@ -247,12 +290,14 @@
  public:
   // Auxiliary types.
 
-  class BitsetType;      // Internal
-  class StructuralType;  // Internal
-  class UnionType;       // Internal
+  typedef uint32_t bitset;  // Internal
+  class BitsetType;         // Internal
+  class StructuralType;     // Internal
+  class UnionType;          // Internal
 
   class ClassType;
   class ConstantType;
+  class RangeType;
   class ContextType;
   class ArrayType;
   class FunctionType;
@@ -260,6 +305,7 @@
   typedef typename Config::template Handle<TypeImpl>::type TypeHandle;
   typedef typename Config::template Handle<ClassType>::type ClassHandle;
   typedef typename Config::template Handle<ConstantType>::type ConstantHandle;
+  typedef typename Config::template Handle<RangeType>::type RangeHandle;
   typedef typename Config::template Handle<ContextType>::type ContextHandle;
   typedef typename Config::template Handle<ArrayType>::type ArrayHandle;
   typedef typename Config::template Handle<FunctionType>::type FunctionHandle;
@@ -269,11 +315,13 @@
   // Constructors.
 
   #define DEFINE_TYPE_CONSTRUCTOR(type, value)                                \
-    static TypeImpl* type() { return BitsetType::New(BitsetType::k##type); }  \
+    static TypeImpl* type() {                                                 \
+      return BitsetType::New(BitsetType::k##type);                            \
+    }                                                                         \
     static TypeHandle type(Region* region) {                                  \
       return BitsetType::New(BitsetType::k##type, region);                    \
     }
-  BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
+  PROPER_BITSET_TYPE_LIST(DEFINE_TYPE_CONSTRUCTOR)
   #undef DEFINE_TYPE_CONSTRUCTOR
 
   static TypeHandle Class(i::Handle<i::Map> map, Region* region) {
@@ -282,6 +330,10 @@
   static TypeHandle Constant(i::Handle<i::Object> value, Region* region) {
     return ConstantType::New(value, region);
   }
+  static TypeHandle Range(
+      i::Handle<i::Object> min, i::Handle<i::Object> max, Region* region) {
+    return RangeType::New(min, max, region);
+  }
   static TypeHandle Context(TypeHandle outer, Region* region) {
     return ContextType::New(outer, region);
   }
@@ -347,7 +399,7 @@
   template<class TypeHandle>
   bool Equals(TypeHandle that) { return this->Equals(*that); }
 
-  // Equivalent to Constant(value)->Is(this), but avoiding allocation.
+  // Equivalent to Constant(val)->Is(this), but avoiding allocation.
   bool Contains(i::Object* val);
   bool Contains(i::Handle<i::Object> val) { return this->Contains(*val); }
 
@@ -374,6 +426,9 @@
   bool IsConstant() {
     return Config::is_struct(this, StructuralType::kConstantTag);
   }
+  bool IsRange() {
+    return Config::is_struct(this, StructuralType::kRangeTag);
+  }
   bool IsContext() {
     return Config::is_struct(this, StructuralType::kContextTag);
   }
@@ -386,10 +441,18 @@
 
   ClassType* AsClass() { return ClassType::cast(this); }
   ConstantType* AsConstant() { return ConstantType::cast(this); }
+  RangeType* AsRange() { return RangeType::cast(this); }
   ContextType* AsContext() { return ContextType::cast(this); }
   ArrayType* AsArray() { return ArrayType::cast(this); }
   FunctionType* AsFunction() { return FunctionType::cast(this); }
 
+  // Minimum and maximum of a numeric type.
+  // These functions do not distinguish between -0 and +0.  If the type equals
+  // kNaN, they return NaN; otherwise kNaN is ignored.  Only call these
+  // functions on subtypes of Number.
+  double Min();
+  double Max();
+
   int NumClasses();
   int NumConstants();
 
@@ -415,9 +478,11 @@
 
   enum PrintDimension { BOTH_DIMS, SEMANTIC_DIM, REPRESENTATION_DIM };
 
-  void PrintTo(StringStream* stream, PrintDimension = BOTH_DIMS);
-  void TypePrint(PrintDimension = BOTH_DIMS);
-  void TypePrint(FILE* out, PrintDimension = BOTH_DIMS);
+  void PrintTo(OStream& os, PrintDimension dim = BOTH_DIMS);  // NOLINT
+
+#ifdef DEBUG
+  void Print();
+#endif
 
  protected:
   // Friends.
@@ -440,27 +505,55 @@
   bool IsBitset() { return Config::is_bitset(this); }
   bool IsUnion() { return Config::is_struct(this, StructuralType::kUnionTag); }
 
-  int AsBitset() {
-    ASSERT(this->IsBitset());
+  bitset AsBitset() {
+    DCHECK(this->IsBitset());
     return static_cast<BitsetType*>(this)->Bitset();
   }
   UnionType* AsUnion() { return UnionType::cast(this); }
 
   // Auxiliary functions.
 
-  int BitsetGlb() { return BitsetType::Glb(this); }
-  int BitsetLub() { return BitsetType::Lub(this); }
-  int InherentBitsetLub() { return BitsetType::InherentLub(this); }
+  bitset BitsetGlb() { return BitsetType::Glb(this); }
+  bitset BitsetLub() { return BitsetType::Lub(this); }
 
   bool SlowIs(TypeImpl* that);
 
-  TypeHandle Narrow(int bitset, Region* region);
-  int BoundBy(TypeImpl* that);
-  int IndexInUnion(int bound, UnionHandle unioned, int current_size);
-  static int ExtendUnion(
-      UnionHandle unioned, int current_size, TypeHandle t,
-      TypeHandle other, bool is_intersect, Region* region);
-  static int NormalizeUnion(UnionHandle unioned, int current_size, int bitset);
+  static bool IsInteger(double x) {
+    return nearbyint(x) == x && !i::IsMinusZero(x);  // Allows for infinities.
+  }
+  static bool IsInteger(i::Object* x) {
+    return x->IsNumber() && IsInteger(x->Number());
+  }
+
+  struct Limits {
+    i::Handle<i::Object> min;
+    i::Handle<i::Object> max;
+    Limits(i::Handle<i::Object> min, i::Handle<i::Object> max) :
+      min(min), max(max) {}
+    explicit Limits(RangeType* range) :
+      min(range->Min()), max(range->Max()) {}
+  };
+
+  static Limits Intersect(Limits lhs, Limits rhs);
+  static Limits Union(Limits lhs, Limits rhs);
+  static bool Overlap(RangeType* lhs, RangeType* rhs);
+  static bool Contains(RangeType* lhs, RangeType* rhs);
+  static bool Contains(RangeType* range, i::Object* val);
+
+  RangeType* GetRange();
+  static int UpdateRange(
+      RangeHandle type, UnionHandle result, int size, Region* region);
+
+  bool SimplyEquals(TypeImpl* that);
+  template<class TypeHandle>
+  bool SimplyEquals(TypeHandle that) { return this->SimplyEquals(*that); }
+
+  static int AddToUnion(
+      TypeHandle type, UnionHandle result, int size, Region* region);
+  static int IntersectAux(
+      TypeHandle type, TypeHandle other,
+      UnionHandle result, int size, Region* region);
+  static TypeHandle NormalizeUnion(UnionHandle unioned, int size);
 };
 
 
@@ -479,31 +572,60 @@
     kUnusedEOL = 0
   };
 
-  int Bitset() { return Config::as_bitset(this); }
+  bitset Bitset() { return Config::as_bitset(this); }
 
-  static TypeImpl* New(int bitset) {
-    return static_cast<BitsetType*>(Config::from_bitset(bitset));
+  static TypeImpl* New(bitset bits) {
+    DCHECK(bits == kNone || IsInhabited(bits));
+    return Config::from_bitset(bits);
   }
-  static TypeHandle New(int bitset, Region* region) {
-    return Config::from_bitset(bitset, region);
+  static TypeHandle New(bitset bits, Region* region) {
+    DCHECK(bits == kNone || IsInhabited(bits));
+    return Config::from_bitset(bits, region);
+  }
+  // TODO(neis): Eventually allow again for types with empty semantics
+  // part and modify intersection and possibly subtyping accordingly.
+
+  static bool IsInhabited(bitset bits) {
+    return bits & kSemantic;
   }
 
-  static bool IsInhabited(int bitset) {
-    return (bitset & kRepresentation) && (bitset & kSemantic);
+  static bool Is(bitset bits1, bitset bits2) {
+    return (bits1 | bits2) == bits2;
   }
 
-  static int Glb(TypeImpl* type);  // greatest lower bound that's a bitset
-  static int Lub(TypeImpl* type);  // least upper bound that's a bitset
-  static int Lub(i::Object* value);
-  static int Lub(double value);
-  static int Lub(int32_t value);
-  static int Lub(uint32_t value);
-  static int Lub(i::Map* map);
-  static int InherentLub(TypeImpl* type);
+  static double Min(bitset);
+  static double Max(bitset);
 
-  static const char* Name(int bitset);
-  static void PrintTo(StringStream* stream, int bitset);
-  using TypeImpl::PrintTo;
+  static bitset Glb(TypeImpl* type);  // greatest lower bound that's a bitset
+  static bitset Lub(TypeImpl* type);  // least upper bound that's a bitset
+  static bitset Lub(i::Object* value);
+  static bitset Lub(double value);
+  static bitset Lub(int32_t value);
+  static bitset Lub(uint32_t value);
+  static bitset Lub(i::Map* map);
+  static bitset Lub(Limits lim);
+
+  static const char* Name(bitset);
+  static void Print(OStream& os, bitset);  // NOLINT
+#ifdef DEBUG
+  static void Print(bitset);
+#endif
+
+ private:
+  struct BitsetMin {
+    bitset bits;
+    double min;
+  };
+  static const BitsetMin BitsetMins31[];
+  static const BitsetMin BitsetMins32[];
+  static const BitsetMin* BitsetMins() {
+    return i::SmiValuesAre31Bits() ? BitsetMins31 : BitsetMins32;
+  }
+  static size_t BitsetMinsSize() {
+    return i::SmiValuesAre31Bits() ? 7 : 5;
+    /* arraysize(BitsetMins31) : arraysize(BitsetMins32); */
+    // Using arraysize here doesn't compile on Windows.
+  }
 };
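
With the move to an explicit uint32_t bitset, subtyping on bitsets collapses
to bit inclusion: Is(bits1, bits2) above holds exactly when every bit of bits1
is also set in bits2. Demonstrated with the semantic constants from the lists
earlier in this header (representation bits omitted for brevity; the real
constants OR those in as well):

    #include <cassert>
    #include <cstdint>

    typedef uint32_t bitset;

    // Same test as BitsetType::Is above.
    static bool Is(bitset bits1, bitset bits2) { return (bits1 | bits2) == bits2; }

    int main() {
      const bitset kUnsignedSmall    = 1u << 4;
      const bitset kOtherSignedSmall = 1u << 5;
      const bitset kOtherUnsigned31  = 1u << 6;
      const bitset kOtherSigned32    = 1u << 8;
      const bitset kSignedSmall = kUnsignedSmall | kOtherSignedSmall;
      const bitset kSigned32 =
          kSignedSmall | kOtherUnsigned31 | kOtherSigned32;
      assert(Is(kSignedSmall, kSigned32));   // SignedSmall <= Signed32
      assert(!Is(kSigned32, kSignedSmall));  // but not conversely
      return 0;
    }
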
 
 
@@ -521,6 +643,7 @@
   enum Tag {
     kClassTag,
     kConstantTag,
+    kRangeTag,
     kContextTag,
     kArrayTag,
     kFunctionTag,
@@ -531,28 +654,28 @@
     return Config::struct_length(Config::as_struct(this));
   }
   TypeHandle Get(int i) {
-    ASSERT(0 <= i && i < this->Length());
+    DCHECK(0 <= i && i < this->Length());
     return Config::struct_get(Config::as_struct(this), i);
   }
   void Set(int i, TypeHandle type) {
-    ASSERT(0 <= i && i < this->Length());
+    DCHECK(0 <= i && i < this->Length());
     Config::struct_set(Config::as_struct(this), i, type);
   }
   void Shrink(int length) {
-    ASSERT(2 <= length && length <= this->Length());
+    DCHECK(2 <= length && length <= this->Length());
     Config::struct_shrink(Config::as_struct(this), length);
   }
   template<class V> i::Handle<V> GetValue(int i) {
-    ASSERT(0 <= i && i < this->Length());
+    DCHECK(0 <= i && i < this->Length());
     return Config::template struct_get_value<V>(Config::as_struct(this), i);
   }
   template<class V> void SetValue(int i, i::Handle<V> x) {
-    ASSERT(0 <= i && i < this->Length());
+    DCHECK(0 <= i && i < this->Length());
     Config::struct_set_value(Config::as_struct(this), i, x);
   }
 
   static TypeHandle New(Tag tag, int length, Region* region) {
-    ASSERT(1 <= length);
+    DCHECK(1 <= length);
     return Config::from_struct(Config::struct_create(tag, length, region));
   }
 };
@@ -574,7 +697,7 @@
   }
 
   static UnionType* cast(TypeImpl* type) {
-    ASSERT(type->IsUnion());
+    DCHECK(type->IsUnion());
     return static_cast<UnionType*>(type);
   }
 
@@ -589,38 +712,29 @@
 class TypeImpl<Config>::ClassType : public StructuralType {
  public:
   TypeHandle Bound(Region* region) {
-    return Config::is_class(this)
-        ? BitsetType::New(BitsetType::Lub(*Config::as_class(this)), region)
-        : this->Get(0);
+    return Config::is_class(this) ?
+        BitsetType::New(BitsetType::Lub(*Config::as_class(this)), region) :
+        this->Get(0);
   }
   i::Handle<i::Map> Map() {
-    return Config::is_class(this)
-        ? Config::as_class(this)
-        : this->template GetValue<i::Map>(1);
-  }
-
-  static ClassHandle New(
-      i::Handle<i::Map> map, TypeHandle bound, Region* region) {
-    ClassHandle type = Config::template cast<ClassType>(
-        StructuralType::New(StructuralType::kClassTag, 2, region));
-    type->Set(0, bound);
-    type->SetValue(1, map);
-    return type;
+    return Config::is_class(this) ? Config::as_class(this) :
+        this->template GetValue<i::Map>(1);
   }
 
   static ClassHandle New(i::Handle<i::Map> map, Region* region) {
     ClassHandle type =
         Config::template cast<ClassType>(Config::from_class(map, region));
-    if (type->IsClass()) {
-      return type;
-    } else {
-      TypeHandle bound = BitsetType::New(BitsetType::Lub(*map), region);
-      return New(map, bound, region);
+    if (!type->IsClass()) {
+      type = Config::template cast<ClassType>(
+          StructuralType::New(StructuralType::kClassTag, 2, region));
+      type->Set(0, BitsetType::New(BitsetType::Lub(*map), region));
+      type->SetValue(1, map);
     }
+    return type;
   }
 
   static ClassType* cast(TypeImpl* type) {
-    ASSERT(type->IsClass());
+    DCHECK(type->IsClass());
     return static_cast<ClassType*>(type);
   }
 };
@@ -635,25 +749,55 @@
   TypeHandle Bound() { return this->Get(0); }
   i::Handle<i::Object> Value() { return this->template GetValue<i::Object>(1); }
 
-  static ConstantHandle New(
-      i::Handle<i::Object> value, TypeHandle bound, Region* region) {
+  static ConstantHandle New(i::Handle<i::Object> value, Region* region) {
     ConstantHandle type = Config::template cast<ConstantType>(
         StructuralType::New(StructuralType::kConstantTag, 2, region));
-    type->Set(0, bound);
+    type->Set(0, BitsetType::New(BitsetType::Lub(*value), region));
     type->SetValue(1, value);
     return type;
   }
 
-  static ConstantHandle New(i::Handle<i::Object> value, Region* region) {
-    TypeHandle bound = BitsetType::New(BitsetType::Lub(*value), region);
-    return New(value, bound, region);
-  }
-
   static ConstantType* cast(TypeImpl* type) {
-    ASSERT(type->IsConstant());
+    DCHECK(type->IsConstant());
     return static_cast<ConstantType*>(type);
   }
 };
+// TODO(neis): Also cache value if numerical.
+// TODO(neis): Allow restricting the representation.
+
+
+// -----------------------------------------------------------------------------
+// Range types.
+
+template<class Config>
+class TypeImpl<Config>::RangeType : public StructuralType {
+ public:
+  int BitsetLub() { return this->Get(0)->AsBitset(); }
+  i::Handle<i::Object> Min() { return this->template GetValue<i::Object>(1); }
+  i::Handle<i::Object> Max() { return this->template GetValue<i::Object>(2); }
+
+  static RangeHandle New(
+      i::Handle<i::Object> min, i::Handle<i::Object> max, Region* region) {
+    DCHECK(min->Number() <= max->Number());
+    RangeHandle type = Config::template cast<RangeType>(
+        StructuralType::New(StructuralType::kRangeTag, 3, region));
+    type->Set(0, BitsetType::New(BitsetType::Lub(Limits(min, max)), region));
+    type->SetValue(1, min);
+    type->SetValue(2, max);
+    return type;
+  }
+
+  static RangeHandle New(Limits lim, Region* region) {
+    return New(lim.min, lim.max, region);
+  }
+
+  static RangeType* cast(TypeImpl* type) {
+    DCHECK(type->IsRange());
+    return static_cast<RangeType*>(type);
+  }
+};
+// TODO(neis): Also cache min and max values.
+// TODO(neis): Allow restricting the representation.
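
RangeType stores its bitset lub in slot 0 and the two endpoint objects in
slots 1 and 2. Together with the header comment's rule that Constant(v) is a
subtype of Range(x..y) when v is an integer in [x, y], containment reduces to
a bounds-plus-integrality check; a double-based sketch (the real
Contains(RangeType*, i::Object*) works on heap objects):

    #include <cassert>
    #include <cmath>

    // Models: Constant(v) <= Range(min..max) iff v is an integer in range.
    static bool RangeContains(double min, double max, double v) {
      return std::nearbyint(v) == v && min <= v && v <= max;
    }

    int main() {
      assert(RangeContains(0, 10, 5));     // Constant(5) <= Range(0..10)
      assert(!RangeContains(0, 10, 5.5));  // non-integers never belong
      assert(!RangeContains(0, 10, 42));   // out of bounds
      return 0;
    }
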
 
 
 // -----------------------------------------------------------------------------
@@ -662,25 +806,17 @@
 template<class Config>
 class TypeImpl<Config>::ContextType : public StructuralType {
  public:
-  TypeHandle Bound() { return this->Get(0); }
-  TypeHandle Outer() { return this->Get(1); }
+  TypeHandle Outer() { return this->Get(0); }
 
-  static ContextHandle New(TypeHandle outer, TypeHandle bound, Region* region) {
+  static ContextHandle New(TypeHandle outer, Region* region) {
     ContextHandle type = Config::template cast<ContextType>(
-        StructuralType::New(StructuralType::kContextTag, 2, region));
-    type->Set(0, bound);
-    type->Set(1, outer);
+        StructuralType::New(StructuralType::kContextTag, 1, region));
+    type->Set(0, outer);
     return type;
   }
 
-  static ContextHandle New(TypeHandle outer, Region* region) {
-    TypeHandle bound = BitsetType::New(
-        BitsetType::kInternal & BitsetType::kTaggedPtr, region);
-    return New(outer, bound, region);
-  }
-
   static ContextType* cast(TypeImpl* type) {
-    ASSERT(type->IsContext());
+    DCHECK(type->IsContext());
     return static_cast<ContextType*>(type);
   }
 };
@@ -692,25 +828,17 @@
 template<class Config>
 class TypeImpl<Config>::ArrayType : public StructuralType {
  public:
-  TypeHandle Bound() { return this->Get(0); }
-  TypeHandle Element() { return this->Get(1); }
+  TypeHandle Element() { return this->Get(0); }
 
-  static ArrayHandle New(TypeHandle element, TypeHandle bound, Region* region) {
-    ASSERT(SEMANTIC(bound->AsBitset()) == SEMANTIC(BitsetType::kArray));
+  static ArrayHandle New(TypeHandle element, Region* region) {
     ArrayHandle type = Config::template cast<ArrayType>(
-        StructuralType::New(StructuralType::kArrayTag, 2, region));
-    type->Set(0, bound);
-    type->Set(1, element);
+        StructuralType::New(StructuralType::kArrayTag, 1, region));
+    type->Set(0, element);
     return type;
   }
 
-  static ArrayHandle New(TypeHandle element, Region* region) {
-    TypeHandle bound = BitsetType::New(BitsetType::kArray, region);
-    return New(element, bound, region);
-  }
-
   static ArrayType* cast(TypeImpl* type) {
-    ASSERT(type->IsArray());
+    DCHECK(type->IsArray());
     return static_cast<ArrayType*>(type);
   }
 };
@@ -722,34 +850,24 @@
 template<class Config>
 class TypeImpl<Config>::FunctionType : public StructuralType {
  public:
-  int Arity() { return this->Length() - 3; }
-  TypeHandle Bound() { return this->Get(0); }
-  TypeHandle Result() { return this->Get(1); }
-  TypeHandle Receiver() { return this->Get(2); }
-  TypeHandle Parameter(int i) { return this->Get(3 + i); }
+  int Arity() { return this->Length() - 2; }
+  TypeHandle Result() { return this->Get(0); }
+  TypeHandle Receiver() { return this->Get(1); }
+  TypeHandle Parameter(int i) { return this->Get(2 + i); }
 
-  void InitParameter(int i, TypeHandle type) { this->Set(3 + i, type); }
-
-  static FunctionHandle New(
-      TypeHandle result, TypeHandle receiver, TypeHandle bound,
-      int arity, Region* region) {
-    ASSERT(SEMANTIC(bound->AsBitset()) == SEMANTIC(BitsetType::kFunction));
-    FunctionHandle type = Config::template cast<FunctionType>(
-        StructuralType::New(StructuralType::kFunctionTag, 3 + arity, region));
-    type->Set(0, bound);
-    type->Set(1, result);
-    type->Set(2, receiver);
-    return type;
-  }
+  void InitParameter(int i, TypeHandle type) { this->Set(2 + i, type); }
 
   static FunctionHandle New(
       TypeHandle result, TypeHandle receiver, int arity, Region* region) {
-    TypeHandle bound = BitsetType::New(BitsetType::kFunction, region);
-    return New(result, receiver, bound, arity, region);
+    FunctionHandle type = Config::template cast<FunctionType>(
+        StructuralType::New(StructuralType::kFunctionTag, 2 + arity, region));
+    type->Set(0, result);
+    type->Set(1, receiver);
+    return type;
   }
 
   static FunctionType* cast(TypeImpl* type) {
-    ASSERT(type->IsFunction());
+    DCHECK(type->IsFunction());
     return static_cast<FunctionType*>(type);
   }
 };
@@ -799,12 +917,12 @@
   static inline bool is_class(Type* type);
   static inline bool is_struct(Type* type, int tag);
 
-  static inline int as_bitset(Type* type);
+  static inline Type::bitset as_bitset(Type* type);
   static inline i::Handle<i::Map> as_class(Type* type);
   static inline Struct* as_struct(Type* type);
 
-  static inline Type* from_bitset(int bitset);
-  static inline Type* from_bitset(int bitset, Zone* zone);
+  static inline Type* from_bitset(Type::bitset);
+  static inline Type* from_bitset(Type::bitset, Zone* zone);
   static inline Type* from_class(i::Handle<i::Map> map, Zone* zone);
   static inline Type* from_struct(Struct* structured);
 
@@ -841,12 +959,12 @@
   static inline bool is_class(Type* type);
   static inline bool is_struct(Type* type, int tag);
 
-  static inline int as_bitset(Type* type);
+  static inline Type::bitset as_bitset(Type* type);
   static inline i::Handle<i::Map> as_class(Type* type);
   static inline i::Handle<Struct> as_struct(Type* type);
 
-  static inline Type* from_bitset(int bitset);
-  static inline i::Handle<Type> from_bitset(int bitset, Isolate* isolate);
+  static inline Type* from_bitset(Type::bitset);
+  static inline i::Handle<Type> from_bitset(Type::bitset, Isolate* isolate);
   static inline i::Handle<Type> from_class(
       i::Handle<i::Map> map, Isolate* isolate);
   static inline i::Handle<Type> from_struct(i::Handle<Struct> structure);
@@ -885,7 +1003,7 @@
   BoundsImpl() {}
   explicit BoundsImpl(TypeHandle t) : lower(t), upper(t) {}
   BoundsImpl(TypeHandle l, TypeHandle u) : lower(l), upper(u) {
-    ASSERT(lower->Is(upper));
+    DCHECK(lower->Is(upper));
   }
 
   // Unrestricted bounds.
diff --git a/src/typing.cc b/src/typing.cc
index 7762624..02c9603 100644
--- a/src/typing.cc
+++ b/src/typing.cc
@@ -6,6 +6,7 @@
 
 #include "src/frames.h"
 #include "src/frames-inl.h"
+#include "src/ostreams.h"
 #include "src/parser.h"  // for CompileTimeValue; TODO(rossberg): should move
 #include "src/scopes.h"
 
@@ -27,7 +28,7 @@
 
 #define RECURSE(call)                         \
   do {                                        \
-    ASSERT(!visitor->HasStackOverflow());     \
+    DCHECK(!visitor->HasStackOverflow());     \
     call;                                     \
     if (visitor->HasStackOverflow()) return;  \
   } while (false)
@@ -50,12 +51,12 @@
 
 #ifdef OBJECT_PRINT
   static void PrintObserved(Variable* var, Object* value, Type* type) {
-    PrintF("  observed %s ", var->IsParameter() ? "param" : "local");
-    var->name()->Print();
-    PrintF(" : ");
-    value->ShortPrint();
-    PrintF(" -> ");
-    type->TypePrint();
+    OFStream os(stdout);
+    os << "  observed " << (var->IsParameter() ? "param" : "local") << "  ";
+    var->name()->Print(os);
+    os << " : " << Brief(value) << " -> ";
+    type->PrintTo(os);
+    os << endl;
   }
 #endif  // OBJECT_PRINT
 
@@ -75,7 +76,7 @@
   Scope* scope = info_->scope();
 
   // Assert that the frame on the stack belongs to the function we want to OSR.
-  ASSERT_EQ(*info_->closure(), frame->function());
+  DCHECK_EQ(*info_->closure(), frame->function());
 
   int params = scope->num_parameters();
   int locals = scope->StackLocalCount();
@@ -118,7 +119,7 @@
 
 #define RECURSE(call)                \
   do {                               \
-    ASSERT(!HasStackOverflow());     \
+    DCHECK(!HasStackOverflow());     \
     call;                            \
     if (HasStackOverflow()) return;  \
   } while (false)
@@ -351,6 +352,9 @@
 }
 
 
+void AstTyper::VisitClassLiteral(ClassLiteral* expr) {}
+
+
 void AstTyper::VisitNativeFunctionLiteral(NativeFunctionLiteral* expr) {
 }
 
@@ -435,7 +439,7 @@
     if (!expr->IsUninitialized()) {
       if (prop->key()->IsPropertyName()) {
         Literal* lit_key = prop->key()->AsLiteral();
-        ASSERT(lit_key != NULL && lit_key->value()->IsString());
+        DCHECK(lit_key != NULL && lit_key->value()->IsString());
         Handle<String> name = Handle<String>::cast(lit_key->value());
         oracle()->AssignmentReceiverTypes(id, name, expr->GetReceiverTypes());
       } else {
@@ -483,12 +487,9 @@
   if (!expr->IsUninitialized()) {
     if (expr->key()->IsPropertyName()) {
       Literal* lit_key = expr->key()->AsLiteral();
-      ASSERT(lit_key != NULL && lit_key->value()->IsString());
+      DCHECK(lit_key != NULL && lit_key->value()->IsString());
       Handle<String> name = Handle<String>::cast(lit_key->value());
-      bool is_prototype;
-      oracle()->PropertyReceiverTypes(
-          id, name, expr->GetReceiverTypes(), &is_prototype);
-      expr->set_is_function_prototype(is_prototype);
+      oracle()->PropertyReceiverTypes(id, name, expr->GetReceiverTypes());
     } else {
       bool is_string;
       oracle()->KeyedPropertyReceiverTypes(
@@ -725,6 +726,9 @@
 }
 
 
+void AstTyper::VisitSuperReference(SuperReference* expr) {}
+
+
 void AstTyper::VisitDeclarations(ZoneList<Declaration*>* decls) {
   for (int i = 0; i < decls->length(); ++i) {
     Declaration* decl = decls->at(i);
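
The RECURSE macros in this file (unchanged apart from the DCHECK rename)
encode the visitor's stack-overflow discipline: assert the flag is clear
before descending, and return early as soon as any callee sets it. A minimal
standalone imitation of the pattern (hypothetical depth limit standing in for
the real stack check):

    #include <cassert>
    #include <cstdio>

    static int depth = 0;
    static bool overflowed = false;
    static bool HasStackOverflow() { return overflowed; }

    // Imitates typing.cc's RECURSE: check before the call, bail out after.
    #define RECURSE(call)               \
      do {                              \
        assert(!HasStackOverflow());    \
        call;                           \
        if (HasStackOverflow()) return; \
      } while (false)

    void Visit(int n) {
      if (++depth > 1000) {  // stand-in for a real stack limit probe
        overflowed = true;
        return;
      }
      RECURSE(Visit(n + 1));  // unwinds quietly once the flag is set
    }

    int main() {
      Visit(0);
      std::printf("stopped at depth %d\n", depth);
      return 0;
    }
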
diff --git a/src/typing.h b/src/typing.h
index 1d76f8a..6f6b0dc 100644
--- a/src/typing.h
+++ b/src/typing.h
@@ -10,11 +10,11 @@
 #include "src/allocation.h"
 #include "src/ast.h"
 #include "src/compiler.h"
+#include "src/effects.h"
+#include "src/scopes.h"
 #include "src/type-info.h"
 #include "src/types.h"
-#include "src/effects.h"
 #include "src/zone.h"
-#include "src/scopes.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/unicode-inl.h b/src/unicode-inl.h
index 6ef7f98..81327d7 100644
--- a/src/unicode-inl.h
+++ b/src/unicode-inl.h
@@ -6,7 +6,7 @@
 #define V8_UNICODE_INL_H_
 
 #include "src/unicode.h"
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/utils.h"
 
 namespace unibrow {
@@ -58,7 +58,7 @@
 
 
 uint16_t Latin1::ConvertNonLatin1ToLatin1(uint16_t c) {
-  ASSERT(c > Latin1::kMaxChar);
+  DCHECK(c > Latin1::kMaxChar);
   switch (c) {
     // These are equivalent characters in Unicode.
     case 0x39c:
@@ -184,7 +184,7 @@
 template <unsigned kBufferSize>
 unsigned Utf8Decoder<kBufferSize>::WriteUtf16(uint16_t* data,
                                               unsigned length) const {
-  ASSERT(length > 0);
+  DCHECK(length > 0);
   if (length > utf16_length_) length = utf16_length_;
   // memcpy everything in buffer.
   unsigned buffer_length =
@@ -192,7 +192,7 @@
   unsigned memcpy_length = length <= buffer_length ? length : buffer_length;
   v8::internal::MemCopy(data, buffer_, memcpy_length * sizeof(uint16_t));
   if (length <= buffer_length) return length;
-  ASSERT(unbuffered_start_ != NULL);
+  DCHECK(unbuffered_start_ != NULL);
   // Copy the rest the slow way.
   WriteUtf16Slow(unbuffered_start_,
                  data + buffer_length,
diff --git a/src/unicode.cc b/src/unicode.cc
index 2d75654..a128a6f 100644
--- a/src/unicode.cc
+++ b/src/unicode.cc
@@ -5,8 +5,8 @@
 // This file was generated at 2014-02-07 15:31:16.733174
 
 #include "src/unicode-inl.h"
-#include <stdlib.h>
 #include <stdio.h>
+#include <stdlib.h>
 
 namespace unibrow {
 
@@ -271,7 +271,7 @@
   while (stream_length != 0) {
     unsigned cursor = 0;
     uint32_t character = Utf8::ValueOf(stream, stream_length, &cursor);
-    ASSERT(cursor > 0 && cursor <= stream_length);
+    DCHECK(cursor > 0 && cursor <= stream_length);
     stream += cursor;
     stream_length -= cursor;
     bool is_two_characters = character > Utf16::kMaxNonSurrogateCharCode;
@@ -296,7 +296,7 @@
     }
     // Have gone over buffer.
     // Last char of buffer is unused, set cursor back.
-    ASSERT(is_two_characters);
+    DCHECK(is_two_characters);
     writing_to_buffer = false;
     last_byte_of_buffer_unused_ = true;
     unbuffered_start_ = stream - cursor;
@@ -317,7 +317,7 @@
     if (character > unibrow::Utf16::kMaxNonSurrogateCharCode) {
       *data++ = Utf16::LeadSurrogate(character);
       *data++ = Utf16::TrailSurrogate(character);
-      ASSERT(data_length > 1);
+      DCHECK(data_length > 1);
       data_length -= 2;
     } else {
       *data++ = character;
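
Note: WriteUtf16Slow above emits code points beyond U+FFFF as UTF-16 surrogate pairs via Utf16::LeadSurrogate and Utf16::TrailSurrogate, and the DCHECK(data_length > 1) guarantees the output buffer has room for both halves. As a sanity check, a standalone sketch of the standard surrogate arithmetic (not V8's implementation):

#include <cassert>
#include <cstdint>

// Standard UTF-16 encoding of a supplementary code point (>= 0x10000):
// subtract 0x10000, then split the 20 payload bits across two units.
void EncodeSurrogatePair(uint32_t cp, uint16_t* lead, uint16_t* trail) {
  assert(cp >= 0x10000 && cp <= 0x10FFFF);
  uint32_t v = cp - 0x10000;
  *lead = static_cast<uint16_t>(0xD800 + (v >> 10));     // high 10 bits
  *trail = static_cast<uint16_t>(0xDC00 + (v & 0x3FF));  // low 10 bits
}

int main() {
  uint16_t lead, trail;
  EncodeSurrogatePair(0x1F600, &lead, &trail);  // U+1F600
  assert(lead == 0xD83D && trail == 0xDE00);
}
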
diff --git a/src/unique.h b/src/unique.h
index 4668128..619c3c9 100644
--- a/src/unique.h
+++ b/src/unique.h
@@ -5,8 +5,9 @@
 #ifndef V8_HYDROGEN_UNIQUE_H_
 #define V8_HYDROGEN_UNIQUE_H_
 
-#include "src/handles.h"
-#include "src/objects.h"
+#include "src/handles-inl.h"  // TODO(everyone): Fix our inl.h crap
+#include "src/objects-inl.h"  // TODO(everyone): Fix our inl.h crap
+#include "src/string-stream.h"
 #include "src/utils.h"
 #include "src/zone.h"
 
@@ -29,8 +30,10 @@
 // Careful! Comparison of two Uniques is only correct if both were created
 // in the same "era" of GC or if at least one is a non-movable object.
 template <typename T>
-class Unique V8_FINAL {
+class Unique {
  public:
+  Unique<T>() : raw_address_(NULL) {}
+
   // TODO(titzer): make private and introduce a uniqueness scope.
   explicit Unique(Handle<T> handle) {
     if (handle.is_null()) {
@@ -42,9 +45,9 @@
       // NOTE: we currently consider maps to be non-movable, so no special
       // assurance is required for creating a Unique<Map>.
      // TODO(titzer): other immortal immovable objects are also fine.
-      ASSERT(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
+      DCHECK(!AllowHeapAllocation::IsAllowed() || handle->IsMap());
       raw_address_ = reinterpret_cast<Address>(*handle);
-      ASSERT_NE(raw_address_, NULL);  // Non-null should imply non-zero address.
+      DCHECK_NE(raw_address_, NULL);  // Non-null should imply non-zero address.
     }
     handle_ = handle;
   }
@@ -68,28 +71,28 @@
 
   template <typename U>
   inline bool operator==(const Unique<U>& other) const {
-    ASSERT(IsInitialized() && other.IsInitialized());
+    DCHECK(IsInitialized() && other.IsInitialized());
     return raw_address_ == other.raw_address_;
   }
 
   template <typename U>
   inline bool operator!=(const Unique<U>& other) const {
-    ASSERT(IsInitialized() && other.IsInitialized());
+    DCHECK(IsInitialized() && other.IsInitialized());
     return raw_address_ != other.raw_address_;
   }
 
   inline intptr_t Hashcode() const {
-    ASSERT(IsInitialized());
+    DCHECK(IsInitialized());
     return reinterpret_cast<intptr_t>(raw_address_);
   }
 
   inline bool IsNull() const {
-    ASSERT(IsInitialized());
+    DCHECK(IsInitialized());
     return raw_address_ == NULL;
   }
 
   inline bool IsKnownGlobal(void* global) const {
-    ASSERT(IsInitialized());
+    DCHECK(IsInitialized());
     return raw_address_ == reinterpret_cast<Address>(global);
   }
 
@@ -118,9 +121,7 @@
   template <class U>
   friend class Unique;  // For comparing raw_address values.
 
- private:
-  Unique<T>() : raw_address_(NULL) { }
-
+ protected:
   Address raw_address_;
   Handle<T> handle_;
 
@@ -129,7 +130,7 @@
 
 
 template <typename T>
-class UniqueSet V8_FINAL : public ZoneObject {
+class UniqueSet FINAL : public ZoneObject {
  public:
   // Constructor. A new set will be empty.
   UniqueSet() : size_(0), capacity_(0), array_(NULL) { }
@@ -138,7 +139,7 @@
   UniqueSet(int capacity, Zone* zone)
       : size_(0), capacity_(capacity),
         array_(zone->NewArray<Unique<T> >(capacity)) {
-    ASSERT(capacity <= kMaxCapacity);
+    DCHECK(capacity <= kMaxCapacity);
   }
 
   // Singleton constructor.
@@ -149,7 +150,7 @@
 
   // Add a new element to this unique set. Mutates this set. O(|this|).
   void Add(Unique<T> uniq, Zone* zone) {
-    ASSERT(uniq.IsInitialized());
+    DCHECK(uniq.IsInitialized());
     // Keep the set sorted by the {raw_address} of the unique elements.
     for (int i = 0; i < size_; i++) {
       if (array_[i] == uniq) return;
@@ -312,7 +313,7 @@
   }
 
   inline Unique<T> at(int index) const {
-    ASSERT(index >= 0 && index < size_);
+    DCHECK(index >= 0 && index < size_);
     return array_[index];
   }
 
@@ -341,7 +342,6 @@
   }
 };
 
-
 } }  // namespace v8::internal
 
 #endif  // V8_HYDROGEN_UNIQUE_H_
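
Note: Unique<T> reduces handle equality and hashing to raw-pointer operations by caching the object's address once at construction, which is why the class comment warns that comparisons are only valid within one GC "era": a moving collector invalidates the cached address. The hunks above also make the default constructor public and raw_address_ protected (and drop the final specifier), presumably so the class can be subclassed; the underlying trick is unchanged. A stripped-down standalone sketch of that trick (placeholder types, not V8's Handle machinery):

#include <cassert>
#include <cstdint>

// Identity wrapper: capture the address once, then compare and hash in
// O(1). Sound only while the object cannot be relocated.
template <typename T>
class UniqueId {
 public:
  explicit UniqueId(T* object) : address_(object) { assert(object != nullptr); }
  bool operator==(const UniqueId& other) const {
    return address_ == other.address_;
  }
  std::uintptr_t Hash() const {
    return reinterpret_cast<std::uintptr_t>(address_);
  }

 private:
  T* address_;  // cached identity; stale if the object moves
};

struct Obj {};

int main() {
  Obj a, b;
  UniqueId<Obj> ua(&a), ub(&b), ua2(&a);
  assert(ua == ua2);
  assert(!(ua == ub));
}
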
diff --git a/src/uri.h b/src/uri.h
index a35ee99..75f2605 100644
--- a/src/uri.h
+++ b/src/uri.h
@@ -22,7 +22,7 @@
 template <>
 Vector<const uint8_t> GetCharVector(Handle<String> string) {
   String::FlatContent flat = string->GetFlatContent();
-  ASSERT(flat.IsAscii());
+  DCHECK(flat.IsOneByte());
   return flat.ToOneByteVector();
 }
 
@@ -30,7 +30,7 @@
 template <>
 Vector<const uc16> GetCharVector(Handle<String> string) {
   String::FlatContent flat = string->GetFlatContent();
-  ASSERT(flat.IsTwoByte());
+  DCHECK(flat.IsTwoByte());
   return flat.ToUC16Vector();
 }
 
@@ -73,7 +73,7 @@
                                           Handle<String> source) {
   int index;
   { DisallowHeapAllocation no_allocation;
-    StringSearch<uint8_t, Char> search(isolate, STATIC_ASCII_VECTOR("%"));
+    StringSearch<uint8_t, Char> search(isolate, STATIC_CHAR_VECTOR("%"));
     index = search.Search(GetCharVector<Char>(source), 0);
     if (index < 0) return source;
   }
@@ -100,13 +100,13 @@
     }
   }
 
-  ASSERT(start_index < length);
+  DCHECK(start_index < length);
   Handle<String> first_part =
       isolate->factory()->NewProperSubString(string, 0, start_index);
 
   int dest_position = 0;
   Handle<String> second_part;
-  ASSERT(unescaped_length <= String::kMaxLength);
+  DCHECK(unescaped_length <= String::kMaxLength);
   if (one_byte) {
     Handle<SeqOneByteString> dest = isolate->factory()->NewRawOneByteString(
         unescaped_length).ToHandleChecked();
@@ -226,7 +226,7 @@
 
 template<typename Char>
 MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
-  ASSERT(string->IsFlat());
+  DCHECK(string->IsFlat());
   int escaped_length = 0;
   int length = string->length();
 
@@ -243,7 +243,7 @@
       }
 
       // We don't allow strings that are longer than a maximal length.
-      ASSERT(String::kMaxLength < 0x7fffffff - 6);  // Cannot overflow.
+      DCHECK(String::kMaxLength < 0x7fffffff - 6);  // Cannot overflow.
       if (escaped_length > String::kMaxLength) break;  // Provoke exception.
     }
   }
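
Note: in URIEscape::Escape above, a single character expands to at most 6 output characters ("%uXXXX"), and the overflow check runs after each addition, so escaped_length never exceeds String::kMaxLength + 6; the DCHECK that String::kMaxLength < 0x7fffffff - 6 is exactly the condition under which that running total cannot overflow a signed int. A compact standalone illustration (the kMaxLength value is assumed for the sketch; the real constant varies by V8 version and architecture):

#include <cassert>
#include <climits>

int main() {
  const int kMaxLength = (1 << 28) - 16;  // assumed for illustration
  assert(kMaxLength < INT_MAX - 6);       // the DCHECKed precondition

  // Worst case: the running total is just below the cap and the next
  // character escapes to the 6-character form. The addition stays in
  // range, and the > kMaxLength test then provokes the exception path.
  int escaped_length = kMaxLength;
  escaped_length += 6;
  assert(escaped_length > kMaxLength);
}
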
diff --git a/src/uri.js b/src/uri.js
index 4b7d1f7..09079bc 100644
--- a/src/uri.js
+++ b/src/uri.js
@@ -172,10 +172,10 @@
       throw new $URIError("URI malformed");
     }
     if (value < 0x10000) {
-      %_TwoByteSeqStringSetChar(result, index++, value);
+      %_TwoByteSeqStringSetChar(index++, value, result);
     } else {
-      %_TwoByteSeqStringSetChar(result, index++, (value >> 10) + 0xd7c0);
-      %_TwoByteSeqStringSetChar(result, index++, (value & 0x3ff) + 0xdc00);
+      %_TwoByteSeqStringSetChar(index++, (value >> 10) + 0xd7c0, result);
+      %_TwoByteSeqStringSetChar(index++, (value & 0x3ff) + 0xdc00, result);
     }
     return index;
   }
@@ -205,7 +205,7 @@
 
     var result = %NewString(array.length, NEW_ONE_BYTE_STRING);
     for (var i = 0; i < array.length; i++) {
-      %_OneByteSeqStringSetChar(result, i, array[i]);
+      %_OneByteSeqStringSetChar(i, array[i], result);
     }
     return result;
   }
@@ -217,24 +217,24 @@
     var index = 0;
     var k = 0;
 
-    // Optimistically assume ascii string.
+    // Optimistically assume one-byte string.
     for ( ; k < uriLength; k++) {
       var code = uri.charCodeAt(k);
       if (code == 37) {  // '%'
         if (k + 2 >= uriLength) throw new $URIError("URI malformed");
         var cc = URIHexCharsToCharCode(uri.charCodeAt(k+1), uri.charCodeAt(k+2));
-        if (cc >> 7) break;  // Assumption wrong, two byte string.
+        if (cc >> 7) break;  // Assumption wrong, two-byte string.
         if (reserved(cc)) {
-          %_OneByteSeqStringSetChar(one_byte, index++, 37);  // '%'.
-          %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+1));
-          %_OneByteSeqStringSetChar(one_byte, index++, uri.charCodeAt(k+2));
+          %_OneByteSeqStringSetChar(index++, 37, one_byte);  // '%'.
+          %_OneByteSeqStringSetChar(index++, uri.charCodeAt(k+1), one_byte);
+          %_OneByteSeqStringSetChar(index++, uri.charCodeAt(k+2), one_byte);
         } else {
-          %_OneByteSeqStringSetChar(one_byte, index++, cc);
+          %_OneByteSeqStringSetChar(index++, cc, one_byte);
         }
         k += 2;
       } else {
-        if (code > 0x7f) break;  // Assumption wrong, two byte string.
-        %_OneByteSeqStringSetChar(one_byte, index++, code);
+        if (code > 0x7f) break;  // Assumption wrong, two-byte string.
+        %_OneByteSeqStringSetChar(index++, code, one_byte);
       }
     }
 
@@ -264,14 +264,14 @@
           }
           index = URIDecodeOctets(octets, two_byte, index);
         } else  if (reserved(cc)) {
-          %_TwoByteSeqStringSetChar(two_byte, index++, 37);  // '%'.
-          %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k - 1));
-          %_TwoByteSeqStringSetChar(two_byte, index++, uri.charCodeAt(k));
+          %_TwoByteSeqStringSetChar(index++, 37, two_byte);  // '%'.
+          %_TwoByteSeqStringSetChar(index++, uri.charCodeAt(k - 1), two_byte);
+          %_TwoByteSeqStringSetChar(index++, uri.charCodeAt(k), two_byte);
         } else {
-          %_TwoByteSeqStringSetChar(two_byte, index++, cc);
+          %_TwoByteSeqStringSetChar(index++, cc, two_byte);
         }
       } else {
-        %_TwoByteSeqStringSetChar(two_byte, index++, code);
+        %_TwoByteSeqStringSetChar(index++, code, two_byte);
       }
     }
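
Note: beyond the argument reordering of the %_OneByteSeqStringSetChar and %_TwoByteSeqStringSetChar intrinsics, the constants in the two-byte path above are the surrogate formulas with the 0x10000 bias pre-folded: 0xd7c0 == 0xD800 - (0x10000 >> 10), and the trail half needs no correction because 0x10000 has zero low-order ten bits. A standalone check of that identity:

#include <cassert>
#include <cstdint>

int main() {
  for (uint32_t v = 0x10000; v <= 0x10FFFF; ++v) {
    // Folded form used in uri.js above...
    uint32_t lead = (v >> 10) + 0xD7C0;
    uint32_t trail = (v & 0x3FF) + 0xDC00;
    // ...agrees with the textbook surrogate computation.
    assert(lead == 0xD800 + ((v - 0x10000) >> 10));
    assert(trail == 0xDC00 + ((v - 0x10000) & 0x3FF));
  }
}
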
 
diff --git a/src/utils.cc b/src/utils.cc
index 52b0d48..165855a 100644
--- a/src/utils.cc
+++ b/src/utils.cc
@@ -7,8 +7,8 @@
 
 #include "src/v8.h"
 
-#include "src/checks.h"
-#include "src/platform.h"
+#include "src/base/logging.h"
+#include "src/base/platform/platform.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -27,8 +27,8 @@
 
 
 void SimpleStringBuilder::AddSubstring(const char* s, int n) {
-  ASSERT(!is_finalized() && position_ + n <= buffer_.length());
-  ASSERT(static_cast<size_t>(n) <= strlen(s));
+  DCHECK(!is_finalized() && position_ + n <= buffer_.length());
+  DCHECK(static_cast<size_t>(n) <= strlen(s));
   MemCopy(&buffer_[position_], s, n * kCharSize);
   position_ += n;
 }
@@ -60,7 +60,7 @@
 
 
 char* SimpleStringBuilder::Finalize() {
-  ASSERT(!is_finalized() && position_ <= buffer_.length());
+  DCHECK(!is_finalized() && position_ <= buffer_.length());
   // If there is no space for null termination, overwrite last character.
   if (position_ == buffer_.length()) {
     position_--;
@@ -70,9 +70,9 @@
   buffer_[position_] = '\0';
   // Make sure nobody managed to add a 0-character to the
   // buffer while building the string.
-  ASSERT(strlen(buffer_.start()) == static_cast<size_t>(position_));
+  DCHECK(strlen(buffer_.start()) == static_cast<size_t>(position_));
   position_ = -1;
-  ASSERT(is_finalized());
+  DCHECK(is_finalized());
   return buffer_.start();
 }
 
@@ -80,7 +80,7 @@
 void PrintF(const char* format, ...) {
   va_list arguments;
   va_start(arguments, format);
-  OS::VPrint(format, arguments);
+  base::OS::VPrint(format, arguments);
   va_end(arguments);
 }
 
@@ -88,16 +88,16 @@
 void PrintF(FILE* out, const char* format, ...) {
   va_list arguments;
   va_start(arguments, format);
-  OS::VFPrint(out, format, arguments);
+  base::OS::VFPrint(out, format, arguments);
   va_end(arguments);
 }
 
 
 void PrintPID(const char* format, ...) {
-  OS::Print("[%d] ", OS::GetCurrentProcessId());
+  base::OS::Print("[%d] ", base::OS::GetCurrentProcessId());
   va_list arguments;
   va_start(arguments, format);
-  OS::VPrint(format, arguments);
+  base::OS::VPrint(format, arguments);
   va_end(arguments);
 }
 
@@ -112,12 +112,12 @@
 
 
 int VSNPrintF(Vector<char> str, const char* format, va_list args) {
-  return OS::VSNPrintF(str.start(), str.length(), format, args);
+  return base::OS::VSNPrintF(str.start(), str.length(), format, args);
 }
 
 
 void StrNCpy(Vector<char> dest, const char* src, size_t n) {
-  OS::StrNCpy(dest.start(), dest.length(), src, n);
+  base::OS::StrNCpy(dest.start(), dest.length(), src, n);
 }
 
 
@@ -172,7 +172,7 @@
     MemCopy(result + offset, line_buf, len * kCharSize);
     offset += len;
   }
-  ASSERT(result != NULL);
+  DCHECK(result != NULL);
   result[offset] = '\0';
   return result;
 }
@@ -185,7 +185,7 @@
                         const char* filename) {
   if (file == NULL || fseek(file, 0, SEEK_END) != 0) {
     if (verbose) {
-      OS::PrintError("Cannot read from file %s.\n", filename);
+      base::OS::PrintError("Cannot read from file %s.\n", filename);
     }
     return NULL;
   }
@@ -212,7 +212,7 @@
                         int* size,
                         int extra_space,
                         bool verbose) {
-  FILE* file = OS::FOpen(filename, "rb");
+  FILE* file = base::OS::FOpen(filename, "rb");
   char* result = ReadCharsFromFile(file, size, extra_space, verbose, filename);
   if (file != NULL) fclose(file);
   return result;
@@ -274,10 +274,10 @@
                 const char* str,
                 int size,
                 bool verbose) {
-  FILE* f = OS::FOpen(filename, "ab");
+  FILE* f = base::OS::FOpen(filename, "ab");
   if (f == NULL) {
     if (verbose) {
-      OS::PrintError("Cannot open file %s for writing.\n", filename);
+      base::OS::PrintError("Cannot open file %s for writing.\n", filename);
     }
     return 0;
   }
@@ -291,10 +291,10 @@
                const char* str,
                int size,
                bool verbose) {
-  FILE* f = OS::FOpen(filename, "wb");
+  FILE* f = base::OS::FOpen(filename, "wb");
   if (f == NULL) {
     if (verbose) {
-      OS::PrintError("Cannot open file %s for writing.\n", filename);
+      base::OS::PrintError("Cannot open file %s for writing.\n", filename);
     }
     return 0;
   }
@@ -323,7 +323,7 @@
 
 
 void StringBuilder::AddFormattedList(const char* format, va_list list) {
-  ASSERT(!is_finalized() && position_ <= buffer_.length());
+  DCHECK(!is_finalized() && position_ <= buffer_.length());
   int n = VSNPrintF(buffer_ + position_, format, list);
   if (n < 0 || n >= (buffer_.length() - position_)) {
     position_ = buffer_.length();
@@ -394,4 +394,24 @@
 }
 
 
+bool DoubleToBoolean(double d) {
+  // NaN, +0, and -0 should return the false object.
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+  union IeeeDoubleLittleEndianArchType u;
+#elif __BYTE_ORDER == __BIG_ENDIAN
+  union IeeeDoubleBigEndianArchType u;
+#endif
+  u.d = d;
+  if (u.bits.exp == 2047) {
+    // Detect NaN for IEEE double precision floating point.
+    if ((u.bits.man_low | u.bits.man_high) != 0) return false;
+  }
+  if (u.bits.exp == 0) {
+    // Detect +0 and -0 for IEEE double precision floating point.
+    if ((u.bits.man_low | u.bits.man_high) == 0) return false;
+  }
+  return true;
+}
+
+
 } }  // namespace v8::internal
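
Note: the new DoubleToBoolean classifies NaN and signed zero purely from the IEEE-754 bit pattern: an all-ones exponent (2047) with a nonzero mantissa is NaN, and an all-zero exponent with a zero mantissa is +0 or -0; everything else, including the infinities, is truthy. The same test without the endian-specific unions, as a standalone sketch using an aliasing-safe memcpy:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <limits>

// Returns false for NaN, +0 and -0; true otherwise.
bool DoubleToBooleanBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));             // reinterpret without UB
  uint64_t exp = (bits >> 52) & 0x7FF;              // 11 exponent bits
  uint64_t man = bits & ((uint64_t{1} << 52) - 1);  // 52 mantissa bits
  if (exp == 0x7FF && man != 0) return false;       // NaN (2047 above)
  if (exp == 0 && man == 0) return false;           // +0 or -0
  return true;
}

int main() {
  assert(!DoubleToBooleanBits(0.0));
  assert(!DoubleToBooleanBits(-0.0));
  assert(!DoubleToBooleanBits(std::numeric_limits<double>::quiet_NaN()));
  assert(DoubleToBooleanBits(1.5));
  assert(DoubleToBooleanBits(std::numeric_limits<double>::infinity()));
}
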
diff --git a/src/utils.h b/src/utils.h
index 5422985..2991815 100644
--- a/src/utils.h
+++ b/src/utils.h
@@ -8,13 +8,16 @@
 #include <limits.h>
 #include <stdlib.h>
 #include <string.h>
+#include <cmath>
 
+#include "include/v8.h"
 #include "src/allocation.h"
+#include "src/base/bits.h"
+#include "src/base/logging.h"
 #include "src/base/macros.h"
-#include "src/checks.h"
+#include "src/base/platform/platform.h"
 #include "src/globals.h"
 #include "src/list.h"
-#include "src/platform.h"
 #include "src/vector.h"
 
 namespace v8 {
@@ -23,17 +26,16 @@
 // ----------------------------------------------------------------------------
 // General helper functions
 
-// Returns true iff x is a power of 2. Cannot be used with the maximally
-// negative value of the type T (the -1 overflows).
-template <typename T>
-inline bool IsPowerOf2(T x) {
-  return IS_POWER_OF_TWO(x);
+
+// Compares two C strings for equality; unlike strcmp, handles NULL arguments.
+inline bool CStringEquals(const char* s1, const char* s2) {
+  return (s1 == s2) || (s1 != NULL && s2 != NULL && strcmp(s1, s2) == 0);
 }
 
 
 // X must be a power of 2.  Returns the number of trailing zeros.
 inline int WhichPowerOf2(uint32_t x) {
-  ASSERT(IsPowerOf2(x));
+  DCHECK(base::bits::IsPowerOfTwo32(x));
   int bits = 0;
 #ifdef DEBUG
   int original_x = x;
@@ -57,7 +59,7 @@
     case 2: bits++;  // Fall through.
     case 1: break;
   }
-  ASSERT_EQ(1 << bits, original_x);
+  DCHECK_EQ(1 << bits, original_x);
   return bits;
   return 0;
 }
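
Note: WhichPowerOf2 is a branchy trailing-zero count for a value already known to be a power of two, and the rewritten DCHECK delegates that precondition to base::bits::IsPowerOfTwo32, which is the classic x & (x - 1) test. A minimal standalone sketch of both pieces (not the V8 helpers themselves):

#include <cassert>
#include <cstdint>

// A power of two has exactly one set bit; x & (x - 1) clears the lowest.
bool IsPowerOfTwo32(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }

// Trailing-zero count by successive halving, same shape as above.
int WhichPowerOf2Sketch(uint32_t x) {
  assert(IsPowerOfTwo32(x));
  int bits = 0;
  if (x >= 0x10000u) { bits += 16; x >>= 16; }
  if (x >= 0x100u) { bits += 8; x >>= 8; }
  if (x >= 0x10u) { bits += 4; x >>= 4; }
  if (x >= 0x4u) { bits += 2; x >>= 2; }
  if (x >= 0x2u) { bits += 1; }
  return bits;
}

int main() { assert(WhichPowerOf2Sketch(1u << 20) == 20); }
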
@@ -90,50 +92,6 @@
 }
 
 
-// Compute the 0-relative offset of some absolute value x of type T.
-// This allows conversion of Addresses and integral types into
-// 0-relative int offsets.
-template <typename T>
-inline intptr_t OffsetFrom(T x) {
-  return x - static_cast<T>(0);
-}
-
-
-// Compute the absolute value of type T for some 0-relative offset x.
-// This allows conversion of 0-relative int offsets into Addresses and
-// integral types.
-template <typename T>
-inline T AddressFrom(intptr_t x) {
-  return static_cast<T>(static_cast<T>(0) + x);
-}
-
-
-// Return the largest multiple of m which is <= x.
-template <typename T>
-inline T RoundDown(T x, intptr_t m) {
-  ASSERT(IsPowerOf2(m));
-  return AddressFrom<T>(OffsetFrom(x) & -m);
-}
-
-
-// Return the smallest multiple of m which is >= x.
-template <typename T>
-inline T RoundUp(T x, intptr_t m) {
-  return RoundDown<T>(static_cast<T>(x + m - 1), m);
-}
-
-
-// Increment a pointer until it has the specified alignment.
-// This works like RoundUp, but it works correctly on pointer types where
-// sizeof(*pointer) might not be 1.
-template<class T>
-T AlignUp(T pointer, size_t alignment) {
-  ASSERT(sizeof(pointer) == sizeof(uintptr_t));
-  uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
-  return reinterpret_cast<T>(RoundUp(pointer_raw, alignment));
-}
-
-
 template <typename T>
 int Compare(const T& a, const T& b) {
   if (a == b)
@@ -161,29 +119,6 @@
 }
 
 
-// Returns the smallest power of two which is >= x. If you pass in a
-// number that is already a power of two, it is returned as is.
-// Implementation is from "Hacker's Delight" by Henry S. Warren, Jr.,
-// figure 3-3, page 48, where the function is called clp2.
-inline uint32_t RoundUpToPowerOf2(uint32_t x) {
-  ASSERT(x <= 0x80000000u);
-  x = x - 1;
-  x = x | (x >> 1);
-  x = x | (x >> 2);
-  x = x | (x >> 4);
-  x = x | (x >> 8);
-  x = x | (x >> 16);
-  return x + 1;
-}
-
-
-inline uint32_t RoundDownToPowerOf2(uint32_t x) {
-  uint32_t rounded_up = RoundUpToPowerOf2(x);
-  if (rounded_up > x) return rounded_up >> 1;
-  return rounded_up;
-}
-
-
 template <typename T, typename U>
 inline bool IsAligned(T value, U alignment) {
   return (value & (alignment - 1)) == 0;
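
Note: the rounding helpers removed above (RoundDown/RoundUp/AlignUp and the "Hacker's Delight" clp2 routine) leave utils.h in this cleanup; equivalents live on under src/base in later trees, so callers keep the same semantics. For reference, a compact standalone sketch of the two core tricks, assuming power-of-two alignments:

#include <cassert>
#include <cstdint>

// Round down/up to a power-of-two multiple with mask arithmetic.
uint64_t RoundDownTo(uint64_t x, uint64_t m) {  // m: power of two
  return x & ~(m - 1);
}
uint64_t RoundUpTo(uint64_t x, uint64_t m) { return RoundDownTo(x + m - 1, m); }

// clp2 (Hacker's Delight, figure 3-3): smear the leading one bit all the
// way right, then add one to reach the next power of two.
uint32_t RoundUpToPowerOf2Sketch(uint32_t x) {
  assert(x <= 0x80000000u);
  x = x - 1;
  x |= x >> 1;
  x |= x >> 2;
  x |= x >> 4;
  x |= x >> 8;
  x |= x >> 16;
  return x + 1;
}

int main() {
  assert(RoundDownTo(13, 8) == 8 && RoundUpTo(13, 8) == 16);
  assert(RoundUpToPowerOf2Sketch(33) == 64);
  assert(RoundUpToPowerOf2Sketch(64) == 64);  // powers of two map to themselves
}
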
@@ -220,10 +155,12 @@
 }
 
 
-// Returns the negative absolute value of its argument.
-template <typename T>
-T NegAbs(T a) {
-  return a < 0 ? a : -a;
+// Floor(-0.0) == 0.0
+inline double Floor(double x) {
+#ifdef _MSC_VER
+  if (x == 0) return x;  // Fix for issue 3477.
+#endif
+  return std::floor(x);
 }
 
 
@@ -278,7 +215,7 @@
 
   // Returns a type U with the bit field value encoded.
   static U encode(T value) {
-    ASSERT(is_valid(value));
+    DCHECK(is_valid(value));
     return static_cast<U>(value) << shift;
   }
 
@@ -445,7 +382,7 @@
   explicit Access(StaticResource<T>* resource)
     : resource_(resource)
     , instance_(&resource->instance_) {
-    ASSERT(!resource->is_reserved_);
+    DCHECK(!resource->is_reserved_);
     resource->is_reserved_ = true;
   }
 
@@ -473,12 +410,12 @@
   bool is_set() const { return pointer_ != NULL; }
 
   T* get() const {
-    ASSERT(pointer_ != NULL);
+    DCHECK(pointer_ != NULL);
     return pointer_;
   }
 
   void set(T* value) {
-    ASSERT(pointer_ == NULL && value != NULL);
+    DCHECK(pointer_ == NULL && value != NULL);
     pointer_ = value;
   }
 
@@ -502,7 +439,7 @@
   EmbeddedVector(const EmbeddedVector& rhs)
       : Vector<T>(rhs) {
     MemCopy(buffer_, rhs.buffer_, sizeof(T) * kSize);
-    set_start(buffer_);
+    this->set_start(buffer_);
   }
 
   EmbeddedVector& operator=(const EmbeddedVector& rhs) {
@@ -558,7 +495,7 @@
   // A basic Collector will keep this vector valid as long as the Collector
   // is alive.
   inline Vector<T> AddBlock(int size, T initial_value) {
-    ASSERT(size > 0);
+    DCHECK(size > 0);
     if (size > current_chunk_.length() - index_) {
       Grow(size);
     }
@@ -592,7 +529,7 @@
 
   // Write the contents of the collector into the provided vector.
   void WriteTo(Vector<T> destination) {
-    ASSERT(size_ <= destination.length());
+    DCHECK(size_ <= destination.length());
     int position = 0;
     for (int i = 0; i < chunks_.length(); i++) {
       Vector<T> chunk = chunks_.at(i);
@@ -632,7 +569,7 @@
 
   // Creates a new current chunk, and stores the old chunk in the chunks_ list.
   void Grow(int min_capacity) {
-    ASSERT(growth_factor > 1);
+    DCHECK(growth_factor > 1);
     int new_capacity;
     int current_length = current_chunk_.length();
     if (current_length < kMinCapacity) {
@@ -650,7 +587,7 @@
       }
     }
     NewChunk(new_capacity);
-    ASSERT(index_ + min_capacity <= current_chunk_.length());
+    DCHECK(index_ + min_capacity <= current_chunk_.length());
   }
 
   // Before replacing the current chunk, give a subclass the option to move
@@ -689,12 +626,12 @@
   virtual ~SequenceCollector() {}
 
   void StartSequence() {
-    ASSERT(sequence_start_ == kNoSequence);
+    DCHECK(sequence_start_ == kNoSequence);
     sequence_start_ = this->index_;
   }
 
   Vector<T> EndSequence() {
-    ASSERT(sequence_start_ != kNoSequence);
+    DCHECK(sequence_start_ != kNoSequence);
     int sequence_start = sequence_start_;
     sequence_start_ = kNoSequence;
     if (sequence_start == this->index_) return Vector<T>();
@@ -703,7 +640,7 @@
 
   // Drops the currently added sequence, and all collected elements in it.
   void DropSequence() {
-    ASSERT(sequence_start_ != kNoSequence);
+    DCHECK(sequence_start_ != kNoSequence);
     int sequence_length = this->index_ - sequence_start_;
     this->index_ = sequence_start_;
     this->size_ -= sequence_length;
@@ -728,7 +665,7 @@
     }
     int sequence_length = this->index_ - sequence_start_;
     Vector<T> new_chunk = Vector<T>::New(sequence_length + new_capacity);
-    ASSERT(sequence_length < new_chunk.length());
+    DCHECK(sequence_length < new_chunk.length());
     for (int i = 0; i < sequence_length; i++) {
       new_chunk[i] = this->current_chunk_[sequence_start_ + i];
     }
@@ -744,26 +681,17 @@
 };
 
 
-// Compare ASCII/16bit chars to ASCII/16bit chars.
+// Compare 8bit/16bit chars to 8bit/16bit chars.
 template <typename lchar, typename rchar>
 inline int CompareCharsUnsigned(const lchar* lhs,
                                 const rchar* rhs,
                                 int chars) {
   const lchar* limit = lhs + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
-  if (sizeof(*lhs) == sizeof(*rhs)) {
-    // Number of characters in a uintptr_t.
-    static const int kStepSize = sizeof(uintptr_t) / sizeof(*lhs);  // NOLINT
-    while (lhs <= limit - kStepSize) {
-      if (*reinterpret_cast<const uintptr_t*>(lhs) !=
-          *reinterpret_cast<const uintptr_t*>(rhs)) {
-        break;
-      }
-      lhs += kStepSize;
-      rhs += kStepSize;
-    }
+  if (sizeof(*lhs) == sizeof(char) && sizeof(*rhs) == sizeof(char)) {
+    // memcmp compares byte-by-byte, yielding wrong results for two-byte
+    // strings on little-endian systems.
+    return memcmp(lhs, rhs, chars);
   }
-#endif
   while (lhs < limit) {
     int r = static_cast<int>(*lhs) - static_cast<int>(*rhs);
     if (r != 0) return r;
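
Note: the rewritten CompareCharsUnsigned takes the memcmp fast path only when both element types are single bytes, because for two-byte (UTF-16) data on a little-endian machine memcmp compares the low-order byte first and can invert the ordering. A tiny standalone demonstration of the pitfall:

#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  // As character values 0x0100 > 0x00FF, but little-endian memory holds
  // {0x00, 0x01} vs {0xFF, 0x00}, so memcmp compares 0x00 against 0xFF
  // first and reports the opposite order.
  uint16_t a = 0x0100, b = 0x00FF;
  assert(a > b);
  int byte_order = std::memcmp(&a, &b, sizeof(a));
  // byte_order < 0 on little-endian targets (the wrong answer); on
  // big-endian targets the two comparisons happen to agree. Hence the
  // one-byte-only restriction above.
  (void)byte_order;
}
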
@@ -775,8 +703,8 @@
 
 template<typename lchar, typename rchar>
 inline int CompareChars(const lchar* lhs, const rchar* rhs, int chars) {
-  ASSERT(sizeof(lchar) <= 2);
-  ASSERT(sizeof(rchar) <= 2);
+  DCHECK(sizeof(lchar) <= 2);
+  DCHECK(sizeof(rchar) <= 2);
   if (sizeof(lchar) == 1) {
     if (sizeof(rchar) == 1) {
       return CompareCharsUnsigned(reinterpret_cast<const uint8_t*>(lhs),
@@ -803,70 +731,14 @@
 
 // Calculate 10^exponent.
 inline int TenToThe(int exponent) {
-  ASSERT(exponent <= 9);
-  ASSERT(exponent >= 1);
+  DCHECK(exponent <= 9);
+  DCHECK(exponent >= 1);
   int answer = 10;
   for (int i = 1; i < exponent; i++) answer *= 10;
   return answer;
 }
 
 
-// The type-based aliasing rule allows the compiler to assume that pointers of
-// different types (for some definition of different) never alias each other.
-// Thus the following code does not work:
-//
-// float f = foo();
-// int fbits = *(int*)(&f);
-//
-// The compiler 'knows' that the int pointer can't refer to f since the types
-// don't match, so the compiler may cache f in a register, leaving random data
-// in fbits.  Using C++ style casts makes no difference, however a pointer to
-// char data is assumed to alias any other pointer.  This is the 'memcpy
-// exception'.
-//
-// Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type of a variable of another type.  Of course the end result is likely to
-// be implementation dependent.  Most compilers (gcc-4.2 and MSVC 2005)
-// will completely optimize BitCast away.
-//
-// There is an additional use for BitCast.
-// Recent gccs will warn when they see casts that may result in breakage due to
-// the type-based aliasing rule.  If you have checked that there is no breakage
-// you can use BitCast to cast one pointer type to another.  This confuses gcc
-// enough that it can no longer see that you have cast one pointer type to
-// another thus avoiding the warning.
-
-// We need different implementations of BitCast for pointer and non-pointer
-// values. We use partial specialization of auxiliary struct to work around
-// issues with template functions overloading.
-template <class Dest, class Source>
-struct BitCastHelper {
-  STATIC_ASSERT(sizeof(Dest) == sizeof(Source));
-
-  INLINE(static Dest cast(const Source& source)) {
-    Dest dest;
-    memcpy(&dest, &source, sizeof(dest));
-    return dest;
-  }
-};
-
-template <class Dest, class Source>
-struct BitCastHelper<Dest, Source*> {
-  INLINE(static Dest cast(Source* source)) {
-    return BitCastHelper<Dest, uintptr_t>::
-        cast(reinterpret_cast<uintptr_t>(source));
-  }
-};
-
-template <class Dest, class Source>
-INLINE(Dest BitCast(const Source& source));
-
-template <class Dest, class Source>
-inline Dest BitCast(const Source& source) {
-  return BitCastHelper<Dest, Source>::cast(source);
-}
-
-
 template<typename ElementType, int NumElements>
 class EmbeddedContainer {
  public:
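
Note: the BitCast helper deleted above is the "memcpy exception" its comment describes; the modern equivalent (bit_cast in V8's src/base/macros.h in later trees, std::bit_cast in C++20) is a one-liner. A standalone sketch:

#include <cassert>
#include <cstdint>
#include <cstring>

// Type-pun through memcpy: well-defined, and compilers optimize the copy
// away. The size check rejects mismatched types at compile time.
template <class Dest, class Source>
Dest bit_cast_sketch(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));
  return dest;
}

int main() {
  float f = 1.0f;
  uint32_t bits = bit_cast_sketch<uint32_t>(f);
  assert(bits == 0x3F800000u);  // IEEE-754 single-precision 1.0f
}
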
@@ -874,11 +746,11 @@
 
   int length() const { return NumElements; }
   const ElementType& operator[](int i) const {
-    ASSERT(i < length());
+    DCHECK(i < length());
     return elems_[i];
   }
   ElementType& operator[](int i) {
-    ASSERT(i < length());
+    DCHECK(i < length());
     return elems_[i];
   }
 
@@ -924,7 +796,7 @@
 
   // Get the current position in the builder.
   int position() const {
-    ASSERT(!is_finalized());
+    DCHECK(!is_finalized());
     return position_;
   }
 
@@ -935,8 +807,8 @@
   // 0-characters; use the Finalize() method to terminate the string
   // instead.
   void AddCharacter(char c) {
-    ASSERT(c != '\0');
-    ASSERT(!is_finalized() && position_ < buffer_.length());
+    DCHECK(c != '\0');
+    DCHECK(!is_finalized() && position_ < buffer_.length());
     buffer_[position_++] = c;
   }
 
@@ -995,9 +867,9 @@
 
  private:
   T Mask(E element) const {
-    // The strange typing in ASSERT is necessary to avoid stupid warnings, see:
+    // The strange typing in DCHECK is necessary to avoid stupid warnings, see:
     // http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43680
-    ASSERT(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
+    DCHECK(static_cast<int>(element) < static_cast<int>(sizeof(T) * CHAR_BIT));
     return static_cast<T>(1) << element;
   }
 
@@ -1024,19 +896,19 @@
 
 // Check number width.
 inline bool is_intn(int64_t x, unsigned n) {
-  ASSERT((0 < n) && (n < 64));
+  DCHECK((0 < n) && (n < 64));
   int64_t limit = static_cast<int64_t>(1) << (n - 1);
   return (-limit <= x) && (x < limit);
 }
 
 inline bool is_uintn(int64_t x, unsigned n) {
-  ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
   return !(x >> n);
 }
 
 template <class T>
 inline T truncate_to_intn(T x, unsigned n) {
-  ASSERT((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
+  DCHECK((0 < n) && (n < (sizeof(x) * kBitsPerByte)));
   return (x & ((static_cast<T>(1) << n) - 1));
 }
 
@@ -1242,9 +1114,9 @@
  // TODO(mvstanton): disabled because Mac builds are bogusly failing on this
   // assert. They are doing a signed comparison. Investigate in
   // the morning.
-  // ASSERT(Min(dst, const_cast<T*>(src)) + num_words <=
+  // DCHECK(Min(dst, const_cast<T*>(src)) + num_words <=
   //       Max(dst, const_cast<T*>(src)));
-  ASSERT(num_words > 0);
+  DCHECK(num_words > 0);
 
   // Use block copying MemCopy if the segment we're copying is
   // enough to justify the extra call/setup overhead.
@@ -1265,7 +1137,7 @@
 template <typename T>
 inline void MoveWords(T* dst, const T* src, size_t num_words) {
   STATIC_ASSERT(sizeof(T) == kPointerSize);
-  ASSERT(num_words > 0);
+  DCHECK(num_words > 0);
 
   // Use block copying MemCopy if the segment we're copying is
   // enough to justify the extra call/setup overhead.
@@ -1288,7 +1160,7 @@
 template <typename T>
 inline void CopyBytes(T* dst, const T* src, size_t num_bytes) {
   STATIC_ASSERT(sizeof(T) == 1);
-  ASSERT(Min(dst, const_cast<T*>(src)) + num_bytes <=
+  DCHECK(Min(dst, const_cast<T*>(src)) + num_bytes <=
          Max(dst, const_cast<T*>(src)));
   if (num_bytes == 0) return;
 
@@ -1318,8 +1190,12 @@
 #if V8_HOST_ARCH_IA32
 #define STOS "stosl"
 #elif V8_HOST_ARCH_X64
+#if V8_HOST_ARCH_32_BIT
+#define STOS "addr32 stosl"
+#else
 #define STOS "stosq"
 #endif
+#endif
 #if defined(__native_client__)
   // This STOS sequence does not validate for x86_64 Native Client.
   // Here we #undef STOS to force use of the slower C version.
@@ -1350,21 +1226,6 @@
 }
 
 
-// Simple wrapper that allows an ExternalString to refer to a
-// Vector<const char>. Doesn't assume ownership of the data.
-class AsciiStringAdapter: public v8::String::ExternalAsciiStringResource {
- public:
-  explicit AsciiStringAdapter(Vector<const char> data) : data_(data) {}
-
-  virtual const char* data() const { return data_.start(); }
-
-  virtual size_t length() const { return data_.length(); }
-
- private:
-  Vector<const char> data_;
-};
-
-
 // Simple support to read a file into a 0-terminated C-string.
 // The returned buffer must be freed by the caller.
 // On return, *exits tells whether the file existed.
@@ -1389,14 +1250,14 @@
 INLINE(void CopyCharsUnsigned(uint16_t* dest, const uint16_t* src, int chars));
 #endif
 
-// Copy from ASCII/16bit chars to ASCII/16bit chars.
+// Copy from 8bit/16bit chars to 8bit/16bit chars.
 template <typename sourcechar, typename sinkchar>
 INLINE(void CopyChars(sinkchar* dest, const sourcechar* src, int chars));
 
 template<typename sourcechar, typename sinkchar>
 void CopyChars(sinkchar* dest, const sourcechar* src, int chars) {
-  ASSERT(sizeof(sourcechar) <= 2);
-  ASSERT(sizeof(sinkchar) <= 2);
+  DCHECK(sizeof(sourcechar) <= 2);
+  DCHECK(sizeof(sinkchar) <= 2);
   if (sizeof(sinkchar) == 1) {
     if (sizeof(sourcechar) == 1) {
       CopyCharsUnsigned(reinterpret_cast<uint8_t*>(dest),
@@ -1423,25 +1284,11 @@
 template <typename sourcechar, typename sinkchar>
 void CopyCharsUnsigned(sinkchar* dest, const sourcechar* src, int chars) {
   sinkchar* limit = dest + chars;
-#ifdef V8_HOST_CAN_READ_UNALIGNED
-  if (sizeof(*dest) == sizeof(*src)) {
-    if (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest))) {
-      MemCopy(dest, src, chars * sizeof(*dest));
-      return;
-    }
-    // Number of characters in a uintptr_t.
-    static const int kStepSize = sizeof(uintptr_t) / sizeof(*dest);  // NOLINT
-    ASSERT(dest + kStepSize > dest);  // Check for overflow.
-    while (dest + kStepSize <= limit) {
-      *reinterpret_cast<uintptr_t*>(dest) =
-          *reinterpret_cast<const uintptr_t*>(src);
-      dest += kStepSize;
-      src += kStepSize;
-    }
-  }
-#endif
-  while (dest < limit) {
-    *dest++ = static_cast<sinkchar>(*src++);
+  if ((sizeof(*dest) == sizeof(*src)) &&
+      (chars >= static_cast<int>(kMinComplexMemCopy / sizeof(*dest)))) {
+    MemCopy(dest, src, chars * sizeof(*dest));
+  } else {
+    while (dest < limit) *dest++ = static_cast<sinkchar>(*src++);
   }
 }
 
@@ -1578,6 +1425,46 @@
 };
 
 
-} }  // namespace v8::internal
+bool DoubleToBoolean(double d);
+
+template <typename Stream>
+bool StringToArrayIndex(Stream* stream, uint32_t* index) {
+  uint16_t ch = stream->GetNext();
+
+  // If the string begins with a '0' character, it must consist of only
+  // that character to be a legal array index.
+  if (ch == '0') {
+    *index = 0;
+    return !stream->HasMore();
+  }
+
+  // Convert string to uint32 array index; character by character.
+  int d = ch - '0';
+  if (d < 0 || d > 9) return false;
+  uint32_t result = d;
+  while (stream->HasMore()) {
+    d = stream->GetNext() - '0';
+    if (d < 0 || d > 9) return false;
+    // Check that the new result is below the 32 bit limit.
+    if (result > 429496729U - ((d > 5) ? 1 : 0)) return false;
+    result = (result * 10) + d;
+  }
+
+  *index = result;
+  return true;
+}
+
+
+// Returns current value of top of the stack. Works correctly with ASAN.
+DISABLE_ASAN
+inline uintptr_t GetCurrentStackPosition() {
+  // Takes the address of the limit variable in order to find out where
+  // the top of stack is right now.
+  uintptr_t limit = reinterpret_cast<uintptr_t>(&limit);
+  return limit;
+}
+
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_UTILS_H_
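
Note: the 429496729U bound in the new StringToArrayIndex falls out of 4294967295 (2^32 - 1) = 429496729 * 10 + 5: appending digit d to result stays within uint32_t iff result <= 429496729 when d <= 5 and result <= 429496728 when d > 5, which is precisely the result > 429496729U - ((d > 5) ? 1 : 0) rejection, performed before the multiply so the check itself cannot overflow. A standalone verification of the boundary:

#include <cassert>
#include <cstdint>

// Overflow-safe "can we append digit d?" test from StringToArrayIndex.
bool CanAppendDigit(uint32_t result, int d) {
  return result <= 429496729u - ((d > 5) ? 1 : 0);
}

int main() {
  // Compare against 64-bit arithmetic in a window around the boundary.
  for (uint64_t r = 429496720; r <= 429496740; ++r) {
    for (int d = 0; d <= 9; ++d) {
      bool fits = r * 10 + d <= 0xFFFFFFFFull;
      assert(CanAppendDigit(static_cast<uint32_t>(r), d) == fits);
    }
  }
}
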
diff --git a/src/utils/DEPS b/src/utils/DEPS
deleted file mode 100644
index 3da1ce1..0000000
--- a/src/utils/DEPS
+++ /dev/null
@@ -1,5 +0,0 @@
-include_rules = [
-  "-src",
-  "+src/base",
-  "+src/platform",
-]
diff --git a/src/utils/random-number-generator.cc b/src/utils/random-number-generator.cc
deleted file mode 100644
index 3da6a5a..0000000
--- a/src/utils/random-number-generator.cc
+++ /dev/null
@@ -1,131 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/utils/random-number-generator.h"
-
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <new>
-
-#include "src/base/macros.h"
-#include "src/platform/mutex.h"
-#include "src/platform/time.h"
-
-namespace v8 {
-namespace internal {
-
-static LazyMutex entropy_mutex = LAZY_MUTEX_INITIALIZER;
-static RandomNumberGenerator::EntropySource entropy_source = NULL;
-
-
-// static
-void RandomNumberGenerator::SetEntropySource(EntropySource source) {
-  LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
-  entropy_source = source;
-}
-
-
-RandomNumberGenerator::RandomNumberGenerator() {
-  // Check if embedder supplied an entropy source.
-  { LockGuard<Mutex> lock_guard(entropy_mutex.Pointer());
-    if (entropy_source != NULL) {
-      int64_t seed;
-      if (entropy_source(reinterpret_cast<unsigned char*>(&seed),
-                         sizeof(seed))) {
-        SetSeed(seed);
-        return;
-      }
-    }
-  }
-
-#if V8_OS_CYGWIN || V8_OS_WIN
-  // Use rand_s() to gather entropy on Windows. See:
-  // https://code.google.com/p/v8/issues/detail?id=2905
-  unsigned first_half, second_half;
-  errno_t result = rand_s(&first_half);
-  ASSERT_EQ(0, result);
-  result = rand_s(&second_half);
-  ASSERT_EQ(0, result);
-  SetSeed((static_cast<int64_t>(first_half) << 32) + second_half);
-#else
-  // Gather entropy from /dev/urandom if available.
-  FILE* fp = fopen("/dev/urandom", "rb");
-  if (fp != NULL) {
-    int64_t seed;
-    size_t n = fread(&seed, sizeof(seed), 1, fp);
-    fclose(fp);
-    if (n == 1) {
-      SetSeed(seed);
-      return;
-    }
-  }
-
-  // We cannot assume that random() or rand() were seeded
-  // properly, so instead of relying on random() or rand(),
-  // we just seed our PRNG using timing data as fallback.
-  // This is weak entropy, but it's sufficient, because
-  // it is the responsibility of the embedder to install
-  // an entropy source using v8::V8::SetEntropySource(),
-  // which provides reasonable entropy, see:
-  // https://code.google.com/p/v8/issues/detail?id=2905
-  int64_t seed = Time::NowFromSystemTime().ToInternalValue() << 24;
-  seed ^= TimeTicks::HighResolutionNow().ToInternalValue() << 16;
-  seed ^= TimeTicks::Now().ToInternalValue() << 8;
-  SetSeed(seed);
-#endif  // V8_OS_CYGWIN || V8_OS_WIN
-}
-
-
-int RandomNumberGenerator::NextInt(int max) {
-  ASSERT_LE(0, max);
-
-  // Fast path if max is a power of 2.
-  if (IS_POWER_OF_TWO(max)) {
-    return static_cast<int>((max * static_cast<int64_t>(Next(31))) >> 31);
-  }
-
-  while (true) {
-    int rnd = Next(31);
-    int val = rnd % max;
-    if (rnd - val + (max - 1) >= 0) {
-      return val;
-    }
-  }
-}
-
-
-double RandomNumberGenerator::NextDouble() {
-  return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
-      static_cast<double>(static_cast<int64_t>(1) << 53);
-}
-
-
-void RandomNumberGenerator::NextBytes(void* buffer, size_t buflen) {
-  for (size_t n = 0; n < buflen; ++n) {
-    static_cast<uint8_t*>(buffer)[n] = static_cast<uint8_t>(Next(8));
-  }
-}
-
-
-int RandomNumberGenerator::Next(int bits) {
-  ASSERT_LT(0, bits);
-  ASSERT_GE(32, bits);
-  // Do unsigned multiplication, which has the intended modulo semantics, while
-  // signed multiplication would expose undefined behavior.
-  uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
-  // Assigning a uint64_t to an int64_t is implementation defined, but this
-  // should be OK. Use a static_cast to explicitly state that we know what we're
-  // doing. (Famous last words...)
-  int64_t seed = static_cast<int64_t>((product + kAddend) & kMask);
-  seed_ = seed;
-  return static_cast<int>(seed >> (48 - bits));
-}
-
-
-void RandomNumberGenerator::SetSeed(int64_t seed) {
-  seed_ = (seed ^ kMultiplier) & kMask;
-}
-
-} }  // namespace v8::internal
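
Note: the generator deleted here (apparently relocated under src/base in this reorganization) is the same 48-bit linear congruential generator as java.util.Random: the state update is seed' = (seed * 0x5DEECE66D + 0xB) mod 2^48, and each draw returns the top bits of the new state, since the high bits of a power-of-two-modulus LCG have the longest period. A standalone sketch with the same constants:

#include <cstdint>

// 48-bit LCG with java.util.Random's parameters, as in the deleted file.
class Lcg48 {
 public:
  explicit Lcg48(int64_t seed) { SetSeed(seed); }

  // Returns the top |bits| bits (1 <= bits <= 32) of the new state.
  int Next(int bits) {
    uint64_t product = static_cast<uint64_t>(seed_) * kMultiplier;
    seed_ = static_cast<int64_t>((product + kAddend) & kMask);
    return static_cast<int>(seed_ >> (48 - bits));
  }

  // Two draws supply 53 random bits, the full precision of a double.
  double NextDouble() {
    return ((static_cast<int64_t>(Next(26)) << 27) + Next(27)) /
           static_cast<double>(static_cast<int64_t>(1) << 53);
  }

 private:
  void SetSeed(int64_t seed) { seed_ = (seed ^ kMultiplier) & kMask; }

  static constexpr uint64_t kMultiplier = 0x5DEECE66Dull;
  static constexpr uint64_t kAddend = 0xBull;
  static constexpr uint64_t kMask = 0xFFFFFFFFFFFFull;  // 2^48 - 1
  int64_t seed_;
};

int main() {
  Lcg48 rng(42);
  double d = rng.NextDouble();
  return (d >= 0.0 && d < 1.0) ? 0 : 1;  // always in [0, 1)
}
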
diff --git a/src/utils/random-number-generator.h b/src/utils/random-number-generator.h
deleted file mode 100644
index 5407571..0000000
--- a/src/utils/random-number-generator.h
+++ /dev/null
@@ -1,87 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
-#define V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
-
-#include "src/base/macros.h"
-
-namespace v8 {
-namespace internal {
-
-// -----------------------------------------------------------------------------
-// RandomNumberGenerator
-//
-// This class is used to generate a stream of pseudorandom numbers. The class
-// uses a 48-bit seed, which is modified using a linear congruential formula.
-// (See Donald Knuth, The Art of Computer Programming, Volume 3, Section 3.2.1.)
-// If two instances of RandomNumberGenerator are created with the same seed, and
-// the same sequence of method calls is made for each, they will generate and
-// return identical sequences of numbers.
-// This class uses (probably) weak entropy by default, but it's sufficient,
-// because it is the responsibility of the embedder to install an entropy source
-// using v8::V8::SetEntropySource(), which provides reasonable entropy, see:
-// https://code.google.com/p/v8/issues/detail?id=2905
-// This class is neither reentrant nor threadsafe.
-
-class RandomNumberGenerator V8_FINAL {
- public:
-  // EntropySource is used as a callback function when V8 needs a source of
-  // entropy.
-  typedef bool (*EntropySource)(unsigned char* buffer, size_t buflen);
-  static void SetEntropySource(EntropySource entropy_source);
-
-  RandomNumberGenerator();
-  explicit RandomNumberGenerator(int64_t seed) { SetSeed(seed); }
-
-  // Returns the next pseudorandom, uniformly distributed int value from this
-  // random number generator's sequence. The general contract of |NextInt()| is
-  // that one int value is pseudorandomly generated and returned.
-  // All 2^32 possible integer values are produced with (approximately) equal
-  // probability.
-  V8_INLINE int NextInt() V8_WARN_UNUSED_RESULT {
-    return Next(32);
-  }
-
-  // Returns a pseudorandom, uniformly distributed int value between 0
-  // (inclusive) and the specified max value (exclusive), drawn from this random
-  // number generator's sequence. The general contract of |NextInt(int)| is that
-  // one int value in the specified range is pseudorandomly generated and
-  // returned. All max possible int values are produced with (approximately)
-  // equal probability.
-  int NextInt(int max) V8_WARN_UNUSED_RESULT;
-
-  // Returns the next pseudorandom, uniformly distributed boolean value from
-  // this random number generator's sequence. The general contract of
-  // |NextBoolean()| is that one boolean value is pseudorandomly generated and
-  // returned. The values true and false are produced with (approximately) equal
-  // probability.
-  V8_INLINE bool NextBool() V8_WARN_UNUSED_RESULT {
-    return Next(1) != 0;
-  }
-
-  // Returns the next pseudorandom, uniformly distributed double value between
-  // 0.0 and 1.0 from this random number generator's sequence.
-  // The general contract of |NextDouble()| is that one double value, chosen
-  // (approximately) uniformly from the range 0.0 (inclusive) to 1.0
-  // (exclusive), is pseudorandomly generated and returned.
-  double NextDouble() V8_WARN_UNUSED_RESULT;
-
-  // Fills the elements of a specified array of bytes with random numbers.
-  void NextBytes(void* buffer, size_t buflen);
-
- private:
-  static const int64_t kMultiplier = V8_2PART_UINT64_C(0x5, deece66d);
-  static const int64_t kAddend = 0xb;
-  static const int64_t kMask = V8_2PART_UINT64_C(0xffff, ffffffff);
-
-  int Next(int bits) V8_WARN_UNUSED_RESULT;
-  void SetSeed(int64_t seed);
-
-  int64_t seed_;
-};
-
-} }  // namespace v8::internal
-
-#endif  // V8_UTILS_RANDOM_NUMBER_GENERATOR_H_
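
Note: NextInt(max) in the deleted implementation avoids modulo bias by rejection sampling: it draws 31 uniform bits, and the rnd - val + (max - 1) >= 0 test accepts the draw only if its whole bucket of max consecutive values fits below 2^31 (the expression goes negative exactly when the bucket is the final, truncated one). A standalone sketch of the same scheme, using 64-bit arithmetic instead of the signed-overflow idiom, with a standard engine standing in for the LCG:

#include <cassert>
#include <cstdint>
#include <random>

// Uniform integer in [0, max), bias-free via rejection.
int NextIntUniform(std::mt19937* gen, int max) {
  assert(max > 0);
  while (true) {
    int rnd = static_cast<int>((*gen)() >> 1);  // 31 uniform bits
    int val = rnd % max;
    // Accept iff rnd's bucket [rnd - val, rnd - val + max) lies entirely
    // within [0, 2^31); draws from the truncated last bucket are retried.
    if (static_cast<int64_t>(rnd) - val + (max - 1) < (int64_t{1} << 31)) {
      return val;
    }
  }
}

int main() {
  std::mt19937 gen(1234);
  for (int i = 0; i < 1000; ++i) {
    int v = NextIntUniform(&gen, 10);
    assert(v >= 0 && v < 10);
  }
}
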
diff --git a/src/v8.cc b/src/v8.cc
index 8aba51a..62c3da4 100644
--- a/src/v8.cc
+++ b/src/v8.cc
@@ -6,24 +6,23 @@
 
 #include "src/assembler.h"
 #include "src/base/once.h"
-#include "src/isolate.h"
-#include "src/elements.h"
+#include "src/base/platform/platform.h"
 #include "src/bootstrapper.h"
+#include "src/compiler/pipeline.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
+#include "src/elements.h"
 #include "src/frames.h"
+#include "src/heap/store-buffer.h"
 #include "src/heap-profiler.h"
 #include "src/hydrogen.h"
-#ifdef V8_USE_DEFAULT_PLATFORM
-#include "src/libplatform/default-platform.h"
-#endif
+#include "src/isolate.h"
 #include "src/lithium-allocator.h"
 #include "src/objects.h"
-#include "src/platform.h"
-#include "src/sampler.h"
 #include "src/runtime-profiler.h"
+#include "src/sampler.h"
 #include "src/serialize.h"
-#include "src/store-buffer.h"
+
 
 namespace v8 {
 namespace internal {
@@ -34,21 +33,9 @@
 v8::Platform* V8::platform_ = NULL;
 
 
-bool V8::Initialize(Deserializer* des) {
+bool V8::Initialize() {
   InitializeOncePerProcess();
-  Isolate* isolate = Isolate::UncheckedCurrent();
-  if (isolate == NULL) return true;
-  if (isolate->IsDead()) return false;
-  if (isolate->IsInitialized()) return true;
-
-#ifdef V8_USE_DEFAULT_PLATFORM
-  DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_);
-  platform->SetThreadPoolSize(isolate->max_available_threads());
-  // We currently only start the threads early, if we know that we'll use them.
-  if (FLAG_job_based_sweeping) platform->EnsureInitialized();
-#endif
-
-  return isolate->Init(des);
+  return true;
 }
 
 
@@ -56,17 +43,12 @@
   Bootstrapper::TearDownExtensions();
   ElementsAccessor::TearDown();
   LOperand::TearDownCaches();
+  compiler::Pipeline::TearDown();
   ExternalReference::TearDownMathExpData();
   RegisteredExtension::UnregisterAll();
   Isolate::GlobalTearDown();
-
   Sampler::TearDown();
-
-#ifdef V8_USE_DEFAULT_PLATFORM
-  DefaultPlatform* platform = static_cast<DefaultPlatform*>(platform_);
-  platform_ = NULL;
-  delete platform;
-#endif
+  FlagList::ResetAllFlags();  // Frees memory held by string arguments.
 }
 
 
@@ -90,9 +72,10 @@
     FLAG_max_semi_space_size = 1;
   }
 
-#ifdef V8_USE_DEFAULT_PLATFORM
-  platform_ = new DefaultPlatform;
-#endif
+  base::OS::Initialize(FLAG_random_seed, FLAG_hard_abort, FLAG_gc_fake_mmap);
+
+  Isolate::InitializeOncePerProcess();
+
   Sampler::SetUp();
   CpuFeatures::Probe(false);
   init_memcopy_functions();
@@ -104,6 +87,7 @@
 #endif
   ElementsAccessor::InitializeOncePerProcess();
   LOperand::SetUpCaches();
+  compiler::Pipeline::SetUp();
   SetUpJSCallerSavedCodeData();
   ExternalReference::SetUp();
   Bootstrapper::InitializeOncePerProcess();
@@ -116,20 +100,20 @@
 
 
 void V8::InitializePlatform(v8::Platform* platform) {
-  ASSERT(!platform_);
-  ASSERT(platform);
+  CHECK(!platform_);
+  CHECK(platform);
   platform_ = platform;
 }
 
 
 void V8::ShutdownPlatform() {
-  ASSERT(platform_);
+  CHECK(platform_);
   platform_ = NULL;
 }
 
 
 v8::Platform* V8::GetCurrentPlatform() {
-  ASSERT(platform_);
+  DCHECK(platform_);
   return platform_;
 }
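
Note: the reworked V8::Initialize drops per-isolate setup (and the Deserializer parameter, per the v8.h hunks below) in favor of plain process-wide initialization behind InitializeOncePerProcess, with the hard preconditions on the platform pointer promoted from ASSERT to CHECK so they hold in release builds too. A minimal sketch of the once-guard pattern, using the standard library rather than the CallOnce facility that src/base/once.h provides:

#include <mutex>

namespace {
std::once_flag init_flag;

void InitializeOncePerProcessImpl() {
  // ...one-time process-wide setup (samplers, caches, flag handling)...
}
}  // namespace

// Safe to call from any thread, any number of times; the body runs once.
void InitializeOncePerProcess() {
  std::call_once(init_flag, InitializeOncePerProcessImpl);
}

int main() {
  InitializeOncePerProcess();
  InitializeOncePerProcess();  // no-op on the second call
}
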
 
diff --git a/src/v8.h b/src/v8.h
index b14458a..13c33e1 100644
--- a/src/v8.h
+++ b/src/v8.h
@@ -28,38 +28,32 @@
 // Basic includes
 #include "include/v8.h"
 #include "include/v8-platform.h"
-#include "src/v8checks.h"
-#include "src/allocation.h"
-#include "src/assert-scope.h"
-#include "src/utils.h"
-#include "src/flags.h"
-#include "src/globals.h"
+#include "src/checks.h"  // NOLINT
+#include "src/allocation.h"  // NOLINT
+#include "src/assert-scope.h"  // NOLINT
+#include "src/utils.h"  // NOLINT
+#include "src/flags.h"  // NOLINT
+#include "src/globals.h"  // NOLINT
 
 // Objects & heap
-#include "src/objects-inl.h"
-#include "src/spaces-inl.h"
-#include "src/heap-inl.h"
-#include "src/incremental-marking-inl.h"
-#include "src/mark-compact-inl.h"
-#include "src/log-inl.h"
-#include "src/handles-inl.h"
-#include "src/types-inl.h"
-#include "src/zone-inl.h"
+#include "src/objects-inl.h"  // NOLINT
+#include "src/heap/spaces-inl.h"               // NOLINT
+#include "src/heap/heap-inl.h"                 // NOLINT
+#include "src/heap/incremental-marking-inl.h"  // NOLINT
+#include "src/heap/mark-compact-inl.h"         // NOLINT
+#include "src/log-inl.h"  // NOLINT
+#include "src/handles-inl.h"  // NOLINT
+#include "src/types-inl.h"  // NOLINT
+#include "src/zone-inl.h"  // NOLINT
 
 namespace v8 {
 namespace internal {
 
-class Deserializer;
-
 class V8 : public AllStatic {
  public:
   // Global actions.
 
-  // If Initialize is called with des == NULL, the initial state is
-  // created from scratch. If a non-null Deserializer is given, the
-  // initial state is created by reading the deserialized data into an
-  // empty heap.
-  static bool Initialize(Deserializer* des);
+  static bool Initialize();
   static void TearDown();
 
   // Report process out of memory. Implementation found in api.cc.
diff --git a/src/v8checks.h b/src/v8checks.h
deleted file mode 100644
index 3d63cae..0000000
--- a/src/v8checks.h
+++ /dev/null
@@ -1,39 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_V8CHECKS_H_
-#define V8_V8CHECKS_H_
-
-#include "src/checks.h"
-
-namespace v8 {
-  class Value;
-  template <class T> class Handle;
-
-namespace internal {
-  intptr_t HeapObjectTagMask();
-
-} }  // namespace v8::internal
-
-
-void CheckNonEqualsHelper(const char* file,
-                          int line,
-                          const char* unexpected_source,
-                          v8::Handle<v8::Value> unexpected,
-                          const char* value_source,
-                          v8::Handle<v8::Value> value);
-
-void CheckEqualsHelper(const char* file,
-                       int line,
-                       const char* expected_source,
-                       v8::Handle<v8::Value> expected,
-                       const char* value_source,
-                       v8::Handle<v8::Value> value);
-
-#define ASSERT_TAG_ALIGNED(address) \
-  ASSERT((reinterpret_cast<intptr_t>(address) & HeapObjectTagMask()) == 0)
-
-#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & HeapObjectTagMask()) == 0)
-
-#endif  // V8_V8CHECKS_H_
diff --git a/src/v8natives.js b/src/v8natives.js
index 1d05338..782b953 100644
--- a/src/v8natives.js
+++ b/src/v8natives.js
@@ -28,7 +28,7 @@
     var f = functions[i + 1];
     %FunctionSetName(f, key);
     %FunctionRemovePrototype(f);
-    %SetProperty(object, key, f, attributes);
+    %AddNamedProperty(object, key, f, attributes);
     %SetNativeFlag(f);
   }
   %ToFastProperties(object);
@@ -39,7 +39,7 @@
 function InstallGetter(object, name, getter) {
   %FunctionSetName(getter, name);
   %FunctionRemovePrototype(getter);
-  %DefineOrRedefineAccessorProperty(object, name, getter, null, DONT_ENUM);
+  %DefineAccessorPropertyUnchecked(object, name, getter, null, DONT_ENUM);
   %SetNativeFlag(getter);
 }
 
@@ -50,7 +50,7 @@
   %FunctionSetName(setter, name);
   %FunctionRemovePrototype(getter);
   %FunctionRemovePrototype(setter);
-  %DefineOrRedefineAccessorProperty(object, name, getter, setter, DONT_ENUM);
+  %DefineAccessorPropertyUnchecked(object, name, getter, setter, DONT_ENUM);
   %SetNativeFlag(getter);
   %SetNativeFlag(setter);
 }
@@ -65,7 +65,7 @@
   for (var i = 0; i < constants.length; i += 2) {
     var name = constants[i];
     var k = constants[i + 1];
-    %SetProperty(object, name, k, attributes);
+    %AddNamedProperty(object, name, k, attributes);
   }
   %ToFastProperties(object);
 }
@@ -86,16 +86,17 @@
   }
   if (fields) {
     for (var i = 0; i < fields.length; i++) {
-      %SetProperty(prototype, fields[i], UNDEFINED, DONT_ENUM | DONT_DELETE);
+      %AddNamedProperty(prototype, fields[i],
+                        UNDEFINED, DONT_ENUM | DONT_DELETE);
     }
   }
   for (var i = 0; i < methods.length; i += 2) {
     var key = methods[i];
     var f = methods[i + 1];
-    %SetProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
+    %AddNamedProperty(prototype, key, f, DONT_ENUM | DONT_DELETE | READ_ONLY);
     %SetNativeFlag(f);
   }
-  %SetPrototype(prototype, null);
+  %InternalSetPrototype(prototype, null);
   %ToFastProperties(prototype);
 }
 
@@ -172,12 +173,12 @@
                          'be the global object from which eval originated');
   }
 
-  var global_receiver = %GlobalReceiver(global);
+  var global_proxy = %GlobalProxy(global);
 
   var f = %CompileString(x, false);
   if (!IS_FUNCTION(f)) return f;
 
-  return %_CallFunction(global_receiver, f);
+  return %_CallFunction(global_proxy, f);
 }
 
 
@@ -190,13 +191,13 @@
   var attributes = DONT_ENUM | DONT_DELETE | READ_ONLY;
 
   // ECMA 262 - 15.1.1.1.
-  %SetProperty(global, "NaN", NAN, attributes);
+  %AddNamedProperty(global, "NaN", NAN, attributes);
 
   // ECMA-262 - 15.1.1.2.
-  %SetProperty(global, "Infinity", INFINITY, attributes);
+  %AddNamedProperty(global, "Infinity", INFINITY, attributes);
 
   // ECMA-262 - 15.1.1.3.
-  %SetProperty(global, "undefined", UNDEFINED, attributes);
+  %AddNamedProperty(global, "undefined", UNDEFINED, attributes);
 
   // Set up non-enumerable function on the global object.
   InstallFunctions(global, DONT_ENUM, $Array(
@@ -274,7 +275,7 @@
 function ObjectDefineGetter(name, fun) {
   var receiver = this;
   if (receiver == null && !IS_UNDETECTABLE(receiver)) {
-    receiver = %GlobalReceiver(global);
+    receiver = %GlobalProxy(global);
   }
   if (!IS_SPEC_FUNCTION(fun)) {
     throw new $TypeError(
@@ -291,7 +292,7 @@
 function ObjectLookupGetter(name) {
   var receiver = this;
   if (receiver == null && !IS_UNDETECTABLE(receiver)) {
-    receiver = %GlobalReceiver(global);
+    receiver = %GlobalProxy(global);
   }
   return %LookupAccessor(ToObject(receiver), ToName(name), GETTER);
 }
@@ -300,7 +301,7 @@
 function ObjectDefineSetter(name, fun) {
   var receiver = this;
   if (receiver == null && !IS_UNDETECTABLE(receiver)) {
-    receiver = %GlobalReceiver(global);
+    receiver = %GlobalProxy(global);
   }
   if (!IS_SPEC_FUNCTION(fun)) {
     throw new $TypeError(
@@ -317,7 +318,7 @@
 function ObjectLookupSetter(name) {
   var receiver = this;
   if (receiver == null && !IS_UNDETECTABLE(receiver)) {
-    receiver = %GlobalReceiver(global);
+    receiver = %GlobalProxy(global);
   }
   return %LookupAccessor(ToObject(receiver), ToName(name), SETTER);
 }
@@ -386,24 +387,22 @@
   var obj = new $Object();
 
   if (desc.hasValue()) {
-    %IgnoreAttributesAndSetProperty(obj, "value", desc.getValue(), NONE);
+    %AddNamedProperty(obj, "value", desc.getValue(), NONE);
   }
   if (desc.hasWritable()) {
-    %IgnoreAttributesAndSetProperty(obj, "writable", desc.isWritable(), NONE);
+    %AddNamedProperty(obj, "writable", desc.isWritable(), NONE);
   }
   if (desc.hasGetter()) {
-    %IgnoreAttributesAndSetProperty(obj, "get", desc.getGet(), NONE);
+    %AddNamedProperty(obj, "get", desc.getGet(), NONE);
   }
   if (desc.hasSetter()) {
-    %IgnoreAttributesAndSetProperty(obj, "set", desc.getSet(), NONE);
+    %AddNamedProperty(obj, "set", desc.getSet(), NONE);
   }
   if (desc.hasEnumerable()) {
-    %IgnoreAttributesAndSetProperty(obj, "enumerable",
-                                    desc.isEnumerable(), NONE);
+    %AddNamedProperty(obj, "enumerable", desc.isEnumerable(), NONE);
   }
   if (desc.hasConfigurable()) {
-    %IgnoreAttributesAndSetProperty(obj, "configurable",
-                                    desc.isConfigurable(), NONE);
+    %AddNamedProperty(obj, "configurable", desc.isConfigurable(), NONE);
   }
   return obj;
 }
@@ -572,10 +571,6 @@
 // property descriptor. For a description of the array layout please
 // see the runtime.cc file.
 function ConvertDescriptorArrayToDescriptor(desc_array) {
-  if (desc_array === false) {
-    throw 'Internal error: invalid desc_array';
-  }
-
   if (IS_UNDEFINED(desc_array)) {
     return UNDEFINED;
   }
@@ -650,9 +645,6 @@
   // If p is not a property on obj undefined is returned.
   var props = %GetOwnProperty(ToObject(obj), p);
 
-  // A false value here means that access checks failed.
-  if (props === false) return UNDEFINED;
-
   return ConvertDescriptorArrayToDescriptor(props);
 }
 
@@ -693,11 +685,8 @@
 
 // ES5 8.12.9.
 function DefineObjectProperty(obj, p, desc, should_throw) {
-  var current_or_access = %GetOwnProperty(ToObject(obj), ToName(p));
-  // A false value here means that access checks failed.
-  if (current_or_access === false) return UNDEFINED;
-
-  var current = ConvertDescriptorArrayToDescriptor(current_or_access);
+  var current_array = %GetOwnProperty(ToObject(obj), ToName(p));
+  var current = ConvertDescriptorArrayToDescriptor(current_array);
   var extensible = %IsExtensible(ToObject(obj));
 
   // Error handling according to spec.
@@ -833,7 +822,7 @@
       value = current.getValue();
     }
 
-    %DefineOrRedefineDataProperty(obj, p, value, flag);
+    %DefineDataPropertyUnchecked(obj, p, value, flag);
   } else {
     // There are 3 cases that lead here:
     // Step 4b - defining a new accessor property.
@@ -841,9 +830,19 @@
     //                 property.
     // Step 12 - updating an existing accessor property with an accessor
     //           descriptor.
-    var getter = desc.hasGetter() ? desc.getGet() : null;
-    var setter = desc.hasSetter() ? desc.getSet() : null;
-    %DefineOrRedefineAccessorProperty(obj, p, getter, setter, flag);
+    var getter = null;
+    if (desc.hasGetter()) {
+      getter = desc.getGet();
+    } else if (IsAccessorDescriptor(current) && current.hasGetter()) {
+      getter = current.getGet();
+    }
+    var setter = null;
+    if (desc.hasSetter()) {
+      setter = desc.getSet();
+    } else if (IsAccessorDescriptor(current) && current.hasSetter()) {
+      setter = current.getSet();
+    }
+    %DefineAccessorPropertyUnchecked(obj, p, getter, setter, flag);
   }
   return true;
 }
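
The expanded branch fixes a subtle ES5 8.12.9 case: when the new descriptor supplies only one accessor, the other half of an existing accessor pair must be carried over rather than reset to null. A small check of the required behavior:

// Redefining just the getter must preserve the previously installed setter.
var store;
var o = {};
Object.defineProperty(o, 'x', {
  get: function () { return 1; },
  set: function (v) { store = v; },
  configurable: true
});
Object.defineProperty(o, 'x', { get: function () { return 2; } });
o.x = 'kept';
// o.x === 2 and store === 'kept'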
@@ -901,7 +900,7 @@
     // Make sure the below call to DefineObjectProperty() doesn't overwrite
     // any magic "length" property by removing the value.
     // TODO(mstarzinger): This hack should be removed once we have addressed the
-    // respective TODO in Runtime_DefineOrRedefineDataProperty.
+    // respective TODO in Runtime_DefineDataPropertyUnchecked.
     // For the time being, we need a hack to prevent Object.observe from
     // generating two change records.
     obj.length = new_length;
@@ -926,34 +925,36 @@
   }
 
   // Step 4 - Special handling for array index.
-  var index = ToUint32(p);
-  var emit_splice = false;
-  if (ToString(index) == p && index != 4294967295) {
-    var length = obj.length;
-    if (index >= length && %IsObserved(obj)) {
-      emit_splice = true;
-      BeginPerformSplice(obj);
-    }
-
-    var length_desc = GetOwnPropertyJS(obj, "length");
-    if ((index >= length && !length_desc.isWritable()) ||
-        !DefineObjectProperty(obj, p, desc, true)) {
-      if (emit_splice)
-        EndPerformSplice(obj);
-      if (should_throw) {
-        throw MakeTypeError("define_disallowed", [p]);
-      } else {
-        return false;
+  if (!IS_SYMBOL(p)) {
+    var index = ToUint32(p);
+    var emit_splice = false;
+    if (ToString(index) == p && index != 4294967295) {
+      var length = obj.length;
+      if (index >= length && %IsObserved(obj)) {
+        emit_splice = true;
+        BeginPerformSplice(obj);
       }
+
+      var length_desc = GetOwnPropertyJS(obj, "length");
+      if ((index >= length && !length_desc.isWritable()) ||
+          !DefineObjectProperty(obj, p, desc, true)) {
+        if (emit_splice)
+          EndPerformSplice(obj);
+        if (should_throw) {
+          throw MakeTypeError("define_disallowed", [p]);
+        } else {
+          return false;
+        }
+      }
+      if (index >= length) {
+        obj.length = index + 1;
+      }
+      if (emit_splice) {
+        EndPerformSplice(obj);
+        EnqueueSpliceRecord(obj, length, [], index + 1 - length);
+      }
+      return true;
     }
-    if (index >= length) {
-      obj.length = index + 1;
-    }
-    if (emit_splice) {
-      EndPerformSplice(obj);
-      EnqueueSpliceRecord(obj, length, [], index + 1 - length);
-    }
-    return true;
   }
 
   // Step 5 - Fallback to default implementation.
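
Wrapping the whole block in !IS_SYMBOL(p) matters because ToUint32 is not defined for symbols; a symbol key can never be an array index, so it now skips the length bookkeeping and splice notifications and falls straight through to step 5. For example:

// Symbol keys bypass the array-index path entirely.
var a = [];
var s = Symbol('tag');
Object.defineProperty(a, s, { value: 1 });
// a.length === 0; no splice change record is emitted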
@@ -1126,7 +1127,8 @@
   if (!IS_SPEC_OBJECT(proto) && proto !== null) {
     throw MakeTypeError("proto_object_or_null", [proto]);
   }
-  var obj = { __proto__: proto };
+  var obj = {};
+  %InternalSetPrototype(obj, proto);
   if (!IS_UNDEFINED(properties)) ObjectDefineProperties(obj, properties);
   return obj;
 }
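
Setting the prototype through %InternalSetPrototype, rather than via an object literal's special __proto__ handling, presumably keeps Object.create independent of that literal magic while behaving identically for null prototypes:

// Object.create(null) must produce a genuinely prototype-free object.
var dict = Object.create(null);
// Object.getPrototypeOf(dict) === null; 'toString' in dict === false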
@@ -1173,13 +1175,24 @@
 }
 
 
-function GetOwnEnumerablePropertyNames(properties) {
+function GetOwnEnumerablePropertyNames(object) {
   var names = new InternalArray();
-  for (var key in properties) {
-    if (%HasOwnProperty(properties, key)) {
+  for (var key in object) {
+    if (%HasOwnProperty(object, key)) {
       names.push(key);
     }
   }
+
+  var filter = PROPERTY_ATTRIBUTES_STRING | PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL;
+  var symbols = %GetOwnPropertyNames(object, filter);
+  for (var i = 0; i < symbols.length; ++i) {
+    var symbol = symbols[i];
+    if (IS_SYMBOL(symbol)) {
+      var desc = ObjectGetOwnPropertyDescriptor(object, symbol);
+      if (desc.enumerable) names.push(symbol);
+    }
+  }
+
   return names;
 }
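
The second loop appends enumerable symbol keys (the PROPERTY_ATTRIBUTES_* constants are V8-internal filter flags), so callers such as Object.defineProperties now honor symbol-keyed descriptors:

// Symbol-keyed descriptor entries are now collected as well.
var s = Symbol('k');
var props = {};
props[s] = { value: 42, enumerable: true };
var o = Object.defineProperties({}, props);
// o[s] === 42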
 
@@ -1311,7 +1324,9 @@
   for (var i = 0; i < names.length; i++) {
     var name = names[i];
     var desc = GetOwnPropertyJS(obj, name);
-    if (desc.isConfigurable()) return false;
+    if (desc.isConfigurable()) {
+      return false;
+    }
   }
   return true;
 }
@@ -1397,7 +1412,7 @@
   %SetNativeFlag($Object);
   %SetCode($Object, ObjectConstructor);
 
-  %SetProperty($Object.prototype, "constructor", $Object, DONT_ENUM);
+  %AddNamedProperty($Object.prototype, "constructor", $Object, DONT_ENUM);
 
   // Set up non-enumerable functions on the Object.prototype object.
   InstallFunctions($Object.prototype, DONT_ENUM, $Array(
@@ -1484,7 +1499,7 @@
 
   %SetCode($Boolean, BooleanConstructor);
   %FunctionSetPrototype($Boolean, new $Boolean(false));
-  %SetProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
+  %AddNamedProperty($Boolean.prototype, "constructor", $Boolean, DONT_ENUM);
 
   InstallFunctions($Boolean.prototype, DONT_ENUM, $Array(
     "toString", BooleanToString,
@@ -1667,7 +1682,7 @@
 
   %OptimizeObjectForAddingMultipleProperties($Number.prototype, 8);
   // Set up the constructor property on the Number prototype object.
-  %SetProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
+  %AddNamedProperty($Number.prototype, "constructor", $Number, DONT_ENUM);
 
   InstallConstants($Number, $Array(
       // ECMA-262 section 15.7.3.1.
@@ -1735,10 +1750,18 @@
     }
   }
 
+  if (%FunctionIsArrow(func)) {
+    return source;
+  }
+
   var name = %FunctionNameShouldPrintAsAnonymous(func)
       ? 'anonymous'
       : %FunctionGetName(func);
-  var head = %FunctionIsGenerator(func) ? 'function* ' : 'function ';
+
+  var isGenerator = %FunctionIsGenerator(func);
+  var head = %FunctionIsConciseMethod(func)
+      ? (isGenerator ? '*' : '')
+      : (isGenerator ? 'function* ' : 'function ');
   return head + name + source;
 }
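
Function.prototype.toString now distinguishes three source shapes: arrow functions (source returned verbatim), generators, and concise methods. A user-land mirror of the header selection, assuming the three flags are known:

// Sketch of the header logic above; isArrow/isGenerator/isConciseMethod
// stand in for the %FunctionIs* intrinsics.
function sourceHeader(isArrow, isGenerator, isConciseMethod, name, source) {
  if (isArrow) return source;  // arrow source already starts at the params
  var head = isConciseMethod
      ? (isGenerator ? '*' : '')
      : (isGenerator ? 'function* ' : 'function ');
  return head + name + source;
}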
 
@@ -1833,12 +1856,10 @@
 
 function FunctionConstructor(arg1) {  // length == 1
   var source = NewFunctionString(arguments, 'function');
-  var global_receiver = %GlobalReceiver(global);
+  var global_proxy = %GlobalProxy(global);
   // Compile the string in the constructor and not a helper so that errors
   // appear to come from here.
-  var f = %CompileString(source, true);
-  if (!IS_FUNCTION(f)) return f;
-  f = %_CallFunction(global_receiver, f);
+  var f = %_CallFunction(global_proxy, %CompileString(source, true));
   %FunctionMarkNameShouldPrintAsAnonymous(f);
   return f;
 }
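
Folding the call into one expression works because %CompileString now throws on failure instead of returning a non-function; the compiled function expression is immediately invoked with the global proxy as its receiver. The observable behavior, sketched:

// A Function-constructed body sees the global proxy as its receiver when
// called sloppily with no explicit receiver.
var f = new Function('return this;');
// f() === globalThis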
@@ -1850,7 +1871,7 @@
   %CheckIsBootstrapping();
 
   %SetCode($Function, FunctionConstructor);
-  %SetProperty($Function.prototype, "constructor", $Function, DONT_ENUM);
+  %AddNamedProperty($Function.prototype, "constructor", $Function, DONT_ENUM);
 
   InstallFunctions($Function.prototype, DONT_ENUM, $Array(
     "bind", FunctionBind,
@@ -1859,3 +1880,33 @@
 }
 
 SetUpFunction();
+
+
+// ----------------------------------------------------------------------------
+// Iterator related spec functions.
+
+// ES6 rev 26, 2014-07-18
+// 7.4.1 CheckIterable ( obj ); implemented here as ToIterable
+function ToIterable(obj) {
+  if (!IS_SPEC_OBJECT(obj)) {
+    return UNDEFINED;
+  }
+  return obj[symbolIterator];
+}
+
+
+// ES6 rev 26, 2014-07-18
+// 7.4.2 GetIterator ( obj, method )
+function GetIterator(obj, method) {
+  if (IS_UNDEFINED(method)) {
+    method = ToIterable(obj);
+  }
+  if (!IS_SPEC_FUNCTION(method)) {
+    throw MakeTypeError('not_iterable', [obj]);
+  }
+  var iterator = %_CallFunction(obj, method);
+  if (!IS_SPEC_OBJECT(iterator)) {
+    throw MakeTypeError('not_an_iterator', [iterator]);
+  }
+  return iterator;
+}
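
A user-land mirror of the two helpers, with Symbol.iterator standing in for the internal symbolIterator binding:

// Sketch of ToIterable/GetIterator under that assumption.
function getIterator(obj, method) {
  if (method === undefined) {
    var isSpecObject =
        obj !== null && (typeof obj === 'object' || typeof obj === 'function');
    method = isSpecObject ? obj[Symbol.iterator] : undefined;
  }
  if (typeof method !== 'function') {
    throw new TypeError(String(obj) + ' is not iterable');
  }
  var iterator = method.call(obj);
  if (iterator === null ||
      (typeof iterator !== 'object' && typeof iterator !== 'function')) {
    throw new TypeError('Result of the iterator method is not an object');
  }
  return iterator;
}
var it = getIterator([10, 20]);
// it.next().value === 10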
diff --git a/src/v8threads.cc b/src/v8threads.cc
index 1238445..a46b289 100644
--- a/src/v8threads.cc
+++ b/src/v8threads.cc
@@ -8,8 +8,8 @@
 #include "src/bootstrapper.h"
 #include "src/debug.h"
 #include "src/execution.h"
-#include "src/v8threads.h"
 #include "src/regexp-stack.h"
+#include "src/v8threads.h"
 
 namespace v8 {
 
@@ -22,7 +22,7 @@
 // Once the Locker is initialized, the current thread will be guaranteed to have
 // the lock for a given isolate.
 void Locker::Initialize(v8::Isolate* isolate) {
-  ASSERT(isolate != NULL);
+  DCHECK(isolate != NULL);
   has_lock_ = false;
   top_level_ = true;
   isolate_ = reinterpret_cast<i::Isolate*>(isolate);
@@ -52,12 +52,12 @@
       isolate_->stack_guard()->InitThread(access);
     }
   }
-  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
+  DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
 }
 
 
 bool Locker::IsLocked(v8::Isolate* isolate) {
-  ASSERT(isolate != NULL);
+  DCHECK(isolate != NULL);
   i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
   return internal_isolate->thread_manager()->IsLockedByCurrentThread();
 }
@@ -69,7 +69,7 @@
 
 
 Locker::~Locker() {
-  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
+  DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
   if (has_lock_) {
     if (top_level_) {
       isolate_->thread_manager()->FreeThreadResources();
@@ -82,16 +82,16 @@
 
 
 void Unlocker::Initialize(v8::Isolate* isolate) {
-  ASSERT(isolate != NULL);
+  DCHECK(isolate != NULL);
   isolate_ = reinterpret_cast<i::Isolate*>(isolate);
-  ASSERT(isolate_->thread_manager()->IsLockedByCurrentThread());
+  DCHECK(isolate_->thread_manager()->IsLockedByCurrentThread());
   isolate_->thread_manager()->ArchiveThread();
   isolate_->thread_manager()->Unlock();
 }
 
 
 Unlocker::~Unlocker() {
-  ASSERT(!isolate_->thread_manager()->IsLockedByCurrentThread());
+  DCHECK(!isolate_->thread_manager()->IsLockedByCurrentThread());
   isolate_->thread_manager()->Lock();
   isolate_->thread_manager()->RestoreThread();
 }
@@ -101,7 +101,7 @@
 
 
 bool ThreadManager::RestoreThread() {
-  ASSERT(IsLockedByCurrentThread());
+  DCHECK(IsLockedByCurrentThread());
   // First check whether the current thread has been 'lazily archived', i.e.
   // not archived at all.  If that is the case we put the state storage we
   // had prepared back in the free list, since we didn't need it after all.
@@ -109,8 +109,8 @@
     lazily_archived_thread_ = ThreadId::Invalid();
     Isolate::PerIsolateThreadData* per_thread =
         isolate_->FindPerThreadDataForThisThread();
-    ASSERT(per_thread != NULL);
-    ASSERT(per_thread->thread_state() == lazily_archived_thread_state_);
+    DCHECK(per_thread != NULL);
+    DCHECK(per_thread->thread_state() == lazily_archived_thread_state_);
     lazily_archived_thread_state_->set_id(ThreadId::Invalid());
     lazily_archived_thread_state_->LinkInto(ThreadState::FREE_LIST);
     lazily_archived_thread_state_ = NULL;
@@ -158,7 +158,7 @@
 void ThreadManager::Lock() {
   mutex_.Lock();
   mutex_owner_ = ThreadId::Current();
-  ASSERT(IsLockedByCurrentThread());
+  DCHECK(IsLockedByCurrentThread());
 }
 
 
@@ -271,9 +271,9 @@
 
 
 void ThreadManager::ArchiveThread() {
-  ASSERT(lazily_archived_thread_.Equals(ThreadId::Invalid()));
-  ASSERT(!IsArchived());
-  ASSERT(IsLockedByCurrentThread());
+  DCHECK(lazily_archived_thread_.Equals(ThreadId::Invalid()));
+  DCHECK(!IsArchived());
+  DCHECK(IsLockedByCurrentThread());
   ThreadState* state = GetFreeThreadState();
   state->Unlink();
   Isolate::PerIsolateThreadData* per_thread =
@@ -281,14 +281,14 @@
   per_thread->set_thread_state(state);
   lazily_archived_thread_ = ThreadId::Current();
   lazily_archived_thread_state_ = state;
-  ASSERT(state->id().Equals(ThreadId::Invalid()));
+  DCHECK(state->id().Equals(ThreadId::Invalid()));
   state->set_id(CurrentId());
-  ASSERT(!state->id().Equals(ThreadId::Invalid()));
+  DCHECK(!state->id().Equals(ThreadId::Invalid()));
 }
 
 
 void ThreadManager::EagerlyArchiveThread() {
-  ASSERT(IsLockedByCurrentThread());
+  DCHECK(IsLockedByCurrentThread());
   ThreadState* state = lazily_archived_thread_state_;
   state->LinkInto(ThreadState::IN_USE_LIST);
   char* to = state->data();
@@ -307,6 +307,9 @@
 
 
 void ThreadManager::FreeThreadResources() {
+  DCHECK(!isolate_->has_pending_exception());
+  DCHECK(!isolate_->external_caught_exception());
+  DCHECK(isolate_->try_catch_handler() == NULL);
   isolate_->handle_scope_implementer()->FreeThreadResources();
   isolate_->FreeThreadResources();
   isolate_->debug()->FreeThreadResources();
diff --git a/src/v8threads.h b/src/v8threads.h
index ca722ad..c3ba517 100644
--- a/src/v8threads.h
+++ b/src/v8threads.h
@@ -96,7 +96,7 @@
 
   void EagerlyArchiveThread();
 
-  Mutex mutex_;
+  base::Mutex mutex_;
   ThreadId mutex_owner_;
   ThreadId lazily_archived_thread_;
   ThreadState* lazily_archived_thread_state_;
diff --git a/src/variables.cc b/src/variables.cc
index 906b6ab..6588312 100644
--- a/src/variables.cc
+++ b/src/variables.cc
@@ -32,30 +32,26 @@
 }
 
 
-Variable::Variable(Scope* scope,
-                   Handle<String> name,
-                   VariableMode mode,
-                   bool is_valid_ref,
-                   Kind kind,
+Variable::Variable(Scope* scope, const AstRawString* name, VariableMode mode,
+                   bool is_valid_ref, Kind kind,
                    InitializationFlag initialization_flag,
-                   Interface* interface)
-  : scope_(scope),
-    name_(name),
-    mode_(mode),
-    kind_(kind),
-    location_(UNALLOCATED),
-    index_(-1),
-    initializer_position_(RelocInfo::kNoPosition),
-    local_if_not_shadowed_(NULL),
-    is_valid_ref_(is_valid_ref),
-    force_context_allocation_(false),
-    is_used_(false),
-    initialization_flag_(initialization_flag),
-    interface_(interface) {
-  // Names must be canonicalized for fast equality checks.
-  ASSERT(name->IsInternalizedString());
+                   MaybeAssignedFlag maybe_assigned_flag, Interface* interface)
+    : scope_(scope),
+      name_(name),
+      mode_(mode),
+      kind_(kind),
+      location_(UNALLOCATED),
+      index_(-1),
+      initializer_position_(RelocInfo::kNoPosition),
+      local_if_not_shadowed_(NULL),
+      is_valid_ref_(is_valid_ref),
+      force_context_allocation_(false),
+      is_used_(false),
+      initialization_flag_(initialization_flag),
+      maybe_assigned_(maybe_assigned_flag),
+      interface_(interface) {
   // Var declared variables never need initialization.
-  ASSERT(!(mode == VAR && initialization_flag == kNeedsInitialization));
+  DCHECK(!(mode == VAR && initialization_flag == kNeedsInitialization));
 }
 
 
diff --git a/src/variables.h b/src/variables.h
index de209d8..a8cf5e3 100644
--- a/src/variables.h
+++ b/src/variables.h
@@ -5,8 +5,9 @@
 #ifndef V8_VARIABLES_H_
 #define V8_VARIABLES_H_
 
-#include "src/zone.h"
+#include "src/ast-value-factory.h"
 #include "src/interface.h"
+#include "src/zone.h"
 
 namespace v8 {
 namespace internal {
@@ -51,12 +52,9 @@
     LOOKUP
   };
 
-  Variable(Scope* scope,
-           Handle<String> name,
-           VariableMode mode,
-           bool is_valid_ref,
-           Kind kind,
-           InitializationFlag initialization_flag,
+  Variable(Scope* scope, const AstRawString* name, VariableMode mode,
+           bool is_valid_ref, Kind kind, InitializationFlag initialization_flag,
+           MaybeAssignedFlag maybe_assigned_flag = kNotAssigned,
            Interface* interface = Interface::NewValue());
 
   // Printing support
@@ -70,17 +68,20 @@
   // scope is only used to follow the context chain length.
   Scope* scope() const { return scope_; }
 
-  Handle<String> name() const { return name_; }
+  Handle<String> name() const { return name_->string(); }
+  const AstRawString* raw_name() const { return name_; }
   VariableMode mode() const { return mode_; }
   bool has_forced_context_allocation() const {
     return force_context_allocation_;
   }
   void ForceContextAllocation() {
-    ASSERT(mode_ != TEMPORARY);
+    DCHECK(mode_ != TEMPORARY);
     force_context_allocation_ = true;
   }
   bool is_used() { return is_used_; }
-  void set_is_used(bool flag) { is_used_ = flag; }
+  void set_is_used() { is_used_ = true; }
+  MaybeAssignedFlag maybe_assigned() const { return maybe_assigned_; }
+  void set_maybe_assigned() { maybe_assigned_ = kMaybeAssigned; }
 
   int initializer_position() { return initializer_position_; }
   void set_initializer_position(int pos) { initializer_position_ = pos; }
@@ -112,7 +113,7 @@
   }
 
   Variable* local_if_not_shadowed() const {
-    ASSERT(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
+    DCHECK(mode_ == DYNAMIC_LOCAL && local_if_not_shadowed_ != NULL);
     return local_if_not_shadowed_;
   }
 
@@ -136,7 +137,7 @@
 
  private:
   Scope* scope_;
-  Handle<String> name_;
+  const AstRawString* name_;
   VariableMode mode_;
   Kind kind_;
   Location location_;
@@ -156,6 +157,7 @@
   bool force_context_allocation_;  // set by variable resolver
   bool is_used_;
   InitializationFlag initialization_flag_;
+  MaybeAssignedFlag maybe_assigned_;
 
   // Module type info.
   Interface* interface_;
diff --git a/src/vector.h b/src/vector.h
index 505ef5a..d3ba775 100644
--- a/src/vector.h
+++ b/src/vector.h
@@ -21,7 +21,7 @@
  public:
   Vector() : start_(NULL), length_(0) {}
   Vector(T* data, int length) : start_(data), length_(length) {
-    ASSERT(length == 0 || (length > 0 && data != NULL));
+    DCHECK(length == 0 || (length > 0 && data != NULL));
   }
 
   static Vector<T> New(int length) {
@@ -31,9 +31,9 @@
   // Returns a vector using the same backing storage as this one,
   // spanning from and including 'from', to but not including 'to'.
   Vector<T> SubVector(int from, int to) {
-    SLOW_ASSERT(to <= length_);
-    SLOW_ASSERT(from < to);
-    ASSERT(0 <= from);
+    SLOW_DCHECK(to <= length_);
+    SLOW_DCHECK(from < to);
+    DCHECK(0 <= from);
     return Vector<T>(start() + from, to - from);
   }
 
@@ -48,7 +48,7 @@
 
   // Access individual vector elements - checks bounds in debug mode.
   T& operator[](int index) const {
-    ASSERT(0 <= index && index < length_);
+    DCHECK(0 <= index && index < length_);
     return start_[index];
   }
 
@@ -74,7 +74,7 @@
   }
 
   void Truncate(int length) {
-    ASSERT(length <= length_);
+    DCHECK(length <= length_);
     length_ = length;
   }
 
@@ -87,7 +87,7 @@
   }
 
   inline Vector<T> operator+(int offset) {
-    ASSERT(offset < length_);
+    DCHECK(offset < length_);
     return Vector<T>(start_ + offset, length_ - offset);
   }
 
@@ -100,6 +100,17 @@
                      input.length() * sizeof(S) / sizeof(T));
   }
 
+  bool operator==(const Vector<T>& other) const {
+    if (length_ != other.length_) return false;
+    if (start_ == other.start_) return true;
+    for (int i = 0; i < length_; ++i) {
+      if (start_[i] != other.start_[i]) {
+        return false;
+      }
+    }
+    return true;
+  }
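
The new operator== compares lengths first, short-circuits on shared backing storage, then falls back to element-wise comparison. The same logic in JS, for illustration:

// JS rendition of Vector::operator== (arrays stand in for vectors).
function vectorEquals(a, b) {
  if (a.length !== b.length) return false;
  if (a === b) return true;  // same backing storage
  for (var i = 0; i < a.length; ++i) {
    if (a[i] !== b[i]) return false;
  }
  return true;
}
// vectorEquals([1, 2], [1, 2]) === true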
+
  protected:
   void set_start(T* start) { start_ = start; }
 
@@ -135,14 +146,14 @@
 
 inline int StrLength(const char* string) {
   size_t length = strlen(string);
-  ASSERT(length == static_cast<size_t>(static_cast<int>(length)));
+  DCHECK(length == static_cast<size_t>(static_cast<int>(length)));
   return static_cast<int>(length);
 }
 
 
-#define STATIC_ASCII_VECTOR(x)                        \
+#define STATIC_CHAR_VECTOR(x)                                              \
   v8::internal::Vector<const uint8_t>(reinterpret_cast<const uint8_t*>(x), \
-                                      ARRAY_SIZE(x)-1)
+                                      arraysize(x) - 1)
 
 inline Vector<const char> CStrVector(const char* data) {
   return Vector<const char>(data, StrLength(data));
diff --git a/src/version.cc b/src/version.cc
index da8e386..a6c529e 100644
--- a/src/version.cc
+++ b/src/version.cc
@@ -33,9 +33,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     27
-#define BUILD_NUMBER      34
-#define PATCH_LEVEL       15
+#define MINOR_VERSION     29
+#define BUILD_NUMBER      88
+#define PATCH_LEVEL       17
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
diff --git a/src/version.h b/src/version.h
index b0a6071..4f60005 100644
--- a/src/version.h
+++ b/src/version.h
@@ -16,6 +16,7 @@
   static int GetBuild() { return build_; }
   static int GetPatch() { return patch_; }
   static bool IsCandidate() { return candidate_; }
+  static int Hash() { return (major_ << 20) ^ (minor_ << 10) ^ patch_; }
 
   // Calculate the V8 version string.
   static void GetString(Vector<char> str);
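
Hash() packs the three components into one int by XOR-ing them into disjoint bit ranges: major above bit 20, minor in bits 10-19, patch in the low bits. The arithmetic, mirrored in JS:

// Same packing as Version::Hash (assumes minor and patch each fit in
// 10 bits; larger values would overlap under XOR).
function versionHash(major, minor, patch) {
  return (major << 20) ^ (minor << 10) ^ patch;
}
// versionHash(3, 29, 17) === 3175441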
diff --git a/src/vm-state-inl.h b/src/vm-state-inl.h
index 4e0d7b8..ac3941e 100644
--- a/src/vm-state-inl.h
+++ b/src/vm-state-inl.h
@@ -40,8 +40,7 @@
 VMState<Tag>::VMState(Isolate* isolate)
     : isolate_(isolate), previous_tag_(isolate->current_vm_state()) {
   if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
-    LOG(isolate_,
-        TimerEvent(Logger::START, Logger::TimerEventScope::v8_external));
+    LOG(isolate_, TimerEvent(Logger::START, TimerEventExternal::name()));
   }
   isolate_->set_current_vm_state(Tag);
 }
@@ -50,8 +49,7 @@
 template <StateTag Tag>
 VMState<Tag>::~VMState() {
   if (FLAG_log_timer_events && previous_tag_ != EXTERNAL && Tag == EXTERNAL) {
-    LOG(isolate_,
-        TimerEvent(Logger::END, Logger::TimerEventScope::v8_external));
+    LOG(isolate_, TimerEvent(Logger::END, TimerEventExternal::name()));
   }
   isolate_->set_current_vm_state(previous_tag_);
 }
diff --git a/src/weak-collection.js b/src/weak-collection.js
new file mode 100644
index 0000000..1160176
--- /dev/null
+++ b/src/weak-collection.js
@@ -0,0 +1,227 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+var $WeakMap = global.WeakMap;
+var $WeakSet = global.WeakSet;
+
+
+// -------------------------------------------------------------------
+// Harmony WeakMap
+
+function WeakMapConstructor(iterable) {
+  if (!%_IsConstructCall()) {
+    throw MakeTypeError('constructor_not_function', ['WeakMap']);
+  }
+
+  var iter, adder;
+
+  if (!IS_NULL_OR_UNDEFINED(iterable)) {
+    iter = GetIterator(ToObject(iterable));
+    adder = this.set;
+    if (!IS_SPEC_FUNCTION(adder)) {
+      throw MakeTypeError('property_not_function', ['set', this]);
+    }
+  }
+
+  %WeakCollectionInitialize(this);
+
+  if (IS_UNDEFINED(iter)) return;
+
+  var next, done, nextItem;
+  while (!(next = iter.next()).done) {
+    if (!IS_SPEC_OBJECT(next)) {
+      throw MakeTypeError('iterator_result_not_an_object', [next]);
+    }
+    nextItem = next.value;
+    if (!IS_SPEC_OBJECT(nextItem)) {
+      throw MakeTypeError('iterator_value_not_an_object', [nextItem]);
+    }
+    %_CallFunction(this, nextItem[0], nextItem[1], adder);
+  }
+}
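
The rewritten constructor accepts an iterable of [key, value] entries and feeds each one through this.set, so a subclass overriding set sees every entry:

// Seeding a WeakMap from an iterable of [key, value] pairs.
var k1 = {}, k2 = {};
var wm = new WeakMap([[k1, 'a'], [k2, 'b']]);
// wm.get(k1) === 'a' and wm.get(k2) === 'b'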
+
+
+function WeakMapGet(key) {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.get', this]);
+  }
+  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakCollectionGet(this, key);
+}
+
+
+function WeakMapSet(key, value) {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.set', this]);
+  }
+  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakCollectionSet(this, key, value);
+}
+
+
+function WeakMapHas(key) {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.has', this]);
+  }
+  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakCollectionHas(this, key);
+}
+
+
+function WeakMapDelete(key) {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.delete', this]);
+  }
+  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
+    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
+  }
+  return %WeakCollectionDelete(this, key);
+}
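
get, set, has and delete all validate the key up front: primitives other than symbols are rejected before the backing store is touched (accepting symbol keys reflects the ES6 drafts this build tracks). For example:

// Primitive keys are rejected eagerly.
var wm = new WeakMap();
var threw = false;
try { wm.set(1, 'one'); } catch (e) { threw = e instanceof TypeError; }
// threw === true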
+
+
+function WeakMapClear() {
+  if (!IS_WEAKMAP(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakMap.prototype.clear', this]);
+  }
+  // Replace the internal table with a new empty table.
+  %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakMap() {
+  %CheckIsBootstrapping();
+
+  %SetCode($WeakMap, WeakMapConstructor);
+  %FunctionSetPrototype($WeakMap, new $Object());
+  %AddNamedProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
+
+  // Set up the non-enumerable functions on the WeakMap prototype object.
+  InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
+    "get", WeakMapGet,
+    "set", WeakMapSet,
+    "has", WeakMapHas,
+    "delete", WeakMapDelete,
+    "clear", WeakMapClear
+  ));
+}
+
+SetUpWeakMap();
+
+
+// -------------------------------------------------------------------
+// Harmony WeakSet
+
+function WeakSetConstructor(iterable) {
+  if (!%_IsConstructCall()) {
+    throw MakeTypeError('constructor_not_function', ['WeakSet']);
+  }
+
+  var iter, adder;
+
+  if (!IS_NULL_OR_UNDEFINED(iterable)) {
+    iter = GetIterator(ToObject(iterable));
+    adder = this.add;
+    if (!IS_SPEC_FUNCTION(adder)) {
+      throw MakeTypeError('property_not_function', ['add', this]);
+    }
+  }
+
+  %WeakCollectionInitialize(this);
+
+  if (IS_UNDEFINED(iter)) return;
+
+  var next, done;
+  while (!(next = iter.next()).done) {
+    if (!IS_SPEC_OBJECT(next)) {
+      throw MakeTypeError('iterator_result_not_an_object', [next]);
+    }
+    %_CallFunction(this, next.value, adder);
+  }
+}
+
+
+function WeakSetAdd(value) {
+  if (!IS_WEAKSET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakSet.prototype.add', this]);
+  }
+  if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+    throw %MakeTypeError('invalid_weakset_value', [this, value]);
+  }
+  return %WeakCollectionSet(this, value, true);
+}
+
+
+function WeakSetHas(value) {
+  if (!IS_WEAKSET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakSet.prototype.has', this]);
+  }
+  if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+    throw %MakeTypeError('invalid_weakset_value', [this, value]);
+  }
+  return %WeakCollectionHas(this, value);
+}
+
+
+function WeakSetDelete(value) {
+  if (!IS_WEAKSET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakSet.prototype.delete', this]);
+  }
+  if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
+    throw %MakeTypeError('invalid_weakset_value', [this, value]);
+  }
+  return %WeakCollectionDelete(this, value);
+}
+
+
+function WeakSetClear() {
+  if (!IS_WEAKSET(this)) {
+    throw MakeTypeError('incompatible_method_receiver',
+                        ['WeakSet.prototype.clear', this]);
+  }
+  // Replace the internal table with a new empty table.
+  %WeakCollectionInitialize(this);
+}
+
+
+// -------------------------------------------------------------------
+
+function SetUpWeakSet() {
+  %CheckIsBootstrapping();
+
+  %SetCode($WeakSet, WeakSetConstructor);
+  %FunctionSetPrototype($WeakSet, new $Object());
+  %AddNamedProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
+
+  // Set up the non-enumerable functions on the WeakSet prototype object.
+  InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
+    "add", WeakSetAdd,
+    "has", WeakSetHas,
+    "delete", WeakSetDelete,
+    "clear", WeakSetClear
+  ));
+}
+
+SetUpWeakSet();
diff --git a/src/weak_collection.js b/src/weak_collection.js
deleted file mode 100644
index 4c26d25..0000000
--- a/src/weak_collection.js
+++ /dev/null
@@ -1,183 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-"use strict";
-
-// This file relies on the fact that the following declaration has been made
-// in runtime.js:
-// var $Array = global.Array;
-
-var $WeakMap = global.WeakMap;
-var $WeakSet = global.WeakSet;
-
-
-// -------------------------------------------------------------------
-// Harmony WeakMap
-
-function WeakMapConstructor() {
-  if (%_IsConstructCall()) {
-    %WeakCollectionInitialize(this);
-  } else {
-    throw MakeTypeError('constructor_not_function', ['WeakMap']);
-  }
-}
-
-
-function WeakMapGet(key) {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.get', this]);
-  }
-  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakCollectionGet(this, key);
-}
-
-
-function WeakMapSet(key, value) {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.set', this]);
-  }
-  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakCollectionSet(this, key, value);
-}
-
-
-function WeakMapHas(key) {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.has', this]);
-  }
-  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakCollectionHas(this, key);
-}
-
-
-function WeakMapDelete(key) {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.delete', this]);
-  }
-  if (!(IS_SPEC_OBJECT(key) || IS_SYMBOL(key))) {
-    throw %MakeTypeError('invalid_weakmap_key', [this, key]);
-  }
-  return %WeakCollectionDelete(this, key);
-}
-
-
-function WeakMapClear() {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.clear', this]);
-  }
-  // Replace the internal table with a new empty table.
-  %WeakCollectionInitialize(this);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpWeakMap() {
-  %CheckIsBootstrapping();
-
-  %SetCode($WeakMap, WeakMapConstructor);
-  %FunctionSetPrototype($WeakMap, new $Object());
-  %SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
-
-  // Set up the non-enumerable functions on the WeakMap prototype object.
-  InstallFunctions($WeakMap.prototype, DONT_ENUM, $Array(
-    "get", WeakMapGet,
-    "set", WeakMapSet,
-    "has", WeakMapHas,
-    "delete", WeakMapDelete,
-    "clear", WeakMapClear
-  ));
-}
-
-SetUpWeakMap();
-
-
-// -------------------------------------------------------------------
-// Harmony WeakSet
-
-function WeakSetConstructor() {
-  if (%_IsConstructCall()) {
-    %WeakCollectionInitialize(this);
-  } else {
-    throw MakeTypeError('constructor_not_function', ['WeakSet']);
-  }
-}
-
-
-function WeakSetAdd(value) {
-  if (!IS_WEAKSET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakSet.prototype.add', this]);
-  }
-  if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
-    throw %MakeTypeError('invalid_weakset_value', [this, value]);
-  }
-  return %WeakCollectionSet(this, value, true);
-}
-
-
-function WeakSetHas(value) {
-  if (!IS_WEAKSET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakSet.prototype.has', this]);
-  }
-  if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
-    throw %MakeTypeError('invalid_weakset_value', [this, value]);
-  }
-  return %WeakCollectionHas(this, value);
-}
-
-
-function WeakSetDelete(value) {
-  if (!IS_WEAKSET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakSet.prototype.delete', this]);
-  }
-  if (!(IS_SPEC_OBJECT(value) || IS_SYMBOL(value))) {
-    throw %MakeTypeError('invalid_weakset_value', [this, value]);
-  }
-  return %WeakCollectionDelete(this, value);
-}
-
-
-function WeakSetClear() {
-  if (!IS_WEAKSET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakSet.prototype.clear', this]);
-  }
-  // Replace the internal table with a new empty table.
-  %WeakCollectionInitialize(this);
-}
-
-
-// -------------------------------------------------------------------
-
-function SetUpWeakSet() {
-  %CheckIsBootstrapping();
-
-  %SetCode($WeakSet, WeakSetConstructor);
-  %FunctionSetPrototype($WeakSet, new $Object());
-  %SetProperty($WeakSet.prototype, "constructor", $WeakSet, DONT_ENUM);
-
-  // Set up the non-enumerable functions on the WeakSet prototype object.
-  InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
-    "add", WeakSetAdd,
-    "has", WeakSetHas,
-    "delete", WeakSetDelete,
-    "clear", WeakSetClear
-  ));
-}
-
-SetUpWeakSet();
diff --git a/src/win32-math.cc b/src/win32-math.cc
deleted file mode 100644
index e0670b0..0000000
--- a/src/win32-math.cc
+++ /dev/null
@@ -1,82 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
-// refer to The Open Group Base Specification for specification of the correct
-// semantics for these functions.
-// (http://www.opengroup.org/onlinepubs/000095399/)
-#if defined(_MSC_VER) && (_MSC_VER < 1800)
-
-#include "src/base/win32-headers.h"
-#include <limits.h>        // Required for INT_MAX etc.
-#include <float.h>         // Required for DBL_MAX and on Win32 for finite()
-#include <cmath>
-#include "src/win32-math.h"
-
-#include "src/checks.h"
-
-
-namespace std {
-
-// Test for a NaN (not a number) value - usually defined in math.h
-int isnan(double x) {
-  return _isnan(x);
-}
-
-
-// Test for infinity - usually defined in math.h
-int isinf(double x) {
-  return (_fpclass(x) & (_FPCLASS_PINF | _FPCLASS_NINF)) != 0;
-}
-
-
-// Test for finite value - usually defined in math.h
-int isfinite(double x) {
-  return _finite(x);
-}
-
-
-// Test if x is less than y and both nominal - usually defined in math.h
-int isless(double x, double y) {
-  return isnan(x) || isnan(y) ? 0 : x < y;
-}
-
-
-// Test if x is greater than y and both nominal - usually defined in math.h
-int isgreater(double x, double y) {
-  return isnan(x) || isnan(y) ? 0 : x > y;
-}
-
-
-// Classify floating point number - usually defined in math.h
-int fpclassify(double x) {
-  // Use the MS-specific _fpclass() for classification.
-  int flags = _fpclass(x);
-
-  // Determine class. We cannot use a switch statement because
-  // the _FPCLASS_ constants are defined as flags.
-  if (flags & (_FPCLASS_PN | _FPCLASS_NN)) return FP_NORMAL;
-  if (flags & (_FPCLASS_PZ | _FPCLASS_NZ)) return FP_ZERO;
-  if (flags & (_FPCLASS_PD | _FPCLASS_ND)) return FP_SUBNORMAL;
-  if (flags & (_FPCLASS_PINF | _FPCLASS_NINF)) return FP_INFINITE;
-
-  // All cases should be covered by the code above.
-  ASSERT(flags & (_FPCLASS_SNAN | _FPCLASS_QNAN));
-  return FP_NAN;
-}
-
-
-// Test sign - usually defined in math.h
-int signbit(double x) {
-  // We need to take care of the special case of both positive
-  // and negative versions of zero.
-  if (x == 0)
-    return _fpclass(x) & _FPCLASS_NZ;
-  else
-    return x < 0;
-}
-
-}  // namespace std
-
-#endif  // _MSC_VER
diff --git a/src/win32-math.h b/src/win32-math.h
deleted file mode 100644
index 7b7cbc9..0000000
--- a/src/win32-math.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Extra POSIX/ANSI routines for Win32 when using Visual Studio C++. Please
-// refer to The Open Group Base Specification for specification of the correct
-// semantics for these functions.
-// (http://www.opengroup.org/onlinepubs/000095399/)
-
-#ifndef V8_WIN32_MATH_H_
-#define V8_WIN32_MATH_H_
-
-#ifndef _MSC_VER
-#error Wrong environment, expected MSVC.
-#endif  // _MSC_VER
-
-// MSVC 2013+ provides implementations of all standard math functions.
-#if (_MSC_VER < 1800)
-enum {
-  FP_NAN,
-  FP_INFINITE,
-  FP_ZERO,
-  FP_SUBNORMAL,
-  FP_NORMAL
-};
-
-
-namespace std {
-
-int isfinite(double x);
-int isinf(double x);
-int isnan(double x);
-int isless(double x, double y);
-int isgreater(double x, double y);
-int fpclassify(double x);
-int signbit(double x);
-
-}  // namespace std
-
-#endif  // _MSC_VER < 1800
-
-#endif  // V8_WIN32_MATH_H_
diff --git a/src/x64/assembler-x64-inl.h b/src/x64/assembler-x64-inl.h
index f1731af..b64bbfb 100644
--- a/src/x64/assembler-x64-inl.h
+++ b/src/x64/assembler-x64-inl.h
@@ -7,7 +7,7 @@
 
 #include "src/x64/assembler-x64.h"
 
-#include "src/cpu.h"
+#include "src/base/cpu.h"
 #include "src/debug.h"
 #include "src/v8memory.h"
 
@@ -57,7 +57,7 @@
 void Assembler::emit_code_target(Handle<Code> target,
                                  RelocInfo::Mode rmode,
                                  TypeFeedbackId ast_id) {
-  ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+  DCHECK(RelocInfo::IsCodeTarget(rmode) ||
       rmode == RelocInfo::CODE_AGE_SEQUENCE);
   if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
     RecordRelocInfo(RelocInfo::CODE_TARGET_WITH_ID, ast_id.ToInt());
@@ -76,7 +76,7 @@
 
 
 void Assembler::emit_runtime_entry(Address entry, RelocInfo::Mode rmode) {
-  ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+  DCHECK(RelocInfo::IsRuntimeEntry(rmode));
   RecordRelocInfo(rmode);
   emitl(static_cast<uint32_t>(entry - isolate()->code_range()->start()));
 }
@@ -108,7 +108,7 @@
 
 
 void Assembler::emit_rex_64(Register rm_reg) {
-  ASSERT_EQ(rm_reg.code() & 0xf, rm_reg.code());
+  DCHECK_EQ(rm_reg.code() & 0xf, rm_reg.code());
   emit(0x48 | rm_reg.high_bit());
 }
 
@@ -196,7 +196,7 @@
                                       ICacheFlushMode icache_flush_mode) {
   Memory::int32_at(pc) = static_cast<int32_t>(target - pc - 4);
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(pc, sizeof(int32_t));
+    CpuFeatures::FlushICache(pc, sizeof(int32_t));
   }
 }
 
@@ -206,6 +206,11 @@
 }
 
 
+Address Assembler::break_address_from_return_address(Address pc) {
+  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
+}
+
+
 Handle<Object> Assembler::code_target_object_handle_at(Address pc) {
   return code_targets_[Memory::int32_at(pc)];
 }
@@ -224,28 +229,28 @@
   if (IsInternalReference(rmode_)) {
     // Absolute code pointer inside code object moves with the code object.
     Memory::Address_at(pc_) += static_cast<int32_t>(delta);
-    if (flush_icache) CPU::FlushICache(pc_, sizeof(Address));
+    if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)) {
     Memory::int32_at(pc_) -= static_cast<int32_t>(delta);
-    if (flush_icache) CPU::FlushICache(pc_, sizeof(int32_t));
+    if (flush_icache) CpuFeatures::FlushICache(pc_, sizeof(int32_t));
   } else if (rmode_ == CODE_AGE_SEQUENCE) {
     if (*pc_ == kCallOpcode) {
       int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
       *p -= static_cast<int32_t>(delta);  // Relocate entry.
-      if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+      if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
     }
   }
 }
 
 
 Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   return Assembler::target_address_at(pc_, host_);
 }
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
                               || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(pc_);
@@ -270,7 +275,7 @@
 void RelocInfo::set_target_address(Address target,
                                    WriteBarrierMode write_barrier_mode,
                                    ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
       IsCodeTarget(rmode_)) {
@@ -282,13 +287,13 @@
 
 
 Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Memory::Object_at(pc_);
 }
 
 
 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   if (rmode_ == EMBEDDED_OBJECT) {
     return Memory::Object_Handle_at(pc_);
   } else {
@@ -298,7 +303,7 @@
 
 
 Address RelocInfo::target_reference() {
-  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
   return Memory::Address_at(pc_);
 }
 
@@ -306,11 +311,10 @@
 void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  ASSERT(!target->IsConsString());
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
@@ -322,7 +326,7 @@
 
 
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   return origin->runtime_entry_at(pc_);
 }
 
@@ -330,7 +334,7 @@
 void RelocInfo::set_target_runtime_entry(Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target) {
     set_target_address(target, write_barrier_mode, icache_flush_mode);
   }
@@ -338,14 +342,14 @@
 
 
 Handle<Cell> RelocInfo::target_cell_handle() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = Memory::Address_at(pc_);
   return Handle<Cell>(reinterpret_cast<Cell**>(address));
 }
 
 
 Cell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   return Cell::FromValueAddress(Memory::Address_at(pc_));
 }
 
@@ -353,11 +357,11 @@
 void RelocInfo::set_target_cell(Cell* cell,
                                 WriteBarrierMode write_barrier_mode,
                                 ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL) {
@@ -399,15 +403,15 @@
 
 
 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
   return origin->code_target_object_handle_at(pc_ + 1);
 }
 
 
 Code* RelocInfo::code_age_stub() {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
   return Code::GetCodeFromTargetAddress(
       Assembler::target_address_at(pc_ + 1, host_));
 }
@@ -415,15 +419,15 @@
 
 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(*pc_ == kCallOpcode);
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
                                    icache_flush_mode);
 }
 
 
 Address RelocInfo::call_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return Memory::Address_at(
       pc_ + Assembler::kRealPatchReturnSequenceAddressOffset);
@@ -431,12 +435,12 @@
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Memory::Address_at(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset) =
       target;
-  CPU::FlushICache(pc_ + Assembler::kRealPatchReturnSequenceAddressOffset,
-                   sizeof(Address));
+  CpuFeatures::FlushICache(
+      pc_ + Assembler::kRealPatchReturnSequenceAddressOffset, sizeof(Address));
   if (host() != NULL) {
     Object* target_code = Code::GetCodeFromTargetAddress(target);
     host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
@@ -456,7 +460,7 @@
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return reinterpret_cast<Object**>(
       pc_ + Assembler::kPatchReturnSequenceAddressOffset);
@@ -467,14 +471,14 @@
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     visitor->VisitEmbeddedPointer(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::CELL) {
     visitor->VisitCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     visitor->VisitCodeAgeSequence(this);
   } else if (((RelocInfo::IsJSReturn(mode) &&
@@ -494,14 +498,14 @@
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitEmbeddedPointer(heap, this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::CELL) {
     StaticVisitor::VisitCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     StaticVisitor::VisitCodeAgeSequence(heap, this);
   } else if (heap->isolate()->debug()->has_break_points() &&
@@ -520,7 +524,7 @@
 // Implementation of Operand
 
 void Operand::set_modrm(int mod, Register rm_reg) {
-  ASSERT(is_uint2(mod));
+  DCHECK(is_uint2(mod));
   buf_[0] = mod << 6 | rm_reg.low_bits();
   // Set REX.B to the high bit of rm.code().
   rex_ |= rm_reg.high_bit();
@@ -528,26 +532,26 @@
 
 
 void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
-  ASSERT(len_ == 1);
-  ASSERT(is_uint2(scale));
+  DCHECK(len_ == 1);
+  DCHECK(is_uint2(scale));
   // Use SIB with no index register only for base rsp or r12. Otherwise we
   // would skip the SIB byte entirely.
-  ASSERT(!index.is(rsp) || base.is(rsp) || base.is(r12));
+  DCHECK(!index.is(rsp) || base.is(rsp) || base.is(r12));
   buf_[1] = (scale << 6) | (index.low_bits() << 3) | base.low_bits();
   rex_ |= index.high_bit() << 1 | base.high_bit();
   len_ = 2;
 }
 
 void Operand::set_disp8(int disp) {
-  ASSERT(is_int8(disp));
-  ASSERT(len_ == 1 || len_ == 2);
+  DCHECK(is_int8(disp));
+  DCHECK(len_ == 1 || len_ == 2);
   int8_t* p = reinterpret_cast<int8_t*>(&buf_[len_]);
   *p = disp;
   len_ += sizeof(int8_t);
 }
 
 void Operand::set_disp32(int disp) {
-  ASSERT(len_ == 1 || len_ == 2);
+  DCHECK(len_ == 1 || len_ == 2);
   int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
   *p = disp;
   len_ += sizeof(int32_t);
diff --git a/src/x64/assembler-x64.cc b/src/x64/assembler-x64.cc
index 59b027f..4f8d5b1 100644
--- a/src/x64/assembler-x64.cc
+++ b/src/x64/assembler-x64.cc
@@ -6,6 +6,7 @@
 
 #if V8_TARGET_ARCH_X64
 
+#include "src/base/bits.h"
 #include "src/macro-assembler.h"
 #include "src/serialize.h"
 
@@ -16,7 +17,7 @@
 // Implementation of CpuFeatures
 
 void CpuFeatures::ProbeImpl(bool cross_compile) {
-  CPU cpu;
+  base::CPU cpu;
   CHECK(cpu.has_sse2());  // SSE2 support is mandatory.
   CHECK(cpu.has_cmov());  // CMOV support is mandatory.
 
@@ -57,7 +58,7 @@
   patcher.masm()->call(kScratchRegister);
 
   // Check that the size of the code generated is as expected.
-  ASSERT_EQ(Assembler::kCallSequenceLength,
+  DCHECK_EQ(Assembler::kCallSequenceLength,
             patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
 
   // Add the requested number of int3 instructions after the call.
@@ -74,7 +75,7 @@
   }
 
   // Indicate that code has changed.
-  CPU::FlushICache(pc_, instruction_count);
+  CpuFeatures::FlushICache(pc_, instruction_count);
 }
 
 
@@ -118,7 +119,7 @@
                  Register index,
                  ScaleFactor scale,
                  int32_t disp) : rex_(0) {
-  ASSERT(!index.is(rsp));
+  DCHECK(!index.is(rsp));
   len_ = 1;
   set_sib(scale, index, base);
   if (disp == 0 && !base.is(rbp) && !base.is(r13)) {
@@ -138,7 +139,7 @@
 Operand::Operand(Register index,
                  ScaleFactor scale,
                  int32_t disp) : rex_(0) {
-  ASSERT(!index.is(rsp));
+  DCHECK(!index.is(rsp));
   len_ = 1;
   set_modrm(0, rsp);
   set_sib(scale, index, rbp);
@@ -147,10 +148,10 @@
 
 
 Operand::Operand(const Operand& operand, int32_t offset) {
-  ASSERT(operand.len_ >= 1);
+  DCHECK(operand.len_ >= 1);
   // Operand encodes REX ModR/M [SIB] [Disp].
   byte modrm = operand.buf_[0];
-  ASSERT(modrm < 0xC0);  // Disallow mode 3 (register target).
+  DCHECK(modrm < 0xC0);  // Disallow mode 3 (register target).
   bool has_sib = ((modrm & 0x07) == 0x04);
   byte mode = modrm & 0xC0;
   int disp_offset = has_sib ? 2 : 1;
@@ -161,14 +162,14 @@
   int32_t disp_value = 0;
   if (mode == 0x80 || is_baseless) {
     // Mode 2 or mode 0 with rbp/r13 as base: Word displacement.
-    disp_value = *BitCast<const int32_t*>(&operand.buf_[disp_offset]);
+    disp_value = *bit_cast<const int32_t*>(&operand.buf_[disp_offset]);
   } else if (mode == 0x40) {
     // Mode 1: Byte displacement.
     disp_value = static_cast<signed char>(operand.buf_[disp_offset]);
   }
 
   // Write new operand with same registers, but with modified displacement.
-  ASSERT(offset >= 0 ? disp_value + offset > disp_value
+  DCHECK(offset >= 0 ? disp_value + offset > disp_value
                      : disp_value + offset < disp_value);  // No overflow.
   disp_value += offset;
   rex_ = operand.rex_;
@@ -195,7 +196,7 @@
 
 bool Operand::AddressUsesRegister(Register reg) const {
   int code = reg.code();
-  ASSERT((buf_[0] & 0xC0) != 0xC0);  // Always a memory operand.
+  DCHECK((buf_[0] & 0xC0) != 0xC0);  // Always a memory operand.
   // Start with only low three bits of base register. Initial decoding doesn't
   // distinguish on the REX.B bit.
   int base_code = buf_[0] & 0x07;
@@ -252,12 +253,12 @@
 void Assembler::GetCode(CodeDesc* desc) {
   // Finalize code (at this point overflow() may be true, but the gap ensures
   // that we are still not overlapping instructions and relocation info).
-  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
+  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
   desc->instr_size = pc_offset();
-  ASSERT(desc->instr_size > 0);  // Zero-size code objects upset the system.
+  DCHECK(desc->instr_size > 0);  // Zero-size code objects upset the system.
   desc->reloc_size =
       static_cast<int>((buffer_ + buffer_size_) - reloc_info_writer.pos());
   desc->origin = this;
@@ -265,7 +266,7 @@
 
 
 void Assembler::Align(int m) {
-  ASSERT(IsPowerOf2(m));
+  DCHECK(base::bits::IsPowerOfTwo32(m));
   int delta = (m - (pc_offset() & (m - 1))) & (m - 1);
   Nop(delta);
 }
@@ -286,8 +287,8 @@
 
 
 void Assembler::bind_to(Label* L, int pos) {
-  ASSERT(!L->is_bound());  // Label may only be bound once.
-  ASSERT(0 <= pos && pos <= pc_offset());  // Position must be valid.
+  DCHECK(!L->is_bound());  // Label may only be bound once.
+  DCHECK(0 <= pos && pos <= pc_offset());  // Position must be valid.
   if (L->is_linked()) {
     int current = L->pos();
     int next = long_at(current);
@@ -306,7 +307,7 @@
     int fixup_pos = L->near_link_pos();
     int offset_to_next =
         static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
-    ASSERT(offset_to_next <= 0);
+    DCHECK(offset_to_next <= 0);
     int disp = pos - (fixup_pos + sizeof(int8_t));
     CHECK(is_int8(disp));
     set_byte_at(fixup_pos, disp);
@@ -326,16 +327,13 @@
 
 
 void Assembler::GrowBuffer() {
-  ASSERT(buffer_overflow());
+  DCHECK(buffer_overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
 
   // Compute new buffer size.
   CodeDesc desc;  // the new buffer
-  if (buffer_size_ < 4*KB) {
-    desc.buffer_size = 4*KB;
-  } else {
-    desc.buffer_size = 2*buffer_size_;
-  }
+  desc.buffer_size = 2 * buffer_size_;
+
   // Some internal data structures overflow for very large buffers;
   // they must ensure that kMaximalBufferSize is not too large.
   if ((desc.buffer_size > kMaximalBufferSize) ||
@@ -364,13 +362,7 @@
           desc.reloc_size);
 
   // Switch buffers.
-  if (isolate() != NULL &&
-      isolate()->assembler_spare_buffer() == NULL &&
-      buffer_size_ == kMinimalBufferSize) {
-    isolate()->set_assembler_spare_buffer(buffer_);
-  } else {
-    DeleteArray(buffer_);
-  }
+  DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
@@ -388,17 +380,17 @@
     }
   }
 
-  ASSERT(!buffer_overflow());
+  DCHECK(!buffer_overflow());
 }
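
Growth is now a straight doubling (the old 4 KB floor and the spare-buffer recycling are gone), still clamped by kMaximalBufferSize. The policy, sketched under those assumptions:

// Illustrative sketch only; the real code also accounts for
// relocation-info space and raises a fatal out-of-memory error.
function nextBufferSize(current, kMaximalBufferSize) {
  var next = 2 * current;
  if (next > kMaximalBufferSize) {
    throw new RangeError('Assembler buffer exceeds maximal size');
  }
  return next;
}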
 
 
 void Assembler::emit_operand(int code, const Operand& adr) {
-  ASSERT(is_uint3(code));
+  DCHECK(is_uint3(code));
   const unsigned length = adr.len_;
-  ASSERT(length > 0);
+  DCHECK(length > 0);
 
   // Emit updated ModR/M byte containing the given register.
-  ASSERT((adr.buf_[0] & 0x38) == 0);
+  DCHECK((adr.buf_[0] & 0x38) == 0);
   pc_[0] = adr.buf_[0] | code << 3;
 
   // Emit the rest of the encoded operand.
@@ -425,7 +417,7 @@
                               Register rm_reg,
                               int size) {
   EnsureSpace ensure_space(this);
-  ASSERT((opcode & 0xC6) == 2);
+  DCHECK((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4)  {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
     emit_rex(rm_reg, reg, size);
@@ -441,7 +433,7 @@
 
 void Assembler::arithmetic_op_16(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
-  ASSERT((opcode & 0xC6) == 2);
+  DCHECK((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4) {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
     emit(0x66);
@@ -481,7 +473,7 @@
 
 void Assembler::arithmetic_op_8(byte opcode, Register reg, Register rm_reg) {
   EnsureSpace ensure_space(this);
-  ASSERT((opcode & 0xC6) == 2);
+  DCHECK((opcode & 0xC6) == 2);
   if (rm_reg.low_bits() == 4)  {  // Forces SIB byte.
     // Swap reg and rm_reg and change opcode operand order.
     if (!rm_reg.is_byte_register() || !reg.is_byte_register()) {
@@ -583,7 +575,7 @@
                                           Immediate src) {
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(dst);
-  ASSERT(is_int8(src.value_) || is_uint8(src.value_));
+  DCHECK(is_int8(src.value_) || is_uint8(src.value_));
   emit(0x80);
   emit_operand(subcode, dst);
   emit(src.value_);
@@ -598,7 +590,7 @@
     // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst);
   }
-  ASSERT(is_int8(src.value_) || is_uint8(src.value_));
+  DCHECK(is_int8(src.value_) || is_uint8(src.value_));
   emit(0x80);
   emit_modrm(subcode, dst);
   emit(src.value_);
@@ -610,7 +602,7 @@
                       int subcode,
                       int size) {
   EnsureSpace ensure_space(this);
-  ASSERT(size == kInt64Size ? is_uint6(shift_amount.value_)
+  DCHECK(size == kInt64Size ? is_uint6(shift_amount.value_)
                             : is_uint5(shift_amount.value_));
   if (shift_amount.value_ == 1) {
     emit_rex(dst, size);
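
The uint6-versus-uint5 split mirrors the hardware: x64 masks a shift count to the operand width (count mod 64 for 64-bit operations, mod 32 for 32-bit ones), so the assembler only accepts immediates that already fit. A quick check of the two ranges with a hypothetical is_uintn helper:

    #include <cassert>

    static bool is_uintn(int x, int n) { return 0 <= x && x < (1 << n); }

    int main() {
      assert(is_uintn(31, 5) && !is_uintn(32, 5));  // 32-bit shifts: 0..31
      assert(is_uintn(63, 6) && !is_uintn(64, 6));  // 64-bit shifts: 0..63
    }
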
@@ -667,13 +659,13 @@
   emit(0xE8);
   if (L->is_bound()) {
     int offset = L->pos() - pc_offset() - sizeof(int32_t);
-    ASSERT(offset <= 0);
+    DCHECK(offset <= 0);
     emitl(offset);
   } else if (L->is_linked()) {
     emitl(L->pos());
     L->link_to(pc_offset() - sizeof(int32_t));
   } else {
-    ASSERT(L->is_unused());
+    DCHECK(L->is_unused());
     int32_t current = pc_offset();
     emitl(current);
     L->link_to(current);
@@ -682,7 +674,7 @@
 
 
 void Assembler::call(Address entry, RelocInfo::Mode rmode) {
-  ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+  DCHECK(RelocInfo::IsRuntimeEntry(rmode));
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
   // 1110 1000 #32-bit disp.
@@ -733,7 +725,7 @@
   emit(0xE8);
   Address source = pc_ + 4;
   intptr_t displacement = target - source;
-  ASSERT(is_int32(displacement));
+  DCHECK(is_int32(displacement));
   emitl(static_cast<int32_t>(displacement));
 }
 
@@ -764,7 +756,7 @@
   }
   // No need to check CpuInfo for CMOV support; it's a required part of the
   // 64-bit architecture.
-  ASSERT(cc >= 0);  // Use mov for unconditional moves.
+  DCHECK(cc >= 0);  // Use mov for unconditional moves.
   EnsureSpace ensure_space(this);
   // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
@@ -780,7 +772,7 @@
   } else if (cc == never) {
     return;
   }
-  ASSERT(cc >= 0);
+  DCHECK(cc >= 0);
   EnsureSpace ensure_space(this);
   // Opcode: REX.W 0f 40 + cc /r.
   emit_rex_64(dst, src);
@@ -796,7 +788,7 @@
   } else if (cc == never) {
     return;
   }
-  ASSERT(cc >= 0);
+  DCHECK(cc >= 0);
   EnsureSpace ensure_space(this);
   // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
@@ -812,7 +804,7 @@
   } else if (cc == never) {
     return;
   }
-  ASSERT(cc >= 0);
+  DCHECK(cc >= 0);
   EnsureSpace ensure_space(this);
   // Opcode: 0f 40 + cc /r.
   emit_optional_rex_32(dst, src);
@@ -823,7 +815,7 @@
 
 
 void Assembler::cmpb_al(Immediate imm8) {
-  ASSERT(is_int8(imm8.value_) || is_uint8(imm8.value_));
+  DCHECK(is_int8(imm8.value_) || is_uint8(imm8.value_));
   EnsureSpace ensure_space(this);
   emit(0x3c);
   emit(imm8.value_);
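
cmpb_al exists because x86 reserves shorter accumulator forms: when the destination is al (or ax/eax/rax for wider sizes), many ALU instructions drop the ModR/M byte entirely. Comparing the two encodings of an 8-bit compare against 42, as illustrative disassembly:

    3C 2A       cmp al, 42    ; accumulator form: opcode 3C + imm8, 2 bytes
    80 F9 2A    cmp cl, 42    ; generic form: 80 /7 + imm8, 3 bytes
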
@@ -901,6 +893,14 @@
 }
 
 
+void Assembler::emit_div(Register src, int size) {
+  EnsureSpace ensure_space(this);
+  emit_rex(src, size);
+  emit(0xF7);
+  emit_modrm(0x6, src);
+}
+
+
 void Assembler::emit_imul(Register src, int size) {
   EnsureSpace ensure_space(this);
   emit_rex(src, size);
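
The new emit_div shares the group-3 opcode 0xF7 with idiv, mul and neg; the operations differ only in the /digit that emit_modrm(0x6, src) places in the ModR/M reg field. Illustrative 64-bit encodings with rcx as the operand (REX.W = 0x48):

    48 F7 F1    div rcx     ; unsigned divide of rdx:rax, /6 gives ModR/M F1
    48 F7 F9    idiv rcx    ; signed divide, same opcode, /7 gives ModR/M F9
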
@@ -972,12 +972,12 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint4(cc));
+  DCHECK(is_uint4(cc));
   if (L->is_bound()) {
     const int short_size = 2;
     const int long_size  = 6;
     int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
+    DCHECK(offs <= 0);
     // Determine whether we can use 1-byte offsets for backwards branches,
     // which have a max range of 128 bytes.
 
@@ -1003,7 +1003,7 @@
     byte disp = 0x00;
     if (L->is_near_linked()) {
       int offset = L->near_link_pos() - pc_offset();
-      ASSERT(is_int8(offset));
+      DCHECK(is_int8(offset));
       disp = static_cast<byte>(offset & 0xFF);
     }
     L->link_to(pc_offset(), Label::kNear);
@@ -1015,7 +1015,7 @@
     emitl(L->pos());
     L->link_to(pc_offset() - sizeof(int32_t));
   } else {
-    ASSERT(L->is_unused());
+    DCHECK(L->is_unused());
     emit(0x0F);
     emit(0x80 | cc);
     int32_t current = pc_offset();
@@ -1026,9 +1026,9 @@
 
 
 void Assembler::j(Condition cc, Address entry, RelocInfo::Mode rmode) {
-  ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+  DCHECK(RelocInfo::IsRuntimeEntry(rmode));
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint4(cc));
+  DCHECK(is_uint4(cc));
   emit(0x0F);
   emit(0x80 | cc);
   emit_runtime_entry(entry, rmode);
@@ -1039,7 +1039,7 @@
                   Handle<Code> target,
                   RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint4(cc));
+  DCHECK(is_uint4(cc));
   // 0000 1111 1000 tttn #32-bit disp.
   emit(0x0F);
   emit(0x80 | cc);
@@ -1053,7 +1053,7 @@
   const int long_size = sizeof(int32_t);
   if (L->is_bound()) {
     int offs = L->pos() - pc_offset() - 1;
-    ASSERT(offs <= 0);
+    DCHECK(offs <= 0);
     if (is_int8(offs - short_size) && !predictable_code_size()) {
       // 1110 1011 #8-bit disp.
       emit(0xEB);
@@ -1068,7 +1068,7 @@
     byte disp = 0x00;
     if (L->is_near_linked()) {
       int offset = L->near_link_pos() - pc_offset();
-      ASSERT(is_int8(offset));
+      DCHECK(is_int8(offset));
       disp = static_cast<byte>(offset & 0xFF);
     }
     L->link_to(pc_offset(), Label::kNear);
@@ -1080,7 +1080,7 @@
     L->link_to(pc_offset() - long_size);
   } else {
     // 1110 1001 #32-bit disp.
-    ASSERT(L->is_unused());
+    DCHECK(L->is_unused());
     emit(0xE9);
     int32_t current = pc_offset();
     emitl(current);
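
The bound-label case above chooses between the 2-byte EB disp8 form and the 5-byte E9 disp32 form. Displacements are measured from the end of the instruction, hence the -1 for the opcode byte and the further subtraction of the displacement width. A sketch of the decision, ignoring the predictable_code_size() escape hatch:

    #include <cassert>

    static int JmpEncodedLength(int label_pos, int pc) {
      int offs = label_pos - pc - 1;             // account for the opcode byte
      int disp8 = offs - 1;                      // offs - short_size
      bool fits = -128 <= disp8 && disp8 < 128;  // is_int8(disp8)
      return fits ? 2 : 5;                       // EB disp8 vs E9 disp32
    }

    int main() {
      assert(JmpEncodedLength(0, 100) == 2);     // short backward jump
      assert(JmpEncodedLength(0, 400) == 5);     // too far for a disp8
    }
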
@@ -1098,9 +1098,9 @@
 
 
 void Assembler::jmp(Address entry, RelocInfo::Mode rmode) {
-  ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+  DCHECK(RelocInfo::IsRuntimeEntry(rmode));
   EnsureSpace ensure_space(this);
-  ASSERT(RelocInfo::IsRuntimeEntry(rmode));
+  DCHECK(RelocInfo::IsRuntimeEntry(rmode));
   emit(0xE9);
   emit_runtime_entry(entry, rmode);
 }
@@ -1139,7 +1139,7 @@
     emit(0xA1);
     emitp(value, mode);
   } else {
-    ASSERT(kPointerSize == kInt32Size);
+    DCHECK(kPointerSize == kInt32Size);
     emit(0xA1);
     emitp(value, mode);
     // In 64-bit mode, need to zero extend the operand to 8 bytes.
@@ -1177,6 +1177,7 @@
 void Assembler::movb(Register dst, Immediate imm) {
   EnsureSpace ensure_space(this);
   if (!dst.is_byte_register()) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(dst);
   }
   emit(0xB0 + dst.low_bits());
@@ -1187,6 +1188,7 @@
 void Assembler::movb(const Operand& dst, Register src) {
   EnsureSpace ensure_space(this);
   if (!src.is_byte_register()) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(src, dst);
   } else {
     emit_optional_rex_32(src, dst);
@@ -1271,7 +1273,7 @@
     emit(0xC7);
     emit_modrm(0x0, dst);
   } else {
-    ASSERT(size == kInt32Size);
+    DCHECK(size == kInt32Size);
     emit(0xB8 + dst.low_bits());
   }
   emit(value);
@@ -1317,13 +1319,13 @@
   emit_operand(0, dst);
   if (src->is_bound()) {
     int offset = src->pos() - pc_offset() - sizeof(int32_t);
-    ASSERT(offset <= 0);
+    DCHECK(offset <= 0);
     emitl(offset);
   } else if (src->is_linked()) {
     emitl(src->pos());
     src->link_to(pc_offset() - sizeof(int32_t));
   } else {
-    ASSERT(src->is_unused());
+    DCHECK(src->is_unused());
     int32_t current = pc_offset();
     emitl(current);
     src->link_to(current);
@@ -1394,6 +1396,22 @@
 }
 
 
+void Assembler::emit_movzxb(Register dst, Register src, int size) {
+  EnsureSpace ensure_space(this);
+  // 32 bit operations zero the top 32 bits of 64 bit registers.  Therefore
+  // there is no need to make this a 64 bit operation.
+  if (!src.is_byte_register()) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(dst, src);
+  } else {
+    emit_optional_rex_32(dst, src);
+  }
+  emit(0x0F);
+  emit(0xB6);
+  emit_modrm(dst, src);
+}
+
+
 void Assembler::emit_movzxw(Register dst, const Operand& src, int size) {
   EnsureSpace ensure_space(this);
   // 32 bit operations zero the top 32 bits of 64 bit registers.  Therefore
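
The is_byte_register() tests in the movb/movzxb paths all guard the same x64 rule: only registers 0 through 3 (al, cl, dl, bl) have prefix-free byte encodings. Touching the low byte of rsp/rbp/rsi/rdi or r8-r15 needs a REX prefix, at minimum the otherwise-empty 0x40 that the unconditional emit_rex_32 produces; without REX, encodings 4-7 would select ah/ch/dh/bh instead. As a one-line statement of the rule:

    #include <cassert>

    static bool NeedsRexForByteOp(int reg_code) { return reg_code > 3; }

    int main() {
      assert(!NeedsRexForByteOp(0));  // al: no prefix needed
      assert(NeedsRexForByteOp(6));   // sil: REX required (0x40 at minimum)
    }
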
@@ -1625,7 +1643,7 @@
 
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint16(imm16));
+  DCHECK(is_uint16(imm16));
   if (imm16 == 0) {
     emit(0xC3);
   } else {
@@ -1642,8 +1660,9 @@
     return;
   }
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint4(cc));
-  if (!reg.is_byte_register()) {  // Use x64 byte registers, where different.
+  DCHECK(is_uint4(cc));
+  if (!reg.is_byte_register()) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
     emit_rex_32(reg);
   }
   emit(0x0F);
@@ -1688,6 +1707,14 @@
 }
 
 
+void Assembler::emit_xchg(Register dst, const Operand& src, int size) {
+  EnsureSpace ensure_space(this);
+  emit_rex(dst, src, size);
+  emit(0x87);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::store_rax(void* dst, RelocInfo::Mode mode) {
   EnsureSpace ensure_space(this);
   if (kPointerSize == kInt64Size) {
@@ -1695,7 +1722,7 @@
     emit(0xA3);
     emitp(dst, mode);
   } else {
-    ASSERT(kPointerSize == kInt32Size);
+    DCHECK(kPointerSize == kInt32Size);
     emit(0xA3);
     emitp(dst, mode);
     // In 64-bit mode, need to zero extend the operand to 8 bytes.
@@ -1729,7 +1756,7 @@
 
 
 void Assembler::testb(Register reg, Immediate mask) {
-  ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
+  DCHECK(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
   if (reg.is(rax)) {
     emit(0xA8);
@@ -1747,7 +1774,7 @@
 
 
 void Assembler::testb(const Operand& op, Immediate mask) {
-  ASSERT(is_int8(mask.value_) || is_uint8(mask.value_));
+  DCHECK(is_int8(mask.value_) || is_uint8(mask.value_));
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(rax, op);
   emit(0xF6);
@@ -1895,7 +1922,7 @@
 
 
 void Assembler::fstp(int index) {
-  ASSERT(is_uint3(index));
+  DCHECK(is_uint3(index));
   EnsureSpace ensure_space(this);
   emit_farith(0xDD, 0xD8, index);
 }
@@ -1926,7 +1953,7 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(IsEnabled(SSE3));
+  DCHECK(IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(adr);
   emit(0xDB);
@@ -1935,7 +1962,7 @@
 
 
 void Assembler::fisttp_d(const Operand& adr) {
-  ASSERT(IsEnabled(SSE3));
+  DCHECK(IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(adr);
   emit(0xDD);
@@ -2188,15 +2215,15 @@
 void Assembler::sahf() {
   // TODO(X64): Test for presence. Not all 64-bit Intel CPUs have sahf
   // in 64-bit mode. Test CpuID.
-  ASSERT(IsEnabled(SAHF));
+  DCHECK(IsEnabled(SAHF));
   EnsureSpace ensure_space(this);
   emit(0x9E);
 }
 
 
 void Assembler::emit_farith(int b1, int b2, int i) {
-  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
-  ASSERT(is_uint3(i));  // illegal stack offset
+  DCHECK(is_uint8(b1) && is_uint8(b2));  // wrong opcode
+  DCHECK(is_uint3(i));  // illegal stack offset
   emit(b1);
   emit(b2 + i);
 }
@@ -2432,8 +2459,8 @@
 
 
 void Assembler::extractps(Register dst, XMMRegister src, byte imm8) {
-  ASSERT(IsEnabled(SSE4_1));
-  ASSERT(is_uint8(imm8));
+  DCHECK(IsEnabled(SSE4_1));
+  DCHECK(is_uint8(imm8));
   EnsureSpace ensure_space(this);
   emit(0x66);
   emit_optional_rex_32(src, dst);
@@ -2493,7 +2520,7 @@
 
 
 void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) {
-  ASSERT(is_uint8(imm8));
+  DCHECK(is_uint8(imm8));
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(src, dst);
   emit(0x0F);
@@ -2602,6 +2629,16 @@
 }
 
 
+void Assembler::cvttsd2siq(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF2);
+  emit_rex_64(dst, src);
+  emit(0x0F);
+  emit(0x2C);
+  emit_sse_operand(dst, src);
+}
+
+
 void Assembler::cvtlsi2sd(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   emit(0xF2);
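
Note the byte order in the new memory-operand overload: the mandatory 0xF2 prefix goes out before the REX byte, because REX must be the last prefix before the opcode, and it is REX.W that selects the 64-bit integer destination. The resulting encoding shape:

    F2 REX.W 0F 2C /r    cvttsd2si r64, xmm/m64   ; truncating double to int64
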
@@ -2835,7 +2872,7 @@
 
 void Assembler::roundsd(XMMRegister dst, XMMRegister src,
                         Assembler::RoundingMode mode) {
-  ASSERT(IsEnabled(SSE4_1));
+  DCHECK(IsEnabled(SSE4_1));
   EnsureSpace ensure_space(this);
   emit(0x66);
   emit_optional_rex_32(dst, src);
@@ -2873,6 +2910,12 @@
 }
 
 
+void Assembler::emit_sse_operand(Register reg, const Operand& adr) {
+  Register ireg = {reg.code()};
+  emit_operand(ireg, adr);
+}
+
+
 void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
   emit(0xC0 | (dst.low_bits() << 3) | src.low_bits());
 }
@@ -2903,7 +2946,7 @@
 // Relocation information implementations.
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  ASSERT(!RelocInfo::IsNone(rmode));
+  DCHECK(!RelocInfo::IsNone(rmode));
   // Don't record external references unless the heap will be serialized.
   if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
       !serializer_enabled() && !emit_debug_code()) {
@@ -2941,14 +2984,14 @@
 
 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return isolate->factory()->empty_constant_pool_array();
 }
 
 
 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return;
 }
 
diff --git a/src/x64/assembler-x64.h b/src/x64/assembler-x64.h
index 4259e9b..b2a97cc 100644
--- a/src/x64/assembler-x64.h
+++ b/src/x64/assembler-x64.h
@@ -84,13 +84,13 @@
   }
 
   static Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     Register result = { kRegisterCodeByAllocationIndex[index] };
     return result;
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "rax",
       "rbx",
@@ -116,7 +116,7 @@
   // rax, rbx, rcx and rdx are byte registers; the rest are not.
   bool is_byte_register() const { return code_ <= 3; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
@@ -201,18 +201,18 @@
   }
 
   static int ToAllocationIndex(XMMRegister reg) {
-    ASSERT(reg.code() != 0);
+    DCHECK(reg.code() != 0);
     return reg.code() - 1;
   }
 
   static XMMRegister FromAllocationIndex(int index) {
-    ASSERT(0 <= index && index < kMaxNumAllocatableRegisters);
+    DCHECK(0 <= index && index < kMaxNumAllocatableRegisters);
     XMMRegister result = { index + 1 };
     return result;
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "xmm1",
       "xmm2",
@@ -234,15 +234,15 @@
   }
 
   static XMMRegister from_code(int code) {
-    ASSERT(code >= 0);
-    ASSERT(code < kMaxNumRegisters);
+    DCHECK(code >= 0);
+    DCHECK(code < kMaxNumRegisters);
     XMMRegister r = { code };
     return r;
   }
   bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
   bool is(XMMRegister reg) const { return code_ == reg.code_; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
 
@@ -358,7 +358,7 @@
  public:
   explicit Immediate(int32_t value) : value_(value) {}
   explicit Immediate(Smi* value) {
-    ASSERT(SmiValuesAre31Bits());  // Only available for 31-bit SMI.
+    DCHECK(SmiValuesAre31Bits());  // Only available for 31-bit SMI.
     value_ = static_cast<int32_t>(reinterpret_cast<intptr_t>(value));
   }
 
@@ -437,26 +437,27 @@
 };
 
 
-#define ASSEMBLER_INSTRUCTION_LIST(V)   \
-  V(add)                                \
-  V(and)                                \
-  V(cmp)                                \
-  V(dec)                                \
-  V(idiv)                               \
-  V(imul)                               \
-  V(inc)                                \
-  V(lea)                                \
-  V(mov)                                \
-  V(movzxb)                             \
-  V(movzxw)                             \
-  V(neg)                                \
-  V(not)                                \
-  V(or)                                 \
-  V(repmovs)                            \
-  V(sbb)                                \
-  V(sub)                                \
-  V(test)                               \
-  V(xchg)                               \
+#define ASSEMBLER_INSTRUCTION_LIST(V) \
+  V(add)                              \
+  V(and)                              \
+  V(cmp)                              \
+  V(dec)                              \
+  V(idiv)                             \
+  V(div)                              \
+  V(imul)                             \
+  V(inc)                              \
+  V(lea)                              \
+  V(mov)                              \
+  V(movzxb)                           \
+  V(movzxw)                           \
+  V(neg)                              \
+  V(not)                              \
+  V(or)                               \
+  V(repmovs)                          \
+  V(sbb)                              \
+  V(sub)                              \
+  V(test)                             \
+  V(xchg)                             \
   V(xor)
 
 
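
ASSEMBLER_INSTRUCTION_LIST is an X-macro: the single new V(div) entry is enough to generate the per-size front-ends, because the header expands every entry into size-suffixed wrappers that forward to the matching emit_ helper. A compilable miniature of the pattern (hypothetical expansion; the real header uses templated name##p/l/q wrappers over kPointerSize/kInt32Size/kInt64Size):

    #include <cstdio>

    #define INSTRUCTION_LIST(V) \
      V(add)                    \
      V(div)

    void emit_add(int size) { std::printf("add, size %d\n", size); }
    void emit_div(int size) { std::printf("div, size %d\n", size); }

    #define DECLARE_INSTRUCTION(name)        \
      void name##l() { emit_##name(4); }     \
      void name##q() { emit_##name(8); }
    INSTRUCTION_LIST(DECLARE_INSTRUCTION)
    #undef DECLARE_INSTRUCTION

    int main() { divq(); }  // generated by the single V(div) entry
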
@@ -538,6 +539,9 @@
   // of that call in the instruction stream.
   static inline Address target_address_from_return_address(Address pc);
 
+  // Return the code target address of the patched debug break slot.
+  inline static Address break_address_from_return_address(Address pc);
+
   // This sets the branch destination (which is in the instruction on x64).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
@@ -549,7 +553,7 @@
     if (kPointerSize == kInt64Size) {
       return RelocInfo::NONE64;
     } else {
-      ASSERT(kPointerSize == kInt32Size);
+      DCHECK(kPointerSize == kInt32Size);
       return RelocInfo::NONE32;
     }
   }
@@ -1044,6 +1048,7 @@
   void cvttsd2si(Register dst, const Operand& src);
   void cvttsd2si(Register dst, XMMRegister src);
   void cvttsd2siq(Register dst, XMMRegister src);
+  void cvttsd2siq(Register dst, const Operand& src);
 
   void cvtlsi2sd(XMMRegister dst, const Operand& src);
   void cvtlsi2sd(XMMRegister dst, Register src);
@@ -1257,7 +1262,7 @@
     if (size == kInt64Size) {
       emit_rex_64();
     } else {
-      ASSERT(size == kInt32Size);
+      DCHECK(size == kInt32Size);
     }
   }
 
@@ -1266,7 +1271,7 @@
     if (size == kInt64Size) {
       emit_rex_64(p1);
     } else {
-      ASSERT(size == kInt32Size);
+      DCHECK(size == kInt32Size);
       emit_optional_rex_32(p1);
     }
   }
@@ -1276,7 +1281,7 @@
     if (size == kInt64Size) {
       emit_rex_64(p1, p2);
     } else {
-      ASSERT(size == kInt32Size);
+      DCHECK(size == kInt32Size);
       emit_optional_rex_32(p1, p2);
     }
   }
@@ -1302,7 +1307,7 @@
   // Emit a ModR/M byte with an operation subcode in the reg field and
   // a register in the rm_reg field.
   void emit_modrm(int code, Register rm_reg) {
-    ASSERT(is_uint3(code));
+    DCHECK(is_uint3(code));
     emit(0xC0 | code << 3 | rm_reg.low_bits());
   }
 
@@ -1312,6 +1317,7 @@
   // The first argument is the reg field, the second argument is the r/m field.
   void emit_sse_operand(XMMRegister dst, XMMRegister src);
   void emit_sse_operand(XMMRegister reg, const Operand& adr);
+  void emit_sse_operand(Register reg, const Operand& adr);
   void emit_sse_operand(XMMRegister dst, Register src);
   void emit_sse_operand(Register dst, XMMRegister src);
 
@@ -1435,6 +1441,7 @@
   // Divide edx:eax by lower 32 bits of src.  Quotient in eax, remainder in edx
   // when size is 32.
   void emit_idiv(Register src, int size);
+  void emit_div(Register src, int size);
 
   // Signed multiply instructions.
   // rdx:rax = rax * src when size is 64 or edx:eax = eax * src when size is 32.
@@ -1455,6 +1462,7 @@
   void emit_mov(const Operand& dst, Immediate value, int size);
 
   void emit_movzxb(Register dst, const Operand& src, int size);
+  void emit_movzxb(Register dst, Register src, int size);
   void emit_movzxw(Register dst, const Operand& src, int size);
   void emit_movzxw(Register dst, Register src, int size);
 
@@ -1514,9 +1522,12 @@
   void emit_test(Register reg, Immediate mask, int size);
   void emit_test(const Operand& op, Register reg, int size);
   void emit_test(const Operand& op, Immediate mask, int size);
+  void emit_test(Register reg, const Operand& op, int size) {
+    return emit_test(op, reg, size);
+  }
 
-  // Exchange two registers
   void emit_xchg(Register dst, Register src, int size);
+  void emit_xchg(Register dst, const Operand& src, int size);
 
   void emit_xor(Register dst, Register src, int size) {
     if (size == kInt64Size && dst.code() == src.code()) {
@@ -1574,7 +1585,7 @@
 #ifdef DEBUG
   ~EnsureSpace() {
     int bytes_generated = space_before_ - assembler_->available_space();
-    ASSERT(bytes_generated < assembler_->kGap);
+    DCHECK(bytes_generated < assembler_->kGap);
   }
 #endif
 
diff --git a/src/x64/builtins-x64.cc b/src/x64/builtins-x64.cc
index fa359c5..194d8a6 100644
--- a/src/x64/builtins-x64.cc
+++ b/src/x64/builtins-x64.cc
@@ -6,10 +6,10 @@
 
 #if V8_TARGET_ARCH_X64
 
+#include "src/code-factory.h"
 #include "src/codegen.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -41,7 +41,7 @@
     __ Push(rdi);
     __ PushReturnAddressFrom(kScratchRegister);
   } else {
-    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
   // JumpToExternalReference expects rax to contain the number of arguments
@@ -91,7 +91,7 @@
   __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
   __ j(above_equal, &ok);
 
-  CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
   GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
@@ -109,7 +109,7 @@
   // -----------------------------------
 
   // Should never create mementos for api functions.
-  ASSERT(!is_api_function || !create_memento);
+  DCHECK(!is_api_function || !create_memento);
 
   // Enter a construct frame.
   {
@@ -144,7 +144,7 @@
       // rdi: constructor
       __ movp(rax, FieldOperand(rdi, JSFunction::kPrototypeOrInitialMapOffset));
       // Will both indicate a NULL and a Smi
-      ASSERT(kSmiTag == 0);
+      DCHECK(kSmiTag == 0);
       __ JumpIfSmi(rax, &rt_call);
       // rdi: constructor
       // rax: initial map (if proven valid below)
@@ -180,7 +180,7 @@
         __ Push(rdi);
 
         __ Push(rdi);  // constructor
-        __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
 
         __ Pop(rdi);
         __ Pop(rax);
@@ -360,9 +360,9 @@
     __ movp(rdi, Operand(rsp, offset));
     __ Push(rdi);
     if (create_memento) {
-      __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
     } else {
-      __ CallRuntime(Runtime::kHiddenNewObject, 1);
+      __ CallRuntime(Runtime::kNewObject, 1);
     }
     __ movp(rbx, rax);  // store result in rbx
 
@@ -609,8 +609,8 @@
 }
 
 
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
   GenerateTailCallToReturnedCode(masm);
 }
 
@@ -625,7 +625,7 @@
   // Whether to compile in a background thread.
   __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
 
-  __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
   // Restore receiver.
   __ Pop(rdi);
 }
@@ -726,7 +726,7 @@
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ Pushad();
-    __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0, save_doubles);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ Popad();
     // Tear down internal frame.
   }
@@ -755,7 +755,7 @@
     // Pass the deoptimization type to the runtime system.
     __ Push(Smi::FromInt(static_cast<int>(type)));
 
-    __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
     // Tear down internal frame.
   }
 
@@ -828,7 +828,7 @@
   // 3a. Patch the first argument if necessary when calling a function.
   Label shift_arguments;
   __ Set(rdx, 0);  // indicate regular JS_FUNCTION
-  { Label convert_to_object, use_global_receiver, patch_receiver;
+  { Label convert_to_object, use_global_proxy, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
@@ -849,9 +849,9 @@
     __ JumpIfSmi(rbx, &convert_to_object, Label::kNear);
 
     __ CompareRoot(rbx, Heap::kNullValueRootIndex);
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
 
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
     __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, rcx);
@@ -877,10 +877,10 @@
     __ movp(rdi, args.GetReceiverOperand());
     __ jmp(&patch_receiver, Label::kNear);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ movp(rbx,
             Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-    __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalProxyOffset));
 
     __ bind(&patch_receiver);
     __ movp(args.GetArgumentOperand(1), rbx);
@@ -1024,7 +1024,7 @@
     __ movp(rsi, FieldOperand(rdi, JSFunction::kContextOffset));
 
     // Do not transform the receiver for strict mode functions.
-    Label call_to_object, use_global_receiver;
+    Label call_to_object, use_global_proxy;
     __ movp(rdx, FieldOperand(rdi, JSFunction::kSharedFunctionInfoOffset));
     __ testb(FieldOperand(rdx, SharedFunctionInfo::kStrictModeByteOffset),
              Immediate(1 << SharedFunctionInfo::kStrictModeBitWithinByte));
@@ -1038,9 +1038,9 @@
     // Compute the receiver in sloppy mode.
     __ JumpIfSmi(rbx, &call_to_object, Label::kNear);
     __ CompareRoot(rbx, Heap::kNullValueRootIndex);
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     __ CompareRoot(rbx, Heap::kUndefinedValueRootIndex);
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
 
     // If given receiver is already a JavaScript object then there's no
     // reason for converting it.
@@ -1055,10 +1055,10 @@
     __ movp(rbx, rax);
     __ jmp(&push_receiver, Label::kNear);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ movp(rbx,
             Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-    __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalReceiverOffset));
+    __ movp(rbx, FieldOperand(rbx, GlobalObject::kGlobalProxyOffset));
 
     // Push the receiver.
     __ bind(&push_receiver);
@@ -1066,14 +1066,18 @@
 
     // Copy all arguments from the array to the stack.
     Label entry, loop;
-    __ movp(rax, Operand(rbp, kIndexOffset));
+    Register receiver = LoadDescriptor::ReceiverRegister();
+    Register key = LoadDescriptor::NameRegister();
+    __ movp(key, Operand(rbp, kIndexOffset));
     __ jmp(&entry);
     __ bind(&loop);
-    __ movp(rdx, Operand(rbp, kArgumentsOffset));  // load arguments
+    __ movp(receiver, Operand(rbp, kArgumentsOffset));  // load arguments
 
     // Use inline caching to speed up access to arguments.
-    Handle<Code> ic =
-        masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    if (FLAG_vector_ics) {
+      __ Move(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(0));
+    }
+    Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
     __ Call(ic, RelocInfo::CODE_TARGET);
     // It is important that we do not have a test instruction after the
     // call.  A test instruction after the call is used to indicate that
@@ -1083,19 +1087,19 @@
     // Push the nth argument.
     __ Push(rax);
 
-    // Update the index on the stack and in register rax.
-    __ movp(rax, Operand(rbp, kIndexOffset));
-    __ SmiAddConstant(rax, rax, Smi::FromInt(1));
-    __ movp(Operand(rbp, kIndexOffset), rax);
+    // Update the index on the stack and in register key.
+    __ movp(key, Operand(rbp, kIndexOffset));
+    __ SmiAddConstant(key, key, Smi::FromInt(1));
+    __ movp(Operand(rbp, kIndexOffset), key);
 
     __ bind(&entry);
-    __ cmpp(rax, Operand(rbp, kLimitOffset));
+    __ cmpp(key, Operand(rbp, kLimitOffset));
     __ j(not_equal, &loop);
 
     // Call the function.
     Label call_proxy;
     ParameterCount actual(rax);
-    __ SmiToInteger32(rax, rax);
+    __ SmiToInteger32(rax, key);
     __ movp(rdi, Operand(rbp, kFunctionOffset));
     __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
     __ j(not_equal, &call_proxy);
@@ -1505,7 +1509,7 @@
   __ j(above_equal, &ok);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+    __ CallRuntime(Runtime::kStackGuard, 0);
   }
   __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
          RelocInfo::CODE_TARGET);
diff --git a/src/x64/code-stubs-x64.cc b/src/x64/code-stubs-x64.cc
index e31bf98..a625269 100644
--- a/src/x64/code-stubs-x64.cc
+++ b/src/x64/code-stubs-x64.cc
@@ -8,462 +8,106 @@
 
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
 #include "src/regexp-macro-assembler.h"
-#include "src/stub-cache.h"
 #include "src/runtime.h"
 
 namespace v8 {
 namespace internal {
 
 
-void FastNewClosureStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rbx };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdi };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax, rbx, rcx };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  static Representation representations[] = {
-    Representation::Tagged(),
-    Representation::Smi(),
-    Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(
-          Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax, rbx, rcx, rdx };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rbx, rdx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-    FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rcx, rbx, rax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
-}
-
-
-void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void StringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax, rcx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rcx, rax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax, rbx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-}
-
-
 static void InitializeArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
-  // register state
-  // rax -- number of arguments
-  // rdi -- function
-  // rbx -- allocation site with elements kind
-  static Register registers_variable_args[] = { rdi, rbx, rax };
-  static Register registers_no_args[] = { rdi, rbx };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = rax;
-    descriptor->register_param_count_ = 3;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
-    descriptor->register_params_ = registers_variable_args;
+    descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
-  // register state
-  // rax -- number of arguments
-  // rdi -- constructor function
-  static Register registers_variable_args[] = { rdi, rax };
-  static Register registers_no_args[] = { rdi };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kInternalArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 1;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = rax;
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(rax, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 0);
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, 1);
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeArrayConstructorDescriptor(descriptor, -1);
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(CompareNilIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-     FUNCTION_ADDR(ToBooleanIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
-}
-
-
-void StoreGlobalStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rcx, rax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rax, rbx, rcx, rdx };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rcx, rdx, rax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { rdx, rax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
-    static Register registers[] = { rdi,  // JSFunction
-                                    rsi,  // context
-                                    rax,  // actual number of arguments
-                                    rbx,  // expected number of arguments
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // JSFunction
-        Representation::Tagged(),     // context
-        Representation::Integer32(),  // actual number of arguments
-        Representation::Integer32(),  // expected number of arguments
-    };
-    descriptor->register_param_count_ = 4;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::KeyedCall);
-    static Register registers[] = { rsi,  // context
-                                    rcx,  // key
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // key
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::NamedCall);
-    static Register registers[] = { rsi,  // context
-                                    rcx,  // name
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // name
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::CallHandler);
-    static Register registers[] = { rsi,  // context
-                                    rdx,  // receiver
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),  // context
-        Representation::Tagged(),  // receiver
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ApiFunctionCall);
-    static Register registers[] = { rax,  // callee
-                                    rbx,  // call_data
-                                    rcx,  // holder
-                                    rdx,  // api_function_address
-                                    rsi,  // context
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),    // callee
-        Representation::Tagged(),    // call_data
-        Representation::Tagged(),    // holder
-        Representation::External(),  // api_function_address
-        Representation::Tagged(),    // context
-    };
-    descriptor->register_param_count_ = 5;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
 #define __ ACCESS_MASM(masm)
 
 
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
   isolate()->counters()->code_stubs()->Increment();
 
-  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
-  int param_count = descriptor->register_param_count_;
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetEnvironmentParameterCount();
   {
     // Call the runtime system in a fresh internal frame.
     FrameScope scope(masm, StackFrame::INTERNAL);
-    ASSERT(descriptor->register_param_count_ == 0 ||
-           rax.is(descriptor->register_params_[param_count - 1]));
+    DCHECK(param_count == 0 ||
+           rax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
     // Push arguments
     for (int i = 0; i < param_count; ++i) {
-      __ Push(descriptor->register_params_[i]);
+      __ Push(descriptor.GetEnvironmentParameterRegister(i));
     }
-    ExternalReference miss = descriptor->miss_handler();
-    __ CallExternalReference(miss, descriptor->register_param_count_);
+    __ CallExternalReference(miss, param_count);
   }
 
   __ Ret();
@@ -471,7 +115,7 @@
 
 
 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
-  __ PushCallerSaved(save_doubles_);
+  __ PushCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
   const int argument_count = 1;
   __ PrepareCallCFunction(argument_count);
   __ LoadAddress(arg_reg_1,
@@ -481,7 +125,7 @@
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()),
       argument_count);
-  __ PopCallerSaved(save_doubles_);
+  __ PopCallerSaved(save_doubles() ? kSaveFPRegs : kDontSaveFPRegs);
   __ ret(0);
 }
 
@@ -504,7 +148,7 @@
 void DoubleToIStub::Generate(MacroAssembler* masm) {
     Register input_reg = this->source();
     Register final_result_reg = this->destination();
-    ASSERT(is_truncating());
+    DCHECK(is_truncating());
 
     Label check_negative, process_64_bits, done;
 
@@ -576,7 +220,7 @@
         __ addp(rsp, Immediate(kDoubleSize));
     }
     if (!final_result_reg.is(result_reg)) {
-        ASSERT(final_result_reg.is(rcx));
+        DCHECK(final_result_reg.is(rcx));
         __ movl(final_result_reg, result_reg);
     }
     __ popq(save_reg);
@@ -616,7 +260,8 @@
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  const Register exponent = rdx;
+  const Register exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(exponent.is(rdx));
   const Register base = rax;
   const Register scratch = rcx;
   const XMMRegister double_result = xmm3;
@@ -630,7 +275,7 @@
   __ movp(scratch, Immediate(1));
   __ Cvtlsi2sd(double_result, scratch);
 
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     Label base_is_smi, unpack_exponent;
     // The exponent and base are supplied as arguments on the stack.
     // This can only happen if the stub is called from non-optimized code.
@@ -660,7 +305,7 @@
                    Heap::kHeapNumberMapRootIndex);
     __ j(not_equal, &call_runtime);
     __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
-  } else if (exponent_type_ == TAGGED) {
+  } else if (exponent_type() == TAGGED) {
     __ JumpIfNotSmi(exponent, &exponent_not_smi, Label::kNear);
     __ SmiToInteger32(exponent, exponent);
     __ jmp(&int_exponent);
@@ -669,11 +314,13 @@
     __ movsd(double_exponent, FieldOperand(exponent, HeapNumber::kValueOffset));
   }
 
-  if (exponent_type_ != INTEGER) {
+  if (exponent_type() != INTEGER) {
     Label fast_power, try_arithmetic_simplification;
     // Detect integer exponents stored as double.
     __ DoubleToI(exponent, double_exponent, double_scratch,
-                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification);
+                 TREAT_MINUS_ZERO_AS_ZERO, &try_arithmetic_simplification,
+                 &try_arithmetic_simplification,
+                 &try_arithmetic_simplification);
     __ jmp(&int_exponent);
 
     __ bind(&try_arithmetic_simplification);
@@ -682,7 +329,7 @@
     __ cmpl(exponent, Immediate(0x1));
     __ j(overflow, &call_runtime);
 
-    if (exponent_type_ == ON_STACK) {
+    if (exponent_type() == ON_STACK) {
       // Detect square root case.  Crankshaft detects constant +/-0.5 at
       // compile time and uses DoMathPowHalf instead.  We then skip this check
       // for non-constant cases of +/-0.5 as these hardly occur.
@@ -841,10 +488,10 @@
 
   // Returning or bailing out.
   Counters* counters = isolate()->counters();
-  if (exponent_type_ == ON_STACK) {
+  if (exponent_type() == ON_STACK) {
     // The arguments are still on the stack.
     __ bind(&call_runtime);
-    __ TailCallRuntime(Runtime::kHiddenMathPow, 2, 1);
+    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
 
     // The stub is called from non-optimized code, which expects the result
     // as heap number in rax.
@@ -857,7 +504,7 @@
     __ bind(&call_runtime);
     // Move base to the correct argument register.  Exponent is already in xmm1.
     __ movsd(xmm0, double_base);
-    ASSERT(double_exponent.is(xmm1));
+    DCHECK(double_exponent.is(xmm1));
     {
       AllowExternalCallThatCantCauseGC scope(masm);
       __ PrepareCallCFunction(2);
@@ -876,35 +523,20 @@
 
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
-  Register receiver;
-  if (kind() == Code::KEYED_LOAD_IC) {
-    // ----------- S t a t e -------------
-    //  -- rax    : key
-    //  -- rdx    : receiver
-    //  -- rsp[0] : return address
-    // -----------------------------------
-    __ Cmp(rax, isolate()->factory()->prototype_string());
-    __ j(not_equal, &miss);
-    receiver = rdx;
-  } else {
-    ASSERT(kind() == Code::LOAD_IC);
-    // ----------- S t a t e -------------
-    //  -- rax    : receiver
-    //  -- rcx    : name
-    //  -- rsp[0] : return address
-    // -----------------------------------
-    receiver = rax;
-  }
+  Register receiver = LoadDescriptor::ReceiverRegister();
 
-  StubCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8, r9, &miss);
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
+                                                          r9, &miss);
   __ bind(&miss);
-  StubCompiler::TailCallBuiltin(
-      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
 }
 
 
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The key is in rdx and the parameter count is in rax.
+  DCHECK(rdx.is(ArgumentsAccessReadDescriptor::index()));
+  DCHECK(rax.is(ArgumentsAccessReadDescriptor::parameter_count()));
 
   // Check that the key is a smi.
   Label slow;
@@ -1027,35 +659,35 @@
 
   // rax = address of new object(s) (tagged)
   // rcx = argument count (untagged)
-  // Get the arguments boilerplate from the current native context into rdi.
-  Label has_mapped_parameters, copy;
+  // Get the arguments map from the current native context into rdi.
+  Label has_mapped_parameters, instantiate;
   __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
   __ testp(rbx, rbx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
 
-  const int kIndex = Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX;
+  const int kIndex = Context::SLOPPY_ARGUMENTS_MAP_INDEX;
   __ movp(rdi, Operand(rdi, Context::SlotOffset(kIndex)));
-  __ jmp(&copy, Label::kNear);
+  __ jmp(&instantiate, Label::kNear);
 
-  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX;
+  const int kAliasedIndex = Context::ALIASED_ARGUMENTS_MAP_INDEX;
   __ bind(&has_mapped_parameters);
   __ movp(rdi, Operand(rdi, Context::SlotOffset(kAliasedIndex)));
-  __ bind(&copy);
+  __ bind(&instantiate);
 
   // rax = address of new object (tagged)
   // rbx = mapped parameter count (untagged)
   // rcx = argument count (untagged)
-  // rdi = address of boilerplate object (tagged)
-  // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ movp(rdx, FieldOperand(rdi, i));
-    __ movp(FieldOperand(rax, i), rdx);
-  }
+  // rdi = address of arguments map (tagged)
+  __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
+  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
+  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
+  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
 
   // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ movp(rdx, args.GetArgumentOperand(0));
+  __ AssertNotSmi(rdx);
   __ movp(FieldOperand(rax, JSObject::kHeaderSize +
                        Heap::kArgumentsCalleeIndex * kPointerSize),
           rdx);
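 // A hedged sketch of what the three movp stores above establish. The struct
 // below is illustrative only; it mirrors the JSObject header layout implied
 // by kMapOffset/kPropertiesOffset/kElementsOffset, not V8's declarations:
 struct JSObjectHeader {
   void* map;         // <- arguments map chosen above (sloppy vs. aliased)
   void* properties;  // <- empty fixed array root
   void* elements;    // <- empty fixed array root (parameter map stored later)
 };
 // Initializing these three fields directly replaces the old word-by-word
 // boilerplate copy, so no boilerplate object has to live in the context.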
@@ -1173,7 +805,7 @@
   __ bind(&runtime);
   __ Integer32ToSmi(rcx, rcx);
   __ movp(args.GetArgumentOperand(2), rcx);  // Patch argument count.
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
 
 
@@ -1200,7 +832,38 @@
   __ movp(args.GetArgumentOperand(1), rdx);
 
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Label slow;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+  Register scratch = rax;
+  DCHECK(!scratch.is(receiver) && !scratch.is(key));
+
+  // Check that the key is an array index, that is, a Uint32.
+  STATIC_ASSERT(kSmiValueSize <= 32);
+  __ JumpUnlessNonNegativeSmi(key, &slow);
+
+  // Everything is fine; call runtime.
+  __ PopReturnAddressTo(scratch);
+  __ Push(receiver);  // receiver
+  __ Push(key);       // key
+  __ PushReturnAddressFrom(scratch);
+
+  // Perform tail call to the entry.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+                        masm->isolate()),
+      2, 1);
+
+  __ bind(&slow);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
 }
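 // A hedged sketch of the stack shuffle above: popping the return address,
 // pushing the two arguments, then pushing the return address back places
 // the arguments *under* the return address, where a tail-called runtime
 // entry expects them (the stack grows toward lower addresses):
 //   before:  rsp -> [ret]
 //   after:   rsp -> [ret][key][receiver]
 // The runtime call then returns directly to this stub's caller.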
 
 
@@ -1245,18 +908,16 @@
   // Do the allocation of both objects in one go.
   __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT);
 
-  // Get the arguments boilerplate from the current native context.
+  // Get the arguments map from the current native context.
   __ movp(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ movp(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset));
-  const int offset =
-      Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
+  const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
   __ movp(rdi, Operand(rdi, offset));
 
-  // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ movp(rbx, FieldOperand(rdi, i));
-    __ movp(FieldOperand(rax, i), rbx);
-  }
+  __ movp(FieldOperand(rax, JSObject::kMapOffset), rdi);
+  __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex);
+  __ movp(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister);
+  __ movp(FieldOperand(rax, JSObject::kElementsOffset), kScratchRegister);
 
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
@@ -1301,7 +962,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
 }
 
 
@@ -1310,7 +971,7 @@
   // time, or if regexp entry in generated code is turned off (by a runtime
   // switch or at compilation).
 #ifdef V8_INTERPRETED_REGEXP
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
 
   // Stack frame on entry.
@@ -1456,7 +1117,7 @@
   // (6) One byte sequential.  Load regexp code for one byte.
   __ bind(&seq_one_byte_string);
   // rax: RegExp data (FixedArray)
-  __ movp(r11, FieldOperand(rax, JSRegExp::kDataAsciiCodeOffset));
+  __ movp(r11, FieldOperand(rax, JSRegExp::kDataOneByteCodeOffset));
   __ Set(rcx, 1);  // Type is one byte.
 
   // (E) Carry on.  String handling is done.
@@ -1469,7 +1130,7 @@
 
   // rdi: sequential subject string (or look-alike, external string)
   // r15: original subject string
-  // rcx: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // rcx: encoding of subject string (1 if one_byte, 0 if two_byte);
   // r11: code
   // Load used arguments before starting to push arguments for call to native
   // RegExp code to avoid handling changing stack height.
@@ -1484,7 +1145,7 @@
 
   // rdi: subject string
   // rbx: previous index
-  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
+  // rcx: encoding of subject string (1 if one_byte 0 if two_byte);
   // r11: code
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = isolate()->counters();
@@ -1533,7 +1194,7 @@
 
   // rdi: subject string
   // rbx: previous index
-  // rcx: encoding of subject string (1 if ASCII 0 if two_byte);
+  // rcx: encoding of subject string (1 if one_byte 0 if two_byte);
   // r11: code
   // r14: slice offset
   // r15: original subject string
@@ -1703,7 +1364,7 @@
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 
   // Deferred code for string handling.
   // (7) Not a long external string?  If yes, go to (10).
@@ -1755,21 +1416,19 @@
 
 
 static int NegativeComparisonResult(Condition cc) {
-  ASSERT(cc != equal);
-  ASSERT((cc == less) || (cc == less_equal)
+  DCHECK(cc != equal);
+  DCHECK((cc == less) || (cc == less_equal)
       || (cc == greater) || (cc == greater_equal));
   return (cc == greater || cc == greater_equal) ? LESS : GREATER;
 }
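 // A hedged illustration of the mapping above: on an unordered (NaN) input
 // an ordered comparison must come out false, so the stub returns whichever
 // canned result falsifies the original relation (with LESS == -1 and
 // GREATER == 1, as elsewhere in V8):
 //   NegativeComparisonResult(greater)     // LESS,    so 'a > b'  is false
 //   NegativeComparisonResult(less_equal)  // GREATER, so 'a <= b' is false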
 
 
-static void CheckInputType(MacroAssembler* masm,
-                           Register input,
-                           CompareIC::State expected,
-                           Label* fail) {
+static void CheckInputType(MacroAssembler* masm, Register input,
+                           CompareICState::State expected, Label* fail) {
   Label ok;
-  if (expected == CompareIC::SMI) {
+  if (expected == CompareICState::SMI) {
     __ JumpIfNotSmi(input, fail);
-  } else if (expected == CompareIC::NUMBER) {
+  } else if (expected == CompareICState::NUMBER) {
     __ JumpIfSmi(input, &ok);
     __ CompareMap(input, masm->isolate()->factory()->heap_number_map());
     __ j(not_equal, fail);
@@ -1794,14 +1453,14 @@
 }
 
 
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   Label check_unequal_objects, done;
   Condition cc = GetCondition();
   Factory* factory = isolate()->factory();
 
   Label miss;
-  CheckInputType(masm, rdx, left_, &miss);
-  CheckInputType(masm, rax, right_, &miss);
+  CheckInputType(masm, rdx, left(), &miss);
+  CheckInputType(masm, rax, right(), &miss);
 
   // Compare two smis.
   Label non_smi, smi_done;
@@ -1946,7 +1605,7 @@
   // If one of the numbers was NaN, then the result is always false.
   // The cc is never not-equal.
   __ bind(&unordered);
-  ASSERT(cc != not_equal);
+  DCHECK(cc != not_equal);
   if (cc == less || cc == less_equal) {
     __ Set(rax, 1);
   } else {
@@ -1973,24 +1632,15 @@
 
   __ bind(&check_for_strings);
 
-  __ JumpIfNotBothSequentialAsciiStrings(
-      rdx, rax, rcx, rbx, &check_unequal_objects);
+  __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx,
+                                           &check_unequal_objects);
 
-  // Inline comparison of ASCII strings.
+  // Inline comparison of one-byte strings.
   if (cc == equal) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
-                                                     rdx,
-                                                     rax,
-                                                     rcx,
-                                                     rbx);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, rdx, rax, rcx, rbx);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
-                                                       rdx,
-                                                       rax,
-                                                       rcx,
-                                                       rbx,
-                                                       rdi,
-                                                       r8);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx,
+                                                    rdi, r8);
   }
 
 #ifdef DEBUG
@@ -2077,7 +1727,7 @@
   // function without changing the state.
   __ cmpp(rcx, rdi);
   __ j(equal, &done);
-  __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
+  __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
   __ j(equal, &done);
 
   if (!FLAG_pretenuring_call_new) {
@@ -2101,13 +1751,13 @@
 
   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
   // megamorphic.
-  __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
+  __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
   __ j(equal, &initialize);
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
   __ bind(&megamorphic);
   __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
-          TypeFeedbackInfo::MegamorphicSentinel(isolate));
+          TypeFeedbackVector::MegamorphicSentinel(isolate));
   __ jmp(&done);
 
   // An uninitialized cache is patched with the function or sentinel to
@@ -2286,7 +1936,7 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
 }
 
 
@@ -2368,14 +2018,14 @@
   // rdi - function
   // rdx - slot id (as integer)
   Label miss;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   EmitLoadTypeFeedbackVector(masm, rbx);
   __ SmiToInteger32(rdx, rdx);
 
   __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
-  __ cmpq(rdi, rcx);
+  __ cmpp(rdi, rcx);
   __ j(not_equal, &miss);
 
   __ movp(rax, Immediate(arg_count()));
@@ -2392,7 +2042,7 @@
   __ TailCallStub(&stub);
 
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+  GenerateMiss(masm);
 
   // The slow case, we need this no matter what to complete a call after a miss.
   CallFunctionNoFeedback(masm,
@@ -2407,13 +2057,12 @@
 
 void CallICStub::Generate(MacroAssembler* masm) {
   // rdi - function
-  // rbx - vector
   // rdx - slot id
   Isolate* isolate = masm->isolate();
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   StackArgumentsAccessor args(rsp, argc);
   ParameterCount actual(argc);
 
@@ -2421,12 +2070,12 @@
 
   // The checks. First, does rdi match the recorded monomorphic target?
   __ SmiToInteger32(rdx, rdx);
-  __ cmpq(rdi, FieldOperand(rbx, rdx, times_pointer_size,
+  __ cmpp(rdi, FieldOperand(rbx, rdx, times_pointer_size,
                             FixedArray::kHeaderSize));
   __ j(not_equal, &extra_checks_or_miss);
 
   __ bind(&have_js_function);
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     EmitContinueIfStrictOrNative(masm, &cont);
 
     // Load the receiver from the stack.
@@ -2445,7 +2094,7 @@
   __ bind(&slow);
   EmitSlowCase(isolate, masm, &args, argc, &non_function);
 
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     __ bind(&wrap);
     EmitWrapCase(masm, &args, &cont);
   }
@@ -2455,9 +2104,9 @@
 
   __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
                             FixedArray::kHeaderSize));
-  __ Cmp(rcx, TypeFeedbackInfo::MegamorphicSentinel(isolate));
+  __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
   __ j(equal, &slow_start);
-  __ Cmp(rcx, TypeFeedbackInfo::UninitializedSentinel(isolate));
+  __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
   __ j(equal, &miss);
 
   if (!FLAG_trace_ic) {
@@ -2466,15 +2115,14 @@
     __ AssertNotSmi(rcx);
     __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
     __ j(not_equal, &miss);
-    __ Move(FieldOperand(rbx, rdx, times_pointer_size,
-                         FixedArray::kHeaderSize),
-            TypeFeedbackInfo::MegamorphicSentinel(isolate));
+    __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+            TypeFeedbackVector::MegamorphicSentinel(isolate));
     __ jmp(&slow_start);
   }
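 // A hedged summary of the feedback-slot state machine the checks above
 // implement (the sentinels are per-isolate singleton values):
 //   uninitialized --first call-------> monomorphic (records the target)
 //   monomorphic ---different target--> megamorphic (sentinel stored above)
 //   megamorphic: skip the target check and jump straight to slow_start.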
 
   // We are here because tracing is on or we are going monomorphic.
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Miss);
+  GenerateMiss(masm);
 
   // the slow case
   __ bind(&slow_start);
@@ -2490,9 +2138,9 @@
 }
 
 
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
   // Get the receiver of the function from the stack; 1 ~ return address.
-  __ movp(rcx, Operand(rsp, (state_.arg_count() + 1) * kPointerSize));
+  __ movp(rcx, Operand(rsp, (arg_count() + 1) * kPointerSize));
 
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2505,6 +2153,9 @@
     __ Push(rdx);
 
     // Call the entry.
+    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+                                               : IC::kCallIC_Customization_Miss;
+
     ExternalReference miss = ExternalReference(IC_Utility(id),
                                                masm->isolate());
     __ CallExternalReference(miss, 4);
@@ -2555,11 +2206,11 @@
 
   // Enter the exit frame that transitions from JavaScript to C++.
 #ifdef _WIN64
-  int arg_stack_space = (result_size_ < 2 ? 2 : 4);
-#else
+  int arg_stack_space = (result_size() < 2 ? 2 : 4);
+#else   // _WIN64
   int arg_stack_space = 0;
-#endif
-  __ EnterExitFrame(arg_stack_space, save_doubles_);
+#endif  // _WIN64
+  __ EnterExitFrame(arg_stack_space, save_doubles());
 
   // rbx: pointer to builtin function  (C callee-saved).
   // rbp: frame pointer of exit frame  (restored after C call).
@@ -2581,14 +2232,14 @@
   // Windows 64-bit ABI passes arguments in rcx, rdx, r8, r9.
   // Pass argv and argc as two parameters. The arguments object will
   // be created by stubs declared by DECLARE_RUNTIME_FUNCTION().
-  if (result_size_ < 2) {
+  if (result_size() < 2) {
     // Pass a pointer to the Arguments object as the first argument.
     // Return result in single register (rax).
     __ movp(rcx, r14);  // argc.
     __ movp(rdx, r15);  // argv.
     __ Move(r8, ExternalReference::isolate_address(isolate()));
   } else {
-    ASSERT_EQ(2, result_size_);
+    DCHECK_EQ(2, result_size());
     // Pass a pointer to the result location as the first argument.
     __ leap(rcx, StackSpaceOperand(2));
     // Pass a pointer to the Arguments object as the second argument.
@@ -2602,21 +2253,21 @@
   __ movp(rdi, r14);  // argc.
   __ movp(rsi, r15);  // argv.
   __ Move(rdx, ExternalReference::isolate_address(isolate()));
-#endif
+#endif  // _WIN64
   __ call(rbx);
   // Result is in rax - do not destroy this register!
 
 #ifdef _WIN64
   // If return value is on the stack, pop it to registers.
-  if (result_size_ > 1) {
-    ASSERT_EQ(2, result_size_);
+  if (result_size() > 1) {
+    DCHECK_EQ(2, result_size());
     // Read result values stored on stack. Result is stored
     // above the four argument mirror slots and the two
     // Arguments object slots.
     __ movq(rax, Operand(rsp, 6 * kRegisterSize));
     __ movq(rdx, Operand(rsp, 7 * kRegisterSize));
   }
-#endif
+#endif  // _WIN64
 
   // Runtime functions should not return 'the hole'.  Allowing it to escape may
   // lead to crashes in the IC code later.
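 // A hedged note on result_size(): under the SysV x86-64 ABI a 16-byte,
 // two-pointer struct comes back in rax:rdx, while the Microsoft x64 ABI
 // returns aggregates larger than 8 bytes through a hidden pointer passed
 // as the first argument. ObjectPair is a stand-in for V8's two-word
 // runtime result type:
 struct ObjectPair { void* x; void* y; };  // 16 bytes
 // Win64: the callee writes through the hidden pointer into the reserved
 // stack slots, and the stub reloads rax/rdx from them afterwards, which is
 // exactly what the movq pair above does.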
@@ -2650,7 +2301,7 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame(save_doubles_);
+  __ LeaveExitFrame(save_doubles());
   __ ret(0);
 
   // Handling of exception.
@@ -2679,7 +2330,7 @@
 }
 
 
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
   Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
 
@@ -2692,7 +2343,7 @@
     __ movp(rbp, rsp);
 
     // Push the stack frame type marker twice.
-    int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+    int marker = type();
     // Scratch register is neither callee-save, nor an argument register on any
     // platform. It's free to use at this point.
     // Cannot use smi-register for loading yet.
@@ -2782,7 +2433,7 @@
   // external reference instead of inlining the call target address directly
   // in the code, because the builtin stubs may not have been generated yet
   // at the time this code is generated.
-  if (is_construct) {
+  if (type() == StackFrame::ENTRY_CONSTRUCT) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                       isolate());
     __ Load(rax, construct_entry);
@@ -2859,6 +2510,13 @@
   // is an instance of the function and anything else to
   // indicate that the value is not an instance.
 
+  // Fixed register usage throughout the stub.
+  Register object = rax;     // Object (lhs).
+  Register map = rbx;        // Map of the object.
+  Register function = rdx;   // Function (rhs).
+  Register prototype = rdi;  // Prototype of the function.
+  Register scratch = rcx;
+
   static const int kOffsetToMapCheckValue = 2;
   static const int kOffsetToResultValue = kPointerSize == kInt64Size ? 18 : 14;
   // The last 4 bytes of the instruction sequence
@@ -2873,85 +2531,88 @@
   // before the offset of the hole value in the root array.
   static const unsigned int kWordBeforeResultValue =
       kPointerSize == kInt64Size ? 0x458B4906 : 0x458B4106;
-  // Only the inline check flag is supported on X64.
-  ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck());
+
   int extra_argument_offset = HasCallSiteInlineCheck() ? 1 : 0;
 
-  // Get the object - go slow case if it's a smi.
+  DCHECK_EQ(object.code(), InstanceofStub::left().code());
+  DCHECK_EQ(function.code(), InstanceofStub::right().code());
+
+  // Get the object and function - they are always both needed.
+  // Go slow case if the object is a smi.
   Label slow;
   StackArgumentsAccessor args(rsp, 2 + extra_argument_offset,
                               ARGUMENTS_DONT_CONTAIN_RECEIVER);
-  __ movp(rax, args.GetArgumentOperand(0));
-  __ JumpIfSmi(rax, &slow);
+  if (!HasArgsInRegisters()) {
+    __ movp(object, args.GetArgumentOperand(0));
+    __ movp(function, args.GetArgumentOperand(1));
+  }
+  __ JumpIfSmi(object, &slow);
 
   // Check that the left hand side is a JS object. Leave its map in the map
   // register.
-  __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax);
+  __ CmpObjectType(object, FIRST_SPEC_OBJECT_TYPE, map);
   __ j(below, &slow);
-  __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE);
+  __ CmpInstanceType(map, LAST_SPEC_OBJECT_TYPE);
   __ j(above, &slow);
 
-  // Get the prototype of the function.
-  __ movp(rdx, args.GetArgumentOperand(1));
-  // rdx is function, rax is map.
-
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     // Look up the function and the map in the instanceof cache.
     Label miss;
-    __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
+    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
     __ j(not_equal, &miss, Label::kNear);
-    __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
     __ j(not_equal, &miss, Label::kNear);
     __ LoadRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
-    __ ret(2 * kPointerSize);
+    __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
     __ bind(&miss);
   }
 
-  __ TryGetFunctionPrototype(rdx, rbx, &slow, true);
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, &slow, true);
 
   // Check that the function prototype is a JS object.
-  __ JumpIfSmi(rbx, &slow);
-  __ CmpObjectType(rbx, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
+  __ JumpIfSmi(prototype, &slow);
+  __ CmpObjectType(prototype, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
   __ j(below, &slow);
   __ CmpInstanceType(kScratchRegister, LAST_SPEC_OBJECT_TYPE);
   __ j(above, &slow);
 
-  // Register mapping:
-  //   rax is object map.
-  //   rdx is function.
-  //   rbx is function prototype.
+  // Update the global instanceof or call site inlined cache with the current
+  // map and function. The cached answer will be set when it is known below.
   if (!HasCallSiteInlineCheck()) {
-    __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex);
-    __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex);
+    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
   } else {
+    // The constants for the code patching are based on push instructions
+    // at the call site.
+    DCHECK(!HasArgsInRegisters());
     // Get return address and delta to inlined map check.
     __ movq(kScratchRegister, StackOperandForReturnAddress(0));
     __ subp(kScratchRegister, args.GetArgumentOperand(2));
     if (FLAG_debug_code) {
-      __ movl(rdi, Immediate(kWordBeforeMapCheckValue));
-      __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi);
+      __ movl(scratch, Immediate(kWordBeforeMapCheckValue));
+      __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), scratch);
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheCheck);
     }
     __ movp(kScratchRegister,
             Operand(kScratchRegister, kOffsetToMapCheckValue));
-    __ movp(Operand(kScratchRegister, 0), rax);
+    __ movp(Operand(kScratchRegister, 0), map);
   }
 
-  __ movp(rcx, FieldOperand(rax, Map::kPrototypeOffset));
-
   // Loop through the prototype chain looking for the function prototype.
+  __ movp(scratch, FieldOperand(map, Map::kPrototypeOffset));
   Label loop, is_instance, is_not_instance;
   __ LoadRoot(kScratchRegister, Heap::kNullValueRootIndex);
   __ bind(&loop);
-  __ cmpp(rcx, rbx);
+  __ cmpp(scratch, prototype);
   __ j(equal, &is_instance, Label::kNear);
-  __ cmpp(rcx, kScratchRegister);
+  __ cmpp(scratch, kScratchRegister);
   // The code at is_not_instance assumes that kScratchRegister contains a
   // non-zero GCable value (the null object in this case).
   __ j(equal, &is_not_instance, Label::kNear);
-  __ movp(rcx, FieldOperand(rcx, HeapObject::kMapOffset));
-  __ movp(rcx, FieldOperand(rcx, Map::kPrototypeOffset));
+  __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
+  __ movp(scratch, FieldOperand(scratch, Map::kPrototypeOffset));
   __ jmp(&loop);
 
   __ bind(&is_instance);
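 // A hedged sketch of the loop's semantics, using a hypothetical minimal
 // object model (HeapObject/Map here are illustrative, not V8's types; the
 // real code compares against the null value root rather than a C null):
 struct Map;
 struct HeapObject { Map* map; };
 struct Map { HeapObject* prototype; };
 static bool IsInstance(HeapObject* object, HeapObject* prototype) {
   // Walk map->prototype links: reaching the function's prototype means
   // the object is an instance; falling off the chain means it is not.
   for (HeapObject* current = object->map->prototype; current != 0;
        current = current->map->prototype) {
     if (current == prototype) return true;
   }
   return false;
 }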
@@ -2960,12 +2621,15 @@
     // Store bitwise zero in the cache.  This is a Smi in GC terms.
     STATIC_ASSERT(kSmiTag == 0);
     __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+    }
   } else {
     // Store offset of true in the root array at the inline check site.
     int true_offset = 0x100 +
         (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
     // Assert it is a 1-byte signed value.
-    ASSERT(true_offset >= 0 && true_offset < 0x100);
+    DCHECK(true_offset >= 0 && true_offset < 0x100);
     __ movl(rax, Immediate(true_offset));
     __ movq(kScratchRegister, StackOperandForReturnAddress(0));
     __ subp(kScratchRegister, args.GetArgumentOperand(2));
@@ -2975,20 +2639,26 @@
       __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax);
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
     }
-    __ Set(rax, 0);
+    if (!ReturnTrueFalseObject()) {
+      __ Set(rax, 0);
+    }
   }
-  __ ret((2 + extra_argument_offset) * kPointerSize);
+  __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
+         kPointerSize);
 
   __ bind(&is_not_instance);
   if (!HasCallSiteInlineCheck()) {
     // We have to store a non-zero value in the cache.
     __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+    }
   } else {
     // Store offset of false in the root array at the inline check site.
     int false_offset = 0x100 +
         (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias;
     // Assert it is a 1-byte signed value.
-    ASSERT(false_offset >= 0 && false_offset < 0x100);
+    DCHECK(false_offset >= 0 && false_offset < 0x100);
     __ movl(rax, Immediate(false_offset));
     __ movq(kScratchRegister, StackOperandForReturnAddress(0));
     __ subp(kScratchRegister, args.GetArgumentOperand(2));
@@ -2999,36 +2669,47 @@
       __ Assert(equal, kInstanceofStubUnexpectedCallSiteCacheMov);
     }
   }
-  __ ret((2 + extra_argument_offset) * kPointerSize);
+  __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
+         kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
   __ bind(&slow);
-  if (HasCallSiteInlineCheck()) {
-    // Remove extra value from the stack.
-    __ PopReturnAddressTo(rcx);
-    __ Pop(rax);
-    __ PushReturnAddressFrom(rcx);
+  if (!ReturnTrueFalseObject()) {
+    // Tail call the builtin which returns 0 or 1.
+    DCHECK(!HasArgsInRegisters());
+    if (HasCallSiteInlineCheck()) {
+      // Remove extra value from the stack.
+      __ PopReturnAddressTo(rcx);
+      __ Pop(rax);
+      __ PushReturnAddressFrom(rcx);
+    }
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+  } else {
+    // Call the builtin and convert 0/1 to true/false.
+    {
+      FrameScope scope(masm, StackFrame::INTERNAL);
+      __ Push(object);
+      __ Push(function);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
+    Label true_value, done;
+    __ testq(rax, rax);
+    __ j(zero, &true_value, Label::kNear);
+    __ LoadRoot(rax, Heap::kFalseValueRootIndex);
+    __ jmp(&done, Label::kNear);
+    __ bind(&true_value);
+    __ LoadRoot(rax, Heap::kTrueValueRootIndex);
+    __ bind(&done);
+    __ ret(((HasArgsInRegisters() ? 0 : 2) + extra_argument_offset) *
+           kPointerSize);
   }
-  __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
 }
 
 
-// Passing arguments in registers is not supported.
-Register InstanceofStub::left() { return no_reg; }
-
-
-Register InstanceofStub::right() { return no_reg; }
-
-
 // -------------------------------------------------------------------------
 // StringCharCodeAtGenerator
 
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
-  Label flat_string;
-  Label ascii_string;
-  Label got_char_code;
-  Label sliced_string;
-
   // If the receiver is a smi, trigger the non-string case.
   __ JumpIfSmi(object_, receiver_not_string_);
 
@@ -3076,9 +2757,9 @@
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
-    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
     // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
   if (!index_.is(rax)) {
     // Save the conversion result before the pop instructions below
@@ -3103,7 +2784,7 @@
   __ Push(object_);
   __ Integer32ToSmi(index_, index_);
   __ Push(index_);
-  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
   if (!result_.is(rax)) {
     __ movp(result_, rax);
   }
@@ -3182,65 +2863,6 @@
 }
 
 
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
-                                    Register hash,
-                                    Register character,
-                                    Register scratch) {
-  // hash = (seed + character) + ((seed + character) << 10);
-  __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
-  __ SmiToInteger32(scratch, scratch);
-  __ addl(scratch, character);
-  __ movl(hash, scratch);
-  __ shll(scratch, Immediate(10));
-  __ addl(hash, scratch);
-  // hash ^= hash >> 6;
-  __ movl(scratch, hash);
-  __ shrl(scratch, Immediate(6));
-  __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
-                                            Register hash,
-                                            Register character,
-                                            Register scratch) {
-  // hash += character;
-  __ addl(hash, character);
-  // hash += hash << 10;
-  __ movl(scratch, hash);
-  __ shll(scratch, Immediate(10));
-  __ addl(hash, scratch);
-  // hash ^= hash >> 6;
-  __ movl(scratch, hash);
-  __ shrl(scratch, Immediate(6));
-  __ xorl(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
-                                       Register hash,
-                                       Register scratch) {
-  // hash += hash << 3;
-  __ leal(hash, Operand(hash, hash, times_8, 0));
-  // hash ^= hash >> 11;
-  __ movl(scratch, hash);
-  __ shrl(scratch, Immediate(11));
-  __ xorl(hash, scratch);
-  // hash += hash << 15;
-  __ movl(scratch, hash);
-  __ shll(scratch, Immediate(15));
-  __ addl(hash, scratch);
-
-  __ andl(hash, Immediate(String::kHashBitMask));
-
-  // if (hash == 0) hash = 27;
-  Label hash_not_zero;
-  __ j(not_zero, &hash_not_zero);
-  __ Set(hash, StringHasher::kZeroHash);
-  __ bind(&hash_not_zero);
-}
-
-
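 // A hedged C sketch of the string hash that the removed assembler above
 // implemented (a Jenkins-style one-at-a-time hash seeded from the heap).
 // kHashBitMask below is a stand-in for String::kHashBitMask, and 27 is
 // StringHasher::kZeroHash, per the "if (hash == 0) hash = 27" comment above:
 #include <stdint.h>
 static uint32_t StringHash(uint32_t seed, const uint8_t* chars, int length) {
   uint32_t hash = seed;
   for (int i = 0; i < length; i++) {  // GenerateHashInit / AddCharacter
     hash += chars[i];
     hash += hash << 10;
     hash ^= hash >> 6;
   }
   hash += hash << 3;                  // GenerateHashGetHash
   hash ^= hash >> 11;
   hash += hash << 15;
   const uint32_t kHashBitMask = (1u << 30) - 1;  // stand-in mask
   hash &= kHashBitMask;
   return hash != 0 ? hash : 27;       // zero is reserved for "no hash yet"
 }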
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -3356,7 +2978,7 @@
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ testb(rbx, Immediate(kStringEncodingMask));
     __ j(zero, &two_byte_slice, Label::kNear);
-    __ AllocateAsciiSlicedString(rax, rbx, r14, &runtime);
+    __ AllocateOneByteSlicedString(rax, rbx, r14, &runtime);
     __ jmp(&set_slice_header, Label::kNear);
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(rax, rbx, r14, &runtime);
@@ -3401,7 +3023,7 @@
   __ j(zero, &two_byte_sequential);
 
   // Allocate the result.
-  __ AllocateAsciiString(rax, rcx, r11, r14, r15, &runtime);
+  __ AllocateOneByteString(rax, rcx, r11, r14, r15, &runtime);
 
   // rax: result string
   // rcx: result string length
@@ -3447,7 +3069,7 @@
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 
   __ bind(&single_char);
   // rax: string
@@ -3462,11 +3084,11 @@
 }
 
 
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                                      Register left,
-                                                      Register right,
-                                                      Register scratch1,
-                                                      Register scratch2) {
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                                   Register left,
+                                                   Register right,
+                                                   Register scratch1,
+                                                   Register scratch2) {
   Register length = scratch1;
 
   // Compare lengths.
@@ -3489,8 +3111,8 @@
   // Compare characters.
   __ bind(&compare_chars);
   Label strings_not_equal;
-  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
-                                &strings_not_equal, Label::kNear);
+  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+                                  &strings_not_equal, Label::kNear);
 
   // Characters are equal.
   __ Move(rax, Smi::FromInt(EQUAL));
@@ -3503,13 +3125,9 @@
 }
 
 
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register left,
-                                                        Register right,
-                                                        Register scratch1,
-                                                        Register scratch2,
-                                                        Register scratch3,
-                                                        Register scratch4) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3, Register scratch4) {
   // Ensure that you can always subtract a string length from a non-negative
   // number (e.g. another length).
   STATIC_ASSERT(String::kMaxLength < 0x7fffffff);
@@ -3539,11 +3157,11 @@
 
   // Compare loop.
   Label result_not_equal;
-  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
-                                &result_not_equal,
-                                // In debug-code mode, SmiTest below might push
-                                // the target label outside the near range.
-                                Label::kFar);
+  GenerateOneByteCharsCompareLoop(
+      masm, left, right, min_length, scratch2, &result_not_equal,
+      // In debug-code mode, SmiTest below might push
+      // the target label outside the near range.
+      Label::kFar);
 
   // Completed loop without finding different characters.
   // Compare lengths (precomputed).
@@ -3577,14 +3195,9 @@
 }
 
 
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
-    MacroAssembler* masm,
-    Register left,
-    Register right,
-    Register length,
-    Register scratch,
-    Label* chars_not_equal,
-    Label::Distance near_jump) {
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch, Label* chars_not_equal, Label::Distance near_jump) {
   // Change index to run from -length to -1 by adding length to string
   // start. This means that the loop ends when index reaches zero, which
   // doesn't need an additional compare.
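 // A hedged C sketch of the negative-index idiom described above: biasing
 // both pointers by length lets the index run from -length up to zero, so
 // the increment doubles as the bounds check and no separate compare is
 // emitted in the loop:
 #include <stdint.h>
 static int CharsEqual(const uint8_t* left, const uint8_t* right, int length) {
   left += length;   // one past the last character
   right += length;
   for (int index = -length; index != 0; index++) {
     if (left[index] != right[index]) return 0;  // chars_not_equal
   }
   return 1;
 }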
@@ -3630,21 +3243,22 @@
 
   __ bind(&not_same);
 
-  // Check that both are sequential ASCII strings.
-  __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime);
+  // Check that both are sequential one-byte strings.
+  __ JumpIfNotBothSequentialOneByteStrings(rdx, rax, rcx, rbx, &runtime);
 
-  // Inline comparison of ASCII strings.
+  // Inline comparison of one-byte strings.
   __ IncrementCounter(counters->string_compare_native(), 1);
   // Drop arguments from the stack
   __ PopReturnAddressTo(rcx);
   __ addp(rsp, Immediate(2 * kPointerSize));
   __ PushReturnAddressFrom(rcx);
-  GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8);
+  StringHelper::GenerateCompareFlatOneByteStrings(masm, rdx, rax, rcx, rbx, rdi,
+                                                  r8);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
@@ -3671,13 +3285,13 @@
 
   // Tail call into the stub that handles binary operations with allocation
   // sites.
-  BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
   __ TailCallStub(&stub);
 }
 
 
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::SMI);
   Label miss;
   __ JumpIfNotBothSmi(rdx, rax, &miss, Label::kNear);
 
@@ -3700,17 +3314,17 @@
 }
 
 
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
 
   Label generic_stub;
   Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
 
-  if (left_ == CompareIC::SMI) {
+  if (left() == CompareICState::SMI) {
     __ JumpIfNotSmi(rdx, &miss);
   }
-  if (right_ == CompareIC::SMI) {
+  if (right() == CompareICState::SMI) {
     __ JumpIfNotSmi(rax, &miss);
   }
 
@@ -3752,12 +3366,12 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
-                     CompareIC::GENERIC);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
   __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ Cmp(rax, isolate()->factory()->undefined_value());
     __ j(not_equal, &miss);
     __ JumpIfSmi(rdx, &unordered);
@@ -3767,7 +3381,7 @@
   }
 
   __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ Cmp(rdx, isolate()->factory()->undefined_value());
     __ j(equal, &unordered);
   }
@@ -3777,9 +3391,9 @@
 }
 
 
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
-  ASSERT(GetCondition() == equal);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+  DCHECK(GetCondition() == equal);
 
   // Registers containing left and right operands respectively.
   Register left = rdx;
@@ -3807,7 +3421,7 @@
   __ cmpp(left, right);
   // Make sure rax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(rax));
+  DCHECK(right.is(rax));
   __ j(not_equal, &done, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -3820,9 +3434,9 @@
 }
 
 
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::UNIQUE_NAME);
-  ASSERT(GetCondition() == equal);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  DCHECK(GetCondition() == equal);
 
   // Registers containing left and right operands respectively.
   Register left = rdx;
@@ -3842,15 +3456,15 @@
   __ movzxbp(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzxbp(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
-  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
 
   // Unique names are compared by identity.
   Label done;
   __ cmpp(left, right);
   // Make sure rax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(rax));
+  DCHECK(right.is(rax));
   __ j(not_equal, &done, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -3863,11 +3477,11 @@
 }
 
 
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
+  bool equality = Token::IsEqualityOp(op());
 
   // Registers containing left and right operands respectively.
   Register left = rdx;
@@ -3915,21 +3529,21 @@
     __ j(not_zero, &do_compare, Label::kNear);
     // Make sure rax is non-zero. At this point input operands are
     // guaranteed to be non-zero.
-    ASSERT(right.is(rax));
+    DCHECK(right.is(rax));
     __ ret(0);
     __ bind(&do_compare);
   }
 
-  // Check that both strings are sequential ASCII.
+  // Check that both strings are sequential one-byte.
   Label runtime;
-  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+  __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
 
-  // Compare flat ASCII strings. Returns when done.
+  // Compare flat one-byte strings. Returns when done.
   if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+                                                  tmp2);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
+    StringHelper::GenerateCompareFlatOneByteStrings(
         masm, left, right, tmp1, tmp2, tmp3, kScratchRegister);
   }
 
@@ -3942,7 +3556,7 @@
   if (equality) {
     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
   } else {
-    __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
   }
 
   __ bind(&miss);
@@ -3950,8 +3564,8 @@
 }
 
 
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::OBJECT);
   Label miss;
   Condition either_smi = masm->CheckEitherSmi(rdx, rax);
   __ j(either_smi, &miss, Label::kNear);
@@ -3961,7 +3575,7 @@
   __ CmpObjectType(rdx, JS_OBJECT_TYPE, rcx);
   __ j(not_equal, &miss, Label::kNear);
 
-  ASSERT(GetCondition() == equal);
+  DCHECK(GetCondition() == equal);
   __ subp(rax, rdx);
   __ ret(0);
 
@@ -3970,7 +3584,7 @@
 }
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
   Label miss;
   Condition either_smi = masm->CheckEitherSmi(rdx, rax);
   __ j(either_smi, &miss, Label::kNear);
@@ -3990,7 +3604,7 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   {
     // Call the runtime system in a fresh internal frame.
     ExternalReference miss =
@@ -4001,7 +3615,7 @@
     __ Push(rax);
     __ Push(rdx);
     __ Push(rax);
-    __ Push(Smi::FromInt(op_));
+    __ Push(Smi::FromInt(op()));
     __ CallExternalReference(miss, 3);
 
     // Compute the entry point of the rewritten stub.
@@ -4021,7 +3635,7 @@
                                                       Register properties,
                                                       Handle<Name> name,
                                                       Register r0) {
-  ASSERT(name->IsUniqueName());
+  DCHECK(name->IsUniqueName());
   // If the names of the slots in the range from 1 to kProbes - 1 for the hash
   // value are all different from the name, and the kProbes-th slot is unused
   // (its name is the undefined value), the hash table cannot contain the
@@ -4038,12 +3652,12 @@
             Immediate(name->Hash() + NameDictionary::GetProbeOffset(i)));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ leap(index, Operand(index, index, times_2, 0));  // index *= 3.
 
     Register entity_name = r0;
     // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
+    DCHECK_EQ(kSmiTagSize, 1);
     __ movp(entity_name, Operand(properties,
                                  index,
                                  times_pointer_size,
@@ -4062,8 +3676,8 @@
 
     // Check if the entry name is not a unique name.
     __ movp(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-                           miss);
+    __ JumpIfNotUniqueNameInstanceType(
+        FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
     __ bind(&good);
   }
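 // A hedged sketch of the probe sequence walked above, assuming a power-of-
 // two capacity, kEntrySize == 3 (per the DCHECK), and V8's triangular
 // offsets for NameDictionary::GetProbeOffset(i):
 #include <stdint.h>
 static uint32_t ProbeIndex(uint32_t hash, uint32_t i, uint32_t capacity) {
   uint32_t probe_offset = (i * i + i) / 2;  // GetProbeOffset(i), assumed
   uint32_t entry = (hash + probe_offset) & (capacity - 1);
   return entry * 3;  // leap(index, index, times_2): key slot of the entry
 }
 // An undefined key in that slot means the name is absent; a matching key is
 // a hit. Only the first few probes are generated inline before bailing out.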
 
@@ -4089,10 +3703,10 @@
                                                       Register name,
                                                       Register r0,
                                                       Register r1) {
-  ASSERT(!elements.is(r0));
-  ASSERT(!elements.is(r1));
-  ASSERT(!name.is(r0));
-  ASSERT(!name.is(r1));
+  DCHECK(!elements.is(r0));
+  DCHECK(!elements.is(r1));
+  DCHECK(!name.is(r0));
+  DCHECK(!name.is(r1));
 
   __ AssertName(name);
 
@@ -4109,7 +3723,7 @@
     __ andp(r1, r0);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ leap(r1, Operand(r1, r1, times_2, 0));  // r1 = r1 * 3
 
     // Check if the key is identical to the name.
@@ -4149,9 +3763,9 @@
 
   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
 
-  Register scratch = result_;
+  Register scratch = result();
 
-  __ SmiToInteger32(scratch, FieldOperand(dictionary_, kCapacityOffset));
+  __ SmiToInteger32(scratch, FieldOperand(dictionary(), kCapacityOffset));
   __ decl(scratch);
   __ Push(scratch);
 
@@ -4171,13 +3785,11 @@
     __ andp(scratch, Operand(rsp, 0));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
-    __ leap(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    __ leap(index(), Operand(scratch, scratch, times_2, 0));  // index *= 3.
 
     // Having undefined at this place means the name is not contained.
-    __ movp(scratch, Operand(dictionary_,
-                             index_,
-                             times_pointer_size,
+    __ movp(scratch, Operand(dictionary(), index(), times_pointer_size,
                              kElementsStartOffset - kHeapObjectTag));
 
     __ Cmp(scratch, isolate()->factory()->undefined_value());
@@ -4187,15 +3799,16 @@
     __ cmpp(scratch, args.GetArgumentOperand(0));
     __ j(equal, &in_dictionary);
 
-    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
       // If we hit a key that is not a unique name during negative
       // lookup we have to bailout as this key might be equal to the
       // key we are looking for.
 
       // Check if the entry name is not a unique name.
       __ movp(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
-                             &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(
+          FieldOperand(scratch, Map::kInstanceTypeOffset),
+          &maybe_in_dictionary);
     }
   }
 
@@ -4203,7 +3816,7 @@
   // If we are doing negative lookup then probing failure should be
   // treated as a lookup success. For positive lookup probing failure
   // should be treated as lookup failure.
-  if (mode_ == POSITIVE_LOOKUP) {
+  if (mode() == POSITIVE_LOOKUP) {
     __ movp(scratch, Immediate(0));
     __ Drop(1);
     __ ret(2 * kPointerSize);
@@ -4246,11 +3859,8 @@
   __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
   __ jmp(&skip_to_incremental_compacting, Label::kFar);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -4272,7 +3882,7 @@
 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   regs_.Save(masm);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
     Label dont_need_remembered_set;
 
     __ movp(regs_.scratch0(), Operand(regs_.address(), 0));
@@ -4292,10 +3902,7 @@
         masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
     InformIncrementalMarker(masm);
     regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
 
     __ bind(&dont_need_remembered_set);
@@ -4310,11 +3917,11 @@
 
 
 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
-  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   Register address =
       arg_reg_1.is(regs_.address()) ? kScratchRegister : regs_.address();
-  ASSERT(!address.is(regs_.object()));
-  ASSERT(!address.is(arg_reg_1));
+  DCHECK(!address.is(regs_.object()));
+  DCHECK(!address.is(arg_reg_1));
   __ Move(address, regs_.address());
   __ Move(arg_reg_1, regs_.object());
   // TODO(gc) Can we just set address arg2 in the beginning?
@@ -4328,7 +3935,7 @@
   __ CallCFunction(
       ExternalReference::incremental_marking_record_write_function(isolate()),
       argument_count);
-  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode_);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
 }
 
 
@@ -4361,10 +3968,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -4406,10 +4010,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
-                           save_fp_regs_mode_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -4511,14 +4112,27 @@
   __ movp(rbx, MemOperand(rbp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ PopReturnAddressTo(rcx);
-  int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
-      ? kPointerSize
-      : 0;
+  int additional_offset =
+      function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
   __ leap(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset));
   __ jmp(rcx);  // Return to IC Miss stub, continuation still on stack.
 }
 
 
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorLoadStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorKeyedLoadStub stub(isolate());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -4605,12 +4219,12 @@
 
   Label normal_sequence;
   if (mode == DONT_OVERRIDE) {
-    ASSERT(FAST_SMI_ELEMENTS == 0);
-    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-    ASSERT(FAST_ELEMENTS == 2);
-    ASSERT(FAST_HOLEY_ELEMENTS == 3);
-    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
-    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+    DCHECK(FAST_SMI_ELEMENTS == 0);
+    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+    DCHECK(FAST_ELEMENTS == 2);
+    DCHECK(FAST_HOLEY_ELEMENTS == 3);
+    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
     // Is the low bit set? If so, we are holey and that is good.
     __ testb(rdx, Immediate(1));
@@ -4721,7 +4335,7 @@
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
-  if (argument_count_ == ANY) {
+  if (argument_count() == ANY) {
     Label not_zero_case, not_one_case;
     __ testp(rax, rax);
     __ j(not_zero, &not_zero_case);
@@ -4734,11 +4348,11 @@
 
     __ bind(&not_one_case);
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
-  } else if (argument_count_ == NONE) {
+  } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count_ == ONE) {
+  } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count_ == MORE_THAN_ONE) {
+  } else if (argument_count() == MORE_THAN_ONE) {
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   } else {
     UNREACHABLE();
@@ -4899,9 +4513,9 @@
   Register return_address = rdi;
   Register context = rsi;
 
-  int argc = ArgumentBits::decode(bit_field_);
-  bool is_store = IsStoreBits::decode(bit_field_);
-  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+  int argc = this->argc();
+  bool is_store = this->is_store();
+  bool call_data_undefined = this->call_data_undefined();
 
   typedef FunctionCallbackArguments FCA;
 
@@ -4969,7 +4583,7 @@
 
   // It's okay if api_function_address == callback_arg
   // but not arguments_arg
-  ASSERT(!api_function_address.is(arguments_arg));
+  DCHECK(!api_function_address.is(arguments_arg));
 
   // v8::InvocationCallback's argument.
   __ leap(arguments_arg, StackSpaceOperand(0));
@@ -5013,7 +4627,8 @@
   Register accessor_info_arg = rsi;
   Register name_arg = rdi;
 #endif
-  Register api_function_address = r8;
+  Register api_function_address = ApiGetterDescriptor::function_address();
+  DCHECK(api_function_address.is(r8));
   Register scratch = rax;
 
   // v8::Arguments::values_ and handler for name.
@@ -5039,7 +4654,7 @@
 
   // It's okay if api_function_address == getter_arg
   // but not accessor_info_arg or name_arg
-  ASSERT(!api_function_address.is(accessor_info_arg) &&
+  DCHECK(!api_function_address.is(accessor_info_arg) &&
          !api_function_address.is(name_arg));
 
   // The name handler is counted as an argument.
diff --git a/src/x64/code-stubs-x64.h b/src/x64/code-stubs-x64.h
index 7f9420c..d17fa1b 100644
--- a/src/x64/code-stubs-x64.h
+++ b/src/x64/code-stubs-x64.h
@@ -5,31 +5,12 @@
 #ifndef V8_X64_CODE_STUBS_X64_H_
 #define V8_X64_CODE_STUBS_X64_H_
 
-#include "src/ic-inl.h"
-
 namespace v8 {
 namespace internal {
 
 
 void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
-  StoreBufferOverflowStub(Isolate* isolate, SaveFPRegsMode save_fp)
-      : PlatformCodeStub(isolate), save_doubles_(save_fp) { }
-
-  void Generate(MacroAssembler* masm);
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  SaveFPRegsMode save_doubles_;
-
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return (save_doubles_ == kSaveFPRegs) ? 1 : 0; }
-};
-
 
 class StringHelper : public AllStatic {
  public:
@@ -42,71 +23,24 @@
                                      Register count,
                                      String::Encoding encoding);
 
+  // Compares two flat one-byte strings and returns result in rax.
+  static void GenerateCompareFlatOneByteStrings(
+      MacroAssembler* masm, Register left, Register right, Register scratch1,
+      Register scratch2, Register scratch3, Register scratch4);
 
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character,
-                               Register scratch);
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character,
-                                       Register scratch);
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash,
-                                  Register scratch);
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
-  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
-  Major MajorKey() { return SubString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
-  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
-  // Compares two flat ASCII strings and returns result in rax.
-  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                              Register left,
-                                              Register right,
+  // Compares two flat one-byte strings for equality and returns result in rax.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
                                               Register scratch1,
-                                              Register scratch2,
-                                              Register scratch3,
-                                              Register scratch4);
-
-  // Compares two flat ASCII strings for equality and returns result
-  // in rax.
-  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register scratch1,
-                                            Register scratch2);
+                                              Register scratch2);
 
  private:
-  virtual Major MajorKey() { return StringCompare; }
-  virtual int MinorKey() { return 0; }
-  virtual void Generate(MacroAssembler* masm);
-
-  static void GenerateAsciiCharsCompareLoop(
-      MacroAssembler* masm,
-      Register left,
-      Register right,
-      Register length,
-      Register scratch,
-      Label* chars_not_equal,
+  static void GenerateOneByteCharsCompareLoop(
+      MacroAssembler* masm, Register left, Register right, Register length,
+      Register scratch, Label* chars_not_equal,
       Label::Distance near_jump = Label::kFar);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
@@ -114,18 +48,13 @@
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
-  NameDictionaryLookupStub(Isolate* isolate,
-                           Register dictionary,
-                           Register result,
-                           Register index,
-                           LookupMode mode)
-      : PlatformCodeStub(isolate),
-        dictionary_(dictionary),
-        result_(result),
-        index_(index),
-        mode_(mode) { }
-
-  void Generate(MacroAssembler* masm);
+  NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
+                           Register result, Register index, LookupMode mode)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = DictionaryBits::encode(dictionary.code()) |
+                 ResultBits::encode(result.code()) |
+                 IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
+  }
 
   static void GenerateNegativeLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -156,46 +85,49 @@
       NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return NameDictionaryLookup; }
-
-  int MinorKey() {
-    return DictionaryBits::encode(dictionary_.code()) |
-        ResultBits::encode(result_.code()) |
-        IndexBits::encode(index_.code()) |
-        LookupModeBits::encode(mode_);
+  Register dictionary() const {
+    return Register::from_code(DictionaryBits::decode(minor_key_));
   }
 
+  Register result() const {
+    return Register::from_code(ResultBits::decode(minor_key_));
+  }
+
+  Register index() const {
+    return Register::from_code(IndexBits::decode(minor_key_));
+  }
+
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
   class DictionaryBits: public BitField<int, 0, 4> {};
   class ResultBits: public BitField<int, 4, 4> {};
   class IndexBits: public BitField<int, 8, 4> {};
   class LookupModeBits: public BitField<LookupMode, 12, 1> {};
 
-  Register dictionary_;
-  Register result_;
-  Register index_;
-  LookupMode mode_;
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
 };
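
// The hunk above packs what were four member fields into the inherited
// minor_key_ using BitField encode/decode. A minimal sketch of that
// idiom, assuming the same shape as V8's BitField template (the real
// one also offers helpers such as update()):
template <class T, int shift, int size>
class BitFieldSketch {
 public:
  static const unsigned kMask = ((1u << size) - 1u) << shift;
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned key) {
    return static_cast<T>((key & kMask) >> shift);
  }
};
// Mirroring the constructor above: DictionaryBits occupies bits 0..3,
// so BitFieldSketch<int, 0, 4>::decode(key) recovers the register code
// that encode() stored there.
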
 
 
 class RecordWriteStub: public PlatformCodeStub {
  public:
-  RecordWriteStub(Isolate* isolate,
-                  Register object,
-                  Register value,
-                  Register address,
-                  RememberedSetAction remembered_set_action,
+  RecordWriteStub(Isolate* isolate, Register object, Register value,
+                  Register address, RememberedSetAction remembered_set_action,
                   SaveFPRegsMode fp_mode)
       : PlatformCodeStub(isolate),
-        object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
-        save_fp_regs_mode_(fp_mode),
         regs_(object,   // An input reg.
               address,  // An input reg.
               value) {  // One scratch reg.
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
   }
 
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
   enum Mode {
     STORE_BUFFER_ONLY,
     INCREMENTAL,
@@ -218,13 +150,13 @@
       return INCREMENTAL;
     }
 
-    ASSERT(first_instruction == kTwoByteNopInstruction);
+    DCHECK(first_instruction == kTwoByteNopInstruction);
 
     if (second_instruction == kFiveByteJumpInstruction) {
       return INCREMENTAL_COMPACTION;
     }
 
-    ASSERT(second_instruction == kFiveByteNopInstruction);
+    DCHECK(second_instruction == kFiveByteNopInstruction);
 
     return STORE_BUFFER_ONLY;
   }
@@ -232,25 +164,27 @@
   static void Patch(Code* stub, Mode mode) {
     switch (mode) {
       case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
+        DCHECK(GetMode(stub) == INCREMENTAL ||
                GetMode(stub) == INCREMENTAL_COMPACTION);
         stub->instruction_start()[0] = kTwoByteNopInstruction;
         stub->instruction_start()[2] = kFiveByteNopInstruction;
         break;
       case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         stub->instruction_start()[0] = kTwoByteJumpInstruction;
         break;
       case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         stub->instruction_start()[0] = kTwoByteNopInstruction;
         stub->instruction_start()[2] = kFiveByteJumpInstruction;
         break;
     }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 7);
+    DCHECK(GetMode(stub) == mode);
+    CpuFeatures::FlushICache(stub->instruction_start(), 7);
   }
 
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
  private:
   // This is a helper class for freeing up 3 scratch registers, where the third
   // is always rcx (needed for shift operations).  The input is two registers
@@ -266,7 +200,7 @@
           object_(object),
           address_(address),
           scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      DCHECK(!AreAliased(scratch0, object, address, no_reg));
       scratch1_ = GetRegThatIsNotRcxOr(object_, address_, scratch0_);
       if (scratch0.is(rcx)) {
         scratch0_ = GetRegThatIsNotRcxOr(object_, address_, scratch1_);
@@ -277,15 +211,15 @@
       if (address.is(rcx)) {
         address_ = GetRegThatIsNotRcxOr(object_, scratch0_, scratch1_);
       }
-      ASSERT(!AreAliased(scratch0_, object_, address_, rcx));
+      DCHECK(!AreAliased(scratch0_, object_, address_, rcx));
     }
 
     void Save(MacroAssembler* masm) {
-      ASSERT(!address_orig_.is(object_));
-      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      DCHECK(!address_orig_.is(object_));
+      DCHECK(object_.is(object_orig_) || address_.is(address_orig_));
+      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
+      DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
       // We don't have to save scratch0_orig_ because it was given to us as
       // a scratch register.  But if we had to switch to a different reg then
       // we should save the new scratch0_.
@@ -379,7 +313,9 @@
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  void Generate(MacroAssembler* masm);
+  virtual Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+  virtual void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -387,33 +323,40 @@
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_) |
-        SaveFPRegsModeBits::encode(save_fp_regs_mode_);
-  }
-
   void Activate(Code* code) {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
   class ObjectBits: public BitField<int, 0, 4> {};
   class ValueBits: public BitField<int, 4, 4> {};
   class AddressBits: public BitField<int, 8, 4> {};
   class RememberedSetActionBits: public BitField<RememberedSetAction, 12, 1> {};
   class SaveFPRegsModeBits: public BitField<SaveFPRegsMode, 13, 1> {};
 
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
-  SaveFPRegsMode save_fp_regs_mode_;
   Label slow_;
   RegisterAllocation regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
 };
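
// RecordWriteStub switches modes by patching its own first bytes:
// byte 0 holds either a two-byte nop or a two-byte jump, and the slot
// at offset 2 holds either a five-byte nop or a five-byte jump, which
// is exactly what GetMode() above decodes. A sketch of that decoding,
// with the opcode constants passed in as assumptions rather than V8's
// actual kTwoByteJumpInstruction/kFiveByteJumpInstruction values:
enum SketchMode { STORE_BUFFER_ONLY_M, INCREMENTAL_M, INCREMENTAL_COMPACTION_M };

SketchMode DecodeStubMode(const unsigned char* code,
                          unsigned char two_byte_jump,
                          unsigned char five_byte_jump) {
  if (code[0] == two_byte_jump) return INCREMENTAL_M;
  // Otherwise byte 0 must be the two-byte nop (DCHECKed above).
  if (code[2] == five_byte_jump) return INCREMENTAL_COMPACTION_M;
  return STORE_BUFFER_ONLY_M;  // both patch slots still hold nops
}
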
 
 
diff --git a/src/x64/codegen-x64.cc b/src/x64/codegen-x64.cc
index 0f939d9..44e1618 100644
--- a/src/x64/codegen-x64.cc
+++ b/src/x64/codegen-x64.cc
@@ -17,14 +17,14 @@
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
+  DCHECK(!masm->has_frame());
   masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
+  DCHECK(masm->has_frame());
   masm->set_has_frame(false);
 }
 
@@ -35,7 +35,8 @@
 UnaryMathFunction CreateExpFunction() {
   if (!FLAG_fast_math) return &std::exp;
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::exp;
   ExternalReference::InitializeMathExpData();
 
@@ -55,10 +56,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
 
@@ -66,9 +67,8 @@
 UnaryMathFunction CreateSqrtFunction() {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return &std::sqrt;
 
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
@@ -79,10 +79,10 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
 
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   return FUNCTION_CAST<UnaryMathFunction>(buffer);
 }
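
// CreateExpFunction and CreateSqrtFunction share one lifecycle:
// allocate writable+executable memory, assemble into it, flush the
// instruction cache (a no-op on x64, per cpu-x64.cc below), drop the
// write permission, then call through a function pointer. A minimal
// POSIX sketch of that lifecycle without the base::OS wrappers
// (error handling elided):
#include <string.h>
#include <sys/mman.h>

typedef double (*UnaryMathFn)(double);

UnaryMathFn InstallGeneratedCode(const unsigned char* code, size_t size) {
  void* buffer = mmap(NULL, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  if (buffer == MAP_FAILED) return NULL;
  memcpy(buffer, code, size);  // stands in for the MacroAssembler emit
  mprotect(buffer, size, PROT_READ | PROT_EXEC);  // like OS::ProtectCode
  return (UnaryMathFn)buffer;
}
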
 
@@ -92,9 +92,8 @@
 // Define custom fmod implementation.
 ModuloFunction CreateModuloFunction() {
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(
+      base::OS::Allocate(Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler masm(NULL, buffer, static_cast<int>(actual_size));
   // Generated code is put into a fixed, unmovable, buffer, and not into
@@ -170,7 +169,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  OS::ProtectCode(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   // Call the function from C++ through this pointer.
   return FUNCTION_CAST<ModuloFunction>(buffer);
 }
@@ -185,26 +184,29 @@
 #define __ ACCESS_MASM(masm)
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rbx    : target map
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
+  // Return address is on the stack.
+  Register scratch = rdi;
+  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
+
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, allocation_memento_found);
+    DCHECK(allocation_memento_found != NULL);
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch, allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ movp(FieldOperand(rdx, HeapObject::kMapOffset), rbx);
-  __ RecordWriteField(rdx,
+  __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
+  __ RecordWriteField(receiver,
                       HeapObject::kMapOffset,
-                      rbx,
-                      rdi,
+                      target_map,
+                      scratch,
                       kDontSaveFPRegs,
                       EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
@@ -212,14 +214,19 @@
 
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rbx    : target map
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Return address is on the stack.
+  DCHECK(receiver.is(rdx));
+  DCHECK(key.is(rcx));
+  DCHECK(value.is(rax));
+  DCHECK(target_map.is(rbx));
+
   // The fail label is not actually used since we do not allocate.
   Label allocated, new_backing_store, only_change_map, done;
 
@@ -243,7 +250,7 @@
   } else {
     // For the x32 port we have to allocate a new backing store, as SMI size
     // is not equal to double size.
-    ASSERT(kDoubleSize == 2 * kPointerSize);
+    DCHECK(kDoubleSize == 2 * kPointerSize);
     __ jmp(&new_backing_store);
   }
 
@@ -279,7 +286,7 @@
   STATIC_ASSERT(FixedDoubleArray::kHeaderSize == FixedArray::kHeaderSize);
 
   Label loop, entry, convert_hole;
-  __ movq(r15, BitCast<int64_t, uint64_t>(kHoleNanInt64));
+  __ movq(r15, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
   // r15: the-hole NaN
   __ jmp(&entry);
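
// The BitCast -> bit_cast rename above is mechanical; the helper
// itself moves the hole NaN's bit pattern from uint64_t to int64_t
// without violating strict aliasing. A sketch of the memcpy idiom
// behind it:
#include <stdint.h>
#include <string.h>

template <class Dest, class Source>
Dest BitCastSketch(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
  Dest dest;
  memcpy(&dest, &source, sizeof(dest));  // compiles to a plain move
  return dest;
}
// The immediate loaded into r15 above is effectively
// BitCastSketch<int64_t>(kHoleNanInt64).
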
 
@@ -346,14 +353,19 @@
 
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rbx    : target map
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Return address is on the stack.
+  DCHECK(receiver.is(rdx));
+  DCHECK(key.is(rcx));
+  DCHECK(value.is(rax));
+  DCHECK(target_map.is(rbx));
+
   Label loop, entry, convert_hole, gc_required, only_change_map;
 
   if (mode == TRACK_ALLOCATION_SITE) {
@@ -381,7 +393,7 @@
   __ movp(FieldOperand(r11, FixedArray::kLengthOffset), r14);
 
   // Prepare for conversion loop.
-  __ movq(rsi, BitCast<int64_t, uint64_t>(kHoleNanInt64));
+  __ movq(rsi, bit_cast<int64_t, uint64_t>(kHoleNanInt64));
   __ LoadRoot(rdi, Heap::kTheHoleValueRootIndex);
   // rsi: the-hole NaN
   // rdi: pointer to the-hole
@@ -510,7 +522,7 @@
   __ j(zero, &seq_string, Label::kNear);
 
   // Handle external strings.
-  Label ascii_external, done;
+  Label one_byte_external, done;
   if (FLAG_debug_code) {
     // Assert that we do not have a cons or slice (indirect strings) here.
     // Sequential strings have already been ruled out.
@@ -525,22 +537,22 @@
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ testb(result, Immediate(kStringEncodingMask));
   __ movp(result, FieldOperand(string, ExternalString::kResourceDataOffset));
-  __ j(not_equal, &ascii_external, Label::kNear);
+  __ j(not_equal, &one_byte_external, Label::kNear);
   // Two-byte string.
   __ movzxwl(result, Operand(result, index, times_2, 0));
   __ jmp(&done, Label::kNear);
-  __ bind(&ascii_external);
-  // Ascii string.
+  __ bind(&one_byte_external);
+  // One-byte string.
   __ movzxbl(result, Operand(result, index, times_1, 0));
   __ jmp(&done, Label::kNear);
 
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii;
+  // Dispatch on the encoding: one-byte or two-byte.
+  Label one_byte;
   __ bind(&seq_string);
   STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ testb(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii, Label::kNear);
+  __ j(not_zero, &one_byte, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
@@ -551,9 +563,9 @@
                                   SeqTwoByteString::kHeaderSize));
   __ jmp(&done, Label::kNear);
 
-  // ASCII string.
+  // One-byte string.
   // Load the byte into the result register.
-  __ bind(&ascii);
+  __ bind(&one_byte);
   __ movzxbl(result, FieldOperand(string,
                                   index,
                                   times_1,
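
// The ascii -> one_byte renames in the hunk above follow V8's
// terminology change; the dispatch itself only tests the encoding bit
// of the string's instance type, as the STATIC_ASSERTs spell out (the
// one-byte tag is the set encoding bit, the two-byte tag is zero). A
// sketch of that predicate with the mask treated as a parameter:
bool IsOneByteString(unsigned instance_type, unsigned encoding_mask) {
  // Matches "testb(result, Immediate(kStringEncodingMask))" followed
  // by "j(not_zero, &one_byte)": a nonzero masked value means one-byte.
  return (instance_type & encoding_mask) != 0;
}
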
@@ -568,11 +580,12 @@
                                    XMMRegister double_scratch,
                                    Register temp1,
                                    Register temp2) {
-  ASSERT(!input.is(result));
-  ASSERT(!input.is(double_scratch));
-  ASSERT(!result.is(double_scratch));
-  ASSERT(!temp1.is(temp2));
-  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!input.is(result));
+  DCHECK(!input.is(double_scratch));
+  DCHECK(!result.is(double_scratch));
+  DCHECK(!temp1.is(temp2));
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!masm->serializer_enabled());  // External references not serializable.
 
   Label done;
 
@@ -617,7 +630,7 @@
 
 
 CodeAgingHelper::CodeAgingHelper() {
-  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
   // The sequence of instructions that is patched out for aging code is the
   // following boilerplate stack-building prologue that is found both in
   // FUNCTION and OPTIMIZED_FUNCTION code:
@@ -638,7 +651,7 @@
 
 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
   bool result = isolate->code_aging_helper()->IsYoung(sequence);
-  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
+  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
   return result;
 }
 
@@ -665,7 +678,7 @@
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
-    CPU::FlushICache(sequence, young_length);
+    CpuFeatures::FlushICache(sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length);
@@ -677,7 +690,7 @@
 
 
 Operand StackArgumentsAccessor::GetArgumentOperand(int index) {
-  ASSERT(index >= 0);
+  DCHECK(index >= 0);
   int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0;
   int displacement_to_last_argument = base_reg_.is(rsp) ?
       kPCOnStackSize : kFPOnStackSize + kPCOnStackSize;
@@ -685,7 +698,7 @@
   if (argument_count_reg_.is(no_reg)) {
     // argument[0] is at base_reg_ + displacement_to_last_argument +
     // (argument_count_immediate_ + receiver - 1) * kPointerSize.
-    ASSERT(argument_count_immediate_ + receiver > 0);
+    DCHECK(argument_count_immediate_ + receiver > 0);
     return Operand(base_reg_, displacement_to_last_argument +
         (argument_count_immediate_ + receiver - 1 - index) * kPointerSize);
   } else {
diff --git a/src/x64/codegen-x64.h b/src/x64/codegen-x64.h
index 5faa987..0a551ee 100644
--- a/src/x64/codegen-x64.h
+++ b/src/x64/codegen-x64.h
@@ -6,7 +6,7 @@
 #define V8_X64_CODEGEN_X64_H_
 
 #include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
@@ -96,7 +96,7 @@
 
   Operand GetArgumentOperand(int index);
   Operand GetReceiverOperand() {
-    ASSERT(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
+    DCHECK(receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER);
     return GetArgumentOperand(0);
   }
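
// StackArgumentsAccessor::GetArgumentOperand (codegen-x64.cc above)
// computes a static offset when the argument count is an immediate:
// the saved return address (plus the saved frame pointer when the
// base is rbp), then one slot per argument above the requested index.
// A sketch of that arithmetic, assuming 8-byte stack slots:
int ArgumentOffsetSketch(int index, int argument_count, int receiver_slots,
                         int displacement_to_last_argument) {
  const int kSlotSize = 8;  // kPointerSize on x64
  return displacement_to_last_argument +
         (argument_count + receiver_slots - 1 - index) * kSlotSize;
}
// E.g. two arguments plus a receiver addressed off rsp (displacement
// 8 for the return address): argument 0 sits at rsp + 8 + 16 = rsp + 24.
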
 
diff --git a/src/x64/cpu-x64.cc b/src/x64/cpu-x64.cc
index ca2b89b..59a187f 100644
--- a/src/x64/cpu-x64.cc
+++ b/src/x64/cpu-x64.cc
@@ -12,13 +12,13 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
   // No need to flush the instruction cache on Intel. On Intel instruction
   // cache flushing is only necessary when multiple cores are running the same
   // code simultaneously. V8 (and JavaScript) is single threaded and when code
diff --git a/src/x64/debug-x64.cc b/src/x64/debug-x64.cc
index 4703e42..c8b7c22 100644
--- a/src/x64/debug-x64.cc
+++ b/src/x64/debug-x64.cc
@@ -23,7 +23,7 @@
 // CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x64.cc
 // for the precise return instructions sequence.
 void BreakLocationIterator::SetDebugBreakAtReturn()  {
-  ASSERT(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
+  DCHECK(Assembler::kJSReturnSequenceLength >= Assembler::kCallSequenceLength);
   rinfo()->PatchCodeWithCall(
       debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
       Assembler::kJSReturnSequenceLength - Assembler::kCallSequenceLength);
@@ -40,20 +40,20 @@
 // A debug break in the frame exit code is identified by the JS frame exit code
 // having been patched with a call instruction.
 bool Debug::IsDebugBreakAtReturn(v8::internal::RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
   return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   // Check whether the debug break slot instructions have been patched.
   return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   rinfo()->PatchCodeWithCall(
       debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry(),
       Assembler::kDebugBreakSlotLength - Assembler::kCallSequenceLength);
@@ -61,7 +61,7 @@
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
@@ -86,13 +86,13 @@
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non-object
     // values are stored as two smis, causing them to be untouched by GC.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
+    DCHECK((object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((object_regs & non_object_regs) == 0);
     for (int i = 0; i < kNumJSCallerSaved; i++) {
       int r = JSCallerSavedCode(i);
       Register reg = { r };
-      ASSERT(!reg.is(kScratchRegister));
+      DCHECK(!reg.is(kScratchRegister));
       if ((object_regs & (1 << r)) != 0) {
         __ Push(reg);
       }
@@ -162,45 +162,35 @@
 
 void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for IC load call (from ic-x64.cc).
-  // ----------- S t a t e -------------
-  //  -- rax    : receiver
-  //  -- rcx    : name
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, rax.bit() | rcx.bit(), 0, false);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false);
 }
 
 
 void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   // Register state for IC store call (from ic-x64.cc).
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : name
-  //  -- rdx    : receiver
-  // -----------------------------------
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
   Generate_DebugBreakCallHelper(
-      masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
+      masm, receiver.bit() | name.bit() | value.bit(), 0, false);
 }
 
 
 void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for keyed IC load call (from ic-x64.cc).
-  // ----------- S t a t e -------------
-  //  -- rax     : key
-  //  -- rdx     : receiver
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, rax.bit() | rdx.bit(), 0, false);
+  GenerateLoadICDebugBreak(masm);
 }
 
 
 void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // Register state for keyed IC load call (from ic-x64.cc).
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  // -----------------------------------
+  // Register state for keyed IC store call (from ic-x64.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
   Generate_DebugBreakCallHelper(
-      masm, rax.bit() | rcx.bit() | rdx.bit(), 0, false);
+      masm, receiver.bit() | name.bit() | value.bit(), 0, false);
 }
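
// The four debug-break hunks above stop hard-coding rax/rcx/rdx and
// instead ask the new call-interface descriptors which registers the
// IC uses, so the debugger and the ICs share one source of truth. A
// reduced sketch of the pattern (the register codes here are
// placeholders, not V8's actual x64 convention):
struct RegSketch {
  int code;
  unsigned bit() const { return 1u << code; }
};

struct StoreDescriptorSketch {
  static RegSketch ReceiverRegister() { return RegSketch{2}; }
  static RegSketch NameRegister() { return RegSketch{1}; }
  static RegSketch ValueRegister() { return RegSketch{0}; }
};

// The live-register mask handed to Generate_DebugBreakCallHelper is
// then derived, never hard-coded:
unsigned StoreICLiveRegisterMask() {
  return StoreDescriptorSketch::ReceiverRegister().bit() |
         StoreDescriptorSketch::NameRegister().bit() |
         StoreDescriptorSketch::ValueRegister().bit();
}
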
 
 
@@ -265,7 +255,7 @@
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
   __ Nop(Assembler::kDebugBreakSlotLength);
-  ASSERT_EQ(Assembler::kDebugBreakSlotLength,
+  DCHECK_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
 
diff --git a/src/x64/deoptimizer-x64.cc b/src/x64/deoptimizer-x64.cc
index ae3a824..16b0cdc 100644
--- a/src/x64/deoptimizer-x64.cc
+++ b/src/x64/deoptimizer-x64.cc
@@ -60,9 +60,6 @@
 #endif
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
   deopt_data->SetSharedFunctionInfo(Smi::FromInt(0));
   // For each LLazyBailout instruction insert a call to the corresponding
   // deoptimization entry.
@@ -75,9 +72,9 @@
     CodePatcher patcher(call_address, Assembler::kCallSequenceLength);
     patcher.masm()->Call(GetDeoptimizationEntry(isolate, i, LAZY),
                          Assembler::RelocInfoNone());
-    ASSERT(prev_call_address == NULL ||
+    DCHECK(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
-    ASSERT(call_address + patch_size() <= code->instruction_end());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
@@ -106,9 +103,9 @@
 
 
 void Deoptimizer::SetPlatformCompiledStubRegisters(
-    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   intptr_t handler =
-      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
   int params = descriptor->GetHandlerParameterCount();
   output_frame->SetRegister(rax.code(), params);
   output_frame->SetRegister(rbx.code(), handler);
@@ -294,7 +291,7 @@
     // Do not restore rsp, simply pop the value into the next register
     // and overwrite this afterwards.
     if (r.is(rsp)) {
-      ASSERT(i > 0);
+      DCHECK(i > 0);
       r = Register::from_code(i - 1);
     }
     __ popq(r);
@@ -317,7 +314,7 @@
     USE(start);
     __ pushq_imm32(i);
     __ jmp(&done);
-    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+    DCHECK(masm()->pc_offset() - start == table_entry_size_);
   }
   __ bind(&done);
 }
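
// Each lazy-deopt table entry above must be exactly table_entry_size_
// bytes (hence the DCHECK), because the deoptimizer later recovers the
// entry id purely from the address it was entered through. A sketch of
// that inverse mapping under the fixed-size assumption:
#include <stdint.h>

int DeoptEntryIdSketch(uintptr_t entry_address, uintptr_t table_start,
                       int table_entry_size) {
  uintptr_t offset = entry_address - table_start;
  // In spirit: DCHECK(offset % table_entry_size == 0).
  return static_cast<int>(offset / table_entry_size);
}
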
diff --git a/src/x64/disasm-x64.cc b/src/x64/disasm-x64.cc
index f4c5de8..2b8fc2d 100644
--- a/src/x64/disasm-x64.cc
+++ b/src/x64/disasm-x64.cc
@@ -3,8 +3,8 @@
 // found in the LICENSE file.
 
 #include <assert.h>
-#include <stdio.h>
 #include <stdarg.h>
+#include <stdio.h>
 
 #include "src/v8.h"
 
@@ -216,7 +216,7 @@
     OperandType op_order = bm[i].op_order_;
     id->op_order_ =
         static_cast<OperandType>(op_order & ~BYTE_SIZE_OPERAND_FLAG);
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered
     id->type = type;
     id->byte_size_operation = ((op_order & BYTE_SIZE_OPERAND_FLAG) != 0);
   }
@@ -230,7 +230,7 @@
                                      const char* mnem) {
   for (byte b = start; b <= end; b++) {
     InstructionDesc* id = &instructions_[b];
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered
     id->mnem = mnem;
     id->type = type;
     id->byte_size_operation = byte_size;
@@ -241,7 +241,7 @@
 void InstructionTable::AddJumpConditionalShort() {
   for (byte b = 0x70; b <= 0x7F; b++) {
     InstructionDesc* id = &instructions_[b];
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered
     id->mnem = NULL;  // Computed depending on condition code.
     id->type = JUMP_CONDITIONAL_SHORT_INSTR;
   }
@@ -328,7 +328,7 @@
   const InstructionTable* const instruction_table_;
 
   void setRex(byte rex) {
-    ASSERT_EQ(0x40, rex & 0xF0);
+    DCHECK_EQ(0x40, rex & 0xF0);
     rex_ = rex;
   }
 
@@ -661,7 +661,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::F6F7Instruction(byte* data) {
-  ASSERT(*data == 0xF7 || *data == 0xF6);
+  DCHECK(*data == 0xF7 || *data == 0xF6);
   byte modrm = *(data + 1);
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
@@ -680,6 +680,9 @@
       case 5:
         mnem = "imul";
         break;
+      case 6:
+        mnem = "div";
+        break;
       case 7:
         mnem = "idiv";
         break;
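
// 0xF6/0xF7 form an opcode group: the operation (not, neg, mul, imul,
// div, idiv, ...) is selected by the ModRM "reg" field, which is why
// the hunk above can add the previously missing /6 "div" case. A
// sketch of the ModRM split that get_modrm performs:
void SplitModRM(unsigned char modrm, int* mod, int* regop, int* rm) {
  *mod = (modrm >> 6) & 3;    // addressing mode
  *regop = (modrm >> 3) & 7;  // opcode extension inside a group
  *rm = modrm & 7;            // register/memory operand
}
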
@@ -747,7 +750,7 @@
       UnimplementedInstruction();
       return num_bytes;
   }
-  ASSERT_NE(NULL, mnem);
+  DCHECK_NE(NULL, mnem);
   if (op == 0xD0) {
     imm8 = 1;
   } else if (op == 0xC0) {
@@ -770,7 +773,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::JumpShort(byte* data) {
-  ASSERT_EQ(0xEB, *data);
+  DCHECK_EQ(0xEB, *data);
   byte b = *(data + 1);
   byte* dest = data + static_cast<int8_t>(b) + 2;
   AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -780,7 +783,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::JumpConditional(byte* data) {
-  ASSERT_EQ(0x0F, *data);
+  DCHECK_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   byte* dest = data + *reinterpret_cast<int32_t*>(data + 2) + 6;
   const char* mnem = conditional_code_suffix[cond];
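
// A rel32 conditional jump is six bytes (0F 8x plus a 32-bit
// immediate), and its target is the end of the instruction plus the
// signed displacement; that is exactly the "data + imm32 + 6" computed
// above. A sketch using memcpy instead of the reinterpret_cast:
#include <stdint.h>
#include <string.h>

uint8_t* ConditionalJumpTarget(uint8_t* insn) {
  int32_t disp;
  memcpy(&disp, insn + 2, sizeof(disp));  // displacement after 0F 8x
  return insn + 6 + disp;                 // next instruction + disp
}
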
@@ -802,7 +805,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX64::SetCC(byte* data) {
-  ASSERT_EQ(0x0F, *data);
+  DCHECK_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   const char* mnem = conditional_code_suffix[cond];
   AppendToBuffer("set%s%c ", mnem, operand_size_code());
@@ -814,7 +817,7 @@
 // Returns number of bytes used, including *data.
 int DisassemblerX64::FPUInstruction(byte* data) {
   byte escape_opcode = *data;
-  ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+  DCHECK_EQ(0xD8, escape_opcode & 0xF8);
   byte modrm_byte = *(data+1);
 
   if (modrm_byte >= 0xC0) {
@@ -1068,7 +1071,7 @@
         current += PrintRightXMMOperand(current);
       } else if (opcode == 0x73) {
         current += 1;
-        ASSERT(regop == 6);
+        DCHECK(regop == 6);
         AppendToBuffer("psllq,%s,%d", NameOfXMMRegister(rm), *current & 0x7f);
         current += 1;
       } else {
@@ -1788,7 +1791,7 @@
   }
 
   int instr_len = static_cast<int>(data - instr);
-  ASSERT(instr_len > 0);  // Ensure progress.
+  DCHECK(instr_len > 0);  // Ensure progress.
 
   int outp = 0;
   // Instruction bytes.
diff --git a/src/x64/frames-x64.cc b/src/x64/frames-x64.cc
index 5513308..114945b 100644
--- a/src/x64/frames-x64.cc
+++ b/src/x64/frames-x64.cc
@@ -7,9 +7,9 @@
 #if V8_TARGET_ARCH_X64
 
 #include "src/assembler.h"
-#include "src/x64/assembler-x64.h"
-#include "src/x64/assembler-x64-inl.h"
 #include "src/frames.h"
+#include "src/x64/assembler-x64-inl.h"
+#include "src/x64/assembler-x64.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/x64/full-codegen-x64.cc b/src/x64/full-codegen-x64.cc
index fa1eee6..0f787b2 100644
--- a/src/x64/full-codegen-x64.cc
+++ b/src/x64/full-codegen-x64.cc
@@ -6,15 +6,16 @@
 
 #if V8_TARGET_ARCH_X64
 
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
 #include "src/debug.h"
 #include "src/full-codegen.h"
+#include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 #include "src/parser.h"
 #include "src/scopes.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -31,7 +32,7 @@
   }
 
   ~JumpPatchSite() {
-    ASSERT(patch_site_.is_bound() == info_emitted_);
+    DCHECK(patch_site_.is_bound() == info_emitted_);
   }
 
   void EmitJumpIfNotSmi(Register reg,
@@ -51,7 +52,7 @@
   void EmitPatchInfo() {
     if (patch_site_.is_bound()) {
       int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
-      ASSERT(is_uint8(delta_to_patch_site));
+      DCHECK(is_uint8(delta_to_patch_site));
       __ testl(rax, Immediate(delta_to_patch_site));
 #ifdef DEBUG
       info_emitted_ = true;
@@ -64,8 +65,8 @@
  private:
   // jc will be patched with jz, jnc will become jnz.
   void EmitJump(Condition cc, Label* target, Label::Distance near_jump) {
-    ASSERT(!patch_site_.is_bound() && !info_emitted_);
-    ASSERT(cc == carry || cc == not_carry);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    DCHECK(cc == carry || cc == not_carry);
     __ bind(&patch_site_);
     __ j(cc, target, near_jump);
   }
@@ -123,7 +124,7 @@
     __ j(not_equal, &ok, Label::kNear);
 
     __ movp(rcx, GlobalObjectOperand());
-    __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+    __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
 
     __ movp(args.GetReceiverOperand(), rcx);
 
@@ -142,7 +143,7 @@
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
-    ASSERT(!info->function()->is_generator() || locals_count == 0);
+    DCHECK(!info->function()->is_generator() || locals_count == 0);
     if (locals_count == 1) {
       __ PushRoot(Heap::kUndefinedValueRootIndex);
     } else if (locals_count > 1) {
@@ -189,7 +190,7 @@
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ Push(rdi);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewGlobalContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -197,7 +198,7 @@
       need_write_barrier = false;
     } else {
       __ Push(rdi);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register = false;
     // Context is returned in rax.  It replaces the context passed to us.
@@ -284,9 +285,9 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         VariableDeclaration* function = scope()->function();
-        ASSERT(function->proxy()->var()->mode() == CONST ||
+        DCHECK(function->proxy()->var()->mode() == CONST ||
                function->proxy()->var()->mode() == CONST_LEGACY);
-        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
         VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
@@ -302,9 +303,9 @@
     }
 
     { Comment cmnt(masm_, "[ Body");
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
       VisitStatements(function()->body());
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
     }
   }
 
@@ -345,7 +346,7 @@
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   Label ok;
 
-  ASSERT(back_edge_target->is_bound());
+  DCHECK(back_edge_target->is_bound());
   int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
   int weight = Min(kMaxBackEdgeWeight,
                    Max(1, distance / kCodeSizeMultiplier));
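
// Longer back edges contribute more weight toward the next interrupt
// check, clamped to [1, kMaxBackEdgeWeight]. A sketch of the clamp
// above with V8's constants passed in as parameters:
int BackEdgeWeightSketch(int distance, int code_size_multiplier,
                         int max_weight) {
  int weight = distance / code_size_multiplier;
  if (weight < 1) weight = 1;
  if (weight > max_weight) weight = max_weight;
  return weight;
}
// E.g. a 400-byte back edge with multiplier 200 yields weight 2.
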
@@ -428,7 +429,7 @@
     }
     // Check that the size of the code used for returning is large enough
     // for the debugger's requirements.
-    ASSERT(Assembler::kJSReturnSequenceLength <=
+    DCHECK(Assembler::kJSReturnSequenceLength <=
            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
 
     info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
@@ -437,18 +438,18 @@
 
 
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   MemOperand operand = codegen()->VarOperand(var, result_register());
   __ Push(operand);
 }
@@ -523,7 +524,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -550,7 +551,7 @@
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
 }
 
@@ -558,7 +559,7 @@
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
 }
@@ -566,7 +567,7 @@
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ movp(Operand(rsp, 0), reg);
 }
@@ -574,7 +575,7 @@
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
@@ -585,7 +586,7 @@
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT(materialize_true == materialize_false);
+  DCHECK(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -618,8 +619,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_true == true_label_);
-  ASSERT(materialize_false == false_label_);
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
 }
 
 
@@ -682,7 +683,7 @@
 
 
 MemOperand FullCodeGenerator::StackOperand(Variable* var) {
-  ASSERT(var->IsStackAllocated());
+  DCHECK(var->IsStackAllocated());
   // Offset is negative because higher indexes are at lower addresses.
   int offset = -var->index() * kPointerSize;
   // Adjust by a (parameter or local) base offset.
@@ -697,7 +698,7 @@
 
 
 MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   if (var->IsContextSlot()) {
     int context_chain_length = scope()->ContextChainLength(var->scope());
     __ LoadContext(scratch, context_chain_length);
@@ -709,7 +710,7 @@
 
 
 void FullCodeGenerator::GetVar(Register dest, Variable* var) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   MemOperand location = VarOperand(var, dest);
   __ movp(dest, location);
 }
@@ -719,10 +720,10 @@
                                Register src,
                                Register scratch0,
                                Register scratch1) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
-  ASSERT(!scratch0.is(src));
-  ASSERT(!scratch0.is(scratch1));
-  ASSERT(!scratch1.is(src));
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!scratch0.is(src));
+  DCHECK(!scratch0.is(scratch1));
+  DCHECK(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ movp(location, src);
 
@@ -756,7 +757,7 @@
 
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current context.
-  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
   if (generate_debug_code_) {
     // Check that we're not inside a with or catch context.
     __ movp(rbx, FieldOperand(rsi, HeapObject::kMapOffset));
@@ -811,7 +812,7 @@
       __ Push(rsi);
       __ Push(variable->name());
       // Declaration nodes are always introduced in one of four modes.
-      ASSERT(IsDeclaredVariableMode(mode));
+      DCHECK(IsDeclaredVariableMode(mode));
       PropertyAttributes attr =
           IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
       __ Push(Smi::FromInt(attr));
@@ -824,7 +825,7 @@
       } else {
         __ Push(Smi::FromInt(0));  // Indicates no initial value.
       }
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -839,7 +840,7 @@
     case Variable::UNALLOCATED: {
       globals_->Add(variable->name(), zone());
       Handle<SharedFunctionInfo> function =
-          Compiler::BuildFunctionInfo(declaration->fun(), script());
+          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
       globals_->Add(function, zone());
@@ -878,7 +879,7 @@
       __ Push(variable->name());
       __ Push(Smi::FromInt(NONE));
       VisitForStackValue(declaration->fun());
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -887,8 +888,8 @@
 
 void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   Variable* variable = declaration->proxy()->var();
-  ASSERT(variable->location() == Variable::CONTEXT);
-  ASSERT(variable->interface()->IsFrozen());
+  DCHECK(variable->location() == Variable::CONTEXT);
+  DCHECK(variable->interface()->IsFrozen());
 
   Comment cmnt(masm_, "[ ModuleDeclaration");
   EmitDebugCheckDeclarationContext(variable);
@@ -948,7 +949,7 @@
   __ Push(rsi);  // The context is the first argument.
   __ Push(pairs);
   __ Push(Smi::FromInt(DeclareGlobalsFlags()));
-  __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
@@ -956,7 +957,7 @@
 void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
   // Call the runtime to declare the modules.
   __ Push(descriptions);
-  __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
   // Return value is ignored.
 }
 
@@ -1011,7 +1012,8 @@
 
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
-    Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1148,7 +1150,7 @@
   // No need for a write barrier, we are storing a Smi in the feedback vector.
   __ Move(rbx, FeedbackVector());
   __ Move(FieldOperand(rbx, FixedArray::OffsetOfElementAt(slot)),
-          TypeFeedbackInfo::MegamorphicSentinel(isolate()));
+          TypeFeedbackVector::MegamorphicSentinel(isolate()));
   __ Move(rbx, Smi::FromInt(1));  // Smi indicates slow check
   __ movp(rcx, Operand(rsp, 0 * kPointerSize));  // Get enumerated object
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
@@ -1241,15 +1243,6 @@
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
-  // var iterable = subject
-  VisitForAccumulatorValue(stmt->assign_iterable());
-
-  // As with for-in, skip the loop if the iterable is null or undefined.
-  __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ j(equal, loop_statement.break_label());
-  __ CompareRoot(rax, Heap::kNullValueRootIndex);
-  __ j(equal, loop_statement.break_label());
-
   // var iterator = iterable[Symbol.iterator]();
   VisitForEffect(stmt->assign_iterator());
 
@@ -1298,9 +1291,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(),
-                            info->strict_mode(),
-                            info->is_generator());
+    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
     __ Move(rbx, info);
     __ CallStub(&stub);
   } else {
@@ -1309,7 +1300,7 @@
     __ Push(pretenure
             ? isolate()->factory()->true_value()
             : isolate()->factory()->false_value());
-    __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+    __ CallRuntime(Runtime::kNewClosure, 3);
   }
   context()->Plug(rax);
 }
@@ -1321,7 +1312,26 @@
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cnmt(masm_, "[ SuperReference ");
+
+  __ movp(LoadDescriptor::ReceiverRegister(),
+          Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  __ Cmp(rax, isolate()->factory()->undefined_value());
+  Label done;
+  __ j(not_equal, &done);
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
   Register context = rsi;
@@ -1372,8 +1382,13 @@
 
   // All extension objects were empty and it is safe to use a global
   // load IC call.
-  __ movp(rax, GlobalObjectOperand());
-  __ Move(rcx, var->name());
+  __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+  __ Move(LoadDescriptor::NameRegister(), proxy->var()->name());
+  if (FLAG_vector_ics) {
+    __ Move(VectorLoadICDescriptor::SlotRegister(),
+            Smi::FromInt(proxy->VariableFeedbackSlot()));
+  }
+
   ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
       ? NOT_CONTEXTUAL
       : CONTEXTUAL;
@@ -1383,7 +1398,7 @@
 
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
                                                                 Label* slow) {
-  ASSERT(var->IsContextSlot());
+  DCHECK(var->IsContextSlot());
   Register context = rsi;
   Register temp = rbx;
 
@@ -1411,7 +1426,7 @@
 }
 
 
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
                                                   TypeofState typeof_state,
                                                   Label* slow,
                                                   Label* done) {
@@ -1420,8 +1435,9 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
+  Variable* var = proxy->var();
   if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
     __ jmp(done);
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
@@ -1434,7 +1450,7 @@
         __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
       } else {  // LET || CONST
         __ Push(var->name());
-        __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
       }
     }
     __ jmp(done);
@@ -1452,10 +1468,12 @@
   switch (var->location()) {
     case Variable::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
-      // Use inline caching. Variable name is passed in rcx and the global
-      // object on the stack.
-      __ Move(rcx, var->name());
-      __ movp(rax, GlobalObjectOperand());
+      __ Move(LoadDescriptor::NameRegister(), var->name());
+      __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+      if (FLAG_vector_ics) {
+        __ Move(VectorLoadICDescriptor::SlotRegister(),
+                Smi::FromInt(proxy->VariableFeedbackSlot()));
+      }
       CallLoadIC(CONTEXTUAL);
       context()->Plug(rax);
       break;
@@ -1472,7 +1490,7 @@
         // always looked up dynamically, i.e. in that case
         //     var->location() == LOOKUP.
         // always holds.
-        ASSERT(var->scope() != NULL);
+        DCHECK(var->scope() != NULL);
 
         // Check if the binding really needs an initialization check. The check
         // can be skipped in the following situation: we have a LET or CONST
@@ -1495,8 +1513,8 @@
           skip_init_check = false;
         } else {
           // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+          DCHECK(proxy->position() != RelocInfo::kNoPosition);
           skip_init_check = var->mode() != CONST_LEGACY &&
               var->initializer_position() < proxy->position();
         }
@@ -1511,10 +1529,10 @@
             // Throw a reference error when using an uninitialized let/const
             // binding in harmony mode.
             __ Push(var->name());
-            __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
           } else {
             // Uninitialized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST_LEGACY);
+            DCHECK(var->mode() == CONST_LEGACY);
             __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
           }
           __ bind(&done);
@@ -1531,11 +1549,11 @@
       Label done, slow;
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
       __ bind(&slow);
       __ Push(rsi);  // Context.
       __ Push(var->name());
-      __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
       __ bind(&done);
       context()->Plug(rax);
       break;
@@ -1566,7 +1584,7 @@
   __ Push(Smi::FromInt(expr->literal_index()));
   __ Push(expr->pattern());
   __ Push(expr->flags());
-  __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ movp(rbx, rax);
 
   __ bind(&materialized);
@@ -1578,7 +1596,7 @@
   __ bind(&runtime_allocate);
   __ Push(rbx);
   __ Push(Smi::FromInt(size));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ Pop(rbx);
 
   __ bind(&allocated);
@@ -1627,7 +1645,7 @@
     __ Push(Smi::FromInt(expr->literal_index()));
     __ Push(constant_properties);
     __ Push(Smi::FromInt(flags));
-    __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
     __ movp(rdi, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     __ movp(rax, FieldOperand(rdi, JSFunction::kLiteralsOffset));
@@ -1662,14 +1680,15 @@
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ Move(rcx, key->value());
-            __ movp(rdx, Operand(rsp, 0));
+            DCHECK(StoreDescriptor::ValueRegister().is(rax));
+            __ Move(StoreDescriptor::NameRegister(), key->value());
+            __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1681,7 +1700,7 @@
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
-          __ Push(Smi::FromInt(NONE));    // PropertyAttributes
+          __ Push(Smi::FromInt(SLOPPY));  // StrictMode argument.
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
           __ Drop(3);
@@ -1715,11 +1734,11 @@
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
     __ Push(Smi::FromInt(NONE));
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
 
   if (expr->has_function()) {
-    ASSERT(result_saved);
+    DCHECK(result_saved);
     __ Push(Operand(rsp, 0));
     __ CallRuntime(Runtime::kToFastProperties, 1);
   }
@@ -1743,7 +1762,7 @@
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
   Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
+  DCHECK_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
   bool has_constant_fast_elements =
@@ -1764,7 +1783,7 @@
     __ Push(Smi::FromInt(expr->literal_index()));
     __ Push(constant_elements);
     __ Push(Smi::FromInt(flags));
-    __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
   } else {
     __ movp(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
     __ movp(rax, FieldOperand(rbx, JSFunction::kLiteralsOffset));
@@ -1824,7 +1843,7 @@
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  ASSERT(expr->target()->IsValidReferenceExpression());
+  DCHECK(expr->target()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ Assignment");
 
@@ -1846,9 +1865,9 @@
       break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
-        // We need the receiver both on the stack and in the accumulator.
-        VisitForAccumulatorValue(property->obj());
-        __ Push(result_register());
+        // We need the receiver both on the stack and in the receiver register.
+        VisitForStackValue(property->obj());
+        __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1856,9 +1875,9 @@
     case KEYED_PROPERTY: {
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
-        VisitForAccumulatorValue(property->key());
-        __ movp(rdx, Operand(rsp, 0));
-        __ Push(rax);
+        VisitForStackValue(property->key());
+        __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, kPointerSize));
+        __ movp(LoadDescriptor::NameRegister(), Operand(rsp, 0));
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1939,12 +1958,12 @@
   VisitForStackValue(expr->expression());
 
   switch (expr->yield_kind()) {
-    case Yield::SUSPEND:
+    case Yield::kSuspend:
       // Pop value from top-of-stack slot; box result into result register.
       EmitCreateIteratorResult(false);
       __ Push(result_register());
       // Fall through.
-    case Yield::INITIAL: {
+    case Yield::kInitial: {
       Label suspend, continuation, post_runtime, resume;
 
       __ jmp(&suspend);
@@ -1954,7 +1973,7 @@
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
-      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
       __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
               Smi::FromInt(continuation.pos()));
       __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
@@ -1965,7 +1984,7 @@
       __ cmpp(rsp, rbx);
       __ j(equal, &post_runtime);
       __ Push(rax);  // generator object
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ movp(context_register(),
               Operand(rbp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
@@ -1978,7 +1997,7 @@
       break;
     }
 
-    case Yield::FINAL: {
+    case Yield::kFinal: {
       VisitForAccumulatorValue(expr->generator_object());
       __ Move(FieldOperand(result_register(),
                            JSGeneratorObject::kContinuationOffset),
@@ -1990,7 +2009,7 @@
       break;
     }
 
-    case Yield::DELEGATING: {
+    case Yield::kDelegating: {
       VisitForStackValue(expr->generator_object());
 
       // Initial stack layout is as follows:
@@ -1999,6 +2018,9 @@
 
       Label l_catch, l_try, l_suspend, l_continuation, l_resume;
       Label l_next, l_call, l_loop;
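+      // Aliases for the registers the load IC expects.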
+      Register load_receiver = LoadDescriptor::ReceiverRegister();
+      Register load_name = LoadDescriptor::NameRegister();
+
       // Initial send value is undefined.
       __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
       __ jmp(&l_next);
@@ -2006,10 +2028,10 @@
       // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
       __ bind(&l_catch);
       handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
-      __ LoadRoot(rcx, Heap::kthrow_stringRootIndex);    // "throw"
-      __ Push(rcx);
-      __ Push(Operand(rsp, 2 * kPointerSize));           // iter
-      __ Push(rax);                                      // exception
+      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
+      __ Push(load_name);
+      __ Push(Operand(rsp, 2 * kPointerSize));               // iter
+      __ Push(rax);                                          // exception
       __ jmp(&l_call);
 
       // try { received = %yield result }
@@ -2027,14 +2049,14 @@
       const int generator_object_depth = kPointerSize + handler_size;
       __ movp(rax, Operand(rsp, generator_object_depth));
       __ Push(rax);                                      // g
-      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
       __ Move(FieldOperand(rax, JSGeneratorObject::kContinuationOffset),
               Smi::FromInt(l_continuation.pos()));
       __ movp(FieldOperand(rax, JSGeneratorObject::kContextOffset), rsi);
       __ movp(rcx, rsi);
       __ RecordWriteField(rax, JSGeneratorObject::kContextOffset, rcx, rdx,
                           kDontSaveFPRegs);
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ movp(context_register(),
               Operand(rbp, StandardFrameConstants::kContextOffset));
       __ Pop(rax);                                       // result
@@ -2044,16 +2066,20 @@
 
       // receiver = iter; f = 'next'; arg = received;
       __ bind(&l_next);
-      __ LoadRoot(rcx, Heap::knext_stringRootIndex);     // "next"
-      __ Push(rcx);
-      __ Push(Operand(rsp, 2 * kPointerSize));           // iter
-      __ Push(rax);                                      // received
+
+      __ LoadRoot(load_name, Heap::knext_stringRootIndex);
+      __ Push(load_name);                           // "next"
+      __ Push(Operand(rsp, 2 * kPointerSize));      // iter
+      __ Push(rax);                                 // received
 
       // result = receiver[f](arg);
       __ bind(&l_call);
-      __ movp(rdx, Operand(rsp, kPointerSize));
-      __ movp(rax, Operand(rsp, 2 * kPointerSize));
-      Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+      __ movp(load_receiver, Operand(rsp, kPointerSize));
+      if (FLAG_vector_ics) {
+        __ Move(VectorLoadICDescriptor::SlotRegister(),
+                Smi::FromInt(expr->KeyedLoadFeedbackSlot()));
+      }
+      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
       CallIC(ic, TypeFeedbackId::None());
       __ movp(rdi, rax);
       __ movp(Operand(rsp, 2 * kPointerSize), rdi);
@@ -2065,17 +2091,26 @@
 
       // if (!result.done) goto l_try;
       __ bind(&l_loop);
-      __ Push(rax);                                      // save result
-      __ LoadRoot(rcx, Heap::kdone_stringRootIndex);     // "done"
-      CallLoadIC(NOT_CONTEXTUAL);                        // result.done in rax
+      __ Move(load_receiver, rax);
+      __ Push(load_receiver);                               // save result
+      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
+      if (FLAG_vector_ics) {
+        __ Move(VectorLoadICDescriptor::SlotRegister(),
+                Smi::FromInt(expr->DoneFeedbackSlot()));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);                           // rax=result.done
       Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
       CallIC(bool_ic);
       __ testp(result_register(), result_register());
       __ j(zero, &l_try);
 
       // result.value
-      __ Pop(rax);                                       // result
-      __ LoadRoot(rcx, Heap::kvalue_stringRootIndex);    // "value"
+      __ Pop(load_receiver);                             // result
+      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
+      if (FLAG_vector_ics) {
+        __ Move(VectorLoadICDescriptor::SlotRegister(),
+                Smi::FromInt(expr->ValueFeedbackSlot()));
+      }
       CallLoadIC(NOT_CONTEXTUAL);                        // result.value in rax
       context()->DropAndPlug(2, rax);                    // drop iter and g
       break;
@@ -2088,7 +2123,7 @@
     Expression *value,
     JSGeneratorObject::ResumeMode resume_mode) {
   // The value stays in rax, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
   // is read to throw the value when the resumed generator is already closed.
   // rbx will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
@@ -2168,7 +2203,7 @@
   __ Push(rbx);
   __ Push(result_register());
   __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
   // Not reached: the runtime call returns elsewhere.
   __ Abort(kGeneratorFailedToResume);
 
@@ -2182,14 +2217,14 @@
   } else {
     // Throw the provided value.
     __ Push(rax);
-    __ CallRuntime(Runtime::kHiddenThrow, 1);
+    __ CallRuntime(Runtime::kThrow, 1);
   }
   __ jmp(&done);
 
   // Throw error if we attempt to operate on a running generator.
   __ bind(&wrong_state);
   __ Push(rbx);
-  __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
 
   __ bind(&done);
   context()->Plug(result_register());
@@ -2207,7 +2242,7 @@
 
   __ bind(&gc_required);
   __ Push(Smi::FromInt(map->instance_size()));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ movp(context_register(),
           Operand(rbp, StandardFrameConstants::kContextOffset));
 
@@ -2215,7 +2250,7 @@
   __ Move(rbx, map);
   __ Pop(rcx);
   __ Move(rdx, isolate()->factory()->ToBoolean(done));
-  ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
+  DCHECK_EQ(map->instance_size(), 5 * kPointerSize);
   __ movp(FieldOperand(rax, HeapObject::kMapOffset), rbx);
   __ Move(FieldOperand(rax, JSObject::kPropertiesOffset),
           isolate()->factory()->empty_fixed_array());
@@ -2236,15 +2271,42 @@
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  __ Move(rcx, key->value());
-  CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  __ Move(LoadDescriptor::NameRegister(), key->value());
+  if (FLAG_vector_ics) {
+    __ Move(VectorLoadICDescriptor::SlotRegister(),
+            Smi::FromInt(prop->PropertyFeedbackSlot()));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(rax);
+  VisitForStackValue(super_ref->this_var());
+  __ Push(key->value());
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
 }
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, prop->PropertyFeedbackId());
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  if (FLAG_vector_ics) {
+    __ Move(VectorLoadICDescriptor::SlotRegister(),
+            Smi::FromInt(prop->PropertyFeedbackSlot()));
+    CallIC(ic);
+  } else {
+    CallIC(ic, prop->PropertyFeedbackId());
+  }
 }
 
 
@@ -2265,8 +2327,8 @@
 
   __ bind(&stub_call);
   __ movp(rax, rcx);
-  BinaryOpICStub stub(isolate(), op, mode);
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
 
@@ -2313,16 +2375,16 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ Pop(rdx);
-  BinaryOpICStub stub(isolate(), op, mode);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   context()->Plug(rax);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  ASSERT(expr->IsValidReferenceExpression());
+  DCHECK(expr->IsValidReferenceExpression());
 
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot.
@@ -2345,9 +2407,10 @@
     case NAMED_PROPERTY: {
       __ Push(rax);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
-      __ movp(rdx, rax);
-      __ Pop(rax);  // Restore value.
-      __ Move(rcx, prop->key()->AsLiteral()->value());
+      __ Move(StoreDescriptor::ReceiverRegister(), rax);
+      __ Pop(StoreDescriptor::ValueRegister());  // Restore value.
+      __ Move(StoreDescriptor::NameRegister(),
+              prop->key()->AsLiteral()->value());
       CallStoreIC();
       break;
     }
@@ -2355,12 +2418,11 @@
       __ Push(rax);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
-      __ movp(rcx, rax);
-      __ Pop(rdx);
-      __ Pop(rax);  // Restore value.
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ Move(StoreDescriptor::NameRegister(), rax);
+      __ Pop(StoreDescriptor::ReceiverRegister());
+      __ Pop(StoreDescriptor::ValueRegister());  // Restore value.
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic);
       break;
     }
@@ -2380,34 +2442,24 @@
 }
 
 
-void FullCodeGenerator::EmitCallStoreContextSlot(
-    Handle<String> name, StrictMode strict_mode) {
-  __ Push(rax);  // Value.
-  __ Push(rsi);  // Context.
-  __ Push(name);
-  __ Push(Smi::FromInt(strict_mode));
-  __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
-}
-
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ Move(rcx, var->name());
-    __ movp(rdx, GlobalObjectOperand());
+    __ Move(StoreDescriptor::NameRegister(), var->name());
+    __ movp(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
     CallStoreIC();
 
   } else if (op == Token::INIT_CONST_LEGACY) {
     // Const initializers need a write barrier.
-    ASSERT(!var->IsParameter());  // No const parameters.
+    DCHECK(!var->IsParameter());  // No const parameters.
     if (var->IsLookupSlot()) {
       __ Push(rax);
       __ Push(rsi);
       __ Push(var->name());
-      __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
     } else {
-      ASSERT(var->IsStackLocal() || var->IsContextSlot());
+      DCHECK(var->IsStackLocal() || var->IsContextSlot());
       Label skip;
       MemOperand location = VarOperand(var, rcx);
       __ movp(rdx, location);
@@ -2419,28 +2471,30 @@
 
   } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
-    if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
-    } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-      Label assign;
-      MemOperand location = VarOperand(var, rcx);
-      __ movp(rdx, location);
-      __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
-      __ j(not_equal, &assign, Label::kNear);
-      __ Push(var->name());
-      __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
-      __ bind(&assign);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-    }
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, rcx);
+    __ movp(rdx, location);
+    __ CompareRoot(rdx, Heap::kTheHoleValueRootIndex);
+    __ j(not_equal, &assign, Label::kNear);
+    __ Push(var->name());
+    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    __ bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
 
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
     if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
+      // Assignment to var.
+      __ Push(rax);  // Value.
+      __ Push(rsi);  // Context.
+      __ Push(var->name());
+      __ Push(Smi::FromInt(strict_mode()));
+      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
     } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       MemOperand location = VarOperand(var, rcx);
       if (generate_debug_code_ && op == Token::INIT_LET) {
         // Check for an uninitialized let binding.
@@ -2458,13 +2512,13 @@
 void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a named store IC.
   Property* prop = expr->target()->AsProperty();
-  ASSERT(prop != NULL);
-  ASSERT(prop->key()->IsLiteral());
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ Move(rcx, prop->key()->AsLiteral()->value());
-  __ Pop(rdx);
+  __ Move(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
+  __ Pop(StoreDescriptor::ReceiverRegister());
   CallStoreIC(expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2475,13 +2529,12 @@
 void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
   // Assignment to a property, using a keyed store IC.
 
-  __ Pop(rcx);
-  __ Pop(rdx);
+  __ Pop(StoreDescriptor::NameRegister());  // Key.
+  __ Pop(StoreDescriptor::ReceiverRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(rax));
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = strict_mode() == SLOPPY
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
   CallIC(ic, expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2494,14 +2547,21 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForAccumulatorValue(expr->obj());
-    EmitNamedPropertyLoad(expr);
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      DCHECK(!rax.is(LoadDescriptor::ReceiverRegister()));
+      __ movp(LoadDescriptor::ReceiverRegister(), rax);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
     PrepareForBailoutForId(expr->LoadId(), TOS_REG);
     context()->Plug(rax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
-    __ Pop(rdx);
+    __ Move(LoadDescriptor::NameRegister(), rax);
+    __ Pop(LoadDescriptor::ReceiverRegister());
     EmitKeyedPropertyLoad(expr);
     context()->Plug(rax);
   }
@@ -2519,11 +2579,10 @@
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
 
-  CallIC::CallType call_type = callee->IsVariableProxy()
-      ? CallIC::FUNCTION
-      : CallIC::METHOD;
+  CallICState::CallType call_type =
+      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
   // Get the target function.
-  if (call_type == CallIC::FUNCTION) {
+  if (call_type == CallICState::FUNCTION) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
       PrepareForBailout(callee, NO_REGISTERS);
@@ -2533,8 +2592,9 @@
     __ Push(isolate()->factory()->undefined_value());
   } else {
     // Load the function from the receiver.
-    ASSERT(callee->IsProperty());
-    __ movp(rax, Operand(rsp, 0));
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
@@ -2546,6 +2606,43 @@
 }
 
 
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(rax);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(rax);
+  __ Push(Operand(rsp, kPointerSize));
+  __ Push(rax);
+  __ Push(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ movp(Operand(rsp, kPointerSize), rax);
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
 // Common code for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2555,8 +2652,9 @@
   Expression* callee = expr->expression();
 
   // Load the function from the receiver.
-  ASSERT(callee->IsProperty());
-  __ movp(rdx, Operand(rsp, 0));
+  DCHECK(callee->IsProperty());
+  __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
+  __ Move(LoadDescriptor::NameRegister(), rax);
   EmitKeyedPropertyLoad(callee->AsProperty());
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
@@ -2564,11 +2662,11 @@
   __ Push(Operand(rsp, 0));
   __ movp(Operand(rsp, kPointerSize), rax);
 
-  EmitCall(expr, CallIC::METHOD);
+  EmitCall(expr, CallICState::METHOD);
 }
 
 
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2605,6 +2703,9 @@
     __ PushRoot(Heap::kUndefinedValueRootIndex);
   }
 
+  // Push the enclosing function.
+  __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
+
   // Push the receiver of the enclosing function and do runtime call.
   StackArgumentsAccessor args(rbp, info_->scope()->num_parameters());
   __ Push(args.GetReceiverOperand());
@@ -2616,7 +2717,7 @@
   __ Push(Smi::FromInt(scope()->start_position()));
 
   // Do the runtime call.
-  __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
 }
 
 
@@ -2676,14 +2777,14 @@
     { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed by
       // eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
     }
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in rax) and
     // the object holding it (returned in rdx).
     __ Push(context_register());
     __ Push(proxy->name());
-    __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
     __ Push(rax);  // Function.
     __ Push(rdx);  // Receiver.
 
@@ -2706,16 +2807,23 @@
     EmitCall(expr);
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(property->obj());
-    }
-    if (property->key()->IsPropertyName()) {
-      EmitCallWithLoadIC(expr);
+    bool is_named_call = property->key()->IsPropertyName();
+    // Named super.x() calls are handled in EmitSuperCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithLoadIC(expr, property->key());
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
     }
   } else {
-    ASSERT(call_type == Call::OTHER_CALL);
+    DCHECK(call_type == Call::OTHER_CALL);
     // Call to an arbitrary expression not handled specially above.
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(callee);
@@ -2727,7 +2835,7 @@
 
 #ifdef DEBUG
   // RecordJSReturnSite should have been called.
-  ASSERT(expr->return_is_recorded_);
+  DCHECK(expr->return_is_recorded_);
 #endif
 }
 
@@ -2761,7 +2869,7 @@
   // Record call targets in unoptimized code, but not in the snapshot.
   if (FLAG_pretenuring_call_new) {
     EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
-    ASSERT(expr->AllocationSiteFeedbackSlot() ==
+    DCHECK(expr->AllocationSiteFeedbackSlot() ==
            expr->CallNewFeedbackSlot() + 1);
   }
 
@@ -2777,7 +2885,7 @@
 
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2798,7 +2906,7 @@
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2819,7 +2927,7 @@
 
 void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2851,7 +2959,7 @@
 
 void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2873,7 +2981,7 @@
 
 void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2898,7 +3006,7 @@
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2939,7 +3047,8 @@
   // rcx: valid entries in the descriptor array.
   // Calculate the end of the descriptor array.
   __ imulp(rcx, rcx, Immediate(DescriptorArray::kDescriptorSize));
-  __ leap(rcx, Operand(r8, rcx, times_8, DescriptorArray::kFirstOffset));
+  __ leap(rcx,
+          Operand(r8, rcx, times_pointer_size, DescriptorArray::kFirstOffset));
   // Calculate location of the first key name.
   __ addp(r8, Immediate(DescriptorArray::kFirstOffset));
   // Loop through all the keys in the descriptor array. If one of these is the
@@ -2981,7 +3090,7 @@
 
 void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3003,7 +3112,7 @@
 
 void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3030,7 +3139,7 @@
 
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3052,7 +3161,7 @@
 
 void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3074,7 +3183,7 @@
 
 
 void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -3106,7 +3215,7 @@
 
 void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
   VisitForStackValue(args->at(0));
@@ -3130,7 +3239,7 @@
 
 void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in rdx and the formal
   // parameter count in rax.
@@ -3144,7 +3253,7 @@
 
 
 void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -3168,7 +3277,7 @@
 
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
   VisitForAccumulatorValue(args->at(0));
@@ -3208,7 +3317,7 @@
 
   // Functions have class 'Function'.
   __ bind(&function);
-  __ Move(rax, isolate()->factory()->function_class_string());
+  __ Move(rax, isolate()->factory()->Function_string());
   __ jmp(&done);
 
   // Objects with a non-function constructor have class 'Object'.
@@ -3231,7 +3340,7 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3244,7 +3353,7 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 4);
+  DCHECK(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3256,7 +3365,7 @@
 
 void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
@@ -3275,8 +3384,8 @@
 
 void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  DCHECK(args->length() == 2);
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -3314,7 +3423,7 @@
   }
 
   __ bind(&not_date_object);
-  __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
   __ bind(&done);
   context()->Plug(rax);
 }
@@ -3322,15 +3431,15 @@
 
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = rax;
   Register index = rbx;
   Register value = rcx;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ Pop(value);
   __ Pop(index);
 
@@ -3355,15 +3464,15 @@
 
 void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = rax;
   Register index = rbx;
   Register value = rcx;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ Pop(value);
   __ Pop(index);
 
@@ -3389,7 +3498,7 @@
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   MathPowStub stub(isolate(), MathPowStub::ON_STACK);
@@ -3400,7 +3509,7 @@
 
 void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
@@ -3428,7 +3537,7 @@
 
 void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(args->length(), 1);
+  DCHECK_EQ(args->length(), 1);
 
   // Load the argument into rax and call the stub.
   VisitForAccumulatorValue(args->at(0));
@@ -3441,7 +3550,7 @@
 
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3460,7 +3569,7 @@
 
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3506,7 +3615,7 @@
 
 void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3554,7 +3663,7 @@
 
 void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3567,7 +3676,7 @@
 
 void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3580,7 +3689,7 @@
 
 void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() >= 2);
+  DCHECK(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
   for (int i = 0; i < arg_count + 1; i++) {
@@ -3613,7 +3722,7 @@
 void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
   RegExpConstructResultStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(2));
@@ -3626,9 +3735,9 @@
 
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  DCHECK_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
@@ -3674,7 +3783,7 @@
   // Call runtime to perform the lookup.
   __ Push(cache);
   __ Push(key);
-  __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ bind(&done);
   context()->Plug(rax);
@@ -3683,7 +3792,7 @@
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3706,25 +3815,25 @@
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
   __ AssertString(rax);
 
   __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
-  ASSERT(String::kHashShift >= kSmiTagSize);
+  DCHECK(String::kHashShift >= kSmiTagSize);
   __ IndexFromHash(rax, rax);
 
   context()->Plug(rax);
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
   Label bailout, return_result, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
   // Load this to rax (= array)
@@ -3780,7 +3889,7 @@
   array = no_reg;
 
 
-  // Check that all array elements are sequential ASCII strings, and
+  // Check that all array elements are sequential one-byte strings, and
   // accumulate the sum of their lengths, as a smi-encoded value.
   __ Set(index, 0);
   __ Set(string_length, 0);
@@ -3789,7 +3898,7 @@
   //                      scratch, string_length(int32), elements(FixedArray*).
   if (generate_debug_code_) {
     __ cmpp(index, array_length);
-    __ Assert(below, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+    __ Assert(below, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
   }
   __ bind(&loop);
   __ movp(string, FieldOperand(elements,
@@ -3833,7 +3942,7 @@
   // elements: FixedArray of strings.
   // index: Array length.
 
-  // Check that the separator is a sequential ASCII string.
+  // Check that the separator is a sequential one-byte string.
   __ movp(string, separator_operand);
   __ JumpIfSmi(string, &bailout);
   __ movp(scratch, FieldOperand(string, HeapObject::kMapOffset));
@@ -3861,8 +3970,8 @@
   // Live registers and stack values:
   //   string_length: Total length of result string.
   //   elements: FixedArray of strings.
-  __ AllocateAsciiString(result_pos, string_length, scratch,
-                         index, string, &bailout);
+  __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
+                           &bailout);
   __ movp(result_operand, result_pos);
   __ leap(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
 
@@ -3909,7 +4018,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Get the separator ASCII character value.
+  // Get the separator one-byte character value.
   // Register "string" holds the separator.
   __ movzxbl(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
   __ Set(index, 0);
@@ -4004,6 +4113,17 @@
 }
 
 
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
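+  // Read the byte-sized flag and return it to the caller as a smi.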
+  __ Move(kScratchRegister, debug_is_active);
+  __ movzxbp(rax, Operand(kScratchRegister, 0));
+  __ Integer32ToSmi(rax, rax);
+  context()->Plug(rax);
+}
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   if (expr->function() != NULL &&
       expr->function()->intrinsic_type == Runtime::INLINE) {
@@ -4022,9 +4142,15 @@
     __ Push(FieldOperand(rax, GlobalObject::kBuiltinsOffset));
 
     // Load the function from the receiver.
-    __ movp(rax, Operand(rsp, 0));
-    __ Move(rcx, expr->name());
-    CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
+    __ Move(LoadDescriptor::NameRegister(), expr->name());
+    if (FLAG_vector_ics) {
+      __ Move(VectorLoadICDescriptor::SlotRegister(),
+              Smi::FromInt(expr->CallRuntimeFeedbackSlot()));
+      CallLoadIC(NOT_CONTEXTUAL);
+    } else {
+      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    }
 
     // Push the target function under the receiver.
     __ Push(Operand(rsp, 0));
@@ -4075,7 +4201,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode() == SLOPPY || var->is_this());
+        DCHECK(strict_mode() == SLOPPY || var->is_this());
         if (var->IsUnallocated()) {
           __ Push(GlobalObjectOperand());
           __ Push(var->name());
@@ -4092,7 +4218,7 @@
           // context where the variable was introduced.
           __ Push(context_register());
           __ Push(var->name());
-          __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
           context()->Plug(rax);
         }
       } else {
@@ -4130,7 +4256,7 @@
         // for control and plugging the control flow into the context,
         // because we need to prepare a pair of extra administrative AST ids
         // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
         Label materialize_true, materialize_false, done;
         VisitForControl(expr->expression(),
                         &materialize_false,
@@ -4173,7 +4299,7 @@
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  ASSERT(expr->expression()->IsValidReferenceExpression());
+  DCHECK(expr->expression()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -4192,7 +4318,7 @@
 
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
-    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
     EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
@@ -4201,14 +4327,16 @@
       __ Push(Smi::FromInt(0));
     }
     if (assign_type == NAMED_PROPERTY) {
-      VisitForAccumulatorValue(prop->obj());
-      __ Push(rax);  // Copy of receiver, needed for later store.
+      VisitForStackValue(prop->obj());
+      __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, 0));
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
-      VisitForAccumulatorValue(prop->key());
-      __ movp(rdx, Operand(rsp, 0));  // Leave receiver on stack
-      __ Push(rax);  // Copy of key, needed for later store.
+      VisitForStackValue(prop->key());
+      // Leave receiver on stack.
+      __ movp(LoadDescriptor::ReceiverRegister(), Operand(rsp, kPointerSize));
+      // Copy of key, needed for later store.
+      __ movp(LoadDescriptor::NameRegister(), Operand(rsp, 0));
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4290,8 +4418,9 @@
   __ bind(&stub_call);
   __ movp(rdx, rax);
   __ Move(rax, Smi::FromInt(1));
-  BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
-  CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+                                              NO_OVERWRITE).code();
+  CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4320,8 +4449,9 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ Move(rcx, prop->key()->AsLiteral()->value());
-      __ Pop(rdx);
+      __ Move(StoreDescriptor::NameRegister(),
+              prop->key()->AsLiteral()->value());
+      __ Pop(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4334,11 +4464,10 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ Pop(rcx);
-      __ Pop(rdx);
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ Pop(StoreDescriptor::NameRegister());
+      __ Pop(StoreDescriptor::ReceiverRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic, expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4356,13 +4485,17 @@
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
-  ASSERT(!context()->IsEffect());
-  ASSERT(!context()->IsTest());
+  DCHECK(!context()->IsEffect());
+  DCHECK(!context()->IsTest());
 
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "[ Global variable");
-    __ Move(rcx, proxy->name());
-    __ movp(rax, GlobalObjectOperand());
+    __ Move(LoadDescriptor::NameRegister(), proxy->name());
+    __ movp(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    if (FLAG_vector_ics) {
+      __ Move(VectorLoadICDescriptor::SlotRegister(),
+              Smi::FromInt(proxy->VariableFeedbackSlot()));
+    }
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     CallLoadIC(NOT_CONTEXTUAL);
@@ -4374,12 +4507,12 @@
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
     __ Push(rsi);
     __ Push(proxy->name());
-    __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
     PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
@@ -4429,10 +4562,6 @@
     __ j(equal, if_true);
     __ CompareRoot(rax, Heap::kFalseValueRootIndex);
     Split(equal, if_true, if_false, fall_through);
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(check, factory->null_string())) {
-    __ CompareRoot(rax, Heap::kNullValueRootIndex);
-    Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
     __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
     __ j(equal, if_true);
@@ -4451,10 +4580,8 @@
     Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->object_string())) {
     __ JumpIfSmi(rax, if_false);
-    if (!FLAG_harmony_typeof) {
-      __ CompareRoot(rax, Heap::kNullValueRootIndex);
-      __ j(equal, if_true);
-    }
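+    // typeof null is "object", so null takes the true branch.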
+    __ CompareRoot(rax, Heap::kNullValueRootIndex);
+    __ j(equal, if_true);
     __ CmpObjectType(rax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, rdx);
     __ j(below, if_false);
     __ CmpInstanceType(rdx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -4528,7 +4655,7 @@
 
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
-      Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
 
@@ -4589,7 +4716,7 @@
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  ASSERT(IsAligned(frame_offset, kPointerSize));
+  DCHECK(IsAligned(frame_offset, kPointerSize));
   __ movp(Operand(rbp, frame_offset), value);
 }
 
@@ -4614,7 +4741,7 @@
     // code.  Fetch it from the context.
     __ Push(ContextOperand(rsi, Context::CLOSURE_INDEX));
   } else {
-    ASSERT(declaration_scope->is_function_scope());
+    DCHECK(declaration_scope->is_function_scope());
     __ Push(Operand(rbp, JavaScriptFrameConstants::kFunctionOffset));
   }
 }
@@ -4625,8 +4752,8 @@
 
 
 void FullCodeGenerator::EnterFinallyBlock() {
-  ASSERT(!result_register().is(rdx));
-  ASSERT(!result_register().is(rcx));
+  DCHECK(!result_register().is(rdx));
+  DCHECK(!result_register().is(rcx));
   // Cook return address on top of stack (smi-encoded Code* delta).
   __ PopReturnAddressTo(rdx);
   __ Move(rcx, masm_->CodeObject());
@@ -4657,8 +4784,8 @@
 
 
 void FullCodeGenerator::ExitFinallyBlock() {
-  ASSERT(!result_register().is(rdx));
-  ASSERT(!result_register().is(rcx));
+  DCHECK(!result_register().is(rdx));
+  DCHECK(!result_register().is(rcx));
   // Restore pending message from stack.
   __ Pop(rdx);
   ExternalReference pending_message_script =
@@ -4770,18 +4897,18 @@
     Address pc) {
   Address call_target_address = pc - kIntSize;
   Address jns_instr_address = call_target_address - 3;
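   // A jns byte here means the interrupt check is still in place; otherwise
   // the site was patched to nops and the call target tells the OSR state.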
-  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+  DCHECK_EQ(kCallInstruction, *(call_target_address - 1));
 
   if (*jns_instr_address == kJnsInstruction) {
-    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
-    ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+    DCHECK_EQ(kJnsOffset, *(call_target_address - 2));
+    DCHECK_EQ(isolate->builtins()->InterruptCheck()->entry(),
               Assembler::target_address_at(call_target_address,
                                            unoptimized_code));
     return INTERRUPT;
   }
 
-  ASSERT_EQ(kNopByteOne, *jns_instr_address);
-  ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+  DCHECK_EQ(kNopByteOne, *jns_instr_address);
+  DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
 
   if (Assembler::target_address_at(call_target_address,
                                    unoptimized_code) ==
@@ -4789,7 +4916,7 @@
     return ON_STACK_REPLACEMENT;
   }
 
-  ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+  DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
             Assembler::target_address_at(call_target_address,
                                          unoptimized_code));
   return OSR_AFTER_STACK_CHECK;
diff --git a/src/x64/ic-x64.cc b/src/x64/ic-x64.cc
deleted file mode 100644
index 0cda1df..0000000
--- a/src/x64/ic-x64.cc
+++ /dev/null
@@ -1,1311 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
-                                            Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmpb(type, Immediate(JS_GLOBAL_OBJECT_TYPE));
-  __ j(equal, global_object);
-  __ cmpb(type, Immediate(JS_BUILTINS_OBJECT_TYPE));
-  __ j(equal, global_object);
-  __ cmpb(type, Immediate(JS_GLOBAL_PROXY_TYPE));
-  __ j(equal, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register r0,
-                                                Register r1,
-                                                Label* miss) {
-  // Register usage:
-  //   receiver: holds the receiver on entry and is unchanged.
-  //   r0: used to hold receiver instance type.
-  //       Holds the property dictionary on fall through.
-  //   r1: used to hold the receiver's map.
-
-  __ JumpIfSmi(receiver, miss);
-
-  // Check that the receiver is a valid JS object.
-  __ movp(r1, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ movb(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
-  __ cmpb(r0, Immediate(FIRST_SPEC_OBJECT_TYPE));
-  __ j(below, miss);
-
-  // If this assert fails, we have to check the upper bound too.
-  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
-  GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
-  // Check for non-global object that requires access check.
-  __ testb(FieldOperand(r1, Map::kBitFieldOffset),
-           Immediate((1 << Map::kIsAccessCheckNeeded) |
-                     (1 << Map::kHasNamedInterceptor)));
-  __ j(not_zero, miss);
-
-  __ movp(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  __ CompareRoot(FieldOperand(r0, HeapObject::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(not_equal, miss);
-}
-
-
-
-// Helper function used to load a property from a dictionary backing storage.
-// This function may return false negatives, so miss_label
-// must always call a backup property load that is complete.
-// This function is safe to call if name is not an internalized string,
-// and will jump to the miss_label in that case.
-// The generated code assumes that the receiver has slow properties,
-// is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss_label,
-                                   Register elements,
-                                   Register name,
-                                   Register r0,
-                                   Register r1,
-                                   Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // r0   - used to hold the capacity of the property dictionary.
-  //
-  // r1   - used to hold the index into the property dictionary.
-  //
-  // result - holds the result on exit if the load succeeded.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   r0,
-                                                   r1);
-
-  // If probing finds an entry in the dictionary, r1 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ Test(Operand(elements, r1, times_pointer_size,
-                  kDetailsOffset - kHeapObjectTag),
-          Smi::FromInt(PropertyDetails::TypeField::kMask));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ movp(result,
-          Operand(elements, r1, times_pointer_size,
-                  kValueOffset - kHeapObjectTag));
-}
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property even though it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not an internalized string, and will jump to the miss_label
-// in that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
-                                    Label* miss_label,
-                                    Register elements,
-                                    Register name,
-                                    Register value,
-                                    Register scratch0,
-                                    Register scratch1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // scratch0 - used during the positive dictionary lookup and is clobbered.
-  //
-  // scratch1 - used for index into the property dictionary and is clobbered.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   scratch0,
-                                                   scratch1);
-
-  // If probing finds an entry in the dictionary, scratch0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
-  __ Test(Operand(elements,
-                  scratch1,
-                  times_pointer_size,
-                  kDetailsOffset - kHeapObjectTag),
-          Smi::FromInt(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ leap(scratch1, Operand(elements,
-                           scratch1,
-                           times_pointer_size,
-                           kValueOffset - kHeapObjectTag));
-  __ movp(Operand(scratch1, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ movp(scratch0, value);
-  __ RecordWrite(elements, scratch1, scratch0, kDontSaveFPRegs);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for a regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register map,
-                                           int interceptor_bit,
-                                           Label* slow) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  // Scratch registers:
-  //   map - used to hold the map of the receiver.
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing
-  // into string objects works as intended.
-  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-  __ CmpObjectType(receiver, JS_OBJECT_TYPE, map);
-  __ j(below, slow);
-
-  // Check bit field.
-  __ testb(FieldOperand(map, Map::kBitFieldOffset),
-           Immediate((1 << Map::kIsAccessCheckNeeded) |
-                     (1 << interceptor_bit)));
-  __ j(not_zero, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
-                                  Register receiver,
-                                  Register key,
-                                  Register elements,
-                                  Register scratch,
-                                  Register result,
-                                  Label* not_fast_array,
-                                  Label* out_of_range) {
-  // Register use:
-  //
-  // receiver - holds the receiver on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // key      - holds the smi key on entry.
-  //            Unchanged unless 'result' is the same register.
-  //
-  // elements - holds the elements of the receiver on exit.
-  //
-  // result   - holds the result on exit if the load succeeded.
-  //            Allowed to be the same as 'receiver' or 'key'.
-  //            Unchanged on bailout so 'receiver' and 'key' can be safely
-  //            used by further computation.
-  //
-  // Scratch registers:
-  //
-  //   scratch - used to hold elements of the receiver and the loaded value.
-
-  __ movp(elements, FieldOperand(receiver, JSObject::kElementsOffset));
-  if (not_fast_array != NULL) {
-    // Check that the object is in fast mode and writable.
-    __ CompareRoot(FieldOperand(elements, HeapObject::kMapOffset),
-                   Heap::kFixedArrayMapRootIndex);
-    __ j(not_equal, not_fast_array);
-  } else {
-    __ AssertFastElements(elements);
-  }
-  // Check that the key (index) is within bounds.
-  __ SmiCompare(key, FieldOperand(elements, FixedArray::kLengthOffset));
-  // Unsigned comparison rejects negative indices.
-  __ j(above_equal, out_of_range);
-  // Fast case: Do the load.
-  SmiIndex index = masm->SmiToIndex(scratch, key, kPointerSizeLog2);
-  __ movp(scratch, FieldOperand(elements,
-                                index.reg,
-                                index.scale,
-                                FixedArray::kHeaderSize));
-  __ CompareRoot(scratch, Heap::kTheHoleValueRootIndex);
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ j(equal, out_of_range);
-  if (!result.is(scratch)) {
-    __ movp(result, scratch);
-  }
-}
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
-                                 Register key,
-                                 Register map,
-                                 Register hash,
-                                 Label* index_string,
-                                 Label* not_unique) {
-  // Register use:
-  //   key - holds the key and is unchanged. Assumed to be non-smi.
-  // Scratch registers:
-  //   map - used to hold the map of the key.
-  //   hash - used to hold the hash of the key.
-  Label unique;
-  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
-  __ j(above, not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ j(equal, &unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ movl(hash, FieldOperand(key, Name::kHashFieldOffset));
-  __ testl(hash, Immediate(Name::kContainsCachedArrayIndexMask));
-  __ j(zero, index_string);  // The value in hash is used at jump target.
-
-  // Is the string internalized? We already know it's a string, so a single
-  // bit test is enough.
-  STATIC_ASSERT(kNotInternalizedTag != 0);
-  __ testb(FieldOperand(map, Map::kInstanceTypeOffset),
-           Immediate(kIsNotInternalizedMask));
-  __ j(not_zero, not_unique);
-
-  __ bind(&unique);
-}
-
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rax, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from below
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, rdx, rcx, Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(rcx, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm,
-                        rdx,
-                        rax,
-                        rcx,
-                        rbx,
-                        rax,
-                        NULL,
-                        &slow);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
-  __ ret(0);
-
-  __ bind(&check_number_dictionary);
-  __ SmiToInteger32(rbx, rax);
-  __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-
-  // Check whether the elements object is a number dictionary.
-  // rdx: receiver
-  // rax: key
-  // rbx: key as untagged int32
-  // rcx: elements
-  __ CompareRoot(FieldOperand(rcx, HeapObject::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(not_equal, &slow);
-  __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
-  __ ret(0);
-
-  __ bind(&slow);
-  // Slow case: Jump to runtime.
-  // rdx: receiver
-  // rax: key
-  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, rax, rcx, rbx, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, rdx, rcx, Map::kHasNamedInterceptor, &slow);
-
-  // If the receiver is a fast-case object, check the keyed lookup
-  // cache. Otherwise probe the dictionary leaving result in rcx.
-  __ movp(rbx, FieldOperand(rdx, JSObject::kPropertiesOffset));
-  __ CompareRoot(FieldOperand(rbx, HeapObject::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(equal, &probe_dictionary);
-
-  // Load the map of the receiver, compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the string hash.
-  __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ movl(rcx, rbx);
-  __ shrl(rcx, Immediate(KeyedLookupCache::kMapHashShift));
-  __ movl(rdi, FieldOperand(rax, String::kHashFieldOffset));
-  __ shrl(rdi, Immediate(String::kHashShift));
-  __ xorp(rcx, rdi);
-  int mask = (KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-  __ andp(rcx, Immediate(mask));
-
-  // Load the key (consisting of map and internalized string) from the cache
-  // and check for a match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
-  ExternalReference cache_keys
-      = ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    __ movp(rdi, rcx);
-    __ shlp(rdi, Immediate(kPointerSizeLog2 + 1));
-    __ LoadAddress(kScratchRegister, cache_keys);
-    int off = kPointerSize * i * 2;
-    __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
-    __ j(not_equal, &try_next_entry);
-    __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
-    __ j(equal, &hit_on_nth_entry[i]);
-    __ bind(&try_next_entry);
-  }
-
-  int off = kPointerSize * (kEntriesPerBucket - 1) * 2;
-  __ cmpp(rbx, Operand(kScratchRegister, rdi, times_1, off));
-  __ j(not_equal, &slow);
-  __ cmpp(rax, Operand(kScratchRegister, rdi, times_1, off + kPointerSize));
-  __ j(not_equal, &slow);
-
-  // Get field offset, which is a 32-bit integer.
-  ExternalReference cache_field_offsets
-      = ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    if (i != 0) {
-      __ addl(rcx, Immediate(i));
-    }
-    __ LoadAddress(kScratchRegister, cache_field_offsets);
-    __ movl(rdi, Operand(kScratchRegister, rcx, times_4, 0));
-    __ movzxbp(rcx, FieldOperand(rbx, Map::kInObjectPropertiesOffset));
-    __ subp(rdi, rcx);
-    __ j(above_equal, &property_array_property);
-    if (i != 0) {
-      __ jmp(&load_in_object_property);
-    }
-  }
-
-  // Load in-object property.
-  __ bind(&load_in_object_property);
-  __ movzxbp(rcx, FieldOperand(rbx, Map::kInstanceSizeOffset));
-  __ addp(rcx, rdi);
-  __ movp(rax, FieldOperand(rdx, rcx, times_pointer_size, 0));
-  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
-  __ ret(0);
-
-  // Load property array property.
-  __ bind(&property_array_property);
-  __ movp(rax, FieldOperand(rdx, JSObject::kPropertiesOffset));
-  __ movp(rax, FieldOperand(rax, rdi, times_pointer_size,
-                            FixedArray::kHeaderSize));
-  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
-  __ ret(0);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-  // rdx: receiver
-  // rax: key
-  // rbx: elements
-
-  __ movp(rcx, FieldOperand(rdx, JSObject::kMapOffset));
-  __ movb(rcx, FieldOperand(rcx, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, rcx, &slow);
-
-  GenerateDictionaryLoad(masm, &slow, rbx, rax, rcx, rdi, rax);
-  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
-  __ ret(0);
-
-  __ bind(&index_name);
-  __ IndexFromHash(rbx, rax);
-  __ jmp(&index_smi);
-}
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label miss;
-
-  Register receiver = rdx;
-  Register index = rax;
-  Register scratch = rcx;
-  Register result = rax;
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX);
-  char_at_generator.GenerateFast(masm);
-  __ ret(0);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, call_helper);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label slow;
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(rdx, &slow);
-
-  // Check that the key is an array index, that is, a Uint32.
-  STATIC_ASSERT(kSmiValueSize <= 32);
-  __ JumpUnlessNonNegativeSmi(rax, &slow);
-
-  // Get the map of the receiver.
-  __ movp(rcx, FieldOperand(rdx, HeapObject::kMapOffset));
-
-  // Check that it has indexed interceptor and access checks
-  // are not enabled for this object.
-  __ movb(rcx, FieldOperand(rcx, Map::kBitFieldOffset));
-  __ andb(rcx, Immediate(kSlowCaseBitFieldMask));
-  __ cmpb(rcx, Immediate(1 << Map::kHasIndexedInterceptor));
-  __ j(not_zero, &slow);
-
-  // Everything is fine, call runtime.
-  __ PopReturnAddressTo(rcx);
-  __ Push(rdx);  // receiver
-  __ Push(rax);  // key
-  __ PushReturnAddressFrom(rcx);
-
-  // Perform tail call to the entry.
-  __ TailCallExternalReference(
-      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
-                        masm->isolate()),
-      2,
-      1);
-
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
-    MacroAssembler* masm,
-    Label* fast_object,
-    Label* fast_double,
-    Label* slow,
-    KeyedStoreCheckMap check_map,
-    KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  // rax: value
-  // rbx: receiver's elements array (a FixedArray)
-  // rcx: index
-  // rdx: receiver (a JSArray)
-  // r9: map of receiver
-  if (check_map == kCheckMap) {
-    __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
-    __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ movp(kScratchRegister, FieldOperand(rbx,
-                                         rcx,
-                                         times_pointer_size,
-                                         FixedArray::kHeaderSize));
-  __ CompareRoot(kScratchRegister, Heap::kTheHoleValueRootIndex);
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(rax, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(rcx, 1));
-    __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ ret(0);
-
-  __ bind(&non_smi_value);
-  // Writing a non-smi, check whether array allows non-smi elements.
-  // r9: receiver's map
-  __ CheckFastObjectElements(r9, &transition_smi_elements);
-
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(rcx, 1));
-    __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
-  }
-  __ movp(FieldOperand(rbx, rcx, times_pointer_size, FixedArray::kHeaderSize),
-          rax);
-  __ movp(rdx, rax);  // Preserve the value which is returned.
-  __ RecordWriteArray(
-      rbx, rdx, rcx, kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ ret(0);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    // rdi: elements array's map
-    __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-    __ j(not_equal, slow);
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmpl(FieldOperand(rbx, rcx, times_8, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(rdx, rdi, kScratchRegister, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(rax, rbx, rcx, xmm0,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ leal(rdi, Operand(rcx, 1));
-    __ Integer32ToSmiField(FieldOperand(rdx, JSArray::kLengthOffset), rdi);
-  }
-  __ ret(0);
-
-  __ bind(&transition_smi_elements);
-  __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ movp(r9, FieldOperand(rax, HeapObject::kMapOffset));
-  __ CompareRoot(r9, Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &non_double_value);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         rbx,
-                                         rdi,
-                                         slow);
-  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
-                                                    FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
-  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         rbx,
-                                         rdi,
-                                         slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
-                                                                   slow);
-  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
-  __ movp(rbx, FieldOperand(rdx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         rbx,
-                                         rdi,
-                                         slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
-  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
-                                   StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label slow, slow_with_tagged_index, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array;
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(rdx, &slow_with_tagged_index);
-  // Get the map from the receiver.
-  __ movp(r9, FieldOperand(rdx, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
-  __ testb(FieldOperand(r9, Map::kBitFieldOffset),
-           Immediate(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
-  __ j(not_zero, &slow_with_tagged_index);
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(rcx, &slow_with_tagged_index);
-  __ SmiToInteger32(rcx, rcx);
-
-  __ CmpInstanceType(r9, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(r9, FIRST_JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  // rax: value
-  // rdx: JSObject
-  // rcx: index
-  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-  // Check array bounds.
-  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
-  // rax: value
-  // rbx: FixedArray
-  // rcx: index
-  __ j(above, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  __ Integer32ToSmi(rcx, rcx);
-  __ bind(&slow_with_tagged_index);
-  GenerateRuntimeSetProperty(masm, strict_mode);
-  // Never returns to here.
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // rax: value
-  // rdx: receiver (a JSArray)
-  // rbx: receiver's elements array (a FixedArray)
-  // rcx: index
-  // flags: smicompare (rdx.length(), rcx)
-  __ j(not_equal, &slow);  // do not leave holes in the array
-  __ SmiCompareInteger32(FieldOperand(rbx, FixedArray::kLengthOffset), rcx);
-  __ j(below_equal, &slow);
-  // Increment index to get new length.
-  __ movp(rdi, FieldOperand(rbx, HeapObject::kMapOffset));
-  __ CompareRoot(rdi, Heap::kFixedArrayMapRootIndex);
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  // rdi: elements array's map
-  __ CompareRoot(rdi, Heap::kFixedDoubleArrayMapRootIndex);
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  // rax: value
-  // rdx: receiver (a JSArray)
-  // rcx: index
-  __ movp(rbx, FieldOperand(rdx, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array, compute the
-  // address to store into and fall through to fast case.
-  __ SmiCompareInteger32(FieldOperand(rdx, JSArray::kLengthOffset), rcx);
-  __ j(below_equal, &extra);
-
-  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
-                                  &slow, kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
-                                  &slow, kDontCheckMap, kIncrementLength);
-}
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
-                                             Register object,
-                                             Register key,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Register scratch3,
-                                             Label* unmapped_case,
-                                             Label* slow_case) {
-  Heap* heap = masm->isolate()->heap();
-
-  // Check that the receiver is a JSObject. Because of the elements
-  // map check later, we do not need to check for interceptors or
-  // whether it requires access checks.
-  __ JumpIfSmi(object, slow_case);
-  // Check that the object is some kind of JSObject.
-  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
-  __ j(below, slow_case);
-
-  // Check that the key is a non-negative smi.
-  Condition check = masm->CheckNonNegativeSmi(key);
-  __ j(NegateCondition(check), slow_case);
-
-  // Load the elements into scratch1 and check its map. If not, jump
-  // to the unmapped lookup with the parameter map in scratch1.
-  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
-  __ movp(scratch1, FieldOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
-  // Check if element is in the range of mapped arguments.
-  __ movp(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
-  __ SmiSubConstant(scratch2, scratch2, Smi::FromInt(2));
-  __ cmpp(key, scratch2);
-  __ j(greater_equal, unmapped_case);
-
-  // Load element index and check whether it is the hole.
-  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
-  __ SmiToInteger64(scratch3, key);
-  __ movp(scratch2, FieldOperand(scratch1,
-                                 scratch3,
-                                 times_pointer_size,
-                                 kHeaderSize));
-  __ CompareRoot(scratch2, Heap::kTheHoleValueRootIndex);
-  __ j(equal, unmapped_case);
-
-  // Load value from context and return it. We can reuse scratch1 because
-  // we do not jump to the unmapped lookup (which requires the parameter
-  // map in scratch1).
-  __ movp(scratch1, FieldOperand(scratch1, FixedArray::kHeaderSize));
-  __ SmiToInteger64(scratch3, scratch2);
-  return FieldOperand(scratch1,
-                      scratch3,
-                      times_pointer_size,
-                      Context::kHeaderSize);
-}
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
-                                               Register key,
-                                               Register parameter_map,
-                                               Register scratch,
-                                               Label* slow_case) {
-  // Element is in arguments backing store, which is referenced by the
-  // second element of the parameter_map. The parameter_map register
-  // must be loaded with the parameter map of the arguments object and is
-  // overwritten.
-  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
-  Register backing_store = parameter_map;
-  __ movp(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
-  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
-  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
-  __ movp(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmpp(key, scratch);
-  __ j(greater_equal, slow_case);
-  __ SmiToInteger64(scratch, key);
-  return FieldOperand(backing_store,
-                      scratch,
-                      times_pointer_size,
-                      FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label slow, notin;
-  Operand mapped_location =
-      GenerateMappedArgumentsLookup(
-          masm, rdx, rax, rbx, rcx, rdi, &notin, &slow);
-  __ movp(rax, mapped_location);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in rbx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, rax, rbx, rcx, &slow);
-  __ CompareRoot(unmapped_location, Heap::kTheHoleValueRootIndex);
-  __ j(equal, &slow);
-  __ movp(rax, unmapped_location);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label slow, notin;
-  Operand mapped_location = GenerateMappedArgumentsLookup(
-      masm, rdx, rcx, rbx, rdi, r8, &notin, &slow);
-  __ movp(mapped_location, rax);
-  __ leap(r9, mapped_location);
-  __ movp(r8, rax);
-  __ RecordWrite(rbx,
-                 r9,
-                 r8,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 INLINE_SMI_CHECK);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in rbx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, rcx, rbx, rdi, &slow);
-  __ movp(unmapped_location, rax);
-  __ leap(r9, unmapped_location);
-  __ movp(r8, rax);
-  __ RecordWrite(rbx,
-                 r9,
-                 r8,
-                 kDontSaveFPRegs,
-                 EMIT_REMEMBERED_SET,
-                 INLINE_SMI_CHECK);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : receiver
-  //  -- rcx    : name
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  // Probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, rax, rcx, rbx, rdx);
-
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : receiver
-  //  -- rcx    : name
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label miss, slow;
-
-  GenerateNameDictionaryReceiverCheck(masm, rax, rdx, rbx, &miss);
-
-  //  rdx: elements
-  // Search the dictionary placing the result in rax.
-  GenerateDictionaryLoad(masm, &slow, rdx, rcx, rbx, rdi, rax);
-  __ ret(0);
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-
-  // Cache miss: Jump to runtime.
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : receiver
-  //  -- rcx    : name
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->load_miss(), 1);
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rax);  // receiver
-  __ Push(rcx);  // name
-  __ PushReturnAddressFrom(rbx);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : receiver
-  //  -- rcx    : name
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rax);  // receiver
-  __ Push(rcx);  // name
-  __ PushReturnAddressFrom(rbx);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->keyed_load_miss(), 1);
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);  // receiver
-  __ Push(rax);  // name
-  __ PushReturnAddressFrom(rbx);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);  // receiver
-  __ Push(rax);  // name
-  __ PushReturnAddressFrom(rbx);
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : name
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  // Probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, rdx, rcx, rbx, no_reg);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : name
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);  // receiver
-  __ Push(rcx);  // name
-  __ Push(rax);  // value
-  __ PushReturnAddressFrom(rbx);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : name
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  Label miss;
-
-  GenerateNameDictionaryReceiverCheck(masm, rdx, rbx, rdi, &miss);
-
-  GenerateDictionaryStore(masm, &miss, rbx, rcx, rax, r8, r9);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1);
-  __ ret(0);
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : name
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);
-  __ Push(rcx);
-  __ Push(rax);
-  __ Push(Smi::FromInt(NONE));  // PropertyAttributes
-  __ Push(Smi::FromInt(strict_mode));
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                              StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);  // receiver
-  __ Push(rcx);  // key
-  __ Push(rax);  // value
-  __ Push(Smi::FromInt(NONE));          // PropertyAttributes
-  __ Push(Smi::FromInt(strict_mode));   // Strict mode.
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);  // receiver
-  __ Push(rcx);  // key
-  __ Push(rax);  // value
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);  // receiver
-  __ Push(rcx);  // key
-  __ Push(rax);  // value
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : value
-  //  -- rcx    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-
-  __ PopReturnAddressTo(rbx);
-  __ Push(rdx);  // receiver
-  __ Push(rcx);  // key
-  __ Push(rax);  // value
-  __ PushReturnAddressFrom(rbx);
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return equal;
-    case Token::LT:
-      return less;
-    case Token::GT:
-      return greater;
-    case Token::LTE:
-      return less_equal;
-    case Token::GTE:
-      return greater_equal;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a "test al" instruction,
-  // nothing was inlined.
-  return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a "test al" instruction,
-  // nothing was inlined.
-  if (*test_instruction_address != Assembler::kTestAlByte) {
-    ASSERT(*test_instruction_address == Assembler::kNopByte);
-    return;
-  }
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction and the
-  // condition code used at the patched jump.
-  uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
-  if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           address, test_instruction_address, delta);
-  }
-
-  // Patch with a short conditional jump. Enabling means switching from a short
-  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
-  // reverse.
-  Address jmp_address = test_instruction_address - delta;
-  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
-         ? (*jmp_address == Assembler::kJncShortOpcode ||
-            *jmp_address == Assembler::kJcShortOpcode)
-         : (*jmp_address == Assembler::kJnzShortOpcode ||
-            *jmp_address == Assembler::kJzShortOpcode));
-  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
-      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
-      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
-  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
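The deleted ic-x64.cc ends with PatchInlinedSmiCode, which enables or disables an inlined smi check by rewriting a single opcode byte: x86 short conditional jumps encode as 0x70 | cc, so switching between the carry-based and zero-based forms is one byte write. A minimal sketch of that byte arithmetic, assuming the standard x86 condition codes; ToggleShortJcc is a hypothetical standalone helper, not V8 API:

#include <cassert>
#include <cstdint>

// x86 short Jcc encoding: one opcode byte (0x70 | condition) plus one rel8 byte.
constexpr uint8_t kJccShortPrefix = 0x70;
constexpr uint8_t kCarry = 0x2, kNotCarry = 0x3, kZero = 0x4, kNotZero = 0x5;

// Toggle a short jump between the carry-based form (check disabled) and the
// zero-based form (check enabled), as PatchInlinedSmiCode does.
inline void ToggleShortJcc(uint8_t* jmp_opcode, bool enable) {
  uint8_t cc;
  if (enable) {
    // Disabled form uses the carry flag (jc/jnc).
    assert(*jmp_opcode == (kJccShortPrefix | kCarry) ||
           *jmp_opcode == (kJccShortPrefix | kNotCarry));
    cc = (*jmp_opcode == (kJccShortPrefix | kNotCarry)) ? kNotZero : kZero;
  } else {
    // Enabled form uses the zero flag (jz/jnz).
    assert(*jmp_opcode == (kJccShortPrefix | kZero) ||
           *jmp_opcode == (kJccShortPrefix | kNotZero));
    cc = (*jmp_opcode == (kJccShortPrefix | kNotZero)) ? kNotCarry : kCarry;
  }
  *jmp_opcode = static_cast<uint8_t>(kJccShortPrefix | cc);  // rel8 untouched
}

With this layout, enabling and disabling are exact inverses, which is what the assertion at the top of PatchInlinedSmiCode verifies.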
diff --git a/src/x64/interface-descriptors-x64.cc b/src/x64/interface-descriptors-x64.cc
new file mode 100644
index 0000000..84fdca4
--- /dev/null
+++ b/src/x64/interface-descriptors-x64.cc
@@ -0,0 +1,305 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X64
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return rsi; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return rdx; }
+const Register LoadDescriptor::NameRegister() { return rcx; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return rax; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return rbx; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return rdx; }
+const Register StoreDescriptor::NameRegister() { return rcx; }
+const Register StoreDescriptor::ValueRegister() { return rax; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() {
+  return rbx;
+}
+
+
+const Register InstanceofDescriptor::left() { return rax; }
+const Register InstanceofDescriptor::right() { return rdx; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return rdx; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return rax; }
+
+
+const Register ApiGetterDescriptor::function_address() { return r8; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return rdx; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+  return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rbx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rdi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // ToNumberStub invokes a function, and therefore needs a context.
+  Register registers[] = {rsi, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rax, rbx, rcx};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rax, rbx, rcx, rdx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rbx, rdx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rcx, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rdi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rdi, rdx};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Smi()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // rax : number of arguments
+  // rbx : feedback vector
+  // rdx : (only if rbx is not the megamorphic symbol) slot in feedback
+  //       vector (Smi)
+  // rdi : constructor function
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {rsi, rax, rdi, rbx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rcx, rbx, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rax, rbx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // rax -- number of arguments
+  // rdi -- function
+  // rbx -- allocation site with elements kind
+  Register registers[] = {rsi, rdi, rbx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // The stack param count needs the constructor pointer and a single argument.
+  Register registers[] = {rsi, rdi, rbx, rax};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // rsi -- context
+  // rax -- number of arguments
+  // rdi -- constructor function
+  Register registers[] = {rsi, rdi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // The stack param count needs the constructor pointer and a single argument.
+  Register registers[] = {rsi, rdi, rax};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rdx, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rcx, rdx, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {rsi, rdx, rax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      rsi,  // context
+      rcx,  // key
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // key
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      rsi,  // context
+      rcx,  // name
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // name
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      rsi,  // context
+      rdx,  // receiver
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // receiver
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      rsi,  // context
+      rdi,  // JSFunction
+      rax,  // actual number of arguments
+      rbx,  // expected number of arguments
+  };
+  Representation representations[] = {
+      Representation::Tagged(),     // context
+      Representation::Tagged(),     // JSFunction
+      Representation::Integer32(),  // actual number of arguments
+      Representation::Integer32(),  // expected number of arguments
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      rsi,  // context
+      rax,  // callee
+      rbx,  // call_data
+      rcx,  // holder
+      rdx,  // api_function_address
+  };
+  Representation representations[] = {
+      Representation::Tagged(),    // context
+      Representation::Tagged(),    // callee
+      Representation::Tagged(),    // call_data
+      Representation::Tagged(),    // holder
+      Representation::External(),  // api_function_address
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X64
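The new interface-descriptors-x64.cc repeats a single pattern: each descriptor pins its inputs to fixed x64 registers and, where a value is passed untagged, records a Representation for it. A hedged sketch of that pattern for a made-up descriptor; MyStubDescriptor is hypothetical, and the sketch assumes the declarations from src/interface-descriptors.h that the file itself includes:

// Hypothetical descriptor following the pattern above; rsi always carries
// the context, and the Integer32 entry marks an argument passed untagged.
void MyStubDescriptor::Initialize(CallInterfaceDescriptorData* data) {
  Register registers[] = {
      rsi,  // context
      rdx,  // left operand
      rax,  // right operand, passed untagged
  };
  Representation representations[] = {
      Representation::Tagged(),     // context
      Representation::Tagged(),     // left operand
      Representation::Integer32(),  // right operand
  };
  data->Initialize(arraysize(registers), registers, representations);
}

Descriptors whose arguments are all tagged pass NULL for the representations array, as most of the Initialize methods above do.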
diff --git a/src/x64/lithium-codegen-x64.cc b/src/x64/lithium-codegen-x64.cc
index 81e8e9b..1981d55 100644
--- a/src/x64/lithium-codegen-x64.cc
+++ b/src/x64/lithium-codegen-x64.cc
@@ -6,10 +6,13 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/x64/lithium-codegen-x64.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
-#include "src/stub-cache.h"
 #include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/x64/lithium-codegen-x64.h"
 
 namespace v8 {
 namespace internal {
@@ -17,7 +20,7 @@
 
 // When invoking builtins, we need to record the safepoint in the middle of
 // the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
@@ -27,9 +30,9 @@
         deopt_mode_(mode) { }
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+  virtual void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const V8_OVERRIDE {
+  virtual void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
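
For illustration (a standalone sketch with assumed names, not code from this patch): the CallWrapper hook gives the code generator a callback immediately before and after the call the macro assembler emits, which is how the safepoint lands on the exact return address.

    // Minimal analogue of the hook pattern used by SafepointGenerator above.
    class CallWrapperSketch {
     public:
      virtual ~CallWrapperSketch() {}
      // Invoked just before the call instruction is emitted; call_size is
      // the size in bytes of the call sequence about to be generated.
      virtual void BeforeCall(int call_size) const {}
      // Invoked right after the call; a code generator records the
      // safepoint for the return address here.
      virtual void AfterCall() const {}
    };
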
 
@@ -44,7 +47,7 @@
 
 bool LCodeGen::GenerateCode() {
   LPhase phase("Z_Code generation", chunk());
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   status_ = GENERATING;
 
   // Open a frame scope to indicate that there is a frame on the stack.  The
@@ -61,7 +64,7 @@
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
-  ASSERT(is_done());
+  DCHECK(is_done());
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
@@ -80,8 +83,8 @@
 
 
 void LCodeGen::SaveCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Save clobbered callee double registers");
   int count = 0;
   BitVector* doubles = chunk()->allocated_double_registers();
@@ -96,8 +99,8 @@
 
 
 void LCodeGen::RestoreCallerDoubles() {
-  ASSERT(info()->saves_caller_doubles());
-  ASSERT(NeedsEagerFrame());
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
   Comment(";;; Restore clobbered callee double registers");
   BitVector* doubles = chunk()->allocated_double_registers();
   BitVector::Iterator save_iterator(doubles);
@@ -112,7 +115,7 @@
 
 
 bool LCodeGen::GeneratePrologue() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -137,7 +140,7 @@
       __ j(not_equal, &ok, Label::kNear);
 
       __ movp(rcx, GlobalObjectOperand());
-      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset));
+      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
 
       __ movp(args.GetReceiverOperand(), rcx);
 
@@ -147,7 +150,7 @@
 
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    ASSERT(!frame_is_built_);
+    DCHECK(!frame_is_built_);
     frame_is_built_ = true;
     if (info()->IsStub()) {
       __ StubPrologue();
@@ -167,7 +170,7 @@
 #endif
       __ Push(rax);
       __ Set(rax, slots);
-      __ movq(kScratchRegister, kSlotsZapValue);
+      __ Set(kScratchRegister, kSlotsZapValue);
       Label loop;
       __ bind(&loop);
       __ movp(MemOperand(rsp, rax, times_pointer_size, 0),
@@ -200,7 +203,7 @@
       need_write_barrier = false;
     } else {
       __ Push(rdi);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in rax.  It replaces the context passed to us.
@@ -252,7 +255,7 @@
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  ASSERT(slots >= 0);
+  DCHECK(slots >= 0);
   __ subp(rsp, Immediate(slots * kPointerSize));
 }
 
@@ -279,13 +282,13 @@
     // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
     // points and MustSignExtendResult is always false. We can't use
     // STATIC_ASSERT here as the pointer size is 32-bit for x32.
-    ASSERT(kPointerSize == kInt64Size);
+    DCHECK(kPointerSize == kInt64Size);
     if (instr->result()->IsRegister()) {
       Register result_reg = ToRegister(instr->result());
       __ movsxlq(result_reg, result_reg);
     } else {
       // Sign-extend the 32-bit result in the stack slots.
-      ASSERT(instr->result()->IsStackSlot());
+      DCHECK(instr->result()->IsStackSlot());
       Operand src = ToOperand(instr->result());
       __ movsxlq(kScratchRegister, src);
       __ movq(src, kScratchRegister);
@@ -300,17 +303,12 @@
     Comment(";;; -------------------- Jump table --------------------");
   }
   for (int i = 0; i < jump_table_.length(); i++) {
-    __ bind(&jump_table_[i].label);
-    Address entry = jump_table_[i].address;
-    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    if (id == Deoptimizer::kNotDeoptimizationEntry) {
-      Comment(";;; jump table entry %d.", i);
-    } else {
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    }
-    if (jump_table_[i].needs_frame) {
-      ASSERT(!info()->saves_caller_doubles());
+    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+    __ bind(&table_entry->label);
+    Address entry = table_entry->address;
+    DeoptComment(table_entry->reason);
+    if (table_entry->needs_frame) {
+      DCHECK(!info()->saves_caller_doubles());
       __ Move(kScratchRegister, ExternalReference::ForDeoptEntry(entry));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
@@ -323,7 +321,7 @@
         // This variant of deopt can only be used with stubs. Since we don't
         // have a function pointer to install in the stack frame that we're
         // building, install a special marker there instead.
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         __ Move(rsi, Smi::FromInt(StackFrame::STUB));
         __ Push(rsi);
         __ movp(rsi, MemOperand(rsp, kPointerSize));
@@ -331,7 +329,7 @@
       }
     } else {
       if (info()->saves_caller_doubles()) {
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         RestoreCallerDoubles();
       }
       __ call(entry, RelocInfo::RUNTIME_ENTRY);
@@ -342,7 +340,7 @@
 
 
 bool LCodeGen::GenerateDeferredCode() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
@@ -360,8 +358,8 @@
       __ bind(code->entry());
       if (NeedsDeferredFrame()) {
         Comment(";;; Build frame");
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
         frame_is_built_ = true;
         // Build the frame in such a way that rsi isn't trashed.
         __ pushq(rbp);  // Caller's frame pointer.
@@ -374,7 +372,7 @@
       if (NeedsDeferredFrame()) {
         __ bind(code->done());
         Comment(";;; Destroy frame");
-        ASSERT(frame_is_built_);
+        DCHECK(frame_is_built_);
         frame_is_built_ = false;
         __ movp(rsp, rbp);
         __ popq(rbp);
@@ -391,7 +389,7 @@
 
 
 bool LCodeGen::GenerateSafepointTable() {
-  ASSERT(is_done());
+  DCHECK(is_done());
   safepoints_.Emit(masm(), GetStackSlotCount());
   return !is_aborted();
 }
@@ -408,13 +406,13 @@
 
 
 Register LCodeGen::ToRegister(LOperand* op) const {
-  ASSERT(op->IsRegister());
+  DCHECK(op->IsRegister());
   return ToRegister(op->index());
 }
 
 
 XMMRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
-  ASSERT(op->IsDoubleRegister());
+  DCHECK(op->IsDoubleRegister());
   return ToDoubleRegister(op->index());
 }
 
@@ -445,7 +443,7 @@
   HConstant* constant = chunk_->LookupConstant(op);
   int32_t value = constant->Integer32Value();
   if (r.IsInteger32()) return value;
-  ASSERT(SmiValuesAre31Bits() && r.IsSmiOrTagged());
+  DCHECK(SmiValuesAre31Bits() && r.IsSmiOrTagged());
   return static_cast<int32_t>(reinterpret_cast<intptr_t>(Smi::FromInt(value)));
 }
 
@@ -458,27 +456,27 @@
 
 double LCodeGen::ToDouble(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasDoubleValue());
+  DCHECK(constant->HasDoubleValue());
   return constant->DoubleValue();
 }
 
 
 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasExternalReferenceValue());
+  DCHECK(constant->HasExternalReferenceValue());
   return constant->ExternalReferenceValue();
 }
 
 
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }
 
 
 static int ArgumentsOffsetWithoutFrame(int index) {
-  ASSERT(index < 0);
+  DCHECK(index < 0);
   return -(index + 1) * kPointerSize + kPCOnStackSize;
 }
 
@@ -486,7 +484,7 @@
 Operand LCodeGen::ToOperand(LOperand* op) const {
   // Does not handle registers. In X64 assembler, plain registers are not
   // representable as an Operand.
-  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return Operand(rbp, StackSlotOffset(op->index()));
   } else {
@@ -521,13 +519,13 @@
       translation->BeginConstructStubFrame(closure_id, translation_size);
       break;
     case JS_GETTER:
-      ASSERT(translation_size == 1);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
       translation->BeginGetterStubFrame(closure_id);
       break;
     case JS_SETTER:
-      ASSERT(translation_size == 2);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
     case ARGUMENTS_ADAPTOR:
@@ -626,7 +624,7 @@
                                LInstruction* instr,
                                SafepointMode safepoint_mode,
                                int argc) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
   __ call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode, argc);
 
@@ -650,8 +648,8 @@
                            int num_arguments,
                            LInstruction* instr,
                            SaveFPRegsMode save_doubles) {
-  ASSERT(instr != NULL);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr != NULL);
+  DCHECK(instr->HasPointerMap());
 
   __ CallRuntime(function, num_arguments, save_doubles);
 
@@ -726,13 +724,14 @@
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition cc,
-                            LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            const char* detail,
                             Deoptimizer::BailoutType bailout_type) {
+  LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(environment->HasBeenRegistered());
+  DCHECK(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
   Address entry =
       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
   if (entry == NULL) {
@@ -744,7 +743,7 @@
     ExternalReference count = ExternalReference::stress_deopt_count(isolate());
     Label no_deopt;
     __ pushfq();
-    __ Push(rax);
+    __ pushq(rax);
     Operand count_operand = masm()->ExternalOperand(count, kScratchRegister);
     __ movl(rax, count_operand);
     __ subl(rax, Immediate(1));
@@ -752,13 +751,13 @@
     if (FLAG_trap_on_deopt) __ int3();
     __ movl(rax, Immediate(FLAG_deopt_every_n_times));
     __ movl(count_operand, rax);
-    __ Pop(rax);
+    __ popq(rax);
     __ popfq();
-    ASSERT(frame_is_built_);
+    DCHECK(frame_is_built_);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
     __ bind(&no_deopt);
     __ movl(count_operand, rax);
-    __ Pop(rax);
+    __ popq(rax);
     __ popfq();
   }
 
@@ -771,22 +770,22 @@
     __ bind(&done);
   }
 
-  ASSERT(info()->IsStub() || frame_is_built_);
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
+  DCHECK(info()->IsStub() || frame_is_built_);
   // Go through jump table if we need to handle condition, build frame, or
   // restore caller doubles.
   if (cc == no_condition && frame_is_built_ &&
       !info()->saves_caller_doubles()) {
+    DeoptComment(reason);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry ||
-        jump_table_.last().needs_frame != !frame_is_built_ ||
-        jump_table_.last().bailout_type != bailout_type) {
-      Deoptimizer::JumpTableEntry table_entry(entry,
-                                              bailout_type,
-                                              !frame_is_built_);
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
     }
     if (cc == no_condition) {
@@ -798,12 +797,12 @@
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition cc,
-                            LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(cc, environment, bailout_type);
+  DeoptimizeIf(cc, instr, detail, bailout_type);
 }
 
 
@@ -862,7 +861,7 @@
 
 
 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
+  DCHECK(deoptimization_literals_.length() == 0);
 
   const ZoneList<Handle<JSFunction> >* inlined_closures =
       chunk()->inlined_closures();
@@ -882,7 +881,7 @@
   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
   } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS);
     RecordSafepointWithRegisters(
         instr->pointer_map(), argc, Safepoint::kLazyDeopt);
   }
@@ -894,7 +893,7 @@
     Safepoint::Kind kind,
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
-  ASSERT(kind == expected_safepoint_kind_);
+  DCHECK(kind == expected_safepoint_kind_);
 
   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
 
@@ -983,8 +982,8 @@
 
 
 void LCodeGen::DoCallStub(LCallStub* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->result()).is(rax));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
       RegExpExecStub stub(isolate());
@@ -1015,7 +1014,7 @@
 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));
 
   // Theoretically, a variation of the branch-free code for integer division by
   // a power of 2 (calculating the remainder via an additional multiplication
@@ -1034,7 +1033,7 @@
     __ andl(dividend, Immediate(mask));
     __ negl(dividend);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(zero, instr->environment());
+      DeoptimizeIf(zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
   }
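
As a worked sketch of the mask-and-negate sequence above (plain C++, not V8 code): truncating modulus keeps the dividend's sign, so a negative dividend is negated, masked, and negated back, and a zero result from a negative dividend is exactly the "minus zero" case that deopts.

    #include <cstdint>

    // divisor is assumed to be a power of two; mask == divisor - 1.
    int32_t ModByPowerOf2(int32_t dividend, uint32_t mask) {
      if (dividend < 0) {
        // Negate in unsigned arithmetic so INT32_MIN does not overflow.
        uint32_t m = (0u - static_cast<uint32_t>(dividend)) & mask;
        // m == 0 with a negative dividend is the "minus zero" deopt case.
        return -static_cast<int32_t>(m);
      }
      return static_cast<int32_t>(static_cast<uint32_t>(dividend) & mask);
    }
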
@@ -1048,10 +1047,10 @@
 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->result()).is(rax));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1066,7 +1065,7 @@
     Label remainder_not_zero;
     __ j(not_zero, &remainder_not_zero, Label::kNear);
     __ cmpl(dividend, Immediate(0));
-    DeoptimizeIf(less, instr->environment());
+    DeoptimizeIf(less, instr, "minus zero");
     __ bind(&remainder_not_zero);
   }
 }
@@ -1076,19 +1075,19 @@
   HMod* hmod = instr->hydrogen();
 
   Register left_reg = ToRegister(instr->left());
-  ASSERT(left_reg.is(rax));
+  DCHECK(left_reg.is(rax));
   Register right_reg = ToRegister(instr->right());
-  ASSERT(!right_reg.is(rax));
-  ASSERT(!right_reg.is(rdx));
+  DCHECK(!right_reg.is(rax));
+  DCHECK(!right_reg.is(rdx));
   Register result_reg = ToRegister(instr->result());
-  ASSERT(result_reg.is(rdx));
+  DCHECK(result_reg.is(rdx));
 
   Label done;
   // Check for x % 0; idiv would signal a divide error. We have to
   // deopt in this case because we can't return a NaN.
   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(right_reg, right_reg);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1099,7 +1098,7 @@
     __ j(not_zero, &no_overflow_possible, Label::kNear);
     __ cmpl(right_reg, Immediate(-1));
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "minus zero");
     } else {
       __ j(not_equal, &no_overflow_possible, Label::kNear);
       __ Set(result_reg, 0);
@@ -1119,7 +1118,7 @@
     __ j(not_sign, &positive_left, Label::kNear);
     __ idivl(right_reg);
     __ testl(result_reg, result_reg);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
     __ jmp(&done, Label::kNear);
     __ bind(&positive_left);
   }
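
The kMinInt checks above exist because idiv traps rather than wraps: the quotient of INT32_MIN / -1 does not fit in 32 bits, so the CPU raises a divide error. A sketch of the equivalent guard in plain C++ (not V8 code; the generated code deopts where this returns early):

    #include <cstdint>

    int32_t GuardedMod(int32_t lhs, int32_t rhs) {
      if (rhs == 0) return 0;                       // deopt: "division by zero"
      if (lhs == INT32_MIN && rhs == -1) return 0;  // idiv would trap; x % -1 == 0
      return lhs % rhs;
    }
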
@@ -1131,7 +1130,7 @@
 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));
 
   // If the divisor is positive, things are easy: There can be no deopts and we
   // can simply do an arithmetic right shift.
@@ -1145,13 +1144,13 @@
   // If the divisor is negative, we have to negate and handle edge cases.
   __ negl(dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(overflow, instr->environment());
+      DeoptimizeIf(overflow, instr, "overflow");
     }
     return;
   }
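
The positive-divisor fast path above works because an arithmetic right shift already rounds toward negative infinity; only negative divisors need the negate-and-check edge cases. A minimal sketch (not V8 code):

    #include <cstdint>

    // Flooring division by 1 << shift, for a positive power-of-two divisor.
    // Right-shifting negative values is implementation-defined in C++, but it
    // is the arithmetic shift (sarl) on the x64 compilers this mirrors.
    int32_t FlooringDivByPowerOf2(int32_t dividend, int shift) {
      return dividend >> shift;  // e.g. -5 >> 1 == -3 == floor(-5.0 / 2)
    }
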
@@ -1175,10 +1174,10 @@
 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(rdx));
+  DCHECK(ToRegister(instr->result()).is(rdx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1186,7 +1185,7 @@
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ testl(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -1201,7 +1200,7 @@
   // In the general case we may need to adjust before and after the truncating
   // division to get a flooring division.
   Register temp = ToRegister(instr->temp3());
-  ASSERT(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
+  DCHECK(!temp.is(dividend) && !temp.is(rax) && !temp.is(rdx));
   Label needs_adjustment, done;
   __ cmpl(dividend, Immediate(0));
   __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
@@ -1224,16 +1223,16 @@
   Register divisor = ToRegister(instr->divisor());
   Register remainder = ToRegister(instr->temp());
   Register result = ToRegister(instr->result());
-  ASSERT(dividend.is(rax));
-  ASSERT(remainder.is(rdx));
-  ASSERT(result.is(rax));
-  ASSERT(!divisor.is(rax));
-  ASSERT(!divisor.is(rdx));
+  DCHECK(dividend.is(rax));
+  DCHECK(remainder.is(rdx));
+  DCHECK(result.is(rax));
+  DCHECK(!divisor.is(rax));
+  DCHECK(!divisor.is(rdx));
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(divisor, divisor);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1242,7 +1241,7 @@
     __ testl(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ testl(divisor, divisor);
-    DeoptimizeIf(sign, instr->environment());
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1252,7 +1251,7 @@
     __ cmpl(dividend, Immediate(kMinInt));
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmpl(divisor, Immediate(-1));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1274,26 +1273,26 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
-  ASSERT(!result.is(dividend));
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ testl(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     __ cmpl(dividend, Immediate(kMinInt));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ testl(dividend, Immediate(mask));
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
   __ Move(result, dividend);
   int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1311,10 +1310,10 @@
 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(rdx));
+  DCHECK(ToRegister(instr->result()).is(rdx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1322,7 +1321,7 @@
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ testl(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   __ TruncatingDiv(dividend, Abs(divisor));
@@ -1332,7 +1331,7 @@
     __ movl(rax, rdx);
     __ imull(rax, rax, Immediate(divisor));
     __ subl(rax, dividend);
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "lost precision");
   }
 }
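
TruncatingDiv above is the usual multiply-by-magic-number lowering of division by a constant; when some use of the result does not truncate, exactness is verified by multiplying the quotient back and comparing against the dividend. A sketch of that check (not V8 code):

    #include <cstdint>

    // Returns false where the generated code deopts with "lost precision".
    // Assumes the (INT32_MIN, -1) pair was filtered earlier, as above.
    bool ExactDivByConst(int32_t dividend, int32_t divisor, int32_t* quotient) {
      *quotient = dividend / divisor;    // stands in for TruncatingDiv
      return *quotient * divisor == dividend;
    }
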
 
@@ -1343,16 +1342,16 @@
   Register dividend = ToRegister(instr->dividend());
   Register divisor = ToRegister(instr->divisor());
   Register remainder = ToRegister(instr->temp());
-  ASSERT(dividend.is(rax));
-  ASSERT(remainder.is(rdx));
-  ASSERT(ToRegister(instr->result()).is(rax));
-  ASSERT(!divisor.is(rax));
-  ASSERT(!divisor.is(rdx));
+  DCHECK(dividend.is(rax));
+  DCHECK(remainder.is(rdx));
+  DCHECK(ToRegister(instr->result()).is(rax));
+  DCHECK(!divisor.is(rax));
+  DCHECK(!divisor.is(rdx));
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ testl(divisor, divisor);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1361,7 +1360,7 @@
     __ testl(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ testl(divisor, divisor);
-    DeoptimizeIf(sign, instr->environment());
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1371,7 +1370,7 @@
     __ cmpl(dividend, Immediate(kMinInt));
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmpl(divisor, Immediate(-1));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1382,7 +1381,7 @@
   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     // Deoptimize if remainder is not 0.
     __ testl(remainder, remainder);
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
 }
 
@@ -1459,7 +1458,7 @@
   }
 
   if (can_overflow) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -1474,14 +1473,14 @@
     if (right->IsConstantOperand()) {
       // Constant can't be represented as 32-bit Smi due to immediate size
       // limit.
-      ASSERT(SmiValuesAre32Bits()
+      DCHECK(SmiValuesAre32Bits()
           ? !instr->hydrogen_value()->representation().IsSmi()
           : SmiValuesAre31Bits());
       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
-        DeoptimizeIf(no_condition, instr->environment());
+        DeoptimizeIf(no_condition, instr, "minus zero");
       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
         __ cmpl(kScratchRegister, Immediate(0));
-        DeoptimizeIf(less, instr->environment());
+        DeoptimizeIf(less, instr, "minus zero");
       }
     } else if (right->IsStackSlot()) {
       if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1489,7 +1488,7 @@
       } else {
         __ orl(kScratchRegister, ToOperand(right));
       }
-      DeoptimizeIf(sign, instr->environment());
+      DeoptimizeIf(sign, instr, "minus zero");
     } else {
       // Test the non-zero operand for negative sign.
       if (instr->hydrogen_value()->representation().IsSmi()) {
@@ -1497,7 +1496,7 @@
       } else {
         __ orl(kScratchRegister, ToRegister(right));
       }
-      DeoptimizeIf(sign, instr->environment());
+      DeoptimizeIf(sign, instr, "minus zero");
     }
     __ bind(&done);
   }
@@ -1507,8 +1506,8 @@
 void LCodeGen::DoBitI(LBitI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
 
   if (right->IsConstantOperand()) {
     int32_t right_operand =
@@ -1560,7 +1559,7 @@
         break;
     }
   } else {
-    ASSERT(right->IsRegister());
+    DCHECK(right->IsRegister());
     switch (instr->op()) {
       case Token::BIT_AND:
         if (instr->IsInteger32()) {
@@ -1594,10 +1593,10 @@
 void LCodeGen::DoShiftI(LShiftI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
   if (right->IsRegister()) {
-    ASSERT(ToRegister(right).is(rcx));
+    DCHECK(ToRegister(right).is(rcx));
 
     switch (instr->op()) {
       case Token::ROR:
@@ -1610,7 +1609,7 @@
         __ shrl_cl(ToRegister(left));
         if (instr->can_deopt()) {
           __ testl(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(negative, instr->environment());
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1639,7 +1638,7 @@
           __ shrl(ToRegister(left), Immediate(shift_count));
         } else if (instr->can_deopt()) {
           __ testl(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(negative, instr->environment());
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1648,13 +1647,13 @@
             if (SmiValuesAre32Bits()) {
               __ shlp(ToRegister(left), Immediate(shift_count));
             } else {
-              ASSERT(SmiValuesAre31Bits());
+              DCHECK(SmiValuesAre31Bits());
               if (instr->can_deopt()) {
                 if (shift_count != 1) {
                   __ shll(ToRegister(left), Immediate(shift_count - 1));
                 }
                 __ Integer32ToSmi(ToRegister(left), ToRegister(left));
-                DeoptimizeIf(overflow, instr->environment());
+                DeoptimizeIf(overflow, instr, "overflow");
               } else {
                 __ shll(ToRegister(left), Immediate(shift_count));
               }
@@ -1675,7 +1674,7 @@
 void LCodeGen::DoSubI(LSubI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
+  DCHECK(left->Equals(instr->result()));
 
   if (right->IsConstantOperand()) {
     int32_t right_operand =
@@ -1697,7 +1696,7 @@
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
@@ -1718,10 +1717,10 @@
 
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
-  ASSERT(instr->result()->IsDoubleRegister());
+  DCHECK(instr->result()->IsDoubleRegister());
   XMMRegister res = ToDoubleRegister(instr->result());
   double v = instr->value();
-  uint64_t int_val = BitCast<uint64_t, double>(v);
+  uint64_t int_val = bit_cast<uint64_t, double>(v);
   // Use xor to produce +0.0 in a fast and compact way, but avoid doing
   // so if the constant is -0.0.
   if (int_val == 0) {
@@ -1758,13 +1757,13 @@
   Register result = ToRegister(instr->result());
   Smi* index = instr->index();
   Label runtime, done, not_date_object;
-  ASSERT(object.is(result));
-  ASSERT(object.is(rax));
+  DCHECK(object.is(result));
+  DCHECK(object.is(rax));
 
   Condition cc = masm()->CheckSmi(object);
-  DeoptimizeIf(cc, instr->environment());
+  DeoptimizeIf(cc, instr, "Smi");
   __ CmpObjectType(object, JS_DATE_TYPE, kScratchRegister);
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a date object");
 
   if (index->value() == 0) {
     __ movp(result, FieldOperand(object, JSDate::kValueOffset));
@@ -1854,12 +1853,12 @@
   Operand operand = BuildSeqStringOperand(string, instr->index(), encoding);
   if (instr->value()->IsConstantOperand()) {
     int value = ToInteger32(LConstantOperand::cast(instr->value()));
-    ASSERT_LE(0, value);
+    DCHECK_LE(0, value);
     if (encoding == String::ONE_BYTE_ENCODING) {
-      ASSERT_LE(value, String::kMaxOneByteCharCode);
+      DCHECK_LE(value, String::kMaxOneByteCharCode);
       __ movb(operand, Immediate(value));
     } else {
-      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
       __ movw(operand, Immediate(value));
     }
   } else {
@@ -1883,7 +1882,7 @@
   if (LAddI::UseLea(instr->hydrogen()) && !left->Equals(instr->result())) {
     if (right->IsConstantOperand()) {
       // No support for smi-immediates for 32-bit SMI.
-      ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
       int32_t offset =
           ToRepresentation(LConstantOperand::cast(right),
                            instr->hydrogen()->right()->representation());
@@ -1905,7 +1904,7 @@
   } else {
     if (right->IsConstantOperand()) {
       // No support for smi-immediates for 32-bit SMI.
-      ASSERT(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
+      DCHECK(SmiValuesAre32Bits() ? !target_rep.IsSmi() : SmiValuesAre31Bits());
       int32_t right_operand =
           ToRepresentation(LConstantOperand::cast(right),
                            instr->hydrogen()->right()->representation());
@@ -1928,7 +1927,7 @@
       }
     }
     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      DeoptimizeIf(overflow, instr->environment());
+      DeoptimizeIf(overflow, instr, "overflow");
     }
   }
 }
@@ -1937,7 +1936,7 @@
 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
+  DCHECK(left->Equals(instr->result()));
   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
     Label return_left;
@@ -1949,7 +1948,7 @@
       Immediate right_imm = Immediate(
           ToRepresentation(LConstantOperand::cast(right),
                            instr->hydrogen()->right()->representation()));
-      ASSERT(SmiValuesAre32Bits()
+      DCHECK(SmiValuesAre32Bits()
           ? !instr->hydrogen()->representation().IsSmi()
           : SmiValuesAre31Bits());
       __ cmpl(left_reg, right_imm);
@@ -1976,7 +1975,7 @@
     }
     __ bind(&return_left);
   } else {
-    ASSERT(instr->hydrogen()->representation().IsDouble());
+    DCHECK(instr->hydrogen()->representation().IsDouble());
     Label check_nan_left, check_zero, return_left, return_right;
     Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
     XMMRegister left_reg = ToDoubleRegister(left);
@@ -2017,7 +2016,7 @@
   XMMRegister right = ToDoubleRegister(instr->right());
   XMMRegister result = ToDoubleRegister(instr->result());
   // All operations except MOD are computed in-place.
-  ASSERT(instr->op() == Token::MOD || left.is(result));
+  DCHECK(instr->op() == Token::MOD || left.is(result));
   switch (instr->op()) {
     case Token::ADD:
       __ addsd(left, right);
@@ -2038,7 +2037,7 @@
       XMMRegister xmm_scratch = double_scratch0();
       __ PrepareCallCFunction(2);
       __ movaps(xmm_scratch, left);
-      ASSERT(right.is(xmm1));
+      DCHECK(right.is(xmm1));
       __ CallCFunction(
           ExternalReference::mod_two_doubles_operation(isolate()), 2);
       __ movaps(result, xmm_scratch);
@@ -2052,13 +2051,14 @@
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->left()).is(rdx));
-  ASSERT(ToRegister(instr->right()).is(rax));
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->left()).is(rdx));
+  DCHECK(ToRegister(instr->right()).is(rax));
+  DCHECK(ToRegister(instr->result()).is(rax));
 
-  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2099,45 +2099,45 @@
 void LCodeGen::DoBranch(LBranch* instr) {
   Representation r = instr->hydrogen()->value()->representation();
   if (r.IsInteger32()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     Register reg = ToRegister(instr->value());
     __ testl(reg, reg);
     EmitBranch(instr, not_zero);
   } else if (r.IsSmi()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     Register reg = ToRegister(instr->value());
     __ testp(reg, reg);
     EmitBranch(instr, not_zero);
   } else if (r.IsDouble()) {
-    ASSERT(!info()->IsStub());
+    DCHECK(!info()->IsStub());
     XMMRegister reg = ToDoubleRegister(instr->value());
     XMMRegister xmm_scratch = double_scratch0();
     __ xorps(xmm_scratch, xmm_scratch);
     __ ucomisd(reg, xmm_scratch);
     EmitBranch(instr, not_equal);
   } else {
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     Register reg = ToRegister(instr->value());
     HType type = instr->hydrogen()->value()->type();
     if (type.IsBoolean()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ CompareRoot(reg, Heap::kTrueValueRootIndex);
       EmitBranch(instr, equal);
     } else if (type.IsSmi()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ SmiCompare(reg, Smi::FromInt(0));
       EmitBranch(instr, not_equal);
     } else if (type.IsJSArray()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       EmitBranch(instr, no_condition);
     } else if (type.IsHeapNumber()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       XMMRegister xmm_scratch = double_scratch0();
       __ xorps(xmm_scratch, xmm_scratch);
       __ ucomisd(xmm_scratch, FieldOperand(reg, HeapNumber::kValueOffset));
       EmitBranch(instr, not_equal);
     } else if (type.IsString()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ cmpp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
@@ -2172,7 +2172,7 @@
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ testb(reg, Immediate(kSmiTagMask));
-        DeoptimizeIf(zero, instr->environment());
+        DeoptimizeIf(zero, instr, "Smi");
       }
 
       const Register map = kScratchRegister;
@@ -2226,7 +2226,7 @@
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
-        DeoptimizeIf(no_condition, instr->environment());
+        DeoptimizeIf(no_condition, instr, "unexpected object");
       }
     }
   }
@@ -2380,7 +2380,7 @@
 
 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   Representation rep = instr->hydrogen()->value()->representation();
-  ASSERT(!rep.IsInteger32());
+  DCHECK(!rep.IsInteger32());
 
   if (rep.IsDouble()) {
     XMMRegister value = ToDoubleRegister(instr->value());
@@ -2408,7 +2408,7 @@
 Condition LCodeGen::EmitIsObject(Register input,
                                  Label* is_not_object,
                                  Label* is_object) {
-  ASSERT(!input.is(kScratchRegister));
+  DCHECK(!input.is(kScratchRegister));
 
   __ JumpIfSmi(input, is_not_object);
 
@@ -2497,10 +2497,10 @@
 
 
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = TokenToCondition(op, false);
@@ -2514,7 +2514,7 @@
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
-  ASSERT(from == to || to == LAST_TYPE);
+  DCHECK(from == to || to == LAST_TYPE);
   return from;
 }
 
@@ -2549,7 +2549,7 @@
   __ AssertString(input);
 
   __ movl(result, FieldOperand(input, String::kHashFieldOffset));
-  ASSERT(String::kHashShift >= kSmiTagSize);
+  DCHECK(String::kHashShift >= kSmiTagSize);
   __ IndexFromHash(result, result);
 }
 
@@ -2572,13 +2572,13 @@
                                Register input,
                                Register temp,
                                Register temp2) {
-  ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
 
   __ JumpIfSmi(input, is_false);
 
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
     // Assuming the following assertions, we can use the same compares to test
     // for both being a function type and being in the object type range.
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2609,7 +2609,7 @@
 
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, kScratchRegister);
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
     __ j(not_equal, is_true);
   } else {
     __ j(not_equal, is_false);
@@ -2626,7 +2626,7 @@
   // classes and it doesn't have to because you can't access it with natives
   // syntax.  Since both sides are internalized, it is sufficient to use an
   // identity comparison.
-  ASSERT(class_name->IsInternalizedString());
+  DCHECK(class_name->IsInternalizedString());
   __ Cmp(temp, class_name);
   // End with the answer in the z flag.
 }
@@ -2654,7 +2654,7 @@
 
 
 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
   __ Push(ToRegister(instr->left()));
   __ Push(ToRegister(instr->right()));
@@ -2671,22 +2671,22 @@
 
 
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
-  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
    public:
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
    private:
     LInstanceOfKnownGlobal* instr_;
     Label map_check_;
   };
 
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   DeferredInstanceOfKnownGlobal* deferred;
   deferred = new(zone()) DeferredInstanceOfKnownGlobal(this, instr);
 
@@ -2714,7 +2714,7 @@
   // Check that the code size between patch label and patch sites is invariant.
   Label end_of_patched_code;
   __ bind(&end_of_patched_code);
-  ASSERT(true);
+  DCHECK(true);
 #endif
   __ jmp(&done, Label::kNear);
 
@@ -2746,10 +2746,10 @@
     __ Push(ToRegister(instr->value()));
     __ Push(instr->function());
 
-    static const int kAdditionalDelta = 10;
+    static const int kAdditionalDelta = kPointerSize == kInt64Size ? 10 : 16;
     int delta =
         masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
-    ASSERT(delta >= 0);
+    DCHECK(delta >= 0);
     __ PushImm32(delta);
 
     // We are pushing three values on the stack but recording a
@@ -2761,7 +2761,7 @@
                     instr,
                     RECORD_SAFEPOINT_WITH_REGISTERS,
                     2);
-    ASSERT(delta == masm_->SizeOfCodeGeneratedSince(map_check));
+    DCHECK(delta == masm_->SizeOfCodeGeneratedSince(map_check));
     LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
     safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
     // Move result to a register that survives the end of the
@@ -2781,10 +2781,10 @@
 
 
 void LCodeGen::DoCmpT(LCmpT* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = TokenToCondition(op, false);
@@ -2842,19 +2842,36 @@
   __ LoadGlobalCell(result, instr->hydrogen()->cell().handle());
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
   }
 }
 
 
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->global_object()).is(rax));
-  ASSERT(ToRegister(instr->result()).is(rax));
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ Move(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(rax));
+  __ Move(VectorLoadICDescriptor::SlotRegister(),
+          Smi::FromInt(instr->hydrogen()->slot()));
+}
 
-  __ Move(rcx, instr->name());
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(rax));
+
+  __ Move(LoadDescriptor::NameRegister(), instr->name());
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  }
   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2870,10 +2887,10 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     // We have a temp because CompareRoot might clobber kScratchRegister.
     Register cell = ToRegister(instr->temp());
-    ASSERT(!value.is(cell));
+    DCHECK(!value.is(cell));
     __ Move(cell, cell_handle, RelocInfo::CELL);
     __ CompareRoot(Operand(cell, 0), Heap::kTheHoleValueRootIndex);
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
     // Store the value.
     __ movp(Operand(cell, 0), value);
   } else {
@@ -2892,7 +2909,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       Label is_not_hole;
       __ j(not_equal, &is_not_hole, Label::kNear);
@@ -2913,7 +2930,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ CompareRoot(target, Heap::kTheHoleValueRootIndex);
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       __ j(not_equal, &skip_assignment);
     }
@@ -2946,7 +2963,7 @@
   if (access.IsExternalMemory()) {
     Register result = ToRegister(instr->result());
     if (instr->object()->IsConstantOperand()) {
-      ASSERT(result.is(rax));
+      DCHECK(result.is(rax));
       __ load_rax(ToExternalReference(LConstantOperand::cast(instr->object())));
     } else {
       Register object = ToRegister(instr->object());
@@ -2979,7 +2996,7 @@
 
     // Read int value directly from upper half of the smi.
     STATIC_ASSERT(kSmiTag == 0);
-    ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
     offset += kPointerSize / 2;
     representation = Representation::Integer32();
   }
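
The offset bump works because with 32-bit Smis on x64 the payload sits in the upper half of the 64-bit word (kSmiTagSize + kSmiShiftSize == 32), so on little-endian x64 an int32 load at offset + kPointerSize / 2 reads the value directly. A sketch (not V8 code):

    #include <cstdint>

    // A 64-bit Smi: the tag and shift bits occupy the low 32 bits (all
    // zero), the integer payload the high 32 bits.
    int64_t MakeSmi(int32_t value) {
      // Shift via uint64_t to avoid UB on negative payloads pre-C++20.
      return static_cast<int64_t>(
          static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32);
    }

    int32_t SmiPayload(int64_t smi) {
      // Equivalent to the int32 load at byte offset +4 performed above.
      return static_cast<int32_t>(static_cast<uint64_t>(smi) >> 32);
    }
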
@@ -2988,12 +3005,15 @@
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->object()).is(rax));
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(rax));
 
-  __ Move(rcx, instr->name());
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+  __ Move(LoadDescriptor::NameRegister(), instr->name());
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  }
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3002,23 +3022,13 @@
   Register function = ToRegister(instr->function());
   Register result = ToRegister(instr->result());
 
-  // Check that the function really is a function.
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
-  DeoptimizeIf(not_equal, instr->environment());
-
-  // Check whether the function has an instance prototype.
-  Label non_instance;
-  __ testb(FieldOperand(result, Map::kBitFieldOffset),
-           Immediate(1 << Map::kHasNonInstancePrototype));
-  __ j(not_zero, &non_instance, Label::kNear);
-
   // Get the prototype or initial map from the function.
   __ movp(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
 
   // Check that the function has a prototype or an initial map.
   __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "hole");
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3027,12 +3037,6 @@
 
   // Get the prototype from the initial map.
   __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
-  __ jmp(&done, Label::kNear);
-
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in the function's map.
-  __ bind(&non_instance);
-  __ movp(result, FieldOperand(result, Map::kConstructorOffset));
 
   // All done.
   __ bind(&done);
@@ -3136,7 +3140,7 @@
         __ movl(result, operand);
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
           __ testl(result, result);
-          DeoptimizeIf(negative, instr->environment());
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3175,7 +3179,7 @@
         FAST_DOUBLE_ELEMENTS,
         instr->base_offset() + sizeof(kHoleNanLower32));
     __ cmpl(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
   }
 
   Operand double_load_operand = BuildFastArrayOperand(
@@ -3204,7 +3208,7 @@
   }
   if (representation.IsInteger32() && SmiValuesAre32Bits() &&
       hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
-    ASSERT(!requires_hole_check);
+    DCHECK(!requires_hole_check);
     if (FLAG_debug_code) {
       Register scratch = kScratchRegister;
       __ Load(scratch,
@@ -3218,26 +3222,24 @@
     }
     // Read int value directly from upper half of the smi.
     STATIC_ASSERT(kSmiTag == 0);
-    ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
     offset += kPointerSize / 2;
   }
 
   __ Load(result,
-          BuildFastArrayOperand(instr->elements(),
-                                key,
+          BuildFastArrayOperand(instr->elements(), key,
                                 instr->hydrogen()->key()->representation(),
-                                FAST_ELEMENTS,
-                                offset),
+                                FAST_ELEMENTS, offset),
           representation);
 
   // Check for the hole value.
   if (requires_hole_check) {
     if (IsFastSmiElementsKind(hinstr->elements_kind())) {
       Condition smi = __ CheckSmi(result);
-      DeoptimizeIf(NegateCondition(smi), instr->environment());
+      DeoptimizeIf(NegateCondition(smi), instr, "not a Smi");
     } else {
       __ CompareRoot(result, Heap::kTheHoleValueRootIndex);
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     }
   }
 }
@@ -3272,7 +3274,7 @@
   } else {
     // Take the tag bit into account while computing the shift size.
     if (key_representation.IsSmi() && (shift_size >= 1)) {
-      ASSERT(SmiValuesAre31Bits());
+      DCHECK(SmiValuesAre31Bits());
       shift_size -= kSmiTagSize;
     }
     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
@@ -3285,11 +3287,15 @@
 
 
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->object()).is(rdx));
-  ASSERT(ToRegister(instr->key()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3382,9 +3388,9 @@
 
   // The receiver should be a JS object.
   Condition is_smi = __ CheckSmi(receiver);
-  DeoptimizeIf(is_smi, instr->environment());
+  DeoptimizeIf(is_smi, instr, "Smi");
   __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, kScratchRegister);
-  DeoptimizeIf(below, instr->environment());
+  DeoptimizeIf(below, instr, "not a JavaScript object");
 
   __ jmp(&receiver_ok, Label::kNear);
   __ bind(&global_object);
@@ -3392,8 +3398,7 @@
   __ movp(receiver,
           Operand(receiver,
                   Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  __ movp(receiver,
-          FieldOperand(receiver, GlobalObject::kGlobalReceiverOffset));
+  __ movp(receiver, FieldOperand(receiver, GlobalObject::kGlobalProxyOffset));
 
   __ bind(&receiver_ok);
 }
@@ -3404,15 +3409,15 @@
   Register function = ToRegister(instr->function());
   Register length = ToRegister(instr->length());
   Register elements = ToRegister(instr->elements());
-  ASSERT(receiver.is(rax));  // Used for parameter count.
-  ASSERT(function.is(rdi));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(receiver.is(rax));  // Used for parameter count.
+  DCHECK(function.is(rdi));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(rax));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
   __ cmpp(length, Immediate(kArgumentsLimit));
-  DeoptimizeIf(above, instr->environment());
+  DeoptimizeIf(above, instr, "too many arguments");
 
   __ Push(receiver);
   __ movp(receiver, length);
@@ -3432,7 +3437,7 @@
 
   // Invoke the function.
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
@@ -3464,17 +3469,17 @@
     __ movp(result, Operand(rbp, StandardFrameConstants::kContextOffset));
   } else {
     // If there is no frame, the context must be in rsi.
-    ASSERT(result.is(rsi));
+    DCHECK(result.is(rsi));
   }
 }
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   __ Push(rsi);  // The context is the first argument.
   __ Push(instr->hydrogen()->pairs());
   __ Push(Smi::FromInt(instr->hydrogen()->flags()));
-  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }
 
 
@@ -3524,8 +3529,32 @@
 }
 
 
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+
+  Register scratch = rbx;
+  DCHECK(!scratch.is(receiver) && !scratch.is(name));
+
+  // Important for the tail-call.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, no_reg);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ leave();
+  LoadIC::GenerateMiss(masm());
+}
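
GenerateProbe consults the isolate's stub cache, a pair of fixed-size hash tables keyed on (name, map) that either tail-calls a cached handler or falls through to the miss path bound above. A self-contained sketch of the lookup the generated code performs; the table sizes and hash mix are illustrative, not V8's actual constants:

    #include <cstdint>

    struct Entry { const void* name; const void* map; const void* handler; };
    static Entry primary[2048];   // illustrative size
    static Entry secondary[512];  // illustrative size

    // Probe the primary table, then the secondary, else report a miss.
    const void* Probe(const void* name, const void* map) {
      uintptr_t h = (reinterpret_cast<uintptr_t>(name) ^
                     reinterpret_cast<uintptr_t>(map)) >> 3;
      const Entry& p = primary[h % 2048];
      if (p.name == name && p.map == map) return p.handler;
      const Entry& s = secondary[h % 512];
      if (s.name == name && s.map == map) return s.handler;
      return nullptr;  // caller falls through to LoadIC::GenerateMiss
    }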
+
+
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->result()).is(rax));
 
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -3536,7 +3565,7 @@
     generator.BeforeCall(__ CallSize(code));
     __ call(code, RelocInfo::CODE_TARGET);
   } else {
-    ASSERT(instr->target()->IsRegister());
+    DCHECK(instr->target()->IsRegister());
     Register target = ToRegister(instr->target());
     generator.BeforeCall(__ CallSize(target));
     __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -3547,8 +3576,8 @@
 
 
 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(rdi));
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->function()).is(rdi));
+  DCHECK(ToRegister(instr->result()).is(rax));
 
   if (instr->hydrogen()->pass_argument_count()) {
     __ Set(rax, instr->arity());
@@ -3583,7 +3612,7 @@
   Register input_reg = ToRegister(instr->value());
   __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
                  Heap::kHeapNumberMapRootIndex);
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a heap number");
 
   Label slow, allocated, done;
   Register tmp = input_reg.is(rax) ? rcx : rax;
@@ -3606,7 +3635,7 @@
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
   CallRuntimeFromDeferred(
-      Runtime::kHiddenAllocateHeapNumber, 0, instr, instr->context());
+      Runtime::kAllocateHeapNumber, 0, instr, instr->context());
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(rax)) __ movp(tmp, rax);
   // Restore input_reg after call to runtime.
@@ -3629,7 +3658,7 @@
   Label is_positive;
   __ j(not_sign, &is_positive, Label::kNear);
   __ negl(input_reg);  // Sets flags.
-  DeoptimizeIf(negative, instr->environment());
+  DeoptimizeIf(negative, instr, "overflow");
   __ bind(&is_positive);
 }
 
@@ -3640,26 +3669,26 @@
   Label is_positive;
   __ j(not_sign, &is_positive, Label::kNear);
   __ negp(input_reg);  // Sets flags.
-  DeoptimizeIf(negative, instr->environment());
+  DeoptimizeIf(negative, instr, "overflow");
   __ bind(&is_positive);
 }
 
 
 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   // Class for deferred case.
-  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LMathAbs* instr_;
   };
 
-  ASSERT(instr->value()->Equals(instr->result()));
+  DCHECK(instr->value()->Equals(instr->result()));
   Representation r = instr->hydrogen()->value()->representation();
 
   if (r.IsDouble()) {
@@ -3695,18 +3724,18 @@
       // Deoptimize if minus zero.
       __ movq(output_reg, input_reg);
       __ subq(output_reg, Immediate(1));
-      DeoptimizeIf(overflow, instr->environment());
+      DeoptimizeIf(overflow, instr, "minus zero");
     }
     __ roundsd(xmm_scratch, input_reg, Assembler::kRoundDown);
     __ cvttsd2si(output_reg, xmm_scratch);
     __ cmpl(output_reg, Immediate(0x1));
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   } else {
     Label negative_sign, done;
     // Deoptimize on unordered.
     __ xorps(xmm_scratch, xmm_scratch);  // Zero the register.
     __ ucomisd(input_reg, xmm_scratch);
-    DeoptimizeIf(parity_even, instr->environment());
+    DeoptimizeIf(parity_even, instr, "NaN");
     __ j(below, &negative_sign, Label::kNear);
 
     if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
@@ -3715,7 +3744,7 @@
       __ j(above, &positive_sign, Label::kNear);
       __ movmskpd(output_reg, input_reg);
       __ testq(output_reg, Immediate(1));
-      DeoptimizeIf(not_zero, instr->environment());
+      DeoptimizeIf(not_zero, instr, "minus zero");
       __ Set(output_reg, 0);
       __ jmp(&done);
       __ bind(&positive_sign);
@@ -3725,7 +3754,7 @@
     __ cvttsd2si(output_reg, input_reg);
     // Overflow is signalled with minint.
     __ cmpl(output_reg, Immediate(0x1));
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
     __ jmp(&done, Label::kNear);
 
     // Non-zero negative reaches here.
@@ -3736,7 +3765,7 @@
     __ ucomisd(input_reg, xmm_scratch);
     __ j(equal, &done, Label::kNear);
     __ subl(output_reg, Immediate(1));
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
 
     __ bind(&done);
   }
@@ -3763,8 +3792,7 @@
   __ cvttsd2si(output_reg, xmm_scratch);
   // Overflow is signalled with minint.
   __ cmpl(output_reg, Immediate(0x1));
-  __ RecordComment("D2I conversion overflow");
-  DeoptimizeIf(overflow, instr->environment());
+  DeoptimizeIf(overflow, instr, "overflow");
   __ jmp(&done, dist);
 
   __ bind(&below_one_half);
@@ -3780,8 +3808,7 @@
   __ cvttsd2si(output_reg, input_temp);
   // Catch minint due to overflow, and to prevent overflow when compensating.
   __ cmpl(output_reg, Immediate(0x1));
-  __ RecordComment("D2I conversion overflow");
-  DeoptimizeIf(overflow, instr->environment());
+  DeoptimizeIf(overflow, instr, "overflow");
 
   __ Cvtlsi2sd(xmm_scratch, output_reg);
   __ ucomisd(xmm_scratch, input_temp);
@@ -3796,14 +3823,21 @@
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     __ movq(output_reg, input_reg);
     __ testq(output_reg, output_reg);
-    __ RecordComment("Minus zero");
-    DeoptimizeIf(negative, instr->environment());
+    DeoptimizeIf(negative, instr, "minus zero");
   }
   __ Set(output_reg, 0);
   __ bind(&done);
 }
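
Both the floor and round paths above lean on the same hardware convention: cvttsd2si produces INT32_MIN (0x80000000) whenever the input is NaN or out of range, and cmpl(reg, Immediate(0x1)) raises the overflow flag for exactly that sentinel, since INT32_MIN is the only value where subtracting 1 overflows. A portable model of the conversion, as a sketch:

    #include <limits>

    // Approximates cvttsd2si: truncate toward zero, with INT32_MIN as the
    // sentinel for NaN or out-of-range inputs (a sketch, not cycle-accurate).
    int32_t Cvttsd2si(double x) {
      if (!(x > -2147483649.0 && x < 2147483648.0))  // NaN fails both tests
        return std::numeric_limits<int32_t>::min();
      return static_cast<int32_t>(x);
    }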
 
 
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  XMMRegister input_reg = ToDoubleRegister(instr->value());
+  XMMRegister output_reg = ToDoubleRegister(instr->result());
+  __ cvtsd2ss(output_reg, input_reg);
+  __ cvtss2sd(output_reg, output_reg);
+}
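
DoMathFround is the whole of Math.fround: round the double to the nearest binary32 value and widen it back, which is exactly the cvtsd2ss/cvtss2sd pair above. The same operation in standalone C++:

    // Round-trip through float; the result is the closest single-precision
    // value, re-expressed as a double.
    double Fround(double x) {
      return static_cast<double>(static_cast<float>(x));
    }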
+
+
 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
   XMMRegister output = ToDoubleRegister(instr->result());
   if (instr->value()->IsDoubleRegister()) {
@@ -3819,7 +3853,7 @@
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   XMMRegister xmm_scratch = double_scratch0();
   XMMRegister input_reg = ToDoubleRegister(instr->value());
-  ASSERT(ToDoubleRegister(instr->result()).is(input_reg));
+  DCHECK(ToDoubleRegister(instr->result()).is(input_reg));
 
   // Note that according to ECMA-262 15.8.2.13:
   // Math.pow(-Infinity, 0.5) == Infinity
@@ -3853,22 +3887,22 @@
   // Having marked this as a call, we can use any registers.
   // Just make sure that the input/output registers are the expected ones.
 
-  Register exponent = rdx;
-  ASSERT(!instr->right()->IsRegister() ||
-         ToRegister(instr->right()).is(exponent));
-  ASSERT(!instr->right()->IsDoubleRegister() ||
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(!instr->right()->IsDoubleRegister() ||
          ToDoubleRegister(instr->right()).is(xmm1));
-  ASSERT(ToDoubleRegister(instr->left()).is(xmm2));
-  ASSERT(ToDoubleRegister(instr->result()).is(xmm3));
+  DCHECK(ToDoubleRegister(instr->left()).is(xmm2));
+  DCHECK(ToDoubleRegister(instr->result()).is(xmm3));
 
   if (exponent_type.IsSmi()) {
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
   } else if (exponent_type.IsTagged()) {
     Label no_deopt;
-    __ JumpIfSmi(exponent, &no_deopt, Label::kNear);
-    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, rcx);
-    DeoptimizeIf(not_equal, instr->environment());
+    __ JumpIfSmi(tagged_exponent, &no_deopt, Label::kNear);
+    __ CmpObjectType(tagged_exponent, HEAP_NUMBER_TYPE, rcx);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
     __ bind(&no_deopt);
     MathPowStub stub(isolate(), MathPowStub::TAGGED);
     __ CallStub(&stub);
@@ -3876,7 +3910,7 @@
     MathPowStub stub(isolate(), MathPowStub::INTEGER);
     __ CallStub(&stub);
   } else {
-    ASSERT(exponent_type.IsDouble());
+    DCHECK(exponent_type.IsDouble());
     MathPowStub stub(isolate(), MathPowStub::DOUBLE);
     __ CallStub(&stub);
   }
@@ -3895,7 +3929,7 @@
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
-  ASSERT(instr->value()->Equals(instr->result()));
+  DCHECK(instr->value()->Equals(instr->result()));
   XMMRegister input_reg = ToDoubleRegister(instr->value());
   XMMRegister xmm_scratch = double_scratch0();
   Label positive, done, zero;
@@ -3942,9 +3976,9 @@
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->function()).is(rdi));
-  ASSERT(instr->HasPointerMap());
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->function()).is(rdi));
+  DCHECK(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
@@ -3963,9 +3997,9 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->function()).is(rdi));
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->function()).is(rdi));
+  DCHECK(ToRegister(instr->result()).is(rax));
 
   int arity = instr->arity();
   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
@@ -3974,9 +4008,9 @@
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->constructor()).is(rdi));
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->constructor()).is(rdi));
+  DCHECK(ToRegister(instr->result()).is(rax));
 
   __ Set(rax, instr->arity());
   // No cell in ebx for construct type feedback in optimized code
@@ -3987,9 +4021,9 @@
 
 
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->constructor()).is(rdi));
-  ASSERT(ToRegister(instr->result()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->constructor()).is(rdi));
+  DCHECK(ToRegister(instr->result()).is(rax));
 
   __ Set(rax, instr->arity());
   __ LoadRoot(rbx, Heap::kUndefinedValueRootIndex);
@@ -4032,7 +4066,7 @@
 
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
 }
 
@@ -4066,10 +4100,10 @@
   int offset = access.offset();
 
   if (access.IsExternalMemory()) {
-    ASSERT(!hinstr->NeedsWriteBarrier());
+    DCHECK(!hinstr->NeedsWriteBarrier());
     Register value = ToRegister(instr->value());
     if (instr->object()->IsConstantOperand()) {
-      ASSERT(value.is(rax));
+      DCHECK(value.is(rax));
       LConstantOperand* object = LConstantOperand::cast(instr->object());
       __ store_rax(ToExternalReference(object));
     } else {
@@ -4082,13 +4116,13 @@
   Register object = ToRegister(instr->object());
   __ AssertNotSmi(object);
 
-  ASSERT(!representation.IsSmi() ||
+  DCHECK(!representation.IsSmi() ||
          !instr->value()->IsConstantOperand() ||
          IsInteger32Constant(LConstantOperand::cast(instr->value())));
   if (representation.IsDouble()) {
-    ASSERT(access.IsInobject());
-    ASSERT(!hinstr->has_transition());
-    ASSERT(!hinstr->NeedsWriteBarrier());
+    DCHECK(access.IsInobject());
+    DCHECK(!hinstr->has_transition());
+    DCHECK(!hinstr->NeedsWriteBarrier());
     XMMRegister value = ToDoubleRegister(instr->value());
     __ movsd(FieldOperand(object, offset), value);
     return;
@@ -4120,7 +4154,7 @@
 
   if (representation.IsSmi() && SmiValuesAre32Bits() &&
       hinstr->value()->representation().IsInteger32()) {
-    ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
     if (FLAG_debug_code) {
       Register scratch = kScratchRegister;
       __ Load(scratch, FieldOperand(write_register, offset), representation);
@@ -4128,7 +4162,7 @@
     }
     // Store int value directly to upper half of the smi.
     STATIC_ASSERT(kSmiTag == 0);
-    ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
     offset += kPointerSize / 2;
     representation = Representation::Integer32();
   }
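
The offset += kPointerSize / 2 adjustment works because, with 32-bit smi values on x64, the payload lives in the upper half of the 64-bit word (the DCHECK above pins kSmiTagSize + kSmiShiftSize to 32), so an int32 can be written straight into the high 32 bits with no tagging step. The layout as a sketch:

    #include <cstdint>

    // 64-bit smi when SmiValuesAre32Bits(): [ int32 payload | 32 zero bits ].
    int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }
    int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }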
@@ -4141,7 +4175,7 @@
   } else {
     LConstantOperand* operand_value = LConstantOperand::cast(instr->value());
     if (IsInteger32Constant(operand_value)) {
-      ASSERT(!hinstr->NeedsWriteBarrier());
+      DCHECK(!hinstr->NeedsWriteBarrier());
       int32_t value = ToInteger32(operand_value);
       if (representation.IsSmi()) {
         __ Move(operand, Smi::FromInt(value));
@@ -4152,7 +4186,7 @@
 
     } else {
       Handle<Object> handle_value = ToHandle(operand_value);
-      ASSERT(!hinstr->NeedsWriteBarrier());
+      DCHECK(!hinstr->NeedsWriteBarrier());
       __ Move(operand, handle_value);
     }
   }
@@ -4174,11 +4208,11 @@
 
 
 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->object()).is(rdx));
-  ASSERT(ToRegister(instr->value()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  __ Move(rcx, instr->hydrogen()->name());
+  __ Move(StoreDescriptor::NameRegister(), instr->hydrogen()->name());
   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -4186,8 +4220,8 @@
 
 void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
   Representation representation = instr->hydrogen()->length()->representation();
-  ASSERT(representation.Equals(instr->hydrogen()->index()->representation()));
-  ASSERT(representation.IsSmiOrInteger32());
+  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
+  DCHECK(representation.IsSmiOrInteger32());
 
   Condition cc = instr->hydrogen()->allow_equality() ? below : below_equal;
   if (instr->length()->IsConstantOperand()) {
@@ -4240,7 +4274,7 @@
     __ int3();
     __ bind(&done);
   } else {
-    DeoptimizeIf(cc, instr->environment());
+    DeoptimizeIf(cc, instr, "out of bounds");
   }
 }
 
@@ -4332,8 +4366,9 @@
     __ ucomisd(value, value);
     __ j(parity_odd, &have_value, Label::kNear);  // NaN.
 
-    __ Set(kScratchRegister, BitCast<uint64_t>(
-        FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+    __ Set(kScratchRegister,
+           bit_cast<uint64_t>(
+               FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
     __ movq(value, kScratchRegister);
 
     __ bind(&have_value);
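
BitCast becomes bit_cast here; either way the point is to reinterpret the canonical NaN's bit pattern as a uint64_t so it can be loaded as an integer immediate. A standalone equivalent of what such a helper must do, with memcpy as the defined-behaviour way to type-pun:

    #include <cstring>

    template <typename To, typename From>
    To bit_cast(const From& from) {
      static_assert(sizeof(To) == sizeof(From), "sizes must match");
      To to;
      std::memcpy(&to, &from, sizeof(to));  // well-defined type pun
      return to;
    }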
@@ -4363,8 +4398,8 @@
     __ movsxlq(ToRegister(key), ToRegister(key));
   }
   if (representation.IsInteger32() && SmiValuesAre32Bits()) {
-    ASSERT(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
-    ASSERT(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
     if (FLAG_debug_code) {
       Register scratch = kScratchRegister;
       __ Load(scratch,
@@ -4378,7 +4413,7 @@
     }
     // Store int value directly to upper half of the smi.
     STATIC_ASSERT(kSmiTag == 0);
-    ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+    DCHECK(kSmiTagSize + kSmiShiftSize == 32);
     offset += kPointerSize / 2;
   }
 
@@ -4408,9 +4443,9 @@
 
   if (hinstr->NeedsWriteBarrier()) {
     Register elements = ToRegister(instr->elements());
-    ASSERT(instr->value()->IsRegister());
+    DCHECK(instr->value()->IsRegister());
     Register value = ToRegister(instr->value());
-    ASSERT(!key->IsConstantOperand());
+    DCHECK(!key->IsConstantOperand());
     SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
@@ -4439,14 +4474,13 @@
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->object()).is(rdx));
-  ASSERT(ToRegister(instr->key()).is(rcx));
-  ASSERT(ToRegister(instr->value()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  Handle<Code> ic = instr->strict_mode() == STRICT
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4470,8 +4504,8 @@
     __ RecordWriteForMap(object_reg, new_map_reg, ToRegister(instr->temp()),
                          kDontSaveFPRegs);
   } else {
-    ASSERT(object_reg.is(rax));
-    ASSERT(ToRegister(instr->context()).is(rsi));
+    DCHECK(object_reg.is(rax));
+    DCHECK(ToRegister(instr->context()).is(rsi));
     PushSafepointRegistersScope scope(this);
     __ Move(rbx, to_map);
     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
@@ -4488,15 +4522,15 @@
   Register temp = ToRegister(instr->temp());
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "memento found");
   __ bind(&no_memento_found);
 }
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
-  ASSERT(ToRegister(instr->left()).is(rdx));
-  ASSERT(ToRegister(instr->right()).is(rax));
+  DCHECK(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->left()).is(rdx));
+  DCHECK(ToRegister(instr->right()).is(rax));
   StringAddStub stub(isolate(),
                      instr->hydrogen()->flags(),
                      instr->hydrogen()->pretenure_flag());
@@ -4505,14 +4539,14 @@
 
 
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
-  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharCodeAt(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4552,7 +4586,7 @@
     __ Push(index);
   }
   CallRuntimeFromDeferred(
-      Runtime::kHiddenStringCharCodeAt, 2, instr, instr->context());
+      Runtime::kStringCharCodeAtRT, 2, instr, instr->context());
   __ AssertSmi(rax);
   __ SmiToInteger32(rax, rax);
   __ StoreToSafepointRegisterSlot(result, rax);
@@ -4560,14 +4594,14 @@
 
 
 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
-  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+  class DeferredStringCharFromCode FINAL : public LDeferredCode {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -4575,10 +4609,10 @@
   DeferredStringCharFromCode* deferred =
       new(zone()) DeferredStringCharFromCode(this, instr);
 
-  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   Register char_code = ToRegister(instr->char_code());
   Register result = ToRegister(instr->result());
-  ASSERT(!char_code.is(result));
+  DCHECK(!char_code.is(result));
 
   __ cmpl(char_code, Immediate(String::kMaxOneByteCharCode));
   __ j(above, deferred->entry());
@@ -4612,9 +4646,9 @@
 
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() || input->IsStackSlot());
+  DCHECK(input->IsRegister() || input->IsStackSlot());
   LOperand* output = instr->result();
-  ASSERT(output->IsDoubleRegister());
+  DCHECK(output->IsDoubleRegister());
   if (input->IsRegister()) {
     __ Cvtlsi2sd(ToDoubleRegister(output), ToRegister(input));
   } else {
@@ -4632,27 +4666,27 @@
 
 
 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
-  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagI FINAL : public LDeferredCode {
    public:
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                        instr_->temp2(), SIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagI* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
   Register reg = ToRegister(input);
 
   if (SmiValuesAre32Bits()) {
     __ Integer32ToSmi(reg, reg);
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     DeferredNumberTagI* deferred = new(zone()) DeferredNumberTagI(this, instr);
     __ Integer32ToSmi(reg, reg);
     __ j(overflow, deferred->entry());
@@ -4662,21 +4696,21 @@
 
 
 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
-  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagU FINAL : public LDeferredCode {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                        instr_->temp2(), UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagU* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
   Register reg = ToRegister(input);
 
   DeferredNumberTagU* deferred = new(zone()) DeferredNumberTagU(this, instr);
@@ -4701,7 +4735,7 @@
   // runtime (MacroAssembler::EnterExitFrameEpilogue preserves only allocatable
   // XMM registers on x64).
   if (signedness == SIGNED_INT32) {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     // There was overflow, so bits 30 and 31 of the original integer
     // disagree. Try to allocate a heap number in new space and store
     // the value in there. If that fails, call the runtime system.
@@ -4709,13 +4743,13 @@
     __ xorl(reg, Immediate(0x80000000));
     __ cvtlsi2sd(temp_xmm, reg);
   } else {
-    ASSERT(signedness == UNSIGNED_INT32);
+    DCHECK(signedness == UNSIGNED_INT32);
     __ LoadUint32(temp_xmm, reg);
   }
 
   if (FLAG_inline_new) {
     __ AllocateHeapNumber(reg, tmp, &slow);
-    __ jmp(&done, Label::kNear);
+    __ jmp(&done, kPointerSize == kInt64Size ? Label::kNear : Label::kFar);
   }
 
   // Slow case: Call the runtime system to do the number allocation.
@@ -4731,11 +4765,11 @@
 
     // NumberTagIU uses the context from the frame, rather than
     // the environment's HContext or HInlinedContext value.
-    // They only call Runtime::kHiddenAllocateHeapNumber.
+    // They only call Runtime::kAllocateHeapNumber.
     // The corresponding HChange instructions are added in a phase that does
     // not have easy access to the local context.
     __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(reg, rax);
@@ -4749,14 +4783,14 @@
 
 
 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
-  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagD FINAL : public LDeferredCode {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagD(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -4787,11 +4821,11 @@
     PushSafepointRegistersScope scope(this);
     // NumberTagD uses the context from the frame, rather than
     // the environment's HContext or HInlinedContext value.
-    // They only call Runtime::kHiddenAllocateHeapNumber.
+    // They only call Runtime::kAllocateHeapNumber.
     // The corresponding HChange instructions are added in a phase that does
     // not have easy access to the local context.
     __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-    __ CallRuntimeSaveDoubles(Runtime::kHiddenAllocateHeapNumber);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
     __ movp(kScratchRegister, rax);
@@ -4807,22 +4841,22 @@
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
     Condition is_smi = __ CheckUInteger32ValidSmiValue(input);
-    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+    DeoptimizeIf(NegateCondition(is_smi), instr, "overflow");
   }
   __ Integer32ToSmi(output, input);
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       !hchange->value()->CheckFlag(HValue::kUint32)) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
 
 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
-  ASSERT(instr->value()->Equals(instr->result()));
+  DCHECK(instr->value()->Equals(instr->result()));
   Register input = ToRegister(instr->value());
   if (instr->needs_check()) {
     Condition is_smi = __ CheckSmi(input);
-    DeoptimizeIf(NegateCondition(is_smi), instr->environment());
+    DeoptimizeIf(NegateCondition(is_smi), instr, "not a Smi");
   } else {
     __ AssertSmi(input);
   }
@@ -4830,12 +4864,12 @@
 }
 
 
-void LCodeGen::EmitNumberUntagD(Register input_reg,
-                                XMMRegister result_reg,
-                                bool can_convert_undefined_to_nan,
-                                bool deoptimize_on_minus_zero,
-                                LEnvironment* env,
-                                NumberUntagDMode mode) {
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                XMMRegister result_reg, NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
   Label convert, load_smi, done;
 
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
@@ -4853,7 +4887,7 @@
     if (can_convert_undefined_to_nan) {
       __ j(not_equal, &convert, Label::kNear);
     } else {
-      DeoptimizeIf(not_equal, env);
+      DeoptimizeIf(not_equal, instr, "not a heap number");
     }
 
     if (deoptimize_on_minus_zero) {
@@ -4863,7 +4897,7 @@
       __ j(not_equal, &done, Label::kNear);
       __ movmskpd(kScratchRegister, result_reg);
       __ testq(kScratchRegister, Immediate(1));
-      DeoptimizeIf(not_zero, env);
+      DeoptimizeIf(not_zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
 
@@ -4872,14 +4906,14 @@
 
       // Convert undefined (and hole) to NaN. Compute NaN as 0/0.
       __ CompareRoot(input_reg, Heap::kUndefinedValueRootIndex);
-      DeoptimizeIf(not_equal, env);
+      DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
 
       __ xorps(result_reg, result_reg);
       __ divsd(result_reg, result_reg);
       __ jmp(&done, Label::kNear);
     }
   } else {
-    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   }
 
   // Smi to XMM conversion
@@ -4919,39 +4953,47 @@
 
     __ bind(&check_false);
     __ CompareRoot(input_reg, Heap::kFalseValueRootIndex);
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
     __ Set(input_reg, 0);
-    __ jmp(done);
   } else {
-    Label bailout;
-    XMMRegister xmm_temp = ToDoubleRegister(instr->temp());
-    __ TaggedToI(input_reg, input_reg, xmm_temp,
-        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
-    __ jmp(done);
-    __ bind(&bailout);
-    DeoptimizeIf(no_condition, instr->environment());
+    XMMRegister scratch = ToDoubleRegister(instr->temp());
+    DCHECK(!scratch.is(xmm0));
+    __ CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
+                   Heap::kHeapNumberMapRootIndex);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
+    __ movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
+    __ cvttsd2si(input_reg, xmm0);
+    __ Cvtlsi2sd(scratch, input_reg);
+    __ ucomisd(xmm0, scratch);
+    DeoptimizeIf(not_equal, instr, "lost precision");
+    DeoptimizeIf(parity_even, instr, "NaN");
+    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+      __ testl(input_reg, input_reg);
+      __ j(not_zero, done);
+      __ movmskpd(input_reg, xmm0);
+      __ andl(input_reg, Immediate(1));
+      DeoptimizeIf(not_zero, instr, "minus zero");
+    }
   }
 }
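
The inlined sequence that replaces TaggedToI above (cvttsd2si, Cvtlsi2sd back, ucomisd) is the standard exactness test: truncation lost nothing iff converting the integer back reproduces the original double, and ucomisd's parity flag exposes NaN. A portable sketch of the same decision:

    #include <cmath>
    #include <cstdint>

    // True only when x is exactly representable as an int32.
    bool HeapNumberToInt32(double x, int32_t* out) {
      if (std::isnan(x)) return false;                // "NaN" deopt (parity_even)
      if (x < -2147483648.0 || x >= 2147483648.0)
        return false;                                 // would overflow int32
      int32_t i = static_cast<int32_t>(x);            // cvttsd2si
      if (static_cast<double>(i) != x) return false;  // "lost precision" deopt
      // Note: -0.0 passes this test with i == 0; the generated code adds a
      // movmskpd sign check when FAIL_ON_MINUS_ZERO is requested.
      *out = i;
      return true;
    }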
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+  class DeferredTaggedToI FINAL : public LDeferredCode {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredTaggedToI(instr_, done());
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LTaggedToI* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
-  ASSERT(input->Equals(instr->result()));
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
   Register input_reg = ToRegister(input);
 
   if (instr->hydrogen()->value()->representation().IsSmi()) {
@@ -4967,9 +5009,9 @@
 
 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsDoubleRegister());
+  DCHECK(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
   XMMRegister result_reg = ToDoubleRegister(result);
@@ -4978,19 +5020,15 @@
   NumberUntagDMode mode = value->representation().IsSmi()
       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
 
-  EmitNumberUntagD(input_reg, result_reg,
-                   instr->hydrogen()->can_convert_undefined_to_nan(),
-                   instr->hydrogen()->deoptimize_on_minus_zero(),
-                   instr->environment(),
-                   mode);
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
 }
 
 
 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsDoubleRegister());
+  DCHECK(input->IsDoubleRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsRegister());
+  DCHECK(result->IsRegister());
 
   XMMRegister input_reg = ToDoubleRegister(input);
   Register result_reg = ToRegister(result);
@@ -4998,14 +5036,19 @@
   if (instr->truncating()) {
     __ TruncateDoubleToI(result_reg, input_reg);
   } else {
-    Label bailout, done;
+    Label lost_precision, is_nan, minus_zero, done;
     XMMRegister xmm_scratch = double_scratch0();
+    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
     __ DoubleToI(result_reg, input_reg, xmm_scratch,
-        instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
-    __ jmp(&done, Label::kNear);
-    __ bind(&bailout);
-    DeoptimizeIf(no_condition, instr->environment());
+                 instr->hydrogen()->GetMinusZeroMode(), &lost_precision,
+                 &is_nan, &minus_zero, dist);
+    __ jmp(&done, dist);
+    __ bind(&lost_precision);
+    DeoptimizeIf(no_condition, instr, "lost precision");
+    __ bind(&is_nan);
+    DeoptimizeIf(no_condition, instr, "NaN");
+    __ bind(&minus_zero);
+    DeoptimizeIf(no_condition, instr, "minus zero");
     __ bind(&done);
   }
 }
@@ -5013,32 +5056,36 @@
 
 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsDoubleRegister());
+  DCHECK(input->IsDoubleRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsRegister());
+  DCHECK(result->IsRegister());
 
   XMMRegister input_reg = ToDoubleRegister(input);
   Register result_reg = ToRegister(result);
 
-  Label bailout, done;
+  Label lost_precision, is_nan, minus_zero, done;
   XMMRegister xmm_scratch = double_scratch0();
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   __ DoubleToI(result_reg, input_reg, xmm_scratch,
-      instr->hydrogen()->GetMinusZeroMode(), &bailout, Label::kNear);
-
-  __ jmp(&done, Label::kNear);
-  __ bind(&bailout);
-  DeoptimizeIf(no_condition, instr->environment());
+               instr->hydrogen()->GetMinusZeroMode(), &lost_precision, &is_nan,
+               &minus_zero, dist);
+  __ jmp(&done, dist);
+  __ bind(&lost_precision);
+  DeoptimizeIf(no_condition, instr, "lost precision");
+  __ bind(&is_nan);
+  DeoptimizeIf(no_condition, instr, "NaN");
+  __ bind(&minus_zero);
+  DeoptimizeIf(no_condition, instr, "minus zero");
   __ bind(&done);
-
   __ Integer32ToSmi(result_reg, result_reg);
-  DeoptimizeIf(overflow, instr->environment());
+  DeoptimizeIf(overflow, instr, "overflow");
 }
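
The separate minus-zero deopt exists because -0.0 == 0.0 under IEEE comparison; the sign must be read from the bit pattern, which is what movmskpd does in the generated code. The portable spelling of that check:

    #include <cmath>

    // movmskpd copies the double's sign bit into a GPR; std::signbit is the
    // portable equivalent.
    bool IsMinusZero(double x) { return x == 0.0 && std::signbit(x); }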
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
   Condition cc = masm()->CheckSmi(ToRegister(input));
-  DeoptimizeIf(NegateCondition(cc), instr->environment());
+  DeoptimizeIf(NegateCondition(cc), instr, "not a Smi");
 }
 
 
@@ -5046,7 +5093,7 @@
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
     LOperand* input = instr->value();
     Condition cc = masm()->CheckSmi(ToRegister(input));
-    DeoptimizeIf(cc, instr->environment());
+    DeoptimizeIf(cc, instr, "Smi");
   }
 }
 
@@ -5066,14 +5113,14 @@
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
-      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     } else {
-      DeoptimizeIf(below, instr->environment());
+      DeoptimizeIf(below, instr, "wrong instance type");
       // Omit check for the last type.
       if (last != LAST_TYPE) {
         __ cmpb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                 Immediate(static_cast<int8_t>(last)));
-        DeoptimizeIf(above, instr->environment());
+        DeoptimizeIf(above, instr, "wrong instance type");
       }
     }
   } else {
@@ -5081,17 +5128,17 @@
     uint8_t tag;
     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
 
-    if (IsPowerOf2(mask)) {
-      ASSERT(tag == 0 || IsPowerOf2(tag));
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
       __ testb(FieldOperand(kScratchRegister, Map::kInstanceTypeOffset),
                Immediate(mask));
-      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
     } else {
       __ movzxbl(kScratchRegister,
                  FieldOperand(kScratchRegister, Map::kInstanceTypeOffset));
       __ andb(kScratchRegister, Immediate(mask));
       __ cmpb(kScratchRegister, Immediate(tag));
-      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     }
   }
 }
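
base::bits::IsPowerOfTwo32 replaces IsPowerOf2 above. The predicate matters because a power-of-two mask lets the instance-type check collapse to a single testb: (type & mask) is then either 0 or mask itself, so a zero/not_zero branch suffices instead of the movzx/and/cmp sequence in the else arm. The predicate itself:

    #include <cstdint>

    // Exactly one bit set; zero is not a power of two.
    bool IsPowerOfTwo32(uint32_t value) {
      return value != 0 && (value & (value - 1)) == 0;
    }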
@@ -5100,7 +5147,7 @@
 void LCodeGen::DoCheckValue(LCheckValue* instr) {
   Register reg = ToRegister(instr->value());
   __ Cmp(reg, instr->hydrogen()->object().handle());
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "value mismatch");
 }
 
 
@@ -5115,22 +5162,22 @@
 
     __ testp(rax, Immediate(kSmiTagMask));
   }
-  DeoptimizeIf(zero, instr->environment());
+  DeoptimizeIf(zero, instr, "instance migration failed");
 }
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+  class DeferredCheckMaps FINAL : public LDeferredCode {
    public:
     DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5146,7 +5193,7 @@
   }
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   Register reg = ToRegister(input);
 
   DeferredCheckMaps* deferred = NULL;
@@ -5168,7 +5215,7 @@
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ j(not_equal, deferred->entry());
   } else {
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "wrong map");
   }
 
   __ bind(&success);
@@ -5184,14 +5231,14 @@
 
 
 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
-  ASSERT(instr->unclamped()->Equals(instr->result()));
+  DCHECK(instr->unclamped()->Equals(instr->result()));
   Register value_reg = ToRegister(instr->result());
   __ ClampUint8(value_reg);
 }
 
 
 void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
-  ASSERT(instr->unclamped()->Equals(instr->result()));
+  DCHECK(instr->unclamped()->Equals(instr->result()));
   Register input_reg = ToRegister(instr->unclamped());
   XMMRegister temp_xmm_reg = ToDoubleRegister(instr->temp_xmm());
   XMMRegister xmm_scratch = double_scratch0();
@@ -5207,7 +5254,7 @@
   // Check for undefined. Undefined is converted to zero for clamping
   // conversions.
   __ Cmp(input_reg, factory()->undefined_value());
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
   __ xorl(input_reg, input_reg);
   __ jmp(&done, Label::kNear);
 
@@ -5251,14 +5298,14 @@
 
 
 void LCodeGen::DoAllocate(LAllocate* instr) {
-  class DeferredAllocate V8_FINAL : public LDeferredCode {
+  class DeferredAllocate FINAL : public LDeferredCode {
    public:
     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredAllocate(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LAllocate* instr_;
   };
@@ -5275,11 +5322,11 @@
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   }
 
@@ -5327,7 +5374,7 @@
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
     Register size = ToRegister(instr->size());
-    ASSERT(!size.is(result));
+    DCHECK(!size.is(result));
     __ Integer32ToSmi(size, size);
     __ Push(size);
   } else {
@@ -5337,11 +5384,11 @@
 
   int flags = 0;
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
@@ -5349,20 +5396,20 @@
   __ Push(Smi::FromInt(flags));
 
   CallRuntimeFromDeferred(
-      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, rax);
 }
 
 
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  ASSERT(ToRegister(instr->value()).is(rax));
+  DCHECK(ToRegister(instr->value()).is(rax));
   __ Push(rax);
   CallRuntime(Runtime::kToFastProperties, 1, instr);
 }
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   Label materialized;
   // Registers will be used as follows:
   // rcx = literals array.
@@ -5381,7 +5428,7 @@
   __ Push(Smi::FromInt(instr->hydrogen()->literal_index()));
   __ Push(instr->hydrogen()->pattern());
   __ Push(instr->hydrogen()->flags());
-  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ movp(rbx, rax);
 
   __ bind(&materialized);
@@ -5393,7 +5440,7 @@
   __ bind(&runtime_allocate);
   __ Push(rbx);
   __ Push(Smi::FromInt(size));
-  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ Pop(rbx);
 
   __ bind(&allocated);
@@ -5413,14 +5460,13 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && instr->hydrogen()->has_no_literals()) {
-    FastNewClosureStub stub(isolate(),
-                            instr->hydrogen()->strict_mode(),
-                            instr->hydrogen()->is_generator());
+    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+                            instr->hydrogen()->kind());
     __ Move(rbx, instr->hydrogen()->shared_info());
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -5428,13 +5474,13 @@
     __ Push(instr->hydrogen()->shared_info());
     __ PushRoot(pretenure ? Heap::kTrueValueRootIndex :
                             Heap::kFalseValueRootIndex);
-    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
   }
 }
 
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   LOperand* input = instr->value();
   EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
@@ -5442,7 +5488,7 @@
 
 
 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
-  ASSERT(!operand->IsDoubleRegister());
+  DCHECK(!operand->IsDoubleRegister());
   if (operand->IsConstantOperand()) {
     __ Push(ToHandle(LConstantOperand::cast(operand)));
   } else if (operand->IsRegister()) {
@@ -5502,11 +5548,6 @@
     __ CompareRoot(input, Heap::kFalseValueRootIndex);
     final_branch_condition = equal;
 
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(type_name, factory->null_string())) {
-    __ CompareRoot(input, Heap::kNullValueRootIndex);
-    final_branch_condition = equal;
-
   } else if (String::Equals(type_name, factory->undefined_string())) {
     __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
     __ j(equal, true_label, true_distance);
@@ -5527,10 +5568,8 @@
 
   } else if (String::Equals(type_name, factory->object_string())) {
     __ JumpIfSmi(input, false_label, false_distance);
-    if (!FLAG_harmony_typeof) {
-      __ CompareRoot(input, Heap::kNullValueRootIndex);
-      __ j(equal, true_label, true_distance);
-    }
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ j(equal, true_label, true_distance);
     __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
     __ j(below, false_label, false_distance);
     __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -5590,7 +5629,7 @@
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   last_lazy_deopt_pc_ = masm()->pc_offset();
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5606,9 +5645,7 @@
   if (info()->IsStub() && type == Deoptimizer::EAGER) {
     type = Deoptimizer::LAZY;
   }
-
-  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
-  DeoptimizeIf(no_condition, instr->environment(), type);
+  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
 }
 
 
@@ -5625,28 +5662,28 @@
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this);
   __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  __ CallRuntimeSaveDoubles(Runtime::kHiddenStackGuard);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(instr, RECORD_SAFEPOINT_WITH_REGISTERS, 0);
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  class DeferredStackCheck V8_FINAL : public LDeferredCode {
+  class DeferredStackCheck FINAL : public LDeferredCode {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStackCheck(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStackCheck* instr_;
   };
 
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   // There is no LLazyBailout instruction for stack-checks. We have to
   // prepare for lazy deoptimization explicitly here.
@@ -5656,14 +5693,14 @@
     __ CompareRoot(rsp, Heap::kStackLimitRootIndex);
     __ j(above_equal, &done, Label::kNear);
 
-    ASSERT(instr->context()->IsRegister());
-    ASSERT(ToRegister(instr->context()).is(rsi));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(rsi));
     CallCode(isolate()->builtins()->StackCheck(),
              RelocInfo::CODE_TARGET,
              instr);
     __ bind(&done);
   } else {
-    ASSERT(instr->hydrogen()->is_backwards_branch());
+    DCHECK(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
     DeferredStackCheck* deferred_stack_check =
         new(zone()) DeferredStackCheck(this, instr);
@@ -5688,7 +5725,7 @@
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
-  ASSERT(!environment->HasBeenRegistered());
+  DCHECK(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
 
   GenerateOsrPrologue();
@@ -5696,21 +5733,21 @@
 
 
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  ASSERT(ToRegister(instr->context()).is(rsi));
+  DCHECK(ToRegister(instr->context()).is(rsi));
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "undefined");
 
   Register null_value = rdi;
   __ LoadRoot(null_value, Heap::kNullValueRootIndex);
   __ cmpp(rax, null_value);
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "null");
 
   Condition cc = masm()->CheckSmi(rax);
-  DeoptimizeIf(cc, instr->environment());
+  DeoptimizeIf(cc, instr, "Smi");
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CmpObjectType(rax, LAST_JS_PROXY_TYPE, rcx);
-  DeoptimizeIf(below_equal, instr->environment());
+  DeoptimizeIf(below_equal, instr, "wrong instance type");
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(null_value, &call_runtime);
@@ -5725,7 +5762,7 @@
 
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kMetaMapRootIndex);
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "wrong map");
   __ bind(&use_cache);
 }
 
@@ -5747,7 +5784,7 @@
           FieldOperand(result, FixedArray::SizeFor(instr->idx())));
   __ bind(&done);
   Condition cc = masm()->CheckSmi(result);
-  DeoptimizeIf(cc, instr->environment());
+  DeoptimizeIf(cc, instr, "no cache");
 }
 
 
@@ -5755,7 +5792,7 @@
   Register object = ToRegister(instr->value());
   __ cmpp(ToRegister(instr->map()),
           FieldOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "wrong map");
 }
 
 
@@ -5774,7 +5811,7 @@
 
 
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
    public:
     DeferredLoadMutableDouble(LCodeGen* codegen,
                               LLoadFieldByIndex* instr,
@@ -5785,10 +5822,10 @@
           object_(object),
           index_(index) {
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LLoadFieldByIndex* instr_;
     Register object_;
@@ -5840,7 +5877,7 @@
   Handle<ScopeInfo> scope_info = instr->scope_info();
   __ Push(scope_info);
   __ Push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+  CallRuntime(Runtime::kPushBlockContext, 2, instr);
   RecordSafepoint(Safepoint::kNoLazyDeopt);
 }
 
diff --git a/src/x64/lithium-codegen-x64.h b/src/x64/lithium-codegen-x64.h
index 5621a3d..ccd90b5 100644
--- a/src/x64/lithium-codegen-x64.h
+++ b/src/x64/lithium-codegen-x64.h
@@ -7,7 +7,7 @@
 
 #include "src/x64/lithium-x64.h"
 
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/deoptimizer.h"
 #include "src/lithium-codegen.h"
 #include "src/safepoint-table.h"
@@ -143,8 +143,8 @@
 
   // Code generation passes.  Returns true if code generation should
   // continue.
-  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
-  void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
+  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+  void GenerateBodyInstructionPost(LInstruction* instr) OVERRIDE;
   bool GeneratePrologue();
   bool GenerateDeferredCode();
   bool GenerateJumpTable();
@@ -206,10 +206,9 @@
                                     int argc);
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition cc,
-                    LEnvironment* environment,
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
                     Deoptimizer::BailoutType bailout_type);
-  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
 
   bool DeoptEveryNTimes() {
     return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -253,7 +252,7 @@
   void RecordSafepointWithRegisters(LPointerMap* pointers,
                                     int arguments,
                                     Safepoint::DeoptMode mode);
-  void RecordAndWritePosition(int position) V8_OVERRIDE;
+  void RecordAndWritePosition(int position) OVERRIDE;
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
@@ -263,13 +262,8 @@
   void EmitBranch(InstrType instr, Condition cc);
   template<class InstrType>
   void EmitFalseBranch(InstrType instr, Condition cc);
-  void EmitNumberUntagD(
-      Register input,
-      XMMRegister result,
-      bool allow_undefined_as_nan,
-      bool deoptimize_on_minus_zero,
-      LEnvironment* env,
-      NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        XMMRegister result, NumberUntagDMode mode);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
@@ -307,13 +301,17 @@
                     int* offset,
                     AllocationSiteMode mode);
 
-  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
   void DoLoadKeyedExternalArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedArray(LLoadKeyed* instr);
   void DoStoreKeyedExternalArray(LStoreKeyed* instr);
   void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
   void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+
 #ifdef _MSC_VER
   // On Windows, you may not access the stack more than one page below
   // the most recently mapped page. To make the allocated area randomly
@@ -341,18 +339,18 @@
 
   Safepoint::Kind expected_safepoint_kind_;
 
-  class PushSafepointRegistersScope V8_FINAL BASE_EMBEDDED {
+  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
    public:
     explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
-      ASSERT(codegen_->info()->is_calling());
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
     }
 
     ~PushSafepointRegistersScope() {
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
       codegen_->masm_->PopSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
     }
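
Editor's note: PushSafepointRegistersScope above is a standard RAII guard; the constructor pushes the safepoint registers and records the expected safepoint kind, and the destructor restores both, so every exit path from the enclosing block is covered. The pattern in isolation, as a standalone sketch rather than V8 code:

    class ScopedFlag {
     public:
      explicit ScopedFlag(bool* flag) : flag_(flag) { *flag_ = true; }
      ~ScopedFlag() { *flag_ = false; }  // Runs on every exit path.
     private:
      bool* flag_;
    };

    int main() {
      bool in_safepoint = false;
      {
        ScopedFlag scope(&in_safepoint);  // in_safepoint == true from here...
      }                                   // ...until the scope closes.
      return in_safepoint ? 1 : 0;
    }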
diff --git a/src/x64/lithium-gap-resolver-x64.cc b/src/x64/lithium-gap-resolver-x64.cc
index 93c1512..d10e1a1 100644
--- a/src/x64/lithium-gap-resolver-x64.cc
+++ b/src/x64/lithium-gap-resolver-x64.cc
@@ -6,8 +6,8 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/x64/lithium-gap-resolver-x64.h"
 #include "src/x64/lithium-codegen-x64.h"
+#include "src/x64/lithium-gap-resolver-x64.h"
 
 namespace v8 {
 namespace internal {
@@ -17,7 +17,7 @@
 
 
 void LGapResolver::Resolve(LParallelMove* parallel_move) {
-  ASSERT(moves_.is_empty());
+  DCHECK(moves_.is_empty());
   // Build up a worklist of moves.
   BuildInitialMoveList(parallel_move);
 
@@ -34,7 +34,7 @@
   // Perform the moves with constant sources.
   for (int i = 0; i < moves_.length(); ++i) {
     if (!moves_[i].IsEliminated()) {
-      ASSERT(moves_[i].source()->IsConstantOperand());
+      DCHECK(moves_[i].source()->IsConstantOperand());
       EmitMove(i);
     }
   }
@@ -65,13 +65,13 @@
   // which means that a call to PerformMove could change any source operand
   // in the move graph.
 
-  ASSERT(!moves_[index].IsPending());
-  ASSERT(!moves_[index].IsRedundant());
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
 
   // Clear this move's destination to indicate a pending move.  The actual
   // destination is saved in a stack-allocated local.  Recursion may allow
   // multiple moves to be pending.
-  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
   LOperand* destination = moves_[index].destination();
   moves_[index].set_destination(NULL);
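
Editor's note: PerformMove implements the classic parallel-move algorithm: moves are resolved recursively, a move whose destination has been cleared is "pending", and a cycle among pending moves is broken with a swap. The degenerate two-element cycle, as a toy sketch:

    #include <utility>

    // The parallel move {a <- b, b <- a} cannot be serialized as two plain
    // moves without clobbering a value; one swap realizes both components,
    // which is what EmitSwap does for the x64 resolver.
    int main() {
      int a = 1, b = 2;
      std::swap(a, b);
      return (a == 2 && b == 1) ? 0 : 1;
    }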
 
@@ -112,7 +112,7 @@
   for (int i = 0; i < moves_.length(); ++i) {
     LMoveOperands other_move = moves_[i];
     if (other_move.Blocks(destination)) {
-      ASSERT(other_move.IsPending());
+      DCHECK(other_move.IsPending());
       EmitSwap(index);
       return;
     }
@@ -124,12 +124,12 @@
 
 
 void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   // No operand should be the destination for more than one move.
   for (int i = 0; i < moves_.length(); ++i) {
     LOperand* destination = moves_[i].destination();
     for (int j = i + 1; j < moves_.length(); ++j) {
-      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
     }
   }
 #endif
@@ -151,7 +151,7 @@
       Register dst = cgen_->ToRegister(destination);
       __ movp(dst, src);
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
       __ movp(dst, src);
     }
@@ -162,7 +162,7 @@
       Register dst = cgen_->ToRegister(destination);
       __ movp(dst, src);
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
       __ movp(kScratchRegister, src);
       __ movp(dst, kScratchRegister);
@@ -188,7 +188,7 @@
       }
     } else if (destination->IsDoubleRegister()) {
       double v = cgen_->ToDouble(constant_source);
-      uint64_t int_val = BitCast<uint64_t, double>(v);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
       XMMRegister dst = cgen_->ToDoubleRegister(destination);
       if (int_val == 0) {
         __ xorps(dst, dst);
@@ -197,7 +197,7 @@
         __ movq(dst, kScratchRegister);
       }
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
       if (cgen_->IsSmiConstant(constant_source)) {
         __ Move(dst, cgen_->ToSmi(constant_source));
@@ -215,7 +215,7 @@
     if (destination->IsDoubleRegister()) {
       __ movaps(cgen_->ToDoubleRegister(destination), src);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsDoubleStackSlot());
       __ movsd(cgen_->ToOperand(destination), src);
     }
   } else if (source->IsDoubleStackSlot()) {
@@ -223,7 +223,7 @@
     if (destination->IsDoubleRegister()) {
       __ movsd(cgen_->ToDoubleRegister(destination), src);
     } else {
-      ASSERT(destination->IsDoubleStackSlot());
+      DCHECK(destination->IsDoubleStackSlot());
       __ movsd(xmm0, src);
       __ movsd(cgen_->ToOperand(destination), xmm0);
     }
@@ -278,13 +278,13 @@
 
   } else if (source->IsDoubleRegister() || destination->IsDoubleRegister()) {
     // Swap a double register and a double stack slot.
-    ASSERT((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
+    DCHECK((source->IsDoubleRegister() && destination->IsDoubleStackSlot()) ||
            (source->IsDoubleStackSlot() && destination->IsDoubleRegister()));
     XMMRegister reg = cgen_->ToDoubleRegister(source->IsDoubleRegister()
                                                   ? source
                                                   : destination);
     LOperand* other = source->IsDoubleRegister() ? destination : source;
-    ASSERT(other->IsDoubleStackSlot());
+    DCHECK(other->IsDoubleStackSlot());
     Operand other_operand = cgen_->ToOperand(other);
     __ movsd(xmm0, other_operand);
     __ movsd(other_operand, reg);
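
Editor's note: the constant-materialization hunk above switches from BitCast to bit_cast when reinterpreting a double's bits as a uint64_t. The standard memcpy-based idiom such a helper implements, as a sketch; V8's own definition lives elsewhere in the tree and may differ:

    #include <cstdint>
    #include <cstring>

    template <class Dest, class Source>
    Dest bit_cast(const Source& source) {
      static_assert(sizeof(Dest) == sizeof(Source), "sizes must match");
      Dest dest;
      std::memcpy(&dest, &source, sizeof(dest));  // Defined-behavior type pun.
      return dest;
    }

    // Usage mirroring the code above: a double whose bit pattern is zero can
    // be materialized with xorps instead of a 64-bit immediate load.
    //   uint64_t int_val = bit_cast<uint64_t, double>(v);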
diff --git a/src/x64/lithium-gap-resolver-x64.h b/src/x64/lithium-gap-resolver-x64.h
index fd4b91a..695b352 100644
--- a/src/x64/lithium-gap-resolver-x64.h
+++ b/src/x64/lithium-gap-resolver-x64.h
@@ -15,7 +15,7 @@
 class LCodeGen;
 class LGapResolver;
 
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
  public:
   explicit LGapResolver(LCodeGen* owner);
 
diff --git a/src/x64/lithium-x64.cc b/src/x64/lithium-x64.cc
index 325f2c0..69f50b1 100644
--- a/src/x64/lithium-x64.cc
+++ b/src/x64/lithium-x64.cc
@@ -6,10 +6,9 @@
 
 #if V8_TARGET_ARCH_X64
 
-#include "src/lithium-allocator-inl.h"
-#include "src/x64/lithium-x64.h"
-#include "src/x64/lithium-codegen-x64.h"
 #include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
+#include "src/x64/lithium-codegen-x64.h"
 
 namespace v8 {
 namespace internal {
@@ -28,17 +27,17 @@
   // outputs because all registers are blocked by the calling convention.
   // Input operands must use a fixed register or use-at-start policy or
   // a non-register policy.
-  ASSERT(Output() == NULL ||
+  DCHECK(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||
+    DCHECK(operand->HasFixedPolicy() ||
            operand->IsUsedAtStart());
   }
   for (TempIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+    DCHECK(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -331,6 +330,16 @@
 
 
 int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  if (kind == DOUBLE_REGISTERS && kDoubleSize == 2 * kPointerSize) {
+    // Skip a slot so the double-width slot is aligned for the x32 port.
+    spill_slot_count_++;
+    // The spill slot's address is rbp - (index + 1) * kPointerSize -
+    // StandardFrameConstants::kFixedFrameSizeFromFp. kFixedFrameSizeFromFp is
+    // 2 * kPointerSize; if rbp is aligned at an 8-byte boundary, the "|= 1"
+    // below makes sure the spilled doubles are aligned at an 8-byte boundary.
+    // TODO(haitao): make sure rbp is aligned at 8-byte boundary for x32 port.
+    spill_slot_count_ |= 1;
+  }
   return spill_slot_count_++;
 }
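
Editor's note: the new GetNextSpillIndex logic keeps double spill slots 8-byte aligned on the x32 port, where kPointerSize is 4: bumping the counter and then forcing it odd makes (index + 1) even, so the slot offset from an aligned rbp is a multiple of 8. A standalone sketch of the arithmetic, assuming kFixedFrameSizeFromFp == 2 * kPointerSize as the comment states:

    #include <cassert>

    int NextDoubleSpillIndex(int* spill_slot_count) {
      (*spill_slot_count)++;   // Burn the extra half-width slot.
      *spill_slot_count |= 1;  // Force an odd index.
      return (*spill_slot_count)++;
    }

    int main() {
      const int kPointerSize = 4;                // x32 assumption.
      const int kFixedFrameSizeFromFp = 2 * kPointerSize;
      int count = 0;
      int index = NextDoubleSpillIndex(&count);  // index == 1 (odd).
      // Slot offset below rbp: (index + 1) * kPointerSize + fixed frame size.
      assert(((index + 1) * kPointerSize + kFixedFrameSizeFromFp) % 8 == 0);
    }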
 
@@ -343,7 +352,7 @@
   if (kind == DOUBLE_REGISTERS) {
     return LDoubleStackSlot::Create(index, zone());
   } else {
-    ASSERT(kind == GENERAL_REGISTERS);
+    DCHECK(kind == GENERAL_REGISTERS);
     return LStackSlot::Create(index, zone());
   }
 }
@@ -351,8 +360,9 @@
 
 void LStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
-  hydrogen()->access().PrintTo(stream);
-  stream->Add(" <- ");
+  OStringStream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.c_str());
   value()->PrintTo(stream);
 }
 
@@ -389,7 +399,7 @@
   }
 
   if (value() == NULL) {
-    ASSERT(hydrogen()->IsConstantHoleStore() &&
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
            hydrogen()->value()->representation().IsDouble());
     stream->Add("<the hole(nan)>");
   } else {
@@ -414,7 +424,7 @@
 
 
 LPlatformChunk* LChunkBuilder::Build() {
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
   LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
@@ -439,12 +449,6 @@
 }
 
 
-void LChunkBuilder::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
-  status_ = ABORTED;
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
   return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
                                   Register::ToAllocationIndex(reg));
@@ -599,9 +603,8 @@
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
   ZoneList<HValue*> objects_to_materialize(0, zone());
-  instr->set_environment(CreateEnvironment(hydrogen_env,
-                                           &argument_index_accumulator,
-                                           &objects_to_materialize));
+  instr->set_environment(CreateEnvironment(
+      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
   return instr;
 }
 
@@ -635,7 +638,7 @@
 
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
-  ASSERT(!instr->HasPointerMap());
+  DCHECK(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(zone()));
   return instr;
 }
@@ -656,14 +659,14 @@
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
 
 LOperand* LChunkBuilder::FixedTemp(XMMRegister reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
@@ -692,8 +695,8 @@
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
 
     HValue* right_value = instr->right();
@@ -735,9 +738,9 @@
 
 LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                            HArithmeticBinaryOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->left()->representation().IsDouble());
-  ASSERT(instr->right()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
   if (op == Token::MOD) {
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseFixedDouble(instr->BetterRightOperand(), xmm1);
@@ -756,8 +759,8 @@
                                            HBinaryOperation* instr) {
   HValue* left = instr->left();
   HValue* right = instr->right();
-  ASSERT(left->representation().IsTagged());
-  ASSERT(right->representation().IsTagged());
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* left_operand = UseFixed(left, rdx);
   LOperand* right_operand = UseFixed(right, rax);
@@ -768,7 +771,7 @@
 
 
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
-  ASSERT(is_building());
+  DCHECK(is_building());
   current_block_ = block;
   next_block_ = next_block;
   if (block->IsStartBlock()) {
@@ -777,13 +780,13 @@
   } else if (block->predecessors()->length() == 1) {
     // We have a single predecessor => copy environment and outgoing
     // argument count from the predecessor.
-    ASSERT(block->phis()->length() == 0);
+    DCHECK(block->phis()->length() == 0);
     HBasicBlock* pred = block->predecessors()->at(0);
     HEnvironment* last_environment = pred->last_environment();
-    ASSERT(last_environment != NULL);
+    DCHECK(last_environment != NULL);
     // Only copy the environment, if it is later used again.
     if (pred->end()->SecondSuccessor() == NULL) {
-      ASSERT(pred->end()->FirstSuccessor() == block);
+      DCHECK(pred->end()->FirstSuccessor() == block);
     } else {
       if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
           pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
@@ -791,7 +794,7 @@
       }
     }
     block->UpdateEnvironment(last_environment);
-    ASSERT(pred->argument_count() >= 0);
+    DCHECK(pred->argument_count() >= 0);
     argument_count_ = pred->argument_count();
   } else {
     // We are at a state join => process phis.
@@ -843,7 +846,7 @@
     if (current->OperandCount() == 0) {
       instr = DefineAsRegister(new(zone()) LDummy());
     } else {
-      ASSERT(!current->OperandAt(0)->IsControlInstruction());
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
       instr = DefineAsRegister(new(zone())
           LDummyUse(UseAny(current->OperandAt(0))));
     }
@@ -866,7 +869,7 @@
   }
 
   argument_count_ += current->argument_delta();
-  ASSERT(argument_count_ >= 0);
+  DCHECK(argument_count_ >= 0);
 
   if (instr != NULL) {
     AddInstruction(instr, current);
@@ -908,7 +911,7 @@
       LUnallocated* operand = LUnallocated::cast(it.Current());
       if (operand->HasFixedPolicy()) ++fixed;
     }
-    ASSERT(fixed == 0 || used_at_start == 0);
+    DCHECK(fixed == 0 || used_at_start == 0);
   }
 #endif
 
@@ -972,7 +975,7 @@
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LCmpMapAndBranch(value);
 }
@@ -1093,14 +1096,14 @@
 
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
-  const CallInterfaceDescriptor* descriptor = instr->descriptor();
+  CallInterfaceDescriptor descriptor = instr->descriptor();
 
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
   ops.Add(target, zone());
   for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op = UseFixed(instr->OperandAt(i),
-        descriptor->GetParameterRegister(i - 1));
+    LOperand* op =
+        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
     ops.Add(op, zone());
   }
 
@@ -1110,6 +1113,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+    HTailCallThroughMegamorphicCache* instr) {
+  LOperand* context = UseFixed(instr->context(), rsi);
+  LOperand* receiver_register =
+      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+  LOperand* name_register =
+      UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  // Not marked as call. It can't deoptimize, and it never returns.
+  return new (zone()) LTailCallThroughMegamorphicCache(
+      context, receiver_register, name_register);
+}
+
+
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* function = UseFixed(instr->function(), rdi);
@@ -1120,14 +1136,24 @@
 
 LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
   switch (instr->op()) {
-    case kMathFloor: return DoMathFloor(instr);
-    case kMathRound: return DoMathRound(instr);
-    case kMathAbs: return DoMathAbs(instr);
-    case kMathLog: return DoMathLog(instr);
-    case kMathExp: return DoMathExp(instr);
-    case kMathSqrt: return DoMathSqrt(instr);
-    case kMathPowHalf: return DoMathPowHalf(instr);
-    case kMathClz32: return DoMathClz32(instr);
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
     default:
       UNREACHABLE();
       return NULL;
@@ -1150,6 +1176,13 @@
 }
 
 
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
 LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
   LOperand* context = UseAny(instr->context());
   LOperand* input = UseRegisterAtStart(instr->value());
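
Editor's note: the new kMathFround case lowers Math.fround, which rounds a double to the nearest float32 value and widens it back; on x64 this is presumably a cvtsd2ss/cvtss2sd pair, though the codegen itself is not part of this hunk. The semantics in a standalone sketch:

    #include <cassert>

    static double Fround(double x) {
      return static_cast<double>(static_cast<float>(x));  // Round-trip via float32.
    }

    int main() {
      assert(Fround(1.5) == 1.5);  // 1.5 is exactly representable in float32.
      assert(Fround(0.1) != 0.1);  // 0.1 is not: precision is lost.
    }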
@@ -1163,8 +1196,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegisterAtStart(instr->value());
   return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
 }
@@ -1178,8 +1211,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* value = UseTempRegister(instr->value());
   LOperand* temp1 = TempRegister();
   LOperand* temp2 = TempRegister();
@@ -1254,9 +1287,9 @@
 
 LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
-    ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
 
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1268,9 +1301,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
@@ -1286,9 +1319,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(rax);
@@ -1305,9 +1338,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), rax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(rdx);
@@ -1354,9 +1387,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(rax);
@@ -1381,9 +1414,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), rax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(rdx);
@@ -1410,14 +1443,15 @@
 
 
 LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
           dividend, divisor));
-  if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
     result = AssignEnvironment(result);
   }
   return result;
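
Editor's note: the tightened condition above is sound because a power-of-2 modulus can only produce -0 when the dividend is negative; without kLeftCanBeNegative no bailout environment is needed. A sketch of the underlying arithmetic for divisor == 2^k (ignoring INT_MIN overflow, which this sketch does not handle):

    #include <cassert>

    int ModPowerOf2(int x, int divisor) {
      int mask = divisor - 1;
      return x >= 0 ? (x & mask) : -(-x & mask);  // JS-style truncated modulus.
    }

    int main() {
      assert(ModPowerOf2(7, 4) == 3);
      assert(ModPowerOf2(-7, 4) == -3);
      // In JS, -8 % 4 is -0: the only case that needs the deopt bailout.
      assert(ModPowerOf2(-8, 4) == 0);
    }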
@@ -1425,9 +1459,9 @@
 
 
 LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(rax);
@@ -1442,9 +1476,9 @@
 
 
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), rax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(rdx);
@@ -1477,8 +1511,8 @@
 
 LInstruction* LChunkBuilder::DoMul(HMul* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstant(instr->BetterRightOperand());
     LMulI* mul = new(zone()) LMulI(left, right);
@@ -1497,8 +1531,8 @@
 
 LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new(zone()) LSubI(left, right);
@@ -1522,8 +1556,8 @@
     // are multiple uses of the add's inputs, so using a 3-register add will
     // preserve all input values for later uses.
     bool use_lea = LAddI::UseLea(instr);
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     HValue* right_candidate = instr->BetterRightOperand();
     LOperand* right;
@@ -1544,9 +1578,9 @@
     }
     return result;
   } else if (instr->representation().IsExternal()) {
-    ASSERT(instr->left()->representation().IsExternal());
-    ASSERT(instr->right()->representation().IsInteger32());
-    ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+    DCHECK(instr->left()->representation().IsExternal());
+    DCHECK(instr->right()->representation().IsInteger32());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
     bool use_lea = LAddI::UseLea(instr);
     LOperand* left = UseRegisterAtStart(instr->left());
     HValue* right_candidate = instr->right();
@@ -1570,8 +1604,8 @@
 LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
   LOperand* left = NULL;
   LOperand* right = NULL;
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   if (instr->representation().IsSmi()) {
     left = UseRegisterAtStart(instr->BetterLeftOperand());
     right = UseAtStart(instr->BetterRightOperand());
@@ -1579,7 +1613,7 @@
     left = UseRegisterAtStart(instr->BetterLeftOperand());
     right = UseOrConstantAtStart(instr->BetterRightOperand());
   } else {
-    ASSERT(instr->representation().IsDouble());
+    DCHECK(instr->representation().IsDouble());
     left = UseRegisterAtStart(instr->left());
     right = UseRegisterAtStart(instr->right());
   }
@@ -1589,14 +1623,16 @@
 
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  ASSERT(instr->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
   // We call a C function for double power. It can't trigger a GC.
   // We need to use a fixed result register for the call.
   Representation exponent_type = instr->right()->representation();
-  ASSERT(instr->left()->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
   LOperand* left = UseFixedDouble(instr->left(), xmm2);
-  LOperand* right = exponent_type.IsDouble() ?
-      UseFixedDouble(instr->right(), xmm1) : UseFixed(instr->right(), rdx);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), xmm1)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
   LPower* result = new(zone()) LPower(left, right);
   return MarkAsCall(DefineFixedDouble(result, xmm3), instr,
                     CAN_DEOPTIMIZE_EAGERLY);
@@ -1604,8 +1640,8 @@
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* left = UseFixed(instr->left(), rdx);
   LOperand* right = UseFixed(instr->right(), rax);
@@ -1618,15 +1654,15 @@
     HCompareNumericAndBranch* instr) {
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(r));
-    ASSERT(instr->right()->representation().Equals(r));
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
   } else {
-    ASSERT(r.IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     LOperand* left;
     LOperand* right;
     if (instr->left()->IsConstant() && instr->right()->IsConstant()) {
@@ -1664,13 +1700,13 @@
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsObjectAndBranch(UseRegisterAtStart(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
   return new(zone()) LIsStringAndBranch(value, temp);
@@ -1678,14 +1714,14 @@
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   LOperand* temp = TempRegister();
   return new(zone()) LIsUndetectableAndBranch(value, temp);
@@ -1695,8 +1731,8 @@
 LInstruction* LChunkBuilder::DoStringCompareAndBranch(
     HStringCompareAndBranch* instr) {
 
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), rsi);
   LOperand* left = UseFixed(instr->left(), rdx);
   LOperand* right = UseFixed(instr->right(), rax);
@@ -1709,7 +1745,7 @@
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LHasInstanceTypeAndBranch(value);
 }
@@ -1717,7 +1753,7 @@
 
 LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
     HGetCachedArrayIndex* instr)  {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
   return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
@@ -1726,7 +1762,7 @@
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LHasCachedArrayIndexAndBranch(value);
 }
@@ -1844,7 +1880,7 @@
       }
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       if (val->type().IsSmi() || val->representation().IsSmi()) {
         LOperand* value = UseRegister(val);
         return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
@@ -1871,7 +1907,7 @@
       return AssignEnvironment(
           DefineAsRegister(new(zone()) LDoubleToSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       LOperand* value = UseRegister(val);
       LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
       if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
@@ -1904,7 +1940,7 @@
       }
       return result;
     } else {
-      ASSERT(to.IsDouble());
+      DCHECK(to.IsDouble());
       if (val->CheckFlag(HInstruction::kUint32)) {
         return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
       } else {
@@ -1968,7 +2004,7 @@
   } else if (input_rep.IsInteger32()) {
     return DefineSameAsFirst(new(zone()) LClampIToUint8(reg));
   } else {
-    ASSERT(input_rep.IsSmiOrTagged());
+    DCHECK(input_rep.IsSmiOrTagged());
     // Register allocator doesn't (yet) support allocation of double
     // temps. Reserve xmm1 explicitly.
     LClampTToUint8* result = new(zone()) LClampTToUint8(reg,
@@ -1980,7 +2016,7 @@
 
 LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
   HValue* value = instr->value();
-  ASSERT(value->representation().IsDouble());
+  DCHECK(value->representation().IsDouble());
   return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
 }
 
@@ -2030,9 +2066,15 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* global_object = UseFixed(instr->global_object(), rax);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
   LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object);
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2097,8 +2139,14 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* object = UseFixed(instr->object(), rax);
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
+      context, object, vector);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
 
@@ -2120,7 +2168,7 @@
   // size is 64-bit. For x32 port, we sign extend the dehoisted key at the use
   // points and should not invoke this function. We can't use STATIC_ASSERT
   // here as the pointer size is 32-bit for x32.
-  ASSERT(kPointerSize == kInt64Size);
+  DCHECK(kPointerSize == kInt64Size);
   BitVector* dehoisted_key_ids = chunk_->GetDehoistedKeyIds();
   if (dehoisted_key_ids->Contains(candidate->id())) return;
   dehoisted_key_ids->Add(candidate->id());
@@ -2132,7 +2180,7 @@
 
 
 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  ASSERT((kPointerSize == kInt64Size &&
+  DCHECK((kPointerSize == kInt64Size &&
           instr->key()->representation().IsInteger32()) ||
          (kPointerSize == kInt32Size &&
           instr->key()->representation().IsSmiOrInteger32()));
@@ -2158,7 +2206,7 @@
     LOperand* obj = UseRegisterAtStart(instr->elements());
     result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
   } else {
-    ASSERT(
+    DCHECK(
         (instr->representation().IsInteger32() &&
          !(IsDoubleOrFloatElementsKind(elements_kind))) ||
         (instr->representation().IsDouble() &&
@@ -2183,11 +2231,16 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* object = UseFixed(instr->object(), rdx);
-  LOperand* key = UseFixed(instr->key(), rax);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
 
   LLoadKeyedGeneric* result =
-      new(zone()) LLoadKeyedGeneric(context, object, key);
+      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
   return MarkAsCall(DefineFixed(result, rax), instr);
 }
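
Editor's note: each of the three generic loads above (global, named, keyed) repeats the same pattern: when --vector-ics is enabled, the type-feedback vector is pinned to the register the VectorLoadIC calling convention expects (the codegen-side counterpart is the EmitVectorLoadICRegisters<T> template declared in the header earlier). A self-contained model of that convention; all names below are assumptions except the flag and descriptor names taken from the diff:

    #include <cassert>
    #include <cstddef>

    struct LOperandSketch { int fixed_register; };

    static bool FLAG_vector_ics = true;    // Mirrors V8's flag name.
    static const int kVectorRegister = 3;  // Placeholder register code.

    // Stands in for LChunkBuilder::FixedTemp: pins an operand to one register.
    LOperandSketch* FixedTempSketch(int reg_code) {
      static LOperandSketch op;
      op.fixed_register = reg_code;
      return &op;
    }

    int main() {
      LOperandSketch* vector = NULL;
      if (FLAG_vector_ics) vector = FixedTempSketch(kVectorRegister);
      assert(!FLAG_vector_ics || vector->fixed_register == kVectorRegister);
    }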
 
@@ -2200,7 +2253,7 @@
   }
 
   if (!instr->is_typed_elements()) {
-    ASSERT(instr->elements()->representation().IsTagged());
+    DCHECK(instr->elements()->representation().IsTagged());
     bool needs_write_barrier = instr->NeedsWriteBarrier();
     LOperand* object = NULL;
     LOperand* key = NULL;
@@ -2212,7 +2265,7 @@
       val = UseRegisterAtStart(instr->value());
       key = UseRegisterOrConstantAtStart(instr->key());
     } else {
-      ASSERT(value_representation.IsSmiOrTagged() ||
+      DCHECK(value_representation.IsSmiOrTagged() ||
              value_representation.IsInteger32());
       if (needs_write_barrier) {
         object = UseTempRegister(instr->elements());
@@ -2228,12 +2281,12 @@
     return new(zone()) LStoreKeyed(object, key, val);
   }
 
-  ASSERT(
+  DCHECK(
        (instr->value()->representation().IsInteger32() &&
        !IsDoubleOrFloatElementsKind(elements_kind)) ||
        (instr->value()->representation().IsDouble() &&
        IsDoubleOrFloatElementsKind(elements_kind)));
-  ASSERT((instr->is_fixed_typed_array() &&
+  DCHECK((instr->is_fixed_typed_array() &&
           instr->elements()->representation().IsTagged()) ||
          (instr->is_external() &&
           instr->elements()->representation().IsExternal()));
@@ -2260,13 +2313,14 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* object = UseFixed(instr->object(), rdx);
-  LOperand* key = UseFixed(instr->key(), rcx);
-  LOperand* value = UseFixed(instr->value(), rax);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsTagged());
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
 
   LStoreKeyedGeneric* result =
       new(zone()) LStoreKeyedGeneric(context, object, key, value);
@@ -2317,9 +2371,9 @@
         ? UseRegister(instr->object())
         : UseTempRegister(instr->object());
   } else if (is_external_location) {
-    ASSERT(!is_in_object);
-    ASSERT(!needs_write_barrier);
-    ASSERT(!needs_write_barrier_for_map);
+    DCHECK(!is_in_object);
+    DCHECK(!needs_write_barrier);
+    DCHECK(!needs_write_barrier_for_map);
     obj = UseRegisterOrConstant(instr->object());
   } else {
     obj = needs_write_barrier_for_map
@@ -2338,8 +2392,6 @@
     val = UseFixed(instr->value(), rax);
   } else if (can_be_constant) {
     val = UseRegisterOrConstant(instr->value());
-  } else if (instr->field_representation().IsSmi()) {
-    val = UseRegister(instr->value());
   } else if (instr->field_representation().IsDouble()) {
     val = UseRegisterAtStart(instr->value());
   } else {
@@ -2357,8 +2409,9 @@
 
 LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), rsi);
-  LOperand* object = UseFixed(instr->object(), rdx);
-  LOperand* value = UseFixed(instr->value(), rax);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
   LStoreNamedGeneric* result =
       new(zone()) LStoreNamedGeneric(context, object, value);
@@ -2421,7 +2474,7 @@
 
 
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
-  ASSERT(argument_count_ == 0);
+  DCHECK(argument_count_ == 0);
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
   return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2434,11 +2487,11 @@
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor();
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
-    Register reg = descriptor->GetParameterRegister(index);
+    Register reg = descriptor.GetEnvironmentParameterRegister(index);
     return DefineFixed(result, reg);
   }
 }
@@ -2454,7 +2507,7 @@
   } else {
     spill_index = env_index - instr->environment()->first_local_index();
     if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
-      Abort(kTooManySpillSlotsNeededForOSR);
+      Retry(kTooManySpillSlotsNeededForOSR);
       spill_index = 0;
     }
   }
@@ -2540,7 +2593,7 @@
     LOperand* context = UseFixed(instr->context(), rsi);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
-    ASSERT(instr->is_backwards_branch());
+    DCHECK(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
     return AssignEnvironment(
         AssignPointerMap(new(zone()) LStackCheck(context)));
@@ -2561,6 +2614,7 @@
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
   }
+  inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2576,7 +2630,7 @@
   if (env->entry()->arguments_pushed()) {
     int argument_count = env->arguments_environment()->parameter_count();
     pop = new(zone()) LDrop(argument_count);
-    ASSERT(instr->argument_delta() == -argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
   }
 
   HEnvironment* outer = current_block_->last_environment()->
diff --git a/src/x64/lithium-x64.h b/src/x64/lithium-x64.h
index 9609cfc..30b994e 100644
--- a/src/x64/lithium-x64.h
+++ b/src/x64/lithium-x64.h
@@ -6,8 +6,8 @@
 #define V8_X64_LITHIUM_X64_H_
 
 #include "src/hydrogen.h"
-#include "src/lithium-allocator.h"
 #include "src/lithium.h"
+#include "src/lithium-allocator.h"
 #include "src/safepoint-table.h"
 #include "src/utils.h"
 
@@ -17,160 +17,162 @@
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
-  V(AccessArgumentsAt)                          \
-  V(AddI)                                       \
-  V(Allocate)                                   \
-  V(AllocateBlockContext)                       \
-  V(ApplyArguments)                             \
-  V(ArgumentsElements)                          \
-  V(ArgumentsLength)                            \
-  V(ArithmeticD)                                \
-  V(ArithmeticT)                                \
-  V(BitI)                                       \
-  V(BoundsCheck)                                \
-  V(Branch)                                     \
-  V(CallJSFunction)                             \
-  V(CallWithDescriptor)                         \
-  V(CallFunction)                               \
-  V(CallNew)                                    \
-  V(CallNewArray)                               \
-  V(CallRuntime)                                \
-  V(CallStub)                                   \
-  V(CheckInstanceType)                          \
-  V(CheckMaps)                                  \
-  V(CheckMapValue)                              \
-  V(CheckNonSmi)                                \
-  V(CheckSmi)                                   \
-  V(CheckValue)                                 \
-  V(ClampDToUint8)                              \
-  V(ClampIToUint8)                              \
-  V(ClampTToUint8)                              \
-  V(ClassOfTestAndBranch)                       \
-  V(CompareMinusZeroAndBranch)                  \
-  V(CompareNumericAndBranch)                    \
-  V(CmpObjectEqAndBranch)                       \
-  V(CmpHoleAndBranch)                           \
-  V(CmpMapAndBranch)                            \
-  V(CmpT)                                       \
-  V(ConstantD)                                  \
-  V(ConstantE)                                  \
-  V(ConstantI)                                  \
-  V(ConstantS)                                  \
-  V(ConstantT)                                  \
-  V(ConstructDouble)                            \
-  V(Context)                                    \
-  V(DateField)                                  \
-  V(DebugBreak)                                 \
-  V(DeclareGlobals)                             \
-  V(Deoptimize)                                 \
-  V(DivByConstI)                                \
-  V(DivByPowerOf2I)                             \
-  V(DivI)                                       \
-  V(DoubleBits)                                 \
-  V(DoubleToI)                                  \
-  V(DoubleToSmi)                                \
-  V(Drop)                                       \
-  V(DummyUse)                                   \
-  V(Dummy)                                      \
-  V(FlooringDivByConstI)                        \
-  V(FlooringDivByPowerOf2I)                     \
-  V(FlooringDivI)                               \
-  V(ForInCacheArray)                            \
-  V(ForInPrepareMap)                            \
-  V(FunctionLiteral)                            \
-  V(GetCachedArrayIndex)                        \
-  V(Goto)                                       \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceTypeAndBranch)                   \
-  V(InnerAllocatedObject)                       \
-  V(InstanceOf)                                 \
-  V(InstanceOfKnownGlobal)                      \
-  V(InstructionGap)                             \
-  V(Integer32ToDouble)                          \
-  V(InvokeFunction)                             \
-  V(IsConstructCallAndBranch)                   \
-  V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
-  V(IsSmiAndBranch)                             \
-  V(IsUndetectableAndBranch)                    \
-  V(Label)                                      \
-  V(LazyBailout)                                \
-  V(LoadContextSlot)                            \
-  V(LoadRoot)                                   \
-  V(LoadFieldByIndex)                           \
-  V(LoadFunctionPrototype)                      \
-  V(LoadGlobalCell)                             \
-  V(LoadGlobalGeneric)                          \
-  V(LoadKeyed)                                  \
-  V(LoadKeyedGeneric)                           \
-  V(LoadNamedField)                             \
-  V(LoadNamedGeneric)                           \
-  V(MapEnumLength)                              \
-  V(MathAbs)                                    \
-  V(MathClz32)                                  \
-  V(MathExp)                                    \
-  V(MathFloor)                                  \
-  V(MathLog)                                    \
-  V(MathMinMax)                                 \
-  V(MathPowHalf)                                \
-  V(MathRound)                                  \
-  V(MathSqrt)                                   \
-  V(ModByConstI)                                \
-  V(ModByPowerOf2I)                             \
-  V(ModI)                                       \
-  V(MulI)                                       \
-  V(NumberTagD)                                 \
-  V(NumberTagI)                                 \
-  V(NumberTagU)                                 \
-  V(NumberUntagD)                               \
-  V(OsrEntry)                                   \
-  V(Parameter)                                  \
-  V(Power)                                      \
-  V(PushArgument)                               \
-  V(RegExpLiteral)                              \
-  V(Return)                                     \
-  V(SeqStringGetChar)                           \
-  V(SeqStringSetChar)                           \
-  V(ShiftI)                                     \
-  V(SmiTag)                                     \
-  V(SmiUntag)                                   \
-  V(StackCheck)                                 \
-  V(StoreCodeEntry)                             \
-  V(StoreContextSlot)                           \
-  V(StoreFrameContext)                          \
-  V(StoreGlobalCell)                            \
-  V(StoreKeyed)                                 \
-  V(StoreKeyedGeneric)                          \
-  V(StoreNamedField)                            \
-  V(StoreNamedGeneric)                          \
-  V(StringAdd)                                  \
-  V(StringCharCodeAt)                           \
-  V(StringCharFromCode)                         \
-  V(StringCompareAndBranch)                     \
-  V(SubI)                                       \
-  V(TaggedToI)                                  \
-  V(ThisFunction)                               \
-  V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
-  V(TrapAllocationMemento)                      \
-  V(Typeof)                                     \
-  V(TypeofIsAndBranch)                          \
-  V(Uint32ToDouble)                             \
-  V(UnknownOSRValue)                            \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(DummyUse)                                \
+  V(Dummy)                                   \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PushArgument)                            \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(TailCallThroughMegamorphicCache)         \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {                      \
+  virtual Opcode opcode() const FINAL OVERRIDE {                            \
     return LInstruction::k##type;                                           \
   }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;   \
-  virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {               \
+  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;         \
+  virtual const char* Mnemonic() const FINAL OVERRIDE {                     \
     return mnemonic;                                                        \
   }                                                                         \
   static L##type* cast(LInstruction* instr) {                               \
-    ASSERT(instr->Is##type());                                              \
+    DCHECK(instr->Is##type());                                              \
     return reinterpret_cast<L##type*>(instr);                               \
   }
 
@@ -219,6 +221,9 @@
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -261,11 +266,12 @@
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator support.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
@@ -286,14 +292,14 @@
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+  virtual bool HasResult() const FINAL OVERRIDE {
     return R != 0 && result() != NULL;
   }
   void set_result(LOperand* operand) { results_[0] = operand; }
   LOperand* result() const { return results_[0]; }
 
   virtual bool MustSignExtendResult(
-      LPlatformChunk* chunk) const V8_FINAL V8_OVERRIDE;
+      LPlatformChunk* chunk) const FINAL OVERRIDE;
 
  protected:
   EmbeddedContainer<LOperand*, R> results_;
@@ -311,11 +317,11 @@
 
  private:
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return I; }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+  virtual int TempCount() FINAL OVERRIDE { return T; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
 };
 
 
@@ -330,10 +336,10 @@
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual bool IsGap() const FINAL OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
-    ASSERT(instr->IsGap());
+    DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
   }
 
@@ -368,11 +374,11 @@
 };
 
 
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -380,14 +386,14 @@
 };
 
 
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  virtual bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -396,7 +402,7 @@
 };
 
 
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LLazyBailout() : gap_instructions_size_(0) { }
 
@@ -412,14 +418,14 @@
 };
 
 
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LDummy() { }
+  LDummy() {}
   DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
 };
 
 
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDummyUse(LOperand* value) {
     inputs_[0] = value;
@@ -428,25 +434,25 @@
 };
 
 
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
  public:
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -462,16 +468,16 @@
 };
 
 
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
 
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallStub(LOperand* context) {
     inputs_[0] = context;
@@ -484,9 +490,30 @@
 };
 
 
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+    : public LTemplateInstruction<0, 3, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  explicit LTailCallThroughMegamorphicCache(LOperand* context,
+                                            LOperand* receiver,
+                                            LOperand* name) {
+    inputs_[0] = context;
+    inputs_[1] = receiver;
+    inputs_[2] = name;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* name() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+                               "tail-call-through-megamorphic-cache")
+  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -498,7 +525,7 @@
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+  virtual bool IsControl() const FINAL OVERRIDE { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -537,7 +564,7 @@
 };
 
 
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LWrapReceiver(LOperand* receiver, LOperand* function) {
     inputs_[0] = receiver;
@@ -552,7 +579,7 @@
 };
 
 
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
@@ -573,7 +600,7 @@
 };
 
 
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
     inputs_[0] = arguments;
@@ -587,11 +614,11 @@
 
   DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArgumentsLength(LOperand* elements) {
     inputs_[0] = elements;
@@ -603,14 +630,14 @@
 };
 
 
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
   DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -628,7 +655,7 @@
 };
 
 
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LModByConstI(LOperand* dividend,
                int32_t divisor,
@@ -653,7 +680,7 @@
 };
 
 
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LModI(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
@@ -670,7 +697,7 @@
 };
 
 
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -688,7 +715,7 @@
 };
 
 
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LDivByConstI(LOperand* dividend,
                int32_t divisor,
@@ -713,7 +740,7 @@
 };
 
 
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -730,7 +757,7 @@
 };
 
 
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -749,7 +776,7 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 3> {
  public:
   LFlooringDivByConstI(LOperand* dividend,
                        int32_t divisor,
@@ -777,7 +804,7 @@
 };
 
 
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -794,7 +821,7 @@
 };
 
 
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMulI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -809,7 +836,7 @@
 };
 
 
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCompareNumericAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -828,11 +855,11 @@
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathFloor(LOperand* value) {
     inputs_[0] = value;
@@ -845,9 +872,9 @@
 };
 
 
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
-  explicit LMathRound(LOperand* value, LOperand* temp) {
+  LMathRound(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
     temps_[0] = temp;
   }
@@ -860,7 +887,17 @@
 };
 
 
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   explicit LMathAbs(LOperand* context, LOperand* value) {
     inputs_[1] = context;
@@ -875,7 +912,7 @@
 };
 
 
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathLog(LOperand* value) {
     inputs_[0] = value;
@@ -887,7 +924,7 @@
 };
 
 
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathClz32(LOperand* value) {
     inputs_[0] = value;
@@ -899,7 +936,7 @@
 };
 
 
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LMathExp(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -916,7 +953,7 @@
 };
 
 
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathSqrt(LOperand* value) {
     inputs_[0] = value;
@@ -928,7 +965,7 @@
 };
 
 
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathPowHalf(LOperand* value) {
     inputs_[0] = value;
@@ -940,7 +977,7 @@
 };
 
 
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -954,7 +991,7 @@
 };
 
 
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpHoleAndBranch(LOperand* object) {
     inputs_[0] = object;
@@ -967,7 +1004,7 @@
 };
 
 
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCompareMinusZeroAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -982,7 +1019,7 @@
 
 
 
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LIsObjectAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -993,11 +1030,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   explicit LIsStringAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1010,11 +1047,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1025,11 +1062,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1043,11 +1080,11 @@
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
  public:
   explicit LStringCompareAndBranch(LOperand* context,
                                    LOperand* left,
@@ -1065,13 +1102,13 @@
                                "string-compare-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Token::Value op() const { return hydrogen()->token(); }
 };
 
 
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LHasInstanceTypeAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1083,11 +1120,11 @@
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGetCachedArrayIndex(LOperand* value) {
     inputs_[0] = value;
@@ -1100,7 +1137,7 @@
 };
 
 
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
     : public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1113,11 +1150,11 @@
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
     inputs_[0] = value;
@@ -1133,11 +1170,11 @@
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LCmpT(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1156,7 +1193,7 @@
 };
 
 
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1172,7 +1209,7 @@
 };
 
 
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -1193,7 +1230,7 @@
     return lazy_deopt_env_;
   }
   virtual void SetDeferredLazyDeoptimizationEnvironment(
-      LEnvironment* env) V8_OVERRIDE {
+      LEnvironment* env) OVERRIDE {
     lazy_deopt_env_ = env;
   }
 
@@ -1202,7 +1239,7 @@
 };
 
 
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LBoundsCheck(LOperand* index, LOperand* length) {
     inputs_[0] = index;
@@ -1217,7 +1254,7 @@
 };
 
 
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1237,7 +1274,7 @@
 };
 
 
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
       : op_(op), can_deopt_(can_deopt) {
@@ -1258,7 +1295,7 @@
 };
 
 
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSubI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1273,7 +1310,7 @@
 };
 
 
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1282,7 +1319,7 @@
 };
 
 
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1291,7 +1328,7 @@
 };
 
 
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
  public:
   explicit LConstantD(LOperand* temp) {
     temps_[0] = temp;
@@ -1306,7 +1343,7 @@
 };
 
 
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1317,7 +1354,7 @@
 };
 
 
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1328,7 +1365,7 @@
 };
 
 
-class LBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1339,17 +1376,17 @@
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
 };
 
 
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpMapAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1364,7 +1401,7 @@
 };
 
 
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMapEnumLength(LOperand* value) {
     inputs_[0] = value;
@@ -1376,7 +1413,7 @@
 };
 
 
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDateField(LOperand* date, Smi* index) : index_(index) {
     inputs_[0] = date;
@@ -1393,7 +1430,7 @@
 };
 
 
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
     inputs_[0] = string;
@@ -1408,7 +1445,7 @@
 };
 
 
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LSeqStringSetChar(LOperand* context,
                     LOperand* string,
@@ -1429,7 +1466,7 @@
 };
 
 
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LAddI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1449,7 +1486,7 @@
 };
 
 
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathMinMax(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1464,7 +1501,7 @@
 };
 
 
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LPower(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1479,7 +1516,7 @@
 };
 
 
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
       : op_(op) {
@@ -1491,18 +1528,18 @@
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticD;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LArithmeticT(Token::Value op,
                LOperand* context,
@@ -1519,18 +1556,18 @@
   LOperand* left() { return inputs_[1]; }
   LOperand* right() { return inputs_[2]; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticT;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   explicit LReturn(LOperand* value,
                    LOperand* context,
@@ -1547,7 +1584,7 @@
     return parameter_count()->IsConstantOperand();
   }
   LConstantOperand* constant_parameter_count() {
-    ASSERT(has_constant_parameter_count());
+    DCHECK(has_constant_parameter_count());
     return LConstantOperand::cast(parameter_count());
   }
   LOperand* parameter_count() { return inputs_[2]; }
@@ -1557,7 +1594,7 @@
 };
 
 
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedField(LOperand* object) {
     inputs_[0] = object;
@@ -1570,11 +1607,13 @@
 };
 
 
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  explicit LLoadNamedGeneric(LOperand* context, LOperand* object) {
+  explicit LLoadNamedGeneric(LOperand* context, LOperand* object,
+                             LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
+    temps_[0] = vector;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
@@ -1582,11 +1621,13 @@
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
   Handle<Object> name() const { return hydrogen()->name(); }
 };
 
 
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadFunctionPrototype(LOperand* function) {
     inputs_[0] = function;
@@ -1599,7 +1640,7 @@
 };
 
 
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
   DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1624,7 +1665,7 @@
 }
 
 
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyed(LOperand* elements, LOperand* key) {
     inputs_[0] = elements;
@@ -1645,7 +1686,7 @@
   }
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
@@ -1653,34 +1694,40 @@
 };
 
 
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
  public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
+  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
+                    LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = obj;
     inputs_[2] = key;
+    temps_[0] = vector;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
 };
 
 
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+  explicit LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                              LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = global_object;
+    temps_[0] = vector;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
@@ -1688,12 +1735,14 @@
 
   LOperand* context() { return inputs_[0]; }
   LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
   Handle<Object> name() const { return hydrogen()->name(); }
   bool for_typeof() const { return hydrogen()->for_typeof(); }
 };
 
 
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   explicit LStoreGlobalCell(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1708,7 +1757,7 @@
 };
 
 
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
     inputs_[0] = context;
@@ -1721,11 +1770,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -1742,11 +1791,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
     inputs_[0] = value;
@@ -1758,7 +1807,7 @@
 };
 
 
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LDrop(int count) : count_(count) { }
 
@@ -1771,7 +1820,7 @@
 };
 
 
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LStoreCodeEntry(LOperand* function, LOperand* code_object) {
     inputs_[0] = function;
@@ -1788,7 +1837,7 @@
 };
 
 
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
     inputs_[0] = base_object;
@@ -1798,27 +1847,27 @@
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
 
 
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
   DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
   DECLARE_HYDROGEN_ACCESSOR(Context)
 };
 
 
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LDeclareGlobals(LOperand* context) {
     inputs_[0] = context;
@@ -1831,7 +1880,7 @@
 };
 
 
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallJSFunction(LOperand* function) {
     inputs_[0] = function;
@@ -1842,44 +1891,44 @@
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  public:
-  LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
-                      const ZoneList<LOperand*>& operands,
-                      Zone* zone)
-    : inputs_(descriptor->environment_length() + 1, zone) {
-    ASSERT(descriptor->environment_length() + 1 == operands.length());
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
     inputs_.AddAll(operands, zone);
   }
 
   LOperand* target() const { return inputs_[0]; }
 
- private:
-  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
   DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+  virtual int TempCount() FINAL OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
 };
 
 
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInvokeFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1892,13 +1941,13 @@
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1914,7 +1963,7 @@
 };
 
 
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNew(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1927,13 +1976,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1946,13 +1995,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallRuntime(LOperand* context) {
     inputs_[0] = context;
@@ -1963,7 +2012,7 @@
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -1973,7 +2022,7 @@
 };
 
 
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LInteger32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -1985,7 +2034,7 @@
 };
 
 
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LUint32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -1997,7 +2046,7 @@
 };
 
 
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -2013,7 +2062,7 @@
 };
 
 
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
     inputs_[0] = value;
@@ -2029,7 +2078,7 @@
 };
 
 
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   explicit LNumberTagD(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2045,7 +2094,7 @@
 
 
 // Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToI(LOperand* value) {
     inputs_[0] = value;
@@ -2060,7 +2109,7 @@
 };
 
 
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2074,7 +2123,7 @@
 
 
 // Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LTaggedToI(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2091,7 +2140,7 @@
 };
 
 
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LSmiTag(LOperand* value) {
     inputs_[0] = value;
@@ -2104,7 +2153,7 @@
 };
 
 
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LNumberUntagD(LOperand* value) {
     inputs_[0] = value;
@@ -2117,7 +2166,7 @@
 };
 
 
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LSmiUntag(LOperand* value, bool needs_check)
       : needs_check_(needs_check) {
@@ -2134,7 +2183,7 @@
 };
 
 
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
     inputs_[0] = object;
@@ -2149,7 +2198,7 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2157,7 +2206,7 @@
 };
 
 
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
     inputs_[0] = context;
@@ -2172,14 +2221,14 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
     inputs_[0] = object;
@@ -2202,13 +2251,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
 
 
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyedGeneric(LOperand* context,
                      LOperand* object,
@@ -2228,13 +2277,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* context,
@@ -2255,7 +2304,7 @@
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2266,7 +2315,7 @@
 };
 
 
-class LTrapAllocationMemento V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LTrapAllocationMemento(LOperand* object,
                          LOperand* temp) {
@@ -2282,7 +2331,7 @@
 };
 
 
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -2299,7 +2348,7 @@
 };
 
 
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
     inputs_[0] = context;
@@ -2316,7 +2365,7 @@
 };
 
 
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   explicit LStringCharFromCode(LOperand* context, LOperand* char_code) {
     inputs_[0] = context;
@@ -2331,7 +2380,7 @@
 };
 
 
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckValue(LOperand* value) {
     inputs_[0] = value;
@@ -2344,7 +2393,7 @@
 };
 
 
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckInstanceType(LOperand* value) {
     inputs_[0] = value;
@@ -2357,7 +2406,7 @@
 };
 
 
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckMaps(LOperand* value = NULL) {
     inputs_[0] = value;
@@ -2370,7 +2419,7 @@
 };
 
 
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2382,7 +2431,7 @@
 };
 
 
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampDToUint8(LOperand* unclamped) {
     inputs_[0] = unclamped;
@@ -2394,7 +2443,7 @@
 };
 
 
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampIToUint8(LOperand* unclamped) {
     inputs_[0] = unclamped;
@@ -2406,7 +2455,7 @@
 };
 
 
-class LClampTToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LClampTToUint8(LOperand* unclamped,
                  LOperand* temp_xmm) {
@@ -2421,7 +2470,7 @@
 };
 
 
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2434,7 +2483,7 @@
 };
 
 
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleBits(LOperand* value) {
     inputs_[0] = value;
@@ -2447,7 +2496,7 @@
 };
 
 
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LConstructDouble(LOperand* hi, LOperand* lo) {
     inputs_[0] = hi;
@@ -2461,7 +2510,7 @@
 };
 
 
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
     inputs_[0] = context;
@@ -2478,7 +2527,7 @@
 };
 
 
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LRegExpLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2491,7 +2540,7 @@
 };
 
 
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFunctionLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2504,7 +2553,7 @@
 };
 
 
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LToFastProperties(LOperand* value) {
     inputs_[0] = value;
@@ -2517,7 +2566,7 @@
 };
 
 
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -2531,7 +2580,7 @@
 };
 
 
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -2544,11 +2593,11 @@
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
  public:
   explicit LIsConstructCallAndBranch(LOperand* temp) {
     temps_[0] = temp;
@@ -2562,18 +2611,18 @@
 };
 
 
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
 
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStackCheck(LOperand* context) {
     inputs_[0] = context;
@@ -2591,7 +2640,7 @@
 };
 
 
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LForInPrepareMap(LOperand* context, LOperand* object) {
     inputs_[0] = context;
@@ -2605,7 +2654,7 @@
 };
 
 
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LForInCacheArray(LOperand* map) {
     inputs_[0] = map;
@@ -2621,7 +2670,7 @@
 };
 
 
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LCheckMapValue(LOperand* value, LOperand* map) {
     inputs_[0] = value;
@@ -2635,7 +2684,7 @@
 };
 
 
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadFieldByIndex(LOperand* object, LOperand* index) {
     inputs_[0] = object;
@@ -2679,7 +2728,7 @@
 
 
 class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
  public:
   LPlatformChunk(CompilationInfo* info, HGraph* graph)
       : LChunk(info, graph),
@@ -2697,20 +2746,14 @@
 };
 
 
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
  public:
   LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
-      : LChunkBuilderBase(graph->zone()),
-        chunk_(NULL),
-        info_(info),
-        graph_(graph),
-        status_(UNUSED),
+      : LChunkBuilderBase(info, graph),
         current_instruction_(NULL),
         current_block_(NULL),
         next_block_(NULL),
-        allocator_(allocator) { }
-
-  Isolate* isolate() const { return graph_->isolate(); }
+        allocator_(allocator) {}
 
   // Build the sequence for the graph.
   LPlatformChunk* Build();
@@ -2722,6 +2765,7 @@
 
   LInstruction* DoMathFloor(HUnaryMathOperation* instr);
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
@@ -2739,24 +2783,6 @@
   LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
 
  private:
-  enum Status {
-    UNUSED,
-    BUILDING,
-    DONE,
-    ABORTED
-  };
-
-  LPlatformChunk* chunk() const { return chunk_; }
-  CompilationInfo* info() const { return info_; }
-  HGraph* graph() const { return graph_; }
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_building() const { return status_ == BUILDING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  void Abort(BailoutReason reason);
-
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(XMMRegister reg);
@@ -2801,7 +2827,7 @@
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2848,10 +2874,6 @@
                               HBinaryOperation* instr);
   void FindDehoistedKeyDefinitions(HValue* candidate);
 
-  LPlatformChunk* chunk_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
   HBasicBlock* next_block_;
diff --git a/src/x64/macro-assembler-x64.cc b/src/x64/macro-assembler-x64.cc
index 39acf80..5033303 100644
--- a/src/x64/macro-assembler-x64.cc
+++ b/src/x64/macro-assembler-x64.cc
@@ -6,15 +6,17 @@
 
 #if V8_TARGET_ARCH_X64
 
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/heap/heap.h"
+#include "src/isolate-inl.h"
+#include "src/serialize.h"
 #include "src/x64/assembler-x64.h"
 #include "src/x64/macro-assembler-x64.h"
-#include "src/serialize.h"
-#include "src/debug.h"
-#include "src/heap.h"
-#include "src/isolate-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -157,7 +159,7 @@
 
 
 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) {
-  ASSERT(root_array_available_);
+  DCHECK(root_array_available_);
   movp(destination, Operand(kRootRegister,
                             (index << kPointerSizeLog2) - kRootRegisterBias));
 }
@@ -166,7 +168,7 @@
 void MacroAssembler::LoadRootIndexed(Register destination,
                                      Register variable_offset,
                                      int fixed_offset) {
-  ASSERT(root_array_available_);
+  DCHECK(root_array_available_);
   movp(destination,
        Operand(kRootRegister,
                variable_offset, times_pointer_size,
@@ -175,20 +177,20 @@
 
 
 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) {
-  ASSERT(root_array_available_);
+  DCHECK(root_array_available_);
   movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias),
        source);
 }
 
 
 void MacroAssembler::PushRoot(Heap::RootListIndex index) {
-  ASSERT(root_array_available_);
+  DCHECK(root_array_available_);
   Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias));
 }
 
 
 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
-  ASSERT(root_array_available_);
+  DCHECK(root_array_available_);
   cmpp(with, Operand(kRootRegister,
                      (index << kPointerSizeLog2) - kRootRegisterBias));
 }
@@ -196,8 +198,8 @@
 
 void MacroAssembler::CompareRoot(const Operand& with,
                                  Heap::RootListIndex index) {
-  ASSERT(root_array_available_);
-  ASSERT(!with.AddressUsesRegister(kScratchRegister));
+  DCHECK(root_array_available_);
+  DCHECK(!with.AddressUsesRegister(kScratchRegister));
   LoadRoot(kScratchRegister, index);
   cmpp(with, kScratchRegister);
 }
@@ -232,16 +234,15 @@
     ret(0);
     bind(&buffer_overflowed);
   } else {
-    ASSERT(and_then == kFallThroughAtEnd);
+    DCHECK(and_then == kFallThroughAtEnd);
     j(equal, &done, Label::kNear);
   }
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(isolate(), save_fp);
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
   CallStub(&store_buffer_overflow);
   if (and_then == kReturnAtEnd) {
     ret(0);
   } else {
-    ASSERT(and_then == kFallThroughAtEnd);
+    DCHECK(and_then == kFallThroughAtEnd);
     bind(&done);
   }
 }
@@ -268,7 +269,7 @@
     cmpp(scratch, kScratchRegister);
     j(cc, branch, distance);
   } else {
-    ASSERT(kPointerSize == kInt64Size
+    DCHECK(kPointerSize == kInt64Size
         ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask()))
         : kPointerSize == kInt32Size);
     intptr_t new_space_start =
@@ -307,7 +308,7 @@
 
   // Although the object register is tagged, the offset is relative to the start
   // of the object, so the offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
+  DCHECK(IsAligned(offset, kPointerSize));
 
   leap(dst, FieldOperand(object, offset));
   if (emit_debug_code()) {
@@ -372,10 +373,10 @@
                                        Register map,
                                        Register dst,
                                        SaveFPRegsMode fp_mode) {
-  ASSERT(!object.is(kScratchRegister));
-  ASSERT(!object.is(map));
-  ASSERT(!object.is(dst));
-  ASSERT(!map.is(dst));
+  DCHECK(!object.is(kScratchRegister));
+  DCHECK(!object.is(map));
+  DCHECK(!object.is(dst));
+  DCHECK(!map.is(dst));
   AssertNotSmi(object);
 
   if (emit_debug_code()) {
@@ -405,10 +406,6 @@
   // Compute the address.
   leap(dst, FieldOperand(object, HeapObject::kMapOffset));
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
@@ -430,6 +427,10 @@
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
@@ -447,9 +448,9 @@
     RememberedSetAction remembered_set_action,
     SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
-  ASSERT(!object.is(value));
-  ASSERT(!object.is(address));
-  ASSERT(!value.is(address));
+  DCHECK(!object.is(value));
+  DCHECK(!object.is(address));
+  DCHECK(!value.is(address));
   AssertNotSmi(object);
 
   if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -465,10 +466,6 @@
     bind(&ok);
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
   // First, check if a write barrier is even needed. The tests below
   // catch stores of smis and stores into the young generation.
   Label done;
@@ -500,6 +497,10 @@
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
@@ -542,10 +543,10 @@
 
 
 void MacroAssembler::CheckStackAlignment() {
-  int frame_alignment = OS::ActivationFrameAlignment();
+  int frame_alignment = base::OS::ActivationFrameAlignment();
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     Label alignment_as_expected;
     testp(rsp, Immediate(frame_alignment_mask));
     j(zero, &alignment_as_expected, Label::kNear);
@@ -600,7 +601,7 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
-  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
+  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs
   Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
@@ -611,7 +612,7 @@
 
 
 void MacroAssembler::StubReturn(int argc) {
-  ASSERT(argc >= 1 && generating_stub());
+  DCHECK(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
@@ -625,7 +626,7 @@
   // The assert checks that the constants for the maximum number of digits
   // for an array index cached in the hash field and the number of bits
   // reserved for it do not conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   if (!hash.is(index)) {
     movl(index, hash);
@@ -694,7 +695,7 @@
 static int Offset(ExternalReference ref0, ExternalReference ref1) {
   int64_t offset = (ref0.address() - ref1.address());
   // Check that it fits into an int.
-  ASSERT(static_cast<int>(offset) == offset);
+  DCHECK(static_cast<int>(offset) == offset);
   return static_cast<int>(offset);
 }
 
@@ -731,7 +732,7 @@
   ExternalReference scheduled_exception_address =
       ExternalReference::scheduled_exception_address(isolate());
 
-  ASSERT(rdx.is(function_address) || r8.is(function_address));
+  DCHECK(rdx.is(function_address) || r8.is(function_address));
   // Allocate HandleScope in callee-save registers.
   Register prev_next_address_reg = r14;
   Register prev_limit_reg = rbx;
@@ -843,7 +844,7 @@
   bind(&promote_scheduled_exception);
   {
     FrameScope frame(this, StackFrame::INTERNAL);
-    CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
+    CallRuntime(Runtime::kPromoteScheduledException, 0);
   }
   jmp(&exception_handled);
 
@@ -873,7 +874,7 @@
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
   // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments matches the expected number of arguments. Fake a
@@ -895,7 +896,7 @@
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  ASSERT(!target.is(rdi));
+  DCHECK(!target.is(rdi));
   // Load the JavaScript builtin function from the builtins object.
   GetBuiltinFunction(rdi, id);
   movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset));
@@ -971,7 +972,7 @@
 
 
 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8()) {
     movsxbq(dst, src);
   } else if (r.IsUInteger8()) {
@@ -989,7 +990,7 @@
 
 
 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8() || r.IsUInteger8()) {
     movb(dst, src);
   } else if (r.IsInteger16() || r.IsUInteger16()) {
@@ -1044,7 +1045,7 @@
 
 
 void MacroAssembler::SafeMove(Register dst, Smi* src) {
-  ASSERT(!dst.is(kScratchRegister));
+  DCHECK(!dst.is(kScratchRegister));
   if (IsUnsafeInt(src->value()) && jit_cookie() != 0) {
     if (SmiValuesAre32Bits()) {
       // JIT cookie can be converted to Smi.
@@ -1052,7 +1053,7 @@
       Move(kScratchRegister, Smi::FromInt(jit_cookie()));
       xorp(dst, kScratchRegister);
     } else {
-      ASSERT(SmiValuesAre31Bits());
+      DCHECK(SmiValuesAre31Bits());
       int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
       movp(dst, Immediate(value ^ jit_cookie()));
       xorp(dst, Immediate(jit_cookie()));
@@ -1071,7 +1072,7 @@
       Move(kScratchRegister, Smi::FromInt(jit_cookie()));
       xorp(Operand(rsp, 0), kScratchRegister);
     } else {
-      ASSERT(SmiValuesAre31Bits());
+      DCHECK(SmiValuesAre31Bits());
       int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src));
       Push(Immediate(value ^ jit_cookie()));
       xorp(Operand(rsp, 0), Immediate(jit_cookie()));
@@ -1100,7 +1101,7 @@
   if (emit_debug_code()) {
     Move(dst, Smi::FromInt(kSmiConstantRegisterValue),
          Assembler::RelocInfoNone());
-    cmpq(dst, kSmiConstantRegister);
+    cmpp(dst, kSmiConstantRegister);
     Assert(equal, kUninitializedKSmiConstantRegister);
   }
   int value = source->value();
@@ -1171,10 +1172,10 @@
   }
 
   if (SmiValuesAre32Bits()) {
-    ASSERT(kSmiShift % kBitsPerByte == 0);
+    DCHECK(kSmiShift % kBitsPerByte == 0);
     movl(Operand(dst, kSmiShift / kBitsPerByte), src);
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     Integer32ToSmi(kScratchRegister, src);
     movp(dst, kScratchRegister);
   }
@@ -1202,7 +1203,7 @@
   if (SmiValuesAre32Bits()) {
     shrp(dst, Immediate(kSmiShift));
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     sarl(dst, Immediate(kSmiShift));
   }
 }
@@ -1212,7 +1213,7 @@
   if (SmiValuesAre32Bits()) {
     movl(dst, Operand(src, kSmiShift / kBitsPerByte));
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     movl(dst, src);
     sarl(dst, Immediate(kSmiShift));
   }
@@ -1236,7 +1237,7 @@
   if (SmiValuesAre32Bits()) {
     movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte));
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     movp(dst, src);
     SmiToInteger64(dst, dst);
   }
@@ -1263,7 +1264,7 @@
 
 
 void MacroAssembler::Cmp(Register dst, Smi* src) {
-  ASSERT(!dst.is(kScratchRegister));
+  DCHECK(!dst.is(kScratchRegister));
   if (src->value() == 0) {
     testp(dst, dst);
   } else {
@@ -1292,7 +1293,7 @@
   if (SmiValuesAre32Bits()) {
     cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value()));
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     cmpl(dst, Immediate(src));
   }
 }
@@ -1301,7 +1302,7 @@
 void MacroAssembler::Cmp(const Operand& dst, Smi* src) {
   // The Operand cannot use the smi register.
   Register smi_reg = GetSmiConstant(src);
-  ASSERT(!dst.AddressUsesRegister(smi_reg));
+  DCHECK(!dst.AddressUsesRegister(smi_reg));
   cmpp(dst, smi_reg);
 }
 
@@ -1310,7 +1311,7 @@
   if (SmiValuesAre32Bits()) {
     cmpl(Operand(dst, kSmiShift / kBitsPerByte), src);
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     SmiToInteger32(kScratchRegister, dst);
     cmpl(kScratchRegister, src);
   }
@@ -1320,8 +1321,8 @@
 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst,
                                                            Register src,
                                                            int power) {
-  ASSERT(power >= 0);
-  ASSERT(power < 64);
+  DCHECK(power >= 0);
+  DCHECK(power < 64);
   if (power == 0) {
     SmiToInteger64(dst, src);
     return;
@@ -1340,7 +1341,7 @@
 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst,
                                                          Register src,
                                                          int power) {
-  ASSERT((0 <= power) && (power < 32));
+  DCHECK((0 <= power) && (power < 32));
   if (dst.is(src)) {
     shrp(dst, Immediate(power + kSmiShift));
   } else {
@@ -1353,8 +1354,8 @@
                                  Label* on_not_smis,
                                  Label::Distance near_jump) {
   if (dst.is(src1) || dst.is(src2)) {
-    ASSERT(!src1.is(kScratchRegister));
-    ASSERT(!src2.is(kScratchRegister));
+    DCHECK(!src1.is(kScratchRegister));
+    DCHECK(!src2.is(kScratchRegister));
     movp(kScratchRegister, src1);
     orp(kScratchRegister, src2);
     JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump);
@@ -1400,7 +1401,7 @@
     leal(kScratchRegister, Operand(first, second, times_1, 0));
     testb(kScratchRegister, Immediate(0x03));
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     movl(kScratchRegister, first);
     orl(kScratchRegister, second);
     testb(kScratchRegister, Immediate(kSmiTagMask));
@@ -1442,7 +1443,7 @@
 
 
 Condition MacroAssembler::CheckIsMinSmi(Register src) {
-  ASSERT(!src.is(kScratchRegister));
+  DCHECK(!src.is(kScratchRegister));
   // If we overflow by subtracting one, it's the minimal smi value.
   cmpp(src, kSmiConstantRegister);
   return overflow;
@@ -1454,7 +1455,7 @@
     // A 32-bit integer value can always be converted to a smi.
     return always;
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     cmpl(src, Immediate(0xc0000000));
     return positive;
   }
@@ -1468,7 +1469,7 @@
     testl(src, src);
     return positive;
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     testl(src, Immediate(0xc0000000));
     return zero;
   }
@@ -1586,7 +1587,7 @@
     }
     return;
   } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
+    DCHECK(!dst.is(kScratchRegister));
     switch (constant->value()) {
       case 1:
         addp(dst, kSmiConstantRegister);
@@ -1634,7 +1635,7 @@
       addl(Operand(dst, kSmiShift / kBitsPerByte),
            Immediate(constant->value()));
     } else {
-      ASSERT(SmiValuesAre31Bits());
+      DCHECK(SmiValuesAre31Bits());
       addp(dst, Immediate(constant));
     }
   }
@@ -1652,12 +1653,12 @@
       movp(dst, src);
     }
   } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
+    DCHECK(!dst.is(kScratchRegister));
     LoadSmiConstant(kScratchRegister, constant);
     addp(dst, kScratchRegister);
     if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
       j(no_overflow, bailout_label, near_jump);
-      ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
       subp(dst, kScratchRegister);
     } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
       if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
@@ -1674,8 +1675,8 @@
       CHECK(mode.IsEmpty());
     }
   } else {
-    ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
-    ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
+    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
     LoadSmiConstant(dst, constant);
     addp(dst, src);
     j(overflow, bailout_label, near_jump);
@@ -1689,7 +1690,7 @@
       movp(dst, src);
     }
   } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
+    DCHECK(!dst.is(kScratchRegister));
     Register constant_reg = GetSmiConstant(constant);
     subp(dst, constant_reg);
   } else {
@@ -1718,12 +1719,12 @@
       movp(dst, src);
     }
   } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
+    DCHECK(!dst.is(kScratchRegister));
     LoadSmiConstant(kScratchRegister, constant);
     subp(dst, kScratchRegister);
     if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) {
       j(no_overflow, bailout_label, near_jump);
-      ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
+      DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
       addp(dst, kScratchRegister);
     } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) {
       if (mode.Contains(PRESERVE_SOURCE_REGISTER)) {
@@ -1740,10 +1741,10 @@
       CHECK(mode.IsEmpty());
     }
   } else {
-    ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER));
-    ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW));
+    DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER));
+    DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW));
     if (constant->value() == Smi::kMinValue) {
-      ASSERT(!dst.is(kScratchRegister));
+      DCHECK(!dst.is(kScratchRegister));
       movp(dst, src);
       LoadSmiConstant(kScratchRegister, constant);
       subp(dst, kScratchRegister);
@@ -1763,7 +1764,7 @@
                             Label* on_smi_result,
                             Label::Distance near_jump) {
   if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
+    DCHECK(!dst.is(kScratchRegister));
     movp(kScratchRegister, src);
     negp(dst);  // Low 32 bits are retained as zero by negation.
     // Test if result is zero or Smi::kMinValue.
@@ -1808,8 +1809,8 @@
                             Register src2,
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  ASSERT(!dst.is(src2));
+  DCHECK_NOT_NULL(on_not_smi_result);
+  DCHECK(!dst.is(src2));
   SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
 }
 
@@ -1819,8 +1820,8 @@
                             const Operand& src2,
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  ASSERT(!src2.AddressUsesRegister(dst));
+  DCHECK_NOT_NULL(on_not_smi_result);
+  DCHECK(!src2.AddressUsesRegister(dst));
   SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
 }
 
@@ -1872,8 +1873,8 @@
                             Register src2,
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  ASSERT(!dst.is(src2));
+  DCHECK_NOT_NULL(on_not_smi_result);
+  DCHECK(!dst.is(src2));
   SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump);
 }
 
@@ -1883,8 +1884,8 @@
                             const Operand& src2,
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
-  ASSERT_NOT_NULL(on_not_smi_result);
-  ASSERT(!src2.AddressUsesRegister(dst));
+  DCHECK_NOT_NULL(on_not_smi_result);
+  DCHECK(!src2.AddressUsesRegister(dst));
   SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump);
 }
 
@@ -1905,7 +1906,7 @@
 
 
 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) {
-  ASSERT(!dst.is(src2));
+  DCHECK(!dst.is(src2));
   SmiSubNoOverflowHelper<Register>(this, dst, src1, src2);
 }
 
@@ -1922,10 +1923,10 @@
                             Register src2,
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
-  ASSERT(!dst.is(src2));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
+  DCHECK(!dst.is(src2));
+  DCHECK(!dst.is(kScratchRegister));
+  DCHECK(!src1.is(kScratchRegister));
+  DCHECK(!src2.is(kScratchRegister));
 
   if (dst.is(src1)) {
     Label failure, zero_correct_result;
@@ -1977,12 +1978,12 @@
                             Register src2,
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
+  DCHECK(!src1.is(kScratchRegister));
+  DCHECK(!src2.is(kScratchRegister));
+  DCHECK(!dst.is(kScratchRegister));
+  DCHECK(!src2.is(rax));
+  DCHECK(!src2.is(rdx));
+  DCHECK(!src1.is(rdx));
 
   // Check for 0 divisor (result is +/-Infinity).
   testp(src2, src2);
@@ -2040,13 +2041,13 @@
                             Register src2,
                             Label* on_not_smi_result,
                             Label::Distance near_jump) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!src2.is(rax));
-  ASSERT(!src2.is(rdx));
-  ASSERT(!src1.is(rdx));
-  ASSERT(!src1.is(src2));
+  DCHECK(!dst.is(kScratchRegister));
+  DCHECK(!src1.is(kScratchRegister));
+  DCHECK(!src2.is(kScratchRegister));
+  DCHECK(!src2.is(rax));
+  DCHECK(!src2.is(rdx));
+  DCHECK(!src1.is(rdx));
+  DCHECK(!src1.is(src2));
 
   testp(src2, src2);
   j(zero, on_not_smi_result, near_jump);
@@ -2092,14 +2093,14 @@
 
 
 void MacroAssembler::SmiNot(Register dst, Register src) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src.is(kScratchRegister));
+  DCHECK(!dst.is(kScratchRegister));
+  DCHECK(!src.is(kScratchRegister));
   if (SmiValuesAre32Bits()) {
     // Set tag and padding bits before negating, so that they are zero
     // afterwards.
     movl(kScratchRegister, Immediate(~0));
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     movl(kScratchRegister, Immediate(1));
   }
   if (dst.is(src)) {
@@ -2112,7 +2113,7 @@
 
 
 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) {
-  ASSERT(!dst.is(src2));
+  DCHECK(!dst.is(src2));
   if (!dst.is(src1)) {
     movp(dst, src1);
   }
@@ -2124,7 +2125,7 @@
   if (constant->value() == 0) {
     Set(dst, 0);
   } else if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
+    DCHECK(!dst.is(kScratchRegister));
     Register constant_reg = GetSmiConstant(constant);
     andp(dst, constant_reg);
   } else {
@@ -2136,7 +2137,7 @@
 
 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
-    ASSERT(!src1.is(src2));
+    DCHECK(!src1.is(src2));
     movp(dst, src1);
   }
   orp(dst, src2);
@@ -2145,7 +2146,7 @@
 
 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
+    DCHECK(!dst.is(kScratchRegister));
     Register constant_reg = GetSmiConstant(constant);
     orp(dst, constant_reg);
   } else {
@@ -2157,7 +2158,7 @@
 
 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) {
   if (!dst.is(src1)) {
-    ASSERT(!src1.is(src2));
+    DCHECK(!src1.is(src2));
     movp(dst, src1);
   }
   xorp(dst, src2);
@@ -2166,7 +2167,7 @@
 
 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) {
   if (dst.is(src)) {
-    ASSERT(!dst.is(kScratchRegister));
+    DCHECK(!dst.is(kScratchRegister));
     Register constant_reg = GetSmiConstant(constant);
     xorp(dst, constant_reg);
   } else {
@@ -2179,7 +2180,7 @@
 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst,
                                                      Register src,
                                                      int shift_value) {
-  ASSERT(is_uint5(shift_value));
+  DCHECK(is_uint5(shift_value));
   if (shift_value > 0) {
     if (dst.is(src)) {
       sarp(dst, Immediate(shift_value + kSmiShift));
@@ -2205,7 +2206,7 @@
       shlq(dst, Immediate(shift_value & 0x1f));
     }
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     if (dst.is(src)) {
       UNIMPLEMENTED();  // Not used.
     } else {
@@ -2234,7 +2235,7 @@
       shrp(dst, Immediate(shift_value + kSmiShift));
       shlp(dst, Immediate(kSmiShift));
     } else {
-      ASSERT(SmiValuesAre31Bits());
+      DCHECK(SmiValuesAre31Bits());
       SmiToInteger32(dst, src);
       shrp(dst, Immediate(shift_value));
       JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump);
@@ -2250,7 +2251,7 @@
                                   Label* on_not_smi_result,
                                   Label::Distance near_jump) {
   if (SmiValuesAre32Bits()) {
-    ASSERT(!dst.is(rcx));
+    DCHECK(!dst.is(rcx));
     if (!dst.is(src1)) {
       movp(dst, src1);
     }
@@ -2260,12 +2261,12 @@
     andp(rcx, Immediate(0x1f));
     shlq_cl(dst);
   } else {
-    ASSERT(SmiValuesAre31Bits());
-    ASSERT(!dst.is(kScratchRegister));
-    ASSERT(!src1.is(kScratchRegister));
-    ASSERT(!src2.is(kScratchRegister));
-    ASSERT(!dst.is(src2));
-    ASSERT(!dst.is(rcx));
+    DCHECK(SmiValuesAre31Bits());
+    DCHECK(!dst.is(kScratchRegister));
+    DCHECK(!src1.is(kScratchRegister));
+    DCHECK(!src2.is(kScratchRegister));
+    DCHECK(!dst.is(src2));
+    DCHECK(!dst.is(rcx));
 
     if (src1.is(rcx) || src2.is(rcx)) {
       movq(kScratchRegister, rcx);
@@ -2300,11 +2301,11 @@
                                           Register src2,
                                           Label* on_not_smi_result,
                                           Label::Distance near_jump) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(src2));
-  ASSERT(!dst.is(rcx));
+  DCHECK(!dst.is(kScratchRegister));
+  DCHECK(!src1.is(kScratchRegister));
+  DCHECK(!src2.is(kScratchRegister));
+  DCHECK(!dst.is(src2));
+  DCHECK(!dst.is(rcx));
   if (src1.is(rcx) || src2.is(rcx)) {
     movq(kScratchRegister, rcx);
   }
@@ -2335,10 +2336,10 @@
 void MacroAssembler::SmiShiftArithmeticRight(Register dst,
                                              Register src1,
                                              Register src2) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(rcx));
+  DCHECK(!dst.is(kScratchRegister));
+  DCHECK(!src1.is(kScratchRegister));
+  DCHECK(!src2.is(kScratchRegister));
+  DCHECK(!dst.is(rcx));
 
   SmiToInteger32(rcx, src2);
   if (!dst.is(src1)) {
@@ -2355,18 +2356,18 @@
                                   Register src2,
                                   Label* on_not_smis,
                                   Label::Distance near_jump) {
-  ASSERT(!dst.is(kScratchRegister));
-  ASSERT(!src1.is(kScratchRegister));
-  ASSERT(!src2.is(kScratchRegister));
-  ASSERT(!dst.is(src1));
-  ASSERT(!dst.is(src2));
+  DCHECK(!dst.is(kScratchRegister));
+  DCHECK(!src1.is(kScratchRegister));
+  DCHECK(!src2.is(kScratchRegister));
+  DCHECK(!dst.is(src1));
+  DCHECK(!dst.is(src2));
   // The operands must not both be smis.
 #ifdef DEBUG
   Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2));
   Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi);
 #endif
   STATIC_ASSERT(kSmiTag == 0);
-  ASSERT_EQ(0, Smi::FromInt(0));
+  DCHECK_EQ(0, Smi::FromInt(0));
   movl(kScratchRegister, Immediate(kSmiTagMask));
   andp(kScratchRegister, src1);
   testl(kScratchRegister, src2);
@@ -2374,7 +2375,7 @@
   j(not_zero, on_not_smis, near_jump);
 
   // Exactly one operand is a smi.
-  ASSERT_EQ(1, static_cast<int>(kSmiTagMask));
+  DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
   // kScratchRegister still holds src1 & kSmiTag, which is either zero or one.
   subp(kScratchRegister, Immediate(1));
   // If src1 is a smi, the scratch register is all 1s, else it is all 0s.
@@ -2391,7 +2392,7 @@
                                     Register src,
                                     int shift) {
   if (SmiValuesAre32Bits()) {
-    ASSERT(is_uint6(shift));
+    DCHECK(is_uint6(shift));
     // There is a possible optimization if shift is in the range 60-63, but that
     // will (and must) never happen.
     if (!dst.is(src)) {
@@ -2404,8 +2405,8 @@
     }
     return SmiIndex(dst, times_1);
   } else {
-    ASSERT(SmiValuesAre31Bits());
-    ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+    DCHECK(SmiValuesAre31Bits());
+    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
     if (!dst.is(src)) {
       movp(dst, src);
     }
@@ -2426,7 +2427,7 @@
                                             int shift) {
   if (SmiValuesAre32Bits()) {
     // Register src holds a positive smi.
-    ASSERT(is_uint6(shift));
+    DCHECK(is_uint6(shift));
     if (!dst.is(src)) {
       movp(dst, src);
     }
@@ -2438,8 +2439,8 @@
     }
     return SmiIndex(dst, times_1);
   } else {
-    ASSERT(SmiValuesAre31Bits());
-    ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
+    DCHECK(SmiValuesAre31Bits());
+    DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1));
     if (!dst.is(src)) {
       movp(dst, src);
     }
@@ -2455,10 +2456,10 @@
 
 void MacroAssembler::AddSmiField(Register dst, const Operand& src) {
   if (SmiValuesAre32Bits()) {
-    ASSERT_EQ(0, kSmiShift % kBitsPerByte);
+    DCHECK_EQ(0, kSmiShift % kBitsPerByte);
     addl(dst, Operand(src, kSmiShift / kBitsPerByte));
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     SmiToInteger32(kScratchRegister, src);
     addl(dst, kScratchRegister);
   }
@@ -2477,7 +2478,7 @@
 
 
 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
-  ASSERT(!src.is(scratch));
+  DCHECK(!src.is(scratch));
   movp(scratch, src);
   // High bits.
   shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift));
@@ -2490,7 +2491,7 @@
 
 
 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
-  ASSERT(!dst.is(scratch));
+  DCHECK(!dst.is(scratch));
   Pop(scratch);
   // Low bits.
   shrp(scratch, Immediate(kSmiShift));
@@ -2506,7 +2507,7 @@
   if (SmiValuesAre32Bits()) {
     testl(Operand(src, kIntSize), Immediate(source->value()));
   } else {
-    ASSERT(SmiValuesAre31Bits());
+    DCHECK(SmiValuesAre31Bits());
     testl(src, Immediate(source));
   }
 }
@@ -2610,13 +2611,9 @@
 }
 
 
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(
-    Register first_object,
-    Register second_object,
-    Register scratch1,
-    Register scratch2,
-    Label* on_fail,
-    Label::Distance near_jump) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(
+    Register first_object, Register second_object, Register scratch1,
+    Register scratch2, Label* on_fail, Label::Distance near_jump) {
   // Check that both objects are not smis.
   Condition either_smi = CheckEitherSmi(first_object, second_object);
   j(either_smi, on_fail, near_jump);
@@ -2627,67 +2624,62 @@
   movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ASCII strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
+  // Check that both are flat one-byte strings.
+  DCHECK(kNotStringTag != 0);
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
 
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  andl(scratch1, Immediate(kFlatOneByteStringMask));
+  andl(scratch2, Immediate(kFlatOneByteStringMask));
   // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
   leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
   cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
   j(not_equal, on_fail, near_jump);
 }
 
 
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
-    Register instance_type,
-    Register scratch,
-    Label* failure,
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
+    Register instance_type, Register scratch, Label* failure,
     Label::Distance near_jump) {
   if (!scratch.is(instance_type)) {
     movl(scratch, instance_type);
   }
 
-  const int kFlatAsciiStringMask =
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
 
-  andl(scratch, Immediate(kFlatAsciiStringMask));
+  andl(scratch, Immediate(kFlatOneByteStringMask));
   cmpl(scratch, Immediate(kStringTag | kSeqStringTag | kOneByteStringTag));
   j(not_equal, failure, near_jump);
 }
 
 
-void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
-    Register first_object_instance_type,
-    Register second_object_instance_type,
-    Register scratch1,
-    Register scratch2,
-    Label* on_fail,
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+    Register first_object_instance_type, Register second_object_instance_type,
+    Register scratch1, Register scratch2, Label* on_fail,
     Label::Distance near_jump) {
   // Load instance type for both strings.
   movp(scratch1, first_object_instance_type);
   movp(scratch2, second_object_instance_type);
 
-  // Check that both are flat ASCII strings.
-  ASSERT(kNotStringTag != 0);
-  const int kFlatAsciiStringMask =
+  // Check that both are flat one-byte strings.
+  DCHECK(kNotStringTag != 0);
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
 
-  andl(scratch1, Immediate(kFlatAsciiStringMask));
-  andl(scratch2, Immediate(kFlatAsciiStringMask));
+  andl(scratch1, Immediate(kFlatOneByteStringMask));
+  andl(scratch2, Immediate(kFlatOneByteStringMask));
   // Interleave the bits to check both scratch1 and scratch2 in one test.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
+  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
   leap(scratch1, Operand(scratch1, scratch2, times_8, 0));
   cmpl(scratch1,
-       Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3)));
+       Immediate(kFlatOneByteStringTag + (kFlatOneByteStringTag << 3)));
   j(not_equal, on_fail, near_jump);
 }
 
@@ -2709,16 +2701,16 @@
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
-                                         Label* not_unique_name,
-                                         Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+                                                     Label* not_unique_name,
+                                                     Label::Distance distance) {
   JumpIfNotUniqueNameHelper<Operand>(this, operand, not_unique_name, distance);
 }
 
 
-void MacroAssembler::JumpIfNotUniqueName(Register reg,
-                                         Label* not_unique_name,
-                                         Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name,
+                                                     Label::Distance distance) {
   JumpIfNotUniqueNameHelper<Register>(this, reg, not_unique_name, distance);
 }
 
@@ -2787,7 +2779,7 @@
 void MacroAssembler::MoveHeapObject(Register result,
                                     Handle<Object> object) {
   AllowDeferredHandleDereference using_raw_address;
-  ASSERT(object->IsHeapObject());
+  DCHECK(object->IsHeapObject());
   if (isolate()->heap()->InNewSpace(*object)) {
     Handle<Cell> cell = isolate()->factory()->NewCell(object);
     Move(result, cell, RelocInfo::CELL);
@@ -2818,7 +2810,7 @@
 
 void MacroAssembler::DropUnderReturnAddress(int stack_elements,
                                             Register scratch) {
-  ASSERT(stack_elements > 0);
+  DCHECK(stack_elements > 0);
   if (kPointerSize == kInt64Size && stack_elements == 1) {
     popq(MemOperand(rsp, 0));
     return;
@@ -2835,7 +2827,7 @@
     pushq(src);
   } else {
     // x32 uses 64-bit push for rbp in the prologue.
-    ASSERT(src.code() != rbp.code());
+    DCHECK(src.code() != rbp.code());
     leal(rsp, Operand(rsp, -4));
     movp(Operand(rsp, 0), src);
   }
@@ -2888,7 +2880,7 @@
     popq(dst);
   } else {
     // x32 uses 64-bit pop for rbp in the epilogue.
-    ASSERT(dst.code() != rbp.code());
+    DCHECK(dst.code() != rbp.code());
     movp(dst, Operand(rsp, 0));
     leal(rsp, Operand(rsp, 4));
   }
@@ -2927,7 +2919,7 @@
 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst,
                                                         Register base,
                                                         int offset) {
-  ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
          offset <= SharedFunctionInfo::kSize &&
          (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
   if (kPointerSize == kInt64Size) {
@@ -2942,7 +2934,7 @@
 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base,
                                                            int offset,
                                                            int bits) {
-  ASSERT(offset > SharedFunctionInfo::kLengthOffset &&
+  DCHECK(offset > SharedFunctionInfo::kLengthOffset &&
          offset <= SharedFunctionInfo::kSize &&
          (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1));
   if (kPointerSize == kInt32Size) {
@@ -3030,7 +3022,7 @@
 #ifdef DEBUG
   int end_position = pc_offset() + CallSize(code_object);
 #endif
-  ASSERT(RelocInfo::IsCodeTarget(rmode) ||
+  DCHECK(RelocInfo::IsCodeTarget(rmode) ||
       rmode == RelocInfo::CODE_AGE_SEQUENCE);
   call(code_object, rmode, ast_id);
 #ifdef DEBUG
@@ -3394,8 +3386,9 @@
   bind(&is_nan);
   // Convert all NaNs to the same canonical NaN value when they are stored in
   // the double array.
-  Set(kScratchRegister, BitCast<uint64_t>(
-      FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+  Set(kScratchRegister,
+      bit_cast<uint64_t>(
+          FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
   movq(xmm_scratch, kScratchRegister);
   jmp(&have_double_value, Label::kNear);
 
@@ -3524,17 +3517,16 @@
 }
 
 
-void MacroAssembler::DoubleToI(Register result_reg,
-                               XMMRegister input_reg,
+void MacroAssembler::DoubleToI(Register result_reg, XMMRegister input_reg,
                                XMMRegister scratch,
                                MinusZeroMode minus_zero_mode,
-                               Label* conversion_failed,
-                               Label::Distance dst) {
+                               Label* lost_precision, Label* is_nan,
+                               Label* minus_zero, Label::Distance dst) {
   cvttsd2si(result_reg, input_reg);
   Cvtlsi2sd(xmm0, result_reg);
   ucomisd(xmm0, input_reg);
-  j(not_equal, conversion_failed, dst);
-  j(parity_even, conversion_failed, dst);  // NaN.
+  j(not_equal, lost_precision, dst);
+  j(parity_even, is_nan, dst);  // NaN.
   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
     Label done;
     // The integer converted back is equal to the original. We
@@ -3544,47 +3536,14 @@
     movmskpd(result_reg, input_reg);
     // Bit 0 contains the sign of the double in input_reg.
     // If input was positive, we are ok and return 0, otherwise
-    // jump to conversion_failed.
+    // jump to minus_zero.
     andl(result_reg, Immediate(1));
-    j(not_zero, conversion_failed, dst);
+    j(not_zero, minus_zero, dst);
     bind(&done);
   }
 }
 
 
-void MacroAssembler::TaggedToI(Register result_reg,
-                               Register input_reg,
-                               XMMRegister temp,
-                               MinusZeroMode minus_zero_mode,
-                               Label* lost_precision,
-                               Label::Distance dst) {
-  Label done;
-  ASSERT(!temp.is(xmm0));
-
-  // Heap number map check.
-  CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset),
-              Heap::kHeapNumberMapRootIndex);
-  j(not_equal, lost_precision, dst);
-
-  movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset));
-  cvttsd2si(result_reg, xmm0);
-  Cvtlsi2sd(temp, result_reg);
-  ucomisd(xmm0, temp);
-  RecordComment("Deferred TaggedToI: lost precision");
-  j(not_equal, lost_precision, dst);
-  RecordComment("Deferred TaggedToI: NaN");
-  j(parity_even, lost_precision, dst);  // NaN.
-  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
-    testl(result_reg, result_reg);
-    j(not_zero, &done, Label::kNear);
-    movmskpd(result_reg, xmm0);
-    andl(result_reg, Immediate(1));
-    j(not_zero, lost_precision, dst);
-  }
-  bind(&done);
-}
-
-
 void MacroAssembler::LoadInstanceDescriptors(Register map,
                                              Register descriptors) {
   movp(descriptors, FieldOperand(map, Map::kDescriptorsOffset));
@@ -3660,7 +3619,7 @@
 
 void MacroAssembler::AssertZeroExtended(Register int32_register) {
   if (emit_debug_code()) {
-    ASSERT(!int32_register.is(kScratchRegister));
+    DCHECK(!int32_register.is(kScratchRegister));
     movq(kScratchRegister, V8_INT64_C(0x0000000100000000));
     cmpq(kScratchRegister, int32_register);
     Check(above_equal, k32BitValueInRegisterIsNotZeroExtended);
@@ -3711,7 +3670,7 @@
                                      Heap::RootListIndex root_value_index,
                                      BailoutReason reason) {
   if (emit_debug_code()) {
-    ASSERT(!src.is(kScratchRegister));
+    DCHECK(!src.is(kScratchRegister));
     LoadRoot(kScratchRegister, root_value_index);
     cmpp(src, kScratchRegister);
     Check(equal, reason);
@@ -3745,15 +3704,16 @@
                                              Register result,
                                              Label* miss,
                                              bool miss_on_bound_function) {
-  // Check that the receiver isn't a smi.
-  testl(function, Immediate(kSmiTagMask));
-  j(zero, miss);
-
-  // Check that the function really is a function.
-  CmpObjectType(function, JS_FUNCTION_TYPE, result);
-  j(not_equal, miss);
-
+  Label non_instance;
   if (miss_on_bound_function) {
+    // Check that the receiver isn't a smi.
+    testl(function, Immediate(kSmiTagMask));
+    j(zero, miss);
+
+    // Check that the function really is a function.
+    CmpObjectType(function, JS_FUNCTION_TYPE, result);
+    j(not_equal, miss);
+
     movp(kScratchRegister,
          FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
     // It's not smi-tagged (stored in the top half of a smi-tagged 8-byte
@@ -3762,13 +3722,12 @@
         SharedFunctionInfo::kCompilerHintsOffset,
         SharedFunctionInfo::kBoundFunction);
     j(not_zero, miss);
-  }
 
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  testb(FieldOperand(result, Map::kBitFieldOffset),
-        Immediate(1 << Map::kHasNonInstancePrototype));
-  j(not_zero, &non_instance, Label::kNear);
+    // Make sure that the function has an instance prototype.
+    testb(FieldOperand(result, Map::kBitFieldOffset),
+          Immediate(1 << Map::kHasNonInstancePrototype));
+    j(not_zero, &non_instance, Label::kNear);
+  }
 
   // Get the prototype or initial map from the function.
   movp(result,
@@ -3787,12 +3746,15 @@
 
   // Get the prototype from the initial map.
   movp(result, FieldOperand(result, Map::kPrototypeOffset));
-  jmp(&done, Label::kNear);
 
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in initial map.
-  bind(&non_instance);
-  movp(result, FieldOperand(result, Map::kConstructorOffset));
+  if (miss_on_bound_function) {
+    jmp(&done, Label::kNear);
+
+    // Non-instance prototype: Fetch prototype from constructor field
+    // in initial map.
+    bind(&non_instance);
+    movp(result, FieldOperand(result, Map::kConstructorOffset));
+  }
 
   // All done.
   bind(&done);
@@ -3808,7 +3770,7 @@
 
 
 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand counter_operand = ExternalOperand(ExternalReference(counter));
     if (value == 1) {
@@ -3821,7 +3783,7 @@
 
 
 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand counter_operand = ExternalOperand(ExternalReference(counter));
     if (value == 1) {
@@ -3837,7 +3799,7 @@
   Set(rax, 0);  // No arguments.
   LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate()));
   CEntryStub ces(isolate(), 1);
-  ASSERT(AllowThisStubCall(&ces));
+  DCHECK(AllowThisStubCall(&ces));
   Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
 }
 
@@ -3848,7 +3810,7 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
   bool definitely_mismatches = false;
@@ -3867,7 +3829,7 @@
       call(code);
       call_wrapper.AfterCall();
     } else {
-      ASSERT(flag == JUMP_FUNCTION);
+      DCHECK(flag == JUMP_FUNCTION);
       jmp(code);
     }
     bind(&done);
@@ -3880,9 +3842,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
-  ASSERT(function.is(rdi));
+  DCHECK(function.is(rdi));
   movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
   movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
   LoadSharedFunctionInfoSpecialField(rbx, rdx,
@@ -3902,9 +3864,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
-  ASSERT(function.is(rdi));
+  DCHECK(function.is(rdi));
   movp(rsi, FieldOperand(function, JSFunction::kContextOffset));
   // Advances rdx to the end of the Code object header, to the start of
   // the executable code.
@@ -3937,7 +3899,7 @@
   *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
+    DCHECK(actual.is_immediate());
     if (expected.immediate() == actual.immediate()) {
       definitely_matches = true;
     } else {
@@ -3961,15 +3923,15 @@
       // IC mechanism.
       cmpp(expected.reg(), Immediate(actual.immediate()));
       j(equal, &invoke, Label::kNear);
-      ASSERT(expected.reg().is(rbx));
+      DCHECK(expected.reg().is(rbx));
       Set(rax, actual.immediate());
     } else if (!expected.reg().is(actual.reg())) {
       // Both expected and actual are in (different) registers. This
       // is the case when we invoke functions using call and apply.
       cmpp(expected.reg(), actual.reg());
       j(equal, &invoke, Label::kNear);
-      ASSERT(actual.reg().is(rax));
-      ASSERT(expected.reg().is(rbx));
+      DCHECK(actual.reg().is(rax));
+      DCHECK(expected.reg().is(rbx));
     }
   }
 
@@ -4053,15 +4015,15 @@
 void MacroAssembler::EnterExitFramePrologue(bool save_rax) {
   // Set up the frame structure on the stack.
   // All constants are relative to the frame pointer of the exit frame.
-  ASSERT(ExitFrameConstants::kCallerSPDisplacement ==
+  DCHECK(ExitFrameConstants::kCallerSPDisplacement ==
          kFPOnStackSize + kPCOnStackSize);
-  ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
-  ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
+  DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize);
+  DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize);
   pushq(rbp);
   movp(rbp, rsp);
 
   // Reserve room for entry stack pointer and push the code object.
-  ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
+  DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize);
   Push(Immediate(0));  // Saved entry sp, patched before call.
   Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT);
   Push(kScratchRegister);  // Accessed from ExitFrame::code_slot.
@@ -4097,10 +4059,10 @@
   }
 
   // Get the required frame alignment for the OS.
-  const int kFrameAlignment = OS::ActivationFrameAlignment();
+  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
   if (kFrameAlignment > 0) {
-    ASSERT(IsPowerOf2(kFrameAlignment));
-    ASSERT(is_int8(kFrameAlignment));
+    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
+    DCHECK(is_int8(kFrameAlignment));
     andp(rsp, Immediate(-kFrameAlignment));
   }
 
@@ -4183,8 +4145,8 @@
                                             Label* miss) {
   Label same_contexts;
 
-  ASSERT(!holder_reg.is(scratch));
-  ASSERT(!scratch.is(kScratchRegister));
+  DCHECK(!holder_reg.is(scratch));
+  DCHECK(!scratch.is(kScratchRegister));
   // Load current lexical context from the stack frame.
   movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset));
 
@@ -4244,7 +4206,7 @@
 
 
 // Compute the hash code from the untagged key.  This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stubs-hydrogen.cc
 void MacroAssembler::GetNumberHash(Register r0, Register scratch) {
   // First of all we assign the hash seed to scratch.
@@ -4330,7 +4292,7 @@
     andp(r2, r1);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
     leap(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
 
     // Check if the key matches.
@@ -4349,7 +4311,7 @@
   // Check that the value is a normal property.
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  ASSERT_EQ(NORMAL, 0);
+  DCHECK_EQ(NORMAL, 0);
   Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Smi::FromInt(PropertyDetails::TypeField::kMask));
   j(not_zero, miss);
@@ -4370,7 +4332,7 @@
   // Just return if allocation top is already known.
   if ((flags & RESULT_CONTAINS_TOP) != 0) {
     // No use of scratch if allocation top is provided.
-    ASSERT(!scratch.is_valid());
+    DCHECK(!scratch.is_valid());
 #ifdef DEBUG
     // Assert that result actually contains top on entry.
     Operand top_operand = ExternalOperand(allocation_top);
@@ -4403,12 +4365,12 @@
   } else {
     // Align the next allocation. Storing the filler map without checking top
     // is safe in new-space because the limit of the heap is aligned there.
-    ASSERT(kPointerSize * 2 == kDoubleSize);
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK(kPointerSize * 2 == kDoubleSize);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     // Make sure scratch is not clobbered by this function as it might be
     // used in UpdateAllocationTopHelper later.
-    ASSERT(!scratch.is(kScratchRegister));
+    DCHECK(!scratch.is(kScratchRegister));
     Label aligned;
     testl(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
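A sketch of the fixup this branch performs, under the branch's own assumptions (pointers half the width of a double, per the DCHECKs, so a single filler word restores alignment); this is a model, not V8 code:

    #include <cstdint>

    // If the allocation top is not double-aligned, skip one pointer-sized
    // slot (the generated code stores a one-word filler there) so the new
    // object starts on a double boundary.
    uint64_t AlignAllocationTop(uint64_t top, uint64_t double_alignment_mask,
                                uint64_t pointer_size) {
      if (top & double_alignment_mask) top += pointer_size;
      return top;
    }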
@@ -4453,8 +4415,8 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
-  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -4469,7 +4431,7 @@
     jmp(gc_required);
     return;
   }
-  ASSERT(!result.is(result_end));
+  DCHECK(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
@@ -4505,7 +4467,7 @@
     }
   } else if (tag_result) {
     // Tag the result if requested.
-    ASSERT(kHeapObjectTag == 1);
+    DCHECK(kHeapObjectTag == 1);
     incp(result);
   }
 }
@@ -4519,7 +4481,7 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & SIZE_IN_WORDS) == 0);
+  DCHECK((flags & SIZE_IN_WORDS) == 0);
   leap(result_end, Operand(element_count, element_size, header_size));
   Allocate(result_end, result, result_end, scratch, gc_required, flags);
 }
@@ -4531,7 +4493,7 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & SIZE_IN_WORDS) == 0);
+  DCHECK((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -4545,7 +4507,7 @@
     jmp(gc_required);
     return;
   }
-  ASSERT(!result.is(result_end));
+  DCHECK(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
@@ -4593,12 +4555,17 @@
 
 void MacroAssembler::AllocateHeapNumber(Register result,
                                         Register scratch,
-                                        Label* gc_required) {
+                                        Label* gc_required,
+                                        MutableMode mode) {
   // Allocate heap number in new space.
   Allocate(HeapNumber::kSize, result, scratch, no_reg, gc_required, TAG_OBJECT);
 
+  Heap::RootListIndex map_index = mode == MUTABLE
+      ? Heap::kMutableHeapNumberMapRootIndex
+      : Heap::kHeapNumberMapRootIndex;
+
   // Set the map.
-  LoadRoot(kScratchRegister, Heap::kHeapNumberMapRootIndex);
+  LoadRoot(kScratchRegister, map_index);
   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
 }
 
@@ -4613,7 +4580,7 @@
   // observing object alignment.
   const int kHeaderAlignment = SeqTwoByteString::kHeaderSize &
                                kObjectAlignmentMask;
-  ASSERT(kShortSize == 2);
+  DCHECK(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
   leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask +
                 kHeaderAlignment));
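The leap above folds the whole size formula into one instruction: base + index*scale + displacement yields length + length*1 + (kObjectAlignmentMask + kHeaderAlignment), i.e. 2*length biased for rounding. A C++ sketch of the round-up pattern shared with the one-byte allocator below (the constants are stand-ins):

    #include <cstdint>

    // Bytes needed for 'length' characters of 'char_size' bytes, rounded so
    // the total object size stays a multiple of the object alignment. The
    // header-alignment bias is added before masking and removed after,
    // exactly as the addp/andp/subp sequence in AllocateOneByteString does.
    uint32_t RoundedBodySize(uint32_t length, uint32_t char_size,
                             uint32_t object_alignment_mask,
                             uint32_t header_alignment) {
      uint32_t bytes =
          length * char_size + object_alignment_mask + header_alignment;
      bytes &= ~object_alignment_mask;
      return bytes - header_alignment;
    }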
@@ -4642,25 +4609,23 @@
 }
 
 
-void MacroAssembler::AllocateAsciiString(Register result,
-                                         Register length,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
   const int kHeaderAlignment = SeqOneByteString::kHeaderSize &
                                kObjectAlignmentMask;
   movl(scratch1, length);
-  ASSERT(kCharSize == 1);
+  DCHECK(kCharSize == 1);
   addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment));
   andp(scratch1, Immediate(~kObjectAlignmentMask));
   if (kHeaderAlignment > 0) {
     subp(scratch1, Immediate(kHeaderAlignment));
   }
 
-  // Allocate ASCII string in new space.
+  // Allocate one-byte string in new space.
   Allocate(SeqOneByteString::kHeaderSize,
            times_1,
            scratch1,
@@ -4671,7 +4636,7 @@
            TAG_OBJECT);
 
   // Set the map, length and hash field.
-  LoadRoot(kScratchRegister, Heap::kAsciiStringMapRootIndex);
+  LoadRoot(kScratchRegister, Heap::kOneByteStringMapRootIndex);
   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
   Integer32ToSmi(scratch1, length);
   movp(FieldOperand(result, String::kLengthOffset), scratch1);
@@ -4694,10 +4659,10 @@
 }
 
 
-void MacroAssembler::AllocateAsciiConsString(Register result,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
   Allocate(ConsString::kSize,
            result,
            scratch1,
@@ -4706,7 +4671,7 @@
            TAG_OBJECT);
 
   // Set the map. The other fields are left uninitialized.
-  LoadRoot(kScratchRegister, Heap::kConsAsciiStringMapRootIndex);
+  LoadRoot(kScratchRegister, Heap::kConsOneByteStringMapRootIndex);
   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
 }
 
@@ -4725,16 +4690,16 @@
 }
 
 
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
   // Allocate heap number in new space.
   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
   // Set the map. The other fields are left uninitialized.
-  LoadRoot(kScratchRegister, Heap::kSlicedAsciiStringMapRootIndex);
+  LoadRoot(kScratchRegister, Heap::kSlicedOneByteStringMapRootIndex);
   movp(FieldOperand(result, HeapObject::kMapOffset), kScratchRegister);
 }
 
@@ -4751,7 +4716,7 @@
                                Register length,
                                int min_length,
                                Register scratch) {
-  ASSERT(min_length >= 0);
+  DCHECK(min_length >= 0);
   if (emit_debug_code()) {
     cmpl(length, Immediate(min_length));
     Assert(greater_equal, kInvalidMinLength);
@@ -4764,9 +4729,9 @@
     j(below, &short_string, Label::kNear);
   }
 
-  ASSERT(source.is(rsi));
-  ASSERT(destination.is(rdi));
-  ASSERT(length.is(rcx));
+  DCHECK(source.is(rsi));
+  DCHECK(destination.is(rdi));
+  DCHECK(length.is(rcx));
 
   if (min_length <= kLongStringLimit) {
     cmpl(length, Immediate(2 * kPointerSize));
@@ -4931,7 +4896,7 @@
   // arguments.
   // On AMD64 ABI (Linux/Mac) the first six arguments are passed in registers
   // and the caller does not reserve stack slots for them.
-  ASSERT(num_arguments >= 0);
+  DCHECK(num_arguments >= 0);
 #ifdef _WIN64
   const int kMinimumStackSlots = kRegisterPassedArguments;
   if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots;
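A compact model of the slot count on both ABIs, following the comment above: Win64 passes four arguments in registers but still reserves their "shadow" slots, while the System V ABI passes six in registers with no reservation. Illustrative C++, not V8's code:

    // Stack slots a C call needs for num_arguments integer arguments.
    int ArgumentStackSlots(int num_arguments, bool win64) {
      const int kRegisterPassedArguments = win64 ? 4 : 6;
      if (win64) {
        // Shadow space: at least four slots, even for fewer arguments.
        return num_arguments < kRegisterPassedArguments
                   ? kRegisterPassedArguments
                   : num_arguments;
      }
      // System V: only arguments beyond the sixth spill to the stack.
      return num_arguments > kRegisterPassedArguments
                 ? num_arguments - kRegisterPassedArguments
                 : 0;
    }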
@@ -4977,13 +4942,13 @@
 
 
 void MacroAssembler::PrepareCallCFunction(int num_arguments) {
-  int frame_alignment = OS::ActivationFrameAlignment();
-  ASSERT(frame_alignment != 0);
-  ASSERT(num_arguments >= 0);
+  int frame_alignment = base::OS::ActivationFrameAlignment();
+  DCHECK(frame_alignment != 0);
+  DCHECK(num_arguments >= 0);
 
   // Make stack end at alignment and allocate space for arguments and old rsp.
   movp(kScratchRegister, rsp);
-  ASSERT(IsPowerOf2(frame_alignment));
+  DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
   int argument_slots_on_stack =
       ArgumentStackSlotsForCFunctionCall(num_arguments);
   subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize));
@@ -5000,30 +4965,48 @@
 
 
 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
-  ASSERT(has_frame());
+  DCHECK(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
   }
 
   call(function);
-  ASSERT(OS::ActivationFrameAlignment() != 0);
-  ASSERT(num_arguments >= 0);
+  DCHECK(base::OS::ActivationFrameAlignment() != 0);
+  DCHECK(num_arguments >= 0);
   int argument_slots_on_stack =
       ArgumentStackSlotsForCFunctionCall(num_arguments);
   movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize));
 }
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6,
+                Register reg7,
+                Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
 }
+#endif
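The rewritten AreAliased reduces aliasing to a popcount comparison: OR every valid register's bit into a RegList, then compare the number of set bits with the number of valid arguments. A self-contained C++ model, with integer register codes standing in for Register values:

    #include <bitset>
    #include <cstdint>
    #include <initializer_list>

    // A negative code stands in for no_reg / !is_valid().
    bool AreAliasedModel(std::initializer_list<int> reg_codes) {
      uint64_t regs = 0;
      int n_valid = 0;
      for (int code : reg_codes) {
        if (code < 0) continue;
        ++n_valid;
        regs |= uint64_t{1} << code;  // reg.bit()
      }
      // NumRegs(regs) in the real code; any collision drops the popcount.
      return n_valid != static_cast<int>(std::bitset<64>(regs).count());
    }

    // AreAliasedModel({0, 1, 0}) == true: the same register listed twice.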
 
 
 CodePatcher::CodePatcher(byte* address, int size)
@@ -5033,17 +5016,17 @@
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
-  CPU::FlushICache(address_, size_);
+  CpuFeatures::FlushICache(address_, size_);
 
   // Check that the code was patched as expected.
-  ASSERT(masm_.pc_ == address_ + size_);
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
@@ -5054,7 +5037,7 @@
     Condition cc,
     Label* condition_met,
     Label::Distance condition_met_distance) {
-  ASSERT(cc == zero || cc == not_zero);
+  DCHECK(cc == zero || cc == not_zero);
   if (scratch.is(object)) {
     andp(scratch, Immediate(~Page::kPageAlignmentMask));
   } else {
@@ -5088,10 +5071,10 @@
                                  Register mask_scratch,
                                  Label* on_black,
                                  Label::Distance on_black_distance) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx));
   GetMarkBits(object, bitmap_scratch, mask_scratch);
 
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
   // The mask_scratch register contains a 1 at the position of the first bit
   // and a 0 at all other positions, including the position of the second bit.
   movp(rcx, mask_scratch);
@@ -5117,8 +5100,8 @@
   movp(scratch, FieldOperand(value, HeapObject::kMapOffset));
   CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
   j(equal, &is_data_object, Label::kNear);
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   testb(FieldOperand(scratch, Map::kInstanceTypeOffset),
@@ -5131,7 +5114,7 @@
 void MacroAssembler::GetMarkBits(Register addr_reg,
                                  Register bitmap_reg,
                                  Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
+  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx));
   movp(bitmap_reg, addr_reg);
   // Sign extended 32 bit immediate.
   andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
@@ -5158,14 +5141,14 @@
     Register mask_scratch,
     Label* value_is_white_and_not_data,
     Label::Distance distance) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
+  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx));
   GetMarkBits(value, bitmap_scratch, mask_scratch);
 
   // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
 
   Label done;
 
@@ -5203,8 +5186,8 @@
 
   bind(&not_heap_number);
   // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   Register instance_type = rcx;
@@ -5217,20 +5200,20 @@
   Label not_external;
   // External strings are the only ones with the kExternalStringTag bit
   // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
   testb(instance_type, Immediate(kExternalStringTag));
   j(zero, &not_external, Label::kNear);
   movp(length, Immediate(ExternalString::kSize));
   jmp(&is_data_object, Label::kNear);
 
   bind(&not_external);
-  // Sequential string, either ASCII or UC16.
-  ASSERT(kOneByteStringTag == 0x04);
+  // Sequential string, either Latin1 or UC16.
+  DCHECK(kOneByteStringTag == 0x04);
   andp(length, Immediate(kStringEncodingMask));
   xorp(length, Immediate(kStringEncodingMask));
   addp(length, Immediate(0x04));
-  // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2.
+  // Value now either 4 (if Latin1) or 8 (if UC16), i.e. char-size shifted by 2.
   imulp(length, FieldOperand(value, String::kLengthOffset));
   shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize));
   addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask));
@@ -5319,8 +5302,8 @@
     Register scratch0,
     Register scratch1,
     Label* found) {
-  ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
-  ASSERT(!scratch1.is(scratch0));
+  DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister)));
+  DCHECK(!scratch1.is(scratch0));
   Register current = scratch0;
   Label loop_again;
 
@@ -5340,14 +5323,16 @@
 
 
 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
-  ASSERT(!dividend.is(rax));
-  ASSERT(!dividend.is(rdx));
-  MultiplierAndShift ms(divisor);
-  movl(rax, Immediate(ms.multiplier()));
+  DCHECK(!dividend.is(rax));
+  DCHECK(!dividend.is(rdx));
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+  movl(rax, Immediate(mag.multiplier));
   imull(dividend);
-  if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend);
-  if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend);
-  if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift()));
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) addl(rdx, dividend);
+  if (divisor < 0 && !neg && mag.multiplier > 0) subl(rdx, dividend);
+  if (mag.shift > 0) sarl(rdx, Immediate(mag.shift));
   movl(rax, dividend);
   shrl(rax, Immediate(31));
   addl(rdx, rax);
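The new helper changes where the magic constants come from, not what the sequence computes: a signed 32-bit division by a constant via one widening multiply, an optional add/sub correction, an arithmetic shift, and a +1 when the dividend is negative. A scalar C++ model of the emitted code (not V8's implementation; it assumes arithmetic right shifts, as on every target here):

    #include <cstdint>

    // dividend / divisor for a precomputed (multiplier, shift) pair such as
    // base::SignedDivisionByConstant returns.
    int32_t TruncatingDivModel(int32_t dividend, uint32_t multiplier,
                               int32_t divisor, int shift) {
      // imull: rdx receives the high 32 bits of the signed product.
      int64_t product =
          static_cast<int64_t>(dividend) * static_cast<int32_t>(multiplier);
      int32_t high = static_cast<int32_t>(product >> 32);
      bool neg = (multiplier & (1u << 31)) != 0;
      if (divisor > 0 && neg) high += dividend;                     // addl
      if (divisor < 0 && !neg && multiplier > 0) high -= dividend;  // subl
      if (shift > 0) high >>= shift;                                // sarl
      // shrl(rax, 31); addl(rdx, rax): add 1 for negative dividends.
      return high + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
    }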
diff --git a/src/x64/macro-assembler-x64.h b/src/x64/macro-assembler-x64.h
index 8a0ffa6..d051773 100644
--- a/src/x64/macro-assembler-x64.h
+++ b/src/x64/macro-assembler-x64.h
@@ -6,6 +6,7 @@
 #define V8_X64_MACRO_ASSEMBLER_X64_H_
 
 #include "src/assembler.h"
+#include "src/bailout-reason.h"
 #include "src/frames.h"
 #include "src/globals.h"
 
@@ -50,7 +51,16 @@
       : EnumSet<SmiOperationConstraint, byte>(bits) { }
 };
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3 = no_reg,
+                Register reg4 = no_reg,
+                Register reg5 = no_reg,
+                Register reg6 = no_reg,
+                Register reg7 = no_reg,
+                Register reg8 = no_reg);
+#endif
 
 // Forward declaration.
 class JumpTarget;
@@ -764,29 +774,22 @@
                        Label::Distance near_jump = Label::kFar);
 
 
-  void JumpIfNotBothSequentialAsciiStrings(
-      Register first_object,
-      Register second_object,
-      Register scratch1,
-      Register scratch2,
-      Label* on_not_both_flat_ascii,
+  void JumpIfNotBothSequentialOneByteStrings(
+      Register first_object, Register second_object, Register scratch1,
+      Register scratch2, Label* on_not_both_flat_one_byte,
       Label::Distance near_jump = Label::kFar);
 
-  // Check whether the instance type represents a flat ASCII string. Jump to the
-  // label if not. If the instance type can be scratched specify same register
-  // for both instance type and scratch.
-  void JumpIfInstanceTypeIsNotSequentialAscii(
-      Register instance_type,
-      Register scratch,
-      Label*on_not_flat_ascii_string,
+  // Check whether the instance type represents a flat one-byte string. Jump
+  // to the label if not. If the instance type can be scratched, specify the
+  // same register for both instance type and scratch.
+  void JumpIfInstanceTypeIsNotSequentialOneByte(
+      Register instance_type, Register scratch,
+      Label* on_not_flat_one_byte_string,
       Label::Distance near_jump = Label::kFar);
 
-  void JumpIfBothInstanceTypesAreNotSequentialAscii(
-      Register first_object_instance_type,
-      Register second_object_instance_type,
-      Register scratch1,
-      Register scratch2,
-      Label* on_fail,
+  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+      Register first_object_instance_type, Register second_object_instance_type,
+      Register scratch1, Register scratch2, Label* on_fail,
       Label::Distance near_jump = Label::kFar);
 
   void EmitSeqStringSetCharCheck(Register string,
@@ -795,10 +798,10 @@
                                  uint32_t encoding_mask);
 
   // Checks if the given register or operand is a unique name
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar);
-  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar);
 
   // ---------------------------------------------------------------------------
   // Macro instructions.
@@ -873,15 +876,15 @@
   void Move(Register dst, void* ptr, RelocInfo::Mode rmode) {
     // This method must not be used with heap object references. The stored
     // address is not GC safe. Use the handle version instead.
-    ASSERT(rmode > RelocInfo::LAST_GCED_ENUM);
+    DCHECK(rmode > RelocInfo::LAST_GCED_ENUM);
     movp(dst, ptr, rmode);
   }
 
   void Move(Register dst, Handle<Object> value, RelocInfo::Mode rmode) {
     AllowDeferredHandleDereference using_raw_address;
-    ASSERT(!RelocInfo::IsNone(rmode));
-    ASSERT(value->IsHeapObject());
-    ASSERT(!isolate()->heap()->InNewSpace(*value));
+    DCHECK(!RelocInfo::IsNone(rmode));
+    DCHECK(value->IsHeapObject());
+    DCHECK(!isolate()->heap()->InNewSpace(*value));
     movp(dst, reinterpret_cast<void*>(value.location()), rmode);
   }
 
@@ -1028,12 +1031,9 @@
   void TruncateDoubleToI(Register result_reg, XMMRegister input_reg);
 
   void DoubleToI(Register result_reg, XMMRegister input_reg,
-      XMMRegister scratch, MinusZeroMode minus_zero_mode,
-      Label* conversion_failed, Label::Distance dst = Label::kFar);
-
-  void TaggedToI(Register result_reg, Register input_reg, XMMRegister temp,
-      MinusZeroMode minus_zero_mode, Label* lost_precision,
-      Label::Distance dst = Label::kFar);
+                 XMMRegister scratch, MinusZeroMode minus_zero_mode,
+                 Label* lost_precision, Label* is_nan, Label* minus_zero,
+                 Label::Distance dst = Label::kFar);
 
   void LoadUint32(XMMRegister dst, Register src);
 
@@ -1059,9 +1059,9 @@
     } else {
       static const int shift = Field::kShift;
       static const int mask = (Field::kMask >> Field::kShift) << kSmiTagSize;
-      ASSERT(SmiValuesAre31Bits());
-      ASSERT(kSmiShift == kSmiTagSize);
-      ASSERT((mask & 0x80000000u) == 0);
+      DCHECK(SmiValuesAre31Bits());
+      DCHECK(kSmiShift == kSmiTagSize);
+      DCHECK((mask & 0x80000000u) == 0);
       if (shift < kSmiShift) {
         shlp(reg, Immediate(kSmiShift - shift));
       } else if (shift > kSmiShift) {
@@ -1186,7 +1186,8 @@
   // space is full.
   void AllocateHeapNumber(Register result,
                           Register scratch,
-                          Label* gc_required);
+                          Label* gc_required,
+                          MutableMode mode = IMMUTABLE);
 
   // Allocate a sequential string. All the header fields of the string object
   // are initialized.
@@ -1196,12 +1197,9 @@
                              Register scratch2,
                              Register scratch3,
                              Label* gc_required);
-  void AllocateAsciiString(Register result,
-                           Register length,
-                           Register scratch1,
-                           Register scratch2,
-                           Register scratch3,
-                           Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
 
   // Allocate a raw cons string object. Only the map field of the result is
   // initialized.
@@ -1209,10 +1207,8 @@
                           Register scratch1,
                           Register scratch2,
                           Label* gc_required);
-  void AllocateAsciiConsString(Register result,
-                               Register scratch1,
-                               Register scratch2,
-                               Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register scratch1,
+                                 Register scratch2, Label* gc_required);
 
   // Allocate a raw sliced string object. Only the map field of the result is
   // initialized.
@@ -1220,10 +1216,8 @@
                             Register scratch1,
                             Register scratch2,
                             Label* gc_required);
-  void AllocateAsciiSlicedString(Register result,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register scratch1,
+                                   Register scratch2, Label* gc_required);
 
   // ---------------------------------------------------------------------------
   // Support functions.
@@ -1377,7 +1371,7 @@
   void Ret(int bytes_dropped, Register scratch);
 
   Handle<Object> CodeObject() {
-    ASSERT(!code_object_.is_null());
+    DCHECK(!code_object_.is_null());
     return code_object_;
   }
 
diff --git a/src/x64/regexp-macro-assembler-x64.cc b/src/x64/regexp-macro-assembler-x64.cc
index a8c1cb4..82a3735 100644
--- a/src/x64/regexp-macro-assembler-x64.cc
+++ b/src/x64/regexp-macro-assembler-x64.cc
@@ -7,12 +7,12 @@
 #if V8_TARGET_ARCH_X64
 
 #include "src/cpu-profiler.h"
-#include "src/serialize.h"
-#include "src/unicode.h"
 #include "src/log.h"
-#include "src/regexp-stack.h"
 #include "src/macro-assembler.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/serialize.h"
+#include "src/unicode.h"
 #include "src/x64/regexp-macro-assembler-x64.h"
 
 namespace v8 {
@@ -22,7 +22,7 @@
 
 /*
  * This assembler uses the following register assignment convention
- * - rdx : Currently loaded character(s) as ASCII or UC16.  Must be loaded
+ * - rdx : Currently loaded character(s) as Latin1 or UC16.  Must be loaded
  *         using LoadCurrentCharacter before using any of the dispatch methods.
  *         Temporarily stores the index of capture start after a matching pass
  *         for a global regexp.
@@ -109,7 +109,7 @@
       success_label_(),
       backtrack_label_(),
       exit_label_() {
-  ASSERT_EQ(0, registers_to_save % 2);
+  DCHECK_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code when we know more.
   __ bind(&start_label_);  // And then continue from here.
 }
@@ -140,8 +140,8 @@
 
 
 void RegExpMacroAssemblerX64::AdvanceRegister(int reg, int by) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < num_registers_);
+  DCHECK(reg >= 0);
+  DCHECK(reg < num_registers_);
   if (by != 0) {
     __ addp(register_location(reg), Immediate(by));
   }
@@ -244,7 +244,7 @@
   __ addl(rax, rbx);
   BranchOrBacktrack(greater, on_no_match);
 
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     Label loop_increment;
     if (on_no_match == NULL) {
       on_no_match = &backtrack_label_;
@@ -295,7 +295,7 @@
     __ movp(rdi, r11);
     __ subq(rdi, rsi);
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     // Save important/volatile registers before calling C function.
 #ifndef _WIN64
     // Caller save on Linux and callee save in Windows.
@@ -400,11 +400,11 @@
 
   Label loop;
   __ bind(&loop);
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     __ movzxbl(rax, Operand(rdx, 0));
     __ cmpb(rax, Operand(rbx, 0));
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     __ movzxwl(rax, Operand(rdx, 0));
     __ cmpw(rax, Operand(rbx, 0));
   }
@@ -465,7 +465,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
   __ leap(rax, Operand(current_character(), -minus));
   __ andp(rax, Immediate(mask));
   __ cmpl(rax, Immediate(c));
@@ -498,7 +498,7 @@
     Label* on_bit_set) {
   __ Move(rax, table);
   Register index = current_character();
-  if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
     __ movp(rbx, current_character());
     __ andp(rbx, Immediate(kTableMask));
     index = rbx;
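CheckBitInTable compiles a character class into a byte-table lookup: the current character, masked whenever it can exceed the table, indexes a byte that is non-zero for members of the class. In plain C++ terms (a model, not the V8 API):

    #include <cstdint>

    bool BitInTable(const uint8_t* table, uint32_t current_char,
                    uint32_t table_mask) {
      return table[current_char & table_mask] != 0;  // testb + j(not_zero, ...)
    }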
@@ -518,7 +518,7 @@
   switch (type) {
   case 's':
     // Match space-characters
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       // One byte space characters are '\t'..'\r', ' ' and \u00a0.
       Label success;
       __ cmpl(current_character(), Immediate(' '));
@@ -574,7 +574,7 @@
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
     __ subl(rax, Immediate(0x0b));
     __ cmpl(rax, Immediate(0x0c - 0x0b));
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       BranchOrBacktrack(above, on_no_match);
     } else {
       Label done;
@@ -590,13 +590,13 @@
     return true;
   }
   case 'w': {
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ cmpl(current_character(), Immediate('z'));
       BranchOrBacktrack(above, on_no_match);
     }
     __ Move(rbx, ExternalReference::re_word_character_map());
-    ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
+    DCHECK_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
     __ testb(Operand(rbx, current_character(), times_1, 0),
              current_character());
     BranchOrBacktrack(zero, on_no_match);
@@ -604,17 +604,17 @@
   }
   case 'W': {
     Label done;
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ cmpl(current_character(), Immediate('z'));
       __ j(above, &done);
     }
     __ Move(rbx, ExternalReference::re_word_character_map());
-    ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
+    DCHECK_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
     __ testb(Operand(rbx, current_character(), times_1, 0),
              current_character());
     BranchOrBacktrack(not_zero, on_no_match);
-    if (mode_ != ASCII) {
+    if (mode_ != LATIN1) {
       __ bind(&done);
     }
     return true;
@@ -669,12 +669,12 @@
 #else
   // GCC passes arguments in rdi, rsi, rdx, rcx, r8, r9 (and then on stack).
   // Push register parameters on stack for reference.
-  ASSERT_EQ(kInputString, -1 * kRegisterSize);
-  ASSERT_EQ(kStartIndex, -2 * kRegisterSize);
-  ASSERT_EQ(kInputStart, -3 * kRegisterSize);
-  ASSERT_EQ(kInputEnd, -4 * kRegisterSize);
-  ASSERT_EQ(kRegisterOutput, -5 * kRegisterSize);
-  ASSERT_EQ(kNumOutputRegisters, -6 * kRegisterSize);
+  DCHECK_EQ(kInputString, -1 * kRegisterSize);
+  DCHECK_EQ(kStartIndex, -2 * kRegisterSize);
+  DCHECK_EQ(kInputStart, -3 * kRegisterSize);
+  DCHECK_EQ(kInputEnd, -4 * kRegisterSize);
+  DCHECK_EQ(kRegisterOutput, -5 * kRegisterSize);
+  DCHECK_EQ(kNumOutputRegisters, -6 * kRegisterSize);
   __ pushq(rdi);
   __ pushq(rsi);
   __ pushq(rdx);
@@ -1022,8 +1022,8 @@
                                                    Label* on_end_of_input,
                                                    bool check_bounds,
                                                    int characters) {
-  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
-  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
+  DCHECK(cp_offset >= -1);      // ^ and \b can look behind one character.
+  DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     CheckPosition(cp_offset + characters - 1, on_end_of_input);
   }
@@ -1104,7 +1104,7 @@
 
 
 void RegExpMacroAssemblerX64::SetRegister(int register_index, int to) {
-  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
   __ movp(register_location(register_index), Immediate(to));
 }
 
@@ -1127,7 +1127,7 @@
 
 
 void RegExpMacroAssemblerX64::ClearRegisters(int reg_from, int reg_to) {
-  ASSERT(reg_from <= reg_to);
+  DCHECK(reg_from <= reg_to);
   __ movp(rax, Operand(rbp, kInputStartMinusOne));
   for (int reg = reg_from; reg <= reg_to; reg++) {
     __ movp(register_location(reg), rax);
@@ -1205,10 +1205,10 @@
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
 
   // Current string.
-  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
 
-  ASSERT(re_code->instruction_start() <= *return_address);
-  ASSERT(*return_address <=
+  DCHECK(re_code->instruction_start() <= *return_address);
+  DCHECK(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
   Object* result = isolate->stack_guard()->HandleInterrupts();
@@ -1236,8 +1236,8 @@
   }
 
   // String might have changed.
-  if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
-    // If we changed between an ASCII and an UC16 string, the specialized
+  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a Latin1 and a UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
     return RETRY;
@@ -1247,7 +1247,7 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+  DCHECK(StringShape(*subject_tmp).IsSequential() ||
       StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
@@ -1279,7 +1279,7 @@
 
 
 Operand RegExpMacroAssemblerX64::register_location(int register_index) {
-  ASSERT(register_index < (1<<30));
+  DCHECK(register_index < (1<<30));
   if (num_registers_ <= register_index) {
     num_registers_ = register_index + 1;
   }
@@ -1330,7 +1330,7 @@
 
 
 void RegExpMacroAssemblerX64::Push(Register source) {
-  ASSERT(!source.is(backtrack_stackpointer()));
+  DCHECK(!source.is(backtrack_stackpointer()));
   // Notice: This updates flags, unlike normal Push.
   __ subp(backtrack_stackpointer(), Immediate(kIntSize));
   __ movl(Operand(backtrack_stackpointer(), 0), source);
@@ -1370,7 +1370,7 @@
 
 
 void RegExpMacroAssemblerX64::Pop(Register target) {
-  ASSERT(!target.is(backtrack_stackpointer()));
+  DCHECK(!target.is(backtrack_stackpointer()));
   __ movsxlq(target, Operand(backtrack_stackpointer(), 0));
   // Notice: This updates flags, unlike normal Pop.
   __ addp(backtrack_stackpointer(), Immediate(kIntSize));
@@ -1413,22 +1413,22 @@
 
 void RegExpMacroAssemblerX64::LoadCurrentCharacterUnchecked(int cp_offset,
                                                             int characters) {
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     if (characters == 4) {
       __ movl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
     } else if (characters == 2) {
       __ movzxwl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ movzxbl(current_character(), Operand(rsi, rdi, times_1, cp_offset));
     }
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     if (characters == 2) {
       __ movl(current_character(),
               Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ movzxwl(current_character(),
                  Operand(rsi, rdi, times_1, cp_offset * sizeof(uc16)));
     }
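A model of the unchecked load above: Latin1 characters are one byte (up to four fetched at once), UC16 characters two bytes (up to two at once), always zero-extended into the 32-bit current-character register. Illustrative C++, with 'base' standing for rsi+rdi:

    #include <cstdint>
    #include <cstring>

    uint32_t LoadCharsModel(const uint8_t* base, int cp_offset, int characters,
                            bool latin1) {
      const int char_size = latin1 ? 1 : 2;
      const int bytes = characters * char_size;  // 1, 2 or 4 -- see the DCHECKs
      uint32_t value = 0;                        // zero-extend like movzx
      std::memcpy(&value, base + cp_offset * char_size, bytes);
      return value;  // little-endian, matching movzxbl/movzxwl/movl
    }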
diff --git a/src/x64/regexp-macro-assembler-x64.h b/src/x64/regexp-macro-assembler-x64.h
index 89d8d3b..e373377 100644
--- a/src/x64/regexp-macro-assembler-x64.h
+++ b/src/x64/regexp-macro-assembler-x64.h
@@ -5,10 +5,9 @@
 #ifndef V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
 #define V8_X64_REGEXP_MACRO_ASSEMBLER_X64_H_
 
-#include "src/x64/assembler-x64.h"
-#include "src/x64/assembler-x64-inl.h"
 #include "src/macro-assembler.h"
-#include "src/code.h"
+#include "src/x64/assembler-x64-inl.h"
+#include "src/x64/assembler-x64.h"
 #include "src/x64/macro-assembler-x64.h"
 
 namespace v8 {
@@ -255,7 +254,7 @@
 
   ZoneList<int> code_relative_fixup_positions_;
 
-  // Which mode to generate code for (ASCII or UC16).
+  // Which mode to generate code for (LATIN1 or UC16).
   Mode mode_;
 
   // One greater than maximal register index actually used.
diff --git a/src/x64/stub-cache-x64.cc b/src/x64/stub-cache-x64.cc
deleted file mode 100644
index 422ef2e..0000000
--- a/src/x64/stub-cache-x64.cc
+++ /dev/null
@@ -1,1436 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_X64
-
-#include "src/arguments.h"
-#include "src/ic-inl.h"
-#include "src/codegen.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
-                       MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register receiver,
-                       Register name,
-                       // The offset is scaled by 4, based on
-                       // kHeapObjectTagSize, which is two bits
-                       Register offset) {
-  // We need to scale up the pointer by 2 when the offset is scaled by less
-  // than the pointer size.
-  ASSERT(kPointerSize == kInt64Size
-      ? kPointerSizeLog2 == kHeapObjectTagSize + 1
-      : kPointerSizeLog2 == kHeapObjectTagSize);
-  ScaleFactor scale_factor = kPointerSize == kInt64Size ? times_2 : times_1;
-
-  ASSERT_EQ(3 * kPointerSize, sizeof(StubCache::Entry));
-  // The offset register holds the entry offset times four (due to masking
-  // and shifting optimizations).
-  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  Label miss;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ leap(offset, Operand(offset, offset, times_2, 0));
-
-  __ LoadAddress(kScratchRegister, key_offset);
-
-  // Check that the key in the entry matches the name.
-  // Multiply entry offset by 16 to get the entry address. Since the
-  // offset register already holds the entry offset times four, multiply
-  // by a further four.
-  __ cmpl(name, Operand(kScratchRegister, offset, scale_factor, 0));
-  __ j(not_equal, &miss);
-
-  // Get the map entry from the cache.
-  // Use key_offset + kPointerSize * 2, rather than loading map_offset.
-  __ movp(kScratchRegister,
-          Operand(kScratchRegister, offset, scale_factor, kPointerSize * 2));
-  __ cmpp(kScratchRegister, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ j(not_equal, &miss);
-
-  // Get the code entry from the cache.
-  __ LoadAddress(kScratchRegister, value_offset);
-  __ movp(kScratchRegister,
-          Operand(kScratchRegister, offset, scale_factor, 0));
-
-  // Check that the flags match what we're looking for.
-  __ movl(offset, FieldOperand(kScratchRegister, Code::kFlagsOffset));
-  __ andp(offset, Immediate(~Code::kFlagsNotUsedInLookup));
-  __ cmpl(offset, Immediate(flags));
-  __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-  // Jump to the first instruction in the code stub.
-  __ addp(kScratchRegister, Immediate(Code::kHeaderSize - kHeapObjectTag));
-  __ jmp(kScratchRegister);
-
-  __ bind(&miss);
-}
-
-
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                    Label* miss_label,
-                                                    Register receiver,
-                                                    Handle<Name> name,
-                                                    Register scratch0,
-                                                    Register scratch1) {
-  ASSERT(name->IsUniqueName());
-  ASSERT(!receiver.is(scratch0));
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
-  __ movp(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  __ testb(FieldOperand(scratch0, Map::kBitFieldOffset),
-           Immediate(kInterceptorOrAccessCheckNeededMask));
-  __ j(not_zero, miss_label);
-
-  // Check that receiver is a JSObject.
-  __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
-  __ j(below, miss_label);
-
-  // Load properties array.
-  Register properties = scratch0;
-  __ movp(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
-  // Check that the properties array is a dictionary.
-  __ CompareRoot(FieldOperand(properties, HeapObject::kMapOffset),
-                 Heap::kHashTableMapRootIndex);
-  __ j(not_equal, miss_label);
-
-  Label done;
-  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   properties,
-                                                   name,
-                                                   scratch1);
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra,
-                              Register extra2,
-                              Register extra3) {
-  Isolate* isolate = masm->isolate();
-  Label miss;
-  USE(extra);   // The register extra is not used on the X64 platform.
-  USE(extra2);  // The register extra2 is not used on the X64 platform.
-  USE(extra3);  // The register extra3 is not used on the X64 platform.
-  // Make sure that code is valid. The multiplying code relies on the
-  // entry size being 3 * kPointerSize.
-  ASSERT(sizeof(Entry) == 3 * kPointerSize);
-
-  // Make sure the flags do not name a specific type.
-  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Make sure that there are no register conflicts.
-  ASSERT(!scratch.is(receiver));
-  ASSERT(!scratch.is(name));
-
-  // Check scratch register is valid, extra and extra2 are unused.
-  ASSERT(!scratch.is(no_reg));
-  ASSERT(extra2.is(no_reg));
-  ASSERT(extra3.is(no_reg));
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
-  // Use only the low 32 bits of the map pointer.
-  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xorp(scratch, Immediate(flags));
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-
-  // Probe the primary table.
-  ProbeTable(isolate, masm, flags, kPrimary, receiver, name, scratch);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
-  __ addl(scratch, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xorp(scratch, Immediate(flags));
-  __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kHeapObjectTagSize));
-  __ subl(scratch, name);
-  __ addl(scratch, Immediate(flags));
-  __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kHeapObjectTagSize));
-
-  // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, kSecondary, receiver, name, scratch);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                       int index,
-                                                       Register prototype) {
-  // Load the global or builtins object from the current context.
-  __ movp(prototype,
-          Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-  // Load the native context from the global or builtins object.
-  __ movp(prototype,
-          FieldOperand(prototype, GlobalObject::kNativeContextOffset));
-  // Load the function from the native context.
-  __ movp(prototype, Operand(prototype, Context::SlotOffset(index)));
-  // Load the initial map.  The global functions all have initial maps.
-  __ movp(prototype,
-          FieldOperand(prototype, JSFunction::kPrototypeOrInitialMapOffset));
-  // Load the prototype from the initial map.
-  __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
-  // Check we're still in the same context.
-  Register scratch = prototype;
-  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ movp(scratch, Operand(rsi, offset));
-  __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
-  __ j(not_equal, miss);
-
-  // Load its initial map. The global functions all have initial maps.
-  __ Move(prototype, Handle<Map>(function->initial_map()));
-  // Load the prototype from the initial map.
-  __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register scratch,
-                                           Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss_label);
-
-  // Check that the object is a JS array.
-  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, miss_label);
-
-  // Load length directly from the JS array.
-  __ movp(rax, FieldOperand(receiver, JSArray::kLengthOffset));
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                                 Register receiver,
-                                                 Register result,
-                                                 Register scratch,
-                                                 Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, result, miss_label);
-  if (!result.is(rax)) __ movp(rax, result);
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            bool inobject,
-                                            int index,
-                                            Representation representation) {
-  ASSERT(!representation.IsDouble());
-  int offset = index * kPointerSize;
-  if (!inobject) {
-    // Calculate the offset into the properties array.
-    offset = offset + FixedArray::kHeaderSize;
-    __ movp(dst, FieldOperand(src, JSObject::kPropertiesOffset));
-    src = dst;
-  }
-  __ movp(dst, FieldOperand(src, offset));
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
-                                     Register receiver,
-                                     Register holder,
-                                     Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
-  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
-  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
-  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
-  __ Push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
-  __ Move(kScratchRegister, interceptor);
-  __ Push(kScratchRegister);
-  __ Push(receiver);
-  __ Push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj,
-    IC::UtilityId id) {
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-  __ CallExternalReference(
-      ExternalReference(IC_Utility(id), masm->isolate()),
-      StubCache::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
-                                       const CallOptimization& optimization,
-                                       Handle<Map> receiver_map,
-                                       Register receiver,
-                                       Register scratch_in,
-                                       bool is_store,
-                                       int argc,
-                                       Register* values) {
-  ASSERT(optimization.is_simple_api_call());
-
-  __ PopReturnAddressTo(scratch_in);
-  // receiver
-  __ Push(receiver);
-  // Write the arguments to stack frame.
-  for (int i = 0; i < argc; i++) {
-    Register arg = values[argc-1-i];
-    ASSERT(!receiver.is(arg));
-    ASSERT(!scratch_in.is(arg));
-    __ Push(arg);
-  }
-  __ PushReturnAddressFrom(scratch_in);
-  // Stack now matches JSFunction abi.
-
-  // Abi for CallApiFunctionStub.
-  Register callee = rax;
-  Register call_data = rbx;
-  Register holder = rcx;
-  Register api_function_address = rdx;
-  Register scratch = rdi;  // scratch_in is no longer valid.
-
-  // Put holder in place.
-  CallOptimization::HolderLookup holder_lookup;
-  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
-      receiver_map,
-      &holder_lookup);
-  switch (holder_lookup) {
-    case CallOptimization::kHolderIsReceiver:
-      __ Move(holder, receiver);
-      break;
-    case CallOptimization::kHolderFound:
-      __ Move(holder, api_holder);
-     break;
-    case CallOptimization::kHolderNotFound:
-      UNREACHABLE();
-      break;
-  }
-
-  Isolate* isolate = masm->isolate();
-  Handle<JSFunction> function = optimization.constant_function();
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
-  // Put callee in place.
-  __ Move(callee, function);
-
-  bool call_data_undefined = false;
-  // Put call_data in place.
-  if (isolate->heap()->InNewSpace(*call_data_obj)) {
-    __ Move(scratch, api_call_info);
-    __ movp(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
-  } else if (call_data_obj->IsUndefined()) {
-    call_data_undefined = true;
-    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
-  } else {
-    __ Move(call_data, call_data_obj);
-  }
-
-  // Put api_function_address in place.
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  __ Move(
-      api_function_address, function_address, RelocInfo::EXTERNAL_REFERENCE);
-
-  // Jump to stub.
-  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
-  __ TailCallStub(&stub);
-}
-
-
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
-                                            Label* label,
-                                            Handle<Name> name) {
-  if (!label->is_unused()) {
-    __ bind(label);
-    __ Move(this->name(), name);
-  }
-}
-
-
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
-                                             Handle<JSGlobalObject> global,
-                                             Handle<Name> name,
-                                             Register scratch,
-                                             Label* miss) {
-  Handle<PropertyCell> cell =
-      JSGlobalObject::EnsurePropertyCell(global, name);
-  ASSERT(cell->value()->IsTheHole());
-  __ Move(scratch, cell);
-  __ Cmp(FieldOperand(scratch, Cell::kValueOffset),
-         masm->isolate()->factory()->the_hole_value());
-  __ j(not_equal, miss);
-}
-
-
-void StoreStubCompiler::GenerateNegativeHolderLookup(
-    MacroAssembler* masm,
-    Handle<JSObject> holder,
-    Register holder_reg,
-    Handle<Name> name,
-    Label* miss) {
-  if (holder->IsJSGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
-  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
-    GenerateDictionaryNegativeLookup(
-        masm, miss, holder_reg, name, scratch1(), scratch2());
-  }
-}
-
-
-// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
-// store is successful.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
-                                                Handle<JSObject> object,
-                                                LookupResult* lookup,
-                                                Handle<Map> transition,
-                                                Handle<Name> name,
-                                                Register receiver_reg,
-                                                Register storage_reg,
-                                                Register value_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register unused,
-                                                Label* miss_label,
-                                                Label* slow) {
-  int descriptor = transition->LastAdded();
-  DescriptorArray* descriptors = transition->instance_descriptors();
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  Representation representation = details.representation();
-  ASSERT(!representation.IsNone());
-
-  if (details.type() == CONSTANT) {
-    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
-    __ Cmp(value_reg, constant);
-    __ j(not_equal, miss_label);
-  } else if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = descriptors->GetFieldType(descriptor);
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      Label do_store;
-      while (true) {
-        __ CompareMap(value_reg, it.Current());
-        it.Advance();
-        if (it.Done()) {
-          __ j(not_equal, miss_label);
-          break;
-        }
-        __ j(equal, &do_store, Label::kNear);
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    Label do_store, heap_number;
-    __ AllocateHeapNumber(storage_reg, scratch1, slow);
-
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiToInteger32(scratch1, value_reg);
-    __ Cvtlsi2sd(xmm0, scratch1);
-    __ jmp(&do_store);
-
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
-                miss_label, DONT_DO_SMI_CHECK);
-    __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ bind(&do_store);
-    __ movsd(FieldOperand(storage_reg, HeapNumber::kValueOffset), xmm0);
-  }
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  // Perform map transition for the receiver if necessary.
-  if (details.type() == FIELD &&
-      object->map()->unused_property_fields() == 0) {
-    // The properties must be extended before we can store the value.
-    // We jump to a runtime call that extends the properties array.
-    __ PopReturnAddressTo(scratch1);
-    __ Push(receiver_reg);
-    __ Push(transition);
-    __ Push(value_reg);
-    __ PushReturnAddressFrom(scratch1);
-    __ TailCallExternalReference(
-        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
-                          masm->isolate()),
-        3,
-        1);
-    return;
-  }
-
-  // Update the map of the object.
-  __ Move(scratch1, transition);
-  __ movp(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
-  // Update the write barrier for the map field.
-  __ RecordWriteField(receiver_reg,
-                      HeapObject::kMapOffset,
-                      scratch1,
-                      scratch2,
-                      kDontSaveFPRegs,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  if (details.type() == CONSTANT) {
-    ASSERT(value_reg.is(rax));
-    __ ret(0);
-    return;
-  }
-
-  int index = transition->instance_descriptors()->GetFieldIndex(
-      transition->LastAdded());
-
-  // Adjust for the number of properties stored in the object. Even in the
-  // face of a transition we can use the old map here because the size of the
-  // object and the number of in-object properties are not going to change.
-  index -= object->map()->inobject_properties();
-
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index < 0) {
-    // Set the property straight into the object.
-    int offset = object->map()->instance_size() + (index * kPointerSize);
-    if (representation.IsDouble()) {
-      __ movp(FieldOperand(receiver_reg, offset), storage_reg);
-    } else {
-      __ movp(FieldOperand(receiver_reg, offset), value_reg);
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ movp(storage_reg, value_reg);
-      }
-      __ RecordWriteField(
-          receiver_reg, offset, storage_reg, scratch1, kDontSaveFPRegs,
-          EMIT_REMEMBERED_SET, smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    // Get the properties array (optimistically).
-    __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-    if (representation.IsDouble()) {
-      __ movp(FieldOperand(scratch1, offset), storage_reg);
-    } else {
-      __ movp(FieldOperand(scratch1, offset), value_reg);
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ movp(storage_reg, value_reg);
-      }
-      __ RecordWriteField(
-          scratch1, offset, storage_reg, receiver_reg, kDontSaveFPRegs,
-          EMIT_REMEMBERED_SET, smi_check);
-    }
-  }
-
-  // Return the value (register rax).
-  ASSERT(value_reg.is(rax));
-  __ ret(0);
-}
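
(Note: the index arithmetic above encodes where a field lives: after subtracting the
in-object property count, a negative index means the slot sits inside the object
itself, and a non-negative one means it lives in the external properties array. A
hedged sketch of the offset computation, assuming 64-bit tagged pointers as on x64:)

  const int kPointerSize = 8;

  // Mirrors the two branches above: in-object slots sit at the end of the
  // instance, out-of-object slots live in the properties FixedArray.
  int FieldByteOffset(int adjusted_index, int instance_size,
                      int fixed_array_header_size) {
    return adjusted_index < 0
        ? instance_size + adjusted_index * kPointerSize
        : fixed_array_header_size + adjusted_index * kPointerSize;
  }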
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                           Handle<JSObject> object,
-                                           LookupResult* lookup,
-                                           Register receiver_reg,
-                                           Register name_reg,
-                                           Register value_reg,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* miss_label) {
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  FieldIndex index = lookup->GetFieldIndex();
-
-  Representation representation = lookup->representation();
-  ASSERT(!representation.IsNone());
-  if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = lookup->GetFieldType();
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      Label do_store;
-      while (true) {
-        __ CompareMap(value_reg, it.Current());
-        it.Advance();
-        if (it.Done()) {
-          __ j(not_equal, miss_label);
-          break;
-        }
-        __ j(equal, &do_store, Label::kNear);
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    // Load the double storage.
-    if (index.is_inobject()) {
-      __ movp(scratch1, FieldOperand(receiver_reg, index.offset()));
-    } else {
-      __ movp(scratch1,
-              FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-      __ movp(scratch1, FieldOperand(scratch1, index.offset()));
-    }
-
-    // Store the value into the storage.
-    Label do_store, heap_number;
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiToInteger32(scratch2, value_reg);
-    __ Cvtlsi2sd(xmm0, scratch2);
-    __ jmp(&do_store);
-
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
-                miss_label, DONT_DO_SMI_CHECK);
-    __ movsd(xmm0, FieldOperand(value_reg, HeapNumber::kValueOffset));
-    __ bind(&do_store);
-    __ movsd(FieldOperand(scratch1, HeapNumber::kValueOffset), xmm0);
-    // Return the value (register rax).
-    ASSERT(value_reg.is(rax));
-    __ ret(0);
-    return;
-  }
-
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index.is_inobject()) {
-    // Set the property straight into the object.
-    __ movp(FieldOperand(receiver_reg, index.offset()), value_reg);
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      // Pass the value being stored in the now unused name_reg.
-      __ movp(name_reg, value_reg);
-      __ RecordWriteField(
-          receiver_reg, index.offset(), name_reg, scratch1, kDontSaveFPRegs,
-          EMIT_REMEMBERED_SET, smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    // Get the properties array (optimistically).
-    __ movp(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ movp(FieldOperand(scratch1, index.offset()), value_reg);
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      // Pass the value being stored in the now unused name_reg.
-      __ movp(name_reg, value_reg);
-      __ RecordWriteField(
-          scratch1, index.offset(), name_reg, receiver_reg, kDontSaveFPRegs,
-          EMIT_REMEMBERED_SET, smi_check);
-    }
-  }
-
-  // Return the value (register rax).
-  ASSERT(value_reg.is(rax));
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
-  __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM((masm()))
-
-
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
-                                       Register object_reg,
-                                       Handle<JSObject> holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       Handle<Name> name,
-                                       Label* miss,
-                                       PrototypeCheckType check) {
-  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
-
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg)
-         && !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.  On the first
-  // iteration, reg is an alias for object_reg; on later iterations,
-  // it is an alias for holder_reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  Handle<JSObject> current = Handle<JSObject>::null();
-  if (type->IsConstant()) {
-    current = Handle<JSObject>::cast(type->AsConstant()->Value());
-  }
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
-  Handle<Map> holder_map(holder->map());
-  // Traverse the prototype chain and check the maps in the prototype chain for
-  // fast and global objects or do negative lookup for normal objects.
-  while (!current_map.is_identical_to(holder_map)) {
-    ++depth;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap() &&
-        !current_map->IsJSGlobalProxyMap()) {
-      if (!name->IsUniqueName()) {
-        ASSERT(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
-      ASSERT(current.is_null() ||
-             current->property_dictionary()->FindEntry(name) ==
-             NameDictionary::kNotFound);
-
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
-
-      __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-      __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      bool in_new_space = heap()->InNewSpace(*prototype);
-      if (in_new_space) {
-        // Save the map in scratch1 for later.
-        __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-      if (depth != 1 || check == CHECK_ALL_MAPS) {
-        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
-      }
-
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
-      if (current_map->IsJSGlobalProxyMap()) {
-        __ CheckAccessGlobalProxy(reg, scratch2, miss);
-      } else if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(
-            masm(), Handle<JSGlobalObject>::cast(current), name,
-            scratch2, miss);
-      }
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (in_new_space) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ Move(reg, prototype);
-      }
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-    current_map = handle(current->map());
-  }
-
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  if (depth != 0 || check == CHECK_ALL_MAPS) {
-    // Check the holder map.
-    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
-  }
-
-  // Perform security check for access to the global object.
-  ASSERT(current_map->IsJSGlobalProxyMap() ||
-         !current_map->is_access_check_needed());
-  if (current_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, miss);
-  }
-
-  // Return the register containing the holder.
-  return reg;
-}
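
(Note: CheckPrototypes compiles a guard for every link of the prototype chain between
the receiver and the holder. Conceptually the emitted code performs the following
walk, shown here as a hedged C++ sketch with hypothetical types; the real stub bakes
each expected map pointer directly into the generated code and only checks maps where
the conditions above require it:)

  struct Map;
  struct JSObject { const Map* map; const JSObject* prototype; };

  // True when every object between receiver and holder still has the map
  // recorded at compile time; any mismatch sends the stub to the miss label.
  bool ChainUnchanged(const JSObject* receiver, const JSObject* holder,
                      const Map* const* expected_maps) {
    int i = 0;
    for (const JSObject* o = receiver; o != holder; o = o->prototype) {
      if (o->map != expected_maps[i++]) return false;
    }
    return holder->map == expected_maps[i];
  }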
-
-
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ jmp(&success);
-    __ bind(miss);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ jmp(&success);
-    GenerateRestoreName(masm(), miss, name);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-Register LoadStubCompiler::CallbackHandlerFrontend(
-    Handle<HeapType> type,
-    Register object_reg,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<Object> callback) {
-  Label miss;
-
-  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-
-  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
-    ASSERT(!reg.is(scratch2()));
-    ASSERT(!reg.is(scratch3()));
-    ASSERT(!reg.is(scratch4()));
-
-    // Load the properties dictionary.
-    Register dictionary = scratch4();
-    __ movp(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
-
-    // Probe the dictionary.
-    Label probe_done;
-    NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
-                                                     &miss,
-                                                     &probe_done,
-                                                     dictionary,
-                                                     this->name(),
-                                                     scratch2(),
-                                                     scratch3());
-    __ bind(&probe_done);
-
-    // If probing finds an entry in the dictionary, scratch3 contains the
-    // index into the dictionary. Check that the value is the callback.
-    Register index = scratch3();
-    const int kElementsStartOffset =
-        NameDictionary::kHeaderSize +
-        NameDictionary::kElementsStartIndex * kPointerSize;
-    const int kValueOffset = kElementsStartOffset + kPointerSize;
-    __ movp(scratch2(),
-            Operand(dictionary, index, times_pointer_size,
-                    kValueOffset - kHeapObjectTag));
-    __ Move(scratch3(), callback, RelocInfo::EMBEDDED_OBJECT);
-    __ cmpp(scratch2(), scratch3());
-    __ j(not_equal, &miss);
-  }
-
-  HandlerFrontendFooter(name, &miss);
-  return reg;
-}
-
-
-void LoadStubCompiler::GenerateLoadField(Register reg,
-                                         Handle<JSObject> holder,
-                                         FieldIndex field,
-                                         Representation representation) {
-  if (!reg.is(receiver())) __ movp(receiver(), reg);
-  if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  } else {
-    KeyedLoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  }
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
-    Register reg,
-    Handle<ExecutableAccessorInfo> callback) {
-  // Insert additional parameters into the stack frame above return address.
-  ASSERT(!scratch4().is(reg));
-  __ PopReturnAddressTo(scratch4());
-
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
-  __ Push(receiver());  // receiver
-  if (heap()->InNewSpace(callback->data())) {
-    ASSERT(!scratch2().is(reg));
-    __ Move(scratch2(), callback);
-    __ Push(FieldOperand(scratch2(),
-                         ExecutableAccessorInfo::kDataOffset));  // data
-  } else {
-    __ Push(Handle<Object>(callback->data(), isolate()));
-  }
-  ASSERT(!kScratchRegister.is(reg));
-  __ LoadRoot(kScratchRegister, Heap::kUndefinedValueRootIndex);
-  __ Push(kScratchRegister);  // return value
-  __ Push(kScratchRegister);  // return value default
-  __ PushAddress(ExternalReference::isolate_address(isolate()));
-  __ Push(reg);  // holder
-  __ Push(name());  // name
-  // Save a pointer to where we pushed the arguments pointer.  This will be
-  // passed as the const PropertyAccessorInfo& to the C++ callback.
-
-  __ PushReturnAddressFrom(scratch4());
-
-  // Abi for CallApiGetter
-  Register api_function_address = r8;
-  Address getter_address = v8::ToCData<Address>(callback->getter());
-  __ Move(api_function_address, getter_address, RelocInfo::EXTERNAL_REFERENCE);
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
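
(Note: the pushes above build the PropertyCallbackArguments vector in memory, and the
STATIC_ASSERTs pin the expected slot order. Spelled out for reference, mirroring the
asserted names and indices:)

  enum PropertyCallbackArgSlot {
    kHolderIndex = 0,
    kIsolateIndex = 1,
    kReturnValueDefaultValueIndex = 2,
    kReturnValueOffset = 3,
    kDataIndex = 4,
    kThisIndex = 5,
    kArgsLength = 6   // total number of slots pushed
  };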
-
-
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ Move(rax, value);
-  __ ret(0);
-}
-
-
-void LoadStubCompiler::GenerateLoadInterceptor(
-    Register holder_reg,
-    Handle<Object> object,
-    Handle<JSObject> interceptor_holder,
-    LookupResult* lookup,
-    Handle<Name> name) {
-  ASSERT(interceptor_holder->HasNamedInterceptor());
-  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
-  // So far the most popular follow-ups for interceptor loads are FIELD
-  // and CALLBACKS, so inline only those; other cases may be added
-  // later.
-  bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
-    if (lookup->IsField()) {
-      compile_followup_inline = true;
-    } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
-      ExecutableAccessorInfo* callback =
-          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
-      compile_followup_inline = callback->getter() != NULL &&
-          callback->IsCompatibleReceiver(*object);
-    }
-  }
-
-  if (compile_followup_inline) {
-    // Compile the interceptor call, followed by inline code to load the
-    // property from further up the prototype chain if the call fails.
-    // Check that the maps haven't changed.
-    ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
-    // Preserve the receiver register explicitly whenever it is different from
-    // the holder and it is needed should the interceptor return without any
-    // result. The CALLBACKS case needs the receiver to be passed into C++ code;
-    // the FIELD case might cause a miss during the prototype check.
-    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
-    bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
-        (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
-    // Save necessary data before invoking an interceptor.
-    // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
-      if (must_preserve_receiver_reg) {
-        __ Push(receiver());
-      }
-      __ Push(holder_reg);
-      __ Push(this->name());
-
-      // Invoke the interceptor.  Note: map checks from the receiver to the
-      // interceptor's holder have already been compiled (see the callers
-      // of this method).
-      CompileCallLoadPropertyWithInterceptor(
-          masm(), receiver(), holder_reg, this->name(), interceptor_holder,
-          IC::kLoadPropertyWithInterceptorOnly);
-
-      // Check if the interceptor provided a value for the property.  If so,
-      // return immediately.
-      Label interceptor_failed;
-      __ CompareRoot(rax, Heap::kNoInterceptorResultSentinelRootIndex);
-      __ j(equal, &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ ret(0);
-
-      __ bind(&interceptor_failed);
-      __ Pop(this->name());
-      __ Pop(holder_reg);
-      if (must_preserve_receiver_reg) {
-        __ Pop(receiver());
-      }
-
-      // Leave the internal frame.
-    }
-
-    GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
-  } else {  // !compile_followup_inline
-    // Call the runtime system to load the interceptor.
-    // Check that the maps haven't changed.
-    __ PopReturnAddressTo(scratch2());
-    PushInterceptorArguments(masm(), receiver(), holder_reg,
-                             this->name(), interceptor_holder);
-    __ PushReturnAddressFrom(scratch2());
-
-    ExternalReference ref = ExternalReference(
-        IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
-    __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
-  }
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = HandlerFrontend(
-      IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
-
-  __ PopReturnAddressTo(scratch1());
-  __ Push(receiver());
-  __ Push(holder_reg);
-  __ Push(callback);  // callback info
-  __ Push(name);
-  __ Push(value());
-  __ PushReturnAddressFrom(scratch1());
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_callback_property =
-      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
-  __ TailCallExternalReference(store_callback_property, 5, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
-    MacroAssembler* masm,
-    Handle<HeapType> type,
-    Register receiver,
-    Handle<JSFunction> setter) {
-  // ----------- S t a t e -------------
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Save value register, so we can restore it later.
-    __ Push(value());
-
-    if (!setter.is_null()) {
-      // Call the JavaScript setter with receiver and value on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ movp(receiver,
-                FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ Push(receiver);
-      __ Push(value());
-      ParameterCount actual(1);
-      ParameterCount expected(setter);
-      __ InvokeFunction(setter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // We have to return the passed value, not the return value of the setter.
-    __ Pop(rax);
-
-    // Restore context register.
-    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  }
-  __ ret(0);
-}
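
(Note: the Pop(rax) above implements JavaScript assignment semantics: the value of an
assignment expression is the stored value, never the setter's return value, so the
stub restores the saved value register instead of keeping the call result. The same
guarantee, as a hedged C++ sketch:)

  // What the stub guarantees, in ordinary C++ terms: the store returns the
  // incoming value and discards whatever the setter call produced.
  template <typename Value, typename Setter>
  Value StoreViaSetter(Setter setter, Value value) {
    setter(value);   // result intentionally ignored
    return value;    // assignment yields the stored value
  }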
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> object,
-    Handle<Name> name) {
-  __ PopReturnAddressTo(scratch1());
-  __ Push(receiver());
-  __ Push(this->name());
-  __ Push(value());
-  __ PushReturnAddressFrom(scratch1());
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_ic_property =
-      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
-  __ TailCallExternalReference(store_ic_property, 3, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-void StoreStubCompiler::GenerateStoreArrayLength() {
-  // Prepare tail call to StoreIC_ArrayLength.
-  __ PopReturnAddressTo(scratch1());
-  __ Push(receiver());
-  __ Push(value());
-  __ PushReturnAddressFrom(scratch1());
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
-                        masm()->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
-  Label miss;
-  __ JumpIfSmi(receiver(), &miss, Label::kNear);
-
-  __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
-  int receiver_count = receiver_maps->length();
-  for (int i = 0; i < receiver_count; ++i) {
-    // Check map and tail call if there's a match.
-    __ Cmp(scratch1(), receiver_maps->at(i));
-    if (transitioned_maps->at(i).is_null()) {
-      __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
-    } else {
-      Label next_map;
-      __ j(not_equal, &next_map, Label::kNear);
-      __ Move(transition_map(),
-              transitioned_maps->at(i),
-              RelocInfo::EMBEDDED_OBJECT);
-      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
-      __ bind(&next_map);
-    }
-  }
-
-  __ bind(&miss);
-
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  return GetICCode(
-      kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
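
(Note: the loop above compiles a linear map dispatch. A hedged C++ sketch of the
decision procedure, with hypothetical types standing in for the emitted
compare-and-jump sequence; when a transition map is present the stub additionally
loads it into transition_map() before the tail call:)

  #include <cstddef>

  struct Map;
  struct Code;

  // Returns the handler whose map matches, or NULL to signal a miss.
  const Code* SelectHandler(const Map* receiver_map,
                            const Map* const* maps,
                            const Code* const* handlers, int count) {
    for (int i = 0; i < count; ++i) {
      if (receiver_map == maps[i]) return handlers[i];
    }
    return NULL;  // falls through to the miss builtin
  }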
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
-                                                      Handle<JSObject> last,
-                                                      Handle<Name> name) {
-  NonexistentHandlerFrontend(type, last, name);
-
-  // Return undefined if maps of the full prototype chain are still the
-  // same and no global property with this name contains a value.
-  __ LoadRoot(rax, Heap::kUndefinedValueRootIndex);
-  __ ret(0);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { rax, rcx, rdx, rbx, rdi, r8 };
-  return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { rdx, rax, rbx, rcx, rdi, r8 };
-  return registers;
-}
-
-
-Register StoreStubCompiler::value() {
-  return rax;
-}
-
-
-Register* StoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
-  return registers;
-}
-
-
-Register* KeyedStoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { rdx, rcx, rbx, rdi, r8 };
-  return registers;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
-                                             Handle<HeapType> type,
-                                             Register receiver,
-                                             Handle<JSFunction> getter) {
-  // ----------- S t a t e -------------
-  //  -- rax    : receiver
-  //  -- rcx    : name
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    if (!getter.is_null()) {
-      // Call the JavaScript getter with the receiver on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ movp(receiver,
-                FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ Push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(getter);
-      __ InvokeFunction(getter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // Restore context register.
-    __ movp(rsi, Operand(rbp, StandardFrameConstants::kContextOffset));
-  }
-  __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<HeapType> type,
-    Handle<GlobalObject> global,
-    Handle<PropertyCell> cell,
-    Handle<Name> name,
-    bool is_dont_delete) {
-  Label miss;
-  // TODO(verwaest): Directly store to rax. Currently we cannot do this, since
-  // rax is used as receiver(), which we would otherwise clobber before a
-  // potential miss.
-  HandlerFrontendHeader(type, receiver(), global, name, &miss);
-
-  // Get the value from the cell.
-  __ Move(rbx, cell);
-  __ movp(rbx, FieldOperand(rbx, PropertyCell::kValueOffset));
-
-  // Check for deleted property if property can actually be deleted.
-  if (!is_dont_delete) {
-    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-    __ j(equal, &miss);
-  } else if (FLAG_debug_code) {
-    __ CompareRoot(rbx, Heap::kTheHoleValueRootIndex);
-    __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
-  }
-
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1);
-  __ movp(rax, rbx);
-  __ ret(0);
-
-  HandlerFrontendFooter(name, &miss);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    TypeHandleList* types,
-    CodeHandleList* handlers,
-    Handle<Name> name,
-    Code::StubType type,
-    IcCheckType check) {
-  Label miss;
-
-  if (check == PROPERTY &&
-      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    __ Cmp(this->name(), name);
-    __ j(not_equal, &miss);
-  }
-
-  Label number_case;
-  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
-  __ JumpIfSmi(receiver(), smi_target);
-
-  Register map_reg = scratch1();
-  __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
-  int receiver_count = types->length();
-  int number_of_handled_maps = 0;
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<HeapType> type = types->at(current);
-    Handle<Map> map = IC::TypeToMap(*type, isolate());
-    if (!map->is_deprecated()) {
-      number_of_handled_maps++;
-      // Check map and tail call if there's a match.
-      __ Cmp(map_reg, map);
-      if (type->Is(HeapType::Number())) {
-        ASSERT(!number_case.is_unused());
-        __ bind(&number_case);
-      }
-      __ j(equal, handlers->at(current), RelocInfo::CODE_TARGET);
-    }
-  }
-  ASSERT(number_of_handled_maps > 0);
-
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  InlineCacheState state =
-      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
-  return GetICCode(kind(), type, name, state);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  Label slow, miss;
-
-  // This stub is meant to be tail-jumped to; the receiver must already
-  // have been verified by the caller not to be a smi.
-
-  __ JumpIfNotSmi(rax, &miss);
-  __ SmiToInteger32(rbx, rax);
-  __ movp(rcx, FieldOperand(rdx, JSObject::kElementsOffset));
-
-  // Check whether the elements is a number dictionary.
-  // rdx: receiver
-  // rax: key
-  // rbx: key as untagged int32
-  // rcx: elements
-  __ LoadFromNumberDictionary(&slow, rcx, rax, rbx, r9, rdi, rax);
-  __ ret(0);
-
-  __ bind(&slow);
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
-  __ bind(&miss);
-  // ----------- S t a t e -------------
-  //  -- rax    : key
-  //  -- rdx    : receiver
-  //  -- rsp[0] : return address
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X64
diff --git a/src/x87/assembler-x87-inl.h b/src/x87/assembler-x87-inl.h
index 4a5583c..6555ccd 100644
--- a/src/x87/assembler-x87-inl.h
+++ b/src/x87/assembler-x87-inl.h
@@ -39,13 +39,13 @@
 
 #include "src/x87/assembler-x87.h"
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/debug.h"
 
 namespace v8 {
 namespace internal {
 
-bool CpuFeatures::SupportsCrankshaft() { return false; }
+bool CpuFeatures::SupportsCrankshaft() { return true; }
 
 
 static const byte kCallOpcode = 0xE8;
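
(Note for the relocation code below: 0xE8 is the IA-32 near CALL opcode with a rel32
operand, so a call's target is recovered from the four bytes that follow it, relative
to the end of the five-byte instruction. A small sketch of that decoding; the helper
name is hypothetical:)

  #include <cstdint>
  #include <cstring>

  uint8_t* CallTargetOf(uint8_t* pc) {
    // Assumes pc points at an 0xE8 call; the displacement is little-endian.
    int32_t rel32;
    std::memcpy(&rel32, pc + 1, sizeof(rel32));
    return pc + 5 + rel32;  // relative to the end of the instruction
  }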
@@ -58,42 +58,42 @@
   if (IsRuntimeEntry(rmode_) || IsCodeTarget(rmode_)) {
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p -= delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   } else if (rmode_ == CODE_AGE_SEQUENCE) {
     if (*pc_ == kCallOpcode) {
       int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
       *p -= delta;  // Relocate entry.
-      if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+      if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
     }
   } else if (rmode_ == JS_RETURN && IsPatchedReturnSequence()) {
     // Special handling of js_return when a break point is set (call
     // instruction has been inserted).
     int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
     *p -= delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   } else if (rmode_ == DEBUG_BREAK_SLOT && IsPatchedDebugBreakSlotSequence()) {
     // Special handling of a debug break slot when a break point is set (call
     // instruction has been inserted).
     int32_t* p = reinterpret_cast<int32_t*>(pc_ + 1);
     *p -= delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   } else if (IsInternalReference(rmode_)) {
     // absolute code pointer inside code object moves with the code object.
     int32_t* p = reinterpret_cast<int32_t*>(pc_);
     *p += delta;  // Relocate entry.
-    if (flush_icache) CPU::FlushICache(p, sizeof(uint32_t));
+    if (flush_icache) CpuFeatures::FlushICache(p, sizeof(uint32_t));
   }
 }
 
 
 Address RelocInfo::target_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   return Assembler::target_address_at(pc_, host_);
 }
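
(Note: this and the following hunks are a mechanical rename: upstream V8 replaced
ASSERT/ASSERT_EQ/ASSERT_GE with DCHECK/DCHECK_EQ/DCHECK_GE, "debug check", which is
compiled out of release builds. A simplified sketch of the semantics, not V8's actual
macro, which reports failures through its own fatal-error machinery:)

  #include <cassert>

  #ifdef DEBUG
  #define DCHECK(condition) assert(condition)
  #else
  #define DCHECK(condition) ((void) 0)
  #endif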
 
 
 Address RelocInfo::target_address_address() {
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_)
                               || rmode_ == EMBEDDED_OBJECT
                               || rmode_ == EXTERNAL_REFERENCE);
   return reinterpret_cast<Address>(pc_);
@@ -116,7 +116,7 @@
                                    ICacheFlushMode icache_flush_mode) {
   Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
-  ASSERT(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
       IsCodeTarget(rmode_)) {
     Object* target_code = Code::GetCodeFromTargetAddress(target);
@@ -127,13 +127,13 @@
 
 
 Object* RelocInfo::target_object() {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Memory::Object_at(pc_);
 }
 
 
 Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   return Memory::Object_Handle_at(pc_);
 }
 
@@ -141,11 +141,10 @@
 void RelocInfo::set_target_object(Object* target,
                                   WriteBarrierMode write_barrier_mode,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
-  ASSERT(!target->IsConsString());
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
   Memory::Object_at(pc_) = target;
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER &&
       host() != NULL &&
@@ -157,13 +156,13 @@
 
 
 Address RelocInfo::target_reference() {
-  ASSERT(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
+  DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
   return Memory::Address_at(pc_);
 }
 
 
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   return reinterpret_cast<Address>(*reinterpret_cast<int32_t*>(pc_));
 }
 
@@ -171,7 +170,7 @@
 void RelocInfo::set_target_runtime_entry(Address target,
                                          WriteBarrierMode write_barrier_mode,
                                          ICacheFlushMode icache_flush_mode) {
-  ASSERT(IsRuntimeEntry(rmode_));
+  DCHECK(IsRuntimeEntry(rmode_));
   if (target_address() != target) {
     set_target_address(target, write_barrier_mode, icache_flush_mode);
   }
@@ -179,14 +178,14 @@
 
 
 Handle<Cell> RelocInfo::target_cell_handle() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = Memory::Address_at(pc_);
   return Handle<Cell>(reinterpret_cast<Cell**>(address));
 }
 
 
 Cell* RelocInfo::target_cell() {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   return Cell::FromValueAddress(Memory::Address_at(pc_));
 }
 
@@ -194,11 +193,11 @@
 void RelocInfo::set_target_cell(Cell* cell,
                                 WriteBarrierMode write_barrier_mode,
                                 ICacheFlushMode icache_flush_mode) {
-  ASSERT(rmode_ == RelocInfo::CELL);
+  DCHECK(rmode_ == RelocInfo::CELL);
   Address address = cell->address() + Cell::kValueOffset;
   Memory::Address_at(pc_) = address;
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   }
   if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
     // TODO(1550) We are passing NULL as a slot because cell can never be on
@@ -210,15 +209,15 @@
 
 
 Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
   return Memory::Object_Handle_at(pc_ + 1);
 }
 
 
 Code* RelocInfo::code_age_stub() {
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
-  ASSERT(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
   return Code::GetCodeFromTargetAddress(
       Assembler::target_address_at(pc_ + 1, host_));
 }
@@ -226,22 +225,22 @@
 
 void RelocInfo::set_code_age_stub(Code* stub,
                                   ICacheFlushMode icache_flush_mode) {
-  ASSERT(*pc_ == kCallOpcode);
-  ASSERT(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  DCHECK(*pc_ == kCallOpcode);
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
   Assembler::set_target_address_at(pc_ + 1, host_, stub->instruction_start(),
                                    icache_flush_mode);
 }
 
 
 Address RelocInfo::call_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return Assembler::target_address_at(pc_ + 1, host_);
 }
 
 
 void RelocInfo::set_call_address(Address target) {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   Assembler::set_target_address_at(pc_ + 1, host_, target);
   if (host() != NULL) {
@@ -263,7 +262,7 @@
 
 
 Object** RelocInfo::call_object_address() {
-  ASSERT((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
          (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
   return reinterpret_cast<Object**>(pc_ + 1);
 }
@@ -295,14 +294,14 @@
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     visitor->VisitEmbeddedPointer(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     visitor->VisitCodeTarget(this);
   } else if (mode == RelocInfo::CELL) {
     visitor->VisitCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     visitor->VisitCodeAgeSequence(this);
   } else if (((RelocInfo::IsJSReturn(mode) &&
@@ -322,14 +321,14 @@
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
     StaticVisitor::VisitEmbeddedPointer(heap, this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeTarget(mode)) {
     StaticVisitor::VisitCodeTarget(heap, this);
   } else if (mode == RelocInfo::CELL) {
     StaticVisitor::VisitCell(heap, this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(this);
-    CPU::FlushICache(pc_, sizeof(Address));
+    CpuFeatures::FlushICache(pc_, sizeof(Address));
   } else if (RelocInfo::IsCodeAgeSequence(mode)) {
     StaticVisitor::VisitCodeAgeSequence(heap, this);
   } else if (heap->isolate()->debug()->has_break_points() &&
@@ -368,7 +367,7 @@
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
   if (obj->IsHeapObject()) {
-    ASSERT(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
     x_ = reinterpret_cast<intptr_t>(handle.location());
     rmode_ = RelocInfo::EMBEDDED_OBJECT;
   } else {
@@ -401,7 +400,7 @@
   AllowDeferredHandleDereference heap_object_check;
   // Verify all Objects referred by code are NOT in new space.
   Object* obj = *handle;
-  ASSERT(!isolate()->heap()->InNewSpace(obj));
+  DCHECK(!isolate()->heap()->InNewSpace(obj));
   if (obj->IsHeapObject()) {
     emit(reinterpret_cast<intptr_t>(handle.location()),
          RelocInfo::EMBEDDED_OBJECT);
@@ -454,7 +453,7 @@
 
 
 void Assembler::emit_w(const Immediate& x) {
-  ASSERT(RelocInfo::IsNone(x.rmode_));
+  DCHECK(RelocInfo::IsNone(x.rmode_));
   uint16_t value = static_cast<uint16_t>(x.x_);
   reinterpret_cast<uint16_t*>(pc_)[0] = value;
   pc_ += sizeof(uint16_t);
@@ -474,7 +473,7 @@
   int32_t* p = reinterpret_cast<int32_t*>(pc);
   *p = target - (pc + sizeof(int32_t));
   if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-    CPU::FlushICache(p, sizeof(int32_t));
+    CpuFeatures::FlushICache(p, sizeof(int32_t));
   }
 }
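
(Note: the FlushICache calls here are kept for portability. On IA-32 the instruction
cache stays coherent with ordinary stores, so the flush is effectively a no-op on this
architecture, but the identical patching pattern needs a real flush on ARM and MIPS.
A hedged sketch of the pattern; the helper name is hypothetical:)

  #include <cstdint>
  #include <cstring>

  void PatchDisplacement(uint8_t* pc, int32_t value) {
    std::memcpy(pc, &value, sizeof(value));
    // On non-x86 targets an explicit instruction-cache flush would be
    // required here before the patched code may be executed.
  }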
 
@@ -484,6 +483,11 @@
 }
 
 
+Address Assembler::break_address_from_return_address(Address pc) {
+  return pc - Assembler::kPatchDebugBreakSlotReturnOffset;
+}
+
+
 Displacement Assembler::disp_at(Label* L) {
   return Displacement(long_at(L->pos()));
 }
@@ -505,7 +509,7 @@
   byte disp = 0x00;
   if (L->is_near_linked()) {
     int offset = L->near_link_pos() - pc_offset();
-    ASSERT(is_int8(offset));
+    DCHECK(is_int8(offset));
     disp = static_cast<byte>(offset & 0xFF);
   }
   L->link_to(pc_offset(), Label::kNear);
@@ -514,30 +518,30 @@
 
 
 void Operand::set_modrm(int mod, Register rm) {
-  ASSERT((mod & -4) == 0);
+  DCHECK((mod & -4) == 0);
   buf_[0] = mod << 6 | rm.code();
   len_ = 1;
 }
 
 
 void Operand::set_sib(ScaleFactor scale, Register index, Register base) {
-  ASSERT(len_ == 1);
-  ASSERT((scale & -4) == 0);
+  DCHECK(len_ == 1);
+  DCHECK((scale & -4) == 0);
   // Use SIB with no index register only for base esp.
-  ASSERT(!index.is(esp) || base.is(esp));
+  DCHECK(!index.is(esp) || base.is(esp));
   buf_[1] = scale << 6 | index.code() << 3 | base.code();
   len_ = 2;
 }
 
 
 void Operand::set_disp8(int8_t disp) {
-  ASSERT(len_ == 1 || len_ == 2);
+  DCHECK(len_ == 1 || len_ == 2);
   *reinterpret_cast<int8_t*>(&buf_[len_++]) = disp;
 }
 
 
 void Operand::set_dispr(int32_t disp, RelocInfo::Mode rmode) {
-  ASSERT(len_ == 1 || len_ == 2);
+  DCHECK(len_ == 1 || len_ == 2);
   int32_t* p = reinterpret_cast<int32_t*>(&buf_[len_]);
   *p = disp;
   len_ += sizeof(int32_t);
@@ -556,6 +560,12 @@
   set_dispr(disp, rmode);
 }
 
+
+Operand::Operand(Immediate imm) {
+  // [disp/r]
+  set_modrm(0, ebp);
+  set_dispr(imm.x_, imm.rmode_);
+}
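
(Note on the new constructor above: in IA-32 ModR/M encoding, mod=00 with r/m=101,
which happens to be ebp's register code, does not mean [ebp]; it selects a bare
32-bit displacement, [disp32]. The ebp argument is therefore an encoding artifact,
not a register use. Sketch of the byte layout:)

  #include <cstdint>

  // mod (2 bits) | reg (3 bits) | rm (3 bits)
  uint8_t ModRM(int mod, int reg, int rm) {
    return static_cast<uint8_t>((mod << 6) | (reg << 3) | rm);
  }
  // ModRM(0, reg, 5) is followed by a 4-byte absolute displacement.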
 } }  // namespace v8::internal
 
 #endif  // V8_X87_ASSEMBLER_X87_INL_H_
diff --git a/src/x87/assembler-x87.cc b/src/x87/assembler-x87.cc
index f2082c2..9e1c883 100644
--- a/src/x87/assembler-x87.cc
+++ b/src/x87/assembler-x87.cc
@@ -38,6 +38,8 @@
 
 #if V8_TARGET_ARCH_X87
 
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
 #include "src/disassembler.h"
 #include "src/macro-assembler.h"
 #include "src/serialize.h"
@@ -49,7 +51,7 @@
 // Implementation of CpuFeatures
 
 void CpuFeatures::ProbeImpl(bool cross_compile) {
-  CPU cpu;
+  base::CPU cpu;
 
   // Only use statically determined features for cross compile (snapshot).
   if (cross_compile) return;
@@ -64,14 +66,14 @@
 // Implementation of Displacement
 
 void Displacement::init(Label* L, Type type) {
-  ASSERT(!L->is_bound());
+  DCHECK(!L->is_bound());
   int next = 0;
   if (L->is_linked()) {
     next = L->pos();
-    ASSERT(next > 0);  // Displacements must be at positions > 0
+    DCHECK(next > 0);  // Displacements must be at positions > 0
   }
   // Ensure that we _never_ overflow the next field.
-  ASSERT(NextField::is_valid(Assembler::kMaximalBufferSize));
+  DCHECK(NextField::is_valid(Assembler::kMaximalBufferSize));
   data_ = NextField::encode(next) | TypeField::encode(type);
 }
 
@@ -107,7 +109,7 @@
   }
 
   // Indicate that code has changed.
-  CPU::FlushICache(pc_, instruction_count);
+  CpuFeatures::FlushICache(pc_, instruction_count);
 }
 
 
@@ -131,11 +133,11 @@
   patcher.masm()->call(target, RelocInfo::NONE32);
 
   // Check that the size of the code generated is as expected.
-  ASSERT_EQ(kCallCodeSize,
+  DCHECK_EQ(kCallCodeSize,
             patcher.masm()->SizeOfCodeGeneratedSince(&check_codesize));
 
   // Add the requested number of int3 instructions after the call.
-  ASSERT_GE(guard_bytes, 0);
+  DCHECK_GE(guard_bytes, 0);
   for (int i = 0; i < guard_bytes; i++) {
     patcher.masm()->int3();
   }
@@ -170,7 +172,7 @@
                  ScaleFactor scale,
                  int32_t disp,
                  RelocInfo::Mode rmode) {
-  ASSERT(!index.is(esp));  // illegal addressing mode
+  DCHECK(!index.is(esp));  // illegal addressing mode
   // [base + index*scale + disp/r]
   if (disp == 0 && RelocInfo::IsNone(rmode) && !base.is(ebp)) {
     // [base + index*scale]
@@ -194,7 +196,7 @@
                  ScaleFactor scale,
                  int32_t disp,
                  RelocInfo::Mode rmode) {
-  ASSERT(!index.is(esp));  // illegal addressing mode
+  DCHECK(!index.is(esp));  // illegal addressing mode
   // [index*scale + disp/r]
   set_modrm(0, esp);
   set_sib(scale, index, ebp);
@@ -214,7 +216,7 @@
 
 
 Register Operand::reg() const {
-  ASSERT(is_reg_only());
+  DCHECK(is_reg_only());
   return Register::from_code(buf_[0] & 0x07);
 }
 
@@ -254,7 +256,7 @@
 void Assembler::GetCode(CodeDesc* desc) {
   // Finalize code (at this point overflow() may be true, but the gap ensures
   // that we are still not overlapping instructions and relocation info).
-  ASSERT(pc_ <= reloc_info_writer.pos());  // No overlap.
+  DCHECK(pc_ <= reloc_info_writer.pos());  // No overlap.
   // Set up code descriptor.
   desc->buffer = buffer_;
   desc->buffer_size = buffer_size_;
@@ -265,7 +267,7 @@
 
 
 void Assembler::Align(int m) {
-  ASSERT(IsPowerOf2(m));
+  DCHECK(base::bits::IsPowerOfTwo32(m));
   int mask = m - 1;
   int addr = pc_offset();
   Nop((m - (addr & mask)) & mask);
@@ -362,7 +364,7 @@
 
 
 void Assembler::pop(Register dst) {
-  ASSERT(reloc_info_writer.last_pc() != NULL);
+  DCHECK(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
   EMIT(0x58 | dst.code());
 }
@@ -566,6 +568,13 @@
 }
 
 
+void Assembler::xchg(Register dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x87);
+  emit_operand(dst, src);
+}
+
+
 void Assembler::adc(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(2, Operand(dst), Immediate(imm32));
@@ -594,7 +603,7 @@
 
 
 void Assembler::add(const Operand& dst, const Immediate& x) {
-  ASSERT(reloc_info_writer.last_pc() != NULL);
+  DCHECK(reloc_info_writer.last_pc() != NULL);
   EnsureSpace ensure_space(this);
   emit_arith(0, dst, x);
 }
@@ -660,7 +669,7 @@
 
 
 void Assembler::cmpw(const Operand& op, Immediate imm16) {
-  ASSERT(imm16.is_int16());
+  DCHECK(imm16.is_int16());
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0x81);
@@ -749,10 +758,17 @@
 }
 
 
-void Assembler::idiv(Register src) {
+void Assembler::idiv(const Operand& src) {
   EnsureSpace ensure_space(this);
   EMIT(0xF7);
-  EMIT(0xF8 | src.code());
+  emit_operand(edi, src);
+}
+
+
+void Assembler::div(const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(esi, src);
 }
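
(Note: in the one-operand 0xF7 group, and in the shift group 0xD1/0xC1/0xD3 and the
x87 control-word instructions further below, the ModR/M reg field is an opcode
extension rather than a register. Passing a Register whose code equals the extension
is the assembler's idiom for supplying it: edi encodes /7 (idiv, sar, fnstcw), esi /6
(div), ebp /5 (shr, fldcw), esp /4 (shl), ebx /3 (neg), edx /2 (not_). The register
numbers, for reference:)

  // IA-32 register codes, reused as /digit opcode extensions above.
  enum RegCode {
    kEax = 0, kEcx = 1, kEdx = 2, kEbx = 3,
    kEsp = 4, kEbp = 5, kEsi = 6, kEdi = 7
  };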
 
 
@@ -772,14 +788,19 @@
 
 
 void Assembler::imul(Register dst, Register src, int32_t imm32) {
+  imul(dst, Operand(src), imm32);
+}
+
+
+void Assembler::imul(Register dst, const Operand& src, int32_t imm32) {
   EnsureSpace ensure_space(this);
   if (is_int8(imm32)) {
     EMIT(0x6B);
-    EMIT(0xC0 | dst.code() << 3 | src.code());
+    emit_operand(dst, src);
     EMIT(imm32);
   } else {
     EMIT(0x69);
-    EMIT(0xC0 | dst.code() << 3 | src.code());
+    emit_operand(dst, src);
     emit(imm32);
   }
 }
@@ -819,6 +840,13 @@
 }
 
 
+void Assembler::neg(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(ebx, dst);
+}
+
+
 void Assembler::not_(Register dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xF7);
@@ -826,6 +854,13 @@
 }
 
 
+void Assembler::not_(const Operand& dst) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF7);
+  emit_operand(edx, dst);
+}
+
+
 void Assembler::or_(Register dst, int32_t imm32) {
   EnsureSpace ensure_space(this);
   emit_arith(1, Operand(dst), Immediate(imm32));
@@ -854,7 +889,7 @@
 
 void Assembler::rcl(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
     EMIT(0xD0 | dst.code());
@@ -868,7 +903,7 @@
 
 void Assembler::rcr(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
     EMIT(0xD8 | dst.code());
@@ -882,7 +917,7 @@
 
 void Assembler::ror(Register dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
     EMIT(0xC8 | dst.code());
@@ -901,24 +936,24 @@
 }
 
 
-void Assembler::sar(Register dst, uint8_t imm8) {
+void Assembler::sar(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xF8 | dst.code());
+    emit_operand(edi, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xF8 | dst.code());
+    emit_operand(edi, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::sar_cl(Register dst) {
+void Assembler::sar_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xF8 | dst.code());
+  emit_operand(edi, dst);
 }
 
 
@@ -937,24 +972,24 @@
 }
 
 
-void Assembler::shl(Register dst, uint8_t imm8) {
+void Assembler::shl(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xE0 | dst.code());
+    emit_operand(esp, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xE0 | dst.code());
+    emit_operand(esp, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::shl_cl(Register dst) {
+void Assembler::shl_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xE0 | dst.code());
+  emit_operand(esp, dst);
 }
 
 
@@ -966,24 +1001,24 @@
 }
 
 
-void Assembler::shr(Register dst, uint8_t imm8) {
+void Assembler::shr(const Operand& dst, uint8_t imm8) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint5(imm8));  // illegal shift count
+  DCHECK(is_uint5(imm8));  // illegal shift count
   if (imm8 == 1) {
     EMIT(0xD1);
-    EMIT(0xE8 | dst.code());
+    emit_operand(ebp, dst);
   } else {
     EMIT(0xC1);
-    EMIT(0xE8 | dst.code());
+    emit_operand(ebp, dst);
     EMIT(imm8);
   }
 }
 
 
-void Assembler::shr_cl(Register dst) {
+void Assembler::shr_cl(const Operand& dst) {
   EnsureSpace ensure_space(this);
   EMIT(0xD3);
-  EMIT(0xE8 | dst.code());
+  emit_operand(ebp, dst);
 }
 
 
@@ -1155,7 +1190,7 @@
 
 void Assembler::ret(int imm16) {
   EnsureSpace ensure_space(this);
-  ASSERT(is_uint16(imm16));
+  DCHECK(is_uint16(imm16));
   if (imm16 == 0) {
     EMIT(0xC3);
   } else {
@@ -1200,7 +1235,7 @@
 
 void Assembler::bind_to(Label* L, int pos) {
   EnsureSpace ensure_space(this);
-  ASSERT(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
   while (L->is_linked()) {
     Displacement disp = disp_at(L);
     int fixup_pos = L->pos();
@@ -1209,7 +1244,7 @@
       long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag);
     } else {
       if (disp.type() == Displacement::UNCONDITIONAL_JUMP) {
-        ASSERT(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
+        DCHECK(byte_at(fixup_pos - 1) == 0xE9);  // jmp expected
       }
       // Relative address, relative to point after address.
       int imm32 = pos - (fixup_pos + sizeof(int32_t));
@@ -1221,7 +1256,7 @@
     int fixup_pos = L->near_link_pos();
     int offset_to_next =
         static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos)));
-    ASSERT(offset_to_next <= 0);
+    DCHECK(offset_to_next <= 0);
     // Relative address, relative to point after address.
     int disp = pos - fixup_pos - sizeof(int8_t);
     CHECK(0 <= disp && disp <= 127);
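
The displacement arithmetic in bind_to above is easy to get off by one: x86 relative jumps are encoded relative to the end of the displacement field, not its start. A small self-checking sketch of that rule (the function name is illustrative):

#include <cassert>
#include <cstdint>

// rel32 for a jmp/call whose 4-byte displacement starts at fixup_pos.
int32_t Rel32(int32_t target_pos, int32_t fixup_pos) {
  return target_pos - (fixup_pos + 4);  // 4 == sizeof(int32_t)
}

int main() {
  assert(Rel32(20, 10) == 6);   // forward: execution resumes at 14
  assert(Rel32(0, 5) == -9);    // backward: execution resumes at 9
  return 0;
}
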
@@ -1238,7 +1273,7 @@
 
 void Assembler::bind(Label* L) {
   EnsureSpace ensure_space(this);
-  ASSERT(!L->is_bound());  // label can only be bound once
+  DCHECK(!L->is_bound());  // label can only be bound once
   bind_to(L, pc_offset());
 }
 
@@ -1249,7 +1284,7 @@
   if (L->is_bound()) {
     const int long_size = 5;
     int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
+    DCHECK(offs <= 0);
     // 1110 1000 #32-bit disp.
     EMIT(0xE8);
     emit(offs - long_size);
@@ -1264,7 +1299,7 @@
 void Assembler::call(byte* entry, RelocInfo::Mode rmode) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE8);
   if (RelocInfo::IsRuntimeEntry(rmode)) {
     emit(reinterpret_cast<uint32_t>(entry), rmode);
@@ -1298,7 +1333,7 @@
                      TypeFeedbackId ast_id) {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
-  ASSERT(RelocInfo::IsCodeTarget(rmode)
+  DCHECK(RelocInfo::IsCodeTarget(rmode)
       || rmode == RelocInfo::CODE_AGE_SEQUENCE);
   EMIT(0xE8);
   emit(code, rmode, ast_id);
@@ -1311,7 +1346,7 @@
     const int short_size = 2;
     const int long_size  = 5;
     int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
+    DCHECK(offs <= 0);
     if (is_int8(offs - short_size)) {
       // 1110 1011 #8-bit disp.
       EMIT(0xEB);
@@ -1334,7 +1369,7 @@
 
 void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  ASSERT(!RelocInfo::IsCodeTarget(rmode));
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
   if (RelocInfo::IsRuntimeEntry(rmode)) {
     emit(reinterpret_cast<uint32_t>(entry), rmode);
@@ -1353,7 +1388,7 @@
 
 void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  ASSERT(RelocInfo::IsCodeTarget(rmode));
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
   EMIT(0xE9);
   emit(code, rmode);
 }
@@ -1361,12 +1396,12 @@
 
 void Assembler::j(Condition cc, Label* L, Label::Distance distance) {
   EnsureSpace ensure_space(this);
-  ASSERT(0 <= cc && static_cast<int>(cc) < 16);
+  DCHECK(0 <= cc && static_cast<int>(cc) < 16);
   if (L->is_bound()) {
     const int short_size = 2;
     const int long_size  = 6;
     int offs = L->pos() - pc_offset();
-    ASSERT(offs <= 0);
+    DCHECK(offs <= 0);
     if (is_int8(offs - short_size)) {
       // 0111 tttn #8-bit disp
       EMIT(0x70 | cc);
@@ -1393,7 +1428,7 @@
 
 void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) {
   EnsureSpace ensure_space(this);
-  ASSERT((0 <= cc) && (static_cast<int>(cc) < 16));
+  DCHECK((0 <= cc) && (static_cast<int>(cc) < 16));
   // 0000 1111 1000 tttn #32-bit disp.
   EMIT(0x0F);
   EMIT(0x80 | cc);
@@ -1484,6 +1519,20 @@
 }
 
 
+void Assembler::fldcw(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  emit_operand(ebp, adr);
+}
+
+
+void Assembler::fnstcw(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  emit_operand(edi, adr);
+}
+
+
 void Assembler::fstp_d(const Operand& adr) {
   EnsureSpace ensure_space(this);
   EMIT(0xDD);
@@ -1520,7 +1569,7 @@
 
 
 void Assembler::fisttp_s(const Operand& adr) {
-  ASSERT(IsEnabled(SSE3));
+  DCHECK(IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   EMIT(0xDB);
   emit_operand(ecx, adr);
@@ -1528,7 +1577,7 @@
 
 
 void Assembler::fisttp_d(const Operand& adr) {
-  ASSERT(IsEnabled(SSE3));
+  DCHECK(IsEnabled(SSE3));
   EnsureSpace ensure_space(this);
   EMIT(0xDD);
   emit_operand(ecx, adr);
@@ -1563,6 +1612,13 @@
 }
 
 
+void Assembler::fsqrt() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xFA);
+}
+
+
 void Assembler::fcos() {
   EnsureSpace ensure_space(this);
   EMIT(0xD9);
@@ -1624,6 +1680,13 @@
 }
 
 
+void Assembler::fadd_d(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xDC);
+  emit_operand(eax, adr);
+}
+
+
 void Assembler::fsub(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDC, 0xE8, i);
@@ -1737,6 +1800,13 @@
 }
 
 
+void Assembler::fxam() {
+  EnsureSpace ensure_space(this);
+  EMIT(0xD9);
+  EMIT(0xE5);
+}
+
+
 void Assembler::fucomp(int i) {
   EnsureSpace ensure_space(this);
   emit_farith(0xDD, 0xE8, i);
@@ -1798,6 +1868,20 @@
 }
 
 
+void Assembler::fnsave(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xDD);
+  emit_operand(esi, adr);
+}
+
+
+void Assembler::frstor(const Operand& adr) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xDD);
+  emit_operand(esp, adr);
+}
+
+
 void Assembler::sahf() {
   EnsureSpace ensure_space(this);
   EMIT(0x9E);
@@ -1805,7 +1889,7 @@
 
 
 void Assembler::setcc(Condition cc, Register reg) {
-  ASSERT(reg.is_byte_register());
+  DCHECK(reg.is_byte_register());
   EnsureSpace ensure_space(this);
   EMIT(0x0F);
   EMIT(0x90 | cc);
@@ -1841,16 +1925,13 @@
 
 
 void Assembler::GrowBuffer() {
-  ASSERT(buffer_overflow());
+  DCHECK(buffer_overflow());
   if (!own_buffer_) FATAL("external code buffer is too small");
 
   // Compute new buffer size.
   CodeDesc desc;  // the new buffer
-  if (buffer_size_ < 4*KB) {
-    desc.buffer_size = 4*KB;
-  } else {
-    desc.buffer_size = 2*buffer_size_;
-  }
+  desc.buffer_size = 2 * buffer_size_;
+
   // Some internal data structures overflow for very large buffers,
   // they must ensure that kMaximalBufferSize is not too large.
   if ((desc.buffer_size > kMaximalBufferSize) ||
@@ -1876,13 +1957,7 @@
   MemMove(rc_delta + reloc_info_writer.pos(), reloc_info_writer.pos(),
           desc.reloc_size);
 
-  // Switch buffers.
-  if (isolate()->assembler_spare_buffer() == NULL &&
-      buffer_size_ == kMinimalBufferSize) {
-    isolate()->set_assembler_spare_buffer(buffer_);
-  } else {
-    DeleteArray(buffer_);
-  }
+  DeleteArray(buffer_);
   buffer_ = desc.buffer;
   buffer_size_ = desc.buffer_size;
   pc_ += pc_delta;
@@ -1900,14 +1975,14 @@
     }
   }
 
-  ASSERT(!buffer_overflow());
+  DCHECK(!buffer_overflow());
 }
 
 
 void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
-  ASSERT(is_uint8(op1) && is_uint8(op2));  // wrong opcode
-  ASSERT(is_uint8(imm8));
-  ASSERT((op1 & 0x01) == 0);  // should be 8bit operation
+  DCHECK(is_uint8(op1) && is_uint8(op2));  // wrong opcode
+  DCHECK(is_uint8(imm8));
+  DCHECK((op1 & 0x01) == 0);  // should be 8bit operation
   EMIT(op1);
   EMIT(op2 | dst.code());
   EMIT(imm8);
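
The GrowBuffer change above drops both the 4 KB floor and the spare-buffer recycling, leaving a plain double-or-die policy. A compressed sketch of that policy, with illustrative names and limits rather than V8's:

#include <cstdio>
#include <cstdlib>
#include <cstring>

struct CodeBuffer {
  char* data;
  int size;
  static const int kMaximalSize = 512 * 1024 * 1024;

  void Grow() {
    int new_size = 2 * size;                   // always double
    if (new_size > kMaximalSize) {
      std::fprintf(stderr, "code buffer too large\n");
      std::abort();
    }
    char* fresh = static_cast<char*>(std::malloc(new_size));
    std::memcpy(fresh, data, size);
    std::free(data);                           // no spare-buffer cache
    data = fresh;
    size = new_size;
  }
};

int main() {
  CodeBuffer b{static_cast<char*>(std::malloc(4096)), 4096};
  b.Grow();
  std::printf("grew to %d bytes\n", b.size);   // 8192
  std::free(b.data);
  return 0;
}
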
@@ -1915,7 +1990,7 @@
 
 
 void Assembler::emit_arith(int sel, Operand dst, const Immediate& x) {
-  ASSERT((0 <= sel) && (sel <= 7));
+  DCHECK((0 <= sel) && (sel <= 7));
   Register ireg = { sel };
   if (x.is_int8()) {
     EMIT(0x83);  // using a sign-extended 8-bit immediate.
@@ -1934,7 +2009,7 @@
 
 void Assembler::emit_operand(Register reg, const Operand& adr) {
   const unsigned length = adr.len_;
-  ASSERT(length > 0);
+  DCHECK(length > 0);
 
   // Emit updated ModRM byte containing the given register.
   pc_[0] = (adr.buf_[0] & ~0x38) | (reg.code() << 3);
@@ -1953,8 +2028,8 @@
 
 
 void Assembler::emit_farith(int b1, int b2, int i) {
-  ASSERT(is_uint8(b1) && is_uint8(b2));  // wrong opcode
-  ASSERT(0 <= i &&  i < 8);  // illegal stack offset
+  DCHECK(is_uint8(b1) && is_uint8(b2));  // wrong opcode
+  DCHECK(0 <= i &&  i < 8);  // illegal stack offset
   EMIT(b1);
   EMIT(b2 + i);
 }
@@ -1973,7 +2048,7 @@
 
 
 void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
-  ASSERT(!RelocInfo::IsNone(rmode));
+  DCHECK(!RelocInfo::IsNone(rmode));
   // Don't record external references unless the heap will be serialized.
   if (rmode == RelocInfo::EXTERNAL_REFERENCE &&
       !serializer_enabled() && !emit_debug_code()) {
@@ -1986,14 +2061,14 @@
 
 Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return isolate->factory()->empty_constant_pool_array();
 }
 
 
 void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
   // No out-of-line constant pool support.
-  ASSERT(!FLAG_enable_ool_constant_pool);
+  DCHECK(!FLAG_enable_ool_constant_pool);
   return;
 }
 
diff --git a/src/x87/assembler-x87.h b/src/x87/assembler-x87.h
index 1624167..d37c9d7 100644
--- a/src/x87/assembler-x87.h
+++ b/src/x87/assembler-x87.h
@@ -78,8 +78,8 @@
   static inline Register FromAllocationIndex(int index);
 
   static Register from_code(int code) {
-    ASSERT(code >= 0);
-    ASSERT(code < kNumRegisters);
+    DCHECK(code >= 0);
+    DCHECK(code < kNumRegisters);
     Register r = { code };
     return r;
   }
@@ -88,11 +88,11 @@
   // eax, ebx, ecx and edx are byte registers, the rest are not.
   bool is_byte_register() const { return code_ <= 3; }
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
   int bit() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return 1 << code_;
   }
 
@@ -122,7 +122,7 @@
 
 
 inline const char* Register::AllocationIndexToString(int index) {
-  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
   // This is the mapping of allocation indices to registers.
   const char* const kNames[] = { "eax", "ecx", "edx", "ebx", "esi", "edi" };
   return kNames[index];
@@ -130,19 +130,19 @@
 
 
 inline int Register::ToAllocationIndex(Register reg) {
-  ASSERT(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
+  DCHECK(reg.is_valid() && !reg.is(esp) && !reg.is(ebp));
   return (reg.code() >= 6) ? reg.code() - 2 : reg.code();
 }
 
 
 inline Register Register::FromAllocationIndex(int index)  {
-  ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
   return (index >= 4) ? from_code(index + 2) : from_code(index);
 }
 
 
 struct X87Register {
-  static const int kMaxNumAllocatableRegisters = 8;
+  static const int kMaxNumAllocatableRegisters = 6;
   static const int kMaxNumRegisters = 8;
   static int NumAllocatableRegisters() {
     return kMaxNumAllocatableRegisters;
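
The ToAllocationIndex/FromAllocationIndex pair above skips esp (code 4) and ebp (code 5), folding esi/edi down to indices 4 and 5. A self-checking sketch of that bijection; free functions here stand in for the member functions:

#include <cassert>

int ToAllocationIndex(int code) { return code >= 6 ? code - 2 : code; }
int FromAllocationIndex(int index) { return index >= 4 ? index + 2 : index; }

int main() {
  const int codes[] = {0, 1, 2, 3, 6, 7};  // eax ecx edx ebx esi edi
  for (int i = 0; i < 6; ++i) {
    assert(ToAllocationIndex(codes[i]) == i);
    assert(FromAllocationIndex(i) == codes[i]);
  }
  return 0;
}
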
@@ -153,7 +153,7 @@
   }
 
   static const char* AllocationIndexToString(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     const char* const names[] = {
       "stX_0", "stX_1", "stX_2", "stX_3", "stX_4",
       "stX_5", "stX_6", "stX_7"
@@ -162,7 +162,7 @@
   }
 
   static X87Register FromAllocationIndex(int index) {
-    ASSERT(index >= 0 && index < kMaxNumAllocatableRegisters);
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
     X87Register result;
     result.code_ = index;
     return result;
@@ -173,7 +173,7 @@
   }
 
   int code() const {
-    ASSERT(is_valid());
+    DCHECK(is_valid());
     return code_;
   }
 
@@ -292,6 +292,7 @@
   int x_;
   RelocInfo::Mode rmode_;
 
+  friend class Operand;
   friend class Assembler;
   friend class MacroAssembler;
 };
@@ -314,9 +315,14 @@
 
 class Operand BASE_EMBEDDED {
  public:
+  // reg
+  INLINE(explicit Operand(Register reg));
+
   // [disp/r]
   INLINE(explicit Operand(int32_t disp, RelocInfo::Mode rmode));
-  // disp only must always be relocated
+
+  // [disp/r]
+  INLINE(explicit Operand(Immediate imm));
 
   // [base + disp/r]
   explicit Operand(Register base, int32_t disp,
@@ -353,6 +359,10 @@
                    RelocInfo::CELL);
   }
 
+  static Operand ForRegisterPlusImmediate(Register base, Immediate imm) {
+    return Operand(base, imm.x_, imm.rmode_);
+  }
+
   // Returns true if this Operand is a wrapper for the specified register.
   bool is_reg(Register reg) const;
 
@@ -364,9 +374,6 @@
   Register reg() const;
 
  private:
-  // reg
-  INLINE(explicit Operand(Register reg));
-
   // Set the ModRM byte without an encoded 'reg' register. The
   // register is encoded later as part of the emit_operand operation.
   inline void set_modrm(int mod, Register rm);
@@ -383,7 +390,6 @@
 
   friend class Assembler;
   friend class MacroAssembler;
-  friend class LCodeGen;
 };
 
 
@@ -502,6 +508,9 @@
   // of that call in the instruction stream.
   inline static Address target_address_from_return_address(Address pc);
 
+  // Return the code target address of the patched debug break slot.
+  inline static Address break_address_from_return_address(Address pc);
+
   // This sets the branch destination (which is in the instruction on x86).
   // This is for calls and branches within generated code.
   inline static void deserialization_set_special_target_at(
@@ -630,8 +639,9 @@
   void rep_stos();
   void stos();
 
-  // Exchange two registers
+  // Exchange
   void xchg(Register dst, Register src);
+  void xchg(Register dst, const Operand& src);
 
   // Arithmetics
   void adc(Register dst, int32_t imm32);
@@ -673,13 +683,17 @@
 
   void cdq();
 
-  void idiv(Register src);
+  void idiv(Register src) { idiv(Operand(src)); }
+  void idiv(const Operand& src);
+  void div(Register src) { div(Operand(src)); }
+  void div(const Operand& src);
 
   // Signed multiply instructions.
   void imul(Register src);                               // edx:eax = eax * src.
   void imul(Register dst, Register src) { imul(dst, Operand(src)); }
   void imul(Register dst, const Operand& src);           // dst = dst * src.
   void imul(Register dst, Register src, int32_t imm32);  // dst = src * imm32.
+  void imul(Register dst, const Operand& src, int32_t imm32);
 
   void inc(Register dst);
   void inc(const Operand& dst);
@@ -690,8 +704,10 @@
   void mul(Register src);                                // edx:eax = eax * reg.
 
   void neg(Register dst);
+  void neg(const Operand& dst);
 
   void not_(Register dst);
+  void not_(const Operand& dst);
 
   void or_(Register dst, int32_t imm32);
   void or_(Register dst, Register src) { or_(dst, Operand(src)); }
@@ -705,22 +721,28 @@
   void ror(Register dst, uint8_t imm8);
   void ror_cl(Register dst);
 
-  void sar(Register dst, uint8_t imm8);
-  void sar_cl(Register dst);
+  void sar(Register dst, uint8_t imm8) { sar(Operand(dst), imm8); }
+  void sar(const Operand& dst, uint8_t imm8);
+  void sar_cl(Register dst) { sar_cl(Operand(dst)); }
+  void sar_cl(const Operand& dst);
 
   void sbb(Register dst, const Operand& src);
 
   void shld(Register dst, Register src) { shld(dst, Operand(src)); }
   void shld(Register dst, const Operand& src);
 
-  void shl(Register dst, uint8_t imm8);
-  void shl_cl(Register dst);
+  void shl(Register dst, uint8_t imm8) { shl(Operand(dst), imm8); }
+  void shl(const Operand& dst, uint8_t imm8);
+  void shl_cl(Register dst) { shl_cl(Operand(dst)); }
+  void shl_cl(const Operand& dst);
 
   void shrd(Register dst, Register src) { shrd(dst, Operand(src)); }
   void shrd(Register dst, const Operand& src);
 
-  void shr(Register dst, uint8_t imm8);
-  void shr_cl(Register dst);
+  void shr(Register dst, uint8_t imm8) { shr(Operand(dst), imm8); }
+  void shr(const Operand& dst, uint8_t imm8);
+  void shr_cl(Register dst) { shr_cl(Operand(dst)); }
+  void shr_cl(const Operand& dst);
 
   void sub(Register dst, const Immediate& imm) { sub(Operand(dst), imm); }
   void sub(const Operand& dst, const Immediate& x);
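
Each shift above now has a single Operand implementation plus an inline Register forwarder, so the register and memory forms cannot drift apart. A toy version of the idiom with illustrative types (the real Operand carries a full ModRM/SIB buffer):

#include <cstdio>

struct Register { int code; };

struct Operand {
  explicit Operand(Register r) : modrm(0xC0 | r.code) {}  // mod=11 form
  int modrm;
};

// One real implementation, on Operand...
void shr(const Operand& dst, int imm8) {
  std::printf("C1 %02X %02X\n", dst.modrm | (5 << 3), imm8);  // /5 = shr
}
// ...and a one-line forwarder for the Register overload.
void shr(Register dst, int imm8) { shr(Operand(dst), imm8); }

int main() {
  shr(Register{2}, 3);  // edx by 3: prints "C1 EA 03"
  return 0;
}
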
@@ -830,6 +852,7 @@
 
   void fabs();
   void fchs();
+  void fsqrt();
   void fcos();
   void fsin();
   void fptan();
@@ -840,6 +863,7 @@
 
   void fadd(int i);
   void fadd_i(int i);
+  void fadd_d(const Operand& adr);
   void fsub(int i);
   void fsub_i(int i);
   void fmul(int i);
@@ -862,14 +886,19 @@
   void ffree(int i = 0);
 
   void ftst();
+  void fxam();
   void fucomp(int i);
   void fucompp();
   void fucomi(int i);
   void fucomip();
   void fcompp();
   void fnstsw_ax();
+  void fldcw(const Operand& adr);
+  void fnstcw(const Operand& adr);
   void fwait();
   void fnclex();
+  void fnsave(const Operand& adr);
+  void frstor(const Operand& adr);
 
   void frndint();
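
fldcw/fnstcw are added mainly so generated code can switch x87 rounding modes around conversions. A standalone sketch of the round trip they enable, assuming GCC/Clang inline assembly on an x86 target (this is not V8 code):

#include <cstdint>
#include <cstdio>

int main() {
  uint16_t saved, cw;
  asm volatile("fnstcw %0" : "=m"(saved));   // store current control word
  cw = saved & ~0x0C00;                      // RC bits 11:10 = 00b, nearest
  asm volatile("fldcw %0" : : "m"(cw));      // load modified control word
  std::printf("control word %04X -> %04X\n", saved, cw);
  asm volatile("fldcw %0" : : "m"(saved));   // restore the caller's mode
  return 0;
}
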
 
@@ -1015,7 +1044,7 @@
 #ifdef DEBUG
   ~EnsureSpace() {
     int bytes_generated = space_before_ - assembler_->available_space();
-    ASSERT(bytes_generated < assembler_->kGap);
+    DCHECK(bytes_generated < assembler_->kGap);
   }
 #endif
 
diff --git a/src/x87/builtins-x87.cc b/src/x87/builtins-x87.cc
index 8db42d8..d631175 100644
--- a/src/x87/builtins-x87.cc
+++ b/src/x87/builtins-x87.cc
@@ -6,10 +6,10 @@
 
 #if V8_TARGET_ARCH_X87
 
+#include "src/code-factory.h"
 #include "src/codegen.h"
 #include "src/deoptimizer.h"
 #include "src/full-codegen.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -42,7 +42,7 @@
     __ push(edi);
     __ push(scratch);  // Restore return address.
   } else {
-    ASSERT(extra_args == NO_EXTRA_ARGUMENTS);
+    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
   // JumpToExternalReference expects eax to contain the number of arguments
@@ -92,7 +92,7 @@
   __ cmp(esp, Operand::StaticVariable(stack_limit));
   __ j(above_equal, &ok, Label::kNear);
 
-  CallRuntimePassFunction(masm, Runtime::kHiddenTryInstallOptimizedCode);
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
   GenerateTailCallToReturnedCode(masm);
 
   __ bind(&ok);
@@ -110,7 +110,7 @@
   // -----------------------------------
 
   // Should never create mementos for api functions.
-  ASSERT(!is_api_function || !create_memento);
+  DCHECK(!is_api_function || !create_memento);
 
   // Enter a construct frame.
   {
@@ -178,7 +178,7 @@
         __ push(edi);
 
         __ push(edi);  // constructor
-        __ CallRuntime(Runtime::kHiddenFinalizeInstanceSize, 1);
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
 
         __ pop(edi);
         __ pop(eax);
@@ -359,9 +359,9 @@
     // edi: function (constructor)
     __ push(edi);
     if (create_memento) {
-      __ CallRuntime(Runtime::kHiddenNewObjectWithAllocationSite, 2);
+      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
     } else {
-      __ CallRuntime(Runtime::kHiddenNewObject, 1);
+      __ CallRuntime(Runtime::kNewObject, 1);
     }
     __ mov(ebx, eax);  // store result in ebx
 
@@ -550,8 +550,8 @@
 }
 
 
-void Builtins::Generate_CompileUnoptimized(MacroAssembler* masm) {
-  CallRuntimePassFunction(masm, Runtime::kHiddenCompileUnoptimized);
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
   GenerateTailCallToReturnedCode(masm);
 }
 
@@ -566,7 +566,7 @@
   // Whether to compile in a background thread.
   __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
 
-  __ CallRuntime(Runtime::kHiddenCompileOptimized, 2);
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
   // Restore receiver.
   __ pop(edi);
 }
@@ -660,7 +660,8 @@
 }
 
 
-static void Generate_NotifyStubFailureHelper(MacroAssembler* masm) {
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
   // Enter an internal frame.
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -669,7 +670,7 @@
     // stubs that tail call the runtime on deopts passing their parameters in
     // registers.
     __ pushad();
-    __ CallRuntime(Runtime::kHiddenNotifyStubFailure, 0);
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
     __ popad();
     // Tear down internal frame.
   }
@@ -680,13 +681,12 @@
 
 
 void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
-  Generate_NotifyStubFailureHelper(masm);
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
 }
 
 
 void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
-  // SaveDoubles is meanless for X87, just used by deoptimizer.cc
-  Generate_NotifyStubFailureHelper(masm);
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
 }
 
 
@@ -697,7 +697,7 @@
 
     // Pass deoptimization type to the runtime system.
     __ push(Immediate(Smi::FromInt(static_cast<int>(type))));
-    __ CallRuntime(Runtime::kHiddenNotifyDeoptimized, 1);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
 
     // Tear down internal frame.
   }
@@ -765,7 +765,7 @@
   // 3a. Patch the first argument if necessary when calling a function.
   Label shift_arguments;
   __ Move(edx, Immediate(0));  // indicate regular JS_FUNCTION
-  { Label convert_to_object, use_global_receiver, patch_receiver;
+  { Label convert_to_object, use_global_proxy, patch_receiver;
     // Change context eagerly in case we need the global receiver.
     __ mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
@@ -787,9 +787,9 @@
     // global object if it is null or undefined.
     __ JumpIfSmi(ebx, &convert_to_object);
     __ cmp(ebx, factory->null_value());
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     __ cmp(ebx, factory->undefined_value());
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
     __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
     __ j(above_equal, &shift_arguments);
@@ -814,10 +814,10 @@
     __ mov(edi, Operand(esp, eax, times_4, 1 * kPointerSize));
     __ jmp(&patch_receiver);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ mov(ebx,
            Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
 
     __ bind(&patch_receiver);
     __ mov(Operand(esp, eax, times_4, 0), ebx);
@@ -943,7 +943,7 @@
     __ mov(ebx, Operand(ebp, kReceiverOffset));
 
     // Check that the function is a JS function (otherwise it must be a proxy).
-    Label push_receiver, use_global_receiver;
+    Label push_receiver, use_global_proxy;
     __ mov(edi, Operand(ebp, kFunctionOffset));
     __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
     __ j(not_equal, &push_receiver);
@@ -971,9 +971,9 @@
     // global object if it is null or undefined.
     __ JumpIfSmi(ebx, &call_to_object);
     __ cmp(ebx, factory->null_value());
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     __ cmp(ebx, factory->undefined_value());
-    __ j(equal, &use_global_receiver);
+    __ j(equal, &use_global_proxy);
     STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
     __ CmpObjectType(ebx, FIRST_SPEC_OBJECT_TYPE, ecx);
     __ j(above_equal, &push_receiver);
@@ -984,10 +984,10 @@
     __ mov(ebx, eax);
     __ jmp(&push_receiver);
 
-    __ bind(&use_global_receiver);
+    __ bind(&use_global_proxy);
     __ mov(ebx,
            Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
-    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(ebx, FieldOperand(ebx, GlobalObject::kGlobalProxyOffset));
 
     // Push the receiver.
     __ bind(&push_receiver);
@@ -995,13 +995,19 @@
 
     // Copy all arguments from the array to the stack.
     Label entry, loop;
-    __ mov(ecx, Operand(ebp, kIndexOffset));
+    Register receiver = LoadDescriptor::ReceiverRegister();
+    Register key = LoadDescriptor::NameRegister();
+    __ mov(key, Operand(ebp, kIndexOffset));
     __ jmp(&entry);
     __ bind(&loop);
-    __ mov(edx, Operand(ebp, kArgumentsOffset));  // load arguments
+    __ mov(receiver, Operand(ebp, kArgumentsOffset));  // load arguments
 
     // Use inline caching to speed up access to arguments.
-    Handle<Code> ic = masm->isolate()->builtins()->KeyedLoadIC_Initialize();
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Immediate(Smi::FromInt(0)));
+    }
+    Handle<Code> ic = CodeFactory::KeyedLoadIC(masm->isolate()).code();
     __ call(ic, RelocInfo::CODE_TARGET);
     // It is important that we do not have a test instruction after the
     // call.  A test instruction after the call is used to indicate that
@@ -1011,19 +1017,19 @@
     // Push the nth argument.
     __ push(eax);
 
-    // Update the index on the stack and in register eax.
-    __ mov(ecx, Operand(ebp, kIndexOffset));
-    __ add(ecx, Immediate(1 << kSmiTagSize));
-    __ mov(Operand(ebp, kIndexOffset), ecx);
+    // Update the index on the stack and in register key.
+    __ mov(key, Operand(ebp, kIndexOffset));
+    __ add(key, Immediate(1 << kSmiTagSize));
+    __ mov(Operand(ebp, kIndexOffset), key);
 
     __ bind(&entry);
-    __ cmp(ecx, Operand(ebp, kLimitOffset));
+    __ cmp(key, Operand(ebp, kLimitOffset));
     __ j(not_equal, &loop);
 
     // Call the function.
     Label call_proxy;
-    __ mov(eax, ecx);
     ParameterCount actual(eax);
+    __ Move(eax, key);
     __ SmiUntag(eax);
     __ mov(edi, Operand(ebp, kFunctionOffset));
     __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
@@ -1436,7 +1442,7 @@
   __ j(above_equal, &ok, Label::kNear);
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
-    __ CallRuntime(Runtime::kHiddenStackGuard, 0);
+    __ CallRuntime(Runtime::kStackGuard, 0);
   }
   __ jmp(masm->isolate()->builtins()->OnStackReplacement(),
          RelocInfo::CODE_TARGET);
diff --git a/src/x87/code-stubs-x87.cc b/src/x87/code-stubs-x87.cc
index 7edc821..d4c383b 100644
--- a/src/x87/code-stubs-x87.cc
+++ b/src/x87/code-stubs-x87.cc
@@ -6,468 +6,116 @@
 
 #if V8_TARGET_ARCH_X87
 
+#include "src/base/bits.h"
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
 #include "src/isolate.h"
 #include "src/jsregexp.h"
 #include "src/regexp-macro-assembler.h"
 #include "src/runtime.h"
-#include "src/stub-cache.h"
-#include "src/codegen.h"
-#include "src/runtime.h"
 
 namespace v8 {
 namespace internal {
 
 
-void FastNewClosureStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { ebx };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNewClosureFromStubFailure)->entry;
-}
-
-
-void FastNewContextStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edi };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void ToNumberStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void NumberToStringStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenNumberToString)->entry;
-}
-
-
-void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax, ebx, ecx };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  static Representation representations[] = {
-      Representation::Tagged(),
-      Representation::Smi(),
-      Representation::Tagged() };
-  descriptor->register_param_representations_ = representations;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(
-          Runtime::kHiddenCreateArrayLiteralStubBailout)->entry;
-}
-
-
-void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax, ebx, ecx, edx };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenCreateObjectLiteral)->entry;
-}
-
-
-void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { ebx, edx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void KeyedLoadDictionaryElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedLoadIC_MissFromStubFailure);
-}
-
-
-void RegExpConstructResultStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { ecx, ebx, eax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenRegExpConstructResult)->entry;
-}
-
-
-void KeyedLoadGenericElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kKeyedGetProperty)->entry;
-}
-
-
-void LoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedLoadFieldStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void StringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStringLengthStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = NULL;
-}
-
-
-void KeyedStoreFastElementStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx, eax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(KeyedStoreIC_MissFromStubFailure);
-}
-
-
-void TransitionElementsKindStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax, ebx };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
-}
-
-
 static void InitializeArrayConstructorDescriptor(
-    Isolate* isolate,
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // eax -- number of arguments
   // edi -- function
   // ebx -- allocation site with elements kind
-  static Register registers_variable_args[] = { edi, ebx, eax };
-  static Register registers_no_args[] = { edi, ebx };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = eax;
-    descriptor->register_param_count_ = 3;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenArrayConstructor)->entry;
 }
 
 
 static void InitializeInternalArrayConstructorDescriptor(
-    CodeStubInterfaceDescriptor* descriptor,
+    Isolate* isolate, CodeStubDescriptor* descriptor,
     int constant_stack_parameter_count) {
   // register state
   // eax -- number of arguments
   // edi -- constructor function
-  static Register registers_variable_args[] = { edi, eax };
-  static Register registers_no_args[] = { edi };
+  Address deopt_handler = Runtime::FunctionForId(
+      Runtime::kInternalArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    descriptor->register_param_count_ = 1;
-    descriptor->register_params_ = registers_no_args;
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
   } else {
-    // stack param count needs (constructor pointer, and single argument)
-    descriptor->handler_arguments_mode_ = PASS_ARGUMENTS;
-    descriptor->stack_parameter_count_ = eax;
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers_variable_args;
-    static Representation representations[] = {
-        Representation::Tagged(),
-        Representation::Integer32() };
-    descriptor->register_param_representations_ = representations;
+    descriptor->Initialize(eax, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
   }
-
-  descriptor->hint_stack_parameter_count_ = constant_stack_parameter_count;
-  descriptor->function_mode_ = JS_FUNCTION_STUB_MODE;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenInternalArrayConstructor)->entry;
 }
 
 
-void ArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void ArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
   InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
-void InternalArrayNoArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 0);
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
 }
 
 
-void InternalArraySingleArgumentConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, 1);
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
 }
 
 
-void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  InitializeInternalArrayConstructorDescriptor(descriptor, -1);
-}
-
-
-void CompareNilICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(CompareNilIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kCompareNilIC_Miss), isolate()));
-}
-
-void ToBooleanStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax };
-  descriptor->register_param_count_ = 1;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ToBooleanIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate()));
-}
-
-
-void StoreGlobalStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, ecx, eax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(StoreIC_MissFromStubFailure);
-}
-
-
-void ElementsTransitionAndStoreStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { eax, ebx, ecx, edx };
-  descriptor->register_param_count_ = 4;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(ElementsTransitionAndStoreIC_Miss);
-}
-
-
-void BinaryOpICStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, eax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ = FUNCTION_ADDR(BinaryOpIC_Miss);
-  descriptor->SetMissHandler(
-      ExternalReference(IC_Utility(IC::kBinaryOpIC_Miss), isolate()));
-}
-
-
-void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { ecx, edx, eax };
-  descriptor->register_param_count_ = 3;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite);
-}
-
-
-void StringAddStub::InitializeInterfaceDescriptor(
-    CodeStubInterfaceDescriptor* descriptor) {
-  static Register registers[] = { edx, eax };
-  descriptor->register_param_count_ = 2;
-  descriptor->register_params_ = registers;
-  descriptor->deoptimization_handler_ =
-      Runtime::FunctionForId(Runtime::kHiddenStringAdd)->entry;
-}
-
-
-void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
-    static Register registers[] = { edi,  // JSFunction
-                                    esi,  // context
-                                    eax,  // actual number of arguments
-                                    ebx,  // expected number of arguments
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // JSFunction
-        Representation::Tagged(),     // context
-        Representation::Integer32(),  // actual number of arguments
-        Representation::Integer32(),  // expected number of arguments
-    };
-    descriptor->register_param_count_ = 4;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::KeyedCall);
-    static Register registers[] = { esi,  // context
-                                    ecx,  // key
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // key
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::NamedCall);
-    static Register registers[] = { esi,  // context
-                                    ecx,  // name
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),     // context
-        Representation::Tagged(),     // name
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::CallHandler);
-    static Register registers[] = { esi,  // context
-                                    edx,  // receiver
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),  // context
-        Representation::Tagged(),  // receiver
-    };
-    descriptor->register_param_count_ = 2;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
-  {
-    CallInterfaceDescriptor* descriptor =
-        isolate->call_descriptor(Isolate::ApiFunctionCall);
-    static Register registers[] = { eax,  // callee
-                                    ebx,  // call_data
-                                    ecx,  // holder
-                                    edx,  // api_function_address
-                                    esi,  // context
-    };
-    static Representation representations[] = {
-        Representation::Tagged(),    // callee
-        Representation::Tagged(),    // call_data
-        Representation::Tagged(),    // holder
-        Representation::External(),  // api_function_address
-        Representation::Tagged(),    // context
-    };
-    descriptor->register_param_count_ = 5;
-    descriptor->register_params_ = registers;
-    descriptor->param_representations_ = representations;
-  }
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
 }
 
 
 #define __ ACCESS_MASM(masm)
 
 
-void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
   // Update the static counter each time a new code stub is generated.
   isolate()->counters()->code_stubs()->Increment();
 
-  CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor();
-  int param_count = descriptor->register_param_count_;
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetEnvironmentParameterCount();
   {
     // Call the runtime system in a fresh internal frame.
     FrameScope scope(masm, StackFrame::INTERNAL);
-    ASSERT(descriptor->register_param_count_ == 0 ||
-           eax.is(descriptor->register_params_[param_count - 1]));
+    DCHECK(param_count == 0 ||
+           eax.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
     // Push arguments
     for (int i = 0; i < param_count; ++i) {
-      __ push(descriptor->register_params_[i]);
+      __ push(descriptor.GetEnvironmentParameterRegister(i));
     }
-    ExternalReference miss = descriptor->miss_handler();
-    __ CallExternalReference(miss, descriptor->register_param_count_);
+    __ CallExternalReference(miss, param_count);
   }
 
   __ ret(0);
@@ -479,6 +127,11 @@
   // store the registers in any particular way, but we do have to store and
   // restore them.
   __ pushad();
+  if (save_doubles()) {
+    // Save the FPU state in the 108-byte save area (m108byte).
+    __ sub(esp, Immediate(108));
+    __ fnsave(Operand(esp, 0));
+  }
   const int argument_count = 1;
 
   AllowExternalCallThatCantCauseGC scope(masm);
@@ -488,6 +141,11 @@
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()),
       argument_count);
+  if (save_doubles()) {
+    // Restore the FPU state from the 108-byte save area (m108byte).
+    __ frstor(Operand(esp, 0));
+    __ add(esp, Immediate(108));
+  }
   __ popad();
   __ ret(0);
 }
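
With save_doubles() the stub brackets the C call with fnsave/frstor over a 108-byte frame, since pushad does not cover the x87 state. A standalone sketch of the same round trip, assuming GCC-style inline assembly on an x86 target:

#include <cstdio>

int main() {
  char fpu_state[108];                            // FNSAVE m108byte area
  asm volatile("fnsave %0" : "=m"(fpu_state));    // save state, reinit FPU
  // ... a call that may clobber the x87 register stack would go here ...
  asm volatile("frstor %0" : : "m"(fpu_state));   // reload the saved state
  std::printf("round-tripped %d bytes of FPU state\n",
              static_cast<int>(sizeof(fpu_state)));
  return 0;
}
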
@@ -518,7 +176,7 @@
 void DoubleToIStub::Generate(MacroAssembler* masm) {
   Register input_reg = this->source();
   Register final_result_reg = this->destination();
-  ASSERT(is_truncating());
+  DCHECK(is_truncating());
 
   Label check_negative, process_64_bits, done, done_no_stash;
 
@@ -615,7 +273,7 @@
   }
   __ bind(&done_no_stash);
   if (!final_result_reg.is(result_reg)) {
-    ASSERT(final_result_reg.is(ecx));
+    DCHECK(final_result_reg.is(ecx));
     __ mov(final_result_reg, result_reg);
   }
   __ pop(save_reg);
@@ -672,27 +330,51 @@
 
 
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
   Label miss;
+  Register receiver = LoadDescriptor::ReceiverRegister();
 
-  if (kind() == Code::KEYED_LOAD_IC) {
-    __ cmp(ecx, Immediate(isolate()->factory()->prototype_string()));
-    __ j(not_equal, &miss);
-  }
-
-  StubCompiler::GenerateLoadFunctionPrototype(masm, edx, eax, ebx, &miss);
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
+                                                          ebx, &miss);
   __ bind(&miss);
-  StubCompiler::TailCallBuiltin(
-      masm, BaseLoadStoreStubCompiler::MissBuiltin(kind()));
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+  // Return address is on the stack.
+  Label slow;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+  Register scratch = eax;
+  DCHECK(!scratch.is(receiver) && !scratch.is(key));
+
+  // Check that the key is an array index, that is, a Uint32.
+  __ test(key, Immediate(kSmiTagMask | kSmiSignMask));
+  __ j(not_zero, &slow);
+
+  // Everything is fine; call the runtime.
+  __ pop(scratch);
+  __ push(receiver);  // receiver
+  __ push(key);       // key
+  __ push(scratch);   // return address
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadElementWithInterceptor), masm->isolate());
+  __ TailCallExternalReference(ref, 2, 1);
+
+  __ bind(&slow);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
 }
 
 
 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
   // The key is in edx and the parameter count is in eax.
+  DCHECK(edx.is(ArgumentsAccessReadDescriptor::index()));
+  DCHECK(eax.is(ArgumentsAccessReadDescriptor::parameter_count()));
 
   // The displacement is used for skipping the frame pointer on the
   // stack. It is the offset of the last parameter (if any) relative
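
The interceptor stub's fast-path check above folds "is a Smi" and "is non-negative" into one test against kSmiTagMask | kSmiSignMask. A self-checking sketch under the ia32 Smi layout (tag size 1, tag value 0); the constants are spelled out here rather than taken from V8's headers:

#include <cassert>
#include <cstdint>

const int32_t kSmiTagMask = 1;                 // low bit: 0 for Smis
const int32_t kSmiSignMask =
    static_cast<int32_t>(0x80000000u);         // high bit: 0 if non-negative

bool IsArrayIndexSmi(int32_t raw) {
  return (raw & (kSmiTagMask | kSmiSignMask)) == 0;
}

int main() {
  assert(IsArrayIndexSmi(42 << 1));  // Smi 42: both bits clear
  assert(!IsArrayIndexSmi(-2));      // Smi -1 (encoded -2): sign bit set
  assert(!IsArrayIndexSmi(3));       // tag bit set: not a Smi at all
  return 0;
}
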
@@ -771,7 +453,7 @@
   __ mov(Operand(esp, 2 * kPointerSize), edx);
 
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
 
 
@@ -806,7 +488,7 @@
   __ mov(Operand(esp, 2 * kPointerSize), edx);
 
   // ebx = parameter count (tagged)
-  // ecx = argument count (tagged)
+  // ecx = argument count (smi-tagged)
   // esp[4] = parameter count (tagged)
   // esp[8] = address of receiver argument
   // Compute the mapped parameter count = min(ebx, ecx) in ebx.
@@ -839,47 +521,52 @@
   __ Allocate(ebx, eax, edx, edi, &runtime, TAG_OBJECT);
 
   // eax = address of new object(s) (tagged)
-  // ecx = argument count (tagged)
+  // ecx = argument count (smi-tagged)
   // esp[0] = mapped parameter count (tagged)
   // esp[8] = parameter count (tagged)
   // esp[12] = address of receiver argument
-  // Get the arguments boilerplate from the current native context into edi.
-  Label has_mapped_parameters, copy;
+  // Get the arguments map from the current native context into edi.
+  Label has_mapped_parameters, instantiate;
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
   __ mov(ebx, Operand(esp, 0 * kPointerSize));
   __ test(ebx, ebx);
   __ j(not_zero, &has_mapped_parameters, Label::kNear);
-  __ mov(edi, Operand(edi,
-         Context::SlotOffset(Context::SLOPPY_ARGUMENTS_BOILERPLATE_INDEX)));
-  __ jmp(&copy, Label::kNear);
+  __ mov(
+      edi,
+      Operand(edi, Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX)));
+  __ jmp(&instantiate, Label::kNear);
 
   __ bind(&has_mapped_parameters);
-  __ mov(edi, Operand(edi,
-            Context::SlotOffset(Context::ALIASED_ARGUMENTS_BOILERPLATE_INDEX)));
-  __ bind(&copy);
+  __ mov(
+      edi,
+      Operand(edi, Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX)));
+  __ bind(&instantiate);
 
   // eax = address of new object (tagged)
   // ebx = mapped parameter count (tagged)
-  // ecx = argument count (tagged)
-  // edi = address of boilerplate object (tagged)
+  // ecx = argument count (smi-tagged)
+  // edi = address of arguments map (tagged)
   // esp[0] = mapped parameter count (tagged)
   // esp[8] = parameter count (tagged)
   // esp[12] = address of receiver argument
   // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ mov(edx, FieldOperand(edi, i));
-    __ mov(FieldOperand(eax, i), edx);
-  }
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+         masm->isolate()->factory()->empty_fixed_array());
 
   // Set up the callee in-object property.
   STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
   __ mov(edx, Operand(esp, 4 * kPointerSize));
+  __ AssertNotSmi(edx);
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                       Heap::kArgumentsCalleeIndex * kPointerSize),
          edx);
 
   // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(ecx);
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
@@ -994,7 +681,7 @@
   __ bind(&runtime);
   __ pop(eax);  // Remove saved parameter count.
   __ mov(Operand(esp, 1 * kPointerSize), ecx);  // Patch argument count.
-  __ TailCallRuntime(Runtime::kHiddenNewSloppyArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
 }
 
 
@@ -1036,22 +723,22 @@
   // Do the allocation of both objects in one go.
   __ Allocate(ecx, eax, edx, ebx, &runtime, TAG_OBJECT);
 
-  // Get the arguments boilerplate from the current native context.
+  // Get the arguments map from the current native context.
   __ mov(edi, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
   __ mov(edi, FieldOperand(edi, GlobalObject::kNativeContextOffset));
-  const int offset =
-      Context::SlotOffset(Context::STRICT_ARGUMENTS_BOILERPLATE_INDEX);
+  const int offset = Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX);
   __ mov(edi, Operand(edi, offset));
 
-  // Copy the JS object part.
-  for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) {
-    __ mov(ebx, FieldOperand(edi, i));
-    __ mov(FieldOperand(eax, i), ebx);
-  }
+  __ mov(FieldOperand(eax, JSObject::kMapOffset), edi);
+  __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
+         masm->isolate()->factory()->empty_fixed_array());
+  __ mov(FieldOperand(eax, JSObject::kElementsOffset),
+         masm->isolate()->factory()->empty_fixed_array());
 
   // Get the length (smi tagged) and set that as an in-object property too.
   STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
   __ mov(ecx, Operand(esp, 1 * kPointerSize));
+  __ AssertSmi(ecx);
   __ mov(FieldOperand(eax, JSObject::kHeaderSize +
                       Heap::kArgumentsLengthIndex * kPointerSize),
          ecx);
@@ -1091,7 +778,7 @@
 
   // Do the runtime call to allocate the arguments object.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenNewStrictArguments, 3, 1);
+  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
 }
 
 
@@ -1100,7 +787,7 @@
   // time or if regexp entry in generated code is turned off runtime switch or
   // at compilation.
 #ifdef V8_INTERPRETED_REGEXP
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 #else  // V8_INTERPRETED_REGEXP
 
   // Stack frame on entry.
@@ -1255,7 +942,7 @@
   __ JumpIfNotSmi(ebx, &runtime);
   __ cmp(ebx, FieldOperand(edx, String::kLengthOffset));
   __ j(above_equal, &runtime);
-  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataAsciiCodeOffset));
+  __ mov(edx, FieldOperand(ecx, JSRegExp::kDataOneByteCodeOffset));
   __ Move(ecx, Immediate(1));  // Type is one byte.
 
   // (E) Carry on.  String handling is done.
@@ -1269,7 +956,7 @@
   // eax: subject string
   // ebx: previous index (smi)
   // edx: code
-  // ecx: encoding of subject string (1 if ASCII, 0 if two_byte);
+  // ecx: encoding of subject string (1 if one_byte, 0 if two_byte);
   // All checks done. Now push arguments for native regexp code.
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->regexp_entry_native(), 1);
@@ -1314,7 +1001,7 @@
   // esi: original subject string
   // eax: underlying subject string
   // ebx: previous index
-  // ecx: encoding of subject string (1 if ASCII 0 if two_byte);
+  // ecx: encoding of subject string (1 if one_byte 0 if two_byte);
   // edx: code
   // Argument 4: End of string data
   // Argument 3: Start of string data
@@ -1438,16 +1125,12 @@
   __ mov(eax, Operand(esp, kSubjectOffset));
   __ mov(ecx, eax);
   __ mov(FieldOperand(ebx, RegExpImpl::kLastSubjectOffset), eax);
-  __ RecordWriteField(ebx,
-                      RegExpImpl::kLastSubjectOffset,
-                      eax,
-                      edi);
+  __ RecordWriteField(ebx, RegExpImpl::kLastSubjectOffset, eax, edi,
+                      kDontSaveFPRegs);
   __ mov(eax, ecx);
   __ mov(FieldOperand(ebx, RegExpImpl::kLastInputOffset), eax);
-  __ RecordWriteField(ebx,
-                      RegExpImpl::kLastInputOffset,
-                      eax,
-                      edi);
+  __ RecordWriteField(ebx, RegExpImpl::kLastInputOffset, eax, edi,
+                      kDontSaveFPRegs);
 
   // Get the static offsets vector filled by the native regexp code.
   ExternalReference address_of_static_offsets_vector =
@@ -1481,7 +1164,7 @@
 
   // Do the runtime call to execute the regexp.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenRegExpExec, 4, 1);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
 
   // Deferred code for string handling.
   // (7) Not a long external string?  If yes, go to (10).
@@ -1542,21 +1225,19 @@
 
 
 static int NegativeComparisonResult(Condition cc) {
-  ASSERT(cc != equal);
-  ASSERT((cc == less) || (cc == less_equal)
+  DCHECK(cc != equal);
+  DCHECK((cc == less) || (cc == less_equal)
       || (cc == greater) || (cc == greater_equal));
   return (cc == greater || cc == greater_equal) ? LESS : GREATER;
 }
 
 
-static void CheckInputType(MacroAssembler* masm,
-                           Register input,
-                           CompareIC::State expected,
-                           Label* fail) {
+static void CheckInputType(MacroAssembler* masm, Register input,
+                           CompareICState::State expected, Label* fail) {
   Label ok;
-  if (expected == CompareIC::SMI) {
+  if (expected == CompareICState::SMI) {
     __ JumpIfNotSmi(input, fail);
-  } else if (expected == CompareIC::NUMBER) {
+  } else if (expected == CompareICState::NUMBER) {
     __ JumpIfSmi(input, &ok);
     __ cmp(FieldOperand(input, HeapObject::kMapOffset),
            Immediate(masm->isolate()->factory()->heap_number_map()));
@@ -1581,13 +1262,13 @@
 }
 
 
-void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
   Label check_unequal_objects;
   Condition cc = GetCondition();
 
   Label miss;
-  CheckInputType(masm, edx, left_, &miss);
-  CheckInputType(masm, eax, right_, &miss);
+  CheckInputType(masm, edx, left(), &miss);
+  CheckInputType(masm, eax, right(), &miss);
 
   // Compare two smis.
   Label non_smi, smi_done;
@@ -1653,7 +1334,7 @@
     // If either is a Smi (we know that not both are), then they can only
     // be equal if the other is a HeapNumber. If so, use the slow case.
     STATIC_ASSERT(kSmiTag == 0);
-    ASSERT_EQ(0, Smi::FromInt(0));
+    DCHECK_EQ(0, Smi::FromInt(0));
     __ mov(ecx, Immediate(kSmiTagMask));
     __ and_(ecx, eax);
     __ test(ecx, edx);
@@ -1745,7 +1426,7 @@
   // If one of the numbers was NaN, then the result is always false.
   // The cc is never not-equal.
   __ bind(&unordered);
-  ASSERT(cc != not_equal);
+  DCHECK(cc != not_equal);
   if (cc == less || cc == less_equal) {
     __ mov(eax, Immediate(Smi::FromInt(1)));
   } else {
@@ -1770,23 +1451,15 @@
 
   __ bind(&check_for_strings);
 
-  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx,
-                                         &check_unequal_objects);
+  __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx,
+                                           &check_unequal_objects);
 
-  // Inline comparison of ASCII strings.
+  // Inline comparison of one-byte strings.
   if (cc == equal) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(masm,
-                                                     edx,
-                                                     eax,
-                                                     ecx,
-                                                     ebx);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, edx, eax, ecx, ebx);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
-                                                       edx,
-                                                       eax,
-                                                       ecx,
-                                                       ebx,
-                                                       edi);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
+                                                    edi);
   }
 #ifdef DEBUG
   __ Abort(kUnexpectedFallThroughFromStringComparison);
@@ -1875,7 +1548,7 @@
   // function without changing the state.
   __ cmp(ecx, edi);
   __ j(equal, &done, Label::kFar);
-  __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+  __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &done, Label::kFar);
 
   if (!FLAG_pretenuring_call_new) {
@@ -1898,14 +1571,14 @@
 
   // A monomorphic miss (i.e., here the cache is not uninitialized) goes
   // megamorphic.
-  __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
   __ j(equal, &initialize);
   // MegamorphicSentinel is an immortal immovable object (undefined) so no
   // write-barrier is needed.
   __ bind(&megamorphic);
-  __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                      FixedArray::kHeaderSize),
-         Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ jmp(&done, Label::kFar);
 
   // An uninitialized cache is patched with the function or sentinel to
@@ -1951,7 +1624,8 @@
   __ push(edi);
   __ push(ebx);
   __ push(edx);
-  __ RecordWriteArray(ebx, edi, edx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ RecordWriteArray(ebx, edi, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
   __ pop(edx);
   __ pop(ebx);
   __ pop(edi);
@@ -2043,7 +1717,7 @@
     // Load the receiver from the stack.
     __ mov(eax, Operand(esp, (argc + 1) * kPointerSize));
 
-    if (call_as_method) {
+    if (needs_checks) {
       __ JumpIfSmi(eax, &wrap);
 
       __ CmpObjectType(eax, FIRST_SPEC_OBJECT_TYPE, ecx);
@@ -2072,7 +1746,7 @@
 
 
 void CallFunctionStub::Generate(MacroAssembler* masm) {
-  CallFunctionNoFeedback(masm, argc_, NeedsChecks(), CallAsMethod());
+  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
 }
 
 
@@ -2156,7 +1830,7 @@
   // edi - function
   // edx - slot id
   Label miss;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   EmitLoadTypeFeedbackVector(masm, ebx);
@@ -2180,7 +1854,7 @@
   __ TailCallStub(&stub);
 
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Customization_Miss);
+  GenerateMiss(masm);
 
   // The slow case, we need this no matter what to complete a call after a miss.
   CallFunctionNoFeedback(masm,
@@ -2200,7 +1874,7 @@
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
-  int argc = state_.arg_count();
+  int argc = arg_count();
   ParameterCount actual(argc);
 
   EmitLoadTypeFeedbackVector(masm, ebx);
@@ -2211,7 +1885,7 @@
   __ j(not_equal, &extra_checks_or_miss);
 
   __ bind(&have_js_function);
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     EmitContinueIfStrictOrNative(masm, &cont);
 
     // Load the receiver from the stack.
@@ -2230,7 +1904,7 @@
   __ bind(&slow);
   EmitSlowCase(isolate, masm, argc, &non_function);
 
-  if (state_.CallAsMethod()) {
+  if (CallAsMethod()) {
     __ bind(&wrap);
     EmitWrapCase(masm, argc, &cont);
   }
@@ -2240,9 +1914,9 @@
 
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
-  __ cmp(ecx, Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+  __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &slow_start);
-  __ cmp(ecx, Immediate(TypeFeedbackInfo::UninitializedSentinel(isolate)));
+  __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
   __ j(equal, &miss);
 
   if (!FLAG_trace_ic) {
@@ -2253,13 +1927,13 @@
     __ j(not_equal, &miss);
     __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
                         FixedArray::kHeaderSize),
-           Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate)));
+           Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
     __ jmp(&slow_start);
   }
 
   // We are here because tracing is on or we are going monomorphic.
   __ bind(&miss);
-  GenerateMiss(masm, IC::kCallIC_Miss);
+  GenerateMiss(masm);
 
   // the slow case
   __ bind(&slow_start);
@@ -2277,9 +1951,9 @@
 }
 
 
-void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) {
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
   // Get the receiver of the function from the stack; 1 ~ return address.
-  __ mov(ecx, Operand(esp, (state_.arg_count() + 1) * kPointerSize));
+  __ mov(ecx, Operand(esp, (arg_count() + 1) * kPointerSize));
 
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -2291,6 +1965,9 @@
     __ push(edx);
 
     // Call the entry.
+    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+                                               : IC::kCallIC_Customization_Miss;
+
     ExternalReference miss = ExternalReference(IC_Utility(id),
                                                masm->isolate());
     __ CallExternalReference(miss, 4);
@@ -2319,12 +1996,19 @@
 
 
 void CodeStub::GenerateFPStubs(Isolate* isolate) {
-  // Do nothing.
+  CEntryStub save_doubles(isolate, 1, kSaveFPRegs);
+  // Stubs might already be in the snapshot; detect that and don't regenerate,
+  // since regenerating would mess up the code stub initialization state.
+  Code* save_doubles_code;
+  if (!save_doubles.FindCodeInCache(&save_doubles_code)) {
+    save_doubles_code = *(save_doubles.GetCode());
+  }
+  isolate->set_fp_stubs_generated(true);
 }
 
 
 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
-  CEntryStub stub(isolate, 1);
+  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
   stub.GetCode();
 }
 
@@ -2340,7 +2024,7 @@
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
   // Enter the exit frame that transitions from JavaScript to C++.
-  __ EnterExitFrame();
+  __ EnterExitFrame(save_doubles());
 
   // ebx: pointer to C function  (C callee-saved)
   // ebp: frame pointer  (restored after C call)
@@ -2348,7 +2032,7 @@
   // edi: number of arguments including receiver  (C callee-saved)
   // esi: pointer to the first argument (C callee-saved)
 
-  // Result returned in eax, or eax+edx if result_size_ is 2.
+  // Result returned in eax, or eax+edx if result size is 2.
 
   // Check stack alignment.
   if (FLAG_debug_code) {
@@ -2396,7 +2080,7 @@
   }
 
   // Exit the JavaScript to C++ exit frame.
-  __ LeaveExitFrame();
+  __ LeaveExitFrame(save_doubles());
   __ ret(0);
 
   // Handling of exception.
@@ -2423,7 +2107,7 @@
 }
 
 
-void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
+void JSEntryStub::Generate(MacroAssembler* masm) {
   Label invoke, handler_entry, exit;
   Label not_outermost_js, not_outermost_js_2;
 
@@ -2434,7 +2118,7 @@
   __ mov(ebp, esp);
 
   // Push marker in two places.
-  int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY;
+  int marker = type();
   __ push(Immediate(Smi::FromInt(marker)));  // context slot
   __ push(Immediate(Smi::FromInt(marker)));  // function slot
   // Save callee-saved registers (C calling conventions).
@@ -2485,7 +2169,7 @@
   // pop the faked function when we return. Notice that we cannot store a
   // reference to the trampoline code directly in this stub, because the
   // builtin stubs may not have been generated yet.
-  if (is_construct) {
+  if (type() == StackFrame::ENTRY_CONSTRUCT) {
     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
                                       isolate());
     __ mov(edx, Immediate(construct_entry));
@@ -2542,7 +2226,7 @@
 //
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
-  ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
 
   // Fixed register usage throughout the stub.
   Register object = eax;  // Object (lhs).
@@ -2555,12 +2239,12 @@
   static const int kDeltaToCmpImmediate = 2;
   static const int kDeltaToMov = 8;
   static const int kDeltaToMovImmediate = 9;
-  static const int8_t kCmpEdiOperandByte1 = BitCast<int8_t, uint8_t>(0x3b);
-  static const int8_t kCmpEdiOperandByte2 = BitCast<int8_t, uint8_t>(0x3d);
-  static const int8_t kMovEaxImmediateByte = BitCast<int8_t, uint8_t>(0xb8);
+  static const int8_t kCmpEdiOperandByte1 = bit_cast<int8_t, uint8_t>(0x3b);
+  static const int8_t kCmpEdiOperandByte2 = bit_cast<int8_t, uint8_t>(0x3d);
+  static const int8_t kMovEaxImmediateByte = bit_cast<int8_t, uint8_t>(0xb8);
 
-  ASSERT_EQ(object.code(), InstanceofStub::left().code());
-  ASSERT_EQ(function.code(), InstanceofStub::right().code());
+  DCHECK_EQ(object.code(), InstanceofStub::left().code());
+  DCHECK_EQ(function.code(), InstanceofStub::right().code());
 
   // Get the object and function - they are always both needed.
   Label slow, not_js_object;
@@ -2575,7 +2259,7 @@
 
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     // Look up the function and the map in the instanceof cache.
     Label miss;
     __ CompareRoot(function, scratch, Heap::kInstanceofCacheFunctionRootIndex);
@@ -2602,7 +2286,7 @@
   } else {
     // The constants for the code patching are based on no push instructions
     // at the call site.
-    ASSERT(HasArgsInRegisters());
+    DCHECK(HasArgsInRegisters());
     // Get return address and delta to inlined map check.
     __ mov(scratch, Operand(esp, 0 * kPointerSize));
     __ sub(scratch, Operand(esp, 1 * kPointerSize));
@@ -2634,6 +2318,9 @@
   if (!HasCallSiteInlineCheck()) {
     __ mov(eax, Immediate(0));
     __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ mov(eax, factory->true_value());
+    }
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->true_value());
@@ -2654,6 +2341,9 @@
   if (!HasCallSiteInlineCheck()) {
     __ mov(eax, Immediate(Smi::FromInt(1)));
     __ StoreRoot(eax, scratch, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ mov(eax, factory->false_value());
+    }
   } else {
     // Get return address and delta to inlined map check.
     __ mov(eax, factory->false_value());
@@ -2681,20 +2371,32 @@
   // Null is not an instance of anything.
   __ cmp(object, factory->null_value());
   __ j(not_equal, &object_not_null, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null);
   // Smi values are not instances of anything.
   __ JumpIfNotSmi(object, &object_not_null_or_smi, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   __ bind(&object_not_null_or_smi);
   // String values are not instances of anything.
   Condition is_string = masm->IsObjectStringType(object, scratch, scratch);
   __ j(NegateCondition(is_string), &slow, Label::kNear);
-  __ Move(eax, Immediate(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ mov(eax, factory->false_value());
+  } else {
+    __ Move(eax, Immediate(Smi::FromInt(1)));
+  }
   __ ret((HasArgsInRegisters() ? 0 : 2) * kPointerSize);
 
   // Slow-case: Go through the JavaScript implementation.
@@ -2730,12 +2432,6 @@
 }
 
 
-Register InstanceofStub::left() { return eax; }
-
-
-Register InstanceofStub::right() { return edx; }
-
-
 // -------------------------------------------------------------------------
 // StringCharCodeAtGenerator
 
@@ -2789,9 +2485,9 @@
   if (index_flags_ == STRING_INDEX_IS_NUMBER) {
     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
   } else {
-    ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
     // NumberToSmi discards numbers that are not exact integers.
-    __ CallRuntime(Runtime::kHiddenNumberToSmi, 1);
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
   }
   if (!index_.is(eax)) {
     // Save the conversion result before the pop instructions below
@@ -2817,7 +2513,7 @@
   __ push(object_);
   __ SmiTag(index_);
   __ push(index_);
-  __ CallRuntime(Runtime::kHiddenStringCharCodeAt, 2);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
   if (!result_.is(eax)) {
     __ mov(result_, eax);
   }
@@ -2835,7 +2531,7 @@
   // Fast case of Heap::LookupSingleCharacterStringFromCode.
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  ASSERT(IsPowerOf2(String::kMaxOneByteCharCode + 1));
+  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
   __ test(code_,
           Immediate(kSmiTagMask |
                     ((~String::kMaxOneByteCharCode) << kSmiTagSize)));
@@ -2846,7 +2542,7 @@
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
   STATIC_ASSERT(kSmiShiftSize == 0);
-  // At this point code register contains smi tagged ASCII char code.
+  // At this point code register contains smi tagged one byte char code.
   __ mov(result_, FieldOperand(result_,
                                code_, times_half_pointer_size,
                                FixedArray::kHeaderSize));
@@ -2881,9 +2577,9 @@
                                           Register count,
                                           Register scratch,
                                           String::Encoding encoding) {
-  ASSERT(!scratch.is(dest));
-  ASSERT(!scratch.is(src));
-  ASSERT(!scratch.is(count));
+  DCHECK(!scratch.is(dest));
+  DCHECK(!scratch.is(src));
+  DCHECK(!scratch.is(count));
 
   // Nothing to do for zero characters.
   Label done;
@@ -2908,74 +2604,6 @@
 }
 
 
-void StringHelper::GenerateHashInit(MacroAssembler* masm,
-                                    Register hash,
-                                    Register character,
-                                    Register scratch) {
-  // hash = (seed + character) + ((seed + character) << 10);
-  if (masm->serializer_enabled()) {
-    __ LoadRoot(scratch, Heap::kHashSeedRootIndex);
-    __ SmiUntag(scratch);
-    __ add(scratch, character);
-    __ mov(hash, scratch);
-    __ shl(scratch, 10);
-    __ add(hash, scratch);
-  } else {
-    int32_t seed = masm->isolate()->heap()->HashSeed();
-    __ lea(scratch, Operand(character, seed));
-    __ shl(scratch, 10);
-    __ lea(hash, Operand(scratch, character, times_1, seed));
-  }
-  // hash ^= hash >> 6;
-  __ mov(scratch, hash);
-  __ shr(scratch, 6);
-  __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm,
-                                            Register hash,
-                                            Register character,
-                                            Register scratch) {
-  // hash += character;
-  __ add(hash, character);
-  // hash += hash << 10;
-  __ mov(scratch, hash);
-  __ shl(scratch, 10);
-  __ add(hash, scratch);
-  // hash ^= hash >> 6;
-  __ mov(scratch, hash);
-  __ shr(scratch, 6);
-  __ xor_(hash, scratch);
-}
-
-
-void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
-                                       Register hash,
-                                       Register scratch) {
-  // hash += hash << 3;
-  __ mov(scratch, hash);
-  __ shl(scratch, 3);
-  __ add(hash, scratch);
-  // hash ^= hash >> 11;
-  __ mov(scratch, hash);
-  __ shr(scratch, 11);
-  __ xor_(hash, scratch);
-  // hash += hash << 15;
-  __ mov(scratch, hash);
-  __ shl(scratch, 15);
-  __ add(hash, scratch);
-
-  __ and_(hash, String::kHashBitMask);
-
-  // if (hash == 0) hash = 27;
-  Label hash_not_zero;
-  __ j(not_zero, &hash_not_zero, Label::kNear);
-  __ mov(hash, Immediate(StringHasher::kZeroHash));
-  __ bind(&hash_not_zero);
-}
-
-
 void SubStringStub::Generate(MacroAssembler* masm) {
   Label runtime;
 
@@ -3078,7 +2706,7 @@
     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
     __ test(ebx, Immediate(kStringEncodingMask));
     __ j(zero, &two_byte_slice, Label::kNear);
-    __ AllocateAsciiSlicedString(eax, ebx, no_reg, &runtime);
+    __ AllocateOneByteSlicedString(eax, ebx, no_reg, &runtime);
     __ jmp(&set_slice_header, Label::kNear);
     __ bind(&two_byte_slice);
     __ AllocateTwoByteSlicedString(eax, ebx, no_reg, &runtime);
@@ -3125,8 +2753,8 @@
   __ test_b(ebx, kStringEncodingMask);
   __ j(zero, &two_byte_sequential);
 
-  // Sequential ASCII string.  Allocate the result.
-  __ AllocateAsciiString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
+  // Sequential one byte string.  Allocate the result.
+  __ AllocateOneByteString(eax, ecx, ebx, edx, edi, &runtime_drop_two);
 
   // eax: result string
   // ecx: result string length
@@ -3182,7 +2810,7 @@
 
   // Just jump to runtime to create the sub string.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenSubString, 3, 1);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
 
   __ bind(&single_char);
   // eax: string
@@ -3197,11 +2825,11 @@
 }
 
 
-void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                                      Register left,
-                                                      Register right,
-                                                      Register scratch1,
-                                                      Register scratch2) {
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                                   Register left,
+                                                   Register right,
+                                                   Register scratch1,
+                                                   Register scratch2) {
   Register length = scratch1;
 
   // Compare lengths.
@@ -3224,8 +2852,8 @@
 
   // Compare characters.
   __ bind(&compare_chars);
-  GenerateAsciiCharsCompareLoop(masm, left, right, length, scratch2,
-                                &strings_not_equal, Label::kNear);
+  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+                                  &strings_not_equal, Label::kNear);
 
   // Characters are equal.
   __ Move(eax, Immediate(Smi::FromInt(EQUAL)));
@@ -3233,12 +2861,9 @@
 }
 
 
-void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                                        Register left,
-                                                        Register right,
-                                                        Register scratch1,
-                                                        Register scratch2,
-                                                        Register scratch3) {
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3) {
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->string_compare_native(), 1);
 
@@ -3264,8 +2889,8 @@
 
   // Compare characters.
   Label result_not_equal;
-  GenerateAsciiCharsCompareLoop(masm, left, right, min_length, scratch2,
-                                &result_not_equal, Label::kNear);
+  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                  &result_not_equal, Label::kNear);
 
   // Compare lengths -  strings up to min-length are equal.
   __ bind(&compare_lengths);
@@ -3299,13 +2924,9 @@
 }
 
 
-void StringCompareStub::GenerateAsciiCharsCompareLoop(
-    MacroAssembler* masm,
-    Register left,
-    Register right,
-    Register length,
-    Register scratch,
-    Label* chars_not_equal,
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch, Label* chars_not_equal,
     Label::Distance chars_not_equal_near) {
   // Change index to run from -length to -1 by adding length to string
   // start. This means that the loop ends when index reaches zero, which
@@ -3351,20 +2972,21 @@
 
   __ bind(&not_same);
 
-  // Check that both objects are sequential ASCII strings.
-  __ JumpIfNotBothSequentialAsciiStrings(edx, eax, ecx, ebx, &runtime);
+  // Check that both objects are sequential one-byte strings.
+  __ JumpIfNotBothSequentialOneByteStrings(edx, eax, ecx, ebx, &runtime);
 
-  // Compare flat ASCII strings.
+  // Compare flat one-byte strings.
   // Drop arguments from the stack.
   __ pop(ecx);
   __ add(esp, Immediate(2 * kPointerSize));
   __ push(ecx);
-  GenerateCompareFlatAsciiStrings(masm, edx, eax, ecx, ebx, edi);
+  StringHelper::GenerateCompareFlatOneByteStrings(masm, edx, eax, ecx, ebx,
+                                                  edi);
 
   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
   // tagged as a small integer.
   __ bind(&runtime);
-  __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
 }
 
 
@@ -3391,13 +3013,13 @@
 
   // Tail call into the stub that handles binary operations with allocation
   // sites.
-  BinaryOpWithAllocationSiteStub stub(isolate(), state_);
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
   __ TailCallStub(&stub);
 }
 
 
-void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::SMI);
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::SMI);
   Label miss;
   __ mov(ecx, edx);
   __ or_(ecx, eax);
@@ -3422,17 +3044,17 @@
 }
 
 
-void ICCompareStub::GenerateNumbers(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::NUMBER);
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
 
   Label generic_stub;
   Label unordered, maybe_undefined1, maybe_undefined2;
   Label miss;
 
-  if (left_ == CompareIC::SMI) {
+  if (left() == CompareICState::SMI) {
     __ JumpIfNotSmi(edx, &miss);
   }
-  if (right_ == CompareIC::SMI) {
+  if (right() == CompareICState::SMI) {
     __ JumpIfNotSmi(eax, &miss);
   }
 
@@ -3451,12 +3073,12 @@
 
   __ bind(&unordered);
   __ bind(&generic_stub);
-  ICCompareStub stub(isolate(), op_, CompareIC::GENERIC, CompareIC::GENERIC,
-                     CompareIC::GENERIC);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
   __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
 
   __ bind(&maybe_undefined1);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ cmp(eax, Immediate(isolate()->factory()->undefined_value()));
     __ j(not_equal, &miss);
     __ JumpIfSmi(edx, &unordered);
@@ -3466,7 +3088,7 @@
   }
 
   __ bind(&maybe_undefined2);
-  if (Token::IsOrderedRelationalCompareOp(op_)) {
+  if (Token::IsOrderedRelationalCompareOp(op())) {
     __ cmp(edx, Immediate(isolate()->factory()->undefined_value()));
     __ j(equal, &unordered);
   }
@@ -3476,9 +3098,9 @@
 }
 
 
-void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::INTERNALIZED_STRING);
-  ASSERT(GetCondition() == equal);
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+  DCHECK(GetCondition() == equal);
 
   // Registers containing left and right operands respectively.
   Register left = edx;
@@ -3508,7 +3130,7 @@
   __ cmp(left, right);
   // Make sure eax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(eax));
+  DCHECK(right.is(eax));
   __ j(not_equal, &done, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -3521,9 +3143,9 @@
 }
 
 
-void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::UNIQUE_NAME);
-  ASSERT(GetCondition() == equal);
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  DCHECK(GetCondition() == equal);
 
   // Registers containing left and right operands respectively.
   Register left = edx;
@@ -3545,15 +3167,15 @@
   __ movzx_b(tmp1, FieldOperand(tmp1, Map::kInstanceTypeOffset));
   __ movzx_b(tmp2, FieldOperand(tmp2, Map::kInstanceTypeOffset));
 
-  __ JumpIfNotUniqueName(tmp1, &miss, Label::kNear);
-  __ JumpIfNotUniqueName(tmp2, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss, Label::kNear);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss, Label::kNear);
 
   // Unique names are compared by identity.
   Label done;
   __ cmp(left, right);
   // Make sure eax is non-zero. At this point input operands are
   // guaranteed to be non-zero.
-  ASSERT(right.is(eax));
+  DCHECK(right.is(eax));
   __ j(not_equal, &done, Label::kNear);
   STATIC_ASSERT(EQUAL == 0);
   STATIC_ASSERT(kSmiTag == 0);
@@ -3566,11 +3188,11 @@
 }
 
 
-void ICCompareStub::GenerateStrings(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::STRING);
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
   Label miss;
 
-  bool equality = Token::IsEqualityOp(op_);
+  bool equality = Token::IsEqualityOp(op());
 
   // Registers containing left and right operands respectively.
   Register left = edx;
@@ -3621,22 +3243,22 @@
     __ j(not_zero, &do_compare, Label::kNear);
     // Make sure eax is non-zero. At this point input operands are
     // guaranteed to be non-zero.
-    ASSERT(right.is(eax));
+    DCHECK(right.is(eax));
     __ ret(0);
     __ bind(&do_compare);
   }
 
-  // Check that both strings are sequential ASCII.
+  // Check that both strings are sequential one-byte.
   Label runtime;
-  __ JumpIfNotBothSequentialAsciiStrings(left, right, tmp1, tmp2, &runtime);
+  __ JumpIfNotBothSequentialOneByteStrings(left, right, tmp1, tmp2, &runtime);
 
-  // Compare flat ASCII strings. Returns when done.
+  // Compare flat one byte strings. Returns when done.
   if (equality) {
-    StringCompareStub::GenerateFlatAsciiStringEquals(
-        masm, left, right, tmp1, tmp2);
+    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+                                                  tmp2);
   } else {
-    StringCompareStub::GenerateCompareFlatAsciiStrings(
-        masm, left, right, tmp1, tmp2, tmp3);
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+                                                    tmp2, tmp3);
   }
 
   // Handle more complex cases in runtime.
@@ -3648,7 +3270,7 @@
   if (equality) {
     __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
   } else {
-    __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1);
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
   }
 
   __ bind(&miss);
@@ -3656,8 +3278,8 @@
 }
 
 
-void ICCompareStub::GenerateObjects(MacroAssembler* masm) {
-  ASSERT(state_ == CompareIC::OBJECT);
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::OBJECT);
   Label miss;
   __ mov(ecx, edx);
   __ and_(ecx, eax);
@@ -3668,7 +3290,7 @@
   __ CmpObjectType(edx, JS_OBJECT_TYPE, ecx);
   __ j(not_equal, &miss, Label::kNear);
 
-  ASSERT(GetCondition() == equal);
+  DCHECK(GetCondition() == equal);
   __ sub(eax, edx);
   __ ret(0);
 
@@ -3677,7 +3299,7 @@
 }
 
 
-void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) {
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
   Label miss;
   __ mov(ecx, edx);
   __ and_(ecx, eax);
@@ -3698,7 +3320,7 @@
 }
 
 
-void ICCompareStub::GenerateMiss(MacroAssembler* masm) {
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
   {
     // Call the runtime system in a fresh internal frame.
     ExternalReference miss = ExternalReference(IC_Utility(IC::kCompareIC_Miss),
@@ -3708,7 +3330,7 @@
     __ push(eax);
     __ push(edx);  // And also use them as the arguments.
     __ push(eax);
-    __ push(Immediate(Smi::FromInt(op_)));
+    __ push(Immediate(Smi::FromInt(op())));
     __ CallExternalReference(miss, 3);
     // Compute the entry point of the rewritten stub.
     __ lea(edi, FieldOperand(eax, Code::kHeaderSize));
@@ -3732,7 +3354,7 @@
                                                       Register properties,
                                                       Handle<Name> name,
                                                       Register r0) {
-  ASSERT(name->IsUniqueName());
+  DCHECK(name->IsUniqueName());
 
   // If names of slots in range from 1 to kProbes - 1 for the hash value are
   // not equal to the name and kProbes-th slot is not used (its name is the
@@ -3750,11 +3372,11 @@
                                    NameDictionary::GetProbeOffset(i))));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ lea(index, Operand(index, index, times_2, 0));  // index *= 3.
     Register entity_name = r0;
     // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
+    DCHECK_EQ(kSmiTagSize, 1);
     __ mov(entity_name, Operand(properties, index, times_half_pointer_size,
                                 kElementsStartOffset - kHeapObjectTag));
     __ cmp(entity_name, masm->isolate()->factory()->undefined_value());
@@ -3771,8 +3393,8 @@
 
     // Check if the entry name is not a unique name.
     __ mov(entity_name, FieldOperand(entity_name, HeapObject::kMapOffset));
-    __ JumpIfNotUniqueName(FieldOperand(entity_name, Map::kInstanceTypeOffset),
-                           miss);
+    __ JumpIfNotUniqueNameInstanceType(
+        FieldOperand(entity_name, Map::kInstanceTypeOffset), miss);
     __ bind(&good);
   }
 
@@ -3798,10 +3420,10 @@
                                                       Register name,
                                                       Register r0,
                                                       Register r1) {
-  ASSERT(!elements.is(r0));
-  ASSERT(!elements.is(r1));
-  ASSERT(!name.is(r0));
-  ASSERT(!name.is(r1));
+  DCHECK(!elements.is(r0));
+  DCHECK(!elements.is(r1));
+  DCHECK(!name.is(r0));
+  DCHECK(!name.is(r1));
 
   __ AssertName(name);
 
@@ -3822,7 +3444,7 @@
     __ and_(r0, r1);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
+    DCHECK(NameDictionary::kEntrySize == 3);
     __ lea(r0, Operand(r0, r0, times_2, 0));  // r0 = r0 * 3
 
     // Check if the key is identical to the name.
@@ -3864,9 +3486,9 @@
 
   Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
 
-  Register scratch = result_;
+  Register scratch = result();
 
-  __ mov(scratch, FieldOperand(dictionary_, kCapacityOffset));
+  __ mov(scratch, FieldOperand(dictionary(), kCapacityOffset));
   __ dec(scratch);
   __ SmiUntag(scratch);
   __ push(scratch);
@@ -3885,14 +3507,12 @@
     __ and_(scratch, Operand(esp, 0));
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(NameDictionary::kEntrySize == 3);
-    __ lea(index_, Operand(scratch, scratch, times_2, 0));  // index *= 3.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    __ lea(index(), Operand(scratch, scratch, times_2, 0));  // index *= 3.
 
     // Having undefined at this place means the name is not contained.
-    ASSERT_EQ(kSmiTagSize, 1);
-    __ mov(scratch, Operand(dictionary_,
-                            index_,
-                            times_pointer_size,
+    DCHECK_EQ(kSmiTagSize, 1);
+    __ mov(scratch, Operand(dictionary(), index(), times_pointer_size,
                             kElementsStartOffset - kHeapObjectTag));
     __ cmp(scratch, isolate()->factory()->undefined_value());
     __ j(equal, &not_in_dictionary);
@@ -3901,15 +3521,16 @@
     __ cmp(scratch, Operand(esp, 3 * kPointerSize));
     __ j(equal, &in_dictionary);
 
-    if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) {
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
       // If we hit a key that is not a unique name during negative
       // lookup we have to bailout as this key might be equal to the
       // key we are looking for.
 
       // Check if the entry name is not a unique name.
       __ mov(scratch, FieldOperand(scratch, HeapObject::kMapOffset));
-      __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset),
-                             &maybe_in_dictionary);
+      __ JumpIfNotUniqueNameInstanceType(
+          FieldOperand(scratch, Map::kInstanceTypeOffset),
+          &maybe_in_dictionary);
     }
   }
 
@@ -3917,19 +3538,19 @@
   // If we are doing negative lookup then probing failure should be
   // treated as a lookup success. For positive lookup probing failure
   // should be treated as lookup failure.
-  if (mode_ == POSITIVE_LOOKUP) {
-    __ mov(result_, Immediate(0));
+  if (mode() == POSITIVE_LOOKUP) {
+    __ mov(result(), Immediate(0));
     __ Drop(1);
     __ ret(2 * kPointerSize);
   }
 
   __ bind(&in_dictionary);
-  __ mov(result_, Immediate(1));
+  __ mov(result(), Immediate(1));
   __ Drop(1);
   __ ret(2 * kPointerSize);
 
   __ bind(&not_in_dictionary);
-  __ mov(result_, Immediate(0));
+  __ mov(result(), Immediate(0));
   __ Drop(1);
   __ ret(2 * kPointerSize);
 }
@@ -3937,8 +3558,10 @@
 
 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
     Isolate* isolate) {
-  StoreBufferOverflowStub stub(isolate);
+  StoreBufferOverflowStub stub(isolate, kDontSaveFPRegs);
   stub.GetCode();
+  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+  stub2.GetCode();
 }
 
 
@@ -3957,10 +3580,8 @@
   __ jmp(&skip_to_incremental_noncompacting, Label::kNear);
   __ jmp(&skip_to_incremental_compacting, Label::kFar);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -3982,7 +3603,7 @@
 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
   regs_.Save(masm);
 
-  if (remembered_set_action_ == EMIT_REMEMBERED_SET) {
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
     Label dont_need_remembered_set;
 
     __ mov(regs_.scratch0(), Operand(regs_.address(), 0));
@@ -4004,9 +3625,7 @@
         mode);
     InformIncrementalMarker(masm);
     regs_.Restore(masm);
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
 
     __ bind(&dont_need_remembered_set);
@@ -4023,7 +3642,7 @@
 
 
 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
-  regs_.SaveCallerSaveRegisters(masm);
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
   int argument_count = 3;
   __ PrepareCallCFunction(argument_count, regs_.scratch0());
   __ mov(Operand(esp, 0 * kPointerSize), regs_.object());
@@ -4036,7 +3655,7 @@
       ExternalReference::incremental_marking_record_write_function(isolate()),
       argument_count);
 
-  regs_.RestoreCallerSaveRegisters(masm);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
 }
 
 
@@ -4067,9 +3686,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -4114,9 +3731,7 @@
 
   regs_.Restore(masm);
   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
-    __ RememberedSetHelper(object_,
-                           address_,
-                           value_,
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
                            MacroAssembler::kReturnAtEnd);
   } else {
     __ ret(0);
@@ -4186,8 +3801,7 @@
                            FixedArrayBase::kHeaderSize));
   __ mov(Operand(ecx, 0), eax);
   // Update the write barrier for the array store.
-  __ RecordWrite(ebx, ecx, eax,
-                 EMIT_REMEMBERED_SET,
+  __ RecordWrite(ebx, ecx, eax, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                  OMIT_SMI_CHECK);
   __ ret(0);
 
@@ -4216,21 +3830,34 @@
 
 
 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
-  CEntryStub ces(isolate(), 1);
+  CEntryStub ces(isolate(), 1, kSaveFPRegs);
   __ call(ces.GetCode(), RelocInfo::CODE_TARGET);
   int parameter_count_offset =
       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
   __ mov(ebx, MemOperand(ebp, parameter_count_offset));
   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
   __ pop(ecx);
-  int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE
-      ? kPointerSize
-      : 0;
+  int additional_offset =
+      function_mode() == JS_FUNCTION_STUB_MODE ? kPointerSize : 0;
   __ lea(esp, MemOperand(esp, ebx, times_pointer_size, additional_offset));
   __ jmp(ecx);  // Return to IC Miss stub, continuation still on stack.
 }
 
 
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorLoadStub stub(isolate(), state());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorKeyedLoadStub stub(isolate());
+  __ jmp(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
   if (masm->isolate()->function_entry_hook() != NULL) {
     ProfileEntryHookStub stub(masm->isolate());
@@ -4257,7 +3884,7 @@
   __ push(eax);
 
   // Call the entry hook.
-  ASSERT(isolate()->function_entry_hook() != NULL);
+  DCHECK(isolate()->function_entry_hook() != NULL);
   __ call(FUNCTION_ADDR(isolate()->function_entry_hook()),
           RelocInfo::RUNTIME_ENTRY);
   __ add(esp, Immediate(2 * kPointerSize));
@@ -4310,12 +3937,12 @@
   // esp[4] - last argument
   Label normal_sequence;
   if (mode == DONT_OVERRIDE) {
-    ASSERT(FAST_SMI_ELEMENTS == 0);
-    ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
-    ASSERT(FAST_ELEMENTS == 2);
-    ASSERT(FAST_HOLEY_ELEMENTS == 3);
-    ASSERT(FAST_DOUBLE_ELEMENTS == 4);
-    ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+    DCHECK(FAST_SMI_ELEMENTS == 0);
+    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+    DCHECK(FAST_ELEMENTS == 2);
+    DCHECK(FAST_HOLEY_ELEMENTS == 3);
+    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
 
     // is the low bit set? If so, we are holey and that is good.
     __ test_b(edx, 1);
@@ -4425,7 +4052,7 @@
 void ArrayConstructorStub::GenerateDispatchToArrayStub(
     MacroAssembler* masm,
     AllocationSiteOverrideMode mode) {
-  if (argument_count_ == ANY) {
+  if (argument_count() == ANY) {
     Label not_zero_case, not_one_case;
     __ test(eax, eax);
     __ j(not_zero, &not_zero_case);
@@ -4438,11 +4065,11 @@
 
     __ bind(&not_one_case);
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
-  } else if (argument_count_ == NONE) {
+  } else if (argument_count() == NONE) {
     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
-  } else if (argument_count_ == ONE) {
+  } else if (argument_count() == ONE) {
     CreateArrayDispatchOneArgument(masm, mode);
-  } else if (argument_count_ == MORE_THAN_ONE) {
+  } else if (argument_count() == MORE_THAN_ONE) {
     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
   } else {
     UNREACHABLE();
@@ -4452,7 +4079,7 @@
 
 void ArrayConstructorStub::Generate(MacroAssembler* masm) {
   // ----------- S t a t e -------------
-  //  -- eax : argc (only if argument_count_ == ANY)
+  //  -- eax : argc (only if argument_count() == ANY)
   //  -- ebx : AllocationSite or undefined
   //  -- edi : constructor
   //  -- esp[0] : return address
@@ -4600,9 +4227,9 @@
   Register return_address = edi;
   Register context = esi;
 
-  int argc = ArgumentBits::decode(bit_field_);
-  bool is_store = IsStoreBits::decode(bit_field_);
-  bool call_data_undefined = CallDataUndefinedBits::decode(bit_field_);
+  int argc = this->argc();
+  bool is_store = this->is_store();
+  bool call_data_undefined = this->call_data_undefined();
 
   typedef FunctionCallbackArguments FCA;
 
@@ -4706,6 +4333,7 @@
   //  -- ...
   //  -- edx                    : api_function_address
   // -----------------------------------
+  DCHECK(edx.is(ApiGetterDescriptor::function_address()));
 
   // array for v8::Arguments::values_, handler for name and pointer
   // to the values (it is considered a smi by the GC).
diff --git a/src/x87/code-stubs-x87.h b/src/x87/code-stubs-x87.h
index 13ab10f..03ff477 100644
--- a/src/x87/code-stubs-x87.h
+++ b/src/x87/code-stubs-x87.h
@@ -5,9 +5,6 @@
 #ifndef V8_X87_CODE_STUBS_X87_H_
 #define V8_X87_CODE_STUBS_X87_H_
 
-#include "src/macro-assembler.h"
-#include "src/ic-inl.h"
-
 namespace v8 {
 namespace internal {
 
@@ -17,22 +14,6 @@
                      Label* call_generic_code);
 
 
-class StoreBufferOverflowStub: public PlatformCodeStub {
- public:
-  explicit StoreBufferOverflowStub(Isolate* isolate)
-      : PlatformCodeStub(isolate) { }
-
-  void Generate(MacroAssembler* masm);
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-  virtual bool SometimesSetsUpAFrame() { return false; }
-
- private:
-  Major MajorKey() { return StoreBufferOverflow; }
-  int MinorKey() { return 0; }
-};
-
-
 class StringHelper : public AllStatic {
  public:
   // Generate code for copying characters using the rep movs instruction.
@@ -45,69 +26,26 @@
                                      Register scratch,
                                      String::Encoding encoding);
 
-  // Generate string hash.
-  static void GenerateHashInit(MacroAssembler* masm,
-                               Register hash,
-                               Register character,
-                               Register scratch);
-  static void GenerateHashAddCharacter(MacroAssembler* masm,
-                                       Register hash,
-                                       Register character,
-                                       Register scratch);
-  static void GenerateHashGetHash(MacroAssembler* masm,
-                                  Register hash,
-                                  Register scratch);
+  // Compares two flat one byte strings and returns result in eax.
+  static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
+                                                Register left, Register right,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3);
 
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
-};
-
-
-class SubStringStub: public PlatformCodeStub {
- public:
-  explicit SubStringStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
-
- private:
-  Major MajorKey() { return SubString; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-};
-
-
-class StringCompareStub: public PlatformCodeStub {
- public:
-  explicit StringCompareStub(Isolate* isolate) : PlatformCodeStub(isolate) { }
-
-  // Compares two flat ASCII strings and returns result in eax.
-  static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
-                                              Register left,
-                                              Register right,
+  // Compares two flat one byte strings for equality and returns result in eax.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
                                               Register scratch1,
-                                              Register scratch2,
-                                              Register scratch3);
-
-  // Compares two flat ASCII strings for equality and returns result
-  // in eax.
-  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register scratch1,
-                                            Register scratch2);
+                                              Register scratch2);
 
  private:
-  virtual Major MajorKey() { return StringCompare; }
-  virtual int MinorKey() { return 0; }
-  virtual void Generate(MacroAssembler* masm);
-
-  static void GenerateAsciiCharsCompareLoop(
-      MacroAssembler* masm,
-      Register left,
-      Register right,
-      Register length,
-      Register scratch,
-      Label* chars_not_equal,
+  static void GenerateOneByteCharsCompareLoop(
+      MacroAssembler* masm, Register left, Register right, Register length,
+      Register scratch, Label* chars_not_equal,
       Label::Distance chars_not_equal_near = Label::kFar);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
 };
 
 
@@ -115,15 +53,13 @@
  public:
   enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
 
-  NameDictionaryLookupStub(Isolate* isolate,
-                           Register dictionary,
-                           Register result,
-                           Register index,
-                           LookupMode mode)
-      : PlatformCodeStub(isolate),
-        dictionary_(dictionary), result_(result), index_(index), mode_(mode) { }
-
-  void Generate(MacroAssembler* masm);
+  NameDictionaryLookupStub(Isolate* isolate, Register dictionary,
+                           Register result, Register index, LookupMode mode)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = DictionaryBits::encode(dictionary.code()) |
+                 ResultBits::encode(result.code()) |
+                 IndexBits::encode(index.code()) | LookupModeBits::encode(mode);
+  }
 
   static void GenerateNegativeLookup(MacroAssembler* masm,
                                      Label* miss,
@@ -154,44 +90,49 @@
       NameDictionary::kHeaderSize +
       NameDictionary::kElementsStartIndex * kPointerSize;
 
-  Major MajorKey() { return NameDictionaryLookup; }
-
-  int MinorKey() {
-    return DictionaryBits::encode(dictionary_.code()) |
-        ResultBits::encode(result_.code()) |
-        IndexBits::encode(index_.code()) |
-        LookupModeBits::encode(mode_);
+  Register dictionary() const {
+    return Register::from_code(DictionaryBits::decode(minor_key_));
   }
 
+  Register result() const {
+    return Register::from_code(ResultBits::decode(minor_key_));
+  }
+
+  Register index() const {
+    return Register::from_code(IndexBits::decode(minor_key_));
+  }
+
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
   class DictionaryBits: public BitField<int, 0, 3> {};
   class ResultBits: public BitField<int, 3, 3> {};
   class IndexBits: public BitField<int, 6, 3> {};
   class LookupModeBits: public BitField<LookupMode, 9, 1> {};
 
-  Register dictionary_;
-  Register result_;
-  Register index_;
-  LookupMode mode_;
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
 };
 
 
 class RecordWriteStub: public PlatformCodeStub {
  public:
-  RecordWriteStub(Isolate* isolate,
-                  Register object,
-                  Register value,
-                  Register address,
-                  RememberedSetAction remembered_set_action)
+  RecordWriteStub(Isolate* isolate, Register object, Register value,
+                  Register address, RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
       : PlatformCodeStub(isolate),
-        object_(object),
-        value_(value),
-        address_(address),
-        remembered_set_action_(remembered_set_action),
         regs_(object,   // An input reg.
               address,  // An input reg.
               value) {  // One scratch reg.
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
   }
 
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
   enum Mode {
     STORE_BUFFER_ONLY,
     INCREMENTAL,
@@ -214,13 +155,13 @@
       return INCREMENTAL;
     }
 
-    ASSERT(first_instruction == kTwoByteNopInstruction);
+    DCHECK(first_instruction == kTwoByteNopInstruction);
 
     if (second_instruction == kFiveByteJumpInstruction) {
       return INCREMENTAL_COMPACTION;
     }
 
-    ASSERT(second_instruction == kFiveByteNopInstruction);
+    DCHECK(second_instruction == kFiveByteNopInstruction);
 
     return STORE_BUFFER_ONLY;
   }
@@ -228,25 +169,27 @@
   static void Patch(Code* stub, Mode mode) {
     switch (mode) {
       case STORE_BUFFER_ONLY:
-        ASSERT(GetMode(stub) == INCREMENTAL ||
+        DCHECK(GetMode(stub) == INCREMENTAL ||
                GetMode(stub) == INCREMENTAL_COMPACTION);
         stub->instruction_start()[0] = kTwoByteNopInstruction;
         stub->instruction_start()[2] = kFiveByteNopInstruction;
         break;
       case INCREMENTAL:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         stub->instruction_start()[0] = kTwoByteJumpInstruction;
         break;
       case INCREMENTAL_COMPACTION:
-        ASSERT(GetMode(stub) == STORE_BUFFER_ONLY);
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
         stub->instruction_start()[0] = kTwoByteNopInstruction;
         stub->instruction_start()[2] = kFiveByteJumpInstruction;
         break;
     }
-    ASSERT(GetMode(stub) == mode);
-    CPU::FlushICache(stub->instruction_start(), 7);
+    DCHECK(GetMode(stub) == mode);
+    CpuFeatures::FlushICache(stub->instruction_start(), 7);
   }
 
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
  private:
   // This is a helper class for freeing up 3 scratch registers, where the third
   // is always ecx (needed for shift operations).  The input is two registers
@@ -262,7 +205,7 @@
           object_(object),
           address_(address),
           scratch0_(scratch0) {
-      ASSERT(!AreAliased(scratch0, object, address, no_reg));
+      DCHECK(!AreAliased(scratch0, object, address, no_reg));
       scratch1_ = GetRegThatIsNotEcxOr(object_, address_, scratch0_);
       if (scratch0.is(ecx)) {
         scratch0_ = GetRegThatIsNotEcxOr(object_, address_, scratch1_);
@@ -273,15 +216,15 @@
       if (address.is(ecx)) {
         address_ = GetRegThatIsNotEcxOr(object_, scratch0_, scratch1_);
       }
-      ASSERT(!AreAliased(scratch0_, object_, address_, ecx));
+      DCHECK(!AreAliased(scratch0_, object_, address_, ecx));
     }
 
     void Save(MacroAssembler* masm) {
-      ASSERT(!address_orig_.is(object_));
-      ASSERT(object_.is(object_orig_) || address_.is(address_orig_));
-      ASSERT(!AreAliased(object_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
-      ASSERT(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
+      DCHECK(!address_orig_.is(object_));
+      DCHECK(object_.is(object_orig_) || address_.is(address_orig_));
+      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
+      DCHECK(!AreAliased(object_orig_, address_, scratch1_, scratch0_));
+      DCHECK(!AreAliased(object_, address_orig_, scratch1_, scratch0_));
       // We don't have to save scratch0_orig_ because it was given to us as
       // a scratch register.  But if we had to switch to a different reg then
       // we should save the new scratch0_.
@@ -327,12 +270,23 @@
     // saved registers that were not already preserved.  The caller saved
     // registers are eax, ecx and edx.  The three scratch registers (incl. ecx)
     // will be restored by other means so we don't bother pushing them here.
-    void SaveCallerSaveRegisters(MacroAssembler* masm) {
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
       if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->push(eax);
       if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->push(edx);
+      if (mode == kSaveFPRegs) {
+        // Save FPU state in m108byte.
+        masm->sub(esp, Immediate(108));
+        masm->fnsave(Operand(esp, 0));
+      }
     }
 
-    inline void RestoreCallerSaveRegisters(MacroAssembler*masm) {
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        // Restore FPU state in m108byte.
+        masm->frstor(Operand(esp, 0));
+        masm->add(esp, Immediate(108));
+      }
       if (!scratch0_.is(edx) && !scratch1_.is(edx)) masm->pop(edx);
       if (!scratch0_.is(eax) && !scratch1_.is(eax)) masm->pop(eax);
     }
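
The 108 bytes reserved around fnsave/frstor are the architected size of the 32-bit FNSAVE image: a 28-byte FPU environment followed by the eight 80-bit stack registers. A sketch of the arithmetic:

    // FNSAVE (32-bit protected mode): 28-byte environment + 8 x 10-byte regs.
    constexpr int kFpuEnvironmentSize = 28;
    constexpr int kX87RegisterSize = 10;  // 80-bit extended precision
    constexpr int kNumX87Registers = 8;   // st0..st7
    static_assert(
        kFpuEnvironmentSize + kNumX87Registers * kX87RegisterSize == 108,
        "matches the Immediate(108) stack adjustments above");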
@@ -372,9 +326,11 @@
   enum OnNoNeedToInformIncrementalMarker {
     kReturnOnNoNeedToInformIncrementalMarker,
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
-  }
-;
-  void Generate(MacroAssembler* masm);
+  };
+
+  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+
+  virtual void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -382,29 +338,39 @@
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  Major MajorKey() { return RecordWrite; }
-
-  int MinorKey() {
-    return ObjectBits::encode(object_.code()) |
-        ValueBits::encode(value_.code()) |
-        AddressBits::encode(address_.code()) |
-        RememberedSetActionBits::encode(remembered_set_action_);
-  }
-
   void Activate(Code* code) {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
   class ObjectBits: public BitField<int, 0, 3> {};
   class ValueBits: public BitField<int, 3, 3> {};
   class AddressBits: public BitField<int, 6, 3> {};
   class RememberedSetActionBits: public BitField<RememberedSetAction, 9, 1> {};
+  class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 10, 1> {};
 
-  Register object_;
-  Register value_;
-  Register address_;
-  RememberedSetAction remembered_set_action_;
   RegisterAllocation regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
 };
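
The BitField members above pack every stub parameter into one integer key: three 3-bit register codes (enough for ia32's eight GP registers), then one bit each for the remembered-set action and the FP-save mode, which is exactly what the object()/value()/address() accessors decode out of minor_key_. A generic sketch of the encode/decode shape (an assumed reimplementation, not the library's own):

    #include <cstdint>

    // Stores a T in bits [shift, shift + size) of a 32-bit key.
    template <class T, int shift, int size>
    struct BitField {
      static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
      static constexpr uint32_t encode(T value) {
        return (static_cast<uint32_t>(value) << shift) & kMask;
      }
      static constexpr T decode(uint32_t key) {
        return static_cast<T>((key & kMask) >> shift);
      }
    };

    using ObjectBits = BitField<int, 0, 3>;
    using ValueBits = BitField<int, 3, 3>;

    constexpr uint32_t key = ObjectBits::encode(2) | ValueBits::encode(5);
    static_assert(ObjectBits::decode(key) == 2, "round-trips");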
 
 
diff --git a/src/x87/codegen-x87.cc b/src/x87/codegen-x87.cc
index 5091e88..e33959e 100644
--- a/src/x87/codegen-x87.cc
+++ b/src/x87/codegen-x87.cc
@@ -7,7 +7,7 @@
 #if V8_TARGET_ARCH_X87
 
 #include "src/codegen.h"
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
@@ -19,14 +19,14 @@
 
 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
   masm->EnterFrame(StackFrame::INTERNAL);
-  ASSERT(!masm->has_frame());
+  DCHECK(!masm->has_frame());
   masm->set_has_frame(true);
 }
 
 
 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
   masm->LeaveFrame(StackFrame::INTERNAL);
-  ASSERT(masm->has_frame());
+  DCHECK(masm->has_frame());
   masm->set_has_frame(false);
 }
 
@@ -79,7 +79,8 @@
 MemMoveFunction CreateMemMoveFunction() {
   size_t actual_size;
   // Allocate buffer in executable space.
-  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
   if (buffer == NULL) return NULL;
   MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
   LabelConverter conv(buffer);
@@ -180,9 +181,9 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  ASSERT(!RelocInfo::RequiresRelocation(desc));
-  CPU::FlushICache(buffer, actual_size);
-  OS::ProtectCode(buffer, actual_size);
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
   // TODO(jkummerow): It would be nice to register this code creation event
   // with the PROFILE / GDBJIT system.
   return FUNCTION_CAST<MemMoveFunction>(buffer);
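
CreateMemMoveFunction follows the usual JIT lifecycle: allocate writable and executable memory, assemble into it, flush the instruction cache, then drop write access so the code page is immutable. A hedged POSIX sketch of the same lifecycle (mmap/mprotect standing in for the base::OS wrappers; strict W^X platforms would need a map-then-remap variant):

    #include <sys/mman.h>
    #include <cstddef>

    // Allocate an RWX buffer for freshly assembled code.
    void* AllocateCodeBuffer(size_t size) {
      void* buf = mmap(nullptr, size, PROT_READ | PROT_WRITE | PROT_EXEC,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      return buf == MAP_FAILED ? nullptr : buf;
    }

    // Mirror of ProtectCode: writes fault from here on, execution still works.
    void ProtectCode(void* buf, size_t size) {
      mprotect(buf, size, PROT_READ | PROT_EXEC);
    }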
@@ -198,40 +199,43 @@
 
 
 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-    MacroAssembler* masm, AllocationSiteMode mode,
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
     Label* allocation_memento_found) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
+  Register scratch = edi;
+  DCHECK(!AreAliased(receiver, key, value, target_map, scratch));
+
   if (mode == TRACK_ALLOCATION_SITE) {
-    ASSERT(allocation_memento_found != NULL);
-    __ JumpIfJSArrayHasAllocationMemento(edx, edi, allocation_memento_found);
+    DCHECK(allocation_memento_found != NULL);
+    __ JumpIfJSArrayHasAllocationMemento(
+        receiver, scratch, allocation_memento_found);
   }
 
   // Set transitioned map.
-  __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ mov(FieldOperand(receiver, HeapObject::kMapOffset), target_map);
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
+                      kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 }
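
RecordWriteField here is the generational write barrier: after the map pointer is stored, the collector must learn about any old-to-new reference so a minor GC can find it without scanning all of old space (EMIT_REMEMBERED_SET), and incremental marking can re-scan the slot. A conceptual sketch of such a barrier, not V8's actual mechanism:

    #include <unordered_set>

    struct Heap {
      char* new_space_start;  // assumed nursery bounds
      char* new_space_end;
      std::unordered_set<void**> remembered_set;

      bool InNewSpace(void* p) const {
        char* c = static_cast<char*>(p);
        return c >= new_space_start && c < new_space_end;
      }

      // Store plus barrier: remember old-space slots that point at new space.
      void WriteBarrier(void** slot, void* value) {
        *slot = value;
        if (!InNewSpace(slot) && InNewSpace(value)) {
          remembered_set.insert(slot);  // analogous to EMIT_REMEMBERED_SET
        }
      }
    };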
 
 
 void ElementsTransitionGenerator::GenerateSmiToDouble(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Return address is on the stack.
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+  DCHECK(value.is(eax));
+  DCHECK(target_map.is(ebx));
+
   Label loop, entry, convert_hole, gc_required, only_change_map;
 
   if (mode == TRACK_ALLOCATION_SITE) {
@@ -267,12 +271,8 @@
   // Replace receiver's backing store with newly created FixedDoubleArray.
   __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
   __ mov(ebx, eax);
-  __ RecordWriteField(edx,
-                      JSObject::kElementsOffset,
-                      ebx,
-                      edi,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, JSObject::kElementsOffset, ebx, edi, kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 
   __ mov(edi, FieldOperand(esi, FixedArray::kLengthOffset));
 
@@ -331,24 +331,25 @@
   // ebx: target map
   // Set transitioned map.
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 }
 
 
 void ElementsTransitionGenerator::GenerateDoubleToObject(
-    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ebx    : target map
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
+    MacroAssembler* masm,
+    Register receiver,
+    Register key,
+    Register value,
+    Register target_map,
+    AllocationSiteMode mode,
+    Label* fail) {
+  // Return address is on the stack.
+  DCHECK(receiver.is(edx));
+  DCHECK(key.is(ecx));
+  DCHECK(value.is(eax));
+  DCHECK(target_map.is(ebx));
+
   Label loop, entry, convert_hole, gc_required, only_change_map, success;
 
   if (mode == TRACK_ALLOCATION_SITE) {
@@ -386,12 +387,8 @@
   // Set transitioned map.
   __ bind(&only_change_map);
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   __ jmp(&success);
 
   // Call into runtime if GC is required.
@@ -420,10 +417,7 @@
   __ mov(FieldOperand(edx, HeapNumber::kValueOffset + kPointerSize), esi);
   __ mov(FieldOperand(eax, ebx, times_2, FixedArray::kHeaderSize), edx);
   __ mov(esi, ebx);
-  __ RecordWriteArray(eax,
-                      edx,
-                      esi,
-                      EMIT_REMEMBERED_SET,
+  __ RecordWriteArray(eax, edx, esi, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                       OMIT_SMI_CHECK);
   __ jmp(&entry, Label::kNear);
 
@@ -442,20 +436,12 @@
   // edx: receiver
   // Set transitioned map.
   __ mov(FieldOperand(edx, HeapObject::kMapOffset), ebx);
-  __ RecordWriteField(edx,
-                      HeapObject::kMapOffset,
-                      ebx,
-                      edi,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, HeapObject::kMapOffset, ebx, edi, kDontSaveFPRegs,
+                      OMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
   // Replace receiver's backing store with newly created and filled FixedArray.
   __ mov(FieldOperand(edx, JSObject::kElementsOffset), eax);
-  __ RecordWriteField(edx,
-                      JSObject::kElementsOffset,
-                      eax,
-                      edi,
-                      EMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
+  __ RecordWriteField(edx, JSObject::kElementsOffset, eax, edi, kDontSaveFPRegs,
+                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
 
   // Restore registers.
   __ pop(eax);
@@ -518,7 +504,7 @@
   __ j(zero, &seq_string, Label::kNear);
 
   // Handle external strings.
-  Label ascii_external, done;
+  Label one_byte_external, done;
   if (FLAG_debug_code) {
     // Assert that we do not have a cons or slice (indirect strings) here.
     // Sequential strings have already been ruled out.
@@ -533,22 +519,22 @@
   STATIC_ASSERT(kTwoByteStringTag == 0);
   __ test_b(result, kStringEncodingMask);
   __ mov(result, FieldOperand(string, ExternalString::kResourceDataOffset));
-  __ j(not_equal, &ascii_external, Label::kNear);
+  __ j(not_equal, &one_byte_external, Label::kNear);
   // Two-byte string.
   __ movzx_w(result, Operand(result, index, times_2, 0));
   __ jmp(&done, Label::kNear);
-  __ bind(&ascii_external);
-  // Ascii string.
+  __ bind(&one_byte_external);
+  // One-byte string.
   __ movzx_b(result, Operand(result, index, times_1, 0));
   __ jmp(&done, Label::kNear);
 
-  // Dispatch on the encoding: ASCII or two-byte.
-  Label ascii;
+  // Dispatch on the encoding: one-byte or two-byte.
+  Label one_byte;
   __ bind(&seq_string);
   STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
   STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
   __ test(result, Immediate(kStringEncodingMask));
-  __ j(not_zero, &ascii, Label::kNear);
+  __ j(not_zero, &one_byte, Label::kNear);
 
   // Two-byte string.
   // Load the two-byte character code into the result register.
@@ -558,9 +544,9 @@
                                   SeqTwoByteString::kHeaderSize));
   __ jmp(&done, Label::kNear);
 
-  // Ascii string.
+  // One-byte string.
   // Load the byte into the result register.
-  __ bind(&ascii);
+  __ bind(&one_byte);
   __ movzx_b(result, FieldOperand(string,
                                   index,
                                   times_1,
@@ -573,7 +559,7 @@
 
 
 CodeAgingHelper::CodeAgingHelper() {
-  ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
   CodePatcher patcher(young_sequence_.start(), young_sequence_.length());
   patcher.masm()->push(ebp);
   patcher.masm()->mov(ebp, esp);
@@ -591,7 +577,7 @@
 
 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
   bool result = isolate->code_aging_helper()->IsYoung(sequence);
-  ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
+  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
   return result;
 }
 
@@ -618,7 +604,7 @@
   uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
   if (age == kNoAgeCodeAge) {
     isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
-    CPU::FlushICache(sequence, young_length);
+    CpuFeatures::FlushICache(sequence, young_length);
   } else {
     Code* stub = GetCodeAgeStub(isolate, age, parity);
     CodePatcher patcher(sequence, young_length);
diff --git a/src/x87/codegen-x87.h b/src/x87/codegen-x87.h
index 15b2702..c23e866 100644
--- a/src/x87/codegen-x87.h
+++ b/src/x87/codegen-x87.h
@@ -6,7 +6,7 @@
 #define V8_X87_CODEGEN_X87_H_
 
 #include "src/ast.h"
-#include "src/ic-inl.h"
+#include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/x87/cpu-x87.cc b/src/x87/cpu-x87.cc
index 469f58e..03816df 100644
--- a/src/x87/cpu-x87.cc
+++ b/src/x87/cpu-x87.cc
@@ -12,13 +12,13 @@
 
 #if V8_TARGET_ARCH_X87
 
-#include "src/cpu.h"
+#include "src/assembler.h"
 #include "src/macro-assembler.h"
 
 namespace v8 {
 namespace internal {
 
-void CPU::FlushICache(void* start, size_t size) {
+void CpuFeatures::FlushICache(void* start, size_t size) {
   // No need to flush the instruction cache on Intel. On Intel, instruction
   // cache flushing is only necessary when multiple cores are running the same
   // code simultaneously. V8 (and JavaScript) is single threaded and when code
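
Other architectures do need a real flush here, which is why the hook exists at all. A hedged portable sketch using the GCC/Clang builtin (a no-op on x86, where data and instruction caches stay coherent for a single core):

    #include <cstddef>

    void FlushICache(void* start, size_t size) {
    #if defined(__i386__) || defined(__x86_64__)
      (void)start;  // nothing to do: coherent I/D caches
      (void)size;
    #else
      char* begin = static_cast<char*>(start);
      __builtin___clear_cache(begin, begin + size);
    #endif
    }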
diff --git a/src/x87/debug-x87.cc b/src/x87/debug-x87.cc
index e3e9165..92c23ab 100644
--- a/src/x87/debug-x87.cc
+++ b/src/x87/debug-x87.cc
@@ -22,7 +22,7 @@
 // CodeGenerator::VisitReturnStatement and VirtualFrame::Exit in codegen-x87.cc
 // for the precise return instructions sequence.
 void BreakLocationIterator::SetDebugBreakAtReturn() {
-  ASSERT(Assembler::kJSReturnSequenceLength >=
+  DCHECK(Assembler::kJSReturnSequenceLength >=
          Assembler::kCallInstructionLength);
   rinfo()->PatchCodeWithCall(
       debug_info_->GetIsolate()->builtins()->Return_DebugBreak()->entry(),
@@ -40,20 +40,20 @@
 // A debug break in the frame exit code is identified by the JS frame exit code
 // having been patched with a call instruction.
 bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
-  ASSERT(RelocInfo::IsJSReturn(rinfo->rmode()));
+  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
   return rinfo->IsPatchedReturnSequence();
 }
 
 
 bool BreakLocationIterator::IsDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   // Check whether the debug break slot instructions have been patched.
   return rinfo()->IsPatchedDebugBreakSlotSequence();
 }
 
 
 void BreakLocationIterator::SetDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   Isolate* isolate = debug_info_->GetIsolate();
   rinfo()->PatchCodeWithCall(
       isolate->builtins()->Slot_DebugBreak()->entry(),
@@ -62,7 +62,7 @@
 
 
 void BreakLocationIterator::ClearDebugBreakAtSlot() {
-  ASSERT(IsDebugBreakSlot());
+  DCHECK(IsDebugBreakSlot());
   rinfo()->PatchCode(original_rinfo()->pc(), Assembler::kDebugBreakSlotLength);
 }
 
@@ -86,9 +86,9 @@
     // Store the registers containing live values on the expression stack to
     // make sure that these are correctly updated during GC. Non-object values
     // are stored as smis, which the GC leaves untouched.
-    ASSERT((object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((non_object_regs & ~kJSCallerSaved) == 0);
-    ASSERT((object_regs & non_object_regs) == 0);
+    DCHECK((object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((object_regs & non_object_regs) == 0);
     for (int i = 0; i < kNumJSCallerSaved; i++) {
       int r = JSCallerSavedCode(i);
       Register reg = { r };
@@ -141,7 +141,7 @@
       }
     }
 
-    ASSERT(unused_reg.code() != -1);
+    DCHECK(unused_reg.code() != -1);
 
     // Read current padding counter and skip corresponding number of words.
     __ pop(unused_reg);
@@ -180,45 +180,35 @@
 
 void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for IC load call (from ic-x87.cc).
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit(), 0, false);
 }
 
 
 void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
   // Register state for IC store call (from ic-x87.cc).
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  // -----------------------------------
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
   Generate_DebugBreakCallHelper(
-      masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
+      masm, receiver.bit() | name.bit() | value.bit(), 0, false);
 }
 
 
 void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
   // Register state for keyed IC load call (from ic-x87.cc).
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  // -----------------------------------
-  Generate_DebugBreakCallHelper(masm, ecx.bit() | edx.bit(), 0, false);
+  GenerateLoadICDebugBreak(masm);
 }
 
 
 void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
-  // Register state for keyed IC load call (from ic-x87.cc).
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  // -----------------------------------
+  // Register state for keyed IC store call (from ic-x87.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
   Generate_DebugBreakCallHelper(
-      masm, eax.bit() | ecx.bit() | edx.bit(), 0, false);
+      masm, receiver.bit() | name.bit() | value.bit(), 0, false);
 }
 
 
@@ -285,7 +275,7 @@
   __ bind(&check_codesize);
   __ RecordDebugBreakSlot();
   __ Nop(Assembler::kDebugBreakSlotLength);
-  ASSERT_EQ(Assembler::kDebugBreakSlotLength,
+  DCHECK_EQ(Assembler::kDebugBreakSlotLength,
             masm->SizeOfCodeGeneratedSince(&check_codesize));
 }
 
diff --git a/src/x87/deoptimizer-x87.cc b/src/x87/deoptimizer-x87.cc
index 36d6649..a76c7a7 100644
--- a/src/x87/deoptimizer-x87.cc
+++ b/src/x87/deoptimizer-x87.cc
@@ -35,7 +35,7 @@
   for (int i = 0; i < deopt_data->DeoptCount(); i++) {
     int pc_offset = deopt_data->Pc(i)->value();
     if (pc_offset == -1) continue;
-    ASSERT_GE(pc_offset, prev_pc_offset);
+    DCHECK_GE(pc_offset, prev_pc_offset);
     int pc_delta = pc_offset - prev_pc_offset;
     // We use RUNTIME_ENTRY reloc info which has a size of 2 bytes
     // if encodable with small pc delta encoding and up to 6 bytes
@@ -81,7 +81,7 @@
       byte* pos_before = reloc_info_writer.pos();
 #endif
       reloc_info_writer.Write(&rinfo);
-      ASSERT(RelocInfo::kMinRelocCommentSize ==
+      DCHECK(RelocInfo::kMinRelocCommentSize ==
              pos_before - reloc_info_writer.pos());
     }
     // Replace relocation information on the code object.
@@ -128,9 +128,6 @@
   // Emit call to lazy deoptimization at all lazy deopt points.
   DeoptimizationInputData* deopt_data =
       DeoptimizationInputData::cast(code->deoptimization_data());
-  SharedFunctionInfo* shared =
-      SharedFunctionInfo::cast(deopt_data->SharedFunctionInfo());
-  shared->EvictFromOptimizedCodeMap(code, "deoptimized code");
 #ifdef DEBUG
   Address prev_call_address = NULL;
 #endif
@@ -149,11 +146,11 @@
                     reinterpret_cast<intptr_t>(deopt_entry),
                     NULL);
     reloc_info_writer.Write(&rinfo);
-    ASSERT_GE(reloc_info_writer.pos(),
+    DCHECK_GE(reloc_info_writer.pos(),
               reloc_info->address() + ByteArray::kHeaderSize);
-    ASSERT(prev_call_address == NULL ||
+    DCHECK(prev_call_address == NULL ||
            call_address >= prev_call_address + patch_size());
-    ASSERT(call_address + patch_size() <= code->instruction_end());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
 #ifdef DEBUG
     prev_call_address = call_address;
 #endif
@@ -169,7 +166,7 @@
   // Handle the junk part after the new relocation info. We will create
   // a non-live object in the extra space at the end of the former reloc info.
   Address junk_address = reloc_info->address() + reloc_info->Size();
-  ASSERT(junk_address <= reloc_end_address);
+  DCHECK(junk_address <= reloc_end_address);
   isolate->heap()->CreateFillerObjectAt(junk_address,
                                         reloc_end_address - junk_address);
 }
@@ -197,9 +194,9 @@
 
 
 void Deoptimizer::SetPlatformCompiledStubRegisters(
-    FrameDescription* output_frame, CodeStubInterfaceDescriptor* descriptor) {
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
   intptr_t handler =
-      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler_);
+      reinterpret_cast<intptr_t>(descriptor->deoptimization_handler());
   int params = descriptor->GetHandlerParameterCount();
   output_frame->SetRegister(eax.code(), params);
   output_frame->SetRegister(ebx.code(), handler);
@@ -207,8 +204,10 @@
 
 
 void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
-  // Do nothing for X87.
-  return;
+  for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
 }
 
 
@@ -219,7 +218,7 @@
       input_frame_size - parameter_count * kPointerSize -
       StandardFrameConstants::kFixedFrameSize -
       kPointerSize;
-  ASSERT(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
+  DCHECK(JavaScriptFrameConstants::kDynamicAlignmentStateOffset ==
       JavaScriptFrameConstants::kLocal0Offset);
   int32_t alignment_state = input_->GetFrameSlot(alignment_state_offset);
   return (alignment_state == kAlignmentPaddingPushed);
@@ -233,9 +232,42 @@
 
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
+
+  const int kDoubleRegsSize =
+      kDoubleSize * X87Register::kMaxNumAllocatableRegisters;
+
+  // Reserve space for x87 fp registers.
+  __ sub(esp, Immediate(kDoubleRegsSize));
+
   __ pushad();
 
-  const int kSavedRegistersAreaSize = kNumberOfRegisters * kPointerSize;
+  // GP registers are safe to use now.
+  // Save the used x87 fp registers into the space reserved above.
+  Label loop, done;
+  // Get the layout of x87 stack.
+  __ sub(esp, Immediate(kPointerSize));
+  __ fistp_s(MemOperand(esp, 0));
+  __ pop(eax);
+  // Preserve the stack layout in edi.
+  __ mov(edi, eax);
+  // Get the x87 stack depth from the first 3 bits.
+  __ mov(ecx, eax);
+  __ and_(ecx, 0x7);
+  __ j(zero, &done, Label::kNear);
+
+  __ bind(&loop);
+  __ shr(eax, 0x3);
+  __ mov(ebx, eax);
+  __ and_(ebx, 0x7);  // Extract the st_x index into ebx.
+  // Pop TOS into its slot; the 0x20 displacement skips the GP registers
+  // stored by pushad. st_i is saved to (esp + ebx * kDoubleSize + 0x20).
+  __ fstp_d(Operand(esp, ebx, times_8, 0x20));
+  __ dec(ecx);  // Decrease stack depth.
+  __ j(not_zero, &loop, Label::kNear);
+  __ bind(&done);
+
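
The fistp sequence above materializes the allocator's x87 stack-layout word into eax/edi: the low 3 bits hold the live stack depth, and each subsequent 3-bit field names the physical st(i) slot for the next stack position, which is exactly what the shr/and loop walks. The same decode in plain C++:

    #include <cstdint>
    #include <vector>

    // Layout word: depth in bits [0,3), then one 3-bit st(i) index per slot.
    std::vector<int> DecodeX87Layout(uint32_t layout) {
      std::vector<int> slots;
      uint32_t depth = layout & 0x7;
      for (uint32_t i = 0; i < depth; ++i) {
        layout >>= 3;
        slots.push_back(layout & 0x7);  // physical register of this slot
      }
      return slots;
    }
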
+  const int kSavedRegistersAreaSize =
+      kNumberOfRegisters * kPointerSize + kDoubleRegsSize;
 
   // Get the bailout id from the stack.
   __ mov(ebx, Operand(esp, kSavedRegistersAreaSize));
@@ -248,6 +280,7 @@
   __ sub(edx, ebp);
   __ neg(edx);
 
+  __ push(edi);
   // Allocate a new deoptimizer object.
   __ PrepareCallCFunction(6, eax);
   __ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
@@ -263,6 +296,8 @@
     __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
   }
 
+  __ pop(edi);
+
   // Preserve deoptimizer object in register eax and get the input
   // frame descriptor pointer.
   __ mov(ebx, Operand(eax, Deoptimizer::input_offset()));
@@ -273,13 +308,22 @@
     __ pop(Operand(ebx, offset));
   }
 
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  // Fill in the double input registers.
+  for (int i = 0; i < X87Register::kMaxNumAllocatableRegisters; ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize;
+    __ fld_d(Operand(esp, src_offset));
+    __ fstp_d(Operand(ebx, dst_offset));
+  }
+
   // Clear FPU all exceptions.
   // TODO(ulan): Find out why the TOP register is not zero here in some cases,
   // and check that the generated code never deoptimizes with unbalanced stack.
   __ fnclex();
 
   // Remove the bailout id, return address and the double registers.
-  __ add(esp, Immediate(2 * kPointerSize));
+  __ add(esp, Immediate(kDoubleRegsSize + 2 * kPointerSize));
 
   // Compute a pointer to the unwinding limit in register ecx; that is
   // the first stack slot not part of the input frame.
@@ -301,6 +345,7 @@
   __ j(not_equal, &pop_loop);
 
   // Compute the output frame in the deoptimizer.
+  __ push(edi);
   __ push(eax);
   __ PrepareCallCFunction(1, ebx);
   __ mov(Operand(esp, 0 * kPointerSize), eax);
@@ -310,6 +355,7 @@
         ExternalReference::compute_output_frames_function(isolate()), 1);
   }
   __ pop(eax);
+  __ pop(edi);
 
   // If frame was dynamically aligned, pop padding.
   Label no_padding;
@@ -348,6 +394,25 @@
   __ cmp(eax, edx);
   __ j(below, &outer_push_loop);
 
+
+  // In case of a failed STUB, we have to restore the x87 stack.
+  // The x87 stack layout word is in edi.
+  Label loop2, done2;
+  // Get the x87 stack depth from the first 3 bits.
+  __ mov(ecx, edi);
+  __ and_(ecx, 0x7);
+  __ j(zero, &done2, Label::kNear);
+
+  __ lea(ecx, Operand(ecx, ecx, times_2, 0));
+  __ bind(&loop2);
+  __ mov(eax, edi);
+  __ shr_cl(eax);
+  __ and_(eax, 0x7);
+  __ fld_d(Operand(ebx, eax, times_8, double_regs_offset));
+  __ sub(ecx, Immediate(0x3));
+  __ j(not_zero, &loop2, Label::kNear);
+  __ bind(&done2);
+
   // Push state, pc, and continuation from the last output frame.
   __ push(Operand(ebx, FrameDescription::state_offset()));
   __ push(Operand(ebx, FrameDescription::pc_offset()));
@@ -376,7 +441,7 @@
     USE(start);
     __ push_imm32(i);
     __ jmp(&done);
-    ASSERT(masm()->pc_offset() - start == table_entry_size_);
+    DCHECK(masm()->pc_offset() - start == table_entry_size_);
   }
   __ bind(&done);
 }
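
Because the DCHECK pins every entry to exactly table_entry_size_ bytes (a push imm32 plus the jump to the shared tail), bailout ids map to entry addresses with a single multiply, and back again with a divide. A sketch, with entry_size standing in for table_entry_size_:

    #include <cstdint>

    uint8_t* EntryAddress(uint8_t* table_base, int id, int entry_size) {
      return table_base + id * entry_size;  // fixed-stride jump table
    }

    int EntryId(uint8_t* table_base, uint8_t* entry, int entry_size) {
      return static_cast<int>((entry - table_base) / entry_size);
    }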
diff --git a/src/x87/disasm-x87.cc b/src/x87/disasm-x87.cc
index a7d473f..908e8b0 100644
--- a/src/x87/disasm-x87.cc
+++ b/src/x87/disasm-x87.cc
@@ -3,8 +3,8 @@
 // found in the LICENSE file.
 
 #include <assert.h>
-#include <stdio.h>
 #include <stdarg.h>
+#include <stdio.h>
 
 #include "src/v8.h"
 
@@ -211,7 +211,7 @@
     InstructionDesc* id = &instructions_[bm[i].b];
     id->mnem = bm[i].mnem;
     id->op_order_ = bm[i].op_order_;
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->type = type;
   }
 }
@@ -223,7 +223,7 @@
                                      const char* mnem) {
   for (byte b = start; b <= end; b++) {
     InstructionDesc* id = &instructions_[b];
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->mnem = mnem;
     id->type = type;
   }
@@ -233,7 +233,7 @@
 void InstructionTable::AddJumpConditionalShort() {
   for (byte b = 0x70; b <= 0x7F; b++) {
     InstructionDesc* id = &instructions_[b];
-    ASSERT_EQ(NO_INSTR, id->type);  // Information not already entered.
+    DCHECK_EQ(NO_INSTR, id->type);  // Information not already entered.
     id->mnem = jump_conditional_mnem[b & 0x0F];
     id->type = JUMP_CONDITIONAL_SHORT_INSTR;
   }
@@ -528,84 +528,101 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX87::F7Instruction(byte* data) {
-  ASSERT_EQ(0xF7, *data);
-  byte modrm = *(data+1);
+  DCHECK_EQ(0xF7, *data);
+  byte modrm = *++data;
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
-  if (mod == 3 && regop != 0) {
-    const char* mnem = NULL;
-    switch (regop) {
-      case 2: mnem = "not"; break;
-      case 3: mnem = "neg"; break;
-      case 4: mnem = "mul"; break;
-      case 5: mnem = "imul"; break;
-      case 7: mnem = "idiv"; break;
-      default: UnimplementedInstruction();
-    }
-    AppendToBuffer("%s %s", mnem, NameOfCPURegister(rm));
-    return 2;
-  } else if (mod == 3 && regop == eax) {
-    int32_t imm = *reinterpret_cast<int32_t*>(data+2);
-    AppendToBuffer("test %s,0x%x", NameOfCPURegister(rm), imm);
-    return 6;
-  } else if (regop == eax) {
-    AppendToBuffer("test ");
-    int count = PrintRightOperand(data+1);
-    int32_t imm = *reinterpret_cast<int32_t*>(data+1+count);
-    AppendToBuffer(",0x%x", imm);
-    return 1+count+4 /*int32_t*/;
-  } else {
-    UnimplementedInstruction();
-    return 2;
+  const char* mnem = NULL;
+  switch (regop) {
+    case 0:
+      mnem = "test";
+      break;
+    case 2:
+      mnem = "not";
+      break;
+    case 3:
+      mnem = "neg";
+      break;
+    case 4:
+      mnem = "mul";
+      break;
+    case 5:
+      mnem = "imul";
+      break;
+    case 6:
+      mnem = "div";
+      break;
+    case 7:
+      mnem = "idiv";
+      break;
+    default:
+      UnimplementedInstruction();
   }
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(data);
+  if (regop == 0) {
+    AppendToBuffer(",0x%x", *reinterpret_cast<int32_t*>(data + count));
+    count += 4;
+  }
+  return 1 + count;
 }
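
The rewritten F7 handler leans on the x86 ModRM byte: the reg/opcode field selects the operation within the group (0 = test, 2 = not, 3 = neg, 4 = mul, 5 = imul, 6 = div, 7 = idiv), and only test carries a trailing imm32. The get_modrm split is plain bit slicing:

    #include <cstdint>

    // ModRM byte: mod in bits 7..6, reg/opcode in bits 5..3, r/m in bits 2..0.
    void get_modrm(uint8_t modrm, int* mod, int* regop, int* rm) {
      *mod = (modrm >> 6) & 0x3;
      *regop = (modrm >> 3) & 0x7;
      *rm = modrm & 0x7;
    }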
 
 
 int DisassemblerX87::D1D3C1Instruction(byte* data) {
   byte op = *data;
-  ASSERT(op == 0xD1 || op == 0xD3 || op == 0xC1);
-  byte modrm = *(data+1);
+  DCHECK(op == 0xD1 || op == 0xD3 || op == 0xC1);
+  byte modrm = *++data;
   int mod, regop, rm;
   get_modrm(modrm, &mod, &regop, &rm);
   int imm8 = -1;
-  int num_bytes = 2;
-  if (mod == 3) {
-    const char* mnem = NULL;
-    switch (regop) {
-      case kROL: mnem = "rol"; break;
-      case kROR: mnem = "ror"; break;
-      case kRCL: mnem = "rcl"; break;
-      case kRCR: mnem = "rcr"; break;
-      case kSHL: mnem = "shl"; break;
-      case KSHR: mnem = "shr"; break;
-      case kSAR: mnem = "sar"; break;
-      default: UnimplementedInstruction();
-    }
-    if (op == 0xD1) {
-      imm8 = 1;
-    } else if (op == 0xC1) {
-      imm8 = *(data+2);
-      num_bytes = 3;
-    } else if (op == 0xD3) {
-      // Shift/rotate by cl.
-    }
-    ASSERT_NE(NULL, mnem);
-    AppendToBuffer("%s %s,", mnem, NameOfCPURegister(rm));
-    if (imm8 >= 0) {
-      AppendToBuffer("%d", imm8);
-    } else {
-      AppendToBuffer("cl");
-    }
-  } else {
-    UnimplementedInstruction();
+  const char* mnem = NULL;
+  switch (regop) {
+    case kROL:
+      mnem = "rol";
+      break;
+    case kROR:
+      mnem = "ror";
+      break;
+    case kRCL:
+      mnem = "rcl";
+      break;
+    case kRCR:
+      mnem = "rcr";
+      break;
+    case kSHL:
+      mnem = "shl";
+      break;
+    case KSHR:
+      mnem = "shr";
+      break;
+    case kSAR:
+      mnem = "sar";
+      break;
+    default:
+      UnimplementedInstruction();
   }
-  return num_bytes;
+  AppendToBuffer("%s ", mnem);
+  int count = PrintRightOperand(data);
+  if (op == 0xD1) {
+    imm8 = 1;
+  } else if (op == 0xC1) {
+    imm8 = *(data + 1);
+    count++;
+  } else if (op == 0xD3) {
+    // Shift/rotate by cl.
+  }
+  if (imm8 >= 0) {
+    AppendToBuffer(",%d", imm8);
+  } else {
+    AppendToBuffer(",cl");
+  }
+  return 1 + count;
 }
 
 
 // Returns number of bytes used, including *data.
 int DisassemblerX87::JumpShort(byte* data) {
-  ASSERT_EQ(0xEB, *data);
+  DCHECK_EQ(0xEB, *data);
   byte b = *(data+1);
   byte* dest = data + static_cast<int8_t>(b) + 2;
   AppendToBuffer("jmp %s", NameOfAddress(dest));
@@ -615,7 +632,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX87::JumpConditional(byte* data, const char* comment) {
-  ASSERT_EQ(0x0F, *data);
+  DCHECK_EQ(0x0F, *data);
   byte cond = *(data+1) & 0x0F;
   byte* dest = data + *reinterpret_cast<int32_t*>(data+2) + 6;
   const char* mnem = jump_conditional_mnem[cond];
@@ -643,7 +660,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX87::SetCC(byte* data) {
-  ASSERT_EQ(0x0F, *data);
+  DCHECK_EQ(0x0F, *data);
   byte cond = *(data+1) & 0x0F;
   const char* mnem = set_conditional_mnem[cond];
   AppendToBuffer("%s ", mnem);
@@ -654,7 +671,7 @@
 
 // Returns number of bytes used, including *data.
 int DisassemblerX87::CMov(byte* data) {
-  ASSERT_EQ(0x0F, *data);
+  DCHECK_EQ(0x0F, *data);
   byte cond = *(data + 1) & 0x0F;
   const char* mnem = conditional_move_mnem[cond];
   int op_size = PrintOperands(mnem, REG_OPER_OP_ORDER, data + 2);
@@ -665,7 +682,7 @@
 // Returns number of bytes used, including *data.
 int DisassemblerX87::FPUInstruction(byte* data) {
   byte escape_opcode = *data;
-  ASSERT_EQ(0xD8, escape_opcode & 0xF8);
+  DCHECK_EQ(0xD8, escape_opcode & 0xF8);
   byte modrm_byte = *(data+1);
 
   if (modrm_byte >= 0xC0) {
@@ -685,7 +702,12 @@
         case 0: mnem = "fld_s"; break;
         case 2: mnem = "fst_s"; break;
         case 3: mnem = "fstp_s"; break;
-        case 7: mnem = "fstcw"; break;
+        case 5:
+          mnem = "fldcw";
+          break;
+        case 7:
+          mnem = "fnstcw";
+          break;
         default: UnimplementedInstruction();
       }
       break;
@@ -699,11 +721,27 @@
       }
       break;
 
+    case 0xDC:
+      switch (regop) {
+        case 0:
+          mnem = "fadd_d";
+          break;
+        default:
+          UnimplementedInstruction();
+      }
+      break;
+
     case 0xDD: switch (regop) {
         case 0: mnem = "fld_d"; break;
         case 1: mnem = "fisttp_d"; break;
         case 2: mnem = "fst_d"; break;
         case 3: mnem = "fstp_d"; break;
+        case 4:
+          mnem = "frstor";
+          break;
+        case 6:
+          mnem = "fnsave";
+          break;
         default: UnimplementedInstruction();
       }
       break;
@@ -954,17 +992,18 @@
         data += 3;
         break;
 
-      case 0x69:  // fall through
-      case 0x6B:
-        { int mod, regop, rm;
-          get_modrm(*(data+1), &mod, &regop, &rm);
-          int32_t imm =
-              *data == 0x6B ? *(data+2) : *reinterpret_cast<int32_t*>(data+2);
-          AppendToBuffer("imul %s,%s,0x%x",
-                         NameOfCPURegister(regop),
-                         NameOfCPURegister(rm),
-                         imm);
-          data += 2 + (*data == 0x6B ? 1 : 4);
+      case 0x6B: {
+        data++;
+        data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
+        AppendToBuffer(",%d", *data);
+        data++;
+      } break;
+
+      case 0x69: {
+        data++;
+        data += PrintOperands("imul", REG_OPER_OP_ORDER, data);
+        AppendToBuffer(",%d", *reinterpret_cast<int32_t*>(data));
+        data += 4;
         }
         break;
 
@@ -1373,7 +1412,7 @@
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             int8_t imm8 = static_cast<int8_t>(data[1]);
-            ASSERT(regop == esi || regop == edx);
+            DCHECK(regop == esi || regop == edx);
             AppendToBuffer("%s %s,%d",
                            (regop == esi) ? "psllq" : "psrlq",
                            NameOfXMMRegister(rm),
@@ -1640,7 +1679,7 @@
   if (instr_len == 0) {
     printf("%02x", *data);
   }
-  ASSERT(instr_len > 0);  // Ensure progress.
+  DCHECK(instr_len > 0);  // Ensure progress.
 
   int outp = 0;
   // Instruction bytes.
diff --git a/src/x87/frames-x87.cc b/src/x87/frames-x87.cc
index cd4b724..6091b45 100644
--- a/src/x87/frames-x87.cc
+++ b/src/x87/frames-x87.cc
@@ -7,9 +7,9 @@
 #if V8_TARGET_ARCH_X87
 
 #include "src/assembler.h"
-#include "src/x87/assembler-x87.h"
-#include "src/x87/assembler-x87-inl.h"
 #include "src/frames.h"
+#include "src/x87/assembler-x87-inl.h"
+#include "src/x87/assembler-x87.h"
 
 namespace v8 {
 namespace internal {
diff --git a/src/x87/full-codegen-x87.cc b/src/x87/full-codegen-x87.cc
index 7006e7b..c159edd 100644
--- a/src/x87/full-codegen-x87.cc
+++ b/src/x87/full-codegen-x87.cc
@@ -6,15 +6,16 @@
 
 #if V8_TARGET_ARCH_X87
 
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
 #include "src/codegen.h"
 #include "src/compiler.h"
 #include "src/debug.h"
 #include "src/full-codegen.h"
+#include "src/ic/ic.h"
 #include "src/isolate-inl.h"
 #include "src/parser.h"
 #include "src/scopes.h"
-#include "src/stub-cache.h"
 
 namespace v8 {
 namespace internal {
@@ -31,7 +32,7 @@
   }
 
   ~JumpPatchSite() {
-    ASSERT(patch_site_.is_bound() == info_emitted_);
+    DCHECK(patch_site_.is_bound() == info_emitted_);
   }
 
   void EmitJumpIfNotSmi(Register reg,
@@ -51,7 +52,7 @@
   void EmitPatchInfo() {
     if (patch_site_.is_bound()) {
       int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(&patch_site_);
-      ASSERT(is_uint8(delta_to_patch_site));
+      DCHECK(is_uint8(delta_to_patch_site));
       __ test(eax, Immediate(delta_to_patch_site));
 #ifdef DEBUG
       info_emitted_ = true;
@@ -64,8 +65,8 @@
  private:
   // jc will be patched with jz, jnc will become jnz.
   void EmitJump(Condition cc, Label* target, Label::Distance distance) {
-    ASSERT(!patch_site_.is_bound() && !info_emitted_);
-    ASSERT(cc == carry || cc == not_carry);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    DCHECK(cc == carry || cc == not_carry);
     __ bind(&patch_site_);
     __ j(cc, target, distance);
   }
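
EmitJumpIfNotSmi and the patch-site machinery rest on the 32-bit smi representation: a smi is a 31-bit integer shifted left by one, so its tag bit is 0 and a single bit test separates it from a heap-object pointer. In plain C++ terms (a sketch of the tagging scheme, not V8 code):

    #include <cstdint>

    constexpr intptr_t kSmiTagMask = 1;

    bool IsSmi(intptr_t tagged) { return (tagged & kSmiTagMask) == 0; }
    intptr_t ToSmi(int32_t value) { return static_cast<intptr_t>(value) << 1; }
    int32_t SmiValue(intptr_t tagged) {
      return static_cast<int32_t>(tagged >> 1);  // arithmetic shift untags
    }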
@@ -123,7 +124,7 @@
     __ j(not_equal, &ok, Label::kNear);
 
     __ mov(ecx, GlobalObjectOperand());
-    __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+    __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
 
     __ mov(Operand(esp, receiver_offset), ecx);
 
@@ -142,7 +143,7 @@
   { Comment cmnt(masm_, "[ Allocate locals");
     int locals_count = info->scope()->num_stack_slots();
     // Generators allocate locals, if any, in context slots.
-    ASSERT(!info->function()->is_generator() || locals_count == 0);
+    DCHECK(!info->function()->is_generator() || locals_count == 0);
     if (locals_count == 1) {
       __ push(Immediate(isolate()->factory()->undefined_value()));
     } else if (locals_count > 1) {
@@ -190,7 +191,7 @@
     if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
       __ push(edi);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kHiddenNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewGlobalContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -198,7 +199,7 @@
       need_write_barrier = false;
     } else {
       __ push(edi);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     function_in_register = false;
     // Context is returned in eax.  It replaces the context passed to us.
@@ -220,10 +221,8 @@
         __ mov(Operand(esi, context_offset), eax);
         // Update the write barrier. This clobbers eax and ebx.
         if (need_write_barrier) {
-          __ RecordWriteContextSlot(esi,
-                                    context_offset,
-                                    eax,
-                                    ebx);
+          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
+                                    kDontSaveFPRegs);
         } else if (FLAG_debug_code) {
           Label done;
           __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
@@ -285,9 +284,9 @@
       // constant.
       if (scope()->is_function_scope() && scope()->function() != NULL) {
         VariableDeclaration* function = scope()->function();
-        ASSERT(function->proxy()->var()->mode() == CONST ||
+        DCHECK(function->proxy()->var()->mode() == CONST ||
                function->proxy()->var()->mode() == CONST_LEGACY);
-        ASSERT(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
         VisitVariableDeclaration(function);
       }
       VisitDeclarations(scope()->declarations());
@@ -305,9 +304,9 @@
     }
 
     { Comment cmnt(masm_, "[ Body");
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
       VisitStatements(function()->body());
-      ASSERT(loop_depth() == 0);
+      DCHECK(loop_depth() == 0);
     }
   }
 
@@ -345,7 +344,7 @@
   Comment cmnt(masm_, "[ Back edge bookkeeping");
   Label ok;
 
-  ASSERT(back_edge_target->is_bound());
+  DCHECK(back_edge_target->is_bound());
   int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target);
   int weight = Min(kMaxBackEdgeWeight,
                    Max(1, distance / kCodeSizeMultiplier));
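
So the interrupt weight grows with the size of the loop body and is clamped to [1, kMaxBackEdgeWeight]; for example, with a multiplier of 100, a 250-byte back edge yields weight 2 and a tiny 40-byte loop still contributes 1. As a sketch (both constants are assumed placeholders):

    #include <algorithm>

    int BackEdgeWeight(int distance) {
      constexpr int kMaxBackEdgeWeight = 127;   // assumed
      constexpr int kCodeSizeMultiplier = 100;  // assumed
      return std::min(kMaxBackEdgeWeight,
                      std::max(1, distance / kCodeSizeMultiplier));
    }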
@@ -415,7 +414,7 @@
     __ Ret(arguments_bytes, ecx);
     // Check that the size of the code used for returning is large enough
     // for the debugger's requirements.
-    ASSERT(Assembler::kJSReturnSequenceLength <=
+    DCHECK(Assembler::kJSReturnSequenceLength <=
            masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
     info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
   }
@@ -423,18 +422,18 @@
 
 
 void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
 }
 
 
 void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   codegen()->GetVar(result_register(), var);
 }
 
 
 void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
-  ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
   MemOperand operand = codegen()->VarOperand(var, result_register());
   // Memory operands can be pushed directly.
   __ push(operand);
@@ -499,7 +498,7 @@
                                           true,
                                           true_label_,
                                           false_label_);
-  ASSERT(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
   if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
     if (false_label_ != fall_through_) __ jmp(false_label_);
   } else if (lit->IsTrue() || lit->IsJSObject()) {
@@ -526,7 +525,7 @@
 
 void FullCodeGenerator::EffectContext::DropAndPlug(int count,
                                                    Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
 }
 
@@ -534,7 +533,7 @@
 void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
     int count,
     Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   __ Drop(count);
   __ Move(result_register(), reg);
 }
@@ -542,7 +541,7 @@
 
 void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
                                                        Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   if (count > 1) __ Drop(count - 1);
   __ mov(Operand(esp, 0), reg);
 }
@@ -550,7 +549,7 @@
 
 void FullCodeGenerator::TestContext::DropAndPlug(int count,
                                                  Register reg) const {
-  ASSERT(count > 0);
+  DCHECK(count > 0);
   // For simplicity we always test the accumulator register.
   __ Drop(count);
   __ Move(result_register(), reg);
@@ -561,7 +560,7 @@
 
 void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
                                             Label* materialize_false) const {
-  ASSERT(materialize_true == materialize_false);
+  DCHECK(materialize_true == materialize_false);
   __ bind(materialize_true);
 }
 
@@ -594,8 +593,8 @@
 
 void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
                                           Label* materialize_false) const {
-  ASSERT(materialize_true == true_label_);
-  ASSERT(materialize_false == false_label_);
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
 }
 
 
@@ -660,7 +659,7 @@
 
 
 MemOperand FullCodeGenerator::StackOperand(Variable* var) {
-  ASSERT(var->IsStackAllocated());
+  DCHECK(var->IsStackAllocated());
   // Offset is negative because higher indexes are at lower addresses.
   int offset = -var->index() * kPointerSize;
   // Adjust by a (parameter or local) base offset.
@@ -674,7 +673,7 @@
 
 
 MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   if (var->IsContextSlot()) {
     int context_chain_length = scope()->ContextChainLength(var->scope());
     __ LoadContext(scratch, context_chain_length);
@@ -686,7 +685,7 @@
 
 
 void FullCodeGenerator::GetVar(Register dest, Variable* var) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
   MemOperand location = VarOperand(var, dest);
   __ mov(dest, location);
 }
@@ -696,18 +695,18 @@
                                Register src,
                                Register scratch0,
                                Register scratch1) {
-  ASSERT(var->IsContextSlot() || var->IsStackAllocated());
-  ASSERT(!scratch0.is(src));
-  ASSERT(!scratch0.is(scratch1));
-  ASSERT(!scratch1.is(src));
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!scratch0.is(src));
+  DCHECK(!scratch0.is(scratch1));
+  DCHECK(!scratch1.is(src));
   MemOperand location = VarOperand(var, scratch0);
   __ mov(location, src);
 
   // Emit the write barrier code if the location is in the heap.
   if (var->IsContextSlot()) {
     int offset = Context::SlotOffset(var->index());
-    ASSERT(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
-    __ RecordWriteContextSlot(scratch0, offset, src, scratch1);
+    DCHECK(!scratch0.is(esi) && !src.is(esi) && !scratch1.is(esi));
+    __ RecordWriteContextSlot(scratch0, offset, src, scratch1, kDontSaveFPRegs);
   }
 }
 
@@ -734,7 +733,7 @@
 
 void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
   // The variable in the declaration always resides in the current context.
-  ASSERT_EQ(0, scope()->ContextChainLength(variable->scope()));
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
   if (generate_debug_code_) {
     // Check that we're not inside a with or catch context.
     __ mov(ebx, FieldOperand(esi, HeapObject::kMapOffset));
@@ -788,7 +787,7 @@
       __ push(esi);
       __ push(Immediate(variable->name()));
       // VariableDeclaration nodes are always introduced in one of four modes.
-      ASSERT(IsDeclaredVariableMode(mode));
+      DCHECK(IsDeclaredVariableMode(mode));
       PropertyAttributes attr =
           IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
       __ push(Immediate(Smi::FromInt(attr)));
@@ -801,7 +800,7 @@
       } else {
         __ push(Immediate(Smi::FromInt(0)));  // Indicates no initial value.
       }
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -816,7 +815,7 @@
     case Variable::UNALLOCATED: {
       globals_->Add(variable->name(), zone());
       Handle<SharedFunctionInfo> function =
-          Compiler::BuildFunctionInfo(declaration->fun(), script());
+          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
       globals_->Add(function, zone());
@@ -837,12 +836,9 @@
       VisitForAccumulatorValue(declaration->fun());
       __ mov(ContextOperand(esi, variable->index()), result_register());
       // We know that we have written a function, which is not a smi.
-      __ RecordWriteContextSlot(esi,
-                                Context::SlotOffset(variable->index()),
-                                result_register(),
-                                ecx,
-                                EMIT_REMEMBERED_SET,
-                                OMIT_SMI_CHECK);
+      __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()),
+                                result_register(), ecx, kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
       PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
       break;
     }
@@ -853,7 +849,7 @@
       __ push(Immediate(variable->name()));
       __ push(Immediate(Smi::FromInt(NONE)));
       VisitForStackValue(declaration->fun());
-      __ CallRuntime(Runtime::kHiddenDeclareContextSlot, 4);
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
       break;
     }
   }
@@ -862,8 +858,8 @@
 
 void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   Variable* variable = declaration->proxy()->var();
-  ASSERT(variable->location() == Variable::CONTEXT);
-  ASSERT(variable->interface()->IsFrozen());
+  DCHECK(variable->location() == Variable::CONTEXT);
+  DCHECK(variable->interface()->IsFrozen());
 
   Comment cmnt(masm_, "[ ModuleDeclaration");
   EmitDebugCheckDeclarationContext(variable);
@@ -876,11 +872,8 @@
   // Assign it.
   __ mov(ContextOperand(esi, variable->index()), eax);
   // We know that we have written a module, which is not a smi.
-  __ RecordWriteContextSlot(esi,
-                            Context::SlotOffset(variable->index()),
-                            eax,
-                            ecx,
-                            EMIT_REMEMBERED_SET,
+  __ RecordWriteContextSlot(esi, Context::SlotOffset(variable->index()), eax,
+                            ecx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                             OMIT_SMI_CHECK);
   PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
 
@@ -922,7 +915,7 @@
   __ push(esi);  // The context is the first argument.
   __ Push(pairs);
   __ Push(Smi::FromInt(DeclareGlobalsFlags()));
-  __ CallRuntime(Runtime::kHiddenDeclareGlobals, 3);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
   // Return value is ignored.
 }
 
@@ -930,7 +923,7 @@
 void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
   // Call the runtime to declare the modules.
   __ Push(descriptions);
-  __ CallRuntime(Runtime::kHiddenDeclareModules, 1);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
   // Return value is ignored.
 }
 
@@ -985,7 +978,8 @@
 
     // Record position before stub call for type feedback.
     SetSourcePosition(clause->position());
-    Handle<Code> ic = CompareIC::GetUninitialized(isolate(), Token::EQ_STRICT);
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
     CallIC(ic, clause->CompareId());
     patch_site.EmitPatchInfo();
 
@@ -1113,7 +1107,7 @@
   // No need for a write barrier, we are storing a Smi in the feedback vector.
   __ LoadHeapObject(ebx, FeedbackVector());
   __ mov(FieldOperand(ebx, FixedArray::OffsetOfElementAt(slot)),
-         Immediate(TypeFeedbackInfo::MegamorphicSentinel(isolate())));
+         Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate())));
 
   __ mov(ebx, Immediate(Smi::FromInt(1)));  // Smi indicates slow check
   __ mov(ecx, Operand(esp, 0 * kPointerSize));  // Get enumerated object
@@ -1152,7 +1146,7 @@
 
   // For proxies, no filtering is done.
   // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
-  ASSERT(Smi::FromInt(0) == 0);
+  DCHECK(Smi::FromInt(0) == 0);
   __ test(edx, edx);
   __ j(zero, &update_each);
 
@@ -1204,15 +1198,6 @@
   Iteration loop_statement(this, stmt);
   increment_loop_depth();
 
-  // var iterable = subject
-  VisitForAccumulatorValue(stmt->assign_iterable());
-
-  // As with for-in, skip the loop if the iterator is null or undefined.
-  __ CompareRoot(eax, Heap::kUndefinedValueRootIndex);
-  __ j(equal, loop_statement.break_label());
-  __ CompareRoot(eax, Heap::kNullValueRootIndex);
-  __ j(equal, loop_statement.break_label());
-
   // var iterator = iterable[Symbol.iterator]();
   VisitForEffect(stmt->assign_iterator());
 
@@ -1261,9 +1246,7 @@
       !pretenure &&
       scope()->is_function_scope() &&
       info->num_literals() == 0) {
-    FastNewClosureStub stub(isolate(),
-                            info->strict_mode(),
-                            info->is_generator());
+    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
     __ mov(ebx, Immediate(info));
     __ CallStub(&stub);
   } else {
@@ -1272,7 +1255,7 @@
     __ push(Immediate(pretenure
                       ? isolate()->factory()->true_value()
                       : isolate()->factory()->false_value()));
-    __ CallRuntime(Runtime::kHiddenNewClosure, 3);
+    __ CallRuntime(Runtime::kNewClosure, 3);
   }
   context()->Plug(eax);
 }
@@ -1284,7 +1267,26 @@
 }
 
 
-void FullCodeGenerator::EmitLoadGlobalCheckExtensions(Variable* var,
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cnmt(masm_, "[ SuperReference ");
+
+  __ mov(LoadDescriptor::ReceiverRegister(),
+         Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ mov(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+
+  __ cmp(eax, isolate()->factory()->undefined_value());
+  Label done;
+  __ j(not_equal, &done);
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
   Register context = esi;
@@ -1334,8 +1336,13 @@
 
   // All extension objects were empty and it is safe to use a global
   // load IC call.
-  __ mov(edx, GlobalObjectOperand());
-  __ mov(ecx, var->name());
+  __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+  __ mov(LoadDescriptor::NameRegister(), proxy->var()->name());
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
+  }
+
   ContextualMode mode = (typeof_state == INSIDE_TYPEOF)
       ? NOT_CONTEXTUAL
       : CONTEXTUAL;
@@ -1346,7 +1353,7 @@
 
 MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
                                                                 Label* slow) {
-  ASSERT(var->IsContextSlot());
+  DCHECK(var->IsContextSlot());
   Register context = esi;
   Register temp = ebx;
 
@@ -1374,7 +1381,7 @@
 }
 
 
-void FullCodeGenerator::EmitDynamicLookupFastCase(Variable* var,
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
                                                   TypeofState typeof_state,
                                                   Label* slow,
                                                   Label* done) {
@@ -1383,8 +1390,9 @@
   // introducing variables.  In those cases, we do not want to
   // perform a runtime call for all variables in the scope
   // containing the eval.
+  Variable* var = proxy->var();
   if (var->mode() == DYNAMIC_GLOBAL) {
-    EmitLoadGlobalCheckExtensions(var, typeof_state, slow);
+    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
     __ jmp(done);
   } else if (var->mode() == DYNAMIC_LOCAL) {
     Variable* local = var->local_if_not_shadowed();
@@ -1397,7 +1405,7 @@
         __ mov(eax, isolate()->factory()->undefined_value());
       } else {  // LET || CONST
         __ push(Immediate(var->name()));
-        __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
       }
     }
     __ jmp(done);
@@ -1415,10 +1423,12 @@
   switch (var->location()) {
     case Variable::UNALLOCATED: {
       Comment cmnt(masm_, "[ Global variable");
-      // Use inline caching. Variable name is passed in ecx and the global
-      // object in eax.
-      __ mov(edx, GlobalObjectOperand());
-      __ mov(ecx, var->name());
+      __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+      __ mov(LoadDescriptor::NameRegister(), var->name());
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
+      }
       CallLoadIC(CONTEXTUAL);
       context()->Plug(eax);
       break;
@@ -1435,7 +1445,7 @@
         // always looked up dynamically, i.e. in that case
         //     var->location() == LOOKUP.
         // always holds.
-        ASSERT(var->scope() != NULL);
+        DCHECK(var->scope() != NULL);
 
         // Check if the binding really needs an initialization check. The check
         // can be skipped in the following situation: we have a LET or CONST
@@ -1458,8 +1468,8 @@
           skip_init_check = false;
         } else {
           // Check that we always have valid source position.
-          ASSERT(var->initializer_position() != RelocInfo::kNoPosition);
-          ASSERT(proxy->position() != RelocInfo::kNoPosition);
+          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+          DCHECK(proxy->position() != RelocInfo::kNoPosition);
           skip_init_check = var->mode() != CONST_LEGACY &&
               var->initializer_position() < proxy->position();
         }
@@ -1474,10 +1484,10 @@
             // Throw a reference error when using an uninitialized let/const
             // binding in harmony mode.
             __ push(Immediate(var->name()));
-            __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
           } else {
             // Uninitialized const bindings outside of harmony mode are unholed.
-            ASSERT(var->mode() == CONST_LEGACY);
+            DCHECK(var->mode() == CONST_LEGACY);
             __ mov(eax, isolate()->factory()->undefined_value());
           }
           __ bind(&done);
@@ -1494,11 +1504,11 @@
       Label done, slow;
       // Generate code for loading from variables potentially shadowed
       // by eval-introduced variables.
-      EmitDynamicLookupFastCase(var, NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
       __ bind(&slow);
       __ push(esi);  // Context.
       __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
       __ bind(&done);
       context()->Plug(eax);
       break;
@@ -1529,7 +1539,7 @@
   __ push(Immediate(Smi::FromInt(expr->literal_index())));
   __ push(Immediate(expr->pattern()));
   __ push(Immediate(expr->flags()));
-  __ CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
   __ mov(ebx, eax);
 
   __ bind(&materialized);
@@ -1541,7 +1551,7 @@
   __ bind(&runtime_allocate);
   __ push(ebx);
   __ push(Immediate(Smi::FromInt(size)));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ pop(ebx);
 
   __ bind(&allocated);
@@ -1591,7 +1601,7 @@
     __ push(Immediate(Smi::FromInt(expr->literal_index())));
     __ push(Immediate(constant_properties));
     __ push(Immediate(Smi::FromInt(flags)));
-    __ CallRuntime(Runtime::kHiddenCreateObjectLiteral, 4);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
   } else {
     __ mov(edi, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     __ mov(eax, FieldOperand(edi, JSFunction::kLiteralsOffset));
@@ -1626,14 +1636,15 @@
       case ObjectLiteral::Property::CONSTANT:
         UNREACHABLE();
       case ObjectLiteral::Property::MATERIALIZED_LITERAL:
-        ASSERT(!CompileTimeValue::IsCompileTimeValue(value));
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(value));
         // Fall through.
       case ObjectLiteral::Property::COMPUTED:
         if (key->value()->IsInternalizedString()) {
           if (property->emit_store()) {
             VisitForAccumulatorValue(value);
-            __ mov(ecx, Immediate(key->value()));
-            __ mov(edx, Operand(esp, 0));
+            DCHECK(StoreDescriptor::ValueRegister().is(eax));
+            __ mov(StoreDescriptor::NameRegister(), Immediate(key->value()));
+            __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
           } else {
@@ -1645,7 +1656,7 @@
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
-          __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
+          __ push(Immediate(Smi::FromInt(SLOPPY)));  // Language mode
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
           __ Drop(3);
@@ -1679,11 +1690,11 @@
     EmitAccessor(it->second->getter);
     EmitAccessor(it->second->setter);
     __ push(Immediate(Smi::FromInt(NONE)));
-    __ CallRuntime(Runtime::kDefineOrRedefineAccessorProperty, 5);
+    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
 
   if (expr->has_function()) {
-    ASSERT(result_saved);
+    DCHECK(result_saved);
     __ push(Operand(esp, 0));
     __ CallRuntime(Runtime::kToFastProperties, 1);
   }
@@ -1707,7 +1718,7 @@
   ZoneList<Expression*>* subexprs = expr->values();
   int length = subexprs->length();
   Handle<FixedArray> constant_elements = expr->constant_elements();
-  ASSERT_EQ(2, constant_elements->length());
+  DCHECK_EQ(2, constant_elements->length());
   ElementsKind constant_elements_kind =
       static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
   bool has_constant_fast_elements =
@@ -1728,7 +1739,7 @@
     __ push(Immediate(Smi::FromInt(expr->literal_index())));
     __ push(Immediate(constant_elements));
     __ push(Immediate(Smi::FromInt(flags)));
-    __ CallRuntime(Runtime::kHiddenCreateArrayLiteral, 4);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
   } else {
     __ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
     __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
@@ -1764,9 +1775,8 @@
       // Store the subexpression value in the array's elements.
       __ mov(FieldOperand(ebx, offset), result_register());
       // Update the write barrier for the array store.
-      __ RecordWriteField(ebx, offset, result_register(), ecx,
-                          EMIT_REMEMBERED_SET,
-                          INLINE_SMI_CHECK);
+      __ RecordWriteField(ebx, offset, result_register(), ecx, kDontSaveFPRegs,
+                          EMIT_REMEMBERED_SET, INLINE_SMI_CHECK);
     } else {
       // Store the subexpression value in the array's elements.
       __ mov(ecx, Immediate(Smi::FromInt(i)));
@@ -1787,7 +1797,7 @@
 
 
 void FullCodeGenerator::VisitAssignment(Assignment* expr) {
-  ASSERT(expr->target()->IsValidReferenceExpression());
+  DCHECK(expr->target()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ Assignment");
 
@@ -1809,9 +1819,9 @@
       break;
     case NAMED_PROPERTY:
       if (expr->is_compound()) {
-        // We need the receiver both on the stack and in edx.
+        // We need the receiver both on the stack and in the register.
         VisitForStackValue(property->obj());
-        __ mov(edx, Operand(esp, 0));
+        __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
       } else {
         VisitForStackValue(property->obj());
       }
@@ -1820,8 +1830,8 @@
       if (expr->is_compound()) {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
-        __ mov(edx, Operand(esp, kPointerSize));  // Object.
-        __ mov(ecx, Operand(esp, 0));             // Key.
+        __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, kPointerSize));
+        __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));
       } else {
         VisitForStackValue(property->obj());
         VisitForStackValue(property->key());
@@ -1903,12 +1913,12 @@
   VisitForStackValue(expr->expression());
 
   switch (expr->yield_kind()) {
-    case Yield::SUSPEND:
+    case Yield::kSuspend:
       // Pop value from top-of-stack slot; box result into result register.
       EmitCreateIteratorResult(false);
       __ push(result_register());
       // Fall through.
-    case Yield::INITIAL: {
+    case Yield::kInitial: {
       Label suspend, continuation, post_runtime, resume;
 
       __ jmp(&suspend);
@@ -1918,17 +1928,18 @@
 
       __ bind(&suspend);
       VisitForAccumulatorValue(expr->generator_object());
-      ASSERT(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
              Immediate(Smi::FromInt(continuation.pos())));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
       __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
+      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+                          kDontSaveFPRegs);
       __ lea(ebx, Operand(ebp, StandardFrameConstants::kExpressionsOffset));
       __ cmp(esp, ebx);
       __ j(equal, &post_runtime);
       __ push(eax);  // generator object
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
       __ bind(&post_runtime);
@@ -1940,7 +1951,7 @@
       break;
     }
 
-    case Yield::FINAL: {
+    case Yield::kFinal: {
       VisitForAccumulatorValue(expr->generator_object());
       __ mov(FieldOperand(result_register(),
                           JSGeneratorObject::kContinuationOffset),
@@ -1952,7 +1963,7 @@
       break;
     }
 
-    case Yield::DELEGATING: {
+    case Yield::kDelegating: {
       VisitForStackValue(expr->generator_object());
 
       // Initial stack layout is as follows:
@@ -1961,6 +1972,9 @@
 
       Label l_catch, l_try, l_suspend, l_continuation, l_resume;
       Label l_next, l_call, l_loop;
+      Register load_receiver = LoadDescriptor::ReceiverRegister();
+      Register load_name = LoadDescriptor::NameRegister();
+
       // Initial send value is undefined.
       __ mov(eax, isolate()->factory()->undefined_value());
       __ jmp(&l_next);
@@ -1968,10 +1982,10 @@
       // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
       __ bind(&l_catch);
       handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
-      __ mov(ecx, isolate()->factory()->throw_string());  // "throw"
-      __ push(ecx);                                      // "throw"
-      __ push(Operand(esp, 2 * kPointerSize));           // iter
-      __ push(eax);                                      // exception
+      __ mov(load_name, isolate()->factory()->throw_string());  // "throw"
+      __ push(load_name);                                       // "throw"
+      __ push(Operand(esp, 2 * kPointerSize));                  // iter
+      __ push(eax);                                             // exception
       __ jmp(&l_call);
 
       // try { received = %yield result }
@@ -1989,13 +2003,14 @@
       const int generator_object_depth = kPointerSize + handler_size;
       __ mov(eax, Operand(esp, generator_object_depth));
       __ push(eax);                                      // g
-      ASSERT(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContinuationOffset),
              Immediate(Smi::FromInt(l_continuation.pos())));
       __ mov(FieldOperand(eax, JSGeneratorObject::kContextOffset), esi);
       __ mov(ecx, esi);
-      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx);
-      __ CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject, 1);
+      __ RecordWriteField(eax, JSGeneratorObject::kContextOffset, ecx, edx,
+                          kDontSaveFPRegs);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
       __ mov(context_register(),
              Operand(ebp, StandardFrameConstants::kContextOffset));
       __ pop(eax);                                       // result
@@ -2005,15 +2020,20 @@
 
       // receiver = iter; f = iter.next; arg = received;
       __ bind(&l_next);
-      __ mov(ecx, isolate()->factory()->next_string());  // "next"
-      __ push(ecx);
-      __ push(Operand(esp, 2 * kPointerSize));           // iter
-      __ push(eax);                                      // received
+
+      __ mov(load_name, isolate()->factory()->next_string());
+      __ push(load_name);                           // "next"
+      __ push(Operand(esp, 2 * kPointerSize));      // iter
+      __ push(eax);                                 // received
 
       // result = receiver[f](arg);
       __ bind(&l_call);
-      __ mov(edx, Operand(esp, kPointerSize));
-      Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+      __ mov(load_receiver, Operand(esp, kPointerSize));
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Immediate(Smi::FromInt(expr->KeyedLoadFeedbackSlot())));
+      }
+      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
       CallIC(ic, TypeFeedbackId::None());
       __ mov(edi, eax);
       __ mov(Operand(esp, 2 * kPointerSize), edi);
@@ -2026,8 +2046,13 @@
       // if (!result.done) goto l_try;
       __ bind(&l_loop);
       __ push(eax);                                      // save result
-      __ mov(edx, eax);                                  // result
-      __ mov(ecx, isolate()->factory()->done_string());  // "done"
+      __ Move(load_receiver, eax);                       // result
+      __ mov(load_name,
+             isolate()->factory()->done_string());       // "done"
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Immediate(Smi::FromInt(expr->DoneFeedbackSlot())));
+      }
       CallLoadIC(NOT_CONTEXTUAL);                        // result.done in eax
       Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
       CallIC(bool_ic);
@@ -2035,8 +2060,13 @@
       __ j(zero, &l_try);
 
       // result.value
-      __ pop(edx);                                        // result
-      __ mov(ecx, isolate()->factory()->value_string());  // "value"
+      __ pop(load_receiver);                              // result
+      __ mov(load_name,
+             isolate()->factory()->value_string());       // "value"
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Immediate(Smi::FromInt(expr->ValueFeedbackSlot())));
+      }
       CallLoadIC(NOT_CONTEXTUAL);                         // result.value in eax
       context()->DropAndPlug(2, eax);                     // drop iter and g
       break;
@@ -2049,7 +2079,7 @@
     Expression* value,
     JSGeneratorObject::ResumeMode resume_mode) {
   // The value stays in eax, and is ultimately read by the resumed generator, as
-  // if CallRuntime(Runtime::kHiddenSuspendJSGeneratorObject) returned it. Or it
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
   // is read to throw the value when the resumed generator is already closed.
   // ebx will hold the generator object until the activation has been resumed.
   VisitForStackValue(generator);
@@ -2129,7 +2159,7 @@
   __ push(ebx);
   __ push(result_register());
   __ Push(Smi::FromInt(resume_mode));
-  __ CallRuntime(Runtime::kHiddenResumeJSGeneratorObject, 3);
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
   // Not reached: the runtime call returns elsewhere.
   __ Abort(kGeneratorFailedToResume);
 
@@ -2143,14 +2173,14 @@
   } else {
     // Throw the provided value.
     __ push(eax);
-    __ CallRuntime(Runtime::kHiddenThrow, 1);
+    __ CallRuntime(Runtime::kThrow, 1);
   }
   __ jmp(&done);
 
   // Throw error if we attempt to operate on a running generator.
   __ bind(&wrong_state);
   __ push(ebx);
-  __ CallRuntime(Runtime::kHiddenThrowGeneratorStateError, 1);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
 
   __ bind(&done);
   context()->Plug(result_register());
@@ -2161,22 +2191,25 @@
   Label gc_required;
   Label allocated;
 
-  Handle<Map> map(isolate()->native_context()->iterator_result_map());
+  const int instance_size = 5 * kPointerSize;
+  DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
+            instance_size);
 
-  __ Allocate(map->instance_size(), eax, ecx, edx, &gc_required, TAG_OBJECT);
+  __ Allocate(instance_size, eax, ecx, edx, &gc_required, TAG_OBJECT);
   __ jmp(&allocated);
 
   __ bind(&gc_required);
-  __ Push(Smi::FromInt(map->instance_size()));
-  __ CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1);
+  __ Push(Smi::FromInt(instance_size));
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
   __ mov(context_register(),
          Operand(ebp, StandardFrameConstants::kContextOffset));
 
   __ bind(&allocated);
-  __ mov(ebx, map);
+  __ mov(ebx, Operand(esi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ mov(ebx, FieldOperand(ebx, GlobalObject::kNativeContextOffset));
+  __ mov(ebx, ContextOperand(ebx, Context::ITERATOR_RESULT_MAP_INDEX));
   __ pop(ecx);
   __ mov(edx, isolate()->factory()->ToBoolean(done));
-  ASSERT_EQ(map->instance_size(), 5 * kPointerSize);
   __ mov(FieldOperand(eax, HeapObject::kMapOffset), ebx);
   __ mov(FieldOperand(eax, JSObject::kPropertiesOffset),
          isolate()->factory()->empty_fixed_array());
@@ -2187,24 +2220,51 @@
 
   // Only the value field needs a write barrier, as the other values are in the
   // root set.
-  __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset,
-                      ecx, edx);
+  __ RecordWriteField(eax, JSGeneratorObject::kResultValuePropertyOffset, ecx,
+                      edx, kDontSaveFPRegs);
 }
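 // The object assembled here is an ordinary iterator result, at the JS level
 // { value: <result>, done: <true|false> }. Its size is the three-word
 // JSObject header (map, properties, elements) plus the two in-object
 // fields, which is where 5 * kPointerSize comes from; the DCHECK_EQ above
 // cross-checks this against the native context's iterator_result_map.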
 
 
 void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
   Literal* key = prop->key()->AsLiteral();
-  ASSERT(!key->value()->IsSmi());
-  __ mov(ecx, Immediate(key->value()));
-  CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  DCHECK(!key->value()->IsSmi());
+  __ mov(LoadDescriptor::NameRegister(), Immediate(key->value()));
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Immediate(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ push(eax);
+  VisitForStackValue(super_ref->this_var());
+  __ push(Immediate(key->value()));
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
 }
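 // Named super loads bypass the IC machinery entirely: %LoadFromSuper is
 // handed the home object, the current `this`, and the property name (in the
 // push order above), looks the name up starting at the home object's
 // prototype, and uses `this` as the receiver seen by accessors. In JS terms
 // (a sketch):
 //
 //   super.x   ~~>   %LoadFromSuper(<home_object>, this, "x")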
 
 
 void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
   SetSourcePosition(prop->position());
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
-  CallIC(ic, prop->PropertyFeedbackId());
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Immediate(Smi::FromInt(prop->PropertyFeedbackSlot())));
+    CallIC(ic);
+  } else {
+    CallIC(ic, prop->PropertyFeedbackId());
+  }
 }
 
 
@@ -2224,8 +2284,8 @@
 
   __ bind(&stub_call);
   __ mov(eax, ecx);
-  BinaryOpICStub stub(isolate(), op, mode);
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   __ jmp(&done, Label::kNear);
 
@@ -2307,16 +2367,16 @@
                                      Token::Value op,
                                      OverwriteMode mode) {
   __ pop(edx);
-  BinaryOpICStub stub(isolate(), op, mode);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
   JumpPatchSite patch_site(masm_);    // unbound, signals no inlined smi code.
-  CallIC(stub.GetCode(), expr->BinaryOperationFeedbackId());
+  CallIC(code, expr->BinaryOperationFeedbackId());
   patch_site.EmitPatchInfo();
   context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitAssignment(Expression* expr) {
-  ASSERT(expr->IsValidReferenceExpression());
+  DCHECK(expr->IsValidReferenceExpression());
 
   // Left-hand side can only be a property, a global or a (parameter or local)
   // slot.
@@ -2339,9 +2399,10 @@
     case NAMED_PROPERTY: {
       __ push(eax);  // Preserve value.
       VisitForAccumulatorValue(prop->obj());
-      __ mov(edx, eax);
-      __ pop(eax);  // Restore value.
-      __ mov(ecx, prop->key()->AsLiteral()->value());
+      __ Move(StoreDescriptor::ReceiverRegister(), eax);
+      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      __ mov(StoreDescriptor::NameRegister(),
+             prop->key()->AsLiteral()->value());
       CallStoreIC();
       break;
     }
@@ -2349,12 +2410,11 @@
       __ push(eax);  // Preserve value.
       VisitForStackValue(prop->obj());
       VisitForAccumulatorValue(prop->key());
-      __ mov(ecx, eax);
-      __ pop(edx);  // Receiver.
-      __ pop(eax);  // Restore value.
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ Move(StoreDescriptor::NameRegister(), eax);
+      __ pop(StoreDescriptor::ReceiverRegister());  // Receiver.
+      __ pop(StoreDescriptor::ValueRegister());     // Restore value.
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic);
       break;
     }
@@ -2369,39 +2429,29 @@
   if (var->IsContextSlot()) {
     __ mov(edx, eax);
     int offset = Context::SlotOffset(var->index());
-    __ RecordWriteContextSlot(ecx, offset, edx, ebx);
+    __ RecordWriteContextSlot(ecx, offset, edx, ebx, kDontSaveFPRegs);
   }
 }
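 // RecordWriteField and RecordWriteContextSlot now take an explicit
 // SaveFPRegsMode. Full codegen never keeps live XMM values across a write
 // barrier, so every site in this file passes kDontSaveFPRegs (an inference
 // from the pattern; the rationale is not spelled out in the patch).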
 
 
-void FullCodeGenerator::EmitCallStoreContextSlot(
-    Handle<String> name, StrictMode strict_mode) {
-  __ push(eax);  // Value.
-  __ push(esi);  // Context.
-  __ push(Immediate(name));
-  __ push(Immediate(Smi::FromInt(strict_mode)));
-  __ CallRuntime(Runtime::kHiddenStoreContextSlot, 4);
-}
-
-
 void FullCodeGenerator::EmitVariableAssignment(Variable* var,
                                                Token::Value op) {
   if (var->IsUnallocated()) {
     // Global var, const, or let.
-    __ mov(ecx, var->name());
-    __ mov(edx, GlobalObjectOperand());
+    __ mov(StoreDescriptor::NameRegister(), var->name());
+    __ mov(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
     CallStoreIC();
 
   } else if (op == Token::INIT_CONST_LEGACY) {
     // Const initializers need a write barrier.
-    ASSERT(!var->IsParameter());  // No const parameters.
+    DCHECK(!var->IsParameter());  // No const parameters.
     if (var->IsLookupSlot()) {
       __ push(eax);
       __ push(esi);
       __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kHiddenInitializeConstContextSlot, 3);
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
     } else {
-      ASSERT(var->IsStackLocal() || var->IsContextSlot());
+      DCHECK(var->IsStackLocal() || var->IsContextSlot());
       Label skip;
       MemOperand location = VarOperand(var, ecx);
       __ mov(edx, location);
@@ -2413,28 +2463,30 @@
 
   } else if (var->mode() == LET && op != Token::INIT_LET) {
     // Non-initializing assignment to let variable needs a write barrier.
-    if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
-    } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
-      Label assign;
-      MemOperand location = VarOperand(var, ecx);
-      __ mov(edx, location);
-      __ cmp(edx, isolate()->factory()->the_hole_value());
-      __ j(not_equal, &assign, Label::kNear);
-      __ push(Immediate(var->name()));
-      __ CallRuntime(Runtime::kHiddenThrowReferenceError, 1);
-      __ bind(&assign);
-      EmitStoreToStackLocalOrContextSlot(var, location);
-    }
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, ecx);
+    __ mov(edx, location);
+    __ cmp(edx, isolate()->factory()->the_hole_value());
+    __ j(not_equal, &assign, Label::kNear);
+    __ push(Immediate(var->name()));
+    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    __ bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
 
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
-    // Assignment to var or initializing assignment to let/const
-    // in harmony mode.
     if (var->IsLookupSlot()) {
-      EmitCallStoreContextSlot(var->name(), strict_mode());
+      // Assignment to var.
+      __ push(eax);  // Value.
+      __ push(esi);  // Context.
+      __ push(Immediate(var->name()));
+      __ push(Immediate(Smi::FromInt(strict_mode())));
+      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
     } else {
-      ASSERT(var->IsStackAllocated() || var->IsContextSlot());
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
       MemOperand location = VarOperand(var, ecx);
       if (generate_debug_code_ && op == Token::INIT_LET) {
         // Check for an uninitialized let binding.
@@ -2455,13 +2507,13 @@
   // esp[0] : receiver
 
   Property* prop = expr->target()->AsProperty();
-  ASSERT(prop != NULL);
-  ASSERT(prop->key()->IsLiteral());
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
 
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  __ mov(ecx, prop->key()->AsLiteral()->value());
-  __ pop(edx);
+  __ mov(StoreDescriptor::NameRegister(), prop->key()->AsLiteral()->value());
+  __ pop(StoreDescriptor::ReceiverRegister());
   CallStoreIC(expr->AssignmentFeedbackId());
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
   context()->Plug(eax);
@@ -2474,13 +2526,12 @@
   // esp[0]            : key
   // esp[kPointerSize] : receiver
 
-  __ pop(ecx);  // Key.
-  __ pop(edx);
+  __ pop(StoreDescriptor::NameRegister());  // Key.
+  __ pop(StoreDescriptor::ReceiverRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(eax));
   // Record source code position before IC call.
   SetSourcePosition(expr->position());
-  Handle<Code> ic = strict_mode() == SLOPPY
-      ? isolate()->builtins()->KeyedStoreIC_Initialize()
-      : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
   CallIC(ic, expr->AssignmentFeedbackId());
 
   PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
@@ -2493,16 +2544,20 @@
   Expression* key = expr->key();
 
   if (key->IsPropertyName()) {
-    VisitForAccumulatorValue(expr->obj());
-    __ mov(edx, result_register());
-    EmitNamedPropertyLoad(expr);
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), result_register());
+      EmitNamedPropertyLoad(expr);
+    } else {
+      EmitNamedSuperPropertyLoad(expr);
+    }
     PrepareForBailoutForId(expr->LoadId(), TOS_REG);
     context()->Plug(eax);
   } else {
     VisitForStackValue(expr->obj());
     VisitForAccumulatorValue(expr->key());
-    __ pop(edx);                     // Object.
-    __ mov(ecx, result_register());  // Key.
+    __ pop(LoadDescriptor::ReceiverRegister());                  // Object.
+    __ Move(LoadDescriptor::NameRegister(), result_register());  // Key.
     EmitKeyedPropertyLoad(expr);
     context()->Plug(eax);
   }
@@ -2520,11 +2575,10 @@
 void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
   Expression* callee = expr->expression();
 
-  CallIC::CallType call_type = callee->IsVariableProxy()
-      ? CallIC::FUNCTION
-      : CallIC::METHOD;
+  CallICState::CallType call_type =
+      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
   // Get the target function.
-  if (call_type == CallIC::FUNCTION) {
+  if (call_type == CallICState::FUNCTION) {
     { StackValueContext context(this);
       EmitVariableLoad(callee->AsVariableProxy());
       PrepareForBailout(callee, NO_REGISTERS);
@@ -2534,8 +2588,9 @@
     __ push(Immediate(isolate()->factory()->undefined_value()));
   } else {
     // Load the function from the receiver.
-    ASSERT(callee->IsProperty());
-    __ mov(edx, Operand(esp, 0));
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
     EmitNamedPropertyLoad(callee->AsProperty());
     PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
     // Push the target function under the receiver.
@@ -2547,6 +2602,42 @@
 }
 
 
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  SuperReference* super_ref = callee->AsProperty()->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ push(eax);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ push(eax);
+  __ push(Operand(esp, kPointerSize));
+  __ push(eax);
+  __ push(Immediate(key->value()));
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - home_object <-- LoadFromSuper will pop here and below.
+  //  - this (receiver)
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ mov(Operand(esp, kPointerSize), eax);
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
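+// At the JS level, a named super call `super.m(...args)` therefore becomes,
+// roughly (a sketch):
+//
+//   %LoadFromSuper(<home_object>, this, "m").call(this, ...args)
+//
+// The duplicated pushes keep one (home, this) pair for %LoadFromSuper to
+// consume; the surviving pair then has its home slot overwritten with the
+// target function, leaving (target, this) for EmitCall.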
+
+
 // Code common for calls using the IC.
 void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr,
                                                 Expression* key) {
@@ -2556,10 +2647,9 @@
   Expression* callee = expr->expression();
 
   // Load the function from the receiver.
-  ASSERT(callee->IsProperty());
-  __ mov(edx, Operand(esp, 0));
-  // Move the key into the right register for the keyed load IC.
-  __ mov(ecx, eax);
+  DCHECK(callee->IsProperty());
+  __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
+  __ mov(LoadDescriptor::NameRegister(), eax);
   EmitKeyedPropertyLoad(callee->AsProperty());
   PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
 
@@ -2567,11 +2657,11 @@
   __ push(Operand(esp, 0));
   __ mov(Operand(esp, kPointerSize), eax);
 
-  EmitCall(expr, CallIC::METHOD);
+  EmitCall(expr, CallICState::METHOD);
 }
 
 
-void FullCodeGenerator::EmitCall(Call* expr, CallIC::CallType call_type) {
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
   // Load the arguments.
   ZoneList<Expression*>* args = expr->arguments();
   int arg_count = args->length();
@@ -2608,6 +2698,8 @@
     __ push(Immediate(isolate()->factory()->undefined_value()));
   }
 
+  // Push the enclosing function.
+  __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   // Push the receiver of the enclosing function.
   __ push(Operand(ebp, (2 + info_->scope()->num_parameters()) * kPointerSize));
   // Push the language mode.
@@ -2617,7 +2709,7 @@
   __ push(Immediate(Smi::FromInt(scope()->start_position())));
 
   // Do the runtime call.
-  __ CallRuntime(Runtime::kHiddenResolvePossiblyDirectEval, 5);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
 }
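 // The bump from five to six runtime arguments corresponds to the enclosing
 // function pushed above; %ResolvePossiblyDirectEval now receives it
 // alongside the receiver, the language mode, and the source position.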
 
 
@@ -2677,14 +2769,14 @@
     { PreservePositionScope scope(masm()->positions_recorder());
       // Generate code for loading from variables potentially shadowed by
       // eval-introduced variables.
-      EmitDynamicLookupFastCase(proxy->var(), NOT_INSIDE_TYPEOF, &slow, &done);
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
     }
     __ bind(&slow);
     // Call the runtime to find the function to call (returned in eax) and
     // the object holding it (returned in edx).
     __ push(context_register());
     __ push(Immediate(proxy->name()));
-    __ CallRuntime(Runtime::kHiddenLoadContextSlot, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
     __ push(eax);  // Function.
     __ push(edx);  // Receiver.
 
@@ -2708,17 +2800,23 @@
 
   } else if (call_type == Call::PROPERTY_CALL) {
     Property* property = callee->AsProperty();
-    { PreservePositionScope scope(masm()->positions_recorder());
-      VisitForStackValue(property->obj());
-    }
-    if (property->key()->IsPropertyName()) {
-      EmitCallWithLoadIC(expr);
+    bool is_named_call = property->key()->IsPropertyName();
+    // Named super calls (super.x()) are handled by EmitSuperCallWithLoadIC.
+    if (property->IsSuperAccess() && is_named_call) {
+      EmitSuperCallWithLoadIC(expr);
     } else {
-      EmitKeyedCallWithLoadIC(expr, property->key());
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
     }
-
   } else {
-    ASSERT(call_type == Call::OTHER_CALL);
+    DCHECK(call_type == Call::OTHER_CALL);
     // Call to an arbitrary expression not handled specially above.
     { PreservePositionScope scope(masm()->positions_recorder());
       VisitForStackValue(callee);
@@ -2730,7 +2828,7 @@
 
 #ifdef DEBUG
   // RecordJSReturnSite should have been called.
-  ASSERT(expr->return_is_recorded_);
+  DCHECK(expr->return_is_recorded_);
 #endif
 }
 
@@ -2764,7 +2862,7 @@
   // Record call targets in unoptimized code.
   if (FLAG_pretenuring_call_new) {
     EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
-    ASSERT(expr->AllocationSiteFeedbackSlot() ==
+    DCHECK(expr->AllocationSiteFeedbackSlot() ==
            expr->CallNewFeedbackSlot() + 1);
   }
 
@@ -2780,7 +2878,7 @@
 
 void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2801,7 +2899,7 @@
 
 void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2822,7 +2920,7 @@
 
 void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2854,7 +2952,7 @@
 
 void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2876,7 +2974,7 @@
 
 void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2901,7 +2999,7 @@
 void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
     CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -2991,7 +3089,7 @@
 
 void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3013,7 +3111,7 @@
 
 void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3041,7 +3139,7 @@
 
 void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3063,7 +3161,7 @@
 
 void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3085,7 +3183,7 @@
 
 
 void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label materialize_true, materialize_false;
   Label* if_true = NULL;
@@ -3117,7 +3215,7 @@
 
 void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   // Load the two objects into registers and perform the comparison.
   VisitForStackValue(args->at(0));
@@ -3141,7 +3239,7 @@
 
 void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   // ArgumentsAccessStub expects the key in edx and the formal
   // parameter count in eax.
@@ -3155,7 +3253,7 @@
 
 
 void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
-  ASSERT(expr->arguments()->length() == 0);
+  DCHECK(expr->arguments()->length() == 0);
 
   Label exit;
   // Get the number of formal parameters.
@@ -3179,7 +3277,7 @@
 
 void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   Label done, null, function, non_function_constructor;
 
   VisitForAccumulatorValue(args->at(0));
@@ -3219,7 +3317,7 @@
 
   // Functions have class 'Function'.
   __ bind(&function);
-  __ mov(eax, isolate()->factory()->function_class_string());
+  __ mov(eax, isolate()->factory()->Function_string());
   __ jmp(&done);
 
   // Objects with a non-function constructor have class 'Object'.
@@ -3242,7 +3340,7 @@
   // Load the arguments on the stack and call the stub.
   SubStringStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3255,7 +3353,7 @@
   // Load the arguments on the stack and call the stub.
   RegExpExecStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 4);
+  DCHECK(args->length() == 4);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForStackValue(args->at(2));
@@ -3267,7 +3365,7 @@
 
 void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
 
@@ -3286,8 +3384,8 @@
 
 void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
-  ASSERT_NE(NULL, args->at(1)->AsLiteral());
+  DCHECK(args->length() == 2);
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
   Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
 
   VisitForAccumulatorValue(args->at(0));  // Load the object.
@@ -3323,7 +3421,7 @@
   }
 
   __ bind(&not_date_object);
-  __ CallRuntime(Runtime::kHiddenThrowNotDateError, 0);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
   __ bind(&done);
   context()->Plug(result);
 }
@@ -3331,15 +3429,15 @@
 
 void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = eax;
   Register index = ebx;
   Register value = ecx;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
 
   __ pop(value);
   __ pop(index);
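   // Note the reordering relative to the old code: the intrinsic now takes
   // (index, value, string) rather than (string, index, value), so the
   // string arrives in the accumulator and the other two come off the stack
   // (implying its JS call sites changed in step with this merge).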
@@ -3367,15 +3465,15 @@
 
 void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(3, args->length());
+  DCHECK_EQ(3, args->length());
 
   Register string = eax;
   Register index = ebx;
   Register value = ecx;
 
-  VisitForStackValue(args->at(1));  // index
-  VisitForStackValue(args->at(2));  // value
-  VisitForAccumulatorValue(args->at(0));  // string
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
   __ pop(value);
   __ pop(index);
 
@@ -3401,18 +3499,18 @@
 void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
   // Load the arguments on the stack and call the runtime function.
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
 
-  __ CallRuntime(Runtime::kHiddenMathPowSlow, 2);
+  __ CallRuntime(Runtime::kMathPowSlow, 2);
   context()->Plug(eax);
 }
 
 
 void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));  // Load the object.
   VisitForAccumulatorValue(args->at(1));  // Load the value.
@@ -3432,7 +3530,7 @@
   // Update the write barrier.  Save the value as it will be
   // overwritten by the write barrier code and is needed afterward.
   __ mov(edx, eax);
-  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx);
+  __ RecordWriteField(ebx, JSValue::kValueOffset, edx, ecx, kDontSaveFPRegs);
 
   __ bind(&done);
   context()->Plug(eax);
@@ -3441,7 +3539,7 @@
 
 void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(args->length(), 1);
+  DCHECK_EQ(args->length(), 1);
 
   // Load the argument into eax and call the stub.
   VisitForAccumulatorValue(args->at(0));
@@ -3454,7 +3552,7 @@
 
 void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3473,7 +3571,7 @@
 
 void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3519,7 +3617,7 @@
 
 void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
 
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
@@ -3567,7 +3665,7 @@
 
 void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
   VisitForStackValue(args->at(0));
   VisitForAccumulatorValue(args->at(1));
 
@@ -3580,7 +3678,7 @@
 
 void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
@@ -3593,7 +3691,7 @@
 
 void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() >= 2);
+  DCHECK(args->length() >= 2);
 
   int arg_count = args->length() - 2;  // 2 ~ receiver and function.
   for (int i = 0; i < arg_count + 1; ++i) {
@@ -3627,7 +3725,7 @@
   // Load the arguments on the stack and call the stub.
   RegExpConstructResultStub stub(isolate());
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 3);
+  DCHECK(args->length() == 3);
   VisitForStackValue(args->at(0));
   VisitForStackValue(args->at(1));
   VisitForAccumulatorValue(args->at(2));
@@ -3640,9 +3738,9 @@
 
 void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT_EQ(2, args->length());
+  DCHECK_EQ(2, args->length());
 
-  ASSERT_NE(NULL, args->at(0)->AsLiteral());
+  DCHECK_NE(NULL, args->at(0)->AsLiteral());
   int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
 
   Handle<FixedArray> jsfunction_result_caches(
@@ -3680,7 +3778,7 @@
   // Call runtime to perform the lookup.
   __ push(cache);
   __ push(key);
-  __ CallRuntime(Runtime::kHiddenGetFromCache, 2);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
 
   __ bind(&done);
   context()->Plug(eax);
@@ -3689,7 +3787,7 @@
 
 void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
 
   VisitForAccumulatorValue(args->at(0));
 
@@ -3713,7 +3811,7 @@
 
 void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 1);
+  DCHECK(args->length() == 1);
   VisitForAccumulatorValue(args->at(0));
 
   __ AssertString(eax);
@@ -3725,13 +3823,13 @@
 }
 
 
-void FullCodeGenerator::EmitFastAsciiArrayJoin(CallRuntime* expr) {
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
   Label bailout, done, one_char_separator, long_separator,
       non_trivial_array, not_size_one_array, loop,
       loop_1, loop_1_condition, loop_2, loop_2_entry, loop_3, loop_3_entry;
 
   ZoneList<Expression*>* args = expr->arguments();
-  ASSERT(args->length() == 2);
+  DCHECK(args->length() == 2);
   // We will leave the separator on the stack until the end of the function.
   VisitForStackValue(args->at(1));
   // Load this to eax (= array)
@@ -3783,7 +3881,7 @@
   array = no_reg;
 
 
-  // Check that all array elements are sequential ASCII strings, and
+  // Check that all array elements are sequential one-byte strings, and
   // accumulate the sum of their lengths, as a smi-encoded value.
   __ Move(index, Immediate(0));
   __ Move(string_length, Immediate(0));
@@ -3792,7 +3890,7 @@
   //                      scratch, string_length, elements.
   if (generate_debug_code_) {
     __ cmp(index, array_length);
-    __ Assert(less, kNoEmptyArraysHereInEmitFastAsciiArrayJoin);
+    __ Assert(less, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
   }
   __ bind(&loop);
   __ mov(string, FieldOperand(elements,
@@ -3830,7 +3928,7 @@
   // string_length: Sum of string lengths, as a smi.
   // elements: FixedArray of strings.
 
-  // Check that the separator is a flat ASCII string.
+  // Check that the separator is a flat one-byte string.
   __ mov(string, separator_operand);
   __ JumpIfSmi(string, &bailout);
   __ mov(scratch, FieldOperand(string, HeapObject::kMapOffset));
@@ -3854,8 +3952,8 @@
   // Live registers and stack values:
   //   string_length
   //   elements
-  __ AllocateAsciiString(result_pos, string_length, scratch,
-                         index, string, &bailout);
+  __ AllocateOneByteString(result_pos, string_length, scratch, index, string,
+                           &bailout);
   __ mov(result_operand, result_pos);
   __ lea(result_pos, FieldOperand(result_pos, SeqOneByteString::kHeaderSize));
 
@@ -3898,7 +3996,7 @@
 
   // One-character separator case
   __ bind(&one_char_separator);
-  // Replace separator with its ASCII character value.
+  // Replace separator with its one-byte character value.
   __ mov_b(scratch, FieldOperand(string, SeqOneByteString::kHeaderSize));
   __ mov_b(separator_operand, scratch);
 
@@ -3989,6 +4087,16 @@
 }
 
 
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
+  __ movzx_b(eax, Operand::StaticVariable(debug_is_active));
+  __ SmiTag(eax);
+  context()->Plug(eax);
+}
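+// %_DebugIsActive therefore compiles down to a byte load from an
+// isolate-global flag plus a Smi tag, cheap enough to poll inline wherever
+// generated code needs to know whether a debugger is attached.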
+
+
 void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
   if (expr->function() != NULL &&
       expr->function()->intrinsic_type == Runtime::INLINE) {
@@ -4006,9 +4114,15 @@
     __ push(FieldOperand(eax, GlobalObject::kBuiltinsOffset));
 
     // Load the function from the receiver.
-    __ mov(edx, Operand(esp, 0));
-    __ mov(ecx, Immediate(expr->name()));
-    CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
+    __ mov(LoadDescriptor::NameRegister(), Immediate(expr->name()));
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Immediate(Smi::FromInt(expr->CallRuntimeFeedbackSlot())));
+      CallLoadIC(NOT_CONTEXTUAL);
+    } else {
+      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    }
 
     // Push the target function under the receiver.
     __ push(Operand(esp, 0));
@@ -4062,7 +4176,7 @@
         Variable* var = proxy->var();
         // Delete of an unqualified identifier is disallowed in strict mode
         // but "delete this" is allowed.
-        ASSERT(strict_mode() == SLOPPY || var->is_this());
+        DCHECK(strict_mode() == SLOPPY || var->is_this());
         if (var->IsUnallocated()) {
           __ push(GlobalObjectOperand());
           __ push(Immediate(var->name()));
@@ -4079,7 +4193,7 @@
           // context where the variable was introduced.
           __ push(context_register());
           __ push(Immediate(var->name()));
-          __ CallRuntime(Runtime::kHiddenDeleteContextSlot, 2);
+          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
           context()->Plug(eax);
         }
       } else {
@@ -4117,7 +4231,7 @@
         // for control and plugging the control flow into the context,
         // because we need to prepare a pair of extra administrative AST ids
         // for the optimizing compiler.
-        ASSERT(context()->IsAccumulatorValue() || context()->IsStackValue());
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
         Label materialize_true, materialize_false, done;
         VisitForControl(expr->expression(),
                         &materialize_false,
@@ -4160,7 +4274,7 @@
 
 
 void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
-  ASSERT(expr->expression()->IsValidReferenceExpression());
+  DCHECK(expr->expression()->IsValidReferenceExpression());
 
   Comment cmnt(masm_, "[ CountOperation");
   SetSourcePosition(expr->position());
@@ -4179,7 +4293,7 @@
 
   // Evaluate expression and get value.
   if (assign_type == VARIABLE) {
-    ASSERT(expr->expression()->AsVariableProxy()->var() != NULL);
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
     AccumulatorValueContext context(this);
     EmitVariableLoad(expr->expression()->AsVariableProxy());
   } else {
@@ -4188,16 +4302,16 @@
       __ push(Immediate(Smi::FromInt(0)));
     }
     if (assign_type == NAMED_PROPERTY) {
-      // Put the object both on the stack and in edx.
-      VisitForAccumulatorValue(prop->obj());
-      __ push(eax);
-      __ mov(edx, eax);
+      // Put the object both on the stack and in the register.
+      VisitForStackValue(prop->obj());
+      __ mov(LoadDescriptor::ReceiverRegister(), Operand(esp, 0));
       EmitNamedPropertyLoad(prop);
     } else {
       VisitForStackValue(prop->obj());
       VisitForStackValue(prop->key());
-      __ mov(edx, Operand(esp, kPointerSize));  // Object.
-      __ mov(ecx, Operand(esp, 0));             // Key.
+      __ mov(LoadDescriptor::ReceiverRegister(),
+             Operand(esp, kPointerSize));                       // Object.
+      __ mov(LoadDescriptor::NameRegister(), Operand(esp, 0));  // Key.
       EmitKeyedPropertyLoad(prop);
     }
   }
@@ -4282,8 +4396,9 @@
   __ bind(&stub_call);
   __ mov(edx, eax);
   __ mov(eax, Immediate(Smi::FromInt(1)));
-  BinaryOpICStub stub(isolate(), expr->binary_op(), NO_OVERWRITE);
-  CallIC(stub.GetCode(), expr->CountBinOpFeedbackId());
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), expr->binary_op(),
+                                              NO_OVERWRITE).code();
+  CallIC(code, expr->CountBinOpFeedbackId());
   patch_site.EmitPatchInfo();
   __ bind(&done);
 
@@ -4312,8 +4427,9 @@
       }
       break;
     case NAMED_PROPERTY: {
-      __ mov(ecx, prop->key()->AsLiteral()->value());
-      __ pop(edx);
+      __ mov(StoreDescriptor::NameRegister(),
+             prop->key()->AsLiteral()->value());
+      __ pop(StoreDescriptor::ReceiverRegister());
       CallStoreIC(expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4326,11 +4442,10 @@
       break;
     }
     case KEYED_PROPERTY: {
-      __ pop(ecx);
-      __ pop(edx);
-      Handle<Code> ic = strict_mode() == SLOPPY
-          ? isolate()->builtins()->KeyedStoreIC_Initialize()
-          : isolate()->builtins()->KeyedStoreIC_Initialize_Strict();
+      __ pop(StoreDescriptor::NameRegister());
+      __ pop(StoreDescriptor::ReceiverRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
       CallIC(ic, expr->CountStoreFeedbackId());
       PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
       if (expr->is_postfix()) {
@@ -4349,13 +4464,17 @@
 
 void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
   VariableProxy* proxy = expr->AsVariableProxy();
-  ASSERT(!context()->IsEffect());
-  ASSERT(!context()->IsTest());
+  DCHECK(!context()->IsEffect());
+  DCHECK(!context()->IsTest());
 
   if (proxy != NULL && proxy->var()->IsUnallocated()) {
     Comment cmnt(masm_, "[ Global variable");
-    __ mov(edx, GlobalObjectOperand());
-    __ mov(ecx, Immediate(proxy->name()));
+    __ mov(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    __ mov(LoadDescriptor::NameRegister(), Immediate(proxy->name()));
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Immediate(Smi::FromInt(proxy->VariableFeedbackSlot())));
+    }
     // Use a regular load, not a contextual load, to avoid a reference
     // error.
     CallLoadIC(NOT_CONTEXTUAL);
@@ -4367,12 +4486,12 @@
 
     // Generate code for loading from variables potentially shadowed
     // by eval-introduced variables.
-    EmitDynamicLookupFastCase(proxy->var(), INSIDE_TYPEOF, &slow, &done);
+    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
 
     __ bind(&slow);
     __ push(esi);
     __ push(Immediate(proxy->name()));
-    __ CallRuntime(Runtime::kHiddenLoadContextSlotNoReferenceError, 2);
+    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
     PrepareForBailout(expr, TOS_REG);
     __ bind(&done);
 
@@ -4422,10 +4541,6 @@
     __ j(equal, if_true);
     __ cmp(eax, isolate()->factory()->false_value());
     Split(equal, if_true, if_false, fall_through);
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(check, factory->null_string())) {
-    __ cmp(eax, isolate()->factory()->null_value());
-    Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->undefined_string())) {
     __ cmp(eax, isolate()->factory()->undefined_value());
     __ j(equal, if_true);
@@ -4444,10 +4559,8 @@
     Split(equal, if_true, if_false, fall_through);
   } else if (String::Equals(check, factory->object_string())) {
     __ JumpIfSmi(eax, if_false);
-    if (!FLAG_harmony_typeof) {
-      __ cmp(eax, isolate()->factory()->null_value());
-      __ j(equal, if_true);
-    }
+    __ cmp(eax, isolate()->factory()->null_value());
+    __ j(equal, if_true);
     __ CmpObjectType(eax, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, edx);
     __ j(below, if_false);
     __ CmpInstanceType(edx, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -4521,7 +4634,7 @@
 
       // Record position and call the compare IC.
       SetSourcePosition(expr->position());
-      Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
       CallIC(ic, expr->CompareOperationFeedbackId());
       patch_site.EmitPatchInfo();
 
@@ -4583,7 +4696,7 @@
 
 
 void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
-  ASSERT_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
+  DCHECK_EQ(POINTER_SIZE_ALIGN(frame_offset), frame_offset);
   __ mov(Operand(ebp, frame_offset), value);
 }
 
@@ -4608,7 +4721,7 @@
     // Fetch it from the context.
     __ push(ContextOperand(esi, Context::CLOSURE_INDEX));
   } else {
-    ASSERT(declaration_scope->is_function_scope());
+    DCHECK(declaration_scope->is_function_scope());
     __ push(Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
   }
 }
@@ -4619,7 +4732,7 @@
 
 void FullCodeGenerator::EnterFinallyBlock() {
   // Cook return address on top of stack (smi encoded Code* delta)
-  ASSERT(!result_register().is(edx));
+  DCHECK(!result_register().is(edx));
   __ pop(edx);
   __ sub(edx, Immediate(masm_->CodeObject()));
   STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1);
@@ -4650,7 +4763,7 @@
 
 
 void FullCodeGenerator::ExitFinallyBlock() {
-  ASSERT(!result_register().is(edx));
+  DCHECK(!result_register().is(edx));
   // Restore pending message from stack.
   __ pop(edx);
   ExternalReference pending_message_script =
@@ -4761,25 +4874,25 @@
     Address pc) {
   Address call_target_address = pc - kIntSize;
   Address jns_instr_address = call_target_address - 3;
-  ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
+  DCHECK_EQ(kCallInstruction, *(call_target_address - 1));
 
   if (*jns_instr_address == kJnsInstruction) {
-    ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
-    ASSERT_EQ(isolate->builtins()->InterruptCheck()->entry(),
+    DCHECK_EQ(kJnsOffset, *(call_target_address - 2));
+    DCHECK_EQ(isolate->builtins()->InterruptCheck()->entry(),
               Assembler::target_address_at(call_target_address,
                                            unoptimized_code));
     return INTERRUPT;
   }
 
-  ASSERT_EQ(kNopByteOne, *jns_instr_address);
-  ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
+  DCHECK_EQ(kNopByteOne, *jns_instr_address);
+  DCHECK_EQ(kNopByteTwo, *(call_target_address - 2));
 
   if (Assembler::target_address_at(call_target_address, unoptimized_code) ==
       isolate->builtins()->OnStackReplacement()->entry()) {
     return ON_STACK_REPLACEMENT;
   }
 
-  ASSERT_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
+  DCHECK_EQ(isolate->builtins()->OsrAfterStackCheck()->entry(),
             Assembler::target_address_at(call_target_address,
                                          unoptimized_code));
   return OSR_AFTER_STACK_CHECK;
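
GetBackEdgeState distinguishes the three return values above by peeking at the bytes in front of the call: an intact jns means the interrupt check is still armed (INTERRUPT), while the two-byte nop means the site was patched and the call target selects between the two OSR builtins. A standalone model of that classification; the 0x79 byte is the standard short jns opcode but stands in for the Assembler-defined kJnsInstruction, so treat it as an assumption.

    #include <cstdint>
    #include <cstdio>

    enum BackEdgeState { INTERRUPT, ON_STACK_REPLACEMENT, OSR_AFTER_STACK_CHECK };

    BackEdgeState Classify(uint8_t first_byte, bool target_is_osr_entry) {
      const uint8_t kJns = 0x79;  // assumed encoding of kJnsInstruction
      if (first_byte == kJns) return INTERRUPT;  // check still armed
      // Patched to a nop: the call target decides which OSR state this is.
      return target_is_osr_entry ? ON_STACK_REPLACEMENT : OSR_AFTER_STACK_CHECK;
    }

    int main() {
      printf("%d\n", Classify(0x79, false));  // 0: INTERRUPT
      printf("%d\n", Classify(0x66, true));   // 1: ON_STACK_REPLACEMENT
    }
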
diff --git a/src/x87/ic-x87.cc b/src/x87/ic-x87.cc
deleted file mode 100644
index 6cd9ac4..0000000
--- a/src/x87/ic-x87.cc
+++ /dev/null
@@ -1,1290 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/codegen.h"
-#include "src/ic-inl.h"
-#include "src/runtime.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-// ----------------------------------------------------------------------------
-// Static IC stub generators.
-//
-
-#define __ ACCESS_MASM(masm)
-
-
-static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
-                                            Register type,
-                                            Label* global_object) {
-  // Register usage:
-  //   type: holds the receiver instance type on entry.
-  __ cmp(type, JS_GLOBAL_OBJECT_TYPE);
-  __ j(equal, global_object);
-  __ cmp(type, JS_BUILTINS_OBJECT_TYPE);
-  __ j(equal, global_object);
-  __ cmp(type, JS_GLOBAL_PROXY_TYPE);
-  __ j(equal, global_object);
-}
-
-
-// Generated code falls through if the receiver is a regular non-global
-// JS object with slow properties and no interceptors.
-static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
-                                                Register receiver,
-                                                Register r0,
-                                                Register r1,
-                                                Label* miss) {
-  // Register usage:
-  //   receiver: holds the receiver on entry and is unchanged.
-  //   r0: used to hold receiver instance type.
-  //       Holds the property dictionary on fall through.
-  //   r1: used to hold the receiver's map.
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss);
-
-  // Check that the receiver is a valid JS object.
-  __ mov(r1, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ movzx_b(r0, FieldOperand(r1, Map::kInstanceTypeOffset));
-  __ cmp(r0, FIRST_SPEC_OBJECT_TYPE);
-  __ j(below, miss);
-
-  // If this assert fails, we have to check upper bound too.
-  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
-
-  GenerateGlobalInstanceTypeCheck(masm, r0, miss);
-
-  // Check for non-global object that requires access check.
-  __ test_b(FieldOperand(r1, Map::kBitFieldOffset),
-            (1 << Map::kIsAccessCheckNeeded) |
-            (1 << Map::kHasNamedInterceptor));
-  __ j(not_zero, miss);
-
-  __ mov(r0, FieldOperand(receiver, JSObject::kPropertiesOffset));
-  __ CheckMap(r0, masm->isolate()->factory()->hash_table_map(), miss,
-              DONT_DO_SMI_CHECK);
-}
-
-
-// Helper function used to load a property from a dictionary backing
-// storage. This function may fail to load a property even though it is
-// in the dictionary, so code at miss_label must always call a backup
-// property load that is complete. This function is safe to call if
-// name is not internalized, and will jump to the miss_label in that
-// case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryLoad(MacroAssembler* masm,
-                                   Label* miss_label,
-                                   Register elements,
-                                   Register name,
-                                   Register r0,
-                                   Register r1,
-                                   Register result) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is unchanged.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // Scratch registers:
-  //
-  // r0   - used for the index into the property dictionary
-  //
-  // r1   - used to hold the capacity of the property dictionary.
-  //
-  // result - holds the result on exit.
-
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   r0,
-                                                   r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
-  __ j(not_zero, miss_label);
-
-  // Get the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ mov(result, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-}
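
The offset constants in GenerateDictionaryLoad fall out of the NameDictionary layout: entries are (key, value, details) triples stored after a fixed header, so the value sits one word and the details word two words past the entry's key, and r0 is scaled by times_4 because it holds a word index on a 32-bit target. A standalone arithmetic check; the header size and elements-start index are assumed values, the real constants live on NameDictionary.

    #include <cstdio>

    int main() {
      const int kPointerSize = 4;                // 32-bit target
      const int kHeaderSize = 3 * kPointerSize;  // assumed dictionary header
      const int kElementsStartIndex = 3;         // assumed
      const int kElementsStartOffset =
          kHeaderSize + kElementsStartIndex * kPointerSize;
      const int kValueOffset = kElementsStartOffset + kPointerSize;        // key + 1
      const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;  // key + 2
      printf("value=%d details=%d\n", kValueOffset, kDetailsOffset);
    }
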
-
-
-// Helper function used to store a property to a dictionary backing
-// storage. This function may fail to store a property eventhough it
-// is in the dictionary, so code at miss_label must always call a
-// backup property store that is complete. This function is safe to
-// call if name is not internalized, and will jump to the miss_label in
-// that case. The generated code assumes that the receiver has slow
-// properties, is not a global object and does not have interceptors.
-static void GenerateDictionaryStore(MacroAssembler* masm,
-                                    Label* miss_label,
-                                    Register elements,
-                                    Register name,
-                                    Register value,
-                                    Register r0,
-                                    Register r1) {
-  // Register use:
-  //
-  // elements - holds the property dictionary on entry and is clobbered.
-  //
-  // name - holds the name of the property on entry and is unchanged.
-  //
-  // value - holds the value to store and is unchanged.
-  //
-  // r0 - used for index into the property dictionary and is clobbered.
-  //
-  // r1 - used to hold the capacity of the property dictionary and is clobbered.
-  Label done;
-
-  // Probe the dictionary.
-  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   elements,
-                                                   name,
-                                                   r0,
-                                                   r1);
-
-  // If probing finds an entry in the dictionary, r0 contains the
-  // index into the dictionary. Check that the value is a normal
-  // property that is not read only.
-  __ bind(&done);
-  const int kElementsStartOffset =
-      NameDictionary::kHeaderSize +
-      NameDictionary::kElementsStartIndex * kPointerSize;
-  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
-  const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize;
-  __ test(Operand(elements, r0, times_4, kDetailsOffset - kHeapObjectTag),
-          Immediate(kTypeAndReadOnlyMask));
-  __ j(not_zero, miss_label);
-
-  // Store the value at the masked, scaled index.
-  const int kValueOffset = kElementsStartOffset + kPointerSize;
-  __ lea(r0, Operand(elements, r0, times_4, kValueOffset - kHeapObjectTag));
-  __ mov(Operand(r0, 0), value);
-
-  // Update write barrier. Make sure not to clobber the value.
-  __ mov(r1, value);
-  __ RecordWrite(elements, r0, r1);
-}
-
-
-// Checks the receiver for special cases (value type, slow case bits).
-// Falls through for regular JS object.
-static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register map,
-                                           int interceptor_bit,
-                                           Label* slow) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  // Scratch registers:
-  //   map - used to hold the map of the receiver.
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, slow);
-
-  // Get the map of the receiver.
-  __ mov(map, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  // Check bit field.
-  __ test_b(FieldOperand(map, Map::kBitFieldOffset),
-            (1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit));
-  __ j(not_zero, slow);
-  // Check that the object is some kind of JS object EXCEPT JS Value type.
-  // In the case that the object is a value-wrapper object,
-  // we enter the runtime system to make sure that indexing
-  // into string objects works as intended.
-  ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
-
-  __ CmpInstanceType(map, JS_OBJECT_TYPE);
-  __ j(below, slow);
-}
-
-
-// Loads an indexed element from a fast case array.
-// If not_fast_array is NULL, doesn't perform the elements map check.
-static void GenerateFastArrayLoad(MacroAssembler* masm,
-                                  Register receiver,
-                                  Register key,
-                                  Register scratch,
-                                  Register result,
-                                  Label* not_fast_array,
-                                  Label* out_of_range) {
-  // Register use:
-  //   receiver - holds the receiver and is unchanged.
-  //   key - holds the key and is unchanged (must be a smi).
-  // Scratch registers:
-  //   scratch - used to hold elements of the receiver and the loaded value.
-  //   result - holds the result on exit if the load succeeds and
-  //            we fall through.
-
-  __ mov(scratch, FieldOperand(receiver, JSObject::kElementsOffset));
-  if (not_fast_array != NULL) {
-    // Check that the object is in fast mode and writable.
-    __ CheckMap(scratch,
-                masm->isolate()->factory()->fixed_array_map(),
-                not_fast_array,
-                DONT_DO_SMI_CHECK);
-  } else {
-    __ AssertFastElements(scratch);
-  }
-  // Check that the key (index) is within bounds.
-  __ cmp(key, FieldOperand(scratch, FixedArray::kLengthOffset));
-  __ j(above_equal, out_of_range);
-  // Fast case: Do the load.
-  STATIC_ASSERT((kPointerSize == 4) && (kSmiTagSize == 1) && (kSmiTag == 0));
-  __ mov(scratch, FieldOperand(scratch, key, times_2, FixedArray::kHeaderSize));
-  __ cmp(scratch, Immediate(masm->isolate()->factory()->the_hole_value()));
-  // In case the loaded value is the_hole we have to consult GetProperty
-  // to ensure the prototype chain is searched.
-  __ j(equal, out_of_range);
-  if (!result.is(scratch)) {
-    __ mov(result, scratch);
-  }
-}
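
The times_2 scaling in the fast-array load is correct because the key register still holds a smi: on this 32-bit target a smi is the integer shifted left by one (kSmiTagSize == 1), so scaling the tagged word by two gives the integer times kPointerSize, exactly the element offset. A quick standalone check of that identity:

    #include <cassert>
    #include <cstdint>

    int main() {
      const int kSmiTagSize = 1, kPointerSize = 4;
      int32_t value = 7;
      int32_t smi = value << kSmiTagSize;       // the key as it sits in a register
      assert(smi * 2 == value * kPointerSize);  // times_2 on the smi == byte offset
    }
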
-
-
-// Checks whether a key is an array index string or a unique name.
-// Falls through if the key is a unique name.
-static void GenerateKeyNameCheck(MacroAssembler* masm,
-                                 Register key,
-                                 Register map,
-                                 Register hash,
-                                 Label* index_string,
-                                 Label* not_unique) {
-  // Register use:
-  //   key - holds the key and is unchanged. Assumed to be non-smi.
-  // Scratch registers:
-  //   map - used to hold the map of the key.
-  //   hash - used to hold the hash of the key.
-  Label unique;
-  __ CmpObjectType(key, LAST_UNIQUE_NAME_TYPE, map);
-  __ j(above, not_unique);
-  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
-  __ j(equal, &unique);
-
-  // Is the string an array index, with cached numeric value?
-  __ mov(hash, FieldOperand(key, Name::kHashFieldOffset));
-  __ test(hash, Immediate(Name::kContainsCachedArrayIndexMask));
-  __ j(zero, index_string);
-
-  // Is the string internalized? We already know it's a string so a single
-  // bit test is enough.
-  STATIC_ASSERT(kNotInternalizedTag != 0);
-  __ test_b(FieldOperand(map, Map::kInstanceTypeOffset),
-            kIsNotInternalizedMask);
-  __ j(not_zero, not_unique);
-
-  __ bind(&unique);
-}
-
-
-static Operand GenerateMappedArgumentsLookup(MacroAssembler* masm,
-                                             Register object,
-                                             Register key,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* unmapped_case,
-                                             Label* slow_case) {
-  Heap* heap = masm->isolate()->heap();
-  Factory* factory = masm->isolate()->factory();
-
-  // Check that the receiver is a JSObject. Because of the elements
-  // map check later, we do not need to check for interceptors or
-  // whether it requires access checks.
-  __ JumpIfSmi(object, slow_case);
-  // Check that the object is some kind of JSObject.
-  __ CmpObjectType(object, FIRST_JS_RECEIVER_TYPE, scratch1);
-  __ j(below, slow_case);
-
-  // Check that the key is a positive smi.
-  __ test(key, Immediate(0x80000001));
-  __ j(not_zero, slow_case);
-
-  // Load the elements into scratch1 and check its map.
-  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
-  __ mov(scratch1, FieldOperand(object, JSObject::kElementsOffset));
-  __ CheckMap(scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);
-
-  // Check if element is in the range of mapped arguments. If not, jump
-  // to the unmapped lookup with the parameter map in scratch1.
-  __ mov(scratch2, FieldOperand(scratch1, FixedArray::kLengthOffset));
-  __ sub(scratch2, Immediate(Smi::FromInt(2)));
-  __ cmp(key, scratch2);
-  __ j(above_equal, unmapped_case);
-
-  // Load element index and check whether it is the hole.
-  const int kHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize;
-  __ mov(scratch2, FieldOperand(scratch1,
-                                key,
-                                times_half_pointer_size,
-                                kHeaderSize));
-  __ cmp(scratch2, factory->the_hole_value());
-  __ j(equal, unmapped_case);
-
-  // Load value from context and return it. We can reuse scratch1 because
-  // we do not jump to the unmapped lookup (which requires the parameter
-  // map in scratch1).
-  const int kContextOffset = FixedArray::kHeaderSize;
-  __ mov(scratch1, FieldOperand(scratch1, kContextOffset));
-  return FieldOperand(scratch1,
-                      scratch2,
-                      times_half_pointer_size,
-                      Context::kHeaderSize);
-}
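
The mapped-arguments lookup walks a parameter map laid out as [context, backing store, slot0, slot1, ...], which is why the bound check subtracts two from the length: a hole in a slot means the argument is unmapped, and anything else is a context index to read through. A standalone sketch of that lookup, with V8 objects modeled as plain ints and the unmapped path elided:

    #include <cstdio>
    #include <vector>

    const int kHole = -1;  // stand-in for the_hole_value

    int Lookup(const std::vector<int>& parameter_map,
               const std::vector<int>& context, int key) {
      // Entries 0 and 1 are the context and backing store, hence length - 2.
      int mapped_count = static_cast<int>(parameter_map.size()) - 2;
      if (key < mapped_count && parameter_map[2 + key] != kHole)
        return context[parameter_map[2 + key]];  // mapped: read via the context
      return -999;  // unmapped: would fall through to the backing store
    }

    int main() {
      std::vector<int> context = {10, 11, 12};
      std::vector<int> map = {/*ctx*/ 0, /*store*/ 0, /*slots*/ 2, kHole};
      printf("%d\n", Lookup(map, context, 0));  // 12: arg 0 aliases context slot 2
    }
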
-
-
-static Operand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
-                                               Register key,
-                                               Register parameter_map,
-                                               Register scratch,
-                                               Label* slow_case) {
-  // Element is in arguments backing store, which is referenced by the
-  // second element of the parameter_map.
-  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
-  Register backing_store = parameter_map;
-  __ mov(backing_store, FieldOperand(parameter_map, kBackingStoreOffset));
-  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
-  __ CheckMap(backing_store, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
-  __ mov(scratch, FieldOperand(backing_store, FixedArray::kLengthOffset));
-  __ cmp(key, scratch);
-  __ j(greater_equal, slow_case);
-  return FieldOperand(backing_store,
-                      key,
-                      times_half_pointer_size,
-                      FixedArray::kHeaderSize);
-}
-
-
-void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, check_name, index_smi, index_name, property_array_property;
-  Label probe_dictionary, check_number_dictionary;
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(ecx, &check_name);
-  __ bind(&index_smi);
-  // Now the key is known to be a smi. This place is also jumped to from
-  // where a numeric string is converted to a smi.
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, edx, eax, Map::kHasIndexedInterceptor, &slow);
-
-  // Check the receiver's map to see if it has fast elements.
-  __ CheckFastElements(eax, &check_number_dictionary);
-
-  GenerateFastArrayLoad(masm, edx, ecx, eax, eax, NULL, &slow);
-  Isolate* isolate = masm->isolate();
-  Counters* counters = isolate->counters();
-  __ IncrementCounter(counters->keyed_load_generic_smi(), 1);
-  __ ret(0);
-
-  __ bind(&check_number_dictionary);
-  __ mov(ebx, ecx);
-  __ SmiUntag(ebx);
-  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // Check whether the elements is a number dictionary.
-  // edx: receiver
-  // ebx: untagged index
-  // ecx: key
-  // eax: elements
-  __ CheckMap(eax,
-              isolate->factory()->hash_table_map(),
-              &slow,
-              DONT_DO_SMI_CHECK);
-  Label slow_pop_receiver;
-  // Push receiver on the stack to free up a register for the dictionary
-  // probing.
-  __ push(edx);
-  __ LoadFromNumberDictionary(&slow_pop_receiver, eax, ecx, ebx, edx, edi, eax);
-  // Pop receiver before returning.
-  __ pop(edx);
-  __ ret(0);
-
-  __ bind(&slow_pop_receiver);
-  // Pop the receiver from the stack and jump to runtime.
-  __ pop(edx);
-
-  __ bind(&slow);
-  // Slow case: jump to runtime.
-  // edx: receiver
-  // ecx: key
-  __ IncrementCounter(counters->keyed_load_generic_slow(), 1);
-  GenerateRuntimeGetProperty(masm);
-
-  __ bind(&check_name);
-  GenerateKeyNameCheck(masm, ecx, eax, ebx, &index_name, &slow);
-
-  GenerateKeyedLoadReceiverCheck(
-      masm, edx, eax, Map::kHasNamedInterceptor, &slow);
-
-  // If the receiver is a fast-case object, check the keyed lookup
-  // cache. Otherwise probe the dictionary.
-  __ mov(ebx, FieldOperand(edx, JSObject::kPropertiesOffset));
-  __ cmp(FieldOperand(ebx, HeapObject::kMapOffset),
-         Immediate(isolate->factory()->hash_table_map()));
-  __ j(equal, &probe_dictionary);
-
-  // The receiver's map is still in eax, compute the keyed lookup cache hash
-  // based on 32 bits of the map pointer and the string hash.
-  if (FLAG_debug_code) {
-    __ cmp(eax, FieldOperand(edx, HeapObject::kMapOffset));
-    __ Check(equal, kMapIsNoLongerInEax);
-  }
-  __ mov(ebx, eax);  // Keep the map around for later.
-  __ shr(eax, KeyedLookupCache::kMapHashShift);
-  __ mov(edi, FieldOperand(ecx, String::kHashFieldOffset));
-  __ shr(edi, String::kHashShift);
-  __ xor_(eax, edi);
-  __ and_(eax, KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask);
-
-  // Load the key (consisting of map and internalized string) from the cache and
-  // check for match.
-  Label load_in_object_property;
-  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
-  Label hit_on_nth_entry[kEntriesPerBucket];
-  ExternalReference cache_keys =
-      ExternalReference::keyed_lookup_cache_keys(masm->isolate());
-
-  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
-    Label try_next_entry;
-    __ mov(edi, eax);
-    __ shl(edi, kPointerSizeLog2 + 1);
-    if (i != 0) {
-      __ add(edi, Immediate(kPointerSize * i * 2));
-    }
-    __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
-    __ j(not_equal, &try_next_entry);
-    __ add(edi, Immediate(kPointerSize));
-    __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
-    __ j(equal, &hit_on_nth_entry[i]);
-    __ bind(&try_next_entry);
-  }
-
-  __ lea(edi, Operand(eax, 1));
-  __ shl(edi, kPointerSizeLog2 + 1);
-  __ add(edi, Immediate(kPointerSize * (kEntriesPerBucket - 1) * 2));
-  __ cmp(ebx, Operand::StaticArray(edi, times_1, cache_keys));
-  __ j(not_equal, &slow);
-  __ add(edi, Immediate(kPointerSize));
-  __ cmp(ecx, Operand::StaticArray(edi, times_1, cache_keys));
-  __ j(not_equal, &slow);
-
-  // Get field offset.
-  // edx     : receiver
-  // ebx     : receiver's map
-  // ecx     : key
-  // eax     : lookup cache index
-  ExternalReference cache_field_offsets =
-      ExternalReference::keyed_lookup_cache_field_offsets(masm->isolate());
-
-  // Hit on nth entry.
-  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
-    __ bind(&hit_on_nth_entry[i]);
-    if (i != 0) {
-      __ add(eax, Immediate(i));
-    }
-    __ mov(edi,
-           Operand::StaticArray(eax, times_pointer_size, cache_field_offsets));
-    __ movzx_b(eax, FieldOperand(ebx, Map::kInObjectPropertiesOffset));
-    __ sub(edi, eax);
-    __ j(above_equal, &property_array_property);
-    if (i != 0) {
-      __ jmp(&load_in_object_property);
-    }
-  }
-
-  // Load in-object property.
-  __ bind(&load_in_object_property);
-  __ movzx_b(eax, FieldOperand(ebx, Map::kInstanceSizeOffset));
-  __ add(eax, edi);
-  __ mov(eax, FieldOperand(edx, eax, times_pointer_size, 0));
-  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
-  __ ret(0);
-
-  // Load property array property.
-  __ bind(&property_array_property);
-  __ mov(eax, FieldOperand(edx, JSObject::kPropertiesOffset));
-  __ mov(eax, FieldOperand(eax, edi, times_pointer_size,
-                           FixedArray::kHeaderSize));
-  __ IncrementCounter(counters->keyed_load_generic_lookup_cache(), 1);
-  __ ret(0);
-
-  // Do a quick inline probe of the receiver's dictionary, if it
-  // exists.
-  __ bind(&probe_dictionary);
-
-  __ mov(eax, FieldOperand(edx, JSObject::kMapOffset));
-  __ movzx_b(eax, FieldOperand(eax, Map::kInstanceTypeOffset));
-  GenerateGlobalInstanceTypeCheck(masm, eax, &slow);
-
-  GenerateDictionaryLoad(masm, &slow, ebx, ecx, eax, edi, eax);
-  __ IncrementCounter(counters->keyed_load_generic_symbol(), 1);
-  __ ret(0);
-
-  __ bind(&index_name);
-  __ IndexFromHash(ebx, ecx);
-  // Now jump to the place where smi keys are handled.
-  __ jmp(&index_smi);
-}
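
The cache probe above hashes the receiver map together with the key's hash field: shift both, xor them, and mask to the (power-of-two) cache capacity, then compare the (map, name) pair stored in each of the bucket's entries. A standalone sketch of the index computation; the shift and mask values are assumed stand-ins for the KeyedLookupCache constants.

    #include <cstdint>
    #include <cstdio>

    uint32_t ProbeIndex(uint32_t map_bits, uint32_t hash_field) {
      const uint32_t kMapHashShift = 5;  // assumed
      const uint32_t kHashShift = 2;     // assumed
      const uint32_t kMask = 0x3f;       // assumed capacity - 1
      return ((map_bits >> kMapHashShift) ^ (hash_field >> kHashShift)) & kMask;
    }

    int main() { printf("%u\n", ProbeIndex(0xdeadbeefu, 0x12345678u)); }
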
-
-
-void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key (index)
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss;
-
-  Register receiver = edx;
-  Register index = ecx;
-  Register scratch = ebx;
-  Register result = eax;
-
-  StringCharAtGenerator char_at_generator(receiver,
-                                          index,
-                                          scratch,
-                                          result,
-                                          &miss,  // When not a string.
-                                          &miss,  // When not a number.
-                                          &miss,  // When index out of range.
-                                          STRING_INDEX_IS_ARRAY_INDEX);
-  char_at_generator.GenerateFast(masm);
-  __ ret(0);
-
-  StubRuntimeCallHelper call_helper;
-  char_at_generator.GenerateSlow(masm, call_helper);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow;
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(edx, &slow);
-
-  // Check that the key is an array index, that is Uint32.
-  __ test(ecx, Immediate(kSmiTagMask | kSmiSignMask));
-  __ j(not_zero, &slow);
-
-  // Get the map of the receiver.
-  __ mov(eax, FieldOperand(edx, HeapObject::kMapOffset));
-
-  // Check that it has indexed interceptor and access checks
-  // are not enabled for this object.
-  __ movzx_b(eax, FieldOperand(eax, Map::kBitFieldOffset));
-  __ and_(eax, Immediate(kSlowCaseBitFieldMask));
-  __ cmp(eax, Immediate(1 << Map::kHasIndexedInterceptor));
-  __ j(not_zero, &slow);
-
-  // Everything is fine, call runtime.
-  __ pop(eax);
-  __ push(edx);  // receiver
-  __ push(ecx);  // key
-  __ push(eax);  // return address
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
-                        masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, notin;
-  Factory* factory = masm->isolate()->factory();
-  Operand mapped_location =
-      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, eax, &notin, &slow);
-  __ mov(eax, mapped_location);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in ebx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, eax, &slow);
-  __ cmp(unmapped_location, factory->the_hole_value());
-  __ j(equal, &slow);
-  __ mov(eax, unmapped_location);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, notin;
-  Operand mapped_location =
-      GenerateMappedArgumentsLookup(masm, edx, ecx, ebx, edi, &notin, &slow);
-  __ mov(mapped_location, eax);
-  __ lea(ecx, mapped_location);
-  __ mov(edx, eax);
-  __ RecordWrite(ebx, ecx, edx);
-  __ Ret();
-  __ bind(&notin);
-  // The unmapped lookup expects that the parameter map is in ebx.
-  Operand unmapped_location =
-      GenerateUnmappedArgumentsLookup(masm, ecx, ebx, edi, &slow);
-  __ mov(unmapped_location, eax);
-  __ lea(edi, unmapped_location);
-  __ mov(edx, eax);
-  __ RecordWrite(ebx, edi, edx);
-  __ Ret();
-  __ bind(&slow);
-  GenerateMiss(masm);
-}
-
-
-static void KeyedStoreGenerateGenericHelper(
-    MacroAssembler* masm,
-    Label* fast_object,
-    Label* fast_double,
-    Label* slow,
-    KeyedStoreCheckMap check_map,
-    KeyedStoreIncrementLength increment_length) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-  // eax: value
-  // ecx: key (a smi)
-  // edx: receiver
-  // ebx: FixedArray receiver->elements
-  // edi: receiver map
-  // Fast case: Do the store; the value could be either an Object or a double.
-  __ bind(fast_object);
-  if (check_map == kCheckMap) {
-    __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-    __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-    __ j(not_equal, fast_double);
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element
-  Label holecheck_passed1;
-  __ cmp(FixedArrayElementOperand(ebx, ecx),
-         masm->isolate()->factory()->the_hole_value());
-  __ j(not_equal, &holecheck_passed1);
-  __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(eax, &non_smi_value);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  // It's irrelevant whether the array is smi-only or not when writing a smi.
-  __ mov(FixedArrayElementOperand(ebx, ecx), eax);
-  __ ret(0);
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-  __ CheckFastObjectElements(edi, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ mov(FixedArrayElementOperand(ebx, ecx), eax);
-  // Update write barrier for the elements array address.
-  __ mov(edx, eax);  // Preserve the value which is returned.
-  __ RecordWriteArray(
-      ebx, edx, ecx, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ ret(0);
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-    __ j(not_equal, slow);
-    // If the value is a number, store it as a double in the FastDoubleElements
-    // array.
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so,
-  // go to the runtime.
-  uint32_t offset = FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32);
-  __ cmp(FieldOperand(ebx, ecx, times_4, offset), Immediate(kHoleNanUpper32));
-  __ j(not_equal, &fast_double_without_map_check);
-  __ JumpIfDictionaryInPrototypeChain(edx, ebx, edi, slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(eax, ebx, ecx, edi,
-                                 &transition_double_elements, false);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ add(FieldOperand(edx, JSArray::kLengthOffset),
-           Immediate(Smi::FromInt(1)));
-  }
-  __ ret(0);
-
-  __ bind(&transition_smi_elements);
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-
-  // Transition the array appropriately depending on the value type.
-  __ CheckMap(eax,
-              masm->isolate()->factory()->heap_number_map(),
-              &non_double_value,
-              DONT_DO_SMI_CHECK);
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS -> FAST_DOUBLE_ELEMENTS
-  // and complete the store.
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_DOUBLE_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         slow);
-  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
-                                                    FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
-                                                                   slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
-                                         FAST_ELEMENTS,
-                                         ebx,
-                                         edi,
-                                         slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
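
The helper encodes the one-way elements-kind lattice used by the three transition blocks above: smi elements widen to double elements when a heap number arrives, and both smi and double elements widen to tagged object elements for any other non-smi value; smi stores never force a transition. A standalone sketch of those rules:

    #include <cstdio>

    enum Kind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };
    enum Value { SMI, HEAP_NUMBER, OTHER_OBJECT };

    Kind Transition(Kind current, Value v) {
      if (current == FAST_SMI_ELEMENTS && v == HEAP_NUMBER)
        return FAST_DOUBLE_ELEMENTS;
      if (current == FAST_SMI_ELEMENTS && v == OTHER_OBJECT) return FAST_ELEMENTS;
      if (current == FAST_DOUBLE_ELEMENTS && v == OTHER_OBJECT)
        return FAST_ELEMENTS;
      return current;  // smi stores (and same-kind stores) need no transition
    }

    int main() {
      printf("%d\n", Transition(FAST_SMI_ELEMENTS, HEAP_NUMBER));  // 1: double
    }
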
-
-
-void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
-                                   StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array;
-
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(edx, &slow);
-  // Get the map from the receiver.
-  __ mov(edi, FieldOperand(edx, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks and is not observed.
-  // The generic stub does not perform map checks or handle observed objects.
-  __ test_b(FieldOperand(edi, Map::kBitFieldOffset),
-            1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved);
-  __ j(not_zero, &slow);
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(ecx, &slow);
-  __ CmpInstanceType(edi, JS_ARRAY_TYPE);
-  __ j(equal, &array);
-  // Check that the object is some kind of JSObject.
-  __ CmpInstanceType(edi, FIRST_JS_OBJECT_TYPE);
-  __ j(below, &slow);
-
-  // Object case: Check key against length in the elements array.
-  // eax: value
-  // edx: JSObject
-  // ecx: key (a smi)
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(below, &fast_object);
-
-  // Slow case: call runtime.
-  __ bind(&slow);
-  GenerateRuntimeSetProperty(masm, strict_mode);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // eax: value
-  // edx: receiver, a JSArray
-  // ecx: key, a smi.
-  // ebx: receiver->elements, a FixedArray
-  // edi: receiver map
-  // flags: compare (ecx, edx.length())
-  // do not leave holes in the array:
-  __ j(not_equal, &slow);
-  __ cmp(ecx, FieldOperand(ebx, FixedArray::kLengthOffset));
-  __ j(above_equal, &slow);
-  __ mov(edi, FieldOperand(ebx, HeapObject::kMapOffset));
-  __ cmp(edi, masm->isolate()->factory()->fixed_array_map());
-  __ j(not_equal, &check_if_double_array);
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ cmp(edi, masm->isolate()->factory()->fixed_double_array_map());
-  __ j(not_equal, &slow);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is, the length is always a smi.
-  __ bind(&array);
-  // eax: value
-  // edx: receiver, a JSArray
-  // ecx: key, a smi.
-  // edi: receiver map
-  __ mov(ebx, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array and fall through to the
-  // common store code.
-  __ cmp(ecx, FieldOperand(edx, JSArray::kLengthOffset));  // Compare smis.
-  __ j(above_equal, &extra);
-
-  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
-                                  &slow, kCheckMap, kDontIncrementLength);
-  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
-                                  &slow, kDontCheckMap, kIncrementLength);
-}
-
-
-void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  // Probe the stub cache.
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, edx, ecx, ebx, eax);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label miss, slow;
-
-  GenerateNameDictionaryReceiverCheck(masm, edx, eax, ebx, &miss);
-
-  // eax: elements
-  // Search the dictionary placing the result in eax.
-  GenerateDictionaryLoad(masm, &slow, eax, ecx, edi, ebx, eax);
-  __ ret(0);
-
-  // Dictionary load failed, go slow (but don't miss).
-  __ bind(&slow);
-  GenerateRuntimeGetProperty(masm);
-
-  // Cache miss: Jump to runtime.
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
-void LoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // name
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // name
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ IncrementCounter(masm->isolate()->counters()->keyed_load_miss(), 1);
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // name
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);  // receiver
-  __ push(ecx);  // name
-  __ push(ebx);  // return address
-
-  // Perform tail call to the entry.
-  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
-}
-
-
-void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
-  masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, edx, ecx, ebx, no_reg);
-
-  // Cache miss: Jump to runtime.
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(ebx);
-
-  // Perform tail call to the entry.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateNormal(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  Label miss, restore_miss;
-
-  GenerateNameDictionaryReceiverCheck(masm, edx, ebx, edi, &miss);
-
-  // A lot of registers are needed for storing to slow case
-  // objects. Push and restore receiver but rely on
-  // GenerateDictionaryStore preserving the value and name.
-  __ push(edx);
-  GenerateDictionaryStore(masm, &restore_miss, ebx, ecx, eax, edx, edi);
-  __ Drop(1);
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->store_normal_hit(), 1);
-  __ ret(0);
-
-  __ bind(&restore_miss);
-  __ pop(edx);
-
-  __ bind(&miss);
-  __ IncrementCounter(counters->store_normal_miss(), 1);
-  GenerateMiss(masm);
-}
-
-
-void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                         StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : name
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(NONE)));  // PropertyAttributes
-  __ push(Immediate(Smi::FromInt(strict_mode)));
-  __ push(ebx);  // return address
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
-                                              StrictMode strict_mode) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(Immediate(Smi::FromInt(NONE)));         // PropertyAttributes
-  __ push(Immediate(Smi::FromInt(strict_mode)));  // Strict mode.
-  __ push(ebx);   // return address
-
-  // Do tail-call to runtime routine.
-  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
-}
-
-
-void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(ebx);
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref =
-      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void StoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(ebx);   // return address
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref(IC_Utility(kStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- eax    : value
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-
-  __ pop(ebx);
-  __ push(edx);
-  __ push(ecx);
-  __ push(eax);
-  __ push(ebx);   // return address
-
-  // Do tail-call to runtime routine.
-  ExternalReference ref(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
-  __ TailCallExternalReference(ref, 3, 1);
-}
-
-
-#undef __
-
-
-Condition CompareIC::ComputeCondition(Token::Value op) {
-  switch (op) {
-    case Token::EQ_STRICT:
-    case Token::EQ:
-      return equal;
-    case Token::LT:
-      return less;
-    case Token::GT:
-      return greater;
-    case Token::LTE:
-      return less_equal;
-    case Token::GTE:
-      return greater_equal;
-    default:
-      UNREACHABLE();
-      return no_condition;
-  }
-}
-
-
-bool CompareIC::HasInlinedSmiCode(Address address) {
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test al, nothing
-  // was inlined.
-  return *test_instruction_address == Assembler::kTestAlByte;
-}
-
-
-void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
-  // The address of the instruction following the call.
-  Address test_instruction_address =
-      address + Assembler::kCallTargetAddressOffset;
-
-  // If the instruction following the call is not a test al, nothing
-  // was inlined.
-  if (*test_instruction_address != Assembler::kTestAlByte) {
-    ASSERT(*test_instruction_address == Assembler::kNopByte);
-    return;
-  }
-
-  Address delta_address = test_instruction_address + 1;
-  // The delta to the start of the map check instruction and the
-  // condition code uses at the patched jump.
-  uint8_t delta = *reinterpret_cast<uint8_t*>(delta_address);
-  if (FLAG_trace_ic) {
-    PrintF("[  patching ic at %p, test=%p, delta=%d\n",
-           address, test_instruction_address, delta);
-  }
-
-  // Patch with a short conditional jump. Enabling means switching from a short
-  // jump-if-carry/not-carry to jump-if-zero/not-zero, whereas disabling is the
-  // reverse operation of that.
-  Address jmp_address = test_instruction_address - delta;
-  ASSERT((check == ENABLE_INLINED_SMI_CHECK)
-         ? (*jmp_address == Assembler::kJncShortOpcode ||
-            *jmp_address == Assembler::kJcShortOpcode)
-         : (*jmp_address == Assembler::kJnzShortOpcode ||
-            *jmp_address == Assembler::kJzShortOpcode));
-  Condition cc = (check == ENABLE_INLINED_SMI_CHECK)
-      ? (*jmp_address == Assembler::kJncShortOpcode ? not_zero : zero)
-      : (*jmp_address == Assembler::kJnzShortOpcode ? not_carry : carry);
-  *jmp_address = static_cast<byte>(Assembler::kJccShortPrefix | cc);
-}
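
PatchInlinedSmiCode works because short Jcc encodings are 0x70 | condition, so toggling between the carry-based jumps (jc/jnc) and the zero-based jumps (jz/jnz) is a single-byte in-place rewrite; the displacement byte after the opcode is untouched. A standalone model of the patch on a plain byte buffer, using the standard x86 short-jump opcode values:

    #include <cassert>
    #include <cstdint>

    void Patch(uint8_t* jmp_address, bool enable) {
      const uint8_t kPrefix = 0x70;  // short Jcc opcode base
      const uint8_t carry = 0x2, not_carry = 0x3, zero = 0x4, not_zero = 0x5;
      uint8_t cc =
          enable ? (*jmp_address == (kPrefix | not_carry) ? not_zero : zero)
                 : (*jmp_address == (kPrefix | not_zero) ? not_carry : carry);
      *jmp_address = kPrefix | cc;
    }

    int main() {
      uint8_t code[] = {0x73, 0x10};  // jnc +16 (smi check disabled)
      Patch(code, /*enable=*/true);
      assert(code[0] == 0x75);        // now jnz +16; displacement unchanged
    }
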
-
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X87
diff --git a/src/x87/interface-descriptors-x87.cc b/src/x87/interface-descriptors-x87.cc
new file mode 100644
index 0000000..8dfad36
--- /dev/null
+++ b/src/x87/interface-descriptors-x87.cc
@@ -0,0 +1,304 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_X87
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return esi; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return edx; }
+const Register LoadDescriptor::NameRegister() { return ecx; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return eax; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return ebx; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return edx; }
+const Register StoreDescriptor::NameRegister() { return ecx; }
+const Register StoreDescriptor::ValueRegister() { return eax; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() {
+  return ebx;
+}
+
+
+const Register InstanceofDescriptor::left() { return eax; }
+const Register InstanceofDescriptor::right() { return edx; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return edx; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return eax; }
+
+
+const Register ApiGetterDescriptor::function_address() { return edx; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return eax; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+  return MathPowTaggedDescriptor::exponent();
+}
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ebx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // ToNumberStub invokes a function, and therefore needs a context.
+  Register registers[] = {esi, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax, ebx, ecx};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax, ebx, ecx, edx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ebx, edx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ecx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edi, edx};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Smi()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // eax : number of arguments
+  // ebx : feedback vector
+  // edx : (only if ebx is not the megamorphic symbol) slot in feedback
+  //       vector (Smi)
+  // edi : constructor function
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {esi, eax, edi, ebx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ecx, ebx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax, ebx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // eax -- number of arguments
+  // edi -- function
+  // ebx -- allocation site with elements kind
+  Register registers[] = {esi, edi, ebx};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // The stack parameter count covers the constructor pointer and a single
+  // argument.
+  Register registers[] = {esi, edi, ebx, eax};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // eax -- number of arguments
+  // edi -- function
+  Register registers[] = {esi, edi};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // Stack param count needed (constructor pointer and a single argument).
+  Register registers[] = {esi, edi, eax};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, ecx, edx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {esi, edx, eax};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      ecx,  // key
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // key
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      ecx,  // name
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // name
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      edx,  // receiver
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // receiver
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      edi,  // JSFunction
+      eax,  // actual number of arguments
+      ebx,  // expected number of arguments
+  };
+  Representation representations[] = {
+      Representation::Tagged(),     // context
+      Representation::Tagged(),     // JSFunction
+      Representation::Integer32(),  // actual number of arguments
+      Representation::Integer32(),  // expected number of arguments
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      esi,  // context
+      eax,  // callee
+      ebx,  // call_data
+      ecx,  // holder
+      edx,  // api_function_address
+  };
+  Representation representations[] = {
+      Representation::Tagged(),    // context
+      Representation::Tagged(),    // callee
+      Representation::Tagged(),    // call_data
+      Representation::Tagged(),    // holder
+      Representation::External(),  // api_function_address
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_X87
diff --git a/src/x87/lithium-codegen-x87.cc b/src/x87/lithium-codegen-x87.cc
index dab8768..a5bc5ea 100644
--- a/src/x87/lithium-codegen-x87.cc
+++ b/src/x87/lithium-codegen-x87.cc
@@ -6,13 +6,15 @@
 
 #if V8_TARGET_ARCH_X87
 
-#include "src/x87/lithium-codegen-x87.h"
-#include "src/ic.h"
+#include "src/base/bits.h"
+#include "src/code-factory.h"
 #include "src/code-stubs.h"
-#include "src/deoptimizer.h"
-#include "src/stub-cache.h"
 #include "src/codegen.h"
+#include "src/deoptimizer.h"
 #include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/x87/lithium-codegen-x87.h"
 
 namespace v8 {
 namespace internal {
@@ -20,7 +22,7 @@
 
 // When invoking builtins, we need to record the safepoint in the middle of
 // the invoke instruction sequence generated by the macro assembler.
-class SafepointGenerator V8_FINAL : public CallWrapper {
+class SafepointGenerator FINAL : public CallWrapper {
  public:
   SafepointGenerator(LCodeGen* codegen,
                      LPointerMap* pointers,
@@ -30,9 +32,9 @@
         deopt_mode_(mode) {}
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const V8_OVERRIDE {}
+  virtual void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const V8_OVERRIDE {
+  virtual void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -47,7 +49,7 @@
 
 bool LCodeGen::GenerateCode() {
   LPhase phase("Z_Code generation", chunk());
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   status_ = GENERATING;
 
   // Open a frame scope to indicate that there is a frame on the stack.  The
@@ -71,7 +73,7 @@
 
 
 void LCodeGen::FinishCode(Handle<Code> code) {
-  ASSERT(is_done());
+  DCHECK(is_done());
   code->set_stack_slots(GetStackSlotCount());
   code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
   if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
@@ -93,7 +95,7 @@
 
 
 bool LCodeGen::GeneratePrologue() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
 
   if (info()->IsOptimizing()) {
     ProfileEntryHookStub::MaybeCallEntryHook(masm_);
@@ -120,7 +122,7 @@
       __ j(not_equal, &ok, Label::kNear);
 
       __ mov(ecx, GlobalObjectOperand());
-      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalReceiverOffset));
+      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
 
       __ mov(Operand(esp, receiver_offset), ecx);
 
@@ -155,7 +157,7 @@
 
   info()->set_prologue_offset(masm_->pc_offset());
   if (NeedsEagerFrame()) {
-    ASSERT(!frame_is_built_);
+    DCHECK(!frame_is_built_);
     frame_is_built_ = true;
     if (info()->IsStub()) {
       __ StubPrologue();
@@ -174,7 +176,7 @@
 
   // Reserve space for the stack slots needed by the code.
   int slots = GetStackSlotCount();
-  ASSERT(slots != 0 || !info()->IsOptimizing());
+  DCHECK(slots != 0 || !info()->IsOptimizing());
   if (slots > 0) {
     if (slots == 1) {
       if (dynamic_frame_alignment_) {
@@ -230,7 +232,7 @@
       need_write_barrier = false;
     } else {
       __ push(edi);
-      __ CallRuntime(Runtime::kHiddenNewFunctionContext, 1);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
     }
     RecordSafepoint(Safepoint::kNoLazyDeopt);
     // Context is returned in eax.  It replaces the context passed to us.
@@ -252,10 +254,8 @@
         __ mov(Operand(esi, context_offset), eax);
         // Update the write barrier. This clobbers eax and ebx.
         if (need_write_barrier) {
-          __ RecordWriteContextSlot(esi,
-                                    context_offset,
-                                    eax,
-                                    ebx);
+          __ RecordWriteContextSlot(esi, context_offset, eax, ebx,
+                                    kDontSaveFPRegs);
         } else if (FLAG_debug_code) {
           Label done;
           __ JumpIfInNewSpace(esi, eax, &done, Label::kNear);
@@ -267,6 +267,8 @@
     Comment(";;; End allocate local context");
   }
 
+  // Initialize FPU state.
+  __ fninit();
   // Trace the call.
   if (FLAG_trace && info()->IsOptimizing()) {
     // We have not executed any compiled code yet, so esi still holds the
@@ -323,8 +325,11 @@
   // Adjust the frame size, subsuming the unoptimized frame into the
   // optimized frame.
   int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
-  ASSERT(slots >= 1);
+  DCHECK(slots >= 1);
   __ sub(esp, Immediate((slots - 1) * kPointerSize));
+
+  // Initialize FPU state.
+  __ fninit();
 }
 
 
@@ -340,15 +345,28 @@
 
 
 void LCodeGen::GenerateBodyInstructionPost(LInstruction* instr) {
+  // After returning from a function call, the FPU must be reinitialized.
+  if (instr->IsCall() && instr->ClobbersDoubleRegisters(isolate())) {
+    bool double_result = instr->HasDoubleRegisterResult();
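+    // fninit empties the entire FPU register stack, so spill a double result
+    // to memory first and reload it afterwards.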
+    if (double_result) {
+      __ lea(esp, Operand(esp, -kDoubleSize));
+      __ fstp_d(Operand(esp, 0));
+    }
+    __ fninit();
+    if (double_result) {
+      __ fld_d(Operand(esp, 0));
+      __ lea(esp, Operand(esp, kDoubleSize));
+    }
+  }
   if (instr->IsGoto()) {
-    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr));
+    x87_stack_.LeavingBlock(current_block_, LGoto::cast(instr), this);
   } else if (FLAG_debug_code && FLAG_enable_slow_asserts &&
              !instr->IsGap() && !instr->IsReturn()) {
     if (instr->ClobbersDoubleRegisters(isolate())) {
       if (instr->HasDoubleRegisterResult()) {
-        ASSERT_EQ(1, x87_stack_.depth());
+        DCHECK_EQ(1, x87_stack_.depth());
       } else {
-        ASSERT_EQ(0, x87_stack_.depth());
+        DCHECK_EQ(0, x87_stack_.depth());
       }
     }
     __ VerifyX87StackDepth(x87_stack_.depth());
@@ -362,17 +380,12 @@
     Comment(";;; -------------------- Jump table --------------------");
   }
   for (int i = 0; i < jump_table_.length(); i++) {
-    __ bind(&jump_table_[i].label);
-    Address entry = jump_table_[i].address;
-    Deoptimizer::BailoutType type = jump_table_[i].bailout_type;
-    int id = Deoptimizer::GetDeoptimizationId(isolate(), entry, type);
-    if (id == Deoptimizer::kNotDeoptimizationEntry) {
-      Comment(";;; jump table entry %d.", i);
-    } else {
-      Comment(";;; jump table entry %d: deoptimization bailout %d.", i, id);
-    }
-    if (jump_table_[i].needs_frame) {
-      ASSERT(!info()->saves_caller_doubles());
+    Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+    __ bind(&table_entry->label);
+    Address entry = table_entry->address;
+    DeoptComment(table_entry->reason);
+    if (table_entry->needs_frame) {
+      DCHECK(!info()->saves_caller_doubles());
       __ push(Immediate(ExternalReference::ForDeoptEntry(entry)));
       if (needs_frame.is_bound()) {
         __ jmp(&needs_frame);
@@ -382,7 +395,7 @@
         // This variant of deopt can only be used with stubs. Since we don't
         // have a function pointer to install in the stack frame that we're
         // building, install a special marker there instead.
-        ASSERT(info()->IsStub());
+        DCHECK(info()->IsStub());
         __ push(Immediate(Smi::FromInt(StackFrame::STUB)));
         // Push a PC inside the function so that the deopt code can find where
         // the deopt comes from. It doesn't have to be the precise return
@@ -407,7 +420,7 @@
 
 
 bool LCodeGen::GenerateDeferredCode() {
-  ASSERT(is_generating());
+  DCHECK(is_generating());
   if (deferred_.length() > 0) {
     for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
       LDeferredCode* code = deferred_[i];
@@ -427,8 +440,8 @@
       __ bind(code->entry());
       if (NeedsDeferredFrame()) {
         Comment(";;; Build frame");
-        ASSERT(!frame_is_built_);
-        ASSERT(info()->IsStub());
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
         frame_is_built_ = true;
         // Build the frame in such a way that esi isn't trashed.
         __ push(ebp);  // Caller's frame pointer.
@@ -441,7 +454,7 @@
       if (NeedsDeferredFrame()) {
         __ bind(code->done());
         Comment(";;; Destroy frame");
-        ASSERT(frame_is_built_);
+        DCHECK(frame_is_built_);
         frame_is_built_ = false;
         __ mov(esp, ebp);
         __ pop(ebp);
@@ -458,7 +471,7 @@
 
 
 bool LCodeGen::GenerateSafepointTable() {
-  ASSERT(is_done());
+  DCHECK(is_done());
   if (!info()->IsStub()) {
     // For lazy deoptimization we need space to patch a call after every call.
     // Ensure there is always space for such patching, even if the code ends
@@ -484,25 +497,42 @@
 
 
 void LCodeGen::X87LoadForUsage(X87Register reg) {
-  ASSERT(x87_stack_.Contains(reg));
+  DCHECK(x87_stack_.Contains(reg));
   x87_stack_.Fxch(reg);
   x87_stack_.pop();
 }
 
 
 void LCodeGen::X87LoadForUsage(X87Register reg1, X87Register reg2) {
-  ASSERT(x87_stack_.Contains(reg1));
-  ASSERT(x87_stack_.Contains(reg2));
-  x87_stack_.Fxch(reg1, 1);
-  x87_stack_.Fxch(reg2);
-  x87_stack_.pop();
-  x87_stack_.pop();
+  DCHECK(x87_stack_.Contains(reg1));
+  DCHECK(x87_stack_.Contains(reg2));
+  if (reg1.is(reg2) && x87_stack_.depth() == 1) {
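+    // Both uses refer to the same register and it is the only stack entry:
+    // duplicate it on the physical stack so each use pops its own copy.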
+    __ fld(x87_stack_.st(reg1));
+    x87_stack_.push(reg1);
+    x87_stack_.pop();
+    x87_stack_.pop();
+  } else {
+    x87_stack_.Fxch(reg1, 1);
+    x87_stack_.Fxch(reg2);
+    x87_stack_.pop();
+    x87_stack_.pop();
+  }
+}
+
+
+int LCodeGen::X87Stack::GetLayout() {
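+  // Pack the virtual stack into one word: the depth in the low 3 bits, then
+  // each register code, top of stack first, in successive 3-bit fields.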
+  int layout = stack_depth_;
+  for (int i = 0; i < stack_depth_; i++) {
+    layout |= (stack_[stack_depth_ - 1 - i].code() << ((i + 1) * 3));
+  }
+
+  return layout;
 }
 
 
 void LCodeGen::X87Stack::Fxch(X87Register reg, int other_slot) {
-  ASSERT(is_mutable_);
-  ASSERT(Contains(reg) && stack_depth_ > other_slot);
+  DCHECK(is_mutable_);
+  DCHECK(Contains(reg) && stack_depth_ > other_slot);
   int i  = ArrayIndex(reg);
   int st = st2idx(i);
   if (st != other_slot) {
@@ -546,8 +576,8 @@
 
 
 void LCodeGen::X87Stack::Free(X87Register reg) {
-  ASSERT(is_mutable_);
-  ASSERT(Contains(reg));
+  DCHECK(is_mutable_);
+  DCHECK(Contains(reg));
   int i  = ArrayIndex(reg);
   int st = st2idx(i);
   if (st > 0) {
@@ -571,8 +601,24 @@
 }
 
 
+void LCodeGen::X87Mov(X87Register dst, X87Register src, X87OperandType opts) {
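+  // Load dst with a copy of src. If dst is already on the virtual stack,
+  // free its slot first so the stack stays consistent.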
+  if (x87_stack_.Contains(dst)) {
+    x87_stack_.Fxch(dst);
+    __ fstp(0);
+    x87_stack_.pop();
+    // Push ST(i) onto the FPU register stack
+    __ fld(x87_stack_.st(src));
+    x87_stack_.push(dst);
+  } else {
+    // Push ST(i) onto the FPU register stack
+    __ fld(x87_stack_.st(src));
+    x87_stack_.push(dst);
+  }
+}
+
+
 void LCodeGen::X87Fld(Operand src, X87OperandType opts) {
-  ASSERT(!src.is_reg_only());
+  DCHECK(!src.is_reg_only());
   switch (opts) {
     case kX87DoubleOperand:
       __ fld_d(src);
@@ -590,12 +636,15 @@
 
 
 void LCodeGen::X87Mov(Operand dst, X87Register src, X87OperandType opts) {
-  ASSERT(!dst.is_reg_only());
+  DCHECK(!dst.is_reg_only());
   x87_stack_.Fxch(src);
   switch (opts) {
     case kX87DoubleOperand:
       __ fst_d(dst);
       break;
+    case kX87FloatOperand:
+      __ fst_s(dst);
+      break;
     case kX87IntOperand:
       __ fist_s(dst);
       break;
@@ -606,7 +655,7 @@
 
 
 void LCodeGen::X87Stack::PrepareToWrite(X87Register reg) {
-  ASSERT(is_mutable_);
+  DCHECK(is_mutable_);
   if (Contains(reg)) {
     Free(reg);
   }
@@ -616,9 +665,9 @@
 
 
 void LCodeGen::X87Stack::CommitWrite(X87Register reg) {
-  ASSERT(is_mutable_);
+  DCHECK(is_mutable_);
   // Assert the reg is prepared to write, but not on the virtual stack yet
-  ASSERT(!Contains(reg) && stack_[stack_depth_].is(reg) &&
+  DCHECK(!Contains(reg) && stack_[stack_depth_].is(reg) &&
       stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
   stack_depth_++;
 }
@@ -627,7 +676,7 @@
 void LCodeGen::X87PrepareBinaryOp(
     X87Register left, X87Register right, X87Register result) {
   // You need to use DefineSameAsFirst for x87 instructions
-  ASSERT(result.is(left));
+  DCHECK(result.is(left));
   x87_stack_.Fxch(right, 1);
   x87_stack_.Fxch(left);
 }
@@ -659,15 +708,39 @@
 }
 
 
-void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr) {
-  ASSERT(stack_depth_ <= 1);
-  // If ever used for new stubs producing two pairs of doubles joined into two
-  // phis this assert hits. That situation is not handled, since the two stacks
-  // might have st0 and st1 swapped.
-  if (current_block_id + 1 != goto_instr->block_id()) {
+void LCodeGen::X87Stack::LeavingBlock(int current_block_id, LGoto* goto_instr,
+                                      LCodeGen* cgen) {
+  // When going to a joined block, an explicit LClobberDoubles is inserted
+  // before LGoto, because all used x87 registers are spilled to stack slots.
+  // The ResolvePhis phase of the register allocator guarantees that both
+  // inputs' x87 stacks have the same layout, so don't check stack_depth_ <= 1.
+  int goto_block_id = goto_instr->block_id();
+  if (current_block_id + 1 != goto_block_id) {
     // If we have a value on the x87 stack on leaving a block, it must be a
     // phi input. If the next block we compile is not the join block, we have
     // to discard the stack state.
+    // Before discarding the stack state, save it if the "goto block" has an
+    // unreachable last predecessor under FLAG_unreachable_code_elimination.
+    if (FLAG_unreachable_code_elimination) {
+      int length = goto_instr->block()->predecessors()->length();
+      bool has_unreachable_last_predecessor = false;
+      for (int i = 0; i < length; i++) {
+        HBasicBlock* block = goto_instr->block()->predecessors()->at(i);
+        if (block->IsUnreachable() &&
+            (block->block_id() + 1) == goto_block_id) {
+          has_unreachable_last_predecessor = true;
+        }
+      }
+      if (has_unreachable_last_predecessor) {
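+        // Save this block's x87 layout so that EmitLabel can restore it when
+        // the join block is reached.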
+        if (cgen->x87_stack_map_.find(goto_block_id) ==
+            cgen->x87_stack_map_.end()) {
+          X87Stack* stack = new (cgen->zone()) X87Stack(*this);
+          cgen->x87_stack_map_.insert(std::make_pair(goto_block_id, stack));
+        }
+      }
+    }
+
+    // Discard the stack state.
     stack_depth_ = 0;
   }
 }
@@ -677,24 +750,25 @@
   // The deoptimizer does not support X87 Registers. But as long as we
   // deopt from a stub its not a problem, since we will re-materialize the
   // original stub inputs, which can't be double registers.
-  ASSERT(info()->IsStub());
+  // DCHECK(info()->IsStub());
   if (FLAG_debug_code && FLAG_enable_slow_asserts) {
     __ pushfd();
     __ VerifyX87StackDepth(x87_stack_.depth());
     __ popfd();
   }
-  for (int i = 0; i < x87_stack_.depth(); i++) __ fstp(0);
+
+  // The x87 stack itself is flushed in the deoptimizer entry.
 }
 
 
 Register LCodeGen::ToRegister(LOperand* op) const {
-  ASSERT(op->IsRegister());
+  DCHECK(op->IsRegister());
   return ToRegister(op->index());
 }
 
 
 X87Register LCodeGen::ToX87Register(LOperand* op) const {
-  ASSERT(op->IsDoubleRegister());
+  DCHECK(op->IsDoubleRegister());
   return ToX87Register(op->index());
 }
 
@@ -709,28 +783,28 @@
   HConstant* constant = chunk_->LookupConstant(op);
   int32_t value = constant->Integer32Value();
   if (r.IsInteger32()) return value;
-  ASSERT(r.IsSmiOrTagged());
+  DCHECK(r.IsSmiOrTagged());
   return reinterpret_cast<int32_t>(Smi::FromInt(value));
 }
 
 
 Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
   return constant->handle(isolate());
 }
 
 
 double LCodeGen::ToDouble(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasDoubleValue());
+  DCHECK(constant->HasDoubleValue());
   return constant->DoubleValue();
 }
 
 
 ExternalReference LCodeGen::ToExternalReference(LConstantOperand* op) const {
   HConstant* constant = chunk_->LookupConstant(op);
-  ASSERT(constant->HasExternalReferenceValue());
+  DCHECK(constant->HasExternalReferenceValue());
   return constant->ExternalReferenceValue();
 }
 
@@ -746,15 +820,15 @@
 
 
 static int ArgumentsOffsetWithoutFrame(int index) {
-  ASSERT(index < 0);
+  DCHECK(index < 0);
   return -(index + 1) * kPointerSize + kPCOnStackSize;
 }
 
 
 Operand LCodeGen::ToOperand(LOperand* op) const {
   if (op->IsRegister()) return Operand(ToRegister(op));
-  ASSERT(!op->IsDoubleRegister());
-  ASSERT(op->IsStackSlot() || op->IsDoubleStackSlot());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return Operand(ebp, StackSlotOffset(op->index()));
   } else {
@@ -766,7 +840,7 @@
 
 
 Operand LCodeGen::HighOperand(LOperand* op) {
-  ASSERT(op->IsDoubleStackSlot());
+  DCHECK(op->IsDoubleStackSlot());
   if (NeedsEagerFrame()) {
     return Operand(ebp, StackSlotOffset(op->index()) + kPointerSize);
   } else {
@@ -801,13 +875,13 @@
       translation->BeginConstructStubFrame(closure_id, translation_size);
       break;
     case JS_GETTER:
-      ASSERT(translation_size == 1);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
       translation->BeginGetterStubFrame(closure_id);
       break;
     case JS_SETTER:
-      ASSERT(translation_size == 2);
-      ASSERT(height == 0);
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
       translation->BeginSetterStubFrame(closure_id);
       break;
     case ARGUMENTS_ADAPTOR:
@@ -890,6 +964,9 @@
     } else {
       translation->StoreInt32Register(reg);
     }
+  } else if (op->IsDoubleRegister()) {
+    X87Register reg = ToX87Register(op);
+    translation->StoreDoubleRegister(reg);
   } else if (op->IsConstantOperand()) {
     HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
     int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
@@ -904,7 +981,7 @@
                                RelocInfo::Mode mode,
                                LInstruction* instr,
                                SafepointMode safepoint_mode) {
-  ASSERT(instr != NULL);
+  DCHECK(instr != NULL);
   __ call(code, mode);
   RecordSafepointWithLazyDeopt(instr, safepoint_mode);
 
@@ -924,17 +1001,16 @@
 }
 
 
-void LCodeGen::CallRuntime(const Runtime::Function* fun,
-                           int argc,
-                           LInstruction* instr) {
-  ASSERT(instr != NULL);
-  ASSERT(instr->HasPointerMap());
+void LCodeGen::CallRuntime(const Runtime::Function* fun, int argc,
+                           LInstruction* instr, SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+  DCHECK(instr->HasPointerMap());
 
-  __ CallRuntime(fun, argc);
+  __ CallRuntime(fun, argc, save_doubles);
 
   RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
 
-  ASSERT(info()->is_calling());
+  DCHECK(info()->is_calling());
 }
 
 
@@ -960,11 +1036,11 @@
                                        LOperand* context) {
   LoadContextFromDeferred(context);
 
-  __ CallRuntime(id);
+  __ CallRuntimeSaveDoubles(id);
   RecordSafepointWithRegisters(
       instr->pointer_map(), argc, Safepoint::kNoLazyDeopt);
 
-  ASSERT(info()->is_calling());
+  DCHECK(info()->is_calling());
 }
 
 
@@ -1005,13 +1081,14 @@
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition cc,
-                            LEnvironment* environment,
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            const char* detail,
                             Deoptimizer::BailoutType bailout_type) {
+  LEnvironment* environment = instr->environment();
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
-  ASSERT(environment->HasBeenRegistered());
+  DCHECK(environment->HasBeenRegistered());
   int id = environment->deoptimization_index();
-  ASSERT(info()->IsOptimizing() || info()->IsStub());
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
   Address entry =
       Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
   if (entry == NULL) {
@@ -1032,7 +1109,13 @@
     __ mov(Operand::StaticVariable(count), eax);
     __ pop(eax);
     __ popfd();
-    ASSERT(frame_is_built_);
+    DCHECK(frame_is_built_);
+    // Put the x87 stack layout in TOS.
+    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
+    __ push(Immediate(x87_stack_.GetLayout()));
+    __ fild_s(MemOperand(esp, 0));
+    // Don't touch eflags.
+    __ lea(esp, Operand(esp, kPointerSize));
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
     __ bind(&no_deopt);
     __ mov(Operand::StaticVariable(count), eax);
@@ -1040,14 +1123,18 @@
     __ popfd();
   }
 
-  // Before Instructions which can deopt, we normally flush the x87 stack. But
-  // we can have inputs or outputs of the current instruction on the stack,
-  // thus we need to flush them here from the physical stack to leave it in a
-  // consistent state.
-  if (x87_stack_.depth() > 0) {
+  // Put the x87 stack layout in TOS, so that we can save x87 fp registers in
+  // the correct location.
+  {
     Label done;
     if (cc != no_condition) __ j(NegateCondition(cc), &done, Label::kNear);
-    EmitFlushX87ForDeopt();
+    if (x87_stack_.depth() > 0) EmitFlushX87ForDeopt();
+
+    int x87_stack_layout = x87_stack_.GetLayout();
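+    // fild_s turns the pushed layout word into ST(0), where the deoptimizer
+    // expects to find it.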
+    __ push(Immediate(x87_stack_layout));
+    __ fild_s(MemOperand(esp, 0));
+    // Don't touch eflags.
+    __ lea(esp, Operand(esp, kPointerSize));
     __ bind(&done);
   }
 
@@ -1058,19 +1145,19 @@
     __ bind(&done);
   }
 
-  ASSERT(info()->IsStub() || frame_is_built_);
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
+  DCHECK(info()->IsStub() || frame_is_built_);
   if (cc == no_condition && frame_is_built_) {
+    DeoptComment(reason);
     __ call(entry, RelocInfo::RUNTIME_ENTRY);
   } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
     // We often have several deopts to the same entry, reuse the last
     // jump entry if this is the case.
     if (jump_table_.is_empty() ||
-        jump_table_.last().address != entry ||
-        jump_table_.last().needs_frame != !frame_is_built_ ||
-        jump_table_.last().bailout_type != bailout_type) {
-      Deoptimizer::JumpTableEntry table_entry(entry,
-                                              bailout_type,
-                                              !frame_is_built_);
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
       jump_table_.Add(table_entry, zone());
     }
     if (cc == no_condition) {
@@ -1082,12 +1169,12 @@
 }
 
 
-void LCodeGen::DeoptimizeIf(Condition cc,
-                            LEnvironment* environment) {
+void LCodeGen::DeoptimizeIf(Condition cc, LInstruction* instr,
+                            const char* detail) {
   Deoptimizer::BailoutType bailout_type = info()->IsStub()
       ? Deoptimizer::LAZY
       : Deoptimizer::EAGER;
-  DeoptimizeIf(cc, environment, bailout_type);
+  DeoptimizeIf(cc, instr, detail, bailout_type);
 }
 
 
@@ -1146,7 +1233,7 @@
 
 
 void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
-  ASSERT(deoptimization_literals_.length() == 0);
+  DCHECK(deoptimization_literals_.length() == 0);
 
   const ZoneList<Handle<JSFunction> >* inlined_closures =
       chunk()->inlined_closures();
@@ -1166,7 +1253,7 @@
   if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
     RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
   } else {
-    ASSERT(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kLazyDeopt);
   }
@@ -1178,7 +1265,7 @@
     Safepoint::Kind kind,
     int arguments,
     Safepoint::DeoptMode deopt_mode) {
-  ASSERT(kind == expected_safepoint_kind_);
+  DCHECK(kind == expected_safepoint_kind_);
   const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
   Safepoint safepoint =
       safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
@@ -1234,6 +1321,16 @@
           LabelType(label));
   __ bind(label->label());
   current_block_ = label->block_id();
+  if (label->block()->predecessors()->length() > 1) {
+    // A join block's x87 stack is that of its last visited predecessor.
+    // If the last visited predecessor block is unreachable, the stack state
+    // will be wrong. In that case, use the x87 stack of a reachable predecessor.
+    X87StackMap::const_iterator it = x87_stack_map_.find(current_block_);
+    // Restore x87 stack.
+    if (it != x87_stack_map_.end()) {
+      x87_stack_ = *(it->second);
+    }
+  }
   DoGap(label);
 }
 
@@ -1265,8 +1362,8 @@
 
 
 void LCodeGen::DoCallStub(LCallStub* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->result()).is(eax));
   switch (instr->hydrogen()->major_key()) {
     case CodeStub::RegExpExec: {
       RegExpExecStub stub(isolate());
@@ -1297,7 +1394,7 @@
 void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));
 
   // Theoretically, a variation of the branch-free code for integer division by
   // a power of 2 (calculating the remainder via an additional multiplication
@@ -1316,7 +1413,7 @@
     __ and_(dividend, mask);
     __ neg(dividend);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(zero, instr->environment());
+      DeoptimizeIf(zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
   }
@@ -1330,10 +1427,10 @@
 void LCodeGen::DoModByConstI(LModByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1348,7 +1445,7 @@
     Label remainder_not_zero;
     __ j(not_zero, &remainder_not_zero, Label::kNear);
     __ cmp(dividend, Immediate(0));
-    DeoptimizeIf(less, instr->environment());
+    DeoptimizeIf(less, instr, "minus zero");
     __ bind(&remainder_not_zero);
   }
 }
@@ -1358,19 +1455,19 @@
   HMod* hmod = instr->hydrogen();
 
   Register left_reg = ToRegister(instr->left());
-  ASSERT(left_reg.is(eax));
+  DCHECK(left_reg.is(eax));
   Register right_reg = ToRegister(instr->right());
-  ASSERT(!right_reg.is(eax));
-  ASSERT(!right_reg.is(edx));
+  DCHECK(!right_reg.is(eax));
+  DCHECK(!right_reg.is(edx));
   Register result_reg = ToRegister(instr->result());
-  ASSERT(result_reg.is(edx));
+  DCHECK(result_reg.is(edx));
 
   Label done;
   // Check for x % 0, idiv would signal a divide error. We have to
   // deopt in this case because we can't return a NaN.
   if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(right_reg, Operand(right_reg));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for kMinInt % -1, idiv would signal a divide error. We
@@ -1381,7 +1478,7 @@
     __ j(not_equal, &no_overflow_possible, Label::kNear);
     __ cmp(right_reg, -1);
     if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "minus zero");
     } else {
       __ j(not_equal, &no_overflow_possible, Label::kNear);
       __ Move(result_reg, Immediate(0));
@@ -1400,7 +1497,7 @@
     __ j(not_sign, &positive_left, Label::kNear);
     __ idiv(right_reg);
     __ test(result_reg, Operand(result_reg));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
     __ jmp(&done, Label::kNear);
     __ bind(&positive_left);
   }
@@ -1413,26 +1510,26 @@
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
   Register result = ToRegister(instr->result());
-  ASSERT(divisor == kMinInt || IsPowerOf2(Abs(divisor)));
-  ASSERT(!result.is(dividend));
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
 
   // Check for (0 / -x) that will produce negative zero.
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
   // Check for (kMinInt / -1).
   if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
     __ cmp(dividend, kMinInt);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
   }
   // Deoptimize if remainder will not be 0.
   if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
       divisor != 1 && divisor != -1) {
     int32_t mask = divisor < 0 ? -(divisor + 1) : (divisor - 1);
     __ test(dividend, Immediate(mask));
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
   __ Move(result, dividend);
   int32_t shift = WhichPowerOf2Abs(divisor);
@@ -1450,10 +1547,10 @@
 void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(edx));
+  DCHECK(ToRegister(instr->result()).is(edx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1461,7 +1558,7 @@
   HDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   __ TruncatingDiv(dividend, Abs(divisor));
@@ -1471,7 +1568,7 @@
     __ mov(eax, edx);
     __ imul(eax, eax, divisor);
     __ sub(eax, dividend);
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "lost precision");
   }
 }
 
@@ -1482,16 +1579,16 @@
   Register dividend = ToRegister(instr->dividend());
   Register divisor = ToRegister(instr->divisor());
   Register remainder = ToRegister(instr->temp());
-  ASSERT(dividend.is(eax));
-  ASSERT(remainder.is(edx));
-  ASSERT(ToRegister(instr->result()).is(eax));
-  ASSERT(!divisor.is(eax));
-  ASSERT(!divisor.is(edx));
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(ToRegister(instr->result()).is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1500,7 +1597,7 @@
     __ test(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr->environment());
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1510,7 +1607,7 @@
     __ cmp(dividend, kMinInt);
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1521,7 +1618,7 @@
   if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
     // Deoptimize if remainder is not 0.
     __ test(remainder, remainder);
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "lost precision");
   }
 }
 
@@ -1529,7 +1626,7 @@
 void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(dividend.is(ToRegister(instr->result())));
+  DCHECK(dividend.is(ToRegister(instr->result())));
 
   // If the divisor is positive, things are easy: There can be no deopts and we
   // can simply do an arithmetic right shift.
@@ -1543,13 +1640,13 @@
   // If the divisor is negative, we have to negate and handle edge cases.
   __ neg(dividend);
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Dividing by -1 is basically negation, unless we overflow.
   if (divisor == -1) {
     if (instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
-      DeoptimizeIf(overflow, instr->environment());
+      DeoptimizeIf(overflow, instr, "overflow");
     }
     return;
   }
@@ -1573,10 +1670,10 @@
 void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
   Register dividend = ToRegister(instr->dividend());
   int32_t divisor = instr->divisor();
-  ASSERT(ToRegister(instr->result()).is(edx));
+  DCHECK(ToRegister(instr->result()).is(edx));
 
   if (divisor == 0) {
-    DeoptimizeIf(no_condition, instr->environment());
+    DeoptimizeIf(no_condition, instr, "division by zero");
     return;
   }
 
@@ -1584,7 +1681,7 @@
   HMathFloorOfDiv* hdiv = instr->hydrogen();
   if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
     __ test(dividend, dividend);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "minus zero");
   }
 
   // Easy case: We need no dynamic check for the dividend and the flooring
@@ -1599,7 +1696,7 @@
   // In the general case we may need to adjust before and after the truncating
   // division to get a flooring division.
   Register temp = ToRegister(instr->temp3());
-  ASSERT(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
+  DCHECK(!temp.is(dividend) && !temp.is(eax) && !temp.is(edx));
   Label needs_adjustment, done;
   __ cmp(dividend, Immediate(0));
   __ j(divisor > 0 ? less : greater, &needs_adjustment, Label::kNear);
@@ -1622,16 +1719,16 @@
   Register divisor = ToRegister(instr->divisor());
   Register remainder = ToRegister(instr->temp());
   Register result = ToRegister(instr->result());
-  ASSERT(dividend.is(eax));
-  ASSERT(remainder.is(edx));
-  ASSERT(result.is(eax));
-  ASSERT(!divisor.is(eax));
-  ASSERT(!divisor.is(edx));
+  DCHECK(dividend.is(eax));
+  DCHECK(remainder.is(edx));
+  DCHECK(result.is(eax));
+  DCHECK(!divisor.is(eax));
+  DCHECK(!divisor.is(edx));
 
   // Check for x / 0.
   if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
     __ test(divisor, divisor);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "division by zero");
   }
 
   // Check for (0 / -x) that will produce negative zero.
@@ -1640,7 +1737,7 @@
     __ test(dividend, dividend);
     __ j(not_zero, &dividend_not_zero, Label::kNear);
     __ test(divisor, divisor);
-    DeoptimizeIf(sign, instr->environment());
+    DeoptimizeIf(sign, instr, "minus zero");
     __ bind(&dividend_not_zero);
   }
 
@@ -1650,7 +1747,7 @@
     __ cmp(dividend, kMinInt);
     __ j(not_zero, &dividend_not_min_int, Label::kNear);
     __ cmp(divisor, -1);
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "overflow");
     __ bind(&dividend_not_min_int);
   }
 
@@ -1728,25 +1825,25 @@
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 
   if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
     // Bail out if the result is supposed to be negative zero.
     Label done;
     __ test(left, Operand(left));
-    __ j(not_zero, &done, Label::kNear);
+    __ j(not_zero, &done);
     if (right->IsConstantOperand()) {
       if (ToInteger32(LConstantOperand::cast(right)) < 0) {
-        DeoptimizeIf(no_condition, instr->environment());
+        DeoptimizeIf(no_condition, instr, "minus zero");
       } else if (ToInteger32(LConstantOperand::cast(right)) == 0) {
         __ cmp(ToRegister(instr->temp()), Immediate(0));
-        DeoptimizeIf(less, instr->environment());
+        DeoptimizeIf(less, instr, "minus zero");
       }
     } else {
       // Test the non-zero operand for negative sign.
       __ or_(ToRegister(instr->temp()), ToOperand(right));
-      DeoptimizeIf(sign, instr->environment());
+      DeoptimizeIf(sign, instr, "minus zero");
     }
     __ bind(&done);
   }
@@ -1756,8 +1853,8 @@
 void LCodeGen::DoBitI(LBitI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
 
   if (right->IsConstantOperand()) {
     int32_t right_operand =
@@ -1803,18 +1900,14 @@
 void LCodeGen::DoShiftI(LShiftI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
-  ASSERT(left->IsRegister());
+  DCHECK(left->Equals(instr->result()));
+  DCHECK(left->IsRegister());
   if (right->IsRegister()) {
-    ASSERT(ToRegister(right).is(ecx));
+    DCHECK(ToRegister(right).is(ecx));
 
     switch (instr->op()) {
       case Token::ROR:
         __ ror_cl(ToRegister(left));
-        if (instr->can_deopt()) {
-          __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr->environment());
-        }
         break;
       case Token::SAR:
         __ sar_cl(ToRegister(left));
@@ -1823,7 +1916,7 @@
         __ shr_cl(ToRegister(left));
         if (instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr->environment());
+          DeoptimizeIf(sign, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1840,7 +1933,7 @@
       case Token::ROR:
         if (shift_count == 0 && instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr->environment());
+          DeoptimizeIf(sign, instr, "negative value");
         } else {
           __ ror(ToRegister(left), shift_count);
         }
@@ -1855,7 +1948,7 @@
           __ shr(ToRegister(left), shift_count);
         } else if (instr->can_deopt()) {
           __ test(ToRegister(left), ToRegister(left));
-          DeoptimizeIf(sign, instr->environment());
+          DeoptimizeIf(sign, instr, "negative value");
         }
         break;
       case Token::SHL:
@@ -1866,7 +1959,7 @@
               __ shl(ToRegister(left), shift_count - 1);
             }
             __ SmiTag(ToRegister(left));
-            DeoptimizeIf(overflow, instr->environment());
+            DeoptimizeIf(overflow, instr, "overflow");
           } else {
             __ shl(ToRegister(left), shift_count);
           }
@@ -1883,7 +1976,7 @@
 void LCodeGen::DoSubI(LSubI* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
+  DCHECK(left->Equals(instr->result()));
 
   if (right->IsConstantOperand()) {
     __ sub(ToOperand(left),
@@ -1892,7 +1985,7 @@
     __ sub(ToRegister(left), ToOperand(right));
   }
   if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
@@ -1909,10 +2002,10 @@
 
 void LCodeGen::DoConstantD(LConstantD* instr) {
   double v = instr->value();
-  uint64_t int_val = BitCast<uint64_t, double>(v);
+  uint64_t int_val = bit_cast<uint64_t, double>(v);
   int32_t lower = static_cast<int32_t>(int_val);
   int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
-  ASSERT(instr->result()->IsDoubleRegister());
+  DCHECK(instr->result()->IsDoubleRegister());
 
   __ push(Immediate(upper));
   __ push(Immediate(lower));
@@ -1948,13 +2041,13 @@
   Register scratch = ToRegister(instr->temp());
   Smi* index = instr->index();
   Label runtime, done;
-  ASSERT(object.is(result));
-  ASSERT(object.is(eax));
+  DCHECK(object.is(result));
+  DCHECK(object.is(eax));
 
   __ test(object, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr->environment());
+  DeoptimizeIf(zero, instr, "Smi");
   __ CmpObjectType(object, JS_DATE_TYPE, scratch);
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a date object");
 
   if (index->value() == 0) {
     __ mov(result, FieldOperand(object, JSDate::kValueOffset));
@@ -2044,12 +2137,12 @@
   if (instr->value()->IsConstantOperand()) {
     int value = ToRepresentation(LConstantOperand::cast(instr->value()),
                                  Representation::Integer32());
-    ASSERT_LE(0, value);
+    DCHECK_LE(0, value);
     if (encoding == String::ONE_BYTE_ENCODING) {
-      ASSERT_LE(value, String::kMaxOneByteCharCode);
+      DCHECK_LE(value, String::kMaxOneByteCharCode);
       __ mov_b(operand, static_cast<int8_t>(value));
     } else {
-      ASSERT_LE(value, String::kMaxUtf16CodeUnit);
+      DCHECK_LE(value, String::kMaxUtf16CodeUnit);
       __ mov_w(operand, static_cast<int16_t>(value));
     }
   } else {
@@ -2084,7 +2177,7 @@
       __ add(ToRegister(left), ToOperand(right));
     }
     if (instr->hydrogen()->CheckFlag(HValue::kCanOverflow)) {
-      DeoptimizeIf(overflow, instr->environment());
+      DeoptimizeIf(overflow, instr, "overflow");
     }
   }
 }
@@ -2093,7 +2186,7 @@
 void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
   LOperand* left = instr->left();
   LOperand* right = instr->right();
-  ASSERT(left->Equals(instr->result()));
+  DCHECK(left->Equals(instr->result()));
   HMathMinMax::Operation operation = instr->hydrogen()->operation();
   if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
     Label return_left;
@@ -2116,8 +2209,58 @@
     }
     __ bind(&return_left);
   } else {
-    // TODO(weiliang) use X87 for double representation.
-    UNIMPLEMENTED();
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    Label check_nan_left, check_zero, return_left, return_right;
+    Condition condition = (operation == HMathMinMax::kMathMin) ? below : above;
+    X87Register left_reg = ToX87Register(left);
+    X87Register right_reg = ToX87Register(right);
+
+    X87PrepareBinaryOp(left_reg, right_reg, ToX87Register(instr->result()));
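+    // Compare using copies of the operands; FCmp pops both, leaving the
+    // originals on the FPU stack.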
+    __ fld(1);
+    __ fld(1);
+    __ FCmp();
+    __ j(parity_even, &check_nan_left, Label::kNear);  // At least one NaN.
+    __ j(equal, &check_zero, Label::kNear);            // left == right.
+    __ j(condition, &return_left, Label::kNear);
+    __ jmp(&return_right, Label::kNear);
+
+    __ bind(&check_zero);
+    __ fld(0);
+    __ fldz();
+    __ FCmp();
+    __ j(not_equal, &return_left, Label::kNear);  // left == right != 0.
+    // At this point, both left and right are either 0 or -0.
+    if (operation == HMathMinMax::kMathMin) {
+      // Spill st0 and st1 to memory, OR their words (only the sign bits can
+      // differ here), and load the result back into left.
+      Register scratch_reg = ToRegister(instr->temp());
+      __ fld(1);
+      __ fld(1);
+      __ sub(esp, Immediate(2 * kPointerSize));
+      __ fstp_s(MemOperand(esp, 0));
+      __ fstp_s(MemOperand(esp, kPointerSize));
+      __ pop(scratch_reg);
+      __ or_(MemOperand(esp, 0), scratch_reg);
+      X87Mov(left_reg, MemOperand(esp, 0), kX87FloatOperand);
+      __ pop(scratch_reg);  // restore esp
+    } else {
+      // On +0 and/or -0 operands, addition acts like AND-ing the sign bits.
+      X87Fxch(left_reg);
+      __ fadd(1);
+    }
+    __ jmp(&return_left, Label::kNear);
+
+    __ bind(&check_nan_left);
+    __ fld(0);
+    __ fld(0);
+    __ FCmp();                                      // NaN check.
+    __ j(parity_even, &return_left, Label::kNear);  // left == NaN.
+
+    __ bind(&return_right);
+    X87Fxch(left_reg);
+    X87Mov(left_reg, right_reg);
+
+    __ bind(&return_left);
   }
 }
 
@@ -2129,6 +2272,8 @@
   if (instr->op() != Token::MOD) {
     X87PrepareBinaryOp(left, right, result);
   }
+  // Set the precision control to double-precision.
+  __ X87SetFPUCW(0x027F);
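+  // (0x027F is the default control word 0x037F with precision control
+  // switched from 64-bit to 53-bit significands.)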
   switch (instr->op()) {
     case Token::ADD:
       __ fadd_i(1);
@@ -2148,7 +2293,7 @@
       X87Mov(Operand(esp, 1 * kDoubleSize), right);
       X87Mov(Operand(esp, 0), left);
       X87Free(right);
-      ASSERT(left.is(result));
+      DCHECK(left.is(result));
       X87PrepareToWrite(result);
       __ CallCFunction(
           ExternalReference::mod_two_doubles_operation(isolate()),
@@ -2162,17 +2307,21 @@
       UNREACHABLE();
       break;
   }
+
+  // Restore the default value of the control word.
+  __ X87SetFPUCW(0x037F);
 }
 
 
 void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->left()).is(edx));
-  ASSERT(ToRegister(instr->right()).is(eax));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
-  BinaryOpICStub stub(isolate(), instr->op(), NO_OVERWRITE);
-  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
 }
 
 
@@ -2214,26 +2363,30 @@
     __ test(reg, Operand(reg));
     EmitBranch(instr, not_zero);
   } else if (r.IsDouble()) {
-    UNREACHABLE();
+    X87Register reg = ToX87Register(instr->value());
+    X87LoadForUsage(reg);
+    __ fldz();
+    __ FCmp();
+    EmitBranch(instr, not_zero);
   } else {
-    ASSERT(r.IsTagged());
+    DCHECK(r.IsTagged());
     Register reg = ToRegister(instr->value());
     HType type = instr->hydrogen()->value()->type();
     if (type.IsBoolean()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ cmp(reg, factory()->true_value());
       EmitBranch(instr, equal);
     } else if (type.IsSmi()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ test(reg, Operand(reg));
       EmitBranch(instr, not_equal);
     } else if (type.IsJSArray()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       EmitBranch(instr, no_condition);
     } else if (type.IsHeapNumber()) {
       UNREACHABLE();
     } else if (type.IsString()) {
-      ASSERT(!info()->IsStub());
+      DCHECK(!info()->IsStub());
       __ cmp(FieldOperand(reg, String::kLengthOffset), Immediate(0));
       EmitBranch(instr, not_equal);
     } else {
@@ -2267,13 +2420,13 @@
       } else if (expected.NeedsMap()) {
         // If we need a map later and have a Smi -> deopt.
         __ test(reg, Immediate(kSmiTagMask));
-        DeoptimizeIf(zero, instr->environment());
+        DeoptimizeIf(zero, instr, "Smi");
       }
 
       Register map = no_reg;  // Keep the compiler happy.
       if (expected.NeedsMap()) {
         map = ToRegister(instr->temp());
-        ASSERT(!map.is(reg));
+        DCHECK(!map.is(reg));
         __ mov(map, FieldOperand(reg, HeapObject::kMapOffset));
 
         if (expected.CanBeUndetectable()) {
@@ -2324,7 +2477,7 @@
       if (!expected.IsGeneric()) {
         // We've seen something for the first time -> deopt.
         // This can only happen if we are not generic already.
-        DeoptimizeIf(no_condition, instr->environment());
+        DeoptimizeIf(no_condition, instr, "unexpected object");
       }
     }
   }
@@ -2467,10 +2620,13 @@
 
 void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
   Representation rep = instr->hydrogen()->value()->representation();
-  ASSERT(!rep.IsInteger32());
+  DCHECK(!rep.IsInteger32());
 
   if (rep.IsDouble()) {
-    UNREACHABLE();
+    X87Register input = ToX87Register(instr->value());
+    X87LoadForUsage(input);
+    __ FXamMinusZero();
+    EmitBranch(instr, equal);
   } else {
     Register value = ToRegister(instr->value());
     Handle<Map> map = masm()->isolate()->factory()->heap_number_map();
@@ -2594,7 +2750,7 @@
 void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
@@ -2608,7 +2764,7 @@
   InstanceType from = instr->from();
   InstanceType to = instr->to();
   if (from == FIRST_TYPE) return to;
-  ASSERT(from == to || to == LAST_TYPE);
+  DCHECK(from == to || to == LAST_TYPE);
   return from;
 }
 
@@ -2666,12 +2822,12 @@
                                Register input,
                                Register temp,
                                Register temp2) {
-  ASSERT(!input.is(temp));
-  ASSERT(!input.is(temp2));
-  ASSERT(!temp.is(temp2));
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
   __ JumpIfSmi(input, is_false);
 
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Function"))) {
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
     // Assuming the following assertions, we can use the same compares to test
     // for both being a function type and being in the object type range.
     STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
@@ -2701,7 +2857,7 @@
   __ mov(temp, FieldOperand(temp, Map::kConstructorOffset));
   // Objects with a non-function constructor have class 'Object'.
   __ CmpObjectType(temp, JS_FUNCTION_TYPE, temp2);
-  if (class_name->IsOneByteEqualTo(STATIC_ASCII_VECTOR("Object"))) {
+  if (String::Equals(class_name, isolate()->factory()->Object_string())) {
     __ j(not_equal, is_true);
   } else {
     __ j(not_equal, is_false);
@@ -2746,7 +2902,7 @@
 
 void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
   // Object and function are in fixed registers defined by the stub.
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
   CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
 
@@ -2762,16 +2918,16 @@
 
 
 void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
-  class DeferredInstanceOfKnownGlobal V8_FINAL : public LDeferredCode {
+  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
    public:
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr,
                                   const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
    private:
     LInstanceOfKnownGlobal* instr_;
@@ -2843,7 +2999,7 @@
   // stack is used to pass the offset to the location of the map check to
   // the stub.
   Register temp = ToRegister(instr->temp());
-  ASSERT(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
+  DCHECK(MacroAssembler::SafepointRegisterStackIndex(temp) == 0);
   __ LoadHeapObject(InstanceofStub::right(), instr->function());
   static const int kAdditionalDelta = 13;
   int delta = masm_->SizeOfCodeGeneratedSince(map_check) + kAdditionalDelta;
@@ -2866,7 +3022,7 @@
 void LCodeGen::DoCmpT(LCmpT* instr) {
   Token::Value op = instr->op();
 
-  Handle<Code> ic = CompareIC::GetUninitialized(isolate(), op);
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 
   Condition condition = ComputeCompareCondition(op);
@@ -2899,7 +3055,7 @@
     __ SmiUntag(reg);
     Register return_addr_reg = reg.is(ecx) ? ebx : ecx;
     if (dynamic_frame_alignment && FLAG_debug_code) {
-      ASSERT(extra_value_count == 2);
+      DCHECK(extra_value_count == 2);
       __ cmp(Operand(esp, reg, times_pointer_size,
                      extra_value_count * kPointerSize),
              Immediate(kAlignmentZapValue));
@@ -2960,19 +3116,36 @@
   __ mov(result, Operand::ForCell(instr->hydrogen()->cell().handle()));
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
   }
 }
 
 
-void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->global_object()).is(edx));
-  ASSERT(ToRegister(instr->result()).is(eax));
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ mov(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
+  __ mov(VectorLoadICDescriptor::SlotRegister(),
+         Immediate(Smi::FromInt(instr->hydrogen()->slot())));
+}
 
-  __ mov(ecx, instr->name());
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
+
+  __ mov(LoadDescriptor::NameRegister(), instr->name());
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  }
   ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), mode);
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -2987,7 +3160,7 @@
   // it as no longer deleted. We deoptimize in that case.
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(Operand::ForCell(cell_handle), factory()->the_hole_value());
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
   }
 
   // Store the value.
@@ -3004,7 +3177,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(result, factory()->the_hole_value());
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       Label is_not_hole;
       __ j(not_equal, &is_not_hole, Label::kNear);
@@ -3025,7 +3198,7 @@
   if (instr->hydrogen()->RequiresHoleCheck()) {
     __ cmp(target, factory()->the_hole_value());
     if (instr->hydrogen()->DeoptimizesOnHole()) {
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     } else {
       __ j(not_equal, &skip_assignment, Label::kNear);
     }
@@ -3038,12 +3211,8 @@
             ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     Register temp = ToRegister(instr->temp());
     int offset = Context::SlotOffset(instr->slot_index());
-    __ RecordWriteContextSlot(context,
-                              offset,
-                              value,
-                              temp,
-                              EMIT_REMEMBERED_SET,
-                              check_needed);
+    __ RecordWriteContextSlot(context, offset, value, temp, kSaveFPRegs,
+                              EMIT_REMEMBERED_SET, check_needed);
   }
 
   __ bind(&skip_assignment);
@@ -3080,7 +3249,7 @@
 
 
 void LCodeGen::EmitPushTaggedOperand(LOperand* operand) {
-  ASSERT(!operand->IsDoubleRegister());
+  DCHECK(!operand->IsDoubleRegister());
   if (operand->IsConstantOperand()) {
     Handle<Object> object = ToHandle(LConstantOperand::cast(operand));
     AllowDeferredHandleDereference smi_check;
@@ -3098,12 +3267,15 @@
 
 
 void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
-  __ mov(ecx, instr->name());
-  Handle<Code> ic = LoadIC::initialize_stub(isolate(), NOT_CONTEXTUAL);
+  __ mov(LoadDescriptor::NameRegister(), instr->name());
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  }
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3113,23 +3285,13 @@
   Register temp = ToRegister(instr->temp());
   Register result = ToRegister(instr->result());
 
-  // Check that the function really is a function.
-  __ CmpObjectType(function, JS_FUNCTION_TYPE, result);
-  DeoptimizeIf(not_equal, instr->environment());
-
-  // Check whether the function has an instance prototype.
-  Label non_instance;
-  __ test_b(FieldOperand(result, Map::kBitFieldOffset),
-            1 << Map::kHasNonInstancePrototype);
-  __ j(not_zero, &non_instance, Label::kNear);
-
   // Get the prototype or initial map from the function.
   __ mov(result,
          FieldOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
 
   // Check that the function has a prototype or an initial map.
   __ cmp(Operand(result), Immediate(factory()->the_hole_value()));
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "hole");
 
   // If the function does not have an initial map, we're done.
   Label done;
@@ -3138,12 +3300,6 @@
 
   // Get the prototype from the initial map.
   __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
-  __ jmp(&done, Label::kNear);
-
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in the function's map.
-  __ bind(&non_instance);
-  __ mov(result, FieldOperand(result, Map::kConstructorOffset));
 
   // All done.
   __ bind(&done);
@@ -3226,7 +3382,7 @@
         __ mov(result, operand);
         if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
           __ test(result, Operand(result));
-          DeoptimizeIf(negative, instr->environment());
+          DeoptimizeIf(negative, instr, "negative value");
         }
         break;
       case EXTERNAL_FLOAT32_ELEMENTS:
@@ -3256,7 +3412,7 @@
         FAST_DOUBLE_ELEMENTS,
         instr->base_offset() + sizeof(kHoleNanLower32));
     __ cmp(hole_check_operand, Immediate(kHoleNanUpper32));
-    DeoptimizeIf(equal, instr->environment());
+    DeoptimizeIf(equal, instr, "hole");
   }
 
   Operand double_load_operand = BuildFastArrayOperand(
@@ -3274,20 +3430,18 @@
 
   // Load the result.
   __ mov(result,
-         BuildFastArrayOperand(instr->elements(),
-                               instr->key(),
+         BuildFastArrayOperand(instr->elements(), instr->key(),
                                instr->hydrogen()->key()->representation(),
-                               FAST_ELEMENTS,
-                               instr->base_offset()));
+                               FAST_ELEMENTS, instr->base_offset()));
 
   // Check for the hole value.
   if (instr->hydrogen()->RequiresHoleCheck()) {
     if (IsFastSmiElementsKind(instr->hydrogen()->elements_kind())) {
       __ test(result, Immediate(kSmiTagMask));
-      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(not_equal, instr, "not a Smi");
     } else {
       __ cmp(result, factory()->the_hole_value());
-      DeoptimizeIf(equal, instr->environment());
+      DeoptimizeIf(equal, instr, "hole");
     }
   }
 }
@@ -3336,11 +3490,15 @@
 
 
 void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->key()).is(ecx));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
 
-  Handle<Code> ic = isolate()->builtins()->KeyedLoadIC_Initialize();
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -3430,17 +3588,17 @@
 
   // The receiver should be a JS object.
   __ test(receiver, Immediate(kSmiTagMask));
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "Smi");
   __ CmpObjectType(receiver, FIRST_SPEC_OBJECT_TYPE, scratch);
-  DeoptimizeIf(below, instr->environment());
+  DeoptimizeIf(below, instr, "not a JavaScript object");
 
   __ jmp(&receiver_ok, Label::kNear);
   __ bind(&global_object);
   __ mov(receiver, FieldOperand(function, JSFunction::kContextOffset));
   const int global_offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
   __ mov(receiver, Operand(receiver, global_offset));
-  const int receiver_offset = GlobalObject::kGlobalReceiverOffset;
-  __ mov(receiver, FieldOperand(receiver, receiver_offset));
+  const int proxy_offset = GlobalObject::kGlobalProxyOffset;
+  __ mov(receiver, FieldOperand(receiver, proxy_offset));
   __ bind(&receiver_ok);
 }
 
@@ -3450,15 +3608,15 @@
   Register function = ToRegister(instr->function());
   Register length = ToRegister(instr->length());
   Register elements = ToRegister(instr->elements());
-  ASSERT(receiver.is(eax));  // Used for parameter count.
-  ASSERT(function.is(edi));  // Required by InvokeFunction.
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(receiver.is(eax));  // Used for parameter count.
+  DCHECK(function.is(edi));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   // Copy the arguments to this function possibly from the
   // adaptor frame below it.
   const uint32_t kArgumentsLimit = 1 * KB;
   __ cmp(length, kArgumentsLimit);
-  DeoptimizeIf(above, instr->environment());
+  DeoptimizeIf(above, instr, "too many arguments");
 
   __ push(receiver);
   __ mov(receiver, length);
@@ -3476,7 +3634,7 @@
 
   // Invoke the function.
   __ bind(&invoke);
-  ASSERT(instr->HasPointerMap());
+  DCHECK(instr->HasPointerMap());
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator safepoint_generator(
       this, pointers, Safepoint::kLazyDeopt);
@@ -3513,17 +3671,17 @@
     __ mov(result, Operand(ebp, StandardFrameConstants::kContextOffset));
   } else {
     // If there is no frame, the context must be in esi.
-    ASSERT(result.is(esi));
+    DCHECK(result.is(esi));
   }
 }
 
 
 void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   __ push(esi);  // The context is the first argument.
   __ push(Immediate(instr->hydrogen()->pairs()));
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->flags())));
-  CallRuntime(Runtime::kHiddenDeclareGlobals, 3, instr);
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
 }
 
 
@@ -3570,8 +3728,34 @@
 }
 
 
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+
+  Register scratch = ebx;
+  Register extra = eax;
+  DCHECK(!scratch.is(receiver) && !scratch.is(name));
+  DCHECK(!extra.is(receiver) && !extra.is(name));
+
+  // Important for the tail-call.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, extra);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ leave();
+  LoadIC::GenerateMiss(masm());
+}
+
+
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   LPointerMap* pointers = instr->pointer_map();
   SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
@@ -3582,7 +3766,7 @@
     generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
     __ call(code, RelocInfo::CODE_TARGET);
   } else {
-    ASSERT(instr->target()->IsRegister());
+    DCHECK(instr->target()->IsRegister());
     Register target = ToRegister(instr->target());
     generator.BeforeCall(__ CallSize(Operand(target)));
     __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
@@ -3593,8 +3777,8 @@
 
 
 void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
-  ASSERT(ToRegister(instr->function()).is(edi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   if (instr->hydrogen()->pass_argument_count()) {
     __ mov(eax, instr->arity());
@@ -3625,7 +3809,7 @@
   Register input_reg = ToRegister(instr->value());
   __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
          factory()->heap_number_map());
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a heap number");
 
   Label slow, allocated, done;
   Register tmp = input_reg.is(eax) ? ecx : eax;
@@ -3647,7 +3831,7 @@
 
   // Slow case: Call the runtime system to do the number allocation.
   __ bind(&slow);
-  CallRuntimeFromDeferred(Runtime::kHiddenAllocateHeapNumber, 0,
+  CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0,
                           instr, instr->context());
   // Set the pointer to the new heap number in tmp.
   if (!tmp.is(eax)) __ mov(tmp, eax);
@@ -3672,32 +3856,34 @@
   Label is_positive;
   __ j(not_sign, &is_positive, Label::kNear);
   __ neg(input_reg);  // Sets flags.
-  DeoptimizeIf(negative, instr->environment());
+  DeoptimizeIf(negative, instr, "overflow");
   __ bind(&is_positive);
 }
 
 
 void LCodeGen::DoMathAbs(LMathAbs* instr) {
   // Class for deferred case.
-  class DeferredMathAbsTaggedHeapNumber V8_FINAL : public LDeferredCode {
+  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                     LMathAbs* instr,
                                     const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LMathAbs* instr_;
   };
 
-  ASSERT(instr->value()->Equals(instr->result()));
+  DCHECK(instr->value()->Equals(instr->result()));
   Representation r = instr->hydrogen()->value()->representation();
 
   if (r.IsDouble()) {
-    UNIMPLEMENTED();
+    X87Register value = ToX87Register(instr->value());
+    X87Fxch(value);
+    __ fabs();
   } else if (r.IsSmiOrInteger32()) {
     EmitIntegerMathAbs(instr);
   } else {  // Tagged case.
@@ -3713,49 +3899,407 @@
 
 
 void LCodeGen::DoMathFloor(LMathFloor* instr) {
-  UNIMPLEMENTED();
+  Register output_reg = ToRegister(instr->result());
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+
+  Label not_minus_zero, done;
+  // Deoptimize on unordered.
+  __ fldz();
+  __ fld(1);
+  __ FCmp();
+  DeoptimizeIf(parity_even, instr, "NaN");
+  __ j(below, &not_minus_zero, Label::kNear);
+
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Check for negative zero.
+    __ j(not_equal, &not_minus_zero, Label::kNear);
+    // +- 0.0.
+    __ fld(0);
+    __ FXamSign();
+    DeoptimizeIf(not_zero, instr, "minus zero");
+    __ Move(output_reg, Immediate(0));
+    __ jmp(&done, Label::kFar);
+  }
+
+  // Positive input.
+  // rc=01B, round down.
+  __ bind(&not_minus_zero);
+  __ fnclex();
+  __ X87SetRC(0x0400);
+  __ sub(esp, Immediate(kPointerSize));
+  __ fist_s(Operand(esp, 0));
+  __ pop(output_reg);
+  __ X87CheckIA();
+  DeoptimizeIf(equal, instr, "overflow");
+  __ fnclex();
+  __ X87SetRC(0x0000);
+  __ bind(&done);
 }
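
A portable sketch of the floor strategy used above: the generated code switches the x87 rounding-control field to round-down (rc=01B) before the fist_s store, then reads the invalid-operation flag (X87CheckIA) to catch inputs outside the int32 range. The <cfenv> calls below model the same control-word writes; this assumes a 32-bit long, as on ia32, and is an illustration rather than V8 code.

#include <cfenv>
#include <cmath>

// Convert with round-toward-negative-infinity, reporting overflow the
// way the generated code does via the x87 IA (invalid operation) flag.
bool FloorToInt32(double x, int* out) {
  std::feclearexcept(FE_ALL_EXCEPT);         // fnclex
  const int old_mode = std::fegetround();
  std::fesetround(FE_DOWNWARD);              // rc=01B, round down
  const long result = std::lrint(x);         // fist_s analogue
  const bool overflow = std::fetestexcept(FE_INVALID) != 0;  // X87CheckIA
  std::fesetround(old_mode);                 // restore rc
  if (overflow) return false;                // the codegen deoptimizes here
  *out = static_cast<int>(result);
  return true;
}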
 
 
 void LCodeGen::DoMathRound(LMathRound* instr) {
-  UNIMPLEMENTED();
+  X87Register input_reg = ToX87Register(instr->value());
+  Register result = ToRegister(instr->result());
+  X87Fxch(input_reg);
+  Label below_one_half, below_minus_one_half, done;
+
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+  ExternalReference minus_one_half =
+      ExternalReference::address_of_minus_one_half();
+
+  __ fld_d(Operand::StaticVariable(one_half));
+  __ fld(1);
+  __ FCmp();
+  __ j(carry, &below_one_half);
+
+  // Use round-towards-zero: since 0.5 <= x, trunc(x + 0.5) == floor(x + 0.5).
+  __ fld(0);
+  __ fadd_d(Operand::StaticVariable(one_half));
+  // rc=11B, round toward zero.
+  __ X87SetRC(0x0c00);
+  __ sub(esp, Immediate(kPointerSize));
+  // Clear exception bits.
+  __ fnclex();
+  __ fistp_s(MemOperand(esp, 0));
+  // Check overflow.
+  __ X87CheckIA();
+  __ pop(result);
+  DeoptimizeIf(equal, instr, "conversion overflow");
+  __ fnclex();
+  // Restore round mode.
+  __ X87SetRC(0x0000);
+  __ jmp(&done);
+
+  __ bind(&below_one_half);
+  __ fld_d(Operand::StaticVariable(minus_one_half));
+  __ fld(1);
+  __ FCmp();
+  __ j(carry, &below_minus_one_half);
+  // We return 0 for the input range [+0, 0.5), or [-0.5, 0.5) if
+  // we can ignore the difference between a result of -0 and +0.
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // If the sign is positive, we return +0.
+    __ fld(0);
+    __ FXamSign();
+    DeoptimizeIf(not_zero, instr, "minus zero");
+  }
+  __ Move(result, Immediate(0));
+  __ jmp(&done);
+
+  __ bind(&below_minus_one_half);
+  __ fld(0);
+  __ fadd_d(Operand::StaticVariable(one_half));
+  // rc=01B, round down.
+  __ X87SetRC(0x0400);
+  __ sub(esp, Immediate(kPointerSize));
+  // Clear exception bits.
+  __ fnclex();
+  __ fistp_s(MemOperand(esp, 0));
+  // Check overflow.
+  __ X87CheckIA();
+  __ pop(result);
+  DeoptimizeIf(equal, instr, "conversion overflow");
+  __ fnclex();
+  // Restore round mode.
+  __ X87SetRC(0x0000);
+
+  __ bind(&done);
+}
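
Math.round is then assembled from the same machinery: trunc(x + 0.5) under rc=11B for x >= 0.5, floor(x + 0.5) under rc=01B for x < -0.5, and a constant zero (with an optional minus-zero deopt) in between. A minimal sketch of that three-way split, again using <cfenv> in place of the raw control-word writes:

#include <cfenv>
#include <cmath>

int RoundToInt32(double x) {
  long r;
  if (x >= 0.5) {
    const double y = x + 0.5;          // the fadd precedes the rc write
    std::fesetround(FE_TOWARDZERO);    // rc=11B: trunc(y)
    r = std::lrint(y);
  } else if (x < -0.5) {
    const double y = x + 0.5;
    std::fesetround(FE_DOWNWARD);      // rc=01B: floor(y)
    r = std::lrint(y);
  } else {
    r = 0;  // (-0.5, 0.5): the codegen deopts here if -0 must survive
  }
  std::fesetround(FE_TONEAREST);       // restore rc=00B
  return static_cast<int>(r);
}

// RoundToInt32(2.5) == 3 and RoundToInt32(-1.5) == -1, matching
// JavaScript's half-up Math.round.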
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+  __ sub(esp, Immediate(kPointerSize));
+  __ fstp_s(MemOperand(esp, 0));
+  X87Fld(MemOperand(esp, 0), kX87FloatOperand);
+  __ add(esp, Immediate(kPointerSize));
 }
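
Math.fround needs nothing more than this store/load round trip through a 32-bit slot, because narrowing to single precision and widening back performs exactly the required rounding. In portable C++ the whole operation is a pair of casts:

// Round a double to the nearest representable float, then widen it
// back: the fstp_s / fld round trip above.
double Fround(double x) {
  return static_cast<double>(static_cast<float>(x));
}
// Fround(1.1) == 1.1000000238418579, the closest float to 1.1.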
 
 
 void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
-  UNIMPLEMENTED();
+  X87Register input = ToX87Register(instr->value());
+  X87Register result_reg = ToX87Register(instr->result());
+  Register temp_result = ToRegister(instr->temp1());
+  Register temp = ToRegister(instr->temp2());
+  Label slow, done, smi, finish;
+  DCHECK(result_reg.is(input));
+
+  // Store the input in a heap number and call the runtime function kMathSqrtRT.
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
+    __ jmp(&done, Label::kNear);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ Move(temp_result, Immediate(0));
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(
+       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  __ bind(&done);
+  X87LoadForUsage(input);
+  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+
+  {
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ push(temp_result);
+    __ CallRuntimeSaveDoubles(Runtime::kMathSqrtRT);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  X87PrepareToWrite(result_reg);
+  // The return value of MathSqrtRT is a Smi or a heap number.
+  __ JumpIfSmi(temp_result, &smi);
+  // Heap number (double).
+  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+  __ jmp(&finish);
+  // SMI
+  __ bind(&smi);
+  __ SmiUntag(temp_result);
+  __ push(temp_result);
+  __ fild_s(MemOperand(esp, 0));
+  __ pop(temp_result);
+  __ bind(&finish);
+  X87CommitWrite(result_reg);
 }
 
 
 void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
-  UNIMPLEMENTED();
+  X87Register input_reg = ToX87Register(instr->value());
+  DCHECK(ToX87Register(instr->result()).is(input_reg));
+  X87Fxch(input_reg);
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label done, sqrt;
+  // Check base for -Infinity. C3 == 0, C2 == 1, C1 == 1 and C0 == 1
+  __ fxam();
+  __ push(eax);
+  __ fnstsw_ax();
+  __ and_(eax, Immediate(0x4700));
+  __ cmp(eax, Immediate(0x0700));
+  __ j(not_equal, &sqrt, Label::kNear);
+  // If input is -Infinity, return Infinity.
+  __ fchs();
+  __ jmp(&done, Label::kNear);
+
+  // Square root.
+  __ bind(&sqrt);
+  __ fldz();
+  __ faddp();  // Convert -0 to +0.
+  __ fsqrt();
+  __ bind(&done);
+  __ pop(eax);
 }
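
The two corner cases in the comment are why this is not a bare fsqrt: ES5 15.8.2.13 requires pow(-Infinity, 0.5) == +Infinity and pow(-0, 0.5) == +0, so the code flips a matched -Infinity with fchs and launders -0 through an add with +0 before taking the root. A scalar sketch of the same patching:

#include <cmath>
#include <limits>

double PowHalf(double x) {
  if (std::isinf(x) && x < 0.0) {  // fxam matched -Infinity
    return std::numeric_limits<double>::infinity();  // fchs
  }
  // -0.0 + 0.0 == +0.0, so fsqrt sees +0 rather than producing -0.
  return std::sqrt(x + 0.0);
}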
 
 
 void LCodeGen::DoPower(LPower* instr) {
-  UNIMPLEMENTED();
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+  X87Register result = ToX87Register(instr->result());
+  // Having marked this as a call, we can use any registers.
+  X87Register base = ToX87Register(instr->left());
+  ExternalReference one_half = ExternalReference::address_of_one_half();
+
+  if (exponent_type.IsSmi()) {
+    Register exponent = ToRegister(instr->right());
+    X87LoadForUsage(base);
+    __ SmiUntag(exponent);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+  } else if (exponent_type.IsTagged()) {
+    Register exponent = ToRegister(instr->right());
+    Register temp = exponent.is(ecx) ? eax : ecx;
+    Label no_deopt, done;
+    X87LoadForUsage(base);
+    __ JumpIfSmi(exponent, &no_deopt);
+    __ CmpObjectType(exponent, HEAP_NUMBER_TYPE, temp);
+    DeoptimizeIf(not_equal, instr, "not a heap number");
+    // Heap number (double).
+    __ fld_d(FieldOperand(exponent, HeapNumber::kValueOffset));
+    __ jmp(&done);
+    // SMI
+    __ bind(&no_deopt);
+    __ SmiUntag(exponent);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+    __ bind(&done);
+  } else if (exponent_type.IsInteger32()) {
+    Register exponent = ToRegister(instr->right());
+    X87LoadForUsage(base);
+    __ push(exponent);
+    __ fild_s(MemOperand(esp, 0));
+    __ pop(exponent);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    X87Register exponent_double = ToX87Register(instr->right());
+    X87LoadForUsage(base, exponent_double);
+  }
+
+  // FP data stack {base, exponent(TOS)}.
+  // Handle (exponent==+-0.5 && base == -0).
+  Label not_plus_0;
+  __ fld(0);
+  __ fabs();
+  X87Fld(Operand::StaticVariable(one_half), kX87DoubleOperand);
+  __ FCmp();
+  __ j(parity_even, &not_plus_0, Label::kNear);  // NaN.
+  __ j(not_equal, &not_plus_0, Label::kNear);
+  __ fldz();
+  // FP data stack {base, exponent(TOS), zero}.
+  __ faddp(2);
+  __ bind(&not_plus_0);
+
+  {
+    __ PrepareCallCFunction(4, eax);
+    __ fstp_d(MemOperand(esp, kDoubleSize));  // Exponent value.
+    __ fstp_d(MemOperand(esp, 0));            // Base value.
+    X87PrepareToWrite(result);
+    __ CallCFunction(ExternalReference::power_double_double_function(isolate()),
+                     4);
+    // Return value is in st(0) on ia32.
+    X87CommitWrite(result);
+  }
 }
 
 
 void LCodeGen::DoMathLog(LMathLog* instr) {
-  UNIMPLEMENTED();
+  DCHECK(instr->value()->Equals(instr->result()));
+  X87Register input_reg = ToX87Register(instr->value());
+  X87Fxch(input_reg);
+
+  Label positive, done, zero, nan_result;
+  __ fldz();
+  __ fld(1);
+  __ FCmp();
+  __ j(below, &nan_result, Label::kNear);
+  __ j(equal, &zero, Label::kNear);
+  // Positive input.
+  // {input, ln2}.
+  __ fldln2();
+  // {ln2, input}.
+  __ fxch();
+  // {result}.
+  __ fyl2x();
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&nan_result);
+  ExternalReference nan =
+      ExternalReference::address_of_canonical_non_hole_nan();
+  X87PrepareToWrite(input_reg);
+  __ fld_d(Operand::StaticVariable(nan));
+  X87CommitWrite(input_reg);
+  __ jmp(&done, Label::kNear);
+
+  __ bind(&zero);
+  ExternalReference ninf = ExternalReference::address_of_negative_infinity();
+  X87PrepareToWrite(input_reg);
+  __ fld_d(Operand::StaticVariable(ninf));
+  X87CommitWrite(input_reg);
+
+  __ bind(&done);
 }
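
On the positive path, fyl2x computes st(1) * log2(st(0)), so preloading ln 2 with fldln2 yields the natural logarithm; zero and negative inputs are routed to the -Infinity and NaN constants to match Math.log. The identity behind the fldln2/fyl2x pairing, in scalar form:

#include <cmath>

// ln(x) = ln(2) * log2(x); agrees with std::log(x), including
// log(0) == -Infinity and log(x < 0) == NaN.
double Ln(double x) {
  return std::log(2.0) * std::log2(x);
}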
 
 
 void LCodeGen::DoMathClz32(LMathClz32* instr) {
-  UNIMPLEMENTED();
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label not_zero_input;
+  __ bsr(result, input);
+
+  __ j(not_zero, &not_zero_input);
+  __ Move(result, Immediate(63));  // bsr leaves result undefined for 0; 63 ^ 31 == 32.
+
+  __ bind(&not_zero_input);
+  __ xor_(result, Immediate(31));  // for x in [0..31], 31^x == 31-x.
 }
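
bsr returns the index of the highest set bit and leaves its result undefined for a zero input, hence the patch to 63; and since 31 ^ i == 31 - i for i in [0, 31], one xor converts that index into the leading-zero count. A portable rendering of the same arithmetic:

#include <cstdint>

// Math.clz32 via the bsr + xor trick used above.
uint32_t Clz32(uint32_t x) {
  int highest = 63;  // zero-input patch: 63 ^ 31 == 32
  for (int i = 31; i >= 0; --i) {  // bsr: index of the highest set bit
    if (x & (1u << i)) {
      highest = i;
      break;
    }
  }
  return static_cast<uint32_t>(highest ^ 31);  // 31 ^ i == 31 - i
}
// Clz32(0) == 32, Clz32(1) == 31, Clz32(0x80000000u) == 0.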
 
 
 void LCodeGen::DoMathExp(LMathExp* instr) {
-  UNIMPLEMENTED();
+  X87Register input = ToX87Register(instr->value());
+  X87Register result_reg = ToX87Register(instr->result());
+  Register temp_result = ToRegister(instr->temp1());
+  Register temp = ToRegister(instr->temp2());
+  Label slow, done, smi, finish;
+  DCHECK(result_reg.is(input));
+
+  // Store the input in a heap number and call the runtime function kMathExpRT.
+  if (FLAG_inline_new) {
+    __ AllocateHeapNumber(temp_result, temp, no_reg, &slow);
+    __ jmp(&done, Label::kNear);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ Move(temp_result, Immediate(0));
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  __ bind(&done);
+  X87LoadForUsage(input);
+  __ fstp_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+
+  {
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
+    __ push(temp_result);
+    __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(temp_result, eax);
+  }
+  X87PrepareToWrite(result_reg);
+  // The return value of MathExpRT is a Smi or a heap number.
+  __ JumpIfSmi(temp_result, &smi);
+  // Heap number (double).
+  __ fld_d(FieldOperand(temp_result, HeapNumber::kValueOffset));
+  __ jmp(&finish);
+  // SMI
+  __ bind(&smi);
+  __ SmiUntag(temp_result);
+  __ push(temp_result);
+  __ fild_s(MemOperand(esp, 0));
+  __ pop(temp_result);
+  __ bind(&finish);
+  X87CommitWrite(result_reg);
 }
 
 
 void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->function()).is(edi));
-  ASSERT(instr->HasPointerMap());
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(instr->HasPointerMap());
 
   Handle<JSFunction> known_function = instr->hydrogen()->known_function();
   if (known_function.is_null()) {
@@ -3775,9 +4319,9 @@
 
 
 void LCodeGen::DoCallFunction(LCallFunction* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->function()).is(edi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->function()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   int arity = instr->arity();
   CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
@@ -3786,9 +4330,9 @@
 
 
 void LCodeGen::DoCallNew(LCallNew* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->constructor()).is(edi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->constructor()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   // No cell in ebx for construct type feedback in optimized code
   __ mov(ebx, isolate()->factory()->undefined_value());
@@ -3799,9 +4343,9 @@
 
 
 void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->constructor()).is(edi));
-  ASSERT(ToRegister(instr->result()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->constructor()).is(edi));
+  DCHECK(ToRegister(instr->result()).is(eax));
 
   __ Move(eax, Immediate(instr->arity()));
   __ mov(ebx, isolate()->factory()->undefined_value());
@@ -3844,8 +4388,8 @@
 
 
 void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  CallRuntime(instr->function(), instr->arity(), instr);
+  DCHECK(ToRegister(instr->context()).is(esi));
+  CallRuntime(instr->function(), instr->arity(), instr, instr->save_doubles());
 }
 
 
@@ -3877,7 +4421,7 @@
   int offset = access.offset();
 
   if (access.IsExternalMemory()) {
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     MemOperand operand = instr->object()->IsConstantOperand()
         ? MemOperand::StaticVariable(
             ToExternalReference(LConstantOperand::cast(instr->object())))
@@ -3894,13 +4438,13 @@
 
   Register object = ToRegister(instr->object());
   __ AssertNotSmi(object);
-  ASSERT(!representation.IsSmi() ||
+  DCHECK(!representation.IsSmi() ||
          !instr->value()->IsConstantOperand() ||
          IsSmi(LConstantOperand::cast(instr->value())));
   if (representation.IsDouble()) {
-    ASSERT(access.IsInobject());
-    ASSERT(!instr->hydrogen()->has_transition());
-    ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+    DCHECK(access.IsInobject());
+    DCHECK(!instr->hydrogen()->has_transition());
+    DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
     X87Register value = ToX87Register(instr->value());
     X87Mov(FieldOperand(object, offset), value);
     return;
@@ -3916,7 +4460,7 @@
       __ mov(temp_map, transition);
       __ mov(FieldOperand(object, HeapObject::kMapOffset), temp_map);
       // Update the write barrier for the map field.
-      __ RecordWriteForMap(object, transition, temp_map, temp);
+      __ RecordWriteForMap(object, transition, temp_map, temp, kSaveFPRegs);
     }
   }
 
@@ -3935,11 +4479,11 @@
       __ Store(value, operand, representation);
     } else if (representation.IsInteger32()) {
       Immediate immediate = ToImmediate(operand_value, representation);
-      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
       __ mov(operand, immediate);
     } else {
       Handle<Object> handle_value = ToHandle(operand_value);
-      ASSERT(!instr->hydrogen()->NeedsWriteBarrier());
+      DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
       __ mov(operand, handle_value);
     }
   } else {
@@ -3951,10 +4495,7 @@
     Register value = ToRegister(instr->value());
     Register temp = access.IsInobject() ? ToRegister(instr->temp()) : object;
     // Update the write barrier for the object for in-object properties.
-    __ RecordWriteField(write_register,
-                        offset,
-                        value,
-                        temp,
+    __ RecordWriteField(write_register, offset, value, temp, kSaveFPRegs,
                         EMIT_REMEMBERED_SET,
                         instr->hydrogen()->SmiCheckForWriteBarrier(),
                         instr->hydrogen()->PointersToHereCheckForValue());
@@ -3963,11 +4504,11 @@
 
 
 void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->value()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  __ mov(ecx, instr->name());
+  __ mov(StoreDescriptor::NameRegister(), instr->name());
   Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
@@ -3993,7 +4534,7 @@
     __ int3();
     __ bind(&done);
   } else {
-    DeoptimizeIf(cc, instr->environment());
+    DeoptimizeIf(cc, instr, "out of bounds");
   }
 }
 
@@ -4014,8 +4555,7 @@
       instr->base_offset()));
   if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
       elements_kind == FLOAT32_ELEMENTS) {
-    __ fld(0);
-    __ fstp_s(operand);
+    X87Mov(operand, ToX87Register(instr->value()), kX87FloatOperand);
   } else if (elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
              elements_kind == FLOAT64_ELEMENTS) {
     X87Mov(operand, ToX87Register(instr->value()));
@@ -4076,7 +4616,7 @@
     // This means we should store the (double) hole. No floating point
     // registers required.
     double nan_double = FixedDoubleArray::hole_nan_as_double();
-    uint64_t int_val = BitCast<uint64_t, double>(nan_double);
+    uint64_t int_val = bit_cast<uint64_t, double>(nan_double);
     int32_t lower = static_cast<int32_t>(int_val);
     int32_t upper = static_cast<int32_t>(int_val >> (kBitsPerInt));
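
bit_cast here is a strict-aliasing-safe reinterpretation of the hole NaN's bytes, which the 32-bit code then emits as two separate stores. A self-contained sketch of the same split (the helper name is illustrative, not V8's):

#include <cstdint>
#include <cstring>

// Reinterpret a double's bits without violating aliasing rules, then
// split them into the two 32-bit halves stored above.
void SplitDouble(double value, int32_t* lower, int32_t* upper) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // bit_cast<uint64_t>(value)
  *lower = static_cast<int32_t>(bits);
  *upper = static_cast<int32_t>(bits >> 32);
}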
 
@@ -4136,25 +4676,22 @@
       Immediate immediate = ToImmediate(operand_value, Representation::Smi());
       __ mov(operand, immediate);
     } else {
-      ASSERT(!IsInteger32(operand_value));
+      DCHECK(!IsInteger32(operand_value));
       Handle<Object> handle_value = ToHandle(operand_value);
       __ mov(operand, handle_value);
     }
   }
 
   if (instr->hydrogen()->NeedsWriteBarrier()) {
-    ASSERT(instr->value()->IsRegister());
+    DCHECK(instr->value()->IsRegister());
     Register value = ToRegister(instr->value());
-    ASSERT(!instr->key()->IsConstantOperand());
+    DCHECK(!instr->key()->IsConstantOperand());
     SmiCheck check_needed =
         instr->hydrogen()->value()->type().IsHeapObject()
           ? OMIT_SMI_CHECK : INLINE_SMI_CHECK;
     // Compute address of modified element and store it into key register.
     __ lea(key, operand);
-    __ RecordWrite(elements,
-                   key,
-                   value,
-                   EMIT_REMEMBERED_SET,
+    __ RecordWrite(elements, key, value, kSaveFPRegs, EMIT_REMEMBERED_SET,
                    check_needed,
                    instr->hydrogen()->PointersToHereCheckForValue());
   }
@@ -4174,14 +4711,13 @@
 
 
 void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->object()).is(edx));
-  ASSERT(ToRegister(instr->key()).is(ecx));
-  ASSERT(ToRegister(instr->value()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
 
-  Handle<Code> ic = instr->strict_mode() == STRICT
-      ? isolate()->builtins()->KeyedStoreIC_Initialize_Strict()
-      : isolate()->builtins()->KeyedStoreIC_Initialize();
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
   CallCode(ic, RelocInfo::CODE_TARGET, instr);
 }
 
@@ -4191,7 +4727,7 @@
   Register temp = ToRegister(instr->temp());
   Label no_memento_found;
   __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "memento found");
   __ bind(&no_memento_found);
 }
 
@@ -4216,12 +4752,12 @@
     __ mov(FieldOperand(object_reg, HeapObject::kMapOffset),
            Immediate(to_map));
     // Write barrier.
-    ASSERT_NE(instr->temp(), NULL);
+    DCHECK_NE(instr->temp(), NULL);
     __ RecordWriteForMap(object_reg, to_map, new_map_reg,
-                         ToRegister(instr->temp()));
+                         ToRegister(instr->temp()), kDontSaveFPRegs);
   } else {
-    ASSERT(ToRegister(instr->context()).is(esi));
-    ASSERT(object_reg.is(eax));
+    DCHECK(ToRegister(instr->context()).is(esi));
+    DCHECK(object_reg.is(eax));
     PushSafepointRegistersScope scope(this);
     __ mov(ebx, to_map);
     bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
@@ -4235,16 +4771,16 @@
 
 
 void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
-  class DeferredStringCharCodeAt V8_FINAL : public LDeferredCode {
+  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen,
                              LStringCharCodeAt* instr,
                              const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharCodeAt(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4285,7 +4821,7 @@
     __ SmiTag(index);
     __ push(index);
   }
-  CallRuntimeFromDeferred(Runtime::kHiddenStringCharCodeAt, 2,
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2,
                           instr, instr->context());
   __ AssertSmi(eax);
   __ SmiUntag(eax);
@@ -4294,16 +4830,16 @@
 
 
 void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
-  class DeferredStringCharFromCode V8_FINAL : public LDeferredCode {
+  class DeferredStringCharFromCode FINAL : public LDeferredCode {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen,
                                LStringCharFromCode* instr,
                                const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStringCharFromCode* instr_;
   };
@@ -4311,10 +4847,10 @@
   DeferredStringCharFromCode* deferred =
       new(zone()) DeferredStringCharFromCode(this, instr, x87_stack_);
 
-  ASSERT(instr->hydrogen()->value()->representation().IsInteger32());
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
   Register char_code = ToRegister(instr->char_code());
   Register result = ToRegister(instr->result());
-  ASSERT(!char_code.is(result));
+  DCHECK(!char_code.is(result));
 
   __ cmp(char_code, String::kMaxOneByteCharCode);
   __ j(above, deferred->entry());
@@ -4346,9 +4882,9 @@
 
 
 void LCodeGen::DoStringAdd(LStringAdd* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
-  ASSERT(ToRegister(instr->left()).is(edx));
-  ASSERT(ToRegister(instr->right()).is(eax));
+  DCHECK(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->left()).is(edx));
+  DCHECK(ToRegister(instr->right()).is(eax));
   StringAddStub stub(isolate(),
                      instr->hydrogen()->flags(),
                      instr->hydrogen()->pretenure_flag());
@@ -4359,8 +4895,8 @@
 void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
   LOperand* input = instr->value();
   LOperand* output = instr->result();
-  ASSERT(input->IsRegister() || input->IsStackSlot());
-  ASSERT(output->IsDoubleRegister());
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  DCHECK(output->IsDoubleRegister());
   if (input->IsRegister()) {
     Register input_reg = ToRegister(input);
     __ push(input_reg);
@@ -4383,23 +4919,23 @@
 
 
 void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
-  class DeferredNumberTagI V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagI FINAL : public LDeferredCode {
    public:
     DeferredNumberTagI(LCodeGen* codegen,
                        LNumberTagI* instr,
                        const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                        SIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagI* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
   Register reg = ToRegister(input);
 
   DeferredNumberTagI* deferred =
@@ -4411,23 +4947,23 @@
 
 
 void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
-  class DeferredNumberTagU V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagU FINAL : public LDeferredCode {
    public:
     DeferredNumberTagU(LCodeGen* codegen,
                        LNumberTagU* instr,
                        const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                        UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagU* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
   Register reg = ToRegister(input);
 
   DeferredNumberTagU* deferred =
@@ -4484,11 +5020,11 @@
 
     // NumberTagI and NumberTagD use the context from the frame, rather than
     // the environment's HContext or HInlinedContext value.
-    // They only call Runtime::kHiddenAllocateHeapNumber.
+    // They only call Runtime::kAllocateHeapNumber.
     // The corresponding HChange instructions are added in a phase that does
     // not have easy access to the local context.
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-    __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(reg, eax);
@@ -4500,16 +5036,16 @@
 
 
 void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
-  class DeferredNumberTagD V8_FINAL : public LDeferredCode {
+  class DeferredNumberTagD FINAL : public LDeferredCode {
    public:
     DeferredNumberTagD(LCodeGen* codegen,
                        LNumberTagD* instr,
                        const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagD(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LNumberTagD* instr_;
   };
@@ -4518,7 +5054,9 @@
 
   // Put the value on top of the x87 stack.
   X87Register src = ToX87Register(instr->value());
-  X87LoadForUsage(src);
+  // Don't use X87LoadForUsage here; it is only meant for instructions that
+  // clobber FP registers.
+  x87_stack_.Fxch(src);
 
   DeferredNumberTagD* deferred =
       new(zone()) DeferredNumberTagD(this, instr, x87_stack_);
@@ -4529,7 +5067,7 @@
     __ jmp(deferred->entry());
   }
   __ bind(deferred->exit());
-  __ fstp_d(FieldOperand(reg, HeapNumber::kValueOffset));
+  __ fst_d(FieldOperand(reg, HeapNumber::kValueOffset));
 }
 
 
@@ -4543,11 +5081,11 @@
   PushSafepointRegistersScope scope(this);
   // NumberTagI and NumberTagD use the context from the frame, rather than
   // the environment's HContext or HInlinedContext value.
-  // They only call Runtime::kHiddenAllocateHeapNumber.
+  // They only call Runtime::kAllocateHeapNumber.
   // The corresponding HChange instructions are added in a phase that does
   // not have easy access to the local context.
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntime(Runtime::kHiddenAllocateHeapNumber);
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
   __ StoreToSafepointRegisterSlot(reg, eax);
@@ -4560,12 +5098,12 @@
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       hchange->value()->CheckFlag(HValue::kUint32)) {
     __ test(input, Immediate(0xc0000000));
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "overflow");
   }
   __ SmiTag(input);
   if (hchange->CheckFlag(HValue::kCanOverflow) &&
       !hchange->value()->CheckFlag(HValue::kUint32)) {
-    DeoptimizeIf(overflow, instr->environment());
+    DeoptimizeIf(overflow, instr, "overflow");
   }
 }
 
@@ -4573,10 +5111,10 @@
 void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
   LOperand* input = instr->value();
   Register result = ToRegister(input);
-  ASSERT(input->IsRegister() && input->Equals(instr->result()));
+  DCHECK(input->IsRegister() && input->Equals(instr->result()));
   if (instr->needs_check()) {
     __ test(result, Immediate(kSmiTagMask));
-    DeoptimizeIf(not_zero, instr->environment());
+    DeoptimizeIf(not_zero, instr, "not a Smi");
   } else {
     __ AssertSmi(result);
   }
@@ -4584,32 +5122,32 @@
 }
 
 
-void LCodeGen::EmitNumberUntagDNoSSE2(Register input_reg,
-                                      Register temp_reg,
-                                      X87Register res_reg,
-                                      bool can_convert_undefined_to_nan,
-                                      bool deoptimize_on_minus_zero,
-                                      LEnvironment* env,
+void LCodeGen::EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input_reg,
+                                      Register temp_reg, X87Register res_reg,
                                       NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
   Label load_smi, done;
 
   X87PrepareToWrite(res_reg);
   if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
     // Smi check.
-    __ JumpIfSmi(input_reg, &load_smi, Label::kNear);
+    __ JumpIfSmi(input_reg, &load_smi);
 
     // Heap number map check.
     __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
            factory()->heap_number_map());
     if (!can_convert_undefined_to_nan) {
-      DeoptimizeIf(not_equal, env);
+      DeoptimizeIf(not_equal, instr, "not a heap number");
     } else {
       Label heap_number, convert;
-      __ j(equal, &heap_number, Label::kNear);
+      __ j(equal, &heap_number);
 
       // Convert undefined (or hole) to NaN.
       __ cmp(input_reg, factory()->undefined_value());
-      DeoptimizeIf(not_equal, env);
+      DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
 
       __ bind(&convert);
       ExternalReference nan =
@@ -4634,11 +5172,11 @@
 
       // Pop FPU stack before deoptimizing.
       __ fstp(0);
-      DeoptimizeIf(not_zero, env);
+      DeoptimizeIf(not_zero, instr, "minus zero");
     }
     __ jmp(&done, Label::kNear);
   } else {
-    ASSERT(mode == NUMBER_CANDIDATE_IS_SMI);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
   }
 
   __ bind(&load_smi);
@@ -4687,39 +5225,81 @@
 
     __ bind(&check_false);
     __ cmp(input_reg, factory()->false_value());
-    __ RecordComment("Deferred TaggedToI: cannot truncate");
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "not a heap number/undefined/true/false");
     __ Move(input_reg, Immediate(0));
   } else {
-    Label bailout;
-    __ TaggedToI(input_reg, input_reg,
-                 instr->hydrogen()->GetMinusZeroMode(), &bailout);
-    __ jmp(done);
-    __ bind(&bailout);
-    DeoptimizeIf(no_condition, instr->environment());
+    // TODO(olivf) Converting a number on the fpu is actually quite slow. We
+    // should first try a fast conversion and then bail out to this slow case.
+    __ cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
+           isolate()->factory()->heap_number_map());
+    DeoptimizeIf(not_equal, instr, "not a heap number");
+
+    __ sub(esp, Immediate(kPointerSize));
+    __ fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
+
+    if (instr->hydrogen()->GetMinusZeroMode() == FAIL_ON_MINUS_ZERO) {
+      Label no_precision_lost, not_nan, zero_check;
+      __ fld(0);
+
+      __ fist_s(MemOperand(esp, 0));
+      __ fild_s(MemOperand(esp, 0));
+      __ FCmp();
+      __ pop(input_reg);
+
+      __ j(equal, &no_precision_lost, Label::kNear);
+      __ fstp(0);
+      DeoptimizeIf(no_condition, instr, "lost precision");
+      __ bind(&no_precision_lost);
+
+      __ j(parity_odd, &not_nan);
+      __ fstp(0);
+      DeoptimizeIf(no_condition, instr, "NaN");
+      __ bind(&not_nan);
+
+      __ test(input_reg, Operand(input_reg));
+      __ j(zero, &zero_check, Label::kNear);
+      __ fstp(0);
+      __ jmp(done);
+
+      __ bind(&zero_check);
+      // To check for minus zero, store the value as a 32-bit float and test
+      // the bit pattern: -0.0f has only the sign bit set, so it is non-zero
+      // as an integer, while +0.0f is all zeros.
+      __ sub(esp, Immediate(kPointerSize));
+      __ fstp_s(Operand(esp, 0));
+      __ pop(input_reg);
+      __ test(input_reg, Operand(input_reg));
+      DeoptimizeIf(not_zero, instr, "minus zero");
+    } else {
+      __ fist_s(MemOperand(esp, 0));
+      __ fild_s(MemOperand(esp, 0));
+      __ FCmp();
+      __ pop(input_reg);
+      DeoptimizeIf(not_equal, instr, "lost precision");
+      DeoptimizeIf(parity_even, instr, "NaN");
+    }
   }
 }
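
The minus-zero test works because +0.0 and -0.0 compare equal yet differ in the sign bit; storing the value as a 32-bit float and testing the raw word exposes that difference. In isolation:

#include <cstdint>
#include <cstring>

// -0.0f is 0x80000000 while +0.0f is all zeros, so an integer test on
// the stored bits distinguishes values that floating-point == cannot.
bool IsMinusZero(float value) {
  uint32_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits == 0x80000000u;
}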
 
 
 void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
-  class DeferredTaggedToI V8_FINAL : public LDeferredCode {
+  class DeferredTaggedToI FINAL : public LDeferredCode {
    public:
     DeferredTaggedToI(LCodeGen* codegen,
                       LTaggedToI* instr,
                       const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredTaggedToI(instr_, done());
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LTaggedToI* instr_;
   };
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   Register input_reg = ToRegister(input);
-  ASSERT(input_reg.is(ToRegister(instr->result())));
+  DCHECK(input_reg.is(ToRegister(instr->result())));
 
   if (instr->hydrogen()->value()->representation().IsSmi()) {
     __ SmiUntag(input_reg);
@@ -4740,36 +5320,29 @@
 
 void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   LOperand* temp = instr->temp();
-  ASSERT(temp->IsRegister());
+  DCHECK(temp->IsRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsDoubleRegister());
+  DCHECK(result->IsDoubleRegister());
 
   Register input_reg = ToRegister(input);
-  bool deoptimize_on_minus_zero =
-      instr->hydrogen()->deoptimize_on_minus_zero();
   Register temp_reg = ToRegister(temp);
 
   HValue* value = instr->hydrogen()->value();
   NumberUntagDMode mode = value->representation().IsSmi()
       ? NUMBER_CANDIDATE_IS_SMI : NUMBER_CANDIDATE_IS_ANY_TAGGED;
 
-  EmitNumberUntagDNoSSE2(input_reg,
-                         temp_reg,
-                         ToX87Register(result),
-                         instr->hydrogen()->can_convert_undefined_to_nan(),
-                         deoptimize_on_minus_zero,
-                         instr->environment(),
+  EmitNumberUntagDNoSSE2(instr, input_reg, temp_reg, ToX87Register(result),
                          mode);
 }
 
 
 void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsDoubleRegister());
+  DCHECK(input->IsDoubleRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsRegister());
+  DCHECK(result->IsRegister());
   Register result_reg = ToRegister(result);
 
   if (instr->truncating()) {
@@ -4777,14 +5350,19 @@
     X87Fxch(input_reg);
     __ TruncateX87TOSToI(result_reg);
   } else {
-    Label bailout, done;
+    Label lost_precision, is_nan, minus_zero, done;
     X87Register input_reg = ToX87Register(input);
     X87Fxch(input_reg);
+    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
     __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
-                 &bailout, Label::kNear);
-    __ jmp(&done, Label::kNear);
-    __ bind(&bailout);
-    DeoptimizeIf(no_condition, instr->environment());
+                 &lost_precision, &is_nan, &minus_zero, dist);
+    __ jmp(&done);
+    __ bind(&lost_precision);
+    DeoptimizeIf(no_condition, instr, "lost precision");
+    __ bind(&is_nan);
+    DeoptimizeIf(no_condition, instr, "NaN");
+    __ bind(&minus_zero);
+    DeoptimizeIf(no_condition, instr, "minus zero");
     __ bind(&done);
   }
 }
@@ -4792,30 +5370,34 @@
 
 void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   LOperand* input = instr->value();
-  ASSERT(input->IsDoubleRegister());
+  DCHECK(input->IsDoubleRegister());
   LOperand* result = instr->result();
-  ASSERT(result->IsRegister());
+  DCHECK(result->IsRegister());
   Register result_reg = ToRegister(result);
 
-  Label bailout, done;
+  Label lost_precision, is_nan, minus_zero, done;
   X87Register input_reg = ToX87Register(input);
   X87Fxch(input_reg);
+  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
-      &bailout, Label::kNear);
-  __ jmp(&done, Label::kNear);
-  __ bind(&bailout);
-  DeoptimizeIf(no_condition, instr->environment());
+               &lost_precision, &is_nan, &minus_zero, dist);
+  __ jmp(&done);
+  __ bind(&lost_precision);
+  DeoptimizeIf(no_condition, instr, "lost precision");
+  __ bind(&is_nan);
+  DeoptimizeIf(no_condition, instr, "NaN");
+  __ bind(&minus_zero);
+  DeoptimizeIf(no_condition, instr, "minus zero");
   __ bind(&done);
-
   __ SmiTag(result_reg);
-  DeoptimizeIf(overflow, instr->environment());
+  DeoptimizeIf(overflow, instr, "overflow");
 }
 
 
 void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
   LOperand* input = instr->value();
   __ test(ToOperand(input), Immediate(kSmiTagMask));
-  DeoptimizeIf(not_zero, instr->environment());
+  DeoptimizeIf(not_zero, instr, "not a Smi");
 }
 
 
@@ -4823,7 +5405,7 @@
   if (!instr->hydrogen()->value()->type().IsHeapObject()) {
     LOperand* input = instr->value();
     __ test(ToOperand(input), Immediate(kSmiTagMask));
-    DeoptimizeIf(zero, instr->environment());
+    DeoptimizeIf(zero, instr, "Smi");
   }
 }
 
@@ -4844,14 +5426,14 @@
 
     // If there is only one type in the interval check for equality.
     if (first == last) {
-      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     } else {
-      DeoptimizeIf(below, instr->environment());
+      DeoptimizeIf(below, instr, "wrong instance type");
       // Omit check for the last type.
       if (last != LAST_TYPE) {
         __ cmpb(FieldOperand(temp, Map::kInstanceTypeOffset),
                 static_cast<int8_t>(last));
-        DeoptimizeIf(above, instr->environment());
+        DeoptimizeIf(above, instr, "wrong instance type");
       }
     }
   } else {
@@ -4859,15 +5441,15 @@
     uint8_t tag;
     instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
 
-    if (IsPowerOf2(mask)) {
-      ASSERT(tag == 0 || IsPowerOf2(tag));
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
       __ test_b(FieldOperand(temp, Map::kInstanceTypeOffset), mask);
-      DeoptimizeIf(tag == 0 ? not_zero : zero, instr->environment());
+      DeoptimizeIf(tag == 0 ? not_zero : zero, instr, "wrong instance type");
     } else {
       __ movzx_b(temp, FieldOperand(temp, Map::kInstanceTypeOffset));
       __ and_(temp, mask);
       __ cmp(temp, tag);
-      DeoptimizeIf(not_equal, instr->environment());
+      DeoptimizeIf(not_equal, instr, "wrong instance type");
     }
   }
 }
@@ -4883,7 +5465,7 @@
     Operand operand = ToOperand(instr->value());
     __ cmp(operand, object);
   }
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "value mismatch");
 }
 
 
@@ -4892,18 +5474,18 @@
     PushSafepointRegistersScope scope(this);
     __ push(object);
     __ xor_(esi, esi);
-    __ CallRuntime(Runtime::kTryMigrateInstance);
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
     RecordSafepointWithRegisters(
         instr->pointer_map(), 1, Safepoint::kNoLazyDeopt);
 
     __ test(eax, Immediate(kSmiTagMask));
   }
-  DeoptimizeIf(zero, instr->environment());
+  DeoptimizeIf(zero, instr, "instance migration failed");
 }
 
 
 void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
-  class DeferredCheckMaps V8_FINAL : public LDeferredCode {
+  class DeferredCheckMaps FINAL : public LDeferredCode {
    public:
     DeferredCheckMaps(LCodeGen* codegen,
                       LCheckMaps* instr,
@@ -4912,11 +5494,11 @@
         : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -4932,7 +5514,7 @@
   }
 
   LOperand* input = instr->value();
-  ASSERT(input->IsRegister());
+  DCHECK(input->IsRegister());
   Register reg = ToRegister(input);
 
   DeferredCheckMaps* deferred = NULL;
@@ -4954,7 +5536,7 @@
   if (instr->hydrogen()->HasMigrationTarget()) {
     __ j(not_equal, deferred->entry());
   } else {
-    DeoptimizeIf(not_equal, instr->environment());
+    DeoptimizeIf(not_equal, instr, "wrong map");
   }
 
   __ bind(&success);
@@ -4962,12 +5544,15 @@
 
 
 void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
-  UNREACHABLE();
+  X87Register value_reg = ToX87Register(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  X87Fxch(value_reg);
+  __ ClampTOSToUint8(result_reg);
 }
 
 
 void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
-  ASSERT(instr->unclamped()->Equals(instr->result()));
+  DCHECK(instr->unclamped()->Equals(instr->result()));
   Register value_reg = ToRegister(instr->result());
   __ ClampUint8(value_reg);
 }
@@ -4992,7 +5577,7 @@
   // Check for undefined. Undefined is converted to zero for clamping
   // conversions.
   __ cmp(input_reg, factory()->undefined_value());
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "not a heap number/undefined");
   __ jmp(&zero_result, Label::kNear);
 
   // Heap number
@@ -5096,26 +5681,46 @@
 
 
 void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
-  UNREACHABLE();
+  X87Register value_reg = ToX87Register(instr->value());
+  Register result_reg = ToRegister(instr->result());
+  X87Fxch(value_reg);
+  __ sub(esp, Immediate(kDoubleSize));
+  __ fst_d(Operand(esp, 0));
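+  // ia32 is little-endian: the low word of the double is at [esp], the high
+  // word at [esp + kPointerSize].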
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ mov(result_reg, Operand(esp, kPointerSize));
+  } else {
+    __ mov(result_reg, Operand(esp, 0));
+  }
+  __ add(esp, Immediate(kDoubleSize));
 }
 
 
 void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
-  UNREACHABLE();
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  X87Register result_reg = ToX87Register(instr->result());
+  // Follow the pattern below when writing an x87 FP register.
+  X87PrepareToWrite(result_reg);
+  __ sub(esp, Immediate(kDoubleSize));
+  __ mov(Operand(esp, 0), lo_reg);
+  __ mov(Operand(esp, kPointerSize), hi_reg);
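+  // Load the eight bytes just written back onto the FPU stack as one double.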
+  __ fld_d(Operand(esp, 0));
+  __ add(esp, Immediate(kDoubleSize));
+  X87CommitWrite(result_reg);
 }
 
 
 void LCodeGen::DoAllocate(LAllocate* instr) {
-  class DeferredAllocate V8_FINAL : public LDeferredCode {
+  class DeferredAllocate FINAL : public LDeferredCode {
    public:
     DeferredAllocate(LCodeGen* codegen,
                      LAllocate* instr,
                      const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredAllocate(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LAllocate* instr_;
   };
@@ -5132,11 +5737,11 @@
     flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
   }
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
   }
 
@@ -5184,7 +5789,7 @@
   PushSafepointRegistersScope scope(this);
   if (instr->size()->IsRegister()) {
     Register size = ToRegister(instr->size());
-    ASSERT(!size.is(result));
+    DCHECK(!size.is(result));
     __ SmiTag(ToRegister(instr->size()));
     __ push(size);
   } else {
@@ -5201,11 +5806,11 @@
   int flags = AllocateDoubleAlignFlag::encode(
       instr->hydrogen()->MustAllocateDoubleAligned());
   if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsOldDataSpaceAllocation());
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
   } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
-    ASSERT(!instr->hydrogen()->IsNewSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
     flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
   } else {
     flags = AllocateTargetSpace::update(flags, NEW_SPACE);
@@ -5213,20 +5818,20 @@
   __ push(Immediate(Smi::FromInt(flags)));
 
   CallRuntimeFromDeferred(
-      Runtime::kHiddenAllocateInTargetSpace, 2, instr, instr->context());
+      Runtime::kAllocateInTargetSpace, 2, instr, instr->context());
   __ StoreToSafepointRegisterSlot(result, eax);
 }
 
 
 void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
-  ASSERT(ToRegister(instr->value()).is(eax));
+  DCHECK(ToRegister(instr->value()).is(eax));
   __ push(eax);
   CallRuntime(Runtime::kToFastProperties, 1, instr);
 }
 
 
 void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   Label materialized;
   // Registers will be used as follows:
   // ecx = literals array.
@@ -5246,7 +5851,7 @@
   __ push(Immediate(Smi::FromInt(instr->hydrogen()->literal_index())));
   __ push(Immediate(instr->hydrogen()->pattern()));
   __ push(Immediate(instr->hydrogen()->flags()));
-  CallRuntime(Runtime::kHiddenMaterializeRegExpLiteral, 4, instr);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
   __ mov(ebx, eax);
 
   __ bind(&materialized);
@@ -5258,7 +5863,7 @@
   __ bind(&runtime_allocate);
   __ push(ebx);
   __ push(Immediate(Smi::FromInt(size)));
-  CallRuntime(Runtime::kHiddenAllocateInNewSpace, 1, instr);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
   __ pop(ebx);
 
   __ bind(&allocated);
@@ -5278,14 +5883,13 @@
 
 
 void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   // Use the fast case closure allocation code that allocates in new
   // space for nested functions that don't need literals cloning.
   bool pretenure = instr->hydrogen()->pretenure();
   if (!pretenure && instr->hydrogen()->has_no_literals()) {
-    FastNewClosureStub stub(isolate(),
-                            instr->hydrogen()->strict_mode(),
-                            instr->hydrogen()->is_generator());
+    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+                            instr->hydrogen()->kind());
     __ mov(ebx, Immediate(instr->hydrogen()->shared_info()));
     CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
   } else {
@@ -5293,13 +5897,13 @@
     __ push(Immediate(instr->hydrogen()->shared_info()));
     __ push(Immediate(pretenure ? factory()->true_value()
                                 : factory()->false_value()));
-    CallRuntime(Runtime::kHiddenNewClosure, 3, instr);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
   }
 }
 
 
 void LCodeGen::DoTypeof(LTypeof* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   LOperand* input = instr->value();
   EmitPushTaggedOperand(input);
   CallRuntime(Runtime::kTypeof, 1, instr);
@@ -5353,11 +5957,6 @@
     __ cmp(input, factory()->false_value());
     final_branch_condition = equal;
 
-  } else if (FLAG_harmony_typeof &&
-             String::Equals(type_name, factory()->null_string())) {
-    __ cmp(input, factory()->null_value());
-    final_branch_condition = equal;
-
   } else if (String::Equals(type_name, factory()->undefined_string())) {
     __ cmp(input, factory()->undefined_value());
     __ j(equal, true_label, true_distance);
@@ -5378,10 +5977,8 @@
 
   } else if (String::Equals(type_name, factory()->object_string())) {
     __ JumpIfSmi(input, false_label, false_distance);
-    if (!FLAG_harmony_typeof) {
-      __ cmp(input, factory()->null_value());
-      __ j(equal, true_label, true_distance);
-    }
+    __ cmp(input, factory()->null_value());
+    __ j(equal, true_label, true_distance);
     __ CmpObjectType(input, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE, input);
     __ j(below, false_label, false_distance);
     __ CmpInstanceType(input, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
@@ -5440,7 +6037,7 @@
 
 void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
   last_lazy_deopt_pc_ = masm()->pc_offset();
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
@@ -5456,8 +6053,7 @@
   if (info()->IsStub() && type == Deoptimizer::EAGER) {
     type = Deoptimizer::LAZY;
   }
-  Comment(";;; deoptimize: %s", instr->hydrogen()->reason());
-  DeoptimizeIf(no_condition, instr->environment(), type);
+  DeoptimizeIf(no_condition, instr, instr->hydrogen()->reason(), type);
 }
 
 
@@ -5474,31 +6070,31 @@
 void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
   PushSafepointRegistersScope scope(this);
   __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  __ CallRuntime(Runtime::kHiddenStackGuard);
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
   RecordSafepointWithLazyDeopt(
       instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
 }
 
 
 void LCodeGen::DoStackCheck(LStackCheck* instr) {
-  class DeferredStackCheck V8_FINAL : public LDeferredCode {
+  class DeferredStackCheck FINAL : public LDeferredCode {
    public:
     DeferredStackCheck(LCodeGen* codegen,
                        LStackCheck* instr,
                        const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredStackCheck(instr_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LStackCheck* instr_;
   };
 
-  ASSERT(instr->HasEnvironment());
+  DCHECK(instr->HasEnvironment());
   LEnvironment* env = instr->environment();
   // There is no LLazyBailout instruction for stack-checks. We have to
   // prepare for lazy deoptimization explicitly here.
@@ -5510,14 +6106,14 @@
     __ cmp(esp, Operand::StaticVariable(stack_limit));
     __ j(above_equal, &done, Label::kNear);
 
-    ASSERT(instr->context()->IsRegister());
-    ASSERT(ToRegister(instr->context()).is(esi));
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(esi));
     CallCode(isolate()->builtins()->StackCheck(),
              RelocInfo::CODE_TARGET,
              instr);
     __ bind(&done);
   } else {
-    ASSERT(instr->hydrogen()->is_backwards_branch());
+    DCHECK(instr->hydrogen()->is_backwards_branch());
     // Perform stack overflow check if this goto needs it before jumping.
     DeferredStackCheck* deferred_stack_check =
         new(zone()) DeferredStackCheck(this, instr, x87_stack_);
@@ -5544,7 +6140,7 @@
 
   // If the environment were already registered, we would have no way of
   // backpatching it with the spill slot operands.
-  ASSERT(!environment->HasBeenRegistered());
+  DCHECK(!environment->HasBeenRegistered());
   RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
 
   GenerateOsrPrologue();
@@ -5552,19 +6148,19 @@
 
 
 void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
-  ASSERT(ToRegister(instr->context()).is(esi));
+  DCHECK(ToRegister(instr->context()).is(esi));
   __ cmp(eax, isolate()->factory()->undefined_value());
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "undefined");
 
   __ cmp(eax, isolate()->factory()->null_value());
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "null");
 
   __ test(eax, Immediate(kSmiTagMask));
-  DeoptimizeIf(zero, instr->environment());
+  DeoptimizeIf(zero, instr, "Smi");
 
   STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
   __ CmpObjectType(eax, LAST_JS_PROXY_TYPE, ecx);
-  DeoptimizeIf(below_equal, instr->environment());
+  DeoptimizeIf(below_equal, instr, "wrong instance type");
 
   Label use_cache, call_runtime;
   __ CheckEnumCache(&call_runtime);
@@ -5579,7 +6175,7 @@
 
   __ cmp(FieldOperand(eax, HeapObject::kMapOffset),
          isolate()->factory()->meta_map());
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "wrong map");
   __ bind(&use_cache);
 }
 
@@ -5602,7 +6198,7 @@
          FieldOperand(result, FixedArray::SizeFor(instr->idx())));
   __ bind(&done);
   __ test(result, result);
-  DeoptimizeIf(equal, instr->environment());
+  DeoptimizeIf(equal, instr, "no cache");
 }
 
 
@@ -5610,7 +6206,7 @@
   Register object = ToRegister(instr->value());
   __ cmp(ToRegister(instr->map()),
          FieldOperand(object, HeapObject::kMapOffset));
-  DeoptimizeIf(not_equal, instr->environment());
+  DeoptimizeIf(not_equal, instr, "wrong map");
 }
 
 
@@ -5621,7 +6217,7 @@
   __ push(object);
   __ push(index);
   __ xor_(esi, esi);
-  __ CallRuntime(Runtime::kLoadMutableDouble);
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
   RecordSafepointWithRegisters(
       instr->pointer_map(), 2, Safepoint::kNoLazyDeopt);
   __ StoreToSafepointRegisterSlot(object, eax);
@@ -5629,7 +6225,7 @@
 
 
 void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
-  class DeferredLoadMutableDouble V8_FINAL : public LDeferredCode {
+  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
    public:
     DeferredLoadMutableDouble(LCodeGen* codegen,
                               LLoadFieldByIndex* instr,
@@ -5641,10 +6237,10 @@
           object_(object),
           index_(index) {
     }
-    virtual void Generate() V8_OVERRIDE {
+    virtual void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
     }
-    virtual LInstruction* instr() V8_OVERRIDE { return instr_; }
+    virtual LInstruction* instr() OVERRIDE { return instr_; }
    private:
     LLoadFieldByIndex* instr_;
     Register object_;
@@ -5695,7 +6291,7 @@
   Handle<ScopeInfo> scope_info = instr->scope_info();
   __ Push(scope_info);
   __ push(ToRegister(instr->function()));
-  CallRuntime(Runtime::kHiddenPushBlockContext, 2, instr);
+  CallRuntime(Runtime::kPushBlockContext, 2, instr);
   RecordSafepoint(Safepoint::kNoLazyDeopt);
 }
 
diff --git a/src/x87/lithium-codegen-x87.h b/src/x87/lithium-codegen-x87.h
index a84b49c..2f4a8d3 100644
--- a/src/x87/lithium-codegen-x87.h
+++ b/src/x87/lithium-codegen-x87.h
@@ -5,15 +5,16 @@
 #ifndef V8_X87_LITHIUM_CODEGEN_X87_H_
 #define V8_X87_LITHIUM_CODEGEN_X87_H_
 
+#include <map>
 #include "src/x87/lithium-x87.h"
 
-#include "src/checks.h"
+#include "src/base/logging.h"
 #include "src/deoptimizer.h"
-#include "src/x87/lithium-gap-resolver-x87.h"
 #include "src/lithium-codegen.h"
 #include "src/safepoint-table.h"
 #include "src/scopes.h"
 #include "src/utils.h"
+#include "src/x87/lithium-gap-resolver-x87.h"
 
 namespace v8 {
 namespace internal {
@@ -84,6 +85,8 @@
       X87OperandType operand = kX87DoubleOperand);
   void X87Mov(Operand src, X87Register reg,
       X87OperandType operand = kX87DoubleOperand);
+  void X87Mov(X87Register reg, X87Register src,
+              X87OperandType operand = kX87DoubleOperand);
 
   void X87PrepareBinaryOp(
       X87Register left, X87Register right, X87Register result);
@@ -174,8 +177,8 @@
 
   // Code generation passes.  Returns true if code generation should
   // continue.
-  void GenerateBodyInstructionPre(LInstruction* instr) V8_OVERRIDE;
-  void GenerateBodyInstructionPost(LInstruction* instr) V8_OVERRIDE;
+  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+  void GenerateBodyInstructionPost(LInstruction* instr) OVERRIDE;
   bool GeneratePrologue();
   bool GenerateDeferredCode();
   bool GenerateJumpTable();
@@ -198,9 +201,8 @@
                        LInstruction* instr,
                        SafepointMode safepoint_mode);
 
-  void CallRuntime(const Runtime::Function* fun,
-                   int argc,
-                   LInstruction* instr);
+  void CallRuntime(const Runtime::Function* fun, int argc, LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
 
   void CallRuntime(Runtime::FunctionId id,
                    int argc,
@@ -234,10 +236,9 @@
 
   void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
                                             Safepoint::DeoptMode mode);
-  void DeoptimizeIf(Condition cc,
-                    LEnvironment* environment,
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail,
                     Deoptimizer::BailoutType bailout_type);
-  void DeoptimizeIf(Condition cc, LEnvironment* environment);
+  void DeoptimizeIf(Condition cc, LInstruction* instr, const char* detail);
 
   bool DeoptEveryNTimes() {
     return FLAG_deopt_every_n_times != 0 && !info()->IsStub();
@@ -284,7 +285,7 @@
                                     int arguments,
                                     Safepoint::DeoptMode mode);
 
-  void RecordAndWritePosition(int position) V8_OVERRIDE;
+  void RecordAndWritePosition(int position) OVERRIDE;
 
   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
   void EmitGoto(int block);
@@ -294,14 +295,9 @@
   void EmitBranch(InstrType instr, Condition cc);
   template<class InstrType>
   void EmitFalseBranch(InstrType instr, Condition cc);
-  void EmitNumberUntagDNoSSE2(
-      Register input,
-      Register temp,
-      X87Register res_reg,
-      bool allow_undefined_as_nan,
-      bool deoptimize_on_minus_zero,
-      LEnvironment* env,
-      NumberUntagDMode mode = NUMBER_CANDIDATE_IS_ANY_TAGGED);
+  void EmitNumberUntagDNoSSE2(LNumberUntagD* instr, Register input,
+                              Register temp, X87Register res_reg,
+                              NumberUntagDMode mode);
 
   // Emits optimized code for typeof x == "y".  Modifies input register.
   // Returns the condition on which a final split to
@@ -336,7 +332,7 @@
                     int* offset,
                     AllocationSiteMode mode);
 
-  void EnsureSpaceForLazyDeopt(int space_needed) V8_OVERRIDE;
+  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
   void DoLoadKeyedExternalArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
   void DoLoadKeyedFixedArray(LLoadKeyed* instr);
@@ -344,6 +340,9 @@
   void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
   void DoStoreKeyedFixedArray(LStoreKeyed* instr);
 
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+
   void EmitReturn(LReturn* instr, bool dynamic_frame_alignment);
 
   // Emits code for pushing either a tagged constant, a (non-double)
@@ -378,7 +377,7 @@
   int osr_pc_offset_;
   bool frame_is_built_;
 
-  class X87Stack {
+  class X87Stack : public ZoneObject {
    public:
     explicit X87Stack(MacroAssembler* masm)
         : stack_depth_(0), is_mutable_(true), masm_(masm) { }
@@ -395,21 +394,30 @@
       }
       return true;
     }
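+    // Copies the tracked FPU stack layout from another stack snapshot.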
+    X87Stack& operator=(const X87Stack& other) {
+      stack_depth_ = other.stack_depth_;
+      for (int i = 0; i < stack_depth_; i++) {
+        stack_[i] = other.stack_[i];
+      }
+      return *this;
+    }
     bool Contains(X87Register reg);
     void Fxch(X87Register reg, int other_slot = 0);
     void Free(X87Register reg);
     void PrepareToWrite(X87Register reg);
     void CommitWrite(X87Register reg);
     void FlushIfNecessary(LInstruction* instr, LCodeGen* cgen);
-    void LeavingBlock(int current_block_id, LGoto* goto_instr);
+    void LeavingBlock(int current_block_id, LGoto* goto_instr, LCodeGen* cgen);
     int depth() const { return stack_depth_; }
+    int GetLayout();
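+    // Returns the st(i) position currently holding |reg|.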
+    int st(X87Register reg) { return st2idx(ArrayIndex(reg)); }
     void pop() {
-      ASSERT(is_mutable_);
+      DCHECK(is_mutable_);
       stack_depth_--;
     }
     void push(X87Register reg) {
-      ASSERT(is_mutable_);
-      ASSERT(stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
+      DCHECK(is_mutable_);
+      DCHECK(stack_depth_ < X87Register::kMaxNumAllocatableRegisters);
       stack_[stack_depth_] = reg;
       stack_depth_++;
     }
@@ -427,6 +435,9 @@
     MacroAssembler* masm_;
   };
   X87Stack x87_stack_;
+  // Maps block_id -> X87Stack*.
+  typedef std::map<int, X87Stack*> X87StackMap;
+  X87StackMap x87_stack_map_;
 
   // Builder that keeps track of safepoints in the code. The table
   // itself is emitted at the end of the generated code.
@@ -437,18 +448,18 @@
 
   Safepoint::Kind expected_safepoint_kind_;
 
-  class PushSafepointRegistersScope V8_FINAL  BASE_EMBEDDED {
+  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
    public:
     explicit PushSafepointRegistersScope(LCodeGen* codegen)
         : codegen_(codegen) {
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
       codegen_->masm_->PushSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
-      ASSERT(codegen_->info()->is_calling());
+      DCHECK(codegen_->info()->is_calling());
     }
 
     ~PushSafepointRegistersScope() {
-      ASSERT(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
       codegen_->masm_->PopSafepointRegisters();
       codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
     }
@@ -460,6 +471,7 @@
   friend class LDeferredCode;
   friend class LEnvironment;
   friend class SafepointGenerator;
+  friend class X87Stack;
   DISALLOW_COPY_AND_ASSIGN(LCodeGen);
 };
 
diff --git a/src/x87/lithium-gap-resolver-x87.cc b/src/x87/lithium-gap-resolver-x87.cc
index 9ebfa3a..6a64275 100644
--- a/src/x87/lithium-gap-resolver-x87.cc
+++ b/src/x87/lithium-gap-resolver-x87.cc
@@ -6,8 +6,8 @@
 
 #if V8_TARGET_ARCH_X87
 
-#include "src/x87/lithium-gap-resolver-x87.h"
 #include "src/x87/lithium-codegen-x87.h"
+#include "src/x87/lithium-gap-resolver-x87.h"
 
 namespace v8 {
 namespace internal {
@@ -21,7 +21,7 @@
 
 
 void LGapResolver::Resolve(LParallelMove* parallel_move) {
-  ASSERT(HasBeenReset());
+  DCHECK(HasBeenReset());
   // Build up a worklist of moves.
   BuildInitialMoveList(parallel_move);
 
@@ -38,13 +38,13 @@
   // Perform the moves with constant sources.
   for (int i = 0; i < moves_.length(); ++i) {
     if (!moves_[i].IsEliminated()) {
-      ASSERT(moves_[i].source()->IsConstantOperand());
+      DCHECK(moves_[i].source()->IsConstantOperand());
       EmitMove(i);
     }
   }
 
   Finish();
-  ASSERT(HasBeenReset());
+  DCHECK(HasBeenReset());
 }
 
 
@@ -70,12 +70,12 @@
   // which means that a call to PerformMove could change any source operand
   // in the move graph.
 
-  ASSERT(!moves_[index].IsPending());
-  ASSERT(!moves_[index].IsRedundant());
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
 
   // Clear this move's destination to indicate a pending move.  The actual
   // destination is saved on the side.
-  ASSERT(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
   LOperand* destination = moves_[index].destination();
   moves_[index].set_destination(NULL);
 
@@ -116,7 +116,7 @@
   for (int i = 0; i < moves_.length(); ++i) {
     LMoveOperands other_move = moves_[i];
     if (other_move.Blocks(destination)) {
-      ASSERT(other_move.IsPending());
+      DCHECK(other_move.IsPending());
       EmitSwap(index);
       return;
     }
@@ -142,13 +142,13 @@
   LOperand* source = moves_[index].source();
   if (source->IsRegister()) {
     --source_uses_[source->index()];
-    ASSERT(source_uses_[source->index()] >= 0);
+    DCHECK(source_uses_[source->index()] >= 0);
   }
 
   LOperand* destination = moves_[index].destination();
   if (destination->IsRegister()) {
     --destination_uses_[destination->index()];
-    ASSERT(destination_uses_[destination->index()] >= 0);
+    DCHECK(destination_uses_[destination->index()] >= 0);
   }
 
   moves_[index].Eliminate();
@@ -190,12 +190,12 @@
 
 
 void LGapResolver::Verify() {
-#ifdef ENABLE_SLOW_ASSERTS
+#ifdef ENABLE_SLOW_DCHECKS
   // No operand should be the destination for more than one move.
   for (int i = 0; i < moves_.length(); ++i) {
     LOperand* destination = moves_[i].destination();
     for (int j = i + 1; j < moves_.length(); ++j) {
-      SLOW_ASSERT(!destination->Equals(moves_[j].destination()));
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
     }
   }
 #endif
@@ -259,13 +259,13 @@
   // Dispatch on the source and destination operand kinds.  Not all
   // combinations are possible.
   if (source->IsRegister()) {
-    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
     Register src = cgen_->ToRegister(source);
     Operand dst = cgen_->ToOperand(destination);
     __ mov(dst, src);
 
   } else if (source->IsStackSlot()) {
-    ASSERT(destination->IsRegister() || destination->IsStackSlot());
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
     Operand src = cgen_->ToOperand(source);
     if (destination->IsRegister()) {
       Register dst = cgen_->ToRegister(destination);
@@ -292,7 +292,7 @@
       }
     } else if (destination->IsDoubleRegister()) {
       double v = cgen_->ToDouble(constant_source);
-      uint64_t int_val = BitCast<uint64_t, double>(v);
+      uint64_t int_val = bit_cast<uint64_t, double>(v);
       int32_t lower = static_cast<int32_t>(int_val);
       int32_t upper = static_cast<int32_t>(int_val >> kBitsPerInt);
       __ push(Immediate(upper));
@@ -301,7 +301,7 @@
       cgen_->X87Mov(dst, MemOperand(esp, 0));
       __ add(esp, Immediate(kDoubleSize));
     } else {
-      ASSERT(destination->IsStackSlot());
+      DCHECK(destination->IsStackSlot());
       Operand dst = cgen_->ToOperand(destination);
       Representation r = cgen_->IsSmi(constant_source)
           ? Representation::Smi() : Representation::Integer32();
@@ -317,10 +317,15 @@
   } else if (source->IsDoubleRegister()) {
-    // load from the register onto the stack, store in destination, which must
-    // be a double stack slot in the non-SSE2 case.
+    // Load from the register onto the FPU stack and store in the destination,
+    // which is either a double stack slot or another x87 register.
-    ASSERT(destination->IsDoubleStackSlot());
-    Operand dst = cgen_->ToOperand(destination);
-    X87Register src = cgen_->ToX87Register(source);
-    cgen_->X87Mov(dst, src);
+    if (destination->IsDoubleStackSlot()) {
+      Operand dst = cgen_->ToOperand(destination);
+      X87Register src = cgen_->ToX87Register(source);
+      cgen_->X87Mov(dst, src);
+    } else {
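+      // Both operands are x87 registers; move the value within the FPU stack.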
+      X87Register dst = cgen_->ToX87Register(destination);
+      X87Register src = cgen_->ToX87Register(source);
+      cgen_->X87Mov(dst, src);
+    }
   } else if (source->IsDoubleStackSlot()) {
     // load from the stack slot on top of the floating point stack, and then
     // store in destination. If destination is a double register, then it
diff --git a/src/x87/lithium-gap-resolver-x87.h b/src/x87/lithium-gap-resolver-x87.h
index 737660c..4d1496b 100644
--- a/src/x87/lithium-gap-resolver-x87.h
+++ b/src/x87/lithium-gap-resolver-x87.h
@@ -15,7 +15,7 @@
 class LCodeGen;
 class LGapResolver;
 
-class LGapResolver V8_FINAL BASE_EMBEDDED {
+class LGapResolver FINAL BASE_EMBEDDED {
  public:
   explicit LGapResolver(LCodeGen* owner);
 
diff --git a/src/x87/lithium-x87.cc b/src/x87/lithium-x87.cc
index 707783d..9304b89 100644
--- a/src/x87/lithium-x87.cc
+++ b/src/x87/lithium-x87.cc
@@ -6,10 +6,9 @@
 
 #if V8_TARGET_ARCH_X87
 
-#include "src/lithium-allocator-inl.h"
-#include "src/x87/lithium-x87.h"
-#include "src/x87/lithium-codegen-x87.h"
 #include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
+#include "src/x87/lithium-codegen-x87.h"
 
 namespace v8 {
 namespace internal {
@@ -28,17 +27,17 @@
   // outputs because all registers are blocked by the calling convention.
   // Input operands must use a fixed register or use-at-start policy or
   // a non-register policy.
-  ASSERT(Output() == NULL ||
+  DCHECK(Output() == NULL ||
          LUnallocated::cast(Output())->HasFixedPolicy() ||
          !LUnallocated::cast(Output())->HasRegisterPolicy());
   for (UseIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||
+    DCHECK(operand->HasFixedPolicy() ||
            operand->IsUsedAtStart());
   }
   for (TempIterator it(this); !it.Done(); it.Advance()) {
     LUnallocated* operand = LUnallocated::cast(it.Current());
-    ASSERT(operand->HasFixedPolicy() ||!operand->HasRegisterPolicy());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
   }
 }
 #endif
@@ -369,7 +368,7 @@
   if (kind == DOUBLE_REGISTERS) {
     return LDoubleStackSlot::Create(index, zone());
   } else {
-    ASSERT(kind == GENERAL_REGISTERS);
+    DCHECK(kind == GENERAL_REGISTERS);
     return LStackSlot::Create(index, zone());
   }
 }
@@ -377,8 +376,9 @@
 
 void LStoreNamedField::PrintDataTo(StringStream* stream) {
   object()->PrintTo(stream);
-  hydrogen()->access().PrintTo(stream);
-  stream->Add(" <- ");
+  OStringStream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.c_str());
   value()->PrintTo(stream);
 }
 
@@ -415,7 +415,7 @@
   }
 
   if (value() == NULL) {
-    ASSERT(hydrogen()->IsConstantHoleStore() &&
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
            hydrogen()->value()->representation().IsDouble());
     stream->Add("<the hole(nan)>");
   } else {
@@ -440,7 +440,7 @@
 
 
 LPlatformChunk* LChunkBuilder::Build() {
-  ASSERT(is_unused());
+  DCHECK(is_unused());
   chunk_ = new(zone()) LPlatformChunk(info(), graph());
   LPhase phase("L_Building chunk", chunk_);
   status_ = BUILDING;
@@ -448,7 +448,7 @@
   // Reserve the first spill slot for the state of dynamic alignment.
   if (info()->IsOptimizing()) {
     int alignment_state_index = chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
-    ASSERT_EQ(alignment_state_index, 0);
+    DCHECK_EQ(alignment_state_index, 0);
     USE(alignment_state_index);
   }
 
@@ -472,18 +472,18 @@
 }
 
 
-void LChunkBuilder::Abort(BailoutReason reason) {
-  info()->set_bailout_reason(reason);
-  status_ = ABORTED;
-}
-
-
 LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
   return new(zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
                                   Register::ToAllocationIndex(reg));
 }
 
 
+LUnallocated* LChunkBuilder::ToUnallocated(X87Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                   X87Register::ToAllocationIndex(reg));
+}
+
+
 LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
   return Use(value, ToUnallocated(fixed_register));
 }
@@ -616,6 +616,12 @@
 }
 
 
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         X87Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
 LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
   HEnvironment* hydrogen_env = current_block_->last_environment();
   int argument_index_accumulator = 0;
@@ -656,7 +662,7 @@
 
 
 LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
-  ASSERT(!instr->HasPointerMap());
+  DCHECK(!instr->HasPointerMap());
   instr->set_pointer_map(new(zone()) LPointerMap(zone()));
   return instr;
 }
@@ -677,7 +683,7 @@
 
 LOperand* LChunkBuilder::FixedTemp(Register reg) {
   LUnallocated* operand = ToUnallocated(reg);
-  ASSERT(operand->HasFixedPolicy());
+  DCHECK(operand->HasFixedPolicy());
   return operand;
 }
 
@@ -706,8 +712,8 @@
 LInstruction* LChunkBuilder::DoShift(Token::Value op,
                                      HBitwiseBinaryOperation* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
 
     HValue* right_value = instr->right();
@@ -748,9 +754,9 @@
 
 LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
                                            HArithmeticBinaryOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->left()->representation().IsDouble());
-  ASSERT(instr->right()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
   if (op == Token::MOD) {
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseRegisterAtStart(instr->BetterRightOperand());
@@ -769,8 +775,8 @@
                                            HBinaryOperation* instr) {
   HValue* left = instr->left();
   HValue* right = instr->right();
-  ASSERT(left->representation().IsTagged());
-  ASSERT(right->representation().IsTagged());
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left_operand = UseFixed(left, edx);
   LOperand* right_operand = UseFixed(right, eax);
@@ -781,7 +787,7 @@
 
 
 void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
-  ASSERT(is_building());
+  DCHECK(is_building());
   current_block_ = block;
   next_block_ = next_block;
   if (block->IsStartBlock()) {
@@ -790,13 +796,13 @@
   } else if (block->predecessors()->length() == 1) {
     // We have a single predecessor => copy environment and outgoing
     // argument count from the predecessor.
-    ASSERT(block->phis()->length() == 0);
+    DCHECK(block->phis()->length() == 0);
     HBasicBlock* pred = block->predecessors()->at(0);
     HEnvironment* last_environment = pred->last_environment();
-    ASSERT(last_environment != NULL);
+    DCHECK(last_environment != NULL);
     // Only copy the environment, if it is later used again.
     if (pred->end()->SecondSuccessor() == NULL) {
-      ASSERT(pred->end()->FirstSuccessor() == block);
+      DCHECK(pred->end()->FirstSuccessor() == block);
     } else {
       if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
           pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
@@ -804,7 +810,7 @@
       }
     }
     block->UpdateEnvironment(last_environment);
-    ASSERT(pred->argument_count() >= 0);
+    DCHECK(pred->argument_count() >= 0);
     argument_count_ = pred->argument_count();
   } else {
     // We are at a state join => process phis.
@@ -856,7 +862,7 @@
     if (current->OperandCount() == 0) {
       instr = DefineAsRegister(new(zone()) LDummy());
     } else {
-      ASSERT(!current->OperandAt(0)->IsControlInstruction());
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
       instr = DefineAsRegister(new(zone())
           LDummyUse(UseAny(current->OperandAt(0))));
     }
@@ -872,6 +878,14 @@
     if (current->IsControlInstruction() &&
         HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
         successor != NULL) {
+      // Always insert an FPU register barrier here when a branch is optimized
+      // into a direct goto.
+      // TODO(weiliang): require a better solution.
+      if (!current->IsGoto()) {
+        LClobberDoubles* clobber = new (zone()) LClobberDoubles(isolate());
+        clobber->set_hydrogen_value(current);
+        chunk_->AddInstruction(clobber, current_block_);
+      }
       instr = new(zone()) LGoto(successor);
     } else {
       instr = current->CompileToLithium(this);
@@ -879,7 +893,7 @@
   }
 
   argument_count_ += current->argument_delta();
-  ASSERT(argument_count_ >= 0);
+  DCHECK(argument_count_ >= 0);
 
   if (instr != NULL) {
     AddInstruction(instr, current);
@@ -921,7 +935,7 @@
       LUnallocated* operand = LUnallocated::cast(it.Current());
       if (operand->HasFixedPolicy()) ++fixed;
     }
-    ASSERT(fixed == 0 || used_at_start == 0);
+    DCHECK(fixed == 0 || used_at_start == 0);
   }
 #endif
 
@@ -931,7 +945,8 @@
   if (FLAG_stress_environments && !instr->HasEnvironment()) {
     instr = AssignEnvironment(instr);
   }
-  if (instr->IsGoto() && LGoto::cast(instr)->jumps_to_join()) {
+  if (instr->IsGoto() &&
+      (LGoto::cast(instr)->jumps_to_join() || next_block_->is_osr_entry())) {
     // TODO(olivf) Since phis of spilled values are joined as registers
     // (not in the stack slot), we need to allow the goto gaps to keep one
     // x87 register alive. To ensure all other values are still spilled, we
@@ -979,7 +994,9 @@
   bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
       type.IsJSArray() || type.IsHeapNumber() || type.IsString();
   LOperand* temp = !easy_case && expected.NeedsMap() ? TempRegister() : NULL;
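+  // Without a map temp the input is only read, so it may be used at start.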
-  LInstruction* branch = new(zone()) LBranch(UseRegister(value), temp);
+  LInstruction* branch =
+      temp != NULL ? new (zone()) LBranch(UseRegister(value), temp)
+                   : new (zone()) LBranch(UseRegisterAtStart(value), temp);
   if (!easy_case &&
       ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
        !expected.IsGeneric())) {
@@ -995,7 +1012,7 @@
 
 
 LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
   return new(zone()) LCmpMapAndBranch(value);
 }
@@ -1119,14 +1136,13 @@
 
 LInstruction* LChunkBuilder::DoCallWithDescriptor(
     HCallWithDescriptor* instr) {
-  const CallInterfaceDescriptor* descriptor = instr->descriptor();
-
+  CallInterfaceDescriptor descriptor = instr->descriptor();
   LOperand* target = UseRegisterOrConstantAtStart(instr->target());
   ZoneList<LOperand*> ops(instr->OperandCount(), zone());
   ops.Add(target, zone());
   for (int i = 1; i < instr->OperandCount(); i++) {
-    LOperand* op = UseFixed(instr->OperandAt(i),
-        descriptor->GetParameterRegister(i - 1));
+    LOperand* op =
+        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
     ops.Add(op, zone());
   }
 
@@ -1136,6 +1152,19 @@
 }
 
 
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+    HTailCallThroughMegamorphicCache* instr) {
+  LOperand* context = UseFixed(instr->context(), esi);
+  LOperand* receiver_register =
+      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+  LOperand* name_register =
+      UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  // Not marked as a call: it can't deoptimize, and it never returns.
+  return new (zone()) LTailCallThroughMegamorphicCache(
+      context, receiver_register, name_register);
+}
+
+
 LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* function = UseFixed(instr->function(), edi);
@@ -1148,6 +1177,7 @@
   switch (instr->op()) {
     case kMathFloor: return DoMathFloor(instr);
     case kMathRound: return DoMathRound(instr);
+    case kMathFround: return DoMathFround(instr);
     case kMathAbs: return DoMathAbs(instr);
     case kMathLog: return DoMathLog(instr);
     case kMathExp: return DoMathExp(instr);
@@ -1169,9 +1199,16 @@
 
 
 LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
-  // Crankshaft is turned off for nosse2.
-  UNREACHABLE();
-  return NULL;
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LInstruction* result = DefineAsRegister(new (zone()) LMathRound(input));
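+  // Rounding can deoptimize, so the instruction needs an environment.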
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineSameAsFirst(result);
 }
 
 
@@ -1188,8 +1225,8 @@
 
 
 LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
   LOperand* input = UseRegisterAtStart(instr->value());
   return MarkAsCall(DefineSameAsFirst(new(zone()) LMathLog(input)), instr);
 }
@@ -1203,27 +1240,28 @@
 
 
 LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
-  ASSERT(instr->representation().IsDouble());
-  ASSERT(instr->value()->representation().IsDouble());
-  LOperand* value = UseTempRegister(instr->value());
-  LOperand* temp1 = TempRegister();
-  LOperand* temp2 = TempRegister();
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp1 = FixedTemp(ecx);
+  LOperand* temp2 = FixedTemp(edx);
   LMathExp* result = new(zone()) LMathExp(value, temp1, temp2);
-  return DefineAsRegister(result);
+  return MarkAsCall(DefineSameAsFirst(result), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  LMathSqrt* result = new(zone()) LMathSqrt(input);
-  return DefineSameAsFirst(result);
+  LOperand* temp1 = FixedTemp(ecx);
+  LOperand* temp2 = FixedTemp(edx);
+  LMathSqrt* result = new(zone()) LMathSqrt(input, temp1, temp2);
+  return MarkAsCall(DefineSameAsFirst(result), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
   LOperand* input = UseRegisterAtStart(instr->value());
-  LOperand* temp = TempRegister();
-  LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
+  LMathPowHalf* result = new (zone()) LMathPowHalf(input);
   return DefineSameAsFirst(result);
 }
 
@@ -1280,9 +1318,9 @@
 
 LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
-    ASSERT(instr->CheckFlag(HValue::kTruncatingToInt32));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
 
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
@@ -1294,9 +1332,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineAsRegister(new(zone()) LDivByPowerOf2I(
@@ -1312,9 +1350,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(eax);
@@ -1331,9 +1369,9 @@
 
 
 LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), eax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(edx);
@@ -1380,9 +1418,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(eax);
@@ -1407,9 +1445,9 @@
 
 
 LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), eax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(edx);
@@ -1436,14 +1474,15 @@
 
 
 LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegisterAtStart(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LInstruction* result = DefineSameAsFirst(new(zone()) LModByPowerOf2I(
           dividend, divisor));
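+  // A -0 result is only possible with a negative dividend, so the deopt
+  // environment is needed only when the left operand can be negative.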
-  if (instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
     result = AssignEnvironment(result);
   }
   return result;
@@ -1451,9 +1490,9 @@
 
 
 LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   int32_t divisor = instr->right()->GetInteger32Constant();
   LOperand* temp1 = FixedTemp(eax);
@@ -1468,9 +1507,9 @@
 
 
 LInstruction* LChunkBuilder::DoModI(HMod* instr) {
-  ASSERT(instr->representation().IsSmiOrInteger32());
-  ASSERT(instr->left()->representation().Equals(instr->representation()));
-  ASSERT(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseFixed(instr->left(), eax);
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp = FixedTemp(edx);
@@ -1503,8 +1542,8 @@
 
 LInstruction* LChunkBuilder::DoMul(HMul* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
     LOperand* right = UseOrConstant(instr->BetterRightOperand());
     LOperand* temp = NULL;
@@ -1527,8 +1566,8 @@
 
 LInstruction* LChunkBuilder::DoSub(HSub* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     LOperand* left = UseRegisterAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     LSubI* sub = new(zone()) LSubI(left, right);
@@ -1547,8 +1586,8 @@
 
 LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     // Check to see if it would be advantageous to use an lea instruction rather
     // than an add. This is the case when no overflow check is needed and there
     // are multiple uses of the add's inputs, so using a 3-register add will
@@ -1571,9 +1610,9 @@
   } else if (instr->representation().IsDouble()) {
     return DoArithmeticD(Token::ADD, instr);
   } else if (instr->representation().IsExternal()) {
-    ASSERT(instr->left()->representation().IsExternal());
-    ASSERT(instr->right()->representation().IsInteger32());
-    ASSERT(!instr->CheckFlag(HValue::kCanOverflow));
+    DCHECK(instr->left()->representation().IsExternal());
+    DCHECK(instr->right()->representation().IsInteger32());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
     bool use_lea = LAddI::UseLea(instr);
     LOperand* left = UseRegisterAtStart(instr->left());
     HValue* right_candidate = instr->right();
@@ -1594,33 +1633,39 @@
 LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
   LOperand* left = NULL;
   LOperand* right = NULL;
+  LOperand* scratch = TempRegister();
+
   if (instr->representation().IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(instr->representation()));
-    ASSERT(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
     left = UseRegisterAtStart(instr->BetterLeftOperand());
     right = UseOrConstantAtStart(instr->BetterRightOperand());
   } else {
-    ASSERT(instr->representation().IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     left = UseRegisterAtStart(instr->left());
     right = UseRegisterAtStart(instr->right());
   }
-  LMathMinMax* minmax = new(zone()) LMathMinMax(left, right);
+  LMathMinMax* minmax = new (zone()) LMathMinMax(left, right, scratch);
   return DefineSameAsFirst(minmax);
 }
 
 
 LInstruction* LChunkBuilder::DoPower(HPower* instr) {
-  // Crankshaft is turned off for nosse2.
-  UNREACHABLE();
-  return NULL;
+  // Unlike ia32, we don't have a MathPowStub; we call the C function directly.
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  LPower* result = new (zone()) LPower(left, right);
+  return MarkAsCall(DefineSameAsFirst(result), instr);
 }
 
 
 LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
-  ASSERT(instr->left()->representation().IsSmiOrTagged());
-  ASSERT(instr->right()->representation().IsSmiOrTagged());
+  DCHECK(instr->left()->representation().IsSmiOrTagged());
+  DCHECK(instr->right()->representation().IsSmiOrTagged());
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseFixed(instr->left(), edx);
   LOperand* right = UseFixed(instr->right(), eax);
@@ -1633,15 +1678,15 @@
     HCompareNumericAndBranch* instr) {
   Representation r = instr->representation();
   if (r.IsSmiOrInteger32()) {
-    ASSERT(instr->left()->representation().Equals(r));
-    ASSERT(instr->right()->representation().Equals(r));
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
     LOperand* left = UseRegisterOrConstantAtStart(instr->left());
     LOperand* right = UseOrConstantAtStart(instr->right());
     return new(zone()) LCompareNumericAndBranch(left, right);
   } else {
-    ASSERT(r.IsDouble());
-    ASSERT(instr->left()->representation().IsDouble());
-    ASSERT(instr->right()->representation().IsDouble());
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
     LOperand* left;
     LOperand* right;
     if (CanBeImmediateConstant(instr->left()) &&
@@ -1676,35 +1721,34 @@
 
 LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
     HCompareMinusZeroAndBranch* instr) {
-  LOperand* value = UseRegister(instr->value());
-  LOperand* scratch = TempRegister();
-  return new(zone()) LCompareMinusZeroAndBranch(value, scratch);
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LCompareMinusZeroAndBranch(value);
 }
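
The rewrite drops the temp register, but the predicate the branch computes is unchanged, and it is worth spelling out: -0.0 compares equal to +0.0 under ==, so the sign bit must be inspected directly. In portable C++ terms (a sketch, not V8's emitted code):

    #include <cmath>

    bool IsMinusZero(double x) {
      return x == 0.0 && std::signbit(x);
    }
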
 
 
 LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsSmiOrTagged());
+  DCHECK(instr->value()->representation().IsSmiOrTagged());
   LOperand* temp = TempRegister();
   return new(zone()) LIsObjectAndBranch(UseRegister(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* temp = TempRegister();
   return new(zone()) LIsStringAndBranch(UseRegister(instr->value()), temp);
 }
 
 
 LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsSmiAndBranch(Use(instr->value()));
 }
 
 
 LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
     HIsUndetectableAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LIsUndetectableAndBranch(
       UseRegisterAtStart(instr->value()), TempRegister());
 }
@@ -1712,8 +1756,8 @@
 
 LInstruction* LChunkBuilder::DoStringCompareAndBranch(
     HStringCompareAndBranch* instr) {
-  ASSERT(instr->left()->representation().IsTagged());
-  ASSERT(instr->right()->representation().IsTagged());
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
   LOperand* context = UseFixed(instr->context(), esi);
   LOperand* left = UseFixed(instr->left(), edx);
   LOperand* right = UseFixed(instr->right(), eax);
@@ -1727,7 +1771,7 @@
 
 LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
     HHasInstanceTypeAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LHasInstanceTypeAndBranch(
       UseRegisterAtStart(instr->value()),
       TempRegister());
@@ -1736,7 +1780,7 @@
 
 LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
     HGetCachedArrayIndex* instr)  {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   LOperand* value = UseRegisterAtStart(instr->value());
 
   return DefineAsRegister(new(zone()) LGetCachedArrayIndex(value));
@@ -1745,7 +1789,7 @@
 
 LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
     HHasCachedArrayIndexAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LHasCachedArrayIndexAndBranch(
       UseRegisterAtStart(instr->value()));
 }
@@ -1753,7 +1797,7 @@
 
 LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
     HClassOfTestAndBranch* instr) {
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
   return new(zone()) LClassOfTestAndBranch(UseRegister(instr->value()),
                                            TempRegister(),
                                            TempRegister());
@@ -1881,7 +1925,7 @@
       }
       return AssignEnvironment(DefineSameAsFirst(new(zone()) LCheckSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       if (val->type().IsSmi() || val->representation().IsSmi()) {
         LOperand* value = UseRegister(val);
         return DefineSameAsFirst(new(zone()) LSmiUntag(value, false));
@@ -1905,7 +1949,7 @@
       return AssignEnvironment(
           DefineAsRegister(new(zone()) LDoubleToSmi(value)));
     } else {
-      ASSERT(to.IsInteger32());
+      DCHECK(to.IsInteger32());
       bool truncating = instr->CanTruncateToInt32();
       LOperand* value = UseRegister(val);
       LInstruction* result = DefineAsRegister(new(zone()) LDoubleToI(value));
@@ -1937,7 +1981,7 @@
       }
       return result;
     } else {
-      ASSERT(to.IsDouble());
+      DCHECK(to.IsDouble());
       if (val->CheckFlag(HInstruction::kUint32)) {
         return DefineAsRegister(new(zone()) LUint32ToDouble(UseRegister(val)));
       } else {
@@ -2001,13 +2045,13 @@
   HValue* value = instr->value();
   Representation input_rep = value->representation();
   if (input_rep.IsDouble()) {
-    UNREACHABLE();
-    return NULL;
+    LOperand* reg = UseRegister(value);
+    return DefineFixed(new (zone()) LClampDToUint8(reg), eax);
   } else if (input_rep.IsInteger32()) {
     LOperand* reg = UseFixed(value, eax);
     return DefineFixed(new(zone()) LClampIToUint8(reg), eax);
   } else {
-    ASSERT(input_rep.IsSmiOrTagged());
+    DCHECK(input_rep.IsSmiOrTagged());
     LOperand* value = UseRegister(instr->value());
     LClampTToUint8NoSSE2* res =
         new(zone()) LClampTToUint8NoSSE2(value, TempRegister(),
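
All three clamp variants in the hunk above implement the same store semantics for uint8-clamped typed arrays; only the input representation differs. A reference version, assuming the usual spec behavior (NaN and negatives clamp to 0, values above 255 clamp to 255, ties round to even):

    #include <cmath>
    #include <cstdint>

    uint8_t ClampDoubleToUint8(double v) {
      if (!(v > 0.0)) return 0;    // also catches NaN
      if (v >= 255.0) return 255;
      // The default FP rounding mode is round-to-nearest-even, matching the
      // tie-to-even requirement.
      return static_cast<uint8_t>(std::nearbyint(v));
    }
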
@@ -2019,7 +2063,7 @@
 
 LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
   HValue* value = instr->value();
-  ASSERT(value->representation().IsDouble());
+  DCHECK(value->representation().IsDouble());
   return DefineAsRegister(new(zone()) LDoubleBits(UseRegister(value)));
 }
 
@@ -2046,10 +2090,7 @@
   } else if (r.IsInteger32()) {
     return DefineAsRegister(new(zone()) LConstantI);
   } else if (r.IsDouble()) {
-    double value = instr->DoubleValue();
-    bool value_is_zero = BitCast<uint64_t, double>(value) == 0;
-    LOperand* temp = value_is_zero ? NULL : TempRegister();
-    return DefineAsRegister(new(zone()) LConstantD(temp));
+    return DefineAsRegister(new (zone()) LConstantD);
   } else if (r.IsExternal()) {
     return DefineAsRegister(new(zone()) LConstantE);
   } else if (r.IsTagged()) {
@@ -2071,9 +2112,15 @@
 
 LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* global_object = UseFixed(instr->global_object(), edx);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
   LLoadGlobalGeneric* result =
-      new(zone()) LLoadGlobalGeneric(context, global_object);
+      new(zone()) LLoadGlobalGeneric(context, global_object, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
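The FLAG_vector_ics guard recurs verbatim in DoLoadNamedGeneric and DoLoadKeyedGeneric below: when vector-based ICs are enabled, the IC expects its type-feedback vector in the fixed register named by the descriptor. A hypothetical helper (not present in the source) that the three builders could share:

    // Hypothetical refactoring sketch only; the patch simply repeats the
    // pattern at each generic-load builder.
    LOperand* LChunkBuilder::VectorOperandOrNull() {
      return FLAG_vector_ics
                 ? FixedTemp(VectorLoadICDescriptor::VectorRegister())
                 : NULL;
    }
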
@@ -2126,8 +2173,14 @@
 
 LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), edx);
-  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(context, object);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+  LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
+      context, object, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2146,7 +2199,7 @@
 
 
 LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
-  ASSERT(instr->key()->representation().IsSmiOrInteger32());
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
   ElementsKind elements_kind = instr->elements_kind();
   bool clobbers_key = ExternalArrayOpRequiresTemp(
       instr->key()->representation(), elements_kind);
@@ -2159,7 +2212,7 @@
     LOperand* obj = UseRegisterAtStart(instr->elements());
     result = DefineAsRegister(new(zone()) LLoadKeyed(obj, key));
   } else {
-    ASSERT(
+    DCHECK(
         (instr->representation().IsInteger32() &&
          !(IsDoubleOrFloatElementsKind(instr->elements_kind()))) ||
         (instr->representation().IsDouble() &&
@@ -2184,11 +2237,15 @@
 
 LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), edx);
-  LOperand* key = UseFixed(instr->key(), ecx);
-
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
   LLoadKeyedGeneric* result =
-      new(zone()) LLoadKeyedGeneric(context, object, key);
+      new(zone()) LLoadKeyedGeneric(context, object, key, vector);
   return MarkAsCall(DefineFixed(result, eax), instr);
 }
 
@@ -2218,18 +2275,20 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
   if (!instr->is_typed_elements()) {
-    ASSERT(instr->elements()->representation().IsTagged());
-    ASSERT(instr->key()->representation().IsInteger32() ||
+    DCHECK(instr->elements()->representation().IsTagged());
+    DCHECK(instr->key()->representation().IsInteger32() ||
            instr->key()->representation().IsSmi());
 
     if (instr->value()->representation().IsDouble()) {
       LOperand* object = UseRegisterAtStart(instr->elements());
-      LOperand* val = NULL;
-      val = UseRegisterAtStart(instr->value());
+      // Storing the double hole requires no FP register.
+      LOperand* val = instr->IsConstantHoleStore()
+                          ? NULL
+                          : UseRegisterAtStart(instr->value());
       LOperand* key = UseRegisterOrConstantAtStart(instr->key());
       return new(zone()) LStoreKeyed(object, key, val);
     } else {
-      ASSERT(instr->value()->representation().IsSmiOrTagged());
+      DCHECK(instr->value()->representation().IsSmiOrTagged());
       bool needs_write_barrier = instr->NeedsWriteBarrier();
 
       LOperand* obj = UseRegister(instr->elements());
@@ -2247,12 +2306,12 @@
   }
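
Context for the double-hole comment in the previous hunk: the hole marker in double arrays is a distinguished NaN bit pattern, so storing a constant hole can be done with plain integer moves of the raw bits, which is why no FP register needs to be reserved. A sketch (the constant below is an arbitrary quiet NaN, not V8's actual hole pattern):

    #include <cstdint>
    #include <cstring>

    void StoreHole(double* slot) {
      const uint64_t kIllustrativeHoleBits = 0x7FF8000000000001ULL;  // quiet NaN
      std::memcpy(slot, &kIllustrativeHoleBits, sizeof(kIllustrativeHoleBits));
    }
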
 
   ElementsKind elements_kind = instr->elements_kind();
-  ASSERT(
+  DCHECK(
       (instr->value()->representation().IsInteger32() &&
        !IsDoubleOrFloatElementsKind(elements_kind)) ||
       (instr->value()->representation().IsDouble() &&
        IsDoubleOrFloatElementsKind(elements_kind)));
-  ASSERT((instr->is_fixed_typed_array() &&
+  DCHECK((instr->is_fixed_typed_array() &&
           instr->elements()->representation().IsTagged()) ||
          (instr->is_external() &&
           instr->elements()->representation().IsExternal()));
@@ -2270,13 +2329,14 @@
 
 LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), edx);
-  LOperand* key = UseFixed(instr->key(), ecx);
-  LOperand* value = UseFixed(instr->value(), eax);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
-  ASSERT(instr->object()->representation().IsTagged());
-  ASSERT(instr->key()->representation().IsTagged());
-  ASSERT(instr->value()->representation().IsTagged());
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
 
   LStoreKeyedGeneric* result =
       new(zone()) LStoreKeyedGeneric(context, object, key, value);
@@ -2328,9 +2388,9 @@
         ? UseRegister(instr->object())
         : UseTempRegister(instr->object());
   } else if (is_external_location) {
-    ASSERT(!is_in_object);
-    ASSERT(!needs_write_barrier);
-    ASSERT(!needs_write_barrier_for_map);
+    DCHECK(!is_in_object);
+    DCHECK(!needs_write_barrier);
+    DCHECK(!needs_write_barrier_for_map);
     obj = UseRegisterOrConstant(instr->object());
   } else {
     obj = needs_write_barrier_for_map
@@ -2352,8 +2412,6 @@
     val = UseTempRegister(instr->value());
   } else if (can_be_constant) {
     val = UseRegisterOrConstant(instr->value());
-  } else if (instr->field_representation().IsSmi()) {
-    val = UseTempRegister(instr->value());
   } else if (instr->field_representation().IsDouble()) {
     val = UseRegisterAtStart(instr->value());
   } else {
@@ -2374,8 +2432,9 @@
 
 LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
   LOperand* context = UseFixed(instr->context(), esi);
-  LOperand* object = UseFixed(instr->object(), edx);
-  LOperand* value = UseFixed(instr->value(), eax);
+  LOperand* object =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* value = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
 
   LStoreNamedGeneric* result =
       new(zone()) LStoreNamedGeneric(context, object, value);
@@ -2438,7 +2497,7 @@
 
 
 LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
-  ASSERT(argument_count_ == 0);
+  DCHECK(argument_count_ == 0);
   allocator_->MarkAsOsrEntry();
   current_block_->last_environment()->set_ast_id(instr->ast_id());
   return AssignEnvironment(new(zone()) LOsrEntry);
@@ -2451,11 +2510,11 @@
     int spill_index = chunk()->GetParameterStackSlot(instr->index());
     return DefineAsSpilled(result, spill_index);
   } else {
-    ASSERT(info()->IsStub());
-    CodeStubInterfaceDescriptor* descriptor =
-        info()->code_stub()->GetInterfaceDescriptor();
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
     int index = static_cast<int>(instr->index());
-    Register reg = descriptor->GetParameterRegister(index);
+    Register reg = descriptor.GetEnvironmentParameterRegister(index);
     return DefineFixed(result, reg);
   }
 }
@@ -2471,7 +2530,7 @@
   } else {
     spill_index = env_index - instr->environment()->first_local_index();
     if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
-      Abort(kNotEnoughSpillSlotsForOsr);
+      Retry(kNotEnoughSpillSlotsForOsr);
       spill_index = 0;
     }
     if (spill_index == 0) {
@@ -2562,7 +2621,7 @@
     LOperand* context = UseFixed(instr->context(), esi);
     return MarkAsCall(new(zone()) LStackCheck(context), instr);
   } else {
-    ASSERT(instr->is_backwards_branch());
+    DCHECK(instr->is_backwards_branch());
     LOperand* context = UseAny(instr->context());
     return AssignEnvironment(
         AssignPointerMap(new(zone()) LStackCheck(context)));
@@ -2583,6 +2642,7 @@
   if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
     inner->Bind(instr->arguments_var(), instr->arguments_object());
   }
+  inner->BindContext(instr->closure_context());
   inner->set_entry(instr);
   current_block_->UpdateEnvironment(inner);
   chunk_->AddInlinedClosure(instr->closure());
@@ -2598,7 +2658,7 @@
   if (env->entry()->arguments_pushed()) {
     int argument_count = env->arguments_environment()->parameter_count();
     pop = new(zone()) LDrop(argument_count);
-    ASSERT(instr->argument_delta() == -argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
   }
 
   HEnvironment* outer = current_block_->last_environment()->
diff --git a/src/x87/lithium-x87.h b/src/x87/lithium-x87.h
index 8c992b8..dbb18ec 100644
--- a/src/x87/lithium-x87.h
+++ b/src/x87/lithium-x87.h
@@ -6,172 +6,178 @@
 #define V8_X87_LITHIUM_X87_H_
 
 #include "src/hydrogen.h"
-#include "src/lithium-allocator.h"
 #include "src/lithium.h"
+#include "src/lithium-allocator.h"
 #include "src/safepoint-table.h"
 #include "src/utils.h"
 
 namespace v8 {
 namespace internal {
 
+namespace compiler {
+class RCodeVisualizer;
+}
+
 // Forward declarations.
 class LCodeGen;
 
-#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V)    \
-  V(AccessArgumentsAt)                          \
-  V(AddI)                                       \
-  V(AllocateBlockContext)                       \
-  V(Allocate)                                   \
-  V(ApplyArguments)                             \
-  V(ArgumentsElements)                          \
-  V(ArgumentsLength)                            \
-  V(ArithmeticD)                                \
-  V(ArithmeticT)                                \
-  V(BitI)                                       \
-  V(BoundsCheck)                                \
-  V(Branch)                                     \
-  V(CallJSFunction)                             \
-  V(CallWithDescriptor)                         \
-  V(CallFunction)                               \
-  V(CallNew)                                    \
-  V(CallNewArray)                               \
-  V(CallRuntime)                                \
-  V(CallStub)                                   \
-  V(CheckInstanceType)                          \
-  V(CheckMaps)                                  \
-  V(CheckMapValue)                              \
-  V(CheckNonSmi)                                \
-  V(CheckSmi)                                   \
-  V(CheckValue)                                 \
-  V(ClampDToUint8)                              \
-  V(ClampIToUint8)                              \
-  V(ClampTToUint8NoSSE2)                        \
-  V(ClassOfTestAndBranch)                       \
-  V(ClobberDoubles)                             \
-  V(CompareMinusZeroAndBranch)                  \
-  V(CompareNumericAndBranch)                    \
-  V(CmpObjectEqAndBranch)                       \
-  V(CmpHoleAndBranch)                           \
-  V(CmpMapAndBranch)                            \
-  V(CmpT)                                       \
-  V(ConstantD)                                  \
-  V(ConstantE)                                  \
-  V(ConstantI)                                  \
-  V(ConstantS)                                  \
-  V(ConstantT)                                  \
-  V(ConstructDouble)                            \
-  V(Context)                                    \
-  V(DateField)                                  \
-  V(DebugBreak)                                 \
-  V(DeclareGlobals)                             \
-  V(Deoptimize)                                 \
-  V(DivByConstI)                                \
-  V(DivByPowerOf2I)                             \
-  V(DivI)                                       \
-  V(DoubleBits)                                 \
-  V(DoubleToI)                                  \
-  V(DoubleToSmi)                                \
-  V(Drop)                                       \
-  V(Dummy)                                      \
-  V(DummyUse)                                   \
-  V(FlooringDivByConstI)                        \
-  V(FlooringDivByPowerOf2I)                     \
-  V(FlooringDivI)                               \
-  V(ForInCacheArray)                            \
-  V(ForInPrepareMap)                            \
-  V(FunctionLiteral)                            \
-  V(GetCachedArrayIndex)                        \
-  V(Goto)                                       \
-  V(HasCachedArrayIndexAndBranch)               \
-  V(HasInstanceTypeAndBranch)                   \
-  V(InnerAllocatedObject)                       \
-  V(InstanceOf)                                 \
-  V(InstanceOfKnownGlobal)                      \
-  V(InstructionGap)                             \
-  V(Integer32ToDouble)                          \
-  V(InvokeFunction)                             \
-  V(IsConstructCallAndBranch)                   \
-  V(IsObjectAndBranch)                          \
-  V(IsStringAndBranch)                          \
-  V(IsSmiAndBranch)                             \
-  V(IsUndetectableAndBranch)                    \
-  V(Label)                                      \
-  V(LazyBailout)                                \
-  V(LoadContextSlot)                            \
-  V(LoadFieldByIndex)                           \
-  V(LoadFunctionPrototype)                      \
-  V(LoadGlobalCell)                             \
-  V(LoadGlobalGeneric)                          \
-  V(LoadKeyed)                                  \
-  V(LoadKeyedGeneric)                           \
-  V(LoadNamedField)                             \
-  V(LoadNamedGeneric)                           \
-  V(LoadRoot)                                   \
-  V(MapEnumLength)                              \
-  V(MathAbs)                                    \
-  V(MathClz32)                                  \
-  V(MathExp)                                    \
-  V(MathFloor)                                  \
-  V(MathLog)                                    \
-  V(MathMinMax)                                 \
-  V(MathPowHalf)                                \
-  V(MathRound)                                  \
-  V(MathSqrt)                                   \
-  V(ModByConstI)                                \
-  V(ModByPowerOf2I)                             \
-  V(ModI)                                       \
-  V(MulI)                                       \
-  V(NumberTagD)                                 \
-  V(NumberTagI)                                 \
-  V(NumberTagU)                                 \
-  V(NumberUntagD)                               \
-  V(OsrEntry)                                   \
-  V(Parameter)                                  \
-  V(Power)                                      \
-  V(PushArgument)                               \
-  V(RegExpLiteral)                              \
-  V(Return)                                     \
-  V(SeqStringGetChar)                           \
-  V(SeqStringSetChar)                           \
-  V(ShiftI)                                     \
-  V(SmiTag)                                     \
-  V(SmiUntag)                                   \
-  V(StackCheck)                                 \
-  V(StoreCodeEntry)                             \
-  V(StoreContextSlot)                           \
-  V(StoreFrameContext)                          \
-  V(StoreGlobalCell)                            \
-  V(StoreKeyed)                                 \
-  V(StoreKeyedGeneric)                          \
-  V(StoreNamedField)                            \
-  V(StoreNamedGeneric)                          \
-  V(StringAdd)                                  \
-  V(StringCharCodeAt)                           \
-  V(StringCharFromCode)                         \
-  V(StringCompareAndBranch)                     \
-  V(SubI)                                       \
-  V(TaggedToI)                                  \
-  V(ThisFunction)                               \
-  V(ToFastProperties)                           \
-  V(TransitionElementsKind)                     \
-  V(TrapAllocationMemento)                      \
-  V(Typeof)                                     \
-  V(TypeofIsAndBranch)                          \
-  V(Uint32ToDouble)                             \
-  V(UnknownOSRValue)                            \
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(AllocateBlockContext)                    \
+  V(Allocate)                                \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckInstanceType)                       \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckNonSmi)                             \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8NoSSE2)                     \
+  V(ClassOfTestAndBranch)                    \
+  V(ClobberDoubles)                          \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(LoadRoot)                                \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PushArgument)                            \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(TaggedToI)                               \
+  V(TailCallThroughMegamorphicCache)         \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
 
 #define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const V8_FINAL V8_OVERRIDE {                      \
+  virtual Opcode opcode() const FINAL OVERRIDE {                            \
     return LInstruction::k##type;                                           \
   }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) V8_FINAL V8_OVERRIDE;   \
-  virtual const char* Mnemonic() const V8_FINAL V8_OVERRIDE {               \
+  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;         \
+  virtual const char* Mnemonic() const FINAL OVERRIDE {                     \
     return mnemonic;                                                        \
   }                                                                         \
   static L##type* cast(LInstruction* instr) {                               \
-    ASSERT(instr->Is##type());                                              \
+    DCHECK(instr->Is##type());                                              \
     return reinterpret_cast<L##type*>(instr);                               \
   }
 
@@ -201,7 +207,7 @@
   enum Opcode {
     // Declare a unique enum value for each instruction.
 #define DECLARE_OPCODE(type) k##type,
-    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kAdapter,
     kNumberOfInstructions
 #undef DECLARE_OPCODE
   };
@@ -220,6 +226,9 @@
 
   virtual bool IsControl() const { return false; }
 
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
   void set_environment(LEnvironment* env) { environment_ = env; }
   LEnvironment* environment() const { return environment_; }
   bool HasEnvironment() const { return environment_ != NULL; }
@@ -262,11 +271,12 @@
   void VerifyCall();
 #endif
 
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
  private:
   // Iterator support.
   friend class InputIterator;
-  virtual int InputCount() = 0;
-  virtual LOperand* InputAt(int i) = 0;
 
   friend class TempIterator;
   virtual int TempCount() = 0;
@@ -287,7 +297,7 @@
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const V8_FINAL V8_OVERRIDE {
+  virtual bool HasResult() const FINAL OVERRIDE {
     return R != 0 && result() != NULL;
   }
   void set_result(LOperand* operand) { results_[0] = operand; }
@@ -309,11 +319,11 @@
 
  private:
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return I; }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return temps_[i]; }
+  virtual int TempCount() FINAL OVERRIDE { return T; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
 };
 
 
@@ -327,10 +337,10 @@
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const V8_FINAL V8_OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual bool IsGap() const FINAL OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
-    ASSERT(instr->IsGap());
+    DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
   }
 
@@ -364,11 +374,11 @@
 };
 
 
-class LInstructionGap V8_FINAL : public LGap {
+class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -376,11 +386,11 @@
 };
 
 
-class LClobberDoubles V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LClobberDoubles FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LClobberDoubles(Isolate* isolate) { }
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return true;
   }
 
@@ -388,41 +398,42 @@
 };
 
 
-class LGoto V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE;
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  virtual bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return false;
   }
 
   bool jumps_to_join() const { return block_->predecessors()->length() > 1; }
+  HBasicBlock* block() const { return block_; }
 
  private:
   HBasicBlock* block_;
 };
 
 
-class LLazyBailout V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
 };
 
 
-class LDummy V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  explicit LDummy() { }
+  LDummy() {}
   DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
 };
 
 
-class LDummyUse V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDummyUse(LOperand* value) {
     inputs_[0] = value;
@@ -431,25 +442,25 @@
 };
 
 
-class LDeoptimize V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const V8_OVERRIDE { return true; }
+  virtual bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
 
 
-class LLabel V8_FINAL : public LGap {
+class LLabel FINAL : public LGap {
  public:
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -465,16 +476,16 @@
 };
 
 
-class LParameter V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
 
-class LCallStub V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallStub(LOperand* context) {
     inputs_[0] = context;
@@ -487,9 +498,30 @@
 };
 
 
-class LUnknownOSRValue V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LTailCallThroughMegamorphicCache FINAL
+    : public LTemplateInstruction<0, 3, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  explicit LTailCallThroughMegamorphicCache(LOperand* context,
+                                            LOperand* receiver,
+                                            LOperand* name) {
+    inputs_[0] = context;
+    inputs_[1] = receiver;
+    inputs_[2] = name;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* name() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+                               "tail-call-through-megamorphic-cache")
+  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
@@ -501,7 +533,7 @@
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const V8_FINAL V8_OVERRIDE { return true; }
+  virtual bool IsControl() const FINAL OVERRIDE { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -540,7 +572,7 @@
 };
 
 
-class LWrapReceiver V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LWrapReceiver(LOperand* receiver,
                 LOperand* function,
@@ -559,7 +591,7 @@
 };
 
 
-class LApplyArguments V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LApplyArguments(LOperand* function,
                   LOperand* receiver,
@@ -580,7 +612,7 @@
 };
 
 
-class LAccessArgumentsAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
     inputs_[0] = arguments;
@@ -594,11 +626,11 @@
 
   DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LArgumentsLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LArgumentsLength(LOperand* elements) {
     inputs_[0] = elements;
@@ -610,20 +642,20 @@
 };
 
 
-class LArgumentsElements V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
   DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
 };
 
 
-class LDebugBreak V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
 };
 
 
-class LModByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -641,7 +673,7 @@
 };
 
 
-class LModByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LModByConstI(LOperand* dividend,
                int32_t divisor,
@@ -666,7 +698,7 @@
 };
 
 
-class LModI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LModI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LModI(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
@@ -683,7 +715,7 @@
 };
 
 
-class LDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -701,7 +733,7 @@
 };
 
 
-class LDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LDivByConstI(LOperand* dividend,
                int32_t divisor,
@@ -726,7 +758,7 @@
 };
 
 
-class LDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -743,7 +775,7 @@
 };
 
 
-class LFlooringDivByPowerOf2I V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
     inputs_[0] = dividend;
@@ -762,7 +794,7 @@
 };
 
 
-class LFlooringDivByConstI V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 3> {
  public:
   LFlooringDivByConstI(LOperand* dividend,
                        int32_t divisor,
@@ -790,7 +822,7 @@
 };
 
 
-class LFlooringDivI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LFlooringDivI(LOperand* dividend, LOperand* divisor, LOperand* temp) {
     inputs_[0] = dividend;
@@ -807,7 +839,7 @@
 };
 
 
-class LMulI V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LMulI FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LMulI(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
@@ -824,7 +856,7 @@
 };
 
 
-class LCompareNumericAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCompareNumericAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -847,7 +879,7 @@
 };
 
 
-class LMathFloor V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathFloor(LOperand* value) {
     inputs_[0] = value;
@@ -860,7 +892,7 @@
 };
 
 
-class LMathRound V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathRound(LOperand* value) {
     inputs_[0] = value;
@@ -873,7 +905,17 @@
 };
 
 
-class LMathAbs V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LMathAbs(LOperand* context, LOperand* value) {
     inputs_[1] = context;
@@ -888,7 +930,7 @@
 };
 
 
-class LMathLog V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathLog(LOperand* value) {
     inputs_[0] = value;
@@ -900,7 +942,7 @@
 };
 
 
-class LMathClz32 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMathClz32(LOperand* value) {
     inputs_[0] = value;
@@ -912,7 +954,7 @@
 };
 
 
-class LMathExp V8_FINAL : public LTemplateInstruction<1, 1, 2> {
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
   LMathExp(LOperand* value,
            LOperand* temp1,
@@ -931,33 +973,35 @@
 };
 
 
-class LMathSqrt V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 2> {
  public:
-  explicit LMathSqrt(LOperand* value) {
+  explicit LMathSqrt(LOperand* value,
+                     LOperand* temp1,
+                     LOperand* temp2) {
     inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
   }
 
   LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
 };
 
 
-class LMathPowHalf V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
-  LMathPowHalf(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
+  explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
 };
 
 
-class LCmpObjectEqAndBranch V8_FINAL : public LControlInstruction<2, 0> {
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
  public:
   LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -971,7 +1015,7 @@
 };
 
 
-class LCmpHoleAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpHoleAndBranch(LOperand* object) {
     inputs_[0] = object;
@@ -984,15 +1028,11 @@
 };
 
 
-class LCompareMinusZeroAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
-  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
-    inputs_[0] = value;
-    temps_[0] = temp;
-  }
+  explicit LCompareMinusZeroAndBranch(LOperand* value) { inputs_[0] = value; }
 
   LOperand* value() { return inputs_[0]; }
-  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
                                "cmp-minus-zero-and-branch")
@@ -1000,7 +1040,7 @@
 };
 
 
-class LIsObjectAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsObjectAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1012,11 +1052,11 @@
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsStringAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsStringAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1029,11 +1069,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsSmiAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LIsSmiAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1044,11 +1084,11 @@
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsUndetectableAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1062,11 +1102,11 @@
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStringCompareAndBranch V8_FINAL : public LControlInstruction<3, 0> {
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
  public:
   LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1082,13 +1122,13 @@
                                "string-compare-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Token::Value op() const { return hydrogen()->token(); }
 };
 
 
-class LHasInstanceTypeAndBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LHasInstanceTypeAndBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1102,11 +1142,11 @@
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LGetCachedArrayIndex V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LGetCachedArrayIndex(LOperand* value) {
     inputs_[0] = value;
@@ -1119,7 +1159,7 @@
 };
 
 
-class LHasCachedArrayIndexAndBranch V8_FINAL
+class LHasCachedArrayIndexAndBranch FINAL
     : public LControlInstruction<1, 0> {
  public:
   explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
@@ -1131,11 +1171,11 @@
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
                                "has-cached-array-index-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LIsConstructCallAndBranch V8_FINAL : public LControlInstruction<0, 1> {
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
  public:
   explicit LIsConstructCallAndBranch(LOperand* temp) {
     temps_[0] = temp;
@@ -1148,7 +1188,7 @@
 };
 
 
-class LClassOfTestAndBranch V8_FINAL : public LControlInstruction<1, 2> {
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
  public:
   LClassOfTestAndBranch(LOperand* value, LOperand* temp, LOperand* temp2) {
     inputs_[0] = value;
@@ -1164,11 +1204,11 @@
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LCmpT(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1184,7 +1224,7 @@
 };
 
 
-class LInstanceOf V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -1198,7 +1238,7 @@
 };
 
 
-class LInstanceOfKnownGlobal V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -1219,7 +1259,7 @@
     return lazy_deopt_env_;
   }
   virtual void SetDeferredLazyDeoptimizationEnvironment(
-      LEnvironment* env) V8_OVERRIDE {
+      LEnvironment* env) OVERRIDE {
     lazy_deopt_env_ = env;
   }
 
@@ -1228,7 +1268,7 @@
 };
 
 
-class LBoundsCheck V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LBoundsCheck(LOperand* index, LOperand* length) {
     inputs_[0] = index;
@@ -1243,7 +1283,7 @@
 };
 
 
-class LBitI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LBitI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1260,7 +1300,7 @@
 };
 
 
-class LShiftI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
       : op_(op), can_deopt_(can_deopt) {
@@ -1282,7 +1322,7 @@
 };
 
 
-class LSubI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSubI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1297,7 +1337,7 @@
 };
 
 
-class LConstantI V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1306,7 +1346,7 @@
 };
 
 
-class LConstantS V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1315,14 +1355,8 @@
 };
 
 
-class LConstantD V8_FINAL : public LTemplateInstruction<1, 0, 1> {
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 1> {
  public:
-  explicit LConstantD(LOperand* temp) {
-    temps_[0] = temp;
-  }
-
-  LOperand* temp() { return temps_[0]; }
-
   DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
 
@@ -1330,7 +1364,7 @@
 };
 
 
-class LConstantE V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1341,7 +1375,7 @@
 };
 
 
-class LConstantT V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
   DECLARE_HYDROGEN_ACCESSOR(Constant)
@@ -1352,7 +1386,7 @@
 };
 
 
-class LBranch V8_FINAL : public LControlInstruction<1, 1> {
+class LBranch FINAL : public LControlInstruction<1, 1> {
  public:
   LBranch(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -1365,11 +1399,11 @@
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LCmpMapAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LCmpMapAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -1384,7 +1418,7 @@
 };
 
 
-class LMapEnumLength V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LMapEnumLength(LOperand* value) {
     inputs_[0] = value;
@@ -1396,7 +1430,7 @@
 };
 
 
-class LDateField V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LDateField(LOperand* date, LOperand* temp, Smi* index)
       : index_(index) {
@@ -1417,7 +1451,7 @@
 };
 
 
-class LSeqStringGetChar V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LSeqStringGetChar(LOperand* string, LOperand* index) {
     inputs_[0] = string;
@@ -1432,7 +1466,7 @@
 };
 
 
-class LSeqStringSetChar V8_FINAL : public LTemplateInstruction<1, 4, 0> {
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
  public:
   LSeqStringSetChar(LOperand* context,
                     LOperand* string,
@@ -1453,7 +1487,7 @@
 };
 
 
-class LAddI V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LAddI(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1473,22 +1507,24 @@
 };
 
 
-class LMathMinMax V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LMathMinMax(LOperand* left, LOperand* right) {
+  LMathMinMax(LOperand* left, LOperand* right, LOperand* temp) {
     inputs_[0] = left;
     inputs_[1] = right;
+    temps_[0] = temp;
   }
 
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
   DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
 };
 
 
-class LPower V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LPower(LOperand* left, LOperand* right) {
     inputs_[0] = left;
@@ -1503,7 +1539,7 @@
 };
 
 
-class LArithmeticD V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LArithmeticD(Token::Value op, LOperand* left, LOperand* right)
       : op_(op) {
@@ -1516,18 +1552,18 @@
 
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticD;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
 };
 
 
-class LArithmeticT V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LArithmeticT(Token::Value op,
                LOperand* context,
@@ -1543,11 +1579,11 @@
   LOperand* left() { return inputs_[1]; }
   LOperand* right() { return inputs_[2]; }
 
-  virtual Opcode opcode() const V8_OVERRIDE {
+  virtual Opcode opcode() const OVERRIDE {
     return LInstruction::kArithmeticT;
   }
-  virtual void CompileToNative(LCodeGen* generator) V8_OVERRIDE;
-  virtual const char* Mnemonic() const V8_OVERRIDE;
+  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
+  virtual const char* Mnemonic() const OVERRIDE;
 
   Token::Value op() const { return op_; }
 
@@ -1556,7 +1592,7 @@
 };
 
 
-class LReturn V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   explicit LReturn(LOperand* value,
                    LOperand* context,
@@ -1570,7 +1606,7 @@
     return parameter_count()->IsConstantOperand();
   }
   LConstantOperand* constant_parameter_count() {
-    ASSERT(has_constant_parameter_count());
+    DCHECK(has_constant_parameter_count());
     return LConstantOperand::cast(parameter_count());
   }
   LOperand* parameter_count() { return inputs_[2]; }
@@ -1580,7 +1616,7 @@
 };
 
 
-class LLoadNamedField V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadNamedField(LOperand* object) {
     inputs_[0] = object;
@@ -1593,15 +1629,17 @@
 };
 
 
-class LLoadNamedGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadNamedGeneric(LOperand* context, LOperand* object) {
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
@@ -1610,7 +1648,7 @@
 };
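// LLoadNamedGeneric (like the other *Generic loads below) now carries an
// extra temp that pins a register for the type-feedback vector used by
// vector-based load ICs. A rough, self-contained sketch of what that vector
// buys -- per-site feedback so a later load can take a monomorphic fast path
// (types and fields here are illustrative, not V8's):

struct MapSketch {};

struct FeedbackSlotSketch {
  const MapSketch* cached_map = nullptr;  // map last seen at this load site
};

bool TryMonomorphicLoad(const MapSketch* receiver_map,
                        FeedbackSlotSketch* slot) {
  if (slot->cached_map == receiver_map) return true;  // fast path
  slot->cached_map = receiver_map;  // record feedback, take the slow path
  return false;
}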
 
 
-class LLoadFunctionPrototype V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LLoadFunctionPrototype(LOperand* function, LOperand* temp) {
     inputs_[0] = function;
@@ -1625,7 +1663,7 @@
 };
 
 
-class LLoadRoot V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
   DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
@@ -1634,7 +1672,7 @@
 };
 
 
-class LLoadKeyed V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadKeyed(LOperand* elements, LOperand* key) {
     inputs_[0] = elements;
@@ -1658,7 +1696,7 @@
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   bool key_is_smi() {
     return hydrogen()->key()->representation().IsTagged();
@@ -1682,38 +1720,45 @@
 }
 
 
-class LLoadKeyedGeneric V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
  public:
-  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key) {
+  LLoadKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
+                    LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = obj;
     inputs_[2] = key;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* object() { return inputs_[1]; }
   LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
 };
 
 
-class LLoadGlobalCell V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
 };
 
 
-class LLoadGlobalGeneric V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
-  LLoadGlobalGeneric(LOperand* context, LOperand* global_object) {
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = global_object;
+    temps_[0] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
   DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
@@ -1723,7 +1768,7 @@
 };
 
 
-class LStoreGlobalCell V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStoreGlobalCell(LOperand* value) {
     inputs_[0] = value;
@@ -1736,7 +1781,7 @@
 };
 
 
-class LLoadContextSlot V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LLoadContextSlot(LOperand* context) {
     inputs_[0] = context;
@@ -1749,11 +1794,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStoreContextSlot V8_FINAL : public LTemplateInstruction<0, 2, 1> {
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
  public:
   LStoreContextSlot(LOperand* context, LOperand* value, LOperand* temp) {
     inputs_[0] = context;
@@ -1770,11 +1815,11 @@
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LPushArgument V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LPushArgument(LOperand* value) {
     inputs_[0] = value;
@@ -1786,7 +1831,7 @@
 };
 
 
-class LDrop V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LDrop(int count) : count_(count) { }
 
@@ -1799,7 +1844,7 @@
 };
 
 
-class LStoreCodeEntry V8_FINAL: public LTemplateInstruction<0, 2, 0> {
+class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
  public:
   LStoreCodeEntry(LOperand* function, LOperand* code_object) {
     inputs_[0] = function;
@@ -1816,7 +1861,7 @@
 };
 
 
-class LInnerAllocatedObject V8_FINAL: public LTemplateInstruction<1, 2, 0> {
+class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
  public:
   LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
     inputs_[0] = base_object;
@@ -1832,21 +1877,21 @@
 };
 
 
-class LThisFunction V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
   DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
 };
 
 
-class LContext V8_FINAL : public LTemplateInstruction<1, 0, 0> {
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
   DECLARE_CONCRETE_INSTRUCTION(Context, "context")
   DECLARE_HYDROGEN_ACCESSOR(Context)
 };
 
 
-class LDeclareGlobals V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LDeclareGlobals(LOperand* context) {
     inputs_[0] = context;
@@ -1859,7 +1904,7 @@
 };
 
 
-class LCallJSFunction V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallJSFunction(LOperand* function) {
     inputs_[0] = function;
@@ -1870,44 +1915,44 @@
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallWithDescriptor V8_FINAL : public LTemplateResultInstruction<1> {
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  public:
-  LCallWithDescriptor(const CallInterfaceDescriptor* descriptor,
-                      const ZoneList<LOperand*>& operands,
-                      Zone* zone)
-    : inputs_(descriptor->environment_length() + 1, zone) {
-    ASSERT(descriptor->environment_length() + 1 == operands.length());
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
     inputs_.AddAll(operands, zone);
   }
 
   LOperand* target() const { return inputs_[0]; }
 
- private:
-  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
   DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() V8_FINAL V8_OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) V8_FINAL V8_OVERRIDE { return inputs_[i]; }
+  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
+  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
 
-  virtual int TempCount() V8_FINAL V8_OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) V8_FINAL V8_OVERRIDE { return NULL; }
+  virtual int TempCount() FINAL OVERRIDE { return 0; }
+  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
 };
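// LCallWithDescriptor is the one instruction here with caller-determined
// arity, so it bypasses the fixed inputs_[N] array and overrides the
// iterator hooks over a growable list (now sized from the descriptor's
// register parameter count). The same pattern in isolation, with std::vector
// standing in for ZoneList (a sketch, not the real class):

#include <vector>

struct LOperandSketch2 {};

class LVariadicCallSketch {
 public:
  explicit LVariadicCallSketch(const std::vector<LOperandSketch2*>& operands)
      : inputs_(operands) {}

  LOperandSketch2* target() const { return inputs_[0]; }

  // Iterator support, mirroring the FINAL OVERRIDE methods above.
  int InputCount() const { return static_cast<int>(inputs_.size()); }
  LOperandSketch2* InputAt(int i) { return inputs_[i]; }
  int TempCount() const { return 0; }
  LOperandSketch2* TempAt(int) { return nullptr; }

 private:
  std::vector<LOperandSketch2*> inputs_;
};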
 
 
-class LInvokeFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LInvokeFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1920,13 +1965,13 @@
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallFunction V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   explicit LCallFunction(LOperand* context, LOperand* function) {
     inputs_[0] = context;
@@ -1943,7 +1988,7 @@
 };
 
 
-class LCallNew V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNew(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1956,13 +2001,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallNewArray V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LCallNewArray(LOperand* context, LOperand* constructor) {
     inputs_[0] = context;
@@ -1975,13 +2020,13 @@
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
 
 
-class LCallRuntime V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCallRuntime(LOperand* context) {
     inputs_[0] = context;
@@ -1992,16 +2037,17 @@
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const V8_OVERRIDE {
-    return true;
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+    return save_doubles() == kDontSaveFPRegs;
   }
 
   const Runtime::Function* function() const { return hydrogen()->function(); }
   int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
 };
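// The rewritten predicate above narrows when doubles must be treated as
// clobbered: only runtime calls that do not save FP registers themselves.
// In plain terms (enum values mirror V8's SaveFPRegsMode; sketch only):

enum SaveFPRegsModeSketch { kDontSaveFPRegsSketch, kSaveFPRegsSketch };

bool ClobbersDoubleRegistersSketch(SaveFPRegsModeSketch save_doubles) {
  // A kSaveFPRegs runtime entry preserves FP state across the call, so the
  // register allocator may keep double values live; otherwise assume loss.
  return save_doubles == kDontSaveFPRegsSketch;
}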
 
 
-class LInteger32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LInteger32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -2013,7 +2059,7 @@
 };
 
 
-class LUint32ToDouble V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   explicit LUint32ToDouble(LOperand* value) {
     inputs_[0] = value;
@@ -2025,7 +2071,7 @@
 };
 
 
-class LNumberTagI V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LNumberTagI(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2039,7 +2085,7 @@
 };
 
 
-class LNumberTagU V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LNumberTagU(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2053,7 +2099,7 @@
 };
 
 
-class LNumberTagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   LNumberTagD(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2069,7 +2115,7 @@
 
 
 // Sometimes truncating conversion from a tagged value to an int32.
-class LDoubleToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToI(LOperand* value) {
     inputs_[0] = value;
@@ -2084,7 +2130,7 @@
 };
 
 
-class LDoubleToSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleToSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2098,7 +2144,7 @@
 
 
 // Truncating conversion from a tagged value to an int32.
-class LTaggedToI V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LTaggedToI(LOperand* value) {
     inputs_[0] = value;
@@ -2113,7 +2159,7 @@
 };
 
 
-class LSmiTag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LSmiTag(LOperand* value) {
     inputs_[0] = value;
@@ -2126,7 +2172,7 @@
 };
 
 
-class LNumberUntagD V8_FINAL : public LTemplateInstruction<1, 1, 1> {
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
  public:
   explicit LNumberUntagD(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2141,7 +2187,7 @@
 };
 
 
-class LSmiUntag V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   LSmiUntag(LOperand* value, bool needs_check)
       : needs_check_(needs_check) {
@@ -2159,7 +2205,7 @@
 };
 
 
-class LStoreNamedField V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
  public:
   LStoreNamedField(LOperand* obj,
                    LOperand* val,
@@ -2179,11 +2225,11 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LStoreNamedGeneric V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
     inputs_[0] = context;
@@ -2198,13 +2244,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LStoreKeyed V8_FINAL : public LTemplateInstruction<0, 3, 0> {
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
  public:
   LStoreKeyed(LOperand* obj, LOperand* key, LOperand* val) {
     inputs_[0] = obj;
@@ -2229,13 +2275,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
 
 
-class LStoreKeyedGeneric V8_FINAL : public LTemplateInstruction<0, 4, 0> {
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
  public:
   LStoreKeyedGeneric(LOperand* context,
                      LOperand* object,
@@ -2255,13 +2301,13 @@
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
 
 
-class LTransitionElementsKind V8_FINAL : public LTemplateInstruction<0, 2, 2> {
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
  public:
   LTransitionElementsKind(LOperand* object,
                           LOperand* context,
@@ -2282,7 +2328,7 @@
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2293,7 +2339,7 @@
 };
 
 
-class LTrapAllocationMemento V8_FINAL  : public LTemplateInstruction<0, 1, 1> {
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LTrapAllocationMemento(LOperand* object,
                          LOperand* temp) {
@@ -2309,7 +2355,7 @@
 };
 
 
-class LStringAdd V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
     inputs_[0] = context;
@@ -2326,7 +2372,7 @@
 };
 
 
-class LStringCharCodeAt V8_FINAL : public LTemplateInstruction<1, 3, 0> {
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
  public:
   LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
     inputs_[0] = context;
@@ -2343,7 +2389,7 @@
 };
 
 
-class LStringCharFromCode V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LStringCharFromCode(LOperand* context, LOperand* char_code) {
     inputs_[0] = context;
@@ -2358,7 +2404,7 @@
 };
 
 
-class LCheckValue V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckValue(LOperand* value) {
     inputs_[0] = value;
@@ -2371,7 +2417,7 @@
 };
 
 
-class LCheckInstanceType V8_FINAL : public LTemplateInstruction<0, 1, 1> {
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 1> {
  public:
   LCheckInstanceType(LOperand* value, LOperand* temp) {
     inputs_[0] = value;
@@ -2386,7 +2432,7 @@
 };
 
 
-class LCheckMaps V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckMaps(LOperand* value = NULL) {
     inputs_[0] = value;
@@ -2399,7 +2445,7 @@
 };
 
 
-class LCheckSmi V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LCheckSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2411,7 +2457,7 @@
 };
 
 
-class LClampDToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampDToUint8(LOperand* value) {
     inputs_[0] = value;
@@ -2423,7 +2469,7 @@
 };
 
 
-class LClampIToUint8 V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LClampIToUint8(LOperand* value) {
     inputs_[0] = value;
@@ -2436,7 +2482,7 @@
 
 
 // Truncating conversion from a tagged value to an int32.
-class LClampTToUint8NoSSE2 V8_FINAL : public LTemplateInstruction<1, 1, 3> {
+class LClampTToUint8NoSSE2 FINAL : public LTemplateInstruction<1, 1, 3> {
  public:
   LClampTToUint8NoSSE2(LOperand* unclamped,
                        LOperand* temp1,
@@ -2459,7 +2505,7 @@
 };
 
 
-class LCheckNonSmi V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LCheckNonSmi(LOperand* value) {
     inputs_[0] = value;
@@ -2472,7 +2518,7 @@
 };
 
 
-class LDoubleBits V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LDoubleBits(LOperand* value) {
     inputs_[0] = value;
@@ -2485,7 +2531,7 @@
 };
 
 
-class LConstructDouble V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LConstructDouble(LOperand* hi, LOperand* lo) {
     inputs_[0] = hi;
@@ -2499,7 +2545,7 @@
 };
 
 
-class LAllocate V8_FINAL : public LTemplateInstruction<1, 2, 1> {
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 1> {
  public:
   LAllocate(LOperand* context, LOperand* size, LOperand* temp) {
     inputs_[0] = context;
@@ -2516,7 +2562,7 @@
 };
 
 
-class LRegExpLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LRegExpLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2529,7 +2575,7 @@
 };
 
 
-class LFunctionLiteral V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LFunctionLiteral(LOperand* context) {
     inputs_[0] = context;
@@ -2542,7 +2588,7 @@
 };
 
 
-class LToFastProperties V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LToFastProperties(LOperand* value) {
     inputs_[0] = value;
@@ -2555,7 +2601,7 @@
 };
 
 
-class LTypeof V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LTypeof(LOperand* context, LOperand* value) {
     inputs_[0] = context;
@@ -2569,7 +2615,7 @@
 };
 
 
-class LTypeofIsAndBranch V8_FINAL : public LControlInstruction<1, 0> {
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
  public:
   explicit LTypeofIsAndBranch(LOperand* value) {
     inputs_[0] = value;
@@ -2582,20 +2628,20 @@
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) V8_OVERRIDE;
+  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
-class LOsrEntry V8_FINAL : public LTemplateInstruction<0, 0, 0> {
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const V8_OVERRIDE {
+  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return false;
   }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
 
-class LStackCheck V8_FINAL : public LTemplateInstruction<0, 1, 0> {
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
  public:
   explicit LStackCheck(LOperand* context) {
     inputs_[0] = context;
@@ -2613,7 +2659,7 @@
 };
 
 
-class LForInPrepareMap V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LForInPrepareMap(LOperand* context, LOperand* object) {
     inputs_[0] = context;
@@ -2627,7 +2673,7 @@
 };
 
 
-class LForInCacheArray V8_FINAL : public LTemplateInstruction<1, 1, 0> {
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
  public:
   explicit LForInCacheArray(LOperand* map) {
     inputs_[0] = map;
@@ -2643,7 +2689,7 @@
 };
 
 
-class LCheckMapValue V8_FINAL : public LTemplateInstruction<0, 2, 0> {
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
  public:
   LCheckMapValue(LOperand* value, LOperand* map) {
     inputs_[0] = value;
@@ -2657,7 +2703,7 @@
 };
 
 
-class LLoadFieldByIndex V8_FINAL : public LTemplateInstruction<1, 2, 0> {
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
  public:
   LLoadFieldByIndex(LOperand* object, LOperand* index) {
     inputs_[0] = object;
@@ -2701,7 +2747,7 @@
 
 
 class LChunkBuilder;
-class LPlatformChunk V8_FINAL : public LChunk {
+class LPlatformChunk FINAL : public LChunk {
  public:
   LPlatformChunk(CompilationInfo* info, HGraph* graph)
       : LChunk(info, graph),
@@ -2717,20 +2763,14 @@
 };
 
 
-class LChunkBuilder V8_FINAL : public LChunkBuilderBase {
+class LChunkBuilder FINAL : public LChunkBuilderBase {
  public:
   LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
-      : LChunkBuilderBase(graph->zone()),
-        chunk_(NULL),
-        info_(info),
-        graph_(graph),
-        status_(UNUSED),
+      : LChunkBuilderBase(info, graph),
         current_instruction_(NULL),
         current_block_(NULL),
         next_block_(NULL),
-        allocator_(allocator) { }
-
-  Isolate* isolate() const { return graph_->isolate(); }
+        allocator_(allocator) {}
 
   // Build the sequence for the graph.
   LPlatformChunk* Build();
@@ -2742,6 +2782,7 @@
 
   LInstruction* DoMathFloor(HUnaryMathOperation* instr);
   LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
   LInstruction* DoMathAbs(HUnaryMathOperation* instr);
   LInstruction* DoMathLog(HUnaryMathOperation* instr);
   LInstruction* DoMathExp(HUnaryMathOperation* instr);
@@ -2759,24 +2800,6 @@
   LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
 
  private:
-  enum Status {
-    UNUSED,
-    BUILDING,
-    DONE,
-    ABORTED
-  };
-
-  LPlatformChunk* chunk() const { return chunk_; }
-  CompilationInfo* info() const { return info_; }
-  HGraph* graph() const { return graph_; }
-
-  bool is_unused() const { return status_ == UNUSED; }
-  bool is_building() const { return status_ == BUILDING; }
-  bool is_done() const { return status_ == DONE; }
-  bool is_aborted() const { return status_ == ABORTED; }
-
-  void Abort(BailoutReason reason);
-
   // Methods for getting operands for Use / Define / Temp.
   LUnallocated* ToUnallocated(Register reg);
   LUnallocated* ToUnallocated(X87Register reg);
@@ -2820,7 +2843,7 @@
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) V8_OVERRIDE;
+  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
@@ -2836,6 +2859,8 @@
   LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
   LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
                             Register reg);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr,
+                            X87Register reg);
   LInstruction* DefineX87TOS(LTemplateResultInstruction<1>* instr);
   // Assigns an environment to an instruction.  An instruction which can
   // deoptimize must have an environment.
@@ -2868,10 +2893,6 @@
 
   LOperand* GetStoreKeyedValueOperand(HStoreKeyed* instr);
 
-  LPlatformChunk* chunk_;
-  CompilationInfo* info_;
-  HGraph* const graph_;
-  Status status_;
   HInstruction* current_instruction_;
   HBasicBlock* current_block_;
   HBasicBlock* next_block_;
diff --git a/src/x87/macro-assembler-x87.cc b/src/x87/macro-assembler-x87.cc
index 06bd774..a1fa331 100644
--- a/src/x87/macro-assembler-x87.cc
+++ b/src/x87/macro-assembler-x87.cc
@@ -6,6 +6,8 @@
 
 #if V8_TARGET_ARCH_X87
 
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/cpu-profiler.h"
@@ -33,7 +35,7 @@
 
 
 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8()) {
     movsx_b(dst, src);
   } else if (r.IsUInteger8()) {
@@ -49,7 +51,7 @@
 
 
 void MacroAssembler::Store(Register src, const Operand& dst, Representation r) {
-  ASSERT(!r.IsDouble());
+  DCHECK(!r.IsDouble());
   if (r.IsInteger8() || r.IsUInteger8()) {
     mov_b(dst, src);
   } else if (r.IsInteger16() || r.IsUInteger16()) {
@@ -83,7 +85,7 @@
 void MacroAssembler::StoreRoot(Register source,
                                Register scratch,
                                Heap::RootListIndex index) {
-  ASSERT(Heap::RootCanBeWrittenAfterInitialization(index));
+  DCHECK(Heap::RootCanBeWrittenAfterInitialization(index));
   ExternalReference roots_array_start =
       ExternalReference::roots_array_start(isolate());
   mov(scratch, Immediate(index));
@@ -105,7 +107,7 @@
 
 
 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) {
-  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
   Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
   cmp(with, value);
 }
@@ -113,7 +115,7 @@
 
 void MacroAssembler::CompareRoot(const Operand& with,
                                  Heap::RootListIndex index) {
-  ASSERT(isolate()->heap()->RootCanBeTreatedAsConstant(index));
+  DCHECK(isolate()->heap()->RootCanBeTreatedAsConstant(index));
   Handle<Object> value(&isolate()->heap()->roots_array_start()[index]);
   cmp(with, value);
 }
@@ -125,7 +127,7 @@
     Condition cc,
     Label* condition_met,
     Label::Distance condition_met_distance) {
-  ASSERT(cc == equal || cc == not_equal);
+  DCHECK(cc == equal || cc == not_equal);
   if (scratch.is(object)) {
     and_(scratch, Immediate(~Page::kPageAlignmentMask));
   } else {
@@ -133,8 +135,8 @@
     and_(scratch, object);
   }
   // Check that we can use a test_b.
-  ASSERT(MemoryChunk::IN_FROM_SPACE < 8);
-  ASSERT(MemoryChunk::IN_TO_SPACE < 8);
+  DCHECK(MemoryChunk::IN_FROM_SPACE < 8);
+  DCHECK(MemoryChunk::IN_TO_SPACE < 8);
   int mask = (1 << MemoryChunk::IN_FROM_SPACE)
            | (1 << MemoryChunk::IN_TO_SPACE);
   // If non-zero, the page belongs to new-space.
@@ -146,8 +148,7 @@
 
 void MacroAssembler::RememberedSetHelper(
     Register object,  // Only used for debug checks.
-    Register addr,
-    Register scratch,
+    Register addr, Register scratch, SaveFPRegsMode save_fp,
     MacroAssembler::RememberedSetFinalAction and_then) {
   Label done;
   if (emit_debug_code()) {
@@ -175,21 +176,45 @@
     ret(0);
     bind(&buffer_overflowed);
   } else {
-    ASSERT(and_then == kFallThroughAtEnd);
+    DCHECK(and_then == kFallThroughAtEnd);
     j(equal, &done, Label::kNear);
   }
-  StoreBufferOverflowStub store_buffer_overflow =
-      StoreBufferOverflowStub(isolate());
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), save_fp);
   CallStub(&store_buffer_overflow);
   if (and_then == kReturnAtEnd) {
     ret(0);
   } else {
-    ASSERT(and_then == kFallThroughAtEnd);
+    DCHECK(and_then == kFallThroughAtEnd);
     bind(&done);
   }
 }
 
 
+void MacroAssembler::ClampTOSToUint8(Register result_reg) {
+  Label done, conv_failure;
+  sub(esp, Immediate(kPointerSize));
+  fnclex();
+  fist_s(Operand(esp, 0));
+  pop(result_reg);
+  X87CheckIA();
+  j(equal, &conv_failure, Label::kNear);
+  test(result_reg, Immediate(0xFFFFFF00));
+  j(zero, &done, Label::kNear);
+  setcc(sign, result_reg);
+  sub(result_reg, Immediate(1));
+  and_(result_reg, Immediate(255));
+  jmp(&done, Label::kNear);
+  bind(&conv_failure);
+  fnclex();
+  fldz();
+  fld(1);
+  FCmp();
+  setcc(below, result_reg);  // 1 if negative, 0 if positive.
+  dec_b(result_reg);         // 0 if negative, 255 if positive.
+  bind(&done);
+}
+
+
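// Scalar reference for ClampTOSToUint8 above (a sketch of the intended
// mapping, not the generated code): in-range values convert with the current
// x87 rounding mode, negatives and NaN clamp to 0 (the sign and conv_failure
// paths), and positive overflow clamps to 255.

#include <cmath>
#include <cstdint>

uint8_t ClampDoubleToUint8Sketch(double value) {
  if (std::isnan(value) || value < 0.0) return 0;
  if (value > 255.0) return 255;
  return static_cast<uint8_t>(std::lrint(value));  // fist_s uses current RC
}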
 void MacroAssembler::ClampUint8(Register reg) {
   Label done;
   test(reg, Immediate(0xFFFFFF00));
@@ -218,8 +243,8 @@
 
 void MacroAssembler::X87TOSToI(Register result_reg,
                                MinusZeroMode minus_zero_mode,
-                               Label* conversion_failed,
-                               Label::Distance dst) {
+                               Label* lost_precision, Label* is_nan,
+                               Label* minus_zero, Label::Distance dst) {
   Label done;
   sub(esp, Immediate(kPointerSize));
   fld(0);
@@ -227,8 +252,8 @@
   fild_s(MemOperand(esp, 0));
   pop(result_reg);
   FCmp();
-  j(not_equal, conversion_failed, dst);
-  j(parity_even, conversion_failed, dst);
+  j(not_equal, lost_precision, dst);
+  j(parity_even, is_nan, dst);
   if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
     test(result_reg, Operand(result_reg));
     j(not_zero, &done, Label::kNear);
@@ -238,7 +263,7 @@
     fst_s(MemOperand(esp, 0));
     pop(result_reg);
     test(result_reg, Operand(result_reg));
-    j(not_zero, conversion_failed, dst);
+    j(not_zero, minus_zero, dst);
   }
   bind(&done);
 }
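// X87TOSToI used to funnel every failure to one conversion_failed label; it
// now distinguishes three outcomes so callers can attach precise deopt
// reasons. The classification it performs, as a self-contained sketch:

#include <cmath>

enum class TosToIOutcome { kOk, kLostPrecision, kIsNaN, kMinusZero };

TosToIOutcome ClassifyTosToI(double value, bool fail_on_minus_zero) {
  if (std::isnan(value)) return TosToIOutcome::kIsNaN;  // j(parity_even, ...)
  if (value < -2147483648.0 || value > 2147483647.0)
    return TosToIOutcome::kLostPrecision;  // fist_s stores the indefinite value
  int i = static_cast<int>(value);
  if (static_cast<double>(i) != value)     // FCmp() -> j(not_equal, ...)
    return TosToIOutcome::kLostPrecision;
  if (fail_on_minus_zero && i == 0 && std::signbit(value))
    return TosToIOutcome::kMinusZero;      // the FAIL_ON_MINUS_ZERO re-check
  return TosToIOutcome::kOk;
}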
@@ -253,53 +278,6 @@
 }
 
 
-void MacroAssembler::TaggedToI(Register result_reg,
-                               Register input_reg,
-                               MinusZeroMode minus_zero_mode,
-                               Label* lost_precision) {
-  Label done;
-
-  cmp(FieldOperand(input_reg, HeapObject::kMapOffset),
-      isolate()->factory()->heap_number_map());
-  j(not_equal, lost_precision, Label::kNear);
-
-  // TODO(olivf) Converting a number on the fpu is actually quite slow. We
-  // should first try a fast conversion and then bailout to this slow case.
-  Label lost_precision_pop, zero_check;
-  Label* lost_precision_int = (minus_zero_mode == FAIL_ON_MINUS_ZERO)
-      ? &lost_precision_pop : lost_precision;
-  sub(esp, Immediate(kPointerSize));
-  fld_d(FieldOperand(input_reg, HeapNumber::kValueOffset));
-  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) fld(0);
-  fist_s(MemOperand(esp, 0));
-  fild_s(MemOperand(esp, 0));
-  FCmp();
-  pop(result_reg);
-  j(not_equal, lost_precision_int, Label::kNear);
-  j(parity_even, lost_precision_int, Label::kNear);  // NaN.
-  if (minus_zero_mode == FAIL_ON_MINUS_ZERO) {
-    test(result_reg, Operand(result_reg));
-    j(zero, &zero_check, Label::kNear);
-    fstp(0);
-    jmp(&done, Label::kNear);
-    bind(&zero_check);
-    // To check for minus zero, we load the value again as float, and check
-    // if that is still 0.
-    sub(esp, Immediate(kPointerSize));
-    fstp_s(Operand(esp, 0));
-    pop(result_reg);
-    test(result_reg, Operand(result_reg));
-    j(zero, &done, Label::kNear);
-    jmp(lost_precision, Label::kNear);
-
-    bind(&lost_precision_pop);
-    fstp(0);
-    jmp(lost_precision, Label::kNear);
-  }
-  bind(&done);
-}
-
-
 void MacroAssembler::LoadUint32NoSSE2(Register src) {
   Label done;
   push(src);
@@ -316,11 +294,8 @@
 
 
 void MacroAssembler::RecordWriteArray(
-    Register object,
-    Register value,
-    Register index,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check,
+    Register object, Register value, Register index, SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action, SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis.
@@ -328,7 +303,7 @@
 
   // Skip barrier if writing a smi.
   if (smi_check == INLINE_SMI_CHECK) {
-    ASSERT_EQ(0, kSmiTag);
+    DCHECK_EQ(0, kSmiTag);
     test(value, Immediate(kSmiTagMask));
     j(zero, &done);
   }
@@ -340,28 +315,24 @@
   lea(dst, Operand(object, index, times_half_pointer_size,
                    FixedArray::kHeaderSize - kHeapObjectTag));
 
-  RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK,
-              pointers_to_here_check_for_value);
+  RecordWrite(object, dst, value, save_fp, remembered_set_action,
+              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
 
   bind(&done);
 
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(index, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(index, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
 
 void MacroAssembler::RecordWriteField(
-    Register object,
-    int offset,
-    Register value,
-    Register dst,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check,
-    PointersToHereCheck pointers_to_here_check_for_value) {
+    Register object, int offset, Register value, Register dst,
+    SaveFPRegsMode save_fp, RememberedSetAction remembered_set_action,
+    SmiCheck smi_check, PointersToHereCheck pointers_to_here_check_for_value) {
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis.
   Label done;
@@ -373,7 +344,7 @@
 
   // Although the object register is tagged, the offset is relative to the start
   // of the object, so the offset must be a multiple of kPointerSize.
-  ASSERT(IsAligned(offset, kPointerSize));
+  DCHECK(IsAligned(offset, kPointerSize));
 
   lea(dst, FieldOperand(object, offset));
   if (emit_debug_code()) {
@@ -384,25 +355,23 @@
     bind(&ok);
   }
 
-  RecordWrite(object, dst, value, remembered_set_action, OMIT_SMI_CHECK,
-              pointers_to_here_check_for_value);
+  RecordWrite(object, dst, value, save_fp, remembered_set_action,
+              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
 
   bind(&done);
 
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(dst, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(dst, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
 
-void MacroAssembler::RecordWriteForMap(
-    Register object,
-    Handle<Map> map,
-    Register scratch1,
-    Register scratch2) {
+void MacroAssembler::RecordWriteForMap(Register object, Handle<Map> map,
+                                       Register scratch1, Register scratch2,
+                                       SaveFPRegsMode save_fp) {
   Label done;
 
   Register address = scratch1;
@@ -416,9 +385,9 @@
     bind(&ok);
   }
 
-  ASSERT(!object.is(value));
-  ASSERT(!object.is(address));
-  ASSERT(!value.is(address));
+  DCHECK(!object.is(value));
+  DCHECK(!object.is(address));
+  DCHECK(!value.is(address));
   AssertNotSmi(object);
 
   if (!FLAG_incremental_marking) {
@@ -428,46 +397,44 @@
   // Compute the address.
   lea(address, FieldOperand(object, HeapObject::kMapOffset));
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
   // A single check of the map's pages interesting flag suffices, since it is
   // only set during incremental collection, and then it's also guaranteed that
   // the from object's page's interesting flag is also set.  This optimization
   // relies on the fact that maps can never be in new space.
-  ASSERT(!isolate()->heap()->InNewSpace(*map));
+  DCHECK(!isolate()->heap()->InNewSpace(*map));
   CheckPageFlagForMap(map,
                       MemoryChunk::kPointersToHereAreInterestingMask,
                       zero,
                       &done,
                       Label::kNear);
 
-  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET);
+  RecordWriteStub stub(isolate(), object, value, address, OMIT_REMEMBERED_SET,
+                       save_fp);
   CallStub(&stub);
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
   // Clobber clobbered input registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch1, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(scratch2, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(scratch1, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(scratch2, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
 
 void MacroAssembler::RecordWrite(
-    Register object,
-    Register address,
-    Register value,
-    RememberedSetAction remembered_set_action,
-    SmiCheck smi_check,
+    Register object, Register address, Register value, SaveFPRegsMode fp_mode,
+    RememberedSetAction remembered_set_action, SmiCheck smi_check,
     PointersToHereCheck pointers_to_here_check_for_value) {
-  ASSERT(!object.is(value));
-  ASSERT(!object.is(address));
-  ASSERT(!value.is(address));
+  DCHECK(!object.is(value));
+  DCHECK(!object.is(address));
+  DCHECK(!value.is(address));
   AssertNotSmi(object);
 
   if (remembered_set_action == OMIT_REMEMBERED_SET &&
@@ -483,10 +450,6 @@
     bind(&ok);
   }
 
-  // Count number of write barriers in generated code.
-  isolate()->counters()->write_barriers_static()->Increment();
-  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
-
   // First, check if a write barrier is even needed. The tests below
   // catch stores of Smis and stores into young gen.
   Label done;
@@ -511,17 +474,21 @@
                 &done,
                 Label::kNear);
 
-  RecordWriteStub stub(isolate(), object, value, address,
-                       remembered_set_action);
+  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+                       fp_mode);
   CallStub(&stub);
 
   bind(&done);
 
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1);
+
   // Clobber clobbered registers when running with the debug-code flag
   // turned on to provoke errors.
   if (emit_debug_code()) {
-    mov(address, Immediate(BitCast<int32_t>(kZapValue)));
-    mov(value, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(address, Immediate(bit_cast<int32_t>(kZapValue)));
+    mov(value, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
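// The write-barrier entry points above (RememberedSetHelper, RecordWriteArray,
// RecordWriteField, RecordWriteForMap, RecordWrite) now all take a
// SaveFPRegsMode and forward it into the stubs they call: a stub cannot
// cheaply tell whether live double values sit on the x87 stack, so the
// decision is pushed to the caller. The threading pattern in miniature
// (a sketch; stub internals elided):

enum SaveFPRegsModeSketch2 { kDontSaveFPRegs2, kSaveFPRegs2 };

struct RecordWriteStubSketch {
  SaveFPRegsModeSketch2 fp_mode;  // stub code is now specialized on this
};

void RecordWriteSketch(SaveFPRegsModeSketch2 fp_mode) {
  RecordWriteStubSketch stub = {fp_mode};  // was constructed without a mode
  (void)stub;                              // CallStub(&stub) in the real code
}

void RecordWriteFieldSketch(SaveFPRegsModeSketch2 save_fp) {
  RecordWriteSketch(save_fp);  // each helper forwards, never decides
}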
 
@@ -753,6 +720,60 @@
 }
 
 
+void MacroAssembler::FXamMinusZero() {
+  fxam();
+  push(eax);
+  fnstsw_ax();
+  and_(eax, Immediate(0x4700));
+  // For minus zero, C3 == 1 && C1 == 1.
+  cmp(eax, Immediate(0x4200));
+  pop(eax);
+  fstp(0);
+}
+
+
+void MacroAssembler::FXamSign() {
+  fxam();
+  push(eax);
+  fnstsw_ax();
+  // For negative value (including -0.0), C1 == 1.
+  and_(eax, Immediate(0x0200));
+  pop(eax);
+  fstp(0);
+}
+
+
+void MacroAssembler::X87CheckIA() {
+  push(eax);
+  fnstsw_ax();
+  // For #IA, IE == 1 && SF == 0.
+  and_(eax, Immediate(0x0041));
+  cmp(eax, Immediate(0x0001));
+  pop(eax);
+}
+
+
+// rc=00B, round to nearest.
+// rc=01B, round down.
+// rc=10B, round up.
+// rc=11B, round toward zero.
+void MacroAssembler::X87SetRC(int rc) {
+  sub(esp, Immediate(kPointerSize));
+  fnstcw(MemOperand(esp, 0));
+  and_(MemOperand(esp, 0), Immediate(0xF3FF));
+  or_(MemOperand(esp, 0), Immediate(rc));
+  fldcw(MemOperand(esp, 0));
+  add(esp, Immediate(kPointerSize));
+}
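// The 0xF3FF mask in X87SetRC clears bits 10-11 of the x87 control word,
// which hold the rounding-control (RC) field, before OR-ing in the new mode.
// The resulting rc constants and the bit manipulation, spelled out (constant
// names here are illustrative):

#include <cstdint>

const int kRCToNearestSketch  = 0x0000;  // rc=00B
const int kRCDownSketch       = 0x0400;  // rc=01B
const int kRCUpSketch         = 0x0800;  // rc=10B
const int kRCTowardZeroSketch = 0x0C00;  // rc=11B

uint16_t ApplyRoundingControl(uint16_t control_word, int rc) {
  return static_cast<uint16_t>((control_word & 0xF3FF) | rc);
}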
+
+
+void MacroAssembler::X87SetFPUCW(int cw) {
+  push(Immediate(cw));
+  fldcw(MemOperand(esp, 0));
+  add(esp, Immediate(kPointerSize));
+}
+
+
 void MacroAssembler::AssertNumber(Register object) {
   if (emit_debug_code()) {
     Label ok;
@@ -871,14 +892,14 @@
 
 void MacroAssembler::EnterExitFramePrologue() {
   // Set up the frame structure on the stack.
-  ASSERT(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
-  ASSERT(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
-  ASSERT(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
+  DCHECK(ExitFrameConstants::kCallerSPDisplacement == +2 * kPointerSize);
+  DCHECK(ExitFrameConstants::kCallerPCOffset == +1 * kPointerSize);
+  DCHECK(ExitFrameConstants::kCallerFPOffset ==  0 * kPointerSize);
   push(ebp);
   mov(ebp, esp);
 
   // Reserve room for entry stack pointer and push the code object.
-  ASSERT(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
+  DCHECK(ExitFrameConstants::kSPOffset  == -1 * kPointerSize);
   push(Immediate(0));  // Saved entry sp, patched before call.
   push(Immediate(CodeObject()));  // Accessed from ExitFrame::code_slot.
 
@@ -890,13 +911,22 @@
 }
 
 
-void MacroAssembler::EnterExitFrameEpilogue(int argc) {
-  sub(esp, Immediate(argc * kPointerSize));
+void MacroAssembler::EnterExitFrameEpilogue(int argc, bool save_doubles) {
+  // Optionally save FPU state.
+  if (save_doubles) {
+    // Store FPU state to m108byte.
+    int space = 108 + argc * kPointerSize;
+    sub(esp, Immediate(space));
+    const int offset = -2 * kPointerSize;  // entry fp + code object.
+    fnsave(MemOperand(ebp, offset - 108));
+  } else {
+    sub(esp, Immediate(argc * kPointerSize));
+  }
 
   // Get the required frame alignment for the OS.
-  const int kFrameAlignment = OS::ActivationFrameAlignment();
+  const int kFrameAlignment = base::OS::ActivationFrameAlignment();
   if (kFrameAlignment > 0) {
-    ASSERT(IsPowerOf2(kFrameAlignment));
+    DCHECK(base::bits::IsPowerOfTwo32(kFrameAlignment));
     and_(esp, -kFrameAlignment);
   }
 
@@ -905,7 +935,7 @@
 }
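// Worked arithmetic for the save_doubles path above (32-bit pointers
// assumed): FNSAVE needs a 108-byte m108byte area, placed just below the two
// slots already pushed for the saved entry sp and the code object, so
// LeaveExitFrame can find it again at ebp - 2*kPointerSize - 108.

const int kPointerSizeSketch = 4;
const int kFPUStateSizeSketch = 108;  // FNSAVE/FRSTOR m108byte operand

int ExitFrameReservation(int argc, bool save_doubles) {
  return save_doubles ? kFPUStateSizeSketch + argc * kPointerSizeSketch
                      : argc * kPointerSizeSketch;
}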
 
 
-void MacroAssembler::EnterExitFrame() {
+void MacroAssembler::EnterExitFrame(bool save_doubles) {
   EnterExitFramePrologue();
 
   // Set up argc and argv in callee-saved registers.
@@ -914,17 +944,23 @@
   lea(esi, Operand(ebp, eax, times_4, offset));
 
   // Reserve space for argc, argv and isolate.
-  EnterExitFrameEpilogue(3);
+  EnterExitFrameEpilogue(3, save_doubles);
 }
 
 
 void MacroAssembler::EnterApiExitFrame(int argc) {
   EnterExitFramePrologue();
-  EnterExitFrameEpilogue(argc);
+  EnterExitFrameEpilogue(argc, false);
 }
 
 
-void MacroAssembler::LeaveExitFrame() {
+void MacroAssembler::LeaveExitFrame(bool save_doubles) {
+  // Optionally restore FPU state.
+  if (save_doubles) {
+    const int offset = -2 * kPointerSize;
+    frstor(MemOperand(ebp, offset - 108));
+  }
+
   // Get the return address from the stack and restore the frame pointer.
   mov(ecx, Operand(ebp, 1 * kPointerSize));
   mov(ebp, Operand(ebp, 0 * kPointerSize));
@@ -1112,9 +1148,9 @@
                                             Label* miss) {
   Label same_contexts;
 
-  ASSERT(!holder_reg.is(scratch1));
-  ASSERT(!holder_reg.is(scratch2));
-  ASSERT(!scratch1.is(scratch2));
+  DCHECK(!holder_reg.is(scratch1));
+  DCHECK(!holder_reg.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
 
   // Load current lexical context from the stack frame.
   mov(scratch1, Operand(ebp, StandardFrameConstants::kContextOffset));
@@ -1173,7 +1209,7 @@
 
 
 // Compute the hash code from the untagged key.  This must be kept in sync with
-// ComputeIntegerHash in utils.h and KeyedLoadGenericElementStub in
+// ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
 // code-stubs-hydrogen.cc
 //
 // Note: r0 will contain hash code
@@ -1260,7 +1296,7 @@
     and_(r2, r1);
 
     // Scale the index by multiplying by the entry size.
-    ASSERT(SeededNumberDictionary::kEntrySize == 3);
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
     lea(r2, Operand(r2, r2, times_2, 0));  // r2 = r2 * 3
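     // Worked example of the scaling step: with kEntrySize == 3 the probe
     // index must be tripled, and lea computes r2 + 2*r2 = 3*r2 in a single
     // flag-preserving instruction, avoiding a multiply.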
 
     // Check if the key matches.
@@ -1279,7 +1315,7 @@
   // Check that the value is a normal property.
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  ASSERT_EQ(NORMAL, 0);
+  DCHECK_EQ(NORMAL, 0);
   test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
   j(not_zero, miss);
@@ -1300,7 +1336,7 @@
   // Just return if allocation top is already known.
   if ((flags & RESULT_CONTAINS_TOP) != 0) {
     // No use of scratch if allocation top is provided.
-    ASSERT(scratch.is(no_reg));
+    DCHECK(scratch.is(no_reg));
 #ifdef DEBUG
     // Assert that result actually contains top on entry.
     cmp(result, Operand::StaticVariable(allocation_top));
@@ -1345,8 +1381,8 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
-  ASSERT(object_size <= Page::kMaxRegularHeapObjectSize);
+  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1361,7 +1397,7 @@
     jmp(gc_required);
     return;
   }
-  ASSERT(!result.is(result_end));
+  DCHECK(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
@@ -1372,8 +1408,8 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
@@ -1409,7 +1445,7 @@
       sub(result, Immediate(object_size));
     }
   } else if (tag_result) {
-    ASSERT(kHeapObjectTag == 1);
+    DCHECK(kHeapObjectTag == 1);
     inc(result);
   }
 }
@@ -1424,7 +1460,7 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & SIZE_IN_WORDS) == 0);
+  DCHECK((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1438,7 +1474,7 @@
     jmp(gc_required);
     return;
   }
-  ASSERT(!result.is(result_end));
+  DCHECK(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
@@ -1449,8 +1485,8 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
@@ -1471,11 +1507,11 @@
     STATIC_ASSERT(static_cast<ScaleFactor>(times_2 - 1) == times_1);
     STATIC_ASSERT(static_cast<ScaleFactor>(times_4 - 1) == times_2);
     STATIC_ASSERT(static_cast<ScaleFactor>(times_8 - 1) == times_4);
-    ASSERT(element_size >= times_2);
-    ASSERT(kSmiTagSize == 1);
+    DCHECK(element_size >= times_2);
+    DCHECK(kSmiTagSize == 1);
     element_size = static_cast<ScaleFactor>(element_size - 1);
   } else {
-    ASSERT(element_count_type == REGISTER_VALUE_IS_INT32);
+    DCHECK(element_count_type == REGISTER_VALUE_IS_INT32);
   }
   lea(result_end, Operand(element_count, element_size, header_size));
   add(result_end, result);
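
The STATIC_ASSERTs above are what make the element_size - 1 trick sound: a smi-tagged element count is the untagged count shifted left by one (kSmiTagSize == 1), so scaling the smi by half the element size yields exactly count * element_size without untagging first. Numerically (a sketch):

    #include <cassert>

    int main() {
      const int kSmiTagSize = 1;
      int count = 12;
      int smi_count = count << kSmiTagSize;  // smi encoding: count * 2

      // Scaling the smi by 4 (times_4) equals scaling the raw count by 8
      // (times_8): dropping one scale step compensates for the smi tag.
      assert(smi_count * 4 == count * 8);
    }
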
@@ -1484,7 +1520,7 @@
   j(above, gc_required);
 
   if ((flags & TAG_OBJECT) != 0) {
-    ASSERT(kHeapObjectTag == 1);
+    DCHECK(kHeapObjectTag == 1);
     inc(result);
   }
 
@@ -1499,7 +1535,7 @@
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
@@ -1513,7 +1549,7 @@
     jmp(gc_required);
     return;
   }
-  ASSERT(!result.is(result_end));
+  DCHECK(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
@@ -1524,8 +1560,8 @@
   // Align the next allocation. Storing the filler map without checking top is
   // safe in new-space because the limit of the heap is aligned there.
   if ((flags & DOUBLE_ALIGNMENT) != 0) {
-    ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
-    ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+    DCHECK(kPointerAlignment * 2 == kDoubleAlignment);
     Label aligned;
     test(result, Immediate(kDoubleAlignmentMask));
     j(zero, &aligned, Label::kNear);
@@ -1550,7 +1586,7 @@
 
   // Tag result if requested.
   if ((flags & TAG_OBJECT) != 0) {
-    ASSERT(kHeapObjectTag == 1);
+    DCHECK(kHeapObjectTag == 1);
     inc(result);
   }
 
@@ -1576,14 +1612,18 @@
 void MacroAssembler::AllocateHeapNumber(Register result,
                                         Register scratch1,
                                         Register scratch2,
-                                        Label* gc_required) {
+                                        Label* gc_required,
+                                        MutableMode mode) {
   // Allocate heap number in new space.
   Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
+  Handle<Map> map = mode == MUTABLE
+      ? isolate()->factory()->mutable_heap_number_map()
+      : isolate()->factory()->heap_number_map();
+
   // Set the map.
-  mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->heap_number_map()));
+  mov(FieldOperand(result, HeapObject::kMapOffset), Immediate(map));
 }
 
 
@@ -1595,8 +1635,8 @@
                                            Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
-  ASSERT(kShortSize == 2);
+  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK(kShortSize == 2);
   // scratch1 = length * 2 + kObjectAlignmentMask.
   lea(scratch1, Operand(length, length, times_1, kObjectAlignmentMask));
   and_(scratch1, Immediate(~kObjectAlignmentMask));
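
The lea/and pair above is a one-instruction round-up: Operand(length, length, times_1, kObjectAlignmentMask) evaluates to length + length + mask, i.e. the two-byte payload size plus the alignment slop, and the and_ then truncates down to an alignment boundary. The same computation in C++ (a sketch; the alignment constant is illustrative for the 32-bit port):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kObjectAlignment = 4;  // illustrative: pointer-size alignment
    constexpr uint32_t kObjectAlignmentMask = kObjectAlignment - 1;

    // Bytes needed for `length` two-byte characters, rounded up to alignment.
    uint32_t TwoBytePayloadSize(uint32_t length) {
      return (2 * length + kObjectAlignmentMask) & ~kObjectAlignmentMask;
    }

    int main() {
      assert(TwoBytePayloadSize(1) == 4);  // 2 bytes rounds up to 4
      assert(TwoBytePayloadSize(2) == 4);  // exactly one word, no padding
      assert(TwoBytePayloadSize(3) == 8);  // 6 bytes rounds up to 8
    }
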
@@ -1623,21 +1663,19 @@
 }
 
 
-void MacroAssembler::AllocateAsciiString(Register result,
-                                         Register length,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Register scratch3,
-                                         Label* gc_required) {
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
   // Calculate the number of bytes needed for the characters in the string while
   // observing object alignment.
-  ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
   mov(scratch1, length);
-  ASSERT(kCharSize == 1);
+  DCHECK(kCharSize == 1);
   add(scratch1, Immediate(kObjectAlignmentMask));
   and_(scratch1, Immediate(~kObjectAlignmentMask));
 
-  // Allocate ASCII string in new space.
+  // Allocate one-byte string in new space.
   Allocate(SeqOneByteString::kHeaderSize,
            times_1,
            scratch1,
@@ -1650,7 +1688,7 @@
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->ascii_string_map()));
+      Immediate(isolate()->factory()->one_byte_string_map()));
   mov(scratch1, length);
   SmiTag(scratch1);
   mov(FieldOperand(result, String::kLengthOffset), scratch1);
@@ -1659,20 +1697,18 @@
 }
 
 
-void MacroAssembler::AllocateAsciiString(Register result,
-                                         int length,
-                                         Register scratch1,
-                                         Register scratch2,
-                                         Label* gc_required) {
-  ASSERT(length > 0);
+void MacroAssembler::AllocateOneByteString(Register result, int length,
+                                           Register scratch1, Register scratch2,
+                                           Label* gc_required) {
+  DCHECK(length > 0);
 
-  // Allocate ASCII string in new space.
+  // Allocate one-byte string in new space.
   Allocate(SeqOneByteString::SizeFor(length), result, scratch1, scratch2,
            gc_required, TAG_OBJECT);
 
   // Set the map, length and hash field.
   mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->ascii_string_map()));
+      Immediate(isolate()->factory()->one_byte_string_map()));
   mov(FieldOperand(result, String::kLengthOffset),
       Immediate(Smi::FromInt(length)));
   mov(FieldOperand(result, String::kHashFieldOffset),
@@ -1694,10 +1730,10 @@
 }
 
 
-void MacroAssembler::AllocateAsciiConsString(Register result,
-                                             Register scratch1,
-                                             Register scratch2,
-                                             Label* gc_required) {
+void MacroAssembler::AllocateOneByteConsString(Register result,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
   Allocate(ConsString::kSize,
            result,
            scratch1,
@@ -1707,7 +1743,7 @@
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->cons_ascii_string_map()));
+      Immediate(isolate()->factory()->cons_one_byte_string_map()));
 }
 
 
@@ -1725,17 +1761,17 @@
 }
 
 
-void MacroAssembler::AllocateAsciiSlicedString(Register result,
-                                               Register scratch1,
-                                               Register scratch2,
-                                               Label* gc_required) {
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
   // Allocate the sliced string object in new space.

   Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
            TAG_OBJECT);
 
   // Set the map. The other fields are left uninitialized.
   mov(FieldOperand(result, HeapObject::kMapOffset),
-      Immediate(isolate()->factory()->sliced_ascii_string_map()));
+      Immediate(isolate()->factory()->sliced_one_byte_string_map()));
 }
 
 
@@ -1752,9 +1788,9 @@
                                Register length,
                                Register scratch) {
   Label short_loop, len4, len8, len12, done, short_string;
-  ASSERT(source.is(esi));
-  ASSERT(destination.is(edi));
-  ASSERT(length.is(ecx));
+  DCHECK(source.is(esi));
+  DCHECK(destination.is(edi));
+  DCHECK(length.is(ecx));
   cmp(length, Immediate(4));
   j(below, &short_string, Label::kNear);
 
@@ -1824,7 +1860,7 @@
                                     int field_offset,
                                     int bit_index) {
   bit_index += kSmiTagSize + kSmiShiftSize;
-  ASSERT(IsPowerOf2(kBitsPerByte));
+  DCHECK(base::bits::IsPowerOfTwo32(kBitsPerByte));
   int byte_index = bit_index / kBitsPerByte;
   int byte_bit_index = bit_index & (kBitsPerByte - 1);
   test_b(FieldOperand(object, field_offset + byte_index),
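
BooleanBitTest decomposes a field-relative bit index into a byte offset plus a bit within that byte, so a one-byte test_b suffices; the IsPowerOfTwo32 check guarantees the mask form of the modulo is exact. The same split in isolation (a sketch):

    #include <cassert>

    constexpr int kBitsPerByte = 8;

    // Split an absolute bit index into (byte offset, bit within that byte).
    void SplitBitIndex(int bit_index, int* byte_index, int* byte_bit_index) {
      *byte_index = bit_index / kBitsPerByte;
      *byte_bit_index = bit_index & (kBitsPerByte - 1);  // exact: 8 is 2^3
    }

    int main() {
      int byte_index, bit;
      SplitBitIndex(13, &byte_index, &bit);
      assert(byte_index == 1 && bit == 5);  // bit 13 lives in byte 1, bit 5
    }
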
@@ -1865,27 +1901,27 @@
                                              Register scratch,
                                              Label* miss,
                                              bool miss_on_bound_function) {
-  // Check that the receiver isn't a smi.
-  JumpIfSmi(function, miss);
-
-  // Check that the function really is a function.
-  CmpObjectType(function, JS_FUNCTION_TYPE, result);
-  j(not_equal, miss);
-
+  Label non_instance;
   if (miss_on_bound_function) {
+    // Check that the receiver isn't a smi.
+    JumpIfSmi(function, miss);
+
+    // Check that the function really is a function.
+    CmpObjectType(function, JS_FUNCTION_TYPE, result);
+    j(not_equal, miss);
+
     // If a bound function, go to miss label.
     mov(scratch,
         FieldOperand(function, JSFunction::kSharedFunctionInfoOffset));
     BooleanBitTest(scratch, SharedFunctionInfo::kCompilerHintsOffset,
                    SharedFunctionInfo::kBoundFunction);
     j(not_zero, miss);
-  }
 
-  // Make sure that the function has an instance prototype.
-  Label non_instance;
-  movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
-  test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
-  j(not_zero, &non_instance);
+    // Make sure that the function has an instance prototype.
+    movzx_b(scratch, FieldOperand(result, Map::kBitFieldOffset));
+    test(scratch, Immediate(1 << Map::kHasNonInstancePrototype));
+    j(not_zero, &non_instance);
+  }
 
   // Get the prototype or initial map from the function.
   mov(result,
@@ -1904,12 +1940,15 @@
 
   // Get the prototype from the initial map.
   mov(result, FieldOperand(result, Map::kPrototypeOffset));
-  jmp(&done);
 
-  // Non-instance prototype: Fetch prototype from constructor field
-  // in initial map.
-  bind(&non_instance);
-  mov(result, FieldOperand(result, Map::kConstructorOffset));
+  if (miss_on_bound_function) {
+    jmp(&done);
+
+    // Non-instance prototype: Fetch prototype from constructor field
+    // in initial map.
+    bind(&non_instance);
+    mov(result, FieldOperand(result, Map::kConstructorOffset));
+  }
 
   // All done.
   bind(&done);
@@ -1917,7 +1956,7 @@
 
 
 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) {
-  ASSERT(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
+  DCHECK(AllowThisStubCall(stub));  // Calls are not allowed in some stubs.
   call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id);
 }
 
@@ -1928,7 +1967,7 @@
 
 
 void MacroAssembler::StubReturn(int argc) {
-  ASSERT(argc >= 1 && generating_stub());
+  DCHECK(argc >= 1 && generating_stub());
   ret((argc - 1) * kPointerSize);
 }
 
@@ -1942,7 +1981,7 @@
   // The assert checks that the constants for the maximum number of digits
   // for an array index cached in the hash field and the number of bits
   // reserved for it do not conflict.
-  ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) <
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
          (1 << String::kArrayIndexValueBits));
   if (!index.is(hash)) {
     mov(index, hash);
@@ -1951,8 +1990,8 @@
 }
 
 
-void MacroAssembler::CallRuntime(const Runtime::Function* f,
-                                 int num_arguments) {
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+                                 SaveFPRegsMode save_doubles) {
   // If the expected number of arguments of the runtime function is
   // constant, we check that the actual number of arguments matches the
   // expectation.
@@ -1964,7 +2003,7 @@
   // smarter.
   Move(eax, Immediate(num_arguments));
   mov(ebx, Immediate(ExternalReference(f, isolate())));
-  CEntryStub ces(isolate(), 1);
+  CEntryStub ces(isolate(), 1, save_doubles);
   CallStub(&ces);
 }
 
@@ -2008,7 +2047,7 @@
 void MacroAssembler::PrepareCallApiFunction(int argc) {
   EnterApiExitFrame(argc);
   if (emit_debug_code()) {
-    mov(esi, Immediate(BitCast<int32_t>(kZapValue)));
+    mov(esi, Immediate(bit_cast<int32_t>(kZapValue)));
   }
 }
 
@@ -2027,7 +2066,7 @@
   ExternalReference level_address =
       ExternalReference::handle_scope_level_address(isolate());
 
-  ASSERT(edx.is(function_address));
+  DCHECK(edx.is(function_address));
   // Allocate HandleScope in callee-save registers.
   mov(ebx, Operand::StaticVariable(next_address));
   mov(edi, Operand::StaticVariable(limit_address));
@@ -2144,7 +2183,7 @@
   bind(&promote_scheduled_exception);
   {
     FrameScope frame(this, StackFrame::INTERNAL);
-    CallRuntime(Runtime::kHiddenPromoteScheduledException, 0);
+    CallRuntime(Runtime::kPromoteScheduledException, 0);
   }
   jmp(&exception_handled);
 
@@ -2184,7 +2223,7 @@
   *definitely_mismatches = false;
   Label invoke;
   if (expected.is_immediate()) {
-    ASSERT(actual.is_immediate());
+    DCHECK(actual.is_immediate());
     if (expected.immediate() == actual.immediate()) {
       definitely_matches = true;
     } else {
@@ -2208,15 +2247,15 @@
       // IC mechanism.
       cmp(expected.reg(), actual.immediate());
       j(equal, &invoke);
-      ASSERT(expected.reg().is(ebx));
+      DCHECK(expected.reg().is(ebx));
       mov(eax, actual.immediate());
     } else if (!expected.reg().is(actual.reg())) {
       // Both expected and actual are in (different) registers. This
       // is the case when we invoke functions using call and apply.
       cmp(expected.reg(), actual.reg());
       j(equal, &invoke);
-      ASSERT(actual.reg().is(eax));
-      ASSERT(expected.reg().is(ebx));
+      DCHECK(actual.reg().is(eax));
+      DCHECK(expected.reg().is(ebx));
     }
   }
 
@@ -2251,7 +2290,7 @@
                                 InvokeFlag flag,
                                 const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   Label done;
   bool definitely_mismatches = false;
@@ -2264,7 +2303,7 @@
       call(code);
       call_wrapper.AfterCall();
     } else {
-      ASSERT(flag == JUMP_FUNCTION);
+      DCHECK(flag == JUMP_FUNCTION);
       jmp(code);
     }
     bind(&done);
@@ -2277,9 +2316,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
-  ASSERT(fun.is(edi));
+  DCHECK(fun.is(edi));
   mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
   mov(ebx, FieldOperand(edx, SharedFunctionInfo::kFormalParameterCountOffset));
@@ -2297,9 +2336,9 @@
                                     InvokeFlag flag,
                                     const CallWrapper& call_wrapper) {
   // You can't call a function without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
-  ASSERT(fun.is(edi));
+  DCHECK(fun.is(edi));
   mov(esi, FieldOperand(edi, JSFunction::kContextOffset));
 
   InvokeCode(FieldOperand(edi, JSFunction::kCodeEntryOffset),
@@ -2321,7 +2360,7 @@
                                    InvokeFlag flag,
                                    const CallWrapper& call_wrapper) {
   // You can't call a builtin without a valid frame.
-  ASSERT(flag == JUMP_FUNCTION || has_frame());
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
 
   // Rely on the assertion to check that the number of provided
   // arguments matches the expected number of arguments. Fake a
@@ -2344,7 +2383,7 @@
 
 
 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
-  ASSERT(!target.is(edi));
+  DCHECK(!target.is(edi));
   // Load the JavaScript builtin function from the builtins object.
   GetBuiltinFunction(edi, id);
   // Load the code entry point from the function into the target register.
@@ -2457,7 +2496,7 @@
   // The registers are pushed starting with the lowest encoding,
   // which means that lowest encodings are furthest away from
   // the stack pointer.
-  ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
+  DCHECK(reg_code >= 0 && reg_code < kNumSafepointRegisters);
   return kNumSafepointRegisters - reg_code - 1;
 }
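
The reversal above follows directly from push order: registers are pushed starting with the lowest encoding, so the lowest-numbered register ends up deepest in the safepoint frame. A stand-alone sketch of the mapping (kNumSafepointRegisters is 8 on this port):

    #include <cassert>

    constexpr int kNumSafepointRegisters = 8;

    // Index of a register's slot in the safepoint frame, counted from esp.
    constexpr int SafepointRegisterStackIndex(int reg_code) {
      return kNumSafepointRegisters - reg_code - 1;
    }

    int main() {
      assert(SafepointRegisterStackIndex(0) == 7);  // eax: pushed first, deepest
      assert(SafepointRegisterStackIndex(7) == 0);  // pushed last, right at esp
    }
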
 
@@ -2514,10 +2553,13 @@
 
 
 void MacroAssembler::VerifyX87StackDepth(uint32_t depth) {
+  // Turn off the stack depth check when the serializer is enabled to reduce
+  // the code size.
+  if (serializer_enabled()) return;
   // Make sure the floating point stack is either empty or has depth items.
-  ASSERT(depth <= 7);
+  DCHECK(depth <= 7);
   // This is very expensive.
-  ASSERT(FLAG_debug_code && FLAG_enable_slow_asserts);
+  DCHECK(FLAG_debug_code && FLAG_enable_slow_asserts);
 
   // The top-of-stack (tos) is 7 if there is one item pushed.
   int tos = (8 - depth) % 8;
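
The modulo expression encodes how the x87 TOP field behaves: it starts at 0 for an empty register stack and decrements (mod 8) on every push, so depth pushed items leave TOP at (8 - depth) % 8. A quick check of the edge cases (illustrative only):

    #include <cassert>

    int main() {
      // TOP after pushing `depth` items onto an initially empty x87 stack.
      auto tos = [](int depth) { return (8 - depth) % 8; };
      assert(tos(0) == 0);  // empty stack
      assert(tos(1) == 7);  // one item pushed, as the comment above says
      assert(tos(7) == 1);  // nearly full
    }
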
@@ -2570,7 +2612,7 @@
 
 
 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand operand = Operand::StaticVariable(ExternalReference(counter));
     if (value == 1) {
@@ -2583,7 +2625,7 @@
 
 
 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Operand operand = Operand::StaticVariable(ExternalReference(counter));
     if (value == 1) {
@@ -2598,7 +2640,7 @@
 void MacroAssembler::IncrementCounter(Condition cc,
                                       StatsCounter* counter,
                                       int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Label skip;
     j(NegateCondition(cc), &skip);
@@ -2613,7 +2655,7 @@
 void MacroAssembler::DecrementCounter(Condition cc,
                                       StatsCounter* counter,
                                       int value) {
-  ASSERT(value > 0);
+  DCHECK(value > 0);
   if (FLAG_native_code_counters && counter->Enabled()) {
     Label skip;
     j(NegateCondition(cc), &skip);
@@ -2659,10 +2701,10 @@
 
 
 void MacroAssembler::CheckStackAlignment() {
-  int frame_alignment = OS::ActivationFrameAlignment();
+  int frame_alignment = base::OS::ActivationFrameAlignment();
   int frame_alignment_mask = frame_alignment - 1;
   if (frame_alignment > kPointerSize) {
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     Label alignment_as_expected;
     test(esp, Immediate(frame_alignment_mask));
     j(zero, &alignment_as_expected);
@@ -2790,10 +2832,8 @@
 }
 
 
-void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(
-    Register instance_type,
-    Register scratch,
-    Label* failure) {
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(
+    Register instance_type, Register scratch, Label* failure) {
   if (!scratch.is(instance_type)) {
     mov(scratch, instance_type);
   }
@@ -2804,11 +2844,11 @@
 }
 
 
-void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register object1,
-                                                         Register object2,
-                                                         Register scratch1,
-                                                         Register scratch2,
-                                                         Label* failure) {
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register object1,
+                                                           Register object2,
+                                                           Register scratch1,
+                                                           Register scratch2,
+                                                           Label* failure) {
   // Check that both objects are not smis.
   STATIC_ASSERT(kSmiTag == 0);
   mov(scratch1, object1);
@@ -2821,24 +2861,24 @@
   movzx_b(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset));
   movzx_b(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset));
 
-  // Check that both are flat ASCII strings.
-  const int kFlatAsciiStringMask =
+  // Check that both are flat one-byte strings.
+  const int kFlatOneByteStringMask =
       kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask;
-  const int kFlatAsciiStringTag =
+  const int kFlatOneByteStringTag =
       kStringTag | kOneByteStringTag | kSeqStringTag;
   // Interleave bits from both instance types and compare them in one check.
-  ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3));
-  and_(scratch1, kFlatAsciiStringMask);
-  and_(scratch2, kFlatAsciiStringMask);
+  DCHECK_EQ(0, kFlatOneByteStringMask & (kFlatOneByteStringMask << 3));
+  and_(scratch1, kFlatOneByteStringMask);
+  and_(scratch2, kFlatOneByteStringMask);
   lea(scratch1, Operand(scratch1, scratch2, times_8, 0));
-  cmp(scratch1, kFlatAsciiStringTag | (kFlatAsciiStringTag << 3));
+  cmp(scratch1, kFlatOneByteStringTag | (kFlatOneByteStringTag << 3));
   j(not_equal, failure);
 }
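
The DCHECK_EQ(0, mask & (mask << 3)) above is the load-bearing fact for the interleave: no set bit of the mask overlaps its copy shifted left by three, so lea(scratch1, Operand(scratch1, scratch2, times_8, 0)) packs both masked instance types into disjoint bit ranges of one word and a single cmp validates both strings at once. A reduced model of the check (mask and tag values are illustrative, not the real instance-type encoding):

    #include <cassert>
    #include <cstdint>

    // Illustrative 3-bit mask and the tag a flat one-byte string must
    // match under it.
    constexpr uint32_t kMask = 0x7;
    constexpr uint32_t kTag = 0x4;

    bool BothFlatOneByte(uint32_t type1, uint32_t type2) {
      static_assert((kMask & (kMask << 3)) == 0, "bit ranges must not overlap");
      // Same shape as the lea: combined = (type1 & mask) + (type2 & mask) * 8.
      uint32_t combined = (type1 & kMask) + ((type2 & kMask) << 3);
      return combined == (kTag | (kTag << 3));
    }

    int main() {
      assert(BothFlatOneByte(0x4, 0x4));
      assert(!BothFlatOneByte(0x4, 0x5));  // second string fails the test
    }
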
 
 
-void MacroAssembler::JumpIfNotUniqueName(Operand operand,
-                                         Label* not_unique_name,
-                                         Label::Distance distance) {
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Operand operand,
+                                                     Label* not_unique_name,
+                                                     Label::Distance distance) {
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
   Label succeed;
   test(operand, Immediate(kIsNotStringMask | kIsNotInternalizedMask));
@@ -2886,13 +2926,13 @@
 
 
 void MacroAssembler::PrepareCallCFunction(int num_arguments, Register scratch) {
-  int frame_alignment = OS::ActivationFrameAlignment();
+  int frame_alignment = base::OS::ActivationFrameAlignment();
   if (frame_alignment != 0) {
     // Make stack end at alignment and make room for num_arguments words
     // and the original value of esp.
     mov(scratch, esp);
     sub(esp, Immediate((num_arguments + 1) * kPointerSize));
-    ASSERT(IsPowerOf2(frame_alignment));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
     and_(esp, -frame_alignment);
     mov(Operand(esp, num_arguments * kPointerSize), scratch);
   } else {
@@ -2911,14 +2951,14 @@
 
 void MacroAssembler::CallCFunction(Register function,
                                    int num_arguments) {
-  ASSERT(has_frame());
+  DCHECK(has_frame());
   // Check stack alignment.
   if (emit_debug_code()) {
     CheckStackAlignment();
   }
 
   call(function);
-  if (OS::ActivationFrameAlignment() != 0) {
+  if (base::OS::ActivationFrameAlignment() != 0) {
     mov(esp, Operand(esp, num_arguments * kPointerSize));
   } else {
     add(esp, Immediate(num_arguments * kPointerSize));
@@ -2926,15 +2966,33 @@
 }
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
-  if (r1.is(r2)) return true;
-  if (r1.is(r3)) return true;
-  if (r1.is(r4)) return true;
-  if (r2.is(r3)) return true;
-  if (r2.is(r4)) return true;
-  if (r3.is(r4)) return true;
-  return false;
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3,
+                Register reg4,
+                Register reg5,
+                Register reg6,
+                Register reg7,
+                Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() +
+      reg3.is_valid() + reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+      reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
 }
+#endif
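
The generalized AreAliased above reduces aliasing to a popcount comparison: each valid register contributes one bit to the RegList, duplicates collapse onto the same bit, so fewer set bits than valid arguments means two arguments alias. A reduced sketch of the same idea (registers modeled as small integers, NumRegs modeled with a builtin popcount):

    #include <cassert>
    #include <cstdint>

    // Minimal model: a register is its encoding; -1 stands in for no_reg.
    bool AreAliased(int r1, int r2, int r3 = -1, int r4 = -1) {
      const int regs[] = {r1, r2, r3, r4};
      int n_valid = 0;
      uint32_t bits = 0;
      for (int r : regs) {
        if (r < 0) continue;  // skip no_reg slots
        ++n_valid;
        bits |= 1u << r;      // duplicates land on the same bit
      }
      return n_valid != __builtin_popcount(bits);  // GCC/Clang intrinsic
    }

    int main() {
      assert(!AreAliased(0, 1, 2));
      assert(AreAliased(0, 1, 0));  // the same register passed twice
    }
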
 
 
 CodePatcher::CodePatcher(byte* address, int size)
@@ -2944,17 +3002,17 @@
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
-  CPU::FlushICache(address_, size_);
+  CpuFeatures::FlushICache(address_, size_);
 
   // Check that the code was patched as expected.
-  ASSERT(masm_.pc_ == address_ + size_);
-  ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }
 
 
@@ -2965,7 +3023,7 @@
     Condition cc,
     Label* condition_met,
     Label::Distance condition_met_distance) {
-  ASSERT(cc == zero || cc == not_zero);
+  DCHECK(cc == zero || cc == not_zero);
   if (scratch.is(object)) {
     and_(scratch, Immediate(~Page::kPageAlignmentMask));
   } else {
@@ -2988,12 +3046,13 @@
     Condition cc,
     Label* condition_met,
     Label::Distance condition_met_distance) {
-  ASSERT(cc == zero || cc == not_zero);
+  DCHECK(cc == zero || cc == not_zero);
   Page* page = Page::FromAddress(map->address());
+  DCHECK(!serializer_enabled());  // Serializer cannot match page_flags.
   ExternalReference reference(ExternalReference::page_flags(page));
   // The inlined static address check of the page's flags relies
   // on maps never being compacted.
-  ASSERT(!isolate()->heap()->mark_compact_collector()->
+  DCHECK(!isolate()->heap()->mark_compact_collector()->
          IsOnEvacuationCandidate(*map));
   if (mask < (1 << kBitsPerByte)) {
     test_b(Operand::StaticVariable(reference), static_cast<uint8_t>(mask));
@@ -3024,7 +3083,7 @@
   HasColor(object, scratch0, scratch1,
            on_black, on_black_near,
            1, 0);  // kBlackBitPattern.
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
 }
 
 
@@ -3035,7 +3094,7 @@
                               Label::Distance has_color_distance,
                               int first_bit,
                               int second_bit) {
-  ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, ecx));
 
   GetMarkBits(object, bitmap_scratch, mask_scratch);
 
@@ -3059,7 +3118,7 @@
 void MacroAssembler::GetMarkBits(Register addr_reg,
                                  Register bitmap_reg,
                                  Register mask_reg) {
-  ASSERT(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
+  DCHECK(!AreAliased(addr_reg, mask_reg, bitmap_reg, ecx));
   mov(bitmap_reg, Immediate(~Page::kPageAlignmentMask));
   and_(bitmap_reg, addr_reg);
   mov(ecx, addr_reg);
@@ -3084,14 +3143,14 @@
     Register mask_scratch,
     Label* value_is_white_and_not_data,
     Label::Distance distance) {
-  ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
+  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ecx));
   GetMarkBits(value, bitmap_scratch, mask_scratch);
 
   // If the value is black or grey we don't need to do anything.
-  ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
-  ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
-  ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
-  ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
 
   Label done;
 
@@ -3129,8 +3188,8 @@
 
   bind(&not_heap_number);
   // Check for strings.
-  ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
-  ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
   // If it's a string and it's not a cons string then it's an object containing
   // no GC pointers.
   Register instance_type = ecx;
@@ -3143,24 +3202,24 @@
   Label not_external;
   // External strings are the only ones with the kExternalStringTag bit
   // set.
-  ASSERT_EQ(0, kSeqStringTag & kExternalStringTag);
-  ASSERT_EQ(0, kConsStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
   test_b(instance_type, kExternalStringTag);
   j(zero, &not_external, Label::kNear);
   mov(length, Immediate(ExternalString::kSize));
   jmp(&is_data_object, Label::kNear);
 
   bind(&not_external);
-  // Sequential string, either ASCII or UC16.
-  ASSERT(kOneByteStringTag == 0x04);
+  // Sequential string, either Latin1 or UC16.
+  DCHECK(kOneByteStringTag == 0x04);
   and_(length, Immediate(kStringEncodingMask));
   xor_(length, Immediate(kStringEncodingMask));
   add(length, Immediate(0x04));
-  // Value now either 4 (if ASCII) or 8 (if UC16), i.e., char-size shifted
+  // Value now either 4 (if Latin1) or 8 (if UC16), i.e., char-size shifted
   // by 2. If we multiply the string length as smi by this, it still
   // won't overflow a 32-bit value.
-  ASSERT_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
-  ASSERT(SeqOneByteString::kMaxSize <=
+  DCHECK_EQ(SeqOneByteString::kMaxSize, SeqTwoByteString::kMaxSize);
+  DCHECK(SeqOneByteString::kMaxSize <=
          static_cast<int>(0xffffffffu >> (2 + kSmiTagSize)));
   imul(length, FieldOperand(value, String::kLengthOffset));
   shr(length, 2 + kSmiTagSize + kSmiShiftSize);
@@ -3260,7 +3319,7 @@
     Register scratch0,
     Register scratch1,
     Label* found) {
-  ASSERT(!scratch1.is(scratch0));
+  DCHECK(!scratch1.is(scratch0));
   Factory* factory = isolate()->factory();
   Register current = scratch0;
   Label loop_again;
@@ -3282,14 +3341,16 @@
 
 
 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) {
-  ASSERT(!dividend.is(eax));
-  ASSERT(!dividend.is(edx));
-  MultiplierAndShift ms(divisor);
-  mov(eax, Immediate(ms.multiplier()));
+  DCHECK(!dividend.is(eax));
+  DCHECK(!dividend.is(edx));
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+  mov(eax, Immediate(mag.multiplier));
   imul(dividend);
-  if (divisor > 0 && ms.multiplier() < 0) add(edx, dividend);
-  if (divisor < 0 && ms.multiplier() > 0) sub(edx, dividend);
-  if (ms.shift() > 0) sar(edx, ms.shift());
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) add(edx, dividend);
+  if (divisor < 0 && !neg && mag.multiplier > 0) sub(edx, dividend);
+  if (mag.shift > 0) sar(edx, mag.shift);
   mov(eax, dividend);
   shr(eax, 31);
   add(edx, eax);
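
The rewritten TruncatingDiv is the standard signed magic-number division: imul leaves the high half of multiplier * dividend in edx, the sign-dependent add/sub corrects for a magic multiplier whose sign disagrees with the divisor's, and the final shr/add rounds the quotient toward zero. A sketch specialized to divisor == 3 (multiplier 0x55555556, shift 0, per the usual Hacker's Delight construction; illustrative, not the V8 helper):

    #include <cassert>
    #include <cstdint>

    int32_t TruncatingDivBy3(int32_t dividend) {
      const int32_t kMultiplier = 0x55555556;  // magic for divisor 3, shift 0
      // imul: edx:eax = multiplier * dividend; keep the high half (edx).
      int32_t high = static_cast<int32_t>(
          (static_cast<int64_t>(kMultiplier) * dividend) >> 32);
      // shr eax, 31; add edx, eax: bump negative quotients toward zero.
      return high + static_cast<int32_t>(static_cast<uint32_t>(dividend) >> 31);
    }

    int main() {
      assert(TruncatingDivBy3(7) == 2);
      assert(TruncatingDivBy3(-7) == -2);  // truncates toward zero, like idiv
      assert(TruncatingDivBy3(9) == 3);
    }
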
diff --git a/src/x87/macro-assembler-x87.h b/src/x87/macro-assembler-x87.h
index 84141e6..c9e9087 100644
--- a/src/x87/macro-assembler-x87.h
+++ b/src/x87/macro-assembler-x87.h
@@ -6,6 +6,7 @@
 #define V8_X87_MACRO_ASSEMBLER_X87_H_
 
 #include "src/assembler.h"
+#include "src/bailout-reason.h"
 #include "src/frames.h"
 #include "src/globals.h"
 
@@ -30,7 +31,16 @@
 };
 
 
-bool AreAliased(Register r1, Register r2, Register r3, Register r4);
+#ifdef DEBUG
+bool AreAliased(Register reg1,
+                Register reg2,
+                Register reg3 = no_reg,
+                Register reg4 = no_reg,
+                Register reg5 = no_reg,
+                Register reg6 = no_reg,
+                Register reg7 = no_reg,
+                Register reg8 = no_reg);
+#endif
 
 
 // MacroAssembler implements a collection of frequently used macros.
@@ -65,8 +75,8 @@
   // at the address pointed to by the addr register.  Only works if addr is not
   // in new space.
   void RememberedSetHelper(Register object,  // Used for debug code.
-                           Register addr,
-                           Register scratch,
+                           Register addr, Register scratch,
+                           SaveFPRegsMode save_fp,
                            RememberedSetFinalAction and_then);
 
   void CheckPageFlag(Register object,
@@ -137,10 +147,8 @@
   // The offset is the offset from the start of the object, not the offset from
   // the tagged HeapObject pointer.  For use with FieldOperand(reg, off).
   void RecordWriteField(
-      Register object,
-      int offset,
-      Register value,
-      Register scratch,
+      Register object, int offset, Register value, Register scratch,
+      SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
@@ -149,20 +157,14 @@
   // As above, but the offset has the tag presubtracted.  For use with
   // Operand(reg, off).
   void RecordWriteContextSlot(
-      Register context,
-      int offset,
-      Register value,
-      Register scratch,
+      Register context, int offset, Register value, Register scratch,
+      SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
           kPointersToHereMaybeInteresting) {
-    RecordWriteField(context,
-                     offset + kHeapObjectTag,
-                     value,
-                     scratch,
-                     remembered_set_action,
-                     smi_check,
+    RecordWriteField(context, offset + kHeapObjectTag, value, scratch, save_fp,
+                     remembered_set_action, smi_check,
                      pointers_to_here_check_for_value);
   }
 
@@ -173,9 +175,7 @@
   // filters out smis so it does not update the write barrier if the
   // value is a smi.
   void RecordWriteArray(
-      Register array,
-      Register value,
-      Register index,
+      Register array, Register value, Register index, SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
@@ -187,9 +187,7 @@
   // operation. RecordWrite filters out smis so it does not update the
   // write barrier if the value is a smi.
   void RecordWrite(
-      Register object,
-      Register address,
-      Register value,
+      Register object, Register address, Register value, SaveFPRegsMode save_fp,
       RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
       SmiCheck smi_check = INLINE_SMI_CHECK,
       PointersToHereCheck pointers_to_here_check_for_value =
@@ -198,11 +196,8 @@
   // For page containing |object| mark the region covering the object's map
   // dirty. |object| is the object being stored into, |map| is the Map object
   // that was stored.
-  void RecordWriteForMap(
-      Register object,
-      Handle<Map> map,
-      Register scratch1,
-      Register scratch2);
+  void RecordWriteForMap(Register object, Handle<Map> map, Register scratch1,
+                         Register scratch2, SaveFPRegsMode save_fp);
 
   // ---------------------------------------------------------------------------
   // Debugger Support
@@ -217,14 +212,14 @@
   // arguments in register eax and sets up the number of arguments in
   // register edi and the pointer to the first argument in register
   // esi.
-  void EnterExitFrame();
+  void EnterExitFrame(bool save_doubles);
 
   void EnterApiExitFrame(int argc);
 
   // Leave the current exit frame. Expects the return value in
   // register eax:edx (untouched) and the pointer to the first
   // argument in register esi.
-  void LeaveExitFrame();
+  void LeaveExitFrame(bool save_doubles);
 
   // Leave the current exit frame. Expects the return value in
   // register eax (untouched).
@@ -426,8 +421,14 @@
   // FCmp is similar to integer cmp, but requires unsigned
   // jcc instructions (je, ja, jae, jb, jbe, and jz).
   void FCmp();
+  void FXamMinusZero();
+  void FXamSign();
+  void X87CheckIA();
+  void X87SetRC(int rc);
+  void X87SetFPUCW(int cw);
 
   void ClampUint8(Register reg);
+  void ClampTOSToUint8(Register result_reg);
 
   void SlowTruncateToI(Register result_reg, Register input_reg,
       int offset = HeapNumber::kValueOffset - kHeapObjectTag);
@@ -436,10 +437,8 @@
   void TruncateX87TOSToI(Register result_reg);
 
   void X87TOSToI(Register result_reg, MinusZeroMode minus_zero_mode,
-      Label* conversion_failed, Label::Distance dst = Label::kFar);
-
-  void TaggedToI(Register result_reg, Register input_reg,
-      MinusZeroMode minus_zero_mode, Label* lost_precision);
+      Label* lost_precision, Label* is_nan, Label* minus_zero,
+      Label::Distance dst = Label::kFar);
 
   // Smi tagging support.
   void SmiTag(Register reg) {
@@ -617,7 +616,8 @@
   void AllocateHeapNumber(Register result,
                           Register scratch1,
                           Register scratch2,
-                          Label* gc_required);
+                          Label* gc_required,
+                          MutableMode mode = IMMUTABLE);
 
   // Allocate a sequential string. All the header fields of the string object
   // are initialized.
@@ -627,17 +627,11 @@
                              Register scratch2,
                              Register scratch3,
                              Label* gc_required);
-  void AllocateAsciiString(Register result,
-                           Register length,
-                           Register scratch1,
-                           Register scratch2,
-                           Register scratch3,
-                           Label* gc_required);
-  void AllocateAsciiString(Register result,
-                           int length,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
+  void AllocateOneByteString(Register result, int length, Register scratch1,
+                             Register scratch2, Label* gc_required);
 
   // Allocate a raw cons string object. Only the map field of the result is
   // initialized.
@@ -645,10 +639,8 @@
                           Register scratch1,
                           Register scratch2,
                           Label* gc_required);
-  void AllocateAsciiConsString(Register result,
-                               Register scratch1,
-                               Register scratch2,
-                               Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register scratch1,
+                                 Register scratch2, Label* gc_required);
 
   // Allocate a raw sliced string object. Only the map field of the result is
   // initialized.
@@ -656,10 +648,8 @@
                             Register scratch1,
                             Register scratch2,
                             Label* gc_required);
-  void AllocateAsciiSlicedString(Register result,
-                                 Register scratch1,
-                                 Register scratch2,
-                                 Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register scratch1,
+                                   Register scratch2, Label* gc_required);
 
   // Copy memory, byte-by-byte, from source to destination.  Not optimized for
   // long or aligned copies.
@@ -720,14 +710,17 @@
   void StubReturn(int argc);
 
   // Call a runtime routine.
-  void CallRuntime(const Runtime::Function* f, int num_arguments);
-  // Convenience function: Same as above, but takes the fid instead.
-  void CallRuntime(Runtime::FunctionId id) {
+  void CallRuntime(const Runtime::Function* f, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
     const Runtime::Function* function = Runtime::FunctionForId(id);
-    CallRuntime(function, function->nargs);
+    CallRuntime(function, function->nargs, kSaveFPRegs);
   }
-  void CallRuntime(Runtime::FunctionId id, int num_arguments) {
-    CallRuntime(Runtime::FunctionForId(id), num_arguments);
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
   }
 
   // Convenience function: call an external reference.
@@ -818,7 +811,7 @@
   void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
 
   Handle<Object> CodeObject() {
-    ASSERT(!code_object_.is_null());
+    DCHECK(!code_object_.is_null());
     return code_object_;
   }
 
@@ -878,29 +871,27 @@
                                Register scratch2,
                                Label* not_found);
 
-  // Check whether the instance type represents a flat ASCII string. Jump to the
-  // label if not. If the instance type can be scratched specify same register
-  // for both instance type and scratch.
-  void JumpIfInstanceTypeIsNotSequentialAscii(Register instance_type,
-                                              Register scratch,
-                                              Label* on_not_flat_ascii_string);
+  // Check whether the instance type represents a flat one-byte string. Jump to
+  // the label if not. If the instance type can be scratched specify same
+  // register for both instance type and scratch.
+  void JumpIfInstanceTypeIsNotSequentialOneByte(
+      Register instance_type, Register scratch,
+      Label* on_not_flat_one_byte_string);
 
-  // Checks if both objects are sequential ASCII strings, and jumps to label
+  // Checks if both objects are sequential one-byte strings, and jumps to label
   // if either is not.
-  void JumpIfNotBothSequentialAsciiStrings(Register object1,
-                                           Register object2,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* on_not_flat_ascii_strings);
+  void JumpIfNotBothSequentialOneByteStrings(
+      Register object1, Register object2, Register scratch1, Register scratch2,
+      Label* on_not_flat_one_byte_strings);
 
   // Checks if the given register or operand is a unique name
-  void JumpIfNotUniqueName(Register reg, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar) {
-    JumpIfNotUniqueName(Operand(reg), not_unique_name, distance);
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar) {
+    JumpIfNotUniqueNameInstanceType(Operand(reg), not_unique_name, distance);
   }
 
-  void JumpIfNotUniqueName(Operand operand, Label* not_unique_name,
-                           Label::Distance distance = Label::kFar);
+  void JumpIfNotUniqueNameInstanceType(Operand operand, Label* not_unique_name,
+                                       Label::Distance distance = Label::kFar);
 
   void EmitSeqStringSetCharCheck(Register string,
                                  Register index,
@@ -961,7 +952,7 @@
                       const CallWrapper& call_wrapper = NullCallWrapper());
 
   void EnterExitFramePrologue();
-  void EnterExitFrameEpilogue(int argc);
+  void EnterExitFrameEpilogue(int argc, bool save_doubles);
 
   void LeaveExitFrameEpilogue(bool restore_context);
 
diff --git a/src/x87/regexp-macro-assembler-x87.cc b/src/x87/regexp-macro-assembler-x87.cc
index c5ea981..9bd08ca 100644
--- a/src/x87/regexp-macro-assembler-x87.cc
+++ b/src/x87/regexp-macro-assembler-x87.cc
@@ -7,11 +7,11 @@
 #if V8_TARGET_ARCH_X87
 
 #include "src/cpu-profiler.h"
-#include "src/unicode.h"
 #include "src/log.h"
-#include "src/regexp-stack.h"
 #include "src/macro-assembler.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
 #include "src/x87/regexp-macro-assembler-x87.h"
 
 namespace v8 {
@@ -91,7 +91,7 @@
       success_label_(),
       backtrack_label_(),
       exit_label_() {
-  ASSERT_EQ(0, registers_to_save % 2);
+  DCHECK_EQ(0, registers_to_save % 2);
   __ jmp(&entry_label_);   // We'll write the entry code later.
   __ bind(&start_label_);  // And then continue from here.
 }
@@ -123,8 +123,8 @@
 
 
 void RegExpMacroAssemblerX87::AdvanceRegister(int reg, int by) {
-  ASSERT(reg >= 0);
-  ASSERT(reg < num_registers_);
+  DCHECK(reg >= 0);
+  DCHECK(reg < num_registers_);
   if (by != 0) {
     __ add(register_location(reg), Immediate(by));
   }
@@ -219,7 +219,7 @@
   __ add(eax, ebx);
   BranchOrBacktrack(greater, on_no_match);
 
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     Label success;
     Label fail;
     Label loop_increment;
@@ -281,7 +281,7 @@
     // Compute new value of character position after the matched part.
     __ sub(edi, esi);
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     // Save registers before calling C function.
     __ push(esi);
     __ push(edi);
@@ -365,11 +365,11 @@
 
   Label loop;
   __ bind(&loop);
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     __ movzx_b(eax, Operand(edx, 0));
     __ cmpb_al(Operand(ebx, 0));
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     __ movzx_w(eax, Operand(edx, 0));
     __ cmpw_ax(Operand(ebx, 0));
   }
@@ -438,7 +438,7 @@
     uc16 minus,
     uc16 mask,
     Label* on_not_equal) {
-  ASSERT(minus < String::kMaxUtf16CodeUnit);
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
   __ lea(eax, Operand(current_character(), -minus));
   if (c == 0) {
     __ test(eax, Immediate(mask));
@@ -475,7 +475,7 @@
     Label* on_bit_set) {
   __ mov(eax, Immediate(table));
   Register index = current_character();
-  if (mode_ != ASCII || kTableMask != String::kMaxOneByteCharCode) {
+  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
     __ mov(ebx, kTableSize - 1);
     __ and_(ebx, current_character());
     index = ebx;
@@ -492,7 +492,7 @@
   switch (type) {
   case 's':
     // Match space-characters
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
+      // One-byte space characters are '\t'..'\r', ' ' and \u00a0.
       Label success;
       __ cmp(current_character(), ' ');
@@ -542,12 +542,12 @@
     return true;
   }
   case 'w': {
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ cmp(current_character(), Immediate('z'));
       BranchOrBacktrack(above, on_no_match);
     }
-    ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
+    DCHECK_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
     ExternalReference word_map = ExternalReference::re_word_character_map();
     __ test_b(current_character(),
               Operand::StaticArray(current_character(), times_1, word_map));
@@ -556,17 +556,17 @@
   }
   case 'W': {
     Label done;
-    if (mode_ != ASCII) {
-      // Table is 128 entries, so all ASCII characters can be tested.
+    if (mode_ != LATIN1) {
+      // Table is 256 entries, so all Latin1 characters can be tested.
       __ cmp(current_character(), Immediate('z'));
       __ j(above, &done);
     }
-    ASSERT_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
+    DCHECK_EQ(0, word_character_map[0]);  // Character '\0' is not a word char.
     ExternalReference word_map = ExternalReference::re_word_character_map();
     __ test_b(current_character(),
               Operand::StaticArray(current_character(), times_1, word_map));
     BranchOrBacktrack(not_zero, on_no_match);
-    if (mode_ != ASCII) {
+    if (mode_ != LATIN1) {
       __ bind(&done);
     }
     return true;
@@ -583,12 +583,12 @@
     // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
     __ sub(eax, Immediate(0x0b));
     __ cmp(eax, 0x0c - 0x0b);
-    if (mode_ == ASCII) {
+    if (mode_ == LATIN1) {
       BranchOrBacktrack(above, on_no_match);
     } else {
       Label done;
       BranchOrBacktrack(below_equal, &done);
-      ASSERT_EQ(UC16, mode_);
+      DCHECK_EQ(UC16, mode_);
       // Compare original value to 0x2028 and 0x2029, using the already
       // computed ((current_char ^ 0x01) - 0x0b). I.e., check for
       // 0x201d (0x2028 - 0x0b) or 0x201e.
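
The xor/sub arithmetic above folds both one-byte line terminators into a single unsigned compare: c ^ 0x01 maps '\n' (0x0a) to 0x0b and '\r' (0x0d) to 0x0c, so subtracting 0x0b leaves 0 or 1, and in UC16 mode the same intermediate value also exposes U+2028/U+2029 as 0x201d/0x201e. The one-byte half of the trick, spelled out (a sketch):

    #include <cassert>

    // '.' must not match a line terminator: (c ^ 0x01) - 0x0b is 0 for
    // '\n' and 1 for '\r', so one unsigned compare catches both.
    bool IsLatin1LineTerminator(unsigned c) {
      return (c ^ 0x01) - 0x0b <= 0x0c - 0x0b;
    }

    int main() {
      assert(IsLatin1LineTerminator('\n'));
      assert(IsLatin1LineTerminator('\r'));
      assert(!IsLatin1LineTerminator('a'));
      assert(!IsLatin1LineTerminator(0x0b));  // '\v' is not a terminator
    }
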
@@ -946,8 +946,8 @@
                                                     Label* on_end_of_input,
                                                     bool check_bounds,
                                                     int characters) {
-  ASSERT(cp_offset >= -1);      // ^ and \b can look behind one character.
-  ASSERT(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
+  DCHECK(cp_offset >= -1);      // ^ and \b can look behind one character.
+  DCHECK(cp_offset < (1<<30));  // Be sane! (And ensure negation works)
   if (check_bounds) {
     CheckPosition(cp_offset + characters - 1, on_end_of_input);
   }
@@ -1009,7 +1009,7 @@
 
 
 void RegExpMacroAssemblerX87::SetRegister(int register_index, int to) {
-  ASSERT(register_index >= num_saved_registers_);  // Reserved for positions!
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
   __ mov(register_location(register_index), Immediate(to));
 }
 
@@ -1032,7 +1032,7 @@
 
 
 void RegExpMacroAssemblerX87::ClearRegisters(int reg_from, int reg_to) {
-  ASSERT(reg_from <= reg_to);
+  DCHECK(reg_from <= reg_to);
   __ mov(eax, Operand(ebp, kInputStartMinusOne));
   for (int reg = reg_from; reg <= reg_to; reg++) {
     __ mov(register_location(reg), eax);
@@ -1098,10 +1098,10 @@
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
 
   // Current string.
-  bool is_ascii = subject->IsOneByteRepresentationUnderneath();
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
 
-  ASSERT(re_code->instruction_start() <= *return_address);
-  ASSERT(*return_address <=
+  DCHECK(re_code->instruction_start() <= *return_address);
+  DCHECK(*return_address <=
       re_code->instruction_start() + re_code->instruction_size());
 
   Object* result = isolate->stack_guard()->HandleInterrupts();
@@ -1129,8 +1129,8 @@
   }
 
   // String might have changed.
-  if (subject_tmp->IsOneByteRepresentation() != is_ascii) {
-    // If we changed between an ASCII and an UC16 string, the specialized
+  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a LATIN1 and a UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).
     return RETRY;
@@ -1140,7 +1140,7 @@
   // be a sequential or external string with the same content.
   // Update the start and end pointers in the stack frame to the current
   // location (whether it has actually moved or not).
-  ASSERT(StringShape(*subject_tmp).IsSequential() ||
+  DCHECK(StringShape(*subject_tmp).IsSequential() ||
       StringShape(*subject_tmp).IsExternal());
 
   // The original start address of the characters to match.
@@ -1172,7 +1172,7 @@
 
 
 Operand RegExpMacroAssemblerX87::register_location(int register_index) {
-  ASSERT(register_index < (1<<30));
+  DCHECK(register_index < (1<<30));
   if (num_registers_ <= register_index) {
     num_registers_ = register_index + 1;
   }
@@ -1226,7 +1226,7 @@
 
 
 void RegExpMacroAssemblerX87::Push(Register source) {
-  ASSERT(!source.is(backtrack_stackpointer()));
+  DCHECK(!source.is(backtrack_stackpointer()));
   // Notice: This updates flags, unlike normal Push.
   __ sub(backtrack_stackpointer(), Immediate(kPointerSize));
   __ mov(Operand(backtrack_stackpointer(), 0), source);
@@ -1241,7 +1241,7 @@
 
 
 void RegExpMacroAssemblerX87::Pop(Register target) {
-  ASSERT(!target.is(backtrack_stackpointer()));
+  DCHECK(!target.is(backtrack_stackpointer()));
   __ mov(target, Operand(backtrack_stackpointer(), 0));
   // Notice: This updates flags, unlike normal Pop.
   __ add(backtrack_stackpointer(), Immediate(kPointerSize));
@@ -1277,22 +1277,22 @@
 
 void RegExpMacroAssemblerX87::LoadCurrentCharacterUnchecked(int cp_offset,
                                                              int characters) {
-  if (mode_ == ASCII) {
+  if (mode_ == LATIN1) {
     if (characters == 4) {
       __ mov(current_character(), Operand(esi, edi, times_1, cp_offset));
     } else if (characters == 2) {
       __ movzx_w(current_character(), Operand(esi, edi, times_1, cp_offset));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ movzx_b(current_character(), Operand(esi, edi, times_1, cp_offset));
     }
   } else {
-    ASSERT(mode_ == UC16);
+    DCHECK(mode_ == UC16);
     if (characters == 2) {
       __ mov(current_character(),
              Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
     } else {
-      ASSERT(characters == 1);
+      DCHECK(characters == 1);
       __ movzx_w(current_character(),
                  Operand(esi, edi, times_1, cp_offset * sizeof(uc16)));
     }
diff --git a/src/x87/regexp-macro-assembler-x87.h b/src/x87/regexp-macro-assembler-x87.h
index e4cae62..3655bd9 100644
--- a/src/x87/regexp-macro-assembler-x87.h
+++ b/src/x87/regexp-macro-assembler-x87.h
@@ -5,9 +5,9 @@
 #ifndef V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
 #define V8_X87_REGEXP_MACRO_ASSEMBLER_X87_H_
 
-#include "src/x87/assembler-x87.h"
-#include "src/x87/assembler-x87-inl.h"
 #include "src/macro-assembler.h"
+#include "src/x87/assembler-x87-inl.h"
+#include "src/x87/assembler-x87.h"
 
 namespace v8 {
 namespace internal {
@@ -174,7 +174,7 @@
 
   MacroAssembler* masm_;
 
-  // Which mode to generate code for (ASCII or UC16).
+  // Which mode to generate code for (LATIN1 or UC16).
   Mode mode_;
 
   // One greater than maximal register index actually used.
diff --git a/src/x87/stub-cache-x87.cc b/src/x87/stub-cache-x87.cc
deleted file mode 100644
index f480b51..0000000
--- a/src/x87/stub-cache-x87.cc
+++ /dev/null
@@ -1,1493 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-
-#if V8_TARGET_ARCH_X87
-
-#include "src/ic-inl.h"
-#include "src/codegen.h"
-#include "src/stub-cache.h"
-
-namespace v8 {
-namespace internal {
-
-#define __ ACCESS_MASM(masm)
-
-
-static void ProbeTable(Isolate* isolate,
-                       MacroAssembler* masm,
-                       Code::Flags flags,
-                       StubCache::Table table,
-                       Register name,
-                       Register receiver,
-                       // Number of the cache entry pointer-size scaled.
-                       Register offset,
-                       Register extra) {
-  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
-  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
-  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
-
-  Label miss;
-
-  // Multiply by 3 because there are 3 fields per entry (name, code, map).
-  __ lea(offset, Operand(offset, offset, times_2, 0));
-
-  if (extra.is_valid()) {
-    // Get the code entry from the cache.
-    __ mov(extra, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Check that the flags match what we're looking for.
-    __ mov(offset, FieldOperand(extra, Code::kFlagsOffset));
-    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
-    __ cmp(offset, flags);
-    __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    // Jump to the first instruction in the code stub.
-    __ add(extra, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(extra);
-
-    __ bind(&miss);
-  } else {
-    // Save the offset on the stack.
-    __ push(offset);
-
-    // Check that the key in the entry matches the name.
-    __ cmp(name, Operand::StaticArray(offset, times_1, key_offset));
-    __ j(not_equal, &miss);
-
-    // Check the map matches.
-    __ mov(offset, Operand::StaticArray(offset, times_1, map_offset));
-    __ cmp(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-    __ j(not_equal, &miss);
-
-    // Restore offset register.
-    __ mov(offset, Operand(esp, 0));
-
-    // Get the code entry from the cache.
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Check that the flags match what we're looking for.
-    __ mov(offset, FieldOperand(offset, Code::kFlagsOffset));
-    __ and_(offset, ~Code::kFlagsNotUsedInLookup);
-    __ cmp(offset, flags);
-    __ j(not_equal, &miss);
-
-#ifdef DEBUG
-    if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
-      __ jmp(&miss);
-    } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
-      __ jmp(&miss);
-    }
-#endif
-
-    // Restore offset and re-load code entry from cache.
-    __ pop(offset);
-    __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
-
-    // Jump to the first instruction in the code stub.
-    __ add(offset, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ jmp(offset);
-
-    // Pop at miss.
-    __ bind(&miss);
-    __ pop(offset);
-  }
-}
-
-
-void StubCompiler::GenerateDictionaryNegativeLookup(MacroAssembler* masm,
-                                                    Label* miss_label,
-                                                    Register receiver,
-                                                    Handle<Name> name,
-                                                    Register scratch0,
-                                                    Register scratch1) {
-  ASSERT(name->IsUniqueName());
-  ASSERT(!receiver.is(scratch0));
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->negative_lookups(), 1);
-  __ IncrementCounter(counters->negative_lookups_miss(), 1);
-
-  __ mov(scratch0, FieldOperand(receiver, HeapObject::kMapOffset));
-
-  const int kInterceptorOrAccessCheckNeededMask =
-      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
-
-  // Bail out if the receiver has a named interceptor or requires access checks.
-  __ test_b(FieldOperand(scratch0, Map::kBitFieldOffset),
-            kInterceptorOrAccessCheckNeededMask);
-  __ j(not_zero, miss_label);
-
-  // Check that receiver is a JSObject.
-  __ CmpInstanceType(scratch0, FIRST_SPEC_OBJECT_TYPE);
-  __ j(below, miss_label);
-
-  // Load properties array.
-  Register properties = scratch0;
-  __ mov(properties, FieldOperand(receiver, JSObject::kPropertiesOffset));
-
-  // Check that the properties array is a dictionary.
-  __ cmp(FieldOperand(properties, HeapObject::kMapOffset),
-         Immediate(masm->isolate()->factory()->hash_table_map()));
-  __ j(not_equal, miss_label);
-
-  Label done;
-  NameDictionaryLookupStub::GenerateNegativeLookup(masm,
-                                                   miss_label,
-                                                   &done,
-                                                   properties,
-                                                   name,
-                                                   scratch1);
-  __ bind(&done);
-  __ DecrementCounter(counters->negative_lookups_miss(), 1);
-}
-
-
-void StubCache::GenerateProbe(MacroAssembler* masm,
-                              Code::Flags flags,
-                              Register receiver,
-                              Register name,
-                              Register scratch,
-                              Register extra,
-                              Register extra2,
-                              Register extra3) {
-  Label miss;
-
-  // Assert that code is valid.  The multiplying code relies on the entry size
-  // being 12.
-  ASSERT(sizeof(Entry) == 12);
-
-  // Assert the flags do not name a specific type.
-  ASSERT(Code::ExtractTypeFromFlags(flags) == 0);
-
-  // Assert that there are no register conflicts.
-  ASSERT(!scratch.is(receiver));
-  ASSERT(!scratch.is(name));
-  ASSERT(!extra.is(receiver));
-  ASSERT(!extra.is(name));
-  ASSERT(!extra.is(scratch));
-
-  // Assert scratch and extra registers are valid, and extra2/3 are unused.
-  ASSERT(!scratch.is(no_reg));
-  ASSERT(extra2.is(no_reg));
-  ASSERT(extra3.is(no_reg));
-
-  Register offset = scratch;
-  scratch = no_reg;
-
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
-
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, &miss);
-
-  // Get the map of the receiver and compute the hash.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, flags);
-  // We mask out the last two bits because they are not part of the hash and
-  // they are always 01 for maps.  Also in the two 'and' instructions below.
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  // ProbeTable expects the offset to be pointer scaled, which it is, because
-  // the heap object tag size is 2 and the pointer size log 2 is also 2.
-  ASSERT(kHeapObjectTagSize == kPointerSizeLog2);
-
-  // Probe the primary table.
-  ProbeTable(isolate(), masm, flags, kPrimary, name, receiver, offset, extra);
-
-  // Primary miss: Compute hash for secondary probe.
-  __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
-  __ add(offset, FieldOperand(receiver, HeapObject::kMapOffset));
-  __ xor_(offset, flags);
-  __ and_(offset, (kPrimaryTableSize - 1) << kHeapObjectTagSize);
-  __ sub(offset, name);
-  __ add(offset, Immediate(flags));
-  __ and_(offset, (kSecondaryTableSize - 1) << kHeapObjectTagSize);
-
-  // Probe the secondary table.
-  ProbeTable(
-      isolate(), masm, flags, kSecondary, name, receiver, offset, extra);
-
-  // Cache miss: Fall-through and let caller handle the miss by
-  // entering the runtime system.
-  __ bind(&miss);
-  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1);
-}
-
-
-void StubCompiler::GenerateLoadGlobalFunctionPrototype(MacroAssembler* masm,
-                                                       int index,
-                                                       Register prototype) {
-  __ LoadGlobalFunction(index, prototype);
-  __ LoadGlobalFunctionInitialMap(prototype, prototype);
-  // Load the prototype from the initial map.
-  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm,
-    int index,
-    Register prototype,
-    Label* miss) {
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(masm->isolate()->native_context()->get(index)));
-  // Check we're still in the same context.
-  Register scratch = prototype;
-  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ mov(scratch, Operand(esi, offset));
-  __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
-  __ j(not_equal, miss);
-
-  // Load its initial map. The global functions all have initial maps.
-  __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
-  // Load the prototype from the initial map.
-  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
-}
-
-
-void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm,
-                                           Register receiver,
-                                           Register scratch,
-                                           Label* miss_label) {
-  // Check that the receiver isn't a smi.
-  __ JumpIfSmi(receiver, miss_label);
-
-  // Check that the object is a JS array.
-  __ CmpObjectType(receiver, JS_ARRAY_TYPE, scratch);
-  __ j(not_equal, miss_label);
-
-  // Load length directly from the JS array.
-  __ mov(eax, FieldOperand(receiver, JSArray::kLengthOffset));
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
-                                                 Register receiver,
-                                                 Register scratch1,
-                                                 Register scratch2,
-                                                 Label* miss_label) {
-  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
-  __ mov(eax, scratch1);
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateFastPropertyLoad(MacroAssembler* masm,
-                                            Register dst,
-                                            Register src,
-                                            bool inobject,
-                                            int index,
-                                            Representation representation) {
-  ASSERT(!representation.IsDouble());
-  int offset = index * kPointerSize;
-  if (!inobject) {
-    // Calculate the offset into the properties array.
-    offset = offset + FixedArray::kHeaderSize;
-    __ mov(dst, FieldOperand(src, JSObject::kPropertiesOffset));
-    src = dst;
-  }
-  __ mov(dst, FieldOperand(src, offset));
-}
-
-
-static void PushInterceptorArguments(MacroAssembler* masm,
-                                     Register receiver,
-                                     Register holder,
-                                     Register name,
-                                     Handle<JSObject> holder_obj) {
-  STATIC_ASSERT(StubCache::kInterceptorArgsNameIndex == 0);
-  STATIC_ASSERT(StubCache::kInterceptorArgsInfoIndex == 1);
-  STATIC_ASSERT(StubCache::kInterceptorArgsThisIndex == 2);
-  STATIC_ASSERT(StubCache::kInterceptorArgsHolderIndex == 3);
-  STATIC_ASSERT(StubCache::kInterceptorArgsLength == 4);
-  __ push(name);
-  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
-  ASSERT(!masm->isolate()->heap()->InNewSpace(*interceptor));
-  Register scratch = name;
-  __ mov(scratch, Immediate(interceptor));
-  __ push(scratch);
-  __ push(receiver);
-  __ push(holder);
-}
-
-
-static void CompileCallLoadPropertyWithInterceptor(
-    MacroAssembler* masm,
-    Register receiver,
-    Register holder,
-    Register name,
-    Handle<JSObject> holder_obj,
-    IC::UtilityId id) {
-  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
-  __ CallExternalReference(
-      ExternalReference(IC_Utility(id), masm->isolate()),
-      StubCache::kInterceptorArgsLength);
-}
-
-
-// Generate call to api function.
-// This function uses push() to generate smaller, faster code than
-// the version above. It is an optimization that will be removed
-// when api call ICs are generated in hydrogen.
-void StubCompiler::GenerateFastApiCall(MacroAssembler* masm,
-                                       const CallOptimization& optimization,
-                                       Handle<Map> receiver_map,
-                                       Register receiver,
-                                       Register scratch_in,
-                                       bool is_store,
-                                       int argc,
-                                       Register* values) {
-  // Copy return value.
-  __ pop(scratch_in);
-  // receiver
-  __ push(receiver);
-  // Write the arguments to stack frame.
-  for (int i = 0; i < argc; i++) {
-    Register arg = values[argc-1-i];
-    ASSERT(!receiver.is(arg));
-    ASSERT(!scratch_in.is(arg));
-    __ push(arg);
-  }
-  __ push(scratch_in);
-  // Stack now matches JSFunction ABI.
-  ASSERT(optimization.is_simple_api_call());
-
-  // ABI for CallApiFunctionStub.
-  Register callee = eax;
-  Register call_data = ebx;
-  Register holder = ecx;
-  Register api_function_address = edx;
-  Register scratch = edi;  // scratch_in is no longer valid.
-
-  // Put holder in place.
-  CallOptimization::HolderLookup holder_lookup;
-  Handle<JSObject> api_holder = optimization.LookupHolderOfExpectedType(
-      receiver_map,
-      &holder_lookup);
-  switch (holder_lookup) {
-    case CallOptimization::kHolderIsReceiver:
-      __ Move(holder, receiver);
-      break;
-    case CallOptimization::kHolderFound:
-      __ LoadHeapObject(holder, api_holder);
-      break;
-    case CallOptimization::kHolderNotFound:
-      UNREACHABLE();
-      break;
-  }
-
-  Isolate* isolate = masm->isolate();
-  Handle<JSFunction> function = optimization.constant_function();
-  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
-  Handle<Object> call_data_obj(api_call_info->data(), isolate);
-
-  // Put callee in place.
-  __ LoadHeapObject(callee, function);
-
-  bool call_data_undefined = false;
-  // Put call_data in place.
-  if (isolate->heap()->InNewSpace(*call_data_obj)) {
-    __ mov(scratch, api_call_info);
-    __ mov(call_data, FieldOperand(scratch, CallHandlerInfo::kDataOffset));
-  } else if (call_data_obj->IsUndefined()) {
-    call_data_undefined = true;
-    __ mov(call_data, Immediate(isolate->factory()->undefined_value()));
-  } else {
-    __ mov(call_data, call_data_obj);
-  }
-
-  // Put api_function_address in place.
-  Address function_address = v8::ToCData<Address>(api_call_info->callback());
-  __ mov(api_function_address, Immediate(function_address));
-
-  // Jump to stub.
-  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
-  __ TailCallStub(&stub);
-}
-
-
-void StoreStubCompiler::GenerateRestoreName(MacroAssembler* masm,
-                                            Label* label,
-                                            Handle<Name> name) {
-  if (!label->is_unused()) {
-    __ bind(label);
-    __ mov(this->name(), Immediate(name));
-  }
-}
-
-
-// Generate code to check that a global property cell is empty. Create
-// the property cell at compilation time if no cell exists for the
-// property.
-void StubCompiler::GenerateCheckPropertyCell(MacroAssembler* masm,
-                                             Handle<JSGlobalObject> global,
-                                             Handle<Name> name,
-                                             Register scratch,
-                                             Label* miss) {
-  Handle<PropertyCell> cell =
-      JSGlobalObject::EnsurePropertyCell(global, name);
-  ASSERT(cell->value()->IsTheHole());
-  Handle<Oddball> the_hole = masm->isolate()->factory()->the_hole_value();
-  if (masm->serializer_enabled()) {
-    __ mov(scratch, Immediate(cell));
-    __ cmp(FieldOperand(scratch, PropertyCell::kValueOffset),
-           Immediate(the_hole));
-  } else {
-    __ cmp(Operand::ForCell(cell), Immediate(the_hole));
-  }
-  __ j(not_equal, miss);
-}
-
-
-void StoreStubCompiler::GenerateNegativeHolderLookup(
-    MacroAssembler* masm,
-    Handle<JSObject> holder,
-    Register holder_reg,
-    Handle<Name> name,
-    Label* miss) {
-  if (holder->IsJSGlobalObject()) {
-    GenerateCheckPropertyCell(
-        masm, Handle<JSGlobalObject>::cast(holder), name, scratch1(), miss);
-  } else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
-    GenerateDictionaryNegativeLookup(
-        masm, miss, holder_reg, name, scratch1(), scratch2());
-  }
-}
-
-
-// Receiver_reg is preserved on jumps to miss_label, but may be destroyed if
-// store is successful.
-void StoreStubCompiler::GenerateStoreTransition(MacroAssembler* masm,
-                                                Handle<JSObject> object,
-                                                LookupResult* lookup,
-                                                Handle<Map> transition,
-                                                Handle<Name> name,
-                                                Register receiver_reg,
-                                                Register storage_reg,
-                                                Register value_reg,
-                                                Register scratch1,
-                                                Register scratch2,
-                                                Register unused,
-                                                Label* miss_label,
-                                                Label* slow) {
-  int descriptor = transition->LastAdded();
-  DescriptorArray* descriptors = transition->instance_descriptors();
-  PropertyDetails details = descriptors->GetDetails(descriptor);
-  Representation representation = details.representation();
-  ASSERT(!representation.IsNone());
-
-  if (details.type() == CONSTANT) {
-    Handle<Object> constant(descriptors->GetValue(descriptor), masm->isolate());
-    __ CmpObject(value_reg, constant);
-    __ j(not_equal, miss_label);
-  } else if (representation.IsSmi()) {
-      __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = descriptors->GetFieldType(descriptor);
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      Label do_store;
-      while (true) {
-        __ CompareMap(value_reg, it.Current());
-        it.Advance();
-        if (it.Done()) {
-          __ j(not_equal, miss_label);
-          break;
-        }
-        __ j(equal, &do_store, Label::kNear);
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    Label do_store, heap_number;
-    __ AllocateHeapNumber(storage_reg, scratch1, scratch2, slow);
-
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiUntag(value_reg);
-    __ push(value_reg);
-    __ fild_s(Operand(esp, 0));
-    __ pop(value_reg);
-    __ SmiTag(value_reg);
-    __ jmp(&do_store);
-
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
-                miss_label, DONT_DO_SMI_CHECK);
-    __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
-
-    __ bind(&do_store);
-    __ fstp_d(FieldOperand(storage_reg, HeapNumber::kValueOffset));
-  }
-
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  // Perform map transition for the receiver if necessary.
-  if (details.type() == FIELD &&
-      object->map()->unused_property_fields() == 0) {
-    // The properties must be extended before we can store the value.
-    // We jump to a runtime call that extends the properties array.
-    __ pop(scratch1);  // Return address.
-    __ push(receiver_reg);
-    __ push(Immediate(transition));
-    __ push(value_reg);
-    __ push(scratch1);
-    __ TailCallExternalReference(
-        ExternalReference(IC_Utility(IC::kSharedStoreIC_ExtendStorage),
-                          masm->isolate()),
-        3,
-        1);
-    return;
-  }
-
-  // Update the map of the object.
-  __ mov(scratch1, Immediate(transition));
-  __ mov(FieldOperand(receiver_reg, HeapObject::kMapOffset), scratch1);
-
-  // Update the write barrier for the map field.
-  __ RecordWriteField(receiver_reg,
-                      HeapObject::kMapOffset,
-                      scratch1,
-                      scratch2,
-                      OMIT_REMEMBERED_SET,
-                      OMIT_SMI_CHECK);
-
-  if (details.type() == CONSTANT) {
-    ASSERT(value_reg.is(eax));
-    __ ret(0);
-    return;
-  }
-
-  int index = transition->instance_descriptors()->GetFieldIndex(
-      transition->LastAdded());
-
-  // Adjust for the number of properties stored in the object. Even in the
-  // face of a transition we can use the old map here because the size of the
-  // object and the number of in-object properties are not going to change.
-  index -= object->map()->inobject_properties();
-
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  // TODO(verwaest): Share this code as a code stub.
-  if (index < 0) {
-    // Set the property straight into the object.
-    int offset = object->map()->instance_size() + (index * kPointerSize);
-    if (representation.IsDouble()) {
-      __ mov(FieldOperand(receiver_reg, offset), storage_reg);
-    } else {
-      __ mov(FieldOperand(receiver_reg, offset), value_reg);
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(receiver_reg,
-                          offset,
-                          storage_reg,
-                          scratch1,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    int offset = index * kPointerSize + FixedArray::kHeaderSize;
-    // Get the properties array (optimistically).
-    __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-    if (representation.IsDouble()) {
-      __ mov(FieldOperand(scratch1, offset), storage_reg);
-    } else {
-      __ mov(FieldOperand(scratch1, offset), value_reg);
-    }
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      if (!representation.IsDouble()) {
-        __ mov(storage_reg, value_reg);
-      }
-      __ RecordWriteField(scratch1,
-                          offset,
-                          storage_reg,
-                          receiver_reg,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  // Return the value (register eax).
-  ASSERT(value_reg.is(eax));
-  __ ret(0);
-}
-
-
-// Both name_reg and receiver_reg are preserved on jumps to miss_label,
-// but may be destroyed if store is successful.
-void StoreStubCompiler::GenerateStoreField(MacroAssembler* masm,
-                                           Handle<JSObject> object,
-                                           LookupResult* lookup,
-                                           Register receiver_reg,
-                                           Register name_reg,
-                                           Register value_reg,
-                                           Register scratch1,
-                                           Register scratch2,
-                                           Label* miss_label) {
-  // Stub never generated for non-global objects that require access
-  // checks.
-  ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
-
-  FieldIndex index = lookup->GetFieldIndex();
-
-  Representation representation = lookup->representation();
-  ASSERT(!representation.IsNone());
-  if (representation.IsSmi()) {
-    __ JumpIfNotSmi(value_reg, miss_label);
-  } else if (representation.IsHeapObject()) {
-    __ JumpIfSmi(value_reg, miss_label);
-    HeapType* field_type = lookup->GetFieldType();
-    HeapType::Iterator<Map> it = field_type->Classes();
-    if (!it.Done()) {
-      Label do_store;
-      while (true) {
-        __ CompareMap(value_reg, it.Current());
-        it.Advance();
-        if (it.Done()) {
-          __ j(not_equal, miss_label);
-          break;
-        }
-        __ j(equal, &do_store, Label::kNear);
-      }
-      __ bind(&do_store);
-    }
-  } else if (representation.IsDouble()) {
-    // Load the double storage.
-    if (index.is_inobject()) {
-      __ mov(scratch1, FieldOperand(receiver_reg, index.offset()));
-    } else {
-      __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-      __ mov(scratch1, FieldOperand(scratch1, index.offset()));
-    }
-
-    // Store the value into the storage.
-    Label do_store, heap_number;
-    __ JumpIfNotSmi(value_reg, &heap_number);
-    __ SmiUntag(value_reg);
-    __ push(value_reg);
-    __ fild_s(Operand(esp, 0));
-    __ pop(value_reg);
-    __ SmiTag(value_reg);
-    __ jmp(&do_store);
-    __ bind(&heap_number);
-    __ CheckMap(value_reg, masm->isolate()->factory()->heap_number_map(),
-                miss_label, DONT_DO_SMI_CHECK);
-    __ fld_d(FieldOperand(value_reg, HeapNumber::kValueOffset));
-    __ bind(&do_store);
-    __ fstp_d(FieldOperand(scratch1, HeapNumber::kValueOffset));
-    // Return the value (register eax).
-    ASSERT(value_reg.is(eax));
-    __ ret(0);
-    return;
-  }
-
-  ASSERT(!representation.IsDouble());
-  // TODO(verwaest): Share this code as a code stub.
-  SmiCheck smi_check = representation.IsTagged()
-      ? INLINE_SMI_CHECK : OMIT_SMI_CHECK;
-  if (index.is_inobject()) {
-    // Set the property straight into the object.
-    __ mov(FieldOperand(receiver_reg, index.offset()), value_reg);
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      // Pass the value being stored in the now unused name_reg.
-      __ mov(name_reg, value_reg);
-      __ RecordWriteField(receiver_reg,
-                          index.offset(),
-                          name_reg,
-                          scratch1,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  } else {
-    // Write to the properties array.
-    // Get the properties array (optimistically).
-    __ mov(scratch1, FieldOperand(receiver_reg, JSObject::kPropertiesOffset));
-    __ mov(FieldOperand(scratch1, index.offset()), value_reg);
-
-    if (!representation.IsSmi()) {
-      // Update the write barrier for the array address.
-      // Pass the value being stored in the now unused name_reg.
-      __ mov(name_reg, value_reg);
-      __ RecordWriteField(scratch1,
-                          index.offset(),
-                          name_reg,
-                          receiver_reg,
-                          EMIT_REMEMBERED_SET,
-                          smi_check);
-    }
-  }
-
-  // Return the value (register eax).
-  ASSERT(value_reg.is(eax));
-  __ ret(0);
-}
-
-
-void StubCompiler::GenerateTailCall(MacroAssembler* masm, Handle<Code> code) {
-  __ jmp(code, RelocInfo::CODE_TARGET);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Register StubCompiler::CheckPrototypes(Handle<HeapType> type,
-                                       Register object_reg,
-                                       Handle<JSObject> holder,
-                                       Register holder_reg,
-                                       Register scratch1,
-                                       Register scratch2,
-                                       Handle<Name> name,
-                                       Label* miss,
-                                       PrototypeCheckType check) {
-  Handle<Map> receiver_map(IC::TypeToMap(*type, isolate()));
-
-  // Make sure there's no overlap between holder and object registers.
-  ASSERT(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
-  ASSERT(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
-         !scratch2.is(scratch1));
-
-  // Keep track of the current object in register reg.
-  Register reg = object_reg;
-  int depth = 0;
-
-  Handle<JSObject> current = Handle<JSObject>::null();
-  if (type->IsConstant())
-    current = Handle<JSObject>::cast(type->AsConstant()->Value());
-  Handle<JSObject> prototype = Handle<JSObject>::null();
-  Handle<Map> current_map = receiver_map;
-  Handle<Map> holder_map(holder->map());
-  // Traverse the prototype chain and check the maps in the prototype chain for
-  // fast and global objects or do negative lookup for normal objects.
-  while (!current_map.is_identical_to(holder_map)) {
-    ++depth;
-
-    // Only global objects and objects that do not require access
-    // checks are allowed in stubs.
-    ASSERT(current_map->IsJSGlobalProxyMap() ||
-           !current_map->is_access_check_needed());
-
-    prototype = handle(JSObject::cast(current_map->prototype()));
-    if (current_map->is_dictionary_map() &&
-        !current_map->IsJSGlobalObjectMap() &&
-        !current_map->IsJSGlobalProxyMap()) {
-      if (!name->IsUniqueName()) {
-        ASSERT(name->IsString());
-        name = factory()->InternalizeString(Handle<String>::cast(name));
-      }
-      ASSERT(current.is_null() ||
-             current->property_dictionary()->FindEntry(name) ==
-             NameDictionary::kNotFound);
-
-      GenerateDictionaryNegativeLookup(masm(), miss, reg, name,
-                                       scratch1, scratch2);
-
-      __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-      __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-    } else {
-      bool in_new_space = heap()->InNewSpace(*prototype);
-      if (depth != 1 || check == CHECK_ALL_MAPS) {
-        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
-      }
-
-      // Check access rights to the global object.  This has to happen after
-      // the map check so that we know that the object is actually a global
-      // object.
-      if (current_map->IsJSGlobalProxyMap()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
-      } else if (current_map->IsJSGlobalObjectMap()) {
-        GenerateCheckPropertyCell(
-            masm(), Handle<JSGlobalObject>::cast(current), name,
-            scratch2, miss);
-      }
-
-      if (in_new_space) {
-        // Save the map in scratch1 for later.
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-
-      reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (in_new_space) {
-        // The prototype is in new space; we cannot store a reference to it
-        // in the code.  Load it from the map.
-        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        // The prototype is in old space; load it directly.
-        __ mov(reg, prototype);
-      }
-    }
-
-    // Go to the next object in the prototype chain.
-    current = prototype;
-    current_map = handle(current->map());
-  }
-
-  // Log the check depth.
-  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
-
-  if (depth != 0 || check == CHECK_ALL_MAPS) {
-    // Check the holder map.
-    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
-  }
-
-  // Perform security check for access to the global object.
-  ASSERT(current_map->IsJSGlobalProxyMap() ||
-         !current_map->is_access_check_needed());
-  if (current_map->IsJSGlobalProxyMap()) {
-    __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
-  }
-
-  // Return the register containing the holder.
-  return reg;
-}
-
-
-void LoadStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ jmp(&success);
-    __ bind(miss);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-void StoreStubCompiler::HandlerFrontendFooter(Handle<Name> name, Label* miss) {
-  if (!miss->is_unused()) {
-    Label success;
-    __ jmp(&success);
-    GenerateRestoreName(masm(), miss, name);
-    TailCallBuiltin(masm(), MissBuiltin(kind()));
-    __ bind(&success);
-  }
-}
-
-
-Register LoadStubCompiler::CallbackHandlerFrontend(
-    Handle<HeapType> type,
-    Register object_reg,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<Object> callback) {
-  Label miss;
-
-  Register reg = HandlerFrontendHeader(type, object_reg, holder, name, &miss);
-
-  if (!holder->HasFastProperties() && !holder->IsJSGlobalObject()) {
-    ASSERT(!reg.is(scratch2()));
-    ASSERT(!reg.is(scratch3()));
-    Register dictionary = scratch1();
-    bool must_preserve_dictionary_reg = reg.is(dictionary);
-
-    // Load the properties dictionary.
-    if (must_preserve_dictionary_reg) {
-      __ push(dictionary);
-    }
-    __ mov(dictionary, FieldOperand(reg, JSObject::kPropertiesOffset));
-
-    // Probe the dictionary.
-    Label probe_done, pop_and_miss;
-    NameDictionaryLookupStub::GeneratePositiveLookup(masm(),
-                                                     &pop_and_miss,
-                                                     &probe_done,
-                                                     dictionary,
-                                                     this->name(),
-                                                     scratch2(),
-                                                     scratch3());
-    __ bind(&pop_and_miss);
-    if (must_preserve_dictionary_reg) {
-      __ pop(dictionary);
-    }
-    __ jmp(&miss);
-    __ bind(&probe_done);
-
-    // If probing finds an entry in the dictionary, scratch2 contains the
-    // index into the dictionary. Check that the value is the callback.
-    Register index = scratch2();
-    const int kElementsStartOffset =
-        NameDictionary::kHeaderSize +
-        NameDictionary::kElementsStartIndex * kPointerSize;
-    const int kValueOffset = kElementsStartOffset + kPointerSize;
-    __ mov(scratch3(),
-           Operand(dictionary, index, times_4, kValueOffset - kHeapObjectTag));
-    if (must_preserve_dictionary_reg) {
-      __ pop(dictionary);
-    }
-    __ cmp(scratch3(), callback);
-    __ j(not_equal, &miss);
-  }
-
-  HandlerFrontendFooter(name, &miss);
-  return reg;
-}
-
-
-void LoadStubCompiler::GenerateLoadField(Register reg,
-                                         Handle<JSObject> holder,
-                                         FieldIndex field,
-                                         Representation representation) {
-  if (!reg.is(receiver())) __ mov(receiver(), reg);
-  if (kind() == Code::LOAD_IC) {
-    LoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  } else {
-    KeyedLoadFieldStub stub(isolate(), field);
-    GenerateTailCall(masm(), stub.GetCode());
-  }
-}
-
-
-void LoadStubCompiler::GenerateLoadCallback(
-    Register reg,
-    Handle<ExecutableAccessorInfo> callback) {
-  // Insert additional parameters into the stack frame above return address.
-  ASSERT(!scratch3().is(reg));
-  __ pop(scratch3());  // Get return address to place it below.
-
-  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
-  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
-  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
-  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
-  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
-  __ push(receiver());  // receiver
-  // Push data from ExecutableAccessorInfo.
-  if (isolate()->heap()->InNewSpace(callback->data())) {
-    ASSERT(!scratch2().is(reg));
-    __ mov(scratch2(), Immediate(callback));
-    __ push(FieldOperand(scratch2(), ExecutableAccessorInfo::kDataOffset));
-  } else {
-    __ push(Immediate(Handle<Object>(callback->data(), isolate())));
-  }
-  __ push(Immediate(isolate()->factory()->undefined_value()));  // ReturnValue
-  // ReturnValue default value
-  __ push(Immediate(isolate()->factory()->undefined_value()));
-  __ push(Immediate(reinterpret_cast<int>(isolate())));
-  __ push(reg);  // holder
-
-  // Save a pointer to where we pushed the arguments. This will be
-  // passed as the const PropertyAccessorInfo& to the C++ callback.
-  __ push(esp);
-
-  __ push(name());  // name
-
-  __ push(scratch3());  // Restore return address.
-
-  // ABI for CallApiGetter.
-  Register getter_address = edx;
-  Address function_address = v8::ToCData<Address>(callback->getter());
-  __ mov(getter_address, Immediate(function_address));
-
-  CallApiGetterStub stub(isolate());
-  __ TailCallStub(&stub);
-}
-
-
-void LoadStubCompiler::GenerateLoadConstant(Handle<Object> value) {
-  // Return the constant value.
-  __ LoadObject(eax, value);
-  __ ret(0);
-}
-
-
-void LoadStubCompiler::GenerateLoadInterceptor(
-    Register holder_reg,
-    Handle<Object> object,
-    Handle<JSObject> interceptor_holder,
-    LookupResult* lookup,
-    Handle<Name> name) {
-  ASSERT(interceptor_holder->HasNamedInterceptor());
-  ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
-
-  // So far the most popular follow-ups for interceptor loads are FIELD
-  // and CALLBACKS, so inline only them, other cases may be added
-  // later.
-  bool compile_followup_inline = false;
-  if (lookup->IsFound() && lookup->IsCacheable()) {
-    if (lookup->IsField()) {
-      compile_followup_inline = true;
-    } else if (lookup->type() == CALLBACKS &&
-               lookup->GetCallbackObject()->IsExecutableAccessorInfo()) {
-      ExecutableAccessorInfo* callback =
-          ExecutableAccessorInfo::cast(lookup->GetCallbackObject());
-      compile_followup_inline = callback->getter() != NULL &&
-          callback->IsCompatibleReceiver(*object);
-    }
-  }
-
-  if (compile_followup_inline) {
-    // Compile the interceptor call, followed by inline code to load the
-    // property from further up the prototype chain if the call fails.
-    // Check that the maps haven't changed.
-    ASSERT(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
-
-    // Preserve the receiver register explicitly whenever it is different from
-    // the holder and it is needed should the interceptor return without any
-    // result. The CALLBACKS case needs the receiver to be passed into C++ code;
-    // the FIELD case might cause a miss during the prototype check.
-    bool must_perform_prototype_check = *interceptor_holder != lookup->holder();
-    bool must_preserve_receiver_reg = !receiver().is(holder_reg) &&
-        (lookup->type() == CALLBACKS || must_perform_prototype_check);
-
-    // Save necessary data before invoking an interceptor.
-    // Requires a frame to make GC aware of pushed pointers.
-    {
-      FrameScope frame_scope(masm(), StackFrame::INTERNAL);
-
-      if (must_preserve_receiver_reg) {
-        __ push(receiver());
-      }
-      __ push(holder_reg);
-      __ push(this->name());
-
-      // Invoke an interceptor.  Note: map checks from receiver to
-      // interceptor's holder have been compiled before (see a caller
-      // of this method).
-      CompileCallLoadPropertyWithInterceptor(
-          masm(), receiver(), holder_reg, this->name(), interceptor_holder,
-          IC::kLoadPropertyWithInterceptorOnly);
-
-      // Check if interceptor provided a value for property.  If it's
-      // the case, return immediately.
-      Label interceptor_failed;
-      __ cmp(eax, factory()->no_interceptor_result_sentinel());
-      __ j(equal, &interceptor_failed);
-      frame_scope.GenerateLeaveFrame();
-      __ ret(0);
-
-      // Clobber registers when generating debug-code to provoke errors.
-      __ bind(&interceptor_failed);
-      if (FLAG_debug_code) {
-        __ mov(receiver(), Immediate(BitCast<int32_t>(kZapValue)));
-        __ mov(holder_reg, Immediate(BitCast<int32_t>(kZapValue)));
-        __ mov(this->name(), Immediate(BitCast<int32_t>(kZapValue)));
-      }
-
-      __ pop(this->name());
-      __ pop(holder_reg);
-      if (must_preserve_receiver_reg) {
-        __ pop(receiver());
-      }
-
-      // Leave the internal frame.
-    }
-
-    GenerateLoadPostInterceptor(holder_reg, interceptor_holder, name, lookup);
-  } else {  // !compile_followup_inline
-    // Call the runtime system to load the interceptor.
-    // Check that the maps haven't changed.
-    __ pop(scratch2());  // save old return address
-    PushInterceptorArguments(masm(), receiver(), holder_reg,
-                             this->name(), interceptor_holder);
-    __ push(scratch2());  // restore old return address
-
-    ExternalReference ref =
-        ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptor),
-                          isolate());
-    __ TailCallExternalReference(ref, StubCache::kInterceptorArgsLength, 1);
-  }
-}
-
-
-Handle<Code> StoreStubCompiler::CompileStoreCallback(
-    Handle<JSObject> object,
-    Handle<JSObject> holder,
-    Handle<Name> name,
-    Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = HandlerFrontend(
-      IC::CurrentTypeOf(object, isolate()), receiver(), holder, name);
-
-  __ pop(scratch1());  // remove the return address
-  __ push(receiver());
-  __ push(holder_reg);
-  __ Push(callback);
-  __ Push(name);
-  __ push(value());
-  __ push(scratch1());  // restore return address
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_callback_property =
-      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
-  __ TailCallExternalReference(store_callback_property, 5, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void StoreStubCompiler::GenerateStoreViaSetter(
-    MacroAssembler* masm,
-    Handle<HeapType> type,
-    Register receiver,
-    Handle<JSFunction> setter) {
-  // ----------- S t a t e -------------
-  //  -- esp[0] : return address
-  // -----------------------------------
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    // Save value register, so we can restore it later.
-    __ push(value());
-
-    if (!setter.is_null()) {
-      // Call the JavaScript setter with receiver and value on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ mov(receiver,
-               FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ push(receiver);
-      __ push(value());
-      ParameterCount actual(1);
-      ParameterCount expected(setter);
-      __ InvokeFunction(setter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // We have to return the passed value, not the return value of the setter.
-    __ pop(eax);
-
-    // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  }
-  __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> StoreStubCompiler::CompileStoreInterceptor(
-    Handle<JSObject> object,
-    Handle<Name> name) {
-  __ pop(scratch1());  // remove the return address
-  __ push(receiver());
-  __ push(this->name());
-  __ push(value());
-  __ push(scratch1());  // restore return address
-
-  // Do tail-call to the runtime system.
-  ExternalReference store_ic_property =
-      ExternalReference(IC_Utility(IC::kStoreInterceptorProperty), isolate());
-  __ TailCallExternalReference(store_ic_property, 3, 1);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-void StoreStubCompiler::GenerateStoreArrayLength() {
-  // Prepare tail call to StoreIC_ArrayLength.
-  __ pop(scratch1());  // remove the return address
-  __ push(receiver());
-  __ push(value());
-  __ push(scratch1());  // restore return address
-
-  ExternalReference ref =
-      ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength),
-                        masm()->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
-}
-
-
-Handle<Code> KeyedStoreStubCompiler::CompileStorePolymorphic(
-    MapHandleList* receiver_maps,
-    CodeHandleList* handler_stubs,
-    MapHandleList* transitioned_maps) {
-  Label miss;
-  __ JumpIfSmi(receiver(), &miss, Label::kNear);
-  __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
-  for (int i = 0; i < receiver_maps->length(); ++i) {
-    __ cmp(scratch1(), receiver_maps->at(i));
-    if (transitioned_maps->at(i).is_null()) {
-      __ j(equal, handler_stubs->at(i));
-    } else {
-      Label next_map;
-      __ j(not_equal, &next_map, Label::kNear);
-      __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
-      __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
-      __ bind(&next_map);
-    }
-  }
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  return GetICCode(
-      kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
-}
-
-
-Handle<Code> LoadStubCompiler::CompileLoadNonexistent(Handle<HeapType> type,
-                                                      Handle<JSObject> last,
-                                                      Handle<Name> name) {
-  NonexistentHandlerFrontend(type, last, name);
-
-  // Return undefined if maps of the full prototype chain are still the
-  // same and no global property with this name contains a value.
-  __ mov(eax, isolate()->factory()->undefined_value());
-  __ ret(0);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::FAST, name);
-}
-
-
-Register* LoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
-  return registers;
-}
-
-
-Register* KeyedLoadStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3, scratch4.
-  static Register registers[] = { edx, ecx, ebx, eax, edi, no_reg };
-  return registers;
-}
-
-
-Register StoreStubCompiler::value() {
-  return eax;
-}
-
-
-Register* StoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { edx, ecx, ebx, edi, no_reg };
-  return registers;
-}
-
-
-Register* KeyedStoreStubCompiler::registers() {
-  // receiver, name, scratch1, scratch2, scratch3.
-  static Register registers[] = { edx, ecx, ebx, edi, no_reg };
-  return registers;
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void LoadStubCompiler::GenerateLoadViaGetter(MacroAssembler* masm,
-                                             Handle<HeapType> type,
-                                             Register receiver,
-                                             Handle<JSFunction> getter) {
-  {
-    FrameScope scope(masm, StackFrame::INTERNAL);
-
-    if (!getter.is_null()) {
-      // Call the JavaScript getter with the receiver on the stack.
-      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
-        // Swap in the global receiver.
-        __ mov(receiver,
-                FieldOperand(receiver, JSGlobalObject::kGlobalReceiverOffset));
-      }
-      __ push(receiver);
-      ParameterCount actual(0);
-      ParameterCount expected(getter);
-      __ InvokeFunction(getter, expected, actual,
-                        CALL_FUNCTION, NullCallWrapper());
-    } else {
-      // If we generate a global code snippet for deoptimization only, remember
-      // the place to continue after deoptimization.
-      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
-    }
-
-    // Restore context register.
-    __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
-  }
-  __ ret(0);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm())
-
-
-Handle<Code> LoadStubCompiler::CompileLoadGlobal(
-    Handle<HeapType> type,
-    Handle<GlobalObject> global,
-    Handle<PropertyCell> cell,
-    Handle<Name> name,
-    bool is_dont_delete) {
-  Label miss;
-
-  HandlerFrontendHeader(type, receiver(), global, name, &miss);
-  // Get the value from the cell.
-  if (masm()->serializer_enabled()) {
-    __ mov(eax, Immediate(cell));
-    __ mov(eax, FieldOperand(eax, PropertyCell::kValueOffset));
-  } else {
-    __ mov(eax, Operand::ForCell(cell));
-  }
-
-  // Check for deleted property if property can actually be deleted.
-  if (!is_dont_delete) {
-    __ cmp(eax, factory()->the_hole_value());
-    __ j(equal, &miss);
-  } else if (FLAG_debug_code) {
-    __ cmp(eax, factory()->the_hole_value());
-    __ Check(not_equal, kDontDeleteCellsCannotContainTheHole);
-  }
-
-  Counters* counters = isolate()->counters();
-  __ IncrementCounter(counters->named_load_global_stub(), 1);
-  // The code above already loads the result into the return register.
-  __ ret(0);
-
-  HandlerFrontendFooter(name, &miss);
-
-  // Return the generated code.
-  return GetCode(kind(), Code::NORMAL, name);
-}
-
-
-Handle<Code> BaseLoadStoreStubCompiler::CompilePolymorphicIC(
-    TypeHandleList* types,
-    CodeHandleList* handlers,
-    Handle<Name> name,
-    Code::StubType type,
-    IcCheckType check) {
-  Label miss;
-
-  if (check == PROPERTY &&
-      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    __ cmp(this->name(), Immediate(name));
-    __ j(not_equal, &miss);
-  }
-
-  Label number_case;
-  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
-  __ JumpIfSmi(receiver(), smi_target);
-
-  Register map_reg = scratch1();
-  __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
-  int receiver_count = types->length();
-  int number_of_handled_maps = 0;
-  for (int current = 0; current < receiver_count; ++current) {
-    Handle<HeapType> type = types->at(current);
-    Handle<Map> map = IC::TypeToMap(*type, isolate());
-    if (!map->is_deprecated()) {
-      number_of_handled_maps++;
-      __ cmp(map_reg, map);
-      if (type->Is(HeapType::Number())) {
-        ASSERT(!number_case.is_unused());
-        __ bind(&number_case);
-      }
-      __ j(equal, handlers->at(current));
-    }
-  }
-  ASSERT(number_of_handled_maps != 0);
-
-  __ bind(&miss);
-  TailCallBuiltin(masm(), MissBuiltin(kind()));
-
-  // Return the generated code.
-  InlineCacheState state =
-      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
-  return GetICCode(kind(), type, name, state);
-}
-
-
-#undef __
-#define __ ACCESS_MASM(masm)
-
-
-void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
-    MacroAssembler* masm) {
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  Label slow, miss;
-
-  // This stub is meant to be tail-jumped to; the receiver must already
-  // have been verified by the caller to not be a smi.
-  __ JumpIfNotSmi(ecx, &miss);
-  __ mov(ebx, ecx);
-  __ SmiUntag(ebx);
-  __ mov(eax, FieldOperand(edx, JSObject::kElementsOffset));
-
-  // Push receiver on the stack to free up a register for the dictionary
-  // probing.
-  __ push(edx);
-  __ LoadFromNumberDictionary(&slow, eax, ecx, ebx, edx, edi, eax);
-  // Pop receiver before returning.
-  __ pop(edx);
-  __ ret(0);
-
-  __ bind(&slow);
-  __ pop(edx);
-
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Slow);
-
-  __ bind(&miss);
-  // ----------- S t a t e -------------
-  //  -- ecx    : key
-  //  -- edx    : receiver
-  //  -- esp[0] : return address
-  // -----------------------------------
-  TailCallBuiltin(masm, Builtins::kKeyedLoadIC_Miss);
-}
-
-
-#undef __
-
-} }  // namespace v8::internal
-
-#endif  // V8_TARGET_ARCH_X87
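The file deleted above, src/x87/stub-cache-x87.cc, centered on GenerateProbe's two-level hash probe of the megamorphic stub cache. For orientation, a simplified sketch of the index computation it emitted inline; the table sizes below are assumptions for illustration, and the real code additionally keeps the offsets pointer-scaled via kHeapObjectTagSize, which this simplification drops:

```cpp
// Simplified sketch (not V8 code) of the probe-index math in the deleted
// GenerateProbe: a primary index from the name hash, map bits and flags,
// and a secondary index derived on a primary miss.
#include <cstdint>

const std::uint32_t kPrimaryTableSize = 2048;   // assumed power of two
const std::uint32_t kSecondaryTableSize = 512;  // assumed power of two

std::uint32_t PrimaryIndex(std::uint32_t name_hash, std::uint32_t map_bits,
                           std::uint32_t flags) {
  // Mirrors the emitted sequence: add the map, xor the flags, mask.
  return ((name_hash + map_bits) ^ flags) & (kPrimaryTableSize - 1);
}

std::uint32_t SecondaryIndex(std::uint32_t primary, std::uint32_t name_bits,
                             std::uint32_t flags) {
  // Mirrors the primary-miss path: subtract the name, re-add the flags.
  return ((primary - name_bits) + flags) & (kSecondaryTableSize - 1);
}
```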
diff --git a/src/zone-allocator.h b/src/zone-allocator.h
index fd07ce2..ab0ae9c 100644
--- a/src/zone-allocator.h
+++ b/src/zone-allocator.h
@@ -62,6 +62,8 @@
   Zone* zone_;
 };
 
+typedef zone_allocator<bool> ZoneBoolAllocator;
+typedef zone_allocator<int> ZoneIntAllocator;
 } }  // namespace v8::internal
 
 #endif  // V8_ZONE_ALLOCATOR_H_
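The two typedefs added above exist so that STL containers can allocate out of a Zone. As a self-contained illustration of the pattern (a from-scratch sketch, not V8's zone_allocator; the Arena class here is a hypothetical stand-in for v8::internal::Zone):

```cpp
#include <cstddef>
#include <vector>

// Toy arena standing in for v8::internal::Zone: hands out blocks and frees
// everything at once in its destructor.
class Arena {
 public:
  ~Arena() {
    for (std::size_t i = 0; i < blocks_.size(); ++i) delete[] blocks_[i];
  }
  void* New(std::size_t size) {
    blocks_.push_back(new char[size]);
    return blocks_.back();
  }
 private:
  std::vector<char*> blocks_;
};

// Minimal C++11 allocator that forwards to the arena; deallocate() is a
// no-op because memory is reclaimed in bulk when the arena dies.
template <typename T>
class arena_allocator {
 public:
  typedef T value_type;
  explicit arena_allocator(Arena* arena) : arena_(arena) {}
  template <typename U>
  arena_allocator(const arena_allocator<U>& other) : arena_(other.arena_) {}
  T* allocate(std::size_t n) {
    return static_cast<T*>(arena_->New(n * sizeof(T)));
  }
  void deallocate(T*, std::size_t) {}
  Arena* arena_;
};

template <typename T, typename U>
bool operator==(const arena_allocator<T>& a, const arena_allocator<U>& b) {
  return a.arena_ == b.arena_;
}
template <typename T, typename U>
bool operator!=(const arena_allocator<T>& a, const arena_allocator<U>& b) {
  return !(a == b);
}

int main() {
  Arena arena;
  arena_allocator<int> alloc(&arena);
  std::vector<int, arena_allocator<int> > ints(alloc);
  ints.push_back(42);
  return ints[0] == 42 ? 0 : 1;  // no explicit frees; Arena cleans up
}
```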
diff --git a/src/zone-containers.h b/src/zone-containers.h
index 3b08b47..2ee1780 100644
--- a/src/zone-containers.h
+++ b/src/zone-containers.h
@@ -5,19 +5,54 @@
 #ifndef V8_ZONE_CONTAINERS_H_
 #define V8_ZONE_CONTAINERS_H_
 
+#include <deque>
+#include <queue>
 #include <vector>
-#include <set>
 
-#include "src/zone.h"
+#include "src/zone-allocator.h"
 
 namespace v8 {
 namespace internal {
 
-typedef zone_allocator<int> ZoneIntAllocator;
-typedef std::vector<int, ZoneIntAllocator> IntVector;
-typedef IntVector::iterator IntVectorIter;
-typedef IntVector::reverse_iterator IntVectorRIter;
+// A wrapper subclass for std::vector to make it easy to construct one
+// that uses a zone allocator.
+template <typename T>
+class ZoneVector : public std::vector<T, zone_allocator<T> > {
+ public:
+  // Constructs an empty vector.
+  explicit ZoneVector(Zone* zone)
+      : std::vector<T, zone_allocator<T> >(zone_allocator<T>(zone)) {}
 
+  // Constructs a new vector and fills it with {size} elements, each
+  // having the value {def}.
+  ZoneVector(int size, T def, Zone* zone)
+      : std::vector<T, zone_allocator<T> >(size, def, zone_allocator<T>(zone)) {
+  }
+};
+
+// A wrapper subclass for std::deque to make it easy to construct one
+// that uses a zone allocator.
+template <typename T>
+class ZoneDeque : public std::deque<T, zone_allocator<T> > {
+ public:
+  explicit ZoneDeque(Zone* zone)
+      : std::deque<T, zone_allocator<T> >(zone_allocator<T>(zone)) {}
+};
+
+// A wrapper subclass for std::queue to make it easy to construct one
+// that uses a zone allocator.
+template <typename T>
+class ZoneQueue : public std::queue<T, std::deque<T, zone_allocator<T> > > {
+ public:
+  // Constructs an empty queue.
+  explicit ZoneQueue(Zone* zone)
+      : std::queue<T, std::deque<T, zone_allocator<T> > >(
+            std::deque<T, zone_allocator<T> >(zone_allocator<T>(zone))) {}
+};
+
+// Typedefs to shorten commonly used vectors.
+typedef ZoneVector<bool> BoolVector;
+typedef ZoneVector<int> IntVector;
 } }  // namespace v8::internal
 
 #endif  // V8_ZONE_CONTAINERS_H_
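The ZoneVector/ZoneDeque/ZoneQueue wrappers added above exist purely to bake the allocator argument into the constructor, so call sites pass a Zone* instead of spelling out the allocator type each time. The same pattern, reduced to a compilable sketch against std::allocator (names here are illustrative, not V8's):

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Wrapper-subclass pattern from zone-containers.h: derive from std::vector
// only to fix the allocator in the constructors.
template <typename T, typename Alloc = std::allocator<T> >
class AllocVector : public std::vector<T, Alloc> {
 public:
  explicit AllocVector(const Alloc& alloc) : std::vector<T, Alloc>(alloc) {}
  AllocVector(std::size_t size, T def, const Alloc& alloc)
      : std::vector<T, Alloc>(size, def, alloc) {}
};

int main() {
  AllocVector<int> v(4, 7, std::allocator<int>());
  assert(v.size() == 4 && v[3] == 7);
  return 0;
}
```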
diff --git a/src/zone-inl.h b/src/zone-inl.h
index 6c5aecd..cf037b5 100644
--- a/src/zone-inl.h
+++ b/src/zone-inl.h
@@ -24,54 +24,6 @@
 static const int kASanRedzoneBytes = 24;  // Must be a multiple of 8.
 
 
-inline void* Zone::New(int size) {
-  // Round up the requested size to fit the alignment.
-  size = RoundUp(size, kAlignment);
-
-  // If the allocation size is divisible by 8 then we return an 8-byte aligned
-  // address.
-  if (kPointerSize == 4 && kAlignment == 4) {
-    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
-  } else {
-    ASSERT(kAlignment >= kPointerSize);
-  }
-
-  // Check if the requested size is available without expanding.
-  Address result = position_;
-
-  int size_with_redzone =
-#ifdef V8_USE_ADDRESS_SANITIZER
-      size + kASanRedzoneBytes;
-#else
-      size;
-#endif
-
-  if (size_with_redzone > limit_ - position_) {
-     result = NewExpand(size_with_redzone);
-  } else {
-     position_ += size_with_redzone;
-  }
-
-#ifdef V8_USE_ADDRESS_SANITIZER
-  Address redzone_position = result + size;
-  ASSERT(redzone_position + kASanRedzoneBytes == position_);
-  ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
-#endif
-
-  // Check that the result has the proper alignment and return it.
-  ASSERT(IsAddressAligned(result, kAlignment, 0));
-  allocation_size_ += size;
-  return reinterpret_cast<void*>(result);
-}
-
-
-template <typename T>
-T* Zone::NewArray(int length) {
-  CHECK(std::numeric_limits<int>::max() / static_cast<int>(sizeof(T)) > length);
-  return static_cast<T*>(New(length * sizeof(T)));
-}
-
-
 bool Zone::excess_allocation() {
   return segment_bytes_allocated_ > kExcessLimit;
 }
@@ -97,7 +49,7 @@
 }
 
 inline void* ZoneAllocationPolicy::New(size_t size) {
-  ASSERT(zone_);
+  DCHECK(zone_);
   return zone_->New(static_cast<int>(size));
 }
 
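The ASSERT → DCHECK rename in this hunk repeats across the whole patch. The intended semantics, reduced to a sketch (these are not V8's actual macro definitions, just the usual debug/release split they implement):

```cpp
// Sketch of the DCHECK/CHECK distinction: DCHECK compiles away in release
// builds, while CHECK (as in Zone::NewArray below) is always evaluated.
#include <cstdio>
#include <cstdlib>

#define CHECK(cond)                                        \
  do {                                                     \
    if (!(cond)) {                                         \
      std::fprintf(stderr, "Check failed: %s\n", #cond);   \
      std::abort();                                        \
    }                                                      \
  } while (false)

#ifdef DEBUG
#define DCHECK(cond) CHECK(cond)
#else
#define DCHECK(cond) ((void)0)
#endif

int main() {
  DCHECK(1 + 1 == 2);  // no-op unless built with -DDEBUG
  CHECK(2 + 2 == 4);   // always evaluated
  return 0;
}
```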
diff --git a/src/zone.cc b/src/zone.cc
index d4fa42f..48d8c7b 100644
--- a/src/zone.cc
+++ b/src/zone.cc
@@ -58,7 +58,48 @@
   DeleteAll();
   DeleteKeptSegment();
 
-  ASSERT(segment_bytes_allocated_ == 0);
+  DCHECK(segment_bytes_allocated_ == 0);
+}
+
+
+void* Zone::New(int size) {
+  // Round up the requested size to fit the alignment.
+  size = RoundUp(size, kAlignment);
+
+  // If the allocation size is divisible by 8 then we return an 8-byte aligned
+  // address.
+  if (kPointerSize == 4 && kAlignment == 4) {
+    position_ += ((~size) & 4) & (reinterpret_cast<intptr_t>(position_) & 4);
+  } else {
+    DCHECK(kAlignment >= kPointerSize);
+  }
+
+  // Check if the requested size is available without expanding.
+  Address result = position_;
+
+  int size_with_redzone =
+#ifdef V8_USE_ADDRESS_SANITIZER
+      size + kASanRedzoneBytes;
+#else
+      size;
+#endif
+
+  if (size_with_redzone > limit_ - position_) {
+     result = NewExpand(size_with_redzone);
+  } else {
+     position_ += size_with_redzone;
+  }
+
+#ifdef V8_USE_ADDRESS_SANITIZER
+  Address redzone_position = result + size;
+  DCHECK(redzone_position + kASanRedzoneBytes == position_);
+  ASAN_POISON_MEMORY_REGION(redzone_position, kASanRedzoneBytes);
+#endif
+
+  // Check that the result has the proper alignment and return it.
+  DCHECK(IsAddressAligned(result, kAlignment, 0));
+  allocation_size_ += size;
+  return reinterpret_cast<void*>(result);
 }
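
A worked example of the 32-bit alignment nudge above (sketch only; the
helper below is hypothetical and just isolates the bit trick). When size is
a multiple of 8, (~size) & 4 is 4; if position_ is only 4-aligned,
position_ & 4 is also 4, and their AND advances the cursor by 4 so the
returned address ends up 8-byte aligned.

// Hypothetical standalone version of the nudge, for illustration.
intptr_t Nudge(intptr_t position, int size) {
  return position + (((~size) & 4) & (position & 4));
}
// Nudge(0x1004, 8) == 0x1008  (4-aligned start, 8-byte request)
// Nudge(0x1008, 8) == 0x1008  (already 8-aligned)
// Nudge(0x1004, 4) == 0x1004  (4-byte request needs no nudge)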
 
 
@@ -120,7 +161,7 @@
   static const unsigned char kZapDeadByte = 0xcd;
 #endif
 
-  ASSERT(segment_head_ == NULL || segment_head_->next() == NULL);
+  DCHECK(segment_head_ == NULL || segment_head_->next() == NULL);
   if (segment_head_ != NULL) {
     int size = segment_head_->size();
 #ifdef DEBUG
@@ -133,7 +174,7 @@
     segment_head_ = NULL;
   }
 
-  ASSERT(segment_bytes_allocated_ == 0);
+  DCHECK(segment_bytes_allocated_ == 0);
 }
 
 
@@ -160,8 +201,8 @@
 Address Zone::NewExpand(int size) {
   // Make sure the requested size is already properly aligned and that
   // there isn't enough room in the Zone to satisfy the request.
-  ASSERT(size == RoundDown(size, kAlignment));
-  ASSERT(size > limit_ - position_);
+  DCHECK(size == RoundDown(size, kAlignment));
+  DCHECK(size > limit_ - position_);
 
   // Compute the new segment size. We use a 'high water mark'
   // strategy, where we increase the segment size every time we expand
@@ -210,7 +251,7 @@
     return NULL;
   }
   limit_ = segment->end();
-  ASSERT(position_ <= limit_);
+  DCHECK(position_ <= limit_);
   return result;
 }
 
diff --git a/src/zone.h b/src/zone.h
index d31d642..6f552b6 100644
--- a/src/zone.h
+++ b/src/zone.h
@@ -5,10 +5,12 @@
 #ifndef V8_ZONE_H_
 #define V8_ZONE_H_
 
+#include <limits>
+
 #include "src/allocation.h"
-#include "src/checks.h"
-#include "src/hashmap.h"
+#include "src/base/logging.h"
 #include "src/globals.h"
+#include "src/hashmap.h"
 #include "src/list.h"
 #include "src/splay-tree.h"
 
@@ -38,10 +40,14 @@
   ~Zone();
   // Allocate 'size' bytes of memory in the Zone; expands the Zone by
   // allocating new segments of memory on demand using malloc().
-  inline void* New(int size);
+  void* New(int size);
 
   template <typename T>
-  inline T* NewArray(int length);
+  T* NewArray(int length) {
+    CHECK(std::numeric_limits<int>::max() / static_cast<int>(sizeof(T)) >
+          length);
+    return static_cast<T*>(New(length * sizeof(T)));
+  }
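
The CHECK rejects lengths whose byte size would overflow int before the
multiplication happens, which is why it is written in divided form. A
sketch of the case it catches (FitsInInt is hypothetical):

// With 32-bit int and sizeof(T) == 8, length == 300000000 gives a byte
// size of 2.4e9, past INT_MAX, so the divided comparison must come first.
bool FitsInInt(int length, int element_size) {
  return std::numeric_limits<int>::max() / element_size > length;
}
// FitsInInt(300000000, 8) == false  -> Zone::NewArray would CHECK-fail
// FitsInInt(1000, 8)      == true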
 
   // Deletes all objects and frees all memory allocated in the Zone. Keeps one
   // small (size <= kMaximumKeptSegmentSize) segment around if it finds one.
@@ -57,9 +63,9 @@
 
   inline void adjust_segment_bytes_allocated(int delta);
 
-  inline unsigned allocation_size() { return allocation_size_; }
+  inline unsigned allocation_size() const { return allocation_size_; }
 
-  inline Isolate* isolate() { return isolate_; }
+  inline Isolate* isolate() const { return isolate_; }
 
  private:
   friend class Isolate;
diff --git a/test/base-unittests/base-unittests.status b/test/base-unittests/base-unittests.status
new file mode 100644
index 0000000..d439913
--- /dev/null
+++ b/test/base-unittests/base-unittests.status
@@ -0,0 +1,6 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/test/benchmarks/benchmarks.status b/test/benchmarks/benchmarks.status
index d651b3c..a08fa41 100644
--- a/test/benchmarks/benchmarks.status
+++ b/test/benchmarks/benchmarks.status
@@ -25,9 +25,11 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# Too slow in Debug mode.
 [
 [ALWAYS, {
+  # Too slow in Debug mode.
   'octane/mandreel': [PASS, ['mode == debug', SKIP]],
+  # TODO(turbofan): Too slow in debug mode for now.
+  'octane/pdfjs': [PASS, ['mode == debug', SKIP]],
 }],  # ALWAYS
 ]
diff --git a/test/benchmarks/testcfg.py b/test/benchmarks/testcfg.py
index c94a35f..6607bef 100644
--- a/test/benchmarks/testcfg.py
+++ b/test/benchmarks/testcfg.py
@@ -183,8 +183,10 @@
     os.chdir(old_cwd)
 
   def VariantFlags(self, testcase, default_flags):
-    # Both --nocrankshaft and --stressopt are very slow.
-    return [[]]
+    # Both --nocrankshaft and --stressopt are very slow. Add TurboFan, but
+    # without always-opt, to match the way the benchmarks are run for
+    # performance testing.
+    return [[], ["--turbo-filter=*"]]
 
 
 def GetSuite(name, root):
diff --git a/test/cctest/OWNERS b/test/cctest/OWNERS
index 6d5f927..93565c5 100644
--- a/test/cctest/OWNERS
+++ b/test/cctest/OWNERS
@@ -1,2 +1,5 @@
-per-file *-mips.*=plind44@gmail.com
-per-file *-mips.*=gergely@homejinni.com
+per-file *-mips*=paul.lind@imgtec.com
+per-file *-mips*=gergely.kis@imgtec.com
+per-file *-mips*=akos.palfi@imgtec.com
+per-file *-mips*=balazs.kilvady@imgtec.com
+per-file *-mips*=dusan.milosavljevic@imgtec.com
diff --git a/test/cctest/cctest.cc b/test/cctest/cctest.cc
index 0be1520..f03710a 100644
--- a/test/cctest/cctest.cc
+++ b/test/cctest/cctest.cc
@@ -28,11 +28,19 @@
 #include "include/v8.h"
 #include "test/cctest/cctest.h"
 
+#include "include/libplatform/libplatform.h"
 #include "src/debug.h"
 #include "test/cctest/print-extension.h"
 #include "test/cctest/profiler-extension.h"
 #include "test/cctest/trace-extension.h"
 
+#if (defined(_WIN32) || defined(_WIN64))
+#include <windows.h>  // NOLINT
+#if defined(_MSC_VER)
+#include <crtdbg.h>
+#endif  // defined(_MSC_VER)
+#endif  // defined(_WIN32) || defined(_WIN64)
+
 enum InitializationState {kUnset, kUnintialized, kInitialized};
 static InitializationState initialization_state_  = kUnset;
 static bool disable_automatic_dispose_ = false;
@@ -137,10 +145,27 @@
 
 
 int main(int argc, char* argv[]) {
-  v8::V8::InitializeICU();
-  i::Isolate::SetCrashIfDefaultIsolateInitialized();
+#if (defined(_WIN32) || defined(_WIN64))
+  UINT new_flags =
+      SEM_FAILCRITICALERRORS | SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX;
+  UINT existing_flags = SetErrorMode(new_flags);
+  SetErrorMode(existing_flags | new_flags);
+#if defined(_MSC_VER)
+  _CrtSetReportMode(_CRT_WARN, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+  _CrtSetReportFile(_CRT_WARN, _CRTDBG_FILE_STDERR);
+  _CrtSetReportMode(_CRT_ASSERT, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+  _CrtSetReportFile(_CRT_ASSERT, _CRTDBG_FILE_STDERR);
+  _CrtSetReportMode(_CRT_ERROR, _CRTDBG_MODE_DEBUG | _CRTDBG_MODE_FILE);
+  _CrtSetReportFile(_CRT_ERROR, _CRTDBG_FILE_STDERR);
+  _set_error_mode(_OUT_TO_STDERR);
+#endif  // _MSC_VER
+#endif  // defined(_WIN32) || defined(_WIN64)
 
+  v8::V8::InitializeICU();
+  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+  v8::V8::InitializePlatform(platform);
   v8::internal::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
+  v8::V8::Initialize();
 
   CcTestArrayBufferAllocator array_buffer_allocator;
   v8::V8::SetArrayBufferAllocator(&array_buffer_allocator);
@@ -157,10 +182,6 @@
   for (int i = 1; i < argc; i++) {
     char* arg = argv[i];
     if (strcmp(arg, "--list") == 0) {
-      // TODO(svenpanne) Serializer::enabled() and Serializer::code_address_map_
-      // are fundamentally broken, so we can't unconditionally initialize and
-      // dispose V8.
-      v8::V8::Initialize();
       PrintTestList(CcTest::last());
       print_run_count = false;
 
@@ -206,6 +227,8 @@
   CcTest::TearDown();
   // TODO(svenpanne) See comment above.
   // if (!disable_automatic_dispose_) v8::V8::Dispose();
+  v8::V8::ShutdownPlatform();
+  delete platform;
   return 0;
 }
 
diff --git a/test/cctest/cctest.gyp b/test/cctest/cctest.gyp
index f366131..6a57763 100644
--- a/test/cctest/cctest.gyp
+++ b/test/cctest/cctest.gyp
@@ -37,12 +37,53 @@
       'type': 'executable',
       'dependencies': [
         'resources',
+        '../../tools/gyp/v8.gyp:v8_libplatform',
       ],
       'include_dirs': [
         '../..',
       ],
       'sources': [  ### gcmole(all) ###
         '<(generated_file)',
+        'compiler/c-signature.h',
+        'compiler/codegen-tester.cc',
+        'compiler/codegen-tester.h',
+        'compiler/function-tester.h',
+        'compiler/graph-builder-tester.cc',
+        'compiler/graph-builder-tester.h',
+        'compiler/graph-tester.h',
+        'compiler/simplified-graph-builder.cc',
+        'compiler/simplified-graph-builder.h',
+        'compiler/test-branch-combine.cc',
+        'compiler/test-changes-lowering.cc',
+        'compiler/test-codegen-deopt.cc',
+        'compiler/test-gap-resolver.cc',
+        'compiler/test-graph-reducer.cc',
+        'compiler/test-instruction.cc',
+        'compiler/test-js-context-specialization.cc',
+        'compiler/test-js-constant-cache.cc',
+        'compiler/test-js-typed-lowering.cc',
+        'compiler/test-linkage.cc',
+        'compiler/test-machine-operator-reducer.cc',
+        'compiler/test-node-algorithm.cc',
+        'compiler/test-node-cache.cc',
+        'compiler/test-node.cc',
+        'compiler/test-operator.cc',
+        'compiler/test-phi-reducer.cc',
+        'compiler/test-pipeline.cc',
+        'compiler/test-representation-change.cc',
+        'compiler/test-run-deopt.cc',
+        'compiler/test-run-inlining.cc',
+        'compiler/test-run-intrinsics.cc',
+        'compiler/test-run-jsbranches.cc',
+        'compiler/test-run-jscalls.cc',
+        'compiler/test-run-jsexceptions.cc',
+        'compiler/test-run-jsops.cc',
+        'compiler/test-run-machops.cc',
+        'compiler/test-run-properties.cc',
+        'compiler/test-run-variables.cc',
+        'compiler/test-schedule.cc',
+        'compiler/test-scheduler.cc',
+        'compiler/test-simplified-lowering.cc',
         'cctest.cc',
         'gay-fixed.cc',
         'gay-precision.cc',
@@ -56,12 +97,11 @@
         'test-atomicops.cc',
         'test-bignum.cc',
         'test-bignum-dtoa.cc',
+        'test-checks.cc',
         'test-circular-queue.cc',
         'test-compiler.cc',
-        'test-condition-variable.cc',
         'test-constantpool.cc',
         'test-conversions.cc',
-        'test-cpu.cc',
         'test-cpu-profiler.cc',
         'test-dataflow.cc',
         'test-date.cc',
@@ -77,6 +117,7 @@
         'test-fixed-dtoa.cc',
         'test-flags.cc',
         'test-func-name-inference.cc',
+        'test-gc-tracer.cc',
         'test-global-handles.cc',
         'test-global-object.cc',
         'test-hashing.cc',
@@ -84,8 +125,6 @@
         'test-heap.cc',
         'test-heap-profiler.cc',
         'test-hydrogen-types.cc',
-        'test-libplatform-task-queue.cc',
-        'test-libplatform-worker-thread.cc',
         'test-list.cc',
         'test-liveedit.cc',
         'test-lockers.cc',
@@ -93,18 +132,16 @@
         'test-microtask-delivery.cc',
         'test-mark-compact.cc',
         'test-mementos.cc',
-        'test-mutex.cc',
         'test-object-observe.cc',
         'test-ordered-hash-table.cc',
+        'test-ostreams.cc',
         'test-parsing.cc',
         'test-platform.cc',
-        'test-platform-tls.cc',
         'test-profile-generator.cc',
         'test-random-number-generator.cc',
         'test-regexp.cc',
         'test-reloc-info.cc',
         'test-representation.cc',
-        'test-semaphore.cc',
         'test-serialize.cc',
         'test-spaces.cc',
         'test-strings.cc',
@@ -112,10 +149,10 @@
         'test-strtod.cc',
         'test-thread-termination.cc',
         'test-threads.cc',
-        'test-time.cc',
         'test-types.cc',
         'test-unbound-queue.cc',
         'test-unique.cc',
+        'test-unscopables-hidden-prototype.cc',
         'test-utils.cc',
         'test-version.cc',
         'test-weakmaps.cc',
@@ -129,7 +166,6 @@
             'test-assembler-ia32.cc',
             'test-code-stubs.cc',
             'test-code-stubs-ia32.cc',
-            'test-cpu-ia32.cc',
             'test-disasm-ia32.cc',
             'test-macro-assembler-ia32.cc',
             'test-log-stack-tracer.cc'
@@ -140,7 +176,6 @@
             'test-assembler-x64.cc',
             'test-code-stubs.cc',
             'test-code-stubs-x64.cc',
-            'test-cpu-x64.cc',
             'test-disasm-x64.cc',
             'test-macro-assembler-x64.cc',
             'test-log-stack-tracer.cc'
@@ -176,12 +211,20 @@
             'test-macro-assembler-mips.cc'
           ],
         }],
+        ['v8_target_arch=="mips64el"', {
+          'sources': [
+            'test-assembler-mips64.cc',
+            'test-code-stubs.cc',
+            'test-code-stubs-mips64.cc',
+            'test-disasm-mips64.cc',
+            'test-macro-assembler-mips64.cc'
+          ],
+        }],
         ['v8_target_arch=="x87"', {
           'sources': [  ### gcmole(arch:x87) ###
             'test-assembler-x87.cc',
             'test-code-stubs.cc',
             'test-code-stubs-x87.cc',
-            'test-cpu-x87.cc',
             'test-disasm-x87.cc',
             'test-macro-assembler-x87.cc',
             'test-log-stack-tracer.cc'
@@ -192,11 +235,6 @@
             'test-platform-linux.cc',
           ],
         }],
-        [ 'OS=="mac"', {
-          'sources': [
-            'test-platform-macos.cc',
-          ],
-        }],
         [ 'OS=="win"', {
           'sources': [
             'test-platform-win32.cc',
diff --git a/test/cctest/cctest.h b/test/cctest/cctest.h
index a357be4..6d27074 100644
--- a/test/cctest/cctest.h
+++ b/test/cctest/cctest.h
@@ -30,6 +30,8 @@
 
 #include "src/v8.h"
 
+#include "src/isolate-inl.h"
+
 #ifndef TEST
 #define TEST(Name)                                                             \
   static void Test##Name();                                                    \
@@ -51,6 +53,13 @@
   static void Test##Name()
 #endif
 
+#ifndef UNINITIALIZED_DEPENDENT_TEST
+#define UNINITIALIZED_DEPENDENT_TEST(Name, Dep)                                \
+  static void Test##Name();                                                    \
+  CcTest register_test_##Name(Test##Name, __FILE__, #Name, #Dep, true, false); \
+  static void Test##Name()
+#endif
+
 #ifndef DISABLED_TEST
 #define DISABLED_TEST(Name)                                                    \
   static void Test##Name();                                                    \
@@ -83,7 +92,6 @@
 // Use this to expose protected methods in i::Heap.
 class TestHeap : public i::Heap {
  public:
-  using i::Heap::AllocateArgumentsObject;
   using i::Heap::AllocateByteArray;
   using i::Heap::AllocateFixedArray;
   using i::Heap::AllocateHeapNumber;
@@ -113,6 +121,11 @@
     return isolate_;
   }
 
+  static i::Isolate* InitIsolateOnce() {
+    if (!initialize_called_) InitializeVM();
+    return i_isolate();
+  }
+
   static i::Isolate* i_isolate() {
     return reinterpret_cast<i::Isolate*>(isolate());
   }
@@ -125,6 +138,10 @@
     return reinterpret_cast<TestHeap*>(i_isolate()->heap());
   }
 
+  static v8::base::RandomNumberGenerator* random_number_generator() {
+    return InitIsolateOnce()->random_number_generator();
+  }
+
   static v8::Local<v8::Object> global() {
     return isolate()->GetCurrentContext()->Global();
   }
@@ -177,7 +194,7 @@
 // thread fuzzing test.  In the thread fuzzing test it will
 // pseudorandomly select a successor thread and switch execution
 // to that thread, suspending the current test.
-class ApiTestFuzzer: public v8::internal::Thread {
+class ApiTestFuzzer: public v8::base::Thread {
  public:
   void CallTest();
 
@@ -199,11 +216,10 @@
 
  private:
   explicit ApiTestFuzzer(int num)
-      : Thread("ApiTestFuzzer"),
+      : Thread(Options("ApiTestFuzzer")),
         test_number_(num),
         gate_(0),
-        active_(true) {
-  }
+        active_(true) {}
   ~ApiTestFuzzer() {}
 
   static bool fuzzing_;
@@ -212,11 +228,11 @@
   static int active_tests_;
   static bool NextThread();
   int test_number_;
-  v8::internal::Semaphore gate_;
+  v8::base::Semaphore gate_;
   bool active_;
   void ContextSwitch();
   static int GetNextTestNumber();
-  static v8::internal::Semaphore all_tests_done_;
+  static v8::base::Semaphore all_tests_done_;
 };
 
 
@@ -372,14 +388,20 @@
 }
 
 
-static inline v8::Local<v8::Value> PreCompileCompileRun(const char* source) {
+static inline v8::Local<v8::Value> ParserCacheCompileRun(const char* source) {
   // Compile once just to get the preparse data, then compile the second time
   // using the data.
   v8::Isolate* isolate = v8::Isolate::GetCurrent();
   v8::ScriptCompiler::Source script_source(v8_str(source));
   v8::ScriptCompiler::Compile(isolate, &script_source,
-                              v8::ScriptCompiler::kProduceDataToCache);
-  return v8::ScriptCompiler::Compile(isolate, &script_source)->Run();
+                              v8::ScriptCompiler::kProduceParserCache);
+
+  // Check whether we received cached data, and if so use it.
+  v8::ScriptCompiler::CompileOptions options =
+      script_source.GetCachedData() ? v8::ScriptCompiler::kConsumeParserCache
+                                    : v8::ScriptCompiler::kNoCompileOptions;
+
+  return v8::ScriptCompiler::Compile(isolate, &script_source, options)->Run();
 }
 
 
@@ -479,6 +501,26 @@
 }
 
 
+// Helper function that simulates many incremental marking steps until
+// marking is completed.
+static inline void SimulateIncrementalMarking(i::Heap* heap) {
+  i::MarkCompactCollector* collector = heap->mark_compact_collector();
+  i::IncrementalMarking* marking = heap->incremental_marking();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
+  }
+  CHECK(marking->IsMarking() || marking->IsStopped());
+  if (marking->IsStopped()) {
+    marking->Start();
+  }
+  CHECK(marking->IsMarking());
+  while (!marking->IsComplete()) {
+    marking->Step(i::MB, i::IncrementalMarking::NO_GC_VIA_STACK_GUARD);
+  }
+  CHECK(marking->IsComplete());
+}
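
A sketch of how a heap test would drive this helper (the test body is
hypothetical; CcTest and the Heap calls are as used elsewhere in cctest):

// Push incremental marking to completion, then let a full GC finalize it.
TEST(ExampleIncrementalMarking) {
  CcTest::InitializeVM();
  i::Heap* heap = CcTest::heap();
  SimulateIncrementalMarking(heap);
  heap->CollectAllGarbage(i::Heap::kNoGCFlags);
}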
+
+
 // Helper class for new allocations tracking and checking.
 // To use checking of JS allocations tracking in a test,
 // just create an instance of this class.
@@ -501,4 +543,30 @@
 };
 
 
+class InitializedHandleScope {
+ public:
+  InitializedHandleScope()
+      : main_isolate_(CcTest::InitIsolateOnce()),
+        handle_scope_(main_isolate_) {}
+
+  // Prefixing the below with main_ reduces a lot of naming clashes.
+  i::Isolate* main_isolate() { return main_isolate_; }
+
+ private:
+  i::Isolate* main_isolate_;
+  i::HandleScope handle_scope_;
+};
+
+
+class HandleAndZoneScope : public InitializedHandleScope {
+ public:
+  HandleAndZoneScope() : main_zone_(main_isolate()) {}
+
+  // Prefixing the below with main_ reduces a lot of naming clashes.
+  i::Zone* main_zone() { return &main_zone_; }
+
+ private:
+  i::Zone main_zone_;
+};
+
 #endif  // ifndef CCTEST_H_
diff --git a/test/cctest/cctest.status b/test/cctest/cctest.status
index 3dd25ab..5198af6 100644
--- a/test/cctest/cctest.status
+++ b/test/cctest/cctest.status
@@ -31,13 +31,10 @@
   'test-api/Bug*': [FAIL],
 
   ##############################################################################
+
   # BUG(382): Weird test. Can't guarantee that it never times out.
   'test-api/ApplyInterruption': [PASS, TIMEOUT],
 
-  # TODO(mstarzinger): Fail gracefully on multiple V8::Dispose calls.
-  'test-api/InitializeAndDisposeOnce': [SKIP],
-  'test-api/InitializeAndDisposeMultiple': [SKIP],
-
   # These tests always fail.  They are here to test test.py.  If
   # they don't fail then test.py has failed.
   'test-serialize/TestThatAlwaysFails': [FAIL],
@@ -74,9 +71,83 @@
   # BUG(2999). (test/cpu-profiler/CollectCpuProfile)
   # BUG(3287). (test-cpu-profiler/SampleWhenFrameIsNotSetup)
   'test-cpu-profiler/*': [PASS, FLAKY],
+  'test-cpu-profiler/*': [SKIP],
 
-  # BUG(crbug/386492). This will be fixed by r22029.
-  'test-debug/ThreadedDebugging': [PASS, FAIL],
+  # BUG(3525). Test crashes flakily.
+  'test-debug/RecursiveBreakpoints': [PASS, FLAKY],
+  'test-debug/RecursiveBreakpointsGlobal': [PASS, FLAKY],
+
+  ##############################################################################
+  # TurboFan compiler failures.
+
+  # TODO(sigurds): The schedule is borked with multiple inlinees,
+  # and cannot handle free-floating loops yet.
+  'test-run-inlining/InlineTwiceDependentDiamond': [SKIP],
+  'test-run-inlining/InlineTwiceDependentDiamondDifferent': [SKIP],
+  'test-run-inlining/InlineLoop': [SKIP],
+
+  # Some tests are just too slow to run for now.
+  'test-api/Threading*': [PASS, NO_VARIANTS],
+  'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [PASS, NO_VARIANTS],
+  'test-heap-profiler/ManyLocalsInSharedContext': [PASS, NO_VARIANTS],
+  'test-debug/ThreadedDebugging': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakLoop': [PASS, NO_VARIANTS],
+
+  # Support for breakpoints requires using LoadICs and StoreICs.
+  'test-debug/BreakPointICStore': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointICLoad': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointICCall': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointICCallWithGC': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointConstructCallWithGC': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointReturn': [PASS, NO_VARIANTS],
+  'test-debug/BreakPointThroughJavaScript': [PASS, NO_VARIANTS],
+  'test-debug/ScriptBreakPointByNameThroughJavaScript': [PASS, NO_VARIANTS],
+  'test-debug/ScriptBreakPointByIdThroughJavaScript': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepLinear': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepKeyedLoadLoop': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepKeyedStoreLoop': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepNamedLoadLoop': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepNamedStoreLoop': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepLinearMixedICs': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepDeclarations': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepLocals': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepIf': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepSwitch': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepWhile': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepDoWhile': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepFor': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepForContinue': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepForBreak': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepForIn': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepWith': [PASS, NO_VARIANTS],
+  'test-debug/DebugConditional': [PASS, NO_VARIANTS],
+  'test-debug/StepInOutSimple': [PASS, NO_VARIANTS],
+  'test-debug/StepInOutTree': [PASS, NO_VARIANTS],
+  'test-debug/StepInOutBranch': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreak': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakStackInspection': [PASS, NO_VARIANTS],
+  'test-debug/BreakMessageWhenMessageHandlerIsReset': [PASS, NO_VARIANTS],
+  'test-debug/NoDebugBreakInAfterCompileMessageHandler': [PASS, NO_VARIANTS],
+  'test-debug/DisableBreak': [PASS, NO_VARIANTS],
+  'test-debug/RegExpDebugBreak': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakFunctionApply': [PASS, NO_VARIANTS],
+  'test-debug/DeoptimizeDuringDebugBreak': [PASS, NO_VARIANTS],
+
+  # Support for %GetFrameDetails is missing and requires checkpoints.
+  'test-api/Regress385349': [PASS, NO_VARIANTS],
+  'test-debug/DebuggerStatement': [PASS, NO_VARIANTS],
+  'test-debug/DebuggerStatementBreakpoint': [PASS, NO_VARIANTS],
+  'test-debug/DebugEvaluateWithCodeGenerationDisallowed': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepNatives': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepFunctionCall': [PASS, NO_VARIANTS],
+  'test-debug/DebugStepFunctionApply': [PASS, NO_VARIANTS],
+  'test-debug/ScriptNameAndData': [PASS, NO_VARIANTS],
+  'test-debug/ContextData': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakInMessageHandler': [PASS, NO_VARIANTS],
+  'test-debug/CallFunctionInDebugger': [PASS, NO_VARIANTS],
+  'test-debug/CallingContextIsNotDebugContext': [PASS, NO_VARIANTS],
+  'test-debug/DebugEventContext': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakInline': [PASS, NO_VARIANTS],
 
   ############################################################################
   # Slow tests.
@@ -103,10 +174,16 @@
   'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
 
   # BUG(v8:3155).
-  'test-strings/AsciiArrayJoin': [PASS, ['mode == debug', FAIL]],
+  'test-strings/OneByteArrayJoin': [PASS, ['mode == debug', FAIL]],
 
   # BUG(v8:3247).
   'test-mark-compact/NoPromotion': [SKIP],
+
+  # BUG(v8:3446).
+  'test-mark-compact/Promotion': [PASS, FAIL],
+
+  # BUG(v8:3434).
+  'test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
 }],  # 'arch == arm64'
 
 ['arch == arm64 and simulator_run == True', {
@@ -138,7 +215,7 @@
 ##############################################################################
 ['no_snap == True', {
   # BUG(3215)
-  'test-lockers/MultithreadedParallelIsolates': [PASS, FAIL],
+  'test-lockers/MultithreadedParallelIsolates': [PASS, FAIL, TIMEOUT],
 }],  # 'no_snap == True'
 
 ##############################################################################
@@ -155,9 +232,6 @@
   # BUG(2999).
   'test-cpu-profiler/CollectCpuProfile': [PASS, FAIL],
 
-  # BUG(3055).
-  'test-cpu-profiler/JsNative1JsNative2JsSample': [PASS, ['mode == release', FAIL], ['mode == debug', FLAKY]],
-
   # BUG(3005).
   'test-alloc/CodeRange': [PASS, FAIL],
 
@@ -167,6 +241,9 @@
   # BUG(3331). Fails on windows.
   'test-heap/NoWeakHashTableLeakWithIncrementalMarking': [SKIP],
 
+  # BUG(v8:3433). Crashes on windows.
+  'test-cpu-profiler/FunctionApplySample': [SKIP],
+
 }],  # 'system == windows'
 
 ##############################################################################
@@ -196,6 +273,10 @@
   'test-api/Threading2': [PASS, SLOW],
   'test-api/Threading3': [PASS, SLOW],
   'test-api/Threading4': [PASS, SLOW],
+
+  # Crashes due to OOM in simulator.
+  'test-types/Distributivity1': [PASS, FLAKY],
+  'test-types/Distributivity2': [PASS, FLAKY],
 }],  # 'arch == arm'
 
 ##############################################################################
@@ -209,23 +290,41 @@
   'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
   'test-serialize/DeserializeAndRunScript2': [SKIP],
   'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+
+  # Test requires turbofan:
+  'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
+  'codegen-tester/CompareWrapper': [SKIP],
+  'codegen-tester/ParametersEqual': [SKIP],
 }],  # 'arch == mipsel or arch == mips'
 
 ##############################################################################
+['arch == mips64el', {
+
+  # BUG(2657): Test sometimes times out on MIPS simulator.
+  'test-thread-termination/TerminateMultipleV8ThreadsDefaultIsolate': [PASS, TIMEOUT],
+
+  # BUG(v8:3154).
+  'test-heap/ReleaseOverReservedPages': [PASS, FAIL],
+
+  # BUG(1075): Unresolved crashes on MIPS also.
+  'test-serialize/Deserialize': [SKIP],
+  'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [SKIP],
+  'test-serialize/DeserializeAndRunScript2': [SKIP],
+  'test-serialize/DeserializeFromSecondSerialization': [SKIP],
+
+  # Test requires turbofan:
+  'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
+  'codegen-tester/CompareWrapper': [SKIP],
+  'codegen-tester/ParametersEqual': [SKIP],
+}],  # 'arch == mips64el'
+
+##############################################################################
 ['arch == x87', {
 
-  # TODO (weiliang): Enable below tests after fixing the double register
-  # allocation limit in X87 port.
-  'test-serialize/Serialize': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/Deserialize': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/SerializeTwice': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/ContextSerialization': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/ContextDeserialization': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/PartialDeserialization': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/PartialSerialization': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/DeserializeAndRunScript2': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/DeserializeFromSecondSerializationAndRunScript2': [PASS, ['mode == debug', SKIP]],
-  'test-serialize/DeserializeFromSecondSerialization': [PASS, ['mode == debug', SKIP]],
+  # Test requires turbofan:
+  'codegen-tester/CompareWrapper': [SKIP],
+  'codegen-tester/ParametersEqual': [SKIP],
+  'test-simplified-lowering/LowerStringOps_to_call_and_compare': [SKIP],
 }],  # 'arch == x87'
 
 ##############################################################################
@@ -322,10 +421,10 @@
   'test-constantpool/ConstantPool' : [SKIP],
   'test-compiler/GetScriptLineNumber' : [SKIP],
   'test-api/ScriptMakingExternalString' : [SKIP],
-  'test-api/ScriptMakingExternalAsciiString' : [SKIP],
+  'test-api/ScriptMakingExternalOneByteString' : [SKIP],
   'test-api/MakingExternalStringConditions' : [SKIP],
-  'test-api/MakingExternalAsciiStringConditions' : [SKIP],
-  'test-api/MakingExternalUnalignedAsciiString' : [SKIP],
+  'test-api/MakingExternalOneByteStringConditions' : [SKIP],
+  'test-api/MakingExternalUnalignedOneByteString' : [SKIP],
   'test-api/IndexedInterceptorUnboxedDoubleWithIndexedAccessor' : [SKIP],
   'test-api/IndependentWeakHandle' : [SKIP],
   'test-api/GCFromWeakCallbacks' : [SKIP],
diff --git a/test/cctest/compiler/c-signature.h b/test/cctest/compiler/c-signature.h
new file mode 100644
index 0000000..5d161db
--- /dev/null
+++ b/test/cctest/compiler/c-signature.h
@@ -0,0 +1,133 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_C_SIGNATURE_H_
+#define V8_COMPILER_C_SIGNATURE_H_
+
+#include "src/compiler/machine-type.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <typename T>
+inline MachineType MachineTypeForC() {
+  CHECK(false);  // Instantiated with invalid type.
+  return kMachNone;
+}
+
+template <>
+inline MachineType MachineTypeForC<void>() {
+  return kMachNone;
+}
+
+template <>
+inline MachineType MachineTypeForC<int8_t>() {
+  return kMachInt8;
+}
+
+template <>
+inline MachineType MachineTypeForC<uint8_t>() {
+  return kMachUint8;
+}
+
+template <>
+inline MachineType MachineTypeForC<int16_t>() {
+  return kMachInt16;
+}
+
+template <>
+inline MachineType MachineTypeForC<uint16_t>() {
+  return kMachUint16;
+}
+
+template <>
+inline MachineType MachineTypeForC<int32_t>() {
+  return kMachInt32;
+}
+
+template <>
+inline MachineType MachineTypeForC<uint32_t>() {
+  return kMachUint32;
+}
+
+template <>
+inline MachineType MachineTypeForC<int64_t>() {
+  return kMachInt64;
+}
+
+template <>
+inline MachineType MachineTypeForC<uint64_t>() {
+  return kMachUint64;
+}
+
+template <>
+inline MachineType MachineTypeForC<double>() {
+  return kMachFloat64;
+}
+
+template <>
+inline MachineType MachineTypeForC<Object*>() {
+  return kMachAnyTagged;
+}
+
+template <typename Ret, uint16_t kParamCount>
+class CSignatureOf : public MachineSignature {
+ protected:
+  MachineType storage_[1 + kParamCount];
+
+  CSignatureOf()
+      : MachineSignature(MachineTypeForC<Ret>() != kMachNone ? 1 : 0,
+                         kParamCount,
+                         reinterpret_cast<MachineType*>(&storage_)) {
+    if (return_count_ == 1) storage_[0] = MachineTypeForC<Ret>();
+  }
+  void Set(int index, MachineType type) {
+    DCHECK(index >= 0 && index < kParamCount);
+    reps_[return_count_ + index] = type;
+  }
+};
+
+// Helper classes for instantiating Signature objects to be callable from C.
+template <typename Ret>
+class CSignature0 : public CSignatureOf<Ret, 0> {
+ public:
+  CSignature0() : CSignatureOf<Ret, 0>() {}
+};
+
+template <typename Ret, typename P1>
+class CSignature1 : public CSignatureOf<Ret, 1> {
+ public:
+  CSignature1() : CSignatureOf<Ret, 1>() {
+    this->Set(0, MachineTypeForC<P1>());
+  }
+};
+
+template <typename Ret, typename P1, typename P2>
+class CSignature2 : public CSignatureOf<Ret, 2> {
+ public:
+  CSignature2() : CSignatureOf<Ret, 2>() {
+    this->Set(0, MachineTypeForC<P1>());
+    this->Set(1, MachineTypeForC<P2>());
+  }
+};
+
+template <typename Ret, typename P1, typename P2, typename P3>
+class CSignature3 : public CSignatureOf<Ret, 3> {
+ public:
+  CSignature3() : CSignatureOf<Ret, 3>() {
+    this->Set(0, MachineTypeForC<P1>());
+    this->Set(1, MachineTypeForC<P2>());
+    this->Set(2, MachineTypeForC<P3>());
+  }
+};
+
+static const CSignature2<int32_t, int32_t, int32_t> int32_int32_to_int32;
+static const CSignature2<uint32_t, uint32_t, uint32_t> uint32_uint32_to_uint32;
+static const CSignature2<double, double, double> float64_float64_to_float64;
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_C_SIGNATURE_H_
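
A short sketch of what the templates buy: a C function type described as a
MachineSignature at compile time (this assumes the Signature accessors
return_count(), parameter_count() and GetParam() from src/compiler):

// int32_t (*)(int32_t, int32_t) as a machine signature.
CSignature2<int32_t, int32_t, int32_t> sig;
CHECK_EQ(1, static_cast<int>(sig.return_count()));
CHECK_EQ(2, static_cast<int>(sig.parameter_count()));
CHECK_EQ(kMachInt32, sig.GetParam(0));
CHECK_EQ(kMachInt32, sig.GetParam(1));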
diff --git a/test/cctest/compiler/call-tester.h b/test/cctest/compiler/call-tester.h
new file mode 100644
index 0000000..e864160
--- /dev/null
+++ b/test/cctest/compiler/call-tester.h
@@ -0,0 +1,393 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_CALL_TESTER_H_
+#define V8_CCTEST_COMPILER_CALL_TESTER_H_
+
+#include "src/v8.h"
+
+#include "src/simulator.h"
+
+#if V8_TARGET_ARCH_IA32
+#if __GNUC__
+#define V8_CDECL __attribute__((cdecl))
+#else
+#define V8_CDECL __cdecl
+#endif
+#else
+#define V8_CDECL
+#endif
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// TODO(titzer): use c-signature.h instead of ReturnValueTraits
+template <typename R>
+struct ReturnValueTraits {
+  static R Cast(uintptr_t r) { return reinterpret_cast<R>(r); }
+  static MachineType Representation() {
+    // TODO(dcarney): detect when R is a pointer to a subclass of Object
+    // instead of this type check.
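+    // The dead store below never executes; it exists so this fallback
+    // only compiles when R is assignable to Object*, i.e. a tagged type.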
+    while (false) {
+      *(static_cast<Object* volatile*>(0)) = static_cast<R>(0);
+    }
+    return kMachAnyTagged;
+  }
+};
+
+template <>
+struct ReturnValueTraits<int32_t*> {
+  static int32_t* Cast(uintptr_t r) { return reinterpret_cast<int32_t*>(r); }
+  static MachineType Representation() { return kMachPtr; }
+};
+
+template <>
+struct ReturnValueTraits<void> {
+  static void Cast(uintptr_t r) {}
+  static MachineType Representation() { return kMachPtr; }
+};
+
+template <>
+struct ReturnValueTraits<bool> {
+  static bool Cast(uintptr_t r) { return static_cast<bool>(r); }
+  static MachineType Representation() { return kRepBit; }
+};
+
+template <>
+struct ReturnValueTraits<int32_t> {
+  static int32_t Cast(uintptr_t r) { return static_cast<int32_t>(r); }
+  static MachineType Representation() { return kMachInt32; }
+};
+
+template <>
+struct ReturnValueTraits<uint32_t> {
+  static uint32_t Cast(uintptr_t r) { return static_cast<uint32_t>(r); }
+  static MachineType Representation() { return kMachUint32; }
+};
+
+template <>
+struct ReturnValueTraits<int64_t> {
+  static int64_t Cast(uintptr_t r) { return static_cast<int64_t>(r); }
+  static MachineType Representation() { return kMachInt64; }
+};
+
+template <>
+struct ReturnValueTraits<uint64_t> {
+  static uint64_t Cast(uintptr_t r) { return static_cast<uint64_t>(r); }
+  static MachineType Representation() { return kMachUint64; }
+};
+
+template <>
+struct ReturnValueTraits<int16_t> {
+  static int16_t Cast(uintptr_t r) { return static_cast<int16_t>(r); }
+  static MachineType Representation() { return kMachInt16; }
+};
+
+template <>
+struct ReturnValueTraits<uint16_t> {
+  static uint16_t Cast(uintptr_t r) { return static_cast<uint16_t>(r); }
+  static MachineType Representation() { return kMachUint16; }
+};
+
+template <>
+struct ReturnValueTraits<int8_t> {
+  static int8_t Cast(uintptr_t r) { return static_cast<int8_t>(r); }
+  static MachineType Representation() { return kMachInt8; }
+};
+
+template <>
+struct ReturnValueTraits<uint8_t> {
+  static uint8_t Cast(uintptr_t r) { return static_cast<uint8_t>(r); }
+  static MachineType Representation() { return kMachUint8; }
+};
+
+template <>
+struct ReturnValueTraits<double> {
+  static double Cast(uintptr_t r) {
+    UNREACHABLE();
+    return 0.0;
+  }
+  static MachineType Representation() { return kMachFloat64; }
+};
+
+
+template <typename R>
+struct ParameterTraits {
+  static uintptr_t Cast(R r) { return static_cast<uintptr_t>(r); }
+};
+
+template <>
+struct ParameterTraits<int*> {
+  static uintptr_t Cast(int* r) { return reinterpret_cast<uintptr_t>(r); }
+};
+
+template <typename T>
+struct ParameterTraits<T*> {
+  static uintptr_t Cast(void* r) { return reinterpret_cast<uintptr_t>(r); }
+};
+
+class CallHelper {
+ public:
+  explicit CallHelper(Isolate* isolate, MachineSignature* machine_sig)
+      : machine_sig_(machine_sig), isolate_(isolate) {
+    USE(isolate_);
+  }
+  virtual ~CallHelper() {}
+
+  static MachineSignature* MakeMachineSignature(
+      Zone* zone, MachineType return_type, MachineType p0 = kMachNone,
+      MachineType p1 = kMachNone, MachineType p2 = kMachNone,
+      MachineType p3 = kMachNone, MachineType p4 = kMachNone) {
+    // Count the number of parameters.
+    size_t param_count = 5;
+    MachineType types[] = {p0, p1, p2, p3, p4};
+    while (param_count > 0 && types[param_count - 1] == kMachNone)
+      param_count--;
+    size_t return_count = return_type == kMachNone ? 0 : 1;
+
+    // Build the machine signature.
+    MachineSignature::Builder builder(zone, return_count, param_count);
+    if (return_count > 0) builder.AddReturn(return_type);
+    for (size_t i = 0; i < param_count; i++) {
+      builder.AddParam(types[i]);
+    }
+    return builder.Build();
+  }
+
+ protected:
+  MachineSignature* machine_sig_;
+  void VerifyParameters(size_t parameter_count, MachineType* parameter_types) {
+    CHECK(machine_sig_->parameter_count() == parameter_count);
+    for (size_t i = 0; i < parameter_count; i++) {
+      CHECK_EQ(machine_sig_->GetParam(i), parameter_types[i]);
+    }
+  }
+  virtual byte* Generate() = 0;
+
+ private:
+#if USE_SIMULATOR && V8_TARGET_ARCH_ARM64
+  uintptr_t CallSimulator(byte* f, Simulator::CallArgument* args) {
+    Simulator* simulator = Simulator::current(isolate_);
+    return static_cast<uintptr_t>(simulator->CallInt64(f, args));
+  }
+
+  template <typename R, typename F>
+  R DoCall(F* f) {
+    Simulator::CallArgument args[] = {Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+  template <typename R, typename F, typename P1>
+  R DoCall(F* f, P1 p1) {
+    Simulator::CallArgument args[] = {Simulator::CallArgument(p1),
+                                      Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+  template <typename R, typename F, typename P1, typename P2>
+  R DoCall(F* f, P1 p1, P2 p2) {
+    Simulator::CallArgument args[] = {Simulator::CallArgument(p1),
+                                      Simulator::CallArgument(p2),
+                                      Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
+    Simulator::CallArgument args[] = {
+        Simulator::CallArgument(p1), Simulator::CallArgument(p2),
+        Simulator::CallArgument(p3), Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3,
+            typename P4>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
+    Simulator::CallArgument args[] = {
+        Simulator::CallArgument(p1), Simulator::CallArgument(p2),
+        Simulator::CallArgument(p3), Simulator::CallArgument(p4),
+        Simulator::CallArgument::End()};
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
+  }
+#elif USE_SIMULATOR && V8_TARGET_ARCH_ARM
+  uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
+                          int32_t p3 = 0, int32_t p4 = 0) {
+    Simulator* simulator = Simulator::current(isolate_);
+    return static_cast<uintptr_t>(simulator->Call(f, 4, p1, p2, p3, p4));
+  }
+  template <typename R, typename F>
+  R DoCall(F* f) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f)));
+  }
+  template <typename R, typename F, typename P1>
+  R DoCall(F* f, P1 p1) {
+    return ReturnValueTraits<R>::Cast(
+        CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1)));
+  }
+  template <typename R, typename F, typename P1, typename P2>
+  R DoCall(F* f, P1 p1, P2 p2) {
+    return ReturnValueTraits<R>::Cast(
+        CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+                      ParameterTraits<P2>::Cast(p2)));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(
+        FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+        ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3)));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3,
+            typename P4>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(
+        FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+        ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
+        ParameterTraits<P4>::Cast(p4)));
+  }
+#else
+  template <typename R, typename F>
+  R DoCall(F* f) {
+    return f();
+  }
+  template <typename R, typename F, typename P1>
+  R DoCall(F* f, P1 p1) {
+    return f(p1);
+  }
+  template <typename R, typename F, typename P1, typename P2>
+  R DoCall(F* f, P1 p1, P2 p2) {
+    return f(p1, p2);
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
+    return f(p1, p2, p3);
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3,
+            typename P4>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
+    return f(p1, p2, p3, p4);
+  }
+#endif
+
+#ifndef DEBUG
+  void VerifyParameters0() {}
+
+  template <typename P1>
+  void VerifyParameters1() {}
+
+  template <typename P1, typename P2>
+  void VerifyParameters2() {}
+
+  template <typename P1, typename P2, typename P3>
+  void VerifyParameters3() {}
+
+  template <typename P1, typename P2, typename P3, typename P4>
+  void VerifyParameters4() {}
+#else
+  void VerifyParameters0() { VerifyParameters(0, NULL); }
+
+  template <typename P1>
+  void VerifyParameters1() {
+    MachineType parameters[] = {ReturnValueTraits<P1>::Representation()};
+    VerifyParameters(arraysize(parameters), parameters);
+  }
+
+  template <typename P1, typename P2>
+  void VerifyParameters2() {
+    MachineType parameters[] = {ReturnValueTraits<P1>::Representation(),
+                                ReturnValueTraits<P2>::Representation()};
+    VerifyParameters(arraysize(parameters), parameters);
+  }
+
+  template <typename P1, typename P2, typename P3>
+  void VerifyParameters3() {
+    MachineType parameters[] = {ReturnValueTraits<P1>::Representation(),
+                                ReturnValueTraits<P2>::Representation(),
+                                ReturnValueTraits<P3>::Representation()};
+    VerifyParameters(arraysize(parameters), parameters);
+  }
+
+  template <typename P1, typename P2, typename P3, typename P4>
+  void VerifyParameters4() {
+    MachineType parameters[] = {ReturnValueTraits<P1>::Representation(),
+                                ReturnValueTraits<P2>::Representation(),
+                                ReturnValueTraits<P3>::Representation(),
+                                ReturnValueTraits<P4>::Representation()};
+    VerifyParameters(arraysize(parameters), parameters);
+  }
+#endif
+
+  // TODO(dcarney): replace Call() in CallHelper2 with these.
+  template <typename R>
+  R Call0() {
+    typedef R V8_CDECL FType();
+    VerifyParameters0();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()));
+  }
+
+  template <typename R, typename P1>
+  R Call1(P1 p1) {
+    typedef R V8_CDECL FType(P1);
+    VerifyParameters1<P1>();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()), p1);
+  }
+
+  template <typename R, typename P1, typename P2>
+  R Call2(P1 p1, P2 p2) {
+    typedef R V8_CDECL FType(P1, P2);
+    VerifyParameters2<P1, P2>();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()), p1, p2);
+  }
+
+  template <typename R, typename P1, typename P2, typename P3>
+  R Call3(P1 p1, P2 p2, P3 p3) {
+    typedef R V8_CDECL FType(P1, P2, P3);
+    VerifyParameters3<P1, P2, P3>();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3);
+  }
+
+  template <typename R, typename P1, typename P2, typename P3, typename P4>
+  R Call4(P1 p1, P2 p2, P3 p3, P4 p4) {
+    typedef R V8_CDECL FType(P1, P2, P3, P4);
+    VerifyParameters4<P1, P2, P3, P4>();
+    return DoCall<R>(FUNCTION_CAST<FType*>(Generate()), p1, p2, p3, p4);
+  }
+
+  template <typename R, typename C>
+  friend class CallHelper2;
+  Isolate* isolate_;
+};
+
+
+// TODO(dcarney): replace CallHelper with CallHelper2 and rename.
+template <typename R, typename C>
+class CallHelper2 {
+ public:
+  R Call() { return helper()->template Call0<R>(); }
+
+  template <typename P1>
+  R Call(P1 p1) {
+    return helper()->template Call1<R>(p1);
+  }
+
+  template <typename P1, typename P2>
+  R Call(P1 p1, P2 p2) {
+    return helper()->template Call2<R>(p1, p2);
+  }
+
+  template <typename P1, typename P2, typename P3>
+  R Call(P1 p1, P2 p2, P3 p3) {
+    return helper()->template Call3<R>(p1, p2, p3);
+  }
+
+  template <typename P1, typename P2, typename P3, typename P4>
+  R Call(P1 p1, P2 p2, P3 p3, P4 p4) {
+    return helper()->template Call4<R>(p1, p2, p3, p4);
+  }
+
+ private:
+  CallHelper* helper() { return static_cast<C*>(this); }
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_CALL_TESTER_H_
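
The CRTP wiring is easiest to see in a stub. A concrete tester derives from
both CallHelper (which owns Generate()) and CallHelper2<R, Self> (whose
Call overloads forward back into CallHelper via the friend declaration).
The subclass below is hypothetical; RawMachineAssemblerTester in
codegen-tester.h is the real one.

// Sketch only: a do-nothing tester showing the intended inheritance shape.
template <typename R>
class StubTester : public CallHelper, public CallHelper2<R, StubTester<R> > {
 public:
  StubTester(Isolate* isolate, MachineSignature* sig)
      : CallHelper(isolate, sig) {}

 protected:
  virtual byte* Generate() {
    UNREACHABLE();  // a real tester returns freshly generated code here
    return NULL;
  }
};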
diff --git a/test/cctest/compiler/codegen-tester.cc b/test/cctest/compiler/codegen-tester.cc
new file mode 100644
index 0000000..b1874f5
--- /dev/null
+++ b/test/cctest/compiler/codegen-tester.cc
@@ -0,0 +1,577 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(CompareWrapper) {
+  // Who tests the testers?
+  // If CompareWrapper is broken, then test expectations will be broken.
+  RawMachineAssemblerTester<int32_t> m;
+  CompareWrapper wWord32Equal(IrOpcode::kWord32Equal);
+  CompareWrapper wInt32LessThan(IrOpcode::kInt32LessThan);
+  CompareWrapper wInt32LessThanOrEqual(IrOpcode::kInt32LessThanOrEqual);
+  CompareWrapper wUint32LessThan(IrOpcode::kUint32LessThan);
+  CompareWrapper wUint32LessThanOrEqual(IrOpcode::kUint32LessThanOrEqual);
+
+  {
+    FOR_INT32_INPUTS(pl) {
+      FOR_INT32_INPUTS(pr) {
+        int32_t a = *pl;
+        int32_t b = *pr;
+        CHECK_EQ(a == b, wWord32Equal.Int32Compare(a, b));
+        CHECK_EQ(a < b, wInt32LessThan.Int32Compare(a, b));
+        CHECK_EQ(a <= b, wInt32LessThanOrEqual.Int32Compare(a, b));
+      }
+    }
+  }
+
+  {
+    FOR_UINT32_INPUTS(pl) {
+      FOR_UINT32_INPUTS(pr) {
+        uint32_t a = *pl;
+        uint32_t b = *pr;
+        CHECK_EQ(a == b, wWord32Equal.Int32Compare(a, b));
+        CHECK_EQ(a < b, wUint32LessThan.Int32Compare(a, b));
+        CHECK_EQ(a <= b, wUint32LessThanOrEqual.Int32Compare(a, b));
+      }
+    }
+  }
+
+  CHECK_EQ(true, wWord32Equal.Int32Compare(0, 0));
+  CHECK_EQ(true, wWord32Equal.Int32Compare(257, 257));
+  CHECK_EQ(true, wWord32Equal.Int32Compare(65539, 65539));
+  CHECK_EQ(true, wWord32Equal.Int32Compare(-1, -1));
+  CHECK_EQ(true, wWord32Equal.Int32Compare(0xffffffff, 0xffffffff));
+
+  CHECK_EQ(false, wWord32Equal.Int32Compare(0, 1));
+  CHECK_EQ(false, wWord32Equal.Int32Compare(257, 256));
+  CHECK_EQ(false, wWord32Equal.Int32Compare(65539, 65537));
+  CHECK_EQ(false, wWord32Equal.Int32Compare(-1, -2));
+  CHECK_EQ(false, wWord32Equal.Int32Compare(0xffffffff, 0xfffffffe));
+
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(0, 0));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(357, 357));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(75539, 75539));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(-1, -1));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(0xffffffff, 0xffffffff));
+
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(0, 1));
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(456, 457));
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(-2, -1));
+  CHECK_EQ(true, wInt32LessThan.Int32Compare(0xfffffffe, 0xffffffff));
+
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(1, 0));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(457, 456));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(-1, -2));
+  CHECK_EQ(false, wInt32LessThan.Int32Compare(0xffffffff, 0xfffffffe));
+
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0, 0));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(357, 357));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(75539, 75539));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(-1, -1));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0xffffffff, 0xffffffff));
+
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0, 1));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(456, 457));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(-2, -1));
+  CHECK_EQ(true, wInt32LessThanOrEqual.Int32Compare(0xfffffffe, 0xffffffff));
+
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(1, 0));
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(457, 456));
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(-1, -2));
+  CHECK_EQ(false, wInt32LessThanOrEqual.Int32Compare(0xffffffff, 0xfffffffe));
+
+  // Unsigned comparisons.
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(0, 0));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(357, 357));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(75539, 75539));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(-1, -1));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0xffffffff));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(-2999, 0));
+
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(0, 1));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(456, 457));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(-11, -10));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(0xfffffffe, 0xffffffff));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(0, 0xffffffff));
+  CHECK_EQ(true, wUint32LessThan.Int32Compare(0, -2996));
+
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(1, 0));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(457, 456));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(-10, -21));
+  CHECK_EQ(false, wUint32LessThan.Int32Compare(0xffffffff, 0xfffffffe));
+
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, 0));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(357, 357));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(75539, 75539));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-1, -1));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xffffffff, 0xffffffff));
+
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, 1));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(456, 457));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(85537, 85539));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-300, -299));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(-300, -300));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0xfffffffe, 0xffffffff));
+  CHECK_EQ(true, wUint32LessThanOrEqual.Int32Compare(0, -2995));
+
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(1, 0));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(457, 456));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(85539, 85537));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(-130, -170));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(0xffffffff, 0xfffffffe));
+  CHECK_EQ(false, wUint32LessThanOrEqual.Int32Compare(-2997, 0));
+
+  CompareWrapper wFloat64Equal(IrOpcode::kFloat64Equal);
+  CompareWrapper wFloat64LessThan(IrOpcode::kFloat64LessThan);
+  CompareWrapper wFloat64LessThanOrEqual(IrOpcode::kFloat64LessThanOrEqual);
+
+  // Check NaN handling.
+  double nan = v8::base::OS::nan_value();
+  double inf = V8_INFINITY;
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, 0.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, 1.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, nan));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(0.0, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1.0, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(-inf, nan));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(nan, nan));
+
+  // Check inf handling.
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, 1.0));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, -inf));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, inf));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, 1.0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(0.0, inf));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(1.0, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, inf));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, inf));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, 0.0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(inf, inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0.0, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(1.0, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(inf, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, inf));
+
+  // Check -inf handling.
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, 0.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, 1.0));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-inf, inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(inf, -inf));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, 0.0));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-inf, inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(inf, -inf));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, 0.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, 1.0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, -inf));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(0.0, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1.0, -inf));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(inf, -inf));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-inf, -inf));
+
+  // Check basic values.
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(0, 0));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(257.1, 257.1));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(65539.1, 65539.1));
+  CHECK_EQ(true, wFloat64Equal.Float64Compare(-1.1, -1.1));
+
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(0, 1));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(257.2, 256.2));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(65539.2, 65537.2));
+  CHECK_EQ(false, wFloat64Equal.Float64Compare(-1.2, -2.2));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(0, 0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(357.3, 357.3));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(75539.3, 75539.3));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-1.3, -1.3));
+
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(0, 1));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(456.4, 457.4));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(85537.4, 85539.4));
+  CHECK_EQ(true, wFloat64LessThan.Float64Compare(-2.4, -1.4));
+
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(1, 0));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(457.5, 456.5));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(85539.5, 85537.5));
+  CHECK_EQ(false, wFloat64LessThan.Float64Compare(-1.5, -2.5));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0, 0));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(357.6, 357.6));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(75539.6, 75539.6));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-1.6, -1.6));
+
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(0, 1));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(456.7, 457.7));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(85537.7, 85539.7));
+  CHECK_EQ(true, wFloat64LessThanOrEqual.Float64Compare(-2.7, -1.7));
+
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(1, 0));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(457.8, 456.8));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(85539.8, 85537.8));
+  CHECK_EQ(false, wFloat64LessThanOrEqual.Float64Compare(-1.8, -2.8));
+}
+
+
+void Int32BinopInputShapeTester::TestAllInputShapes() {
+  std::vector<int32_t> inputs = ValueHelper::int32_vector();
+  int num_int_inputs = static_cast<int>(inputs.size());
+  if (num_int_inputs > 16) num_int_inputs = 16;  // limit to 16 inputs
+
+  for (int i = -2; i < num_int_inputs; i++) {    // for all left shapes
+    for (int j = -2; j < num_int_inputs; j++) {  // for all right shapes
+      if (i >= 0 && j >= 0) break;               // No constant/constant combos
+      RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+      Node* p0 = m.Parameter(0);
+      Node* p1 = m.Parameter(1);
+      Node* n0;
+      Node* n1;
+
+      // left = Parameter | Load | Constant
+      if (i == -2) {
+        n0 = p0;
+      } else if (i == -1) {
+        n0 = m.LoadFromPointer(&input_a, kMachInt32);
+      } else {
+        n0 = m.Int32Constant(inputs[i]);
+      }
+
+      // right = Parameter | Load | Constant
+      if (j == -2) {
+        n1 = p1;
+      } else if (j == -1) {
+        n1 = m.LoadFromPointer(&input_b, kMachInt32);
+      } else {
+        n1 = m.Int32Constant(inputs[j]);
+      }
+
+      gen->gen(&m, n0, n1);
+
+      if (false) printf("Int32BinopInputShapeTester i=%d, j=%d\n", i, j);
+      if (i >= 0) {
+        input_a = inputs[i];
+        RunRight(&m);
+      } else if (j >= 0) {
+        input_b = inputs[j];
+        RunLeft(&m);
+      } else {
+        Run(&m);
+      }
+    }
+  }
+}
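+
+// Example (sketch): a minimal BinopGen that TestAllInputShapes() can drive.
+// Int32Add stands in for any machine binop; the names here are illustrative.
+//   class AddGen : public BinopGen<int32_t> {
+//    public:
+//     virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a,
+//                      Node* b) { m->Return(m->Int32Add(a, b)); }
+//     virtual int32_t expected(int32_t a, int32_t b) {
+//       return static_cast<int32_t>(static_cast<uint32_t>(a) +
+//                                   static_cast<uint32_t>(b));
+//     }
+//   };
+//   AddGen gen;
+//   Int32BinopInputShapeTester tester(&gen);
+//   tester.TestAllInputShapes();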
+
+
+void Int32BinopInputShapeTester::Run(RawMachineAssemblerTester<int32_t>* m) {
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expect = gen->expected(input_a, input_b);
+      if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+      CHECK_EQ(expect, m->Call(input_a, input_b));
+    }
+  }
+}
+
+
+void Int32BinopInputShapeTester::RunLeft(
+    RawMachineAssemblerTester<int32_t>* m) {
+  FOR_UINT32_INPUTS(i) {
+    input_a = *i;
+    int32_t expect = gen->expected(input_a, input_b);
+    if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+    CHECK_EQ(expect, m->Call(input_a, input_b));
+  }
+}
+
+
+void Int32BinopInputShapeTester::RunRight(
+    RawMachineAssemblerTester<int32_t>* m) {
+  FOR_UINT32_INPUTS(i) {
+    input_b = *i;
+    int32_t expect = gen->expected(input_a, input_b);
+    if (false) printf("  cmp(a=%d, b=%d) ?== %d\n", input_a, input_b, expect);
+    CHECK_EQ(expect, m->Call(input_a, input_b));
+  }
+}
+
+
+#if V8_TURBOFAN_TARGET
+
+TEST(ParametersEqual) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+  Node* p1 = m.Parameter(1);
+  CHECK_NE(NULL, p1);
+  Node* p0 = m.Parameter(0);
+  CHECK_NE(NULL, p0);
+  CHECK_EQ(p0, m.Parameter(0));
+  CHECK_EQ(p1, m.Parameter(1));
+}
+
+
+void RunSmiConstant(int32_t v) {
+// TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
+#if !V8_TARGET_ARCH_X64
+  if (Smi::IsValid(v)) {
+    RawMachineAssemblerTester<Object*> m;
+    m.Return(m.NumberConstant(v));
+    CHECK_EQ(Smi::FromInt(v), m.Call());
+  }
+#endif
+}
+
+
+void RunNumberConstant(double v) {
+  RawMachineAssemblerTester<Object*> m;
+#if V8_TARGET_ARCH_X64
+  // TODO(dcarney): on x64 Smis are generated with the SmiConstantRegister
+  Handle<Object> number = m.isolate()->factory()->NewNumber(v);
+  if (number->IsSmi()) return;
+#endif
+  m.Return(m.NumberConstant(v));
+  Object* result = m.Call();
+  m.CheckNumber(v, result);
+}
+
+
+TEST(RunEmpty) {
+  RawMachineAssemblerTester<int32_t> m;
+  m.Return(m.Int32Constant(0));
+  CHECK_EQ(0, m.Call());
+}
+
+
+TEST(RunInt32Constants) {
+  FOR_INT32_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    m.Return(m.Int32Constant(*i));
+    CHECK_EQ(*i, m.Call());
+  }
+}
+
+
+TEST(RunSmiConstants) {
+  for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
+    RunSmiConstant(i);
+    RunSmiConstant(3 * i);
+    RunSmiConstant(5 * i);
+    RunSmiConstant(-i);
+    RunSmiConstant(i | 1);
+    RunSmiConstant(i | 3);
+  }
+  RunSmiConstant(Smi::kMaxValue);
+  RunSmiConstant(Smi::kMaxValue - 1);
+  RunSmiConstant(Smi::kMinValue);
+  RunSmiConstant(Smi::kMinValue + 1);
+
+  FOR_INT32_INPUTS(i) { RunSmiConstant(*i); }
+}
+
+
+TEST(RunNumberConstants) {
+  {
+    FOR_FLOAT64_INPUTS(i) { RunNumberConstant(*i); }
+  }
+  {
+    FOR_INT32_INPUTS(i) { RunNumberConstant(*i); }
+  }
+
+  for (int32_t i = 1; i < Smi::kMaxValue && i != 0; i = i << 1) {
+    RunNumberConstant(i);
+    RunNumberConstant(-i);
+    RunNumberConstant(i | 1);
+    RunNumberConstant(i | 3);
+  }
+  RunNumberConstant(Smi::kMaxValue);
+  RunNumberConstant(Smi::kMaxValue - 1);
+  RunNumberConstant(Smi::kMinValue);
+  RunNumberConstant(Smi::kMinValue + 1);
+}
+
+
+TEST(RunEmptyString) {
+  RawMachineAssemblerTester<Object*> m;
+  m.Return(m.StringConstant("empty"));
+  m.CheckString("empty", m.Call());
+}
+
+
+TEST(RunHeapConstant) {
+  RawMachineAssemblerTester<Object*> m;
+  m.Return(m.StringConstant("empty"));
+  m.CheckString("empty", m.Call());
+}
+
+
+TEST(RunHeapNumberConstant) {
+  RawMachineAssemblerTester<Object*> m;
+  Handle<Object> number = m.isolate()->factory()->NewHeapNumber(100.5);
+  m.Return(m.HeapConstant(number));
+  Object* result = m.Call();
+  CHECK_EQ(result, *number);
+}
+
+
+TEST(RunParam1) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  m.Return(m.Parameter(0));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(*i);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam2_1) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+  m.Return(p0);
+  USE(p1);
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(*i, -9999);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam2_2) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+  m.Return(p1);
+  USE(p0);
+
+  FOR_INT32_INPUTS(i) {
+    int32_t result = m.Call(-7777, *i);
+    CHECK_EQ(*i, result);
+  }
+}
+
+
+TEST(RunParam3) {
+  for (int i = 0; i < 3; i++) {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+    Node* nodes[] = {m.Parameter(0), m.Parameter(1), m.Parameter(2)};
+    m.Return(nodes[i]);
+
+    int p[] = {-99, -77, -88};
+    FOR_INT32_INPUTS(j) {
+      p[i] = *j;
+      int32_t result = m.Call(p[0], p[1], p[2]);
+      CHECK_EQ(*j, result);
+    }
+  }
+}
+
+
+TEST(RunBinopTester) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(bt.param0);
+
+    FOR_INT32_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 777)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(bt.param1);
+
+    FOR_INT32_INPUTS(i) { CHECK_EQ(*i, bt.call(666, *i)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Float64BinopTester bt(&m);
+    bt.AddReturn(bt.param0);
+
+    FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(*i, 9.0)); }
+  }
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Float64BinopTester bt(&m);
+    bt.AddReturn(bt.param1);
+
+    FOR_FLOAT64_INPUTS(i) { CHECK_EQ(*i, bt.call(-11.25, *i)); }
+  }
+}
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/codegen-tester.h b/test/cctest/compiler/codegen-tester.h
new file mode 100644
index 0000000..6aa5bae
--- /dev/null
+++ b/test/cctest/compiler/codegen-tester.h
@@ -0,0 +1,338 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+#define V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/pipeline.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/simulator.h"
+#include "test/cctest/compiler/call-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+template <typename MachineAssembler>
+class MachineAssemblerTester : public HandleAndZoneScope,
+                               public CallHelper,
+                               public MachineAssembler {
+ public:
+  MachineAssemblerTester(MachineType return_type, MachineType p0,
+                         MachineType p1, MachineType p2, MachineType p3,
+                         MachineType p4)
+      : HandleAndZoneScope(),
+        CallHelper(
+            main_isolate(),
+            MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4)),
+        MachineAssembler(
+            new (main_zone()) Graph(main_zone()),
+            MakeMachineSignature(main_zone(), return_type, p0, p1, p2, p3, p4),
+            kMachPtr) {}
+
+  Node* LoadFromPointer(void* address, MachineType rep, int32_t offset = 0) {
+    return this->Load(rep, this->PointerConstant(address),
+                      this->Int32Constant(offset));
+  }
+
+  void StoreToPointer(void* address, MachineType rep, Node* node) {
+    this->Store(rep, this->PointerConstant(address), node);
+  }
+
+  Node* StringConstant(const char* string) {
+    return this->HeapConstant(
+        this->isolate()->factory()->InternalizeUtf8String(string));
+  }
+
+  void CheckNumber(double expected, Object* number) {
+    CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
+  }
+
+  void CheckString(const char* expected, Object* string) {
+    CHECK(
+        this->isolate()->factory()->InternalizeUtf8String(expected)->SameValue(
+            string));
+  }
+
+  void GenerateCode() { Generate(); }
+
+ protected:
+  virtual byte* Generate() {
+    if (code_.is_null()) {
+      Schedule* schedule = this->Export();
+      CallDescriptor* call_descriptor = this->call_descriptor();
+      Graph* graph = this->graph();
+      CompilationInfo info(graph->zone()->isolate(), graph->zone());
+      Linkage linkage(&info, call_descriptor);
+      Pipeline pipeline(&info);
+      code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph, schedule);
+    }
+    return this->code_.ToHandleChecked()->entry();
+  }
+
+ private:
+  MaybeHandle<Code> code_;
+};
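+
+// Example (sketch): LoadFromPointer/StoreToPointer let generated code read and
+// write C++ variables directly; the binop testers below rely on this. Using
+// the RawMachineAssemblerTester defined next:
+//   int32_t slot = 0;
+//   RawMachineAssemblerTester<int32_t> m;
+//   m.StoreToPointer(&slot, kMachInt32, m.Int32Constant(7));
+//   m.Return(m.Int32Constant(0));
+//   m.Call();
+//   CHECK_EQ(7, slot);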
+
+
+template <typename ReturnType>
+class RawMachineAssemblerTester
+    : public MachineAssemblerTester<RawMachineAssembler>,
+      public CallHelper2<ReturnType, RawMachineAssemblerTester<ReturnType> > {
+ public:
+  RawMachineAssemblerTester(MachineType p0 = kMachNone,
+                            MachineType p1 = kMachNone,
+                            MachineType p2 = kMachNone,
+                            MachineType p3 = kMachNone,
+                            MachineType p4 = kMachNone)
+      : MachineAssemblerTester<RawMachineAssembler>(
+            ReturnValueTraits<ReturnType>::Representation(), p0, p1, p2, p3,
+            p4) {}
+
+  template <typename Ci, typename Fn>
+  void Run(const Ci& ci, const Fn& fn) {
+    typename Ci::const_iterator i;
+    for (i = ci.begin(); i != ci.end(); ++i) {
+      CHECK_EQ(fn(*i), this->Call(*i));
+    }
+  }
+
+  template <typename Ci, typename Cj, typename Fn>
+  void Run(const Ci& ci, const Cj& cj, const Fn& fn) {
+    typename Ci::const_iterator i;
+    typename Cj::const_iterator j;
+    for (i = ci.begin(); i != ci.end(); ++i) {
+      for (j = cj.begin(); j != cj.end(); ++j) {
+        CHECK_EQ(fn(*i, *j), this->Call(*i, *j));
+      }
+    }
+  }
+};
+
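+// Example (sketch): the container-driven Run() helpers check generated code
+// against a reference function over every element; AddOne is illustrative and
+// ValueHelper::int32_vector() is the input set used elsewhere in these tests:
+//   int32_t AddOne(int32_t x) { return x + 1; }
+//   ...
+//   RawMachineAssemblerTester<int32_t> m(kMachInt32);
+//   m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(1)));
+//   m.Run(ValueHelper::int32_vector(), AddOne);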
+
+static const bool USE_RESULT_BUFFER = true;
+static const bool USE_RETURN_REGISTER = false;
+static const int32_t CHECK_VALUE = 0x99BEEDCE;
+
+
+// TODO(titzer): use the C-style calling convention, or any register-based
+// calling convention for binop tests.
+template <typename CType, MachineType rep, bool use_result_buffer>
+class BinopTester {
+ public:
+  explicit BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : T(tester),
+        param0(T->LoadFromPointer(&p0, rep)),
+        param1(T->LoadFromPointer(&p1, rep)),
+        p0(static_cast<CType>(0)),
+        p1(static_cast<CType>(0)),
+        result(static_cast<CType>(0)) {}
+
+  RawMachineAssemblerTester<int32_t>* T;
+  Node* param0;
+  Node* param1;
+
+  CType call(CType a0, CType a1) {
+    p0 = a0;
+    p1 = a1;
+    if (use_result_buffer) {
+      CHECK_EQ(CHECK_VALUE, T->Call());
+      return result;
+    } else {
+      return T->Call();
+    }
+  }
+
+  void AddReturn(Node* val) {
+    if (use_result_buffer) {
+      T->Store(rep, T->PointerConstant(&result), T->Int32Constant(0), val);
+      T->Return(T->Int32Constant(CHECK_VALUE));
+    } else {
+      T->Return(val);
+    }
+  }
+
+  template <typename Ci, typename Cj, typename Fn>
+  void Run(const Ci& ci, const Cj& cj, const Fn& fn) {
+    typename Ci::const_iterator i;
+    typename Cj::const_iterator j;
+    for (i = ci.begin(); i != ci.end(); ++i) {
+      for (j = cj.begin(); j != cj.end(); ++j) {
+        CHECK_EQ(fn(*i, *j), this->call(*i, *j));
+      }
+    }
+  }
+
+ protected:
+  CType p0;
+  CType p1;
+  CType result;
+};
+
+
+// A helper class for testing code sequences that take two int parameters and
+// return an int value.
+class Int32BinopTester
+    : public BinopTester<int32_t, kMachInt32, USE_RETURN_REGISTER> {
+ public:
+  explicit Int32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<int32_t, kMachInt32, USE_RETURN_REGISTER>(tester) {}
+};
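+
+// Example (sketch): AddReturn() wires the result out of the generated code and
+// call() writes both inputs before invoking it, so a typical binop test reads:
+//   RawMachineAssemblerTester<int32_t> m;
+//   Int32BinopTester bt(&m);
+//   bt.AddReturn(m.Int32Add(bt.param0, bt.param1));
+//   CHECK_EQ(3, bt.call(1, 2));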
+
+
+// A helper class for testing code sequences that take two uint parameters and
+// return a uint value.
+class Uint32BinopTester
+    : public BinopTester<uint32_t, kMachUint32, USE_RETURN_REGISTER> {
+ public:
+  explicit Uint32BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<uint32_t, kMachUint32, USE_RETURN_REGISTER>(tester) {}
+
+  uint32_t call(uint32_t a0, uint32_t a1) {
+    p0 = a0;
+    p1 = a1;
+    return static_cast<uint32_t>(T->Call());
+  }
+};
+
+
+// A helper class for testing code sequences that take two double parameters and
+// return a double value.
+// TODO(titzer): figure out how to return doubles correctly on ia32.
+class Float64BinopTester
+    : public BinopTester<double, kMachFloat64, USE_RESULT_BUFFER> {
+ public:
+  explicit Float64BinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<double, kMachFloat64, USE_RESULT_BUFFER>(tester) {}
+};
+
+
+// A helper class for testing code sequences that take two pointer parameters
+// and return a pointer value.
+// TODO(titzer): pick word size of pointers based on V8_TARGET.
+template <typename Type>
+class PointerBinopTester
+    : public BinopTester<Type*, kMachPtr, USE_RETURN_REGISTER> {
+ public:
+  explicit PointerBinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<Type*, kMachPtr, USE_RETURN_REGISTER>(tester) {}
+};
+
+
+// A helper class for testing code sequences that take two tagged parameters and
+// return a tagged value.
+template <typename Type>
+class TaggedBinopTester
+    : public BinopTester<Type*, kMachAnyTagged, USE_RETURN_REGISTER> {
+ public:
+  explicit TaggedBinopTester(RawMachineAssemblerTester<int32_t>* tester)
+      : BinopTester<Type*, kMachAnyTagged, USE_RETURN_REGISTER>(tester) {}
+};
+
+// A helper class for testing compares. Wraps a machine comparison opcode and
+// provides both graph-building and C++ reference-evaluation routines for it.
+class CompareWrapper {
+ public:
+  explicit CompareWrapper(IrOpcode::Value op) : opcode(op) {}
+
+  Node* MakeNode(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    return m->NewNode(op(m->machine()), a, b);
+  }
+
+  const Operator* op(MachineOperatorBuilder* machine) {
+    switch (opcode) {
+      case IrOpcode::kWord32Equal:
+        return machine->Word32Equal();
+      case IrOpcode::kInt32LessThan:
+        return machine->Int32LessThan();
+      case IrOpcode::kInt32LessThanOrEqual:
+        return machine->Int32LessThanOrEqual();
+      case IrOpcode::kUint32LessThan:
+        return machine->Uint32LessThan();
+      case IrOpcode::kUint32LessThanOrEqual:
+        return machine->Uint32LessThanOrEqual();
+      case IrOpcode::kFloat64Equal:
+        return machine->Float64Equal();
+      case IrOpcode::kFloat64LessThan:
+        return machine->Float64LessThan();
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return machine->Float64LessThanOrEqual();
+      default:
+        UNREACHABLE();
+    }
+    return NULL;
+  }
+
+  bool Int32Compare(int32_t a, int32_t b) {
+    switch (opcode) {
+      case IrOpcode::kWord32Equal:
+        return a == b;
+      case IrOpcode::kInt32LessThan:
+        return a < b;
+      case IrOpcode::kInt32LessThanOrEqual:
+        return a <= b;
+      case IrOpcode::kUint32LessThan:
+        return static_cast<uint32_t>(a) < static_cast<uint32_t>(b);
+      case IrOpcode::kUint32LessThanOrEqual:
+        return static_cast<uint32_t>(a) <= static_cast<uint32_t>(b);
+      default:
+        UNREACHABLE();
+    }
+    return false;
+  }
+
+  bool Float64Compare(double a, double b) {
+    switch (opcode) {
+      case IrOpcode::kFloat64Equal:
+        return a == b;
+      case IrOpcode::kFloat64LessThan:
+        return a < b;
+      case IrOpcode::kFloat64LessThanOrEqual:
+        return a <= b;
+      default:
+        UNREACHABLE();
+    }
+    return false;
+  }
+
+  IrOpcode::Value opcode;
+};
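+
+// Example (sketch): CompareWrapper pairs graph construction (MakeNode) with a
+// C++ reference evaluation (Int32Compare), so generated comparisons can be
+// checked directly; a materialized comparison yields 1 or 0:
+//   CompareWrapper w(IrOpcode::kInt32LessThan);
+//   RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+//   m.Return(w.MakeNode(&m, m.Parameter(0), m.Parameter(1)));
+//   CHECK_EQ(w.Int32Compare(3, 4) ? 1 : 0, m.Call(3, 4));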
+
+
+// A small closure class that generates code for a function of two inputs
+// producing a single output, so it can be reused in many different contexts.
+// The {expected()} method should compute the expected output for a given
+// pair of inputs.
+template <typename T>
+class BinopGen {
+ public:
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) = 0;
+  virtual T expected(T a, T b) = 0;
+  virtual ~BinopGen() {}
+};
+
+// A helper class that generates the various combinations of input shapes
+// (parameter, load, or constant) for a binop and runs the generated code to
+// ensure it produces the correct results.
+class Int32BinopInputShapeTester {
+ public:
+  explicit Int32BinopInputShapeTester(BinopGen<int32_t>* g) : gen(g) {}
+
+  void TestAllInputShapes();
+
+ private:
+  BinopGen<int32_t>* gen;
+  int32_t input_a;
+  int32_t input_b;
+
+  void Run(RawMachineAssemblerTester<int32_t>* m);
+  void RunLeft(RawMachineAssemblerTester<int32_t>* m);
+  void RunRight(RawMachineAssemblerTester<int32_t>* m);
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_CODEGEN_TESTER_H_
diff --git a/test/cctest/compiler/function-tester.h b/test/cctest/compiler/function-tester.h
new file mode 100644
index 0000000..c869f00
--- /dev/null
+++ b/test/cctest/compiler/function-tester.h
@@ -0,0 +1,193 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
+#define V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler.h"
+#include "src/compiler/pipeline.h"
+#include "src/execution.h"
+#include "src/full-codegen.h"
+#include "src/handles.h"
+#include "src/objects-inl.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+#define USE_CRANKSHAFT 0
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class FunctionTester : public InitializedHandleScope {
+ public:
+  explicit FunctionTester(const char* source, uint32_t flags = 0)
+      : isolate(main_isolate()),
+        // The comma operator turns on natives syntax before NewFunction runs.
+        function((FLAG_allow_natives_syntax = true, NewFunction(source))),
+        flags_(flags) {
+    Compile(function);
+    const uint32_t supported_flags = CompilationInfo::kContextSpecializing |
+                                     CompilationInfo::kInliningEnabled |
+                                     CompilationInfo::kTypingEnabled;
+    CHECK_EQ(0, flags_ & ~supported_flags);
+  }
+
+  Isolate* isolate;
+  Handle<JSFunction> function;
+
+  Handle<JSFunction> Compile(Handle<JSFunction> function) {
+#if V8_TURBOFAN_TARGET
+    CompilationInfoWithZone info(function);
+
+    CHECK(Parser::Parse(&info));
+    info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+    if (flags_ & CompilationInfo::kContextSpecializing) {
+      info.MarkAsContextSpecializing();
+    }
+    if (flags_ & CompilationInfo::kInliningEnabled) {
+      info.MarkAsInliningEnabled();
+    }
+    if (flags_ & CompilationInfo::kTypingEnabled) {
+      info.MarkAsTypingEnabled();
+    }
+    CHECK(Rewriter::Rewrite(&info));
+    CHECK(Scope::Analyze(&info));
+    CHECK(Compiler::EnsureDeoptimizationSupport(&info));
+
+    Pipeline pipeline(&info);
+    Handle<Code> code = pipeline.GenerateCode();
+    if (FLAG_turbo_deoptimization) {
+      info.context()->native_context()->AddOptimizedCode(*code);
+    }
+
+    CHECK(!code.is_null());
+    function->ReplaceCode(*code);
+#elif USE_CRANKSHAFT
+    Handle<Code> unoptimized = Handle<Code>(function->code());
+    Handle<Code> code = Compiler::GetOptimizedCode(function, unoptimized,
+                                                   Compiler::NOT_CONCURRENT);
+    CHECK(!code.is_null());
+#if ENABLE_DISASSEMBLER
+    if (FLAG_print_opt_code) {
+      CodeTracer::Scope tracing_scope(isolate->GetCodeTracer());
+      code->Disassemble("test code", tracing_scope.file());
+    }
+#endif
+    function->ReplaceCode(*code);
+#endif
+    return function;
+  }
+
+  MaybeHandle<Object> Call(Handle<Object> a, Handle<Object> b) {
+    Handle<Object> args[] = {a, b};
+    return Execution::Call(isolate, function, undefined(), 2, args, false);
+  }
+
+  void CheckThrows(Handle<Object> a, Handle<Object> b) {
+    TryCatch try_catch;
+    MaybeHandle<Object> no_result = Call(a, b);
+    CHECK(isolate->has_pending_exception());
+    CHECK(try_catch.HasCaught());
+    CHECK(no_result.is_null());
+    // TODO(mstarzinger): Temporary workaround for issue chromium:362388.
+    isolate->OptionalRescheduleException(true);
+  }
+
+  v8::Handle<v8::Message> CheckThrowsReturnMessage(Handle<Object> a,
+                                                   Handle<Object> b) {
+    TryCatch try_catch;
+    MaybeHandle<Object> no_result = Call(a, b);
+    CHECK(isolate->has_pending_exception());
+    CHECK(try_catch.HasCaught());
+    CHECK(no_result.is_null());
+    // TODO(mstarzinger): Calling OptionalRescheduleException is a dirty hack;
+    // it is the only way to keep Message() from asserting when an external
+    // exception has been caught by the try_catch.
+    isolate->OptionalRescheduleException(true);
+    return try_catch.Message();
+  }
+
+  void CheckCall(Handle<Object> expected, Handle<Object> a, Handle<Object> b) {
+    Handle<Object> result = Call(a, b).ToHandleChecked();
+    CHECK(expected->SameValue(*result));
+  }
+
+  void CheckCall(Handle<Object> expected, Handle<Object> a) {
+    CheckCall(expected, a, undefined());
+  }
+
+  void CheckCall(Handle<Object> expected) {
+    CheckCall(expected, undefined(), undefined());
+  }
+
+  void CheckCall(double expected, double a, double b) {
+    CheckCall(Val(expected), Val(a), Val(b));
+  }
+
+  void CheckTrue(Handle<Object> a, Handle<Object> b) {
+    CheckCall(true_value(), a, b);
+  }
+
+  void CheckTrue(Handle<Object> a) { CheckCall(true_value(), a, undefined()); }
+
+  void CheckTrue(double a, double b) {
+    CheckCall(true_value(), Val(a), Val(b));
+  }
+
+  void CheckFalse(Handle<Object> a, Handle<Object> b) {
+    CheckCall(false_value(), a, b);
+  }
+
+  void CheckFalse(Handle<Object> a) {
+    CheckCall(false_value(), a, undefined());
+  }
+
+  void CheckFalse(double a, double b) {
+    CheckCall(false_value(), Val(a), Val(b));
+  }
+
+  Handle<JSFunction> NewFunction(const char* source) {
+    return v8::Utils::OpenHandle(
+        *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+  }
+
+  Handle<JSObject> NewObject(const char* source) {
+    return v8::Utils::OpenHandle(
+        *v8::Handle<v8::Object>::Cast(CompileRun(source)));
+  }
+
+  Handle<String> Val(const char* string) {
+    return isolate->factory()->InternalizeUtf8String(string);
+  }
+
+  Handle<Object> Val(double value) {
+    return isolate->factory()->NewNumber(value);
+  }
+
+  Handle<Object> infinity() { return isolate->factory()->infinity_value(); }
+
+  Handle<Object> minus_infinity() { return Val(-V8_INFINITY); }
+
+  Handle<Object> nan() { return isolate->factory()->nan_value(); }
+
+  Handle<Object> undefined() { return isolate->factory()->undefined_value(); }
+
+  Handle<Object> null() { return isolate->factory()->null_value(); }
+
+  Handle<Object> true_value() { return isolate->factory()->true_value(); }
+
+  Handle<Object> false_value() { return isolate->factory()->false_value(); }
+
+ private:
+  uint32_t flags_;
+};
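+
+// Example (sketch): compile a two-argument JS function through the pipeline
+// and check one call, following the pattern of the cctest suites:
+//   FunctionTester T("(function(a, b) { return a + b; })");
+//   T.CheckCall(T.Val(3), T.Val(1), T.Val(2));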
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_FUNCTION_TESTER_H_
diff --git a/test/cctest/compiler/graph-builder-tester.cc b/test/cctest/compiler/graph-builder-tester.cc
new file mode 100644
index 0000000..bfa8226
--- /dev/null
+++ b/test/cctest/compiler/graph-builder-tester.cc
@@ -0,0 +1,56 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/graph-builder-tester.h"
+
+#include "src/compiler/linkage.h"
+#include "src/compiler/pipeline.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MachineCallHelper::MachineCallHelper(Zone* zone, MachineSignature* machine_sig)
+    : CallHelper(zone->isolate(), machine_sig),
+      parameters_(NULL),
+      graph_(NULL) {}
+
+
+void MachineCallHelper::InitParameters(GraphBuilder* builder,
+                                       CommonOperatorBuilder* common) {
+  DCHECK_EQ(NULL, parameters_);
+  graph_ = builder->graph();
+  int param_count = static_cast<int>(parameter_count());
+  if (param_count == 0) return;
+  parameters_ = graph_->zone()->NewArray<Node*>(param_count);
+  for (int i = 0; i < param_count; ++i) {
+    parameters_[i] = builder->NewNode(common->Parameter(i), graph_->start());
+  }
+}
+
+
+byte* MachineCallHelper::Generate() {
+  DCHECK(parameter_count() == 0 || parameters_ != NULL);
+  if (!Pipeline::SupportedBackend()) return NULL;
+  if (code_.is_null()) {
+    Zone* zone = graph_->zone();
+    CompilationInfo info(zone->isolate(), zone);
+    Linkage linkage(&info,
+                    Linkage::GetSimplifiedCDescriptor(zone, machine_sig_));
+    Pipeline pipeline(&info);
+    code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph_);
+  }
+  return code_.ToHandleChecked()->entry();
+}
+
+
+Node* MachineCallHelper::Parameter(size_t index) {
+  DCHECK_NE(NULL, parameters_);
+  DCHECK(index < parameter_count());
+  return parameters_[index];
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/cctest/compiler/graph-builder-tester.h b/test/cctest/compiler/graph-builder-tester.h
new file mode 100644
index 0000000..df79250
--- /dev/null
+++ b/test/cctest/compiler/graph-builder-tester.h
@@ -0,0 +1,107 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
+#define V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/cctest/compiler/call-tester.h"
+#include "test/cctest/compiler/simplified-graph-builder.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A class that just passes node creation on to the Graph.
+class DirectGraphBuilder : public GraphBuilder {
+ public:
+  explicit DirectGraphBuilder(Graph* graph) : GraphBuilder(graph) {}
+  virtual ~DirectGraphBuilder() {}
+
+ protected:
+  virtual Node* MakeNode(const Operator* op, int value_input_count,
+                         Node** value_inputs) FINAL {
+    return graph()->NewNode(op, value_input_count, value_inputs);
+  }
+};
+
+
+class MachineCallHelper : public CallHelper {
+ public:
+  MachineCallHelper(Zone* zone, MachineSignature* machine_sig);
+
+  Node* Parameter(size_t index);
+
+  void GenerateCode() { Generate(); }
+
+ protected:
+  virtual byte* Generate();
+  void InitParameters(GraphBuilder* builder, CommonOperatorBuilder* common);
+
+  size_t parameter_count() const { return machine_sig_->parameter_count(); }
+
+ private:
+  Node** parameters_;
+  // TODO(dcarney): shouldn't need graph stored.
+  Graph* graph_;
+  MaybeHandle<Code> code_;
+};
+
+
+class GraphAndBuilders {
+ public:
+  explicit GraphAndBuilders(Zone* zone)
+      : main_graph_(new (zone) Graph(zone)),
+        main_common_(zone),
+        main_simplified_(zone) {}
+
+ protected:
+  // Prefixed with main_ to avoid naming conflicts.
+  Graph* main_graph_;
+  CommonOperatorBuilder main_common_;
+  MachineOperatorBuilder main_machine_;
+  SimplifiedOperatorBuilder main_simplified_;
+};
+
+
+template <typename ReturnType>
+class GraphBuilderTester
+    : public HandleAndZoneScope,
+      private GraphAndBuilders,
+      public MachineCallHelper,
+      public SimplifiedGraphBuilder,
+      public CallHelper2<ReturnType, GraphBuilderTester<ReturnType> > {
+ public:
+  explicit GraphBuilderTester(MachineType p0 = kMachNone,
+                              MachineType p1 = kMachNone,
+                              MachineType p2 = kMachNone,
+                              MachineType p3 = kMachNone,
+                              MachineType p4 = kMachNone)
+      : GraphAndBuilders(main_zone()),
+        MachineCallHelper(
+            main_zone(),
+            MakeMachineSignature(
+                main_zone(), ReturnValueTraits<ReturnType>::Representation(),
+                p0, p1, p2, p3, p4)),
+        SimplifiedGraphBuilder(main_graph_, &main_common_, &main_machine_,
+                               &main_simplified_) {
+    Begin(static_cast<int>(parameter_count()));
+    InitParameters(this, &main_common_);
+  }
+  virtual ~GraphBuilderTester() {}
+
+  Factory* factory() const { return isolate()->factory(); }
+};
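+
+// Example (sketch, assuming End() must close the graph before the first
+// Call(); Begin() already ran in the constructor):
+//   GraphBuilderTester<int32_t> t(kMachInt32);
+//   t.Return(t.Parameter(0));
+//   t.End();
+//   CHECK_EQ(42, t.Call(42));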
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_GRAPH_BUILDER_TESTER_H_
diff --git a/test/cctest/compiler/graph-tester.h b/test/cctest/compiler/graph-tester.h
new file mode 100644
index 0000000..e569245
--- /dev/null
+++ b/test/cctest/compiler/graph-tester.h
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_GRAPH_TESTER_H_
+#define V8_CCTEST_COMPILER_GRAPH_TESTER_H_
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class GraphTester : public HandleAndZoneScope, public Graph {
+ public:
+  GraphTester() : Graph(main_zone()) {}
+};
+
+
+class GraphWithStartNodeTester : public GraphTester {
+ public:
+  explicit GraphWithStartNodeTester(int num_parameters = 0)
+      : builder_(main_zone()),
+        start_node_(NewNode(builder_.Start(num_parameters))) {
+    SetStart(start_node_);
+  }
+
+  Node* start_node() { return start_node_; }
+
+ private:
+  CommonOperatorBuilder builder_;
+  Node* start_node_;
+};
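+
+// Example (sketch): a graph with its start node already in place, ready for
+// nodes that need a control input:
+//   GraphWithStartNodeTester g(2);
+//   Node* start = g.start_node();  // Start node declaring two parameters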
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_GRAPH_TESTER_H_
diff --git a/test/cctest/compiler/instruction-selector-tester.h b/test/cctest/compiler/instruction-selector-tester.h
new file mode 100644
index 0000000..3a28b2e
--- /dev/null
+++ b/test/cctest/compiler/instruction-selector-tester.h
@@ -0,0 +1,127 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
+#define V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
+
+#include <deque>
+#include <set>
+
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/ostreams.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef std::set<int> VirtualRegisterSet;
+
+enum InstructionSelectorTesterMode { kTargetMode, kInternalMode };
+
+class InstructionSelectorTester : public HandleAndZoneScope,
+                                  public RawMachineAssembler {
+ public:
+  enum Mode { kTargetMode, kInternalMode };
+
+  static const int kParameterCount = 3;
+  static MachineType* BuildParameterArray(Zone* zone) {
+    MachineType* array = zone->NewArray<MachineType>(kParameterCount);
+    for (int i = 0; i < kParameterCount; ++i) {
+      array[i] = kMachInt32;
+    }
+    return array;
+  }
+
+  InstructionSelectorTester()
+      : RawMachineAssembler(
+            new (main_zone()) Graph(main_zone()),
+            new (main_zone()) MachineCallDescriptorBuilder(
+                kMachInt32, kParameterCount, BuildParameterArray(main_zone())),
+            kMachPtr) {}
+
+  void SelectInstructions(CpuFeature feature) {
+    SelectInstructions(InstructionSelector::Features(feature));
+  }
+
+  void SelectInstructions(CpuFeature feature1, CpuFeature feature2) {
+    SelectInstructions(InstructionSelector::Features(feature1, feature2));
+  }
+
+  void SelectInstructions(Mode mode = kTargetMode) {
+    SelectInstructions(InstructionSelector::Features(), mode);
+  }
+
+  void SelectInstructions(InstructionSelector::Features features,
+                          Mode mode = kTargetMode) {
+    OFStream out(stdout);
+    Schedule* schedule = Export();
+    CHECK_NE(0, graph()->NodeCount());
+    CompilationInfo info(main_isolate(), main_zone());
+    Linkage linkage(&info, call_descriptor());
+    InstructionSequence sequence(&linkage, graph(), schedule);
+    SourcePositionTable source_positions(graph());
+    InstructionSelector selector(&sequence, &source_positions, features);
+    selector.SelectInstructions();
+    out << "--- Code sequence after instruction selection --- " << endl
+        << sequence;
+    for (InstructionSequence::const_iterator i = sequence.begin();
+         i != sequence.end(); ++i) {
+      Instruction* instr = *i;
+      if (instr->opcode() < 0) continue;
+      if (mode == kTargetMode) {
+        switch (ArchOpcodeField::decode(instr->opcode())) {
+#define CASE(Name) \
+  case k##Name:    \
+    break;
+          TARGET_ARCH_OPCODE_LIST(CASE)
+#undef CASE
+          default:
+            continue;
+        }
+      }
+      code.push_back(instr);
+    }
+    for (int vreg = 0; vreg < sequence.VirtualRegisterCount(); ++vreg) {
+      if (sequence.IsDouble(vreg)) {
+        CHECK(!sequence.IsReference(vreg));
+        doubles.insert(vreg);
+      }
+      if (sequence.IsReference(vreg)) {
+        CHECK(!sequence.IsDouble(vreg));
+        references.insert(vreg);
+      }
+    }
+    immediates.assign(sequence.immediates().begin(),
+                      sequence.immediates().end());
+  }
+
+  int32_t ToInt32(const InstructionOperand* operand) const {
+    size_t i = operand->index();
+    CHECK(i < immediates.size());
+    CHECK_EQ(InstructionOperand::IMMEDIATE, operand->kind());
+    return immediates[i].ToInt32();
+  }
+
+  std::deque<Instruction*> code;
+  VirtualRegisterSet doubles;
+  VirtualRegisterSet references;
+  std::deque<Constant> immediates;
+};
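+
+// Example (sketch): build a small graph, select instructions, then inspect
+// the selected code; the exact instruction count is target-dependent and the
+// single-add expectation here is an assumption:
+//   InstructionSelectorTester m;
+//   m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
+//   m.SelectInstructions();
+//   CHECK_EQ(1, static_cast<int>(m.code.size()));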
+
+
+static inline void CheckSameVreg(InstructionOperand* exp,
+                                 InstructionOperand* val) {
+  CHECK_EQ(InstructionOperand::UNALLOCATED, exp->kind());
+  CHECK_EQ(InstructionOperand::UNALLOCATED, val->kind());
+  CHECK_EQ(UnallocatedOperand::cast(exp)->virtual_register(),
+           UnallocatedOperand::cast(val)->virtual_register());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_INSTRUCTION_SELECTOR_TEST_H_
diff --git a/test/cctest/compiler/simplified-graph-builder.cc b/test/cctest/compiler/simplified-graph-builder.cc
new file mode 100644
index 0000000..c44d5ed
--- /dev/null
+++ b/test/cctest/compiler/simplified-graph-builder.cc
@@ -0,0 +1,90 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/compiler/simplified-graph-builder.h"
+
+#include "src/compiler/operator-properties.h"
+#include "src/compiler/operator-properties-inl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+SimplifiedGraphBuilder::SimplifiedGraphBuilder(
+    Graph* graph, CommonOperatorBuilder* common,
+    MachineOperatorBuilder* machine, SimplifiedOperatorBuilder* simplified)
+    : GraphBuilder(graph),
+      effect_(NULL),
+      return_(NULL),
+      common_(common),
+      machine_(machine),
+      simplified_(simplified) {}
+
+
+void SimplifiedGraphBuilder::Begin(int num_parameters) {
+  DCHECK(graph()->start() == NULL);
+  Node* start = graph()->NewNode(common()->Start(num_parameters));
+  graph()->SetStart(start);
+  effect_ = start;
+}
+
+
+void SimplifiedGraphBuilder::Return(Node* value) {
+  return_ =
+      graph()->NewNode(common()->Return(), value, effect_, graph()->start());
+  effect_ = NULL;
+}
+
+
+void SimplifiedGraphBuilder::End() {
+  Node* end = graph()->NewNode(common()->End(), return_);
+  graph()->SetEnd(end);
+}
+
+
+Node* SimplifiedGraphBuilder::MakeNode(const Operator* op,
+                                       int value_input_count,
+                                       Node** value_inputs) {
+  DCHECK(op->InputCount() == value_input_count);
+
+  DCHECK(!OperatorProperties::HasContextInput(op));
+  DCHECK(!OperatorProperties::HasFrameStateInput(op));
+  bool has_control = OperatorProperties::GetControlInputCount(op) == 1;
+  bool has_effect = OperatorProperties::GetEffectInputCount(op) == 1;
+
+  DCHECK(OperatorProperties::GetControlInputCount(op) < 2);
+  DCHECK(OperatorProperties::GetEffectInputCount(op) < 2);
+
+  Node* result = NULL;
+  if (!has_control && !has_effect) {
+    result = graph()->NewNode(op, value_input_count, value_inputs);
+  } else {
+    int input_count_with_deps = value_input_count;
+    if (has_control) ++input_count_with_deps;
+    if (has_effect) ++input_count_with_deps;
+    Node** buffer = zone()->NewArray<Node*>(input_count_with_deps);
+    memcpy(buffer, value_inputs, kPointerSize * value_input_count);
+    Node** current_input = buffer + value_input_count;
+    if (has_effect) {
+      *current_input++ = effect_;
+    }
+    if (has_control) {
+      *current_input++ = graph()->start();
+    }
+    result = graph()->NewNode(op, input_count_with_deps, buffer);
+    if (has_effect) {
+      effect_ = result;
+    }
+    if (OperatorProperties::HasControlOutput(result->op())) {
+      // This graph builder does not support control flow.
+      UNREACHABLE();
+    }
+  }
+
+  return result;
+}
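+
+// For illustration: an op with one value input plus one effect and one control
+// input (e.g. a simplified LoadField) is assembled above as
+//   NewNode(op, 3, {object, effect_, graph()->start()})
+// after which effect_ is re-threaded to the new node for the next effectful
+// operation.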
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/test/cctest/compiler/simplified-graph-builder.h b/test/cctest/compiler/simplified-graph-builder.h
new file mode 100644
index 0000000..1b637b7
--- /dev/null
+++ b/test/cctest/compiler/simplified-graph-builder.h
@@ -0,0 +1,156 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
+#define V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-builder.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/call-tester.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class SimplifiedGraphBuilder : public GraphBuilder {
+ public:
+  SimplifiedGraphBuilder(Graph* graph, CommonOperatorBuilder* common,
+                         MachineOperatorBuilder* machine,
+                         SimplifiedOperatorBuilder* simplified);
+  virtual ~SimplifiedGraphBuilder() {}
+
+  Zone* zone() const { return graph()->zone(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  CommonOperatorBuilder* common() const { return common_; }
+  MachineOperatorBuilder* machine() const { return machine_; }
+  SimplifiedOperatorBuilder* simplified() const { return simplified_; }
+
+  // Initialize graph and builder.
+  void Begin(int num_parameters);
+
+  void Return(Node* value);
+
+  // Close the graph.
+  void End();
+
+  Node* PointerConstant(void* value) {
+    intptr_t intptr_value = reinterpret_cast<intptr_t>(value);
+    return kPointerSize == 8 ? NewNode(common()->Int64Constant(intptr_value))
+                             : Int32Constant(static_cast<int>(intptr_value));
+  }
+  Node* Int32Constant(int32_t value) {
+    return NewNode(common()->Int32Constant(value));
+  }
+  Node* HeapConstant(Handle<Object> object) {
+    Unique<Object> val = Unique<Object>::CreateUninitialized(object);
+    return NewNode(common()->HeapConstant(val));
+  }
+
+  Node* BooleanNot(Node* a) { return NewNode(simplified()->BooleanNot(), a); }
+
+  Node* NumberEqual(Node* a, Node* b) {
+    return NewNode(simplified()->NumberEqual(), a, b);
+  }
+  Node* NumberLessThan(Node* a, Node* b) {
+    return NewNode(simplified()->NumberLessThan(), a, b);
+  }
+  Node* NumberLessThanOrEqual(Node* a, Node* b) {
+    return NewNode(simplified()->NumberLessThanOrEqual(), a, b);
+  }
+  Node* NumberAdd(Node* a, Node* b) {
+    return NewNode(simplified()->NumberAdd(), a, b);
+  }
+  Node* NumberSubtract(Node* a, Node* b) {
+    return NewNode(simplified()->NumberSubtract(), a, b);
+  }
+  Node* NumberMultiply(Node* a, Node* b) {
+    return NewNode(simplified()->NumberMultiply(), a, b);
+  }
+  Node* NumberDivide(Node* a, Node* b) {
+    return NewNode(simplified()->NumberDivide(), a, b);
+  }
+  Node* NumberModulus(Node* a, Node* b) {
+    return NewNode(simplified()->NumberModulus(), a, b);
+  }
+  Node* NumberToInt32(Node* a) {
+    return NewNode(simplified()->NumberToInt32(), a);
+  }
+  Node* NumberToUint32(Node* a) {
+    return NewNode(simplified()->NumberToUint32(), a);
+  }
+
+  Node* StringEqual(Node* a, Node* b) {
+    return NewNode(simplified()->StringEqual(), a, b);
+  }
+  Node* StringLessThan(Node* a, Node* b) {
+    return NewNode(simplified()->StringLessThan(), a, b);
+  }
+  Node* StringLessThanOrEqual(Node* a, Node* b) {
+    return NewNode(simplified()->StringLessThanOrEqual(), a, b);
+  }
+  Node* StringAdd(Node* a, Node* b) {
+    return NewNode(simplified()->StringAdd(), a, b);
+  }
+
+  Node* ChangeTaggedToInt32(Node* a) {
+    return NewNode(simplified()->ChangeTaggedToInt32(), a);
+  }
+  Node* ChangeTaggedToUint32(Node* a) {
+    return NewNode(simplified()->ChangeTaggedToUint32(), a);
+  }
+  Node* ChangeTaggedToFloat64(Node* a) {
+    return NewNode(simplified()->ChangeTaggedToFloat64(), a);
+  }
+  Node* ChangeInt32ToTagged(Node* a) {
+    return NewNode(simplified()->ChangeInt32ToTagged(), a);
+  }
+  Node* ChangeUint32ToTagged(Node* a) {
+    return NewNode(simplified()->ChangeUint32ToTagged(), a);
+  }
+  Node* ChangeFloat64ToTagged(Node* a) {
+    return NewNode(simplified()->ChangeFloat64ToTagged(), a);
+  }
+  Node* ChangeBoolToBit(Node* a) {
+    return NewNode(simplified()->ChangeBoolToBit(), a);
+  }
+  Node* ChangeBitToBool(Node* a) {
+    return NewNode(simplified()->ChangeBitToBool(), a);
+  }
+
+  Node* LoadField(const FieldAccess& access, Node* object) {
+    return NewNode(simplified()->LoadField(access), object);
+  }
+  Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
+    return NewNode(simplified()->StoreField(access), object, value);
+  }
+  Node* LoadElement(const ElementAccess& access, Node* object, Node* index,
+                    Node* length) {
+    return NewNode(simplified()->LoadElement(access), object, index, length);
+  }
+  Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
+                     Node* length, Node* value) {
+    return NewNode(simplified()->StoreElement(access), object, index, length,
+                   value);
+  }
+
+ protected:
+  virtual Node* MakeNode(const Operator* op, int value_input_count,
+                         Node** value_inputs) FINAL;
+
+ private:
+  Node* effect_;
+  Node* return_;
+  CommonOperatorBuilder* common_;
+  MachineOperatorBuilder* machine_;
+  SimplifiedOperatorBuilder* simplified_;
+};
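+
+// Example (sketch): the intended build protocol is
+//   builder.Begin(n);       // create and set the Start node
+//   /* build nodes */       // effect dependencies are threaded implicitly
+//   builder.Return(value);  // record the return value
+//   builder.End();          // create and set the End node
+// GraphBuilderTester invokes Begin() from its constructor.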
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_SIMPLIFIED_GRAPH_BUILDER_H_
diff --git a/test/cctest/compiler/test-branch-combine.cc b/test/cctest/compiler/test-branch-combine.cc
new file mode 100644
index 0000000..cd3472d
--- /dev/null
+++ b/test/cctest/compiler/test-branch-combine.cc
@@ -0,0 +1,462 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+static IrOpcode::Value int32cmp_opcodes[] = {
+    IrOpcode::kWord32Equal, IrOpcode::kInt32LessThan,
+    IrOpcode::kInt32LessThanOrEqual, IrOpcode::kUint32LessThan,
+    IrOpcode::kUint32LessThanOrEqual};
+
+
+TEST(BranchCombineWord32EqualZero_1) {
+  // Test combining a branch with x == 0
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  int32_t eq_constant = -1033;
+  int32_t ne_constant = 825118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Word32Equal(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a == 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineWord32EqualZero_chain) {
+  // Test combining a branch with a chain of x == 0 == 0 == 0 ...
+  int32_t eq_constant = -1133;
+  int32_t ne_constant = 815118;
+
+  for (int k = 0; k < 6; k++) {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32);
+    Node* p0 = m.Parameter(0);
+    MLabel blocka, blockb;
+    Node* cond = p0;
+    for (int j = 0; j < k; j++) {
+      cond = m.Word32Equal(cond, m.Int32Constant(0));
+    }
+    m.Branch(cond, &blocka, &blockb);
+    m.Bind(&blocka);
+    m.Return(m.Int32Constant(eq_constant));
+    m.Bind(&blockb);
+    m.Return(m.Int32Constant(ne_constant));
+
+    FOR_INT32_INPUTS(i) {
+      int32_t a = *i;
+      int32_t expect = (k & 1) == 1 ? (a == 0 ? eq_constant : ne_constant)
+                                    : (a == 0 ? ne_constant : eq_constant);
+      CHECK_EQ(expect, m.Call(a));
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32LessThanZero_1) {
+  // Test combining a branch with x < 0
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  int32_t eq_constant = -1433;
+  int32_t ne_constant = 845118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32LessThan(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a < 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineUint32LessThan100_1) {
+  // Test combining a branch with x < 100
+  RawMachineAssemblerTester<int32_t> m(kMachUint32);
+  int32_t eq_constant = 1471;
+  int32_t ne_constant = 88845718;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Uint32LessThan(p0, m.Int32Constant(100)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t a = *i;
+    int32_t expect = a < 100 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineUint32LessThanOrEqual100_1) {
+  // Test combining a branch with x <= 100
+  RawMachineAssemblerTester<int32_t> m(kMachUint32);
+  int32_t eq_constant = 1479;
+  int32_t ne_constant = 77845719;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Uint32LessThanOrEqual(p0, m.Int32Constant(100)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t a = *i;
+    int32_t expect = a <= 100 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineZeroLessThanInt32_1) {
+  // Test combining a branch with 0 < x
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  int32_t eq_constant = -2033;
+  int32_t ne_constant = 225118;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32LessThan(m.Int32Constant(0), p0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = 0 < a ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineInt32GreaterThanZero_1) {
+  // Test combining a branch with x > 0
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  int32_t eq_constant = -1073;
+  int32_t ne_constant = 825178;
+  Node* p0 = m.Parameter(0);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32GreaterThan(p0, m.Int32Constant(0)), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t a = *i;
+    int32_t expect = a > 0 ? eq_constant : ne_constant;
+    CHECK_EQ(expect, m.Call(a));
+  }
+}
+
+
+TEST(BranchCombineWord32EqualP) {
+  // Test combining a branch with a Word32Equal.
+  RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+  int32_t eq_constant = -1035;
+  int32_t ne_constant = 825018;
+  Node* p0 = m.Parameter(0);
+  Node* p1 = m.Parameter(1);
+
+  MLabel blocka, blockb;
+  m.Branch(m.Word32Equal(p0, p1), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(eq_constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(ne_constant));
+
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t a = *i;
+      int32_t b = *j;
+      int32_t expect = a == b ? eq_constant : ne_constant;
+      CHECK_EQ(expect, m.Call(a, b));
+    }
+  }
+}
+
+
+TEST(BranchCombineWord32EqualI) {
+  int32_t eq_constant = -1135;
+  int32_t ne_constant = 925718;
+
+  for (int left = 0; left < 2; left++) {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      int32_t a = *i;
+
+      Node* p0 = m.Int32Constant(a);
+      Node* p1 = m.Parameter(0);
+
+      MLabel blocka, blockb;
+      if (left == 1) m.Branch(m.Word32Equal(p0, p1), &blocka, &blockb);
+      if (left == 0) m.Branch(m.Word32Equal(p1, p0), &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      FOR_INT32_INPUTS(j) {
+        int32_t b = *j;
+        int32_t expect = a == b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(b));
+      }
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32CmpP) {
+  int32_t eq_constant = -1235;
+  int32_t ne_constant = 725018;
+
+  for (int op = 0; op < 2; op++) {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+    Node* p0 = m.Parameter(0);
+    Node* p1 = m.Parameter(1);
+
+    MLabel blocka, blockb;
+    if (op == 0) m.Branch(m.Int32LessThan(p0, p1), &blocka, &blockb);
+    if (op == 1) m.Branch(m.Int32LessThanOrEqual(p0, p1), &blocka, &blockb);
+    m.Bind(&blocka);
+    m.Return(m.Int32Constant(eq_constant));
+    m.Bind(&blockb);
+    m.Return(m.Int32Constant(ne_constant));
+
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t a = *i;
+        int32_t b = *j;
+        int32_t expect = 0;
+        if (op == 0) expect = a < b ? eq_constant : ne_constant;
+        if (op == 1) expect = a <= b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(a, b));
+      }
+    }
+  }
+}
+
+
+TEST(BranchCombineInt32CmpI) {
+  int32_t eq_constant = -1175;
+  int32_t ne_constant = 927711;
+
+  for (int op = 0; op < 2; op++) {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      int32_t a = *i;
+      Node* p0 = m.Int32Constant(a);
+      Node* p1 = m.Parameter(0);
+
+      MLabel blocka, blockb;
+      if (op == 0) m.Branch(m.Int32LessThan(p0, p1), &blocka, &blockb);
+      if (op == 1) m.Branch(m.Int32LessThanOrEqual(p0, p1), &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      FOR_INT32_INPUTS(j) {
+        int32_t b = *j;
+        int32_t expect = 0;
+        if (op == 0) expect = a < b ? eq_constant : ne_constant;
+        if (op == 1) expect = a <= b ? eq_constant : ne_constant;
+        CHECK_EQ(expect, m.Call(b));
+      }
+    }
+  }
+}
+
+
+// Now come the sophisticated tests for many input shape combinations.
+
+// Materializes a boolean (1 or 0) from a comparison.
+class CmpMaterializeBoolGen : public BinopGen<int32_t> {
+ public:
+  CompareWrapper w;
+  bool invert;
+
+  CmpMaterializeBoolGen(IrOpcode::Value opcode, bool i)
+      : w(opcode), invert(i) {}
+
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    Node* cond = w.MakeNode(m, a, b);
+    if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
+    m->Return(cond);
+  }
+  virtual int32_t expected(int32_t a, int32_t b) {
+    if (invert) return !w.Int32Compare(a, b) ? 1 : 0;
+    return w.Int32Compare(a, b) ? 1 : 0;
+  }
+};
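+
+
+// The inversion used above, in scalar form (minimal sketch): a materialized
+// condition bit is inverted by comparing it against zero, which is exactly
+// what Word32Equal(cond, Int32Constant(0)) expresses at the machine level:
+//
+//   int32_t InvertBit(int32_t cond) { return cond == 0 ? 1 : 0; }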
+
+
+// Generates a branch and returns one of two values from a comparison.
+class CmpBranchGen : public BinopGen<int32_t> {
+ public:
+  CompareWrapper w;
+  bool invert;
+  bool true_first;
+  int32_t eq_constant;
+  int32_t ne_constant;
+
+  CmpBranchGen(IrOpcode::Value opcode, bool i, bool t, int32_t eq, int32_t ne)
+      : w(opcode), invert(i), true_first(t), eq_constant(eq), ne_constant(ne) {}
+
+  virtual void gen(RawMachineAssemblerTester<int32_t>* m, Node* a, Node* b) {
+    MLabel blocka, blockb;
+    Node* cond = w.MakeNode(m, a, b);
+    if (invert) cond = m->Word32Equal(cond, m->Int32Constant(0));
+    m->Branch(cond, &blocka, &blockb);
+    if (true_first) {
+      m->Bind(&blocka);
+      m->Return(m->Int32Constant(eq_constant));
+      m->Bind(&blockb);
+      m->Return(m->Int32Constant(ne_constant));
+    } else {
+      m->Bind(&blockb);
+      m->Return(m->Int32Constant(ne_constant));
+      m->Bind(&blocka);
+      m->Return(m->Int32Constant(eq_constant));
+    }
+  }
+  virtual int32_t expected(int32_t a, int32_t b) {
+    if (invert) return !w.Int32Compare(a, b) ? eq_constant : ne_constant;
+    return w.Int32Compare(a, b) ? eq_constant : ne_constant;
+  }
+};
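+
+
+// Note that true_first flips which block is bound (and thus emitted) first,
+// so the shape tests below cover both branch layouts rather than only the
+// fallthrough case.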
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_materialized) {
+  for (size_t i = 0; i < arraysize(int32cmp_opcodes); i++) {
+    CmpMaterializeBoolGen gen(int32cmp_opcodes[i], false);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverted_materialized) {
+  for (size_t i = 0; i < arraysize(int32cmp_opcodes); i++) {
+    CmpMaterializeBoolGen gen(int32cmp_opcodes[i], true);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_branch_true) {
+  for (int i = 0; i < static_cast<int>(arraysize(int32cmp_opcodes)); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], false, false, 995 + i, -1011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_branch_false) {
+  for (int i = 0; i < static_cast<int>(arraysize(int32cmp_opcodes)); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], false, true, 795 + i, -2011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_true) {
+  for (int i = 0; i < static_cast<int>(arraysize(int32cmp_opcodes)); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], true, false, 695 + i, -3011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineInt32CmpAllInputShapes_inverse_branch_false) {
+  for (int i = 0; i < static_cast<int>(arraysize(int32cmp_opcodes)); i++) {
+    CmpBranchGen gen(int32cmp_opcodes[i], true, true, 595 + i, -4011 - i);
+    Int32BinopInputShapeTester tester(&gen);
+    tester.TestAllInputShapes();
+  }
+}
+
+
+TEST(BranchCombineFloat64Compares) {
+  double inf = V8_INFINITY;
+  double nan = v8::base::OS::nan_value();
+  double inputs[] = {0.0, 1.0, -1.0, -inf, inf, nan};
+
+  int32_t eq_constant = -1733;
+  int32_t ne_constant = 915118;
+
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  CompareWrapper cmps[] = {CompareWrapper(IrOpcode::kFloat64Equal),
+                           CompareWrapper(IrOpcode::kFloat64LessThan),
+                           CompareWrapper(IrOpcode::kFloat64LessThanOrEqual)};
+
+  for (size_t c = 0; c < arraysize(cmps); c++) {
+    CompareWrapper cmp = cmps[c];
+    for (int invert = 0; invert < 2; invert++) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+      Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+
+      MLabel blocka, blockb;
+      Node* cond = cmp.MakeNode(&m, a, b);
+      if (invert) cond = m.Word32Equal(cond, m.Int32Constant(0));
+      m.Branch(cond, &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(eq_constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(ne_constant));
+
+      for (size_t i = 0; i < arraysize(inputs); i++) {
+        for (size_t j = 0; j < arraysize(inputs); j += 2) {
+          input_a = inputs[i];
+          input_b = inputs[j];
+          int32_t expected =
+              invert ? (cmp.Float64Compare(input_a, input_b) ? ne_constant
+                                                             : eq_constant)
+                     : (cmp.Float64Compare(input_a, input_b) ? eq_constant
+                                                             : ne_constant);
+          CHECK_EQ(expected, m.Call());
+        }
+      }
+    }
+  }
+}
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-changes-lowering.cc b/test/cctest/compiler/test-changes-lowering.cc
new file mode 100644
index 0000000..06308a0
--- /dev/null
+++ b/test/cctest/compiler/test-changes-lowering.cc
@@ -0,0 +1,413 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/change-lowering.h"
+#include "src/compiler/control-builders.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/verifier.h"
+#include "src/execution.h"
+#include "src/globals.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <typename ReturnType>
+class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
+ public:
+  explicit ChangesLoweringTester(MachineType p0 = kMachNone)
+      : GraphBuilderTester<ReturnType>(p0),
+        typer(this->zone()),
+        javascript(this->zone()),
+        jsgraph(this->graph(), this->common(), &javascript, &typer,
+                this->machine()),
+        function(Handle<JSFunction>::null()) {}
+
+  Typer typer;
+  JSOperatorBuilder javascript;
+  JSGraph jsgraph;
+  Handle<JSFunction> function;
+
+  Node* start() { return this->graph()->start(); }
+
+  template <typename T>
+  T* CallWithPotentialGC() {
+    // TODO(titzer): we need to wrap the code in a JSFunction and call it via
+    // Execution::Call() so that the GC knows about the frame, can walk it,
+    // relocate the code object if necessary, etc.
+    // This is pretty ugly and at the least should be moved up to helpers.
+    if (function.is_null()) {
+      function =
+          v8::Utils::OpenHandle(*v8::Handle<v8::Function>::Cast(CompileRun(
+              "(function() { 'use strict'; return 2.7123; })")));
+      CompilationInfoWithZone info(function);
+      CHECK(Parser::Parse(&info));
+      info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+      CHECK(Rewriter::Rewrite(&info));
+      CHECK(Scope::Analyze(&info));
+      CHECK_NE(NULL, info.scope());
+      Handle<ScopeInfo> scope_info =
+          ScopeInfo::Create(info.scope(), info.zone());
+      info.shared_info()->set_scope_info(*scope_info);
+      Pipeline pipeline(&info);
+      Linkage linkage(&info);
+      Handle<Code> code =
+          pipeline.GenerateCodeForMachineGraph(&linkage, this->graph());
+      CHECK(!code.is_null());
+      function->ReplaceCode(*code);
+    }
+    Handle<Object>* args = NULL;
+    MaybeHandle<Object> result =
+        Execution::Call(this->isolate(), function, factory()->undefined_value(),
+                        0, args, false);
+    return T::cast(*result.ToHandleChecked());
+  }
+
+  void StoreFloat64(Node* node, double* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    this->Store(kMachFloat64, ptr_node, node);
+  }
+
+  Node* LoadInt32(int32_t* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachInt32, ptr_node);
+  }
+
+  Node* LoadUint32(uint32_t* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachUint32, ptr_node);
+  }
+
+  Node* LoadFloat64(double* ptr) {
+    Node* ptr_node = this->PointerConstant(ptr);
+    return this->Load(kMachFloat64, ptr_node);
+  }
+
+  void CheckNumber(double expected, Object* number) {
+    CHECK(this->isolate()->factory()->NewNumber(expected)->SameValue(number));
+  }
+
+  void BuildAndLower(const Operator* op) {
+    // We build a graph by hand here, because the raw machine assembler
+    // does not add the correct control and effect nodes.
+    Node* p0 = this->Parameter(0);
+    Node* change = this->graph()->NewNode(op, p0);
+    Node* ret = this->graph()->NewNode(this->common()->Return(), change,
+                                       this->start(), this->start());
+    Node* end = this->graph()->NewNode(this->common()->End(), ret);
+    this->graph()->SetEnd(end);
+    LowerChange(change);
+  }
+
+  void BuildStoreAndLower(const Operator* op, const Operator* store_op,
+                          void* location) {
+    // We build a graph by hand here, because the raw machine assembler
+    // does not add the correct control and effect nodes.
+    Node* p0 = this->Parameter(0);
+    Node* change = this->graph()->NewNode(op, p0);
+    Node* store = this->graph()->NewNode(
+        store_op, this->PointerConstant(location), this->Int32Constant(0),
+        change, this->start(), this->start());
+    Node* ret = this->graph()->NewNode(
+        this->common()->Return(), this->Int32Constant(0), store, this->start());
+    Node* end = this->graph()->NewNode(this->common()->End(), ret);
+    this->graph()->SetEnd(end);
+    LowerChange(change);
+  }
+
+  void BuildLoadAndLower(const Operator* op, const Operator* load_op,
+                         void* location) {
+    // We build a graph by hand here, because the raw machine assembler
+    // does not add the correct control and effect nodes.
+    Node* load =
+        this->graph()->NewNode(load_op, this->PointerConstant(location),
+                               this->Int32Constant(0), this->start());
+    Node* change = this->graph()->NewNode(op, load);
+    Node* ret = this->graph()->NewNode(this->common()->Return(), change,
+                                       this->start(), this->start());
+    Node* end = this->graph()->NewNode(this->common()->End(), ret);
+    this->graph()->SetEnd(end);
+    LowerChange(change);
+  }
+
+  void LowerChange(Node* change) {
+    // Run the graph reducer with changes lowering on a single node.
+    CompilationInfo info(this->isolate(), this->zone());
+    Linkage linkage(&info);
+    ChangeLowering lowering(&jsgraph, &linkage);
+    GraphReducer reducer(this->graph());
+    reducer.AddReducer(&lowering);
+    reducer.ReduceNode(change);
+    Verifier::Run(this->graph());
+  }
+
+  Factory* factory() { return this->isolate()->factory(); }
+  Heap* heap() { return this->isolate()->heap(); }
+};
+
+
+TEST(RunChangeTaggedToInt32) {
+  // Build and lower a graph by hand.
+  ChangesLoweringTester<int32_t> t(kMachAnyTagged);
+  t.BuildAndLower(t.simplified()->ChangeTaggedToInt32());
+
+  if (Pipeline::SupportedTarget()) {
+    FOR_INT32_INPUTS(i) {
+      int32_t input = *i;
+
+      if (Smi::IsValid(input)) {
+        int32_t result = t.Call(Smi::FromInt(input));
+        CHECK_EQ(input, result);
+      }
+
+      {
+        Handle<Object> number = t.factory()->NewNumber(input);
+        int32_t result = t.Call(*number);
+        CHECK_EQ(input, result);
+      }
+
+      {
+        Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+        int32_t result = t.Call(*number);
+        CHECK_EQ(input, result);
+      }
+    }
+  }
+}
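+
+
+// Background on the Smi fast path above (a sketch; the authoritative tag
+// constants live in src/objects.h): on 32-bit targets a small integer is
+// stored shifted left by one bit with a zero tag, so the lowered code can
+// untag it with a plain arithmetic shift:
+//
+//   int32_t UntagSmi32(int32_t tagged) { return tagged >> 1; }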
+
+
+TEST(RunChangeTaggedToUint32) {
+  // Build and lower a graph by hand.
+  ChangesLoweringTester<uint32_t> t(kMachAnyTagged);
+  t.BuildAndLower(t.simplified()->ChangeTaggedToUint32());
+
+  if (Pipeline::SupportedTarget()) {
+    FOR_UINT32_INPUTS(i) {
+      uint32_t input = *i;
+
+      if (Smi::IsValid(input)) {
+        uint32_t result = t.Call(Smi::FromInt(input));
+        CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
+      }
+
+      {
+        Handle<Object> number = t.factory()->NewNumber(input);
+        uint32_t result = t.Call(*number);
+        CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
+      }
+
+      {
+        Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+        uint32_t result = t.Call(*number);
+        CHECK_EQ(static_cast<int32_t>(input), static_cast<int32_t>(result));
+      }
+    }
+  }
+}
+
+
+TEST(RunChangeTaggedToFloat64) {
+  ChangesLoweringTester<int32_t> t(kMachAnyTagged);
+  double result;
+
+  t.BuildStoreAndLower(
+      t.simplified()->ChangeTaggedToFloat64(),
+      t.machine()->Store(StoreRepresentation(kMachFloat64, kNoWriteBarrier)),
+      &result);
+
+  if (Pipeline::SupportedTarget()) {
+    FOR_INT32_INPUTS(i) {
+      int32_t input = *i;
+
+      if (Smi::IsValid(input)) {
+        t.Call(Smi::FromInt(input));
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+
+      {
+        Handle<Object> number = t.factory()->NewNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+
+      {
+        Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, static_cast<int32_t>(result));
+      }
+    }
+  }
+
+  if (Pipeline::SupportedTarget()) {
+    FOR_FLOAT64_INPUTS(i) {
+      double input = *i;
+      {
+        Handle<Object> number = t.factory()->NewNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, result);
+      }
+
+      {
+        Handle<HeapNumber> number = t.factory()->NewHeapNumber(input);
+        t.Call(*number);
+        CHECK_EQ(input, result);
+      }
+    }
+  }
+}
+
+
+TEST(RunChangeBoolToBit) {
+  ChangesLoweringTester<int32_t> t(kMachAnyTagged);
+  t.BuildAndLower(t.simplified()->ChangeBoolToBit());
+
+  if (Pipeline::SupportedTarget()) {
+    Object* true_obj = t.heap()->true_value();
+    int32_t result = t.Call(true_obj);
+    CHECK_EQ(1, result);
+  }
+
+  if (Pipeline::SupportedTarget()) {
+    Object* false_obj = t.heap()->false_value();
+    int32_t result = t.Call(false_obj);
+    CHECK_EQ(0, result);
+  }
+}
+
+
+TEST(RunChangeBitToBool) {
+  ChangesLoweringTester<Object*> t(kMachInt32);
+  t.BuildAndLower(t.simplified()->ChangeBitToBool());
+
+  if (Pipeline::SupportedTarget()) {
+    Object* result = t.Call(1);
+    Object* true_obj = t.heap()->true_value();
+    CHECK_EQ(true_obj, result);
+  }
+
+  if (Pipeline::SupportedTarget()) {
+    Object* result = t.Call(0);
+    Object* false_obj = t.heap()->false_value();
+    CHECK_EQ(false_obj, result);
+  }
+}
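+
+
+// ChangeBitToBool amounts to a select between the two canonical oddballs
+// (scalar sketch; true_obj/false_obj stand in for heap()->true_value() and
+// heap()->false_value()):
+//
+//   Object* BitToBool(int32_t bit) { return bit ? true_obj : false_obj; }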
+
+
+#if V8_TURBOFAN_BACKEND
+// TODO(titzer): disabled on ARM
+
+TEST(RunChangeInt32ToTaggedSmi) {
+  ChangesLoweringTester<Object*> t;
+  int32_t input;
+  t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
+                      t.machine()->Load(kMachInt32), &input);
+
+  if (Pipeline::SupportedTarget()) {
+    FOR_INT32_INPUTS(i) {
+      input = *i;
+      if (!Smi::IsValid(input)) continue;
+      Object* result = t.Call();
+      t.CheckNumber(static_cast<double>(input), result);
+    }
+  }
+}
+
+
+TEST(RunChangeUint32ToTaggedSmi) {
+  ChangesLoweringTester<Object*> t;
+  uint32_t input;
+  t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
+                      t.machine()->Load(kMachUint32), &input);
+
+  if (Pipeline::SupportedTarget()) {
+    FOR_UINT32_INPUTS(i) {
+      input = *i;
+      if (input > static_cast<uint32_t>(Smi::kMaxValue)) continue;
+      Object* result = t.Call();
+      double expected = static_cast<double>(input);
+      t.CheckNumber(expected, result);
+    }
+  }
+}
+
+
+TEST(RunChangeInt32ToTagged) {
+  ChangesLoweringTester<Object*> t;
+  int32_t input;
+  t.BuildLoadAndLower(t.simplified()->ChangeInt32ToTagged(),
+                      t.machine()->Load(kMachInt32), &input);
+
+  if (Pipeline::SupportedTarget()) {
+    for (int m = 0; m < 3; m++) {  // Try 3 GC modes.
+      FOR_INT32_INPUTS(i) {
+        if (m == 0) CcTest::heap()->EnableInlineAllocation();
+        if (m == 1) CcTest::heap()->DisableInlineAllocation();
+        if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
+
+        input = *i;
+        Object* result = t.CallWithPotentialGC<Object>();
+        t.CheckNumber(static_cast<double>(input), result);
+      }
+    }
+  }
+}
+
+
+TEST(RunChangeUint32ToTagged) {
+  ChangesLoweringTester<Object*> t;
+  uint32_t input;
+  t.BuildLoadAndLower(t.simplified()->ChangeUint32ToTagged(),
+                      t.machine()->Load(kMachUint32), &input);
+
+  if (Pipeline::SupportedTarget()) {
+    for (int m = 0; m < 3; m++) {  // Try 3 GC modes.
+      FOR_UINT32_INPUTS(i) {
+        if (m == 0) CcTest::heap()->EnableInlineAllocation();
+        if (m == 1) CcTest::heap()->DisableInlineAllocation();
+        if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
+
+        input = *i;
+        Object* result = t.CallWithPotentialGC<Object>();
+        double expected = static_cast<double>(input);
+        t.CheckNumber(expected, result);
+      }
+    }
+  }
+}
+
+
+TEST(RunChangeFloat64ToTagged) {
+  ChangesLoweringTester<Object*> t;
+  double input;
+  t.BuildLoadAndLower(t.simplified()->ChangeFloat64ToTagged(),
+                      t.machine()->Load(kMachFloat64), &input);
+
+  if (Pipeline::SupportedTarget()) {
+    for (int m = 0; m < 3; m++) {  // Try 3 GC modes.
+      FOR_FLOAT64_INPUTS(i) {
+        if (m == 0) CcTest::heap()->EnableInlineAllocation();
+        if (m == 1) CcTest::heap()->DisableInlineAllocation();
+        if (m == 2) SimulateFullSpace(CcTest::heap()->new_space());
+
+        input = *i;
+        Object* result = t.CallWithPotentialGC<Object>();
+        t.CheckNumber(input, result);
+      }
+    }
+  }
+}
+
+#endif  // V8_TURBOFAN_BACKEND
diff --git a/test/cctest/compiler/test-codegen-deopt.cc b/test/cctest/compiler/test-codegen-deopt.cc
new file mode 100644
index 0000000..8217229
--- /dev/null
+++ b/test/cctest/compiler/test-codegen-deopt.cc
@@ -0,0 +1,313 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction-selector.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/raw-machine-assembler.h"
+#include "src/compiler/register-allocator.h"
+#include "src/compiler/schedule.h"
+
+#include "src/full-codegen.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+
+#include "test/cctest/compiler/c-signature.h"
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+
+#if V8_TURBOFAN_TARGET
+
+typedef RawMachineAssembler::Label MLabel;
+
+static Handle<JSFunction> NewFunction(const char* source) {
+  return v8::Utils::OpenHandle(
+      *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+}
+
+
+class DeoptCodegenTester {
+ public:
+  explicit DeoptCodegenTester(HandleAndZoneScope* scope, const char* src)
+      : scope_(scope),
+        function(NewFunction(src)),
+        info(function, scope->main_zone()),
+        bailout_id(-1) {
+    CHECK(Parser::Parse(&info));
+    info.SetOptimizing(BailoutId::None(), Handle<Code>(function->code()));
+    CHECK(Rewriter::Rewrite(&info));
+    CHECK(Scope::Analyze(&info));
+    CHECK(Compiler::EnsureDeoptimizationSupport(&info));
+
+    DCHECK(info.shared_info()->has_deoptimization_support());
+
+    graph = new (scope_->main_zone()) Graph(scope_->main_zone());
+  }
+
+  virtual ~DeoptCodegenTester() { delete code; }
+
+  void GenerateCodeFromSchedule(Schedule* schedule) {
+    OFStream os(stdout);
+    if (FLAG_trace_turbo) {
+      os << *schedule;
+    }
+
+    // Initialize the codegen and generate code.
+    Linkage* linkage = new (scope_->main_zone()) Linkage(&info);
+    code = new v8::internal::compiler::InstructionSequence(linkage, graph,
+                                                           schedule);
+    SourcePositionTable source_positions(graph);
+    InstructionSelector selector(code, &source_positions);
+    selector.SelectInstructions();
+
+    if (FLAG_trace_turbo) {
+      os << "----- Instruction sequence before register allocation -----\n"
+         << *code;
+    }
+
+    RegisterAllocator allocator(code);
+    CHECK(allocator.Allocate());
+
+    if (FLAG_trace_turbo) {
+      os << "----- Instruction sequence after register allocation -----\n"
+         << *code;
+    }
+
+    compiler::CodeGenerator generator(code);
+    result_code = generator.GenerateCode();
+
+#ifdef OBJECT_PRINT
+    if (FLAG_print_opt_code || FLAG_trace_turbo) {
+      result_code->Print();
+    }
+#endif
+  }
+
+  Zone* zone() { return scope_->main_zone(); }
+
+  HandleAndZoneScope* scope_;
+  Handle<JSFunction> function;
+  CompilationInfo info;
+  BailoutId bailout_id;
+  Handle<Code> result_code;
+  v8::internal::compiler::InstructionSequence* code;
+  Graph* graph;
+};
+
+
+class TrivialDeoptCodegenTester : public DeoptCodegenTester {
+ public:
+  explicit TrivialDeoptCodegenTester(HandleAndZoneScope* scope)
+      : DeoptCodegenTester(scope,
+                           "function foo() { deopt(); return 42; }; foo") {}
+
+  void GenerateCode() {
+    GenerateCodeFromSchedule(BuildGraphAndSchedule(graph));
+  }
+
+  Schedule* BuildGraphAndSchedule(Graph* graph) {
+    CommonOperatorBuilder common(zone());
+
+    // Manually construct a schedule for the function below:
+    // function foo() {
+    //   deopt();
+    // }
+
+    CSignature1<Object*, Object*> sig;
+    RawMachineAssembler m(graph, &sig);
+
+    Handle<JSFunction> deopt_function =
+        NewFunction("function deopt() { %DeoptimizeFunction(foo); }; deopt");
+    Unique<Object> deopt_fun_constant =
+        Unique<Object>::CreateUninitialized(deopt_function);
+    Node* deopt_fun_node = m.NewNode(common.HeapConstant(deopt_fun_constant));
+
+    Handle<Context> caller_context(function->context(), CcTest::i_isolate());
+    Unique<Object> caller_context_constant =
+        Unique<Object>::CreateUninitialized(caller_context);
+    Node* caller_context_node =
+        m.NewNode(common.HeapConstant(caller_context_constant));
+
+    bailout_id = GetCallBailoutId();
+    Node* parameters = m.NewNode(common.StateValues(1), m.UndefinedConstant());
+    Node* locals = m.NewNode(common.StateValues(0));
+    Node* stack = m.NewNode(common.StateValues(0));
+
+    Node* state_node = m.NewNode(
+        common.FrameState(JS_FRAME, bailout_id, kIgnoreOutput), parameters,
+        locals, stack, caller_context_node, m.UndefinedConstant());
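+    // The FrameState inputs above, in order: parameters, locals, the
+    // expression stack, the context, and the closure slot (undefined here);
+    // the deoptimizer uses them to rebuild the unoptimized frame at
+    // bailout_id.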
+
+    Handle<Context> context(deopt_function->context(), CcTest::i_isolate());
+    Unique<Object> context_constant =
+        Unique<Object>::CreateUninitialized(context);
+    Node* context_node = m.NewNode(common.HeapConstant(context_constant));
+
+    m.CallJS0(deopt_fun_node, m.UndefinedConstant(), context_node, state_node);
+
+    m.Return(m.UndefinedConstant());
+
+    // Schedule the graph:
+    Schedule* schedule = m.Export();
+
+    return schedule;
+  }
+
+  BailoutId GetCallBailoutId() {
+    ZoneList<Statement*>* body = info.function()->body();
+    for (int i = 0; i < body->length(); i++) {
+      if (body->at(i)->IsExpressionStatement() &&
+          body->at(i)->AsExpressionStatement()->expression()->IsCall()) {
+        return body->at(i)->AsExpressionStatement()->expression()->id();
+      }
+    }
+    CHECK(false);
+    return BailoutId(-1);
+  }
+};
+
+
+TEST(TurboTrivialDeoptCodegen) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  DeoptimizationInputData* data =
+      DeoptimizationInputData::cast(t.result_code->deoptimization_data());
+
+  // TODO(jarin) Find a way to test the safepoint.
+
+  // Check that we deoptimize to the right AST id.
+  CHECK_EQ(1, data->DeoptCount());
+  CHECK_EQ(t.bailout_id.ToInt(), data->AstId(0).ToInt());
+}
+
+
+TEST(TurboTrivialDeoptCodegenAndRun) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  t.function->ReplaceCode(*t.result_code);
+  t.info.context()->native_context()->AddOptimizedCode(*t.result_code);
+
+  Isolate* isolate = scope.main_isolate();
+  Handle<Object> result;
+  bool has_pending_exception =
+      !Execution::Call(isolate, t.function,
+                       isolate->factory()->undefined_value(), 0, NULL,
+                       false).ToHandle(&result);
+  CHECK(!has_pending_exception);
+  CHECK(result->SameValue(Smi::FromInt(42)));
+}
+
+
+class TrivialRuntimeDeoptCodegenTester : public DeoptCodegenTester {
+ public:
+  explicit TrivialRuntimeDeoptCodegenTester(HandleAndZoneScope* scope)
+      : DeoptCodegenTester(
+            scope,
+            "function foo() { %DeoptimizeFunction(foo); return 42; }; foo") {}
+
+  void GenerateCode() {
+    GenerateCodeFromSchedule(BuildGraphAndSchedule(graph));
+  }
+
+  Schedule* BuildGraphAndSchedule(Graph* graph) {
+    CommonOperatorBuilder common(zone());
+
+    // Manually construct a schedule for the function below:
+    // function foo() {
+    //   %DeoptimizeFunction(foo);
+    // }
+
+    CSignature1<Object*, Object*> sig;
+    RawMachineAssembler m(graph, &sig);
+
+    Unique<Object> this_fun_constant =
+        Unique<Object>::CreateUninitialized(function);
+    Node* this_fun_node = m.NewNode(common.HeapConstant(this_fun_constant));
+
+    Handle<Context> context(function->context(), CcTest::i_isolate());
+    Unique<Object> context_constant =
+        Unique<Object>::CreateUninitialized(context);
+    Node* context_node = m.NewNode(common.HeapConstant(context_constant));
+
+    bailout_id = GetCallBailoutId();
+    Node* parameters = m.NewNode(common.StateValues(1), m.UndefinedConstant());
+    Node* locals = m.NewNode(common.StateValues(0));
+    Node* stack = m.NewNode(common.StateValues(0));
+
+    Node* state_node = m.NewNode(
+        common.FrameState(JS_FRAME, bailout_id, kIgnoreOutput), parameters,
+        locals, stack, context_node, m.UndefinedConstant());
+
+    m.CallRuntime1(Runtime::kDeoptimizeFunction, this_fun_node, context_node,
+                   state_node);
+
+    m.Return(m.UndefinedConstant());
+
+    // Schedule the graph:
+    Schedule* schedule = m.Export();
+
+    return schedule;
+  }
+
+  BailoutId GetCallBailoutId() {
+    ZoneList<Statement*>* body = info.function()->body();
+    for (int i = 0; i < body->length(); i++) {
+      if (body->at(i)->IsExpressionStatement() &&
+          body->at(i)->AsExpressionStatement()->expression()->IsCallRuntime()) {
+        return body->at(i)->AsExpressionStatement()->expression()->id();
+      }
+    }
+    CHECK(false);
+    return BailoutId(-1);
+  }
+};
+
+
+TEST(TurboTrivialRuntimeDeoptCodegenAndRun) {
+  HandleAndZoneScope scope;
+  InitializedHandleScope handles;
+
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  TrivialRuntimeDeoptCodegenTester t(&scope);
+  t.GenerateCode();
+
+  t.function->ReplaceCode(*t.result_code);
+  t.info.context()->native_context()->AddOptimizedCode(*t.result_code);
+
+  Isolate* isolate = scope.main_isolate();
+  Handle<Object> result;
+  bool has_pending_exception =
+      !Execution::Call(isolate, t.function,
+                       isolate->factory()->undefined_value(), 0, NULL,
+                       false).ToHandle(&result);
+  CHECK(!has_pending_exception);
+  CHECK(result->SameValue(Smi::FromInt(42)));
+}
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-gap-resolver.cc b/test/cctest/compiler/test-gap-resolver.cc
new file mode 100644
index 0000000..6239f2a
--- /dev/null
+++ b/test/cctest/compiler/test-gap-resolver.cc
@@ -0,0 +1,173 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/gap-resolver.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+// The state of our move interpreter is the mapping of operands to values. Note
+// that the actual values don't really matter; all we care about is equality.
+class InterpreterState {
+ public:
+  typedef std::vector<MoveOperands> Moves;
+
+  void ExecuteInParallel(Moves moves) {
+    InterpreterState copy(*this);
+    for (Moves::iterator it = moves.begin(); it != moves.end(); ++it) {
+      if (!it->IsRedundant()) write(it->destination(), copy.read(it->source()));
+    }
+  }
+
+  bool operator==(const InterpreterState& other) const {
+    return values_ == other.values_;
+  }
+
+  bool operator!=(const InterpreterState& other) const {
+    return values_ != other.values_;
+  }
+
+ private:
+  // Internally, the state is a normalized permutation of (kind,index) pairs.
+  typedef std::pair<InstructionOperand::Kind, int> Key;
+  typedef Key Value;
+  typedef std::map<Key, Value> OperandMap;
+
+  Value read(const InstructionOperand* op) const {
+    OperandMap::const_iterator it = values_.find(KeyFor(op));
+    return (it == values_.end()) ? ValueFor(op) : it->second;
+  }
+
+  void write(const InstructionOperand* op, Value v) {
+    if (v == ValueFor(op)) {
+      values_.erase(KeyFor(op));
+    } else {
+      values_[KeyFor(op)] = v;
+    }
+  }
+
+  static Key KeyFor(const InstructionOperand* op) {
+    return Key(op->kind(), op->index());
+  }
+
+  static Value ValueFor(const InstructionOperand* op) {
+    return Value(op->kind(), op->index());
+  }
+
+  friend OStream& operator<<(OStream& os, const InterpreterState& is) {
+    for (OperandMap::const_iterator it = is.values_.begin();
+         it != is.values_.end(); ++it) {
+      if (it != is.values_.begin()) os << " ";
+      InstructionOperand source(it->first.first, it->first.second);
+      InstructionOperand destination(it->second.first, it->second.second);
+      os << MoveOperands(&source, &destination);
+    }
+    return os;
+  }
+
+  OperandMap values_;
+};
+
+
+// An abstract interpreter for moves, swaps and parallel moves.
+class MoveInterpreter : public GapResolver::Assembler {
+ public:
+  virtual void AssembleMove(InstructionOperand* source,
+                            InstructionOperand* destination) OVERRIDE {
+    InterpreterState::Moves moves;
+    moves.push_back(MoveOperands(source, destination));
+    state_.ExecuteInParallel(moves);
+  }
+
+  virtual void AssembleSwap(InstructionOperand* source,
+                            InstructionOperand* destination) OVERRIDE {
+    InterpreterState::Moves moves;
+    moves.push_back(MoveOperands(source, destination));
+    moves.push_back(MoveOperands(destination, source));
+    state_.ExecuteInParallel(moves);
+  }
+
+  void AssembleParallelMove(const ParallelMove* pm) {
+    InterpreterState::Moves moves(pm->move_operands()->begin(),
+                                  pm->move_operands()->end());
+    state_.ExecuteInParallel(moves);
+  }
+
+  InterpreterState state() const { return state_; }
+
+ private:
+  InterpreterState state_;
+};
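+
+
+// AssembleSwap models a swap as the pair {dst <- src, src <- dst}: because
+// ExecuteInParallel reads every source from a snapshot of the pre-move
+// state, neither value is lost. A sequential lowering would need an explicit
+// temporary instead (minimal sketch):
+//
+//   void SequentialSwap(int* a, int* b) { int t = *a; *a = *b; *b = t; }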
+
+
+class ParallelMoveCreator : public HandleAndZoneScope {
+ public:
+  ParallelMoveCreator() : rng_(CcTest::random_number_generator()) {}
+
+  ParallelMove* Create(int size) {
+    ParallelMove* parallel_move = new (main_zone()) ParallelMove(main_zone());
+    std::set<InstructionOperand*, InstructionOperandComparator> seen;
+    for (int i = 0; i < size; ++i) {
+      MoveOperands mo(CreateRandomOperand(), CreateRandomOperand());
+      if (!mo.IsRedundant() && seen.find(mo.destination()) == seen.end()) {
+        parallel_move->AddMove(mo.source(), mo.destination(), main_zone());
+        seen.insert(mo.destination());
+      }
+    }
+    return parallel_move;
+  }
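+  // Duplicate destinations are filtered above because a parallel move that
+  // writes the same operand twice has no well-defined result to interpret.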
+
+ private:
+  struct InstructionOperandComparator {
+    bool operator()(const InstructionOperand* x,
+                    const InstructionOperand* y) const {
+      return (x->kind() < y->kind()) ||
+             (x->kind() == y->kind() && x->index() < y->index());
+    }
+  };
+
+  InstructionOperand* CreateRandomOperand() {
+    int index = rng_->NextInt(6);
+    switch (rng_->NextInt(5)) {
+      case 0:
+        return ConstantOperand::Create(index, main_zone());
+      case 1:
+        return StackSlotOperand::Create(index, main_zone());
+      case 2:
+        return DoubleStackSlotOperand::Create(index, main_zone());
+      case 3:
+        return RegisterOperand::Create(index, main_zone());
+      case 4:
+        return DoubleRegisterOperand::Create(index, main_zone());
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  v8::base::RandomNumberGenerator* rng_;
+};
+
+
+TEST(FuzzResolver) {
+  ParallelMoveCreator pmc;
+  for (int size = 0; size < 20; ++size) {
+    for (int repeat = 0; repeat < 50; ++repeat) {
+      ParallelMove* pm = pmc.Create(size);
+
+      // Note: The gap resolver modifies the ParallelMove, so interpret first.
+      MoveInterpreter mi1;
+      mi1.AssembleParallelMove(pm);
+
+      MoveInterpreter mi2;
+      GapResolver resolver(&mi2);
+      resolver.Resolve(pm);
+
+      CHECK(mi1.state() == mi2.state());
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-graph-reducer.cc b/test/cctest/compiler/test-graph-reducer.cc
new file mode 100644
index 0000000..eabfd22
--- /dev/null
+++ b/test/cctest/compiler/test-graph-reducer.cc
@@ -0,0 +1,661 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-reducer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+const uint8_t OPCODE_A0 = 10;
+const uint8_t OPCODE_A1 = 11;
+const uint8_t OPCODE_A2 = 12;
+const uint8_t OPCODE_B0 = 20;
+const uint8_t OPCODE_B1 = 21;
+const uint8_t OPCODE_B2 = 22;
+const uint8_t OPCODE_C0 = 30;
+const uint8_t OPCODE_C1 = 31;
+const uint8_t OPCODE_C2 = 32;
+
+static SimpleOperator OPA0(OPCODE_A0, Operator::kNoWrite, 0, 0, "opa0");
+static SimpleOperator OPA1(OPCODE_A1, Operator::kNoWrite, 1, 0, "opa1");
+static SimpleOperator OPA2(OPCODE_A2, Operator::kNoWrite, 2, 0, "opa2");
+static SimpleOperator OPB0(OPCODE_B0, Operator::kNoWrite, 0, 0, "opb0");
+static SimpleOperator OPB1(OPCODE_B1, Operator::kNoWrite, 1, 0, "opb1");
+static SimpleOperator OPB2(OPCODE_B2, Operator::kNoWrite, 2, 0, "opb2");
+static SimpleOperator OPC0(OPCODE_C0, Operator::kNoWrite, 0, 0, "opc0");
+static SimpleOperator OPC1(OPCODE_C1, Operator::kNoWrite, 1, 0, "opc1");
+static SimpleOperator OPC2(OPCODE_C2, Operator::kNoWrite, 2, 0, "opc2");
+
+
+// Replaces all "A" operators with "B" operators without creating new nodes.
+class InPlaceABReducer : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        node->set_op(&OPB0);
+        return Replace(node);
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        node->set_op(&OPB1);
+        return Replace(node);
+      case OPCODE_A2:
+        CHECK_EQ(2, node->InputCount());
+        node->set_op(&OPB2);
+        return Replace(node);
+    }
+    return NoChange();
+  }
+};
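+
+
+// A reducer signals progress with Replace() (here each node is mutated in
+// place, so it "replaces" itself) and makes no claim with NoChange(); the
+// GraphReducer revisits changed nodes until a fixed point is reached, which
+// is why the tests below can call ReduceGraph() repeatedly and expect
+// identical results.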
+
+
+// Replaces all "A" operators with "B" operators by allocating new nodes.
+class NewABReducer : public Reducer {
+ public:
+  explicit NewABReducer(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPB0));
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(graph_->NewNode(&OPB1, node->InputAt(0)));
+      case OPCODE_A2:
+        CHECK_EQ(2, node->InputCount());
+        return Replace(
+            graph_->NewNode(&OPB2, node->InputAt(0), node->InputAt(1)));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Replaces all "B" operators with "C" operators without creating new nodes.
+class InPlaceBCReducer : public Reducer {
+ public:
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_B0:
+        CHECK_EQ(0, node->InputCount());
+        node->set_op(&OPC0);
+        return Replace(node);
+      case OPCODE_B1:
+        CHECK_EQ(1, node->InputCount());
+        node->set_op(&OPC1);
+        return Replace(node);
+      case OPCODE_B2:
+        CHECK_EQ(2, node->InputCount());
+        node->set_op(&OPC2);
+        return Replace(node);
+    }
+    return NoChange();
+  }
+};
+
+
+// Wraps all "OPA0" nodes in "OPB1" operators by allocating new nodes.
+class A0Wrapper FINAL : public Reducer {
+ public:
+  explicit A0Wrapper(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) OVERRIDE {
+    switch (node->op()->opcode()) {
+      case OPCODE_A0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPB1, node));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Wraps all "OPB0" nodes in two "OPC1" operators by allocating new nodes.
+class B0Wrapper FINAL : public Reducer {
+ public:
+  explicit B0Wrapper(Graph* graph) : graph_(graph) {}
+  virtual Reduction Reduce(Node* node) OVERRIDE {
+    switch (node->op()->opcode()) {
+      case OPCODE_B0:
+        CHECK_EQ(0, node->InputCount());
+        return Replace(graph_->NewNode(&OPC1, graph_->NewNode(&OPC1, node)));
+    }
+    return NoChange();
+  }
+  Graph* graph_;
+};
+
+
+// Replaces all "OPA1" nodes with the first input.
+class A1Forwarder : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(node->InputAt(0));
+    }
+    return NoChange();
+  }
+};
+
+
+// Replaces all "OPB1" nodes with the first input.
+class B1Forwarder : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_B1:
+        CHECK_EQ(1, node->InputCount());
+        return Replace(node->InputAt(0));
+    }
+    return NoChange();
+  }
+};
+
+
+// Swaps the inputs to "OPA2" and "OPB2" nodes based on ids.
+class AB2Sorter : public Reducer {
+  virtual Reduction Reduce(Node* node) {
+    switch (node->op()->opcode()) {
+      case OPCODE_A2:
+      case OPCODE_B2:
+        CHECK_EQ(2, node->InputCount());
+        Node* x = node->InputAt(0);
+        Node* y = node->InputAt(1);
+        if (x->id() > y->id()) {
+          node->ReplaceInput(0, y);
+          node->ReplaceInput(1, x);
+          return Replace(node);
+        }
+    }
+    return NoChange();
+  }
+};
+
+
+// Simply records the nodes visited.
+class ReducerRecorder : public Reducer {
+ public:
+  explicit ReducerRecorder(Zone* zone)
+      : set(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
+  virtual Reduction Reduce(Node* node) {
+    set.insert(node);
+    return NoChange();
+  }
+  void CheckContains(Node* node) {
+    CHECK_EQ(1, static_cast<int>(set.count(node)));
+  }
+  NodeSet set;
+};
+
+
+TEST(ReduceGraphFromEnd1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  ReducerRecorder recorder(graph.zone());
+  reducer.AddReducer(&recorder);
+  reducer.ReduceGraph();
+  recorder.CheckContains(n1);
+  recorder.CheckContains(end);
+}
+
+
+TEST(ReduceGraphFromEnd2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  ReducerRecorder recorder(graph.zone());
+  reducer.AddReducer(&recorder);
+  reducer.ReduceGraph();
+  recorder.CheckContains(n1);
+  recorder.CheckContains(n2);
+  recorder.CheckContains(n3);
+  recorder.CheckContains(end);
+}
+
+
+TEST(ReduceInPlace1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* with in-place updates.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK_EQ(&OPB1, end->op());
+    CHECK_EQ(n1, end->InputAt(0));
+  }
+}
+
+
+TEST(ReduceInPlace2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* with in-place updates.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK_EQ(&OPB1, n2->op());
+    CHECK_EQ(n1, n2->InputAt(0));
+    CHECK_EQ(&OPB1, n3->op());
+    CHECK_EQ(n1, n3->InputAt(0));
+    CHECK_EQ(&OPB2, end->op());
+    CHECK_EQ(n2, end->InputAt(0));
+    CHECK_EQ(n3, end->InputAt(1));
+  }
+}
+
+
+TEST(ReduceNew1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  NewABReducer r(&graph);
+  reducer.AddReducer(&r);
+
+  // Tests A* => B* while creating new nodes.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    if (i == 0) {
+      CHECK_NE(before, graph.NodeCount());
+    } else {
+      CHECK_EQ(before, graph.NodeCount());
+    }
+    Node* nend = graph.end();
+    CHECK_NE(end, nend);  // end() should be updated too.
+
+    Node* nn2 = nend->InputAt(0);
+    Node* nn3 = nend->InputAt(1);
+    Node* nn1 = nn2->InputAt(0);
+
+    CHECK_EQ(nn1, nn3->InputAt(0));
+
+    CHECK_EQ(&OPB0, nn1->op());
+    CHECK_EQ(&OPB1, nn2->op());
+    CHECK_EQ(&OPB1, nn3->op());
+    CHECK_EQ(&OPB2, nend->op());
+  }
+}
+
+
+TEST(Wrapping1) {
+  GraphTester graph;
+
+  Node* end = graph.NewNode(&OPA0);
+  graph.SetEnd(end);
+  CHECK_EQ(1, graph.NodeCount());
+
+  GraphReducer reducer(&graph);
+  A0Wrapper r(&graph);
+  reducer.AddReducer(&r);
+
+  reducer.ReduceGraph();
+  CHECK_EQ(2, graph.NodeCount());
+
+  Node* nend = graph.end();
+  CHECK_NE(end, nend);
+  CHECK_EQ(&OPB1, nend->op());
+  CHECK_EQ(1, nend->InputCount());
+  CHECK_EQ(end, nend->InputAt(0));
+}
+
+
+TEST(Wrapping2) {
+  GraphTester graph;
+
+  Node* end = graph.NewNode(&OPB0);
+  graph.SetEnd(end);
+  CHECK_EQ(1, graph.NodeCount());
+
+  GraphReducer reducer(&graph);
+  B0Wrapper r(&graph);
+  reducer.AddReducer(&r);
+
+  reducer.ReduceGraph();
+  CHECK_EQ(3, graph.NodeCount());
+
+  Node* nend = graph.end();
+  CHECK_NE(end, nend);
+  CHECK_EQ(&OPC1, nend->op());
+  CHECK_EQ(1, nend->InputCount());
+
+  Node* n1 = nend->InputAt(0);
+  CHECK_NE(end, n1);
+  CHECK_EQ(&OPC1, n1->op());
+  CHECK_EQ(1, n1->InputCount());
+  CHECK_EQ(end, n1->InputAt(0));
+}
+
+
+TEST(Forwarding1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  A1Forwarder r;
+  reducer.AddReducer(&r);
+
+  // Tests A1(x) => x
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(n1, graph.end());
+  }
+}
+
+
+TEST(Forwarding2) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  A1Forwarder r;
+  reducer.AddReducer(&r);
+
+  // Tests reducing A2(A1(x), A1(y)) => A2(x, y).
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(n1, end->InputAt(0));
+    CHECK_EQ(n1, end->InputAt(1));
+    CHECK_EQ(&OPA2, end->op());
+    CHECK_EQ(0, n2->UseCount());
+    CHECK_EQ(0, n3->UseCount());
+  }
+}
+
+
+TEST(Forwarding3) {
+  // Tests reducing a chain of A1(A1(A1(A1(x)))) => x.
+  for (int i = 0; i < 8; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* end = n1;
+    for (int j = 0; j < i; j++) {
+      end = graph.NewNode(&OPA1, end);
+    }
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    A1Forwarder r;
+    reducer.AddReducer(&r);
+
+    for (int k = 0; k < 3; k++) {
+      int before = graph.NodeCount();
+      reducer.ReduceGraph();
+      CHECK_EQ(before, graph.NodeCount());
+      CHECK_EQ(&OPA0, n1->op());
+      CHECK_EQ(n1, graph.end());
+    }
+  }
+}
+
+
+TEST(ReduceForward1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* n2 = graph.NewNode(&OPA1, n1);
+  Node* n3 = graph.NewNode(&OPA1, n1);
+  Node* end = graph.NewNode(&OPA2, n2, n3);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  B1Forwarder f;
+  reducer.AddReducer(&r);
+  reducer.AddReducer(&f);
+
+  // Tests first reducing A => B, then B1(x) => x.
+  for (int i = 0; i < 3; i++) {
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPB0, n1->op());
+    CHECK(n2->IsDead());
+    CHECK_EQ(n1, end->InputAt(0));
+    CHECK(n3->IsDead());
+    CHECK_EQ(n1, end->InputAt(1));
+    CHECK_EQ(&OPB2, end->op());
+    CHECK_EQ(0, n2->UseCount());
+    CHECK_EQ(0, n3->UseCount());
+  }
+}
+
+
+TEST(Sorter1) {
+  HandleAndZoneScope scope;
+  AB2Sorter r;
+  for (int i = 0; i < 6; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* n2 = graph.NewNode(&OPA1, n1);
+    Node* n3 = graph.NewNode(&OPA1, n1);
+    Node* end = NULL;  // Initialize to please the compiler.
+
+    if (i == 0) end = graph.NewNode(&OPA2, n2, n3);
+    if (i == 1) end = graph.NewNode(&OPA2, n3, n2);
+    if (i == 2) end = graph.NewNode(&OPA2, n2, n1);
+    if (i == 3) end = graph.NewNode(&OPA2, n1, n2);
+    if (i == 4) end = graph.NewNode(&OPA2, n3, n1);
+    if (i == 5) end = graph.NewNode(&OPA2, n1, n3);
+
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    reducer.AddReducer(&r);
+
+    int before = graph.NodeCount();
+    reducer.ReduceGraph();
+    CHECK_EQ(before, graph.NodeCount());
+    CHECK_EQ(&OPA0, n1->op());
+    CHECK_EQ(&OPA1, n2->op());
+    CHECK_EQ(&OPA1, n3->op());
+    CHECK_EQ(&OPA2, end->op());
+    CHECK_EQ(end, graph.end());
+    CHECK(end->InputAt(0)->id() <= end->InputAt(1)->id());
+  }
+}
+
+
+// Generate a node graph with the given permutations.
+void GenDAG(Graph* graph, int* p3, int* p2, int* p1) {
+  Node* level4 = graph->NewNode(&OPA0);
+  Node* level3[] = {graph->NewNode(&OPA1, level4),
+                    graph->NewNode(&OPA1, level4)};
+
+  Node* level2[] = {graph->NewNode(&OPA1, level3[p3[0]]),
+                    graph->NewNode(&OPA1, level3[p3[1]]),
+                    graph->NewNode(&OPA1, level3[p3[0]]),
+                    graph->NewNode(&OPA1, level3[p3[1]])};
+
+  Node* level1[] = {graph->NewNode(&OPA2, level2[p2[0]], level2[p2[1]]),
+                    graph->NewNode(&OPA2, level2[p2[2]], level2[p2[3]])};
+
+  Node* end = graph->NewNode(&OPA2, level1[p1[0]], level1[p1[1]]);
+  graph->SetEnd(end);
+}
+
+
+TEST(SortForwardReduce) {
+  GraphTester graph;
+
+  // Tests combined reductions on a series of DAGs.
+  for (int j = 0; j < 2; j++) {
+    int p3[] = {j, 1 - j};
+    for (int m = 0; m < 2; m++) {
+      int p1[] = {m, 1 - m};
+      for (int k = 0; k < 24; k++) {  // All permutations of 0, 1, 2, 3
+        int p2[] = {-1, -1, -1, -1};
+        int n = k;
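+        // Decode k in factorial base: each pass places value d-1 into the
+        // (n % d)-th still-empty slot of p2 and divides n by d, producing a
+        // distinct permutation for each k in [0, 24); e.g. k=5 yields
+        // p2 = {1, 3, 2, 0}.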
+        for (int d = 4; d >= 1; d--) {  // Construct permutation.
+          int p = n % d;
+          for (int z = 0; z < 4; z++) {
+            if (p2[z] == -1) {
+              if (p == 0) p2[z] = d - 1;
+              p--;
+            }
+          }
+          n = n / d;
+        }
+
+        GenDAG(&graph, p3, p2, p1);
+
+        GraphReducer reducer(&graph);
+        AB2Sorter r1;
+        A1Forwarder r2;
+        InPlaceABReducer r3;
+        reducer.AddReducer(&r1);
+        reducer.AddReducer(&r2);
+        reducer.AddReducer(&r3);
+
+        reducer.ReduceGraph();
+
+        Node* end = graph.end();
+        CHECK_EQ(&OPB2, end->op());
+        Node* n1 = end->InputAt(0);
+        Node* n2 = end->InputAt(1);
+        CHECK_NE(n1, n2);
+        CHECK(n1->id() < n2->id());
+        CHECK_EQ(&OPB2, n1->op());
+        CHECK_EQ(&OPB2, n2->op());
+        Node* n4 = n1->InputAt(0);
+        CHECK_EQ(&OPB0, n4->op());
+        CHECK_EQ(n4, n1->InputAt(1));
+        CHECK_EQ(n4, n2->InputAt(0));
+        CHECK_EQ(n4, n2->InputAt(1));
+      }
+    }
+  }
+}
+
+
+TEST(Order) {
+  // Test that the order of reducers doesn't matter, as they should be
+  // rerun for changed nodes.
+  for (int i = 0; i < 2; i++) {
+    GraphTester graph;
+
+    Node* n1 = graph.NewNode(&OPA0);
+    Node* end = graph.NewNode(&OPA1, n1);
+    graph.SetEnd(end);
+
+    GraphReducer reducer(&graph);
+    InPlaceABReducer abr;
+    InPlaceBCReducer bcr;
+    if (i == 0) {
+      reducer.AddReducer(&abr);
+      reducer.AddReducer(&bcr);
+    } else {
+      reducer.AddReducer(&bcr);
+      reducer.AddReducer(&abr);
+    }
+
+    // Tests A* => C* with in-place updates.
+    for (int k = 0; k < 3; k++) {
+      int before = graph.NodeCount();
+      reducer.ReduceGraph();
+      CHECK_EQ(before, graph.NodeCount());
+      CHECK_EQ(&OPC0, n1->op());
+      CHECK_EQ(&OPC1, end->op());
+      CHECK_EQ(n1, end->InputAt(0));
+    }
+  }
+}
+
+
+// Tests that a reducer is only applied once.
+class OneTimeReducer : public Reducer {
+ public:
+  OneTimeReducer(Reducer* reducer, Zone* zone)
+      : reducer_(reducer),
+        nodes_(NodeSet::key_compare(), NodeSet::allocator_type(zone)) {}
+  virtual Reduction Reduce(Node* node) {
+    CHECK_EQ(0, static_cast<int>(nodes_.count(node)));
+    nodes_.insert(node);
+    return reducer_->Reduce(node);
+  }
+  Reducer* reducer_;
+  NodeSet nodes_;
+};
+
+
+TEST(OneTimeReduce1) {
+  GraphTester graph;
+
+  Node* n1 = graph.NewNode(&OPA0);
+  Node* end = graph.NewNode(&OPA1, n1);
+  graph.SetEnd(end);
+
+  GraphReducer reducer(&graph);
+  InPlaceABReducer r;
+  OneTimeReducer once(&r, graph.zone());
+  reducer.AddReducer(&once);
+
+  // Tests A* => B* with in-place updates. Should only be applied once.
+  int before = graph.NodeCount();
+  reducer.ReduceGraph();
+  CHECK_EQ(before, graph.NodeCount());
+  CHECK_EQ(&OPB0, n1->op());
+  CHECK_EQ(&OPB1, end->op());
+  CHECK_EQ(n1, end->InputAt(0));
+}
diff --git a/test/cctest/compiler/test-instruction.cc b/test/cctest/compiler/test-instruction.cc
new file mode 100644
index 0000000..a9feaac
--- /dev/null
+++ b/test/cctest/compiler/test-instruction.cc
@@ -0,0 +1,350 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/instruction.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/lithium.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef v8::internal::compiler::Instruction TestInstr;
+typedef v8::internal::compiler::InstructionSequence TestInstrSeq;
+
+// A testing helper for the register code abstraction.
+class InstructionTester : public HandleAndZoneScope {
+ public:  // We're all friends here.
+  InstructionTester()
+      : isolate(main_isolate()),
+        graph(zone()),
+        schedule(zone()),
+        info(static_cast<HydrogenCodeStub*>(NULL), main_isolate()),
+        linkage(&info),
+        common(zone()),
+        code(NULL) {}
+
+  ~InstructionTester() { delete code; }
+
+  Isolate* isolate;
+  Graph graph;
+  Schedule schedule;
+  CompilationInfoWithZone info;
+  Linkage linkage;
+  CommonOperatorBuilder common;
+  MachineOperatorBuilder machine;
+  TestInstrSeq* code;
+
+  Zone* zone() { return main_zone(); }
+
+  void allocCode() {
+    if (schedule.rpo_order()->size() == 0) {
+      // Compute the RPO order.
+      Scheduler::ComputeSpecialRPO(&schedule);
+      DCHECK(schedule.rpo_order()->size() > 0);
+    }
+    code = new TestInstrSeq(&linkage, &graph, &schedule);
+  }
+
+  Node* Int32Constant(int32_t val) {
+    Node* node = graph.NewNode(common.Int32Constant(val));
+    schedule.AddNode(schedule.start(), node);
+    return node;
+  }
+
+  Node* Float64Constant(double val) {
+    Node* node = graph.NewNode(common.Float64Constant(val));
+    schedule.AddNode(schedule.start(), node);
+    return node;
+  }
+
+  Node* Parameter(int32_t which) {
+    Node* node = graph.NewNode(common.Parameter(which));
+    schedule.AddNode(schedule.start(), node);
+    return node;
+  }
+
+  Node* NewNode(BasicBlock* block) {
+    Node* node = graph.NewNode(common.Int32Constant(111));
+    schedule.AddNode(block, node);
+    return node;
+  }
+
+  int NewInstr(BasicBlock* block) {
+    InstructionCode opcode = static_cast<InstructionCode>(110);
+    TestInstr* instr = TestInstr::New(zone(), opcode);
+    return code->AddInstruction(instr, block);
+  }
+
+  UnallocatedOperand* NewUnallocated(int vreg) {
+    UnallocatedOperand* unallocated =
+        new (zone()) UnallocatedOperand(UnallocatedOperand::ANY);
+    unallocated->set_virtual_register(vreg);
+    return unallocated;
+  }
+};
+
+
+TEST(InstructionBasic) {
+  InstructionTester R;
+
+  for (int i = 0; i < 10; i++) {
+    R.Int32Constant(i);  // Add some nodes to the graph.
+  }
+
+  BasicBlock* last = R.schedule.start();
+  for (int i = 0; i < 5; i++) {
+    BasicBlock* block = R.schedule.NewBasicBlock();
+    R.schedule.AddGoto(last, block);
+    last = block;
+  }
+
+  R.allocCode();
+
+  CHECK_EQ(R.graph.NodeCount(), R.code->ValueCount());
+
+  BasicBlockVector* blocks = R.schedule.rpo_order();
+  CHECK_EQ(static_cast<int>(blocks->size()), R.code->BasicBlockCount());
+
+  int index = 0;
+  for (BasicBlockVectorIter i = blocks->begin(); i != blocks->end();
+       i++, index++) {
+    BasicBlock* block = *i;
+    CHECK_EQ(block, R.code->BlockAt(index));
+    CHECK_EQ(-1, R.code->GetLoopEnd(block));
+  }
+}
+
+
+TEST(InstructionGetBasicBlock) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.start();
+  BasicBlock* b1 = R.schedule.NewBasicBlock();
+  BasicBlock* b2 = R.schedule.NewBasicBlock();
+  BasicBlock* b3 = R.schedule.end();
+
+  R.schedule.AddGoto(b0, b1);
+  R.schedule.AddGoto(b1, b2);
+  R.schedule.AddGoto(b2, b3);
+
+  R.allocCode();
+
+  R.code->StartBlock(b0);
+  int i0 = R.NewInstr(b0);
+  int i1 = R.NewInstr(b0);
+  R.code->EndBlock(b0);
+  R.code->StartBlock(b1);
+  int i2 = R.NewInstr(b1);
+  int i3 = R.NewInstr(b1);
+  int i4 = R.NewInstr(b1);
+  int i5 = R.NewInstr(b1);
+  R.code->EndBlock(b1);
+  R.code->StartBlock(b2);
+  int i6 = R.NewInstr(b2);
+  int i7 = R.NewInstr(b2);
+  int i8 = R.NewInstr(b2);
+  R.code->EndBlock(b2);
+  R.code->StartBlock(b3);
+  R.code->EndBlock(b3);
+
+  CHECK_EQ(b0, R.code->GetBasicBlock(i0));
+  CHECK_EQ(b0, R.code->GetBasicBlock(i1));
+
+  CHECK_EQ(b1, R.code->GetBasicBlock(i2));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i3));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i4));
+  CHECK_EQ(b1, R.code->GetBasicBlock(i5));
+
+  CHECK_EQ(b2, R.code->GetBasicBlock(i6));
+  CHECK_EQ(b2, R.code->GetBasicBlock(i7));
+  CHECK_EQ(b2, R.code->GetBasicBlock(i8));
+
+  CHECK_EQ(b0, R.code->GetBasicBlock(b0->first_instruction_index()));
+  CHECK_EQ(b0, R.code->GetBasicBlock(b0->last_instruction_index()));
+
+  CHECK_EQ(b1, R.code->GetBasicBlock(b1->first_instruction_index()));
+  CHECK_EQ(b1, R.code->GetBasicBlock(b1->last_instruction_index()));
+
+  CHECK_EQ(b2, R.code->GetBasicBlock(b2->first_instruction_index()));
+  CHECK_EQ(b2, R.code->GetBasicBlock(b2->last_instruction_index()));
+
+  CHECK_EQ(b3, R.code->GetBasicBlock(b3->first_instruction_index()));
+  CHECK_EQ(b3, R.code->GetBasicBlock(b3->last_instruction_index()));
+}
+
+
+TEST(InstructionIsGapAt) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.start();
+  R.schedule.AddReturn(b0, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
+
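+  // Expected layout: each block starts with a label (which is itself a gap),
+  // and every instruction is preceded by gap positions where the register
+  // allocator can insert parallel moves.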
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+}
+
+
+TEST(InstructionIsGapAt2) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.start();
+  BasicBlock* b1 = R.schedule.end();
+  R.schedule.AddGoto(b0, b1);
+  R.schedule.AddReturn(b1, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
+
+  TestInstr* i1 = TestInstr::New(R.zone(), 102);
+  TestInstr* g1 = TestInstr::New(R.zone(), 104)->MarkAsControl();
+  R.code->StartBlock(b1);
+  R.code->AddInstruction(i1, b1);
+  R.code->AddInstruction(g1, b1);
+  R.code->EndBlock(b1);
+
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+
+  CHECK_EQ(true, R.code->InstructionAt(6)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(6));    // Label
+  CHECK_EQ(true, R.code->IsGapAt(7));    // Gap
+  CHECK_EQ(false, R.code->IsGapAt(8));   // i1
+  CHECK_EQ(true, R.code->IsGapAt(9));    // Gap
+  CHECK_EQ(true, R.code->IsGapAt(10));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(11));  // g1
+}
+
+
+TEST(InstructionAddGapMove) {
+  InstructionTester R;
+
+  BasicBlock* b0 = R.schedule.start();
+  R.schedule.AddReturn(b0, R.Int32Constant(1));
+
+  R.allocCode();
+  TestInstr* i0 = TestInstr::New(R.zone(), 100);
+  TestInstr* g = TestInstr::New(R.zone(), 103)->MarkAsControl();
+  R.code->StartBlock(b0);
+  R.code->AddInstruction(i0, b0);
+  R.code->AddInstruction(g, b0);
+  R.code->EndBlock(b0);
+
+  CHECK_EQ(true, R.code->InstructionAt(0)->IsBlockStart());
+
+  CHECK_EQ(true, R.code->IsGapAt(0));   // Label
+  CHECK_EQ(true, R.code->IsGapAt(1));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(2));  // i0
+  CHECK_EQ(true, R.code->IsGapAt(3));   // Gap
+  CHECK_EQ(true, R.code->IsGapAt(4));   // Gap
+  CHECK_EQ(false, R.code->IsGapAt(5));  // g
+
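+  // Adding a gap move at any gap index should create exactly one move in the
+  // ParallelMove at that gap's START position.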
+  int indexes[] = {0, 1, 3, 4, -1};
+  for (int i = 0; indexes[i] >= 0; i++) {
+    int index = indexes[i];
+
+    UnallocatedOperand* op1 = R.NewUnallocated(index + 6);
+    UnallocatedOperand* op2 = R.NewUnallocated(index + 12);
+
+    R.code->AddGapMove(index, op1, op2);
+    GapInstruction* gap = R.code->GapAt(index);
+    ParallelMove* move = gap->GetParallelMove(GapInstruction::START);
+    CHECK_NE(NULL, move);
+    const ZoneList<MoveOperands>* move_operands = move->move_operands();
+    CHECK_EQ(1, move_operands->length());
+    MoveOperands* cur = &move_operands->at(0);
+    CHECK_EQ(op1, cur->source());
+    CHECK_EQ(op2, cur->destination());
+  }
+}
+
+
+TEST(InstructionOperands) {
+  Zone zone(CcTest::InitIsolateOnce());
+
+  {
+    TestInstr* i = TestInstr::New(&zone, 101);
+    CHECK_EQ(0, static_cast<int>(i->OutputCount()));
+    CHECK_EQ(0, static_cast<int>(i->InputCount()));
+    CHECK_EQ(0, static_cast<int>(i->TempCount()));
+  }
+
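+  // Exercise every output/input/temp count combination from 0 to 3 and check
+  // that the instruction reports back exactly the operands it was given.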
+  InstructionOperand* outputs[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  InstructionOperand* inputs[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  InstructionOperand* temps[] = {
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER),
+      new (&zone) UnallocatedOperand(UnallocatedOperand::MUST_HAVE_REGISTER)};
+
+  for (size_t i = 0; i < arraysize(outputs); i++) {
+    for (size_t j = 0; j < arraysize(inputs); j++) {
+      for (size_t k = 0; k < arraysize(temps); k++) {
+        TestInstr* m =
+            TestInstr::New(&zone, 101, i, outputs, j, inputs, k, temps);
+        CHECK(i == m->OutputCount());
+        CHECK(j == m->InputCount());
+        CHECK(k == m->TempCount());
+
+        for (size_t z = 0; z < i; z++) {
+          CHECK_EQ(outputs[z], m->OutputAt(z));
+        }
+
+        for (size_t z = 0; z < j; z++) {
+          CHECK_EQ(inputs[z], m->InputAt(z));
+        }
+
+        for (size_t z = 0; z < k; z++) {
+          CHECK_EQ(temps[z], m->TempAt(z));
+        }
+      }
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-js-constant-cache.cc b/test/cctest/compiler/test-js-constant-cache.cc
new file mode 100644
index 0000000..eb0975e
--- /dev/null
+++ b/test/cctest/compiler/test-js-constant-cache.cc
@@ -0,0 +1,291 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/js-graph.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/typer.h"
+#include "src/types.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class JSCacheTesterHelper {
+ protected:
+  explicit JSCacheTesterHelper(Zone* zone)
+      : main_graph_(zone),
+        main_common_(zone),
+        main_javascript_(zone),
+        main_typer_(zone),
+        main_machine_() {}
+  Graph main_graph_;
+  CommonOperatorBuilder main_common_;
+  JSOperatorBuilder main_javascript_;
+  Typer main_typer_;
+  MachineOperatorBuilder main_machine_;
+};
+
+
+class JSConstantCacheTester : public HandleAndZoneScope,
+                              public JSCacheTesterHelper,
+                              public JSGraph {
+ public:
+  JSConstantCacheTester()
+      : JSCacheTesterHelper(main_zone()),
+        JSGraph(&main_graph_, &main_common_, &main_javascript_, &main_typer_,
+                &main_machine_) {}
+
+  Type* upper(Node* node) { return NodeProperties::GetBounds(node).upper; }
+
+  Handle<Object> handle(Node* node) {
+    CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
+    return OpParameter<Unique<Object> >(node).handle();
+  }
+
+  Factory* factory() { return main_isolate()->factory(); }
+};
+
+
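+// The tests below exercise the JSGraph constant cache: requesting the same
+// value twice must yield the same node, while values that are merely
+// numerically equal (e.g. 0 vs. -0) or differently represented (e.g.
+// NumberConstant vs. Int32Constant) must not alias.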
+TEST(ZeroConstant1) {
+  JSConstantCacheTester T;
+
+  Node* zero = T.ZeroConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, zero->opcode());
+  CHECK_EQ(zero, T.Constant(0));
+  CHECK_NE(zero, T.Constant(-0.0));
+  CHECK_NE(zero, T.Constant(1.0));
+  CHECK_NE(zero, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(zero, T.Float64Constant(0));
+  CHECK_NE(zero, T.Int32Constant(0));
+
+  Type* t = T.upper(zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(MinusZeroConstant) {
+  JSConstantCacheTester T;
+
+  Node* minus_zero = T.Constant(-0.0);
+  Node* zero = T.ZeroConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, minus_zero->opcode());
+  CHECK_EQ(minus_zero, T.Constant(-0.0));
+  CHECK_NE(zero, minus_zero);
+
+  Type* t = T.upper(minus_zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::MinusZero()));
+  CHECK(!t->Is(Type::Integral32()));
+  CHECK(!t->Is(Type::Signed32()));
+  CHECK(!t->Is(Type::Unsigned32()));
+  CHECK(!t->Is(Type::SignedSmall()));
+  CHECK(!t->Is(Type::UnsignedSmall()));
+
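+  // Note: the checks below assume CHECK_EQ/CHECK_NE compare doubles
+  // bit-exactly, so that -0.0 and 0.0 are distinguishable.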
+  double zero_value = OpParameter<double>(zero);
+  double minus_zero_value = OpParameter<double>(minus_zero);
+
+  CHECK_EQ(0.0, zero_value);
+  CHECK_NE(-0.0, zero_value);
+  CHECK_EQ(-0.0, minus_zero_value);
+  CHECK_NE(0.0, minus_zero_value);
+}
+
+
+TEST(ZeroConstant2) {
+  JSConstantCacheTester T;
+
+  Node* zero = T.Constant(0);
+
+  CHECK_EQ(IrOpcode::kNumberConstant, zero->opcode());
+  CHECK_EQ(zero, T.ZeroConstant());
+  CHECK_NE(zero, T.Constant(-0.0));
+  CHECK_NE(zero, T.Constant(1.0));
+  CHECK_NE(zero, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(zero, T.Float64Constant(0));
+  CHECK_NE(zero, T.Int32Constant(0));
+
+  Type* t = T.upper(zero);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(OneConstant1) {
+  JSConstantCacheTester T;
+
+  Node* one = T.OneConstant();
+
+  CHECK_EQ(IrOpcode::kNumberConstant, one->opcode());
+  CHECK_EQ(one, T.Constant(1));
+  CHECK_EQ(one, T.Constant(1.0));
+  CHECK_NE(one, T.Constant(1.01));
+  CHECK_NE(one, T.Constant(-1.01));
+  CHECK_NE(one, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(one, T.Float64Constant(1.0));
+  CHECK_NE(one, T.Int32Constant(1));
+
+  Type* t = T.upper(one);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(OneConstant2) {
+  JSConstantCacheTester T;
+
+  Node* one = T.Constant(1);
+
+  CHECK_EQ(IrOpcode::kNumberConstant, one->opcode());
+  CHECK_EQ(one, T.OneConstant());
+  CHECK_EQ(one, T.Constant(1.0));
+  CHECK_NE(one, T.Constant(1.01));
+  CHECK_NE(one, T.Constant(-1.01));
+  CHECK_NE(one, T.Constant(v8::base::OS::nan_value()));
+  CHECK_NE(one, T.Float64Constant(1.0));
+  CHECK_NE(one, T.Int32Constant(1));
+
+  Type* t = T.upper(one);
+
+  CHECK(t->Is(Type::Number()));
+  CHECK(t->Is(Type::Integral32()));
+  CHECK(t->Is(Type::Signed32()));
+  CHECK(t->Is(Type::Unsigned32()));
+  CHECK(t->Is(Type::SignedSmall()));
+  CHECK(t->Is(Type::UnsignedSmall()));
+}
+
+
+TEST(Canonicalizations) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(T.ZeroConstant(), T.ZeroConstant());
+  CHECK_EQ(T.UndefinedConstant(), T.UndefinedConstant());
+  CHECK_EQ(T.TheHoleConstant(), T.TheHoleConstant());
+  CHECK_EQ(T.TrueConstant(), T.TrueConstant());
+  CHECK_EQ(T.FalseConstant(), T.FalseConstant());
+  CHECK_EQ(T.NullConstant(), T.NullConstant());
+  CHECK_EQ(T.ZeroConstant(), T.ZeroConstant());
+  CHECK_EQ(T.OneConstant(), T.OneConstant());
+  CHECK_EQ(T.NaNConstant(), T.NaNConstant());
+}
+
+
+TEST(NoAliasing) {
+  JSConstantCacheTester T;
+
+  Node* nodes[] = {T.UndefinedConstant(), T.TheHoleConstant(), T.TrueConstant(),
+                   T.FalseConstant(),     T.NullConstant(),    T.ZeroConstant(),
+                   T.OneConstant(),       T.NaNConstant(),     T.Constant(21),
+                   T.Constant(22.2)};
+
+  for (size_t i = 0; i < arraysize(nodes); i++) {
+    for (size_t j = 0; j < arraysize(nodes); j++) {
+      if (i != j) CHECK_NE(nodes[i], nodes[j]);
+    }
+  }
+}
+
+
+TEST(CanonicalizingNumbers) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    Node* node = T.Constant(*i);
+    for (int j = 0; j < 5; j++) {
+      CHECK_EQ(node, T.Constant(*i));
+    }
+  }
+}
+
+
+TEST(NumberTypes) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    double value = *i;
+    Node* node = T.Constant(value);
+    CHECK(T.upper(node)->Equals(Type::Of(value, T.main_zone())));
+  }
+}
+
+
+TEST(HeapNumbers) {
+  JSConstantCacheTester T;
+
+  FOR_FLOAT64_INPUTS(i) {
+    double value = *i;
+    Handle<Object> num = T.factory()->NewNumber(value);
+    Handle<HeapNumber> heap = T.factory()->NewHeapNumber(value);
+    Node* node1 = T.Constant(value);
+    Node* node2 = T.Constant(num);
+    Node* node3 = T.Constant(heap);
+    CHECK_EQ(node1, node2);
+    CHECK_EQ(node1, node3);
+  }
+}
+
+
+TEST(OddballHandle) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(T.UndefinedConstant(), T.Constant(T.factory()->undefined_value()));
+  CHECK_EQ(T.TheHoleConstant(), T.Constant(T.factory()->the_hole_value()));
+  CHECK_EQ(T.TrueConstant(), T.Constant(T.factory()->true_value()));
+  CHECK_EQ(T.FalseConstant(), T.Constant(T.factory()->false_value()));
+  CHECK_EQ(T.NullConstant(), T.Constant(T.factory()->null_value()));
+  CHECK_EQ(T.NaNConstant(), T.Constant(T.factory()->nan_value()));
+}
+
+
+TEST(OddballValues) {
+  JSConstantCacheTester T;
+
+  CHECK_EQ(*T.factory()->undefined_value(), *T.handle(T.UndefinedConstant()));
+  CHECK_EQ(*T.factory()->the_hole_value(), *T.handle(T.TheHoleConstant()));
+  CHECK_EQ(*T.factory()->true_value(), *T.handle(T.TrueConstant()));
+  CHECK_EQ(*T.factory()->false_value(), *T.handle(T.FalseConstant()));
+  CHECK_EQ(*T.factory()->null_value(), *T.handle(T.NullConstant()));
+}
+
+
+TEST(OddballTypes) {
+  JSConstantCacheTester T;
+
+  CHECK(T.upper(T.UndefinedConstant())->Is(Type::Undefined()));
+  // TODO(dcarney): figure this out.
+  // CHECK(T.upper(T.TheHoleConstant())->Is(Type::Internal()));
+  CHECK(T.upper(T.TrueConstant())->Is(Type::Boolean()));
+  CHECK(T.upper(T.FalseConstant())->Is(Type::Boolean()));
+  CHECK(T.upper(T.NullConstant())->Is(Type::Null()));
+  CHECK(T.upper(T.ZeroConstant())->Is(Type::Number()));
+  CHECK(T.upper(T.OneConstant())->Is(Type::Number()));
+  CHECK(T.upper(T.NaNConstant())->Is(Type::NaN()));
+}
+
+
+TEST(ExternalReferences) {
+  // TODO(titzer): test canonicalization of external references.
+}
diff --git a/test/cctest/compiler/test-js-context-specialization.cc b/test/cctest/compiler/test-js-context-specialization.cc
new file mode 100644
index 0000000..47c660a
--- /dev/null
+++ b/test/cctest/compiler/test-js-context-specialization.cc
@@ -0,0 +1,307 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/js-context-specialization.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/source-position.h"
+#include "src/compiler/typer.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/function-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class ContextSpecializationTester : public HandleAndZoneScope,
+                                    public DirectGraphBuilder {
+ public:
+  ContextSpecializationTester()
+      : DirectGraphBuilder(new (main_zone()) Graph(main_zone())),
+        common_(main_zone()),
+        javascript_(main_zone()),
+        machine_(),
+        simplified_(main_zone()),
+        typer_(main_zone()),
+        jsgraph_(graph(), common(), &javascript_, &typer_, &machine_),
+        info_(main_isolate(), main_zone()) {}
+
+  Factory* factory() { return main_isolate()->factory(); }
+  CommonOperatorBuilder* common() { return &common_; }
+  JSOperatorBuilder* javascript() { return &javascript_; }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+  JSGraph* jsgraph() { return &jsgraph_; }
+  CompilationInfo* info() { return &info_; }
+
+ private:
+  CommonOperatorBuilder common_;
+  JSOperatorBuilder javascript_;
+  MachineOperatorBuilder machine_;
+  SimplifiedOperatorBuilder simplified_;
+  Typer typer_;
+  JSGraph jsgraph_;
+  CompilationInfo info_;
+};
+
+
+TEST(ReduceJSLoadContext) {
+  ContextSpecializationTester t;
+
+  Node* start = t.NewNode(t.common()->Start(0));
+  t.graph()->SetStart(start);
+
+  // Make a context and initialize it a bit for this test.
+  Handle<Context> native = t.factory()->NewNativeContext();
+  Handle<Context> subcontext1 = t.factory()->NewNativeContext();
+  Handle<Context> subcontext2 = t.factory()->NewNativeContext();
+  subcontext2->set_previous(*subcontext1);
+  subcontext1->set_previous(*native);
+  Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
+  const int slot = Context::GLOBAL_OBJECT_INDEX;
+  native->set(slot, *expected);
+
+  Node* const_context = t.jsgraph()->Constant(native);
+  Node* deep_const_context = t.jsgraph()->Constant(subcontext2);
+  Node* param_context = t.NewNode(t.common()->Parameter(0), start);
+  JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+
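+  // The cases below cover the specializer's decision matrix: mutable vs.
+  // immutable slots, constant vs. non-constant contexts, and depth 0 vs. > 0.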
+  {
+    // Mutable slot, constant context, depth = 0 => do nothing.
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, 0, false),
+                           const_context, const_context, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Mutable slot, non-constant context, depth = 0 => do nothing.
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, 0, false),
+                           param_context, param_context, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Mutable slot, constant context, depth > 0 => fold-in parent context.
+    Node* load = t.NewNode(
+        t.javascript()->LoadContext(2, Context::GLOBAL_EVAL_FUN_INDEX, false),
+        deep_const_context, deep_const_context, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(r.Changed());
+    Node* new_context_input = NodeProperties::GetValueInput(r.replacement(), 0);
+    CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
+    HeapObjectMatcher<Context> match(new_context_input);
+    CHECK_EQ(*native, *match.Value().handle());
+    ContextAccess access = OpParameter<ContextAccess>(r.replacement());
+    CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, access.index());
+    CHECK_EQ(0, access.depth());
+    CHECK_EQ(false, access.immutable());
+  }
+
+  {
+    // Immutable slot, constant context, depth = 0 => specialize.
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                           const_context, const_context, start);
+    Reduction r = spec.ReduceJSLoadContext(load);
+    CHECK(r.Changed());
+    CHECK(r.replacement() != load);
+
+    HeapObjectMatcher<Object> match(r.replacement());
+    CHECK(match.HasValue());
+    CHECK_EQ(*expected, *match.Value().handle());
+  }
+
+  // TODO(titzer): test with other kinds of contexts, e.g. a function context.
+  // TODO(sigurds): test that loads below a create-context node are not
+  // optimized.
+}
+
+
+TEST(ReduceJSStoreContext) {
+  ContextSpecializationTester t;
+
+  Node* start = t.NewNode(t.common()->Start(0));
+  t.graph()->SetStart(start);
+
+  // Make a context and initialize it a bit for this test.
+  Handle<Context> native = t.factory()->NewNativeContext();
+  Handle<Context> subcontext1 = t.factory()->NewNativeContext();
+  Handle<Context> subcontext2 = t.factory()->NewNativeContext();
+  subcontext2->set_previous(*subcontext1);
+  subcontext1->set_previous(*native);
+  Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
+  const int slot = Context::GLOBAL_OBJECT_INDEX;
+  native->set(slot, *expected);
+
+  Node* const_context = t.jsgraph()->Constant(native);
+  Node* deep_const_context = t.jsgraph()->Constant(subcontext2);
+  Node* param_context = t.NewNode(t.common()->Parameter(0), start);
+  JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+
+  {
+    // Mutable slot, constant context, depth = 0 => do nothing.
+    Node* store = t.NewNode(t.javascript()->StoreContext(0, 0), const_context,
+                            const_context, start);
+    Reduction r = spec.ReduceJSStoreContext(store);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Mutable slot, non-constant context, depth = 0 => do nothing.
+    Node* store = t.NewNode(t.javascript()->StoreContext(0, 0), param_context,
+                            param_context, start);
+    Reduction r = spec.ReduceJSStoreContext(store);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Immutable slot, constant context, depth = 0 => do nothing.
+    Node* store = t.NewNode(t.javascript()->StoreContext(0, slot),
+                            const_context, const_context, start);
+    Reduction r = spec.ReduceJSStoreContext(store);
+    CHECK(!r.Changed());
+  }
+
+  {
+    // Mutable slot, constant context, depth > 0 => fold-in parent context.
+    Node* store = t.NewNode(
+        t.javascript()->StoreContext(2, Context::GLOBAL_EVAL_FUN_INDEX),
+        deep_const_context, deep_const_context, start);
+    Reduction r = spec.ReduceJSStoreContext(store);
+    CHECK(r.Changed());
+    Node* new_context_input = NodeProperties::GetValueInput(r.replacement(), 0);
+    CHECK_EQ(IrOpcode::kHeapConstant, new_context_input->opcode());
+    HeapObjectMatcher<Context> match(new_context_input);
+    CHECK_EQ(*native, *match.Value().handle());
+    ContextAccess access = OpParameter<ContextAccess>(r.replacement());
+    CHECK_EQ(Context::GLOBAL_EVAL_FUN_INDEX, access.index());
+    CHECK_EQ(0, access.depth());
+    CHECK_EQ(false, access.immutable());
+  }
+}
+
+
+// TODO(titzer): factor out common code with effects checking in typed lowering.
+static void CheckEffectInput(Node* effect, Node* use) {
+  CHECK_EQ(effect, NodeProperties::GetEffectInput(use));
+}
+
+
+TEST(SpecializeToContext) {
+  ContextSpecializationTester t;
+
+  Node* start = t.NewNode(t.common()->Start(0));
+  t.graph()->SetStart(start);
+
+  // Make a context and initialize it a bit for this test.
+  Handle<Context> native = t.factory()->NewNativeContext();
+  Handle<Object> expected = t.factory()->InternalizeUtf8String("gboy!");
+  const int slot = Context::GLOBAL_OBJECT_INDEX;
+  native->set(slot, *expected);
+  t.info()->SetContext(native);
+
+  Node* const_context = t.jsgraph()->Constant(native);
+  Node* param_context = t.NewNode(t.common()->Parameter(0), start);
+  JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
+
+  {
+    // Check that SpecializeToContext() replaces values and forwards effects
+    // correctly, and folds values from constant and non-constant contexts.
+    Node* effect_in = start;
+    Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                           const_context, const_context, effect_in);
+
+    Node* value_use = t.NewNode(t.simplified()->ChangeTaggedToInt32(), load);
+    Node* other_load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
+                                 param_context, param_context, load);
+    Node* effect_use = other_load;
+    Node* other_use =
+        t.NewNode(t.simplified()->ChangeTaggedToInt32(), other_load);
+
+    Node* add = t.NewNode(t.javascript()->Add(), value_use, other_use,
+                          param_context, other_load, start);
+
+    Node* ret = t.NewNode(t.common()->Return(), add, effect_use, start);
+    Node* end = t.NewNode(t.common()->End(), ret);
+    USE(end);
+    t.graph()->SetEnd(end);
+
+    // Double check the above graph is what we expect, or the test is broken.
+    CheckEffectInput(effect_in, load);
+    CheckEffectInput(load, effect_use);
+
+    // Perform the substitution on the entire graph.
+    spec.SpecializeToContext();
+
+    // Effects should have been forwarded (not replaced with a value).
+    CheckEffectInput(effect_in, effect_use);
+
+    // Use of {other_load} should not have been replaced.
+    CHECK_EQ(other_load, other_use->InputAt(0));
+
+    Node* replacement = value_use->InputAt(0);
+    HeapObjectMatcher<Object> match(replacement);
+    CHECK(match.HasValue());
+    CHECK_EQ(*expected, *match.Value().handle());
+  }
+  // TODO(titzer): clean up above test and test more complicated effects.
+}
+
+
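+// The SpecializeJSFunction tests run specialized closures end-to-end:
+// CheckCall(expected, a, b) calls the compiled function with arguments
+// (a, b) and verifies that it returns {expected}.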
+TEST(SpecializeJSFunction_ToConstant1) {
+  FunctionTester T(
+      "(function() { var x = 1; function inc(a)"
+      " { return a + x; } return inc; })()");
+
+  T.CheckCall(1.0, 0.0, 0.0);
+  T.CheckCall(2.0, 1.0, 0.0);
+  T.CheckCall(2.1, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant2) {
+  FunctionTester T(
+      "(function() { var x = 1.5; var y = 2.25; var z = 3.75;"
+      " function f(a) { return a - x + y - z; } return f; })()");
+
+  T.CheckCall(-3.0, 0.0, 0.0);
+  T.CheckCall(-2.0, 1.0, 0.0);
+  T.CheckCall(-1.9, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant3) {
+  FunctionTester T(
+      "(function() { var x = -11.5; function inc()"
+      " { return (function(a) { return a + x; }); }"
+      " return inc(); })()");
+
+  T.CheckCall(-11.5, 0.0, 0.0);
+  T.CheckCall(-10.5, 1.0, 0.0);
+  T.CheckCall(-10.4, 1.1, 0.0);
+}
+
+
+TEST(SpecializeJSFunction_ToConstant_uninit) {
+  {
+    FunctionTester T(
+        "(function() { if (false) { var x = 1; } function inc(a)"
+        " { return x; } return inc; })()");  // x is undefined!
+
+    CHECK(T.Call(T.Val(0.0), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+    CHECK(T.Call(T.Val(2.0), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+    CHECK(T.Call(T.Val(-2.1), T.Val(0.0)).ToHandleChecked()->IsUndefined());
+  }
+
+  {
+    FunctionTester T(
+        "(function() { if (false) { var x = 1; } function inc(a)"
+        " { return a + x; } return inc; })()");  // x is undefined!
+
+    CHECK(T.Call(T.Val(0.0), T.Val(0.0)).ToHandleChecked()->IsNaN());
+    CHECK(T.Call(T.Val(2.0), T.Val(0.0)).ToHandleChecked()->IsNaN());
+    CHECK(T.Call(T.Val(-2.1), T.Val(0.0)).ToHandleChecked()->IsNaN());
+  }
+}
diff --git a/test/cctest/compiler/test-js-typed-lowering.cc b/test/cctest/compiler/test-js-typed-lowering.cc
new file mode 100644
index 0000000..cf126c2
--- /dev/null
+++ b/test/cctest/compiler/test-js-typed-lowering.cc
@@ -0,0 +1,1385 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/typer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class JSTypedLoweringTester : public HandleAndZoneScope {
+ public:
+  explicit JSTypedLoweringTester(int num_parameters = 0)
+      : isolate(main_isolate()),
+        binop(NULL),
+        unop(NULL),
+        javascript(main_zone()),
+        simplified(main_zone()),
+        common(main_zone()),
+        graph(main_zone()),
+        typer(main_zone()),
+        context_node(NULL) {
+    typer.DecorateGraph(&graph);
+    Node* s = graph.NewNode(common.Start(num_parameters));
+    graph.SetStart(s);
+  }
+
+  Isolate* isolate;
+  const Operator* binop;
+  const Operator* unop;
+  JSOperatorBuilder javascript;
+  MachineOperatorBuilder machine;
+  SimplifiedOperatorBuilder simplified;
+  CommonOperatorBuilder common;
+  Graph graph;
+  Typer typer;
+  Node* context_node;
+
+  Node* Parameter(Type* t, int32_t index = 0) {
+    Node* n = graph.NewNode(common.Parameter(index), graph.start());
+    NodeProperties::SetBounds(n, Bounds(Type::None(), t));
+    return n;
+  }
+
+  Node* UndefinedConstant() {
+    Unique<Object> unique =
+        Unique<Object>::CreateImmovable(isolate->factory()->undefined_value());
+    return graph.NewNode(common.HeapConstant(unique));
+  }
+
+  Node* HeapConstant(Handle<Object> constant) {
+    Unique<Object> unique = Unique<Object>::CreateUninitialized(constant);
+    return graph.NewNode(common.HeapConstant(unique));
+  }
+
+  Node* EmptyFrameState(Node* context) {
+    Node* parameters = graph.NewNode(common.StateValues(0));
+    Node* locals = graph.NewNode(common.StateValues(0));
+    Node* stack = graph.NewNode(common.StateValues(0));
+
+    Node* state_node =
+        graph.NewNode(common.FrameState(JS_FRAME, BailoutId(0), kIgnoreOutput),
+                      parameters, locals, stack, context, UndefinedConstant());
+
+    return state_node;
+  }
+
+  Node* reduce(Node* node) {
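+    // Run a single reduction step on {node} with a fresh JSTypedLowering
+    // reducer; return the replacement if it changed, the node itself if not.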
+    JSGraph jsgraph(&graph, &common, &javascript, &typer, &machine);
+    JSTypedLowering reducer(&jsgraph);
+    Reduction reduction = reducer.Reduce(node);
+    if (reduction.Changed()) return reduction.replacement();
+    return node;
+  }
+
+  Node* start() { return graph.start(); }
+
+  Node* context() {
+    if (context_node == NULL) {
+      context_node = graph.NewNode(common.Parameter(-1), graph.start());
+    }
+    return context_node;
+  }
+
+  Node* control() { return start(); }
+
+  void CheckPureBinop(IrOpcode::Value expected, Node* node) {
+    CHECK_EQ(expected, node->opcode());
+    CHECK_EQ(2, node->InputCount());  // should not have context, effect, etc.
+  }
+
+  void CheckPureBinop(const Operator* expected, Node* node) {
+    CHECK_EQ(expected->opcode(), node->op()->opcode());
+    CHECK_EQ(2, node->InputCount());  // should not have context, effect, etc.
+  }
+
+  Node* ReduceUnop(const Operator* op, Type* input_type) {
+    return reduce(Unop(op, Parameter(input_type)));
+  }
+
+  Node* ReduceBinop(const Operator* op, Type* left_type, Type* right_type) {
+    return reduce(Binop(op, Parameter(left_type, 0), Parameter(right_type, 1)));
+  }
+
+  Node* Binop(const Operator* op, Node* left, Node* right) {
+    // JS binops also require context, effect, and control
+    return graph.NewNode(op, left, right, context(), start(), control());
+  }
+
+  Node* Unop(const Operator* op, Node* input) {
+    // JS unops also require context, effect, and control
+    return graph.NewNode(op, input, context(), start(), control());
+  }
+
+  Node* UseForEffect(Node* node) {
+    // TODO(titzer): use EffectPhi after fixing EffectCount
+    return graph.NewNode(javascript.ToNumber(), node, context(), node,
+                         control());
+  }
+
+  void CheckEffectInput(Node* effect, Node* use) {
+    CHECK_EQ(effect, NodeProperties::GetEffectInput(use));
+  }
+
+  void CheckInt32Constant(int32_t expected, Node* result) {
+    CHECK_EQ(IrOpcode::kInt32Constant, result->opcode());
+    CHECK_EQ(expected, OpParameter<int32_t>(result));
+  }
+
+  void CheckNumberConstant(double expected, Node* result) {
+    CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
+    CHECK_EQ(expected, OpParameter<double>(result));
+  }
+
+  void CheckNaN(Node* result) {
+    CHECK_EQ(IrOpcode::kNumberConstant, result->opcode());
+    double value = OpParameter<double>(result);
+    CHECK(std::isnan(value));
+  }
+
+  void CheckTrue(Node* result) {
+    CheckHandle(isolate->factory()->true_value(), result);
+  }
+
+  void CheckFalse(Node* result) {
+    CheckHandle(isolate->factory()->false_value(), result);
+  }
+
+  void CheckHandle(Handle<Object> expected, Node* result) {
+    CHECK_EQ(IrOpcode::kHeapConstant, result->opcode());
+    Handle<Object> value = OpParameter<Unique<Object> >(result).handle();
+    CHECK_EQ(*expected, *value);
+  }
+};
+
+static Type* kStringTypes[] = {Type::InternalizedString(), Type::OtherString(),
+                               Type::String()};
+
+
+static Type* kInt32Types[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32()};
+
+
+static Type* kNumberTypes[] = {
+    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
+    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
+    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
+    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
+    Type::OrderedNumber(),   Type::Number()};
+
+
+static Type* kJSTypes[] = {Type::Undefined(), Type::Null(),   Type::Boolean(),
+                           Type::Number(),    Type::String(), Type::Object()};
+
+
+static Type* I32Type(bool is_signed) {
+  return is_signed ? Type::Signed32() : Type::Unsigned32();
+}
+
+
+static IrOpcode::Value NumberToI32(bool is_signed) {
+  return is_signed ? IrOpcode::kNumberToInt32 : IrOpcode::kNumberToUint32;
+}
+
+
+// TODO(turbofan): Lowering of StringAdd is disabled for now.
+#if 0
+TEST(StringBinops) {
+  JSTypedLoweringTester R;
+
+  for (size_t i = 0; i < arraysize(kStringTypes); ++i) {
+    Node* p0 = R.Parameter(kStringTypes[i], 0);
+
+    for (size_t j = 0; j < arraysize(kStringTypes); ++j) {
+      Node* p1 = R.Parameter(kStringTypes[j], 1);
+
+      Node* add = R.Binop(R.javascript.Add(), p0, p1);
+      Node* r = R.reduce(add);
+
+      R.CheckPureBinop(IrOpcode::kStringAdd, r);
+      CHECK_EQ(p0, r->InputAt(0));
+      CHECK_EQ(p1, r->InputAt(1));
+    }
+  }
+}
+#endif
+
+
+TEST(AddNumber1) {
+  JSTypedLoweringTester R;
+  for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
+    Node* p0 = R.Parameter(kNumberTypes[i], 0);
+    Node* p1 = R.Parameter(kNumberTypes[i], 1);
+    Node* add = R.Binop(R.javascript.Add(), p0, p1);
+    Node* r = R.reduce(add);
+
+    R.CheckPureBinop(IrOpcode::kNumberAdd, r);
+    CHECK_EQ(p0, r->InputAt(0));
+    CHECK_EQ(p1, r->InputAt(1));
+  }
+}
+
+
+TEST(NumberBinops) {
+  JSTypedLoweringTester R;
+  const Operator* ops[] = {
+      R.javascript.Add(),      R.simplified.NumberAdd(),
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t i = 0; i < arraysize(kNumberTypes); ++i) {
+    Node* p0 = R.Parameter(kNumberTypes[i], 0);
+
+    for (size_t j = 0; j < arraysize(kNumberTypes); ++j) {
+      Node* p1 = R.Parameter(kNumberTypes[j], 1);
+
+      for (size_t k = 0; k < arraysize(ops); k += 2) {
+        Node* add = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        CHECK_EQ(p0, r->InputAt(0));
+        CHECK_EQ(p1, r->InputAt(1));
+      }
+    }
+  }
+}
+
+
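+// Checks that {new_input} is a valid int32 lowering of {old_input}: either
+// unchanged (already of the expected type), a constant whose value
+// round-trips through the int32/uint32 conversion, or an explicit
+// NumberTo(U)Int32 node.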
+static void CheckToI32(Node* old_input, Node* new_input, bool is_signed) {
+  Type* old_type = NodeProperties::GetBounds(old_input).upper;
+  Type* expected_type = I32Type(is_signed);
+  if (old_type->Is(expected_type)) {
+    CHECK_EQ(old_input, new_input);
+  } else if (new_input->opcode() == IrOpcode::kNumberConstant) {
+    CHECK(NodeProperties::GetBounds(new_input).upper->Is(expected_type));
+    double v = OpParameter<double>(new_input);
+    double e = static_cast<double>(is_signed ? FastD2I(v) : FastD2UI(v));
+    CHECK_EQ(e, v);
+  } else {
+    CHECK_EQ(NumberToI32(is_signed), new_input->opcode());
+  }
+}
+
+
+// A helper class for testing lowering of bitwise shift operators.
+class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
+ public:
+  static const int kNumberOps = 6;
+  const Operator* ops[kNumberOps];
+  bool signedness[kNumberOps];
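+  // {ops} holds (JS operator, expected machine operator) pairs; the parallel
+  // {signedness} flags record whether the matching operand is expected to be
+  // converted as a signed (true) or unsigned (false) 32-bit integer.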
+
+  JSBitwiseShiftTypedLoweringTester() {
+    int i = 0;
+    set(i++, javascript.ShiftLeft(), true);
+    set(i++, machine.Word32Shl(), false);
+    set(i++, javascript.ShiftRight(), true);
+    set(i++, machine.Word32Sar(), false);
+    set(i++, javascript.ShiftRightLogical(), false);
+    set(i++, machine.Word32Shr(), false);
+  }
+
+ private:
+  void set(int idx, const Operator* op, bool s) {
+    ops[idx] = op;
+    signedness[idx] = s;
+  }
+};
+
+
+TEST(Int32BitwiseShifts) {
+  JSBitwiseShiftTypedLoweringTester R;
+
+  Type* types[] = {
+      Type::SignedSmall(), Type::UnsignedSmall(), Type::OtherSigned32(),
+      Type::Unsigned32(),  Type::Signed32(),      Type::MinusZero(),
+      Type::NaN(),         Type::OtherNumber(),   Type::Undefined(),
+      Type::Null(),        Type::Boolean(),       Type::Number(),
+      Type::String(),      Type::Object()};
+
+  for (size_t i = 0; i < arraysize(types); ++i) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < arraysize(types); ++j) {
+      Node* p1 = R.Parameter(types[j], 1);
+
+      for (int k = 0; k < R.kNumberOps; k += 2) {
+        Node* add = R.Binop(R.ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(R.ops[k + 1], r);
+        Node* r0 = r->InputAt(0);
+        Node* r1 = r->InputAt(1);
+
+        CheckToI32(p0, r0, R.signedness[k]);
+
+        R.CheckPureBinop(IrOpcode::kWord32And, r1);
+        CheckToI32(p1, r1->InputAt(0), R.signedness[k + 1]);
+        R.CheckInt32Constant(0x1F, r1->InputAt(1));
+      }
+    }
+  }
+}
+
+
+// A helper class for testing lowering of bitwise operators.
+class JSBitwiseTypedLoweringTester : public JSTypedLoweringTester {
+ public:
+  static const int kNumberOps = 6;
+  const Operator* ops[kNumberOps];
+  bool signedness[kNumberOps];
+
+  JSBitwiseTypedLoweringTester() {
+    int i = 0;
+    set(i++, javascript.BitwiseOr(), true);
+    set(i++, machine.Word32Or(), true);
+    set(i++, javascript.BitwiseXor(), true);
+    set(i++, machine.Word32Xor(), true);
+    set(i++, javascript.BitwiseAnd(), true);
+    set(i++, machine.Word32And(), true);
+  }
+
+ private:
+  void set(int idx, const Operator* op, bool s) {
+    ops[idx] = op;
+    signedness[idx] = s;
+  }
+};
+
+
+TEST(Int32BitwiseBinops) {
+  JSBitwiseTypedLoweringTester R;
+
+  Type* types[] = {
+      Type::SignedSmall(), Type::UnsignedSmall(), Type::OtherSigned32(),
+      Type::Unsigned32(),  Type::Signed32(),      Type::MinusZero(),
+      Type::NaN(),         Type::OtherNumber(),   Type::Undefined(),
+      Type::Null(),        Type::Boolean(),       Type::Number(),
+      Type::String(),      Type::Object()};
+
+  for (size_t i = 0; i < arraysize(types); ++i) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < arraysize(types); ++j) {
+      Node* p1 = R.Parameter(types[j], 1);
+
+      for (int k = 0; k < R.kNumberOps; k += 2) {
+        Node* add = R.Binop(R.ops[k], p0, p1);
+        Node* r = R.reduce(add);
+
+        R.CheckPureBinop(R.ops[k + 1], r);
+
+        CheckToI32(p0, r->InputAt(0), R.signedness[k]);
+        CheckToI32(p1, r->InputAt(1), R.signedness[k + 1]);
+      }
+    }
+  }
+}
+
+
+TEST(JSToNumber1) {
+  JSTypedLoweringTester R;
+  const Operator* ton = R.javascript.ToNumber();
+
+  for (size_t i = 0; i < arraysize(kNumberTypes); i++) {  // ToNumber(number)
+    Node* r = R.ReduceUnop(ton, kNumberTypes[i]);
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  {  // ToNumber(undefined)
+    Node* r = R.ReduceUnop(ton, Type::Undefined());
+    R.CheckNaN(r);
+  }
+
+  {  // ToNumber(null)
+    Node* r = R.ReduceUnop(ton, Type::Null());
+    R.CheckNumberConstant(0.0, r);
+  }
+}
+
+
+TEST(JSToNumber_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(), Type::Undefined(), Type::Number()};
+
+  for (size_t i = 0; i < arraysize(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToNumber(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::Number())) {
+      CHECK_EQ(n, r);
+    } else {
+      CHECK_EQ(IrOpcode::kNumberConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(JSToNumberOfConstant) {
+  JSTypedLoweringTester R;
+
+  const Operator* ops[] = {
+      R.common.NumberConstant(0), R.common.NumberConstant(-1),
+      R.common.NumberConstant(0.1), R.common.Int32Constant(1177),
+      R.common.Float64Constant(0.99)};
+
+  for (size_t i = 0; i < arraysize(ops); i++) {
+    Node* n = R.graph.NewNode(ops[i]);
+    Node* convert = R.Unop(R.javascript.ToNumber(), n);
+    Node* r = R.reduce(convert);
+    // Note that either outcome below is correct. It only depends on whether
+    // the types of constants are eagerly computed or only computed by the
+    // typing pass.
+    if (NodeProperties::GetBounds(n).upper->Is(Type::Number())) {
+      // If number constants are eagerly typed, then reduction should
+      // remove the ToNumber.
+      CHECK_EQ(n, r);
+    } else {
+      // Otherwise, type-based lowering should only look at the type, and
+      // *not* try to constant fold.
+      CHECK_EQ(convert, r);
+    }
+  }
+}
+
+
+TEST(JSToNumberOfNumberOrOtherPrimitive) {
+  JSTypedLoweringTester R;
+  Type* others[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
+                    Type::String()};
+
+  for (size_t i = 0; i < arraysize(others); i++) {
+    Type* t = Type::Union(Type::Number(), others[i], R.main_zone());
+    Node* r = R.ReduceUnop(R.javascript.ToNumber(), t);
+    CHECK_EQ(IrOpcode::kJSToNumber, r->opcode());
+  }
+}
+
+
+TEST(JSToBoolean) {
+  JSTypedLoweringTester R;
+  const Operator* op = R.javascript.ToBoolean();
+
+  {  // ToBoolean(undefined)
+    Node* r = R.ReduceUnop(op, Type::Undefined());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(null)
+    Node* r = R.ReduceUnop(op, Type::Null());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(boolean)
+    Node* r = R.ReduceUnop(op, Type::Boolean());
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  {  // ToBoolean(ordered-number)
+    Node* r = R.ReduceUnop(op, Type::OrderedNumber());
+    CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+    Node* i = r->InputAt(0);
+    CHECK_EQ(IrOpcode::kNumberEqual, i->opcode());
+    // ToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
+  }
+
+  {  // ToBoolean(string)
+    Node* r = R.ReduceUnop(op, Type::String());
+    // TODO(titzer): test will break with better js-typed-lowering
+    CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+  }
+
+  {  // ToBoolean(object)
+    Node* r = R.ReduceUnop(op, Type::DetectableObject());
+    R.CheckTrue(r);
+  }
+
+  {  // ToBoolean(undetectable)
+    Node* r = R.ReduceUnop(op, Type::Undetectable());
+    R.CheckFalse(r);
+  }
+
+  {  // ToBoolean(object)
+    Node* r = R.ReduceUnop(op, Type::Object());
+    CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+  }
+}
+
+
+TEST(JSToBoolean_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(),             Type::Undefined(),
+                   Type::Boolean(),          Type::OrderedNumber(),
+                   Type::DetectableObject(), Type::Undetectable()};
+
+  for (size_t i = 0; i < arraysize(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToBoolean(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::Boolean())) {
+      CHECK_EQ(n, r);
+    } else if (types[i]->Is(Type::OrderedNumber())) {
+      CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+    } else {
+      CHECK_EQ(IrOpcode::kHeapConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(JSToString1) {
+  JSTypedLoweringTester R;
+
+  for (size_t i = 0; i < arraysize(kStringTypes); i++) {
+    Node* r = R.ReduceUnop(R.javascript.ToString(), kStringTypes[i]);
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());
+  }
+
+  const Operator* op = R.javascript.ToString();
+
+  {  // ToString(undefined) => "undefined"
+    Node* r = R.ReduceUnop(op, Type::Undefined());
+    R.CheckHandle(R.isolate->factory()->undefined_string(), r);
+  }
+
+  {  // ToString(null) => "null"
+    Node* r = R.ReduceUnop(op, Type::Null());
+    R.CheckHandle(R.isolate->factory()->null_string(), r);
+  }
+
+  {  // ToString(boolean)
+    Node* r = R.ReduceUnop(op, Type::Boolean());
+    // TODO(titzer): could be a branch
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());
+  }
+
+  {  // ToString(number)
+    Node* r = R.ReduceUnop(op, Type::Number());
+    // TODO(titzer): could remove effects
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());
+  }
+
+  {  // ToString(string)
+    Node* r = R.ReduceUnop(op, Type::String());
+    CHECK_EQ(IrOpcode::kParameter, r->opcode());  // No-op
+  }
+
+  {  // ToString(object)
+    Node* r = R.ReduceUnop(op, Type::Object());
+    CHECK_EQ(IrOpcode::kJSToString, r->opcode());  // No reduction.
+  }
+}
+
+
+TEST(JSToString_replacement) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Null(), Type::Undefined(), Type::String()};
+
+  for (size_t i = 0; i < arraysize(types); i++) {
+    Node* n = R.Parameter(types[i]);
+    Node* c = R.graph.NewNode(R.javascript.ToString(), n, R.context(),
+                              R.start(), R.start());
+    Node* effect_use = R.UseForEffect(c);
+    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
+
+    R.CheckEffectInput(c, effect_use);
+    Node* r = R.reduce(c);
+
+    if (types[i]->Is(Type::String())) {
+      CHECK_EQ(n, r);
+    } else {
+      CHECK_EQ(IrOpcode::kHeapConstant, r->opcode());
+    }
+
+    CHECK_EQ(n, add->InputAt(0));
+    CHECK_EQ(r, add->InputAt(1));
+    R.CheckEffectInput(R.start(), effect_use);
+  }
+}
+
+
+TEST(StringComparison) {
+  JSTypedLoweringTester R;
+
+  const Operator* ops[] = {
+      R.javascript.LessThan(),           R.simplified.StringLessThan(),
+      R.javascript.LessThanOrEqual(),    R.simplified.StringLessThanOrEqual(),
+      R.javascript.GreaterThan(),        R.simplified.StringLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.StringLessThanOrEqual()};
+
+  for (size_t i = 0; i < arraysize(kStringTypes); i++) {
+    Node* p0 = R.Parameter(kStringTypes[i], 0);
+    for (size_t j = 0; j < arraysize(kStringTypes); j++) {
+      Node* p1 = R.Parameter(kStringTypes[j], 1);
+
+      for (size_t k = 0; k < arraysize(ops); k += 2) {
+        Node* cmp = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(cmp);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        if (k >= 4) {
+          // GreaterThan and GreaterThanOrEqual commute the inputs
+          // and use the LessThan and LessThanOrEqual operators.
+          CHECK_EQ(p1, r->InputAt(0));
+          CHECK_EQ(p0, r->InputAt(1));
+        } else {
+          CHECK_EQ(p0, r->InputAt(0));
+          CHECK_EQ(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
+
+
+static void CheckIsConvertedToNumber(Node* val, Node* converted) {
+  if (NodeProperties::GetBounds(val).upper->Is(Type::Number())) {
+    CHECK_EQ(val, converted);
+  } else if (NodeProperties::GetBounds(val).upper->Is(Type::Boolean())) {
+    CHECK_EQ(IrOpcode::kBooleanToNumber, converted->opcode());
+    CHECK_EQ(val, converted->InputAt(0));
+  } else {
+    if (converted->opcode() == IrOpcode::kNumberConstant) return;
+    CHECK_EQ(IrOpcode::kJSToNumber, converted->opcode());
+    CHECK_EQ(val, converted->InputAt(0));
+  }
+}
+
+
+TEST(NumberComparison) {
+  JSTypedLoweringTester R;
+
+  const Operator* ops[] = {
+      R.javascript.LessThan(),           R.simplified.NumberLessThan(),
+      R.javascript.LessThanOrEqual(),    R.simplified.NumberLessThanOrEqual(),
+      R.javascript.GreaterThan(),        R.simplified.NumberLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual()};
+
+  for (size_t i = 0; i < arraysize(kJSTypes); i++) {
+    Type* t0 = kJSTypes[i];
+    // Skip Type::String and Type::Receiver which might coerce into a string.
+    if (t0->Is(Type::String()) || t0->Is(Type::Receiver())) continue;
+    Node* p0 = R.Parameter(t0, 0);
+
+    for (size_t j = 0; j < arraysize(kJSTypes); j++) {
+      Type* t1 = kJSTypes[j];
+      // Skip Type::String and Type::Receiver which might coerce into a string.
+      if (t1->Is(Type::String()) || t1->Is(Type::Receiver())) continue;
+      Node* p1 = R.Parameter(t1, 1);
+
+      for (size_t k = 0; k < arraysize(ops); k += 2) {
+        Node* cmp = R.Binop(ops[k], p0, p1);
+        Node* r = R.reduce(cmp);
+
+        R.CheckPureBinop(ops[k + 1], r);
+        if (k >= 4) {
+          // GreaterThan and GreaterThanOrEqual commute the inputs
+          // and use the LessThan and LessThanOrEqual operators.
+          CheckIsConvertedToNumber(p1, r->InputAt(0));
+          CheckIsConvertedToNumber(p0, r->InputAt(1));
+        } else {
+          CheckIsConvertedToNumber(p0, r->InputAt(0));
+          CheckIsConvertedToNumber(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(MixedComparison1) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Number(), Type::String(),
+                   Type::Union(Type::Number(), Type::String(), R.main_zone())};
+
+  for (size_t i = 0; i < arraysize(types); i++) {
+    Node* p0 = R.Parameter(types[i], 0);
+
+    for (size_t j = 0; j < arraysize(types); j++) {
+      Node* p1 = R.Parameter(types[j], 1);
+      {
+        Node* cmp = R.Binop(R.javascript.LessThan(), p0, p1);
+        Node* r = R.reduce(cmp);
+
+        if (!types[i]->Maybe(Type::String()) ||
+            !types[j]->Maybe(Type::String())) {
+          if (types[i]->Is(Type::String()) && types[j]->Is(Type::String())) {
+            R.CheckPureBinop(R.simplified.StringLessThan(), r);
+          } else {
+            R.CheckPureBinop(R.simplified.NumberLessThan(), r);
+          }
+        } else {
+          CHECK_EQ(cmp, r);  // No reduction of mixed types.
+        }
+      }
+    }
+  }
+}
+
+
+TEST(ObjectComparison) {
+  JSTypedLoweringTester R;
+
+  Node* p0 = R.Parameter(Type::Number(), 0);
+  Node* p1 = R.Parameter(Type::Object(), 1);
+
+  Node* cmp = R.Binop(R.javascript.LessThan(), p0, p1);
+  Node* effect_use = R.UseForEffect(cmp);
+
+  R.CheckEffectInput(R.start(), cmp);
+  R.CheckEffectInput(cmp, effect_use);
+
+  Node* r = R.reduce(cmp);
+
+  R.CheckPureBinop(R.simplified.NumberLessThan(), r);
+
+  Node* i0 = r->InputAt(0);
+  Node* i1 = r->InputAt(1);
+
+  CHECK_EQ(p0, i0);
+  CHECK_NE(p1, i1);
+  CHECK_EQ(IrOpcode::kParameter, i0->opcode());
+  CHECK_EQ(IrOpcode::kJSToNumber, i1->opcode());
+
+  // Check effect chain is correct.
+  R.CheckEffectInput(R.start(), i1);
+  R.CheckEffectInput(i1, effect_use);
+}
+
+
+TEST(UnaryNot) {
+  JSTypedLoweringTester R;
+  const Operator* opnot = R.javascript.UnaryNot();
+
+  for (size_t i = 0; i < arraysize(kJSTypes); i++) {
+    Node* orig = R.Unop(opnot, R.Parameter(kJSTypes[i]));
+    Node* use = R.graph.NewNode(R.common.Return(), orig);
+    Node* r = R.reduce(orig);
+    // TODO(titzer): test will break if/when js-typed-lowering constant folds.
+    CHECK_EQ(IrOpcode::kBooleanNot, use->InputAt(0)->opcode());
+
+    if (r == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
+      // The original node was turned into a ToBoolean.
+      CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+    } else {
+      CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+    }
+  }
+}
+
+
+TEST(RemoveToNumberEffects) {
+  FLAG_turbo_deoptimization = true;
+
+  JSTypedLoweringTester R;
+
+  Node* effect_use = NULL;
+  for (int i = 0; i < 10; i++) {
+    Node* p0 = R.Parameter(Type::Number());
+    Node* ton = R.Unop(R.javascript.ToNumber(), p0);
+    Node* frame_state = R.EmptyFrameState(R.context());
+    effect_use = NULL;
+
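+    // Each case wires the ToNumber into a different kind of effect use; after
+    // reduction the effect chain must be rerouted around the removed node.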
+    switch (i) {
+      case 0:
+        effect_use = R.graph.NewNode(R.javascript.ToNumber(), p0, R.context(),
+                                     ton, R.start());
+        break;
+      case 1:
+        effect_use = R.graph.NewNode(R.javascript.ToNumber(), ton, R.context(),
+                                     ton, R.start());
+        break;
+      case 2:
+        effect_use = R.graph.NewNode(R.common.EffectPhi(1), ton, R.start());
+        break;
+      case 3:
+        effect_use = R.graph.NewNode(R.javascript.Add(), ton, ton, R.context(),
+                                     frame_state, ton, R.start());
+        break;
+      case 4:
+        effect_use = R.graph.NewNode(R.javascript.Add(), p0, p0, R.context(),
+                                     frame_state, ton, R.start());
+        break;
+      case 5:
+        effect_use = R.graph.NewNode(R.common.Return(), p0, ton, R.start());
+        break;
+      case 6:
+        effect_use = R.graph.NewNode(R.common.Return(), ton, ton, R.start());
+    }
+
+    R.CheckEffectInput(R.start(), ton);
+    if (effect_use != NULL) R.CheckEffectInput(ton, effect_use);
+
+    Node* r = R.reduce(ton);
+    CHECK_EQ(p0, r);
+    CHECK_NE(R.start(), r);
+
+    if (effect_use != NULL) {
+      R.CheckEffectInput(R.start(), effect_use);
+      // Check that value uses of ToNumber() do not go to start().
+      for (int j = 0; j < effect_use->op()->InputCount(); j++) {
+        CHECK_NE(R.start(), effect_use->InputAt(j));
+      }
+    }
+  }
+
+  CHECK_EQ(NULL, effect_use);  // Should have covered all cases above.
+}
+
+
+// Helper class for testing the reduction of a single binop.
+class BinopEffectsTester {
+ public:
+  explicit BinopEffectsTester(const Operator* op, Type* t0, Type* t1)
+      : R(),
+        p0(R.Parameter(t0, 0)),
+        p1(R.Parameter(t1, 1)),
+        binop(R.Binop(op, p0, p1)),
+        effect_use(R.graph.NewNode(R.common.EffectPhi(1), binop, R.start())) {
+    // Effects should be ordered start -> binop -> effect_use
+    R.CheckEffectInput(R.start(), binop);
+    R.CheckEffectInput(binop, effect_use);
+    result = R.reduce(binop);
+  }
+
+  JSTypedLoweringTester R;
+  Node* p0;
+  Node* p1;
+  Node* binop;
+  Node* effect_use;
+  Node* result;
+
+  void CheckEffectsRemoved() { R.CheckEffectInput(R.start(), effect_use); }
+
+  void CheckEffectOrdering(Node* n0) {
+    R.CheckEffectInput(R.start(), n0);
+    R.CheckEffectInput(n0, effect_use);
+  }
+
+  void CheckEffectOrdering(Node* n0, Node* n1) {
+    R.CheckEffectInput(R.start(), n0);
+    R.CheckEffectInput(n0, n1);
+    R.CheckEffectInput(n1, effect_use);
+  }
+
+  Node* CheckConvertedInput(IrOpcode::Value opcode, int which, bool effects) {
+    return CheckConverted(opcode, result->InputAt(which), effects);
+  }
+
+  Node* CheckConverted(IrOpcode::Value opcode, Node* node, bool effects) {
+    CHECK_EQ(opcode, node->opcode());
+    if (effects) {
+      CHECK_LT(0, OperatorProperties::GetEffectInputCount(node->op()));
+    } else {
+      CHECK_EQ(0, OperatorProperties::GetEffectInputCount(node->op()));
+    }
+    return node;
+  }
+
+  Node* CheckNoOp(int which) {
+    CHECK_EQ(which == 0 ? p0 : p1, result->InputAt(which));
+    return result->InputAt(which);
+  }
+};
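+
+// A typical use (sketch): construct the tester with a JS operator and two
+// input types, then assert on the reduced node, e.g.
+//   BinopEffectsTester B(R.javascript.Add(), Type::Number(), Type::Number());
+//   CHECK_EQ(IrOpcode::kNumberAdd, B.result->op()->opcode());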
+
+
+// Helper function for strict and non-strict equality reductions.
+void CheckEqualityReduction(JSTypedLoweringTester* R, bool strict, Node* l,
+                            Node* r, IrOpcode::Value expected) {
+  for (int j = 0; j < 2; j++) {
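+    // j == 0 tests the operand order (l, r); j == 1 tests (r, l).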
+    Node* p0 = j == 0 ? l : r;
+    Node* p1 = j == 1 ? l : r;
+
+    {
+      Node* eq = strict ? R->graph.NewNode(R->javascript.StrictEqual(), p0, p1)
+                        : R->Binop(R->javascript.Equal(), p0, p1);
+      Node* r = R->reduce(eq);
+      R->CheckPureBinop(expected, r);
+    }
+
+    {
+      Node* ne = strict
+                     ? R->graph.NewNode(R->javascript.StrictNotEqual(), p0, p1)
+                     : R->Binop(R->javascript.NotEqual(), p0, p1);
+      Node* n = R->reduce(ne);
+      CHECK_EQ(IrOpcode::kBooleanNot, n->opcode());
+      Node* r = n->InputAt(0);
+      R->CheckPureBinop(expected, r);
+    }
+  }
+}
+
+
+TEST(EqualityForNumbers) {
+  JSTypedLoweringTester R;
+
+  Type* simple_number_types[] = {Type::UnsignedSmall(), Type::SignedSmall(),
+                                 Type::Signed32(), Type::Unsigned32(),
+                                 Type::Number()};
+
+  for (size_t i = 0; i < arraysize(simple_number_types); ++i) {
+    Node* p0 = R.Parameter(simple_number_types[i], 0);
+
+    for (size_t j = 0; j < arraysize(simple_number_types); ++j) {
+      Node* p1 = R.Parameter(simple_number_types[j], 1);
+
+      CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kNumberEqual);
+      CheckEqualityReduction(&R, false, p0, p1, IrOpcode::kNumberEqual);
+    }
+  }
+}
+
+
+TEST(StrictEqualityForRefEqualTypes) {
+  JSTypedLoweringTester R;
+
+  Type* types[] = {Type::Undefined(), Type::Null(), Type::Boolean(),
+                   Type::Object(), Type::Receiver()};
+
+  Node* p0 = R.Parameter(Type::Any());
+  for (size_t i = 0; i < arraysize(types); i++) {
+    Node* p1 = R.Parameter(types[i]);
+    CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kReferenceEqual);
+  }
+  // TODO(titzer): Equal(RefEqualTypes)
+}
+
+
+TEST(StringEquality) {
+  JSTypedLoweringTester R;
+  Node* p0 = R.Parameter(Type::String());
+  Node* p1 = R.Parameter(Type::String());
+
+  CheckEqualityReduction(&R, true, p0, p1, IrOpcode::kStringEqual);
+  CheckEqualityReduction(&R, false, p0, p1, IrOpcode::kStringEqual);
+}
+
+
+TEST(RemovePureNumberBinopEffects) {
+  JSTypedLoweringTester R;
+
+  const Operator* ops[] = {
+      R.javascript.Equal(),           R.simplified.NumberEqual(),
+      R.javascript.Add(),             R.simplified.NumberAdd(),
+      R.javascript.Subtract(),        R.simplified.NumberSubtract(),
+      R.javascript.Multiply(),        R.simplified.NumberMultiply(),
+      R.javascript.Divide(),          R.simplified.NumberDivide(),
+      R.javascript.Modulus(),         R.simplified.NumberModulus(),
+      R.javascript.LessThan(),        R.simplified.NumberLessThan(),
+      R.javascript.LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+  };
+
+  for (size_t j = 0; j < arraysize(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Number());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckNoOp(0);
+    B.CheckNoOp(1);
+
+    B.CheckEffectsRemoved();
+  }
+}
+
+
+TEST(OrderNumberBinopEffects1) {
+  JSTypedLoweringTester R;
+
+  const Operator* ops[] = {
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t j = 0; j < arraysize(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Object(), Type::String());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    // Effects should be ordered start -> i0 -> i1 -> effect_use
+    B.CheckEffectOrdering(i0, i1);
+  }
+}
+
+
+TEST(OrderNumberBinopEffects2) {
+  JSTypedLoweringTester R;
+
+  const Operator* ops[] = {
+      R.javascript.Add(),      R.simplified.NumberAdd(),
+      R.javascript.Subtract(), R.simplified.NumberSubtract(),
+      R.javascript.Multiply(), R.simplified.NumberMultiply(),
+      R.javascript.Divide(),   R.simplified.NumberDivide(),
+      R.javascript.Modulus(),  R.simplified.NumberModulus(),
+  };
+
+  for (size_t j = 0; j < arraysize(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Symbol());
+
+    Node* i0 = B.CheckNoOp(0);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p0, i0);
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    // Effects should be ordered start -> i1 -> effect_use
+    B.CheckEffectOrdering(i1);
+  }
+
+  for (size_t j = 0; j < arraysize(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Symbol(), Type::Number());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckNoOp(1);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    CHECK_EQ(B.p1, i1);
+
+    // Effects should be ordered start -> i0 -> effect_use
+    B.CheckEffectOrdering(i0);
+  }
+}
+
+
+TEST(OrderCompareEffects) {
+  JSTypedLoweringTester R;
+
+  const Operator* ops[] = {
+      R.javascript.GreaterThan(), R.simplified.NumberLessThan(),
+      R.javascript.GreaterThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+  };
+
+  for (size_t j = 0; j < arraysize(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Symbol(), Type::String());
+    CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    // Inputs should be commuted.
+    CHECK_EQ(B.p1, i0->InputAt(0));
+    CHECK_EQ(B.p0, i1->InputAt(0));
+
+    // But effects should be ordered start -> i1 -> i0 -> effect_use
+    B.CheckEffectOrdering(i1, i0);
+  }
+
+  for (size_t j = 0; j < arraysize(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Number(), Type::Symbol());
+
+    Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
+    Node* i1 = B.result->InputAt(1);
+
+    CHECK_EQ(B.p1, i0->InputAt(0));  // Should be commuted.
+    CHECK_EQ(B.p0, i1);
+
+    // Effects should be ordered start -> i1 -> effect_use
+    B.CheckEffectOrdering(i0);
+  }
+
+  for (size_t j = 0; j < arraysize(ops); j += 2) {
+    BinopEffectsTester B(ops[j], Type::Symbol(), Type::Number());
+
+    Node* i0 = B.result->InputAt(0);
+    Node* i1 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 1, true);
+
+    CHECK_EQ(B.p1, i0);  // Should be commuted.
+    CHECK_EQ(B.p0, i1->InputAt(0));
+
+    // Effects should be ordered start -> i0 -> effect_use
+    B.CheckEffectOrdering(i1);
+  }
+}
+
+
+TEST(Int32BinopEffects) {
+  JSBitwiseTypedLoweringTester R;
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], I32Type(signed_left), I32Type(signed_right));
+    CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckNoOp(0);
+    B.CheckNoOp(1);
+
+    B.CheckEffectsRemoved();
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Number(), Type::Number());
+    CHECK_EQ(R.ops[j + 1]->opcode(), B.result->op()->opcode());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    B.CheckEffectsRemoved();
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Number(), Type::Object());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    CHECK_EQ(B.p0, i0->InputAt(0));
+    Node* ii1 = B.CheckConverted(IrOpcode::kJSToNumber, i1->InputAt(0), true);
+
+    CHECK_EQ(B.p1, ii1->InputAt(0));
+
+    B.CheckEffectOrdering(ii1);
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Object(), Type::Number());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    Node* ii0 = B.CheckConverted(IrOpcode::kJSToNumber, i0->InputAt(0), true);
+    CHECK_EQ(B.p1, i1->InputAt(0));
+
+    CHECK_EQ(B.p0, ii0->InputAt(0));
+
+    B.CheckEffectOrdering(ii0);
+  }
+
+  for (int j = 0; j < R.kNumberOps; j += 2) {
+    bool signed_left = R.signedness[j], signed_right = R.signedness[j + 1];
+    BinopEffectsTester B(R.ops[j], Type::Object(), Type::Object());
+
+    B.R.CheckPureBinop(B.result->opcode(), B.result);
+
+    Node* i0 = B.CheckConvertedInput(NumberToI32(signed_left), 0, false);
+    Node* i1 = B.CheckConvertedInput(NumberToI32(signed_right), 1, false);
+
+    Node* ii0 = B.CheckConverted(IrOpcode::kJSToNumber, i0->InputAt(0), true);
+    Node* ii1 = B.CheckConverted(IrOpcode::kJSToNumber, i1->InputAt(0), true);
+
+    CHECK_EQ(B.p0, ii0->InputAt(0));
+    CHECK_EQ(B.p1, ii1->InputAt(0));
+
+    B.CheckEffectOrdering(ii0, ii1);
+  }
+}
+
+
+TEST(UnaryNotEffects) {
+  JSTypedLoweringTester R;
+  const Operator* opnot = R.javascript.UnaryNot();
+
+  for (size_t i = 0; i < arraysize(kJSTypes); i++) {
+    Node* p0 = R.Parameter(kJSTypes[i], 0);
+    Node* orig = R.Unop(opnot, p0);
+    Node* effect_use = R.UseForEffect(orig);
+    Node* value_use = R.graph.NewNode(R.common.Return(), orig);
+    Node* r = R.reduce(orig);
+    // TODO(titzer): test will break if/when js-typed-lowering constant folds.
+    CHECK_EQ(IrOpcode::kBooleanNot, value_use->InputAt(0)->opcode());
+
+    if (r == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
+      // The original node was turned into a ToBoolean, which has an effect.
+      CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
+      R.CheckEffectInput(R.start(), orig);
+      R.CheckEffectInput(orig, effect_use);
+    } else {
+      // The effect should have been removed from this node.
+      CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
+      R.CheckEffectInput(R.start(), effect_use);
+    }
+  }
+}
+
+
+TEST(Int32AddNarrowing) {
+  {
+    JSBitwiseTypedLoweringTester R;
+
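+    // A NumberAdd whose uses are all int32 bitwise operations can be
+    // narrowed to Int32Add: the bitwise ops truncate their inputs to 32
+    // bits, so the extra precision of the double addition is unobservable.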
+    for (int o = 0; o < R.kNumberOps; o += 2) {
+      for (size_t i = 0; i < arraysize(kInt32Types); i++) {
+        Node* n0 = R.Parameter(kInt32Types[i]);
+        for (size_t j = 0; j < arraysize(kInt32Types); j++) {
+          Node* n1 = R.Parameter(kInt32Types[j]);
+          Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+          for (int l = 0; l < 2; l++) {
+            Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+            Node* or_node =
+                R.Binop(R.ops[o], l ? add_node : one, l ? one : add_node);
+            Node* r = R.reduce(or_node);
+
+            CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+            CHECK_EQ(IrOpcode::kInt32Add, add_node->opcode());
+            bool is_signed = l ? R.signedness[o] : R.signedness[o + 1];
+
+            Type* add_type = NodeProperties::GetBounds(add_node).upper;
+            CHECK(add_type->Is(I32Type(is_signed)));
+          }
+        }
+      }
+    }
+  }
+  {
+    JSBitwiseShiftTypedLoweringTester R;
+
+    for (int o = 0; o < R.kNumberOps; o += 2) {
+      for (size_t i = 0; i < arraysize(kInt32Types); i++) {
+        Node* n0 = R.Parameter(kInt32Types[i]);
+        for (size_t j = 0; j < arraysize(kInt32Types); j++) {
+          Node* n1 = R.Parameter(kInt32Types[j]);
+          Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+          for (int l = 0; l < 2; l++) {
+            Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+            Node* or_node =
+                R.Binop(R.ops[o], l ? add_node : one, l ? one : add_node);
+            Node* r = R.reduce(or_node);
+
+            CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+            CHECK_EQ(IrOpcode::kInt32Add, add_node->opcode());
+            bool is_signed = l ? R.signedness[o] : R.signedness[o + 1];
+
+            Type* add_type = NodeProperties::GetBounds(add_node).upper;
+            CHECK(add_type->Is(I32Type(is_signed)));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(Int32AddNarrowingNotOwned) {
+  JSBitwiseTypedLoweringTester R;
+
+  for (int o = 0; o < R.kNumberOps; o += 2) {
+    Node* n0 = R.Parameter(I32Type(R.signedness[o]));
+    Node* n1 = R.Parameter(I32Type(R.signedness[o + 1]));
+    Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+    Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+    Node* or_node = R.Binop(R.ops[o], add_node, one);
+    Node* other_use = R.Binop(R.simplified.NumberAdd(), add_node, one);
+    Node* r = R.reduce(or_node);
+    CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+    // Should not be reduced to Int32Add because of the other number add.
+    CHECK_EQ(IrOpcode::kNumberAdd, add_node->opcode());
+    // Conversion to int32 should be done.
+    CheckToI32(add_node, r->InputAt(0), R.signedness[o]);
+    CheckToI32(one, r->InputAt(1), R.signedness[o + 1]);
+    // The other use should also not be touched.
+    CHECK_EQ(add_node, other_use->InputAt(0));
+    CHECK_EQ(one, other_use->InputAt(1));
+  }
+}
+
+
+TEST(Int32Comparisons) {
+  JSTypedLoweringTester R;
+
+  struct Entry {
+    const Operator* js_op;
+    const Operator* uint_op;
+    const Operator* int_op;
+    const Operator* num_op;
+    bool commute;
+  };
+
+  Entry ops[] = {
+      {R.javascript.LessThan(), R.machine.Uint32LessThan(),
+       R.machine.Int32LessThan(), R.simplified.NumberLessThan(), false},
+      {R.javascript.LessThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
+       R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+       false},
+      {R.javascript.GreaterThan(), R.machine.Uint32LessThan(),
+       R.machine.Int32LessThan(), R.simplified.NumberLessThan(), true},
+      {R.javascript.GreaterThanOrEqual(), R.machine.Uint32LessThanOrEqual(),
+       R.machine.Int32LessThanOrEqual(), R.simplified.NumberLessThanOrEqual(),
+       true}};
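+
+  // For the greater-than variants the lowering commutes the operands and
+  // uses the corresponding less-than operator, hence the commute flag.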
+
+  for (size_t o = 0; o < arraysize(ops); o++) {
+    for (size_t i = 0; i < arraysize(kNumberTypes); i++) {
+      Type* t0 = kNumberTypes[i];
+      Node* p0 = R.Parameter(t0, 0);
+
+      for (size_t j = 0; j < arraysize(kNumberTypes); j++) {
+        Type* t1 = kNumberTypes[j];
+        Node* p1 = R.Parameter(t1, 1);
+
+        Node* cmp = R.Binop(ops[o].js_op, p0, p1);
+        Node* r = R.reduce(cmp);
+
+        const Operator* expected;
+        if (t0->Is(Type::Unsigned32()) && t1->Is(Type::Unsigned32())) {
+          expected = ops[o].uint_op;
+        } else if (t0->Is(Type::Signed32()) && t1->Is(Type::Signed32())) {
+          expected = ops[o].int_op;
+        } else {
+          expected = ops[o].num_op;
+        }
+        R.CheckPureBinop(expected, r);
+        if (ops[o].commute) {
+          CHECK_EQ(p1, r->InputAt(0));
+          CHECK_EQ(p0, r->InputAt(1));
+        } else {
+          CHECK_EQ(p0, r->InputAt(0));
+          CHECK_EQ(p1, r->InputAt(1));
+        }
+      }
+    }
+  }
+}
diff --git a/test/cctest/compiler/test-linkage.cc b/test/cctest/compiler/test-linkage.cc
new file mode 100644
index 0000000..ff65d6e
--- /dev/null
+++ b/test/cctest/compiler/test-linkage.cc
@@ -0,0 +1,113 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler.h"
+#include "src/zone.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/schedule.h"
+#include "test/cctest/cctest.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+// Compiles the given source so the tests can get a real JS function.
+static Handle<JSFunction> Compile(const char* source) {
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<String> source_code = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
+      source_code, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, NULL,
+      v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
+  return isolate->factory()->NewFunctionFromSharedFunctionInfo(
+      shared_function, isolate->native_context());
+}
+
+
+TEST(TestLinkageCreate) {
+  InitializedHandleScope handles;
+  Handle<JSFunction> function = Compile("a + b");
+  CompilationInfoWithZone info(function);
+  Linkage linkage(&info);
+}
+
+
+TEST(TestLinkageJSFunctionIncoming) {
+  InitializedHandleScope handles;
+
+  const char* sources[] = {"(function() { })", "(function(a) { })",
+                           "(function(a,b) { })", "(function(a,b,c) { })"};
+
+  for (int i = 0; i < static_cast<int>(arraysize(sources)); i++) {
+    i::HandleScope handles(CcTest::i_isolate());
+    Handle<JSFunction> function = v8::Utils::OpenHandle(
+        *v8::Handle<v8::Function>::Cast(CompileRun(sources[i])));
+    CompilationInfoWithZone info(function);
+    Linkage linkage(&info);
+
+    CallDescriptor* descriptor = linkage.GetIncomingDescriptor();
+    CHECK_NE(NULL, descriptor);
+
+    CHECK_EQ(1 + i, descriptor->JSParameterCount());
+    CHECK_EQ(1, descriptor->ReturnCount());
+    CHECK_EQ(Operator::kNoProperties, descriptor->properties());
+    CHECK_EQ(true, descriptor->IsJSFunctionCall());
+  }
+}
+
+
+TEST(TestLinkageCodeStubIncoming) {
+  Isolate* isolate = CcTest::InitIsolateOnce();
+  CompilationInfoWithZone info(static_cast<HydrogenCodeStub*>(NULL), isolate);
+  Linkage linkage(&info);
+  // TODO(titzer): test linkage creation with a bona fide code stub.
+  // This just checks current behavior.
+  CHECK_EQ(NULL, linkage.GetIncomingDescriptor());
+}
+
+
+TEST(TestLinkageJSCall) {
+  HandleAndZoneScope handles;
+  Handle<JSFunction> function = Compile("a + c");
+  CompilationInfoWithZone info(function);
+  Linkage linkage(&info);
+
+  for (int i = 0; i < 32; i++) {
+    CallDescriptor* descriptor = linkage.GetJSCallDescriptor(i);
+    CHECK_NE(NULL, descriptor);
+    CHECK_EQ(i, descriptor->JSParameterCount());
+    CHECK_EQ(1, descriptor->ReturnCount());
+    CHECK_EQ(Operator::kNoProperties, descriptor->properties());
+    CHECK_EQ(true, descriptor->IsJSFunctionCall());
+  }
+}
+
+
+TEST(TestLinkageRuntimeCall) {
+  // TODO(titzer): test linkage creation for outgoing runtime calls.
+}
+
+
+TEST(TestLinkageStubCall) {
+  // TODO(titzer): test linkage creation for outgoing stub calls.
+}
+
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-machine-operator-reducer.cc b/test/cctest/compiler/test-machine-operator-reducer.cc
new file mode 100644
index 0000000..eca1f3c
--- /dev/null
+++ b/test/cctest/compiler/test-machine-operator-reducer.cc
@@ -0,0 +1,809 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/cctest/cctest.h"
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/typer.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <typename T>
+const Operator* NewConstantOperator(CommonOperatorBuilder* common,
+                                    volatile T value);
+
+template <>
+const Operator* NewConstantOperator<int32_t>(CommonOperatorBuilder* common,
+                                             volatile int32_t value) {
+  return common->Int32Constant(value);
+}
+
+template <>
+const Operator* NewConstantOperator<double>(CommonOperatorBuilder* common,
+                                            volatile double value) {
+  return common->Float64Constant(value);
+}
+
+
+template <typename T>
+T ValueOfOperator(const Operator* op);
+
+template <>
+int32_t ValueOfOperator<int32_t>(const Operator* op) {
+  CHECK_EQ(IrOpcode::kInt32Constant, op->opcode());
+  return OpParameter<int32_t>(op);
+}
+
+template <>
+double ValueOfOperator<double>(const Operator* op) {
+  CHECK_EQ(IrOpcode::kFloat64Constant, op->opcode());
+  return OpParameter<double>(op);
+}
+
+
+class ReducerTester : public HandleAndZoneScope {
+ public:
+  explicit ReducerTester(int num_parameters = 0)
+      : isolate(main_isolate()),
+        binop(NULL),
+        unop(NULL),
+        common(main_zone()),
+        graph(main_zone()),
+        javascript(main_zone()),
+        typer(main_zone()),
+        jsgraph(&graph, &common, &javascript, &typer, &machine),
+        maxuint32(Constant<int32_t>(kMaxUInt32)) {
+    Node* s = graph.NewNode(common.Start(num_parameters));
+    graph.SetStart(s);
+  }
+
+  Isolate* isolate;
+  const Operator* binop;
+  const Operator* unop;
+  MachineOperatorBuilder machine;
+  CommonOperatorBuilder common;
+  Graph graph;
+  JSOperatorBuilder javascript;
+  Typer typer;
+  JSGraph jsgraph;
+  Node* maxuint32;
+
+  template <typename T>
+  Node* Constant(volatile T value) {
+    return graph.NewNode(NewConstantOperator<T>(&common, value));
+  }
+
+  template <typename T>
+  const T ValueOf(const Operator* op) {
+    return ValueOfOperator<T>(op);
+  }
+
+  // Check that the reduction of this binop applied to constants {a} and {b}
+  // yields the {expect} value.
+  template <typename T>
+  void CheckFoldBinop(volatile T expect, volatile T a, volatile T b) {
+    CheckFoldBinop<T>(expect, Constant<T>(a), Constant<T>(b));
+  }
+
+  // Check that the reduction of this binop applied to {a} and {b} yields
+  // the {expect} value.
+  template <typename T>
+  void CheckFoldBinop(volatile T expect, Node* a, Node* b) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, a, b);
+    MachineOperatorReducer reducer(&jsgraph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_NE(n, reduction.replacement());
+    CHECK_EQ(expect, ValueOf<T>(reduction.replacement()->op()));
+  }
+
+  // Check that the reduction of this binop applied to {a} and {b} yields
+  // the {expect} node.
+  void CheckBinop(Node* expect, Node* a, Node* b) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, a, b);
+    MachineOperatorReducer reducer(&jsgraph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_EQ(expect, reduction.replacement());
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // this binop applied to {left_expect} and {right_expect}.
+  void CheckFoldBinop(Node* left_expect, Node* right_expect, Node* left,
+                      Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&jsgraph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(reduction.Changed());
+    CHECK_EQ(binop, reduction.replacement()->op());
+    CHECK_EQ(left_expect, reduction.replacement()->InputAt(0));
+    CHECK_EQ(right_expect, reduction.replacement()->InputAt(1));
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // the {op_expect} applied to {left_expect} and {right_expect}.
+  template <typename T>
+  void CheckFoldBinop(volatile T left_expect, const Operator* op_expect,
+                      Node* right_expect, Node* left, Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&jsgraph);
+    Reduction r = reducer.Reduce(n);
+    CHECK(r.Changed());
+    CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
+    CHECK_EQ(left_expect, ValueOf<T>(r.replacement()->InputAt(0)->op()));
+    CHECK_EQ(right_expect, r.replacement()->InputAt(1));
+  }
+
+  // Check that the reduction of this binop applied to {left} and {right} yields
+  // the {op_expect} applied to {left_expect} and {right_expect}.
+  template <typename T>
+  void CheckFoldBinop(Node* left_expect, const Operator* op_expect,
+                      volatile T right_expect, Node* left, Node* right) {
+    CHECK_NE(NULL, binop);
+    Node* n = graph.NewNode(binop, left, right);
+    MachineOperatorReducer reducer(&jsgraph);
+    Reduction r = reducer.Reduce(n);
+    CHECK(r.Changed());
+    CHECK_EQ(op_expect->opcode(), r.replacement()->op()->opcode());
+    CHECK_EQ(left_expect, r.replacement()->InputAt(0));
+    CHECK_EQ(right_expect, ValueOf<T>(r.replacement()->InputAt(1)->op()));
+  }
+
+  // Check that if the given constant appears on the left, the reducer will
+  // swap it to be on the right.
+  template <typename T>
+  void CheckPutConstantOnRight(volatile T constant) {
+    // TODO(titzer): CHECK(binop->HasProperty(Operator::kCommutative));
+    Node* p = Parameter();
+    Node* k = Constant<T>(constant);
+    {
+      Node* n = graph.NewNode(binop, k, p);
+      MachineOperatorReducer reducer(&jsgraph);
+      Reduction reduction = reducer.Reduce(n);
+      CHECK(!reduction.Changed() || reduction.replacement() == n);
+      CHECK_EQ(p, n->InputAt(0));
+      CHECK_EQ(k, n->InputAt(1));
+    }
+    {
+      Node* n = graph.NewNode(binop, p, k);
+      MachineOperatorReducer reducer(&jsgraph);
+      Reduction reduction = reducer.Reduce(n);
+      CHECK(!reduction.Changed());
+      CHECK_EQ(p, n->InputAt(0));
+      CHECK_EQ(k, n->InputAt(1));
+    }
+  }
+
+  // Check that if the given constant appears on the left, the reducer will
+  // *NOT* swap it to be on the right.
+  template <typename T>
+  void CheckDontPutConstantOnRight(volatile T constant) {
+    CHECK(!binop->HasProperty(Operator::kCommutative));
+    Node* p = Parameter();
+    Node* k = Constant<T>(constant);
+    Node* n = graph.NewNode(binop, k, p);
+    MachineOperatorReducer reducer(&jsgraph);
+    Reduction reduction = reducer.Reduce(n);
+    CHECK(!reduction.Changed());
+    CHECK_EQ(k, n->InputAt(0));
+    CHECK_EQ(p, n->InputAt(1));
+  }
+
+  Node* Parameter(int32_t index = 0) {
+    return graph.NewNode(common.Parameter(index), graph.start());
+  }
+};
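+
+// Usage pattern (sketch): set R.binop to the operator under test, then use
+// CheckFoldBinop for constant folding and CheckBinop for algebraic
+// identities, e.g.
+//   ReducerTester R;
+//   R.binop = R.machine.Int32Add();
+//   R.CheckFoldBinop<int32_t>(3, 1, 2);  // 1 + 2 folds to 3.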
+
+
+TEST(ReduceWord32And) {
+  ReducerTester R;
+  R.binop = R.machine.Word32And();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x & y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(33);
+  R.CheckPutConstantOnRight(44000);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* minus_1 = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(zero, x, zero);  // x  & 0  => 0
+  R.CheckBinop(zero, zero, x);  // 0  & x  => 0
+  R.CheckBinop(x, x, minus_1);  // x  & -1 => x
+  R.CheckBinop(x, minus_1, x);  // -1 & x  => x
+  R.CheckBinop(x, x, x);        // x  & x  => x
+}
+
+
+TEST(ReduceWord32Or) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Or();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x | y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(36);
+  R.CheckPutConstantOnRight(44001);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* minus_1 = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(x, x, zero);           // x  | 0  => x
+  R.CheckBinop(x, zero, x);           // 0  | x  => x
+  R.CheckBinop(minus_1, x, minus_1);  // x  | -1 => -1
+  R.CheckBinop(minus_1, minus_1, x);  // -1 | x  => -1
+  R.CheckBinop(x, x, x);              // x  | x  => x
+}
+
+
+TEST(ReduceWord32Xor) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Xor();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x ^ y, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(39);
+  R.CheckPutConstantOnRight(4403);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);            // x ^ 0  => x
+  R.CheckBinop(x, zero, x);            // 0 ^ x  => x
+  R.CheckFoldBinop<int32_t>(0, x, x);  // x ^ x  => 0
+}
+
+
+TEST(ReduceWord32Shl) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Shl();
+
+  // TODO(titzer): out of range shifts
+  FOR_INT32_INPUTS(i) {
+    for (int y = 0; y < 32; y++) {
+      int32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x << y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x << 0  => x
+}
+
+
+TEST(ReduceWord32Shr) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Shr();
+
+  // TODO(titzer): test out of range shifts
+  FOR_UINT32_INPUTS(i) {
+    for (uint32_t y = 0; y < 32; y++) {
+      uint32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x >> y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x >>> 0  => x
+}
+
+
+TEST(ReduceWord32Sar) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Sar();
+
+  // TODO(titzer): test out of range shifts
+  FOR_INT32_INPUTS(i) {
+    for (int32_t y = 0; y < 32; y++) {
+      int32_t x = *i;
+      R.CheckFoldBinop<int32_t>(x >> y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(44);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x >> 0  => x
+}
+
+
+TEST(ReduceWord32Equal) {
+  ReducerTester R;
+  R.binop = R.machine.Word32Equal();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x == y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckPutConstantOnRight(48);
+  R.CheckPutConstantOnRight(-48);
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(1, x, x);  // x == x  => 1
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y == 0  => x == y
+  R.CheckFoldBinop(x, y, zero, sub);   // 0 == x - y  => x == y
+}
+
+
+TEST(ReduceInt32Add) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Add();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x + y, x, y);  // TODO(titzer): signed overflow
+    }
+  }
+
+  R.CheckPutConstantOnRight(41);
+  R.CheckPutConstantOnRight(4407);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x + 0  => x
+  R.CheckBinop(x, zero, x);  // 0 + x  => x
+}
+
+
+TEST(ReduceInt32Sub) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Sub();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x - y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(412);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckBinop(x, x, zero);  // x - 0  => x
+}
+
+
+TEST(ReduceInt32Mul) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Mul();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x * y, x, y);  // TODO(titzer): signed overflow
+    }
+  }
+
+  R.CheckPutConstantOnRight(4111);
+  R.CheckPutConstantOnRight(-4407);
+
+  Node* x = R.Parameter();
+  Node* zero = R.Constant<int32_t>(0);
+  Node* one = R.Constant<int32_t>(1);
+  Node* minus_one = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(zero, x, zero);  // x * 0  => 0
+  R.CheckBinop(zero, zero, x);  // 0 * x  => 0
+  R.CheckBinop(x, x, one);      // x * 1  => x
+  R.CheckBinop(x, one, x);      // 1 * x  => x
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, minus_one,
+                            x);  // -1 * x  => 0 - x
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, x,
+                            minus_one);  // x * -1  => 0 - x
+
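+  // Strength reduction: multiplying by a power of two becomes a left shift,
+  // e.g. x * 8 => x << 3, which holds in 32-bit two's complement arithmetic
+  // even when the multiplication overflows.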
+  for (int32_t n = 1; n < 31; ++n) {
+    Node* multiplier = R.Constant<int32_t>(1 << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shl(), n, x,
+                              multiplier);  // x * 2^n => x << n
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shl(), n, multiplier,
+                              x);  // 2^n * x => x << n
+  }
+}
+
+
+TEST(ReduceInt32Div) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Div();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      if (y == 0) continue;              // TODO(titzer): test / 0
+      int32_t r = y == -1 ? -x : x / y;  // INT_MIN / -1 is UB in C++.
+      R.CheckFoldBinop<int32_t>(r, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41111);
+  R.CheckDontPutConstantOnRight(-44071);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+  Node* minus_one = R.Constant<int32_t>(-1);
+
+  R.CheckBinop(x, x, one);  // x / 1  => x
+  // TODO(titzer):                          // 0 / x  => 0 if x != 0
+  // TODO(titzer):                          // x / 2^n => x >> n and round
+  R.CheckFoldBinop<int32_t>(0, R.machine.Int32Sub(), x, x,
+                            minus_one);  // x / -1  => 0 - x
+}
+
+
+TEST(ReduceInt32UDiv) {
+  ReducerTester R;
+  R.binop = R.machine.Int32UDiv();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      if (y == 0) continue;  // TODO(titzer): test / 0
+      R.CheckFoldBinop<int32_t>(x / y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41311);
+  R.CheckDontPutConstantOnRight(-44371);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckBinop(x, x, one);  // x / 1  => x
+  // TODO(titzer):                            // 0 / x  => 0 if x != 0
+
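+  // Unsigned division by a power of two is a logical right shift,
+  // e.g. x / 8 => x >> 3; unlike the signed case, no rounding fix-up is
+  // needed.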
+  for (uint32_t n = 1; n < 32; ++n) {
+    Node* divisor = R.Constant<int32_t>(1u << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32Shr(), n, x,
+                              divisor);  // x / 2^n => x >> n
+  }
+}
+
+
+TEST(ReduceInt32Mod) {
+  ReducerTester R;
+  R.binop = R.machine.Int32Mod();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      if (y == 0) continue;             // TODO(titzer): test % 0
+      int32_t r = y == -1 ? 0 : x % y;  // INT_MIN % -1 is UB in C++.
+      R.CheckFoldBinop<int32_t>(r, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(413);
+  R.CheckDontPutConstantOnRight(-4401);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckFoldBinop<int32_t>(0, x, one);  // x % 1  => 0
+  // TODO(titzer):                       // x % 2^n => x & 2^n-1 and round
+}
+
+
+TEST(ReduceInt32UMod) {
+  ReducerTester R;
+  R.binop = R.machine.Int32UMod();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      if (y == 0) continue;  // TODO(titzer): test x % 0
+      R.CheckFoldBinop<int32_t>(x % y, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(417);
+  R.CheckDontPutConstantOnRight(-4371);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<int32_t>(1);
+
+  R.CheckFoldBinop<int32_t>(0, x, one);  // x % 1  => 0
+
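+  // Unsigned x % 2^n keeps only the low n bits, so it reduces to a mask,
+  // e.g. x % 8 => x & 7.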
+  for (uint32_t n = 1; n < 32; ++n) {
+    Node* divisor = R.Constant<int32_t>(1u << n);
+    R.CheckFoldBinop<int32_t>(x, R.machine.Word32And(), (1u << n) - 1, x,
+                              divisor);  // x % 2^n => x & 2^n-1
+  }
+}
+
+
+TEST(ReduceInt32LessThan) {
+  ReducerTester R;
+  R.binop = R.machine.Int32LessThan();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(0, x, x);  // x < x  => 0
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y < 0 => x < y
+  R.CheckFoldBinop(y, x, zero, sub);   // 0 < x - y => y < x
+}
+
+
+TEST(ReduceInt32LessThanOrEqual) {
+  ReducerTester R;
+  R.binop = R.machine.Int32LessThanOrEqual();
+
+  FOR_INT32_INPUTS(pl) {
+    FOR_INT32_INPUTS(pr) {
+      int32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y);
+    }
+  }
+
+  FOR_INT32_INPUTS(i) { R.CheckDontPutConstantOnRight<int32_t>(*i); }
+
+  Node* x = R.Parameter(0);
+  Node* y = R.Parameter(1);
+  Node* zero = R.Constant<int32_t>(0);
+  Node* sub = R.graph.NewNode(R.machine.Int32Sub(), x, y);
+
+  R.CheckFoldBinop<int32_t>(1, x, x);  // x <= x => 1
+  R.CheckFoldBinop(x, y, sub, zero);   // x - y <= 0 => x <= y
+  R.CheckFoldBinop(y, x, zero, sub);   // 0 <= x - y => y <= x
+}
+
+
+TEST(ReduceUint32LessThan) {
+  ReducerTester R;
+  R.binop = R.machine.Uint32LessThan();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x < y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter();
+  Node* max = R.maxuint32;
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckFoldBinop<int32_t>(0, max, x);   // M < x  => 0
+  R.CheckFoldBinop<int32_t>(0, x, zero);  // x < 0  => 0
+  R.CheckFoldBinop<int32_t>(0, x, x);     // x < x  => 0
+}
+
+
+TEST(ReduceUint32LessThanOrEqual) {
+  ReducerTester R;
+  R.binop = R.machine.Uint32LessThanOrEqual();
+
+  FOR_UINT32_INPUTS(pl) {
+    FOR_UINT32_INPUTS(pr) {
+      uint32_t x = *pl, y = *pr;
+      R.CheckFoldBinop<int32_t>(x <= y ? 1 : 0, x, y);
+    }
+  }
+
+  R.CheckDontPutConstantOnRight(41399);
+  R.CheckDontPutConstantOnRight(-440197);
+
+  Node* x = R.Parameter();
+  Node* max = R.maxuint32;
+  Node* zero = R.Constant<int32_t>(0);
+
+  R.CheckFoldBinop<int32_t>(1, x, max);   // x <= M  => 1
+  R.CheckFoldBinop<int32_t>(1, zero, x);  // 0 <= x  => 1
+  R.CheckFoldBinop<int32_t>(1, x, x);     // x <= x  => 1
+}
+
+
+TEST(ReduceLoadStore) {
+  ReducerTester R;
+
+  Node* base = R.Constant<int32_t>(11);
+  Node* index = R.Constant<int32_t>(4);
+  Node* load = R.graph.NewNode(R.machine.Load(kMachInt32), base, index);
+
+  {
+    MachineOperatorReducer reducer(&R.jsgraph);
+    Reduction reduction = reducer.Reduce(load);
+    CHECK(!reduction.Changed());  // loads should not be reduced.
+  }
+
+  {
+    Node* store = R.graph.NewNode(
+        R.machine.Store(StoreRepresentation(kMachInt32, kNoWriteBarrier)), base,
+        index, load);
+    MachineOperatorReducer reducer(&R.jsgraph);
+    Reduction reduction = reducer.Reduce(store);
+    CHECK(!reduction.Changed());  // stores should not be reduced.
+  }
+}
+
+
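+// IEEE 754 arithmetic propagates NaN: a binop with a NaN input yields NaN,
+// so the reducer can fold such nodes directly to the NaN constant.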
+static void CheckNans(ReducerTester* R) {
+  Node* x = R->Parameter();
+  std::vector<double> nans = ValueHelper::nan_vector();
+  for (std::vector<double>::const_iterator pl = nans.begin(); pl != nans.end();
+       ++pl) {
+    for (std::vector<double>::const_iterator pr = nans.begin();
+         pr != nans.end(); ++pr) {
+      Node* nan1 = R->Constant<double>(*pl);
+      Node* nan2 = R->Constant<double>(*pr);
+      R->CheckBinop(nan1, x, nan1);     // x op NaN => NaN
+      R->CheckBinop(nan1, nan1, x);     // NaN op x => NaN
+      R->CheckBinop(nan1, nan2, nan1);  // NaN op NaN => NaN
+    }
+  }
+}
+
+
+TEST(ReduceFloat64Add) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Add();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x + y, x, y);
+    }
+  }
+
+  FOR_FLOAT64_INPUTS(i) { R.CheckPutConstantOnRight(*i); }
+  // TODO(titzer): CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Sub) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Sub();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x - y, x, y);
+    }
+  }
+  // TODO(titzer): CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Mul) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Mul();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x * y, x, y);
+    }
+  }
+
+  double inf = V8_INFINITY;
+  R.CheckPutConstantOnRight(-inf);
+  R.CheckPutConstantOnRight(-0.1);
+  R.CheckPutConstantOnRight(0.1);
+  R.CheckPutConstantOnRight(inf);
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<double>(1.0);
+
+  R.CheckBinop(x, x, one);  // x * 1.0 => x
+  R.CheckBinop(x, one, x);  // 1.0 * x => x
+
+  CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Div) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Div();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(x / y, x, y);
+    }
+  }
+
+  Node* x = R.Parameter();
+  Node* one = R.Constant<double>(1.0);
+
+  R.CheckBinop(x, x, one);  // x / 1.0 => x
+
+  CheckNans(&R);
+}
+
+
+TEST(ReduceFloat64Mod) {
+  ReducerTester R;
+  R.binop = R.machine.Float64Mod();
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double x = *pl, y = *pr;
+      R.CheckFoldBinop<double>(modulo(x, y), x, y);
+    }
+  }
+
+  CheckNans(&R);
+}
+
+
+// TODO(titzer): test MachineOperatorReducer for Word64And
+// TODO(titzer): test MachineOperatorReducer for Word64Or
+// TODO(titzer): test MachineOperatorReducer for Word64Xor
+// TODO(titzer): test MachineOperatorReducer for Word64Shl
+// TODO(titzer): test MachineOperatorReducer for Word64Shr
+// TODO(titzer): test MachineOperatorReducer for Word64Sar
+// TODO(titzer): test MachineOperatorReducer for Word64Equal
+// TODO(titzer): test MachineOperatorReducer for Word64Not
+// TODO(titzer): test MachineOperatorReducer for Int64Add
+// TODO(titzer): test MachineOperatorReducer for Int64Sub
+// TODO(titzer): test MachineOperatorReducer for Int64Mul
+// TODO(titzer): test MachineOperatorReducer for Int64UMul
+// TODO(titzer): test MachineOperatorReducer for Int64Div
+// TODO(titzer): test MachineOperatorReducer for Int64UDiv
+// TODO(titzer): test MachineOperatorReducer for Int64Mod
+// TODO(titzer): test MachineOperatorReducer for Int64UMod
+// TODO(titzer): test MachineOperatorReducer for Int64Neg
+// TODO(titzer): test MachineOperatorReducer for ChangeInt32ToFloat64
+// TODO(titzer): test MachineOperatorReducer for ChangeFloat64ToInt32
+// TODO(titzer): test MachineOperatorReducer for Float64Compare
diff --git a/test/cctest/compiler/test-node-algorithm.cc b/test/cctest/compiler/test-node-algorithm.cc
new file mode 100644
index 0000000..10f98a6
--- /dev/null
+++ b/test/cctest/compiler/test-node-algorithm.cc
@@ -0,0 +1,330 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <vector>
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/operator.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+class PreNodeVisitor : public NullNodeVisitor {
+ public:
+  GenericGraphVisit::Control Pre(Node* node) {
+    printf("NODE ID: %d\n", node->id());
+    nodes_.push_back(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  std::vector<Node*> nodes_;
+};
+
+
+class PostNodeVisitor : public NullNodeVisitor {
+ public:
+  GenericGraphVisit::Control Post(Node* node) {
+    printf("NODE ID: %d\n", node->id());
+    nodes_.push_back(node);
+    return GenericGraphVisit::CONTINUE;
+  }
+  std::vector<Node*> nodes_;
+};
+
+
+TEST(TestUseNodeVisitEmpty) {
+  GraphWithStartNodeTester graph;
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(1, static_cast<int>(node_visitor.nodes_.size()));
+}
+
+
+TEST(TestUseNodePreOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n2, n3);
+  Node* n5 = graph.NewNode(&dummy_operator, n4, n2);
+  graph.SetEnd(n5);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(5, static_cast<int>(node_visitor.nodes_.size()));
+  CHECK(graph.start()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n2->id() == node_visitor.nodes_[1]->id());
+  CHECK(n3->id() == node_visitor.nodes_[2]->id());
+  CHECK(n4->id() == node_visitor.nodes_[3]->id());
+  CHECK(n5->id() == node_visitor.nodes_[4]->id());
+}
+
+
+TEST(TestInputNodePreOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n2, n3);
+  Node* n5 = graph.NewNode(&dummy_operator, n4, n2);
+  graph.SetEnd(n5);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeInputsFromEnd(&node_visitor);
+  CHECK_EQ(5, static_cast<int>(node_visitor.nodes_.size()));
+  CHECK(n5->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n2->id() == node_visitor.nodes_[2]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[3]->id());
+  CHECK(n3->id() == node_visitor.nodes_[4]->id());
+}
+
+
+TEST(TestUseNodePostOrderVisitSimple) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n2);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* end_dependencies[4] = {n4, n5, n6, n7};
+  Node* n8 = graph.NewNode(&dummy_operator, 4, end_dependencies);
+  graph.SetEnd(n8);
+
+  PostNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(8, static_cast<int>(node_visitor.nodes_.size()));
+  CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n5->id() == node_visitor.nodes_[2]->id());
+  CHECK(n6->id() == node_visitor.nodes_[3]->id());
+  CHECK(n2->id() == node_visitor.nodes_[4]->id());
+  CHECK(n7->id() == node_visitor.nodes_[5]->id());
+  CHECK(n3->id() == node_visitor.nodes_[6]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[7]->id());
+}
+
+
+TEST(TestUseNodePostOrderVisitLong) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n3);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* n8 = graph.NewNode(&dummy_operator, n5);
+  Node* n9 = graph.NewNode(&dummy_operator, n5);
+  Node* n10 = graph.NewNode(&dummy_operator, n9);
+  Node* n11 = graph.NewNode(&dummy_operator, n9);
+  Node* end_dependencies[6] = {n4, n8, n10, n11, n6, n7};
+  Node* n12 = graph.NewNode(&dummy_operator, 6, end_dependencies);
+  graph.SetEnd(n12);
+
+  PostNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(12, static_cast<int>(node_visitor.nodes_.size()));
+  CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
+  CHECK(n4->id() == node_visitor.nodes_[1]->id());
+  CHECK(n8->id() == node_visitor.nodes_[2]->id());
+  CHECK(n10->id() == node_visitor.nodes_[3]->id());
+  CHECK(n11->id() == node_visitor.nodes_[4]->id());
+  CHECK(n9->id() == node_visitor.nodes_[5]->id());
+  CHECK(n5->id() == node_visitor.nodes_[6]->id());
+  CHECK(n2->id() == node_visitor.nodes_[7]->id());
+  CHECK(n6->id() == node_visitor.nodes_[8]->id());
+  CHECK(n7->id() == node_visitor.nodes_[9]->id());
+  CHECK(n3->id() == node_visitor.nodes_[10]->id());
+  CHECK(graph.start()->id() == node_visitor.nodes_[11]->id());
+}
+
+
+TEST(TestUseNodePreOrderVisitCycle) {
+  GraphWithStartNodeTester graph;
+  Node* n0 = graph.start_node();
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n1);
+  n0->AppendInput(graph.main_zone(), n2);
+  graph.SetStart(n0);
+  graph.SetEnd(n2);
+
+  PreNodeVisitor node_visitor;
+  graph.VisitNodeUsesFromStart(&node_visitor);
+
+  CHECK_EQ(3, static_cast<int>(node_visitor.nodes_.size()));
+  CHECK(n0->id() == node_visitor.nodes_[0]->id());
+  CHECK(n1->id() == node_visitor.nodes_[1]->id());
+  CHECK(n2->id() == node_visitor.nodes_[2]->id());
+}
+
+
+struct ReenterNodeVisitor : NullNodeVisitor {
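+  // Records the visit order while exercising the traversal controls
+  // (as inferred from this test): CONTINUE proceeds normally, SKIP prunes
+  // the node's successors, DEFER postpones the visit, and REENTER permits
+  // visiting the node again.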
+  GenericGraphVisit::Control Pre(Node* node) {
+    printf("[%d] PRE NODE: %d\n", static_cast<int>(nodes_.size()), node->id());
+    nodes_.push_back(node->id());
+    int size = static_cast<int>(nodes_.size());
+    switch (node->id()) {
+      case 0:
+        return size < 6 ? GenericGraphVisit::REENTER : GenericGraphVisit::SKIP;
+      case 1:
+        return size < 4 ? GenericGraphVisit::DEFER
+                        : GenericGraphVisit::CONTINUE;
+      default:
+        return GenericGraphVisit::REENTER;
+    }
+  }
+
+  GenericGraphVisit::Control Post(Node* node) {
+    printf("[%d] POST NODE: %d\n", static_cast<int>(nodes_.size()), node->id());
+    nodes_.push_back(-node->id());
+    return node->id() == 4 ? GenericGraphVisit::REENTER
+                           : GenericGraphVisit::CONTINUE;
+  }
+
+  void PreEdge(Node* from, int index, Node* to) {
+    printf("[%d] PRE EDGE: %d-%d\n", static_cast<int>(edges_.size()),
+           from->id(), to->id());
+    edges_.push_back(std::make_pair(from->id(), to->id()));
+  }
+
+  void PostEdge(Node* from, int index, Node* to) {
+    printf("[%d] POST EDGE: %d-%d\n", static_cast<int>(edges_.size()),
+           from->id(), to->id());
+    edges_.push_back(std::make_pair(-from->id(), -to->id()));
+  }
+
+  std::vector<int> nodes_;
+  std::vector<std::pair<int, int> > edges_;
+};
+
+
+TEST(TestUseNodeReenterVisit) {
+  GraphWithStartNodeTester graph;
+  Node* n0 = graph.start_node();
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n0);
+  Node* n5 = graph.NewNode(&dummy_operator, n4);
+  n0->AppendInput(graph.main_zone(), n3);
+  graph.SetStart(n0);
+  graph.SetEnd(n5);
+
+  ReenterNodeVisitor visitor;
+  graph.VisitNodeUsesFromStart(&visitor);
+
+  CHECK_EQ(22, static_cast<int>(visitor.nodes_.size()));
+  CHECK_EQ(24, static_cast<int>(visitor.edges_.size()));
+
+  CHECK(n0->id() == visitor.nodes_[0]);
+  CHECK(n0->id() == visitor.edges_[0].first);
+  CHECK(n1->id() == visitor.edges_[0].second);
+  CHECK(n1->id() == visitor.nodes_[1]);
+  // N1 is deferred.
+  CHECK(-n1->id() == visitor.edges_[1].second);
+  CHECK(-n0->id() == visitor.edges_[1].first);
+  CHECK(n0->id() == visitor.edges_[2].first);
+  CHECK(n2->id() == visitor.edges_[2].second);
+  CHECK(n2->id() == visitor.nodes_[2]);
+  CHECK(n2->id() == visitor.edges_[3].first);
+  CHECK(n3->id() == visitor.edges_[3].second);
+  CHECK(n3->id() == visitor.nodes_[3]);
+  // Circle back to N0, which we may reenter for now.
+  CHECK(n3->id() == visitor.edges_[4].first);
+  CHECK(n0->id() == visitor.edges_[4].second);
+  CHECK(n0->id() == visitor.nodes_[4]);
+  CHECK(n0->id() == visitor.edges_[5].first);
+  CHECK(n1->id() == visitor.edges_[5].second);
+  CHECK(n1->id() == visitor.nodes_[5]);
+  // This time N1 is no longer deferred.
+  CHECK(-n1->id() == visitor.nodes_[6]);
+  CHECK(-n1->id() == visitor.edges_[6].second);
+  CHECK(-n0->id() == visitor.edges_[6].first);
+  CHECK(n0->id() == visitor.edges_[7].first);
+  CHECK(n2->id() == visitor.edges_[7].second);
+  CHECK(n2->id() == visitor.nodes_[7]);
+  CHECK(n2->id() == visitor.edges_[8].first);
+  CHECK(n3->id() == visitor.edges_[8].second);
+  CHECK(n3->id() == visitor.nodes_[8]);
+  CHECK(n3->id() == visitor.edges_[9].first);
+  CHECK(n0->id() == visitor.edges_[9].second);
+  CHECK(n0->id() == visitor.nodes_[9]);
+  // This time we break at N0 and skip it.
+  CHECK(-n0->id() == visitor.edges_[10].second);
+  CHECK(-n3->id() == visitor.edges_[10].first);
+  CHECK(-n3->id() == visitor.nodes_[10]);
+  CHECK(-n3->id() == visitor.edges_[11].second);
+  CHECK(-n2->id() == visitor.edges_[11].first);
+  CHECK(-n2->id() == visitor.nodes_[11]);
+  CHECK(-n2->id() == visitor.edges_[12].second);
+  CHECK(-n0->id() == visitor.edges_[12].first);
+  CHECK(n0->id() == visitor.edges_[13].first);
+  CHECK(n4->id() == visitor.edges_[13].second);
+  CHECK(n4->id() == visitor.nodes_[12]);
+  CHECK(n4->id() == visitor.edges_[14].first);
+  CHECK(n5->id() == visitor.edges_[14].second);
+  CHECK(n5->id() == visitor.nodes_[13]);
+  CHECK(-n5->id() == visitor.nodes_[14]);
+  CHECK(-n5->id() == visitor.edges_[15].second);
+  CHECK(-n4->id() == visitor.edges_[15].first);
+  CHECK(-n4->id() == visitor.nodes_[15]);
+  CHECK(-n4->id() == visitor.edges_[16].second);
+  CHECK(-n0->id() == visitor.edges_[16].first);
+  CHECK(-n0->id() == visitor.nodes_[16]);
+  CHECK(-n0->id() == visitor.edges_[17].second);
+  CHECK(-n3->id() == visitor.edges_[17].first);
+  CHECK(-n3->id() == visitor.nodes_[17]);
+  CHECK(-n3->id() == visitor.edges_[18].second);
+  CHECK(-n2->id() == visitor.edges_[18].first);
+  CHECK(-n2->id() == visitor.nodes_[18]);
+  CHECK(-n2->id() == visitor.edges_[19].second);
+  CHECK(-n0->id() == visitor.edges_[19].first);
+  // N4 may be reentered.
+  CHECK(n0->id() == visitor.edges_[20].first);
+  CHECK(n4->id() == visitor.edges_[20].second);
+  CHECK(n4->id() == visitor.nodes_[19]);
+  CHECK(n4->id() == visitor.edges_[21].first);
+  CHECK(n5->id() == visitor.edges_[21].second);
+  CHECK(-n5->id() == visitor.edges_[22].second);
+  CHECK(-n4->id() == visitor.edges_[22].first);
+  CHECK(-n4->id() == visitor.nodes_[20]);
+  CHECK(-n4->id() == visitor.edges_[23].second);
+  CHECK(-n0->id() == visitor.edges_[23].first);
+  CHECK(-n0->id() == visitor.nodes_[21]);
+}
+
+
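+// Smoke test: build a small graph and print it in Graphviz DOT format; the
+// test only checks that printing completes without crashing.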
+TEST(TestPrintNodeGraphToNodeGraphviz) {
+  GraphWithStartNodeTester graph;
+  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
+  Node* n4 = graph.NewNode(&dummy_operator, n2);
+  Node* n5 = graph.NewNode(&dummy_operator, n2);
+  Node* n6 = graph.NewNode(&dummy_operator, n3);
+  Node* n7 = graph.NewNode(&dummy_operator, n3);
+  Node* n8 = graph.NewNode(&dummy_operator, n5);
+  Node* n9 = graph.NewNode(&dummy_operator, n5);
+  Node* n10 = graph.NewNode(&dummy_operator, n9);
+  Node* n11 = graph.NewNode(&dummy_operator, n9);
+  Node* end_dependencies[6] = {n4, n8, n10, n11, n6, n7};
+  Node* n12 = graph.NewNode(&dummy_operator, 6, end_dependencies);
+  graph.SetEnd(n12);
+
+  OFStream os(stdout);
+  os << AsDOT(graph);
+}
diff --git a/test/cctest/compiler/test-node-cache.cc b/test/cctest/compiler/test-node-cache.cc
new file mode 100644
index 0000000..3569386
--- /dev/null
+++ b/test/cctest/compiler/test-node-cache.cc
@@ -0,0 +1,160 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node-cache.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
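+// Finding the same key repeatedly must always return the same cache slot.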
+TEST(Int32Constant_back_to_back) {
+  GraphTester graph;
+  Int32NodeCache cache;
+
+  for (int i = -2000000000; i < 2000000000; i += 3315177) {
+    Node** pos = cache.Find(graph.zone(), i);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), i);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(Int32Constant_five) {
+  GraphTester graph;
+  Int32NodeCache cache;
+  CommonOperatorBuilder common(graph.zone());
+
+  int32_t constants[] = {static_cast<int32_t>(0x80000000), -77, 0, 1, -1};
+
+  Node* nodes[arraysize(constants)];
+
+  for (size_t i = 0; i < arraysize(constants); i++) {
+    int32_t k = constants[i];
+    Node* node = graph.NewNode(common.Int32Constant(k));
+    *cache.Find(graph.zone(), k) = nodes[i] = node;
+  }
+
+  for (size_t i = 0; i < arraysize(constants); i++) {
+    int32_t k = constants[i];
+    CHECK_EQ(nodes[i], *cache.Find(graph.zone(), k));
+  }
+}
+
+
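+// The cache is not required to retain every entry, so only check that more
+// than four of the 1500 lookups hit a previously stored node.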
+TEST(Int32Constant_hits) {
+  GraphTester graph;
+  Int32NodeCache cache;
+  const int32_t kSize = 1500;
+  Node** nodes = graph.zone()->NewArray<Node*>(kSize);
+  CommonOperatorBuilder common(graph.zone());
+
+  for (int i = 0; i < kSize; i++) {
+    int32_t v = i * -55;
+    nodes[i] = graph.NewNode(common.Int32Constant(v));
+    *cache.Find(graph.zone(), v) = nodes[i];
+  }
+
+  int hits = 0;
+  for (int i = 0; i < kSize; i++) {
+    int32_t v = i * -55;
+    Node** pos = cache.Find(graph.zone(), v);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
+
+
+TEST(Int64Constant_back_to_back) {
+  GraphTester graph;
+  Int64NodeCache cache;
+
+  for (int64_t i = -2000000000; i < 2000000000; i += 3315177) {
+    Node** pos = cache.Find(graph.zone(), i);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), i);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(Int64Constant_hits) {
+  GraphTester graph;
+  Int64NodeCache cache;
+  const int32_t kSize = 1500;
+  Node** nodes = graph.zone()->NewArray<Node*>(kSize);
+  CommonOperatorBuilder common(graph.zone());
+
+  for (int i = 0; i < kSize; i++) {
+    int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
+    nodes[i] = graph.NewNode(common.Int32Constant(i));
+    *cache.Find(graph.zone(), v) = nodes[i];
+  }
+
+  int hits = 0;
+  for (int i = 0; i < kSize; i++) {
+    int64_t v = static_cast<int64_t>(i) * static_cast<int64_t>(5003001);
+    Node** pos = cache.Find(graph.zone(), v);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
+
+
+TEST(PtrConstant_back_to_back) {
+  GraphTester graph;
+  PtrNodeCache cache;
+  int32_t buffer[50];
+
+  for (int32_t* p = buffer;
+       (p - buffer) < static_cast<ptrdiff_t>(arraysize(buffer)); p++) {
+    Node** pos = cache.Find(graph.zone(), p);
+    CHECK_NE(NULL, pos);
+    for (int j = 0; j < 3; j++) {
+      Node** npos = cache.Find(graph.zone(), p);
+      CHECK_EQ(pos, npos);
+    }
+  }
+}
+
+
+TEST(PtrConstant_hits) {
+  GraphTester graph;
+  PtrNodeCache cache;
+  const int32_t kSize = 50;
+  int32_t buffer[kSize];
+  Node* nodes[kSize];
+  CommonOperatorBuilder common(graph.zone());
+
+  for (size_t i = 0; i < arraysize(buffer); i++) {
+    int k = static_cast<int>(i);
+    int32_t* p = &buffer[i];
+    nodes[i] = graph.NewNode(common.Int32Constant(k));
+    *cache.Find(graph.zone(), p) = nodes[i];
+  }
+
+  int hits = 0;
+  for (size_t i = 0; i < arraysize(buffer); i++) {
+    int32_t* p = &buffer[i];
+    Node** pos = cache.Find(graph.zone(), p);
+    if (*pos != NULL) {
+      CHECK_EQ(nodes[i], *pos);
+      hits++;
+    }
+  }
+  CHECK_LT(4, hits);
+}
diff --git a/test/cctest/compiler/test-node.cc b/test/cctest/compiler/test-node.cc
new file mode 100644
index 0000000..28d807e
--- /dev/null
+++ b/test/cctest/compiler/test-node.cc
@@ -0,0 +1,841 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <functional>
+
+#include "src/v8.h"
+
+#include "graph-tester.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+TEST(NodeAllocation) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK(n2->id() != n1->id());
+}
+
+
+TEST(NodeWithOpcode) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK(n1->op() == &dummy_operator);
+  CHECK(n2->op() == &dummy_operator);
+}
+
+
+TEST(NodeInputs1) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(1, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+}
+
+
+TEST(NodeInputs2) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+  CHECK_EQ(2, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+}
+
+
+TEST(NodeInputs3) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1, n1);
+  CHECK_EQ(3, n2->InputCount());
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK(n1 == n2->InputAt(2));
+}
+
+
+TEST(NodeInputIteratorEmpty) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node::Inputs::iterator i(n1->inputs().begin());
+  int input_count = 0;
+  for (; i != n1->inputs().end(); ++i) {
+    input_count++;
+  }
+  CHECK_EQ(0, input_count);
+}
+
+
+TEST(NodeInputIteratorOne) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node::Inputs::iterator i(n1->inputs().begin());
+  CHECK_EQ(1, n1->InputCount());
+  CHECK_EQ(n0, *i);
+  ++i;
+  CHECK(n1->inputs().end() == i);
+}
+
+
+TEST(NodeUseIteratorEmpty) {
+  GraphTester graph;
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i(n1->uses().begin());
+  int use_count = 0;
+  for (; i != n1->uses().end(); ++i) {
+    Node::Edge edge(i.edge());
+    USE(edge);
+    use_count++;
+  }
+  CHECK_EQ(0, use_count);
+}
+
+
+TEST(NodeUseIteratorOne) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node::Uses::iterator i(n0->uses().begin());
+  CHECK_EQ(n1, *i);
+  ++i;
+  CHECK(n0->uses().end() == i);
+}
+
+
+TEST(NodeUseIteratorReplaceNoUses) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  Node* n3 = graph.NewNode(&dummy_operator, n2);
+  n0->ReplaceUses(n1);
+  CHECK(n0->uses().begin() == n0->uses().end());
+  n0->ReplaceUses(n2);
+  CHECK(n0->uses().begin() == n0->uses().end());
+  USE(n3);
+}
+
+
+TEST(NodeUseIteratorReplaceUses) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i1(n0->uses().begin());
+  CHECK_EQ(n1, *i1);
+  ++i1;
+  CHECK_EQ(n2, *i1);
+  n0->ReplaceUses(n3);
+  Node::Uses::iterator i2(n3->uses().begin());
+  CHECK_EQ(n1, *i2);
+  ++i2;
+  CHECK_EQ(n2, *i2);
+  Node::Inputs::iterator i3(n1->inputs().begin());
+  CHECK_EQ(n3, *i3);
+  ++i3;
+  CHECK(n1->inputs().end() == i3);
+  Node::Inputs::iterator i4(n2->inputs().begin());
+  CHECK_EQ(n3, *i4);
+  ++i4;
+  CHECK(n2->inputs().end() == i4);
+}
+
+
+TEST(NodeUseIteratorReplaceUsesSelf) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+
+  n1->ReplaceInput(0, n1);  // Create self-reference.
+
+  Node::Uses::iterator i1(n1->uses().begin());
+  CHECK_EQ(n1, *i1);
+
+  n1->ReplaceUses(n3);
+
+  CHECK(n1->uses().begin() == n1->uses().end());
+
+  Node::Uses::iterator i2(n3->uses().begin());
+  CHECK_EQ(n1, *i2);
+  ++i2;
+  CHECK(n1->uses().end() == i2);
+}
+
+
+TEST(ReplaceInput) {
+  GraphTester graph;
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator);
+  Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  Node::Inputs::iterator i1(n3->inputs().begin());
+  CHECK(n0 == *i1);
+  CHECK_EQ(n0, n3->InputAt(0));
+  ++i1;
+  CHECK_EQ(n1, *i1);
+  CHECK_EQ(n1, n3->InputAt(1));
+  ++i1;
+  CHECK_EQ(n2, *i1);
+  CHECK_EQ(n2, n3->InputAt(2));
+  ++i1;
+  CHECK(i1 == n3->inputs().end());
+
+  Node::Uses::iterator i2(n1->uses().begin());
+  CHECK_EQ(n3, *i2);
+  ++i2;
+  CHECK(i2 == n1->uses().end());
+
+  Node* n4 = graph.NewNode(&dummy_operator);
+  Node::Uses::iterator i3(n4->uses().begin());
+  CHECK(i3 == n4->uses().end());
+
+  n3->ReplaceInput(1, n4);
+
+  Node::Uses::iterator i4(n1->uses().begin());
+  CHECK(i4 == n1->uses().end());
+
+  Node::Uses::iterator i5(n4->uses().begin());
+  CHECK_EQ(n3, *i5);
+  ++i5;
+  CHECK(i5 == n4->uses().end());
+
+  Node::Inputs::iterator i6(n3->inputs().begin());
+  CHECK(n0 == *i6);
+  CHECK_EQ(n0, n3->InputAt(0));
+  ++i6;
+  CHECK_EQ(n4, *i6);
+  CHECK_EQ(n4, n3->InputAt(1));
+  ++i6;
+  CHECK_EQ(n2, *i6);
+  CHECK_EQ(n2, n3->InputAt(2));
+  ++i6;
+  CHECK(i6 == n3->inputs().end());
+}
+
+
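+// A node is owned by another node exactly when that other node is its sole
+// user; gaining a second use revokes ownership.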
+TEST(OwnedBy) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+
+    CHECK(!n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    CHECK(n0->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+
+    Node* n3 = graph.NewNode(&dummy_operator, n0);
+    CHECK(!n0->OwnedBy(n2));
+    CHECK(!n0->OwnedBy(n3));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n3->OwnedBy(n0));
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    CHECK(n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    CHECK(!n0->OwnedBy(n1));
+    CHECK(!n0->OwnedBy(n2));
+    CHECK(!n1->OwnedBy(n0));
+    CHECK(!n1->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n2->OwnedBy(n1));
+
+    Node* n3 = graph.NewNode(&dummy_operator);
+    n2->ReplaceInput(0, n3);
+
+    CHECK(n0->OwnedBy(n1));
+    CHECK(!n1->OwnedBy(n0));
+    CHECK(!n1->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n0));
+    CHECK(!n2->OwnedBy(n1));
+    CHECK(n3->OwnedBy(n2));
+    CHECK(!n2->OwnedBy(n3));
+  }
+}
+
+
+TEST(Uses) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(1, n0->UseCount());
+  printf("A: %d vs %d\n", n0->UseAt(0)->id(), n1->id());
+  CHECK(n0->UseAt(0) == n1);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(2, n0->UseCount());
+  printf("B: %d vs %d\n", n0->UseAt(1)->id(), n2->id());
+  CHECK(n0->UseAt(1) == n2);
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  CHECK_EQ(3, n0->UseCount());
+  CHECK(n0->UseAt(2) == n3);
+}
+
+
+TEST(Inputs) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  CHECK_EQ(3, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  Node* n4 = graph.NewNode(&dummy_operator, n0, n1, n2);
+  n3->AppendInput(graph.zone(), n4);
+  CHECK_EQ(4, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  CHECK(n3->InputAt(3) == n4);
+  Node* n5 = graph.NewNode(&dummy_operator, n4);
+  n3->AppendInput(graph.zone(), n4);
+  CHECK_EQ(5, n3->InputCount());
+  CHECK(n3->InputAt(0) == n0);
+  CHECK(n3->InputAt(1) == n1);
+  CHECK(n3->InputAt(2) == n2);
+  CHECK(n3->InputAt(3) == n4);
+  CHECK(n3->InputAt(4) == n4);
+
+  // Make sure uses have been hooked up correctly.
+  Node::Uses uses(n4->uses());
+  Node::Uses::iterator current = uses.begin();
+  CHECK(current != uses.end());
+  CHECK(*current == n3);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n5);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n3);
+  ++current;
+  CHECK(current == uses.end());
+}
+
+
+TEST(RemoveInput) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+
+  n1->RemoveInput(0);
+  CHECK_EQ(0, n1->InputCount());
+  CHECK_EQ(1, n0->UseCount());
+
+  n2->RemoveInput(0);
+  CHECK_EQ(1, n2->InputCount());
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+
+  n2->RemoveInput(0);
+  CHECK_EQ(0, n2->InputCount());
+}
+
+
+TEST(AppendInputsAndIterator) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+
+  Node::Inputs inputs(n2->inputs());
+  Node::Inputs::iterator current = inputs.begin();
+  CHECK(current != inputs.end());
+  CHECK(*current == n0);
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n1);
+  ++current;
+  CHECK(current == inputs.end());
+
+  Node* n3 = graph.NewNode(&dummy_operator);
+  n2->AppendInput(graph.zone(), n3);
+  inputs = n2->inputs();
+  current = inputs.begin();
+  CHECK(current != inputs.end());
+  CHECK(*current == n0);
+  CHECK_EQ(0, current.index());
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n1);
+  CHECK_EQ(1, current.index());
+  ++current;
+  CHECK(current != inputs.end());
+  CHECK(*current == n3);
+  CHECK_EQ(2, current.index());
+  ++current;
+  CHECK(current == inputs.end());
+}
+
+
+TEST(NullInputsSimple) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+  CHECK_EQ(2, n2->InputCount());
+
+  CHECK(n0 == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK_EQ(2, n0->UseCount());
+  n2->ReplaceInput(0, NULL);
+  CHECK(NULL == n2->InputAt(0));
+  CHECK(n1 == n2->InputAt(1));
+  CHECK_EQ(1, n0->UseCount());
+}
+
+
+TEST(NullInputsAppended) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  n3->AppendInput(graph.zone(), n1);
+  n3->AppendInput(graph.zone(), n2);
+  CHECK_EQ(3, n3->InputCount());
+
+  CHECK(n0 == n3->InputAt(0));
+  CHECK(n1 == n3->InputAt(1));
+  CHECK(n2 == n3->InputAt(2));
+  CHECK_EQ(1, n1->UseCount());
+  n3->ReplaceInput(1, NULL);
+  CHECK(n0 == n3->InputAt(0));
+  CHECK(NULL == n3->InputAt(1));
+  CHECK(n2 == n3->InputAt(2));
+  CHECK_EQ(0, n1->UseCount());
+}
+
+
+TEST(ReplaceUsesFromAppendedInputs) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+  n2->AppendInput(graph.zone(), n1);
+  n2->AppendInput(graph.zone(), n0);
+  CHECK_EQ(0, n3->UseCount());
+  CHECK_EQ(3, n0->UseCount());
+  n0->ReplaceUses(n3);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(3, n3->UseCount());
+
+  Node::Uses uses(n3->uses());
+  Node::Uses::iterator current = uses.begin();
+  CHECK(current != uses.end());
+  CHECK(*current == n1);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n2);
+  ++current;
+  CHECK(current != uses.end());
+  CHECK(*current == n2);
+  ++current;
+  CHECK(current == uses.end());
+}
+
+
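+// A predicate that ignores its argument and always returns the template
+// constant, used to drive ReplaceUsesIf deterministically.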
+template <bool result>
+struct FixedPredicate {
+  bool operator()(const Node* node) const { return result; }
+};
+
+
+TEST(ReplaceUsesIfWithFixedPredicate) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  Node* n3 = graph.NewNode(&dummy_operator);
+
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(0, n2->UseCount());
+
+  CHECK_EQ(0, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(0, n3->UseCount());
+
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(0, n1->UseCount());
+  n0->ReplaceUsesIf(FixedPredicate<false>(), n1);
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(0, n1->UseCount());
+  n0->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+
+  n1->AppendInput(graph.zone(), n1);
+  CHECK_EQ(3, n1->UseCount());
+  n1->AppendInput(graph.zone(), n3);
+  CHECK_EQ(1, n3->UseCount());
+  n3->ReplaceUsesIf(FixedPredicate<true>(), n1);
+  CHECK_EQ(4, n1->UseCount());
+  CHECK_EQ(0, n3->UseCount());
+  n1->ReplaceUsesIf(FixedPredicate<false>(), n3);
+  CHECK_EQ(4, n1->UseCount());
+  CHECK_EQ(0, n3->UseCount());
+}
+
+
+TEST(ReplaceUsesIfWithEqualTo) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator, n0);
+  Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+
+  CHECK_EQ(0, n2->UseCount());
+  n2->ReplaceUsesIf(std::bind1st(std::equal_to<Node*>(), n1), n0);
+  CHECK_EQ(0, n2->UseCount());
+
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+  n1->ReplaceUsesIf(std::bind1st(std::equal_to<Node*>(), n0), n0);
+  CHECK_EQ(2, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+  n0->ReplaceUsesIf(std::bind2nd(std::equal_to<Node*>(), n2), n1);
+  CHECK_EQ(1, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+}
+
+
+TEST(ReplaceInputMultipleUses) {
+  GraphTester graph;
+
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+  Node* n2 = graph.NewNode(&dummy_operator, n0);
+  n2->ReplaceInput(0, n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(1, n1->UseCount());
+
+  Node* n3 = graph.NewNode(&dummy_operator, n0);
+  n3->ReplaceInput(0, n1);
+  CHECK_EQ(0, n0->UseCount());
+  CHECK_EQ(2, n1->UseCount());
+}
+
+
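+// TrimInputCount(n) drops all inputs beyond the first n and decrements the
+// use counts of the removed inputs.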
+TEST(TrimInputCountInline) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->TrimInputCount(1);
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(n0, n1->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->TrimInputCount(0);
+    CHECK_EQ(0, n1->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0, n0);
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
+
+
+TEST(TrimInputCountOutOfLine1) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    n1->AppendInput(graph.zone(), n0);
+    n1->TrimInputCount(1);
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(n0, n1->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    n1->AppendInput(graph.zone(), n0);
+    CHECK_EQ(1, n1->InputCount());
+    n1->TrimInputCount(0);
+    CHECK_EQ(0, n1->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(n1, n2->InputAt(1));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator);
+    n2->AppendInput(graph.zone(), n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
+
+
+TEST(TrimInputCountOutOfLine2) {
+  GraphTester graph;
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(2);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(n1, n2->InputAt(1));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(n0, n2->InputAt(0));
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n1);
+    CHECK_EQ(2, n2->InputCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(1);
+    CHECK_EQ(1, n2->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n2 = graph.NewNode(&dummy_operator, n0);
+    n2->AppendInput(graph.zone(), n0);
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(2, n0->UseCount());
+    n2->TrimInputCount(0);
+    CHECK_EQ(0, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n2->UseCount());
+  }
+}
+
+
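+// RemoveAllInputs() keeps the input count but nulls out every input,
+// clearing the corresponding use entries.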
+TEST(RemoveAllInputs) {
+  GraphTester graph;
+
+  for (int i = 0; i < 2; i++) {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    Node* n2;
+    if (i == 0) {
+      n2 = graph.NewNode(&dummy_operator, n0, n1);
+    } else {
+      n2 = graph.NewNode(&dummy_operator, n0);
+      n2->AppendInput(graph.zone(), n1);  // with out-of-line input.
+    }
+
+    n0->RemoveAllInputs();
+    CHECK_EQ(0, n0->InputCount());
+
+    CHECK_EQ(2, n0->UseCount());
+    n1->RemoveAllInputs();
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(1, n0->UseCount());
+    CHECK_EQ(NULL, n1->InputAt(0));
+
+    CHECK_EQ(1, n1->UseCount());
+    n2->RemoveAllInputs();
+    CHECK_EQ(2, n2->InputCount());
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(NULL, n2->InputAt(0));
+    CHECK_EQ(NULL, n2->InputAt(1));
+  }
+
+  {
+    Node* n0 = graph.NewNode(&dummy_operator);
+    Node* n1 = graph.NewNode(&dummy_operator, n0);
+    n1->ReplaceInput(0, n1);  // self-reference.
+
+    CHECK_EQ(0, n0->UseCount());
+    CHECK_EQ(1, n1->UseCount());
+    n1->RemoveAllInputs();
+    CHECK_EQ(1, n1->InputCount());
+    CHECK_EQ(0, n1->UseCount());
+    CHECK_EQ(NULL, n1->InputAt(0));
+  }
+}
diff --git a/test/cctest/compiler/test-operator.cc b/test/cctest/compiler/test-operator.cc
new file mode 100644
index 0000000..af75d67
--- /dev/null
+++ b/test/cctest/compiler/test-operator.cc
@@ -0,0 +1,244 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/operator.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#define NaN (v8::base::OS::nan_value())
+#define Infinity (std::numeric_limits<double>::infinity())
+
+TEST(TestOperatorMnemonic) {
+  SimpleOperator op1(10, Operator::kNoProperties, 0, 0, "ThisOne");
+  CHECK_EQ(0, strcmp(op1.mnemonic(), "ThisOne"));
+
+  SimpleOperator op2(11, Operator::kNoProperties, 0, 0, "ThatOne");
+  CHECK_EQ(0, strcmp(op2.mnemonic(), "ThatOne"));
+
+  Operator1<int> op3(12, Operator::kNoProperties, 0, 1, "Mnemonic1", 12333);
+  CHECK_EQ(0, strcmp(op3.mnemonic(), "Mnemonic1"));
+
+  Operator1<double> op4(13, Operator::kNoProperties, 0, 1, "TheOther", 99.9);
+  CHECK_EQ(0, strcmp(op4.mnemonic(), "TheOther"));
+}
+
+
+TEST(TestSimpleOperatorHash) {
+  SimpleOperator op1(17, Operator::kNoProperties, 0, 0, "Another");
+  CHECK_EQ(17, op1.HashCode());
+
+  SimpleOperator op2(18, Operator::kNoProperties, 0, 0, "Falsch");
+  CHECK_EQ(18, op2.HashCode());
+}
+
+
+TEST(TestSimpleOperatorEquals) {
+  SimpleOperator op1a(19, Operator::kNoProperties, 0, 0, "Another1");
+  SimpleOperator op1b(19, Operator::kFoldable, 2, 2, "Another2");
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  SimpleOperator op2a(20, Operator::kNoProperties, 0, 0, "Falsch1");
+  SimpleOperator op2b(20, Operator::kFoldable, 1, 1, "Falsch2");
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(op2a.Equals(&op2b));
+  CHECK(op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+}
+
+
+static SmartArrayPointer<const char> OperatorToString(Operator* op) {
+  OStringStream os;
+  os << *op;
+  return SmartArrayPointer<const char>(StrDup(os.c_str()));
+}
+
+
+TEST(TestSimpleOperatorPrint) {
+  SimpleOperator op1a(19, Operator::kNoProperties, 0, 0, "Another1");
+  SimpleOperator op1b(19, Operator::kFoldable, 2, 2, "Another2");
+
+  CHECK_EQ("Another1", OperatorToString(&op1a).get());
+  CHECK_EQ("Another2", OperatorToString(&op1b).get());
+
+  SimpleOperator op2a(20, Operator::kNoProperties, 0, 0, "Flog1");
+  SimpleOperator op2b(20, Operator::kFoldable, 1, 1, "Flog2");
+
+  CHECK_EQ("Flog1", OperatorToString(&op2a).get());
+  CHECK_EQ("Flog2", OperatorToString(&op2b).get());
+}
+
+
+TEST(TestOperator1intHash) {
+  Operator1<int> op1a(23, Operator::kNoProperties, 0, 0, "Wolfie", 11);
+  Operator1<int> op1b(23, Operator::kFoldable, 2, 2, "Doggie", 11);
+
+  CHECK_EQ(op1a.HashCode(), op1b.HashCode());
+
+  Operator1<int> op2a(24, Operator::kNoProperties, 0, 0, "Arfie", 3);
+  Operator1<int> op2b(24, Operator::kNoProperties, 0, 0, "Arfie", 4);
+
+  CHECK_NE(op1a.HashCode(), op2a.HashCode());
+  CHECK_NE(op2a.HashCode(), op2b.HashCode());
+}
+
+
+TEST(TestOperator1intEquals) {
+  Operator1<int> op1a(23, Operator::kNoProperties, 0, 0, "Scratchy", 11);
+  Operator1<int> op1b(23, Operator::kFoldable, 2, 2, "Scratchy", 11);
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  Operator1<int> op2a(24, Operator::kNoProperties, 0, 0, "Im", 3);
+  Operator1<int> op2b(24, Operator::kNoProperties, 0, 0, "Im", 4);
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(!op2a.Equals(&op2b));
+  CHECK(!op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+
+  SimpleOperator op3(25, Operator::kNoProperties, 0, 0, "Weepy");
+
+  CHECK(!op1a.Equals(&op3));
+  CHECK(!op1b.Equals(&op3));
+  CHECK(!op2a.Equals(&op3));
+  CHECK(!op2b.Equals(&op3));
+
+  CHECK(!op3.Equals(&op1a));
+  CHECK(!op3.Equals(&op1b));
+  CHECK(!op3.Equals(&op2a));
+  CHECK(!op3.Equals(&op2b));
+}
+
+
+TEST(TestOperator1intPrint) {
+  Operator1<int> op1(12, Operator::kNoProperties, 0, 1, "Op1Test", 0);
+  CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
+
+  Operator1<int> op2(12, Operator::kNoProperties, 0, 1, "Op1Test", 66666666);
+  CHECK_EQ("Op1Test[66666666]", OperatorToString(&op2).get());
+
+  Operator1<int> op3(12, Operator::kNoProperties, 0, 1, "FooBar", 2347);
+  CHECK_EQ("FooBar[2347]", OperatorToString(&op3).get());
+
+  Operator1<int> op4(12, Operator::kNoProperties, 0, 1, "BarFoo", -879);
+  CHECK_EQ("BarFoo[-879]", OperatorToString(&op4).get());
+}
+
+
+TEST(TestOperator1doubleHash) {
+  Operator1<double> op1a(23, Operator::kNoProperties, 0, 0, "Wolfie", 11.77);
+  Operator1<double> op1b(23, Operator::kFoldable, 2, 2, "Doggie", 11.77);
+
+  CHECK_EQ(op1a.HashCode(), op1b.HashCode());
+
+  Operator1<double> op2a(24, Operator::kNoProperties, 0, 0, "Arfie", -6.7);
+  Operator1<double> op2b(24, Operator::kNoProperties, 0, 0, "Arfie", -6.8);
+
+  CHECK_NE(op1a.HashCode(), op2a.HashCode());
+  CHECK_NE(op2a.HashCode(), op2b.HashCode());
+}
+
+
+TEST(TestOperator1doubleEquals) {
+  Operator1<double> op1a(23, Operator::kNoProperties, 0, 0, "Scratchy", 11.77);
+  Operator1<double> op1b(23, Operator::kFoldable, 2, 2, "Scratchy", 11.77);
+
+  CHECK(op1a.Equals(&op1a));
+  CHECK(op1a.Equals(&op1b));
+  CHECK(op1b.Equals(&op1a));
+  CHECK(op1b.Equals(&op1b));
+
+  Operator1<double> op2a(24, Operator::kNoProperties, 0, 0, "Im", 3.1);
+  Operator1<double> op2b(24, Operator::kNoProperties, 0, 0, "Im", 3.2);
+
+  CHECK(op2a.Equals(&op2a));
+  CHECK(!op2a.Equals(&op2b));
+  CHECK(!op2b.Equals(&op2a));
+  CHECK(op2b.Equals(&op2b));
+
+  CHECK(!op1a.Equals(&op2a));
+  CHECK(!op1a.Equals(&op2b));
+  CHECK(!op1b.Equals(&op2a));
+  CHECK(!op1b.Equals(&op2b));
+
+  CHECK(!op2a.Equals(&op1a));
+  CHECK(!op2a.Equals(&op1b));
+  CHECK(!op2b.Equals(&op1a));
+  CHECK(!op2b.Equals(&op1b));
+
+  SimpleOperator op3(25, Operator::kNoProperties, 0, 0, "Weepy");
+
+  CHECK(!op1a.Equals(&op3));
+  CHECK(!op1b.Equals(&op3));
+  CHECK(!op2a.Equals(&op3));
+  CHECK(!op2b.Equals(&op3));
+
+  CHECK(!op3.Equals(&op1a));
+  CHECK(!op3.Equals(&op1b));
+  CHECK(!op3.Equals(&op2a));
+  CHECK(!op3.Equals(&op2b));
+
+  Operator1<double> op4a(24, Operator::kNoProperties, 0, 0, "Bashful", NaN);
+  Operator1<double> op4b(24, Operator::kNoProperties, 0, 0, "Bashful", NaN);
+
+  CHECK(op4a.Equals(&op4a));
+  CHECK(op4a.Equals(&op4b));
+  CHECK(op4b.Equals(&op4a));
+  CHECK(op4b.Equals(&op4b));
+
+  CHECK(!op3.Equals(&op4a));
+  CHECK(!op3.Equals(&op4b));
+  CHECK(!op4a.Equals(&op3));
+  CHECK(!op4b.Equals(&op3));
+}
+
+
+TEST(TestOperator1doublePrint) {
+  Operator1<double> op1(12, Operator::kNoProperties, 0, 1, "Op1Test", 0);
+  CHECK_EQ("Op1Test[0]", OperatorToString(&op1).get());
+
+  Operator1<double> op2(12, Operator::kNoProperties, 0, 1, "Op1Test", 7.3);
+  CHECK_EQ("Op1Test[7.3]", OperatorToString(&op2).get());
+
+  Operator1<double> op3(12, Operator::kNoProperties, 0, 1, "FooBar", 2e+123);
+  CHECK_EQ("FooBar[2e+123]", OperatorToString(&op3).get());
+
+  Operator1<double> op4(12, Operator::kNoProperties, 0, 1, "BarFoo", Infinity);
+  CHECK_EQ("BarFoo[inf]", OperatorToString(&op4).get());
+
+  Operator1<double> op5(12, Operator::kNoProperties, 0, 1, "BarFoo", NaN);
+  CHECK_EQ("BarFoo[nan]", OperatorToString(&op5).get());
+}
diff --git a/test/cctest/compiler/test-phi-reducer.cc b/test/cctest/compiler/test-phi-reducer.cc
new file mode 100644
index 0000000..7d2fab6
--- /dev/null
+++ b/test/cctest/compiler/test-phi-reducer.cc
@@ -0,0 +1,230 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph-inl.h"
+#include "src/compiler/phi-reducer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+class PhiReducerTester : HandleAndZoneScope {
+ public:
+  explicit PhiReducerTester(int num_parameters = 0)
+      : isolate(main_isolate()),
+        common(main_zone()),
+        graph(main_zone()),
+        self(graph.NewNode(common.Start(num_parameters))),
+        dead(graph.NewNode(common.Dead())) {
+    graph.SetStart(self);
+  }
+
+  Isolate* isolate;
+  CommonOperatorBuilder common;
+  Graph graph;
+  Node* self;
+  Node* dead;
+
+  void CheckReduce(Node* expect, Node* phi) {
+    PhiReducer reducer;
+    Reduction reduction = reducer.Reduce(phi);
+    if (expect == phi) {
+      CHECK(!reduction.Changed());
+    } else {
+      CHECK(reduction.Changed());
+      CHECK_EQ(expect, reduction.replacement());
+    }
+  }
+
+  Node* Int32Constant(int32_t val) {
+    return graph.NewNode(common.Int32Constant(val));
+  }
+
+  Node* Float64Constant(double val) {
+    return graph.NewNode(common.Float64Constant(val));
+  }
+
+  Node* Parameter(int32_t index = 0) {
+    return graph.NewNode(common.Parameter(index), graph.start());
+  }
+
+  Node* Phi(Node* a) {
+    return SetSelfReferences(graph.NewNode(common.Phi(kMachAnyTagged, 1), a));
+  }
+
+  Node* Phi(Node* a, Node* b) {
+    return SetSelfReferences(
+        graph.NewNode(common.Phi(kMachAnyTagged, 2), a, b));
+  }
+
+  Node* Phi(Node* a, Node* b, Node* c) {
+    return SetSelfReferences(
+        graph.NewNode(common.Phi(kMachAnyTagged, 3), a, b, c));
+  }
+
+  Node* Phi(Node* a, Node* b, Node* c, Node* d) {
+    return SetSelfReferences(
+        graph.NewNode(common.Phi(kMachAnyTagged, 4), a, b, c, d));
+  }
+
+  Node* PhiWithControl(Node* a, Node* control) {
+    return SetSelfReferences(
+        graph.NewNode(common.Phi(kMachAnyTagged, 1), a, control));
+  }
+
+  Node* PhiWithControl(Node* a, Node* b, Node* control) {
+    return SetSelfReferences(
+        graph.NewNode(common.Phi(kMachAnyTagged, 2), a, b, control));
+  }
+
+  Node* SetSelfReferences(Node* node) {
+    Node::Inputs inputs = node->inputs();
+    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
+         ++iter) {
+      Node* input = *iter;
+      if (input == self) node->ReplaceInput(iter.index(), node);
+    }
+    return node;
+  }
+};
+
+
+TEST(PhiReduce1) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < arraysize(singles); i++) {
+    R.CheckReduce(singles[i], R.Phi(singles[i]));
+  }
+}
+
+
+TEST(PhiReduce2) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < arraysize(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a));
+  }
+
+  for (size_t i = 0; i < arraysize(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a));
+    R.CheckReduce(a, R.Phi(a, R.self));
+  }
+
+  for (size_t i = 1; i < arraysize(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b);
+    R.CheckReduce(phi2, phi2);
+  }
+}
+
+
+TEST(PhiReduce3) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < arraysize(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a, a));
+  }
+
+  for (size_t i = 0; i < arraysize(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self));
+  }
+
+  for (size_t i = 1; i < arraysize(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b, a);
+    R.CheckReduce(phi2, phi2);
+
+    Node* phi3 = R.Phi(a, a, b);
+    R.CheckReduce(phi3, phi3);
+  }
+}
+
+
+TEST(PhiReduce4) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < arraysize(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(a, a, a, a));
+  }
+
+  for (size_t i = 0; i < arraysize(singles); i++) {
+    Node* a = singles[i];
+    R.CheckReduce(a, R.Phi(R.self, a, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, a, R.self));
+
+    R.CheckReduce(a, R.Phi(R.self, R.self, a, a));
+    R.CheckReduce(a, R.Phi(a, R.self, R.self, a));
+    R.CheckReduce(a, R.Phi(a, a, R.self, R.self));
+    R.CheckReduce(a, R.Phi(R.self, a, a, R.self));
+  }
+
+  for (size_t i = 1; i < arraysize(singles); i++) {
+    Node* a = singles[i], *b = singles[0];
+    Node* phi1 = R.Phi(b, a, a, a);
+    R.CheckReduce(phi1, phi1);
+
+    Node* phi2 = R.Phi(a, b, a, a);
+    R.CheckReduce(phi2, phi2);
+
+    Node* phi3 = R.Phi(a, a, b, a);
+    R.CheckReduce(phi3, phi3);
+
+    Node* phi4 = R.Phi(a, a, a, b);
+    R.CheckReduce(phi4, phi4);
+  }
+}
+
+
+TEST(PhiReduceShouldIgnoreControlNodes) {
+  PhiReducerTester R;
+  Node* zero = R.Int32Constant(0);
+  Node* one = R.Int32Constant(1);
+  Node* oneish = R.Float64Constant(1.1);
+  Node* param = R.Parameter();
+
+  Node* singles[] = {zero, one, oneish, param};
+  for (size_t i = 0; i < arraysize(singles); ++i) {
+    R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.dead));
+    R.CheckReduce(singles[i], R.PhiWithControl(R.self, singles[i], R.dead));
+    R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.self, R.dead));
+  }
+}
diff --git a/test/cctest/compiler/test-pipeline.cc b/test/cctest/compiler/test-pipeline.cc
new file mode 100644
index 0000000..f0b750a
--- /dev/null
+++ b/test/cctest/compiler/test-pipeline.cc
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler.h"
+#include "src/compiler/pipeline.h"
+#include "src/handles.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
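+// End-to-end smoke test: parse, rewrite and scope-analyze a two-parameter
+// add, then run it through the TurboFan pipeline (code is only generated on
+// targets where TurboFan is supported).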
+TEST(PipelineAdd) {
+  InitializedHandleScope handles;
+  const char* source = "(function(a,b) { return a + b; })";
+  Handle<JSFunction> function = v8::Utils::OpenHandle(
+      *v8::Handle<v8::Function>::Cast(CompileRun(source)));
+  CompilationInfoWithZone info(function);
+
+  CHECK(Parser::Parse(&info));
+  CHECK(Rewriter::Rewrite(&info));
+  CHECK(Scope::Analyze(&info));
+  CHECK_NE(NULL, info.scope());
+
+  Pipeline pipeline(&info);
+#if V8_TURBOFAN_TARGET
+  Handle<Code> code = pipeline.GenerateCode();
+  CHECK(Pipeline::SupportedTarget());
+  CHECK(!code.is_null());
+#else
+  USE(pipeline);
+#endif
+}
diff --git a/test/cctest/compiler/test-representation-change.cc b/test/cctest/compiler/test-representation-change.cc
new file mode 100644
index 0000000..6c9026b
--- /dev/null
+++ b/test/cctest/compiler/test-representation-change.cc
@@ -0,0 +1,305 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/representation-change.h"
+#include "src/compiler/typer.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+namespace v8 {  // for friendliness.
+namespace internal {
+namespace compiler {
+
+class RepresentationChangerTester : public HandleAndZoneScope,
+                                    public GraphAndBuilders {
+ public:
+  explicit RepresentationChangerTester(int num_parameters = 0)
+      : GraphAndBuilders(main_zone()),
+        typer_(main_zone()),
+        javascript_(main_zone()),
+        jsgraph_(main_graph_, &main_common_, &javascript_, &typer_,
+                 &main_machine_),
+        changer_(&jsgraph_, &main_simplified_, main_isolate()) {
+    Node* s = graph()->NewNode(common()->Start(num_parameters));
+    graph()->SetStart(s);
+  }
+
+  Typer typer_;
+  JSOperatorBuilder javascript_;
+  JSGraph jsgraph_;
+  RepresentationChanger changer_;
+
+  Isolate* isolate() { return main_isolate(); }
+  Graph* graph() { return main_graph_; }
+  CommonOperatorBuilder* common() { return &main_common_; }
+  JSGraph* jsgraph() { return &jsgraph_; }
+  RepresentationChanger* changer() { return &changer_; }
+
+  // TODO(titzer): use ValueChecker / ValueUtil
+  void CheckInt32Constant(Node* n, int32_t expected) {
+    Int32Matcher m(n);
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, m.Value());
+  }
+
+  void CheckHeapConstant(Node* n, HeapObject* expected) {
+    HeapObjectMatcher<HeapObject> m(n);
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, *m.Value().handle());
+  }
+
+  void CheckNumberConstant(Node* n, double expected) {
+    NumberMatcher m(n);
+    CHECK_EQ(IrOpcode::kNumberConstant, n->opcode());
+    CHECK(m.HasValue());
+    CHECK_EQ(expected, m.Value());
+  }
+
+  Node* Parameter(int index = 0) {
+    return graph()->NewNode(common()->Parameter(index), graph()->start());
+  }
+
+  void CheckTypeError(MachineTypeUnion from, MachineTypeUnion to) {
+    changer()->testing_type_errors_ = true;
+    changer()->type_error_ = false;
+    Node* n = Parameter(0);
+    Node* c = changer()->GetRepresentationFor(n, from, to);
+    CHECK(changer()->type_error_);
+    CHECK_EQ(n, c);
+  }
+
+  void CheckNop(MachineTypeUnion from, MachineTypeUnion to) {
+    Node* n = Parameter(0);
+    Node* c = changer()->GetRepresentationFor(n, from, to);
+    CHECK_EQ(n, c);
+  }
+};
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+
+// TODO(titzer): add kRepFloat32 when fully supported.
+static const MachineType all_reps[] = {kRepBit, kRepWord32, kRepWord64,
+                                       kRepFloat64, kRepTagged};
+
+
+// TODO(titzer): lift this to ValueHelper
+static const double double_inputs[] = {
+    0.0,   -0.0,    1.0,    -1.0,        0.1,         1.4,    -1.7,
+    2,     5,       6,      982983,      888,         -999.8, 3.1e7,
+    -2e66, 2.3e124, -12e73, V8_INFINITY, -V8_INFINITY};
+
+
+static const int32_t int32_inputs[] = {
+    0,      1,                                -1,
+    2,      5,                                6,
+    982983, 888,                              -999,
+    65535,  static_cast<int32_t>(0xFFFFFFFF), static_cast<int32_t>(0x80000000)};
+
+
+static const uint32_t uint32_inputs[] = {
+    0,      1,   static_cast<uint32_t>(-1),   2,     5,          6,
+    982983, 888, static_cast<uint32_t>(-999), 65535, 0xFFFFFFFF, 0x80000000};
+
+
+TEST(BoolToBit_constant) {
+  RepresentationChangerTester r;
+
+  Node* true_node = r.jsgraph()->TrueConstant();
+  Node* true_bit =
+      r.changer()->GetRepresentationFor(true_node, kRepTagged, kRepBit);
+  r.CheckInt32Constant(true_bit, 1);
+
+  Node* false_node = r.jsgraph()->FalseConstant();
+  Node* false_bit =
+      r.changer()->GetRepresentationFor(false_node, kRepTagged, kRepBit);
+  r.CheckInt32Constant(false_bit, 0);
+}
+
+
+TEST(BitToBool_constant) {
+  RepresentationChangerTester r;
+
+  for (int i = -5; i < 5; i++) {
+    Node* node = r.jsgraph()->Int32Constant(i);
+    Node* val = r.changer()->GetRepresentationFor(node, kRepBit, kRepTagged);
+    r.CheckHeapConstant(val, i == 0 ? r.isolate()->heap()->false_value()
+                                    : r.isolate()->heap()->true_value());
+  }
+}
+
+
+TEST(ToTagged_constant) {
+  RepresentationChangerTester r;
+
+  for (size_t i = 0; i < arraysize(double_inputs); i++) {
+    Node* n = r.jsgraph()->Float64Constant(double_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, kRepFloat64, kRepTagged);
+    r.CheckNumberConstant(c, double_inputs[i]);
+  }
+
+  for (size_t i = 0; i < arraysize(int32_inputs); i++) {
+    Node* n = r.jsgraph()->Int32Constant(int32_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeInt32,
+                                                kRepTagged);
+    r.CheckNumberConstant(c, static_cast<double>(int32_inputs[i]));
+  }
+
+  for (size_t i = 0; i < arraysize(uint32_inputs); i++) {
+    Node* n = r.jsgraph()->Int32Constant(uint32_inputs[i]);
+    Node* c = r.changer()->GetRepresentationFor(n, kRepWord32 | kTypeUint32,
+                                                kRepTagged);
+    r.CheckNumberConstant(c, static_cast<double>(uint32_inputs[i]));
+  }
+}
+
+
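+// Requests a representation change for a fresh parameter and verifies that
+// exactly one conversion node with the expected opcode was inserted.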
+static void CheckChange(IrOpcode::Value expected, MachineTypeUnion from,
+                        MachineTypeUnion to) {
+  RepresentationChangerTester r;
+
+  Node* n = r.Parameter();
+  Node* c = r.changer()->GetRepresentationFor(n, from, to);
+
+  CHECK_NE(c, n);
+  CHECK_EQ(expected, c->opcode());
+  CHECK_EQ(n, c->InputAt(0));
+}
+
+
+TEST(SingleChanges) {
+  CheckChange(IrOpcode::kChangeBoolToBit, kRepTagged, kRepBit);
+  CheckChange(IrOpcode::kChangeBitToBool, kRepBit, kRepTagged);
+
+  CheckChange(IrOpcode::kChangeInt32ToTagged, kRepWord32 | kTypeInt32,
+              kRepTagged);
+  CheckChange(IrOpcode::kChangeUint32ToTagged, kRepWord32 | kTypeUint32,
+              kRepTagged);
+  CheckChange(IrOpcode::kChangeFloat64ToTagged, kRepFloat64, kRepTagged);
+
+  CheckChange(IrOpcode::kChangeTaggedToInt32, kRepTagged | kTypeInt32,
+              kRepWord32);
+  CheckChange(IrOpcode::kChangeTaggedToUint32, kRepTagged | kTypeUint32,
+              kRepWord32);
+  CheckChange(IrOpcode::kChangeTaggedToFloat64, kRepTagged, kRepFloat64);
+
+  // Int32,Uint32 <-> Float64 are actually machine conversions.
+  CheckChange(IrOpcode::kChangeInt32ToFloat64, kRepWord32 | kTypeInt32,
+              kRepFloat64);
+  CheckChange(IrOpcode::kChangeUint32ToFloat64, kRepWord32 | kTypeUint32,
+              kRepFloat64);
+  CheckChange(IrOpcode::kChangeFloat64ToInt32, kRepFloat64 | kTypeInt32,
+              kRepWord32);
+  CheckChange(IrOpcode::kChangeFloat64ToUint32, kRepFloat64 | kTypeUint32,
+              kRepWord32);
+}
+
+
+TEST(SignednessInWord32) {
+  RepresentationChangerTester r;
+
+  // TODO(titzer): assume that uses of a word32 without a sign mean kTypeInt32.
+  CheckChange(IrOpcode::kChangeTaggedToInt32, kRepTagged,
+              kRepWord32 | kTypeInt32);
+  CheckChange(IrOpcode::kChangeTaggedToUint32, kRepTagged,
+              kRepWord32 | kTypeUint32);
+  CheckChange(IrOpcode::kChangeInt32ToFloat64, kRepWord32, kRepFloat64);
+  CheckChange(IrOpcode::kChangeFloat64ToInt32, kRepFloat64, kRepWord32);
+}
+
+
+TEST(Nops) {
+  RepresentationChangerTester r;
+
+  // X -> X is always a nop for any single representation X.
+  for (size_t i = 0; i < arraysize(all_reps); i++) {
+    r.CheckNop(all_reps[i], all_reps[i]);
+  }
+
+  // 32-bit floats.
+  r.CheckNop(kRepFloat32, kRepFloat32);
+  r.CheckNop(kRepFloat32 | kTypeNumber, kRepFloat32);
+  r.CheckNop(kRepFloat32, kRepFloat32 | kTypeNumber);
+
+  // 32-bit or 64-bit words can be used as branch conditions (kRepBit).
+  r.CheckNop(kRepWord32, kRepBit);
+  r.CheckNop(kRepWord32, kRepBit | kTypeBool);
+  r.CheckNop(kRepWord64, kRepBit);
+  r.CheckNop(kRepWord64, kRepBit | kTypeBool);
+
+  // 32-bit words can be used as smaller word sizes and vice versa, because
+  // loads from memory implicitly sign or zero extend the value to the
+  // full machine word size, and stores implicitly truncate.
+  r.CheckNop(kRepWord32, kRepWord8);
+  r.CheckNop(kRepWord32, kRepWord16);
+  r.CheckNop(kRepWord32, kRepWord32);
+  r.CheckNop(kRepWord8, kRepWord32);
+  r.CheckNop(kRepWord16, kRepWord32);
+
+  // kRepBit (result of comparison) is implicitly a wordish thing.
+  r.CheckNop(kRepBit, kRepWord8);
+  r.CheckNop(kRepBit | kTypeBool, kRepWord8);
+  r.CheckNop(kRepBit, kRepWord16);
+  r.CheckNop(kRepBit | kTypeBool, kRepWord16);
+  r.CheckNop(kRepBit, kRepWord32);
+  r.CheckNop(kRepBit | kTypeBool, kRepWord32);
+  r.CheckNop(kRepBit, kRepWord64);
+  r.CheckNop(kRepBit | kTypeBool, kRepWord64);
+}
+
+
+TEST(TypeErrors) {
+  RepresentationChangerTester r;
+
+  // Floats cannot be implicitly converted to/from comparison conditions.
+  r.CheckTypeError(kRepFloat64, kRepBit);
+  r.CheckTypeError(kRepFloat64, kRepBit | kTypeBool);
+  r.CheckTypeError(kRepBit, kRepFloat64);
+  r.CheckTypeError(kRepBit | kTypeBool, kRepFloat64);
+
+  // The same holds for 32-bit floats.
+  r.CheckTypeError(kRepFloat32, kRepBit);
+  r.CheckTypeError(kRepFloat32, kRepBit | kTypeBool);
+  r.CheckTypeError(kRepBit, kRepFloat32);
+  r.CheckTypeError(kRepBit | kTypeBool, kRepFloat32);
+
+  // Word64 is internal and shouldn't be implicitly converted.
+  r.CheckTypeError(kRepWord64, kRepTagged | kTypeBool);
+  r.CheckTypeError(kRepWord64, kRepTagged);
+  r.CheckTypeError(kRepTagged, kRepWord64);
+  r.CheckTypeError(kRepTagged | kTypeBool, kRepWord64);
+
+  // Word64 / Word32 shouldn't be implicitly converted.
+  r.CheckTypeError(kRepWord64, kRepWord32);
+  r.CheckTypeError(kRepWord32, kRepWord64);
+  r.CheckTypeError(kRepWord64, kRepWord32 | kTypeInt32);
+  r.CheckTypeError(kRepWord32 | kTypeInt32, kRepWord64);
+  r.CheckTypeError(kRepWord64, kRepWord32 | kTypeUint32);
+  r.CheckTypeError(kRepWord32 | kTypeUint32, kRepWord64);
+
+  for (size_t i = 0; i < arraysize(all_reps); i++) {
+    for (size_t j = 0; j < arraysize(all_reps); j++) {
+      if (i == j) continue;
+      // Only a single from representation is allowed.
+      r.CheckTypeError(all_reps[i] | all_reps[j], kRepTagged);
+    }
+  }
+
+  // TODO(titzer): Float32 representation changes trigger type errors now.
+  // Enforce current behavior to test all paths through representation changer.
+  for (size_t i = 0; i < arraysize(all_reps); i++) {
+    r.CheckTypeError(all_reps[i], kRepFloat32);
+    r.CheckTypeError(kRepFloat32, all_reps[i]);
+  }
+}
diff --git a/test/cctest/compiler/test-run-deopt.cc b/test/cctest/compiler/test-run-deopt.cc
new file mode 100644
index 0000000..14c024c
--- /dev/null
+++ b/test/cctest/compiler/test-run-deopt.cc
@@ -0,0 +1,76 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8;
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+#if V8_TURBOFAN_TARGET
+
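+// Native callback exposed to JS as IsOptimized(); it reports whether the
+// topmost JavaScript frame is running optimized code.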
+static void IsOptimized(const FunctionCallbackInfo<v8::Value>& args) {
+  JavaScriptFrameIterator it(CcTest::i_isolate());
+  JavaScriptFrame* frame = it.frame();
+  return args.GetReturnValue().Set(frame->is_optimized());
+}
+
+
+static void InstallIsOptimizedHelper(v8::Isolate* isolate) {
+  Local<v8::Context> context = isolate->GetCurrentContext();
+  Local<v8::FunctionTemplate> t = FunctionTemplate::New(isolate, IsOptimized);
+  context->Global()->Set(v8_str("IsOptimized"), t->GetFunction());
+}
+
+
+TEST(TurboSimpleDeopt) {
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  FunctionTester T(
+      "(function f(a) {"
+      "var b = 1;"
+      "if (!IsOptimized()) return 0;"
+      "%DeoptimizeFunction(f);"
+      "if (IsOptimized()) return 0;"
+      "return a + b; })");
+
+  InstallIsOptimizedHelper(CcTest::isolate());
+  T.CheckCall(T.Val(2), T.Val(1));
+}
+
+
+TEST(TurboSimpleDeoptInExpr) {
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  FunctionTester T(
+      "(function f(a) {"
+      "var b = 1;"
+      "var c = 2;"
+      "if (!IsOptimized()) return 0;"
+      "var d = b + (%DeoptimizeFunction(f), c);"
+      "if (IsOptimized()) return 0;"
+      "return d + a; })");
+
+  InstallIsOptimizedHelper(CcTest::isolate());
+  T.CheckCall(T.Val(6), T.Val(3));
+}
+
+#endif  // V8_TURBOFAN_TARGET
+
+TEST(TurboTrivialDeopt) {
+  FLAG_allow_natives_syntax = true;
+  FLAG_turbo_deoptimization = true;
+
+  FunctionTester T(
+      "(function foo() {"
+      "%DeoptimizeFunction(foo);"
+      "return 1; })");
+
+  T.CheckCall(T.Val(1));
+}
diff --git a/test/cctest/compiler/test-run-inlining.cc b/test/cctest/compiler/test-run-inlining.cc
new file mode 100644
index 0000000..ad82fec
--- /dev/null
+++ b/test/cctest/compiler/test-run-inlining.cc
@@ -0,0 +1,353 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+// Helper to determine inline count via JavaScriptFrame::GetInlineCount.
+// Note that a count of 1 indicates that no inlining has occurred.
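+// For example, AssertInlineCount(2) passes when exactly one callee has been
+// inlined into the function whose frame is on top.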
+static void AssertInlineCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  StackTraceFrameIterator it(CcTest::i_isolate());
+  int frames_seen = 0;
+  JavaScriptFrame* topmost = it.frame();
+  while (!it.done()) {
+    JavaScriptFrame* frame = it.frame();
+    PrintF("%d %s, inline count: %d\n", frames_seen,
+           frame->function()->shared()->DebugName()->ToCString().get(),
+           frame->GetInlineCount());
+    frames_seen++;
+    it.Advance();
+  }
+  CHECK_EQ(args[0]->ToInt32()->Value(), topmost->GetInlineCount());
+}
+
+
+static void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
+  v8::Local<v8::Context> context = isolate->GetCurrentContext();
+  v8::Local<v8::FunctionTemplate> t =
+      v8::FunctionTemplate::New(isolate, AssertInlineCount);
+  context->Global()->Set(v8_str("AssertInlineCount"), t->GetFunction());
+}
+
+
+TEST(SimpleInlining) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function(){"
+      "function foo(s) { AssertInlineCount(2); return s; };"
+      "function bar(s, t) { return foo(s); };"
+      "return bar;})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(1), T.Val(1), T.Val(2));
+}
+
+
+TEST(SimpleInliningDeopt) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function(){"
+      "function foo(s) { %DeoptimizeFunction(bar); return "
+      "s; };"
+      "function bar(s, t) { return foo(s); };"
+      "return bar;})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(1), T.Val(1), T.Val(2));
+}
+
+
+TEST(SimpleInliningContext) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "function foo(s) { AssertInlineCount(2); var x = 12; return s + x; };"
+      "function bar(s, t) { return foo(s); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(13), T.Val(1), T.Val(2));
+}
+
+
+TEST(SimpleInliningContextDeopt) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "function foo(s) { "
+      "  AssertInlineCount(2); %DeoptimizeFunction(bar); var x = 12;"
+      "  return s + x;"
+      "};"
+      "function bar(s, t) { return foo(s); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(13), T.Val(1), T.Val(2));
+}
+
+
+TEST(CaptureContext) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "var f = (function () {"
+      "var x = 42;"
+      "function bar(s) { return x + s; };"
+      "return (function (s) { return bar(s); });"
+      "})();"
+      "(function (s) { return f(s)})",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
+}
+
+
+// TODO(sigurds): For now we do not inline any native functions. If we do at
+// some point, change this test.
+TEST(DontInlineEval) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "var x = 42;"
+      "(function () {"
+      "function bar(s, t) { return eval(\"AssertInlineCount(1); x\") };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(42), T.Val("x"), T.undefined());
+}
+
+
+TEST(InlineOmitArguments) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = 42;"
+      "function bar(s, t, u, v) { AssertInlineCount(2); return x + s; };"
+      "return (function (s,t) { return bar(s); });"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
+}
+
+
+TEST(InlineOmitArgumentsDeopt) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "function foo(s,t,u,v) { AssertInlineCount(2); %DeoptimizeFunction(bar); "
+      "return baz(); };"
+      "function bar() { return foo(11); };"
+      "function baz() { return foo.arguments.length == 1 && "
+      "                        foo.arguments[0] == 11 ; }"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.true_value(), T.Val(12), T.Val(14));
+}
+
+
+TEST(InlineSurplusArguments) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = 42;"
+      "function foo(s) { AssertInlineCount(2); return x + s; };"
+      "function bar(s,t) { return foo(s,t,13); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
+}
+
+
+TEST(InlineSurplusArgumentsDeopt) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "function foo(s) { AssertInlineCount(2); %DeoptimizeFunction(bar); "
+      "return baz(); };"
+      "function bar() { return foo(13, 14, 15); };"
+      "function baz() { return foo.arguments.length == 3 && "
+      "                        foo.arguments[0] == 13 && "
+      "                        foo.arguments[1] == 14 && "
+      "                        foo.arguments[2] == 15; }"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.true_value(), T.Val(12), T.Val(14));
+}
+
+
+TEST(InlineTwice) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = 42;"
+      "function bar(s) { AssertInlineCount(2); return x + s; };"
+      "return (function (s,t) { return bar(s) + bar(t); });"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(2 * 42 + 12 + 4), T.Val(12), T.Val(4));
+}
+
+
+TEST(InlineTwiceDependent) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = 42;"
+      "function foo(s) { AssertInlineCount(2); return x + s; };"
+      "function bar(s,t) { return foo(foo(s)); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(42 + 42 + 12), T.Val(12), T.Val(4));
+}
+
+
+TEST(InlineTwiceDependentDiamond) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = 41;"
+      "function foo(s) { AssertInlineCount(2); if (s % 2 == 0) {"
+      "                  return x - s } else { return x + s; } };"
+      "function bar(s,t) { return foo(foo(s)); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(-11), T.Val(11), T.Val(4));
+}
+
+
+TEST(InlineTwiceDependentDiamondDifferent) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = 41;"
+      "function foo(s,t) { AssertInlineCount(2); if (s % 2 == 0) {"
+      "                    return x - s * t } else { return x + s * t; } };"
+      "function bar(s,t) { return foo(foo(s, 3), 5); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(-329), T.Val(11), T.Val(4));
+}
+
+
+TEST(InlineLoop) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = 41;"
+      "function foo(s) { AssertInlineCount(2); while (s > 0) {"
+      "                  s = s - 1; }; return s; };"
+      "function bar(s,t) { return foo(foo(s)); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(0.0), T.Val(11), T.Val(4));
+}
+
+
+TEST(InlineStrictIntoNonStrict) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = Object.create({}, { y: { value:42, writable:false } });"
+      "function foo(s) { 'use strict';"
+      "                   x.y = 9; };"
+      "function bar(s,t) { return foo(s); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckThrows(T.undefined(), T.undefined());
+}
+
+
+TEST(InlineNonStrictIntoStrict) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "var x = Object.create({}, { y: { value:42, writable:false } });"
+      "function foo(s) { x.y = 9; return x.y; };"
+      "function bar(s,t) { \'use strict\'; return foo(s); };"
+      "return bar;"
+      "})();",
+      CompilationInfo::kInliningEnabled |
+          CompilationInfo::kContextSpecializing |
+          CompilationInfo::kTypingEnabled);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(42), T.undefined(), T.undefined());
+}
+
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-run-intrinsics.cc b/test/cctest/compiler/test-run-intrinsics.cc
new file mode 100644
index 0000000..a1b5676
--- /dev/null
+++ b/test/cctest/compiler/test-run-intrinsics.cc
@@ -0,0 +1,211 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+
+TEST(IsSmi) {
+  FunctionTester T("(function(a) { return %_IsSmi(a); })");
+
+  T.CheckTrue(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckFalse(T.Val(-0.0));
+  T.CheckTrue(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsNonNegativeSmi) {
+  FunctionTester T("(function(a) { return %_IsNonNegativeSmi(a); })");
+
+  T.CheckTrue(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckFalse(T.Val(-0.0));
+  T.CheckFalse(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsMinusZero) {
+  FunctionTester T("(function(a) { return %_IsMinusZero(a); })");
+
+  T.CheckFalse(T.Val(1));
+  T.CheckFalse(T.Val(1.1));
+  T.CheckTrue(T.Val(-0.0));
+  T.CheckFalse(T.Val(-2));
+  T.CheckFalse(T.Val(-2.3));
+  T.CheckFalse(T.undefined());
+}
+
+
+TEST(IsArray) {
+  FunctionTester T("(function(a) { return %_IsArray(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckTrue(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsObject) {
+  FunctionTester T("(function(a) { return %_IsObject(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckTrue(T.NewObject("([1])"));
+  T.CheckTrue(T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckTrue(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsFunction) {
+  FunctionTester T("(function(a) { return %_IsFunction(a); })");
+
+  T.CheckTrue(T.NewObject("(function() {})"));
+  T.CheckFalse(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(IsRegExp) {
+  FunctionTester T("(function(a) { return %_IsRegExp(a); })");
+
+  T.CheckFalse(T.NewObject("(function() {})"));
+  T.CheckFalse(T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(/x/)"));
+  T.CheckFalse(T.undefined());
+  T.CheckFalse(T.null());
+  T.CheckFalse(T.Val("x"));
+  T.CheckFalse(T.Val(1));
+}
+
+
+TEST(ClassOf) {
+  FunctionTester T("(function(a) { return %_ClassOf(a); })");
+
+  T.CheckCall(T.Val("Function"), T.NewObject("(function() {})"));
+  T.CheckCall(T.Val("Array"), T.NewObject("([1])"));
+  T.CheckCall(T.Val("Object"), T.NewObject("({})"));
+  T.CheckCall(T.Val("RegExp"), T.NewObject("(/x/)"));
+  T.CheckCall(T.null(), T.undefined());
+  T.CheckCall(T.null(), T.null());
+  T.CheckCall(T.null(), T.Val("x"));
+  T.CheckCall(T.null(), T.Val(1));
+}
+
+
+TEST(ObjectEquals) {
+  FunctionTester T("(function(a,b) { return %_ObjectEquals(a,b); })");
+  CompileRun("var o = {}");
+
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+  T.CheckTrue(T.Val("internal"), T.Val("internal"));
+  T.CheckTrue(T.true_value(), T.true_value());
+  T.CheckFalse(T.true_value(), T.false_value());
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckFalse(T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ValueOf) {
+  FunctionTester T("(function(a) { return %_ValueOf(a); })");
+
+  T.CheckCall(T.Val("a"), T.Val("a"));
+  T.CheckCall(T.Val("b"), T.NewObject("(new String('b'))"));
+  T.CheckCall(T.Val(123), T.Val(123));
+  T.CheckCall(T.Val(456), T.NewObject("(new Number(456))"));
+}
+
+
+TEST(SetValueOf) {
+  FunctionTester T("(function(a,b) { return %_SetValueOf(a,b); })");
+
+  T.CheckCall(T.Val("a"), T.NewObject("(new String)"), T.Val("a"));
+  T.CheckCall(T.Val(123), T.NewObject("(new Number)"), T.Val(123));
+  T.CheckCall(T.Val("x"), T.undefined(), T.Val("x"));
+}
+
+
+TEST(StringCharFromCode) {
+  FunctionTester T("(function(a) { return %_StringCharFromCode(a); })");
+
+  T.CheckCall(T.Val("a"), T.Val(97));
+  T.CheckCall(T.Val("\xE2\x9D\x8A"), T.Val(0x274A));
+  T.CheckCall(T.Val(""), T.undefined());
+}
+
+
+TEST(StringCharAt) {
+  FunctionTester T("(function(a,b) { return %_StringCharAt(a,b); })");
+
+  T.CheckCall(T.Val("e"), T.Val("huge fan!"), T.Val(3));
+  T.CheckCall(T.Val("f"), T.Val("\xE2\x9D\x8A fan!"), T.Val(2));
+  T.CheckCall(T.Val(""), T.Val("not a fan!"), T.Val(23));
+}
+
+
+TEST(StringCharCodeAt) {
+  FunctionTester T("(function(a,b) { return %_StringCharCodeAt(a,b); })");
+
+  T.CheckCall(T.Val('e'), T.Val("huge fan!"), T.Val(3));
+  T.CheckCall(T.Val('f'), T.Val("\xE2\x9D\x8A fan!"), T.Val(2));
+  T.CheckCall(T.nan(), T.Val("not a fan!"), T.Val(23));
+}
+
+
+TEST(StringAdd) {
+  FunctionTester T("(function(a,b) { return %_StringAdd(a,b); })");
+
+  T.CheckCall(T.Val("aaabbb"), T.Val("aaa"), T.Val("bbb"));
+  T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(""));
+  T.CheckCall(T.Val("bbb"), T.Val(""), T.Val("bbb"));
+}
+
+
+TEST(StringSubString) {
+  FunctionTester T("(function(a,b) { return %_SubString(a,b,b+3); })");
+
+  T.CheckCall(T.Val("aaa"), T.Val("aaabbb"), T.Val(0.0));
+  T.CheckCall(T.Val("abb"), T.Val("aaabbb"), T.Val(2));
+  T.CheckCall(T.Val("aaa"), T.Val("aaa"), T.Val(0.0));
+}
+
+
+TEST(StringCompare) {
+  FunctionTester T("(function(a,b) { return %_StringCompare(a,b); })");
+
+  T.CheckCall(T.Val(-1), T.Val("aaa"), T.Val("bbb"));
+  T.CheckCall(T.Val(0.0), T.Val("bbb"), T.Val("bbb"));
+  T.CheckCall(T.Val(+1), T.Val("ccc"), T.Val("bbb"));
+}
+
+
+TEST(CallFunction) {
+  FunctionTester T("(function(a,b) { return %_CallFunction(a, 1, 2, 3, b); })");
+  CompileRun("function f(a,b,c) { return a + b + c + this.d; }");
+
+  T.CheckCall(T.Val(129), T.NewObject("({d:123})"), T.NewObject("f"));
+  T.CheckCall(T.Val("6x"), T.NewObject("({d:'x'})"), T.NewObject("f"));
+}
diff --git a/test/cctest/compiler/test-run-jsbranches.cc b/test/cctest/compiler/test-run-jsbranches.cc
new file mode 100644
index 0000000..df2fcdc
--- /dev/null
+++ b/test/cctest/compiler/test-run-jsbranches.cc
@@ -0,0 +1,282 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(Conditional) {
+  FunctionTester T("(function(a) { return a ? 23 : 42; })");
+
+  T.CheckCall(T.Val(23), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(42), T.false_value(), T.undefined());
+  T.CheckCall(T.Val(42), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(42), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(23), T.Val(999), T.undefined());
+  T.CheckCall(T.Val(23), T.Val("x"), T.undefined());
+}
+
+
+TEST(LogicalAnd) {
+  FunctionTester T("(function(a,b) { return a && b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.false_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(999), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(0.0), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("b"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(LogicalOr) {
+  FunctionTester T("(function(a,b) { return a || b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(777), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(999), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(LogicalEffect) {
+  FunctionTester T("(function(a,b) { a && (b = a); return b; })");
+
+  T.CheckCall(T.true_value(), T.true_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.false_value(), T.true_value());
+  T.CheckCall(T.true_value(), T.true_value(), T.false_value());
+  T.CheckCall(T.false_value(), T.false_value(), T.false_value());
+
+  T.CheckCall(T.Val(777), T.Val(777), T.Val(999));
+  T.CheckCall(T.Val(999), T.Val(0.0), T.Val(999));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(IfStatement) {
+  FunctionTester T("(function(a) { if (a) { return 1; } else { return 2; } })");
+
+  T.CheckCall(T.Val(1), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(2), T.false_value(), T.undefined());
+  T.CheckCall(T.Val(2), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(2), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(1), T.Val(999), T.undefined());
+  T.CheckCall(T.Val(1), T.Val("x"), T.undefined());
+}
+
+
+TEST(DoWhileStatement) {
+  FunctionTester T("(function(a,b) { do { a+=23; } while(a < b) return a; })");
+
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str23"), T.Val("str"), T.Val("str"));
+}
+
+
+TEST(WhileStatement) {
+  FunctionTester T("(function(a,b) { while(a < b) { a+=23; } return a; })");
+
+  T.CheckCall(T.Val(1), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str"), T.Val("str"), T.Val("str"));
+}
+
+
+TEST(ForStatement) {
+  FunctionTester T("(function(a,b) { for (; a < b; a+=23) {} return a; })");
+
+  T.CheckCall(T.Val(1), T.Val(1), T.Val(1));
+  T.CheckCall(T.Val(24), T.Val(1), T.Val(23));
+  T.CheckCall(T.Val(47), T.Val(1), T.Val(25));
+  T.CheckCall(T.Val("str"), T.Val("str"), T.Val("str"));
+}
+
+
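+// Shared driver for the for-in variants below: runs the given loop body over
+// assorted inputs and checks the last key enumerated, including the case
+// where a property is deleted while the iteration is in progress.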
+static void TestForIn(const char* code) {
+  FunctionTester T(code);
+  T.CheckCall(T.undefined(), T.undefined());
+  T.CheckCall(T.undefined(), T.null());
+  T.CheckCall(T.undefined(), T.NewObject("({})"));
+  T.CheckCall(T.undefined(), T.Val(1));
+  T.CheckCall(T.Val("2"), T.Val("str"));
+  T.CheckCall(T.Val("a"), T.NewObject("({'a' : 1})"));
+  T.CheckCall(T.Val("2"), T.NewObject("([1, 2, 3])"));
+  T.CheckCall(T.Val("a"), T.NewObject("({'a' : 1, 'b' : 1})"), T.Val("b"));
+  T.CheckCall(T.Val("1"), T.NewObject("([1, 2, 3])"), T.Val("2"));
+}
+
+
+TEST(ForInStatement) {
+  // Variable assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var last;"
+      "for (var x in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "  last = x;"
+      "}"
+      "return last;})");
+  // Indexed assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var array = [0, 1, undefined];"
+      "for (array[2] in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "}"
+      "return array[2];})");
+  // Named assignment.
+  TestForIn(
+      "(function(a, b) {"
+      "var obj = {'a' : undefined};"
+      "for (obj.a in a) {"
+      "  if (b) { delete a[b]; b = undefined; }"
+      "}"
+      "return obj.a;})");
+}
+
+
+TEST(ForInContinueStatement) {
+  const char* src =
+      "(function(a,b) {"
+      "  var r = '-';"
+      "  for (var x in a) {"
+      "    r += 'A-';"
+      "    if (b) continue;"
+      "    r += 'B-';"
+      "  }"
+      "  return r;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val("-A-B-"), T.NewObject("({x:1})"), T.false_value());
+  T.CheckCall(T.Val("-A-B-A-B-"), T.NewObject("({x:1,y:2})"), T.false_value());
+  T.CheckCall(T.Val("-A-"), T.NewObject("({x:1})"), T.true_value());
+  T.CheckCall(T.Val("-A-A-"), T.NewObject("({x:1,y:2})"), T.true_value());
+}
+
+
+TEST(SwitchStatement) {
+  const char* src =
+      "(function(a,b) {"
+      "  var r = '-';"
+      "  switch (a) {"
+      "    case 'x'    : r += 'X-';"
+      "    case b + 'b': r += 'B-';"
+      "    default     : r += 'D-';"
+      "    case 'y'    : r += 'Y-';"
+      "  }"
+      "  return r;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val("-X-B-D-Y-"), T.Val("x"), T.Val("B"));
+  T.CheckCall(T.Val("-B-D-Y-"), T.Val("Bb"), T.Val("B"));
+  T.CheckCall(T.Val("-D-Y-"), T.Val("z"), T.Val("B"));
+  T.CheckCall(T.Val("-Y-"), T.Val("y"), T.Val("B"));
+
+  CompileRun("var c = 0; var o = { toString:function(){return c++} };");
+  T.CheckCall(T.Val("-D-Y-"), T.Val("1b"), T.NewObject("o"));
+  T.CheckCall(T.Val("-B-D-Y-"), T.Val("1b"), T.NewObject("o"));
+  T.CheckCall(T.Val("-D-Y-"), T.Val("1b"), T.NewObject("o"));
+}
+
+
+TEST(BlockBreakStatement) {
+  FunctionTester T("(function(a,b) { L:{ if (a) break L; b=1; } return b; })");
+
+  T.CheckCall(T.Val(7), T.true_value(), T.Val(7));
+  T.CheckCall(T.Val(1), T.false_value(), T.Val(7));
+}
+
+
+TEST(BlockReturnStatement) {
+  FunctionTester T("(function(a,b) { L:{ if (a) b=1; return b; } })");
+
+  T.CheckCall(T.Val(1), T.true_value(), T.Val(7));
+  T.CheckCall(T.Val(7), T.false_value(), T.Val(7));
+}
+
+
+TEST(NestedIfConditional) {
+  FunctionTester T("(function(a,b) { if (a) { b = (b?b:7) + 1; } return b; })");
+
+  T.CheckCall(T.Val(4), T.false_value(), T.Val(4));
+  T.CheckCall(T.Val(6), T.true_value(), T.Val(5));
+  T.CheckCall(T.Val(8), T.true_value(), T.undefined());
+}
+
+
+TEST(NestedIfLogical) {
+  const char* src =
+      "(function(a,b) {"
+      "  if (a || b) { return 1; } else { return 2; }"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(1), T.true_value(), T.true_value());
+  T.CheckCall(T.Val(1), T.false_value(), T.true_value());
+  T.CheckCall(T.Val(1), T.true_value(), T.false_value());
+  T.CheckCall(T.Val(2), T.false_value(), T.false_value());
+  T.CheckCall(T.Val(1), T.Val(1.0), T.Val(1.0));
+  T.CheckCall(T.Val(1), T.Val(0.0), T.Val(1.0));
+  T.CheckCall(T.Val(1), T.Val(1.0), T.Val(0.0));
+  T.CheckCall(T.Val(2), T.Val(0.0), T.Val(0.0));
+}
+
+
+TEST(NestedIfElseFor) {
+  const char* src =
+      "(function(a,b) {"
+      "  if (!a) { return b - 3; } else { for (; a < b; a++); }"
+      "  return a;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(1), T.false_value(), T.Val(4));
+  T.CheckCall(T.Val(2), T.true_value(), T.Val(2));
+  T.CheckCall(T.Val(3), T.Val(3), T.Val(1));
+}
+
+
+TEST(NestedWhileWhile) {
+  const char* src =
+      "(function(a) {"
+      "  var i = a; while (false) while(false) return i;"
+      "  return i;"
+      "})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(2.0), T.Val(2.0), T.Val(-1.0));
+  T.CheckCall(T.Val(65.0), T.Val(65.0), T.Val(-1.0));
+}
+
+
+TEST(NestedForIf) {
+  FunctionTester T("(function(a,b) { for (; a > 1; a--) if (b) return 1; })");
+
+  T.CheckCall(T.Val(1), T.Val(3), T.true_value());
+  T.CheckCall(T.undefined(), T.Val(2), T.false_value());
+  T.CheckCall(T.undefined(), T.Val(1), T.null());
+}
+
+
+TEST(NestedForConditional) {
+  FunctionTester T("(function(a,b) { for (; a > 1; a--) return b ? 1 : 2; })");
+
+  T.CheckCall(T.Val(1), T.Val(3), T.true_value());
+  T.CheckCall(T.Val(2), T.Val(2), T.false_value());
+  T.CheckCall(T.undefined(), T.Val(1), T.null());
+}
diff --git a/test/cctest/compiler/test-run-jscalls.cc b/test/cctest/compiler/test-run-jscalls.cc
new file mode 100644
index 0000000..dec7194
--- /dev/null
+++ b/test/cctest/compiler/test-run-jscalls.cc
@@ -0,0 +1,289 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(SimpleCall) {
+  FunctionTester T("(function(foo,a) { return foo(a); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a) { return a; })");
+
+  T.CheckCall(T.Val(3), foo, T.Val(3));
+  T.CheckCall(T.Val(3.1), foo, T.Val(3.1));
+  T.CheckCall(foo, foo, foo);
+  T.CheckCall(T.Val("Abba"), foo, T.Val("Abba"));
+}
+
+
+TEST(SimpleCall2) {
+  FunctionTester T("(function(foo,a) { return foo(a); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a) { return a; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val(3), foo, T.Val(3));
+  T.CheckCall(T.Val(3.1), foo, T.Val(3.1));
+  T.CheckCall(foo, foo, foo);
+  T.CheckCall(T.Val("Abba"), foo, T.Val("Abba"));
+}
+
+
+TEST(ConstCall) {
+  FunctionTester T("(function(foo,a) { return foo(a,3); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a,b) { return a + b; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val(6), foo, T.Val(3));
+  T.CheckCall(T.Val(6.1), foo, T.Val(3.1));
+  T.CheckCall(T.Val("function (a,b) { return a + b; }3"), foo, foo);
+  T.CheckCall(T.Val("Abba3"), foo, T.Val("Abba"));
+}
+
+
+TEST(ConstCall2) {
+  FunctionTester T("(function(foo,a) { return foo(a,\"3\"); })");
+  Handle<JSFunction> foo = T.NewFunction("(function(a,b) { return a + b; })");
+  T.Compile(foo);
+
+  T.CheckCall(T.Val("33"), foo, T.Val(3));
+  T.CheckCall(T.Val("3.13"), foo, T.Val(3.1));
+  T.CheckCall(T.Val("function (a,b) { return a + b; }3"), foo, foo);
+  T.CheckCall(T.Val("Abba3"), foo, T.Val("Abba"));
+}
+
+
+TEST(PropertyNamedCall) {
+  FunctionTester T("(function(a,b) { return a.foo(b,23); })");
+  CompileRun("function foo(y,z) { return this.x + y + z; }");
+
+  T.CheckCall(T.Val(32), T.NewObject("({ foo:foo, x:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.NewObject("({ foo:foo, x:'x' })"), T.Val("y"));
+  T.CheckCall(T.nan(), T.NewObject("({ foo:foo, y:0 })"), T.Val(3));
+}
+
+
+TEST(PropertyKeyedCall) {
+  FunctionTester T("(function(a,b) { var f = 'foo'; return a[f](b,23); })");
+  CompileRun("function foo(y,z) { return this.x + y + z; }");
+
+  T.CheckCall(T.Val(32), T.NewObject("({ foo:foo, x:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.NewObject("({ foo:foo, x:'x' })"), T.Val("y"));
+  T.CheckCall(T.nan(), T.NewObject("({ foo:foo, y:0 })"), T.Val(3));
+}
+
+
+TEST(GlobalCall) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a,b) { return a + b + this.c; }");
+  CompileRun("var c = 23;");
+
+  T.CheckCall(T.Val(32), T.Val(4), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.Val("x"), T.Val("y"));
+  T.CheckCall(T.nan(), T.undefined(), T.Val(3));
+}
+
+
+TEST(LookupCall) {
+  FunctionTester T("(function(a,b) { with (a) { return foo(a,b); } })");
+
+  CompileRun("function f1(a,b) { return a.val + b; }");
+  T.CheckCall(T.Val(5), T.NewObject("({ foo:f1, val:2 })"), T.Val(3));
+  T.CheckCall(T.Val("xy"), T.NewObject("({ foo:f1, val:'x' })"), T.Val("y"));
+
+  CompileRun("function f2(a,b) { return this.val + b; }");
+  T.CheckCall(T.Val(9), T.NewObject("({ foo:f2, val:4 })"), T.Val(5));
+  T.CheckCall(T.Val("xy"), T.NewObject("({ foo:f2, val:'x' })"), T.Val("y"));
+}
+
+
+TEST(MismatchCallTooFew) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a,b,c) { return a + b + c; }");
+
+  T.CheckCall(T.nan(), T.Val(23), T.Val(42));
+  T.CheckCall(T.nan(), T.Val(4.2), T.Val(2.3));
+  T.CheckCall(T.Val("abundefined"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(MismatchCallTooMany) {
+  FunctionTester T("(function(a,b) { return foo(a,b); })");
+  CompileRun("function foo(a) { return a; }");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val(4.2), T.Val(4.2), T.Val(2.3));
+  T.CheckCall(T.Val("a"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ConstructorCall) {
+  FunctionTester T("(function(a,b) { return new foo(a,b).value; })");
+  CompileRun("function foo(a,b) { return { value: a + b + this.c }; }");
+  CompileRun("foo.prototype.c = 23;");
+
+  T.CheckCall(T.Val(32), T.Val(4), T.Val(5));
+  T.CheckCall(T.Val("xy23"), T.Val("x"), T.Val("y"));
+  T.CheckCall(T.nan(), T.undefined(), T.Val(3));
+}
+
+
+// TODO(titzer): factor these out into test-runtime-calls.cc
+TEST(RuntimeCallCPP1) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %ToBool(a); })");
+
+  T.CheckCall(T.true_value(), T.Val(23), T.undefined());
+  T.CheckCall(T.true_value(), T.Val(4.2), T.undefined());
+  T.CheckCall(T.true_value(), T.Val("str"), T.undefined());
+  T.CheckCall(T.true_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(0.0), T.undefined());
+}
+
+
+TEST(RuntimeCallCPP2) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a,b) { return %NumberAdd(a, b); })");
+
+  T.CheckCall(T.Val(65), T.Val(42), T.Val(23));
+  T.CheckCall(T.Val(19), T.Val(42), T.Val(-23));
+  T.CheckCall(T.Val(6.5), T.Val(4.2), T.Val(2.3));
+}
+
+
+TEST(RuntimeCallJS) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %ToString(a); })");
+
+  T.CheckCall(T.Val("23"), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("4.2"), T.Val(4.2), T.undefined());
+  T.CheckCall(T.Val("str"), T.Val("str"), T.undefined());
+  T.CheckCall(T.Val("true"), T.true_value(), T.undefined());
+  T.CheckCall(T.Val("false"), T.false_value(), T.undefined());
+  T.CheckCall(T.Val("undefined"), T.undefined(), T.undefined());
+}
+
+
+TEST(RuntimeCallInline) {
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a) { return %_IsObject(a); })");
+
+  T.CheckCall(T.false_value(), T.Val(23), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(4.2), T.undefined());
+  T.CheckCall(T.false_value(), T.Val("str"), T.undefined());
+  T.CheckCall(T.false_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.true_value(), T.NewObject("({})"), T.undefined());
+  T.CheckCall(T.true_value(), T.NewObject("([])"), T.undefined());
+}
+
+
+TEST(RuntimeCallBooleanize) {
+  // TODO(turbofan): %Booleanize will disappear; don't hesitate to remove this
+  // test case. The two-argument case is already covered by the test above.
+  FLAG_allow_natives_syntax = true;
+  FunctionTester T("(function(a,b) { return %Booleanize(a, b); })");
+
+  T.CheckCall(T.true_value(), T.Val(-1), T.Val(Token::LT));
+  T.CheckCall(T.false_value(), T.Val(-1), T.Val(Token::EQ));
+  T.CheckCall(T.false_value(), T.Val(-1), T.Val(Token::GT));
+
+  T.CheckCall(T.false_value(), T.Val(0.0), T.Val(Token::LT));
+  T.CheckCall(T.true_value(), T.Val(0.0), T.Val(Token::EQ));
+  T.CheckCall(T.false_value(), T.Val(0.0), T.Val(Token::GT));
+
+  T.CheckCall(T.false_value(), T.Val(1), T.Val(Token::LT));
+  T.CheckCall(T.false_value(), T.Val(1), T.Val(Token::EQ));
+  T.CheckCall(T.true_value(), T.Val(1), T.Val(Token::GT));
+}
+
+
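+// A direct eval evaluates in the caller's scope; once the global 'eval' is
+// replaced by an ordinary function, the call loses its eval semantics and the
+// receiver follows the usual (sloppy vs. strict) rules.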
+TEST(EvalCall) {
+  FunctionTester T("(function(a,b) { return eval(a); })");
+  Handle<JSObject> g(T.function->context()->global_object()->global_proxy());
+
+  T.CheckCall(T.Val(23), T.Val("17 + 6"), T.undefined());
+  T.CheckCall(T.Val("'Y'; a"), T.Val("'Y'; a"), T.Val("b-val"));
+  T.CheckCall(T.Val("b-val"), T.Val("'Y'; b"), T.Val("b-val"));
+  T.CheckCall(g, T.Val("this"), T.undefined());
+  T.CheckCall(g, T.Val("'use strict'; this"), T.undefined());
+
+  CompileRun("eval = function(x) { return x; }");
+  T.CheckCall(T.Val("17 + 6"), T.Val("17 + 6"), T.undefined());
+
+  CompileRun("eval = function(x) { return this; }");
+  T.CheckCall(g, T.Val("17 + 6"), T.undefined());
+
+  CompileRun("eval = function(x) { 'use strict'; return this; }");
+  T.CheckCall(T.undefined(), T.Val("17 + 6"), T.undefined());
+}
+
+
+TEST(ReceiverPatching) {
+  // TODO(turbofan): Note that this test only checks that the function prologue
+  // patches an undefined receiver to the global receiver. If this starts to
+  // fail once we fix the calling protocol, just remove this test.
+  FunctionTester T("(function(a) { return this; })");
+  Handle<JSObject> g(T.function->context()->global_object()->global_proxy());
+  T.CheckCall(g, T.undefined());
+}
+
+
+TEST(CallEval) {
+  FunctionTester T(
+      "var x = 42;"
+      "(function () {"
+      "function bar() { return eval('x') };"
+      "return bar;"
+      "})();");
+
+  T.CheckCall(T.Val(42), T.Val("x"), T.undefined());
+}
+
+
+TEST(ContextLoadedFromActivation) {
+  const char* script =
+      "var x = 42;"
+      "(function() {"
+      "  return function () { return x };"
+      "})()";
+
+  // Disable context specialization.
+  FunctionTester T(script);
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope scope(context);
+  v8::Local<v8::Value> value = CompileRun(script);
+  i::Handle<i::Object> ofun = v8::Utils::OpenHandle(*value);
+  i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
+  jsfun->set_code(T.function->code());
+  context->Global()->Set(v8_str("foo"), v8::Utils::ToLocal(jsfun));
+  CompileRun("var x = 24;");
+  ExpectInt32("foo();", 24);
+}
+
+
+TEST(BuiltinLoadedFromActivation) {
+  const char* script =
+      "var x = 42;"
+      "(function() {"
+      "  return function () { return this; };"
+      "})()";
+
+  // Disable context specialization.
+  FunctionTester T(script);
+  v8::Local<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope scope(context);
+  v8::Local<v8::Value> value = CompileRun(script);
+  i::Handle<i::Object> ofun = v8::Utils::OpenHandle(*value);
+  i::Handle<i::JSFunction> jsfun = Handle<JSFunction>::cast(ofun);
+  jsfun->set_code(T.function->code());
+  context->Global()->Set(v8_str("foo"), v8::Utils::ToLocal(jsfun));
+  CompileRun("var x = 24;");
+  ExpectObject("foo()", context->Global());
+}
diff --git a/test/cctest/compiler/test-run-jsexceptions.cc b/test/cctest/compiler/test-run-jsexceptions.cc
new file mode 100644
index 0000000..0712ab6
--- /dev/null
+++ b/test/cctest/compiler/test-run-jsexceptions.cc
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(Throw) {
+  FunctionTester T("(function(a,b) { if (a) { throw b; } else { return b; }})");
+
+  T.CheckThrows(T.true_value(), T.NewObject("new Error"));
+  T.CheckCall(T.Val(23), T.false_value(), T.Val(23));
+}
+
+
+TEST(ThrowSourcePosition) {
+  static const char* src =
+      "(function(a, b) {        \n"
+      "  if (a == 1) throw 1;   \n"
+      "  if (a == 2) {throw 2}  \n"
+      "  if (a == 3) {0;throw 3}\n"
+      "  throw 4;               \n"
+      "})                       ";
+  FunctionTester T(src);
+  v8::Handle<v8::Message> message;
+
+  message = T.CheckThrowsReturnMessage(T.Val(1), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(2, message->GetLineNumber());
+  CHECK_EQ(40, message->GetStartPosition());
+
+  message = T.CheckThrowsReturnMessage(T.Val(2), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(3, message->GetLineNumber());
+  CHECK_EQ(67, message->GetStartPosition());
+
+  message = T.CheckThrowsReturnMessage(T.Val(3), T.undefined());
+  CHECK(!message.IsEmpty());
+  CHECK_EQ(4, message->GetLineNumber());
+  CHECK_EQ(95, message->GetStartPosition());
+}
diff --git a/test/cctest/compiler/test-run-jsops.cc b/test/cctest/compiler/test-run-jsops.cc
new file mode 100644
index 0000000..eb39760
--- /dev/null
+++ b/test/cctest/compiler/test-run-jsops.cc
@@ -0,0 +1,524 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+TEST(BinopAdd) {
+  FunctionTester T("(function(a,b) { return a + b; })");
+
+  T.CheckCall(3, 1, 2);
+  T.CheckCall(-11, -2, -9);
+  T.CheckCall(-11, -1.5, -9.5);
+  T.CheckCall(T.Val("AB"), T.Val("A"), T.Val("B"));
+  T.CheckCall(T.Val("A11"), T.Val("A"), T.Val(11));
+  T.CheckCall(T.Val("12B"), T.Val(12), T.Val("B"));
+  T.CheckCall(T.Val("38"), T.Val("3"), T.Val("8"));
+  T.CheckCall(T.Val("31"), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.Val("3[object Object]"), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopSubtract) {
+  FunctionTester T("(function(a,b) { return a - b; })");
+
+  T.CheckCall(3, 4, 1);
+  T.CheckCall(3.0, 4.5, 1.5);
+  T.CheckCall(T.Val(-9), T.Val("0"), T.Val(9));
+  T.CheckCall(T.Val(-9), T.Val(0.0), T.Val("9"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(2), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopMultiply) {
+  FunctionTester T("(function(a,b) { return a * b; })");
+
+  T.CheckCall(6, 3, 2);
+  T.CheckCall(4.5, 2.0, 2.25);
+  T.CheckCall(T.Val(6), T.Val("3"), T.Val(2));
+  T.CheckCall(T.Val(4.5), T.Val(2.0), T.Val("2.25"));
+  T.CheckCall(T.Val(6), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(3), T.Val("3"), T.NewObject("([1])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopDivide) {
+  FunctionTester T("(function(a,b) { return a / b; })");
+
+  T.CheckCall(2, 8, 4);
+  T.CheckCall(2.1, 8.4, 4);
+  T.CheckCall(V8_INFINITY, 8, 0);
+  T.CheckCall(-V8_INFINITY, -8, 0);
+  T.CheckCall(T.infinity(), T.Val(8), T.Val("0"));
+  T.CheckCall(T.minus_infinity(), T.Val("-8"), T.Val(0.0));
+  T.CheckCall(T.Val(1.5), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(1.5), T.Val("3"), T.NewObject("([2])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopModulus) {
+  FunctionTester T("(function(a,b) { return a % b; })");
+
+  T.CheckCall(3, 8, 5);
+  T.CheckCall(T.Val(3), T.Val("8"), T.Val(5));
+  T.CheckCall(T.Val(3), T.Val(8), T.Val("5"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.Val("2"));
+  T.CheckCall(T.nan(), T.Val("3"), T.Val("B"));
+  T.CheckCall(T.Val(1), T.Val("3"), T.NewObject("([2])"));
+  T.CheckCall(T.nan(), T.Val("3"), T.NewObject("({})"));
+}
+
+
+TEST(BinopShiftLeft) {
+  FunctionTester T("(function(a,b) { return a << b; })");
+
+  T.CheckCall(4, 2, 1);
+  T.CheckCall(T.Val(4), T.Val("2"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(2), T.Val("1"));
+}
+
+
+TEST(BinopShiftRight) {
+  FunctionTester T("(function(a,b) { return a >> b; })");
+
+  T.CheckCall(4, 8, 1);
+  T.CheckCall(-4, -8, 1);
+  T.CheckCall(T.Val(4), T.Val("8"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(8), T.Val("1"));
+}
+
+
+TEST(BinopShiftRightLogical) {
+  FunctionTester T("(function(a,b) { return a >>> b; })");
+
+  T.CheckCall(4, 8, 1);
+  T.CheckCall(0x7ffffffc, -8, 1);
+  T.CheckCall(T.Val(4), T.Val("8"), T.Val(1));
+  T.CheckCall(T.Val(4), T.Val(8), T.Val("1"));
+}
+
+
+TEST(BinopAnd) {
+  FunctionTester T("(function(a,b) { return a & b; })");
+
+  T.CheckCall(7, 7, 15);
+  T.CheckCall(7, 15, 7);
+  T.CheckCall(T.Val(7), T.Val("15"), T.Val(7));
+  T.CheckCall(T.Val(7), T.Val(15), T.Val("7"));
+}
+
+
+TEST(BinopOr) {
+  FunctionTester T("(function(a,b) { return a | b; })");
+
+  T.CheckCall(6, 4, 2);
+  T.CheckCall(6, 2, 4);
+  T.CheckCall(T.Val(6), T.Val("2"), T.Val(4));
+  T.CheckCall(T.Val(6), T.Val(2), T.Val("4"));
+}
+
+
+TEST(BinopXor) {
+  FunctionTester T("(function(a,b) { return a ^ b; })");
+
+  T.CheckCall(7, 15, 8);
+  T.CheckCall(7, 8, 15);
+  T.CheckCall(T.Val(7), T.Val("8"), T.Val(15));
+  T.CheckCall(T.Val(7), T.Val(8), T.Val("15"));
+}
+
+
+TEST(BinopStrictEqual) {
+  FunctionTester T("(function(a,b) { return a === b; })");
+
+  T.CheckTrue(7, 7);
+  T.CheckFalse(7, 8);
+  T.CheckTrue(7.1, 7.1);
+  T.CheckFalse(7.1, 8.1);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7), T.undefined());
+  T.CheckFalse(T.undefined(), T.Val(7));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckFalse(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopEqual) {
+  FunctionTester T("(function(a,b) { return a == b; })");
+
+  T.CheckTrue(7, 7);
+  T.CheckFalse(7, 8);
+  T.CheckTrue(7.1, 7.1);
+  T.CheckFalse(7.1, 8.1);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckFalse(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckFalse(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckTrue(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopNotEqual) {
+  FunctionTester T("(function(a,b) { return a != b; })");
+
+  T.CheckFalse(7, 7);
+  T.CheckTrue(7, 8);
+  T.CheckFalse(7.1, 7.1);
+  T.CheckTrue(7.1, 8.1);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+
+  CompileRun("var o = { desc : 'I am a singleton' }");
+  T.CheckTrue(T.NewObject("([1])"), T.NewObject("([1])"));
+  T.CheckTrue(T.NewObject("({})"), T.NewObject("({})"));
+  T.CheckFalse(T.NewObject("(o)"), T.NewObject("(o)"));
+}
+
+
+TEST(BinopLessThan) {
+  FunctionTester T("(function(a,b) { return a < b; })");
+
+  T.CheckTrue(7, 8);
+  T.CheckFalse(8, 7);
+  T.CheckTrue(-8.1, -8);
+  T.CheckFalse(-8, -8.1);
+  T.CheckFalse(0.111, 0.111);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("6.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopLessThanEqual) {
+  FunctionTester T("(function(a,b) { return a <= b; })");
+
+  T.CheckTrue(7, 8);
+  T.CheckFalse(8, 7);
+  T.CheckTrue(-8.1, -8);
+  T.CheckFalse(-8, -8.1);
+  T.CheckTrue(0.111, 0.111);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("6.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopGreaterThan) {
+  FunctionTester T("(function(a,b) { return a > b; })");
+
+  T.CheckFalse(7, 8);
+  T.CheckTrue(8, 7);
+  T.CheckFalse(-8.1, -8);
+  T.CheckTrue(-8, -8.1);
+  T.CheckFalse(0.111, 0.111);
+
+  T.CheckFalse(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("6.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopGreaterThanOrEqual) {
+  FunctionTester T("(function(a,b) { return a >= b; })");
+
+  T.CheckFalse(7, 8);
+  T.CheckTrue(8, 7);
+  T.CheckFalse(-8.1, -8);
+  T.CheckTrue(-8, -8.1);
+  T.CheckTrue(0.111, 0.111);
+
+  T.CheckTrue(T.Val("7.1"), T.Val("7.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("6.1"));
+  T.CheckTrue(T.Val(7.1), T.Val("7.1"));
+  T.CheckFalse(T.Val(7.1), T.Val("8.1"));
+}
+
+
+TEST(BinopIn) {
+  FunctionTester T("(function(a,b) { return a in b; })");
+
+  T.CheckTrue(T.Val("x"), T.NewObject("({x:23})"));
+  T.CheckFalse(T.Val("y"), T.NewObject("({x:42})"));
+  T.CheckFalse(T.Val(123), T.NewObject("({x:65})"));
+  T.CheckTrue(T.Val(1), T.NewObject("([1,2,3])"));
+}
+
+
+TEST(BinopInstanceOf) {
+  FunctionTester T("(function(a,b) { return a instanceof b; })");
+
+  T.CheckTrue(T.NewObject("(new Number(23))"), T.NewObject("Number"));
+  T.CheckFalse(T.NewObject("(new Number(23))"), T.NewObject("String"));
+  T.CheckFalse(T.NewObject("(new String('a'))"), T.NewObject("Number"));
+  T.CheckTrue(T.NewObject("(new String('b'))"), T.NewObject("String"));
+  T.CheckFalse(T.Val(1), T.NewObject("Number"));
+  T.CheckFalse(T.Val("abc"), T.NewObject("String"));
+
+  CompileRun("var bound = (function() {}).bind(undefined)");
+  T.CheckTrue(T.NewObject("(new bound())"), T.NewObject("bound"));
+  T.CheckTrue(T.NewObject("(new bound())"), T.NewObject("Object"));
+  T.CheckFalse(T.NewObject("(new bound())"), T.NewObject("Number"));
+}
+
+
+TEST(UnopNot) {
+  FunctionTester T("(function(a) { return !a; })");
+
+  T.CheckCall(T.true_value(), T.false_value(), T.undefined());
+  T.CheckCall(T.false_value(), T.true_value(), T.undefined());
+  T.CheckCall(T.true_value(), T.Val(0.0), T.undefined());
+  T.CheckCall(T.false_value(), T.Val(123), T.undefined());
+  T.CheckCall(T.false_value(), T.Val("x"), T.undefined());
+  T.CheckCall(T.true_value(), T.undefined(), T.undefined());
+  T.CheckCall(T.true_value(), T.nan(), T.undefined());
+}
+
+
+TEST(UnopCountPost) {
+  FunctionTester T("(function(a) { return a++; })");
+
+  T.CheckCall(T.Val(0.0), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(2.3), T.Val(2.3), T.undefined());
+  T.CheckCall(T.Val(123), T.Val(123), T.undefined());
+  T.CheckCall(T.Val(7), T.Val("7"), T.undefined());
+  T.CheckCall(T.nan(), T.Val("x"), T.undefined());
+  T.CheckCall(T.nan(), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(1.0), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(0.0), T.false_value(), T.undefined());
+  T.CheckCall(T.nan(), T.nan(), T.undefined());
+}
+
+
+TEST(UnopCountPre) {
+  FunctionTester T("(function(a) { return ++a; })");
+
+  T.CheckCall(T.Val(1.0), T.Val(0.0), T.undefined());
+  T.CheckCall(T.Val(3.3), T.Val(2.3), T.undefined());
+  T.CheckCall(T.Val(124), T.Val(123), T.undefined());
+  T.CheckCall(T.Val(8), T.Val("7"), T.undefined());
+  T.CheckCall(T.nan(), T.Val("x"), T.undefined());
+  T.CheckCall(T.nan(), T.undefined(), T.undefined());
+  T.CheckCall(T.Val(2.0), T.true_value(), T.undefined());
+  T.CheckCall(T.Val(1.0), T.false_value(), T.undefined());
+  T.CheckCall(T.nan(), T.nan(), T.undefined());
+}
+
+
+TEST(PropertyNamedLoad) {
+  FunctionTester T("(function(a,b) { return a.x; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.undefined());
+  T.CheckCall(T.undefined(), T.NewObject("({y:23})"), T.undefined());
+}
+
+
+TEST(PropertyKeyedLoad) {
+  FunctionTester T("(function(a,b) { return a[b]; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.Val("x"));
+  T.CheckCall(T.Val(42), T.NewObject("([23,42,65])"), T.Val(1));
+  T.CheckCall(T.undefined(), T.NewObject("({x:23})"), T.Val("y"));
+  T.CheckCall(T.undefined(), T.NewObject("([23,42,65])"), T.Val(4));
+}
+
+
+TEST(PropertyNamedStore) {
+  FunctionTester T("(function(a) { a.x = 7; return a.x; })");
+
+  T.CheckCall(T.Val(7), T.NewObject("({})"), T.undefined());
+  T.CheckCall(T.Val(7), T.NewObject("({x:23})"), T.undefined());
+}
+
+
+TEST(PropertyKeyedStore) {
+  FunctionTester T("(function(a,b) { a[b] = 7; return a.x; })");
+
+  T.CheckCall(T.Val(7), T.NewObject("({})"), T.Val("x"));
+  T.CheckCall(T.Val(7), T.NewObject("({x:23})"), T.Val("x"));
+  T.CheckCall(T.Val(9), T.NewObject("({x:9})"), T.Val("y"));
+}
+
+
+TEST(PropertyNamedDelete) {
+  FunctionTester T("(function(a) { return delete a.x; })");
+
+  CompileRun("var o = Object.create({}, { x: { value:23 } });");
+  T.CheckTrue(T.NewObject("({x:42})"), T.undefined());
+  T.CheckTrue(T.NewObject("({})"), T.undefined());
+  T.CheckFalse(T.NewObject("(o)"), T.undefined());
+}
+
+
+TEST(PropertyKeyedDelete) {
+  FunctionTester T("(function(a, b) { return delete a[b]; })");
+
+  CompileRun("function getX() { return 'x'; }");
+  CompileRun("var o = Object.create({}, { x: { value:23 } });");
+  T.CheckTrue(T.NewObject("({x:42})"), T.Val("x"));
+  T.CheckFalse(T.NewObject("(o)"), T.Val("x"));
+  T.CheckFalse(T.NewObject("(o)"), T.NewObject("({toString:getX})"));
+}
+
+
+TEST(GlobalLoad) {
+  FunctionTester T("(function() { return g; })");
+
+  T.CheckThrows(T.undefined(), T.undefined());
+  CompileRun("var g = 23;");
+  T.CheckCall(T.Val(23));
+}
+
+
+TEST(GlobalStoreSloppy) {
+  FunctionTester T("(function(a,b) { g = a + b; return g; })");
+
+  T.CheckCall(T.Val(33), T.Val(22), T.Val(11));
+  CompileRun("delete g");
+  CompileRun("const g = 23");
+  T.CheckCall(T.Val(23), T.Val(55), T.Val(44));
+}
+
+
+TEST(GlobalStoreStrict) {
+  FunctionTester T("(function(a,b) { 'use strict'; g = a + b; return g; })");
+
+  T.CheckThrows(T.Val(22), T.Val(11));
+  CompileRun("var g = 'a global variable';");
+  T.CheckCall(T.Val(33), T.Val(22), T.Val(11));
+}
+
+
+TEST(ContextLoad) {
+  FunctionTester T("(function(a,b) { (function(){a}); return a + b; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ContextStore) {
+  FunctionTester T("(function(a,b) { (function(){x}); var x = a; return x; })");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("a"), T.Val("a"), T.undefined());
+}
+
+
+TEST(LookupLoad) {
+  FunctionTester T("(function(a,b) { with(a) { return x + b; } })");
+
+  T.CheckCall(T.Val(24), T.NewObject("({x:23})"), T.Val(1));
+  T.CheckCall(T.Val(32), T.NewObject("({x:23, b:9})"), T.Val(2));
+  T.CheckCall(T.Val(45), T.NewObject("({__proto__:{x:42}})"), T.Val(3));
+  T.CheckCall(T.Val(69), T.NewObject("({get x() { return 65; }})"), T.Val(4));
+}
+
+
+TEST(LookupStore) {
+  FunctionTester T("(function(a,b) { var x; with(a) { x = b; } return x; })");
+
+  T.CheckCall(T.undefined(), T.NewObject("({x:23})"), T.Val(1));
+  T.CheckCall(T.Val(2), T.NewObject("({y:23})"), T.Val(2));
+  T.CheckCall(T.Val(23), T.NewObject("({b:23})"), T.Val(3));
+  T.CheckCall(T.undefined(), T.NewObject("({__proto__:{x:42}})"), T.Val(4));
+}
+
+
+TEST(BlockLoadStore) {
+  FLAG_harmony_scoping = true;
+  FunctionTester T("(function(a) { 'use strict'; { let x = a+a; return x; }})");
+
+  T.CheckCall(T.Val(46), T.Val(23));
+  T.CheckCall(T.Val("aa"), T.Val("a"));
+}
+
+
+TEST(BlockLoadStoreNested) {
+  FLAG_harmony_scoping = true;
+  const char* src =
+      "(function(a,b) {"
+      "'use strict';"
+      "{ let x = a, y = a;"
+      "  { let y = b;"
+      "    return x + y;"
+      "  }"
+      "}})";
+  FunctionTester T(src);
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralComputed) {
+  FunctionTester T("(function(a,b) { o = { x:a+b }; return o.x; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralNonString) {
+  FunctionTester T("(function(a,b) { o = { 7:a+b }; return o[7]; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(ObjectLiteralPrototype) {
+  FunctionTester T("(function(a) { o = { __proto__:a }; return o.x; })");
+
+  T.CheckCall(T.Val(23), T.NewObject("({x:23})"), T.undefined());
+  T.CheckCall(T.undefined(), T.NewObject("({y:42})"), T.undefined());
+}
+
+
+TEST(ObjectLiteralGetter) {
+  FunctionTester T("(function(a) { o = { get x() {return a} }; return o.x; })");
+
+  T.CheckCall(T.Val(23), T.Val(23), T.undefined());
+  T.CheckCall(T.Val("x"), T.Val("x"), T.undefined());
+}
+
+
+TEST(ArrayLiteral) {
+  FunctionTester T("(function(a,b) { o = [1, a + b, 3]; return o[1]; })");
+
+  T.CheckCall(T.Val(65), T.Val(23), T.Val(42));
+  T.CheckCall(T.Val("ab"), T.Val("a"), T.Val("b"));
+}
+
+
+TEST(RegExpLiteral) {
+  FunctionTester T("(function(a) { o = /b/; return o.test(a); })");
+
+  T.CheckTrue(T.Val("abc"));
+  T.CheckFalse(T.Val("xyz"));
+}
diff --git a/test/cctest/compiler/test-run-machops.cc b/test/cctest/compiler/test-run-machops.cc
new file mode 100644
index 0000000..985e0f8
--- /dev/null
+++ b/test/cctest/compiler/test-run-machops.cc
@@ -0,0 +1,4245 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <functional>
+#include <limits>
+
+#include "src/base/bits.h"
+#include "src/compiler/generic-node-inl.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+#if V8_TURBOFAN_TARGET
+
+using namespace v8::base;
+
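+// Compares 32-bit values through their int32_t bit patterns, reusing
+// CHECK_EQ's signed overload and avoiding signed/unsigned mismatches.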
+#define CHECK_UINT32_EQ(x, y) \
+  CHECK_EQ(static_cast<int32_t>(x), static_cast<int32_t>(y))
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+typedef RawMachineAssembler::Label MLabel;
+
+TEST(RunInt32Add) {
+  RawMachineAssemblerTester<int32_t> m;
+  Node* add = m.Int32Add(m.Int32Constant(0), m.Int32Constant(1));
+  m.Return(add);
+  CHECK_EQ(1, m.Call());
+}
+
+
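+// Supplies a variety of operand shapes (parameters, small and large
+// constants, and a load) so binop code generation sees different cases.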
+static Node* Int32Input(RawMachineAssemblerTester<int32_t>* m, int index) {
+  switch (index) {
+    case 0:
+      return m->Parameter(0);
+    case 1:
+      return m->Parameter(1);
+    case 2:
+      return m->Int32Constant(0);
+    case 3:
+      return m->Int32Constant(1);
+    case 4:
+      return m->Int32Constant(-1);
+    case 5:
+      return m->Int32Constant(0xff);
+    case 6:
+      return m->Int32Constant(0x01234567);
+    case 7:
+      return m->Load(kMachInt32, m->PointerConstant(NULL));
+    default:
+      return NULL;
+  }
+}
+
+
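+// Only checks that code generation succeeds for each operator/operand
+// combination; the generated code is not executed.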
+TEST(CodeGenInt32Binop) {
+  RawMachineAssemblerTester<void> m;
+
+  const Operator* ops[] = {
+      m.machine()->Word32And(),      m.machine()->Word32Or(),
+      m.machine()->Word32Xor(),      m.machine()->Word32Shl(),
+      m.machine()->Word32Shr(),      m.machine()->Word32Sar(),
+      m.machine()->Word32Equal(),    m.machine()->Int32Add(),
+      m.machine()->Int32Sub(),       m.machine()->Int32Mul(),
+      m.machine()->Int32Div(),       m.machine()->Int32UDiv(),
+      m.machine()->Int32Mod(),       m.machine()->Int32UMod(),
+      m.machine()->Int32LessThan(),  m.machine()->Int32LessThanOrEqual(),
+      m.machine()->Uint32LessThan(), m.machine()->Uint32LessThanOrEqual(),
+      NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    for (int j = 0; j < 8; j++) {
+      for (int k = 0; k < 8; k++) {
+        RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+        Node* a = Int32Input(&m, j);
+        Node* b = Int32Input(&m, k);
+        m.Return(m.NewNode(ops[i], a, b));
+        m.GenerateCode();
+      }
+    }
+  }
+}
+
+
+TEST(RunGoto) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 99999;
+
+  MLabel next;
+  m.Goto(&next);
+  m.Bind(&next);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunGotoMultiple) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 9999977;
+
+  MLabel labels[10];
+  for (size_t i = 0; i < arraysize(labels); i++) {
+    m.Goto(&labels[i]);
+    m.Bind(&labels[i]);
+  }
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunBranch) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 999777;
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(0 - constant));
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunRedundantBranch1) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 944777;
+
+  MLabel blocka;
+  m.Branch(m.Int32Constant(0), &blocka, &blocka);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunRedundantBranch2) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 955777;
+
+  MLabel blocka, blockb;
+  m.Branch(m.Int32Constant(0), &blocka, &blocka);
+  m.Bind(&blockb);
+  m.Goto(&blocka);
+  m.Bind(&blocka);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunRedundantBranch3) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 966777;
+
+  MLabel blocka, blockb, blockc;
+  m.Branch(m.Int32Constant(0), &blocka, &blockc);
+  m.Bind(&blocka);
+  m.Branch(m.Int32Constant(0), &blockb, &blockb);
+  m.Bind(&blockc);
+  m.Goto(&blockb);
+  m.Bind(&blockb);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunDiamond2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int constant = 995666;
+
+  MLabel blocka, blockb, end;
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
+TEST(RunLoop) {
+  RawMachineAssemblerTester<int32_t> m;
+  int constant = 999555;
+
+  MLabel header, body, exit;
+  m.Goto(&header);
+  m.Bind(&header);
+  m.Branch(m.Int32Constant(0), &body, &exit);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(&exit);
+  m.Return(m.Int32Constant(constant));
+
+  CHECK_EQ(constant, m.Call());
+}
+
+
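+// Builds an if/else diamond that merges at a phi choosing between true_node
+// and false_node according to cond_node.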
+template <typename R>
+static void BuildDiamondPhi(RawMachineAssemblerTester<R>* m, Node* cond_node,
+                            MachineType type, Node* true_node,
+                            Node* false_node) {
+  MLabel blocka, blockb;
+  MLabel* end = m->Exit();
+  m->Branch(cond_node, &blocka, &blockb);
+  m->Bind(&blocka);
+  m->Goto(end);
+  m->Bind(&blockb);
+  m->Goto(end);
+
+  m->Bind(end);
+  Node* phi = m->Phi(type, true_node, false_node);
+  m->Return(phi);
+}
+
+
+TEST(RunDiamondPhiConst) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  int false_val = 0xFF666;
+  int true_val = 0x00DDD;
+  Node* true_node = m.Int32Constant(true_val);
+  Node* false_node = m.Int32Constant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), kMachInt32, true_node, false_node);
+  CHECK_EQ(false_val, m.Call(0));
+  CHECK_EQ(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiNumber) {
+  RawMachineAssemblerTester<Object*> m(kMachInt32);
+  double false_val = -11.1;
+  double true_val = 200.1;
+  Node* true_node = m.NumberConstant(true_val);
+  Node* false_node = m.NumberConstant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), kMachAnyTagged, true_node, false_node);
+  m.CheckNumber(false_val, m.Call(0));
+  m.CheckNumber(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiString) {
+  RawMachineAssemblerTester<Object*> m(kMachInt32);
+  const char* false_val = "false";
+  const char* true_val = "true";
+  Node* true_node = m.StringConstant(true_val);
+  Node* false_node = m.StringConstant(false_val);
+  BuildDiamondPhi(&m, m.Parameter(0), kMachAnyTagged, true_node, false_node);
+  m.CheckString(false_val, m.Call(0));
+  m.CheckString(true_val, m.Call(1));
+}
+
+
+TEST(RunDiamondPhiParam) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+  BuildDiamondPhi(&m, m.Parameter(0), kMachInt32, m.Parameter(1),
+                  m.Parameter(2));
+  int32_t c1 = 0x260cb75a;
+  int32_t c2 = 0xcd3e9c8b;
+  int result = m.Call(0, c1, c2);
+  CHECK_EQ(c2, result);
+  result = m.Call(1, c1, c2);
+  CHECK_EQ(c1, result);
+}
+
+
+TEST(RunLoopPhiConst) {
+  RawMachineAssemblerTester<int32_t> m;
+  int true_val = 0x44000;
+  int false_val = 0x00888;
+
+  Node* cond_node = m.Int32Constant(0);
+  Node* true_node = m.Int32Constant(true_val);
+  Node* false_node = m.Int32Constant(false_val);
+
+  // x = false_val; while(false) { x = true_val; } return x;
+  MLabel body, header;
+  MLabel* end = m.Exit();
+
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachInt32, false_node, true_node);
+  m.Branch(cond_node, &body, end);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunLoopPhiParam) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+
+  MLabel blocka, blockb;
+  MLabel* end = m.Exit();
+
+  m.Goto(&blocka);
+
+  m.Bind(&blocka);
+  Node* phi = m.Phi(kMachInt32, m.Parameter(1), m.Parameter(2));
+  Node* cond = m.Phi(kMachInt32, m.Parameter(0), m.Int32Constant(0));
+  m.Branch(cond, &blockb, end);
+
+  m.Bind(&blockb);
+  m.Goto(&blocka);
+
+  m.Bind(end);
+  m.Return(phi);
+
+  int32_t c1 = 0xa81903b4;
+  int32_t c2 = 0x5a1207da;
+  int result = m.Call(0, c1, c2);
+  CHECK_EQ(c1, result);
+  result = m.Call(1, c1, c2);
+  CHECK_EQ(c2, result);
+}
+
+
+TEST(RunLoopPhiInduction) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int false_val = 0x10777;
+
+  // x = false_val; while(false) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* false_node = m.Int32Constant(false_val);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachInt32, false_node, false_node);
+  m.Branch(m.Int32Constant(0), &body, end);
+
+  m.Bind(&body);
+  Node* add = m.Int32Add(phi, m.Int32Constant(1));
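+  // Patch the phi's back-edge input now that the loop body value exists.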
+  phi->ReplaceInput(1, add);
+  m.Goto(&header);
+
+  m.Bind(end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunLoopIncrement) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x ^ param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachInt32, zero, zero);
+  m.Branch(m.WordXor(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+}
+
+
+TEST(RunLoopIncrement2) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x < param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachInt32, zero, zero);
+  m.Branch(m.Int32LessThan(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+  CHECK_EQ(0, bt.call(-200, 0));
+}
+
+
+TEST(RunLoopIncrement3) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = 0; while(x < param) { x++; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* zero = m.Int32Constant(0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachInt32, zero, zero);
+  m.Branch(m.Uint32LessThan(phi, bt.param0), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Add(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(11, bt.call(11, 0));
+  CHECK_EQ(110, bt.call(110, 0));
+  CHECK_EQ(176, bt.call(176, 0));
+  CHECK_EQ(200, bt.call(200, 0));
+}
+
+
+TEST(RunLoopDecrement) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  // x = param; while(x) { x--; } return x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachInt32, bt.param0, m.Int32Constant(0));
+  m.Branch(phi, &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Int32Sub(phi, m.Int32Constant(1)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  bt.AddReturn(phi);
+
+  CHECK_EQ(0, bt.call(11, 0));
+  CHECK_EQ(0, bt.call(110, 0));
+  CHECK_EQ(0, bt.call(197, 0));
+}
+
+
+TEST(RunLoopIncrementFloat64) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  // x = -3.0; while(x < 10) { x = x + 0.5; } return (int) x;
+  MLabel header, body;
+  MLabel* end = m.Exit();
+  Node* minus_3 = m.Float64Constant(-3.0);
+  Node* ten = m.Float64Constant(10.0);
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachFloat64, minus_3, ten);
+  m.Branch(m.Float64LessThan(phi, ten), &body, end);
+
+  m.Bind(&body);
+  phi->ReplaceInput(1, m.Float64Add(phi, m.Float64Constant(0.5)));
+  m.Goto(&header);
+
+  m.Bind(end);
+  m.Return(m.ChangeFloat64ToInt32(phi));
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunLoadInt32) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int32_t p1 = 0;  // loads directly from this location.
+  m.Return(m.LoadFromPointer(&p1, kMachInt32));
+
+  FOR_INT32_INPUTS(i) {
+    p1 = *i;
+    CHECK_EQ(p1, m.Call());
+  }
+}
+
+
+TEST(RunLoadInt32Offset) {
+  int32_t p1 = 0;  // loads directly from this location.
+
+  int32_t offsets[] = {-2000000, -100, -101, 1,          3,
+                       7,        120,  2000, 2000000000, 0xff};
+
+  for (size_t i = 0; i < arraysize(offsets); i++) {
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t offset = offsets[i];
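+    // Bias the base pointer by -offset so that [base + offset] is &p1.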
+    byte* pointer = reinterpret_cast<byte*>(&p1) - offset;
+    // generate load [#base + #index]
+    m.Return(m.LoadFromPointer(pointer, kMachInt32, offset));
+
+    FOR_INT32_INPUTS(j) {
+      p1 = *j;
+      CHECK_EQ(p1, m.Call());
+    }
+  }
+}
+
+
+TEST(RunLoadStoreFloat64Offset) {
+  double p1 = 0;  // loads directly from this location.
+  double p2 = 0;  // and stores directly into this location.
+
+  FOR_INT32_INPUTS(i) {
+    int32_t magic = 0x2342aabb + *i * 3;
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t offset = *i;
+    byte* from = reinterpret_cast<byte*>(&p1) - offset;
+    byte* to = reinterpret_cast<byte*>(&p2) - offset;
+    // generate load [#base + #index]
+    Node* load =
+        m.Load(kMachFloat64, m.PointerConstant(from), m.Int32Constant(offset));
+    m.Store(kMachFloat64, m.PointerConstant(to), m.Int32Constant(offset), load);
+    m.Return(m.Int32Constant(magic));
+
+    FOR_FLOAT64_INPUTS(j) {
+      p1 = *j;
+      p2 = *j - 5;
+      CHECK_EQ(magic, m.Call());
+      CHECK_EQ(p1, p2);
+    }
+  }
+}
+
+
+TEST(RunInt32AddP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+
+  bt.AddReturn(m.Int32Add(bt.param0, bt.param1));
+
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      // Use uint32_t because signed overflow is UB in C.
+      int32_t expected = static_cast<int32_t>(static_cast<uint32_t>(*i) +
+                                              static_cast<uint32_t>(*j));
+      CHECK_EQ(expected, bt.call(*i, *j));
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
+    m.Return(m.Int32Add(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_UINT32_INPUTS(k) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (static_cast<uint32_t>(*j) << shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
+    m.Return(m.Int32Add(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_UINT32_INPUTS(k) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (static_cast<uint32_t>(*i) << shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+    m.Return(m.Int32Add(m.Parameter(0),
+                        m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i + (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+    m.Return(m.Int32Add(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_UINT32_INPUTS(k) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) + *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddInBranch) {
+  static const int32_t constant = 987654321;
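+  // Exercises an add feeding a branch condition: (in)equality against zero,
+  // with parameter, immediate, and shifted operands.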
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i + *j) == 0 ? constant : 0 - constant;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i + *j) != 0 ? constant : 0 - constant;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    const Operator* shops[] = {m.machine()->Word32Sar(),
+                               m.machine()->Word32Shl(),
+                               m.machine()->Word32Shr()};
+    for (size_t n = 0; n < arraysize(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+                                           kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Add(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_SHIFTS(shift) {
+            int32_t right;
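+            // Compute the expected value of the shift in C++.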
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i + right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32AddInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Add(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i + *j) == 0;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Int32Add(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i + *j) == 0;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Equal(m.Int32Add(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i + *j) == 0;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Equal(m.Int32Add(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*j + *i) == 0;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    const Operator* shops[] = {m.machine()->Word32Sar(),
+                               m.machine()->Word32Shl(),
+                               m.machine()->Word32Shr()};
+    for (size_t n = 0; n < arraysize(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+                                           kMachUint32);
+      m.Return(m.Word32Equal(
+          m.Int32Add(m.Parameter(0),
+                     m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
+          m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_SHIFTS(shift) {
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = (*i + right) == 0;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Uint32BinopTester bt(&m);
+
+  m.Return(m.Int32Sub(bt.param0, bt.param1));
+
+  FOR_UINT32_INPUTS(i) {
+    FOR_UINT32_INPUTS(j) {
+      uint32_t expected = *i - *j;
+      CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+    }
+  }
+}
+
+
+TEST(RunInt32SubImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i - *j;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *j - *i;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          int32_t expected = *i - (*j >> shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
+    m.Return(m.Int32Sub(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_UINT32_INPUTS(k) {
+          int32_t expected = (*i >> shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachUint32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          int32_t expected = *i - (static_cast<uint32_t>(*j) << shift);
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachUint32);
+    m.Return(m.Int32Sub(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_UINT32_INPUTS(k) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (static_cast<uint32_t>(*i) << shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<uint32_t> m(kMachUint32, kMachUint32,
+                                          kMachUint32);
+    m.Return(m.Int32Sub(m.Parameter(0),
+                        m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = *i - (*j >> shift);
+          CHECK_UINT32_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<uint32_t> m(kMachUint32, kMachUint32,
+                                          kMachUint32);
+    m.Return(m.Int32Sub(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                        m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_UINT32_INPUTS(k) {
+          // Use uint32_t because signed overflow is UB in C.
+          int32_t expected = (*i >> shift) - *k;
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i - *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    const Operator* shops[] = {m.machine()->Word32Sar(),
+                               m.machine()->Word32Shl(),
+                               m.machine()->Word32Shr()};
+    for (size_t n = 0; n < arraysize(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+                                           kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Int32Sub(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_SHIFTS(shift) {
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i - right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32SubInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Sub(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i - *j) == 0;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Int32Sub(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i - *j) == 0;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Equal(m.Int32Sub(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i - *j) == 0;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Equal(m.Int32Sub(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*j - *i) == 0;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    const Operator* shops[] = {m.machine()->Word32Sar(),
+                               m.machine()->Word32Shl(),
+                               m.machine()->Word32Shr()};
+    for (size_t n = 0; n < arraysize(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+                                           kMachUint32);
+      m.Return(m.Word32Equal(
+          m.Int32Sub(m.Parameter(0),
+                     m.NewNode(shops[n], m.Parameter(1), m.Parameter(2))),
+          m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_SHIFTS(shift) {
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = *j << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = (*i - right) == 0;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int expected = static_cast<int32_t>(static_cast<uint32_t>(*i) * *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mul(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i * *j;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Int32Mul(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i * *j;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Int32Mul(m.Parameter(0), m.Int32Constant(*i)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *j * *i;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulAndInt32AddP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Add(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          int expected =
+              static_cast<int32_t>(p0 + static_cast<uint32_t>(p1) * p2);
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Add(m.Int32Mul(m.Parameter(0), m.Parameter(1)), m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          int expected =
+              static_cast<int32_t>(static_cast<uint32_t>(p0) * p1 + p2);
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Int32BinopTester bt(&m);
+      bt.AddReturn(
+          m.Int32Add(m.Int32Constant(*i), m.Int32Mul(bt.param0, bt.param1)));
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *j;
+          int32_t p1 = *k;
+          int expected =
+              static_cast<int32_t>(*i + static_cast<uint32_t>(p0) * p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32MulAndInt32SubP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32, kMachInt32);
+    m.Return(
+        m.Int32Sub(m.Parameter(0), m.Int32Mul(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          uint32_t p0 = *i;
+          int32_t p1 = *j;
+          int32_t p2 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected = p0 - static_cast<uint32_t>(p1) * p2;
+          CHECK_EQ(expected, m.Call(p0, p1, p2));
+        }
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Int32BinopTester bt(&m);
+      bt.AddReturn(
+          m.Int32Sub(m.Int32Constant(*i), m.Int32Mul(bt.param0, bt.param1)));
+      FOR_INT32_INPUTS(j) {
+        FOR_INT32_INPUTS(k) {
+          int32_t p0 = *j;
+          int32_t p1 = *k;
+          // Use uint32_t because signed overflow is UB in C.
+          int expected = *i - static_cast<uint32_t>(p0) * p1;
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32DivP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Div(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
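+        // Skip division by zero and kMinInt / -1, which overflows.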
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 / p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32Div(bt.param0, bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 + (p0 / p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32UDivP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32UDiv(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 / p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32UDiv(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 + (p0 / p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32ModP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Mod(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 % p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32Mod(bt.param0, bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int p0 = *i;
+        int p1 = *j;
+        if (p1 != 0 && (static_cast<uint32_t>(p0) != 0x80000000 || p1 != -1)) {
+          int expected = static_cast<int32_t>(p0 + (p0 % p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunInt32UModP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32UMod(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 % p1);
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Int32Add(bt.param0, m.Int32UMod(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t p0 = *i;
+        uint32_t p1 = *j;
+        if (p1 != 0) {
+          uint32_t expected = static_cast<uint32_t>(p0 + (p0 % p1));
+          CHECK_EQ(expected, bt.call(p0, p1));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(bt.param0, m.Word32Not(bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & ~(*j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32And(m.Word32Not(bt.param0), bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = ~(*i) & *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
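+// The And with 0x1f mirrors the 5-bit shift-count masking of 32-bit machine
+// shifts, so instruction selection can fold the And into the shift.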
+TEST(RunWord32AndAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shl(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i << (*j & 0x1f);
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shl(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i << (0x1f & *j);
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shr(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i >> (*j & 0x1f);
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Shr(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i >> (0x1f & *j);
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Sar(bt.param0, m.Word32And(bt.param1, m.Int32Constant(0x1f))));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = *i >> (*j & 0x1f);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Sar(bt.param0, m.Word32And(m.Int32Constant(0x1f), bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = *i >> (0x1f & *j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32And(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & *j;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32And(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i & ~(*j);
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(
+          m.Word32NotEqual(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                           m.Int32Constant(0)),
+          &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i & *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    const Operator* shops[] = {m.machine()->Word32Sar(),
+                               m.machine()->Word32Shl(),
+                               m.machine()->Word32Shr()};
+    for (size_t n = 0; n < arraysize(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+                                           kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32And(m.Parameter(0),
+                                         m.NewNode(shops[n], m.Parameter(1),
+                                                   m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_SHIFTS(shift) {
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i & right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32AndInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32And(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i & *j) == 0;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32And(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i & *j) == 0;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Equal(m.Word32And(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i & *j) == 0;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Equal(m.Word32And(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*j & *i) == 0;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | *j;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(bt.param0, m.Word32Not(bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | ~(*j);
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Or(m.Word32Not(bt.param0), bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = ~(*i) | *j;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrImm) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | *j;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Or(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i | ~(*j);
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32OrInBranch) {
+  static const int constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_INT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32NotEqual(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                                m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = (*i | *j) != 0 ? constant : 0 - constant;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    const Operator* shops[] = {m.machine()->Word32Sar(),
+                               m.machine()->Word32Shl(),
+                               m.machine()->Word32Shr()};
+    for (size_t n = 0; n < arraysize(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+                                           kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Or(m.Parameter(0),
+                                        m.NewNode(shops[n], m.Parameter(1),
+                                                  m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_SHIFTS(shift) {
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                right = static_cast<uint32_t>(*j) << shift;
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i | right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
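+// Checks Word32Or feeding a Word32Equal-against-zero comparison, with the
+// Or operands and the zero on either side, against the C expression
+// (a | b) == 0.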
+TEST(RunWord32OrInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32Or(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32Or(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = (*i | *j) == 0;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Equal(m.Word32Or(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i | *j) == 0;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Equal(m.Word32Or(m.Parameter(0), m.Int32Constant(*i)),
+                             m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*j | *i) == 0;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
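+// Checks Word32Xor with parameter, constant, and Word32Not operands against
+// the C expressions a ^ b, a ^ ~b, and ~a ^ b.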
+TEST(RunWord32XorP) {
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      m.Return(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ *j;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = *i ^ *j;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(bt.param0, m.Word32Not(bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = *i ^ ~(*j);
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Xor(m.Word32Not(bt.param0), bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = ~(*i) ^ *j;
+        CHECK_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Xor(m.Int32Constant(*i), m.Word32Not(m.Parameter(0))));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *i ^ ~(*j);
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+}
+
+
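+// Branches on (a ^ b) == 0 and (a ^ b) != 0, with constant and shift-node
+// operands, and checks that the taken arm matches the C expression.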
+TEST(RunWord32XorInBranch) {
+  static const uint32_t constant = 987654321;
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32Equal(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    MLabel blocka, blockb;
+    m.Branch(
+        m.Word32NotEqual(m.Word32Xor(bt.param0, bt.param1), m.Int32Constant(0)),
+        &blocka, &blockb);
+    m.Bind(&blocka);
+    bt.AddReturn(m.Int32Constant(constant));
+    m.Bind(&blockb);
+    bt.AddReturn(m.Int32Constant(0 - constant));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
+        CHECK_UINT32_EQ(expected, bt.call(*i, *j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i ^ *j) == 0 ? constant : 0 - constant;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    FOR_UINT32_INPUTS(i) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(
+          m.Word32NotEqual(m.Word32Xor(m.Int32Constant(*i), m.Parameter(0)),
+                           m.Int32Constant(0)),
+          &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = (*i ^ *j) != 0 ? constant : 0 - constant;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<void> m;
+    const Operator* shops[] = {m.machine()->Word32Sar(),
+                               m.machine()->Word32Shl(),
+                               m.machine()->Word32Shr()};
+    for (size_t n = 0; n < arraysize(shops); n++) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachInt32,
+                                           kMachUint32);
+      MLabel blocka, blockb;
+      m.Branch(m.Word32Equal(m.Word32Xor(m.Parameter(0),
+                                         m.NewNode(shops[n], m.Parameter(1),
+                                                   m.Parameter(2))),
+                             m.Int32Constant(0)),
+               &blocka, &blockb);
+      m.Bind(&blocka);
+      m.Return(m.Int32Constant(constant));
+      m.Bind(&blockb);
+      m.Return(m.Int32Constant(0 - constant));
+      FOR_UINT32_INPUTS(i) {
+        FOR_INT32_INPUTS(j) {
+          FOR_UINT32_SHIFTS(shift) {
+            int32_t right;
+            switch (shops[n]->opcode()) {
+              default:
+                UNREACHABLE();
+              case IrOpcode::kWord32Sar:
+                right = *j >> shift;
+                break;
+              case IrOpcode::kWord32Shl:
+                // Shift as uint32_t to avoid UB on negative values.
+                right =
+                    static_cast<int32_t>(static_cast<uint32_t>(*j) << shift);
+                break;
+              case IrOpcode::kWord32Shr:
+                right = static_cast<uint32_t>(*j) >> shift;
+                break;
+            }
+            int32_t expected = ((*i ^ right) == 0) ? constant : 0 - constant;
+            CHECK_EQ(expected, m.Call(*i, *j, shift));
+          }
+        }
+      }
+    }
+  }
+}
+
+
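+// Checks Word32Shl against the C left shift, with the shift amount as an
+// immediate and as a parameter.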
+TEST(RunWord32ShlP) {
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *j << shift;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Shl(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = *i << shift;
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32ShlInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32Shl(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = 0 == (*i << shift);
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32Shl(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = 0 == (*i << shift);
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      m.Return(
+          m.Word32Equal(m.Int32Constant(0),
+                        m.Word32Shl(m.Parameter(0), m.Int32Constant(shift))));
+      FOR_UINT32_INPUTS(i) {
+        uint32_t expected = 0 == (*i << shift);
+        CHECK_UINT32_EQ(expected, m.Call(*i));
+      }
+    }
+  }
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      m.Return(
+          m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Int32Constant(shift)),
+                        m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        uint32_t expected = 0 == (*i << shift);
+        CHECK_UINT32_EQ(expected, m.Call(*i));
+      }
+    }
+  }
+}
+
+
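+// Checks Word32Shr (logical right shift) against the C unsigned right shift.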
+TEST(RunWord32ShrP) {
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<uint32_t> m(kMachUint32);
+      m.Return(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_UINT32_INPUTS(j) {
+        uint32_t expected = *j >> shift;
+        CHECK_UINT32_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Shr(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = *i >> shift;
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+    CHECK_EQ(0x00010000, bt.call(0x80000000, 15));
+  }
+}
+
+
+TEST(RunWord32ShrInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32Shr(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = 0 == (*i >> shift);
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32Shr(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = 0 == (*i >> shift);
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      m.Return(
+          m.Word32Equal(m.Int32Constant(0),
+                        m.Word32Shr(m.Parameter(0), m.Int32Constant(shift))));
+      FOR_UINT32_INPUTS(i) {
+        uint32_t expected = 0 == (*i >> shift);
+        CHECK_UINT32_EQ(expected, m.Call(*i));
+      }
+    }
+  }
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      m.Return(
+          m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Int32Constant(shift)),
+                        m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        uint32_t expected = 0 == (*i >> shift);
+        CHECK_UINT32_EQ(expected, m.Call(*i));
+      }
+    }
+  }
+}
+
+
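+// Checks Word32Sar (arithmetic right shift) against the C signed right
+// shift, including the sign-extending case 0x80000000 >> 15.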
+TEST(RunWord32SarP) {
+  {
+    FOR_INT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      m.Return(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_INT32_INPUTS(j) {
+        int32_t expected = *j >> shift;
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Sar(bt.param0, bt.param1));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_SHIFTS(shift) {
+        int32_t expected = *i >> shift;
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+    CHECK_EQ(0xFFFF0000, bt.call(0x80000000, 15));
+  }
+}
+
+
+TEST(RunWord32SarInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32Sar(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_SHIFTS(shift) {
+        int32_t expected = 0 == (*i >> shift);
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Int32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32Sar(bt.param0, bt.param1)));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_SHIFTS(shift) {
+        int32_t expected = 0 == (*i >> shift);
+        CHECK_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+  {
+    FOR_INT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      m.Return(
+          m.Word32Equal(m.Int32Constant(0),
+                        m.Word32Sar(m.Parameter(0), m.Int32Constant(shift))));
+      FOR_INT32_INPUTS(i) {
+        int32_t expected = 0 == (*i >> shift);
+        CHECK_EQ(expected, m.Call(*i));
+      }
+    }
+  }
+  {
+    FOR_INT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      m.Return(
+          m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Int32Constant(shift)),
+                        m.Int32Constant(0)));
+      FOR_INT32_INPUTS(i) {
+        int32_t expected = 0 == (*i >> shift);
+        CHECK_EQ(expected, m.Call(*i));
+      }
+    }
+  }
+}
+
+
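+// Checks Word32Ror against bits::RotateRight32 for every shift amount.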
+TEST(RunWord32RorP) {
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      m.Return(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)));
+      FOR_UINT32_INPUTS(j) {
+        int32_t expected = bits::RotateRight32(*j, shift);
+        CHECK_EQ(expected, m.Call(*j));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(m.Word32Ror(bt.param0, bt.param1));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = bits::RotateRight32(*i, shift);
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32RorInComparison) {
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Word32Ror(bt.param0, bt.param1), m.Int32Constant(0)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Uint32BinopTester bt(&m);
+    bt.AddReturn(
+        m.Word32Equal(m.Int32Constant(0), m.Word32Ror(bt.param0, bt.param1)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
+        CHECK_UINT32_EQ(expected, bt.call(*i, shift));
+      }
+    }
+  }
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      m.Return(
+          m.Word32Equal(m.Int32Constant(0),
+                        m.Word32Ror(m.Parameter(0), m.Int32Constant(shift))));
+      FOR_UINT32_INPUTS(i) {
+        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
+        CHECK_UINT32_EQ(expected, m.Call(*i));
+      }
+    }
+  }
+  {
+    FOR_UINT32_SHIFTS(shift) {
+      RawMachineAssemblerTester<int32_t> m(kMachUint32);
+      m.Return(
+          m.Word32Equal(m.Word32Ror(m.Parameter(0), m.Int32Constant(shift)),
+                        m.Int32Constant(0)));
+      FOR_UINT32_INPUTS(i) {
+        uint32_t expected = 0 == bits::RotateRight32(*i, shift);
+        CHECK_UINT32_EQ(expected, m.Call(*i));
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32NotP) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  m.Return(m.Word32Not(m.Parameter(0)));
+  FOR_INT32_INPUTS(i) {
+    int expected = ~(*i);
+    CHECK_EQ(expected, m.Call(*i));
+  }
+}
+
+
+TEST(RunInt32NegP) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  m.Return(m.Int32Neg(m.Parameter(0)));
+  FOR_INT32_INPUTS(i) {
+    // Negate as uint32_t to avoid signed-overflow UB for INT32_MIN.
+    int expected = static_cast<int32_t>(0u - static_cast<uint32_t>(*i));
+    CHECK_EQ(expected, m.Call(*i));
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32SarP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32, kMachUint32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Sar(m.Parameter(1), m.Parameter(2))));
+    FOR_INT32_INPUTS(i) {
+      FOR_INT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          int32_t expected = (*i == (*j >> shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachUint32, kMachInt32);
+    m.Return(m.Word32Equal(m.Word32Sar(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_INT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_INT32_INPUTS(k) {
+          int32_t expected = ((*i >> shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32ShlP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Shl(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          int32_t expected = (*i == (*j << shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+    m.Return(m.Word32Equal(m.Word32Shl(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_UINT32_INPUTS(k) {
+          int32_t expected = ((*i << shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunWord32EqualAndWord32ShrP) {
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+    m.Return(m.Word32Equal(m.Parameter(0),
+                           m.Word32Shr(m.Parameter(1), m.Parameter(2))));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_INPUTS(j) {
+        FOR_UINT32_SHIFTS(shift) {
+          int32_t expected = (*i == (*j >> shift));
+          CHECK_EQ(expected, m.Call(*i, *j, shift));
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m(kMachUint32, kMachUint32, kMachUint32);
+    m.Return(m.Word32Equal(m.Word32Shr(m.Parameter(0), m.Parameter(1)),
+                           m.Parameter(2)));
+    FOR_UINT32_INPUTS(i) {
+      FOR_UINT32_SHIFTS(shift) {
+        FOR_UINT32_INPUTS(k) {
+          int32_t expected = ((*i >> shift) == *k);
+          CHECK_EQ(expected, m.Call(*i, shift, *k));
+        }
+      }
+    }
+  }
+}
+
+
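+// Builds a graph containing one unused node of each kind and checks that
+// the dead node does not disturb the returned constant.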
+TEST(RunDeadNodes) {
+  for (int i = 0; true; i++) {
+    RawMachineAssemblerTester<int32_t> m(i == 5 ? kMachInt32 : kMachNone);
+    int constant = 0x55 + i;
+    switch (i) {
+      case 0:
+        m.Int32Constant(44);
+        break;
+      case 1:
+        m.StringConstant("unused");
+        break;
+      case 2:
+        m.NumberConstant(11.1);
+        break;
+      case 3:
+        m.PointerConstant(&constant);
+        break;
+      case 4:
+        m.LoadFromPointer(&constant, kMachInt32);
+        break;
+      case 5:
+        m.Parameter(0);
+        break;
+      default:
+        return;
+    }
+    m.Return(m.Int32Constant(constant));
+    if (i != 5) {
+      CHECK_EQ(constant, m.Call());
+    } else {
+      CHECK_EQ(constant, m.Call(0));
+    }
+  }
+}
+
+
+TEST(RunDeadInt32Binops) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const Operator* ops[] = {
+      m.machine()->Word32And(),             m.machine()->Word32Or(),
+      m.machine()->Word32Xor(),             m.machine()->Word32Shl(),
+      m.machine()->Word32Shr(),             m.machine()->Word32Sar(),
+      m.machine()->Word32Ror(),             m.machine()->Word32Equal(),
+      m.machine()->Int32Add(),              m.machine()->Int32Sub(),
+      m.machine()->Int32Mul(),              m.machine()->Int32Div(),
+      m.machine()->Int32UDiv(),             m.machine()->Int32Mod(),
+      m.machine()->Int32UMod(),             m.machine()->Int32LessThan(),
+      m.machine()->Int32LessThanOrEqual(),  m.machine()->Uint32LessThan(),
+      m.machine()->Uint32LessThanOrEqual(), NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+    int constant = 0x55555 + i;
+    m.NewNode(ops[i], m.Parameter(0), m.Parameter(1));
+    m.Return(m.Int32Constant(constant));
+
+    CHECK_EQ(constant, m.Call(1, 1));
+  }
+}
+
+
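+// Loads buffer elements through base + constant-index addressing, using a
+// range of small and large positive and negative base offsets.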
+template <typename Type>
+static void RunLoadImmIndex(MachineType rep) {
+  const int kNumElems = 3;
+  Type buffer[kNumElems];
+
+  // Initialize the buffer with raw data.
+  byte* raw = reinterpret_cast<byte*>(buffer);
+  for (size_t i = 0; i < sizeof(buffer); i++) {
+    raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+  }
+
+  // Test with various large and small offsets.
+  for (int offset = -1; offset <= 200000; offset *= -5) {
+    for (int i = 0; i < kNumElems; i++) {
+      RawMachineAssemblerTester<Type> m;
+      Node* base = m.PointerConstant(buffer - offset);
+      Node* index = m.Int32Constant((offset + i) * sizeof(buffer[0]));
+      m.Return(m.Load(rep, base, index));
+
+      Type expected = buffer[i];
+      Type actual = m.Call();
+      CHECK(expected == actual);
+    }
+  }
+}
+
+
+TEST(RunLoadImmIndex) {
+  RunLoadImmIndex<int8_t>(kMachInt8);
+  RunLoadImmIndex<uint8_t>(kMachUint8);
+  RunLoadImmIndex<int16_t>(kMachInt16);
+  RunLoadImmIndex<uint16_t>(kMachUint16);
+  RunLoadImmIndex<int32_t>(kMachInt32);
+  RunLoadImmIndex<uint32_t>(kMachUint32);
+  RunLoadImmIndex<int32_t*>(kMachAnyTagged);
+
+  // TODO(titzer): test kRepBit loads
+  // TODO(titzer): test kMachFloat64 loads
+  // TODO(titzer): test various indexing modes.
+}
+
+
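+// Loads one buffer slot and stores it into the mirrored slot, checking that
+// the two slots are equal afterwards for each element width.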
+template <typename CType>
+static void RunLoadStore(MachineType rep) {
+  const int kNumElems = 4;
+  CType buffer[kNumElems];
+
+  for (int32_t x = 0; x < kNumElems; x++) {
+    int32_t y = kNumElems - x - 1;
+    // Initialize the buffer with raw data.
+    byte* raw = reinterpret_cast<byte*>(buffer);
+    for (size_t i = 0; i < sizeof(buffer); i++) {
+      raw[i] = static_cast<byte>((i + sizeof(buffer)) ^ 0xAA);
+    }
+
+    RawMachineAssemblerTester<int32_t> m;
+    int32_t OK = 0x29000 + x;
+    Node* base = m.PointerConstant(buffer);
+    Node* index0 = m.Int32Constant(x * sizeof(buffer[0]));
+    Node* load = m.Load(rep, base, index0);
+    Node* index1 = m.Int32Constant(y * sizeof(buffer[0]));
+    m.Store(rep, base, index1, load);
+    m.Return(m.Int32Constant(OK));
+
+    CHECK(buffer[x] != buffer[y]);
+    CHECK_EQ(OK, m.Call());
+    CHECK(buffer[x] == buffer[y]);
+  }
+}
+
+
+TEST(RunLoadStore) {
+  RunLoadStore<int8_t>(kMachInt8);
+  RunLoadStore<uint8_t>(kMachUint8);
+  RunLoadStore<int16_t>(kMachInt16);
+  RunLoadStore<uint16_t>(kMachUint16);
+  RunLoadStore<int32_t>(kMachInt32);
+  RunLoadStore<uint32_t>(kMachUint32);
+  RunLoadStore<void*>(kMachAnyTagged);
+  RunLoadStore<float>(kMachFloat32);
+  RunLoadStore<double>(kMachFloat64);
+}
+
+
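+// Runs every Float64 binop over a table of constant input pairs; the result
+// is only stored to memory, so this mainly checks that the graphs compile
+// and execute.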
+TEST(RunFloat64Binop) {
+  RawMachineAssemblerTester<int32_t> m;
+  double result;
+
+  const Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
+                           m.machine()->Float64Mul(), m.machine()->Float64Div(),
+                           m.machine()->Float64Mod(), NULL};
+
+  double inf = V8_INFINITY;
+  const Operator* inputs[] = {
+      m.common()->Float64Constant(0),     m.common()->Float64Constant(1),
+      m.common()->Float64Constant(1),     m.common()->Float64Constant(0),
+      m.common()->Float64Constant(0),     m.common()->Float64Constant(-1),
+      m.common()->Float64Constant(-1),    m.common()->Float64Constant(0),
+      m.common()->Float64Constant(0.22),  m.common()->Float64Constant(-1.22),
+      m.common()->Float64Constant(-1.22), m.common()->Float64Constant(0.22),
+      m.common()->Float64Constant(inf),   m.common()->Float64Constant(0.22),
+      m.common()->Float64Constant(inf),   m.common()->Float64Constant(-inf),
+      NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    for (int j = 0; inputs[j] != NULL; j += 2) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* a = m.NewNode(inputs[j]);
+      Node* b = m.NewNode(inputs[j + 1]);
+      Node* binop = m.NewNode(ops[i], a, b);
+      Node* base = m.PointerConstant(&result);
+      Node* zero = m.Int32Constant(0);
+      m.Store(kMachFloat64, base, zero, binop);
+      m.Return(m.Int32Constant(i + j));
+      CHECK_EQ(i + j, m.Call());
+    }
+  }
+}
+
+
+TEST(RunDeadFloat64Binops) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const Operator* ops[] = {m.machine()->Float64Add(), m.machine()->Float64Sub(),
+                           m.machine()->Float64Mul(), m.machine()->Float64Div(),
+                           m.machine()->Float64Mod(), NULL};
+
+  for (int i = 0; ops[i] != NULL; i++) {
+    RawMachineAssemblerTester<int32_t> m;
+    int constant = 0x53355 + i;
+    m.NewNode(ops[i], m.Float64Constant(0.1), m.Float64Constant(1.11));
+    m.Return(m.Int32Constant(constant));
+    CHECK_EQ(constant, m.Call());
+  }
+}
+
+
+TEST(RunFloat64AddP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Add(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl + *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64SubP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Sub(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl - *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64SubImm1) {
+  double input = 0.0;
+  double output = 0.0;
+
+  FOR_FLOAT64_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
+    Node* t1 = m.Float64Sub(m.Float64Constant(*i), t0);
+    m.StoreToPointer(&output, kMachFloat64, t1);
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(j) {
+      input = *j;
+      double expected = *i - input;
+      CHECK_EQ(0, m.Call());
+      CHECK_EQ(expected, output);
+    }
+  }
+}
+
+
+TEST(RunFloat64SubImm2) {
+  double input = 0.0;
+  double output = 0.0;
+
+  FOR_FLOAT64_INPUTS(i) {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
+    Node* t1 = m.Float64Sub(t0, m.Float64Constant(*i));
+    m.StoreToPointer(&output, kMachFloat64, t1);
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(j) {
+      input = *j;
+      double expected = input - *i;
+      CHECK_EQ(0, m.Call());
+      CHECK_EQ(expected, output);
+    }
+  }
+}
+
+
+TEST(RunFloat64MulP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Mul(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl * *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64MulAndFloat64AddP) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+  double input_c = 0.0;
+  double output = 0.0;
+
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+    Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+    Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
+    m.StoreToPointer(&output, kMachFloat64,
+                     m.Float64Add(m.Float64Mul(a, b), c));
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(i) {
+      FOR_FLOAT64_INPUTS(j) {
+        FOR_FLOAT64_INPUTS(k) {
+          input_a = *i;
+          input_b = *j;
+          input_c = *k;
+          volatile double temp = input_a * input_b;
+          volatile double expected = temp + input_c;
+          CHECK_EQ(0, m.Call());
+          CHECK_EQ(expected, output);
+        }
+      }
+    }
+  }
+  {
+    RawMachineAssemblerTester<int32_t> m;
+    Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+    Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+    Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
+    m.StoreToPointer(&output, kMachFloat64,
+                     m.Float64Add(a, m.Float64Mul(b, c)));
+    m.Return(m.Int32Constant(0));
+    FOR_FLOAT64_INPUTS(i) {
+      FOR_FLOAT64_INPUTS(j) {
+        FOR_FLOAT64_INPUTS(k) {
+          input_a = *i;
+          input_b = *j;
+          input_c = *k;
+          volatile double temp = input_b * input_c;
+          volatile double expected = input_a + temp;
+          CHECK_EQ(0, m.Call());
+          CHECK_EQ(expected, output);
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64MulAndFloat64SubP) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+  double input_c = 0.0;
+  double output = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+  Node* c = m.LoadFromPointer(&input_c, kMachFloat64);
+  m.StoreToPointer(&output, kMachFloat64, m.Float64Sub(a, m.Float64Mul(b, c)));
+  m.Return(m.Int32Constant(0));
+
+  FOR_FLOAT64_INPUTS(i) {
+    FOR_FLOAT64_INPUTS(j) {
+      FOR_FLOAT64_INPUTS(k) {
+        input_a = *i;
+        input_b = *j;
+        input_c = *k;
+        volatile double temp = input_b * input_c;
+        volatile double expected = input_a - temp;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64MulImm) {
+  double input = 0.0;
+  double output = 0.0;
+
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
+      Node* t1 = m.Float64Mul(m.Float64Constant(*i), t0);
+      m.StoreToPointer(&output, kMachFloat64, t1);
+      m.Return(m.Int32Constant(0));
+      FOR_FLOAT64_INPUTS(j) {
+        input = *j;
+        double expected = *i * input;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+  {
+    FOR_FLOAT64_INPUTS(i) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* t0 = m.LoadFromPointer(&input, kMachFloat64);
+      Node* t1 = m.Float64Mul(t0, m.Float64Constant(*i));
+      m.StoreToPointer(&output, kMachFloat64, t1);
+      m.Return(m.Int32Constant(0));
+      FOR_FLOAT64_INPUTS(j) {
+        input = *j;
+        double expected = input * *i;
+        CHECK_EQ(0, m.Call());
+        CHECK_EQ(expected, output);
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64DivP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Div(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      double expected = *pl / *pr;
+      CHECK_EQ(expected, bt.call(*pl, *pr));
+    }
+  }
+}
+
+
+TEST(RunFloat64ModP) {
+  RawMachineAssemblerTester<int32_t> m;
+  Float64BinopTester bt(&m);
+
+  bt.AddReturn(m.Float64Mod(bt.param0, bt.param1));
+
+  FOR_FLOAT64_INPUTS(i) {
+    FOR_FLOAT64_INPUTS(j) {
+      double expected = modulo(*i, *j);
+      double found = bt.call(*i, *j);
+      CHECK_EQ(expected, found);
+    }
+  }
+}
+
+
+TEST(RunChangeInt32ToFloat64_A) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t magic = 0x986234;
+  double result = 0;
+
+  Node* convert = m.ChangeInt32ToFloat64(m.Int32Constant(magic));
+  m.Store(kMachFloat64, m.PointerConstant(&result), m.Int32Constant(0),
+          convert);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(static_cast<double>(magic), result);
+}
+
+
+TEST(RunChangeInt32ToFloat64_B) {
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  double output = 0;
+
+  Node* convert = m.ChangeInt32ToFloat64(m.Parameter(0));
+  m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0),
+          convert);
+  m.Return(m.Parameter(0));
+
+  FOR_INT32_INPUTS(i) {
+    int32_t expect = *i;
+    CHECK_EQ(expect, m.Call(expect));
+    CHECK_EQ(static_cast<double>(expect), output);
+  }
+}
+
+
+TEST(RunChangeUint32ToFloat64_B) {
+  RawMachineAssemblerTester<int32_t> m(kMachUint32);
+  double output = 0;
+
+  Node* convert = m.ChangeUint32ToFloat64(m.Parameter(0));
+  m.Store(kMachFloat64, m.PointerConstant(&output), m.Int32Constant(0),
+          convert);
+  m.Return(m.Parameter(0));
+
+  FOR_UINT32_INPUTS(i) {
+    uint32_t expect = *i;
+    CHECK_EQ(expect, m.Call(expect));
+    CHECK_EQ(static_cast<double>(expect), output);
+  }
+}
+
+
+TEST(RunChangeFloat64ToInt32_A) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t magic = 0x786234;
+  double input = 11.1;
+  int32_t result = 0;
+
+  m.Store(kMachInt32, m.PointerConstant(&result), m.Int32Constant(0),
+          m.ChangeFloat64ToInt32(m.Float64Constant(input)));
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(static_cast<int32_t>(input), result);
+}
+
+
+TEST(RunChangeFloat64ToInt32_B) {
+  RawMachineAssemblerTester<int32_t> m;
+  double input = 0;
+  int32_t output = 0;
+
+  Node* load =
+      m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0));
+  Node* convert = m.ChangeFloat64ToInt32(load);
+  m.Store(kMachInt32, m.PointerConstant(&output), m.Int32Constant(0), convert);
+  m.Return(convert);
+
+  {
+    FOR_INT32_INPUTS(i) {
+      input = *i;
+      int32_t expect = *i;
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+  }
+
+  // Check various powers of 2.
+  for (int32_t n = 1; n < 31; ++n) {
+    {
+      input = 1 << n;
+      int32_t expect = static_cast<int32_t>(input);
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+
+    {
+      input = 3 << n;
+      int32_t expect = static_cast<int32_t>(input);
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+  }
+  // Note we don't check fractional inputs, because these Convert operators
+  // really should be Change operators.
+}
+
+
+TEST(RunChangeFloat64ToUint32_B) {
+  RawMachineAssemblerTester<int32_t> m;
+  double input = 0;
+  int32_t output = 0;
+
+  Node* load =
+      m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(0));
+  Node* convert = m.ChangeFloat64ToUint32(load);
+  m.Store(kMachInt32, m.PointerConstant(&output), m.Int32Constant(0), convert);
+  m.Return(convert);
+
+  {
+    FOR_UINT32_INPUTS(i) {
+      input = *i;
+      // TODO(titzer): add a CheckEqualsHelper overload for uint32_t.
+      int32_t expect = static_cast<int32_t>(*i);
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+  }
+
+  // Check various powers of 2.
+  for (int32_t n = 1; n < 31; ++n) {
+    {
+      input = 1u << n;
+      int32_t expect = static_cast<int32_t>(static_cast<uint32_t>(input));
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+
+    {
+      input = 3u << n;
+      int32_t expect = static_cast<int32_t>(static_cast<uint32_t>(input));
+      CHECK_EQ(expect, m.Call());
+      CHECK_EQ(expect, output);
+    }
+  }
+  // Note we don't check fractional inputs, because these Convert operators
+  // really should be Change operators.
+}
+
+
+TEST(RunChangeFloat64ToInt32_spilled) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int kNumInputs = 32;
+  int32_t magic = 0x786234;
+  double input[kNumInputs];
+  int32_t result[kNumInputs];
+  Node* input_node[kNumInputs];
+
+  for (int i = 0; i < kNumInputs; i++) {
+    input_node[i] =
+        m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(i * 8));
+  }
+
+  for (int i = 0; i < kNumInputs; i++) {
+    m.Store(kMachInt32, m.PointerConstant(&result), m.Int32Constant(i * 4),
+            m.ChangeFloat64ToInt32(input_node[i]));
+  }
+
+  m.Return(m.Int32Constant(magic));
+
+  for (int i = 0; i < kNumInputs; i++) {
+    input[i] = 100.9 + i;
+  }
+
+  CHECK_EQ(magic, m.Call());
+
+  for (int i = 0; i < kNumInputs; i++) {
+    CHECK_EQ(result[i], 100 + i);
+  }
+}
+
+
+TEST(RunChangeFloat64ToUint32_spilled) {
+  RawMachineAssemblerTester<uint32_t> m;
+  const int kNumInputs = 32;
+  int32_t magic = 0x786234;
+  double input[kNumInputs];
+  uint32_t result[kNumInputs];
+  Node* input_node[kNumInputs];
+
+  for (int i = 0; i < kNumInputs; i++) {
+    input_node[i] =
+        m.Load(kMachFloat64, m.PointerConstant(&input), m.Int32Constant(i * 8));
+  }
+
+  for (int i = 0; i < kNumInputs; i++) {
+    m.Store(kMachUint32, m.PointerConstant(&result), m.Int32Constant(i * 4),
+            m.ChangeFloat64ToUint32(input_node[i]));
+  }
+
+  m.Return(m.Int32Constant(magic));
+
+  for (int i = 0; i < kNumInputs; i++) {
+    if (i % 2) {
+      input[i] = 100 + i + 2147483648u;
+    } else {
+      input[i] = 100 + i;
+    }
+  }
+
+  CHECK_EQ(magic, m.Call());
+
+  for (int i = 0; i < kNumInputs; i++) {
+    if (i % 2) {
+      CHECK_UINT32_EQ(result[i], static_cast<uint32_t>(100 + i + 2147483648u));
+    } else {
+      CHECK_UINT32_EQ(result[i], static_cast<uint32_t>(100 + i));
+    }
+  }
+}
+
+
+TEST(RunDeadChangeFloat64ToInt32) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int magic = 0x88abcda4;
+  m.ChangeFloat64ToInt32(m.Float64Constant(999.78));
+  m.Return(m.Int32Constant(magic));
+  CHECK_EQ(magic, m.Call());
+}
+
+
+TEST(RunDeadChangeInt32ToFloat64) {
+  RawMachineAssemblerTester<int32_t> m;
+  const int magic = 0x8834abcd;
+  m.ChangeInt32ToFloat64(m.Int32Constant(magic - 6888));
+  m.Return(m.Int32Constant(magic));
+  CHECK_EQ(magic, m.Call());
+}
+
+
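+// The loop body is never taken, so the induction phi must keep its initial
+// value.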
+TEST(RunLoopPhiInduction2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  int false_val = 0x10777;
+
+  // x = false_val; while(false) { x++; } return x;
+  MLabel header, body, end;
+  Node* false_node = m.Int32Constant(false_val);
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachInt32, false_node, false_node);
+  m.Branch(m.Int32Constant(0), &body, &end);
+  m.Bind(&body);
+  Node* add = m.Int32Add(phi, m.Int32Constant(1));
+  phi->ReplaceInput(1, add);
+  m.Goto(&header);
+  m.Bind(&end);
+  m.Return(phi);
+
+  CHECK_EQ(false_val, m.Call());
+}
+
+
+TEST(RunDoubleDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99645;
+  double buffer = 0.1;
+  double constant = 99.99;
+
+  MLabel blocka, blockb, end;
+  Node* k1 = m.Float64Constant(constant);
+  Node* k2 = m.Float64Constant(0 - constant);
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* phi = m.Phi(kMachFloat64, k2, k1);
+  m.Store(kMachFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(constant, buffer);
+}
+
+
+TEST(RunRefDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99644;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("A");
+  String* buffer;
+
+  MLabel blocka, blockb, end;
+  Node* k1 = m.StringConstant("A");
+  Node* k2 = m.StringConstant("B");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* phi = m.Phi(kMachAnyTagged, k2, k1);
+  m.Store(kMachAnyTagged, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK(rexpected->SameValue(buffer));
+}
+
+
+TEST(RunDoubleRefDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99648;
+  double dbuffer = 0.1;
+  double dconstant = 99.99;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("AX");
+  String* rbuffer;
+
+  MLabel blocka, blockb, end;
+  Node* d1 = m.Float64Constant(dconstant);
+  Node* d2 = m.Float64Constant(0 - dconstant);
+  Node* r1 = m.StringConstant("AX");
+  Node* r2 = m.StringConstant("BX");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* dphi = m.Phi(kMachFloat64, d2, d1);
+  Node* rphi = m.Phi(kMachAnyTagged, r2, r1);
+  m.Store(kMachFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0), dphi);
+  m.Store(kMachAnyTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
+          rphi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(dconstant, dbuffer);
+  CHECK(rexpected->SameValue(rbuffer));
+}
+
+
+TEST(RunDoubleRefDoubleDiamond) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const int magic = 99649;
+  double dbuffer = 0.1;
+  double dconstant = 99.997;
+  Handle<String> rexpected =
+      CcTest::i_isolate()->factory()->InternalizeUtf8String("AD");
+  String* rbuffer;
+
+  MLabel blocka, blockb, mid, blockd, blocke, end;
+  Node* d1 = m.Float64Constant(dconstant);
+  Node* d2 = m.Float64Constant(0 - dconstant);
+  Node* r1 = m.StringConstant("AD");
+  Node* r2 = m.StringConstant("BD");
+  m.Branch(m.Int32Constant(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&mid);
+  m.Bind(&blockb);
+  m.Goto(&mid);
+  m.Bind(&mid);
+  Node* dphi1 = m.Phi(kMachFloat64, d2, d1);
+  Node* rphi1 = m.Phi(kMachAnyTagged, r2, r1);
+  m.Branch(m.Int32Constant(0), &blockd, &blocke);
+
+  m.Bind(&blockd);
+  m.Goto(&end);
+  m.Bind(&blocke);
+  m.Goto(&end);
+  m.Bind(&end);
+  Node* dphi2 = m.Phi(kMachFloat64, d1, dphi1);
+  Node* rphi2 = m.Phi(kMachAnyTagged, r1, rphi1);
+
+  m.Store(kMachFloat64, m.PointerConstant(&dbuffer), m.Int32Constant(0), dphi2);
+  m.Store(kMachAnyTagged, m.PointerConstant(&rbuffer), m.Int32Constant(0),
+          rphi2);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+  CHECK_EQ(dconstant, dbuffer);
+  CHECK(rexpected->SameValue(rbuffer));
+}
+
+
+TEST(RunDoubleLoopPhi) {
+  RawMachineAssemblerTester<int32_t> m;
+  MLabel header, body, end;
+
+  int magic = 99773;
+  double buffer = 0.99;
+  double dconstant = 777.1;
+
+  Node* zero = m.Int32Constant(0);
+  Node* dk = m.Float64Constant(dconstant);
+
+  m.Goto(&header);
+  m.Bind(&header);
+  Node* phi = m.Phi(kMachFloat64, dk, dk);
+  phi->ReplaceInput(1, phi);
+  m.Branch(zero, &body, &end);
+  m.Bind(&body);
+  m.Goto(&header);
+  m.Bind(&end);
+  m.Store(kMachFloat64, m.PointerConstant(&buffer), m.Int32Constant(0), phi);
+  m.Return(m.Int32Constant(magic));
+
+  CHECK_EQ(magic, m.Call());
+}
+
+
+TEST(RunCountToTenAccRaw) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Node* zero = m.Int32Constant(0);
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+
+  MLabel header, body, body_cont, end;
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* i = m.Phi(kMachInt32, zero, zero);
+  Node* j = m.Phi(kMachInt32, zero, zero);
+  m.Goto(&body);
+
+  m.Bind(&body);
+  Node* next_i = m.Int32Add(i, one);
+  Node* next_j = m.Int32Add(j, one);
+  m.Branch(m.Word32Equal(next_i, ten), &end, &body_cont);
+
+  m.Bind(&body_cont);
+  i->ReplaceInput(1, next_i);
+  j->ReplaceInput(1, next_j);
+  m.Goto(&header);
+
+  m.Bind(&end);
+  m.Return(ten);
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunCountToTenAccRaw2) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  Node* zero = m.Int32Constant(0);
+  Node* ten = m.Int32Constant(10);
+  Node* one = m.Int32Constant(1);
+
+  MLabel header, body, body_cont, end;
+
+  m.Goto(&header);
+
+  m.Bind(&header);
+  Node* i = m.Phi(kMachInt32, zero, zero);
+  Node* j = m.Phi(kMachInt32, zero, zero);
+  Node* k = m.Phi(kMachInt32, zero, zero);
+  m.Goto(&body);
+
+  m.Bind(&body);
+  Node* next_i = m.Int32Add(i, one);
+  Node* next_j = m.Int32Add(j, one);
+  Node* next_k = m.Int32Add(k, one);
+  m.Branch(m.Word32Equal(next_i, ten), &end, &body_cont);
+
+  m.Bind(&body_cont);
+  i->ReplaceInput(1, next_i);
+  j->ReplaceInput(1, next_j);
+  k->ReplaceInput(1, next_k);
+  m.Goto(&header);
+
+  m.Bind(&end);
+  m.Return(ten);
+
+  CHECK_EQ(10, m.Call());
+}
+
+
+TEST(RunAddTree) {
+  RawMachineAssemblerTester<int32_t> m;
+  int32_t inputs[] = {11, 12, 13, 14, 15, 16, 17, 18};
+
+  Node* base = m.PointerConstant(inputs);
+  Node* n0 = m.Load(kMachInt32, base, m.Int32Constant(0 * sizeof(int32_t)));
+  Node* n1 = m.Load(kMachInt32, base, m.Int32Constant(1 * sizeof(int32_t)));
+  Node* n2 = m.Load(kMachInt32, base, m.Int32Constant(2 * sizeof(int32_t)));
+  Node* n3 = m.Load(kMachInt32, base, m.Int32Constant(3 * sizeof(int32_t)));
+  Node* n4 = m.Load(kMachInt32, base, m.Int32Constant(4 * sizeof(int32_t)));
+  Node* n5 = m.Load(kMachInt32, base, m.Int32Constant(5 * sizeof(int32_t)));
+  Node* n6 = m.Load(kMachInt32, base, m.Int32Constant(6 * sizeof(int32_t)));
+  Node* n7 = m.Load(kMachInt32, base, m.Int32Constant(7 * sizeof(int32_t)));
+
+  Node* i1 = m.Int32Add(n0, n1);
+  Node* i2 = m.Int32Add(n2, n3);
+  Node* i3 = m.Int32Add(n4, n5);
+  Node* i4 = m.Int32Add(n6, n7);
+
+  Node* i5 = m.Int32Add(i1, i2);
+  Node* i6 = m.Int32Add(i3, i4);
+
+  Node* i7 = m.Int32Add(i5, i6);
+
+  m.Return(i7);
+
+  CHECK_EQ(116, m.Call());
+}
+
+
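+// Builds a single Float64 comparison selected by |test_case|; the two bits
+// of |node_type| choose whether each operand is a loaded value or a
+// constant. Returns the expected result for inputs x < y.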
+static const int kFloat64CompareHelperTestCases = 15;
+static const int kFloat64CompareHelperNodeType = 4;
+
+static int Float64CompareHelper(RawMachineAssemblerTester<int32_t>* m,
+                                int test_case, int node_type, double x,
+                                double y) {
+  static double buffer[2];
+  buffer[0] = x;
+  buffer[1] = y;
+  CHECK(0 <= test_case && test_case < kFloat64CompareHelperTestCases);
+  CHECK(0 <= node_type && node_type < kFloat64CompareHelperNodeType);
+  CHECK(x < y);
+  bool load_a = node_type / 2 == 1;
+  bool load_b = node_type % 2 == 1;
+  Node* a = load_a ? m->Load(kMachFloat64, m->PointerConstant(&buffer[0]))
+                   : m->Float64Constant(x);
+  Node* b = load_b ? m->Load(kMachFloat64, m->PointerConstant(&buffer[1]))
+                   : m->Float64Constant(y);
+  Node* cmp = NULL;
+  bool expected = false;
+  switch (test_case) {
+    // Equal tests.
+    case 0:
+      cmp = m->Float64Equal(a, b);
+      expected = false;
+      break;
+    case 1:
+      cmp = m->Float64Equal(a, a);
+      expected = true;
+      break;
+    // LessThan tests.
+    case 2:
+      cmp = m->Float64LessThan(a, b);
+      expected = true;
+      break;
+    case 3:
+      cmp = m->Float64LessThan(b, a);
+      expected = false;
+      break;
+    case 4:
+      cmp = m->Float64LessThan(a, a);
+      expected = false;
+      break;
+    // LessThanOrEqual tests.
+    case 5:
+      cmp = m->Float64LessThanOrEqual(a, b);
+      expected = true;
+      break;
+    case 6:
+      cmp = m->Float64LessThanOrEqual(b, a);
+      expected = false;
+      break;
+    case 7:
+      cmp = m->Float64LessThanOrEqual(a, a);
+      expected = true;
+      break;
+    // NotEqual tests.
+    case 8:
+      cmp = m->Float64NotEqual(a, b);
+      expected = true;
+      break;
+    case 9:
+      cmp = m->Float64NotEqual(b, a);
+      expected = true;
+      break;
+    case 10:
+      cmp = m->Float64NotEqual(a, a);
+      expected = false;
+      break;
+    // GreaterThan tests.
+    case 11:
+      cmp = m->Float64GreaterThan(a, a);
+      expected = false;
+      break;
+    case 12:
+      cmp = m->Float64GreaterThan(a, b);
+      expected = false;
+      break;
+    // GreaterThanOrEqual tests.
+    case 13:
+      cmp = m->Float64GreaterThanOrEqual(a, a);
+      expected = true;
+      break;
+    case 14:
+      cmp = m->Float64GreaterThanOrEqual(b, a);
+      expected = true;
+      break;
+    default:
+      UNREACHABLE();
+  }
+  m->Return(cmp);
+  return expected;
+}
+
+
+TEST(RunFloat64Compare) {
+  double inf = V8_INFINITY;
+  // All pairs (a1, a2) are of the form a1 < a2.
+  double inputs[] = {0.0,  1.0,  -1.0, 0.22, -1.22, 0.22,
+                     -inf, 0.22, 0.22, inf,  -inf,  inf};
+
+  for (int test = 0; test < kFloat64CompareHelperTestCases; test++) {
+    for (int node_type = 0; node_type < kFloat64CompareHelperNodeType;
+         node_type++) {
+      for (size_t input = 0; input < arraysize(inputs); input += 2) {
+        RawMachineAssemblerTester<int32_t> m;
+        int expected = Float64CompareHelper(&m, test, node_type, inputs[input],
+                                            inputs[input + 1]);
+        CHECK_EQ(expected, m.Call());
+      }
+    }
+  }
+}
+
+
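+// NaN is unordered: Float64Equal/LessThan/LessThanOrEqual must return 0
+// when either operand is NaN.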
+TEST(RunFloat64UnorderedCompare) {
+  RawMachineAssemblerTester<int32_t> m;
+
+  const Operator* operators[] = {m.machine()->Float64Equal(),
+                                 m.machine()->Float64LessThan(),
+                                 m.machine()->Float64LessThanOrEqual()};
+
+  double nan = v8::base::OS::nan_value();
+
+  FOR_FLOAT64_INPUTS(i) {
+    for (size_t o = 0; o < arraysize(operators); ++o) {
+      for (int j = 0; j < 2; j++) {
+        RawMachineAssemblerTester<int32_t> m;
+        Node* a = m.Float64Constant(*i);
+        Node* b = m.Float64Constant(nan);
+        if (j == 1) std::swap(a, b);
+        m.Return(m.NewNode(operators[o], a, b));
+        CHECK_EQ(0, m.Call());
+      }
+    }
+  }
+}
+
+
+TEST(RunFloat64Equal) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+  m.Return(m.Float64Equal(a, b));
+
+  CompareWrapper cmp(IrOpcode::kFloat64Equal);
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expected = cmp.Float64Compare(input_a, input_b) ? 1 : 0;
+      CHECK_EQ(expected, m.Call());
+    }
+  }
+}
+
+
+TEST(RunFloat64LessThan) {
+  double input_a = 0.0;
+  double input_b = 0.0;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input_a, kMachFloat64);
+  Node* b = m.LoadFromPointer(&input_b, kMachFloat64);
+  m.Return(m.Float64LessThan(a, b));
+
+  CompareWrapper cmp(IrOpcode::kFloat64LessThan);
+  FOR_FLOAT64_INPUTS(pl) {
+    FOR_FLOAT64_INPUTS(pr) {
+      input_a = *pl;
+      input_b = *pr;
+      int32_t expected = cmp.Float64Compare(input_a, input_b) ? 1 : 0;
+      CHECK_EQ(expected, m.Call());
+    }
+  }
+}
+
+
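+// Checks that narrow stores truncate: incrementing the maximum value of the
+// narrow type through memory must wrap around to its minimum.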
+template <typename IntType, MachineType kRepresentation>
+static void LoadStoreTruncation() {
+  IntType input;
+
+  RawMachineAssemblerTester<int32_t> m;
+  Node* a = m.LoadFromPointer(&input, kRepresentation);
+  Node* ap1 = m.Int32Add(a, m.Int32Constant(1));
+  m.StoreToPointer(&input, kRepresentation, ap1);
+  m.Return(ap1);
+
+  const IntType max = std::numeric_limits<IntType>::max();
+  const IntType min = std::numeric_limits<IntType>::min();
+
+  // Test upper bound.
+  input = max;
+  CHECK_EQ(max + 1, m.Call());
+  CHECK_EQ(min, input);
+
+  // Test lower bound.
+  input = min;
+  CHECK_EQ(static_cast<IntType>(max + 2), m.Call());
+  CHECK_EQ(min + 1, input);
+
+  // Test all one byte values that are not one byte bounds.
+  for (int i = -127; i < 127; i++) {
+    input = i;
+    int expected = i >= 0 ? i + 1 : max + (i - min) + 2;
+    CHECK_EQ(static_cast<IntType>(expected), m.Call());
+    CHECK_EQ(static_cast<IntType>(i + 1), input);
+  }
+}
+
+
+TEST(RunLoadStoreTruncation) {
+  LoadStoreTruncation<int8_t, kMachInt8>();
+  LoadStoreTruncation<int16_t, kMachInt16>();
+}
+
+
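+// Runs each pointer-sized comparison operator on a pair left < right and
+// checks it against the statically expected result.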
+static void IntPtrCompare(intptr_t left, intptr_t right) {
+  for (int test = 0; test < 7; test++) {
+    RawMachineAssemblerTester<bool> m(kMachPtr, kMachPtr);
+    Node* p0 = m.Parameter(0);
+    Node* p1 = m.Parameter(1);
+    Node* res = NULL;
+    bool expected = false;
+    switch (test) {
+      case 0:
+        res = m.IntPtrLessThan(p0, p1);
+        expected = true;
+        break;
+      case 1:
+        res = m.IntPtrLessThanOrEqual(p0, p1);
+        expected = true;
+        break;
+      case 2:
+        res = m.IntPtrEqual(p0, p1);
+        expected = false;
+        break;
+      case 3:
+        res = m.IntPtrGreaterThanOrEqual(p0, p1);
+        expected = false;
+        break;
+      case 4:
+        res = m.IntPtrGreaterThan(p0, p1);
+        expected = false;
+        break;
+      case 5:
+        res = m.IntPtrEqual(p0, p0);
+        expected = true;
+        break;
+      case 6:
+        res = m.IntPtrNotEqual(p0, p1);
+        expected = true;
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+    m.Return(res);
+    CHECK_EQ(expected, m.Call(reinterpret_cast<int32_t*>(left),
+                              reinterpret_cast<int32_t*>(right)));
+  }
+}
+
+
+TEST(RunIntPtrCompare) {
+  intptr_t min = std::numeric_limits<intptr_t>::min();
+  intptr_t max = std::numeric_limits<intptr_t>::max();
+  // An ascending chain of intptr_t values.
+  intptr_t inputs[] = {min, min / 2, -1, 0, 1, max / 2, max};
+  for (size_t i = 0; i < arraysize(inputs) - 1; i++) {
+    IntPtrCompare(inputs[i], inputs[i + 1]);
+  }
+}
+
+
+TEST(RunTestIntPtrArithmetic) {
+  static const int kInputSize = 10;
+  int32_t inputs[kInputSize];
+  int32_t outputs[kInputSize];
+  for (int i = 0; i < kInputSize; i++) {
+    inputs[i] = i;
+    outputs[i] = -1;
+  }
+  RawMachineAssemblerTester<int32_t*> m;
+  Node* input = m.PointerConstant(&inputs[0]);
+  Node* output = m.PointerConstant(&outputs[kInputSize - 1]);
+  Node* elem_size = m.ConvertInt32ToIntPtr(m.Int32Constant(sizeof(inputs[0])));
+  for (int i = 0; i < kInputSize; i++) {
+    m.Store(kMachInt32, output, m.Load(kMachInt32, input));
+    input = m.IntPtrAdd(input, elem_size);
+    output = m.IntPtrSub(output, elem_size);
+  }
+  m.Return(input);
+  CHECK_EQ(&inputs[kInputSize], m.Call());
+  for (int i = 0; i < kInputSize; i++) {
+    CHECK_EQ(i, inputs[i]);
+    CHECK_EQ(kInputSize - i - 1, outputs[i]);
+  }
+}
+
+
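+// Keeps a thousand accumulated values live at once to force the register
+// allocator to spill.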
+TEST(RunSpillLotsOfThings) {
+  static const int kInputSize = 1000;
+  RawMachineAssemblerTester<void> m;
+  Node* accs[kInputSize];
+  int32_t outputs[kInputSize];
+  Node* one = m.Int32Constant(1);
+  Node* acc = one;
+  for (int i = 0; i < kInputSize; i++) {
+    acc = m.Int32Add(acc, one);
+    accs[i] = acc;
+  }
+  for (int i = 0; i < kInputSize; i++) {
+    m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
+  }
+  m.Return(one);
+  m.Call();
+  for (int i = 0; i < kInputSize; i++) {
+    CHECK_EQ(outputs[i], i + 2);
+  }
+}
+
+
+TEST(RunSpillConstantsAndParameters) {
+  static const int kInputSize = 1000;
+  static const int32_t kBase = 987;
+  RawMachineAssemblerTester<int32_t> m(kMachInt32, kMachInt32);
+  int32_t outputs[kInputSize];
+  Node* csts[kInputSize];
+  Node* accs[kInputSize];
+  Node* acc = m.Int32Constant(0);
+  for (int i = 0; i < kInputSize; i++) {
+    csts[i] = m.Int32Constant(static_cast<int32_t>(kBase + i));
+  }
+  for (int i = 0; i < kInputSize; i++) {
+    acc = m.Int32Add(acc, csts[i]);
+    accs[i] = acc;
+  }
+  for (int i = 0; i < kInputSize; i++) {
+    m.StoreToPointer(&outputs[i], kMachInt32, accs[i]);
+  }
+  m.Return(m.Int32Add(acc, m.Int32Add(m.Parameter(0), m.Parameter(1))));
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t expected = *i + *j;
+      for (int k = 0; k < kInputSize; k++) {
+        expected += kBase + k;
+      }
+      CHECK_EQ(expected, m.Call(*i, *j));
+      expected = 0;
+      for (int k = 0; k < kInputSize; k++) {
+        expected += kBase + k;
+        CHECK_EQ(expected, outputs[k]);
+      }
+    }
+  }
+}
+
+
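+// Checks that heap constants allocated in new space survive as phi inputs.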
+TEST(RunNewSpaceConstantsInPhi) {
+  RawMachineAssemblerTester<Object*> m(kMachInt32);
+
+  Isolate* isolate = CcTest::i_isolate();
+  Handle<HeapNumber> true_val = isolate->factory()->NewHeapNumber(11.2);
+  Handle<HeapNumber> false_val = isolate->factory()->NewHeapNumber(11.3);
+  Node* true_node = m.HeapConstant(true_val);
+  Node* false_node = m.HeapConstant(false_val);
+
+  MLabel blocka, blockb, end;
+  m.Branch(m.Parameter(0), &blocka, &blockb);
+  m.Bind(&blocka);
+  m.Goto(&end);
+  m.Bind(&blockb);
+  m.Goto(&end);
+
+  m.Bind(&end);
+  Node* phi = m.Phi(kMachAnyTagged, true_node, false_node);
+  m.Return(phi);
+
+  CHECK_EQ(*false_val, m.Call(0));
+  CHECK_EQ(*true_val, m.Call(1));
+}
+
+
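+// Checks Int32AddWithOverflow: projection 0 is the wrapped sum and
+// projection 1 the overflow bit, verified against bits::SignedAddOverflow32.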
+TEST(RunInt32AddWithOverflowP) {
+  int32_t actual_val = -1;
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+  Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
+  Node* val = m.Projection(0, add);
+  Node* ovf = m.Projection(1, add);
+  m.StoreToPointer(&actual_val, kMachInt32, val);
+  bt.AddReturn(ovf);
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t expected_val;
+      int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
+      CHECK_EQ(expected_ovf, bt.call(*i, *j));
+      CHECK_EQ(expected_val, actual_val);
+    }
+  }
+}
+
+
+TEST(RunInt32AddWithOverflowImm) {
+  int32_t actual_val = -1, expected_val = 0;
+  FOR_INT32_INPUTS(i) {
+    {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      Node* add = m.Int32AddWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+      Node* val = m.Projection(0, add);
+      Node* ovf = m.Projection(1, add);
+      m.StoreToPointer(&actual_val, kMachInt32, val);
+      m.Return(ovf);
+      FOR_INT32_INPUTS(j) {
+        int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
+        CHECK_EQ(expected_ovf, m.Call(*j));
+        CHECK_EQ(expected_val, actual_val);
+      }
+    }
+    {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      Node* add = m.Int32AddWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+      Node* val = m.Projection(0, add);
+      Node* ovf = m.Projection(1, add);
+      m.StoreToPointer(&actual_val, kMachInt32, val);
+      m.Return(ovf);
+      FOR_INT32_INPUTS(j) {
+        int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
+        CHECK_EQ(expected_ovf, m.Call(*j));
+        CHECK_EQ(expected_val, actual_val);
+      }
+    }
+    FOR_INT32_INPUTS(j) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* add =
+          m.Int32AddWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+      Node* val = m.Projection(0, add);
+      Node* ovf = m.Projection(1, add);
+      m.StoreToPointer(&actual_val, kMachInt32, val);
+      m.Return(ovf);
+      int expected_ovf = bits::SignedAddOverflow32(*i, *j, &expected_val);
+      CHECK_EQ(expected_ovf, m.Call());
+      CHECK_EQ(expected_val, actual_val);
+    }
+  }
+}
+
+
+TEST(RunInt32AddWithOverflowInBranchP) {
+  int constant = 911777;
+  MLabel blocka, blockb;
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+  Node* add = m.Int32AddWithOverflow(bt.param0, bt.param1);
+  Node* ovf = m.Projection(1, add);
+  m.Branch(ovf, &blocka, &blockb);
+  m.Bind(&blocka);
+  bt.AddReturn(m.Int32Constant(constant));
+  m.Bind(&blockb);
+  Node* val = m.Projection(0, add);
+  bt.AddReturn(val);
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t expected;
+      if (bits::SignedAddOverflow32(*i, *j, &expected)) expected = constant;
+      CHECK_EQ(expected, bt.call(*i, *j));
+    }
+  }
+}
+
+
+TEST(RunInt32SubWithOverflowP) {
+  int32_t actual_val = -1;
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+  Node* add = m.Int32SubWithOverflow(bt.param0, bt.param1);
+  Node* val = m.Projection(0, add);
+  Node* ovf = m.Projection(1, add);
+  m.StoreToPointer(&actual_val, kMachInt32, val);
+  bt.AddReturn(ovf);
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t expected_val;
+      int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
+      CHECK_EQ(expected_ovf, bt.call(*i, *j));
+      CHECK_EQ(expected_val, actual_val);
+    }
+  }
+}
+
+
+TEST(RunInt32SubWithOverflowImm) {
+  int32_t actual_val = -1, expected_val = 0;
+  FOR_INT32_INPUTS(i) {
+    {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      Node* add = m.Int32SubWithOverflow(m.Int32Constant(*i), m.Parameter(0));
+      Node* val = m.Projection(0, add);
+      Node* ovf = m.Projection(1, add);
+      m.StoreToPointer(&actual_val, kMachInt32, val);
+      m.Return(ovf);
+      FOR_INT32_INPUTS(j) {
+        int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
+        CHECK_EQ(expected_ovf, m.Call(*j));
+        CHECK_EQ(expected_val, actual_val);
+      }
+    }
+    {
+      RawMachineAssemblerTester<int32_t> m(kMachInt32);
+      Node* add = m.Int32SubWithOverflow(m.Parameter(0), m.Int32Constant(*i));
+      Node* val = m.Projection(0, add);
+      Node* ovf = m.Projection(1, add);
+      m.StoreToPointer(&actual_val, kMachInt32, val);
+      m.Return(ovf);
+      FOR_INT32_INPUTS(j) {
+        int expected_ovf = bits::SignedSubOverflow32(*j, *i, &expected_val);
+        CHECK_EQ(expected_ovf, m.Call(*j));
+        CHECK_EQ(expected_val, actual_val);
+      }
+    }
+    FOR_INT32_INPUTS(j) {
+      RawMachineAssemblerTester<int32_t> m;
+      Node* add =
+          m.Int32SubWithOverflow(m.Int32Constant(*i), m.Int32Constant(*j));
+      Node* val = m.Projection(0, add);
+      Node* ovf = m.Projection(1, add);
+      m.StoreToPointer(&actual_val, kMachInt32, val);
+      m.Return(ovf);
+      int expected_ovf = bits::SignedSubOverflow32(*i, *j, &expected_val);
+      CHECK_EQ(expected_ovf, m.Call());
+      CHECK_EQ(expected_val, actual_val);
+    }
+  }
+}
+
+
+TEST(RunInt32SubWithOverflowInBranchP) {
+  int constant = 911999;
+  MLabel blocka, blockb;
+  RawMachineAssemblerTester<int32_t> m;
+  Int32BinopTester bt(&m);
+  Node* sub = m.Int32SubWithOverflow(bt.param0, bt.param1);
+  Node* ovf = m.Projection(1, sub);
+  m.Branch(ovf, &blocka, &blockb);
+  m.Bind(&blocka);
+  bt.AddReturn(m.Int32Constant(constant));
+  m.Bind(&blockb);
+  Node* val = m.Projection(0, sub);
+  bt.AddReturn(val);
+  FOR_INT32_INPUTS(i) {
+    FOR_INT32_INPUTS(j) {
+      int32_t expected;
+      if (bits::SignedSubOverflow32(*i, *j, &expected)) expected = constant;
+      CHECK_EQ(expected, bt.call(*i, *j));
+    }
+  }
+}
+
+
+TEST(RunChangeInt32ToInt64P) {
+  if (kPointerSize < 8) return;
+  int64_t actual = -1;
+  RawMachineAssemblerTester<int32_t> m(kMachInt32);
+  m.StoreToPointer(&actual, kMachInt64, m.ChangeInt32ToInt64(m.Parameter(0)));
+  m.Return(m.Int32Constant(0));
+  FOR_INT32_INPUTS(i) {
+    int64_t expected = *i;
+    CHECK_EQ(0, m.Call(*i));
+    CHECK_EQ(expected, actual);
+  }
+}
+
+
+TEST(RunChangeUint32ToUint64P) {
+  if (kPointerSize < 8) return;
+  int64_t actual = -1;
+  RawMachineAssemblerTester<int32_t> m(kMachUint32);
+  m.StoreToPointer(&actual, kMachUint64,
+                   m.ChangeUint32ToUint64(m.Parameter(0)));
+  m.Return(m.Int32Constant(0));
+  FOR_UINT32_INPUTS(i) {
+    int64_t expected = static_cast<uint64_t>(*i);
+    CHECK_EQ(0, m.Call(*i));
+    CHECK_EQ(expected, actual);
+  }
+}
+
+
+TEST(RunTruncateInt64ToInt32P) {
+  if (kPointerSize < 8) return;
+  int64_t expected = -1;
+  RawMachineAssemblerTester<int32_t> m;
+  m.Return(m.TruncateInt64ToInt32(m.LoadFromPointer(&expected, kMachInt64)));
+  FOR_UINT32_INPUTS(i) {
+    FOR_UINT32_INPUTS(j) {
+      expected = (static_cast<uint64_t>(*j) << 32) | *i;
+      CHECK_UINT32_EQ(expected, m.Call());
+    }
+  }
+}
+
+
+TEST(RunTruncateFloat64ToInt32P) {
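+  // Each entry pairs an input double with its expected truncation result
+  // (the expected int32 value, stored as a double).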
+  struct {
+    double from;
+    double raw;
+  } kValues[] = {{0, 0},
+                 {0.5, 0},
+                 {-0.5, 0},
+                 {1.5, 1},
+                 {-1.5, -1},
+                 {5.5, 5},
+                 {-5.0, -5},
+                 {v8::base::OS::nan_value(), 0},
+                 {std::numeric_limits<double>::infinity(), 0},
+                 {-v8::base::OS::nan_value(), 0},
+                 {-std::numeric_limits<double>::infinity(), 0},
+                 {4.94065645841e-324, 0},
+                 {-4.94065645841e-324, 0},
+                 {0.9999999999999999, 0},
+                 {-0.9999999999999999, 0},
+                 {4294967296.0, 0},
+                 {-4294967296.0, 0},
+                 {9223372036854775000.0, 4294966272.0},
+                 {-9223372036854775000.0, -4294966272.0},
+                 {4.5036e+15, 372629504},
+                 {-4.5036e+15, -372629504},
+                 {287524199.5377777, 0x11234567},
+                 {-287524199.5377777, -0x11234567},
+                 {2300193596.302222, 2300193596.0},
+                 {-2300193596.302222, -2300193596.0},
+                 {4600387192.604444, 305419896},
+                 {-4600387192.604444, -305419896},
+                 {4823855600872397.0, 1737075661},
+                 {-4823855600872397.0, -1737075661},
+                 {4503603922337791.0, -1},
+                 {-4503603922337791.0, 1},
+                 {4503601774854143.0, 2147483647},
+                 {-4503601774854143.0, -2147483647},
+                 {9007207844675582.0, -2},
+                 {-9007207844675582.0, 2},
+                 {2.4178527921507624e+24, -536870912},
+                 {-2.4178527921507624e+24, 536870912},
+                 {2.417853945072267e+24, -536870912},
+                 {-2.417853945072267e+24, 536870912},
+                 {4.8357055843015248e+24, -1073741824},
+                 {-4.8357055843015248e+24, 1073741824},
+                 {4.8357078901445341e+24, -1073741824},
+                 {-4.8357078901445341e+24, 1073741824},
+                 {2147483647.0, 2147483647.0},
+                 {-2147483648.0, -2147483648.0},
+                 {9.6714111686030497e+24, -2147483648.0},
+                 {-9.6714111686030497e+24, -2147483648.0},
+                 {9.6714157802890681e+24, -2147483648.0},
+                 {-9.6714157802890681e+24, -2147483648.0},
+                 {1.9342813113834065e+25, 2147483648.0},
+                 {-1.9342813113834065e+25, 2147483648.0},
+                 {3.868562622766813e+25, 0},
+                 {-3.868562622766813e+25, 0},
+                 {1.7976931348623157e+308, 0},
+                 {-1.7976931348623157e+308, 0}};
+  double input = -1.0;
+  RawMachineAssemblerTester<int32_t> m;
+  m.Return(m.TruncateFloat64ToInt32(m.LoadFromPointer(&input, kMachFloat64)));
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    input = kValues[i].from;
+    uint64_t expected = static_cast<int64_t>(kValues[i].raw);
+    CHECK_EQ(static_cast<int>(expected), m.Call());
+  }
+}
+
+#endif  // V8_TURBOFAN_TARGET
diff --git a/test/cctest/compiler/test-run-properties.cc b/test/cctest/compiler/test-run-properties.cc
new file mode 100644
index 0000000..d4442f7
--- /dev/null
+++ b/test/cctest/compiler/test-run-properties.cc
@@ -0,0 +1,141 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <typename U>
+static void TypedArrayLoadHelper(const char* array_type) {
+  static const uint32_t kValues[] = {
+      0x00000000, 0x00000001, 0x00000023, 0x00000042, 0x12345678, 0x87654321,
+      0x0000003f, 0x0000007f, 0x00003fff, 0x00007fff, 0x3fffffff, 0x7fffffff,
+      0x000000ff, 0x00000080, 0x0000ffff, 0x00008000, 0xffffffff, 0x80000000};
+  EmbeddedVector<char, 1024> values_buffer;
+  StringBuilder values_builder(values_buffer.start(), values_buffer.length());
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    values_builder.AddFormatted("a[%d] = 0x%08x;", i, kValues[i]);
+  }
+
+  // Note that the source below creates two different typed arrays with
+  // distinct elements kinds to get coverage for both access patterns:
+  // - IsFixedTypedArrayElementsKind(x)
+  // - IsExternalArrayElementsKind(y)
+  const char* source =
+      "(function(a) {"
+      "  var x = (a = new %sArray(%d)); %s;"
+      "  var y = (a = new %sArray(%d)); %s; %%TypedArrayGetBuffer(y);"
+      "  if (!%%HasFixed%sElements(x)) %%AbortJS('x');"
+      "  if (!%%HasExternal%sElements(y)) %%AbortJS('y');"
+      "  function f(a,b) {"
+      "    a = a | 0; b = b | 0;"
+      "    return x[a] + y[b];"
+      "  }"
+      "  return f;"
+      "})()";
+  EmbeddedVector<char, 1024> source_buffer;
+  SNPrintF(source_buffer, source, array_type, arraysize(kValues),
+           values_buffer.start(), array_type, arraysize(kValues),
+           values_buffer.start(), array_type, array_type);
+
+  FunctionTester T(
+      source_buffer.start(),
+      CompilationInfo::kContextSpecializing | CompilationInfo::kTypingEnabled);
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    for (size_t j = 0; j < arraysize(kValues); ++j) {
+      volatile U value_a = static_cast<U>(kValues[i]);
+      volatile U value_b = static_cast<U>(kValues[j]);
+      double expected =
+          static_cast<double>(value_a) + static_cast<double>(value_b);
+      T.CheckCall(T.Val(expected), T.Val(static_cast<double>(i)),
+                  T.Val(static_cast<double>(j)));
+    }
+  }
+}
+
+
+TEST(TypedArrayLoad) {
+  FLAG_typed_array_max_size_in_heap = 256;
+  TypedArrayLoadHelper<int8_t>("Int8");
+  TypedArrayLoadHelper<uint8_t>("Uint8");
+  TypedArrayLoadHelper<int16_t>("Int16");
+  TypedArrayLoadHelper<uint16_t>("Uint16");
+  TypedArrayLoadHelper<int32_t>("Int32");
+  TypedArrayLoadHelper<uint32_t>("Uint32");
+  TypedArrayLoadHelper<float>("Float32");
+  TypedArrayLoadHelper<double>("Float64");
+  // TODO(mstarzinger): Add tests for ClampedUint8.
+}
+
+
+template <typename U>
+static void TypedArrayStoreHelper(const char* array_type) {
+  static const uint32_t kValues[] = {
+      0x00000000, 0x00000001, 0x00000023, 0x00000042, 0x12345678, 0x87654321,
+      0x0000003f, 0x0000007f, 0x00003fff, 0x00007fff, 0x3fffffff, 0x7fffffff,
+      0x000000ff, 0x00000080, 0x0000ffff, 0x00008000, 0xffffffff, 0x80000000};
+  EmbeddedVector<char, 1024> values_buffer;
+  StringBuilder values_builder(values_buffer.start(), values_buffer.length());
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    values_builder.AddFormatted("a[%d] = 0x%08x;", i, kValues[i]);
+  }
+
+  // Note that the source below creates two different typed arrays with
+  // distinct elements kinds to get coverage for both access patterns:
+  // - IsFixedTypedArrayElementsKind(x)
+  // - IsExternalArrayElementsKind(y)
+  const char* source =
+      "(function(a) {"
+      "  var x = (a = new %sArray(%d)); %s;"
+      "  var y = (a = new %sArray(%d)); %s; %%TypedArrayGetBuffer(y);"
+      "  if (!%%HasFixed%sElements(x)) %%AbortJS('x');"
+      "  if (!%%HasExternal%sElements(y)) %%AbortJS('y');"
+      "  function f(a,b) {"
+      "    a = a | 0; b = b | 0;"
+      "    var t = x[a];"
+      "    x[a] = y[b];"
+      "    y[b] = t;"
+      "    t = y[b];"
+      "    y[b] = x[a];"
+      "    x[a] = t;"
+      "    return x[a] + y[b];"
+      "  }"
+      "  return f;"
+      "})()";
+  EmbeddedVector<char, 2048> source_buffer;
+  SNPrintF(source_buffer, source, array_type, arraysize(kValues),
+           values_buffer.start(), array_type, arraysize(kValues),
+           values_buffer.start(), array_type, array_type);
+
+  FunctionTester T(
+      source_buffer.start(),
+      CompilationInfo::kContextSpecializing | CompilationInfo::kTypingEnabled);
+  for (size_t i = 0; i < arraysize(kValues); ++i) {
+    for (size_t j = 0; j < arraysize(kValues); ++j) {
+      volatile U value_a = static_cast<U>(kValues[i]);
+      volatile U value_b = static_cast<U>(kValues[j]);
+      double expected =
+          static_cast<double>(value_a) + static_cast<double>(value_b);
+      T.CheckCall(T.Val(expected), T.Val(static_cast<double>(i)),
+                  T.Val(static_cast<double>(j)));
+    }
+  }
+}
+
+
+TEST(TypedArrayStore) {
+  FLAG_typed_array_max_size_in_heap = 256;
+  TypedArrayStoreHelper<int8_t>("Int8");
+  TypedArrayStoreHelper<uint8_t>("Uint8");
+  TypedArrayStoreHelper<int16_t>("Int16");
+  TypedArrayStoreHelper<uint16_t>("Uint16");
+  TypedArrayStoreHelper<int32_t>("Int32");
+  TypedArrayStoreHelper<uint32_t>("Uint32");
+  TypedArrayStoreHelper<float>("Float32");
+  TypedArrayStoreHelper<double>("Float64");
+  // TODO(mstarzinger): Add tests for ClampedUint8.
+}
diff --git a/test/cctest/compiler/test-run-variables.cc b/test/cctest/compiler/test-run-variables.cc
new file mode 100644
index 0000000..bf86e0d
--- /dev/null
+++ b/test/cctest/compiler/test-run-variables.cc
@@ -0,0 +1,121 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "test/cctest/compiler/function-tester.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
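+// Sentinel: a NULL expectation marks a call that is expected to throw.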
+static const char* throws = NULL;
+
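+// Each test is a triple: source fragment, expected result for a truthy
+// argument (123), and expected result for a falsy argument (0).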
+static const char* load_tests[] = {
+    "var x = a; r = x",                       "123",       "0",
+    "var x = (r = x)",                        "undefined", "undefined",
+    "var x = (a?1:2); r = x",                 "1",         "2",
+    "const x = a; r = x",                     "123",       "0",
+    "const x = (r = x)",                      "undefined", "undefined",
+    "const x = (a?3:4); r = x",               "3",         "4",
+    "'use strict'; const x = a; r = x",       "123",       "0",
+    "'use strict'; const x = (r = x)",        throws,      throws,
+    "'use strict'; const x = (a?5:6); r = x", "5",         "6",
+    "'use strict'; let x = a; r = x",         "123",       "0",
+    "'use strict'; let x = (r = x)",          throws,      throws,
+    "'use strict'; let x = (a?7:8); r = x",   "7",         "8",
+    NULL};
+
+static const char* store_tests[] = {
+    "var x = 1; x = a; r = x",                     "123",  "0",
+    "var x = (a?(x=4,2):3); r = x",                "2",    "3",
+    "var x = (a?4:5); x = a; r = x",               "123",  "0",
+    "const x = 1; x = a; r = x",                   "1",    "1",
+    "const x = (a?(x=4,2):3); r = x",              "2",    "3",
+    "const x = (a?4:5); x = a; r = x",             "4",    "5",
+    // Assignments to 'const' are early SyntaxErrors, rejected by the parser,
+    // so they cannot be tested here.
+    "'use strict'; let x = 1; x = a; r = x",       "123",  "0",
+    "'use strict'; let x = (a?(x=4,2):3); r = x",  throws, "3",
+    "'use strict'; let x = (a?4:5); x = a; r = x", "123",  "0",
+    NULL};
+
+static const char* bind_tests[] = {
+    "if (a) { const x = a }; r = x;",            "123", "undefined",
+    "for (; a > 0; a--) { const x = a }; r = x", "123", "undefined",
+    // Re-initialization of variables other than legacy 'const' is not
+    // possible due to sane variable scoping, hence no tests here.
+    NULL};
+
+
+static void RunVariableTests(const char* source, const char* tests[]) {
+  FLAG_harmony_scoping = true;
+  EmbeddedVector<char, 512> buffer;
+
+  for (int i = 0; tests[i] != NULL; i += 3) {
+    SNPrintF(buffer, source, tests[i]);
+    PrintF("#%d: %s\n", i / 3, buffer.start());
+    FunctionTester T(buffer.start());
+
+    // Check the function with a truthy parameter.
+    if (tests[i + 1] != throws) {
+      Handle<Object> r = v8::Utils::OpenHandle(*CompileRun(tests[i + 1]));
+      T.CheckCall(r, T.Val(123), T.Val("result"));
+    } else {
+      T.CheckThrows(T.Val(123), T.Val("result"));
+    }
+
+    // Check the function with a falsy parameter.
+    if (tests[i + 2] != throws) {
+      Handle<Object> r = v8::Utils::OpenHandle(*CompileRun(tests[i + 2]));
+      T.CheckCall(r, T.Val(0.0), T.Val("result"));
+    } else {
+      T.CheckThrows(T.Val(0.0), T.Val("result"));
+    }
+  }
+}
+
+
+TEST(StackLoadVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, load_tests);
+}
+
+
+TEST(ContextLoadVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, load_tests);
+}
+
+
+TEST(StackStoreVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, store_tests);
+}
+
+
+TEST(ContextStoreVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, store_tests);
+}
+
+
+TEST(StackInitializeVariables) {
+  const char* source = "(function(a,r) { %s; return r; })";
+  RunVariableTests(source, bind_tests);
+}
+
+
+TEST(ContextInitializeVariables) {
+  const char* source = "(function(a,r) { %s; function f() {x} return r; })";
+  RunVariableTests(source, bind_tests);
+}
+
+
+TEST(SelfReferenceVariable) {
+  FunctionTester T("(function self() { return self; })");
+
+  T.CheckCall(T.function);
+  CompileRun("var self = 'not a function'");
+  T.CheckCall(T.function);
+}
diff --git a/test/cctest/compiler/test-schedule.cc b/test/cctest/compiler/test-schedule.cc
new file mode 100644
index 0000000..6c05c05
--- /dev/null
+++ b/test/cctest/compiler/test-schedule.cc
@@ -0,0 +1,145 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
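+// A placeholder operator with no inputs or outputs, used where the node's
+// semantics are irrelevant to the test.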
+static SimpleOperator dummy_operator(IrOpcode::kParameter, Operator::kNoWrite,
+                                     0, 0, "dummy");
+
+TEST(TestScheduleAllocation) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  CHECK_NE(NULL, schedule.start());
+  CHECK_EQ(schedule.start(), *(schedule.all_blocks().begin()));
+}
+
+
+TEST(TestScheduleAddNode) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* n1 = graph.NewNode(&dummy_operator);
+
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* entry = schedule.start();
+  schedule.AddNode(entry, n0);
+  schedule.AddNode(entry, n1);
+
+  CHECK_EQ(entry, schedule.block(n0));
+  CHECK_EQ(entry, schedule.block(n1));
+  CHECK(schedule.SameBasicBlock(n0, n1));
+
+  Node* n2 = graph.NewNode(&dummy_operator);
+  CHECK_EQ(NULL, schedule.block(n2));
+}
+
+
+TEST(TestScheduleAddGoto) {
+  HandleAndZoneScope scope;
+
+  Schedule schedule(scope.main_zone());
+  BasicBlock* entry = schedule.start();
+  BasicBlock* next = schedule.NewBasicBlock();
+
+  schedule.AddGoto(entry, next);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(next, entry->SuccessorAt(0));
+
+  CHECK_EQ(1, next->PredecessorCount());
+  CHECK_EQ(entry, next->PredecessorAt(0));
+  CHECK_EQ(0, next->SuccessorCount());
+}
+
+
+TEST(TestScheduleAddBranch) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* entry = schedule.start();
+  BasicBlock* tblock = schedule.NewBasicBlock();
+  BasicBlock* fblock = schedule.NewBasicBlock();
+
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  Node* b = graph.NewNode(common.Branch(), n0);
+
+  schedule.AddBranch(entry, b, tblock, fblock);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(2, entry->SuccessorCount());
+  CHECK_EQ(tblock, entry->SuccessorAt(0));
+  CHECK_EQ(fblock, entry->SuccessorAt(1));
+
+  CHECK_EQ(1, tblock->PredecessorCount());
+  CHECK_EQ(entry, tblock->PredecessorAt(0));
+  CHECK_EQ(0, tblock->SuccessorCount());
+
+  CHECK_EQ(1, fblock->PredecessorCount());
+  CHECK_EQ(entry, fblock->PredecessorAt(0));
+  CHECK_EQ(0, fblock->SuccessorCount());
+}
+
+
+TEST(TestScheduleAddReturn) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  BasicBlock* entry = schedule.start();
+  schedule.AddReturn(entry, n0);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(schedule.end(), entry->SuccessorAt(0));
+}
+
+
+TEST(TestScheduleAddThrow) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  Node* n0 = graph.NewNode(&dummy_operator);
+  BasicBlock* entry = schedule.start();
+  schedule.AddThrow(entry, n0);
+
+  CHECK_EQ(0, entry->PredecessorCount());
+  CHECK_EQ(1, entry->SuccessorCount());
+  CHECK_EQ(schedule.end(), entry->SuccessorAt(0));
+}
+
+
+TEST(BuildMulNodeGraph) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  MachineOperatorBuilder machine;
+
+  Node* start = graph.NewNode(common.Start(0));
+  graph.SetStart(start);
+  Node* param0 = graph.NewNode(common.Parameter(0), graph.start());
+  Node* param1 = graph.NewNode(common.Parameter(1), graph.start());
+
+  Node* mul = graph.NewNode(machine.Int32Mul(), param0, param1);
+  Node* ret = graph.NewNode(common.Return(), mul, start);
+
+  USE(ret);
+}
diff --git a/test/cctest/compiler/test-scheduler.cc b/test/cctest/compiler/test-scheduler.cc
new file mode 100644
index 0000000..cf33123
--- /dev/null
+++ b/test/cctest/compiler/test-scheduler.cc
@@ -0,0 +1,1713 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/generic-node.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/verifier.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+// TODO(titzer): pull RPO tests out to their own file.
+struct TestLoop {
+  int count;
+  BasicBlock** nodes;
+  BasicBlock* header() { return nodes[0]; }
+  BasicBlock* last() { return nodes[count - 1]; }
+  ~TestLoop() { delete[] nodes; }
+};
+
+
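+// Builds a chain of |count| fresh blocks whose last block branches back to
+// the first, forming a simple loop.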
+static TestLoop* CreateLoop(Schedule* schedule, int count) {
+  TestLoop* loop = new TestLoop();
+  loop->count = count;
+  loop->nodes = new BasicBlock*[count];
+  for (int i = 0; i < count; i++) {
+    loop->nodes[i] = schedule->NewBasicBlock();
+    if (i > 0) schedule->AddSuccessor(loop->nodes[i - 1], loop->nodes[i]);
+  }
+  schedule->AddSuccessor(loop->nodes[count - 1], loop->nodes[0]);
+  return loop;
+}
+
+
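+// Checks that |order| contains |expected| blocks numbered consecutively from
+// zero; when loops are disallowed, also checks that no block is marked as a
+// loop end.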
+static void CheckRPONumbers(BasicBlockVector* order, int expected,
+                            bool loops_allowed) {
+  CHECK_EQ(expected, static_cast<int>(order->size()));
+  for (int i = 0; i < static_cast<int>(order->size()); i++) {
+    CHECK(order->at(i)->rpo_number_ == i);
+    if (!loops_allowed) CHECK_LT(order->at(i)->loop_end_, 0);
+  }
+}
+
+
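+// Verifies that the |body_size| blocks (headed by blocks[0]) occupy a
+// contiguous RPO range inside the header's loop.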
+static void CheckLoopContains(BasicBlock** blocks, int body_size) {
+  BasicBlock* header = blocks[0];
+  CHECK_GT(header->loop_end_, 0);
+  CHECK_EQ(body_size, (header->loop_end_ - header->rpo_number_));
+  for (int i = 0; i < body_size; i++) {
+    int num = blocks[i]->rpo_number_;
+    CHECK(num >= header->rpo_number_ && num < header->loop_end_);
+    CHECK(header->LoopContains(blocks[i]));
+    CHECK(header->IsLoopHeader() || blocks[i]->loop_header_ == header);
+  }
+}
+
+
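+// Counts every scheduled node, plus one per block that ends in a control
+// instruction.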
+static int GetScheduledNodeCount(Schedule* schedule) {
+  int node_count = 0;
+  for (BasicBlockVectorIter i = schedule->rpo_order()->begin();
+       i != schedule->rpo_order()->end(); ++i) {
+    BasicBlock* block = *i;
+    for (BasicBlock::const_iterator j = block->begin(); j != block->end();
+         ++j) {
+      ++node_count;
+    }
+    BasicBlock::Control control = block->control_;
+    if (control != BasicBlock::kNone) {
+      ++node_count;
+    }
+  }
+  return node_count;
+}
+
+
+static Schedule* ComputeAndVerifySchedule(int expected, Graph* graph) {
+  if (FLAG_trace_turbo) {
+    OFStream os(stdout);
+    os << AsDOT(*graph);
+  }
+
+  Schedule* schedule = Scheduler::ComputeSchedule(graph);
+
+  if (FLAG_trace_turbo_scheduler) {
+    OFStream os(stdout);
+    os << *schedule << endl;
+  }
+  ScheduleVerifier::Run(schedule);
+  CHECK_EQ(expected, GetScheduledNodeCount(schedule));
+  return schedule;
+}
+
+
+TEST(RPODegenerate1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 1, false);
+  CHECK_EQ(schedule.start(), order->at(0));
+}
+
+
+TEST(RPODegenerate2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  schedule.AddGoto(schedule.start(), schedule.end());
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 2, false);
+  CHECK_EQ(schedule.start(), order->at(0));
+  CHECK_EQ(schedule.end(), order->at(1));
+}
+
+
+TEST(RPOLine) {
+  HandleAndZoneScope scope;
+
+  for (int i = 0; i < 10; i++) {
+    Schedule schedule(scope.main_zone());
+
+    BasicBlock* last = schedule.start();
+    for (int j = 0; j < i; j++) {
+      BasicBlock* block = schedule.NewBasicBlock();
+      schedule.AddGoto(last, block);
+      last = block;
+    }
+    BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+    CheckRPONumbers(order, 1 + i, false);
+
+    Schedule::BasicBlocks blocks(schedule.all_blocks());
+    for (Schedule::BasicBlocks::iterator iter = blocks.begin();
+         iter != blocks.end(); ++iter) {
+      BasicBlock* block = *iter;
+      if (block->rpo_number_ >= 0 && block->SuccessorCount() == 1) {
+        CHECK(block->rpo_number_ + 1 == block->SuccessorAt(0)->rpo_number_);
+      }
+    }
+  }
+}
+
+
+TEST(RPOSelfLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  schedule.AddSuccessor(schedule.start(), schedule.start());
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 1, true);
+  BasicBlock* loop[] = {schedule.start()};
+  CheckLoopContains(loop, 1);
+}
+
+
+TEST(RPOEntryLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  schedule.AddSuccessor(schedule.start(), schedule.end());
+  schedule.AddSuccessor(schedule.end(), schedule.start());
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 2, true);
+  BasicBlock* loop[] = {schedule.start(), schedule.end()};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOEndLoop) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+  schedule.AddSuccessor(schedule.start(), loop1->header());
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 3, true);
+  CheckLoopContains(loop1->nodes, loop1->count);
+}
+
+
+TEST(RPOEndLoopNested) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+  SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
+  schedule.AddSuccessor(schedule.start(), loop1->header());
+  schedule.AddSuccessor(loop1->last(), schedule.start());
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 3, true);
+  CheckLoopContains(loop1->nodes, loop1->count);
+}
+
+
+TEST(RPODiamond) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.end();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(A, C);
+  schedule.AddSuccessor(B, D);
+  schedule.AddSuccessor(C, D);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 4, false);
+
+  CHECK_EQ(0, A->rpo_number_);
+  CHECK((B->rpo_number_ == 1 && C->rpo_number_ == 2) ||
+        (B->rpo_number_ == 2 && C->rpo_number_ == 1));
+  CHECK_EQ(3, D->rpo_number_);
+}
+
+
+TEST(RPOLoop1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.end();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(C, D);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 4, true);
+  BasicBlock* loop[] = {B, C};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOLoop2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.end();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(B, D);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 4, true);
+  BasicBlock* loop[] = {B, C};
+  CheckLoopContains(loop, 2);
+}
+
+
+TEST(RPOLoopN) {
+  HandleAndZoneScope scope;
+
+  for (int i = 0; i < 11; i++) {
+    Schedule schedule(scope.main_zone());
+    BasicBlock* A = schedule.start();
+    BasicBlock* B = schedule.NewBasicBlock();
+    BasicBlock* C = schedule.NewBasicBlock();
+    BasicBlock* D = schedule.NewBasicBlock();
+    BasicBlock* E = schedule.NewBasicBlock();
+    BasicBlock* F = schedule.NewBasicBlock();
+    BasicBlock* G = schedule.end();
+
+    schedule.AddSuccessor(A, B);
+    schedule.AddSuccessor(B, C);
+    schedule.AddSuccessor(C, D);
+    schedule.AddSuccessor(D, E);
+    schedule.AddSuccessor(E, F);
+    schedule.AddSuccessor(F, B);
+    schedule.AddSuccessor(B, G);
+
+    // Throw in extra backedges from time to time.
+    if (i == 1) schedule.AddSuccessor(B, B);
+    if (i == 2) schedule.AddSuccessor(C, B);
+    if (i == 3) schedule.AddSuccessor(D, B);
+    if (i == 4) schedule.AddSuccessor(E, B);
+    if (i == 5) schedule.AddSuccessor(F, B);
+
+    // Throw in extra loop exits from time to time.
+    if (i == 6) schedule.AddSuccessor(B, G);
+    if (i == 7) schedule.AddSuccessor(C, G);
+    if (i == 8) schedule.AddSuccessor(D, G);
+    if (i == 9) schedule.AddSuccessor(E, G);
+    if (i == 10) schedule.AddSuccessor(F, G);
+
+    BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+    CheckRPONumbers(order, 7, true);
+    BasicBlock* loop[] = {B, C, D, E, F};
+    CheckLoopContains(loop, 5);
+  }
+}
+
+
+TEST(RPOLoopNest1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.NewBasicBlock();
+  BasicBlock* F = schedule.end();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, D);
+  schedule.AddSuccessor(D, C);
+  schedule.AddSuccessor(D, E);
+  schedule.AddSuccessor(E, B);
+  schedule.AddSuccessor(E, F);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 6, true);
+  BasicBlock* loop1[] = {B, C, D, E};
+  CheckLoopContains(loop1, 4);
+
+  BasicBlock* loop2[] = {C, D};
+  CheckLoopContains(loop2, 2);
+}
+
+
+TEST(RPOLoopNest2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.NewBasicBlock();
+  BasicBlock* F = schedule.NewBasicBlock();
+  BasicBlock* G = schedule.NewBasicBlock();
+  BasicBlock* H = schedule.end();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(C, D);
+  schedule.AddSuccessor(D, E);
+  schedule.AddSuccessor(E, F);
+  schedule.AddSuccessor(F, G);
+  schedule.AddSuccessor(G, H);
+
+  schedule.AddSuccessor(E, D);
+  schedule.AddSuccessor(F, C);
+  schedule.AddSuccessor(G, B);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 8, true);
+  BasicBlock* loop1[] = {B, C, D, E, F, G};
+  CheckLoopContains(loop1, 6);
+
+  BasicBlock* loop2[] = {C, D, E, F};
+  CheckLoopContains(loop2, 4);
+
+  BasicBlock* loop3[] = {D, E};
+  CheckLoopContains(loop3, 2);
+}
+
+
+TEST(RPOLoopFollow1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+  SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* E = schedule.end();
+
+  schedule.AddSuccessor(A, loop1->header());
+  schedule.AddSuccessor(loop1->header(), loop2->header());
+  schedule.AddSuccessor(loop2->last(), E);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+
+  CheckLoopContains(loop1->nodes, loop1->count);
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+}
+
+
+TEST(RPOLoopFollow2) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+  SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* S = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.end();
+
+  schedule.AddSuccessor(A, loop1->header());
+  schedule.AddSuccessor(loop1->header(), S);
+  schedule.AddSuccessor(S, loop2->header());
+  schedule.AddSuccessor(loop2->last(), E);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+
+  CheckLoopContains(loop1->nodes, loop1->count);
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+}
+
+
+TEST(RPOLoopFollowN) {
+  HandleAndZoneScope scope;
+
+  for (int size = 1; size < 5; size++) {
+    for (int exit = 0; exit < size; exit++) {
+      Schedule schedule(scope.main_zone());
+      SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+      SmartPointer<TestLoop> loop2(CreateLoop(&schedule, size));
+      BasicBlock* A = schedule.start();
+      BasicBlock* E = schedule.end();
+
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->nodes[exit], loop2->header());
+      schedule.AddSuccessor(loop2->nodes[exit], E);
+      BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+      CheckLoopContains(loop1->nodes, loop1->count);
+
+      CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+      CheckLoopContains(loop1->nodes, loop1->count);
+      CheckLoopContains(loop2->nodes, loop2->count);
+    }
+  }
+}
+
+
+TEST(RPONestedLoopFollow1) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 1));
+  SmartPointer<TestLoop> loop2(CreateLoop(&schedule, 1));
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* E = schedule.end();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, loop1->header());
+  schedule.AddSuccessor(loop1->header(), loop2->header());
+  schedule.AddSuccessor(loop2->last(), C);
+  schedule.AddSuccessor(C, E);
+  schedule.AddSuccessor(C, B);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+
+  CheckLoopContains(loop1->nodes, loop1->count);
+
+  CHECK_EQ(schedule.BasicBlockCount(), static_cast<int>(order->size()));
+  CheckLoopContains(loop1->nodes, loop1->count);
+  CheckLoopContains(loop2->nodes, loop2->count);
+
+  BasicBlock* loop3[] = {B, loop1->nodes[0], loop2->nodes[0], C};
+  CheckLoopContains(loop3, 4);
+}
+
+
+TEST(RPOLoopBackedges1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    for (int j = 0; j < size; j++) {
+      Schedule schedule(scope.main_zone());
+      BasicBlock* A = schedule.start();
+      BasicBlock* E = schedule.end();
+
+      SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->last(), E);
+
+      schedule.AddSuccessor(loop1->nodes[i], loop1->header());
+      schedule.AddSuccessor(loop1->nodes[j], E);
+
+      BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+      CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+      CheckLoopContains(loop1->nodes, loop1->count);
+    }
+  }
+}
+
+
+TEST(RPOLoopOutedges1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    for (int j = 0; j < size; j++) {
+      Schedule schedule(scope.main_zone());
+      BasicBlock* A = schedule.start();
+      BasicBlock* D = schedule.NewBasicBlock();
+      BasicBlock* E = schedule.end();
+
+      SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+      schedule.AddSuccessor(A, loop1->header());
+      schedule.AddSuccessor(loop1->last(), E);
+
+      schedule.AddSuccessor(loop1->nodes[i], loop1->header());
+      schedule.AddSuccessor(loop1->nodes[j], D);
+      schedule.AddSuccessor(D, E);
+
+      BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+      CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+      CheckLoopContains(loop1->nodes, loop1->count);
+    }
+  }
+}
+
+
+TEST(RPOLoopOutedges2) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    Schedule schedule(scope.main_zone());
+    BasicBlock* A = schedule.start();
+    BasicBlock* E = schedule.end();
+
+    SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+    schedule.AddSuccessor(A, loop1->header());
+    schedule.AddSuccessor(loop1->last(), E);
+
+    for (int j = 0; j < size; j++) {
+      BasicBlock* O = schedule.NewBasicBlock();
+      schedule.AddSuccessor(loop1->nodes[j], O);
+      schedule.AddSuccessor(O, E);
+    }
+
+    BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+    CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+    CheckLoopContains(loop1->nodes, loop1->count);
+  }
+}
+
+
+TEST(RPOLoopOutloops1) {
+  HandleAndZoneScope scope;
+
+  int size = 8;
+  for (int i = 0; i < size; i++) {
+    Schedule schedule(scope.main_zone());
+    BasicBlock* A = schedule.start();
+    BasicBlock* E = schedule.end();
+    SmartPointer<TestLoop> loop1(CreateLoop(&schedule, size));
+    schedule.AddSuccessor(A, loop1->header());
+    schedule.AddSuccessor(loop1->last(), E);
+
+    TestLoop** loopN = new TestLoop*[size];
+    for (int j = 0; j < size; j++) {
+      loopN[j] = CreateLoop(&schedule, 2);
+      schedule.AddSuccessor(loop1->nodes[j], loopN[j]->header());
+      schedule.AddSuccessor(loopN[j]->last(), E);
+    }
+
+    BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+    CheckRPONumbers(order, schedule.BasicBlockCount(), true);
+    CheckLoopContains(loop1->nodes, loop1->count);
+
+    for (int j = 0; j < size; j++) {
+      CheckLoopContains(loopN[j]->nodes, loopN[j]->count);
+      delete loopN[j];
+    }
+    delete[] loopN;
+  }
+}
+
+
+TEST(RPOLoopMultibackedge) {
+  HandleAndZoneScope scope;
+  Schedule schedule(scope.main_zone());
+
+  BasicBlock* A = schedule.start();
+  BasicBlock* B = schedule.NewBasicBlock();
+  BasicBlock* C = schedule.NewBasicBlock();
+  BasicBlock* D = schedule.end();
+  BasicBlock* E = schedule.NewBasicBlock();
+
+  schedule.AddSuccessor(A, B);
+  schedule.AddSuccessor(B, C);
+  schedule.AddSuccessor(B, D);
+  schedule.AddSuccessor(B, E);
+  schedule.AddSuccessor(C, B);
+  schedule.AddSuccessor(D, B);
+  schedule.AddSuccessor(E, B);
+
+  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&schedule);
+  CheckRPONumbers(order, 5, true);
+
+  BasicBlock* loop1[] = {B, C, D, E};
+  CheckLoopContains(loop1, 4);
+}
+
+
+TEST(BuildScheduleEmpty) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start(0)));
+  graph.SetEnd(graph.NewNode(builder.End(), graph.start()));
+
+  USE(Scheduler::ComputeSchedule(&graph));
+}
+
+
+TEST(BuildScheduleOneParameter) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start(0)));
+
+  Node* p1 = graph.NewNode(builder.Parameter(0), graph.start());
+  Node* ret = graph.NewNode(builder.Return(), p1, graph.start(), graph.start());
+
+  graph.SetEnd(graph.NewNode(builder.End(), ret));
+
+  USE(Scheduler::ComputeSchedule(&graph));
+}
+
+
+TEST(BuildScheduleIfSplit) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  graph.SetStart(graph.NewNode(builder.Start(3)));
+
+  Node* p1 = graph.NewNode(builder.Parameter(0), graph.start());
+  Node* p2 = graph.NewNode(builder.Parameter(1), graph.start());
+  Node* p3 = graph.NewNode(builder.Parameter(2), graph.start());
+  Node* p4 = graph.NewNode(builder.Parameter(3), graph.start());
+  Node* p5 = graph.NewNode(builder.Parameter(4), graph.start());
+  Node* cmp = graph.NewNode(js_builder.LessThanOrEqual(), p1, p2, p3,
+                            graph.start(), graph.start());
+  Node* branch = graph.NewNode(builder.Branch(), cmp, graph.start());
+  Node* true_branch = graph.NewNode(builder.IfTrue(), branch);
+  Node* false_branch = graph.NewNode(builder.IfFalse(), branch);
+
+  Node* ret1 = graph.NewNode(builder.Return(), p4, graph.start(), true_branch);
+  Node* ret2 = graph.NewNode(builder.Return(), p5, graph.start(), false_branch);
+  Node* merge = graph.NewNode(builder.Merge(2), ret1, ret2);
+  graph.SetEnd(graph.NewNode(builder.End(), merge));
+
+  ComputeAndVerifySchedule(13, &graph);
+}
+
+
+TEST(BuildScheduleIfSplitWithEffects) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  const Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c, y) {
+  //   if (a < b) {
+  //     return a + b - c * c - a + y;
+  //   } else {
+  //     return c * c - a;
+  //   }
+  // }
+  op = common_builder.Start(0);
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  Node* nil = graph.NewNode(common_builder.Dead());
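+  // Nodes are allocated with Dead placeholder inputs first and wired up
+  // afterwards via ReplaceInput.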
+  op = common_builder.End();
+  Node* n23 = graph.NewNode(op, nil);
+  USE(n23);
+  op = common_builder.Merge(2);
+  Node* n22 = graph.NewNode(op, nil, nil);
+  USE(n22);
+  op = common_builder.Return();
+  Node* n16 = graph.NewNode(op, nil, nil, nil);
+  USE(n16);
+  op = js_builder.Add();
+  Node* n15 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n15);
+  op = js_builder.Subtract();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  op = js_builder.Subtract();
+  Node* n13 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n13);
+  op = js_builder.Add();
+  Node* n11 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op, n0);
+  USE(n2);
+  n11->ReplaceInput(0, n2);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op, n0);
+  USE(n3);
+  n11->ReplaceInput(1, n3);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n7 = graph.NewNode(op);
+  USE(n7);
+  n11->ReplaceInput(2, n7);
+  op = js_builder.LessThan();
+  Node* n8 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n8);
+  n8->ReplaceInput(0, n2);
+  n8->ReplaceInput(1, n3);
+  n8->ReplaceInput(2, n7);
+  n8->ReplaceInput(3, n0);
+  n8->ReplaceInput(4, n0);
+  n11->ReplaceInput(3, n8);
+  op = common_builder.IfTrue();
+  Node* n10 = graph.NewNode(op, nil);
+  USE(n10);
+  op = common_builder.Branch();
+  Node* n9 = graph.NewNode(op, nil, nil);
+  USE(n9);
+  n9->ReplaceInput(0, n8);
+  n9->ReplaceInput(1, n0);
+  n10->ReplaceInput(0, n9);
+  n11->ReplaceInput(4, n10);
+  n13->ReplaceInput(0, n11);
+  op = js_builder.Multiply();
+  Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n12);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op, n0);
+  USE(n4);
+  n12->ReplaceInput(0, n4);
+  n12->ReplaceInput(1, n4);
+  n12->ReplaceInput(2, n7);
+  n12->ReplaceInput(3, n11);
+  n12->ReplaceInput(4, n10);
+  n13->ReplaceInput(1, n12);
+  n13->ReplaceInput(2, n7);
+  n13->ReplaceInput(3, n12);
+  n13->ReplaceInput(4, n10);
+  n14->ReplaceInput(0, n13);
+  n14->ReplaceInput(1, n2);
+  n14->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n13);
+  n14->ReplaceInput(4, n10);
+  n15->ReplaceInput(0, n14);
+  op = common_builder.Parameter(0);
+  Node* n5 = graph.NewNode(op, n0);
+  USE(n5);
+  n15->ReplaceInput(1, n5);
+  n15->ReplaceInput(2, n7);
+  n15->ReplaceInput(3, n14);
+  n15->ReplaceInput(4, n10);
+  n16->ReplaceInput(0, n15);
+  n16->ReplaceInput(1, n15);
+  n16->ReplaceInput(2, n10);
+  n22->ReplaceInput(0, n16);
+  op = common_builder.Return();
+  Node* n21 = graph.NewNode(op, nil, nil, nil);
+  USE(n21);
+  op = js_builder.Subtract();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  op = js_builder.Multiply();
+  Node* n19 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n19);
+  n19->ReplaceInput(0, n4);
+  n19->ReplaceInput(1, n4);
+  n19->ReplaceInput(2, n7);
+  n19->ReplaceInput(3, n8);
+  op = common_builder.IfFalse();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n9);
+  n19->ReplaceInput(4, n18);
+  n20->ReplaceInput(0, n19);
+  n20->ReplaceInput(1, n2);
+  n20->ReplaceInput(2, n7);
+  n20->ReplaceInput(3, n19);
+  n20->ReplaceInput(4, n18);
+  n21->ReplaceInput(0, n20);
+  n21->ReplaceInput(1, n20);
+  n21->ReplaceInput(2, n18);
+  n22->ReplaceInput(1, n21);
+  n23->ReplaceInput(0, n22);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n23);
+
+  ComputeAndVerifySchedule(20, &graph);
+}
+
+
+TEST(BuildScheduleSimpleLoop) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  const Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b) {
+  //   while (a < b) {
+  //     a++;
+  //   }
+  //   return a;
+  // }
+  op = common_builder.Start(0);
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n20 = graph.NewNode(op, nil);
+  USE(n20);
+  op = common_builder.Return();
+  Node* n19 = graph.NewNode(op, nil, nil, nil);
+  USE(n19);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n8 = graph.NewNode(op, nil, nil, nil);
+  USE(n8);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op, n0);
+  USE(n2);
+  n8->ReplaceInput(0, n2);
+  op = js_builder.Add();
+  Node* n18 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n18);
+  op = js_builder.ToNumber();
+  Node* n16 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n16);
+  n16->ReplaceInput(0, n8);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n5 = graph.NewNode(op);
+  USE(n5);
+  n16->ReplaceInput(1, n5);
+  op = js_builder.LessThan();
+  Node* n12 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n8);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op, n0);
+  USE(n3);
+  n9->ReplaceInput(0, n3);
+  n9->ReplaceInput(1, n9);
+  op = common_builder.Loop(2);
+  Node* n6 = graph.NewNode(op, nil, nil);
+  USE(n6);
+  n6->ReplaceInput(0, n0);
+  op = common_builder.IfTrue();
+  Node* n14 = graph.NewNode(op, nil);
+  USE(n14);
+  op = common_builder.Branch();
+  Node* n13 = graph.NewNode(op, nil, nil);
+  USE(n13);
+  n13->ReplaceInput(0, n12);
+  n13->ReplaceInput(1, n6);
+  n14->ReplaceInput(0, n13);
+  n6->ReplaceInput(1, n14);
+  n9->ReplaceInput(2, n6);
+  n12->ReplaceInput(1, n9);
+  n12->ReplaceInput(2, n5);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  n10->ReplaceInput(0, n0);
+  n10->ReplaceInput(1, n18);
+  n10->ReplaceInput(2, n6);
+  n12->ReplaceInput(3, n10);
+  n12->ReplaceInput(4, n6);
+  n16->ReplaceInput(2, n12);
+  n16->ReplaceInput(3, n14);
+  n18->ReplaceInput(0, n16);
+  op = common_builder.NumberConstant(0);
+  Node* n17 = graph.NewNode(op);
+  USE(n17);
+  n18->ReplaceInput(1, n17);
+  n18->ReplaceInput(2, n5);
+  n18->ReplaceInput(3, n16);
+  n18->ReplaceInput(4, n14);
+  n8->ReplaceInput(1, n18);
+  n8->ReplaceInput(2, n6);
+  n19->ReplaceInput(0, n8);
+  n19->ReplaceInput(1, n12);
+  op = common_builder.IfFalse();
+  Node* n15 = graph.NewNode(op, nil);
+  USE(n15);
+  n15->ReplaceInput(0, n13);
+  n19->ReplaceInput(2, n15);
+  n20->ReplaceInput(0, n19);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n20);
+
+  ComputeAndVerifySchedule(19, &graph);
+}
+
+
+TEST(BuildScheduleComplexLoops) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  const Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   while (a < b) {
+  //     a++;
+  //     while (c < b) {
+  //       c++;
+  //     }
+  //   }
+  //   while (a < b) {
+  //     a += 2;
+  //   }
+  //   return a;
+  // }
+  op = common_builder.Start(0);
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n46 = graph.NewNode(op, nil);
+  USE(n46);
+  op = common_builder.Return();
+  Node* n45 = graph.NewNode(op, nil, nil, nil);
+  USE(n45);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n35 = graph.NewNode(op, nil, nil, nil);
+  USE(n35);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op, n0);
+  USE(n2);
+  n9->ReplaceInput(0, n2);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n23 = graph.NewNode(op, nil, nil, nil);
+  USE(n23);
+  op = js_builder.Add();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  op = js_builder.ToNumber();
+  Node* n18 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n9);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n18->ReplaceInput(1, n6);
+  op = js_builder.LessThan();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n9);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op, n0);
+  USE(n3);
+  n10->ReplaceInput(0, n3);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n24 = graph.NewNode(op, nil, nil, nil);
+  USE(n24);
+  n24->ReplaceInput(0, n10);
+  n24->ReplaceInput(1, n24);
+  op = common_builder.Loop(2);
+  Node* n21 = graph.NewNode(op, nil, nil);
+  USE(n21);
+  op = common_builder.IfTrue();
+  Node* n16 = graph.NewNode(op, nil);
+  USE(n16);
+  op = common_builder.Branch();
+  Node* n15 = graph.NewNode(op, nil, nil);
+  USE(n15);
+  n15->ReplaceInput(0, n14);
+  op = common_builder.Loop(2);
+  Node* n7 = graph.NewNode(op, nil, nil);
+  USE(n7);
+  n7->ReplaceInput(0, n0);
+  op = common_builder.IfFalse();
+  Node* n30 = graph.NewNode(op, nil);
+  USE(n30);
+  op = common_builder.Branch();
+  Node* n28 = graph.NewNode(op, nil, nil);
+  USE(n28);
+  op = js_builder.LessThan();
+  Node* n27 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n27);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n25 = graph.NewNode(op, nil, nil, nil);
+  USE(n25);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op, n0);
+  USE(n4);
+  n11->ReplaceInput(0, n4);
+  n11->ReplaceInput(1, n25);
+  n11->ReplaceInput(2, n7);
+  n25->ReplaceInput(0, n11);
+  op = js_builder.Add();
+  Node* n32 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n32);
+  op = js_builder.ToNumber();
+  Node* n31 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n31);
+  n31->ReplaceInput(0, n25);
+  n31->ReplaceInput(1, n6);
+  n31->ReplaceInput(2, n27);
+  op = common_builder.IfTrue();
+  Node* n29 = graph.NewNode(op, nil);
+  USE(n29);
+  n29->ReplaceInput(0, n28);
+  n31->ReplaceInput(3, n29);
+  n32->ReplaceInput(0, n31);
+  op = common_builder.NumberConstant(0);
+  Node* n19 = graph.NewNode(op);
+  USE(n19);
+  n32->ReplaceInput(1, n19);
+  n32->ReplaceInput(2, n6);
+  n32->ReplaceInput(3, n31);
+  n32->ReplaceInput(4, n29);
+  n25->ReplaceInput(1, n32);
+  n25->ReplaceInput(2, n21);
+  n27->ReplaceInput(0, n25);
+  n27->ReplaceInput(1, n24);
+  n27->ReplaceInput(2, n6);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n26 = graph.NewNode(op, nil, nil, nil);
+  USE(n26);
+  n26->ReplaceInput(0, n20);
+  n26->ReplaceInput(1, n32);
+  n26->ReplaceInput(2, n21);
+  n27->ReplaceInput(3, n26);
+  n27->ReplaceInput(4, n21);
+  n28->ReplaceInput(0, n27);
+  n28->ReplaceInput(1, n21);
+  n30->ReplaceInput(0, n28);
+  n7->ReplaceInput(1, n30);
+  n15->ReplaceInput(1, n7);
+  n16->ReplaceInput(0, n15);
+  n21->ReplaceInput(0, n16);
+  n21->ReplaceInput(1, n29);
+  n24->ReplaceInput(2, n21);
+  n10->ReplaceInput(1, n24);
+  n10->ReplaceInput(2, n7);
+  n14->ReplaceInput(1, n10);
+  n14->ReplaceInput(2, n6);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n0);
+  n12->ReplaceInput(1, n27);
+  n12->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n12);
+  n14->ReplaceInput(4, n7);
+  n18->ReplaceInput(2, n14);
+  n18->ReplaceInput(3, n16);
+  n20->ReplaceInput(0, n18);
+  n20->ReplaceInput(1, n19);
+  n20->ReplaceInput(2, n6);
+  n20->ReplaceInput(3, n18);
+  n20->ReplaceInput(4, n16);
+  n23->ReplaceInput(0, n20);
+  n23->ReplaceInput(1, n23);
+  n23->ReplaceInput(2, n21);
+  n9->ReplaceInput(1, n23);
+  n9->ReplaceInput(2, n7);
+  n35->ReplaceInput(0, n9);
+  op = js_builder.Add();
+  Node* n44 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n44);
+  n44->ReplaceInput(0, n35);
+  op = common_builder.NumberConstant(0);
+  Node* n43 = graph.NewNode(op);
+  USE(n43);
+  n44->ReplaceInput(1, n43);
+  n44->ReplaceInput(2, n6);
+  op = js_builder.LessThan();
+  Node* n39 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n39);
+  n39->ReplaceInput(0, n35);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n36 = graph.NewNode(op, nil, nil, nil);
+  USE(n36);
+  n36->ReplaceInput(0, n10);
+  n36->ReplaceInput(1, n36);
+  op = common_builder.Loop(2);
+  Node* n33 = graph.NewNode(op, nil, nil);
+  USE(n33);
+  op = common_builder.IfFalse();
+  Node* n17 = graph.NewNode(op, nil);
+  USE(n17);
+  n17->ReplaceInput(0, n15);
+  n33->ReplaceInput(0, n17);
+  op = common_builder.IfTrue();
+  Node* n41 = graph.NewNode(op, nil);
+  USE(n41);
+  op = common_builder.Branch();
+  Node* n40 = graph.NewNode(op, nil, nil);
+  USE(n40);
+  n40->ReplaceInput(0, n39);
+  n40->ReplaceInput(1, n33);
+  n41->ReplaceInput(0, n40);
+  n33->ReplaceInput(1, n41);
+  n36->ReplaceInput(2, n33);
+  n39->ReplaceInput(1, n36);
+  n39->ReplaceInput(2, n6);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n38 = graph.NewNode(op, nil, nil, nil);
+  USE(n38);
+  n38->ReplaceInput(0, n14);
+  n38->ReplaceInput(1, n44);
+  n38->ReplaceInput(2, n33);
+  n39->ReplaceInput(3, n38);
+  n39->ReplaceInput(4, n33);
+  n44->ReplaceInput(3, n39);
+  n44->ReplaceInput(4, n41);
+  n35->ReplaceInput(1, n44);
+  n35->ReplaceInput(2, n33);
+  n45->ReplaceInput(0, n35);
+  n45->ReplaceInput(1, n39);
+  op = common_builder.IfFalse();
+  Node* n42 = graph.NewNode(op, nil);
+  USE(n42);
+  n42->ReplaceInput(0, n40);
+  n45->ReplaceInput(2, n42);
+  n46->ReplaceInput(0, n45);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n46);
+
+  ComputeAndVerifySchedule(46, &graph);
+}
+
+
+TEST(BuildScheduleBreakAndContinue) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  const Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   var d = 0;
+  //   while (a < b) {
+  //     a++;
+  //     while (c < b) {
+  //       c++;
+  //       if (d == 0) break;
+  //       a++;
+  //     }
+  //     if (a == 1) continue;
+  //     d++;
+  //   }
+  //   return a + d;
+  // }
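+  //
+  // Note: break and continue add no loops of their own; the break path joins
+  // the regular inner-loop exit at a Merge (n40 below), and the continue path
+  // joins the end of the loop body at another Merge (n53) that feeds the
+  // outer loop's back edge.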
+  op = common_builder.Start(0);
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n58 = graph.NewNode(op, nil);
+  USE(n58);
+  op = common_builder.Return();
+  Node* n57 = graph.NewNode(op, nil, nil, nil);
+  USE(n57);
+  op = js_builder.Add();
+  Node* n56 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n56);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op, n0);
+  USE(n2);
+  n10->ReplaceInput(0, n2);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n25 = graph.NewNode(op, nil, nil, nil);
+  USE(n25);
+  op = js_builder.Add();
+  Node* n22 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n22);
+  op = js_builder.ToNumber();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n20);
+  n20->ReplaceInput(0, n10);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n20->ReplaceInput(1, n6);
+  op = js_builder.LessThan();
+  Node* n16 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n16);
+  n16->ReplaceInput(0, n10);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op, n0);
+  USE(n3);
+  n11->ReplaceInput(0, n3);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n26 = graph.NewNode(op, nil, nil, nil);
+  USE(n26);
+  n26->ReplaceInput(0, n11);
+  n26->ReplaceInput(1, n26);
+  op = common_builder.Loop(2);
+  Node* n23 = graph.NewNode(op, nil, nil);
+  USE(n23);
+  op = common_builder.IfTrue();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  op = common_builder.Branch();
+  Node* n17 = graph.NewNode(op, nil, nil);
+  USE(n17);
+  n17->ReplaceInput(0, n16);
+  op = common_builder.Loop(2);
+  Node* n8 = graph.NewNode(op, nil, nil);
+  USE(n8);
+  n8->ReplaceInput(0, n0);
+  op = common_builder.Merge(2);
+  Node* n53 = graph.NewNode(op, nil, nil);
+  USE(n53);
+  op = common_builder.IfTrue();
+  Node* n49 = graph.NewNode(op, nil);
+  USE(n49);
+  op = common_builder.Branch();
+  Node* n48 = graph.NewNode(op, nil, nil);
+  USE(n48);
+  op = js_builder.Equal();
+  Node* n47 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n47);
+  n47->ReplaceInput(0, n25);
+  op = common_builder.NumberConstant(0);
+  Node* n46 = graph.NewNode(op);
+  USE(n46);
+  n47->ReplaceInput(1, n46);
+  n47->ReplaceInput(2, n6);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n42 = graph.NewNode(op, nil, nil, nil);
+  USE(n42);
+  op = js_builder.LessThan();
+  Node* n30 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n30);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n27 = graph.NewNode(op, nil, nil, nil);
+  USE(n27);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op, n0);
+  USE(n4);
+  n12->ReplaceInput(0, n4);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n41 = graph.NewNode(op, nil, nil, nil);
+  USE(n41);
+  n41->ReplaceInput(0, n27);
+  op = js_builder.Add();
+  Node* n35 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n35);
+  op = js_builder.ToNumber();
+  Node* n34 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n34);
+  n34->ReplaceInput(0, n27);
+  n34->ReplaceInput(1, n6);
+  n34->ReplaceInput(2, n30);
+  op = common_builder.IfTrue();
+  Node* n32 = graph.NewNode(op, nil);
+  USE(n32);
+  op = common_builder.Branch();
+  Node* n31 = graph.NewNode(op, nil, nil);
+  USE(n31);
+  n31->ReplaceInput(0, n30);
+  n31->ReplaceInput(1, n23);
+  n32->ReplaceInput(0, n31);
+  n34->ReplaceInput(3, n32);
+  n35->ReplaceInput(0, n34);
+  op = common_builder.NumberConstant(0);
+  Node* n21 = graph.NewNode(op);
+  USE(n21);
+  n35->ReplaceInput(1, n21);
+  n35->ReplaceInput(2, n6);
+  n35->ReplaceInput(3, n34);
+  n35->ReplaceInput(4, n32);
+  n41->ReplaceInput(1, n35);
+  op = common_builder.Merge(2);
+  Node* n40 = graph.NewNode(op, nil, nil);
+  USE(n40);
+  op = common_builder.IfFalse();
+  Node* n33 = graph.NewNode(op, nil);
+  USE(n33);
+  n33->ReplaceInput(0, n31);
+  n40->ReplaceInput(0, n33);
+  op = common_builder.IfTrue();
+  Node* n39 = graph.NewNode(op, nil);
+  USE(n39);
+  op = common_builder.Branch();
+  Node* n38 = graph.NewNode(op, nil, nil);
+  USE(n38);
+  op = js_builder.Equal();
+  Node* n37 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n37);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n28 = graph.NewNode(op, nil, nil, nil);
+  USE(n28);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n13 = graph.NewNode(op, nil, nil, nil);
+  USE(n13);
+  op = common_builder.NumberConstant(0);
+  Node* n7 = graph.NewNode(op);
+  USE(n7);
+  n13->ReplaceInput(0, n7);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n54 = graph.NewNode(op, nil, nil, nil);
+  USE(n54);
+  n54->ReplaceInput(0, n28);
+  op = js_builder.Add();
+  Node* n52 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n52);
+  op = js_builder.ToNumber();
+  Node* n51 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n51);
+  n51->ReplaceInput(0, n28);
+  n51->ReplaceInput(1, n6);
+  n51->ReplaceInput(2, n47);
+  op = common_builder.IfFalse();
+  Node* n50 = graph.NewNode(op, nil);
+  USE(n50);
+  n50->ReplaceInput(0, n48);
+  n51->ReplaceInput(3, n50);
+  n52->ReplaceInput(0, n51);
+  n52->ReplaceInput(1, n21);
+  n52->ReplaceInput(2, n6);
+  n52->ReplaceInput(3, n51);
+  n52->ReplaceInput(4, n50);
+  n54->ReplaceInput(1, n52);
+  n54->ReplaceInput(2, n53);
+  n13->ReplaceInput(1, n54);
+  n13->ReplaceInput(2, n8);
+  n28->ReplaceInput(0, n13);
+  n28->ReplaceInput(1, n28);
+  n28->ReplaceInput(2, n23);
+  n37->ReplaceInput(0, n28);
+  op = common_builder.NumberConstant(0);
+  Node* n36 = graph.NewNode(op);
+  USE(n36);
+  n37->ReplaceInput(1, n36);
+  n37->ReplaceInput(2, n6);
+  n37->ReplaceInput(3, n35);
+  n37->ReplaceInput(4, n32);
+  n38->ReplaceInput(0, n37);
+  n38->ReplaceInput(1, n32);
+  n39->ReplaceInput(0, n38);
+  n40->ReplaceInput(1, n39);
+  n41->ReplaceInput(2, n40);
+  n12->ReplaceInput(1, n41);
+  n12->ReplaceInput(2, n8);
+  n27->ReplaceInput(0, n12);
+  n27->ReplaceInput(1, n35);
+  n27->ReplaceInput(2, n23);
+  n30->ReplaceInput(0, n27);
+  n30->ReplaceInput(1, n26);
+  n30->ReplaceInput(2, n6);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n29 = graph.NewNode(op, nil, nil, nil);
+  USE(n29);
+  n29->ReplaceInput(0, n22);
+  op = js_builder.Add();
+  Node* n45 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n45);
+  op = js_builder.ToNumber();
+  Node* n44 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n44);
+  n44->ReplaceInput(0, n25);
+  n44->ReplaceInput(1, n6);
+  n44->ReplaceInput(2, n37);
+  op = common_builder.IfFalse();
+  Node* n43 = graph.NewNode(op, nil);
+  USE(n43);
+  n43->ReplaceInput(0, n38);
+  n44->ReplaceInput(3, n43);
+  n45->ReplaceInput(0, n44);
+  n45->ReplaceInput(1, n21);
+  n45->ReplaceInput(2, n6);
+  n45->ReplaceInput(3, n44);
+  n45->ReplaceInput(4, n43);
+  n29->ReplaceInput(1, n45);
+  n29->ReplaceInput(2, n23);
+  n30->ReplaceInput(3, n29);
+  n30->ReplaceInput(4, n23);
+  n42->ReplaceInput(0, n30);
+  n42->ReplaceInput(1, n37);
+  n42->ReplaceInput(2, n40);
+  n47->ReplaceInput(3, n42);
+  n47->ReplaceInput(4, n40);
+  n48->ReplaceInput(0, n47);
+  n48->ReplaceInput(1, n40);
+  n49->ReplaceInput(0, n48);
+  n53->ReplaceInput(0, n49);
+  n53->ReplaceInput(1, n50);
+  n8->ReplaceInput(1, n53);
+  n17->ReplaceInput(1, n8);
+  n18->ReplaceInput(0, n17);
+  n23->ReplaceInput(0, n18);
+  n23->ReplaceInput(1, n43);
+  n26->ReplaceInput(2, n23);
+  n11->ReplaceInput(1, n26);
+  n11->ReplaceInput(2, n8);
+  n16->ReplaceInput(1, n11);
+  n16->ReplaceInput(2, n6);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n14 = graph.NewNode(op, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n0);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n55 = graph.NewNode(op, nil, nil, nil);
+  USE(n55);
+  n55->ReplaceInput(0, n47);
+  n55->ReplaceInput(1, n52);
+  n55->ReplaceInput(2, n53);
+  n14->ReplaceInput(1, n55);
+  n14->ReplaceInput(2, n8);
+  n16->ReplaceInput(3, n14);
+  n16->ReplaceInput(4, n8);
+  n20->ReplaceInput(2, n16);
+  n20->ReplaceInput(3, n18);
+  n22->ReplaceInput(0, n20);
+  n22->ReplaceInput(1, n21);
+  n22->ReplaceInput(2, n6);
+  n22->ReplaceInput(3, n20);
+  n22->ReplaceInput(4, n18);
+  n25->ReplaceInput(0, n22);
+  n25->ReplaceInput(1, n45);
+  n25->ReplaceInput(2, n23);
+  n10->ReplaceInput(1, n25);
+  n10->ReplaceInput(2, n8);
+  n56->ReplaceInput(0, n10);
+  n56->ReplaceInput(1, n13);
+  n56->ReplaceInput(2, n6);
+  n56->ReplaceInput(3, n16);
+  op = common_builder.IfFalse();
+  Node* n19 = graph.NewNode(op, nil);
+  USE(n19);
+  n19->ReplaceInput(0, n17);
+  n56->ReplaceInput(4, n19);
+  n57->ReplaceInput(0, n56);
+  n57->ReplaceInput(1, n56);
+  n57->ReplaceInput(2, n19);
+  n58->ReplaceInput(0, n57);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n58);
+
+  ComputeAndVerifySchedule(62, &graph);
+}
+
+
+TEST(BuildScheduleSimpleLoopWithCodeMotion) {
+  HandleAndZoneScope scope;
+  Isolate* isolate = scope.main_isolate();
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common_builder(scope.main_zone());
+  JSOperatorBuilder js_builder(scope.main_zone());
+  MachineOperatorBuilder machine_builder;
+  const Operator* op;
+
+  Handle<Object> object =
+      Handle<Object>(isolate->heap()->undefined_value(), isolate);
+  Unique<Object> unique_constant = Unique<Object>::CreateUninitialized(object);
+
+  // Manually transcribed code for:
+  // function turbo_fan_test(a, b, c) {
+  //   while (a < b) {
+  //     a += b + c;
+  //   }
+  //   return a;
+  // }
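+  //
+  // Note: the pure Int32Add built below (n19) is loop-invariant, so the
+  // scheduler should hoist it out of the loop; the CHECK at the end of this
+  // test verifies it lands in a different block than the JSAdd (n20).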
+  op = common_builder.Start(0);
+  Node* n0 = graph.NewNode(op);
+  USE(n0);
+  Node* nil = graph.NewNode(common_builder.Dead());
+  op = common_builder.End();
+  Node* n22 = graph.NewNode(op, nil);
+  USE(n22);
+  op = common_builder.Return();
+  Node* n21 = graph.NewNode(op, nil, nil, nil);
+  USE(n21);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n9 = graph.NewNode(op, nil, nil, nil);
+  USE(n9);
+  op = common_builder.Parameter(0);
+  Node* n2 = graph.NewNode(op, n0);
+  USE(n2);
+  n9->ReplaceInput(0, n2);
+  op = js_builder.Add();
+  Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n20);
+  n20->ReplaceInput(0, n9);
+  op = machine_builder.Int32Add();
+  Node* n19 = graph.NewNode(op, nil, nil);
+  USE(n19);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n10 = graph.NewNode(op, nil, nil, nil);
+  USE(n10);
+  op = common_builder.Parameter(0);
+  Node* n3 = graph.NewNode(op, n0);
+  USE(n3);
+  n10->ReplaceInput(0, n3);
+  n10->ReplaceInput(1, n10);
+  op = common_builder.Loop(2);
+  Node* n7 = graph.NewNode(op, nil, nil);
+  USE(n7);
+  n7->ReplaceInput(0, n0);
+  op = common_builder.IfTrue();
+  Node* n17 = graph.NewNode(op, nil);
+  USE(n17);
+  op = common_builder.Branch();
+  Node* n16 = graph.NewNode(op, nil, nil);
+  USE(n16);
+  op = js_builder.ToBoolean();
+  Node* n15 = graph.NewNode(op, nil, nil, nil, nil);
+  USE(n15);
+  op = js_builder.LessThan();
+  Node* n14 = graph.NewNode(op, nil, nil, nil, nil, nil);
+  USE(n14);
+  n14->ReplaceInput(0, n9);
+  n14->ReplaceInput(1, n10);
+  op = common_builder.HeapConstant(unique_constant);
+  Node* n6 = graph.NewNode(op);
+  USE(n6);
+  n14->ReplaceInput(2, n6);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n12 = graph.NewNode(op, nil, nil, nil);
+  USE(n12);
+  n12->ReplaceInput(0, n0);
+  n12->ReplaceInput(1, n20);
+  n12->ReplaceInput(2, n7);
+  n14->ReplaceInput(3, n12);
+  n14->ReplaceInput(4, n7);
+  n15->ReplaceInput(0, n14);
+  n15->ReplaceInput(1, n6);
+  n15->ReplaceInput(2, n14);
+  n15->ReplaceInput(3, n7);
+  n16->ReplaceInput(0, n15);
+  n16->ReplaceInput(1, n7);
+  n17->ReplaceInput(0, n16);
+  n7->ReplaceInput(1, n17);
+  n10->ReplaceInput(2, n7);
+  n19->ReplaceInput(0, n2);
+  op = common_builder.Phi(kMachAnyTagged, 2);
+  Node* n11 = graph.NewNode(op, nil, nil, nil);
+  USE(n11);
+  op = common_builder.Parameter(0);
+  Node* n4 = graph.NewNode(op, n0);
+  USE(n4);
+  n11->ReplaceInput(0, n4);
+  n11->ReplaceInput(1, n11);
+  n11->ReplaceInput(2, n7);
+  n19->ReplaceInput(1, n3);
+  n20->ReplaceInput(1, n19);
+  n20->ReplaceInput(2, n6);
+  n20->ReplaceInput(3, n19);
+  n20->ReplaceInput(4, n17);
+  n9->ReplaceInput(1, n20);
+  n9->ReplaceInput(2, n7);
+  n21->ReplaceInput(0, n9);
+  n21->ReplaceInput(1, n15);
+  op = common_builder.IfFalse();
+  Node* n18 = graph.NewNode(op, nil);
+  USE(n18);
+  n18->ReplaceInput(0, n16);
+  n21->ReplaceInput(2, n18);
+  n22->ReplaceInput(0, n21);
+
+  graph.SetStart(n0);
+  graph.SetEnd(n22);
+
+  Schedule* schedule = ComputeAndVerifySchedule(19, &graph);
+  // Make sure the integer-only add gets hoisted to a different block than the
+  // JSAdd.
+  CHECK(schedule->block(n19) != schedule->block(n20));
+}
+
+
+#if V8_TURBOFAN_TARGET
+
+static Node* CreateDiamond(Graph* graph, CommonOperatorBuilder* common,
+                           Node* cond) {
+  Node* tv = graph->NewNode(common->Int32Constant(6));
+  Node* fv = graph->NewNode(common->Int32Constant(7));
+  Node* br = graph->NewNode(common->Branch(), cond, graph->start());
+  Node* t = graph->NewNode(common->IfTrue(), br);
+  Node* f = graph->NewNode(common->IfFalse(), br);
+  Node* m = graph->NewNode(common->Merge(2), t, f);
+  Node* phi = graph->NewNode(common->Phi(kMachAnyTagged, 2), tv, fv, m);
+  return phi;
+}
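+
+// The helper above builds the following "diamond" subgraph; the Phi selects
+// 6 on the true path and 7 on the false path:
+//
+//            Branch(cond)
+//            /          \
+//        IfTrue      IfFalse
+//            \          /
+//             Merge(2)
+//                |
+//            Phi(6, 7)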
+
+
+TEST(FloatingDiamond1) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+
+  Node* start = graph.NewNode(common.Start(1));
+  graph.SetStart(start);
+
+  Node* p0 = graph.NewNode(common.Parameter(0), start);
+  Node* d1 = CreateDiamond(&graph, &common, p0);
+  Node* ret = graph.NewNode(common.Return(), d1, start, start);
+  Node* end = graph.NewNode(common.End(), ret, start);
+
+  graph.SetEnd(end);
+
+  ComputeAndVerifySchedule(13, &graph);
+}
+
+
+TEST(FloatingDiamond2) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  MachineOperatorBuilder machine;
+
+  Node* start = graph.NewNode(common.Start(2));
+  graph.SetStart(start);
+
+  Node* p0 = graph.NewNode(common.Parameter(0), start);
+  Node* p1 = graph.NewNode(common.Parameter(1), start);
+  Node* d1 = CreateDiamond(&graph, &common, p0);
+  Node* d2 = CreateDiamond(&graph, &common, p1);
+  Node* add = graph.NewNode(machine.Int32Add(), d1, d2);
+  Node* ret = graph.NewNode(common.Return(), add, start, start);
+  Node* end = graph.NewNode(common.End(), ret, start);
+
+  graph.SetEnd(end);
+
+  ComputeAndVerifySchedule(24, &graph);
+}
+
+
+TEST(FloatingDiamond3) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+  MachineOperatorBuilder machine;
+
+  Node* start = graph.NewNode(common.Start(2));
+  graph.SetStart(start);
+
+  Node* p0 = graph.NewNode(common.Parameter(0), start);
+  Node* p1 = graph.NewNode(common.Parameter(1), start);
+  Node* d1 = CreateDiamond(&graph, &common, p0);
+  Node* d2 = CreateDiamond(&graph, &common, p1);
+  Node* add = graph.NewNode(machine.Int32Add(), d1, d2);
+  Node* d3 = CreateDiamond(&graph, &common, add);
+  Node* ret = graph.NewNode(common.Return(), d3, start, start);
+  Node* end = graph.NewNode(common.End(), ret, start);
+
+  graph.SetEnd(end);
+
+  ComputeAndVerifySchedule(33, &graph);
+}
+
+#endif
diff --git a/test/cctest/compiler/test-simplified-lowering.cc b/test/cctest/compiler/test-simplified-lowering.cc
new file mode 100644
index 0000000..96fb965
--- /dev/null
+++ b/test/cctest/compiler/test-simplified-lowering.cc
@@ -0,0 +1,1560 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <limits>
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/control-builders.h"
+#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/pipeline.h"
+#include "src/compiler/representation-change.h"
+#include "src/compiler/simplified-lowering.h"
+#include "src/compiler/typer.h"
+#include "src/compiler/verifier.h"
+#include "src/execution.h"
+#include "src/parser.h"
+#include "src/rewriter.h"
+#include "src/scopes.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/compiler/codegen-tester.h"
+#include "test/cctest/compiler/graph-builder-tester.h"
+#include "test/cctest/compiler/value-helper.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+template <typename ReturnType>
+class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
+ public:
+  SimplifiedLoweringTester(MachineType p0 = kMachNone,
+                           MachineType p1 = kMachNone,
+                           MachineType p2 = kMachNone,
+                           MachineType p3 = kMachNone,
+                           MachineType p4 = kMachNone)
+      : GraphBuilderTester<ReturnType>(p0, p1, p2, p3, p4),
+        typer(this->zone()),
+        javascript(this->zone()),
+        jsgraph(this->graph(), this->common(), &javascript, &typer,
+                this->machine()),
+        lowering(&jsgraph) {}
+
+  Typer typer;
+  JSOperatorBuilder javascript;
+  JSGraph jsgraph;
+  SimplifiedLowering lowering;
+
+  void LowerAllNodes() {
+    this->End();
+    lowering.LowerAllNodes();
+  }
+
+  Factory* factory() { return this->isolate()->factory(); }
+  Heap* heap() { return this->isolate()->heap(); }
+};
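+
+
+// Typical flow for the tests below: build a graph through the inherited
+// GraphBuilderTester interface, then LowerAllNodes(), GenerateCode(), and
+// finally Call() the generated code, guarding execution with
+// Pipeline::SupportedTarget().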
+
+
+#ifndef V8_TARGET_ARCH_ARM64
+// TODO(titzer): these result in a stub call that doesn't work on ARM64.
+// TODO(titzer): factor these tests out to test-run-simplifiedops.cc.
+// TODO(titzer): test tagged representation for input to NumberToInt32.
+TEST(RunNumberToInt32_float64) {
+  // TODO(titzer): explicit load/stores here are only because of representations
+  double input;
+  int32_t result;
+  SimplifiedLoweringTester<Object*> t;
+  FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
+                      kMachFloat64};
+  Node* loaded = t.LoadField(load, t.PointerConstant(&input));
+  Node* convert = t.NumberToInt32(loaded);
+  FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Signed32(),
+                       kMachInt32};
+  t.StoreField(store, t.PointerConstant(&result), convert);
+  t.Return(t.jsgraph.TrueConstant());
+  t.LowerAllNodes();
+  t.GenerateCode();
+
+  if (Pipeline::SupportedTarget()) {
+    FOR_FLOAT64_INPUTS(i) {
+      input = *i;
+      int32_t expected = DoubleToInt32(*i);
+      t.Call();
+      CHECK_EQ(expected, result);
+    }
+  }
+}
+
+
+// TODO(titzer): test tagged representation for input to NumberToUint32.
+TEST(RunNumberToUint32_float64) {
+  // TODO(titzer): explicit load/stores here are only because of representations
+  double input;
+  uint32_t result;
+  SimplifiedLoweringTester<Object*> t;
+  FieldAccess load = {kUntaggedBase, 0, Handle<Name>(), Type::Number(),
+                      kMachFloat64};
+  Node* loaded = t.LoadField(load, t.PointerConstant(&input));
+  Node* convert = t.NumberToUint32(loaded);
+  FieldAccess store = {kUntaggedBase, 0, Handle<Name>(), Type::Unsigned32(),
+                       kMachUint32};
+  t.StoreField(store, t.PointerConstant(&result), convert);
+  t.Return(t.jsgraph.TrueConstant());
+  t.LowerAllNodes();
+  t.GenerateCode();
+
+  if (Pipeline::SupportedTarget()) {
+    FOR_FLOAT64_INPUTS(i) {
+      input = *i;
+      uint32_t expected = DoubleToUint32(*i);
+      t.Call();
+      CHECK_EQ(static_cast<int32_t>(expected), static_cast<int32_t>(result));
+    }
+  }
+}
+#endif
+
+
+// Create a simple JSObject with a unique map.
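+// Each call compiles an object literal with a fresh property name ('a_0',
+// 'a_1', ...), so every returned object ends up with its own distinct map.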
+static Handle<JSObject> TestObject() {
+  static int index = 0;
+  char buffer[50];
+  v8::base::OS::SNPrintF(buffer, 50, "({'a_%d':1})", index++);
+  return Handle<JSObject>::cast(v8::Utils::OpenHandle(*CompileRun(buffer)));
+}
+
+
+TEST(RunLoadMap) {
+  SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+  FieldAccess access = AccessBuilder::ForMap();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.Return(load);
+
+  t.LowerAllNodes();
+  t.GenerateCode();
+
+  if (Pipeline::SupportedTarget()) {
+    Handle<JSObject> src = TestObject();
+    Handle<Map> src_map(src->map());
+    Object* result = t.Call(*src);  // TODO(titzer): raw pointers in call
+    CHECK_EQ(*src_map, result);
+  }
+}
+
+
+TEST(RunStoreMap) {
+  SimplifiedLoweringTester<int32_t> t(kMachAnyTagged, kMachAnyTagged);
+  FieldAccess access = AccessBuilder::ForMap();
+  t.StoreField(access, t.Parameter(1), t.Parameter(0));
+  t.Return(t.jsgraph.TrueConstant());
+
+  t.LowerAllNodes();
+  t.GenerateCode();
+
+  if (Pipeline::SupportedTarget()) {
+    Handle<JSObject> src = TestObject();
+    Handle<Map> src_map(src->map());
+    Handle<JSObject> dst = TestObject();
+    CHECK(src->map() != dst->map());
+    t.Call(*src_map, *dst);  // TODO(titzer): raw pointers in call
+    CHECK(*src_map == dst->map());
+  }
+}
+
+
+TEST(RunLoadProperties) {
+  SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+  FieldAccess access = AccessBuilder::ForJSObjectProperties();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.Return(load);
+
+  t.LowerAllNodes();
+  t.GenerateCode();
+
+  if (Pipeline::SupportedTarget()) {
+    Handle<JSObject> src = TestObject();
+    Handle<FixedArray> src_props(src->properties());
+    Object* result = t.Call(*src);  // TODO(titzer): raw pointers in call
+    CHECK_EQ(*src_props, result);
+  }
+}
+
+
+TEST(RunLoadStoreMap) {
+  SimplifiedLoweringTester<Object*> t(kMachAnyTagged, kMachAnyTagged);
+  FieldAccess access = AccessBuilder::ForMap();
+  Node* load = t.LoadField(access, t.Parameter(0));
+  t.StoreField(access, t.Parameter(1), load);
+  t.Return(load);
+
+  t.LowerAllNodes();
+  t.GenerateCode();
+
+  if (Pipeline::SupportedTarget()) {
+    Handle<JSObject> src = TestObject();
+    Handle<Map> src_map(src->map());
+    Handle<JSObject> dst = TestObject();
+    CHECK(src->map() != dst->map());
+    Object* result = t.Call(*src, *dst);  // TODO(titzer): raw pointers in call
+    CHECK(result->IsMap());
+    CHECK_EQ(*src_map, result);
+    CHECK(*src_map == dst->map());
+  }
+}
+
+
+TEST(RunLoadStoreFixedArrayIndex) {
+  SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+  ElementAccess access = AccessBuilder::ForFixedArrayElement();
+  Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0),
+                             t.Int32Constant(2));
+  t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), t.Int32Constant(2),
+                 load);
+  t.Return(load);
+
+  t.LowerAllNodes();
+  t.GenerateCode();
+
+  if (Pipeline::SupportedTarget()) {
+    Handle<FixedArray> array = t.factory()->NewFixedArray(2);
+    Handle<JSObject> src = TestObject();
+    Handle<JSObject> dst = TestObject();
+    array->set(0, *src);
+    array->set(1, *dst);
+    Object* result = t.Call(*array);
+    CHECK_EQ(*src, result);
+    CHECK_EQ(*src, array->get(0));
+    CHECK_EQ(*src, array->get(1));
+  }
+}
+
+
+TEST(RunLoadStoreArrayBuffer) {
+  SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+  const int index = 12;
+  const int array_length = 2 * index;
+  ElementAccess buffer_access =
+      AccessBuilder::ForBackingStoreElement(kMachInt8);
+  Node* backing_store = t.LoadField(
+      AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
+  Node* load =
+      t.LoadElement(buffer_access, backing_store, t.Int32Constant(index),
+                    t.Int32Constant(array_length));
+  t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
+                 t.Int32Constant(array_length), load);
+  t.Return(t.jsgraph.TrueConstant());
+
+  t.LowerAllNodes();
+  t.GenerateCode();
+
+  if (Pipeline::SupportedTarget()) {
+    Handle<JSArrayBuffer> array = t.factory()->NewJSArrayBuffer();
+    Runtime::SetupArrayBufferAllocatingData(t.isolate(), array, array_length);
+    uint8_t* data = reinterpret_cast<uint8_t*>(array->backing_store());
+    for (int i = 0; i < array_length; i++) {
+      data[i] = i;
+    }
+
+    // TODO(titzer): raw pointers in call
+    Object* result = t.Call(*array);
+    CHECK_EQ(t.isolate()->heap()->true_value(), result);
+    for (int i = 0; i < array_length; i++) {
+      uint8_t expected = i;
+      if (i == (index + 1)) expected = index;
+      CHECK_EQ(data[i], expected);
+    }
+  }
+}
+
+
+TEST(RunLoadFieldFromUntaggedBase) {
+  Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
+
+  for (size_t i = 0; i < arraysize(smis); i++) {
+    int offset = static_cast<int>(i * sizeof(Smi*));
+    FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
+                          Type::Integral32(), kMachAnyTagged};
+
+    SimplifiedLoweringTester<Object*> t;
+    Node* load = t.LoadField(access, t.PointerConstant(smis));
+    t.Return(load);
+    t.LowerAllNodes();
+
+    if (!Pipeline::SupportedTarget()) continue;
+
+    for (int j = -5; j <= 5; j++) {
+      Smi* expected = Smi::FromInt(j);
+      smis[i] = expected;
+      CHECK_EQ(expected, t.Call());
+    }
+  }
+}
+
+
+TEST(RunStoreFieldToUntaggedBase) {
+  Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3)};
+
+  for (size_t i = 0; i < arraysize(smis); i++) {
+    int offset = static_cast<int>(i * sizeof(Smi*));
+    FieldAccess access = {kUntaggedBase, offset, Handle<Name>(),
+                          Type::Integral32(), kMachAnyTagged};
+
+    SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+    Node* p0 = t.Parameter(0);
+    t.StoreField(access, t.PointerConstant(smis), p0);
+    t.Return(p0);
+    t.LowerAllNodes();
+
+    if (!Pipeline::SupportedTarget()) continue;
+
+    for (int j = -5; j <= 5; j++) {
+      Smi* expected = Smi::FromInt(j);
+      smis[i] = Smi::FromInt(-100);
+      CHECK_EQ(expected, t.Call(expected));
+      CHECK_EQ(expected, smis[i]);
+    }
+  }
+}
+
+
+TEST(RunLoadElementFromUntaggedBase) {
+  Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
+                 Smi::FromInt(4), Smi::FromInt(5)};
+
+  for (size_t i = 0; i < arraysize(smis); i++) {    // for header sizes
+    for (size_t j = 0; (i + j) < arraysize(smis); j++) {  // for element index
+      int offset = static_cast<int>(i * sizeof(Smi*));
+      ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
+                              kMachAnyTagged};
+
+      SimplifiedLoweringTester<Object*> t;
+      Node* load = t.LoadElement(
+          access, t.PointerConstant(smis), t.Int32Constant(static_cast<int>(j)),
+          t.Int32Constant(static_cast<int>(arraysize(smis))));
+      t.Return(load);
+      t.LowerAllNodes();
+
+      if (!Pipeline::SupportedTarget()) continue;
+
+      for (int k = -5; k <= 5; k++) {
+        Smi* expected = Smi::FromInt(k);
+        smis[i + j] = expected;
+        CHECK_EQ(expected, t.Call());
+      }
+    }
+  }
+}
+
+
+TEST(RunStoreElementFromUntaggedBase) {
+  Smi* smis[] = {Smi::FromInt(1), Smi::FromInt(2), Smi::FromInt(3),
+                 Smi::FromInt(4), Smi::FromInt(5)};
+
+  for (size_t i = 0; i < arraysize(smis); i++) {    // for header sizes
+    for (size_t j = 0; (i + j) < arraysize(smis); j++) {  // for element index
+      int offset = static_cast<int>(i * sizeof(Smi*));
+      ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
+                              kMachAnyTagged};
+
+      SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
+      Node* p0 = t.Parameter(0);
+      t.StoreElement(access, t.PointerConstant(smis),
+                     t.Int32Constant(static_cast<int>(j)),
+                     t.Int32Constant(static_cast<int>(arraysize(smis))), p0);
+      t.Return(p0);
+      t.LowerAllNodes();
+
+      if (!Pipeline::SupportedTarget()) continue;
+
+      for (int k = -5; k <= 5; k++) {
+        Smi* expected = Smi::FromInt(k);
+        smis[i + j] = Smi::FromInt(-100);
+        CHECK_EQ(expected, t.Call(expected));
+        CHECK_EQ(expected, smis[i + j]);
+      }
+
+      // TODO(titzer): assert the contents of the array.
+    }
+  }
+}
+
+
+// A helper class for accessing fields and elements of various types, on both
+// tagged and untagged base pointers. Contains both tagged and untagged buffers
+// for testing direct memory access from generated code.
+template <typename E>
+class AccessTester : public HandleAndZoneScope {
+ public:
+  bool tagged;
+  MachineType rep;
+  E* original_elements;
+  size_t num_elements;
+  E* untagged_array;
+  Handle<ByteArray> tagged_array;  // TODO(titzer): use FixedArray for tagged.
+
+  AccessTester(bool t, MachineType r, E* orig, size_t num)
+      : tagged(t),
+        rep(r),
+        original_elements(orig),
+        num_elements(num),
+        untagged_array(static_cast<E*>(malloc(ByteSize()))),
+        tagged_array(main_isolate()->factory()->NewByteArray(
+            static_cast<int>(ByteSize()))) {
+    Reinitialize();
+  }
+
+  ~AccessTester() { free(untagged_array); }
+
+  size_t ByteSize() { return num_elements * sizeof(E); }
+
+  // Nuke both {untagged_array} and {tagged_array} with {original_elements}.
+  void Reinitialize() {
+    memcpy(untagged_array, original_elements, ByteSize());
+    CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
+    E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
+    memcpy(raw, original_elements, ByteSize());
+  }
+
+  // Create and run code that copies the element in either {untagged_array}
+  // or {tagged_array} at index {from_index} to index {to_index}.
+  void RunCopyElement(int from_index, int to_index) {
+    // TODO(titzer): test element and field accesses where the base is not
+    // a constant in the code.
+    BoundsCheck(from_index);
+    BoundsCheck(to_index);
+    ElementAccess access = GetElementAccess();
+
+    SimplifiedLoweringTester<Object*> t;
+    Node* ptr = GetBaseNode(&t);
+    Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index),
+                               t.Int32Constant(static_cast<int>(num_elements)));
+    t.StoreElement(access, ptr, t.Int32Constant(to_index),
+                   t.Int32Constant(static_cast<int>(num_elements)), load);
+    t.Return(t.jsgraph.TrueConstant());
+    t.LowerAllNodes();
+    t.GenerateCode();
+
+    if (Pipeline::SupportedTarget()) {
+      Object* result = t.Call();
+      CHECK_EQ(t.isolate()->heap()->true_value(), result);
+    }
+  }
+
+  // Create and run code that copies the field in either {untagged_array}
+  // or {tagged_array} at index {from_index} to index {to_index}.
+  void RunCopyField(int from_index, int to_index) {
+    BoundsCheck(from_index);
+    BoundsCheck(to_index);
+    FieldAccess from_access = GetFieldAccess(from_index);
+    FieldAccess to_access = GetFieldAccess(to_index);
+
+    SimplifiedLoweringTester<Object*> t;
+    Node* ptr = GetBaseNode(&t);
+    Node* load = t.LoadField(from_access, ptr);
+    t.StoreField(to_access, ptr, load);
+    t.Return(t.jsgraph.TrueConstant());
+    t.LowerAllNodes();
+    t.GenerateCode();
+
+    if (Pipeline::SupportedTarget()) {
+      Object* result = t.Call();
+      CHECK_EQ(t.isolate()->heap()->true_value(), result);
+    }
+  }
+
+  // Create and run code that copies the elements from {this} to {that}.
+  void RunCopyElements(AccessTester<E>* that) {
+// TODO(titzer): Rewrite this test without StructuredGraphBuilder support.
+#if 0
+    SimplifiedLoweringTester<Object*> t;
+
+    Node* one = t.Int32Constant(1);
+    Node* index = t.Int32Constant(0);
+    Node* limit = t.Int32Constant(static_cast<int>(num_elements));
+    t.environment()->Push(index);
+    Node* src = this->GetBaseNode(&t);
+    Node* dst = that->GetBaseNode(&t);
+    {
+      LoopBuilder loop(&t);
+      loop.BeginLoop();
+      // Loop exit condition
+      index = t.environment()->Top();
+      Node* condition = t.Int32LessThan(index, limit);
+      loop.BreakUnless(condition);
+      // dst[index] = src[index]
+      index = t.environment()->Pop();
+      Node* load = t.LoadElement(this->GetElementAccess(), src, index);
+      t.StoreElement(that->GetElementAccess(), dst, index, load);
+      // index++
+      index = t.Int32Add(index, one);
+      t.environment()->Push(index);
+      // continue
+      loop.EndBody();
+      loop.EndLoop();
+    }
+    index = t.environment()->Pop();
+    t.Return(t.jsgraph.TrueConstant());
+    t.LowerAllNodes();
+    t.GenerateCode();
+
+    if (Pipeline::SupportedTarget()) {
+      Object* result = t.Call();
+      CHECK_EQ(t.isolate()->heap()->true_value(), result);
+    }
+#endif
+  }
+
+  E GetElement(int index) {
+    BoundsCheck(index);
+    if (tagged) {
+      E* raw = reinterpret_cast<E*>(tagged_array->GetDataStartAddress());
+      return raw[index];
+    } else {
+      return untagged_array[index];
+    }
+  }
+
+ private:
+  ElementAccess GetElementAccess() {
+    ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
+                            tagged ? FixedArrayBase::kHeaderSize : 0,
+                            Type::Any(), rep};
+    return access;
+  }
+
+  FieldAccess GetFieldAccess(int field) {
+    int offset = field * sizeof(E);
+    FieldAccess access = {tagged ? kTaggedBase : kUntaggedBase,
+                          offset + (tagged ? FixedArrayBase::kHeaderSize : 0),
+                          Handle<Name>(), Type::Any(), rep};
+    return access;
+  }
+
+  template <typename T>
+  Node* GetBaseNode(SimplifiedLoweringTester<T>* t) {
+    return tagged ? t->HeapConstant(tagged_array)
+                  : t->PointerConstant(untagged_array);
+  }
+
+  void BoundsCheck(int index) {
+    CHECK_GE(index, 0);
+    CHECK_LT(index, static_cast<int>(num_elements));
+    CHECK_EQ(static_cast<int>(ByteSize()), tagged_array->length());
+  }
+};
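+
+
+// Example use of the helper above (a sketch; results are only observable
+// when Pipeline::SupportedTarget() holds):
+//
+//   int32_t data[] = {1, 2, 3};
+//   AccessTester<int32_t> a(true, kMachInt32, data, arraysize(data));
+//   a.RunCopyElement(0, 1);        // copy element 0 over element 1
+//   CHECK_EQ(1, a.GetElement(1));  // element 1 now holds the old element 0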
+
+
+template <typename E>
+static void RunAccessTest(MachineType rep, E* original_elements, size_t num) {
+  int num_elements = static_cast<int>(num);
+
+  for (int taggedness = 0; taggedness < 2; taggedness++) {
+    AccessTester<E> a(taggedness == 1, rep, original_elements, num);
+    for (int field = 0; field < 2; field++) {
+      for (int i = 0; i < num_elements - 1; i++) {
+        a.Reinitialize();
+        if (field == 0) {
+          a.RunCopyField(i, i + 1);  // Test field read/write.
+        } else {
+          a.RunCopyElement(i, i + 1);  // Test element read/write.
+        }
+        if (Pipeline::SupportedTarget()) {  // verify.
+          for (int j = 0; j < num_elements; j++) {
+            E expect =
+                j == (i + 1) ? original_elements[i] : original_elements[j];
+            CHECK_EQ(expect, a.GetElement(j));
+          }
+        }
+      }
+    }
+  }
+  // Test array copy.
+  for (int tf = 0; tf < 2; tf++) {
+    for (int tt = 0; tt < 2; tt++) {
+      AccessTester<E> a(tf == 1, rep, original_elements, num);
+      AccessTester<E> b(tt == 1, rep, original_elements, num);
+      a.RunCopyElements(&b);
+      if (Pipeline::SupportedTarget()) {  // verify.
+        for (int i = 0; i < num_elements; i++) {
+          CHECK_EQ(a.GetElement(i), b.GetElement(i));
+        }
+      }
+    }
+  }
+}
+
+
+TEST(RunAccessTests_uint8) {
+  uint8_t data[] = {0x07, 0x16, 0x25, 0x34, 0x43, 0x99,
+                    0xab, 0x78, 0x89, 0x19, 0x2b, 0x38};
+  RunAccessTest<uint8_t>(kMachInt8, data, arraysize(data));
+}
+
+
+TEST(RunAccessTests_uint16) {
+  uint16_t data[] = {0x071a, 0x162b, 0x253c, 0x344d, 0x435e, 0x7777};
+  RunAccessTest<uint16_t>(kMachInt16, data, arraysize(data));
+}
+
+
+TEST(RunAccessTests_int32) {
+  int32_t data[] = {-211, 211, 628347, 2000000000, -2000000000, -1, -100000034};
+  RunAccessTest<int32_t>(kMachInt32, data, arraysize(data));
+}
+
+
+#define V8_2PART_INT64(a, b) (((static_cast<int64_t>(a) << 32) + 0x##b##u))
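+// Combines two 32-bit halves into one 64-bit literal, e.g.
+// V8_2PART_INT64(0x10111213, 14151617) == 0x1011121314151617.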
+
+
+TEST(RunAccessTests_int64) {
+  if (kPointerSize != 8) return;
+  int64_t data[] = {V8_2PART_INT64(0x10111213, 14151617),
+                    V8_2PART_INT64(0x20212223, 24252627),
+                    V8_2PART_INT64(0x30313233, 34353637),
+                    V8_2PART_INT64(0xa0a1a2a3, a4a5a6a7),
+                    V8_2PART_INT64(0xf0f1f2f3, f4f5f6f7)};
+  RunAccessTest<int64_t>(kMachInt64, data, arraysize(data));
+}
+
+
+TEST(RunAccessTests_float64) {
+  double data[] = {1.25, -1.25, 2.75, 11.0, 11100.8};
+  RunAccessTest<double>(kMachFloat64, data, arraysize(data));
+}
+
+
+TEST(RunAccessTests_Smi) {
+  Smi* data[] = {Smi::FromInt(-1),    Smi::FromInt(-9),
+                 Smi::FromInt(0),     Smi::FromInt(666),
+                 Smi::FromInt(77777), Smi::FromInt(Smi::kMaxValue)};
+  RunAccessTest<Smi*>(kMachAnyTagged, data, arraysize(data));
+}
+
+
+// Fills in most of the nodes of the graph in order to make tests shorter.
+class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
+ public:
+  Typer typer;
+  JSOperatorBuilder javascript;
+  JSGraph jsgraph;
+  Node* p0;
+  Node* p1;
+  Node* p2;
+  Node* start;
+  Node* end;
+  Node* ret;
+
+  explicit TestingGraph(Type* p0_type, Type* p1_type = Type::None(),
+                        Type* p2_type = Type::None())
+      : GraphAndBuilders(main_zone()),
+        typer(main_zone()),
+        javascript(main_zone()),
+        jsgraph(graph(), common(), &javascript, &typer, machine()) {
+    start = graph()->NewNode(common()->Start(2));
+    graph()->SetStart(start);
+    ret =
+        graph()->NewNode(common()->Return(), jsgraph.Constant(0), start, start);
+    end = graph()->NewNode(common()->End(), ret);
+    graph()->SetEnd(end);
+    p0 = graph()->NewNode(common()->Parameter(0), start);
+    p1 = graph()->NewNode(common()->Parameter(1), start);
+    p2 = graph()->NewNode(common()->Parameter(2), start);
+    NodeProperties::SetBounds(p0, Bounds(p0_type));
+    NodeProperties::SetBounds(p1, Bounds(p1_type));
+    NodeProperties::SetBounds(p2, Bounds(p2_type));
+  }
+
+  void CheckLoweringBinop(IrOpcode::Value expected, const Operator* op) {
+    Node* node = Return(graph()->NewNode(op, p0, p1));
+    Lower();
+    CHECK_EQ(expected, node->opcode());
+  }
+
+  void CheckLoweringTruncatedBinop(IrOpcode::Value expected, const Operator* op,
+                                   const Operator* trunc) {
+    Node* node = graph()->NewNode(op, p0, p1);
+    Return(graph()->NewNode(trunc, node));
+    Lower();
+    CHECK_EQ(expected, node->opcode());
+  }
+
+  void Lower() {
+    SimplifiedLowering lowering(&jsgraph);
+    lowering.LowerAllNodes();
+  }
+
+  // Inserts the node as the return value of the graph.
+  Node* Return(Node* node) {
+    ret->ReplaceInput(0, node);
+    return node;
+  }
+
+  // Inserts the node as the effect input to the return of the graph.
+  void Effect(Node* node) { ret->ReplaceInput(1, node); }
+
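+  // Builds and returns a node whose output has the representation/type
+  // {type}, using machine operators (or p0 for the tagged case).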
+  Node* ExampleWithOutput(MachineType type) {
+    // TODO(titzer): use parameters with guaranteed representations.
+    if (type & kTypeInt32) {
+      return graph()->NewNode(machine()->Int32Add(), jsgraph.Int32Constant(1),
+                              jsgraph.Int32Constant(1));
+    } else if (type & kTypeUint32) {
+      return graph()->NewNode(machine()->Word32Shr(), jsgraph.Int32Constant(1),
+                              jsgraph.Int32Constant(1));
+    } else if (type & kRepFloat64) {
+      return graph()->NewNode(machine()->Float64Add(),
+                              jsgraph.Float64Constant(1),
+                              jsgraph.Float64Constant(1));
+    } else if (type & kRepBit) {
+      return graph()->NewNode(machine()->Word32Equal(),
+                              jsgraph.Int32Constant(1),
+                              jsgraph.Int32Constant(1));
+    } else if (type & kRepWord64) {
+      return graph()->NewNode(machine()->Int64Add(), Int64Constant(1),
+                              Int64Constant(1));
+    } else {
+      CHECK(type & kRepTagged);
+      return p0;
+    }
+  }
+
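+  // Builds and returns a use of {node} that requires representation/type
+  // {type}.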
+  Node* Use(Node* node, MachineType type) {
+    if (type & kTypeInt32) {
+      return graph()->NewNode(machine()->Int32LessThan(), node,
+                              jsgraph.Int32Constant(1));
+    } else if (type & kTypeUint32) {
+      return graph()->NewNode(machine()->Uint32LessThan(), node,
+                              jsgraph.Int32Constant(1));
+    } else if (type & kRepFloat64) {
+      return graph()->NewNode(machine()->Float64Add(), node,
+                              jsgraph.Float64Constant(1));
+    } else if (type & kRepWord64) {
+      return graph()->NewNode(machine()->Int64LessThan(), node,
+                              Int64Constant(1));
+    } else {
+      return graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), node,
+                              jsgraph.TrueConstant());
+    }
+  }
+
+  Node* Branch(Node* cond) {
+    Node* br = graph()->NewNode(common()->Branch(), cond, start);
+    Node* tb = graph()->NewNode(common()->IfTrue(), br);
+    Node* fb = graph()->NewNode(common()->IfFalse(), br);
+    Node* m = graph()->NewNode(common()->Merge(2), tb, fb);
+    NodeProperties::ReplaceControlInput(ret, m);
+    return br;
+  }
+
+  Node* Int64Constant(int64_t v) {
+    return graph()->NewNode(common()->Int64Constant(v));
+  }
+
+  SimplifiedOperatorBuilder* simplified() { return &main_simplified_; }
+  MachineOperatorBuilder* machine() { return &main_machine_; }
+  CommonOperatorBuilder* common() { return &main_common_; }
+  Graph* graph() { return main_graph_; }
+};
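+
+
+// The skeleton prebuilt by the TestingGraph constructor is, roughly:
+//
+//   Start --> Return(Constant(0), start, start) --> End
+//
+// with typed Parameter nodes p0..p2; tests splice their nodes in via
+// Return(), Effect() and Branch(), then call Lower() and inspect the result.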
+
+
+TEST(LowerBooleanNot_bit_bit) {
+  // BooleanNot(x: kRepBit) used as kRepBit
+  TestingGraph t(Type::Boolean());
+  Node* b = t.ExampleWithOutput(kRepBit);
+  Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
+  Node* use = t.Branch(inv);
+  t.Lower();
+  Node* cmp = use->InputAt(0);
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
+  CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
+  Node* f = t.jsgraph.Int32Constant(0);
+  CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
+}
+
+
+TEST(LowerBooleanNot_bit_tagged) {
+  // BooleanNot(x: kRepBit) used as kRepTagged
+  TestingGraph t(Type::Boolean());
+  Node* b = t.ExampleWithOutput(kRepBit);
+  Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
+  Node* use = t.Use(inv, kRepTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
+  Node* cmp = use->InputAt(0)->InputAt(0);
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
+  CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
+  Node* f = t.jsgraph.Int32Constant(0);
+  CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
+}
+
+
+TEST(LowerBooleanNot_tagged_bit) {
+  // BooleanNot(x: kRepTagged) used as kRepBit
+  TestingGraph t(Type::Boolean());
+  Node* b = t.p0;
+  Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
+  Node* use = t.Branch(inv);
+  t.Lower();
+  Node* cmp = use->InputAt(0);
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
+  CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
+  Node* f = t.jsgraph.FalseConstant();
+  CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
+}
+
+
+TEST(LowerBooleanNot_tagged_tagged) {
+  // BooleanNot(x: kRepTagged) used as kRepTagged
+  TestingGraph t(Type::Boolean());
+  Node* b = t.p0;
+  Node* inv = t.graph()->NewNode(t.simplified()->BooleanNot(), b);
+  Node* use = t.Use(inv, kRepTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kChangeBitToBool, use->InputAt(0)->opcode());
+  Node* cmp = use->InputAt(0)->InputAt(0);
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cmp->opcode());
+  CHECK(b == cmp->InputAt(0) || b == cmp->InputAt(1));
+  Node* f = t.jsgraph.FalseConstant();
+  CHECK(f == cmp->InputAt(0) || f == cmp->InputAt(1));
+}
+
+
+TEST(LowerBooleanToNumber_bit_int32) {
+  // BooleanToNumber(x: kRepBit) used as kMachInt32
+  TestingGraph t(Type::Boolean());
+  Node* b = t.ExampleWithOutput(kRepBit);
+  Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
+  Node* use = t.Use(cnv, kMachInt32);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(b, use->InputAt(0));
+}
+
+
+TEST(LowerBooleanToNumber_tagged_int32) {
+  // BooleanToNumber(x: kRepTagged) used as kMachInt32
+  TestingGraph t(Type::Boolean());
+  Node* b = t.p0;
+  Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
+  Node* use = t.Use(cnv, kMachInt32);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
+  CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
+  Node* c = t.jsgraph.TrueConstant();
+  CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
+}
+
+
+TEST(LowerBooleanToNumber_bit_tagged) {
+  // BooleanToNumber(x: kRepBit) used as kMachAnyTagged
+  TestingGraph t(Type::Boolean());
+  Node* b = t.ExampleWithOutput(kRepBit);
+  Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
+  Node* use = t.Use(cnv, kMachAnyTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(b, use->InputAt(0)->InputAt(0));
+  CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
+}
+
+
+TEST(LowerBooleanToNumber_tagged_tagged) {
+  // BooleanToNumber(x: kRepTagged) used as kMachAnyTagged
+  TestingGraph t(Type::Boolean());
+  Node* b = t.p0;
+  Node* cnv = t.graph()->NewNode(t.simplified()->BooleanToNumber(), b);
+  Node* use = t.Use(cnv, kMachAnyTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(cnv, use->InputAt(0)->InputAt(0));
+  CHECK_EQ(IrOpcode::kChangeInt32ToTagged, use->InputAt(0)->opcode());
+  CHECK_EQ(t.machine()->WordEqual()->opcode(), cnv->opcode());
+  CHECK(b == cnv->InputAt(0) || b == cnv->InputAt(1));
+  Node* c = t.jsgraph.TrueConstant();
+  CHECK(c == cnv->InputAt(0) || c == cnv->InputAt(1));
+}
+
+
+static Type* test_types[] = {Type::Signed32(), Type::Unsigned32(),
+                             Type::Number(), Type::Any()};
+
+
+TEST(LowerNumberCmp_to_int32) {
+  TestingGraph t(Type::Signed32(), Type::Signed32());
+
+  t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
+  t.CheckLoweringBinop(IrOpcode::kInt32LessThan,
+                       t.simplified()->NumberLessThan());
+  t.CheckLoweringBinop(IrOpcode::kInt32LessThanOrEqual,
+                       t.simplified()->NumberLessThanOrEqual());
+}
+
+
+TEST(LowerNumberCmp_to_uint32) {
+  TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
+
+  t.CheckLoweringBinop(IrOpcode::kWord32Equal, t.simplified()->NumberEqual());
+  t.CheckLoweringBinop(IrOpcode::kUint32LessThan,
+                       t.simplified()->NumberLessThan());
+  t.CheckLoweringBinop(IrOpcode::kUint32LessThanOrEqual,
+                       t.simplified()->NumberLessThanOrEqual());
+}
+
+
+TEST(LowerNumberCmp_to_float64) {
+  static Type* types[] = {Type::Number(), Type::Any()};
+
+  for (size_t i = 0; i < arraysize(types); i++) {
+    TestingGraph t(types[i], types[i]);
+
+    t.CheckLoweringBinop(IrOpcode::kFloat64Equal,
+                         t.simplified()->NumberEqual());
+    t.CheckLoweringBinop(IrOpcode::kFloat64LessThan,
+                         t.simplified()->NumberLessThan());
+    t.CheckLoweringBinop(IrOpcode::kFloat64LessThanOrEqual,
+                         t.simplified()->NumberLessThanOrEqual());
+  }
+}
+
+
+TEST(LowerNumberAddSub_to_int32) {
+  TestingGraph t(Type::Signed32(), Type::Signed32());
+  t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
+                                t.simplified()->NumberAdd(),
+                                t.simplified()->NumberToInt32());
+  t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
+                                t.simplified()->NumberSubtract(),
+                                t.simplified()->NumberToInt32());
+}
+
+
+TEST(LowerNumberAddSub_to_uint32) {
+  TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
+  t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Add,
+                                t.simplified()->NumberAdd(),
+                                t.simplified()->NumberToUint32());
+  t.CheckLoweringTruncatedBinop(IrOpcode::kInt32Sub,
+                                t.simplified()->NumberSubtract(),
+                                t.simplified()->NumberToUint32());
+}
+
+
+TEST(LowerNumberAddSub_to_float64) {
+  for (size_t i = 0; i < arraysize(test_types); i++) {
+    TestingGraph t(test_types[i], test_types[i]);
+
+    t.CheckLoweringBinop(IrOpcode::kFloat64Add, t.simplified()->NumberAdd());
+    t.CheckLoweringBinop(IrOpcode::kFloat64Sub,
+                         t.simplified()->NumberSubtract());
+  }
+}
+
+
+TEST(LowerNumberDivMod_to_float64) {
+  for (size_t i = 0; i < arraysize(test_types); i++) {
+    TestingGraph t(test_types[i], test_types[i]);
+
+    t.CheckLoweringBinop(IrOpcode::kFloat64Div, t.simplified()->NumberDivide());
+    t.CheckLoweringBinop(IrOpcode::kFloat64Mod,
+                         t.simplified()->NumberModulus());
+  }
+}
+
+
+static void CheckChangeOf(IrOpcode::Value change, Node* of, Node* node) {
+  CHECK_EQ(change, node->opcode());
+  CHECK_EQ(of, node->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_nop) {
+  // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepTagged
+  TestingGraph t(Type::Signed32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
+  Node* use = t.Use(trunc, kRepTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeTaggedToFloat64) {
+  // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepFloat64
+  TestingGraph t(Type::Signed32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
+  Node* use = t.Use(trunc, kRepFloat64);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
+  // NumberToInt32(x: kRepTagged | kTypeInt32) used as kRepWord32
+  TestingGraph t(Type::Signed32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
+  Node* use = t.Use(trunc, kTypeInt32);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
+  // NumberToInt32(x: kRepFloat64) used as kMachInt32
+  TestingGraph t(Type::Number());
+  Node* p0 = t.ExampleWithOutput(kMachFloat64);
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
+  Node* use = t.Use(trunc, kMachInt32);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
+  // NumberToInt32(x: kTypeNumber | kRepTagged) used as kMachInt32
+  TestingGraph t(Type::Number());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), t.p0);
+  Node* use = t.Use(trunc, kMachInt32);
+  t.Return(use);
+  t.Lower();
+  Node* node = use->InputAt(0);
+  CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
+  Node* of = node->InputAt(0);
+  CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
+  CHECK_EQ(t.p0, of->InputAt(0));
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeFloat64ToTagged) {
+  // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepTagged
+}
+
+
+TEST(LowerNumberToInt32_to_ChangeFloat64ToInt32) {
+  // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepWord32
+  // | kTypeInt32
+}
+
+
+TEST(LowerNumberToUint32_to_nop) {
+  // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepTagged
+  TestingGraph t(Type::Unsigned32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
+  Node* use = t.Use(trunc, kRepTagged);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_ChangeTaggedToFloat64) {
+  // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepFloat64
+  TestingGraph t(Type::Unsigned32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
+  Node* use = t.Use(trunc, kRepFloat64);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_ChangeTaggedToUint32) {
+  // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepWord32
+  TestingGraph t(Type::Unsigned32());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
+  Node* use = t.Use(trunc, kTypeUint32);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kChangeTaggedToUint32, t.p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32) {
+  // NumberToUint32(x: kRepFloat64) used as kMachUint32
+  TestingGraph t(Type::Number());
+  Node* p0 = t.ExampleWithOutput(kMachFloat64);
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), p0);
+  Node* use = t.Use(trunc, kMachUint32);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, p0, use->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
+  // NumberToUint32(x: kTypeNumber | kRepTagged) used as kMachUint32
+  TestingGraph t(Type::Number());
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), t.p0);
+  Node* use = t.Use(trunc, kMachUint32);
+  t.Return(use);
+  t.Lower();
+  Node* node = use->InputAt(0);
+  CHECK_EQ(IrOpcode::kTruncateFloat64ToInt32, node->opcode());
+  Node* of = node->InputAt(0);
+  CHECK_EQ(IrOpcode::kChangeTaggedToFloat64, of->opcode());
+  CHECK_EQ(t.p0, of->InputAt(0));
+}
+
+
+TEST(LowerNumberToUint32_to_ChangeFloat64ToTagged) {
+  // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
+  // kRepTagged
+}
+
+
+TEST(LowerNumberToUint32_to_ChangeFloat64ToUint32) {
+  // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
+  // kRepWord32
+}
+
+
+TEST(LowerNumberToUint32_to_TruncateFloat64ToUint32) {
+  // TODO(titzer): NumberToUint32(x: kRepFloat64) used as kRepWord32
+}
+
+
+TEST(LowerReferenceEqual_to_wordeq) {
+  TestingGraph t(Type::Any(), Type::Any());
+  IrOpcode::Value opcode =
+      static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
+  t.CheckLoweringBinop(opcode, t.simplified()->ReferenceEqual(Type::Any()));
+}
+
+
+TEST(LowerStringOps_to_call_and_compare) {
+  if (Pipeline::SupportedTarget()) {
+    // These tests need linkage for the calls.
+    TestingGraph t(Type::String(), Type::String());
+    IrOpcode::Value compare_eq =
+        static_cast<IrOpcode::Value>(t.machine()->WordEqual()->opcode());
+    IrOpcode::Value compare_lt =
+        static_cast<IrOpcode::Value>(t.machine()->IntLessThan()->opcode());
+    IrOpcode::Value compare_le = static_cast<IrOpcode::Value>(
+        t.machine()->IntLessThanOrEqual()->opcode());
+    t.CheckLoweringBinop(compare_eq, t.simplified()->StringEqual());
+    t.CheckLoweringBinop(compare_lt, t.simplified()->StringLessThan());
+    t.CheckLoweringBinop(compare_le, t.simplified()->StringLessThanOrEqual());
+    t.CheckLoweringBinop(IrOpcode::kCall, t.simplified()->StringAdd());
+  }
+}
+
+
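+// Verifies that using a value produced at MachineType |from| where |to| is
+// required causes a single change node with the expected opcode to be
+// inserted between the producer and the use.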
+void CheckChangeInsertion(IrOpcode::Value expected, MachineType from,
+                          MachineType to) {
+  TestingGraph t(Type::Any());
+  Node* in = t.ExampleWithOutput(from);
+  Node* use = t.Use(in, to);
+  t.Return(use);
+  t.Lower();
+  CHECK_EQ(expected, use->InputAt(0)->opcode());
+  CHECK_EQ(in, use->InputAt(0)->InputAt(0));
+}
+
+
+TEST(InsertBasicChanges) {
+  CheckChangeInsertion(IrOpcode::kChangeFloat64ToInt32, kRepFloat64,
+                       kTypeInt32);
+  CheckChangeInsertion(IrOpcode::kChangeFloat64ToUint32, kRepFloat64,
+                       kTypeUint32);
+  CheckChangeInsertion(IrOpcode::kChangeTaggedToInt32, kRepTagged, kTypeInt32);
+  CheckChangeInsertion(IrOpcode::kChangeTaggedToUint32, kRepTagged,
+                       kTypeUint32);
+
+  CheckChangeInsertion(IrOpcode::kChangeFloat64ToTagged, kRepFloat64,
+                       kRepTagged);
+  CheckChangeInsertion(IrOpcode::kChangeTaggedToFloat64, kRepTagged,
+                       kRepFloat64);
+
+  CheckChangeInsertion(IrOpcode::kChangeInt32ToFloat64, kTypeInt32,
+                       kRepFloat64);
+  CheckChangeInsertion(IrOpcode::kChangeInt32ToTagged, kTypeInt32, kRepTagged);
+
+  CheckChangeInsertion(IrOpcode::kChangeUint32ToFloat64, kTypeUint32,
+                       kRepFloat64);
+  CheckChangeInsertion(IrOpcode::kChangeUint32ToTagged, kTypeUint32,
+                       kRepTagged);
+}
+
+
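+// For a binop on tagged inputs, lowering is expected to wrap both inputs in
+// the same input change and the result in an output change, e.g. (sketch):
+//   ChangeInt32ToTagged(Int32Add(ChangeTaggedToInt32(p0),
+//                                ChangeTaggedToInt32(p1)))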
+static void CheckChangesAroundBinop(TestingGraph* t, const Operator* op,
+                                    IrOpcode::Value input_change,
+                                    IrOpcode::Value output_change) {
+  Node* binop = t->graph()->NewNode(op, t->p0, t->p1);
+  t->Return(binop);
+  t->Lower();
+  CHECK_EQ(input_change, binop->InputAt(0)->opcode());
+  CHECK_EQ(input_change, binop->InputAt(1)->opcode());
+  CHECK_EQ(t->p0, binop->InputAt(0)->InputAt(0));
+  CHECK_EQ(t->p1, binop->InputAt(1)->InputAt(0));
+  CHECK_EQ(output_change, t->ret->InputAt(0)->opcode());
+  CHECK_EQ(binop, t->ret->InputAt(0)->InputAt(0));
+}
+
+
+TEST(InsertChangesAroundInt32Binops) {
+  TestingGraph t(Type::Signed32(), Type::Signed32());
+
+  const Operator* ops[] = {t.machine()->Int32Add(),  t.machine()->Int32Sub(),
+                           t.machine()->Int32Mul(),  t.machine()->Int32Div(),
+                           t.machine()->Int32Mod(),  t.machine()->Word32And(),
+                           t.machine()->Word32Or(),  t.machine()->Word32Xor(),
+                           t.machine()->Word32Shl(), t.machine()->Word32Sar()};
+
+  for (size_t i = 0; i < arraysize(ops); i++) {
+    CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
+                            IrOpcode::kChangeInt32ToTagged);
+  }
+}
+
+
+TEST(InsertChangesAroundInt32Cmp) {
+  TestingGraph t(Type::Signed32(), Type::Signed32());
+
+  const Operator* ops[] = {t.machine()->Int32LessThan(),
+                           t.machine()->Int32LessThanOrEqual()};
+
+  for (size_t i = 0; i < arraysize(ops); i++) {
+    CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToInt32,
+                            IrOpcode::kChangeBitToBool);
+  }
+}
+
+
+TEST(InsertChangesAroundUint32Cmp) {
+  TestingGraph t(Type::Unsigned32(), Type::Unsigned32());
+
+  const Operator* ops[] = {t.machine()->Uint32LessThan(),
+                           t.machine()->Uint32LessThanOrEqual()};
+
+  for (size_t i = 0; i < arraysize(ops); i++) {
+    CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToUint32,
+                            IrOpcode::kChangeBitToBool);
+  }
+}
+
+
+TEST(InsertChangesAroundFloat64Binops) {
+  TestingGraph t(Type::Number(), Type::Number());
+
+  const Operator* ops[] = {
+      t.machine()->Float64Add(), t.machine()->Float64Sub(),
+      t.machine()->Float64Mul(), t.machine()->Float64Div(),
+      t.machine()->Float64Mod(),
+  };
+
+  for (size_t i = 0; i < arraysize(ops); i++) {
+    CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
+                            IrOpcode::kChangeFloat64ToTagged);
+  }
+}
+
+
+TEST(InsertChangesAroundFloat64Cmp) {
+  TestingGraph t(Type::Number(), Type::Number());
+
+  const Operator* ops[] = {t.machine()->Float64Equal(),
+                           t.machine()->Float64LessThan(),
+                           t.machine()->Float64LessThanOrEqual()};
+
+  for (size_t i = 0; i < arraysize(ops); i++) {
+    CheckChangesAroundBinop(&t, ops[i], IrOpcode::kChangeTaggedToFloat64,
+                            IrOpcode::kChangeBitToBool);
+  }
+}
+
+
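+// A lowered field access addresses the object at a constant offset, so the
+// machine (Load|Store) node's index input should be the untagged offset
+// access.offset - access.tag().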
+void CheckFieldAccessArithmetic(FieldAccess access, Node* load_or_store) {
+  Int32Matcher index = Int32Matcher(load_or_store->InputAt(1));
+  CHECK(index.Is(access.offset - access.tag()));
+}
+
+
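+// A lowered element access computes its index as (sketch):
+//   Int32Add(Int32Mul(index, #element_size), #(header_size - tag))
+// with the Int32Mul omitted for byte-sized elements. Returns the node that
+// feeds the index so callers can check for inserted changes.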
+Node* CheckElementAccessArithmetic(ElementAccess access, Node* load_or_store) {
+  Int32BinopMatcher index(load_or_store->InputAt(1));
+  CHECK_EQ(IrOpcode::kInt32Add, index.node()->opcode());
+  CHECK(index.right().Is(access.header_size - access.tag()));
+
+  int element_size = ElementSizeOf(access.machine_type);
+
+  if (element_size != 1) {
+    Int32BinopMatcher mul(index.left().node());
+    CHECK_EQ(IrOpcode::kInt32Mul, mul.node()->opcode());
+    CHECK(mul.right().Is(element_size));
+    return mul.left().node();
+  } else {
+    return index.left().node();
+  }
+}
+
+
+static const MachineType machine_reps[] = {
+    kRepBit,    kMachInt8,    kMachInt16,    kMachInt32,
+    kMachInt64, kMachFloat64, kMachAnyTagged};
+
+
+TEST(LowerLoadField_to_load) {
+  TestingGraph t(Type::Any(), Type::Signed32());
+
+  for (size_t i = 0; i < arraysize(machine_reps); i++) {
+    FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                          Handle<Name>::null(), Type::Any(), machine_reps[i]};
+
+    Node* load =
+        t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
+    Node* use = t.Use(load, machine_reps[i]);
+    t.Return(use);
+    t.Lower();
+    CHECK_EQ(IrOpcode::kLoad, load->opcode());
+    CHECK_EQ(t.p0, load->InputAt(0));
+    CheckFieldAccessArithmetic(access, load);
+
+    MachineType rep = OpParameter<MachineType>(load);
+    CHECK_EQ(machine_reps[i], rep);
+  }
+}
+
+
+TEST(LowerStoreField_to_store) {
+  TestingGraph t(Type::Any(), Type::Signed32());
+
+  for (size_t i = 0; i < arraysize(machine_reps); i++) {
+    FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                          Handle<Name>::null(), Type::Any(), machine_reps[i]};
+
+    Node* val = t.ExampleWithOutput(machine_reps[i]);
+    Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
+                                     val, t.start, t.start);
+    t.Effect(store);
+    t.Lower();
+    CHECK_EQ(IrOpcode::kStore, store->opcode());
+    CHECK_EQ(val, store->InputAt(2));
+    CheckFieldAccessArithmetic(access, store);
+
+    StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
+    if (machine_reps[i] & kRepTagged) {
+      CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+    }
+    CHECK_EQ(machine_reps[i], rep.machine_type());
+  }
+}
+
+
+TEST(LowerLoadElement_to_load) {
+  TestingGraph t(Type::Any(), Type::Signed32());
+
+  for (size_t i = 0; i < arraysize(machine_reps); i++) {
+    ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                            Type::Any(), machine_reps[i]};
+
+    Node* load =
+        t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
+                           t.jsgraph.Int32Constant(1024), t.start);
+    Node* use = t.Use(load, machine_reps[i]);
+    t.Return(use);
+    t.Lower();
+    CHECK_EQ(IrOpcode::kLoad, load->opcode());
+    CHECK_EQ(t.p0, load->InputAt(0));
+    CheckElementAccessArithmetic(access, load);
+
+    MachineType rep = OpParameter<MachineType>(load);
+    CHECK_EQ(machine_reps[i], rep);
+  }
+}
+
+
+TEST(LowerStoreElement_to_store) {
+  TestingGraph t(Type::Any(), Type::Signed32());
+
+  for (size_t i = 0; i < arraysize(machine_reps); i++) {
+    ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                            Type::Any(), machine_reps[i]};
+
+    Node* val = t.ExampleWithOutput(machine_reps[i]);
+    Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
+                                     t.p1, t.jsgraph.Int32Constant(1024), val,
+                                     t.start, t.start);
+    t.Effect(store);
+    t.Lower();
+    CHECK_EQ(IrOpcode::kStore, store->opcode());
+    CHECK_EQ(val, store->InputAt(2));
+    CheckElementAccessArithmetic(access, store);
+
+    StoreRepresentation rep = OpParameter<StoreRepresentation>(store);
+    if (machine_reps[i] & kRepTagged) {
+      CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
+    }
+    CHECK_EQ(machine_reps[i], rep.machine_type());
+  }
+}
+
+
+TEST(InsertChangeForLoadElementIndex) {
+  // LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length) =>
+  //   Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
+  TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
+  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
+                          kMachAnyTagged};
+
+  Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
+                                  t.p1, t.p2, t.start);
+  t.Return(load);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kLoad, load->opcode());
+  CHECK_EQ(t.p0, load->InputAt(0));
+
+  Node* index = CheckElementAccessArithmetic(access, load);
+  CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
+}
+
+
+TEST(InsertChangeForStoreElementIndex) {
+  // StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, length, val) =>
+  //   Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
+  TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
+  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
+                          kMachAnyTagged};
+
+  Node* store =
+      t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1, t.p2,
+                         t.jsgraph.TrueConstant(), t.start, t.start);
+  t.Effect(store);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kStore, store->opcode());
+  CHECK_EQ(t.p0, store->InputAt(0));
+
+  Node* index = CheckElementAccessArithmetic(access, store);
+  CheckChangeOf(IrOpcode::kChangeTaggedToInt32, t.p1, index);
+}
+
+
+TEST(InsertChangeForLoadElement) {
+  // TODO(titzer): test all load/store representation change insertions.
+  TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
+  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
+                          kMachFloat64};
+
+  Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
+                                  t.p1, t.p1, t.start);
+  t.Return(load);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kLoad, load->opcode());
+  CHECK_EQ(t.p0, load->InputAt(0));
+  CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
+}
+
+
+TEST(InsertChangeForLoadField) {
+  // TODO(titzer): test all load/store representation change insertions.
+  TestingGraph t(Type::Any(), Type::Signed32());
+  FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                        Handle<Name>::null(), Type::Any(), kMachFloat64};
+
+  Node* load =
+      t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
+  t.Return(load);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kLoad, load->opcode());
+  CHECK_EQ(t.p0, load->InputAt(0));
+  CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
+}
+
+
+TEST(InsertChangeForStoreElement) {
+  // TODO(titzer): test all load/store representation change insertions.
+  TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
+  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
+                          kMachFloat64};
+
+  Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
+                                   t.jsgraph.Int32Constant(0), t.p2, t.p1,
+                                   t.start, t.start);
+  t.Effect(store);
+  t.Lower();
+
+  CHECK_EQ(IrOpcode::kStore, store->opcode());
+  CHECK_EQ(t.p0, store->InputAt(0));
+  CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
+}
+
+
+TEST(InsertChangeForStoreField) {
+  // TODO(titzer): test all load/store representation change insertions.
+  TestingGraph t(Type::Any(), Type::Signed32());
+  FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                        Handle<Name>::null(), Type::Any(), kMachFloat64};
+
+  Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
+                                   t.p1, t.start, t.start);
+  t.Effect(store);
+  t.Lower();
+
+  CHECK_EQ(IrOpcode::kStore, store->opcode());
+  CHECK_EQ(t.p0, store->InputAt(0));
+  CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
+}
+
+
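+// Phis are not replaced during lowering; instead the phi's MachineType
+// parameter is updated so that its representation matches the way the
+// value is used.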
+TEST(UpdatePhi) {
+  TestingGraph t(Type::Any(), Type::Signed32());
+  static const MachineType kMachineTypes[] = {kMachInt32, kMachUint32,
+                                              kMachFloat64};
+
+  for (size_t i = 0; i < arraysize(kMachineTypes); i++) {
+    FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                          Handle<Name>::null(), Type::Any(), kMachineTypes[i]};
+
+    Node* load0 =
+        t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
+    Node* load1 =
+        t.graph()->NewNode(t.simplified()->LoadField(access), t.p1, t.start);
+    Node* phi = t.graph()->NewNode(t.common()->Phi(kMachAnyTagged, 2), load0,
+                                   load1, t.start);
+    t.Return(t.Use(phi, kMachineTypes[i]));
+    t.Lower();
+
+    CHECK_EQ(IrOpcode::kPhi, phi->opcode());
+    CHECK_EQ(RepresentationOf(kMachineTypes[i]),
+             RepresentationOf(OpParameter<MachineType>(phi)));
+  }
+}
+
+
+// TODO(titzer): this tests current behavior of assuming an implicit
+// representation change in loading float32s. Fix when float32 is fully
+// supported.
+TEST(ImplicitFloat32ToFloat64InLoads) {
+  TestingGraph t(Type::Any());
+
+  FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                        Handle<Name>::null(), Type::Any(), kMachFloat32};
+
+  Node* load =
+      t.graph()->NewNode(t.simplified()->LoadField(access), t.p0, t.start);
+  t.Return(load);
+  t.Lower();
+  CHECK_EQ(IrOpcode::kLoad, load->opcode());
+  CHECK_EQ(t.p0, load->InputAt(0));
+  CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, load, t.ret->InputAt(0));
+}
+
+
+TEST(ImplicitFloat64ToFloat32InStores) {
+  TestingGraph t(Type::Any(), Type::Signed32());
+  FieldAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                        Handle<Name>::null(), Type::Any(), kMachFloat32};
+
+  Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
+                                   t.p1, t.start, t.start);
+  t.Effect(store);
+  t.Lower();
+
+  CHECK_EQ(IrOpcode::kStore, store->opcode());
+  CHECK_EQ(t.p0, store->InputAt(0));
+  CheckChangeOf(IrOpcode::kChangeTaggedToFloat64, t.p1, store->InputAt(2));
+}
diff --git a/test/cctest/compiler/value-helper.h b/test/cctest/compiler/value-helper.h
new file mode 100644
index 0000000..b5da982
--- /dev/null
+++ b/test/cctest/compiler/value-helper.h
@@ -0,0 +1,134 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_CCTEST_COMPILER_VALUE_HELPER_H_
+#define V8_CCTEST_COMPILER_VALUE_HELPER_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/isolate.h"
+#include "src/objects.h"
+#include "test/cctest/cctest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// A collection of utilities related to numerical and heap values, including
+// example input values of various types, including int32_t, uint32_t, double,
+// etc.
+class ValueHelper {
+ public:
+  Isolate* isolate_;
+
+  ValueHelper() : isolate_(CcTest::InitIsolateOnce()) {}
+
+  void CheckFloat64Constant(double expected, Node* node) {
+    CHECK_EQ(IrOpcode::kFloat64Constant, node->opcode());
+    CHECK_EQ(expected, OpParameter<double>(node));
+  }
+
+  void CheckNumberConstant(double expected, Node* node) {
+    CHECK_EQ(IrOpcode::kNumberConstant, node->opcode());
+    CHECK_EQ(expected, OpParameter<double>(node));
+  }
+
+  void CheckInt32Constant(int32_t expected, Node* node) {
+    CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
+    CHECK_EQ(expected, OpParameter<int32_t>(node));
+  }
+
+  void CheckUint32Constant(uint32_t expected, Node* node) {
+    CHECK_EQ(IrOpcode::kInt32Constant, node->opcode());
+    CHECK_EQ(expected, OpParameter<uint32_t>(node));
+  }
+
+  void CheckHeapConstant(Object* expected, Node* node) {
+    CHECK_EQ(IrOpcode::kHeapConstant, node->opcode());
+    CHECK_EQ(expected, *OpParameter<Unique<Object> >(node).handle());
+  }
+
+  void CheckTrue(Node* node) {
+    CheckHeapConstant(isolate_->heap()->true_value(), node);
+  }
+
+  void CheckFalse(Node* node) {
+    CheckHeapConstant(isolate_->heap()->false_value(), node);
+  }
+
+  static std::vector<double> float64_vector() {
+    static const double nan = v8::base::OS::nan_value();
+    static const double values[] = {
+        0.125,           0.25,            0.375,          0.5,
+        1.25,            -1.75,           2,              5.125,
+        6.25,            0.0,             -0.0,           982983.25,
+        888,             2147483647.0,    -999.75,        3.1e7,
+        -2e66,           3e-88,           -2147483648.0,  V8_INFINITY,
+        -V8_INFINITY,    nan,             2147483647.375, 2147483647.75,
+        2147483648.0,    2147483648.25,   2147483649.25,  -2147483647.0,
+        -2147483647.125, -2147483647.875, -2147483648.25, -2147483649.5};
+    return std::vector<double>(&values[0], &values[arraysize(values)]);
+  }
+
+  static const std::vector<int32_t> int32_vector() {
+    std::vector<uint32_t> values = uint32_vector();
+    return std::vector<int32_t>(values.begin(), values.end());
+  }
+
+  static const std::vector<uint32_t> uint32_vector() {
+    static const uint32_t kValues[] = {
+        0x00000000, 0x00000001, 0xffffffff, 0x1b09788b, 0x04c5fce8, 0xcc0de5bf,
+        0x273a798e, 0x187937a3, 0xece3af83, 0x5495a16b, 0x0b668ecc, 0x11223344,
+        0x0000009e, 0x00000043, 0x0000af73, 0x0000116b, 0x00658ecc, 0x002b3b4c,
+        0x88776655, 0x70000000, 0x07200000, 0x7fffffff, 0x56123761, 0x7fffff00,
+        0x761c4761, 0x80000000, 0x88888888, 0xa0000000, 0xdddddddd, 0xe0000000,
+        0xeeeeeeee, 0xfffffffd, 0xf0000000, 0x007fffff, 0x003fffff, 0x001fffff,
+        0x000fffff, 0x0007ffff, 0x0003ffff, 0x0001ffff, 0x0000ffff, 0x00007fff,
+        0x00003fff, 0x00001fff, 0x00000fff, 0x000007ff, 0x000003ff, 0x000001ff};
+    return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+  }
+
+  static const std::vector<double> nan_vector(size_t limit = 0) {
+    static const double nan = v8::base::OS::nan_value();
+    static const double values[] = {-nan,               -V8_INFINITY * -0.0,
+                                    -V8_INFINITY * 0.0, V8_INFINITY * -0.0,
+                                    V8_INFINITY * 0.0,  nan};
+    return std::vector<double>(&values[0], &values[arraysize(values)]);
+  }
+
+  static const std::vector<uint32_t> ror_vector() {
+    static const uint32_t kValues[31] = {
+        1,  2,  3,  4,  5,  6,  7,  8,  9,  10, 11, 12, 13, 14, 15, 16,
+        17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31};
+    return std::vector<uint32_t>(&kValues[0], &kValues[arraysize(kValues)]);
+  }
+};
+
+// Helper macros that can be used in FOR_INT32_INPUTS(i) { ... *i ... }
+// Watch out, these macros aren't hygienic; they pollute your scope. Thanks STL.
+#define FOR_INPUTS(ctype, itype, var)                           \
+  std::vector<ctype> var##_vec = ValueHelper::itype##_vector(); \
+  for (std::vector<ctype>::iterator var = var##_vec.begin();    \
+       var != var##_vec.end(); ++var)
+
+#define FOR_INT32_INPUTS(var) FOR_INPUTS(int32_t, int32, var)
+#define FOR_UINT32_INPUTS(var) FOR_INPUTS(uint32_t, uint32, var)
+#define FOR_FLOAT64_INPUTS(var) FOR_INPUTS(double, float64, var)
+
+#define FOR_INT32_SHIFTS(var) for (int32_t var = 0; var < 32; var++)
+
+#define FOR_UINT32_SHIFTS(var) for (uint32_t var = 0; var < 32; var++)
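+// Example (illustrative) -- iterating the canned int32 inputs:
+//   FOR_INT32_INPUTS(i) {
+//     int32_t input = *i;  // the loop variable is an iterator; dereference it
+//   }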
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_CCTEST_COMPILER_VALUE_HELPER_H_
diff --git a/test/cctest/profiler-extension.cc b/test/cctest/profiler-extension.cc
index 5779bf4..263fc4f 100644
--- a/test/cctest/profiler-extension.cc
+++ b/test/cctest/profiler-extension.cc
@@ -27,8 +27,8 @@
 //
 // Tests of profiles generator and utilities.
 
+#include "src/base/logging.h"
 #include "test/cctest/profiler-extension.h"
-#include "src/checks.h"
 
 namespace v8 {
 namespace internal {
diff --git a/test/cctest/test-alloc.cc b/test/cctest/test-alloc.cc
index 4520c20..d647a31 100644
--- a/test/cctest/test-alloc.cc
+++ b/test/cctest/test-alloc.cc
@@ -26,11 +26,11 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "src/v8.h"
+#include "test/cctest/cctest.h"
+
 #include "src/accessors.h"
 #include "src/api.h"
 
-#include "test/cctest/cctest.h"
-
 
 using namespace v8::internal;
 
@@ -50,7 +50,6 @@
   // for specific kinds.
   heap->AllocateFixedArray(100).ToObjectChecked();
   heap->AllocateHeapNumber(0.42).ToObjectChecked();
-  heap->AllocateArgumentsObject(Smi::FromInt(87), 10).ToObjectChecked();
   Object* object = heap->AllocateJSObject(
       *CcTest::i_isolate()->object_function()).ToObjectChecked();
   heap->CopyJSObject(JSObject::cast(object)).ToObjectChecked();
@@ -67,7 +66,7 @@
   static const int kLargeObjectSpaceFillerLength = 300000;
   static const int kLargeObjectSpaceFillerSize = FixedArray::SizeFor(
       kLargeObjectSpaceFillerLength);
-  ASSERT(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
+  DCHECK(kLargeObjectSpaceFillerSize > heap->old_pointer_space()->AreaSize());
   while (heap->OldGenerationSpaceAvailable() > kLargeObjectSpaceFillerSize) {
     heap->AllocateFixedArray(
         kLargeObjectSpaceFillerLength, TENURED).ToObjectChecked();
@@ -107,7 +106,7 @@
 
 
 void TestGetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
   HandleScope scope(isolate);
@@ -116,7 +115,7 @@
 
 
 void TestSetter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<void>& info) {
   UNREACHABLE();
@@ -125,7 +124,7 @@
 
 Handle<AccessorInfo> TestAccessorInfo(
       Isolate* isolate, PropertyAttributes attributes) {
-  Handle<String> name = isolate->factory()->NewStringFromStaticAscii("get");
+  Handle<String> name = isolate->factory()->NewStringFromStaticChars("get");
   return Accessors::MakeAccessor(isolate, name, &TestGetter, &TestSetter,
                                  attributes);
 }
@@ -147,7 +146,7 @@
   // Patch the map to have an accessor for "get".
   Handle<Map> map(function->initial_map());
   Handle<DescriptorArray> instance_descriptors(map->instance_descriptors());
-  ASSERT(instance_descriptors->IsEmpty());
+  DCHECK(instance_descriptors->IsEmpty());
 
   PropertyAttributes attrs = static_cast<PropertyAttributes>(0);
   Handle<AccessorInfo> foreign = TestAccessorInfo(isolate, attrs);
@@ -202,7 +201,7 @@
   code_range.SetUp(code_range_size);
   size_t current_allocated = 0;
   size_t total_allocated = 0;
-  List<Block> blocks(1000);
+  List< ::Block> blocks(1000);
 
   while (total_allocated < 5 * code_range_size) {
     if (current_allocated < code_range_size / 10) {
@@ -219,7 +218,7 @@
                                                   requested,
                                                   &allocated);
       CHECK(base != NULL);
-      blocks.Add(Block(base, static_cast<int>(allocated)));
+      blocks.Add(::Block(base, static_cast<int>(allocated)));
       current_allocated += static_cast<int>(allocated);
       total_allocated += static_cast<int>(allocated);
     } else {
diff --git a/test/cctest/test-api.cc b/test/cctest/test-api.cc
index 7fc80b3..0e80384 100644
--- a/test/cctest/test-api.cc
+++ b/test/cctest/test-api.cc
@@ -27,8 +27,8 @@
 
 #include <climits>
 #include <csignal>
-#include <string>
 #include <map>
+#include <string>
 
 #include "src/v8.h"
 
@@ -39,13 +39,13 @@
 #include "include/v8-util.h"
 #include "src/api.h"
 #include "src/arguments.h"
+#include "src/base/platform/platform.h"
 #include "src/compilation-cache.h"
 #include "src/cpu-profiler.h"
 #include "src/execution.h"
 #include "src/isolate.h"
 #include "src/objects.h"
 #include "src/parser.h"
-#include "src/platform.h"
 #include "src/snapshot.h"
 #include "src/unicode-inl.h"
 #include "src/utils.h"
@@ -63,6 +63,7 @@
 using ::v8::Handle;
 using ::v8::HandleScope;
 using ::v8::Local;
+using ::v8::Name;
 using ::v8::Message;
 using ::v8::MessageCallback;
 using ::v8::Object;
@@ -71,6 +72,7 @@
 using ::v8::Script;
 using ::v8::StackTrace;
 using ::v8::String;
+using ::v8::Symbol;
 using ::v8::TryCatch;
 using ::v8::Undefined;
 using ::v8::UniqueId;
@@ -128,19 +130,18 @@
 
 
 // Tests that call v8::V8::Dispose() cannot be threaded.
-TEST(InitializeAndDisposeOnce) {
+UNINITIALIZED_TEST(InitializeAndDisposeOnce) {
   CHECK(v8::V8::Initialize());
   CHECK(v8::V8::Dispose());
 }
 
 
 // Tests that call v8::V8::Dispose() cannot be threaded.
-TEST(InitializeAndDisposeMultiple) {
+UNINITIALIZED_TEST(InitializeAndDisposeMultiple) {
   for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
   for (int i = 0; i < 3; ++i) CHECK(v8::V8::Initialize());
   for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
-  // TODO(mstarzinger): This should fail gracefully instead of asserting.
-  // for (int i = 0; i < 3; ++i) CHECK(v8::V8::Initialize());
+  for (int i = 0; i < 3; ++i) CHECK(v8::V8::Initialize());
   for (int i = 0; i < 3; ++i) CHECK(v8::V8::Dispose());
 }
 
@@ -261,7 +262,7 @@
   const char* test_objects[] = {
       "fun_instance", "sub_fun_instance", "obj", "unrel" };
   unsigned bad_signature_start_offset = 2;
-  for (unsigned i = 0; i < ARRAY_SIZE(test_objects); i++) {
+  for (unsigned i = 0; i < arraysize(test_objects); i++) {
     i::ScopedVector<char> source(200);
     i::SNPrintF(
         source, "var test_object = %s; test_object", test_objects[i]);
@@ -409,7 +410,8 @@
 
 class TestResource: public String::ExternalStringResource {
  public:
-  TestResource(uint16_t* data, int* counter = NULL, bool owning_data = true)
+  explicit TestResource(uint16_t* data, int* counter = NULL,
+                        bool owning_data = true)
       : data_(data), length_(0), counter_(counter), owning_data_(owning_data) {
     while (data[length_]) ++length_;
   }
@@ -435,15 +437,16 @@
 };
 
 
-class TestAsciiResource: public String::ExternalAsciiStringResource {
+class TestOneByteResource : public String::ExternalOneByteStringResource {
  public:
-  TestAsciiResource(const char* data, int* counter = NULL, size_t offset = 0)
+  explicit TestOneByteResource(const char* data, int* counter = NULL,
+                               size_t offset = 0)
       : orig_data_(data),
         data_(data + offset),
         length_(strlen(data) - offset),
-        counter_(counter) { }
+        counter_(counter) {}
 
-  ~TestAsciiResource() {
+  ~TestOneByteResource() {
     i::DeleteArray(orig_data_);
     if (counter_ != NULL) ++*counter_;
   }
@@ -493,22 +496,22 @@
 }
 
 
-THREADED_TEST(ScriptUsingAsciiStringResource) {
+THREADED_TEST(ScriptUsingOneByteStringResource) {
   int dispose_count = 0;
   const char* c_source = "1 + 2 * 3";
   {
     LocalContext env;
     v8::HandleScope scope(env->GetIsolate());
-    TestAsciiResource* resource = new TestAsciiResource(i::StrDup(c_source),
-                                                        &dispose_count);
+    TestOneByteResource* resource =
+        new TestOneByteResource(i::StrDup(c_source), &dispose_count);
     Local<String> source = String::NewExternal(env->GetIsolate(), resource);
-    CHECK(source->IsExternalAscii());
+    CHECK(source->IsExternalOneByte());
     CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
-             source->GetExternalAsciiStringResource());
+             source->GetExternalOneByteStringResource());
     String::Encoding encoding = String::UNKNOWN_ENCODING;
     CHECK_EQ(static_cast<const String::ExternalStringResourceBase*>(resource),
              source->GetExternalStringResourceBase(&encoding));
-    CHECK_EQ(String::ASCII_ENCODING, encoding);
+    CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
     Local<Script> script = v8_compile(source);
     Local<Value> value = script->Run();
     CHECK(value->IsNumber());
@@ -534,10 +537,10 @@
     CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in survivor space now
     CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in old gen now
     CHECK_EQ(source->IsExternal(), false);
-    CHECK_EQ(source->IsExternalAscii(), false);
+    CHECK_EQ(source->IsExternalOneByte(), false);
     String::Encoding encoding = String::UNKNOWN_ENCODING;
     CHECK_EQ(NULL, source->GetExternalStringResourceBase(&encoding));
-    CHECK_EQ(String::ASCII_ENCODING, encoding);
+    CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
     bool success = source->MakeExternal(new TestResource(two_byte_source,
                                                          &dispose_count));
     CHECK(success);
@@ -554,7 +557,7 @@
 }
 
 
-THREADED_TEST(ScriptMakingExternalAsciiString) {
+THREADED_TEST(ScriptMakingExternalOneByteString) {
   int dispose_count = 0;
   const char* c_source = "1 + 2 * 3";
   {
@@ -565,7 +568,7 @@
     CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in survivor space now
     CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in old gen now
     bool success = source->MakeExternal(
-        new TestAsciiResource(i::StrDup(c_source), &dispose_count));
+        new TestOneByteResource(i::StrDup(c_source), &dispose_count));
     CHECK(success);
     Local<Script> script = v8_compile(source);
     Local<Value> value = script->Run();
@@ -628,7 +631,7 @@
 }
 
 
-TEST(MakingExternalAsciiStringConditions) {
+TEST(MakingExternalOneByteStringConditions) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -665,7 +668,7 @@
 }
 
 
-TEST(MakingExternalUnalignedAsciiString) {
+TEST(MakingExternalUnalignedOneByteString) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -685,12 +688,12 @@
 
   // Turn into external string with unaligned resource data.
   const char* c_cons = "_abcdefghijklmnopqrstuvwxyz";
-  bool success = cons->MakeExternal(
-      new TestAsciiResource(i::StrDup(c_cons), NULL, 1));
+  bool success =
+      cons->MakeExternal(new TestOneByteResource(i::StrDup(c_cons), NULL, 1));
   CHECK(success);
   const char* c_slice = "_bcdefghijklmnopqrstuvwxyz";
-  success = slice->MakeExternal(
-      new TestAsciiResource(i::StrDup(c_slice), NULL, 1));
+  success =
+      slice->MakeExternal(new TestOneByteResource(i::StrDup(c_slice), NULL, 1));
   CHECK(success);
 
   // Trigger GCs and force evacuation.
@@ -719,13 +722,13 @@
 }
 
 
-THREADED_TEST(UsingExternalAsciiString) {
+THREADED_TEST(UsingExternalOneByteString) {
   i::Factory* factory = CcTest::i_isolate()->factory();
   {
     v8::HandleScope scope(CcTest::isolate());
     const char* one_byte_string = "test string";
     Local<String> string = String::NewExternal(
-        CcTest::isolate(), new TestAsciiResource(i::StrDup(one_byte_string)));
+        CcTest::isolate(), new TestOneByteResource(i::StrDup(one_byte_string)));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
     // Trigger GCs so that the newly allocated string moves to old gen.
     CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in survivor space now
@@ -761,7 +764,7 @@
 }
 
 
-THREADED_TEST(ScavengeExternalAsciiString) {
+THREADED_TEST(ScavengeExternalOneByteString) {
   i::FLAG_stress_compaction = false;
   i::FLAG_gc_global = false;
   int dispose_count = 0;
@@ -771,7 +774,7 @@
     const char* one_byte_string = "test string";
     Local<String> string = String::NewExternal(
         CcTest::isolate(),
-        new TestAsciiResource(i::StrDup(one_byte_string), &dispose_count));
+        new TestOneByteResource(i::StrDup(one_byte_string), &dispose_count));
     i::Handle<i::String> istring = v8::Utils::OpenHandle(*string);
     CcTest::heap()->CollectGarbage(i::NEW_SPACE);
     in_new_space = CcTest::heap()->InNewSpace(*istring);
@@ -784,15 +787,14 @@
 }
 
 
-class TestAsciiResourceWithDisposeControl: public TestAsciiResource {
+class TestOneByteResourceWithDisposeControl : public TestOneByteResource {
  public:
   // Only used by non-threaded tests, so it can use static fields.
   static int dispose_calls;
   static int dispose_count;
 
-  TestAsciiResourceWithDisposeControl(const char* data, bool dispose)
-      : TestAsciiResource(data, &dispose_count),
-        dispose_(dispose) { }
+  TestOneByteResourceWithDisposeControl(const char* data, bool dispose)
+      : TestOneByteResource(data, &dispose_count), dispose_(dispose) {}
 
   void Dispose() {
     ++dispose_calls;
@@ -803,17 +805,17 @@
 };
 
 
-int TestAsciiResourceWithDisposeControl::dispose_count = 0;
-int TestAsciiResourceWithDisposeControl::dispose_calls = 0;
+int TestOneByteResourceWithDisposeControl::dispose_count = 0;
+int TestOneByteResourceWithDisposeControl::dispose_calls = 0;
 
 
 TEST(ExternalStringWithDisposeHandling) {
   const char* c_source = "1 + 2 * 3";
 
   // Use a stack allocated external string resource allocated object.
-  TestAsciiResourceWithDisposeControl::dispose_count = 0;
-  TestAsciiResourceWithDisposeControl::dispose_calls = 0;
-  TestAsciiResourceWithDisposeControl res_stack(i::StrDup(c_source), false);
+  TestOneByteResourceWithDisposeControl::dispose_count = 0;
+  TestOneByteResourceWithDisposeControl::dispose_calls = 0;
+  TestOneByteResourceWithDisposeControl res_stack(i::StrDup(c_source), false);
   {
     LocalContext env;
     v8::HandleScope scope(env->GetIsolate());
@@ -823,18 +825,18 @@
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
     CcTest::heap()->CollectAllAvailableGarbage();
-    CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
+    CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
   }
   CcTest::i_isolate()->compilation_cache()->Clear();
   CcTest::heap()->CollectAllAvailableGarbage();
-  CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
-  CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
+  CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_calls);
+  CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
 
   // Use a heap allocated external string resource allocated object.
-  TestAsciiResourceWithDisposeControl::dispose_count = 0;
-  TestAsciiResourceWithDisposeControl::dispose_calls = 0;
-  TestAsciiResource* res_heap =
-      new TestAsciiResourceWithDisposeControl(i::StrDup(c_source), true);
+  TestOneByteResourceWithDisposeControl::dispose_count = 0;
+  TestOneByteResourceWithDisposeControl::dispose_calls = 0;
+  TestOneByteResource* res_heap =
+      new TestOneByteResourceWithDisposeControl(i::StrDup(c_source), true);
   {
     LocalContext env;
     v8::HandleScope scope(env->GetIsolate());
@@ -844,12 +846,12 @@
     CHECK(value->IsNumber());
     CHECK_EQ(7, value->Int32Value());
     CcTest::heap()->CollectAllAvailableGarbage();
-    CHECK_EQ(0, TestAsciiResourceWithDisposeControl::dispose_count);
+    CHECK_EQ(0, TestOneByteResourceWithDisposeControl::dispose_count);
   }
   CcTest::i_isolate()->compilation_cache()->Clear();
   CcTest::heap()->CollectAllAvailableGarbage();
-  CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_calls);
-  CHECK_EQ(1, TestAsciiResourceWithDisposeControl::dispose_count);
+  CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_calls);
+  CHECK_EQ(1, TestOneByteResourceWithDisposeControl::dispose_count);
 }
 
 
@@ -873,7 +875,8 @@
 
     Local<String> source = String::Concat(left, right);
     right = String::NewExternal(
-        env->GetIsolate(), new TestAsciiResource(i::StrDup(one_byte_extern_1)));
+        env->GetIsolate(),
+        new TestOneByteResource(i::StrDup(one_byte_extern_1)));
     source = String::Concat(source, right);
     right = String::NewExternal(
         env->GetIsolate(),
@@ -1185,7 +1188,7 @@
       0, 234, -723,
       i::Smi::kMinValue, i::Smi::kMaxValue
   };
-  for (size_t i = 0; i < ARRAY_SIZE(int_values); i++) {
+  for (size_t i = 0; i < arraysize(int_values); i++) {
     for (int modifier = -1; modifier <= 1; modifier++) {
       int int_value = int_values[i] + modifier;
       // check int32_t
@@ -1217,7 +1220,7 @@
       kUndefinedReturnValue,
       kEmptyStringReturnValue
   };
-  for (size_t i = 0; i < ARRAY_SIZE(oddballs); i++) {
+  for (size_t i = 0; i < arraysize(oddballs); i++) {
     fast_return_value_void = oddballs[i];
     value = TestFastReturnValues<void>();
     switch (fast_return_value_void) {
@@ -1539,6 +1542,55 @@
 }
 
 
+THREADED_TEST(ArgumentsObject) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  v8::Handle<Value> arguments_object =
+      CompileRun("var out = 0; (function(){ out = arguments; })(1,2,3); out;");
+  CHECK(arguments_object->IsArgumentsObject());
+  v8::Handle<Value> array = CompileRun("[1,2,3]");
+  CHECK(!array->IsArgumentsObject());
+  v8::Handle<Value> object = CompileRun("({a: 42})");
+  CHECK(!object->IsArgumentsObject());
+}
+
+
+THREADED_TEST(IsMapOrSet) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  v8::Handle<Value> map = CompileRun("new Map()");
+  v8::Handle<Value> set = CompileRun("new Set()");
+  v8::Handle<Value> weak_map = CompileRun("new WeakMap()");
+  v8::Handle<Value> weak_set = CompileRun("new WeakSet()");
+  CHECK(map->IsMap());
+  CHECK(set->IsSet());
+  CHECK(weak_map->IsWeakMap());
+  CHECK(weak_set->IsWeakSet());
+
+  CHECK(!map->IsSet());
+  CHECK(!map->IsWeakMap());
+  CHECK(!map->IsWeakSet());
+
+  CHECK(!set->IsMap());
+  CHECK(!set->IsWeakMap());
+  CHECK(!set->IsWeakSet());
+
+  CHECK(!weak_map->IsMap());
+  CHECK(!weak_map->IsSet());
+  CHECK(!weak_map->IsWeakSet());
+
+  CHECK(!weak_set->IsMap());
+  CHECK(!weak_set->IsSet());
+  CHECK(!weak_set->IsWeakMap());
+
+  v8::Handle<Value> object = CompileRun("({a: 42})");
+  CHECK(!object->IsMap());
+  CHECK(!object->IsSet());
+  CHECK(!object->IsWeakMap());
+  CHECK(!object->IsWeakSet());
+}
+
+
 THREADED_TEST(StringObject) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -1876,6 +1928,24 @@
   self->Set(String::Concat(v8_str("accessor_"), name), value);
 }
 
+void SymbolAccessorGetter(Local<Name> name,
+                          const v8::PropertyCallbackInfo<v8::Value>& info) {
+  CHECK(name->IsSymbol());
+  Local<Symbol> sym = Local<Symbol>::Cast(name);
+  if (sym->Name()->IsUndefined())
+    return;
+  SimpleAccessorGetter(Local<String>::Cast(sym->Name()), info);
+}
+
+void SymbolAccessorSetter(Local<Name> name, Local<Value> value,
+                          const v8::PropertyCallbackInfo<void>& info) {
+  CHECK(name->IsSymbol());
+  Local<Symbol> sym = Local<Symbol>::Cast(name);
+  if (sym->Name()->IsUndefined())
+    return;
+  SimpleAccessorSetter(Local<String>::Cast(sym->Name()), value, info);
+}
+
 void EmptyInterceptorGetter(Local<String> name,
                             const v8::PropertyCallbackInfo<v8::Value>& info) {
 }
@@ -1934,6 +2004,14 @@
 }
 
 
+void AddAccessor(Handle<FunctionTemplate> templ,
+                 Handle<Name> name,
+                 v8::AccessorNameGetterCallback getter,
+                 v8::AccessorNameSetterCallback setter) {
+  templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
+}
+
+
 THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
   v8::HandleScope scope(CcTest::isolate());
   Handle<FunctionTemplate> parent = FunctionTemplate::New(CcTest::isolate());
@@ -1966,10 +2044,9 @@
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
   i::LookupResult lookup(i_isolate);
   i::Handle<i::String> name(v8::Utils::OpenHandle(*v8_str("length")));
-  a->LookupOwnRealNamedProperty(name, &lookup);
-  CHECK(lookup.IsPropertyCallbacks());
-  i::Handle<i::Object> callback(lookup.GetCallbackObject(), i_isolate);
-  CHECK(callback->IsExecutableAccessorInfo());
+  i::LookupIterator it(a, name, i::LookupIterator::OWN_SKIP_INTERCEPTOR);
+  CHECK_EQ(i::LookupIterator::ACCESSOR, it.state());
+  CHECK(it.GetAccessors()->IsExecutableAccessorInfo());
 }
 
 
@@ -2737,8 +2814,6 @@
 
 
 THREADED_TEST(SymbolProperties) {
-  i::FLAG_harmony_symbols = true;
-
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   v8::HandleScope scope(isolate);
@@ -2747,6 +2822,8 @@
   v8::Local<v8::Symbol> sym1 = v8::Symbol::New(isolate);
   v8::Local<v8::Symbol> sym2 =
       v8::Symbol::New(isolate, v8_str("my-symbol"));
+  v8::Local<v8::Symbol> sym3 =
+      v8::Symbol::New(isolate, v8_str("sym3"));
 
   CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
 
@@ -2802,31 +2879,62 @@
 
   CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
 
+  CHECK(obj->SetAccessor(sym3, SymbolAccessorGetter, SymbolAccessorSetter));
+  CHECK(obj->Get(sym3)->IsUndefined());
+  CHECK(obj->Set(sym3, v8::Integer::New(isolate, 42)));
+  CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
+  CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
+      v8::Integer::New(isolate, 42)));
+
   // Add another property and delete it afterwards to force the object in
   // slow case.
   CHECK(obj->Set(sym2, v8::Integer::New(isolate, 2008)));
   CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
   CHECK_EQ(2008, obj->Get(sym2)->Int32Value());
   CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
-  CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+  CHECK_EQ(2, obj->GetOwnPropertyNames()->Length());
 
   CHECK(obj->Has(sym1));
   CHECK(obj->Has(sym2));
+  CHECK(obj->Has(sym3));
+  CHECK(obj->Has(v8::String::NewFromUtf8(isolate, "accessor_sym3")));
   CHECK(obj->Delete(sym2));
   CHECK(obj->Has(sym1));
   CHECK(!obj->Has(sym2));
+  CHECK(obj->Has(sym3));
+  CHECK(obj->Has(v8::String::NewFromUtf8(isolate, "accessor_sym3")));
   CHECK_EQ(2002, obj->Get(sym1)->Int32Value());
-  CHECK_EQ(1, obj->GetOwnPropertyNames()->Length());
+  CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
+  CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
+      v8::Integer::New(isolate, 42)));
+  CHECK_EQ(2, obj->GetOwnPropertyNames()->Length());
 
   // Symbol properties are inherited.
   v8::Local<v8::Object> child = v8::Object::New(isolate);
   child->SetPrototype(obj);
   CHECK(child->Has(sym1));
   CHECK_EQ(2002, child->Get(sym1)->Int32Value());
+  CHECK(obj->Get(sym3)->Equals(v8::Integer::New(isolate, 42)));
+  CHECK(obj->Get(v8::String::NewFromUtf8(isolate, "accessor_sym3"))->Equals(
+      v8::Integer::New(isolate, 42)));
   CHECK_EQ(0, child->GetOwnPropertyNames()->Length());
 }
 
 
+THREADED_TEST(SymbolTemplateProperties) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+  v8::Local<v8::FunctionTemplate> foo = v8::FunctionTemplate::New(isolate);
+  v8::Local<v8::Name> name = v8::Symbol::New(isolate);
+  CHECK(!name.IsEmpty());
+  foo->PrototypeTemplate()->Set(name, v8::FunctionTemplate::New(isolate));
+  v8::Local<v8::Object> new_instance = foo->InstanceTemplate()->NewInstance();
+  CHECK(!new_instance.IsEmpty());
+  CHECK(new_instance->Has(name));
+}
+
+
 THREADED_TEST(PrivateProperties) {
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
@@ -2887,8 +2995,6 @@
 
 
 THREADED_TEST(GlobalSymbols) {
-  i::FLAG_harmony_symbols = true;
-
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   v8::HandleScope scope(isolate);
@@ -2913,6 +3019,29 @@
 }
 
 
+static void CheckWellKnownSymbol(v8::Local<v8::Symbol>(*getter)(v8::Isolate*),
+                                 const char* name) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+
+  v8::Local<v8::Symbol> symbol = getter(isolate);
+  std::string script = std::string("var sym = ") + name;
+  CompileRun(script.c_str());
+  v8::Local<Value> value = env->Global()->Get(v8_str("sym"));
+
+  CHECK(!value.IsEmpty());
+  CHECK(!symbol.IsEmpty());
+  CHECK(value->SameValue(symbol));
+}
+
+
+THREADED_TEST(WellKnownSymbols) {
+  CheckWellKnownSymbol(v8::Symbol::GetIterator, "Symbol.iterator");
+  CheckWellKnownSymbol(v8::Symbol::GetUnscopables, "Symbol.unscopables");
+}
+
+
 THREADED_TEST(GlobalPrivates) {
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
@@ -2972,7 +3101,7 @@
 
   CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
   uint8_t* data = static_cast<uint8_t*>(ab_contents.Data());
-  ASSERT(data != NULL);
+  DCHECK(data != NULL);
   env->Global()->Set(v8_str("ab"), ab);
 
   v8::Handle<v8::Value> result = CompileRun("ab.byteLength");
@@ -4153,7 +4282,7 @@
 static void check_message_0(v8::Handle<v8::Message> message,
                             v8::Handle<Value> data) {
   CHECK_EQ(5.76, data->NumberValue());
-  CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
+  CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
   CHECK(!message->IsSharedCrossOrigin());
   message_received = true;
 }
@@ -4227,7 +4356,7 @@
 static void check_message_3(v8::Handle<v8::Message> message,
                             v8::Handle<Value> data) {
   CHECK(message->IsSharedCrossOrigin());
-  CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
+  CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
   message_received = true;
 }
 
@@ -4256,7 +4385,7 @@
 static void check_message_4(v8::Handle<v8::Message> message,
                             v8::Handle<Value> data) {
   CHECK(!message->IsSharedCrossOrigin());
-  CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
+  CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
   message_received = true;
 }
 
@@ -4285,7 +4414,7 @@
 static void check_message_5a(v8::Handle<v8::Message> message,
                             v8::Handle<Value> data) {
   CHECK(message->IsSharedCrossOrigin());
-  CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
+  CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
   message_received = true;
 }
 
@@ -4293,7 +4422,7 @@
 static void check_message_5b(v8::Handle<v8::Message> message,
                             v8::Handle<Value> data) {
   CHECK(!message->IsSharedCrossOrigin());
-  CHECK_EQ(6.75, message->GetScriptResourceName()->NumberValue());
+  CHECK_EQ(6.75, message->GetScriptOrigin().ResourceName()->NumberValue());
   message_received = true;
 }
 
@@ -4373,7 +4502,7 @@
   CHECK_EQ(v8::None, context->Global()->GetPropertyAttributes(prop));
   // read-only
   prop = v8_str("read_only");
-  context->Global()->Set(prop, v8_num(7), v8::ReadOnly);
+  context->Global()->ForceSet(prop, v8_num(7), v8::ReadOnly);
   CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
   CHECK_EQ(v8::ReadOnly, context->Global()->GetPropertyAttributes(prop));
   CompileRun("read_only = 9");
@@ -4382,14 +4511,14 @@
   CHECK_EQ(7, context->Global()->Get(prop)->Int32Value());
   // dont-delete
   prop = v8_str("dont_delete");
-  context->Global()->Set(prop, v8_num(13), v8::DontDelete);
+  context->Global()->ForceSet(prop, v8_num(13), v8::DontDelete);
   CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
   CompileRun("delete dont_delete");
   CHECK_EQ(13, context->Global()->Get(prop)->Int32Value());
   CHECK_EQ(v8::DontDelete, context->Global()->GetPropertyAttributes(prop));
   // dont-enum
   prop = v8_str("dont_enum");
-  context->Global()->Set(prop, v8_num(28), v8::DontEnum);
+  context->Global()->ForceSet(prop, v8_num(28), v8::DontEnum);
   CHECK_EQ(v8::DontEnum, context->Global()->GetPropertyAttributes(prop));
   // absent
   prop = v8_str("absent");
@@ -5312,15 +5441,28 @@
 }
 
 
-static void TryCatchNestedHelper(int depth) {
+static void TryCatchNested1Helper(int depth) {
   if (depth > 0) {
     v8::TryCatch try_catch;
     try_catch.SetVerbose(true);
-    TryCatchNestedHelper(depth - 1);
+    TryCatchNested1Helper(depth - 1);
     CHECK(try_catch.HasCaught());
     try_catch.ReThrow();
   } else {
-    CcTest::isolate()->ThrowException(v8_str("back"));
+    CcTest::isolate()->ThrowException(v8_str("E1"));
+  }
+}
+
+
+static void TryCatchNested2Helper(int depth) {
+  if (depth > 0) {
+    v8::TryCatch try_catch;
+    try_catch.SetVerbose(true);
+    TryCatchNested2Helper(depth - 1);
+    CHECK(try_catch.HasCaught());
+    try_catch.ReThrow();
+  } else {
+    CompileRun("throw 'E2';");
   }
 }
 
@@ -5329,17 +5471,29 @@
   v8::V8::Initialize();
   LocalContext context;
   v8::HandleScope scope(context->GetIsolate());
-  v8::TryCatch try_catch;
-  TryCatchNestedHelper(5);
-  CHECK(try_catch.HasCaught());
-  CHECK_EQ(0, strcmp(*v8::String::Utf8Value(try_catch.Exception()), "back"));
+
+  {
+    // Test nested try-catch with a native throw in the end.
+    v8::TryCatch try_catch;
+    TryCatchNested1Helper(5);
+    CHECK(try_catch.HasCaught());
+    CHECK_EQ(0, strcmp(*v8::String::Utf8Value(try_catch.Exception()), "E1"));
+  }
+
+  {
+    // Test nested try-catch with a JavaScript throw in the end.
+    v8::TryCatch try_catch;
+    TryCatchNested2Helper(5);
+    CHECK(try_catch.HasCaught());
+    CHECK_EQ(0, strcmp(*v8::String::Utf8Value(try_catch.Exception()), "E2"));
+  }
 }
 
 
 void TryCatchMixedNestingCheck(v8::TryCatch* try_catch) {
   CHECK(try_catch->HasCaught());
   Handle<Message> message = try_catch->Message();
-  Handle<Value> resource = message->GetScriptResourceName();
+  Handle<Value> resource = message->GetScriptOrigin().ResourceName();
   CHECK_EQ(0, strcmp(*v8::String::Utf8Value(resource), "inner"));
   CHECK_EQ(0, strcmp(*v8::String::Utf8Value(message->Get()),
                      "Uncaught Error: a"));
@@ -5378,6 +5532,53 @@
 }
 
 
+void TryCatchNativeHelper(const v8::FunctionCallbackInfo<v8::Value>& args) {
+  ApiTestFuzzer::Fuzz();
+  v8::TryCatch try_catch;
+  args.GetIsolate()->ThrowException(v8_str("boom"));
+  CHECK(try_catch.HasCaught());
+}
+
+
+TEST(TryCatchNative) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  v8::V8::Initialize();
+  v8::TryCatch try_catch;
+  Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+  templ->Set(v8_str("TryCatchNativeHelper"),
+             v8::FunctionTemplate::New(isolate, TryCatchNativeHelper));
+  LocalContext context(0, templ);
+  CompileRun("TryCatchNativeHelper();");
+  CHECK(!try_catch.HasCaught());
+}
+
+
+void TryCatchNativeResetHelper(
+    const v8::FunctionCallbackInfo<v8::Value>& args) {
+  ApiTestFuzzer::Fuzz();
+  v8::TryCatch try_catch;
+  args.GetIsolate()->ThrowException(v8_str("boom"));
+  CHECK(try_catch.HasCaught());
+  try_catch.Reset();
+  CHECK(!try_catch.HasCaught());
+}
+
+
+TEST(TryCatchNativeReset) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  v8::V8::Initialize();
+  v8::TryCatch try_catch;
+  Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
+  templ->Set(v8_str("TryCatchNativeResetHelper"),
+             v8::FunctionTemplate::New(isolate, TryCatchNativeResetHelper));
+  LocalContext context(0, templ);
+  CompileRun("TryCatchNativeResetHelper();");
+  CHECK(!try_catch.HasCaught());
+}
+
+
 THREADED_TEST(Equality) {
   LocalContext context;
   v8::Isolate* isolate = context->GetIsolate();
@@ -5399,7 +5600,7 @@
   CHECK(v8_num(1)->StrictEquals(v8_num(1)));
   CHECK(!v8_num(1)->StrictEquals(v8_num(2)));
   CHECK(v8_num(0.0)->StrictEquals(v8_num(-0.0)));
-  Local<Value> not_a_number = v8_num(i::OS::nan_value());
+  Local<Value> not_a_number = v8_num(v8::base::OS::nan_value());
   CHECK(!not_a_number->StrictEquals(not_a_number));
   CHECK(v8::False(isolate)->StrictEquals(v8::False(isolate)));
   CHECK(!v8::False(isolate)->StrictEquals(v8::Undefined(isolate)));
@@ -6131,15 +6332,17 @@
   context->Global()->Set(v8_str("obj"), obj);
 
   const char* code =
-      "try {"
-      "  for (var i = 0; i < 100; i++) {"
+      "var result = 'PASSED';"
+      "for (var i = 0; i < 100; i++) {"
+      "  try {"
       "    var v = obj[0];"
-      "    if (v != undefined) throw 'Wrong value ' + v + ' at iteration ' + i;"
+      "    result = 'Wrong value ' + v + ' at iteration ' + i;"
+      "    break;"
+      "  } catch (e) {"
+      "    /* pass */"
       "  }"
-      "  'PASSED'"
-      "} catch(e) {"
-      "  e"
-      "}";
+      "}"
+      "result";
   ExpectString(code, "PASSED");
 }
 
@@ -6156,21 +6359,29 @@
   context->Global()->Set(v8_str("obj"), obj);
 
   const char* code =
-      "try {"
-      "  for (var i = 0; i < 100; i++) {"
-      "    var expected = i;"
-      "    if (i == 5) {"
-      "      %EnableAccessChecks(obj);"
-      "      expected = undefined;"
-      "    }"
-      "    var v = obj[i];"
-      "    if (v != expected) throw 'Wrong value ' + v + ' at iteration ' + i;"
-      "    if (i == 5) %DisableAccessChecks(obj);"
+      "var result = 'PASSED';"
+      "for (var i = 0; i < 100; i++) {"
+      "  var expected = i;"
+      "  if (i == 5) {"
+      "    %EnableAccessChecks(obj);"
       "  }"
-      "  'PASSED'"
-      "} catch(e) {"
-      "  e"
-      "}";
+      "  try {"
+      "    var v = obj[i];"
+      "    if (i == 5) {"
+      "      result = 'Should not have reached this!';"
+      "      break;"
+      "    } else if (v != expected) {"
+      "      result = 'Wrong value ' + v + ' at iteration ' + i;"
+      "      break;"
+      "    }"
+      "  } catch (e) {"
+      "    if (i != 5) {"
+      "      result = e;"
+      "    }"
+      "  }"
+      "  if (i == 5) %DisableAccessChecks(obj);"
+      "}"
+      "result";
   ExpectString(code, "PASSED");
 }
 
@@ -6694,6 +6905,33 @@
 }
 
 
+static const char* kStackTraceFromExtensionSource =
+  "function foo() {"
+  "  throw new Error();"
+  "}"
+  "function bar() {"
+  "  foo();"
+  "}";
+
+
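+// Frames originating in extension code ('foo' and 'bar') are expected to be
+// filtered out of Error.stack; only the user-defined frame should remain.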
+TEST(StackTraceInExtension) {
+  v8::HandleScope handle_scope(CcTest::isolate());
+  v8::RegisterExtension(new Extension("stacktracetest",
+                        kStackTraceFromExtensionSource));
+  const char* extension_names[] = { "stacktracetest" };
+  v8::ExtensionConfiguration extensions(1, extension_names);
+  v8::Handle<Context> context =
+      Context::New(CcTest::isolate(), &extensions);
+  Context::Scope lock(context);
+  CompileRun("function user() { bar(); }"
+             "var error;"
+             "try{ user(); } catch (e) { error = e; }");
+  CHECK_EQ(-1, CompileRun("error.stack.indexOf('foo')")->Int32Value());
+  CHECK_EQ(-1, CompileRun("error.stack.indexOf('bar')")->Int32Value());
+  CHECK_NE(-1, CompileRun("error.stack.indexOf('user')")->Int32Value());
+}
+
+
 TEST(NullExtensions) {
   v8::HandleScope handle_scope(CcTest::isolate());
   v8::RegisterExtension(new Extension("nulltest", NULL));
@@ -7110,8 +7348,9 @@
 
 static void MissingScriptInfoMessageListener(v8::Handle<v8::Message> message,
                                              v8::Handle<Value> data) {
-  CHECK(message->GetScriptResourceName()->IsUndefined());
-  CHECK_EQ(v8::Undefined(CcTest::isolate()), message->GetScriptResourceName());
+  CHECK(message->GetScriptOrigin().ResourceName()->IsUndefined());
+  CHECK_EQ(v8::Undefined(CcTest::isolate()),
+      message->GetScriptOrigin().ResourceName());
   message->GetLineNumber();
   message->GetSourceLine();
 }
@@ -8296,9 +8535,9 @@
 static const char* script_resource_name = "ExceptionInNativeScript.js";
 static void ExceptionInNativeScriptTestListener(v8::Handle<v8::Message> message,
                                                 v8::Handle<Value>) {
-  v8::Handle<v8::Value> name_val = message->GetScriptResourceName();
+  v8::Handle<v8::Value> name_val = message->GetScriptOrigin().ResourceName();
   CHECK(!name_val.IsEmpty() && name_val->IsString());
-  v8::String::Utf8Value name(message->GetScriptResourceName());
+  v8::String::Utf8Value name(message->GetScriptOrigin().ResourceName());
   CHECK_EQ(script_resource_name, *name);
   CHECK_EQ(3, message->GetLineNumber());
   v8::String::Utf8Value source_line(message->GetSourceLine());
@@ -8553,10 +8792,8 @@
   v8::Local<Script> access_other0 = v8_compile("other.Object");
   v8::Local<Script> access_other1 = v8_compile("other[42]");
   for (int i = 0; i < 5; i++) {
-    CHECK(!access_other0->Run()->Equals(other_object));
-    CHECK(access_other0->Run()->IsUndefined());
-    CHECK(!access_other1->Run()->Equals(v8_num(87)));
-    CHECK(access_other1->Run()->IsUndefined());
+    CHECK(access_other0->Run().IsEmpty());
+    CHECK(access_other1->Run().IsEmpty());
   }
 
   // Create an object that has 'other' in its prototype chain and make
@@ -8568,10 +8805,8 @@
   v8::Local<Script> access_f0 = v8_compile("f.Object");
   v8::Local<Script> access_f1 = v8_compile("f[42]");
   for (int j = 0; j < 5; j++) {
-    CHECK(!access_f0->Run()->Equals(other_object));
-    CHECK(access_f0->Run()->IsUndefined());
-    CHECK(!access_f1->Run()->Equals(v8_num(87)));
-    CHECK(access_f1->Run()->IsUndefined());
+    CHECK(access_f0->Run().IsEmpty());
+    CHECK(access_f1->Run().IsEmpty());
   }
 
   // Now it gets hairy: Set the prototype for the other global object
@@ -8590,10 +8825,8 @@
   Local<Script> access_f2 = v8_compile("f.foo");
   Local<Script> access_f3 = v8_compile("f[99]");
   for (int k = 0; k < 5; k++) {
-    CHECK(!access_f2->Run()->Equals(v8_num(100)));
-    CHECK(access_f2->Run()->IsUndefined());
-    CHECK(!access_f3->Run()->Equals(v8_num(101)));
-    CHECK(access_f3->Run()->IsUndefined());
+    CHECK(access_f2->Run().IsEmpty());
+    CHECK(access_f3->Run().IsEmpty());
   }
 }
 
@@ -8674,7 +8907,7 @@
     Context::Scope scope_env2(env2);
     Local<Value> result =
         CompileRun("delete env1.prop");
-    CHECK(result->IsFalse());
+    CHECK(result.IsEmpty());
   }
 
   // Check that env1.prop still exists.
@@ -8712,7 +8945,7 @@
   {
     Context::Scope scope_env2(env2);
     Local<Value> result = CompileRun(test);
-    CHECK(result->IsFalse());
+    CHECK(result.IsEmpty());
   }
 }
 
@@ -8739,11 +8972,18 @@
   env2->SetSecurityToken(bar);
   {
     Context::Scope scope_env2(env2);
-    Local<Value> result =
-        CompileRun("(function(){var obj = {'__proto__':env1};"
-                   "for (var p in obj)"
-                   "   if (p == 'prop') return false;"
-                   "return true;})()");
+    Local<Value> result = CompileRun(
+        "(function() {"
+        "  var obj = { '__proto__': env1 };"
+        "  try {"
+        "    for (var p in obj) {"
+        "      if (p == 'prop') return false;"
+        "    }"
+        "    return false;"
+        "  } catch (e) {"
+        "    return true;"
+        "  }"
+        "})()");
     CHECK(result->IsTrue());
   }
 }
@@ -8805,7 +9045,7 @@
   // Check that env3 is not accessible from env1
   {
     Local<Value> r = global3->Get(v8_str("prop2"));
-    CHECK(r->IsUndefined());
+    CHECK(r.IsEmpty());
   }
 }
 
@@ -8844,7 +9084,7 @@
   // Check that the global has been detached. No other.p property can
   // be found.
   result = CompileRun("other.p");
-  CHECK(result->IsUndefined());
+  CHECK(result.IsEmpty());
 
   // Reuse global2 for env3.
   v8::Handle<Context> env3 = Context::New(env1->GetIsolate(),
@@ -8874,7 +9114,7 @@
   // the global object for env3 which has a different security token,
   // so access should be blocked.
   result = CompileRun("other.p");
-  CHECK(result->IsUndefined());
+  CHECK(result.IsEmpty());
 }
 
 
@@ -8927,9 +9167,9 @@
   result = CompileRun("bound_x()");
   CHECK_EQ(v8_str("env2_x"), result);
   result = CompileRun("get_x()");
-  CHECK(result->IsUndefined());
+  CHECK(result.IsEmpty());
   result = CompileRun("get_x_w()");
-  CHECK(result->IsUndefined());
+  CHECK(result.IsEmpty());
   result = CompileRun("this_x()");
   CHECK_EQ(v8_str("env2_x"), result);
 
@@ -9019,19 +9259,13 @@
 }
 
 
-static int g_echo_value_1 = -1;
-static int g_echo_value_2 = -1;
+static int g_echo_value = -1;
 
 
 static void EchoGetter(
     Local<String> name,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
-  info.GetReturnValue().Set(v8_num(g_echo_value_1));
-}
-
-
-static void EchoGetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
-  info.GetReturnValue().Set(v8_num(g_echo_value_2));
+  info.GetReturnValue().Set(v8_num(g_echo_value));
 }
 
 
@@ -9039,14 +9273,7 @@
                        Local<Value> value,
                        const v8::PropertyCallbackInfo<void>&) {
   if (value->IsNumber())
-    g_echo_value_1 = value->Int32Value();
-}
-
-
-static void EchoSetter(const v8::FunctionCallbackInfo<v8::Value>& info) {
-  v8::Handle<v8::Value> value = info[0];
-  if (value->IsNumber())
-    g_echo_value_2 = value->Int32Value();
+    g_echo_value = value->Int32Value();
 }
 
 
@@ -9087,13 +9314,6 @@
       v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));
 
 
-  global_template->SetAccessorProperty(
-      v8_str("accessible_js_prop"),
-      v8::FunctionTemplate::New(isolate, EchoGetter),
-      v8::FunctionTemplate::New(isolate, EchoSetter),
-      v8::None,
-      v8::AccessControl(v8::ALL_CAN_READ | v8::ALL_CAN_WRITE));
-
   // Add an accessor that is not accessible by cross-domain JS code.
   global_template->SetAccessor(v8_str("blocked_prop"),
                                UnreachableGetter, UnreachableSetter,
@@ -9145,54 +9365,35 @@
   // Access blocked property.
   CompileRun("other.blocked_prop = 1");
 
-  ExpectUndefined("other.blocked_prop");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'blocked_prop')");
-  ExpectFalse("propertyIsEnumerable.call(other, 'blocked_prop')");
-
-  // Enable ACCESS_HAS
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  ExpectUndefined("other.blocked_prop");
-  // ... and now we can get the descriptor...
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'blocked_prop').value");
-  // ... and enumerate the property.
-  ExpectTrue("propertyIsEnumerable.call(other, 'blocked_prop')");
-  allowed_access_type[v8::ACCESS_HAS] = false;
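+  // A failed access check now throws rather than yielding undefined, so
+  // these expressions should evaluate to empty handles.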
+  CHECK(CompileRun("other.blocked_prop").IsEmpty());
+  CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, 'blocked_prop')")
+            .IsEmpty());
+  CHECK(
+      CompileRun("propertyIsEnumerable.call(other, 'blocked_prop')").IsEmpty());
 
   // Access blocked element.
-  CompileRun("other[239] = 1");
+  CHECK(CompileRun("other[239] = 1").IsEmpty());
 
-  ExpectUndefined("other[239]");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239')");
-  ExpectFalse("propertyIsEnumerable.call(other, '239')");
+  CHECK(CompileRun("other[239]").IsEmpty());
+  CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, '239')").IsEmpty());
+  CHECK(CompileRun("propertyIsEnumerable.call(other, '239')").IsEmpty());
 
   // Enable ACCESS_HAS
   allowed_access_type[v8::ACCESS_HAS] = true;
-  ExpectUndefined("other[239]");
+  CHECK(CompileRun("other[239]").IsEmpty());
   // ... and now we can get the descriptor...
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '239').value");
+  CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, '239').value")
+            .IsEmpty());
   // ... and enumerate the property.
   ExpectTrue("propertyIsEnumerable.call(other, '239')");
   allowed_access_type[v8::ACCESS_HAS] = false;
 
   // Access a property with JS accessor.
-  CompileRun("other.js_accessor_p = 2");
+  CHECK(CompileRun("other.js_accessor_p = 2").IsEmpty());
 
-  ExpectUndefined("other.js_accessor_p");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p')");
-
-  // Enable ACCESS_HAS.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  ExpectUndefined("other.js_accessor_p");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
-  allowed_access_type[v8::ACCESS_HAS] = false;
+  CHECK(CompileRun("other.js_accessor_p").IsEmpty());
+  CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, 'js_accessor_p')")
+            .IsEmpty());
 
   // Enable both ACCESS_HAS and ACCESS_GET.
   allowed_access_type[v8::ACCESS_HAS] = true;
@@ -9201,59 +9402,19 @@
   ExpectString("other.js_accessor_p", "getter");
   ExpectObject(
       "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
-
-  allowed_access_type[v8::ACCESS_GET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS and ACCESS_SET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_SET] = true;
-
-  ExpectUndefined("other.js_accessor_p");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get");
   ExpectObject(
       "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
   ExpectUndefined(
       "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
 
-  allowed_access_type[v8::ACCESS_SET] = false;
   allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS, ACCESS_GET and ACCESS_SET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_GET] = true;
-  allowed_access_type[v8::ACCESS_SET] = true;
-
-  ExpectString("other.js_accessor_p", "getter");
-  ExpectObject(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').get", getter);
-  ExpectObject(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').set", setter);
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'js_accessor_p').value");
-
-  allowed_access_type[v8::ACCESS_SET] = false;
   allowed_access_type[v8::ACCESS_GET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
 
   // Access an element with JS accessor.
-  CompileRun("other[42] = 2");
+  CHECK(CompileRun("other[42] = 2").IsEmpty());
 
-  ExpectUndefined("other[42]");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42')");
-
-  // Enable ACCESS_HAS.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  ExpectUndefined("other[42]");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
-  allowed_access_type[v8::ACCESS_HAS] = false;
+  CHECK(CompileRun("other[42]").IsEmpty());
+  CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, '42')").IsEmpty());
 
   // Enable both ACCESS_HAS and ACCESS_GET.
   allowed_access_type[v8::ACCESS_HAS] = true;
@@ -9261,37 +9422,11 @@
 
   ExpectString("other[42]", "el_getter");
   ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').set");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
-
-  allowed_access_type[v8::ACCESS_GET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS and ACCESS_SET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_SET] = true;
-
-  ExpectUndefined("other[42]");
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').get");
   ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
   ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
 
-  allowed_access_type[v8::ACCESS_SET] = false;
   allowed_access_type[v8::ACCESS_HAS] = false;
-
-  // Enable both ACCESS_HAS, ACCESS_GET and ACCESS_SET.
-  allowed_access_type[v8::ACCESS_HAS] = true;
-  allowed_access_type[v8::ACCESS_GET] = true;
-  allowed_access_type[v8::ACCESS_SET] = true;
-
-  ExpectString("other[42]", "el_getter");
-  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').get", el_getter);
-  ExpectObject("Object.getOwnPropertyDescriptor(other, '42').set", el_setter);
-  ExpectUndefined("Object.getOwnPropertyDescriptor(other, '42').value");
-
-  allowed_access_type[v8::ACCESS_SET] = false;
   allowed_access_type[v8::ACCESS_GET] = false;
-  allowed_access_type[v8::ACCESS_HAS] = false;
 
   v8::Handle<Value> value;
 
@@ -9299,50 +9434,38 @@
   value = CompileRun("other.accessible_prop = 3");
   CHECK(value->IsNumber());
   CHECK_EQ(3, value->Int32Value());
-  CHECK_EQ(3, g_echo_value_1);
-
-  // Access accessible js property
-  value = CompileRun("other.accessible_js_prop = 3");
-  CHECK(value->IsNumber());
-  CHECK_EQ(3, value->Int32Value());
-  CHECK_EQ(3, g_echo_value_2);
+  CHECK_EQ(3, g_echo_value);
 
   value = CompileRun("other.accessible_prop");
   CHECK(value->IsNumber());
   CHECK_EQ(3, value->Int32Value());
 
-  value = CompileRun("other.accessible_js_prop");
-  CHECK(value->IsNumber());
-  CHECK_EQ(3, value->Int32Value());
-
   value = CompileRun(
       "Object.getOwnPropertyDescriptor(other, 'accessible_prop').value");
   CHECK(value->IsNumber());
   CHECK_EQ(3, value->Int32Value());
 
-  value = CompileRun(
-      "Object.getOwnPropertyDescriptor(other, 'accessible_js_prop').get()");
-  CHECK(value->IsNumber());
-  CHECK_EQ(3, value->Int32Value());
-
   value = CompileRun("propertyIsEnumerable.call(other, 'accessible_prop')");
   CHECK(value->IsTrue());
 
-  value = CompileRun("propertyIsEnumerable.call(other, 'accessible_js_prop')");
-  CHECK(value->IsTrue());
-
   // Enumeration doesn't enumerate accessors from inaccessible objects in
   // the prototype chain even if the accessors are in themselves accessible.
-  value =
-      CompileRun("(function(){var obj = {'__proto__':other};"
-                 "for (var p in obj)"
-                 "   if (p == 'accessible_prop' ||"
-                 "       p == 'accessible_js_prop' ||"
-                 "       p == 'blocked_js_prop' ||"
-                 "       p == 'blocked_js_prop') {"
-                 "     return false;"
-                 "   }"
-                 "return true;})()");
+  value = CompileRun(
+      "(function() {"
+      "  var obj = { '__proto__': other };"
+      "  try {"
+      "    for (var p in obj) {"
+      "      if (p == 'accessible_prop' ||"
+      "          p == 'blocked_js_prop' ||"
+      "          p == 'blocked_js_prop') {"
+      "        return false;"
+      "      }"
+      "    }"
+      "    return false;"
+      "  } catch (e) {"
+      "    return true;"
+      "  }"
+      "})()");
   CHECK(value->IsTrue());
 
   context1->Exit();
@@ -9385,16 +9508,15 @@
   global1->Set(v8_str("other"), global0);
 
   // Regression test for issue 1154.
-  ExpectTrue("Object.keys(other).indexOf('blocked_prop') == -1");
-
-  ExpectUndefined("other.blocked_prop");
+  CHECK(CompileRun("Object.keys(other)").IsEmpty());
+  CHECK(CompileRun("other.blocked_prop").IsEmpty());
 
   // Regression test for issue 1027.
   CompileRun("Object.defineProperty(\n"
              "  other, 'blocked_prop', {configurable: false})");
-  ExpectUndefined("other.blocked_prop");
-  ExpectUndefined(
-      "Object.getOwnPropertyDescriptor(other, 'blocked_prop')");
+  CHECK(CompileRun("other.blocked_prop").IsEmpty());
+  CHECK(CompileRun("Object.getOwnPropertyDescriptor(other, 'blocked_prop')")
+            .IsEmpty());
 
   // Regression test for issue 1171.
   ExpectTrue("Object.isExtensible(other)");
@@ -9412,7 +9534,7 @@
   // Make sure that we can set the accessible accessors value using normal
   // assignment.
   CompileRun("other.accessible_prop = 42");
-  CHECK_EQ(42, g_echo_value_1);
+  CHECK_EQ(42, g_echo_value);
 
   v8::Handle<Value> value;
   CompileRun("Object.defineProperty(other, 'accessible_prop', {value: -1})");
@@ -9421,18 +9543,14 @@
 }
 
 
-static bool GetOwnPropertyNamesNamedBlocker(Local<v8::Object> global,
-                                            Local<Value> name,
-                                            v8::AccessType type,
-                                            Local<Value> data) {
+static bool BlockEverythingNamed(Local<v8::Object> object, Local<Value> name,
+                                 v8::AccessType type, Local<Value> data) {
   return false;
 }
 
 
-static bool GetOwnPropertyNamesIndexedBlocker(Local<v8::Object> global,
-                                              uint32_t key,
-                                              v8::AccessType type,
-                                              Local<Value> data) {
+static bool BlockEverythingIndexed(Local<v8::Object> object, uint32_t key,
+                                   v8::AccessType type, Local<Value> data) {
   return false;
 }
 
@@ -9444,8 +9562,8 @@
       v8::ObjectTemplate::New(isolate);
 
   obj_template->Set(v8_str("x"), v8::Integer::New(isolate, 42));
-  obj_template->SetAccessCheckCallbacks(GetOwnPropertyNamesNamedBlocker,
-                                        GetOwnPropertyNamesIndexedBlocker);
+  obj_template->SetAccessCheckCallbacks(BlockEverythingNamed,
+                                        BlockEverythingIndexed);
 
   // Create an environment
   v8::Local<Context> context0 = Context::New(isolate, NULL, obj_template);
@@ -9470,16 +9588,36 @@
   // proxy object.  Accessing the object that requires access checks
   // is blocked by the access checks on the object itself.
   value = CompileRun("Object.getOwnPropertyNames(other).length == 0");
-  CHECK(value->IsTrue());
+  CHECK(value.IsEmpty());
 
   value = CompileRun("Object.getOwnPropertyNames(object).length == 0");
-  CHECK(value->IsTrue());
+  CHECK(value.IsEmpty());
 
   context1->Exit();
   context0->Exit();
 }
 
 
+TEST(SuperAccessControl) {
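+  // Accessing a super property on a receiver that fails its access check is
+  // expected to throw, which the TryCatch below verifies.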
+  i::FLAG_harmony_classes = true;
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  v8::Handle<v8::ObjectTemplate> obj_template =
+      v8::ObjectTemplate::New(isolate);
+  obj_template->SetAccessCheckCallbacks(BlockEverythingNamed,
+                                        BlockEverythingIndexed);
+  LocalContext env;
+  env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
+
+  v8::TryCatch try_catch;
+  CompileRun(
+      "function f() { return super.hasOwnProperty; };"
+      "var m = f.toMethod(prohibited);"
+      "m();");
+  CHECK(try_catch.HasCaught());
+}
+
+
 static void IndexedPropertyEnumerator(
     const v8::PropertyCallbackInfo<v8::Array>& info) {
   v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
@@ -9581,7 +9719,7 @@
   CHECK_EQ(10, value->Int32Value());
 
   value = v8_compile("other.unreachable")->Run();
-  CHECK(value->IsUndefined());
+  CHECK(value.IsEmpty());
 
   context1->Exit();
   context0->Exit();
@@ -10220,7 +10358,7 @@
   int hash = o->GetIdentityHash();
   USE(hash);
   o->Set(v8_str("foo"), v8_num(42));
-  ASSERT_EQ(hash, o->GetIdentityHash());
+  DCHECK_EQ(hash, o->GetIdentityHash());
 }
 
 
@@ -10443,7 +10581,7 @@
   v8::TryCatch try_catch;
   CHECK(!o1->SetPrototype(o0));
   CHECK(!try_catch.HasCaught());
-  ASSERT(!CcTest::i_isolate()->has_pending_exception());
+  DCHECK(!CcTest::i_isolate()->has_pending_exception());
 
   CHECK_EQ(42, CompileRun("function f() { return 42; }; f()")->Int32Value());
 }
@@ -13028,7 +13166,7 @@
   Handle<String> errorMessageString = message->Get();
   CHECK(!errorMessageString.IsEmpty());
   message->GetStackTrace();
-  message->GetScriptResourceName();
+  message->GetScriptOrigin().ResourceName();
 }
 
 
@@ -13221,7 +13359,7 @@
 
 
 bool ApiTestFuzzer::fuzzing_ = false;
-i::Semaphore ApiTestFuzzer::all_tests_done_(0);
+v8::base::Semaphore ApiTestFuzzer::all_tests_done_(0);
 int ApiTestFuzzer::active_tests_;
 int ApiTestFuzzer::tests_being_run_;
 int ApiTestFuzzer::current_;
@@ -13549,21 +13687,21 @@
     { v8::HandleScope scope(CcTest::isolate());
       LocalContext context;
     }
-    v8::V8::ContextDisposedNotification();
+    CcTest::isolate()->ContextDisposedNotification();
     CheckSurvivingGlobalObjectsCount(0);
 
     { v8::HandleScope scope(CcTest::isolate());
       LocalContext context;
       v8_compile("Date")->Run();
     }
-    v8::V8::ContextDisposedNotification();
+    CcTest::isolate()->ContextDisposedNotification();
     CheckSurvivingGlobalObjectsCount(0);
 
     { v8::HandleScope scope(CcTest::isolate());
       LocalContext context;
       v8_compile("/aaa/")->Run();
     }
-    v8::V8::ContextDisposedNotification();
+    CcTest::isolate()->ContextDisposedNotification();
     CheckSurvivingGlobalObjectsCount(0);
 
     { v8::HandleScope scope(CcTest::isolate());
@@ -13572,7 +13710,7 @@
       LocalContext context(&extensions);
       v8_compile("gc();")->Run();
     }
-    v8::V8::ContextDisposedNotification();
+    CcTest::isolate()->ContextDisposedNotification();
     CheckSurvivingGlobalObjectsCount(0);
   }
 }
@@ -14026,12 +14164,12 @@
   CompileRun(script);
   bar_func_ = i::Handle<i::JSFunction>::cast(
           v8::Utils::OpenHandle(*env->Global()->Get(v8_str("bar"))));
-  ASSERT(!bar_func_.is_null());
+  DCHECK(!bar_func_.is_null());
 
   foo_func_ =
       i::Handle<i::JSFunction>::cast(
            v8::Utils::OpenHandle(*env->Global()->Get(v8_str("foo"))));
-  ASSERT(!foo_func_.is_null());
+  DCHECK(!foo_func_.is_null());
 
   v8::Handle<v8::Value> value = CompileRun("bar();");
   CHECK(value->IsNumber());
@@ -14049,19 +14187,14 @@
 
 void SetFunctionEntryHookTest::RunTest() {
   // Work in a new isolate throughout.
-  v8::Isolate* isolate = v8::Isolate::New();
-
-  // Test setting the entry hook on the new isolate.
-  CHECK(v8::V8::SetFunctionEntryHook(isolate, EntryHook));
-
-  // Replacing the hook, once set should fail.
-  CHECK_EQ(false, v8::V8::SetFunctionEntryHook(isolate, EntryHook));
+  v8::Isolate::CreateParams create_params;
+  create_params.entry_hook = EntryHook;
+  create_params.code_event_handler = JitEvent;
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
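+  // Both hooks are now installed at isolate creation time through
+  // CreateParams instead of via post-creation setters.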
 
   {
     v8::Isolate::Scope scope(isolate);
 
-    v8::V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, JitEvent);
-
     RunLoopInNewEnv(isolate);
 
     // Check the expected invocation counts.
@@ -14089,9 +14222,6 @@
     // We should record no invocations in this isolate.
     CHECK_EQ(0, static_cast<int>(invocations_.size()));
   }
-  // Since the isolate has been used, we shouldn't be able to set an entry
-  // hook anymore.
-  CHECK_EQ(false, v8::V8::SetFunctionEntryHook(isolate, EntryHook));
 
   isolate->Dispose();
 }
@@ -14285,7 +14415,7 @@
     saw_bar = 0;
     move_events = 0;
 
-    V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, event_handler);
+    isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, event_handler);
 
     // Generate new code objects sparsely distributed across several
     // different fragmented code-space pages.
@@ -14309,7 +14439,7 @@
     // Force code movement.
     heap->CollectAllAvailableGarbage("TestSetJitCodeEventHandler");
 
-    V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+    isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
 
     CHECK_LE(kIterations, saw_bar);
     CHECK_LT(0, move_events);
@@ -14339,8 +14469,9 @@
     i::HashMap lineinfo(MatchPointers);
     jitcode_line_info = &lineinfo;
 
-    V8::SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting, event_handler);
-    V8::SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
+    isolate->SetJitCodeEventHandler(v8::kJitCodeEventEnumExisting,
+                                    event_handler);
+    isolate->SetJitCodeEventHandler(v8::kJitCodeEventDefault, NULL);
 
     jitcode_line_info = NULL;
     // We expect that we got some events. Note that if we could get code removal
@@ -14424,7 +14555,7 @@
   CHECK_EQ(3, message->GetEndColumn());
   v8::String::Utf8Value line(message->GetSourceLine());
   CHECK_EQ("  throw 'nirk';", *line);
-  v8::String::Utf8Value name(message->GetScriptResourceName());
+  v8::String::Utf8Value name(message->GetScriptOrigin().ResourceName());
   CHECK_EQ(resource_name, *name);
 }
 
@@ -14693,13 +14824,13 @@
   context->Global()->Set(v8_str("obj_1"), instance_1);
 
   Local<Value> value_1 = CompileRun("obj_1.a");
-  CHECK(value_1->IsUndefined());
+  CHECK(value_1.IsEmpty());
 
   Local<v8::Object> instance_2 = templ->NewInstance();
   context->Global()->Set(v8_str("obj_2"), instance_2);
 
   Local<Value> value_2 = CompileRun("obj_2.a");
-  CHECK(value_2->IsUndefined());
+  CHECK(value_2.IsEmpty());
 }
 
 
@@ -14780,11 +14911,9 @@
   context->DetachGlobal();
   hidden_global->TurnOnAccessCheck();
 
-  // Failing access check to property get results in undefined.
-  CHECK(f1->Call(global, 0, NULL)->IsUndefined());
-  CHECK(f2->Call(global, 0, NULL)->IsUndefined());
-
-  // Failing access check to function call results in exception.
+  // Failing access check results in exception.
+  CHECK(f1->Call(global, 0, NULL).IsEmpty());
+  CHECK(f2->Call(global, 0, NULL).IsEmpty());
   CHECK(g1->Call(global, 0, NULL).IsEmpty());
   CHECK(g2->Call(global, 0, NULL).IsEmpty());
 
@@ -14868,11 +14997,9 @@
   context->DetachGlobal();
   hidden_global->TurnOnAccessCheck();
 
-  // Failing access check to property get results in undefined.
-  CHECK(f1->Call(global, 0, NULL)->IsUndefined());
-  CHECK(f2->Call(global, 0, NULL)->IsUndefined());
-
-  // Failing access check to function call results in exception.
+  // Failing access check results in exception.
+  CHECK(f1->Call(global, 0, NULL).IsEmpty());
+  CHECK(f2->Call(global, 0, NULL).IsEmpty());
   CHECK(g1->Call(global, 0, NULL).IsEmpty());
   CHECK(g2->Call(global, 0, NULL).IsEmpty());
 
@@ -14886,13 +15013,13 @@
   f2 = Local<Function>::Cast(hidden_global->Get(v8_str("f2")));
   g1 = Local<Function>::Cast(hidden_global->Get(v8_str("g1")));
   g2 = Local<Function>::Cast(hidden_global->Get(v8_str("g2")));
-  CHECK(hidden_global->Get(v8_str("h"))->IsUndefined());
+  CHECK(hidden_global->Get(v8_str("h")).IsEmpty());
 
-  // Failing access check to property get results in undefined.
-  CHECK(f1->Call(global, 0, NULL)->IsUndefined());
-  CHECK(f2->Call(global, 0, NULL)->IsUndefined());
-
-  // Failing access check to function call results in exception.
+  // Failing access check results in exception.
+  CHECK(f1->Call(global, 0, NULL).IsEmpty());
+  CHECK(f2->Call(global, 0, NULL).IsEmpty());
   CHECK(g1->Call(global, 0, NULL).IsEmpty());
   CHECK(g2->Call(global, 0, NULL).IsEmpty());
 }
@@ -14909,137 +15036,24 @@
   const char* script = "function foo(a) { return a+1; }";
   v8::ScriptCompiler::Source source(v8_str(script));
   v8::ScriptCompiler::Compile(isolate, &source,
-                              v8::ScriptCompiler::kProduceDataToCache);
+                              v8::ScriptCompiler::kProduceParserCache);
   // Serialize.
   const v8::ScriptCompiler::CachedData* cd = source.GetCachedData();
-  char* serialized_data = i::NewArray<char>(cd->length);
+  i::byte* serialized_data = i::NewArray<i::byte>(cd->length);
   i::MemCopy(serialized_data, cd->data, cd->length);
 
   // Deserialize.
-  i::ScriptData* deserialized = i::ScriptData::New(serialized_data, cd->length);
+  i::ScriptData* deserialized = new i::ScriptData(serialized_data, cd->length);
 
   // Verify that the original is the same as the deserialized.
-  CHECK_EQ(cd->length, deserialized->Length());
-  CHECK_EQ(0, memcmp(cd->data, deserialized->Data(), cd->length));
+  CHECK_EQ(cd->length, deserialized->length());
+  CHECK_EQ(0, memcmp(cd->data, deserialized->data(), cd->length));
 
   delete deserialized;
   i::DeleteArray(serialized_data);
 }
 
 
-// Attempts to deserialize bad data.
-TEST(PreCompileDeserializationError) {
-  v8::V8::Initialize();
-  const char* data = "DONT CARE";
-  int invalid_size = 3;
-  i::ScriptData* sd = i::ScriptData::New(data, invalid_size);
-  CHECK_EQ(NULL, sd);
-}
-
-
-TEST(CompileWithInvalidCachedData) {
-  v8::V8::Initialize();
-  v8::Isolate* isolate = CcTest::isolate();
-  LocalContext context;
-  v8::HandleScope scope(context->GetIsolate());
-  i::FLAG_min_preparse_length = 0;
-
-  const char* script = "function foo(){ return 5;}\n"
-      "function bar(){ return 6 + 7;}  foo();";
-  v8::ScriptCompiler::Source source(v8_str(script));
-  v8::ScriptCompiler::Compile(isolate, &source,
-                              v8::ScriptCompiler::kProduceDataToCache);
-  // source owns its cached data. Create a ScriptData based on it. The user
-  // never needs to create ScriptDatas any more; we only need it here because we
-  // want to modify the data before passing it back.
-  const v8::ScriptCompiler::CachedData* cd = source.GetCachedData();
-  // ScriptData does not take ownership of the buffers passed to it.
-  i::ScriptData* sd =
-      i::ScriptData::New(reinterpret_cast<const char*>(cd->data), cd->length);
-  CHECK(!sd->HasError());
-  // ScriptData private implementation details
-  const int kHeaderSize = i::PreparseDataConstants::kHeaderSize;
-  const int kFunctionEntrySize = i::FunctionEntry::kSize;
-  const int kFunctionEntryStartOffset = 0;
-  const int kFunctionEntryEndOffset = 1;
-  unsigned* sd_data =
-      reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
-
-  // Overwrite function bar's end position with 0.
-  sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryEndOffset] = 0;
-  v8::TryCatch try_catch;
-
-  // Make the script slightly different so that we don't hit the compilation
-  // cache. Don't change the lenghts of tokens.
-  const char* script2 = "function foo(){ return 6;}\n"
-      "function bar(){ return 6 + 7;}  foo();";
-  v8::ScriptCompiler::Source source2(
-      v8_str(script2),
-      // CachedData doesn't take ownership of the buffers, Source takes
-      // ownership of CachedData.
-      new v8::ScriptCompiler::CachedData(
-          reinterpret_cast<const uint8_t*>(sd->Data()), sd->Length()));
-  Local<v8::UnboundScript> compiled_script =
-      v8::ScriptCompiler::CompileUnbound(isolate, &source2);
-
-  CHECK(try_catch.HasCaught());
-  {
-    String::Utf8Value exception_value(try_catch.Message()->Get());
-    CHECK_EQ("Uncaught SyntaxError: Invalid cached data for function bar",
-             *exception_value);
-  }
-
-  try_catch.Reset();
-  delete sd;
-
-  // Overwrite function bar's start position with 200. The function entry will
-  // not be found when searching for it by position, and the compilation fails.
-
-  // ScriptData does not take ownership of the buffers passed to it.
-  sd = i::ScriptData::New(reinterpret_cast<const char*>(cd->data), cd->length);
-  sd_data = reinterpret_cast<unsigned*>(const_cast<char*>(sd->Data()));
-  sd_data[kHeaderSize + 1 * kFunctionEntrySize + kFunctionEntryStartOffset] =
-      200;
-  const char* script3 = "function foo(){ return 7;}\n"
-      "function bar(){ return 6 + 7;}  foo();";
-  v8::ScriptCompiler::Source source3(
-      v8_str(script3),
-      new v8::ScriptCompiler::CachedData(
-          reinterpret_cast<const uint8_t*>(sd->Data()), sd->Length()));
-  compiled_script =
-      v8::ScriptCompiler::CompileUnbound(isolate, &source3);
-  CHECK(try_catch.HasCaught());
-  {
-    String::Utf8Value exception_value(try_catch.Message()->Get());
-    CHECK_EQ("Uncaught SyntaxError: Invalid cached data for function bar",
-             *exception_value);
-  }
-  CHECK(compiled_script.IsEmpty());
-  try_catch.Reset();
-  delete sd;
-
-  // Try passing in cached data which is obviously invalid (wrong length).
-  sd = i::ScriptData::New(reinterpret_cast<const char*>(cd->data), cd->length);
-  const char* script4 =
-      "function foo(){ return 8;}\n"
-      "function bar(){ return 6 + 7;}  foo();";
-  v8::ScriptCompiler::Source source4(
-      v8_str(script4),
-      new v8::ScriptCompiler::CachedData(
-          reinterpret_cast<const uint8_t*>(sd->Data()), sd->Length() - 1));
-  compiled_script =
-      v8::ScriptCompiler::CompileUnbound(isolate, &source4);
-  CHECK(try_catch.HasCaught());
-  {
-    String::Utf8Value exception_value(try_catch.Message()->Get());
-    CHECK_EQ("Uncaught SyntaxError: Invalid cached data",
-             *exception_value);
-  }
-  CHECK(compiled_script.IsEmpty());
-  delete sd;
-}
-
-
 // This tests that we do not allow dictionary load/call inline caches
 // to use functions that have not yet been compiled.  The potential
 // problem of loading a function that has not yet been compiled can
@@ -15129,11 +15143,11 @@
 }
 
 
-class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
+class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
  public:
-  explicit AsciiVectorResource(i::Vector<const char> vector)
+  explicit OneByteVectorResource(i::Vector<const char> vector)
       : data_(vector) {}
-  virtual ~AsciiVectorResource() {}
+  virtual ~OneByteVectorResource() {}
   virtual size_t length() const { return data_.length(); }
   virtual const char* data() const { return data_.start(); }
  private:
@@ -15154,12 +15168,12 @@
 
 
 static void MorphAString(i::String* string,
-                         AsciiVectorResource* ascii_resource,
+                         OneByteVectorResource* one_byte_resource,
                          UC16VectorResource* uc16_resource) {
   CHECK(i::StringShape(string).IsExternal());
   if (string->IsOneByteRepresentation()) {
     // Check old map is not internalized or long.
-    CHECK(string->map() == CcTest::heap()->external_ascii_string_map());
+    CHECK(string->map() == CcTest::heap()->external_one_byte_string_map());
     // Morph external string to be TwoByte string.
     string->set_map(CcTest::heap()->external_string_map());
     i::ExternalTwoByteString* morphed =
@@ -15168,11 +15182,10 @@
   } else {
     // Check old map is not internalized or long.
     CHECK(string->map() == CcTest::heap()->external_string_map());
-    // Morph external string to be ASCII string.
-    string->set_map(CcTest::heap()->external_ascii_string_map());
-    i::ExternalAsciiString* morphed =
-         i::ExternalAsciiString::cast(string);
-    morphed->set_resource(ascii_resource);
+    // Morph external string to be one-byte string.
+    string->set_map(CcTest::heap()->external_one_byte_string_map());
+    i::ExternalOneByteString* morphed = i::ExternalOneByteString::cast(string);
+    morphed->set_resource(one_byte_resource);
   }
 }
 
@@ -15188,18 +15201,18 @@
     LocalContext env;
     i::Factory* factory = CcTest::i_isolate()->factory();
     v8::HandleScope scope(env->GetIsolate());
-    AsciiVectorResource ascii_resource(
+    OneByteVectorResource one_byte_resource(
         i::Vector<const char>(c_string, i::StrLength(c_string)));
     UC16VectorResource uc16_resource(
         i::Vector<const uint16_t>(two_byte_string,
                                   i::StrLength(c_string)));
 
-    Local<String> lhs(v8::Utils::ToLocal(
-        factory->NewExternalStringFromAscii(&ascii_resource)
-            .ToHandleChecked()));
-    Local<String> rhs(v8::Utils::ToLocal(
-        factory->NewExternalStringFromAscii(&ascii_resource)
-            .ToHandleChecked()));
+    Local<String> lhs(
+        v8::Utils::ToLocal(factory->NewExternalStringFromOneByte(
+                                        &one_byte_resource).ToHandleChecked()));
+    Local<String> rhs(
+        v8::Utils::ToLocal(factory->NewExternalStringFromOneByte(
+                                        &one_byte_resource).ToHandleChecked()));
 
     env->Global()->Set(v8_str("lhs"), lhs);
     env->Global()->Set(v8_str("rhs"), rhs);
@@ -15212,8 +15225,10 @@
     CHECK(lhs->IsOneByte());
     CHECK(rhs->IsOneByte());
 
-    MorphAString(*v8::Utils::OpenHandle(*lhs), &ascii_resource, &uc16_resource);
-    MorphAString(*v8::Utils::OpenHandle(*rhs), &ascii_resource, &uc16_resource);
+    MorphAString(*v8::Utils::OpenHandle(*lhs), &one_byte_resource,
+                 &uc16_resource);
+    MorphAString(*v8::Utils::OpenHandle(*rhs), &one_byte_resource,
+                 &uc16_resource);
 
     // This should convert to UTF-8 without flattening, since everything is
     // ASCII.
     Handle<String> cons = v8_compile("cons")->Run().As<String>();
@@ -15256,16 +15271,15 @@
 
   // This is a very short list of sources, which currently is to check for a
   // regression caused by r2703.
-  const char* ascii_sources[] = {
-    "0.5",
-    "-0.5",   // This mainly testes PushBack in the Scanner.
-    "--0.5",  // This mainly testes PushBack in the Scanner.
-    NULL
-  };
+  const char* one_byte_sources[] = {
+      "0.5",
+      "-0.5",   // This mainly testes PushBack in the Scanner.
+      "--0.5",  // This mainly testes PushBack in the Scanner.
+      NULL};
 
   // Compile the sources as external two byte strings.
-  for (int i = 0; ascii_sources[i] != NULL; i++) {
-    uint16_t* two_byte_string = AsciiToTwoByteString(ascii_sources[i]);
+  for (int i = 0; one_byte_sources[i] != NULL; i++) {
+    uint16_t* two_byte_string = AsciiToTwoByteString(one_byte_sources[i]);
     TestResource* uc16_resource = new TestResource(two_byte_string);
     v8::Local<v8::String> source =
         v8::String::NewExternal(context->GetIsolate(), uc16_resource);
@@ -15283,19 +15297,19 @@
 } regexp_interruption_data;
 
 
-class RegExpInterruptionThread : public i::Thread {
+class RegExpInterruptionThread : public v8::base::Thread {
  public:
   explicit RegExpInterruptionThread(v8::Isolate* isolate)
-      : Thread("TimeoutThread"), isolate_(isolate) {}
+      : Thread(Options("TimeoutThread")), isolate_(isolate) {}
 
   virtual void Run() {
     for (regexp_interruption_data.loop_count = 0;
          regexp_interruption_data.loop_count < 7;
          regexp_interruption_data.loop_count++) {
-      i::OS::Sleep(50);  // Wait a bit before requesting GC.
+      v8::base::OS::Sleep(50);  // Wait a bit before requesting GC.
       reinterpret_cast<i::Isolate*>(isolate_)->stack_guard()->RequestGC();
     }
-    i::OS::Sleep(50);  // Wait a bit before terminating.
+    v8::base::OS::Sleep(50);  // Wait a bit before terminating.
     v8::V8::TerminateExecution(isolate_);
   }
 
@@ -15324,14 +15338,14 @@
   RegExpInterruptionThread timeout_thread(CcTest::isolate());
 
   v8::V8::AddGCPrologueCallback(RunBeforeGC);
-  static const char* ascii_content = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
-  i::uc16* uc16_content = AsciiToTwoByteString(ascii_content);
-  v8::Local<v8::String> string = v8_str(ascii_content);
+  static const char* one_byte_content = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+  i::uc16* uc16_content = AsciiToTwoByteString(one_byte_content);
+  v8::Local<v8::String> string = v8_str(one_byte_content);
 
   CcTest::global()->Set(v8_str("a"), string);
   regexp_interruption_data.string.Reset(CcTest::isolate(), string);
   regexp_interruption_data.string_resource = new UC16VectorResource(
-      i::Vector<const i::uc16>(uc16_content, i::StrLength(ascii_content)));
+      i::Vector<const i::uc16>(uc16_content, i::StrLength(one_byte_content)));
 
   v8::TryCatch try_catch;
   timeout_thread.Start();
@@ -15358,8 +15372,10 @@
   v8::Handle<v8::Object> global = context->Global();
   v8::Handle<v8::Object> global_proto =
       v8::Handle<v8::Object>::Cast(global->Get(v8_str("__proto__")));
-  global_proto->Set(v8_str("x"), v8::Integer::New(isolate, 0), v8::ReadOnly);
-  global_proto->Set(v8_str("y"), v8::Integer::New(isolate, 0), v8::ReadOnly);
+  global_proto->ForceSet(v8_str("x"), v8::Integer::New(isolate, 0),
+                         v8::ReadOnly);
+  global_proto->ForceSet(v8_str("y"), v8::Integer::New(isolate, 0),
+                         v8::ReadOnly);
   // Check without 'eval' or 'with'.
   v8::Handle<v8::Value> res =
       CompileRun("function f() { x = 42; return x; }; f()");
@@ -15417,7 +15433,7 @@
   // Ordinary properties
   v8::Handle<v8::String> simple_property =
       v8::String::NewFromUtf8(isolate, "p");
-  global->Set(simple_property, v8::Int32::New(isolate, 4), v8::ReadOnly);
+  global->ForceSet(simple_property, v8::Int32::New(isolate, 4), v8::ReadOnly);
   CHECK_EQ(4, global->Get(simple_property)->Int32Value());
   // This should fail because the property is read-only
   global->Set(simple_property, v8::Int32::New(isolate, 5));
@@ -15505,7 +15521,7 @@
   // Ordinary properties
   v8::Handle<v8::String> simple_property =
       v8::String::NewFromUtf8(isolate, "p");
-  global->Set(simple_property, v8::Int32::New(isolate, 4), v8::DontDelete);
+  global->ForceSet(simple_property, v8::Int32::New(isolate, 4), v8::DontDelete);
   CHECK_EQ(4, global->Get(simple_property)->Int32Value());
   // This should fail because the property is dont-delete.
   CHECK(!global->Delete(simple_property));
@@ -15542,7 +15558,8 @@
 
   v8::Handle<v8::String> some_property =
       v8::String::NewFromUtf8(isolate, "a");
-  global->Set(some_property, v8::Integer::New(isolate, 42), v8::DontDelete);
+  global->ForceSet(some_property, v8::Integer::New(isolate, 42),
+                   v8::DontDelete);
 
   // Deleting a property should get intercepted and nothing should
   // happen.
@@ -15831,19 +15848,19 @@
   i::Handle<i::Object> no_failure;
   no_failure = i::JSObject::SetElement(
       jsobj, 1, value, NONE, i::SLOPPY).ToHandleChecked();
-  ASSERT(!no_failure.is_null());
+  DCHECK(!no_failure.is_null());
   USE(no_failure);
   CheckElementValue(isolate, 2, jsobj, 1);
   *value.location() = i::Smi::FromInt(256);
   no_failure = i::JSObject::SetElement(
       jsobj, 1, value, NONE, i::SLOPPY).ToHandleChecked();
-  ASSERT(!no_failure.is_null());
+  DCHECK(!no_failure.is_null());
   USE(no_failure);
   CheckElementValue(isolate, 255, jsobj, 1);
   *value.location() = i::Smi::FromInt(-1);
   no_failure = i::JSObject::SetElement(
       jsobj, 1, value, NONE, i::SLOPPY).ToHandleChecked();
-  ASSERT(!no_failure.is_null());
+  DCHECK(!no_failure.is_null());
   USE(no_failure);
   CheckElementValue(isolate, 0, jsobj, 1);
 
@@ -16377,7 +16394,7 @@
   CHECK_EQ(0, result->Int32Value());
   if (array_type == v8::kExternalFloat64Array ||
       array_type == v8::kExternalFloat32Array) {
-    CHECK_EQ(static_cast<int>(i::OS::nan_value()),
+    CHECK_EQ(static_cast<int>(v8::base::OS::nan_value()),
              static_cast<int>(
                  i::Object::GetElement(
                      isolate, jsobj, 7).ToHandleChecked()->Number()));
@@ -17215,7 +17232,7 @@
   const int kOverviewTest = 1;
   const int kDetailedTest = 2;
 
-  ASSERT(args.Length() == 1);
+  DCHECK(args.Length() == 1);
 
   int testGroup = args[0]->Int32Value();
   if (testGroup == kOverviewTest) {
@@ -17700,6 +17717,54 @@
 }
 
 
+TEST(EvalWithSourceURLInMessageScriptResourceNameOrSourceURL) {
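+  // An exception thrown from eval'ed code that carries a //# sourceURL
+  // annotation should report that URL as the message's resource name.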
+  LocalContext context;
+  v8::HandleScope scope(context->GetIsolate());
+
+  const char* source =
+    "function outer() {\n"
+    "  var scriptContents = \"function foo() { FAIL.FAIL; }\\\n"
+    "  //# sourceURL=source_url\";\n"
+    "  eval(scriptContents);\n"
+    "  foo(); }\n"
+    "outer();\n"
+    "//# sourceURL=outer_url";
+
+  v8::TryCatch try_catch;
+  CompileRun(source);
+  CHECK(try_catch.HasCaught());
+
+  Local<v8::Message> message = try_catch.Message();
+  Handle<Value> sourceURL = message->GetScriptOrigin().ResourceName();
+  CHECK_EQ(*v8::String::Utf8Value(sourceURL), "source_url");
+}
+
+
+TEST(RecursionWithSourceURLInMessageScriptResourceNameOrSourceURL) {
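+  // Same expectation as above, with the eval'ed code failing through
+  // unbounded recursion instead of a failed property access.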
+  LocalContext context;
+  v8::HandleScope scope(context->GetIsolate());
+
+  const char* source =
+    "function outer() {\n"
+    "  var scriptContents = \"function boo(){ boo(); }\\\n"
+    "  //# sourceURL=source_url\";\n"
+    "  eval(scriptContents);\n"
+    "  boo(); }\n"
+    "outer();\n"
+    "//# sourceURL=outer_url";
+
+  v8::TryCatch try_catch;
+  CompileRun(source);
+  CHECK(try_catch.HasCaught());
+
+  Local<v8::Message> message = try_catch.Message();
+  Handle<Value> sourceURL = message->GetScriptOrigin().ResourceName();
+  CHECK_EQ(*v8::String::Utf8Value(sourceURL), "source_url");
+}
+
+
 static void CreateGarbageInOldSpace() {
   i::Factory* factory = CcTest::i_isolate()->factory();
   v8::HandleScope scope(CcTest::isolate());
@@ -17713,6 +17778,7 @@
 // Test that idle notification can be handled and eventually returns true.
 TEST(IdleNotification) {
   const intptr_t MB = 1024 * 1024;
+  const int kIdlePauseInMs = 1000;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
   intptr_t initial_size = CcTest::heap()->SizeOfObjects();
@@ -17721,7 +17787,7 @@
   CHECK_GT(size_with_garbage, initial_size + MB);
   bool finished = false;
   for (int i = 0; i < 200 && !finished; i++) {
-    finished = v8::V8::IdleNotification();
+    finished = env->GetIsolate()->IdleNotification(kIdlePauseInMs);
   }
   intptr_t final_size = CcTest::heap()->SizeOfObjects();
   CHECK(finished);
@@ -17741,7 +17807,7 @@
   CHECK_GT(size_with_garbage, initial_size + MB);
   bool finished = false;
   for (int i = 0; i < 200 && !finished; i++) {
-    finished = v8::V8::IdleNotification(IdlePauseInMs);
+    finished = env->GetIsolate()->IdleNotification(IdlePauseInMs);
   }
   intptr_t final_size = CcTest::heap()->SizeOfObjects();
   CHECK(finished);
@@ -17761,7 +17827,7 @@
   CHECK_GT(size_with_garbage, initial_size + MB);
   bool finished = false;
   for (int i = 0; i < 200 && !finished; i++) {
-    finished = v8::V8::IdleNotification(IdlePauseInMs);
+    finished = env->GetIsolate()->IdleNotification(IdlePauseInMs);
   }
   intptr_t final_size = CcTest::heap()->SizeOfObjects();
   CHECK(finished);
@@ -17771,14 +17837,13 @@
 
 TEST(Regress2107) {
   const intptr_t MB = 1024 * 1024;
-  const int kShortIdlePauseInMs = 100;
-  const int kLongIdlePauseInMs = 1000;
+  const int kIdlePauseInMs = 1000;
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   v8::HandleScope scope(env->GetIsolate());
   intptr_t initial_size = CcTest::heap()->SizeOfObjects();
   // Send idle notification to start a round of incremental GCs.
-  v8::V8::IdleNotification(kShortIdlePauseInMs);
+  isolate->IdleNotification(kIdlePauseInMs);
   // Emulate 7 page reloads.
   for (int i = 0; i < 7; i++) {
     {
@@ -17788,8 +17853,8 @@
       CreateGarbageInOldSpace();
       ctx->Exit();
     }
-    v8::V8::ContextDisposedNotification();
-    v8::V8::IdleNotification(kLongIdlePauseInMs);
+    isolate->ContextDisposedNotification();
+    isolate->IdleNotification(kIdlePauseInMs);
   }
   // Create garbage and check that idle notification still collects it.
   CreateGarbageInOldSpace();
@@ -17797,7 +17862,7 @@
   CHECK_GT(size_with_garbage, initial_size + MB);
   bool finished = false;
   for (int i = 0; i < 200 && !finished; i++) {
-    finished = v8::V8::IdleNotification(kShortIdlePauseInMs);
+    finished = isolate->IdleNotification(kIdlePauseInMs);
   }
   intptr_t final_size = CcTest::heap()->SizeOfObjects();
   CHECK_LT(final_size, initial_size + 1);
@@ -17838,13 +17903,11 @@
 static const int stack_breathing_room = 256 * i::KB;
 
 
-TEST(SetResourceConstraints) {
+TEST(SetStackLimit) {
   uint32_t* set_limit = ComputeStackLimit(stack_breathing_room);
 
   // Set stack limit.
-  v8::ResourceConstraints constraints;
-  constraints.set_stack_limit(set_limit);
-  CHECK(v8::SetResourceConstraints(CcTest::isolate(), &constraints));
+  CcTest::isolate()->SetStackLimit(reinterpret_cast<uintptr_t>(set_limit));
 
   // Execute a script.
   LocalContext env;
@@ -17859,16 +17922,14 @@
 }
 
 
-TEST(SetResourceConstraintsInThread) {
+TEST(SetStackLimitInThread) {
   uint32_t* set_limit;
   {
     v8::Locker locker(CcTest::isolate());
     set_limit = ComputeStackLimit(stack_breathing_room);
 
     // Set stack limit.
-    v8::ResourceConstraints constraints;
-    constraints.set_stack_limit(set_limit);
-    CHECK(v8::SetResourceConstraints(CcTest::isolate(), &constraints));
+    CcTest::isolate()->SetStackLimit(reinterpret_cast<uintptr_t>(set_limit));
 
     // Execute a script.
     v8::HandleScope scope(CcTest::isolate());
@@ -17911,7 +17972,7 @@
   virtual ~VisitorImpl() {}
   virtual void VisitExternalString(v8::Handle<v8::String> string) {
     if (!string->IsExternal()) {
-      CHECK(string->IsExternalAscii());
+      CHECK(string->IsExternalOneByte());
       return;
     }
     v8::String::ExternalStringResource* resource =
@@ -17968,12 +18029,12 @@
   CHECK(CcTest::heap()->old_pointer_space()->Contains(
             *v8::Utils::OpenHandle(*cons)));
 
-  TestAsciiResource* resource =
-      new TestAsciiResource(i::StrDup("Romeo Montague Juliet Capulet"));
+  TestOneByteResource* resource =
+      new TestOneByteResource(i::StrDup("Romeo Montague Juliet Capulet"));
   cons->MakeExternal(resource);
 
-  CHECK(cons->IsExternalAscii());
-  CHECK_EQ(resource, cons->GetExternalAsciiStringResource());
+  CHECK(cons->IsExternalOneByte());
+  CHECK_EQ(resource, cons->GetExternalOneByteStringResource());
   String::Encoding encoding;
   CHECK_EQ(resource, cons->GetExternalStringResourceBase(&encoding));
   CHECK_EQ(String::ONE_BYTE_ENCODING, encoding);
@@ -18028,8 +18089,8 @@
   { v8::Isolate::Scope isolate_scope(isolate);
     v8::HandleScope handle_scope(isolate);
     const char* s = "One string to test them all, one string to find them.";
-    TestAsciiResource* inscription =
-        new TestAsciiResource(i::StrDup(s), &destroyed);
+    TestOneByteResource* inscription =
+        new TestOneByteResource(i::StrDup(s), &destroyed);
     v8::Local<v8::String> ring = v8::String::NewExternal(isolate, inscription);
     // Ring is still alive.  Orcs are roaming freely across our lands.
     CHECK_EQ(0, destroyed);
@@ -18050,8 +18111,8 @@
     v8::HandleScope handle_scope(isolate);
     CompileRun("var ring = 'One string to test them all';");
     const char* s = "One string to test them all";
-    TestAsciiResource* inscription =
-        new TestAsciiResource(i::StrDup(s), &destroyed);
+    TestOneByteResource* inscription =
+        new TestOneByteResource(i::StrDup(s), &destroyed);
     v8::Local<v8::String> ring = CompileRun("ring")->ToString();
     CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
     ring->MakeExternal(inscription);
@@ -18072,8 +18133,8 @@
     v8::HandleScope handle_scope(env->GetIsolate());
     CompileRun("var ring = 'One string to test them all';");
     const char* s = "One string to test them all";
-    TestAsciiResource* inscription =
-        new TestAsciiResource(i::StrDup(s), &destroyed);
+    TestOneByteResource* inscription =
+        new TestOneByteResource(i::StrDup(s), &destroyed);
     v8::Local<v8::String> ring = CompileRun("ring")->ToString();
     CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
     ring->MakeExternal(inscription);
@@ -18108,7 +18169,7 @@
 static double DoubleToDateTime(double input) {
   double date_limit = 864e13;
   if (std::isnan(input) || input < -date_limit || input > date_limit) {
-    return i::OS::nan_value();
+    return v8::base::OS::nan_value();
   }
   return (input < 0) ? -(std::floor(-input)) : std::floor(input);
 }
@@ -18175,7 +18236,8 @@
     } else {
       uint64_t stored_bits = DoubleToBits(stored_number);
       // Check if quiet nan (bits 51..62 all set).
-#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
+#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
+    !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR)
       // Most significant fraction bit for quiet nan is set to 0
       // on MIPS architecture. Allowed by IEEE-754.
       CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
@@ -18195,7 +18257,8 @@
     } else {
       uint64_t stored_bits = DoubleToBits(stored_date);
       // Check if quiet nan (bits 51..62 all set).
-#if defined(V8_TARGET_ARCH_MIPS) && !defined(USE_SIMULATOR)
+#if (defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)) && \
+    !defined(_MIPS_ARCH_MIPS64R6) && !defined(USE_SIMULATOR)
       // Most significant fraction bit for quiet nan is set to 0
       // on MIPS architecture. Allowed by IEEE-754.
       CHECK_EQ(0xffe, static_cast<int>((stored_bits >> 51) & 0xfff));
@@ -18271,7 +18334,7 @@
     CompileRun(source_simple);
     context->Exit();
   }
-  v8::V8::ContextDisposedNotification();
+  isolate->ContextDisposedNotification();
   for (gc_count = 1; gc_count < 10; gc_count++) {
     other_context->Enter();
     CompileRun(source_simple);
@@ -18293,7 +18356,7 @@
     CompileRun(source_eval);
     context->Exit();
   }
-  v8::V8::ContextDisposedNotification();
+  isolate->ContextDisposedNotification();
   for (gc_count = 1; gc_count < 10; gc_count++) {
     other_context->Enter();
     CompileRun(source_eval);
@@ -18320,7 +18383,7 @@
     CHECK_EQ(1, message->GetLineNumber());
     context->Exit();
   }
-  v8::V8::ContextDisposedNotification();
+  isolate->ContextDisposedNotification();
   for (gc_count = 1; gc_count < 10; gc_count++) {
     other_context->Enter();
     CompileRun(source_exception);
@@ -18331,7 +18394,7 @@
   CHECK_GE(2, gc_count);
   CHECK_EQ(1, GetGlobalObjectsCount());
 
-  v8::V8::ContextDisposedNotification();
+  isolate->ContextDisposedNotification();
 }
 
 
@@ -18586,8 +18649,7 @@
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetAccessor(v8_str("x"),
-                     GetterWhichReturns42,
+  templ->SetAccessor(v8_str("x"), GetterWhichReturns42,
                      SetterWhichSetsYOnThisTo23);
   LocalContext context;
   context->Global()->Set(v8_str("P"), templ->NewInstance());
@@ -18700,8 +18762,7 @@
 
   // Use an API object with accessors as prototype.
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetAccessor(v8_str("x"),
-                     GetterWhichReturns42,
+  templ->SetAccessor(v8_str("x"), GetterWhichReturns42,
                      SetterWhichSetsYOnThisTo23);
   context->Global()->Set(v8_str("P"), templ->NewInstance());
 
@@ -19016,7 +19077,7 @@
 }
 
 
-THREADED_TEST(TwoByteStringInAsciiCons) {
+THREADED_TEST(TwoByteStringInOneByteCons) {
   // See Chromium issue 47824.
   LocalContext context;
   v8::HandleScope scope(context->GetIsolate());
@@ -19054,10 +19115,10 @@
 
   // If the cons string has been short-circuited, skip the following checks.
   if (!string.is_identical_to(flat_string)) {
-    // At this point, we should have a Cons string which is flat and ASCII,
+    // At this point, we should have a Cons string which is flat and one-byte,
     // with a first half that is a two-byte string (although it only contains
-    // ASCII characters). This is a valid sequence of steps, and it can happen
-    // in real pages.
+    // one-byte characters). This is a valid sequence of steps, and it can
+    // happen in real pages.
     CHECK(string->IsOneByteRepresentation());
     i::ConsString* cons = i::ConsString::cast(*string);
     CHECK_EQ(0, cons->second()->length());
@@ -19145,7 +19206,7 @@
       String::NewExternal(isolate,
                           new TestResource(string_contents, NULL, false));
   USE(two_byte); USE(cons_strings);
-  for (size_t i = 0; i < ARRAY_SIZE(cons_strings); i++) {
+  for (size_t i = 0; i < arraysize(cons_strings); i++) {
     // Base assumptions.
     string = cons_strings[i];
     CHECK(string->IsOneByte() && string->ContainsOnlyOneByte());
@@ -19439,33 +19500,28 @@
   return static_cast<int>(value->NumberValue());
 }
 
-class IsolateThread : public v8::internal::Thread {
+class IsolateThread : public v8::base::Thread {
  public:
-  IsolateThread(v8::Isolate* isolate, int fib_limit)
-      : Thread("IsolateThread"),
-        isolate_(isolate),
-        fib_limit_(fib_limit),
-        result_(0) { }
+  explicit IsolateThread(int fib_limit)
+      : Thread(Options("IsolateThread")), fib_limit_(fib_limit), result_(0) {}
 
   void Run() {
-    result_ = CalcFibonacci(isolate_, fib_limit_);
+    v8::Isolate* isolate = v8::Isolate::New();
+    result_ = CalcFibonacci(isolate, fib_limit_);
+    isolate->Dispose();
   }
 
   int result() { return result_; }
 
  private:
-  v8::Isolate* isolate_;
   int fib_limit_;
   int result_;
 };
 
 
 TEST(MultipleIsolatesOnIndividualThreads) {
-  v8::Isolate* isolate1 = v8::Isolate::New();
-  v8::Isolate* isolate2 = v8::Isolate::New();
-
-  IsolateThread thread1(isolate1, 21);
-  IsolateThread thread2(isolate2, 12);
+  IsolateThread thread1(21);
+  IsolateThread thread2(12);
 
   // Compute some fibonacci numbers on 3 threads in 3 isolates.
   thread1.Start();
@@ -19483,9 +19539,6 @@
   CHECK_EQ(result2, 144);
   CHECK_EQ(result1, thread1.result());
   CHECK_EQ(result2, thread2.result());
-
-  isolate1->Dispose();
-  isolate2->Dispose();
 }
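
The rewrite above makes each thread own its isolate end to end: the isolate is created, used, and disposed on the thread that runs it, instead of being constructed on the main thread and handed across. A hedged sketch of that lifecycle using std::thread (an illustrative stand-in for v8::base::Thread):

    #include <thread>

    void RunIsolateOnOwnThread() {
      std::thread worker([] {
        v8::Isolate* isolate = v8::Isolate::New();  // created on this thread
        {
          v8::Isolate::Scope isolate_scope(isolate);
          // ... work that needs the isolate ...
        }
        isolate->Dispose();  // disposed on the same thread, after all use
      });
      worker.join();
    }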
 
 
@@ -19513,7 +19566,7 @@
   isolate->Dispose();
 }
 
-class InitDefaultIsolateThread : public v8::internal::Thread {
+class InitDefaultIsolateThread : public v8::base::Thread {
  public:
   enum TestCase {
     SetResourceConstraints,
@@ -19524,21 +19577,27 @@
   };
 
   explicit InitDefaultIsolateThread(TestCase testCase)
-      : Thread("InitDefaultIsolateThread"),
+      : Thread(Options("InitDefaultIsolateThread")),
         testCase_(testCase),
-        result_(false) { }
+        result_(false) {}
 
   void Run() {
-    v8::Isolate* isolate = v8::Isolate::New();
-    isolate->Enter();
+    v8::Isolate::CreateParams create_params;
     switch (testCase_) {
       case SetResourceConstraints: {
-        v8::ResourceConstraints constraints;
-        constraints.set_max_semi_space_size(1);
-        constraints.set_max_old_space_size(4);
-        v8::SetResourceConstraints(CcTest::isolate(), &constraints);
+        create_params.constraints.set_max_semi_space_size(1);
+        create_params.constraints.set_max_old_space_size(4);
         break;
       }
+      default:
+        break;
+    }
+    v8::Isolate* isolate = v8::Isolate::New(create_params);
+    isolate->Enter();
+    switch (testCase_) {
+      case SetResourceConstraints:
+        // Already handled in pre-Isolate-creation block.
+        break;
 
       case SetFatalHandler:
         v8::V8::SetFatalErrorHandler(NULL);
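
The reshuffle above tracks an API change: resource constraints are now supplied through v8::Isolate::CreateParams before the isolate exists, rather than applied to a live isolate via v8::SetResourceConstraints. A minimal sketch of the new pattern, using only the calls visible in this hunk:

    v8::Isolate::CreateParams params;
    params.constraints.set_max_semi_space_size(1);
    params.constraints.set_max_old_space_size(4);
    v8::Isolate* isolate = v8::Isolate::New(params);
    // ... use the isolate ...
    isolate->Dispose();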
@@ -19747,7 +19806,7 @@
   // cell created using the API.
   LocalContext context;
   v8::HandleScope scope(context->GetIsolate());
-  context->Global()->Set(v8_str("cell"), v8_str("value"), v8::DontDelete);
+  context->Global()->ForceSet(v8_str("cell"), v8_str("value"), v8::DontDelete);
   ExpectBoolean("delete cell", false);
   CompileRun(function_code);
   ExpectString("readCell()", "value");
@@ -19830,6 +19889,7 @@
   CHECK_EQ(42, object1.WrapperClassId());
 
   CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+  CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
 
   v8::Persistent<v8::Object> object2(isolate, v8::Object::New(isolate));
   CHECK_EQ(0, object2.WrapperClassId());
@@ -20343,15 +20403,15 @@
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
   context->Global()->Set(v8_str("obj"), obj);
-  obj->Set(v8_str("1"), v8_str("DONT_CHANGE"), v8::ReadOnly);
+  obj->ForceSet(v8_str("1"), v8_str("DONT_CHANGE"), v8::ReadOnly);
   obj->Set(v8_str("1"), v8_str("foobar"));
   CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_str("1")));
-  obj->Set(v8_num(2), v8_str("DONT_CHANGE"), v8::ReadOnly);
+  obj->ForceSet(v8_num(2), v8_str("DONT_CHANGE"), v8::ReadOnly);
   obj->Set(v8_num(2), v8_str("foobar"));
   CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_num(2)));
 
   // Test non-smi case.
-  obj->Set(v8_str("2000000000"), v8_str("DONT_CHANGE"), v8::ReadOnly);
+  obj->ForceSet(v8_str("2000000000"), v8_str("DONT_CHANGE"), v8::ReadOnly);
   obj->Set(v8_str("2000000000"), v8_str("foobar"));
   CHECK_EQ(v8_str("DONT_CHANGE"), obj->Get(v8_str("2000000000")));
 }
@@ -20475,20 +20535,20 @@
   CHECK(result1->Equals(simple_object->GetPrototype()));
 
   Local<Value> result2 = CompileRun("Object.getPrototypeOf(protected)");
-  CHECK(result2->Equals(Undefined(isolate)));
+  CHECK(result2.IsEmpty());
 
   Local<Value> result3 = CompileRun("Object.getPrototypeOf(global)");
   CHECK(result3->Equals(global_object->GetPrototype()));
 
   Local<Value> result4 = CompileRun("Object.getPrototypeOf(proxy)");
-  CHECK(result4->Equals(Undefined(isolate)));
+  CHECK(result4.IsEmpty());
 
   Local<Value> result5 = CompileRun("Object.getPrototypeOf(hidden)");
   CHECK(result5->Equals(
       object_with_hidden->GetPrototype()->ToObject()->GetPrototype()));
 
   Local<Value> result6 = CompileRun("Object.getPrototypeOf(phidden)");
-  CHECK(result6->Equals(Undefined(isolate)));
+  CHECK(result6.IsEmpty());
 }
 
 
@@ -20622,13 +20682,13 @@
 
 
 void CallCompletedCallback1() {
-  i::OS::Print("Firing callback 1.\n");
+  v8::base::OS::Print("Firing callback 1.\n");
   callback_fired ^= 1;  // Toggle first bit.
 }
 
 
 void CallCompletedCallback2() {
-  i::OS::Print("Firing callback 2.\n");
+  v8::base::OS::Print("Firing callback 2.\n");
   callback_fired ^= 2;  // Toggle second bit.
 }
 
@@ -20637,15 +20697,15 @@
   int32_t level = args[0]->Int32Value();
   if (level < 3) {
     level++;
-    i::OS::Print("Entering recursion level %d.\n", level);
+    v8::base::OS::Print("Entering recursion level %d.\n", level);
     char script[64];
     i::Vector<char> script_vector(script, sizeof(script));
     i::SNPrintF(script_vector, "recursion(%d)", level);
     CompileRun(script_vector.start());
-    i::OS::Print("Leaving recursion level %d.\n", level);
+    v8::base::OS::Print("Leaving recursion level %d.\n", level);
     CHECK_EQ(0, callback_fired);
   } else {
-    i::OS::Print("Recursion ends.\n");
+    v8::base::OS::Print("Recursion ends.\n");
     CHECK_EQ(0, callback_fired);
   }
 }
@@ -20662,19 +20722,19 @@
   env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback1);
   env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback1);
   env->GetIsolate()->AddCallCompletedCallback(CallCompletedCallback2);
-  i::OS::Print("--- Script (1) ---\n");
+  v8::base::OS::Print("--- Script (1) ---\n");
   Local<Script> script = v8::Script::Compile(
       v8::String::NewFromUtf8(env->GetIsolate(), "recursion(0)"));
   script->Run();
   CHECK_EQ(3, callback_fired);
 
-  i::OS::Print("\n--- Script (2) ---\n");
+  v8::base::OS::Print("\n--- Script (2) ---\n");
   callback_fired = 0;
   env->GetIsolate()->RemoveCallCompletedCallback(CallCompletedCallback1);
   script->Run();
   CHECK_EQ(2, callback_fired);
 
-  i::OS::Print("\n--- Function ---\n");
+  v8::base::OS::Print("\n--- Function ---\n");
   callback_fired = 0;
   Local<Function> recursive_function =
       Local<Function>::Cast(env->Global()->Get(v8_str("recursion")));
@@ -20904,6 +20964,37 @@
 }
 
 
+static void DebugEventInObserver(const v8::Debug::EventDetails& event_details) {
+  v8::DebugEvent event = event_details.GetEvent();
+  if (event != v8::Break) return;
+  Handle<Object> exec_state = event_details.GetExecutionState();
+  Handle<Value> break_id = exec_state->Get(v8_str("break_id"));
+  CompileRun("function f(id) { new FrameDetails(id, 0); }");
+  Handle<Function> fun = Handle<Function>::Cast(
+      CcTest::global()->Get(v8_str("f"))->ToObject());
+  fun->Call(CcTest::global(), 1, &break_id);
+}
+
+
+TEST(Regress385349) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::Isolate* isolate = CcTest::isolate();
+  HandleScope handle_scope(isolate);
+  isolate->SetAutorunMicrotasks(false);
+  Handle<Context> context = Context::New(isolate);
+  v8::Debug::SetDebugEventListener(DebugEventInObserver);
+  {
+    Context::Scope context_scope(context);
+    CompileRun("var obj = {};"
+               "Object.observe(obj, function(changes) { debugger; });"
+               "obj.a = 0;");
+  }
+  isolate->RunMicrotasks();
+  isolate->SetAutorunMicrotasks(true);
+  v8::Debug::SetDebugEventListener(NULL);
+}
+
+
 #ifdef DEBUG
 static int probes_counter = 0;
 static int misses_counter = 0;
@@ -21085,8 +21176,7 @@
 }
 
 
-static void CheckInstanceCheckedResult(int getters,
-                                       int setters,
+static void CheckInstanceCheckedResult(int getters, int setters,
                                        bool expects_callbacks,
                                        TryCatch* try_catch) {
   if (expects_callbacks) {
@@ -21209,10 +21299,8 @@
 
   Local<FunctionTemplate> templ = FunctionTemplate::New(context->GetIsolate());
   Local<ObjectTemplate> proto = templ->PrototypeTemplate();
-  proto->SetAccessor(v8_str("foo"),
-                     InstanceCheckedGetter, InstanceCheckedSetter,
-                     Handle<Value>(),
-                     v8::DEFAULT,
+  proto->SetAccessor(v8_str("foo"), InstanceCheckedGetter,
+                     InstanceCheckedSetter, Handle<Value>(), v8::DEFAULT,
                      v8::None,
                      v8::AccessorSignature::New(context->GetIsolate(), templ));
   context->Global()->Set(v8_str("f"), templ->GetFunction());
@@ -21478,8 +21566,6 @@
 
 
 THREADED_TEST(Regress2535) {
-  i::FLAG_harmony_collections = true;
-  i::FLAG_harmony_symbols = true;
   LocalContext context;
   v8::HandleScope scope(context->GetIsolate());
   Local<Value> set_value = CompileRun("new Set();");
@@ -21537,7 +21623,7 @@
 }
 
 
-#if V8_OS_POSIX
+#if V8_OS_POSIX && !V8_OS_NACL
 class ThreadInterruptTest {
  public:
   ThreadInterruptTest() : sem_(0), sem_value_(0) { }
@@ -21554,16 +21640,16 @@
  private:
   static const int kExpectedValue = 1;
 
-  class InterruptThread : public i::Thread {
+  class InterruptThread : public v8::base::Thread {
    public:
     explicit InterruptThread(ThreadInterruptTest* test)
-        : Thread("InterruptThread"), test_(test) {}
+        : Thread(Options("InterruptThread")), test_(test) {}
 
     virtual void Run() {
       struct sigaction action;
 
       // Ensure that we'll enter the waiting condition
-      i::OS::Sleep(100);
+      v8::base::OS::Sleep(100);
 
       // Set up the signal handler
       memset(&action, 0, sizeof(action));
@@ -21574,7 +21660,7 @@
       kill(getpid(), SIGCHLD);
 
       // Leave time to notice if wait returned early because of an error
-      i::OS::Sleep(100);
+      v8::base::OS::Sleep(100);
 
       // Set value and signal semaphore
       test_->sem_value_ = 1;
@@ -21588,7 +21674,7 @@
      ThreadInterruptTest* test_;
   };
 
-  i::Semaphore sem_;
+  v8::base::Semaphore sem_;
   volatile int sem_value_;
 };
 
@@ -21655,11 +21741,9 @@
     LocalContext context1(NULL, global_template);
     context1->Global()->Set(v8_str("other"), global0);
 
-    ExpectString("JSON.stringify(other)", "{}");
-    ExpectString("JSON.stringify({ 'a' : other, 'b' : ['c'] })",
-                 "{\"a\":{},\"b\":[\"c\"]}");
-    ExpectString("JSON.stringify([other, 'b', 'c'])",
-                 "[{},\"b\",\"c\"]");
+    CHECK(CompileRun("JSON.stringify(other)").IsEmpty());
+    CHECK(CompileRun("JSON.stringify({ 'a' : other, 'b' : ['c'] })").IsEmpty());
+    CHECK(CompileRun("JSON.stringify([other, 'b', 'c'])").IsEmpty());
 
     v8::Handle<v8::Array> array = v8::Array::New(isolate, 2);
     array->Set(0, v8_str("a"));
@@ -21667,9 +21751,9 @@
     context1->Global()->Set(v8_str("array"), array);
     ExpectString("JSON.stringify(array)", "[\"a\",\"b\"]");
     array->TurnOnAccessCheck();
-    ExpectString("JSON.stringify(array)", "[]");
-    ExpectString("JSON.stringify([array])", "[[]]");
-    ExpectString("JSON.stringify({'a' : array})", "{\"a\":[]}");
+    CHECK(CompileRun("JSON.stringify(array)").IsEmpty());
+    CHECK(CompileRun("JSON.stringify([array])").IsEmpty());
+    CHECK(CompileRun("JSON.stringify({'a' : array})").IsEmpty());
   }
 }
 
@@ -21738,7 +21822,6 @@
 
   // Create a context and set an x property on it's global object.
   LocalContext context0(NULL, global_template);
-  context0->Global()->Set(v8_str("x"), v8_num(42));
   v8::Handle<v8::Object> global0 = context0->Global();
 
   // Create a context with a different security token so that the
@@ -21767,8 +21850,8 @@
   CheckCorrectThrow("JSON.stringify(other)");
   CheckCorrectThrow("has_own_property(other, 'x')");
   CheckCorrectThrow("%GetProperty(other, 'x')");
-  CheckCorrectThrow("%SetProperty(other, 'x', 'foo', 1, 0)");
-  CheckCorrectThrow("%IgnoreAttributesAndSetProperty(other, 'x', 'foo')");
+  CheckCorrectThrow("%SetProperty(other, 'x', 'foo', 0)");
+  CheckCorrectThrow("%AddNamedProperty(other, 'x', 'foo', 1)");
   CheckCorrectThrow("%DeleteProperty(other, 'x', 0)");
   CheckCorrectThrow("%DeleteProperty(other, '1', 0)");
   CheckCorrectThrow("%HasOwnProperty(other, 'x')");
@@ -21778,7 +21861,7 @@
   CheckCorrectThrow("%GetPropertyNames(other)");
   // PROPERTY_ATTRIBUTES_NONE = 0
   CheckCorrectThrow("%GetOwnPropertyNames(other, 0)");
-  CheckCorrectThrow("%DefineOrRedefineAccessorProperty("
+  CheckCorrectThrow("%DefineAccessorPropertyUnchecked("
                         "other, 'x', null, null, 1)");
 
   // Reset the failed access check callback so it does not influence
@@ -21948,7 +22031,7 @@
 
   LocalContext env_;
   v8::Isolate* isolate_;
-  i::Semaphore sem_;
+  v8::base::Semaphore sem_;
   int warmup_;
   bool should_continue_;
 };
@@ -21964,10 +22047,10 @@
   }
 
  private:
-  class InterruptThread : public i::Thread {
+  class InterruptThread : public v8::base::Thread {
    public:
     explicit InterruptThread(RequestInterruptTestBase* test)
-        : Thread("RequestInterruptTest"), test_(test) {}
+        : Thread(Options("RequestInterruptTest")), test_(test) {}
 
     virtual void Run() {
       test_->sem_.Wait();
@@ -22184,10 +22267,10 @@
   }
 
  private:
-  class InterruptThread : public i::Thread {
+  class InterruptThread : public v8::base::Thread {
    public:
     explicit InterruptThread(ClearInterruptFromAnotherThread* test)
-        : Thread("RequestInterruptTest"), test_(test) {}
+        : Thread(Options("RequestInterruptTest")), test_(test) {}
 
     virtual void Run() {
       test_->sem_.Wait();
@@ -22201,7 +22284,7 @@
       ClearInterruptFromAnotherThread* test =
           reinterpret_cast<ClearInterruptFromAnotherThread*>(data);
       test->sem_.Signal();
-      bool success = test->sem2_.WaitFor(i::TimeDelta::FromSeconds(2));
+      bool success = test->sem2_.WaitFor(v8::base::TimeDelta::FromSeconds(2));
       // Crash instead of timeout to make this failure more prominent.
       CHECK(success);
       test->should_continue_ = false;
@@ -22212,7 +22295,7 @@
   };
 
   InterruptThread i_thread;
-  i::Semaphore sem2_;
+  v8::base::Semaphore sem2_;
 };
 
 
@@ -22336,12 +22419,12 @@
   void RunAll() {
     SignatureType signature_types[] =
       {kNoSignature, kSignatureOnReceiver, kSignatureOnPrototype};
-    for (unsigned i = 0; i < ARRAY_SIZE(signature_types); i++) {
+    for (unsigned i = 0; i < arraysize(signature_types); i++) {
       SignatureType signature_type = signature_types[i];
       for (int j = 0; j < 2; j++) {
         bool global = j == 0;
         int key = signature_type +
-            ARRAY_SIZE(signature_types) * (global ? 1 : 0);
+            arraysize(signature_types) * (global ? 1 : 0);
         Run(signature_type, global, key);
       }
     }
@@ -22469,7 +22552,7 @@
         wrap_function.start(), key, key, key, key, key, key);
     v8::TryCatch try_catch;
     CompileRun(source.start());
-    ASSERT(!try_catch.HasCaught());
+    DCHECK(!try_catch.HasCaught());
     CHECK_EQ(9, count);
   }
 };
@@ -22785,27 +22868,489 @@
 }
 
 
-Local<v8::Context> call_eval_context;
-Local<v8::Function> call_eval_bound_function;
-static void CallEval(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  v8::Context::Scope scope(call_eval_context);
-  args.GetReturnValue().Set(
-      call_eval_bound_function->Call(call_eval_context->Global(), 0, NULL));
+void SourceURLHelper(const char* source, const char* expected_source_url,
+                     const char* expected_source_mapping_url) {
+  Local<Script> script = v8_compile(source);
+  if (expected_source_url != NULL) {
+    v8::String::Utf8Value url(script->GetUnboundScript()->GetSourceURL());
+    CHECK_EQ(expected_source_url, *url);
+  } else {
+    CHECK(script->GetUnboundScript()->GetSourceURL()->IsUndefined());
+  }
+  if (expected_source_mapping_url != NULL) {
+    v8::String::Utf8Value url(
+        script->GetUnboundScript()->GetSourceMappingURL());
+    CHECK_EQ(expected_source_mapping_url, *url);
+  } else {
+    CHECK(script->GetUnboundScript()->GetSourceMappingURL()->IsUndefined());
+  }
 }
 
 
-TEST(CrossActivationEval) {
+TEST(ScriptSourceURLAndSourceMappingURL) {
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
   v8::HandleScope scope(isolate);
-  {
-    call_eval_context = v8::Context::New(isolate);
-    v8::Context::Scope scope(call_eval_context);
-    call_eval_bound_function =
-        Local<Function>::Cast(CompileRun("eval.bind(this, '1')"));
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=bar1.js\n", "bar1.js", NULL);
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceMappingURL=bar2.js\n", NULL, "bar2.js");
+
+  // Both sourceURL and sourceMappingURL.
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=bar3.js\n"
+                  "//# sourceMappingURL=bar4.js\n", "bar3.js", "bar4.js");
+
+  // Two source URLs; the first one is ignored.
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=ignoreme.js\n"
+                  "//# sourceURL=bar5.js\n", "bar5.js", NULL);
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceMappingURL=ignoreme.js\n"
+                  "//# sourceMappingURL=bar6.js\n", NULL, "bar6.js");
+
+  // SourceURL or sourceMappingURL in the middle of the script.
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=bar7.js\n"
+                  "function baz() {}\n", "bar7.js", NULL);
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceMappingURL=bar8.js\n"
+                  "function baz() {}\n", NULL, "bar8.js");
+
+  // Too much whitespace.
+  SourceURLHelper("function foo() {}\n"
+                  "//#  sourceURL=bar9.js\n"
+                  "//#  sourceMappingURL=bar10.js\n", NULL, NULL);
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL =bar11.js\n"
+                  "//# sourceMappingURL =bar12.js\n", NULL, NULL);
+
+  // Disallowed characters in value.
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=bar13 .js   \n"
+                  "//# sourceMappingURL=bar14 .js \n",
+                  NULL, NULL);
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=bar15\t.js   \n"
+                  "//# sourceMappingURL=bar16\t.js \n",
+                  NULL, NULL);
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=bar17'.js   \n"
+                  "//# sourceMappingURL=bar18'.js \n",
+                  NULL, NULL);
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=bar19\".js   \n"
+                  "//# sourceMappingURL=bar20\".js \n",
+                  NULL, NULL);
+
+  // Not too much whitespace.
+  SourceURLHelper("function foo() {}\n"
+                  "//# sourceURL=  bar21.js   \n"
+                  "//# sourceMappingURL=  bar22.js \n", "bar21.js", "bar22.js");
+}
+
+
+TEST(GetOwnPropertyDescriptor) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+  CompileRun(
+    "var x = { value : 13};"
+    "Object.defineProperty(x, 'p0', {value : 12});"
+    "Object.defineProperty(x, 'p1', {"
+    "  set : function(value) { this.value = value; },"
+    "  get : function() { return this.value; },"
+    "});");
+  Local<Object> x = Local<Object>::Cast(env->Global()->Get(v8_str("x")));
+  Local<Value> desc = x->GetOwnPropertyDescriptor(v8_str("no_prop"));
+  CHECK(desc->IsUndefined());
+  desc = x->GetOwnPropertyDescriptor(v8_str("p0"));
+  CHECK_EQ(v8_num(12), Local<Object>::Cast(desc)->Get(v8_str("value")));
+  desc = x->GetOwnPropertyDescriptor(v8_str("p1"));
+  Local<Function> set =
+    Local<Function>::Cast(Local<Object>::Cast(desc)->Get(v8_str("set")));
+  Local<Function> get =
+    Local<Function>::Cast(Local<Object>::Cast(desc)->Get(v8_str("get")));
+  CHECK_EQ(v8_num(13), get->Call(x, 0, NULL));
+  Handle<Value> args[] = { v8_num(14) };
+  set->Call(x, 1, args);
+  CHECK_EQ(v8_num(14), get->Call(x, 0, NULL));
+}
+
+
+TEST(Regress411877) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  v8::Handle<v8::ObjectTemplate> object_template =
+      v8::ObjectTemplate::New(isolate);
+  object_template->SetAccessCheckCallbacks(NamedAccessCounter,
+                                           IndexedAccessCounter);
+
+  v8::Handle<Context> context = Context::New(isolate);
+  v8::Context::Scope context_scope(context);
+
+  context->Global()->Set(v8_str("o"), object_template->NewInstance());
+  CompileRun("Object.getOwnPropertyNames(o)");
+}
+
+
+TEST(GetHiddenPropertyTableAfterAccessCheck) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  v8::Handle<v8::ObjectTemplate> object_template =
+      v8::ObjectTemplate::New(isolate);
+  object_template->SetAccessCheckCallbacks(NamedAccessCounter,
+                                           IndexedAccessCounter);
+
+  v8::Handle<Context> context = Context::New(isolate);
+  v8::Context::Scope context_scope(context);
+
+  v8::Handle<v8::Object> obj = object_template->NewInstance();
+  obj->Set(v8_str("key"), v8_str("value"));
+  obj->Delete(v8_str("key"));
+
+  obj->SetHiddenValue(v8_str("hidden key 2"), v8_str("hidden value 2"));
+}
+
+
+TEST(Regress411793) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  v8::Handle<v8::ObjectTemplate> object_template =
+      v8::ObjectTemplate::New(isolate);
+  object_template->SetAccessCheckCallbacks(NamedAccessCounter,
+                                           IndexedAccessCounter);
+
+  v8::Handle<Context> context = Context::New(isolate);
+  v8::Context::Scope context_scope(context);
+
+  context->Global()->Set(v8_str("o"), object_template->NewInstance());
+  CompileRun(
+      "Object.defineProperty(o, 'key', "
+      "    { get: function() {}, set: function() {} });");
+}
+
+class TestSourceStream : public v8::ScriptCompiler::ExternalSourceStream {
+ public:
+  explicit TestSourceStream(const char** chunks) : chunks_(chunks), index_(0) {}
+
+  virtual size_t GetMoreData(const uint8_t** src) {
+    // Unlike in real use cases, this function will never block.
+    if (chunks_[index_] == NULL) {
+      return 0;
+    }
+    // Copy the data, since the caller takes ownership of it.
+    size_t len = strlen(chunks_[index_]);
+    // We don't need to zero-terminate since we return the length.
+    uint8_t* copy = new uint8_t[len];
+    memcpy(copy, chunks_[index_], len);
+    *src = copy;
+    ++index_;
+    return len;
   }
-  env->Global()->Set(v8_str("CallEval"),
-      v8::FunctionTemplate::New(isolate, CallEval)->GetFunction());
-  Local<Value> result = CompileRun("CallEval();");
-  CHECK_EQ(result, v8::Integer::New(isolate, 1));
+
+  // Helper for constructing the full source string from the chunks (the
+  // compilation step needs it too).
+  static char* FullSourceString(const char** chunks) {
+    size_t total_len = 0;
+    for (size_t i = 0; chunks[i] != NULL; ++i) {
+      total_len += strlen(chunks[i]);
+    }
+    char* full_string = new char[total_len + 1];
+    size_t offset = 0;
+    for (size_t i = 0; chunks[i] != NULL; ++i) {
+      size_t len = strlen(chunks[i]);
+      memcpy(full_string + offset, chunks[i], len);
+      offset += len;
+    }
+    full_string[total_len] = 0;
+    return full_string;
+  }
+
+ private:
+  const char** chunks_;
+  unsigned index_;
+};
+
+
+// Helper function for running streaming tests.
+void RunStreamingTest(const char** chunks,
+                      v8::ScriptCompiler::StreamedSource::Encoding encoding =
+                          v8::ScriptCompiler::StreamedSource::ONE_BYTE,
+                      bool expected_success = true) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+  v8::TryCatch try_catch;
+
+  v8::ScriptCompiler::StreamedSource source(new TestSourceStream(chunks),
+                                            encoding);
+  v8::ScriptCompiler::ScriptStreamingTask* task =
+      v8::ScriptCompiler::StartStreamingScript(isolate, &source);
+
+  // TestSourceStream::GetMoreData won't block, so it's OK to just run the
+  // task here in the main thread.
+  task->Run();
+  delete task;
+
+  v8::ScriptOrigin origin(v8_str("http://foo.com"));
+  char* full_source = TestSourceStream::FullSourceString(chunks);
+
+  // Any errors are produced only while compiling.
+  CHECK_EQ(false, try_catch.HasCaught());
+
+  v8::Handle<Script> script = v8::ScriptCompiler::Compile(
+      isolate, &source, v8_str(full_source), origin);
+  if (expected_success) {
+    CHECK(!script.IsEmpty());
+    v8::Handle<Value> result(script->Run());
+    // All scripts are supposed to return the fixed value 13 when run.
+    CHECK_EQ(13, result->Int32Value());
+  } else {
+    CHECK(script.IsEmpty());
+    CHECK(try_catch.HasCaught());
+  }
+  delete[] full_source;
+}
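
Because TestSourceStream never blocks, the task can run inline; a real embedder would run it off the main thread and only then hand the source to Compile. A hedged sketch (std::thread is an illustrative choice, not what V8 ships, and v8_str is the cctest helper used above):

    #include <thread>

    v8::Handle<v8::Script> CompileStreamed(
        v8::Isolate* isolate, v8::ScriptCompiler::StreamedSource* source,
        const char* full_source) {
      v8::ScriptCompiler::ScriptStreamingTask* task =
          v8::ScriptCompiler::StartStreamingScript(isolate, source);
      std::thread background([task] { task->Run(); });  // parse in background
      background.join();  // streaming must finish before compiling
      delete task;
      v8::ScriptOrigin origin(v8_str("stream.js"));
      return v8::ScriptCompiler::Compile(isolate, source, v8_str(full_source),
                                         origin);
    }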
+
+
+TEST(StreamingSimpleScript) {
+  // This script is unrealistically small, since no single chunk is enough to
+  // fill the backing buffer of the Scanner, let alone overflow it.
+  const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
+                          NULL};
+  RunStreamingTest(chunks);
+}
+
+
+TEST(StreamingBiggerScript) {
+  const char* chunk1 =
+      "function foo() {\n"
+      "  // Make this chunk sufficiently long so that it will overflow the\n"
+      "  // backing buffer of the Scanner.\n"
+      "  var i = 0;\n"
+      "  var result = 0;\n"
+      "  for (i = 0; i < 13; ++i) { result = result + 1; }\n"
+      "  result = 0;\n"
+      "  for (i = 0; i < 13; ++i) { result = result + 1; }\n"
+      "  result = 0;\n"
+      "  for (i = 0; i < 13; ++i) { result = result + 1; }\n"
+      "  result = 0;\n"
+      "  for (i = 0; i < 13; ++i) { result = result + 1; }\n"
+      "  return result;\n"
+      "}\n";
+  const char* chunks[] = {chunk1, "foo(); ", NULL};
+  RunStreamingTest(chunks);
+}
+
+
+TEST(StreamingScriptWithParseError) {
+  // Test that parse errors from streamed scripts are propagated correctly.
+  {
+    char chunk1[] =
+        "  // This will result in a parse error.\n"
+        "  var if else then foo";
+    char chunk2[] = "  13\n";
+    const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+
+    RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::ONE_BYTE,
+                     false);
+  }
+  // Test that the next script succeeds normally.
+  {
+    char chunk1[] =
+        "  // This will be parsed successfully.\n"
+        "  function foo() { return ";
+    char chunk2[] = "  13; }\n";
+    const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+
+    RunStreamingTest(chunks);
+  }
+}
+
+
+TEST(StreamingUtf8Script) {
+  // We'd want to write \ub440 instead of \xeb\x91\x80, but Windows compilers
+  // don't like it.
+  const char* chunk1 =
+      "function foo() {\n"
+      "  // This function will contain an UTF-8 character which is not in\n"
+      "  // ASCII.\n"
+      "  var foob\xeb\x91\x80r = 13;\n"
+      "  return foob\xeb\x91\x80r;\n"
+      "}\n";
+  const char* chunks[] = {chunk1, "foo(); ", NULL};
+  RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+}
+
+
+TEST(StreamingUtf8ScriptWithSplitCharactersSanityCheck) {
+  // A sanity check to prove that the approach of splitting UTF-8
+  // characters is correct. Here is a UTF-8 character that takes three
+  // bytes.
+  const char* reference = "\xeb\x91\x80";
+  CHECK(3u == strlen(reference));  // NOLINT - no CHECK_EQ for unsigned.
+
+  char chunk1[] =
+      "function foo() {\n"
+      "  // This function will contain an UTF-8 character which is not in\n"
+      "  // ASCII.\n"
+      "  var foob";
+  char chunk2[] =
+      "XXXr = 13;\n"
+      "  return foob\xeb\x91\x80r;\n"
+      "}\n";
+  for (int i = 0; i < 3; ++i) {
+    chunk2[i] = reference[i];
+  }
+  const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+  RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+}
+
+
+TEST(StreamingUtf8ScriptWithSplitCharacters) {
+  // Stream data where a multi-byte UTF-8 character is split between two data
+  // chunks.
+  const char* reference = "\xeb\x91\x80";
+  char chunk1[] =
+      "function foo() {\n"
+      "  // This function will contain an UTF-8 character which is not in\n"
+      "  // ASCII.\n"
+      "  var foobX";
+  char chunk2[] =
+      "XXr = 13;\n"
+      "  return foob\xeb\x91\x80r;\n"
+      "}\n";
+  chunk1[strlen(chunk1) - 1] = reference[0];
+  chunk2[0] = reference[1];
+  chunk2[1] = reference[2];
+  const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+  RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+}
+
+
+TEST(StreamingUtf8ScriptWithSplitCharactersValidEdgeCases) {
+  // Tests edge cases which should still be decoded correctly.
+
+  // Case 1: a chunk contains only bytes for a split character (and no other
+  // data). This kind of chunk would be exceptionally small, but we should
+  // still decode it correctly.
+  const char* reference = "\xeb\x91\x80";
+  // The small chunk is at the beginning of the split character.
+  {
+    char chunk1[] =
+        "function foo() {\n"
+        "  // This function will contain an UTF-8 character which is not in\n"
+        "  // ASCII.\n"
+        "  var foob";
+    char chunk2[] = "XX";
+    char chunk3[] =
+        "Xr = 13;\n"
+        "  return foob\xeb\x91\x80r;\n"
+        "}\n";
+    chunk2[0] = reference[0];
+    chunk2[1] = reference[1];
+    chunk3[0] = reference[2];
+    const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+    RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+  }
+  // The small chunk is at the end of the split character.
+  {
+    char chunk1[] =
+        "function foo() {\n"
+        "  // This function will contain an UTF-8 character which is not in\n"
+        "  // ASCII.\n"
+        "  var foobX";
+    char chunk2[] = "XX";
+    char chunk3[] =
+        "r = 13;\n"
+        "  return foob\xeb\x91\x80r;\n"
+        "}\n";
+    chunk1[strlen(chunk1) - 1] = reference[0];
+    chunk2[0] = reference[1];
+    chunk2[1] = reference[2];
+    const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+    RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+  }
+  // Case 2: the script ends with a multi-byte character. Make sure that it's
+  // decoded correctly and not just ignored.
+  {
+    char chunk1[] =
+        "var foob\xeb\x91\x80 = 13;\n"
+        "foob\xeb\x91\x80";
+    const char* chunks[] = {chunk1, NULL};
+    RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
+  }
+}
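
All of the split-character cases above hinge on how many bytes a UTF-8 lead byte promises; a streaming decoder has to buffer a sequence that a chunk boundary cuts short. A small hedged helper in plain C++ (not V8's actual decoder):

    #include <cstdint>

    // Total length of the sequence announced by a lead byte; 0 for a
    // continuation byte or an invalid lead.
    static int Utf8SequenceLength(uint8_t lead) {
      if (lead < 0x80) return 1;            // 0xxxxxxx: ASCII
      if ((lead & 0xe0) == 0xc0) return 2;  // 110xxxxx
      if ((lead & 0xf0) == 0xe0) return 3;  // 1110xxxx, e.g. 0xeb above
      if ((lead & 0xf8) == 0xf0) return 4;  // 11110xxx
      return 0;                             // 10xxxxxx or invalid
    }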
+
+
+TEST(StreamingUtf8ScriptWithSplitCharactersInvalidEdgeCases) {
+  // Test cases where a UTF-8 character is split over several chunks. Those
+  // cases are not supported (the embedder should give the data in big enough
+  // chunks), but we shouldn't crash, just produce a parse error.
+  const char* reference = "\xeb\x91\x80";
+  char chunk1[] =
+      "function foo() {\n"
+      "  // This function will contain an UTF-8 character which is not in\n"
+      "  // ASCII.\n"
+      "  var foobX";
+  char chunk2[] = "X";
+  char chunk3[] =
+      "Xr = 13;\n"
+      "  return foob\xeb\x91\x80r;\n"
+      "}\n";
+  chunk1[strlen(chunk1) - 1] = reference[0];
+  chunk2[0] = reference[1];
+  chunk3[0] = reference[2];
+  const char* chunks[] = {chunk1, chunk2, chunk3, "foo();", NULL};
+
+  RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, false);
+}
+
+
+TEST(StreamingProducesParserCache) {
+  i::FLAG_min_preparse_length = 0;
+  const char* chunks[] = {"function foo() { ret", "urn 13; } f", "oo(); ",
+                          NULL};
+
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+
+  v8::ScriptCompiler::StreamedSource source(
+      new TestSourceStream(chunks),
+      v8::ScriptCompiler::StreamedSource::ONE_BYTE);
+  v8::ScriptCompiler::ScriptStreamingTask* task =
+      v8::ScriptCompiler::StartStreamingScript(
+          isolate, &source, v8::ScriptCompiler::kProduceParserCache);
+
+  // TestSourceStream::GetMoreData won't block, so it's OK to just run the
+  // task here in the main thread.
+  task->Run();
+  delete task;
+
+  const v8::ScriptCompiler::CachedData* cached_data = source.GetCachedData();
+  CHECK(cached_data != NULL);
+  CHECK(cached_data->data != NULL);
+  CHECK_GT(cached_data->length, 0);
+}
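
The cache produced here is only useful when fed back into a later compile. A hedged sketch of the consuming side, assuming the kConsumeParserCache option and the Source(CachedData*) constructor of this V8 vintage (v8_str is the cctest helper used above):

    void CompileWithParserCache(v8::Isolate* isolate, const char* full_source,
                                const uint8_t* data, int length) {
      v8::ScriptCompiler::Source cached_source(
          v8_str(full_source),
          new v8::ScriptCompiler::CachedData(data, length));
      v8::ScriptCompiler::Compile(isolate, &cached_source,
                                  v8::ScriptCompiler::kConsumeParserCache);
    }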
+
+
+TEST(StreamingScriptWithInvalidUtf8) {
+  // Regression test: invalid UTF-8 bytes at the end of a chunk must not
+  // produce a crash.
+  const char* reference = "\xeb\x91\x80\x80\x80";
+  char chunk1[] =
+      "function foo() {\n"
+      "  // This function will contain an UTF-8 character which is not in\n"
+      "  // ASCII.\n"
+      "  var foobXXXXX";  // Too many bytes which look like incomplete chars!
+  char chunk2[] =
+      "r = 13;\n"
+      "  return foob\xeb\x91\x80\x80\x80r;\n"
+      "}\n";
+  for (int i = 0; i < 5; ++i) chunk1[strlen(chunk1) - 5 + i] = reference[i];
+
+  const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
+  RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8, false);
 }
diff --git a/test/cctest/test-assembler-arm.cc b/test/cctest/test-assembler-arm.cc
index b4f3b95..ed9563d 100644
--- a/test/cctest/test-assembler-arm.cc
+++ b/test/cctest/test-assembler-arm.cc
@@ -26,12 +26,13 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "src/v8.h"
+#include "test/cctest/cctest.h"
 
+#include "src/arm/assembler-arm-inl.h"
+#include "src/arm/simulator-arm.h"
 #include "src/disassembler.h"
 #include "src/factory.h"
-#include "src/arm/simulator-arm.h"
-#include "src/arm/assembler-arm-inl.h"
-#include "test/cctest/cctest.h"
+#include "src/ostreams.h"
 
 using namespace v8::internal;
 
@@ -60,7 +61,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F2 f = FUNCTION_CAST<F2>(code->entry());
   int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 3, 4, 0, 0, 0));
@@ -95,7 +97,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F1 f = FUNCTION_CAST<F1>(code->entry());
   int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 100, 0, 0, 0, 0));
@@ -139,7 +142,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F1 f = FUNCTION_CAST<F1>(code->entry());
   int res = reinterpret_cast<int>(CALL_GENERATED_CODE(f, 10, 0, 0, 0, 0));
@@ -185,7 +189,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F3 f = FUNCTION_CAST<F3>(code->entry());
   t.i = 100000;
@@ -308,7 +313,8 @@
     Handle<Code> code = isolate->factory()->NewCode(
         desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-    code->Print();
+    OFStream os(stdout);
+    code->Print(os);
 #endif
     F3 f = FUNCTION_CAST<F3>(code->entry());
     t.a = 1.5;
@@ -368,7 +374,8 @@
     Handle<Code> code = isolate->factory()->NewCode(
         desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-    code->Print();
+    OFStream os(stdout);
+    code->Print(os);
 #endif
     F1 f = FUNCTION_CAST<F1>(code->entry());
     int res = reinterpret_cast<int>(
@@ -401,7 +408,8 @@
     Handle<Code> code = isolate->factory()->NewCode(
         desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-    code->Print();
+    OFStream os(stdout);
+    code->Print(os);
 #endif
     F1 f = FUNCTION_CAST<F1>(code->entry());
     int res = reinterpret_cast<int>(
@@ -474,7 +482,8 @@
     Handle<Code> code = isolate->factory()->NewCode(
         desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-    code->Print();
+    OFStream os(stdout);
+    code->Print(os);
 #endif
     F1 f = FUNCTION_CAST<F1>(code->entry());
     int res = reinterpret_cast<int>(
@@ -657,7 +666,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F4 fn = FUNCTION_CAST<F4>(code->entry());
   d.a = 1.1;
@@ -766,7 +776,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F4 fn = FUNCTION_CAST<F4>(code->entry());
   d.a = 1.1;
@@ -871,7 +882,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F4 fn = FUNCTION_CAST<F4>(code->entry());
   d.a = 1.1;
@@ -965,7 +977,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F3 f = FUNCTION_CAST<F3>(code->entry());
   Object* dummy = CALL_GENERATED_CODE(f, &i, 0, 0, 0, 0);
@@ -1092,7 +1105,8 @@
     Handle<Code> code = isolate->factory()->NewCode(
         desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-    code->Print();
+    OFStream os(stdout);
+    code->Print(os);
 #endif
     F3 f = FUNCTION_CAST<F3>(code->entry());
     t.a = 1.5;
@@ -1164,10 +1178,11 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F3 f = FUNCTION_CAST<F3>(code->entry());
-  t.left = BitCast<double>(kHoleNanInt64);
+  t.left = bit_cast<double>(kHoleNanInt64);
   t.right = 1;
   t.add_result = 0;
   t.sub_result = 0;
@@ -1180,18 +1195,22 @@
 #ifdef DEBUG
   const uint64_t kArmNanInt64 =
       (static_cast<uint64_t>(kArmNanUpper32) << 32) | kArmNanLower32;
-  ASSERT(kArmNanInt64 != kHoleNanInt64);
+  DCHECK(kArmNanInt64 != kHoleNanInt64);
 #endif
   // With VFP2 the sign of the canonicalized Nan is undefined. So
   // we remove the sign bit for the upper tests.
-  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
-  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.add_result) & 0xffffffffu);
-  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
-  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.sub_result) & 0xffffffffu);
-  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
-  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.mul_result) & 0xffffffffu);
-  CHECK_EQ(kArmNanUpper32, (BitCast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
-  CHECK_EQ(kArmNanLower32, BitCast<int64_t>(t.div_result) & 0xffffffffu);
+  CHECK_EQ(kArmNanUpper32,
+           (bit_cast<int64_t>(t.add_result) >> 32) & 0x7fffffff);
+  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.add_result) & 0xffffffffu);
+  CHECK_EQ(kArmNanUpper32,
+           (bit_cast<int64_t>(t.sub_result) >> 32) & 0x7fffffff);
+  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.sub_result) & 0xffffffffu);
+  CHECK_EQ(kArmNanUpper32,
+           (bit_cast<int64_t>(t.mul_result) >> 32) & 0x7fffffff);
+  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.mul_result) & 0xffffffffu);
+  CHECK_EQ(kArmNanUpper32,
+           (bit_cast<int64_t>(t.div_result) >> 32) & 0x7fffffff);
+  CHECK_EQ(kArmNanLower32, bit_cast<int64_t>(t.div_result) & 0xffffffffu);
 }
 
 
@@ -1267,7 +1286,8 @@
     Handle<Code> code = isolate->factory()->NewCode(
         desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-    code->Print();
+    OFStream os(stdout);
+    code->Print(os);
 #endif
     F3 f = FUNCTION_CAST<F3>(code->entry());
     t.src0 = 0x01020304;
@@ -1369,7 +1389,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F3 f = FUNCTION_CAST<F3>(code->entry());
   t.src0 = 0x01020304;
@@ -1451,7 +1472,8 @@
     Handle<Code> code = isolate->factory()->NewCode(
         desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef DEBUG
-    code->Print();
+    OFStream os(stdout);
+    code->Print(os);
 #endif
     F3 f = FUNCTION_CAST<F3>(code->entry());
     Object* dummy;
diff --git a/test/cctest/test-assembler-arm64.cc b/test/cctest/test-assembler-arm64.cc
index db571a7..587a4ce 100644
--- a/test/cctest/test-assembler-arm64.cc
+++ b/test/cctest/test-assembler-arm64.cc
@@ -33,11 +33,11 @@
 
 #include "src/v8.h"
 
-#include "src/macro-assembler.h"
-#include "src/arm64/simulator-arm64.h"
 #include "src/arm64/decoder-arm64-inl.h"
 #include "src/arm64/disasm-arm64.h"
+#include "src/arm64/simulator-arm64.h"
 #include "src/arm64/utils-arm64.h"
+#include "src/macro-assembler.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-utils-arm64.h"
 
@@ -58,7 +58,7 @@
 //
 //     RUN();
 //
-//     ASSERT_EQUAL_64(1, x0);
+//     CHECK_EQUAL_64(1, x0);
 //
 //     TEARDOWN();
 //   }
@@ -74,22 +74,22 @@
 //
 // We provide some helper asserts to handle common cases:
 //
-//   ASSERT_EQUAL_32(int32_t, int_32t)
-//   ASSERT_EQUAL_FP32(float, float)
-//   ASSERT_EQUAL_32(int32_t, W register)
-//   ASSERT_EQUAL_FP32(float, S register)
-//   ASSERT_EQUAL_64(int64_t, int_64t)
-//   ASSERT_EQUAL_FP64(double, double)
-//   ASSERT_EQUAL_64(int64_t, X register)
-//   ASSERT_EQUAL_64(X register, X register)
-//   ASSERT_EQUAL_FP64(double, D register)
+//   CHECK_EQUAL_32(int32_t, int_32t)
+//   CHECK_EQUAL_FP32(float, float)
+//   CHECK_EQUAL_32(int32_t, W register)
+//   CHECK_EQUAL_FP32(float, S register)
+//   CHECK_EQUAL_64(int64_t, int_64t)
+//   CHECK_EQUAL_FP64(double, double)
+//   CHECK_EQUAL_64(int64_t, X register)
+//   CHECK_EQUAL_64(X register, X register)
+//   CHECK_EQUAL_FP64(double, D register)
 //
-// e.g. ASSERT_EQUAL_64(0.5, d30);
+// e.g. CHECK_EQUAL_64(0.5, d30);
 //
 // If more advanced computation is required before the check, then access the
 // RegisterDump named core directly:
 //
-//   ASSERT_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
+//   CHECK_EQUAL_64(0x1234, core.xreg(0) & 0xffff);
 
 
 #if 0  // TODO(all): enable.
@@ -116,7 +116,7 @@
 #define SETUP_SIZE(buf_size)                    \
   Isolate* isolate = Isolate::Current();        \
   HandleScope scope(isolate);                   \
-  ASSERT(isolate != NULL);                      \
+  DCHECK(isolate != NULL);                      \
   byte* buf = new byte[buf_size];               \
   MacroAssembler masm(isolate, buf, buf_size);  \
   Decoder<DispatchingDecoderVisitor>* decoder = \
@@ -170,7 +170,7 @@
 #define SETUP_SIZE(buf_size)                                                   \
   Isolate* isolate = Isolate::Current();                                       \
   HandleScope scope(isolate);                                                  \
-  ASSERT(isolate != NULL);                                                     \
+  DCHECK(isolate != NULL);                                                     \
   byte* buf = new byte[buf_size];                                              \
   MacroAssembler masm(isolate, buf, buf_size);                                 \
   RegisterDump core;
@@ -190,12 +190,12 @@
   RESET();                                                                     \
   START_AFTER_RESET();
 
-#define RUN()                                                                  \
-  CPU::FlushICache(buf, masm.SizeOfGeneratedCode());                           \
-  {                                                                            \
-    void (*test_function)(void);                                               \
-    memcpy(&test_function, &buf, sizeof(buf));                                 \
-    test_function();                                                           \
+#define RUN()                                                \
+  CpuFeatures::FlushICache(buf, masm.SizeOfGeneratedCode()); \
+  {                                                          \
+    void (*test_function)(void);                             \
+    memcpy(&test_function, &buf, sizeof(buf));               \
+    test_function();                                         \
   }
 
 #define END()                                                                  \
@@ -209,29 +209,29 @@
 
 #endif  // ifdef USE_SIMULATOR.
 
-#define ASSERT_EQUAL_NZCV(expected)                                            \
+#define CHECK_EQUAL_NZCV(expected)                                            \
   CHECK(EqualNzcv(expected, core.flags_nzcv()))
 
-#define ASSERT_EQUAL_REGISTERS(expected)                                       \
+#define CHECK_EQUAL_REGISTERS(expected)                                       \
   CHECK(EqualRegisters(&expected, &core))
 
-#define ASSERT_EQUAL_32(expected, result)                                      \
+#define CHECK_EQUAL_32(expected, result)                                      \
   CHECK(Equal32(static_cast<uint32_t>(expected), &core, result))
 
-#define ASSERT_EQUAL_FP32(expected, result)                                    \
+#define CHECK_EQUAL_FP32(expected, result)                                    \
   CHECK(EqualFP32(expected, &core, result))
 
-#define ASSERT_EQUAL_64(expected, result)                                      \
+#define CHECK_EQUAL_64(expected, result)                                      \
   CHECK(Equal64(expected, &core, result))
 
-#define ASSERT_EQUAL_FP64(expected, result)                                    \
+#define CHECK_EQUAL_FP64(expected, result)                                    \
   CHECK(EqualFP64(expected, &core, result))
 
 #ifdef DEBUG
-#define ASSERT_LITERAL_POOL_SIZE(expected)                                     \
+#define DCHECK_LITERAL_POOL_SIZE(expected)                                     \
   CHECK((expected) == (__ LiteralPoolSize()))
 #else
-#define ASSERT_LITERAL_POOL_SIZE(expected)                                     \
+#define DCHECK_LITERAL_POOL_SIZE(expected)                                     \
   ((void) 0)
 #endif
 
@@ -276,12 +276,12 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1000, x0);
-  ASSERT_EQUAL_64(0x1050, x1);
-  ASSERT_EQUAL_64(0x104f, x2);
-  ASSERT_EQUAL_64(0x1fff, x3);
-  ASSERT_EQUAL_64(0xfffffff8, x4);
-  ASSERT_EQUAL_64(0xfffffff8, x5);
+  CHECK_EQUAL_64(0x1000, x0);
+  CHECK_EQUAL_64(0x1050, x1);
+  CHECK_EQUAL_64(0x104f, x2);
+  CHECK_EQUAL_64(0x1fff, x3);
+  CHECK_EQUAL_64(0xfffffff8, x4);
+  CHECK_EQUAL_64(0xfffffff8, x5);
 
   TEARDOWN();
 }
@@ -312,22 +312,22 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xfffff000, x0);
-  ASSERT_EQUAL_64(0xfffffffffffff000UL, x1);
-  ASSERT_EQUAL_64(0x00001fff, x2);
-  ASSERT_EQUAL_64(0x0000000000003fffUL, x3);
-  ASSERT_EQUAL_64(0xe00001ff, x4);
-  ASSERT_EQUAL_64(0xf0000000000000ffUL, x5);
-  ASSERT_EQUAL_64(0x00000001, x6);
-  ASSERT_EQUAL_64(0x0, x7);
-  ASSERT_EQUAL_64(0x7ff80000, x8);
-  ASSERT_EQUAL_64(0x3ffc000000000000UL, x9);
-  ASSERT_EQUAL_64(0xffffff00, x10);
-  ASSERT_EQUAL_64(0x0000000000000001UL, x11);
-  ASSERT_EQUAL_64(0xffff8003, x12);
-  ASSERT_EQUAL_64(0xffffffffffff0007UL, x13);
-  ASSERT_EQUAL_64(0xfffffffffffe000fUL, x14);
-  ASSERT_EQUAL_64(0xfffffffffffe000fUL, x15);
+  CHECK_EQUAL_64(0xfffff000, x0);
+  CHECK_EQUAL_64(0xfffffffffffff000UL, x1);
+  CHECK_EQUAL_64(0x00001fff, x2);
+  CHECK_EQUAL_64(0x0000000000003fffUL, x3);
+  CHECK_EQUAL_64(0xe00001ff, x4);
+  CHECK_EQUAL_64(0xf0000000000000ffUL, x5);
+  CHECK_EQUAL_64(0x00000001, x6);
+  CHECK_EQUAL_64(0x0, x7);
+  CHECK_EQUAL_64(0x7ff80000, x8);
+  CHECK_EQUAL_64(0x3ffc000000000000UL, x9);
+  CHECK_EQUAL_64(0xffffff00, x10);
+  CHECK_EQUAL_64(0x0000000000000001UL, x11);
+  CHECK_EQUAL_64(0xffff8003, x12);
+  CHECK_EQUAL_64(0xffffffffffff0007UL, x13);
+  CHECK_EQUAL_64(0xfffffffffffe000fUL, x14);
+  CHECK_EQUAL_64(0xfffffffffffe000fUL, x15);
 
   TEARDOWN();
 }
@@ -384,31 +384,31 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x0123456789abcdefL, x0);
-  ASSERT_EQUAL_64(0x00000000abcd0000L, x1);
-  ASSERT_EQUAL_64(0xffffabcdffffffffL, x2);
-  ASSERT_EQUAL_64(0x5432ffffffffffffL, x3);
-  ASSERT_EQUAL_64(x4, x5);
-  ASSERT_EQUAL_32(-1, w6);
-  ASSERT_EQUAL_64(0x0123456789abcdefL, x7);
-  ASSERT_EQUAL_32(0x89abcdefL, w8);
-  ASSERT_EQUAL_64(0x0123456789abcdefL, x9);
-  ASSERT_EQUAL_32(0x89abcdefL, w10);
-  ASSERT_EQUAL_64(0x00000fff, x11);
-  ASSERT_EQUAL_64(0x0000000000000fffUL, x12);
-  ASSERT_EQUAL_64(0x00001ffe, x13);
-  ASSERT_EQUAL_64(0x0000000000003ffcUL, x14);
-  ASSERT_EQUAL_64(0x000001ff, x15);
-  ASSERT_EQUAL_64(0x00000000000000ffUL, x18);
-  ASSERT_EQUAL_64(0x00000001, x19);
-  ASSERT_EQUAL_64(0x0, x20);
-  ASSERT_EQUAL_64(0x7ff80000, x21);
-  ASSERT_EQUAL_64(0x3ffc000000000000UL, x22);
-  ASSERT_EQUAL_64(0x000000fe, x23);
-  ASSERT_EQUAL_64(0xfffffffffffffffcUL, x24);
-  ASSERT_EQUAL_64(0x00007ff8, x25);
-  ASSERT_EQUAL_64(0x000000000000fff0UL, x26);
-  ASSERT_EQUAL_64(0x000000000001ffe0UL, x27);
+  CHECK_EQUAL_64(0x0123456789abcdefL, x0);
+  CHECK_EQUAL_64(0x00000000abcd0000L, x1);
+  CHECK_EQUAL_64(0xffffabcdffffffffL, x2);
+  CHECK_EQUAL_64(0x5432ffffffffffffL, x3);
+  CHECK_EQUAL_64(x4, x5);
+  CHECK_EQUAL_32(-1, w6);
+  CHECK_EQUAL_64(0x0123456789abcdefL, x7);
+  CHECK_EQUAL_32(0x89abcdefL, w8);
+  CHECK_EQUAL_64(0x0123456789abcdefL, x9);
+  CHECK_EQUAL_32(0x89abcdefL, w10);
+  CHECK_EQUAL_64(0x00000fff, x11);
+  CHECK_EQUAL_64(0x0000000000000fffUL, x12);
+  CHECK_EQUAL_64(0x00001ffe, x13);
+  CHECK_EQUAL_64(0x0000000000003ffcUL, x14);
+  CHECK_EQUAL_64(0x000001ff, x15);
+  CHECK_EQUAL_64(0x00000000000000ffUL, x18);
+  CHECK_EQUAL_64(0x00000001, x19);
+  CHECK_EQUAL_64(0x0, x20);
+  CHECK_EQUAL_64(0x7ff80000, x21);
+  CHECK_EQUAL_64(0x3ffc000000000000UL, x22);
+  CHECK_EQUAL_64(0x000000fe, x23);
+  CHECK_EQUAL_64(0xfffffffffffffffcUL, x24);
+  CHECK_EQUAL_64(0x00007ff8, x25);
+  CHECK_EQUAL_64(0x000000000000fff0UL, x26);
+  CHECK_EQUAL_64(0x000000000001ffe0UL, x27);
 
   TEARDOWN();
 }
@@ -426,17 +426,23 @@
   __ Mov(w4, 0x00001234L);
   __ Mov(w5, 0x12340000L);
   __ Mov(w6, 0x12345678L);
+  __ Mov(w7, (int32_t)0x80000000);
+  __ Mov(w8, (int32_t)0xffff0000);
+  __ Mov(w9, kWMinInt);
   END();
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffffffL, x0);
-  ASSERT_EQUAL_64(0xffff1234L, x1);
-  ASSERT_EQUAL_64(0x1234ffffL, x2);
-  ASSERT_EQUAL_64(0x00000000L, x3);
-  ASSERT_EQUAL_64(0x00001234L, x4);
-  ASSERT_EQUAL_64(0x12340000L, x5);
-  ASSERT_EQUAL_64(0x12345678L, x6);
+  CHECK_EQUAL_64(0xffffffffL, x0);
+  CHECK_EQUAL_64(0xffff1234L, x1);
+  CHECK_EQUAL_64(0x1234ffffL, x2);
+  CHECK_EQUAL_64(0x00000000L, x3);
+  CHECK_EQUAL_64(0x00001234L, x4);
+  CHECK_EQUAL_64(0x12340000L, x5);
+  CHECK_EQUAL_64(0x12345678L, x6);
+  CHECK_EQUAL_64(0x80000000L, x7);
+  CHECK_EQUAL_64(0xffff0000L, x8);
+  CHECK_EQUAL_32(kWMinInt, w9);
 
   TEARDOWN();
 }
@@ -478,32 +484,32 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffffffffff1234L, x1);
-  ASSERT_EQUAL_64(0xffffffff12345678L, x2);
-  ASSERT_EQUAL_64(0xffff1234ffff5678L, x3);
-  ASSERT_EQUAL_64(0x1234ffffffff5678L, x4);
-  ASSERT_EQUAL_64(0x1234ffff5678ffffL, x5);
-  ASSERT_EQUAL_64(0x12345678ffffffffL, x6);
-  ASSERT_EQUAL_64(0x1234ffffffffffffL, x7);
-  ASSERT_EQUAL_64(0x123456789abcffffL, x8);
-  ASSERT_EQUAL_64(0x12345678ffff9abcL, x9);
-  ASSERT_EQUAL_64(0x1234ffff56789abcL, x10);
-  ASSERT_EQUAL_64(0xffff123456789abcL, x11);
-  ASSERT_EQUAL_64(0x0000000000000000L, x12);
-  ASSERT_EQUAL_64(0x0000000000001234L, x13);
-  ASSERT_EQUAL_64(0x0000000012345678L, x14);
-  ASSERT_EQUAL_64(0x0000123400005678L, x15);
-  ASSERT_EQUAL_64(0x1234000000005678L, x18);
-  ASSERT_EQUAL_64(0x1234000056780000L, x19);
-  ASSERT_EQUAL_64(0x1234567800000000L, x20);
-  ASSERT_EQUAL_64(0x1234000000000000L, x21);
-  ASSERT_EQUAL_64(0x123456789abc0000L, x22);
-  ASSERT_EQUAL_64(0x1234567800009abcL, x23);
-  ASSERT_EQUAL_64(0x1234000056789abcL, x24);
-  ASSERT_EQUAL_64(0x0000123456789abcL, x25);
-  ASSERT_EQUAL_64(0x123456789abcdef0L, x26);
-  ASSERT_EQUAL_64(0xffff000000000001L, x27);
-  ASSERT_EQUAL_64(0x8000ffff00000000L, x28);
+  CHECK_EQUAL_64(0xffffffffffff1234L, x1);
+  CHECK_EQUAL_64(0xffffffff12345678L, x2);
+  CHECK_EQUAL_64(0xffff1234ffff5678L, x3);
+  CHECK_EQUAL_64(0x1234ffffffff5678L, x4);
+  CHECK_EQUAL_64(0x1234ffff5678ffffL, x5);
+  CHECK_EQUAL_64(0x12345678ffffffffL, x6);
+  CHECK_EQUAL_64(0x1234ffffffffffffL, x7);
+  CHECK_EQUAL_64(0x123456789abcffffL, x8);
+  CHECK_EQUAL_64(0x12345678ffff9abcL, x9);
+  CHECK_EQUAL_64(0x1234ffff56789abcL, x10);
+  CHECK_EQUAL_64(0xffff123456789abcL, x11);
+  CHECK_EQUAL_64(0x0000000000000000L, x12);
+  CHECK_EQUAL_64(0x0000000000001234L, x13);
+  CHECK_EQUAL_64(0x0000000012345678L, x14);
+  CHECK_EQUAL_64(0x0000123400005678L, x15);
+  CHECK_EQUAL_64(0x1234000000005678L, x18);
+  CHECK_EQUAL_64(0x1234000056780000L, x19);
+  CHECK_EQUAL_64(0x1234567800000000L, x20);
+  CHECK_EQUAL_64(0x1234000000000000L, x21);
+  CHECK_EQUAL_64(0x123456789abc0000L, x22);
+  CHECK_EQUAL_64(0x1234567800009abcL, x23);
+  CHECK_EQUAL_64(0x1234000056789abcL, x24);
+  CHECK_EQUAL_64(0x0000123456789abcL, x25);
+  CHECK_EQUAL_64(0x123456789abcdef0L, x26);
+  CHECK_EQUAL_64(0xffff000000000001L, x27);
+  CHECK_EQUAL_64(0x8000ffff00000000L, x28);
 
   TEARDOWN();
 }
@@ -531,16 +537,16 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xf000f0ff, x2);
-  ASSERT_EQUAL_64(0xf000f0f0, x3);
-  ASSERT_EQUAL_64(0xf00000ff0000f0f0L, x4);
-  ASSERT_EQUAL_64(0x0f00f0ff, x5);
-  ASSERT_EQUAL_64(0xff00f0ff, x6);
-  ASSERT_EQUAL_64(0x0f00f0ff, x7);
-  ASSERT_EQUAL_64(0x0ffff0f0, x8);
-  ASSERT_EQUAL_64(0x0ff00000000ff0f0L, x9);
-  ASSERT_EQUAL_64(0xf0ff, x10);
-  ASSERT_EQUAL_64(0xf0000000f000f0f0L, x11);
+  CHECK_EQUAL_64(0xf000f0ff, x2);
+  CHECK_EQUAL_64(0xf000f0f0, x3);
+  CHECK_EQUAL_64(0xf00000ff0000f0f0L, x4);
+  CHECK_EQUAL_64(0x0f00f0ff, x5);
+  CHECK_EQUAL_64(0xff00f0ff, x6);
+  CHECK_EQUAL_64(0x0f00f0ff, x7);
+  CHECK_EQUAL_64(0x0ffff0f0, x8);
+  CHECK_EQUAL_64(0x0ff00000000ff0f0L, x9);
+  CHECK_EQUAL_64(0xf0ff, x10);
+  CHECK_EQUAL_64(0xf0000000f000f0f0L, x11);
 
   TEARDOWN();
 }
@@ -565,14 +571,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x00000081, x6);
-  ASSERT_EQUAL_64(0x00010101, x7);
-  ASSERT_EQUAL_64(0x00020201, x8);
-  ASSERT_EQUAL_64(0x0000000400040401UL, x9);
-  ASSERT_EQUAL_64(0x00000000ffffff81UL, x10);
-  ASSERT_EQUAL_64(0xffffffffffff0101UL, x11);
-  ASSERT_EQUAL_64(0xfffffffe00020201UL, x12);
-  ASSERT_EQUAL_64(0x0000000400040401UL, x13);
+  CHECK_EQUAL_64(0x00000081, x6);
+  CHECK_EQUAL_64(0x00010101, x7);
+  CHECK_EQUAL_64(0x00020201, x8);
+  CHECK_EQUAL_64(0x0000000400040401UL, x9);
+  CHECK_EQUAL_64(0x00000000ffffff81UL, x10);
+  CHECK_EQUAL_64(0xffffffffffff0101UL, x11);
+  CHECK_EQUAL_64(0xfffffffe00020201UL, x12);
+  CHECK_EQUAL_64(0x0000000400040401UL, x13);
 
   TEARDOWN();
 }
@@ -588,14 +594,19 @@
 
   __ Orr(x10, x0, Operand(0x1234567890abcdefUL));
   __ Orr(w11, w1, Operand(0x90abcdef));
+
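+  // kWMinInt (0x80000000) is a valid 32-bit logical immediate: a single set
+  // bit is an encodable bitmask-immediate pattern.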
+  __ Orr(w12, w0, kWMinInt);
+  __ Eor(w13, w0, kWMinInt);
   END();
 
   RUN();
 
-  ASSERT_EQUAL_64(0, x0);
-  ASSERT_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
-  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
-  ASSERT_EQUAL_64(0xf0fbfdffUL, x11);
+  CHECK_EQUAL_64(0, x0);
+  CHECK_EQUAL_64(0xf0f0f0f0f0f0f0f0UL, x1);
+  CHECK_EQUAL_64(0x1234567890abcdefUL, x10);
+  CHECK_EQUAL_64(0xf0fbfdffUL, x11);
+  CHECK_EQUAL_32(kWMinInt, w12);
+  CHECK_EQUAL_32(kWMinInt, w13);
 
   TEARDOWN();
 }
@@ -623,16 +634,16 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffffff0ffffff0L, x2);
-  ASSERT_EQUAL_64(0xfffff0ff, x3);
-  ASSERT_EQUAL_64(0xfffffff0fffff0ffL, x4);
-  ASSERT_EQUAL_64(0xffffffff87fffff0L, x5);
-  ASSERT_EQUAL_64(0x07fffff0, x6);
-  ASSERT_EQUAL_64(0xffffffff87fffff0L, x7);
-  ASSERT_EQUAL_64(0xff00ffff, x8);
-  ASSERT_EQUAL_64(0xff00ffffffffffffL, x9);
-  ASSERT_EQUAL_64(0xfffff0f0, x10);
-  ASSERT_EQUAL_64(0xffff0000fffff0f0L, x11);
+  CHECK_EQUAL_64(0xffffffff0ffffff0L, x2);
+  CHECK_EQUAL_64(0xfffff0ff, x3);
+  CHECK_EQUAL_64(0xfffffff0fffff0ffL, x4);
+  CHECK_EQUAL_64(0xffffffff87fffff0L, x5);
+  CHECK_EQUAL_64(0x07fffff0, x6);
+  CHECK_EQUAL_64(0xffffffff87fffff0L, x7);
+  CHECK_EQUAL_64(0xff00ffff, x8);
+  CHECK_EQUAL_64(0xff00ffffffffffffL, x9);
+  CHECK_EQUAL_64(0xfffff0f0, x10);
+  CHECK_EQUAL_64(0xffff0000fffff0f0L, x11);
 
   TEARDOWN();
 }
@@ -657,14 +668,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffff7f, x6);
-  ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
-  ASSERT_EQUAL_64(0xfffdfdfb, x8);
-  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
-  ASSERT_EQUAL_64(0x0000007f, x10);
-  ASSERT_EQUAL_64(0x0000fefd, x11);
-  ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
-  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+  CHECK_EQUAL_64(0xffffff7f, x6);
+  CHECK_EQUAL_64(0xfffffffffffefefdUL, x7);
+  CHECK_EQUAL_64(0xfffdfdfb, x8);
+  CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+  CHECK_EQUAL_64(0x0000007f, x10);
+  CHECK_EQUAL_64(0x0000fefd, x11);
+  CHECK_EQUAL_64(0x00000001fffdfdfbUL, x12);
+  CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
 
   TEARDOWN();
 }
@@ -692,16 +703,16 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x000000f0, x2);
-  ASSERT_EQUAL_64(0x00000ff0, x3);
-  ASSERT_EQUAL_64(0x00000ff0, x4);
-  ASSERT_EQUAL_64(0x00000070, x5);
-  ASSERT_EQUAL_64(0x0000ff00, x6);
-  ASSERT_EQUAL_64(0x00000f00, x7);
-  ASSERT_EQUAL_64(0x00000ff0, x8);
-  ASSERT_EQUAL_64(0x00000000, x9);
-  ASSERT_EQUAL_64(0x0000ff00, x10);
-  ASSERT_EQUAL_64(0x000000f0, x11);
+  CHECK_EQUAL_64(0x000000f0, x2);
+  CHECK_EQUAL_64(0x00000ff0, x3);
+  CHECK_EQUAL_64(0x00000ff0, x4);
+  CHECK_EQUAL_64(0x00000070, x5);
+  CHECK_EQUAL_64(0x0000ff00, x6);
+  CHECK_EQUAL_64(0x00000f00, x7);
+  CHECK_EQUAL_64(0x00000ff0, x8);
+  CHECK_EQUAL_64(0x00000000, x9);
+  CHECK_EQUAL_64(0x0000ff00, x10);
+  CHECK_EQUAL_64(0x000000f0, x11);
 
   TEARDOWN();
 }
@@ -726,14 +737,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x00000081, x6);
-  ASSERT_EQUAL_64(0x00010102, x7);
-  ASSERT_EQUAL_64(0x00020204, x8);
-  ASSERT_EQUAL_64(0x0000000400040408UL, x9);
-  ASSERT_EQUAL_64(0xffffff81, x10);
-  ASSERT_EQUAL_64(0xffffffffffff0102UL, x11);
-  ASSERT_EQUAL_64(0xfffffffe00020204UL, x12);
-  ASSERT_EQUAL_64(0x0000000400040408UL, x13);
+  CHECK_EQUAL_64(0x00000081, x6);
+  CHECK_EQUAL_64(0x00010102, x7);
+  CHECK_EQUAL_64(0x00020204, x8);
+  CHECK_EQUAL_64(0x0000000400040408UL, x9);
+  CHECK_EQUAL_64(0xffffff81, x10);
+  CHECK_EQUAL_64(0xffffffffffff0102UL, x11);
+  CHECK_EQUAL_64(0xfffffffe00020204UL, x12);
+  CHECK_EQUAL_64(0x0000000400040408UL, x13);
 
   TEARDOWN();
 }
@@ -750,8 +761,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0xf00000ff, x0);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0xf00000ff, x0);
 
   START();
   __ Mov(x0, 0xfff0);
@@ -761,8 +772,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZFlag);
-  ASSERT_EQUAL_64(0x00000000, x0);
+  CHECK_EQUAL_NZCV(ZFlag);
+  CHECK_EQUAL_64(0x00000000, x0);
 
   START();
   __ Mov(x0, 0x8000000000000000L);
@@ -772,8 +783,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0x8000000000000000L, x0);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0x8000000000000000L, x0);
 
   START();
   __ Mov(x0, 0xfff0);
@@ -782,8 +793,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZFlag);
-  ASSERT_EQUAL_64(0x00000000, x0);
+  CHECK_EQUAL_NZCV(ZFlag);
+  CHECK_EQUAL_64(0x00000000, x0);
 
   START();
   __ Mov(x0, 0xff000000);
@@ -792,8 +803,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0x80000000, x0);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0x80000000, x0);
 
   TEARDOWN();
 }
@@ -831,18 +842,18 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x0000ff00, x2);
-  ASSERT_EQUAL_64(0x0000f000, x3);
-  ASSERT_EQUAL_64(0x0000f000, x4);
-  ASSERT_EQUAL_64(0x0000ff80, x5);
-  ASSERT_EQUAL_64(0x000000f0, x6);
-  ASSERT_EQUAL_64(0x0000f0f0, x7);
-  ASSERT_EQUAL_64(0x0000f000, x8);
-  ASSERT_EQUAL_64(0x0000ff00, x9);
-  ASSERT_EQUAL_64(0x0000ffe0, x10);
-  ASSERT_EQUAL_64(0x0000fef0, x11);
+  CHECK_EQUAL_64(0x0000ff00, x2);
+  CHECK_EQUAL_64(0x0000f000, x3);
+  CHECK_EQUAL_64(0x0000f000, x4);
+  CHECK_EQUAL_64(0x0000ff80, x5);
+  CHECK_EQUAL_64(0x000000f0, x6);
+  CHECK_EQUAL_64(0x0000f0f0, x7);
+  CHECK_EQUAL_64(0x0000f000, x8);
+  CHECK_EQUAL_64(0x0000ff00, x9);
+  CHECK_EQUAL_64(0x0000ffe0, x10);
+  CHECK_EQUAL_64(0x0000fef0, x11);
 
-  ASSERT_EQUAL_64(0x543210, x21);
+  CHECK_EQUAL_64(0x543210, x21);
 
   TEARDOWN();
 }
@@ -867,14 +878,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffff7e, x6);
-  ASSERT_EQUAL_64(0xfffffffffffefefdUL, x7);
-  ASSERT_EQUAL_64(0xfffdfdfb, x8);
-  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
-  ASSERT_EQUAL_64(0x0000007e, x10);
-  ASSERT_EQUAL_64(0x0000fefd, x11);
-  ASSERT_EQUAL_64(0x00000001fffdfdfbUL, x12);
-  ASSERT_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
+  CHECK_EQUAL_64(0xffffff7e, x6);
+  CHECK_EQUAL_64(0xfffffffffffefefdUL, x7);
+  CHECK_EQUAL_64(0xfffdfdfb, x8);
+  CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x9);
+  CHECK_EQUAL_64(0x0000007e, x10);
+  CHECK_EQUAL_64(0x0000fefd, x11);
+  CHECK_EQUAL_64(0x00000001fffdfdfbUL, x12);
+  CHECK_EQUAL_64(0xfffffffbfffbfbf7UL, x13);
 
   TEARDOWN();
 }
@@ -891,8 +902,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZFlag);
-  ASSERT_EQUAL_64(0x00000000, x0);
+  CHECK_EQUAL_NZCV(ZFlag);
+  CHECK_EQUAL_64(0x00000000, x0);
 
   START();
   __ Mov(x0, 0xffffffff);
@@ -901,8 +912,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0x80000000, x0);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0x80000000, x0);
 
   START();
   __ Mov(x0, 0x8000000000000000L);
@@ -912,8 +923,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZFlag);
-  ASSERT_EQUAL_64(0x00000000, x0);
+  CHECK_EQUAL_NZCV(ZFlag);
+  CHECK_EQUAL_64(0x00000000, x0);
 
   START();
   __ Mov(x0, 0xffffffffffffffffL);
@@ -922,8 +933,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0x8000000000000000L, x0);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0x8000000000000000L, x0);
 
   START();
   __ Mov(w0, 0xffff0000);
@@ -932,8 +943,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZFlag);
-  ASSERT_EQUAL_64(0x00000000, x0);
+  CHECK_EQUAL_NZCV(ZFlag);
+  CHECK_EQUAL_64(0x00000000, x0);
 
   TEARDOWN();
 }
@@ -961,16 +972,16 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xf000ff0f, x2);
-  ASSERT_EQUAL_64(0x0000f000, x3);
-  ASSERT_EQUAL_64(0x0000000f0000f000L, x4);
-  ASSERT_EQUAL_64(0x7800ff8f, x5);
-  ASSERT_EQUAL_64(0xffff00f0, x6);
-  ASSERT_EQUAL_64(0x0000f0f0, x7);
-  ASSERT_EQUAL_64(0x0000f00f, x8);
-  ASSERT_EQUAL_64(0x00000ff00000ffffL, x9);
-  ASSERT_EQUAL_64(0xff0000f0, x10);
-  ASSERT_EQUAL_64(0xff00ff00ff0000f0L, x11);
+  CHECK_EQUAL_64(0xf000ff0f, x2);
+  CHECK_EQUAL_64(0x0000f000, x3);
+  CHECK_EQUAL_64(0x0000000f0000f000L, x4);
+  CHECK_EQUAL_64(0x7800ff8f, x5);
+  CHECK_EQUAL_64(0xffff00f0, x6);
+  CHECK_EQUAL_64(0x0000f0f0, x7);
+  CHECK_EQUAL_64(0x0000f00f, x8);
+  CHECK_EQUAL_64(0x00000ff00000ffffL, x9);
+  CHECK_EQUAL_64(0xff0000f0, x10);
+  CHECK_EQUAL_64(0xff00ff00ff0000f0L, x11);
 
   TEARDOWN();
 }
@@ -995,14 +1006,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x11111190, x6);
-  ASSERT_EQUAL_64(0x1111111111101013UL, x7);
-  ASSERT_EQUAL_64(0x11131315, x8);
-  ASSERT_EQUAL_64(0x1111111511151519UL, x9);
-  ASSERT_EQUAL_64(0xeeeeee90, x10);
-  ASSERT_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
-  ASSERT_EQUAL_64(0xeeeeeeef11131315UL, x12);
-  ASSERT_EQUAL_64(0x1111111511151519UL, x13);
+  CHECK_EQUAL_64(0x11111190, x6);
+  CHECK_EQUAL_64(0x1111111111101013UL, x7);
+  CHECK_EQUAL_64(0x11131315, x8);
+  CHECK_EQUAL_64(0x1111111511151519UL, x9);
+  CHECK_EQUAL_64(0xeeeeee90, x10);
+  CHECK_EQUAL_64(0xeeeeeeeeeeee1013UL, x11);
+  CHECK_EQUAL_64(0xeeeeeeef11131315UL, x12);
+  CHECK_EQUAL_64(0x1111111511151519UL, x13);
 
   TEARDOWN();
 }
@@ -1030,16 +1041,16 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffffff0fff00f0L, x2);
-  ASSERT_EQUAL_64(0xffff0fff, x3);
-  ASSERT_EQUAL_64(0xfffffff0ffff0fffL, x4);
-  ASSERT_EQUAL_64(0xffffffff87ff0070L, x5);
-  ASSERT_EQUAL_64(0x0000ff0f, x6);
-  ASSERT_EQUAL_64(0xffffffffffff0f0fL, x7);
-  ASSERT_EQUAL_64(0xffff0ff0, x8);
-  ASSERT_EQUAL_64(0xfffff00fffff0000L, x9);
-  ASSERT_EQUAL_64(0xfc3f03cf, x10);
-  ASSERT_EQUAL_64(0xffffefffffff100fL, x11);
+  CHECK_EQUAL_64(0xffffffff0fff00f0L, x2);
+  CHECK_EQUAL_64(0xffff0fff, x3);
+  CHECK_EQUAL_64(0xfffffff0ffff0fffL, x4);
+  CHECK_EQUAL_64(0xffffffff87ff0070L, x5);
+  CHECK_EQUAL_64(0x0000ff0f, x6);
+  CHECK_EQUAL_64(0xffffffffffff0f0fL, x7);
+  CHECK_EQUAL_64(0xffff0ff0, x8);
+  CHECK_EQUAL_64(0xfffff00fffff0000L, x9);
+  CHECK_EQUAL_64(0xfc3f03cf, x10);
+  CHECK_EQUAL_64(0xffffefffffff100fL, x11);
 
   TEARDOWN();
 }
@@ -1064,14 +1075,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xeeeeee6f, x6);
-  ASSERT_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
-  ASSERT_EQUAL_64(0xeeececea, x8);
-  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
-  ASSERT_EQUAL_64(0x1111116f, x10);
-  ASSERT_EQUAL_64(0x111111111111efecUL, x11);
-  ASSERT_EQUAL_64(0x11111110eeececeaUL, x12);
-  ASSERT_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
+  CHECK_EQUAL_64(0xeeeeee6f, x6);
+  CHECK_EQUAL_64(0xeeeeeeeeeeefefecUL, x7);
+  CHECK_EQUAL_64(0xeeececea, x8);
+  CHECK_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x9);
+  CHECK_EQUAL_64(0x1111116f, x10);
+  CHECK_EQUAL_64(0x111111111111efecUL, x11);
+  CHECK_EQUAL_64(0x11111110eeececeaUL, x12);
+  CHECK_EQUAL_64(0xeeeeeeeaeeeaeae6UL, x13);
 
   TEARDOWN();
 }
@@ -1110,25 +1121,25 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0, x0);
-  ASSERT_EQUAL_64(0, x1);
-  ASSERT_EQUAL_64(0xffffffff, x2);
-  ASSERT_EQUAL_64(1, x3);
-  ASSERT_EQUAL_64(0, x4);
-  ASSERT_EQUAL_64(0xffffffff, x5);
-  ASSERT_EQUAL_64(0xffffffff00000001UL, x6);
-  ASSERT_EQUAL_64(1, x7);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(1, x10);
-  ASSERT_EQUAL_64(0, x11);
-  ASSERT_EQUAL_64(0, x12);
-  ASSERT_EQUAL_64(1, x13);
-  ASSERT_EQUAL_64(0xffffffff, x14);
-  ASSERT_EQUAL_64(0, x20);
-  ASSERT_EQUAL_64(0xffffffff00000001UL, x21);
-  ASSERT_EQUAL_64(0xffffffff, x22);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x23);
+  CHECK_EQUAL_64(0, x0);
+  CHECK_EQUAL_64(0, x1);
+  CHECK_EQUAL_64(0xffffffff, x2);
+  CHECK_EQUAL_64(1, x3);
+  CHECK_EQUAL_64(0, x4);
+  CHECK_EQUAL_64(0xffffffff, x5);
+  CHECK_EQUAL_64(0xffffffff00000001UL, x6);
+  CHECK_EQUAL_64(1, x7);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(1, x10);
+  CHECK_EQUAL_64(0, x11);
+  CHECK_EQUAL_64(0, x12);
+  CHECK_EQUAL_64(1, x13);
+  CHECK_EQUAL_64(0xffffffff, x14);
+  CHECK_EQUAL_64(0, x20);
+  CHECK_EQUAL_64(0xffffffff00000001UL, x21);
+  CHECK_EQUAL_64(0xffffffff, x22);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x23);
 
   TEARDOWN();
 }
@@ -1142,7 +1153,7 @@
   __ Smull(x2, w0, w1);
   END();
   RUN();
-  ASSERT_EQUAL_64(expected, x2);
+  CHECK_EQUAL_64(expected, x2);
   TEARDOWN();
 }
 
@@ -1198,31 +1209,31 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(0xffffffff, x2);
-  ASSERT_EQUAL_64(0xffffffff, x3);
-  ASSERT_EQUAL_64(1, x4);
-  ASSERT_EQUAL_64(0, x5);
-  ASSERT_EQUAL_64(0, x6);
-  ASSERT_EQUAL_64(0xffffffff, x7);
-  ASSERT_EQUAL_64(0xfffffffe, x8);
-  ASSERT_EQUAL_64(2, x9);
-  ASSERT_EQUAL_64(0, x10);
-  ASSERT_EQUAL_64(0, x11);
+  CHECK_EQUAL_64(0, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(0xffffffff, x2);
+  CHECK_EQUAL_64(0xffffffff, x3);
+  CHECK_EQUAL_64(1, x4);
+  CHECK_EQUAL_64(0, x5);
+  CHECK_EQUAL_64(0, x6);
+  CHECK_EQUAL_64(0xffffffff, x7);
+  CHECK_EQUAL_64(0xfffffffe, x8);
+  CHECK_EQUAL_64(2, x9);
+  CHECK_EQUAL_64(0, x10);
+  CHECK_EQUAL_64(0, x11);
 
-  ASSERT_EQUAL_64(0, x12);
-  ASSERT_EQUAL_64(1, x13);
-  ASSERT_EQUAL_64(0xffffffff, x14);
-  ASSERT_EQUAL_64(0xffffffffffffffff, x15);
-  ASSERT_EQUAL_64(1, x20);
-  ASSERT_EQUAL_64(0x100000000UL, x21);
-  ASSERT_EQUAL_64(0, x22);
-  ASSERT_EQUAL_64(0xffffffff, x23);
-  ASSERT_EQUAL_64(0x1fffffffe, x24);
-  ASSERT_EQUAL_64(0xfffffffe00000002UL, x25);
-  ASSERT_EQUAL_64(0, x26);
-  ASSERT_EQUAL_64(0, x27);
+  CHECK_EQUAL_64(0, x12);
+  CHECK_EQUAL_64(1, x13);
+  CHECK_EQUAL_64(0xffffffff, x14);
+  CHECK_EQUAL_64(0xffffffffffffffff, x15);
+  CHECK_EQUAL_64(1, x20);
+  CHECK_EQUAL_64(0x100000000UL, x21);
+  CHECK_EQUAL_64(0, x22);
+  CHECK_EQUAL_64(0xffffffff, x23);
+  CHECK_EQUAL_64(0x1fffffffe, x24);
+  CHECK_EQUAL_64(0xfffffffe00000002UL, x25);
+  CHECK_EQUAL_64(0, x26);
+  CHECK_EQUAL_64(0, x27);
 
   TEARDOWN();
 }
@@ -1268,31 +1279,31 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(0xffffffff, x2);
-  ASSERT_EQUAL_64(0xffffffff, x3);
-  ASSERT_EQUAL_64(1, x4);
-  ASSERT_EQUAL_64(0xfffffffe, x5);
-  ASSERT_EQUAL_64(0xfffffffe, x6);
-  ASSERT_EQUAL_64(1, x7);
-  ASSERT_EQUAL_64(0, x8);
-  ASSERT_EQUAL_64(0, x9);
-  ASSERT_EQUAL_64(0xfffffffe, x10);
-  ASSERT_EQUAL_64(0xfffffffe, x11);
+  CHECK_EQUAL_64(0, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(0xffffffff, x2);
+  CHECK_EQUAL_64(0xffffffff, x3);
+  CHECK_EQUAL_64(1, x4);
+  CHECK_EQUAL_64(0xfffffffe, x5);
+  CHECK_EQUAL_64(0xfffffffe, x6);
+  CHECK_EQUAL_64(1, x7);
+  CHECK_EQUAL_64(0, x8);
+  CHECK_EQUAL_64(0, x9);
+  CHECK_EQUAL_64(0xfffffffe, x10);
+  CHECK_EQUAL_64(0xfffffffe, x11);
 
-  ASSERT_EQUAL_64(0, x12);
-  ASSERT_EQUAL_64(1, x13);
-  ASSERT_EQUAL_64(0xffffffff, x14);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x15);
-  ASSERT_EQUAL_64(1, x20);
-  ASSERT_EQUAL_64(0xfffffffeUL, x21);
-  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x22);
-  ASSERT_EQUAL_64(0xffffffff00000001UL, x23);
-  ASSERT_EQUAL_64(0, x24);
-  ASSERT_EQUAL_64(0x200000000UL, x25);
-  ASSERT_EQUAL_64(0x1fffffffeUL, x26);
-  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x27);
+  CHECK_EQUAL_64(0, x12);
+  CHECK_EQUAL_64(1, x13);
+  CHECK_EQUAL_64(0xffffffff, x14);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x15);
+  CHECK_EQUAL_64(1, x20);
+  CHECK_EQUAL_64(0xfffffffeUL, x21);
+  CHECK_EQUAL_64(0xfffffffffffffffeUL, x22);
+  CHECK_EQUAL_64(0xffffffff00000001UL, x23);
+  CHECK_EQUAL_64(0, x24);
+  CHECK_EQUAL_64(0x200000000UL, x25);
+  CHECK_EQUAL_64(0x1fffffffeUL, x26);
+  CHECK_EQUAL_64(0xfffffffffffffffeUL, x27);
 
   TEARDOWN();
 }
@@ -1330,18 +1341,18 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0, x0);
-  ASSERT_EQUAL_64(0, x1);
-  ASSERT_EQUAL_64(0, x2);
-  ASSERT_EQUAL_64(0x01234567, x3);
-  ASSERT_EQUAL_64(0x02468acf, x4);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x5);
-  ASSERT_EQUAL_64(0x4000000000000000UL, x6);
-  ASSERT_EQUAL_64(0, x7);
-  ASSERT_EQUAL_64(0, x8);
-  ASSERT_EQUAL_64(0x1c71c71c71c71c71UL, x9);
-  ASSERT_EQUAL_64(0xe38e38e38e38e38eUL, x10);
-  ASSERT_EQUAL_64(0x1c71c71c71c71c72UL, x11);
+  CHECK_EQUAL_64(0, x0);
+  CHECK_EQUAL_64(0, x1);
+  CHECK_EQUAL_64(0, x2);
+  CHECK_EQUAL_64(0x01234567, x3);
+  CHECK_EQUAL_64(0x02468acf, x4);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x5);
+  CHECK_EQUAL_64(0x4000000000000000UL, x6);
+  CHECK_EQUAL_64(0, x7);
+  CHECK_EQUAL_64(0, x8);
+  CHECK_EQUAL_64(0x1c71c71c71c71c71UL, x9);
+  CHECK_EQUAL_64(0xe38e38e38e38e38eUL, x10);
+  CHECK_EQUAL_64(0x1c71c71c71c71c72UL, x11);
 
   TEARDOWN();
 }
@@ -1370,14 +1381,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(3, x9);
-  ASSERT_EQUAL_64(5, x10);
-  ASSERT_EQUAL_64(5, x11);
-  ASSERT_EQUAL_64(0x200000001UL, x12);
-  ASSERT_EQUAL_64(0x100000003UL, x13);
-  ASSERT_EQUAL_64(0xfffffffe00000005UL, x14);
-  ASSERT_EQUAL_64(0xfffffffe00000005UL, x15);
-  ASSERT_EQUAL_64(0x1, x22);
+  CHECK_EQUAL_64(3, x9);
+  CHECK_EQUAL_64(5, x10);
+  CHECK_EQUAL_64(5, x11);
+  CHECK_EQUAL_64(0x200000001UL, x12);
+  CHECK_EQUAL_64(0x100000003UL, x13);
+  CHECK_EQUAL_64(0xfffffffe00000005UL, x14);
+  CHECK_EQUAL_64(0xfffffffe00000005UL, x15);
+  CHECK_EQUAL_64(0x1, x22);
 
   TEARDOWN();
 }
@@ -1406,14 +1417,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(5, x9);
-  ASSERT_EQUAL_64(3, x10);
-  ASSERT_EQUAL_64(3, x11);
-  ASSERT_EQUAL_64(0x1ffffffffUL, x12);
-  ASSERT_EQUAL_64(0xffffffff00000005UL, x13);
-  ASSERT_EQUAL_64(0x200000003UL, x14);
-  ASSERT_EQUAL_64(0x200000003UL, x15);
-  ASSERT_EQUAL_64(0x3ffffffffUL, x22);
+  CHECK_EQUAL_64(5, x9);
+  CHECK_EQUAL_64(3, x10);
+  CHECK_EQUAL_64(3, x11);
+  CHECK_EQUAL_64(0x1ffffffffUL, x12);
+  CHECK_EQUAL_64(0xffffffff00000005UL, x13);
+  CHECK_EQUAL_64(0x200000003UL, x14);
+  CHECK_EQUAL_64(0x200000003UL, x15);
+  CHECK_EQUAL_64(0x3ffffffffUL, x22);
 
   TEARDOWN();
 }
@@ -1469,34 +1480,34 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(0xffffffff, x1);
-  ASSERT_EQUAL_64(1, x2);
-  ASSERT_EQUAL_64(0xffffffff, x3);
-  ASSERT_EQUAL_64(1, x4);
-  ASSERT_EQUAL_64(1, x5);
-  ASSERT_EQUAL_64(0, x6);
-  ASSERT_EQUAL_64(1, x7);
-  ASSERT_EQUAL_64(0, x8);
-  ASSERT_EQUAL_64(0xffffffff00000001UL, x9);
-  ASSERT_EQUAL_64(0x40000000, x10);
-  ASSERT_EQUAL_64(0xC0000000, x11);
-  ASSERT_EQUAL_64(0x40000000, x12);
-  ASSERT_EQUAL_64(0x40000000, x13);
-  ASSERT_EQUAL_64(0x4000000000000000UL, x14);
-  ASSERT_EQUAL_64(0xC000000000000000UL, x15);
-  ASSERT_EQUAL_64(0, x22);
-  ASSERT_EQUAL_64(0x80000000, x23);
-  ASSERT_EQUAL_64(0, x24);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x25);
-  ASSERT_EQUAL_64(0, x26);
-  ASSERT_EQUAL_64(0, x27);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x28);
-  ASSERT_EQUAL_64(0, x29);
-  ASSERT_EQUAL_64(0, x18);
-  ASSERT_EQUAL_64(0, x19);
-  ASSERT_EQUAL_64(0, x20);
-  ASSERT_EQUAL_64(0, x21);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(0xffffffff, x1);
+  CHECK_EQUAL_64(1, x2);
+  CHECK_EQUAL_64(0xffffffff, x3);
+  CHECK_EQUAL_64(1, x4);
+  CHECK_EQUAL_64(1, x5);
+  CHECK_EQUAL_64(0, x6);
+  CHECK_EQUAL_64(1, x7);
+  CHECK_EQUAL_64(0, x8);
+  CHECK_EQUAL_64(0xffffffff00000001UL, x9);
+  CHECK_EQUAL_64(0x40000000, x10);
+  CHECK_EQUAL_64(0xC0000000, x11);
+  CHECK_EQUAL_64(0x40000000, x12);
+  CHECK_EQUAL_64(0x40000000, x13);
+  CHECK_EQUAL_64(0x4000000000000000UL, x14);
+  CHECK_EQUAL_64(0xC000000000000000UL, x15);
+  CHECK_EQUAL_64(0, x22);
+  CHECK_EQUAL_64(0x80000000, x23);
+  CHECK_EQUAL_64(0, x24);
+  CHECK_EQUAL_64(0x8000000000000000UL, x25);
+  CHECK_EQUAL_64(0, x26);
+  CHECK_EQUAL_64(0, x27);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x28);
+  CHECK_EQUAL_64(0, x29);
+  CHECK_EQUAL_64(0, x18);
+  CHECK_EQUAL_64(0, x19);
+  CHECK_EQUAL_64(0, x20);
+  CHECK_EQUAL_64(0, x21);
 
   TEARDOWN();
 }
@@ -1519,13 +1530,13 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x084c2a6e, x0);
-  ASSERT_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
-  ASSERT_EQUAL_64(0x54761032, x2);
-  ASSERT_EQUAL_64(0xdcfe98ba54761032UL, x3);
-  ASSERT_EQUAL_64(0x10325476, x4);
-  ASSERT_EQUAL_64(0x98badcfe10325476UL, x5);
-  ASSERT_EQUAL_64(0x1032547698badcfeUL, x6);
+  CHECK_EQUAL_64(0x084c2a6e, x0);
+  CHECK_EQUAL_64(0x084c2a6e195d3b7fUL, x1);
+  CHECK_EQUAL_64(0x54761032, x2);
+  CHECK_EQUAL_64(0xdcfe98ba54761032UL, x3);
+  CHECK_EQUAL_64(0x10325476, x4);
+  CHECK_EQUAL_64(0x98badcfe10325476UL, x5);
+  CHECK_EQUAL_64(0x1032547698badcfeUL, x6);
 
   TEARDOWN();
 }
@@ -1555,18 +1566,18 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(8, x0);
-  ASSERT_EQUAL_64(12, x1);
-  ASSERT_EQUAL_64(0, x2);
-  ASSERT_EQUAL_64(0, x3);
-  ASSERT_EQUAL_64(32, x4);
-  ASSERT_EQUAL_64(64, x5);
-  ASSERT_EQUAL_64(7, x6);
-  ASSERT_EQUAL_64(11, x7);
-  ASSERT_EQUAL_64(12, x8);
-  ASSERT_EQUAL_64(8, x9);
-  ASSERT_EQUAL_64(31, x10);
-  ASSERT_EQUAL_64(63, x11);
+  CHECK_EQUAL_64(8, x0);
+  CHECK_EQUAL_64(12, x1);
+  CHECK_EQUAL_64(0, x2);
+  CHECK_EQUAL_64(0, x3);
+  CHECK_EQUAL_64(32, x4);
+  CHECK_EQUAL_64(64, x5);
+  CHECK_EQUAL_64(7, x6);
+  CHECK_EQUAL_64(11, x7);
+  CHECK_EQUAL_64(12, x8);
+  CHECK_EQUAL_64(8, x9);
+  CHECK_EQUAL_64(31, x10);
+  CHECK_EQUAL_64(63, x11);
 
   TEARDOWN();
 }
@@ -1604,8 +1615,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1, x0);
-  ASSERT_EQUAL_64(0x1, x1);
+  CHECK_EQUAL_64(0x1, x0);
+  CHECK_EQUAL_64(0x1, x1);
 
   TEARDOWN();
 }
@@ -1638,7 +1649,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1, x0);
+  CHECK_EQUAL_64(0x1, x0);
   TEARDOWN();
 }
 
@@ -1682,8 +1693,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x0, x0);
-  ASSERT_EQUAL_64(0x0, x1);
+  CHECK_EQUAL_64(0x0, x0);
+  CHECK_EQUAL_64(0x0, x1);
 
   TEARDOWN();
 }
@@ -1748,7 +1759,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xf, x0);
+  CHECK_EQUAL_64(0xf, x0);
 
   TEARDOWN();
 }
@@ -1838,7 +1849,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1, x0);
+  CHECK_EQUAL_64(0x1, x0);
 
   TEARDOWN();
 }
@@ -1885,9 +1896,9 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
-  ASSERT_EQUAL_64(42, x1);
-  ASSERT_EQUAL_64(84, x2);
+  CHECK_EQUAL_64(core.xreg(3) + kInstructionSize, x0);
+  CHECK_EQUAL_64(42, x1);
+  CHECK_EQUAL_64(84, x2);
 
   TEARDOWN();
 }
@@ -1955,12 +1966,12 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(0, x1);
-  ASSERT_EQUAL_64(1, x2);
-  ASSERT_EQUAL_64(0, x3);
-  ASSERT_EQUAL_64(1, x4);
-  ASSERT_EQUAL_64(0, x5);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(0, x1);
+  CHECK_EQUAL_64(1, x2);
+  CHECK_EQUAL_64(0, x3);
+  CHECK_EQUAL_64(1, x4);
+  CHECK_EQUAL_64(0, x5);
 
   TEARDOWN();
 }
@@ -2008,10 +2019,10 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(0, x1);
-  ASSERT_EQUAL_64(1, x2);
-  ASSERT_EQUAL_64(0, x3);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(0, x1);
+  CHECK_EQUAL_64(1, x2);
+  CHECK_EQUAL_64(0, x3);
 
   TEARDOWN();
 }
@@ -2084,8 +2095,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x7, x0);
-  ASSERT_EQUAL_64(0x1, x1);
+  CHECK_EQUAL_64(0x7, x0);
+  CHECK_EQUAL_64(0x1, x1);
 
   TEARDOWN();
 }
@@ -2154,8 +2165,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x7, x0);
-  ASSERT_EQUAL_64(0x1, x1);
+  CHECK_EQUAL_64(0x7, x0);
+  CHECK_EQUAL_64(0x1, x1);
 
   TEARDOWN();
 }
@@ -2249,8 +2260,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x7, x0);
-  ASSERT_EQUAL_64(0x1, x1);
+  CHECK_EQUAL_64(0x7, x0);
+  CHECK_EQUAL_64(0x1, x1);
 
   TEARDOWN();
 }
@@ -2339,8 +2350,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x3, x0);
-  ASSERT_EQUAL_64(0x1, x1);
+  CHECK_EQUAL_64(0x3, x0);
+  CHECK_EQUAL_64(0x1, x1);
 
   TEARDOWN();
 }
@@ -2397,7 +2408,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x0, x0);
+  CHECK_EQUAL_64(0x0, x0);
 
   TEARDOWN();
 }
@@ -2429,18 +2440,18 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x76543210, x0);
-  ASSERT_EQUAL_64(0x76543210, dst[0]);
-  ASSERT_EQUAL_64(0xfedcba98, x1);
-  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
-  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
-  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
-  ASSERT_EQUAL_64(0x32, x3);
-  ASSERT_EQUAL_64(0x3200, dst[3]);
-  ASSERT_EQUAL_64(0x7654, x4);
-  ASSERT_EQUAL_64(0x765400, dst[4]);
-  ASSERT_EQUAL_64(src_base, x17);
-  ASSERT_EQUAL_64(dst_base, x18);
+  CHECK_EQUAL_64(0x76543210, x0);
+  CHECK_EQUAL_64(0x76543210, dst[0]);
+  CHECK_EQUAL_64(0xfedcba98, x1);
+  CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+  CHECK_EQUAL_64(0x0123456789abcdefUL, x2);
+  CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+  CHECK_EQUAL_64(0x32, x3);
+  CHECK_EQUAL_64(0x3200, dst[3]);
+  CHECK_EQUAL_64(0x7654, x4);
+  CHECK_EQUAL_64(0x765400, dst[4]);
+  CHECK_EQUAL_64(src_base, x17);
+  CHECK_EQUAL_64(dst_base, x18);
 
   TEARDOWN();
 }
@@ -2478,18 +2489,18 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(8191, w0);
-  ASSERT_EQUAL_32(8191, dst[8191]);
-  ASSERT_EQUAL_64(src_base, x22);
-  ASSERT_EQUAL_64(dst_base, x23);
-  ASSERT_EQUAL_32(0, w1);
-  ASSERT_EQUAL_32(0, dst[0]);
-  ASSERT_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
-  ASSERT_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
-  ASSERT_EQUAL_32(6144, w2);
-  ASSERT_EQUAL_32(6144, dst[6144]);
-  ASSERT_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
-  ASSERT_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
+  CHECK_EQUAL_32(8191, w0);
+  CHECK_EQUAL_32(8191, dst[8191]);
+  CHECK_EQUAL_64(src_base, x22);
+  CHECK_EQUAL_64(dst_base, x23);
+  CHECK_EQUAL_32(0, w1);
+  CHECK_EQUAL_32(0, dst[0]);
+  CHECK_EQUAL_64(src_base + 4096 * sizeof(src[0]), x24);
+  CHECK_EQUAL_64(dst_base + 4096 * sizeof(dst[0]), x25);
+  CHECK_EQUAL_32(6144, w2);
+  CHECK_EQUAL_32(6144, dst[6144]);
+  CHECK_EQUAL_64(src_base + 6144 * sizeof(src[0]), x26);
+  CHECK_EQUAL_64(dst_base + 6144 * sizeof(dst[0]), x27);
 
   TEARDOWN();
 }
@@ -2529,26 +2540,26 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xfedcba98, x0);
-  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
-  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
-  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
-  ASSERT_EQUAL_64(0x01234567, x2);
-  ASSERT_EQUAL_64(0x0123456700000000UL, dst[4]);
-  ASSERT_EQUAL_64(0x32, x3);
-  ASSERT_EQUAL_64(0x3200, dst[3]);
-  ASSERT_EQUAL_64(0x9876, x4);
-  ASSERT_EQUAL_64(0x987600, dst[5]);
-  ASSERT_EQUAL_64(src_base + 4, x17);
-  ASSERT_EQUAL_64(dst_base + 12, x18);
-  ASSERT_EQUAL_64(src_base + 8, x19);
-  ASSERT_EQUAL_64(dst_base + 16, x20);
-  ASSERT_EQUAL_64(src_base + 12, x21);
-  ASSERT_EQUAL_64(dst_base + 36, x22);
-  ASSERT_EQUAL_64(src_base + 1, x23);
-  ASSERT_EQUAL_64(dst_base + 25, x24);
-  ASSERT_EQUAL_64(src_base + 3, x25);
-  ASSERT_EQUAL_64(dst_base + 41, x26);
+  CHECK_EQUAL_64(0xfedcba98, x0);
+  CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+  CHECK_EQUAL_64(0x0123456789abcdefUL, x1);
+  CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+  CHECK_EQUAL_64(0x01234567, x2);
+  CHECK_EQUAL_64(0x0123456700000000UL, dst[4]);
+  CHECK_EQUAL_64(0x32, x3);
+  CHECK_EQUAL_64(0x3200, dst[3]);
+  CHECK_EQUAL_64(0x9876, x4);
+  CHECK_EQUAL_64(0x987600, dst[5]);
+  CHECK_EQUAL_64(src_base + 4, x17);
+  CHECK_EQUAL_64(dst_base + 12, x18);
+  CHECK_EQUAL_64(src_base + 8, x19);
+  CHECK_EQUAL_64(dst_base + 16, x20);
+  CHECK_EQUAL_64(src_base + 12, x21);
+  CHECK_EQUAL_64(dst_base + 36, x22);
+  CHECK_EQUAL_64(src_base + 1, x23);
+  CHECK_EQUAL_64(dst_base + 25, x24);
+  CHECK_EQUAL_64(src_base + 3, x25);
+  CHECK_EQUAL_64(dst_base + 41, x26);
 
   TEARDOWN();
 }
@@ -2588,26 +2599,26 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xfedcba98, x0);
-  ASSERT_EQUAL_64(0xfedcba9800000000UL, dst[1]);
-  ASSERT_EQUAL_64(0x0123456789abcdefUL, x1);
-  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[2]);
-  ASSERT_EQUAL_64(0x0123456789abcdefUL, x2);
-  ASSERT_EQUAL_64(0x0123456789abcdefUL, dst[4]);
-  ASSERT_EQUAL_64(0x32, x3);
-  ASSERT_EQUAL_64(0x3200, dst[3]);
-  ASSERT_EQUAL_64(0x9876, x4);
-  ASSERT_EQUAL_64(0x987600, dst[5]);
-  ASSERT_EQUAL_64(src_base + 8, x17);
-  ASSERT_EQUAL_64(dst_base + 24, x18);
-  ASSERT_EQUAL_64(src_base + 16, x19);
-  ASSERT_EQUAL_64(dst_base + 32, x20);
-  ASSERT_EQUAL_64(src_base, x21);
-  ASSERT_EQUAL_64(dst_base, x22);
-  ASSERT_EQUAL_64(src_base + 2, x23);
-  ASSERT_EQUAL_64(dst_base + 30, x24);
-  ASSERT_EQUAL_64(src_base, x25);
-  ASSERT_EQUAL_64(dst_base, x26);
+  CHECK_EQUAL_64(0xfedcba98, x0);
+  CHECK_EQUAL_64(0xfedcba9800000000UL, dst[1]);
+  CHECK_EQUAL_64(0x0123456789abcdefUL, x1);
+  CHECK_EQUAL_64(0x0123456789abcdefUL, dst[2]);
+  CHECK_EQUAL_64(0x0123456789abcdefUL, x2);
+  CHECK_EQUAL_64(0x0123456789abcdefUL, dst[4]);
+  CHECK_EQUAL_64(0x32, x3);
+  CHECK_EQUAL_64(0x3200, dst[3]);
+  CHECK_EQUAL_64(0x9876, x4);
+  CHECK_EQUAL_64(0x987600, dst[5]);
+  CHECK_EQUAL_64(src_base + 8, x17);
+  CHECK_EQUAL_64(dst_base + 24, x18);
+  CHECK_EQUAL_64(src_base + 16, x19);
+  CHECK_EQUAL_64(dst_base + 32, x20);
+  CHECK_EQUAL_64(src_base, x21);
+  CHECK_EQUAL_64(dst_base, x22);
+  CHECK_EQUAL_64(src_base + 2, x23);
+  CHECK_EQUAL_64(dst_base + 30, x24);
+  CHECK_EQUAL_64(src_base, x25);
+  CHECK_EQUAL_64(dst_base, x26);
 
   TEARDOWN();
 }
@@ -2636,16 +2647,16 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffff80, x0);
-  ASSERT_EQUAL_64(0x0000007f, x1);
-  ASSERT_EQUAL_64(0xffff8080, x2);
-  ASSERT_EQUAL_64(0x00007f7f, x3);
-  ASSERT_EQUAL_64(0xffffffffffffff80UL, x4);
-  ASSERT_EQUAL_64(0x000000000000007fUL, x5);
-  ASSERT_EQUAL_64(0xffffffffffff8080UL, x6);
-  ASSERT_EQUAL_64(0x0000000000007f7fUL, x7);
-  ASSERT_EQUAL_64(0xffffffff80008080UL, x8);
-  ASSERT_EQUAL_64(0x000000007fff7f7fUL, x9);
+  CHECK_EQUAL_64(0xffffff80, x0);
+  CHECK_EQUAL_64(0x0000007f, x1);
+  CHECK_EQUAL_64(0xffff8080, x2);
+  CHECK_EQUAL_64(0x00007f7f, x3);
+  CHECK_EQUAL_64(0xffffffffffffff80UL, x4);
+  CHECK_EQUAL_64(0x000000000000007fUL, x5);
+  CHECK_EQUAL_64(0xffffffffffff8080UL, x6);
+  CHECK_EQUAL_64(0x0000000000007f7fUL, x7);
+  CHECK_EQUAL_64(0xffffffff80008080UL, x8);
+  CHECK_EQUAL_64(0x000000007fff7f7fUL, x9);
 
   TEARDOWN();
 }
@@ -2685,15 +2696,15 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(0x0000000300000002UL, x1);
-  ASSERT_EQUAL_64(3, x2);
-  ASSERT_EQUAL_64(3, x3);
-  ASSERT_EQUAL_64(2, x4);
-  ASSERT_EQUAL_32(1, dst[0]);
-  ASSERT_EQUAL_32(2, dst[1]);
-  ASSERT_EQUAL_32(3, dst[2]);
-  ASSERT_EQUAL_32(3, dst[3]);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(0x0000000300000002UL, x1);
+  CHECK_EQUAL_64(3, x2);
+  CHECK_EQUAL_64(3, x3);
+  CHECK_EQUAL_64(2, x4);
+  CHECK_EQUAL_32(1, dst[0]);
+  CHECK_EQUAL_32(2, dst[1]);
+  CHECK_EQUAL_32(3, dst[2]);
+  CHECK_EQUAL_32(3, dst[3]);
 
   TEARDOWN();
 }
@@ -2725,18 +2736,18 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(2.0, s0);
-  ASSERT_EQUAL_FP32(2.0, dst[0]);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(1.0, dst[2]);
-  ASSERT_EQUAL_FP32(3.0, s2);
-  ASSERT_EQUAL_FP32(3.0, dst[1]);
-  ASSERT_EQUAL_64(src_base, x17);
-  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
-  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
-  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
-  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
-  ASSERT_EQUAL_64(dst_base, x22);
+  CHECK_EQUAL_FP32(2.0, s0);
+  CHECK_EQUAL_FP32(2.0, dst[0]);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(1.0, dst[2]);
+  CHECK_EQUAL_FP32(3.0, s2);
+  CHECK_EQUAL_FP32(3.0, dst[1]);
+  CHECK_EQUAL_64(src_base, x17);
+  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+  CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
+  CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+  CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+  CHECK_EQUAL_64(dst_base, x22);
 
   TEARDOWN();
 }
@@ -2768,18 +2779,18 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP64(2.0, d0);
-  ASSERT_EQUAL_FP64(2.0, dst[0]);
-  ASSERT_EQUAL_FP64(1.0, d1);
-  ASSERT_EQUAL_FP64(1.0, dst[2]);
-  ASSERT_EQUAL_FP64(3.0, d2);
-  ASSERT_EQUAL_FP64(3.0, dst[1]);
-  ASSERT_EQUAL_64(src_base, x17);
-  ASSERT_EQUAL_64(dst_base + sizeof(dst[0]), x18);
-  ASSERT_EQUAL_64(src_base + sizeof(src[0]), x19);
-  ASSERT_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
-  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
-  ASSERT_EQUAL_64(dst_base, x22);
+  CHECK_EQUAL_FP64(2.0, d0);
+  CHECK_EQUAL_FP64(2.0, dst[0]);
+  CHECK_EQUAL_FP64(1.0, d1);
+  CHECK_EQUAL_FP64(1.0, dst[2]);
+  CHECK_EQUAL_FP64(3.0, d2);
+  CHECK_EQUAL_FP64(3.0, dst[1]);
+  CHECK_EQUAL_64(src_base, x17);
+  CHECK_EQUAL_64(dst_base + sizeof(dst[0]), x18);
+  CHECK_EQUAL_64(src_base + sizeof(src[0]), x19);
+  CHECK_EQUAL_64(dst_base + 2 * sizeof(dst[0]), x20);
+  CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x21);
+  CHECK_EQUAL_64(dst_base, x22);
 
   TEARDOWN();
 }
@@ -2803,13 +2814,13 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.0, s31);
-  ASSERT_EQUAL_FP32(2.0, s0);
-  ASSERT_EQUAL_FP32(0.0, dst[0]);
-  ASSERT_EQUAL_FP32(2.0, dst[1]);
-  ASSERT_EQUAL_FP32(1.0, dst[2]);
-  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
-  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+  CHECK_EQUAL_FP32(1.0, s31);
+  CHECK_EQUAL_FP32(2.0, s0);
+  CHECK_EQUAL_FP32(0.0, dst[0]);
+  CHECK_EQUAL_FP32(2.0, dst[1]);
+  CHECK_EQUAL_FP32(1.0, dst[2]);
+  CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+  CHECK_EQUAL_64(dst_base + sizeof(dst[1]), x17);
 
   TEARDOWN();
 }
@@ -2833,13 +2844,13 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP64(1.0, d31);
-  ASSERT_EQUAL_FP64(2.0, d0);
-  ASSERT_EQUAL_FP64(0.0, dst[0]);
-  ASSERT_EQUAL_FP64(2.0, dst[1]);
-  ASSERT_EQUAL_FP64(1.0, dst[2]);
-  ASSERT_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
-  ASSERT_EQUAL_64(dst_base + sizeof(dst[1]), x17);
+  CHECK_EQUAL_FP64(1.0, d31);
+  CHECK_EQUAL_FP64(2.0, d0);
+  CHECK_EQUAL_FP64(0.0, dst[0]);
+  CHECK_EQUAL_FP64(2.0, dst[1]);
+  CHECK_EQUAL_FP64(1.0, dst[2]);
+  CHECK_EQUAL_64(src_base + 2 * sizeof(src[0]), x16);
+  CHECK_EQUAL_64(dst_base + sizeof(dst[1]), x17);
 
   TEARDOWN();
 }
@@ -2874,27 +2885,85 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x44556677, x0);
-  ASSERT_EQUAL_64(0x00112233, x1);
-  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
-  ASSERT_EQUAL_64(0x00112233, x2);
-  ASSERT_EQUAL_64(0xccddeeff, x3);
-  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
-  ASSERT_EQUAL_64(0x8899aabb, x6);
-  ASSERT_EQUAL_64(0xbbaa9988, x7);
-  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
-  ASSERT_EQUAL_64(src_base, x16);
-  ASSERT_EQUAL_64(dst_base, x17);
-  ASSERT_EQUAL_64(src_base + 24, x18);
-  ASSERT_EQUAL_64(dst_base + 56, x19);
+  CHECK_EQUAL_64(0x44556677, x0);
+  CHECK_EQUAL_64(0x00112233, x1);
+  CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
+  CHECK_EQUAL_64(0x00112233, x2);
+  CHECK_EQUAL_64(0xccddeeff, x3);
+  CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+  CHECK_EQUAL_64(0x8899aabb, x6);
+  CHECK_EQUAL_64(0xbbaa9988, x7);
+  CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+  CHECK_EQUAL_64(src_base, x16);
+  CHECK_EQUAL_64(dst_base, x17);
+  CHECK_EQUAL_64(src_base + 24, x18);
+  CHECK_EQUAL_64(dst_base + 56, x19);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_stp_offset_wide) {
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
+                     0xffeeddccbbaa9988};
+  uint64_t dst[7] = {0, 0, 0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+  // Move the base far enough from the array to force multiple instructions
+  // to be emitted.
+  const int64_t base_offset = 1024;
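+  // (1024 bytes is beyond the signed 7-bit scaled immediate range of
+  // ldp/stp, which reaches at most +504 bytes for 64-bit registers.)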
+
+  START();
+  __ Mov(x20, src_base - base_offset);
+  __ Mov(x21, dst_base - base_offset);
+  __ Mov(x18, src_base + base_offset + 24);
+  __ Mov(x19, dst_base + base_offset + 56);
+  __ Ldp(w0, w1, MemOperand(x20, base_offset));
+  __ Ldp(w2, w3, MemOperand(x20, base_offset + 4));
+  __ Ldp(x4, x5, MemOperand(x20, base_offset + 8));
+  __ Ldp(w6, w7, MemOperand(x18, -12 - base_offset));
+  __ Ldp(x8, x9, MemOperand(x18, -16 - base_offset));
+  __ Stp(w0, w1, MemOperand(x21, base_offset));
+  __ Stp(w2, w3, MemOperand(x21, base_offset + 8));
+  __ Stp(x4, x5, MemOperand(x21, base_offset + 16));
+  __ Stp(w6, w7, MemOperand(x19, -24 - base_offset));
+  __ Stp(x8, x9, MemOperand(x19, -16 - base_offset));
+  END();
+
+  RUN();
+
+  CHECK_EQUAL_64(0x44556677, x0);
+  CHECK_EQUAL_64(0x00112233, x1);
+  CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
+  CHECK_EQUAL_64(0x00112233, x2);
+  CHECK_EQUAL_64(0xccddeeff, x3);
+  CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+  CHECK_EQUAL_64(0x8899aabb, x6);
+  CHECK_EQUAL_64(0xbbaa9988, x7);
+  CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+  CHECK_EQUAL_64(src_base - base_offset, x20);
+  CHECK_EQUAL_64(dst_base - base_offset, x21);
+  CHECK_EQUAL_64(src_base + base_offset + 24, x18);
+  CHECK_EQUAL_64(dst_base + base_offset + 56, x19);
 
   TEARDOWN();
 }
@@ -2929,27 +2998,27 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x44556677, x0);
-  ASSERT_EQUAL_64(0x00112233, x1);
-  ASSERT_EQUAL_64(0x0011223344556677UL, dst[0]);
-  ASSERT_EQUAL_64(0x00112233, x2);
-  ASSERT_EQUAL_64(0xccddeeff, x3);
-  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[1]);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
-  ASSERT_EQUAL_64(0x8899aabb, x6);
-  ASSERT_EQUAL_64(0xbbaa9988, x7);
-  ASSERT_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x8);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x9);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
-  ASSERT_EQUAL_64(src_base, x16);
-  ASSERT_EQUAL_64(dst_base, x17);
-  ASSERT_EQUAL_64(src_base + 24, x18);
-  ASSERT_EQUAL_64(dst_base + 56, x19);
+  CHECK_EQUAL_64(0x44556677, x0);
+  CHECK_EQUAL_64(0x00112233, x1);
+  CHECK_EQUAL_64(0x0011223344556677UL, dst[0]);
+  CHECK_EQUAL_64(0x00112233, x2);
+  CHECK_EQUAL_64(0xccddeeff, x3);
+  CHECK_EQUAL_64(0xccddeeff00112233UL, dst[1]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[2]);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[3]);
+  CHECK_EQUAL_64(0x8899aabb, x6);
+  CHECK_EQUAL_64(0xbbaa9988, x7);
+  CHECK_EQUAL_64(0xbbaa99888899aabbUL, dst[4]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x8);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[5]);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x9);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[6]);
+  CHECK_EQUAL_64(src_base, x16);
+  CHECK_EQUAL_64(dst_base, x17);
+  CHECK_EQUAL_64(src_base + 24, x18);
+  CHECK_EQUAL_64(dst_base + 56, x19);
 
   TEARDOWN();
 }
@@ -2985,26 +3054,89 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x00112233, x0);
-  ASSERT_EQUAL_64(0xccddeeff, x1);
-  ASSERT_EQUAL_64(0x44556677, x2);
-  ASSERT_EQUAL_64(0x00112233, x3);
-  ASSERT_EQUAL_64(0xccddeeff00112233UL, dst[0]);
-  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x4);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x5);
-  ASSERT_EQUAL_64(0x0011223344556677UL, x6);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x7);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
-  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
-  ASSERT_EQUAL_64(src_base, x16);
-  ASSERT_EQUAL_64(dst_base, x17);
-  ASSERT_EQUAL_64(dst_base + 16, x18);
-  ASSERT_EQUAL_64(src_base + 4, x19);
-  ASSERT_EQUAL_64(dst_base + 4, x20);
-  ASSERT_EQUAL_64(src_base + 8, x21);
-  ASSERT_EQUAL_64(dst_base + 24, x22);
+  CHECK_EQUAL_64(0x00112233, x0);
+  CHECK_EQUAL_64(0xccddeeff, x1);
+  CHECK_EQUAL_64(0x44556677, x2);
+  CHECK_EQUAL_64(0x00112233, x3);
+  CHECK_EQUAL_64(0xccddeeff00112233UL, dst[0]);
+  CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+  CHECK_EQUAL_64(0x0011223344556677UL, x6);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x7);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+  CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
+  CHECK_EQUAL_64(src_base, x16);
+  CHECK_EQUAL_64(dst_base, x17);
+  CHECK_EQUAL_64(dst_base + 16, x18);
+  CHECK_EQUAL_64(src_base + 4, x19);
+  CHECK_EQUAL_64(dst_base + 4, x20);
+  CHECK_EQUAL_64(src_base + 8, x21);
+  CHECK_EQUAL_64(dst_base + 24, x22);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_stp_preindex_wide) {
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[3] = {0x0011223344556677, 0x8899aabbccddeeff,
+                     0xffeeddccbbaa9988};
+  uint64_t dst[5] = {0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+  // Move the base far enough from the array to force multiple instructions
+  // to be emitted.
+  const int64_t base_offset = 1024;
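+  // (1024 bytes cannot be encoded as a pre-indexed ldp/stp immediate, so
+  // extra address arithmetic must be emitted for each access.)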
+
+  START();
+  __ Mov(x24, src_base - base_offset);
+  __ Mov(x25, dst_base + base_offset);
+  __ Mov(x18, dst_base + base_offset + 16);
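+  // Pre-index writeback leaves base + offset in the base register, so the
+  // base is saved and re-materialized between accesses.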
+  __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PreIndex));
+  __ Mov(x19, x24);
+  __ Mov(x24, src_base - base_offset + 4);
+  __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PreIndex));
+  __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PreIndex));
+  __ Mov(x20, x25);
+  __ Mov(x25, dst_base + base_offset + 4);
+  __ Mov(x24, src_base - base_offset);
+  __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PreIndex));
+  __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PreIndex));
+  __ Mov(x21, x24);
+  __ Mov(x24, src_base - base_offset + 8);
+  __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PreIndex));
+  __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PreIndex));
+  __ Mov(x22, x18);
+  __ Mov(x18, dst_base + base_offset + 16 + 8);
+  __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PreIndex));
+  END();
+
+  RUN();
+
+  CHECK_EQUAL_64(0x00112233, x0);
+  CHECK_EQUAL_64(0xccddeeff, x1);
+  CHECK_EQUAL_64(0x44556677, x2);
+  CHECK_EQUAL_64(0x00112233, x3);
+  CHECK_EQUAL_64(0xccddeeff00112233UL, dst[0]);
+  CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x4);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x5);
+  CHECK_EQUAL_64(0x0011223344556677UL, x6);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x7);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+  CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
+  CHECK_EQUAL_64(src_base, x24);
+  CHECK_EQUAL_64(dst_base, x25);
+  CHECK_EQUAL_64(dst_base + 16, x18);
+  CHECK_EQUAL_64(src_base + 4, x19);
+  CHECK_EQUAL_64(dst_base + 4, x20);
+  CHECK_EQUAL_64(src_base + 8, x21);
+  CHECK_EQUAL_64(dst_base + 24, x22);
 
   TEARDOWN();
 }
@@ -3040,26 +3172,89 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x44556677, x0);
-  ASSERT_EQUAL_64(0x00112233, x1);
-  ASSERT_EQUAL_64(0x00112233, x2);
-  ASSERT_EQUAL_64(0xccddeeff, x3);
-  ASSERT_EQUAL_64(0x4455667700112233UL, dst[0]);
-  ASSERT_EQUAL_64(0x0000000000112233UL, dst[1]);
-  ASSERT_EQUAL_64(0x0011223344556677UL, x4);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x5);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, x6);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, x7);
-  ASSERT_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
-  ASSERT_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
-  ASSERT_EQUAL_64(0x0011223344556677UL, dst[4]);
-  ASSERT_EQUAL_64(src_base, x16);
-  ASSERT_EQUAL_64(dst_base, x17);
-  ASSERT_EQUAL_64(dst_base + 16, x18);
-  ASSERT_EQUAL_64(src_base + 4, x19);
-  ASSERT_EQUAL_64(dst_base + 4, x20);
-  ASSERT_EQUAL_64(src_base + 8, x21);
-  ASSERT_EQUAL_64(dst_base + 24, x22);
+  CHECK_EQUAL_64(0x44556677, x0);
+  CHECK_EQUAL_64(0x00112233, x1);
+  CHECK_EQUAL_64(0x00112233, x2);
+  CHECK_EQUAL_64(0xccddeeff, x3);
+  CHECK_EQUAL_64(0x4455667700112233UL, dst[0]);
+  CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
+  CHECK_EQUAL_64(0x0011223344556677UL, x4);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x5);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x6);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x7);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+  CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
+  CHECK_EQUAL_64(src_base, x16);
+  CHECK_EQUAL_64(dst_base, x17);
+  CHECK_EQUAL_64(dst_base + 16, x18);
+  CHECK_EQUAL_64(src_base + 4, x19);
+  CHECK_EQUAL_64(dst_base + 4, x20);
+  CHECK_EQUAL_64(src_base + 8, x21);
+  CHECK_EQUAL_64(dst_base + 24, x22);
+
+  TEARDOWN();
+}
+
+
+TEST(ldp_stp_postindex_wide) {
+  INIT_V8();
+  SETUP();
+
+  uint64_t src[4] = {0x0011223344556677, 0x8899aabbccddeeff, 0xffeeddccbbaa9988,
+                     0x7766554433221100};
+  uint64_t dst[5] = {0, 0, 0, 0, 0};
+  uintptr_t src_base = reinterpret_cast<uintptr_t>(src);
+  uintptr_t dst_base = reinterpret_cast<uintptr_t>(dst);
+  // Move the base far enough from the array to force multiple instructions
+  // to be emitted.
+  const int64_t base_offset = 1024;
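+  // (1024 bytes cannot be encoded as a post-indexed ldp/stp immediate, so
+  // extra address arithmetic must be emitted for each access.)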
+
+  START();
+  __ Mov(x24, src_base);
+  __ Mov(x25, dst_base);
+  __ Mov(x18, dst_base + 16);
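+  // Post-index writeback adds the large offset to the base register, so the
+  // base is brought back into range with Sub/Add before dependent accesses.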
+  __ Ldp(w0, w1, MemOperand(x24, base_offset + 4, PostIndex));
+  __ Mov(x19, x24);
+  __ Sub(x24, x24, base_offset);
+  __ Ldp(w2, w3, MemOperand(x24, base_offset - 4, PostIndex));
+  __ Stp(w2, w3, MemOperand(x25, 4 - base_offset, PostIndex));
+  __ Mov(x20, x25);
+  __ Sub(x24, x24, base_offset);
+  __ Add(x25, x25, base_offset);
+  __ Stp(w0, w1, MemOperand(x25, -4 - base_offset, PostIndex));
+  __ Ldp(x4, x5, MemOperand(x24, base_offset + 8, PostIndex));
+  __ Mov(x21, x24);
+  __ Sub(x24, x24, base_offset);
+  __ Ldp(x6, x7, MemOperand(x24, base_offset - 8, PostIndex));
+  __ Stp(x7, x6, MemOperand(x18, 8 - base_offset, PostIndex));
+  __ Mov(x22, x18);
+  __ Add(x18, x18, base_offset);
+  __ Stp(x5, x4, MemOperand(x18, -8 - base_offset, PostIndex));
+  END();
+
+  RUN();
+
+  CHECK_EQUAL_64(0x44556677, x0);
+  CHECK_EQUAL_64(0x00112233, x1);
+  CHECK_EQUAL_64(0x00112233, x2);
+  CHECK_EQUAL_64(0xccddeeff, x3);
+  CHECK_EQUAL_64(0x4455667700112233UL, dst[0]);
+  CHECK_EQUAL_64(0x0000000000112233UL, dst[1]);
+  CHECK_EQUAL_64(0x0011223344556677UL, x4);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x5);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, x6);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, x7);
+  CHECK_EQUAL_64(0xffeeddccbbaa9988UL, dst[2]);
+  CHECK_EQUAL_64(0x8899aabbccddeeffUL, dst[3]);
+  CHECK_EQUAL_64(0x0011223344556677UL, dst[4]);
+  CHECK_EQUAL_64(src_base + base_offset, x24);
+  CHECK_EQUAL_64(dst_base - base_offset, x25);
+  CHECK_EQUAL_64(dst_base - base_offset + 16, x18);
+  CHECK_EQUAL_64(src_base + base_offset + 4, x19);
+  CHECK_EQUAL_64(dst_base - base_offset + 4, x20);
+  CHECK_EQUAL_64(src_base + base_offset + 8, x21);
+  CHECK_EQUAL_64(dst_base - base_offset + 24, x22);
 
   TEARDOWN();
 }
@@ -3079,8 +3274,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffffff80000000UL, x0);
-  ASSERT_EQUAL_64(0x000000007fffffffUL, x1);
+  CHECK_EQUAL_64(0xffffffff80000000UL, x0);
+  CHECK_EQUAL_64(0x000000007fffffffUL, x1);
 
   TEARDOWN();
 }
@@ -3113,19 +3308,19 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x6789abcd, x0);
-  ASSERT_EQUAL_64(0x6789abcd0000L, dst[0]);
-  ASSERT_EQUAL_64(0xabcdef0123456789L, x1);
-  ASSERT_EQUAL_64(0xcdef012345678900L, dst[1]);
-  ASSERT_EQUAL_64(0x000000ab, dst[2]);
-  ASSERT_EQUAL_64(0xabcdef01, x2);
-  ASSERT_EQUAL_64(0x00abcdef01000000L, dst[3]);
-  ASSERT_EQUAL_64(0x00000001, x3);
-  ASSERT_EQUAL_64(0x0100000000000000L, dst[4]);
-  ASSERT_EQUAL_64(src_base, x17);
-  ASSERT_EQUAL_64(dst_base, x18);
-  ASSERT_EQUAL_64(src_base + 16, x19);
-  ASSERT_EQUAL_64(dst_base + 32, x20);
+  CHECK_EQUAL_64(0x6789abcd, x0);
+  CHECK_EQUAL_64(0x6789abcd0000L, dst[0]);
+  CHECK_EQUAL_64(0xabcdef0123456789L, x1);
+  CHECK_EQUAL_64(0xcdef012345678900L, dst[1]);
+  CHECK_EQUAL_64(0x000000ab, dst[2]);
+  CHECK_EQUAL_64(0xabcdef01, x2);
+  CHECK_EQUAL_64(0x00abcdef01000000L, dst[3]);
+  CHECK_EQUAL_64(0x00000001, x3);
+  CHECK_EQUAL_64(0x0100000000000000L, dst[4]);
+  CHECK_EQUAL_64(src_base, x17);
+  CHECK_EQUAL_64(dst_base, x18);
+  CHECK_EQUAL_64(src_base + 16, x19);
+  CHECK_EQUAL_64(dst_base + 32, x20);
 
   TEARDOWN();
 }
@@ -3146,10 +3341,10 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1234567890abcdefUL, x2);
-  ASSERT_EQUAL_64(0xfedcba09, x3);
-  ASSERT_EQUAL_FP64(1.234, d13);
-  ASSERT_EQUAL_FP32(2.5, s25);
+  CHECK_EQUAL_64(0x1234567890abcdefUL, x2);
+  CHECK_EQUAL_64(0xfedcba09, x3);
+  CHECK_EQUAL_FP64(1.234, d13);
+  CHECK_EQUAL_FP32(2.5, s25);
 
   TEARDOWN();
 }
@@ -3158,7 +3353,7 @@
 static void LdrLiteralRangeHelper(ptrdiff_t range_,
                                   LiteralPoolEmitOption option,
                                   bool expect_dump) {
-  ASSERT(range_ > 0);
+  DCHECK(range_ > 0);
   SETUP_SIZE(range_ + 1024);
 
   Label label_1, label_2;
@@ -3177,19 +3372,19 @@
   START();
   // Force a pool dump so the pool starts off empty.
   __ EmitLiteralPool(JumpRequired);
-  ASSERT_LITERAL_POOL_SIZE(0);
+  DCHECK_LITERAL_POOL_SIZE(0);
 
   __ Ldr(x0, 0x1234567890abcdefUL);
   __ Ldr(w1, 0xfedcba09);
   __ Ldr(d0, 1.234);
   __ Ldr(s1, 2.5);
-  ASSERT_LITERAL_POOL_SIZE(4);
+  DCHECK_LITERAL_POOL_SIZE(4);
 
   code_size += 4 * sizeof(Instr);
 
   // Check that the requested range (allowing space for a branch over the pool)
   // can be handled by this test.
-  ASSERT((code_size + pool_guard_size) <= range);
+  DCHECK((code_size + pool_guard_size) <= range);
 
   // Emit NOPs up to 'range', leaving space for the pool guard.
   while ((code_size + pool_guard_size) < range) {
@@ -3203,41 +3398,41 @@
     code_size += sizeof(Instr);
   }
 
-  ASSERT(code_size == range);
-  ASSERT_LITERAL_POOL_SIZE(4);
+  DCHECK(code_size == range);
+  DCHECK_LITERAL_POOL_SIZE(4);
 
   // Possibly generate a literal pool.
   __ CheckLiteralPool(option);
   __ Bind(&label_1);
   if (expect_dump) {
-    ASSERT_LITERAL_POOL_SIZE(0);
+    DCHECK_LITERAL_POOL_SIZE(0);
   } else {
-    ASSERT_LITERAL_POOL_SIZE(4);
+    DCHECK_LITERAL_POOL_SIZE(4);
   }
 
   // Force a pool flush to check that a second pool functions correctly.
   __ EmitLiteralPool(JumpRequired);
-  ASSERT_LITERAL_POOL_SIZE(0);
+  DCHECK_LITERAL_POOL_SIZE(0);
 
   // These loads should be after the pool (and will require a new one).
   __ Ldr(x4, 0x34567890abcdef12UL);
   __ Ldr(w5, 0xdcba09fe);
   __ Ldr(d4, 123.4);
   __ Ldr(s5, 250.0);
-  ASSERT_LITERAL_POOL_SIZE(4);
+  DCHECK_LITERAL_POOL_SIZE(4);
   END();
 
   RUN();
 
   // Check that the literals loaded correctly.
-  ASSERT_EQUAL_64(0x1234567890abcdefUL, x0);
-  ASSERT_EQUAL_64(0xfedcba09, x1);
-  ASSERT_EQUAL_FP64(1.234, d0);
-  ASSERT_EQUAL_FP32(2.5, s1);
-  ASSERT_EQUAL_64(0x34567890abcdef12UL, x4);
-  ASSERT_EQUAL_64(0xdcba09fe, x5);
-  ASSERT_EQUAL_FP64(123.4, d4);
-  ASSERT_EQUAL_FP32(250.0, s5);
+  CHECK_EQUAL_64(0x1234567890abcdefUL, x0);
+  CHECK_EQUAL_64(0xfedcba09, x1);
+  CHECK_EQUAL_FP64(1.234, d0);
+  CHECK_EQUAL_FP32(2.5, s1);
+  CHECK_EQUAL_64(0x34567890abcdef12UL, x4);
+  CHECK_EQUAL_64(0xdcba09fe, x5);
+  CHECK_EQUAL_FP64(123.4, d4);
+  CHECK_EQUAL_FP32(250.0, s5);
 
   TEARDOWN();
 }
@@ -3324,25 +3519,25 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x123, x10);
-  ASSERT_EQUAL_64(0x123111, x11);
-  ASSERT_EQUAL_64(0xabc000, x12);
-  ASSERT_EQUAL_64(0x0, x13);
+  CHECK_EQUAL_64(0x123, x10);
+  CHECK_EQUAL_64(0x123111, x11);
+  CHECK_EQUAL_64(0xabc000, x12);
+  CHECK_EQUAL_64(0x0, x13);
 
-  ASSERT_EQUAL_32(0x123, w14);
-  ASSERT_EQUAL_32(0x123111, w15);
-  ASSERT_EQUAL_32(0xabc000, w16);
-  ASSERT_EQUAL_32(0x0, w17);
+  CHECK_EQUAL_32(0x123, w14);
+  CHECK_EQUAL_32(0x123111, w15);
+  CHECK_EQUAL_32(0xabc000, w16);
+  CHECK_EQUAL_32(0x0, w17);
 
-  ASSERT_EQUAL_64(0xffffffffffffffffL, x20);
-  ASSERT_EQUAL_64(0x1000, x21);
-  ASSERT_EQUAL_64(0x111, x22);
-  ASSERT_EQUAL_64(0x7fffffffffffffffL, x23);
+  CHECK_EQUAL_64(0xffffffffffffffffL, x20);
+  CHECK_EQUAL_64(0x1000, x21);
+  CHECK_EQUAL_64(0x111, x22);
+  CHECK_EQUAL_64(0x7fffffffffffffffL, x23);
 
-  ASSERT_EQUAL_32(0xffffffff, w24);
-  ASSERT_EQUAL_32(0x1000, w25);
-  ASSERT_EQUAL_32(0x111, w26);
-  ASSERT_EQUAL_32(0xffffffff, w27);
+  CHECK_EQUAL_32(0xffffffff, w24);
+  CHECK_EQUAL_32(0x1000, w25);
+  CHECK_EQUAL_32(0x111, w26);
+  CHECK_EQUAL_32(0xffffffff, w27);
 
   TEARDOWN();
 }
@@ -3362,22 +3557,26 @@
   __ Add(w12, w0, Operand(0x12345678));
   __ Add(w13, w1, Operand(0xffffffff));
 
-  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
+  __ Add(w18, w0, Operand(kWMinInt));
+  __ Sub(w19, w0, Operand(kWMinInt));
 
+  __ Sub(x20, x0, Operand(0x1234567890abcdefUL));
   __ Sub(w21, w0, Operand(0x12345678));
   END();
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1234567890abcdefUL, x10);
-  ASSERT_EQUAL_64(0x100000000UL, x11);
+  CHECK_EQUAL_64(0x1234567890abcdefUL, x10);
+  CHECK_EQUAL_64(0x100000000UL, x11);
 
-  ASSERT_EQUAL_32(0x12345678, w12);
-  ASSERT_EQUAL_64(0x0, x13);
+  CHECK_EQUAL_32(0x12345678, w12);
+  CHECK_EQUAL_64(0x0, x13);
 
-  ASSERT_EQUAL_64(-0x1234567890abcdefUL, x20);
+  CHECK_EQUAL_32(kWMinInt, w18);
+  CHECK_EQUAL_32(kWMinInt, w19);
 
-  ASSERT_EQUAL_32(-0x12345678, w21);
+  CHECK_EQUAL_64(-0x1234567890abcdefUL, x20);
+  CHECK_EQUAL_32(-0x12345678, w21);
 
   TEARDOWN();
 }
@@ -3414,23 +3613,23 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
-  ASSERT_EQUAL_64(0x23456789abcdef00L, x11);
-  ASSERT_EQUAL_64(0x000123456789abcdL, x12);
-  ASSERT_EQUAL_64(0x000123456789abcdL, x13);
-  ASSERT_EQUAL_64(0xfffedcba98765432L, x14);
-  ASSERT_EQUAL_64(0xff89abcd, x15);
-  ASSERT_EQUAL_64(0xef89abcc, x18);
-  ASSERT_EQUAL_64(0xef0123456789abccL, x19);
+  CHECK_EQUAL_64(0xffffffffffffffffL, x10);
+  CHECK_EQUAL_64(0x23456789abcdef00L, x11);
+  CHECK_EQUAL_64(0x000123456789abcdL, x12);
+  CHECK_EQUAL_64(0x000123456789abcdL, x13);
+  CHECK_EQUAL_64(0xfffedcba98765432L, x14);
+  CHECK_EQUAL_64(0xff89abcd, x15);
+  CHECK_EQUAL_64(0xef89abcc, x18);
+  CHECK_EQUAL_64(0xef0123456789abccL, x19);
 
-  ASSERT_EQUAL_64(0x0123456789abcdefL, x20);
-  ASSERT_EQUAL_64(0xdcba9876543210ffL, x21);
-  ASSERT_EQUAL_64(0xfffedcba98765432L, x22);
-  ASSERT_EQUAL_64(0xfffedcba98765432L, x23);
-  ASSERT_EQUAL_64(0x000123456789abcdL, x24);
-  ASSERT_EQUAL_64(0x00765432, x25);
-  ASSERT_EQUAL_64(0x10765432, x26);
-  ASSERT_EQUAL_64(0x10fedcba98765432L, x27);
+  CHECK_EQUAL_64(0x0123456789abcdefL, x20);
+  CHECK_EQUAL_64(0xdcba9876543210ffL, x21);
+  CHECK_EQUAL_64(0xfffedcba98765432L, x22);
+  CHECK_EQUAL_64(0xfffedcba98765432L, x23);
+  CHECK_EQUAL_64(0x000123456789abcdL, x24);
+  CHECK_EQUAL_64(0x00765432, x25);
+  CHECK_EQUAL_64(0x10765432, x26);
+  CHECK_EQUAL_64(0x10fedcba98765432L, x27);
 
   TEARDOWN();
 }
@@ -3476,32 +3675,32 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xefL, x10);
-  ASSERT_EQUAL_64(0x1deL, x11);
-  ASSERT_EQUAL_64(0x337bcL, x12);
-  ASSERT_EQUAL_64(0x89abcdef0L, x13);
+  CHECK_EQUAL_64(0xefL, x10);
+  CHECK_EQUAL_64(0x1deL, x11);
+  CHECK_EQUAL_64(0x337bcL, x12);
+  CHECK_EQUAL_64(0x89abcdef0L, x13);
 
-  ASSERT_EQUAL_64(0xffffffffffffffefL, x14);
-  ASSERT_EQUAL_64(0xffffffffffffffdeL, x15);
-  ASSERT_EQUAL_64(0xffffffffffff37bcL, x16);
-  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x17);
-  ASSERT_EQUAL_64(0x10L, x18);
-  ASSERT_EQUAL_64(0x20L, x19);
-  ASSERT_EQUAL_64(0xc840L, x20);
-  ASSERT_EQUAL_64(0x3b2a19080L, x21);
+  CHECK_EQUAL_64(0xffffffffffffffefL, x14);
+  CHECK_EQUAL_64(0xffffffffffffffdeL, x15);
+  CHECK_EQUAL_64(0xffffffffffff37bcL, x16);
+  CHECK_EQUAL_64(0xfffffffc4d5e6f78L, x17);
+  CHECK_EQUAL_64(0x10L, x18);
+  CHECK_EQUAL_64(0x20L, x19);
+  CHECK_EQUAL_64(0xc840L, x20);
+  CHECK_EQUAL_64(0x3b2a19080L, x21);
 
-  ASSERT_EQUAL_64(0x0123456789abce0fL, x22);
-  ASSERT_EQUAL_64(0x0123456789abcdcfL, x23);
+  CHECK_EQUAL_64(0x0123456789abce0fL, x22);
+  CHECK_EQUAL_64(0x0123456789abcdcfL, x23);
 
-  ASSERT_EQUAL_32(0x89abce2f, w24);
-  ASSERT_EQUAL_32(0xffffffef, w25);
-  ASSERT_EQUAL_32(0xffffffde, w26);
-  ASSERT_EQUAL_32(0xc3b2a188, w27);
+  CHECK_EQUAL_32(0x89abce2f, w24);
+  CHECK_EQUAL_32(0xffffffef, w25);
+  CHECK_EQUAL_32(0xffffffde, w26);
+  CHECK_EQUAL_32(0xc3b2a188, w27);
 
-  ASSERT_EQUAL_32(0x4d5e6f78, w28);
-  ASSERT_EQUAL_64(0xfffffffc4d5e6f78L, x29);
+  CHECK_EQUAL_32(0x4d5e6f78, w28);
+  CHECK_EQUAL_64(0xfffffffc4d5e6f78L, x29);
 
-  ASSERT_EQUAL_64(256, x30);
+  CHECK_EQUAL_64(256, x30);
 
   TEARDOWN();
 }
@@ -3535,19 +3734,19 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(-42, x10);
-  ASSERT_EQUAL_64(4000, x11);
-  ASSERT_EQUAL_64(0x1122334455667700, x12);
+  CHECK_EQUAL_64(-42, x10);
+  CHECK_EQUAL_64(4000, x11);
+  CHECK_EQUAL_64(0x1122334455667700, x12);
 
-  ASSERT_EQUAL_64(600, x13);
-  ASSERT_EQUAL_64(5000, x14);
-  ASSERT_EQUAL_64(0x1122334455667cdd, x15);
+  CHECK_EQUAL_64(600, x13);
+  CHECK_EQUAL_64(5000, x14);
+  CHECK_EQUAL_64(0x1122334455667cdd, x15);
 
-  ASSERT_EQUAL_32(0x11223000, w19);
-  ASSERT_EQUAL_32(398000, w20);
+  CHECK_EQUAL_32(0x11223000, w19);
+  CHECK_EQUAL_32(398000, w20);
 
-  ASSERT_EQUAL_32(0x11223400, w21);
-  ASSERT_EQUAL_32(402000, w22);
+  CHECK_EQUAL_32(0x11223400, w21);
+  CHECK_EQUAL_32(402000, w22);
 
   TEARDOWN();
 }
@@ -3583,9 +3782,9 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0, x0);
-  ASSERT_EQUAL_64(0, x1);
-  ASSERT_EQUAL_64(0, x2);
+  CHECK_EQUAL_64(0, x0);
+  CHECK_EQUAL_64(0, x1);
+  CHECK_EQUAL_64(0, x2);
 
   TEARDOWN();
 }
@@ -3651,20 +3850,20 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xfffffffffffffeddUL, x1);
-  ASSERT_EQUAL_64(0xfffffedd, x2);
-  ASSERT_EQUAL_64(0x1db97530eca86422UL, x3);
-  ASSERT_EQUAL_64(0xd950c844, x4);
-  ASSERT_EQUAL_64(0xe1db97530eca8643UL, x5);
-  ASSERT_EQUAL_64(0xf7654322, x6);
-  ASSERT_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
-  ASSERT_EQUAL_64(0x01d950c9, x8);
-  ASSERT_EQUAL_64(0xffffff11, x9);
-  ASSERT_EQUAL_64(0x0000000000000022UL, x10);
-  ASSERT_EQUAL_64(0xfffcc844, x11);
-  ASSERT_EQUAL_64(0x0000000000019088UL, x12);
-  ASSERT_EQUAL_64(0x65432110, x13);
-  ASSERT_EQUAL_64(0x0000000765432110UL, x14);
+  CHECK_EQUAL_64(0xfffffffffffffeddUL, x1);
+  CHECK_EQUAL_64(0xfffffedd, x2);
+  CHECK_EQUAL_64(0x1db97530eca86422UL, x3);
+  CHECK_EQUAL_64(0xd950c844, x4);
+  CHECK_EQUAL_64(0xe1db97530eca8643UL, x5);
+  CHECK_EQUAL_64(0xf7654322, x6);
+  CHECK_EQUAL_64(0x0076e5d4c3b2a191UL, x7);
+  CHECK_EQUAL_64(0x01d950c9, x8);
+  CHECK_EQUAL_64(0xffffff11, x9);
+  CHECK_EQUAL_64(0x0000000000000022UL, x10);
+  CHECK_EQUAL_64(0xfffcc844, x11);
+  CHECK_EQUAL_64(0x0000000000019088UL, x12);
+  CHECK_EQUAL_64(0x65432110, x13);
+  CHECK_EQUAL_64(0x0000000765432110UL, x14);
 
   TEARDOWN();
 }
@@ -3714,29 +3913,29 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xffffffffffffffffL, x5);
-  ASSERT_EQUAL_64(1L << 60, x6);
-  ASSERT_EQUAL_64(0xf0123456789abcddL, x7);
-  ASSERT_EQUAL_64(0x0111111111111110L, x8);
-  ASSERT_EQUAL_64(0x1222222222222221L, x9);
+  CHECK_EQUAL_64(0xffffffffffffffffL, x5);
+  CHECK_EQUAL_64(1L << 60, x6);
+  CHECK_EQUAL_64(0xf0123456789abcddL, x7);
+  CHECK_EQUAL_64(0x0111111111111110L, x8);
+  CHECK_EQUAL_64(0x1222222222222221L, x9);
 
-  ASSERT_EQUAL_32(0xffffffff, w10);
-  ASSERT_EQUAL_32(1 << 30, w11);
-  ASSERT_EQUAL_32(0xf89abcdd, w12);
-  ASSERT_EQUAL_32(0x91111110, w13);
-  ASSERT_EQUAL_32(0x9a222221, w14);
+  CHECK_EQUAL_32(0xffffffff, w10);
+  CHECK_EQUAL_32(1 << 30, w11);
+  CHECK_EQUAL_32(0xf89abcdd, w12);
+  CHECK_EQUAL_32(0x91111110, w13);
+  CHECK_EQUAL_32(0x9a222221, w14);
 
-  ASSERT_EQUAL_64(0xffffffffffffffffL + 1, x18);
-  ASSERT_EQUAL_64((1L << 60) + 1, x19);
-  ASSERT_EQUAL_64(0xf0123456789abcddL + 1, x20);
-  ASSERT_EQUAL_64(0x0111111111111110L + 1, x21);
-  ASSERT_EQUAL_64(0x1222222222222221L + 1, x22);
+  CHECK_EQUAL_64(0xffffffffffffffffL + 1, x18);
+  CHECK_EQUAL_64((1L << 60) + 1, x19);
+  CHECK_EQUAL_64(0xf0123456789abcddL + 1, x20);
+  CHECK_EQUAL_64(0x0111111111111110L + 1, x21);
+  CHECK_EQUAL_64(0x1222222222222221L + 1, x22);
 
-  ASSERT_EQUAL_32(0xffffffff + 1, w23);
-  ASSERT_EQUAL_32((1 << 30) + 1, w24);
-  ASSERT_EQUAL_32(0xf89abcdd + 1, w25);
-  ASSERT_EQUAL_32(0x91111110 + 1, w26);
-  ASSERT_EQUAL_32(0x9a222221 + 1, w27);
+  CHECK_EQUAL_32(0xffffffff + 1, w23);
+  CHECK_EQUAL_32((1 << 30) + 1, w24);
+  CHECK_EQUAL_32(0xf89abcdd + 1, w25);
+  CHECK_EQUAL_32(0x91111110 + 1, w26);
+  CHECK_EQUAL_32(0x9a222221 + 1, w27);
 
   // Check that adc correctly sets the condition flags.
   START();
@@ -3749,8 +3948,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZCFlag);
-  ASSERT_EQUAL_64(0, x10);
+  CHECK_EQUAL_NZCV(ZCFlag);
+  CHECK_EQUAL_64(0, x10);
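
For readers tracing the expected ZCFlag here: AArch64 adc feeds the carry chain into NZCV, so an addition that wraps to zero sets both Z and C. A minimal standalone sketch of that flag computation (the helper name and sample operands are ours, not from this test):

// Sketch only: NZCV for a 64-bit add-with-carry. Illustrative, not V8 code.
#include <cstdint>

struct Nzcv { bool n, z, c, v; };

Nzcv AdcFlags64(uint64_t a, uint64_t b, bool carry_in) {
  uint64_t partial = a + b;
  uint64_t result = partial + (carry_in ? 1 : 0);
  Nzcv flags;
  flags.n = (result >> 63) != 0;                     // sign bit of the result
  flags.z = (result == 0);                           // result is zero
  flags.c = (partial < a) || (result < partial);     // unsigned carry out
  flags.v = ((~(a ^ b) & (a ^ result)) >> 63) != 0;  // signed overflow
  return flags;
}

// Example: AdcFlags64(~0ULL, 0, true) wraps to 0 -> Z and C set (ZCFlag).
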
 
   START();
   __ Mov(x0, 1);
@@ -3762,8 +3961,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZCFlag);
-  ASSERT_EQUAL_64(0, x10);
+  CHECK_EQUAL_NZCV(ZCFlag);
+  CHECK_EQUAL_64(0, x10);
 
   START();
   __ Mov(x0, 0x10);
@@ -3775,8 +3974,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NVFlag);
-  ASSERT_EQUAL_64(0x8000000000000000L, x10);
+  CHECK_EQUAL_NZCV(NVFlag);
+  CHECK_EQUAL_64(0x8000000000000000L, x10);
 
   // Check that sbc correctly sets the condition flags.
   START();
@@ -3789,8 +3988,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZFlag);
-  ASSERT_EQUAL_64(0, x10);
+  CHECK_EQUAL_NZCV(ZFlag);
+  CHECK_EQUAL_64(0, x10);
 
   START();
   __ Mov(x0, 1);
@@ -3802,8 +4001,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0x8000000000000001L, x10);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0x8000000000000001L, x10);
 
   START();
   __ Mov(x0, 0);
@@ -3814,8 +4013,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZFlag);
-  ASSERT_EQUAL_64(0, x10);
+  CHECK_EQUAL_NZCV(ZFlag);
+  CHECK_EQUAL_64(0, x10);
 
   START();
   __ Mov(w0, 0x7fffffff);
@@ -3826,8 +4025,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0x80000000, x10);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0x80000000, x10);
 
   START();
   // Clear the C flag.
@@ -3837,8 +4036,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0x8000000000000000L, x10);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0x8000000000000000L, x10);
 
   START();
   __ Mov(x0, 0);
@@ -3849,8 +4048,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0xffffffffffffffffL, x10);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0xffffffffffffffffL, x10);
 
   START();
   __ Mov(x0, 0);
@@ -3861,8 +4060,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
-  ASSERT_EQUAL_64(0x8000000000000001L, x10);
+  CHECK_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_64(0x8000000000000001L, x10);
 
   TEARDOWN();
 }
@@ -3904,23 +4103,23 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1df, x10);
-  ASSERT_EQUAL_64(0xffffffffffff37bdL, x11);
-  ASSERT_EQUAL_64(0xfffffff765432110L, x12);
-  ASSERT_EQUAL_64(0x123456789abcdef1L, x13);
+  CHECK_EQUAL_64(0x1df, x10);
+  CHECK_EQUAL_64(0xffffffffffff37bdL, x11);
+  CHECK_EQUAL_64(0xfffffff765432110L, x12);
+  CHECK_EQUAL_64(0x123456789abcdef1L, x13);
 
-  ASSERT_EQUAL_32(0x1df, w14);
-  ASSERT_EQUAL_32(0xffff37bd, w15);
-  ASSERT_EQUAL_32(0x9abcdef1, w9);
+  CHECK_EQUAL_32(0x1df, w14);
+  CHECK_EQUAL_32(0xffff37bd, w15);
+  CHECK_EQUAL_32(0x9abcdef1, w9);
 
-  ASSERT_EQUAL_64(0x1df + 1, x20);
-  ASSERT_EQUAL_64(0xffffffffffff37bdL + 1, x21);
-  ASSERT_EQUAL_64(0xfffffff765432110L + 1, x22);
-  ASSERT_EQUAL_64(0x123456789abcdef1L + 1, x23);
+  CHECK_EQUAL_64(0x1df + 1, x20);
+  CHECK_EQUAL_64(0xffffffffffff37bdL + 1, x21);
+  CHECK_EQUAL_64(0xfffffff765432110L + 1, x22);
+  CHECK_EQUAL_64(0x123456789abcdef1L + 1, x23);
 
-  ASSERT_EQUAL_32(0x1df + 1, w24);
-  ASSERT_EQUAL_32(0xffff37bd + 1, w25);
-  ASSERT_EQUAL_32(0x9abcdef1 + 1, w26);
+  CHECK_EQUAL_32(0x1df + 1, w24);
+  CHECK_EQUAL_32(0xffff37bd + 1, w25);
+  CHECK_EQUAL_32(0x9abcdef1 + 1, w26);
 
   // Check that adc correctly sets the condition flags.
   START();
@@ -3933,7 +4132,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(CFlag);
+  CHECK_EQUAL_NZCV(CFlag);
 
   START();
   __ Mov(x0, 0x7fffffffffffffffL);
@@ -3945,7 +4144,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NVFlag);
+  CHECK_EQUAL_NZCV(NVFlag);
 
   START();
   __ Mov(x0, 0x7fffffffffffffffL);
@@ -3956,7 +4155,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NVFlag);
+  CHECK_EQUAL_NZCV(NVFlag);
 
   TEARDOWN();
 }
@@ -3992,19 +4191,19 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1234567890abcdefUL, x7);
-  ASSERT_EQUAL_64(0xffffffff, x8);
-  ASSERT_EQUAL_64(0xedcba9876f543210UL, x9);
-  ASSERT_EQUAL_64(0, x10);
-  ASSERT_EQUAL_64(0xffffffff, x11);
-  ASSERT_EQUAL_64(0xffff, x12);
+  CHECK_EQUAL_64(0x1234567890abcdefUL, x7);
+  CHECK_EQUAL_64(0xffffffff, x8);
+  CHECK_EQUAL_64(0xedcba9876f543210UL, x9);
+  CHECK_EQUAL_64(0, x10);
+  CHECK_EQUAL_64(0xffffffff, x11);
+  CHECK_EQUAL_64(0xffff, x12);
 
-  ASSERT_EQUAL_64(0x1234567890abcdefUL + 1, x18);
-  ASSERT_EQUAL_64(0, x19);
-  ASSERT_EQUAL_64(0xedcba9876f543211UL, x20);
-  ASSERT_EQUAL_64(1, x21);
-  ASSERT_EQUAL_64(0x100000000UL, x22);
-  ASSERT_EQUAL_64(0x10000, x23);
+  CHECK_EQUAL_64(0x1234567890abcdefUL + 1, x18);
+  CHECK_EQUAL_64(0, x19);
+  CHECK_EQUAL_64(0xedcba9876f543211UL, x20);
+  CHECK_EQUAL_64(1, x21);
+  CHECK_EQUAL_64(0x100000000UL, x22);
+  CHECK_EQUAL_64(0x10000, x23);
 
   TEARDOWN();
 }
@@ -4030,11 +4229,11 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0, x10);
-  ASSERT_EQUAL_64(-0x1111111111111111L, x11);
-  ASSERT_EQUAL_32(-0x11111111, w12);
-  ASSERT_EQUAL_64(-1L, x13);
-  ASSERT_EQUAL_32(0, w14);
+  CHECK_EQUAL_64(0, x10);
+  CHECK_EQUAL_64(-0x1111111111111111L, x11);
+  CHECK_EQUAL_32(-0x11111111, w12);
+  CHECK_EQUAL_64(-1L, x13);
+  CHECK_EQUAL_32(0, w14);
 
   START();
   __ Mov(x0, 0);
@@ -4043,7 +4242,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZCFlag);
+  CHECK_EQUAL_NZCV(ZCFlag);
 
   START();
   __ Mov(w0, 0);
@@ -4052,7 +4251,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZCFlag);
+  CHECK_EQUAL_NZCV(ZCFlag);
 
   START();
   __ Mov(x0, 0);
@@ -4062,7 +4261,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_NZCV(NFlag);
 
   START();
   __ Mov(w0, 0);
@@ -4072,7 +4271,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_NZCV(NFlag);
 
   START();
   __ Mov(x1, 0x1111111111111111L);
@@ -4081,7 +4280,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(CFlag);
+  CHECK_EQUAL_NZCV(CFlag);
 
   START();
   __ Mov(w1, 0x11111111);
@@ -4090,7 +4289,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(CFlag);
+  CHECK_EQUAL_NZCV(CFlag);
 
   START();
   __ Mov(x0, 1);
@@ -4100,7 +4299,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NVFlag);
+  CHECK_EQUAL_NZCV(NVFlag);
 
   START();
   __ Mov(w0, 1);
@@ -4110,7 +4309,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NVFlag);
+  CHECK_EQUAL_NZCV(NVFlag);
 
   START();
   __ Mov(x0, 1);
@@ -4120,7 +4319,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZCFlag);
+  CHECK_EQUAL_NZCV(ZCFlag);
 
   START();
   __ Mov(w0, 1);
@@ -4130,7 +4329,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZCFlag);
+  CHECK_EQUAL_NZCV(ZCFlag);
 
   START();
   __ Mov(w0, 0);
@@ -4142,7 +4341,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(NFlag);
+  CHECK_EQUAL_NZCV(NFlag);
 
   START();
   __ Mov(w0, 0);
@@ -4154,7 +4353,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_NZCV(ZCFlag);
+  CHECK_EQUAL_NZCV(ZCFlag);
 
   TEARDOWN();
 }
@@ -4203,14 +4402,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(ZCFlag, w0);
-  ASSERT_EQUAL_32(ZCFlag, w1);
-  ASSERT_EQUAL_32(ZCFlag, w2);
-  ASSERT_EQUAL_32(ZCFlag, w3);
-  ASSERT_EQUAL_32(ZCFlag, w4);
-  ASSERT_EQUAL_32(ZCFlag, w5);
-  ASSERT_EQUAL_32(ZCFlag, w6);
-  ASSERT_EQUAL_32(ZCFlag, w7);
+  CHECK_EQUAL_32(ZCFlag, w0);
+  CHECK_EQUAL_32(ZCFlag, w1);
+  CHECK_EQUAL_32(ZCFlag, w2);
+  CHECK_EQUAL_32(ZCFlag, w3);
+  CHECK_EQUAL_32(ZCFlag, w4);
+  CHECK_EQUAL_32(ZCFlag, w5);
+  CHECK_EQUAL_32(ZCFlag, w6);
+  CHECK_EQUAL_32(ZCFlag, w7);
 
   TEARDOWN();
 }
@@ -4256,14 +4455,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(ZCFlag, w0);
-  ASSERT_EQUAL_32(ZCFlag, w1);
-  ASSERT_EQUAL_32(ZCFlag, w2);
-  ASSERT_EQUAL_32(NCFlag, w3);
-  ASSERT_EQUAL_32(NCFlag, w4);
-  ASSERT_EQUAL_32(ZCFlag, w5);
-  ASSERT_EQUAL_32(NCFlag, w6);
-  ASSERT_EQUAL_32(ZCFlag, w7);
+  CHECK_EQUAL_32(ZCFlag, w0);
+  CHECK_EQUAL_32(ZCFlag, w1);
+  CHECK_EQUAL_32(ZCFlag, w2);
+  CHECK_EQUAL_32(NCFlag, w3);
+  CHECK_EQUAL_32(NCFlag, w4);
+  CHECK_EQUAL_32(ZCFlag, w5);
+  CHECK_EQUAL_32(NCFlag, w6);
+  CHECK_EQUAL_32(ZCFlag, w7);
 
   TEARDOWN();
 }
@@ -4302,12 +4501,12 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(NFlag, w0);
-  ASSERT_EQUAL_32(NCFlag, w1);
-  ASSERT_EQUAL_32(NoFlag, w2);
-  ASSERT_EQUAL_32(NZCVFlag, w3);
-  ASSERT_EQUAL_32(ZCFlag, w4);
-  ASSERT_EQUAL_32(ZCFlag, w5);
+  CHECK_EQUAL_32(NFlag, w0);
+  CHECK_EQUAL_32(NCFlag, w1);
+  CHECK_EQUAL_32(NoFlag, w2);
+  CHECK_EQUAL_32(NZCVFlag, w3);
+  CHECK_EQUAL_32(ZCFlag, w4);
+  CHECK_EQUAL_32(ZCFlag, w5);
 
   TEARDOWN();
 }
@@ -4331,8 +4530,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(NFlag, w0);
-  ASSERT_EQUAL_32(NoFlag, w1);
+  CHECK_EQUAL_32(NFlag, w0);
+  CHECK_EQUAL_32(NoFlag, w1);
 
   TEARDOWN();
 }
@@ -4372,11 +4571,11 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(ZCFlag, w0);
-  ASSERT_EQUAL_32(ZCFlag, w1);
-  ASSERT_EQUAL_32(ZCFlag, w2);
-  ASSERT_EQUAL_32(NCFlag, w3);
-  ASSERT_EQUAL_32(NZCVFlag, w4);
+  CHECK_EQUAL_32(ZCFlag, w0);
+  CHECK_EQUAL_32(ZCFlag, w1);
+  CHECK_EQUAL_32(ZCFlag, w2);
+  CHECK_EQUAL_32(NCFlag, w3);
+  CHECK_EQUAL_32(NZCVFlag, w4);
 
   TEARDOWN();
 }
@@ -4426,27 +4625,27 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x0000000f, x0);
-  ASSERT_EQUAL_64(0x0000001f, x1);
-  ASSERT_EQUAL_64(0x00000020, x2);
-  ASSERT_EQUAL_64(0x0000000f, x3);
-  ASSERT_EQUAL_64(0xffffffe0ffffffe0UL, x4);
-  ASSERT_EQUAL_64(0x0000000f0000000fUL, x5);
-  ASSERT_EQUAL_64(0xffffffe0ffffffe1UL, x6);
-  ASSERT_EQUAL_64(0x0000000f0000000fUL, x7);
-  ASSERT_EQUAL_64(0x00000001, x8);
-  ASSERT_EQUAL_64(0xffffffff, x9);
-  ASSERT_EQUAL_64(0x0000001f00000020UL, x10);
-  ASSERT_EQUAL_64(0xfffffff0fffffff0UL, x11);
-  ASSERT_EQUAL_64(0xfffffff0fffffff1UL, x12);
-  ASSERT_EQUAL_64(0x0000000f, x13);
-  ASSERT_EQUAL_64(0x0000000f0000000fUL, x14);
-  ASSERT_EQUAL_64(0x0000000f, x15);
-  ASSERT_EQUAL_64(0x0000000f0000000fUL, x18);
-  ASSERT_EQUAL_64(0, x24);
-  ASSERT_EQUAL_64(0x0000001f0000001fUL, x25);
-  ASSERT_EQUAL_64(0x0000001f0000001fUL, x26);
-  ASSERT_EQUAL_64(0, x27);
+  CHECK_EQUAL_64(0x0000000f, x0);
+  CHECK_EQUAL_64(0x0000001f, x1);
+  CHECK_EQUAL_64(0x00000020, x2);
+  CHECK_EQUAL_64(0x0000000f, x3);
+  CHECK_EQUAL_64(0xffffffe0ffffffe0UL, x4);
+  CHECK_EQUAL_64(0x0000000f0000000fUL, x5);
+  CHECK_EQUAL_64(0xffffffe0ffffffe1UL, x6);
+  CHECK_EQUAL_64(0x0000000f0000000fUL, x7);
+  CHECK_EQUAL_64(0x00000001, x8);
+  CHECK_EQUAL_64(0xffffffff, x9);
+  CHECK_EQUAL_64(0x0000001f00000020UL, x10);
+  CHECK_EQUAL_64(0xfffffff0fffffff0UL, x11);
+  CHECK_EQUAL_64(0xfffffff0fffffff1UL, x12);
+  CHECK_EQUAL_64(0x0000000f, x13);
+  CHECK_EQUAL_64(0x0000000f0000000fUL, x14);
+  CHECK_EQUAL_64(0x0000000f, x15);
+  CHECK_EQUAL_64(0x0000000f0000000fUL, x18);
+  CHECK_EQUAL_64(0, x24);
+  CHECK_EQUAL_64(0x0000001f0000001fUL, x25);
+  CHECK_EQUAL_64(0x0000001f0000001fUL, x26);
+  CHECK_EQUAL_64(0, x27);
 
   TEARDOWN();
 }
@@ -4484,23 +4683,23 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(-2, w0);
-  ASSERT_EQUAL_32(-1, w1);
-  ASSERT_EQUAL_32(0, w2);
-  ASSERT_EQUAL_32(1, w3);
-  ASSERT_EQUAL_32(2, w4);
-  ASSERT_EQUAL_32(-1, w5);
-  ASSERT_EQUAL_32(0x40000000, w6);
-  ASSERT_EQUAL_32(0x80000000, w7);
+  CHECK_EQUAL_32(-2, w0);
+  CHECK_EQUAL_32(-1, w1);
+  CHECK_EQUAL_32(0, w2);
+  CHECK_EQUAL_32(1, w3);
+  CHECK_EQUAL_32(2, w4);
+  CHECK_EQUAL_32(-1, w5);
+  CHECK_EQUAL_32(0x40000000, w6);
+  CHECK_EQUAL_32(0x80000000, w7);
 
-  ASSERT_EQUAL_64(-2, x8);
-  ASSERT_EQUAL_64(-1, x9);
-  ASSERT_EQUAL_64(0, x10);
-  ASSERT_EQUAL_64(1, x11);
-  ASSERT_EQUAL_64(2, x12);
-  ASSERT_EQUAL_64(-1, x13);
-  ASSERT_EQUAL_64(0x4000000000000000UL, x14);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x15);
+  CHECK_EQUAL_64(-2, x8);
+  CHECK_EQUAL_64(-1, x9);
+  CHECK_EQUAL_64(0, x10);
+  CHECK_EQUAL_64(1, x11);
+  CHECK_EQUAL_64(2, x12);
+  CHECK_EQUAL_64(-1, x13);
+  CHECK_EQUAL_64(0x4000000000000000UL, x14);
+  CHECK_EQUAL_64(0x8000000000000000UL, x15);
 
   TEARDOWN();
 }
@@ -4541,19 +4740,19 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(value, x0);
-  ASSERT_EQUAL_64(value << (shift[0] & 63), x16);
-  ASSERT_EQUAL_64(value << (shift[1] & 63), x17);
-  ASSERT_EQUAL_64(value << (shift[2] & 63), x18);
-  ASSERT_EQUAL_64(value << (shift[3] & 63), x19);
-  ASSERT_EQUAL_64(value << (shift[4] & 63), x20);
-  ASSERT_EQUAL_64(value << (shift[5] & 63), x21);
-  ASSERT_EQUAL_32(value << (shift[0] & 31), w22);
-  ASSERT_EQUAL_32(value << (shift[1] & 31), w23);
-  ASSERT_EQUAL_32(value << (shift[2] & 31), w24);
-  ASSERT_EQUAL_32(value << (shift[3] & 31), w25);
-  ASSERT_EQUAL_32(value << (shift[4] & 31), w26);
-  ASSERT_EQUAL_32(value << (shift[5] & 31), w27);
+  CHECK_EQUAL_64(value, x0);
+  CHECK_EQUAL_64(value << (shift[0] & 63), x16);
+  CHECK_EQUAL_64(value << (shift[1] & 63), x17);
+  CHECK_EQUAL_64(value << (shift[2] & 63), x18);
+  CHECK_EQUAL_64(value << (shift[3] & 63), x19);
+  CHECK_EQUAL_64(value << (shift[4] & 63), x20);
+  CHECK_EQUAL_64(value << (shift[5] & 63), x21);
+  CHECK_EQUAL_32(value << (shift[0] & 31), w22);
+  CHECK_EQUAL_32(value << (shift[1] & 31), w23);
+  CHECK_EQUAL_32(value << (shift[2] & 31), w24);
+  CHECK_EQUAL_32(value << (shift[3] & 31), w25);
+  CHECK_EQUAL_32(value << (shift[4] & 31), w26);
+  CHECK_EQUAL_32(value << (shift[5] & 31), w27);
 
   TEARDOWN();
 }
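
The `& 63` and `& 31` masks in the expected values above are not an artifact of the test: AArch64 register-controlled shifts use only the shift amount modulo the register width. A minimal sketch of that rule (function names are ours):

// Sketch: AArch64 lsl-by-register takes the shift modulo the width.
#include <cstdint>

uint64_t LslX(uint64_t value, uint64_t shift) { return value << (shift & 63); }
uint32_t LslW(uint32_t value, uint32_t shift) { return value << (shift & 31); }

// So LslX(v, 64) == v and LslW(v, 33) == (v << 1), which is exactly how
// the expected values for x16-x21 and w22-w27 are computed above.
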
@@ -4594,21 +4793,21 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(value, x0);
-  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
-  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
-  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
-  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
-  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
-  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+  CHECK_EQUAL_64(value, x0);
+  CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
+  CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
+  CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
+  CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
+  CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
+  CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
 
   value &= 0xffffffffUL;
-  ASSERT_EQUAL_32(value >> (shift[0] & 31), w22);
-  ASSERT_EQUAL_32(value >> (shift[1] & 31), w23);
-  ASSERT_EQUAL_32(value >> (shift[2] & 31), w24);
-  ASSERT_EQUAL_32(value >> (shift[3] & 31), w25);
-  ASSERT_EQUAL_32(value >> (shift[4] & 31), w26);
-  ASSERT_EQUAL_32(value >> (shift[5] & 31), w27);
+  CHECK_EQUAL_32(value >> (shift[0] & 31), w22);
+  CHECK_EQUAL_32(value >> (shift[1] & 31), w23);
+  CHECK_EQUAL_32(value >> (shift[2] & 31), w24);
+  CHECK_EQUAL_32(value >> (shift[3] & 31), w25);
+  CHECK_EQUAL_32(value >> (shift[4] & 31), w26);
+  CHECK_EQUAL_32(value >> (shift[5] & 31), w27);
 
   TEARDOWN();
 }
@@ -4649,21 +4848,21 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(value, x0);
-  ASSERT_EQUAL_64(value >> (shift[0] & 63), x16);
-  ASSERT_EQUAL_64(value >> (shift[1] & 63), x17);
-  ASSERT_EQUAL_64(value >> (shift[2] & 63), x18);
-  ASSERT_EQUAL_64(value >> (shift[3] & 63), x19);
-  ASSERT_EQUAL_64(value >> (shift[4] & 63), x20);
-  ASSERT_EQUAL_64(value >> (shift[5] & 63), x21);
+  CHECK_EQUAL_64(value, x0);
+  CHECK_EQUAL_64(value >> (shift[0] & 63), x16);
+  CHECK_EQUAL_64(value >> (shift[1] & 63), x17);
+  CHECK_EQUAL_64(value >> (shift[2] & 63), x18);
+  CHECK_EQUAL_64(value >> (shift[3] & 63), x19);
+  CHECK_EQUAL_64(value >> (shift[4] & 63), x20);
+  CHECK_EQUAL_64(value >> (shift[5] & 63), x21);
 
   int32_t value32 = static_cast<int32_t>(value & 0xffffffffUL);
-  ASSERT_EQUAL_32(value32 >> (shift[0] & 31), w22);
-  ASSERT_EQUAL_32(value32 >> (shift[1] & 31), w23);
-  ASSERT_EQUAL_32(value32 >> (shift[2] & 31), w24);
-  ASSERT_EQUAL_32(value32 >> (shift[3] & 31), w25);
-  ASSERT_EQUAL_32(value32 >> (shift[4] & 31), w26);
-  ASSERT_EQUAL_32(value32 >> (shift[5] & 31), w27);
+  CHECK_EQUAL_32(value32 >> (shift[0] & 31), w22);
+  CHECK_EQUAL_32(value32 >> (shift[1] & 31), w23);
+  CHECK_EQUAL_32(value32 >> (shift[2] & 31), w24);
+  CHECK_EQUAL_32(value32 >> (shift[3] & 31), w25);
+  CHECK_EQUAL_32(value32 >> (shift[4] & 31), w26);
+  CHECK_EQUAL_32(value32 >> (shift[5] & 31), w27);
 
   TEARDOWN();
 }
@@ -4704,19 +4903,19 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(value, x0);
-  ASSERT_EQUAL_64(0xf0123456789abcdeUL, x16);
-  ASSERT_EQUAL_64(0xef0123456789abcdUL, x17);
-  ASSERT_EQUAL_64(0xdef0123456789abcUL, x18);
-  ASSERT_EQUAL_64(0xcdef0123456789abUL, x19);
-  ASSERT_EQUAL_64(0xabcdef0123456789UL, x20);
-  ASSERT_EQUAL_64(0x789abcdef0123456UL, x21);
-  ASSERT_EQUAL_32(0xf89abcde, w22);
-  ASSERT_EQUAL_32(0xef89abcd, w23);
-  ASSERT_EQUAL_32(0xdef89abc, w24);
-  ASSERT_EQUAL_32(0xcdef89ab, w25);
-  ASSERT_EQUAL_32(0xabcdef89, w26);
-  ASSERT_EQUAL_32(0xf89abcde, w27);
+  CHECK_EQUAL_64(value, x0);
+  CHECK_EQUAL_64(0xf0123456789abcdeUL, x16);
+  CHECK_EQUAL_64(0xef0123456789abcdUL, x17);
+  CHECK_EQUAL_64(0xdef0123456789abcUL, x18);
+  CHECK_EQUAL_64(0xcdef0123456789abUL, x19);
+  CHECK_EQUAL_64(0xabcdef0123456789UL, x20);
+  CHECK_EQUAL_64(0x789abcdef0123456UL, x21);
+  CHECK_EQUAL_32(0xf89abcde, w22);
+  CHECK_EQUAL_32(0xef89abcd, w23);
+  CHECK_EQUAL_32(0xdef89abc, w24);
+  CHECK_EQUAL_32(0xcdef89ab, w25);
+  CHECK_EQUAL_32(0xabcdef89, w26);
+  CHECK_EQUAL_32(0xf89abcde, w27);
 
   TEARDOWN();
 }
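
The expected values in this hunk are successive rotations of 0x0123456789abcdef (inferred from x16-x21; the instruction sequence itself falls outside the hunk). A standalone sketch of the 64-bit rotate:

// Sketch: 64-bit rotate-right, e.g. RorX(0x0123456789abcdefUL, 4)
// == 0xf0123456789abcdeUL, the value checked in x16.
#include <cstdint>

uint64_t RorX(uint64_t value, unsigned amount) {
  amount &= 63;
  if (amount == 0) return value;  // avoid a 64-bit shift, which is UB
  return (value >> amount) | (value << (64 - amount));
}
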
@@ -4750,14 +4949,14 @@
   RUN();
 
 
-  ASSERT_EQUAL_64(0x88888888888889abL, x10);
-  ASSERT_EQUAL_64(0x8888cdef88888888L, x11);
+  CHECK_EQUAL_64(0x88888888888889abL, x10);
+  CHECK_EQUAL_64(0x8888cdef88888888L, x11);
 
-  ASSERT_EQUAL_32(0x888888ab, w20);
-  ASSERT_EQUAL_32(0x88cdef88, w21);
+  CHECK_EQUAL_32(0x888888ab, w20);
+  CHECK_EQUAL_32(0x88cdef88, w21);
 
-  ASSERT_EQUAL_64(0x8888888888ef8888L, x12);
-  ASSERT_EQUAL_64(0x88888888888888abL, x13);
+  CHECK_EQUAL_64(0x8888888888ef8888L, x12);
+  CHECK_EQUAL_64(0x88888888888888abL, x13);
 
   TEARDOWN();
 }
@@ -4799,28 +4998,28 @@
   RUN();
 
 
-  ASSERT_EQUAL_64(0xffffffffffff89abL, x10);
-  ASSERT_EQUAL_64(0xffffcdef00000000L, x11);
-  ASSERT_EQUAL_64(0x4567L, x12);
-  ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+  CHECK_EQUAL_64(0xffffffffffff89abL, x10);
+  CHECK_EQUAL_64(0xffffcdef00000000L, x11);
+  CHECK_EQUAL_64(0x4567L, x12);
+  CHECK_EQUAL_64(0x789abcdef0000L, x13);
 
-  ASSERT_EQUAL_32(0xffffffab, w14);
-  ASSERT_EQUAL_32(0xffcdef00, w15);
-  ASSERT_EQUAL_32(0x54, w16);
-  ASSERT_EQUAL_32(0x00321000, w17);
+  CHECK_EQUAL_32(0xffffffab, w14);
+  CHECK_EQUAL_32(0xffcdef00, w15);
+  CHECK_EQUAL_32(0x54, w16);
+  CHECK_EQUAL_32(0x00321000, w17);
 
-  ASSERT_EQUAL_64(0x01234567L, x18);
-  ASSERT_EQUAL_64(0xfffffffffedcba98L, x19);
-  ASSERT_EQUAL_64(0xffffffffffcdef00L, x20);
-  ASSERT_EQUAL_64(0x321000L, x21);
-  ASSERT_EQUAL_64(0xffffffffffffabcdL, x22);
-  ASSERT_EQUAL_64(0x5432L, x23);
-  ASSERT_EQUAL_64(0xffffffffffffffefL, x24);
-  ASSERT_EQUAL_64(0x10, x25);
-  ASSERT_EQUAL_64(0xffffffffffffcdefL, x26);
-  ASSERT_EQUAL_64(0x3210, x27);
-  ASSERT_EQUAL_64(0xffffffff89abcdefL, x28);
-  ASSERT_EQUAL_64(0x76543210, x29);
+  CHECK_EQUAL_64(0x01234567L, x18);
+  CHECK_EQUAL_64(0xfffffffffedcba98L, x19);
+  CHECK_EQUAL_64(0xffffffffffcdef00L, x20);
+  CHECK_EQUAL_64(0x321000L, x21);
+  CHECK_EQUAL_64(0xffffffffffffabcdL, x22);
+  CHECK_EQUAL_64(0x5432L, x23);
+  CHECK_EQUAL_64(0xffffffffffffffefL, x24);
+  CHECK_EQUAL_64(0x10, x25);
+  CHECK_EQUAL_64(0xffffffffffffcdefL, x26);
+  CHECK_EQUAL_64(0x3210, x27);
+  CHECK_EQUAL_64(0xffffffff89abcdefL, x28);
+  CHECK_EQUAL_64(0x76543210, x29);
 
   TEARDOWN();
 }
@@ -4860,24 +5059,24 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x00000000000089abL, x10);
-  ASSERT_EQUAL_64(0x0000cdef00000000L, x11);
-  ASSERT_EQUAL_64(0x4567L, x12);
-  ASSERT_EQUAL_64(0x789abcdef0000L, x13);
+  CHECK_EQUAL_64(0x00000000000089abL, x10);
+  CHECK_EQUAL_64(0x0000cdef00000000L, x11);
+  CHECK_EQUAL_64(0x4567L, x12);
+  CHECK_EQUAL_64(0x789abcdef0000L, x13);
 
-  ASSERT_EQUAL_32(0x000000ab, w25);
-  ASSERT_EQUAL_32(0x00cdef00, w26);
-  ASSERT_EQUAL_32(0x54, w27);
-  ASSERT_EQUAL_32(0x00321000, w28);
+  CHECK_EQUAL_32(0x000000ab, w25);
+  CHECK_EQUAL_32(0x00cdef00, w26);
+  CHECK_EQUAL_32(0x54, w27);
+  CHECK_EQUAL_32(0x00321000, w28);
 
-  ASSERT_EQUAL_64(0x8000000000000000L, x15);
-  ASSERT_EQUAL_64(0x0123456789abcdefL, x16);
-  ASSERT_EQUAL_64(0x01234567L, x17);
-  ASSERT_EQUAL_64(0xcdef00L, x18);
-  ASSERT_EQUAL_64(0xabcdL, x19);
-  ASSERT_EQUAL_64(0xefL, x20);
-  ASSERT_EQUAL_64(0xcdefL, x21);
-  ASSERT_EQUAL_64(0x89abcdefL, x22);
+  CHECK_EQUAL_64(0x8000000000000000L, x15);
+  CHECK_EQUAL_64(0x0123456789abcdefL, x16);
+  CHECK_EQUAL_64(0x01234567L, x17);
+  CHECK_EQUAL_64(0xcdef00L, x18);
+  CHECK_EQUAL_64(0xabcdL, x19);
+  CHECK_EQUAL_64(0xefL, x20);
+  CHECK_EQUAL_64(0xcdefL, x21);
+  CHECK_EQUAL_64(0x89abcdefL, x22);
 
   TEARDOWN();
 }
@@ -4906,16 +5105,16 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x76543210, x10);
-  ASSERT_EQUAL_64(0xfedcba9876543210L, x11);
-  ASSERT_EQUAL_64(0xbb2a1908, x12);
-  ASSERT_EQUAL_64(0x0048d159e26af37bUL, x13);
-  ASSERT_EQUAL_64(0x89abcdef, x20);
-  ASSERT_EQUAL_64(0x0123456789abcdefL, x21);
-  ASSERT_EQUAL_64(0x19083b2a, x22);
-  ASSERT_EQUAL_64(0x13579bdf, x23);
-  ASSERT_EQUAL_64(0x7f6e5d4c3b2a1908UL, x24);
-  ASSERT_EQUAL_64(0x02468acf13579bdeUL, x25);
+  CHECK_EQUAL_64(0x76543210, x10);
+  CHECK_EQUAL_64(0xfedcba9876543210L, x11);
+  CHECK_EQUAL_64(0xbb2a1908, x12);
+  CHECK_EQUAL_64(0x0048d159e26af37bUL, x13);
+  CHECK_EQUAL_64(0x89abcdef, x20);
+  CHECK_EQUAL_64(0x0123456789abcdefL, x21);
+  CHECK_EQUAL_64(0x19083b2a, x22);
+  CHECK_EQUAL_64(0x13579bdf, x23);
+  CHECK_EQUAL_64(0x7f6e5d4c3b2a1908UL, x24);
+  CHECK_EQUAL_64(0x02468acf13579bdeUL, x25);
 
   TEARDOWN();
 }
@@ -4938,14 +5137,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.0, s11);
-  ASSERT_EQUAL_FP64(-13.0, d22);
-  ASSERT_EQUAL_FP32(255.0, s1);
-  ASSERT_EQUAL_FP64(12.34567, d2);
-  ASSERT_EQUAL_FP32(0.0, s3);
-  ASSERT_EQUAL_FP64(0.0, d4);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d6);
+  CHECK_EQUAL_FP32(1.0, s11);
+  CHECK_EQUAL_FP64(-13.0, d22);
+  CHECK_EQUAL_FP32(255.0, s1);
+  CHECK_EQUAL_FP64(12.34567, d2);
+  CHECK_EQUAL_FP32(0.0, s3);
+  CHECK_EQUAL_FP64(0.0, d4);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d6);
 
   TEARDOWN();
 }
@@ -4970,13 +5169,13 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(float_to_rawbits(1.0), w10);
-  ASSERT_EQUAL_FP32(1.0, s30);
-  ASSERT_EQUAL_FP32(1.0, s5);
-  ASSERT_EQUAL_64(double_to_rawbits(-13.0), x1);
-  ASSERT_EQUAL_FP64(-13.0, d2);
-  ASSERT_EQUAL_FP64(-13.0, d4);
-  ASSERT_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
+  CHECK_EQUAL_32(float_to_rawbits(1.0), w10);
+  CHECK_EQUAL_FP32(1.0, s30);
+  CHECK_EQUAL_FP32(1.0, s5);
+  CHECK_EQUAL_64(double_to_rawbits(-13.0), x1);
+  CHECK_EQUAL_FP64(-13.0, d2);
+  CHECK_EQUAL_FP64(-13.0, d4);
+  CHECK_EQUAL_FP32(rawbits_to_float(0x89abcdef), s6);
 
   TEARDOWN();
 }
@@ -5020,20 +5219,20 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(4.25, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(1.0, s2);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
-  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
-  ASSERT_EQUAL_FP64(0.25, d7);
-  ASSERT_EQUAL_FP64(2.25, d8);
-  ASSERT_EQUAL_FP64(2.25, d9);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d10);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d11);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+  CHECK_EQUAL_FP32(4.25, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(1.0, s2);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s3);
+  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s4);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
+  CHECK_EQUAL_FP64(0.25, d7);
+  CHECK_EQUAL_FP64(2.25, d8);
+  CHECK_EQUAL_FP64(2.25, d9);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d10);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d11);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
 
   TEARDOWN();
 }
@@ -5077,20 +5276,20 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(2.25, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(-1.0, s2);
-  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
-  ASSERT_EQUAL_FP64(-4.25, d7);
-  ASSERT_EQUAL_FP64(-2.25, d8);
-  ASSERT_EQUAL_FP64(-2.25, d9);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+  CHECK_EQUAL_FP32(2.25, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(-1.0, s2);
+  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s3);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s4);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
+  CHECK_EQUAL_FP64(-4.25, d7);
+  CHECK_EQUAL_FP64(-2.25, d8);
+  CHECK_EQUAL_FP64(-2.25, d9);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
 
   TEARDOWN();
 }
@@ -5135,20 +5334,20 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(6.5, s0);
-  ASSERT_EQUAL_FP32(0.0, s1);
-  ASSERT_EQUAL_FP32(0.0, s2);
-  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s3);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s4);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
-  ASSERT_EQUAL_FP64(-4.5, d7);
-  ASSERT_EQUAL_FP64(0.0, d8);
-  ASSERT_EQUAL_FP64(0.0, d9);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+  CHECK_EQUAL_FP32(6.5, s0);
+  CHECK_EQUAL_FP32(0.0, s1);
+  CHECK_EQUAL_FP32(0.0, s2);
+  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s3);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s4);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
+  CHECK_EQUAL_FP64(-4.5, d7);
+  CHECK_EQUAL_FP64(0.0, d8);
+  CHECK_EQUAL_FP64(0.0, d9);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
 
   TEARDOWN();
 }
@@ -5171,10 +5370,10 @@
   END();
   RUN();
 
-  ASSERT_EQUAL_FP64(fmadd, d28);
-  ASSERT_EQUAL_FP64(fmsub, d29);
-  ASSERT_EQUAL_FP64(fnmadd, d30);
-  ASSERT_EQUAL_FP64(fnmsub, d31);
+  CHECK_EQUAL_FP64(fmadd, d28);
+  CHECK_EQUAL_FP64(fmsub, d29);
+  CHECK_EQUAL_FP64(fnmadd, d30);
+  CHECK_EQUAL_FP64(fnmsub, d31);
 
   TEARDOWN();
 }
@@ -5239,10 +5438,10 @@
   END();
   RUN();
 
-  ASSERT_EQUAL_FP32(fmadd, s28);
-  ASSERT_EQUAL_FP32(fmsub, s29);
-  ASSERT_EQUAL_FP32(fnmadd, s30);
-  ASSERT_EQUAL_FP32(fnmsub, s31);
+  CHECK_EQUAL_FP32(fmadd, s28);
+  CHECK_EQUAL_FP32(fmsub, s29);
+  CHECK_EQUAL_FP32(fnmadd, s30);
+  CHECK_EQUAL_FP32(fnmsub, s31);
 
   TEARDOWN();
 }
@@ -5298,12 +5497,12 @@
   double q1 = rawbits_to_double(0x7ffaaaaa11111111);
   double q2 = rawbits_to_double(0x7ffaaaaa22222222);
   double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
-  ASSERT(IsSignallingNaN(s1));
-  ASSERT(IsSignallingNaN(s2));
-  ASSERT(IsSignallingNaN(sa));
-  ASSERT(IsQuietNaN(q1));
-  ASSERT(IsQuietNaN(q2));
-  ASSERT(IsQuietNaN(qa));
+  DCHECK(IsSignallingNaN(s1));
+  DCHECK(IsSignallingNaN(s2));
+  DCHECK(IsSignallingNaN(sa));
+  DCHECK(IsQuietNaN(q1));
+  DCHECK(IsQuietNaN(q2));
+  DCHECK(IsQuietNaN(qa));
 
   // The input NaNs after passing through ProcessNaN.
   double s1_proc = rawbits_to_double(0x7ffd555511111111);
@@ -5312,22 +5511,22 @@
   double q1_proc = q1;
   double q2_proc = q2;
   double qa_proc = qa;
-  ASSERT(IsQuietNaN(s1_proc));
-  ASSERT(IsQuietNaN(s2_proc));
-  ASSERT(IsQuietNaN(sa_proc));
-  ASSERT(IsQuietNaN(q1_proc));
-  ASSERT(IsQuietNaN(q2_proc));
-  ASSERT(IsQuietNaN(qa_proc));
+  DCHECK(IsQuietNaN(s1_proc));
+  DCHECK(IsQuietNaN(s2_proc));
+  DCHECK(IsQuietNaN(sa_proc));
+  DCHECK(IsQuietNaN(q1_proc));
+  DCHECK(IsQuietNaN(q2_proc));
+  DCHECK(IsQuietNaN(qa_proc));
 
   // Negated NaNs as they would be produced on ARMv8 hardware.
   double s1_proc_neg = rawbits_to_double(0xfffd555511111111);
   double sa_proc_neg = rawbits_to_double(0xfffd5555aaaaaaaa);
   double q1_proc_neg = rawbits_to_double(0xfffaaaaa11111111);
   double qa_proc_neg = rawbits_to_double(0xfffaaaaaaaaaaaaa);
-  ASSERT(IsQuietNaN(s1_proc_neg));
-  ASSERT(IsQuietNaN(sa_proc_neg));
-  ASSERT(IsQuietNaN(q1_proc_neg));
-  ASSERT(IsQuietNaN(qa_proc_neg));
+  DCHECK(IsQuietNaN(s1_proc_neg));
+  DCHECK(IsQuietNaN(sa_proc_neg));
+  DCHECK(IsQuietNaN(q1_proc_neg));
+  DCHECK(IsQuietNaN(qa_proc_neg));
 
   // Quiet NaNs are propagated.
   FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
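
The *_proc and *_proc_neg constants above encode two ARMv8 behaviours: a signalling NaN is quietened by forcing the top mantissa bit, and fnmadd/fnmsub then flip the sign bit of the processed result. A bit-level sketch (helper names are ours):

// Sketch: ARMv8-style NaN handling for doubles. Illustrative only.
#include <cstdint>

// Quieten a NaN: force the top mantissa bit (bit 51) to 1. A quiet NaN
// such as q1 (0x7ffaaaaa11111111) already has it set and passes through.
uint64_t ProcessNaNBits(uint64_t bits) { return bits | 0x0008000000000000ULL; }

// fnmadd/fnmsub negate the result: flip the sign bit. This maps
// s1_proc (0x7ffd555511111111) to s1_proc_neg (0xfffd555511111111).
uint64_t NegateNaNBits(uint64_t bits) { return bits ^ 0x8000000000000000ULL; }
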
@@ -5381,12 +5580,12 @@
   float q1 = rawbits_to_float(0x7fea1111);
   float q2 = rawbits_to_float(0x7fea2222);
   float qa = rawbits_to_float(0x7feaaaaa);
-  ASSERT(IsSignallingNaN(s1));
-  ASSERT(IsSignallingNaN(s2));
-  ASSERT(IsSignallingNaN(sa));
-  ASSERT(IsQuietNaN(q1));
-  ASSERT(IsQuietNaN(q2));
-  ASSERT(IsQuietNaN(qa));
+  DCHECK(IsSignallingNaN(s1));
+  DCHECK(IsSignallingNaN(s2));
+  DCHECK(IsSignallingNaN(sa));
+  DCHECK(IsQuietNaN(q1));
+  DCHECK(IsQuietNaN(q2));
+  DCHECK(IsQuietNaN(qa));
 
   // The input NaNs after passing through ProcessNaN.
   float s1_proc = rawbits_to_float(0x7fd51111);
@@ -5395,22 +5594,22 @@
   float q1_proc = q1;
   float q2_proc = q2;
   float qa_proc = qa;
-  ASSERT(IsQuietNaN(s1_proc));
-  ASSERT(IsQuietNaN(s2_proc));
-  ASSERT(IsQuietNaN(sa_proc));
-  ASSERT(IsQuietNaN(q1_proc));
-  ASSERT(IsQuietNaN(q2_proc));
-  ASSERT(IsQuietNaN(qa_proc));
+  DCHECK(IsQuietNaN(s1_proc));
+  DCHECK(IsQuietNaN(s2_proc));
+  DCHECK(IsQuietNaN(sa_proc));
+  DCHECK(IsQuietNaN(q1_proc));
+  DCHECK(IsQuietNaN(q2_proc));
+  DCHECK(IsQuietNaN(qa_proc));
 
   // Negated NaNs as they would be produced on ARMv8 hardware.
   float s1_proc_neg = rawbits_to_float(0xffd51111);
   float sa_proc_neg = rawbits_to_float(0xffd5aaaa);
   float q1_proc_neg = rawbits_to_float(0xffea1111);
   float qa_proc_neg = rawbits_to_float(0xffeaaaaa);
-  ASSERT(IsQuietNaN(s1_proc_neg));
-  ASSERT(IsQuietNaN(sa_proc_neg));
-  ASSERT(IsQuietNaN(q1_proc_neg));
-  ASSERT(IsQuietNaN(qa_proc_neg));
+  DCHECK(IsQuietNaN(s1_proc_neg));
+  DCHECK(IsQuietNaN(sa_proc_neg));
+  DCHECK(IsQuietNaN(q1_proc_neg));
+  DCHECK(IsQuietNaN(qa_proc_neg));
 
   // Quiet NaNs are propagated.
   FmaddFmsubHelper(q1, 0, 0, q1_proc, q1_proc_neg, q1_proc_neg, q1_proc);
@@ -5494,20 +5693,20 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.625f, s0);
-  ASSERT_EQUAL_FP32(1.0f, s1);
-  ASSERT_EQUAL_FP32(-0.0f, s2);
-  ASSERT_EQUAL_FP32(0.0f, s3);
-  ASSERT_EQUAL_FP32(-0.0f, s4);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s5);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
-  ASSERT_EQUAL_FP64(-1.125, d7);
-  ASSERT_EQUAL_FP64(0.0, d8);
-  ASSERT_EQUAL_FP64(-0.0, d9);
-  ASSERT_EQUAL_FP64(0.0, d10);
-  ASSERT_EQUAL_FP64(-0.0, d11);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d12);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+  CHECK_EQUAL_FP32(1.625f, s0);
+  CHECK_EQUAL_FP32(1.0f, s1);
+  CHECK_EQUAL_FP32(-0.0f, s2);
+  CHECK_EQUAL_FP32(0.0f, s3);
+  CHECK_EQUAL_FP32(-0.0f, s4);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s5);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
+  CHECK_EQUAL_FP64(-1.125, d7);
+  CHECK_EQUAL_FP64(0.0, d8);
+  CHECK_EQUAL_FP64(-0.0, d9);
+  CHECK_EQUAL_FP64(0.0, d10);
+  CHECK_EQUAL_FP64(-0.0, d11);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d12);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
 
   TEARDOWN();
 }
@@ -5610,10 +5809,10 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP64(min, d28);
-  ASSERT_EQUAL_FP64(max, d29);
-  ASSERT_EQUAL_FP64(minnm, d30);
-  ASSERT_EQUAL_FP64(maxnm, d31);
+  CHECK_EQUAL_FP64(min, d28);
+  CHECK_EQUAL_FP64(max, d29);
+  CHECK_EQUAL_FP64(minnm, d30);
+  CHECK_EQUAL_FP64(maxnm, d31);
 
   TEARDOWN();
 }
@@ -5628,10 +5827,10 @@
   double snan_processed = rawbits_to_double(0x7ffd555512345678);
   double qnan_processed = qnan;
 
-  ASSERT(IsSignallingNaN(snan));
-  ASSERT(IsQuietNaN(qnan));
-  ASSERT(IsQuietNaN(snan_processed));
-  ASSERT(IsQuietNaN(qnan_processed));
+  DCHECK(IsSignallingNaN(snan));
+  DCHECK(IsQuietNaN(qnan));
+  DCHECK(IsQuietNaN(snan_processed));
+  DCHECK(IsQuietNaN(qnan_processed));
 
   // Bootstrap tests.
   FminFmaxDoubleHelper(0, 0, 0, 0, 0, 0);
@@ -5695,10 +5894,10 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(min, s28);
-  ASSERT_EQUAL_FP32(max, s29);
-  ASSERT_EQUAL_FP32(minnm, s30);
-  ASSERT_EQUAL_FP32(maxnm, s31);
+  CHECK_EQUAL_FP32(min, s28);
+  CHECK_EQUAL_FP32(max, s29);
+  CHECK_EQUAL_FP32(minnm, s30);
+  CHECK_EQUAL_FP32(maxnm, s31);
 
   TEARDOWN();
 }
@@ -5713,10 +5912,10 @@
   float snan_processed = rawbits_to_float(0x7fd51234);
   float qnan_processed = qnan;
 
-  ASSERT(IsSignallingNaN(snan));
-  ASSERT(IsQuietNaN(qnan));
-  ASSERT(IsQuietNaN(snan_processed));
-  ASSERT(IsQuietNaN(qnan_processed));
+  DCHECK(IsSignallingNaN(snan));
+  DCHECK(IsQuietNaN(qnan));
+  DCHECK(IsQuietNaN(snan_processed));
+  DCHECK(IsQuietNaN(qnan_processed));
 
   // Bootstrap tests.
   FminFmaxFloatHelper(0, 0, 0, 0, 0, 0);
@@ -5818,16 +6017,16 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(ZCFlag, w0);
-  ASSERT_EQUAL_32(VFlag, w1);
-  ASSERT_EQUAL_32(NFlag, w2);
-  ASSERT_EQUAL_32(CVFlag, w3);
-  ASSERT_EQUAL_32(ZCFlag, w4);
-  ASSERT_EQUAL_32(ZVFlag, w5);
-  ASSERT_EQUAL_32(CFlag, w6);
-  ASSERT_EQUAL_32(NFlag, w7);
-  ASSERT_EQUAL_32(ZCFlag, w8);
-  ASSERT_EQUAL_32(ZCFlag, w9);
+  CHECK_EQUAL_32(ZCFlag, w0);
+  CHECK_EQUAL_32(VFlag, w1);
+  CHECK_EQUAL_32(NFlag, w2);
+  CHECK_EQUAL_32(CVFlag, w3);
+  CHECK_EQUAL_32(ZCFlag, w4);
+  CHECK_EQUAL_32(ZVFlag, w5);
+  CHECK_EQUAL_32(CFlag, w6);
+  CHECK_EQUAL_32(NFlag, w7);
+  CHECK_EQUAL_32(ZCFlag, w8);
+  CHECK_EQUAL_32(ZCFlag, w9);
 
   TEARDOWN();
 }
@@ -5897,20 +6096,20 @@
 
   RUN();
 
-  ASSERT_EQUAL_32(ZCFlag, w0);
-  ASSERT_EQUAL_32(NFlag, w1);
-  ASSERT_EQUAL_32(CFlag, w2);
-  ASSERT_EQUAL_32(CVFlag, w3);
-  ASSERT_EQUAL_32(CVFlag, w4);
-  ASSERT_EQUAL_32(ZCFlag, w5);
-  ASSERT_EQUAL_32(NFlag, w6);
-  ASSERT_EQUAL_32(ZCFlag, w10);
-  ASSERT_EQUAL_32(NFlag, w11);
-  ASSERT_EQUAL_32(CFlag, w12);
-  ASSERT_EQUAL_32(CVFlag, w13);
-  ASSERT_EQUAL_32(CVFlag, w14);
-  ASSERT_EQUAL_32(ZCFlag, w15);
-  ASSERT_EQUAL_32(NFlag, w16);
+  CHECK_EQUAL_32(ZCFlag, w0);
+  CHECK_EQUAL_32(NFlag, w1);
+  CHECK_EQUAL_32(CFlag, w2);
+  CHECK_EQUAL_32(CVFlag, w3);
+  CHECK_EQUAL_32(CVFlag, w4);
+  CHECK_EQUAL_32(ZCFlag, w5);
+  CHECK_EQUAL_32(NFlag, w6);
+  CHECK_EQUAL_32(ZCFlag, w10);
+  CHECK_EQUAL_32(NFlag, w11);
+  CHECK_EQUAL_32(CFlag, w12);
+  CHECK_EQUAL_32(CVFlag, w13);
+  CHECK_EQUAL_32(CVFlag, w14);
+  CHECK_EQUAL_32(ZCFlag, w15);
+  CHECK_EQUAL_32(NFlag, w16);
 
   TEARDOWN();
 }
@@ -5938,12 +6137,12 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.0, s0);
-  ASSERT_EQUAL_FP32(2.0, s1);
-  ASSERT_EQUAL_FP64(3.0, d2);
-  ASSERT_EQUAL_FP64(4.0, d3);
-  ASSERT_EQUAL_FP32(1.0, s4);
-  ASSERT_EQUAL_FP64(3.0, d5);
+  CHECK_EQUAL_FP32(1.0, s0);
+  CHECK_EQUAL_FP32(2.0, s1);
+  CHECK_EQUAL_FP64(3.0, d2);
+  CHECK_EQUAL_FP64(4.0, d3);
+  CHECK_EQUAL_FP32(1.0, s4);
+  CHECK_EQUAL_FP64(3.0, d5);
 
   TEARDOWN();
 }
@@ -5977,18 +6176,18 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(-1.0, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(-0.0, s2);
-  ASSERT_EQUAL_FP32(0.0, s3);
-  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s4);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
-  ASSERT_EQUAL_FP64(-1.0, d6);
-  ASSERT_EQUAL_FP64(1.0, d7);
-  ASSERT_EQUAL_FP64(-0.0, d8);
-  ASSERT_EQUAL_FP64(0.0, d9);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d10);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d11);
+  CHECK_EQUAL_FP32(-1.0, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(-0.0, s2);
+  CHECK_EQUAL_FP32(0.0, s3);
+  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s4);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
+  CHECK_EQUAL_FP64(-1.0, d6);
+  CHECK_EQUAL_FP64(1.0, d7);
+  CHECK_EQUAL_FP64(-0.0, d8);
+  CHECK_EQUAL_FP64(0.0, d9);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d10);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d11);
 
   TEARDOWN();
 }
@@ -6018,14 +6217,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.0, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(0.0, s2);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s3);
-  ASSERT_EQUAL_FP64(1.0, d4);
-  ASSERT_EQUAL_FP64(1.0, d5);
-  ASSERT_EQUAL_FP64(0.0, d6);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
+  CHECK_EQUAL_FP32(1.0, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(0.0, s2);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s3);
+  CHECK_EQUAL_FP64(1.0, d4);
+  CHECK_EQUAL_FP64(1.0, d5);
+  CHECK_EQUAL_FP64(0.0, d6);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d7);
 
   TEARDOWN();
 }
@@ -6069,20 +6268,20 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(0.0, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(0.5, s2);
-  ASSERT_EQUAL_FP32(256.0, s3);
-  ASSERT_EQUAL_FP32(-0.0, s4);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s5);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s6);
-  ASSERT_EQUAL_FP64(0.0, d7);
-  ASSERT_EQUAL_FP64(1.0, d8);
-  ASSERT_EQUAL_FP64(0.5, d9);
-  ASSERT_EQUAL_FP64(65536.0, d10);
-  ASSERT_EQUAL_FP64(-0.0, d11);
-  ASSERT_EQUAL_FP64(kFP32PositiveInfinity, d12);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
+  CHECK_EQUAL_FP32(0.0, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(0.5, s2);
+  CHECK_EQUAL_FP32(256.0, s3);
+  CHECK_EQUAL_FP32(-0.0, s4);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s5);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s6);
+  CHECK_EQUAL_FP64(0.0, d7);
+  CHECK_EQUAL_FP64(1.0, d8);
+  CHECK_EQUAL_FP64(0.5, d9);
+  CHECK_EQUAL_FP64(65536.0, d10);
+  CHECK_EQUAL_FP64(-0.0, d11);
+  CHECK_EQUAL_FP64(kFP32PositiveInfinity, d12);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
 
   TEARDOWN();
 }
@@ -6148,30 +6347,30 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.0, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(2.0, s2);
-  ASSERT_EQUAL_FP32(2.0, s3);
-  ASSERT_EQUAL_FP32(3.0, s4);
-  ASSERT_EQUAL_FP32(-2.0, s5);
-  ASSERT_EQUAL_FP32(-3.0, s6);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
-  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
-  ASSERT_EQUAL_FP32(0.0, s9);
-  ASSERT_EQUAL_FP32(-0.0, s10);
-  ASSERT_EQUAL_FP32(-0.0, s11);
-  ASSERT_EQUAL_FP64(1.0, d12);
-  ASSERT_EQUAL_FP64(1.0, d13);
-  ASSERT_EQUAL_FP64(2.0, d14);
-  ASSERT_EQUAL_FP64(2.0, d15);
-  ASSERT_EQUAL_FP64(3.0, d16);
-  ASSERT_EQUAL_FP64(-2.0, d17);
-  ASSERT_EQUAL_FP64(-3.0, d18);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
-  ASSERT_EQUAL_FP64(0.0, d21);
-  ASSERT_EQUAL_FP64(-0.0, d22);
-  ASSERT_EQUAL_FP64(-0.0, d23);
+  CHECK_EQUAL_FP32(1.0, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(2.0, s2);
+  CHECK_EQUAL_FP32(2.0, s3);
+  CHECK_EQUAL_FP32(3.0, s4);
+  CHECK_EQUAL_FP32(-2.0, s5);
+  CHECK_EQUAL_FP32(-3.0, s6);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
+  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
+  CHECK_EQUAL_FP32(0.0, s9);
+  CHECK_EQUAL_FP32(-0.0, s10);
+  CHECK_EQUAL_FP32(-0.0, s11);
+  CHECK_EQUAL_FP64(1.0, d12);
+  CHECK_EQUAL_FP64(1.0, d13);
+  CHECK_EQUAL_FP64(2.0, d14);
+  CHECK_EQUAL_FP64(2.0, d15);
+  CHECK_EQUAL_FP64(3.0, d16);
+  CHECK_EQUAL_FP64(-2.0, d17);
+  CHECK_EQUAL_FP64(-3.0, d18);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
+  CHECK_EQUAL_FP64(0.0, d21);
+  CHECK_EQUAL_FP64(-0.0, d22);
+  CHECK_EQUAL_FP64(-0.0, d23);
 
   TEARDOWN();
 }
@@ -6237,30 +6436,30 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.0, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(1.0, s2);
-  ASSERT_EQUAL_FP32(1.0, s3);
-  ASSERT_EQUAL_FP32(2.0, s4);
-  ASSERT_EQUAL_FP32(-2.0, s5);
-  ASSERT_EQUAL_FP32(-3.0, s6);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
-  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
-  ASSERT_EQUAL_FP32(0.0, s9);
-  ASSERT_EQUAL_FP32(-0.0, s10);
-  ASSERT_EQUAL_FP32(-1.0, s11);
-  ASSERT_EQUAL_FP64(1.0, d12);
-  ASSERT_EQUAL_FP64(1.0, d13);
-  ASSERT_EQUAL_FP64(1.0, d14);
-  ASSERT_EQUAL_FP64(1.0, d15);
-  ASSERT_EQUAL_FP64(2.0, d16);
-  ASSERT_EQUAL_FP64(-2.0, d17);
-  ASSERT_EQUAL_FP64(-3.0, d18);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
-  ASSERT_EQUAL_FP64(0.0, d21);
-  ASSERT_EQUAL_FP64(-0.0, d22);
-  ASSERT_EQUAL_FP64(-1.0, d23);
+  CHECK_EQUAL_FP32(1.0, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(1.0, s2);
+  CHECK_EQUAL_FP32(1.0, s3);
+  CHECK_EQUAL_FP32(2.0, s4);
+  CHECK_EQUAL_FP32(-2.0, s5);
+  CHECK_EQUAL_FP32(-3.0, s6);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
+  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
+  CHECK_EQUAL_FP32(0.0, s9);
+  CHECK_EQUAL_FP32(-0.0, s10);
+  CHECK_EQUAL_FP32(-1.0, s11);
+  CHECK_EQUAL_FP64(1.0, d12);
+  CHECK_EQUAL_FP64(1.0, d13);
+  CHECK_EQUAL_FP64(1.0, d14);
+  CHECK_EQUAL_FP64(1.0, d15);
+  CHECK_EQUAL_FP64(2.0, d16);
+  CHECK_EQUAL_FP64(-2.0, d17);
+  CHECK_EQUAL_FP64(-3.0, d18);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
+  CHECK_EQUAL_FP64(0.0, d21);
+  CHECK_EQUAL_FP64(-0.0, d22);
+  CHECK_EQUAL_FP64(-1.0, d23);
 
   TEARDOWN();
 }
@@ -6326,30 +6525,30 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.0, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(2.0, s2);
-  ASSERT_EQUAL_FP32(2.0, s3);
-  ASSERT_EQUAL_FP32(2.0, s4);
-  ASSERT_EQUAL_FP32(-2.0, s5);
-  ASSERT_EQUAL_FP32(-2.0, s6);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
-  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
-  ASSERT_EQUAL_FP32(0.0, s9);
-  ASSERT_EQUAL_FP32(-0.0, s10);
-  ASSERT_EQUAL_FP32(-0.0, s11);
-  ASSERT_EQUAL_FP64(1.0, d12);
-  ASSERT_EQUAL_FP64(1.0, d13);
-  ASSERT_EQUAL_FP64(2.0, d14);
-  ASSERT_EQUAL_FP64(2.0, d15);
-  ASSERT_EQUAL_FP64(2.0, d16);
-  ASSERT_EQUAL_FP64(-2.0, d17);
-  ASSERT_EQUAL_FP64(-2.0, d18);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d19);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d20);
-  ASSERT_EQUAL_FP64(0.0, d21);
-  ASSERT_EQUAL_FP64(-0.0, d22);
-  ASSERT_EQUAL_FP64(-0.0, d23);
+  CHECK_EQUAL_FP32(1.0, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(2.0, s2);
+  CHECK_EQUAL_FP32(2.0, s3);
+  CHECK_EQUAL_FP32(2.0, s4);
+  CHECK_EQUAL_FP32(-2.0, s5);
+  CHECK_EQUAL_FP32(-2.0, s6);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
+  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
+  CHECK_EQUAL_FP32(0.0, s9);
+  CHECK_EQUAL_FP32(-0.0, s10);
+  CHECK_EQUAL_FP32(-0.0, s11);
+  CHECK_EQUAL_FP64(1.0, d12);
+  CHECK_EQUAL_FP64(1.0, d13);
+  CHECK_EQUAL_FP64(2.0, d14);
+  CHECK_EQUAL_FP64(2.0, d15);
+  CHECK_EQUAL_FP64(2.0, d16);
+  CHECK_EQUAL_FP64(-2.0, d17);
+  CHECK_EQUAL_FP64(-2.0, d18);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d19);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d20);
+  CHECK_EQUAL_FP64(0.0, d21);
+  CHECK_EQUAL_FP64(-0.0, d22);
+  CHECK_EQUAL_FP64(-0.0, d23);
 
   TEARDOWN();
 }
@@ -6411,28 +6610,28 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP32(1.0, s0);
-  ASSERT_EQUAL_FP32(1.0, s1);
-  ASSERT_EQUAL_FP32(1.0, s2);
-  ASSERT_EQUAL_FP32(1.0, s3);
-  ASSERT_EQUAL_FP32(2.0, s4);
-  ASSERT_EQUAL_FP32(-1.0, s5);
-  ASSERT_EQUAL_FP32(-2.0, s6);
-  ASSERT_EQUAL_FP32(kFP32PositiveInfinity, s7);
-  ASSERT_EQUAL_FP32(kFP32NegativeInfinity, s8);
-  ASSERT_EQUAL_FP32(0.0, s9);
-  ASSERT_EQUAL_FP32(-0.0, s10);
-  ASSERT_EQUAL_FP64(1.0, d11);
-  ASSERT_EQUAL_FP64(1.0, d12);
-  ASSERT_EQUAL_FP64(1.0, d13);
-  ASSERT_EQUAL_FP64(1.0, d14);
-  ASSERT_EQUAL_FP64(2.0, d15);
-  ASSERT_EQUAL_FP64(-1.0, d16);
-  ASSERT_EQUAL_FP64(-2.0, d17);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d18);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d19);
-  ASSERT_EQUAL_FP64(0.0, d20);
-  ASSERT_EQUAL_FP64(-0.0, d21);
+  CHECK_EQUAL_FP32(1.0, s0);
+  CHECK_EQUAL_FP32(1.0, s1);
+  CHECK_EQUAL_FP32(1.0, s2);
+  CHECK_EQUAL_FP32(1.0, s3);
+  CHECK_EQUAL_FP32(2.0, s4);
+  CHECK_EQUAL_FP32(-1.0, s5);
+  CHECK_EQUAL_FP32(-2.0, s6);
+  CHECK_EQUAL_FP32(kFP32PositiveInfinity, s7);
+  CHECK_EQUAL_FP32(kFP32NegativeInfinity, s8);
+  CHECK_EQUAL_FP32(0.0, s9);
+  CHECK_EQUAL_FP32(-0.0, s10);
+  CHECK_EQUAL_FP64(1.0, d11);
+  CHECK_EQUAL_FP64(1.0, d12);
+  CHECK_EQUAL_FP64(1.0, d13);
+  CHECK_EQUAL_FP64(1.0, d14);
+  CHECK_EQUAL_FP64(2.0, d15);
+  CHECK_EQUAL_FP64(-1.0, d16);
+  CHECK_EQUAL_FP64(-2.0, d17);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d18);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d19);
+  CHECK_EQUAL_FP64(0.0, d20);
+  CHECK_EQUAL_FP64(-0.0, d21);
 
   TEARDOWN();
 }
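
Judging by the expected values, the four hunks above appear to cover the round-to-nearest-ties-away, floor, ties-to-even, and truncate rounding modes (frinta, frintm, frintn, frintz; the test names themselves fall outside the hunks). A sketch of the corresponding C++ operations:

// Sketch: the rounding behaviours implied by the expected values above.
// The frint* mapping is inferred, not quoted from the test names.
#include <cfenv>
#include <cmath>

double LikeFrinta(double x) { return std::round(x); }  // 2.5 -> 3, -2.5 -> -3
double LikeFrintm(double x) { return std::floor(x); }  // 2.5 -> 2, -1.5 -> -2
double LikeFrintz(double x) { return std::trunc(x); }  // 2.5 -> 2, -1.5 -> -1
double LikeFrintn(double x) {                          // 2.5 -> 2, -2.5 -> -2
  std::fesetround(FE_TONEAREST);  // ties to even, the usual default mode
  return std::nearbyint(x);
}
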
@@ -6478,19 +6677,19 @@
 
   RUN();
 
-  ASSERT_EQUAL_FP64(1.0f, d0);
-  ASSERT_EQUAL_FP64(1.1f, d1);
-  ASSERT_EQUAL_FP64(1.5f, d2);
-  ASSERT_EQUAL_FP64(1.9f, d3);
-  ASSERT_EQUAL_FP64(2.5f, d4);
-  ASSERT_EQUAL_FP64(-1.5f, d5);
-  ASSERT_EQUAL_FP64(-2.5f, d6);
-  ASSERT_EQUAL_FP64(kFP64PositiveInfinity, d7);
-  ASSERT_EQUAL_FP64(kFP64NegativeInfinity, d8);
-  ASSERT_EQUAL_FP64(0.0f, d9);
-  ASSERT_EQUAL_FP64(-0.0f, d10);
-  ASSERT_EQUAL_FP64(FLT_MAX, d11);
-  ASSERT_EQUAL_FP64(FLT_MIN, d12);
+  CHECK_EQUAL_FP64(1.0f, d0);
+  CHECK_EQUAL_FP64(1.1f, d1);
+  CHECK_EQUAL_FP64(1.5f, d2);
+  CHECK_EQUAL_FP64(1.9f, d3);
+  CHECK_EQUAL_FP64(2.5f, d4);
+  CHECK_EQUAL_FP64(-1.5f, d5);
+  CHECK_EQUAL_FP64(-2.5f, d6);
+  CHECK_EQUAL_FP64(kFP64PositiveInfinity, d7);
+  CHECK_EQUAL_FP64(kFP64NegativeInfinity, d8);
+  CHECK_EQUAL_FP64(0.0f, d9);
+  CHECK_EQUAL_FP64(-0.0f, d10);
+  CHECK_EQUAL_FP64(FLT_MAX, d11);
+  CHECK_EQUAL_FP64(FLT_MIN, d12);
 
   // Check that the NaN payload is preserved according to ARM64 conversion
   // rules:
@@ -6498,8 +6697,8 @@
   //  - The top bit of the mantissa is forced to 1 (making it a quiet NaN).
   //  - The remaining mantissa bits are copied until they run out.
   //  - The low-order bits that haven't already been assigned are set to 0.
-  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
-  ASSERT_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
+  CHECK_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d13);
+  CHECK_EQUAL_FP64(rawbits_to_double(0x7ff82468a0000000), d14);
 
   TEARDOWN();
 }
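
The rule list above fully determines the float-to-double NaN mapping, so it can be checked independently. A self-contained sketch (the function name and the sample input are ours; the input is chosen to reproduce the payload checked in d13 and d14):

// Sketch: ARM64 fcvt float->double NaN payload widening. Illustrative.
#include <cassert>
#include <cstdint>

uint64_t FcvtNaN32To64(uint32_t f) {
  uint64_t sign = static_cast<uint64_t>(f >> 31) << 63;
  uint64_t payload = f & 0x003fffff;  // low 22 mantissa bits survive
  // Exponent all ones, quiet bit forced to 1, payload copied into the
  // next 22 bits, remaining low-order bits set to 0.
  return sign | 0x7ff8000000000000ULL | (payload << 29);
}

int main() {
  // Payload 0x12345 widens to the raw bits checked above.
  assert(FcvtNaN32To64(0x7f812345) == 0x7ff82468a0000000ULL);
  return 0;
}
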
@@ -6599,8 +6798,8 @@
     float expected = test[i].expected;
 
     // We only expect positive input.
-    ASSERT(std::signbit(in) == 0);
-    ASSERT(std::signbit(expected) == 0);
+    DCHECK(std::signbit(in) == 0);
+    DCHECK(std::signbit(expected) == 0);
 
     SETUP();
     START();
@@ -6613,8 +6812,8 @@
 
     END();
     RUN();
-    ASSERT_EQUAL_FP32(expected, s20);
-    ASSERT_EQUAL_FP32(-expected, s21);
+    CHECK_EQUAL_FP32(expected, s20);
+    CHECK_EQUAL_FP32(-expected, s21);
     TEARDOWN();
   }
 }
@@ -6690,36 +6889,36 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(3, x2);
-  ASSERT_EQUAL_64(0xfffffffd, x3);
-  ASSERT_EQUAL_64(0x7fffffff, x4);
-  ASSERT_EQUAL_64(0x80000000, x5);
-  ASSERT_EQUAL_64(0x7fffff80, x6);
-  ASSERT_EQUAL_64(0x80000080, x7);
-  ASSERT_EQUAL_64(1, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(3, x10);
-  ASSERT_EQUAL_64(0xfffffffd, x11);
-  ASSERT_EQUAL_64(0x7fffffff, x12);
-  ASSERT_EQUAL_64(0x80000000, x13);
-  ASSERT_EQUAL_64(0x7ffffffe, x14);
-  ASSERT_EQUAL_64(0x80000001, x15);
-  ASSERT_EQUAL_64(1, x17);
-  ASSERT_EQUAL_64(3, x18);
-  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x19);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
-  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
-  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
-  ASSERT_EQUAL_64(1, x24);
-  ASSERT_EQUAL_64(3, x25);
-  ASSERT_EQUAL_64(0xfffffffffffffffdUL, x26);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
-  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
-  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(3, x2);
+  CHECK_EQUAL_64(0xfffffffd, x3);
+  CHECK_EQUAL_64(0x7fffffff, x4);
+  CHECK_EQUAL_64(0x80000000, x5);
+  CHECK_EQUAL_64(0x7fffff80, x6);
+  CHECK_EQUAL_64(0x80000080, x7);
+  CHECK_EQUAL_64(1, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(3, x10);
+  CHECK_EQUAL_64(0xfffffffd, x11);
+  CHECK_EQUAL_64(0x7fffffff, x12);
+  CHECK_EQUAL_64(0x80000000, x13);
+  CHECK_EQUAL_64(0x7ffffffe, x14);
+  CHECK_EQUAL_64(0x80000001, x15);
+  CHECK_EQUAL_64(1, x17);
+  CHECK_EQUAL_64(3, x18);
+  CHECK_EQUAL_64(0xfffffffffffffffdUL, x19);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
+  CHECK_EQUAL_64(0x8000000000000000UL, x21);
+  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+  CHECK_EQUAL_64(0x8000008000000000UL, x23);
+  CHECK_EQUAL_64(1, x24);
+  CHECK_EQUAL_64(3, x25);
+  CHECK_EQUAL_64(0xfffffffffffffffdUL, x26);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
+  CHECK_EQUAL_64(0x8000000000000000UL, x28);
+  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  CHECK_EQUAL_64(0x8000000000000400UL, x30);
 
   TEARDOWN();
 }
@@ -6792,34 +6991,34 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(3, x2);
-  ASSERT_EQUAL_64(0, x3);
-  ASSERT_EQUAL_64(0xffffffff, x4);
-  ASSERT_EQUAL_64(0, x5);
-  ASSERT_EQUAL_64(0xffffff00, x6);
-  ASSERT_EQUAL_64(1, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(3, x10);
-  ASSERT_EQUAL_64(0, x11);
-  ASSERT_EQUAL_64(0xffffffff, x12);
-  ASSERT_EQUAL_64(0, x13);
-  ASSERT_EQUAL_64(0xfffffffe, x14);
-  ASSERT_EQUAL_64(1, x16);
-  ASSERT_EQUAL_64(1, x17);
-  ASSERT_EQUAL_64(3, x18);
-  ASSERT_EQUAL_64(0, x19);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
-  ASSERT_EQUAL_64(0, x21);
-  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
-  ASSERT_EQUAL_64(1, x24);
-  ASSERT_EQUAL_64(3, x25);
-  ASSERT_EQUAL_64(0, x26);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
-  ASSERT_EQUAL_64(0, x28);
-  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
-  ASSERT_EQUAL_64(0xffffffff, x30);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(3, x2);
+  CHECK_EQUAL_64(0, x3);
+  CHECK_EQUAL_64(0xffffffff, x4);
+  CHECK_EQUAL_64(0, x5);
+  CHECK_EQUAL_64(0xffffff00, x6);
+  CHECK_EQUAL_64(1, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(3, x10);
+  CHECK_EQUAL_64(0, x11);
+  CHECK_EQUAL_64(0xffffffff, x12);
+  CHECK_EQUAL_64(0, x13);
+  CHECK_EQUAL_64(0xfffffffe, x14);
+  CHECK_EQUAL_64(1, x16);
+  CHECK_EQUAL_64(1, x17);
+  CHECK_EQUAL_64(3, x18);
+  CHECK_EQUAL_64(0, x19);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
+  CHECK_EQUAL_64(0, x21);
+  CHECK_EQUAL_64(0xffffff0000000000UL, x22);
+  CHECK_EQUAL_64(1, x24);
+  CHECK_EQUAL_64(3, x25);
+  CHECK_EQUAL_64(0, x26);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
+  CHECK_EQUAL_64(0, x28);
+  CHECK_EQUAL_64(0xfffffffffffff800UL, x29);
+  CHECK_EQUAL_64(0xffffffff, x30);
 
   TEARDOWN();
 }
@@ -6895,36 +7094,36 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(1, x2);
-  ASSERT_EQUAL_64(0xfffffffe, x3);
-  ASSERT_EQUAL_64(0x7fffffff, x4);
-  ASSERT_EQUAL_64(0x80000000, x5);
-  ASSERT_EQUAL_64(0x7fffff80, x6);
-  ASSERT_EQUAL_64(0x80000080, x7);
-  ASSERT_EQUAL_64(1, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(1, x10);
-  ASSERT_EQUAL_64(0xfffffffe, x11);
-  ASSERT_EQUAL_64(0x7fffffff, x12);
-  ASSERT_EQUAL_64(0x80000000, x13);
-  ASSERT_EQUAL_64(0x7ffffffe, x14);
-  ASSERT_EQUAL_64(0x80000001, x15);
-  ASSERT_EQUAL_64(1, x17);
-  ASSERT_EQUAL_64(1, x18);
-  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
-  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
-  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
-  ASSERT_EQUAL_64(1, x24);
-  ASSERT_EQUAL_64(1, x25);
-  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
-  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
-  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(1, x2);
+  CHECK_EQUAL_64(0xfffffffe, x3);
+  CHECK_EQUAL_64(0x7fffffff, x4);
+  CHECK_EQUAL_64(0x80000000, x5);
+  CHECK_EQUAL_64(0x7fffff80, x6);
+  CHECK_EQUAL_64(0x80000080, x7);
+  CHECK_EQUAL_64(1, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(1, x10);
+  CHECK_EQUAL_64(0xfffffffe, x11);
+  CHECK_EQUAL_64(0x7fffffff, x12);
+  CHECK_EQUAL_64(0x80000000, x13);
+  CHECK_EQUAL_64(0x7ffffffe, x14);
+  CHECK_EQUAL_64(0x80000001, x15);
+  CHECK_EQUAL_64(1, x17);
+  CHECK_EQUAL_64(1, x18);
+  CHECK_EQUAL_64(0xfffffffffffffffeUL, x19);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
+  CHECK_EQUAL_64(0x8000000000000000UL, x21);
+  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+  CHECK_EQUAL_64(0x8000008000000000UL, x23);
+  CHECK_EQUAL_64(1, x24);
+  CHECK_EQUAL_64(1, x25);
+  CHECK_EQUAL_64(0xfffffffffffffffeUL, x26);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
+  CHECK_EQUAL_64(0x8000000000000000UL, x28);
+  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  CHECK_EQUAL_64(0x8000000000000400UL, x30);
 
   TEARDOWN();
 }
@@ -6999,35 +7198,35 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(1, x2);
-  ASSERT_EQUAL_64(0, x3);
-  ASSERT_EQUAL_64(0xffffffff, x4);
-  ASSERT_EQUAL_64(0, x5);
-  ASSERT_EQUAL_64(0x7fffff80, x6);
-  ASSERT_EQUAL_64(0, x7);
-  ASSERT_EQUAL_64(1, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(1, x10);
-  ASSERT_EQUAL_64(0, x11);
-  ASSERT_EQUAL_64(0xffffffff, x12);
-  ASSERT_EQUAL_64(0, x13);
-  ASSERT_EQUAL_64(0x7ffffffe, x14);
-  ASSERT_EQUAL_64(1, x17);
-  ASSERT_EQUAL_64(1, x18);
-  ASSERT_EQUAL_64(0x0UL, x19);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
-  ASSERT_EQUAL_64(0x0UL, x21);
-  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
-  ASSERT_EQUAL_64(0x0UL, x23);
-  ASSERT_EQUAL_64(1, x24);
-  ASSERT_EQUAL_64(1, x25);
-  ASSERT_EQUAL_64(0x0UL, x26);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
-  ASSERT_EQUAL_64(0x0UL, x28);
-  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
-  ASSERT_EQUAL_64(0x0UL, x30);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(1, x2);
+  CHECK_EQUAL_64(0, x3);
+  CHECK_EQUAL_64(0xffffffff, x4);
+  CHECK_EQUAL_64(0, x5);
+  CHECK_EQUAL_64(0x7fffff80, x6);
+  CHECK_EQUAL_64(0, x7);
+  CHECK_EQUAL_64(1, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(1, x10);
+  CHECK_EQUAL_64(0, x11);
+  CHECK_EQUAL_64(0xffffffff, x12);
+  CHECK_EQUAL_64(0, x13);
+  CHECK_EQUAL_64(0x7ffffffe, x14);
+  CHECK_EQUAL_64(1, x17);
+  CHECK_EQUAL_64(1, x18);
+  CHECK_EQUAL_64(0x0UL, x19);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
+  CHECK_EQUAL_64(0x0UL, x21);
+  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+  CHECK_EQUAL_64(0x0UL, x23);
+  CHECK_EQUAL_64(1, x24);
+  CHECK_EQUAL_64(1, x25);
+  CHECK_EQUAL_64(0x0UL, x26);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
+  CHECK_EQUAL_64(0x0UL, x28);
+  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  CHECK_EQUAL_64(0x0UL, x30);
 
   TEARDOWN();
 }
@@ -7103,36 +7302,36 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(2, x2);
-  ASSERT_EQUAL_64(0xfffffffe, x3);
-  ASSERT_EQUAL_64(0x7fffffff, x4);
-  ASSERT_EQUAL_64(0x80000000, x5);
-  ASSERT_EQUAL_64(0x7fffff80, x6);
-  ASSERT_EQUAL_64(0x80000080, x7);
-  ASSERT_EQUAL_64(1, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(2, x10);
-  ASSERT_EQUAL_64(0xfffffffe, x11);
-  ASSERT_EQUAL_64(0x7fffffff, x12);
-  ASSERT_EQUAL_64(0x80000000, x13);
-  ASSERT_EQUAL_64(0x7ffffffe, x14);
-  ASSERT_EQUAL_64(0x80000001, x15);
-  ASSERT_EQUAL_64(1, x17);
-  ASSERT_EQUAL_64(2, x18);
-  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x19);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
-  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
-  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
-  ASSERT_EQUAL_64(1, x24);
-  ASSERT_EQUAL_64(2, x25);
-  ASSERT_EQUAL_64(0xfffffffffffffffeUL, x26);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
-//  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
-  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
-  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(2, x2);
+  CHECK_EQUAL_64(0xfffffffe, x3);
+  CHECK_EQUAL_64(0x7fffffff, x4);
+  CHECK_EQUAL_64(0x80000000, x5);
+  CHECK_EQUAL_64(0x7fffff80, x6);
+  CHECK_EQUAL_64(0x80000080, x7);
+  CHECK_EQUAL_64(1, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(2, x10);
+  CHECK_EQUAL_64(0xfffffffe, x11);
+  CHECK_EQUAL_64(0x7fffffff, x12);
+  CHECK_EQUAL_64(0x80000000, x13);
+  CHECK_EQUAL_64(0x7ffffffe, x14);
+  CHECK_EQUAL_64(0x80000001, x15);
+  CHECK_EQUAL_64(1, x17);
+  CHECK_EQUAL_64(2, x18);
+  CHECK_EQUAL_64(0xfffffffffffffffeUL, x19);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
+  CHECK_EQUAL_64(0x8000000000000000UL, x21);
+  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+  CHECK_EQUAL_64(0x8000008000000000UL, x23);
+  CHECK_EQUAL_64(1, x24);
+  CHECK_EQUAL_64(2, x25);
+  CHECK_EQUAL_64(0xfffffffffffffffeUL, x26);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
+//  CHECK_EQUAL_64(0x8000000000000000UL, x28);
+  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  CHECK_EQUAL_64(0x8000000000000400UL, x30);
 
   TEARDOWN();
 }
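A quick note on the recurring boundary constants in these expectations: an IEEE-754 binary32 float has a 24-bit significand, so in [2^30, 2^31) adjacent floats are 2^7 = 128 apart, and the largest float not exceeding INT32_MAX is 2^31 - 128 = 0x7fffff80. The same reasoning near 2^63 gives 2^63 - 2^39 = 0x7fffff8000000000. A minimal host-side sketch, assuming IEEE-754 floats (illustrative, not part of the diff):

#include <cassert>
#include <cstdint>

int main() {
  // Floats in [2^30, 2^31) are spaced 2^(30-23) = 128 apart, so the
  // largest float not exceeding INT32_MAX is 2^31 - 128 = 0x7fffff80.
  float below_max = 2147483520.0f;
  assert(static_cast<uint32_t>(below_max) == 0x7fffff80u);
  assert(below_max == 2147483648.0f - 128.0f);
  return 0;
}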
@@ -7205,34 +7404,34 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(2, x2);
-  ASSERT_EQUAL_64(0, x3);
-  ASSERT_EQUAL_64(0xffffffff, x4);
-  ASSERT_EQUAL_64(0, x5);
-  ASSERT_EQUAL_64(0xffffff00, x6);
-  ASSERT_EQUAL_64(1, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(2, x10);
-  ASSERT_EQUAL_64(0, x11);
-  ASSERT_EQUAL_64(0xffffffff, x12);
-  ASSERT_EQUAL_64(0, x13);
-  ASSERT_EQUAL_64(0xfffffffe, x14);
-  ASSERT_EQUAL_64(1, x16);
-  ASSERT_EQUAL_64(1, x17);
-  ASSERT_EQUAL_64(2, x18);
-  ASSERT_EQUAL_64(0, x19);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
-  ASSERT_EQUAL_64(0, x21);
-  ASSERT_EQUAL_64(0xffffff0000000000UL, x22);
-  ASSERT_EQUAL_64(1, x24);
-  ASSERT_EQUAL_64(2, x25);
-  ASSERT_EQUAL_64(0, x26);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
-//  ASSERT_EQUAL_64(0, x28);
-  ASSERT_EQUAL_64(0xfffffffffffff800UL, x29);
-  ASSERT_EQUAL_64(0xffffffff, x30);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(2, x2);
+  CHECK_EQUAL_64(0, x3);
+  CHECK_EQUAL_64(0xffffffff, x4);
+  CHECK_EQUAL_64(0, x5);
+  CHECK_EQUAL_64(0xffffff00, x6);
+  CHECK_EQUAL_64(1, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(2, x10);
+  CHECK_EQUAL_64(0, x11);
+  CHECK_EQUAL_64(0xffffffff, x12);
+  CHECK_EQUAL_64(0, x13);
+  CHECK_EQUAL_64(0xfffffffe, x14);
+  CHECK_EQUAL_64(1, x16);
+  CHECK_EQUAL_64(1, x17);
+  CHECK_EQUAL_64(2, x18);
+  CHECK_EQUAL_64(0, x19);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
+  CHECK_EQUAL_64(0, x21);
+  CHECK_EQUAL_64(0xffffff0000000000UL, x22);
+  CHECK_EQUAL_64(1, x24);
+  CHECK_EQUAL_64(2, x25);
+  CHECK_EQUAL_64(0, x26);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
+//  CHECK_EQUAL_64(0, x28);
+  CHECK_EQUAL_64(0xfffffffffffff800UL, x29);
+  CHECK_EQUAL_64(0xffffffff, x30);
 
   TEARDOWN();
 }
@@ -7308,36 +7507,36 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(1, x2);
-  ASSERT_EQUAL_64(0xffffffff, x3);
-  ASSERT_EQUAL_64(0x7fffffff, x4);
-  ASSERT_EQUAL_64(0x80000000, x5);
-  ASSERT_EQUAL_64(0x7fffff80, x6);
-  ASSERT_EQUAL_64(0x80000080, x7);
-  ASSERT_EQUAL_64(1, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(1, x10);
-  ASSERT_EQUAL_64(0xffffffff, x11);
-  ASSERT_EQUAL_64(0x7fffffff, x12);
-  ASSERT_EQUAL_64(0x80000000, x13);
-  ASSERT_EQUAL_64(0x7ffffffe, x14);
-  ASSERT_EQUAL_64(0x80000001, x15);
-  ASSERT_EQUAL_64(1, x17);
-  ASSERT_EQUAL_64(1, x18);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x19);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x20);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x21);
-  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
-  ASSERT_EQUAL_64(0x8000008000000000UL, x23);
-  ASSERT_EQUAL_64(1, x24);
-  ASSERT_EQUAL_64(1, x25);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x26);
-  ASSERT_EQUAL_64(0x7fffffffffffffffUL, x27);
-  ASSERT_EQUAL_64(0x8000000000000000UL, x28);
-  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
-  ASSERT_EQUAL_64(0x8000000000000400UL, x30);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(1, x2);
+  CHECK_EQUAL_64(0xffffffff, x3);
+  CHECK_EQUAL_64(0x7fffffff, x4);
+  CHECK_EQUAL_64(0x80000000, x5);
+  CHECK_EQUAL_64(0x7fffff80, x6);
+  CHECK_EQUAL_64(0x80000080, x7);
+  CHECK_EQUAL_64(1, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(1, x10);
+  CHECK_EQUAL_64(0xffffffff, x11);
+  CHECK_EQUAL_64(0x7fffffff, x12);
+  CHECK_EQUAL_64(0x80000000, x13);
+  CHECK_EQUAL_64(0x7ffffffe, x14);
+  CHECK_EQUAL_64(0x80000001, x15);
+  CHECK_EQUAL_64(1, x17);
+  CHECK_EQUAL_64(1, x18);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x19);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x20);
+  CHECK_EQUAL_64(0x8000000000000000UL, x21);
+  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+  CHECK_EQUAL_64(0x8000008000000000UL, x23);
+  CHECK_EQUAL_64(1, x24);
+  CHECK_EQUAL_64(1, x25);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x26);
+  CHECK_EQUAL_64(0x7fffffffffffffffUL, x27);
+  CHECK_EQUAL_64(0x8000000000000000UL, x28);
+  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  CHECK_EQUAL_64(0x8000000000000400UL, x30);
 
   TEARDOWN();
 }
@@ -7412,35 +7611,35 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
-  ASSERT_EQUAL_64(1, x1);
-  ASSERT_EQUAL_64(1, x2);
-  ASSERT_EQUAL_64(0, x3);
-  ASSERT_EQUAL_64(0xffffffff, x4);
-  ASSERT_EQUAL_64(0, x5);
-  ASSERT_EQUAL_64(0x7fffff80, x6);
-  ASSERT_EQUAL_64(0, x7);
-  ASSERT_EQUAL_64(1, x8);
-  ASSERT_EQUAL_64(1, x9);
-  ASSERT_EQUAL_64(1, x10);
-  ASSERT_EQUAL_64(0, x11);
-  ASSERT_EQUAL_64(0xffffffff, x12);
-  ASSERT_EQUAL_64(0, x13);
-  ASSERT_EQUAL_64(0x7ffffffe, x14);
-  ASSERT_EQUAL_64(1, x17);
-  ASSERT_EQUAL_64(1, x18);
-  ASSERT_EQUAL_64(0x0UL, x19);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x20);
-  ASSERT_EQUAL_64(0x0UL, x21);
-  ASSERT_EQUAL_64(0x7fffff8000000000UL, x22);
-  ASSERT_EQUAL_64(0x0UL, x23);
-  ASSERT_EQUAL_64(1, x24);
-  ASSERT_EQUAL_64(1, x25);
-  ASSERT_EQUAL_64(0x0UL, x26);
-  ASSERT_EQUAL_64(0xffffffffffffffffUL, x27);
-  ASSERT_EQUAL_64(0x0UL, x28);
-  ASSERT_EQUAL_64(0x7ffffffffffffc00UL, x29);
-  ASSERT_EQUAL_64(0x0UL, x30);
+  CHECK_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x1);
+  CHECK_EQUAL_64(1, x2);
+  CHECK_EQUAL_64(0, x3);
+  CHECK_EQUAL_64(0xffffffff, x4);
+  CHECK_EQUAL_64(0, x5);
+  CHECK_EQUAL_64(0x7fffff80, x6);
+  CHECK_EQUAL_64(0, x7);
+  CHECK_EQUAL_64(1, x8);
+  CHECK_EQUAL_64(1, x9);
+  CHECK_EQUAL_64(1, x10);
+  CHECK_EQUAL_64(0, x11);
+  CHECK_EQUAL_64(0xffffffff, x12);
+  CHECK_EQUAL_64(0, x13);
+  CHECK_EQUAL_64(0x7ffffffe, x14);
+  CHECK_EQUAL_64(1, x17);
+  CHECK_EQUAL_64(1, x18);
+  CHECK_EQUAL_64(0x0UL, x19);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x20);
+  CHECK_EQUAL_64(0x0UL, x21);
+  CHECK_EQUAL_64(0x7fffff8000000000UL, x22);
+  CHECK_EQUAL_64(0x0UL, x23);
+  CHECK_EQUAL_64(1, x24);
+  CHECK_EQUAL_64(1, x25);
+  CHECK_EQUAL_64(0x0UL, x26);
+  CHECK_EQUAL_64(0xffffffffffffffffUL, x27);
+  CHECK_EQUAL_64(0x0UL, x28);
+  CHECK_EQUAL_64(0x7ffffffffffffc00UL, x29);
+  CHECK_EQUAL_64(0x0UL, x30);
 
   TEARDOWN();
 }
@@ -7528,16 +7727,16 @@
   for (int fbits = 0; fbits <= 32; fbits++) {
     double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
     double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
-    ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
-    ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
-    if (cvtf_s32) ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
-    if (cvtf_u32) ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
+    CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+    CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+    if (cvtf_s32) CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_w[fbits]);
+    if (cvtf_u32) CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_w[fbits]);
   }
   for (int fbits = 33; fbits <= 64; fbits++) {
     double expected_scvtf = expected_scvtf_base / pow(2.0, fbits);
     double expected_ucvtf = expected_ucvtf_base / pow(2.0, fbits);
-    ASSERT_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
-    ASSERT_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
+    CHECK_EQUAL_FP64(expected_scvtf, results_scvtf_x[fbits]);
+    CHECK_EQUAL_FP64(expected_ucvtf, results_ucvtf_x[fbits]);
   }
 
   TEARDOWN();
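For context on the expected-value arithmetic above: SCVTF and UCVTF with a non-zero fbits operand treat the integer source as a fixed-point number with fbits fractional bits, so the exact result is the integer divided by 2^fbits, which is what the pow(2.0, fbits) division models. A hedged sketch of the same computation (DecodeFixedPoint is an illustrative name, not a V8 helper):

#include <cmath>
#include <cstdint>
#include <cstdio>

// Model of SCVTF with fbits fractional bits: scale the integer by 2^-fbits.
static double DecodeFixedPoint(int64_t raw, int fbits) {
  return static_cast<double>(raw) / std::pow(2.0, fbits);
}

int main() {
  std::printf("%f\n", DecodeFixedPoint(0x10, 4));  // 1.0: 0x10 with 4 fraction bits.
  std::printf("%f\n", DecodeFixedPoint(0x10, 0));  // 16.0: plain integer conversion.
  return 0;
}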
@@ -7683,18 +7882,18 @@
   for (int fbits = 0; fbits <= 32; fbits++) {
     float expected_scvtf = expected_scvtf_base / powf(2, fbits);
     float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
-    ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
-    ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
-    if (cvtf_s32) ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
-    if (cvtf_u32) ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
+    CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+    CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+    if (cvtf_s32) CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_w[fbits]);
+    if (cvtf_u32) CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_w[fbits]);
     break;
   }
   for (int fbits = 33; fbits <= 64; fbits++) {
     break;
     float expected_scvtf = expected_scvtf_base / powf(2, fbits);
     float expected_ucvtf = expected_ucvtf_base / powf(2, fbits);
-    ASSERT_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
-    ASSERT_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
+    CHECK_EQUAL_FP32(expected_scvtf, results_scvtf_x[fbits]);
+    CHECK_EQUAL_FP32(expected_ucvtf, results_ucvtf_x[fbits]);
   }
 
   TEARDOWN();
@@ -7798,13 +7997,13 @@
   RUN();
 
   // NZCV
-  ASSERT_EQUAL_32(ZCFlag, w3);
-  ASSERT_EQUAL_32(NFlag, w4);
-  ASSERT_EQUAL_32(ZCVFlag, w5);
+  CHECK_EQUAL_32(ZCFlag, w3);
+  CHECK_EQUAL_32(NFlag, w4);
+  CHECK_EQUAL_32(ZCVFlag, w5);
 
   // FPCR
   // The default FPCR on Linux-based platforms is 0.
-  ASSERT_EQUAL_32(0, w6);
+  CHECK_EQUAL_32(0, w6);
 
   TEARDOWN();
 }
@@ -7872,11 +8071,11 @@
   RUN();
 
   // We should have incremented x7 (from 0) exactly 8 times.
-  ASSERT_EQUAL_64(8, x7);
+  CHECK_EQUAL_64(8, x7);
 
-  ASSERT_EQUAL_64(fpcr_core, x8);
-  ASSERT_EQUAL_64(fpcr_core, x9);
-  ASSERT_EQUAL_64(0, x10);
+  CHECK_EQUAL_64(fpcr_core, x8);
+  CHECK_EQUAL_64(fpcr_core, x9);
+  CHECK_EQUAL_64(0, x10);
 
   TEARDOWN();
 }
@@ -7894,8 +8093,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_REGISTERS(before);
-  ASSERT_EQUAL_NZCV(before.flags_nzcv());
+  CHECK_EQUAL_REGISTERS(before);
+  CHECK_EQUAL_NZCV(before.flags_nzcv());
 
   TEARDOWN();
 }
@@ -7961,8 +8160,8 @@
 
   RUN();
 
-  ASSERT_EQUAL_REGISTERS(before);
-  ASSERT_EQUAL_NZCV(before.flags_nzcv());
+  CHECK_EQUAL_REGISTERS(before);
+  CHECK_EQUAL_NZCV(before.flags_nzcv());
 
   TEARDOWN();
 }
@@ -8026,7 +8225,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_REGISTERS(before);
+  CHECK_EQUAL_REGISTERS(before);
 
   TEARDOWN();
 }
@@ -8139,15 +8338,15 @@
   END();
   RUN();
 
-  ASSERT_EQUAL_64(literal_base * 1, x0);
-  ASSERT_EQUAL_64(literal_base * 2, x1);
-  ASSERT_EQUAL_64(literal_base * 3, x2);
-  ASSERT_EQUAL_64(literal_base * 4, x3);
+  CHECK_EQUAL_64(literal_base * 1, x0);
+  CHECK_EQUAL_64(literal_base * 2, x1);
+  CHECK_EQUAL_64(literal_base * 3, x2);
+  CHECK_EQUAL_64(literal_base * 4, x3);
 
-  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
-  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
-  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
-  ASSERT_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
+  CHECK_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
+  CHECK_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
+  CHECK_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+  CHECK_EQUAL_64((literal_base * 4) & 0xffffffff, x13);
 
   TEARDOWN();
 }
@@ -8217,17 +8416,17 @@
   END();
   RUN();
 
-  ASSERT_EQUAL_64(literal_base * 1, x0);
-  ASSERT_EQUAL_64(literal_base * 2, x1);
-  ASSERT_EQUAL_64(literal_base * 3, x2);
-  ASSERT_EQUAL_64(literal_base * 4, x3);
-  ASSERT_EQUAL_64(literal_base * 5, x4);
-  ASSERT_EQUAL_64(literal_base * 6, x5);
-  ASSERT_EQUAL_64(literal_base * 7, x6);
+  CHECK_EQUAL_64(literal_base * 1, x0);
+  CHECK_EQUAL_64(literal_base * 2, x1);
+  CHECK_EQUAL_64(literal_base * 3, x2);
+  CHECK_EQUAL_64(literal_base * 4, x3);
+  CHECK_EQUAL_64(literal_base * 5, x4);
+  CHECK_EQUAL_64(literal_base * 6, x5);
+  CHECK_EQUAL_64(literal_base * 7, x6);
 
-  ASSERT_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
-  ASSERT_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
-  ASSERT_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
+  CHECK_EQUAL_64((literal_base * 1) & 0xffffffff, x10);
+  CHECK_EQUAL_64((literal_base * 2) & 0xffffffff, x11);
+  CHECK_EQUAL_64((literal_base * 3) & 0xffffffff, x12);
 
   TEARDOWN();
 }
@@ -8274,10 +8473,10 @@
   uint64_t x5_expected = ((x1_expected << 16) & 0xffff0000) |
                          ((x1_expected >> 16) & 0x0000ffff);
 
-  ASSERT_EQUAL_64(x0_expected, x0);
-  ASSERT_EQUAL_64(x1_expected, x1);
-  ASSERT_EQUAL_64(x4_expected, x4);
-  ASSERT_EQUAL_64(x5_expected, x5);
+  CHECK_EQUAL_64(x0_expected, x0);
+  CHECK_EQUAL_64(x1_expected, x1);
+  CHECK_EQUAL_64(x4_expected, x4);
+  CHECK_EQUAL_64(x5_expected, x5);
 
   TEARDOWN();
 }
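The x4_expected/x5_expected values above are plain halfword shuffles; the x5 expression swaps the two 16-bit halves of the low word. A standalone check of that identity (illustrative only):

#include <cassert>
#include <cstdint>

// Swap the two 16-bit halves of a 32-bit value, mirroring the
// x5_expected computation above.
static uint32_t SwapHalfwords(uint32_t v) {
  return ((v << 16) & 0xffff0000u) | ((v >> 16) & 0x0000ffffu);
}

int main() {
  assert(SwapHalfwords(0x12345678u) == 0x56781234u);
  return 0;
}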
@@ -8311,7 +8510,7 @@
   __ Poke(x1, 8);
   __ Poke(x0, 0);
   {
-    ASSERT(__ StackPointer().Is(csp));
+    DCHECK(__ StackPointer().Is(csp));
     __ Mov(x4, __ StackPointer());
     __ SetStackPointer(x4);
 
@@ -8343,12 +8542,12 @@
   uint64_t x7_expected = ((x1_expected << 16) & 0xffff0000) |
                          ((x0_expected >> 48) & 0x0000ffff);
 
-  ASSERT_EQUAL_64(x0_expected, x0);
-  ASSERT_EQUAL_64(x1_expected, x1);
-  ASSERT_EQUAL_64(x2_expected, x2);
-  ASSERT_EQUAL_64(x3_expected, x3);
-  ASSERT_EQUAL_64(x6_expected, x6);
-  ASSERT_EQUAL_64(x7_expected, x7);
+  CHECK_EQUAL_64(x0_expected, x0);
+  CHECK_EQUAL_64(x1_expected, x1);
+  CHECK_EQUAL_64(x2_expected, x2);
+  CHECK_EQUAL_64(x3_expected, x3);
+  CHECK_EQUAL_64(x6_expected, x6);
+  CHECK_EQUAL_64(x7_expected, x7);
 
   TEARDOWN();
 }
@@ -8408,7 +8607,7 @@
   uint64_t literal_base = 0x0100001000100101UL;
 
   {
-    ASSERT(__ StackPointer().Is(csp));
+    DCHECK(__ StackPointer().Is(csp));
     __ Mov(jssp, __ StackPointer());
     __ SetStackPointer(jssp);
 
@@ -8437,7 +8636,7 @@
           case 3:  __ Push(r[2], r[1], r[0]); break;
           case 2:  __ Push(r[1], r[0]);       break;
           case 1:  __ Push(r[0]);             break;
-          default: ASSERT(i == 0);            break;
+          default: DCHECK(i == 0);            break;
         }
         break;
       case PushPopRegList:
@@ -8459,7 +8658,7 @@
           case 3:  __ Pop(r[i], r[i+1], r[i+2]); break;
           case 2:  __ Pop(r[i], r[i+1]);         break;
           case 1:  __ Pop(r[i]);                 break;
-          default: ASSERT(i == reg_count);       break;
+          default: DCHECK(i == reg_count);       break;
         }
         break;
       case PushPopRegList:
@@ -8479,14 +8678,14 @@
   RUN();
 
   // Check that the register contents were preserved.
-  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+  // Always use CHECK_EQUAL_64, even when testing W registers, so we can test
   // that the upper word was properly cleared by Pop.
   literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
   for (int i = 0; i < reg_count; i++) {
     if (x[i].IsZero()) {
-      ASSERT_EQUAL_64(0, x[i]);
+      CHECK_EQUAL_64(0, x[i]);
     } else {
-      ASSERT_EQUAL_64(literal_base * i, x[i]);
+      CHECK_EQUAL_64(literal_base * i, x[i]);
     }
   }
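A brief aside on why CHECK_EQUAL_64 is safe even for W registers: on AArch64, writing a W register zero-extends the result into the full X register, so a 32-bit Pop must leave the upper word clear, and the masked literal_base models exactly that. A host-side sketch of the masking (illustrative):

#include <cassert>
#include <cstdint>

int main() {
  // A 32-bit register write zero-extends, so model a W-sized pop by
  // masking the 64-bit literal down to its low word.
  uint64_t literal_base = 0x0100001000100101ULL;
  int reg_size = 32;
  uint64_t masked = literal_base & (0xffffffffffffffffULL >> (64 - reg_size));
  assert(masked == 0x00100101ULL);
  return 0;
}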
 
@@ -8590,7 +8789,7 @@
   uint64_t literal_base = 0x0100001000100101UL;
 
   {
-    ASSERT(__ StackPointer().Is(csp));
+    DCHECK(__ StackPointer().Is(csp));
     __ Mov(jssp, __ StackPointer());
     __ SetStackPointer(jssp);
 
@@ -8621,7 +8820,7 @@
           case 3:  __ Push(v[2], v[1], v[0]); break;
           case 2:  __ Push(v[1], v[0]);       break;
           case 1:  __ Push(v[0]);             break;
-          default: ASSERT(i == 0);            break;
+          default: DCHECK(i == 0);            break;
         }
         break;
       case PushPopRegList:
@@ -8643,7 +8842,7 @@
           case 3:  __ Pop(v[i], v[i+1], v[i+2]); break;
           case 2:  __ Pop(v[i], v[i+1]);         break;
           case 1:  __ Pop(v[i]);                 break;
-          default: ASSERT(i == reg_count);       break;
+          default: DCHECK(i == reg_count);       break;
         }
         break;
       case PushPopRegList:
@@ -8663,14 +8862,14 @@
   RUN();
 
   // Check that the register contents were preserved.
-  // Always use ASSERT_EQUAL_FP64, even when testing S registers, so we can
+  // Always use CHECK_EQUAL_FP64, even when testing S registers, so we can
   // test that the upper word was properly cleared by Pop.
   literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
   for (int i = 0; i < reg_count; i++) {
     uint64_t literal = literal_base * i;
     double expected;
     memcpy(&expected, &literal, sizeof(expected));
-    ASSERT_EQUAL_FP64(expected, d[i]);
+    CHECK_EQUAL_FP64(expected, d[i]);
   }
 
   TEARDOWN();
@@ -8767,7 +8966,7 @@
 
   START();
   {
-    ASSERT(__ StackPointer().Is(csp));
+    DCHECK(__ StackPointer().Is(csp));
     __ Mov(jssp, __ StackPointer());
     __ SetStackPointer(jssp);
 
@@ -8803,16 +9002,16 @@
 
   RUN();
 
-  // Always use ASSERT_EQUAL_64, even when testing W registers, so we can test
+  // Always use CHECK_EQUAL_64, even when testing W registers, so we can test
   // that the upper word was properly cleared by Pop.
   literal_base &= (0xffffffffffffffffUL >> (64-reg_size));
 
-  ASSERT_EQUAL_64(literal_base * 3, x[9]);
-  ASSERT_EQUAL_64(literal_base * 2, x[8]);
-  ASSERT_EQUAL_64(literal_base * 0, x[7]);
-  ASSERT_EQUAL_64(literal_base * 3, x[6]);
-  ASSERT_EQUAL_64(literal_base * 1, x[5]);
-  ASSERT_EQUAL_64(literal_base * 2, x[4]);
+  CHECK_EQUAL_64(literal_base * 3, x[9]);
+  CHECK_EQUAL_64(literal_base * 2, x[8]);
+  CHECK_EQUAL_64(literal_base * 0, x[7]);
+  CHECK_EQUAL_64(literal_base * 3, x[6]);
+  CHECK_EQUAL_64(literal_base * 1, x[5]);
+  CHECK_EQUAL_64(literal_base * 2, x[4]);
 
   TEARDOWN();
 }
@@ -8872,7 +9071,7 @@
 
   START();
   {
-    ASSERT(__ StackPointer().Is(csp));
+    DCHECK(__ StackPointer().Is(csp));
     __ Mov(jssp, __ StackPointer());
     __ SetStackPointer(jssp);
 
@@ -8920,7 +9119,7 @@
 
     int active_w_slots = 0;
     for (int i = 0; active_w_slots < requested_w_slots; i++) {
-      ASSERT(i < reg_count);
+      DCHECK(i < reg_count);
       // In order to test various arguments to PushMultipleTimes, and to try to
       // exercise different alignment and overlap effects, we push each
       // register a different number of times.
@@ -8993,7 +9192,7 @@
       }
       next_is_64 = !next_is_64;
     }
-    ASSERT(active_w_slots == 0);
+    DCHECK(active_w_slots == 0);
 
     // Drop memory to restore jssp.
     __ Drop(claim, kByteSizeInBytes);
@@ -9021,15 +9220,15 @@
       expected = stack[slot++];
     }
 
-    // Always use ASSERT_EQUAL_64, even when testing W registers, so we can
+    // Always use CHECK_EQUAL_64, even when testing W registers, so we can
     // test that the upper word was properly cleared by Pop.
     if (x[i].IsZero()) {
-      ASSERT_EQUAL_64(0, x[i]);
+      CHECK_EQUAL_64(0, x[i]);
     } else {
-      ASSERT_EQUAL_64(expected, x[i]);
+      CHECK_EQUAL_64(expected, x[i]);
     }
   }
-  ASSERT(slot == requested_w_slots);
+  DCHECK(slot == requested_w_slots);
 
   TEARDOWN();
 }
@@ -9059,7 +9258,7 @@
 
   START();
 
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
 
   __ Mov(x3, 0x3333333333333333UL);
   __ Mov(x2, 0x2222222222222222UL);
@@ -9104,40 +9303,40 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1111111111111111UL, x3);
-  ASSERT_EQUAL_64(0x0000000000000000UL, x2);
-  ASSERT_EQUAL_64(0x3333333333333333UL, x1);
-  ASSERT_EQUAL_64(0x2222222222222222UL, x0);
-  ASSERT_EQUAL_64(0x3333333333333333UL, x9);
-  ASSERT_EQUAL_64(0x2222222222222222UL, x8);
-  ASSERT_EQUAL_64(0x0000000000000000UL, x7);
-  ASSERT_EQUAL_64(0x3333333333333333UL, x6);
-  ASSERT_EQUAL_64(0x1111111111111111UL, x5);
-  ASSERT_EQUAL_64(0x2222222222222222UL, x4);
+  CHECK_EQUAL_64(0x1111111111111111UL, x3);
+  CHECK_EQUAL_64(0x0000000000000000UL, x2);
+  CHECK_EQUAL_64(0x3333333333333333UL, x1);
+  CHECK_EQUAL_64(0x2222222222222222UL, x0);
+  CHECK_EQUAL_64(0x3333333333333333UL, x9);
+  CHECK_EQUAL_64(0x2222222222222222UL, x8);
+  CHECK_EQUAL_64(0x0000000000000000UL, x7);
+  CHECK_EQUAL_64(0x3333333333333333UL, x6);
+  CHECK_EQUAL_64(0x1111111111111111UL, x5);
+  CHECK_EQUAL_64(0x2222222222222222UL, x4);
 
-  ASSERT_EQUAL_32(0x11111111U, w13);
-  ASSERT_EQUAL_32(0x33333333U, w12);
-  ASSERT_EQUAL_32(0x00000000U, w11);
-  ASSERT_EQUAL_32(0x22222222U, w10);
-  ASSERT_EQUAL_32(0x11111111U, w17);
-  ASSERT_EQUAL_32(0x00000000U, w16);
-  ASSERT_EQUAL_32(0x33333333U, w15);
-  ASSERT_EQUAL_32(0x22222222U, w14);
+  CHECK_EQUAL_32(0x11111111U, w13);
+  CHECK_EQUAL_32(0x33333333U, w12);
+  CHECK_EQUAL_32(0x00000000U, w11);
+  CHECK_EQUAL_32(0x22222222U, w10);
+  CHECK_EQUAL_32(0x11111111U, w17);
+  CHECK_EQUAL_32(0x00000000U, w16);
+  CHECK_EQUAL_32(0x33333333U, w15);
+  CHECK_EQUAL_32(0x22222222U, w14);
 
-  ASSERT_EQUAL_32(0x11111111U, w18);
-  ASSERT_EQUAL_32(0x11111111U, w19);
-  ASSERT_EQUAL_32(0x11111111U, w20);
-  ASSERT_EQUAL_32(0x11111111U, w21);
-  ASSERT_EQUAL_64(0x3333333333333333UL, x22);
-  ASSERT_EQUAL_64(0x0000000000000000UL, x23);
+  CHECK_EQUAL_32(0x11111111U, w18);
+  CHECK_EQUAL_32(0x11111111U, w19);
+  CHECK_EQUAL_32(0x11111111U, w20);
+  CHECK_EQUAL_32(0x11111111U, w21);
+  CHECK_EQUAL_64(0x3333333333333333UL, x22);
+  CHECK_EQUAL_64(0x0000000000000000UL, x23);
 
-  ASSERT_EQUAL_64(0x3333333333333333UL, x24);
-  ASSERT_EQUAL_64(0x3333333333333333UL, x26);
+  CHECK_EQUAL_64(0x3333333333333333UL, x24);
+  CHECK_EQUAL_64(0x3333333333333333UL, x26);
 
-  ASSERT_EQUAL_32(0x33333333U, w25);
-  ASSERT_EQUAL_32(0x00000000U, w27);
-  ASSERT_EQUAL_32(0x22222222U, w28);
-  ASSERT_EQUAL_32(0x33333333U, w29);
+  CHECK_EQUAL_32(0x33333333U, w25);
+  CHECK_EQUAL_32(0x00000000U, w27);
+  CHECK_EQUAL_32(0x22222222U, w28);
+  CHECK_EQUAL_32(0x33333333U, w29);
   TEARDOWN();
 }
 
@@ -9148,7 +9347,7 @@
 
   START();
 
-  ASSERT(__ StackPointer().Is(csp));
+  DCHECK(__ StackPointer().Is(csp));
   __ Mov(jssp, __ StackPointer());
   __ SetStackPointer(jssp);
 
@@ -9199,19 +9398,19 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1234000000000000, x0);
-  ASSERT_EQUAL_64(0x1234000100010001, x1);
-  ASSERT_EQUAL_64(0x1234000200020002, x2);
-  ASSERT_EQUAL_64(0x1234000300030003, x3);
+  CHECK_EQUAL_64(0x1234000000000000, x0);
+  CHECK_EQUAL_64(0x1234000100010001, x1);
+  CHECK_EQUAL_64(0x1234000200020002, x2);
+  CHECK_EQUAL_64(0x1234000300030003, x3);
 
-  ASSERT_EQUAL_32(0x12340004, w4);
-  ASSERT_EQUAL_32(0x12340005, w5);
-  ASSERT_EQUAL_32(0x12340006, w6);
+  CHECK_EQUAL_32(0x12340004, w4);
+  CHECK_EQUAL_32(0x12340005, w5);
+  CHECK_EQUAL_32(0x12340006, w6);
 
-  ASSERT_EQUAL_FP64(123400.0, d0);
-  ASSERT_EQUAL_FP64(123401.0, d1);
+  CHECK_EQUAL_FP64(123400.0, d0);
+  CHECK_EQUAL_FP64(123401.0, d1);
 
-  ASSERT_EQUAL_FP32(123402.0, s2);
+  CHECK_EQUAL_FP32(123402.0, s2);
 
   TEARDOWN();
 }
@@ -9223,7 +9422,7 @@
 
   START();
 
-  ASSERT(__ StackPointer().Is(csp));
+  DCHECK(__ StackPointer().Is(csp));
   __ Mov(jssp, __ StackPointer());
   __ SetStackPointer(jssp);
 
@@ -9274,19 +9473,19 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x1234000000000000, x0);
-  ASSERT_EQUAL_64(0x1234000100010001, x1);
-  ASSERT_EQUAL_64(0x1234000200020002, x2);
-  ASSERT_EQUAL_64(0x1234000300030003, x3);
+  CHECK_EQUAL_64(0x1234000000000000, x0);
+  CHECK_EQUAL_64(0x1234000100010001, x1);
+  CHECK_EQUAL_64(0x1234000200020002, x2);
+  CHECK_EQUAL_64(0x1234000300030003, x3);
 
-  ASSERT_EQUAL_64(0x0000000012340004, x4);
-  ASSERT_EQUAL_64(0x0000000012340005, x5);
-  ASSERT_EQUAL_64(0x0000000012340006, x6);
+  CHECK_EQUAL_64(0x0000000012340004, x4);
+  CHECK_EQUAL_64(0x0000000012340005, x5);
+  CHECK_EQUAL_64(0x0000000012340006, x6);
 
-  ASSERT_EQUAL_FP64(123400.0, d0);
-  ASSERT_EQUAL_FP64(123401.0, d1);
+  CHECK_EQUAL_FP64(123400.0, d0);
+  CHECK_EQUAL_FP64(123401.0, d1);
 
-  ASSERT_EQUAL_FP32(123402.0, s2);
+  CHECK_EQUAL_FP32(123402.0, s2);
 
   TEARDOWN();
 }
@@ -9352,14 +9551,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
-  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
-  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
-  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
-  ASSERT_EQUAL_64(0, x4);
-  ASSERT_EQUAL_64(0, x5);
-  ASSERT_EQUAL_64(0, x6);
-  ASSERT_EQUAL_64(1, x7);
+  CHECK_EQUAL_64(0x5555555500000001UL, x0);
+  CHECK_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+  CHECK_EQUAL_64(0x1234567800000000UL, x2);
+  CHECK_EQUAL_64(0x8765432100000000UL, x3);
+  CHECK_EQUAL_64(0, x4);
+  CHECK_EQUAL_64(0, x5);
+  CHECK_EQUAL_64(0, x6);
+  CHECK_EQUAL_64(1, x7);
 
   TEARDOWN();
 }
@@ -9425,14 +9624,14 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0x5555555500000001UL, x0);
-  ASSERT_EQUAL_64(0xaaaaaaaa00000001UL, x1);
-  ASSERT_EQUAL_64(0x1234567800000000UL, x2);
-  ASSERT_EQUAL_64(0x8765432100000000UL, x3);
-  ASSERT_EQUAL_64(0, x4);
-  ASSERT_EQUAL_64(1, x5);
-  ASSERT_EQUAL_64(1, x6);
-  ASSERT_EQUAL_64(1, x7);
+  CHECK_EQUAL_64(0x5555555500000001UL, x0);
+  CHECK_EQUAL_64(0xaaaaaaaa00000001UL, x1);
+  CHECK_EQUAL_64(0x1234567800000000UL, x2);
+  CHECK_EQUAL_64(0x8765432100000000UL, x3);
+  CHECK_EQUAL_64(0, x4);
+  CHECK_EQUAL_64(1, x5);
+  CHECK_EQUAL_64(1, x6);
+  CHECK_EQUAL_64(1, x7);
 
   TEARDOWN();
 }
@@ -9844,7 +10043,7 @@
   __ Printf("%%%%%s%%%c%%\n", x2, w13);
 
   // Print the stack pointer (csp).
-  ASSERT(csp.Is(__ StackPointer()));
+  DCHECK(csp.Is(__ StackPointer()));
   __ Printf("StackPointer(csp): 0x%016" PRIx64 ", 0x%08" PRIx32 "\n",
             __ StackPointer(), __ StackPointer().W());
 
@@ -9874,7 +10073,7 @@
  // Printf preserves all registers by default, so we can't look at the number
  // of bytes that were printed. However, the printf_no_preserve test should
  // check that, and here we just test that we didn't clobber any registers.
-  ASSERT_EQUAL_REGISTERS(before);
+  CHECK_EQUAL_REGISTERS(before);
 
   TEARDOWN();
 }
@@ -9968,35 +10167,35 @@
   // use the return code to check that the string length was correct.
 
   // Printf with no arguments.
-  ASSERT_EQUAL_64(strlen(test_plain_string), x19);
+  CHECK_EQUAL_64(strlen(test_plain_string), x19);
   // x0: 1234, x1: 0x00001234
-  ASSERT_EQUAL_64(25, x20);
+  CHECK_EQUAL_64(25, x20);
   // d0: 1.234000
-  ASSERT_EQUAL_64(13, x21);
+  CHECK_EQUAL_64(13, x21);
   // Test %s: 'This is a substring.'
-  ASSERT_EQUAL_64(32, x22);
+  CHECK_EQUAL_64(32, x22);
   // w3(uint32): 4294967295
   // w4(int32): -1
   // x5(uint64): 18446744073709551615
   // x6(int64): -1
-  ASSERT_EQUAL_64(23 + 14 + 33 + 14, x23);
+  CHECK_EQUAL_64(23 + 14 + 33 + 14, x23);
   // %f: 1.234000
   // %g: 2.345
   // %e: 3.456000e+00
   // %E: 4.567000E+00
-  ASSERT_EQUAL_64(13 + 10 + 17 + 17, x24);
+  CHECK_EQUAL_64(13 + 10 + 17 + 17, x24);
   // 0x89abcdef, 0x123456789abcdef
-  ASSERT_EQUAL_64(30, x25);
+  CHECK_EQUAL_64(30, x25);
   // 42
-  ASSERT_EQUAL_64(3, x26);
+  CHECK_EQUAL_64(3, x26);
   // StackPointer(not csp): 0x00007fb037ae2370, 0x37ae2370
   // Note: This is an example value, but the field width is fixed here so the
   // string length is still predictable.
-  ASSERT_EQUAL_64(54, x27);
+  CHECK_EQUAL_64(54, x27);
   // 3=3, 4=40, 5=500
-  ASSERT_EQUAL_64(17, x28);
+  CHECK_EQUAL_64(17, x28);
   // w3: 4294967295, s1: 1.234000, x5: 18446744073709551615, d3: 3.456000
-  ASSERT_EQUAL_64(69, x29);
+  CHECK_EQUAL_64(69, x29);
 
   TEARDOWN();
 }
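An aside on how these byte counts can be reproduced off-target: snprintf with a null buffer returns the length the output would have had, which is the same count the generated Printf stores in x19-x29. For example, the 13 checked in x21 presumably matches "d0: 1.234000\n" (4 + 8 + 1 characters). A minimal host-side check (illustrative; the exact format strings live in the test body above this hunk):

#include <cstdio>

int main() {
  // snprintf(NULL, 0, ...) returns the would-be output length.
  int len = std::snprintf(NULL, 0, "d0: %f\n", 1.234);
  std::printf("length = %d\n", len);  // Prints 13.
  return 0;
}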
@@ -10071,58 +10270,6 @@
 }
 
 
-static void DoSmiAbsTest(int32_t value, bool must_fail = false) {
-  SETUP();
-
-  START();
-  Label end, slow;
-  __ Mov(x2, 0xc001c0de);
-  __ Mov(x1, value);
-  __ SmiTag(x1);
-  __ SmiAbs(x1, &slow);
-  __ SmiUntag(x1);
-  __ B(&end);
-
-  __ Bind(&slow);
-  __ Mov(x2, 0xbad);
-
-  __ Bind(&end);
-  END();
-
-  RUN();
-
-  if (must_fail) {
-    // We tested an invalid conversion. The code must have jumped to slow.
-    ASSERT_EQUAL_64(0xbad, x2);
-  } else {
-    // The conversion is valid, check the result.
-    int32_t result = (value >= 0) ? value : -value;
-    ASSERT_EQUAL_64(result, x1);
-
-    // Check that we didn't jump on slow.
-    ASSERT_EQUAL_64(0xc001c0de, x2);
-  }
-
-  TEARDOWN();
-}
-
-
-TEST(smi_abs) {
-  INIT_V8();
-  // Simple and edge cases.
-  DoSmiAbsTest(0);
-  DoSmiAbsTest(0x12345);
-  DoSmiAbsTest(0x40000000);
-  DoSmiAbsTest(0x7fffffff);
-  DoSmiAbsTest(-1);
-  DoSmiAbsTest(-12345);
-  DoSmiAbsTest(0x80000001);
-
-  // Check that the most negative SMI is detected.
-  DoSmiAbsTest(0x80000000, true);
-}
-
-
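For the record, the deleted SmiAbs test above exercised the one case where a Smi absolute value cannot be computed in place: 0x80000000 is INT32_MIN, and negating it overflows 32-bit arithmetic, so SmiAbs had to branch to the slow path. A host-side illustration of the overflow:

#include <cassert>
#include <cstdint>

int main() {
  // INT32_MIN has no positive 32-bit counterpart; its negation only
  // fits once widened to 64 bits.
  int64_t v = INT64_C(-2147483648);
  int64_t negated = -v;
  assert(negated > INT64_C(2147483647));  // 2^31 exceeds INT32_MAX.
  return 0;
}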
 TEST(blr_lr) {
  // A simple test to check that the simulator correctly handles "blr lr".
   INIT_V8();
@@ -10147,7 +10294,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(0xc001c0de, x0);
+  CHECK_EQUAL_64(0xc001c0de, x0);
 
   TEARDOWN();
 }
@@ -10218,14 +10365,14 @@
   // Make sure that NaN propagation works correctly.
   double sn = rawbits_to_double(0x7ff5555511111111);
   double qn = rawbits_to_double(0x7ffaaaaa11111111);
-  ASSERT(IsSignallingNaN(sn));
-  ASSERT(IsQuietNaN(qn));
+  DCHECK(IsSignallingNaN(sn));
+  DCHECK(IsQuietNaN(qn));
 
   // The input NaNs after passing through ProcessNaN.
   double sn_proc = rawbits_to_double(0x7ffd555511111111);
   double qn_proc = qn;
-  ASSERT(IsQuietNaN(sn_proc));
-  ASSERT(IsQuietNaN(qn_proc));
+  DCHECK(IsQuietNaN(sn_proc));
+  DCHECK(IsQuietNaN(qn_proc));
 
   SETUP();
   START();
@@ -10266,24 +10413,24 @@
   uint64_t sn_raw = double_to_rawbits(sn);
 
   //   - Signalling NaN
-  ASSERT_EQUAL_FP64(sn, d1);
-  ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
-  ASSERT_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
+  CHECK_EQUAL_FP64(sn, d1);
+  CHECK_EQUAL_FP64(rawbits_to_double(sn_raw & ~kDSignMask), d2);
+  CHECK_EQUAL_FP64(rawbits_to_double(sn_raw ^ kDSignMask), d3);
   //   - Quiet NaN
-  ASSERT_EQUAL_FP64(qn, d11);
-  ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
-  ASSERT_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
+  CHECK_EQUAL_FP64(qn, d11);
+  CHECK_EQUAL_FP64(rawbits_to_double(qn_raw & ~kDSignMask), d12);
+  CHECK_EQUAL_FP64(rawbits_to_double(qn_raw ^ kDSignMask), d13);
 
   //   - Signalling NaN
-  ASSERT_EQUAL_FP64(sn_proc, d4);
-  ASSERT_EQUAL_FP64(sn_proc, d5);
-  ASSERT_EQUAL_FP64(sn_proc, d6);
-  ASSERT_EQUAL_FP64(sn_proc, d7);
+  CHECK_EQUAL_FP64(sn_proc, d4);
+  CHECK_EQUAL_FP64(sn_proc, d5);
+  CHECK_EQUAL_FP64(sn_proc, d6);
+  CHECK_EQUAL_FP64(sn_proc, d7);
   //   - Quiet NaN
-  ASSERT_EQUAL_FP64(qn_proc, d14);
-  ASSERT_EQUAL_FP64(qn_proc, d15);
-  ASSERT_EQUAL_FP64(qn_proc, d16);
-  ASSERT_EQUAL_FP64(qn_proc, d17);
+  CHECK_EQUAL_FP64(qn_proc, d14);
+  CHECK_EQUAL_FP64(qn_proc, d15);
+  CHECK_EQUAL_FP64(qn_proc, d16);
+  CHECK_EQUAL_FP64(qn_proc, d17);
 
   TEARDOWN();
 }
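For context on the raw constants used throughout these NaN tests: a binary64 NaN has an all-ones exponent and a non-zero fraction, and bit 51 (the top fraction bit) is the quiet bit, clear for signalling NaNs and set for quiet ones. ProcessNaN quiets a signalling NaN by setting that bit while preserving the payload, which is how 0x7ff5555511111111 becomes 0x7ffd555511111111. A minimal sketch:

#include <cassert>
#include <cstdint>

// Bit 51 is the top fraction bit of a binary64 value; for NaNs it is
// the quiet bit.
static const uint64_t kQuietBit = UINT64_C(1) << 51;

int main() {
  uint64_t sn = UINT64_C(0x7ff5555511111111);  // Signalling: bit 51 clear.
  uint64_t qn = UINT64_C(0x7ffaaaaa11111111);  // Quiet: bit 51 set.
  assert((sn & kQuietBit) == 0);
  assert((qn & kQuietBit) != 0);
  // Quieting preserves the payload, matching sn_proc above.
  assert((sn | kQuietBit) == UINT64_C(0x7ffd555511111111));
  return 0;
}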
@@ -10294,14 +10441,14 @@
   // Make sure that NaN propagation works correctly.
   float sn = rawbits_to_float(0x7f951111);
   float qn = rawbits_to_float(0x7fea1111);
-  ASSERT(IsSignallingNaN(sn));
-  ASSERT(IsQuietNaN(qn));
+  DCHECK(IsSignallingNaN(sn));
+  DCHECK(IsQuietNaN(qn));
 
   // The input NaNs after passing through ProcessNaN.
   float sn_proc = rawbits_to_float(0x7fd51111);
   float qn_proc = qn;
-  ASSERT(IsQuietNaN(sn_proc));
-  ASSERT(IsQuietNaN(qn_proc));
+  DCHECK(IsQuietNaN(sn_proc));
+  DCHECK(IsQuietNaN(qn_proc));
 
   SETUP();
   START();
@@ -10342,32 +10489,32 @@
   uint32_t sn_raw = float_to_rawbits(sn);
 
   //   - Signalling NaN
-  ASSERT_EQUAL_FP32(sn, s1);
-  ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
-  ASSERT_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
+  CHECK_EQUAL_FP32(sn, s1);
+  CHECK_EQUAL_FP32(rawbits_to_float(sn_raw & ~kSSignMask), s2);
+  CHECK_EQUAL_FP32(rawbits_to_float(sn_raw ^ kSSignMask), s3);
   //   - Quiet NaN
-  ASSERT_EQUAL_FP32(qn, s11);
-  ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
-  ASSERT_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
+  CHECK_EQUAL_FP32(qn, s11);
+  CHECK_EQUAL_FP32(rawbits_to_float(qn_raw & ~kSSignMask), s12);
+  CHECK_EQUAL_FP32(rawbits_to_float(qn_raw ^ kSSignMask), s13);
 
   //   - Signalling NaN
-  ASSERT_EQUAL_FP32(sn_proc, s4);
-  ASSERT_EQUAL_FP32(sn_proc, s5);
-  ASSERT_EQUAL_FP32(sn_proc, s6);
-  ASSERT_EQUAL_FP32(sn_proc, s7);
+  CHECK_EQUAL_FP32(sn_proc, s4);
+  CHECK_EQUAL_FP32(sn_proc, s5);
+  CHECK_EQUAL_FP32(sn_proc, s6);
+  CHECK_EQUAL_FP32(sn_proc, s7);
   //   - Quiet NaN
-  ASSERT_EQUAL_FP32(qn_proc, s14);
-  ASSERT_EQUAL_FP32(qn_proc, s15);
-  ASSERT_EQUAL_FP32(qn_proc, s16);
-  ASSERT_EQUAL_FP32(qn_proc, s17);
+  CHECK_EQUAL_FP32(qn_proc, s14);
+  CHECK_EQUAL_FP32(qn_proc, s15);
+  CHECK_EQUAL_FP32(qn_proc, s16);
+  CHECK_EQUAL_FP32(qn_proc, s17);
 
   TEARDOWN();
 }
 
 
 static void ProcessNaNsHelper(double n, double m, double expected) {
-  ASSERT(std::isnan(n) || std::isnan(m));
-  ASSERT(isnan(expected));
+  DCHECK(std::isnan(n) || std::isnan(m));
+  DCHECK(std::isnan(expected));
 
   SETUP();
   START();
@@ -10387,12 +10534,12 @@
   END();
   RUN();
 
-  ASSERT_EQUAL_FP64(expected, d2);
-  ASSERT_EQUAL_FP64(expected, d3);
-  ASSERT_EQUAL_FP64(expected, d4);
-  ASSERT_EQUAL_FP64(expected, d5);
-  ASSERT_EQUAL_FP64(expected, d6);
-  ASSERT_EQUAL_FP64(expected, d7);
+  CHECK_EQUAL_FP64(expected, d2);
+  CHECK_EQUAL_FP64(expected, d3);
+  CHECK_EQUAL_FP64(expected, d4);
+  CHECK_EQUAL_FP64(expected, d5);
+  CHECK_EQUAL_FP64(expected, d6);
+  CHECK_EQUAL_FP64(expected, d7);
 
   TEARDOWN();
 }
@@ -10405,20 +10552,20 @@
   double sm = rawbits_to_double(0x7ff5555522222222);
   double qn = rawbits_to_double(0x7ffaaaaa11111111);
   double qm = rawbits_to_double(0x7ffaaaaa22222222);
-  ASSERT(IsSignallingNaN(sn));
-  ASSERT(IsSignallingNaN(sm));
-  ASSERT(IsQuietNaN(qn));
-  ASSERT(IsQuietNaN(qm));
+  DCHECK(IsSignallingNaN(sn));
+  DCHECK(IsSignallingNaN(sm));
+  DCHECK(IsQuietNaN(qn));
+  DCHECK(IsQuietNaN(qm));
 
   // The input NaNs after passing through ProcessNaN.
   double sn_proc = rawbits_to_double(0x7ffd555511111111);
   double sm_proc = rawbits_to_double(0x7ffd555522222222);
   double qn_proc = qn;
   double qm_proc = qm;
-  ASSERT(IsQuietNaN(sn_proc));
-  ASSERT(IsQuietNaN(sm_proc));
-  ASSERT(IsQuietNaN(qn_proc));
-  ASSERT(IsQuietNaN(qm_proc));
+  DCHECK(IsQuietNaN(sn_proc));
+  DCHECK(IsQuietNaN(sm_proc));
+  DCHECK(IsQuietNaN(qn_proc));
+  DCHECK(IsQuietNaN(qm_proc));
 
   // Quiet NaNs are propagated.
   ProcessNaNsHelper(qn, 0, qn_proc);
@@ -10438,8 +10585,8 @@
 
 
 static void ProcessNaNsHelper(float n, float m, float expected) {
-  ASSERT(std::isnan(n) || std::isnan(m));
-  ASSERT(isnan(expected));
+  DCHECK(std::isnan(n) || std::isnan(m));
+  DCHECK(std::isnan(expected));
 
   SETUP();
   START();
@@ -10459,12 +10606,12 @@
   END();
   RUN();
 
-  ASSERT_EQUAL_FP32(expected, s2);
-  ASSERT_EQUAL_FP32(expected, s3);
-  ASSERT_EQUAL_FP32(expected, s4);
-  ASSERT_EQUAL_FP32(expected, s5);
-  ASSERT_EQUAL_FP32(expected, s6);
-  ASSERT_EQUAL_FP32(expected, s7);
+  CHECK_EQUAL_FP32(expected, s2);
+  CHECK_EQUAL_FP32(expected, s3);
+  CHECK_EQUAL_FP32(expected, s4);
+  CHECK_EQUAL_FP32(expected, s5);
+  CHECK_EQUAL_FP32(expected, s6);
+  CHECK_EQUAL_FP32(expected, s7);
 
   TEARDOWN();
 }
@@ -10477,20 +10624,20 @@
   float sm = rawbits_to_float(0x7f952222);
   float qn = rawbits_to_float(0x7fea1111);
   float qm = rawbits_to_float(0x7fea2222);
-  ASSERT(IsSignallingNaN(sn));
-  ASSERT(IsSignallingNaN(sm));
-  ASSERT(IsQuietNaN(qn));
-  ASSERT(IsQuietNaN(qm));
+  DCHECK(IsSignallingNaN(sn));
+  DCHECK(IsSignallingNaN(sm));
+  DCHECK(IsQuietNaN(qn));
+  DCHECK(IsQuietNaN(qm));
 
   // The input NaNs after passing through ProcessNaN.
   float sn_proc = rawbits_to_float(0x7fd51111);
   float sm_proc = rawbits_to_float(0x7fd52222);
   float qn_proc = qn;
   float qm_proc = qm;
-  ASSERT(IsQuietNaN(sn_proc));
-  ASSERT(IsQuietNaN(sm_proc));
-  ASSERT(IsQuietNaN(qn_proc));
-  ASSERT(IsQuietNaN(qm_proc));
+  DCHECK(IsQuietNaN(sn_proc));
+  DCHECK(IsQuietNaN(sm_proc));
+  DCHECK(IsQuietNaN(qn_proc));
+  DCHECK(IsQuietNaN(qm_proc));
 
   // Quiet NaNs are propagated.
   ProcessNaNsHelper(qn, 0, qn_proc);
@@ -10510,7 +10657,7 @@
 
 
 static void DefaultNaNHelper(float n, float m, float a) {
-  ASSERT(std::isnan(n) || std::isnan(m) || isnan(a));
+  DCHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
 
   bool test_1op = std::isnan(n);
   bool test_2op = std::isnan(n) || std::isnan(m);
@@ -10567,29 +10714,29 @@
 
   if (test_1op) {
     uint32_t n_raw = float_to_rawbits(n);
-    ASSERT_EQUAL_FP32(n, s10);
-    ASSERT_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
-    ASSERT_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s13);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s14);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s15);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s16);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d17);
+    CHECK_EQUAL_FP32(n, s10);
+    CHECK_EQUAL_FP32(rawbits_to_float(n_raw & ~kSSignMask), s11);
+    CHECK_EQUAL_FP32(rawbits_to_float(n_raw ^ kSSignMask), s12);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s13);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s14);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s15);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s16);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d17);
   }
 
   if (test_2op) {
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s18);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s19);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s20);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s21);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s22);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s23);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s18);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s19);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s20);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s21);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s22);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s23);
   }
 
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s24);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s25);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s26);
-  ASSERT_EQUAL_FP32(kFP32DefaultNaN, s27);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s24);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s25);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s26);
+  CHECK_EQUAL_FP32(kFP32DefaultNaN, s27);
 
   TEARDOWN();
 }
@@ -10603,12 +10750,12 @@
   float qn = rawbits_to_float(0x7fea1111);
   float qm = rawbits_to_float(0x7fea2222);
   float qa = rawbits_to_float(0x7feaaaaa);
-  ASSERT(IsSignallingNaN(sn));
-  ASSERT(IsSignallingNaN(sm));
-  ASSERT(IsSignallingNaN(sa));
-  ASSERT(IsQuietNaN(qn));
-  ASSERT(IsQuietNaN(qm));
-  ASSERT(IsQuietNaN(qa));
+  DCHECK(IsSignallingNaN(sn));
+  DCHECK(IsSignallingNaN(sm));
+  DCHECK(IsSignallingNaN(sa));
+  DCHECK(IsQuietNaN(qn));
+  DCHECK(IsQuietNaN(qm));
+  DCHECK(IsQuietNaN(qa));
 
   //   - Signalling NaNs
   DefaultNaNHelper(sn, 0.0f, 0.0f);
@@ -10638,7 +10785,7 @@
 
 
 static void DefaultNaNHelper(double n, double m, double a) {
-  ASSERT(std::isnan(n) || std::isnan(m) || isnan(a));
+  DCHECK(std::isnan(n) || std::isnan(m) || std::isnan(a));
 
   bool test_1op = std::isnan(n);
   bool test_2op = std::isnan(n) || std::isnan(m);
@@ -10695,29 +10842,29 @@
 
   if (test_1op) {
     uint64_t n_raw = double_to_rawbits(n);
-    ASSERT_EQUAL_FP64(n, d10);
-    ASSERT_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
-    ASSERT_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d13);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d14);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d15);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d16);
-    ASSERT_EQUAL_FP32(kFP32DefaultNaN, s17);
+    CHECK_EQUAL_FP64(n, d10);
+    CHECK_EQUAL_FP64(rawbits_to_double(n_raw & ~kDSignMask), d11);
+    CHECK_EQUAL_FP64(rawbits_to_double(n_raw ^ kDSignMask), d12);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d13);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d14);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d15);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d16);
+    CHECK_EQUAL_FP32(kFP32DefaultNaN, s17);
   }
 
   if (test_2op) {
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d18);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d19);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d20);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d21);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d22);
-    ASSERT_EQUAL_FP64(kFP64DefaultNaN, d23);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d18);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d19);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d20);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d21);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d22);
+    CHECK_EQUAL_FP64(kFP64DefaultNaN, d23);
   }
 
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d24);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d25);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d26);
-  ASSERT_EQUAL_FP64(kFP64DefaultNaN, d27);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d24);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d25);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d26);
+  CHECK_EQUAL_FP64(kFP64DefaultNaN, d27);
 
   TEARDOWN();
 }
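For context on kFP64DefaultNaN and kFP32DefaultNaN: with the FPCR DN bit set, the FPU substitutes a fixed default NaN (positive sign, all-ones exponent, only the top fraction bit set) for every NaN result. Assuming the standard encodings of 0x7ff8000000000000 and 0x7fc00000, which is what the V8 constants are expected to hold, a host-side sketch:

#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstring>

int main() {
  // Default NaN: sign 0, exponent all ones, top fraction bit set, rest 0.
  uint64_t bits64 = UINT64_C(0x7ff8000000000000);
  double d;
  std::memcpy(&d, &bits64, sizeof(d));
  assert(std::isnan(d));

  uint32_t bits32 = 0x7fc00000u;
  float f;
  std::memcpy(&f, &bits32, sizeof(f));
  assert(std::isnan(f));
  return 0;
}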
@@ -10731,12 +10878,12 @@
   double qn = rawbits_to_double(0x7ffaaaaa11111111);
   double qm = rawbits_to_double(0x7ffaaaaa22222222);
   double qa = rawbits_to_double(0x7ffaaaaaaaaaaaaa);
-  ASSERT(IsSignallingNaN(sn));
-  ASSERT(IsSignallingNaN(sm));
-  ASSERT(IsSignallingNaN(sa));
-  ASSERT(IsQuietNaN(qn));
-  ASSERT(IsQuietNaN(qm));
-  ASSERT(IsQuietNaN(qa));
+  DCHECK(IsSignallingNaN(sn));
+  DCHECK(IsSignallingNaN(sm));
+  DCHECK(IsSignallingNaN(sa));
+  DCHECK(IsQuietNaN(qn));
+  DCHECK(IsQuietNaN(qm));
+  DCHECK(IsQuietNaN(qa));
 
   //   - Signalling NaNs
   DefaultNaNHelper(sn, 0.0, 0.0);
@@ -10797,7 +10944,7 @@
 
   RUN();
 
-  ASSERT_EQUAL_64(1, x0);
+  CHECK_EQUAL_64(1, x0);
 
   // The return_address_from_call_start function doesn't currently encounter any
   // non-relocatable sequences, so we check it here to make sure it works.
@@ -10854,12 +11001,12 @@
   END();
   RUN();
 
-  ASSERT_EQUAL_64(0, x0);
-  ASSERT_EQUAL_64(value, x1);
-  ASSERT_EQUAL_64(expected, x10);
-  ASSERT_EQUAL_64(expected, x11);
-  ASSERT_EQUAL_64(expected, x12);
-  ASSERT_EQUAL_64(expected, x13);
+  CHECK_EQUAL_64(0, x0);
+  CHECK_EQUAL_64(value, x1);
+  CHECK_EQUAL_64(expected, x10);
+  CHECK_EQUAL_64(expected, x11);
+  CHECK_EQUAL_64(expected, x12);
+  CHECK_EQUAL_64(expected, x13);
 
   TEARDOWN();
 }
@@ -10911,12 +11058,12 @@
   END();
   RUN();
 
-  ASSERT_EQUAL_32(0, w0);
-  ASSERT_EQUAL_32(value, w1);
-  ASSERT_EQUAL_32(expected, w10);
-  ASSERT_EQUAL_32(expected, w11);
-  ASSERT_EQUAL_32(expected, w12);
-  ASSERT_EQUAL_32(expected, w13);
+  CHECK_EQUAL_32(0, w0);
+  CHECK_EQUAL_32(value, w1);
+  CHECK_EQUAL_32(expected, w10);
+  CHECK_EQUAL_32(expected, w11);
+  CHECK_EQUAL_32(expected, w12);
+  CHECK_EQUAL_32(expected, w13);
 
   TEARDOWN();
 }
@@ -10974,16 +11121,16 @@
   for (RelocIterator it(*code, pool_mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     if (RelocInfo::IsConstPool(info->rmode())) {
-      ASSERT(info->data() == constant_pool_size);
+      DCHECK(info->data() == constant_pool_size);
       ++pool_count;
     }
     if (RelocInfo::IsVeneerPool(info->rmode())) {
-      ASSERT(info->data() == veneer_pool_size);
+      DCHECK(info->data() == veneer_pool_size);
       ++pool_count;
     }
   }
 
-  ASSERT(pool_count == 2);
+  DCHECK(pool_count == 2);
 
   TEARDOWN();
 }
diff --git a/test/cctest/test-assembler-ia32.cc b/test/cctest/test-assembler-ia32.cc
index fcbb14b..e8c7f95 100644
--- a/test/cctest/test-assembler-ia32.cc
+++ b/test/cctest/test-assembler-ia32.cc
@@ -29,10 +29,11 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/disassembler.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
+#include "src/ostreams.h"
 #include "src/serialize.h"
 #include "test/cctest/cctest.h"
 
@@ -63,7 +64,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F2 f = FUNCTION_CAST<F2>(code->entry());
   int res = f(3, 4);
@@ -99,7 +101,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F1 f = FUNCTION_CAST<F1>(code->entry());
   int res = f(100);
@@ -139,7 +142,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F1 f = FUNCTION_CAST<F1>(code->entry());
   int res = f(10);
@@ -294,7 +298,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F6 f = FUNCTION_CAST<F6>(code->entry());
   double res = f(12);
@@ -347,14 +352,15 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
 
   F7 f = FUNCTION_CAST<F7>(code->entry());
   CHECK_EQ(kLess, f(1.1, 2.2));
   CHECK_EQ(kEqual, f(2.2, 2.2));
   CHECK_EQ(kGreater, f(3.3, 2.2));
-  CHECK_EQ(kNaN, f(OS::nan_value(), 1.1));
+  CHECK_EQ(kNaN, f(v8::base::OS::nan_value(), 1.1));
 }
 
 
@@ -486,7 +492,7 @@
 
 TEST(StackAlignmentForSSE2) {
   CcTest::InitializeVM();
-  CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
+  CHECK_EQ(0, v8::base::OS::ActivationFrameAlignment() % 16);
 
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope handle_scope(isolate);
@@ -541,7 +547,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
 
   F4 f = FUNCTION_CAST<F4>(code->entry());
@@ -579,7 +586,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
 
   F8 f = FUNCTION_CAST<F8>(code->entry());
diff --git a/test/cctest/test-assembler-mips.cc b/test/cctest/test-assembler-mips.cc
index cd1d5d6..74dcc3a 100644
--- a/test/cctest/test-assembler-mips.cc
+++ b/test/cctest/test-assembler-mips.cc
@@ -170,7 +170,7 @@
   __ Branch(&error, ne, v0, Operand(0x1));
   __ nop();
   __ sltu(v0, t7, t3);
-  __ Branch(&error, ne, v0, Operand(0x0));
+  __ Branch(&error, ne, v0, Operand(zero_reg));
   __ nop();
   // End of SPECIAL class.
 
@@ -185,7 +185,7 @@
 
   __ slti(v0, t1, 0x00002000);  // 0x1
   __ slti(v0, v0, 0xffff8000);  // 0x0
-  __ Branch(&error, ne, v0, Operand(0x0));
+  __ Branch(&error, ne, v0, Operand(zero_reg));
   __ nop();
   __ sltiu(v0, t1, 0x00002000);  // 0x1
   __ sltiu(v0, v0, 0x00008000);  // 0x1
@@ -293,7 +293,7 @@
   __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
   // g = sqrt(f) = 10.97451593465515908537
 
-  if (kArchVariant == kMips32r2) {
+  if (IsMipsArchVariant(kMips32r2)) {
     __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
     __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
     __ madd_d(f14, f6, f4, f6);
@@ -325,7 +325,7 @@
   CHECK_EQ(1.8066e16, t.e);
   CHECK_EQ(120.44, t.f);
   CHECK_EQ(10.97451593465515908537, t.g);
-  if (kArchVariant == kMips32r2) {
+  if (IsMipsArchVariant(kMips32r2)) {
     CHECK_EQ(6.875, t.h);
   }
 }
@@ -351,16 +351,28 @@
   __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
 
  // Swap f4 and f6 by using four integer registers, t0-t3.
-  __ mfc1(t0, f4);
-  __ mfc1(t1, f5);
-  __ mfc1(t2, f6);
-  __ mfc1(t3, f7);
+  if (!IsFp64Mode()) {
+    __ mfc1(t0, f4);
+    __ mfc1(t1, f5);
+    __ mfc1(t2, f6);
+    __ mfc1(t3, f7);
 
-  __ mtc1(t0, f6);
-  __ mtc1(t1, f7);
-  __ mtc1(t2, f4);
-  __ mtc1(t3, f5);
+    __ mtc1(t0, f6);
+    __ mtc1(t1, f7);
+    __ mtc1(t2, f4);
+    __ mtc1(t3, f5);
+  } else {
+    DCHECK(!IsMipsArchVariant(kMips32r1) && !IsMipsArchVariant(kLoongson));
+    __ mfc1(t0, f4);
+    __ mfhc1(t1, f4);
+    __ mfc1(t2, f6);
+    __ mfhc1(t3, f6);
 
+    __ mtc1(t0, f6);
+    __ mthc1(t1, f6);
+    __ mtc1(t2, f4);
+    __ mthc1(t3, f4);
+  }
  // Store the swapped f4 and f6 back to memory.
   __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
   __ sdc1(f6, MemOperand(a0, OFFSET_OF(T, c)) );
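A note on the new branch above: in FR=0 (32-bit FPU) mode a double spans an even/odd register pair, so mfc1 on f4/f5 fetches both halves, while in FR=1 (Fp64) mode each FPU register is a full 64 bits and the high word must travel through mfhc1/mthc1 instead. Either way the swap just exchanges two 64-bit payloads word by word; a host-side model (illustrative):

#include <cassert>
#include <cstdint>
#include <cstring>
#include <utility>

int main() {
  // A double is two 32-bit words; mfc1 reads the low word and mfhc1
  // the high word of a 64-bit FPU register.
  double a = 1.5, b = -2.25;
  uint32_t a_words[2], b_words[2];
  std::memcpy(a_words, &a, sizeof(a));
  std::memcpy(b_words, &b, sizeof(b));
  std::swap(a_words[0], b_words[0]);  // Mirrors the t0-t3 shuffle above.
  std::swap(a_words[1], b_words[1]);
  double a2, b2;
  std::memcpy(&a2, a_words, sizeof(a2));
  std::memcpy(&b2, b_words, sizeof(b2));
  assert(a2 == -2.25 && b2 == 1.5);
  return 0;
}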
@@ -554,21 +566,30 @@
 
   __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
   __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+  if (!IsMipsArchVariant(kMips32r6)) {
   __ c(UN, D, f4, f6);
   __ bc1f(&neither_is_nan);
+  } else {
+    __ cmp(UN, L, f2, f4, f6);
+    __ bc1eqz(&neither_is_nan, f2);
+  }
   __ nop();
   __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
   __ Branch(&outa_here);
 
   __ bind(&neither_is_nan);
 
-  if (kArchVariant == kLoongson) {
+  if (IsMipsArchVariant(kLoongson)) {
     __ c(OLT, D, f6, f4);
     __ bc1t(&less_than);
+  } else if (IsMipsArchVariant(kMips32r6)) {
+    __ cmp(OLT, L, f2, f6, f4);
+    __ bc1nez(&less_than, f2);
   } else {
     __ c(OLT, D, f6, f4, 2);
     __ bc1t(&less_than, 2);
   }
+
   __ nop();
   __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
   __ Branch(&outa_here);
@@ -716,7 +737,7 @@
   MacroAssembler assm(isolate, NULL, 0);
   Label exit, exit2, exit3;
 
-  __ Branch(&exit, ge, a0, Operand(0x00000000));
+  __ Branch(&exit, ge, a0, Operand(zero_reg));
   __ Branch(&exit2, ge, a0, Operand(0x00001FFF));
   __ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
 
@@ -753,50 +774,52 @@
   Assembler assm(isolate, NULL, 0);
   Label L, C;
 
-  if (kArchVariant == kMips32r2) {
-    // Load all structure elements to registers.
-    __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
+  if (!IsMipsArchVariant(kMips32r2)) return;
 
-    // Save the raw bits of the double.
-    __ mfc1(t0, f0);
-    __ mfc1(t1, f1);
-    __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
-    __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
+  // Load all structure elements to registers.
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
 
-    // Convert double in f0 to long, save hi/lo parts.
-    __ cvt_w_d(f0, f0);
-    __ mfc1(t0, f0);  // f0 has a 32-bits word.
-    __ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
+  // Save the raw bits of the double.
+  __ mfc1(t0, f0);
+  __ mfc1(t1, f1);
+  __ sw(t0, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
+  __ sw(t1, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
 
-    // Convert the b long integers to double b.
-    __ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
-    __ mtc1(t0, f8);  // f8 has a 32-bits word.
-    __ cvt_d_w(f10, f8);
-    __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
+  // Convert double in f0 to long, save hi/lo parts.
+  __ cvt_w_d(f0, f0);
+  __ mfc1(t0, f0);  // f0 has a 32-bits word.
+  __ sw(t0, MemOperand(a0, OFFSET_OF(T, word)));
 
-    __ jr(ra);
-    __ nop();
+  // Convert the b long integers to double b.
+  __ lw(t0, MemOperand(a0, OFFSET_OF(T, b_word)));
+  __ mtc1(t0, f8);  // f8 has a 32-bits word.
+  __ cvt_d_w(f10, f8);
+  __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
 
-    CodeDesc desc;
-    assm.GetCode(&desc);
-    Handle<Code> code = isolate->factory()->NewCode(
-        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
-    F3 f = FUNCTION_CAST<F3>(code->entry());
-    t.a = 2.147483646e+09;       // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
-    t.b_word = 0x0ff00ff0;       // 0x0FF00FF0 -> 0x as double.
-    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
-    USE(dummy);
+  __ jr(ra);
+  __ nop();
 
-    CHECK_EQ(0x41DFFFFF, t.dbl_exp);
-    CHECK_EQ(0xFF800000, t.dbl_mant);
-    CHECK_EQ(0X7FFFFFFE, t.word);
-    // 0x0FF00FF0 -> 2.6739096+e08
-    CHECK_EQ(2.6739096e08, t.b);
-  }
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.a = 2.147483646e+09;       // 0x7FFFFFFE -> 0xFF80000041DFFFFF as double.
+  t.b_word = 0x0ff00ff0;       // 0x0FF00FF0 -> 0x as double.
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+
+  CHECK_EQ(0x41DFFFFF, t.dbl_exp);
+  CHECK_EQ(0xFF800000, t.dbl_mant);
+  CHECK_EQ(0X7FFFFFFE, t.word);
+  // 0x0FF00FF0 -> 2.6739096e+08
+  CHECK_EQ(2.6739096e08, t.b);
 }
 
 
 TEST(MIPS11) {
+  // Do not run test on MIPS32r6, as these instructions are removed.
+  if (IsMipsArchVariant(kMips32r6)) return;
   // Test LWL, LWR, SWL and SWR instructions.
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
diff --git a/test/cctest/test-assembler-mips64.cc b/test/cctest/test-assembler-mips64.cc
new file mode 100644
index 0000000..1ec9a65
--- /dev/null
+++ b/test/cctest/test-assembler-mips64.cc
@@ -0,0 +1,1389 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "src/v8.h"
+
+#include "src/disassembler.h"
+#include "src/factory.h"
+#include "src/macro-assembler.h"
+#include "src/mips64/macro-assembler-mips64.h"
+#include "src/mips64/simulator-mips64.h"
+
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+// Define these function prototypes to match JSEntryFunction in execution.cc.
+typedef Object* (*F1)(int x, int p1, int p2, int p3, int p4);
+typedef Object* (*F2)(int x, int y, int p2, int p3, int p4);
+typedef Object* (*F3)(void* p, int p1, int p2, int p3, int p4);
+
+
+#define __ assm.
+
+
+TEST(MIPS0) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  MacroAssembler assm(isolate, NULL, 0);
+
+  // Addition.
+  __ addu(v0, a0, a1);
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F2 f = FUNCTION_CAST<F2>(code->entry());
+  int64_t res =
+      reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+  ::printf("f() = %ld\n", res);
+  CHECK_EQ(0xabcL, res);
+}
+
+
+TEST(MIPS1) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  MacroAssembler assm(isolate, NULL, 0);
+  Label L, C;
+
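+  // Sum a0 + (a0-1) + ... + 1 into v0 with a backward branch loop;
+  // called with 50 below, so the expected result is 1275.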
+  __ mov(a1, a0);
+  __ li(v0, 0);
+  __ b(&C);
+  __ nop();
+
+  __ bind(&L);
+  __ addu(v0, v0, a1);
+  __ addiu(a1, a1, -1);
+
+  __ bind(&C);
+  __ xori(v1, a1, 0);
+  __ Branch(&L, ne, v1, Operand((int64_t)0));
+  __ nop();
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F1 f = FUNCTION_CAST<F1>(code->entry());
+  int64_t res =
+     reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 50, 0, 0, 0, 0));
+  ::printf("f() = %ld\n", res);
+  CHECK_EQ(1275L, res);
+}
+
+
+TEST(MIPS2) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  MacroAssembler assm(isolate, NULL, 0);
+
+  Label exit, error;
+
+  // ----- Test all instructions.
+
+  // Test lui, ori, and addiu, used in the li pseudo-instruction.
+  // This way we can then safely load registers with chosen values.
+
+  __ ori(a4, zero_reg, 0);
+  __ lui(a4, 0x1234);
+  __ ori(a4, a4, 0);
+  __ ori(a4, a4, 0x0f0f);
+  __ ori(a4, a4, 0xf0f0);
+  __ addiu(a5, a4, 1);
+  __ addiu(a6, a5, -0x10);
+
+  // Load values in temporary registers.
+  __ li(a4, 0x00000004);
+  __ li(a5, 0x00001234);
+  __ li(a6, 0x12345678);
+  __ li(a7, 0x7fffffff);
+  __ li(t0, 0xfffffffc);
+  __ li(t1, 0xffffedcc);
+  __ li(t2, 0xedcba988);
+  __ li(t3, 0x80000000);
+
+  // SPECIAL class.
+  __ srl(v0, a6, 8);    // 0x00123456
+  __ sll(v0, v0, 11);   // 0x91a2b000
+  __ sra(v0, v0, 3);    // 0xf2345600
+  __ srav(v0, v0, a4);  // 0xff234560
+  __ sllv(v0, v0, a4);  // 0xf2345600
+  __ srlv(v0, v0, a4);  // 0x0f234560
+  __ Branch(&error, ne, v0, Operand(0x0f234560));
+  __ nop();
+
+  __ addu(v0, a4, a5);  // 0x00001238
+  __ subu(v0, v0, a4);  // 0x00001234
+  __ Branch(&error, ne, v0, Operand(0x00001234));
+  __ nop();
+  __ addu(v1, a7, a4);  // 32-bit addu result is sign-extended into 64-bit reg.
+  __ Branch(&error, ne, v1, Operand(0xffffffff80000003));
+  __ nop();
+  __ subu(v1, t3, a4);  // 0x7ffffffc
+  __ Branch(&error, ne, v1, Operand(0x7ffffffc));
+  __ nop();
+
+  __ and_(v0, a5, a6);  // 0x0000000000001230
+  __ or_(v0, v0, a5);   // 0x0000000000001234
+  __ xor_(v0, v0, a6);  // 0x000000001234444c
+  __ nor(v0, v0, a6);   // 0xffffffffedcba987
+  __ Branch(&error, ne, v0, Operand(0xffffffffedcba983));
+  __ nop();
+
+  // Shift both 32-bit numbers left to preserve the meaning of the next
+  // comparison.
+  __ dsll32(a7, a7, 0);
+  __ dsll32(t3, t3, 0);
+
+  __ slt(v0, t3, a7);
+  __ Branch(&error, ne, v0, Operand(0x1));
+  __ nop();
+  __ sltu(v0, t3, a7);
+  __ Branch(&error, ne, v0, Operand(zero_reg));
+  __ nop();
+
+  // Restore original values in registers.
+  __ dsrl32(a7, a7, 0);
+  __ dsrl32(t3, t3, 0);
+  // End of SPECIAL class.
+
+  __ addiu(v0, zero_reg, 0x7421);  // 0x00007421
+  __ addiu(v0, v0, -0x1);          // 0x00007420
+  __ addiu(v0, v0, -0x20);         // 0x00007400
+  __ Branch(&error, ne, v0, Operand(0x00007400));
+  __ nop();
+  __ addiu(v1, a7, 0x1);  // 0x80000000 - result is sign-extended.
+  __ Branch(&error, ne, v1, Operand(0xffffffff80000000));
+  __ nop();
+
+  __ slti(v0, a5, 0x00002000);  // 0x1
+  __ slti(v0, v0, 0xffff8000);  // 0x0
+  __ Branch(&error, ne, v0, Operand(zero_reg));
+  __ nop();
+  __ sltiu(v0, a5, 0x00002000);  // 0x1
+  __ sltiu(v0, v0, 0x00008000);  // 0x1
+  __ Branch(&error, ne, v0, Operand(0x1));
+  __ nop();
+
+  __ andi(v0, a5, 0xf0f0);  // 0x00001030
+  __ ori(v0, v0, 0x8a00);   // 0x00009a30
+  __ xori(v0, v0, 0x83cc);  // 0x000019fc
+  __ Branch(&error, ne, v0, Operand(0x000019fc));
+  __ nop();
+  __ lui(v1, 0x8123);  // Result is sign-extended into 64bit register.
+  __ Branch(&error, ne, v1, Operand(0xffffffff81230000));
+  __ nop();
+
+  // Bit twiddling instructions & conditional moves.
+  // Uses a4-t3 as set above.
+  __ Clz(v0, a4);       // 29
+  __ Clz(v1, a5);       // 19
+  __ addu(v0, v0, v1);  // 48
+  __ Clz(v1, a6);       // 3
+  __ addu(v0, v0, v1);  // 51
+  __ Clz(v1, t3);       // 0
+  __ addu(v0, v0, v1);  // 51
+  __ Branch(&error, ne, v0, Operand(51));
+  __ Movn(a0, a7, a4);  // Move a0<-a7 (a4 is NOT 0).
+  __ Ins(a0, a5, 12, 8);  // 0x7ff34fff
+  __ Branch(&error, ne, a0, Operand(0x7ff34fff));
+  __ Movz(a0, t2, t3);    // a0 not updated (t3 is NOT 0).
+  __ Ext(a1, a0, 8, 12);  // 0x34f
+  __ Branch(&error, ne, a1, Operand(0x34f));
+  __ Movz(a0, t2, v1);    // a0<-t2, v1 is 0, from 8 instr back.
+  __ Branch(&error, ne, a0, Operand(t2));
+
+  // Everything was correctly executed. Load the expected result.
+  __ li(v0, 0x31415926);
+  __ b(&exit);
+  __ nop();
+
+  __ bind(&error);
+  // Got an error. Return a wrong result.
+  __ li(v0, 666);
+
+  __ bind(&exit);
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F2 f = FUNCTION_CAST<F2>(code->entry());
+  int64_t res =
+      reinterpret_cast<int64_t>(CALL_GENERATED_CODE(f, 0xab0, 0xc, 0, 0, 0));
+  ::printf("f() = %ld\n", res);
+
+  CHECK_EQ(0x31415926L, res);
+}
+
+
+TEST(MIPS3) {
+  // Test floating point instructions.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    double a;
+    double b;
+    double c;
+    double d;
+    double e;
+    double f;
+    double g;
+    double h;
+    double i;
+  } T;
+  T t;
+
+  // Create a function that accepts &t, and loads, manipulates, and stores
+  // the doubles t.a ... t.f.
+  MacroAssembler assm(isolate, NULL, 0);
+  Label L, C;
+
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+  __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+  __ add_d(f8, f4, f6);
+  __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, c)) );  // c = a + b.
+
+  __ mov_d(f10, f8);  // c
+  __ neg_d(f12, f6);  // -b
+  __ sub_d(f10, f10, f12);
+  __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, d)) );  // d = c - (-b).
+
+  __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, b)) );   // b = a.
+
+  __ li(a4, 120);
+  __ mtc1(a4, f14);
+  __ cvt_d_w(f14, f14);   // f14 = 120.0.
+  __ mul_d(f10, f10, f14);
+  __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, e)) );  // e = d * 120 = 1.8066e16.
+
+  __ div_d(f12, f10, f4);
+  __ sdc1(f12, MemOperand(a0, OFFSET_OF(T, f)) );  // f = e / a = 120.44.
+
+  __ sqrt_d(f14, f12);
+  __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, g)) );
+  // g = sqrt(f) = 10.97451593465515908537
+
+  if (kArchVariant == kMips64r2) {
+    __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, h)) );
+    __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, i)) );
+    __ madd_d(f14, f6, f4, f6);
+    __ sdc1(f14, MemOperand(a0, OFFSET_OF(T, h)) );
+  }
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.a = 1.5e14;
+  t.b = 2.75e11;
+  t.c = 0.0;
+  t.d = 0.0;
+  t.e = 0.0;
+  t.f = 0.0;
+  t.h = 1.5;
+  t.i = 2.75;
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+  CHECK_EQ(1.5e14, t.a);
+  CHECK_EQ(1.5e14, t.b);
+  CHECK_EQ(1.50275e14, t.c);
+  CHECK_EQ(1.50550e14, t.d);
+  CHECK_EQ(1.8066e16, t.e);
+  CHECK_EQ(120.44, t.f);
+  CHECK_EQ(10.97451593465515908537, t.g);
+  if (kArchVariant == kMips64r2) {
+    CHECK_EQ(6.875, t.h);
+  }
+}
+
+
+TEST(MIPS4) {
+  // Test moves between floating point and integer registers.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    double a;
+    double b;
+    double c;
+    double d;
+    int64_t high;
+    int64_t low;
+  } T;
+  T t;
+
+  Assembler assm(isolate, NULL, 0);
+  Label L, C;
+
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
+  __ ldc1(f5, MemOperand(a0, OFFSET_OF(T, b)));
+
+  // Swap f4 and f5, using three integer registers a4-a6:
+  // two 32-bit chunks and one 64-bit chunk.
+  // mXhc1 (mfhc1/mthc1) is mips32/64-r2 only, not r1,
+  // but we will not support r1 in practice.
+  __ mfc1(a4, f4);
+  __ mfhc1(a5, f4);
+  __ dmfc1(a6, f5);
+
+  __ mtc1(a4, f5);
+  __ mthc1(a5, f5);
+  __ dmtc1(a6, f4);
+
+  // Store the swapped f4 and f5 back to memory.
+  __ sdc1(f4, MemOperand(a0, OFFSET_OF(T, a)));
+  __ sdc1(f5, MemOperand(a0, OFFSET_OF(T, c)));
+
+  // Test sign extension of move operations from coprocessor.
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, d)));
+  __ mfhc1(a4, f4);
+  __ mfc1(a5, f4);
+
+  __ sd(a4, MemOperand(a0, OFFSET_OF(T, high)));
+  __ sd(a5, MemOperand(a0, OFFSET_OF(T, low)));
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.a = 1.5e22;
+  t.b = 2.75e11;
+  t.c = 17.17;
+  t.d = -2.75e11;
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+
+  CHECK_EQ(2.75e11, t.a);
+  CHECK_EQ(2.75e11, t.b);
+  CHECK_EQ(1.5e22, t.c);
+  CHECK_EQ(0xffffffffc25001d1L, t.high);
+  CHECK_EQ(0xffffffffbf800000L, t.low);
+}
+
+
+TEST(MIPS5) {
+  // Test conversions between doubles and integers.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    double a;
+    double b;
+    int i;
+    int j;
+  } T;
+  T t;
+
+  Assembler assm(isolate, NULL, 0);
+  Label L, C;
+
+  // Load all structure elements to registers.
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+  __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+  __ lw(a4, MemOperand(a0, OFFSET_OF(T, i)) );
+  __ lw(a5, MemOperand(a0, OFFSET_OF(T, j)) );
+
+  // Convert double in f4 to int in element i.
+  __ cvt_w_d(f8, f4);
+  __ mfc1(a6, f8);
+  __ sw(a6, MemOperand(a0, OFFSET_OF(T, i)) );
+
+  // Convert double in f6 to int in element j.
+  __ cvt_w_d(f10, f6);
+  __ mfc1(a7, f10);
+  __ sw(a7, MemOperand(a0, OFFSET_OF(T, j)) );
+
+  // Convert int in original i (a4) to double in a.
+  __ mtc1(a4, f12);
+  __ cvt_d_w(f0, f12);
+  __ sdc1(f0, MemOperand(a0, OFFSET_OF(T, a)) );
+
+  // Convert int in original j (a5) to double in b.
+  __ mtc1(a5, f14);
+  __ cvt_d_w(f2, f14);
+  __ sdc1(f2, MemOperand(a0, OFFSET_OF(T, b)) );
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.a = 1.5e4;
+  t.b = 2.75e8;
+  t.i = 12345678;
+  t.j = -100000;
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+
+  CHECK_EQ(12345678.0, t.a);
+  CHECK_EQ(-100000.0, t.b);
+  CHECK_EQ(15000, t.i);
+  CHECK_EQ(275000000, t.j);
+}
+
+
+TEST(MIPS6) {
+  // Test simple memory loads and stores.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    uint32_t ui;
+    int32_t si;
+    int32_t r1;
+    int32_t r2;
+    int32_t r3;
+    int32_t r4;
+    int32_t r5;
+    int32_t r6;
+  } T;
+  T t;
+
+  Assembler assm(isolate, NULL, 0);
+  Label L, C;
+
+  // Basic word load/store.
+  __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)) );
+  __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)) );
+
+  // lh with positive data.
+  __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)) );
+  __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)) );
+
+  // lh with negative data.
+  __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)) );
+  __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)) );
+
+  // lhu with negative data.
+  __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)) );
+  __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)) );
+
+  // lb with negative data.
+  __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)) );
+  __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)) );
+
+  // sh writes only 1/2 of word.
+  __ lui(t1, 0x3333);
+  __ ori(t1, t1, 0x3333);
+  __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
+  __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)) );
+  __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)) );
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.ui = 0x11223344;
+  t.si = 0x99aabbcc;
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+
+  CHECK_EQ(0x11223344, t.r1);
+  CHECK_EQ(0x3344, t.r2);
+  CHECK_EQ(0xffffbbcc, t.r3);
+  CHECK_EQ(0x0000bbcc, t.r4);
+  CHECK_EQ(0xffffffcc, t.r5);
+  CHECK_EQ(0x3333bbcc, t.r6);
+}
+
+
+TEST(MIPS7) {
+  // Test floating point compare and branch instructions.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    double a;
+    double b;
+    double c;
+    double d;
+    double e;
+    double f;
+    int32_t result;
+  } T;
+  T t;
+
+  // Create a function that accepts &t, and loads, manipulates, and stores
+  // the doubles t.a ... t.f.
+  MacroAssembler assm(isolate, NULL, 0);
+  Label neither_is_nan, less_than, outa_here;
+
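+  // MIPS r6 drops the FP condition codes: c.cond/bc1t give way to cmp.cond
+  // writing an FP register plus bc1nez/bc1eqz, hence the two paths below.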
+  __ ldc1(f4, MemOperand(a0, OFFSET_OF(T, a)) );
+  __ ldc1(f6, MemOperand(a0, OFFSET_OF(T, b)) );
+  if (kArchVariant != kMips64r6) {
+    __ c(UN, D, f4, f6);
+    __ bc1f(&neither_is_nan);
+  } else {
+    __ cmp(UN, L, f2, f4, f6);
+    __ bc1eqz(&neither_is_nan, f2);
+  }
+  __ nop();
+  __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
+  __ Branch(&outa_here);
+
+  __ bind(&neither_is_nan);
+
+  if (kArchVariant == kMips64r6) {
+    __ cmp(OLT, L, f2, f6, f4);
+    __ bc1nez(&less_than, f2);
+  } else {
+    __ c(OLT, D, f6, f4, 2);
+    __ bc1t(&less_than, 2);
+  }
+
+  __ nop();
+  __ sw(zero_reg, MemOperand(a0, OFFSET_OF(T, result)) );
+  __ Branch(&outa_here);
+
+  __ bind(&less_than);
+  __ Addu(a4, zero_reg, Operand(1));
+  __ sw(a4, MemOperand(a0, OFFSET_OF(T, result)) );  // Set true.
+
+
+  // This test-case should have additional tests.
+
+  __ bind(&outa_here);
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.a = 1.5e14;
+  t.b = 2.75e11;
+  t.c = 2.0;
+  t.d = -4.0;
+  t.e = 0.0;
+  t.f = 0.0;
+  t.result = 0;
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+  CHECK_EQ(1.5e14, t.a);
+  CHECK_EQ(2.75e11, t.b);
+  CHECK_EQ(1, t.result);
+}
+
+
+TEST(MIPS8) {
+  // Test ROTR and ROTRV instructions.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    int32_t input;
+    int32_t result_rotr_4;
+    int32_t result_rotr_8;
+    int32_t result_rotr_12;
+    int32_t result_rotr_16;
+    int32_t result_rotr_20;
+    int32_t result_rotr_24;
+    int32_t result_rotr_28;
+    int32_t result_rotrv_4;
+    int32_t result_rotrv_8;
+    int32_t result_rotrv_12;
+    int32_t result_rotrv_16;
+    int32_t result_rotrv_20;
+    int32_t result_rotrv_24;
+    int32_t result_rotrv_28;
+  } T;
+  T t;
+
+  MacroAssembler assm(isolate, NULL, 0);
+
+  // Basic word load.
+  __ lw(a4, MemOperand(a0, OFFSET_OF(T, input)) );
+
+  // ROTR instruction (called through the Ror macro).
+  __ Ror(a5, a4, 0x0004);
+  __ Ror(a6, a4, 0x0008);
+  __ Ror(a7, a4, 0x000c);
+  __ Ror(t0, a4, 0x0010);
+  __ Ror(t1, a4, 0x0014);
+  __ Ror(t2, a4, 0x0018);
+  __ Ror(t3, a4, 0x001c);
+
+  // Basic word store.
+  __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotr_4)) );
+  __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotr_8)) );
+  __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotr_12)) );
+  __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotr_16)) );
+  __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotr_20)) );
+  __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotr_24)) );
+  __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotr_28)) );
+
+  // ROTRV instruction (called through the Ror macro).
+  __ li(t3, 0x0004);
+  __ Ror(a5, a4, t3);
+  __ li(t3, 0x0008);
+  __ Ror(a6, a4, t3);
+  __ li(t3, 0x000C);
+  __ Ror(a7, a4, t3);
+  __ li(t3, 0x0010);
+  __ Ror(t0, a4, t3);
+  __ li(t3, 0x0014);
+  __ Ror(t1, a4, t3);
+  __ li(t3, 0x0018);
+  __ Ror(t2, a4, t3);
+  __ li(t3, 0x001C);
+  __ Ror(t3, a4, t3);
+
+  // Basic word store.
+  __ sw(a5, MemOperand(a0, OFFSET_OF(T, result_rotrv_4)) );
+  __ sw(a6, MemOperand(a0, OFFSET_OF(T, result_rotrv_8)) );
+  __ sw(a7, MemOperand(a0, OFFSET_OF(T, result_rotrv_12)) );
+  __ sw(t0, MemOperand(a0, OFFSET_OF(T, result_rotrv_16)) );
+  __ sw(t1, MemOperand(a0, OFFSET_OF(T, result_rotrv_20)) );
+  __ sw(t2, MemOperand(a0, OFFSET_OF(T, result_rotrv_24)) );
+  __ sw(t3, MemOperand(a0, OFFSET_OF(T, result_rotrv_28)) );
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.input = 0x12345678;
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0x0, 0, 0, 0);
+  USE(dummy);
+  CHECK_EQ(0x81234567, t.result_rotr_4);
+  CHECK_EQ(0x78123456, t.result_rotr_8);
+  CHECK_EQ(0x67812345, t.result_rotr_12);
+  CHECK_EQ(0x56781234, t.result_rotr_16);
+  CHECK_EQ(0x45678123, t.result_rotr_20);
+  CHECK_EQ(0x34567812, t.result_rotr_24);
+  CHECK_EQ(0x23456781, t.result_rotr_28);
+
+  CHECK_EQ(0x81234567, t.result_rotrv_4);
+  CHECK_EQ(0x78123456, t.result_rotrv_8);
+  CHECK_EQ(0x67812345, t.result_rotrv_12);
+  CHECK_EQ(0x56781234, t.result_rotrv_16);
+  CHECK_EQ(0x45678123, t.result_rotrv_20);
+  CHECK_EQ(0x34567812, t.result_rotrv_24);
+  CHECK_EQ(0x23456781, t.result_rotrv_28);
+}
+
+
+TEST(MIPS9) {
+  // Test BRANCH improvements.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  MacroAssembler assm(isolate, NULL, 0);
+  Label exit, exit2, exit3;
+
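+  // Operands of increasing width exercise the Branch macro's different
+  // compare-and-branch sequences; the test only checks that assembly succeeds.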
+  __ Branch(&exit, ge, a0, Operand(zero_reg));
+  __ Branch(&exit2, ge, a0, Operand(0x00001FFF));
+  __ Branch(&exit3, ge, a0, Operand(0x0001FFFF));
+
+  __ bind(&exit);
+  __ bind(&exit2);
+  __ bind(&exit3);
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+}
+
+
+TEST(MIPS10) {
+  // Test conversions between doubles and long integers.
+  // Test how the long ints map to FP register pairs.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    double a;
+    double a_converted;
+    double b;
+    int32_t dbl_mant;
+    int32_t dbl_exp;
+    int32_t long_hi;
+    int32_t long_lo;
+    int64_t long_as_int64;
+    int32_t b_long_hi;
+    int32_t b_long_lo;
+    int64_t b_long_as_int64;
+  } T;
+  T t;
+
+  Assembler assm(isolate, NULL, 0);
+  Label L, C;
+
+  if (kArchVariant == kMips64r2) {
+    // Rewritten for FR=1 FPU mode:
+    //  -  32 FP regs of 64 bits each, no odd/even pairs.
+    //  -  Note that cvt_l_d/cvt_d_l ARE legal in FR=1 mode.
+    // Load all structure elements to registers.
+    __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, a)));
+
+    // Save the raw bits of the double.
+    __ mfc1(a4, f0);
+    __ mfhc1(a5, f0);
+    __ sw(a4, MemOperand(a0, OFFSET_OF(T, dbl_mant)));
+    __ sw(a5, MemOperand(a0, OFFSET_OF(T, dbl_exp)));
+
+    // Convert double in f0 to long, save hi/lo parts.
+    __ cvt_l_d(f0, f0);
+    __ mfc1(a4, f0);  // f0 LS 32 bits of long.
+    __ mfhc1(a5, f0);  // f0 MS 32 bits of long.
+    __ sw(a4, MemOperand(a0, OFFSET_OF(T, long_lo)));
+    __ sw(a5, MemOperand(a0, OFFSET_OF(T, long_hi)));
+
+    // Combine the high/low ints, convert back to double.
+    __ dsll32(a6, a5, 0);  // Move a5 to high bits of a6.
+    __ or_(a6, a6, a4);
+    __ dmtc1(a6, f1);
+    __ cvt_d_l(f1, f1);
+    __ sdc1(f1, MemOperand(a0, OFFSET_OF(T, a_converted)));
+
+
+    // Convert the b long integers to double b.
+    __ lw(a4, MemOperand(a0, OFFSET_OF(T, b_long_lo)));
+    __ lw(a5, MemOperand(a0, OFFSET_OF(T, b_long_hi)));
+    __ mtc1(a4, f8);  // f8 LS 32-bits.
+    __ mthc1(a5, f8);  // f8 MS 32-bits.
+    __ cvt_d_l(f10, f8);
+    __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, b)));
+
+    // Convert double b back to long-int.
+    __ ldc1(f31, MemOperand(a0, OFFSET_OF(T, b)));
+    __ cvt_l_d(f31, f31);
+    __ dmfc1(a7, f31);
+    __ sd(a7, MemOperand(a0, OFFSET_OF(T, b_long_as_int64)));
+
+
+    __ jr(ra);
+    __ nop();
+
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Handle<Code> code = isolate->factory()->NewCode(
+        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+    F3 f = FUNCTION_CAST<F3>(code->entry());
+    t.a = 2.147483647e9;       // 0x7fffffff -> 0x41DFFFFFFFC00000 as double.
+    t.b_long_hi = 0x000000ff;  // 0xFF00FF00FF -> 0x426FE01FE01FE000 as double.
+    t.b_long_lo = 0x00ff00ff;
+    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+    USE(dummy);
+
+    CHECK_EQ(0x41DFFFFF, t.dbl_exp);
+    CHECK_EQ(0xFFC00000, t.dbl_mant);
+    CHECK_EQ(0, t.long_hi);
+    CHECK_EQ(0x7fffffff, t.long_lo);
+    CHECK_EQ(2.147483647e9, t.a_converted);
+
+    // 0xFF00FF00FF -> 1.095233372415e12.
+    CHECK_EQ(1.095233372415e12, t.b);
+    CHECK_EQ(0xFF00FF00FF, t.b_long_as_int64);
+  }
+}
+
+
+TEST(MIPS11) {
+  // Do not run test on MIPS64r6, as these instructions are removed.
+  if (kArchVariant != kMips64r6) {
+    // Test LWL, LWR, SWL and SWR instructions.
+    CcTest::InitializeVM();
+    Isolate* isolate = CcTest::i_isolate();
+    HandleScope scope(isolate);
+
+    typedef struct {
+      int32_t reg_init;
+      int32_t mem_init;
+      int32_t lwl_0;
+      int32_t lwl_1;
+      int32_t lwl_2;
+      int32_t lwl_3;
+      int32_t lwr_0;
+      int32_t lwr_1;
+      int32_t lwr_2;
+      int32_t lwr_3;
+      int32_t swl_0;
+      int32_t swl_1;
+      int32_t swl_2;
+      int32_t swl_3;
+      int32_t swr_0;
+      int32_t swr_1;
+      int32_t swr_2;
+      int32_t swr_3;
+    } T;
+    T t;
+
+    Assembler assm(isolate, NULL, 0);
+
+    // Test all combinations of LWL and vAddr.
+    __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ lwl(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwl_0)));
+
+    __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ lwl(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
+    __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwl_1)));
+
+    __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ lwl(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
+    __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwl_2)));
+
+    __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ lwl(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
+    __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwl_3)));
+
+    // Test all combinations of LWR and vAddr.
+    __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ lwr(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a4, MemOperand(a0, OFFSET_OF(T, lwr_0)));
+
+    __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ lwr(a5, MemOperand(a0, OFFSET_OF(T, mem_init) + 1));
+    __ sw(a5, MemOperand(a0, OFFSET_OF(T, lwr_1)));
+
+    __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ lwr(a6, MemOperand(a0, OFFSET_OF(T, mem_init) + 2));
+    __ sw(a6, MemOperand(a0, OFFSET_OF(T, lwr_2)) );
+
+    __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ lwr(a7, MemOperand(a0, OFFSET_OF(T, mem_init) + 3));
+    __ sw(a7, MemOperand(a0, OFFSET_OF(T, lwr_3)) );
+
+    // Test all combinations of SWL and vAddr.
+    __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
+    __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ swl(a4, MemOperand(a0, OFFSET_OF(T, swl_0)));
+
+    __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a5, MemOperand(a0, OFFSET_OF(T, swl_1)));
+    __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ swl(a5, MemOperand(a0, OFFSET_OF(T, swl_1) + 1));
+
+    __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a6, MemOperand(a0, OFFSET_OF(T, swl_2)));
+    __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ swl(a6, MemOperand(a0, OFFSET_OF(T, swl_2) + 2));
+
+    __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a7, MemOperand(a0, OFFSET_OF(T, swl_3)));
+    __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ swl(a7, MemOperand(a0, OFFSET_OF(T, swl_3) + 3));
+
+    // Test all combinations of SWR and vAddr.
+    __ lw(a4, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
+    __ lw(a4, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ swr(a4, MemOperand(a0, OFFSET_OF(T, swr_0)));
+
+    __ lw(a5, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a5, MemOperand(a0, OFFSET_OF(T, swr_1)));
+    __ lw(a5, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ swr(a5, MemOperand(a0, OFFSET_OF(T, swr_1) + 1));
+
+    __ lw(a6, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a6, MemOperand(a0, OFFSET_OF(T, swr_2)));
+    __ lw(a6, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ swr(a6, MemOperand(a0, OFFSET_OF(T, swr_2) + 2));
+
+    __ lw(a7, MemOperand(a0, OFFSET_OF(T, mem_init)));
+    __ sw(a7, MemOperand(a0, OFFSET_OF(T, swr_3)));
+    __ lw(a7, MemOperand(a0, OFFSET_OF(T, reg_init)));
+    __ swr(a7, MemOperand(a0, OFFSET_OF(T, swr_3) + 3));
+
+    __ jr(ra);
+    __ nop();
+
+    CodeDesc desc;
+    assm.GetCode(&desc);
+    Handle<Code> code = isolate->factory()->NewCode(
+        desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+    F3 f = FUNCTION_CAST<F3>(code->entry());
+    t.reg_init = 0xaabbccdd;
+    t.mem_init = 0x11223344;
+
+    Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+    USE(dummy);
+
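+    // The expected values assume a little-endian target: LWL/LWR merge the
+    // loaded bytes into the destination register; SWL/SWR store partial words.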
+    CHECK_EQ(0x44bbccdd, t.lwl_0);
+    CHECK_EQ(0x3344ccdd, t.lwl_1);
+    CHECK_EQ(0x223344dd, t.lwl_2);
+    CHECK_EQ(0x11223344, t.lwl_3);
+
+    CHECK_EQ(0x11223344, t.lwr_0);
+    CHECK_EQ(0xaa112233, t.lwr_1);
+    CHECK_EQ(0xaabb1122, t.lwr_2);
+    CHECK_EQ(0xaabbcc11, t.lwr_3);
+
+    CHECK_EQ(0x112233aa, t.swl_0);
+    CHECK_EQ(0x1122aabb, t.swl_1);
+    CHECK_EQ(0x11aabbcc, t.swl_2);
+    CHECK_EQ(0xaabbccdd, t.swl_3);
+
+    CHECK_EQ(0xaabbccdd, t.swr_0);
+    CHECK_EQ(0xbbccdd44, t.swr_1);
+    CHECK_EQ(0xccdd3344, t.swr_2);
+    CHECK_EQ(0xdd223344, t.swr_3);
+  }
+}
+
+
+TEST(MIPS12) {
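+  // Exercise push/pop and load/store sequences that the assembler's peephole
+  // optimizer may eliminate; only the final result in t.y1 is checked.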
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+      int32_t  x;
+      int32_t  y;
+      int32_t  y1;
+      int32_t  y2;
+      int32_t  y3;
+      int32_t  y4;
+  } T;
+  T t;
+
+  MacroAssembler assm(isolate, NULL, 0);
+
+  __ mov(t2, fp);  // Save frame pointer.
+  __ mov(fp, a0);  // Access struct T by fp.
+  __ lw(a4, MemOperand(a0, OFFSET_OF(T, y)));
+  __ lw(a7, MemOperand(a0, OFFSET_OF(T, y4)));
+
+  __ addu(a5, a4, a7);
+  __ subu(t0, a4, a7);
+  __ nop();
+  __ push(a4);  // These instructions disappear after opt.
+  __ Pop();
+  __ addu(a4, a4, a4);
+  __ nop();
+  __ Pop();     // These instructions disappear after opt.
+  __ push(a7);
+  __ nop();
+  __ push(a7);  // These instructions disappear after opt.
+  __ pop(a7);
+  __ nop();
+  __ push(a7);
+  __ pop(t0);
+  __ nop();
+  __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
+  __ lw(a4, MemOperand(fp, OFFSET_OF(T, y)));
+  __ nop();
+  __ sw(a4, MemOperand(fp, OFFSET_OF(T, y)));
+  __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
+  __ nop();
+  __ push(a5);
+  __ lw(a5, MemOperand(fp, OFFSET_OF(T, y)));
+  __ pop(a5);
+  __ nop();
+  __ push(a5);
+  __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
+  __ pop(a5);
+  __ nop();
+  __ push(a5);
+  __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
+  __ pop(a6);
+  __ nop();
+  __ push(a6);
+  __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
+  __ pop(a5);
+  __ nop();
+  __ push(a5);
+  __ lw(a6, MemOperand(fp, OFFSET_OF(T, y)));
+  __ pop(a7);
+  __ nop();
+
+  __ mov(fp, t2);
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.x = 1;
+  t.y = 2;
+  t.y1 = 3;
+  t.y2 = 4;
+  t.y3 = 0xBABA;
+  t.y4 = 0xDEDA;
+
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+
+  CHECK_EQ(3, t.y1);
+}
+
+
+TEST(MIPS13) {
+  // Test Cvt_d_uw and Trunc_uw_d macros.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    double cvt_big_out;
+    double cvt_small_out;
+    uint32_t trunc_big_out;
+    uint32_t trunc_small_out;
+    uint32_t cvt_big_in;
+    uint32_t cvt_small_in;
+  } T;
+  T t;
+
+  MacroAssembler assm(isolate, NULL, 0);
+
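+  // Cvt_d_uw and Trunc_uw_d handle unsigned 32-bit values; the plain signed
+  // cvt instructions cannot represent values above INT32_MAX directly.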
+  __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_small_in)));
+  __ Cvt_d_uw(f10, a4, f22);
+  __ sdc1(f10, MemOperand(a0, OFFSET_OF(T, cvt_small_out)));
+
+  __ Trunc_uw_d(f10, f10, f22);
+  __ swc1(f10, MemOperand(a0, OFFSET_OF(T, trunc_small_out)));
+
+  __ sw(a4, MemOperand(a0, OFFSET_OF(T, cvt_big_in)));
+  __ Cvt_d_uw(f8, a4, f22);
+  __ sdc1(f8, MemOperand(a0, OFFSET_OF(T, cvt_big_out)));
+
+  __ Trunc_uw_d(f8, f8, f22);
+  __ swc1(f8, MemOperand(a0, OFFSET_OF(T, trunc_big_out)));
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+
+  t.cvt_big_in = 0xFFFFFFFF;
+  t.cvt_small_in  = 333;
+
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+
+  CHECK_EQ(t.cvt_big_out, static_cast<double>(t.cvt_big_in));
+  CHECK_EQ(t.cvt_small_out, static_cast<double>(t.cvt_small_in));
+
+  CHECK_EQ(static_cast<int>(t.trunc_big_out), static_cast<int>(t.cvt_big_in));
+  CHECK_EQ(static_cast<int>(t.trunc_small_out),
+           static_cast<int>(t.cvt_small_in));
+}
+
+
+TEST(MIPS14) {
+  // Test round, floor, ceil, trunc, cvt.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+#define ROUND_STRUCT_ELEMENT(x) \
+  int32_t x##_up_out; \
+  int32_t x##_down_out; \
+  int32_t neg_##x##_up_out; \
+  int32_t neg_##x##_down_out; \
+  uint32_t x##_err1_out; \
+  uint32_t x##_err2_out; \
+  uint32_t x##_err3_out; \
+  uint32_t x##_err4_out; \
+  int32_t x##_invalid_result;
+
+  typedef struct {
+    double round_up_in;
+    double round_down_in;
+    double neg_round_up_in;
+    double neg_round_down_in;
+    double err1_in;
+    double err2_in;
+    double err3_in;
+    double err4_in;
+
+    ROUND_STRUCT_ELEMENT(round)
+    ROUND_STRUCT_ELEMENT(floor)
+    ROUND_STRUCT_ELEMENT(ceil)
+    ROUND_STRUCT_ELEMENT(trunc)
+    ROUND_STRUCT_ELEMENT(cvt)
+  } T;
+  T t;
+
+#undef ROUND_STRUCT_ELEMENT
+
+  MacroAssembler assm(isolate, NULL, 0);
+
+  // Save FCSR.
+  __ cfc1(a1, FCSR);
+  // Disable FPU exceptions.
+  __ ctc1(zero_reg, FCSR);
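+  // Each RUN_ROUND_TEST expansion below applies one rounding instruction to
+  // the normal and error inputs and records the resulting FCSR flags.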
+#define RUN_ROUND_TEST(x) \
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_up_in))); \
+  __ x##_w_d(f0, f0); \
+  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_up_out))); \
+  \
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, round_down_in))); \
+  __ x##_w_d(f0, f0); \
+  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_down_out))); \
+  \
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_up_in))); \
+  __ x##_w_d(f0, f0); \
+  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_up_out))); \
+  \
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, neg_round_down_in))); \
+  __ x##_w_d(f0, f0); \
+  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, neg_##x##_down_out))); \
+  \
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err1_in))); \
+  __ ctc1(zero_reg, FCSR); \
+  __ x##_w_d(f0, f0); \
+  __ cfc1(a2, FCSR); \
+  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err1_out))); \
+  \
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err2_in))); \
+  __ ctc1(zero_reg, FCSR); \
+  __ x##_w_d(f0, f0); \
+  __ cfc1(a2, FCSR); \
+  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err2_out))); \
+  \
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err3_in))); \
+  __ ctc1(zero_reg, FCSR); \
+  __ x##_w_d(f0, f0); \
+  __ cfc1(a2, FCSR); \
+  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err3_out))); \
+  \
+  __ ldc1(f0, MemOperand(a0, OFFSET_OF(T, err4_in))); \
+  __ ctc1(zero_reg, FCSR); \
+  __ x##_w_d(f0, f0); \
+  __ cfc1(a2, FCSR); \
+  __ sw(a2, MemOperand(a0, OFFSET_OF(T, x##_err4_out))); \
+  __ swc1(f0, MemOperand(a0, OFFSET_OF(T, x##_invalid_result)));
+
+  RUN_ROUND_TEST(round)
+  RUN_ROUND_TEST(floor)
+  RUN_ROUND_TEST(ceil)
+  RUN_ROUND_TEST(trunc)
+  RUN_ROUND_TEST(cvt)
+
+  // Restore FCSR.
+  __ ctc1(a1, FCSR);
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+
+  t.round_up_in = 123.51;
+  t.round_down_in = 123.49;
+  t.neg_round_up_in = -123.5;
+  t.neg_round_down_in = -123.49;
+  t.err1_in = 123.51;
+  t.err2_in = 1;
+  t.err3_in = static_cast<double>(1) + 0xFFFFFFFF;
+  t.err4_in = NAN;
+
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+
+#define GET_FPU_ERR(x) (static_cast<int>(x & kFCSRFlagMask))
+#define CHECK_ROUND_RESULT(type) \
+  CHECK(GET_FPU_ERR(t.type##_err1_out) & kFCSRInexactFlagMask); \
+  CHECK_EQ(0, GET_FPU_ERR(t.type##_err2_out)); \
+  CHECK(GET_FPU_ERR(t.type##_err3_out) & kFCSRInvalidOpFlagMask); \
+  CHECK(GET_FPU_ERR(t.type##_err4_out) & kFCSRInvalidOpFlagMask); \
+  CHECK_EQ(static_cast<int32_t>(kFPUInvalidResult), t.type##_invalid_result);
+
+  CHECK_ROUND_RESULT(round);
+  CHECK_ROUND_RESULT(floor);
+  CHECK_ROUND_RESULT(ceil);
+  CHECK_ROUND_RESULT(cvt);
+}
+
+
+TEST(MIPS15) {
+  // Test chaining of label usages within instructions (issue 1644).
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  Assembler assm(isolate, NULL, 0);
+
+  Label target;
+  __ beq(v0, v1, &target);
+  __ nop();
+  __ bne(v0, v1, &target);
+  __ nop();
+  __ bind(&target);
+  __ nop();
+}
+
+
+// ----- mips64 tests -----------------------------------------------
+
+TEST(MIPS16) {
+  // Test 64-bit memory loads and stores.
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  typedef struct {
+    int64_t r1;
+    int64_t r2;
+    int64_t r3;
+    int64_t r4;
+    int64_t r5;
+    int64_t r6;
+    uint32_t ui;
+    int32_t si;
+  } T;
+  T t;
+
+  Assembler assm(isolate, NULL, 0);
+  Label L, C;
+
+  // Basic 32-bit word load/store, with unsigned data.
+  __ lw(a4, MemOperand(a0, OFFSET_OF(T, ui)));
+  __ sw(a4, MemOperand(a0, OFFSET_OF(T, r1)));
+
+  // Check that the data got zero-extended into 64-bit a4.
+  __ sd(a4, MemOperand(a0, OFFSET_OF(T, r2)));
+
+  // Basic 32-bit word load/store, with SIGNED data.
+  __ lw(a5, MemOperand(a0, OFFSET_OF(T, si)));
+  __ sw(a5, MemOperand(a0, OFFSET_OF(T, r3)));
+
+  // Check that the data got sign-extended into 64-bit a5.
+  __ sd(a5, MemOperand(a0, OFFSET_OF(T, r4)));
+
+  // 32-bit UNSIGNED word load/store, with SIGNED data.
+  __ lwu(a6, MemOperand(a0, OFFSET_OF(T, si)));
+  __ sw(a6, MemOperand(a0, OFFSET_OF(T, r5)));
+
+  // Check that the data got zero-extended into 64-bit a6.
+  __ sd(a6, MemOperand(a0, OFFSET_OF(T, r6)));
+
+  // lh with positive data.
+  __ lh(a5, MemOperand(a0, OFFSET_OF(T, ui)));
+  __ sw(a5, MemOperand(a0, OFFSET_OF(T, r2)));
+
+  // lh with negative data.
+  __ lh(a6, MemOperand(a0, OFFSET_OF(T, si)));
+  __ sw(a6, MemOperand(a0, OFFSET_OF(T, r3)));
+
+  // lhu with negative data.
+  __ lhu(a7, MemOperand(a0, OFFSET_OF(T, si)));
+  __ sw(a7, MemOperand(a0, OFFSET_OF(T, r4)));
+
+  // lb with negative data.
+  __ lb(t0, MemOperand(a0, OFFSET_OF(T, si)));
+  __ sw(t0, MemOperand(a0, OFFSET_OF(T, r5)));
+
+  // sh writes only 1/2 of word.
+  __ lui(t1, 0x3333);
+  __ ori(t1, t1, 0x3333);
+  __ sw(t1, MemOperand(a0, OFFSET_OF(T, r6)));
+  __ lhu(t1, MemOperand(a0, OFFSET_OF(T, si)));
+  __ sh(t1, MemOperand(a0, OFFSET_OF(T, r6)));
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  t.ui = 0x44332211;
+  t.si = 0x99aabbcc;
+  t.r1 = 0x1111111111111111;
+  t.r2 = 0x2222222222222222;
+  t.r3 = 0x3333333333333333;
+  t.r4 = 0x4444444444444444;
+  t.r5 = 0x5555555555555555;
+  t.r6 = 0x6666666666666666;
+  Object* dummy = CALL_GENERATED_CODE(f, &t, 0, 0, 0, 0);
+  USE(dummy);
+
+  // Unsigned data, 32 & 64.
+  CHECK_EQ(0x1111111144332211L, t.r1);
+  CHECK_EQ(0x0000000000002211L, t.r2);
+
+  // Signed data, 32 & 64.
+  CHECK_EQ(0x33333333ffffbbccL, t.r3);
+  CHECK_EQ(0xffffffff0000bbccL, t.r4);
+
+  // Signed data, 32 & 64.
+  CHECK_EQ(0x55555555ffffffccL, t.r5);
+  CHECK_EQ(0x000000003333bbccL, t.r6);
+}
+
+#undef __
diff --git a/test/cctest/test-assembler-x64.cc b/test/cctest/test-assembler-x64.cc
index 3e541ff..3d305b6 100644
--- a/test/cctest/test-assembler-x64.cc
+++ b/test/cctest/test-assembler-x64.cc
@@ -29,9 +29,10 @@
 
 #include "src/v8.h"
 
-#include "src/macro-assembler.h"
+#include "src/base/platform/platform.h"
 #include "src/factory.h"
-#include "src/platform.h"
+#include "src/macro-assembler.h"
+#include "src/ostreams.h"
 #include "src/serialize.h"
 #include "test/cctest/cctest.h"
 
@@ -69,9 +70,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -92,9 +92,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -125,9 +124,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -148,9 +146,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -180,9 +177,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -209,9 +205,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -238,9 +233,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -263,9 +257,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -286,9 +279,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -311,9 +303,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -341,9 +332,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -366,9 +356,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -401,9 +390,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
 
@@ -431,9 +419,8 @@
   CcTest::InitializeVM();
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Assembler assm(CcTest::i_isolate(), buffer, static_cast<int>(actual_size));
   // Assemble two loops using rax as counter, and verify the ending counts.
@@ -649,7 +636,7 @@
 
 TEST(StackAlignmentForSSE2) {
   CcTest::InitializeVM();
-  CHECK_EQ(0, OS::ActivationFrameAlignment() % 16);
+  CHECK_EQ(0, v8::base::OS::ActivationFrameAlignment() % 16);
 
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope handle_scope(isolate);
@@ -703,7 +690,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
 
   F3 f = FUNCTION_CAST<F3>(code->entry());
@@ -741,7 +729,8 @@
       Code::ComputeFlags(Code::STUB),
       Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
 
   F6 f = FUNCTION_CAST<F6>(code->entry());
diff --git a/test/cctest/test-assembler-x87.cc b/test/cctest/test-assembler-x87.cc
index 1c1b18b..8341f9b 100644
--- a/test/cctest/test-assembler-x87.cc
+++ b/test/cctest/test-assembler-x87.cc
@@ -29,10 +29,11 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/disassembler.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
+#include "src/ostreams.h"
 #include "src/serialize.h"
 #include "test/cctest/cctest.h"
 
@@ -63,7 +64,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F2 f = FUNCTION_CAST<F2>(code->entry());
   int res = f(3, 4);
@@ -99,7 +101,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F1 f = FUNCTION_CAST<F1>(code->entry());
   int res = f(100);
@@ -139,7 +142,8 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
   F1 f = FUNCTION_CAST<F1>(code->entry());
   int res = f(10);
@@ -217,14 +221,15 @@
   Handle<Code> code = isolate->factory()->NewCode(
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
 #endif
 
   F7 f = FUNCTION_CAST<F7>(code->entry());
   CHECK_EQ(kLess, f(1.1, 2.2));
   CHECK_EQ(kEqual, f(2.2, 2.2));
   CHECK_EQ(kGreater, f(3.3, 2.2));
-  CHECK_EQ(kNaN, f(OS::nan_value(), 1.1));
+  CHECK_EQ(kNaN, f(v8::base::OS::nan_value(), 1.1));
 }
 
 
diff --git a/test/cctest/test-ast.cc b/test/cctest/test-ast.cc
index 2285d0c..24819df 100644
--- a/test/cctest/test-ast.cc
+++ b/test/cctest/test-ast.cc
@@ -35,13 +35,13 @@
 using namespace v8::internal;
 
 TEST(List) {
-  v8::internal::V8::Initialize(NULL);
   List<AstNode*>* list = new List<AstNode*>(0);
   CHECK_EQ(0, list->length());
 
   Isolate* isolate = CcTest::i_isolate();
   Zone zone(isolate);
-  AstNodeFactory<AstNullVisitor> factory(&zone);
+  AstNode::IdGen id_gen;
+  AstNodeFactory<AstNullVisitor> factory(&zone, NULL, &id_gen);
   AstNode* node = factory.NewEmptyStatement(RelocInfo::kNoPosition);
   list->Add(node);
   CHECK_EQ(1, list->length());
diff --git a/test/cctest/test-bignum-dtoa.cc b/test/cctest/test-bignum-dtoa.cc
index 1185e4d..9262e01 100644
--- a/test/cctest/test-bignum-dtoa.cc
+++ b/test/cctest/test-bignum-dtoa.cc
@@ -31,8 +31,8 @@
 
 #include "src/bignum-dtoa.h"
 
+#include "src/base/platform/platform.h"
 #include "src/double.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/gay-fixed.h"
 #include "test/cctest/gay-precision.h"
diff --git a/test/cctest/test-bignum.cc b/test/cctest/test-bignum.cc
index 0894a69..47ce2a4 100644
--- a/test/cctest/test-bignum.cc
+++ b/test/cctest/test-bignum.cc
@@ -29,8 +29,8 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/bignum.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
diff --git a/test/cctest/test-checks.cc b/test/cctest/test-checks.cc
new file mode 100644
index 0000000..79e87dd
--- /dev/null
+++ b/test/cctest/test-checks.cc
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/checks.h"
+
+#include "test/cctest/cctest.h"
+
+
+TEST(CheckEqualsZeroAndMinusZero) {
+  CHECK_EQ(0.0, 0.0);
+  CHECK_NE(0.0, -0.0);
+  CHECK_NE(-0.0, 0.0);
+  CHECK_EQ(-0.0, -0.0);
+}
+
+
+TEST(CheckEqualsReflexivity) {
+  double inf = V8_INFINITY;
+  double nan = v8::base::OS::nan_value();
+  double constants[] = {-nan, -inf, -3.1415, -1.0,   -0.1, -0.0,
+                        0.0,  0.1,  1.0,     3.1415, inf,  nan};
+  for (size_t i = 0; i < arraysize(constants); ++i) {
+    CHECK_EQ(constants[i], constants[i]);
+  }
+}
diff --git a/test/cctest/test-circular-queue.cc b/test/cctest/test-circular-queue.cc
index 1f568e2..736a9b7 100644
--- a/test/cctest/test-circular-queue.cc
+++ b/test/cctest/test-circular-queue.cc
@@ -28,6 +28,7 @@
 // Tests of the circular queue.
 
 #include "src/v8.h"
+
 #include "src/circular-queue-inl.h"
 #include "test/cctest/cctest.h"
 
@@ -102,17 +103,15 @@
 typedef v8::base::AtomicWord Record;
 typedef SamplingCircularQueue<Record, 12> TestSampleQueue;
 
-class ProducerThread: public i::Thread {
+class ProducerThread: public v8::base::Thread {
  public:
-  ProducerThread(TestSampleQueue* scq,
-                 int records_per_chunk,
-                 Record value,
-                 i::Semaphore* finished)
-      : Thread("producer"),
+  ProducerThread(TestSampleQueue* scq, int records_per_chunk, Record value,
+                 v8::base::Semaphore* finished)
+      : Thread(Options("producer")),
         scq_(scq),
         records_per_chunk_(records_per_chunk),
         value_(value),
-        finished_(finished) { }
+        finished_(finished) {}
 
   virtual void Run() {
     for (Record i = value_; i < value_ + records_per_chunk_; ++i) {
@@ -129,7 +128,7 @@
   TestSampleQueue* scq_;
   const int records_per_chunk_;
   Record value_;
-  i::Semaphore* finished_;
+  v8::base::Semaphore* finished_;
 };
 
 }  // namespace
@@ -142,7 +141,7 @@
 
   const int kRecordsPerChunk = 4;
   TestSampleQueue scq;
-  i::Semaphore semaphore(0);
+  v8::base::Semaphore semaphore(0);
 
   ProducerThread producer1(&scq, kRecordsPerChunk, 1, &semaphore);
   ProducerThread producer2(&scq, kRecordsPerChunk, 10, &semaphore);
diff --git a/test/cctest/test-code-stubs-arm.cc b/test/cctest/test-code-stubs-arm.cc
index 4b9eb1b..8040344 100644
--- a/test/cctest/test-code-stubs-arm.cc
+++ b/test/cctest/test-code-stubs-arm.cc
@@ -29,10 +29,10 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/code-stubs.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
 #include "src/simulator.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-code-stubs.h"
@@ -47,9 +47,8 @@
                                               bool inline_fastpath) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
@@ -127,7 +126,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  CPU::FlushICache(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
   return (reinterpret_cast<ConvertDToIFunc>(
       reinterpret_cast<intptr_t>(buffer)));
 }
diff --git a/test/cctest/test-code-stubs-arm64.cc b/test/cctest/test-code-stubs-arm64.cc
index be97637..6d5b0f4 100644
--- a/test/cctest/test-code-stubs-arm64.cc
+++ b/test/cctest/test-code-stubs-arm64.cc
@@ -29,10 +29,10 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/code-stubs.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
 #include "src/simulator.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-code-stubs.h"
@@ -47,9 +47,8 @@
                                               bool inline_fastpath) {
   // Allocate an executable page of memory.
   size_t actual_size = 4 * Assembler::kMinimalBufferSize;
-  byte* buffer = static_cast<byte*>(OS::Allocate(actual_size,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(
+      v8::base::OS::Allocate(actual_size, &actual_size, true));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
@@ -123,7 +122,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  CPU::FlushICache(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
   return (reinterpret_cast<ConvertDToIFunc>(
       reinterpret_cast<intptr_t>(buffer)));
 }
diff --git a/test/cctest/test-code-stubs-ia32.cc b/test/cctest/test-code-stubs-ia32.cc
index 3d23d0b..0b4a8d4 100644
--- a/test/cctest/test-code-stubs-ia32.cc
+++ b/test/cctest/test-code-stubs-ia32.cc
@@ -31,10 +31,10 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/code-stubs.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-code-stubs.h"
 
@@ -47,9 +47,8 @@
                                               Register destination_reg) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
diff --git a/test/cctest/test-code-stubs-mips.cc b/test/cctest/test-code-stubs-mips.cc
index 62e2e99..796aa1d 100644
--- a/test/cctest/test-code-stubs-mips.cc
+++ b/test/cctest/test-code-stubs-mips.cc
@@ -29,11 +29,11 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/code-stubs.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
 #include "src/mips/constants-mips.h"
-#include "src/platform.h"
 #include "src/simulator.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-code-stubs.h"
@@ -48,9 +48,8 @@
                                               bool inline_fastpath) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
@@ -128,7 +127,7 @@
 
   CodeDesc desc;
   masm.GetCode(&desc);
-  CPU::FlushICache(buffer, actual_size);
+  CpuFeatures::FlushICache(buffer, actual_size);
   return (reinterpret_cast<ConvertDToIFunc>(
       reinterpret_cast<intptr_t>(buffer)));
 }
diff --git a/test/cctest/test-code-stubs-mips64.cc b/test/cctest/test-code-stubs-mips64.cc
new file mode 100644
index 0000000..025a8ba
--- /dev/null
+++ b/test/cctest/test-code-stubs-mips64.cc
@@ -0,0 +1,188 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/base/platform/platform.h"
+#include "src/code-stubs.h"
+#include "src/factory.h"
+#include "src/macro-assembler.h"
+#include "src/mips64/constants-mips64.h"
+#include "src/simulator.h"
+#include "test/cctest/cctest.h"
+#include "test/cctest/test-code-stubs.h"
+
+using namespace v8::internal;
+
+#define __ masm.
+
+ConvertDToIFunc MakeConvertDToIFuncTrampoline(Isolate* isolate,
+                                              Register source_reg,
+                                              Register destination_reg,
+                                              bool inline_fastpath) {
+  // Allocate an executable page of memory.
+  size_t actual_size;
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
+  CHECK(buffer);
+  HandleScope handles(isolate);
+  MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size));
+  DoubleToIStub stub(isolate, source_reg, destination_reg, 0, true,
+                     inline_fastpath);
+
+  byte* start = stub.GetCode()->instruction_start();
+  Label done;
+
+  // Save callee save registers.
+  __ MultiPush(kCalleeSaved | ra.bit());
+
+  // For softfp, move the input value into f12.
+  if (IsMipsSoftFloatABI) {
+    __ Move(f12, a0, a1);
+  }
+  // Push the double argument.
+  __ Dsubu(sp, sp, Operand(kDoubleSize));
+  __ sdc1(f12, MemOperand(sp));
+  __ Move(source_reg, sp);
+
+  // Save registers to make sure they don't get clobbered.
+  int source_reg_offset = kDoubleSize;
+  int reg_num = 2;
+  for (; reg_num < Register::NumAllocatableRegisters(); ++reg_num) {
+    Register reg = Register::from_code(reg_num);
+    if (!reg.is(destination_reg)) {
+      __ push(reg);
+      source_reg_offset += kPointerSize;
+    }
+  }
+
+  // Re-push the double argument.
+  __ Dsubu(sp, sp, Operand(kDoubleSize));
+  __ sdc1(f12, MemOperand(sp));
+
+  // Call through to the actual stub
+  if (inline_fastpath) {
+    __ ldc1(f12, MemOperand(source_reg));
+    __ TryInlineTruncateDoubleToI(destination_reg, f12, &done);
+    if (destination_reg.is(source_reg) && !source_reg.is(sp)) {
+      // Restore clobbered source_reg.
+      __ Daddu(source_reg, sp, Operand(source_reg_offset));
+    }
+  }
+  __ Call(start, RelocInfo::EXTERNAL_REFERENCE);
+  __ bind(&done);
+
+  __ Daddu(sp, sp, Operand(kDoubleSize));
+
+  // Make sure no registers have been unexpectedly clobbered
+  for (--reg_num; reg_num >= 2; --reg_num) {
+    Register reg = Register::from_code(reg_num);
+    if (!reg.is(destination_reg)) {
+      __ lw(at, MemOperand(sp, 0));
+      __ Assert(eq, kRegisterWasClobbered, reg, Operand(at));
+      __ Daddu(sp, sp, Operand(kPointerSize));
+    }
+  }
+
+  __ Daddu(sp, sp, Operand(kDoubleSize));
+
+  __ Move(v0, destination_reg);
+  Label ok;
+  __ Branch(&ok, eq, v0, Operand(zero_reg));
+  __ bind(&ok);
+
+  // Restore callee save registers.
+  __ MultiPop(kCalleeSaved | ra.bit());
+
+  Label ok1;
+  __ Branch(&ok1, eq, v0, Operand(zero_reg));
+  __ bind(&ok1);
+  __ Ret();
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+  CpuFeatures::FlushICache(buffer, actual_size);
+  return (reinterpret_cast<ConvertDToIFunc>(
+      reinterpret_cast<intptr_t>(buffer)));
+}
+
+#undef __
+
+
+static Isolate* GetIsolateFrom(LocalContext* context) {
+  return reinterpret_cast<Isolate*>((*context)->GetIsolate());
+}
+
+
+int32_t RunGeneratedCodeCallWrapper(ConvertDToIFunc func,
+                                    double from) {
+#ifdef USE_SIMULATOR
+  Simulator::current(Isolate::Current())->CallFP(FUNCTION_ADDR(func), from, 0.);
+  return Simulator::current(Isolate::Current())->get_register(v0.code());
+#else
+  return (*func)(from);
+#endif
+}
+
+
+TEST(ConvertDToI) {
+  CcTest::InitializeVM();
+  LocalContext context;
+  Isolate* isolate = GetIsolateFrom(&context);
+  HandleScope scope(isolate);
+
+#if DEBUG
+  // Verify that the tests actually work with the C version. In the release
+  // code, the compiler optimizes it away because it's all constant, but gets
+  // it wrong, triggering an assert on gcc.
+  RunAllTruncationTests(&ConvertDToICVersion);
+#endif
+
+  Register source_registers[] = {
+      sp, v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
+  Register dest_registers[] = {
+      v0, v1, a0, a1, a2, a3, a4, a5, a6, a7, t0, t1};
+
+  for (size_t s = 0; s < sizeof(source_registers) / sizeof(Register); s++) {
+    for (size_t d = 0; d < sizeof(dest_registers) / sizeof(Register); d++) {
+      RunAllTruncationTests(
+          RunGeneratedCodeCallWrapper,
+          MakeConvertDToIFuncTrampoline(isolate,
+                                        source_registers[s],
+                                        dest_registers[d],
+                                        false));
+      RunAllTruncationTests(
+          RunGeneratedCodeCallWrapper,
+          MakeConvertDToIFuncTrampoline(isolate,
+                                        source_registers[s],
+                                        dest_registers[d],
+                                        true));
+    }
+  }
+}
diff --git a/test/cctest/test-code-stubs-x64.cc b/test/cctest/test-code-stubs-x64.cc
index e9b35e3..b58b073 100644
--- a/test/cctest/test-code-stubs-x64.cc
+++ b/test/cctest/test-code-stubs-x64.cc
@@ -29,10 +29,10 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/code-stubs.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-code-stubs.h"
 
@@ -46,9 +46,8 @@
                                               Register destination_reg) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
diff --git a/test/cctest/test-code-stubs-x87.cc b/test/cctest/test-code-stubs-x87.cc
index 3d23d0b..0b4a8d4 100644
--- a/test/cctest/test-code-stubs-x87.cc
+++ b/test/cctest/test-code-stubs-x87.cc
@@ -31,10 +31,10 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/code-stubs.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-code-stubs.h"
 
@@ -47,9 +47,8 @@
                                               Register destination_reg) {
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   HandleScope handles(isolate);
   MacroAssembler assm(isolate, buffer, static_cast<int>(actual_size));
diff --git a/test/cctest/test-code-stubs.cc b/test/cctest/test-code-stubs.cc
index f3977d4..95035aa 100644
--- a/test/cctest/test-code-stubs.cc
+++ b/test/cctest/test-code-stubs.cc
@@ -31,10 +31,10 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/code-stubs.h"
 #include "src/factory.h"
 #include "src/macro-assembler.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-code-stubs.h"
 
@@ -62,7 +62,7 @@
     }
   } else {
     uint64_t big_result =
-        (BitCast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
+        (bit_cast<uint64_t>(d) & Double::kSignificandMask) | Double::kHiddenBit;
     big_result = big_result >> (Double::kPhysicalSignificandSize - exponent);
     result = static_cast<uint32_t>(big_result);
   }
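
`BitCast` becoming `bit_cast` here is just a rename, but the helper is worth a note: it reinterprets an object's representation through `memcpy`, which is the well-defined way to read a double's IEEE-754 bits, exactly what the exponent/significand extraction above does. A sketch of the usual pre-C++20 implementation (an assumed shape, not copied from V8; C++20 offers `std::bit_cast` directly):

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

template <typename Dest, typename Source>
Dest bit_cast(const Source& source) {
  static_assert(sizeof(Dest) == sizeof(Source), "size mismatch");
  Dest dest;
  std::memcpy(&dest, &source, sizeof(dest));  // Defined behavior, unlike
  return dest;                                // dereferencing a
}                                             // reinterpret_cast'ed pointer.

int main() {
  double d = 1.0;
  // Extract the raw IEEE-754 bits, as the significand code above does.
  std::printf("%016llx\n",
              static_cast<unsigned long long>(bit_cast<uint64_t>(d)));
}
```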
@@ -92,7 +92,7 @@
 
 // #define NaN and Infinity so that it's possible to cut-and-paste these tests
 // directly to a .js file and run them.
-#define NaN (OS::nan_value())
+#define NaN (v8::base::OS::nan_value())
 #define Infinity (std::numeric_limits<double>::infinity())
 #define RunOneTruncationTest(p1, p2) \
     RunOneTruncationTestWithTest(callWrapper, func, p1, p2)
@@ -172,3 +172,19 @@
 #undef NaN
 #undef Infinity
 #undef RunOneTruncationTest
+
+
+TEST(CodeStubMajorKeys) {
+  CcTest::InitializeVM();
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+
+#define CHECK_STUB(NAME)                        \
+  {                                             \
+    HandleScope scope(isolate);                 \
+    NAME##Stub stub_impl(0xabcd, isolate);      \
+    CodeStub* stub = &stub_impl;                \
+    CHECK_EQ(stub->MajorKey(), CodeStub::NAME); \
+  }
+  CODE_STUB_LIST(CHECK_STUB);
+}
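
The new `CodeStubMajorKeys` test pairs a locally defined `CHECK_STUB` macro with the `CODE_STUB_LIST` X-macro, so one check is stamped out per stub without listing the stubs twice. A self-contained sketch of the X-macro idiom (illustrative names, not V8's):

```cpp
#include <cstdio>

#define SHAPE_LIST(V) V(Circle) V(Square) V(Triangle)

enum ShapeKind {
#define DECLARE_KIND(NAME) k##NAME,
  SHAPE_LIST(DECLARE_KIND)  // Expands to kCircle, kSquare, kTriangle,
#undef DECLARE_KIND
};

const char* ShapeName(ShapeKind kind) {
  switch (kind) {
#define NAME_CASE(NAME) case k##NAME: return #NAME;
    SHAPE_LIST(NAME_CASE)  // One case per list entry.
#undef NAME_CASE
  }
  return "unknown";
}

int main() { std::printf("%s\n", ShapeName(kSquare)); }  // Prints "Square".
```

Because the enum and the name table both expand from `SHAPE_LIST`, adding an entry to the list updates every consumer at once, which is the same guarantee the stub test relies on.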
diff --git a/test/cctest/test-compiler.cc b/test/cctest/test-compiler.cc
index cd537b5..4d6e005 100644
--- a/test/cctest/test-compiler.cc
+++ b/test/cctest/test-compiler.cc
@@ -32,6 +32,7 @@
 
 #include "src/compiler.h"
 #include "src/disasm.h"
+#include "src/parser.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
@@ -49,7 +50,7 @@
   Handle<String> internalized_name =
       isolate->factory()->InternalizeUtf8String(name);
   Handle<JSObject> global(isolate->context()->global_object());
-  Runtime::SetObjectProperty(isolate, global, internalized_name, object, NONE,
+  Runtime::SetObjectProperty(isolate, global, internalized_name, object,
                              SLOPPY).Check();
 }
 
@@ -58,15 +59,10 @@
   Isolate* isolate = CcTest::i_isolate();
   Handle<String> source_code = isolate->factory()->NewStringFromUtf8(
       CStrVector(source)).ToHandleChecked();
-  Handle<SharedFunctionInfo> shared_function =
-      Compiler::CompileScript(source_code,
-                              Handle<String>(),
-                              0,
-                              0,
-                              false,
-                              Handle<Context>(isolate->native_context()),
-                              NULL, NULL, NO_CACHED_DATA,
-                              NOT_NATIVES_CODE);
+  Handle<SharedFunctionInfo> shared_function = Compiler::CompileScript(
+      source_code, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, NULL,
+      v8::ScriptCompiler::kNoCompileOptions, NOT_NATIVES_CODE);
   return isolate->factory()->NewFunctionFromSharedFunctionInfo(
       shared_function, isolate->native_context());
 }
@@ -231,18 +227,18 @@
   Handle<JSObject> global(isolate->context()->global_object());
   Execution::Call(isolate, fun0, global, 0, NULL).Check();
 
-  Handle<String> foo_string = isolate->factory()->InternalizeOneByteString(
-      STATIC_ASCII_VECTOR("foo"));
+  Handle<String> foo_string =
+      isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("foo"));
   Handle<Object> fun1 = Object::GetProperty(
       isolate->global_object(), foo_string).ToHandleChecked();
   CHECK(fun1->IsJSFunction());
 
-  Handle<Object> argv[] = { isolate->factory()->InternalizeOneByteString(
-      STATIC_ASCII_VECTOR("hello")) };
+  Handle<Object> argv[] = {isolate->factory()->InternalizeOneByteString(
+      STATIC_CHAR_VECTOR("hello"))};
   Execution::Call(isolate,
                   Handle<JSFunction>::cast(fun1),
                   global,
-                  ARRAY_SIZE(argv),
+                  arraysize(argv),
                   argv).Check();
 }
 
@@ -313,8 +309,9 @@
   Handle<FixedArray> feedback_vector(f->shared()->feedback_vector());
 
   // Verify that we gathered feedback.
-  CHECK_EQ(1, feedback_vector->length());
-  CHECK(feedback_vector->get(0)->IsJSFunction());
+  int expected_count = FLAG_vector_ics ? 2 : 1;
+  CHECK_EQ(expected_count, feedback_vector->length());
+  CHECK(feedback_vector->get(expected_count - 1)->IsJSFunction());
 
   CompileRun("%OptimizeFunctionOnNextCall(f); f(fun1);");
 
@@ -322,7 +319,8 @@
   // of the full code.
   CHECK(f->IsOptimized());
   CHECK(f->shared()->has_deoptimization_support());
-  CHECK(f->shared()->feedback_vector()->get(0)->IsJSFunction());
+  CHECK(f->shared()->feedback_vector()->
+        get(expected_count - 1)->IsJSFunction());
 }
 
 
@@ -348,16 +346,15 @@
           *v8::Handle<v8::Function>::Cast(
               CcTest::global()->Get(v8_str("morphing_call"))));
 
-  // morphing_call should have one feedback vector slot for the call to
-  // call_target().
-  CHECK_EQ(1, f->shared()->feedback_vector()->length());
+  int expected_count = FLAG_vector_ics ? 2 : 1;
+  CHECK_EQ(expected_count, f->shared()->feedback_vector()->length());
   // And yet it's not compiled.
   CHECK(!f->shared()->is_compiled());
 
   CompileRun("morphing_call();");
 
   // The vector should have the same size despite the new scoping.
-  CHECK_EQ(1, f->shared()->feedback_vector()->length());
+  CHECK_EQ(expected_count, f->shared()->feedback_vector()->length());
   CHECK(f->shared()->is_compiled());
 }
 
diff --git a/test/cctest/test-condition-variable.cc b/test/cctest/test-condition-variable.cc
deleted file mode 100644
index 5734c19..0000000
--- a/test/cctest/test-condition-variable.cc
+++ /dev/null
@@ -1,304 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "src/platform/condition-variable.h"
-#include "src/platform/time.h"
-#include "test/cctest/cctest.h"
-
-using namespace ::v8::internal;
-
-
-TEST(WaitForAfterNofityOnSameThread) {
-  for (int n = 0; n < 10; ++n) {
-    Mutex mutex;
-    ConditionVariable cv;
-
-    LockGuard<Mutex> lock_guard(&mutex);
-
-    cv.NotifyOne();
-    CHECK_EQ(false, cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
-
-    cv.NotifyAll();
-    CHECK_EQ(false, cv.WaitFor(&mutex, TimeDelta::FromMicroseconds(n)));
-  }
-}
-
-
-class ThreadWithMutexAndConditionVariable V8_FINAL : public Thread {
- public:
-  ThreadWithMutexAndConditionVariable()
-      : Thread("ThreadWithMutexAndConditionVariable"),
-        running_(false), finished_(false) {}
-  virtual ~ThreadWithMutexAndConditionVariable() {}
-
-  virtual void Run() V8_OVERRIDE {
-    LockGuard<Mutex> lock_guard(&mutex_);
-    running_ = true;
-    cv_.NotifyOne();
-    while (running_) {
-      cv_.Wait(&mutex_);
-    }
-    finished_ = true;
-    cv_.NotifyAll();
-  }
-
-  bool running_;
-  bool finished_;
-  ConditionVariable cv_;
-  Mutex mutex_;
-};
-
-
-TEST(MultipleThreadsWithSeparateConditionVariables) {
-  static const int kThreadCount = 128;
-  ThreadWithMutexAndConditionVariable threads[kThreadCount];
-
-  for (int n = 0; n < kThreadCount; ++n) {
-    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
-    CHECK(!threads[n].running_);
-    CHECK(!threads[n].finished_);
-    threads[n].Start();
-    // Wait for nth thread to start.
-    while (!threads[n].running_) {
-      threads[n].cv_.Wait(&threads[n].mutex_);
-    }
-  }
-
-  for (int n = kThreadCount - 1; n >= 0; --n) {
-    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
-    CHECK(threads[n].running_);
-    CHECK(!threads[n].finished_);
-  }
-
-  for (int n = 0; n < kThreadCount; ++n) {
-    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
-    CHECK(threads[n].running_);
-    CHECK(!threads[n].finished_);
-    // Tell the nth thread to quit.
-    threads[n].running_ = false;
-    threads[n].cv_.NotifyOne();
-  }
-
-  for (int n = kThreadCount - 1; n >= 0; --n) {
-    // Wait for nth thread to quit.
-    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
-    while (!threads[n].finished_) {
-      threads[n].cv_.Wait(&threads[n].mutex_);
-    }
-    CHECK(!threads[n].running_);
-    CHECK(threads[n].finished_);
-  }
-
-  for (int n = 0; n < kThreadCount; ++n) {
-    threads[n].Join();
-    LockGuard<Mutex> lock_guard(&threads[n].mutex_);
-    CHECK(!threads[n].running_);
-    CHECK(threads[n].finished_);
-  }
-}
-
-
-class ThreadWithSharedMutexAndConditionVariable V8_FINAL : public Thread {
- public:
-  ThreadWithSharedMutexAndConditionVariable()
-      : Thread("ThreadWithSharedMutexAndConditionVariable"),
-        running_(false), finished_(false), cv_(NULL), mutex_(NULL) {}
-  virtual ~ThreadWithSharedMutexAndConditionVariable() {}
-
-  virtual void Run() V8_OVERRIDE {
-    LockGuard<Mutex> lock_guard(mutex_);
-    running_ = true;
-    cv_->NotifyAll();
-    while (running_) {
-      cv_->Wait(mutex_);
-    }
-    finished_ = true;
-    cv_->NotifyAll();
-  }
-
-  bool running_;
-  bool finished_;
-  ConditionVariable* cv_;
-  Mutex* mutex_;
-};
-
-
-TEST(MultipleThreadsWithSharedSeparateConditionVariables) {
-  static const int kThreadCount = 128;
-  ThreadWithSharedMutexAndConditionVariable threads[kThreadCount];
-  ConditionVariable cv;
-  Mutex mutex;
-
-  for (int n = 0; n < kThreadCount; ++n) {
-    threads[n].mutex_ = &mutex;
-    threads[n].cv_ = &cv;
-  }
-
-  // Start all threads.
-  {
-    LockGuard<Mutex> lock_guard(&mutex);
-    for (int n = 0; n < kThreadCount; ++n) {
-      CHECK(!threads[n].running_);
-      CHECK(!threads[n].finished_);
-      threads[n].Start();
-    }
-  }
-
-  // Wait for all threads to start.
-  {
-    LockGuard<Mutex> lock_guard(&mutex);
-    for (int n = kThreadCount - 1; n >= 0; --n) {
-      while (!threads[n].running_) {
-        cv.Wait(&mutex);
-      }
-    }
-  }
-
-  // Make sure that all threads are running.
-  {
-    LockGuard<Mutex> lock_guard(&mutex);
-    for (int n = 0; n < kThreadCount; ++n) {
-      CHECK(threads[n].running_);
-      CHECK(!threads[n].finished_);
-    }
-  }
-
-  // Tell all threads to quit.
-  {
-    LockGuard<Mutex> lock_guard(&mutex);
-    for (int n = kThreadCount - 1; n >= 0; --n) {
-      CHECK(threads[n].running_);
-      CHECK(!threads[n].finished_);
-      // Tell the nth thread to quit.
-      threads[n].running_ = false;
-    }
-    cv.NotifyAll();
-  }
-
-  // Wait for all threads to quit.
-  {
-    LockGuard<Mutex> lock_guard(&mutex);
-    for (int n = 0; n < kThreadCount; ++n) {
-      while (!threads[n].finished_) {
-        cv.Wait(&mutex);
-      }
-    }
-  }
-
-  // Make sure all threads are finished.
-  {
-    LockGuard<Mutex> lock_guard(&mutex);
-    for (int n = kThreadCount - 1; n >= 0; --n) {
-      CHECK(!threads[n].running_);
-      CHECK(threads[n].finished_);
-    }
-  }
-
-  // Join all threads.
-  for (int n = 0; n < kThreadCount; ++n) {
-    threads[n].Join();
-  }
-}
-
-
-class LoopIncrementThread V8_FINAL : public Thread {
- public:
-  LoopIncrementThread(int rem,
-                      int* counter,
-                      int limit,
-                      int thread_count,
-                      ConditionVariable* cv,
-                      Mutex* mutex)
-      : Thread("LoopIncrementThread"), rem_(rem), counter_(counter),
-        limit_(limit), thread_count_(thread_count), cv_(cv), mutex_(mutex) {
-    CHECK_LT(rem, thread_count);
-    CHECK_EQ(0, limit % thread_count);
-  }
-
-  virtual void Run() V8_OVERRIDE {
-    int last_count = -1;
-    while (true) {
-      LockGuard<Mutex> lock_guard(mutex_);
-      int count = *counter_;
-      while (count % thread_count_ != rem_ && count < limit_) {
-        cv_->Wait(mutex_);
-        count = *counter_;
-      }
-      if (count >= limit_) break;
-      CHECK_EQ(*counter_, count);
-      if (last_count != -1) {
-        CHECK_EQ(last_count + (thread_count_ - 1), count);
-      }
-      count++;
-      *counter_ = count;
-      last_count = count;
-      cv_->NotifyAll();
-    }
-  }
-
- private:
-  const int rem_;
-  int* counter_;
-  const int limit_;
-  const int thread_count_;
-  ConditionVariable* cv_;
-  Mutex* mutex_;
-};
-
-
-TEST(LoopIncrement) {
-  static const int kMaxThreadCount = 16;
-  Mutex mutex;
-  ConditionVariable cv;
-  for (int thread_count = 1; thread_count < kMaxThreadCount; ++thread_count) {
-    int limit = thread_count * 100;
-    int counter = 0;
-
-    // Setup the threads.
-    Thread** threads = new Thread*[thread_count];
-    for (int n = 0; n < thread_count; ++n) {
-      threads[n] = new LoopIncrementThread(
-          n, &counter, limit, thread_count, &cv, &mutex);
-    }
-
-    // Start all threads.
-    for (int n = thread_count - 1; n >= 0; --n) {
-      threads[n]->Start();
-    }
-
-    // Join and cleanup all threads.
-    for (int n = 0; n < thread_count; ++n) {
-      threads[n]->Join();
-      delete threads[n];
-    }
-    delete[] threads;
-
-    CHECK_EQ(limit, counter);
-  }
-}
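
The condition-variable tests are deleted here rather than ported; given that this same change adds gtest and gmock to the DEPS, they were presumably relocated to a unittest target rather than dropped outright. The pattern they exercised is still the canonical one: set the flag and notify while holding the mutex, and always wait in a predicate loop so spurious wakeups are harmless. A standard-library sketch (`std::condition_variable`, not V8's wrapper):

```cpp
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>

int main() {
  std::mutex mutex;
  std::condition_variable cv;
  bool running = false;

  std::thread worker([&] {
    std::unique_lock<std::mutex> lock(mutex);
    running = true;
    cv.notify_one();  // Like cv_.NotifyOne() in the deleted test.
  });

  {
    std::unique_lock<std::mutex> lock(mutex);
    cv.wait(lock, [&] { return running; });  // Predicate loop, as the
  }                                          // old `while (!running_) Wait()`.
  std::printf("worker started\n");
  worker.join();
}
```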
diff --git a/test/cctest/test-constantpool.cc b/test/cctest/test-constantpool.cc
index 67767a2..4536576 100644
--- a/test/cctest/test-constantpool.cc
+++ b/test/cctest/test-constantpool.cc
@@ -31,7 +31,6 @@
 TEST(ConstantPoolSmall) {
   LocalContext context;
   Isolate* isolate = CcTest::i_isolate();
-  Heap* heap = isolate->heap();
   Factory* factory = isolate->factory();
   v8::HandleScope scope(context->GetIsolate());
 
@@ -51,7 +50,7 @@
 
   // Check getters and setters.
   int64_t big_number = V8_2PART_UINT64_C(0x12345678, 9ABCDEF0);
-  Handle<Object> object = factory->NewHeapNumber(4.0);
+  Handle<Object> object = factory->NewHeapNumber(4.0, IMMUTABLE, TENURED);
   Code* code = DummyCode(&context);
   array->set(0, big_number);
   array->set(1, 0.5);
@@ -67,21 +66,12 @@
   CHECK_EQ(code, array->get_heap_ptr_entry(4));
   CHECK_EQ(*object, array->get_heap_ptr_entry(5));
   CHECK_EQ(50, array->get_int32_entry(6));
-
-  // Check pointers are updated on GC.
-  Object* old_ptr = array->get_heap_ptr_entry(5);
-  CHECK_EQ(*object, old_ptr);
-  heap->CollectGarbage(NEW_SPACE);
-  Object* new_ptr = array->get_heap_ptr_entry(5);
-  CHECK_NE(*object, old_ptr);
-  CHECK_EQ(*object, new_ptr);
 }
 
 
 TEST(ConstantPoolExtended) {
   LocalContext context;
   Isolate* isolate = CcTest::i_isolate();
-  Heap* heap = isolate->heap();
   Factory* factory = isolate->factory();
   v8::HandleScope scope(context->GetIsolate());
 
@@ -116,12 +106,14 @@
   // Check small and large sections don't overlap.
   int64_t small_section_int64 = V8_2PART_UINT64_C(0x56781234, DEF09ABC);
   Code* small_section_code_ptr = DummyCode(&context);
-  Handle<Object> small_section_heap_ptr = factory->NewHeapNumber(4.0);
+  Handle<Object> small_section_heap_ptr =
+      factory->NewHeapNumber(4.0, IMMUTABLE, TENURED);
   int32_t small_section_int32 = 0xab12cd45;
 
   int64_t extended_section_int64 = V8_2PART_UINT64_C(0x12345678, 9ABCDEF0);
   Code* extended_section_code_ptr = DummyCode(&context);
-  Handle<Object> extended_section_heap_ptr = factory->NewHeapNumber(4.0);
+  Handle<Object> extended_section_heap_ptr =
+      factory->NewHeapNumber(5.0, IMMUTABLE, TENURED);
   int32_t extended_section_int32 = 0xef67ab89;
 
   for (int i = array->first_index(ConstantPoolArray::INT64, kSmall);
@@ -178,14 +170,6 @@
       CHECK_EQ(extended_section_int32, array->get_int32_entry(i));
     }
   }
-  // Check pointers are updated on GC in extended section.
-  int index = array->first_index(ConstantPoolArray::HEAP_PTR, kExtended);
-  Object* old_ptr = array->get_heap_ptr_entry(index);
-  CHECK_EQ(*extended_section_heap_ptr, old_ptr);
-  heap->CollectGarbage(NEW_SPACE);
-  Object* new_ptr = array->get_heap_ptr_entry(index);
-  CHECK_NE(*extended_section_heap_ptr, old_ptr);
-  CHECK_EQ(*extended_section_heap_ptr, new_ptr);
 }
 
 
@@ -242,3 +226,86 @@
   int expected_int32_indexs[] = { 1, 2, 3, 4 };
   CheckIterator(array, ConstantPoolArray::INT32, expected_int32_indexs, 4);
 }
+
+
+TEST(ConstantPoolPreciseGC) {
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  v8::HandleScope scope(context->GetIsolate());
+
+  ConstantPoolArray::NumberOfEntries small(1, 0, 0, 1);
+  Handle<ConstantPoolArray> array = factory->NewConstantPoolArray(small);
+
+  // Check that the store buffer knows which entries are pointers and which are
+  // not.  To do this, make non-pointer entries which look like new space
+  // pointers but are actually invalid and ensure the GC doesn't try to move
+  // them.
+  Handle<HeapObject> object = factory->NewHeapNumber(4.0);
+  Object* raw_ptr = *object;
+  // If interpreted as a pointer, this should be right inside the heap number
+  // which will cause a crash when trying to look up the 'map' pointer.
+  intptr_t invalid_ptr = reinterpret_cast<intptr_t>(raw_ptr) + kInt32Size;
+  int32_t invalid_ptr_int32 = static_cast<int32_t>(invalid_ptr);
+  int64_t invalid_ptr_int64 = static_cast<int64_t>(invalid_ptr);
+  array->set(0, invalid_ptr_int64);
+  array->set(1, invalid_ptr_int32);
+
+  // Ensure we perform a scan on scavenge for the constant pool's page.
+  MemoryChunk::FromAddress(array->address())->set_scan_on_scavenge(true);
+  heap->CollectGarbage(NEW_SPACE);
+
+  // Check the object was moved by GC.
+  CHECK_NE(*object, raw_ptr);
+
+  // Check the non-pointer entries weren't changed.
+  CHECK_EQ(invalid_ptr_int64, array->get_int64_entry(0));
+  CHECK_EQ(invalid_ptr_int32, array->get_int32_entry(1));
+}
+
+
+TEST(ConstantPoolCompacting) {
+  if (i::FLAG_never_compact) return;
+  i::FLAG_always_compact = true;
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+  v8::HandleScope scope(context->GetIsolate());
+
+  ConstantPoolArray::NumberOfEntries small(0, 0, 1, 0);
+  ConstantPoolArray::NumberOfEntries extended(0, 0, 1, 0);
+  Handle<ConstantPoolArray> array =
+      factory->NewExtendedConstantPoolArray(small, extended);
+
+  // Start a second old-space page so that the heap pointer added to the
+  // constant pool array ends up on an evacuation candidate page.
+  Page* first_page = heap->old_data_space()->anchor()->next_page();
+  {
+    HandleScope scope(isolate);
+    Handle<HeapObject> temp =
+        factory->NewFixedDoubleArray(900 * KB / kDoubleSize, TENURED);
+    CHECK(heap->InOldDataSpace(temp->address()));
+    Handle<HeapObject> heap_ptr =
+        factory->NewHeapNumber(5.0, IMMUTABLE, TENURED);
+    CHECK(heap->InOldDataSpace(heap_ptr->address()));
+    CHECK(!first_page->Contains(heap_ptr->address()));
+    array->set(0, *heap_ptr);
+    array->set(1, *heap_ptr);
+  }
+
+  // Check heap pointers are correctly updated on GC.
+  Object* old_ptr = array->get_heap_ptr_entry(0);
+  Handle<Object> object(old_ptr, isolate);
+  CHECK_EQ(old_ptr, *object);
+  CHECK_EQ(old_ptr, array->get_heap_ptr_entry(1));
+
+  // Force compacting garbage collection.
+  CHECK(FLAG_always_compact);
+  heap->CollectAllGarbage(Heap::kNoGCFlags);
+
+  CHECK_NE(old_ptr, *object);
+  CHECK_EQ(*object, array->get_heap_ptr_entry(0));
+  CHECK_EQ(*object, array->get_heap_ptr_entry(1));
+}
diff --git a/test/cctest/test-conversions.cc b/test/cctest/test-conversions.cc
index 62403ff..93bed7f 100644
--- a/test/cctest/test-conversions.cc
+++ b/test/cctest/test-conversions.cc
@@ -29,7 +29,7 @@
 
 #include "src/v8.h"
 
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
@@ -172,9 +172,12 @@
 
 TEST(NonStrDecimalLiteral) {
   UnicodeCache uc;
-  CHECK(std::isnan(StringToDouble(&uc, " ", NO_FLAGS, OS::nan_value())));
-  CHECK(std::isnan(StringToDouble(&uc, "", NO_FLAGS, OS::nan_value())));
-  CHECK(std::isnan(StringToDouble(&uc, " ", NO_FLAGS, OS::nan_value())));
+  CHECK(std::isnan(
+      StringToDouble(&uc, " ", NO_FLAGS, v8::base::OS::nan_value())));
+  CHECK(
+      std::isnan(StringToDouble(&uc, "", NO_FLAGS, v8::base::OS::nan_value())));
+  CHECK(std::isnan(
+      StringToDouble(&uc, " ", NO_FLAGS, v8::base::OS::nan_value())));
   CHECK_EQ(0.0, StringToDouble(&uc, "", NO_FLAGS));
   CHECK_EQ(0.0, StringToDouble(&uc, " ", NO_FLAGS));
 }
diff --git a/test/cctest/test-cpu-ia32.cc b/test/cctest/test-cpu-ia32.cc
deleted file mode 100644
index f1e07cf..0000000
--- a/test/cctest/test-cpu-ia32.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "src/cpu.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
-
-TEST(RequiredFeaturesX64) {
-  // Test for the features required by every x86 CPU in compat/legacy mode.
-  CPU cpu;
-  CHECK(cpu.has_sahf());
-}
diff --git a/test/cctest/test-cpu-profiler.cc b/test/cctest/test-cpu-profiler.cc
index 77b5bd8..8d429d2 100644
--- a/test/cctest/test-cpu-profiler.cc
+++ b/test/cctest/test-cpu-profiler.cc
@@ -30,8 +30,8 @@
 #include "src/v8.h"
 
 #include "include/v8-profiler.h"
+#include "src/base/platform/platform.h"
 #include "src/cpu-profiler-inl.h"
-#include "src/platform.h"
 #include "src/smart-pointers.h"
 #include "src/utils.h"
 #include "test/cctest/cctest.h"
@@ -46,7 +46,6 @@
 using i::ProfilerEventsProcessor;
 using i::ScopedVector;
 using i::SmartPointer;
-using i::TimeDelta;
 using i::Vector;
 
 
@@ -55,7 +54,7 @@
   CpuProfilesCollection profiles(isolate->heap());
   ProfileGenerator generator(&profiles);
   SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
-          &generator, NULL, TimeDelta::FromMicroseconds(100)));
+          &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
   processor->Start();
   processor->StopSynchronously();
 }
@@ -143,7 +142,7 @@
   profiles->StartProfiling("", false);
   ProfileGenerator generator(profiles);
   SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
-          &generator, NULL, TimeDelta::FromMicroseconds(100)));
+          &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
   processor->Start();
   CpuProfiler profiler(isolate, profiles, &generator, processor.get());
 
@@ -204,7 +203,7 @@
   profiles->StartProfiling("", false);
   ProfileGenerator generator(profiles);
   SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
-          &generator, NULL, TimeDelta::FromMicroseconds(100)));
+          &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
   processor->Start();
   CpuProfiler profiler(isolate, profiles, &generator, processor.get());
 
@@ -273,7 +272,7 @@
   profiles->StartProfiling("", false);
   ProfileGenerator generator(profiles);
   SmartPointer<ProfilerEventsProcessor> processor(new ProfilerEventsProcessor(
-          &generator, NULL, TimeDelta::FromMicroseconds(100)));
+          &generator, NULL, v8::base::TimeDelta::FromMicroseconds(100)));
   processor->Start();
   CpuProfiler profiler(isolate, profiles, &generator, processor.get());
 
@@ -283,7 +282,7 @@
   sample->pc = code->address();
   sample->tos = 0;
   sample->frames_count = i::TickSample::kMaxFramesCount;
-  for (int i = 0; i < sample->frames_count; ++i) {
+  for (unsigned i = 0; i < sample->frames_count; ++i) {
     sample->stack[i] = code->address();
   }
   processor->FinishTickSample();
@@ -470,7 +469,7 @@
   const v8::CpuProfileNode* result = FindChild(isolate, node, name);
   if (!result) {
     char buffer[100];
-    i::SNPrintF(Vector<char>(buffer, ARRAY_SIZE(buffer)),
+    i::SNPrintF(Vector<char>(buffer, arraysize(buffer)),
                 "Failed to GetChild: %s", name);
     FATAL(buffer);
   }
@@ -553,8 +552,8 @@
     v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
   };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200);
-  function->Call(env->Global(), ARRAY_SIZE(args), args);
+      RunProfiler(env.local(), function, args, arraysize(args), 200);
+  function->Call(env->Global(), arraysize(args), args);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
 
@@ -576,13 +575,13 @@
 
   const char* barBranch[] = { "bar", "delay", "loop" };
   CheckSimpleBranch(env->GetIsolate(), fooNode, barBranch,
-                    ARRAY_SIZE(barBranch));
+                    arraysize(barBranch));
   const char* bazBranch[] = { "baz", "delay", "loop" };
   CheckSimpleBranch(env->GetIsolate(), fooNode, bazBranch,
-                    ARRAY_SIZE(bazBranch));
+                    arraysize(bazBranch));
   const char* delayBranch[] = { "delay", "loop" };
   CheckSimpleBranch(env->GetIsolate(), fooNode, delayBranch,
-                    ARRAY_SIZE(delayBranch));
+                    arraysize(delayBranch));
 
   profile->Delete();
 }
@@ -631,8 +630,8 @@
     v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
   };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200);
-  function->Call(env->Global(), ARRAY_SIZE(args), args);
+      RunProfiler(env.local(), function, args, arraysize(args), 200);
+  function->Call(env->Global(), arraysize(args), args);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
 
@@ -668,7 +667,7 @@
     v8::Integer::New(env->GetIsolate(), profiling_interval_ms)
   };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200, true);
+      RunProfiler(env.local(), function, args, arraysize(args), 200, true);
 
   CHECK_LE(200, profile->GetSamplesCount());
   uint64_t end_time = profile->GetEndTime();
@@ -724,7 +723,7 @@
     v8::Integer::New(env->GetIsolate(), repeat_count)
   };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+      RunProfiler(env.local(), function, args, arraysize(args), 100);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
 
@@ -791,11 +790,11 @@
  private:
   void Wait() {
     if (is_warming_up_) return;
-    double start = i::OS::TimeCurrentMillis();
+    double start = v8::base::OS::TimeCurrentMillis();
     double duration = 0;
     while (duration < min_duration_ms_) {
-      i::OS::Sleep(1);
-      duration = i::OS::TimeCurrentMillis() - start;
+      v8::base::OS::Sleep(1);
+      duration = v8::base::OS::TimeCurrentMillis() - start;
     }
   }
 
@@ -844,7 +843,7 @@
   int32_t repeat_count = 1;
   v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 180);
+      RunProfiler(env.local(), function, args, arraysize(args), 180);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   const v8::CpuProfileNode* startNode =
@@ -894,14 +893,14 @@
     v8::Handle<v8::Value> args[] = {
       v8::Integer::New(isolate, warm_up_iterations)
     };
-    function->Call(env->Global(), ARRAY_SIZE(args), args);
+    function->Call(env->Global(), arraysize(args), args);
     accessors.set_warming_up(false);
   }
 
   int32_t repeat_count = 100;
   v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 200);
+      RunProfiler(env.local(), function, args, arraysize(args), 200);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   const v8::CpuProfileNode* startNode =
@@ -955,7 +954,7 @@
   int32_t repeat_count = 1;
   v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+      RunProfiler(env.local(), function, args, arraysize(args), 100);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   const v8::CpuProfileNode* startNode =
@@ -1005,14 +1004,14 @@
     v8::Handle<v8::Value> args[] = {
       v8::Integer::New(isolate, warm_up_iterations)
     };
-    function->Call(env->Global(), ARRAY_SIZE(args), args);
+    function->Call(env->Global(), arraysize(args), args);
     callbacks.set_warming_up(false);
   }
 
   int32_t repeat_count = 100;
   v8::Handle<v8::Value> args[] = { v8::Integer::New(isolate, repeat_count) };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+      RunProfiler(env.local(), function, args, arraysize(args), 100);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   GetChild(isolate, root, "start");
@@ -1024,23 +1023,20 @@
 }
 
 
-static const char* bound_function_test_source = "function foo(iterations) {\n"
-"  var r = 0;\n"
-"  for (var i = 0; i < iterations; i++) { r += i; }\n"
-"  return r;\n"
-"}\n"
-"function start(duration) {\n"
-"  var callback = foo.bind(this);\n"
-"  var start = Date.now();\n"
-"  while (Date.now() - start < duration) {\n"
-"    callback(10 * 1000);\n"
-"  }\n"
-"}";
+static const char* bound_function_test_source =
+    "function foo() {\n"
+    "  startProfiling('my_profile');\n"
+    "}\n"
+    "function start() {\n"
+    "  var callback = foo.bind(this);\n"
+    "  callback();\n"
+    "}";
 
 
 TEST(BoundFunctionCall) {
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
+  v8::HandleScope scope(CcTest::isolate());
+  v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+  v8::Context::Scope context_scope(env);
 
   v8::Script::Compile(
       v8::String::NewFromUtf8(env->GetIsolate(), bound_function_test_source))
@@ -1048,12 +1044,7 @@
   v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
       env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
 
-  int32_t duration_ms = 100;
-  v8::Handle<v8::Value> args[] = {
-    v8::Integer::New(env->GetIsolate(), duration_ms)
-  };
-  v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+  v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   ScopedVector<v8::Handle<v8::String> > names(3);
@@ -1114,7 +1105,7 @@
     v8::Integer::New(env->GetIsolate(), duration_ms)
   };
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+      RunProfiler(env.local(), function, args, arraysize(args), 100);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   {
@@ -1197,7 +1188,7 @@
   };
 
   v8::CpuProfile* profile =
-      RunProfiler(env.local(), function, args, ARRAY_SIZE(args), 100);
+      RunProfiler(env.local(), function, args, arraysize(args), 100);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   {
@@ -1248,33 +1239,89 @@
 }
 
 
-static const char* js_native_js_test_source =
-"var is_profiling = false;\n"
-"function foo(iterations) {\n"
-"  if (!is_profiling) {\n"
-"    is_profiling = true;\n"
+static const char* cpu_profiler_deep_stack_test_source =
+"function foo(n) {\n"
+"  if (n)\n"
+"    foo(n - 1);\n"
+"  else\n"
 "    startProfiling('my_profile');\n"
-"  }\n"
-"  var r = 0;\n"
-"  for (var i = 0; i < iterations; i++) { r += i; }\n"
-"  return r;\n"
 "}\n"
-"function bar(iterations) {\n"
-"  try { foo(iterations); } catch(e) {}\n"
-"}\n"
-"function start(duration) {\n"
-"  var start = Date.now();\n"
-"  while (Date.now() - start < duration) {\n"
-"    try {\n"
-"      CallJsFunction(bar, 10 * 1000);\n"
-"    } catch(e) {}\n"
-"  }\n"
-"}";
+"function start() {\n"
+"  foo(250);\n"
+"}\n";
+
+
+// Check a deep stack
+//
+// [Top down]:
+//    0  (root) 0 #1
+//    2    (program) 0 #2
+//    0    start 21 #3 no reason
+//    0      foo 21 #4 no reason
+//    0        foo 21 #5 no reason
+//                ....
+//    0          foo 21 #253 no reason
+//    1            startProfiling 0 #254
+TEST(CpuProfileDeepStack) {
+  v8::HandleScope scope(CcTest::isolate());
+  v8::Local<v8::Context> env = CcTest::NewContext(PROFILER_EXTENSION);
+  v8::Context::Scope context_scope(env);
+
+  v8::Script::Compile(v8::String::NewFromUtf8(
+      env->GetIsolate(), cpu_profiler_deep_stack_test_source))->Run();
+  v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
+      env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
+
+  v8::CpuProfiler* cpu_profiler = env->GetIsolate()->GetCpuProfiler();
+  v8::Local<v8::String> profile_name =
+      v8::String::NewFromUtf8(env->GetIsolate(), "my_profile");
+  function->Call(env->Global(), 0, NULL);
+  v8::CpuProfile* profile = cpu_profiler->StopProfiling(profile_name);
+  CHECK_NE(NULL, profile);
+  // Dump collected profile to have a better diagnostic in case of failure.
+  reinterpret_cast<i::CpuProfile*>(profile)->Print();
+
+  const v8::CpuProfileNode* root = profile->GetTopDownRoot();
+  {
+    ScopedVector<v8::Handle<v8::String> > names(3);
+    names[0] = v8::String::NewFromUtf8(
+        env->GetIsolate(), ProfileGenerator::kGarbageCollectorEntryName);
+    names[1] = v8::String::NewFromUtf8(env->GetIsolate(),
+                                       ProfileGenerator::kProgramEntryName);
+    names[2] = v8::String::NewFromUtf8(env->GetIsolate(), "start");
+    CheckChildrenNames(root, names);
+  }
+
+  const v8::CpuProfileNode* node =
+      GetChild(env->GetIsolate(), root, "start");
+  for (int i = 0; i < 250; ++i) {
+    node = GetChild(env->GetIsolate(), node, "foo");
+  }
+  // TODO(alph):
+  // In theory there should be one more 'foo' node and a 'startProfiling'
+  // node, but due to unstable top frame extraction these might be missing.
+
+  profile->Delete();
+}
+
+
+static const char* js_native_js_test_source =
+    "function foo() {\n"
+    "  startProfiling('my_profile');\n"
+    "}\n"
+    "function bar() {\n"
+    "  try { foo(); } catch(e) {}\n"
+    "}\n"
+    "function start() {\n"
+    "  try {\n"
+    "    CallJsFunction(bar);\n"
+    "  } catch(e) {}\n"
+    "}";
 
 static void CallJsFunction(const v8::FunctionCallbackInfo<v8::Value>& info) {
   v8::Handle<v8::Function> function = info[0].As<v8::Function>();
   v8::Handle<v8::Value> argv[] = { info[1] };
-  function->Call(info.This(), ARRAY_SIZE(argv), argv);
+  function->Call(info.This(), arraysize(argv), argv);
 }
 
 
@@ -1302,12 +1349,7 @@
   v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
       env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
 
-  int32_t duration_ms = 20;
-  v8::Handle<v8::Value> args[] = {
-    v8::Integer::New(env->GetIsolate(), duration_ms)
-  };
-  v8::CpuProfile* profile =
-      RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
+  v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   {
@@ -1338,28 +1380,18 @@
 
 
 static const char* js_native_js_runtime_js_test_source =
-"var is_profiling = false;\n"
-"function foo(iterations) {\n"
-"  if (!is_profiling) {\n"
-"    is_profiling = true;\n"
-"    startProfiling('my_profile');\n"
-"  }\n"
-"  var r = 0;\n"
-"  for (var i = 0; i < iterations; i++) { r += i; }\n"
-"  return r;\n"
-"}\n"
-"var bound = foo.bind(this);\n"
-"function bar(iterations) {\n"
-"  try { bound(iterations); } catch(e) {}\n"
-"}\n"
-"function start(duration) {\n"
-"  var start = Date.now();\n"
-"  while (Date.now() - start < duration) {\n"
-"    try {\n"
-"      CallJsFunction(bar, 10 * 1000);\n"
-"    } catch(e) {}\n"
-"  }\n"
-"}";
+    "function foo() {\n"
+    "  startProfiling('my_profile');\n"
+    "}\n"
+    "var bound = foo.bind(this);\n"
+    "function bar() {\n"
+    "  try { bound(); } catch(e) {}\n"
+    "}\n"
+    "function start() {\n"
+    "  try {\n"
+    "    CallJsFunction(bar);\n"
+    "  } catch(e) {}\n"
+    "}";
 
 
 // [Top down]:
@@ -1387,12 +1419,7 @@
   v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
       env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
 
-  int32_t duration_ms = 20;
-  v8::Handle<v8::Value> args[] = {
-    v8::Integer::New(env->GetIsolate(), duration_ms)
-  };
-  v8::CpuProfile* profile =
-      RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
+  v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   ScopedVector<v8::Handle<v8::String> > names(3);
@@ -1425,32 +1452,25 @@
 
 
 static void CallJsFunction2(const v8::FunctionCallbackInfo<v8::Value>& info) {
+  v8::base::OS::Print("In CallJsFunction2\n");
   CallJsFunction(info);
 }
 
 
 static const char* js_native1_js_native2_js_test_source =
-"var is_profiling = false;\n"
-"function foo(iterations) {\n"
-"  if (!is_profiling) {\n"
-"    is_profiling = true;\n"
-"    startProfiling('my_profile');\n"
-"  }\n"
-"  var r = 0;\n"
-"  for (var i = 0; i < iterations; i++) { r += i; }\n"
-"  return r;\n"
-"}\n"
-"function bar(iterations) {\n"
-"  CallJsFunction2(foo, iterations);\n"
-"}\n"
-"function start(duration) {\n"
-"  var start = Date.now();\n"
-"  while (Date.now() - start < duration) {\n"
-"    try {\n"
-"      CallJsFunction1(bar, 10 * 1000);\n"
-"    } catch(e) {}\n"
-"  }\n"
-"}";
+    "function foo() {\n"
+    "  try {\n"
+    "    startProfiling('my_profile');\n"
+    "  } catch(e) {}\n"
+    "}\n"
+    "function bar() {\n"
+    "  CallJsFunction2(foo);\n"
+    "}\n"
+    "function start() {\n"
+    "  try {\n"
+    "    CallJsFunction1(bar);\n"
+    "  } catch(e) {}\n"
+    "}";
 
 
 // [Top down]:
@@ -1485,12 +1505,7 @@
   v8::Local<v8::Function> function = v8::Local<v8::Function>::Cast(
       env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "start")));
 
-  int32_t duration_ms = 20;
-  v8::Handle<v8::Value> args[] = {
-    v8::Integer::New(env->GetIsolate(), duration_ms)
-  };
-  v8::CpuProfile* profile =
-      RunProfiler(env, function, args, ARRAY_SIZE(args), 10);
+  v8::CpuProfile* profile = RunProfiler(env, function, NULL, 0, 0);
 
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
   ScopedVector<v8::Handle<v8::String> > names(3);
@@ -1614,15 +1629,13 @@
       const_cast<v8::CpuProfileNode*>(current))->Print(0);
   // The tree should look like this:
   //  0   (root) 0 #1
-  //  0    (anonymous function) 19 #2 no reason script_b:1
+  //  0    "" 19 #2 no reason script_b:1
   //  0      baz 19 #3 TryCatchStatement script_b:3
   //  0        foo 18 #4 TryCatchStatement script_a:2
   //  1          bar 18 #5 no reason script_a:3
   const v8::CpuProfileNode* root = profile->GetTopDownRoot();
-  const v8::CpuProfileNode* script = GetChild(env->GetIsolate(), root,
-      ProfileGenerator::kAnonymousFunctionName);
-  CheckFunctionDetails(env->GetIsolate(), script,
-                       ProfileGenerator::kAnonymousFunctionName, "script_b",
+  const v8::CpuProfileNode* script = GetChild(env->GetIsolate(), root, "");
+  CheckFunctionDetails(env->GetIsolate(), script, "", "script_b",
                        script_b->GetUnboundScript()->GetId(), 1, 1);
   const v8::CpuProfileNode* baz = GetChild(env->GetIsolate(), script, "baz");
   CheckFunctionDetails(env->GetIsolate(), baz, "baz", "script_b",
diff --git a/test/cctest/test-cpu-x64.cc b/test/cctest/test-cpu-x64.cc
deleted file mode 100644
index 7f9fafa..0000000
--- a/test/cctest/test-cpu-x64.cc
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "src/cpu.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
-
-TEST(RequiredFeaturesX64) {
-  // Test for the features required by every x64 CPU.
-  CPU cpu;
-  CHECK(cpu.has_fpu());
-  CHECK(cpu.has_cmov());
-  CHECK(cpu.has_mmx());
-  CHECK(cpu.has_sse());
-  CHECK(cpu.has_sse2());
-}
diff --git a/test/cctest/test-cpu-x87.cc b/test/cctest/test-cpu-x87.cc
deleted file mode 100644
index f1e07cf..0000000
--- a/test/cctest/test-cpu-x87.cc
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "src/cpu.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
-
-TEST(RequiredFeaturesX64) {
-  // Test for the features required by every x86 CPU in compat/legacy mode.
-  CPU cpu;
-  CHECK(cpu.has_sahf());
-}
diff --git a/test/cctest/test-cpu.cc b/test/cctest/test-cpu.cc
deleted file mode 100644
index 416213a..0000000
--- a/test/cctest/test-cpu.cc
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "src/cpu.h"
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
-
-TEST(FeatureImplications) {
-  // Test for features implied by other features.
-  CPU cpu;
-
-  // ia32 and x64 features
-  CHECK(!cpu.has_sse() || cpu.has_mmx());
-  CHECK(!cpu.has_sse2() || cpu.has_sse());
-  CHECK(!cpu.has_sse3() || cpu.has_sse2());
-  CHECK(!cpu.has_ssse3() || cpu.has_sse3());
-  CHECK(!cpu.has_sse41() || cpu.has_sse3());
-  CHECK(!cpu.has_sse42() || cpu.has_sse41());
-
-  // arm features
-  CHECK(!cpu.has_vfp3_d32() || cpu.has_vfp3());
-}
-
-
-TEST(NumberOfProcessorsOnline) {
-  CHECK_GT(OS::NumberOfProcessorsOnline(), 0);
-}
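
The deleted FeatureImplications test encoded each dependency between CPU
features as a material implication: "a implies b" written as !a || b, which is
false only when a holds and b does not. A small sketch of the idiom (the
Implies helper is illustrative):

    // "a implies b" as a boolean expression.
    static inline bool Implies(bool a, bool b) { return !a || b; }

    // Mirrors the deleted checks, e.g. SSE2 support implies SSE support:
    //   CHECK(Implies(cpu.has_sse2(), cpu.has_sse()));
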
diff --git a/test/cctest/test-dataflow.cc b/test/cctest/test-dataflow.cc
index fc1a7fa..43d950d 100644
--- a/test/cctest/test-dataflow.cc
+++ b/test/cctest/test-dataflow.cc
@@ -35,7 +35,6 @@
 using namespace v8::internal;
 
 TEST(BitVector) {
-  v8::internal::V8::Initialize(NULL);
   Zone zone(CcTest::i_isolate());
   {
     BitVector v(15, &zone);
diff --git a/test/cctest/test-date.cc b/test/cctest/test-date.cc
index f218795..2f722c2 100644
--- a/test/cctest/test-date.cc
+++ b/test/cctest/test-date.cc
@@ -132,7 +132,7 @@
   int local_offset_ms = -36000000;  // -10 hours.
 
   DateCacheMock* date_cache =
-    new DateCacheMock(local_offset_ms, rules, ARRAY_SIZE(rules));
+    new DateCacheMock(local_offset_ms, rules, arraysize(rules));
 
   reinterpret_cast<Isolate*>(isolate)->set_date_cache(date_cache);
 
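
ARRAY_SIZE gives way to arraysize here, which V8 defines in its base macros via
a function template so that passing a pointer instead of a real array fails to
compile. A sketch of the usual definition:

    #include <stddef.h>

    // The helper's return type is a char array with the same extent N as the
    // argument, so sizeof() yields the element count at compile time.
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    // int rules[3];
    // size_t n = arraysize(rules);  // 3; arraysize(&rules[0]) won't compile
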
diff --git a/test/cctest/test-debug.cc b/test/cctest/test-debug.cc
index d2cf0e0..2f0674a 100644
--- a/test/cctest/test-debug.cc
+++ b/test/cctest/test-debug.cc
@@ -30,24 +30,23 @@
 #include "src/v8.h"
 
 #include "src/api.h"
+#include "src/base/platform/condition-variable.h"
+#include "src/base/platform/platform.h"
 #include "src/compilation-cache.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
 #include "src/frames.h"
-#include "src/platform.h"
-#include "src/platform/condition-variable.h"
-#include "src/stub-cache.h"
 #include "src/utils.h"
 #include "test/cctest/cctest.h"
 
 
-using ::v8::internal::Mutex;
-using ::v8::internal::LockGuard;
-using ::v8::internal::ConditionVariable;
-using ::v8::internal::Semaphore;
+using ::v8::base::Mutex;
+using ::v8::base::LockGuard;
+using ::v8::base::ConditionVariable;
+using ::v8::base::OS;
+using ::v8::base::Semaphore;
 using ::v8::internal::EmbeddedVector;
 using ::v8::internal::Object;
-using ::v8::internal::OS;
 using ::v8::internal::Handle;
 using ::v8::internal::Heap;
 using ::v8::internal::JSGlobalProxy;
@@ -74,16 +73,23 @@
 class DebugLocalContext {
  public:
   inline DebugLocalContext(
+      v8::Isolate* isolate, v8::ExtensionConfiguration* extensions = 0,
+      v8::Handle<v8::ObjectTemplate> global_template =
+          v8::Handle<v8::ObjectTemplate>(),
+      v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
+      : scope_(isolate),
+        context_(v8::Context::New(isolate, extensions, global_template,
+                                  global_object)) {
+    context_->Enter();
+  }
+  inline DebugLocalContext(
       v8::ExtensionConfiguration* extensions = 0,
       v8::Handle<v8::ObjectTemplate> global_template =
           v8::Handle<v8::ObjectTemplate>(),
       v8::Handle<v8::Value> global_object = v8::Handle<v8::Value>())
       : scope_(CcTest::isolate()),
-        context_(
-          v8::Context::New(CcTest::isolate(),
-                           extensions,
-                           global_template,
-                           global_object)) {
+        context_(v8::Context::New(CcTest::isolate(), extensions,
+                                  global_template, global_object)) {
     context_->Enter();
   }
   inline ~DebugLocalContext() {
@@ -108,11 +114,9 @@
     Handle<JSGlobalProxy> global(Handle<JSGlobalProxy>::cast(
         v8::Utils::OpenHandle(*context_->Global())));
     Handle<v8::internal::String> debug_string =
-        factory->InternalizeOneByteString(STATIC_ASCII_VECTOR("debug"));
-    v8::internal::Runtime::SetObjectProperty(isolate, global, debug_string,
-        Handle<Object>(debug_context->global_proxy(), isolate),
-        DONT_ENUM,
-        ::v8::internal::SLOPPY).Check();
+        factory->InternalizeOneByteString(STATIC_CHAR_VECTOR("debug"));
+    v8::internal::Runtime::DefineObjectProperty(global, debug_string,
+        handle(debug_context->global_proxy(), isolate), DONT_ENUM).Check();
   }
 
  private:
@@ -140,8 +144,7 @@
                                                const char* source,
                                                const char* function_name) {
   v8::Script::Compile(v8::String::NewFromUtf8(isolate, source))->Run();
-  v8::Local<v8::Object> global =
-      CcTest::isolate()->GetCurrentContext()->Global();
+  v8::Local<v8::Object> global = isolate->GetCurrentContext()->Global();
   return v8::Local<v8::Function>::Cast(
       global->Get(v8::String::NewFromUtf8(isolate, function_name)));
 }
@@ -672,6 +675,8 @@
 int exception_hit_count = 0;
 int uncaught_exception_hit_count = 0;
 int last_js_stack_height = -1;
+v8::Handle<v8::Function> debug_event_listener_callback;
+int debug_event_listener_callback_result;
 
 static void DebugEventCounterClear() {
   break_point_hit_count = 0;
@@ -712,9 +717,17 @@
     static const int kArgc = 1;
     v8::Handle<v8::Value> argv[kArgc] = { exec_state };
     // Using exec_state as receiver is just to have a receiver.
-    v8::Handle<v8::Value> result =  frame_count->Call(exec_state, kArgc, argv);
+    v8::Handle<v8::Value> result = frame_count->Call(exec_state, kArgc, argv);
     last_js_stack_height = result->Int32Value();
   }
+
+  // Run callback from DebugEventListener and check the result.
+  if (!debug_event_listener_callback.IsEmpty()) {
+    v8::Handle<v8::Value> result =
+        debug_event_listener_callback->Call(event_data, 0, NULL);
+    CHECK(!result.IsEmpty());
+    CHECK_EQ(debug_event_listener_callback_result, result->Int32Value());
+  }
 }
 
 
@@ -752,6 +765,7 @@
   CHECK_NE(debug->break_id(), 0);
 
   if (event == v8::Break) {
+    break_point_hit_count++;
     for (int i = 0; checks[i].expr != NULL; i++) {
       const int argc = 3;
       v8::Handle<v8::Value> argv[argc] = {
@@ -2393,7 +2407,7 @@
   };
 
   // Simple test function. The "y=0" is in the function foo to provide a break
-  // location. For "y=0" the "y" is at position 15 in the barbar function
+  // location. For "y=0" the "y" is at position 15 in the foo function
   // therefore setting breakpoint at position 15 will break at "y=0" and
   // setting it higher will break after.
   v8::Local<v8::Function> foo = CompileFunction(&env,
@@ -2426,6 +2440,34 @@
   checks = checks_hh;
   foo->Call(env->Global(), 1, argv_foo);
 
+  // Test that overriding Object.prototype does not interfere with evaluation
+  // on the call frame.
+  v8::Local<v8::Function> zoo =
+      CompileFunction(&env,
+                      "x = undefined;"
+                      "function zoo(t) {"
+                      "  var a=x;"
+                      "  Object.prototype.x = 42;"
+                      "  x=t;"
+                      "  y=0;"  // To ensure break location.
+                      "  delete Object.prototype.x;"
+                      "  x=a;"
+                      "}",
+                      "zoo");
+  const int zoo_break_position = 50;
+
+  // Arguments with one parameter "Hello, world!"
+  v8::Handle<v8::Value> argv_zoo[1] = {
+      v8::String::NewFromUtf8(env->GetIsolate(), "Hello, world!")};
+
+  // Call zoo with breakpoint set at y=0.
+  DebugEventCounterClear();
+  bp = SetBreakPoint(zoo, zoo_break_position);
+  checks = checks_hu;
+  zoo->Call(env->Global(), 1, argv_zoo);
+  CHECK_EQ(1, break_point_hit_count);
+  ClearBreakPoint(bp);
+
   // Test function with an inner function. The "y=0" is in function barbar
   // to provide a break location. For "y=0" the "y" is at position 8 in the
   // barbar function therefore setting breakpoint at position 8 will break at
@@ -3193,19 +3235,26 @@
   v8::Local<v8::Function> foo = CompileFunction(&env, src, "foo");
   SetBreakPoint(foo, 8);  // "var a = 0;"
 
+  // Looping 0 times.  We should still break at the while-condition once.
+  step_action = StepIn;
+  break_point_hit_count = 0;
+  v8::Handle<v8::Value> argv_0[argc] = { v8::Number::New(isolate, 0) };
+  foo->Call(env->Global(), argc, argv_0);
+  CHECK_EQ(3, break_point_hit_count);
+
   // Looping 10 times.
   step_action = StepIn;
   break_point_hit_count = 0;
   v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
   foo->Call(env->Global(), argc, argv_10);
-  CHECK_EQ(22, break_point_hit_count);
+  CHECK_EQ(23, break_point_hit_count);
 
   // Looping 100 times.
   step_action = StepIn;
   break_point_hit_count = 0;
   v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
   foo->Call(env->Global(), argc, argv_100);
-  CHECK_EQ(202, break_point_hit_count);
+  CHECK_EQ(203, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -3963,6 +4012,43 @@
 }
 
 
+TEST(EvalJSInDebugEventListenerOnNativeReThrownException) {
+  DebugLocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  env.ExposeDebug();
+
+  // Create a function for testing break on exception.
+  v8::Local<v8::Function> noThrowJS = CompileFunction(
+      &env, "function noThrowJS(){var a=[1]; a.push(2); return a.length;}",
+      "noThrowJS");
+
+  debug_event_listener_callback = noThrowJS;
+  debug_event_listener_callback_result = 2;
+
+  v8::V8::AddMessageListener(MessageCallbackCount);
+  v8::Debug::SetDebugEventListener(DebugEventCounter);
+  // Break on uncaught exception
+  ChangeBreakOnException(false, true);
+  DebugEventCounterClear();
+  MessageCallbackCountClear();
+
+  // ReThrow native error
+  {
+    v8::TryCatch tryCatch;
+    env->GetIsolate()->ThrowException(v8::Exception::TypeError(
+        v8::String::NewFromUtf8(env->GetIsolate(), "Type error")));
+    CHECK(tryCatch.HasCaught());
+    tryCatch.ReThrow();
+  }
+  CHECK_EQ(1, exception_hit_count);
+  CHECK_EQ(1, uncaught_exception_hit_count);
+  CHECK_EQ(0, message_callback_count);  // FIXME: Should it be 1?
+  CHECK(!debug_event_listener_callback.IsEmpty());
+
+  debug_event_listener_callback.Clear();
+}
+
+
 // Test break on exception from compiler errors. When compiling using
 // v8::Script::Compile there is no JavaScript stack whereas when compiling using
 // eval there are JavaScript frames.
@@ -4146,10 +4232,11 @@
 
   // Set the debug break flag.
   v8::Debug::DebugBreak(env->GetIsolate());
+  CHECK(v8::Debug::CheckDebugBreak(env->GetIsolate()));
 
   // Call all functions with different argument count.
   break_point_hit_count = 0;
-  for (unsigned int i = 0; i < ARRAY_SIZE(argv); i++) {
+  for (unsigned int i = 0; i < arraysize(argv); i++) {
     f0->Call(env->Global(), i, argv);
     f1->Call(env->Global(), i, argv);
     f2->Call(env->Global(), i, argv);
@@ -4157,7 +4244,7 @@
   }
 
   // One break for each function called.
-  CHECK_EQ(4 * ARRAY_SIZE(argv), break_point_hit_count);
+  CHECK_EQ(4 * arraysize(argv), break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -4178,6 +4265,12 @@
   const char* src = "function f() {g()};function g(){i=0; while(i<10){i++}}";
   v8::Local<v8::Function> f = CompileFunction(&env, src, "f");
 
+  // Set, test and cancel debug break.
+  v8::Debug::DebugBreak(env->GetIsolate());
+  CHECK(v8::Debug::CheckDebugBreak(env->GetIsolate()));
+  v8::Debug::CancelDebugBreak(env->GetIsolate());
+  CHECK(!v8::Debug::CheckDebugBreak(env->GetIsolate()));
+
   // Set the debug break flag.
   v8::Debug::DebugBreak(env->GetIsolate());
 
@@ -4372,10 +4465,6 @@
              "named_values[%d] instanceof debug.PropertyMirror", i);
     CHECK(CompileRun(buffer.start())->BooleanValue());
 
-    SNPrintF(buffer, "named_values[%d].propertyType()", i);
-    CHECK_EQ(v8::internal::INTERCEPTOR,
-             CompileRun(buffer.start())->Int32Value());
-
     SNPrintF(buffer, "named_values[%d].isNative()", i);
     CHECK(CompileRun(buffer.start())->BooleanValue());
   }
@@ -4691,7 +4780,7 @@
 // The Wait() call blocks a thread until it is called for the Nth time, then all
 // calls return.  Each ThreadBarrier object can only be used once.
 template <int N>
-class ThreadBarrier V8_FINAL {
+class ThreadBarrier FINAL {
  public:
   ThreadBarrier() : num_blocked_(0) {}
 
@@ -4739,8 +4828,8 @@
   ThreadBarrier<2> barrier_3;
   ThreadBarrier<2> barrier_4;
   ThreadBarrier<2> barrier_5;
-  v8::internal::Semaphore semaphore_1;
-  v8::internal::Semaphore semaphore_2;
+  v8::base::Semaphore semaphore_1;
+  v8::base::Semaphore semaphore_2;
 };
 
 
@@ -4840,10 +4929,10 @@
 
 // This is the debugger thread, that executes no v8 calls except
 // placing JSON debugger commands in the queue.
-class MessageQueueDebuggerThread : public v8::internal::Thread {
+class MessageQueueDebuggerThread : public v8::base::Thread {
  public:
   MessageQueueDebuggerThread()
-      : Thread("MessageQueueDebuggerThread") { }
+      : Thread(Options("MessageQueueDebuggerThread")) {}
   void Run();
 };
 
@@ -5097,16 +5186,24 @@
 
 Barriers threaded_debugging_barriers;
 
-class V8Thread : public v8::internal::Thread {
+class V8Thread : public v8::base::Thread {
  public:
-  V8Thread() : Thread("V8Thread") { }
+  V8Thread() : Thread(Options("V8Thread")) {}
   void Run();
+  v8::Isolate* isolate() { return isolate_; }
+
+ private:
+  v8::Isolate* isolate_;
 };
 
-class DebuggerThread : public v8::internal::Thread {
+class DebuggerThread : public v8::base::Thread {
  public:
-  DebuggerThread() : Thread("DebuggerThread") { }
+  explicit DebuggerThread(v8::Isolate* isolate)
+      : Thread(Options("DebuggerThread")), isolate_(isolate) {}
   void Run();
+
+ private:
+  v8::Isolate* isolate_;
 };
 
 
@@ -5123,10 +5220,7 @@
   if (IsBreakEventMessage(print_buffer)) {
     // Check that we are inside the while loop.
     int source_line = GetSourceLineFromBreakEventMessage(print_buffer);
-    // TODO(2047): This should really be 8 <= source_line <= 13; but we
-    // currently have an off-by-one error when calculating the source
-    // position corresponding to the program counter at the debug break.
-    CHECK(7 <= source_line && source_line <= 13);
+    CHECK(8 <= source_line && source_line <= 13);
     threaded_debugging_barriers.barrier_2.Wait();
   }
 }
@@ -5152,22 +5246,25 @@
       "\n"
       "foo();\n";
 
-  v8::Isolate* isolate = CcTest::isolate();
-  v8::Isolate::Scope isolate_scope(isolate);
-  DebugLocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-  v8::Debug::SetMessageHandler(&ThreadedMessageHandler);
-  v8::Handle<v8::ObjectTemplate> global_template =
-      v8::ObjectTemplate::New(env->GetIsolate());
-  global_template->Set(
-      v8::String::NewFromUtf8(env->GetIsolate(), "ThreadedAtBarrier1"),
-      v8::FunctionTemplate::New(isolate, ThreadedAtBarrier1));
-  v8::Handle<v8::Context> context = v8::Context::New(isolate,
-                                                     NULL,
-                                                     global_template);
-  v8::Context::Scope context_scope(context);
+  isolate_ = v8::Isolate::New();
+  threaded_debugging_barriers.barrier_3.Wait();
+  {
+    v8::Isolate::Scope isolate_scope(isolate_);
+    DebugLocalContext env(isolate_);
+    v8::HandleScope scope(isolate_);
+    v8::Debug::SetMessageHandler(&ThreadedMessageHandler);
+    v8::Handle<v8::ObjectTemplate> global_template =
+        v8::ObjectTemplate::New(env->GetIsolate());
+    global_template->Set(
+        v8::String::NewFromUtf8(env->GetIsolate(), "ThreadedAtBarrier1"),
+        v8::FunctionTemplate::New(isolate_, ThreadedAtBarrier1));
+    v8::Handle<v8::Context> context =
+        v8::Context::New(isolate_, NULL, global_template);
+    v8::Context::Scope context_scope(context);
 
-  CompileRun(source);
+    CompileRun(source);
+  }
+  isolate_->Dispose();
 }
 
 
@@ -5183,21 +5280,21 @@
       "\"type\":\"request\","
       "\"command\":\"continue\"}";
 
-  v8::Isolate* isolate = CcTest::isolate();
   threaded_debugging_barriers.barrier_1.Wait();
-  v8::Debug::DebugBreak(isolate);
+  v8::Debug::DebugBreak(isolate_);
   threaded_debugging_barriers.barrier_2.Wait();
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_1, buffer));
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_1, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_2, buffer));
 }
 
 
 TEST(ThreadedDebugging) {
-  DebuggerThread debugger_thread;
   V8Thread v8_thread;
 
   // Create a V8 environment
   v8_thread.Start();
+  threaded_debugging_barriers.barrier_3.Wait();
+  DebuggerThread debugger_thread(v8_thread.isolate());
   debugger_thread.Start();
 
   v8_thread.Join();
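
The new barrier_3 handshake exists because the isolate is now created inside
V8Thread::Run() rather than shared: the V8 thread publishes isolate_ and then
waits on barrier_3, and the main thread waits on the same barrier before it
constructs DebuggerThread, so the debugger thread is never handed an
uninitialized isolate pointer. The startup sequence, condensed:

    v8_thread.Start();
    threaded_debugging_barriers.barrier_3.Wait();         // isolate_ is set now
    DebuggerThread debugger_thread(v8_thread.isolate());  // safe to read
    debugger_thread.Start();
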
@@ -5212,21 +5309,28 @@
  * breakpoint is hit when enabled, and missed when disabled.
  */
 
-class BreakpointsV8Thread : public v8::internal::Thread {
+class BreakpointsV8Thread : public v8::base::Thread {
  public:
-  BreakpointsV8Thread() : Thread("BreakpointsV8Thread") { }
+  BreakpointsV8Thread() : Thread(Options("BreakpointsV8Thread")) {}
   void Run();
+
+  v8::Isolate* isolate() { return isolate_; }
+
+ private:
+  v8::Isolate* isolate_;
 };
 
-class BreakpointsDebuggerThread : public v8::internal::Thread {
+class BreakpointsDebuggerThread : public v8::base::Thread {
  public:
-  explicit BreakpointsDebuggerThread(bool global_evaluate)
-      : Thread("BreakpointsDebuggerThread"),
-        global_evaluate_(global_evaluate) {}
+  BreakpointsDebuggerThread(bool global_evaluate, v8::Isolate* isolate)
+      : Thread(Options("BreakpointsDebuggerThread")),
+        global_evaluate_(global_evaluate),
+        isolate_(isolate) {}
   void Run();
 
  private:
   bool global_evaluate_;
+  v8::Isolate* isolate_;
 };
 
 
@@ -5271,16 +5375,20 @@
   const char* source_2 = "cat(17);\n"
     "cat(19);\n";
 
-  v8::Isolate* isolate = CcTest::isolate();
-  v8::Isolate::Scope isolate_scope(isolate);
-  DebugLocalContext env;
-  v8::HandleScope scope(isolate);
-  v8::Debug::SetMessageHandler(&BreakpointsMessageHandler);
+  isolate_ = v8::Isolate::New();
+  breakpoints_barriers->barrier_3.Wait();
+  {
+    v8::Isolate::Scope isolate_scope(isolate_);
+    DebugLocalContext env(isolate_);
+    v8::HandleScope scope(isolate_);
+    v8::Debug::SetMessageHandler(&BreakpointsMessageHandler);
 
-  CompileRun(source_1);
-  breakpoints_barriers->barrier_1.Wait();
-  breakpoints_barriers->barrier_2.Wait();
-  CompileRun(source_2);
+    CompileRun(source_1);
+    breakpoints_barriers->barrier_1.Wait();
+    breakpoints_barriers->barrier_2.Wait();
+    CompileRun(source_2);
+  }
+  isolate_->Dispose();
 }
 
 
@@ -5346,14 +5454,12 @@
       "\"command\":\"continue\"}";
 
 
-  v8::Isolate* isolate = CcTest::isolate();
-  v8::Isolate::Scope isolate_scope(isolate);
   // v8 thread initializes, runs source_1
   breakpoints_barriers->barrier_1.Wait();
   // 1:Set breakpoint in cat() (will get id 1).
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_1, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_1, buffer));
   // 2:Set breakpoint in dog() (will get id 2).
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_2, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_2, buffer));
   breakpoints_barriers->barrier_2.Wait();
   // V8 thread starts compiling source_2.
   // Automatic break happens, to run queued commands
@@ -5365,43 +5471,42 @@
   // Must have hit breakpoint #1.
   CHECK_EQ(1, break_event_breakpoint_id);
   // 4:Evaluate dog() (which has a breakpoint).
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_3, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_3, buffer));
   // V8 thread hits breakpoint in dog().
   breakpoints_barriers->semaphore_1.Wait();  // wait for break event
   // Must have hit breakpoint #2.
   CHECK_EQ(2, break_event_breakpoint_id);
   // 5:Evaluate (x + 1).
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_4, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_4, buffer));
   // Evaluate (x + 1) finishes.
   breakpoints_barriers->semaphore_1.Wait();
   // Must have result 108.
   CHECK_EQ(108, evaluate_int_result);
   // 6:Continue evaluation of dog().
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_5, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_5, buffer));
   // Evaluate dog() finishes.
   breakpoints_barriers->semaphore_1.Wait();
   // Must have result 107.
   CHECK_EQ(107, evaluate_int_result);
   // 7:Continue evaluation of source_2, finish cat(17), hit breakpoint
   // in cat(19).
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_6, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_6, buffer));
   // Message callback gets break event.
   breakpoints_barriers->semaphore_1.Wait();  // wait for break event
   // Must have hit breakpoint #1.
   CHECK_EQ(1, break_event_breakpoint_id);
   // 8: Evaluate dog() with breaks disabled.
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_7, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_7, buffer));
   // Evaluate dog() finishes.
   breakpoints_barriers->semaphore_1.Wait();
   // Must have result 116.
   CHECK_EQ(116, evaluate_int_result);
   // 9: Continue evaluation of source2, reach end.
-  v8::Debug::SendCommand(isolate, buffer, AsciiToUtf16(command_8, buffer));
+  v8::Debug::SendCommand(isolate_, buffer, AsciiToUtf16(command_8, buffer));
 }
 
 
 void TestRecursiveBreakpointsGeneric(bool global_evaluate) {
-  BreakpointsDebuggerThread breakpoints_debugger_thread(global_evaluate);
   BreakpointsV8Thread breakpoints_v8_thread;
 
   // Create a V8 environment
@@ -5409,6 +5514,9 @@
   breakpoints_barriers = &stack_allocated_breakpoints_barriers;
 
   breakpoints_v8_thread.Start();
+  breakpoints_barriers->barrier_3.Wait();
+  BreakpointsDebuggerThread breakpoints_debugger_thread(
+      global_evaluate, breakpoints_v8_thread.isolate());
   breakpoints_debugger_thread.Start();
 
   breakpoints_v8_thread.Join();
@@ -6217,120 +6325,6 @@
 }
 
 
-// Debug event listener which counts the script collected events.
-int script_collected_count = 0;
-static void DebugEventScriptCollectedEvent(
-    const v8::Debug::EventDetails& event_details) {
-  v8::DebugEvent event = event_details.GetEvent();
-  // Count the number of breaks.
-  if (event == v8::ScriptCollected) {
-    script_collected_count++;
-  }
-}
-
-
-// Test that scripts collected are reported through the debug event listener.
-TEST(ScriptCollectedEvent) {
-  v8::internal::Debug* debug = CcTest::i_isolate()->debug();
-  break_point_hit_count = 0;
-  script_collected_count = 0;
-  DebugLocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  // Request the loaded scripts to initialize the debugger script cache.
-  debug->GetLoadedScripts();
-
-  // Do garbage collection to ensure that only the script in this test will be
-  // collected afterwards.
-  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-
-  script_collected_count = 0;
-  v8::Debug::SetDebugEventListener(DebugEventScriptCollectedEvent);
-  {
-    v8::Script::Compile(
-        v8::String::NewFromUtf8(env->GetIsolate(), "eval('a=1')"))->Run();
-    v8::Script::Compile(
-        v8::String::NewFromUtf8(env->GetIsolate(), "eval('a=2')"))->Run();
-  }
-
-  // Do garbage collection to collect the script above which is no longer
-  // referenced.
-  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-
-  CHECK_EQ(2, script_collected_count);
-
-  v8::Debug::SetDebugEventListener(NULL);
-  CheckDebuggerUnloaded();
-}
-
-
-// Debug event listener which counts the script collected events.
-int script_collected_message_count = 0;
-static void ScriptCollectedMessageHandler(const v8::Debug::Message& message) {
-  // Count the number of scripts collected.
-  if (message.IsEvent() && message.GetEvent() == v8::ScriptCollected) {
-    script_collected_message_count++;
-    v8::Handle<v8::Context> context = message.GetEventContext();
-    CHECK(context.IsEmpty());
-  }
-}
-
-
-// Test that GetEventContext doesn't fail and return empty handle for
-// ScriptCollected events.
-TEST(ScriptCollectedEventContext) {
-  i::FLAG_stress_compaction = false;
-  v8::Isolate* isolate = CcTest::isolate();
-  v8::internal::Debug* debug =
-      reinterpret_cast<v8::internal::Isolate*>(isolate)->debug();
-  script_collected_message_count = 0;
-  v8::HandleScope scope(isolate);
-
-  v8::Persistent<v8::Context> context;
-  {
-    v8::HandleScope scope(isolate);
-    context.Reset(isolate, v8::Context::New(isolate));
-  }
-
-  // Enter context.  We can't have a handle to the context in the outer
-  // scope, so we have to do it the hard way.
-  {
-    v8::HandleScope scope(isolate);
-    v8::Local<v8::Context> local_context =
-        v8::Local<v8::Context>::New(isolate, context);
-    local_context->Enter();
-  }
-
-  // Request the loaded scripts to initialize the debugger script cache.
-  debug->GetLoadedScripts();
-
-  // Do garbage collection to ensure that only the script in this test will be
-  // collected afterwards.
-  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-
-  v8::Debug::SetMessageHandler(ScriptCollectedMessageHandler);
-  v8::Script::Compile(v8::String::NewFromUtf8(isolate, "eval('a=1')"))->Run();
-  v8::Script::Compile(v8::String::NewFromUtf8(isolate, "eval('a=2')"))->Run();
-
-  // Leave context
-  {
-    v8::HandleScope scope(isolate);
-    v8::Local<v8::Context> local_context =
-        v8::Local<v8::Context>::New(isolate, context);
-    local_context->Exit();
-  }
-  context.Reset();
-
-  // Do garbage collection to collect the script above which is no longer
-  // referenced.
-  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-
-  CHECK_EQ(2, script_collected_message_count);
-
-  v8::Debug::SetMessageHandler(NULL);
-}
-
-
 // Debug event listener which counts the after compile events.
 int after_compile_message_count = 0;
 static void AfterCompileMessageHandler(const v8::Debug::Message& message) {
@@ -6372,6 +6366,60 @@
 }
 
 
+// Syntax error event handler which counts a number of events.
+int compile_error_event_count = 0;
+
+static void CompileErrorEventCounterClear() {
+  compile_error_event_count = 0;
+}
+
+static void CompileErrorEventCounter(
+    const v8::Debug::EventDetails& event_details) {
+  v8::DebugEvent event = event_details.GetEvent();
+
+  if (event == v8::CompileError) {
+    compile_error_event_count++;
+  }
+}
+
+
+// Tests that a syntax error event is sent once for each script compiled
+// with a syntax error.
+TEST(SyntaxErrorMessageOnSyntaxException) {
+  DebugLocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+
+  // For this test, we want to break on uncaught exceptions:
+  ChangeBreakOnException(false, true);
+
+  v8::Debug::SetDebugEventListener(CompileErrorEventCounter);
+
+  CompileErrorEventCounterClear();
+
+  // Check initial state.
+  CHECK_EQ(0, compile_error_event_count);
+
+  // Throws SyntaxError: Unexpected end of input
+  v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "+++"));
+  CHECK_EQ(1, compile_error_event_count);
+
+  v8::Script::Compile(
+    v8::String::NewFromUtf8(env->GetIsolate(), "/sel\\/: \\"));
+  CHECK_EQ(2, compile_error_event_count);
+
+  v8::Script::Compile(
+    v8::String::NewFromUtf8(env->GetIsolate(), "JSON.parse('1234:')"));
+  CHECK_EQ(2, compile_error_event_count);
+
+  v8::Script::Compile(
+    v8::String::NewFromUtf8(env->GetIsolate(), "new RegExp('/\\/\\\\');"));
+  CHECK_EQ(2, compile_error_event_count);
+
+  v8::Script::Compile(v8::String::NewFromUtf8(env->GetIsolate(), "throw 1;"));
+  CHECK_EQ(2, compile_error_event_count);
+}
+
+
 // Tests that break event is sent when message handler is reset.
 TEST(BreakMessageWhenMessageHandlerIsReset) {
   DebugLocalContext env;
@@ -6489,19 +6537,12 @@
   } else if (message.IsEvent() && message.GetEvent() == v8::AfterCompile) {
     i::HandleScope scope(isolate);
 
-    bool is_debug_break = isolate->stack_guard()->CheckDebugBreak();
-    // Force DebugBreak flag while serializer is working.
-    isolate->stack_guard()->RequestDebugBreak();
+    int current_count = break_point_hit_count;
 
     // Force serialization to trigger some internal JS execution.
     message.GetJSON();
 
-    // Restore previous state.
-    if (is_debug_break) {
-      isolate->stack_guard()->RequestDebugBreak();
-    } else {
-      isolate->stack_guard()->ClearDebugBreak();
-    }
+    CHECK_EQ(current_count, break_point_hit_count);
   }
 }
 
@@ -6587,16 +6628,23 @@
 }
 
 
-class SendCommandThread : public v8::internal::Thread {
+class SendCommandThread;
+static SendCommandThread* send_command_thread_ = NULL;
+
+
+class SendCommandThread : public v8::base::Thread {
  public:
   explicit SendCommandThread(v8::Isolate* isolate)
-      : Thread("SendCommandThread"),
+      : Thread(Options("SendCommandThread")),
         semaphore_(0),
-        isolate_(isolate) { }
+        isolate_(isolate) {}
 
-  static void ProcessDebugMessages(v8::Isolate* isolate, void* data) {
-    v8::Debug::ProcessDebugMessages();
-    reinterpret_cast<v8::internal::Semaphore*>(data)->Signal();
+  static void CountingAndSignallingMessageHandler(
+      const v8::Debug::Message& message) {
+    if (message.IsResponse()) {
+      counting_message_handler_counter++;
+      send_command_thread_->semaphore_.Signal();
+    }
   }
 
   virtual void Run() {
@@ -6610,30 +6658,29 @@
     int length = AsciiToUtf16(scripts_command, buffer);
     // Send scripts command.
 
-    for (int i = 0; i < 100; i++) {
+    for (int i = 0; i < 20; i++) {
+      v8::base::ElapsedTimer timer;
+      timer.Start();
       CHECK_EQ(i, counting_message_handler_counter);
       // Queue debug message.
       v8::Debug::SendCommand(isolate_, buffer, length);
-      // Synchronize with the main thread to force message processing.
-      isolate_->RequestInterrupt(ProcessDebugMessages, &semaphore_);
+      // Wait for the message handler to pick up the response.
       semaphore_.Wait();
+      i::PrintF("iteration %d took %f ms\n", i,
+                timer.Elapsed().InMillisecondsF());
     }
 
     v8::V8::TerminateExecution(isolate_);
   }
 
-  void StartSending() {
-    semaphore_.Signal();
-  }
+  void StartSending() { semaphore_.Signal(); }
 
  private:
-  v8::internal::Semaphore semaphore_;
+  v8::base::Semaphore semaphore_;
   v8::Isolate* isolate_;
 };
 
 
-static SendCommandThread* send_command_thread_ = NULL;
-
 static void StartSendingCommands(
     const v8::FunctionCallbackInfo<v8::Value>& info) {
   send_command_thread_->StartSending();
@@ -6647,7 +6694,8 @@
 
   counting_message_handler_counter = 0;
 
-  v8::Debug::SetMessageHandler(CountingMessageHandler);
+  v8::Debug::SetMessageHandler(
+      SendCommandThread::CountingAndSignallingMessageHandler);
   send_command_thread_ = new SendCommandThread(isolate);
   send_command_thread_->Start();
 
@@ -6657,7 +6705,7 @@
 
   CompileRun("start(); while (true) { }");
 
-  CHECK_EQ(100, counting_message_handler_counter);
+  CHECK_EQ(20, counting_message_handler_counter);
 
   v8::Debug::SetMessageHandler(NULL);
   CheckDebuggerUnloaded();
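
The command loop drops from 100 to 20 iterations and now measures each
request/response round trip with v8::base::ElapsedTimer. A minimal sketch of
the timer usage (DoRoundTrip is a hypothetical unit of work):

    v8::base::ElapsedTimer timer;
    timer.Start();   // begin measuring
    DoRoundTrip();   // hypothetical: send a command, wait for the response
    i::PrintF("round trip took %f ms\n", timer.Elapsed().InMillisecondsF());
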
@@ -7386,7 +7434,7 @@
       "};                              \n"
       "a = b = c = 2;                  \n"
       "bar();                          \n";
-  v8::Local<v8::Value> result = PreCompileCompileRun(source);
+  v8::Local<v8::Value> result = ParserCacheCompileRun(source);
   CHECK(result->IsString());
   v8::String::Utf8Value utf8(result);
   CHECK_EQ("bar", *utf8);
@@ -7427,8 +7475,8 @@
 }
 
 
-v8::internal::Semaphore terminate_requested_semaphore(0);
-v8::internal::Semaphore terminate_fired_semaphore(0);
+v8::base::Semaphore terminate_requested_semaphore(0);
+v8::base::Semaphore terminate_fired_semaphore(0);
 bool terminate_already_fired = false;
 
 
@@ -7437,18 +7485,15 @@
   if (event_details.GetEvent() != v8::Break || terminate_already_fired) return;
   terminate_requested_semaphore.Signal();
   // Wait for at most 2 seconds for the terminate request.
-  CHECK(terminate_fired_semaphore.WaitFor(i::TimeDelta::FromSeconds(2)));
+  CHECK(terminate_fired_semaphore.WaitFor(v8::base::TimeDelta::FromSeconds(2)));
   terminate_already_fired = true;
-  v8::internal::Isolate* isolate =
-      v8::Utils::OpenHandle(*event_details.GetEventContext())->GetIsolate();
-  CHECK(isolate->stack_guard()->CheckTerminateExecution());
 }
 
 
-class TerminationThread : public v8::internal::Thread {
+class TerminationThread : public v8::base::Thread {
  public:
-  explicit TerminationThread(v8::Isolate* isolate) : Thread("terminator"),
-                                                     isolate_(isolate) { }
+  explicit TerminationThread(v8::Isolate* isolate)
+      : Thread(Options("terminator")), isolate_(isolate) {}
 
   virtual void Run() {
     terminate_requested_semaphore.Wait();
@@ -7468,6 +7513,8 @@
   v8::Debug::SetDebugEventListener(DebugBreakTriggerTerminate);
   TerminationThread terminator(isolate);
   terminator.Start();
+  v8::TryCatch try_catch;
   v8::Debug::DebugBreak(isolate);
   CompileRun("while (true);");
+  CHECK(try_catch.HasTerminated());
 }
diff --git a/test/cctest/test-declarative-accessors.cc b/test/cctest/test-declarative-accessors.cc
index 7b2170d..8d93245 100644
--- a/test/cctest/test-declarative-accessors.cc
+++ b/test/cctest/test-declarative-accessors.cc
@@ -37,7 +37,7 @@
 class HandleArray : public Malloced {
  public:
   static const unsigned kArraySize = 200;
-  explicit HandleArray() {}
+  HandleArray() {}
   ~HandleArray() { Reset(); }
   void Reset() {
     for (unsigned i = 0; i < kArraySize; i++) {
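
Dropping explicit from HandleArray's constructor is a style fix: the keyword
only matters for constructors callable with a single argument, where it blocks
implicit conversions; on a zero-argument constructor it has no effect in the
C++ dialect V8 targeted at the time. Illustration (Buffer is hypothetical):

    class Buffer {
     public:
      Buffer() {}                 // 'explicit' here would be meaningless
      explicit Buffer(int size);  // here it blocks: Buffer b = 200;
    };
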
diff --git a/test/cctest/test-decls.cc b/test/cctest/test-decls.cc
index cd33811..34f0b69 100644
--- a/test/cctest/test-decls.cc
+++ b/test/cctest/test-decls.cc
@@ -29,7 +29,7 @@
 
 #include "src/v8.h"
 
-#include "src/heap.h"
+#include "src/heap/heap.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8;
@@ -236,17 +236,14 @@
   { DeclarationContext context;
     context.Check("var x; x",
                   1,  // access
-                  1,  // declaration
-                  2,  // declaration + initialization
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
+                  0, 0, EXPECT_RESULT, Undefined(CcTest::isolate()));
   }
 
   { DeclarationContext context;
     context.Check("var x = 0; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  2,  // declaration + initialization
-                  EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
+                  1,  // initialization
+                  0, EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
   }
 
   { DeclarationContext context;
@@ -260,78 +257,19 @@
   { DeclarationContext context;
     context.Check("const x; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  1,  // declaration
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
+                  0, 0, EXPECT_RESULT, Undefined(CcTest::isolate()));
   }
 
   { DeclarationContext context;
-    // SB 0 - BUG 1213579
     context.Check("const x = 0; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  1,  // declaration
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
-  }
-}
-
-
-
-class PresentPropertyContext: public DeclarationContext {
- protected:
-  virtual v8::Handle<Integer> Query(Local<String> key) {
-    return Integer::New(isolate(), v8::None);
-  }
-};
-
-
-
-TEST(Present) {
-  HandleScope scope(CcTest::isolate());
-
-  { PresentPropertyContext context;
-    context.Check("var x; x",
-                  1,  // access
-                  0,
-                  2,  // declaration + initialization
-                  EXPECT_EXCEPTION);  // x is not defined!
-  }
-
-  { PresentPropertyContext context;
-    context.Check("var x = 0; x",
-                  1,  // access
-                  1,  // initialization
-                  2,  // declaration + initialization
-                  EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
-  }
-
-  { PresentPropertyContext context;
-    context.Check("function x() { }; x",
-                  1,  // access
                   0,
                   0,
-                  EXPECT_RESULT);
-  }
-
-  { PresentPropertyContext context;
-    context.Check("const x; x",
-                  1,  // access
-                  1,  // initialization
-                  1,  // (re-)declaration
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
-  }
-
-  { PresentPropertyContext context;
-    context.Check("const x = 0; x",
-                  1,  // access
-                  1,  // initialization
-                  1,  // (re-)declaration
                   EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
   }
 }
 
 
-
 class AbsentPropertyContext: public DeclarationContext {
  protected:
   virtual v8::Handle<Integer> Query(Local<String> key) {
@@ -348,17 +286,14 @@
   { AbsentPropertyContext context;
     context.Check("var x; x",
                   1,  // access
-                  1,  // declaration
-                  2,  // declaration + initialization
-                  EXPECT_RESULT, Undefined(isolate));
+                  0, 0, EXPECT_RESULT, Undefined(isolate));
   }
 
   { AbsentPropertyContext context;
     context.Check("var x = 0; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  2,  // declaration + initialization
-                  EXPECT_RESULT, Number::New(isolate, 0));
+                  1,  // initialization
+                  0, EXPECT_RESULT, Number::New(isolate, 0));
   }
 
   { AbsentPropertyContext context;
@@ -372,25 +307,19 @@
   { AbsentPropertyContext context;
     context.Check("const x; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  1,  // declaration
-                  EXPECT_RESULT, Undefined(isolate));
+                  0, 0, EXPECT_RESULT, Undefined(isolate));
   }
 
   { AbsentPropertyContext context;
     context.Check("const x = 0; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  1,  // declaration
-                  EXPECT_RESULT, Undefined(isolate));  // SB 0 - BUG 1213579
+                  0, 0, EXPECT_RESULT, Number::New(isolate, 0));
   }
 
   { AbsentPropertyContext context;
     context.Check("if (false) { var x = 0 }; x",
                   1,  // access
-                  1,  // declaration
-                  1,  // declaration + initialization
-                  EXPECT_RESULT, Undefined(isolate));
+                  0, 0, EXPECT_RESULT, Undefined(isolate));
   }
 }
 
@@ -439,17 +368,14 @@
   { AppearingPropertyContext context;
     context.Check("var x; x",
                   1,  // access
-                  1,  // declaration
-                  2,  // declaration + initialization
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
+                  0, 0, EXPECT_RESULT, Undefined(CcTest::isolate()));
   }
 
   { AppearingPropertyContext context;
     context.Check("var x = 0; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  2,  // declaration + initialization
-                  EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
+                  1,  // initialization
+                  0, EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
   }
 
   { AppearingPropertyContext context;
@@ -463,78 +389,13 @@
   { AppearingPropertyContext context;
     context.Check("const x; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  1,  // declaration
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
+                  0, 0, EXPECT_RESULT, Undefined(CcTest::isolate()));
   }
 
   { AppearingPropertyContext context;
     context.Check("const x = 0; x",
                   1,  // access
-                  2,  // declaration + initialization
-                  1,  // declaration
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
-                  // Result is undefined because declaration succeeded but
-                  // initialization to 0 failed (due to context behavior).
-  }
-}
-
-
-
-class ReappearingPropertyContext: public DeclarationContext {
- public:
-  enum State {
-    DECLARE,
-    DONT_DECLARE,
-    INITIALIZE,
-    UNKNOWN
-  };
-
-  ReappearingPropertyContext() : state_(DECLARE) { }
-
- protected:
-  virtual v8::Handle<Integer> Query(Local<String> key) {
-    switch (state_) {
-      case DECLARE:
-        // Force the first declaration by returning that
-        // the property is absent.
-        state_ = DONT_DECLARE;
-        return Handle<Integer>();
-      case DONT_DECLARE:
-        // Ignore the second declaration by returning
-        // that the property is already there.
-        state_ = INITIALIZE;
-        return Integer::New(isolate(), v8::None);
-      case INITIALIZE:
-        // Force an initialization by returning that
-        // the property is absent. This will make sure
-        // that the setter is called and it will not
-        // lead to redeclaration conflicts (yet).
-        state_ = UNKNOWN;
-        return Handle<Integer>();
-      default:
-        CHECK(state_ == UNKNOWN);
-        break;
-    }
-    // Do the lookup in the object.
-    return Handle<Integer>();
-  }
-
- private:
-  State state_;
-};
-
-
-TEST(Reappearing) {
-  v8::V8::Initialize();
-  HandleScope scope(CcTest::isolate());
-
-  { ReappearingPropertyContext context;
-    context.Check("const x; var x = 0",
-                  0,
-                  3,  // const declaration+initialization, var initialization
-                  3,  // 2 x declaration + var initialization
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
+                  0, 0, EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
   }
 }
 
@@ -562,11 +423,8 @@
   // Sanity check to make sure that the holder of the interceptor
   // really is the prototype object.
   { ExistsInPrototypeContext context;
-    context.Check("this.x = 87; this.x",
-                  0,
-                  0,
-                  0,
-                  EXPECT_RESULT, Number::New(CcTest::isolate(), 87));
+    context.Check("this.x = 87; this.x", 0, 0, 1, EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 87));
   }
 
   { ExistsInPrototypeContext context;
@@ -669,19 +527,13 @@
   HandleScope scope(CcTest::isolate());
 
   { ExistsInHiddenPrototypeContext context;
-    context.Check("var x; x",
-                  1,  // access
-                  0,
-                  2,  // declaration + initialization
-                  EXPECT_EXCEPTION);  // x is not defined!
+    context.Check("var x; x", 0, 0, 0, EXPECT_RESULT,
+                  Undefined(CcTest::isolate()));
   }
 
   { ExistsInHiddenPrototypeContext context;
-    context.Check("var x = 0; x",
-                  1,  // access
-                  1,  // initialization
-                  2,  // declaration + initialization
-                  EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
+    context.Check("var x = 0; x", 0, 0, 0, EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 0));
   }
 
   { ExistsInHiddenPrototypeContext context;
@@ -694,20 +546,14 @@
 
   // TODO(mstarzinger): The semantics of global const is vague.
   { ExistsInHiddenPrototypeContext context;
-    context.Check("const x; x",
-                  0,
-                  0,
-                  1,  // (re-)declaration
-                  EXPECT_RESULT, Undefined(CcTest::isolate()));
+    context.Check("const x; x", 0, 0, 0, EXPECT_RESULT,
+                  Undefined(CcTest::isolate()));
   }
 
   // TODO(mstarzinger): The semantics of global const is vague.
   { ExistsInHiddenPrototypeContext context;
-    context.Check("const x = 0; x",
-                  0,
-                  0,
-                  1,  // (re-)declaration
-                  EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
+    context.Check("const x = 0; x", 0, 0, 0, EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 0));
   }
 }
 
@@ -768,10 +614,8 @@
                   EXPECT_RESULT, Number::New(isolate, 1));
     context.Check("var x = 2; x",
                   EXPECT_RESULT, Number::New(isolate, 2));
-    context.Check("const x = 3; x",
-                  EXPECT_RESULT, Number::New(isolate, 3));
-    context.Check("const x = 4; x",
-                  EXPECT_RESULT, Number::New(isolate, 4));
+    context.Check("const x = 3; x", EXPECT_EXCEPTION);
+    context.Check("const x = 4; x", EXPECT_EXCEPTION);
     context.Check("x = 5; x",
                   EXPECT_RESULT, Number::New(isolate, 5));
     context.Check("var x = 6; x",
@@ -787,8 +631,7 @@
                   EXPECT_RESULT, Number::New(isolate, 1));
     context.Check("var x = 2; x",  // assignment ignored
                   EXPECT_RESULT, Number::New(isolate, 1));
-    context.Check("const x = 3; x",
-                  EXPECT_RESULT, Number::New(isolate, 1));
+    context.Check("const x = 3; x", EXPECT_EXCEPTION);
     context.Check("x = 4; x",  // assignment ignored
                   EXPECT_RESULT, Number::New(isolate, 1));
     context.Check("var x = 5; x",  // assignment ignored
diff --git a/test/cctest/test-deoptimization.cc b/test/cctest/test-deoptimization.cc
index f7359a9..a201ccd 100644
--- a/test/cctest/test-deoptimization.cc
+++ b/test/cctest/test-deoptimization.cc
@@ -30,20 +30,19 @@
 #include "src/v8.h"
 
 #include "src/api.h"
+#include "src/base/platform/platform.h"
 #include "src/compilation-cache.h"
 #include "src/debug.h"
 #include "src/deoptimizer.h"
 #include "src/isolate.h"
-#include "src/platform.h"
-#include "src/stub-cache.h"
 #include "test/cctest/cctest.h"
 
+using ::v8::base::OS;
 using ::v8::internal::Deoptimizer;
 using ::v8::internal::EmbeddedVector;
 using ::v8::internal::Handle;
 using ::v8::internal::Isolate;
 using ::v8::internal::JSFunction;
-using ::v8::internal::OS;
 using ::v8::internal::Object;
 
 // Size of temp buffer for formatting small strings.
@@ -99,8 +98,8 @@
 
 // Abort any ongoing incremental marking to make sure that all weak global
 // handle callbacks are processed.
-static void NonIncrementalGC() {
-  CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+static void NonIncrementalGC(i::Isolate* isolate) {
+  isolate->heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
 }
 
 
@@ -113,6 +112,8 @@
 
 
 TEST(DeoptimizeSimple) {
+  i::FLAG_turbo_deoptimization = true;
+
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -126,7 +127,7 @@
         "function f() { g(); };"
         "f();");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -142,7 +143,7 @@
         "function f(x) { if (x) { g(); } else { return } };"
         "f(true);");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -151,6 +152,8 @@
 
 
 TEST(DeoptimizeSimpleWithArguments) {
+  i::FLAG_turbo_deoptimization = true;
+
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -164,7 +167,7 @@
         "function f(x, y, z) { g(1,x); y+z; };"
         "f(1, \"2\", false);");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -181,7 +184,7 @@
         "function f(x, y, z) { if (x) { g(x, y); } else { return y + z; } };"
         "f(true, 1, \"2\");");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
@@ -190,6 +193,8 @@
 
 
 TEST(DeoptimizeSimpleNested) {
+  i::FLAG_turbo_deoptimization = true;
+
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -204,7 +209,7 @@
         "function g(z) { count++; %DeoptimizeFunction(f); return z;}"
         "function f(x,y,z) { return h(x, y, g(z)); };"
         "result = f(1, 2, 3);");
-    NonIncrementalGC();
+    NonIncrementalGC(CcTest::i_isolate());
 
     CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
     CHECK_EQ(6, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -215,6 +220,7 @@
 
 
 TEST(DeoptimizeRecursive) {
+  i::FLAG_turbo_deoptimization = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -229,7 +235,7 @@
         "function f(x) { calls++; if (x > 0) { f(x - 1); } else { g(); } };"
         "f(10);");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK_EQ(11, env->Global()->Get(v8_str("calls"))->Int32Value());
@@ -242,6 +248,7 @@
 
 
 TEST(DeoptimizeMultiple) {
+  i::FLAG_turbo_deoptimization = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -261,7 +268,7 @@
         "function f1(x) { return f2(x + 1, x + 1) + x; };"
         "result = f1(1);");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -270,6 +277,7 @@
 
 
 TEST(DeoptimizeConstructor) {
+  i::FLAG_turbo_deoptimization = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -282,7 +290,7 @@
         "function f() {  g(); };"
         "result = new f() instanceof f;");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK(env->Global()->Get(v8_str("result"))->IsTrue());
@@ -299,7 +307,7 @@
         "result = new f(1, 2);"
         "result = result.x + result.y;");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK_EQ(3, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -308,6 +316,7 @@
 
 
 TEST(DeoptimizeConstructorMultiple) {
+  i::FLAG_turbo_deoptimization = true;
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
 
@@ -328,7 +337,7 @@
         "function f1(x) { this.result = new f2(x + 1, x + 1).result + x; };"
         "result = new f1(1).result;");
   }
-  NonIncrementalGC();
+  NonIncrementalGC(CcTest::i_isolate());
 
   CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
   CHECK_EQ(14, env->Global()->Get(v8_str("result"))->Int32Value());
@@ -336,50 +345,61 @@
 }
 
 
-TEST(DeoptimizeBinaryOperationADDString) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationADDString) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
   AllowNativesSyntaxNoInlining options;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  const char* f_source = "function f(x, y) { return x + y; };";
-
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
   {
-    // Compile function f and collect to type feedback to insert binary op stub
-    // call in the optimized code.
-    i::FLAG_prepare_always_opt = true;
-    CompileRun("var count = 0;"
-               "var result = 0;"
-               "var deopt = false;"
-               "function X() { };"
-               "X.prototype.toString = function () {"
-               "  if (deopt) { count++; %DeoptimizeFunction(f); } return 'an X'"
-               "};");
-    CompileRun(f_source);
-    CompileRun("for (var i = 0; i < 5; i++) {"
-               "  f('a+', new X());"
-               "};");
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
 
-    // Compile an optimized version of f.
-    i::FLAG_always_opt = true;
-    CompileRun(f_source);
-    CompileRun("f('a+', new X());");
-    CHECK(!CcTest::i_isolate()->use_crankshaft() ||
-          GetJSFunction(env->Global(), "f")->IsOptimized());
+    const char* f_source = "function f(x, y) { return x + y; };";
 
-    // Call f and force deoptimization while processing the binary operation.
-    CompileRun("deopt = true;"
-               "var result = f('a+', new X());");
+    {
+      // Compile function f and collect type feedback to insert a binary op
+      // stub call in the optimized code.
+      i::FLAG_prepare_always_opt = true;
+      CompileRun(
+          "var count = 0;"
+          "var result = 0;"
+          "var deopt = false;"
+          "function X() { };"
+          "X.prototype.toString = function () {"
+          "  if (deopt) { count++; %DeoptimizeFunction(f); } return 'an X'"
+          "};");
+      CompileRun(f_source);
+      CompileRun(
+          "for (var i = 0; i < 5; i++) {"
+          "  f('a+', new X());"
+          "};");
+
+      // Compile an optimized version of f.
+      i::FLAG_always_opt = true;
+      CompileRun(f_source);
+      CompileRun("f('a+', new X());");
+      CHECK(!i_isolate->use_crankshaft() ||
+            GetJSFunction(env->Global(), "f")->IsOptimized());
+
+      // Call f and force deoptimization while processing the binary operation.
+      CompileRun(
+          "deopt = true;"
+          "var result = f('a+', new X());");
+    }
+    NonIncrementalGC(i_isolate);
+
+    CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
+    CHECK(result->IsString());
+    v8::String::Utf8Value utf8(result);
+    CHECK_EQ("a+an X", *utf8);
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
   }
-  NonIncrementalGC();
-
-  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
-  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
-  v8::Handle<v8::Value> result = env->Global()->Get(v8_str("result"));
-  CHECK(result->IsString());
-  v8::String::Utf8Value utf8(result);
-  CHECK_EQ("a+an X", *utf8);
-  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+  isolate->Exit();
+  isolate->Dispose();
 }
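
The rewrite above is the pattern applied to every converted test in this file: a plain TEST, which runs in the shared CcTest isolate, becomes an UNINITIALIZED_TEST that creates, enters, and disposes of its own isolate, so the flag assignments at the top take effect before the isolate is initialized. A condensed sketch of the lifecycle, with the body elided (the test name is illustrative):

    UNINITIALIZED_TEST(Example) {
      i::FLAG_turbo_deoptimization = true;  // must be set before Isolate::New()
      v8::Isolate* isolate = v8::Isolate::New();
      i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
      isolate->Enter();
      {
        LocalContext env(isolate);
        v8::HandleScope scope(env->GetIsolate());
        // ...CompileRun(...) and CHECKs against i_isolate...
      }  // context and handles are destroyed before the isolate
      isolate->Exit();
      isolate->Dispose();
    }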
 
 
@@ -396,6 +416,7 @@
 
 static void TestDeoptimizeBinaryOpHelper(LocalContext* env,
                                          const char* binary_op) {
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>((*env)->GetIsolate());
   EmbeddedVector<char, SMALL_STRING_BUFFER_SIZE> f_source_buffer;
   SNPrintF(f_source_buffer,
            "function f(x, y) { return x %s y; };",
@@ -416,282 +437,355 @@
   i::FLAG_always_opt = true;
   CompileRun(f_source);
   CompileRun("f(7, new X());");
-  CHECK(!CcTest::i_isolate()->use_crankshaft() ||
+  CHECK(!i_isolate->use_crankshaft() ||
         GetJSFunction((*env)->Global(), "f")->IsOptimized());
 
   // Call f and force deoptimization while processing the binary operation.
   CompileRun("deopt = true;"
              "var result = f(7, new X());");
-  NonIncrementalGC();
+  NonIncrementalGC(i_isolate);
   CHECK(!GetJSFunction((*env)->Global(), "f")->IsOptimized());
 }
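
The helper codifies the deopt trick used throughout this file: the second operand is an object whose toString, reached when the binary operation coerces it, calls %DeoptimizeFunction(f) while f's optimized frame is still on the stack (AllowNativesSyntaxNoInlining enables the % runtime-call syntax). Condensed from the surrounding hunks, the JavaScript driven through CompileRun has roughly this shape; returning '8' reproduces the arithmetic the CHECKs below expect:

    CompileRun(
        "var count = 0, result = 0, deopt = false;"
        "function X() { };"
        "X.prototype.toString = function () {"
        "  if (deopt) { count++; %DeoptimizeFunction(f); }"  // runs mid-operation
        "  return '8';"
        "};");
    // ...warm up f, set i::FLAG_always_opt, recompile, then:
    CompileRun("deopt = true; var result = f(7, new X());");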
 
 
-TEST(DeoptimizeBinaryOperationADD) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationADD) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  TestDeoptimizeBinaryOpHelper(&env, "+");
-
-  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
-  CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
-  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
-}
-
-
-TEST(DeoptimizeBinaryOperationSUB) {
-  i::FLAG_concurrent_recompilation = false;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  TestDeoptimizeBinaryOpHelper(&env, "-");
-
-  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
-  CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
-  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
-}
-
-
-TEST(DeoptimizeBinaryOperationMUL) {
-  i::FLAG_concurrent_recompilation = false;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  TestDeoptimizeBinaryOpHelper(&env, "*");
-
-  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
-  CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
-  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
-}
-
-
-TEST(DeoptimizeBinaryOperationDIV) {
-  i::FLAG_concurrent_recompilation = false;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  TestDeoptimizeBinaryOpHelper(&env, "/");
-
-  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
-  CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
-  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
-}
-
-
-TEST(DeoptimizeBinaryOperationMOD) {
-  i::FLAG_concurrent_recompilation = false;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  TestDeoptimizeBinaryOpHelper(&env, "%");
-
-  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
-  CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
-  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
-}
-
-
-TEST(DeoptimizeCompare) {
-  i::FLAG_concurrent_recompilation = false;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  const char* f_source = "function f(x, y) { return x < y; };";
-
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
   {
-    AllowNativesSyntaxNoInlining options;
-    // Compile function f and collect to type feedback to insert compare ic
-    // call in the optimized code.
-    i::FLAG_prepare_always_opt = true;
-    CompileRun("var count = 0;"
-               "var result = 0;"
-               "var deopt = false;"
-               "function X() { };"
-               "X.prototype.toString = function () {"
-               "  if (deopt) { count++; %DeoptimizeFunction(f); } return 'b'"
-               "};");
-    CompileRun(f_source);
-    CompileRun("for (var i = 0; i < 5; i++) {"
-               "  f('a', new X());"
-               "};");
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
 
-    // Compile an optimized version of f.
-    i::FLAG_always_opt = true;
-    CompileRun(f_source);
-    CompileRun("f('a', new X());");
-    CHECK(!CcTest::i_isolate()->use_crankshaft() ||
-          GetJSFunction(env->Global(), "f")->IsOptimized());
+    TestDeoptimizeBinaryOpHelper(&env, "+");
 
-    // Call f and force deoptimization while processing the comparison.
-    CompileRun("deopt = true;"
-               "var result = f('a', new X());");
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(15, env->Global()->Get(v8_str("result"))->Int32Value());
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
   }
-  NonIncrementalGC();
-
-  CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
-  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
-  CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
-  CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(CcTest::i_isolate()));
+  isolate->Exit();
+  isolate->Dispose();
 }
 
 
-TEST(DeoptimizeLoadICStoreIC) {
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationSUB) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  // Functions to generate load/store/keyed load/keyed store IC calls.
-  const char* f1_source = "function f1(x) { return x.y; };";
-  const char* g1_source = "function g1(x) { x.y = 1; };";
-  const char* f2_source = "function f2(x, y) { return x[y]; };";
-  const char* g2_source = "function g2(x, y) { x[y] = 1; };";
-
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
   {
-    AllowNativesSyntaxNoInlining options;
-    // Compile functions and collect to type feedback to insert ic
-    // calls in the optimized code.
-    i::FLAG_prepare_always_opt = true;
-    CompileRun("var count = 0;"
-               "var result = 0;"
-               "var deopt = false;"
-               "function X() { };"
-               "X.prototype.__defineGetter__('y', function () {"
-               "  if (deopt) { count++; %DeoptimizeFunction(f1); };"
-               "  return 13;"
-               "});"
-               "X.prototype.__defineSetter__('y', function () {"
-               "  if (deopt) { count++; %DeoptimizeFunction(g1); };"
-               "});"
-               "X.prototype.__defineGetter__('z', function () {"
-               "  if (deopt) { count++; %DeoptimizeFunction(f2); };"
-               "  return 13;"
-               "});"
-               "X.prototype.__defineSetter__('z', function () {"
-               "  if (deopt) { count++; %DeoptimizeFunction(g2); };"
-               "});");
-    CompileRun(f1_source);
-    CompileRun(g1_source);
-    CompileRun(f2_source);
-    CompileRun(g2_source);
-    CompileRun("for (var i = 0; i < 5; i++) {"
-               "  f1(new X());"
-               "  g1(new X());"
-               "  f2(new X(), 'z');"
-               "  g2(new X(), 'z');"
-               "};");
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
 
-    // Compile an optimized version of the functions.
-    i::FLAG_always_opt = true;
-    CompileRun(f1_source);
-    CompileRun(g1_source);
-    CompileRun(f2_source);
-    CompileRun(g2_source);
-    CompileRun("f1(new X());");
-    CompileRun("g1(new X());");
-    CompileRun("f2(new X(), 'z');");
-    CompileRun("g2(new X(), 'z');");
-    if (CcTest::i_isolate()->use_crankshaft()) {
-      CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
-      CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
-      CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
-      CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+    TestDeoptimizeBinaryOpHelper(&env, "-");
+
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(-1, env->Global()->Get(v8_str("result"))->Int32Value());
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+  }
+  isolate->Exit();
+  isolate->Dispose();
+}
+
+
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationMUL) {
+  i::FLAG_turbo_deoptimization = true;
+  i::FLAG_concurrent_recompilation = false;
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
+  {
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
+
+    TestDeoptimizeBinaryOpHelper(&env, "*");
+
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(56, env->Global()->Get(v8_str("result"))->Int32Value());
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+  }
+  isolate->Exit();
+  isolate->Dispose();
+}
+
+
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationDIV) {
+  i::FLAG_turbo_deoptimization = true;
+  i::FLAG_concurrent_recompilation = false;
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
+  {
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
+
+    TestDeoptimizeBinaryOpHelper(&env, "/");
+
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(0, env->Global()->Get(v8_str("result"))->Int32Value());
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+  }
+  isolate->Exit();
+  isolate->Dispose();
+}
+
+
+UNINITIALIZED_TEST(DeoptimizeBinaryOperationMOD) {
+  i::FLAG_turbo_deoptimization = true;
+  i::FLAG_concurrent_recompilation = false;
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
+  {
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
+
+    TestDeoptimizeBinaryOpHelper(&env, "%");
+
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(7, env->Global()->Get(v8_str("result"))->Int32Value());
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
+  }
+  isolate->Exit();
+  isolate->Dispose();
+}
+
+
+UNINITIALIZED_TEST(DeoptimizeCompare) {
+  i::FLAG_turbo_deoptimization = true;
+  i::FLAG_concurrent_recompilation = false;
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
+  {
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
+
+    const char* f_source = "function f(x, y) { return x < y; };";
+
+    {
+      AllowNativesSyntaxNoInlining options;
+      // Compile function f and collect type feedback to insert compare ic
+      // call in the optimized code.
+      i::FLAG_prepare_always_opt = true;
+      CompileRun(
+          "var count = 0;"
+          "var result = 0;"
+          "var deopt = false;"
+          "function X() { };"
+          "X.prototype.toString = function () {"
+          "  if (deopt) { count++; %DeoptimizeFunction(f); } return 'b'"
+          "};");
+      CompileRun(f_source);
+      CompileRun(
+          "for (var i = 0; i < 5; i++) {"
+          "  f('a', new X());"
+          "};");
+
+      // Compile an optimized version of f.
+      i::FLAG_always_opt = true;
+      CompileRun(f_source);
+      CompileRun("f('a', new X());");
+      CHECK(!i_isolate->use_crankshaft() ||
+            GetJSFunction(env->Global(), "f")->IsOptimized());
+
+      // Call f and force deoptimization while processing the comparison.
+      CompileRun(
+          "deopt = true;"
+          "var result = f('a', new X());");
     }
+    NonIncrementalGC(i_isolate);
 
-    // Call functions and force deoptimization while processing the ics.
-    CompileRun("deopt = true;"
-               "var result = f1(new X());"
-               "g1(new X());"
-               "f2(new X(), 'z');"
-               "g2(new X(), 'z');");
+    CHECK(!GetJSFunction(env->Global(), "f")->IsOptimized());
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(true, env->Global()->Get(v8_str("result"))->BooleanValue());
+    CHECK_EQ(0, Deoptimizer::GetDeoptimizedCodeCount(i_isolate));
   }
-  NonIncrementalGC();
-
-  CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
-  CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
-  CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
-  CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
-  CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
-  CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+  isolate->Exit();
+  isolate->Dispose();
 }
 
 
-TEST(DeoptimizeLoadICStoreICNested) {
+UNINITIALIZED_TEST(DeoptimizeLoadICStoreIC) {
+  i::FLAG_turbo_deoptimization = true;
   i::FLAG_concurrent_recompilation = false;
-  LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-
-  // Functions to generate load/store/keyed load/keyed store IC calls.
-  const char* f1_source = "function f1(x) { return x.y; };";
-  const char* g1_source = "function g1(x) { x.y = 1; };";
-  const char* f2_source = "function f2(x, y) { return x[y]; };";
-  const char* g2_source = "function g2(x, y) { x[y] = 1; };";
-
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
   {
-    AllowNativesSyntaxNoInlining options;
-    // Compile functions and collect to type feedback to insert ic
-    // calls in the optimized code.
-    i::FLAG_prepare_always_opt = true;
-    CompileRun("var count = 0;"
-               "var result = 0;"
-               "var deopt = false;"
-               "function X() { };"
-               "X.prototype.__defineGetter__('y', function () {"
-               "  g1(this);"
-               "  return 13;"
-               "});"
-               "X.prototype.__defineSetter__('y', function () {"
-               "  f2(this, 'z');"
-               "});"
-               "X.prototype.__defineGetter__('z', function () {"
-               "  g2(this, 'z');"
-               "});"
-               "X.prototype.__defineSetter__('z', function () {"
-               "  if (deopt) {"
-               "    count++;"
-               "    %DeoptimizeFunction(f1);"
-               "    %DeoptimizeFunction(g1);"
-               "    %DeoptimizeFunction(f2);"
-               "    %DeoptimizeFunction(g2); };"
-               "});");
-    CompileRun(f1_source);
-    CompileRun(g1_source);
-    CompileRun(f2_source);
-    CompileRun(g2_source);
-    CompileRun("for (var i = 0; i < 5; i++) {"
-               "  f1(new X());"
-               "  g1(new X());"
-               "  f2(new X(), 'z');"
-               "  g2(new X(), 'z');"
-               "};");
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
 
-    // Compile an optimized version of the functions.
-    i::FLAG_always_opt = true;
-    CompileRun(f1_source);
-    CompileRun(g1_source);
-    CompileRun(f2_source);
-    CompileRun(g2_source);
-    CompileRun("f1(new X());");
-    CompileRun("g1(new X());");
-    CompileRun("f2(new X(), 'z');");
-    CompileRun("g2(new X(), 'z');");
-    if (CcTest::i_isolate()->use_crankshaft()) {
-      CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
-      CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
-      CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
-      CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+    // Functions to generate load/store/keyed load/keyed store IC calls.
+    const char* f1_source = "function f1(x) { return x.y; };";
+    const char* g1_source = "function g1(x) { x.y = 1; };";
+    const char* f2_source = "function f2(x, y) { return x[y]; };";
+    const char* g2_source = "function g2(x, y) { x[y] = 1; };";
+
+    {
+      AllowNativesSyntaxNoInlining options;
+      // Compile functions and collect type feedback to insert ic
+      // calls in the optimized code.
+      i::FLAG_prepare_always_opt = true;
+      CompileRun(
+          "var count = 0;"
+          "var result = 0;"
+          "var deopt = false;"
+          "function X() { };"
+          "X.prototype.__defineGetter__('y', function () {"
+          "  if (deopt) { count++; %DeoptimizeFunction(f1); };"
+          "  return 13;"
+          "});"
+          "X.prototype.__defineSetter__('y', function () {"
+          "  if (deopt) { count++; %DeoptimizeFunction(g1); };"
+          "});"
+          "X.prototype.__defineGetter__('z', function () {"
+          "  if (deopt) { count++; %DeoptimizeFunction(f2); };"
+          "  return 13;"
+          "});"
+          "X.prototype.__defineSetter__('z', function () {"
+          "  if (deopt) { count++; %DeoptimizeFunction(g2); };"
+          "});");
+      CompileRun(f1_source);
+      CompileRun(g1_source);
+      CompileRun(f2_source);
+      CompileRun(g2_source);
+      CompileRun(
+          "for (var i = 0; i < 5; i++) {"
+          "  f1(new X());"
+          "  g1(new X());"
+          "  f2(new X(), 'z');"
+          "  g2(new X(), 'z');"
+          "};");
+
+      // Compile an optimized version of the functions.
+      i::FLAG_always_opt = true;
+      CompileRun(f1_source);
+      CompileRun(g1_source);
+      CompileRun(f2_source);
+      CompileRun(g2_source);
+      CompileRun("f1(new X());");
+      CompileRun("g1(new X());");
+      CompileRun("f2(new X(), 'z');");
+      CompileRun("g2(new X(), 'z');");
+      if (i_isolate->use_crankshaft()) {
+        CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
+        CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
+        CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
+        CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+      }
+
+      // Call functions and force deoptimization while processing the ics.
+      CompileRun(
+          "deopt = true;"
+          "var result = f1(new X());"
+          "g1(new X());"
+          "f2(new X(), 'z');"
+          "g2(new X(), 'z');");
     }
+    NonIncrementalGC(i_isolate);
 
-    // Call functions and force deoptimization while processing the ics.
-    CompileRun("deopt = true;"
-               "var result = f1(new X());");
+    CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
+    CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
+    CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
+    CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
+    CHECK_EQ(4, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
   }
-  NonIncrementalGC();
+  isolate->Exit();
+  isolate->Dispose();
+}
 
-  CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
-  CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
-  CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
-  CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
-  CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
-  CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+
+UNINITIALIZED_TEST(DeoptimizeLoadICStoreICNested) {
+  i::FLAG_turbo_deoptimization = true;
+  i::FLAG_concurrent_recompilation = false;
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
+  {
+    LocalContext env(isolate);
+    v8::HandleScope scope(env->GetIsolate());
+
+    // Functions to generate load/store/keyed load/keyed store IC calls.
+    const char* f1_source = "function f1(x) { return x.y; };";
+    const char* g1_source = "function g1(x) { x.y = 1; };";
+    const char* f2_source = "function f2(x, y) { return x[y]; };";
+    const char* g2_source = "function g2(x, y) { x[y] = 1; };";
+
+    {
+      AllowNativesSyntaxNoInlining options;
+      // Compile functions and collect type feedback to insert ic
+      // calls in the optimized code.
+      i::FLAG_prepare_always_opt = true;
+      CompileRun(
+          "var count = 0;"
+          "var result = 0;"
+          "var deopt = false;"
+          "function X() { };"
+          "X.prototype.__defineGetter__('y', function () {"
+          "  g1(this);"
+          "  return 13;"
+          "});"
+          "X.prototype.__defineSetter__('y', function () {"
+          "  f2(this, 'z');"
+          "});"
+          "X.prototype.__defineGetter__('z', function () {"
+          "  g2(this, 'z');"
+          "});"
+          "X.prototype.__defineSetter__('z', function () {"
+          "  if (deopt) {"
+          "    count++;"
+          "    %DeoptimizeFunction(f1);"
+          "    %DeoptimizeFunction(g1);"
+          "    %DeoptimizeFunction(f2);"
+          "    %DeoptimizeFunction(g2); };"
+          "});");
+      CompileRun(f1_source);
+      CompileRun(g1_source);
+      CompileRun(f2_source);
+      CompileRun(g2_source);
+      CompileRun(
+          "for (var i = 0; i < 5; i++) {"
+          "  f1(new X());"
+          "  g1(new X());"
+          "  f2(new X(), 'z');"
+          "  g2(new X(), 'z');"
+          "};");
+
+      // Compile an optimized version of the functions.
+      i::FLAG_always_opt = true;
+      CompileRun(f1_source);
+      CompileRun(g1_source);
+      CompileRun(f2_source);
+      CompileRun(g2_source);
+      CompileRun("f1(new X());");
+      CompileRun("g1(new X());");
+      CompileRun("f2(new X(), 'z');");
+      CompileRun("g2(new X(), 'z');");
+      if (i_isolate->use_crankshaft()) {
+        CHECK(GetJSFunction(env->Global(), "f1")->IsOptimized());
+        CHECK(GetJSFunction(env->Global(), "g1")->IsOptimized());
+        CHECK(GetJSFunction(env->Global(), "f2")->IsOptimized());
+        CHECK(GetJSFunction(env->Global(), "g2")->IsOptimized());
+      }
+
+      // Call functions and force deoptimization while processing the ics.
+      CompileRun(
+          "deopt = true;"
+          "var result = f1(new X());");
+    }
+    NonIncrementalGC(i_isolate);
+
+    CHECK(!GetJSFunction(env->Global(), "f1")->IsOptimized());
+    CHECK(!GetJSFunction(env->Global(), "g1")->IsOptimized());
+    CHECK(!GetJSFunction(env->Global(), "f2")->IsOptimized());
+    CHECK(!GetJSFunction(env->Global(), "g2")->IsOptimized());
+    CHECK_EQ(1, env->Global()->Get(v8_str("count"))->Int32Value());
+    CHECK_EQ(13, env->Global()->Get(v8_str("result"))->Int32Value());
+  }
+  isolate->Exit();
+  isolate->Dispose();
 }
diff --git a/test/cctest/test-dictionary.cc b/test/cctest/test-dictionary.cc
index 87c58fa..14e5d69 100644
--- a/test/cctest/test-dictionary.cc
+++ b/test/cctest/test-dictionary.cc
@@ -26,15 +26,15 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "src/v8.h"
+#include "test/cctest/cctest.h"
 
 #include "src/api.h"
 #include "src/debug.h"
 #include "src/execution.h"
 #include "src/factory.h"
+#include "src/global-handles.h"
 #include "src/macro-assembler.h"
 #include "src/objects.h"
-#include "src/global-handles.h"
-#include "test/cctest/cctest.h"
 
 using namespace v8::internal;
 
@@ -99,8 +99,8 @@
   for (int i = 0; i < 100; i++) {
     Handle<JSReceiver> key = factory->NewJSArray(7);
     CHECK_EQ(table->Lookup(key), CcTest::heap()->the_hole_value());
-    CHECK_EQ(key->GetIdentityHash(),
-             CcTest::heap()->undefined_value());
+    Object* identity_hash = key->GetIdentityHash();
+    CHECK_EQ(identity_hash, CcTest::heap()->undefined_value());
   }
 }
 
diff --git a/test/cctest/test-disasm-arm64.cc b/test/cctest/test-disasm-arm64.cc
index 346e271..fb01347 100644
--- a/test/cctest/test-disasm-arm64.cc
+++ b/test/cctest/test-disasm-arm64.cc
@@ -27,15 +27,16 @@
 
 #include <stdio.h>
 #include <cstring>
-#include "test/cctest/cctest.h"
 
 #include "src/v8.h"
+#include "test/cctest/cctest.h"
 
 #include "src/macro-assembler.h"
+
 #include "src/arm64/assembler-arm64.h"
-#include "src/arm64/macro-assembler-arm64.h"
 #include "src/arm64/decoder-arm64-inl.h"
 #include "src/arm64/disasm-arm64.h"
+#include "src/arm64/macro-assembler-arm64.h"
 #include "src/arm64/utils-arm64.h"
 
 using namespace v8::internal;
@@ -1601,7 +1602,7 @@
 TEST_(debug) {
   SET_UP();
 
-  ASSERT(kImmExceptionIsDebug == 0xdeb0);
+  DCHECK(kImmExceptionIsDebug == 0xdeb0);
 
   // All debug codes should produce the same instruction, and the debug code
   // can be any uint32_t.
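
ASSERT here becomes DCHECK, the codebase-wide rename for debug-only assertions; behavior is unchanged. The contract, roughly (a sketch of the macro's intent, not V8's actual definition):

    #ifdef DEBUG
    #define DCHECK(condition) CHECK(condition)  // fatal on failure in debug builds
    #else
    #define DCHECK(condition) ((void) 0)        // compiled out in release builds
    #endif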
diff --git a/test/cctest/test-disasm-ia32.cc b/test/cctest/test-disasm-ia32.cc
index aa81b4f..49088f6 100644
--- a/test/cctest/test-disasm-ia32.cc
+++ b/test/cctest/test-disasm-ia32.cc
@@ -32,9 +32,9 @@
 #include "src/debug.h"
 #include "src/disasm.h"
 #include "src/disassembler.h"
+#include "src/ic/ic.h"
 #include "src/macro-assembler.h"
 #include "src/serialize.h"
-#include "src/stub-cache.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
@@ -168,6 +168,11 @@
 
   __ nop();
   __ idiv(edx);
+  __ idiv(Operand(edx, ecx, times_1, 1));
+  __ idiv(Operand(esp, 12));
+  __ div(edx);
+  __ div(Operand(edx, ecx, times_1, 1));
+  __ div(Operand(esp, 12));
   __ mul(edx);
   __ neg(edx);
   __ not_(edx);
@@ -175,7 +180,9 @@
 
   __ imul(edx, Operand(ebx, ecx, times_4, 10000));
   __ imul(edx, ecx, 12);
+  __ imul(edx, Operand(edx, eax, times_2, 42), 8);
   __ imul(edx, ecx, 1000);
+  __ imul(edx, Operand(ebx, ecx, times_4, 1), 9000);
 
   __ inc(edx);
   __ inc(Operand(ebx, ecx, times_4, 10000));
@@ -197,15 +204,24 @@
   __ sar(edx, 1);
   __ sar(edx, 6);
   __ sar_cl(edx);
+  __ sar(Operand(ebx, ecx, times_4, 10000), 1);
+  __ sar(Operand(ebx, ecx, times_4, 10000), 6);
+  __ sar_cl(Operand(ebx, ecx, times_4, 10000));
   __ sbb(edx, Operand(ebx, ecx, times_4, 10000));
   __ shld(edx, Operand(ebx, ecx, times_4, 10000));
   __ shl(edx, 1);
   __ shl(edx, 6);
   __ shl_cl(edx);
+  __ shl(Operand(ebx, ecx, times_4, 10000), 1);
+  __ shl(Operand(ebx, ecx, times_4, 10000), 6);
+  __ shl_cl(Operand(ebx, ecx, times_4, 10000));
   __ shrd(edx, Operand(ebx, ecx, times_4, 10000));
   __ shr(edx, 1);
   __ shr(edx, 7);
   __ shr_cl(edx);
+  __ shr(Operand(ebx, ecx, times_4, 10000), 1);
+  __ shr(Operand(ebx, ecx, times_4, 10000), 6);
+  __ shr_cl(Operand(ebx, ecx, times_4, 10000));
 
 
   // Immediates
@@ -400,6 +416,7 @@
     __ addsd(xmm1, xmm0);
     __ mulsd(xmm1, xmm0);
     __ subsd(xmm1, xmm0);
+    __ subsd(xmm1, Operand(ebx, ecx, times_4, 10000));
     __ divsd(xmm1, xmm0);
     __ ucomisd(xmm0, xmm1);
     __ cmpltsd(xmm0, xmm1);
@@ -441,6 +458,14 @@
     }
   }
 
+  // xchg.
+  {
+    __ xchg(eax, eax);
+    __ xchg(eax, ebx);
+    __ xchg(ebx, ebx);
+    __ xchg(ebx, Operand(esp, 12));
+  }
+
   // Nop instructions
   for (int i = 0; i < 16; i++) {
     __ Nop(i);
@@ -454,7 +479,8 @@
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
   USE(code);
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
   byte* begin = code->instruction_start();
   byte* end = begin + code->instruction_size();
   disasm::Disassembler::Disassemble(stdout, begin, end);
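
The OBJECT_PRINT block reflects an API change: Code::Print now writes to an explicit std::ostream rather than printing straight to stdout, with OFStream (V8's std::ostream adapter over a FILE*) bridging the two. The idiom, as in the hunk above:

    OFStream os(stdout);  // wrap the C stream in a std::ostream
    code->Print(os);      // the printer can now target any ostream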
diff --git a/test/cctest/test-disasm-mips.cc b/test/cctest/test-disasm-mips.cc
index cfd861e..131f413 100644
--- a/test/cctest/test-disasm-mips.cc
+++ b/test/cctest/test-disasm-mips.cc
@@ -110,41 +110,127 @@
   COMPARE(subu(v0, v1, s0),
           "00701023       subu    v0, v1, s0");
 
-  COMPARE(mult(a0, a1),
-          "00850018       mult    a0, a1");
-  COMPARE(mult(t2, t3),
-          "014b0018       mult    t2, t3");
-  COMPARE(mult(v0, v1),
-          "00430018       mult    v0, v1");
+  if (!IsMipsArchVariant(kMips32r6)) {
+    COMPARE(mult(a0, a1),
+            "00850018       mult    a0, a1");
+    COMPARE(mult(t2, t3),
+            "014b0018       mult    t2, t3");
+    COMPARE(mult(v0, v1),
+            "00430018       mult    v0, v1");
 
-  COMPARE(multu(a0, a1),
-          "00850019       multu   a0, a1");
-  COMPARE(multu(t2, t3),
-          "014b0019       multu   t2, t3");
-  COMPARE(multu(v0, v1),
-          "00430019       multu   v0, v1");
+    COMPARE(multu(a0, a1),
+            "00850019       multu   a0, a1");
+    COMPARE(multu(t2, t3),
+            "014b0019       multu   t2, t3");
+    COMPARE(multu(v0, v1),
+            "00430019       multu   v0, v1");
 
-  COMPARE(div(a0, a1),
-          "0085001a       div     a0, a1");
-  COMPARE(div(t2, t3),
-          "014b001a       div     t2, t3");
-  COMPARE(div(v0, v1),
-          "0043001a       div     v0, v1");
+    COMPARE(div(a0, a1),
+            "0085001a       div     a0, a1");
+    COMPARE(div(t2, t3),
+            "014b001a       div     t2, t3");
+    COMPARE(div(v0, v1),
+            "0043001a       div     v0, v1");
 
-  COMPARE(divu(a0, a1),
-          "0085001b       divu    a0, a1");
-  COMPARE(divu(t2, t3),
-          "014b001b       divu    t2, t3");
-  COMPARE(divu(v0, v1),
-          "0043001b       divu    v0, v1");
+    COMPARE(divu(a0, a1),
+            "0085001b       divu    a0, a1");
+    COMPARE(divu(t2, t3),
+            "014b001b       divu    t2, t3");
+    COMPARE(divu(v0, v1),
+            "0043001b       divu    v0, v1");
 
-  if (kArchVariant != kLoongson) {
+    if (!IsMipsArchVariant(kLoongson)) {
+      COMPARE(mul(a0, a1, a2),
+              "70a62002       mul     a0, a1, a2");
+      COMPARE(mul(t2, t3, t4),
+              "716c5002       mul     t2, t3, t4");
+      COMPARE(mul(v0, v1, s0),
+              "70701002       mul     v0, v1, s0");
+    }
+  } else {  // MIPS32r6.
     COMPARE(mul(a0, a1, a2),
-            "70a62002       mul     a0, a1, a2");
-    COMPARE(mul(t2, t3, t4),
-            "716c5002       mul     t2, t3, t4");
-    COMPARE(mul(v0, v1, s0),
-            "70701002       mul     v0, v1, s0");
+            "00a62098       mul    a0, a1, a2");
+    COMPARE(muh(a0, a1, a2),
+            "00a620d8       muh    a0, a1, a2");
+    COMPARE(mul(t1, t2, t3),
+            "014b4898       mul    t1, t2, t3");
+    COMPARE(muh(t1, t2, t3),
+            "014b48d8       muh    t1, t2, t3");
+    COMPARE(mul(v0, v1, a0),
+            "00641098       mul    v0, v1, a0");
+    COMPARE(muh(v0, v1, a0),
+            "006410d8       muh    v0, v1, a0");
+
+    COMPARE(mulu(a0, a1, a2),
+            "00a62099       mulu   a0, a1, a2");
+    COMPARE(muhu(a0, a1, a2),
+            "00a620d9       muhu   a0, a1, a2");
+    COMPARE(mulu(t1, t2, t3),
+            "014b4899       mulu   t1, t2, t3");
+    COMPARE(muhu(t1, t2, t3),
+            "014b48d9       muhu   t1, t2, t3");
+    COMPARE(mulu(v0, v1, a0),
+            "00641099       mulu   v0, v1, a0");
+    COMPARE(muhu(v0, v1, a0),
+            "006410d9       muhu   v0, v1, a0");
+
+    COMPARE(div(a0, a1, a2),
+            "00a6209a       div    a0, a1, a2");
+    COMPARE(mod(a0, a1, a2),
+            "00a620da       mod    a0, a1, a2");
+    COMPARE(div(t1, t2, t3),
+            "014b489a       div    t1, t2, t3");
+    COMPARE(mod(t1, t2, t3),
+            "014b48da       mod    t1, t2, t3");
+    COMPARE(div(v0, v1, a0),
+            "0064109a       div    v0, v1, a0");
+    COMPARE(mod(v0, v1, a0),
+            "006410da       mod    v0, v1, a0");
+
+    COMPARE(divu(a0, a1, a2),
+            "00a6209b       divu   a0, a1, a2");
+    COMPARE(modu(a0, a1, a2),
+            "00a620db       modu   a0, a1, a2");
+    COMPARE(divu(t1, t2, t3),
+            "014b489b       divu   t1, t2, t3");
+    COMPARE(modu(t1, t2, t3),
+            "014b48db       modu   t1, t2, t3");
+    COMPARE(divu(v0, v1, a0),
+            "0064109b       divu   v0, v1, a0");
+    COMPARE(modu(v0, v1, a0),
+            "006410db       modu   v0, v1, a0");
+
+    COMPARE(bovc(a0, a0, static_cast<int16_t>(0)),
+            "20840000       bovc  a0, a0, 0");
+    COMPARE(bovc(a1, a0, static_cast<int16_t>(0)),
+            "20a40000       bovc  a1, a0, 0");
+    COMPARE(bovc(a1, a0, 32767),
+            "20a47fff       bovc  a1, a0, 32767");
+    COMPARE(bovc(a1, a0, -32768),
+            "20a48000       bovc  a1, a0, -32768");
+
+    COMPARE(bnvc(a0, a0, static_cast<int16_t>(0)),
+            "60840000       bnvc  a0, a0, 0");
+    COMPARE(bnvc(a1, a0, static_cast<int16_t>(0)),
+            "60a40000       bnvc  a1, a0, 0");
+    COMPARE(bnvc(a1, a0, 32767),
+            "60a47fff       bnvc  a1, a0, 32767");
+    COMPARE(bnvc(a1, a0, -32768),
+            "60a48000       bnvc  a1, a0, -32768");
+
+    COMPARE(beqzc(a0, 0),
+            "d8800000       beqzc   a0, 0x0");
+    COMPARE(beqzc(a0, 0xfffff),                   // 0x0fffff ==  1048575.
+            "d88fffff       beqzc   a0, 0xfffff");
+    COMPARE(beqzc(a0, 0x100000),                  // 0x100000 == -1048576.
+            "d8900000       beqzc   a0, 0x100000");
+
+    COMPARE(bnezc(a0, 0),
+            "f8800000       bnezc   a0, 0x0");
+    COMPARE(bnezc(a0, 0xfffff),                   // 0x0fffff ==  1048575.
+            "f88fffff       bnezc   a0, 0xfffff");
+    COMPARE(bnezc(a0, 0x100000),                  // 0x100000 == -1048576.
+            "f8900000       bnezc   a0, 0x100000");
   }
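
The raw kArchVariant comparisons become IsMipsArchVariant() calls throughout this file. Presumably the predicate is a thin wrapper over the same constant; a sketch of the expected definition in src/mips/constants-mips.h, reproduced from memory, so treat it as an assumption:

    static inline bool IsMipsArchVariant(const ArchVariants check) {
      return kArchVariant == check;
    }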
 
   COMPARE(addiu(a0, a1, 0x0),
@@ -266,7 +352,7 @@
   COMPARE(srav(v0, v1, fp),
           "03c31007       srav    v0, v1, fp");
 
-  if (kArchVariant == kMips32r2) {
+  if (IsMipsArchVariant(kMips32r2)) {
     COMPARE(rotr(a0, a1, 0),
             "00252002       rotr    a0, a1, 0");
     COMPARE(rotr(s0, s1, 8),
@@ -369,7 +455,7 @@
   COMPARE(sltiu(v0, v1, -1),
           "2c62ffff       sltiu   v0, v1, -1");
 
-  if (kArchVariant != kLoongson) {
+  if (!IsMipsArchVariant(kLoongson)) {
     COMPARE(movz(a0, a1, a2),
             "00a6200a       movz    a0, a1, a2");
     COMPARE(movz(s0, s1, s2),
@@ -404,15 +490,24 @@
     COMPARE(movf(v0, v1, 6),
             "00781001       movf    v0, v1, 6");
 
-    COMPARE(clz(a0, a1),
-            "70a42020       clz     a0, a1");
-    COMPARE(clz(s6, s7),
-            "72f6b020       clz     s6, s7");
-    COMPARE(clz(v0, v1),
-            "70621020       clz     v0, v1");
+    if (IsMipsArchVariant(kMips32r6)) {
+      COMPARE(clz(a0, a1),
+              "00a02050       clz     a0, a1");
+      COMPARE(clz(s6, s7),
+              "02e0b050       clz     s6, s7");
+      COMPARE(clz(v0, v1),
+              "00601050       clz     v0, v1");
+    } else {
+      COMPARE(clz(a0, a1),
+              "70a42020       clz     a0, a1");
+      COMPARE(clz(s6, s7),
+              "72f6b020       clz     s6, s7");
+      COMPARE(clz(v0, v1),
+              "70621020       clz     v0, v1");
+    }
   }
 
-  if (kArchVariant == kMips32r2) {
+  if (IsMipsArchVariant(kMips32r2)) {
     COMPARE(ins_(a0, a1, 31, 1),
             "7ca4ffc4       ins     a0, a1, 31, 1");
     COMPARE(ins_(s6, s7, 30, 2),
diff --git a/test/cctest/test-disasm-mips64.cc b/test/cctest/test-disasm-mips64.cc
new file mode 100644
index 0000000..d682d33
--- /dev/null
+++ b/test/cctest/test-disasm-mips64.cc
@@ -0,0 +1,674 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+
+#include "src/debug.h"
+#include "src/disasm.h"
+#include "src/disassembler.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+bool DisassembleAndCompare(byte* pc, const char* compare_string) {
+  disasm::NameConverter converter;
+  disasm::Disassembler disasm(converter);
+  EmbeddedVector<char, 128> disasm_buffer;
+
+  disasm.InstructionDecode(disasm_buffer, pc);
+
+  if (strcmp(compare_string, disasm_buffer.start()) != 0) {
+    fprintf(stderr,
+            "expected: \n"
+            "%s\n"
+            "disassembled: \n"
+            "%s\n\n",
+            compare_string, disasm_buffer.start());
+    return false;
+  }
+  return true;
+}
+
+
+// Set up V8 to a state where we can at least run the assembler and
+// disassembler. Declare the variables and allocate the data structures used
+// in the rest of the macros.
+#define SET_UP()                                            \
+  CcTest::InitializeVM();                                   \
+  Isolate* isolate = CcTest::i_isolate();                   \
+  HandleScope scope(isolate);                               \
+  byte *buffer = reinterpret_cast<byte*>(malloc(4*1024));   \
+  Assembler assm(isolate, buffer, 4*1024);                  \
+  bool failure = false;
+
+
+// This macro assembles one instruction using the preallocated assembler and
+// disassembles the generated instruction, comparing the output to the expected
+// value. If the comparison fails, an error message is printed, but the test
+// continues to run until the end.
+#define COMPARE(asm_, compare_string) \
+  { \
+    int pc_offset = assm.pc_offset(); \
+    byte *progcounter = &buffer[pc_offset]; \
+    assm.asm_; \
+    if (!DisassembleAndCompare(progcounter, compare_string)) failure = true; \
+  }
+
+
+// Verify that all invocations of the COMPARE macro passed successfully.
+// Exit with a failure if at least one of the tests failed.
+#define VERIFY_RUN()                                                     \
+  if (failure) {                                                         \
+    V8_Fatal(__FILE__, __LINE__, "MIPS64 Disassembler tests failed.\n"); \
+  }
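
The expected strings in the COMPARE calls below pair each instruction with its raw word. For the R-type ALU ops that dominate this file, the hex constant can be reproduced by packing the standard MIPS fields; RType below is purely illustrative and not a helper in this test:

    // opcode(6) | rs(5) | rt(5) | rd(5) | shamt(5) | funct(6); opcode is 0 here.
    static uint32_t RType(uint32_t rs, uint32_t rt, uint32_t rd,
                          uint32_t shamt, uint32_t funct) {
      return (rs << 21) | (rt << 16) | (rd << 11) | (shamt << 6) | funct;
    }
    // addu a0, a1, a2: rs = a1(5), rt = a2(6), rd = a0(4), funct = 0x21,
    // giving RType(5, 6, 4, 0, 0x21) == 0x00a62021, the first string below.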
+
+
+TEST(Type0) {
+  SET_UP();
+
+  COMPARE(addu(a0, a1, a2),
+          "00a62021       addu    a0, a1, a2");
+  COMPARE(daddu(a0, a1, a2),
+          "00a6202d       daddu   a0, a1, a2");
+  COMPARE(addu(a6, a7, t0),
+          "016c5021       addu    a6, a7, t0");
+  COMPARE(daddu(a6, a7, t0),
+          "016c502d       daddu   a6, a7, t0");
+  COMPARE(addu(v0, v1, s0),
+          "00701021       addu    v0, v1, s0");
+  COMPARE(daddu(v0, v1, s0),
+          "0070102d       daddu   v0, v1, s0");
+
+  COMPARE(subu(a0, a1, a2),
+          "00a62023       subu    a0, a1, a2");
+  COMPARE(dsubu(a0, a1, a2),
+          "00a6202f       dsubu   a0, a1, a2");
+  COMPARE(subu(a6, a7, t0),
+          "016c5023       subu    a6, a7, t0");
+  COMPARE(dsubu(a6, a7, t0),
+          "016c502f       dsubu   a6, a7, t0");
+  COMPARE(subu(v0, v1, s0),
+          "00701023       subu    v0, v1, s0");
+  COMPARE(dsubu(v0, v1, s0),
+          "0070102f       dsubu   v0, v1, s0");
+
+  if (kArchVariant != kMips64r6) {
+    COMPARE(mult(a0, a1),
+            "00850018       mult    a0, a1");
+    COMPARE(dmult(a0, a1),
+            "0085001c       dmult   a0, a1");
+    COMPARE(mult(a6, a7),
+            "014b0018       mult    a6, a7");
+    COMPARE(dmult(a6, a7),
+            "014b001c       dmult   a6, a7");
+    COMPARE(mult(v0, v1),
+            "00430018       mult    v0, v1");
+    COMPARE(dmult(v0, v1),
+            "0043001c       dmult   v0, v1");
+
+    COMPARE(multu(a0, a1),
+            "00850019       multu   a0, a1");
+    COMPARE(dmultu(a0, a1),
+            "0085001d       dmultu  a0, a1");
+    COMPARE(multu(a6, a7),
+            "014b0019       multu   a6, a7");
+    COMPARE(dmultu(a6, a7),
+            "014b001d       dmultu  a6, a7");
+    COMPARE(multu(v0, v1),
+            "00430019       multu   v0, v1");
+    COMPARE(dmultu(v0, v1),
+            "0043001d       dmultu  v0, v1");
+
+    COMPARE(div(a0, a1),
+            "0085001a       div     a0, a1");
+    COMPARE(div(a6, a7),
+            "014b001a       div     a6, a7");
+    COMPARE(div(v0, v1),
+            "0043001a       div     v0, v1");
+    COMPARE(ddiv(a0, a1),
+            "0085001e       ddiv    a0, a1");
+    COMPARE(ddiv(a6, a7),
+            "014b001e       ddiv    a6, a7");
+    COMPARE(ddiv(v0, v1),
+            "0043001e       ddiv    v0, v1");
+
+    COMPARE(divu(a0, a1),
+            "0085001b       divu    a0, a1");
+    COMPARE(divu(a6, a7),
+            "014b001b       divu    a6, a7");
+    COMPARE(divu(v0, v1),
+            "0043001b       divu    v0, v1");
+    COMPARE(ddivu(a0, a1),
+            "0085001f       ddivu   a0, a1");
+    COMPARE(ddivu(a6, a7),
+            "014b001f       ddivu   a6, a7");
+    COMPARE(ddivu(v0, v1),
+            "0043001f       ddivu   v0, v1");
+    COMPARE(mul(a0, a1, a2),
+            "70a62002       mul     a0, a1, a2");
+    COMPARE(mul(a6, a7, t0),
+            "716c5002       mul     a6, a7, t0");
+    COMPARE(mul(v0, v1, s0),
+            "70701002       mul     v0, v1, s0");
+  } else {  // MIPS64r6.
+    COMPARE(mul(a0, a1, a2),
+            "00a62098       mul    a0, a1, a2");
+    COMPARE(muh(a0, a1, a2),
+            "00a620d8       muh    a0, a1, a2");
+    COMPARE(dmul(a0, a1, a2),
+            "00a6209c       dmul   a0, a1, a2");
+    COMPARE(dmuh(a0, a1, a2),
+            "00a620dc       dmuh   a0, a1, a2");
+    COMPARE(mul(a5, a6, a7),
+            "014b4898       mul    a5, a6, a7");
+    COMPARE(muh(a5, a6, a7),
+            "014b48d8       muh    a5, a6, a7");
+    COMPARE(dmul(a5, a6, a7),
+            "014b489c       dmul   a5, a6, a7");
+    COMPARE(dmuh(a5, a6, a7),
+            "014b48dc       dmuh   a5, a6, a7");
+    COMPARE(mul(v0, v1, a0),
+            "00641098       mul    v0, v1, a0");
+    COMPARE(muh(v0, v1, a0),
+            "006410d8       muh    v0, v1, a0");
+    COMPARE(dmul(v0, v1, a0),
+            "0064109c       dmul   v0, v1, a0");
+    COMPARE(dmuh(v0, v1, a0),
+            "006410dc       dmuh   v0, v1, a0");
+
+    COMPARE(mulu(a0, a1, a2),
+            "00a62099       mulu   a0, a1, a2");
+    COMPARE(muhu(a0, a1, a2),
+            "00a620d9       muhu   a0, a1, a2");
+    COMPARE(dmulu(a0, a1, a2),
+            "00a6209d       dmulu  a0, a1, a2");
+    COMPARE(dmuhu(a0, a1, a2),
+            "00a620dd       dmuhu  a0, a1, a2");
+    COMPARE(mulu(a5, a6, a7),
+            "014b4899       mulu   a5, a6, a7");
+    COMPARE(muhu(a5, a6, a7),
+            "014b48d9       muhu   a5, a6, a7");
+    COMPARE(dmulu(a5, a6, a7),
+            "014b489d       dmulu  a5, a6, a7");
+    COMPARE(dmuhu(a5, a6, a7),
+            "014b48dd       dmuhu  a5, a6, a7");
+    COMPARE(mulu(v0, v1, a0),
+            "00641099       mulu   v0, v1, a0");
+    COMPARE(muhu(v0, v1, a0),
+            "006410d9       muhu   v0, v1, a0");
+    COMPARE(dmulu(v0, v1, a0),
+            "0064109d       dmulu  v0, v1, a0");
+    COMPARE(dmuhu(v0, v1, a0),
+            "006410dd       dmuhu  v0, v1, a0");
+
+    COMPARE(div(a0, a1, a2),
+            "00a6209a       div    a0, a1, a2");
+    COMPARE(mod(a0, a1, a2),
+            "00a620da       mod    a0, a1, a2");
+    COMPARE(ddiv(a0, a1, a2),
+            "00a6209e       ddiv   a0, a1, a2");
+    COMPARE(dmod(a0, a1, a2),
+            "00a620de       dmod   a0, a1, a2");
+    COMPARE(div(a5, a6, a7),
+            "014b489a       div    a5, a6, a7");
+    COMPARE(mod(a5, a6, a7),
+            "014b48da       mod    a5, a6, a7");
+    COMPARE(ddiv(a5, a6, a7),
+            "014b489e       ddiv   a5, a6, a7");
+    COMPARE(dmod(a5, a6, a7),
+            "014b48de       dmod   a5, a6, a7");
+    COMPARE(div(v0, v1, a0),
+            "0064109a       div    v0, v1, a0");
+    COMPARE(mod(v0, v1, a0),
+            "006410da       mod    v0, v1, a0");
+    COMPARE(ddiv(v0, v1, a0),
+            "0064109e       ddiv   v0, v1, a0");
+    COMPARE(dmod(v0, v1, a0),
+            "006410de       dmod   v0, v1, a0");
+
+    COMPARE(divu(a0, a1, a2),
+            "00a6209b       divu   a0, a1, a2");
+    COMPARE(modu(a0, a1, a2),
+            "00a620db       modu   a0, a1, a2");
+    COMPARE(ddivu(a0, a1, a2),
+            "00a6209f       ddivu  a0, a1, a2");
+    COMPARE(dmodu(a0, a1, a2),
+            "00a620df       dmodu  a0, a1, a2");
+    COMPARE(divu(a5, a6, a7),
+            "014b489b       divu   a5, a6, a7");
+    COMPARE(modu(a5, a6, a7),
+            "014b48db       modu   a5, a6, a7");
+    COMPARE(ddivu(a5, a6, a7),
+            "014b489f       ddivu  a5, a6, a7");
+    COMPARE(dmodu(a5, a6, a7),
+            "014b48df       dmodu  a5, a6, a7");
+    COMPARE(divu(v0, v1, a0),
+            "0064109b       divu   v0, v1, a0");
+    COMPARE(modu(v0, v1, a0),
+            "006410db       modu   v0, v1, a0");
+    COMPARE(ddivu(v0, v1, a0),
+            "0064109f       ddivu  v0, v1, a0");
+    COMPARE(dmodu(v0, v1, a0),
+            "006410df       dmodu  v0, v1, a0");
+
+    COMPARE(bovc(a0, a0, static_cast<int16_t>(0)),
+            "20840000       bovc  a0, a0, 0");
+    COMPARE(bovc(a1, a0, static_cast<int16_t>(0)),
+            "20a40000       bovc  a1, a0, 0");
+    COMPARE(bovc(a1, a0, 32767),
+            "20a47fff       bovc  a1, a0, 32767");
+    COMPARE(bovc(a1, a0, -32768),
+            "20a48000       bovc  a1, a0, -32768");
+
+    COMPARE(bnvc(a0, a0, static_cast<int16_t>(0)),
+            "60840000       bnvc  a0, a0, 0");
+    COMPARE(bnvc(a1, a0, static_cast<int16_t>(0)),
+            "60a40000       bnvc  a1, a0, 0");
+    COMPARE(bnvc(a1, a0, 32767),
+            "60a47fff       bnvc  a1, a0, 32767");
+    COMPARE(bnvc(a1, a0, -32768),
+            "60a48000       bnvc  a1, a0, -32768");
+
+    COMPARE(beqzc(a0, 0),
+            "d8800000       beqzc   a0, 0x0");
+    COMPARE(beqzc(a0, 0xfffff),                   // 0x0fffff ==  1048575.
+            "d88fffff       beqzc   a0, 0xfffff");
+    COMPARE(beqzc(a0, 0x100000),                  // 0x100000 == -1048576.
+            "d8900000       beqzc   a0, 0x100000");
+
+    COMPARE(bnezc(a0, 0),
+            "f8800000       bnezc   a0, 0x0");
+    COMPARE(bnezc(a0, 0xfffff),                   // 0x0fffff ==  1048575.
+            "f88fffff       bnezc   a0, 0xfffff");
+    COMPARE(bnezc(a0, 0x100000),                  // 0x100000 == -1048576.
+            "f8900000       bnezc   a0, 0x100000");
+  }
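
The offset comments above ("0x100000 == -1048576") follow from beqzc/bnezc taking a 21-bit signed immediate: bit 20 is the sign bit, so 0x100000 is the most negative encodable offset. An illustrative check (SignExtend21 is not a helper in this file):

    static int32_t SignExtend21(uint32_t imm21) {
      // Shift the sign bit up to bit 31, then arithmetic-shift back down.
      return static_cast<int32_t>(imm21 << 11) >> 11;
    }
    // SignExtend21(0x0fffff) ==  1048575
    // SignExtend21(0x100000) == -1048576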
+
+  COMPARE(addiu(a0, a1, 0x0),
+          "24a40000       addiu   a0, a1, 0");
+  COMPARE(addiu(s0, s1, 32767),
+          "26307fff       addiu   s0, s1, 32767");
+  COMPARE(addiu(a6, a7, -32768),
+          "256a8000       addiu   a6, a7, -32768");
+  COMPARE(addiu(v0, v1, -1),
+          "2462ffff       addiu   v0, v1, -1");
+  COMPARE(daddiu(a0, a1, 0x0),
+          "64a40000       daddiu  a0, a1, 0");
+  COMPARE(daddiu(s0, s1, 32767),
+          "66307fff       daddiu  s0, s1, 32767");
+  COMPARE(daddiu(a6, a7, -32768),
+          "656a8000       daddiu  a6, a7, -32768");
+  COMPARE(daddiu(v0, v1, -1),
+          "6462ffff       daddiu  v0, v1, -1");
+
+  COMPARE(and_(a0, a1, a2),
+          "00a62024       and     a0, a1, a2");
+  COMPARE(and_(s0, s1, s2),
+          "02328024       and     s0, s1, s2");
+  COMPARE(and_(a6, a7, t0),
+          "016c5024       and     a6, a7, t0");
+  COMPARE(and_(v0, v1, a2),
+          "00661024       and     v0, v1, a2");
+
+  COMPARE(or_(a0, a1, a2),
+          "00a62025       or      a0, a1, a2");
+  COMPARE(or_(s0, s1, s2),
+          "02328025       or      s0, s1, s2");
+  COMPARE(or_(a6, a7, t0),
+          "016c5025       or      a6, a7, t0");
+  COMPARE(or_(v0, v1, a2),
+          "00661025       or      v0, v1, a2");
+
+  COMPARE(xor_(a0, a1, a2),
+          "00a62026       xor     a0, a1, a2");
+  COMPARE(xor_(s0, s1, s2),
+          "02328026       xor     s0, s1, s2");
+  COMPARE(xor_(a6, a7, t0),
+          "016c5026       xor     a6, a7, t0");
+  COMPARE(xor_(v0, v1, a2),
+          "00661026       xor     v0, v1, a2");
+
+  COMPARE(nor(a0, a1, a2),
+          "00a62027       nor     a0, a1, a2");
+  COMPARE(nor(s0, s1, s2),
+          "02328027       nor     s0, s1, s2");
+  COMPARE(nor(a6, a7, t0),
+          "016c5027       nor     a6, a7, t0");
+  COMPARE(nor(v0, v1, a2),
+          "00661027       nor     v0, v1, a2");
+
+  COMPARE(andi(a0, a1, 0x1),
+          "30a40001       andi    a0, a1, 0x1");
+  COMPARE(andi(v0, v1, 0xffff),
+          "3062ffff       andi    v0, v1, 0xffff");
+
+  COMPARE(ori(a0, a1, 0x1),
+          "34a40001       ori     a0, a1, 0x1");
+  COMPARE(ori(v0, v1, 0xffff),
+          "3462ffff       ori     v0, v1, 0xffff");
+
+  COMPARE(xori(a0, a1, 0x1),
+          "38a40001       xori    a0, a1, 0x1");
+  COMPARE(xori(v0, v1, 0xffff),
+          "3862ffff       xori    v0, v1, 0xffff");
+
+  COMPARE(lui(a0, 0x1),
+          "3c040001       lui     a0, 0x1");
+  COMPARE(lui(v0, 0xffff),
+          "3c02ffff       lui     v0, 0xffff");
+
+  COMPARE(sll(a0, a1, 0),
+          "00052000       sll     a0, a1, 0");
+  COMPARE(sll(s0, s1, 8),
+          "00118200       sll     s0, s1, 8");
+  COMPARE(sll(a6, a7, 24),
+          "000b5600       sll     a6, a7, 24");
+  COMPARE(sll(v0, v1, 31),
+          "000317c0       sll     v0, v1, 31");
+  COMPARE(dsll(a0, a1, 0),
+          "00052038       dsll    a0, a1, 0");
+  COMPARE(dsll(s0, s1, 8),
+          "00118238       dsll    s0, s1, 8");
+  COMPARE(dsll(a6, a7, 24),
+          "000b5638       dsll    a6, a7, 24");
+  COMPARE(dsll(v0, v1, 31),
+          "000317f8       dsll    v0, v1, 31");
+
+  COMPARE(sllv(a0, a1, a2),
+          "00c52004       sllv    a0, a1, a2");
+  COMPARE(sllv(s0, s1, s2),
+          "02518004       sllv    s0, s1, s2");
+  COMPARE(sllv(a6, a7, t0),
+          "018b5004       sllv    a6, a7, t0");
+  COMPARE(sllv(v0, v1, fp),
+          "03c31004       sllv    v0, v1, fp");
+  COMPARE(dsllv(a0, a1, a2),
+          "00c52014       dsllv   a0, a1, a2");
+  COMPARE(dsllv(s0, s1, s2),
+          "02518014       dsllv   s0, s1, s2");
+  COMPARE(dsllv(a6, a7, t0),
+          "018b5014       dsllv   a6, a7, t0");
+  COMPARE(dsllv(v0, v1, fp),
+          "03c31014       dsllv   v0, v1, fp");
+
+  COMPARE(srl(a0, a1, 0),
+          "00052002       srl     a0, a1, 0");
+  COMPARE(srl(s0, s1, 8),
+          "00118202       srl     s0, s1, 8");
+  COMPARE(srl(a6, a7, 24),
+          "000b5602       srl     a6, a7, 24");
+  COMPARE(srl(v0, v1, 31),
+          "000317c2       srl     v0, v1, 31");
+  COMPARE(dsrl(a0, a1, 0),
+          "0005203a       dsrl    a0, a1, 0");
+  COMPARE(dsrl(s0, s1, 8),
+          "0011823a       dsrl    s0, s1, 8");
+  COMPARE(dsrl(a6, a7, 24),
+          "000b563a       dsrl    a6, a7, 24");
+  COMPARE(dsrl(v0, v1, 31),
+          "000317fa       dsrl    v0, v1, 31");
+
+  COMPARE(srlv(a0, a1, a2),
+          "00c52006       srlv    a0, a1, a2");
+  COMPARE(srlv(s0, s1, s2),
+          "02518006       srlv    s0, s1, s2");
+  COMPARE(srlv(a6, a7, t0),
+          "018b5006       srlv    a6, a7, t0");
+  COMPARE(srlv(v0, v1, fp),
+          "03c31006       srlv    v0, v1, fp");
+  COMPARE(dsrlv(a0, a1, a2),
+          "00c52016       dsrlv   a0, a1, a2");
+  COMPARE(dsrlv(s0, s1, s2),
+          "02518016       dsrlv   s0, s1, s2");
+  COMPARE(dsrlv(a6, a7, t0),
+          "018b5016       dsrlv   a6, a7, t0");
+  COMPARE(dsrlv(v0, v1, fp),
+          "03c31016       dsrlv   v0, v1, fp");
+
+  COMPARE(sra(a0, a1, 0),
+          "00052003       sra     a0, a1, 0");
+  COMPARE(sra(s0, s1, 8),
+          "00118203       sra     s0, s1, 8");
+  COMPARE(sra(a6, a7, 24),
+          "000b5603       sra     a6, a7, 24");
+  COMPARE(sra(v0, v1, 31),
+          "000317c3       sra     v0, v1, 31");
+  COMPARE(dsra(a0, a1, 0),
+          "0005203b       dsra    a0, a1, 0");
+  COMPARE(dsra(s0, s1, 8),
+          "0011823b       dsra    s0, s1, 8");
+  COMPARE(dsra(a6, a7, 24),
+          "000b563b       dsra    a6, a7, 24");
+  COMPARE(dsra(v0, v1, 31),
+          "000317fb       dsra    v0, v1, 31");
+
+  COMPARE(srav(a0, a1, a2),
+          "00c52007       srav    a0, a1, a2");
+  COMPARE(srav(s0, s1, s2),
+          "02518007       srav    s0, s1, s2");
+  COMPARE(srav(a6, a7, t0),
+          "018b5007       srav    a6, a7, t0");
+  COMPARE(srav(v0, v1, fp),
+          "03c31007       srav    v0, v1, fp");
+  COMPARE(dsrav(a0, a1, a2),
+          "00c52017       dsrav   a0, a1, a2");
+  COMPARE(dsrav(s0, s1, s2),
+          "02518017       dsrav   s0, s1, s2");
+  COMPARE(dsrav(a6, a7, t0),
+          "018b5017       dsrav   a6, a7, t0");
+  COMPARE(dsrav(v0, v1, fp),
+          "03c31017       dsrav   v0, v1, fp");
+
+  if (kArchVariant == kMips64r2) {
+    COMPARE(rotr(a0, a1, 0),
+            "00252002       rotr    a0, a1, 0");
+    COMPARE(rotr(s0, s1, 8),
+            "00318202       rotr    s0, s1, 8");
+    COMPARE(rotr(a6, a7, 24),
+            "002b5602       rotr    a6, a7, 24");
+    COMPARE(rotr(v0, v1, 31),
+            "002317c2       rotr    v0, v1, 31");
+    COMPARE(drotr(a0, a1, 0),
+            "0025203a       drotr   a0, a1, 0");
+    COMPARE(drotr(s0, s1, 8),
+            "0031823a       drotr   s0, s1, 8");
+    COMPARE(drotr(a6, a7, 24),
+            "002b563a       drotr   a6, a7, 24");
+    COMPARE(drotr(v0, v1, 31),
+            "002317fa       drotr   v0, v1, 31");
+
+    COMPARE(rotrv(a0, a1, a2),
+            "00c52046       rotrv   a0, a1, a2");
+    COMPARE(rotrv(s0, s1, s2),
+            "02518046       rotrv   s0, s1, s2");
+    COMPARE(rotrv(a6, a7, t0),
+            "018b5046       rotrv   a6, a7, t0");
+    COMPARE(rotrv(v0, v1, fp),
+            "03c31046       rotrv   v0, v1, fp");
+    COMPARE(drotrv(a0, a1, a2),
+            "00c52056       drotrv  a0, a1, a2");
+    COMPARE(drotrv(s0, s1, s2),
+            "02518056       drotrv  s0, s1, s2");
+    COMPARE(drotrv(a6, a7, t0),
+            "018b5056       drotrv  a6, a7, t0");
+    COMPARE(drotrv(v0, v1, fp),
+            "03c31056       drotrv  v0, v1, fp");
+  }
+
+  COMPARE(break_(0),
+          "0000000d       break, code: 0x00000 (0)");
+  COMPARE(break_(261120),
+          "00ff000d       break, code: 0x3fc00 (261120)");
+  COMPARE(break_(1047552),
+          "03ff000d       break, code: 0xffc00 (1047552)");
+
+  COMPARE(tge(a0, a1, 0),
+          "00850030       tge     a0, a1, code: 0x000");
+  COMPARE(tge(s0, s1, 1023),
+          "0211fff0       tge     s0, s1, code: 0x3ff");
+  COMPARE(tgeu(a0, a1, 0),
+          "00850031       tgeu    a0, a1, code: 0x000");
+  COMPARE(tgeu(s0, s1, 1023),
+          "0211fff1       tgeu    s0, s1, code: 0x3ff");
+  COMPARE(tlt(a0, a1, 0),
+          "00850032       tlt     a0, a1, code: 0x000");
+  COMPARE(tlt(s0, s1, 1023),
+          "0211fff2       tlt     s0, s1, code: 0x3ff");
+  COMPARE(tltu(a0, a1, 0),
+          "00850033       tltu    a0, a1, code: 0x000");
+  COMPARE(tltu(s0, s1, 1023),
+          "0211fff3       tltu    s0, s1, code: 0x3ff");
+  COMPARE(teq(a0, a1, 0),
+          "00850034       teq     a0, a1, code: 0x000");
+  COMPARE(teq(s0, s1, 1023),
+          "0211fff4       teq     s0, s1, code: 0x3ff");
+  COMPARE(tne(a0, a1, 0),
+          "00850036       tne     a0, a1, code: 0x000");
+  COMPARE(tne(s0, s1, 1023),
+          "0211fff6       tne     s0, s1, code: 0x3ff");
+
+  COMPARE(mfhi(a0),
+          "00002010       mfhi    a0");
+  COMPARE(mfhi(s2),
+          "00009010       mfhi    s2");
+  COMPARE(mfhi(t0),
+          "00006010       mfhi    t0");
+  COMPARE(mfhi(v1),
+          "00001810       mfhi    v1");
+  COMPARE(mflo(a0),
+          "00002012       mflo    a0");
+  COMPARE(mflo(s2),
+          "00009012       mflo    s2");
+  COMPARE(mflo(t0),
+          "00006012       mflo    t0");
+  COMPARE(mflo(v1),
+          "00001812       mflo    v1");
+
+  COMPARE(slt(a0, a1, a2),
+          "00a6202a       slt     a0, a1, a2");
+  COMPARE(slt(s0, s1, s2),
+          "0232802a       slt     s0, s1, s2");
+  COMPARE(slt(a6, a7, t0),
+          "016c502a       slt     a6, a7, t0");
+  COMPARE(slt(v0, v1, a2),
+          "0066102a       slt     v0, v1, a2");
+  COMPARE(sltu(a0, a1, a2),
+          "00a6202b       sltu    a0, a1, a2");
+  COMPARE(sltu(s0, s1, s2),
+          "0232802b       sltu    s0, s1, s2");
+  COMPARE(sltu(a6, a7, t0),
+          "016c502b       sltu    a6, a7, t0");
+  COMPARE(sltu(v0, v1, a2),
+          "0066102b       sltu    v0, v1, a2");
+
+  COMPARE(slti(a0, a1, 0),
+          "28a40000       slti    a0, a1, 0");
+  COMPARE(slti(s0, s1, 32767),
+          "2a307fff       slti    s0, s1, 32767");
+  COMPARE(slti(a6, a7, -32768),
+          "296a8000       slti    a6, a7, -32768");
+  COMPARE(slti(v0, v1, -1),
+          "2862ffff       slti    v0, v1, -1");
+  COMPARE(sltiu(a0, a1, 0),
+          "2ca40000       sltiu   a0, a1, 0");
+  COMPARE(sltiu(s0, s1, 32767),
+          "2e307fff       sltiu   s0, s1, 32767");
+  COMPARE(sltiu(a6, a7, -32768),
+          "2d6a8000       sltiu   a6, a7, -32768");
+  COMPARE(sltiu(v0, v1, -1),
+          "2c62ffff       sltiu   v0, v1, -1");
+  COMPARE(movz(a0, a1, a2),
+          "00a6200a       movz    a0, a1, a2");
+  COMPARE(movz(s0, s1, s2),
+          "0232800a       movz    s0, s1, s2");
+  COMPARE(movz(a6, a7, t0),
+          "016c500a       movz    a6, a7, t0");
+  COMPARE(movz(v0, v1, a2),
+          "0066100a       movz    v0, v1, a2");
+  COMPARE(movn(a0, a1, a2),
+          "00a6200b       movn    a0, a1, a2");
+  COMPARE(movn(s0, s1, s2),
+          "0232800b       movn    s0, s1, s2");
+  COMPARE(movn(a6, a7, t0),
+          "016c500b       movn    a6, a7, t0");
+  COMPARE(movn(v0, v1, a2),
+          "0066100b       movn    v0, v1, a2");
+
+  COMPARE(movt(a0, a1, 1),
+          "00a52001       movt    a0, a1, 1");
+  COMPARE(movt(s0, s1, 2),
+          "02298001       movt    s0, s1, 2");
+  COMPARE(movt(a6, a7, 3),
+          "016d5001       movt    a6, a7, 3");
+  COMPARE(movt(v0, v1, 7),
+          "007d1001       movt    v0, v1, 7");
+  COMPARE(movf(a0, a1, 0),
+          "00a02001       movf    a0, a1, 0");
+  COMPARE(movf(s0, s1, 4),
+          "02308001       movf    s0, s1, 4");
+  COMPARE(movf(a6, a7, 5),
+          "01745001       movf    a6, a7, 5");
+  COMPARE(movf(v0, v1, 6),
+          "00781001       movf    v0, v1, 6");
+
+  if (kArchVariant == kMips64r6) {
+    COMPARE(clz(a0, a1),
+            "00a02050       clz     a0, a1");
+    COMPARE(clz(s6, s7),
+            "02e0b050       clz     s6, s7");
+    COMPARE(clz(v0, v1),
+            "00601050       clz     v0, v1");
+  } else {
+    COMPARE(clz(a0, a1),
+            "70a42020       clz     a0, a1");
+    COMPARE(clz(s6, s7),
+            "72f6b020       clz     s6, s7");
+    COMPARE(clz(v0, v1),
+            "70621020       clz     v0, v1");
+  }
+
+  COMPARE(ins_(a0, a1, 31, 1),
+          "7ca4ffc4       ins     a0, a1, 31, 1");
+  COMPARE(ins_(s6, s7, 30, 2),
+          "7ef6ff84       ins     s6, s7, 30, 2");
+  COMPARE(ins_(v0, v1, 0, 32),
+          "7c62f804       ins     v0, v1, 0, 32");
+  COMPARE(ext_(a0, a1, 31, 1),
+          "7ca407c0       ext     a0, a1, 31, 1");
+  COMPARE(ext_(s6, s7, 30, 2),
+          "7ef60f80       ext     s6, s7, 30, 2");
+  COMPARE(ext_(v0, v1, 0, 32),
+          "7c62f800       ext     v0, v1, 0, 32");
+
+  VERIFY_RUN();
+}
diff --git a/test/cctest/test-disasm-x64.cc b/test/cctest/test-disasm-x64.cc
index b537367..e756ce2 100644
--- a/test/cctest/test-disasm-x64.cc
+++ b/test/cctest/test-disasm-x64.cc
@@ -32,9 +32,9 @@
 #include "src/debug.h"
 #include "src/disasm.h"
 #include "src/disassembler.h"
+#include "src/ic/ic.h"
 #include "src/macro-assembler.h"
 #include "src/serialize.h"
-#include "src/stub-cache.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
@@ -378,6 +378,7 @@
     __ cvttsd2si(rdx, Operand(rbx, rcx, times_4, 10000));
     __ cvttsd2si(rdx, xmm1);
     __ cvttsd2siq(rdx, xmm1);
+    __ cvttsd2siq(rdx, Operand(rbx, rcx, times_4, 10000));
     __ movsd(xmm1, Operand(rbx, rcx, times_4, 10000));
     __ movsd(Operand(rbx, rcx, times_4, 10000), xmm1);
     // 128 bit move instructions.
@@ -420,6 +421,14 @@
     }
   }
 
+  // xchg.
+  {
+    __ xchgq(rax, rax);
+    __ xchgq(rax, rbx);
+    __ xchgq(rbx, rbx);
+    __ xchgq(rbx, Operand(rsp, 12));
+  }
+
   // Nop instructions
   for (int i = 0; i < 16; i++) {
     __ Nop(i);
@@ -433,7 +442,8 @@
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
   USE(code);
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
   byte* begin = code->instruction_start();
   byte* end = begin + code->instruction_size();
   disasm::Disassembler::Disassemble(stdout, begin, end);
diff --git a/test/cctest/test-disasm-x87.cc b/test/cctest/test-disasm-x87.cc
index 444090a..6cd33e5 100644
--- a/test/cctest/test-disasm-x87.cc
+++ b/test/cctest/test-disasm-x87.cc
@@ -32,9 +32,9 @@
 #include "src/debug.h"
 #include "src/disasm.h"
 #include "src/disassembler.h"
+#include "src/ic/ic.h"
 #include "src/macro-assembler.h"
 #include "src/serialize.h"
-#include "src/stub-cache.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
@@ -168,6 +168,11 @@
 
   __ nop();
   __ idiv(edx);
+  __ idiv(Operand(edx, ecx, times_1, 1));
+  __ idiv(Operand(esp, 12));
+  __ div(edx);
+  __ div(Operand(edx, ecx, times_1, 1));
+  __ div(Operand(esp, 12));
   __ mul(edx);
   __ neg(edx);
   __ not_(edx);
@@ -175,7 +180,9 @@
 
   __ imul(edx, Operand(ebx, ecx, times_4, 10000));
   __ imul(edx, ecx, 12);
+  __ imul(edx, Operand(edx, eax, times_2, 42), 8);
   __ imul(edx, ecx, 1000);
+  __ imul(edx, Operand(ebx, ecx, times_4, 1), 9000);
 
   __ inc(edx);
   __ inc(Operand(ebx, ecx, times_4, 10000));
@@ -197,15 +204,24 @@
   __ sar(edx, 1);
   __ sar(edx, 6);
   __ sar_cl(edx);
+  __ sar(Operand(ebx, ecx, times_4, 10000), 1);
+  __ sar(Operand(ebx, ecx, times_4, 10000), 6);
+  __ sar_cl(Operand(ebx, ecx, times_4, 10000));
   __ sbb(edx, Operand(ebx, ecx, times_4, 10000));
   __ shld(edx, Operand(ebx, ecx, times_4, 10000));
   __ shl(edx, 1);
   __ shl(edx, 6);
   __ shl_cl(edx);
+  __ shl(Operand(ebx, ecx, times_4, 10000), 1);
+  __ shl(Operand(ebx, ecx, times_4, 10000), 6);
+  __ shl_cl(Operand(ebx, ecx, times_4, 10000));
   __ shrd(edx, Operand(ebx, ecx, times_4, 10000));
   __ shr(edx, 1);
   __ shr(edx, 7);
   __ shr_cl(edx);
+  __ shr(Operand(ebx, ecx, times_4, 10000), 1);
+  __ shr(Operand(ebx, ecx, times_4, 10000), 6);
+  __ shr_cl(Operand(ebx, ecx, times_4, 10000));
 
 
   // Immediates
@@ -333,6 +349,7 @@
   __ fprem1();
   __ fincstp();
   __ ftst();
+  __ fxam();
   __ fxch(3);
   __ fld_s(Operand(ebx, ecx, times_4, 10000));
   __ fstp_s(Operand(ebx, ecx, times_4, 10000));
@@ -362,6 +379,20 @@
   __ fninit();
   __ nop();
 
+  __ fldcw(Operand(ebx, ecx, times_4, 10000));
+  __ fnstcw(Operand(ebx, ecx, times_4, 10000));
+  __ fadd_d(Operand(ebx, ecx, times_4, 10000));
+  __ fnsave(Operand(ebx, ecx, times_4, 10000));
+  __ frstor(Operand(ebx, ecx, times_4, 10000));
+
+  // xchg.
+  {
+    __ xchg(eax, eax);
+    __ xchg(eax, ebx);
+    __ xchg(ebx, ebx);
+    __ xchg(ebx, Operand(esp, 12));
+  }
+
   // Nop instructions
   for (int i = 0; i < 16; i++) {
     __ Nop(i);
@@ -375,7 +406,8 @@
       desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
   USE(code);
 #ifdef OBJECT_PRINT
-  code->Print();
+  OFStream os(stdout);
+  code->Print(os);
   byte* begin = code->instruction_start();
   byte* end = begin + code->instruction_size();
   disasm::Disassembler::Disassemble(stdout, begin, end);
diff --git a/test/cctest/test-diy-fp.cc b/test/cctest/test-diy-fp.cc
index e465e46..255118e 100644
--- a/test/cctest/test-diy-fp.cc
+++ b/test/cctest/test-diy-fp.cc
@@ -29,8 +29,8 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/diy-fp.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 
 
diff --git a/test/cctest/test-double.cc b/test/cctest/test-double.cc
index 55f0711..16dcb37 100644
--- a/test/cctest/test-double.cc
+++ b/test/cctest/test-double.cc
@@ -29,9 +29,9 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/diy-fp.h"
 #include "src/double.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 
 
@@ -105,7 +105,7 @@
 TEST(IsSpecial) {
   CHECK(Double(V8_INFINITY).IsSpecial());
   CHECK(Double(-V8_INFINITY).IsSpecial());
-  CHECK(Double(OS::nan_value()).IsSpecial());
+  CHECK(Double(v8::base::OS::nan_value()).IsSpecial());
   uint64_t bits = V8_2PART_UINT64_C(0xFFF12345, 00000000);
   CHECK(Double(bits).IsSpecial());
   // Denormals are not special:
@@ -128,7 +128,7 @@
 TEST(IsInfinite) {
   CHECK(Double(V8_INFINITY).IsInfinite());
   CHECK(Double(-V8_INFINITY).IsInfinite());
-  CHECK(!Double(OS::nan_value()).IsInfinite());
+  CHECK(!Double(v8::base::OS::nan_value()).IsInfinite());
   CHECK(!Double(0.0).IsInfinite());
   CHECK(!Double(-0.0).IsInfinite());
   CHECK(!Double(1.0).IsInfinite());
diff --git a/test/cctest/test-dtoa.cc b/test/cctest/test-dtoa.cc
index ed3ce59..3f396a5 100644
--- a/test/cctest/test-dtoa.cc
+++ b/test/cctest/test-dtoa.cc
@@ -31,8 +31,8 @@
 
 #include "src/dtoa.h"
 
+#include "src/base/platform/platform.h"
 #include "src/double.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/gay-fixed.h"
 #include "test/cctest/gay-precision.h"
diff --git a/test/cctest/test-fast-dtoa.cc b/test/cctest/test-fast-dtoa.cc
index d5a23b3..52198a4 100644
--- a/test/cctest/test-fast-dtoa.cc
+++ b/test/cctest/test-fast-dtoa.cc
@@ -29,10 +29,10 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/diy-fp.h"
 #include "src/double.h"
 #include "src/fast-dtoa.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/gay-precision.h"
 #include "test/cctest/gay-shortest.h"
diff --git a/test/cctest/test-fixed-dtoa.cc b/test/cctest/test-fixed-dtoa.cc
index f146813..de40d09 100644
--- a/test/cctest/test-fixed-dtoa.cc
+++ b/test/cctest/test-fixed-dtoa.cc
@@ -29,9 +29,9 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/double.h"
 #include "src/fixed-dtoa.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/gay-fixed.h"
 
diff --git a/test/cctest/test-gc-tracer.cc b/test/cctest/test-gc-tracer.cc
new file mode 100644
index 0000000..190644d
--- /dev/null
+++ b/test/cctest/test-gc-tracer.cc
@@ -0,0 +1,125 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+#include <utility>
+
+#include "src/v8.h"
+
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+TEST(RingBufferPartialFill) {
+  const int max_size = 6;
+  typedef RingBuffer<int, max_size>::const_iterator Iter;
+  RingBuffer<int, max_size> ring_buffer;
+  CHECK(ring_buffer.empty());
+  CHECK_EQ(static_cast<int>(ring_buffer.size()), 0);
+  CHECK(ring_buffer.begin() == ring_buffer.end());
+
+  // Fill ring_buffer partially: [0, 1, 2]
+  for (int i = 0; i < max_size / 2; i++) ring_buffer.push_back(i);
+
+  CHECK(!ring_buffer.empty());
+  CHECK(static_cast<int>(ring_buffer.size()) == max_size / 2);
+  CHECK(ring_buffer.begin() != ring_buffer.end());
+
+  // Test forward iteration
+  int i = 0;
+  for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
+    CHECK(*iter == i);
+    ++i;
+  }
+  CHECK_EQ(i, 3);  // one past last element.
+
+  // Test backward iteration
+  i = 2;
+  Iter iter = ring_buffer.back();
+  while (true) {
+    CHECK(*iter == i);
+    if (iter == ring_buffer.begin()) break;
+    --iter;
+    --i;
+  }
+  CHECK_EQ(i, 0);
+}
+
+
+TEST(RingBufferWrapAround) {
+  const int max_size = 6;
+  typedef RingBuffer<int, max_size>::const_iterator Iter;
+  RingBuffer<int, max_size> ring_buffer;
+
+  // Fill ring_buffer (wrap around): [9, 10, 11, 12, 13, 14]
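+  // (2 * max_size + 3 = 15 pushes of the values 0..14; only the newest
+  // max_size = 6 entries survive the wrap-around.)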
+  for (int i = 0; i < 2 * max_size + 3; i++) ring_buffer.push_back(i);
+
+  CHECK(!ring_buffer.empty());
+  CHECK(static_cast<int>(ring_buffer.size()) == max_size);
+  CHECK(ring_buffer.begin() != ring_buffer.end());
+
+  // Test forward iteration
+  int i = 9;
+  for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
+    CHECK(*iter == i);
+    ++i;
+  }
+  CHECK_EQ(i, 15);  // one past last element.
+
+  // Test backward iteration
+  i = 14;
+  Iter iter = ring_buffer.back();
+  while (true) {
+    CHECK(*iter == i);
+    if (iter == ring_buffer.begin()) break;
+    --iter;
+    --i;
+  }
+  CHECK_EQ(i, 9);
+}
+
+
+TEST(RingBufferPushFront) {
+  const int max_size = 6;
+  typedef RingBuffer<int, max_size>::const_iterator Iter;
+  RingBuffer<int, max_size> ring_buffer;
+
+  // Fill ring_buffer (wrap around): [14, 13, 12, 11, 10, 9]
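+  // (Same 15 pushes of 0..14, but at the front, so the newest value 14 ends
+  // up first and iteration from begin() counts down to 9.)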
+  for (int i = 0; i < 2 * max_size + 3; i++) ring_buffer.push_front(i);
+
+  CHECK(!ring_buffer.empty());
+  CHECK(static_cast<int>(ring_buffer.size()) == max_size);
+  CHECK(ring_buffer.begin() != ring_buffer.end());
+
+  // Test forward iteration
+  int i = 14;
+  for (Iter iter = ring_buffer.begin(); iter != ring_buffer.end(); ++iter) {
+    CHECK(*iter == i);
+    --i;
+  }
+  CHECK_EQ(i, 8);  // one past last element.
+}
diff --git a/test/cctest/test-global-handles.cc b/test/cctest/test-global-handles.cc
index 0a9a3f2..ee295d6 100644
--- a/test/cctest/test-global-handles.cc
+++ b/test/cctest/test-global-handles.cc
@@ -56,7 +56,7 @@
   bool has_been_disposed() { return has_been_disposed_; }
 
   virtual void Dispose() {
-    ASSERT(!has_been_disposed_);
+    DCHECK(!has_been_disposed_);
     has_been_disposed_ = true;
   }
 
@@ -121,16 +121,16 @@
     global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
 
     // CanSkipCallback was called for all objects.
-    ASSERT(can_skip_called_objects.length() == 4);
-    ASSERT(can_skip_called_objects.Contains(*g1s1.location()));
-    ASSERT(can_skip_called_objects.Contains(*g1s2.location()));
-    ASSERT(can_skip_called_objects.Contains(*g2s1.location()));
-    ASSERT(can_skip_called_objects.Contains(*g2s2.location()));
+    DCHECK(can_skip_called_objects.length() == 4);
+    DCHECK(can_skip_called_objects.Contains(*g1s1.location()));
+    DCHECK(can_skip_called_objects.Contains(*g1s2.location()));
+    DCHECK(can_skip_called_objects.Contains(*g2s1.location()));
+    DCHECK(can_skip_called_objects.Contains(*g2s2.location()));
 
     // Nothing was visited.
-    ASSERT(visitor.visited.length() == 0);
-    ASSERT(!info1.has_been_disposed());
-    ASSERT(!info2.has_been_disposed());
+    DCHECK(visitor.visited.length() == 0);
+    DCHECK(!info1.has_been_disposed());
+    DCHECK(!info2.has_been_disposed());
   }
 
   // Iterate again, now only skip the second object group.
@@ -145,18 +145,18 @@
     global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
 
     // CanSkipCallback was called for all objects.
-    ASSERT(can_skip_called_objects.length() == 3 ||
+    DCHECK(can_skip_called_objects.length() == 3 ||
            can_skip_called_objects.length() == 4);
-    ASSERT(can_skip_called_objects.Contains(*g1s2.location()));
-    ASSERT(can_skip_called_objects.Contains(*g2s1.location()));
-    ASSERT(can_skip_called_objects.Contains(*g2s2.location()));
+    DCHECK(can_skip_called_objects.Contains(*g1s2.location()));
+    DCHECK(can_skip_called_objects.Contains(*g2s1.location()));
+    DCHECK(can_skip_called_objects.Contains(*g2s2.location()));
 
     // The first group was visited.
-    ASSERT(visitor.visited.length() == 2);
-    ASSERT(visitor.visited.Contains(*g1s1.location()));
-    ASSERT(visitor.visited.Contains(*g1s2.location()));
-    ASSERT(info1.has_been_disposed());
-    ASSERT(!info2.has_been_disposed());
+    DCHECK(visitor.visited.length() == 2);
+    DCHECK(visitor.visited.Contains(*g1s1.location()));
+    DCHECK(visitor.visited.Contains(*g1s2.location()));
+    DCHECK(info1.has_been_disposed());
+    DCHECK(!info2.has_been_disposed());
   }
 
   // Iterate again, don't skip anything.
@@ -166,15 +166,15 @@
     global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
 
     // CanSkipCallback was called for all objects.
-    ASSERT(can_skip_called_objects.length() == 1);
-    ASSERT(can_skip_called_objects.Contains(*g2s1.location()) ||
+    DCHECK(can_skip_called_objects.length() == 1);
+    DCHECK(can_skip_called_objects.Contains(*g2s1.location()) ||
            can_skip_called_objects.Contains(*g2s2.location()));
 
     // The second group was visited.
-    ASSERT(visitor.visited.length() == 2);
-    ASSERT(visitor.visited.Contains(*g2s1.location()));
-    ASSERT(visitor.visited.Contains(*g2s2.location()));
-    ASSERT(info2.has_been_disposed());
+    DCHECK(visitor.visited.length() == 2);
+    DCHECK(visitor.visited.Contains(*g2s1.location()));
+    DCHECK(visitor.visited.Contains(*g2s2.location()));
+    DCHECK(info2.has_been_disposed());
   }
 }
 
@@ -216,16 +216,16 @@
     global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
 
     // CanSkipCallback was called for all objects.
-    ASSERT(can_skip_called_objects.length() == 4);
-    ASSERT(can_skip_called_objects.Contains(*g1s1.location()));
-    ASSERT(can_skip_called_objects.Contains(*g1s2.location()));
-    ASSERT(can_skip_called_objects.Contains(*g2s1.location()));
-    ASSERT(can_skip_called_objects.Contains(*g2s2.location()));
+    DCHECK(can_skip_called_objects.length() == 4);
+    DCHECK(can_skip_called_objects.Contains(*g1s1.location()));
+    DCHECK(can_skip_called_objects.Contains(*g1s2.location()));
+    DCHECK(can_skip_called_objects.Contains(*g2s1.location()));
+    DCHECK(can_skip_called_objects.Contains(*g2s2.location()));
 
     // Nothing was visited.
-    ASSERT(visitor.visited.length() == 0);
-    ASSERT(!info1.has_been_disposed());
-    ASSERT(!info2.has_been_disposed());
+    DCHECK(visitor.visited.length() == 0);
+    DCHECK(!info1.has_been_disposed());
+    DCHECK(!info2.has_been_disposed());
   }
 
   // Iterate again, now only skip the second object group.
@@ -240,18 +240,18 @@
     global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
 
     // CanSkipCallback was called for all objects.
-    ASSERT(can_skip_called_objects.length() == 3 ||
+    DCHECK(can_skip_called_objects.length() == 3 ||
            can_skip_called_objects.length() == 4);
-    ASSERT(can_skip_called_objects.Contains(*g1s2.location()));
-    ASSERT(can_skip_called_objects.Contains(*g2s1.location()));
-    ASSERT(can_skip_called_objects.Contains(*g2s2.location()));
+    DCHECK(can_skip_called_objects.Contains(*g1s2.location()));
+    DCHECK(can_skip_called_objects.Contains(*g2s1.location()));
+    DCHECK(can_skip_called_objects.Contains(*g2s2.location()));
 
     // The first group was visited.
-    ASSERT(visitor.visited.length() == 2);
-    ASSERT(visitor.visited.Contains(*g1s1.location()));
-    ASSERT(visitor.visited.Contains(*g1s2.location()));
-    ASSERT(info1.has_been_disposed());
-    ASSERT(!info2.has_been_disposed());
+    DCHECK(visitor.visited.length() == 2);
+    DCHECK(visitor.visited.Contains(*g1s1.location()));
+    DCHECK(visitor.visited.Contains(*g1s2.location()));
+    DCHECK(info1.has_been_disposed());
+    DCHECK(!info2.has_been_disposed());
   }
 
   // Iterate again, don't skip anything.
@@ -261,15 +261,15 @@
     global_handles->IterateObjectGroups(&visitor, &CanSkipCallback);
 
     // CanSkipCallback was called for all objects.
-    ASSERT(can_skip_called_objects.length() == 1);
-    ASSERT(can_skip_called_objects.Contains(*g2s1.location()) ||
+    DCHECK(can_skip_called_objects.length() == 1);
+    DCHECK(can_skip_called_objects.Contains(*g2s1.location()) ||
            can_skip_called_objects.Contains(*g2s2.location()));
 
     // The second group was visited.
-    ASSERT(visitor.visited.length() == 2);
-    ASSERT(visitor.visited.Contains(*g2s1.location()));
-    ASSERT(visitor.visited.Contains(*g2s2.location()));
-    ASSERT(info2.has_been_disposed());
+    DCHECK(visitor.visited.length() == 2);
+    DCHECK(visitor.visited.Contains(*g2s1.location()));
+    DCHECK(visitor.visited.Contains(*g2s2.location()));
+    DCHECK(info2.has_been_disposed());
   }
 }
 
@@ -306,16 +306,16 @@
   List<ImplicitRefGroup*>* implicit_refs =
       global_handles->implicit_ref_groups();
   USE(implicit_refs);
-  ASSERT(implicit_refs->length() == 2);
-  ASSERT(implicit_refs->at(0)->parent ==
+  DCHECK(implicit_refs->length() == 2);
+  DCHECK(implicit_refs->at(0)->parent ==
          reinterpret_cast<HeapObject**>(g1s1.location()));
-  ASSERT(implicit_refs->at(0)->length == 2);
-  ASSERT(implicit_refs->at(0)->children[0] == g1c1.location());
-  ASSERT(implicit_refs->at(0)->children[1] == g1c2.location());
-  ASSERT(implicit_refs->at(1)->parent ==
+  DCHECK(implicit_refs->at(0)->length == 2);
+  DCHECK(implicit_refs->at(0)->children[0] == g1c1.location());
+  DCHECK(implicit_refs->at(0)->children[1] == g1c2.location());
+  DCHECK(implicit_refs->at(1)->parent ==
          reinterpret_cast<HeapObject**>(g2s1.location()));
-  ASSERT(implicit_refs->at(1)->length == 1);
-  ASSERT(implicit_refs->at(1)->children[0] == g2c1.location());
+  DCHECK(implicit_refs->at(1)->length == 1);
+  DCHECK(implicit_refs->at(1)->children[0] == g2c1.location());
   global_handles->RemoveObjectGroups();
   global_handles->RemoveImplicitRefGroups();
 }
diff --git a/test/cctest/test-hashing.cc b/test/cctest/test-hashing.cc
index a6e022e..692861c 100644
--- a/test/cctest/test-hashing.cc
+++ b/test/cctest/test-hashing.cc
@@ -47,94 +47,6 @@
 #define __ masm->
 
 
-void generate(MacroAssembler* masm, i::Vector<const uint8_t> string) {
-  // GenerateHashInit takes the first character as an argument so it can't
-  // handle the zero length string.
-  ASSERT(string.length() > 0);
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
-  __ push(ebx);
-  __ push(ecx);
-  __ mov(eax, Immediate(0));
-  __ mov(ebx, Immediate(string.at(0)));
-  StringHelper::GenerateHashInit(masm, eax, ebx, ecx);
-  for (int i = 1; i < string.length(); i++) {
-    __ mov(ebx, Immediate(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, eax, ebx, ecx);
-  }
-  StringHelper::GenerateHashGetHash(masm, eax, ecx);
-  __ pop(ecx);
-  __ pop(ebx);
-  __ Ret();
-#elif V8_TARGET_ARCH_X64
-  __ pushq(kRootRegister);
-  __ InitializeRootRegister();
-  __ pushq(rbx);
-  __ pushq(rcx);
-  __ movp(rax, Immediate(0));
-  __ movp(rbx, Immediate(string.at(0)));
-  StringHelper::GenerateHashInit(masm, rax, rbx, rcx);
-  for (int i = 1; i < string.length(); i++) {
-    __ movp(rbx, Immediate(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, rax, rbx, rcx);
-  }
-  StringHelper::GenerateHashGetHash(masm, rax, rcx);
-  __ popq(rcx);
-  __ popq(rbx);
-  __ popq(kRootRegister);
-  __ Ret();
-#elif V8_TARGET_ARCH_ARM
-  __ push(kRootRegister);
-  __ InitializeRootRegister();
-
-  __ mov(r0, Operand(0));
-  __ mov(ip, Operand(string.at(0)));
-  StringHelper::GenerateHashInit(masm, r0, ip);
-  for (int i = 1; i < string.length(); i++) {
-    __ mov(ip, Operand(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, r0, ip);
-  }
-  StringHelper::GenerateHashGetHash(masm, r0);
-  __ pop(kRootRegister);
-  __ mov(pc, Operand(lr));
-#elif V8_TARGET_ARCH_ARM64
-  // The ARM64 assembler usually uses jssp (x28) as a stack pointer, but only
-  // csp is initialized by the calling (C++) code.
-  Register old_stack_pointer = __ StackPointer();
-  __ SetStackPointer(csp);
-  __ Push(root, xzr);
-  __ InitializeRootRegister();
-  __ Mov(x0, 0);
-  __ Mov(x10, Operand(string.at(0)));
-  StringHelper::GenerateHashInit(masm, x0, x10);
-  for (int i = 1; i < string.length(); i++) {
-    __ Mov(x10, Operand(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, x0, x10);
-  }
-  StringHelper::GenerateHashGetHash(masm, x0, x10);
-  __ Pop(xzr, root);
-  __ Ret();
-  __ SetStackPointer(old_stack_pointer);
-#elif V8_TARGET_ARCH_MIPS
-  __ push(kRootRegister);
-  __ InitializeRootRegister();
-
-  __ li(v0, Operand(0));
-  __ li(t1, Operand(string.at(0)));
-  StringHelper::GenerateHashInit(masm, v0, t1);
-  for (int i = 1; i < string.length(); i++) {
-    __ li(t1, Operand(string.at(i)));
-    StringHelper::GenerateHashAddCharacter(masm, v0, t1);
-  }
-  StringHelper::GenerateHashGetHash(masm, v0);
-  __ pop(kRootRegister);
-  __ jr(ra);
-  __ nop();
-#else
-#error Unsupported architecture.
-#endif
-}
-
-
 void generate(MacroAssembler* masm, uint32_t key) {
 #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X87
   __ push(ebx);
@@ -170,7 +82,7 @@
   __ Pop(xzr, root);
   __ Ret();
   __ SetStackPointer(old_stack_pointer);
-#elif V8_TARGET_ARCH_MIPS
+#elif V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
   __ push(kRootRegister);
   __ InitializeRootRegister();
   __ li(v0, Operand(key));
@@ -184,44 +96,6 @@
 }
 
 
-void check(i::Vector<const uint8_t> string) {
-  Isolate* isolate = CcTest::i_isolate();
-  Factory* factory = isolate->factory();
-  HandleScope scope(isolate);
-
-  v8::internal::byte buffer[2048];
-  MacroAssembler masm(isolate, buffer, sizeof buffer);
-
-  generate(&masm, string);
-
-  CodeDesc desc;
-  masm.GetCode(&desc);
-  Handle<Object> undefined(isolate->heap()->undefined_value(), isolate);
-  Handle<Code> code = factory->NewCode(desc,
-                                       Code::ComputeFlags(Code::STUB),
-                                       undefined);
-  CHECK(code->IsCode());
-
-  HASH_FUNCTION hash = FUNCTION_CAST<HASH_FUNCTION>(code->entry());
-  Handle<String> v8_string =
-      factory->NewStringFromOneByte(string).ToHandleChecked();
-  v8_string->set_hash_field(String::kEmptyHashField);
-#ifdef USE_SIMULATOR
-  uint32_t codegen_hash = static_cast<uint32_t>(
-        reinterpret_cast<uintptr_t>(CALL_GENERATED_CODE(hash, 0, 0, 0, 0, 0)));
-#else
-  uint32_t codegen_hash = hash();
-#endif
-  uint32_t runtime_hash = v8_string->Hash();
-  CHECK(runtime_hash == codegen_hash);
-}
-
-
-void check(i::Vector<const char> s) {
-  check(i::Vector<const uint8_t>::cast(s));
-}
-
-
 void check(uint32_t key) {
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -253,38 +127,11 @@
 }
 
 
-void check_twochars(uint8_t a, uint8_t b) {
-  uint8_t ab[2] = {a, b};
-  check(i::Vector<const uint8_t>(ab, 2));
-}
-
-
 static uint32_t PseudoRandom(uint32_t i, uint32_t j) {
   return ~(~((i * 781) ^ (j * 329)));
 }
 
 
-TEST(StringHash) {
-  v8::Isolate* isolate = CcTest::isolate();
-  v8::HandleScope handle_scope(isolate);
-  v8::Context::Scope context_scope(v8::Context::New(isolate));
-
-  for (uint8_t a = 0; a < String::kMaxOneByteCharCode; a++) {
-    // Numbers are hashed differently.
-    if (a >= '0' && a <= '9') continue;
-    for (uint8_t b = 0; b < String::kMaxOneByteCharCode; b++) {
-      if (b >= '0' && b <= '9') continue;
-      check_twochars(a, b);
-    }
-  }
-  check(i::Vector<const char>("*",       1));
-  check(i::Vector<const char>(".zZ",     3));
-  check(i::Vector<const char>("muc",     3));
-  check(i::Vector<const char>("(>'_')>", 7));
-  check(i::Vector<const char>("-=[ vee eight ftw ]=-", 21));
-}
-
-
 TEST(NumberHash) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope handle_scope(isolate);
diff --git a/test/cctest/test-hashmap.cc b/test/cctest/test-hashmap.cc
index a2c8d11..1e94bed 100644
--- a/test/cctest/test-hashmap.cc
+++ b/test/cctest/test-hashmap.cc
@@ -28,9 +28,10 @@
 #include <stdlib.h>
 
 #include "src/v8.h"
-#include "src/hashmap.h"
 #include "test/cctest/cctest.h"
 
+#include "src/hashmap.h"
+
 using namespace v8::internal;
 
 static bool DefaultMatchFun(void* a, void* b) {
diff --git a/test/cctest/test-heap-profiler.cc b/test/cctest/test-heap-profiler.cc
index cd34d06..8f9b484 100644
--- a/test/cctest/test-heap-profiler.cc
+++ b/test/cctest/test-heap-profiler.cc
@@ -441,8 +441,8 @@
   CHECK_EQ(1, global->InternalFieldCount());
 
   i::Factory* factory = CcTest::i_isolate()->factory();
-  i::Handle<i::String> first = factory->NewStringFromStaticAscii("0123456789");
-  i::Handle<i::String> second = factory->NewStringFromStaticAscii("0123456789");
+  i::Handle<i::String> first = factory->NewStringFromStaticChars("0123456789");
+  i::Handle<i::String> second = factory->NewStringFromStaticChars("0123456789");
   i::Handle<i::String> cons_string =
       factory->NewConsString(first, second).ToHandleChecked();
 
@@ -472,8 +472,6 @@
 
 
 TEST(HeapSnapshotSymbol) {
-  i::FLAG_harmony_symbols = true;
-
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
@@ -496,15 +494,14 @@
 
 
 TEST(HeapSnapshotWeakCollection) {
-  i::FLAG_harmony_collections = true;
-
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
 
-  CompileRun("k = {}; v = {};\n"
-             "ws = new WeakSet(); ws.add(k); ws.add(v);\n"
-             "wm = new WeakMap(); wm.set(k, v);\n");
+  CompileRun(
+      "k = {}; v = {}; s = 'str';\n"
+      "ws = new WeakSet(); ws.add(k); ws.add(v); ws[s] = s;\n"
+      "wm = new WeakMap(); wm.set(k, v); wm[s] = s;\n");
   const v8::HeapSnapshot* snapshot =
       heap_profiler->TakeHeapSnapshot(v8_str("WeakCollections"));
   CHECK(ValidateSnapshot(snapshot));
@@ -515,6 +512,9 @@
   const v8::HeapGraphNode* v =
       GetProperty(global, v8::HeapGraphEdge::kProperty, "v");
   CHECK_NE(NULL, v);
+  const v8::HeapGraphNode* s =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "s");
+  CHECK_NE(NULL, s);
 
   const v8::HeapGraphNode* ws =
       GetProperty(global, v8::HeapGraphEdge::kProperty, "ws");
@@ -535,6 +535,10 @@
     }
   }
   CHECK_EQ(1, weak_entries);
+  const v8::HeapGraphNode* ws_s =
+      GetProperty(ws, v8::HeapGraphEdge::kProperty, "str");
+  CHECK_NE(NULL, ws_s);
+  CHECK_EQ(static_cast<int>(s->GetId()), static_cast<int>(ws_s->GetId()));
 
   const v8::HeapGraphNode* wm =
       GetProperty(global, v8::HeapGraphEdge::kProperty, "wm");
@@ -556,6 +560,83 @@
     }
   }
   CHECK_EQ(2, weak_entries);
+  const v8::HeapGraphNode* wm_s =
+      GetProperty(wm, v8::HeapGraphEdge::kProperty, "str");
+  CHECK_NE(NULL, wm_s);
+  CHECK_EQ(static_cast<int>(s->GetId()), static_cast<int>(wm_s->GetId()));
+}
+
+
+TEST(HeapSnapshotCollection) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+  CompileRun(
+      "k = {}; v = {}; s = 'str';\n"
+      "set = new Set(); set.add(k); set.add(v); set[s] = s;\n"
+      "map = new Map(); map.set(k, v); map[s] = s;\n");
+  const v8::HeapSnapshot* snapshot =
+      heap_profiler->TakeHeapSnapshot(v8_str("Collections"));
+  CHECK(ValidateSnapshot(snapshot));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* k =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "k");
+  CHECK_NE(NULL, k);
+  const v8::HeapGraphNode* v =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "v");
+  CHECK_NE(NULL, v);
+  const v8::HeapGraphNode* s =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "s");
+  CHECK_NE(NULL, s);
+
+  const v8::HeapGraphNode* set =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "set");
+  CHECK_NE(NULL, set);
+  CHECK_EQ(v8::HeapGraphNode::kObject, set->GetType());
+  CHECK_EQ(v8_str("Set"), set->GetName());
+
+  const v8::HeapGraphNode* set_table =
+      GetProperty(set, v8::HeapGraphEdge::kInternal, "table");
+  CHECK_EQ(v8::HeapGraphNode::kArray, set_table->GetType());
+  CHECK_GT(set_table->GetChildrenCount(), 0);
+  int entries = 0;
+  for (int i = 0, count = set_table->GetChildrenCount(); i < count; ++i) {
+    const v8::HeapGraphEdge* prop = set_table->GetChild(i);
+    const v8::SnapshotObjectId to_node_id = prop->GetToNode()->GetId();
+    if (to_node_id == k->GetId() || to_node_id == v->GetId()) {
+      ++entries;
+    }
+  }
+  CHECK_EQ(2, entries);
+  const v8::HeapGraphNode* set_s =
+      GetProperty(set, v8::HeapGraphEdge::kProperty, "str");
+  CHECK_NE(NULL, set_s);
+  CHECK_EQ(static_cast<int>(s->GetId()), static_cast<int>(set_s->GetId()));
+
+  const v8::HeapGraphNode* map =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "map");
+  CHECK_NE(NULL, map);
+  CHECK_EQ(v8::HeapGraphNode::kObject, map->GetType());
+  CHECK_EQ(v8_str("Map"), map->GetName());
+
+  const v8::HeapGraphNode* map_table =
+      GetProperty(map, v8::HeapGraphEdge::kInternal, "table");
+  CHECK_EQ(v8::HeapGraphNode::kArray, map_table->GetType());
+  CHECK_GT(map_table->GetChildrenCount(), 0);
+  entries = 0;
+  for (int i = 0, count = map_table->GetChildrenCount(); i < count; ++i) {
+    const v8::HeapGraphEdge* prop = map_table->GetChild(i);
+    const v8::SnapshotObjectId to_node_id = prop->GetToNode()->GetId();
+    if (to_node_id == k->GetId() || to_node_id == v->GetId()) {
+      ++entries;
+    }
+  }
+  CHECK_EQ(2, entries);
+  const v8::HeapGraphNode* map_s =
+      GetProperty(map, v8::HeapGraphEdge::kProperty, "str");
+  CHECK_NE(NULL, map_s);
+  CHECK_EQ(static_cast<int>(s->GetId()), static_cast<int>(map_s->GetId()));
 }
 
 
@@ -781,7 +862,7 @@
     return kContinue;
   }
   virtual WriteResult WriteUint32Chunk(uint32_t* buffer, int chars_written) {
-    ASSERT(false);
+    DCHECK(false);
     return kAbort;
   }
   void WriteTo(i::Vector<char> dest) { buffer_.WriteTo(dest); }
@@ -794,9 +875,9 @@
   int abort_countdown_;
 };
 
-class AsciiResource: public v8::String::ExternalAsciiStringResource {
+class OneByteResource : public v8::String::ExternalOneByteStringResource {
  public:
-  explicit AsciiResource(i::Vector<char> string): data_(string.start()) {
+  explicit OneByteResource(i::Vector<char> string) : data_(string.start()) {
     length_ = string.length();
   }
   virtual const char* data() const { return data_; }
@@ -832,7 +913,7 @@
   stream.WriteTo(json);
 
   // Verify that snapshot string is valid JSON.
-  AsciiResource* json_res = new AsciiResource(json);
+  OneByteResource* json_res = new OneByteResource(json);
   v8::Local<v8::String> json_string =
       v8::String::NewExternal(env->GetIsolate(), json_res);
   env->Global()->Set(v8_str("json_snapshot"), json_string);
@@ -950,13 +1031,13 @@
   virtual ~TestStatsStream() {}
   virtual void EndOfStream() { ++eos_signaled_; }
   virtual WriteResult WriteAsciiChunk(char* buffer, int chars_written) {
-    ASSERT(false);
+    DCHECK(false);
     return kAbort;
   }
   virtual WriteResult WriteHeapStatsChunk(v8::HeapStatsUpdate* buffer,
                                           int updates_written) {
     ++intervals_count_;
-    ASSERT(updates_written);
+    DCHECK(updates_written);
     updates_written_ += updates_written;
     entries_count_ = 0;
     if (first_interval_index_ == -1 && updates_written != 0)
@@ -1641,9 +1722,9 @@
   const v8::HeapGraphNode* global_context =
       GetProperty(global, v8::HeapGraphEdge::kInternal, "global_context");
   CHECK_NE(NULL, global_context);
-  const v8::HeapGraphNode* global_receiver =
-      GetProperty(global, v8::HeapGraphEdge::kInternal, "global_receiver");
-  CHECK_NE(NULL, global_receiver);
+  const v8::HeapGraphNode* global_proxy =
+      GetProperty(global, v8::HeapGraphEdge::kInternal, "global_proxy");
+  CHECK_NE(NULL, global_proxy);
 }
 
 
@@ -1688,7 +1769,7 @@
   v8::HandleScope scope(env->GetIsolate());
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
 
-  CompileRun("a = { s_prop: \'value\', n_prop: 0.1 };");
+  CompileRun("a = { s_prop: \'value\', n_prop: \'value2\' };");
   const v8::HeapSnapshot* snapshot =
       heap_profiler->TakeHeapSnapshot(v8_str("value"));
   CHECK(ValidateSnapshot(snapshot));
@@ -1709,10 +1790,9 @@
   CHECK(js_s_prop == heap_profiler->FindObjectById(s_prop->GetId()));
   const v8::HeapGraphNode* n_prop =
       GetProperty(obj, v8::HeapGraphEdge::kProperty, "n_prop");
-  v8::Local<v8::Number> js_n_prop =
-      js_obj->Get(v8_str("n_prop")).As<v8::Number>();
-  CHECK(js_n_prop->NumberValue() ==
-        heap_profiler->FindObjectById(n_prop->GetId())->NumberValue());
+  v8::Local<v8::String> js_n_prop =
+      js_obj->Get(v8_str("n_prop")).As<v8::String>();
+  CHECK(js_n_prop == heap_profiler->FindObjectById(n_prop->GetId()));
 }
 
 
@@ -1783,12 +1863,16 @@
       "Constructor2", i::V8HeapExplorer::GetConstructorName(*js_obj2)));
   v8::Local<v8::Object> obj3 = js_global->Get(v8_str("obj3")).As<v8::Object>();
   i::Handle<i::JSObject> js_obj3 = v8::Utils::OpenHandle(*obj3);
-  CHECK_EQ(0, StringCmp(
-      "Constructor3", i::V8HeapExplorer::GetConstructorName(*js_obj3)));
+  // TODO(verwaest): Restore to Constructor3 once supported by the
+  // heap-snapshot-generator.
+  CHECK_EQ(
+      0, StringCmp("Object", i::V8HeapExplorer::GetConstructorName(*js_obj3)));
   v8::Local<v8::Object> obj4 = js_global->Get(v8_str("obj4")).As<v8::Object>();
   i::Handle<i::JSObject> js_obj4 = v8::Utils::OpenHandle(*obj4);
-  CHECK_EQ(0, StringCmp(
-      "Constructor4", i::V8HeapExplorer::GetConstructorName(*js_obj4)));
+  // TODO(verwaest): Restore to Constructor4 once supported by the
+  // heap-snapshot-generator.
+  CHECK_EQ(
+      0, StringCmp("Object", i::V8HeapExplorer::GetConstructorName(*js_obj4)));
   v8::Local<v8::Object> obj5 = js_global->Get(v8_str("obj5")).As<v8::Object>();
   i::Handle<i::JSObject> js_obj5 = v8::Utils::OpenHandle(*obj5);
   CHECK_EQ(0, StringCmp(
@@ -1903,6 +1987,46 @@
 }
 
 
+TEST(AccessorInfo) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+  CompileRun("function foo(x) { }\n");
+  const v8::HeapSnapshot* snapshot =
+      heap_profiler->TakeHeapSnapshot(v8_str("AccessorInfoTest"));
+  CHECK(ValidateSnapshot(snapshot));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  const v8::HeapGraphNode* foo =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "foo");
+  CHECK_NE(NULL, foo);
+  const v8::HeapGraphNode* map =
+      GetProperty(foo, v8::HeapGraphEdge::kInternal, "map");
+  CHECK_NE(NULL, map);
+  const v8::HeapGraphNode* descriptors =
+      GetProperty(map, v8::HeapGraphEdge::kInternal, "descriptors");
+  CHECK_NE(NULL, descriptors);
+  const v8::HeapGraphNode* length_name =
+      GetProperty(descriptors, v8::HeapGraphEdge::kInternal, "2");
+  CHECK_NE(NULL, length_name);
+  CHECK_EQ("length", *v8::String::Utf8Value(length_name->GetName()));
+  const v8::HeapGraphNode* length_accessor =
+      GetProperty(descriptors, v8::HeapGraphEdge::kInternal, "4");
+  CHECK_NE(NULL, length_accessor);
+  CHECK_EQ("system / ExecutableAccessorInfo",
+           *v8::String::Utf8Value(length_accessor->GetName()));
+  const v8::HeapGraphNode* name =
+      GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "name");
+  CHECK_NE(NULL, name);
+  const v8::HeapGraphNode* getter =
+      GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "getter");
+  CHECK_NE(NULL, getter);
+  const v8::HeapGraphNode* setter =
+      GetProperty(length_accessor, v8::HeapGraphEdge::kInternal, "setter");
+  CHECK_NE(NULL, setter);
+}
+
+
 bool HasWeakEdge(const v8::HeapGraphNode* node) {
   for (int i = 0; i < node->GetChildrenCount(); ++i) {
     const v8::HeapGraphEdge* handle_edge = node->GetChild(i);
@@ -2227,7 +2351,7 @@
     "::(ArraySingleArgumentConstructorStub code)"
   };
   const v8::HeapGraphNode* node = GetNodeByPath(snapshot,
-      stub_path, ARRAY_SIZE(stub_path));
+      stub_path, arraysize(stub_path));
   CHECK_NE(NULL, node);
 
   const char* builtin_path1[] = {
@@ -2235,18 +2359,15 @@
     "::(Builtins)",
     "::(KeyedLoadIC_Generic builtin)"
   };
-  node = GetNodeByPath(snapshot, builtin_path1, ARRAY_SIZE(builtin_path1));
+  node = GetNodeByPath(snapshot, builtin_path1, arraysize(builtin_path1));
   CHECK_NE(NULL, node);
 
-  const char* builtin_path2[] = {
-    "::(GC roots)",
-    "::(Builtins)",
-    "::(CompileUnoptimized builtin)"
-  };
-  node = GetNodeByPath(snapshot, builtin_path2, ARRAY_SIZE(builtin_path2));
+  const char* builtin_path2[] = {"::(GC roots)", "::(Builtins)",
+                                 "::(CompileLazy builtin)"};
+  node = GetNodeByPath(snapshot, builtin_path2, arraysize(builtin_path2));
   CHECK_NE(NULL, node);
   v8::String::Utf8Value node_name(node->GetName());
-  CHECK_EQ("(CompileUnoptimized builtin)", *node_name);
+  CHECK_EQ("(CompileLazy builtin)", *node_name);
 }
 
 
@@ -2326,7 +2447,7 @@
     "for (var i = 0; i < 3; ++i)\n"
     "    a.shift();\n");
 
-  const char* names[] = { "(anonymous function)" };
+  const char* names[] = {""};
   AllocationTracker* tracker =
       reinterpret_cast<i::HeapProfiler*>(heap_profiler)->allocation_tracker();
   CHECK_NE(NULL, tracker);
@@ -2336,7 +2457,7 @@
   tracker->trace_tree()->Print(tracker);
 
   AllocationTraceNode* node =
-      FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+      FindNode(tracker, Vector<const char*>(names, arraysize(names)));
   CHECK_NE(NULL, node);
   CHECK_GE(node->allocation_count(), 2);
   CHECK_GE(node->allocation_size(), 4 * 5);
@@ -2361,10 +2482,9 @@
   // Print for better diagnostics in case of failure.
   tracker->trace_tree()->Print(tracker);
 
-  const char* names[] =
-      { "(anonymous function)", "start", "f_0_0", "f_0_1", "f_0_2" };
+  const char* names[] = {"", "start", "f_0_0", "f_0_1", "f_0_2"};
   AllocationTraceNode* node =
-      FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+      FindNode(tracker, Vector<const char*>(names, arraysize(names)));
   CHECK_NE(NULL, node);
   CHECK_GE(node->allocation_count(), 100);
   CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2397,7 +2517,7 @@
   LocalContext env;
 
   v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
-  const char* names[] = { "(anonymous function)", "start", "f_0", "f_1" };
+  const char* names[] = {"", "start", "f_0", "f_1"};
   // First check that normally all allocations are recorded.
   {
     heap_profiler->StartTrackingHeapObjects(true);
@@ -2413,7 +2533,7 @@
     tracker->trace_tree()->Print(tracker);
 
     AllocationTraceNode* node =
-        FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+        FindNode(tracker, Vector<const char*>(names, arraysize(names)));
     CHECK_NE(NULL, node);
     CHECK_GE(node->allocation_count(), 100);
     CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2439,7 +2559,7 @@
     tracker->trace_tree()->Print(tracker);
 
     AllocationTraceNode* node =
-        FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+        FindNode(tracker, Vector<const char*>(names, arraysize(names)));
     CHECK_NE(NULL, node);
     CHECK_LT(node->allocation_count(), 100);
 
@@ -2469,7 +2589,7 @@
   tracker->trace_tree()->Print(tracker);
 
   AllocationTraceNode* node =
-      FindNode(tracker, Vector<const char*>(names, ARRAY_SIZE(names)));
+      FindNode(tracker, Vector<const char*>(names, arraysize(names)));
   CHECK_NE(NULL, node);
   CHECK_GE(node->allocation_count(), 2);
   CHECK_GE(node->allocation_size(), 4 * node->allocation_count());
@@ -2531,7 +2651,7 @@
 
   CHECK_EQ(1024, static_cast<int>(ab_contents.ByteLength()));
   void* data = ab_contents.Data();
-  ASSERT(data != NULL);
+  DCHECK(data != NULL);
   v8::Local<v8::ArrayBuffer> ab2 =
       v8::ArrayBuffer::New(isolate, data, ab_contents.ByteLength());
   CHECK(ab2->IsExternal());
@@ -2571,7 +2691,7 @@
   v8::Handle<v8::Object> global = global_proxy->GetPrototype().As<v8::Object>();
 
   i::Factory* factory = CcTest::i_isolate()->factory();
-  i::Handle<i::String> string = factory->NewStringFromStaticAscii("string");
+  i::Handle<i::String> string = factory->NewStringFromStaticChars("string");
   i::Handle<i::Object> box = factory->NewBox(string);
   global->Set(0, v8::ToApiHandle<v8::Object>(box));
 
diff --git a/test/cctest/test-heap.cc b/test/cctest/test-heap.cc
index bb54b0a..e526761 100644
--- a/test/cctest/test-heap.cc
+++ b/test/cctest/test-heap.cc
@@ -34,31 +34,12 @@
 #include "src/execution.h"
 #include "src/factory.h"
 #include "src/global-handles.h"
+#include "src/ic/ic.h"
 #include "src/macro-assembler.h"
-#include "src/stub-cache.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
 
-// Go through all incremental marking steps in one swoop.
-static void SimulateIncrementalMarking() {
-  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
-  IncrementalMarking* marking = CcTest::heap()->incremental_marking();
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->WaitUntilSweepingCompleted();
-  }
-  CHECK(marking->IsMarking() || marking->IsStopped());
-  if (marking->IsStopped()) {
-    marking->Start();
-  }
-  CHECK(marking->IsMarking());
-  while (!marking->IsComplete()) {
-    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
-  }
-  CHECK(marking->IsComplete());
-}
-
-
 static void CheckMap(Map* map, int type, int instance_size) {
   CHECK(map->IsHeapObject());
 #ifdef DEBUG
@@ -179,7 +160,8 @@
   CHECK(value->IsNumber());
   CHECK_EQ(Smi::kMaxValue, Handle<Smi>::cast(value)->value());
 
-#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64)
+#if !defined(V8_TARGET_ARCH_X64) && !defined(V8_TARGET_ARCH_ARM64) && \
+    !defined(V8_TARGET_ARCH_MIPS64)
   // TODO(lrn): We need a NumberFromIntptr function in order to test this.
   value = factory->NewNumberFromInt(Smi::kMinValue - 1);
   CHECK(value->IsHeapNumber());
@@ -203,13 +185,15 @@
   CHECK(factory->nan_value()->IsNumber());
   CHECK(std::isnan(factory->nan_value()->Number()));
 
-  Handle<String> s = factory->NewStringFromStaticAscii("fisk hest ");
+  Handle<String> s = factory->NewStringFromStaticChars("fisk hest ");
   CHECK(s->IsString());
   CHECK_EQ(10, s->length());
 
   Handle<String> object_string = Handle<String>::cast(factory->Object_string());
   Handle<GlobalObject> global(CcTest::i_isolate()->context()->global_object());
-  CHECK(JSReceiver::HasOwnProperty(global, object_string));
+  v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, object_string);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
 
   // Check ToString for oddballs
   CheckOddball(isolate, heap->true_value(), "true");
@@ -261,13 +245,11 @@
     HandleScope inner_scope(isolate);
     // Allocate a function and keep it in global object's property.
     Handle<JSFunction> function = factory->NewFunction(name);
-    JSReceiver::SetProperty(global, name, function, NONE, SLOPPY).Check();
+    JSReceiver::SetProperty(global, name, function, SLOPPY).Check();
     // Allocate an object.  Unrooted after leaving the scope.
     Handle<JSObject> obj = factory->NewJSObject(function);
-    JSReceiver::SetProperty(
-        obj, prop_name, twenty_three, NONE, SLOPPY).Check();
-    JSReceiver::SetProperty(
-        obj, prop_namex, twenty_four, NONE, SLOPPY).Check();
+    JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
+    JSReceiver::SetProperty(obj, prop_namex, twenty_four, SLOPPY).Check();
 
     CHECK_EQ(Smi::FromInt(23),
              *Object::GetProperty(obj, prop_name).ToHandleChecked());
@@ -278,7 +260,9 @@
   heap->CollectGarbage(NEW_SPACE);
 
   // Function should be alive.
-  CHECK(JSReceiver::HasOwnProperty(global, name));
+  v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, name);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
   // Check function is retained.
   Handle<Object> func_value =
       Object::GetProperty(global, name).ToHandleChecked();
@@ -289,15 +273,16 @@
     HandleScope inner_scope(isolate);
     // Allocate another object, make it reachable from global.
     Handle<JSObject> obj = factory->NewJSObject(function);
-    JSReceiver::SetProperty(global, obj_name, obj, NONE, SLOPPY).Check();
-    JSReceiver::SetProperty(
-        obj, prop_name, twenty_three, NONE, SLOPPY).Check();
+    JSReceiver::SetProperty(global, obj_name, obj, SLOPPY).Check();
+    JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
   }
 
   // After gc, it should survive.
   heap->CollectGarbage(NEW_SPACE);
 
-  CHECK(JSReceiver::HasOwnProperty(global, obj_name));
+  maybe = JSReceiver::HasOwnProperty(global, obj_name);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
   Handle<Object> obj =
       Object::GetProperty(global, obj_name).ToHandleChecked();
   CHECK(obj->IsJSObject());
@@ -356,7 +341,7 @@
   {
     HandleScope scope(isolate);
 
-    Handle<Object> i = factory->NewStringFromStaticAscii("fisk");
+    Handle<Object> i = factory->NewStringFromStaticChars("fisk");
     Handle<Object> u = factory->NewNumber(1.12344);
 
     h1 = global_handles->Create(*i);
@@ -411,7 +396,7 @@
   {
     HandleScope scope(isolate);
 
-    Handle<Object> i = factory->NewStringFromStaticAscii("fisk");
+    Handle<Object> i = factory->NewStringFromStaticChars("fisk");
     Handle<Object> u = factory->NewNumber(1.12344);
 
     h1 = global_handles->Create(*i);
@@ -453,7 +438,7 @@
   {
     HandleScope scope(isolate);
 
-    Handle<Object> i = factory->NewStringFromStaticAscii("fisk");
+    Handle<Object> i = factory->NewStringFromStaticChars("fisk");
     Handle<Object> u = factory->NewNumber(1.12344);
 
     h1 = global_handles->Create(*i);
@@ -499,7 +484,7 @@
   {
     HandleScope scope(isolate);
 
-    Handle<Object> i = factory->NewStringFromStaticAscii("fisk");
+    Handle<Object> i = factory->NewStringFromStaticChars("fisk");
     h = global_handles->Create(*i);
   }
 
@@ -626,12 +611,11 @@
 
   Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
   Handle<JSObject> obj = factory->NewJSObject(function);
-  JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY).Check();
+  JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
   CHECK_EQ(Smi::FromInt(23),
            *Object::GetProperty(obj, prop_name).ToHandleChecked());
   // Check that we can add properties to function objects.
-  JSReceiver::SetProperty(
-      function, prop_name, twenty_four, NONE, SLOPPY).Check();
+  JSReceiver::SetProperty(function, prop_name, twenty_four, SLOPPY).Check();
   CHECK_EQ(Smi::FromInt(24),
            *Object::GetProperty(function, prop_name).ToHandleChecked());
 }
@@ -655,55 +639,85 @@
   Handle<Smi> two(Smi::FromInt(2), isolate);
 
   // check for empty
-  CHECK(!JSReceiver::HasOwnProperty(obj, first));
+  v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, first);
+  CHECK(maybe.has_value);
+  CHECK(!maybe.value);
 
   // add first
-  JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY).Check();
-  CHECK(JSReceiver::HasOwnProperty(obj, first));
+  JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
+  maybe = JSReceiver::HasOwnProperty(obj, first);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
 
   // delete first
   JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION).Check();
-  CHECK(!JSReceiver::HasOwnProperty(obj, first));
+  maybe = JSReceiver::HasOwnProperty(obj, first);
+  CHECK(maybe.has_value);
+  CHECK(!maybe.value);
 
   // add first and then second
-  JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY).Check();
-  JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY).Check();
-  CHECK(JSReceiver::HasOwnProperty(obj, first));
-  CHECK(JSReceiver::HasOwnProperty(obj, second));
+  JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
+  JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
+  maybe = JSReceiver::HasOwnProperty(obj, first);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
+  maybe = JSReceiver::HasOwnProperty(obj, second);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
 
   // delete first and then second
   JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION).Check();
-  CHECK(JSReceiver::HasOwnProperty(obj, second));
+  maybe = JSReceiver::HasOwnProperty(obj, second);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
   JSReceiver::DeleteProperty(obj, second, JSReceiver::NORMAL_DELETION).Check();
-  CHECK(!JSReceiver::HasOwnProperty(obj, first));
-  CHECK(!JSReceiver::HasOwnProperty(obj, second));
+  maybe = JSReceiver::HasOwnProperty(obj, first);
+  CHECK(maybe.has_value);
+  CHECK(!maybe.value);
+  maybe = JSReceiver::HasOwnProperty(obj, second);
+  CHECK(maybe.has_value);
+  CHECK(!maybe.value);
 
   // add first and then second
-  JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY).Check();
-  JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY).Check();
-  CHECK(JSReceiver::HasOwnProperty(obj, first));
-  CHECK(JSReceiver::HasOwnProperty(obj, second));
+  JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
+  JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
+  maybe = JSReceiver::HasOwnProperty(obj, first);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
+  maybe = JSReceiver::HasOwnProperty(obj, second);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
 
   // delete second and then first
   JSReceiver::DeleteProperty(obj, second, JSReceiver::NORMAL_DELETION).Check();
-  CHECK(JSReceiver::HasOwnProperty(obj, first));
+  maybe = JSReceiver::HasOwnProperty(obj, first);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
   JSReceiver::DeleteProperty(obj, first, JSReceiver::NORMAL_DELETION).Check();
-  CHECK(!JSReceiver::HasOwnProperty(obj, first));
-  CHECK(!JSReceiver::HasOwnProperty(obj, second));
+  maybe = JSReceiver::HasOwnProperty(obj, first);
+  CHECK(maybe.has_value);
+  CHECK(!maybe.value);
+  maybe = JSReceiver::HasOwnProperty(obj, second);
+  CHECK(maybe.has_value);
+  CHECK(!maybe.value);
 
   // check string and internalized string match
   const char* string1 = "fisk";
   Handle<String> s1 = factory->NewStringFromAsciiChecked(string1);
-  JSReceiver::SetProperty(obj, s1, one, NONE, SLOPPY).Check();
+  JSReceiver::SetProperty(obj, s1, one, SLOPPY).Check();
   Handle<String> s1_string = factory->InternalizeUtf8String(string1);
-  CHECK(JSReceiver::HasOwnProperty(obj, s1_string));
+  maybe = JSReceiver::HasOwnProperty(obj, s1_string);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
 
   // check internalized string and string match
   const char* string2 = "fugl";
   Handle<String> s2_string = factory->InternalizeUtf8String(string2);
-  JSReceiver::SetProperty(obj, s2_string, one, NONE, SLOPPY).Check();
+  JSReceiver::SetProperty(obj, s2_string, one, SLOPPY).Check();
   Handle<String> s2 = factory->NewStringFromAsciiChecked(string2);
-  CHECK(JSReceiver::HasOwnProperty(obj, s2));
+  maybe = JSReceiver::HasOwnProperty(obj, s2);
+  CHECK(maybe.has_value);
+  CHECK(maybe.value);
 }
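
The eight has_value/value pairs above all follow one shape; a hypothetical helper (not in the actual file) that assumes the v8::Maybe<bool> layout used in this test could compress them:

// Hypothetical helper, assuming the v8::Maybe<bool> with public
// has_value/value fields seen above.
static void CheckHasOwn(Handle<JSObject> obj, Handle<String> name,
                        bool expected) {
  v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(obj, name);
  CHECK(maybe.has_value);           // the lookup completed without throwing
  CHECK_EQ(expected, maybe.value);  // then assert the boolean answer
}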
 
 
@@ -722,7 +736,7 @@
 
   // Set a property
   Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
-  JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY).Check();
+  JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
   CHECK_EQ(Smi::FromInt(23),
            *Object::GetProperty(obj, prop_name).ToHandleChecked());
 
@@ -800,8 +814,8 @@
   Handle<Smi> one(Smi::FromInt(1), isolate);
   Handle<Smi> two(Smi::FromInt(2), isolate);
 
-  JSReceiver::SetProperty(obj, first, one, NONE, SLOPPY).Check();
-  JSReceiver::SetProperty(obj, second, two, NONE, SLOPPY).Check();
+  JSReceiver::SetProperty(obj, first, one, SLOPPY).Check();
+  JSReceiver::SetProperty(obj, second, two, SLOPPY).Check();
 
   JSReceiver::SetElement(obj, 0, first, NONE, SLOPPY).Check();
   JSReceiver::SetElement(obj, 1, second, NONE, SLOPPY).Check();
@@ -826,8 +840,8 @@
   CHECK_EQ(*value1, *value2);
 
   // Flip the values.
-  JSReceiver::SetProperty(clone, first, two, NONE, SLOPPY).Check();
-  JSReceiver::SetProperty(clone, second, one, NONE, SLOPPY).Check();
+  JSReceiver::SetProperty(clone, first, two, SLOPPY).Check();
+  JSReceiver::SetProperty(clone, second, one, SLOPPY).Check();
 
   JSReceiver::SetElement(clone, 0, second, NONE, SLOPPY).Check();
   JSReceiver::SetElement(clone, 1, first, NONE, SLOPPY).Check();
@@ -856,33 +870,34 @@
   const unsigned char chars[] = { 0xe5, 0xa4, 0xa7 };
   for (int length = 0; length < 100; length++) {
     v8::HandleScope scope(CcTest::isolate());
-    char* non_ascii = NewArray<char>(3 * length + 1);
-    char* ascii = NewArray<char>(length + 1);
-    non_ascii[3 * length] = 0;
-    ascii[length] = 0;
+    char* non_one_byte = NewArray<char>(3 * length + 1);
+    char* one_byte = NewArray<char>(length + 1);
+    non_one_byte[3 * length] = 0;
+    one_byte[length] = 0;
     for (int i = 0; i < length; i++) {
-      ascii[i] = 'a';
-      non_ascii[3 * i] = chars[0];
-      non_ascii[3 * i + 1] = chars[1];
-      non_ascii[3 * i + 2] = chars[2];
+      one_byte[i] = 'a';
+      non_one_byte[3 * i] = chars[0];
+      non_one_byte[3 * i + 1] = chars[1];
+      non_one_byte[3 * i + 2] = chars[2];
     }
-    Handle<String> non_ascii_sym =
-        factory->InternalizeUtf8String(
-            Vector<const char>(non_ascii, 3 * length));
-    CHECK_EQ(length, non_ascii_sym->length());
-    Handle<String> ascii_sym =
-        factory->InternalizeOneByteString(OneByteVector(ascii, length));
-    CHECK_EQ(length, ascii_sym->length());
-    Handle<String> non_ascii_str = factory->NewStringFromUtf8(
-        Vector<const char>(non_ascii, 3 * length)).ToHandleChecked();
-    non_ascii_str->Hash();
-    CHECK_EQ(length, non_ascii_str->length());
-    Handle<String> ascii_str = factory->NewStringFromUtf8(
-        Vector<const char>(ascii, length)).ToHandleChecked();
-    ascii_str->Hash();
-    CHECK_EQ(length, ascii_str->length());
-    DeleteArray(non_ascii);
-    DeleteArray(ascii);
+    Handle<String> non_one_byte_sym = factory->InternalizeUtf8String(
+        Vector<const char>(non_one_byte, 3 * length));
+    CHECK_EQ(length, non_one_byte_sym->length());
+    Handle<String> one_byte_sym =
+        factory->InternalizeOneByteString(OneByteVector(one_byte, length));
+    CHECK_EQ(length, one_byte_sym->length());
+    Handle<String> non_one_byte_str =
+        factory->NewStringFromUtf8(Vector<const char>(non_one_byte, 3 * length))
+            .ToHandleChecked();
+    non_one_byte_str->Hash();
+    CHECK_EQ(length, non_one_byte_str->length());
+    Handle<String> one_byte_str =
+        factory->NewStringFromUtf8(Vector<const char>(one_byte, length))
+            .ToHandleChecked();
+    one_byte_str->Hash();
+    CHECK_EQ(length, one_byte_str->length());
+    DeleteArray(non_one_byte);
+    DeleteArray(one_byte);
   }
 }
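
The renaming above leaves the encoding fact the test relies on unchanged: {0xE5, 0xA4, 0xA7} is a single three-byte UTF-8 sequence (U+5927), so 3 * length input bytes decode to exactly length characters. The decode, spelled out:

// 1110xxxx 10xxxxxx 10xxxxxx -> 0101 ++ 100100 ++ 100111 == 0x5927
unsigned cp = ((0xE5u & 0x0Fu) << 12) | ((0xA4u & 0x3Fu) << 6) | (0xA7u & 0x3Fu);
CHECK_EQ(0x5927, static_cast<int>(cp));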
 
@@ -920,10 +935,9 @@
                                                 TENURED);
 
   // Allocate a small string to OLD_DATA_SPACE and NEW_SPACE
+  objs[next_objs_index++] = factory->NewStringFromStaticChars("abcdefghij");
   objs[next_objs_index++] =
-      factory->NewStringFromStaticAscii("abcdefghij");
-  objs[next_objs_index++] =
-      factory->NewStringFromStaticAscii("abcdefghij", TENURED);
+      factory->NewStringFromStaticChars("abcdefghij", TENURED);
 
   // Allocate a large string (for large object space).
   int large_size = Page::kMaxRegularHeapObjectSize + 1;
@@ -978,11 +992,8 @@
   // that region dirty marks are updated correctly.
 
   // Step 1: prepare a map for the object.  We add 1 inobject property to it.
-  Handle<JSFunction> object_ctor(
-      CcTest::i_isolate()->native_context()->object_function());
-  CHECK(object_ctor->has_initial_map());
   // Create a map with a single inobject property.
-  Handle<Map> my_map = Map::Create(object_ctor, 1);
+  Handle<Map> my_map = Map::Create(CcTest::i_isolate(), 1);
   int n_properties = my_map->inobject_properties();
   CHECK_GT(n_properties, 0);
 
@@ -1038,53 +1049,61 @@
 }
 
 
-TEST(TestCodeFlushing) {
+UNINITIALIZED_TEST(TestCodeFlushing) {
   // If we do not flush code this test is invalid.
   if (!FLAG_flush_code) return;
   i::FLAG_allow_natives_syntax = true;
   i::FLAG_optimize_for_size = false;
-  CcTest::InitializeVM();
-  Isolate* isolate = CcTest::i_isolate();
-  Factory* factory = isolate->factory();
-  v8::HandleScope scope(CcTest::isolate());
-  const char* source = "function foo() {"
-                       "  var x = 42;"
-                       "  var y = 42;"
-                       "  var z = x + y;"
-                       "};"
-                       "foo()";
-  Handle<String> foo_name = factory->InternalizeUtf8String("foo");
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  isolate->Enter();
+  Factory* factory = i_isolate->factory();
+  {
+    v8::HandleScope scope(isolate);
+    v8::Context::New(isolate)->Enter();
+    const char* source =
+        "function foo() {"
+        "  var x = 42;"
+        "  var y = 42;"
+        "  var z = x + y;"
+        "};"
+        "foo()";
+    Handle<String> foo_name = factory->InternalizeUtf8String("foo");
 
-  // This compile will add the code to the compilation cache.
-  { v8::HandleScope scope(CcTest::isolate());
-    CompileRun(source);
+    // This compile will add the code to the compilation cache.
+    {
+      v8::HandleScope scope(isolate);
+      CompileRun(source);
+    }
+
+    // Check function is compiled.
+    Handle<Object> func_value = Object::GetProperty(i_isolate->global_object(),
+                                                    foo_name).ToHandleChecked();
+    CHECK(func_value->IsJSFunction());
+    Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
+    CHECK(function->shared()->is_compiled());
+
+    // The code will survive at least two GCs.
+    i_isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+    i_isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+    CHECK(function->shared()->is_compiled());
+
+    // Simulate several GCs that use full marking.
+    const int kAgingThreshold = 6;
+    for (int i = 0; i < kAgingThreshold; i++) {
+      i_isolate->heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+    }
+
+    // foo should no longer be in the compilation cache
+    CHECK(!function->shared()->is_compiled() || function->IsOptimized());
+    CHECK(!function->is_compiled() || function->IsOptimized());
+    // Call foo to get it recompiled.
+    CompileRun("foo()");
+    CHECK(function->shared()->is_compiled());
+    CHECK(function->is_compiled());
   }
-
-  // Check function is compiled.
-  Handle<Object> func_value = Object::GetProperty(
-      CcTest::i_isolate()->global_object(), foo_name).ToHandleChecked();
-  CHECK(func_value->IsJSFunction());
-  Handle<JSFunction> function = Handle<JSFunction>::cast(func_value);
-  CHECK(function->shared()->is_compiled());
-
-  // The code will survive at least two GCs.
-  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-  CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-  CHECK(function->shared()->is_compiled());
-
-  // Simulate several GCs that use full marking.
-  const int kAgingThreshold = 6;
-  for (int i = 0; i < kAgingThreshold; i++) {
-    CcTest::heap()->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
-  }
-
-  // foo should no longer be in the compilation cache
-  CHECK(!function->shared()->is_compiled() || function->IsOptimized());
-  CHECK(!function->is_compiled() || function->IsOptimized());
-  // Call foo to get it recompiled.
-  CompileRun("foo()");
-  CHECK(function->shared()->is_compiled());
-  CHECK(function->is_compiled());
+  isolate->Exit();
+  isolate->Dispose();
 }
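
The UNINITIALIZED_TEST conversion above follows a fixed skeleton: a private isolate, explicit Enter/Exit, and all handles confined to an inner block so they are gone before Dispose. The shape, sketched:

v8::Isolate* isolate = v8::Isolate::New();
isolate->Enter();
{
  v8::HandleScope scope(isolate);
  v8::Context::New(isolate)->Enter();
  // ... test body runs against the private isolate ...
}  // handle scope and context unwind here
isolate->Exit();
isolate->Dispose();  // safe only once nothing references the isolate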
 
 
@@ -1190,7 +1209,7 @@
   // Simulate several GCs that use incremental marking.
   const int kAgingThreshold = 6;
   for (int i = 0; i < kAgingThreshold; i++) {
-    SimulateIncrementalMarking();
+    SimulateIncrementalMarking(CcTest::heap());
     CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   }
   CHECK(!function->shared()->is_compiled() || function->IsOptimized());
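
The SimulateIncrementalMarking call sites in this file now pass the heap explicitly. The updated helper is not shown in this hunk; its probable shape, inferred from the Start/Step/IsComplete usage visible elsewhere in this diff (the real body may differ):

static void SimulateIncrementalMarking(Heap* heap) {
  IncrementalMarking* marking = heap->incremental_marking();
  if (marking->IsStopped()) marking->Start();
  // Step in 1 MB chunks until the marker reports completion.
  while (!marking->IsComplete()) {
    marking->Step(MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
  }
}
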
@@ -1204,7 +1223,7 @@
   // Simulate several GCs that use incremental marking but make sure
   // the loop breaks once the function is enqueued as a candidate.
   for (int i = 0; i < kAgingThreshold; i++) {
-    SimulateIncrementalMarking();
+    SimulateIncrementalMarking(CcTest::heap());
     if (!function->next_function_link()->IsUndefined()) break;
     CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   }
@@ -1280,7 +1299,7 @@
   // Simulate incremental marking so that the functions are enqueued as
   // code flushing candidates. Then kill one of the functions. Finally
   // perform a scavenge while incremental marking is still running.
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(CcTest::heap());
   *function2.location() = NULL;
   CcTest::heap()->CollectGarbage(NEW_SPACE, "test scavenge while marking");
 
@@ -1334,7 +1353,7 @@
 
   // Simulate incremental marking so that the function is enqueued as
   // code flushing candidate.
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(heap);
 
   // Enable the debugger and add a breakpoint while incremental marking
   // is running so that incremental marking aborts and code flushing is
@@ -1594,8 +1613,8 @@
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
   MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->WaitUntilSweepingCompleted();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
   }
   int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
 
@@ -1621,8 +1640,8 @@
   CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
 
   // Waiting for sweeper threads should not change heap size.
-  if (collector->IsConcurrentSweepingInProgress()) {
-    collector->WaitUntilSweepingCompleted();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
   }
   CHECK_EQ(initial_size, static_cast<int>(CcTest::heap()->SizeOfObjects()));
 }
@@ -1670,7 +1689,7 @@
   Factory* factory = isolate->factory();
   HandleScope scope(isolate);
   AlwaysAllocateScope always_allocate(isolate);
-  intptr_t available = new_space->EffectiveCapacity() - new_space->Size();
+  intptr_t available = new_space->Capacity() - new_space->Size();
   intptr_t number_of_fillers = (available / FixedArray::SizeFor(32)) - 1;
   for (intptr_t i = 0; i < number_of_fillers; i++) {
     CHECK(heap->InNewSpace(*factory->NewFixedArray(32, NOT_TENURED)));
@@ -1693,20 +1712,20 @@
 
   // Explicitly growing should double the space capacity.
   intptr_t old_capacity, new_capacity;
-  old_capacity = new_space->Capacity();
+  old_capacity = new_space->TotalCapacity();
   new_space->Grow();
-  new_capacity = new_space->Capacity();
+  new_capacity = new_space->TotalCapacity();
   CHECK(2 * old_capacity == new_capacity);
 
-  old_capacity = new_space->Capacity();
+  old_capacity = new_space->TotalCapacity();
   FillUpNewSpace(new_space);
-  new_capacity = new_space->Capacity();
+  new_capacity = new_space->TotalCapacity();
   CHECK(old_capacity == new_capacity);
 
   // Explicitly shrinking should not affect space capacity.
-  old_capacity = new_space->Capacity();
+  old_capacity = new_space->TotalCapacity();
   new_space->Shrink();
-  new_capacity = new_space->Capacity();
+  new_capacity = new_space->TotalCapacity();
   CHECK(old_capacity == new_capacity);
 
   // Let the scavenger empty the new space.
@@ -1714,17 +1733,17 @@
   CHECK_LE(new_space->Size(), old_capacity);
 
   // Explicitly shrinking should halve the space capacity.
-  old_capacity = new_space->Capacity();
+  old_capacity = new_space->TotalCapacity();
   new_space->Shrink();
-  new_capacity = new_space->Capacity();
+  new_capacity = new_space->TotalCapacity();
   CHECK(old_capacity == 2 * new_capacity);
 
   // Consecutive shrinking should not affect space capacity.
-  old_capacity = new_space->Capacity();
+  old_capacity = new_space->TotalCapacity();
   new_space->Shrink();
   new_space->Shrink();
   new_space->Shrink();
-  new_capacity = new_space->Capacity();
+  new_capacity = new_space->TotalCapacity();
   CHECK(old_capacity == new_capacity);
 }
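
Restated, the Capacity -> TotalCapacity renames above assert three invariants of the semi-space sizing (a compact sketch of the same checks):

intptr_t c0 = new_space->TotalCapacity();
new_space->Grow();
CHECK(new_space->TotalCapacity() == 2 * c0);  // growing doubles the capacity
new_space->Shrink();                          // no effect while space is used
CHECK(new_space->TotalCapacity() == 2 * c0);
// ... after a scavenge has emptied new space ...
new_space->Shrink();
CHECK(new_space->TotalCapacity() == c0);      // shrinking halves when empty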
 
@@ -1743,13 +1762,13 @@
   v8::HandleScope scope(CcTest::isolate());
   NewSpace* new_space = heap->new_space();
   intptr_t old_capacity, new_capacity;
-  old_capacity = new_space->Capacity();
+  old_capacity = new_space->TotalCapacity();
   new_space->Grow();
-  new_capacity = new_space->Capacity();
+  new_capacity = new_space->TotalCapacity();
   CHECK(2 * old_capacity == new_capacity);
   FillUpNewSpace(new_space);
   heap->CollectAllAvailableGarbage();
-  new_capacity = new_space->Capacity();
+  new_capacity = new_space->TotalCapacity();
   CHECK(old_capacity == new_capacity);
 }
 
@@ -1800,7 +1819,7 @@
     ctx2->Exit();
     v8::Local<v8::Context>::New(isolate, ctx1)->Exit();
     ctx1p.Reset();
-    v8::V8::ContextDisposedNotification();
+    isolate->ContextDisposedNotification();
   }
   CcTest::heap()->CollectAllAvailableGarbage();
   CHECK_EQ(2, NumberOfGlobalObjects());
@@ -1846,7 +1865,7 @@
     ctx2->Exit();
     ctx1->Exit();
     ctx1p.Reset();
-    v8::V8::ContextDisposedNotification();
+    isolate->ContextDisposedNotification();
   }
   CcTest::heap()->CollectAllAvailableGarbage();
   CHECK_EQ(2, NumberOfGlobalObjects());
@@ -1890,7 +1909,7 @@
     ctx2->Exit();
     ctx1->Exit();
     ctx1p.Reset();
-    v8::V8::ContextDisposedNotification();
+    isolate->ContextDisposedNotification();
   }
   CcTest::heap()->CollectAllAvailableGarbage();
   CHECK_EQ(2, NumberOfGlobalObjects());
@@ -1938,7 +1957,7 @@
     ctx2->Exit();
     ctx1->Exit();
     ctx1p.Reset();
-    v8::V8::ContextDisposedNotification();
+    isolate->ContextDisposedNotification();
   }
   CcTest::heap()->CollectAllAvailableGarbage();
   CHECK_EQ(2, NumberOfGlobalObjects());
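
The recurring rewrite of v8::V8::ContextDisposedNotification() in this file tracks the move of these hints from process-wide statics to per-isolate methods:

// Before: a static call that implicitly targeted the current isolate.
v8::V8::ContextDisposedNotification();
// After: the receiver is explicit; IdleNotification moved the same way.
isolate->ContextDisposedNotification();
isolate->IdleNotification(1000);  // idle-pause hint in milliseconds
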
@@ -2099,8 +2118,8 @@
 
   // The following two calls will increment CcTest::heap()->global_ic_age().
   const int kLongIdlePauseInMs = 1000;
-  v8::V8::ContextDisposedNotification();
-  v8::V8::IdleNotification(kLongIdlePauseInMs);
+  CcTest::isolate()->ContextDisposedNotification();
+  CcTest::isolate()->IdleNotification(kLongIdlePauseInMs);
 
   while (!marking->IsStopped() && !marking->IsComplete()) {
     marking->Step(1 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
@@ -2155,8 +2174,8 @@
   // The following two calls will increment CcTest::heap()->global_ic_age().
   // Since incremental marking is off, IdleNotification will do full GC.
   const int kLongIdlePauseInMs = 1000;
-  v8::V8::ContextDisposedNotification();
-  v8::V8::IdleNotification(kLongIdlePauseInMs);
+  CcTest::isolate()->ContextDisposedNotification();
+  CcTest::isolate()->IdleNotification(kLongIdlePauseInMs);
 
   CHECK_EQ(CcTest::heap()->global_ic_age(), f->shared()->ic_age());
   CHECK_EQ(0, f->shared()->opt_count());
@@ -2725,7 +2744,7 @@
   CompileRun("%DebugPrint(root);");
   CHECK_EQ(transitions_count, transitions_before);
 
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(CcTest::heap());
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   // Count number of live transitions after marking.  Note that one transition
@@ -2763,8 +2782,7 @@
   i::FLAG_gc_interval = gc_count;
   i::FLAG_gc_global = true;
   CcTest::heap()->set_allocation_timeout(gc_count);
-  JSReceiver::SetProperty(
-      object, prop_name, twenty_three, NONE, SLOPPY).Check();
+  JSReceiver::SetProperty(object, prop_name, twenty_three, SLOPPY).Check();
 }
 
 
@@ -2840,6 +2858,7 @@
 
   root = GetByName("root");
   AddPropertyTo(0, root, "prop9");
+  CcTest::i_isolate()->heap()->CollectGarbage(OLD_POINTER_SPACE);
 
   // Count number of live transitions after marking.  Note that one transition
   // is left, because 'o' still holds an instance of one transition target.
@@ -2867,7 +2886,7 @@
   CompileRun("o = new F;"
              "root = new F");
   root = GetByName("root");
-  ASSERT(root->map()->transitions()->IsSimpleTransition());
+  DCHECK(root->map()->transitions()->IsSimpleTransition());
   AddPropertyTo(2, root, "happy");
 
   // Count number of live transitions after marking.  Note that one transition
@@ -2891,7 +2910,7 @@
              "root.foo = 0;"
              "root = new Object;");
 
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(CcTest::heap());
 
   // Compile a StoreIC that performs the prepared map transition. This
   // will restart incremental marking and should make sure the root is
@@ -2932,7 +2951,7 @@
              "root.foo = 0;"
              "root = new Object;");
 
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(CcTest::heap());
 
   // Compile an optimized LStoreNamedField that performs the prepared
   // map transition. This will restart incremental marking and should
@@ -2988,7 +3007,8 @@
 
   // Triggering one GC will cause a lot of garbage to be discovered but
   // evenly spread across all allocated pages.
-  heap->CollectAllGarbage(Heap::kNoGCFlags, "triggered for preparation");
+  heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask,
+                          "triggered for preparation");
   CHECK_GE(number_of_test_pages + 1, old_pointer_space->CountTotalPages());
 
   // Triggering subsequent GCs should cause at least half of the pages
@@ -3054,8 +3074,9 @@
           *v8::Handle<v8::Function>::Cast(
               CcTest::global()->Get(v8_str("g"))));
 
-  DisallowHeapAllocation no_allocation;
-  g->shared()->PrintLn();
+  OFStream os(stdout);
+  g->shared()->Print(os);
+  os << endl;
 }
 #endif  // OBJECT_PRINT
 
@@ -3124,20 +3145,24 @@
           *v8::Handle<v8::Function>::Cast(
               CcTest::global()->Get(v8_str("f"))));
 
-  Handle<FixedArray> feedback_vector(f->shared()->feedback_vector());
+  Handle<TypeFeedbackVector> feedback_vector(f->shared()->feedback_vector());
 
-  CHECK_EQ(2, feedback_vector->length());
-  CHECK(feedback_vector->get(0)->IsJSFunction());
-  CHECK(feedback_vector->get(1)->IsJSFunction());
+  int expected_length = FLAG_vector_ics ? 4 : 2;
+  CHECK_EQ(expected_length, feedback_vector->length());
+  for (int i = 0; i < expected_length; i++) {
+    if ((i % 2) == 1) {
+      CHECK(feedback_vector->get(i)->IsJSFunction());
+    }
+  }
 
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(CcTest::heap());
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
-  CHECK_EQ(2, feedback_vector->length());
-  CHECK_EQ(feedback_vector->get(0),
-           *TypeFeedbackInfo::UninitializedSentinel(CcTest::i_isolate()));
-  CHECK_EQ(feedback_vector->get(1),
-           *TypeFeedbackInfo::UninitializedSentinel(CcTest::i_isolate()));
+  CHECK_EQ(expected_length, feedback_vector->length());
+  for (int i = 0; i < expected_length; i++) {
+    CHECK_EQ(feedback_vector->get(i),
+             *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
+  }
 }
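
The expected_length switch encodes the slot layout this test assumes for the two calls in f (inferred from the indices the loop checks):

// FLAG_vector_ics off:  [call0, call1]            -> length 2
// FLAG_vector_ics on:   [ic0, call0, ic1, call1]  -> length 4
// With vector ICs, call feedback sits at the odd slots; the loop asserts
// IsJSFunction() only there, which also holds for slot 1 of the short
// layout, so a single loop covers both configurations.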
 
 
@@ -3173,7 +3198,7 @@
   Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
   CHECK(ic_before->ic_state() == MONOMORPHIC);
 
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(CcTest::heap());
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
@@ -3206,8 +3231,8 @@
   CHECK(ic_before->ic_state() == MONOMORPHIC);
 
   // Fire context dispose notification.
-  v8::V8::ContextDisposedNotification();
-  SimulateIncrementalMarking();
+  CcTest::isolate()->ContextDisposedNotification();
+  SimulateIncrementalMarking(CcTest::heap());
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
@@ -3247,8 +3272,8 @@
   CHECK(ic_before->ic_state() == POLYMORPHIC);
 
   // Fire context dispose notification.
-  v8::V8::ContextDisposedNotification();
-  SimulateIncrementalMarking();
+  CcTest::isolate()->ContextDisposedNotification();
+  SimulateIncrementalMarking(CcTest::heap());
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
@@ -3256,7 +3281,7 @@
 }
 
 
-class SourceResource: public v8::String::ExternalAsciiStringResource {
+class SourceResource : public v8::String::ExternalOneByteStringResource {
  public:
   explicit SourceResource(const char* data)
     : data_(data), length_(strlen(data)) { }
@@ -3278,26 +3303,28 @@
 };
 
 
-void ReleaseStackTraceDataTest(const char* source, const char* accessor) {
+void ReleaseStackTraceDataTest(v8::Isolate* isolate, const char* source,
+                               const char* accessor) {
   // Test that the data retained by the Error.stack accessor is released
   // after the first time the accessor is fired.  We use an external string
   // to check whether the data is being released, since the external string
   // resource's callback is fired when the external string is GC'ed.
-  v8::HandleScope scope(CcTest::isolate());
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  v8::HandleScope scope(isolate);
   SourceResource* resource = new SourceResource(i::StrDup(source));
   {
-    v8::HandleScope scope(CcTest::isolate());
+    v8::HandleScope scope(isolate);
     v8::Handle<v8::String> source_string =
-        v8::String::NewExternal(CcTest::isolate(), resource);
-    CcTest::heap()->CollectAllAvailableGarbage();
+        v8::String::NewExternal(isolate, resource);
+    i_isolate->heap()->CollectAllAvailableGarbage();
     v8::Script::Compile(source_string)->Run();
     CHECK(!resource->IsDisposed());
   }
-  // CcTest::heap()->CollectAllAvailableGarbage();
+  // i_isolate->heap()->CollectAllAvailableGarbage();
   CHECK(!resource->IsDisposed());
 
   CompileRun(accessor);
-  CcTest::heap()->CollectAllAvailableGarbage();
+  i_isolate->heap()->CollectAllAvailableGarbage();
 
   // External source has been released.
   CHECK(resource->IsDisposed());
@@ -3305,7 +3332,7 @@
 }
 
 
-TEST(ReleaseStackTraceData) {
+UNINITIALIZED_TEST(ReleaseStackTraceData) {
   if (i::FLAG_always_opt) {
     // TODO(ulan): Remove this once the memory leak via code_next_link is fixed.
     // See: https://codereview.chromium.org/181833004/
@@ -3313,46 +3340,52 @@
   }
   FLAG_use_ic = false;  // ICs retain objects.
   FLAG_concurrent_recompilation = false;
-  CcTest::InitializeVM();
-  static const char* source1 = "var error = null;            "
-  /* Normal Error */           "try {                        "
-                               "  throw new Error();         "
-                               "} catch (e) {                "
-                               "  error = e;                 "
-                               "}                            ";
-  static const char* source2 = "var error = null;            "
-  /* Stack overflow */         "try {                        "
-                               "  (function f() { f(); })(); "
-                               "} catch (e) {                "
-                               "  error = e;                 "
-                               "}                            ";
-  static const char* source3 = "var error = null;            "
-  /* Normal Error */           "try {                        "
-  /* as prototype */           "  throw new Error();         "
-                               "} catch (e) {                "
-                               "  error = {};                "
-                               "  error.__proto__ = e;       "
-                               "}                            ";
-  static const char* source4 = "var error = null;            "
-  /* Stack overflow */         "try {                        "
-  /* as prototype   */         "  (function f() { f(); })(); "
-                               "} catch (e) {                "
-                               "  error = {};                "
-                               "  error.__proto__ = e;       "
-                               "}                            ";
-  static const char* getter = "error.stack";
-  static const char* setter = "error.stack = 0";
+  v8::Isolate* isolate = v8::Isolate::New();
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope(isolate);
+    v8::Context::New(isolate)->Enter();
+    static const char* source1 = "var error = null;            "
+    /* Normal Error */           "try {                        "
+                                 "  throw new Error();         "
+                                 "} catch (e) {                "
+                                 "  error = e;                 "
+                                 "}                            ";
+    static const char* source2 = "var error = null;            "
+    /* Stack overflow */         "try {                        "
+                                 "  (function f() { f(); })(); "
+                                 "} catch (e) {                "
+                                 "  error = e;                 "
+                                 "}                            ";
+    static const char* source3 = "var error = null;            "
+    /* Normal Error */           "try {                        "
+    /* as prototype */           "  throw new Error();         "
+                                 "} catch (e) {                "
+                                 "  error = {};                "
+                                 "  error.__proto__ = e;       "
+                                 "}                            ";
+    static const char* source4 = "var error = null;            "
+    /* Stack overflow */         "try {                        "
+    /* as prototype   */         "  (function f() { f(); })(); "
+                                 "} catch (e) {                "
+                                 "  error = {};                "
+                                 "  error.__proto__ = e;       "
+                                 "}                            ";
+    static const char* getter = "error.stack";
+    static const char* setter = "error.stack = 0";
 
-  ReleaseStackTraceDataTest(source1, setter);
-  ReleaseStackTraceDataTest(source2, setter);
-  // We do not test source3 and source4 with setter, since the setter is
-  // supposed to (untypically) write to the receiver, not the holder.  This is
-  // to emulate the behavior of a data property.
+    ReleaseStackTraceDataTest(isolate, source1, setter);
+    ReleaseStackTraceDataTest(isolate, source2, setter);
+    // We do not test source3 and source4 with setter, since the setter is
+    // supposed to (atypically) write to the receiver, not the holder.  This is
+    // to emulate the behavior of a data property.
 
-  ReleaseStackTraceDataTest(source1, getter);
-  ReleaseStackTraceDataTest(source2, getter);
-  ReleaseStackTraceDataTest(source3, getter);
-  ReleaseStackTraceDataTest(source4, getter);
+    ReleaseStackTraceDataTest(isolate, source1, getter);
+    ReleaseStackTraceDataTest(isolate, source2, getter);
+    ReleaseStackTraceDataTest(isolate, source3, getter);
+    ReleaseStackTraceDataTest(isolate, source4, getter);
+  }
+  isolate->Dispose();
 }
 
 
@@ -3409,7 +3442,7 @@
   // Simulate incremental marking so that the functions are enqueued as
   // code flushing candidates. Then optimize one function. Finally
   // finish the GC to complete code flushing.
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(heap);
   CompileRun("%OptimizeFunctionOnNextCall(g); g(3);");
   heap->CollectAllGarbage(Heap::kNoGCFlags);
 
@@ -3456,7 +3489,7 @@
 
   // Simulate incremental marking so that unoptimized code is flushed
   // even though it still is cached in the optimized code map.
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(heap);
   heap->CollectAllGarbage(Heap::kNoGCFlags);
 
   // Make a new closure that will get code installed from the code map.
@@ -3524,7 +3557,7 @@
   }
 
   // Simulate incremental marking and collect code flushing candidates.
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(heap);
   CHECK(shared1->code()->gc_metadata() != NULL);
 
   // Optimize function and make sure the unoptimized code is replaced.
@@ -3670,7 +3703,7 @@
   // Simulate incremental marking so that unoptimized function is enqueued as a
   // candidate for code flushing. The shared function info however will not be
   // explicitly enqueued.
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(heap);
 
   // Now optimize the function so that it is taken off the candidate list.
   {
@@ -3727,7 +3760,7 @@
   // Simulate incremental marking so that unoptimized function is enqueued as a
   // candidate for code flushing. The shared function info however will not be
   // explicitly enqueued.
-  SimulateIncrementalMarking();
+  SimulateIncrementalMarking(heap);
 
   // Now enable the debugger which in turn will disable code flushing.
   CHECK(isolate->debug()->Load());
@@ -3756,7 +3789,7 @@
   }
   // An entire block of handles has been filled.
   // Next handle would require a new block.
-  ASSERT(data->next == data->limit);
+  DCHECK(data->next == data->limit);
 
   DeferredHandleScope deferred(isolate);
   DummyVisitor visitor;
@@ -3777,7 +3810,7 @@
   if (marking->IsStopped()) marking->Start();
   // This big step should be sufficient to mark the whole array.
   marking->Step(100 * MB, IncrementalMarking::NO_GC_VIA_STACK_GUARD);
-  ASSERT(marking->IsComplete());
+  DCHECK(marking->IsComplete());
 }
 
 
@@ -3917,7 +3950,7 @@
     heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
   }
 
-  ASSERT(code->marked_for_deoptimization());
+  DCHECK(code->marked_for_deoptimization());
 }
 
 
@@ -3958,7 +3991,7 @@
     heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
   }
 
-  ASSERT(code->marked_for_deoptimization());
+  DCHECK(code->marked_for_deoptimization());
 }
 
 
@@ -3975,7 +4008,7 @@
   if (!isolate->use_crankshaft()) return;
   HandleScope outer_scope(heap->isolate());
   for (int i = 0; i < 3; i++) {
-    SimulateIncrementalMarking();
+    SimulateIncrementalMarking(heap);
     {
       LocalContext context;
       HandleScope scope(heap->isolate());
@@ -4031,6 +4064,7 @@
 
 TEST(NextCodeLinkIsWeak) {
   i::FLAG_allow_natives_syntax = true;
+  i::FLAG_turbo_deoptimization = true;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   v8::internal::Heap* heap = CcTest::heap();
@@ -4290,7 +4324,7 @@
   global->Set(v8::String::NewFromUtf8(isolate, "interrupt"),
               v8::FunctionTemplate::New(isolate, RequestInterrupt));
   v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
-  ASSERT(!context.IsEmpty());
+  DCHECK(!context.IsEmpty());
   v8::Context::Scope cscope(context);
 
   v8::Local<v8::Value> result = CompileRun(
@@ -4315,6 +4349,7 @@
       "var tmp = new Array(100000);"
       "array[0] = 10;"
       "gc();"
+      "gc();"
       "array.shift();"
       "array;");
 
@@ -4323,11 +4358,150 @@
   CHECK(heap->InOldPointerSpace(o->elements()));
   CHECK(heap->InOldPointerSpace(*o));
   Page* page = Page::FromAddress(o->elements()->address());
-  CHECK(page->WasSwept() ||
+  CHECK(page->parallel_sweeping() <= MemoryChunk::SWEEPING_FINALIZE ||
         Marking::IsBlack(Marking::MarkBitFrom(o->elements())));
 }
 
 
+UNINITIALIZED_TEST(PromotionQueue) {
+  i::FLAG_expose_gc = true;
+  i::FLAG_max_semi_space_size = 2;
+  v8::Isolate* isolate = v8::Isolate::New();
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  {
+    v8::Isolate::Scope isolate_scope(isolate);
+    v8::HandleScope handle_scope(isolate);
+    v8::Context::New(isolate)->Enter();
+    Heap* heap = i_isolate->heap();
+    NewSpace* new_space = heap->new_space();
+
+    // In this test we will try to overwrite the promotion queue, which sits
+    // at the end of to-space. To actually make that possible, we need at
+    // least two semi-space pages and take advantage of fragmentation.
+    // (1) Grow semi-space to two pages.
+    // (2) Create a few small long living objects and call the scavenger to
+    // move them to the other semi-space.
+    // (3) Create a huge object that fills the remainder of the first
+    // semi-space page, then a second huge object of the maximum allocatable
+    // size of the second semi-space page.
+    // (4) Call the scavenger again.
+    // What will happen is: the scavenger will promote the objects created in
+    // (2) and will create promotion queue entries at the end of the second
+    // semi-space page during the next scavenge when it promotes the objects to
+    // the old generation. The first allocation of (3) will fill up the first
+    // semi-space page. The second allocation in (3) will not fit into the
+    // first semi-space page, but it will overwrite the promotion queue
+    // entries, which are in the second semi-space page. If the right guards
+    // are in place, the promotion queue will be evacuated in that case.
+
+    // Grow the semi-space to two pages to make semi-space copy overwrite the
+    // promotion queue, which will be at the end of the second page.
+    intptr_t old_capacity = new_space->TotalCapacity();
+
+    // If we are in a low memory config, we can't grow to two pages and we can't
+    // run this test. This also means the issue we are testing cannot arise, as
+    // there is no fragmentation.
+    if (new_space->IsAtMaximumCapacity()) return;
+
+    new_space->Grow();
+    CHECK(new_space->IsAtMaximumCapacity());
+    CHECK(2 * old_capacity == new_space->TotalCapacity());
+
+    // Call the scavenger twice to get an empty new space.
+    heap->CollectGarbage(NEW_SPACE);
+    heap->CollectGarbage(NEW_SPACE);
+
+    // First create a few objects which will survive a scavenge, and will get
+    // promoted to the old generation later on. These objects will create
+    // promotion queue entries at the end of the second semi-space page.
+    const int number_handles = 12;
+    Handle<FixedArray> handles[number_handles];
+    for (int i = 0; i < number_handles; i++) {
+      handles[i] = i_isolate->factory()->NewFixedArray(1, NOT_TENURED);
+    }
+    heap->CollectGarbage(NEW_SPACE);
+
+    // Create the first huge object which will exactly fit the first semi-space
+    // page.
+    int new_linear_size =
+        static_cast<int>(*heap->new_space()->allocation_limit_address() -
+                         *heap->new_space()->allocation_top_address());
+    int length = new_linear_size / kPointerSize - FixedArray::kHeaderSize;
+    Handle<FixedArray> first =
+        i_isolate->factory()->NewFixedArray(length, NOT_TENURED);
+    CHECK(heap->InNewSpace(*first));
+
+    // Create the second huge object of the maximum size allocatable on the
+    // second semi-space page.
+    new_linear_size =
+        static_cast<int>(*heap->new_space()->allocation_limit_address() -
+                         *heap->new_space()->allocation_top_address());
+    length = Page::kMaxRegularHeapObjectSize / kPointerSize -
+             FixedArray::kHeaderSize;
+    Handle<FixedArray> second =
+        i_isolate->factory()->NewFixedArray(length, NOT_TENURED);
+    CHECK(heap->InNewSpace(*second));
+
+    // This scavenge will corrupt memory if the promotion queue is not
+    // evacuated.
+    heap->CollectGarbage(NEW_SPACE);
+  }
+  isolate->Dispose();
+}
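
The two "huge object" lengths above are derived from the remaining linear allocation area; the byte accounting, sketched (FixedArray::SizeFor(len) == kHeaderSize + len * kPointerSize):

// Bytes still available for linear allocation in to-space:
int remaining = static_cast<int>(*new_space->allocation_limit_address() -
                                 *new_space->allocation_top_address());
// Largest FixedArray length that still fits into those bytes:
int max_len = (remaining - FixedArray::kHeaderSize) / kPointerSize;
// The test subtracts kHeaderSize whole pointers instead, i.e. it picks a
// slightly smaller length that is guaranteed to fit.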
+
+
+TEST(Regress388880) {
+  i::FLAG_expose_gc = true;
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Factory* factory = isolate->factory();
+  Heap* heap = isolate->heap();
+
+  Handle<Map> map1 = Map::Create(isolate, 1);
+  Handle<Map> map2 =
+      Map::CopyWithField(map1, factory->NewStringFromStaticChars("foo"),
+                         HeapType::Any(isolate), NONE, Representation::Tagged(),
+                         OMIT_TRANSITION).ToHandleChecked();
+
+  int desired_offset = Page::kPageSize - map1->instance_size();
+
+  // Allocate a fixed array in old pointer space so that the object allocated
+  // afterwards ends exactly at the end of the page.
+  {
+    SimulateFullSpace(heap->old_pointer_space());
+    int padding_size = desired_offset - Page::kObjectStartOffset;
+    int padding_array_length =
+        (padding_size - FixedArray::kHeaderSize) / kPointerSize;
+
+    Handle<FixedArray> temp2 =
+        factory->NewFixedArray(padding_array_length, TENURED);
+    Page* page = Page::FromAddress(temp2->address());
+    CHECK_EQ(Page::kObjectStartOffset, page->Offset(temp2->address()));
+  }
+
+  Handle<JSObject> o = factory->NewJSObjectFromMap(map1, TENURED, false);
+  o->set_properties(*factory->empty_fixed_array());
+
+  // Ensure that the object was allocated where we need it.
+  Page* page = Page::FromAddress(o->address());
+  CHECK_EQ(desired_offset, page->Offset(o->address()));
+
+  // Now we have an object right at the end of the page.
+
+  // Enable incremental marking to trigger actions in Heap::AdjustLiveBytes()
+  // that would cause a crash.
+  IncrementalMarking* marking = CcTest::heap()->incremental_marking();
+  marking->Abort();
+  marking->Start();
+  CHECK(marking->IsMarking());
+
+  // Now everything is set up for crashing in JSObject::MigrateFastToFast()
+  // when it calls heap->AdjustLiveBytes(...).
+  JSObject::MigrateToMap(o, map2);
+}
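
The padding computation in Regress388880 places o so that it ends exactly at the page boundary; the layout, restated:

// One old-space page of Page::kPageSize bytes:
//   [0, kObjectStartOffset)              page header / bookkeeping
//   [kObjectStartOffset, desired_offset) padding FixedArray
//   [desired_offset, kPageSize)          o, instance_size() bytes long
int desired_offset = Page::kPageSize - map1->instance_size();
int padding_size = desired_offset - Page::kObjectStartOffset;
int padding_array_length =  // chosen so SizeFor(length) == padding_size
    (padding_size - FixedArray::kHeaderSize) / kPointerSize;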
+
+
 #ifdef DEBUG
 TEST(PathTracer) {
   CcTest::InitializeVM();
diff --git a/test/cctest/test-javascript-arm64.cc b/test/cctest/test-javascript-arm64.cc
index 2b31f8f..5e45034 100644
--- a/test/cctest/test-javascript-arm64.cc
+++ b/test/cctest/test-javascript-arm64.cc
@@ -30,11 +30,11 @@
 #include "src/v8.h"
 
 #include "src/api.h"
+#include "src/base/platform/platform.h"
 #include "src/compilation-cache.h"
 #include "src/execution.h"
 #include "src/isolate.h"
 #include "src/parser.h"
-#include "src/platform.h"
 #include "src/snapshot.h"
 #include "src/unicode-inl.h"
 #include "src/utils.h"
diff --git a/test/cctest/test-js-arm64-variables.cc b/test/cctest/test-js-arm64-variables.cc
index 4ac14a0..7f27710 100644
--- a/test/cctest/test-js-arm64-variables.cc
+++ b/test/cctest/test-js-arm64-variables.cc
@@ -32,11 +32,11 @@
 #include "src/v8.h"
 
 #include "src/api.h"
+#include "src/base/platform/platform.h"
 #include "src/compilation-cache.h"
 #include "src/execution.h"
 #include "src/isolate.h"
 #include "src/parser.h"
-#include "src/platform.h"
 #include "src/snapshot.h"
 #include "src/unicode-inl.h"
 #include "src/utils.h"
diff --git a/test/cctest/test-libplatform-task-queue.cc b/test/cctest/test-libplatform-task-queue.cc
deleted file mode 100644
index 5d997d9..0000000
--- a/test/cctest/test-libplatform-task-queue.cc
+++ /dev/null
@@ -1,95 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "src/libplatform/task-queue.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-libplatform.h"
-
-using namespace v8::internal;
-
-
-TEST(TaskQueueBasic) {
-  TaskCounter task_counter;
-
-  TaskQueue queue;
-
-  TestTask* task = new TestTask(&task_counter);
-  queue.Append(task);
-  CHECK_EQ(1, task_counter.GetCount());
-  CHECK_EQ(task, queue.GetNext());
-  delete task;
-  CHECK_EQ(0, task_counter.GetCount());
-
-  queue.Terminate();
-  CHECK_EQ(NULL, queue.GetNext());
-}
-
-
-class ReadQueueTask : public TestTask {
- public:
-  ReadQueueTask(TaskCounter* task_counter, TaskQueue* queue)
-      : TestTask(task_counter, true), queue_(queue) {}
-  virtual ~ReadQueueTask() {}
-
-  virtual void Run() V8_OVERRIDE {
-    TestTask::Run();
-    CHECK_EQ(NULL, queue_->GetNext());
-  }
-
- private:
-  TaskQueue* queue_;
-
-  DISALLOW_COPY_AND_ASSIGN(ReadQueueTask);
-};
-
-
-TEST(TaskQueueTerminateMultipleReaders) {
-  TaskQueue queue;
-  TaskCounter task_counter;
-  ReadQueueTask* read1 = new ReadQueueTask(&task_counter, &queue);
-  ReadQueueTask* read2 = new ReadQueueTask(&task_counter, &queue);
-
-  TestWorkerThread thread1(read1);
-  TestWorkerThread thread2(read2);
-
-  thread1.Start();
-  thread2.Start();
-
-  CHECK_EQ(2, task_counter.GetCount());
-
-  thread1.Signal();
-  thread2.Signal();
-
-  queue.Terminate();
-
-  thread1.Join();
-  thread2.Join();
-
-  CHECK_EQ(0, task_counter.GetCount());
-}
diff --git a/test/cctest/test-libplatform-worker-thread.cc b/test/cctest/test-libplatform-worker-thread.cc
deleted file mode 100644
index 994750f..0000000
--- a/test/cctest/test-libplatform-worker-thread.cc
+++ /dev/null
@@ -1,64 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#include "src/libplatform/task-queue.h"
-#include "src/libplatform/worker-thread.h"
-#include "test/cctest/cctest.h"
-#include "test/cctest/test-libplatform.h"
-
-using namespace v8::internal;
-
-
-TEST(WorkerThread) {
-  TaskQueue queue;
-  TaskCounter task_counter;
-
-  TestTask* task1 = new TestTask(&task_counter, true);
-  TestTask* task2 = new TestTask(&task_counter, true);
-  TestTask* task3 = new TestTask(&task_counter, true);
-  TestTask* task4 = new TestTask(&task_counter, true);
-
-  WorkerThread* thread1 = new WorkerThread(&queue);
-  WorkerThread* thread2 = new WorkerThread(&queue);
-
-  CHECK_EQ(4, task_counter.GetCount());
-
-  queue.Append(task1);
-  queue.Append(task2);
-  queue.Append(task3);
-  queue.Append(task4);
-
-  // TaskQueue ASSERTs that it is empty in its destructor.
-  queue.Terminate();
-
-  delete thread1;
-  delete thread2;
-
-  CHECK_EQ(0, task_counter.GetCount());
-}
diff --git a/test/cctest/test-libplatform.h b/test/cctest/test-libplatform.h
deleted file mode 100644
index 8150316..0000000
--- a/test/cctest/test-libplatform.h
+++ /dev/null
@@ -1,120 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#ifndef TEST_LIBPLATFORM_H_
-#define TEST_LIBPLATFORM_H_
-
-#include "src/v8.h"
-
-#include "test/cctest/cctest.h"
-
-using namespace v8::internal;
-
-class TaskCounter {
- public:
-  TaskCounter() : counter_(0) {}
-  ~TaskCounter() { CHECK_EQ(0, counter_); }
-
-  int GetCount() const {
-    LockGuard<Mutex> guard(&lock_);
-    return counter_;
-  }
-
-  void Inc() {
-    LockGuard<Mutex> guard(&lock_);
-    ++counter_;
-  }
-
-  void Dec() {
-    LockGuard<Mutex> guard(&lock_);
-    --counter_;
-  }
-
- private:
-  mutable Mutex lock_;
-  int counter_;
-
-  DISALLOW_COPY_AND_ASSIGN(TaskCounter);
-};
-
-
-class TestTask : public v8::Task {
- public:
-  TestTask(TaskCounter* task_counter, bool expected_to_run)
-      : task_counter_(task_counter),
-        expected_to_run_(expected_to_run),
-        executed_(false) {
-    task_counter_->Inc();
-  }
-
-  explicit TestTask(TaskCounter* task_counter)
-      : task_counter_(task_counter), expected_to_run_(false), executed_(false) {
-    task_counter_->Inc();
-  }
-
-  virtual ~TestTask() {
-    CHECK_EQ(expected_to_run_, executed_);
-    task_counter_->Dec();
-  }
-
-  // v8::Task implementation.
-  virtual void Run() V8_OVERRIDE { executed_ = true; }
-
- private:
-  TaskCounter* task_counter_;
-  bool expected_to_run_;
-  bool executed_;
-
-  DISALLOW_COPY_AND_ASSIGN(TestTask);
-};
-
-
-class TestWorkerThread : public Thread {
- public:
-  explicit TestWorkerThread(v8::Task* task)
-      : Thread("libplatform TestWorkerThread"), semaphore_(0), task_(task) {}
-  virtual ~TestWorkerThread() {}
-
-  void Signal() { semaphore_.Signal(); }
-
-  // Thread implementation.
-  virtual void Run() V8_OVERRIDE {
-    semaphore_.Wait();
-    if (task_) {
-      task_->Run();
-      delete task_;
-    }
-  }
-
- private:
-  Semaphore semaphore_;
-  v8::Task* task_;
-
-  DISALLOW_COPY_AND_ASSIGN(TestWorkerThread);
-};
-
-#endif  // TEST_LIBPLATFORM_H_
diff --git a/test/cctest/test-liveedit.cc b/test/cctest/test-liveedit.cc
index bf8d198..6a5f0b2 100644
--- a/test/cctest/test-liveedit.cc
+++ b/test/cctest/test-liveedit.cc
@@ -117,12 +117,12 @@
     int similar_part_length = diff_pos1 - pos1;
     int diff_pos2 = pos2 + similar_part_length;
 
-    ASSERT_EQ(diff_pos2, chunk->pos2);
+    DCHECK_EQ(diff_pos2, chunk->pos2);
 
     for (int j = 0; j < similar_part_length; j++) {
-      ASSERT(pos1 + j < len1);
-      ASSERT(pos2 + j < len2);
-      ASSERT_EQ(s1[pos1 + j], s2[pos2 + j]);
+      DCHECK(pos1 + j < len1);
+      DCHECK(pos2 + j < len2);
+      DCHECK_EQ(s1[pos1 + j], s2[pos2 + j]);
     }
     diff_parameter += chunk->len1 + chunk->len2;
     pos1 = diff_pos1 + chunk->len1;
@@ -131,17 +131,17 @@
   {
     // After last chunk.
     int similar_part_length = len1 - pos1;
-    ASSERT_EQ(similar_part_length, len2 - pos2);
+    DCHECK_EQ(similar_part_length, len2 - pos2);
     USE(len2);
     for (int j = 0; j < similar_part_length; j++) {
-      ASSERT(pos1 + j < len1);
-      ASSERT(pos2 + j < len2);
-      ASSERT_EQ(s1[pos1 + j], s2[pos2 + j]);
+      DCHECK(pos1 + j < len1);
+      DCHECK(pos2 + j < len2);
+      DCHECK_EQ(s1[pos1 + j], s2[pos2 + j]);
     }
   }
 
   if (expected_diff_parameter != -1) {
-    ASSERT_EQ(expected_diff_parameter, diff_parameter);
+    DCHECK_EQ(expected_diff_parameter, diff_parameter);
   }
 }
 
@@ -158,7 +158,6 @@
 // --- T h e   A c t u a l   T e s t s
 
 TEST(LiveEditDiffer) {
-  v8::internal::V8::Initialize(NULL);
   CompareStrings("zz1zzz12zz123zzz", "zzzzzzzzzz", 6);
   CompareStrings("zz1zzz12zz123zzz", "zz0zzz0zz0zzz", 9);
   CompareStrings("123456789", "987654321", 16);
diff --git a/test/cctest/test-lockers.cc b/test/cctest/test-lockers.cc
index f92a81e..ed315ce 100644
--- a/test/cctest/test-lockers.cc
+++ b/test/cctest/test-lockers.cc
@@ -30,11 +30,11 @@
 #include "src/v8.h"
 
 #include "src/api.h"
+#include "src/base/platform/platform.h"
 #include "src/compilation-cache.h"
 #include "src/execution.h"
 #include "src/isolate.h"
 #include "src/parser.h"
-#include "src/platform.h"
 #include "src/smart-pointers.h"
 #include "src/snapshot.h"
 #include "src/unicode-inl.h"
@@ -56,10 +56,10 @@
 
 
 // Migrating an isolate
-class KangarooThread : public v8::internal::Thread {
+class KangarooThread : public v8::base::Thread {
  public:
   KangarooThread(v8::Isolate* isolate, v8::Handle<v8::Context> context)
-      : Thread("KangarooThread"),
+      : Thread(Options("KangarooThread")),
         isolate_(isolate),
         context_(isolate, context) {}
 
@@ -146,12 +146,11 @@
   virtual void Run() = 0;
 
  private:
-  class ThreadWithSemaphore : public i::Thread {
+  class ThreadWithSemaphore : public v8::base::Thread {
    public:
     explicit ThreadWithSemaphore(JoinableThread* joinable_thread)
-      : Thread(joinable_thread->name_),
-        joinable_thread_(joinable_thread) {
-    }
+        : Thread(Options(joinable_thread->name_)),
+          joinable_thread_(joinable_thread) {}
 
     virtual void Run() {
       joinable_thread_->Run();
@@ -163,7 +162,7 @@
   };
 
   const char* name_;
-  i::Semaphore semaphore_;
+  v8::base::Semaphore semaphore_;
   ThreadWithSemaphore thread_;
 
   friend class ThreadWithSemaphore;
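
Under v8::base::Thread the name travels in an Options value rather than a bare string. A minimal subclass against the new API (a sketch; the class name is illustrative):

class EchoThread : public v8::base::Thread {
 public:
  EchoThread() : Thread(Options("EchoThread")) {}  // name via Options
  virtual void Run() { /* thread body */ }
};
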
@@ -223,9 +222,7 @@
 
 class IsolateNonlockingThread : public JoinableThread {
  public:
-  explicit IsolateNonlockingThread()
-    : JoinableThread("IsolateNonlockingThread") {
-  }
+  IsolateNonlockingThread() : JoinableThread("IsolateNonlockingThread") {}
 
   virtual void Run() {
     v8::Isolate* isolate = v8::Isolate::New();
@@ -247,6 +244,8 @@
 TEST(MultithreadedParallelIsolates) {
 #if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
   const int kNThreads = 10;
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
+  const int kNThreads = 4;
 #else
   const int kNThreads = 50;
 #endif
@@ -713,6 +712,8 @@
 TEST(ExtensionsRegistration) {
 #if V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
   const int kNThreads = 10;
+#elif V8_TARGET_ARCH_X64 && V8_TARGET_ARCH_32_BIT
+  const int kNThreads = 4;
 #else
   const int kNThreads = 40;
 #endif
diff --git a/test/cctest/test-log-stack-tracer.cc b/test/cctest/test-log-stack-tracer.cc
index 3f9d0b3..334a201 100644
--- a/test/cctest/test-log-stack-tracer.cc
+++ b/test/cctest/test-log-stack-tracer.cc
@@ -172,7 +172,7 @@
   CHECK_EQ(FUNCTION_ADDR(i::TraceExtension::Trace), sample.external_callback);
 
   // Stack tracing will start from the first JS function, i.e. "JSFuncDoTrace"
-  int base = 0;
+  unsigned base = 0;
   CHECK_GT(sample.frames_count, base + 1);
 
   CHECK(IsAddressWithinFuncCode(
@@ -225,7 +225,7 @@
   CHECK_EQ(FUNCTION_ADDR(i::TraceExtension::JSTrace), sample.external_callback);
 
   // Stack sampling will start from the caller of JSFuncDoTrace, i.e. "JSTrace"
-  int base = 0;
+  unsigned base = 0;
   CHECK_GT(sample.frames_count, base + 1);
   CHECK(IsAddressWithinFuncCode(context, "JSTrace", sample.stack[base + 0]));
   CHECK(IsAddressWithinFuncCode(
diff --git a/test/cctest/test-log.cc b/test/cctest/test-log.cc
index 6c04e7a..482f89f 100644
--- a/test/cctest/test-log.cc
+++ b/test/cctest/test-log.cc
@@ -37,8 +37,8 @@
 #include "src/v8.h"
 
 #include "src/cpu-profiler.h"
-#include "src/log-utils.h"
 #include "src/log.h"
+#include "src/log-utils.h"
 #include "src/natives.h"
 #include "src/utils.h"
 #include "src/v8threads.h"
@@ -61,9 +61,11 @@
         temp_file_(NULL),
         // Need to run this prior to creating the scope.
         trick_to_run_init_flags_(init_flags_()),
-        scope_(CcTest::isolate()),
-        env_(v8::Context::New(CcTest::isolate())),
-        logger_(CcTest::i_isolate()->logger()) {
+        isolate_(v8::Isolate::New()),
+        isolate_scope_(isolate_),
+        scope_(isolate_),
+        env_(v8::Context::New(isolate_)),
+        logger_(reinterpret_cast<i::Isolate*>(isolate_)->logger()) {
     env_->Enter();
   }
 
@@ -77,6 +79,8 @@
 
   v8::Handle<v8::Context>& env() { return env_; }
 
+  v8::Isolate* isolate() { return isolate_; }
+
   Logger* logger() { return logger_; }
 
   FILE* StopLoggingGetTempFile() {
@@ -100,6 +104,8 @@
   const bool saved_prof_;
   FILE* temp_file_;
   const bool trick_to_run_init_flags_;
+  v8::Isolate* isolate_;
+  v8::Isolate::Scope isolate_scope_;
   v8::HandleScope scope_;
   v8::Handle<v8::Context> env_;
   Logger* logger_;
@@ -330,41 +336,41 @@
 
 
 TEST(LogCallbacks) {
-  v8::Isolate* isolate = CcTest::isolate();
-  ScopedLoggerInitializer initialize_logger;
-  Logger* logger = initialize_logger.logger();
+  v8::Isolate* isolate;
+  {
+    ScopedLoggerInitializer initialize_logger;
+    isolate = initialize_logger.isolate();
+    Logger* logger = initialize_logger.logger();
 
-  v8::Local<v8::FunctionTemplate> obj =
-      v8::Local<v8::FunctionTemplate>::New(isolate,
-                                           v8::FunctionTemplate::New(isolate));
-  obj->SetClassName(v8_str("Obj"));
-  v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
-  v8::Local<v8::Signature> signature =
-      v8::Signature::New(isolate, obj);
-  proto->Set(v8_str("method1"),
-             v8::FunctionTemplate::New(isolate,
-                                       ObjMethod1,
-                                       v8::Handle<v8::Value>(),
-                                       signature),
-             static_cast<v8::PropertyAttribute>(v8::DontDelete));
+    v8::Local<v8::FunctionTemplate> obj = v8::Local<v8::FunctionTemplate>::New(
+        isolate, v8::FunctionTemplate::New(isolate));
+    obj->SetClassName(v8_str("Obj"));
+    v8::Handle<v8::ObjectTemplate> proto = obj->PrototypeTemplate();
+    v8::Local<v8::Signature> signature = v8::Signature::New(isolate, obj);
+    proto->Set(v8_str("method1"),
+               v8::FunctionTemplate::New(isolate, ObjMethod1,
+                                         v8::Handle<v8::Value>(), signature),
+               static_cast<v8::PropertyAttribute>(v8::DontDelete));
 
-  initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
-  CompileRun("Obj.prototype.method1.toString();");
+    initialize_logger.env()->Global()->Set(v8_str("Obj"), obj->GetFunction());
+    CompileRun("Obj.prototype.method1.toString();");
 
-  logger->LogCompiledFunctions();
+    logger->LogCompiledFunctions();
 
-  bool exists = false;
-  i::Vector<const char> log(
-      i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
-  CHECK(exists);
+    bool exists = false;
+    i::Vector<const char> log(
+        i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+    CHECK(exists);
 
-  i::EmbeddedVector<char, 100> ref_data;
-  i::SNPrintF(ref_data,
-              "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"method1\"",
-              reinterpret_cast<intptr_t>(ObjMethod1));
+    i::EmbeddedVector<char, 100> ref_data;
+    i::SNPrintF(ref_data,
+                "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"method1\"",
+                reinterpret_cast<intptr_t>(ObjMethod1));
 
-  CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
-  log.Dispose();
+    CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
+    log.Dispose();
+  }
+  isolate->Dispose();
 }
 
 
@@ -383,46 +389,49 @@
 
 
 TEST(LogAccessorCallbacks) {
-  v8::Isolate* isolate = CcTest::isolate();
-  ScopedLoggerInitializer initialize_logger;
-  Logger* logger = initialize_logger.logger();
+  v8::Isolate* isolate;
+  {
+    ScopedLoggerInitializer initialize_logger;
+    isolate = initialize_logger.isolate();
+    Logger* logger = initialize_logger.logger();
 
-  v8::Local<v8::FunctionTemplate> obj =
-      v8::Local<v8::FunctionTemplate>::New(isolate,
-                                           v8::FunctionTemplate::New(isolate));
-  obj->SetClassName(v8_str("Obj"));
-  v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
-  inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
-  inst->SetAccessor(v8_str("prop2"), Prop2Getter);
+    v8::Local<v8::FunctionTemplate> obj = v8::Local<v8::FunctionTemplate>::New(
+        isolate, v8::FunctionTemplate::New(isolate));
+    obj->SetClassName(v8_str("Obj"));
+    v8::Handle<v8::ObjectTemplate> inst = obj->InstanceTemplate();
+    inst->SetAccessor(v8_str("prop1"), Prop1Getter, Prop1Setter);
+    inst->SetAccessor(v8_str("prop2"), Prop2Getter);
 
-  logger->LogAccessorCallbacks();
+    logger->LogAccessorCallbacks();
 
-  bool exists = false;
-  i::Vector<const char> log(
-      i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
-  CHECK(exists);
+    bool exists = false;
+    i::Vector<const char> log(
+        i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+    CHECK(exists);
 
-  EmbeddedVector<char, 100> prop1_getter_record;
-  i::SNPrintF(prop1_getter_record,
-              "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop1\"",
-              reinterpret_cast<intptr_t>(Prop1Getter));
-  CHECK_NE(NULL,
-           StrNStr(log.start(), prop1_getter_record.start(), log.length()));
+    EmbeddedVector<char, 100> prop1_getter_record;
+    i::SNPrintF(prop1_getter_record,
+                "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop1\"",
+                reinterpret_cast<intptr_t>(Prop1Getter));
+    CHECK_NE(NULL,
+             StrNStr(log.start(), prop1_getter_record.start(), log.length()));
 
-  EmbeddedVector<char, 100> prop1_setter_record;
-  i::SNPrintF(prop1_setter_record,
-              "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"set prop1\"",
-              reinterpret_cast<intptr_t>(Prop1Setter));
-  CHECK_NE(NULL,
-           StrNStr(log.start(), prop1_setter_record.start(), log.length()));
+    EmbeddedVector<char, 100> prop1_setter_record;
+    i::SNPrintF(prop1_setter_record,
+                "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"set prop1\"",
+                reinterpret_cast<intptr_t>(Prop1Setter));
+    CHECK_NE(NULL,
+             StrNStr(log.start(), prop1_setter_record.start(), log.length()));
 
-  EmbeddedVector<char, 100> prop2_getter_record;
-  i::SNPrintF(prop2_getter_record,
-              "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop2\"",
-              reinterpret_cast<intptr_t>(Prop2Getter));
-  CHECK_NE(NULL,
-           StrNStr(log.start(), prop2_getter_record.start(), log.length()));
-  log.Dispose();
+    EmbeddedVector<char, 100> prop2_getter_record;
+    i::SNPrintF(prop2_getter_record,
+                "code-creation,Callback,-2,0x%" V8PRIxPTR ",1,\"get prop2\"",
+                reinterpret_cast<intptr_t>(Prop2Getter));
+    CHECK_NE(NULL,
+             StrNStr(log.start(), prop2_getter_record.start(), log.length()));
+    log.Dispose();
+  }
+  isolate->Dispose();
 }
 
 
@@ -439,57 +448,63 @@
   // are using V8.
 
   // Start with profiling to capture all code events from the beginning.
-  ScopedLoggerInitializer initialize_logger;
-  Logger* logger = initialize_logger.logger();
+  v8::Isolate* isolate;
+  {
+    ScopedLoggerInitializer initialize_logger;
+    isolate = initialize_logger.isolate();
+    Logger* logger = initialize_logger.logger();
 
-  // Compile and run a function that creates other functions.
-  CompileRun(
-      "(function f(obj) {\n"
-      "  obj.test =\n"
-      "    (function a(j) { return function b() { return j; } })(100);\n"
-      "})(this);");
-  logger->StopProfiler();
-  CcTest::heap()->CollectAllGarbage(i::Heap::kMakeHeapIterableMask);
-  logger->StringEvent("test-logging-done", "");
+    // Compile and run a function that creates other functions.
+    CompileRun(
+        "(function f(obj) {\n"
+        "  obj.test =\n"
+        "    (function a(j) { return function b() { return j; } })(100);\n"
+        "})(this);");
+    logger->StopProfiler();
+    reinterpret_cast<i::Isolate*>(isolate)->heap()->CollectAllGarbage(
+        i::Heap::kMakeHeapIterableMask);
+    logger->StringEvent("test-logging-done", "");
 
-  // Iterate heap to find compiled functions, will write to log.
-  logger->LogCompiledFunctions();
-  logger->StringEvent("test-traversal-done", "");
+    // Iterate heap to find compiled functions, will write to log.
+    logger->LogCompiledFunctions();
+    logger->StringEvent("test-traversal-done", "");
 
-  bool exists = false;
-  i::Vector<const char> log(
-      i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
-  CHECK(exists);
-  v8::Handle<v8::String> log_str = v8::String::NewFromUtf8(
-      CcTest::isolate(), log.start(), v8::String::kNormalString, log.length());
-  initialize_logger.env()->Global()->Set(v8_str("_log"), log_str);
+    bool exists = false;
+    i::Vector<const char> log(
+        i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+    CHECK(exists);
+    v8::Handle<v8::String> log_str = v8::String::NewFromUtf8(
+        isolate, log.start(), v8::String::kNormalString, log.length());
+    initialize_logger.env()->Global()->Set(v8_str("_log"), log_str);
 
-  i::Vector<const unsigned char> source = TestSources::GetScriptsSource();
-  v8::Handle<v8::String> source_str = v8::String::NewFromUtf8(
-      CcTest::isolate(), reinterpret_cast<const char*>(source.start()),
-      v8::String::kNormalString, source.length());
-  v8::TryCatch try_catch;
-  v8::Handle<v8::Script> script = CompileWithOrigin(source_str, "");
-  if (script.IsEmpty()) {
-    v8::String::Utf8Value exception(try_catch.Exception());
-    printf("compile: %s\n", *exception);
-    CHECK(false);
+    i::Vector<const unsigned char> source = TestSources::GetScriptsSource();
+    v8::Handle<v8::String> source_str = v8::String::NewFromUtf8(
+        isolate, reinterpret_cast<const char*>(source.start()),
+        v8::String::kNormalString, source.length());
+    v8::TryCatch try_catch;
+    v8::Handle<v8::Script> script = CompileWithOrigin(source_str, "");
+    if (script.IsEmpty()) {
+      v8::String::Utf8Value exception(try_catch.Exception());
+      printf("compile: %s\n", *exception);
+      CHECK(false);
+    }
+    v8::Handle<v8::Value> result = script->Run();
+    if (result.IsEmpty()) {
+      v8::String::Utf8Value exception(try_catch.Exception());
+      printf("run: %s\n", *exception);
+      CHECK(false);
+    }
+    // The result is either a "true" literal or a problem description.
+    if (!result->IsTrue()) {
+      v8::Local<v8::String> s = result->ToString();
+      i::ScopedVector<char> data(s->Utf8Length() + 1);
+      CHECK_NE(NULL, data.start());
+      s->WriteUtf8(data.start());
+      printf("%s\n", data.start());
+      // Make sure our output is written prior to the CHECK-failure crash.
+      fflush(stdout);
+      CHECK(false);
+    }
   }
-  v8::Handle<v8::Value> result = script->Run();
-  if (result.IsEmpty()) {
-    v8::String::Utf8Value exception(try_catch.Exception());
-    printf("run: %s\n", *exception);
-    CHECK(false);
-  }
-  // The result either be a "true" literal or problem description.
-  if (!result->IsTrue()) {
-    v8::Local<v8::String> s = result->ToString();
-    i::ScopedVector<char> data(s->Utf8Length() + 1);
-    CHECK_NE(NULL, data.start());
-    s->WriteUtf8(data.start());
-    printf("%s\n", data.start());
-    // Make sure that our output is written prior crash due to CHECK failure.
-    fflush(stdout);
-    CHECK(false);
-  }
+  isolate->Dispose();
 }
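
The test-log.cc rewrite above follows one pattern in all three tests: ScopedLoggerInitializer creates a fresh isolate, the whole test body runs inside a block so the initializer's Isolate::Scope and HandleScope unwind first, and only then is the isolate disposed. A stripped-down sketch of that ordering (the logging machinery is elided):

    v8::Isolate* isolate;
    {
      // ScopedLoggerInitializer does the equivalent of this, in order:
      v8::Isolate* fresh = v8::Isolate::New();
      v8::Isolate::Scope isolate_scope(fresh);
      v8::HandleScope handle_scope(fresh);
      v8::Handle<v8::Context> env = v8::Context::New(fresh);
      env->Enter();
      // ... run the test, stop logging, read back the log file ...
      env->Exit();
      isolate = fresh;
    }  // scopes are destroyed here, while the isolate is still alive
    isolate->Dispose();  // legal only after every scope has unwound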
diff --git a/test/cctest/test-macro-assembler-arm.cc b/test/cctest/test-macro-assembler-arm.cc
index 30169cb..3ca0266 100644
--- a/test/cctest/test-macro-assembler-arm.cc
+++ b/test/cctest/test-macro-assembler-arm.cc
@@ -28,11 +28,12 @@
 #include <stdlib.h>
 
 #include "src/v8.h"
+#include "test/cctest/cctest.h"
 
 #include "src/macro-assembler.h"
+
 #include "src/arm/macro-assembler-arm.h"
 #include "src/arm/simulator-arm.h"
-#include "test/cctest/cctest.h"
 
 
 using namespace v8::internal;
@@ -67,10 +68,12 @@
   size_t act_size;
 
   // Allocate two blocks to copy data between.
-  byte* src_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+  byte* src_buffer =
+      static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
   CHECK(src_buffer);
   CHECK(act_size >= static_cast<size_t>(data_size));
-  byte* dest_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+  byte* dest_buffer =
+      static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
   CHECK(dest_buffer);
   CHECK(act_size >= static_cast<size_t>(data_size));
 
@@ -134,13 +137,10 @@
 
 
 TEST(LoadAndStoreWithRepresentation) {
-  v8::internal::V8::Initialize(NULL);
-
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
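
The OS::Allocate calls in this and the following macro-assembler tests all change the same way: the allocator now lives on v8::base::OS, declared in src/base/platform/platform.h. The call pattern, isolated:

    #include "src/base/platform/platform.h"

    // Request one executable page for generated code; 'true' asks for
    // executable permissions, and actual_size reports the real mapping
    // size (at least the amount requested).
    size_t actual_size;
    byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
        Assembler::kMinimalBufferSize, &actual_size, true));
    CHECK(buffer);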
diff --git a/test/cctest/test-macro-assembler-ia32.cc b/test/cctest/test-macro-assembler-ia32.cc
index dde17cd..b2b8c94 100644
--- a/test/cctest/test-macro-assembler-ia32.cc
+++ b/test/cctest/test-macro-assembler-ia32.cc
@@ -28,13 +28,13 @@
 #include <stdlib.h>
 
 #include "src/v8.h"
-
-#include "src/macro-assembler.h"
-#include "src/factory.h"
-#include "src/platform.h"
-#include "src/serialize.h"
 #include "test/cctest/cctest.h"
 
+#include "src/base/platform/platform.h"
+#include "src/factory.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
+
 using namespace v8::internal;
 
 #if __GNUC__
@@ -50,13 +50,10 @@
 
 
 TEST(LoadAndStoreWithRepresentation) {
-  v8::internal::V8::Initialize(NULL);
-
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
diff --git a/test/cctest/test-macro-assembler-mips.cc b/test/cctest/test-macro-assembler-mips.cc
index 8e4d54c..6cb00e4 100644
--- a/test/cctest/test-macro-assembler-mips.cc
+++ b/test/cctest/test-macro-assembler-mips.cc
@@ -28,10 +28,11 @@
 #include <stdlib.h>
 
 #include "src/v8.h"
+#include "test/cctest/cctest.h"
+
 #include "src/macro-assembler.h"
 #include "src/mips/macro-assembler-mips.h"
 #include "src/mips/simulator-mips.h"
-#include "test/cctest/cctest.h"
 
 
 using namespace v8::internal;
@@ -66,10 +67,12 @@
   size_t act_size;
 
   // Allocate two blocks to copy data between.
-  byte* src_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+  byte* src_buffer =
+      static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
   CHECK(src_buffer);
   CHECK(act_size >= static_cast<size_t>(data_size));
-  byte* dest_buffer = static_cast<byte*>(OS::Allocate(data_size, &act_size, 0));
+  byte* dest_buffer =
+      static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
   CHECK(dest_buffer);
   CHECK(act_size >= static_cast<size_t>(data_size));
 
@@ -146,9 +149,9 @@
   i::FixedDoubleArray* a = i::FixedDoubleArray::cast(array1->elements());
   double value = a->get_scalar(0);
   CHECK(std::isnan(value) &&
-        i::BitCast<uint64_t>(value) ==
-        i::BitCast<uint64_t>(
-            i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
+        bit_cast<uint64_t>(value) ==
+            bit_cast<uint64_t>(
+                i::FixedDoubleArray::canonical_not_the_hole_nan_as_double()));
 }
 
 
diff --git a/test/cctest/test-macro-assembler-mips64.cc b/test/cctest/test-macro-assembler-mips64.cc
new file mode 100644
index 0000000..eef658d
--- /dev/null
+++ b/test/cctest/test-macro-assembler-mips64.cc
@@ -0,0 +1,217 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/macro-assembler.h"
+#include "src/mips64/macro-assembler-mips64.h"
+#include "src/mips64/simulator-mips64.h"
+
+
+using namespace v8::internal;
+
+typedef void* (*F)(int64_t x, int64_t y, int p2, int p3, int p4);
+
+#define __ masm->
+
+
+static byte to_non_zero(int n) {
+  return static_cast<unsigned>(n) % 255 + 1;
+}
+
+
+static bool all_zeroes(const byte* beg, const byte* end) {
+  CHECK(beg);
+  CHECK(beg <= end);
+  while (beg < end) {
+    if (*beg++ != 0)
+      return false;
+  }
+  return true;
+}
+
+
+TEST(CopyBytes) {
+  CcTest::InitializeVM();
+  Isolate* isolate = Isolate::Current();
+  HandleScope handles(isolate);
+
+  const int data_size = 1 * KB;
+  size_t act_size;
+
+  // Allocate two blocks to copy data between.
+  byte* src_buffer =
+      static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
+  CHECK(src_buffer);
+  CHECK(act_size >= static_cast<size_t>(data_size));
+  byte* dest_buffer =
+      static_cast<byte*>(v8::base::OS::Allocate(data_size, &act_size, 0));
+  CHECK(dest_buffer);
+  CHECK(act_size >= static_cast<size_t>(data_size));
+
+  // Storage for a0 and a1.
+  byte* a0_;
+  byte* a1_;
+
+  MacroAssembler assembler(isolate, NULL, 0);
+  MacroAssembler* masm = &assembler;
+
+  // Code to be generated: the CopyBytes sequence, followed by stores of the
+  // final a0 and a1 values into a0_ and a1_, respectively.
+  __ CopyBytes(a0, a1, a2, a3);
+  __ li(a2, Operand(reinterpret_cast<int64_t>(&a0_)));
+  __ li(a3, Operand(reinterpret_cast<int64_t>(&a1_)));
+  __ sd(a0, MemOperand(a2));
+  __ jr(ra);
+  __ sd(a1, MemOperand(a3));
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+  ::F f = FUNCTION_CAST< ::F>(code->entry());
+
+  // Initialise source data with non-zero bytes.
+  for (int i = 0; i < data_size; i++) {
+    src_buffer[i] = to_non_zero(i);
+  }
+
+  const int fuzz = 11;
+
+  for (int size = 0; size < 600; size++) {
+    for (const byte* src = src_buffer; src < src_buffer + fuzz; src++) {
+      for (byte* dest = dest_buffer; dest < dest_buffer + fuzz; dest++) {
+        memset(dest_buffer, 0, data_size);
+        CHECK(dest + size < dest_buffer + data_size);
+        (void) CALL_GENERATED_CODE(f, reinterpret_cast<int64_t>(src),
+                                      reinterpret_cast<int64_t>(dest),
+                                      size, 0, 0);
+        // a0 and a1 should point at the first byte after the copied data.
+        CHECK_EQ(src + size, a0_);
+        CHECK_EQ(dest + size, a1_);
+        // Check that we haven't written outside the target area.
+        CHECK(all_zeroes(dest_buffer, dest));
+        CHECK(all_zeroes(dest + size, dest_buffer + data_size));
+        // Check the target area.
+        CHECK_EQ(0, memcmp(src, dest, size));
+      }
+    }
+  }
+
+  // Check that the source data hasn't been clobbered.
+  for (int i = 0; i < data_size; i++) {
+    CHECK(src_buffer[i] == to_non_zero(i));
+  }
+}
+
+
+TEST(LoadConstants) {
+  CcTest::InitializeVM();
+  Isolate* isolate = Isolate::Current();
+  HandleScope handles(isolate);
+
+  int64_t refConstants[64];
+  int64_t result[64];
+
+  int64_t mask = 1;
+  for (int i = 0; i < 64; i++) {
+    refConstants[i] = ~(mask << i);
+  }
+
+  MacroAssembler assembler(isolate, NULL, 0);
+  MacroAssembler* masm = &assembler;
+
+  __ mov(a4, a0);
+  for (int i = 0; i < 64; i++) {
+    // Load constant.
+    __ li(a5, Operand(refConstants[i]));
+    __ sd(a5, MemOperand(a4));
+    __ Daddu(a4, a4, Operand(kPointerSize));
+  }
+
+  __ jr(ra);
+  __ nop();
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+  ::F f = FUNCTION_CAST< ::F>(code->entry());
+     (void) CALL_GENERATED_CODE(f, reinterpret_cast<int64_t>(result),
+                                0, 0, 0, 0);
+  // Check results.
+  for (int i = 0; i < 64; i++) {
+    CHECK(refConstants[i] == result[i]);
+  }
+}
+
+
+TEST(LoadAddress) {
+  CcTest::InitializeVM();
+  Isolate* isolate = Isolate::Current();
+  HandleScope handles(isolate);
+
+  MacroAssembler assembler(isolate, NULL, 0);
+  MacroAssembler* masm = &assembler;
+  Label to_jump, skip;
+  __ mov(a4, a0);
+
+  __ Branch(&skip);
+  __ bind(&to_jump);
+  __ nop();
+  __ nop();
+  __ jr(ra);
+  __ nop();
+  __ bind(&skip);
+  __ li(a4, Operand(masm->jump_address(&to_jump)), ADDRESS_LOAD);
+  int check_size = masm->InstructionsGeneratedSince(&skip);
+  CHECK_EQ(check_size, 4);
+  __ jr(a4);
+  __ nop();
+  __ stop("invalid");
+  __ stop("invalid");
+  __ stop("invalid");
+  __ stop("invalid");
+  __ stop("invalid");
+
+  CodeDesc desc;
+  masm->GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+
+  ::F f = FUNCTION_CAST< ::F>(code->entry());
+  (void) CALL_GENERATED_CODE(f, 0, 0, 0, 0, 0);
+  // No explicit result checks: returning normally (instead of hitting one of
+  // the stop("invalid") markers above) means the loaded address was correct.
+}
+
+#undef __
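
The new mips64 file above relies on a cctest convention worth calling out: '__' is defined to 'masm->' so generated-code listings read like assembly, and it is #undef'd at the end of the file so the abbreviation cannot leak into other translation units. Isolated:

    #define __ masm->

    static void Generate(MacroAssembler* masm) {
      __ mov(a4, a0);  // expands to masm->mov(a4, a0);
      __ jr(ra);
      __ nop();        // instruction in the branch delay slot
    }

    #undef __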
diff --git a/test/cctest/test-macro-assembler-x64.cc b/test/cctest/test-macro-assembler-x64.cc
index 1b0274a..7f20a8d 100644
--- a/test/cctest/test-macro-assembler-x64.cc
+++ b/test/cctest/test-macro-assembler-x64.cc
@@ -29,9 +29,9 @@
 
 #include "src/v8.h"
 
-#include "src/macro-assembler.h"
+#include "src/base/platform/platform.h"
 #include "src/factory.h"
-#include "src/platform.h"
+#include "src/macro-assembler.h"
 #include "src/serialize.h"
 #include "test/cctest/cctest.h"
 
@@ -46,7 +46,6 @@
 using i::Isolate;
 using i::Label;
 using i::MacroAssembler;
-using i::OS;
 using i::Operand;
 using i::RelocInfo;
 using i::Representation;
@@ -154,12 +153,10 @@
 
 // Test that we can move a Smi value literally into a register.
 TEST(SmiMove) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -207,7 +204,7 @@
     __ movl(rax, Immediate(id + 2));
     __ j(less_equal, exit);
   } else {
-    ASSERT_EQ(x, y);
+    DCHECK_EQ(x, y);
     __ movl(rax, Immediate(id + 3));
     __ j(not_equal, exit);
   }
@@ -224,7 +221,7 @@
       __ movl(rax, Immediate(id + 9));
       __ j(greater_equal, exit);
     } else {
-      ASSERT(y > x);
+      DCHECK(y > x);
       __ movl(rax, Immediate(id + 10));
       __ j(less_equal, exit);
     }
@@ -241,13 +238,10 @@
 
 // Test that we can compare smis for equality (and more).
 TEST(SmiCompare) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 2, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -292,12 +286,10 @@
 
 
 TEST(Integer32ToSmi) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -399,7 +391,7 @@
                               int64_t x,
                               int y) {
   int64_t result = x + y;
-  ASSERT(Smi::IsValid(result));
+  DCHECK(Smi::IsValid(result));
   __ movl(rax, Immediate(id));
   __ Move(r8, Smi::FromInt(static_cast<int>(result)));
   __ movq(rcx, x);
@@ -420,12 +412,10 @@
 
 
 TEST(Integer64PlusConstantToSmi) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -464,12 +454,10 @@
 
 
 TEST(SmiCheck) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                   &actual_size,
-                                                   true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -711,13 +699,10 @@
 
 
 TEST(SmiNeg) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -820,7 +805,7 @@
                                int id,
                                int x) {
   // Adds a Smi to x so that the addition overflows.
-  ASSERT(x != 0);  // Can't overflow by adding a Smi.
+  DCHECK(x != 0);  // Can't overflow by adding a Smi.
   int y_max = (x > 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue - x - 1);
   int y_min = (x > 0) ? (Smi::kMaxValue - x + 1) : (Smi::kMinValue + 0);
 
@@ -927,13 +912,10 @@
 
 
 TEST(SmiAdd) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 3, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1039,7 +1021,7 @@
                                int id,
                                int x) {
   // Subtracts a Smi from x so that the subtraction overflows.
-  ASSERT(x != -1);  // Can't overflow by subtracting a Smi.
+  DCHECK(x != -1);  // Can't overflow by subtracting a Smi.
   int y_max = (x < 0) ? (Smi::kMaxValue + 0) : (Smi::kMinValue + 0);
   int y_min = (x < 0) ? (Smi::kMaxValue + x + 2) : (Smi::kMinValue + x);
 
@@ -1148,13 +1130,10 @@
 
 
 TEST(SmiSub) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 4, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1239,12 +1218,10 @@
 
 
 TEST(SmiMul) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1344,13 +1321,10 @@
 
 
 TEST(SmiDiv) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 2, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1454,13 +1428,10 @@
 
 
 TEST(SmiMod) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 2, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1515,7 +1486,7 @@
   for (int i = 0; i < 8; i++) {
     __ Move(rcx, Smi::FromInt(x));
     SmiIndex index = masm->SmiToIndex(rdx, rcx, i);
-    ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
+    DCHECK(index.reg.is(rcx) || index.reg.is(rdx));
     __ shlq(index.reg, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(x) << i);
     __ cmpq(index.reg, r8);
@@ -1523,7 +1494,7 @@
     __ incq(rax);
     __ Move(rcx, Smi::FromInt(x));
     index = masm->SmiToIndex(rcx, rcx, i);
-    ASSERT(index.reg.is(rcx));
+    DCHECK(index.reg.is(rcx));
     __ shlq(rcx, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(x) << i);
     __ cmpq(rcx, r8);
@@ -1532,7 +1503,7 @@
 
     __ Move(rcx, Smi::FromInt(x));
     index = masm->SmiToNegativeIndex(rdx, rcx, i);
-    ASSERT(index.reg.is(rcx) || index.reg.is(rdx));
+    DCHECK(index.reg.is(rcx) || index.reg.is(rdx));
     __ shlq(index.reg, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(-x) << i);
     __ cmpq(index.reg, r8);
@@ -1540,7 +1511,7 @@
     __ incq(rax);
     __ Move(rcx, Smi::FromInt(x));
     index = masm->SmiToNegativeIndex(rcx, rcx, i);
-    ASSERT(index.reg.is(rcx));
+    DCHECK(index.reg.is(rcx));
     __ shlq(rcx, Immediate(index.scale));
     __ Set(r8, static_cast<intptr_t>(-x) << i);
     __ cmpq(rcx, r8);
@@ -1551,13 +1522,10 @@
 
 
 TEST(SmiIndex) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 5,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 5, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1620,13 +1588,10 @@
 
 
 TEST(SmiSelectNonSmi) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 2, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1699,13 +1664,10 @@
 
 
 TEST(SmiAnd) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 2, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1780,13 +1742,10 @@
 
 
 TEST(SmiOr) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 2, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1863,13 +1822,10 @@
 
 
 TEST(SmiXor) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 2, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -1930,13 +1886,10 @@
 
 
 TEST(SmiNot) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -2026,13 +1979,10 @@
 
 
 TEST(SmiShiftLeft) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 7,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 7, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -2132,13 +2082,10 @@
 
 
 TEST(SmiShiftLogicalRight) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 5,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 5, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -2201,13 +2148,10 @@
 
 
 TEST(SmiShiftArithmeticRight) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 3,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 3, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -2239,7 +2183,7 @@
 
 
 void TestPositiveSmiPowerUp(MacroAssembler* masm, Label* exit, int id, int x) {
-  ASSERT(x >= 0);
+  DCHECK(x >= 0);
   int powers[] = { 0, 1, 2, 3, 8, 16, 24, 31 };
   int power_count = 8;
   __ movl(rax, Immediate(id));
@@ -2265,13 +2209,10 @@
 
 
 TEST(PositiveSmiTimesPowerOfTwoToInteger64) {
-  i::V8::Initialize(NULL);
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 4,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 4, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -2305,16 +2246,13 @@
 
 
 TEST(OperandOffset) {
-  i::V8::Initialize(NULL);
   uint32_t data[256];
   for (uint32_t i = 0; i < 256; i++) { data[i] = i * 0x01010101; }
 
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer =
-      static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize * 2,
-                                      &actual_size,
-                                      true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize * 2, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
@@ -2661,13 +2599,10 @@
 
 
 TEST(LoadAndStoreWithRepresentation) {
-  v8::internal::V8::Initialize(NULL);
-
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
diff --git a/test/cctest/test-macro-assembler-x87.cc b/test/cctest/test-macro-assembler-x87.cc
index 01a3604..0b057d8 100644
--- a/test/cctest/test-macro-assembler-x87.cc
+++ b/test/cctest/test-macro-assembler-x87.cc
@@ -28,13 +28,13 @@
 #include <stdlib.h>
 
 #include "src/v8.h"
-
-#include "src/macro-assembler.h"
-#include "src/factory.h"
-#include "src/platform.h"
-#include "src/serialize.h"
 #include "test/cctest/cctest.h"
 
+#include "src/base/platform/platform.h"
+#include "src/factory.h"
+#include "src/macro-assembler.h"
+#include "src/serialize.h"
+
 using namespace v8::internal;
 
 #if __GNUC__
@@ -50,13 +50,10 @@
 
 
 TEST(LoadAndStoreWithRepresentation) {
-  v8::internal::V8::Initialize(NULL);
-
   // Allocate an executable page of memory.
   size_t actual_size;
-  byte* buffer = static_cast<byte*>(OS::Allocate(Assembler::kMinimalBufferSize,
-                                                 &actual_size,
-                                                 true));
+  byte* buffer = static_cast<byte*>(v8::base::OS::Allocate(
+      Assembler::kMinimalBufferSize, &actual_size, true));
   CHECK(buffer);
   Isolate* isolate = CcTest::i_isolate();
   HandleScope handles(isolate);
diff --git a/test/cctest/test-mark-compact.cc b/test/cctest/test-mark-compact.cc
index bea0d34..c7d6531 100644
--- a/test/cctest/test-mark-compact.cc
+++ b/test/cctest/test-mark-compact.cc
@@ -28,11 +28,11 @@
 #include <stdlib.h>
 
 #ifdef __linux__
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <fcntl.h>
-#include <unistd.h>
 #include <errno.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <sys/types.h>
+#include <unistd.h>
 #endif
 
 #include <utility>
@@ -92,7 +92,8 @@
   CHECK(heap->InSpace(*array, NEW_SPACE));
 
   // Call mark compact GC, so array becomes an old object.
-  heap->CollectGarbage(OLD_POINTER_SPACE);
+  heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
 
   // Array now sits in the old space
   CHECK(heap->InSpace(*array, OLD_POINTER_SPACE));
@@ -138,13 +139,13 @@
   heap->CollectGarbage(OLD_POINTER_SPACE, "trigger 1");
 
   // keep allocating garbage in new space until it fails
-  const int ARRAY_SIZE = 100;
+  const int arraysize = 100;
   AllocationResult allocation;
   do {
-    allocation = heap->AllocateFixedArray(ARRAY_SIZE);
+    allocation = heap->AllocateFixedArray(arraysize);
   } while (!allocation.IsRetry());
   heap->CollectGarbage(NEW_SPACE, "trigger 2");
-  heap->AllocateFixedArray(ARRAY_SIZE).ToObjectChecked();
+  heap->AllocateFixedArray(arraysize).ToObjectChecked();
 
   // keep allocating maps until it fails
   do {
@@ -157,7 +158,7 @@
     // allocate a garbage
     Handle<String> func_name = factory->InternalizeUtf8String("theFunction");
     Handle<JSFunction> function = factory->NewFunction(func_name);
-    JSReceiver::SetProperty(global, func_name, function, NONE, SLOPPY).Check();
+    JSReceiver::SetProperty(global, func_name, function, SLOPPY).Check();
 
     factory->NewJSObject(function);
   }
@@ -166,7 +167,9 @@
 
   { HandleScope scope(isolate);
     Handle<String> func_name = factory->InternalizeUtf8String("theFunction");
-    CHECK(JSReceiver::HasOwnProperty(global, func_name));
+    v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, func_name);
+    CHECK(maybe.has_value);
+    CHECK(maybe.value);
     Handle<Object> func_value =
         Object::GetProperty(global, func_name).ToHandleChecked();
     CHECK(func_value->IsJSFunction());
@@ -174,17 +177,19 @@
     Handle<JSObject> obj = factory->NewJSObject(function);
 
     Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
-    JSReceiver::SetProperty(global, obj_name, obj, NONE, SLOPPY).Check();
+    JSReceiver::SetProperty(global, obj_name, obj, SLOPPY).Check();
     Handle<String> prop_name = factory->InternalizeUtf8String("theSlot");
     Handle<Smi> twenty_three(Smi::FromInt(23), isolate);
-    JSReceiver::SetProperty(obj, prop_name, twenty_three, NONE, SLOPPY).Check();
+    JSReceiver::SetProperty(obj, prop_name, twenty_three, SLOPPY).Check();
   }
 
   heap->CollectGarbage(OLD_POINTER_SPACE, "trigger 5");
 
   { HandleScope scope(isolate);
     Handle<String> obj_name = factory->InternalizeUtf8String("theObject");
-    CHECK(JSReceiver::HasOwnProperty(global, obj_name));
+    v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, obj_name);
+    CHECK(maybe.has_value);
+    CHECK(maybe.value);
     Handle<Object> object =
         Object::GetProperty(global, obj_name).ToHandleChecked();
     CHECK(object->IsJSObject());
@@ -236,7 +241,7 @@
   std::pair<v8::Persistent<v8::Value>*, int>* p =
       reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
           data.GetParameter());
-  ASSERT_EQ(1234, p->second);
+  DCHECK_EQ(1234, p->second);
   NumberOfWeakCalls++;
   p->first->Reset();
 }
@@ -296,15 +301,13 @@
 
   {
     Object** g1_objects[] = { g1s1.location(), g1s2.location() };
-    Object** g1_children[] = { g1c1.location() };
     Object** g2_objects[] = { g2s1.location(), g2s2.location() };
-    Object** g2_children[] = { g2c1.location() };
     global_handles->AddObjectGroup(g1_objects, 2, NULL);
-    global_handles->AddImplicitReferences(
-        Handle<HeapObject>::cast(g1s1).location(), g1_children, 1);
+    global_handles->SetReference(Handle<HeapObject>::cast(g1s1).location(),
+                                 g1c1.location());
     global_handles->AddObjectGroup(g2_objects, 2, NULL);
-    global_handles->AddImplicitReferences(
-        Handle<HeapObject>::cast(g2s1).location(), g2_children, 1);
+    global_handles->SetReference(Handle<HeapObject>::cast(g2s1).location(),
+                                 g2c1.location());
   }
   // Do a full GC
   heap->CollectGarbage(OLD_POINTER_SPACE);
@@ -325,15 +328,13 @@
   // Groups are deleted, rebuild groups.
   {
     Object** g1_objects[] = { g1s1.location(), g1s2.location() };
-    Object** g1_children[] = { g1c1.location() };
     Object** g2_objects[] = { g2s1.location(), g2s2.location() };
-    Object** g2_children[] = { g2c1.location() };
     global_handles->AddObjectGroup(g1_objects, 2, NULL);
-    global_handles->AddImplicitReferences(
-        Handle<HeapObject>::cast(g1s1).location(), g1_children, 1);
+    global_handles->SetReference(Handle<HeapObject>::cast(g1s1).location(),
+                                 g1c1.location());
     global_handles->AddObjectGroup(g2_objects, 2, NULL);
-    global_handles->AddImplicitReferences(
-        Handle<HeapObject>::cast(g2s1).location(), g2_children, 1);
+    global_handles->SetReference(Handle<HeapObject>::cast(g2s1).location(),
+                                 g2c1.location());
   }
 
   heap->CollectGarbage(OLD_POINTER_SPACE);
@@ -361,7 +362,7 @@
   bool has_been_disposed() { return has_been_disposed_; }
 
   virtual void Dispose() {
-    ASSERT(!has_been_disposed_);
+    DCHECK(!has_been_disposed_);
     has_been_disposed_ = true;
   }
 
@@ -384,15 +385,9 @@
 
   v8::HandleScope handle_scope(CcTest::isolate());
 
-  Handle<Object> object = global_handles->Create(
-      CcTest::test_heap()->AllocateFixedArray(1).ToObjectChecked());
-
   TestRetainedObjectInfo info;
   global_handles->AddObjectGroup(NULL, 0, &info);
-  ASSERT(info.has_been_disposed());
-
-  global_handles->AddImplicitReferences(
-        Handle<HeapObject>::cast(object).location(), NULL, 0);
+  DCHECK(info.has_been_disposed());
 }
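
HasOwnProperty in the hunks above now yields a v8::Maybe<bool> instead of a plain bool, so each call site checks that a value was produced before reading it. The consuming pattern, as used twice above ('name' here is a placeholder handle):

    // A Maybe<bool> separates "the lookup failed to complete" (no value)
    // from "the property is absent" (value == false).
    v8::Maybe<bool> maybe = JSReceiver::HasOwnProperty(global, name);
    CHECK(maybe.has_value);  // the operation itself succeeded
    CHECK(maybe.value);      // and the property exists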
 
 
diff --git a/test/cctest/test-mutex.cc b/test/cctest/test-mutex.cc
deleted file mode 100644
index 4e141be..0000000
--- a/test/cctest/test-mutex.cc
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <cstdlib>
-
-#include "src/v8.h"
-
-#include "src/platform/mutex.h"
-#include "test/cctest/cctest.h"
-
-using namespace ::v8::internal;
-
-
-TEST(LockGuardMutex) {
-  Mutex mutex;
-  { LockGuard<Mutex> lock_guard(&mutex);
-  }
-  { LockGuard<Mutex> lock_guard(&mutex);
-  }
-}
-
-
-TEST(LockGuardRecursiveMutex) {
-  RecursiveMutex recursive_mutex;
-  { LockGuard<RecursiveMutex> lock_guard(&recursive_mutex);
-  }
-  { LockGuard<RecursiveMutex> lock_guard1(&recursive_mutex);
-    LockGuard<RecursiveMutex> lock_guard2(&recursive_mutex);
-  }
-}
-
-
-TEST(LockGuardLazyMutex) {
-  LazyMutex lazy_mutex = LAZY_MUTEX_INITIALIZER;
-  { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer());
-  }
-  { LockGuard<Mutex> lock_guard(lazy_mutex.Pointer());
-  }
-}
-
-
-TEST(LockGuardLazyRecursiveMutex) {
-  LazyRecursiveMutex lazy_recursive_mutex = LAZY_RECURSIVE_MUTEX_INITIALIZER;
-  { LockGuard<RecursiveMutex> lock_guard(lazy_recursive_mutex.Pointer());
-  }
-  { LockGuard<RecursiveMutex> lock_guard1(lazy_recursive_mutex.Pointer());
-    LockGuard<RecursiveMutex> lock_guard2(lazy_recursive_mutex.Pointer());
-  }
-}
-
-
-TEST(MultipleMutexes) {
-  Mutex mutex1;
-  Mutex mutex2;
-  Mutex mutex3;
-  // Order 1
-  mutex1.Lock();
-  mutex2.Lock();
-  mutex3.Lock();
-  mutex1.Unlock();
-  mutex2.Unlock();
-  mutex3.Unlock();
-  // Order 2
-  mutex1.Lock();
-  mutex2.Lock();
-  mutex3.Lock();
-  mutex3.Unlock();
-  mutex2.Unlock();
-  mutex1.Unlock();
-}
-
-
-TEST(MultipleRecursiveMutexes) {
-  RecursiveMutex recursive_mutex1;
-  RecursiveMutex recursive_mutex2;
-  // Order 1
-  recursive_mutex1.Lock();
-  recursive_mutex2.Lock();
-  CHECK(recursive_mutex1.TryLock());
-  CHECK(recursive_mutex2.TryLock());
-  recursive_mutex1.Unlock();
-  recursive_mutex1.Unlock();
-  recursive_mutex2.Unlock();
-  recursive_mutex2.Unlock();
-  // Order 2
-  recursive_mutex1.Lock();
-  CHECK(recursive_mutex1.TryLock());
-  recursive_mutex2.Lock();
-  CHECK(recursive_mutex2.TryLock());
-  recursive_mutex2.Unlock();
-  recursive_mutex1.Unlock();
-  recursive_mutex2.Unlock();
-  recursive_mutex1.Unlock();
-}
diff --git a/test/cctest/test-object-observe.cc b/test/cctest/test-object-observe.cc
index 2a807bf..d208a26 100644
--- a/test/cctest/test-object-observe.cc
+++ b/test/cctest/test-object-observe.cc
@@ -253,7 +253,7 @@
 
 #define EXPECT_RECORDS(records, expectations)                \
   ExpectRecords(CcTest::isolate(), records, expectations, \
-                ARRAY_SIZE(expectations))
+                arraysize(expectations))
 
 TEST(APITestBasicMutation) {
   v8::Isolate* v8_isolate = CcTest::isolate();
@@ -275,8 +275,8 @@
   // Setting an indexed element via the property setting method
   obj->Set(Number::New(v8_isolate, 1), Number::New(v8_isolate, 5));
   // Setting with a non-String, non-uint32 key
-  obj->Set(Number::New(v8_isolate, 1.1),
-           Number::New(v8_isolate, 6), DontDelete);
+  obj->ForceSet(Number::New(v8_isolate, 1.1), Number::New(v8_isolate, 6),
+                DontDelete);
   obj->Delete(String::NewFromUtf8(v8_isolate, "foo"));
   obj->Delete(1);
   obj->ForceDelete(Number::New(v8_isolate, 1.1));
@@ -658,7 +658,7 @@
                "Object.unobserve(obj, observer);");
   }
 
-  v8::V8::ContextDisposedNotification();
+  CcTest::isolate()->ContextDisposedNotification();
   CheckSurvivingGlobalObjectsCount(1);
 }
 
@@ -679,7 +679,7 @@
     CompileRun("Object.getNotifier(obj);");
   }
 
-  v8::V8::ContextDisposedNotification();
+  CcTest::isolate()->ContextDisposedNotification();
   CheckSurvivingGlobalObjectsCount(1);
 }
 
@@ -706,6 +706,6 @@
                    "notifier, 'foo', function(){})");
   }
 
-  v8::V8::ContextDisposedNotification();
+  CcTest::isolate()->ContextDisposedNotification();
   CheckSurvivingGlobalObjectsCount(1);
 }
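
The ARRAY_SIZE-to-arraysize switch above replaces a plain sizeof macro with a template-backed count. A sketch of the usual shape of such a helper (assuming the Chromium-style definition; the patch itself does not show it):

    #include <stddef.h>

    // ArraySizeHelper only matches genuine arrays, so arraysize(p) on a
    // pointer fails to compile -- unlike sizeof(a) / sizeof(a[0]).
    template <typename T, size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    int expectations[3];
    const size_t n = arraysize(expectations);  // 3, at compile time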
diff --git a/test/cctest/test-ordered-hash-table.cc b/test/cctest/test-ordered-hash-table.cc
index 7c4af41..9578936 100644
--- a/test/cctest/test-ordered-hash-table.cc
+++ b/test/cctest/test-ordered-hash-table.cc
@@ -38,8 +38,6 @@
 
 
 TEST(Set) {
-  i::FLAG_harmony_collections = true;
-
   LocalContext context;
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -105,8 +103,6 @@
 
 
 TEST(Map) {
-  i::FLAG_harmony_collections = true;
-
   LocalContext context;
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
@@ -122,7 +118,8 @@
   CHECK(ordered_map->Lookup(obj)->IsTheHole());
   ordered_map = OrderedHashMap::Put(ordered_map, obj, val);
   CHECK_EQ(1, ordered_map->NumberOfElements());
-  CHECK(ordered_map->Lookup(obj)->SameValue(*val));
+  Object* lookup = ordered_map->Lookup(obj);
+  CHECK(lookup->SameValue(*val));
   bool was_present = false;
   ordered_map = OrderedHashMap::Remove(ordered_map, obj, &was_present);
   CHECK(was_present);
@@ -140,20 +137,28 @@
   ordered_map = OrderedHashMap::Put(ordered_map, obj2, val2);
   ordered_map = OrderedHashMap::Put(ordered_map, obj3, val3);
   CHECK_EQ(3, ordered_map->NumberOfElements());
-  CHECK(ordered_map->Lookup(obj1)->SameValue(*val1));
-  CHECK(ordered_map->Lookup(obj2)->SameValue(*val2));
-  CHECK(ordered_map->Lookup(obj3)->SameValue(*val3));
+  lookup = ordered_map->Lookup(obj1);
+  CHECK(lookup->SameValue(*val1));
+  lookup = ordered_map->Lookup(obj2);
+  CHECK(lookup->SameValue(*val2));
+  lookup = ordered_map->Lookup(obj3);
+  CHECK(lookup->SameValue(*val3));
 
   // Test growth
   ordered_map = OrderedHashMap::Put(ordered_map, obj, val);
   Handle<JSObject> obj4 = factory->NewJSObjectFromMap(map);
   Handle<JSObject> val4 = factory->NewJSObjectFromMap(map);
   ordered_map = OrderedHashMap::Put(ordered_map, obj4, val4);
-  CHECK(ordered_map->Lookup(obj)->SameValue(*val));
-  CHECK(ordered_map->Lookup(obj1)->SameValue(*val1));
-  CHECK(ordered_map->Lookup(obj2)->SameValue(*val2));
-  CHECK(ordered_map->Lookup(obj3)->SameValue(*val3));
-  CHECK(ordered_map->Lookup(obj4)->SameValue(*val4));
+  lookup = ordered_map->Lookup(obj);
+  CHECK(lookup->SameValue(*val));
+  lookup = ordered_map->Lookup(obj1);
+  CHECK(lookup->SameValue(*val1));
+  lookup = ordered_map->Lookup(obj2);
+  CHECK(lookup->SameValue(*val2));
+  lookup = ordered_map->Lookup(obj3);
+  CHECK(lookup->SameValue(*val3));
+  lookup = ordered_map->Lookup(obj4);
+  CHECK(lookup->SameValue(*val4));
   CHECK_EQ(5, ordered_map->NumberOfElements());
   CHECK_EQ(4, ordered_map->NumberOfBuckets());
 
diff --git a/test/cctest/test-ostreams.cc b/test/cctest/test-ostreams.cc
new file mode 100644
index 0000000..c83f96d
--- /dev/null
+++ b/test/cctest/test-ostreams.cc
@@ -0,0 +1,148 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <string.h>
+#include <limits>
+
+#include "include/v8stdint.h"
+#include "src/ostreams.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+
+
+TEST(OStringStreamConstructor) {
+  OStringStream oss;
+  const size_t expected_size = 0;
+  CHECK(expected_size == oss.size());
+  CHECK_GT(oss.capacity(), 0);
+  CHECK_NE(NULL, oss.data());
+  CHECK_EQ("", oss.c_str());
+}
+
+
+#define TEST_STRING            \
+  "Ash nazg durbatuluk, "      \
+  "ash nazg gimbatul, "        \
+  "ash nazg thrakatuluk, "     \
+  "agh burzum-ishi krimpatul."
+
+TEST(OStringStreamGrow) {
+  OStringStream oss;
+  const int repeat = 30;
+  size_t len = strlen(TEST_STRING);
+  for (int i = 0; i < repeat; ++i) {
+    oss.write(TEST_STRING, len);
+  }
+  const char* expected =
+      TEST_STRING TEST_STRING TEST_STRING TEST_STRING TEST_STRING
+      TEST_STRING TEST_STRING TEST_STRING TEST_STRING TEST_STRING
+      TEST_STRING TEST_STRING TEST_STRING TEST_STRING TEST_STRING
+      TEST_STRING TEST_STRING TEST_STRING TEST_STRING TEST_STRING
+      TEST_STRING TEST_STRING TEST_STRING TEST_STRING TEST_STRING
+      TEST_STRING TEST_STRING TEST_STRING TEST_STRING TEST_STRING;
+  const size_t expected_len = len * repeat;
+  CHECK(expected_len == oss.size());
+  CHECK_GT(oss.capacity(), 0);
+  CHECK_EQ(0, strncmp(expected, oss.data(), expected_len));
+  CHECK_EQ(expected, oss.c_str());
+}
+
+
+template <class T>
+static void check(const char* expected, T value) {
+  OStringStream oss;
+  oss << value << " " << hex << value;
+  CHECK_EQ(expected, oss.c_str());
+}
+
+
+TEST(NumericFormatting) {
+  check<bool>("0 0", false);
+  check<bool>("1 1", true);
+
+  check<int16_t>("-12345 cfc7", -12345);
+  check<int16_t>("-32768 8000", std::numeric_limits<int16_t>::min());
+  check<int16_t>("32767 7fff", std::numeric_limits<int16_t>::max());
+
+  check<uint16_t>("34567 8707", 34567);
+  check<uint16_t>("0 0", std::numeric_limits<uint16_t>::min());
+  check<uint16_t>("65535 ffff", std::numeric_limits<uint16_t>::max());
+
+  check<int32_t>("-1234567 ffed2979", -1234567);
+  check<int32_t>("-2147483648 80000000", std::numeric_limits<int32_t>::min());
+  check<int32_t>("2147483647 7fffffff", std::numeric_limits<int32_t>::max());
+
+  check<uint32_t>("3456789 34bf15", 3456789);
+  check<uint32_t>("0 0", std::numeric_limits<uint32_t>::min());
+  check<uint32_t>("4294967295 ffffffff", std::numeric_limits<uint32_t>::max());
+
+  check<int64_t>("-1234567 ffffffffffed2979", -1234567);
+  check<int64_t>("-9223372036854775808 8000000000000000",
+                 std::numeric_limits<int64_t>::min());
+  check<int64_t>("9223372036854775807 7fffffffffffffff",
+                 std::numeric_limits<int64_t>::max());
+
+  check<uint64_t>("3456789 34bf15", 3456789);
+  check<uint64_t>("0 0", std::numeric_limits<uint64_t>::min());
+  check<uint64_t>("18446744073709551615 ffffffffffffffff",
+                  std::numeric_limits<uint64_t>::max());
+
+  check<float>("0 0", 0.0f);
+  check<float>("123 123", 123.0f);
+  check<float>("-0.5 -0.5", -0.5f);
+  check<float>("1.25 1.25", 1.25f);
+  check<float>("0.0625 0.0625", 6.25e-2f);
+
+  check<double>("0 0", 0.0);
+  check<double>("123 123", 123.0);
+  check<double>("-0.5 -0.5", -0.5);
+  check<double>("1.25 1.25", 1.25);
+  check<double>("0.0625 0.0625", 6.25e-2);
+}
+
+
+TEST(CharacterOutput) {
+  check<char>("a a", 'a');
+  check<signed char>("B B", 'B');
+  check<unsigned char>("9 9", '9');
+  check<const char*>("bye bye", "bye");
+
+  OStringStream os;
+  os.put('H').write("ello", 4);
+  CHECK_EQ("Hello", os.c_str());
+}
+
+
+TEST(Manipulators) {
+  OStringStream os;
+  os << 123 << hex << 123 << endl << 123 << dec << 123 << 123;
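+  // "123" (dec), "7b" (hex 123), '\n'; hex mode persists for the next 123
+  // ("7b"), then dec restores "123" "123".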
+  CHECK_EQ("1237b\n7b123123", os.c_str());
+}
+
+
+class MiscStuff {
+ public:
+  MiscStuff(int i, double d, const char* s) : i_(i), d_(d), s_(s) { }
+
+ private:
+  friend OStream& operator<<(OStream& os, const MiscStuff& m);
+
+  int i_;
+  double d_;
+  const char* s_;
+};
+
+
+OStream& operator<<(OStream& os, const MiscStuff& m) {
+  return os << "{i:" << m.i_ << ", d:" << m.d_ << ", s:'" << m.s_ << "'}";
+}
+
+
+TEST(CustomOutput) {
+  OStringStream os;
+  MiscStuff m(123, 4.5, "Hurz!");
+  os << m;
+  CHECK_EQ("{i:123, d:4.5, s:'Hurz!'}", os.c_str());
+}
diff --git a/test/cctest/test-parsing.cc b/test/cctest/test-parsing.cc
index 550ac17..72f2298 100644
--- a/test/cctest/test-parsing.cc
+++ b/test/cctest/test-parsing.cc
@@ -25,21 +25,24 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include <stdlib.h>
 #include <stdio.h>
+#include <stdlib.h>
 #include <string.h>
 
 #include "src/v8.h"
 
+#include "src/ast-value-factory.h"
 #include "src/compiler.h"
 #include "src/execution.h"
 #include "src/isolate.h"
 #include "src/objects.h"
 #include "src/parser.h"
 #include "src/preparser.h"
+#include "src/rewriter.h"
 #include "src/scanner-character-streams.h"
 #include "src/token.h"
 #include "src/utils.h"
+
 #include "test/cctest/cctest.h"
 
 TEST(ScanKeywords) {
@@ -69,6 +72,7 @@
       // The scanner should parse Harmony keywords for this test.
       scanner.SetHarmonyScoping(true);
       scanner.SetHarmonyModules(true);
+      scanner.SetHarmonyClasses(true);
       scanner.Initialize(&stream);
       CHECK_EQ(key_token.token, scanner.Next());
       CHECK_EQ(i::Token::EOS, scanner.Next());
@@ -83,7 +87,7 @@
     }
     // Adding characters will make keyword matching fail.
     static const char chars_to_append[] = { 'z', '0', '_' };
-    for (int j = 0; j < static_cast<int>(ARRAY_SIZE(chars_to_append)); ++j) {
+    for (int j = 0; j < static_cast<int>(arraysize(chars_to_append)); ++j) {
       i::MemMove(buffer, keyword, length);
       buffer[length] = chars_to_append[j];
       i::Utf8ToUtf16CharacterStream stream(buffer, length + 1);
@@ -141,9 +145,8 @@
   };
 
   // Parser/Scanner needs a stack limit.
-  int marker;
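+  // Leave 128KB of headroom below the current stack position.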
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
   uintptr_t stack_limit = CcTest::i_isolate()->stack_guard()->real_climit();
   for (int i = 0; tests[i]; i++) {
     const i::byte* source =
@@ -156,8 +159,7 @@
     preparser.set_allow_lazy(true);
     i::PreParser::PreParseResult result = preparser.PreParseProgram();
     CHECK_EQ(i::PreParser::kPreParseSuccess, result);
-    i::ScriptData data(log.ExtractData());
-    CHECK(!data.has_error());
+    CHECK(!log.HasError());
   }
 
   for (int i = 0; fail_tests[i]; i++) {
@@ -172,13 +174,12 @@
     i::PreParser::PreParseResult result = preparser.PreParseProgram();
     // Even in the case of a syntax error, kPreParseSuccess is returned.
     CHECK_EQ(i::PreParser::kPreParseSuccess, result);
-    i::ScriptData data(log.ExtractData());
-    CHECK(data.has_error());
+    CHECK(log.HasError());
   }
 }
 
 
-class ScriptResource : public v8::String::ExternalAsciiStringResource {
+class ScriptResource : public v8::String::ExternalOneByteStringResource {
  public:
   ScriptResource(const char* data, size_t length)
       : data_(data), length_(length) { }
@@ -197,9 +198,8 @@
   v8::HandleScope handles(isolate);
   v8::Local<v8::Context> context = v8::Context::New(isolate);
   v8::Context::Scope context_scope(context);
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
   // Source containing functions that might be lazily compiled and all types
   // of symbols (string, propertyName, regexp).
@@ -213,23 +213,28 @@
       "var v = /RegExp Literal/;"
       "var w = /RegExp Literal\\u0020With Escape/gin;"
       "var y = { get getter() { return 42; }, "
-      "          set setter(v) { this.value = v; }};";
+      "          set setter(v) { this.value = v; }};"
+      "var f = a => function (b) { return a + b; };"
+      "var g = a => b => a + b;";
   int source_length = i::StrLength(source);
 
   // ScriptResource will be deleted when the corresponding String is GCd.
   v8::ScriptCompiler::Source script_source(v8::String::NewExternal(
       isolate, new ScriptResource(source, source_length)));
+  i::FLAG_harmony_arrow_functions = true;
   i::FLAG_min_preparse_length = 0;
   v8::ScriptCompiler::Compile(isolate, &script_source,
-                              v8::ScriptCompiler::kProduceDataToCache);
+                              v8::ScriptCompiler::kProduceParserCache);
   CHECK(script_source.GetCachedData());
 
   // Compile the script again, using the cached data.
   bool lazy_flag = i::FLAG_lazy;
   i::FLAG_lazy = true;
-  v8::ScriptCompiler::Compile(isolate, &script_source);
+  v8::ScriptCompiler::Compile(isolate, &script_source,
+                              v8::ScriptCompiler::kConsumeParserCache);
   i::FLAG_lazy = false;
-  v8::ScriptCompiler::CompileUnbound(isolate, &script_source);
+  v8::ScriptCompiler::CompileUnbound(isolate, &script_source,
+                                     v8::ScriptCompiler::kConsumeParserCache);
   i::FLAG_lazy = lazy_flag;
 }
 
@@ -240,49 +245,55 @@
 
   // Make preparsing work for short scripts.
   i::FLAG_min_preparse_length = 0;
+  i::FLAG_harmony_arrow_functions = true;
 
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope handles(isolate);
   v8::Local<v8::Context> context = v8::Context::New(isolate);
   v8::Context::Scope context_scope(context);
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
-  const char* good_code =
-      "function this_is_lazy() { var a; } function foo() { return 25; } foo();";
+  const char* good_code[] = {
+      "function this_is_lazy() { var a; } function foo() { return 25; } foo();",
+      "var this_is_lazy = () => { var a; }; var foo = () => 25; foo();",
+  };
 
   // Insert a syntax error inside the lazy function.
-  const char* bad_code =
-      "function this_is_lazy() { if (   } function foo() { return 25; } foo();";
+  const char* bad_code[] = {
+      "function this_is_lazy() { if (   } function foo() { return 25; } foo();",
+      "var this_is_lazy = () => { if (   }; var foo = () => 25; foo();",
+  };
 
-  v8::ScriptCompiler::Source good_source(v8_str(good_code));
-  v8::ScriptCompiler::Compile(isolate, &good_source,
-                              v8::ScriptCompiler::kProduceDataToCache);
+  for (unsigned i = 0; i < arraysize(good_code); i++) {
+    v8::ScriptCompiler::Source good_source(v8_str(good_code[i]));
+    v8::ScriptCompiler::Compile(isolate, &good_source,
+                                v8::ScriptCompiler::kProduceDataToCache);
 
-  const v8::ScriptCompiler::CachedData* cached_data =
-      good_source.GetCachedData();
-  CHECK(cached_data->data != NULL);
-  CHECK_GT(cached_data->length, 0);
+    const v8::ScriptCompiler::CachedData* cached_data =
+        good_source.GetCachedData();
+    CHECK(cached_data->data != NULL);
+    CHECK_GT(cached_data->length, 0);
 
-  // Now compile the erroneous code with the good preparse data. If the preparse
-  // data is used, the lazy function is skipped and it should compile fine.
-  v8::ScriptCompiler::Source bad_source(
-      v8_str(bad_code), new v8::ScriptCompiler::CachedData(
-                            cached_data->data, cached_data->length));
-  v8::Local<v8::Value> result =
-      v8::ScriptCompiler::Compile(isolate, &bad_source)->Run();
-  CHECK(result->IsInt32());
-  CHECK_EQ(25, result->Int32Value());
+    // Now compile the erroneous code with the good preparse data. If the
+    // preparse data is used, the lazy function is skipped and it should
+    // compile fine.
+    v8::ScriptCompiler::Source bad_source(
+        v8_str(bad_code[i]), new v8::ScriptCompiler::CachedData(
+                                 cached_data->data, cached_data->length));
+    v8::Local<v8::Value> result =
+        v8::ScriptCompiler::Compile(isolate, &bad_source)->Run();
+    CHECK(result->IsInt32());
+    CHECK_EQ(25, result->Int32Value());
+  }
 }
 
 
 TEST(StandAlonePreParser) {
   v8::V8::Initialize();
 
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
   const char* programs[] = {
       "{label: 42}",
@@ -290,6 +301,7 @@
       "function foo(x, y) { return x + y; }",
       "%ArgleBargle(glop);",
       "var x = new new Function('this.x = 42');",
+      "var f = (x, y) => x + y;",
       NULL
   };
 
@@ -306,10 +318,10 @@
     i::PreParser preparser(&scanner, &log, stack_limit);
     preparser.set_allow_lazy(true);
     preparser.set_allow_natives_syntax(true);
+    preparser.set_allow_arrow_functions(true);
     i::PreParser::PreParseResult result = preparser.PreParseProgram();
     CHECK_EQ(i::PreParser::kPreParseSuccess, result);
-    i::ScriptData data(log.ExtractData());
-    CHECK(!data.has_error());
+    CHECK(!log.HasError());
   }
 }
 
@@ -317,9 +329,8 @@
 TEST(StandAlonePreParserNoNatives) {
   v8::V8::Initialize();
 
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
   const char* programs[] = {
       "%ArgleBargle(glop);",
@@ -342,9 +353,7 @@
     preparser.set_allow_lazy(true);
     i::PreParser::PreParseResult result = preparser.PreParseProgram();
     CHECK_EQ(i::PreParser::kPreParseSuccess, result);
-    i::ScriptData data(log.ExtractData());
-    // Data contains syntax error.
-    CHECK(data.has_error());
+    CHECK(log.HasError());
   }
 }
 
@@ -356,13 +365,12 @@
   v8::HandleScope handles(isolate);
   v8::Local<v8::Context> context = v8::Context::New(isolate);
   v8::Context::Scope context_scope(context);
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
   {
     const char* source = "var myo = {if: \"foo\"}; myo.if;";
-    v8::Local<v8::Value> result = PreCompileCompileRun(source);
+    v8::Local<v8::Value> result = ParserCacheCompileRun(source);
     CHECK(result->IsString());
     v8::String::Utf8Value utf8(result);
     CHECK_EQ("foo", *utf8);
@@ -370,7 +378,7 @@
 
   {
     const char* source = "var myo = {\"bar\": \"foo\"}; myo[\"bar\"];";
-    v8::Local<v8::Value> result = PreCompileCompileRun(source);
+    v8::Local<v8::Value> result = ParserCacheCompileRun(source);
     CHECK(result->IsString());
     v8::String::Utf8Value utf8(result);
     CHECK_EQ("foo", *utf8);
@@ -378,7 +386,7 @@
 
   {
     const char* source = "var myo = {1: \"foo\"}; myo[1];";
-    v8::Local<v8::Value> result = PreCompileCompileRun(source);
+    v8::Local<v8::Value> result = ParserCacheCompileRun(source);
     CHECK(result->IsString());
     v8::String::Utf8Value utf8(result);
     CHECK_EQ("foo", *utf8);
@@ -390,9 +398,8 @@
   v8::V8::Initialize();
   i::Isolate* isolate = CcTest::i_isolate();
 
-  int marker;
-  isolate->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+  isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+                                        128 * 1024);
 
   const char* program = "var x = 'something';\n"
                         "escape: function() {}";
@@ -413,8 +420,7 @@
   i::PreParser::PreParseResult result = preparser.PreParseProgram();
   // Even in the case of a syntax error, kPreParseSuccess is returned.
   CHECK_EQ(i::PreParser::kPreParseSuccess, result);
-  i::ScriptData data(log.ExtractData());
-  CHECK(data.has_error());
+  CHECK(log.HasError());
 }
 
 
@@ -427,9 +433,8 @@
   // as with-content, which made it assume that a function inside
   // the block could be lazily compiled, and an extra, unexpected,
   // entry was added to the data.
-  int marker;
-  isolate->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+  isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+                                        128 * 1024);
 
   const char* program =
       "try { } catch (e) { var foo = function () { /* first */ } }"
@@ -446,15 +451,15 @@
   preparser.set_allow_lazy(true);
   i::PreParser::PreParseResult result = preparser.PreParseProgram();
   CHECK_EQ(i::PreParser::kPreParseSuccess, result);
-  i::ScriptData data(log.ExtractData());
-  CHECK(!data.has_error());
-  data.Initialize();
+  i::ScriptData* sd = log.GetScriptData();
+  i::ParseData pd(sd);
+  pd.Initialize();
 
   int first_function =
       static_cast<int>(strstr(program, "function") - program);
   int first_lbrace = first_function + i::StrLength("function () ");
   CHECK_EQ('{', program[first_lbrace]);
-  i::FunctionEntry entry1 = data.GetFunctionEntry(first_lbrace);
+  i::FunctionEntry entry1 = pd.GetFunctionEntry(first_lbrace);
   CHECK(!entry1.is_valid());
 
   int second_function =
@@ -462,18 +467,18 @@
   int second_lbrace =
       second_function + i::StrLength("function () ");
   CHECK_EQ('{', program[second_lbrace]);
-  i::FunctionEntry entry2 = data.GetFunctionEntry(second_lbrace);
+  i::FunctionEntry entry2 = pd.GetFunctionEntry(second_lbrace);
   CHECK(entry2.is_valid());
   CHECK_EQ('}', program[entry2.end_pos() - 1]);
+  delete sd;
 }
 
 
 TEST(PreParseOverflow) {
   v8::V8::Initialize();
 
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
   size_t kProgramSize = 1024 * 1024;
   i::SmartArrayPointer<char> program(i::NewArray<char>(kProgramSize + 1));
@@ -491,6 +496,7 @@
 
   i::PreParser preparser(&scanner, &log, stack_limit);
   preparser.set_allow_lazy(true);
+  preparser.set_allow_arrow_functions(true);
   i::PreParser::PreParseResult result = preparser.PreParseProgram();
   CHECK_EQ(i::PreParser::kPreParseStackOverflow, result);
 }
@@ -518,10 +524,8 @@
 
 #define CHECK_EQU(v1, v2) CHECK_EQ(static_cast<int>(v1), static_cast<int>(v2))
 
-void TestCharacterStream(const char* ascii_source,
-                         unsigned length,
-                         unsigned start = 0,
-                         unsigned end = 0) {
+void TestCharacterStream(const char* one_byte_source, unsigned length,
+                         unsigned start = 0, unsigned end = 0) {
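+  // The same source is materialized as an external two-byte string, a flat
+  // one-byte string, and a raw UTF-8 buffer; the three character streams
+  // below must advance in lockstep.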
   if (end == 0) end = length;
   unsigned sub_length = end - start;
   i::Isolate* isolate = CcTest::i_isolate();
@@ -529,20 +533,22 @@
   i::HandleScope test_scope(isolate);
   i::SmartArrayPointer<i::uc16> uc16_buffer(new i::uc16[length]);
   for (unsigned i = 0; i < length; i++) {
-    uc16_buffer[i] = static_cast<i::uc16>(ascii_source[i]);
+    uc16_buffer[i] = static_cast<i::uc16>(one_byte_source[i]);
   }
-  i::Vector<const char> ascii_vector(ascii_source, static_cast<int>(length));
-  i::Handle<i::String> ascii_string =
-      factory->NewStringFromAscii(ascii_vector).ToHandleChecked();
+  i::Vector<const char> one_byte_vector(one_byte_source,
+                                        static_cast<int>(length));
+  i::Handle<i::String> one_byte_string =
+      factory->NewStringFromAscii(one_byte_vector).ToHandleChecked();
   TestExternalResource resource(uc16_buffer.get(), length);
   i::Handle<i::String> uc16_string(
       factory->NewExternalStringFromTwoByte(&resource).ToHandleChecked());
 
   i::ExternalTwoByteStringUtf16CharacterStream uc16_stream(
       i::Handle<i::ExternalTwoByteString>::cast(uc16_string), start, end);
-  i::GenericStringUtf16CharacterStream string_stream(ascii_string, start, end);
+  i::GenericStringUtf16CharacterStream string_stream(one_byte_string, start,
+                                                     end);
   i::Utf8ToUtf16CharacterStream utf8_stream(
-      reinterpret_cast<const i::byte*>(ascii_source), end);
+      reinterpret_cast<const i::byte*>(one_byte_source), end);
   utf8_stream.SeekForward(start);
 
   unsigned i = start;
@@ -551,7 +557,7 @@
     CHECK_EQU(i, uc16_stream.pos());
     CHECK_EQU(i, string_stream.pos());
     CHECK_EQU(i, utf8_stream.pos());
-    int32_t c0 = ascii_source[i];
+    int32_t c0 = one_byte_source[i];
     int32_t c1 = uc16_stream.Advance();
     int32_t c2 = string_stream.Advance();
     int32_t c3 = utf8_stream.Advance();
@@ -565,7 +571,7 @@
   }
   while (i > start + sub_length / 4) {
     // Pushback, re-read, pushback again.
-    int32_t c0 = ascii_source[i - 1];
+    int32_t c0 = one_byte_source[i - 1];
     CHECK_EQU(i, uc16_stream.pos());
     CHECK_EQU(i, string_stream.pos());
     CHECK_EQU(i, utf8_stream.pos());
@@ -608,7 +614,7 @@
     CHECK_EQU(i, uc16_stream.pos());
     CHECK_EQU(i, string_stream.pos());
     CHECK_EQU(i, utf8_stream.pos());
-    int32_t c0 = ascii_source[i];
+    int32_t c0 = one_byte_source[i];
     int32_t c1 = uc16_stream.Advance();
     int32_t c2 = string_stream.Advance();
     int32_t c3 = utf8_stream.Advance();
@@ -669,7 +675,7 @@
                                     i,
                                     unibrow::Utf16::kNoPreviousCharacter);
   }
-  ASSERT(cursor == kAllUtf8CharsSizeU);
+  DCHECK(cursor == kAllUtf8CharsSizeU);
 
   i::Utf8ToUtf16CharacterStream stream(reinterpret_cast<const i::byte*>(buffer),
                                        kAllUtf8CharsSizeU);
@@ -758,8 +764,8 @@
       i::Token::EOS,
       i::Token::ILLEGAL
   };
-  ASSERT_EQ('{', str2[19]);
-  ASSERT_EQ('}', str2[37]);
+  DCHECK_EQ('{', str2[19]);
+  DCHECK_EQ('}', str2[37]);
   TestStreamScanner(&stream2, expectations2, 20, 37);
 
   const char* str3 = "{}}}}";
@@ -796,11 +802,15 @@
   CHECK(start == i::Token::DIV || start == i::Token::ASSIGN_DIV);
   CHECK(scanner.ScanRegExpPattern(start == i::Token::ASSIGN_DIV));
   scanner.Next();  // Current token is now the regexp literal.
+  i::Zone zone(CcTest::i_isolate());
+  i::AstValueFactory ast_value_factory(&zone,
+                                       CcTest::i_isolate()->heap()->HashSeed());
+  ast_value_factory.Internalize(CcTest::i_isolate());
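+  // Internalize first: an AstRawString is only backed by a heap string once
+  // the factory has been internalized, and string() below relies on that.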
   i::Handle<i::String> val =
-      scanner.AllocateInternalizedString(CcTest::i_isolate());
+      scanner.CurrentSymbol(&ast_value_factory)->string();
   i::DisallowHeapAllocation no_alloc;
   i::String::FlatContent content = val->GetFlatContent();
-  CHECK(content.IsAscii());
+  CHECK(content.IsOneByte());
   i::Vector<const uint8_t> actual = content.ToOneByteVector();
   for (int i = 0; i < actual.length(); i++) {
     CHECK_NE('\0', expected[i]);
@@ -964,7 +974,13 @@
       "    infunction;\n"
       "  }", "\n"
       "  more;", i::FUNCTION_SCOPE, i::SLOPPY },
-    { "  (function fun", "(a,b) { infunction; }", ")();",
+    // TODO(aperez): Change to use i::ARROW_SCOPE when implemented
+    { "  start;\n", "(a,b) => a + b", "; more;",
+      i::FUNCTION_SCOPE, i::SLOPPY },
+    { "  start;\n", "(a,b) => { return a+b; }", "\nmore;",
+      i::FUNCTION_SCOPE, i::SLOPPY },
+    { "  start;\n"
+      "  (function fun", "(a,b) { infunction; }", ")();",
       i::FUNCTION_SCOPE, i::SLOPPY },
     { "  for ", "(let x = 1 ; x < 10; ++ x) { block; }", " more;",
       i::BLOCK_SCOPE, i::STRICT },
@@ -1091,9 +1107,8 @@
   v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
   v8::Context::Scope context_scope(context);
 
-  int marker;
-  isolate->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+  isolate->stack_guard()->SetStackLimit(i::GetCurrentStackPosition() -
+                                        128 * 1024);
 
   for (int i = 0; source_data[i].outer_prefix; i++) {
     int kPrefixLen = Utf8LengthHelper(source_data[i].outer_prefix);
@@ -1116,9 +1131,13 @@
     CHECK_EQ(source->length(), kProgramSize);
     i::Handle<i::Script> script = factory->NewScript(source);
     i::CompilationInfoWithZone info(script);
-    i::Parser parser(&info);
+    i::Parser::ParseInfo parse_info = {isolate->stack_guard()->real_climit(),
+                                       isolate->heap()->HashSeed(),
+                                       isolate->unicode_cache()};
+    i::Parser parser(&info, &parse_info);
     parser.set_allow_lazy(true);
     parser.set_allow_harmony_scoping(true);
+    parser.set_allow_arrow_functions(true);
     info.MarkAsGlobal();
     info.SetStrictMode(source_data[i].strict_mode);
     parser.Parse();
@@ -1141,20 +1160,41 @@
 }
 
 
-i::Handle<i::String> FormatMessage(i::ScriptData* data) {
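+// Decodes a string from the preparser data format: word 0 holds the length
+// and each following word holds one character. The caller owns the result.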
+const char* ReadString(unsigned* start) {
+  int length = start[0];
+  char* result = i::NewArray<char>(length + 1);
+  for (int i = 0; i < length; i++) {
+    result[i] = start[i + 1];
+  }
+  result[length] = '\0';
+  return result;
+}
+
+
+i::Handle<i::String> FormatMessage(i::Vector<unsigned> data) {
   i::Isolate* isolate = CcTest::i_isolate();
   i::Factory* factory = isolate->factory();
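+  // Rebuild the message the way the runtime would: read the template text
+  // and optional argument out of the raw data, then call the builtins'
+  // FormatMessage function on them.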
-  const char* message = data->BuildMessage();
+  const char* message =
+      ReadString(&data[i::PreparseDataConstants::kMessageTextPos]);
   i::Handle<i::String> format = v8::Utils::OpenHandle(
       *v8::String::NewFromUtf8(CcTest::isolate(), message));
-  const char* arg = data->BuildArg();
-  i::Handle<i::JSArray> args_array = factory->NewJSArray(arg == NULL ? 0 : 1);
-  if (arg != NULL) {
-    i::JSArray::SetElement(
-        args_array, 0, v8::Utils::OpenHandle(*v8::String::NewFromUtf8(
-                                                  CcTest::isolate(), arg)),
-        NONE, i::SLOPPY).Check();
+  int arg_count = data[i::PreparseDataConstants::kMessageArgCountPos];
+  const char* arg = NULL;
+  i::Handle<i::JSArray> args_array;
+  if (arg_count == 1) {
+    // Position after text found by skipping past length field and
+    // length field content words.
+    int pos = i::PreparseDataConstants::kMessageTextPos + 1 +
+              data[i::PreparseDataConstants::kMessageTextPos];
+    arg = ReadString(&data[pos]);
+    args_array = factory->NewJSArray(1);
+    i::JSArray::SetElement(args_array, 0, v8::Utils::OpenHandle(*v8_str(arg)),
+                           NONE, i::SLOPPY).Check();
+  } else {
+    CHECK_EQ(0, arg_count);
+    args_array = factory->NewJSArray(0);
   }
+
   i::Handle<i::JSObject> builtins(isolate->js_builtins_object());
   i::Handle<i::Object> format_fun = i::Object::GetProperty(
       isolate, builtins, "FormatMessage").ToHandleChecked();
@@ -1164,6 +1204,7 @@
   CHECK(result->IsString());
   i::DeleteArray(message);
   i::DeleteArray(arg);
+  data.Dispose();
   return i::Handle<i::String>::cast(result);
 }
 
@@ -1173,9 +1214,10 @@
   kAllowNativesSyntax,
   kAllowHarmonyScoping,
   kAllowModules,
-  kAllowGenerators,
-  kAllowForOf,
-  kAllowHarmonyNumericLiterals
+  kAllowHarmonyNumericLiterals,
+  kAllowArrowFunctions,
+  kAllowClasses,
+  kAllowHarmonyObjectLiterals
 };
 
 
@@ -1192,10 +1234,12 @@
   parser->set_allow_natives_syntax(flags.Contains(kAllowNativesSyntax));
   parser->set_allow_harmony_scoping(flags.Contains(kAllowHarmonyScoping));
   parser->set_allow_modules(flags.Contains(kAllowModules));
-  parser->set_allow_generators(flags.Contains(kAllowGenerators));
-  parser->set_allow_for_of(flags.Contains(kAllowForOf));
   parser->set_allow_harmony_numeric_literals(
       flags.Contains(kAllowHarmonyNumericLiterals));
+  parser->set_allow_harmony_object_literals(
+      flags.Contains(kAllowHarmonyObjectLiterals));
+  parser->set_allow_arrow_functions(flags.Contains(kAllowArrowFunctions));
+  parser->set_allow_classes(flags.Contains(kAllowClasses));
 }
 
 
@@ -1218,14 +1262,18 @@
     i::PreParser::PreParseResult result = preparser.PreParseProgram();
     CHECK_EQ(i::PreParser::kPreParseSuccess, result);
   }
-  i::ScriptData data(log.ExtractData());
+
+  bool preparse_error = log.HasError();
 
   // Parse the data
   i::FunctionLiteral* function;
   {
     i::Handle<i::Script> script = factory->NewScript(source);
     i::CompilationInfoWithZone info(script);
-    i::Parser parser(&info);
+    i::Parser::ParseInfo parse_info = {isolate->stack_guard()->real_climit(),
+                                       isolate->heap()->HashSeed(),
+                                       isolate->unicode_cache()};
+    i::Parser parser(&info, &parse_info);
     SetParserFlags(&parser, flags);
     info.MarkAsGlobal();
     parser.Parse();
@@ -1243,7 +1291,7 @@
             isolate, exception_handle, "message").ToHandleChecked());
 
     if (result == kSuccess) {
-      i::OS::Print(
+      v8::base::OS::Print(
           "Parser failed on:\n"
           "\t%s\n"
           "with error:\n"
@@ -1253,8 +1301,8 @@
       CHECK(false);
     }
 
-    if (!data.has_error()) {
-      i::OS::Print(
+    if (!preparse_error) {
+      v8::base::OS::Print(
           "Parser failed on:\n"
           "\t%s\n"
           "with error:\n"
@@ -1264,9 +1312,10 @@
       CHECK(false);
     }
     // Check that preparser and parser produce the same error.
-    i::Handle<i::String> preparser_message = FormatMessage(&data);
+    i::Handle<i::String> preparser_message =
+        FormatMessage(log.ErrorMessageData());
     if (!i::String::Equals(message_string, preparser_message)) {
-      i::OS::Print(
+      v8::base::OS::Print(
           "Expected parser and preparser to produce the same error on:\n"
           "\t%s\n"
           "However, found the following error messages\n"
@@ -1277,17 +1326,18 @@
           preparser_message->ToCString().get());
       CHECK(false);
     }
-  } else if (data.has_error()) {
-    i::OS::Print(
+  } else if (preparse_error) {
+    v8::base::OS::Print(
         "Preparser failed on:\n"
         "\t%s\n"
         "with error:\n"
         "\t%s\n"
         "However, the parser succeeded",
-        source->ToCString().get(), FormatMessage(&data)->ToCString().get());
+        source->ToCString().get(),
+        FormatMessage(log.ErrorMessageData())->ToCString().get());
     CHECK(false);
   } else if (result == kError) {
-    i::OS::Print(
+    v8::base::OS::Print(
         "Expected error on:\n"
         "\t%s\n"
         "However, parser and preparser succeeded",
@@ -1298,15 +1348,22 @@
 
 
 void TestParserSync(const char* source,
-                    const ParserFlag* flag_list,
-                    size_t flag_list_length,
-                    ParserSyncTestResult result = kSuccessOrError) {
+                    const ParserFlag* varying_flags,
+                    size_t varying_flags_length,
+                    ParserSyncTestResult result = kSuccessOrError,
+                    const ParserFlag* always_true_flags = NULL,
+                    size_t always_true_flags_length = 0) {
   i::Handle<i::String> str =
       CcTest::i_isolate()->factory()->NewStringFromAsciiChecked(source);
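+  // Bit i of 'bits' selects varying_flags[i], so every subset of the varying
+  // flags is exercised; always_true_flags are added on top of each subset.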
-  for (int bits = 0; bits < (1 << flag_list_length); bits++) {
+  for (int bits = 0; bits < (1 << varying_flags_length); bits++) {
     i::EnumSet<ParserFlag> flags;
-    for (size_t flag_index = 0; flag_index < flag_list_length; flag_index++) {
-      if ((bits & (1 << flag_index)) != 0) flags.Add(flag_list[flag_index]);
+    for (size_t flag_index = 0; flag_index < varying_flags_length;
+         ++flag_index) {
+      if ((bits & (1 << flag_index)) != 0) flags.Add(varying_flags[flag_index]);
+    }
+    for (size_t flag_index = 0; flag_index < always_true_flags_length;
+         ++flag_index) {
+      flags.Add(always_true_flags[flag_index]);
     }
     TestParserSyncWithFlags(str, flags, result);
   }
@@ -1387,14 +1444,19 @@
   v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
   v8::Context::Scope context_scope(context);
 
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
   static const ParserFlag flags1[] = {
-    kAllowLazy, kAllowHarmonyScoping, kAllowModules, kAllowGenerators,
-    kAllowForOf
+    kAllowArrowFunctions,
+    kAllowClasses,
+    kAllowHarmonyNumericLiterals,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonyScoping,
+    kAllowLazy,
+    kAllowModules,
   };
+
   for (int i = 0; context_data[i][0] != NULL; ++i) {
     for (int j = 0; statement_data[j] != NULL; ++j) {
       for (int k = 0; termination_data[k] != NULL; ++k) {
@@ -1414,7 +1476,7 @@
             termination_data[k],
             context_data[i][1]);
         CHECK(length == kProgramSize);
-        TestParserSync(program.start(), flags1, ARRAY_SIZE(flags1));
+        TestParserSync(program.start(), flags1, arraysize(flags1));
       }
     }
   }
@@ -1423,11 +1485,11 @@
   // interaction with the flags above, so test these separately to reduce
   // the combinatorial explosion.
   static const ParserFlag flags2[] = { kAllowHarmonyNumericLiterals };
-  TestParserSync("0o1234", flags2, ARRAY_SIZE(flags2));
-  TestParserSync("0b1011", flags2, ARRAY_SIZE(flags2));
+  TestParserSync("0o1234", flags2, arraysize(flags2));
+  TestParserSync("0b1011", flags2, arraysize(flags2));
 
   static const ParserFlag flags3[] = { kAllowNativesSyntax };
-  TestParserSync("%DebugPrint(123)", flags3, ARRAY_SIZE(flags3));
+  TestParserSync("%DebugPrint(123)", flags3, arraysize(flags3));
 }
 
 
@@ -1458,22 +1520,49 @@
                        const char* statement_data[],
                        ParserSyncTestResult result,
                        const ParserFlag* flags = NULL,
-                       int flags_len = 0) {
+                       int flags_len = 0,
+                       const ParserFlag* always_true_flags = NULL,
+                       int always_true_flags_len = 0) {
   v8::HandleScope handles(CcTest::isolate());
   v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
   v8::Context::Scope context_scope(context);
 
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
   static const ParserFlag default_flags[] = {
-    kAllowLazy, kAllowHarmonyScoping, kAllowModules, kAllowGenerators,
-    kAllowForOf, kAllowNativesSyntax
+    kAllowArrowFunctions,
+    kAllowClasses,
+    kAllowHarmonyNumericLiterals,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonyScoping,
+    kAllowLazy,
+    kAllowModules,
+    kAllowNativesSyntax,
   };
-  if (!flags) {
+  ParserFlag* generated_flags = NULL;
+  if (flags == NULL) {
     flags = default_flags;
-    flags_len = ARRAY_SIZE(default_flags);
+    flags_len = arraysize(default_flags);
+    if (always_true_flags != NULL) {
+      // Remove always_true_flags from default_flags.
+      CHECK(always_true_flags_len < flags_len);
+      generated_flags = new ParserFlag[flags_len - always_true_flags_len];
+      int flag_index = 0;
+      for (int i = 0; i < flags_len; ++i) {
+        bool use_flag = true;
+        for (int j = 0; j < always_true_flags_len; ++j) {
+          if (flags[i] == always_true_flags[j]) {
+            use_flag = false;
+            break;
+          }
+        }
+        if (use_flag) generated_flags[flag_index++] = flags[i];
+      }
+      CHECK(flag_index == flags_len - always_true_flags_len);
+      flags_len = flag_index;
+      flags = generated_flags;
+    }
   }
   for (int i = 0; context_data[i][0] != NULL; ++i) {
     for (int j = 0; statement_data[j] != NULL; ++j) {
@@ -1493,9 +1582,12 @@
       TestParserSync(program.start(),
                      flags,
                      flags_len,
-                     result);
+                     result,
+                     always_true_flags,
+                     always_true_flags_len);
     }
   }
+  delete[] generated_flags;
 }
 
 
@@ -1523,6 +1615,10 @@
     "function foo(arguments) { }",
     "function foo(bar, eval) { }",
     "function foo(bar, arguments) { }",
+    "(eval) => { }",
+    "(arguments) => { }",
+    "(foo, eval) => { }",
+    "(foo, arguments) => { }",
     "eval = 1;",
     "arguments = 1;",
     "var foo = eval = 1;",
@@ -1579,6 +1675,7 @@
   const char* context_data[][2] = {
     { "\"use strict\";", "" },
     { "function test_func() { \"use strict\";", "}" },
+    { "() => { \"use strict\"; ", "}" },
     { NULL, NULL }
   };
 
@@ -1594,7 +1691,9 @@
     NULL
   };
 
-  RunParserSyncTest(context_data, statement_data, kSuccess);
+  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
 }
 
 
@@ -1606,6 +1705,7 @@
   const char* context_data[][2] = {
     { "\"use strict\";", "" },
     { "function test_func() {\"use strict\"; ", "}"},
+    { "() => { \"use strict\"; ", "}" },
     { NULL, NULL }
   };
 
@@ -1620,10 +1720,13 @@
     "var foo = interface = 1;",
     "++interface;",
     "interface++;",
+    "var yield = 13;",
     NULL
   };
 
-  RunParserSyncTest(context_data, statement_data, kError);
+  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
+                    arraysize(always_flags));
 }
 
 
@@ -1631,6 +1734,7 @@
   const char* context_data[][2] = {
     { "", "" },
     { "function test_func() {", "}"},
+    { "() => {", "}" },
     { NULL, NULL }
   };
 
@@ -1645,10 +1749,13 @@
     "var foo = interface = 1;",
     "++interface;",
     "interface++;",
+    "var yield = 13;",
     NULL
   };
 
-  RunParserSyncTest(context_data, statement_data, kSuccess);
+  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
 }
 
 
@@ -1661,6 +1768,8 @@
     { "\"use strict\";", "" },
     { "var eval; function test_func() {", "}"},
     { "var eval; function test_func() {\"use strict\"; ", "}"},
+    { "var eval; () => {", "}"},
+    { "var eval; () => {\"use strict\"; ", "}"},
     { NULL, NULL }
   };
 
@@ -1671,6 +1780,8 @@
     "function super() { }",
     "function foo(super) { }",
     "function foo(bar, super) { }",
+    "(super) => { }",
+    "(bar, super) => { }",
     "super = 1;",
     "var foo = super = 1;",
     "++super;",
@@ -1683,26 +1794,33 @@
 }
 
 
-TEST(NoErrorsYieldSloppy) {
-  // In sloppy mode, it's okay to use "yield" as identifier, *except* inside a
-  // generator (see next test).
+TEST(NoErrorsLetSloppyAllModes) {
+  // In sloppy mode, it's okay to use "let" as identifier.
   const char* context_data[][2] = {
     { "", "" },
-    { "function is_not_gen() {", "}" },
+    { "function f() {", "}" },
+    { "(function f() {", "})" },
     { NULL, NULL }
   };
 
   const char* statement_data[] = {
-    "var yield;",
-    "var foo, yield;",
-    "try { } catch (yield) { }",
-    "function yield() { }",
-    "function foo(yield) { }",
-    "function foo(bar, yield) { }",
-    "yield = 1;",
-    "var foo = yield = 1;",
-    "++yield;",
-    "yield++;",
+    "var let;",
+    "var foo, let;",
+    "try { } catch (let) { }",
+    "function let() { }",
+    "(function let() { })",
+    "function foo(let) { }",
+    "function foo(bar, let) { }",
+    "let = 1;",
+    "var foo = let = 1;",
+    "let * 2;",
+    "++let;",
+    "let++;",
+    "let: 34",
+    "function let(let) { let: let(let + let(0)); }",
+    "({ let: 1 })",
+    "({ get let() { 1 } })",
+    "let(100)",
     NULL
   };
 
@@ -1710,9 +1828,13 @@
 }
 
 
-TEST(ErrorsYieldSloppyGenerator) {
+TEST(NoErrorsYieldSloppyAllModes) {
+  // In sloppy mode, it's okay to use "yield" as identifier, *except* inside a
+  // generator (see other test).
   const char* context_data[][2] = {
-    { "function * is_gen() {", "}" },
+    { "", "" },
+    { "function not_gen() {", "}" },
+    { "(function not_gen() {", "})" },
     { NULL, NULL }
   };
 
@@ -1721,28 +1843,76 @@
     "var foo, yield;",
     "try { } catch (yield) { }",
     "function yield() { }",
-    // BUG: These should not be allowed, but they are (if kAllowGenerators is
-    // set)
-    // "function foo(yield) { }",
-    // "function foo(bar, yield) { }",
+    "(function yield() { })",
+    "function foo(yield) { }",
+    "function foo(bar, yield) { }",
     "yield = 1;",
     "var foo = yield = 1;",
+    "yield * 2;",
     "++yield;",
     "yield++;",
+    "yield: 34",
+    "function yield(yield) { yield: yield (yield + yield(0)); }",
+    "({ yield: 1 })",
+    "({ get yield() { 1 } })",
+    "yield(100)",
+    "yield[100]",
     NULL
   };
 
-  // If generators are not allowed, the error will be produced at the '*' token,
-  // so this test works both with and without the kAllowGenerators flag.
-  RunParserSyncTest(context_data, statement_data, kError);
+  RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(NoErrorsYieldSloppyGeneratorsEnabled) {
+  // In sloppy mode, it's okay to use "yield" as identifier, *except* inside a
+  // generator (see next test).
+  const char* context_data[][2] = {
+    { "", "" },
+    { "function not_gen() {", "}" },
+    { "function * gen() { function not_gen() {", "} }" },
+    { "(function not_gen() {", "})" },
+    { "(function * gen() { (function not_gen() {", "}) })" },
+    { NULL, NULL }
+  };
+
+  const char* statement_data[] = {
+    "var yield;",
+    "var foo, yield;",
+    "try { } catch (yield) { }",
+    "function yield() { }",
+    "(function yield() { })",
+    "function foo(yield) { }",
+    "function foo(bar, yield) { }",
+    "function * yield() { }",
+    "(function * yield() { })",
+    "yield = 1;",
+    "var foo = yield = 1;",
+    "yield * 2;",
+    "++yield;",
+    "yield++;",
+    "yield: 34",
+    "function yield(yield) { yield: yield (yield + yield(0)); }",
+    "({ yield: 1 })",
+    "({ get yield() { 1 } })",
+    "yield(100)",
+    "yield[100]",
+    NULL
+  };
+
+  RunParserSyncTest(context_data, statement_data, kSuccess);
 }
 
 
 TEST(ErrorsYieldStrict) {
   const char* context_data[][2] = {
     { "\"use strict\";", "" },
-    { "\"use strict\"; function is_not_gen() {", "}" },
+    { "\"use strict\"; function not_gen() {", "}" },
     { "function test_func() {\"use strict\"; ", "}"},
+    { "\"use strict\"; function * gen() { function not_gen() {", "} }" },
+    { "\"use strict\"; (function not_gen() {", "})" },
+    { "\"use strict\"; (function * gen() { (function not_gen() {", "}) })" },
+    { "() => {\"use strict\"; ", "}" },
     { NULL, NULL }
   };
 
@@ -1751,12 +1921,16 @@
     "var foo, yield;",
     "try { } catch (yield) { }",
     "function yield() { }",
+    "(function yield() { })",
     "function foo(yield) { }",
     "function foo(bar, yield) { }",
+    "function * yield() { }",
+    "(function * yield() { })",
     "yield = 1;",
     "var foo = yield = 1;",
     "++yield;",
     "yield++;",
+    "yield: 34;",
     NULL
   };
 
@@ -1764,22 +1938,111 @@
 }
 
 
-TEST(ErrorsYield) {
+TEST(NoErrorsGenerator) {
   const char* context_data[][2] = {
-    { "function * is_gen() {", "}" },
+    { "function * gen() {", "}" },
+    { "(function * gen() {", "})" },
+    { "(function * () {", "})" },
     { NULL, NULL }
   };
 
   const char* statement_data[] = {
-    "yield 2;",  // this is legal inside generator
-    "yield * 2;",  // this is legal inside generator
+    // A generator with an empty body is valid.
+    "",
+    // Valid yield expressions inside generators.
+    "yield 2;",
+    "yield * 2;",
+    "yield * \n 2;",
+    "yield yield 1;",
+    "yield * yield * 1;",
+    "yield 3 + (yield 4);",
+    "yield * 3 + (yield * 4);",
+    "(yield * 3) + (yield * 4);",
+    "yield 3; yield 4;",
+    "yield * 3; yield * 4;",
+    "(function (yield) { })",
+    "yield { yield: 12 }",
+    "yield /* comment */ { yield: 12 }",
+    "yield * \n { yield: 12 }",
+    "yield /* comment */ * \n { yield: 12 }",
+    // You can return in a generator.
+    "yield 1; return",
+    "yield * 1; return",
+    "yield 1; return 37",
+    "yield * 1; return 37",
+    "yield 1; return 37; yield 'dead';",
+    "yield * 1; return 37; yield * 'dead';",
+    // Yield is still a valid key in object literals.
+    "({ yield: 1 })",
+    "({ get yield() { } })",
+    // Yield without RHS.
+    "yield;",
+    "yield",
+    "yield\n",
+    "yield /* comment */"
+    "yield // comment\n"
+    "(yield)",
+    "[yield]",
+    "{yield}",
+    "yield, yield",
+    "yield; yield",
+    "(yield) ? yield : yield",
+    "(yield) \n ? yield : yield",
+    // If there is a newline before the next token, we don't look for RHS.
+    "yield\nfor (;;) {}",
     NULL
   };
 
-  // Here we cannot assert that there is no error, since there will be without
-  // the kAllowGenerators flag. However, we test that Parser and PreParser
-  // produce the same errors.
-  RunParserSyncTest(context_data, statement_data, kSuccessOrError);
+  RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
+
+TEST(ErrorsYieldGenerator) {
+  const char* context_data[][2] = {
+    { "function * gen() {", "}" },
+    { "\"use strict\"; function * gen() {", "}" },
+    { NULL, NULL }
+  };
+
+  const char* statement_data[] = {
+    // Invalid yield expressions inside generators.
+    "var yield;",
+    "var foo, yield;",
+    "try { } catch (yield) { }",
+    "function yield() { }",
+    // The name of a named function expression (NFE) is let-bound in the
+    // generator, which does not permit yield to be an identifier.
+    "(function yield() { })",
+    "(function * yield() { })",
+    // Yield isn't valid as a formal parameter for generators.
+    "function * foo(yield) { }",
+    "(function * foo(yield) { })",
+    "yield = 1;",
+    "var foo = yield = 1;",
+    "++yield;",
+    "yield++;",
+    "yield *",
+    "(yield *)",
+    // Yield binds very loosely, so this parses as "yield (3 + yield 4)", which
+    // is invalid.
+    "yield 3 + yield 4;",
+    "yield: 34",
+    "yield ? 1 : 2",
+    // Parses as yield (/ yield): invalid.
+    "yield / yield",
+    "+ yield",
+    "+ yield 3",
+    // Invalid (no newline allowed between yield and *).
+    "yield\n*3",
+    // Invalid (we see a newline, so we parse {yield:42} as a statement, not an
+    // object literal, and yield is not a valid label).
+    "yield\n{yield: 42}",
+    "yield /* comment */\n {yield: 42}",
+    "yield //comment\n {yield: 42}",
+    NULL
+  };
+
+  RunParserSyncTest(context_data, statement_data, kError);
 }
 
 
@@ -1787,16 +2050,18 @@
   // Tests that illegal tokens as names of a strict function produce the correct
   // errors.
   const char* context_data[][2] = {
-    { "", ""},
-    { "\"use strict\";", ""},
+    { "function ", ""},
+    { "\"use strict\"; function", ""},
+    { "function * ", ""},
+    { "\"use strict\"; function * ", ""},
     { NULL, NULL }
   };
 
   const char* statement_data[] = {
-    "function eval() {\"use strict\";}",
-    "function arguments() {\"use strict\";}",
-    "function interface() {\"use strict\";}",
-    "function yield() {\"use strict\";}",
+    "eval() {\"use strict\";}",
+    "arguments() {\"use strict\";}",
+    "interface() {\"use strict\";}",
+    "yield() {\"use strict\";}",
     // Future reserved words are always illegal
     "function super() { }",
     "function super() {\"use strict\";}",
@@ -1809,15 +2074,15 @@
 
 TEST(NoErrorsNameOfStrictFunction) {
   const char* context_data[][2] = {
-    { "", ""},
+    { "function ", ""},
     { NULL, NULL }
   };
 
   const char* statement_data[] = {
-    "function eval() { }",
-    "function arguments() { }",
-    "function interface() { }",
-    "function yield() { }",
+    "eval() { }",
+    "arguments() { }",
+    "interface() { }",
+    "yield() { }",
     NULL
   };
 
@@ -1825,12 +2090,30 @@
 }
 
 
+TEST(NoErrorsNameOfStrictGenerator) {
+  const char* context_data[][2] = {
+    { "function * ", ""},
+    { NULL, NULL }
+  };
+
+  const char* statement_data[] = {
+    "eval() { }",
+    "arguments() { }",
+    "interface() { }",
+    "yield() { }",
+    NULL
+  };
+
+  RunParserSyncTest(context_data, statement_data, kSuccess);
+}
+
 
 TEST(ErrorsIllegalWordsAsLabelsSloppy) {
   // Using future reserved words as labels is always an error.
   const char* context_data[][2] = {
     { "", ""},
     { "function test_func() {", "}" },
+    { "() => {", "}" },
     { NULL, NULL }
   };
 
@@ -1848,6 +2131,7 @@
   const char* context_data[][2] = {
     { "\"use strict\";", "" },
     { "function test_func() {\"use strict\"; ", "}"},
+    { "() => {\"use strict\"; ", "}" },
     { NULL, NULL }
   };
 
@@ -1867,8 +2151,10 @@
   const char* context_data[][2] = {
     { "", ""},
     { "function test_func() {", "}" },
+    { "() => {", "}" },
     { "\"use strict\";", "" },
     { "\"use strict\"; function test_func() {", "}" },
+    { "\"use strict\"; () => {", "}" },
     { NULL, NULL }
   };
 
@@ -1879,7 +2165,9 @@
     NULL
   };
 
-  RunParserSyncTest(context_data, statement_data, kSuccess);
+  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
 }
 
 
@@ -1888,6 +2176,7 @@
   const char* context_data[][2] = {
     { "", ""},
     { "function test_func() {", "}" },
+    { "() => {", "}" },
     { NULL, NULL }
   };
 
@@ -1966,9 +2255,8 @@
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope handles(isolate);
 
-  int marker;
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
-      reinterpret_cast<uintptr_t>(&marker) - 128 * 1024);
+      i::GetCurrentStackPosition() - 128 * 1024);
 
   struct TestCase {
     const char* program;
@@ -1994,22 +2282,20 @@
         factory->NewStringFromUtf8(i::CStrVector(program)).ToHandleChecked();
     i::Handle<i::Script> script = factory->NewScript(source);
     i::CompilationInfoWithZone info(script);
-    i::ScriptData* data = NULL;
-    info.SetCachedData(&data, i::PRODUCE_CACHED_DATA);
+    i::ScriptData* sd = NULL;
+    info.SetCachedData(&sd, v8::ScriptCompiler::kProduceParserCache);
     i::Parser::Parse(&info, true);
-    CHECK(data);
-    CHECK(!data->HasError());
+    i::ParseData pd(sd);
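+    // ParseData wraps the raw cached bytes and exposes the function entries
+    // recorded by the preparser.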
 
-    if (data->function_count() != test_cases[i].functions) {
-      i::OS::Print(
+    if (pd.FunctionCount() != test_cases[i].functions) {
+      v8::base::OS::Print(
           "Expected preparse data for program:\n"
           "\t%s\n"
           "to contain %d functions, however, received %d functions.\n",
-          program, test_cases[i].functions,
-          data->function_count());
+          program, test_cases[i].functions, pd.FunctionCount());
       CHECK(false);
     }
-    delete data;
+    delete sd;
   }
 }
 
@@ -2130,9 +2416,13 @@
     NULL
   };
 
-  // Parsing will fail or succeed depending on whether we allow natives syntax
-  // or not.
-  RunParserSyncTest(context_data, statement_data, kSuccessOrError);
+  // This test requires kAllowNativesSyntax to succeed.
+  static const ParserFlag always_true_flags[] = {
+    kAllowNativesSyntax
+  };
+
+  RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+                    always_true_flags, 1);
 }
 
 
@@ -2197,10 +2487,12 @@
 TEST(StrictObjectLiteralChecking) {
   const char* strict_context_data[][2] = {
     {"\"use strict\"; var myobject = {", "};"},
+    {"\"use strict\"; var myobject = {", ",};"},
     { NULL, NULL }
   };
   const char* non_strict_context_data[][2] = {
     {"var myobject = {", "};"},
+    {"var myobject = {", ",};"},
     { NULL, NULL }
   };
 
@@ -2229,33 +2521,36 @@
   };
 
   const char* statement_data[] = {
-    "foo: 1, get foo() {}",
-    "foo: 1, set foo(v) {}",
-    "\"foo\": 1, get \"foo\"() {}",
-    "\"foo\": 1, set \"foo\"(v) {}",
-    "1: 1, get 1() {}",
-    "1: 1, set 1() {}",
-    // It's counter-intuitive, but these collide too (even in classic
-    // mode). Note that we can have "foo" and foo as properties in classic mode,
-    // but we cannot have "foo" and get foo, or foo and get "foo".
-    "foo: 1, get \"foo\"() {}",
-    "foo: 1, set \"foo\"(v) {}",
-    "\"foo\": 1, get foo() {}",
-    "\"foo\": 1, set foo(v) {}",
-    "1: 1, get \"1\"() {}",
-    "1: 1, set \"1\"() {}",
-    "\"1\": 1, get 1() {}"
-    "\"1\": 1, set 1(v) {}"
-    // Wrong number of parameters
-    "get bar(x) {}",
-    "get bar(x, y) {}",
-    "set bar() {}",
-    "set bar(x, y) {}",
-    // Parsing FunctionLiteral for getter or setter fails
-    "get foo( +",
-    "get foo() \"error\"",
-    NULL
-  };
+      ",",
+      "foo: 1, get foo() {}",
+      "foo: 1, set foo(v) {}",
+      "\"foo\": 1, get \"foo\"() {}",
+      "\"foo\": 1, set \"foo\"(v) {}",
+      "1: 1, get 1() {}",
+      "1: 1, set 1() {}",
+      "get foo() {}, get foo() {}",
+      "set foo(_) {}, set foo(_) {}",
+      // It's counter-intuitive, but these collide too (even in classic
+      // mode). Note that we can have "foo" and foo as properties in classic
+      // mode, but we cannot have "foo" and get foo, or foo and get "foo".
+      "foo: 1, get \"foo\"() {}",
+      "foo: 1, set \"foo\"(v) {}",
+      "\"foo\": 1, get foo() {}",
+      "\"foo\": 1, set foo(v) {}",
+      "1: 1, get \"1\"() {}",
+      "1: 1, set \"1\"() {}",
+      "\"1\": 1, get 1() {}"
+      "\"1\": 1, set 1(v) {}"
+      // Wrong number of parameters
+      "get bar(x) {}",
+      "get bar(x, y) {}",
+      "set bar() {}",
+      "set bar(x, y) {}",
+      // Parsing FunctionLiteral for getter or setter fails
+      "get foo( +",
+      "get foo() \"error\"",
+      NULL};
 
   RunParserSyncTest(context_data, statement_data, kError);
 }
@@ -2264,7 +2559,9 @@
 TEST(NoErrorsObjectLiteralChecking) {
   const char* context_data[][2] = {
     {"var myobject = {", "};"},
+    {"var myobject = {", ",};"},
     {"\"use strict\"; var myobject = {", "};"},
+    {"\"use strict\"; var myobject = {", ",};"},
     { NULL, NULL }
   };
 
@@ -2290,6 +2587,8 @@
     "\"foo\": 1, set \"bar\"(v) {}",
     "1: 1, get 2() {}",
     "1: 1, set 2(v) {}",
+    "get: 1, get foo() {}",
+    "set: 1, set foo(_) {}",
     // Keywords, future reserved and strict future reserved are also allowed as
     // property names.
     "if: 4",
@@ -2512,7 +2811,7 @@
       "var obj1 = { oXj2 : { foo1: function() {} } }; "
       "%FunctionGetInferredName(obj1.oXj2.foo1)");
   uint16_t* two_byte_name = AsciiToTwoByteString("obj1.oXj2.foo1");
-  // Make it really non-ASCII (replace the Xs with a non-ASCII character).
+  // Make it really non-Latin1 (replace the Xs with a non-Latin1 character).
   two_byte_source[14] = two_byte_source[78] = two_byte_name[6] = 0x010d;
   v8::Local<v8::String> source =
       v8::String::NewFromTwoByte(isolate, two_byte_source);
@@ -2549,3 +2848,1124 @@
   i::DeleteArray(two_byte_source);
   i::DeleteArray(two_byte_name);
 }
+
+
+TEST(RegressionLazyFunctionWithErrorWithArg) {
+  // The bug occurred when a lazy function had an error which requires a
+  // parameter (such as "unknown label" here). The error message was processed
+  // before the AstValueFactory containing the error message string was
+  // internalized.
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  LocalContext env;
+  i::FLAG_lazy = true;
+  i::FLAG_min_preparse_length = 0;
+  CompileRun("function this_is_lazy() {\n"
+             "  break p;\n"
+             "}\n"
+             "this_is_lazy();\n");
+}
+
+
+TEST(SerializationOfMaybeAssignmentFlag) {
+  i::Isolate* isolate = CcTest::i_isolate();
+  i::Factory* factory = isolate->factory();
+  i::HandleScope scope(isolate);
+  LocalContext env;
+
+  const char* src =
+      "function h() {"
+      "  var result = [];"
+      "  function f() {"
+      "    result.push(2);"
+      "  }"
+      "  function assertResult(r) {"
+      "    f();"
+      "    result = [];"
+      "  }"
+      "  assertResult([2]);"
+      "  assertResult([2]);"
+      "  return f;"
+      "};"
+      "h();";
+
+  i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
+  i::SNPrintF(program, "%s", src);
+  i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
+  source->PrintOn(stdout);
+  printf("\n");
+  i::Zone zone(isolate);
+  v8::Local<v8::Value> v = CompileRun(src);
+  i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
+  i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
+  i::Context* context = f->context();
+  i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
+  avf.Internalize(isolate);
+  const i::AstRawString* name = avf.GetOneByteString("result");
+  i::Handle<i::String> str = name->string();
+  CHECK(str->IsInternalizedString());
+  i::Scope* global_scope =
+      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
+  global_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
+  DCHECK(s != global_scope);
+  DCHECK(name != NULL);
+
+  // Get result from h's function context (that is f's context)
+  i::Variable* var = s->Lookup(name);
+
+  CHECK(var != NULL);
+  // Maybe assigned should survive deserialization
+  CHECK(var->maybe_assigned() == i::kMaybeAssigned);
+  // TODO(sigurds) Figure out if is_used should survive context serialization.
+}
+
+
+TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
+  i::Isolate* isolate = CcTest::i_isolate();
+  i::Factory* factory = isolate->factory();
+  i::HandleScope scope(isolate);
+  LocalContext env;
+
+  const char* src =
+      "function f(x) {"
+      "    var a = arguments;"
+      "    function g(i) {"
+      "      ++a[0];"
+      "    };"
+      "    return g;"
+      "  }"
+      "f(0);";
+
+  i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
+  i::SNPrintF(program, "%s", src);
+  i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
+  source->PrintOn(stdout);
+  printf("\n");
+  i::Zone zone(isolate);
+  v8::Local<v8::Value> v = CompileRun(src);
+  i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
+  i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
+  i::Context* context = f->context();
+  i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
+  avf.Internalize(isolate);
+
+  i::Scope* global_scope =
+      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
+  global_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
+  DCHECK(s != global_scope);
+  const i::AstRawString* name_x = avf.GetOneByteString("x");
+
+  // Look up x in f's function context (that is, g's outer context).
+  i::Variable* var_x = s->Lookup(name_x);
+  CHECK(var_x != NULL);
+  CHECK(var_x->maybe_assigned() == i::kMaybeAssigned);
+}
+
+
+TEST(ExportsMaybeAssigned) {
+  i::FLAG_use_strict = true;
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_harmony_modules = true;
+
+  i::Isolate* isolate = CcTest::i_isolate();
+  i::Factory* factory = isolate->factory();
+  i::HandleScope scope(isolate);
+  LocalContext env;
+
+  const char* src =
+      "module A {"
+      "  export var x = 1;"
+      "  export function f() { return x };"
+      "  export const y = 2;"
+      "  module B {}"
+      "  export module C {}"
+      "};"
+      "A.f";
+
+  i::ScopedVector<char> program(Utf8LengthHelper(src) + 1);
+  i::SNPrintF(program, "%s", src);
+  i::Handle<i::String> source = factory->InternalizeUtf8String(program.start());
+  source->PrintOn(stdout);
+  printf("\n");
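+  // Deserialize the module scope through f's context and verify the
+  // maybe-assigned flags of the exported bindings below.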
+  i::Zone zone(isolate);
+  v8::Local<v8::Value> v = CompileRun(src);
+  i::Handle<i::Object> o = v8::Utils::OpenHandle(*v);
+  i::Handle<i::JSFunction> f = i::Handle<i::JSFunction>::cast(o);
+  i::Context* context = f->context();
+  i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
+  avf.Internalize(isolate);
+
+  i::Scope* global_scope =
+      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
+  global_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
+  DCHECK(s != global_scope);
+  const i::AstRawString* name_x = avf.GetOneByteString("x");
+  const i::AstRawString* name_f = avf.GetOneByteString("f");
+  const i::AstRawString* name_y = avf.GetOneByteString("y");
+  const i::AstRawString* name_B = avf.GetOneByteString("B");
+  const i::AstRawString* name_C = avf.GetOneByteString("C");
+
+  // Look up the exported bindings in the module context reached through f.
+  i::Variable* var_x = s->Lookup(name_x);
+  CHECK(var_x != NULL);
+  CHECK(var_x->maybe_assigned() == i::kMaybeAssigned);
+  i::Variable* var_f = s->Lookup(name_f);
+  CHECK(var_f != NULL);
+  CHECK(var_f->maybe_assigned() == i::kMaybeAssigned);
+  i::Variable* var_y = s->Lookup(name_y);
+  CHECK(var_y != NULL);
+  CHECK(var_y->maybe_assigned() == i::kNotAssigned);
+  i::Variable* var_B = s->Lookup(name_B);
+  CHECK(var_B != NULL);
+  CHECK(var_B->maybe_assigned() == i::kNotAssigned);
+  i::Variable* var_C = s->Lookup(name_C);
+  CHECK(var_C != NULL);
+  CHECK(var_C->maybe_assigned() == i::kNotAssigned);
+}
+
+
+TEST(InnerAssignment) {
+  i::Isolate* isolate = CcTest::i_isolate();
+  i::Factory* factory = isolate->factory();
+  i::HandleScope scope(isolate);
+  LocalContext env;
+
+  const char* prefix = "function f() {";
+  const char* midfix = " function g() {";
+  const char* suffix = "}}";
+  struct { const char* source; bool assigned; bool strict; } outers[] = {
+    // Actual assignments.
+    { "var x; var x = 5;", true, false },
+    { "var x; { var x = 5; }", true, false },
+    { "'use strict'; let x; x = 6;", true, true },
+    { "var x = 5; function x() {}", true, false },
+    // Actual non-assignments.
+    { "var x;", false, false },
+    { "var x = 5;", false, false },
+    { "'use strict'; let x;", false, true },
+    { "'use strict'; let x = 6;", false, true },
+    { "'use strict'; var x = 0; { let x = 6; }", false, true },
+    { "'use strict'; var x = 0; { let x; x = 6; }", false, true },
+    { "'use strict'; let x = 0; { let x = 6; }", false, true },
+    { "'use strict'; let x = 0; { let x; x = 6; }", false, true },
+    { "var x; try {} catch (x) { x = 5; }", false, false },
+    { "function x() {}", false, false },
+    // Eval approximation.
+    { "var x; eval('');", true, false },
+    { "eval(''); var x;", true, false },
+    { "'use strict'; let x; eval('');", true, true },
+    { "'use strict'; eval(''); let x;", true, true },
+    // Non-assignments not recognized, because the analysis is only
+    // approximate.
+    { "var x; var x;", true, false },
+    { "var x = 5; var x;", true, false },
+    { "var x; { var x; }", true, false },
+    { "var x; function x() {}", true, false },
+    { "function x() {}; var x;", true, false },
+    { "var x; try {} catch (x) { var x = 5; }", true, false },
+  };
+  struct { const char* source; bool assigned; bool with; } inners[] = {
+    // Actual assignments.
+    { "x = 1;", true, false },
+    { "x++;", true, false },
+    { "++x;", true, false },
+    { "x--;", true, false },
+    { "--x;", true, false },
+    { "{ x = 1; }", true, false },
+    { "'use strict'; { let x; }; x = 0;", true, false },
+    { "'use strict'; { const x = 1; }; x = 0;", true, false },
+    { "'use strict'; { function x() {} }; x = 0;", true, false },
+    { "with ({}) { x = 1; }", true, true },
+    { "eval('');", true, false },
+    { "'use strict'; { let y; eval('') }", true, false },
+    { "function h() { x = 0; }", true, false },
+    { "(function() { x = 0; })", true, false },
+    { "(function() { x = 0; })", true, false },
+    { "with ({}) (function() { x = 0; })", true, true },
+    // Actual non-assignments.
+    { "", false, false },
+    { "x;", false, false },
+    { "var x;", false, false },
+    { "var x = 8;", false, false },
+    { "var x; x = 8;", false, false },
+    { "'use strict'; let x;", false, false },
+    { "'use strict'; let x = 8;", false, false },
+    { "'use strict'; let x; x = 8;", false, false },
+    { "'use strict'; const x = 8;", false, false },
+    { "function x() {}", false, false },
+    { "function x() { x = 0; }", false, false },
+    { "function h(x) { x = 0; }", false, false },
+    { "'use strict'; { let x; x = 0; }", false, false },
+    { "{ var x; }; x = 0;", false, false },
+    { "with ({}) {}", false, true },
+    { "var x; { with ({}) { x = 1; } }", false, true },
+    { "try {} catch(x) { x = 0; }", false, false },
+    { "try {} catch(x) { with ({}) { x = 1; } }", false, true },
+    // Eval approximation.
+    { "eval('');", true, false },
+    { "function h() { eval(''); }", true, false },
+    { "(function() { eval(''); })", true, false },
+    // Shadowing not recognized because of eval approximation.
+    { "var x; eval('');", true, false },
+    { "'use strict'; let x; eval('');", true, false },
+    { "try {} catch(x) { eval(''); }", true, false },
+    { "function x() { eval(''); }", true, false },
+    { "(function(x) { eval(''); })", true, false },
+  };
+
+  // A long dummy comment used to make a function large enough to be
+  // compiled lazily.
+  int comment_len = 2048;
+  i::ScopedVector<char> comment(comment_len + 1);
+  i::SNPrintF(comment, "/*%0*d*/", comment_len - 4, 0);
+  int prefix_len = Utf8LengthHelper(prefix);
+  int midfix_len = Utf8LengthHelper(midfix);
+  int suffix_len = Utf8LengthHelper(suffix);
+  for (unsigned i = 0; i < arraysize(outers); ++i) {
+    const char* outer = outers[i].source;
+    int outer_len = Utf8LengthHelper(outer);
+    for (unsigned j = 0; j < arraysize(inners); ++j) {
+      for (unsigned outer_lazy = 0; outer_lazy < 2; ++outer_lazy) {
+        for (unsigned inner_lazy = 0; inner_lazy < 2; ++inner_lazy) {
+          if (outers[i].strict && inners[j].with) continue;
+          const char* inner = inners[j].source;
+          int inner_len = Utf8LengthHelper(inner);
+
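+          // Optionally pad the outer and inner functions with the long
+          // comment to exercise eager and lazy compilation independently.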
+          int outer_comment_len = outer_lazy ? comment_len : 0;
+          int inner_comment_len = inner_lazy ? comment_len : 0;
+          const char* outer_comment = outer_lazy ? comment.start() : "";
+          const char* inner_comment = inner_lazy ? comment.start() : "";
+          int len = prefix_len + outer_comment_len + outer_len + midfix_len +
+                    inner_comment_len + inner_len + suffix_len;
+          i::ScopedVector<char> program(len + 1);
+
+          i::SNPrintF(program, "%s%s%s%s%s%s%s", prefix, outer_comment, outer,
+                      midfix, inner_comment, inner, suffix);
+          i::Handle<i::String> source =
+              factory->InternalizeUtf8String(program.start());
+          source->PrintOn(stdout);
+          printf("\n");
+
+          i::Handle<i::Script> script = factory->NewScript(source);
+          i::CompilationInfoWithZone info(script);
+          i::Parser::ParseInfo parse_info = {
+              isolate->stack_guard()->real_climit(),
+              isolate->heap()->HashSeed(), isolate->unicode_cache()};
+          i::Parser parser(&info, &parse_info);
+          parser.set_allow_harmony_scoping(true);
+          CHECK(parser.Parse());
+          CHECK(i::Rewriter::Rewrite(&info));
+          CHECK(i::Scope::Analyze(&info));
+          CHECK(info.function() != NULL);
+
+          i::Scope* scope = info.function()->scope();
+          CHECK_EQ(scope->inner_scopes()->length(), 1);
+          i::Scope* inner_scope = scope->inner_scopes()->at(0);
+          const i::AstRawString* var_name =
+              info.ast_value_factory()->GetOneByteString("x");
+          i::Variable* var = inner_scope->Lookup(var_name);
+          bool expected = outers[i].assigned || inners[j].assigned;
+          CHECK(var != NULL);
+          CHECK(var->is_used() || !expected);
+          CHECK((var->maybe_assigned() == i::kMaybeAssigned) == expected);
+        }
+      }
+    }
+  }
+}
+
+namespace {
+
+int* global_use_counts = NULL;
+
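+// Tallies use-counter callbacks into the array installed by the active test.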
+void MockUseCounterCallback(v8::Isolate* isolate,
+                            v8::Isolate::UseCounterFeature feature) {
+  ++global_use_counts[feature];
+}
+
+}  // namespace
+
+
+TEST(UseAsmUseCount) {
+  i::Isolate* isolate = CcTest::i_isolate();
+  i::HandleScope scope(isolate);
+  LocalContext env;
+  int use_counts[v8::Isolate::kUseCounterFeatureCount] = {};
+  global_use_counts = use_counts;
+  CcTest::isolate()->SetUseCounterCallback(MockUseCounterCallback);
+  CompileRun("\"use asm\";\n"
+             "var foo = 1;\n"
+             "\"use asm\";\n"  // Only the first one counts.
+             "function bar() { \"use asm\"; var baz = 1; }");
+  CHECK_EQ(2, use_counts[v8::Isolate::kUseAsm]);
+}
+
+
+TEST(ErrorsArrowFunctions) {
+  // Tests that parser and preparser generate the same kind of errors
+  // on invalid arrow function syntax.
+  const char* context_data[][2] = {
+    {"", ";"},
+    {"v = ", ";"},
+    {"bar ? (", ") : baz;"},
+    {"bar ? baz : (", ");"},
+    {"bar[", "];"},
+    {"bar, ", ";"},
+    {"", ", bar;"},
+    {NULL, NULL}
+  };
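+  // Each statement below is spliced into every context above and must be
+  // rejected identically by parser and preparser.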
+
+  const char* statement_data[] = {
+    "=> 0",
+    "=>",
+    "() =>",
+    "=> {}",
+    ") => {}",
+    ", => {}",
+    "(,) => {}",
+    "return => {}",
+    "() => {'value': 42}",
+
+    // Check that the early return introduced in ParsePrimaryExpression
+    // does not accept stray closing parentheses.
+    ")",
+    ") => 0",
+    "foo[()]",
+    "()",
+
+    // Parameter lists with extra parens should be recognized as errors.
+    "(()) => 0",
+    "((x)) => 0",
+    "((x, y)) => 0",
+    "(x, (y)) => 0",
+    "((x, y, z)) => 0",
+    "(x, (y, z)) => 0",
+    "((x, y), z) => 0",
+
+    // Parameter lists are always validated as strict, so those are errors.
+    "eval => {}",
+    "arguments => {}",
+    "yield => {}",
+    "interface => {}",
+    "(eval) => {}",
+    "(arguments) => {}",
+    "(yield) => {}",
+    "(interface) => {}",
+    "(eval, bar) => {}",
+    "(bar, eval) => {}",
+    "(bar, arguments) => {}",
+    "(bar, yield) => {}",
+    "(bar, interface) => {}",
+    // TODO(aperez): Detecting duplicates does not work in PreParser.
+    // "(bar, bar) => {}",
+
+    // The parameter list is parsed as an expression, but only a
+    // comma-separated list of identifiers is valid.
+    "32 => {}",
+    "(32) => {}",
+    "(a, 32) => {}",
+    "if => {}",
+    "(if) => {}",
+    "(a, if) => {}",
+    "a + b => {}",
+    "(a + b) => {}",
+    "(a + b, c) => {}",
+    "(a, b - c) => {}",
+    "\"a\" => {}",
+    "(\"a\") => {}",
+    "(\"a\", b) => {}",
+    "(a, \"b\") => {}",
+    "-a => {}",
+    "(-a) => {}",
+    "(-a, b) => {}",
+    "(a, -b) => {}",
+    "{} => {}",
+    "({}) => {}",
+    "(a, {}) => {}",
+    "({}, a) => {}",
+    "a++ => {}",
+    "(a++) => {}",
+    "(a++, b) => {}",
+    "(a, b++) => {}",
+    "[] => {}",
+    "([]) => {}",
+    "(a, []) => {}",
+    "([], a) => {}",
+    "(a = b) => {}",
+    "(a = b, c) => {}",
+    "(a, b = c) => {}",
+    "(foo ? bar : baz) => {}",
+    "(a, foo ? bar : baz) => {}",
+    "(foo ? bar : baz, a) => {}",
+    NULL
+  };
+
+  // The test is quite slow, so run it with a reduced set of flags.
+  static const ParserFlag flags[] = {kAllowLazy, kAllowHarmonyScoping};
+  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  RunParserSyncTest(context_data, statement_data, kError, flags,
+                    arraysize(flags), always_flags, arraysize(always_flags));
+}
+
+
+TEST(NoErrorsArrowFunctions) {
+  // Tests that parser and preparser accept valid arrow functions syntax.
+  const char* context_data[][2] = {
+    {"", ";"},
+    {"bar ? (", ") : baz;"},
+    {"bar ? baz : (", ");"},
+    {"bar, ", ";"},
+    {"", ", bar;"},
+    {NULL, NULL}
+  };
+
+  const char* statement_data[] = {
+    "() => {}",
+    "() => { return 42 }",
+    "x => { return x; }",
+    "(x) => { return x; }",
+    "(x, y) => { return x + y; }",
+    "(x, y, z) => { return x + y + z; }",
+    "(x, y) => { x.a = y; }",
+    "() => 42",
+    "x => x",
+    "x => x * x",
+    "(x) => x",
+    "(x) => x * x",
+    "(x, y) => x + y",
+    "(x, y, z) => x, y, z",
+    "(x, y) => x.a = y",
+    "() => ({'value': 42})",
+    "x => y => x + y",
+    "(x, y) => (u, v) => x*u + y*v",
+    "(x, y) => z => z * (x + y)",
+    "x => (y, z) => z * (x + y)",
+
+    // These are comma-separated expressions, with arrow functions as items.
+    // They stress the code for validating arrow function parameter lists.
+    "a, b => 0",
+    "a, b, (c, d) => 0",
+    "(a, b, (c, d) => 0)",
+    "(a, b) => 0, (c, d) => 1",
+    "(a, b => {}, a => a + 1)",
+    "((a, b) => {}, (a => a + 1))",
+    "(a, (a, (b, c) => 0))",
+
+    // Arrow takes precedence, so this is the same as: foo ? bar : (baz => {})
+    "foo ? bar : baz => {}",
+    NULL
+  };
+
+  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(NoErrorsSuper) {
+  // Tests that parser and preparser accept 'super' keyword in right places.
+  const char* context_data[][2] = {{"", ";"},
+                                   {"k = ", ";"},
+                                   {"foo(", ");"},
+                                   {NULL, NULL}};
+
+  const char* statement_data[] = {
+    "super.x",
+    "super[27]",
+    "new super",
+    "new super()",
+    "new super(12, 45)",
+    "new new super",
+    "new new super()",
+    "new new super()()",
+    "z.super",  // Ok, property lookup.
+    NULL};
+
+  static const ParserFlag always_flags[] = {kAllowClasses};
+  RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ErrorsSuper) {
+  // Tests that parser and preparser generate same errors for 'super'.
+  const char* context_data[][2] = {{"", ";"},
+                                   {"k = ", ";"},
+                                   {"foo(", ");"},
+                                   {NULL, NULL}};
+
+  const char* statement_data[] = {
+    "super = x",
+    "y = super",
+    "f(super)",
+    NULL};
+
+  static const ParserFlag always_flags[] = {kAllowClasses};
+  RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(NoErrorsMethodDefinition) {
+  const char* context_data[][2] = {{"({", "});"},
+                                   {"'use strict'; ({", "});"},
+                                   {"({*", "});"},
+                                   {"'use strict'; ({*", "});"},
+                                   {NULL, NULL}};
+
+  const char* object_literal_body_data[] = {
+    "m() {}",
+    "m(x) { return x; }",
+    "m(x, y) {}, n() {}",
+    "set(x, y) {}",
+    "get(x, y) {}",
+    NULL
+  };
+
+  static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+  RunParserSyncTest(context_data, object_literal_body_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(MethodDefinitionNames) {
+  const char* context_data[][2] = {{"({", "(x, y) {}});"},
+                                   {"'use strict'; ({", "(x, y) {}});"},
+                                   {"({*", "(x, y) {}});"},
+                                   {"'use strict'; ({*", "(x, y) {}});"},
+                                   {NULL, NULL}};
+
+  const char* name_data[] = {
+    "m",
+    "'m'",
+    "\"m\"",
+    "\"m n\"",
+    "true",
+    "false",
+    "null",
+    "0",
+    "1.2",
+    "1e1",
+    "1E1",
+    "1e+1",
+    "1e-1",
+
+    // Keywords
+    "async",
+    "await",
+    "break",
+    "case",
+    "catch",
+    "class",
+    "const",
+    "continue",
+    "debugger",
+    "default",
+    "delete",
+    "do",
+    "else",
+    "enum",
+    "export",
+    "extends",
+    "finally",
+    "for",
+    "function",
+    "if",
+    "implements",
+    "import",
+    "in",
+    "instanceof",
+    "interface",
+    "let",
+    "new",
+    "package",
+    "private",
+    "protected",
+    "public",
+    "return",
+    "static",
+    "super",
+    "switch",
+    "this",
+    "throw",
+    "try",
+    "typeof",
+    "var",
+    "void",
+    "while",
+    "with",
+    "yield",
+    NULL
+  };
+
+  static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+  RunParserSyncTest(context_data, name_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(MethodDefinitionStrictFormalParameters) {
+  const char* context_data[][2] = {{"({method(", "){}});"},
+                                   {"'use strict'; ({method(", "){}});"},
+                                   {"({*method(", "){}});"},
+                                   {"'use strict'; ({*method(", "){}});"},
+                                   {NULL, NULL}};
+
+  const char* params_data[] = {
+    "x, x",
+    "x, y, x",
+    "eval",
+    "arguments",
+    "var",
+    "const",
+    NULL
+  };
+
+  static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+  RunParserSyncTest(context_data, params_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(MethodDefinitionDuplicateProperty) {
+  // Duplicate properties are allowed in ES6, but we haven't removed that
+  // check yet.
+  const char* context_data[][2] = {{"'use strict'; ({", "});"},
+                                   {NULL, NULL}};
+
+  const char* params_data[] = {
+    "x: 1, x() {}",
+    "x() {}, x: 1",
+    "x() {}, get x() {}",
+    "x() {}, set x(_) {}",
+    "x() {}, x() {}",
+    "x() {}, y() {}, x() {}",
+    "x() {}, \"x\"() {}",
+    "x() {}, 'x'() {}",
+    "0() {}, '0'() {}",
+    "1.0() {}, 1: 1",
+
+    "x: 1, *x() {}",
+    "*x() {}, x: 1",
+    "*x() {}, get x() {}",
+    "*x() {}, set x(_) {}",
+    "*x() {}, *x() {}",
+    "*x() {}, y() {}, *x() {}",
+    "*x() {}, *\"x\"() {}",
+    "*x() {}, *'x'() {}",
+    "*0() {}, *'0'() {}",
+    "*1.0() {}, 1: 1",
+
+    NULL
+  };
+
+  static const ParserFlag always_flags[] = {kAllowHarmonyObjectLiterals};
+  RunParserSyncTest(context_data, params_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassExpressionNoErrors) {
+  const char* context_data[][2] = {{"(", ");"},
+                                   {"var C = ", ";"},
+                                   {"bar, ", ";"},
+                                   {NULL, NULL}};
+  const char* class_data[] = {
+    "class {}",
+    "class name {}",
+    "class extends F {}",
+    "class name extends F {}",
+    "class extends (F, G) {}",
+    "class name extends (F, G) {}",
+    "class extends class {} {}",
+    "class name extends class {} {}",
+    "class extends class base {} {}",
+    "class name extends class base {} {}",
+    NULL};
+
+  static const ParserFlag always_flags[] = {kAllowClasses};
+  RunParserSyncTest(context_data, class_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassDeclarationNoErrors) {
+  const char* context_data[][2] = {{"", ""},
+                                   {"{", "}"},
+                                   {"if (true) {", "}"},
+                                   {NULL, NULL}};
+  const char* statement_data[] = {
+    "class name {}",
+    "class name extends F {}",
+    "class name extends (F, G) {}",
+    "class name extends class {} {}",
+    "class name extends class base {} {}",
+    NULL};
+
+  static const ParserFlag always_flags[] = {kAllowClasses};
+  RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassBodyNoErrors) {
+  // Tests that parser and preparser accept valid class syntax.
+  const char* context_data[][2] = {{"(class {", "});"},
+                                   {"(class extends Base {", "});"},
+                                   {"class C {", "}"},
+                                   {"class C extends Base {", "}"},
+                                   {NULL, NULL}};
+  const char* class_body_data[] = {
+    ";",
+    ";;",
+    "m() {}",
+    "m() {};",
+    "; m() {}",
+    "m() {}; n(x) {}",
+    "get x() {}",
+    "set x(v) {}",
+    "get() {}",
+    "set() {}",
+    "*g() {}",
+    "*g() {};",
+    "; *g() {}",
+    "*g() {}; *h(x) {}",
+    "static() {}",
+    "static m() {}",
+    "static get x() {}",
+    "static set x(v) {}",
+    "static get() {}",
+    "static set() {}",
+    "static static() {}",
+    "static get static() {}",
+    "static set static(v) {}",
+    "*static() {}",
+    "*get() {}",
+    "*set() {}",
+    "static *g() {}",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassPropertyNameNoErrors) {
+  const char* context_data[][2] = {{"(class {", "() {}});"},
+                                   {"(class { get ", "() {}});"},
+                                   {"(class { set ", "(v) {}});"},
+                                   {"(class { static ", "() {}});"},
+                                   {"(class { static get ", "() {}});"},
+                                   {"(class { static set ", "(v) {}});"},
+                                   {"(class { *", "() {}});"},
+                                   {"(class { static *", "() {}});"},
+                                   {"class C {", "() {}}"},
+                                   {"class C { get ", "() {}}"},
+                                   {"class C { set ", "(v) {}}"},
+                                   {"class C { static ", "() {}}"},
+                                   {"class C { static get ", "() {}}"},
+                                   {"class C { static set ", "(v) {}}"},
+                                   {"class C { *", "() {}}"},
+                                   {"class C { static *", "() {}}"},
+                                   {NULL, NULL}};
+  const char* name_data[] = {
+    "42",
+    "42.5",
+    "42e2",
+    "42e+2",
+    "42e-2",
+    "null",
+    "false",
+    "true",
+    "'str'",
+    "\"str\"",
+    "static",
+    "get",
+    "set",
+    "var",
+    "const",
+    "let",
+    "this",
+    "class",
+    "function",
+    "yield",
+    "if",
+    "else",
+    "for",
+    "while",
+    "do",
+    "try",
+    "catch",
+    "finally",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, name_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassExpressionErrors) {
+  const char* context_data[][2] = {{"(", ");"},
+                                   {"var C = ", ";"},
+                                   {"bar, ", ";"},
+                                   {NULL, NULL}};
+  const char* class_data[] = {
+    "class",
+    "class name",
+    "class name extends",
+    "class extends",
+    "class {",
+    "class { m }",
+    "class { m; n }",
+    "class { m: 1 }",
+    "class { m(); n() }",
+    "class { get m }",
+    "class { get m() }",
+    "class { get m() { }",
+    "class { set m() {} }",  // Missing required parameter.
+    "class { m() {}, n() {} }",  // No commas allowed.
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassDeclarationErrors) {
+  const char* context_data[][2] = {{"", ""},
+                                   {"{", "}"},
+                                   {"if (true) {", "}"},
+                                   {NULL, NULL}};
+  const char* class_data[] = {
+    "class",
+    "class name",
+    "class name extends",
+    "class extends",
+    "class name {",
+    "class name { m }",
+    "class name { m; n }",
+    "class name { m: 1 }",
+    "class name { m(); n() }",
+    "class name { get x }",
+    "class name { get x() }",
+    "class name { set x() {) }",  // missing required param
+    "class {}",  // Name is required for declaration
+    "class extends base {}",
+    "class name { *",
+    "class name { * }",
+    "class name { *; }",
+    "class name { *get x() {} }",
+    "class name { *set x(_) {} }",
+    "class name { *static m() {} }",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyNumericLiterals
+  };
+  RunParserSyncTest(context_data, class_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassNameErrors) {
+  const char* context_data[][2] = {{"class ", "{}"},
+                                   {"(class ", "{});"},
+                                   {"'use strict'; class ", "{}"},
+                                   {"'use strict'; (class ", "{});"},
+                                   {NULL, NULL}};
+  const char* class_name[] = {
+    "arguments",
+    "eval",
+    "implements",
+    "interface",
+    "let",
+    "package",
+    "private",
+    "protected",
+    "public",
+    "static",
+    "var",
+    "yield",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_name, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassGetterParamNameErrors) {
+  const char* context_data[][2] = {
+    {"class C { get name(", ") {} }"},
+    {"(class { get name(", ") {} });"},
+    {"'use strict'; class C { get name(", ") {} }"},
+    {"'use strict'; (class { get name(", ") {} })"},
+    {NULL, NULL}
+  };
+
+  const char* class_name[] = {
+    "arguments",
+    "eval",
+    "implements",
+    "interface",
+    "let",
+    "package",
+    "private",
+    "protected",
+    "public",
+    "static",
+    "var",
+    "yield",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_name, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassStaticPrototypeErrors) {
+  const char* context_data[][2] = {{"class C {", "}"},
+                                   {"(class {", "});"},
+                                   {NULL, NULL}};
+
+  const char* class_body_data[] = {
+    "static prototype() {}",
+    "static get prototype() {}",
+    "static set prototype(_) {}",
+    "static *prototype() {}",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassSpecialConstructorErrors) {
+  const char* context_data[][2] = {{"class C {", "}"},
+                                   {"(class {", "});"},
+                                   {NULL, NULL}};
+
+  const char* class_body_data[] = {
+    "get constructor() {}",
+    "get constructor(_) {}",
+    "*constructor() {}",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassConstructorNoErrors) {
+  const char* context_data[][2] = {{"class C {", "}"},
+                                   {"(class {", "});"},
+                                   {NULL, NULL}};
+
+  const char* class_body_data[] = {
+    "constructor() {}",
+    "static constructor() {}",
+    "static get constructor() {}",
+    "static set constructor(_) {}",
+    "static *constructor() {}",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassMultipleConstructorErrors) {
+  // We currently do not allow any duplicate properties in class bodies. This
+  // test ensures that, once that restriction is lifted, we still throw on
+  // duplicate constructors.
+  const char* context_data[][2] = {{"class C {", "}"},
+                                   {"(class {", "});"},
+                                   {NULL, NULL}};
+
+  const char* class_body_data[] = {
+    "constructor() {}; constructor() {}",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+// TODO(arv): We should allow duplicate property names.
+// https://code.google.com/p/v8/issues/detail?id=3570
+DISABLED_TEST(ClassMultiplePropertyNamesNoErrors) {
+  const char* context_data[][2] = {{"class C {", "}"},
+                                   {"(class {", "});"},
+                                   {NULL, NULL}};
+
+  const char* class_body_data[] = {
+    "constructor() {}; static constructor() {}",
+    "m() {}; static m() {}",
+    "m() {}; m() {}",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
+
+
+TEST(ClassesAreStrictErrors) {
+  const char* context_data[][2] = {{"", ""},
+                                   {"(", ");"},
+                                   {NULL, NULL}};
+
+  const char* class_body_data[] = {
+    "class C { method() { with ({}) {} } }",
+    "class C extends function() { with ({}) {} } {}",
+    "class C { *method() { with ({}) {} } }",
+    NULL};
+
+  static const ParserFlag always_flags[] = {
+    kAllowClasses,
+    kAllowHarmonyObjectLiterals
+  };
+  RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
+                    always_flags, arraysize(always_flags));
+}
diff --git a/test/cctest/test-platform-linux.cc b/test/cctest/test-platform-linux.cc
index 036039e..613638e 100644
--- a/test/cctest/test-platform-linux.cc
+++ b/test/cctest/test-platform-linux.cc
@@ -33,14 +33,14 @@
 
 #include "src/v8.h"
 
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
 #include "test/cctest/cctest.h"
 
 using namespace ::v8::internal;
 
 
 TEST(VirtualMemory) {
-  VirtualMemory* vm = new VirtualMemory(1 * MB);
+  v8::base::VirtualMemory* vm = new v8::base::VirtualMemory(1 * MB);
   CHECK(vm->IsReserved());
   void* block_addr = vm->address();
   size_t block_size = 4 * KB;
@@ -51,8 +51,3 @@
   CHECK(vm->Uncommit(block_addr, block_size));
   delete vm;
 }
-
-
-TEST(GetCurrentProcessId) {
-  CHECK_EQ(static_cast<int>(getpid()), OS::GetCurrentProcessId());
-}
diff --git a/test/cctest/test-platform-macos.cc b/test/cctest/test-platform-macos.cc
deleted file mode 100644
index a44e81f..0000000
--- a/test/cctest/test-platform-macos.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2006-2008 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Tests of the TokenLock class from lock.h
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
-using namespace ::v8::internal;
diff --git a/test/cctest/test-platform-tls.cc b/test/cctest/test-platform-tls.cc
deleted file mode 100644
index f6ad95a..0000000
--- a/test/cctest/test-platform-tls.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-// Tests of fast TLS support.
-
-#include "src/v8.h"
-
-#include "src/checks.h"
-#include "src/platform.h"
-#include "test/cctest/cctest.h"
-
-using v8::internal::Thread;
-
-static const int kValueCount = 128;
-
-static Thread::LocalStorageKey keys[kValueCount];
-
-static void* GetValue(int num) {
-  return reinterpret_cast<void*>(static_cast<intptr_t>(num + 1));
-}
-
-
-static void DoTest() {
-  for (int i = 0; i < kValueCount; i++) {
-    CHECK(!Thread::HasThreadLocal(keys[i]));
-  }
-  for (int i = 0; i < kValueCount; i++) {
-    Thread::SetThreadLocal(keys[i], GetValue(i));
-  }
-  for (int i = 0; i < kValueCount; i++) {
-    CHECK(Thread::HasThreadLocal(keys[i]));
-  }
-  for (int i = 0; i < kValueCount; i++) {
-    CHECK_EQ(GetValue(i), Thread::GetThreadLocal(keys[i]));
-    CHECK_EQ(GetValue(i), Thread::GetExistingThreadLocal(keys[i]));
-  }
-  for (int i = 0; i < kValueCount; i++) {
-    Thread::SetThreadLocal(keys[i], GetValue(kValueCount - i - 1));
-  }
-  for (int i = 0; i < kValueCount; i++) {
-    CHECK(Thread::HasThreadLocal(keys[i]));
-  }
-  for (int i = 0; i < kValueCount; i++) {
-    CHECK_EQ(GetValue(kValueCount - i - 1),
-             Thread::GetThreadLocal(keys[i]));
-    CHECK_EQ(GetValue(kValueCount - i - 1),
-             Thread::GetExistingThreadLocal(keys[i]));
-  }
-}
-
-class TestThread : public Thread {
- public:
-  TestThread() : Thread("TestThread") {}
-
-  virtual void Run() {
-    DoTest();
-  }
-};
-
-
-TEST(FastTLS) {
-  for (int i = 0; i < kValueCount; i++) {
-    keys[i] = Thread::CreateThreadLocalKey();
-  }
-  DoTest();
-  TestThread thread;
-  thread.Start();
-  thread.Join();
-}
diff --git a/test/cctest/test-platform-win32.cc b/test/cctest/test-platform-win32.cc
index 83ee1b3..cecde74 100644
--- a/test/cctest/test-platform-win32.cc
+++ b/test/cctest/test-platform-win32.cc
@@ -31,15 +31,15 @@
 
 #include "src/v8.h"
 
+#include "src/base/platform/platform.h"
 #include "src/base/win32-headers.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 
 using namespace ::v8::internal;
 
 
 TEST(VirtualMemory) {
-  VirtualMemory* vm = new VirtualMemory(1 * MB);
+  v8::base::VirtualMemory* vm = new v8::base::VirtualMemory(1 * MB);
   CHECK(vm->IsReserved());
   void* block_addr = vm->address();
   size_t block_size = 4 * KB;
@@ -50,9 +50,3 @@
   CHECK(vm->Uncommit(block_addr, block_size));
   delete vm;
 }
-
-
-TEST(GetCurrentProcessId) {
-  CHECK_EQ(static_cast<int>(::GetCurrentProcessId()),
-           OS::GetCurrentProcessId());
-}
diff --git a/test/cctest/test-platform.cc b/test/cctest/test-platform.cc
index 1b6be8b..100a5a7 100644
--- a/test/cctest/test-platform.cc
+++ b/test/cctest/test-platform.cc
@@ -1,77 +1,35 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
-#include <stdlib.h>
-
-#include "src/platform.h"
+#include "include/v8stdint.h"
+#include "src/base/build_config.h"
+#include "src/base/platform/platform.h"
 #include "test/cctest/cctest.h"
 
-using namespace ::v8::internal;
+#ifdef V8_CC_GNU
 
-#ifdef __GNUC__
-#define ASM __asm__ __volatile__
+static uintptr_t sp_addr = 0;
 
-#if defined(_M_X64) || defined(__x86_64__)
-#define GET_STACK_POINTER() \
-  static int sp_addr = 0; \
-  do { \
-    ASM("mov %%rsp, %0" : "=g" (sp_addr)); \
-  } while (0)
-#elif defined(_M_IX86) || defined(__i386__)
-#define GET_STACK_POINTER() \
-  static int sp_addr = 0; \
-  do { \
-    ASM("mov %%esp, %0" : "=g" (sp_addr)); \
-  } while (0)
-#elif defined(__ARMEL__)
-#define GET_STACK_POINTER() \
-  static int sp_addr = 0; \
-  do { \
-    ASM("str %%sp, %0" : "=g" (sp_addr)); \
-  } while (0)
-#elif defined(__AARCH64EL__)
-#define GET_STACK_POINTER() \
-  static int sp_addr = 0; \
-  do { \
-    ASM("mov x16, sp; str x16, %0" : "=g" (sp_addr)); \
-  } while (0)
-#elif defined(__MIPSEB__) || defined(__MIPSEL__)
-#define GET_STACK_POINTER() \
-  static int sp_addr = 0; \
-  do { \
-    ASM("sw $sp, %0" : "=g" (sp_addr)); \
-  } while (0)
+void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
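+  // Capture the current stack pointer into sp_addr via inline assembly.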
+#if V8_HOST_ARCH_X64
+  __asm__ __volatile__("mov %%rsp, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_IA32
+  __asm__ __volatile__("mov %%esp, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_ARM
+  __asm__ __volatile__("str %%sp, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_ARM64
+  __asm__ __volatile__("mov x16, sp; str x16, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_MIPS
+  __asm__ __volatile__("sw $sp, %0" : "=g"(sp_addr));
+#elif V8_HOST_ARCH_MIPS64
+  __asm__ __volatile__("sd $sp, %0" : "=g"(sp_addr));
 #else
 #error Host architecture was not detected as supported by v8
 #endif
 
-void GetStackPointer(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  GET_STACK_POINTER();
-  args.GetReturnValue().Set(v8_num(sp_addr));
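+  // Return the low 32 bits of the stack pointer to the JS caller.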
+  args.GetReturnValue().Set(v8::Integer::NewFromUnsigned(
+      args.GetIsolate(), static_cast<uint32_t>(sp_addr)));
 }
 
 
@@ -94,9 +52,7 @@
       v8::Local<v8::Function>::Cast(global_object->Get(v8_str("foo")));
 
   v8::Local<v8::Value> result = foo->Call(global_object, 0, NULL);
-  CHECK_EQ(0, result->Int32Value() % OS::ActivationFrameAlignment());
+  CHECK_EQ(0, result->Uint32Value() % v8::base::OS::ActivationFrameAlignment());
 }
 
-#undef GET_STACK_POINTERS
-#undef ASM
-#endif  // __GNUC__
+#endif  // V8_CC_GNU
diff --git a/test/cctest/test-profile-generator.cc b/test/cctest/test-profile-generator.cc
index fd590f3..7578b35 100644
--- a/test/cctest/test-profile-generator.cc
+++ b/test/cctest/test-profile-generator.cc
@@ -572,14 +572,14 @@
   const_cast<ProfileNode*>(current)->Print(0);
   // The tree should look like this:
   //  (root)
-  //   (anonymous function)
+  //   ""
   //     a
   //       b
   //         c
   // There can also be:
   //           startProfiling
   // if the sampler managed to get a tick.
-  current = PickChild(current, "(anonymous function)");
+  current = PickChild(current, "");
   CHECK_NE(NULL, const_cast<ProfileNode*>(current));
   current = PickChild(current, "a");
   CHECK_NE(NULL, const_cast<ProfileNode*>(current));
@@ -651,13 +651,13 @@
       const_cast<v8::CpuProfileNode*>(current))->Print(0);
   // The tree should look like this:
   //  (root)
-  //   (anonymous function)
+  //   ""
   //     b
   //       a
   // There can also be:
   //         startProfiling
   // if the sampler managed to get a tick.
-  current = PickChild(current, i::ProfileGenerator::kAnonymousFunctionName);
+  current = PickChild(current, "");
   CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
 
   current = PickChild(current, "b");
@@ -760,10 +760,10 @@
       const_cast<v8::CpuProfileNode*>(current))->Print(0);
   // The tree should look like this:
   //  (root)
-  //   (anonymous function)
+  //   ""
   //     kTryFinally
   //       kTryCatch
-  current = PickChild(current, i::ProfileGenerator::kAnonymousFunctionName);
+  current = PickChild(current, "");
   CHECK_NE(NULL, const_cast<v8::CpuProfileNode*>(current));
 
   current = PickChild(current, "TryFinally");
diff --git a/test/cctest/test-random-number-generator.cc b/test/cctest/test-random-number-generator.cc
index db7ec75..04b5882 100644
--- a/test/cctest/test-random-number-generator.cc
+++ b/test/cctest/test-random-number-generator.cc
@@ -26,71 +26,24 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "src/v8.h"
-
-#include "src/utils/random-number-generator.h"
-#include "src/isolate-inl.h"
 #include "test/cctest/cctest.h"
 
+#include "src/base/utils/random-number-generator.h"
+#include "src/isolate-inl.h"
+
 using namespace v8::internal;
 
 
-static const int kMaxRuns = 12345;
-static const int kRandomSeeds[] = {
-  -1, 1, 42, 100, 1234567890, 987654321
-};
-
-
-TEST(NextIntWithMaxValue) {
-  for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
-    RandomNumberGenerator rng(kRandomSeeds[n]);
-    for (int max = 1; max <= kMaxRuns; ++max) {
-      int n = rng.NextInt(max);
-      CHECK_LE(0, n);
-      CHECK_LT(n, max);
-    }
-  }
-}
-
-
-TEST(NextBoolReturnsBooleanValue) {
-  for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
-    RandomNumberGenerator rng(kRandomSeeds[n]);
-    for (int k = 0; k < kMaxRuns; ++k) {
-      bool b = rng.NextBool();
-      CHECK(b == false || b == true);
-    }
-  }
-}
-
-
-TEST(NextDoubleRange) {
-  for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
-    RandomNumberGenerator rng(kRandomSeeds[n]);
-    for (int k = 0; k < kMaxRuns; ++k) {
-      double d = rng.NextDouble();
-      CHECK_LE(0.0, d);
-      CHECK_LT(d, 1.0);
-    }
-  }
-}
+static const int64_t kRandomSeeds[] = {-1, 1, 42, 100, 1234567890, 987654321};
 
 
 TEST(RandomSeedFlagIsUsed) {
-  for (unsigned n = 0; n < ARRAY_SIZE(kRandomSeeds); ++n) {
-    FLAG_random_seed = kRandomSeeds[n];
+  for (unsigned n = 0; n < arraysize(kRandomSeeds); ++n) {
+    FLAG_random_seed = static_cast<int>(kRandomSeeds[n]);
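+    // A fresh isolate must seed its RNG from the --random-seed flag.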
     v8::Isolate* i = v8::Isolate::New();
-    RandomNumberGenerator& rng1 =
+    v8::base::RandomNumberGenerator& rng =
         *reinterpret_cast<Isolate*>(i)->random_number_generator();
-    RandomNumberGenerator rng2(kRandomSeeds[n]);
-    for (int k = 1; k <= kMaxRuns; ++k) {
-      int64_t i1, i2;
-      rng1.NextBytes(&i1, sizeof(i1));
-      rng2.NextBytes(&i2, sizeof(i2));
-      CHECK_EQ(i2, i1);
-      CHECK_EQ(rng2.NextInt(), rng1.NextInt());
-      CHECK_EQ(rng2.NextInt(k), rng1.NextInt(k));
-      CHECK_EQ(rng2.NextDouble(), rng1.NextDouble());
-    }
+    CHECK_EQ(kRandomSeeds[n], rng.initial_seed());
     i->Dispose();
   }
 }
diff --git a/test/cctest/test-regexp.cc b/test/cctest/test-regexp.cc
index 82b430c..9d1d52e 100644
--- a/test/cctest/test-regexp.cc
+++ b/test/cctest/test-regexp.cc
@@ -33,18 +33,18 @@
 #include "src/ast.h"
 #include "src/char-predicates-inl.h"
 #include "src/jsregexp.h"
+#include "src/ostreams.h"
 #include "src/parser.h"
-#include "src/regexp-macro-assembler-irregexp.h"
 #include "src/regexp-macro-assembler.h"
+#include "src/regexp-macro-assembler-irregexp.h"
 #include "src/string-stream.h"
 #include "src/zone-inl.h"
 #ifdef V8_INTERPRETED_REGEXP
 #include "src/interpreter-irregexp.h"
 #else  // V8_INTERPRETED_REGEXP
 #include "src/macro-assembler.h"
-#include "src/code.h"
 #if V8_TARGET_ARCH_ARM
-#include "src/arm/assembler-arm.h"
+#include "src/arm/assembler-arm.h"  // NOLINT
 #include "src/arm/macro-assembler-arm.h"
 #include "src/arm/regexp-macro-assembler-arm.h"
 #endif
@@ -58,6 +58,11 @@
 #include "src/mips/macro-assembler-mips.h"
 #include "src/mips/regexp-macro-assembler-mips.h"
 #endif
+#if V8_TARGET_ARCH_MIPS64
+#include "src/mips64/assembler-mips64.h"
+#include "src/mips64/macro-assembler-mips64.h"
+#include "src/mips64/regexp-macro-assembler-mips64.h"
+#endif
 #if V8_TARGET_ARCH_X64
 #include "src/x64/assembler-x64.h"
 #include "src/x64/macro-assembler-x64.h"
@@ -80,7 +85,6 @@
 
 
 static bool CheckParse(const char* input) {
-  V8::Initialize(NULL);
   v8::HandleScope scope(CcTest::isolate());
   Zone zone(CcTest::i_isolate());
   FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -90,8 +94,7 @@
 }
 
 
-static SmartArrayPointer<const char> Parse(const char* input) {
-  V8::Initialize(NULL);
+static void CheckParseEq(const char* input, const char* expected) {
   v8::HandleScope scope(CcTest::isolate());
   Zone zone(CcTest::i_isolate());
   FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -100,13 +103,13 @@
       &reader, false, &result, &zone));
   CHECK(result.tree != NULL);
   CHECK(result.error.is_null());
-  SmartArrayPointer<const char> output = result.tree->ToString(&zone);
-  return output;
+  OStringStream os;
+  result.tree->Print(os, &zone);
+  CHECK_EQ(expected, os.c_str());
 }
 
 
 static bool CheckSimple(const char* input) {
-  V8::Initialize(NULL);
   v8::HandleScope scope(CcTest::isolate());
   Zone zone(CcTest::i_isolate());
   FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -125,7 +128,6 @@
 
 
 static MinMaxPair CheckMinMaxMatch(const char* input) {
-  V8::Initialize(NULL);
   v8::HandleScope scope(CcTest::isolate());
   Zone zone(CcTest::i_isolate());
   FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -142,7 +144,6 @@
 
 
 #define CHECK_PARSE_ERROR(input) CHECK(!CheckParse(input))
-#define CHECK_PARSE_EQ(input, expected) CHECK_EQ(expected, Parse(input).get())
 #define CHECK_SIMPLE(input, simple) CHECK_EQ(simple, CheckSimple(input));
 #define CHECK_MIN_MAX(input, min, max)                                         \
   { MinMaxPair min_max = CheckMinMaxMatch(input);                              \
@@ -151,130 +152,131 @@
   }
 
 TEST(Parser) {
-  V8::Initialize(NULL);
-
   CHECK_PARSE_ERROR("?");
 
-  CHECK_PARSE_EQ("abc", "'abc'");
-  CHECK_PARSE_EQ("", "%");
-  CHECK_PARSE_EQ("abc|def", "(| 'abc' 'def')");
-  CHECK_PARSE_EQ("abc|def|ghi", "(| 'abc' 'def' 'ghi')");
-  CHECK_PARSE_EQ("^xxx$", "(: @^i 'xxx' @$i)");
-  CHECK_PARSE_EQ("ab\\b\\d\\bcd", "(: 'ab' @b [0-9] @b 'cd')");
-  CHECK_PARSE_EQ("\\w|\\d", "(| [0-9 A-Z _ a-z] [0-9])");
-  CHECK_PARSE_EQ("a*", "(# 0 - g 'a')");
-  CHECK_PARSE_EQ("a*?", "(# 0 - n 'a')");
-  CHECK_PARSE_EQ("abc+", "(: 'ab' (# 1 - g 'c'))");
-  CHECK_PARSE_EQ("abc+?", "(: 'ab' (# 1 - n 'c'))");
-  CHECK_PARSE_EQ("xyz?", "(: 'xy' (# 0 1 g 'z'))");
-  CHECK_PARSE_EQ("xyz??", "(: 'xy' (# 0 1 n 'z'))");
-  CHECK_PARSE_EQ("xyz{0,1}", "(: 'xy' (# 0 1 g 'z'))");
-  CHECK_PARSE_EQ("xyz{0,1}?", "(: 'xy' (# 0 1 n 'z'))");
-  CHECK_PARSE_EQ("xyz{93}", "(: 'xy' (# 93 93 g 'z'))");
-  CHECK_PARSE_EQ("xyz{93}?", "(: 'xy' (# 93 93 n 'z'))");
-  CHECK_PARSE_EQ("xyz{1,32}", "(: 'xy' (# 1 32 g 'z'))");
-  CHECK_PARSE_EQ("xyz{1,32}?", "(: 'xy' (# 1 32 n 'z'))");
-  CHECK_PARSE_EQ("xyz{1,}", "(: 'xy' (# 1 - g 'z'))");
-  CHECK_PARSE_EQ("xyz{1,}?", "(: 'xy' (# 1 - n 'z'))");
-  CHECK_PARSE_EQ("a\\fb\\nc\\rd\\te\\vf", "'a\\x0cb\\x0ac\\x0dd\\x09e\\x0bf'");
-  CHECK_PARSE_EQ("a\\nb\\bc", "(: 'a\\x0ab' @b 'c')");
-  CHECK_PARSE_EQ("(?:foo)", "'foo'");
-  CHECK_PARSE_EQ("(?: foo )", "' foo '");
-  CHECK_PARSE_EQ("(foo|bar|baz)", "(^ (| 'foo' 'bar' 'baz'))");
-  CHECK_PARSE_EQ("foo|(bar|baz)|quux", "(| 'foo' (^ (| 'bar' 'baz')) 'quux')");
-  CHECK_PARSE_EQ("foo(?=bar)baz", "(: 'foo' (-> + 'bar') 'baz')");
-  CHECK_PARSE_EQ("foo(?!bar)baz", "(: 'foo' (-> - 'bar') 'baz')");
-  CHECK_PARSE_EQ("()", "(^ %)");
-  CHECK_PARSE_EQ("(?=)", "(-> + %)");
-  CHECK_PARSE_EQ("[]", "^[\\x00-\\uffff]");   // Doesn't compile on windows
-  CHECK_PARSE_EQ("[^]", "[\\x00-\\uffff]");   // \uffff isn't in codepage 1252
-  CHECK_PARSE_EQ("[x]", "[x]");
-  CHECK_PARSE_EQ("[xyz]", "[x y z]");
-  CHECK_PARSE_EQ("[a-zA-Z0-9]", "[a-z A-Z 0-9]");
-  CHECK_PARSE_EQ("[-123]", "[- 1 2 3]");
-  CHECK_PARSE_EQ("[^123]", "^[1 2 3]");
-  CHECK_PARSE_EQ("]", "']'");
-  CHECK_PARSE_EQ("}", "'}'");
-  CHECK_PARSE_EQ("[a-b-c]", "[a-b - c]");
-  CHECK_PARSE_EQ("[\\d]", "[0-9]");
-  CHECK_PARSE_EQ("[x\\dz]", "[x 0-9 z]");
-  CHECK_PARSE_EQ("[\\d-z]", "[0-9 - z]");
-  CHECK_PARSE_EQ("[\\d-\\d]", "[0-9 - 0-9]");
-  CHECK_PARSE_EQ("[z-\\d]", "[z - 0-9]");
+  CheckParseEq("abc", "'abc'");
+  CheckParseEq("", "%");
+  CheckParseEq("abc|def", "(| 'abc' 'def')");
+  CheckParseEq("abc|def|ghi", "(| 'abc' 'def' 'ghi')");
+  CheckParseEq("^xxx$", "(: @^i 'xxx' @$i)");
+  CheckParseEq("ab\\b\\d\\bcd", "(: 'ab' @b [0-9] @b 'cd')");
+  CheckParseEq("\\w|\\d", "(| [0-9 A-Z _ a-z] [0-9])");
+  CheckParseEq("a*", "(# 0 - g 'a')");
+  CheckParseEq("a*?", "(# 0 - n 'a')");
+  CheckParseEq("abc+", "(: 'ab' (# 1 - g 'c'))");
+  CheckParseEq("abc+?", "(: 'ab' (# 1 - n 'c'))");
+  CheckParseEq("xyz?", "(: 'xy' (# 0 1 g 'z'))");
+  CheckParseEq("xyz??", "(: 'xy' (# 0 1 n 'z'))");
+  CheckParseEq("xyz{0,1}", "(: 'xy' (# 0 1 g 'z'))");
+  CheckParseEq("xyz{0,1}?", "(: 'xy' (# 0 1 n 'z'))");
+  CheckParseEq("xyz{93}", "(: 'xy' (# 93 93 g 'z'))");
+  CheckParseEq("xyz{93}?", "(: 'xy' (# 93 93 n 'z'))");
+  CheckParseEq("xyz{1,32}", "(: 'xy' (# 1 32 g 'z'))");
+  CheckParseEq("xyz{1,32}?", "(: 'xy' (# 1 32 n 'z'))");
+  CheckParseEq("xyz{1,}", "(: 'xy' (# 1 - g 'z'))");
+  CheckParseEq("xyz{1,}?", "(: 'xy' (# 1 - n 'z'))");
+  CheckParseEq("a\\fb\\nc\\rd\\te\\vf", "'a\\x0cb\\x0ac\\x0dd\\x09e\\x0bf'");
+  CheckParseEq("a\\nb\\bc", "(: 'a\\x0ab' @b 'c')");
+  CheckParseEq("(?:foo)", "'foo'");
+  CheckParseEq("(?: foo )", "' foo '");
+  CheckParseEq("(foo|bar|baz)", "(^ (| 'foo' 'bar' 'baz'))");
+  CheckParseEq("foo|(bar|baz)|quux", "(| 'foo' (^ (| 'bar' 'baz')) 'quux')");
+  CheckParseEq("foo(?=bar)baz", "(: 'foo' (-> + 'bar') 'baz')");
+  CheckParseEq("foo(?!bar)baz", "(: 'foo' (-> - 'bar') 'baz')");
+  CheckParseEq("()", "(^ %)");
+  CheckParseEq("(?=)", "(-> + %)");
+  CheckParseEq("[]", "^[\\x00-\\uffff]");  // Doesn't compile on windows
+  CheckParseEq("[^]", "[\\x00-\\uffff]");  // \uffff isn't in codepage 1252
+  CheckParseEq("[x]", "[x]");
+  CheckParseEq("[xyz]", "[x y z]");
+  CheckParseEq("[a-zA-Z0-9]", "[a-z A-Z 0-9]");
+  CheckParseEq("[-123]", "[- 1 2 3]");
+  CheckParseEq("[^123]", "^[1 2 3]");
+  CheckParseEq("]", "']'");
+  CheckParseEq("}", "'}'");
+  CheckParseEq("[a-b-c]", "[a-b - c]");
+  CheckParseEq("[\\d]", "[0-9]");
+  CheckParseEq("[x\\dz]", "[x 0-9 z]");
+  CheckParseEq("[\\d-z]", "[0-9 - z]");
+  CheckParseEq("[\\d-\\d]", "[0-9 - 0-9]");
+  CheckParseEq("[z-\\d]", "[z - 0-9]");
   // Control character outside character class.
-  CHECK_PARSE_EQ("\\cj\\cJ\\ci\\cI\\ck\\cK",
-                 "'\\x0a\\x0a\\x09\\x09\\x0b\\x0b'");
-  CHECK_PARSE_EQ("\\c!", "'\\c!'");
-  CHECK_PARSE_EQ("\\c_", "'\\c_'");
-  CHECK_PARSE_EQ("\\c~", "'\\c~'");
-  CHECK_PARSE_EQ("\\c1", "'\\c1'");
+  CheckParseEq("\\cj\\cJ\\ci\\cI\\ck\\cK", "'\\x0a\\x0a\\x09\\x09\\x0b\\x0b'");
+  CheckParseEq("\\c!", "'\\c!'");
+  CheckParseEq("\\c_", "'\\c_'");
+  CheckParseEq("\\c~", "'\\c~'");
+  CheckParseEq("\\c1", "'\\c1'");
   // Control character inside character class.
-  CHECK_PARSE_EQ("[\\c!]", "[\\ c !]");
-  CHECK_PARSE_EQ("[\\c_]", "[\\x1f]");
-  CHECK_PARSE_EQ("[\\c~]", "[\\ c ~]");
-  CHECK_PARSE_EQ("[\\ca]", "[\\x01]");
-  CHECK_PARSE_EQ("[\\cz]", "[\\x1a]");
-  CHECK_PARSE_EQ("[\\cA]", "[\\x01]");
-  CHECK_PARSE_EQ("[\\cZ]", "[\\x1a]");
-  CHECK_PARSE_EQ("[\\c1]", "[\\x11]");
+  CheckParseEq("[\\c!]", "[\\ c !]");
+  CheckParseEq("[\\c_]", "[\\x1f]");
+  CheckParseEq("[\\c~]", "[\\ c ~]");
+  CheckParseEq("[\\ca]", "[\\x01]");
+  CheckParseEq("[\\cz]", "[\\x1a]");
+  CheckParseEq("[\\cA]", "[\\x01]");
+  CheckParseEq("[\\cZ]", "[\\x1a]");
+  CheckParseEq("[\\c1]", "[\\x11]");
 
-  CHECK_PARSE_EQ("[a\\]c]", "[a ] c]");
-  CHECK_PARSE_EQ("\\[\\]\\{\\}\\(\\)\\%\\^\\#\\ ", "'[]{}()%^# '");
-  CHECK_PARSE_EQ("[\\[\\]\\{\\}\\(\\)\\%\\^\\#\\ ]", "[[ ] { } ( ) % ^ #  ]");
-  CHECK_PARSE_EQ("\\0", "'\\x00'");
-  CHECK_PARSE_EQ("\\8", "'8'");
-  CHECK_PARSE_EQ("\\9", "'9'");
-  CHECK_PARSE_EQ("\\11", "'\\x09'");
-  CHECK_PARSE_EQ("\\11a", "'\\x09a'");
-  CHECK_PARSE_EQ("\\011", "'\\x09'");
-  CHECK_PARSE_EQ("\\00011", "'\\x0011'");
-  CHECK_PARSE_EQ("\\118", "'\\x098'");
-  CHECK_PARSE_EQ("\\111", "'I'");
-  CHECK_PARSE_EQ("\\1111", "'I1'");
-  CHECK_PARSE_EQ("(x)(x)(x)\\1", "(: (^ 'x') (^ 'x') (^ 'x') (<- 1))");
-  CHECK_PARSE_EQ("(x)(x)(x)\\2", "(: (^ 'x') (^ 'x') (^ 'x') (<- 2))");
-  CHECK_PARSE_EQ("(x)(x)(x)\\3", "(: (^ 'x') (^ 'x') (^ 'x') (<- 3))");
-  CHECK_PARSE_EQ("(x)(x)(x)\\4", "(: (^ 'x') (^ 'x') (^ 'x') '\\x04')");
-  CHECK_PARSE_EQ("(x)(x)(x)\\1*", "(: (^ 'x') (^ 'x') (^ 'x')"
-                               " (# 0 - g (<- 1)))");
-  CHECK_PARSE_EQ("(x)(x)(x)\\2*", "(: (^ 'x') (^ 'x') (^ 'x')"
-                               " (# 0 - g (<- 2)))");
-  CHECK_PARSE_EQ("(x)(x)(x)\\3*", "(: (^ 'x') (^ 'x') (^ 'x')"
-                               " (# 0 - g (<- 3)))");
-  CHECK_PARSE_EQ("(x)(x)(x)\\4*", "(: (^ 'x') (^ 'x') (^ 'x')"
-                               " (# 0 - g '\\x04'))");
-  CHECK_PARSE_EQ("(x)(x)(x)(x)(x)(x)(x)(x)(x)(x)\\10",
-              "(: (^ 'x') (^ 'x') (^ 'x') (^ 'x') (^ 'x') (^ 'x')"
-              " (^ 'x') (^ 'x') (^ 'x') (^ 'x') (<- 10))");
-  CHECK_PARSE_EQ("(x)(x)(x)(x)(x)(x)(x)(x)(x)(x)\\11",
-              "(: (^ 'x') (^ 'x') (^ 'x') (^ 'x') (^ 'x') (^ 'x')"
-              " (^ 'x') (^ 'x') (^ 'x') (^ 'x') '\\x09')");
-  CHECK_PARSE_EQ("(a)\\1", "(: (^ 'a') (<- 1))");
-  CHECK_PARSE_EQ("(a\\1)", "(^ 'a')");
-  CHECK_PARSE_EQ("(\\1a)", "(^ 'a')");
-  CHECK_PARSE_EQ("(?=a)?a", "'a'");
-  CHECK_PARSE_EQ("(?=a){0,10}a", "'a'");
-  CHECK_PARSE_EQ("(?=a){1,10}a", "(: (-> + 'a') 'a')");
-  CHECK_PARSE_EQ("(?=a){9,10}a", "(: (-> + 'a') 'a')");
-  CHECK_PARSE_EQ("(?!a)?a", "'a'");
-  CHECK_PARSE_EQ("\\1(a)", "(^ 'a')");
-  CHECK_PARSE_EQ("(?!(a))\\1", "(: (-> - (^ 'a')) (<- 1))");
-  CHECK_PARSE_EQ("(?!\\1(a\\1)\\1)\\1", "(: (-> - (: (^ 'a') (<- 1))) (<- 1))");
-  CHECK_PARSE_EQ("[\\0]", "[\\x00]");
-  CHECK_PARSE_EQ("[\\11]", "[\\x09]");
-  CHECK_PARSE_EQ("[\\11a]", "[\\x09 a]");
-  CHECK_PARSE_EQ("[\\011]", "[\\x09]");
-  CHECK_PARSE_EQ("[\\00011]", "[\\x00 1 1]");
-  CHECK_PARSE_EQ("[\\118]", "[\\x09 8]");
-  CHECK_PARSE_EQ("[\\111]", "[I]");
-  CHECK_PARSE_EQ("[\\1111]", "[I 1]");
-  CHECK_PARSE_EQ("\\x34", "'\x34'");
-  CHECK_PARSE_EQ("\\x60", "'\x60'");
-  CHECK_PARSE_EQ("\\x3z", "'x3z'");
-  CHECK_PARSE_EQ("\\c", "'\\c'");
-  CHECK_PARSE_EQ("\\u0034", "'\x34'");
-  CHECK_PARSE_EQ("\\u003z", "'u003z'");
-  CHECK_PARSE_EQ("foo[z]*", "(: 'foo' (# 0 - g [z]))");
+  CheckParseEq("[a\\]c]", "[a ] c]");
+  CheckParseEq("\\[\\]\\{\\}\\(\\)\\%\\^\\#\\ ", "'[]{}()%^# '");
+  CheckParseEq("[\\[\\]\\{\\}\\(\\)\\%\\^\\#\\ ]", "[[ ] { } ( ) % ^ #  ]");
+  CheckParseEq("\\0", "'\\x00'");
+  CheckParseEq("\\8", "'8'");
+  CheckParseEq("\\9", "'9'");
+  CheckParseEq("\\11", "'\\x09'");
+  CheckParseEq("\\11a", "'\\x09a'");
+  CheckParseEq("\\011", "'\\x09'");
+  CheckParseEq("\\00011", "'\\x0011'");
+  CheckParseEq("\\118", "'\\x098'");
+  CheckParseEq("\\111", "'I'");
+  CheckParseEq("\\1111", "'I1'");
+  CheckParseEq("(x)(x)(x)\\1", "(: (^ 'x') (^ 'x') (^ 'x') (<- 1))");
+  CheckParseEq("(x)(x)(x)\\2", "(: (^ 'x') (^ 'x') (^ 'x') (<- 2))");
+  CheckParseEq("(x)(x)(x)\\3", "(: (^ 'x') (^ 'x') (^ 'x') (<- 3))");
+  CheckParseEq("(x)(x)(x)\\4", "(: (^ 'x') (^ 'x') (^ 'x') '\\x04')");
+  CheckParseEq("(x)(x)(x)\\1*",
+               "(: (^ 'x') (^ 'x') (^ 'x')"
+               " (# 0 - g (<- 1)))");
+  CheckParseEq("(x)(x)(x)\\2*",
+               "(: (^ 'x') (^ 'x') (^ 'x')"
+               " (# 0 - g (<- 2)))");
+  CheckParseEq("(x)(x)(x)\\3*",
+               "(: (^ 'x') (^ 'x') (^ 'x')"
+               " (# 0 - g (<- 3)))");
+  CheckParseEq("(x)(x)(x)\\4*",
+               "(: (^ 'x') (^ 'x') (^ 'x')"
+               " (# 0 - g '\\x04'))");
+  CheckParseEq("(x)(x)(x)(x)(x)(x)(x)(x)(x)(x)\\10",
+               "(: (^ 'x') (^ 'x') (^ 'x') (^ 'x') (^ 'x') (^ 'x')"
+               " (^ 'x') (^ 'x') (^ 'x') (^ 'x') (<- 10))");
+  CheckParseEq("(x)(x)(x)(x)(x)(x)(x)(x)(x)(x)\\11",
+               "(: (^ 'x') (^ 'x') (^ 'x') (^ 'x') (^ 'x') (^ 'x')"
+               " (^ 'x') (^ 'x') (^ 'x') (^ 'x') '\\x09')");
+  CheckParseEq("(a)\\1", "(: (^ 'a') (<- 1))");
+  CheckParseEq("(a\\1)", "(^ 'a')");
+  CheckParseEq("(\\1a)", "(^ 'a')");
+  CheckParseEq("(?=a)?a", "'a'");
+  CheckParseEq("(?=a){0,10}a", "'a'");
+  CheckParseEq("(?=a){1,10}a", "(: (-> + 'a') 'a')");
+  CheckParseEq("(?=a){9,10}a", "(: (-> + 'a') 'a')");
+  CheckParseEq("(?!a)?a", "'a'");
+  CheckParseEq("\\1(a)", "(^ 'a')");
+  CheckParseEq("(?!(a))\\1", "(: (-> - (^ 'a')) (<- 1))");
+  CheckParseEq("(?!\\1(a\\1)\\1)\\1", "(: (-> - (: (^ 'a') (<- 1))) (<- 1))");
+  CheckParseEq("[\\0]", "[\\x00]");
+  CheckParseEq("[\\11]", "[\\x09]");
+  CheckParseEq("[\\11a]", "[\\x09 a]");
+  CheckParseEq("[\\011]", "[\\x09]");
+  CheckParseEq("[\\00011]", "[\\x00 1 1]");
+  CheckParseEq("[\\118]", "[\\x09 8]");
+  CheckParseEq("[\\111]", "[I]");
+  CheckParseEq("[\\1111]", "[I 1]");
+  CheckParseEq("\\x34", "'\x34'");
+  CheckParseEq("\\x60", "'\x60'");
+  CheckParseEq("\\x3z", "'x3z'");
+  CheckParseEq("\\c", "'\\c'");
+  CheckParseEq("\\u0034", "'\x34'");
+  CheckParseEq("\\u003z", "'u003z'");
+  CheckParseEq("foo[z]*", "(: 'foo' (# 0 - g [z]))");
 
   CHECK_SIMPLE("", false);
   CHECK_SIMPLE("a", true);
@@ -322,22 +324,22 @@
   CHECK_SIMPLE("(?!a)?a\\1", false);
   CHECK_SIMPLE("(?:(?=a))a\\1", false);
 
-  CHECK_PARSE_EQ("a{}", "'a{}'");
-  CHECK_PARSE_EQ("a{,}", "'a{,}'");
-  CHECK_PARSE_EQ("a{", "'a{'");
-  CHECK_PARSE_EQ("a{z}", "'a{z}'");
-  CHECK_PARSE_EQ("a{1z}", "'a{1z}'");
-  CHECK_PARSE_EQ("a{12z}", "'a{12z}'");
-  CHECK_PARSE_EQ("a{12,", "'a{12,'");
-  CHECK_PARSE_EQ("a{12,3b", "'a{12,3b'");
-  CHECK_PARSE_EQ("{}", "'{}'");
-  CHECK_PARSE_EQ("{,}", "'{,}'");
-  CHECK_PARSE_EQ("{", "'{'");
-  CHECK_PARSE_EQ("{z}", "'{z}'");
-  CHECK_PARSE_EQ("{1z}", "'{1z}'");
-  CHECK_PARSE_EQ("{12z}", "'{12z}'");
-  CHECK_PARSE_EQ("{12,", "'{12,'");
-  CHECK_PARSE_EQ("{12,3b", "'{12,3b'");
+  CheckParseEq("a{}", "'a{}'");
+  CheckParseEq("a{,}", "'a{,}'");
+  CheckParseEq("a{", "'a{'");
+  CheckParseEq("a{z}", "'a{z}'");
+  CheckParseEq("a{1z}", "'a{1z}'");
+  CheckParseEq("a{12z}", "'a{12z}'");
+  CheckParseEq("a{12,", "'a{12,'");
+  CheckParseEq("a{12,3b", "'a{12,3b'");
+  CheckParseEq("{}", "'{}'");
+  CheckParseEq("{,}", "'{,}'");
+  CheckParseEq("{", "'{'");
+  CheckParseEq("{z}", "'{z}'");
+  CheckParseEq("{1z}", "'{1z}'");
+  CheckParseEq("{12z}", "'{12z}'");
+  CheckParseEq("{12,", "'{12,'");
+  CheckParseEq("{12,3b", "'{12,3b'");
 
   CHECK_MIN_MAX("a", 1, 1);
   CHECK_MIN_MAX("abc", 3, 3);
@@ -391,15 +393,14 @@
 
 
 TEST(ParserRegression) {
-  CHECK_PARSE_EQ("[A-Z$-][x]", "(! [A-Z $ -] [x])");
-  CHECK_PARSE_EQ("a{3,4*}", "(: 'a{3,' (# 0 - g '4') '}')");
-  CHECK_PARSE_EQ("{", "'{'");
-  CHECK_PARSE_EQ("a|", "(| 'a' %)");
+  CheckParseEq("[A-Z$-][x]", "(! [A-Z $ -] [x])");
+  CheckParseEq("a{3,4*}", "(: 'a{3,' (# 0 - g '4') '}')");
+  CheckParseEq("{", "'{'");
+  CheckParseEq("a|", "(| 'a' %)");
 }
 
 static void ExpectError(const char* input,
                         const char* expected) {
-  V8::Initialize(NULL);
   v8::HandleScope scope(CcTest::isolate());
   Zone zone(CcTest::i_isolate());
   FlatStringReader reader(CcTest::i_isolate(), CStrVector(input));
@@ -434,13 +435,11 @@
   // Check that we don't allow more than kMaxCapture captures
   const int kMaxCaptures = 1 << 16;  // Must match RegExpParser::kMaxCaptures.
   const char* kTooManyCaptures = "Too many captures";
-  HeapStringAllocator allocator;
-  StringStream accumulator(&allocator);
+  OStringStream os;
   for (int i = 0; i <= kMaxCaptures; i++) {
-    accumulator.Add("()");
+    os << "()";
   }
-  SmartArrayPointer<const char> many_captures(accumulator.ToCString());
-  ExpectError(many_captures.get(), kTooManyCaptures);
+  ExpectError(os.c_str(), kTooManyCaptures);
 }
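
The kTooManyCaptures builder above drops the heap-backed
StringStream/SmartArrayPointer pair for the new OStringStream, whose streaming
interface mirrors std::ostringstream. A standalone analogue of the pattern
construction, in standard C++ with a hypothetical function name:

    #include <sstream>
    #include <string>

    // Builds "()" repeated max_captures + 1 times, i.e. one capture group
    // too many, so the parser is expected to reject the pattern with the
    // "Too many captures" error checked above.
    std::string TooManyCapturesPattern(int max_captures) {
      std::ostringstream os;
      for (int i = 0; i <= max_captures; i++) os << "()";
      return os.str();
    }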
 
 
@@ -488,7 +487,6 @@
 
 
 TEST(CharacterClassEscapes) {
-  v8::internal::V8::Initialize(NULL);
   TestCharacterClassEscapes('.', IsRegExpNewline);
   TestCharacterClassEscapes('d', IsDigit);
   TestCharacterClassEscapes('D', NotDigit);
@@ -499,11 +497,8 @@
 }
 
 
-static RegExpNode* Compile(const char* input,
-                           bool multiline,
-                           bool is_ascii,
+static RegExpNode* Compile(const char* input, bool multiline, bool is_one_byte,
                            Zone* zone) {
-  V8::Initialize(NULL);
   Isolate* isolate = CcTest::i_isolate();
   FlatStringReader reader(isolate, CStrVector(input));
   RegExpCompileData compile_data;
@@ -514,25 +509,17 @@
       NewStringFromUtf8(CStrVector(input)).ToHandleChecked();
   Handle<String> sample_subject =
       isolate->factory()->NewStringFromUtf8(CStrVector("")).ToHandleChecked();
-  RegExpEngine::Compile(&compile_data,
-                        false,
-                        false,
-                        multiline,
-                        pattern,
-                        sample_subject,
-                        is_ascii,
-                        zone);
+  RegExpEngine::Compile(&compile_data, false, false, multiline, false, pattern,
+                        sample_subject, is_one_byte, zone);
   return compile_data.node;
 }
 
 
-static void Execute(const char* input,
-                    bool multiline,
-                    bool is_ascii,
+static void Execute(const char* input, bool multiline, bool is_one_byte,
                     bool dot_output = false) {
   v8::HandleScope scope(CcTest::isolate());
   Zone zone(CcTest::i_isolate());
-  RegExpNode* node = Compile(input, multiline, is_ascii, &zone);
+  RegExpNode* node = Compile(input, multiline, is_one_byte, &zone);
   USE(node);
 #ifdef DEBUG
   if (dot_output) {
@@ -568,7 +555,6 @@
 
 
 TEST(SplayTreeSimple) {
-  v8::internal::V8::Initialize(NULL);
   static const unsigned kLimit = 1000;
   Zone zone(CcTest::i_isolate());
   ZoneSplayTree<TestConfig> tree(&zone);
@@ -621,7 +607,6 @@
 
 
 TEST(DispatchTableConstruction) {
-  v8::internal::V8::Initialize(NULL);
   // Initialize test data.
   static const int kLimit = 1000;
   static const int kRangeCount = 8;
@@ -668,11 +653,11 @@
   // Enable possessive quantifier syntax.
   FLAG_regexp_possessive_quantifier = true;
 
-  CHECK_PARSE_EQ("a*+", "(# 0 - p 'a')");
-  CHECK_PARSE_EQ("a++", "(# 1 - p 'a')");
-  CHECK_PARSE_EQ("a?+", "(# 0 1 p 'a')");
-  CHECK_PARSE_EQ("a{10,20}+", "(# 10 20 p 'a')");
-  CHECK_PARSE_EQ("za{10,20}+b", "(: 'z' (# 10 20 p 'a') 'b')");
+  CheckParseEq("a*+", "(# 0 - p 'a')");
+  CheckParseEq("a++", "(# 1 - p 'a')");
+  CheckParseEq("a?+", "(# 0 1 p 'a')");
+  CheckParseEq("a{10,20}+", "(# 10 20 p 'a')");
+  CheckParseEq("za{10,20}+b", "(: 'z' (# 10 20 p 'a') 'b')");
 
   // Disable possessive quantifier syntax.
   FLAG_regexp_possessive_quantifier = false;
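
In the dump notation the quantifier mode p marks a possessive quantifier:
like g it matches as much as possible, but it never backtracks to give
characters up, which is why the syntax sits behind
FLAG_regexp_possessive_quantifier. A hypothetical additional case in the same
style (not part of the original test), which would slot in while the flag is
still enabled:

    CheckParseEq("a*+b", "(: (# 0 - p 'a') 'b')");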
@@ -703,6 +688,8 @@
 typedef RegExpMacroAssemblerARM64 ArchRegExpMacroAssembler;
 #elif V8_TARGET_ARCH_MIPS
 typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
+#elif V8_TARGET_ARCH_MIPS64
+typedef RegExpMacroAssemblerMIPS ArchRegExpMacroAssembler;
 #elif V8_TARGET_ARCH_X87
 typedef RegExpMacroAssemblerX87 ArchRegExpMacroAssembler;
 #endif
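
The new MIPS64 branch reuses the MIPS class name, so every native test below
compiles unchanged on that target; each one instantiates its assembler
through the single ArchRegExpMacroAssembler typedef, e.g.:

    // Per-architecture assembler behind one typedef; LATIN1 is the renamed
    // ASCII mode (still one-byte string data, just a spec-aligned name).
    ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);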
@@ -748,16 +735,16 @@
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
 
   m.Succeed();
 
-  Handle<String> source = factory->NewStringFromStaticAscii("");
+  Handle<String> source = factory->NewStringFromStaticChars("");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
   int captures[4] = {42, 37, 87, 117};
-  Handle<String> input = factory->NewStringFromStaticAscii("foofoo");
+  Handle<String> input = factory->NewStringFromStaticChars("foofoo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   const byte* start_adr =
       reinterpret_cast<const byte*>(seq_input->GetCharsAddress());
@@ -785,7 +772,7 @@
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
 
   Label fail, backtrack;
   m.PushBacktrack(&fail);
@@ -806,12 +793,12 @@
   m.Bind(&fail);
   m.Fail();
 
-  Handle<String> source = factory->NewStringFromStaticAscii("^foo");
+  Handle<String> source = factory->NewStringFromStaticChars("^foo");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
   int captures[4] = {42, 37, 87, 117};
-  Handle<String> input = factory->NewStringFromStaticAscii("foofoo");
+  Handle<String> input = factory->NewStringFromStaticChars("foofoo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
@@ -829,7 +816,7 @@
   CHECK_EQ(-1, captures[2]);
   CHECK_EQ(-1, captures[3]);
 
-  input = factory->NewStringFromStaticAscii("barbarbar");
+  input = factory->NewStringFromStaticChars("barbarbar");
   seq_input = Handle<SeqOneByteString>::cast(input);
   start_adr = seq_input->GetCharsAddress();
 
@@ -872,7 +859,7 @@
   m.Bind(&fail);
   m.Fail();
 
-  Handle<String> source = factory->NewStringFromStaticAscii("^foo");
+  Handle<String> source = factory->NewStringFromStaticChars("^foo");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
@@ -923,7 +910,7 @@
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
 
   Label fail;
   Label backtrack;
@@ -936,11 +923,11 @@
   m.Bind(&backtrack);
   m.Fail();
 
-  Handle<String> source = factory->NewStringFromStaticAscii("..........");
+  Handle<String> source = factory->NewStringFromStaticChars("..........");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
-  Handle<String> input = factory->NewStringFromStaticAscii("foofoo");
+  Handle<String> input = factory->NewStringFromStaticChars("foofoo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
@@ -956,14 +943,14 @@
 }
 
 
-TEST(MacroAssemblerNativeBackReferenceASCII) {
+TEST(MacroAssemblerNativeBackReferenceLATIN1) {
   v8::V8::Initialize();
   ContextInitializer initializer;
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
 
   m.WriteCurrentPositionToRegister(0, 0);
   m.AdvanceCurrentPosition(2);
@@ -980,11 +967,11 @@
   m.Bind(&missing_match);
   m.Fail();
 
-  Handle<String> source = factory->NewStringFromStaticAscii("^(..)..\1");
+  Handle<String> source = factory->NewStringFromStaticChars("^(..)..\1");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
-  Handle<String> input = factory->NewStringFromStaticAscii("fooofo");
+  Handle<String> input = factory->NewStringFromStaticChars("fooofo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
@@ -1029,7 +1016,7 @@
   m.Bind(&missing_match);
   m.Fail();
 
-  Handle<String> source = factory->NewStringFromStaticAscii("^(..)..\1");
+  Handle<String> source = factory->NewStringFromStaticChars("^(..)..\1");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
@@ -1064,7 +1051,7 @@
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
 
   Label not_at_start, newline, fail;
   m.CheckNotAtStart(&not_at_start);
@@ -1087,11 +1074,11 @@
   m.CheckNotCharacter('b', &fail);
   m.Succeed();
 
-  Handle<String> source = factory->NewStringFromStaticAscii("(^f|ob)");
+  Handle<String> source = factory->NewStringFromStaticChars("(^f|ob)");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
-  Handle<String> input = factory->NewStringFromStaticAscii("foobar");
+  Handle<String> input = factory->NewStringFromStaticChars("foobar");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
@@ -1123,7 +1110,7 @@
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 4, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 4, &zone);
 
   Label fail, succ;
 
@@ -1148,12 +1135,11 @@
   m.Succeed();
 
   Handle<String> source =
-      factory->NewStringFromStaticAscii("^(abc)\1\1(?!\1)...(?!\1)");
+      factory->NewStringFromStaticChars("^(abc)\1\1(?!\1)...(?!\1)");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
-  Handle<String> input =
-      factory->NewStringFromStaticAscii("aBcAbCABCxYzab");
+  Handle<String> input = factory->NewStringFromStaticChars("aBcAbCABCxYzab");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
@@ -1182,7 +1168,7 @@
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 6, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 6, &zone);
 
   uc16 foo_chars[3] = {'f', 'o', 'o'};
   Vector<const uc16> foo(foo_chars, 3);
@@ -1248,14 +1234,12 @@
   m.Bind(&fail);
   m.Fail();
 
-  Handle<String> source =
-      factory->NewStringFromStaticAscii("<loop test>");
+  Handle<String> source = factory->NewStringFromStaticChars("<loop test>");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
   // String long enough for test (content doesn't matter).
-  Handle<String> input =
-      factory->NewStringFromStaticAscii("foofoofoofoofoo");
+  Handle<String> input = factory->NewStringFromStaticChars("foofoofoofoofoo");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
@@ -1285,7 +1269,7 @@
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 0, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 0, &zone);
 
   Label loop;
   m.Bind(&loop);
@@ -1293,13 +1277,12 @@
   m.GoTo(&loop);
 
   Handle<String> source =
-      factory->NewStringFromStaticAscii("<stack overflow test>");
+      factory->NewStringFromStaticChars("<stack overflow test>");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
   // String long enough for test (content doesn't matter).
-  Handle<String> input =
-      factory->NewStringFromStaticAscii("dummy");
+  Handle<String> input = factory->NewStringFromStaticChars("dummy");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
@@ -1324,7 +1307,7 @@
   Factory* factory = isolate->factory();
   Zone zone(isolate);
 
-  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::ASCII, 2, &zone);
+  ArchRegExpMacroAssembler m(NativeRegExpMacroAssembler::LATIN1, 2, &zone);
 
   // At least 2048, to ensure the allocated space for registers
   // span one full page.
@@ -1340,13 +1323,12 @@
   m.Succeed();
 
   Handle<String> source =
-      factory->NewStringFromStaticAscii("<huge register space test>");
+      factory->NewStringFromStaticChars("<huge register space test>");
   Handle<Object> code_object = m.GetCode(source);
   Handle<Code> code = Handle<Code>::cast(code_object);
 
   // String long enough for test (content doesn't matter).
-  Handle<String> input =
-      factory->NewStringFromStaticAscii("sample text");
+  Handle<String> input = factory->NewStringFromStaticChars("sample text");
   Handle<SeqOneByteString> seq_input = Handle<SeqOneByteString>::cast(input);
   Address start_adr = seq_input->GetCharsAddress();
 
@@ -1369,7 +1351,6 @@
 #else  // V8_INTERPRETED_REGEXP
 
 TEST(MacroAssembler) {
-  V8::Initialize(NULL);
   byte codes[1024];
   Zone zone(CcTest::i_isolate());
   RegExpMacroAssemblerIrregexp m(Vector<byte>(codes, 1024), &zone);
@@ -1408,7 +1389,7 @@
   Factory* factory = isolate->factory();
   HandleScope scope(isolate);
 
-  Handle<String> source = factory->NewStringFromStaticAscii("^f(o)o");
+  Handle<String> source = factory->NewStringFromStaticChars("^f(o)o");
   Handle<ByteArray> array = Handle<ByteArray>::cast(m.GetCode(source));
   int captures[5];
 
@@ -1435,7 +1416,6 @@
 
 
 TEST(AddInverseToTable) {
-  v8::internal::V8::Initialize(NULL);
   static const int kLimit = 1000;
   static const int kRangeCount = 16;
   for (int t = 0; t < 10; t++) {
@@ -1595,7 +1575,6 @@
 
 
 TEST(CharacterRangeCaseIndependence) {
-  v8::internal::V8::Initialize(NULL);
   TestSimpleRangeCaseIndependence(CharacterRange::Singleton('a'),
                                   CharacterRange::Singleton('A'));
   TestSimpleRangeCaseIndependence(CharacterRange::Singleton('z'),
@@ -1637,7 +1616,6 @@
 
 
 TEST(CharClassDifference) {
-  v8::internal::V8::Initialize(NULL);
   Zone zone(CcTest::i_isolate());
   ZoneList<CharacterRange>* base =
       new(&zone) ZoneList<CharacterRange>(1, &zone);
@@ -1665,7 +1643,6 @@
 
 
 TEST(CanonicalizeCharacterSets) {
-  v8::internal::V8::Initialize(NULL);
   Zone zone(CcTest::i_isolate());
   ZoneList<CharacterRange>* list =
       new(&zone) ZoneList<CharacterRange>(4, &zone);
@@ -1675,26 +1652,26 @@
   list->Add(CharacterRange(30, 40), &zone);
   list->Add(CharacterRange(50, 60), &zone);
   set.Canonicalize();
-  ASSERT_EQ(3, list->length());
-  ASSERT_EQ(10, list->at(0).from());
-  ASSERT_EQ(20, list->at(0).to());
-  ASSERT_EQ(30, list->at(1).from());
-  ASSERT_EQ(40, list->at(1).to());
-  ASSERT_EQ(50, list->at(2).from());
-  ASSERT_EQ(60, list->at(2).to());
+  DCHECK_EQ(3, list->length());
+  DCHECK_EQ(10, list->at(0).from());
+  DCHECK_EQ(20, list->at(0).to());
+  DCHECK_EQ(30, list->at(1).from());
+  DCHECK_EQ(40, list->at(1).to());
+  DCHECK_EQ(50, list->at(2).from());
+  DCHECK_EQ(60, list->at(2).to());
 
   list->Rewind(0);
   list->Add(CharacterRange(10, 20), &zone);
   list->Add(CharacterRange(50, 60), &zone);
   list->Add(CharacterRange(30, 40), &zone);
   set.Canonicalize();
-  ASSERT_EQ(3, list->length());
-  ASSERT_EQ(10, list->at(0).from());
-  ASSERT_EQ(20, list->at(0).to());
-  ASSERT_EQ(30, list->at(1).from());
-  ASSERT_EQ(40, list->at(1).to());
-  ASSERT_EQ(50, list->at(2).from());
-  ASSERT_EQ(60, list->at(2).to());
+  DCHECK_EQ(3, list->length());
+  DCHECK_EQ(10, list->at(0).from());
+  DCHECK_EQ(20, list->at(0).to());
+  DCHECK_EQ(30, list->at(1).from());
+  DCHECK_EQ(40, list->at(1).to());
+  DCHECK_EQ(50, list->at(2).from());
+  DCHECK_EQ(60, list->at(2).to());
 
   list->Rewind(0);
   list->Add(CharacterRange(30, 40), &zone);
@@ -1703,31 +1680,30 @@
   list->Add(CharacterRange(100, 100), &zone);
   list->Add(CharacterRange(1, 1), &zone);
   set.Canonicalize();
-  ASSERT_EQ(5, list->length());
-  ASSERT_EQ(1, list->at(0).from());
-  ASSERT_EQ(1, list->at(0).to());
-  ASSERT_EQ(10, list->at(1).from());
-  ASSERT_EQ(20, list->at(1).to());
-  ASSERT_EQ(25, list->at(2).from());
-  ASSERT_EQ(25, list->at(2).to());
-  ASSERT_EQ(30, list->at(3).from());
-  ASSERT_EQ(40, list->at(3).to());
-  ASSERT_EQ(100, list->at(4).from());
-  ASSERT_EQ(100, list->at(4).to());
+  DCHECK_EQ(5, list->length());
+  DCHECK_EQ(1, list->at(0).from());
+  DCHECK_EQ(1, list->at(0).to());
+  DCHECK_EQ(10, list->at(1).from());
+  DCHECK_EQ(20, list->at(1).to());
+  DCHECK_EQ(25, list->at(2).from());
+  DCHECK_EQ(25, list->at(2).to());
+  DCHECK_EQ(30, list->at(3).from());
+  DCHECK_EQ(40, list->at(3).to());
+  DCHECK_EQ(100, list->at(4).from());
+  DCHECK_EQ(100, list->at(4).to());
 
   list->Rewind(0);
   list->Add(CharacterRange(10, 19), &zone);
   list->Add(CharacterRange(21, 30), &zone);
   list->Add(CharacterRange(20, 20), &zone);
   set.Canonicalize();
-  ASSERT_EQ(1, list->length());
-  ASSERT_EQ(10, list->at(0).from());
-  ASSERT_EQ(30, list->at(0).to());
+  DCHECK_EQ(1, list->length());
+  DCHECK_EQ(10, list->at(0).from());
+  DCHECK_EQ(30, list->at(0).to());
 }
 
 
 TEST(CharacterRangeMerge) {
-  v8::internal::V8::Initialize(NULL);
   Zone zone(CcTest::i_isolate());
   ZoneList<CharacterRange> l1(4, &zone);
   ZoneList<CharacterRange> l2(4, &zone);
@@ -1805,8 +1781,8 @@
     offset += 9;
   }
 
-  ASSERT(CharacterRange::IsCanonical(&l1));
-  ASSERT(CharacterRange::IsCanonical(&l2));
+  DCHECK(CharacterRange::IsCanonical(&l1));
+  DCHECK(CharacterRange::IsCanonical(&l2));
 
   ZoneList<CharacterRange> first_only(4, &zone);
   ZoneList<CharacterRange> second_only(4, &zone);
@@ -1815,6 +1791,5 @@
 
 
 TEST(Graph) {
-  V8::Initialize(NULL);
   Execute("\\b\\w+\\b", false, true, true);
 }
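
TEST(Graph) is the only caller passing dot_output = true; per the
#ifdef DEBUG block in Execute above, this dumps the compiled node graph in
DEBUG builds, presumably for inspection with Graphviz's dot. The same call
shape works for any pattern; a hypothetical example:

    // Compile-only exercise of the one-byte path; the graph dump only
    // happens in DEBUG builds when the last argument is true.
    Execute("\\d+(\\.\\d+)?", false /* multiline */, true /* is_one_byte */,
            true /* dot_output */);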
diff --git a/test/cctest/test-representation.cc b/test/cctest/test-representation.cc
index a3fec6e..fc1f531 100644
--- a/test/cctest/test-representation.cc
+++ b/test/cctest/test-representation.cc
@@ -25,10 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-#include "src/types.h"
-#include "src/property-details.h"
 #include "test/cctest/cctest.h"
 
+#include "src/property-details.h"
+#include "src/types.h"
+
 using namespace v8::internal;
 
 
diff --git a/test/cctest/test-semaphore.cc b/test/cctest/test-semaphore.cc
deleted file mode 100644
index 027f7cd..0000000
--- a/test/cctest/test-semaphore.cc
+++ /dev/null
@@ -1,155 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <stdlib.h>
-
-#include "src/v8.h"
-
-#include "src/platform.h"
-#include "test/cctest/cctest.h"
-
-
-using namespace ::v8::internal;
-
-
-class WaitAndSignalThread V8_FINAL : public Thread {
- public:
-  explicit WaitAndSignalThread(Semaphore* semaphore)
-      : Thread("WaitAndSignalThread"), semaphore_(semaphore) {}
-  virtual ~WaitAndSignalThread() {}
-
-  virtual void Run() V8_OVERRIDE {
-    for (int n = 0; n < 1000; ++n) {
-      semaphore_->Wait();
-      bool result = semaphore_->WaitFor(TimeDelta::FromMicroseconds(1));
-      ASSERT(!result);
-      USE(result);
-      semaphore_->Signal();
-    }
-  }
-
- private:
-  Semaphore* semaphore_;
-};
-
-
-TEST(WaitAndSignal) {
-  Semaphore semaphore(0);
-  WaitAndSignalThread t1(&semaphore);
-  WaitAndSignalThread t2(&semaphore);
-
-  t1.Start();
-  t2.Start();
-
-  // Make something available.
-  semaphore.Signal();
-
-  t1.Join();
-  t2.Join();
-
-  semaphore.Wait();
-
-  bool result = semaphore.WaitFor(TimeDelta::FromMicroseconds(1));
-  ASSERT(!result);
-  USE(result);
-}
-
-
-TEST(WaitFor) {
-  bool ok;
-  Semaphore semaphore(0);
-
-  // Semaphore not signalled - timeout.
-  ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(0));
-  CHECK(!ok);
-  ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(100));
-  CHECK(!ok);
-  ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(1000));
-  CHECK(!ok);
-
-  // Semaphore signalled - no timeout.
-  semaphore.Signal();
-  ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(0));
-  CHECK(ok);
-  semaphore.Signal();
-  ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(100));
-  CHECK(ok);
-  semaphore.Signal();
-  ok = semaphore.WaitFor(TimeDelta::FromMicroseconds(1000));
-  CHECK(ok);
-}
-
-
-static const char alphabet[] = "XKOAD";
-static const int kAlphabetSize = sizeof(alphabet) - 1;
-static const int kBufferSize = 4096;  // GCD(buffer size, alphabet size) = 1
-static char buffer[kBufferSize];
-static const int kDataSize = kBufferSize * kAlphabetSize * 10;
-
-static Semaphore free_space(kBufferSize);
-static Semaphore used_space(0);
-
-
-class ProducerThread V8_FINAL : public Thread {
- public:
-  ProducerThread() : Thread("ProducerThread") {}
-  virtual ~ProducerThread() {}
-
-  virtual void Run() V8_OVERRIDE {
-    for (int n = 0; n < kDataSize; ++n) {
-      free_space.Wait();
-      buffer[n % kBufferSize] = alphabet[n % kAlphabetSize];
-      used_space.Signal();
-    }
-  }
-};
-
-
-class ConsumerThread V8_FINAL : public Thread {
- public:
-  ConsumerThread() : Thread("ConsumerThread") {}
-  virtual ~ConsumerThread() {}
-
-  virtual void Run() V8_OVERRIDE {
-    for (int n = 0; n < kDataSize; ++n) {
-      used_space.Wait();
-      ASSERT_EQ(static_cast<int>(alphabet[n % kAlphabetSize]),
-                static_cast<int>(buffer[n % kBufferSize]));
-      free_space.Signal();
-    }
-  }
-};
-
-
-TEST(ProducerConsumer) {
-  ProducerThread producer_thread;
-  ConsumerThread consumer_thread;
-  producer_thread.Start();
-  consumer_thread.Start();
-  producer_thread.Join();
-  consumer_thread.Join();
-}
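
test-semaphore.cc is deleted outright; the platform layer moves into
v8::base in this merge (see the v8::base::OS::FOpen changes below), and
equivalent semaphore coverage presumably moved with it. For reference, the
deleted producer/consumer exercise translates directly to standard C++11;
a self-contained analogue, not V8 code:

    #include <condition_variable>
    #include <cstdlib>
    #include <mutex>
    #include <thread>

    // Minimal counting semaphore standing in for v8::internal::Semaphore.
    class Semaphore {
     public:
      explicit Semaphore(int count) : count_(count) {}
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }

     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_;
    };

    static const char alphabet[] = "XKOAD";
    static const int kAlphabetSize = sizeof(alphabet) - 1;
    static const int kBufferSize = 4096;  // GCD with alphabet size is 1.
    static char buffer[kBufferSize];
    static const int kDataSize = kBufferSize * kAlphabetSize * 10;

    int main() {
      // free_space counts empty slots, used_space counts filled ones;
      // together they serialize the two threads over the ring buffer.
      Semaphore free_space(kBufferSize);
      Semaphore used_space(0);
      std::thread producer([&] {
        for (int n = 0; n < kDataSize; ++n) {
          free_space.Wait();
          buffer[n % kBufferSize] = alphabet[n % kAlphabetSize];
          used_space.Signal();
        }
      });
      std::thread consumer([&] {
        for (int n = 0; n < kDataSize; ++n) {
          used_space.Wait();
          if (buffer[n % kBufferSize] != alphabet[n % kAlphabetSize])
            std::abort();  // Ordering violated.
          free_space.Signal();
        }
      });
      producer.join();
      consumer.join();
      return 0;
    }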
diff --git a/test/cctest/test-serialize.cc b/test/cctest/test-serialize.cc
index f320598..94b400e 100644
--- a/test/cctest/test-serialize.cc
+++ b/test/cctest/test-serialize.cc
@@ -32,55 +32,19 @@
 #include "src/v8.h"
 
 #include "src/bootstrapper.h"
+#include "src/compilation-cache.h"
 #include "src/debug.h"
-#include "src/ic-inl.h"
+#include "src/heap/spaces.h"
 #include "src/natives.h"
 #include "src/objects.h"
 #include "src/runtime.h"
 #include "src/scopeinfo.h"
 #include "src/serialize.h"
 #include "src/snapshot.h"
-#include "src/spaces.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
 
-static const unsigned kCounters = 256;
-static int local_counters[kCounters];
-static const char* local_counter_names[kCounters];
-
-
-static unsigned CounterHash(const char* s) {
-  unsigned hash = 0;
-  while (*++s) {
-    hash |= hash << 5;
-    hash += *s;
-  }
-  return hash;
-}
-
-
-// Callback receiver to track counters in test.
-static int* counter_function(const char* name) {
-  unsigned hash = CounterHash(name) % kCounters;
-  unsigned original_hash = hash;
-  USE(original_hash);
-  while (true) {
-    if (local_counter_names[hash] == name) {
-      return &local_counters[hash];
-    }
-    if (local_counter_names[hash] == 0) {
-      local_counter_names[hash] = name;
-      return &local_counters[hash];
-    }
-    if (strcmp(local_counter_names[hash], name) == 0) {
-      return &local_counters[hash];
-    }
-    hash = (hash + 1) % kCounters;
-    ASSERT(hash != original_hash);  // Hash table has been filled up.
-  }
-}
-
 
 template <class T>
 static Address AddressOf(T id) {
@@ -101,7 +65,6 @@
 
 TEST(ExternalReferenceEncoder) {
   Isolate* isolate = CcTest::i_isolate();
-  isolate->stats_table()->SetCounterFunction(counter_function);
   v8::V8::Initialize();
 
   ExternalReferenceEncoder encoder(isolate);
@@ -109,34 +72,29 @@
            Encode(encoder, Builtins::kArrayCode));
   CHECK_EQ(make_code(v8::internal::RUNTIME_FUNCTION, Runtime::kAbort),
            Encode(encoder, Runtime::kAbort));
-  ExternalReference total_compile_size =
-      ExternalReference(isolate->counters()->total_compile_size());
-  CHECK_EQ(make_code(STATS_COUNTER, Counters::k_total_compile_size),
-           encoder.Encode(total_compile_size.address()));
   ExternalReference stack_limit_address =
       ExternalReference::address_of_stack_limit(isolate);
-  CHECK_EQ(make_code(UNCLASSIFIED, 4),
+  CHECK_EQ(make_code(UNCLASSIFIED, 2),
            encoder.Encode(stack_limit_address.address()));
   ExternalReference real_stack_limit_address =
       ExternalReference::address_of_real_stack_limit(isolate);
-  CHECK_EQ(make_code(UNCLASSIFIED, 5),
-           encoder.Encode(real_stack_limit_address.address()));
-  CHECK_EQ(make_code(UNCLASSIFIED, 16),
-           encoder.Encode(ExternalReference::debug_break(isolate).address()));
-  CHECK_EQ(make_code(UNCLASSIFIED, 10),
-           encoder.Encode(
-               ExternalReference::new_space_start(isolate).address()));
   CHECK_EQ(make_code(UNCLASSIFIED, 3),
-           encoder.Encode(
-               ExternalReference::roots_array_start(isolate).address()));
-  CHECK_EQ(make_code(UNCLASSIFIED, 52),
+           encoder.Encode(real_stack_limit_address.address()));
+  CHECK_EQ(make_code(UNCLASSIFIED, 8),
+           encoder.Encode(ExternalReference::debug_break(isolate).address()));
+  CHECK_EQ(
+      make_code(UNCLASSIFIED, 4),
+      encoder.Encode(ExternalReference::new_space_start(isolate).address()));
+  CHECK_EQ(
+      make_code(UNCLASSIFIED, 1),
+      encoder.Encode(ExternalReference::roots_array_start(isolate).address()));
+  CHECK_EQ(make_code(UNCLASSIFIED, 34),
            encoder.Encode(ExternalReference::cpu_features().address()));
 }
 
 
 TEST(ExternalReferenceDecoder) {
   Isolate* isolate = CcTest::i_isolate();
-  isolate->stats_table()->SetCounterFunction(counter_function);
   v8::V8::Initialize();
 
   ExternalReferenceDecoder decoder(isolate);
@@ -145,27 +103,21 @@
   CHECK_EQ(AddressOf(Runtime::kAbort),
            decoder.Decode(make_code(v8::internal::RUNTIME_FUNCTION,
                                     Runtime::kAbort)));
-  ExternalReference total_compile_size =
-      ExternalReference(isolate->counters()->total_compile_size());
-  CHECK_EQ(total_compile_size.address(),
-           decoder.Decode(
-               make_code(STATS_COUNTER,
-                         Counters::k_total_compile_size)));
   CHECK_EQ(ExternalReference::address_of_stack_limit(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 4)));
+           decoder.Decode(make_code(UNCLASSIFIED, 2)));
   CHECK_EQ(ExternalReference::address_of_real_stack_limit(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 5)));
+           decoder.Decode(make_code(UNCLASSIFIED, 3)));
   CHECK_EQ(ExternalReference::debug_break(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 16)));
+           decoder.Decode(make_code(UNCLASSIFIED, 8)));
   CHECK_EQ(ExternalReference::new_space_start(isolate).address(),
-           decoder.Decode(make_code(UNCLASSIFIED, 10)));
+           decoder.Decode(make_code(UNCLASSIFIED, 4)));
 }
 
 
 class FileByteSink : public SnapshotByteSink {
  public:
   explicit FileByteSink(const char* snapshot_file) {
-    fp_ = OS::FOpen(snapshot_file, "wb");
+    fp_ = v8::base::OS::FOpen(snapshot_file, "wb");
     file_name_ = snapshot_file;
     if (fp_ == NULL) {
       PrintF("Unable to write to snapshot file \"%s\"\n", snapshot_file);
@@ -177,9 +129,9 @@
       fclose(fp_);
     }
   }
-  virtual void Put(int byte, const char* description) {
+  virtual void Put(byte b, const char* description) {
     if (fp_ != NULL) {
-      fputc(byte, fp_);
+      fputc(b, fp_);
     }
   }
   virtual int Position() {
@@ -211,7 +163,7 @@
   int file_name_length = StrLength(file_name_) + 10;
   Vector<char> name = Vector<char>::New(file_name_length + 1);
   SNPrintF(name, "%s.size", file_name_);
-  FILE* fp = OS::FOpen(name.start(), "w");
+  FILE* fp = v8::base::OS::FOpen(name.start(), "w");
   name.Dispose();
   fprintf(fp, "new %d\n", new_space_used);
   fprintf(fp, "pointer %d\n", pointer_space_used);
@@ -242,40 +194,42 @@
 }
 
 
-static void Serialize() {
+static void Serialize(v8::Isolate* isolate) {
   // We have to create one context.  One reason for this is so that the builtins
   // can be loaded from v8natives.js and their addresses can be processed.  This
   // will clear the pending fixups array, which would otherwise contain GC roots
   // that would confuse the serialization/deserialization process.
-  v8::Isolate* isolate = CcTest::isolate();
+  v8::Isolate::Scope isolate_scope(isolate);
   {
     v8::HandleScope scope(isolate);
     v8::Context::New(isolate);
   }
 
-  Isolate* internal_isolate = CcTest::i_isolate();
+  Isolate* internal_isolate = reinterpret_cast<Isolate*>(isolate);
   internal_isolate->heap()->CollectAllGarbage(Heap::kNoGCFlags, "serialize");
   WriteToFile(internal_isolate, FLAG_testing_serialization_file);
 }
 
 
 // Test that the whole heap can be serialized.
-TEST(Serialize) {
+UNINITIALIZED_TEST(Serialize) {
   if (!Snapshot::HaveASnapshotToStartFrom()) {
-    CcTest::i_isolate()->enable_serializer();
-    v8::V8::Initialize();
-    Serialize();
+    v8::Isolate::CreateParams params;
+    params.enable_serializer = true;
+    v8::Isolate* isolate = v8::Isolate::New(params);
+    Serialize(isolate);
   }
 }
 
 
 // Test that heap serialization is non-destructive.
-TEST(SerializeTwice) {
+UNINITIALIZED_TEST(SerializeTwice) {
   if (!Snapshot::HaveASnapshotToStartFrom()) {
-    CcTest::i_isolate()->enable_serializer();
-    v8::V8::Initialize();
-    Serialize();
-    Serialize();
+    v8::Isolate::CreateParams params;
+    params.enable_serializer = true;
+    v8::Isolate* isolate = v8::Isolate::New(params);
+    Serialize(isolate);
+    Serialize(isolate);
   }
 }
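
The switch from TEST to UNINITIALIZED_TEST goes hand in hand with
v8::Isolate::CreateParams: the serializer evidently has to be enabled when
the isolate is created, so these tests can no longer piggyback on CcTest's
shared, already-initialized isolate. The resulting setup pattern, exactly as
used above:

    v8::Isolate::CreateParams params;
    params.enable_serializer = true;  // Set before the isolate exists.
    v8::Isolate* isolate = v8::Isolate::New(params);
    Serialize(isolate);               // Runs inside an Isolate::Scope.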
 
@@ -283,172 +237,13 @@
 //----------------------------------------------------------------------------
 // Tests that the heap can be deserialized.
 
-static void Deserialize() {
-  CHECK(Snapshot::Initialize(FLAG_testing_serialization_file));
-}
-
-
-static void SanityCheck() {
-  Isolate* isolate = CcTest::i_isolate();
-  v8::HandleScope scope(CcTest::isolate());
-#ifdef VERIFY_HEAP
-  CcTest::heap()->Verify();
-#endif
-  CHECK(isolate->global_object()->IsJSObject());
-  CHECK(isolate->native_context()->IsContext());
-  CHECK(CcTest::heap()->string_table()->IsStringTable());
-  isolate->factory()->InternalizeOneByteString(STATIC_ASCII_VECTOR("Empty"));
-}
-
-
-DEPENDENT_TEST(Deserialize, Serialize) {
-  // The serialize-deserialize tests only work if the VM is built without
-  // serialization.  That doesn't matter.  We don't need to be able to
-  // serialize a snapshot in a VM that is booted from a snapshot.
-  if (!Snapshot::HaveASnapshotToStartFrom()) {
-    v8::Isolate* isolate = CcTest::isolate();
-    v8::HandleScope scope(isolate);
-    Deserialize();
-
-    v8::Local<v8::Context> env = v8::Context::New(isolate);
-    env->Enter();
-
-    SanityCheck();
-  }
-}
-
-
-DEPENDENT_TEST(DeserializeFromSecondSerialization, SerializeTwice) {
-  if (!Snapshot::HaveASnapshotToStartFrom()) {
-    v8::Isolate* isolate = CcTest::isolate();
-    v8::HandleScope scope(isolate);
-    Deserialize();
-
-    v8::Local<v8::Context> env = v8::Context::New(isolate);
-    env->Enter();
-
-    SanityCheck();
-  }
-}
-
-
-DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
-  if (!Snapshot::HaveASnapshotToStartFrom()) {
-    v8::Isolate* isolate = CcTest::isolate();
-    v8::HandleScope scope(isolate);
-    Deserialize();
-
-    v8::Local<v8::Context> env = v8::Context::New(isolate);
-    env->Enter();
-
-    const char* c_source = "\"1234\".length";
-    v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
-    v8::Local<v8::Script> script = v8::Script::Compile(source);
-    CHECK_EQ(4, script->Run()->Int32Value());
-  }
-}
-
-
-DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
-               SerializeTwice) {
-  if (!Snapshot::HaveASnapshotToStartFrom()) {
-    v8::Isolate* isolate = CcTest::isolate();
-    v8::HandleScope scope(isolate);
-    Deserialize();
-
-    v8::Local<v8::Context> env = v8::Context::New(isolate);
-    env->Enter();
-
-    const char* c_source = "\"1234\".length";
-    v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
-    v8::Local<v8::Script> script = v8::Script::Compile(source);
-    CHECK_EQ(4, script->Run()->Int32Value());
-  }
-}
-
-
-TEST(PartialSerialization) {
-  if (!Snapshot::HaveASnapshotToStartFrom()) {
-    Isolate* isolate = CcTest::i_isolate();
-    CcTest::i_isolate()->enable_serializer();
-    v8::V8::Initialize();
-    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
-    Heap* heap = isolate->heap();
-
-    v8::Persistent<v8::Context> env;
-    {
-      HandleScope scope(isolate);
-      env.Reset(v8_isolate, v8::Context::New(v8_isolate));
-    }
-    ASSERT(!env.IsEmpty());
-    {
-      v8::HandleScope handle_scope(v8_isolate);
-      v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
-    }
-    // Make sure all builtin scripts are cached.
-    { HandleScope scope(isolate);
-      for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
-        isolate->bootstrapper()->NativesSourceLookup(i);
-      }
-    }
-    heap->CollectAllGarbage(Heap::kNoGCFlags);
-    heap->CollectAllGarbage(Heap::kNoGCFlags);
-
-    Object* raw_foo;
-    {
-      v8::HandleScope handle_scope(v8_isolate);
-      v8::Local<v8::String> foo = v8::String::NewFromUtf8(v8_isolate, "foo");
-      ASSERT(!foo.IsEmpty());
-      raw_foo = *(v8::Utils::OpenHandle(*foo));
-    }
-
-    int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
-    Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
-    SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
-
-    {
-      v8::HandleScope handle_scope(v8_isolate);
-      v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
-    }
-    env.Reset();
-
-    FileByteSink startup_sink(startup_name.start());
-    StartupSerializer startup_serializer(isolate, &startup_sink);
-    startup_serializer.SerializeStrongReferences();
-
-    FileByteSink partial_sink(FLAG_testing_serialization_file);
-    PartialSerializer p_ser(isolate, &startup_serializer, &partial_sink);
-    p_ser.Serialize(&raw_foo);
-    startup_serializer.SerializeWeakReferences();
-
-    partial_sink.WriteSpaceUsed(
-        p_ser.CurrentAllocationAddress(NEW_SPACE),
-        p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
-        p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
-        p_ser.CurrentAllocationAddress(CODE_SPACE),
-        p_ser.CurrentAllocationAddress(MAP_SPACE),
-        p_ser.CurrentAllocationAddress(CELL_SPACE),
-        p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
-
-    startup_sink.WriteSpaceUsed(
-        startup_serializer.CurrentAllocationAddress(NEW_SPACE),
-        startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
-        startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
-        startup_serializer.CurrentAllocationAddress(CODE_SPACE),
-        startup_serializer.CurrentAllocationAddress(MAP_SPACE),
-        startup_serializer.CurrentAllocationAddress(CELL_SPACE),
-        startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
-    startup_name.Dispose();
-  }
-}
-
 
 static void ReserveSpaceForSnapshot(Deserializer* deserializer,
                                     const char* file_name) {
   int file_name_length = StrLength(file_name) + 10;
   Vector<char> name = Vector<char>::New(file_name_length + 1);
   SNPrintF(name, "%s.size", file_name);
-  FILE* fp = OS::FOpen(name.start(), "r");
+  FILE* fp = v8::base::OS::FOpen(name.start(), "r");
   name.Dispose();
   int new_size, pointer_size, data_size, code_size, map_size, cell_size,
       property_cell_size;
@@ -478,154 +273,370 @@
 }
 
 
-DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
-  if (!Snapshot::IsEnabled()) {
-    int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
-    Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
-    SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+v8::Isolate* InitializeFromFile(const char* snapshot_file) {
+  int len;
+  byte* str = ReadBytes(snapshot_file, &len);
+  if (!str) return NULL;
+  v8::Isolate* v8_isolate = NULL;
+  {
+    SnapshotByteSource source(str, len);
+    Deserializer deserializer(&source);
+    ReserveSpaceForSnapshot(&deserializer, snapshot_file);
+    Isolate* isolate = Isolate::NewForTesting();
+    v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+    v8::Isolate::Scope isolate_scope(v8_isolate);
+    isolate->Init(&deserializer);
+  }
+  DeleteArray(str);
+  return v8_isolate;
+}
 
-    CHECK(Snapshot::Initialize(startup_name.start()));
-    startup_name.Dispose();
 
-    const char* file_name = FLAG_testing_serialization_file;
+static v8::Isolate* Deserialize() {
+  v8::Isolate* isolate = InitializeFromFile(FLAG_testing_serialization_file);
+  CHECK(isolate);
+  return isolate;
+}
 
-    int snapshot_size = 0;
-    byte* snapshot = ReadBytes(file_name, &snapshot_size);
 
-    Isolate* isolate = CcTest::i_isolate();
-    Object* root;
+static void SanityCheck(v8::Isolate* v8_isolate) {
+  Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+  v8::HandleScope scope(v8_isolate);
+#ifdef VERIFY_HEAP
+  isolate->heap()->Verify();
+#endif
+  CHECK(isolate->global_object()->IsJSObject());
+  CHECK(isolate->native_context()->IsContext());
+  CHECK(isolate->heap()->string_table()->IsStringTable());
+  isolate->factory()->InternalizeOneByteString(STATIC_CHAR_VECTOR("Empty"));
+}
+
+
+UNINITIALIZED_DEPENDENT_TEST(Deserialize, Serialize) {
+  // The serialize-deserialize tests only work if the VM is built without
+  // serialization.  That doesn't matter.  We don't need to be able to
+  // serialize a snapshot in a VM that is booted from a snapshot.
+  if (!Snapshot::HaveASnapshotToStartFrom()) {
+    v8::Isolate* isolate = Deserialize();
     {
-      SnapshotByteSource source(snapshot, snapshot_size);
-      Deserializer deserializer(&source);
-      ReserveSpaceForSnapshot(&deserializer, file_name);
-      deserializer.DeserializePartial(isolate, &root);
-      CHECK(root->IsString());
-    }
-    HandleScope handle_scope(isolate);
-    Handle<Object> root_handle(root, isolate);
+      v8::HandleScope handle_scope(isolate);
+      v8::Isolate::Scope isolate_scope(isolate);
 
+      v8::Local<v8::Context> env = v8::Context::New(isolate);
+      env->Enter();
 
-    Object* root2;
-    {
-      SnapshotByteSource source(snapshot, snapshot_size);
-      Deserializer deserializer(&source);
-      ReserveSpaceForSnapshot(&deserializer, file_name);
-      deserializer.DeserializePartial(isolate, &root2);
-      CHECK(root2->IsString());
-      CHECK(*root_handle == root2);
+      SanityCheck(isolate);
     }
+    isolate->Dispose();
   }
 }
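
UNINITIALIZED_DEPENDENT_TEST(Deserialize, Serialize) declares an ordering
dependency so the harness runs Serialize first and
FLAG_testing_serialization_file exists on disk by the time Deserialize boots
from it via InitializeFromFile. Each reader test below follows the same
skeleton:

    v8::Isolate* isolate = Deserialize();  // Boots from the snapshot file.
    {
      v8::Isolate::Scope isolate_scope(isolate);
      v8::HandleScope handle_scope(isolate);
      v8::Local<v8::Context> env = v8::Context::New(isolate);
      env->Enter();
      // ... run a script or SanityCheck against the deserialized heap ...
    }
    isolate->Dispose();  // Tear down the per-test isolate.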
 
 
-TEST(ContextSerialization) {
+UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerialization,
+                             SerializeTwice) {
   if (!Snapshot::HaveASnapshotToStartFrom()) {
-    Isolate* isolate = CcTest::i_isolate();
-    CcTest::i_isolate()->enable_serializer();
-    v8::V8::Initialize();
-    v8::Isolate* v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
-    Heap* heap = isolate->heap();
+    v8::Isolate* isolate = Deserialize();
+    {
+      v8::Isolate::Scope isolate_scope(isolate);
+      v8::HandleScope handle_scope(isolate);
 
-    v8::Persistent<v8::Context> env;
-    {
-      HandleScope scope(isolate);
-      env.Reset(v8_isolate, v8::Context::New(v8_isolate));
+      v8::Local<v8::Context> env = v8::Context::New(isolate);
+      env->Enter();
+
+      SanityCheck(isolate);
     }
-    ASSERT(!env.IsEmpty());
+    isolate->Dispose();
+  }
+}
+
+
+UNINITIALIZED_DEPENDENT_TEST(DeserializeAndRunScript2, Serialize) {
+  if (!Snapshot::HaveASnapshotToStartFrom()) {
+    v8::Isolate* isolate = Deserialize();
     {
-      v8::HandleScope handle_scope(v8_isolate);
-      v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+      v8::Isolate::Scope isolate_scope(isolate);
+      v8::HandleScope handle_scope(isolate);
+
+
+      v8::Local<v8::Context> env = v8::Context::New(isolate);
+      env->Enter();
+
+      const char* c_source = "\"1234\".length";
+      v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
+      v8::Local<v8::Script> script = v8::Script::Compile(source);
+      CHECK_EQ(4, script->Run()->Int32Value());
     }
-    // Make sure all builtin scripts are cached.
-    { HandleScope scope(isolate);
-      for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
-        isolate->bootstrapper()->NativesSourceLookup(i);
+    isolate->Dispose();
+  }
+}
+
+
+UNINITIALIZED_DEPENDENT_TEST(DeserializeFromSecondSerializationAndRunScript2,
+                             SerializeTwice) {
+  if (!Snapshot::HaveASnapshotToStartFrom()) {
+    v8::Isolate* isolate = Deserialize();
+    {
+      v8::Isolate::Scope isolate_scope(isolate);
+      v8::HandleScope handle_scope(isolate);
+
+      v8::Local<v8::Context> env = v8::Context::New(isolate);
+      env->Enter();
+
+      const char* c_source = "\"1234\".length";
+      v8::Local<v8::String> source = v8::String::NewFromUtf8(isolate, c_source);
+      v8::Local<v8::Script> script = v8::Script::Compile(source);
+      CHECK_EQ(4, script->Run()->Int32Value());
+    }
+    isolate->Dispose();
+  }
+}
+
+
+UNINITIALIZED_TEST(PartialSerialization) {
+  if (!Snapshot::HaveASnapshotToStartFrom()) {
+    v8::Isolate::CreateParams params;
+    params.enable_serializer = true;
+    v8::Isolate* v8_isolate = v8::Isolate::New(params);
+    Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+    v8_isolate->Enter();
+    {
+      Heap* heap = isolate->heap();
+
+      v8::Persistent<v8::Context> env;
+      {
+        HandleScope scope(isolate);
+        env.Reset(v8_isolate, v8::Context::New(v8_isolate));
+      }
+      DCHECK(!env.IsEmpty());
+      {
+        v8::HandleScope handle_scope(v8_isolate);
+        v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+      }
+      // Make sure all builtin scripts are cached.
+      {
+        HandleScope scope(isolate);
+        for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+          isolate->bootstrapper()->NativesSourceLookup(i);
+        }
+      }
+      heap->CollectAllGarbage(Heap::kNoGCFlags);
+      heap->CollectAllGarbage(Heap::kNoGCFlags);
+
+      Object* raw_foo;
+      {
+        v8::HandleScope handle_scope(v8_isolate);
+        v8::Local<v8::String> foo = v8::String::NewFromUtf8(v8_isolate, "foo");
+        DCHECK(!foo.IsEmpty());
+        raw_foo = *(v8::Utils::OpenHandle(*foo));
+      }
+
+      int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+      Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+      SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+      {
+        v8::HandleScope handle_scope(v8_isolate);
+        v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+      }
+      env.Reset();
+
+      FileByteSink startup_sink(startup_name.start());
+      StartupSerializer startup_serializer(isolate, &startup_sink);
+      startup_serializer.SerializeStrongReferences();
+
+      FileByteSink partial_sink(FLAG_testing_serialization_file);
+      PartialSerializer p_ser(isolate, &startup_serializer, &partial_sink);
+      p_ser.Serialize(&raw_foo);
+      startup_serializer.SerializeWeakReferences();
+
+      partial_sink.WriteSpaceUsed(
+          p_ser.CurrentAllocationAddress(NEW_SPACE),
+          p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+          p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+          p_ser.CurrentAllocationAddress(CODE_SPACE),
+          p_ser.CurrentAllocationAddress(MAP_SPACE),
+          p_ser.CurrentAllocationAddress(CELL_SPACE),
+          p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
+
+      startup_sink.WriteSpaceUsed(
+          startup_serializer.CurrentAllocationAddress(NEW_SPACE),
+          startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
+          startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
+          startup_serializer.CurrentAllocationAddress(CODE_SPACE),
+          startup_serializer.CurrentAllocationAddress(MAP_SPACE),
+          startup_serializer.CurrentAllocationAddress(CELL_SPACE),
+          startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
+      startup_name.Dispose();
+    }
+    v8_isolate->Exit();
+    v8_isolate->Dispose();
+  }
+}
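
Within PartialSerialization the call order appears significant: strong
startup roots are serialized first, then the partial payload, then weak
references, so that objects already present in the startup snapshot can be
emitted as references rather than duplicated (this is the apparent contract;
the serializer internals are not shown in this diff):

    startup_serializer.SerializeStrongReferences();  // Startup roots first.
    p_ser.Serialize(&raw_foo);                       // Partial payload.
    startup_serializer.SerializeWeakReferences();    // Weak refs last.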
+
+
+UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
+  if (!Snapshot::HaveASnapshotToStartFrom()) {
+    int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+    Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+    SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+    v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+    CHECK(v8_isolate);
+    startup_name.Dispose();
+    {
+      v8::Isolate::Scope isolate_scope(v8_isolate);
+
+      const char* file_name = FLAG_testing_serialization_file;
+
+      int snapshot_size = 0;
+      byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+      Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+      Object* root;
+      {
+        SnapshotByteSource source(snapshot, snapshot_size);
+        Deserializer deserializer(&source);
+        ReserveSpaceForSnapshot(&deserializer, file_name);
+        deserializer.DeserializePartial(isolate, &root);
+        CHECK(root->IsString());
+      }
+      HandleScope handle_scope(isolate);
+      Handle<Object> root_handle(root, isolate);
+
+
+      Object* root2;
+      {
+        SnapshotByteSource source(snapshot, snapshot_size);
+        Deserializer deserializer(&source);
+        ReserveSpaceForSnapshot(&deserializer, file_name);
+        deserializer.DeserializePartial(isolate, &root2);
+        CHECK(root2->IsString());
+        CHECK(*root_handle == root2);
       }
     }
-    // If we don't do this then we end up with a stray root pointing at the
-    // context even after we have disposed of env.
-    heap->CollectAllGarbage(Heap::kNoGCFlags);
-
-    int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
-    Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
-    SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
-
-    {
-      v8::HandleScope handle_scope(v8_isolate);
-      v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
-    }
-
-    i::Object* raw_context = *v8::Utils::OpenPersistent(env);
-
-    env.Reset();
-
-    FileByteSink startup_sink(startup_name.start());
-    StartupSerializer startup_serializer(isolate, &startup_sink);
-    startup_serializer.SerializeStrongReferences();
-
-    FileByteSink partial_sink(FLAG_testing_serialization_file);
-    PartialSerializer p_ser(isolate, &startup_serializer, &partial_sink);
-    p_ser.Serialize(&raw_context);
-    startup_serializer.SerializeWeakReferences();
-
-    partial_sink.WriteSpaceUsed(
-        p_ser.CurrentAllocationAddress(NEW_SPACE),
-        p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
-        p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
-        p_ser.CurrentAllocationAddress(CODE_SPACE),
-        p_ser.CurrentAllocationAddress(MAP_SPACE),
-        p_ser.CurrentAllocationAddress(CELL_SPACE),
-        p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
-
-    startup_sink.WriteSpaceUsed(
-        startup_serializer.CurrentAllocationAddress(NEW_SPACE),
-        startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
-        startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
-        startup_serializer.CurrentAllocationAddress(CODE_SPACE),
-        startup_serializer.CurrentAllocationAddress(MAP_SPACE),
-        startup_serializer.CurrentAllocationAddress(CELL_SPACE),
-        startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
-    startup_name.Dispose();
+    v8_isolate->Dispose();
   }
 }
 
 
-DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
+UNINITIALIZED_TEST(ContextSerialization) {
+  if (!Snapshot::HaveASnapshotToStartFrom()) {
+    v8::Isolate::CreateParams params;
+    params.enable_serializer = true;
+    v8::Isolate* v8_isolate = v8::Isolate::New(params);
+    Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+    Heap* heap = isolate->heap();
+    {
+      v8::Isolate::Scope isolate_scope(v8_isolate);
+
+      v8::Persistent<v8::Context> env;
+      {
+        HandleScope scope(isolate);
+        env.Reset(v8_isolate, v8::Context::New(v8_isolate));
+      }
+      DCHECK(!env.IsEmpty());
+      {
+        v8::HandleScope handle_scope(v8_isolate);
+        v8::Local<v8::Context>::New(v8_isolate, env)->Enter();
+      }
+      // Make sure all builtin scripts are cached.
+      {
+        HandleScope scope(isolate);
+        for (int i = 0; i < Natives::GetBuiltinsCount(); i++) {
+          isolate->bootstrapper()->NativesSourceLookup(i);
+        }
+      }
+      // If we don't do this then we end up with a stray root pointing at the
+      // context even after we have disposed of env.
+      heap->CollectAllGarbage(Heap::kNoGCFlags);
+
+      int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
+      Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
+      SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
+
+      {
+        v8::HandleScope handle_scope(v8_isolate);
+        v8::Local<v8::Context>::New(v8_isolate, env)->Exit();
+      }
+
+      i::Object* raw_context = *v8::Utils::OpenPersistent(env);
+
+      env.Reset();
+
+      FileByteSink startup_sink(startup_name.start());
+      StartupSerializer startup_serializer(isolate, &startup_sink);
+      startup_serializer.SerializeStrongReferences();
+
+      FileByteSink partial_sink(FLAG_testing_serialization_file);
+      PartialSerializer p_ser(isolate, &startup_serializer, &partial_sink);
+      p_ser.Serialize(&raw_context);
+      startup_serializer.SerializeWeakReferences();
+
+      partial_sink.WriteSpaceUsed(
+          p_ser.CurrentAllocationAddress(NEW_SPACE),
+          p_ser.CurrentAllocationAddress(OLD_POINTER_SPACE),
+          p_ser.CurrentAllocationAddress(OLD_DATA_SPACE),
+          p_ser.CurrentAllocationAddress(CODE_SPACE),
+          p_ser.CurrentAllocationAddress(MAP_SPACE),
+          p_ser.CurrentAllocationAddress(CELL_SPACE),
+          p_ser.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
+
+      startup_sink.WriteSpaceUsed(
+          startup_serializer.CurrentAllocationAddress(NEW_SPACE),
+          startup_serializer.CurrentAllocationAddress(OLD_POINTER_SPACE),
+          startup_serializer.CurrentAllocationAddress(OLD_DATA_SPACE),
+          startup_serializer.CurrentAllocationAddress(CODE_SPACE),
+          startup_serializer.CurrentAllocationAddress(MAP_SPACE),
+          startup_serializer.CurrentAllocationAddress(CELL_SPACE),
+          startup_serializer.CurrentAllocationAddress(PROPERTY_CELL_SPACE));
+      startup_name.Dispose();
+    }
+    v8_isolate->Dispose();
+  }
+}
+
+
+UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
   if (!Snapshot::HaveASnapshotToStartFrom()) {
     int file_name_length = StrLength(FLAG_testing_serialization_file) + 10;
     Vector<char> startup_name = Vector<char>::New(file_name_length + 1);
     SNPrintF(startup_name, "%s.startup", FLAG_testing_serialization_file);
 
-    CHECK(Snapshot::Initialize(startup_name.start()));
+    v8::Isolate* v8_isolate = InitializeFromFile(startup_name.start());
+    CHECK(v8_isolate);
     startup_name.Dispose();
-
-    const char* file_name = FLAG_testing_serialization_file;
-
-    int snapshot_size = 0;
-    byte* snapshot = ReadBytes(file_name, &snapshot_size);
-
-    Isolate* isolate = CcTest::i_isolate();
-    Object* root;
     {
-      SnapshotByteSource source(snapshot, snapshot_size);
-      Deserializer deserializer(&source);
-      ReserveSpaceForSnapshot(&deserializer, file_name);
-      deserializer.DeserializePartial(isolate, &root);
-      CHECK(root->IsContext());
-    }
-    HandleScope handle_scope(isolate);
-    Handle<Object> root_handle(root, isolate);
+      v8::Isolate::Scope isolate_scope(v8_isolate);
+
+      const char* file_name = FLAG_testing_serialization_file;
+
+      int snapshot_size = 0;
+      byte* snapshot = ReadBytes(file_name, &snapshot_size);
+
+      Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
+      Object* root;
+      {
+        SnapshotByteSource source(snapshot, snapshot_size);
+        Deserializer deserializer(&source);
+        ReserveSpaceForSnapshot(&deserializer, file_name);
+        deserializer.DeserializePartial(isolate, &root);
+        CHECK(root->IsContext());
+      }
+      HandleScope handle_scope(isolate);
+      Handle<Object> root_handle(root, isolate);
 
 
-    Object* root2;
-    {
-      SnapshotByteSource source(snapshot, snapshot_size);
-      Deserializer deserializer(&source);
-      ReserveSpaceForSnapshot(&deserializer, file_name);
-      deserializer.DeserializePartial(isolate, &root2);
-      CHECK(root2->IsContext());
-      CHECK(*root_handle != root2);
+      Object* root2;
+      {
+        SnapshotByteSource source(snapshot, snapshot_size);
+        Deserializer deserializer(&source);
+        ReserveSpaceForSnapshot(&deserializer, file_name);
+        deserializer.DeserializePartial(isolate, &root2);
+        CHECK(root2->IsContext());
+        CHECK(*root_handle != root2);
+      }
     }
+    v8_isolate->Dispose();
   }
 }
 
@@ -644,3 +655,185 @@
   bool ArtificialFailure2 = false;
   CHECK(ArtificialFailure2);
 }
+
+
+int CountBuiltins() {
+  // Check that we have not deserialized any additional builtin.
+  HeapIterator iterator(CcTest::heap());
+  DisallowHeapAllocation no_allocation;
+  int counter = 0;
+  for (HeapObject* obj = iterator.next(); obj != NULL; obj = iterator.next()) {
+    if (obj->IsCode() && Code::cast(obj)->kind() == Code::BUILTIN) counter++;
+  }
+  return counter;
+}
+
+
+TEST(SerializeToplevelOnePlusOne) {
+  FLAG_serialize_toplevel = true;
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  const char* source = "1 + 1";
+
+  Handle<String> orig_source = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  Handle<String> copy_source = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  CHECK(!orig_source.is_identical_to(copy_source));
+  CHECK(orig_source->Equals(*copy_source));
+
+  ScriptData* cache = NULL;
+
+  Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+      orig_source, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, &cache,
+      v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+
+  int builtins_count = CountBuiltins();
+
+  Handle<SharedFunctionInfo> copy;
+  {
+    DisallowCompilation no_compile_expected(isolate);
+    copy = Compiler::CompileScript(
+        copy_source, Handle<String>(), 0, 0, false,
+        Handle<Context>(isolate->native_context()), NULL, &cache,
+        v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+  }
+
+  CHECK_NE(*orig, *copy);
+  CHECK(Script::cast(copy->script())->source() == *copy_source);
+
+  Handle<JSFunction> copy_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          copy, isolate->native_context());
+  Handle<JSObject> global(isolate->context()->global_object());
+  Handle<Object> copy_result =
+      Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+  CHECK_EQ(2, Handle<Smi>::cast(copy_result)->value());
+
+  CHECK_EQ(builtins_count, CountBuiltins());
+
+  delete cache;
+}
+
+
+TEST(SerializeToplevelInternalizedString) {
+  FLAG_serialize_toplevel = true;
+  LocalContext context;
+  Isolate* isolate = CcTest::i_isolate();
+  isolate->compilation_cache()->Disable();  // Disable same-isolate code cache.
+
+  v8::HandleScope scope(CcTest::isolate());
+
+  const char* source = "'string1'";
+
+  Handle<String> orig_source = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  Handle<String> copy_source = isolate->factory()
+                                   ->NewStringFromUtf8(CStrVector(source))
+                                   .ToHandleChecked();
+  CHECK(!orig_source.is_identical_to(copy_source));
+  CHECK(orig_source->Equals(*copy_source));
+
+  Handle<JSObject> global(isolate->context()->global_object());
+  ScriptData* cache = NULL;
+
+  Handle<SharedFunctionInfo> orig = Compiler::CompileScript(
+      orig_source, Handle<String>(), 0, 0, false,
+      Handle<Context>(isolate->native_context()), NULL, &cache,
+      v8::ScriptCompiler::kProduceCodeCache, NOT_NATIVES_CODE);
+  Handle<JSFunction> orig_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          orig, isolate->native_context());
+  Handle<Object> orig_result =
+      Execution::Call(isolate, orig_fun, global, 0, NULL).ToHandleChecked();
+  CHECK(orig_result->IsInternalizedString());
+
+  int builtins_count = CountBuiltins();
+
+  Handle<SharedFunctionInfo> copy;
+  {
+    DisallowCompilation no_compile_expected(isolate);
+    copy = Compiler::CompileScript(
+        copy_source, Handle<String>(), 0, 0, false,
+        Handle<Context>(isolate->native_context()), NULL, &cache,
+        v8::ScriptCompiler::kConsumeCodeCache, NOT_NATIVES_CODE);
+  }
+  CHECK_NE(*orig, *copy);
+  CHECK(Script::cast(copy->script())->source() == *copy_source);
+
+  Handle<JSFunction> copy_fun =
+      isolate->factory()->NewFunctionFromSharedFunctionInfo(
+          copy, isolate->native_context());
+  CHECK_NE(*orig_fun, *copy_fun);
+  Handle<Object> copy_result =
+      Execution::Call(isolate, copy_fun, global, 0, NULL).ToHandleChecked();
+  CHECK(orig_result.is_identical_to(copy_result));
+  Handle<String> expected =
+      isolate->factory()->NewStringFromAsciiChecked("string1");
+
+  CHECK(Handle<String>::cast(copy_result)->Equals(*expected));
+  CHECK_EQ(builtins_count, CountBuiltins());
+
+  delete cache;
+}
+
+
+TEST(SerializeToplevelIsolates) {
+  FLAG_serialize_toplevel = true;
+
+  const char* source = "function f() { return 'abc'; }; f() + 'def'";
+  v8::ScriptCompiler::CachedData* cache;
+
+  v8::Isolate* isolate1 = v8::Isolate::New();
+  {
+    v8::Isolate::Scope iscope(isolate1);
+    v8::HandleScope scope(isolate1);
+    v8::Local<v8::Context> context = v8::Context::New(isolate1);
+    v8::Context::Scope context_scope(context);
+
+    v8::Local<v8::String> source_str = v8_str(source);
+    v8::ScriptOrigin origin(v8_str("test"));
+    v8::ScriptCompiler::Source source(source_str, origin);
+    v8::Local<v8::UnboundScript> script = v8::ScriptCompiler::CompileUnbound(
+        isolate1, &source, v8::ScriptCompiler::kProduceCodeCache);
+    const v8::ScriptCompiler::CachedData* data = source.GetCachedData();
+    // Persist cached data.
+    uint8_t* buffer = NewArray<uint8_t>(data->length);
+    MemCopy(buffer, data->data, data->length);
+    cache = new v8::ScriptCompiler::CachedData(
+        buffer, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
+
+    v8::Local<v8::Value> result = script->BindToCurrentContext()->Run();
+    CHECK(result->ToString()->Equals(v8_str("abcdef")));
+  }
+  isolate1->Dispose();
+
+  v8::Isolate* isolate2 = v8::Isolate::New();
+  {
+    v8::Isolate::Scope iscope(isolate2);
+    v8::HandleScope scope(isolate2);
+    v8::Local<v8::Context> context = v8::Context::New(isolate2);
+    v8::Context::Scope context_scope(context);
+
+    v8::Local<v8::String> source_str = v8_str(source);
+    v8::ScriptOrigin origin(v8_str("test"));
+    v8::ScriptCompiler::Source source(source_str, origin, cache);
+    v8::Local<v8::UnboundScript> script;
+    {
+      DisallowCompilation no_compile(reinterpret_cast<Isolate*>(isolate2));
+      script = v8::ScriptCompiler::CompileUnbound(
+          isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache);
+    }
+    v8::Local<v8::Value> result = script->BindToCurrentContext()->Run();
+    CHECK(result->ToString()->Equals(v8_str("abcdef")));
+  }
+  isolate2->Dispose();
+}
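
The SerializeToplevel tests above all follow one code-cache round trip: compile a script with kProduceCodeCache, persist the CachedData bytes, then compile the identical source again with kConsumeCodeCache, asserting under DisallowCompilation that no recompilation happened and (via CountBuiltins) that no extra builtins were deserialized. A minimal sketch of that round trip against the public API this patch uses follows; the helper names BuildCache and FromCache are illustrative, not from the patch, and the caller is assumed to have entered an isolate and a context exactly as the tests do.

    // Sketch only: persist a script's code cache across isolates, mirroring
    // TEST(SerializeToplevelIsolates). BuildCache/FromCache are hypothetical.
    #include <cstring>
    #include "include/v8.h"

    v8::ScriptCompiler::CachedData* BuildCache(v8::Isolate* isolate,
                                               const char* code) {
      v8::HandleScope scope(isolate);
      v8::ScriptCompiler::Source src(v8::String::NewFromUtf8(isolate, code));
      v8::ScriptCompiler::CompileUnbound(
          isolate, &src, v8::ScriptCompiler::kProduceCodeCache);
      const v8::ScriptCompiler::CachedData* data = src.GetCachedData();
      // The Source owns its CachedData, so copy the bytes out; the copy is
      // what lets the cache outlive the producing isolate.
      uint8_t* buf = new uint8_t[data->length];
      memcpy(buf, data->data, data->length);
      return new v8::ScriptCompiler::CachedData(
          buf, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
    }

    v8::Local<v8::UnboundScript> FromCache(
        v8::Isolate* isolate, const char* code,
        v8::ScriptCompiler::CachedData* cache) {
      v8::EscapableHandleScope scope(isolate);
      // The Source takes ownership of |cache|.
      v8::ScriptCompiler::Source src(v8::String::NewFromUtf8(isolate, code),
                                     cache);
      return scope.Escape(v8::ScriptCompiler::CompileUnbound(
          isolate, &src, v8::ScriptCompiler::kConsumeCodeCache));
    }
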
diff --git a/test/cctest/test-spaces.cc b/test/cctest/test-spaces.cc
index cc73cfc..d09c128 100644
--- a/test/cctest/test-spaces.cc
+++ b/test/cctest/test-spaces.cc
@@ -27,9 +27,11 @@
 
 #include <stdlib.h>
 
+#include "src/snapshot.h"
 #include "src/v8.h"
 #include "test/cctest/cctest.h"
 
+
 using namespace v8::internal;
 
 #if 0
@@ -170,11 +172,13 @@
                                                               executable,
                                                               NULL);
   size_t alignment = code_range != NULL && code_range->valid() ?
-                     MemoryChunk::kAlignment : OS::CommitPageSize();
-  size_t reserved_size = ((executable == EXECUTABLE))
-      ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
-                alignment)
-      : RoundUp(header_size + reserve_area_size, OS::CommitPageSize());
+                     MemoryChunk::kAlignment : v8::base::OS::CommitPageSize();
+  size_t reserved_size =
+      ((executable == EXECUTABLE))
+          ? RoundUp(header_size + guard_size + reserve_area_size + guard_size,
+                    alignment)
+          : RoundUp(header_size + reserve_area_size,
+                    v8::base::OS::CommitPageSize());
   CHECK(memory_chunk->size() == reserved_size);
   CHECK(memory_chunk->area_start() < memory_chunk->address() +
                                      memory_chunk->size());
@@ -199,6 +203,33 @@
 }
 
 
+TEST(Regress3540) {
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
+  CHECK(
+      memory_allocator->SetUp(heap->MaxReserved(), heap->MaxExecutableSize()));
+  TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
+  CodeRange* code_range = new CodeRange(isolate);
+  const size_t code_range_size = 4 * MB;
+  if (!code_range->SetUp(code_range_size)) return;
+  Address address;
+  size_t size;
+  address = code_range->AllocateRawMemory(code_range_size - MB,
+                                          code_range_size - MB, &size);
+  CHECK(address != NULL);
+  Address null_address;
+  size_t null_size;
+  null_address = code_range->AllocateRawMemory(
+      code_range_size - MB, code_range_size - MB, &null_size);
+  CHECK(null_address == NULL);
+  code_range->FreeRawMemory(address, size);
+  delete code_range;
+  memory_allocator->TearDown();
+  delete memory_allocator;
+}
+
+
 static unsigned int Pseudorandom() {
   static uint32_t lo = 2345;
   lo = 18273 * (lo & 0xFFFFF) + (lo >> 16);
@@ -208,9 +239,7 @@
 
 TEST(MemoryChunk) {
   Isolate* isolate = CcTest::i_isolate();
-  isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
-  CHECK(heap->ConfigureHeapDefault());
 
   size_t reserve_area_size = 1 * MB;
   size_t initial_commit_area_size, second_commit_area_size;
@@ -264,9 +293,7 @@
 
 TEST(MemoryAllocator) {
   Isolate* isolate = CcTest::i_isolate();
-  isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
-  CHECK(isolate->heap()->ConfigureHeapDefault());
 
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
   CHECK(memory_allocator->SetUp(heap->MaxReserved(),
@@ -313,9 +340,7 @@
 
 TEST(NewSpace) {
   Isolate* isolate = CcTest::i_isolate();
-  isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
-  CHECK(heap->ConfigureHeapDefault());
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
   CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                 heap->MaxExecutableSize()));
@@ -341,9 +366,7 @@
 
 TEST(OldSpace) {
   Isolate* isolate = CcTest::i_isolate();
-  isolate->InitializeLoggingAndCounters();
   Heap* heap = isolate->heap();
-  CHECK(heap->ConfigureHeapDefault());
   MemoryAllocator* memory_allocator = new MemoryAllocator(isolate);
   CHECK(memory_allocator->SetUp(heap->MaxReserved(),
                                 heap->MaxExecutableSize()));
@@ -403,6 +426,8 @@
 
 TEST(SizeOfFirstPageIsLargeEnough) {
   if (i::FLAG_always_opt) return;
+  // Bootstrapping without a snapshot causes more allocations.
+  if (!i::Snapshot::HaveASnapshotToStartFrom()) return;
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
 
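
The reformatted assertion in the MemoryChunk hunk above encodes the size rule being tested: an executable chunk reserves header + guard + area + guard rounded up to the chunk alignment, while a non-executable chunk reserves header + area rounded up to the commit page size. A self-contained illustration of that arithmetic; the constants below are made-up examples, not V8's actual header or guard sizes.

    // Sketch of the reserved-size rule asserted in the hunk above.
    #include <cstddef>
    #include <cstdio>

    static size_t RoundUp(size_t x, size_t multiple) {
      return ((x + multiple - 1) / multiple) * multiple;
    }

    int main() {
      const size_t header = 256;          // chunk header (made up)
      const size_t guard = 4096;          // one guard page
      const size_t area = 1u << 20;       // requested reserve area
      const size_t alignment = 1u << 12;  // MemoryChunk::kAlignment stand-in

      // Executable: a guard page on each side of the area, chunk-aligned.
      size_t executable = RoundUp(header + guard + area + guard, alignment);
      // Non-executable: no guards, rounded to the commit page size.
      size_t data = RoundUp(header + area, 4096);
      printf("executable=%zu data=%zu\n", executable, data);
      return 0;
    }
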
diff --git a/test/cctest/test-strings.cc b/test/cctest/test-strings.cc
index 904e207..ef13c4d 100644
--- a/test/cctest/test-strings.cc
+++ b/test/cctest/test-strings.cc
@@ -77,7 +77,7 @@
   }
 
   bool next(double threshold) {
-    ASSERT(threshold >= 0.0 && threshold <= 1.0);
+    DCHECK(threshold >= 0.0 && threshold <= 1.0);
     if (threshold == 1.0) return true;
     if (threshold == 0.0) return false;
     uint32_t value = next() % 100000;
@@ -112,11 +112,11 @@
 };
 
 
-class AsciiResource: public v8::String::ExternalAsciiStringResource {
+class OneByteResource : public v8::String::ExternalOneByteStringResource {
  public:
-  AsciiResource(const char* data, size_t length)
+  OneByteResource(const char* data, size_t length)
       : data_(data), length_(length) {}
-  ~AsciiResource() { i::DeleteArray(data_); }
+  ~OneByteResource() { i::DeleteArray(data_); }
   virtual const char* data() const { return data_; }
   virtual size_t length() const { return length_; }
 
@@ -202,7 +202,7 @@
         for (int j = 0; j < len; j++) {
           buf[j] = rng->next(0x80);
         }
-        AsciiResource* resource = new AsciiResource(buf, len);
+        OneByteResource* resource = new OneByteResource(buf, len);
         building_blocks[i] =
             v8::Utils::OpenHandle(
                 *v8::String::NewExternal(CcTest::isolate(), resource));
@@ -454,7 +454,7 @@
     ConsStringGenerationData* data,
     int depth) {
   Factory* factory = CcTest::i_isolate()->factory();
-  Handle<String> answer = factory->NewStringFromStaticAscii("");
+  Handle<String> answer = factory->NewStringFromStaticChars("");
   data->stats_.leaves_++;
   for (int i = 0; i < depth; i++) {
     Handle<String> block = data->block(i);
@@ -473,7 +473,7 @@
     ConsStringGenerationData* data,
     int depth) {
   Factory* factory = CcTest::i_isolate()->factory();
-  Handle<String> answer = factory->NewStringFromStaticAscii("");
+  Handle<String> answer = factory->NewStringFromStaticChars("");
   data->stats_.leaves_++;
   for (int i = depth - 1; i >= 0; i--) {
     Handle<String> block = data->block(i);
@@ -848,23 +848,23 @@
 }
 
 
-static const int DEEP_ASCII_DEPTH = 100000;
+static const int kDeepOneByteDepth = 100000;
 
 
-TEST(DeepAscii) {
-  printf("TestDeepAscii\n");
+TEST(DeepOneByte) {
   CcTest::InitializeVM();
   Factory* factory = CcTest::i_isolate()->factory();
   v8::HandleScope scope(CcTest::isolate());
 
-  char* foo = NewArray<char>(DEEP_ASCII_DEPTH);
-  for (int i = 0; i < DEEP_ASCII_DEPTH; i++) {
+  char* foo = NewArray<char>(kDeepOneByteDepth);
+  for (int i = 0; i < kDeepOneByteDepth; i++) {
     foo[i] = "foo "[i % 4];
   }
-  Handle<String> string = factory->NewStringFromOneByte(
-      OneByteVector(foo, DEEP_ASCII_DEPTH)).ToHandleChecked();
-  Handle<String> foo_string = factory->NewStringFromStaticAscii("foo");
-  for (int i = 0; i < DEEP_ASCII_DEPTH; i += 10) {
+  Handle<String> string =
+      factory->NewStringFromOneByte(OneByteVector(foo, kDeepOneByteDepth))
+          .ToHandleChecked();
+  Handle<String> foo_string = factory->NewStringFromStaticChars("foo");
+  for (int i = 0; i < kDeepOneByteDepth; i += 10) {
     string = factory->NewConsString(string, foo_string).ToHandleChecked();
   }
   Handle<String> flat_string =
@@ -872,7 +872,7 @@
   String::Flatten(flat_string);
 
   for (int i = 0; i < 500; i++) {
-    TraverseFirst(flat_string, string, DEEP_ASCII_DEPTH);
+    TraverseFirst(flat_string, string, kDeepOneByteDepth);
   }
   DeleteArray<char>(foo);
 }
@@ -882,13 +882,13 @@
   // Smoke test for converting strings to utf-8.
   CcTest::InitializeVM();
   v8::HandleScope handle_scope(CcTest::isolate());
-  // A simple ascii string
-  const char* ascii_string = "abcdef12345";
-  int len = v8::String::NewFromUtf8(CcTest::isolate(), ascii_string,
+  // A simple one-byte string
+  const char* one_byte_string = "abcdef12345";
+  int len = v8::String::NewFromUtf8(CcTest::isolate(), one_byte_string,
                                     v8::String::kNormalString,
-                                    StrLength(ascii_string))->Utf8Length();
-  CHECK_EQ(StrLength(ascii_string), len);
-  // A mixed ascii and non-ascii string
+                                    StrLength(one_byte_string))->Utf8Length();
+  CHECK_EQ(StrLength(one_byte_string), len);
+  // A mixed one-byte and two-byte string
   // U+02E4 -> CB A4
   // U+0064 -> 64
   // U+12E4 -> E1 8B A4
@@ -934,79 +934,89 @@
   CHECK_GT(kMaxLength, i::ConsString::kMinLength);
 
   // Allocate two JavaScript arrays for holding short strings.
-  v8::Handle<v8::Array> ascii_external_strings =
+  v8::Handle<v8::Array> one_byte_external_strings =
       v8::Array::New(CcTest::isolate(), kMaxLength + 1);
-  v8::Handle<v8::Array> non_ascii_external_strings =
+  v8::Handle<v8::Array> non_one_byte_external_strings =
       v8::Array::New(CcTest::isolate(), kMaxLength + 1);
 
-  // Generate short ascii and non-ascii external strings.
+  // Generate short one-byte and two-byte external strings.
   for (int i = 0; i <= kMaxLength; i++) {
-    char* ascii = NewArray<char>(i + 1);
+    char* one_byte = NewArray<char>(i + 1);
     for (int j = 0; j < i; j++) {
-      ascii[j] = 'a';
+      one_byte[j] = 'a';
     }
     // Terminating '\0' is left out on purpose. It is not required for external
     // string data.
-    AsciiResource* ascii_resource = new AsciiResource(ascii, i);
-    v8::Local<v8::String> ascii_external_string =
-        v8::String::NewExternal(CcTest::isolate(), ascii_resource);
+    OneByteResource* one_byte_resource = new OneByteResource(one_byte, i);
+    v8::Local<v8::String> one_byte_external_string =
+        v8::String::NewExternal(CcTest::isolate(), one_byte_resource);
 
-    ascii_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
-                                ascii_external_string);
-    uc16* non_ascii = NewArray<uc16>(i + 1);
+    one_byte_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
+                                   one_byte_external_string);
+    uc16* non_one_byte = NewArray<uc16>(i + 1);
     for (int j = 0; j < i; j++) {
-      non_ascii[j] = 0x1234;
+      non_one_byte[j] = 0x1234;
     }
     // Terminating '\0' is left out on purpose. It is not required for external
     // string data.
-    Resource* resource = new Resource(non_ascii, i);
-    v8::Local<v8::String> non_ascii_external_string =
-      v8::String::NewExternal(CcTest::isolate(), resource);
-    non_ascii_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
-                                    non_ascii_external_string);
+    Resource* resource = new Resource(non_one_byte, i);
+    v8::Local<v8::String> non_one_byte_external_string =
+        v8::String::NewExternal(CcTest::isolate(), resource);
+    non_one_byte_external_strings->Set(v8::Integer::New(CcTest::isolate(), i),
+                                       non_one_byte_external_string);
   }
 
   // Add the arrays with the short external strings in the global object.
   v8::Handle<v8::Object> global = context->Global();
-  global->Set(v8_str("external_ascii"), ascii_external_strings);
-  global->Set(v8_str("external_non_ascii"), non_ascii_external_strings);
+  global->Set(v8_str("external_one_byte"), one_byte_external_strings);
+  global->Set(v8_str("external_non_one_byte"), non_one_byte_external_strings);
   global->Set(v8_str("max_length"),
               v8::Integer::New(CcTest::isolate(), kMaxLength));
 
-  // Add short external ascii and non-ascii strings checking the result.
+  // Add short external one-byte and two-byte strings checking the result.
   static const char* source =
-    "function test() {"
-    "  var ascii_chars = 'aaaaaaaaaaaaaaaaaaaa';"
-    "  var non_ascii_chars = '\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234';"  //NOLINT
-    "  if (ascii_chars.length != max_length) return 1;"
-    "  if (non_ascii_chars.length != max_length) return 2;"
-    "  var ascii = Array(max_length + 1);"
-    "  var non_ascii = Array(max_length + 1);"
-    "  for (var i = 0; i <= max_length; i++) {"
-    "    ascii[i] = ascii_chars.substring(0, i);"
-    "    non_ascii[i] = non_ascii_chars.substring(0, i);"
-    "  };"
-    "  for (var i = 0; i <= max_length; i++) {"
-    "    if (ascii[i] != external_ascii[i]) return 3;"
-    "    if (non_ascii[i] != external_non_ascii[i]) return 4;"
-    "    for (var j = 0; j < i; j++) {"
-    "      if (external_ascii[i] !="
-    "          (external_ascii[j] + external_ascii[i - j])) return 5;"
-    "      if (external_non_ascii[i] !="
-    "          (external_non_ascii[j] + external_non_ascii[i - j])) return 6;"
-    "      if (non_ascii[i] != (non_ascii[j] + non_ascii[i - j])) return 7;"
-    "      if (ascii[i] != (ascii[j] + ascii[i - j])) return 8;"
-    "      if (ascii[i] != (external_ascii[j] + ascii[i - j])) return 9;"
-    "      if (ascii[i] != (ascii[j] + external_ascii[i - j])) return 10;"
-    "      if (non_ascii[i] !="
-    "          (external_non_ascii[j] + non_ascii[i - j])) return 11;"
-    "      if (non_ascii[i] !="
-    "          (non_ascii[j] + external_non_ascii[i - j])) return 12;"
-    "    }"
-    "  }"
-    "  return 0;"
-    "};"
-    "test()";
+      "function test() {"
+      "  var one_byte_chars = 'aaaaaaaaaaaaaaaaaaaa';"
+      "  var non_one_byte_chars = "
+      "'\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1"
+      "234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\u1234\\"
+      "u1234';"  // NOLINT
+      "  if (one_byte_chars.length != max_length) return 1;"
+      "  if (non_one_byte_chars.length != max_length) return 2;"
+      "  var one_byte = Array(max_length + 1);"
+      "  var non_one_byte = Array(max_length + 1);"
+      "  for (var i = 0; i <= max_length; i++) {"
+      "    one_byte[i] = one_byte_chars.substring(0, i);"
+      "    non_one_byte[i] = non_one_byte_chars.substring(0, i);"
+      "  };"
+      "  for (var i = 0; i <= max_length; i++) {"
+      "    if (one_byte[i] != external_one_byte[i]) return 3;"
+      "    if (non_one_byte[i] != external_non_one_byte[i]) return 4;"
+      "    for (var j = 0; j < i; j++) {"
+      "      if (external_one_byte[i] !="
+      "          (external_one_byte[j] + external_one_byte[i - j])) return "
+      "5;"
+      "      if (external_non_one_byte[i] !="
+      "          (external_non_one_byte[j] + external_non_one_byte[i - "
+      "j])) return 6;"
+      "      if (non_one_byte[i] != (non_one_byte[j] + non_one_byte[i - "
+      "j])) return 7;"
+      "      if (one_byte[i] != (one_byte[j] + one_byte[i - j])) return 8;"
+      "      if (one_byte[i] != (external_one_byte[j] + one_byte[i - j])) "
+      "return 9;"
+      "      if (one_byte[i] != (one_byte[j] + external_one_byte[i - j])) "
+      "return 10;"
+      "      if (non_one_byte[i] !="
+      "          (external_non_one_byte[j] + non_one_byte[i - j])) return "
+      "11;"
+      "      if (non_one_byte[i] !="
+      "          (non_one_byte[j] + external_non_one_byte[i - j])) return "
+      "12;"
+      "    }"
+      "  }"
+      "  return 0;"
+      "};"
+      "test()";
   CHECK_EQ(0, CompileRun(source)->Int32Value());
 }
 
@@ -1091,7 +1101,7 @@
   Factory* factory = CcTest::i_isolate()->factory();
   v8::HandleScope scope(CcTest::isolate());
   Handle<String> string =
-      factory->NewStringFromStaticAscii("parentparentparent");
+      factory->NewStringFromStaticChars("parentparentparent");
   Handle<String> parent =
       factory->NewConsString(string, string).ToHandleChecked();
   CHECK(parent->IsConsString());
@@ -1109,11 +1119,11 @@
 }
 
 
-class AsciiVectorResource : public v8::String::ExternalAsciiStringResource {
+class OneByteVectorResource : public v8::String::ExternalOneByteStringResource {
  public:
-  explicit AsciiVectorResource(i::Vector<const char> vector)
+  explicit OneByteVectorResource(i::Vector<const char> vector)
       : data_(vector) {}
-  virtual ~AsciiVectorResource() {}
+  virtual ~OneByteVectorResource() {}
   virtual size_t length() const { return data_.length(); }
   virtual const char* data() const { return data_.start(); }
  private:
@@ -1126,10 +1136,10 @@
   CcTest::InitializeVM();
   Factory* factory = CcTest::i_isolate()->factory();
   v8::HandleScope scope(CcTest::isolate());
-  AsciiVectorResource resource(
+  OneByteVectorResource resource(
       i::Vector<const char>("abcdefghijklmnopqrstuvwxyz", 26));
   Handle<String> string =
-      factory->NewExternalStringFromAscii(&resource).ToHandleChecked();
+      factory->NewExternalStringFromOneByte(&resource).ToHandleChecked();
   CHECK(string->IsExternalString());
   Handle<String> slice = factory->NewSubString(string, 1, 25);
   CHECK(slice->IsSlicedString());
@@ -1199,28 +1209,34 @@
 }
 
 
-TEST(AsciiArrayJoin) {
+UNINITIALIZED_TEST(OneByteArrayJoin) {
+  v8::Isolate::CreateParams create_params;
   // Set heap limits.
-  v8::ResourceConstraints constraints;
-  constraints.set_max_semi_space_size(1);
-  constraints.set_max_old_space_size(4);
-  v8::SetResourceConstraints(CcTest::isolate(), &constraints);
+  create_params.constraints.set_max_semi_space_size(1);
+  create_params.constraints.set_max_old_space_size(4);
+  v8::Isolate* isolate = v8::Isolate::New(create_params);
+  isolate->Enter();
 
-  // String s is made of 2^17 = 131072 'c' characters and a is an array
-  // starting with 'bad', followed by 2^14 times the string s. That means the
-  // total length of the concatenated strings is 2^31 + 3. So on 32bit systems
-  // summing the lengths of the strings (as Smis) overflows and wraps.
-  LocalContext context;
-  v8::HandleScope scope(CcTest::isolate());
-  v8::TryCatch try_catch;
-  CHECK(CompileRun(
-      "var two_14 = Math.pow(2, 14);"
-      "var two_17 = Math.pow(2, 17);"
-      "var s = Array(two_17 + 1).join('c');"
-      "var a = ['bad'];"
-      "for (var i = 1; i <= two_14; i++) a.push(s);"
-      "a.join("");").IsEmpty());
-  CHECK(try_catch.HasCaught());
+  {
+    // String s is made of 2^17 = 131072 'c' characters and a is an array
+    // starting with 'bad', followed by 2^14 times the string s. That means the
+    // total length of the concatenated strings is 2^31 + 3. So on 32-bit systems
+    // summing the lengths of the strings (as Smis) overflows and wraps.
+    LocalContext context(isolate);
+    v8::HandleScope scope(isolate);
+    v8::TryCatch try_catch;
+    CHECK(CompileRun(
+              "var two_14 = Math.pow(2, 14);"
+              "var two_17 = Math.pow(2, 17);"
+              "var s = Array(two_17 + 1).join('c');"
+              "var a = ['bad'];"
+              "for (var i = 1; i <= two_14; i++) a.push(s);"
+              "a.join("
+              ");").IsEmpty());
+    CHECK(try_catch.HasCaught());
+  }
+  isolate->Exit();
+  isolate->Dispose();
 }
 
 
@@ -1281,14 +1297,14 @@
   v8::HandleScope scope(CcTest::isolate());
   LocalContext context;
   v8::Local<v8::Value> result = CompileRun(
-      "var subject = 'ascii~only~string~'; "
+      "var subject = 'one_byte~only~string~'; "
       "var replace = '\x80';            "
       "subject.replace(/~/g, replace);  ");
   CHECK(result->IsString());
   Handle<String> string = v8::Utils::OpenHandle(v8::String::Cast(*result));
   CHECK(string->IsSeqTwoByteString());
 
-  v8::Local<v8::String> expected = v8_str("ascii\x80only\x80string\x80");
+  v8::Local<v8::String> expected = v8_str("one_byte\x80only\x80string\x80");
   CHECK(expected->Equals(result));
 }
 
@@ -1375,7 +1391,7 @@
   Isolate* isolate = CcTest::i_isolate();
   { HandleScope scope(isolate);
     DummyOneByteResource r;
-    CHECK(isolate->factory()->NewExternalStringFromAscii(&r).is_null());
+    CHECK(isolate->factory()->NewExternalStringFromOneByte(&r).is_null());
     CHECK(isolate->has_pending_exception());
     isolate->clear_pending_exception();
   }
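
The comment in UNINITIALIZED_TEST(OneByteArrayJoin) does the critical arithmetic in prose: 2^14 strings of 2^17 characters each, plus the three characters of 'bad', total 2^31 + 3 characters, which the per-element length sum cannot represent as a Smi on 32-bit builds, so it wraps and join() must throw, which is why the test expects CompileRun to come back empty with an exception caught. The overflow check as a standalone sketch, assuming the 32-bit Smi layout of a 31-bit signed payload:

    #include <cstdint>
    #include <cstdio>

    int main() {
      // 2^14 copies of a 2^17-character string, plus "bad" (3 chars).
      const int64_t total = 3 + (int64_t(1) << 14) * (int64_t(1) << 17);
      // On 32-bit V8 a Smi payload is a 31-bit signed integer (assumption
      // stated above), so anything past 2^30 - 1 cannot be a Smi.
      const int64_t kSmiMax = (int64_t(1) << 30) - 1;
      printf("total=%lld smiMax=%lld overflows=%s\n", (long long)total,
             (long long)kSmiMax, total > kSmiMax ? "yes" : "no");
      return 0;  // prints total=2147483651 ... overflows=yes
    }
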
diff --git a/test/cctest/test-strtod.cc b/test/cctest/test-strtod.cc
index 7fed67d..7c11186 100644
--- a/test/cctest/test-strtod.cc
+++ b/test/cctest/test-strtod.cc
@@ -29,11 +29,11 @@
 
 #include "src/v8.h"
 
+#include "src/base/utils/random-number-generator.h"
 #include "src/bignum.h"
 #include "src/diy-fp.h"
 #include "src/double.h"
 #include "src/strtod.h"
-#include "src/utils/random-number-generator.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
@@ -449,7 +449,7 @@
 static const int kLargeStrtodRandomCount = 2;
 
 TEST(RandomStrtod) {
-  RandomNumberGenerator rng;
+  v8::base::RandomNumberGenerator rng;
   char buffer[kBufferSize];
   for (int length = 1; length < 15; length++) {
     for (int i = 0; i < kShortStrtodRandomCount; ++i) {
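
The strtod change is purely mechanical: RandomNumberGenerator moved under src/base, so the include path and namespace change while the fuzzing loops stay as they were. For reference, the relocated class as this patch uses it; only the default constructor and NextInt are relied on here, and default construction self-seeds.

    #include "src/base/utils/random-number-generator.h"

    int RandomDigit() {
      v8::base::RandomNumberGenerator rng;
      return rng.NextInt(10);  // uniform in [0, 10)
    }
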
diff --git a/test/cctest/test-symbols.cc b/test/cctest/test-symbols.cc
index 389145d..066c997 100644
--- a/test/cctest/test-symbols.cc
+++ b/test/cctest/test-symbols.cc
@@ -8,6 +8,7 @@
 #include "src/v8.h"
 
 #include "src/objects.h"
+#include "src/ostreams.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
@@ -21,16 +22,16 @@
   const int kNumSymbols = 30;
   Handle<Symbol> symbols[kNumSymbols];
 
+  OFStream os(stdout);
   for (int i = 0; i < kNumSymbols; ++i) {
     symbols[i] = isolate->factory()->NewSymbol();
     CHECK(symbols[i]->IsName());
     CHECK(symbols[i]->IsSymbol());
     CHECK(symbols[i]->HasHashCode());
     CHECK_GT(symbols[i]->Hash(), 0);
-    symbols[i]->ShortPrint();
-    PrintF("\n");
+    os << Brief(*symbols[i]) << "\n";
 #if OBJECT_PRINT
-    symbols[i]->Print();
+    symbols[i]->Print(os);
 #endif
 #if VERIFY_HEAP
     symbols[i]->ObjectVerify();
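
The symbols test swaps ShortPrint/PrintF for OFStream, an std::ostream adapter over a FILE*, with Brief() streaming a one-line object summary; the same stream is reused by Print(os) under OBJECT_PRINT. The pattern in isolation, assuming an internal Isolate* and the same headers the test includes:

    #include <cstdio>
    #include "src/v8.h"
    #include "src/ostreams.h"

    void DumpNewSymbol(v8::internal::Isolate* isolate) {
      v8::internal::HandleScope scope(isolate);
      v8::internal::Handle<v8::internal::Symbol> sym =
          isolate->factory()->NewSymbol();
      v8::internal::OFStream os(stdout);  // std::ostream over a FILE*
      os << v8::internal::Brief(*sym) << "\n";
    }
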
diff --git a/test/cctest/test-thread-termination.cc b/test/cctest/test-thread-termination.cc
index b4048ae..21d3b95 100644
--- a/test/cctest/test-thread-termination.cc
+++ b/test/cctest/test-thread-termination.cc
@@ -26,11 +26,12 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "src/v8.h"
-#include "src/platform.h"
 #include "test/cctest/cctest.h"
 
+#include "src/base/platform/platform.h"
 
-v8::internal::Semaphore* semaphore = NULL;
+
+v8::base::Semaphore* semaphore = NULL;
 
 
 void Signal(const v8::FunctionCallbackInfo<v8::Value>& args) {
@@ -158,11 +159,11 @@
 }
 
 
-class TerminatorThread : public v8::internal::Thread {
+class TerminatorThread : public v8::base::Thread {
  public:
   explicit TerminatorThread(i::Isolate* isolate)
-      : Thread("TerminatorThread"),
-        isolate_(reinterpret_cast<v8::Isolate*>(isolate)) { }
+      : Thread(Options("TerminatorThread")),
+        isolate_(reinterpret_cast<v8::Isolate*>(isolate)) {}
   void Run() {
     semaphore->Wait();
     CHECK(!v8::V8::IsExecutionTerminating(isolate_));
@@ -177,7 +178,7 @@
 // Test that a single thread of JavaScript execution can be terminated
 // from the side by another thread.
 TEST(TerminateOnlyV8ThreadFromOtherThread) {
-  semaphore = new v8::internal::Semaphore(0);
+  semaphore = new v8::base::Semaphore(0);
   TerminatorThread thread(CcTest::i_isolate());
   thread.Start();
 
@@ -377,7 +378,7 @@
 
 
 TEST(TerminateFromOtherThreadWhileMicrotaskRunning) {
-  semaphore = new v8::internal::Semaphore(0);
+  semaphore = new v8::base::Semaphore(0);
   TerminatorThread thread(CcTest::i_isolate());
   thread.Start();
 
@@ -402,3 +403,71 @@
   delete semaphore;
   semaphore = NULL;
 }
+
+
+static int callback_counter = 0;
+
+
+static void CounterCallback(v8::Isolate* isolate, void* data) {
+  callback_counter++;
+}
+
+
+TEST(PostponeTerminateException) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  v8::Handle<v8::ObjectTemplate> global =
+      CreateGlobalTemplate(CcTest::isolate(), TerminateCurrentThread, DoLoop);
+  v8::Handle<v8::Context> context =
+      v8::Context::New(CcTest::isolate(), NULL, global);
+  v8::Context::Scope context_scope(context);
+
+  v8::TryCatch try_catch;
+  static const char* terminate_and_loop =
+      "terminate(); for (var i = 0; i < 10000; i++);";
+
+  { // Postpone terminate execution interrupts.
+    i::PostponeInterruptsScope p1(CcTest::i_isolate(),
+                                  i::StackGuard::TERMINATE_EXECUTION);
+
+    // API interrupts should still be triggered.
+    CcTest::isolate()->RequestInterrupt(&CounterCallback, NULL);
+    CHECK_EQ(0, callback_counter);
+    CompileRun(terminate_and_loop);
+    CHECK(!try_catch.HasTerminated());
+    CHECK_EQ(1, callback_counter);
+
+    { // Postpone API interrupts as well.
+      i::PostponeInterruptsScope p2(CcTest::i_isolate(),
+                                    i::StackGuard::API_INTERRUPT);
+
+      // None of the two interrupts should trigger.
+      CcTest::isolate()->RequestInterrupt(&CounterCallback, NULL);
+      CompileRun(terminate_and_loop);
+      CHECK(!try_catch.HasTerminated());
+      CHECK_EQ(1, callback_counter);
+    }
+
+    // Now the previously requested API interrupt should trigger.
+    CompileRun(terminate_and_loop);
+    CHECK(!try_catch.HasTerminated());
+    CHECK_EQ(2, callback_counter);
+  }
+
+  // Now the previously requested terminate execution interrupt should trigger.
+  CompileRun("for (var i = 0; i < 10000; i++);");
+  CHECK(try_catch.HasTerminated());
+  CHECK_EQ(2, callback_counter);
+}
+
+
+TEST(ErrorObjectAfterTermination) {
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
+  v8::Context::Scope context_scope(context);
+  v8::V8::TerminateExecution(isolate);
+  v8::Local<v8::Value> error = v8::Exception::Error(v8_str("error"));
+  // TODO(yangguo): crbug/403509. Check for empty handle instead.
+  CHECK(error->IsUndefined());
+}
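
TEST(PostponeTerminateException) above pins down the PostponeInterruptsScope contract: an interrupt requested while a matching scope is active is deferred, not dropped, and fires once the scope unwinds; scopes filter by interrupt kind, so a TERMINATE_EXECUTION scope still lets API interrupts through. The public half of that machinery is Isolate::RequestInterrupt, sketched here with an illustrative callback and counter:

    #include "include/v8.h"

    static int g_interrupt_count = 0;

    // Runs on the JS thread at the next interrupt check while script is
    // executing, or when a postponing scope unwinds, per the test above.
    static void OnInterrupt(v8::Isolate* isolate, void* data) {
      ++*static_cast<int*>(data);
    }

    void AskForInterrupt(v8::Isolate* isolate) {
      // The request itself never blocks; delivery is what gets deferred.
      isolate->RequestInterrupt(&OnInterrupt, &g_interrupt_count);
    }
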
diff --git a/test/cctest/test-threads.cc b/test/cctest/test-threads.cc
index ca8ccaf..1204226 100644
--- a/test/cctest/test-threads.cc
+++ b/test/cctest/test-threads.cc
@@ -26,12 +26,11 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "src/v8.h"
-
-#include "src/platform.h"
-#include "src/isolate.h"
-
 #include "test/cctest/cctest.h"
 
+#include "src/base/platform/platform.h"
+#include "src/isolate.h"
+
 
 enum Turn {
   FILL_CACHE,
@@ -43,9 +42,9 @@
 static Turn turn = FILL_CACHE;
 
 
-class ThreadA : public v8::internal::Thread {
+class ThreadA : public v8::base::Thread {
  public:
-  ThreadA() : Thread("ThreadA") { }
+  ThreadA() : Thread(Options("ThreadA")) {}
   void Run() {
     v8::Isolate* isolate = CcTest::isolate();
     v8::Locker locker(isolate);
@@ -83,9 +82,9 @@
 };
 
 
-class ThreadB : public v8::internal::Thread {
+class ThreadB : public v8::base::Thread {
  public:
-  ThreadB() : Thread("ThreadB") { }
+  ThreadB() : Thread(Options("ThreadB")) {}
   void Run() {
     do {
       {
@@ -123,16 +122,16 @@
   CHECK_EQ(DONE, turn);
 }
 
-class ThreadIdValidationThread : public v8::internal::Thread {
+class ThreadIdValidationThread : public v8::base::Thread {
  public:
-  ThreadIdValidationThread(i::Thread* thread_to_start,
-                           i::List<i::ThreadId>* refs,
-                           unsigned int thread_no,
-                           i::Semaphore* semaphore)
-    : Thread("ThreadRefValidationThread"),
-      refs_(refs), thread_no_(thread_no), thread_to_start_(thread_to_start),
-      semaphore_(semaphore) {
-  }
+  ThreadIdValidationThread(v8::base::Thread* thread_to_start,
+                           i::List<i::ThreadId>* refs, unsigned int thread_no,
+                           v8::base::Semaphore* semaphore)
+      : Thread(Options("ThreadRefValidationThread")),
+        refs_(refs),
+        thread_no_(thread_no),
+        thread_to_start_(thread_to_start),
+        semaphore_(semaphore) {}
 
   void Run() {
     i::ThreadId thread_id = i::ThreadId::Current();
@@ -150,8 +149,8 @@
  private:
   i::List<i::ThreadId>* refs_;
   int thread_no_;
-  i::Thread* thread_to_start_;
-  i::Semaphore* semaphore_;
+  v8::base::Thread* thread_to_start_;
+  v8::base::Semaphore* semaphore_;
 };
 
 
@@ -159,7 +158,7 @@
   const int kNThreads = 100;
   i::List<ThreadIdValidationThread*> threads(kNThreads);
   i::List<i::ThreadId> refs(kNThreads);
-  i::Semaphore semaphore(0);
+  v8::base::Semaphore semaphore(0);
   ThreadIdValidationThread* prev = NULL;
   for (int i = kNThreads - 1; i >= 0; i--) {
     ThreadIdValidationThread* newThread =
@@ -176,19 +175,3 @@
     delete threads[i];
   }
 }
-
-
-class ThreadC : public v8::internal::Thread {
- public:
-  ThreadC() : Thread("ThreadC") { }
-  void Run() {
-    Join();
-  }
-};
-
-
-TEST(ThreadJoinSelf) {
-  ThreadC thread;
-  thread.Start();
-  thread.Join();
-}
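
All of the threading tests migrate from v8::internal::Thread to v8::base::Thread, whose constructor takes an Options value rather than a bare name; the self-joining ThreadC/ThreadJoinSelf test is dropped outright rather than ported. The subclass pattern shared by ThreadA, ThreadB, TerminatorThread and ThreadIdValidationThread, with an illustrative class name:

    #include "src/base/platform/platform.h"

    // Illustrative subclass; WorkerThread is not a name from this patch.
    class WorkerThread : public v8::base::Thread {
     public:
      WorkerThread() : Thread(Options("WorkerThread")) {}
      virtual void Run() {
        // Thread body; runs once Start() is called.
      }
    };

Usage mirrors the tests: construct, Start(), then Join() from the creating thread.
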
diff --git a/test/cctest/test-time.cc b/test/cctest/test-time.cc
deleted file mode 100644
index 7f7cce8..0000000
--- a/test/cctest/test-time.cc
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "src/v8.h"
-
-#if V8_OS_POSIX
-#include <sys/time.h>  // NOLINT
-#endif
-
-#include "test/cctest/cctest.h"
-#if V8_OS_WIN
-#include "src/base/win32-headers.h"
-#endif
-
-using namespace v8::internal;
-
-
-TEST(TimeDeltaFromAndIn) {
-  CHECK(TimeDelta::FromDays(2) == TimeDelta::FromHours(48));
-  CHECK(TimeDelta::FromHours(3) == TimeDelta::FromMinutes(180));
-  CHECK(TimeDelta::FromMinutes(2) == TimeDelta::FromSeconds(120));
-  CHECK(TimeDelta::FromSeconds(2) == TimeDelta::FromMilliseconds(2000));
-  CHECK(TimeDelta::FromMilliseconds(2) == TimeDelta::FromMicroseconds(2000));
-  CHECK_EQ(static_cast<int>(13), TimeDelta::FromDays(13).InDays());
-  CHECK_EQ(static_cast<int>(13), TimeDelta::FromHours(13).InHours());
-  CHECK_EQ(static_cast<int>(13), TimeDelta::FromMinutes(13).InMinutes());
-  CHECK_EQ(static_cast<int64_t>(13), TimeDelta::FromSeconds(13).InSeconds());
-  CHECK_EQ(13.0, TimeDelta::FromSeconds(13).InSecondsF());
-  CHECK_EQ(static_cast<int64_t>(13),
-           TimeDelta::FromMilliseconds(13).InMilliseconds());
-  CHECK_EQ(13.0, TimeDelta::FromMilliseconds(13).InMillisecondsF());
-  CHECK_EQ(static_cast<int64_t>(13),
-           TimeDelta::FromMicroseconds(13).InMicroseconds());
-}
-
-
-#if V8_OS_MACOSX
-TEST(TimeDeltaFromMachTimespec) {
-  TimeDelta null = TimeDelta();
-  CHECK(null == TimeDelta::FromMachTimespec(null.ToMachTimespec()));
-  TimeDelta delta1 = TimeDelta::FromMilliseconds(42);
-  CHECK(delta1 == TimeDelta::FromMachTimespec(delta1.ToMachTimespec()));
-  TimeDelta delta2 = TimeDelta::FromDays(42);
-  CHECK(delta2 == TimeDelta::FromMachTimespec(delta2.ToMachTimespec()));
-}
-#endif
-
-
-TEST(TimeJsTime) {
-  Time t = Time::FromJsTime(700000.3);
-  CHECK_EQ(700000.3, t.ToJsTime());
-}
-
-
-#if V8_OS_POSIX
-TEST(TimeFromTimespec) {
-  Time null;
-  CHECK(null.IsNull());
-  CHECK(null == Time::FromTimespec(null.ToTimespec()));
-  Time now = Time::Now();
-  CHECK(now == Time::FromTimespec(now.ToTimespec()));
-  Time now_sys = Time::NowFromSystemTime();
-  CHECK(now_sys == Time::FromTimespec(now_sys.ToTimespec()));
-  Time unix_epoch = Time::UnixEpoch();
-  CHECK(unix_epoch == Time::FromTimespec(unix_epoch.ToTimespec()));
-  Time max = Time::Max();
-  CHECK(max.IsMax());
-  CHECK(max == Time::FromTimespec(max.ToTimespec()));
-}
-
-
-TEST(TimeFromTimeval) {
-  Time null;
-  CHECK(null.IsNull());
-  CHECK(null == Time::FromTimeval(null.ToTimeval()));
-  Time now = Time::Now();
-  CHECK(now == Time::FromTimeval(now.ToTimeval()));
-  Time now_sys = Time::NowFromSystemTime();
-  CHECK(now_sys == Time::FromTimeval(now_sys.ToTimeval()));
-  Time unix_epoch = Time::UnixEpoch();
-  CHECK(unix_epoch == Time::FromTimeval(unix_epoch.ToTimeval()));
-  Time max = Time::Max();
-  CHECK(max.IsMax());
-  CHECK(max == Time::FromTimeval(max.ToTimeval()));
-}
-#endif
-
-
-#if V8_OS_WIN
-TEST(TimeFromFiletime) {
-  Time null;
-  CHECK(null.IsNull());
-  CHECK(null == Time::FromFiletime(null.ToFiletime()));
-  Time now = Time::Now();
-  CHECK(now == Time::FromFiletime(now.ToFiletime()));
-  Time now_sys = Time::NowFromSystemTime();
-  CHECK(now_sys == Time::FromFiletime(now_sys.ToFiletime()));
-  Time unix_epoch = Time::UnixEpoch();
-  CHECK(unix_epoch == Time::FromFiletime(unix_epoch.ToFiletime()));
-  Time max = Time::Max();
-  CHECK(max.IsMax());
-  CHECK(max == Time::FromFiletime(max.ToFiletime()));
-}
-#endif
-
-
-TEST(TimeTicksIsMonotonic) {
-  TimeTicks previous_normal_ticks;
-  TimeTicks previous_highres_ticks;
-  ElapsedTimer timer;
-  timer.Start();
-  while (!timer.HasExpired(TimeDelta::FromMilliseconds(100))) {
-    TimeTicks normal_ticks = TimeTicks::Now();
-    TimeTicks highres_ticks = TimeTicks::HighResolutionNow();
-    CHECK_GE(normal_ticks, previous_normal_ticks);
-    CHECK_GE((normal_ticks - previous_normal_ticks).InMicroseconds(), 0);
-    CHECK_GE(highres_ticks, previous_highres_ticks);
-    CHECK_GE((highres_ticks - previous_highres_ticks).InMicroseconds(), 0);
-    previous_normal_ticks = normal_ticks;
-    previous_highres_ticks = highres_ticks;
-  }
-}
-
-
-template <typename T>
-static void ResolutionTest(T (*Now)(), TimeDelta target_granularity) {
-  // We're trying to measure that intervals increment in a VERY small amount
-  // of time -- according to the specified target granularity. Unfortunately,
-  // if we happen to have a context switch in the middle of our test, the
-  // context switch could easily exceed our limit. So, we iterate on this
-  // several times. As long as we're able to detect the fine-granularity
-  // timers at least once, then the test has succeeded.
-  static const TimeDelta kExpirationTimeout = TimeDelta::FromSeconds(1);
-  ElapsedTimer timer;
-  timer.Start();
-  TimeDelta delta;
-  do {
-    T start = Now();
-    T now = start;
-    // Loop until we can detect that the clock has changed. Non-HighRes timers
-    // will increment in chunks, i.e. 15ms. By spinning until we see a clock
-    // change, we detect the minimum time between measurements.
-    do {
-      now = Now();
-      delta = now - start;
-    } while (now <= start);
-    CHECK_NE(static_cast<int64_t>(0), delta.InMicroseconds());
-  } while (delta > target_granularity && !timer.HasExpired(kExpirationTimeout));
-  CHECK_LE(delta, target_granularity);
-}
-
-
-TEST(TimeNowResolution) {
-  // We assume that Time::Now() has at least 16ms resolution.
-  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
-  ResolutionTest<Time>(&Time::Now, kTargetGranularity);
-}
-
-
-TEST(TimeTicksNowResolution) {
-  // We assume that TimeTicks::Now() has at least 16ms resolution.
-  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(16);
-  ResolutionTest<TimeTicks>(&TimeTicks::Now, kTargetGranularity);
-}
-
-
-TEST(TimeTicksHighResolutionNowResolution) {
-  if (!TimeTicks::IsHighResolutionClockWorking()) return;
-
-  // We assume that TimeTicks::HighResolutionNow() has sub-ms resolution.
-  static const TimeDelta kTargetGranularity = TimeDelta::FromMilliseconds(1);
-  ResolutionTest<TimeTicks>(&TimeTicks::HighResolutionNow, kTargetGranularity);
-}
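
The deleted ResolutionTest documents a measurement trick worth noting: spin until the clock reading changes, take the smallest observed delta as a bound on the clock's granularity, and retry for up to a second so a context switch in the middle of one attempt cannot fail the run. The same idea in portable C++ with std::chrono, as a sketch; the Time/TimeTicks tests themselves presumably moved along with the platform code into src/base.

    #include <chrono>
    #include <cstdio>

    int main() {
      using Clock = std::chrono::steady_clock;
      Clock::time_point start = Clock::now();
      Clock::time_point now = start;
      // Spin until the reading changes; coarse clocks tick in chunks
      // (e.g. ~15 ms timers), so the loop observes the real granularity.
      do {
        now = Clock::now();
      } while (now <= start);
      long long ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
                         now - start).count();
      printf("observed tick: %lld ns\n", ns);
      return 0;
    }
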
diff --git a/test/cctest/test-types.cc b/test/cctest/test-types.cc
index 86887e8..0cd2472 100644
--- a/test/cctest/test-types.cc
+++ b/test/cctest/test-types.cc
@@ -1,68 +1,33 @@
 // Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 
 #include <vector>
 
 #include "src/hydrogen-types.h"
+#include "src/isolate-inl.h"
 #include "src/types.h"
-#include "src/utils/random-number-generator.h"
 #include "test/cctest/cctest.h"
 
 using namespace v8::internal;
 
 // Testing auxiliaries (breaking the Type abstraction).
+typedef uint32_t bitset;
+
 struct ZoneRep {
   typedef void* Struct;
 
   static bool IsStruct(Type* t, int tag) {
     return !IsBitset(t) && reinterpret_cast<intptr_t>(AsStruct(t)[0]) == tag;
   }
-  static bool IsBitset(Type* t) { return reinterpret_cast<intptr_t>(t) & 1; }
-  static bool IsClass(Type* t) { return IsStruct(t, 0); }
-  static bool IsConstant(Type* t) { return IsStruct(t, 1); }
-  static bool IsContext(Type* t) { return IsStruct(t, 2); }
-  static bool IsArray(Type* t) { return IsStruct(t, 3); }
-  static bool IsFunction(Type* t) { return IsStruct(t, 4); }
-  static bool IsUnion(Type* t) { return IsStruct(t, 5); }
+  static bool IsBitset(Type* t) { return reinterpret_cast<uintptr_t>(t) & 1; }
+  static bool IsUnion(Type* t) { return IsStruct(t, 6); }
 
   static Struct* AsStruct(Type* t) {
     return reinterpret_cast<Struct*>(t);
   }
-  static int AsBitset(Type* t) {
-    return static_cast<int>(reinterpret_cast<intptr_t>(t) >> 1);
-  }
-  static Map* AsClass(Type* t) {
-    return *static_cast<Map**>(AsStruct(t)[3]);
-  }
-  static Object* AsConstant(Type* t) {
-    return *static_cast<Object**>(AsStruct(t)[3]);
-  }
-  static Type* AsContext(Type* t) {
-    return *static_cast<Type**>(AsStruct(t)[2]);
+  static bitset AsBitset(Type* t) {
+    return static_cast<bitset>(reinterpret_cast<uintptr_t>(t) ^ 1u);
   }
   static Struct* AsUnion(Type* t) {
     return AsStruct(t);
@@ -77,7 +42,7 @@
     using Type::BitsetType::New;
     using Type::BitsetType::Glb;
     using Type::BitsetType::Lub;
-    using Type::BitsetType::InherentLub;
+    using Type::BitsetType::IsInhabited;
   };
 };
 
@@ -89,23 +54,11 @@
     return t->IsFixedArray() && Smi::cast(AsStruct(t)->get(0))->value() == tag;
   }
   static bool IsBitset(Handle<HeapType> t) { return t->IsSmi(); }
-  static bool IsClass(Handle<HeapType> t) {
-    return t->IsMap() || IsStruct(t, 0);
-  }
-  static bool IsConstant(Handle<HeapType> t) { return IsStruct(t, 1); }
-  static bool IsContext(Handle<HeapType> t) { return IsStruct(t, 2); }
-  static bool IsArray(Handle<HeapType> t) { return IsStruct(t, 3); }
-  static bool IsFunction(Handle<HeapType> t) { return IsStruct(t, 4); }
-  static bool IsUnion(Handle<HeapType> t) { return IsStruct(t, 5); }
+  static bool IsUnion(Handle<HeapType> t) { return IsStruct(t, 6); }
 
   static Struct* AsStruct(Handle<HeapType> t) { return FixedArray::cast(*t); }
-  static int AsBitset(Handle<HeapType> t) { return Smi::cast(*t)->value(); }
-  static Map* AsClass(Handle<HeapType> t) {
-    return t->IsMap() ? Map::cast(*t) : Map::cast(AsStruct(t)->get(2));
-  }
-  static Object* AsConstant(Handle<HeapType> t) { return AsStruct(t)->get(2); }
-  static HeapType* AsContext(Handle<HeapType> t) {
-    return HeapType::cast(AsStruct(t)->get(1));
+  static bitset AsBitset(Handle<HeapType> t) {
+    return static_cast<bitset>(reinterpret_cast<uintptr_t>(*t));
   }
   static Struct* AsUnion(Handle<HeapType> t) { return AsStruct(t); }
   static int Length(Struct* structured) { return structured->length() - 1; }
@@ -116,10 +69,9 @@
     using HeapType::BitsetType::New;
     using HeapType::BitsetType::Glb;
     using HeapType::BitsetType::Lub;
-    using HeapType::BitsetType::InherentLub;
-    static int Glb(Handle<HeapType> type) { return Glb(*type); }
-    static int Lub(Handle<HeapType> type) { return Lub(*type); }
-    static int InherentLub(Handle<HeapType> type) { return InherentLub(*type); }
+    using HeapType::BitsetType::IsInhabited;
+    static bitset Glb(Handle<HeapType> type) { return Glb(*type); }
+    static bitset Lub(Handle<HeapType> type) { return Lub(*type); }
   };
 };
 
@@ -127,18 +79,24 @@
 template<class Type, class TypeHandle, class Region>
 class Types {
  public:
-  Types(Region* region, Isolate* isolate) : region_(region) {
+  Types(Region* region, Isolate* isolate)
+      : region_(region), rng_(isolate->random_number_generator()) {
     #define DECLARE_TYPE(name, value) \
       name = Type::name(region); \
       types.push_back(name);
-    BITSET_TYPE_LIST(DECLARE_TYPE)
+    PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
     #undef DECLARE_TYPE
 
-    object_map = isolate->factory()->NewMap(JS_OBJECT_TYPE, 3 * kPointerSize);
-    array_map = isolate->factory()->NewMap(JS_ARRAY_TYPE, 4 * kPointerSize);
+    object_map = isolate->factory()->NewMap(
+        JS_OBJECT_TYPE, JSObject::kHeaderSize);
+    array_map = isolate->factory()->NewMap(
+        JS_ARRAY_TYPE, JSArray::kSize);
+    number_map = isolate->factory()->NewMap(
+        HEAP_NUMBER_TYPE, HeapNumber::kSize);
     uninitialized_map = isolate->factory()->uninitialized_map();
     ObjectClass = Type::Class(object_map, region);
     ArrayClass = Type::Class(array_map, region);
+    NumberClass = Type::Class(number_map, region);
     UninitializedClass = Type::Class(uninitialized_map, region);
 
     maps.push_back(object_map);
@@ -171,6 +129,17 @@
       types.push_back(Type::Constant(*it, region));
     }
 
+    integers.push_back(isolate->factory()->NewNumber(-V8_INFINITY));
+    integers.push_back(isolate->factory()->NewNumber(+V8_INFINITY));
+    integers.push_back(isolate->factory()->NewNumber(-rng_->NextInt(10)));
+    integers.push_back(isolate->factory()->NewNumber(+rng_->NextInt(10)));
+    for (int i = 0; i < 10; ++i) {
+      double x = rng_->NextInt();
+      integers.push_back(isolate->factory()->NewNumber(x));
+      x *= rng_->NextInt();
+      if (!IsMinusZero(x)) integers.push_back(isolate->factory()->NewNumber(x));
+    }
+
     NumberArray = Type::Array(Number, region);
     StringArray = Type::Array(String, region);
     AnyArray = Type::Array(Any, region);
@@ -180,13 +149,14 @@
     NumberFunction2 = Type::Function(Number, Number, Number, region);
     MethodFunction = Type::Function(String, Object, 0, region);
 
-    for (int i = 0; i < 50; ++i) {
+    for (int i = 0; i < 30; ++i) {
       types.push_back(Fuzz());
     }
   }
 
   Handle<i::Map> object_map;
   Handle<i::Map> array_map;
+  Handle<i::Map> number_map;
   Handle<i::Map> uninitialized_map;
 
   Handle<i::Smi> smi;
@@ -202,6 +172,7 @@
 
   TypeHandle ObjectClass;
   TypeHandle ArrayClass;
+  TypeHandle NumberClass;
   TypeHandle UninitializedClass;
 
   TypeHandle SmiConstant;
@@ -223,9 +194,11 @@
   typedef std::vector<TypeHandle> TypeVector;
   typedef std::vector<Handle<i::Map> > MapVector;
   typedef std::vector<Handle<i::Object> > ValueVector;
+
   TypeVector types;
   MapVector maps;
   ValueVector values;
+  ValueVector integers;  // "Integer" values used for range limits.
 
   TypeHandle Of(Handle<i::Object> value) {
     return Type::Of(value, region_);
@@ -235,12 +208,20 @@
     return Type::NowOf(value, region_);
   }
 
+  TypeHandle Class(Handle<i::Map> map) {
+    return Type::Class(map, region_);
+  }
+
   TypeHandle Constant(Handle<i::Object> value) {
     return Type::Constant(value, region_);
   }
 
-  TypeHandle Class(Handle<i::Map> map) {
-    return Type::Class(map, region_);
+  TypeHandle Range(Handle<i::Object> min, Handle<i::Object> max) {
+    return Type::Range(min, max, region_);
+  }
+
+  TypeHandle Context(TypeHandle outer) {
+    return Type::Context(outer, region_);
   }
 
   TypeHandle Array1(TypeHandle element) {
@@ -274,56 +255,64 @@
   }
 
   TypeHandle Random() {
-    return types[rng_.NextInt(static_cast<int>(types.size()))];
+    return types[rng_->NextInt(static_cast<int>(types.size()))];
   }
 
-  TypeHandle Fuzz(int depth = 5) {
-    switch (rng_.NextInt(depth == 0 ? 3 : 20)) {
+  TypeHandle Fuzz(int depth = 4) {
+    switch (rng_->NextInt(depth == 0 ? 3 : 20)) {
       case 0: {  // bitset
         int n = 0
         #define COUNT_BITSET_TYPES(type, value) + 1
-        BITSET_TYPE_LIST(COUNT_BITSET_TYPES)
+        PROPER_BITSET_TYPE_LIST(COUNT_BITSET_TYPES)
         #undef COUNT_BITSET_TYPES
         ;
-        int i = rng_.NextInt(n);
+        int i = rng_->NextInt(n);
         #define PICK_BITSET_TYPE(type, value) \
           if (i-- == 0) return Type::type(region_);
-        BITSET_TYPE_LIST(PICK_BITSET_TYPE)
+        PROPER_BITSET_TYPE_LIST(PICK_BITSET_TYPE)
         #undef PICK_BITSET_TYPE
         UNREACHABLE();
       }
       case 1: {  // class
-        int i = rng_.NextInt(static_cast<int>(maps.size()));
+        int i = rng_->NextInt(static_cast<int>(maps.size()));
         return Type::Class(maps[i], region_);
       }
       case 2: {  // constant
-        int i = rng_.NextInt(static_cast<int>(values.size()));
+        int i = rng_->NextInt(static_cast<int>(values.size()));
         return Type::Constant(values[i], region_);
       }
-      case 3: {  // context
-        int depth = rng_.NextInt(3);
+      case 3: {  // range
+        int i = rng_->NextInt(static_cast<int>(integers.size()));
+        int j = rng_->NextInt(static_cast<int>(integers.size()));
+        i::Handle<i::Object> min = integers[i];
+        i::Handle<i::Object> max = integers[j];
+        if (min->Number() > max->Number()) std::swap(min, max);
+        return Type::Range(min, max, region_);
+      }
+      case 4: {  // context
+        int depth = rng_->NextInt(3);
         TypeHandle type = Type::Internal(region_);
         for (int i = 0; i < depth; ++i) type = Type::Context(type, region_);
         return type;
       }
-      case 4: {  // array
+      case 5: {  // array
         TypeHandle element = Fuzz(depth / 2);
         return Type::Array(element, region_);
       }
-      case 5:
       case 6:
       case 7: {  // function
         TypeHandle result = Fuzz(depth / 2);
         TypeHandle receiver = Fuzz(depth / 2);
-        int arity = rng_.NextInt(3);
+        int arity = rng_->NextInt(3);
         TypeHandle type = Type::Function(result, receiver, arity, region_);
         for (int i = 0; i < type->AsFunction()->Arity(); ++i) {
-          TypeHandle parameter = Fuzz(depth - 1);
+          TypeHandle parameter = Fuzz(depth / 2);
           type->AsFunction()->InitParameter(i, parameter);
         }
+        return type;
       }
       default: {  // union
-        int n = rng_.NextInt(10);
+        int n = rng_->NextInt(10);
         TypeHandle type = None;
         for (int i = 0; i < n; ++i) {
           TypeHandle operand = Fuzz(depth - 1);
@@ -339,7 +328,7 @@
 
  private:
   Region* region_;
-  RandomNumberGenerator rng_;
+  v8::base::RandomNumberGenerator* rng_;
 };
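
The Fuzz() generator above terminates because every recursive case shrinks the depth budget: arrays recurse with depth / 2, unions with depth - 1, and at depth 0 only leaf cases are drawn. A minimal standalone sketch of that scheme, using illustrative names rather than V8 types:

#include <cstdio>
#include <cstdlib>
#include <string>

static std::string FuzzType(int depth) {
  // At depth 0, restrict the draw to leaf cases so recursion bottoms out.
  switch (depth == 0 ? std::rand() % 2 : std::rand() % 4) {
    case 0: return "Number";                                  // leaf
    case 1: return "String";                                  // leaf
    case 2: return "Array<" + FuzzType(depth / 2) + ">";      // halve the budget
    default:                                                  // binary combinator
      return "Union<" + FuzzType(depth - 1) + ", " + FuzzType(depth - 1) + ">";
  }
}

int main() { std::printf("%s\n", FuzzType(4).c_str()); }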
 
 
@@ -364,25 +353,16 @@
 
   bool Equal(TypeHandle type1, TypeHandle type2) {
     return
-        type1->Is(type2) && type2->Is(type1) &&
-        Rep::IsBitset(type1) == Rep::IsBitset(type2) &&
-        Rep::IsClass(type1) == Rep::IsClass(type2) &&
-        Rep::IsConstant(type1) == Rep::IsConstant(type2) &&
-        Rep::IsContext(type1) == Rep::IsContext(type2) &&
-        Rep::IsArray(type1) == Rep::IsArray(type2) &&
-        Rep::IsFunction(type1) == Rep::IsFunction(type2) &&
-        Rep::IsUnion(type1) == Rep::IsUnion(type2) &&
+        type1->Equals(type2) &&
+        this->IsBitset(type1) == this->IsBitset(type2) &&
+        this->IsUnion(type1) == this->IsUnion(type2) &&
         type1->NumClasses() == type2->NumClasses() &&
         type1->NumConstants() == type2->NumConstants() &&
-        (!Rep::IsBitset(type1) ||
-          Rep::AsBitset(type1) == Rep::AsBitset(type2)) &&
-        (!Rep::IsClass(type1) ||
-          Rep::AsClass(type1) == Rep::AsClass(type2)) &&
-        (!Rep::IsConstant(type1) ||
-          Rep::AsConstant(type1) == Rep::AsConstant(type2)) &&
-          // TODO(rossberg): Check details of arrays, functions, bounds.
-          (!Rep::IsUnion(type1) ||
-          Rep::Length(Rep::AsUnion(type1)) == Rep::Length(Rep::AsUnion(type2)));
+        (!this->IsBitset(type1) ||
+          this->AsBitset(type1) == this->AsBitset(type2)) &&
+        (!this->IsUnion(type1) ||
+          this->Length(this->AsUnion(type1)) ==
+              this->Length(this->AsUnion(type2)));
   }
 
   void CheckEqual(TypeHandle type1, TypeHandle type2) {
@@ -392,36 +372,37 @@
   void CheckSub(TypeHandle type1, TypeHandle type2) {
     CHECK(type1->Is(type2));
     CHECK(!type2->Is(type1));
-    if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
-      CHECK_NE(Rep::AsBitset(type1), Rep::AsBitset(type2));
+    if (this->IsBitset(type1) && this->IsBitset(type2)) {
+      CHECK(this->AsBitset(type1) != this->AsBitset(type2));
     }
   }
 
   void CheckUnordered(TypeHandle type1, TypeHandle type2) {
     CHECK(!type1->Is(type2));
     CHECK(!type2->Is(type1));
-    if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
-      CHECK_NE(Rep::AsBitset(type1), Rep::AsBitset(type2));
+    if (this->IsBitset(type1) && this->IsBitset(type2)) {
+      CHECK(this->AsBitset(type1) != this->AsBitset(type2));
     }
   }
 
-  void CheckOverlap(TypeHandle type1, TypeHandle type2, TypeHandle mask) {
+  void CheckOverlap(TypeHandle type1, TypeHandle type2) {
     CHECK(type1->Maybe(type2));
     CHECK(type2->Maybe(type1));
-    if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
-      CHECK_NE(0,
-          Rep::AsBitset(type1) & Rep::AsBitset(type2) & Rep::AsBitset(mask));
-    }
   }
 
-  void CheckDisjoint(TypeHandle type1, TypeHandle type2, TypeHandle mask) {
+  void CheckDisjoint(TypeHandle type1, TypeHandle type2) {
     CHECK(!type1->Is(type2));
     CHECK(!type2->Is(type1));
     CHECK(!type1->Maybe(type2));
     CHECK(!type2->Maybe(type1));
-    if (Rep::IsBitset(type1) && Rep::IsBitset(type2)) {
-      CHECK_EQ(0,
-          Rep::AsBitset(type1) & Rep::AsBitset(type2) & Rep::AsBitset(mask));
+  }
+
+  void IsSomeType() {
+    for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+      TypeHandle t = *it;
+      CHECK(1 ==
+          this->IsBitset(t) + t->IsClass() + t->IsConstant() + t->IsRange() +
+          this->IsUnion(t) + t->IsArray() + t->IsFunction() + t->IsContext());
     }
   }
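
IsSomeType() relies on bool-to-int promotion: adding up the kind predicates and checking the sum against 1 asserts that each type is of exactly one kind. The same trick in isolation:

#include <cassert>

static void CheckExactlyOneKind(bool is_bitset, bool is_class, bool is_union) {
  // Each bool promotes to 0 or 1, so the sum counts how many predicates hold.
  assert(is_bitset + is_class + is_union == 1);
}

int main() { CheckExactlyOneKind(true, false, false); }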
 
@@ -430,8 +411,8 @@
     CHECK(this->IsBitset(T.None));
     CHECK(this->IsBitset(T.Any));
 
-    CHECK_EQ(0, this->AsBitset(T.None));
-    CHECK_EQ(-1, this->AsBitset(T.Any));
+    CHECK(bitset(0) == this->AsBitset(T.None));
+    CHECK(bitset(0xfffffffeu) == this->AsBitset(T.Any));
 
     // Union(T1, T2) is bitset for bitsets T1,T2
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
@@ -473,22 +454,23 @@
         TypeHandle type2 = *it2;
         TypeHandle union12 = T.Union(type1, type2);
         if (this->IsBitset(type1) && this->IsBitset(type2)) {
-          CHECK_EQ(
-              this->AsBitset(type1) | this->AsBitset(type2),
+          CHECK(
+              (this->AsBitset(type1) | this->AsBitset(type2)) ==
               this->AsBitset(union12));
         }
       }
     }
 
-    // Intersect(T1, T2) is bitwise conjunction for bitsets T1,T2
+    // Intersect(T1, T2) is bitwise conjunction for bitsets T1,T2 (modulo None)
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         TypeHandle type1 = *it1;
         TypeHandle type2 = *it2;
         TypeHandle intersect12 = T.Intersect(type1, type2);
         if (this->IsBitset(type1) && this->IsBitset(type2)) {
-          CHECK_EQ(
-              this->AsBitset(type1) & this->AsBitset(type2),
+          bitset bits = this->AsBitset(type1) & this->AsBitset(type2);
+          CHECK(
+              (Rep::BitsetType::IsInhabited(bits) ? bits : 0) ==
               this->AsBitset(intersect12));
         }
       }
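
These two loops pin down the bitset semantics used throughout: union of bitsets is bitwise disjunction, and intersection is bitwise conjunction except that an uninhabited result collapses to None. A standalone sketch, under the simplifying assumption that zero is the only uninhabited bitset:

#include <cassert>
#include <cstdint>

typedef uint32_t bitset;

static bool IsInhabited(bitset bits) { return bits != 0; }  // simplified

static bitset BitsetUnion(bitset a, bitset b) { return a | b; }

static bitset BitsetIntersect(bitset a, bitset b) {
  bitset bits = a & b;
  return IsInhabited(bits) ? bits : 0;  // collapse to None, "modulo None" above
}

int main() {
  assert(BitsetUnion(0x3u, 0x5u) == 0x7u);
  assert(BitsetIntersect(0x2u, 0x4u) == 0u);  // disjoint bitsets meet at None
}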
@@ -500,7 +482,7 @@
     for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
       Handle<i::Map> map = *mt;
       TypeHandle type = T.Class(map);
-      CHECK(this->IsClass(type));
+      CHECK(type->IsClass());
     }
 
     // Map attribute
@@ -527,7 +509,7 @@
     for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
       Handle<i::Object> value = *vt;
       TypeHandle type = T.Constant(value);
-      CHECK(this->IsConstant(type));
+      CHECK(type->IsConstant());
     }
 
     // Value attribute
@@ -583,17 +565,66 @@
     CHECK(T.Constant(fac->NewNumber(-10.1))->Is(T.OtherNumber));
     CHECK(T.Constant(fac->NewNumber(10e60))->Is(T.OtherNumber));
     CHECK(T.Constant(fac->NewNumber(-1.0*0.0))->Is(T.MinusZero));
-    CHECK(T.Constant(fac->NewNumber(OS::nan_value()))->Is(T.NaN));
+    CHECK(T.Constant(fac->NewNumber(v8::base::OS::nan_value()))->Is(T.NaN));
     CHECK(T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.OtherNumber));
     CHECK(T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.OtherNumber));
   }
 
+  void Range() {
+    // Constructor
+    for (ValueIterator i = T.integers.begin(); i != T.integers.end(); ++i) {
+      for (ValueIterator j = T.integers.begin(); j != T.integers.end(); ++j) {
+        i::Handle<i::Object> min = *i;
+        i::Handle<i::Object> max = *j;
+        if (min->Number() > max->Number()) std::swap(min, max);
+        TypeHandle type = T.Range(min, max);
+        CHECK(type->IsRange());
+      }
+    }
+
+    // Range attributes
+    for (ValueIterator i = T.integers.begin(); i != T.integers.end(); ++i) {
+      for (ValueIterator j = T.integers.begin(); j != T.integers.end(); ++j) {
+        i::Handle<i::Object> min = *i;
+        i::Handle<i::Object> max = *j;
+        if (min->Number() > max->Number()) std::swap(min, max);
+        TypeHandle type = T.Range(min, max);
+        CHECK(*min == *type->AsRange()->Min());
+        CHECK(*max == *type->AsRange()->Max());
+      }
+    }
+
+    // Functionality & Injectivity:
+    // Range(min1, max1) = Range(min2, max2) <=> min1 = min2 /\ max1 = max2
+    for (ValueIterator i1 = T.integers.begin();
+        i1 != T.integers.end(); ++i1) {
+      for (ValueIterator j1 = T.integers.begin();
+          j1 != T.integers.end(); ++j1) {
+        for (ValueIterator i2 = T.integers.begin();
+            i2 != T.integers.end(); ++i2) {
+          for (ValueIterator j2 = T.integers.begin();
+              j2 != T.integers.end(); ++j2) {
+            i::Handle<i::Object> min1 = *i1;
+            i::Handle<i::Object> max1 = *j1;
+            i::Handle<i::Object> min2 = *i2;
+            i::Handle<i::Object> max2 = *j2;
+            if (min1->Number() > max1->Number()) std::swap(min1, max1);
+            if (min2->Number() > max2->Number()) std::swap(min2, max2);
+            TypeHandle type1 = T.Range(min1, max1);
+            TypeHandle type2 = T.Range(min2, max2);
+            CHECK(Equal(type1, type2) == (*min1 == *min2 && *max1 == *max2));
+          }
+        }
+      }
+    }
+  }
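
The Range() checks normalize the endpoints (swapping when min exceeds max) before asserting injectivity: two ranges are equal exactly when both limits agree. A plain-double sketch of that contract:

#include <cassert>
#include <utility>

struct Range { double min, max; };

static Range MakeRange(double a, double b) {
  if (a > b) std::swap(a, b);  // normalize endpoints, as the test does
  Range r = { a, b };
  return r;
}

static bool RangeEquals(const Range& r1, const Range& r2) {
  return r1.min == r2.min && r1.max == r2.max;
}

int main() {
  // Injectivity: equal limits <=> equal ranges.
  assert(RangeEquals(MakeRange(5, -1), MakeRange(-1, 5)));
  assert(!RangeEquals(MakeRange(0, 1), MakeRange(0, 2)));
}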
+
   void Array() {
     // Constructor
     for (int i = 0; i < 20; ++i) {
       TypeHandle type = T.Random();
       TypeHandle array = T.Array1(type);
-      CHECK(this->IsArray(array));
+      CHECK(array->IsArray());
     }
 
     // Attributes
@@ -693,15 +724,26 @@
       CHECK(const_type->Is(of_type));
     }
 
-    // Constant(V)->Is(T) iff Of(V)->Is(T) or T->Maybe(Constant(V))
+    // If Of(V)->Is(T), then Constant(V)->Is(T)
     for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
       for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
         Handle<i::Object> value = *vt;
         TypeHandle type = *it;
         TypeHandle const_type = T.Constant(value);
         TypeHandle of_type = T.Of(value);
-        CHECK(const_type->Is(type) ==
-              (of_type->Is(type) || type->Maybe(const_type)));
+        CHECK(!of_type->Is(type) || const_type->Is(type));
+      }
+    }
+
+    // If Constant(V)->Is(T), then Of(V)->Is(T) or T->Maybe(Constant(V))
+    for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+      for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+        Handle<i::Object> value = *vt;
+        TypeHandle type = *it;
+        TypeHandle const_type = T.Constant(value);
+        TypeHandle of_type = T.Of(value);
+        CHECK(!const_type->Is(type) ||
+              of_type->Is(type) || type->Maybe(const_type));
       }
     }
   }
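
These loops encode one-way implications as CHECK(!p || q), the material-implication idiom. A trivial helper states the intent, with hypothetical names in the usage comment:

static inline bool Implies(bool p, bool q) { return !p || q; }
// e.g. CHECK(Implies(of_type->Is(type), const_type->Is(type)));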
@@ -723,19 +765,32 @@
       CHECK(nowof_type->Is(of_type));
     }
 
-    // Constant(V)->NowIs(T) iff NowOf(V)->NowIs(T) or T->Maybe(Constant(V))
+    // If NowOf(V)->NowIs(T), then Constant(V)->NowIs(T)
     for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
       for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
         Handle<i::Object> value = *vt;
         TypeHandle type = *it;
         TypeHandle const_type = T.Constant(value);
         TypeHandle nowof_type = T.NowOf(value);
-        CHECK(const_type->NowIs(type) ==
-              (nowof_type->NowIs(type) || type->Maybe(const_type)));
+        CHECK(!nowof_type->NowIs(type) || const_type->NowIs(type));
       }
     }
 
-    // Constant(V)->Is(T) implies NowOf(V)->Is(T) or T->Maybe(Constant(V))
+    // If Constant(V)->NowIs(T),
+    // then NowOf(V)->NowIs(T) or T->Maybe(Constant(V))
+    for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
+      for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
+        Handle<i::Object> value = *vt;
+        TypeHandle type = *it;
+        TypeHandle const_type = T.Constant(value);
+        TypeHandle nowof_type = T.NowOf(value);
+        CHECK(!const_type->NowIs(type) ||
+              nowof_type->NowIs(type) || type->Maybe(const_type));
+      }
+    }
+
+    // If Constant(V)->Is(T),
+    // then NowOf(V)->Is(T) or T->Maybe(Constant(V))
     for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
       for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
         Handle<i::Object> value = *vt;
@@ -743,23 +798,13 @@
         TypeHandle const_type = T.Constant(value);
         TypeHandle nowof_type = T.NowOf(value);
         CHECK(!const_type->Is(type) ||
-              (nowof_type->Is(type) || type->Maybe(const_type)));
+              nowof_type->Is(type) || type->Maybe(const_type));
       }
     }
   }
 
-  void Bounds() {
-    // Ordering: (T->BitsetGlb())->Is(T->BitsetLub())
-    for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
-      TypeHandle type = *it;
-      TypeHandle glb =
-          Rep::BitsetType::New(Rep::BitsetType::Glb(type), T.region());
-      TypeHandle lub =
-          Rep::BitsetType::New(Rep::BitsetType::Lub(type), T.region());
-      CHECK(glb->Is(lub));
-    }
-
-    // Lower bound: (T->BitsetGlb())->Is(T)
+  void BitsetGlb() {
+    // Lower: (T->BitsetGlb())->Is(T)
     for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
       TypeHandle type = *it;
       TypeHandle glb =
@@ -767,7 +812,33 @@
       CHECK(glb->Is(type));
     }
 
-    // Upper bound: T->Is(T->BitsetLub())
+    // Greatest: If T1->IsBitset() and T1->Is(T2), then T1->Is(T2->BitsetGlb())
+    for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+      for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+        TypeHandle type1 = *it1;
+        TypeHandle type2 = *it2;
+        TypeHandle glb2 =
+            Rep::BitsetType::New(Rep::BitsetType::Glb(type2), T.region());
+        CHECK(!this->IsBitset(type1) || !type1->Is(type2) || type1->Is(glb2));
+      }
+    }
+
+    // Monotonicity: T1->Is(T2) implies (T1->BitsetGlb())->Is(T2->BitsetGlb())
+    for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+      for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+        TypeHandle type1 = *it1;
+        TypeHandle type2 = *it2;
+        TypeHandle glb1 =
+            Rep::BitsetType::New(Rep::BitsetType::Glb(type1), T.region());
+        TypeHandle glb2 =
+            Rep::BitsetType::New(Rep::BitsetType::Glb(type2), T.region());
+        CHECK(!type1->Is(type2) || glb1->Is(glb2));
+      }
+    }
+  }
+
+  void BitsetLub() {
+    // Upper: T->Is(T->BitsetLub())
     for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
       TypeHandle type = *it;
       TypeHandle lub =
@@ -775,14 +846,28 @@
       CHECK(type->Is(lub));
     }
 
-    // Inherent bound: (T->BitsetLub())->Is(T->InherentBitsetLub())
-    for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
-      TypeHandle type = *it;
-      TypeHandle lub =
-          Rep::BitsetType::New(Rep::BitsetType::Lub(type), T.region());
-      TypeHandle inherent =
-          Rep::BitsetType::New(Rep::BitsetType::InherentLub(type), T.region());
-      CHECK(lub->Is(inherent));
+    // Least: If T2->IsBitset() and T1->Is(T2), then (T1->BitsetLub())->Is(T2)
+    for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+      for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+        TypeHandle type1 = *it1;
+        TypeHandle type2 = *it2;
+        TypeHandle lub1 =
+            Rep::BitsetType::New(Rep::BitsetType::Lub(type1), T.region());
+        CHECK(!this->IsBitset(type2) || !type1->Is(type2) || lub1->Is(type2));
+      }
+    }
+
+    // Monotonicity: T1->Is(T2) implies (T1->BitsetLub())->Is(T2->BitsetLub())
+    for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+      for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+        TypeHandle type1 = *it1;
+        TypeHandle type2 = *it2;
+        TypeHandle lub1 =
+            Rep::BitsetType::New(Rep::BitsetType::Lub(type1), T.region());
+        TypeHandle lub2 =
+            Rep::BitsetType::New(Rep::BitsetType::Lub(type2), T.region());
+        CHECK(!type1->Is(type2) || lub1->Is(lub2));
+      }
     }
   }
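
BitsetGlb() and BitsetLub() bracket every type between a bitset lower and a bitset upper bound, with both bounds monotone in the subtype order. A toy model, assuming just two kinds of type and the subset order on bitsets (the real bounds are more refined):

#include <cassert>
#include <cstdint>

typedef uint32_t bitset;
const bitset kNone = 0;

struct Type {
  bool is_bitset;
  bitset lub;  // the bitset itself, or the smallest bitset covering the type
};

static bitset Glb(const Type& t) { return t.is_bitset ? t.lub : kNone; }
static bitset Lub(const Type& t) { return t.lub; }

static bool BitsetIs(bitset a, bitset b) { return (a & ~b) == 0; }  // subset

int main() {
  Type object_class = { false, 0x10u };  // hypothetical "Object" lub bit
  Type number = { true, 0x03u };         // a plain bitset type
  // Sandwich: Glb(T) is below Lub(T) for both kinds.
  assert(BitsetIs(Glb(object_class), Lub(object_class)));
  assert(BitsetIs(Glb(number), Lub(number)));
}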
 
@@ -838,17 +923,6 @@
       }
     }
 
-    // Constant(V1)->Is(Constant(V2)) iff V1 = V2
-    for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
-      for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
-        Handle<i::Object> value1 = *vt1;
-        Handle<i::Object> value2 = *vt2;
-        TypeHandle const_type1 = T.Constant(value1);
-        TypeHandle const_type2 = T.Constant(value2);
-        CHECK(const_type1->Is(const_type2) == (*value1 == *value2));
-      }
-    }
-
     // Class(M1)->Is(Class(M2)) iff M1 = M2
     for (MapIterator mt1 = T.maps.begin(); mt1 != T.maps.end(); ++mt1) {
       for (MapIterator mt2 = T.maps.begin(); mt2 != T.maps.end(); ++mt2) {
@@ -860,25 +934,94 @@
       }
     }
 
-    // Constant(V)->Is(Class(M)) never
-    for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
-      for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
-        Handle<i::Map> map = *mt;
-        Handle<i::Object> value = *vt;
-        TypeHandle constant_type = T.Constant(value);
-        TypeHandle class_type = T.Class(map);
-        CHECK(!constant_type->Is(class_type));
+    // Constant(V1)->Is(Constant(V2)) iff V1 = V2
+    for (ValueIterator vt1 = T.values.begin(); vt1 != T.values.end(); ++vt1) {
+      for (ValueIterator vt2 = T.values.begin(); vt2 != T.values.end(); ++vt2) {
+        Handle<i::Object> value1 = *vt1;
+        Handle<i::Object> value2 = *vt2;
+        TypeHandle const_type1 = T.Constant(value1);
+        TypeHandle const_type2 = T.Constant(value2);
+        CHECK(const_type1->Is(const_type2) == (*value1 == *value2));
       }
     }
 
-    // Class(M)->Is(Constant(V)) never
-    for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
-      for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
-        Handle<i::Map> map = *mt;
-        Handle<i::Object> value = *vt;
-        TypeHandle constant_type = T.Constant(value);
-        TypeHandle class_type = T.Class(map);
-        CHECK(!class_type->Is(constant_type));
+    // Range(min1, max1)->Is(Range(min2, max2)) iff
+    // min1 >= min2 /\ max1 <= max2
+    for (ValueIterator i1 = T.integers.begin();
+        i1 != T.integers.end(); ++i1) {
+      for (ValueIterator j1 = T.integers.begin();
+          j1 != T.integers.end(); ++j1) {
+        for (ValueIterator i2 = T.integers.begin();
+             i2 != T.integers.end(); ++i2) {
+          for (ValueIterator j2 = T.integers.begin();
+               j2 != T.integers.end(); ++j2) {
+            i::Handle<i::Object> min1 = *i1;
+            i::Handle<i::Object> max1 = *j1;
+            i::Handle<i::Object> min2 = *i2;
+            i::Handle<i::Object> max2 = *j2;
+            if (min1->Number() > max1->Number()) std::swap(min1, max1);
+            if (min2->Number() > max2->Number()) std::swap(min2, max2);
+            TypeHandle type1 = T.Range(min1, max1);
+            TypeHandle type2 = T.Range(min2, max2);
+            CHECK(type1->Is(type2) ==
+                (min2->Number() <= min1->Number() &&
+                 max1->Number() <= max2->Number()));
+          }
+        }
+      }
+    }
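
The quadruple loop above states range subtyping as interval containment: Range(min1, max1)->Is(Range(min2, max2)) exactly when min2 <= min1 and max1 <= max2. The predicate in isolation:

static bool RangeIs(double min1, double max1, double min2, double max2) {
  return min2 <= min1 && max1 <= max2;  // interval containment
}
// e.g. RangeIs(0, 5, -1, 10) holds, while RangeIs(-2, 5, -1, 10) does not.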
+
+    // Context(T1)->Is(Context(T2)) iff T1 = T2
+    for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+      for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+        TypeHandle outer1 = *it1;
+        TypeHandle outer2 = *it2;
+        TypeHandle type1 = T.Context(outer1);
+        TypeHandle type2 = T.Context(outer2);
+        CHECK(type1->Is(type2) == outer1->Equals(outer2));
+      }
+    }
+
+    // Array(T1)->Is(Array(T2)) iff T1 = T2
+    for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+      for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+        TypeHandle element1 = *it1;
+        TypeHandle element2 = *it2;
+        TypeHandle type1 = T.Array1(element1);
+        TypeHandle type2 = T.Array1(element2);
+        CHECK(type1->Is(type2) == element1->Equals(element2));
+      }
+    }
+
+    // Function0(S1, T1)->Is(Function0(S2, T2)) iff S1 = S2 and T1 = T2
+    for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
+      for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
+        TypeHandle result1 = *i;
+        TypeHandle receiver1 = *j;
+        TypeHandle type1 = T.Function0(result1, receiver1);
+        TypeHandle result2 = T.Random();
+        TypeHandle receiver2 = T.Random();
+        TypeHandle type2 = T.Function0(result2, receiver2);
+        CHECK(type1->Is(type2) ==
+            (result1->Equals(result2) && receiver1->Equals(receiver2)));
+      }
+    }
+
+    // (In-)Compatibilities.
+    for (TypeIterator i = T.types.begin(); i != T.types.end(); ++i) {
+      for (TypeIterator j = T.types.begin(); j != T.types.end(); ++j) {
+        TypeHandle type1 = *i;
+        TypeHandle type2 = *j;
+        CHECK(!type1->Is(type2) || this->IsBitset(type2) ||
+              this->IsUnion(type2) || this->IsUnion(type1) ||
+              (type1->IsClass() && type2->IsClass()) ||
+              (type1->IsConstant() && type2->IsConstant()) ||
+              (type1->IsConstant() && type2->IsRange()) ||
+              (type1->IsRange() && type2->IsRange()) ||
+              (type1->IsContext() && type2->IsContext()) ||
+              (type1->IsArray() && type2->IsArray()) ||
+              (type1->IsFunction() && type2->IsFunction()) ||
+              type1->Equals(T.None));
       }
     }
 
@@ -1069,16 +1212,6 @@
         CHECK(type->Contains(value) == const_type->Is(type));
       }
     }
-
-    // Of(V)->Is(T) implies T->Contains(V)
-    for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
-      for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
-        TypeHandle type = *it;
-        Handle<i::Object> value = *vt;
-        TypeHandle of_type = T.Of(value);
-        CHECK(!of_type->Is(type) || type->Contains(value));
-      }
-    }
   }
 
   void NowContains() {
@@ -1110,16 +1243,6 @@
         CHECK(!nowof_type->NowIs(type) || type->NowContains(value));
       }
     }
-
-    // NowOf(V)->NowIs(T) implies T->NowContains(V)
-    for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
-      for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
-        TypeHandle type = *it;
-        Handle<i::Object> value = *vt;
-        TypeHandle nowof_type = T.Of(value);
-        CHECK(!nowof_type->NowIs(type) || type->NowContains(value));
-      }
-    }
   }
 
   void Maybe() {
@@ -1203,6 +1326,8 @@
     }
 
     // Constant(V)->Maybe(Class(M)) never
+    // This does NOT hold!
+    /*
     for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
       for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
         Handle<i::Map> map = *mt;
@@ -1212,8 +1337,11 @@
         CHECK(!const_type->Maybe(class_type));
       }
     }
+    */
 
     // Class(M)->Maybe(Constant(V)) never
+    // This does NOT hold!
+    /*
     for (MapIterator mt = T.maps.begin(); mt != T.maps.end(); ++mt) {
       for (ValueIterator vt = T.values.begin(); vt != T.values.end(); ++vt) {
         Handle<i::Map> map = *mt;
@@ -1223,67 +1351,62 @@
         CHECK(!class_type->Maybe(const_type));
       }
     }
+    */
 
     // Basic types
-    CheckDisjoint(T.Boolean, T.Null, T.Semantic);
-    CheckDisjoint(T.Undefined, T.Null, T.Semantic);
-    CheckDisjoint(T.Boolean, T.Undefined, T.Semantic);
-
-    CheckOverlap(T.SignedSmall, T.Number, T.Semantic);
-    CheckOverlap(T.NaN, T.Number, T.Semantic);
-    CheckDisjoint(T.Signed32, T.NaN, T.Semantic);
-
-    CheckOverlap(T.UniqueName, T.Name, T.Semantic);
-    CheckOverlap(T.String, T.Name, T.Semantic);
-    CheckOverlap(T.InternalizedString, T.String, T.Semantic);
-    CheckOverlap(T.InternalizedString, T.UniqueName, T.Semantic);
-    CheckOverlap(T.InternalizedString, T.Name, T.Semantic);
-    CheckOverlap(T.Symbol, T.UniqueName, T.Semantic);
-    CheckOverlap(T.Symbol, T.Name, T.Semantic);
-    CheckOverlap(T.String, T.UniqueName, T.Semantic);
-    CheckDisjoint(T.String, T.Symbol, T.Semantic);
-    CheckDisjoint(T.InternalizedString, T.Symbol, T.Semantic);
-
-    CheckOverlap(T.Object, T.Receiver, T.Semantic);
-    CheckOverlap(T.Array, T.Object, T.Semantic);
-    CheckOverlap(T.Function, T.Object, T.Semantic);
-    CheckOverlap(T.Proxy, T.Receiver, T.Semantic);
-    CheckDisjoint(T.Object, T.Proxy, T.Semantic);
-    CheckDisjoint(T.Array, T.Function, T.Semantic);
+    CheckDisjoint(T.Boolean, T.Null);
+    CheckDisjoint(T.Undefined, T.Null);
+    CheckDisjoint(T.Boolean, T.Undefined);
+    CheckOverlap(T.SignedSmall, T.Number);
+    CheckOverlap(T.NaN, T.Number);
+    CheckDisjoint(T.Signed32, T.NaN);
+    CheckOverlap(T.UniqueName, T.Name);
+    CheckOverlap(T.String, T.Name);
+    CheckOverlap(T.InternalizedString, T.String);
+    CheckOverlap(T.InternalizedString, T.UniqueName);
+    CheckOverlap(T.InternalizedString, T.Name);
+    CheckOverlap(T.Symbol, T.UniqueName);
+    CheckOverlap(T.Symbol, T.Name);
+    CheckOverlap(T.String, T.UniqueName);
+    CheckDisjoint(T.String, T.Symbol);
+    CheckDisjoint(T.InternalizedString, T.Symbol);
+    CheckOverlap(T.Object, T.Receiver);
+    CheckOverlap(T.Array, T.Object);
+    CheckOverlap(T.Function, T.Object);
+    CheckOverlap(T.Proxy, T.Receiver);
+    CheckDisjoint(T.Object, T.Proxy);
+    CheckDisjoint(T.Array, T.Function);
 
     // Structural types
-    CheckOverlap(T.ObjectClass, T.Object, T.Semantic);
-    CheckOverlap(T.ArrayClass, T.Object, T.Semantic);
-    CheckOverlap(T.ObjectClass, T.ObjectClass, T.Semantic);
-    CheckOverlap(T.ArrayClass, T.ArrayClass, T.Semantic);
-    CheckDisjoint(T.ObjectClass, T.ArrayClass, T.Semantic);
-
-    CheckOverlap(T.SmiConstant, T.SignedSmall, T.Semantic);
-    CheckOverlap(T.SmiConstant, T.Signed32, T.Semantic);
-    CheckOverlap(T.SmiConstant, T.Number, T.Semantic);
-    CheckOverlap(T.ObjectConstant1, T.Object, T.Semantic);
-    CheckOverlap(T.ObjectConstant2, T.Object, T.Semantic);
-    CheckOverlap(T.ArrayConstant, T.Object, T.Semantic);
-    CheckOverlap(T.ArrayConstant, T.Array, T.Semantic);
-    CheckOverlap(T.ObjectConstant1, T.ObjectConstant1, T.Semantic);
-    CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2, T.Semantic);
-    CheckDisjoint(T.ObjectConstant1, T.ArrayConstant, T.Semantic);
-
-    CheckDisjoint(T.ObjectConstant1, T.ObjectClass, T.Semantic);
-    CheckDisjoint(T.ObjectConstant2, T.ObjectClass, T.Semantic);
-    CheckDisjoint(T.ObjectConstant1, T.ArrayClass, T.Semantic);
-    CheckDisjoint(T.ObjectConstant2, T.ArrayClass, T.Semantic);
-    CheckDisjoint(T.ArrayConstant, T.ObjectClass, T.Semantic);
-
-    CheckOverlap(T.NumberArray, T.Array, T.Semantic);
-    CheckDisjoint(T.NumberArray, T.AnyArray, T.Semantic);
-    CheckDisjoint(T.NumberArray, T.StringArray, T.Semantic);
-
-    CheckOverlap(T.MethodFunction, T.Function, T.Semantic);
-    CheckDisjoint(T.SignedFunction1, T.NumberFunction1, T.Semantic);
-    CheckDisjoint(T.SignedFunction1, T.NumberFunction2, T.Semantic);
-    CheckDisjoint(T.NumberFunction1, T.NumberFunction2, T.Semantic);
-    CheckDisjoint(T.SignedFunction1, T.MethodFunction, T.Semantic);
+    CheckOverlap(T.ObjectClass, T.Object);
+    CheckOverlap(T.ArrayClass, T.Object);
+    CheckOverlap(T.ObjectClass, T.ObjectClass);
+    CheckOverlap(T.ArrayClass, T.ArrayClass);
+    CheckDisjoint(T.ObjectClass, T.ArrayClass);
+    CheckOverlap(T.SmiConstant, T.SignedSmall);
+    CheckOverlap(T.SmiConstant, T.Signed32);
+    CheckOverlap(T.SmiConstant, T.Number);
+    CheckOverlap(T.ObjectConstant1, T.Object);
+    CheckOverlap(T.ObjectConstant2, T.Object);
+    CheckOverlap(T.ArrayConstant, T.Object);
+    CheckOverlap(T.ArrayConstant, T.Array);
+    CheckOverlap(T.ObjectConstant1, T.ObjectConstant1);
+    CheckDisjoint(T.ObjectConstant1, T.ObjectConstant2);
+    CheckDisjoint(T.ObjectConstant1, T.ArrayConstant);
+    CheckDisjoint(T.ObjectConstant1, T.ArrayClass);
+    CheckDisjoint(T.ObjectConstant2, T.ArrayClass);
+    CheckDisjoint(T.ArrayConstant, T.ObjectClass);
+    CheckOverlap(T.NumberArray, T.Array);
+    CheckDisjoint(T.NumberArray, T.AnyArray);
+    CheckDisjoint(T.NumberArray, T.StringArray);
+    CheckOverlap(T.MethodFunction, T.Function);
+    CheckDisjoint(T.SignedFunction1, T.NumberFunction1);
+    CheckDisjoint(T.SignedFunction1, T.NumberFunction2);
+    CheckDisjoint(T.NumberFunction1, T.NumberFunction2);
+    CheckDisjoint(T.SignedFunction1, T.MethodFunction);
+    CheckOverlap(T.ObjectConstant1, T.ObjectClass);  // !!!
+    CheckOverlap(T.ObjectConstant2, T.ObjectClass);  // !!!
+    CheckOverlap(T.NumberClass, T.Intersect(T.Number, T.Untagged));  // !!!
   }
 
   void Union1() {
@@ -1320,6 +1443,8 @@
     }
 
     // Associativity: Union(T1, Union(T2, T3)) = Union(Union(T1, T2), T3)
+    // This does NOT hold!
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1334,6 +1459,7 @@
         }
       }
     }
+    */
 
     // Meet: T1->Is(Union(T1, T2)) and T2->Is(Union(T1, T2))
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
@@ -1355,10 +1481,10 @@
         if (type1->Is(type2)) CheckEqual(union12, type2);
       }
     }
-  }
 
-  void Union2() {
     // Monotonicity: T1->Is(T2) implies Union(T1, T3)->Is(Union(T2, T3))
+    // This does NOT hold.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1371,8 +1497,14 @@
         }
       }
     }
+    */
+  }
 
+  void Union2() {
     // Monotonicity: T1->Is(T3) and T2->Is(T3) implies Union(T1, T2)->Is(T3)
+    // This does NOT hold.  TODO(neis): Could fix this by splitting
+    // OtherNumber into a negative and a positive part.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1384,7 +1516,10 @@
         }
       }
     }
+    */
+  }
 
+  void Union3() {
     // Monotonicity: T1->Is(T2) or T1->Is(T3) implies T1->Is(Union(T2, T3))
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
@@ -1397,12 +1532,14 @@
         }
       }
     }
+  }
 
+  void Union4() {
     // Class-class
     CheckSub(T.Union(T.ObjectClass, T.ArrayClass), T.Object);
     CheckUnordered(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
-    CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array, T.Semantic);
-    CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number, T.Semantic);
+    CheckOverlap(T.Union(T.ObjectClass, T.ArrayClass), T.Array);
+    CheckDisjoint(T.Union(T.ObjectClass, T.ArrayClass), T.Number);
 
     // Constant-constant
     CheckSub(T.Union(T.ObjectConstant1, T.ObjectConstant2), T.Object);
@@ -1410,11 +1547,11 @@
     CheckUnordered(
         T.Union(T.ObjectConstant1, T.ObjectConstant2), T.ObjectClass);
     CheckOverlap(
-        T.Union(T.ObjectConstant1, T.ArrayConstant), T.Array, T.Semantic);
+        T.Union(T.ObjectConstant1, T.ArrayConstant), T.Array);
     CheckDisjoint(
-        T.Union(T.ObjectConstant1, T.ArrayConstant), T.Number, T.Semantic);
-    CheckDisjoint(
-        T.Union(T.ObjectConstant1, T.ArrayConstant), T.ObjectClass, T.Semantic);
+        T.Union(T.ObjectConstant1, T.ArrayConstant), T.Number);
+    CheckOverlap(
+        T.Union(T.ObjectConstant1, T.ArrayConstant), T.ObjectClass);  // !!!
 
     // Bitset-array
     CHECK(this->IsBitset(T.Union(T.AnyArray, T.Array)));
@@ -1422,8 +1559,8 @@
 
     CheckEqual(T.Union(T.AnyArray, T.Array), T.Array);
     CheckUnordered(T.Union(T.AnyArray, T.String), T.Array);
-    CheckOverlap(T.Union(T.NumberArray, T.String), T.Object, T.Semantic);
-    CheckDisjoint(T.Union(T.NumberArray, T.String), T.Number, T.Semantic);
+    CheckOverlap(T.Union(T.NumberArray, T.String), T.Object);
+    CheckDisjoint(T.Union(T.NumberArray, T.String), T.Number);
 
     // Bitset-function
     CHECK(this->IsBitset(T.Union(T.MethodFunction, T.Function)));
@@ -1431,24 +1568,24 @@
 
     CheckEqual(T.Union(T.MethodFunction, T.Function), T.Function);
     CheckUnordered(T.Union(T.NumberFunction1, T.String), T.Function);
-    CheckOverlap(T.Union(T.NumberFunction2, T.String), T.Object, T.Semantic);
-    CheckDisjoint(T.Union(T.NumberFunction1, T.String), T.Number, T.Semantic);
+    CheckOverlap(T.Union(T.NumberFunction2, T.String), T.Object);
+    CheckDisjoint(T.Union(T.NumberFunction1, T.String), T.Number);
 
     // Bitset-class
     CheckSub(
         T.Union(T.ObjectClass, T.SignedSmall), T.Union(T.Object, T.Number));
     CheckSub(T.Union(T.ObjectClass, T.Array), T.Object);
     CheckUnordered(T.Union(T.ObjectClass, T.String), T.Array);
-    CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object, T.Semantic);
-    CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number, T.Semantic);
+    CheckOverlap(T.Union(T.ObjectClass, T.String), T.Object);
+    CheckDisjoint(T.Union(T.ObjectClass, T.String), T.Number);
 
     // Bitset-constant
     CheckSub(
         T.Union(T.ObjectConstant1, T.Signed32), T.Union(T.Object, T.Number));
     CheckSub(T.Union(T.ObjectConstant1, T.Array), T.Object);
     CheckUnordered(T.Union(T.ObjectConstant1, T.String), T.Array);
-    CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object, T.Semantic);
-    CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number, T.Semantic);
+    CheckOverlap(T.Union(T.ObjectConstant1, T.String), T.Object);
+    CheckDisjoint(T.Union(T.ObjectConstant1, T.String), T.Number);
 
     // Class-constant
     CheckSub(T.Union(T.ObjectConstant1, T.ArrayClass), T.Object);
@@ -1457,10 +1594,9 @@
         T.Union(T.ObjectConstant1, T.ArrayClass), T.Union(T.Array, T.Object));
     CheckUnordered(T.Union(T.ObjectConstant1, T.ArrayClass), T.ArrayConstant);
     CheckDisjoint(
-        T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2,
-        T.Semantic);
-    CheckDisjoint(
-        T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass, T.Semantic);
+        T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectConstant2);
+    CheckOverlap(
+        T.Union(T.ObjectConstant1, T.ArrayClass), T.ObjectClass);  // !!!
 
     // Bitset-union
     CheckSub(
@@ -1514,7 +1650,7 @@
         T.Union(T.Number, T.Array));
   }
 
-  void Intersect1() {
+  void Intersect() {
     // Identity: Intersect(T, Any) = T
     for (TypeIterator it = T.types.begin(); it != T.types.end(); ++it) {
       TypeHandle type = *it;
@@ -1549,6 +1685,8 @@
 
     // Associativity:
     // Intersect(T1, Intersect(T2, T3)) = Intersect(Intersect(T1, T2), T3)
+    // This does NOT hold.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1563,8 +1701,11 @@
         }
       }
     }
+    */
 
     // Join: Intersect(T1, T2)->Is(T1) and Intersect(T1, T2)->Is(T2)
+    // This does NOT hold.  Not even the disjunction.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         TypeHandle type1 = *it1;
@@ -1574,6 +1715,7 @@
         CHECK(intersect12->Is(type2));
       }
     }
+    */
 
     // Lower Boundedness: T1->Is(T2) implies Intersect(T1, T2) = T1
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
@@ -1584,10 +1726,10 @@
         if (type1->Is(type2)) CheckEqual(intersect12, type1);
       }
     }
-  }
 
-  void Intersect2() {
     // Monotonicity: T1->Is(T2) implies Intersect(T1, T3)->Is(Intersect(T2, T3))
+    // This does NOT hold.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1600,8 +1742,11 @@
         }
       }
     }
+    */
 
     // Monotonicity: T1->Is(T3) or T2->Is(T3) implies Intersect(T1, T2)->Is(T3)
+    // This does NOT hold.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1614,8 +1759,11 @@
         }
       }
     }
+    */
 
     // Monotonicity: T1->Is(T2) and T1->Is(T3) implies T1->Is(Intersect(T2, T3))
+    // This does NOT hold.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1628,19 +1776,20 @@
         }
       }
     }
+    */
 
     // Bitset-class
     CheckEqual(T.Intersect(T.ObjectClass, T.Object), T.ObjectClass);
-    CheckSub(T.Intersect(T.ObjectClass, T.Array), T.Representation);
-    CheckSub(T.Intersect(T.ObjectClass, T.Number), T.Representation);
+    CheckEqual(T.Intersect(T.ObjectClass, T.Array), T.None);
+    CheckEqual(T.Intersect(T.ObjectClass, T.Number), T.None);
 
     // Bitset-array
     CheckEqual(T.Intersect(T.NumberArray, T.Object), T.NumberArray);
-    CheckSub(T.Intersect(T.AnyArray, T.Function), T.Representation);
+    CheckEqual(T.Intersect(T.AnyArray, T.Function), T.None);
 
     // Bitset-function
     CheckEqual(T.Intersect(T.MethodFunction, T.Object), T.MethodFunction);
-    CheckSub(T.Intersect(T.NumberFunction1, T.Array), T.Representation);
+    CheckEqual(T.Intersect(T.NumberFunction1, T.Array), T.None);
 
     // Bitset-union
     CheckEqual(
@@ -1651,7 +1800,7 @@
             ->IsInhabited());
 
     // Class-constant
-    CHECK(!T.Intersect(T.ObjectConstant1, T.ObjectClass)->IsInhabited());
+    CHECK(T.Intersect(T.ObjectConstant1, T.ObjectClass)->IsInhabited());  // !!!
     CHECK(!T.Intersect(T.ArrayClass, T.ObjectConstant2)->IsInhabited());
 
     // Array-union
@@ -1684,8 +1833,8 @@
         T.Intersect(T.ArrayClass, T.Union(T.Object, T.SmiConstant)),
         T.ArrayClass);
     CHECK(
-        !T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant), T.ArrayClass)
-            ->IsInhabited());
+        T.Intersect(T.Union(T.ObjectClass, T.ArrayConstant), T.ArrayClass)
+            ->IsInhabited());  // !!!
 
     // Constant-union
     CheckEqual(
@@ -1696,9 +1845,9 @@
         T.Intersect(T.SmiConstant, T.Union(T.Number, T.ObjectConstant2)),
         T.SmiConstant);
     CHECK(
-        !T.Intersect(
+        T.Intersect(
             T.Union(T.ArrayConstant, T.ObjectClass), T.ObjectConstant1)
-                ->IsInhabited());
+                ->IsInhabited());  // !!!
 
     // Union-union
     CheckEqual(
@@ -1719,16 +1868,20 @@
     CheckEqual(
         T.Intersect(
             T.Union(
-                T.Union(T.ObjectConstant2, T.ObjectConstant1), T.ArrayClass),
+                T.ArrayClass,
+                T.Union(T.ObjectConstant2, T.ObjectConstant1)),
             T.Union(
                 T.ObjectConstant1,
                 T.Union(T.ArrayConstant, T.ObjectConstant2))),
-        T.Union(T.ObjectConstant2, T.ObjectConstant1));
+        T.Union(
+            T.ArrayConstant,
+            T.Union(T.ObjectConstant2, T.ObjectConstant1)));  // !!!
   }
 
-  void Distributivity1() {
-    // Distributivity:
+  void Distributivity() {
     // Union(T1, Intersect(T2, T3)) = Intersect(Union(T1, T2), Union(T1, T3))
+    // This does NOT hold.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1744,11 +1897,11 @@
         }
       }
     }
-  }
+    */
 
-  void Distributivity2() {
-    // Distributivity:
     // Intersect(T1, Union(T2, T3)) = Union(Intersect(T1, T2), Intersect(T1,T3))
+    // This does NOT hold.
+    /*
     for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
       for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
         for (TypeIterator it3 = T.types.begin(); it3 != T.types.end(); ++it3) {
@@ -1764,6 +1917,7 @@
         }
       }
     }
+    */
   }
 
   template<class Type2, class TypeHandle2, class Region2, class Rep2>
@@ -1795,6 +1949,13 @@
 typedef Tests<HeapType, Handle<HeapType>, Isolate, HeapRep> HeapTests;
 
 
+TEST(IsSomeType) {
+  CcTest::InitializeVM();
+  ZoneTests().IsSomeType();
+  HeapTests().IsSomeType();
+}
+
+
 TEST(BitsetType) {
   CcTest::InitializeVM();
   ZoneTests().Bitset();
@@ -1816,6 +1977,13 @@
 }
 
 
+TEST(RangeType) {
+  CcTest::InitializeVM();
+  ZoneTests().Range();
+  HeapTests().Range();
+}
+
+
 TEST(ArrayType) {
   CcTest::InitializeVM();
   ZoneTests().Array();
@@ -1844,10 +2012,17 @@
 }
 
 
-TEST(Bounds) {
+TEST(BitsetGlb) {
   CcTest::InitializeVM();
-  ZoneTests().Bounds();
-  HeapTests().Bounds();
+  ZoneTests().BitsetGlb();
+  HeapTests().BitsetGlb();
+}
+
+
+TEST(BitsetLub) {
+  CcTest::InitializeVM();
+  ZoneTests().BitsetLub();
+  HeapTests().BitsetLub();
 }
 
 
@@ -1893,39 +2068,43 @@
 }
 
 
+/*
 TEST(Union2) {
   CcTest::InitializeVM();
   ZoneTests().Union2();
   HeapTests().Union2();
 }
+*/
 
 
-TEST(Intersect1) {
+TEST(Union3) {
   CcTest::InitializeVM();
-  ZoneTests().Intersect1();
-  HeapTests().Intersect1();
+  ZoneTests().Union3();
+  HeapTests().Union3();
 }
 
 
-TEST(Intersect2) {
+TEST(Union4) {
   CcTest::InitializeVM();
-  ZoneTests().Intersect2();
-  HeapTests().Intersect2();
+  ZoneTests().Union4();
+  HeapTests().Union4();
 }
 
 
-TEST(Distributivity1) {
+TEST(Intersect) {
   CcTest::InitializeVM();
-  ZoneTests().Distributivity1();
-  HeapTests().Distributivity1();
+  ZoneTests().Intersect();
+  HeapTests().Intersect();
 }
 
 
-TEST(Distributivity2) {
+/*
+TEST(Distributivity) {
   CcTest::InitializeVM();
-  ZoneTests().Distributivity2();
-  HeapTests().Distributivity2();
+  ZoneTests().Distributivity();
+  HeapTests().Distributivity();
 }
+*/
 
 
 TEST(Convert) {
diff --git a/test/cctest/test-unbound-queue.cc b/test/cctest/test-unbound-queue.cc
index 5f5f50e..6da91e6 100644
--- a/test/cctest/test-unbound-queue.cc
+++ b/test/cctest/test-unbound-queue.cc
@@ -28,9 +28,10 @@
 // Tests of the unbound queue.
 
 #include "src/v8.h"
-#include "src/unbound-queue-inl.h"
 #include "test/cctest/cctest.h"
 
+#include "src/unbound-queue-inl.h"
+
 using i::UnboundQueue;
 
 
diff --git a/test/cctest/test-unscopables-hidden-prototype.cc b/test/cctest/test-unscopables-hidden-prototype.cc
new file mode 100644
index 0000000..aef2ccf
--- /dev/null
+++ b/test/cctest/test-unscopables-hidden-prototype.cc
@@ -0,0 +1,103 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+namespace {
+
+
+static void Cleanup() {
+  CompileRun(
+      "delete object.x;"
+      "delete hidden_prototype.x;"
+      "delete object[Symbol.unscopables];"
+      "delete hidden_prototype[Symbol.unscopables];");
+}
+
+
+TEST(Unscopables) {
+  LocalContext context;
+  v8::Isolate* isolate = context->GetIsolate();
+  v8::HandleScope handle_scope(isolate);
+
+  v8::Local<v8::FunctionTemplate> t0 = v8::FunctionTemplate::New(isolate);
+  v8::Local<v8::FunctionTemplate> t1 = v8::FunctionTemplate::New(isolate);
+
+  t1->SetHiddenPrototype(true);
+
+  v8::Local<v8::Object> object = t0->GetFunction()->NewInstance();
+  v8::Local<v8::Object> hidden_prototype = t1->GetFunction()->NewInstance();
+
+  object->SetPrototype(hidden_prototype);
+
+  context->Global()->Set(v8_str("object"), object);
+  context->Global()->Set(v8_str("hidden_prototype"), hidden_prototype);
+
+  CHECK_EQ(1, CompileRun(
+                  "var result;"
+                  "var x = 0;"
+                  "object.x = 1;"
+                  "with (object) {"
+                  "  result = x;"
+                  "}"
+                  "result")->Int32Value());
+
+  Cleanup();
+  CHECK_EQ(2, CompileRun(
+                  "var result;"
+                  "var x = 0;"
+                  "hidden_prototype.x = 2;"
+                  "with (object) {"
+                  "  result = x;"
+                  "}"
+                  "result")->Int32Value());
+
+  Cleanup();
+  CHECK_EQ(0, CompileRun(
+                  "var result;"
+                  "var x = 0;"
+                  "object.x = 3;"
+                  "object[Symbol.unscopables] = {x: true};"
+                  "with (object) {"
+                  "  result = x;"
+                  "}"
+                  "result")->Int32Value());
+
+  Cleanup();
+  CHECK_EQ(0, CompileRun(
+                  "var result;"
+                  "var x = 0;"
+                  "hidden_prototype.x = 4;"
+                  "hidden_prototype[Symbol.unscopables] = {x: true};"
+                  "with (object) {"
+                  "  result = x;"
+                  "}"
+                  "result")->Int32Value());
+
+  Cleanup();
+  CHECK_EQ(0, CompileRun(
+                  "var result;"
+                  "var x = 0;"
+                  "object.x = 5;"
+                  "hidden_prototype[Symbol.unscopables] = {x: true};"
+                  "with (object) {"
+                  "  result = x;"
+                  "}"
+                  "result;")->Int32Value());
+
+  Cleanup();
+  CHECK_EQ(0, CompileRun(
+                  "var result;"
+                  "var x = 0;"
+                  "hidden_prototype.x = 6;"
+                  "object[Symbol.unscopables] = {x: true};"
+                  "with (object) {"
+                  "  result = x;"
+                  "}"
+                  "result")->Int32Value());
+}
+}
diff --git a/test/cctest/test-utils-arm64.cc b/test/cctest/test-utils-arm64.cc
index dd6b11f..b0b77bc 100644
--- a/test/cctest/test-utils-arm64.cc
+++ b/test/cctest/test-utils-arm64.cc
@@ -27,8 +27,8 @@
 
 #include "src/v8.h"
 
-#include "src/macro-assembler.h"
 #include "src/arm64/utils-arm64.h"
+#include "src/macro-assembler.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/test-utils-arm64.h"
 
@@ -95,7 +95,7 @@
 
 
 bool Equal32(uint32_t expected, const RegisterDump* core, const Register& reg) {
-  ASSERT(reg.Is32Bits());
+  DCHECK(reg.Is32Bits());
   // Retrieve the corresponding X register so we can check that the upper part
   // was properly cleared.
   int64_t result_x = core->xreg(reg.code());
@@ -112,7 +112,7 @@
 bool Equal64(uint64_t expected,
              const RegisterDump* core,
              const Register& reg) {
-  ASSERT(reg.Is64Bits());
+  DCHECK(reg.Is64Bits());
   uint64_t result = core->xreg(reg.code());
   return Equal64(expected, core, result);
 }
@@ -121,7 +121,7 @@
 bool EqualFP32(float expected,
                const RegisterDump* core,
                const FPRegister& fpreg) {
-  ASSERT(fpreg.Is32Bits());
+  DCHECK(fpreg.Is32Bits());
   // Retrieve the corresponding D register so we can check that the upper part
   // was properly cleared.
   uint64_t result_64 = core->dreg_bits(fpreg.code());
@@ -138,7 +138,7 @@
 bool EqualFP64(double expected,
                const RegisterDump* core,
                const FPRegister& fpreg) {
-  ASSERT(fpreg.Is64Bits());
+  DCHECK(fpreg.Is64Bits());
   return EqualFP64(expected, core, core->dreg(fpreg.code()));
 }
 
@@ -146,7 +146,7 @@
 bool Equal64(const Register& reg0,
              const RegisterDump* core,
              const Register& reg1) {
-  ASSERT(reg0.Is64Bits() && reg1.Is64Bits());
+  DCHECK(reg0.Is64Bits() && reg1.Is64Bits());
   int64_t expected = core->xreg(reg0.code());
   int64_t result = core->xreg(reg1.code());
   return Equal64(expected, core, result);
@@ -174,8 +174,8 @@
 
 
 bool EqualNzcv(uint32_t expected, uint32_t result) {
-  ASSERT((expected & ~NZCVFlag) == 0);
-  ASSERT((result & ~NZCVFlag) == 0);
+  DCHECK((expected & ~NZCVFlag) == 0);
+  DCHECK((result & ~NZCVFlag) == 0);
   if (result != expected) {
     printf("Expected: %c%c%c%c\t Found: %c%c%c%c\n",
         FlagN(expected), FlagZ(expected), FlagC(expected), FlagV(expected),
@@ -231,7 +231,7 @@
     }
   }
   // Check that we got enough registers.
-  ASSERT(CountSetBits(list, kNumberOfRegisters) == reg_count);
+  DCHECK(CountSetBits(list, kNumberOfRegisters) == reg_count);
 
   return list;
 }
@@ -258,7 +258,7 @@
     }
   }
   // Check that we got enough registers.
-  ASSERT(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
+  DCHECK(CountSetBits(list, kNumberOfFPRegisters) == reg_count);
 
   return list;
 }
@@ -270,7 +270,7 @@
     if (reg_list & (1UL << i)) {
       Register xn = Register::Create(i, kXRegSizeInBits);
       // We should never write into csp here.
-      ASSERT(!xn.Is(csp));
+      DCHECK(!xn.Is(csp));
       if (!xn.IsZero()) {
         if (!first.IsValid()) {
           // This is the first register we've hit, so construct the literal.
@@ -320,7 +320,7 @@
 
 
 void RegisterDump::Dump(MacroAssembler* masm) {
-  ASSERT(__ StackPointer().Is(csp));
+  DCHECK(__ StackPointer().Is(csp));
 
   // Ensure that we don't unintentionally clobber any registers.
   RegList old_tmp_list = masm->TmpList()->list();
@@ -396,7 +396,7 @@
   // easily restore them.
   Register dump2_base = x10;
   Register dump2 = x11;
-  ASSERT(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
+  DCHECK(!AreAliased(dump_base, dump, tmp, dump2_base, dump2));
 
   // Don't lose the dump_ address.
   __ Mov(dump2_base, dump_base);
diff --git a/test/cctest/test-utils-arm64.h b/test/cctest/test-utils-arm64.h
index dbb095e..d00ad5e 100644
--- a/test/cctest/test-utils-arm64.h
+++ b/test/cctest/test-utils-arm64.h
@@ -29,11 +29,11 @@
 #define V8_ARM64_TEST_UTILS_ARM64_H_
 
 #include "src/v8.h"
+#include "test/cctest/cctest.h"
 
-#include "src/macro-assembler.h"
 #include "src/arm64/macro-assembler-arm64.h"
 #include "src/arm64/utils-arm64.h"
-#include "test/cctest/cctest.h"
+#include "src/macro-assembler.h"
 
 
 using namespace v8::internal;
@@ -59,7 +59,7 @@
     if (code == kSPRegInternalCode) {
       return wspreg();
     }
-    ASSERT(RegAliasesMatch(code));
+    DCHECK(RegAliasesMatch(code));
     return dump_.w_[code];
   }
 
@@ -67,13 +67,13 @@
     if (code == kSPRegInternalCode) {
       return spreg();
     }
-    ASSERT(RegAliasesMatch(code));
+    DCHECK(RegAliasesMatch(code));
     return dump_.x_[code];
   }
 
   // FPRegister accessors.
   inline uint32_t sreg_bits(unsigned code) const {
-    ASSERT(FPRegAliasesMatch(code));
+    DCHECK(FPRegAliasesMatch(code));
     return dump_.s_[code];
   }
 
@@ -82,7 +82,7 @@
   }
 
   inline uint64_t dreg_bits(unsigned code) const {
-    ASSERT(FPRegAliasesMatch(code));
+    DCHECK(FPRegAliasesMatch(code));
     return dump_.d_[code];
   }
 
@@ -92,19 +92,19 @@
 
   // Stack pointer accessors.
   inline int64_t spreg() const {
-    ASSERT(SPRegAliasesMatch());
+    DCHECK(SPRegAliasesMatch());
     return dump_.sp_;
   }
 
   inline int64_t wspreg() const {
-    ASSERT(SPRegAliasesMatch());
+    DCHECK(SPRegAliasesMatch());
     return dump_.wsp_;
   }
 
   // Flags accessors.
   inline uint64_t flags_nzcv() const {
-    ASSERT(IsComplete());
-    ASSERT((dump_.flags_ & ~Flags_mask) == 0);
+    DCHECK(IsComplete());
+    DCHECK((dump_.flags_ & ~Flags_mask) == 0);
     return dump_.flags_ & Flags_mask;
   }
 
@@ -120,21 +120,21 @@
   // w<code>. A failure of this test most likely represents a failure in the
   // ::Dump method, or a failure in the simulator.
   bool RegAliasesMatch(unsigned code) const {
-    ASSERT(IsComplete());
-    ASSERT(code < kNumberOfRegisters);
+    DCHECK(IsComplete());
+    DCHECK(code < kNumberOfRegisters);
     return ((dump_.x_[code] & kWRegMask) == dump_.w_[code]);
   }
 
   // As RegAliasesMatch, but for the stack pointer.
   bool SPRegAliasesMatch() const {
-    ASSERT(IsComplete());
+    DCHECK(IsComplete());
     return ((dump_.sp_ & kWRegMask) == dump_.wsp_);
   }
 
   // As RegAliasesMatch, but for floating-point registers.
   bool FPRegAliasesMatch(unsigned code) const {
-    ASSERT(IsComplete());
-    ASSERT(code < kNumberOfFPRegisters);
+    DCHECK(IsComplete());
+    DCHECK(code < kNumberOfFPRegisters);
     return (dump_.d_[code] & kSRegMask) == dump_.s_[code];
   }
 
diff --git a/test/cctest/test-utils.cc b/test/cctest/test-utils.cc
index 0920638..9ea8b2b 100644
--- a/test/cctest/test-utils.cc
+++ b/test/cctest/test-utils.cc
@@ -27,9 +27,11 @@
 
 #include <stdlib.h>
 
+#include <vector>
+
 #include "src/v8.h"
 
-#include "src/platform.h"
+#include "src/base/platform/platform.h"
 #include "src/utils-inl.h"
 #include "test/cctest/cctest.h"
 
@@ -70,7 +72,7 @@
 
   CHECK_EQ(INT_MAX, FastD2IChecked(1.0e100));
   CHECK_EQ(INT_MIN, FastD2IChecked(-1.0e100));
-  CHECK_EQ(INT_MIN, FastD2IChecked(OS::nan_value()));
+  CHECK_EQ(INT_MIN, FastD2IChecked(v8::base::OS::nan_value()));
 }
 
 
@@ -218,3 +220,40 @@
   CHECK_EQ(0, strncmp("0123456789012345678901234567890123",
                       seq.start(), seq.length()));
 }
+
+
+// TODO(svenpanne) Unconditionally test this when our infrastructure is fixed.
+#if !V8_OS_NACL
+TEST(CPlusPlus11Features) {
+  struct S {
+    bool x;
+    struct T {
+      double y;
+      int z[3];
+    } t;
+  };
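+  // C++11 brace (aggregate) initialization of a nested struct — one of the
+  // features this test exists to smoke-test on every toolchain.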
+  S s{true, {3.1415, {1, 2, 3}}};
+  CHECK_EQ(2, s.t.z[1]);
+
+// TODO(svenpanne) Remove the old-skool code when we ship the new C++ headers.
+#if 0
+  std::vector<int> vec{11, 22, 33, 44};
+#else
+  std::vector<int> vec;
+  vec.push_back(11);
+  vec.push_back(22);
+  vec.push_back(33);
+  vec.push_back(44);
+#endif
+  vec.push_back(55);
+  vec.push_back(66);
+  for (auto& i : vec) {
+    ++i;
+  }
+  int j = 12;
+  for (auto i : vec) {
+    CHECK_EQ(j, i);
+    j += 11;
+  }
+}
+#endif
diff --git a/test/cctest/test-weakmaps.cc b/test/cctest/test-weakmaps.cc
index 2f28cb6..bb412a8 100644
--- a/test/cctest/test-weakmaps.cc
+++ b/test/cctest/test-weakmaps.cc
@@ -71,7 +71,7 @@
   std::pair<v8::Persistent<v8::Value>*, int>* p =
       reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
           data.GetParameter());
-  ASSERT_EQ(1234, p->second);
+  DCHECK_EQ(1234, p->second);
   NumberOfWeakCalls++;
   p->first->Reset();
 }
@@ -255,3 +255,20 @@
   heap->CollectAllGarbage(Heap::kNoGCFlags);
   heap->CollectAllGarbage(Heap::kNoGCFlags);
 }
+
+
+TEST(Regress399527) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  {
+    HandleScope scope(isolate);
+    AllocateJSWeakMap(isolate);
+    SimulateIncrementalMarking(heap);
+  }
+  // The weak map is marked black here but leaving the handle scope will make
+  // the object unreachable. Aborting incremental marking will clear all the
+  // marking bits which makes the weak map garbage.
+  heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+}
diff --git a/test/cctest/test-weaksets.cc b/test/cctest/test-weaksets.cc
index 12efea3..299cc92 100644
--- a/test/cctest/test-weaksets.cc
+++ b/test/cctest/test-weaksets.cc
@@ -71,7 +71,7 @@
   std::pair<v8::Persistent<v8::Value>*, int>* p =
       reinterpret_cast<std::pair<v8::Persistent<v8::Value>*, int>*>(
           data.GetParameter());
-  ASSERT_EQ(1234, p->second);
+  DCHECK_EQ(1234, p->second);
   NumberOfWeakCalls++;
   p->first->Reset();
 }
diff --git a/test/cctest/test-weaktypedarrays.cc b/test/cctest/test-weaktypedarrays.cc
index df5d4c5..d40b7e9 100644
--- a/test/cctest/test-weaktypedarrays.cc
+++ b/test/cctest/test-weaktypedarrays.cc
@@ -28,12 +28,12 @@
 #include <stdlib.h>
 
 #include "src/v8.h"
-#include "src/api.h"
-#include "src/heap.h"
-#include "src/objects.h"
-
 #include "test/cctest/cctest.h"
 
+#include "src/api.h"
+#include "src/heap/heap.h"
+#include "src/objects.h"
+
 using namespace v8::internal;
 
 static Isolate* GetIsolateFrom(LocalContext* context) {
diff --git a/test/compiler-unittests/compiler-unittests.status b/test/compiler-unittests/compiler-unittests.status
new file mode 100644
index 0000000..d439913
--- /dev/null
+++ b/test/compiler-unittests/compiler-unittests.status
@@ -0,0 +1,6 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/test/fuzz-natives/base.js b/test/fuzz-natives/base.js
index b9f7004..d1f721d 100644
--- a/test/fuzz-natives/base.js
+++ b/test/fuzz-natives/base.js
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+// Flags: --allow-natives-syntax
+
 // TODO(jkummerow): There are many ways to improve these tests, e.g.:
 // - more variance in randomized inputs
 // - better time complexity management
@@ -15,7 +17,9 @@
   result.push(17);
   result.push(-31);
   result.push(new Array(100));
-  result.push(new Array(100003));
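+  // Use %NormalizeElements to get a dictionary-elements array of length
+  // 100003, instead of relying on new Array(len) transitioning to slow mode.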
+  var a = %NormalizeElements([]);
+  a.length = 100003;
+  result.push(a);
   result.push(Number.MIN_VALUE);
   result.push("whoops");
   result.push("x");
diff --git a/test/fuzz-natives/fuzz-natives.status b/test/fuzz-natives/fuzz-natives.status
index fb3cae9..c81188a 100644
--- a/test/fuzz-natives/fuzz-natives.status
+++ b/test/fuzz-natives/fuzz-natives.status
@@ -31,6 +31,27 @@
   "CreateDateTimeFormat": [SKIP],
   "CreateNumberFormat": [SKIP],
 
+  # TODO(danno): Fix these internal functions that are only callable from
+  # stubs and un-blacklist them!
+  "CompileLazy": [SKIP],
+  "NotifyDeoptimized": [SKIP],
+  "NotifyStubFailure": [SKIP],
+  "NewSloppyArguments": [SKIP],
+  "NewStrictArguments": [SKIP],
+  "ArrayConstructor": [SKIP],
+  "InternalArrayConstructor": [SKIP],
+  "FinalizeInstanceSize": [SKIP],
+  "PromoteScheduledException": [SKIP],
+  "NewFunctionContext": [SKIP],
+  "PushWithContext": [SKIP],
+  "PushCatchContext": [SKIP],
+  "PushModuleContext": [SKIP],
+  "LoadLookupSlot": [SKIP],
+  "LoadLookupSlotNoReferenceError": [SKIP],
+  "ResolvePossiblyDirectEval": [SKIP],
+  "ForInInit": [SKIP],
+  "ForInNext": [SKIP],
+
   # TODO(jkummerow): Figure out what to do about inlined functions.
   "_GeneratorNext": [SKIP],
   "_GeneratorThrow": [SKIP],
diff --git a/test/fuzz-natives/testcfg.py b/test/fuzz-natives/testcfg.py
index df6bc5b..5e00b40 100644
--- a/test/fuzz-natives/testcfg.py
+++ b/test/fuzz-natives/testcfg.py
@@ -31,11 +31,16 @@
       assert False, "Failed to get natives list."
     tests = []
     for line in output.stdout.strip().split():
-      (name, argc) = line.split(",")
-      flags = ["--allow-natives-syntax",
-               "-e", "var NAME = '%s', ARGC = %s;" % (name, argc)]
-      test = testcase.TestCase(self, name, flags)
-      tests.append(test)
+      try:
+        (name, argc) = line.split(",")
+        flags = ["--allow-natives-syntax",
+                 "-e", "var NAME = '%s', ARGC = %s;" % (name, argc)]
+        test = testcase.TestCase(self, name, flags)
+        tests.append(test)
+      except ValueError:
+        # Work-around: If parsing didn't work, it might have been due to output
+        # caused by other d8 flags.
+        pass
     return tests
 
   def GetFlagsForTestCase(self, testcase, context):
diff --git a/test/heap-unittests/heap-unittests.status b/test/heap-unittests/heap-unittests.status
new file mode 100644
index 0000000..d439913
--- /dev/null
+++ b/test/heap-unittests/heap-unittests.status
@@ -0,0 +1,6 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/test/intl/intl.status b/test/intl/intl.status
index 4ecbf32..d48d695 100644
--- a/test/intl/intl.status
+++ b/test/intl/intl.status
@@ -25,10 +25,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
-# The following tests use getDefaultTimeZone().
 [
 [ALWAYS, {
+  # The following tests use getDefaultTimeZone().
   'date-format/resolved-options': [FAIL],
   'date-format/timezone': [FAIL],
   'general/v8Intl-exists': [FAIL],
@@ -38,5 +37,11 @@
 
   # BUG(2899): default locale for search fails on mac and on android.
   'collator/default-locale': [['system == macos or arch == android_arm or arch == android_ia32', FAIL]],
+
+  # BUG(v8:3454).
+  'date-format/parse-MMMdy': [FAIL],
+  'date-format/parse-mdyhms': [FAIL],
+  'number-format/parse-decimal': [FAIL],
+  'number-format/parse-percent': [FAIL],
 }],  # ALWAYS
 ]
diff --git a/test/libplatform-unittests/libplatform-unittests.status b/test/libplatform-unittests/libplatform-unittests.status
new file mode 100644
index 0000000..d439913
--- /dev/null
+++ b/test/libplatform-unittests/libplatform-unittests.status
@@ -0,0 +1,6 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+[
+]
diff --git a/test/message/message.status b/test/message/message.status
index 00f6e34..234bf0f 100644
--- a/test/message/message.status
+++ b/test/message/message.status
@@ -25,10 +25,9 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
-# All tests in the bug directory are expected to fail.
 [
 [ALWAYS, {
+  # All tests in the bug directory are expected to fail.
   'bugs/*': [FAIL],
 }],  # ALWAYS
 ]
diff --git a/test/mjsunit/allocation-site-info.js b/test/mjsunit/allocation-site-info.js
index 35b60ee..9984f5b 100644
--- a/test/mjsunit/allocation-site-info.js
+++ b/test/mjsunit/allocation-site-info.js
@@ -25,25 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
 // Flags: --noalways-opt
 
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-support_smi_only_arrays = true;
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
-
 var elements_kind = {
   fast_smi_only            :  'fast smi only elements',
   fast                     :  'fast elements',
@@ -73,10 +57,6 @@
 }
 
 function assertKind(expected, obj, name_opt) {
-  if (!support_smi_only_arrays &&
-      expected == elements_kind.fast_smi_only) {
-    expected = elements_kind.fast;
-  }
   assertEquals(expected, getKind(obj), name_opt);
 }
 
@@ -88,403 +68,402 @@
   assertEquals(false, isHoley(obj), name_opt);
 }
 
-if (support_smi_only_arrays) {
-  obj = [];
-  assertNotHoley(obj);
-  assertKind(elements_kind.fast_smi_only, obj);
+obj = [];
+assertNotHoley(obj);
+assertKind(elements_kind.fast_smi_only, obj);
 
-  obj = [1, 2, 3];
-  assertNotHoley(obj);
-  assertKind(elements_kind.fast_smi_only, obj);
+obj = [1, 2, 3];
+assertNotHoley(obj);
+assertKind(elements_kind.fast_smi_only, obj);
 
-  obj = new Array();
-  assertNotHoley(obj);
-  assertKind(elements_kind.fast_smi_only, obj);
+obj = new Array();
+assertNotHoley(obj);
+assertKind(elements_kind.fast_smi_only, obj);
 
-  obj = new Array(0);
-  assertNotHoley(obj);
-  assertKind(elements_kind.fast_smi_only, obj);
+obj = new Array(0);
+assertNotHoley(obj);
+assertKind(elements_kind.fast_smi_only, obj);
 
-  obj = new Array(2);
-  assertHoley(obj);
-  assertKind(elements_kind.fast_smi_only, obj);
+obj = new Array(2);
+assertHoley(obj);
+assertKind(elements_kind.fast_smi_only, obj);
 
-  obj = new Array(1,2,3);
-  assertNotHoley(obj);
-  assertKind(elements_kind.fast_smi_only, obj);
+obj = new Array(1,2,3);
+assertNotHoley(obj);
+assertKind(elements_kind.fast_smi_only, obj);
 
-  obj = new Array(1, "hi", 2, undefined);
-  assertNotHoley(obj);
-  assertKind(elements_kind.fast, obj);
+obj = new Array(1, "hi", 2, undefined);
+assertNotHoley(obj);
+assertKind(elements_kind.fast, obj);
 
-  function fastliteralcase(literal, value) {
-    literal[0] = value;
-    return literal;
-  }
+function fastliteralcase(literal, value) {
+  literal[0] = value;
+  return literal;
+}
 
-  function get_standard_literal() {
-    var literal = [1, 2, 3];
-    return literal;
-  }
+function get_standard_literal() {
+  var literal = [1, 2, 3];
+  return literal;
+}
 
-  // Case: [1,2,3] as allocation site
-  obj = fastliteralcase(get_standard_literal(), 1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  obj = fastliteralcase(get_standard_literal(), 1.5);
+// Case: [1,2,3] as allocation site
+obj = fastliteralcase(get_standard_literal(), 1);
+assertKind(elements_kind.fast_smi_only, obj);
+obj = fastliteralcase(get_standard_literal(), 1.5);
+assertKind(elements_kind.fast_double, obj);
+obj = fastliteralcase(get_standard_literal(), 2);
+assertKind(elements_kind.fast_double, obj);
+
+// The test below is in a loop because arrays that live
+// at global scope without the chance of being recreated
+// don't have allocation site information attached.
+for (i = 0; i < 2; i++) {
+  obj = fastliteralcase([5, 3, 2], 1.5);
   assertKind(elements_kind.fast_double, obj);
-  obj = fastliteralcase(get_standard_literal(), 2);
+  obj = fastliteralcase([3, 6, 2], 1.5);
   assertKind(elements_kind.fast_double, obj);
 
-  // The test below is in a loop because arrays that live
-  // at global scope without the chance of being recreated
-  // don't have allocation site information attached.
-  for (i = 0; i < 2; i++) {
-    obj = fastliteralcase([5, 3, 2], 1.5);
-    assertKind(elements_kind.fast_double, obj);
-    obj = fastliteralcase([3, 6, 2], 1.5);
-    assertKind(elements_kind.fast_double, obj);
+  // Note: thanks to pessimistic transition store stubs, we'll attempt
+  // to transition to the most general elements kind seen at a particular
+  // store site. So, the elements kind will be double.
+  obj = fastliteralcase([2, 6, 3], 2);
+  assertKind(elements_kind.fast_double, obj);
+}
 
-    // Note: thanks to pessimistic transition store stubs, we'll attempt
-    // to transition to the most general elements kind seen at a particular
-    // store site. So, the elements kind will be double.
-    obj = fastliteralcase([2, 6, 3], 2);
-    assertKind(elements_kind.fast_double, obj);
-  }
+// Verify that we will not pretransition the double->fast path.
+obj = fastliteralcase(get_standard_literal(), "elliot");
+assertKind(elements_kind.fast, obj);
+obj = fastliteralcase(get_standard_literal(), 3);
+assertKind(elements_kind.fast, obj);
 
-  // Verify that we will not pretransition the double->fast path.
-  obj = fastliteralcase(get_standard_literal(), "elliot");
-  assertKind(elements_kind.fast, obj);
-  obj = fastliteralcase(get_standard_literal(), 3);
-  assertKind(elements_kind.fast, obj);
-
-  // Make sure this works in crankshafted code too.
+// Make sure this works in crankshafted code too.
   %OptimizeFunctionOnNextCall(get_standard_literal);
-  get_standard_literal();
-  obj = get_standard_literal();
-  assertKind(elements_kind.fast, obj);
+get_standard_literal();
+obj = get_standard_literal();
+assertKind(elements_kind.fast, obj);
 
-  function fastliteralcase_smifast(value) {
-    var literal = [1, 2, 3, 4];
-    literal[0] = value;
-    return literal;
-  }
+function fastliteralcase_smifast(value) {
+  var literal = [1, 2, 3, 4];
+  literal[0] = value;
+  return literal;
+}
 
-  obj = fastliteralcase_smifast(1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  obj = fastliteralcase_smifast("carter");
-  assertKind(elements_kind.fast, obj);
-  obj = fastliteralcase_smifast(2);
-  assertKind(elements_kind.fast, obj);
+obj = fastliteralcase_smifast(1);
+assertKind(elements_kind.fast_smi_only, obj);
+obj = fastliteralcase_smifast("carter");
+assertKind(elements_kind.fast, obj);
+obj = fastliteralcase_smifast(2);
+assertKind(elements_kind.fast, obj);
 
-  // Case: make sure transitions from packed to holey are tracked
-  function fastliteralcase_smiholey(index, value) {
-    var literal = [1, 2, 3, 4];
-    literal[index] = value;
-    return literal;
-  }
+// Case: make sure transitions from packed to holey are tracked
+function fastliteralcase_smiholey(index, value) {
+  var literal = [1, 2, 3, 4];
+  literal[index] = value;
+  return literal;
+}
 
-  obj = fastliteralcase_smiholey(5, 1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  assertHoley(obj);
-  obj = fastliteralcase_smiholey(0, 1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  assertHoley(obj);
+obj = fastliteralcase_smiholey(5, 1);
+assertKind(elements_kind.fast_smi_only, obj);
+assertHoley(obj);
+obj = fastliteralcase_smiholey(0, 1);
+assertKind(elements_kind.fast_smi_only, obj);
+assertHoley(obj);
 
-  function newarraycase_smidouble(value) {
-    var a = new Array();
-    a[0] = value;
+function newarraycase_smidouble(value) {
+  var a = new Array();
+  a[0] = value;
+  return a;
+}
+
+// Case: new Array() as allocation site, smi->double
+obj = newarraycase_smidouble(1);
+assertKind(elements_kind.fast_smi_only, obj);
+obj = newarraycase_smidouble(1.5);
+assertKind(elements_kind.fast_double, obj);
+obj = newarraycase_smidouble(2);
+assertKind(elements_kind.fast_double, obj);
+
+function newarraycase_smiobj(value) {
+  var a = new Array();
+  a[0] = value;
+  return a;
+}
+
+// Case: new Array() as allocation site, smi->fast
+obj = newarraycase_smiobj(1);
+assertKind(elements_kind.fast_smi_only, obj);
+obj = newarraycase_smiobj("gloria");
+assertKind(elements_kind.fast, obj);
+obj = newarraycase_smiobj(2);
+assertKind(elements_kind.fast, obj);
+
+function newarraycase_length_smidouble(value) {
+  var a = new Array(3);
+  a[0] = value;
+  return a;
+}
+
+// Case: new Array(length) as allocation site
+obj = newarraycase_length_smidouble(1);
+assertKind(elements_kind.fast_smi_only, obj);
+obj = newarraycase_length_smidouble(1.5);
+assertKind(elements_kind.fast_double, obj);
+obj = newarraycase_length_smidouble(2);
+assertKind(elements_kind.fast_double, obj);
+
+// Try to continue the transition to fast object.
+// TODO(mvstanton): re-enable commented out code when
+// FLAG_pretenuring_call_new is turned on in the build.
+obj = newarraycase_length_smidouble("coates");
+assertKind(elements_kind.fast, obj);
+obj = newarraycase_length_smidouble(2);
+// assertKind(elements_kind.fast, obj);
+
+function newarraycase_length_smiobj(value) {
+  var a = new Array(3);
+  a[0] = value;
+  return a;
+}
+
+// Case: new Array(<length>) as allocation site, smi->fast
+obj = newarraycase_length_smiobj(1);
+assertKind(elements_kind.fast_smi_only, obj);
+obj = newarraycase_length_smiobj("gloria");
+assertKind(elements_kind.fast, obj);
+obj = newarraycase_length_smiobj(2);
+assertKind(elements_kind.fast, obj);
+
+function newarraycase_list_smidouble(value) {
+  var a = new Array(1, 2, 3);
+  a[0] = value;
+  return a;
+}
+
+obj = newarraycase_list_smidouble(1);
+assertKind(elements_kind.fast_smi_only, obj);
+obj = newarraycase_list_smidouble(1.5);
+assertKind(elements_kind.fast_double, obj);
+obj = newarraycase_list_smidouble(2);
+assertKind(elements_kind.fast_double, obj);
+
+function newarraycase_list_smiobj(value) {
+  var a = new Array(4, 5, 6);
+  a[0] = value;
+  return a;
+}
+
+obj = newarraycase_list_smiobj(1);
+assertKind(elements_kind.fast_smi_only, obj);
+obj = newarraycase_list_smiobj("coates");
+assertKind(elements_kind.fast, obj);
+obj = newarraycase_list_smiobj(2);
+assertKind(elements_kind.fast, obj);
+
+// Case: array constructor calls with out of date feedback.
+// The boilerplate should incorporate all feedback, but the input array
+// should be minimally transitioned based on immediate need.
+(function() {
+  function foo(i) {
+    // We have two cases: one for literals, one for constructed arrays.
+    var a = (i == 0)
+      ? [1, 2, 3]
+      : new Array(1, 2, 3);
     return a;
   }
 
-  // Case: new Array() as allocation site, smi->double
-  obj = newarraycase_smidouble(1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  obj = newarraycase_smidouble(1.5);
-  assertKind(elements_kind.fast_double, obj);
-  obj = newarraycase_smidouble(2);
-  assertKind(elements_kind.fast_double, obj);
-
-  function newarraycase_smiobj(value) {
-    var a = new Array();
-    a[0] = value;
-    return a;
+  for (i = 0; i < 2; i++) {
+    a = foo(i);
+    b = foo(i);
+    b[5] = 1;  // boilerplate goes holey
+    assertHoley(foo(i));
+    a[0] = 3.5;  // boilerplate goes holey double
+    assertKind(elements_kind.fast_double, a);
+    assertNotHoley(a);
+    c = foo(i);
+    assertKind(elements_kind.fast_double, c);
+    assertHoley(c);
   }
+})();
 
-  // Case: new Array() as allocation site, smi->fast
-  obj = newarraycase_smiobj(1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  obj = newarraycase_smiobj("gloria");
-  assertKind(elements_kind.fast, obj);
-  obj = newarraycase_smiobj(2);
-  assertKind(elements_kind.fast, obj);
+function newarraycase_onearg(len, value) {
+  var a = new Array(len);
+  a[0] = value;
+  return a;
+}
 
-  function newarraycase_length_smidouble(value) {
-    var a = new Array(3);
-    a[0] = value;
-    return a;
-  }
+obj = newarraycase_onearg(5, 3.5);
+assertKind(elements_kind.fast_double, obj);
+obj = newarraycase_onearg(10, 5);
+assertKind(elements_kind.fast_double, obj);
+obj = newarraycase_onearg(0, 5);
+assertKind(elements_kind.fast_double, obj);
 
-  // Case: new Array(length) as allocation site
-  obj = newarraycase_length_smidouble(1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  obj = newarraycase_length_smidouble(1.5);
-  assertKind(elements_kind.fast_double, obj);
-  obj = newarraycase_length_smidouble(2);
-  assertKind(elements_kind.fast_double, obj);
+// Verify that cross context calls work
+var realmA = Realm.current();
+var realmB = Realm.create();
+assertEquals(0, realmA);
+assertEquals(1, realmB);
 
-  // Try to continue the transition to fast object.
-  // TODO(mvstanton): re-enable commented out code when
-  // FLAG_pretenuring_call_new is turned on in the build.
-  obj = newarraycase_length_smidouble("coates");
-  assertKind(elements_kind.fast, obj);
-  obj = newarraycase_length_smidouble(2);
-  // assertKind(elements_kind.fast, obj);
+function instanceof_check(type) {
+  assertTrue(new type() instanceof type);
+  assertTrue(new type(5) instanceof type);
+  assertTrue(new type(1,2,3) instanceof type);
+}
 
-  function newarraycase_length_smiobj(value) {
-    var a = new Array(3);
-    a[0] = value;
-    return a;
-  }
+function instanceof_check2(type) {
+  assertTrue(new type() instanceof type);
+  assertTrue(new type(5) instanceof type);
+  assertTrue(new type(1,2,3) instanceof type);
+}
 
-  // Case: new Array(<length>) as allocation site, smi->fast
-  obj = newarraycase_length_smiobj(1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  obj = newarraycase_length_smiobj("gloria");
-  assertKind(elements_kind.fast, obj);
-  obj = newarraycase_length_smiobj(2);
-  assertKind(elements_kind.fast, obj);
+var realmBArray = Realm.eval(realmB, "Array");
+instanceof_check(Array);
+instanceof_check(realmBArray);
 
-  function newarraycase_list_smidouble(value) {
-    var a = new Array(1, 2, 3);
-    a[0] = value;
-    return a;
-  }
-
-  obj = newarraycase_list_smidouble(1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  obj = newarraycase_list_smidouble(1.5);
-  assertKind(elements_kind.fast_double, obj);
-  obj = newarraycase_list_smidouble(2);
-  assertKind(elements_kind.fast_double, obj);
-
-  function newarraycase_list_smiobj(value) {
-    var a = new Array(4, 5, 6);
-    a[0] = value;
-    return a;
-  }
-
-  obj = newarraycase_list_smiobj(1);
-  assertKind(elements_kind.fast_smi_only, obj);
-  obj = newarraycase_list_smiobj("coates");
-  assertKind(elements_kind.fast, obj);
-  obj = newarraycase_list_smiobj(2);
-  assertKind(elements_kind.fast, obj);
-
-  // Case: array constructor calls with out of date feedback.
-  // The boilerplate should incorporate all feedback, but the input array
-  // should be minimally transitioned based on immediate need.
-  (function() {
-    function foo(i) {
-      // We have two cases, one for literals one for constructed arrays.
-      var a = (i == 0)
-        ? [1, 2, 3]
-        : new Array(1, 2, 3);
-      return a;
-    }
-
-    for (i = 0; i < 2; i++) {
-      a = foo(i);
-      b = foo(i);
-      b[5] = 1;  // boilerplate goes holey
-      assertHoley(foo(i));
-      a[0] = 3.5;  // boilerplate goes holey double
-      assertKind(elements_kind.fast_double, a);
-      assertNotHoley(a);
-      c = foo(i);
-      assertKind(elements_kind.fast_double, c);
-      assertHoley(c);
-    }
-  })();
-
-  function newarraycase_onearg(len, value) {
-    var a = new Array(len);
-    a[0] = value;
-    return a;
-  }
-
-  obj = newarraycase_onearg(5, 3.5);
-  assertKind(elements_kind.fast_double, obj);
-  obj = newarraycase_onearg(10, 5);
-  assertKind(elements_kind.fast_double, obj);
-  obj = newarraycase_onearg(0, 5);
-  assertKind(elements_kind.fast_double, obj);
-  // Now pass a length that forces the dictionary path.
-  obj = newarraycase_onearg(100000, 5);
-  assertKind(elements_kind.dictionary, obj);
-  assertTrue(obj.length == 100000);
-
-  // Verify that cross context calls work
-  var realmA = Realm.current();
-  var realmB = Realm.create();
-  assertEquals(0, realmA);
-  assertEquals(1, realmB);
-
-  function instanceof_check(type) {
-    assertTrue(new type() instanceof type);
-    assertTrue(new type(5) instanceof type);
-    assertTrue(new type(1,2,3) instanceof type);
-  }
-
-  function instanceof_check2(type) {
-    assertTrue(new type() instanceof type);
-    assertTrue(new type(5) instanceof type);
-    assertTrue(new type(1,2,3) instanceof type);
-  }
-
-  var realmBArray = Realm.eval(realmB, "Array");
-  instanceof_check(Array);
-  instanceof_check(realmBArray);
-
-  // instanceof_check2 is here because the call site goes through a state.
-  // Since instanceof_check(Array) was first called with the current context
-  // Array function, it went from (uninit->Array) then (Array->megamorphic).
-  // We'll get a different state traversal if we start with realmBArray.
-  // It'll go (uninit->realmBArray) then (realmBArray->megamorphic). Recognize
-  // that state "Array" implies an AllocationSite is present, and code is
-  // configured to use it.
-  instanceof_check2(realmBArray);
-  instanceof_check2(Array);
+// instanceof_check2 is here because the call site goes through a state.
+// Since instanceof_check(Array) was first called with the current context
+// Array function, it went from (uninit->Array) then (Array->megamorphic).
+// We'll get a different state traversal if we start with realmBArray.
+// It'll go (uninit->realmBArray) then (realmBArray->megamorphic). Recognize
+// that state "Array" implies an AllocationSite is present, and code is
+// configured to use it.
+instanceof_check2(realmBArray);
+instanceof_check2(Array);
 
   %OptimizeFunctionOnNextCall(instanceof_check);
 
-  // No de-opt will occur because HCallNewArray wasn't selected, on account of
-  // the call site not being monomorphic to Array.
-  instanceof_check(Array);
-  assertOptimized(instanceof_check);
-  instanceof_check(realmBArray);
-  assertOptimized(instanceof_check);
+// No de-opt will occur because HCallNewArray wasn't selected, on account of
+// the call site not being monomorphic to Array.
+instanceof_check(Array);
+assertOptimized(instanceof_check);
+instanceof_check(realmBArray);
+assertOptimized(instanceof_check);
 
-  // Try to optimize again, but first clear all type feedback, and allow it
-  // to be monomorphic on first call. Only after crankshafting do we introduce
-  // realmBArray. This should deopt the method.
+// Try to optimize again, but first clear all type feedback, and allow it
+// to be monomorphic on first call. Only after crankshafting do we introduce
+// realmBArray. This should deopt the method.
   %DeoptimizeFunction(instanceof_check);
   %ClearFunctionTypeFeedback(instanceof_check);
-  instanceof_check(Array);
-  instanceof_check(Array);
+instanceof_check(Array);
+instanceof_check(Array);
   %OptimizeFunctionOnNextCall(instanceof_check);
-  instanceof_check(Array);
-  assertOptimized(instanceof_check);
+instanceof_check(Array);
+assertOptimized(instanceof_check);
 
-  instanceof_check(realmBArray);
-  assertUnoptimized(instanceof_check);
+instanceof_check(realmBArray);
+assertUnoptimized(instanceof_check);
 
-  // Case: make sure nested arrays benefit from allocation site feedback as
-  // well.
-  (function() {
-    // Make sure we handle nested arrays
-   function get_nested_literal() {
-     var literal = [[1,2,3,4], [2], [3]];
-     return literal;
-   }
+// Case: make sure nested arrays benefit from allocation site feedback as
+// well.
+(function() {
+  // Make sure we handle nested arrays
+  function get_nested_literal() {
+    var literal = [[1,2,3,4], [2], [3]];
+    return literal;
+  }
 
-   obj = get_nested_literal();
-   assertKind(elements_kind.fast, obj);
-   obj[0][0] = 3.5;
-   obj[2][0] = "hello";
-   obj = get_nested_literal();
-   assertKind(elements_kind.fast_double, obj[0]);
-   assertKind(elements_kind.fast_smi_only, obj[1]);
-   assertKind(elements_kind.fast, obj[2]);
+  obj = get_nested_literal();
+  assertKind(elements_kind.fast, obj);
+  obj[0][0] = 3.5;
+  obj[2][0] = "hello";
+  obj = get_nested_literal();
+  assertKind(elements_kind.fast_double, obj[0]);
+  assertKind(elements_kind.fast_smi_only, obj[1]);
+  assertKind(elements_kind.fast, obj[2]);
 
-   // A more complex nested literal case.
-   function get_deep_nested_literal() {
-     var literal = [[1], [[2], "hello"], 3, [4]];
-     return literal;
-   }
+  // A more complex nested literal case.
+  function get_deep_nested_literal() {
+    var literal = [[1], [[2], "hello"], 3, [4]];
+    return literal;
+  }
 
-   obj = get_deep_nested_literal();
-   assertKind(elements_kind.fast_smi_only, obj[1][0]);
-   obj[0][0] = 3.5;
-   obj[1][0][0] = "goodbye";
-   assertKind(elements_kind.fast_double, obj[0]);
-   assertKind(elements_kind.fast, obj[1][0]);
+  obj = get_deep_nested_literal();
+  assertKind(elements_kind.fast_smi_only, obj[1][0]);
+  obj[0][0] = 3.5;
+  obj[1][0][0] = "goodbye";
+  assertKind(elements_kind.fast_double, obj[0]);
+  assertKind(elements_kind.fast, obj[1][0]);
 
-   obj = get_deep_nested_literal();
-   assertKind(elements_kind.fast_double, obj[0]);
-   assertKind(elements_kind.fast, obj[1][0]);
-  })();
+  obj = get_deep_nested_literal();
+  assertKind(elements_kind.fast_double, obj[0]);
+  assertKind(elements_kind.fast, obj[1][0]);
+})();
 
+// Perform a gc because without it the test below can experience an
+// allocation failure at an inconvenient point. Allocation mementos get
+// cleared on gc, and they can't deliver elements kind feedback when that
+// happens.
+gc();
 
-  // Make sure object literals with array fields benefit from the type feedback
-  // that allocation mementos provide.
-  (function() {
-    // A literal in an object
-    function get_object_literal() {
-      var literal = {
-        array: [1,2,3],
-        data: 3.5
-      };
-      return literal;
-    }
+// Make sure object literals with array fields benefit from the type feedback
+// that allocation mementos provide.
+(function() {
+  // A literal in an object
+  function get_object_literal() {
+    var literal = {
+      array: [1,2,3],
+      data: 3.5
+    };
+    return literal;
+  }
 
-    obj = get_object_literal();
-    assertKind(elements_kind.fast_smi_only, obj.array);
-    obj.array[1] = 3.5;
-    assertKind(elements_kind.fast_double, obj.array);
-    obj = get_object_literal();
-    assertKind(elements_kind.fast_double, obj.array);
+  obj = get_object_literal();
+  assertKind(elements_kind.fast_smi_only, obj.array);
+  obj.array[1] = 3.5;
+  assertKind(elements_kind.fast_double, obj.array);
+  obj = get_object_literal();
+  assertKind(elements_kind.fast_double, obj.array);
 
-    function get_nested_object_literal() {
-      var literal = {
-        array: [[1],[2],[3]],
-        data: 3.5
-      };
-      return literal;
-    }
+  function get_nested_object_literal() {
+    var literal = {
+      array: [[1],[2],[3]],
+      data: 3.5
+    };
+    return literal;
+  }
 
-    obj = get_nested_object_literal();
-    assertKind(elements_kind.fast, obj.array);
-    assertKind(elements_kind.fast_smi_only, obj.array[1]);
-    obj.array[1][0] = 3.5;
-    assertKind(elements_kind.fast_double, obj.array[1]);
-    obj = get_nested_object_literal();
-    assertKind(elements_kind.fast_double, obj.array[1]);
+  obj = get_nested_object_literal();
+  assertKind(elements_kind.fast, obj.array);
+  assertKind(elements_kind.fast_smi_only, obj.array[1]);
+  obj.array[1][0] = 3.5;
+  assertKind(elements_kind.fast_double, obj.array[1]);
+  obj = get_nested_object_literal();
+  assertKind(elements_kind.fast_double, obj.array[1]);
 
     %OptimizeFunctionOnNextCall(get_nested_object_literal);
-    get_nested_object_literal();
-    obj = get_nested_object_literal();
-    assertKind(elements_kind.fast_double, obj.array[1]);
+  get_nested_object_literal();
+  obj = get_nested_object_literal();
+  assertKind(elements_kind.fast_double, obj.array[1]);
 
-    // Make sure we handle nested arrays
-    function get_nested_literal() {
-      var literal = [[1,2,3,4], [2], [3]];
-      return literal;
-    }
+  // Make sure we handle nested arrays
+  function get_nested_literal() {
+    var literal = [[1,2,3,4], [2], [3]];
+    return literal;
+  }
 
-    obj = get_nested_literal();
-    assertKind(elements_kind.fast, obj);
-    obj[0][0] = 3.5;
-    obj[2][0] = "hello";
-    obj = get_nested_literal();
-    assertKind(elements_kind.fast_double, obj[0]);
-    assertKind(elements_kind.fast_smi_only, obj[1]);
-    assertKind(elements_kind.fast, obj[2]);
+  obj = get_nested_literal();
+  assertKind(elements_kind.fast, obj);
+  obj[0][0] = 3.5;
+  obj[2][0] = "hello";
+  obj = get_nested_literal();
+  assertKind(elements_kind.fast_double, obj[0]);
+  assertKind(elements_kind.fast_smi_only, obj[1]);
+  assertKind(elements_kind.fast, obj[2]);
 
-    // A more complex nested literal case.
-    function get_deep_nested_literal() {
-      var literal = [[1], [[2], "hello"], 3, [4]];
-      return literal;
-    }
+  // A more complex nested literal case.
+  function get_deep_nested_literal() {
+    var literal = [[1], [[2], "hello"], 3, [4]];
+    return literal;
+  }
 
-    obj = get_deep_nested_literal();
-    assertKind(elements_kind.fast_smi_only, obj[1][0]);
-    obj[0][0] = 3.5;
-    obj[1][0][0] = "goodbye";
-    assertKind(elements_kind.fast_double, obj[0]);
-    assertKind(elements_kind.fast, obj[1][0]);
+  obj = get_deep_nested_literal();
+  assertKind(elements_kind.fast_smi_only, obj[1][0]);
+  obj[0][0] = 3.5;
+  obj[1][0][0] = "goodbye";
+  assertKind(elements_kind.fast_double, obj[0]);
+  assertKind(elements_kind.fast, obj[1][0]);
 
-    obj = get_deep_nested_literal();
-    assertKind(elements_kind.fast_double, obj[0]);
-    assertKind(elements_kind.fast, obj[1][0]);
-  })();
-}
+  obj = get_deep_nested_literal();
+  assertKind(elements_kind.fast_double, obj[0]);
+  assertKind(elements_kind.fast, obj[1][0]);
+})();
diff --git a/test/mjsunit/apply.js b/test/mjsunit/apply.js
index 413ee93..abbc9a1 100644
--- a/test/mjsunit/apply.js
+++ b/test/mjsunit/apply.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 function f0() {
   return this;
 }
@@ -114,7 +116,8 @@
 
 for (var j = 1; j < 0x40000000; j <<= 1) {
   try {
-    var a = new Array(j);
+    var a = %NormalizeElements([]);
+    a.length = j;
     a[j - 1] = 42;
     assertEquals(42 + j, al.apply(345, a));
   } catch (e) {
@@ -122,7 +125,8 @@
     for (; j < 0x40000000; j <<= 1) {
       var caught = false;
       try {
-        a = new Array(j);
+        a = %NormalizeElements([]);
+        a.length = j;
         a[j - 1] = 42;
         al.apply(345, a);
         assertUnreachable("Apply of array with length " + a.length +
diff --git a/test/mjsunit/array-construct-transition.js b/test/mjsunit/array-construct-transition.js
index f8d7c83..3847f94 100644
--- a/test/mjsunit/array-construct-transition.js
+++ b/test/mjsunit/array-construct-transition.js
@@ -25,15 +25,11 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --allow-natives-syntax
 
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6));
-
-if (support_smi_only_arrays) {
-  var a = new Array(0, 1, 2);
-  assertTrue(%HasFastSmiElements(a));
-  var b = new Array(0.5, 1.2, 2.3);
-  assertTrue(%HasFastDoubleElements(b));
-  var c = new Array(0.5, 1.2, new Object());
-  assertTrue(%HasFastObjectElements(c));
-}
+var a = new Array(0, 1, 2);
+assertTrue(%HasFastSmiElements(a));
+var b = new Array(0.5, 1.2, 2.3);
+assertTrue(%HasFastDoubleElements(b));
+var c = new Array(0.5, 1.2, new Object());
+assertTrue(%HasFastObjectElements(c));
diff --git a/test/mjsunit/array-constructor-feedback.js b/test/mjsunit/array-constructor-feedback.js
index 9bc62e4..c2c1a18 100644
--- a/test/mjsunit/array-constructor-feedback.js
+++ b/test/mjsunit/array-constructor-feedback.js
@@ -25,24 +25,10 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
 // Flags: --noalways-opt
 
 // Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-support_smi_only_arrays = true;
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
 
 var elements_kind = {
   fast_smi_only            :  'fast smi only elements',
@@ -73,169 +59,161 @@
 }
 
 function assertKind(expected, obj, name_opt) {
-  if (!support_smi_only_arrays &&
-      expected == elements_kind.fast_smi_only) {
-    expected = elements_kind.fast;
-  }
   assertEquals(expected, getKind(obj), name_opt);
 }
 
-if (support_smi_only_arrays) {
+// Test: If a call site goes megamorphic, it retains the ability to
+// use allocation site feedback (if FLAG_allocation_site_pretenuring
+// is on).
+(function() {
+  function bar(t, len) {
+    return new t(len);
+  }
 
-  // Test: If a call site goes megamorphic, it retains the ability to
-  // use allocation site feedback (if FLAG_allocation_site_pretenuring
-  // is on).
-  (function() {
-    function bar(t, len) {
-      return new t(len);
-    }
-
-    a = bar(Array, 10);
-    a[0] = 3.5;
-    b = bar(Array, 1);
-    assertKind(elements_kind.fast_double, b);
-    c = bar(Object, 3);
-    b = bar(Array, 10);
-    // TODO(mvstanton): re-enable when FLAG_allocation_site_pretenuring
-    // is on in the build.
-    // assertKind(elements_kind.fast_double, b);
-  })();
+  a = bar(Array, 10);
+  a[0] = 3.5;
+  b = bar(Array, 1);
+  assertKind(elements_kind.fast_double, b);
+  c = bar(Object, 3);
+  b = bar(Array, 10);
+  // TODO(mvstanton): re-enable when FLAG_allocation_site_pretenuring
+  // is on in the build.
+  // assertKind(elements_kind.fast_double, b);
+})();
 
 
-  // Test: ensure that crankshafted array constructor sites are deopted
-  // if another function is used.
-  (function() {
-    function bar0(t) {
-      return new t();
-    }
-    a = bar0(Array);
-    a[0] = 3.5;
-    b = bar0(Array);
-    assertKind(elements_kind.fast_double, b);
+// Test: ensure that crankshafted array constructor sites are deopted
+// if another function is used.
+(function() {
+  function bar0(t) {
+    return new t();
+  }
+  a = bar0(Array);
+  a[0] = 3.5;
+  b = bar0(Array);
+  assertKind(elements_kind.fast_double, b);
     %OptimizeFunctionOnNextCall(bar0);
-    b = bar0(Array);
-    assertKind(elements_kind.fast_double, b);
+  b = bar0(Array);
+  assertKind(elements_kind.fast_double, b);
+  assertOptimized(bar0);
+  // bar0 should deopt
+  b = bar0(Object);
+  assertUnoptimized(bar0);
+  // When it's re-optimized, we should call through the full stub
+  bar0(Array);
+    %OptimizeFunctionOnNextCall(bar0);
+  b = bar0(Array);
+  // This only makes sense to test if we allow crankshafting
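+  // (In this revision, %GetOptimizationStatus returns 4 for "never
+  // optimized", e.g. when crankshaft is disabled.)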
+  if (4 != %GetOptimizationStatus(bar0)) {
+    // We also lost our ability to record kind feedback, as the site
+    // is megamorphic now.
+    assertKind(elements_kind.fast_smi_only, b);
     assertOptimized(bar0);
-    // bar0 should deopt
-    b = bar0(Object);
-    assertUnoptimized(bar0)
-    // When it's re-optimized, we should call through the full stub
-    bar0(Array);
-    %OptimizeFunctionOnNextCall(bar0);
-    b = bar0(Array);
-    // This only makes sense to test if we allow crankshafting
-    if (4 != %GetOptimizationStatus(bar0)) {
-      // We also lost our ability to record kind feedback, as the site
-      // is megamorphic now.
-      assertKind(elements_kind.fast_smi_only, b);
-      assertOptimized(bar0);
-      b[0] = 3.5;
-      c = bar0(Array);
-      assertKind(elements_kind.fast_smi_only, c);
-    }
-  })();
+    b[0] = 3.5;
+    c = bar0(Array);
+    assertKind(elements_kind.fast_smi_only, c);
+  }
+})();
 
 
-  // Test: Ensure that inlined array calls in crankshaft learn from deopts
-  // based on the move to a dictionary for the array.
-  (function() {
-    function bar(len) {
-      return new Array(len);
-    }
-    a = bar(10);
-    a[0] = "a string";
-    a = bar(10);
-    assertKind(elements_kind.fast, a);
+// Test: Ensure that inlined array calls in crankshaft learn from deopts
+// based on the move to a dictionary for the array.
+(function() {
+  function bar(len) {
+    return new Array(len);
+  }
+  a = bar(10);
+  a[0] = "a string";
+  a = bar(10);
+  assertKind(elements_kind.fast, a);
     %OptimizeFunctionOnNextCall(bar);
-    a = bar(10);
-    assertKind(elements_kind.fast, a);
-    assertOptimized(bar);
-    a = bar(100000);
-    assertKind(elements_kind.dictionary, a);
-    assertOptimized(bar);
+  a = bar(10);
+  assertKind(elements_kind.fast, a);
+  assertOptimized(bar);
+  bar(100000);
+  assertOptimized(bar);
 
-    // If the argument isn't a smi, things should still work.
-    a = bar("oops");
-    assertOptimized(bar);
-    assertKind(elements_kind.fast, a);
+  // If the argument isn't a smi, things should still work.
+  a = bar("oops");
+  assertOptimized(bar);
+  assertKind(elements_kind.fast, a);
 
-    function barn(one, two, three) {
-      return new Array(one, two, three);
-    }
+  function barn(one, two, three) {
+    return new Array(one, two, three);
+  }
 
-    barn(1, 2, 3);
-    barn(1, 2, 3);
+  barn(1, 2, 3);
+  barn(1, 2, 3);
     %OptimizeFunctionOnNextCall(barn);
-    barn(1, 2, 3);
-    assertOptimized(barn);
-    a = barn(1, "oops", 3);
-    assertOptimized(barn);
-  })();
+  barn(1, 2, 3);
+  assertOptimized(barn);
+  a = barn(1, "oops", 3);
+  assertOptimized(barn);
+})();
 
 
-  // Test: When a method with array constructor is crankshafted, the type
-  // feedback for elements kind is baked in. Verify that transitions don't
-  // change it anymore
-  (function() {
-    function bar() {
-      return new Array();
-    }
-    a = bar();
-    bar();
+// Test: When a method with array constructor is crankshafted, the type
+// feedback for elements kind is baked in. Verify that transitions don't
+// change it anymore
+(function() {
+  function bar() {
+    return new Array();
+  }
+  a = bar();
+  bar();
     %OptimizeFunctionOnNextCall(bar);
-    b = bar();
-    // This only makes sense to test if we allow crankshafting
-    if (4 != %GetOptimizationStatus(bar)) {
-      assertOptimized(bar);
+  b = bar();
+  // This only makes sense to test if we allow crankshafting
+  if (4 != %GetOptimizationStatus(bar)) {
+    assertOptimized(bar);
       %DebugPrint(3);
-      b[0] = 3.5;
-      c = bar();
-      assertKind(elements_kind.fast_smi_only, c);
-      assertOptimized(bar);
-    }
-  })();
-
-
-  // Test: create arrays in two contexts, verifying that the correct
-  // map for Array in that context will be used.
-  (function() {
-    function bar() { return new Array(); }
-    bar();
-    bar();
-    %OptimizeFunctionOnNextCall(bar);
-    a = bar();
-    assertTrue(a instanceof Array);
-
-    var contextB = Realm.create();
-    Realm.eval(contextB, "function bar2() { return new Array(); };");
-    Realm.eval(contextB, "bar2(); bar2();");
-    Realm.eval(contextB, "%OptimizeFunctionOnNextCall(bar2);");
-    Realm.eval(contextB, "bar2();");
-    assertFalse(Realm.eval(contextB, "bar2();") instanceof Array);
-    assertTrue(Realm.eval(contextB, "bar2() instanceof Array"));
-  })();
-
-  // Test: create array with packed feedback, then optimize function, which
-  // should deal with arguments that create holey arrays.
-  (function() {
-    function bar(len) { return new Array(len); }
-    bar(0);
-    bar(0);
-    %OptimizeFunctionOnNextCall(bar);
-    a = bar(0);
+    b[0] = 3.5;
+    c = bar();
+    assertKind(elements_kind.fast_smi_only, c);
     assertOptimized(bar);
+  }
+})();
+
+
+// Test: create arrays in two contexts, verifying that the correct
+// map for Array in that context will be used.
+(function() {
+  function bar() { return new Array(); }
+  bar();
+  bar();
+    %OptimizeFunctionOnNextCall(bar);
+  a = bar();
+  assertTrue(a instanceof Array);
+
+  var contextB = Realm.create();
+  Realm.eval(contextB, "function bar2() { return new Array(); };");
+  Realm.eval(contextB, "bar2(); bar2();");
+  Realm.eval(contextB, "%OptimizeFunctionOnNextCall(bar2);");
+  Realm.eval(contextB, "bar2();");
+  assertFalse(Realm.eval(contextB, "bar2();") instanceof Array);
+  assertTrue(Realm.eval(contextB, "bar2() instanceof Array"));
+})();
+
+// Test: create array with packed feedback, then optimize function, which
+// should deal with arguments that create holey arrays.
+(function() {
+  function bar(len) { return new Array(len); }
+  bar(0);
+  bar(0);
+    %OptimizeFunctionOnNextCall(bar);
+  a = bar(0);
+  assertOptimized(bar);
+  assertFalse(isHoley(a));
+  a = bar(1);  // ouch!
+  assertOptimized(bar);
+  assertTrue(isHoley(a));
+  a = bar(100);
+  assertTrue(isHoley(a));
+  a = bar(0);
+  assertOptimized(bar);
+  // Crankshafted functions don't use mementos, so feedback still
+  // indicates a packed array is desired (unless --nocrankshaft is in use).
+  if (4 != %GetOptimizationStatus(bar)) {
     assertFalse(isHoley(a));
-    a = bar(1);  // ouch!
-    assertOptimized(bar);
-    assertTrue(isHoley(a));
-    a = bar(100);
-    assertTrue(isHoley(a));
-    a = bar(0);
-    assertOptimized(bar);
-    // Crankshafted functions don't use mementos, so feedback still
-    // indicates a packed array is desired. (unless --nocrankshaft is in use).
-    if (4 != %GetOptimizationStatus(bar)) {
-      assertFalse(isHoley(a));
-    }
-  })();
-}
+  }
+})();
diff --git a/test/mjsunit/array-feedback.js b/test/mjsunit/array-feedback.js
index 75a5358..ffbb49b 100644
--- a/test/mjsunit/array-feedback.js
+++ b/test/mjsunit/array-feedback.js
@@ -25,25 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
 // Flags: --noalways-opt
 
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-support_smi_only_arrays = true;
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
-
 var elements_kind = {
   fast_smi_only            :  'fast smi only elements',
   fast                     :  'fast elements',
@@ -73,160 +57,153 @@
 }
 
 function assertKind(expected, obj, name_opt) {
-  if (!support_smi_only_arrays &&
-      expected == elements_kind.fast_smi_only) {
-    expected = elements_kind.fast;
-  }
   assertEquals(expected, getKind(obj), name_opt);
 }
 
-if (support_smi_only_arrays) {
+// Verify that basic elements kind feedback works for non-constructor
+// array calls (as long as the call is made through an IC, and not
+// a CallStub).
+(function (){
+  function create0() {
+    return Array();
+  }
 
-  // Verify that basic elements kind feedback works for non-constructor
-  // array calls (as long as the call is made through an IC, and not
-  // a CallStub).
-  (function (){
-    function create0() {
-      return Array();
-    }
+  // Calls through ICs need warm up through uninitialized, then
+  // premonomorphic first.
+  create0();
+  a = create0();
+  assertKind(elements_kind.fast_smi_only, a);
+  a[0] = 3.5;
+  b = create0();
+  assertKind(elements_kind.fast_double, b);
 
-    // Calls through ICs need warm up through uninitialized, then
-    // premonomorphic first.
-    create0();
-    a = create0();
-    assertKind(elements_kind.fast_smi_only, a);
-    a[0] = 3.5;
-    b = create0();
-    assertKind(elements_kind.fast_double, b);
+  function create1(arg) {
+    return Array(arg);
+  }
 
-    function create1(arg) {
-      return Array(arg);
-    }
+  create1(0);
+  create1(0);
+  a = create1(0);
+  assertFalse(isHoley(a));
+  assertKind(elements_kind.fast_smi_only, a);
+  a[0] = "hello";
+  b = create1(10);
+  assertTrue(isHoley(b));
+  assertKind(elements_kind.fast, b);
 
-    create1(0);
-    create1(0);
-    a = create1(0);
-    assertFalse(isHoley(a));
-    assertKind(elements_kind.fast_smi_only, a);
-    a[0] = "hello";
-    b = create1(10);
-    assertTrue(isHoley(b));
-    assertKind(elements_kind.fast, b);
+  a = create1(100000);
+  assertKind(elements_kind.fast_smi_only, a);
 
-    a = create1(100000);
-    assertKind(elements_kind.dictionary, a);
+  function create3(arg1, arg2, arg3) {
+    return Array(arg1, arg2, arg3);
+  }
 
-    function create3(arg1, arg2, arg3) {
-      return Array(arg1, arg2, arg3);
-    }
-
-    create3(1,2,3);
-    create3(1,2,3);
-    a = create3(1,2,3);
-    a[0] = 3.035;
-    assertKind(elements_kind.fast_double, a);
-    b = create3(1,2,3);
-    assertKind(elements_kind.fast_double, b);
-    assertFalse(isHoley(b));
-  })();
+  create3(1,2,3);
+  create3(1,2,3);
+  a = create3(1,2,3);
+  a[0] = 3.035;
+  assertKind(elements_kind.fast_double, a);
+  b = create3(1,2,3);
+  assertKind(elements_kind.fast_double, b);
+  assertFalse(isHoley(b));
+})();
 
 
-  // Verify that keyed calls work
-  (function (){
-    function create0(name) {
-      return this[name]();
-    }
+// Verify that keyed calls work
+(function (){
+  function create0(name) {
+    return this[name]();
+  }
 
-    name = "Array";
-    create0(name);
-    create0(name);
-    a = create0(name);
-    a[0] = 3.5;
-    b = create0(name);
-    assertKind(elements_kind.fast_double, b);
-  })();
+  name = "Array";
+  create0(name);
+  create0(name);
+  a = create0(name);
+  a[0] = 3.5;
+  b = create0(name);
+  assertKind(elements_kind.fast_double, b);
+})();
 
 
-  // Verify that feedback is turned off if the call site goes megamorphic.
-  (function (){
-    function foo(arg) { return arg(); }
-    foo(Array);
-    foo(function() {});
-    foo(Array);
+// Verify that feedback is turned off if the call site goes megamorphic.
+(function (){
+  function foo(arg) { return arg(); }
+  foo(Array);
+  foo(function() {});
+  foo(Array);
 
-    gc();
+  gc();
 
-    a = foo(Array);
-    a[0] = 3.5;
-    b = foo(Array);
-    // b doesn't benefit from elements kind feedback at a megamorphic site.
-    assertKind(elements_kind.fast_smi_only, b);
-  })();
+  a = foo(Array);
+  a[0] = 3.5;
+  b = foo(Array);
+  // b doesn't benefit from elements kind feedback at a megamorphic site.
+  assertKind(elements_kind.fast_smi_only, b);
+})();
 
 
-  // Verify that crankshaft consumes type feedback.
-  (function (){
-    function create0() {
-      return Array();
-    }
+// Verify that crankshaft consumes type feedback.
+(function (){
+  function create0() {
+    return Array();
+  }
 
-    create0();
-    create0();
-    a = create0();
-    a[0] = 3.5;
+  create0();
+  create0();
+  a = create0();
+  a[0] = 3.5;
     %OptimizeFunctionOnNextCall(create0);
-    create0();
-    create0();
-    b = create0();
-    assertKind(elements_kind.fast_double, b);
-    assertOptimized(create0);
+  create0();
+  create0();
+  b = create0();
+  assertKind(elements_kind.fast_double, b);
+  assertOptimized(create0);
 
-    function create1(arg) {
-      return Array(arg);
-    }
+  function create1(arg) {
+    return Array(arg);
+  }
 
-    create1(8);
-    create1(8);
-    a = create1(8);
-    a[0] = 3.5;
+  create1(8);
+  create1(8);
+  a = create1(8);
+  a[0] = 3.5;
     %OptimizeFunctionOnNextCall(create1);
-    b = create1(8);
-    assertKind(elements_kind.fast_double, b);
-    assertOptimized(create1);
+  b = create1(8);
+  assertKind(elements_kind.fast_double, b);
+  assertOptimized(create1);
 
-    function createN(arg1, arg2, arg3) {
-      return Array(arg1, arg2, arg3);
-    }
+  function createN(arg1, arg2, arg3) {
+    return Array(arg1, arg2, arg3);
+  }
 
-    createN(1, 2, 3);
-    createN(1, 2, 3);
-    a = createN(1, 2, 3);
-    a[0] = 3.5;
+  createN(1, 2, 3);
+  createN(1, 2, 3);
+  a = createN(1, 2, 3);
+  a[0] = 3.5;
     %OptimizeFunctionOnNextCall(createN);
-    b = createN(1, 2, 3);
-    assertKind(elements_kind.fast_double, b);
-    assertOptimized(createN);
-  })();
+  b = createN(1, 2, 3);
+  assertKind(elements_kind.fast_double, b);
+  assertOptimized(createN);
+})();
 
-  // Verify that cross context calls work
-  (function (){
-    var realmA = Realm.current();
-    var realmB = Realm.create();
-    assertEquals(0, realmA);
-    assertEquals(1, realmB);
+// Verify that cross context calls work
+(function (){
+  var realmA = Realm.current();
+  var realmB = Realm.create();
+  assertEquals(0, realmA);
+  assertEquals(1, realmB);
 
-    function instanceof_check(type) {
-      assertTrue(type() instanceof type);
-      assertTrue(type(5) instanceof type);
-      assertTrue(type(1,2,3) instanceof type);
-    }
+  function instanceof_check(type) {
+    assertTrue(type() instanceof type);
+    assertTrue(type(5) instanceof type);
+    assertTrue(type(1,2,3) instanceof type);
+  }
 
-    var realmBArray = Realm.eval(realmB, "Array");
-    instanceof_check(Array);
-    instanceof_check(Array);
-    instanceof_check(Array);
-    instanceof_check(realmBArray);
-    instanceof_check(realmBArray);
-    instanceof_check(realmBArray);
-  })();
-}
+  var realmBArray = Realm.eval(realmB, "Array");
+  instanceof_check(Array);
+  instanceof_check(Array);
+  instanceof_check(Array);
+  instanceof_check(realmBArray);
+  instanceof_check(realmBArray);
+  instanceof_check(realmBArray);
+})();
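
For contrast with the megamorphic case above: when the call site stays monomorphic, the AllocationSite keeps its elements-kind feedback, so a transition observed on one result upgrades later ones. A minimal sketch, assuming d8 with the test's flags (--allow-natives-syntax --expose-gc) and the assertKind/elements_kind helpers defined in this file:

function create() { return Array(); }  // single-target (monomorphic) site
var a = create();
a[0] = 3.5;                 // SMI -> DOUBLE, recorded on the allocation site
var b = create();           // same site: the feedback applies, so b starts
                            // out with double elements
assertKind(elements_kind.fast_double, b);
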
diff --git a/test/mjsunit/array-literal-feedback.js b/test/mjsunit/array-literal-feedback.js
index cfda0f6..ed9c4e8 100644
--- a/test/mjsunit/array-literal-feedback.js
+++ b/test/mjsunit/array-literal-feedback.js
@@ -25,25 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
 // Flags: --noalways-opt
 
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-support_smi_only_arrays = true;
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
-
 var elements_kind = {
   fast_smi_only            :  'fast smi only elements',
   fast                     :  'fast elements',
@@ -73,58 +57,51 @@
 }
 
 function assertKind(expected, obj, name_opt) {
-  if (!support_smi_only_arrays &&
-      expected == elements_kind.fast_smi_only) {
-    expected = elements_kind.fast;
-  }
   assertEquals(expected, getKind(obj), name_opt);
 }
 
-if (support_smi_only_arrays) {
+function get_literal(x) {
+  var literal = [1, 2, x];
+  return literal;
+}
 
-  function get_literal(x) {
-    var literal = [1, 2, x];
-    return literal;
+get_literal(3);
+// It's important that 'a' is created before we crankshaft get_literal,
+// because mementos won't be created by crankshafted code at all.
+a = get_literal(3);
+  %OptimizeFunctionOnNextCall(get_literal);
+get_literal(3);
+assertOptimized(get_literal);
+assertTrue(%HasFastSmiElements(a));
+// a has a memento so the transition caused by the store will affect the
+// boilerplate.
+a[0] = 3.5;
+
+// We should have transitioned the boilerplate array to double, and
+// crankshafted code should de-opt on the unexpected elements kind.
+b = get_literal(3);
+assertTrue(%HasFastDoubleElements(b));
+assertEquals([1, 2, 3], b);
+assertUnoptimized(get_literal);
+
+// Optimize again
+get_literal(3);
+  %OptimizeFunctionOnNextCall(get_literal);
+b = get_literal(3);
+assertTrue(%HasFastDoubleElements(b));
+assertOptimized(get_literal);
+
+
+// Test: make sure allocation site information is updated through a
+// transition from SMI->DOUBLE->FAST
+(function() {
+  function bar(a, b, c) {
+    return [a, b, c];
   }
 
-  get_literal(3);
-  // It's important to store a from before we crankshaft get_literal, because
-  // mementos won't be created from crankshafted code at all.
-  a = get_literal(3);
-  %OptimizeFunctionOnNextCall(get_literal);
-  get_literal(3);
-  assertOptimized(get_literal);
-  assertTrue(%HasFastSmiElements(a));
-  // a has a memento so the transition caused by the store will affect the
-  // boilerplate.
+  a = bar(1, 2, 3);
   a[0] = 3.5;
-
-  // We should have transitioned the boilerplate array to double, and
-  // crankshafted code should de-opt on the unexpected elements kind
-  b = get_literal(3);
-  assertTrue(%HasFastDoubleElements(b));
-  assertEquals([1, 2, 3], b);
-  assertUnoptimized(get_literal);
-
-  // Optimize again
-  get_literal(3);
-  %OptimizeFunctionOnNextCall(get_literal);
-  b = get_literal(3);
-  assertTrue(%HasFastDoubleElements(b));
-  assertOptimized(get_literal);
-
-
-  // Test: make sure allocation site information is updated through a
-  // transition from SMI->DOUBLE->FAST
-  (function() {
-    function bar(a, b, c) {
-      return [a, b, c];
-    }
-
-    a = bar(1, 2, 3);
-    a[0] = 3.5;
-    a[1] = 'hi';
-    b = bar(1, 2, 3);
-    assertKind(elements_kind.fast, b);
-  })();
-}
+  a[1] = 'hi';
+  b = bar(1, 2, 3);
+  assertKind(elements_kind.fast, b);
+})();
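
The memento mechanism the comments above lean on, restated as a short sketch (assuming d8 with --allow-natives-syntax; the %-natives are the ones used in this file):

function make(x) { return [1, 2, x]; }  // one literal site, one boilerplate
var a = make(3);            // fast smi-only elements, plus a memento that
                            // points back at the literal's AllocationSite
a[0] = 3.5;                 // SMI -> DOUBLE; the memento lets the transition
                            // propagate to the boilerplate
var b = make(3);            // the same site now produces double arrays
assertTrue(%HasFastDoubleElements(b));
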
diff --git a/test/mjsunit/array-literal-transitions.js b/test/mjsunit/array-literal-transitions.js
index ca6033b..e162455 100644
--- a/test/mjsunit/array-literal-transitions.js
+++ b/test/mjsunit/array-literal-transitions.js
@@ -25,22 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
-
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-support_smi_only_arrays = %HasFastSmiElements([1,2,3,4,5,6,7,8,9,10]);
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
+// Flags: --allow-natives-syntax --expose-gc
 
 // IC and Crankshaft support for smi-only elements in dynamic array literals.
 function get(foo) { return foo; }  // Used to generate dynamic values.
@@ -94,114 +79,112 @@
   assertEquals(1, f0[0]);
 }
 
-if (support_smi_only_arrays) {
-  for (var i = 0; i < 3; i++) {
-    array_literal_test();
-  }
-  %OptimizeFunctionOnNextCall(array_literal_test);
+for (var i = 0; i < 3; i++) {
   array_literal_test();
-
-  function test_large_literal() {
-
-    function d() {
-      gc();
-      return 2.5;
-    }
-
-    function o() {
-      gc();
-      return new Object();
-    }
-
-    large =
-        [ 0, 1, 2, 3, 4, 5, d(), d(), d(), d(), d(), d(), o(), o(), o(), o() ];
-    assertFalse(%HasDictionaryElements(large));
-    assertFalse(%HasFastSmiElements(large));
-    assertFalse(%HasFastDoubleElements(large));
-    assertTrue(%HasFastObjectElements(large));
-    assertEquals(large,
-                 [0, 1, 2, 3, 4, 5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5,
-                  new Object(), new Object(), new Object(), new Object()]);
-  }
-
-  for (var i = 0; i < 3; i++) {
-    test_large_literal();
-  }
-  %OptimizeFunctionOnNextCall(test_large_literal);
-  test_large_literal();
-
-  function deopt_array(use_literal) {
-    if (use_literal) {
-      return [.5, 3, 4];
-    }  else {
-      return new Array();
-    }
-  }
-
-  deopt_array(false);
-  deopt_array(false);
-  deopt_array(false);
-  %OptimizeFunctionOnNextCall(deopt_array);
-  var array = deopt_array(false);
-  assertOptimized(deopt_array);
-  deopt_array(true);
-  assertOptimized(deopt_array);
-  array = deopt_array(false);
-  assertOptimized(deopt_array);
-
-  // Check that unexpected changes in the objects stored into the boilerplate
-  // also force a deopt.
-  function deopt_array_literal_all_smis(a) {
-    return [0, 1, a];
-  }
-
-  deopt_array_literal_all_smis(2);
-  deopt_array_literal_all_smis(3);
-  deopt_array_literal_all_smis(4);
-  array = deopt_array_literal_all_smis(4);
-  assertEquals(0, array[0]);
-  assertEquals(1, array[1]);
-  assertEquals(4, array[2]);
-  %OptimizeFunctionOnNextCall(deopt_array_literal_all_smis);
-  array = deopt_array_literal_all_smis(5);
-  array = deopt_array_literal_all_smis(6);
-  assertOptimized(deopt_array_literal_all_smis);
-  assertEquals(0, array[0]);
-  assertEquals(1, array[1]);
-  assertEquals(6, array[2]);
-
-  array = deopt_array_literal_all_smis(.5);
-  assertUnoptimized(deopt_array_literal_all_smis);
-  assertEquals(0, array[0]);
-  assertEquals(1, array[1]);
-  assertEquals(.5, array[2]);
-
-  function deopt_array_literal_all_doubles(a) {
-    return [0.5, 1, a];
-  }
-
-  deopt_array_literal_all_doubles(.5);
-  deopt_array_literal_all_doubles(.5);
-  deopt_array_literal_all_doubles(.5);
-  array = deopt_array_literal_all_doubles(0.5);
-  assertEquals(0.5, array[0]);
-  assertEquals(1, array[1]);
-  assertEquals(0.5, array[2]);
-  %OptimizeFunctionOnNextCall(deopt_array_literal_all_doubles);
-  array = deopt_array_literal_all_doubles(5);
-  array = deopt_array_literal_all_doubles(6);
-  assertOptimized(deopt_array_literal_all_doubles);
-  assertEquals(0.5, array[0]);
-  assertEquals(1, array[1]);
-  assertEquals(6, array[2]);
-
-  var foo = new Object();
-  array = deopt_array_literal_all_doubles(foo);
-  assertUnoptimized(deopt_array_literal_all_doubles);
-  assertEquals(0.5, array[0]);
-  assertEquals(1, array[1]);
-  assertEquals(foo, array[2]);
 }
+  %OptimizeFunctionOnNextCall(array_literal_test);
+array_literal_test();
+
+function test_large_literal() {
+
+  function d() {
+    gc();
+    return 2.5;
+  }
+
+  function o() {
+    gc();
+    return new Object();
+  }
+
+  large =
+    [ 0, 1, 2, 3, 4, 5, d(), d(), d(), d(), d(), d(), o(), o(), o(), o() ];
+  assertFalse(%HasDictionaryElements(large));
+  assertFalse(%HasFastSmiElements(large));
+  assertFalse(%HasFastDoubleElements(large));
+  assertTrue(%HasFastObjectElements(large));
+  assertEquals(large,
+               [0, 1, 2, 3, 4, 5, 2.5, 2.5, 2.5, 2.5, 2.5, 2.5,
+                new Object(), new Object(), new Object(), new Object()]);
+}
+
+for (var i = 0; i < 3; i++) {
+  test_large_literal();
+}
+  %OptimizeFunctionOnNextCall(test_large_literal);
+test_large_literal();
+
+function deopt_array(use_literal) {
+  if (use_literal) {
+    return [.5, 3, 4];
+  } else {
+    return new Array();
+  }
+}
+
+deopt_array(false);
+deopt_array(false);
+deopt_array(false);
+  %OptimizeFunctionOnNextCall(deopt_array);
+var array = deopt_array(false);
+assertOptimized(deopt_array);
+deopt_array(true);
+assertOptimized(deopt_array);
+array = deopt_array(false);
+assertOptimized(deopt_array);
+
+// Check that unexpected changes in the objects stored into the boilerplate
+// also force a deopt.
+function deopt_array_literal_all_smis(a) {
+  return [0, 1, a];
+}
+
+deopt_array_literal_all_smis(2);
+deopt_array_literal_all_smis(3);
+deopt_array_literal_all_smis(4);
+array = deopt_array_literal_all_smis(4);
+assertEquals(0, array[0]);
+assertEquals(1, array[1]);
+assertEquals(4, array[2]);
+  %OptimizeFunctionOnNextCall(deopt_array_literal_all_smis);
+array = deopt_array_literal_all_smis(5);
+array = deopt_array_literal_all_smis(6);
+assertOptimized(deopt_array_literal_all_smis);
+assertEquals(0, array[0]);
+assertEquals(1, array[1]);
+assertEquals(6, array[2]);
+
+array = deopt_array_literal_all_smis(.5);
+assertUnoptimized(deopt_array_literal_all_smis);
+assertEquals(0, array[0]);
+assertEquals(1, array[1]);
+assertEquals(.5, array[2]);
+
+function deopt_array_literal_all_doubles(a) {
+  return [0.5, 1, a];
+}
+
+deopt_array_literal_all_doubles(.5);
+deopt_array_literal_all_doubles(.5);
+deopt_array_literal_all_doubles(.5);
+array = deopt_array_literal_all_doubles(0.5);
+assertEquals(0.5, array[0]);
+assertEquals(1, array[1]);
+assertEquals(0.5, array[2]);
+  %OptimizeFunctionOnNextCall(deopt_array_literal_all_doubles);
+array = deopt_array_literal_all_doubles(5);
+array = deopt_array_literal_all_doubles(6);
+assertOptimized(deopt_array_literal_all_doubles);
+assertEquals(0.5, array[0]);
+assertEquals(1, array[1]);
+assertEquals(6, array[2]);
+
+var foo = new Object();
+array = deopt_array_literal_all_doubles(foo);
+assertUnoptimized(deopt_array_literal_all_doubles);
+assertEquals(0.5, array[0]);
+assertEquals(1, array[1]);
+assertEquals(foo, array[2]);
 
 (function literals_after_osr() {
   var color = [0];
diff --git a/test/mjsunit/array-natives-elements.js b/test/mjsunit/array-natives-elements.js
index f64818d..d63346d 100644
--- a/test/mjsunit/array-natives-elements.js
+++ b/test/mjsunit/array-natives-elements.js
@@ -25,22 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays
-
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile time
-// sticks if built with snapshot. If --smi-only-arrays is deactivated by
-// default, only a no-snapshot build actually has smi-only arrays enabled in
-// this test case. Depending on whether smi-only arrays are actually enabled,
-// this test takes the appropriate code path to check smi-only arrays.
-
-support_smi_only_arrays = %HasFastSmiElements([1,2,3,4,5,6,7,8,9,10]);
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
+// Flags: --allow-natives-syntax
 
 // IC and Crankshaft support for smi-only elements in dynamic array literals.
 function get(foo) { return foo; }  // Used to generate dynamic values.
@@ -308,10 +293,8 @@
   assertEquals([1.1,{},2,3], a4);
 }
 
-if (support_smi_only_arrays) {
-  for (var i = 0; i < 3; i++) {
-    array_natives_test();
-  }
-  %OptimizeFunctionOnNextCall(array_natives_test);
+for (var i = 0; i < 3; i++) {
   array_natives_test();
 }
+%OptimizeFunctionOnNextCall(array_natives_test);
+array_natives_test();
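
The warm-up idiom repeated throughout these files, spelled out once (a sketch; %OptimizeFunctionOnNextCall and assertOptimized require d8 with --allow-natives-syntax):

function target() { return [1, 2, 3]; }
target(); target(); target();         // warm up: collect type feedback
%OptimizeFunctionOnNextCall(target);  // request optimization for the next call
target();                             // this call runs crankshafted code
assertOptimized(target);              // mjsunit helper: verify it stuck
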
diff --git a/test/mjsunit/array-sort.js b/test/mjsunit/array-sort.js
index 3fa623a..6275542 100644
--- a/test/mjsunit/array-sort.js
+++ b/test/mjsunit/array-sort.js
@@ -404,3 +404,47 @@
   return a.val - b.val;
 }
 arr.sort(cmpTest);
+
+function TestSortDoesNotDependOnObjectPrototypeHasOwnProperty() {
+  Array.prototype.sort.call({
+    __proto__: { hasOwnProperty: null, 0: 1 },
+    length: 5
+  });
+
+  var arr = new Array(2);
+  Object.defineProperty(arr, 0, { get: function() {}, set: function() {} });
+  arr.hasOwnProperty = null;
+  arr.sort();
+}
+
+TestSortDoesNotDependOnObjectPrototypeHasOwnProperty();
+
+function TestSortDoesNotDependOnArrayPrototypePush() {
+  // InsertionSort is used for arrays whose length is <= 22.
+  var arr = [];
+  for (var i = 0; i < 22; i++) arr[i] = {};
+  Array.prototype.push = function() {
+    fail('Should not call push');
+  };
+  arr.sort();
+
+  // Quicksort is used for arrays whose length is > 22.
+  // Arrays whose length is > 1000 guarantee that GetThirdIndex is executed.
+  arr = [];
+  for (var i = 0; i < 2000; ++i) arr[i] = {};
+  arr.sort();
+}
+
+TestSortDoesNotDependOnArrayPrototypePush();
+
+function TestSortDoesNotDependOnArrayPrototypeSort() {
+  var arr = [];
+  for (var i = 0; i < 2000; i++) arr[i] = {};
+  var sortfn = Array.prototype.sort;
+  Array.prototype.sort = function() {
+    fail('Should not call sort');
+  };
+  sortfn.call(arr);
+}
+
+TestSortDoesNotDependOnArrayPrototypeSort();
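
What the three tests above guard, restated in plain JavaScript: sort must go through engine-internal operations rather than user-reachable prototype methods, so it keeps working on array-likes even when those methods are clobbered. A self-contained sketch:

var arrayLike = { 0: "b", 1: "a", length: 2, hasOwnProperty: null };
Array.prototype.sort.call(arrayLike);  // must not call arrayLike.hasOwnProperty
print(arrayLike[0] + arrayLike[1]);    // "ab"
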
diff --git a/test/mjsunit/assert-opt-and-deopt.js b/test/mjsunit/assert-opt-and-deopt.js
index d0caafa..e9aba1d 100644
--- a/test/mjsunit/assert-opt-and-deopt.js
+++ b/test/mjsunit/assert-opt-and-deopt.js
@@ -137,7 +137,7 @@
     case OptTracker.OptimizationState.NEVER:
       return true;
   }
-  return false;
+  return true;
 }
 // (End of class OptTracker.)
 
diff --git a/test/mjsunit/builtins.js b/test/mjsunit/builtins.js
index ce2c680..fe7d35d 100644
--- a/test/mjsunit/builtins.js
+++ b/test/mjsunit/builtins.js
@@ -38,6 +38,14 @@
   return typeof obj == "function";
 }
 
+function isV8Native(name) {
+  return name == "GeneratorFunctionPrototype" ||
+      name == "SetIterator" ||
+      name == "MapIterator" ||
+      name == "ArrayIterator" ||
+      name == "StringIterator";
+}
+
 function checkConstructor(func, name) {
   // A constructor is a function with a prototype and properties on the
   // prototype object besides "constructor";
@@ -54,12 +62,13 @@
   assertFalse(proto_desc.writable, name);
   assertFalse(proto_desc.configurable, name);
   var prototype = proto_desc.value;
-  assertEquals(name == "GeneratorFunctionPrototype" ? Object.prototype : null,
+  assertEquals(isV8Native(name) ? Object.prototype : null,
                Object.getPrototypeOf(prototype),
                name);
   for (var i = 0; i < propNames.length; i++) {
     var propName = propNames[i];
     if (propName == "constructor") continue;
+    if (isV8Native(name)) continue;
     var testName = name + "-" + propName;
     var propDesc = Object.getOwnPropertyDescriptor(prototype, propName);
     assertTrue(propDesc.hasOwnProperty("value"), testName);
diff --git a/test/mjsunit/compiler/inline-arguments.js b/test/mjsunit/compiler/inline-arguments.js
index 1337ab2..d52f31b 100644
--- a/test/mjsunit/compiler/inline-arguments.js
+++ b/test/mjsunit/compiler/inline-arguments.js
@@ -309,3 +309,29 @@
   delete forceDeopt.deopt;
   outer();
 })();
+
+
+// Test inlining of functions with %_Arguments and %_ArgumentsLength intrinsic.
+(function () {
+  function inner(len,a,b,c) {
+    assertSame(len, %_ArgumentsLength());
+    for (var i = 1; i < len; ++i) {
+      var c = String.fromCharCode(96 + i);
+      assertSame(c, %_Arguments(i));
+    }
+  }
+
+  function outer() {
+    inner(1);
+    inner(2, 'a');
+    inner(3, 'a', 'b');
+    inner(4, 'a', 'b', 'c');
+    inner(5, 'a', 'b', 'c', 'd');
+    inner(6, 'a', 'b', 'c', 'd', 'e');
+  }
+
+  outer();
+  outer();
+  %OptimizeFunctionOnNextCall(outer);
+  outer();
+})();
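
For readers without natives syntax: %_ArgumentsLength() and %_Arguments(i) are the intrinsic counterparts of arguments.length and arguments[i]. A plain-JS equivalent of the inlined check above:

function inner(len) {
  if (len !== arguments.length) throw new Error("bad length");
  for (var i = 1; i < len; ++i) {
    if (arguments[i] !== String.fromCharCode(96 + i)) throw new Error("bad arg " + i);
  }
}
inner(3, 'a', 'b');  // 96 + 1 -> 'a', 96 + 2 -> 'b'; passes
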
diff --git a/test/mjsunit/compiler/opt-next-call.js b/test/mjsunit/compiler/opt-next-call.js
new file mode 100644
index 0000000..6366c7d
--- /dev/null
+++ b/test/mjsunit/compiler/opt-next-call.js
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo() {
+  return "fooed";
+}
+
+%OptimizeFunctionOnNextCall(foo);
+assertEquals("fooed", foo());
+assertOptimized(foo);
diff --git a/test/mjsunit/compiler/osr-warm.js b/test/mjsunit/compiler/osr-warm.js
index 65ada1e..73e1fd5 100644
--- a/test/mjsunit/compiler/osr-warm.js
+++ b/test/mjsunit/compiler/osr-warm.js
@@ -35,7 +35,7 @@
 }
 
 assertEquals(0, f1(1));
-assertEquals(0, f1(10000000));
+assertEquals(0, f1(200000));
 
 function f2(x) {
   var sum = 1;
@@ -47,4 +47,4 @@
 }
 
 assertEquals(2, f2(1));
-assertEquals(10000001, f2(10000000));
+assertEquals(200001, f2(200000));
diff --git a/test/mjsunit/compiler/regress-411262.js b/test/mjsunit/compiler/regress-411262.js
new file mode 100644
index 0000000..ffbfe2e
--- /dev/null
+++ b/test/mjsunit/compiler/regress-411262.js
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+function b() {
+}
+function f() {
+  b.apply(this, arguments);
+}
+
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/test/mjsunit/compiler/shift-shr.js b/test/mjsunit/compiler/shift-shr.js
new file mode 100644
index 0000000..a300b2a
--- /dev/null
+++ b/test/mjsunit/compiler/shift-shr.js
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --noopt-safe-uint32-operations
+
+// Check the results of `left >>> right`. The result is always unsigned (and
+// therefore positive).
+function test_shr(left) {
+  var errors = 0;
+
+  for (var i = 1; i < 1024; i++) {
+    var temp = left >>> i;
+    if (temp < 0) {
+      errors++;
+    }
+  }
+
+  return errors;
+}
+
+assertEquals(0, test_shr(1));
+%OptimizeFunctionOnNextCall(test_shr);
+for (var i = 5; i >= -5; i--) {
+  assertEquals(0, test_shr(i));
+}
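
Background for the loop bound above: `left >>> right` operates on ToUint32(left) with the shift count taken mod 32, so every result lies in [0, 2^32) regardless of sign. A few spot checks:

print(-1 >>> 0);   // 4294967295: ToUint32(-1), zero shift
print(-8 >>> 1);   // 2147483644: the sign bit is shifted out, not extended
print(5 >>> 40);   // 0: the shift count 40 is masked to 40 % 32 = 8
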
diff --git a/test/mjsunit/const-eval-init.js b/test/mjsunit/const-eval-init.js
index d350384..50e3a8d 100644
--- a/test/mjsunit/const-eval-init.js
+++ b/test/mjsunit/const-eval-init.js
@@ -36,29 +36,29 @@
   var source =
       // Deleting 'x' removes the local const property.
       "delete x;" +
-      // Initialization turns into assignment to global 'x'.
+      // Initialization redefines global 'x'.
       "const x = 3; assertEquals(3, x);" +
-      // No constness of the global 'x'.
-      "x = 4; assertEquals(4, x);";
+      // Test constness of the global 'x'.
+      "x = 4; assertEquals(3, x);";
   eval(source);
 }
 
 testIntroduceGlobal();
-assertEquals(4, x);
+assertEquals("undefined", typeof x);
 
 function testAssignExistingGlobal() {
   var source =
       // Delete 'x' to remove the local const property.
       "delete x;" +
-      // Initialization turns into assignment to global 'x'.
+      // Initialization redefines global 'x'.
       "const x = 5; assertEquals(5, x);" +
-      // No constness of the global 'x'.
-      "x = 6; assertEquals(6, x);";
+      // Test constness of the global 'x'.
+      "x = 6; assertEquals(5, x);";
   eval(source);
 }
 
 testAssignExistingGlobal();
-assertEquals(6, x);
+assertEquals("undefined", typeof x);
 
 function testAssignmentArgument(x) {
   function local() {
@@ -66,7 +66,7 @@
     eval(source);
   }
   local();
-  assertEquals(7, x);
+  assertEquals("undefined", typeof x);
 }
 
 for (var i = 0; i < 5; i++) {
@@ -74,17 +74,18 @@
 }
 %OptimizeFunctionOnNextCall(testAssignmentArgument);
 testAssignmentArgument();
-assertEquals(6, x);
+assertEquals("undefined", typeof x);
 
 __defineSetter__('x', function() { throw 42; });
-function testAssignGlobalThrows() {
-  // Initialization turns into assignment to global 'x' which
-  // throws an exception.
-  var source = "delete x; const x = 8";
+var finished = false;
+function testRedefineGlobal() {
+  // Initialization redefines global 'x'.
+  var source = "delete x; const x = 8; finished = true;";
   eval(source);
 }
 
-assertThrows("testAssignGlobalThrows()");
+testRedefineGlobal();
+assertTrue(finished);
 
 function testInitFastCaseExtension() {
   var source = "const x = 9; assertEquals(9, x); x = 10; assertEquals(9, x)";
@@ -111,7 +112,7 @@
     eval(source);
   }
   local();
-  assertEquals(13, x);
+  assertEquals(12, x);
 }
 
 testAssignSurroundingContextSlot();
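
These assertions exercise V8's legacy sloppy-mode const, under which an assignment to a const-bound name is silently ignored. For contrast, a sketch of the ES2015 semantics, where the same assignment throws:

"use strict";
const x = 3;
try {
  x = 4;                           // assignment to a const binding
} catch (e) {
  print(e instanceof TypeError);   // true
}
print(x);                          // 3: the binding is unchanged
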
diff --git a/test/mjsunit/const-redecl.js b/test/mjsunit/const-redecl.js
index c0b97e6..f311f0d 100644
--- a/test/mjsunit/const-redecl.js
+++ b/test/mjsunit/const-redecl.js
@@ -49,37 +49,6 @@
 }
 
 
-// NOTE: TestGlobal usually only tests the given string in the context
-// of a global object in dictionary mode. This is because we use
-// delete to get rid of any added properties.
-function TestGlobal(s,e) {
-  // Collect the global properties before the call.
-  var properties = [];
-  for (var key in this) properties.push(key);
-  // Compute the result.
-  var result;
-  try {
-    var code = s + (e ? "; $$$result=" + e : "");
-    if (this.execScript) {
-      execScript(code);
-    } else {
-      this.eval(code);
-    }
-    // Avoid issues if $$$result is not defined by
-    // reading it through this.
-    result = this.$$$result;
-  } catch (x) {
-    result = CheckException(x);
-  }
-  // Get rid of any introduced global properties before
-  // returning the result.
-  for (var key in this) {
-    if (properties.indexOf(key) == -1) delete this[key];
-  }
-  return result;
-}
-
-
 function TestContext(s,e) {
   try {
     // Use a with-statement to force the system to do dynamic
@@ -98,8 +67,6 @@
   var msg = s;
   if (opt_e) { e = opt_e; msg += "; " + opt_e; }
   assertEquals(expected, TestLocal(s,e), "local:'" + msg + "'");
-  // Redeclarations of global consts do not throw, they are silently ignored.
-  assertEquals(42, TestGlobal(s, 42), "global:'" + msg + "'");
   assertEquals(expected, TestContext(s,e), "context:'" + msg + "'");
 }
 
@@ -112,7 +79,7 @@
   // Eval first definition.
   TestAll("TypeError", 'eval("' + def0 +'"); ' + def1);
   // Eval second definition.
-  TestAll("TypeError", def0 + '; eval("' + def1 + '")');
+  TestAll("TypeError", def0 + '; eval("' + def1 +'")');
   // Eval both definitions separately.
   TestAll("TypeError", 'eval("' + def0 +'"); eval("' + def1 + '")');
 }
@@ -234,47 +201,26 @@
 assertEquals(original_undef, undefined, "undefined got overwritten");
 undefined = original_undef;
 
-var a; const a; const a = 1;
-assertEquals(1, a, "a has wrong value");
-a = 2;
-assertEquals(2, a, "a should be writable");
-
-var b = 1; const b = 2;
-assertEquals(2, b, "b has wrong value");
-
-var c = 1; const c = 2; const c = 3;
-assertEquals(3, c, "c has wrong value");
-
-const d = 1; const d = 2;
-assertEquals(1, d, "d has wrong value");
-
-const e = 1; var e = 2;
+const e = 1; eval('var e = 2');
 assertEquals(1, e, "e has wrong value");
 
-const f = 1; const f;
-assertEquals(1, f, "f has wrong value");
-
-var g; const g = 1;
-assertEquals(1, g, "g has wrong value");
-g = 2;
-assertEquals(2, g, "g should be writable");
-
-const h; var h = 1;
-assertEquals(undefined,h,  "h has wrong value");
+const h; eval('var h = 1');
+assertEquals(undefined, h, "h has wrong value");
 
 eval("Object.defineProperty(this, 'i', { writable: true });"
    + "const i = 7;"
    + "assertEquals(7, i, \"i has wrong value\");");
 
 var global = this;
-assertThrows(function() {
-  Object.defineProperty(global, 'j', { writable: true })
-}, TypeError);
-const j = 2;  // This is what makes the function above throw, because the
-// const declaration gets hoisted and makes the property non-configurable.
+Object.defineProperty(global, 'j', { value: 100, writable: true });
+assertEquals(100, j);
+// The const declaration stays configurable, so the defineProperty call above
+// succeeds even though the const declaration is hoisted above it.
+const j = 2;
 assertEquals(2, j, "j has wrong value");
 
-var k = 1; const k;
-// You could argue about the expected result here. For now, the winning
-// argument is that "const k;" is equivalent to "const k = undefined;".
-assertEquals(undefined, k, "k has wrong value");
+var k = 1;
+try { eval('const k'); } catch(e) { }
+assertEquals(1, k, "k has wrong value");
+try { eval('const k = 10'); } catch(e) { }
+assertEquals(1, k, "k has wrong value");
diff --git a/test/mjsunit/constant-folding-2.js b/test/mjsunit/constant-folding-2.js
index f429c6c..73cf040 100644
--- a/test/mjsunit/constant-folding-2.js
+++ b/test/mjsunit/constant-folding-2.js
@@ -181,6 +181,17 @@
   assertEquals(Math.pow(2, 52) + 1, Math.round(Math.pow(2, 52) + 1));
 });
 
+test(function mathFround() {
+  assertTrue(isNaN(Math.fround(NaN)));
+  assertEquals("Infinity", String(1/Math.fround(0)));
+  assertEquals("-Infinity", String(1/Math.fround(-0)));
+  assertEquals("Infinity", String(Math.fround(Infinity)));
+  assertEquals("-Infinity", String(Math.fround(-Infinity)));
+  assertEquals("Infinity", String(Math.fround(1E200)));
+  assertEquals("-Infinity", String(Math.fround(-1E200)));
+  assertEquals(3.1415927410125732, Math.fround(Math.PI));
+});
+
 test(function mathFloor() {
   assertEquals(1, Math.floor(1.5));
   assertEquals(-2, Math.floor(-1.5));
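
The mathFround constants above come straight from float32 rounding: Math.fround(x) behaves like a round-trip through a Float32Array. A self-contained check:

var f32 = new Float32Array(1);
f32[0] = Math.PI;                        // stored as float32
print(f32[0] === Math.fround(Math.PI));  // true
print(Math.fround(Math.PI));             // 3.1415927410125732
print(Math.fround(1E200));               // Infinity: overflows float32 range
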
diff --git a/test/mjsunit/cross-realm-filtering.js b/test/mjsunit/cross-realm-filtering.js
index 902cceb..9523e8c 100644
--- a/test/mjsunit/cross-realm-filtering.js
+++ b/test/mjsunit/cross-realm-filtering.js
@@ -70,72 +70,3 @@
 Realm.eval(realms[0], script);
 assertSame(Realm.shared.caller_0, Realm.shared.result_0);
 assertSame(null, Realm.shared.result_1);
-
-
-// Check function constructor.
-var ctor_script = "Function.constructor";
-var ctor_a_script =
-    "(function() { return Function.constructor.apply(this, ['return 1;']); })";
-var ctor_b_script = "Function.constructor.bind(this, 'return 1;')";
-var ctor_c_script =
-    "(function() { return Function.constructor.call(this, 'return 1;'); })";
-Realm.shared = {
-  ctor_0 : Realm.eval(realms[0], ctor_script),
-  ctor_1 : Realm.eval(realms[1], ctor_script),
-  ctor_a_0 : Realm.eval(realms[0], ctor_a_script),
-  ctor_a_1 : Realm.eval(realms[1], ctor_a_script),
-  ctor_b_0 : Realm.eval(realms[0], ctor_b_script),
-  ctor_b_1 : Realm.eval(realms[1], ctor_b_script),
-  ctor_c_0 : Realm.eval(realms[0], ctor_c_script),
-  ctor_c_1 : Realm.eval(realms[1], ctor_c_script),
-}
-
-var script_0 = "                                                               \
-  var ctor_0 = Realm.shared.ctor_0;                                            \
-  Realm.shared.direct_0 = ctor_0('return 1');                                  \
-  Realm.shared.indirect_0 = (function() { return ctor_0('return 1;'); })();    \
-  Realm.shared.apply_0 = ctor_0.apply(this, ['return 1']);                     \
-  Realm.shared.bind_0 = ctor_0.bind(this, 'return 1')();                       \
-  Realm.shared.call_0 = ctor_0.call(this, 'return 1');                         \
-  Realm.shared.a_0 = Realm.shared.ctor_a_0();                                  \
-  Realm.shared.b_0 = Realm.shared.ctor_b_0();                                  \
-  Realm.shared.c_0 = Realm.shared.ctor_c_0();                                  \
-";
-
-script = script_0 + script_0.replace(/_0/g, "_1");
-
-Realm.eval(realms[0], script);
-assertSame(1, Realm.shared.direct_0());
-assertSame(1, Realm.shared.indirect_0());
-assertSame(1, Realm.shared.apply_0());
-assertSame(1, Realm.shared.bind_0());
-assertSame(1, Realm.shared.call_0());
-assertSame(1, Realm.shared.a_0());
-assertSame(1, Realm.shared.b_0());
-assertSame(1, Realm.shared.c_0());
-assertSame(undefined, Realm.shared.direct_1);
-assertSame(undefined, Realm.shared.indirect_1);
-assertSame(undefined, Realm.shared.apply_1);
-assertSame(undefined, Realm.shared.bind_1);
-assertSame(undefined, Realm.shared.call_1);
-assertSame(1, Realm.shared.a_1());
-assertSame(undefined, Realm.shared.b_1);
-assertSame(1, Realm.shared.c_1());
-
-Realm.eval(realms[1], script);
-assertSame(undefined, Realm.shared.direct_0);
-assertSame(undefined, Realm.shared.indirect_0);
-assertSame(undefined, Realm.shared.apply_0);
-assertSame(undefined, Realm.shared.bind_0);
-assertSame(undefined, Realm.shared.call_0);
-assertSame(1, Realm.shared.a_0());
-assertSame(undefined, Realm.shared.b_0);
-assertSame(1, Realm.shared.c_1());
-assertSame(1, Realm.shared.direct_1());
-assertSame(1, Realm.shared.indirect_1());
-assertSame(1, Realm.shared.apply_1());
-assertSame(1, Realm.shared.bind_1());
-assertSame(1, Realm.shared.call_1());
-assertSame(1, Realm.shared.a_1());
-assertSame(1, Realm.shared.b_1());
-assertSame(1, Realm.shared.c_1());
diff --git a/test/mjsunit/debug-backtrace-text.js b/test/mjsunit/debug-backtrace-text.js
index 61648fa..3bfaeb0 100644
--- a/test/mjsunit/debug-backtrace-text.js
+++ b/test/mjsunit/debug-backtrace-text.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --turbo-deoptimization
 
 // The functions used for testing backtraces.
 function Point(x, y) {
diff --git a/test/mjsunit/debug-break-inline.js b/test/mjsunit/debug-break-inline.js
index 4418fa8..3ef4d4e 100644
--- a/test/mjsunit/debug-break-inline.js
+++ b/test/mjsunit/debug-break-inline.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --turbo-deoptimization
 
 // This test tests that deoptimization due to debug breaks works for
 // inlined functions where the full-code is generated before the
diff --git a/test/mjsunit/debug-clearbreakpointgroup.js b/test/mjsunit/debug-clearbreakpointgroup.js
index 0cfc5c9..137dfec 100644
--- a/test/mjsunit/debug-clearbreakpointgroup.js
+++ b/test/mjsunit/debug-clearbreakpointgroup.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
 // Get the Debug object exposed from the debug context global object.
 var Debug = debug.Debug
 
diff --git a/test/mjsunit/debug-compile-event.js b/test/mjsunit/debug-compile-event.js
index 89a71dd..c38cd84 100644
--- a/test/mjsunit/debug-compile-event.js
+++ b/test/mjsunit/debug-compile-event.js
@@ -32,6 +32,7 @@
 var exception = false;  // Exception in debug event listener.
 var before_compile_count = 0;
 var after_compile_count = 0;
+var compile_error_count = 0;
 var current_source = '';  // Current source being compiled.
 var source_count = 0;  // Total number of sources compiled.
 var host_compilations = 0;  // Number of sources compiled through the API.
@@ -48,11 +49,12 @@
 function listener(event, exec_state, event_data, data) {
   try {
     if (event == Debug.DebugEvent.BeforeCompile ||
-        event == Debug.DebugEvent.AfterCompile) {
+        event == Debug.DebugEvent.AfterCompile ||
+        event == Debug.DebugEvent.CompileError) {
       // Count the events.
       if (event == Debug.DebugEvent.BeforeCompile) {
         before_compile_count++;
-      } else {
+      } else if (event == Debug.DebugEvent.AfterCompile) {
         after_compile_count++;
         switch (event_data.script().compilationType()) {
           case Debug.ScriptCompilationType.Host:
@@ -62,6 +64,8 @@
             eval_compilations++;
             break;
         }
+      } else {
+        compile_error_count++;
       }
 
       // If the compiled source contains 'eval' there will be additional compile
@@ -81,9 +85,11 @@
       assertTrue('context' in msg.body.script);
 
       // Check that we pick script name from //# sourceURL, iff present
-      assertEquals(current_source.indexOf('sourceURL') >= 0 ?
-                     'myscript.js' : undefined,
-                   event_data.script().name());
+      if (event == Debug.DebugEvent.AfterCompile) {
+        assertEquals(current_source.indexOf('sourceURL') >= 0 ?
+            'myscript.js' : undefined,
+                     event_data.script().name());
+      }
     }
   } catch (e) {
     exception = e
@@ -105,11 +111,17 @@
 // Using JSON.parse does not cause additional compilation events.
 compileSource('x=1; //# sourceURL=myscript.js');
 
+try {
+  compileSource('}');
+} catch(e) {
+}
+
 // Make sure that the debug event listener was invoked.
 assertFalse(exception, "exception in listener")
 
-// Number of before and after compile events should be the same.
-assertEquals(before_compile_count, after_compile_count);
+// The number of before events should equal after plus compile-error events.
+assertEquals(before_compile_count, after_compile_count + compile_error_count);
+assertEquals(1, compile_error_count);
 
 // Check the actual number of events (no compilation through the API as all
 // source compiled through eval).
diff --git a/test/mjsunit/debug-evaluate-arguments.js b/test/mjsunit/debug-evaluate-arguments.js
index 92b745f..9765f19 100644
--- a/test/mjsunit/debug-evaluate-arguments.js
+++ b/test/mjsunit/debug-evaluate-arguments.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --turbo-deoptimization
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
 
diff --git a/test/mjsunit/debug-evaluate-closure.js b/test/mjsunit/debug-evaluate-closure.js
index 778defd..cf507b5 100644
--- a/test/mjsunit/debug-evaluate-closure.js
+++ b/test/mjsunit/debug-evaluate-closure.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --turbo-deoptimization
 
 Debug = debug.Debug;
 var listened = false;
diff --git a/test/mjsunit/debug-evaluate-with.js b/test/mjsunit/debug-evaluate-with.js
index c19a707..3f3310f 100644
--- a/test/mjsunit/debug-evaluate-with.js
+++ b/test/mjsunit/debug-evaluate-with.js
@@ -26,6 +26,8 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
+
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug
 
diff --git a/test/mjsunit/debug-is-active.js b/test/mjsunit/debug-is-active.js
new file mode 100644
index 0000000..19968f0
--- /dev/null
+++ b/test/mjsunit/debug-is-active.js
@@ -0,0 +1,28 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+Debug = debug.Debug;
+
+function f() { return %_DebugIsActive() != 0; }
+
+assertFalse(f());
+assertFalse(f());
+Debug.setListener(function() {});
+assertTrue(f());
+Debug.setListener(null);
+assertFalse(f());
+
+%OptimizeFunctionOnNextCall(f);
+assertFalse(f());
+assertOptimized(f);
+
+Debug.setListener(function() {});
+assertTrue(f());
+assertOptimized(f);
+
+Debug.setListener(null);
+assertFalse(f());
+assertOptimized(f);
diff --git a/test/mjsunit/debug-receiver.js b/test/mjsunit/debug-receiver.js
index 21cdde8..2d5d2e0 100644
--- a/test/mjsunit/debug-receiver.js
+++ b/test/mjsunit/debug-receiver.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug
+// Flags: --expose-debug-as debug --turbo-deoptimization
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug;
 
diff --git a/test/mjsunit/debug-scopes.js b/test/mjsunit/debug-scopes.js
index ce37d24..4823496 100644
--- a/test/mjsunit/debug-scopes.js
+++ b/test/mjsunit/debug-scopes.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --expose-debug-as debug --allow-natives-syntax --turbo-deoptimization
 // The functions used for testing backtraces. They are at the top to make the
 // testing of source line/column easier.
 
diff --git a/test/mjsunit/debug-script.js b/test/mjsunit/debug-script.js
index 80d423e..5ffada1 100644
--- a/test/mjsunit/debug-script.js
+++ b/test/mjsunit/debug-script.js
@@ -59,7 +59,7 @@
 }
 
 // This has to be updated if the number of native scripts change.
-assertTrue(named_native_count == 19 || named_native_count == 20);
+assertTrue(named_native_count == 26 || named_native_count == 27);
 // Only the 'gc' extension is loaded.
 assertEquals(1, extension_count);
 // This script and mjsunit.js have been loaded.  If using d8, d8 loads
diff --git a/test/mjsunit/debug-step-2.js b/test/mjsunit/debug-step-2.js
index 502b426..5fe7466 100644
--- a/test/mjsunit/debug-step-2.js
+++ b/test/mjsunit/debug-step-2.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
 
 // This test tests that full code compiled without debug break slots
 // is recompiled with debug break slots when debugging is started.
diff --git a/test/mjsunit/deopt-global-accessor.js b/test/mjsunit/deopt-global-accessor.js
new file mode 100644
index 0000000..5c544a0
--- /dev/null
+++ b/test/mjsunit/deopt-global-accessor.js
@@ -0,0 +1,23 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+x = 1;
+x = 2;
+x = 3;
+
+function f() {
+  return x;
+}
+
+f();
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
+
+Object.defineProperty(this, "x", {get:function() { return 100; }});
+
+assertEquals(100, f());
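
The mechanism in deopt-global-accessor.js, restated: a global created by plain assignment stays configurable, so it can later be redefined as an accessor, and optimized code that embedded the plain load must deoptimize. A plain-JS sketch (the deopt itself is engine-internal; only the changed result is observable):

x = 3;                       // assignment-created global: configurable
function f() { return x; }
print(f());                  // 3
Object.defineProperty(this, "x", { get: function() { return 100; } });
print(f());                  // 100: f now goes through the getter
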
diff --git a/test/mjsunit/deserialize-reference.js b/test/mjsunit/deserialize-reference.js
new file mode 100644
index 0000000..b032013
--- /dev/null
+++ b/test/mjsunit/deserialize-reference.js
@@ -0,0 +1,8 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --cache=code --serialize-toplevel
+
+var a = "123";
+assertEquals(a, "123");
diff --git a/test/mjsunit/dictionary-properties.js b/test/mjsunit/dictionary-properties.js
index c70c598..0659268 100644
--- a/test/mjsunit/dictionary-properties.js
+++ b/test/mjsunit/dictionary-properties.js
@@ -39,8 +39,10 @@
 SlowPrototype.prototype.bar = 2;
 SlowPrototype.prototype.baz = 3;
 delete SlowPrototype.prototype.baz;
+new SlowPrototype;
 
-assertFalse(%HasFastProperties(SlowPrototype.prototype));
+// Prototypes stay fast even after deleting properties.
+assertTrue(%HasFastProperties(SlowPrototype.prototype));
 var fast_proto = new SlowPrototype();
 assertTrue(%HasFastProperties(SlowPrototype.prototype));
 assertTrue(%HasFastProperties(fast_proto.__proto__));
diff --git a/test/mjsunit/elements-kind-depends.js b/test/mjsunit/elements-kind-depends.js
index 82f188b..539fbd0 100644
--- a/test/mjsunit/elements-kind-depends.js
+++ b/test/mjsunit/elements-kind-depends.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --allow-natives-syntax
 
 function burn() {
   var a = new Array(3);
diff --git a/test/mjsunit/elements-kind.js b/test/mjsunit/elements-kind.js
index 3aa513a..64b4a09 100644
--- a/test/mjsunit/elements-kind.js
+++ b/test/mjsunit/elements-kind.js
@@ -25,22 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc --nostress-opt --typed-array-max_size_in-heap=2048
-
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
+// Flags: --allow-natives-syntax --expose-gc --nostress-opt --typed-array-max_size_in-heap=2048
 
 var elements_kind = {
   fast_smi_only             :  'fast smi only elements',
@@ -131,10 +116,6 @@
 }
 
 function assertKind(expected, obj, name_opt) {
-  if (!support_smi_only_arrays &&
-      expected == elements_kind.fast_smi_only) {
-    expected = elements_kind.fast;
-  }
   assertEquals(expected, getKind(obj), name_opt);
 }
 
@@ -144,13 +125,11 @@
 me.drink = 0xC0C0A;
 assertKind(elements_kind.fast, me);
 
-if (support_smi_only_arrays) {
-  var too = [1,2,3];
-  assertKind(elements_kind.fast_smi_only, too);
-  too.dance = 0xD15C0;
-  too.drink = 0xC0C0A;
-  assertKind(elements_kind.fast_smi_only, too);
-}
+var too = [1,2,3];
+assertKind(elements_kind.fast_smi_only, too);
+too.dance = 0xD15C0;
+too.drink = 0xC0C0A;
+assertKind(elements_kind.fast_smi_only, too);
 
 // Make sure the element kind transitions from smi when a non-smi is stored.
 function test_wrapper() {
@@ -166,7 +145,9 @@
   }
   assertKind(elements_kind.fast, you);
 
-  assertKind(elements_kind.dictionary, new Array(0xDECAF));
+  var temp = [];
+  temp[0xDECAF] = 0;
+  assertKind(elements_kind.dictionary, temp);
 
   var fast_double_array = new Array(0xDECAF);
   for (var i = 0; i < 0xDECAF; i++) fast_double_array[i] = i / 2;
@@ -217,111 +198,106 @@
 test_wrapper();
 %ClearFunctionTypeFeedback(test_wrapper);
 
-if (support_smi_only_arrays) {
-  %NeverOptimizeFunction(construct_smis);
+%NeverOptimizeFunction(construct_smis);
 
-  // This code exists to eliminate the learning influence of AllocationSites
-  // on the following tests.
-  var __sequence = 0;
-  function make_array_string() {
-    this.__sequence = this.__sequence + 1;
-    return "/* " + this.__sequence + " */  [0, 0, 0];"
-  }
-  function make_array() {
-    return eval(make_array_string());
-  }
-
-  function construct_smis() {
-    var a = make_array();
-    a[0] = 0;  // Send the COW array map to the steak house.
-    assertKind(elements_kind.fast_smi_only, a);
-    return a;
-  }
-  %NeverOptimizeFunction(construct_doubles);
-  function construct_doubles() {
-    var a = construct_smis();
-    a[0] = 1.5;
-    assertKind(elements_kind.fast_double, a);
-    return a;
-  }
-  %NeverOptimizeFunction(construct_objects);
-  function construct_objects() {
-    var a = construct_smis();
-    a[0] = "one";
-    assertKind(elements_kind.fast, a);
-    return a;
-  }
-
-  // Test crankshafted transition SMI->DOUBLE.
-  %NeverOptimizeFunction(convert_to_double);
-  function convert_to_double(array) {
-    array[1] = 2.5;
-    assertKind(elements_kind.fast_double, array);
-    assertEquals(2.5, array[1]);
-  }
-  var smis = construct_smis();
-  for (var i = 0; i < 3; i++) convert_to_double(smis);
-  %OptimizeFunctionOnNextCall(convert_to_double);
-  smis = construct_smis();
-  convert_to_double(smis);
-  // Test crankshafted transitions SMI->FAST and DOUBLE->FAST.
-  %NeverOptimizeFunction(convert_to_fast);
-  function convert_to_fast(array) {
-    array[1] = "two";
-    assertKind(elements_kind.fast, array);
-    assertEquals("two", array[1]);
-  }
-  smis = construct_smis();
-  for (var i = 0; i < 3; i++) convert_to_fast(smis);
-  var doubles = construct_doubles();
-  for (var i = 0; i < 3; i++) convert_to_fast(doubles);
-  smis = construct_smis();
-  doubles = construct_doubles();
-  %OptimizeFunctionOnNextCall(convert_to_fast);
-  convert_to_fast(smis);
-  convert_to_fast(doubles);
-  // Test transition chain SMI->DOUBLE->FAST (crankshafted function will
-  // transition to FAST directly).
-  %NeverOptimizeFunction(convert_mixed);
-  function convert_mixed(array, value, kind) {
-    array[1] = value;
-    assertKind(kind, array);
-    assertEquals(value, array[1]);
-  }
-  smis = construct_smis();
-  for (var i = 0; i < 3; i++) {
-    convert_mixed(smis, 1.5, elements_kind.fast_double);
-  }
-  doubles = construct_doubles();
-  for (var i = 0; i < 3; i++) {
-    convert_mixed(doubles, "three", elements_kind.fast);
-  }
-  convert_mixed(construct_smis(), "three", elements_kind.fast);
-  convert_mixed(construct_doubles(), "three", elements_kind.fast);
-  %OptimizeFunctionOnNextCall(convert_mixed);
-  smis = construct_smis();
-  doubles = construct_doubles();
-  convert_mixed(smis, 1, elements_kind.fast);
-  convert_mixed(doubles, 1, elements_kind.fast);
-  assertTrue(%HaveSameMap(smis, doubles));
+// This code exists to eliminate the learning influence of AllocationSites
+// on the following tests.
+var __sequence = 0;
+function make_array_string() {
+  this.__sequence = this.__sequence + 1;
+  return "/* " + this.__sequence + " */  [0, 0, 0];"
 }
+function make_array() {
+  return eval(make_array_string());
+}
+
+function construct_smis() {
+  var a = make_array();
+  a[0] = 0;  // Send the COW array map to the steak house.
+  assertKind(elements_kind.fast_smi_only, a);
+  return a;
+}
+  %NeverOptimizeFunction(construct_doubles);
+function construct_doubles() {
+  var a = construct_smis();
+  a[0] = 1.5;
+  assertKind(elements_kind.fast_double, a);
+  return a;
+}
+  %NeverOptimizeFunction(construct_objects);
+function construct_objects() {
+  var a = construct_smis();
+  a[0] = "one";
+  assertKind(elements_kind.fast, a);
+  return a;
+}
+
+// Test crankshafted transition SMI->DOUBLE.
+  %NeverOptimizeFunction(convert_to_double);
+function convert_to_double(array) {
+  array[1] = 2.5;
+  assertKind(elements_kind.fast_double, array);
+  assertEquals(2.5, array[1]);
+}
+var smis = construct_smis();
+for (var i = 0; i < 3; i++) convert_to_double(smis);
+  %OptimizeFunctionOnNextCall(convert_to_double);
+smis = construct_smis();
+convert_to_double(smis);
+// Test crankshafted transitions SMI->FAST and DOUBLE->FAST.
+  %NeverOptimizeFunction(convert_to_fast);
+function convert_to_fast(array) {
+  array[1] = "two";
+  assertKind(elements_kind.fast, array);
+  assertEquals("two", array[1]);
+}
+smis = construct_smis();
+for (var i = 0; i < 3; i++) convert_to_fast(smis);
+var doubles = construct_doubles();
+for (var i = 0; i < 3; i++) convert_to_fast(doubles);
+smis = construct_smis();
+doubles = construct_doubles();
+  %OptimizeFunctionOnNextCall(convert_to_fast);
+convert_to_fast(smis);
+convert_to_fast(doubles);
+// Test transition chain SMI->DOUBLE->FAST (crankshafted function will
+// transition to FAST directly).
+  %NeverOptimizeFunction(convert_mixed);
+function convert_mixed(array, value, kind) {
+  array[1] = value;
+  assertKind(kind, array);
+  assertEquals(value, array[1]);
+}
+smis = construct_smis();
+for (var i = 0; i < 3; i++) {
+  convert_mixed(smis, 1.5, elements_kind.fast_double);
+}
+doubles = construct_doubles();
+for (var i = 0; i < 3; i++) {
+  convert_mixed(doubles, "three", elements_kind.fast);
+}
+convert_mixed(construct_smis(), "three", elements_kind.fast);
+convert_mixed(construct_doubles(), "three", elements_kind.fast);
+  %OptimizeFunctionOnNextCall(convert_mixed);
+smis = construct_smis();
+doubles = construct_doubles();
+convert_mixed(smis, 1, elements_kind.fast);
+convert_mixed(doubles, 1, elements_kind.fast);
+assertTrue(%HaveSameMap(smis, doubles));
 
 // Crankshaft support for smi-only elements in dynamic array literals.
 function get(foo) { return foo; }  // Used to generate dynamic values.
 
 function crankshaft_test() {
-  if (support_smi_only_arrays) {
-    var a1 = [get(1), get(2), get(3)];
-    assertKind(elements_kind.fast_smi_only, a1);
-  }
+  var a1 = [get(1), get(2), get(3)];
+  assertKind(elements_kind.fast_smi_only, a1);
+
   var a2 = new Array(get(1), get(2), get(3));
   assertKind(elements_kind.fast_smi_only, a2);
   var b = [get(1), get(2), get("three")];
   assertKind(elements_kind.fast, b);
   var c = [get(1), get(2), get(3.5)];
-  if (support_smi_only_arrays) {
-    assertKind(elements_kind.fast_double, c);
-  }
+  assertKind(elements_kind.fast_double, c);
 }
 for (var i = 0; i < 3; i++) {
   crankshaft_test();
@@ -335,85 +311,76 @@
 // DOUBLE->OBJECT, and SMI->OBJECT. No matter in which order these three are
 // created, they must always end up with the same FAST map.
 
-// This test is meaningless without FAST_SMI_ONLY_ELEMENTS.
-if (support_smi_only_arrays) {
-  // Preparation: create one pair of identical objects for each case.
-  var a = [1, 2, 3];
-  var b = [1, 2, 3];
-  assertTrue(%HaveSameMap(a, b));
-  assertKind(elements_kind.fast_smi_only, a);
-  var c = [1, 2, 3];
-  c["case2"] = true;
-  var d = [1, 2, 3];
-  d["case2"] = true;
-  assertTrue(%HaveSameMap(c, d));
-  assertFalse(%HaveSameMap(a, c));
-  assertKind(elements_kind.fast_smi_only, c);
-  var e = [1, 2, 3];
-  e["case3"] = true;
-  var f = [1, 2, 3];
-  f["case3"] = true;
-  assertTrue(%HaveSameMap(e, f));
-  assertFalse(%HaveSameMap(a, e));
-  assertFalse(%HaveSameMap(c, e));
-  assertKind(elements_kind.fast_smi_only, e);
-  // Case 1: SMI->DOUBLE, DOUBLE->OBJECT, SMI->OBJECT.
-  a[0] = 1.5;
-  assertKind(elements_kind.fast_double, a);
-  a[0] = "foo";
-  assertKind(elements_kind.fast, a);
-  b[0] = "bar";
-  assertTrue(%HaveSameMap(a, b));
-  // Case 2: SMI->DOUBLE, SMI->OBJECT, DOUBLE->OBJECT.
-  c[0] = 1.5;
-  assertKind(elements_kind.fast_double, c);
-  assertFalse(%HaveSameMap(c, d));
-  d[0] = "foo";
-  assertKind(elements_kind.fast, d);
-  assertFalse(%HaveSameMap(c, d));
-  c[0] = "bar";
-  assertTrue(%HaveSameMap(c, d));
-  // Case 3: SMI->OBJECT, SMI->DOUBLE, DOUBLE->OBJECT.
-  e[0] = "foo";
-  assertKind(elements_kind.fast, e);
-  assertFalse(%HaveSameMap(e, f));
-  f[0] = 1.5;
-  assertKind(elements_kind.fast_double, f);
-  assertFalse(%HaveSameMap(e, f));
-  f[0] = "bar";
-  assertKind(elements_kind.fast, f);
-  assertTrue(%HaveSameMap(e, f));
-}
+// Preparation: create one pair of identical objects for each case.
+var a = [1, 2, 3];
+var b = [1, 2, 3];
+assertTrue(%HaveSameMap(a, b));
+assertKind(elements_kind.fast_smi_only, a);
+var c = [1, 2, 3];
+c["case2"] = true;
+var d = [1, 2, 3];
+d["case2"] = true;
+assertTrue(%HaveSameMap(c, d));
+assertFalse(%HaveSameMap(a, c));
+assertKind(elements_kind.fast_smi_only, c);
+var e = [1, 2, 3];
+e["case3"] = true;
+var f = [1, 2, 3];
+f["case3"] = true;
+assertTrue(%HaveSameMap(e, f));
+assertFalse(%HaveSameMap(a, e));
+assertFalse(%HaveSameMap(c, e));
+assertKind(elements_kind.fast_smi_only, e);
+// Case 1: SMI->DOUBLE, DOUBLE->OBJECT, SMI->OBJECT.
+a[0] = 1.5;
+assertKind(elements_kind.fast_double, a);
+a[0] = "foo";
+assertKind(elements_kind.fast, a);
+b[0] = "bar";
+assertTrue(%HaveSameMap(a, b));
+// Case 2: SMI->DOUBLE, SMI->OBJECT, DOUBLE->OBJECT.
+c[0] = 1.5;
+assertKind(elements_kind.fast_double, c);
+assertFalse(%HaveSameMap(c, d));
+d[0] = "foo";
+assertKind(elements_kind.fast, d);
+assertFalse(%HaveSameMap(c, d));
+c[0] = "bar";
+assertTrue(%HaveSameMap(c, d));
+// Case 3: SMI->OBJECT, SMI->DOUBLE, DOUBLE->OBJECT.
+e[0] = "foo";
+assertKind(elements_kind.fast, e);
+assertFalse(%HaveSameMap(e, f));
+f[0] = 1.5;
+assertKind(elements_kind.fast_double, f);
+assertFalse(%HaveSameMap(e, f));
+f[0] = "bar";
+assertKind(elements_kind.fast, f);
+assertTrue(%HaveSameMap(e, f));
 
 // Test if Array.concat() works correctly with DOUBLE elements.
-if (support_smi_only_arrays) {
-  var a = [1, 2];
-  assertKind(elements_kind.fast_smi_only, a);
-  var b = [4.5, 5.5];
-  assertKind(elements_kind.fast_double, b);
-  var c = a.concat(b);
-  assertEquals([1, 2, 4.5, 5.5], c);
-  assertKind(elements_kind.fast_double, c);
-}
+var a = [1, 2];
+assertKind(elements_kind.fast_smi_only, a);
+var b = [4.5, 5.5];
+assertKind(elements_kind.fast_double, b);
+var c = a.concat(b);
+assertEquals([1, 2, 4.5, 5.5], c);
+assertKind(elements_kind.fast_double, c);
 
 // Test that Array.push() correctly handles SMI elements.
-if (support_smi_only_arrays) {
-  var a = [1, 2];
-  assertKind(elements_kind.fast_smi_only, a);
-  a.push(3, 4, 5);
-  assertKind(elements_kind.fast_smi_only, a);
-  assertEquals([1, 2, 3, 4, 5], a);
-}
+var a = [1, 2];
+assertKind(elements_kind.fast_smi_only, a);
+a.push(3, 4, 5);
+assertKind(elements_kind.fast_smi_only, a);
+assertEquals([1, 2, 3, 4, 5], a);
 
 // Test that Array.splice() and Array.slice() return correct ElementsKinds.
-if (support_smi_only_arrays) {
-  var a = ["foo", "bar"];
-  assertKind(elements_kind.fast, a);
-  var b = a.splice(0, 1);
-  assertKind(elements_kind.fast, b);
-  var c = a.slice(0, 1);
-  assertKind(elements_kind.fast, c);
-}
+var a = ["foo", "bar"];
+assertKind(elements_kind.fast, a);
+var b = a.splice(0, 1);
+assertKind(elements_kind.fast, b);
+var c = a.slice(0, 1);
+assertKind(elements_kind.fast, c);
 
 // Throw away type information in the ICs for next stress run.
 gc();
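
A one-screen summary of the transition lattice these assertions walk (a sketch assuming d8 with the test's flags and this file's assertKind helper): elements kinds only move forward along SMI -> DOUBLE -> OBJECT; storing a less specific value upgrades the backing store, and an array never transitions back.

var arr = [1, 2, 3];          // starts with fast smi-only elements
assertKind(elements_kind.fast_smi_only, arr);
arr[0] = 1.5;                 // SMI -> DOUBLE
assertKind(elements_kind.fast_double, arr);
arr[1] = "foo";               // DOUBLE -> OBJECT (fast elements)
assertKind(elements_kind.fast, arr);
arr[1] = 2;                   // still fast object elements; no downgrade
assertKind(elements_kind.fast, arr);
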
diff --git a/test/mjsunit/elements-transition-hoisting.js b/test/mjsunit/elements-transition-hoisting.js
index 76027b9..9f229d2 100644
--- a/test/mjsunit/elements-transition-hoisting.js
+++ b/test/mjsunit/elements-transition-hoisting.js
@@ -25,21 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --allow-natives-syntax
 // Flags: --nostress-opt
 
 // Ensure that ElementsKind transitions in various situations are hoisted (or
 // not hoisted) correctly, don't change the semantics programs and don't trigger
 // deopt through hoisting in important situations.
 
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6));
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
-
 function test_wrapper() {
   // Make sure that a simple elements array transitions inside a loop before
   // stores to an array gets hoisted in a way that doesn't generate a deopt in
@@ -238,9 +230,7 @@
   %ClearFunctionTypeFeedback(testStraightLineDupeElinination);
 }
 
-if (support_smi_only_arrays) {
-  // The test is called in a test wrapper that has type feedback cleared to
-  // prevent the influence of allocation-sites, which learn from transitions.
-  test_wrapper();
-  %ClearFunctionTypeFeedback(test_wrapper);
-}
+// The test is called in a test wrapper that has type feedback cleared to
+// prevent the influence of allocation-sites, which learn from transitions.
+test_wrapper();
+%ClearFunctionTypeFeedback(test_wrapper);
diff --git a/test/mjsunit/elements-transition.js b/test/mjsunit/elements-transition.js
index 7298e68..f6a8188 100644
--- a/test/mjsunit/elements-transition.js
+++ b/test/mjsunit/elements-transition.js
@@ -25,107 +25,95 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays
+// Flags: --allow-natives-syntax
 // Flags: --nostress-opt
 
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
+// This code exists to eliminate the learning influence of AllocationSites
+// on the following tests.
+var __sequence = 0;
+function make_array_string(length) {
+  this.__sequence = this.__sequence + 1;
+  return "/* " + this.__sequence + " */  new Array(" + length + ");";
+}
+function make_array(length) {
+  return eval(make_array_string(length));
 }
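+// (Each call evals a distinct source string, so every `new Array`
+// expression gets its own AllocationSite and no transition feedback is
+// shared between the arrays returned by make_array.)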
 
-if (support_smi_only_arrays) {
-  // This code exists to eliminate the learning influence of AllocationSites
-  // on the following tests.
-  var __sequence = 0;
-  function make_array_string(length) {
-    this.__sequence = this.__sequence + 1;
-    return "/* " + this.__sequence + " */  new Array(" + length + ");";
-  }
-  function make_array(length) {
-    return eval(make_array_string(length));
-  }
+function test(test_double, test_object, set, length) {
+  // We apply the same operations to two identical arrays.  The first array
+  // triggers an IC miss, upon which the conversion stub is generated, but the
+  // actual conversion is done in runtime.  The second array, arriving at
+  // the previously patched IC, is then converted using the conversion stub.
+  var array_1 = make_array(length);
+  var array_2 = make_array(length);
 
-  function test(test_double, test_object, set, length) {
-    // We apply the same operations to two identical arrays.  The first array
-    // triggers an IC miss, upon which the conversion stub is generated, but the
-    // actual conversion is done in runtime.  The second array, arriving at
-    // the previously patched IC, is then converted using the conversion stub.
-    var array_1 = make_array(length);
-    var array_2 = make_array(length);
-
-    // false, true, nice setter function, 20
-    assertTrue(%HasFastSmiElements(array_1));
-    assertTrue(%HasFastSmiElements(array_2));
-    for (var i = 0; i < length; i++) {
-      if (i == length - 5 && test_double) {
-        // Trigger conversion to fast double elements at length-5.
-        set(array_1, i, 0.5);
-        set(array_2, i, 0.5);
-        assertTrue(%HasFastDoubleElements(array_1));
-        assertTrue(%HasFastDoubleElements(array_2));
-      } else if (i == length - 3 && test_object) {
-        // Trigger conversion to fast object elements at length-3.
-        set(array_1, i, 'object');
-        set(array_2, i, 'object');
-        assertTrue(%HasFastObjectElements(array_1));
-        assertTrue(%HasFastObjectElements(array_2));
-      } else if (i != length - 7) {
-        // Set the element to an integer but leave a hole at length-7.
-        set(array_1, i, 2*i+1);
-        set(array_2, i, 2*i+1);
-      }
+  // (Example invocation: test_double=false, test_object=true, a plain
+  // setter function, length 20.)
+  assertTrue(%HasFastSmiElements(array_1));
+  assertTrue(%HasFastSmiElements(array_2));
+  for (var i = 0; i < length; i++) {
+    if (i == length - 5 && test_double) {
+      // Trigger conversion to fast double elements at length-5.
+      set(array_1, i, 0.5);
+      set(array_2, i, 0.5);
+      assertTrue(%HasFastDoubleElements(array_1));
+      assertTrue(%HasFastDoubleElements(array_2));
+    } else if (i == length - 3 && test_object) {
+      // Trigger conversion to fast object elements at length-3.
+      set(array_1, i, 'object');
+      set(array_2, i, 'object');
+      assertTrue(%HasFastObjectElements(array_1));
+      assertTrue(%HasFastObjectElements(array_2));
+    } else if (i != length - 7) {
+      // Set the element to an integer but leave a hole at length-7.
+      set(array_1, i, 2*i+1);
+      set(array_2, i, 2*i+1);
     }
-
-    for (var i = 0; i < length; i++) {
-      if (i == length - 5 && test_double) {
-        assertEquals(0.5, array_1[i]);
-        assertEquals(0.5, array_2[i]);
-      } else if (i == length - 3 && test_object) {
-        assertEquals('object', array_1[i]);
-        assertEquals('object', array_2[i]);
-      } else if (i != length - 7) {
-        assertEquals(2*i+1, array_1[i]);
-        assertEquals(2*i+1, array_2[i]);
-      } else {
-        assertEquals(undefined, array_1[i]);
-        assertEquals(undefined, array_2[i]);
-      }
-    }
-
-    assertEquals(length, array_1.length);
-    assertEquals(length, array_2.length);
   }
 
-  function run_test(test_double, test_object, set, length) {
-    test(test_double, test_object, set, length);
+  for (var i = 0; i < length; i++) {
+    if (i == length - 5 && test_double) {
+      assertEquals(0.5, array_1[i]);
+      assertEquals(0.5, array_2[i]);
+    } else if (i == length - 3 && test_object) {
+      assertEquals('object', array_1[i]);
+      assertEquals('object', array_2[i]);
+    } else if (i != length - 7) {
+      assertEquals(2*i+1, array_1[i]);
+      assertEquals(2*i+1, array_2[i]);
+    } else {
+      assertEquals(undefined, array_1[i]);
+      assertEquals(undefined, array_2[i]);
+    }
+  }
+
+  assertEquals(length, array_1.length);
+  assertEquals(length, array_2.length);
+}
+
+function run_test(test_double, test_object, set, length) {
+  test(test_double, test_object, set, length);
     %ClearFunctionTypeFeedback(test);
-  }
-
-  run_test(false, false, function(a,i,v){ a[i] = v; }, 20);
-  run_test(true,  false, function(a,i,v){ a[i] = v; }, 20);
-  run_test(false, true,  function(a,i,v){ a[i] = v; }, 20);
-  run_test(true,  true,  function(a,i,v){ a[i] = v; }, 20);
-
-  run_test(false, false, function(a,i,v){ a[i] = v; }, 10000);
-  run_test(true,  false, function(a,i,v){ a[i] = v; }, 10000);
-  run_test(false, true,  function(a,i,v){ a[i] = v; }, 10000);
-  run_test(true,  true,  function(a,i,v){ a[i] = v; }, 10000);
-
-  // Check COW arrays
-  function get_cow() { return [1, 2, 3]; }
-
-  function transition(x) { x[0] = 1.5; }
-
-  var ignore = get_cow();
-  transition(ignore);  // Handled by runtime.
-  var a = get_cow();
-  var b = get_cow();
-  transition(a);  // Handled by IC.
-  assertEquals(1.5, a[0]);
-  assertEquals(1, b[0]);
-} else {
-  print("Test skipped because smi only arrays are not supported.");
 }
+
+run_test(false, false, function(a,i,v){ a[i] = v; }, 20);
+run_test(true,  false, function(a,i,v){ a[i] = v; }, 20);
+run_test(false, true,  function(a,i,v){ a[i] = v; }, 20);
+run_test(true,  true,  function(a,i,v){ a[i] = v; }, 20);
+
+run_test(false, false, function(a,i,v){ a[i] = v; }, 10000);
+run_test(true,  false, function(a,i,v){ a[i] = v; }, 10000);
+run_test(false, true,  function(a,i,v){ a[i] = v; }, 10000);
+run_test(true,  true,  function(a,i,v){ a[i] = v; }, 10000);
+
+// Check COW arrays
+function get_cow() { return [1, 2, 3]; }
+
+function transition(x) { x[0] = 1.5; }
+
+var ignore = get_cow();
+transition(ignore);  // Handled by runtime.
+var a = get_cow();
+var b = get_cow();
+transition(a);  // Handled by IC.
+assertEquals(1.5, a[0]);
+assertEquals(1, b[0]);
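+// Extra checks (sketch): the untouched COW copy should still be on smi-only
+// elements, while the transitioned array is now on double elements.
+assertTrue(%HasFastSmiElements(b));
+assertTrue(%HasFastDoubleElements(a));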
diff --git a/test/mjsunit/es6/arguments-iterator.js b/test/mjsunit/es6/arguments-iterator.js
new file mode 100644
index 0000000..32d4b11
--- /dev/null
+++ b/test/mjsunit/es6/arguments-iterator.js
@@ -0,0 +1,230 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+// Note in general that "arguments.foo" and "var o = arguments; o.foo"
+// are treated differently by full-codegen, and so both cases need to be
+// tested.
+
+function TestDirectArgumentsIteratorProperty() {
+  assertTrue(arguments.hasOwnProperty(Symbol.iterator));
+  assertFalse(arguments.propertyIsEnumerable(Symbol.iterator));
+  var descriptor = Object.getOwnPropertyDescriptor(arguments, Symbol.iterator);
+  assertTrue(descriptor.writable);
+  assertFalse(descriptor.enumerable);
+  assertTrue(descriptor.configurable);
+  assertEquals(descriptor.value, [][Symbol.iterator]);
+  assertEquals(arguments[Symbol.iterator], [][Symbol.iterator]);
+}
+TestDirectArgumentsIteratorProperty();
+
+
+function TestIndirectArgumentsIteratorProperty() {
+  var o = arguments;
+  assertTrue(o.hasOwnProperty(Symbol.iterator));
+  assertFalse(o.propertyIsEnumerable(Symbol.iterator));
+  assertEquals(o[Symbol.iterator], [][Symbol.iterator]);
+}
+TestIndirectArgumentsIteratorProperty();
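+
+
+// Usage sketch (assumes mjsunit's assertArrayEquals): since arguments
+// delegates to the Array iterator, collecting its values with for-of should
+// match an index-based copy.
+function TestCollectBothWays() {
+  var viaIndex = Array.prototype.slice.call(arguments);
+  var viaIterator = [];
+  for (var v of arguments) viaIterator.push(v);
+  assertArrayEquals(viaIndex, viaIterator);
+}
+TestCollectBothWays(1, 'x', true);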
+
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({value: value, done: done}, result);
+}
+
+
+function TestDirectValues1(a, b, c) {
+  var iterator = arguments[Symbol.iterator]();
+  assertIteratorResult(a, false, iterator.next());
+  assertIteratorResult(b, false, iterator.next());
+  assertIteratorResult(c, false, iterator.next());
+  assertIteratorResult(undefined, true, iterator.next());
+}
+TestDirectValues1(1, 2, 3);
+
+
+function TestIndirectValues1(a, b, c) {
+  var args = arguments;
+  var iterator = args[Symbol.iterator]();
+  assertIteratorResult(a, false, iterator.next());
+  assertIteratorResult(b, false, iterator.next());
+  assertIteratorResult(c, false, iterator.next());
+  assertIteratorResult(undefined, true, iterator.next());
+}
+TestIndirectValues1(1, 2, 3);
+
+
+function TestDirectValues2(a, b, c) {
+  var iterator = arguments[Symbol.iterator]();
+  assertIteratorResult(a, false, iterator.next());
+  assertIteratorResult(b, false, iterator.next());
+  assertIteratorResult(c, false, iterator.next());
+  assertIteratorResult(undefined, true, iterator.next());
+
+  arguments[3] = 4;
+  arguments.length = 4;
+  assertIteratorResult(undefined, true, iterator.next());
+}
+TestDirectValues2(1, 2, 3);
+
+
+function TestIndirectValues2(a, b, c) {
+  var args = arguments;
+  var iterator = args[Symbol.iterator]();
+  assertIteratorResult(a, false, iterator.next());
+  assertIteratorResult(b, false, iterator.next());
+  assertIteratorResult(c, false, iterator.next());
+  assertIteratorResult(undefined, true, iterator.next());
+
+  arguments[3] = 4;
+  arguments.length = 4;
+  assertIteratorResult(undefined, true, iterator.next());
+}
+TestIndirectValues2(1, 2, 3);
+
+
+function TestDirectValues3(a, b, c) {
+  var iterator = arguments[Symbol.iterator]();
+  assertIteratorResult(a, false, iterator.next());
+  assertIteratorResult(b, false, iterator.next());
+
+  arguments.length = 2;
+  assertIteratorResult(undefined, true, iterator.next());
+}
+TestDirectValues3(1, 2, 3);
+
+
+function TestIndirectValues3(a, b, c) {
+  var args = arguments;
+  var iterator = args[Symbol.iterator]();
+  assertIteratorResult(a, false, iterator.next());
+  assertIteratorResult(b, false, iterator.next());
+
+  arguments.length = 2;
+  assertIteratorResult(undefined, true, iterator.next());
+}
+TestIndirectValues3(1, 2, 3);
+
+
+function TestDirectValues4(a, b, c) {
+  var iterator = arguments[Symbol.iterator]();
+  assertIteratorResult(a, false, iterator.next());
+  assertIteratorResult(b, false, iterator.next());
+  assertIteratorResult(c, false, iterator.next());
+
+  arguments.length = 4;
+  assertIteratorResult(undefined, false, iterator.next());
+  assertIteratorResult(undefined, true, iterator.next());
+}
+TestDirectValues4(1, 2, 3);
+
+
+function TestIndirectValues4(a, b, c) {
+  var args = arguments;
+  var iterator = args[Symbol.iterator]();
+  assertIteratorResult(a, false, iterator.next());
+  assertIteratorResult(b, false, iterator.next());
+  assertIteratorResult(c, false, iterator.next());
+
+  arguments.length = 4;
+  assertIteratorResult(undefined, false, iterator.next());
+  assertIteratorResult(undefined, true, iterator.next());
+}
+TestIndirectValues4(1, 2, 3);
+
+
+function TestForOf() {
+  var i = 0;
+  for (var value of arguments) {
+    assertEquals(arguments[i++], value);
+  }
+
+  assertEquals(arguments.length, i);
+}
+TestForOf(1, 2, 3, 4, 5);
+
+
+function TestAssignmentToIterator() {
+  var i = 0;
+  arguments[Symbol.iterator] = [].entries;
+  for (var entry of arguments) {
+    assertEquals([i, arguments[i]], entry);
+    i++;
+  }
+
+  assertEquals(arguments.length, i);
+}
+TestAssignmentToIterator(1, 2, 3, 4, 5);
+
+
+function TestArgumentsMutation() {
+  var i = 0;
+  for (var x of arguments) {
+    assertEquals(arguments[i], x);
+    arguments[i+1] *= 2;
+    i++;
+  }
+
+  assertEquals(arguments.length, i);
+}
+TestArgumentsMutation(1, 2, 3, 4, 5);
+
+
+function TestSloppyArgumentsAliasing(a0, a1, a2, a3, a4) {
+  var i = 0;
+  for (var x of arguments) {
+    assertEquals(arguments[i], x);
+    a0 = a1; a1 = a2; a3 = a4;
+    i++;
+  }
+
+  assertEquals(arguments.length, i);
+}
+TestSloppyArgumentsAliasing(1, 2, 3, 4, 5);
+
+
+function TestStrictArgumentsAliasing(a0, a1, a2, a3, a4) {
+  "use strict";
+  var i = 0;
+  for (var x of arguments) {
+    a0 = a1; a1 = a2; a3 = a4;
+    assertEquals(arguments[i], x);
+    i++;
+  }
+
+  assertEquals(arguments.length, i);
+}
+TestStrictArgumentsAliasing(1, 2, 3, 4, 5);
+
+
+function TestArgumentsAsProto() {
+  "use strict";
+
+  var o = {__proto__:arguments};
+  assertSame([][Symbol.iterator], o[Symbol.iterator]);
+  // Make o dict-mode.
+  %OptimizeObjectForAddingMultipleProperties(o, 0);
+  assertFalse(o.hasOwnProperty(Symbol.iterator));
+  assertSame([][Symbol.iterator], o[Symbol.iterator]);
+  o[Symbol.iterator] = 10;
+  assertTrue(o.hasOwnProperty(Symbol.iterator));
+  assertEquals(10, o[Symbol.iterator]);
+  assertSame([][Symbol.iterator], arguments[Symbol.iterator]);
+
+  // Frozen o.
+  o = Object.freeze({__proto__:arguments});
+  assertSame([][Symbol.iterator], o[Symbol.iterator]);
+  assertFalse(o.hasOwnProperty(Symbol.iterator));
+  assertSame([][Symbol.iterator], o[Symbol.iterator]);
+  // This should throw, but currently it doesn't, because
+  // ExecutableAccessorInfo callbacks don't see the current strict mode.
+  // See note in accessors.cc:SetPropertyOnInstanceIfInherited.
+  o[Symbol.iterator] = 10;
+  assertFalse(o.hasOwnProperty(Symbol.iterator));
+  assertEquals([][Symbol.iterator], o[Symbol.iterator]);
+  assertSame([][Symbol.iterator], arguments[Symbol.iterator]);
+}
+TestArgumentsAsProto();
diff --git a/test/mjsunit/es6/array-iterator.js b/test/mjsunit/es6/array-iterator.js
new file mode 100644
index 0000000..96122cd
--- /dev/null
+++ b/test/mjsunit/es6/array-iterator.js
@@ -0,0 +1,246 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+
+var NONE = 0;
+var READ_ONLY = 1;
+var DONT_ENUM = 2;
+var DONT_DELETE = 4;
+
+
+function assertHasOwnProperty(object, name, attrs) {
+  assertTrue(object.hasOwnProperty(name));
+  var desc = Object.getOwnPropertyDescriptor(object, name);
+  assertEquals(desc.writable, !(attrs & READ_ONLY));
+  assertEquals(desc.enumerable, !(attrs & DONT_ENUM));
+  assertEquals(desc.configurable, !(attrs & DONT_DELETE));
+}
+
+
+function TestArrayPrototype() {
+  assertHasOwnProperty(Array.prototype, 'entries', DONT_ENUM);
+  assertHasOwnProperty(Array.prototype, 'keys', DONT_ENUM);
+  assertHasOwnProperty(Array.prototype, Symbol.iterator, DONT_ENUM);
+}
+TestArrayPrototype();
+
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({value: value, done: done}, result);
+}
+
+
+function TestValues() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array[Symbol.iterator]();
+  assertIteratorResult('a', false, iterator.next());
+  assertIteratorResult('b', false, iterator.next());
+  assertIteratorResult('c', false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+
+  array.push('d');
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestValues();
+
+
+function TestValuesMutate() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array[Symbol.iterator]();
+  assertIteratorResult('a', false, iterator.next());
+  assertIteratorResult('b', false, iterator.next());
+  assertIteratorResult('c', false, iterator.next());
+  array.push('d');
+  assertIteratorResult('d', false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestValuesMutate();
+
+
+function TestKeys() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.keys();
+  assertIteratorResult(0, false, iterator.next());
+  assertIteratorResult(1, false, iterator.next());
+  assertIteratorResult(2, false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+
+  array.push('d');
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestKeys();
+
+
+function TestKeysMutate() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.keys();
+  assertIteratorResult(0, false, iterator.next());
+  assertIteratorResult(1, false, iterator.next());
+  assertIteratorResult(2, false, iterator.next());
+  array.push('d');
+  assertIteratorResult(3, false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestKeysMutate();
+
+
+function TestEntries() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.entries();
+  assertIteratorResult([0, 'a'], false, iterator.next());
+  assertIteratorResult([1, 'b'], false, iterator.next());
+  assertIteratorResult([2, 'c'], false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+
+  array.push('d');
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestEntries();
+
+
+function TestEntriesMutate() {
+  var array = ['a', 'b', 'c'];
+  var iterator = array.entries();
+  assertIteratorResult([0, 'a'], false, iterator.next());
+  assertIteratorResult([1, 'b'], false, iterator.next());
+  assertIteratorResult([2, 'c'], false, iterator.next());
+  array.push('d');
+  assertIteratorResult([3, 'd'], false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestEntriesMutate();
+
+
+function TestArrayIteratorPrototype() {
+  var array = [];
+  var iterator = array.keys();
+
+  var ArrayIteratorPrototype = iterator.__proto__;
+
+  assertEquals(ArrayIteratorPrototype, array[Symbol.iterator]().__proto__);
+  assertEquals(ArrayIteratorPrototype, array.keys().__proto__);
+  assertEquals(ArrayIteratorPrototype, array.entries().__proto__);
+
+  assertEquals(Object.prototype, ArrayIteratorPrototype.__proto__);
+
+  assertEquals('Array Iterator', %_ClassOf(array[Symbol.iterator]()));
+  assertEquals('Array Iterator', %_ClassOf(array.keys()));
+  assertEquals('Array Iterator', %_ClassOf(array.entries()));
+
+  assertFalse(ArrayIteratorPrototype.hasOwnProperty('constructor'));
+  assertArrayEquals(['next'],
+      Object.getOwnPropertyNames(ArrayIteratorPrototype));
+  assertHasOwnProperty(ArrayIteratorPrototype, 'next', DONT_ENUM);
+  assertHasOwnProperty(ArrayIteratorPrototype, Symbol.iterator, DONT_ENUM);
+}
+TestArrayIteratorPrototype();
+
+
+function TestForArrayValues() {
+  var buffer = [];
+  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+  var i = 0;
+  for (var value of array[Symbol.iterator]()) {
+    buffer[i++] = value;
+  }
+
+  assertEquals(8, buffer.length);
+
+  for (var i = 0; i < buffer.length; i++) {
+    assertSame(array[i], buffer[i]);
+  }
+}
+TestForArrayValues();
+
+
+function TestForArrayKeys() {
+  var buffer = [];
+  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+  var i = 0;
+  for (var key of array.keys()) {
+    buffer[i++] = key;
+  }
+
+  assertEquals(8, buffer.length);
+
+  for (var i = 0; i < buffer.length; i++) {
+    assertEquals(i, buffer[i]);
+  }
+}
+TestForArrayKeys();
+
+
+function TestForArrayEntries() {
+  var buffer = [];
+  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+  var i = 0;
+  for (var entry of array.entries()) {
+    buffer[i++] = entry;
+  }
+
+  assertEquals(8, buffer.length);
+
+  for (var i = 0; i < buffer.length; i++) {
+    assertSame(array[i], buffer[i][1]);
+  }
+
+  for (var i = 0; i < buffer.length; i++) {
+    assertEquals(i, buffer[i][0]);
+  }
+}
+TestForArrayEntries();
+
+
+function TestForArray() {
+  var buffer = [];
+  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
+  var i = 0;
+  for (var value of array) {
+    buffer[i++] = value;
+  }
+
+  assertEquals(8, buffer.length);
+
+  for (var i = 0; i < buffer.length; i++) {
+    assertSame(array[i], buffer[i]);
+  }
+}
+TestForArray();
+
+
+function TestNonOwnSlots() {
+  var array = [0];
+  var iterator = array[Symbol.iterator]();
+  var object = {__proto__: iterator};
+
+  assertThrows(function() {
+    object.next();
+  }, TypeError);
+}
+TestNonOwnSlots();
diff --git a/test/mjsunit/es6/collection-iterator.js b/test/mjsunit/es6/collection-iterator.js
new file mode 100644
index 0000000..5503fe5
--- /dev/null
+++ b/test/mjsunit/es6/collection-iterator.js
@@ -0,0 +1,200 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+
+(function TestSetIterator() {
+  var s = new Set;
+  var iter = s.values();
+  assertEquals('Set Iterator', %_ClassOf(iter));
+
+  var SetIteratorPrototype = iter.__proto__;
+  assertFalse(SetIteratorPrototype.hasOwnProperty('constructor'));
+  assertEquals(SetIteratorPrototype.__proto__, Object.prototype);
+
+  var propertyNames = Object.getOwnPropertyNames(SetIteratorPrototype);
+  assertArrayEquals(['next'], propertyNames);
+
+  assertEquals(new Set().values().__proto__, SetIteratorPrototype);
+  assertEquals(new Set().entries().__proto__, SetIteratorPrototype);
+})();
+
+
+(function TestSetIteratorValues() {
+  var s = new Set;
+  s.add(1);
+  s.add(2);
+  s.add(3);
+  var iter = s.values();
+
+  assertEquals({value: 1, done: false}, iter.next());
+  assertEquals({value: 2, done: false}, iter.next());
+  assertEquals({value: 3, done: false}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+})();
+
+
+(function TestSetIteratorKeys() {
+  assertEquals(Set.prototype.keys, Set.prototype.values);
+})();
+
+
+(function TestSetIteratorEntries() {
+  var s = new Set;
+  s.add(1);
+  s.add(2);
+  s.add(3);
+  var iter = s.entries();
+
+  assertEquals({value: [1, 1], done: false}, iter.next());
+  assertEquals({value: [2, 2], done: false}, iter.next());
+  assertEquals({value: [3, 3], done: false}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+})();
+
+
+(function TestSetIteratorMutations() {
+  var s = new Set;
+  s.add(1);
+  var iter = s.values();
+  assertEquals({value: 1, done: false}, iter.next());
+  s.add(2);
+  s.add(3);
+  s.add(4);
+  s.add(5);
+  assertEquals({value: 2, done: false}, iter.next());
+  s.delete(3);
+  assertEquals({value: 4, done: false}, iter.next());
+  s.delete(5);
+  assertEquals({value: undefined, done: true}, iter.next());
+  s.add(4);
+  assertEquals({value: undefined, done: true}, iter.next());
+})();
+
+
+(function TestSetInvalidReceiver() {
+  assertThrows(function() {
+    Set.prototype.values.call({});
+  }, TypeError);
+  assertThrows(function() {
+    Set.prototype.entries.call({});
+  }, TypeError);
+})();
+
+
+(function TestSetIteratorInvalidReceiver() {
+  var iter = new Set().values();
+  assertThrows(function() {
+    iter.next.call({});
+  });
+})();
+
+
+(function TestSetIteratorSymbol() {
+  assertEquals(Set.prototype[Symbol.iterator], Set.prototype.values);
+  assertTrue(Set.prototype.hasOwnProperty(Symbol.iterator));
+  assertFalse(Set.prototype.propertyIsEnumerable(Symbol.iterator));
+
+  var iter = new Set().values();
+  assertEquals(iter, iter[Symbol.iterator]());
+  assertEquals(iter[Symbol.iterator].name, '[Symbol.iterator]');
+})();
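+
+
+(function TestSetForOfUsage() {
+  // Usage sketch: @@iterator makes a Set directly consumable by for-of,
+  // yielding values in insertion order.
+  var collected = [];
+  for (var v of new Set([10, 20])) collected.push(v);
+  assertArrayEquals([10, 20], collected);
+})();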
+
+
+(function TestMapIterator() {
+  var m = new Map;
+  var iter = m.values();
+  assertEquals('Map Iterator', %_ClassOf(iter));
+
+  var MapIteratorPrototype = iter.__proto__;
+  assertFalse(MapIteratorPrototype.hasOwnProperty('constructor'));
+  assertEquals(MapIteratorPrototype.__proto__, Object.prototype);
+
+  var propertyNames = Object.getOwnPropertyNames(MapIteratorPrototype);
+  assertArrayEquals(['next'], propertyNames);
+
+  assertEquals(new Map().values().__proto__, MapIteratorPrototype);
+  assertEquals(new Map().keys().__proto__, MapIteratorPrototype);
+  assertEquals(new Map().entries().__proto__, MapIteratorPrototype);
+})();
+
+
+(function TestMapIteratorValues() {
+  var m = new Map;
+  m.set(1, 11);
+  m.set(2, 22);
+  m.set(3, 33);
+  var iter = m.values();
+
+  assertEquals({value: 11, done: false}, iter.next());
+  assertEquals({value: 22, done: false}, iter.next());
+  assertEquals({value: 33, done: false}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+})();
+
+
+(function TestMapIteratorKeys() {
+  var m = new Map;
+  m.set(1, 11);
+  m.set(2, 22);
+  m.set(3, 33);
+  var iter = m.keys();
+
+  assertEquals({value: 1, done: false}, iter.next());
+  assertEquals({value: 2, done: false}, iter.next());
+  assertEquals({value: 3, done: false}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+})();
+
+
+(function TestMapIteratorEntries() {
+  var m = new Map;
+  m.set(1, 11);
+  m.set(2, 22);
+  m.set(3, 33);
+  var iter = m.entries();
+
+  assertEquals({value: [1, 11], done: false}, iter.next());
+  assertEquals({value: [2, 22], done: false}, iter.next());
+  assertEquals({value: [3, 33], done: false}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+  assertEquals({value: undefined, done: true}, iter.next());
+})();
+
+
+(function TestMapInvalidReceiver() {
+  assertThrows(function() {
+    Map.prototype.values.call({});
+  }, TypeError);
+  assertThrows(function() {
+    Map.prototype.keys.call({});
+  }, TypeError);
+  assertThrows(function() {
+    Map.prototype.entries.call({});
+  }, TypeError);
+})();
+
+
+(function TestMapIteratorInvalidReceiver() {
+  var iter = new Map().values();
+  assertThrows(function() {
+    iter.next.call({});
+  }, TypeError);
+})();
+
+
+(function TestMapIteratorSymbol() {
+  assertEquals(Map.prototype[Symbol.iterator], Map.prototype.entries);
+  assertTrue(Map.prototype.hasOwnProperty(Symbol.iterator));
+  assertFalse(Map.prototype.propertyIsEnumerable(Symbol.iterator));
+
+  var iter = new Map().values();
+  assertEquals(iter, iter[Symbol.iterator]());
+  assertEquals(iter[Symbol.iterator].name, '[Symbol.iterator]');
+})();
diff --git a/test/mjsunit/es6/collections.js b/test/mjsunit/es6/collections.js
new file mode 100644
index 0000000..940c0b9
--- /dev/null
+++ b/test/mjsunit/es6/collections.js
@@ -0,0 +1,1368 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --allow-natives-syntax
+
+
+function assertSize(expected, collection) {
+  if (collection instanceof Map || collection instanceof Set) {
+    assertEquals(expected, collection.size);
+  }
+}
+
+
+// Test valid method calls on Sets and WeakSets
+function TestValidSetCalls(m) {
+  assertDoesNotThrow(function () { m.add(new Object) });
+  assertDoesNotThrow(function () { m.has(new Object) });
+  assertDoesNotThrow(function () { m.delete(new Object) });
+}
+TestValidSetCalls(new Set);
+TestValidSetCalls(new WeakSet);
+
+
+// Test valid getter and setter calls on Maps and WeakMaps
+function TestValidMapCalls(m) {
+  assertDoesNotThrow(function () { m.get(new Object) });
+  assertDoesNotThrow(function () { m.set(new Object) });
+  assertDoesNotThrow(function () { m.has(new Object) });
+  assertDoesNotThrow(function () { m.delete(new Object) });
+}
+TestValidMapCalls(new Map);
+TestValidMapCalls(new WeakMap);
+
+
+// Test invalid getter and setter calls for WeakMap only
+function TestInvalidCalls(m) {
+  assertThrows(function () { m.get(undefined) }, TypeError);
+  assertThrows(function () { m.set(undefined, 0) }, TypeError);
+  assertThrows(function () { m.get(null) }, TypeError);
+  assertThrows(function () { m.set(null, 0) }, TypeError);
+  assertThrows(function () { m.get(0) }, TypeError);
+  assertThrows(function () { m.set(0, 0) }, TypeError);
+  assertThrows(function () { m.get('a-key') }, TypeError);
+  assertThrows(function () { m.set('a-key', 0) }, TypeError);
+}
+TestInvalidCalls(new WeakMap);
+
+
+// Test expected behavior for Sets and WeakSets
+function TestSet(set, key) {
+  assertFalse(set.has(key));
+  assertSame(set, set.add(key));
+  assertTrue(set.has(key));
+  assertTrue(set.delete(key));
+  assertFalse(set.has(key));
+  assertFalse(set.delete(key));
+  assertFalse(set.has(key));
+}
+function TestSetBehavior(set) {
+  for (var i = 0; i < 20; i++) {
+    TestSet(set, new Object);
+    TestSet(set, i);
+    TestSet(set, i / 100);
+    TestSet(set, 'key-' + i);
+  }
+  var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
+  for (var i = 0; i < keys.length; i++) {
+    TestSet(set, keys[i]);
+  }
+}
+TestSetBehavior(new Set);
+TestSet(new WeakSet, new Object);
+
+
+// Test expected mapping behavior for Maps and WeakMaps
+function TestMapping(map, key, value) {
+  assertSame(map, map.set(key, value));
+  assertSame(value, map.get(key));
+}
+function TestMapBehavior1(m) {
+  TestMapping(m, new Object, 23);
+  TestMapping(m, new Object, 'the-value');
+  TestMapping(m, new Object, new Object);
+}
+TestMapBehavior1(new Map);
+TestMapBehavior1(new WeakMap);
+
+
+// Test expected mapping behavior for Maps only
+function TestMapBehavior2(m) {
+  for (var i = 0; i < 20; i++) {
+    TestMapping(m, i, new Object);
+    TestMapping(m, i / 10, new Object);
+    TestMapping(m, 'key-' + i, new Object);
+  }
+  // -0 is handled in TestMinusZeroMap
+  var keys = [ 0, +Infinity, -Infinity, true, false, null, undefined ];
+  for (var i = 0; i < keys.length; i++) {
+    TestMapping(m, keys[i], new Object);
+  }
+}
+TestMapBehavior2(new Map);
+
+
+// Test expected querying behavior of Maps and WeakMaps
+function TestQuery(m) {
+  var key = new Object;
+  var values = [ 'x', 0, +Infinity, -Infinity, true, false, null, undefined ];
+  for (var i = 0; i < values.length; i++) {
+    TestMapping(m, key, values[i]);
+    assertTrue(m.has(key));
+    assertFalse(m.has(new Object));
+  }
+}
+TestQuery(new Map);
+TestQuery(new WeakMap);
+
+
+// Test expected deletion behavior of Maps and WeakMaps
+function TestDelete(m) {
+  var key = new Object;
+  TestMapping(m, key, 'to-be-deleted');
+  assertTrue(m.delete(key));
+  assertFalse(m.delete(key));
+  assertFalse(m.delete(new Object));
+  assertSame(m.get(key), undefined);
+}
+TestDelete(new Map);
+TestDelete(new WeakMap);
+
+
+// Test GC of Maps and WeakMaps with entry
+function TestGC1(m) {
+  var key = new Object;
+  m.set(key, 'not-collected');
+  gc();
+  assertSame('not-collected', m.get(key));
+}
+TestGC1(new Map);
+TestGC1(new WeakMap);
+
+
+// Test GC of Maps and WeakMaps with chained entries
+function TestGC2(m) {
+  var head = new Object;
+  for (var key = head, i = 0; i < 10; i++, key = m.get(key)) {
+    m.set(key, new Object);
+  }
+  gc();
+  var count = 0;
+  for (key = head; key != undefined; key = m.get(key)) {
+    count++;
+  }
+  assertEquals(11, count);
+}
+TestGC2(new Map);
+TestGC2(new WeakMap);
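+// (The chain above makes each value the next lookup key, so while `head`
+// stays reachable every link must survive GC, even in a WeakMap.)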
+
+
+// Test property attribute [[Enumerable]]
+function TestEnumerable(func) {
+  function props(x) {
+    var array = [];
+    for (var p in x) array.push(p);
+    return array.sort();
+  }
+  assertArrayEquals([], props(func));
+  assertArrayEquals([], props(func.prototype));
+  assertArrayEquals([], props(new func()));
+}
+TestEnumerable(Set);
+TestEnumerable(Map);
+TestEnumerable(WeakMap);
+TestEnumerable(WeakSet);
+
+
+// Test arbitrary properties on Maps and WeakMaps
+function TestArbitrary(m) {
+  function TestProperty(map, property, value) {
+    map[property] = value;
+    assertEquals(value, map[property]);
+  }
+  for (var i = 0; i < 20; i++) {
+    TestProperty(m, i, 'val' + i);
+    TestProperty(m, 'foo' + i, 'bar' + i);
+  }
+  TestMapping(m, new Object, 'foobar');
+}
+TestArbitrary(new Map);
+TestArbitrary(new WeakMap);
+
+
+// Test direct constructor call
+assertThrows(function() { Set(); }, TypeError);
+assertThrows(function() { Map(); }, TypeError);
+assertThrows(function() { WeakMap(); }, TypeError);
+assertThrows(function() { WeakSet(); }, TypeError);
+
+
+// Test whether NaN values as keys are treated correctly.
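+// (Note: NaN + 1 is still NaN, so the NaN + 1 probes below verify that any
+// NaN value matches the stored key, per SameValueZero.)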
+var s = new Set;
+assertFalse(s.has(NaN));
+assertFalse(s.has(NaN + 1));
+assertFalse(s.has(23));
+s.add(NaN);
+assertTrue(s.has(NaN));
+assertTrue(s.has(NaN + 1));
+assertFalse(s.has(23));
+var m = new Map;
+assertFalse(m.has(NaN));
+assertFalse(m.has(NaN + 1));
+assertFalse(m.has(23));
+m.set(NaN, 'a-value');
+assertTrue(m.has(NaN));
+assertTrue(m.has(NaN + 1));
+assertFalse(m.has(23));
+
+
+// Test some common JavaScript idioms for Sets
+var s = new Set;
+assertTrue(s instanceof Set);
+assertTrue(Set.prototype.add instanceof Function);
+assertTrue(Set.prototype.has instanceof Function);
+assertTrue(Set.prototype.delete instanceof Function);
+assertTrue(Set.prototype.clear instanceof Function);
+
+
+// Test some common JavaScript idioms for Maps
+var m = new Map;
+assertTrue(m instanceof Map);
+assertTrue(Map.prototype.set instanceof Function);
+assertTrue(Map.prototype.get instanceof Function);
+assertTrue(Map.prototype.has instanceof Function);
+assertTrue(Map.prototype.delete instanceof Function);
+assertTrue(Map.prototype.clear instanceof Function);
+
+
+// Test some common JavaScript idioms for WeakMaps
+var m = new WeakMap;
+assertTrue(m instanceof WeakMap);
+assertTrue(WeakMap.prototype.set instanceof Function);
+assertTrue(WeakMap.prototype.get instanceof Function);
+assertTrue(WeakMap.prototype.has instanceof Function);
+assertTrue(WeakMap.prototype.delete instanceof Function);
+assertTrue(WeakMap.prototype.clear instanceof Function);
+
+
+// Test some common JavaScript idioms for WeakSets
+var s = new WeakSet;
+assertTrue(s instanceof WeakSet);
+assertTrue(WeakSet.prototype.add instanceof Function);
+assertTrue(WeakSet.prototype.has instanceof Function);
+assertTrue(WeakSet.prototype.delete instanceof Function);
+assertTrue(WeakSet.prototype.clear instanceof Function);
+
+
+// Test class of instance and prototype.
+assertEquals("Set", %_ClassOf(new Set))
+assertEquals("Object", %_ClassOf(Set.prototype))
+assertEquals("Map", %_ClassOf(new Map))
+assertEquals("Object", %_ClassOf(Map.prototype))
+assertEquals("WeakMap", %_ClassOf(new WeakMap))
+assertEquals("Object", %_ClassOf(WeakMap.prototype))
+assertEquals("WeakSet", %_ClassOf(new WeakSet))
+assertEquals("Object", %_ClassOf(WeakMap.prototype))
+
+
+// Test name of constructor.
+assertEquals("Set", Set.name);
+assertEquals("Map", Map.name);
+assertEquals("WeakMap", WeakMap.name);
+assertEquals("WeakSet", WeakSet.name);
+
+
+// Test prototype property of Set, Map, WeakMap and WeakSet.
+function TestPrototype(C) {
+  assertTrue(C.prototype instanceof Object);
+  assertEquals({
+    value: {},
+    writable: false,
+    enumerable: false,
+    configurable: false
+  }, Object.getOwnPropertyDescriptor(C, "prototype"));
+}
+TestPrototype(Set);
+TestPrototype(Map);
+TestPrototype(WeakMap);
+TestPrototype(WeakSet);
+
+
+// Test constructor property of the Set, Map, WeakMap and WeakSet prototype.
+function TestConstructor(C) {
+  assertFalse(C === Object.prototype.constructor);
+  assertSame(C, C.prototype.constructor);
+  assertSame(C, (new C).__proto__.constructor);
+  assertEquals(1, C.length);
+}
+TestConstructor(Set);
+TestConstructor(Map);
+TestConstructor(WeakMap);
+TestConstructor(WeakSet);
+
+
+// Test the Set, Map, WeakMap and WeakSet global properties themselves.
+function TestDescriptor(global, C) {
+  assertEquals({
+    value: C,
+    writable: true,
+    enumerable: false,
+    configurable: true
+  }, Object.getOwnPropertyDescriptor(global, C.name));
+}
+TestDescriptor(this, Set);
+TestDescriptor(this, Map);
+TestDescriptor(this, WeakMap);
+TestDescriptor(this, WeakSet);
+
+
+// Regression test for WeakMap prototype.
+assertTrue(WeakMap.prototype.constructor === WeakMap);
+assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype);
+
+
+// Regression test for issue 1617: The prototype of the WeakMap constructor
+// needs to be unique (i.e. different from the one of the Object constructor).
+assertFalse(WeakMap.prototype === Object.prototype);
+var o = Object.create({});
+assertFalse("get" in o);
+assertFalse("set" in o);
+assertEquals(undefined, o.get);
+assertEquals(undefined, o.set);
+var o = Object.create({}, { myValue: {
+  value: 10,
+  enumerable: false,
+  configurable: true,
+  writable: true
+}});
+assertEquals(10, o.myValue);
+
+
+// Regression test for issue 1884: Invoking any of the methods for Harmony
+// maps, sets, or weak maps, with a wrong type of receiver should be throwing
+// a proper TypeError.
+var alwaysBogus = [ undefined, null, true, "x", 23, {} ];
+var bogusReceiversTestSet = [
+  { proto: Set.prototype,
+    funcs: [ 'add', 'has', 'delete' ],
+    receivers: alwaysBogus.concat([ new Map, new WeakMap, new WeakSet ]),
+  },
+  { proto: Map.prototype,
+    funcs: [ 'get', 'set', 'has', 'delete' ],
+    receivers: alwaysBogus.concat([ new Set, new WeakMap, new WeakSet ]),
+  },
+  { proto: WeakMap.prototype,
+    funcs: [ 'get', 'set', 'has', 'delete' ],
+    receivers: alwaysBogus.concat([ new Set, new Map, new WeakSet ]),
+  },
+  { proto: WeakSet.prototype,
+    funcs: [ 'add', 'has', 'delete' ],
+    receivers: alwaysBogus.concat([ new Set, new Map, new WeakMap ]),
+  },
+];
+function TestBogusReceivers(testSet) {
+  for (var i = 0; i < testSet.length; i++) {
+    var proto = testSet[i].proto;
+    var funcs = testSet[i].funcs;
+    var receivers = testSet[i].receivers;
+    for (var j = 0; j < funcs.length; j++) {
+      var func = proto[funcs[j]];
+      for (var k = 0; k < receivers.length; k++) {
+        assertThrows(function () { func.call(receivers[k], {}) }, TypeError);
+      }
+    }
+  }
+}
+TestBogusReceivers(bogusReceiversTestSet);
+
+
+// Stress Test
+// There is a proposed stress-test available at the es-discuss mailing list
+// which cannot be reasonably automated.  Check it out by hand if you like:
+// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
+
+
+// Set and Map size getters
+var setSizeDescriptor = Object.getOwnPropertyDescriptor(Set.prototype, 'size');
+assertEquals(undefined, setSizeDescriptor.value);
+assertEquals(undefined, setSizeDescriptor.set);
+assertTrue(setSizeDescriptor.get instanceof Function);
+assertEquals(undefined, setSizeDescriptor.get.prototype);
+assertFalse(setSizeDescriptor.enumerable);
+assertTrue(setSizeDescriptor.configurable);
+
+var s = new Set();
+assertFalse(s.hasOwnProperty('size'));
+for (var i = 0; i < 10; i++) {
+  assertEquals(i, s.size);
+  s.add(i);
+}
+for (var i = 9; i >= 0; i--) {
+  s.delete(i);
+  assertEquals(i, s.size);
+}
+
+
+var mapSizeDescriptor = Object.getOwnPropertyDescriptor(Map.prototype, 'size');
+assertEquals(undefined, mapSizeDescriptor.value);
+assertEquals(undefined, mapSizeDescriptor.set);
+assertTrue(mapSizeDescriptor.get instanceof Function);
+assertEquals(undefined, mapSizeDescriptor.get.prototype);
+assertFalse(mapSizeDescriptor.enumerable);
+assertTrue(mapSizeDescriptor.configurable);
+
+var m = new Map();
+assertFalse(m.hasOwnProperty('size'));
+for (var i = 0; i < 10; i++) {
+  assertEquals(i, m.size);
+  m.set(i, i);
+}
+for (var i = 9; i >= 0; i--) {
+  m.delete(i);
+  assertEquals(i, m.size);
+}
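+
+
+// Sketch: `size` is an accessor on the prototype, so its getter can be
+// invoked directly with any receiver of the right type.
+assertEquals(0, setSizeDescriptor.get.call(new Set()));
+assertEquals(0, mapSizeDescriptor.get.call(new Map()));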
+
+
+// Test Set clear
+(function() {
+  var s = new Set();
+  s.add(42);
+  assertTrue(s.has(42));
+  assertEquals(1, s.size);
+  s.clear();
+  assertFalse(s.has(42));
+  assertEquals(0, s.size);
+})();
+
+
+// Test Map clear
+(function() {
+  var m = new Map();
+  m.set(42, true);
+  assertTrue(m.has(42));
+  assertEquals(1, m.size);
+  m.clear();
+  assertFalse(m.has(42));
+  assertEquals(0, m.size);
+})();
+
+
+// Test WeakMap clear
+(function() {
+  var k = new Object();
+  var w = new WeakMap();
+  w.set(k, 23);
+  assertTrue(w.has(k));
+  assertEquals(23, w.get(k));
+  w.clear();
+  assertFalse(w.has(k));
+  assertEquals(undefined, w.get(k));
+})();
+
+
+// Test WeakSet clear
+(function() {
+  var k = new Object();
+  var w = new WeakSet();
+  w.add(k);
+  assertTrue(w.has(k));
+  w.clear();
+  assertFalse(w.has(k));
+})();
+
+
+(function TestMinusZeroSet() {
+  var s = new Set();
+  s.add(-0);
+  assertSame(0, s.values().next().value);
+  s.add(0);
+  assertEquals(1, s.size);
+  assertTrue(s.has(0));
+  assertTrue(s.has(-0));
+})();
+
+
+(function TestMinusZeroMap() {
+  var m = new Map();
+  m.set(-0, 'minus');
+  assertSame(0, m.keys().next().value);
+  m.set(0, 'plus');
+  assertEquals(1, m.size);
+  assertTrue(m.has(0));
+  assertTrue(m.has(-0));
+  assertEquals('plus', m.get(0));
+  assertEquals('plus', m.get(-0));
+})();
+
+
+(function TestSetForEachInvalidTypes() {
+  assertThrows(function() {
+    Set.prototype.forEach.call({});
+  }, TypeError);
+
+  var set = new Set();
+  assertThrows(function() {
+    set.forEach({});
+  }, TypeError);
+})();
+
+
+(function TestSetForEach() {
+  var set = new Set();
+  set.add('a');
+  set.add('b');
+  set.add('c');
+
+  var buffer = '';
+  var receiver = {};
+  set.forEach(function(v, k, s) {
+    assertSame(v, k);
+    assertSame(set, s);
+    assertSame(this, receiver);
+    buffer += v;
+    if (v === 'a') {
+      set.delete('b');
+      set.add('d');
+      set.add('e');
+      set.add('f');
+    } else if (v === 'c') {
+      set.add('b');
+      set.delete('e');
+    }
+  }, receiver);
+
+  assertEquals('acdfb', buffer);
+})();
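+// ('b' and 'e' are removed before being visited, and 'b', re-added while
+// visiting 'c', is appended at the end, giving 'acdfb'.)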
+
+
+(function TestSetForEachAddAtEnd() {
+  var set = new Set();
+  set.add('a');
+  set.add('b');
+
+  var buffer = '';
+  set.forEach(function(v) {
+    buffer += v;
+    if (v === 'b') {
+      set.add('c');
+    }
+  });
+
+  assertEquals('abc', buffer);
+})();
+
+
+(function TestSetForEachDeleteNext() {
+  var set = new Set();
+  set.add('a');
+  set.add('b');
+  set.add('c');
+
+  var buffer = '';
+  set.forEach(function(v) {
+    buffer += v;
+    if (v === 'b') {
+      set.delete('c');
+    }
+  });
+
+  assertEquals('ab', buffer);
+})();
+
+
+(function TestSetForEachDeleteVisitedAndAddAgain() {
+  var set = new Set();
+  set.add('a');
+  set.add('b');
+  set.add('c');
+
+  var buffer = '';
+  set.forEach(function(v) {
+    buffer += v;
+    if (v === 'b') {
+      set.delete('a');
+    } else if (v === 'c') {
+      set.add('a');
+    }
+  });
+
+  assertEquals('abca', buffer);
+})();
+
+
+(function TestSetForEachClear() {
+  var set = new Set();
+  set.add('a');
+  set.add('b');
+  set.add('c');
+
+  var buffer = '';
+  set.forEach(function(v) {
+    buffer += v;
+    if (v === 'a') {
+      set.clear();
+      set.add('d');
+      set.add('e');
+    }
+  });
+
+  assertEquals('ade', buffer);
+})();
+
+
+(function TestSetForEachNested() {
+  var set = new Set();
+  set.add('a');
+  set.add('b');
+  set.add('c');
+
+  var buffer = '';
+  set.forEach(function(v) {
+    buffer += v;
+    set.forEach(function(v) {
+      buffer += v;
+      if (v === 'a') {
+        set.delete('b');
+      }
+    });
+  });
+
+  assertEquals('aaccac', buffer);
+})();
+
+
+(function TestSetForEachEarlyExit() {
+  var set = new Set();
+  set.add('a');
+  set.add('b');
+  set.add('c');
+
+  var buffer = '';
+  var ex = {};
+  try {
+    set.forEach(function(v) {
+      buffer += v;
+      throw ex;
+    });
+  } catch (e) {
+    assertEquals(ex, e);
+  }
+  assertEquals('a', buffer);
+})();
+
+
+(function TestSetForEachGC() {
+  var set = new Set();
+  for (var i = 0; i < 100; i++) {
+    set.add(i);
+  }
+
+  var accumulated = 0;
+  set.forEach(function(v) {
+    accumulated += v;
+    if (v % 10 === 0) {
+      gc();
+    }
+  });
+  assertEquals(4950, accumulated);
+})();
+
+(function TestMapForEachInvalidTypes() {
+  assertThrows(function() {
+    Map.prototype.forEach.call({});
+  }, TypeError);
+
+  var map = new Map();
+  assertThrows(function() {
+    map.forEach({});
+  }, TypeError);
+})();
+
+
+(function TestMapForEach() {
+  var map = new Map();
+  map.set(0, 'a');
+  map.set(1, 'b');
+  map.set(2, 'c');
+
+  var buffer = [];
+  var receiver = {};
+  map.forEach(function(v, k, m) {
+    assertEquals(map, m);
+    assertEquals(this, receiver);
+    buffer.push(k, v);
+    if (k === 0) {
+      map.delete(1);
+      map.set(3, 'd');
+      map.set(4, 'e');
+      map.set(5, 'f');
+    } else if (k === 2) {
+      map.set(1, 'B');
+      map.delete(4);
+    }
+  }, receiver);
+
+  assertArrayEquals([0, 'a', 2, 'c', 3, 'd', 5, 'f', 1, 'B'], buffer);
+})();
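+// (Entries added during iteration are visited in insertion order; deleting
+// key 1 and re-setting it moves it to the end, while key 4 is removed
+// before its turn, yielding the order asserted above.)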
+
+
+(function TestMapForEachAddAtEnd() {
+  var map = new Map();
+  map.set(0, 'a');
+  map.set(1, 'b');
+
+  var buffer = [];
+  map.forEach(function(v, k) {
+    buffer.push(k, v);
+    if (k === 1) {
+      map.set(2, 'c');
+    }
+  });
+
+  assertArrayEquals([0, 'a', 1, 'b', 2, 'c'], buffer);
+})();
+
+
+(function TestMapForEachDeleteNext() {
+  var map = new Map();
+  map.set(0, 'a');
+  map.set(1, 'b');
+  map.set(2, 'c');
+
+  var buffer = [];
+  map.forEach(function(v, k) {
+    buffer.push(k, v);
+    if (k === 1) {
+      map.delete(2);
+    }
+  });
+
+  assertArrayEquals([0, 'a', 1, 'b'], buffer);
+})();
+
+
+(function TestMapForEachDeleteVisitedAndAddAgain() {
+  var map = new Map();
+  map.set(0, 'a');
+  map.set(1, 'b');
+  map.set(2, 'c');
+
+  var buffer = [];
+  map.forEach(function(v, k) {
+    buffer.push(k, v);
+    if (k === 1) {
+      map.delete(0);
+    } else if (k === 2) {
+      map.set(0, 'a');
+    }
+  });
+
+  assertArrayEquals([0, 'a', 1, 'b', 2, 'c', 0, 'a'], buffer);
+})();
+
+
+(function TestMapForEachClear() {
+  var map = new Map();
+  map.set(0, 'a');
+  map.set(1, 'b');
+  map.set(2, 'c');
+
+  var buffer = [];
+  map.forEach(function(v, k) {
+    buffer.push(k, v);
+    if (k === 0) {
+      map.clear();
+      map.set(3, 'd');
+      map.set(4, 'e');
+    }
+  });
+
+  assertArrayEquals([0, 'a', 3, 'd', 4, 'e'], buffer);
+})();
+
+
+(function TestMapForEachNested() {
+  var map = new Map();
+  map.set(0, 'a');
+  map.set(1, 'b');
+  map.set(2, 'c');
+
+  var buffer = [];
+  map.forEach(function(v, k) {
+    buffer.push(k, v);
+    map.forEach(function(v, k) {
+      buffer.push(k, v);
+      if (k === 0) {
+        map.delete(1);
+      }
+    });
+  });
+
+  assertArrayEquals([0, 'a', 0, 'a', 2, 'c', 2, 'c', 0, 'a', 2, 'c'], buffer);
+})();
+
+
+(function TestMapForEachEarlyExit() {
+  var map = new Map();
+  map.set(0, 'a');
+  map.set(1, 'b');
+  map.set(2, 'c');
+
+  var buffer = [];
+  var ex = {};
+  try {
+    map.forEach(function(v, k) {
+      buffer.push(k, v);
+      throw ex;
+    });
+  } catch (e) {
+    assertEquals(ex, e);
+  }
+  assertArrayEquals([0, 'a'], buffer);
+})();
+
+
+(function TestMapForEachGC() {
+  var map = new Map();
+  for (var i = 0; i < 100; i++) {
+    map.set(i, i);
+  }
+
+  var accumulated = 0;
+  map.forEach(function(v) {
+    accumulated += v;
+    if (v % 10 === 0) {
+      gc();
+    }
+  });
+  assertEquals(4950, accumulated);
+})();
+
+
+(function TestMapForEachAllRemovedTransition() {
+  var map = new Map;
+  map.set(0, 0);
+
+  var buffer = [];
+  map.forEach(function(v) {
+    buffer.push(v);
+    if (v === 0) {
+      for (var i = 1; i < 4; i++) {
+        map.set(i, i);
+      }
+    }
+
+    if (v === 3) {
+      for (var i = 0; i < 4; i++) {
+        map.delete(i);
+      }
+      for (var i = 4; i < 8; i++) {
+        map.set(i, i);
+      }
+    }
+  });
+
+  assertArrayEquals([0, 1, 2, 3, 4, 5, 6, 7], buffer);
+})();
+
+
+(function TestMapForEachClearTransition() {
+  var map = new Map;
+  map.set(0, 0);
+
+  var i = 0;
+  var buffer = [];
+  map.forEach(function(v) {
+    buffer.push(v);
+    if (++i < 5) {
+      for (var j = 0; j < 5; j++) {
+        map.clear();
+        map.set(i, i);
+      }
+    }
+  });
+
+  assertArrayEquals([0, 1, 2, 3, 4], buffer);
+})();
+
+
+(function TestMapForEachNestedNonTrivialTransition() {
+  var map = new Map;
+  map.set(0, 0);
+  map.set(1, 1);
+  map.set(2, 2);
+  map.set(3, 3);
+  map.delete(0);
+
+  var i = 0;
+  var buffer = [];
+  map.forEach(function(v) {
+    if (++i > 10) return;
+
+    buffer.push(v);
+
+    if (v == 3) {
+      map.delete(1);
+      for (var j = 4; j < 10; j++) {
+        map.set(j, j);
+      }
+      for (var j = 4; j < 10; j += 2) {
+        map.delete(j);
+      }
+      map.delete(2);
+
+      for (var j = 10; j < 20; j++) {
+        map.set(j, j);
+      }
+      for (var j = 10; j < 20; j += 2) {
+        map.delete(j);
+      }
+
+      map.delete(3);
+    }
+  });
+
+  assertArrayEquals([1, 2, 3, 5, 7, 9, 11, 13, 15, 17], buffer);
+})();
+
+
+(function TestMapForEachAllRemovedTransitionNoClear() {
+  var map = new Map;
+  map.set(0, 0);
+
+  var buffer = [];
+  map.forEach(function(v) {
+    buffer.push(v);
+    if (v === 0) {
+      for (var i = 1; i < 8; i++) {
+        map.set(i, i);
+      }
+    }
+
+    if (v === 4) {
+      for (var i = 0; i < 8; i++) {
+        map.delete(i);
+      }
+    }
+  });
+
+  assertArrayEquals([0, 1, 2, 3, 4], buffer);
+})();
+
+
+(function TestMapForEachNoMoreElementsAfterTransition() {
+  var map = new Map;
+  map.set(0, 0);
+
+  var buffer = [];
+  map.forEach(function(v) {
+    buffer.push(v);
+    if (v === 0) {
+      for (var i = 1; i < 16; i++) {
+        map.set(i, i);
+      }
+    }
+
+    if (v === 4) {
+      for (var i = 5; i < 16; i++) {
+        map.delete(i);
+      }
+    }
+  });
+
+  assertArrayEquals([0, 1, 2, 3, 4], buffer);
+})();
+
+
+// Allows testing iterator-based constructors easily.
+var oneAndTwo = new Map();
+var k0 = {key: 0};
+var k1 = {key: 1};
+var k2 = {key: 2};
+oneAndTwo.set(k1, 1);
+oneAndTwo.set(k2, 2);
+
+
+function TestSetConstructor(ctor) {
+  var s = new ctor(null);
+  assertSize(0, s);
+
+  s = new ctor(undefined);
+  assertSize(0, s);
+
+  // No @@iterator
+  assertThrows(function() {
+    new ctor({});
+  }, TypeError);
+  assertThrows(function() {
+    new ctor(true);
+  }, TypeError);
+
+  // @@iterator not callable
+  assertThrows(function() {
+    var object = {};
+    object[Symbol.iterator] = 42;
+    new ctor(object);
+  }, TypeError);
+
+  // @@iterator result not object
+  assertThrows(function() {
+    var object = {};
+    object[Symbol.iterator] = function() {
+      return 42;
+    };
+    new ctor(object);
+  }, TypeError);
+
+  var s2 = new Set();
+  s2.add(k0);
+  s2.add(k1);
+  s2.add(k2);
+  s = new ctor(s2.values());
+  assertSize(3, s);
+  assertTrue(s.has(k0));
+  assertTrue(s.has(k1));
+  assertTrue(s.has(k2));
+}
+TestSetConstructor(Set);
+TestSetConstructor(WeakSet);
+
+
+function TestSetConstructorAddNotCallable(ctor) {
+  var originalPrototypeAdd = ctor.prototype.add;
+  assertThrows(function() {
+    ctor.prototype.add = 42;
+    new ctor(oneAndTwo.values());
+  }, TypeError);
+  ctor.prototype.add = originalPrototypeAdd;
+}
+TestSetConstructorAddNotCallable(Set);
+TestSetConstructorAddNotCallable(WeakSet);
+
+
+function TestSetConstructorGetAddOnce(ctor) {
+  var originalPrototypeAdd = ctor.prototype.add;
+  var getAddCount = 0;
+  Object.defineProperty(ctor.prototype, 'add', {
+    get: function() {
+      getAddCount++;
+      return function() {};
+    }
+  });
+  var s = new ctor(oneAndTwo.values());
+  assertEquals(1, getAddCount);
+  assertSize(0, s);
+  Object.defineProperty(ctor.prototype, 'add', {
+    value: originalPrototypeAdd,
+    writable: true
+  });
+}
+TestSetConstructorGetAddOnce(Set);
+TestSetConstructorGetAddOnce(WeakSet);
+
+
+function TestSetConstructorAddReplaced(ctor) {
+  var originalPrototypeAdd = ctor.prototype.add;
+  var addCount = 0;
+  ctor.prototype.add = function(value) {
+    addCount++;
+    originalPrototypeAdd.call(this, value);
+    ctor.prototype.add = null;
+  };
+  var s = new ctor(oneAndTwo.keys());
+  assertEquals(2, addCount);
+  assertSize(2, s);
+  ctor.prototype.add = originalPrototypeAdd;
+}
+TestSetConstructorAddReplaced(Set);
+TestSetConstructorAddReplaced(WeakSet);
+
+
+function TestSetConstructorOrderOfDoneValue(ctor) {
+  var valueCount = 0, doneCount = 0;
+  var iterator = {
+    next: function() {
+      return {
+        get value() {
+          valueCount++;
+        },
+        get done() {
+          doneCount++;
+          throw new Error();
+        }
+      };
+    }
+  };
+  iterator[Symbol.iterator] = function() {
+    return this;
+  };
+  assertThrows(function() {
+    new ctor(iterator);
+  });
+  assertEquals(1, doneCount);
+  assertEquals(0, valueCount);
+}
+TestSetConstructorOrderOfDoneValue(Set);
+TestSetConstructorOrderOfDoneValue(WeakSet);
+
+
+function TestSetConstructorNextNotAnObject(ctor) {
+  var iterator = {
+    next: function() {
+      return 'abc';
+    }
+  };
+  iterator[Symbol.iterator] = function() {
+    return this;
+  };
+  assertThrows(function() {
+    new ctor(iterator);
+  }, TypeError);
+}
+TestSetConstructorNextNotAnObject(Set);
+TestSetConstructorNextNotAnObject(WeakSet);
+
+
+(function TestWeakSetConstructorNonObjectKeys() {
+  assertThrows(function() {
+    new WeakSet([1]);
+  }, TypeError);
+})();
+
+
+function TestSetConstructorIterableValue(ctor) {
+  'use strict';
+  // Strict mode is required to prevent implicit wrapping in the getter.
+  Object.defineProperty(Number.prototype, Symbol.iterator, {
+    get: function() {
+      assertEquals('object', typeof this);
+      return function() {
+        return oneAndTwo.keys();
+      };
+    },
+    configurable: true
+  });
+
+  var set = new ctor(42);
+  assertSize(2, set);
+  assertTrue(set.has(k1));
+  assertTrue(set.has(k2));
+
+  delete Number.prototype[Symbol.iterator];
+}
+TestSetConstructorIterableValue(Set);
+TestSetConstructorIterableValue(WeakSet);
+
+
+(function TestSetConstructorStringValue() {
+  var s = new Set('abc');
+  assertSize(3, s);
+  assertTrue(s.has('a'));
+  assertTrue(s.has('b'));
+  assertTrue(s.has('c'));
+})();
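+
+
+// Because the string is consumed through its @@iterator, duplicate
+// characters collapse like any other repeated values (illustrative sketch,
+// not part of the original test suite):
+(function SketchStringValueDeduplication() {
+  assertSize(2, new Set('aba'));  // only 'a' and 'b' remain
+})();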
+
+
+function TestMapConstructor(ctor) {
+  var m = new ctor(null);
+  assertSize(0, m);
+
+  m = new ctor(undefined);
+  assertSize(0, m);
+
+  // No @@iterator
+  assertThrows(function() {
+    new ctor({});
+  }, TypeError);
+  assertThrows(function() {
+    new ctor(true);
+  }, TypeError);
+
+  // @@iterator not callable
+  assertThrows(function() {
+    var object = {};
+    object[Symbol.iterator] = 42;
+    new ctor(object);
+  }, TypeError);
+
+  // @@iterator result not object
+  assertThrows(function() {
+    var object = {};
+    object[Symbol.iterator] = function() {
+      return 42;
+    };
+    new ctor(object);
+  }, TypeError);
+
+  var m2 = new Map();
+  m2.set(k0, 'a');
+  m2.set(k1, 'b');
+  m2.set(k2, 'c');
+  m = new ctor(m2.entries());
+  assertSize(3, m);
+  assertEquals('a', m.get(k0));
+  assertEquals('b', m.get(k1));
+  assertEquals('c', m.get(k2));
+}
+TestMapConstructor(Map);
+TestMapConstructor(WeakMap);
+
+
+function TestMapConstructorSetNotCallable(ctor) {
+  var originalPrototypeSet = ctor.prototype.set;
+  assertThrows(function() {
+    ctor.prototype.set = 42;
+    new ctor(oneAndTwo.entries());
+  }, TypeError);
+  ctor.prototype.set = originalPrototypeSet;
+}
+TestMapConstructorSetNotCallable(Map);
+TestMapConstructorSetNotCallable(WeakMap);
+
+
+function TestMapConstructorGetAddOnce(ctor) {
+  var originalPrototypeSet = ctor.prototype.set;
+  var getSetCount = 0;
+  Object.defineProperty(ctor.prototype, 'set', {
+    get: function() {
+      getSetCount++;
+      return function() {};
+    }
+  });
+  var m = new ctor(oneAndTwo.entries());
+  assertEquals(1, getSetCount);
+  assertSize(0, m);
+  Object.defineProperty(ctor.prototype, 'set', {
+    value: originalPrototypeSet,
+    writable: true
+  });
+}
+TestMapConstructorGetAddOnce(Map);
+TestMapConstructorGetAddOnce(WeakMap);
+
+
+function TestMapConstructorSetReplaced(ctor) {
+  var originalPrototypeSet = ctor.prototype.set;
+  var setCount = 0;
+  ctor.prototype.set = function(key, value) {
+    setCount++;
+    originalPrototypeSet.call(this, key, value);
+    ctor.prototype.set = null;
+  };
+  var m = new ctor(oneAndTwo.entries());
+  assertEquals(2, setCount);
+  assertSize(2, m);
+  ctor.prototype.set = originalPrototypeSet;
+}
+TestMapConstructorSetReplaced(Map);
+TestMapConstructorSetReplaced(WeakMap);
+
+
+function TestMapConstructorOrderOfDoneValue(ctor) {
+  var valueCount = 0, doneCount = 0;
+  function FakeError() {}
+  var iterator = {
+    next: function() {
+      return {
+        get value() {
+          valueCount++;
+        },
+        get done() {
+          doneCount++;
+          throw new FakeError();
+        }
+      };
+    }
+  };
+  iterator[Symbol.iterator] = function() {
+    return this;
+  };
+  assertThrows(function() {
+    new ctor(iterator);
+  }, FakeError);
+  assertEquals(1, doneCount);
+  assertEquals(0, valueCount);
+}
+TestMapConstructorOrderOfDoneValue(Map);
+TestMapConstructorOrderOfDoneValue(WeakMap);
+
+
+function TestMapConstructorNextNotAnObject(ctor) {
+  var iterator = {
+    next: function() {
+      return 'abc';
+    }
+  };
+  iterator[Symbol.iterator] = function() {
+    return this;
+  };
+  assertThrows(function() {
+    new ctor(iterator);
+  }, TypeError);
+}
+TestMapConstructorNextNotAnObject(Map);
+TestMapConstructorNextNotAnObject(WeakMap);
+
+
+function TestMapConstructorIteratorNotObjectValues(ctor) {
+  assertThrows(function() {
+    new ctor(oneAndTwo.values());
+  }, TypeError);
+}
+TestMapConstructorIteratorNotObjectValues(Map);
+TestMapConstructorIteratorNotObjectValues(WeakMap);
+
+
+(function TestWeakMapConstructorNonObjectKeys() {
+  assertThrows(function() {
+    new WeakMap([[1, 2]]);
+  }, TypeError);
+})();
+
+
+function TestMapConstructorIterableValue(ctor) {
+  'use strict';
+  // Strict mode is required to prevent implicit wrapping in the getter.
+  Object.defineProperty(Number.prototype, Symbol.iterator, {
+    get: function() {
+      assertEquals('object', typeof this);
+      return function() {
+        return oneAndTwo.entries();
+      };
+    },
+    configurable: true
+  });
+
+  var map = new ctor(42);
+  assertSize(2, map);
+  assertEquals(1, map.get(k1));
+  assertEquals(2, map.get(k2));
+
+  delete Number.prototype[Symbol.iterator];
+}
+TestMapConstructorIterableValue(Map);
+TestMapConstructorIterableValue(WeakMap);
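+
+
+// The Map constructor reads properties 0 and 1 off each iterated entry,
+// which is why iterating plain non-object values above throws a TypeError.
+// A sketch of the accepted entry shapes (illustrative only, not part of the
+// original test suite):
+(function SketchMapEntryShape() {
+  var m = new Map([['x', 1], {0: 'y', 1: 2}]);
+  assertSize(2, m);
+  assertEquals(1, m.get('x'));
+  assertEquals(2, m.get('y'));
+})();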
diff --git a/test/mjsunit/es6/debug-promises-caught-all.js b/test/mjsunit/es6/debug-promises-caught-all.js
deleted file mode 100644
index 5189373..0000000
--- a/test/mjsunit/es6/debug-promises-caught-all.js
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test debug events when we listen to all exceptions and
-// there is a catch handler for the exception thrown in a Promise.
-// We expect a normal Exception debug event to be triggered.
-
-Debug = debug.Debug;
-
-var log = [];
-var step = 0;
-
-var p = new Promise(function(resolve, reject) {
-  log.push("resolve");
-  resolve();
-});
-
-var q = p.chain(
-  function() {
-    log.push("throw");
-    throw new Error("caught");
-  });
-
-q.catch(
-  function(e) {
-    assertEquals("caught", e.message);
-  });
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    // Ignore exceptions during startup in stress runs.
-    if (step >= 1) return;
-    assertEquals(["resolve", "end main", "throw"], log);
-    if (event == Debug.DebugEvent.Exception) {
-      assertEquals("caught", event_data.exception().message);
-      assertEquals(undefined, event_data.promise());
-      assertFalse(event_data.uncaught());
-      step++;
-    }
-  } catch (e) {
-    // Signal a failure with exit code 1.  This is necessary since the
-    // debugger swallows exceptions and we expect the chained function
-    // and this listener to be executed after the main script is finished.
-    print("Unexpected exception: " + e + "\n" + e.stack);
-    quit(1);
-  }
-}
-
-Debug.setBreakOnException();
-Debug.setListener(listener);
-
-log.push("end main");
diff --git a/test/mjsunit/es6/debug-promises-caught-late.js b/test/mjsunit/es6/debug-promises-caught-late.js
deleted file mode 100644
index 66e073d..0000000
--- a/test/mjsunit/es6/debug-promises-caught-late.js
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test debug events when we only listen to uncaught exceptions, the Promise
-// throws, and a catch handler is installed right before throwing.
-// We expect no debug event to be triggered.
-
-Debug = debug.Debug;
-
-var p = new Promise(function(resolve, reject) {
-  resolve();
-});
-
-var q = p.chain(
-  function() {
-    q.catch(function(e) {
-      assertEquals("caught", e.message);
-    });
-    throw new Error("caught");
-  });
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    assertTrue(event != Debug.DebugEvent.Exception);
-  } catch (e) {
-    // Signal a failure with exit code 1.  This is necessary since the
-    // debugger swallows exceptions and we expect the chained function
-    // and this listener to be executed after the main script is finished.
-    print("Unexpected exception: " + e + "\n" + e.stack);
-    quit(1);
-  }
-}
-
-Debug.setBreakOnUncaughtException();
-Debug.setListener(listener);
diff --git a/test/mjsunit/es6/debug-promises-caught-uncaught.js b/test/mjsunit/es6/debug-promises-caught-uncaught.js
deleted file mode 100644
index 9620d31..0000000
--- a/test/mjsunit/es6/debug-promises-caught-uncaught.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test debug events when we only listen to uncaught exceptions and
-// there is a catch handler for the exception thrown in a Promise.
-// We expect no debug event to be triggered.
-
-Debug = debug.Debug;
-
-var p = new Promise(function(resolve, reject) {
-  resolve();
-});
-
-var q = p.chain(
-  function() {
-    throw new Error("caught");
-  });
-
-q.catch(
-  function(e) {
-    assertEquals("caught", e.message);
-  });
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    assertTrue(event != Debug.DebugEvent.Exception);
-  } catch (e) {
-    // Signal a failure with exit code 1.  This is necessary since the
-    // debugger swallows exceptions and we expect the chained function
-    // and this listener to be executed after the main script is finished.
-    print("Unexpected exception: " + e + "\n" + e.stack);
-    quit(1);
-  }
-}
-
-Debug.setBreakOnUncaughtException();
-Debug.setListener(listener);
diff --git a/test/mjsunit/es6/debug-promises-reentry.js b/test/mjsunit/es6/debug-promises-reentry.js
deleted file mode 100644
index 03c7fc2..0000000
--- a/test/mjsunit/es6/debug-promises-reentry.js
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test reentry of special try catch for Promises.
-
-Debug = debug.Debug;
-
-Debug.setBreakOnUncaughtException();
-Debug.setListener(function(event, exec_state, event_data, data) { });
-
-var p = new Promise(function(resolve, reject) { resolve(); });
-var q = p.chain(function() {
-  new Promise(function(resolve, reject) { resolve(); });
-});
diff --git a/test/mjsunit/es6/debug-promises-throw-in-constructor.js b/test/mjsunit/es6/debug-promises-throw-in-constructor.js
deleted file mode 100644
index d0267ce..0000000
--- a/test/mjsunit/es6/debug-promises-throw-in-constructor.js
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test debug events when we only listen to uncaught exceptions and
-// an exception is thrown in the Promise constructor.
-// We expect an Exception debug event with a promise to be triggered.
-
-Debug = debug.Debug;
-
-var step = 0;
-var exception = null;
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    // Ignore exceptions during startup in stress runs.
-    if (step >= 1) return;
-    if (event == Debug.DebugEvent.Exception) {
-      assertEquals(0, step);
-      assertEquals("uncaught", event_data.exception().message);
-      assertTrue(event_data.promise() instanceof Promise);
-      assertTrue(event_data.uncaught());
-      // Assert that the debug event is triggered at the throw site.
-      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
-      step++;
-    }
-  } catch (e) {
-    // Signal a failure with exit code 1.  This is necessary since the
-    // debugger swallows exceptions and we expect the chained function
-    // and this listener to be executed after the main script is finished.
-    print("Unexpected exception: " + e + "\n" + e.stack);
-    exception = e;
-  }
-}
-
-Debug.setBreakOnUncaughtException();
-Debug.setListener(listener);
-
-var p = new Promise(function(resolve, reject) {
-  throw new Error("uncaught");  // event
-});
-
-assertEquals(1, step);
-assertNull(exception);
diff --git a/test/mjsunit/es6/debug-promises-throw-in-reject.js b/test/mjsunit/es6/debug-promises-throw-in-reject.js
deleted file mode 100644
index cdf7596..0000000
--- a/test/mjsunit/es6/debug-promises-throw-in-reject.js
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test debug events when an exception is thrown inside a Promise, which is
-// caught by a custom promise, which throws a new exception in its reject
-// handler.  We expect an Exception debug event with a promise to be triggered.
-
-Debug = debug.Debug;
-
-var log = [];
-var step = 0;
-
-var p = new Promise(function(resolve, reject) {
-  log.push("resolve");
-  resolve();
-});
-
-function MyPromise(resolver) {
-  var reject = function() {
-    log.push("throw reject");
-    throw new Error("reject");  // event
-  };
-  var resolve = function() { };
-  log.push("construct");
-  resolver(resolve, reject);
-};
-
-MyPromise.prototype = p;
-p.constructor = MyPromise;
-
-var q = p.chain(
-  function() {
-    log.push("throw caught");
-    throw new Error("caught");
-  });
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    if (event == Debug.DebugEvent.Exception) {
-      assertEquals(["resolve", "construct", "end main",
-                    "throw caught", "throw reject"], log);
-      assertEquals("reject", event_data.exception().message);
-      assertEquals(q, event_data.promise());
-      assertTrue(exec_state.frame(0).sourceLineText().indexOf('// event') > 0);
-    }
-  } catch (e) {
-    // Signal a failure with exit code 1.  This is necessary since the
-    // debugger swallows exceptions and we expect the chained function
-    // and this listener to be executed after the main script is finished.
-    print("Unexpected exception: " + e + "\n" + e.stack);
-    quit(1);
-  }
-}
-
-Debug.setBreakOnUncaughtException();
-Debug.setListener(listener);
-
-log.push("end main");
diff --git a/test/mjsunit/es6/debug-promises-uncaught-all.js b/test/mjsunit/es6/debug-promises-uncaught-all.js
deleted file mode 100644
index 714e7da..0000000
--- a/test/mjsunit/es6/debug-promises-uncaught-all.js
+++ /dev/null
@@ -1,55 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test debug events when we listen to all exceptions and
-// there is no catch handler for the exception thrown in a Promise.
-// We expect an Exception debug event with a promise to be triggered.
-
-Debug = debug.Debug;
-
-var log = [];
-var step = 0;
-var exception = undefined;
-
-var p = new Promise(function(resolve, reject) {
-  log.push("resolve");
-  resolve();
-});
-
-var q = p.chain(
-  function() {
-    log.push("throw");
-    throw new Error("uncaught");  // event
-  });
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    // Ignore exceptions during startup in stress runs.
-    if (step >= 1) return;
-    assertEquals(["resolve", "end main", "throw"], log);
-    if (event == Debug.DebugEvent.Exception) {
-      assertEquals(0, step);
-      assertEquals("uncaught", event_data.exception().message);
-      assertTrue(event_data.promise() instanceof Promise);
-      assertEquals(q, event_data.promise());
-      assertTrue(event_data.uncaught());
-      // Assert that the debug event is triggered at the throw site.
-      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
-      step++;
-    }
-  } catch (e) {
-    // Signal a failure with exit code 1.  This is necessary since the
-    // debugger swallows exceptions and we expect the chained function
-    // and this listener to be executed after the main script is finished.
-    print("Unexpected exception: " + e + "\n" + e.stack);
-    quit(1);
-  }
-}
-
-Debug.setBreakOnException();
-Debug.setListener(listener);
-
-log.push("end main");
diff --git a/test/mjsunit/es6/debug-promises-uncaught-uncaught.js b/test/mjsunit/es6/debug-promises-uncaught-uncaught.js
deleted file mode 100644
index fa97ac0..0000000
--- a/test/mjsunit/es6/debug-promises-uncaught-uncaught.js
+++ /dev/null
@@ -1,54 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test debug events when we only listen to uncaught exceptions and
-// there is no catch handler for the exception thrown in a Promise.
-// We expect an Exception debug event with a promise to be triggered.
-
-Debug = debug.Debug;
-
-var log = [];
-var step = 0;
-
-var p = new Promise(function(resolve, reject) {
-  log.push("resolve");
-  resolve();
-});
-
-var q = p.chain(
-  function() {
-    log.push("throw");
-    throw new Error("uncaught");  // event
-  });
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    // Ignore exceptions during startup in stress runs.
-    if (step >= 1) return;
-    assertEquals(["resolve", "end main", "throw"], log);
-    if (event == Debug.DebugEvent.Exception) {
-      assertEquals(0, step);
-      assertEquals("uncaught", event_data.exception().message);
-      assertTrue(event_data.promise() instanceof Promise);
-      assertEquals(q, event_data.promise());
-      assertTrue(event_data.uncaught());
-      // Assert that the debug event is triggered at the throw site.
-      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
-      step++;
-    }
-  } catch (e) {
-    // Signal a failure with exit code 1.  This is necessary since the
-    // debugger swallows exceptions and we expect the chained function
-    // and this listener to be executed after the main script is finished.
-    print("Unexpected exception: " + e + "\n" + e.stack);
-    quit(1);
-  }
-}
-
-Debug.setBreakOnUncaughtException();
-Debug.setListener(listener);
-
-log.push("end main");
diff --git a/test/mjsunit/es6/debug-promises-undefined-reject.js b/test/mjsunit/es6/debug-promises-undefined-reject.js
deleted file mode 100644
index 5bad5bd..0000000
--- a/test/mjsunit/es6/debug-promises-undefined-reject.js
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-promises --expose-debug-as debug
-
-// Test debug events when an exception is thrown inside a Promise, which is
-// caught by a custom promise, which has no reject handler.
-// We expect an Exception event with a promise to be triggered.
-
-Debug = debug.Debug;
-
-var log = [];
-var step = 0;
-
-var p = new Promise(function(resolve, reject) {
-  log.push("resolve");
-  resolve();
-});
-
-function MyPromise(resolver) {
-  var reject = undefined;
-  var resolve = function() { };
-  log.push("construct");
-  resolver(resolve, reject);
-};
-
-MyPromise.prototype = p;
-p.constructor = MyPromise;
-
-var q = p.chain(
-  function() {
-    log.push("throw caught");
-    throw new Error("caught");  // event
-  });
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    if (event == Debug.DebugEvent.Exception) {
-      assertEquals(["resolve", "construct", "end main", "throw caught"], log);
-      assertEquals("undefined is not a function",
-                   event_data.exception().message);
-      assertEquals(q, event_data.promise());
-    }
-  } catch (e) {
-    // Signal a failure with exit code 1.  This is necessary since the
-    // debugger swallows exceptions and we expect the chained function
-    // and this listener to be executed after the main script is finished.
-    print("Unexpected exception: " + e + "\n" + e.stack);
-    quit(1);
-  }
-}
-
-Debug.setBreakOnUncaughtException();
-Debug.setListener(listener);
-
-log.push("end main");
diff --git a/test/mjsunit/es6/debug-promises/async-task-event.js b/test/mjsunit/es6/debug-promises/async-task-event.js
new file mode 100644
index 0000000..88030a2
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/async-task-event.js
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug;
+
+var base_id = -1;
+var exception = null;
+var expected = [
+  "enqueue #1",
+  "willHandle #1",
+  "then #1",
+  "enqueue #2",
+  "didHandle #1",
+  "willHandle #2",
+  "then #2",
+  "enqueue #3",
+  "didHandle #2",
+  "willHandle #3",
+  "didHandle #3"
+];
+
+function assertLog(msg) {
+  print(msg);
+  assertTrue(expected.length > 0);
+  assertEquals(expected.shift(), msg);
+  if (!expected.length) {
+    Debug.setListener(null);
+  }
+}
+
+function listener(event, exec_state, event_data, data) {
+  if (event != Debug.DebugEvent.AsyncTaskEvent) return;
+  try {
+    if (base_id < 0)
+      base_id = event_data.id();
+    var id = event_data.id() - base_id + 1;
+    assertEquals("Promise.resolve", event_data.name());
+    assertLog(event_data.type() + " #" + id);
+  } catch (e) {
+    print(e + "\n" + e.stack);
+    exception = e;
+  }
+}
+
+Debug.setListener(listener);
+
+var resolver;
+var p = new Promise(function(resolve, reject) {
+  resolver = resolve;
+});
+p.then(function() {
+  assertLog("then #1");
+}).then(function() {
+  assertLog("then #2");
+});
+resolver();
+
+assertNull(exception);
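+
+// Reading the expected log above: for each promise reaction the debugger
+// reports "enqueue" when the reaction is scheduled, "willHandle" just before
+// its callback runs, and "didHandle" right after it returns. The "then #n"
+// entries come from the test's own callbacks in between: resolving p
+// enqueues reaction #1, and each then-callback enqueues the next one.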
diff --git a/test/mjsunit/es6/debug-promises/events.js b/test/mjsunit/es6/debug-promises/events.js
new file mode 100644
index 0000000..a9f9454
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/events.js
@@ -0,0 +1,124 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug;
+
+var eventsExpected = 16;
+var exception = null;
+var result = [];
+
+function updatePromise(promise, parentPromise, status, value) {
+  var i;
+  for (i = 0; i < result.length; ++i) {
+    if (result[i].promise === promise) {
+      result[i].parentPromise = parentPromise || result[i].parentPromise;
+      result[i].status = status || result[i].status;
+      result[i].value = value || result[i].value;
+      break;
+    }
+  }
+  assertTrue(i < result.length);
+}
+
+function listener(event, exec_state, event_data, data) {
+  if (event != Debug.DebugEvent.PromiseEvent) return;
+  try {
+    eventsExpected--;
+    assertTrue(event_data.promise().isPromise());
+    if (event_data.status() === 0) {
+      // New promise.
+      assertEquals("pending", event_data.promise().status());
+      result.push({ promise: event_data.promise().value(), status: 0 });
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+    } else if (event_data.status() !== undefined) {
+      // Resolve/reject promise.
+      updatePromise(event_data.promise().value(),
+                    undefined,
+                    event_data.status(),
+                    event_data.value().value());
+    } else {
+      // Chain promises.
+      assertTrue(event_data.parentPromise().isPromise());
+      updatePromise(event_data.promise().value(),
+                    event_data.parentPromise().value());
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+    }
+  } catch (e) {
+    print(e + "\n" + e.stack);
+    exception = e;
+  }
+}
+
+Debug.setListener(listener);
+
+function resolver(resolve, reject) { resolve(); }
+
+var p1 = new Promise(resolver);  // event
+var p2 = p1.then().then();  // event
+var p3 = new Promise(function(resolve, reject) {  // event
+  reject("rejected");
+});
+var p4 = p3.then();  // event
+var p5 = p1.then();  // event
+
+function assertAsync(b, s) {
+  if (b) {
+    print(s, "succeeded");
+  } else {
+    %AbortJS(s + " FAILED!");
+  }
+}
+
+function testDone(iteration) {
+  function checkResult() {
+    if (eventsExpected === 0) {
+      assertAsync(result.length === 6, "result.length");
+
+      assertAsync(result[0].promise === p1, "result[0].promise");
+      assertAsync(result[0].parentPromise === undefined,
+                  "result[0].parentPromise");
+      assertAsync(result[0].status === 1, "result[0].status");
+      assertAsync(result[0].value === undefined, "result[0].value");
+
+      assertAsync(result[1].parentPromise === p1,
+                  "result[1].parentPromise");
+      assertAsync(result[1].status === 1, "result[1].status");
+
+      assertAsync(result[2].promise === p2, "result[2].promise");
+
+      assertAsync(result[3].promise === p3, "result[3].promise");
+      assertAsync(result[3].parentPromise === undefined,
+                  "result[3].parentPromise");
+      assertAsync(result[3].status === -1, "result[3].status");
+      assertAsync(result[3].value === "rejected", "result[3].value");
+
+      assertAsync(result[4].promise === p4, "result[4].promise");
+      assertAsync(result[4].parentPromise === p3,
+                  "result[4].parentPromise");
+      assertAsync(result[4].status === -1, "result[4].status");
+      assertAsync(result[4].value === "rejected", "result[4].value");
+
+      assertAsync(result[5].promise === p5, "result[5].promise");
+      assertAsync(result[5].parentPromise === p1,
+                  "result[5].parentPromise");
+      assertAsync(result[5].status === 1, "result[5].status");
+
+      assertAsync(exception === null, "exception === null");
+      Debug.setListener(null);
+    } else if (iteration > 10) {
+      %AbortJS("Not all events were received!");
+    } else {
+      testDone(iteration + 1);
+    }
+  }
+
+  var iteration = iteration || 0;
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone();
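+
+// Status codes as this test interprets them: 0 marks a newly created
+// promise, 1 a resolved promise, and -1 a rejected one; an event whose
+// status is undefined reports chaining and carries the parent promise
+// instead of a new status.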
diff --git a/test/mjsunit/es6/debug-promises/reentry.js b/test/mjsunit/es6/debug-promises/reentry.js
new file mode 100644
index 0000000..fbe5424
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reentry.js
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test reentry of special try catch for Promises.
+
+Debug = debug.Debug;
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(function(event, exec_state, event_data, data) { });
+
+var p = new Promise(function(resolve, reject) { resolve(); });
+var q = p.chain(function() {
+  new Promise(function(resolve, reject) { resolve(); });
+});
diff --git a/test/mjsunit/es6/debug-promises/reject-after-resolve.js b/test/mjsunit/es6/debug-promises/reject-after-resolve.js
new file mode 100644
index 0000000..a0036cf
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-after-resolve.js
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we listen to all exceptions and
+// the Promise is rejected in a chained closure after it has been resolved.
+// We expect no Exception debug event to be triggered.
+
+Debug = debug.Debug;
+
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve(reject);
+});
+
+var q = p.chain(
+  function(value) {
+    assertEquals(["resolve", "end main"], log);
+    value(new Error("reject"));
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    assertTrue(event != Debug.DebugEvent.Exception);
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+log.push("end main");
diff --git a/test/mjsunit/es6/debug-promises/reject-caught-all.js b/test/mjsunit/es6/debug-promises/reject-caught-all.js
new file mode 100644
index 0000000..0fca577
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-caught-all.js
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we listen to all exceptions and
+// there is a catch handler for the to-be-rejected Promise.
+// We expect a normal Exception debug event to be triggered.
+
+Debug = debug.Debug;
+
+var log = [];
+var expected_events = 1;
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+var q = p.chain(
+  function(value) {
+    log.push("reject");
+    return Promise.reject(new Error("reject"));
+  });
+
+q.catch(
+  function(e) {
+    assertEquals("reject", e.message);
+  });
+
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("reject", event_data.exception().message);
+      assertSame(q, event_data.promise());
+      assertFalse(event_data.uncaught());
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "end main", "reject"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
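+
+// A note on the testDone/Object.observe pattern used here and in the tests
+// below: Object.observe delivers change records asynchronously, on the same
+// microtask queue that runs promise reactions, so mutating dummy schedules
+// one checkResult call per turn. The test therefore re-checks
+// expected_events once per turn and aborts after ten unsuccessful turns.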
diff --git a/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js b/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
new file mode 100644
index 0000000..63151df
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-caught-by-default-reject-handler.js
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is only a default reject handler for the to-be-rejected Promise.
+// We expect two Exception debug events:
+//  - when the first Promise is rejected and only has default reject handlers.
+//  - when the default reject handler passes the rejection on.
+
+Debug = debug.Debug;
+
+var expected_events = 2;
+var log = [];
+
+var resolve, reject;
+var p0 = new Promise(function(res, rej) { resolve = res; reject = rej; });
+var p1 = p0.then(function() {
+  log.push("p0.then");
+  return Promise.reject(new Error("123"));
+});
+var p2 = p1.then(function() {
+  log.push("p1.then");
+});
+
+var q = new Promise(function(res, rej) {
+  log.push("resolve q");
+  res();
+});
+
+q.then(function() {
+  log.push("resolve p");
+  resolve();
+});
+
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertTrue(event_data.uncaught());
+      assertTrue(event_data.promise() instanceof Promise);
+      if (expected_events == 1) {
+        // p1 is rejected, uncaught except for its default reject handler.
+        assertEquals(0, exec_state.frameCount());
+        assertSame(p1, event_data.promise());
+      } else {
+        // p2 is rejected by p1's default reject handler.
+        assertEquals(0, exec_state.frameCount());
+        assertSame(p2, event_data.promise());
+      }
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve q", "end main", "resolve p", "p0.then"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/reject-caught-late.js b/test/mjsunit/es6/debug-promises/reject-caught-late.js
new file mode 100644
index 0000000..2ff13d5
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-caught-late.js
@@ -0,0 +1,34 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions, the Promise
+// is rejected, and a catch handler is installed right before the rejection.
+// We expect no debug event to be triggered.
+
+Debug = debug.Debug;
+
+var p = new Promise(function(resolve, reject) {
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    q.catch(function(e) {
+      assertEquals("caught", e.message);
+    });
+    return Promise.reject(Error("caught"));
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    assertTrue(event != Debug.DebugEvent.Exception);
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
diff --git a/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js b/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
new file mode 100644
index 0000000..d3fd9f3
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-caught-uncaught.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is a catch handler for the to-be-rejected Promise.
+// We expect no debug event to be triggered.
+
+Debug = debug.Debug;
+
+var p = new Promise(function(resolve, reject) {
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    return Promise.reject(Error("caught reject"));
+  });
+
+q.catch(
+  function(e) {
+    assertEquals("caught reject", e.message);
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    assertTrue(event != Debug.DebugEvent.Exception);
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
diff --git a/test/mjsunit/es6/debug-promises/reject-in-constructor.js b/test/mjsunit/es6/debug-promises/reject-in-constructor.js
new file mode 100644
index 0000000..a05b3ac
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-in-constructor.js
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test debug events when we only listen to uncaught exceptions and
+// the Promise is rejected in the Promise constructor.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var steps = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      steps++;
+      assertEquals("uncaught", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertTrue(event_data.uncaught());
+      // Assert that the debug event is triggered at the throw site.
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+var p = new Promise(function(resolve, reject) {
+  reject(new Error("uncaught"));  // event
+});
+
+assertEquals(1, steps);
+assertNull(exception);
diff --git a/test/mjsunit/es6/debug-promises/reject-uncaught-all.js b/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
new file mode 100644
index 0000000..beaf187
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-uncaught-all.js
@@ -0,0 +1,69 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we listen to all exceptions and
+// there is no catch handler for the to-be-rejected Promise.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    log.push("reject");
+    return Promise.reject(new Error("uncaught reject"));
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("uncaught reject", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertSame(q, event_data.promise());
+      assertTrue(event_data.uncaught());
+      // All of the frames on the stack are from native JavaScript.
+      assertEquals(0, exec_state.frameCount());
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "end main", "reject"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/reject-uncaught-late.js b/test/mjsunit/es6/debug-promises/reject-uncaught-late.js
new file mode 100644
index 0000000..4a883da
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-uncaught-late.js
@@ -0,0 +1,76 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is no catch handler for the to-be-rejected Promise.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var reject_closure;
+
+var p = new Promise(function(resolve, reject) {
+  log.push("postpone p");
+  reject_closure = reject;
+});
+
+var q = new Promise(function(resolve, reject) {
+  log.push("resolve q");
+  resolve();
+});
+
+q.then(function() {
+  log.push("reject p");
+  reject_closure(new Error("uncaught reject p"));  // event
+});
+
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("uncaught reject p", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertSame(p, event_data.promise());
+      assertTrue(event_data.uncaught());
+      // Assert that the debug event is triggered at the throw site.
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["postpone p", "resolve q", "end main", "reject p"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js b/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
new file mode 100644
index 0000000..86e2a81
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-uncaught-uncaught.js
@@ -0,0 +1,69 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is no catch handler for the to-be-rejected Promise.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    log.push("reject");
+    return Promise.reject(Error("uncaught reject"));  // event
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("uncaught reject", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertSame(q, event_data.promise());
+      assertTrue(event_data.uncaught());
+      // All of the frames on the stack are from native JavaScript.
+      assertEquals(0, exec_state.frameCount());
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "end main", "reject"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js b/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
new file mode 100644
index 0000000..fc6233d
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-with-invalid-reject.js
@@ -0,0 +1,77 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when a Promise is rejected, which is caught by a custom
+// promise, which has a number for its reject closure.  We expect an Exception
+// debug event when trying to call the invalid reject closure.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+function MyPromise(resolver) {
+  var reject = 1;
+  var resolve = function() { };
+  log.push("construct");
+  resolver(resolve, reject);
+};
+
+MyPromise.prototype = new Promise(function() {});
+p.constructor = MyPromise;
+
+var q = p.chain(
+  function() {
+    log.push("reject caught");
+    return Promise.reject(new Error("caught"));
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("number is not a function", event_data.exception().message);
+      // All of the frames on the stack are from native JavaScript.
+      assertEquals(0, exec_state.frameCount());
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "construct", "end main", "reject caught"],
+                     log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
+
+log.push("end main");
diff --git a/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js b/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
new file mode 100644
index 0000000..15e464e
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-with-throw-in-reject.js
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when a Promise is rejected, which is caught by a
+// custom promise, which throws a new exception in its reject handler.
+// We expect one Exception debug event, triggered when the custom reject
+// closure in MyPromise throws an exception.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+function MyPromise(resolver) {
+  var reject = function() {
+    log.push("throw in reject");
+    throw new Error("reject");  // event
+  };
+  var resolve = function() { };
+  log.push("construct");
+  resolver(resolve, reject);
+};
+
+MyPromise.prototype = new Promise(function() {});
+p.constructor = MyPromise;
+
+var q = p.chain(
+  function() {
+    log.push("reject caught");
+    return Promise.reject(new Error("caught"));
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("reject", event_data.exception().message);
+      // Assert that the debug event is triggered at the throw site.
+      assertTrue(
+          exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+    }
+  } catch (e) {
+    // Signal a failure with exit code 1.  This is necessary since the
+    // debugger swallows exceptions and we expect the chained function
+    // and this listener to be executed after the main script is finished.
+    print("Unexpected exception: " + e + "\n" + e.stack);
+    quit(1);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "construct", "end main",
+                      "reject caught", "throw in reject"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js b/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
new file mode 100644
index 0000000..d11c01f
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/reject-with-undefined-reject.js
@@ -0,0 +1,77 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when a Promise is rejected, which is caught by a custom
+// promise, which has undefined for its reject closure.  We expect an
+// Exception debug event when calling the (undefined) custom reject closure.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+function MyPromise(resolver) {
+  var reject = undefined;
+  var resolve = function() { };
+  log.push("construct");
+  resolver(resolve, reject);
+};
+
+MyPromise.prototype = new Promise(function() {});
+p.constructor = MyPromise;
+
+var q = p.chain(
+  function() {
+    log.push("reject caught");
+    return Promise.reject(new Error("caught"));
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("caught", event_data.exception().message);
+      // All of the frames on the stack are from native JavaScript.
+      assertEquals(0, exec_state.frameCount());
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "construct", "end main", "reject caught"],
+                     log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
+
+log.push("end main");
diff --git a/test/mjsunit/es6/debug-promises/throw-caught-all.js b/test/mjsunit/es6/debug-promises/throw-caught-all.js
new file mode 100644
index 0000000..2fbf051
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-caught-all.js
@@ -0,0 +1,71 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we listen to all exceptions and
+// there is a catch handler for the exception thrown in a Promise.
+// We expect a normal Exception debug event to be triggered.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    log.push("throw");
+    throw new Error("caught");
+  });
+
+q.catch(
+  function(e) {
+    assertEquals("caught", e.message);
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("caught", event_data.exception().message);
+      assertSame(q, event_data.promise());
+      assertFalse(event_data.uncaught());
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "end main", "throw"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js b/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
new file mode 100644
index 0000000..36b5565
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-caught-by-default-reject-handler.js
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is only a default reject handler for the to-be-rejected Promise.
+// We expect two Exception debug events:
+//  - when the first Promise is rejected and only has default reject handlers.
+//  - when the default reject handler passes the rejection on.
+
+Debug = debug.Debug;
+
+var expected_events = 2;
+var log = [];
+
+var resolve, reject;
+var p0 = new Promise(function(res, rej) { resolve = res; reject = rej; });
+var p1 = p0.then(function() {
+  log.push("p0.then");
+  throw new Error("123");  // event
+});
+var p2 = p1.then(function() {
+  log.push("p1.then");
+});
+
+var q = new Promise(function(res, rej) {
+  log.push("resolve q");
+  res();
+});
+
+q.then(function() {
+  log.push("resolve p");
+  resolve();
+});
+
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertTrue(event_data.uncaught());
+      assertTrue(event_data.promise() instanceof Promise);
+      if (expected_events == 1) {
+        // p1 is rejected, uncaught except for its default reject handler.
+        assertTrue(
+            exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+        assertSame(p1, event_data.promise());
+      } else {
+        // p2 is rejected by p1's default reject handler.
+        assertEquals(0, exec_state.frameCount());
+        assertSame(p2, event_data.promise());
+      }
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve q", "end main", "resolve p", "p0.then"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/throw-caught-late.js b/test/mjsunit/es6/debug-promises/throw-caught-late.js
new file mode 100644
index 0000000..ac79aba
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-caught-late.js
@@ -0,0 +1,34 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions, the Promise
+// throws, and a catch handler is installed right before throwing.
+// We expect no debug event to be triggered.
+
+Debug = debug.Debug;
+
+var p = new Promise(function(resolve, reject) {
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    q.catch(function(e) {
+      assertEquals("caught", e.message);
+    });
+    throw new Error("caught");
+  });
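+
+// The q.catch call above runs before the throw, so q already has a reject
+// handler by the time the rejection is processed; the exception therefore
+// counts as caught and no debug event fires.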
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    assertTrue(event != Debug.DebugEvent.Exception);
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
diff --git a/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js b/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
new file mode 100644
index 0000000..0ad9ce4
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-caught-uncaught.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is a catch handler for the exception thrown in a Promise.
+// We expect no debug event to be triggered.
+
+Debug = debug.Debug;
+
+var p = new Promise(function(resolve, reject) {
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    throw new Error("caught throw");
+  });
+
+q.catch(
+  function(e) {
+    assertEquals("caught throw", e.message);
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    assertTrue(event != Debug.DebugEvent.Exception);
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
diff --git a/test/mjsunit/es6/debug-promises/throw-eventually-caught.js b/test/mjsunit/es6/debug-promises/throw-eventually-caught.js
new file mode 100644
index 0000000..19610f7
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-eventually-caught.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is a catch handler for the to-be-rejected Promise.
+// We expect no Exception debug events, since the default reject handler passes
+// the rejection on to a user-defined reject handler.
+
+Debug = debug.Debug;
+
+var resolve, reject;
+var p0 = new Promise(function(res, rej) { resolve = res; reject = rej; });
+
+var p1 = p0.then(function() {
+  throw new Error();
+});
+
+var p2 = p1.then(function() { });
+var p3 = p2.catch(function() { });
+
+var q = new Promise(function(res, rej) {
+  res();
+});
+
+q.then(function() {
+  resolve();
+})
+
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    assertTrue(event != Debug.DebugEvent.Exception);
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
diff --git a/test/mjsunit/es6/debug-promises/throw-in-constructor.js b/test/mjsunit/es6/debug-promises/throw-in-constructor.js
new file mode 100644
index 0000000..fd6b4dd
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-in-constructor.js
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test debug events when we only listen to uncaught exceptions and
+// an exception is thrown in the Promise constructor.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var step = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      assertEquals(0, step);
+      assertEquals("uncaught", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertTrue(event_data.uncaught());
+      // Assert that the debug event is triggered at the throw site.
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+      step++;
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+var p = new Promise(function(resolve, reject) {
+  throw new Error("uncaught");  // event
+});
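+
+// An exception thrown in the executor is caught by the Promise constructor
+// and turned into a rejection of p; since p has no reject handler, the
+// debugger reports it as an uncaught exception with an associated promise.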
+
+assertEquals(1, step);
+assertNull(exception);
diff --git a/test/mjsunit/es6/debug-promises/throw-uncaught-all.js b/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
new file mode 100644
index 0000000..72f800b
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-uncaught-all.js
@@ -0,0 +1,70 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we listen to all exceptions and
+// there is no catch handler for the exception thrown in a Promise.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    log.push("throw");
+    throw new Error("uncaught");  // event
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    // Only Exception events are counted; anything else, e.g. events during
+    // startup in stress runs, is ignored.
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("uncaught", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertSame(q, event_data.promise());
+      assertTrue(event_data.uncaught());
+      // Assert that the debug event is triggered at the throw site.
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "end main", "throw"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js b/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
new file mode 100644
index 0000000..69aa8eb
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-uncaught-uncaught.js
@@ -0,0 +1,70 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when we only listen to uncaught exceptions and
+// there is no catch handler for the exception thrown in a Promise.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var expected_events = 1;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+var q = p.chain(
+  function() {
+    log.push("throw");
+    throw new Error("uncaught");  // event
+  });
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.AsyncTaskEvent) return;
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      assertEquals("uncaught", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertSame(q, event_data.promise());
+      assertTrue(event_data.uncaught());
+      // Assert that the debug event is triggered at the throw site.
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "end main", "throw"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js b/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
new file mode 100644
index 0000000..1ea1c7f
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-with-throw-in-reject.js
@@ -0,0 +1,90 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when an exception is thrown inside a Promise, which is
+// caught by a custom promise, which throws a new exception in its reject
+// handler. We expect two Exception debug events:
+//  1) when the exception is thrown in the promise q.
+//  2) when the custom reject closure in MyPromise throws an exception.
+
+Debug = debug.Debug;
+
+var expected_events = 2;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+function MyPromise(resolver) {
+  var reject = function() {
+    log.push("throw in reject");
+    throw new Error("reject");  // event
+  };
+  var resolve = function() { };
+  log.push("construct");
+  resolver(resolve, reject);
+};
+
+MyPromise.prototype = new Promise(function() {});
+p.constructor = MyPromise;
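+
+// chain creates its derived promise via p.constructor, so the MyPromise
+// resolver above runs (logging "construct") and its throwing reject closure
+// is the one invoked when the callback below throws.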
+
+var q = p.chain(
+  function() {
+    log.push("throw caught");
+    throw new Error("caught");  // event
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      if (expected_events == 1) {
+        assertEquals(["resolve", "construct", "end main",
+                      "throw caught"], log);
+        assertEquals("caught", event_data.exception().message);
+      } else if (expected_events == 0) {
+        assertEquals("reject", event_data.exception().message);
+      } else {
+        assertUnreachable();
+      }
+      assertSame(q, event_data.promise());
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf('// event') > 0);
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "construct", "end main",
+                      "throw caught", "throw in reject"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js b/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
new file mode 100644
index 0000000..94dcdff
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/throw-with-undefined-reject.js
@@ -0,0 +1,88 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --allow-natives-syntax
+
+// Test debug events when an exception is thrown inside a Promise, which is
+// caught by a custom promise, which has no reject handler.
+// We expect two Exception debug events:
+//  1) when the exception is thrown in the promise q.
+//  2) when calling the undefined custom reject closure in MyPromise throws.
+
+Debug = debug.Debug;
+
+var expected_events = 2;
+var log = [];
+
+var p = new Promise(function(resolve, reject) {
+  log.push("resolve");
+  resolve();
+});
+
+function MyPromise(resolver) {
+  var reject = undefined;
+  var resolve = function() { };
+  log.push("construct");
+  resolver(resolve, reject);
+};
+
+MyPromise.prototype = new Promise(function() {});
+p.constructor = MyPromise;
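+
+// MyPromise hands chain an undefined reject closure, so when the runtime
+// tries to invoke it, the call itself throws "undefined is not a function"
+// from native code, which is why the second event has no JavaScript frames.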
+
+var q = p.chain(
+  function() {
+    log.push("throw caught");
+    throw new Error("caught");  // event
+  });
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      expected_events--;
+      assertTrue(expected_events >= 0);
+      if (expected_events == 1) {
+        assertTrue(
+            exec_state.frame(0).sourceLineText().indexOf('// event') > 0);
+        assertEquals("caught", event_data.exception().message);
+      } else if (expected_events == 0) {
+        // All of the frames on the stack are from native JavaScript.
+        assertEquals(0, exec_state.frameCount());
+        assertEquals("undefined is not a function",
+                     event_data.exception().message);
+      } else {
+        assertUnreachable();
+      }
+      assertSame(q, event_data.promise());
+    }
+  } catch (e) {
+    %AbortJS(e + "\n" + e.stack);
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+log.push("end main");
+
+function testDone(iteration) {
+  function checkResult() {
+    try {
+      assertTrue(iteration < 10);
+      if (expected_events === 0) {
+        assertEquals(["resolve", "construct", "end main", "throw caught"], log);
+      } else {
+        testDone(iteration + 1);
+      }
+    } catch (e) {
+      %AbortJS(e + "\n" + e.stack);
+    }
+  }
+
+  // Run testDone through the Object.observe processing loop.
+  var dummy = {};
+  Object.observe(dummy, checkResult);
+  dummy.dummy = dummy;
+}
+
+testDone(0);
diff --git a/test/mjsunit/es6/debug-promises/try-reject-in-constructor.js b/test/mjsunit/es6/debug-promises/try-reject-in-constructor.js
new file mode 100644
index 0000000..00981a6
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/try-reject-in-constructor.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test debug events when we only listen to uncaught exceptions and
+// the Promise is rejected within a try-catch in the Promise constructor.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var step = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      assertEquals(0, step);
+      assertEquals("uncaught", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertTrue(event_data.uncaught());
+      // Assert that the debug event is triggered at the throw site.
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+      step++;
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+var p = new Promise(function(resolve, reject) {
+  try {  // This try-catch must not prevent this uncaught reject event.
+    reject(new Error("uncaught"));  // event
+  } catch (e) { }
+});
+
+assertEquals(1, step);
+assertNull(exception);
diff --git a/test/mjsunit/es6/debug-promises/try-throw-reject-in-constructor.js b/test/mjsunit/es6/debug-promises/try-throw-reject-in-constructor.js
new file mode 100644
index 0000000..feff81d
--- /dev/null
+++ b/test/mjsunit/es6/debug-promises/try-throw-reject-in-constructor.js
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+// Test debug events when we only listen to uncaught exceptions and
+// an exception is thrown in the Promise constructor, but caught in an
+// inner try-catch.  The Promise is rejected afterwards.
+// We expect an Exception debug event with a promise to be triggered.
+
+Debug = debug.Debug;
+
+var step = 0;
+var exception = null;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Exception) {
+      assertEquals(0, step);
+      assertEquals("uncaught", event_data.exception().message);
+      assertTrue(event_data.promise() instanceof Promise);
+      assertTrue(event_data.uncaught());
+      // Assert that the debug event is triggered at the throw site.
+      assertTrue(exec_state.frame(0).sourceLineText().indexOf("// event") > 0);
+      step++;
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+Debug.setBreakOnUncaughtException();
+Debug.setListener(listener);
+
+var p = new Promise(function(resolve, reject) {
+  try {  // This try-catch must not prevent this uncaught reject event.
+    throw new Error("caught");
+  } catch (e) { }
+  reject(new Error("uncaught"));  // event
+});
+
+assertEquals(1, step);
+assertNull(exception);
diff --git a/test/mjsunit/es6/debug-stepin-collections-foreach.js b/test/mjsunit/es6/debug-stepin-collections-foreach.js
new file mode 100644
index 0000000..08938f7
--- /dev/null
+++ b/test/mjsunit/es6/debug-stepin-collections-foreach.js
@@ -0,0 +1,118 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+
+var exception = false;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      if (breaks == 0) {
+        exec_state.prepareStep(Debug.StepAction.StepIn, 2);
+        breaks = 1;
+      } else if (breaks <= 3) {
+        breaks++;
+        // Check whether we break at the expected line.
+        print(event_data.sourceLineText());
+        assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
+        exec_state.prepareStep(Debug.StepAction.StepIn, 3);
+      }
+    }
+  } catch (e) {
+    exception = true;
+  }
+}
+
+function cb_set(num) {
+  print("element " + num);  // Expected to step to this point.
+  return true;
+}
+
+function cb_map(key, val) {
+  print("key " + key + ", value " + val);  // Expected to step to this point.
+  return true;
+}
+
+var s = new Set();
+s.add(1);
+s.add(2);
+s.add(3);
+s.add(4);
+
+var m = new Map();
+m.set('foo', 1);
+m.set('bar', 2);
+m.set('baz', 3);
+m.set('bat', 4);
+
+Debug.setListener(listener);
+
+var breaks = 0;
+debugger;
+s.forEach(cb_set);
+assertFalse(exception);
+assertEquals(4, breaks);
+
+breaks = 0;
+debugger;
+m.forEach(cb_map);
+assertFalse(exception);
+assertEquals(4, breaks);
+
+Debug.setListener(null);
+
+
+// Test two levels of builtin callbacks:
+// Array.forEach calls a callback function, which by itself uses
+// Array.forEach with another callback function.
+
+function second_level_listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      if (breaks == 0) {
+        exec_state.prepareStep(Debug.StepAction.StepIn, 3);
+        breaks = 1;
+      } else if (breaks <= 16) {
+        breaks++;
+        // Check whether we break at the expected line.
+        assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
+        // Step two steps further every four breaks to skip the
+        // forEach call in the first level of recursion.
+        var step = (breaks % 4 == 1) ? 6 : 3;
+        exec_state.prepareStep(Debug.StepAction.StepIn, step);
+      }
+    }
+  } catch (e) {
+    exception = true;
+  }
+}
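+
+// Expected break count: 1 for the initial debugger statement plus 4 outer
+// callback invocations with 4 inner callback breaks each, i.e. 17 in total.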
+
+function cb_set_foreach(num) {
+  s.forEach(cb_set);
+  print("back to the first level of recursion.");
+}
+
+function cb_map_foreach(key, val) {
+  m.forEach(cb_set);
+  print("back to the first level of recursion.");
+}
+
+Debug.setListener(second_level_listener);
+
+breaks = 0;
+debugger;
+s.forEach(cb_set_foreach);
+assertFalse(exception);
+assertEquals(17, breaks);
+
+breaks = 0;
+debugger;
+m.forEach(cb_map_foreach);
+assertFalse(exception);
+assertEquals(17, breaks);
+
+Debug.setListener(null);
diff --git a/test/mjsunit/es6/debug-stepin-generators.js b/test/mjsunit/es6/debug-stepin-generators.js
new file mode 100644
index 0000000..f48c5ef
--- /dev/null
+++ b/test/mjsunit/es6/debug-stepin-generators.js
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug
+var exception = null;
+var yields = 0;
+
+function listener(event, exec_state, event_data, data) {
+  if (event != Debug.DebugEvent.Break) return;
+  try {
+    var source = exec_state.frame(0).sourceLineText();
+    print(source);
+    if (/stop stepping/.test(source)) return;
+    if (/yield/.test(source)) yields++;
+    exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+  } catch (e) {
+    print(e, e.stack);
+    exception = e;
+  }
+};
+
+Debug.setListener(listener);
+
+function* g() {
+  for (var i = 0; i < 3; ++i) {
+    yield i;
+  }
+}
+
+var i = g();
+debugger;
+for (var num of g()) {}
+i.next();
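+
+// Stepping stays active through the for-of loop (which drives a fresh
+// generator through its three yields) and the i.next() above (one more),
+// accounting for the four expected yields.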
+
+print(); // stop stepping
+
+// Not stepped into.
+i.next();
+i.next();
+
+assertNull(exception);
+assertEquals(4, yields);
diff --git a/test/mjsunit/es6/generators-debug-liveedit.js b/test/mjsunit/es6/generators-debug-liveedit.js
new file mode 100644
index 0000000..6f0c443
--- /dev/null
+++ b/test/mjsunit/es6/generators-debug-liveedit.js
@@ -0,0 +1,119 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+var LiveEdit = Debug.LiveEdit;
+
+unique_id = 0;
+
+var Generator = (function*(){}).constructor;
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({value: value, done: done}, result);
+}
+
+function MakeGenerator() {
+  // Prevents eval script caching.
+  unique_id++;
+  return Generator('callback',
+      "/* " + unique_id + "*/\n" +
+      "yield callback();\n" +
+      "return 'Cat';\n");
+}
+
+function MakeFunction() {
+  // Prevents eval script caching.
+  unique_id++;
+  return Function('callback',
+      "/* " + unique_id + "*/\n" +
+      "callback();\n" +
+      "return 'Cat';\n");
+}
+
+// First, try MakeGenerator with no perturbations.
+(function(){
+  var generator = MakeGenerator();
+  function callback() {};
+  var iter = generator(callback);
+  assertIteratorResult(undefined, false, iter.next());
+  assertIteratorResult("Cat", true, iter.next());
+})();
+
+function patch(fun, from, to) {
+  function debug() {
+    var log = new Array();
+    var script = Debug.findScript(fun);
+    var pos = script.source.indexOf(from);
+    try {
+      LiveEdit.TestApi.ApplySingleChunkPatch(script, pos, from.length, to,
+                                             log);
+    } finally {
+      print("Change log: " + JSON.stringify(log) + "\n");
+    }
+  }
+  Debug.ExecuteInDebugContext(debug, false);
+}
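+
+// Debug.ExecuteInDebugContext runs debug() inside V8's debugger context,
+// which is where Debug.LiveEdit and its TestApi live.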
+
+// Try to edit a MakeGenerator while it's running, then again while it's
+// stopped.
+(function(){
+  var generator = MakeGenerator();
+
+  var gen_patch_attempted = false;
+  function attempt_gen_patch() {
+    assertFalse(gen_patch_attempted);
+    gen_patch_attempted = true;
+    assertThrows(function() { patch(generator, "'Cat'", "'Capybara'") },
+                 LiveEdit.Failure);
+  };
+  var iter = generator(attempt_gen_patch);
+  assertIteratorResult(undefined, false, iter.next());
+  // Patch should not succeed because there is a live generator activation on
+  // the stack.
+  assertIteratorResult("Cat", true, iter.next());
+  assertTrue(gen_patch_attempted);
+
+  // At this point one iterator is live, but closed, so the patch will succeed.
+  patch(generator, "'Cat'", "'Capybara'");
+  iter = generator(function(){});
+  assertIteratorResult(undefined, false, iter.next());
+  // Patch successful.
+  assertIteratorResult("Capybara", true, iter.next());
+
+  // Patching will fail however when a live iterator is suspended.
+  iter = generator(function(){});
+  assertIteratorResult(undefined, false, iter.next());
+  assertThrows(function() { patch(generator, "'Capybara'", "'Tapir'") },
+               LiveEdit.Failure);
+  assertIteratorResult("Capybara", true, iter.next());
+
+  // Try to patch functions with activations inside and outside generator
+  // function activations.  We should succeed in the former case, but not in the
+  // latter.
+  var fun_outside = MakeFunction();
+  var fun_inside = MakeFunction();
+  var fun_patch_attempted = false;
+  var fun_patch_restarted = false;
+  function attempt_fun_patches() {
+    if (fun_patch_attempted) {
+      assertFalse(fun_patch_restarted);
+      fun_patch_restarted = true;
+      return;
+    }
+    fun_patch_attempted = true;
+    // Patching outside a generator activation must fail.
+    assertThrows(function() { patch(fun_outside, "'Cat'", "'Cobra'") },
+                 LiveEdit.Failure);
+    // Patching inside a generator activation may succeed.
+    patch(fun_inside, "'Cat'", "'Koala'");
+  }
+  iter = generator(function() { return fun_inside(attempt_fun_patches) });
+  assertEquals('Cat',
+               fun_outside(function () {
+                 assertIteratorResult('Koala', false, iter.next());
+                 assertTrue(fun_patch_restarted);
+               }));
+})();
diff --git a/test/mjsunit/es6/generators-debug-scopes.js b/test/mjsunit/es6/generators-debug-scopes.js
new file mode 100644
index 0000000..d55e561
--- /dev/null
+++ b/test/mjsunit/es6/generators-debug-scopes.js
@@ -0,0 +1,326 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+function RunTest(name, formals_and_body, args, handler, continuation) {
+  var handler_called = false;
+  var exception = null;
+
+  function listener(event, exec_state, event_data, data) {
+    try {
+      if (event == Debug.DebugEvent.Break) {
+        handler_called = true;
+        handler(exec_state);
+      }
+    } catch (e) {
+      exception = e;
+    }
+  }
+
+  function run(thunk) {
+    handler_called = false;
+    exception = null;
+
+    var res = thunk();
+    if (continuation)
+      continuation(res);
+
+    assertTrue(handler_called, "listener not called for " + name);
+    assertNull(exception, name + " / " + exception);
+  }
+
+  var fun = Function.apply(null, formals_and_body);
+  var gen = (function*(){}).constructor.apply(null, formals_and_body);
+
+  Debug.setListener(listener);
+
+  run(function () { return fun.apply(null, args) });
+  run(function () { return gen.apply(null, args).next().value });
+
+  // TODO(wingo): Uncomment after bug 2838 is fixed.
+  // Debug.setListener(null);
+}
+
+// Check that two scopes are the same.
+function assertScopeMirrorEquals(scope1, scope2) {
+  assertEquals(scope1.scopeType(), scope2.scopeType());
+  assertEquals(scope1.frameIndex(), scope2.frameIndex());
+  assertEquals(scope1.scopeIndex(), scope2.scopeIndex());
+  assertPropertiesEqual(scope1.scopeObject().value(), scope2.scopeObject().value());
+}
+
+function CheckFastAllScopes(scopes, exec_state) {
+  var fast_all_scopes = exec_state.frame().allScopes(true);
+  var length = fast_all_scopes.length;
+  assertTrue(scopes.length >= length);
+  for (var i = 0; i < scopes.length && i < length; i++) {
+    var scope = fast_all_scopes[length - i - 1];
+    assertTrue(scope.isScope());
+    assertEquals(scopes[scopes.length - i - 1], scope.scopeType());
+  }
+}
+
+// Check that the scope chain contains the expected types of scopes.
+function CheckScopeChain(scopes, exec_state) {
+  var all_scopes = exec_state.frame().allScopes();
+  assertEquals(scopes.length, exec_state.frame().scopeCount());
+  assertEquals(scopes.length, all_scopes.length, "FrameMirror.allScopes length");
+  for (var i = 0; i < scopes.length; i++) {
+    var scope = exec_state.frame().scope(i);
+    assertTrue(scope.isScope());
+    assertEquals(scopes[i], scope.scopeType());
+    assertScopeMirrorEquals(all_scopes[i], scope);
+
+    // Check the global object when hitting the global scope.
+    if (scopes[i] == debug.ScopeType.Global) {
+      // Objects don't have the same class (one is "global", the other is
+      // "Object"), so just check the properties directly.
+      assertPropertiesEqual(this, scope.scopeObject().value());
+    }
+  }
+  CheckFastAllScopes(scopes, exec_state);
+
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+  // Send a scopes request and check the result.
+  var json;
+  var request_json = '{"seq":0,"type":"request","command":"scopes"}';
+  var response_json = dcp.processDebugJSONRequest(request_json);
+  var response = JSON.parse(response_json);
+  assertEquals(scopes.length, response.body.scopes.length);
+  for (var i = 0; i < scopes.length; i++) {
+    assertEquals(i, response.body.scopes[i].index);
+    assertEquals(scopes[i], response.body.scopes[i].type);
+    if (scopes[i] == debug.ScopeType.Local ||
+        scopes[i] == debug.ScopeType.Closure) {
+      assertTrue(response.body.scopes[i].object.ref < 0);
+    } else {
+      assertTrue(response.body.scopes[i].object.ref >= 0);
+    }
+    var found = false;
+    for (var j = 0; j < response.refs.length && !found; j++) {
+      found = response.refs[j].handle == response.body.scopes[i].object.ref;
+    }
+    assertTrue(found, "Scope object " + response.body.scopes[i].object.ref + " not found");
+  }
+}
+
+// Check that the content of the scope is as expected. For functions just check
+// that there is a function.
+function CheckScopeContent(content, number, exec_state) {
+  var scope = exec_state.frame().scope(number);
+  var count = 0;
+  for (var p in content) {
+    var property_mirror = scope.scopeObject().property(p);
+    assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
+    if (typeof(content[p]) === 'function') {
+      assertTrue(property_mirror.value().isFunction());
+    } else {
+      assertEquals(content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
+    }
+    count++;
+  }
+
+  // 'arguments' might be exposed in the local and closure scope. Just
+  // ignore this.
+  var scope_size = scope.scopeObject().properties().length;
+  if (!scope.scopeObject().property('arguments').isUndefined()) {
+    scope_size--;
+  }
+  // Skip property with empty name.
+  if (!scope.scopeObject().property('').isUndefined()) {
+    scope_size--;
+  }
+
+  if (count != scope_size) {
+    print('Names found in scope:');
+    var names = scope.scopeObject().propertyNames();
+    for (var i = 0; i < names.length; i++) {
+      print(names[i]);
+    }
+  }
+  assertEquals(count, scope_size);
+
+  // Get the debug command processor.
+  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
+
+  // Send a scope request for information on a single scope and check the
+  // result.
+  var request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":';
+  request_json += scope.scopeIndex();
+  request_json += '}}';
+  var response_json = dcp.processDebugJSONRequest(request_json);
+  var response = JSON.parse(response_json);
+  assertEquals(scope.scopeType(), response.body.type);
+  assertEquals(number, response.body.index);
+  if (scope.scopeType() == debug.ScopeType.Local ||
+      scope.scopeType() == debug.ScopeType.Closure) {
+    assertTrue(response.body.object.ref < 0);
+  } else {
+    assertTrue(response.body.object.ref >= 0);
+  }
+  var found = false;
+  for (var i = 0; i < response.refs.length && !found; i++) {
+    found = response.refs[i].handle == response.body.object.ref;
+  }
+  assertTrue(found, "Scope object " + response.body.object.ref + " not found");
+}
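+
+// The protocol checks above encode a handle convention: local and closure
+// scope objects appear to be transient mirrors (materialized on demand) and
+// get negative handles, while other scope objects get non-negative ones.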
+
+
+// Simple empty local scope.
+RunTest("Local 1",
+        ['debugger;'],
+        [],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({}, 0, exec_state);
+        });
+
+// Local scope with a parameter.
+RunTest("Local 2",
+        ['a', 'debugger;'],
+        [1],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({a:1}, 0, exec_state);
+        });
+
+// Local scope with a parameter and a local variable.
+RunTest("Local 3",
+        ['a', 'var x = 3; debugger;'],
+        [1],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({a:1,x:3}, 0, exec_state);
+        });
+
+// Local scope with parameters and local variables.
+RunTest("Local 4",
+        ['a', 'b', 'var x = 3; var y = 4; debugger;'],
+        [1, 2],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
+        });
+
+// Empty local scope with use of eval.
+RunTest("Local 5",
+        ['eval(""); debugger;'],
+        [],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({}, 0, exec_state);
+        });
+
+// Local introducing local variable using eval.
+RunTest("Local 6",
+        ['eval("var i = 5"); debugger;'],
+        [],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({i:5}, 0, exec_state);
+        });
+
+// Local scope with parameters, local variables and local variable introduced
+// using eval.
+RunTest("Local 7",
+        ['a', 'b',
+         "var x = 3; var y = 4;\n"
+         + "eval('var i = 5'); eval ('var j = 6');\n"
+         + "debugger;"],
+        [1, 2],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 0, exec_state);
+        });
+
+// Nested empty with blocks.
+RunTest("With",
+        ["with ({}) { with ({}) { debugger; } }"],
+        [],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.With,
+                           debug.ScopeType.With,
+                           debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({}, 0, exec_state);
+          CheckScopeContent({}, 1, exec_state);
+        });
+
+// Simple closure formed by returning an inner function referring to the
+// outer function's arguments.
+RunTest("Closure 1",
+        ['a', 'return function() { debugger; return a; }'],
+        [1],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Closure,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({a:1}, 1, exec_state);
+        },
+       function (result) { result() });
+
+RunTest("The full monty",
+        ['a', 'b',
+         "var x = 3;\n" +
+         "var y = 4;\n" +
+         "eval('var i = 5');\n" +
+         "eval('var j = 6');\n" +
+         "function f(a, b) {\n" +
+         "  var x = 9;\n" +
+         "  var y = 10;\n" +
+         "  eval('var i = 11');\n" +
+         "  eval('var j = 12');\n" +
+         "  with ({j:13}){\n" +
+         "    return function() {\n" +
+         "      var x = 14;\n" +
+         "      with ({a:15}) {\n" +
+         "        with ({b:16}) {\n" +
+         "          debugger;\n" +
+         "          some_global = a;\n" +
+         "          return f;\n" +
+         "        }\n" +
+         "      }\n" +
+         "    };\n" +
+         "  }\n" +
+         "}\n" +
+         "return f(a, b);"],
+        [1, 2],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.With,
+                           debug.ScopeType.With,
+                           debug.ScopeType.Local,
+                           debug.ScopeType.With,
+                           debug.ScopeType.Closure,
+                           debug.ScopeType.Closure,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({b:16}, 0, exec_state);
+          CheckScopeContent({a:15}, 1, exec_state);
+          CheckScopeContent({x:14}, 2, exec_state);
+          CheckScopeContent({j:13}, 3, exec_state);
+          CheckScopeContent({a:1,b:2,x:9,y:10,i:11,j:12}, 4, exec_state);
+          CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6,f:function(){}}, 5, exec_state);
+        },
+        function (result) { result() });
+
+RunTest("Catch block 1",
+        ["try { throw 'Exception'; } catch (e) { debugger; }"],
+        [],
+        function (exec_state) {
+          CheckScopeChain([debug.ScopeType.Catch,
+                           debug.ScopeType.Local,
+                           debug.ScopeType.Global], exec_state);
+          CheckScopeContent({e:'Exception'}, 0, exec_state);
+        });
diff --git a/test/mjsunit/es6/generators-iteration.js b/test/mjsunit/es6/generators-iteration.js
new file mode 100644
index 0000000..b6fcdaa
--- /dev/null
+++ b/test/mjsunit/es6/generators-iteration.js
@@ -0,0 +1,698 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc
+
+// Test generator iteration.
+
+var GeneratorFunction = (function*(){yield 1;}).__proto__.constructor;
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({ value: value, done: done}, result);
+}
+
+function assertIteratorIsClosed(iter) {
+  assertIteratorResult(undefined, true, iter.next());
+  assertDoesNotThrow(function() { iter.next(); });
+}
+
+function assertThrownIteratorIsClosed(iter) {
+  // TODO(yusukesuzuki): Since the status of a thrown generator is
+  // "executing", the following tests fail.
+  // https://code.google.com/p/v8/issues/detail?id=3096
+  // assertIteratorIsClosed(iter);
+}
+
+function TestGeneratorResultPrototype() {
+  function* g() { yield 1; }
+  var iter = g();
+  var result = iter.next();
+
+  assertSame(Object.prototype, Object.getPrototypeOf(result));
+  var property_names = Object.getOwnPropertyNames(result);
+  property_names.sort();
+  assertEquals(["done", "value"], property_names);
+  assertIteratorResult(1, false, result);
+}
+TestGeneratorResultPrototype()
+
+function TestGenerator(g, expected_values_for_next,
+                       send_val, expected_values_for_send) {
+  function testNext(thunk) {
+    var iter = thunk();
+    for (var i = 0; i < expected_values_for_next.length; i++) {
+      var expected_value = expected_values_for_next[i];
+      var is_last = i == expected_values_for_next.length - 1;
+      assertIteratorResult(expected_value, is_last, iter.next());
+    }
+    assertIteratorIsClosed(iter);
+  }
+  function testSend(thunk) {
+    var iter = thunk();
+    for (var i = 0; i < expected_values_for_send.length; i++) {
+      assertIteratorResult(expected_values_for_send[i],
+                           i == expected_values_for_send.length - 1,
+                           iter.next(send_val));
+    }
+    assertIteratorIsClosed(iter);
+  }
+  function testThrow(thunk) {
+    for (var i = 0; i < expected_values_for_next.length; i++) {
+      var iter = thunk();
+      for (var j = 0; j < i; j++) {
+        assertIteratorResult(expected_values_for_next[j],
+                             j == expected_values_for_next.length - 1,
+                             iter.next());
+      }
+      function Sentinel() {}
+      assertThrows(function () { iter.throw(new Sentinel); }, Sentinel);
+      assertThrownIteratorIsClosed(iter);
+    }
+  }
+
+  testNext(g);
+  testSend(g);
+  testThrow(g);
+
+  testNext(function*() { return yield* g(); });
+  testSend(function*() { return yield* g(); });
+  testThrow(function*() { return yield* g(); });
+
+  if (g instanceof GeneratorFunction) {
+    testNext(function() { return new g(); });
+    testSend(function() { return new g(); });
+    testThrow(function() { return new g(); });
+  }
+}
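+
+// TestGenerator drives each generator three ways: called directly,
+// delegated through yield*, and (for GeneratorFunction instances)
+// constructed with new; each variant is exercised via next(), next(value)
+// and throw().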
+
+TestGenerator(function* g1() { },
+              [undefined],
+              "foo",
+              [undefined]);
+
+TestGenerator(function* g2() { yield 1; },
+              [1, undefined],
+              "foo",
+              [1, undefined]);
+
+TestGenerator(function* g3() { yield 1; yield 2; },
+              [1, 2, undefined],
+              "foo",
+              [1, 2, undefined]);
+
+TestGenerator(function* g4() { yield 1; yield 2; return 3; },
+              [1, 2, 3],
+              "foo",
+              [1, 2, 3]);
+
+TestGenerator(function* g5() { return 1; },
+              [1],
+             "foo",
+              [1]);
+
+TestGenerator(function* g6() { var x = yield 1; return x; },
+              [1, undefined],
+              "foo",
+              [1, "foo"]);
+
+TestGenerator(function* g7() { var x = yield 1; yield 2; return x; },
+              [1, 2, undefined],
+              "foo",
+              [1, 2, "foo"]);
+
+TestGenerator(function* g8() { for (var x = 0; x < 4; x++) { yield x; } },
+              [0, 1, 2, 3, undefined],
+              "foo",
+              [0, 1, 2, 3, undefined]);
+
+// Generator with arguments.
+TestGenerator(
+    function g9() {
+      return (function*(a, b, c, d) {
+        yield a; yield b; yield c; yield d;
+      })("fee", "fi", "fo", "fum");
+    },
+    ["fee", "fi", "fo", "fum", undefined],
+    "foo",
+    ["fee", "fi", "fo", "fum", undefined]);
+
+// Too few arguments.
+TestGenerator(
+    function g10() {
+      return (function*(a, b, c, d) {
+        yield a; yield b; yield c; yield d;
+      })("fee", "fi");
+    },
+    ["fee", "fi", undefined, undefined, undefined],
+    "foo",
+    ["fee", "fi", undefined, undefined, undefined]);
+
+// Too many arguments.
+TestGenerator(
+    function g11() {
+      return (function*(a, b, c, d) {
+        yield a; yield b; yield c; yield d;
+      })("fee", "fi", "fo", "fum", "I smell the blood of an Englishman");
+    },
+    ["fee", "fi", "fo", "fum", undefined],
+    "foo",
+    ["fee", "fi", "fo", "fum", undefined]);
+
+// The arguments object.
+TestGenerator(
+    function g12() {
+      return (function*(a, b, c, d) {
+        for (var i = 0; i < arguments.length; i++) {
+          yield arguments[i];
+        }
+      })("fee", "fi", "fo", "fum", "I smell the blood of an Englishman");
+    },
+    ["fee", "fi", "fo", "fum", "I smell the blood of an Englishman",
+     undefined],
+    "foo",
+    ["fee", "fi", "fo", "fum", "I smell the blood of an Englishman",
+     undefined]);
+
+// Access to captured free variables.
+TestGenerator(
+    function g13() {
+      return (function(a, b, c, d) {
+        return (function*() {
+          yield a; yield b; yield c; yield d;
+        })();
+      })("fee", "fi", "fo", "fum");
+    },
+    ["fee", "fi", "fo", "fum", undefined],
+    "foo",
+    ["fee", "fi", "fo", "fum", undefined]);
+
+// Abusing the arguments object.
+TestGenerator(
+    function g14() {
+      return (function*(a, b, c, d) {
+        arguments[0] = "Be he live";
+        arguments[1] = "or be he dead";
+        arguments[2] = "I'll grind his bones";
+        arguments[3] = "to make my bread";
+        yield a; yield b; yield c; yield d;
+      })("fee", "fi", "fo", "fum");
+    },
+    ["Be he live", "or be he dead", "I'll grind his bones", "to make my bread",
+     undefined],
+    "foo",
+    ["Be he live", "or be he dead", "I'll grind his bones", "to make my bread",
+     undefined]);
+
+// Abusing the arguments object: strict mode.
+TestGenerator(
+    function g15() {
+      return (function*(a, b, c, d) {
+        "use strict";
+        arguments[0] = "Be he live";
+        arguments[1] = "or be he dead";
+        arguments[2] = "I'll grind his bones";
+        arguments[3] = "to make my bread";
+        yield a; yield b; yield c; yield d;
+      })("fee", "fi", "fo", "fum");
+    },
+    ["fee", "fi", "fo", "fum", undefined],
+    "foo",
+    ["fee", "fi", "fo", "fum", undefined]);
+
+// GC.
+TestGenerator(function* g16() { yield "baz"; gc(); yield "qux"; },
+              ["baz", "qux", undefined],
+              "foo",
+              ["baz", "qux", undefined]);
+
+// Receivers.
+TestGenerator(
+    function g17() {
+      function* g() { yield this.x; yield this.y; }
+      var o = { start: g, x: 1, y: 2 };
+      return o.start();
+    },
+    [1, 2, undefined],
+    "foo",
+    [1, 2, undefined]);
+
+TestGenerator(
+    function g18() {
+      function* g() { yield this.x; yield this.y; }
+      var iter = new g;
+      iter.x = 1;
+      iter.y = 2;
+      return iter;
+    },
+    [1, 2, undefined],
+    "foo",
+    [1, 2, undefined]);
+
+TestGenerator(
+    function* g19() {
+      var x = 1;
+      yield x;
+      with({x:2}) { yield x; }
+      yield x;
+    },
+    [1, 2, 1, undefined],
+    "foo",
+    [1, 2, 1, undefined]);
+
+TestGenerator(
+    function* g20() { yield (1 + (yield 2) + 3); },
+    [2, NaN, undefined],
+    "foo",
+    [2, "1foo3", undefined]);
+
+TestGenerator(
+    function* g21() { return (1 + (yield 2) + 3); },
+    [2, NaN],
+    "foo",
+    [2, "1foo3"]);
+
+TestGenerator(
+    function* g22() { yield (1 + (yield 2) + 3); yield (4 + (yield 5) + 6); },
+    [2, NaN, 5, NaN, undefined],
+    "foo",
+    [2, "1foo3", 5, "4foo6", undefined]);
+
+TestGenerator(
+    function* g23() {
+      return (yield (1 + (yield 2) + 3)) + (yield (4 + (yield 5) + 6));
+    },
+    [2, NaN, 5, NaN, NaN],
+    "foo",
+    [2, "1foo3", 5, "4foo6", "foofoo"]);
+
+// Rewind a try context with and without operands on the stack.
+TestGenerator(
+    function* g24() {
+      try {
+        return (yield (1 + (yield 2) + 3)) + (yield (4 + (yield 5) + 6));
+      } catch (e) {
+        throw e;
+      }
+    },
+    [2, NaN, 5, NaN, NaN],
+    "foo",
+    [2, "1foo3", 5, "4foo6", "foofoo"]);
+
+// Yielding in a catch context, with and without operands on the stack.
+TestGenerator(
+    function* g25() {
+      try {
+        throw (yield (1 + (yield 2) + 3))
+      } catch (e) {
+        if (typeof e == 'object') throw e;
+        return e + (yield (4 + (yield 5) + 6));
+      }
+    },
+    [2, NaN, 5, NaN, NaN],
+    "foo",
+    [2, "1foo3", 5, "4foo6", "foofoo"]);
+
+// Yield with no arguments yields undefined.
+TestGenerator(
+    function* g26() { return yield yield },
+    [undefined, undefined, undefined],
+    "foo",
+    [undefined, "foo", "foo"]);
+
+// A newline causes the parser to stop looking for an argument to yield.
+TestGenerator(
+    function* g27() {
+      yield
+      3
+      return
+    },
+    [undefined, undefined],
+    "foo",
+    [undefined, undefined]);
+
+// TODO(wingo): We should use TestGenerator for these, except that
+// currently yield* will unconditionally propagate a throw() to the
+// delegate iterator, which fails for these iterators that don't have
+// throw().  See http://code.google.com/p/v8/issues/detail?id=3484.
+(function() {
+    function* g28() {
+      yield* [1, 2, 3];
+    }
+    var iter = g28();
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorResult(undefined, true, iter.next());
+})();
+
+(function() {
+    function* g29() {
+      yield* "abc";
+    }
+    var iter = g29();
+    assertIteratorResult("a", false, iter.next());
+    assertIteratorResult("b", false, iter.next());
+    assertIteratorResult("c", false, iter.next());
+    assertIteratorResult(undefined, true, iter.next());
+})();
+
+// Generator function instances.
+TestGenerator(GeneratorFunction(),
+              [undefined],
+              "foo",
+              [undefined]);
+
+TestGenerator(new GeneratorFunction(),
+              [undefined],
+              "foo",
+              [undefined]);
+
+TestGenerator(GeneratorFunction('yield 1;'),
+              [1, undefined],
+              "foo",
+              [1, undefined]);
+
+TestGenerator(
+    function() { return GeneratorFunction('x', 'y', 'yield x + y;')(1, 2) },
+    [3, undefined],
+    "foo",
+    [3, undefined]);
+
+// Access to this with formal arguments.
+TestGenerator(
+    function () {
+      return ({ x: 42, g: function* (a) { yield this.x } }).g(0);
+    },
+    [42, undefined],
+    "foo",
+    [42, undefined]);
+
+// Test that yield* re-yields received results without re-boxing.
+function TestDelegatingYield() {
+  function results(results) {
+    var i = 0;
+    function next() {
+      return results[i++];
+    }
+    var iter = { next: next };
+    var ret = {};
+    ret[Symbol.iterator] = function() { return iter; };
+    return ret;
+  }
+  function* yield_results(expected) {
+    return yield* results(expected);
+  }
+  function collect_results(iterable) {
+    var iter = iterable[Symbol.iterator]();
+    var ret = [];
+    var result;
+    do {
+      result = iter.next();
+      ret.push(result);
+    } while (!result.done);
+    return ret;
+  }
+  // We have to put a full result for the end, because the return will re-box.
+  var expected = [{value: 1}, 13, "foo", {value: 34, done: true}];
+
+  // Sanity check.
+  assertEquals(expected, collect_results(results(expected)));
+  assertEquals(expected, collect_results(yield_results(expected)));
+}
+TestDelegatingYield();
+
+function TestTryCatch(instantiate) {
+  function* g() { yield 1; try { yield 2; } catch (e) { yield e; } yield 3; }
+  function Sentinel() {}
+
+  function Test1(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorIsClosed(iter);
+  }
+  Test1(instantiate(g));
+
+  function Test2(iter) {
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test2(instantiate(g));
+
+  function Test3(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test3(instantiate(g));
+
+  function Test4(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    var exn = new Sentinel;
+    assertIteratorResult(exn, false, iter.throw(exn));
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorIsClosed(iter);
+  }
+  Test4(instantiate(g));
+
+  function Test5(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    var exn = new Sentinel;
+    assertIteratorResult(exn, false, iter.throw(exn));
+    assertIteratorResult(3, false, iter.next());
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test5(instantiate(g));
+
+  function Test6(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    var exn = new Sentinel;
+    assertIteratorResult(exn, false, iter.throw(exn));
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test6(instantiate(g));
+
+  function Test7(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorIsClosed(iter);
+  }
+  Test7(instantiate(g));
+}
+TestTryCatch(function (g) { return g(); });
+TestTryCatch(function* (g) { return yield* g(); });
+
+function TestTryFinally(instantiate) {
+  function* g() { yield 1; try { yield 2; } finally { yield 3; } yield 4; }
+  function Sentinel() {}
+  function Sentinel2() {}
+
+  function Test1(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorResult(4, false, iter.next());
+    assertIteratorIsClosed(iter);
+  }
+  Test1(instantiate(g));
+
+  function Test2(iter) {
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test2(instantiate(g));
+
+  function Test3(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test3(instantiate(g));
+
+  function Test4(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.throw(new Sentinel));
+    assertThrows(function() { iter.next(); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test4(instantiate(g));
+
+  function Test5(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.throw(new Sentinel));
+    assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test5(instantiate(g));
+
+  function Test6(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.next());
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test6(instantiate(g));
+
+  function Test7(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorResult(4, false, iter.next());
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test7(instantiate(g));
+
+  function Test8(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorResult(4, false, iter.next());
+    assertIteratorIsClosed(iter);
+  }
+  Test8(instantiate(g));
+}
+TestTryFinally(function (g) { return g(); });
+TestTryFinally(function* (g) { return yield* g(); });
+
+function TestNestedTry(instantiate) {
+  function* g() {
+    try {
+      yield 1;
+      try { yield 2; } catch (e) { yield e; }
+      yield 3;
+    } finally {
+      yield 4;
+    }
+    yield 5;
+  }
+  function Sentinel() {}
+  function Sentinel2() {}
+
+  function Test1(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorResult(4, false, iter.next());
+    assertIteratorResult(5, false, iter.next());
+    assertIteratorIsClosed(iter);
+  }
+  Test1(instantiate(g));
+
+  function Test2(iter) {
+    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test2(instantiate(g));
+
+  function Test3(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(4, false, iter.throw(new Sentinel));
+    assertThrows(function() { iter.next(); }, Sentinel);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test3(instantiate(g));
+
+  function Test4(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(4, false, iter.throw(new Sentinel));
+    assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test4(instantiate(g));
+
+  function Test5(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    var exn = new Sentinel;
+    assertIteratorResult(exn, false, iter.throw(exn));
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorResult(4, false, iter.next());
+    assertIteratorResult(5, false, iter.next());
+    assertIteratorIsClosed(iter);
+  }
+  Test5(instantiate(g));
+
+  function Test6(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    var exn = new Sentinel;
+    assertIteratorResult(exn, false, iter.throw(exn));
+    assertIteratorResult(4, false, iter.throw(new Sentinel2));
+    assertThrows(function() { iter.next(); }, Sentinel2);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test6(instantiate(g));
+
+  function Test7(iter) {
+    assertIteratorResult(1, false, iter.next());
+    assertIteratorResult(2, false, iter.next());
+    var exn = new Sentinel;
+    assertIteratorResult(exn, false, iter.throw(exn));
+    assertIteratorResult(3, false, iter.next());
+    assertIteratorResult(4, false, iter.throw(new Sentinel2));
+    assertThrows(function() { iter.next(); }, Sentinel2);
+    assertThrownIteratorIsClosed(iter);
+  }
+  Test7(instantiate(g));
+
+  // That's probably enough.
+}
+TestNestedTry(function (g) { return g(); });
+TestNestedTry(function* (g) { return yield* g(); });
+
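+// Each helper below re-enters its own iterator while the generator is still
+// running; the generator protocol rejects nested resumption, so all three
+// calls throw.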
+function TestRecursion() {
+  function TestNextRecursion() {
+    function* g() { yield iter.next(); }
+    var iter = g();
+    return iter.next();
+  }
+  function TestSendRecursion() {
+    function* g() { yield iter.next(42); }
+    var iter = g();
+    return iter.next();
+  }
+  function TestThrowRecursion() {
+    function* g() { yield iter.throw(1); }
+    var iter = g();
+    return iter.next();
+  }
+  assertThrows(TestNextRecursion, Error);
+  assertThrows(TestSendRecursion, Error);
+  assertThrows(TestThrowRecursion, Error);
+}
+TestRecursion();
diff --git a/test/mjsunit/es6/generators-mirror.js b/test/mjsunit/es6/generators-mirror.js
new file mode 100644
index 0000000..6925285
--- /dev/null
+++ b/test/mjsunit/es6/generators-mirror.js
@@ -0,0 +1,84 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+// Test the mirror object for generator objects.
+
+function *generator(f) {
+  "use strict";
+  yield;
+  f();
+  yield;
+}
+
+function MirrorRefCache(json_refs) {
+  var tmp = eval('(' + json_refs + ')');
+  this.refs_ = [];
+  for (var i = 0; i < tmp.length; i++) {
+    this.refs_[tmp[i].handle] = tmp[i];
+  }
+}
+
+MirrorRefCache.prototype.lookup = function(handle) {
+  return this.refs_[handle];
+};
+
+function TestGeneratorMirror(g, test) {
+  // Create mirror and JSON representation.
+  var mirror = debug.MakeMirror(g);
+  var serializer = debug.MakeMirrorSerializer();
+  var json = JSON.stringify(serializer.serializeValue(mirror));
+  var refs = new MirrorRefCache(
+      JSON.stringify(serializer.serializeReferencedObjects()));
+
+  // Check the mirror hierarchy.
+  assertTrue(mirror instanceof debug.Mirror);
+  assertTrue(mirror instanceof debug.ValueMirror);
+  assertTrue(mirror instanceof debug.ObjectMirror);
+  assertTrue(mirror instanceof debug.GeneratorMirror);
+
+  // Check the mirror properties.
+  assertTrue(mirror.isGenerator());
+  assertEquals('generator', mirror.type());
+  assertFalse(mirror.isPrimitive());
+  assertEquals('Generator', mirror.className());
+
+  assertTrue(mirror.receiver().isUndefined());
+  assertEquals(generator, mirror.func().value());
+
+  test(mirror);
+}
+
+var iter = generator(function () {
+  assertEquals('running', debug.MakeMirror(iter).status());
+});
+
+// Note that line numbers are 0-based, not 1-based.
+function assertSourceLocation(loc, line, column) {
+  assertEquals(line, loc.line);
+  assertEquals(column, loc.column);
+}
+
+TestGeneratorMirror(iter, function (mirror) {
+  assertEquals('suspended', mirror.status());
+  assertSourceLocation(mirror.sourceLocation(), 7, 19);
+});
+
+iter.next();
+TestGeneratorMirror(iter, function (mirror) {
+  assertEquals('suspended', mirror.status());
+  assertSourceLocation(mirror.sourceLocation(), 9, 2);
+});
+
+iter.next();
+TestGeneratorMirror(iter, function (mirror) {
+  assertEquals('suspended', mirror.status());
+  assertSourceLocation(mirror.sourceLocation(), 11, 2);
+});
+
+iter.next();
+TestGeneratorMirror(iter, function (mirror) {
+  assertEquals('closed', mirror.status());
+  assertEquals(undefined, mirror.sourceLocation());
+});
diff --git a/test/mjsunit/es6/generators-objects.js b/test/mjsunit/es6/generators-objects.js
new file mode 100644
index 0000000..8a052ff
--- /dev/null
+++ b/test/mjsunit/es6/generators-objects.js
@@ -0,0 +1,93 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping --allow-natives-syntax
+
+// Test instantiations of generators.
+
+// Generators shouldn't allocate stack slots.  This test will abort in debug
+// mode if generators have stack slots.
+function TestContextAllocation() {
+  function* g1(a, b, c) { yield 1; return [a, b, c]; }
+  function* g2() { yield 1; return arguments; }
+  function* g3() { yield 1; return this; }
+  function* g4() { var x = 10; yield 1; return x; }
+  // Temporary variable context allocation
+  function* g5(l) { "use strict"; yield 1; for (let x in l) { yield x; } }
+
+  g1();
+  g2();
+  g3();
+  g4();
+  g5(["foo"]);
+}
+TestContextAllocation();
+
+
+// Test the properties and prototype of a generator object.
+function TestGeneratorObject() {
+  function* g() { yield 1; }
+
+  var iter = g();
+  assertSame(g.prototype, Object.getPrototypeOf(iter));
+  assertTrue(iter instanceof g);
+  assertEquals("Generator", %_ClassOf(iter));
+  assertEquals("[object Generator]", String(iter));
+  assertEquals([], Object.getOwnPropertyNames(iter));
+  assertTrue(iter !== g());
+
+  // g() is the same as new g().
+  iter = new g();
+  assertSame(g.prototype, Object.getPrototypeOf(iter));
+  assertTrue(iter instanceof g);
+  assertEquals("Generator", %_ClassOf(iter));
+  assertEquals("[object Generator]", String(iter));
+  assertEquals([], Object.getOwnPropertyNames(iter));
+  assertTrue(iter !== new g());
+}
+TestGeneratorObject();
+
+
+// Test the methods of generator objects.
+function TestGeneratorObjectMethods() {
+  function* g() { yield 1; }
+  var iter = g();
+
+  function TestNonGenerator(non_generator) {
+    assertThrows(function() { iter.next.call(non_generator); }, TypeError);
+    assertThrows(function() { iter.next.call(non_generator, 1); }, TypeError);
+    assertThrows(function() { iter.throw.call(non_generator, 1); }, TypeError);
+    assertThrows(function() { iter.close.call(non_generator); }, TypeError);
+  }
+
+  TestNonGenerator(1);
+  TestNonGenerator({});
+  TestNonGenerator(function(){});
+  TestNonGenerator(g);
+  TestNonGenerator(g.prototype);
+}
+TestGeneratorObjectMethods();
diff --git a/test/mjsunit/es6/generators-parsing.js b/test/mjsunit/es6/generators-parsing.js
new file mode 100644
index 0000000..e440836
--- /dev/null
+++ b/test/mjsunit/es6/generators-parsing.js
@@ -0,0 +1,131 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test basic generator syntax.
+
+// Yield statements.
+function* g() { yield 3; yield 4; }
+
+// Yield expressions.
+function* g() { (yield 3) + (yield 4); }
+
+// Yield without a RHS.
+function* g() { yield; }
+function* g() { yield }
+function* g() {
+  yield
+}
+function* g() { (yield) }
+function* g() { [yield] }
+function* g() { {yield} }
+function* g() { yield, yield }
+function* g() { yield; yield }
+function* g() { (yield) ? yield : yield }
+function* g() {
+  (yield)
+  ? yield
+  : yield
+}
+
+// If yield has a RHS, it needs to start on the same line.  The * in a
+// yield* counts as starting the RHS.
+function* g() {
+  yield *
+  foo
+}
+assertThrows("function* g() { yield\n* foo }", SyntaxError);
+assertEquals(undefined,
+             (function*(){
+               yield
+               3
+             })().next().value);
+
+// A YieldExpression is not a LogicalORExpression.
+assertThrows("function* g() { yield ? yield : yield }", SyntaxError);
+
+// You can have a generator in strict mode.
+function* g() { "use strict"; yield 3; yield 4; }
+
+// Generators can have return statements also, which internally parse to a kind
+// of yield expression.
+function* g() { yield 1; return; }
+function* g() { yield 1; return 2; }
+function* g() { yield 1; return 2; yield "dead"; }
+
+// Generator expression.
+(function* () { yield 3; });
+
+// Named generator expression.
+(function* g() { yield 3; });
+
+// You can have a generator without a yield.
+function* g() { }
+
+// A YieldExpression is valid as the RHS of a YieldExpression.
+function* g() { yield yield 1; }
+function* g() { yield 3 + (yield 4); }
+
+// Generator definitions with a name of "yield" are not specifically ruled out
+// by the spec, as the `yield' name is outside the generator itself.  However,
+// in strict mode, "yield" is an invalid identifier.
+function* yield() { (yield 3) + (yield 4); }
+assertThrows("function* yield() { \"use strict\"; (yield 3) + (yield 4); }",
+             SyntaxError);
+
+// In sloppy mode, yield is a normal identifier, outside of generators.
+function yield(yield) { yield: yield (yield + yield (0)); }
+
+// Yield is always valid as a key in an object literal.
+({ yield: 1 });
+function* g() { yield ({ yield: 1 }) }
+function* g() { yield ({ get yield() { return 1; }}) }
+
+// Checks that yield is a valid label in sloppy mode, but not in strict mode
+// or inside generators.
+function f() { yield: 1 }
+assertThrows("function f() { \"use strict\"; yield: 1 }", SyntaxError)
+assertThrows("function* g() { yield: 1 }", SyntaxError)
+
+// Yield is only a keyword in the body of the generator, not in nested
+// functions.
+function* g() { function f() { yield (yield + yield (0)); } }
+
+// Yield in a generator is not an identifier.
+assertThrows("function* g() { yield = 10; }", SyntaxError);
+
+// Yield binds very loosely, so this parses as "yield (3 + yield 4)", which is
+// invalid.
+assertThrows("function* g() { yield 3 + yield 4; }", SyntaxError);
+
+// Yield is still a future reserved word in strict mode.
+assertThrows("function f() { \"use strict\"; var yield = 13; }", SyntaxError);
+
+// The name of the NFE is let-bound inside the generator, so "yield" is invalid.
+assertThrows("function* g() { yield (function yield() {}); }", SyntaxError);
+
+// In generators, yield is invalid as a formal argument name.
+assertThrows("function* g(yield) { yield (10); }", SyntaxError);
diff --git a/test/mjsunit/es6/generators-poisoned-properties.js b/test/mjsunit/es6/generators-poisoned-properties.js
new file mode 100644
index 0000000..44d823a
--- /dev/null
+++ b/test/mjsunit/es6/generators-poisoned-properties.js
@@ -0,0 +1,40 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({value: value, done: done}, result);
+}
+
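+// A generator function's "caller" and "arguments" properties are poisoned:
+// both are the same non-configurable accessor pair whose getter and setter
+// throw, as the checks below spell out.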
+function test(f) {
+  var cdesc = Object.getOwnPropertyDescriptor(f, "caller");
+  var adesc = Object.getOwnPropertyDescriptor(f, "arguments");
+
+  assertFalse(cdesc.enumerable);
+  assertFalse(cdesc.configurable);
+
+  assertFalse(adesc.enumerable);
+  assertFalse(adesc.configurable);
+
+  assertSame(cdesc.get, cdesc.set);
+  assertSame(cdesc.get, adesc.get);
+  assertSame(cdesc.get, adesc.set);
+
+  assertTrue(cdesc.get instanceof Function);
+  assertEquals(0, cdesc.get.length);
+  assertThrows(cdesc.get, TypeError);
+
+  assertThrows(function() { return f.caller; }, TypeError);
+  assertThrows(function() { f.caller = 42; }, TypeError);
+  assertThrows(function() { return f.arguments; }, TypeError);
+  assertThrows(function() { f.arguments = 42; }, TypeError);
+}
+
+function *sloppy() { test(sloppy); }
+function *strict() { "use strict"; test(strict); }
+
+test(sloppy);
+test(strict);
+
+assertIteratorResult(undefined, true, sloppy().next());
+assertIteratorResult(undefined, true, strict().next());
diff --git a/test/mjsunit/es6/generators-relocation.js b/test/mjsunit/es6/generators-relocation.js
new file mode 100644
index 0000000..6babb14
--- /dev/null
+++ b/test/mjsunit/es6/generators-relocation.js
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({value: value, done: done}, result);
+}
+
+function RunTest(formals_and_body, args, value1, value2) {
+  // A null listener. It isn't important what the listener does.
+  function listener(event, exec_state, event_data, data) {
+  }
+
+  // Create the generator function outside a debugging context. It will probably
+  // be lazily compiled.
+  var gen = (function*(){}).constructor.apply(null, formals_and_body);
+
+  // Instantiate the generator object.
+  var obj = gen.apply(null, args);
+
+  // Advance to the first yield.
+  assertIteratorResult(value1, false, obj.next());
+
+  // Add a breakpoint on line 3 (the second yield).
+  var bp = Debug.setBreakPoint(gen, 3);
+
+  // Enable the debugger, which should force recompilation of the generator
+  // function and relocation of the suspended generator activation.
+  Debug.setListener(listener);
+
+  // Check that the generator resumes and suspends properly.
+  assertIteratorResult(value2, false, obj.next());
+
+  // Disable debugger -- should not force recompilation.
+  Debug.clearBreakPoint(bp);
+  Debug.setListener(null);
+
+  // Run to completion.
+  assertIteratorResult(undefined, true, obj.next());
+}
+
+function prog(a, b, c) {
+  return a + ';\n' + 'yield ' + b + ';\n' + 'yield ' + c;
+}
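+// For example (illustrative): prog('', '1', '2') produces the body
+// ";\nyield 1;\nyield 2", placing the second yield on the line where RunTest
+// sets its breakpoint.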
+
+// Simple empty local scope.
+RunTest([prog('', '1', '2')], [], 1, 2);
+
+RunTest([prog('for (;;) break', '1', '2')], [], 1, 2);
+
+RunTest([prog('while (0) foo()', '1', '2')], [], 1, 2);
+
+RunTest(['a', prog('var x = 3', 'a', 'x')], [1], 1, 3);
+
+RunTest(['a', prog('', '1', '2')], [42], 1, 2);
+
+RunTest(['a', prog('for (;;) break', '1', '2')], [42], 1, 2);
diff --git a/test/mjsunit/es6/generators-runtime.js b/test/mjsunit/es6/generators-runtime.js
new file mode 100644
index 0000000..8fa70b6
--- /dev/null
+++ b/test/mjsunit/es6/generators-runtime.js
@@ -0,0 +1,146 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test aspects of the generator runtime.
+
+// See:
+// http://people.mozilla.org/~jorendorff/es6-draft.html#sec-generatorfunction-objects
+
+function f() { }
+function* g() { yield 1; }
+var GeneratorFunctionPrototype = Object.getPrototypeOf(g);
+var GeneratorFunction = GeneratorFunctionPrototype.constructor;
+var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
+
+// A generator function should have the same set of properties as any
+// other function.
+function TestGeneratorFunctionInstance() {
+  var f_own_property_names = Object.getOwnPropertyNames(f);
+  var g_own_property_names = Object.getOwnPropertyNames(g);
+
+  f_own_property_names.sort();
+  g_own_property_names.sort();
+
+  assertArrayEquals(f_own_property_names, g_own_property_names);
+  var i;
+  for (i = 0; i < f_own_property_names.length; i++) {
+    var prop = f_own_property_names[i];
+    var f_desc = Object.getOwnPropertyDescriptor(f, prop);
+    var g_desc = Object.getOwnPropertyDescriptor(g, prop);
+    assertEquals(f_desc.configurable, g_desc.configurable, prop);
+    if (prop === 'arguments' || prop === 'caller') {
+      // Unlike sloppy functions, which have read-only data "arguments" and
+      // "caller" properties, sloppy generators have a poison pill implemented
+      // via accessors.
+      assertFalse('writable' in g_desc, prop);
+      assertTrue(g_desc.get instanceof Function, prop);
+      assertEquals(g_desc.get, g_desc.set, prop);
+    } else {
+      assertEquals(f_desc.writable, g_desc.writable, prop);
+    }
+    assertEquals(f_desc.enumerable, g_desc.enumerable, prop);
+  }
+}
+TestGeneratorFunctionInstance();
+
+
+// Generators have an additional object interposed in the chain between
+// themselves and Function.prototype.
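+//
+//   g --> GeneratorFunctionPrototype --> Function.prototype --> ...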
+function TestGeneratorFunctionPrototype() {
+  // Sanity check.
+  assertSame(Object.getPrototypeOf(f), Function.prototype);
+  assertFalse(GeneratorFunctionPrototype === Function.prototype);
+  assertSame(Function.prototype,
+             Object.getPrototypeOf(GeneratorFunctionPrototype));
+  assertSame(GeneratorFunctionPrototype,
+             Object.getPrototypeOf(function* () {}));
+}
+TestGeneratorFunctionPrototype();
+
+
+// Functions that we associate with generator objects are actually defined by
+// a common prototype.
+function TestGeneratorObjectPrototype() {
+  assertSame(Object.prototype,
+             Object.getPrototypeOf(GeneratorObjectPrototype));
+  assertSame(GeneratorObjectPrototype,
+             Object.getPrototypeOf((function*(){yield 1}).prototype));
+
+  var expected_property_names = ["next", "throw", "constructor"];
+  var found_property_names =
+      Object.getOwnPropertyNames(GeneratorObjectPrototype);
+
+  expected_property_names.sort();
+  found_property_names.sort();
+
+  assertArrayEquals(expected_property_names, found_property_names);
+
+  var iterator_desc = Object.getOwnPropertyDescriptor(
+      GeneratorObjectPrototype, Symbol.iterator);
+  assertTrue(iterator_desc !== undefined);
+  assertFalse(iterator_desc.writable);
+  assertFalse(iterator_desc.enumerable);
+  assertFalse(iterator_desc.configurable);
+
+  // The generator object's "iterator" function is just the identity.
+  assertSame(iterator_desc.value.call(42), 42);
+}
+TestGeneratorObjectPrototype();
+
+
+// This tests the object that would be called "GeneratorFunction", if it were
+// like "Function".
+function TestGeneratorFunction() {
+  assertSame(GeneratorFunctionPrototype, GeneratorFunction.prototype);
+  assertTrue(g instanceof GeneratorFunction);
+
+  assertSame(Function, Object.getPrototypeOf(GeneratorFunction));
+  assertTrue(g instanceof Function);
+
+  assertEquals("function* g() { yield 1; }", g.toString());
+
+  // Not all functions are generators.
+  assertTrue(f instanceof Function);  // Sanity check.
+  assertTrue(!(f instanceof GeneratorFunction));
+
+  assertTrue((new GeneratorFunction()) instanceof GeneratorFunction);
+  assertTrue(GeneratorFunction() instanceof GeneratorFunction);
+}
+TestGeneratorFunction();
+
+
+function TestPerGeneratorPrototype() {
+  assertTrue((function*(){}).prototype !== (function*(){}).prototype);
+  assertTrue((function*(){}).prototype !== g.prototype);
+  assertTrue(g.prototype instanceof GeneratorFunctionPrototype);
+  assertSame(GeneratorObjectPrototype, Object.getPrototypeOf(g.prototype));
+  assertTrue(!(g.prototype instanceof Function));
+  assertSame(typeof (g.prototype), "object");
+
+  assertArrayEquals([], Object.getOwnPropertyNames(g.prototype));
+}
+TestPerGeneratorPrototype();
diff --git a/test/mjsunit/es6/iteration-semantics.js b/test/mjsunit/es6/iteration-semantics.js
new file mode 100644
index 0000000..544c94d
--- /dev/null
+++ b/test/mjsunit/es6/iteration-semantics.js
@@ -0,0 +1,336 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping --harmony-proxies
+
+// Test for-of semantics.
+
+"use strict";
+
+
+// First, some helpers.
+
+function* values() {
+  for (var i = 0; i < arguments.length; i++) {
+    yield arguments[i];
+  }
+}
+
+function wrap_iterator(iterator) {
+  var iterable = {};
+  iterable[Symbol.iterator] = function() { return iterator; };
+  return iterable;
+}
+
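+// A plain (non-generator) iterable over 0 .. max-1, built directly on the
+// iterator protocol; note that its terminal result is {value: max, done: true}.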
+function integers_until(max) {
+  function next() {
+    var ret = { value: this.n, done: this.n == max };
+    this.n++;
+    return ret;
+  }
+  return wrap_iterator({ next: next, n: 0 });
+}
+
+function results(results) {
+  var i = 0;
+  function next() {
+    return results[i++];
+  }
+  return wrap_iterator({ next: next });
+}
+
+function* integers_from(n) {
+  while (1) yield n++;
+}
+
+// A destructive append.
+function append(x, tail) {
+  tail[tail.length] = x;
+  return tail;
+}
+
+function sum(x, tail) {
+  return x + tail;
+}
+
+function fold(cons, seed, iterable) {
+  for (var x of iterable) {
+    seed = cons(x, seed);
+  }
+  return seed;
+}
+
+function* take(iterable, n) {
+  if (n == 0) return;
+  for (let x of iterable) {
+    yield x;
+    if (--n == 0) break;
+  }
+}
+
+function nth(iterable, n) {
+  for (let x of iterable) {
+    if (n-- == 0) return x;
+  }
+  throw "unreachable";
+}
+
+function* skip_every(iterable, n) {
+  var i = 0;
+  for (let x of iterable) {
+    if (++i % n == 0) continue;
+    yield x;
+  }
+}
+
+function* iter_map(iterable, f) {
+  for (var x of iterable) {
+    yield f(x);
+  }
+}
+
+function nested_fold(cons, seed, iterable) {
+  for (let x of iterable) {
+    for (let y of x) {
+      seed = cons(y, seed);
+    }
+  }
+  return seed;
+}
+
+function* unreachable(iterable) {
+  for (let x of iterable) {
+    throw "not reached";
+  }
+}
+
+function one_time_getter(o, prop, val) {
+  function set_never() { throw "unreachable"; }
+  var gotten = false;
+  function get_once() {
+    if (gotten) throw "got twice";
+    gotten = true;
+    return val;
+  }
+  Object.defineProperty(o, prop, {get: get_once, set: set_never});
+  return o;
+}
+
+function never_getter(o, prop) {
+  function never() { throw "unreachable"; }
+  Object.defineProperty(o, prop, {get: never, set: never});
+  return o;
+}
+
+function remove_next_after(iterable, n) {
+  var iterator = iterable[Symbol.iterator]();
+  function next() {
+    if (n-- == 0) delete this.next;
+    return iterator.next();
+  }
+  return wrap_iterator({ next: next });
+}
+
+function poison_next_after(iterable, n) {
+  var iterator = iterable[Symbol.iterator]();
+  function next() {
+    return iterator.next();
+  }
+  function next_getter() {
+    if (n-- < 0)
+      throw "poisoned";
+    return next;
+  }
+  var o = {};
+  Object.defineProperty(o, 'next', { get: next_getter });
+  return wrap_iterator(o);
+}
+
+// Now, the tests.
+
+// Non-generator iterators.
+assertEquals(45, fold(sum, 0, integers_until(10)));
+// Generator iterators.
+assertEquals([1, 2, 3], fold(append, [], values(1, 2, 3)));
+// Break.
+assertEquals(45, fold(sum, 0, take(integers_from(0), 10)));
+// Continue.
+assertEquals(90, fold(sum, 0, take(skip_every(integers_from(0), 2), 10)));
+// Return.
+assertEquals(10, nth(integers_from(0), 10));
+// Nested for-of.
+assertEquals([0, 0, 1, 0, 1, 2, 0, 1, 2, 3],
+             nested_fold(append,
+                         [],
+                         iter_map(integers_until(5), integers_until)));
+// Result objects with sparse fields.
+assertEquals([undefined, 1, 2, 3],
+             fold(append, [],
+                  results([{ done: false },
+                           { value: 1, done: false },
+                           // A missing "done" is the same as undefined, which
+                           // is false.
+                           { value: 2 },
+                           // Not done.
+                           { value: 3, done: 0 },
+                           // Done.
+                           { value: 4, done: 42 }])));
+// Results that are not objects.
+assertEquals([undefined, undefined, undefined],
+             fold(append, [],
+                  results([10, "foo", /qux/, { value: 37, done: true }])));
+// Getters (shudder).
+assertEquals([1, 2],
+             fold(append, [],
+                  results([one_time_getter({ value: 1 }, 'done', false),
+                           one_time_getter({ done: false }, 'value', 2),
+                           { value: 37, done: true },
+                           never_getter(never_getter({}, 'done'), 'value')])));
+
+// Unlike the case with for-in, null and undefined cause an error.
+assertThrows('fold(sum, 0, unreachable(null))', TypeError);
+assertThrows('fold(sum, 0, unreachable(undefined))', TypeError);
+
+// Other non-iterators cause an error as well.
+assertThrows('fold(sum, 0, unreachable({}))', TypeError);
+assertThrows('fold(sum, 0, unreachable(false))', TypeError);
+assertThrows('fold(sum, 0, unreachable(37))', TypeError);
+
+// "next" is looked up each time.
+assertThrows('fold(sum, 0, remove_next_after(integers_until(10), 5))',
+             TypeError);
+// It is not called at any other time.
+assertEquals(45,
+             fold(sum, 0, remove_next_after(integers_until(10), 10)));
+// It is not looked up too many times.
+assertEquals(45,
+             fold(sum, 0, poison_next_after(integers_until(10), 10)));
+
+function labelled_continue(iterable) {
+  var n = 0;
+outer:
+  while (true) {
+    n++;
+    for (var x of iterable) continue outer;
+    break;
+  }
+  return n;
+}
+assertEquals(11, labelled_continue(integers_until(10)));
+
+function labelled_break(iterable) {
+  var n = 0;
+outer:
+  while (true) {
+    n++;
+    for (var x of iterable) break outer;
+  }
+  return n;
+}
+assertEquals(1, labelled_break(integers_until(10)));
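+
+// Why 11 and 1: each call above shares one stateful iterator across outer
+// passes. In labelled_continue every pass consumes exactly one element via
+// `continue outer`, and pass 11 finds the iterator exhausted, so the for-of
+// body is skipped and `break` runs. In labelled_break the first element
+// already breaks out of the outer loop, leaving n at 1.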
+
+// Test continue/break in catch.
+function catch_control(iterable, k) {
+  for (var x of iterable) {
+    try {
+      return k(x);
+    } catch (e) {
+      if (e == "continue") continue;
+      else if (e == "break") break;
+      else throw e;
+    }
+  }
+  return false;
+}
+assertEquals(false,
+             catch_control(integers_until(10),
+                           function() { throw "break" }));
+assertEquals(false,
+             catch_control(integers_until(10),
+                           function() { throw "continue" }));
+assertEquals(5,
+             catch_control(integers_until(10),
+                           function(x) {
+                             if (x == 5) return x;
+                             throw "continue";
+                           }));
+
+// Test continue/break in try.
+function try_control(iterable, k) {
+  for (var x of iterable) {
+    try {
+      var e = k(x);
+      if (e == "continue") continue;
+      else if (e == "break") break;
+      return e;
+    } catch (e) {
+      throw e;
+    }
+  }
+  return false;
+}
+assertEquals(false,
+             try_control(integers_until(10),
+                         function() { return "break" }));
+assertEquals(false,
+             try_control(integers_until(10),
+                         function() { return "continue" }));
+assertEquals(5,
+             try_control(integers_until(10),
+                         function(x) { return (x == 5) ? x : "continue" }));
+
+// Proxy results, with getters.
+function transparent_proxy(x) {
+  return Proxy.create({
+    get: function(receiver, name) { return x[name]; }
+  });
+}
+assertEquals([1, 2],
+             fold(append, [],
+                  results([one_time_getter({ value: 1 }, 'done', false),
+                           one_time_getter({ done: false }, 'value', 2),
+                           { value: 37, done: true },
+                           never_getter(never_getter({}, 'done'), 'value')]
+                          .map(transparent_proxy))));
+
+// Proxy iterators.
+function poison_proxy_after(iterable, n) {
+  var iterator = iterable[Symbol.iterator]();
+  return wrap_iterator(Proxy.create({
+    get: function(receiver, name) {
+      if (name == 'next' && n-- < 0) throw "unreachable";
+      return iterator[name];
+    },
+    // Needed for integers_until(10)'s this.n++.
+    set: function(receiver, name, val) {
+      return iterator[name] = val;
+    }
+  }));
+}
+assertEquals(45, fold(sum, 0, poison_proxy_after(integers_until(10), 10)));
diff --git a/test/mjsunit/es6/iteration-syntax.js b/test/mjsunit/es6/iteration-syntax.js
new file mode 100644
index 0000000..356a978
--- /dev/null
+++ b/test/mjsunit/es6/iteration-syntax.js
@@ -0,0 +1,70 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping --use-strict
+
+// Test for-of syntax.
+
+"use strict";
+
+function f() { for (x of y) { } }
+function f() { for (var x of y) { } }
+function f() { for (let x of y) { } }
+
+assertThrows("function f() { for (x of) { } }", SyntaxError);
+assertThrows("function f() { for (x of y z) { } }", SyntaxError);
+assertThrows("function f() { for (x of y;) { } }", SyntaxError);
+
+assertThrows("function f() { for (var x of) { } }", SyntaxError);
+assertThrows("function f() { for (var x of y z) { } }", SyntaxError);
+assertThrows("function f() { for (var x of y;) { } }", SyntaxError);
+
+assertThrows("function f() { for (let x of) { } }", SyntaxError);
+assertThrows("function f() { for (let x of y z) { } }", SyntaxError);
+assertThrows("function f() { for (let x of y;) { } }", SyntaxError);
+
+assertThrows("function f() { for (of y) { } }", SyntaxError);
+assertThrows("function f() { for (of of) { } }", SyntaxError);
+assertThrows("function f() { for (var of y) { } }", SyntaxError);
+assertThrows("function f() { for (var of of) { } }", SyntaxError);
+assertThrows("function f() { for (let of y) { } }", SyntaxError);
+assertThrows("function f() { for (let of of) { } }", SyntaxError);
+
+assertThrows("function f() { for (x = 3 of y) { } }", SyntaxError);
+assertThrows("function f() { for (var x = 3 of y) { } }", SyntaxError);
+assertThrows("function f() { for (let x = 3 of y) { } }", SyntaxError);
+
+
+// Alack, this appears to be valid.
+function f() { for (of of y) { } }
+function f() { for (let of of y) { } }
+function f() { for (var of of y) { } }
+
+// This too, of course.
+function f() { for (of in y) { } }
+function f() { for (var of in y) { } }
+function f() { for (let of in y) { } }
diff --git a/test/mjsunit/es6/math-cbrt.js b/test/mjsunit/es6/math-cbrt.js
index 83d9eb5..713c020 100644
--- a/test/mjsunit/es6/math-cbrt.js
+++ b/test/mjsunit/es6/math-cbrt.js
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-maths
-
 assertTrue(isNaN(Math.cbrt(NaN)));
 assertTrue(isNaN(Math.cbrt(function() {})));
 assertTrue(isNaN(Math.cbrt({ toString: function() { return NaN; } })));
diff --git a/test/mjsunit/es6/math-clz32.js b/test/mjsunit/es6/math-clz32.js
index 816f6a9..3cbd4c3 100644
--- a/test/mjsunit/es6/math-clz32.js
+++ b/test/mjsunit/es6/math-clz32.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-maths --allow-natives-syntax
+// Flags: --allow-natives-syntax
 
 [NaN, Infinity, -Infinity, 0, -0, "abc", "Infinity", "-Infinity", {}].forEach(
   function(x) {
diff --git a/test/mjsunit/es6/math-expm1.js b/test/mjsunit/es6/math-expm1.js
index de915c0..7cbb1b4 100644
--- a/test/mjsunit/es6/math-expm1.js
+++ b/test/mjsunit/es6/math-expm1.js
@@ -2,25 +2,28 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-maths --no-fast-math
+// Flags: --no-fast-math
 
 assertTrue(isNaN(Math.expm1(NaN)));
 assertTrue(isNaN(Math.expm1(function() {})));
 assertTrue(isNaN(Math.expm1({ toString: function() { return NaN; } })));
 assertTrue(isNaN(Math.expm1({ valueOf: function() { return "abc"; } })));
-assertEquals("Infinity", String(1/Math.expm1(0)));
-assertEquals("-Infinity", String(1/Math.expm1(-0)));
-assertEquals("Infinity", String(Math.expm1(Infinity)));
+assertEquals(Infinity, 1/Math.expm1(0));
+assertEquals(-Infinity, 1/Math.expm1(-0));
+assertEquals(Infinity, Math.expm1(Infinity));
 assertEquals(-1, Math.expm1(-Infinity));
 
-for (var x = 0.1; x < 700; x += 0.1) {
+
+// Sanity check:
+// Math.expm1(x) stays reasonably close to Math.exp(x) - 1 for large values.
+for (var x = 1; x < 700; x += 0.25) {
   var expected = Math.exp(x) - 1;
-  assertEqualsDelta(expected, Math.expm1(x), expected * 1E-14);
+  assertEqualsDelta(expected, Math.expm1(x), expected * 1E-15);
   expected = Math.exp(-x) - 1;
-  assertEqualsDelta(expected, Math.expm1(-x), -expected * 1E-14);
+  assertEqualsDelta(expected, Math.expm1(-x), -expected * 1E-15);
 }
 
-// Values close to 0:
+// Approximation for values close to 0:
 // Use six terms of Taylor expansion at 0 for exp(x) as test expectation:
 // exp(x) - 1 == exp(0) + exp(0) * x + x * x / 2 + ... - 1
 //            == x + x * x / 2 + x * x * x / 6 + ...
@@ -32,7 +35,44 @@
               1/362880 + x * (1/3628800))))))))));
 }
 
+// Sanity check:
+// Math.expm1(x) stays reasonably close to the Taylor series for small values.
 for (var x = 1E-1; x > 1E-300; x *= 0.8) {
   var expected = expm1(x);
-  assertEqualsDelta(expected, Math.expm1(x), expected * 1E-14);
+  assertEqualsDelta(expected, Math.expm1(x), expected * 1E-15);
 }
+
+
+// Tests related to the fdlibm implementation.
+// Test overflow.
+assertEquals(Infinity, Math.expm1(709.8));
+// Test largest double value.
+assertEquals(Infinity, Math.expm1(1.7976931348623157e308));
+// Cover various code paths.
+assertEquals(-1, Math.expm1(-56 * Math.LN2));
+assertEquals(-1, Math.expm1(-50));
+// Test most negative double value.
+assertEquals(-1, Math.expm1(-1.7976931348623157e308));
+// Test argument reduction.
+// Cases for 0.5*log(2) < |x| < 1.5*log(2).
+assertEquals(Math.E - 1, Math.expm1(1));
+assertEquals(1/Math.E - 1, Math.expm1(-1));
+// Cases for 1.5*log(2) < |x|.
+assertEquals(6.38905609893065, Math.expm1(2));
+assertEquals(-0.8646647167633873, Math.expm1(-2));
+// Cases where Math.expm1(x) = x.
+assertEquals(0, Math.expm1(0));
+assertEquals(Math.pow(2,-55), Math.expm1(Math.pow(2,-55)));
+// Tests for the case where argument reduction has x in the primary range.
+// Test branch for k = 0.
+assertEquals(0.18920711500272105, Math.expm1(0.25 * Math.LN2));
+// Test branch for k = -1.
+assertEquals(-0.5, Math.expm1(-Math.LN2));
+// Test branch for k = 1.
+assertEquals(1, Math.expm1(Math.LN2));
+// Test branch for k <= -2 || k > 56. k = 57.
+assertEquals(1.4411518807585582e17, Math.expm1(57 * Math.LN2));
+// Test last branch for k < 20, k = 19.
+assertEquals(524286.99999999994, Math.expm1(19 * Math.LN2));
+// Test the else branch, k = 20.
+assertEquals(1048575, Math.expm1(20 * Math.LN2));
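+// The expected values above follow from expm1(k * Math.LN2) == 2^k - 1
+// (e.g. 2^20 - 1 == 1048575); small deviations such as 524286.99999999994
+// stem from rounding in the double-precision product k * Math.LN2.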
diff --git a/test/mjsunit/es6/math-fround.js b/test/mjsunit/es6/math-fround.js
index ea432ea..c53396a 100644
--- a/test/mjsunit/es6/math-fround.js
+++ b/test/mjsunit/es6/math-fround.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-maths
+// Flags: --allow-natives-syntax
 
 // Monkey-patch Float32Array.
 Float32Array = function(x) { this[0] = 0; };
@@ -11,15 +11,33 @@
 assertTrue(isNaN(Math.fround(function() {})));
 assertTrue(isNaN(Math.fround({ toString: function() { return NaN; } })));
 assertTrue(isNaN(Math.fround({ valueOf: function() { return "abc"; } })));
-assertEquals("Infinity", String(1/Math.fround(0)));
-assertEquals("-Infinity", String(1/Math.fround(-0)));
-assertEquals("Infinity", String(Math.fround(Infinity)));
-assertEquals("-Infinity", String(Math.fround(-Infinity)));
 
-assertEquals("Infinity", String(Math.fround(1E200)));
-assertEquals("-Infinity", String(Math.fround(-1E200)));
-assertEquals("Infinity", String(1/Math.fround(1E-300)));
-assertEquals("-Infinity", String(1/Math.fround(-1E-300)));
+function unopt(x) { return Math.fround(x); }
+function opt(y) { return Math.fround(y); }
+
+opt(0.1);
+opt(0.1);
+unopt(0.1);
+%NeverOptimizeFunction(unopt);
+%OptimizeFunctionOnNextCall(opt);
+
+function test(f) {
+  assertEquals("Infinity", String(1/f(0)));
+  assertEquals("-Infinity", String(1/f(-0)));
+  assertEquals("Infinity", String(f(Infinity)));
+  assertEquals("-Infinity", String(f(-Infinity)));
+  assertEquals("Infinity", String(f(1E200)));
+  assertEquals("-Infinity", String(f(-1E200)));
+  assertEquals("Infinity", String(1/f(1E-300)));
+  assertEquals("-Infinity", String(1/f(-1E-300)));
+}
+
+test(opt);
+test(unopt);
 
 mantissa_23_shift = Math.pow(2, -23);
 mantissa_29_shift = Math.pow(2, -23-29);
@@ -81,13 +99,16 @@
 
 
 var pi = new ieee754float(0, 0x400, 0x490fda, 0x14442d18);
-assertEquals(pi.toSingle(), Math.fround(pi.toDouble()));
+assertEquals(pi.toSingle(), opt(pi.toDouble()));
+assertEquals(pi.toSingle(), unopt(pi.toDouble()));
+
 
 function fuzz_mantissa(sign, exp, m1inc, m2inc) {
   for (var m1 = 0; m1 < (1 << 23); m1 += m1inc) {
     for (var m2 = 0; m2 < (1 << 29); m2 += m2inc) {
       var float = new ieee754float(sign, exp, m1, m2);
-      assertEquals(float.toSingle(), Math.fround(float.toDouble()));
+      assertEquals(float.toSingle(), unopt(float.toDouble()));
+      assertEquals(float.toSingle(), opt(float.toDouble()));
     }
   }
 }
diff --git a/test/mjsunit/es6/math-hyperbolic.js b/test/mjsunit/es6/math-hyperbolic.js
index c45a19c..8970f6e 100644
--- a/test/mjsunit/es6/math-hyperbolic.js
+++ b/test/mjsunit/es6/math-hyperbolic.js
@@ -25,7 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-maths
+// TODO(3468): we rely on a precise Math.exp.
+// Flags: --no-fast-math
 
 [Math.sinh, Math.cosh, Math.tanh, Math.asinh, Math.acosh, Math.atanh].
     forEach(function(fun) {
@@ -68,14 +69,14 @@
 });
 
 
-assertEquals("Infinity", String(Math.cosh(-Infinity)));
-assertEquals("Infinity", String(Math.cosh(Infinity)));
-assertEquals("Infinity", String(Math.cosh("-Infinity")));
-assertEquals("Infinity", String(Math.cosh("Infinity")));
+assertEquals(Infinity, Math.cosh(-Infinity));
+assertEquals(Infinity, Math.cosh(Infinity));
+assertEquals(Infinity, Math.cosh("-Infinity"));
+assertEquals(Infinity, Math.cosh("Infinity"));
 
 
-assertEquals("-Infinity", String(Math.atanh(-1)));
-assertEquals("Infinity", String(Math.atanh(1)));
+assertEquals(-Infinity, Math.atanh(-1));
+assertEquals(Infinity, Math.atanh(1));
 
 // Math.atanh(x) is NaN for |x| > 1 and for NaN input.
 [1.000000000001, Math.PI, 10000000, 2, Infinity, NaN].forEach(function(x) {
@@ -84,6 +85,8 @@
 });
 
 
+assertEquals(0, Math.sinh(0));
+assertEquals(-Infinity, 1/Math.sinh(-0));
 assertEquals(1, Math.tanh(Infinity));
 assertEquals(-1, Math.tanh(-Infinity));
 assertEquals(1, Math.cosh(0));
@@ -99,15 +102,13 @@
 
 
 // Some random samples.
-assertEqualsDelta(0.5210953054937, Math.sinh(0.5), 1E-12);
-assertEqualsDelta(74.203210577788, Math.sinh(5), 1E-12);
-assertEqualsDelta(-0.5210953054937, Math.sinh(-0.5), 1E-12);
-assertEqualsDelta(-74.203210577788, Math.sinh(-5), 1E-12);
+assertEqualsDelta(74.20321057778875, Math.sinh(5), 1E-12);
+assertEqualsDelta(-74.20321057778875, Math.sinh(-5), 1E-12);
 
-assertEqualsDelta(1.1276259652063, Math.cosh(0.5), 1E-12);
-assertEqualsDelta(74.209948524787, Math.cosh(5), 1E-12);
-assertEqualsDelta(1.1276259652063, Math.cosh(-0.5), 1E-12);
-assertEqualsDelta(74.209948524787, Math.cosh(-5), 1E-12);
+assertEqualsDelta(1.1276259652063807, Math.cosh(0.5), 1E-12);
+assertEqualsDelta(74.20994852478785, Math.cosh(5), 1E-12);
+assertEqualsDelta(1.1276259652063807, Math.cosh(-0.5), 1E-12);
+assertEqualsDelta(74.20994852478785, Math.cosh(-5), 1E-12);
 
 assertEqualsDelta(0.4621171572600, Math.tanh(0.5), 1E-12);
 assertEqualsDelta(0.9999092042625, Math.tanh(5), 1E-12);
@@ -136,3 +137,52 @@
 [1-(1E-16), 0, 1E-10, 1E-50].forEach(function(x) {
   assertEqualsDelta(Math.atanh(x), -Math.atanh(-x), 1E-12);
 });
+
+
+// Implementation-specific tests for sinh.
+// Case |x| < 2^-28
+assertEquals(Math.pow(2, -29), Math.sinh(Math.pow(2, -29)));
+assertEquals(-Math.pow(2, -29), Math.sinh(-Math.pow(2, -29)));
+// Case |x| < 1
+assertEquals(0.5210953054937474, Math.sinh(0.5));
+assertEquals(-0.5210953054937474, Math.sinh(-0.5));
+// sinh(10*log(2)) = 1048575/2048, case |x| < 22
+assertEquals(1048575/2048, Math.sinh(10*Math.LN2));
+assertEquals(-1048575/2048, Math.sinh(-10*Math.LN2));
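+// (Derivation: sinh(10*ln2) = (e^(10*ln2) - e^(-10*ln2))/2
+//            = (1024 - 1/1024)/2 = 1048575/2048.)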
+// Case |x| < 22
+assertEquals(11013.232874703393, Math.sinh(10));
+assertEquals(-11013.232874703393, Math.sinh(-10));
+// Case |x| in [22, log(maxdouble)]
+assertEquals(2.1474836479999983e9, Math.sinh(32*Math.LN2));
+assertEquals(-2.1474836479999983e9, Math.sinh(-32*Math.LN2));
+// Case |x| in [22, log(maxdouble)]
+assertEquals(1.3440585709080678e43, Math.sinh(100));
+assertEquals(-1.3440585709080678e43, Math.sinh(-100));
+// No overflow, case |x| in [log(maxdouble), threshold]
+assertEquals(1.7976931348621744e308, Math.sinh(710.4758600739439));
+assertEquals(-1.7976931348621744e308, Math.sinh(-710.4758600739439));
+// Overflow, case |x| > threshold
+assertEquals(Infinity, Math.sinh(710.475860073944));
+assertEquals(-Infinity, Math.sinh(-710.475860073944));
+assertEquals(Infinity, Math.sinh(1000));
+assertEquals(-Infinity, Math.sinh(-1000));
+
+// Implementation-specific tests for cosh.
+// Case |x| < 2^-55
+assertEquals(1, Math.cosh(Math.pow(2, -56)));
+assertEquals(1, Math.cosh(-Math.pow(2, -56)));
+// Case |x| < 1/2*log(2). cosh(Math.LN2/4) = (sqrt(2)+1)/2^(5/4)
+assertEquals(1.0150517651282178, Math.cosh(Math.LN2/4));
+assertEquals(1.0150517651282178, Math.cosh(-Math.LN2/4));
+// Case 1/2*log(2) < |x| < 22. cosh(10*Math.LN2) = 1048577/2048
+assertEquals(512.00048828125, Math.cosh(10*Math.LN2));
+assertEquals(512.00048828125, Math.cosh(-10*Math.LN2));
+// Case 22 <= |x| < log(maxdouble)
+assertEquals(2.1474836479999983e9, Math.cosh(32*Math.LN2));
+assertEquals(2.1474836479999983e9, Math.cosh(-32*Math.LN2));
+// Case log(maxdouble) <= |x| <= overflowthreshold
+assertEquals(1.7976931348621744e308, Math.cosh(710.4758600739439));
+assertEquals(1.7976931348621744e308, Math.cosh(-710.4758600739439));
+// Overflow.
+assertEquals(Infinity, Math.cosh(710.475860073944));
+assertEquals(Infinity, Math.cosh(-710.475860073944));
diff --git a/test/mjsunit/es6/math-hypot.js b/test/mjsunit/es6/math-hypot.js
index 1052627..d2392df 100644
--- a/test/mjsunit/es6/math-hypot.js
+++ b/test/mjsunit/es6/math-hypot.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-maths
-
 assertTrue(isNaN(Math.hypot({})));
 assertTrue(isNaN(Math.hypot(undefined, 1)));
 assertTrue(isNaN(Math.hypot(1, undefined)));
diff --git a/test/mjsunit/es6/math-log1p.js b/test/mjsunit/es6/math-log1p.js
index eefea6e..5468444 100644
--- a/test/mjsunit/es6/math-log1p.js
+++ b/test/mjsunit/es6/math-log1p.js
@@ -2,22 +2,20 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-maths
-
 assertTrue(isNaN(Math.log1p(NaN)));
 assertTrue(isNaN(Math.log1p(function() {})));
 assertTrue(isNaN(Math.log1p({ toString: function() { return NaN; } })));
 assertTrue(isNaN(Math.log1p({ valueOf: function() { return "abc"; } })));
-assertEquals("Infinity", String(1/Math.log1p(0)));
-assertEquals("-Infinity", String(1/Math.log1p(-0)));
-assertEquals("Infinity", String(Math.log1p(Infinity)));
-assertEquals("-Infinity", String(Math.log1p(-1)));
+assertEquals(Infinity, 1/Math.log1p(0));
+assertEquals(-Infinity, 1/Math.log1p(-0));
+assertEquals(Infinity, Math.log1p(Infinity));
+assertEquals(-Infinity, Math.log1p(-1));
 assertTrue(isNaN(Math.log1p(-2)));
 assertTrue(isNaN(Math.log1p(-Infinity)));
 
-for (var x = 1E300; x > 1E-1; x *= 0.8) {
+for (var x = 1E300; x > 1E16; x *= 0.8) {
   var expected = Math.log(x + 1);
-  assertEqualsDelta(expected, Math.log1p(x), expected * 1E-14);
+  assertEqualsDelta(expected, Math.log1p(x), expected * 1E-16);
 }
 
 // Values close to 0:
@@ -37,5 +35,36 @@
 
 for (var x = 1E-1; x > 1E-300; x *= 0.8) {
   var expected = log1p(x);
-  assertEqualsDelta(expected, Math.log1p(x), expected * 1E-14);
+  assertEqualsDelta(expected, Math.log1p(x), expected * 1E-16);
 }
+
+// Issue 3481.
+assertEquals(6.9756137364252422e-03,
+             Math.log1p(8070450532247929/Math.pow(2,60)));
+
+// Tests related to the fdlibm implementation.
+// Test largest double value.
+assertEquals(709.782712893384, Math.log1p(1.7976931348623157e308));
+// Test small values.
+assertEquals(Math.pow(2, -55), Math.log1p(Math.pow(2, -55)));
+assertEquals(9.313225741817976e-10, Math.log1p(Math.pow(2, -30)));
+// Cover various code paths.
+// -.2929 < x < .41422, k = 0
+assertEquals(-0.2876820724517809, Math.log1p(-0.25));
+assertEquals(0.22314355131420976, Math.log1p(0.25));
+// 0.41422 < x < 9.007e15
+assertEquals(2.3978952727983707, Math.log1p(10));
+// x > 9.007e15
+assertEquals(36.841361487904734, Math.log1p(10e15));
+// Normalize u.
+assertEquals(37.08337388996168, Math.log1p(12738099905822720));
+// Normalize u/2.
+assertEquals(37.08336444902049, Math.log1p(12737979646738432));
+// |f| = 0, k != 0
+assertEquals(1.3862943611198906, Math.log1p(3));
+// |f| != 0, k != 0
+assertEquals(1.3862945995384413, Math.log1p(3 + Math.pow(2,-20)));
+// final if-clause: k = 0
+assertEquals(0.5596157879354227, Math.log1p(0.75));
+// final if-clause: k != 0
+assertEquals(0.8109302162163288, Math.log1p(1.25));
diff --git a/test/mjsunit/es6/math-log2-log10.js b/test/mjsunit/es6/math-log2-log10.js
index 2ab4960..4479894 100644
--- a/test/mjsunit/es6/math-log2-log10.js
+++ b/test/mjsunit/es6/math-log2-log10.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-maths
-
 [Math.log10, Math.log2].forEach( function(fun) {
   assertTrue(isNaN(fun(NaN)));
   assertTrue(isNaN(fun(fun)));
diff --git a/test/mjsunit/es6/math-sign.js b/test/mjsunit/es6/math-sign.js
index 8a89d62..65f1609 100644
--- a/test/mjsunit/es6/math-sign.js
+++ b/test/mjsunit/es6/math-sign.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-maths
-
 assertEquals("Infinity", String(1/Math.sign(0)));
 assertEquals("-Infinity", String(1/Math.sign(-0)));
 assertEquals(1, Math.sign(100));
diff --git a/test/mjsunit/es6/math-trunc.js b/test/mjsunit/es6/math-trunc.js
index ed91ed1..9231576 100644
--- a/test/mjsunit/es6/math-trunc.js
+++ b/test/mjsunit/es6/math-trunc.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-maths
-
 assertEquals("Infinity", String(1/Math.trunc(0)));
 assertEquals("-Infinity", String(1/Math.trunc(-0)));
 assertEquals("Infinity", String(1/Math.trunc(Math.PI/4)));
diff --git a/test/mjsunit/es6/mirror-collections.js b/test/mjsunit/es6/mirror-collections.js
new file mode 100644
index 0000000..e10f5c1
--- /dev/null
+++ b/test/mjsunit/es6/mirror-collections.js
@@ -0,0 +1,144 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --expose-gc
+
+function testMapMirror(mirror) {
+  // Create JSON representation.
+  var serializer = debug.MakeMirrorSerializer();
+  var json = JSON.stringify(serializer.serializeValue(mirror));
+
+  // Check the mirror hierarchy.
+  assertTrue(mirror instanceof debug.Mirror);
+  assertTrue(mirror instanceof debug.ValueMirror);
+  assertTrue(mirror instanceof debug.ObjectMirror);
+  assertTrue(mirror instanceof debug.MapMirror);
+
+  assertTrue(mirror.isMap());
+
+  // Parse JSON representation and check.
+  var fromJSON = eval('(' + json + ')');
+  assertEquals('map', fromJSON.type);
+}
+
+function testSetMirror(mirror) {
+  // Create JSON representation.
+  var serializer = debug.MakeMirrorSerializer();
+  var json = JSON.stringify(serializer.serializeValue(mirror));
+
+  // Check the mirror hierarchy.
+  assertTrue(mirror instanceof debug.Mirror);
+  assertTrue(mirror instanceof debug.ValueMirror);
+  assertTrue(mirror instanceof debug.ObjectMirror);
+  assertTrue(mirror instanceof debug.SetMirror);
+
+  assertTrue(mirror.isSet());
+
+  // Parse JSON representation and check.
+  var fromJSON = eval('(' + json + ')');
+  assertEquals('set', fromJSON.type);
+}
+
+var o1 = new Object();
+var o2 = new Object();
+var o3 = new Object();
+
+// Test the mirror object for Maps
+var map = new Map();
+map.set(o1, 11);
+map.set(o2, 22);
+map.delete(o1);
+var mapMirror = debug.MakeMirror(map);
+testMapMirror(mapMirror);
+var entries = mapMirror.entries();
+assertEquals(1, entries.length);
+assertSame(o2, entries[0].key);
+assertEquals(22, entries[0].value);
+map.set(o1, 33);
+map.set(o3, o2);
+map.delete(o2);
+map.set(undefined, 44);
+entries = mapMirror.entries();
+assertEquals(3, entries.length);
+assertSame(o1, entries[0].key);
+assertEquals(33, entries[0].value);
+assertSame(o3, entries[1].key);
+assertSame(o2, entries[1].value);
+assertEquals(undefined, entries[2].key);
+assertEquals(44, entries[2].value);
+
+// Test the mirror object for Sets
+var set = new Set();
+set.add(o1);
+set.add(o2);
+set.delete(o1);
+set.add(undefined);
+var setMirror = debug.MakeMirror(set);
+testSetMirror(setMirror);
+var values = setMirror.values();
+assertEquals(2, values.length);
+assertSame(o2, values[0]);
+assertEquals(undefined, values[1]);
+
+// Test the mirror object for WeakMaps
+var weakMap = new WeakMap();
+weakMap.set(o1, 11);
+weakMap.set(new Object(), 22);
+weakMap.set(o3, 33);
+weakMap.set(new Object(), 44);
+var weakMapMirror = debug.MakeMirror(weakMap);
+testMapMirror(weakMapMirror);
+weakMap.set(new Object(), 55);
+assertTrue(weakMapMirror.entries().length <= 5);
+gc();
+
+function testWeakMapEntries(weakMapMirror) {
+  var entries = weakMapMirror.entries();
+  assertEquals(2, entries.length);
+  var found = 0;
+  for (var i = 0; i < entries.length; i++) {
+    if (Object.is(entries[i].key, o1)) {
+      assertEquals(11, entries[i].value);
+      found++;
+    }
+    if (Object.is(entries[i].key, o3)) {
+      assertEquals(33, entries[i].value);
+      found++;
+    }
+  }
+  assertEquals(2, found);
+}
+
+testWeakMapEntries(weakMapMirror);
+
+// Test the mirror object for WeakSets
+var weakSet = new WeakSet();
+weakSet.add(o1);
+weakSet.add(new Object());
+weakSet.add(o2);
+weakSet.add(new Object());
+weakSet.add(new Object());
+weakSet.add(o3);
+weakSet.delete(o2);
+var weakSetMirror = debug.MakeMirror(weakSet);
+testSetMirror(weakSetMirror);
+assertTrue(weakSetMirror.values().length <= 5);
+gc();
+
+function testWeakSetValues(weakSetMirror) {
+  var values = weakSetMirror.values();
+  assertEquals(2, values.length);
+  var found = 0;
+  for (var i = 0; i < values.length; i++) {
+    if (Object.is(values[i], o1)) {
+      found++;
+    }
+    if (Object.is(values[i], o3)) {
+      found++;
+    }
+  }
+  assertEquals(2, found);
+}
+
+testWeakSetValues(weakSetMirror);
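+
+// Editorial note: the pre-gc assertions above can only bound the counts
+// (<= 5) because weakly held keys may or may not have been collected yet;
+// the exact-count helpers run after gc(), once unreachable keys are gone.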
diff --git a/test/mjsunit/es6/mirror-promises.js b/test/mjsunit/es6/mirror-promises.js
index bce26f4..deeba8f 100644
--- a/test/mjsunit/es6/mirror-promises.js
+++ b/test/mjsunit/es6/mirror-promises.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --expose-debug-as debug --harmony-promises
+// Flags: --expose-debug-as debug
 // Test the mirror object for promises.
 
 function MirrorRefCache(json_refs) {
diff --git a/test/mjsunit/es6/mirror-symbols.js b/test/mjsunit/es6/mirror-symbols.js
index 0f43491..f218332 100644
--- a/test/mjsunit/es6/mirror-symbols.js
+++ b/test/mjsunit/es6/mirror-symbols.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --expose-debug-as debug --harmony-symbols
+// Flags: --expose-debug-as debug
 // Test the mirror object for symbols.
 
 function testSymbolMirror(symbol, description) {
diff --git a/test/mjsunit/es6/promises.js b/test/mjsunit/es6/promises.js
index 6dfe926..faf154e 100644
--- a/test/mjsunit/es6/promises.js
+++ b/test/mjsunit/es6/promises.js
@@ -27,6 +27,42 @@
 
 // Flags: --allow-natives-syntax
 
+// Make sure we don't rely on functions that user code can monkey-patch.
+var call = Function.prototype.call.call.bind(Function.prototype.call)
+var observe = Object.observe;
+var getOwnPropertyNames = Object.getOwnPropertyNames
+var defineProperty = Object.defineProperty
+
+function clear(o) {
+  if (o === null || (typeof o !== 'object' && typeof o !== 'function')) return
+  clear(o.__proto__)
+  var properties = getOwnPropertyNames(o)
+  for (var i in properties) {
+    clearProp(o, properties[i])
+  }
+}
+
+function clearProp(o, name) {
+  var poisoned = {caller: 0, callee: 0, arguments: 0}
+  try {
+    var x = o[name]
+    o[name] = undefined
+    clear(x)
+  } catch(e) {} // assertTrue(name in poisoned) }
+}
+
+// Find intrinsics and null them out.
+var globals = Object.getOwnPropertyNames(this)
+var whitelist = {Promise: true, TypeError: true}
+for (var i in globals) {
+  var name = globals[i]
+  if (name in whitelist || name[0] === name[0].toLowerCase()) delete globals[i]
+}
+for (var i in globals) {
+  if (globals[i]) clearProp(this, globals[i])
+}
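+// (Editorial note: the preamble above nulls out every deletable global
+// except the whitelisted ones, so the assertions below pass only if the
+// Promise implementation keeps internal references to the intrinsics it
+// needs instead of reading monkey-patchable globals at call time.)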
+
+
 var asyncAssertsExpected = 0;
 
 function assertAsyncRan() { ++asyncAssertsExpected }
@@ -43,7 +79,7 @@
 function assertAsyncDone(iteration) {
   var iteration = iteration || 0
   var dummy = {}
-  Object.observe(dummy,
+  observe(dummy,
     function() {
       if (asyncAssertsExpected === 0)
         assertAsync(true, "all")
@@ -777,13 +813,13 @@
   MyPromise.__proto__ = Promise
   MyPromise.defer = function() {
     log += "d"
-    return this.__proto__.defer.call(this)
+    return call(this.__proto__.defer, this)
   }
 
   MyPromise.prototype.__proto__ = Promise.prototype
   MyPromise.prototype.chain = function(resolve, reject) {
     log += "c"
-    return this.__proto__.__proto__.chain.call(this, resolve, reject)
+    return call(this.__proto__.__proto__.chain, this, resolve, reject)
   }
 
   log = ""
diff --git a/test/mjsunit/es6/regress/regress-2186.js b/test/mjsunit/es6/regress/regress-2186.js
new file mode 100644
index 0000000..c82242a
--- /dev/null
+++ b/test/mjsunit/es6/regress/regress-2186.js
@@ -0,0 +1,47 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function heapify(i) {
+  return 2.0 * (i / 2);
+}
+heapify(1);
+
+var ONE = 1;
+var ANOTHER_ONE = heapify(ONE);
+assertSame(ONE, ANOTHER_ONE);
+assertEquals("number", typeof ONE);
+assertEquals("number", typeof ANOTHER_ONE);
+
+var set = new Set;
+set.add(ONE);
+assertTrue(set.has(ONE));
+assertTrue(set.has(ANOTHER_ONE));
+
+var map = new Map;
+map.set(ONE, 23);
+assertSame(23, map.get(ONE));
+assertSame(23, map.get(ANOTHER_ONE));
diff --git a/test/mjsunit/es6/regress/regress-2681.js b/test/mjsunit/es6/regress/regress-2681.js
new file mode 100644
index 0000000..8d8e4ad
--- /dev/null
+++ b/test/mjsunit/es6/regress/regress-2681.js
@@ -0,0 +1,48 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --expose-gc --noincremental-marking
+
+// Check that we are not flushing code for generators.
+
+function flush_all_code() {
+  // Each GC ages code, and currently six GC cycles will flush all code.
+  for (var i = 0; i < 10; i++) gc();
+}
+
+function* g() {
+  yield 1;
+  yield 2;
+}
+
+var o = g();
+assertEquals({ value: 1, done: false }, o.next());
+
+flush_all_code();
+
+assertEquals({ value: 2, done: false }, o.next());
+assertEquals({ value: undefined, done: true }, o.next());
diff --git a/test/mjsunit/es6/regress/regress-2691.js b/test/mjsunit/es6/regress/regress-2691.js
new file mode 100644
index 0000000..d7d0c4f
--- /dev/null
+++ b/test/mjsunit/es6/regress/regress-2691.js
@@ -0,0 +1,32 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Check that yield* on non-objects raises a TypeError.
+
+assertThrows('(function*() { yield* 10 })().next()', TypeError);
+assertThrows('(function*() { yield* {} })().next()', TypeError);
+assertThrows('(function*() { yield* undefined })().next()', TypeError);
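+
+// Editorial note: the TypeError is specific to operands that fail the
+// iterator protocol; delegating to an iterable object, e.g. yield* [1, 2],
+// would proceed normally.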
diff --git a/test/mjsunit/es6/regress/regress-3280.js b/test/mjsunit/es6/regress/regress-3280.js
new file mode 100644
index 0000000..2dadd02
--- /dev/null
+++ b/test/mjsunit/es6/regress/regress-3280.js
@@ -0,0 +1,25 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+var Debug = debug.Debug;
+
+var listener_called;
+
+function listener(event, exec_state, event_data, data) {
+  if (event == Debug.DebugEvent.Break) {
+    listener_called = true;
+    exec_state.frame().allScopes();
+  }
+}
+
+Debug.setListener(listener);
+
+function *generator_local_2(a) {
+  debugger;
+}
+generator_local_2(1).next();
+
+assertTrue(listener_called, "listener not called");
diff --git a/test/mjsunit/es6/regress/regress-crbug-248025.js b/test/mjsunit/es6/regress/regress-crbug-248025.js
new file mode 100644
index 0000000..b7982cd
--- /dev/null
+++ b/test/mjsunit/es6/regress/regress-crbug-248025.js
@@ -0,0 +1,38 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Filler long enough to trigger lazy parsing.
+var filler = "//" + new Array(1024).join('x');
+
+// Test that the pre-parser does not crash when the expected contextual
+// keyword as part of a 'for' statement is not an identifier.
+try {
+  eval(filler + "\nfunction f() { for (x : y) { } }");
+  throw "not reached";
+} catch (e) {
+  if (!(e instanceof SyntaxError)) throw e;
+}
diff --git a/test/mjsunit/es6/regress/regress-crbug-346141.js b/test/mjsunit/es6/regress/regress-crbug-346141.js
index 798b770..2b9655e 100644
--- a/test/mjsunit/es6/regress/regress-crbug-346141.js
+++ b/test/mjsunit/es6/regress/regress-crbug-346141.js
@@ -2,8 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-symbols
-
 var s = Symbol()
 var o = {}
 o[s] = 2
diff --git a/test/mjsunit/es6/string-html.js b/test/mjsunit/es6/string-html.js
new file mode 100644
index 0000000..4f3feb5
--- /dev/null
+++ b/test/mjsunit/es6/string-html.js
@@ -0,0 +1,159 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Tests taken from:
+// http://mathias.html5.org/tests/javascript/string/
+
+assertEquals('_'.anchor('b'), '<a name="b">_</a>');
+assertEquals('<'.anchor('<'), '<a name="<"><</a>');
+assertEquals('_'.anchor(0x2A), '<a name="42">_</a>');
+assertEquals('_'.anchor('\x22'), '<a name="&quot;">_</a>');
+assertEquals(String.prototype.anchor.call(0x2A, 0x2A), '<a name="42">42</a>');
+assertThrows(function() {
+  String.prototype.anchor.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.anchor.call(null);
+}, TypeError);
+assertEquals(String.prototype.anchor.length, 1);
+
+assertEquals('_'.big(), '<big>_</big>');
+assertEquals('<'.big(), '<big><</big>');
+assertEquals(String.prototype.big.call(0x2A), '<big>42</big>');
+assertThrows(function() {
+  String.prototype.big.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.big.call(null);
+}, TypeError);
+assertEquals(String.prototype.big.length, 0);
+
+assertEquals('_'.blink(), '<blink>_</blink>');
+assertEquals('<'.blink(), '<blink><</blink>');
+assertEquals(String.prototype.blink.call(0x2A), '<blink>42</blink>');
+assertThrows(function() {
+  String.prototype.blink.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.blink.call(null);
+}, TypeError);
+assertEquals(String.prototype.blink.length, 0);
+
+assertEquals('_'.bold(), '<b>_</b>');
+assertEquals('<'.bold(), '<b><</b>');
+assertEquals(String.prototype.bold.call(0x2A), '<b>42</b>');
+assertThrows(function() {
+  String.prototype.bold.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.bold.call(null);
+}, TypeError);
+assertEquals(String.prototype.bold.length, 0);
+
+assertEquals('_'.fixed(), '<tt>_</tt>');
+assertEquals('<'.fixed(), '<tt><</tt>');
+assertEquals(String.prototype.fixed.call(0x2A), '<tt>42</tt>');
+assertThrows(function() {
+  String.prototype.fixed.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.fixed.call(null);
+}, TypeError);
+assertEquals(String.prototype.fixed.length, 0);
+
+assertEquals('_'.fontcolor('b'), '<font color="b">_</font>');
+assertEquals('<'.fontcolor('<'), '<font color="<"><</font>');
+assertEquals('_'.fontcolor(0x2A), '<font color="42">_</font>');
+assertEquals('_'.fontcolor('\x22'), '<font color="&quot;">_</font>');
+assertEquals(String.prototype.fontcolor.call(0x2A, 0x2A),
+  '<font color="42">42</font>');
+assertThrows(function() {
+  String.prototype.fontcolor.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.fontcolor.call(null);
+}, TypeError);
+assertEquals(String.prototype.fontcolor.length, 1);
+
+assertEquals('_'.fontsize('b'), '<font size="b">_</font>');
+assertEquals('<'.fontsize('<'), '<font size="<"><</font>');
+assertEquals('_'.fontsize(0x2A), '<font size="42">_</font>');
+assertEquals('_'.fontsize('\x22'), '<font size="&quot;">_</font>');
+assertEquals(String.prototype.fontsize.call(0x2A, 0x2A),
+  '<font size="42">42</font>');
+assertThrows(function() {
+  String.prototype.fontsize.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.fontsize.call(null);
+}, TypeError);
+assertEquals(String.prototype.fontsize.length, 1);
+
+assertEquals('_'.italics(), '<i>_</i>');
+assertEquals('<'.italics(), '<i><</i>');
+assertEquals(String.prototype.italics.call(0x2A), '<i>42</i>');
+assertThrows(function() {
+  String.prototype.italics.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.italics.call(null);
+}, TypeError);
+assertEquals(String.prototype.italics.length, 0);
+
+assertEquals('_'.link('b'), '<a href="b">_</a>');
+assertEquals('<'.link('<'), '<a href="<"><</a>');
+assertEquals('_'.link(0x2A), '<a href="42">_</a>');
+assertEquals('_'.link('\x22'), '<a href="&quot;">_</a>');
+assertEquals(String.prototype.link.call(0x2A, 0x2A), '<a href="42">42</a>');
+assertThrows(function() {
+  String.prototype.link.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.link.call(null);
+}, TypeError);
+assertEquals(String.prototype.link.length, 1);
+
+assertEquals('_'.small(), '<small>_</small>');
+assertEquals('<'.small(), '<small><</small>');
+assertEquals(String.prototype.small.call(0x2A), '<small>42</small>');
+assertThrows(function() {
+  String.prototype.small.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.small.call(null);
+}, TypeError);
+assertEquals(String.prototype.small.length, 0);
+
+assertEquals('_'.strike(), '<strike>_</strike>');
+assertEquals('<'.strike(), '<strike><</strike>');
+assertEquals(String.prototype.strike.call(0x2A), '<strike>42</strike>');
+assertThrows(function() {
+  String.prototype.strike.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.strike.call(null);
+}, TypeError);
+assertEquals(String.prototype.strike.length, 0);
+
+assertEquals('_'.sub(), '<sub>_</sub>');
+assertEquals('<'.sub(), '<sub><</sub>');
+assertEquals(String.prototype.sub.call(0x2A), '<sub>42</sub>');
+assertThrows(function() {
+  String.prototype.sub.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.sub.call(null);
+}, TypeError);
+assertEquals(String.prototype.sub.length, 0);
+
+assertEquals('_'.sup(), '<sup>_</sup>');
+assertEquals('<'.sup(), '<sup><</sup>');
+assertEquals(String.prototype.sup.call(0x2A), '<sup>42</sup>');
+assertThrows(function() {
+  String.prototype.sup.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.sup.call(null);
+}, TypeError);
+assertEquals(String.prototype.sup.length, 0);
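+
+// Editorial note: every method above follows the same Annex B CreateHTML
+// shape: the receiver is coerced via ToString (hence the TypeError on
+// undefined and null), and only '"' in attribute values is escaped, as
+// &quot;.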
diff --git a/test/mjsunit/es6/string-iterator.js b/test/mjsunit/es6/string-iterator.js
new file mode 100644
index 0000000..e6bea6d
--- /dev/null
+++ b/test/mjsunit/es6/string-iterator.js
@@ -0,0 +1,89 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+function TestStringPrototypeIterator() {
+  assertTrue(String.prototype.hasOwnProperty(Symbol.iterator));
+  assertFalse("".hasOwnProperty(Symbol.iterator));
+  assertFalse("".propertyIsEnumerable(Symbol.iterator));
+}
+TestStringPrototypeIterator();
+
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({value: value, done: done}, result);
+}
+
+
+function TestManualIteration() {
+  var string = "abc";
+  var iterator = string[Symbol.iterator]();
+  assertIteratorResult('a', false, iterator.next());
+  assertIteratorResult('b', false, iterator.next());
+  assertIteratorResult('c', false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestManualIteration();
+
+
+function TestSurrogatePairs() {
+  var lo = "\uD834";
+  var hi = "\uDF06";
+  var pair = lo + hi;
+  var string = "abc" + pair + "def" + lo + pair + hi + lo;
+  var iterator = string[Symbol.iterator]();
+  assertIteratorResult('a', false, iterator.next());
+  assertIteratorResult('b', false, iterator.next());
+  assertIteratorResult('c', false, iterator.next());
+  assertIteratorResult(pair, false, iterator.next());
+  assertIteratorResult('d', false, iterator.next());
+  assertIteratorResult('e', false, iterator.next());
+  assertIteratorResult('f', false, iterator.next());
+  assertIteratorResult(lo, false, iterator.next());
+  assertIteratorResult(pair, false, iterator.next());
+  assertIteratorResult(hi, false, iterator.next());
+  assertIteratorResult(lo, false, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+  assertIteratorResult(void 0, true, iterator.next());
+}
+TestSurrogatePairs();
+
+
+function TestStringIteratorPrototype() {
+  var iterator = ""[Symbol.iterator]();
+  var StringIteratorPrototype = iterator.__proto__;
+  assertFalse(StringIteratorPrototype.hasOwnProperty('constructor'));
+  assertEquals(StringIteratorPrototype.__proto__, Object.prototype);
+  assertArrayEquals(['next'],
+      Object.getOwnPropertyNames(StringIteratorPrototype));
+  assertEquals('[object String Iterator]', "" + iterator);
+}
+TestStringIteratorPrototype();
+
+
+function TestForOf() {
+  var lo = "\uD834";
+  var hi = "\uDF06";
+  var pair = lo + hi;
+  var string = "abc" + pair + "def" + lo + pair + hi + lo;
+  var expected = ['a', 'b', 'c', pair, 'd', 'e', 'f', lo, pair, hi, lo];
+
+  var i = 0;
+  for (var char of string) {
+    assertEquals(expected[i++], char);
+  }
+
+  assertEquals(expected.length, i);
+}
+TestForOf();
+
+
+function TestNonOwnSlots() {
+  var iterator = ""[Symbol.iterator]();
+  var object = {__proto__: iterator};
+
+  assertThrows(function() { object.next(); }, TypeError);
+}
+TestNonOwnSlots();
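+
+// Editorial note: the iterator advances by code points rather than code
+// units, which is why a lone lead surrogate is yielded by itself while a
+// valid surrogate pair is kept together (see TestSurrogatePairs above).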
diff --git a/test/mjsunit/es6/symbols.js b/test/mjsunit/es6/symbols.js
index 2204392..60737af 100644
--- a/test/mjsunit/es6/symbols.js
+++ b/test/mjsunit/es6/symbols.js
@@ -25,7 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-symbols --harmony-collections
 // Flags: --expose-gc --allow-natives-syntax
 
 var symbols = []
@@ -102,7 +101,9 @@
 
 function TestValueOf() {
   for (var i in symbols) {
+    assertTrue(symbols[i] === Object(symbols[i]).valueOf())
     assertTrue(symbols[i] === symbols[i].valueOf())
+    assertTrue(Symbol.prototype.valueOf.call(Object(symbols[i])) === symbols[i])
     assertTrue(Symbol.prototype.valueOf.call(symbols[i]) === symbols[i])
   }
 }
@@ -111,9 +112,10 @@
 
 function TestToString() {
   for (var i in symbols) {
-    assertThrows(function() { String(symbols[i]) }, TypeError)
+    assertThrows(function() { new String(symbols[i]) }, TypeError)
+    assertEquals(symbols[i].toString(), String(symbols[i]))
     assertThrows(function() { symbols[i] + "" }, TypeError)
-    assertTrue(isValidSymbolString(String(Object(symbols[i]))))
+    assertThrows(function() { String(Object(symbols[i])) }, TypeError)
     assertTrue(isValidSymbolString(symbols[i].toString()))
     assertTrue(isValidSymbolString(Object(symbols[i]).toString()))
     assertTrue(
@@ -127,6 +129,8 @@
 
 function TestToBoolean() {
   for (var i in symbols) {
+    assertTrue(Boolean(Object(symbols[i])))
+    assertFalse(!Object(symbols[i]))
     assertTrue(Boolean(symbols[i]).valueOf())
     assertFalse(!symbols[i])
     assertTrue(!!symbols[i])
@@ -144,8 +148,10 @@
 
 function TestToNumber() {
   for (var i in symbols) {
-    assertSame(NaN, Number(symbols[i]).valueOf())
-    assertSame(NaN, symbols[i] + 0)
+    assertThrows(function() { Number(Object(symbols[i])) }, TypeError)
+    assertThrows(function() { +Object(symbols[i]) }, TypeError)
+    assertThrows(function() { Number(symbols[i]) }, TypeError)
+    assertThrows(function() { symbols[i] + 0 }, TypeError)
   }
 }
 TestToNumber()
@@ -367,6 +373,34 @@
 }
 
 
+function TestDefineProperties() {
+  var properties = {}
+  for (var i in symbols) {
+    Object.defineProperty(
+        properties, symbols[i], {value: {value: i}, enumerable: i % 2 === 0})
+  }
+  var o = Object.defineProperties({}, properties)
+  for (var i in symbols) {
+    assertEquals(i % 2 === 0, symbols[i] in o)
+  }
+}
+TestDefineProperties()
+
+
+function TestCreate() {
+  var properties = {}
+  for (var i in symbols) {
+    Object.defineProperty(
+      properties, symbols[i], {value: {value: i}, enumerable: i % 2 === 0})
+  }
+  var o = Object.create(Object.prototype, properties)
+  for (var i in symbols) {
+    assertEquals(i % 2 === 0, symbols[i] in o)
+  }
+}
+TestCreate()
+
+
 function TestCachedKeyAfterScavenge() {
   gc();
   // Keyed property lookups are cached.  Here we assume that the keys are
@@ -412,8 +446,9 @@
 
 function TestWellKnown() {
   var symbols = [
-    "create", "hasInstance", "isConcatSpreadable", "isRegExp",
-    "iterator", "toStringTag", "unscopables"
+    // TODO(rossberg): reactivate once implemented.
+    // "hasInstance", "isConcatSpreadable", "isRegExp",
+    "iterator", /* "toStringTag", */ "unscopables"
   ]
 
   for (var i in symbols) {
diff --git a/test/mjsunit/es6/typed-array-iterator.js b/test/mjsunit/es6/typed-array-iterator.js
new file mode 100644
index 0000000..9903b0a
--- /dev/null
+++ b/test/mjsunit/es6/typed-array-iterator.js
@@ -0,0 +1,39 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var constructors = [Uint8Array, Int8Array,
+                    Uint16Array, Int16Array,
+                    Uint32Array, Int32Array,
+                    Float32Array, Float64Array,
+                    Uint8ClampedArray];
+
+function TestTypedArrayPrototype(constructor) {
+  assertTrue(constructor.prototype.hasOwnProperty('entries'));
+  assertTrue(constructor.prototype.hasOwnProperty('values'));
+  assertTrue(constructor.prototype.hasOwnProperty('keys'));
+  assertTrue(constructor.prototype.hasOwnProperty(Symbol.iterator));
+
+  assertFalse(constructor.prototype.propertyIsEnumerable('entries'));
+  assertFalse(constructor.prototype.propertyIsEnumerable('values'));
+  assertFalse(constructor.prototype.propertyIsEnumerable('keys'));
+  assertFalse(constructor.prototype.propertyIsEnumerable(Symbol.iterator));
+
+  assertEquals(Array.prototype.entries, constructor.prototype.entries);
+  assertEquals(Array.prototype[Symbol.iterator], constructor.prototype.values);
+  assertEquals(Array.prototype.keys, constructor.prototype.keys);
+  assertEquals(Array.prototype[Symbol.iterator], constructor.prototype[Symbol.iterator]);
+}
+constructors.forEach(TestTypedArrayPrototype);
+
+
+function TestTypedArrayValues(constructor) {
+  var array = [1, 2, 3];
+  var i = 0;
+  for (var value of new constructor(array)) {
+    assertEquals(array[i++], value);
+  }
+  assertEquals(i, array.length);
+}
+constructors.forEach(TestTypedArrayValues);
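+
+// Editorial note: the identity assertions in TestTypedArrayPrototype pin
+// down that, at this revision, typed arrays reuse the corresponding
+// Array.prototype iteration functions rather than defining their own.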
diff --git a/test/mjsunit/es6/unscopables.js b/test/mjsunit/es6/unscopables.js
new file mode 100644
index 0000000..36365d2
--- /dev/null
+++ b/test/mjsunit/es6/unscopables.js
@@ -0,0 +1,661 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var global = this;
+var globalProto = Object.getPrototypeOf(global);
+
+// Number of objects being tested. There is an assert ensuring this is correct.
+var objectCount = 21;
+
+
+function runTest(f) {
+  function restore(object, oldProto) {
+    delete object[Symbol.unscopables];
+    delete object.x;
+    delete object.x_;
+    delete object.y;
+    delete object.z;
+    Object.setPrototypeOf(object, oldProto);
+  }
+
+  function getObject(i) {
+    var objects = [
+      {},
+      [],
+      function() {},
+      function() {
+        return arguments;
+      }(),
+      function() {
+        'use strict';
+        return arguments;
+      }(),
+      Object(1),
+      Object(true),
+      Object('bla'),
+      new Date,
+      new RegExp,
+      new Set,
+      new Map,
+      new WeakMap,
+      new WeakSet,
+      new ArrayBuffer(10),
+      new Int32Array(5),
+      Object,
+      Function,
+      Date,
+      RegExp,
+      global
+    ];
+
+    assertEquals(objectCount, objects.length);
+    return objects[i];
+  }
+
+  // Tests depend on this not being there to start with.
+  delete Array.prototype[Symbol.unscopables];
+
+  if (f.length === 1) {
+    for (var i = 0; i < objectCount; i++) {
+      var object = getObject(i);
+      var oldObjectProto = Object.getPrototypeOf(object);
+      f(object);
+      restore(object, oldObjectProto);
+    }
+  } else {
+    for (var i = 0; i < objectCount; i++) {
+      for (var j = 0; j < objectCount; j++) {
+        var object = getObject(i);
+        var proto = getObject(j);
+        if (object === proto) {
+          continue;
+        }
+        var oldObjectProto = Object.getPrototypeOf(object);
+        var oldProtoProto = Object.getPrototypeOf(proto);
+        f(object, proto);
+        restore(object, oldObjectProto);
+        restore(proto, oldProtoProto);
+      }
+    }
+  }
+}
+
+// Test array first, since other tests are changing
+// Array.prototype[Symbol.unscopables].
+function TestArrayPrototypeUnscopables() {
+  var descr = Object.getOwnPropertyDescriptor(Array.prototype,
+                                              Symbol.unscopables);
+  assertFalse(descr.enumerable);
+  assertFalse(descr.writable);
+  assertTrue(descr.configurable);
+  assertEquals(null, Object.getPrototypeOf(descr.value));
+
+  var copyWithin = 'local copyWithin';
+  var entries = 'local entries';
+  var fill = 'local fill';
+  var find = 'local find';
+  var findIndex = 'local findIndex';
+  var keys = 'local keys';
+  var values = 'local values';
+
+  var array = [];
+  array.toString = 42;
+
+  with (array) {
+    assertEquals('local copyWithin', copyWithin);
+    assertEquals('local entries', entries);
+    assertEquals('local fill', fill);
+    assertEquals('local find', find);
+    assertEquals('local findIndex', findIndex);
+    assertEquals('local keys', keys);
+    assertEquals('local values', values);
+    assertEquals(42, toString);
+  }
+}
+TestArrayPrototypeUnscopables();
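+
+// Editorial sketch of the mechanism under test: a truthy entry in an
+// object's Symbol.unscopables makes a `with` lookup skip that binding and
+// fall through to the enclosing scope.
+(function() {
+  var values = 'outer';
+  with ([]) {
+    assertEquals('outer', values);  // Array.prototype blacklists 'values'.
+  }
+})();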
+
+
+
+function TestBasics(object) {
+  var x = 1;
+  var y = 2;
+  var z = 3;
+  object.x = 4;
+  object.y = 5;
+
+  with (object) {
+    assertEquals(4, x);
+    assertEquals(5, y);
+    assertEquals(3, z);
+  }
+
+  object[Symbol.unscopables] = {x: true};
+  with (object) {
+    assertEquals(1, x);
+    assertEquals(5, y);
+    assertEquals(3, z);
+  }
+
+  object[Symbol.unscopables] = {x: 0, y: true};
+  with (object) {
+    assertEquals(1, x);
+    assertEquals(2, y);
+    assertEquals(3, z);
+  }
+}
+runTest(TestBasics);
+
+
+function TestUnscopableChain(object) {
+  var x = 1;
+  object.x = 2;
+
+  with (object) {
+    assertEquals(2, x);
+  }
+
+  object[Symbol.unscopables] = {
+    __proto__: {x: true}
+  };
+  with (object) {
+    assertEquals(1, x);
+  }
+}
+runTest(TestUnscopableChain);
+
+
+function TestBasicsSet(object) {
+  var x = 1;
+  object.x = 2;
+
+  with (object) {
+    assertEquals(2, x);
+  }
+
+  object[Symbol.unscopables] = {x: true};
+  with (object) {
+    assertEquals(1, x);
+    x = 3;
+    assertEquals(3, x);
+  }
+
+  assertEquals(3, x);
+  assertEquals(2, object.x);
+}
+runTest(TestBasicsSet);
+
+
+function TestOnProto(object, proto) {
+  var x = 1;
+  var y = 2;
+  var z = 3;
+  proto.x = 4;
+
+  Object.setPrototypeOf(object, proto);
+  object.y = 5;
+
+  with (object) {
+    assertEquals(4, x);
+    assertEquals(5, y);
+    assertEquals(3, z);
+  }
+
+  proto[Symbol.unscopables] = {x: true};
+  with (object) {
+    assertEquals(1, x);
+    assertEquals(5, y);
+    assertEquals(3, z);
+  }
+
+  object[Symbol.unscopables] = {y: true};
+  with (object) {
+    assertEquals(4, x);
+    assertEquals(2, y);
+    assertEquals(3, z);
+  }
+
+  proto[Symbol.unscopables] = {y: true};
+  object[Symbol.unscopables] = {x: true};
+  with (object) {
+    assertEquals(1, x);
+    assertEquals(5, y);
+    assertEquals(3, z);
+  }
+}
+runTest(TestOnProto);
+
+
+function TestSetBlockedOnProto(object, proto) {
+  var x = 1;
+  object.x = 2;
+
+  with (object) {
+    assertEquals(2, x);
+  }
+
+  Object.setPrototypeOf(object, proto);
+  proto[Symbol.unscopables] = {x: true};
+  with (object) {
+    assertEquals(1, x);
+    x = 3;
+    assertEquals(3, x);
+  }
+
+  assertEquals(3, x);
+  assertEquals(2, object.x);
+}
+runTest(TestSetBlockedOnProto);
+
+
+function TestNonObject(object) {
+  var x = 1;
+  var y = 2;
+  object.x = 3;
+  object.y = 4;
+
+  object[Symbol.unscopables] = 'xy';
+  with (object) {
+    assertEquals(3, x);
+    assertEquals(4, y);
+  }
+
+  object[Symbol.unscopables] = null;
+  with (object) {
+    assertEquals(3, x);
+    assertEquals(4, y);
+  }
+}
+runTest(TestNonObject);
+
+
+function TestChangeDuringWith(object) {
+  var x = 1;
+  var y = 2;
+  object.x = 3;
+  object.y = 4;
+
+  with (object) {
+    assertEquals(3, x);
+    assertEquals(4, y);
+    object[Symbol.unscopables] = {x: true};
+    assertEquals(1, x);
+    assertEquals(4, y);
+  }
+}
+runTest(TestChangeDuringWith);
+
+
+function TestChangeDuringWithWithPossibleOptimization(object) {
+  var x = 1;
+  object.x = 2;
+  with (object) {
+    for (var i = 0; i < 1000; i++) {
+      if (i === 500) object[Symbol.unscopables] = {x: true};
+      assertEquals(i < 500 ? 2: 1, x);
+    }
+  }
+}
+TestChangeDuringWithWithPossibleOptimization({});
+
+
+function TestChangeDuringWithWithPossibleOptimization2(object) {
+  var x = 1;
+  object.x = 2;
+  object[Symbol.unscopables] = {x: true};
+  with (object) {
+    for (var i = 0; i < 1000; i++) {
+      if (i === 500) delete object[Symbol.unscopables];
+      assertEquals(i < 500 ? 1 : 2, x);
+    }
+  }
+}
+TestChangeDuringWithWithPossibleOptimization2({});
+
+
+function TestChangeDuringWithWithPossibleOptimization3(object) {
+  var x = 1;
+  object.x = 2;
+  object[Symbol.unscopables] = {};
+  with (object) {
+    for (var i = 0; i < 1000; i++) {
+      if (i === 500) object[Symbol.unscopables].x = true;
+      assertEquals(i < 500 ? 2 : 1, x);
+    }
+  }
+}
+TestChangeDuringWithWithPossibleOptimization3({});
+
+
+function TestChangeDuringWithWithPossibleOptimization4(object) {
+  var x = 1;
+  object.x = 2;
+  object[Symbol.unscopables] = {x: true};
+  with (object) {
+    for (var i = 0; i < 1000; i++) {
+      if (i === 500) delete object[Symbol.unscopables].x;
+      assertEquals(i < 500 ? 1 : 2, x);
+    }
+  }
+}
+TestChangeDuringWithWithPossibleOptimization4({});
+
+
+function TestAccessorReceiver(object, proto) {
+  var x = 'local';
+
+  Object.defineProperty(proto, 'x', {
+    get: function() {
+      assertEquals(object, this);
+      return this.x_;
+    },
+    configurable: true
+  });
+  proto.x_ = 'proto';
+
+  Object.setPrototypeOf(object, proto);
+  proto.x_ = 'object';
+
+  with (object) {
+    assertEquals('object', x);
+  }
+}
+runTest(TestAccessorReceiver);
+
+
+function TestUnscopablesGetter(object) {
+  // This test gets really messy when object is the global object, since the
+  // assert functions are properties on it and the call counts come out
+  // completely different.
+  if (object === global) return;
+
+  var x = 'local';
+  object.x = 'object';
+
+  var callCount = 0;
+  Object.defineProperty(object, Symbol.unscopables, {
+    get: function() {
+      callCount++;
+      return {};
+    },
+    configurable: true
+  });
+  with (object) {
+    assertEquals('object', x);
+  }
+  // Once for HasBinding
+  assertEquals(1, callCount);
+
+  callCount = 0;
+  Object.defineProperty(object, Symbol.unscopables, {
+    get: function() {
+      callCount++;
+      return {x: true};
+    },
+    configurable: true
+  });
+  with (object) {
+    assertEquals('local', x);
+  }
+  // Once for HasBinding
+  assertEquals(1, callCount);
+
+  callCount = 0;
+  Object.defineProperty(object, Symbol.unscopables, {
+    get: function() {
+      callCount++;
+      return callCount == 1 ? {} : {x: true};
+    },
+    configurable: true
+  });
+  with (object) {
+    x = 1;
+  }
+  // Once for HasBinding
+  assertEquals(1, callCount);
+  assertEquals(1, object.x);
+  assertEquals('local', x);
+  with (object) {
+    x = 2;
+  }
+  // One more HasBinding.
+  assertEquals(2, callCount);
+  assertEquals(1, object.x);
+  assertEquals(2, x);
+}
+runTest(TestUnscopablesGetter);
+
+
+var global = this;
+function TestUnscopablesGetter2() {
+  var x = 'local';
+
+  var globalProto = Object.getPrototypeOf(global);
+  var protos = [{}, [], function() {}, global];
+  var objects = [{}, [], function() {}];
+
+  protos.forEach(function(proto) {
+    objects.forEach(function(object) {
+      Object.defineProperty(proto, 'x', {
+        get: function() {
+          assertEquals(object, this);
+          return 'proto';
+        },
+        configurable: true
+      });
+
+      object.__proto__ = proto;
+      Object.defineProperty(object, 'x', {
+        get: function() {
+          assertEquals(object, this);
+          return 'object';
+        },
+        configurable: true
+      });
+
+      with (object) {
+        assertEquals('object', x);
+      }
+
+      object[Symbol.unscopables] = {x: true};
+      with (object) {
+        assertEquals('local', x);
+      }
+
+      delete proto[Symbol.unscopables];
+      delete object[Symbol.unscopables];
+    });
+  });
+
+  delete global.x;
+  Object.setPrototypeOf(global, globalProto);
+}
+TestUnscopablesGetter2();
+
+
+function TestSetterOnBlacklisted(object, proto) {
+  var x = 'local';
+  Object.defineProperty(proto, 'x', {
+    set: function(x) {
+      assertUnreachable();
+    },
+    get: function() {
+      return 'proto';
+    },
+    configurable: true
+  });
+  Object.setPrototypeOf(object, proto);
+  Object.defineProperty(object, 'x', {
+    get: function() {
+      return this.x_;
+    },
+    set: function(x) {
+      this.x_ = x;
+    },
+    configurable: true
+  });
+  object.x_ = 1;
+
+  with (object) {
+    x = 2;
+    assertEquals(2, x);
+  }
+
+  assertEquals(2, object.x);
+
+  object[Symbol.unscopables] = {x: true};
+
+  with (object) {
+    x = 3;
+    assertEquals(3, x);
+  }
+
+  assertEquals(2, object.x);
+}
+runTest(TestSetterOnBlacklisted);
+
+
+function TestObjectsAsUnscopables(object, unscopables) {
+  var x = 1;
+  object.x = 2;
+
+  with (object) {
+    assertEquals(2, x);
+    object[Symbol.unscopables] = unscopables;
+    assertEquals(2, x);
+  }
+}
+runTest(TestObjectsAsUnscopables);
+
+
+function TestAccessorOnUnscopables(object) {
+  var x = 1;
+  object.x = 2;
+
+  var unscopables = {
+    get x() {
+      assertUnreachable();
+    }
+  };
+
+  with (object) {
+    assertEquals(2, x);
+    object[Symbol.unscopables] = unscopables;
+    assertEquals(1, x);
+  }
+}
+runTest(TestAccessorOnUnscopables);
+
+
+function TestLengthUnscopables(object, proto) {
+  var length = 2;
+  with (object) {
+    assertEquals(1, length);
+    object[Symbol.unscopables] = {length: true};
+    assertEquals(2, length);
+    delete object[Symbol.unscopables];
+    assertEquals(1, length);
+  }
+}
+TestLengthUnscopables([1], Array.prototype);
+TestLengthUnscopables(function(x) {}, Function.prototype);
+TestLengthUnscopables(new String('x'), String.prototype);
+
+
+function TestFunctionNameUnscopables(object) {
+  var name = 'local';
+  with (object) {
+    assertEquals('f', name);
+    object[Symbol.unscopables] = {name: true};
+    assertEquals('local', name);
+    delete object[Symbol.unscopables];
+    assertEquals('f', name);
+  }
+}
+TestFunctionNameUnscopables(function f() {});
+
+
+function TestFunctionPrototypeUnscopables() {
+  var prototype = 'local';
+  var f = function() {};
+  var g = function() {};
+  Object.setPrototypeOf(f, g);
+  var fp = f.prototype;
+  var gp = g.prototype;
+  with (f) {
+    assertEquals(fp, prototype);
+    f[Symbol.unscopables] = {prototype: true};
+    assertEquals('local', prototype);
+    delete f[Symbol.unscopables];
+    assertEquals(fp, prototype);
+  }
+}
+TestFunctionPrototypeUnscopables(function() {});
+
+
+function TestFunctionArgumentsUnscopables() {
+  var func = function() {
+    var arguments = 'local';
+    var args = func.arguments;
+    with (func) {
+      assertEquals(args, arguments);
+      func[Symbol.unscopables] = {arguments: true};
+      assertEquals('local', arguments);
+      delete func[Symbol.unscopables];
+      assertEquals(args, arguments);
+    }
+  }
+  func(1);
+}
+TestFunctionArgumentsUnscopables();
+
+
+function TestArgumentsLengthUnscopables() {
+  var func = function() {
+    var length = 'local';
+    with (arguments) {
+      assertEquals(1, length);
+      arguments[Symbol.unscopables] = {length: true};
+      assertEquals('local', length);
+    }
+  }
+  func(1);
+}
+TestArgumentsLengthUnscopables();
+
+
+function TestFunctionCallerUnscopables() {
+  var func = function() {
+    var caller = 'local';
+    with (func) {
+      assertEquals(TestFunctionCallerUnscopables, caller);
+      func[Symbol.unscopables] = {caller: true};
+      assertEquals('local', caller);
+      delete func[Symbol.unscopables];
+      assertEquals(TestFunctionCallerUnscopables, caller);
+    }
+  }
+  func(1);
+}
+TestFunctionCallerUnscopables();
+
+
+function TestGetUnscopablesGetterThrows() {
+  var object = {
+    get x() {
+      assertUnreachable();
+    }
+  };
+  function CustomError() {}
+  Object.defineProperty(object, Symbol.unscopables, {
+    get: function() {
+      throw new CustomError();
+    }
+  });
+  assertThrows(function() {
+    with (object) {
+      x;
+    }
+  }, CustomError);
+}
+TestGetUnscopablesGetterThrows();
diff --git a/test/mjsunit/es6/weak_collections.js b/test/mjsunit/es6/weak_collections.js
deleted file mode 100644
index 74235e7..0000000
--- a/test/mjsunit/es6/weak_collections.js
+++ /dev/null
@@ -1,333 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-gc --allow-natives-syntax
-
-
-// Note: this test is superseded by harmony/collections.js.
-// IF YOU CHANGE THIS FILE, apply the same changes to harmony/collections.js!
-// TODO(rossberg): Remove once non-weak collections have caught up.
-
-// Test valid getter and setter calls on WeakSets.
-function TestValidSetCalls(m) {
-  assertDoesNotThrow(function () { m.add(new Object) });
-  assertDoesNotThrow(function () { m.has(new Object) });
-  assertDoesNotThrow(function () { m.delete(new Object) });
-}
-TestValidSetCalls(new WeakSet);
-
-
-// Test valid getter and setter calls on WeakMaps
-function TestValidMapCalls(m) {
-  assertDoesNotThrow(function () { m.get(new Object) });
-  assertDoesNotThrow(function () { m.set(new Object) });
-  assertDoesNotThrow(function () { m.has(new Object) });
-  assertDoesNotThrow(function () { m.delete(new Object) });
-}
-TestValidMapCalls(new WeakMap);
-
-
-// Test invalid getter and setter calls for WeakMap
-function TestInvalidCalls(m) {
-  assertThrows(function () { m.get(undefined) }, TypeError);
-  assertThrows(function () { m.set(undefined, 0) }, TypeError);
-  assertThrows(function () { m.get(null) }, TypeError);
-  assertThrows(function () { m.set(null, 0) }, TypeError);
-  assertThrows(function () { m.get(0) }, TypeError);
-  assertThrows(function () { m.set(0, 0) }, TypeError);
-  assertThrows(function () { m.get('a-key') }, TypeError);
-  assertThrows(function () { m.set('a-key', 0) }, TypeError);
-}
-TestInvalidCalls(new WeakMap);
-
-
-// Test expected behavior for WeakSets
-function TestSet(set, key) {
-  assertFalse(set.has(key));
-  assertSame(undefined, set.add(key));
-  assertTrue(set.has(key));
-  assertTrue(set.delete(key));
-  assertFalse(set.has(key));
-  assertFalse(set.delete(key));
-  assertFalse(set.has(key));
-}
-function TestSetBehavior(set) {
-  for (var i = 0; i < 20; i++) {
-    TestSet(set, new Object);
-    TestSet(set, i);
-    TestSet(set, i / 100);
-    TestSet(set, 'key-' + i);
-  }
-  var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
-  for (var i = 0; i < keys.length; i++) {
-    TestSet(set, keys[i]);
-  }
-}
-TestSet(new WeakSet, new Object);
-
-
-// Test expected mapping behavior for WeakMaps
-function TestMapping(map, key, value) {
-  assertSame(undefined, map.set(key, value));
-  assertSame(value, map.get(key));
-}
-function TestMapBehavior1(m) {
-  TestMapping(m, new Object, 23);
-  TestMapping(m, new Object, 'the-value');
-  TestMapping(m, new Object, new Object);
-}
-TestMapBehavior1(new WeakMap);
-
-
-// Test expected querying behavior of WeakMaps
-function TestQuery(m) {
-  var key = new Object;
-  var values = [ 'x', 0, +Infinity, -Infinity, true, false, null, undefined ];
-  for (var i = 0; i < values.length; i++) {
-    TestMapping(m, key, values[i]);
-    assertTrue(m.has(key));
-    assertFalse(m.has(new Object));
-  }
-}
-TestQuery(new WeakMap);
-
-
-// Test expected deletion behavior of WeakMaps
-function TestDelete(m) {
-  var key = new Object;
-  TestMapping(m, key, 'to-be-deleted');
-  assertTrue(m.delete(key));
-  assertFalse(m.delete(key));
-  assertFalse(m.delete(new Object));
-  assertSame(m.get(key), undefined);
-}
-TestDelete(new WeakMap);
-
-
-// Test GC of WeakMaps with entry
-function TestGC1(m) {
-  var key = new Object;
-  m.set(key, 'not-collected');
-  gc();
-  assertSame('not-collected', m.get(key));
-}
-TestGC1(new WeakMap);
-
-
-// Test GC of WeakMaps with chained entries
-function TestGC2(m) {
-  var head = new Object;
-  for (key = head, i = 0; i < 10; i++, key = m.get(key)) {
-    m.set(key, new Object);
-  }
-  gc();
-  var count = 0;
-  for (key = head; key != undefined; key = m.get(key)) {
-    count++;
-  }
-  assertEquals(11, count);
-}
-TestGC2(new WeakMap);
-
-
-// Test property attribute [[Enumerable]]
-function TestEnumerable(func) {
-  function props(x) {
-    var array = [];
-    for (var p in x) array.push(p);
-    return array.sort();
-  }
-  assertArrayEquals([], props(func));
-  assertArrayEquals([], props(func.prototype));
-  assertArrayEquals([], props(new func()));
-}
-TestEnumerable(WeakMap);
-TestEnumerable(WeakSet);
-
-
-// Test arbitrary properties on WeakMaps
-function TestArbitrary(m) {
-  function TestProperty(map, property, value) {
-    map[property] = value;
-    assertEquals(value, map[property]);
-  }
-  for (var i = 0; i < 20; i++) {
-    TestProperty(m, i, 'val' + i);
-    TestProperty(m, 'foo' + i, 'bar' + i);
-  }
-  TestMapping(m, new Object, 'foobar');
-}
-TestArbitrary(new WeakMap);
-
-
-// Test direct constructor call
-assertThrows(function() { WeakMap(); }, TypeError);
-assertThrows(function() { WeakSet(); }, TypeError);
-
-
-// Test some common JavaScript idioms for WeakMaps
-var m = new WeakMap;
-assertTrue(m instanceof WeakMap);
-assertTrue(WeakMap.prototype.set instanceof Function)
-assertTrue(WeakMap.prototype.get instanceof Function)
-assertTrue(WeakMap.prototype.has instanceof Function)
-assertTrue(WeakMap.prototype.delete instanceof Function)
-assertTrue(WeakMap.prototype.clear instanceof Function)
-
-
-// Test some common JavaScript idioms for WeakSets
-var s = new WeakSet;
-assertTrue(s instanceof WeakSet);
-assertTrue(WeakSet.prototype.add instanceof Function)
-assertTrue(WeakSet.prototype.has instanceof Function)
-assertTrue(WeakSet.prototype.delete instanceof Function)
-assertTrue(WeakSet.prototype.clear instanceof Function)
-
-
-// Test class of instance and prototype.
-assertEquals("WeakMap", %_ClassOf(new WeakMap))
-assertEquals("Object", %_ClassOf(WeakMap.prototype))
-assertEquals("WeakSet", %_ClassOf(new WeakSet))
-assertEquals("Object", %_ClassOf(WeakMap.prototype))
-
-
-// Test name of constructor.
-assertEquals("WeakMap", WeakMap.name);
-assertEquals("WeakSet", WeakSet.name);
-
-
-// Test prototype property of WeakMap and WeakSet.
-function TestPrototype(C) {
-  assertTrue(C.prototype instanceof Object);
-  assertEquals({
-    value: {},
-    writable: false,
-    enumerable: false,
-    configurable: false
-  }, Object.getOwnPropertyDescriptor(C, "prototype"));
-}
-TestPrototype(WeakMap);
-TestPrototype(WeakSet);
-
-
-// Test constructor property of the WeakMap and WeakSet prototype.
-function TestConstructor(C) {
-  assertFalse(C === Object.prototype.constructor);
-  assertSame(C, C.prototype.constructor);
-  assertSame(C, (new C).__proto__.constructor);
-}
-TestConstructor(WeakMap);
-TestConstructor(WeakSet);
-
-
-// Test the WeakMap and WeakSet global properties themselves.
-function TestDescriptor(global, C) {
-  assertEquals({
-    value: C,
-    writable: true,
-    enumerable: false,
-    configurable: true
-  }, Object.getOwnPropertyDescriptor(global, C.name));
-}
-TestDescriptor(this, WeakMap);
-TestDescriptor(this, WeakSet);
-
-
-// Regression test for WeakMap prototype.
-assertTrue(WeakMap.prototype.constructor === WeakMap)
-assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype)
-
-
-// Regression test for issue 1617: The prototype of the WeakMap constructor
-// needs to be unique (i.e. different from the one of the Object constructor).
-assertFalse(WeakMap.prototype === Object.prototype);
-var o = Object.create({});
-assertFalse("get" in o);
-assertFalse("set" in o);
-assertEquals(undefined, o.get);
-assertEquals(undefined, o.set);
-var o = Object.create({}, { myValue: {
-  value: 10,
-  enumerable: false,
-  configurable: true,
-  writable: true
-}});
-assertEquals(10, o.myValue);
-
-
-// Regression test for issue 1884: Invoking any of the methods for Harmony
-// maps, sets, or weak maps, with a wrong type of receiver should be throwing
-// a proper TypeError.
-var alwaysBogus = [ undefined, null, true, "x", 23, {} ];
-var bogusReceiversTestSet = [
-  { proto: WeakMap.prototype,
-    funcs: [ 'get', 'set', 'has', 'delete' ],
-    receivers: alwaysBogus.concat([ new WeakSet ]),
-  },
-  { proto: WeakSet.prototype,
-    funcs: [ 'add', 'has', 'delete' ],
-    receivers: alwaysBogus.concat([ new WeakMap ]),
-  },
-];
-function TestBogusReceivers(testSet) {
-  for (var i = 0; i < testSet.length; i++) {
-    var proto = testSet[i].proto;
-    var funcs = testSet[i].funcs;
-    var receivers = testSet[i].receivers;
-    for (var j = 0; j < funcs.length; j++) {
-      var func = proto[funcs[j]];
-      for (var k = 0; k < receivers.length; k++) {
-        assertThrows(function () { func.call(receivers[k], {}) }, TypeError);
-      }
-    }
-  }
-}
-TestBogusReceivers(bogusReceiversTestSet);
-
-
-// Test WeakMap clear
-(function() {
-  var k = new Object();
-  var w = new WeakMap();
-  w.set(k, 23);
-  assertTrue(w.has(k));
-  assertEquals(23, w.get(k));
-  w.clear();
-  assertFalse(w.has(k));
-  assertEquals(undefined, w.get(k));
-})();
-
-
-// Test WeakSet clear
-(function() {
-  var k = new Object();
-  var w = new WeakSet();
-  w.add(k);
-  assertTrue(w.has(k));
-  w.clear();
-  assertFalse(w.has(k));
-})();
diff --git a/test/mjsunit/es7/object-observe-debug-event.js b/test/mjsunit/es7/object-observe-debug-event.js
new file mode 100644
index 0000000..ed62764
--- /dev/null
+++ b/test/mjsunit/es7/object-observe-debug-event.js
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+Debug = debug.Debug;
+
+var base_id = -1;
+var exception = null;
+var expected = [
+  "enqueue #1",
+  "willHandle #1",
+  "didHandle #1",
+];
+
+function assertLog(msg) {
+  print(msg);
+  assertTrue(expected.length > 0);
+  assertEquals(expected.shift(), msg);
+  if (!expected.length) {
+    Debug.setListener(null);
+  }
+}
+
+function listener(event, exec_state, event_data, data) {
+  if (event != Debug.DebugEvent.AsyncTaskEvent) return;
+  try {
+    if (base_id < 0)
+      base_id = event_data.id();
+    var id = event_data.id() - base_id + 1;
+    assertEquals("Object.observe", event_data.name());
+    assertLog(event_data.type() + " #" + id);
+  } catch (e) {
+    print(e + e.stack)
+    exception = e;
+  }
+}
+
+Debug.setListener(listener);
+
+var obj = {};
+Object.observe(obj, function(changes) {
+  changes.forEach(function(c) { print(c.type + " " + c.name + " " + c.oldValue); });
+});
+
+obj.foo = 1;
+obj.zoo = 2;
+obj.foo = 3;
+
+assertNull(exception);
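
For context on the new test above: Object.observe batches mutations and delivers them to the observer asynchronously as a single array of change records, which is why the three assignments produce exactly one enqueue/willHandle/didHandle cycle in the debug listener. A minimal sketch of what the observer callback itself sees (assumes the d8 shell's print; `target` is a name invented here):

var target = {};
Object.observe(target, function(changes) {
  // One array per delivery; type is "add" for new properties, "update" for
  // existing ones, and oldValue is only present on update/delete records.
  changes.forEach(function(c) {
    print(c.type + " " + c.name + " " + c.oldValue);
  });
});
target.a = 1;  // queued...
target.a = 2;  // ...and delivered together after the current turn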
diff --git a/test/mjsunit/es7/object-observe.js b/test/mjsunit/es7/object-observe.js
index 7bb579f..5af205e 100644
--- a/test/mjsunit/es7/object-observe.js
+++ b/test/mjsunit/es7/object-observe.js
@@ -25,8 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-proxies --harmony-collections
-// Flags: --harmony-symbols --allow-natives-syntax
+// Flags: --harmony-proxies
+// Flags: --allow-natives-syntax
 
 var allObservers = [];
 function reset() {
@@ -1234,8 +1234,9 @@
 
 // Updating length on large (slow) array
 reset();
-var slow_arr = new Array(1000000000);
+var slow_arr = %NormalizeElements([]);
 slow_arr[500000000] = 'hello';
+slow_arr.length = 1000000000;
 Object.observe(slow_arr, observer.callback);
 var spliceRecords;
 function slowSpliceCallback(records) {
@@ -1685,8 +1686,10 @@
 Object.observe(obj, observer.callback);
 obj.prototype = 7;
 Object.deliverChangeRecords(observer.callback);
-observer.assertNotCalled();
-
+observer.assertRecordCount(1);
+observer.assertCallbackRecords([
+  { object: obj, name: 'prototype', type: 'add' },
+]);
 
 // Check that changes in observation status are detected in all IC states and
 // in optimized code, especially in cases usually using fast elements.
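
Two notes on the hunks above. The slow-array setup now calls %NormalizeElements, presumably to put the array into dictionary (slow) elements mode explicitly rather than relying on the allocation behavior of new Array(1000000000). And the prototype hunk flips an expectation: on a plain object, 'prototype' is an ordinary data property, so assigning it is now reported as an 'add' record. A minimal sketch of that second point, assuming the mjsunit assert helpers:

var records = null;
function callback(changes) { records = changes; }
var obj = {};
Object.observe(obj, callback);
obj.prototype = 7;                      // just a data property named "prototype"
Object.deliverChangeRecords(callback);  // flush delivery synchronously
assertEquals('add', records[0].type);
assertEquals('prototype', records[0].name);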
diff --git a/test/mjsunit/fast-prototype.js b/test/mjsunit/fast-prototype.js
index cdcc1a9..9864761 100644
--- a/test/mjsunit/fast-prototype.js
+++ b/test/mjsunit/fast-prototype.js
@@ -50,6 +50,8 @@
     (new Sub()).__proto__ = proto;
   } else {
     Sub.prototype = proto;
+    // Need to instantiate Sub to mark .prototype as prototype.
+    new Sub();
   }
 }
 
@@ -72,10 +74,15 @@
     // Still fast
     assertTrue(%HasFastProperties(proto));
     AddProps(proto);
-    // After we add all those properties it went slow mode again :-(
-    assertFalse(%HasFastProperties(proto));
+    if (set__proto__) {
+      // After we add all those properties it went slow mode again :-(
+      assertFalse(%HasFastProperties(proto));
+    } else {
+      // .prototype keeps it fast.
+      assertTrue(%HasFastProperties(proto));
+    }
   }
-  if (same_map_as && !add_first) {
+  if (same_map_as && !add_first && set__proto__) {
     assertTrue(%HaveSameMap(same_map_as, proto));
   }
   return proto;
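
The new branch encodes the rule that an object assigned to a function's .prototype is only registered as a prototype once the function is instantiated, and that V8 then keeps such prototype objects in fast-properties mode even as properties are added. A standalone sketch, assuming d8 with --allow-natives-syntax and the mjsunit assert helpers (the count of 30 is arbitrary):

function Sub() {}
var proto = {};
Sub.prototype = proto;
new Sub();                              // registers proto as a prototype
for (var i = 0; i < 30; i++) {
  proto['p' + i] = i;                   // pile on properties...
}
assertTrue(%HasFastProperties(proto));  // ...and it still stays fast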
diff --git a/test/mjsunit/global-const-var-conflicts.js b/test/mjsunit/global-const-var-conflicts.js
index 2fca96f..3b87e3d 100644
--- a/test/mjsunit/global-const-var-conflicts.js
+++ b/test/mjsunit/global-const-var-conflicts.js
@@ -41,17 +41,20 @@
 assertEquals(0, b);
 try { eval("var b = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
 assertEquals(0, b);
+assertEquals(0, caught);
 
 eval("var c");
 try { eval("const c"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
 assertTrue(typeof c == 'undefined');
+assertEquals(1, caught);
 try { eval("const c = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(1, c);
+assertEquals(undefined, c);
+assertEquals(2, caught);
 
 eval("var d = 0");
 try { eval("const d"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(undefined, d);
+assertEquals(0, d);
+assertEquals(3, caught);
 try { eval("const d = 1"); } catch (e) { caught++; assertTrue(e instanceof TypeError); }
-assertEquals(1, d);
-
-assertEquals(0, caught);
+assertEquals(0, d);
+assertEquals(4, caught);
diff --git a/test/mjsunit/harmony/array-iterator.js b/test/mjsunit/harmony/array-iterator.js
deleted file mode 100644
index 2642d7b..0000000
--- a/test/mjsunit/harmony/array-iterator.js
+++ /dev/null
@@ -1,228 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-iteration --allow-natives-syntax
-
-
-function TestArrayPrototype() {
-  assertTrue(Array.prototype.hasOwnProperty('entries'));
-  assertTrue(Array.prototype.hasOwnProperty('values'));
-  assertTrue(Array.prototype.hasOwnProperty('keys'));
-
-  assertFalse(Array.prototype.propertyIsEnumerable('entries'));
-  assertFalse(Array.prototype.propertyIsEnumerable('values'));
-  assertFalse(Array.prototype.propertyIsEnumerable('keys'));
-}
-TestArrayPrototype();
-
-
-function assertIteratorResult(value, done, result) {
-  assertEquals({value: value, done: done}, result);
-}
-
-
-function TestValues() {
-  var array = ['a', 'b', 'c'];
-  var iterator = array.values();
-  assertIteratorResult('a', false, iterator.next());
-  assertIteratorResult('b', false, iterator.next());
-  assertIteratorResult('c', false, iterator.next());
-  assertIteratorResult(void 0, true, iterator.next());
-
-  array.push('d');
-  assertIteratorResult(void 0, true, iterator.next());
-}
-TestValues();
-
-
-function TestValuesMutate() {
-  var array = ['a', 'b', 'c'];
-  var iterator = array.values();
-  assertIteratorResult('a', false, iterator.next());
-  assertIteratorResult('b', false, iterator.next());
-  assertIteratorResult('c', false, iterator.next());
-  array.push('d');
-  assertIteratorResult('d', false, iterator.next());
-  assertIteratorResult(void 0, true, iterator.next());
-}
-TestValuesMutate();
-
-
-function TestKeys() {
-  var array = ['a', 'b', 'c'];
-  var iterator = array.keys();
-  assertIteratorResult(0, false, iterator.next());
-  assertIteratorResult(1, false, iterator.next());
-  assertIteratorResult(2, false, iterator.next());
-  assertIteratorResult(void 0, true, iterator.next());
-
-  array.push('d');
-  assertIteratorResult(void 0, true, iterator.next());
-}
-TestKeys();
-
-
-function TestKeysMutate() {
-  var array = ['a', 'b', 'c'];
-  var iterator = array.keys();
-  assertIteratorResult(0, false, iterator.next());
-  assertIteratorResult(1, false, iterator.next());
-  assertIteratorResult(2, false, iterator.next());
-  array.push('d');
-  assertIteratorResult(3, false, iterator.next());
-  assertIteratorResult(void 0, true, iterator.next());
-}
-TestKeysMutate();
-
-
-function TestEntries() {
-  var array = ['a', 'b', 'c'];
-  var iterator = array.entries();
-  assertIteratorResult([0, 'a'], false, iterator.next());
-  assertIteratorResult([1, 'b'], false, iterator.next());
-  assertIteratorResult([2, 'c'], false, iterator.next());
-  assertIteratorResult(void 0, true, iterator.next());
-
-  array.push('d');
-  assertIteratorResult(void 0, true, iterator.next());
-}
-TestEntries();
-
-
-function TestEntriesMutate() {
-  var array = ['a', 'b', 'c'];
-  var iterator = array.entries();
-  assertIteratorResult([0, 'a'], false, iterator.next());
-  assertIteratorResult([1, 'b'], false, iterator.next());
-  assertIteratorResult([2, 'c'], false, iterator.next());
-  array.push('d');
-  assertIteratorResult([3, 'd'], false, iterator.next());
-  assertIteratorResult(void 0, true, iterator.next());
-}
-TestEntriesMutate();
-
-
-function TestArrayIteratorPrototype() {
-  var ArrayIteratorPrototype = [].values().__proto__;
-  assertFalse(ArrayIteratorPrototype.hasOwnProperty('constructor'));
-  assertEquals(ArrayIteratorPrototype.__proto__, Object.prototype);
-  assertArrayEquals(['next'],
-      Object.getOwnPropertyNames(ArrayIteratorPrototype));
-}
-TestArrayIteratorPrototype();
-
-
-function TestArrayIteratorPrototype() {
-  var array = [];
-  var iterator = array.values();
-
-  var ArrayIteratorPrototype = iterator.__proto__;
-
-  assertEquals(ArrayIteratorPrototype, array.values().__proto__);
-  assertEquals(ArrayIteratorPrototype, array.keys().__proto__);
-  assertEquals(ArrayIteratorPrototype, array.entries().__proto__);
-
-  assertEquals(Object.prototype, ArrayIteratorPrototype.__proto__);
-
-  assertEquals('Array Iterator', %_ClassOf(array.values()));
-  assertEquals('Array Iterator', %_ClassOf(array.keys()));
-  assertEquals('Array Iterator', %_ClassOf(array.entries()));
-
-  assertFalse(ArrayIteratorPrototype.hasOwnProperty('constructor'));
-  assertArrayEquals(['next'],
-      Object.getOwnPropertyNames(ArrayIteratorPrototype));
-}
-TestArrayIteratorPrototype();
-
-
-function TestForArrayValues() {
-  var buffer = [];
-  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
-  var i = 0;
-  for (var value of array.values()) {
-    buffer[i++] = value;
-  }
-
-  assertEquals(8, buffer.length);
-
-  for (var i = 0; i < buffer.length - 1; i++) {
-    assertEquals(array[i], buffer[i]);
-  }
-  assertTrue(isNaN(buffer[buffer.length - 1]));
-}
-TestForArrayValues();
-
-
-function TestForArrayKeys() {
-  var buffer = [];
-  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
-  var i = 0;
-  for (var key of array.keys()) {
-    buffer[i++] = key;
-  }
-
-  assertEquals(8, buffer.length);
-
-  for (var i = 0; i < buffer.length; i++) {
-    assertEquals(i, buffer[i]);
-  }
-}
-TestForArrayKeys();
-
-
-function TestForArrayEntries() {
-  var buffer = [];
-  var array = [0, 'a', true, false, null, /* hole */, undefined, NaN];
-  var i = 0;
-  for (var entry of array.entries()) {
-    buffer[i++] = entry;
-  }
-
-  assertEquals(8, buffer.length);
-
-  for (var i = 0; i < buffer.length - 1; i++) {
-    assertEquals(array[i], buffer[i][1]);
-  }
-  assertTrue(isNaN(buffer[buffer.length - 1][1]));
-
-  for (var i = 0; i < buffer.length; i++) {
-    assertEquals(i, buffer[i][0]);
-  }
-}
-TestForArrayEntries();
-
-
-function TestNonOwnSlots() {
-  var array = [0];
-  var iterator = array.values();
-  var object = {__proto__: iterator};
-
-  assertThrows(function() {
-    object.next();
-  }, TypeError);
-}
-TestNonOwnSlots();
diff --git a/test/mjsunit/harmony/array-of.js b/test/mjsunit/harmony/array-of.js
new file mode 100644
index 0000000..c0a8ed1
--- /dev/null
+++ b/test/mjsunit/harmony/array-of.js
@@ -0,0 +1,164 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Based on Mozilla Array.of() tests at http://dxr.mozilla.org/mozilla-central/source/js/src/jit-test/tests/collections
+
+// Flags: --harmony-arrays
+
+
+
+// Array.of makes real arrays.
+
+function check(a) {
+    assertEquals(Object.getPrototypeOf(a), Array.prototype);
+    assertEquals(Array.isArray(a), true);
+    a[9] = 9;
+    assertEquals(a.length, 10);
+}
+
+
+check(Array.of());
+check(Array.of(0));
+check(Array.of(0, 1, 2));
+var f = Array.of;
+check(f());
+
+
+// Array.of basics
+
+var a = Array.of();
+
+assertEquals(a.length, 0);
+a = Array.of(undefined, null, 3.14, []);
+assertEquals(a, [undefined, null, 3.14, []]);
+a = [];
+for (var i = 0; i < 1000; i++)
+    a[i] = i;
+assertEquals(Array.of.apply(null, a), a);
+
+
+// Array.of does not leave holes
+
+assertEquals(Array.of(undefined), [undefined]);
+assertEquals(Array.of(undefined, undefined), [undefined, undefined]);
+assertEquals(Array.of.apply(null, [,,undefined]), [undefined, undefined, undefined]);
+assertEquals(Array.of.apply(null, Array(4)), [undefined, undefined, undefined, undefined]);
+
+
+// Array.of can be transplanted to other classes.
+
+var hits = 0;
+function Bag() {
+    hits++;
+}
+Bag.of = Array.of;
+
+hits = 0;
+var actual = Bag.of("zero", "one");
+assertEquals(hits, 1);
+
+hits = 0;
+var expected = new Bag;
+expected[0] = "zero";
+expected[1] = "one";
+expected.length = 2;
+assertEquals(areSame(actual, expected), true);
+
+hits = 0;
+actual = Array.of.call(Bag, "zero", "one");
+assertEquals(hits, 1);
+assertEquals(areSame(actual, expected), true);
+
+function areSame(object, array) {
+    var result = object.length == array.length;
+    for (var i = 0; i < object.length; i++) {
+        result = result && object[i] == array[i];
+    }
+    return result;
+}
+
+
+// Array.of does not trigger prototype setters.
+// (It defines elements rather than assigning to them.)
+
+var status = "pass";
+Object.defineProperty(Array.prototype, "0", {set: function(v) {status = "FAIL 1"}});
+assertEquals(Array.of(1)[0], 1);
+assertEquals(status, "pass");
+
+Object.defineProperty(Bag.prototype, "0", {set: function(v) {status = "FAIL 2"}});
+assertEquals(Bag.of(1)[0], 1);
+assertEquals(status, "pass");
+
+
+// Array.of passes the number of arguments to the constructor it calls.
+
+var hits = 0;
+
+function Herd(n) {
+    assertEquals(arguments.length, 1);
+    assertEquals(n, 5);
+    hits++;
+}
+
+Herd.of = Array.of;
+Herd.of("sheep", "cattle", "elephants", "whales", "seals");
+assertEquals(hits, 1);
+
+
+// Array.of calls a "length" setter if one is present.
+
+var hits = 0;
+var lastObj = null, lastVal = undefined;
+function setter(v) {
+    hits++;
+    lastObj = this;
+    lastVal = v;
+}
+
+// when the setter is on the new object
+function Pack() {
+    Object.defineProperty(this, "length", {set: setter});
+}
+Pack.of = Array.of;
+var pack = Pack.of("wolves", "cards", "cigarettes", "lies");
+assertEquals(lastObj, pack);
+assertEquals(lastVal, 4);
+
+// when the setter is on the new object's prototype
+function Bevy() {}
+Object.defineProperty(Bevy.prototype, "length", {set: setter});
+Bevy.of = Array.of;
+var bevy = Bevy.of("quail");
+assertEquals(lastObj, bevy);
+assertEquals(lastVal, 1);
+
+
+// Array.of does a strict assignment to the new object's .length.
+// The assignment is strict even if the code we're calling from is not strict.
+
+function Empty() {}
+Empty.of = Array.of;
+Object.defineProperty(Empty.prototype, "length", {get: function() { return 0; }});
+
+var nothing = new Empty;
+nothing.length = 2;  // no exception; this is not a strict mode assignment
+
+assertThrows(function() { Empty.of(); }, TypeError);
+
+
+// Check superficial features of Array.of.
+
+var desc = Object.getOwnPropertyDescriptor(Array, "of");
+
+assertEquals(desc.configurable, true);
+assertEquals(desc.enumerable, false);
+assertEquals(desc.writable, true);
+assertEquals(Array.of.length, 0);
+assertThrows(function() { new Array.of() }, TypeError);  // not a constructor
+
+// When the this-value passed in is not a constructor, the result is an array.
+[undefined, null, false, "cow"].forEach(function(val) {
+    assertEquals(Array.isArray(Array.of.call(val)), true);
+});
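
To see the contract these tests pin down in one place, here is a hedged polyfill-style sketch (ArrayOfSketch is a name invented here, it approximates the spec's IsConstructor check with a typeof test, and it is not V8's implementation): the receiver is constructed with the argument count when it is callable, elements are defined rather than assigned so inherited setters never fire, and length is set with a strict assignment so a getter-only length throws.

function ArrayOfSketch(/* ...items */) {
  'use strict';                         // makes the length assignment throw
  var len = arguments.length;
  var A = typeof this === 'function' ? new this(len) : new Array(len);
  for (var k = 0; k < len; k++) {
    // defineProperty, not assignment: prototype setters are never triggered
    Object.defineProperty(A, k, {
      value: arguments[k],
      writable: true, enumerable: true, configurable: true
    });
  }
  A.length = len;                       // strict set; TypeError if unwritable
  return A;
}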
diff --git a/test/mjsunit/harmony/arrow-functions.js b/test/mjsunit/harmony/arrow-functions.js
new file mode 100644
index 0000000..0ffa936
--- /dev/null
+++ b/test/mjsunit/harmony/arrow-functions.js
@@ -0,0 +1,49 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-arrow-functions
+
+// Arrow functions are like functions, except they throw when using the
+// "new" operator on them.
+assertEquals("function", typeof (() => {}));
+assertEquals(Function.prototype, Object.getPrototypeOf(() => {}));
+assertThrows(function() { new (() => {}); }, TypeError);
+assertFalse("prototype" in (() => {}));
+
+// Check the different syntax variations
+assertEquals(1, (() => 1)());
+assertEquals(2, (a => a + 1)(1));
+assertEquals(3, (() => { return 3; })());
+assertEquals(4, (a => { return a + 3; })(1));
+assertEquals(5, ((a, b) => a + b)(1, 4));
+assertEquals(6, ((a, b) => { return a + b; })(1, 5));
+
+// The following are tests from:
+// http://wiki.ecmascript.org/doku.php?id=harmony:arrow_function_syntax
+
+// Empty arrow function returns undefined
+var empty = () => {};
+assertEquals(undefined, empty());
+
+// Single parameter case needs no parentheses around parameter list
+var identity = x => x;
+assertEquals(empty, identity(empty));
+
+// No need for parentheses even for lower-precedence expression body
+var square = x => x * x;
+assertEquals(9, square(3));
+
+// Parenthesize the body to return an object literal expression
+var key_maker = val => ({key: val});
+assertEquals(empty, key_maker(empty).key);
+
+// Statement body needs braces, must use 'return' explicitly if not void
+var evens = [0, 2, 4, 6, 8];
+assertEquals([1, 3, 5, 7, 9], evens.map(v => v + 1));
+
+var fives = [];
+[1, 2, 3, 4, 5, 6, 7, 8, 9, 10].forEach(v => {
+  if (v % 5 === 0) fives.push(v);
+});
+assertEquals([5, 10], fives);
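
One pitfall worth spelling out next to the key_maker case above: without the parentheses, the braces parse as a block body, `key:` becomes a statement label, and the arrow returns undefined. A small sketch, assuming the same --harmony-arrow-functions build:

var broken = val => { key: val };    // block body; "key:" is just a label
var fixed = val => ({ key: val });   // parenthesized object literal
assertEquals(undefined, broken(1));
assertEquals(1, fixed(1).key);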
diff --git a/test/mjsunit/harmony/block-conflicts.js b/test/mjsunit/harmony/block-conflicts.js
index 76ae11b..1eedb68 100644
--- a/test/mjsunit/harmony/block-conflicts.js
+++ b/test/mjsunit/harmony/block-conflicts.js
@@ -39,9 +39,18 @@
 }
 
 
+function TestGlobal(s,e) {
+  try {
+    return eval(s + e);
+  } catch (x) {
+    return CheckException(x);
+  }
+}
+
+
 function TestFunction(s,e) {
   try {
-    return eval("(function(){" + s + ";return " + e + "})")();
+    return eval("(function(){" + s + " return " + e + "})")();
   } catch (x) {
     return CheckException(x);
   }
@@ -50,7 +59,7 @@
 
 function TestBlock(s,e) {
   try {
-    return eval("(function(){ if (true) { " + s + "; }; return " + e + "})")();
+    return eval("(function(){ {" + s + "} return " + e + "})")();
   } catch (x) {
     return CheckException(x);
   }
@@ -59,76 +68,123 @@
 function TestAll(expected,s,opt_e) {
   var e = "";
   var msg = s;
-  if (opt_e) { e = opt_e; msg += "; " + opt_e; }
-  assertEquals(expected, TestFunction(s,e), "function:'" + msg + "'");
-  assertEquals(expected, TestBlock(s,e), "block:'" + msg + "'");
+  if (opt_e) { e = opt_e; msg += opt_e; }
+  assertEquals(expected === 'LocalConflict' ? 'NoConflict' : expected,
+      TestGlobal(s,e), "global:'" + msg + "'");
+  assertEquals(expected === 'LocalConflict' ? 'NoConflict' : expected,
+      TestFunction(s,e), "function:'" + msg + "'");
+  assertEquals(expected === 'LocalConflict' ? 'Conflict' : expected,
+      TestBlock(s,e), "block:'" + msg + "'");
 }
 
 
 function TestConflict(s) {
   TestAll('Conflict', s);
-  TestAll('Conflict', 'eval("' + s + '")');
+  TestAll('Conflict', 'eval("' + s + '");');
 }
 
-
 function TestNoConflict(s) {
   TestAll('NoConflict', s, "'NoConflict'");
-  TestAll('NoConflict', 'eval("' + s + '")', "'NoConflict'");
+  TestAll('NoConflict', 'eval("' + s + '");', "'NoConflict'");
 }
 
-var letbinds = [ "let x",
-                 "let x = 0",
-                 "let x = undefined",
-                 "function x() { }",
-                 "let x = function() {}",
-                 "let x, y",
-                 "let y, x",
-                 "const x = 0",
-                 "const x = undefined",
-                 "const x = function() {}",
-                 "const x = 2, y = 3",
-                 "const y = 4, x = 5",
-                 ];
-var varbinds = [ "var x",
-                 "var x = 0",
-                 "var x = undefined",
-                 "var x = function() {}",
-                 "var x, y",
-                 "var y, x",
-                 ];
+function TestLocalConflict(s) {
+  TestAll('LocalConflict', s, "'NoConflict'");
+  TestAll('NoConflict', 'eval("' + s + '");', "'NoConflict'");
+}
 
+var letbinds = [ "let x;",
+                 "let x = 0;",
+                 "let x = undefined;",
+                 "let x = function() {};",
+                 "let x, y;",
+                 "let y, x;",
+                 "const x = 0;",
+                 "const x = undefined;",
+                 "const x = function() {};",
+                 "const x = 2, y = 3;",
+                 "const y = 4, x = 5;",
+                 ];
+var varbinds = [ "var x;",
+                 "var x = 0;",
+                 "var x = undefined;",
+                 "var x = function() {};",
+                 "var x, y;",
+                 "var y, x;",
+                 ];
+var funbind = "function x() {}";
 
 for (var l = 0; l < letbinds.length; ++l) {
   // Test conflicting let/var bindings.
   for (var v = 0; v < varbinds.length; ++v) {
     // Same level.
-    TestConflict(letbinds[l] +'; ' + varbinds[v]);
-    TestConflict(varbinds[v] +'; ' + letbinds[l]);
+    TestConflict(letbinds[l] + varbinds[v]);
+    TestConflict(varbinds[v] + letbinds[l]);
     // Different level.
-    TestConflict(letbinds[l] +'; {' + varbinds[v] + '; }');
-    TestConflict('{ ' + varbinds[v] +'; }' + letbinds[l]);
+    TestConflict(letbinds[l] + '{' + varbinds[v] + '}');
+    TestConflict('{' + varbinds[v] +'}' + letbinds[l]);
+    TestNoConflict(varbinds[v] + '{' + letbinds[l] + '}');
+    TestNoConflict('{' + letbinds[l] + '}' + varbinds[v]);
+    // For loop.
+    TestConflict('for (' + letbinds[l] + '0;) {' + varbinds[v] + '}');
+    TestNoConflict('for (' + varbinds[v] + '0;) {' + letbinds[l] + '}');
   }
 
   // Test conflicting let/let bindings.
   for (var k = 0; k < letbinds.length; ++k) {
     // Same level.
-    TestConflict(letbinds[l] +'; ' + letbinds[k]);
-    TestConflict(letbinds[k] +'; ' + letbinds[l]);
+    TestConflict(letbinds[l] + letbinds[k]);
+    TestConflict(letbinds[k] + letbinds[l]);
     // Different level.
-    TestNoConflict(letbinds[l] +'; { ' + letbinds[k] + '; }');
-    TestNoConflict('{ ' + letbinds[k] +'; } ' + letbinds[l]);
+    TestNoConflict(letbinds[l] + '{ ' + letbinds[k] + '}');
+    TestNoConflict('{' + letbinds[k] +'} ' + letbinds[l]);
+    // For loop.
+    TestNoConflict('for (' + letbinds[l] + '0;) {' + letbinds[k] + '}');
+    TestNoConflict('for (' + letbinds[k] + '0;) {' + letbinds[l] + '}');
   }
 
+  // Test conflicting function/let bindings.
+  // Same level.
+  TestConflict(letbinds[l] + funbind);
+  TestConflict(funbind + letbinds[l]);
+  // Different level.
+  TestNoConflict(letbinds[l] + '{' + funbind + '}');
+  TestNoConflict('{' + funbind + '}' + letbinds[l]);
+  TestNoConflict(funbind + '{' + letbinds[l] + '}');
+  TestNoConflict('{' + letbinds[l] + '}' + funbind);
+  // For loop.
+  TestNoConflict('for (' + letbinds[l] + '0;) {' + funbind + '}');
+
   // Test conflicting parameter/let bindings.
-  TestConflict('(function (x) { ' + letbinds[l] + '; })()');
+  TestConflict('(function(x) {' + letbinds[l] + '})();');
+}
+
+// Test conflicting function/var bindings.
+for (var v = 0; v < varbinds.length; ++v) {
+  // Same level.
+  TestLocalConflict(varbinds[v] + funbind);
+  TestLocalConflict(funbind + varbinds[v]);
+  // Different level.
+  TestLocalConflict(funbind + '{' + varbinds[v] + '}');
+  TestLocalConflict('{' + varbinds[v] +'}' + funbind);
+  TestNoConflict(varbinds[v] + '{' + funbind + '}');
+  TestNoConflict('{' + funbind + '}' + varbinds[v]);
+  // For loop.
+  TestNoConflict('for (' + varbinds[v] + '0;) {' + funbind + '}');
 }
 
 // Test conflicting catch/var bindings.
 for (var v = 0; v < varbinds.length; ++v) {
-  TestConflict('try {} catch (x) { ' + varbinds[v] + '; }');
+  TestConflict('try {} catch(x) {' + varbinds[v] + '}');
 }
 
 // Test conflicting parameter/var bindings.
 for (var v = 0; v < varbinds.length; ++v) {
-  TestNoConflict('(function (x) { ' + varbinds[v] + '; })()');
+  TestNoConflict('(function (x) {' + varbinds[v] + '})();');
 }
+
+// Test conflicting catch/function bindings.
+TestNoConflict('try {} catch(x) {' + funbind + '}');
+
+// Test conflicting parameter/function bindings.
+TestNoConflict('(function (x) {' + funbind + '})();');
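
The asymmetry this matrix exercises comes down to hoisting: a var declared in an inner block hoists to function scope and collides with an enclosing let, while a let stays confined to its block and never conflicts outward. A two-case distillation using the mjsunit helpers (error classes omitted on purpose, since the harness above only checks that a conflict throws):

// var hoists past the block and collides with the outer let: early error.
assertThrows("(function() { 'use strict'; let x; { var x; } })");
// let is block-local: no conflict with the enclosing var.
assertDoesNotThrow("(function() { 'use strict'; var x; { let x; } })()");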
diff --git a/test/mjsunit/harmony/block-early-errors.js b/test/mjsunit/harmony/block-early-errors.js
index 791f001..8ed5ea8 100644
--- a/test/mjsunit/harmony/block-early-errors.js
+++ b/test/mjsunit/harmony/block-early-errors.js
@@ -30,7 +30,6 @@
 function CheckException(e) {
   var string = e.toString();
   assertInstanceof(e, SyntaxError);
-  assertTrue(string.indexOf("Illegal let") >= 0);
 }
 
 function Check(str) {
@@ -49,7 +48,7 @@
 }
 
 // Check for early syntax errors when using let
-// declarations outside of extended mode.
+// declarations outside of strict mode.
 Check("let x;");
 Check("let x = 1;");
 Check("let x, y;");
diff --git a/test/mjsunit/harmony/block-let-declaration.js b/test/mjsunit/harmony/block-let-declaration.js
index afbc670..44a0049 100644
--- a/test/mjsunit/harmony/block-let-declaration.js
+++ b/test/mjsunit/harmony/block-let-declaration.js
@@ -56,11 +56,11 @@
 // an exception in eval code during parsing, before even compiling or executing
 // the code. Thus the generated function is not called here.
 function TestLocalThrows(str, expect) {
-  assertThrows("(function(){ 'use strict'; " + str + "})", expect);
+  assertThrows("(function(arg){ 'use strict'; " + str + "})", expect);
 }
 
 function TestLocalDoesNotThrow(str) {
-  assertDoesNotThrow("(function(){ 'use strict'; " + str + "})()");
+  assertDoesNotThrow("(function(arg){ 'use strict'; " + str + "})()");
 }
 
 // Test let declarations in statement positions.
@@ -108,6 +108,28 @@
 TestLocalDoesNotThrow("switch (true) { case true: var x; }");
 TestLocalDoesNotThrow("switch (true) { default: var x; }");
 
+// Test that redeclarations of functions are only allowed in outermost scope.
+TestLocalThrows("{ let f; var f; }");
+TestLocalThrows("{ var f; let f; }");
+TestLocalThrows("{ function f() {} let f; }");
+TestLocalThrows("{ let f; function f() {} }");
+TestLocalThrows("{ function f() {} var f; }");
+TestLocalThrows("{ var f; function f() {} }");
+TestLocalThrows("{ function f() {} function f() {} }");
+TestLocalThrows("function f() {} let f;");
+TestLocalThrows("let f; function f() {}");
+TestLocalDoesNotThrow("function arg() {}");
+TestLocalDoesNotThrow("function f() {} var f;");
+TestLocalDoesNotThrow("var f; function f() {}");
+TestLocalDoesNotThrow("function f() {} function f() {}");
+
+function g(f) {
+  function f() { return 1 }
+  return f()
+}
+assertEquals(1, g(function() { return 2 }))
+
+
 // Test function declarations in source element and
 // sloppy statement positions.
 function f() {
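
A condensed reading of the redeclaration rules the new assertions encode, hedged to this build's strict-mode handling: function/var pairs may redeclare each other at the top level of a function body, while the same pair inside an inner block, or any pairing that involves let, is rejected at parse time; a function declaration may also shadow a same-named parameter, which is why g above returns 1 from the inner f rather than calling its argument.

// Legal only at function top level.
assertDoesNotThrow("(function() { 'use strict'; function f() {} var f; })()");
// The same pair inside a block, or any pairing with let, is an error.
assertThrows("(function() { 'use strict'; { function f() {} var f; } })");
assertThrows("(function() { 'use strict'; let f; function f() {} })");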
diff --git a/test/mjsunit/harmony/collection-iterator.js b/test/mjsunit/harmony/collection-iterator.js
deleted file mode 100644
index a3a950a..0000000
--- a/test/mjsunit/harmony/collection-iterator.js
+++ /dev/null
@@ -1,195 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-collections --allow-natives-syntax
-
-
-(function TestSetIterator() {
-  var s = new Set;
-  var iter = s.values();
-  assertEquals('Set Iterator', %_ClassOf(iter));
-
-  var SetIteratorPrototype = iter.__proto__;
-  assertFalse(SetIteratorPrototype.hasOwnProperty('constructor'));
-  assertEquals(SetIteratorPrototype.__proto__, Object.prototype);
-
-  var propertyNames = Object.getOwnPropertyNames(SetIteratorPrototype);
-  assertArrayEquals(['next'], propertyNames);
-
-  assertEquals(new Set().values().__proto__, SetIteratorPrototype);
-  assertEquals(new Set().entries().__proto__, SetIteratorPrototype);
-})();
-
-
-(function TestSetIteratorValues() {
-  var s = new Set;
-  s.add(1);
-  s.add(2);
-  s.add(3);
-  var iter = s.values();
-
-  assertEquals({value: 1, done: false}, iter.next());
-  assertEquals({value: 2, done: false}, iter.next());
-  assertEquals({value: 3, done: false}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-})();
-
-
-(function TestSetIteratorEntries() {
-  var s = new Set;
-  s.add(1);
-  s.add(2);
-  s.add(3);
-  var iter = s.entries();
-
-  assertEquals({value: [1, 1], done: false}, iter.next());
-  assertEquals({value: [2, 2], done: false}, iter.next());
-  assertEquals({value: [3, 3], done: false}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-})();
-
-
-(function TestSetIteratorMutations() {
-  var s = new Set;
-  s.add(1);
-  var iter = s.values();
-  assertEquals({value: 1, done: false}, iter.next());
-  s.add(2);
-  s.add(3);
-  s.add(4);
-  s.add(5);
-  assertEquals({value: 2, done: false}, iter.next());
-  s.delete(3);
-  assertEquals({value: 4, done: false}, iter.next());
-  s.delete(5);
-  assertEquals({value: undefined, done: true}, iter.next());
-  s.add(4);
-  assertEquals({value: undefined, done: true}, iter.next());
-})();
-
-
-(function TestSetInvalidReceiver() {
-  assertThrows(function() {
-    Set.prototype.values.call({});
-  }, TypeError);
-  assertThrows(function() {
-    Set.prototype.entries.call({});
-  }, TypeError);
-})();
-
-
-(function TestSetIteratorInvalidReceiver() {
-  var iter = new Set().values();
-  assertThrows(function() {
-    iter.next.call({});
-  });
-})();
-
-
-(function TestSetIteratorSymbol() {
-  assertEquals(Set.prototype[Symbol.iterator], Set.prototype.values);
-  assertTrue(Set.prototype.hasOwnProperty(Symbol.iterator));
-  assertFalse(Set.prototype.propertyIsEnumerable(Symbol.iterator));
-
-  var iter = new Set().values();
-  assertEquals(iter, iter[Symbol.iterator]());
-  assertEquals(iter[Symbol.iterator].name, '[Symbol.iterator]');
-})();
-
-
-(function TestMapIterator() {
-  var m = new Map;
-  var iter = m.values();
-  assertEquals('Map Iterator', %_ClassOf(iter));
-
-  var MapIteratorPrototype = iter.__proto__;
-  assertFalse(MapIteratorPrototype.hasOwnProperty('constructor'));
-  assertEquals(MapIteratorPrototype.__proto__, Object.prototype);
-
-  var propertyNames = Object.getOwnPropertyNames(MapIteratorPrototype);
-  assertArrayEquals(['next'], propertyNames);
-
-  assertEquals(new Map().values().__proto__, MapIteratorPrototype);
-  assertEquals(new Map().keys().__proto__, MapIteratorPrototype);
-  assertEquals(new Map().entries().__proto__, MapIteratorPrototype);
-})();
-
-
-(function TestMapIteratorValues() {
-  var m = new Map;
-  m.set(1, 11);
-  m.set(2, 22);
-  m.set(3, 33);
-  var iter = m.values();
-
-  assertEquals({value: 11, done: false}, iter.next());
-  assertEquals({value: 22, done: false}, iter.next());
-  assertEquals({value: 33, done: false}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-})();
-
-
-(function TestMapIteratorKeys() {
-  var m = new Map;
-  m.set(1, 11);
-  m.set(2, 22);
-  m.set(3, 33);
-  var iter = m.keys();
-
-  assertEquals({value: 1, done: false}, iter.next());
-  assertEquals({value: 2, done: false}, iter.next());
-  assertEquals({value: 3, done: false}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-})();
-
-
-(function TestMapIteratorEntries() {
-  var m = new Map;
-  m.set(1, 11);
-  m.set(2, 22);
-  m.set(3, 33);
-  var iter = m.entries();
-
-  assertEquals({value: [1, 11], done: false}, iter.next());
-  assertEquals({value: [2, 22], done: false}, iter.next());
-  assertEquals({value: [3, 33], done: false}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-  assertEquals({value: undefined, done: true}, iter.next());
-})();
-
-
-(function TestMapInvalidReceiver() {
-  assertThrows(function() {
-    Map.prototype.values.call({});
-  }, TypeError);
-  assertThrows(function() {
-    Map.prototype.keys.call({});
-  }, TypeError);
-  assertThrows(function() {
-    Map.prototype.entries.call({});
-  }, TypeError);
-})();
-
-
-(function TestMapIteratorInvalidReceiver() {
-  var iter = new Map().values();
-  assertThrows(function() {
-    iter.next.call({});
-  }, TypeError);
-})();
-
-
-(function TestMapIteratorSymbol() {
-  assertEquals(Map.prototype[Symbol.iterator], Map.prototype.entries);
-  assertTrue(Map.prototype.hasOwnProperty(Symbol.iterator));
-  assertFalse(Map.prototype.propertyIsEnumerable(Symbol.iterator));
-
-  var iter = new Map().values();
-  assertEquals(iter, iter[Symbol.iterator]());
-  assertEquals(iter[Symbol.iterator].name, '[Symbol.iterator]');
-})();
diff --git a/test/mjsunit/harmony/collections.js b/test/mjsunit/harmony/collections.js
deleted file mode 100644
index edbdd41..0000000
--- a/test/mjsunit/harmony/collections.js
+++ /dev/null
@@ -1,989 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-collections
-// Flags: --expose-gc --allow-natives-syntax
-
-
-// Test valid getter and setter calls on Sets and WeakSets
-function TestValidSetCalls(m) {
-  assertDoesNotThrow(function () { m.add(new Object) });
-  assertDoesNotThrow(function () { m.has(new Object) });
-  assertDoesNotThrow(function () { m.delete(new Object) });
-}
-TestValidSetCalls(new Set);
-TestValidSetCalls(new WeakSet);
-
-
-// Test valid getter and setter calls on Maps and WeakMaps
-function TestValidMapCalls(m) {
-  assertDoesNotThrow(function () { m.get(new Object) });
-  assertDoesNotThrow(function () { m.set(new Object) });
-  assertDoesNotThrow(function () { m.has(new Object) });
-  assertDoesNotThrow(function () { m.delete(new Object) });
-}
-TestValidMapCalls(new Map);
-TestValidMapCalls(new WeakMap);
-
-
-// Test invalid getter and setter calls for WeakMap only
-function TestInvalidCalls(m) {
-  assertThrows(function () { m.get(undefined) }, TypeError);
-  assertThrows(function () { m.set(undefined, 0) }, TypeError);
-  assertThrows(function () { m.get(null) }, TypeError);
-  assertThrows(function () { m.set(null, 0) }, TypeError);
-  assertThrows(function () { m.get(0) }, TypeError);
-  assertThrows(function () { m.set(0, 0) }, TypeError);
-  assertThrows(function () { m.get('a-key') }, TypeError);
-  assertThrows(function () { m.set('a-key', 0) }, TypeError);
-}
-TestInvalidCalls(new WeakMap);
-
-
-// Test expected behavior for Sets and WeakSets
-function TestSet(set, key) {
-  assertFalse(set.has(key));
-  assertSame(undefined, set.add(key));
-  assertTrue(set.has(key));
-  assertTrue(set.delete(key));
-  assertFalse(set.has(key));
-  assertFalse(set.delete(key));
-  assertFalse(set.has(key));
-}
-function TestSetBehavior(set) {
-  for (var i = 0; i < 20; i++) {
-    TestSet(set, new Object);
-    TestSet(set, i);
-    TestSet(set, i / 100);
-    TestSet(set, 'key-' + i);
-  }
-  var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
-  for (var i = 0; i < keys.length; i++) {
-    TestSet(set, keys[i]);
-  }
-}
-TestSetBehavior(new Set);
-TestSet(new WeakSet, new Object);
-
-
-// Test expected mapping behavior for Maps and WeakMaps
-function TestMapping(map, key, value) {
-  assertSame(undefined, map.set(key, value));
-  assertSame(value, map.get(key));
-}
-function TestMapBehavior1(m) {
-  TestMapping(m, new Object, 23);
-  TestMapping(m, new Object, 'the-value');
-  TestMapping(m, new Object, new Object);
-}
-TestMapBehavior1(new Map);
-TestMapBehavior1(new WeakMap);
-
-
-// Test expected mapping behavior for Maps only
-function TestMapBehavior2(m) {
-  for (var i = 0; i < 20; i++) {
-    TestMapping(m, i, new Object);
-    TestMapping(m, i / 10, new Object);
-    TestMapping(m, 'key-' + i, new Object);
-  }
-  var keys = [ +0, -0, +Infinity, -Infinity, true, false, null, undefined ];
-  for (var i = 0; i < keys.length; i++) {
-    TestMapping(m, keys[i], new Object);
-  }
-}
-TestMapBehavior2(new Map);
-
-
-// Test expected querying behavior of Maps and WeakMaps
-function TestQuery(m) {
-  var key = new Object;
-  var values = [ 'x', 0, +Infinity, -Infinity, true, false, null, undefined ];
-  for (var i = 0; i < values.length; i++) {
-    TestMapping(m, key, values[i]);
-    assertTrue(m.has(key));
-    assertFalse(m.has(new Object));
-  }
-}
-TestQuery(new Map);
-TestQuery(new WeakMap);
-
-
-// Test expected deletion behavior of Maps and WeakMaps
-function TestDelete(m) {
-  var key = new Object;
-  TestMapping(m, key, 'to-be-deleted');
-  assertTrue(m.delete(key));
-  assertFalse(m.delete(key));
-  assertFalse(m.delete(new Object));
-  assertSame(m.get(key), undefined);
-}
-TestDelete(new Map);
-TestDelete(new WeakMap);
-
-
-// Test GC of Maps and WeakMaps with entry
-function TestGC1(m) {
-  var key = new Object;
-  m.set(key, 'not-collected');
-  gc();
-  assertSame('not-collected', m.get(key));
-}
-TestGC1(new Map);
-TestGC1(new WeakMap);
-
-
-// Test GC of Maps and WeakMaps with chained entries
-function TestGC2(m) {
-  var head = new Object;
-  for (key = head, i = 0; i < 10; i++, key = m.get(key)) {
-    m.set(key, new Object);
-  }
-  gc();
-  var count = 0;
-  for (key = head; key != undefined; key = m.get(key)) {
-    count++;
-  }
-  assertEquals(11, count);
-}
-TestGC2(new Map);
-TestGC2(new WeakMap);
-
-
-// Test property attribute [[Enumerable]]
-function TestEnumerable(func) {
-  function props(x) {
-    var array = [];
-    for (var p in x) array.push(p);
-    return array.sort();
-  }
-  assertArrayEquals([], props(func));
-  assertArrayEquals([], props(func.prototype));
-  assertArrayEquals([], props(new func()));
-}
-TestEnumerable(Set);
-TestEnumerable(Map);
-TestEnumerable(WeakMap);
-TestEnumerable(WeakSet);
-
-
-// Test arbitrary properties on Maps and WeakMaps
-function TestArbitrary(m) {
-  function TestProperty(map, property, value) {
-    map[property] = value;
-    assertEquals(value, map[property]);
-  }
-  for (var i = 0; i < 20; i++) {
-    TestProperty(m, i, 'val' + i);
-    TestProperty(m, 'foo' + i, 'bar' + i);
-  }
-  TestMapping(m, new Object, 'foobar');
-}
-TestArbitrary(new Map);
-TestArbitrary(new WeakMap);
-
-
-// Test direct constructor call
-assertThrows(function() { Set(); }, TypeError);
-assertThrows(function() { Map(); }, TypeError);
-assertThrows(function() { WeakMap(); }, TypeError);
-assertThrows(function() { WeakSet(); }, TypeError);
-
-
-// Test whether NaN values as keys are treated correctly.
-var s = new Set;
-assertFalse(s.has(NaN));
-assertFalse(s.has(NaN + 1));
-assertFalse(s.has(23));
-s.add(NaN);
-assertTrue(s.has(NaN));
-assertTrue(s.has(NaN + 1));
-assertFalse(s.has(23));
-var m = new Map;
-assertFalse(m.has(NaN));
-assertFalse(m.has(NaN + 1));
-assertFalse(m.has(23));
-m.set(NaN, 'a-value');
-assertTrue(m.has(NaN));
-assertTrue(m.has(NaN + 1));
-assertFalse(m.has(23));
-
-
-// Test some common JavaScript idioms for Sets
-var s = new Set;
-assertTrue(s instanceof Set);
-assertTrue(Set.prototype.add instanceof Function)
-assertTrue(Set.prototype.has instanceof Function)
-assertTrue(Set.prototype.delete instanceof Function)
-assertTrue(Set.prototype.clear instanceof Function)
-
-
-// Test some common JavaScript idioms for Maps
-var m = new Map;
-assertTrue(m instanceof Map);
-assertTrue(Map.prototype.set instanceof Function)
-assertTrue(Map.prototype.get instanceof Function)
-assertTrue(Map.prototype.has instanceof Function)
-assertTrue(Map.prototype.delete instanceof Function)
-assertTrue(Map.prototype.clear instanceof Function)
-
-
-// Test some common JavaScript idioms for WeakMaps
-var m = new WeakMap;
-assertTrue(m instanceof WeakMap);
-assertTrue(WeakMap.prototype.set instanceof Function)
-assertTrue(WeakMap.prototype.get instanceof Function)
-assertTrue(WeakMap.prototype.has instanceof Function)
-assertTrue(WeakMap.prototype.delete instanceof Function)
-assertTrue(WeakMap.prototype.clear instanceof Function)
-
-
-// Test some common JavaScript idioms for WeakSets
-var s = new WeakSet;
-assertTrue(s instanceof WeakSet);
-assertTrue(WeakSet.prototype.add instanceof Function)
-assertTrue(WeakSet.prototype.has instanceof Function)
-assertTrue(WeakSet.prototype.delete instanceof Function)
-assertTrue(WeakSet.prototype.clear instanceof Function)
-
-
-// Test class of instance and prototype.
-assertEquals("Set", %_ClassOf(new Set))
-assertEquals("Object", %_ClassOf(Set.prototype))
-assertEquals("Map", %_ClassOf(new Map))
-assertEquals("Object", %_ClassOf(Map.prototype))
-assertEquals("WeakMap", %_ClassOf(new WeakMap))
-assertEquals("Object", %_ClassOf(WeakMap.prototype))
-assertEquals("WeakSet", %_ClassOf(new WeakSet))
-assertEquals("Object", %_ClassOf(WeakMap.prototype))
-
-
-// Test name of constructor.
-assertEquals("Set", Set.name);
-assertEquals("Map", Map.name);
-assertEquals("WeakMap", WeakMap.name);
-assertEquals("WeakSet", WeakSet.name);
-
-
-// Test prototype property of Set, Map, WeakMap and WeakSet.
-// TODO(2793): Should all be non-writable, and the extra flag removed.
-function TestPrototype(C, writable) {
-  assertTrue(C.prototype instanceof Object);
-  assertEquals({
-    value: {},
-    writable: writable,
-    enumerable: false,
-    configurable: false
-  }, Object.getOwnPropertyDescriptor(C, "prototype"));
-}
-TestPrototype(Set, true);
-TestPrototype(Map, true);
-TestPrototype(WeakMap, false);
-TestPrototype(WeakSet, false);
-
-
-// Test constructor property of the Set, Map, WeakMap and WeakSet prototype.
-function TestConstructor(C) {
-  assertFalse(C === Object.prototype.constructor);
-  assertSame(C, C.prototype.constructor);
-  assertSame(C, (new C).__proto__.constructor);
-}
-TestConstructor(Set);
-TestConstructor(Map);
-TestConstructor(WeakMap);
-TestConstructor(WeakSet);
-
-
-// Test the Set, Map, WeakMap and WeakSet global properties themselves.
-function TestDescriptor(global, C) {
-  assertEquals({
-    value: C,
-    writable: true,
-    enumerable: false,
-    configurable: true
-  }, Object.getOwnPropertyDescriptor(global, C.name));
-}
-TestDescriptor(this, Set);
-TestDescriptor(this, Map);
-TestDescriptor(this, WeakMap);
-TestDescriptor(this, WeakSet);
-
-
-// Regression test for WeakMap prototype.
-assertTrue(WeakMap.prototype.constructor === WeakMap)
-assertTrue(Object.getPrototypeOf(WeakMap.prototype) === Object.prototype)
-
-
-// Regression test for issue 1617: The prototype of the WeakMap constructor
-// needs to be unique (i.e. different from the one of the Object constructor).
-assertFalse(WeakMap.prototype === Object.prototype);
-var o = Object.create({});
-assertFalse("get" in o);
-assertFalse("set" in o);
-assertEquals(undefined, o.get);
-assertEquals(undefined, o.set);
-var o = Object.create({}, { myValue: {
-  value: 10,
-  enumerable: false,
-  configurable: true,
-  writable: true
-}});
-assertEquals(10, o.myValue);
-
-
-// Regression test for issue 1884: Invoking any of the methods for Harmony
-// maps, sets, or weak maps, with a wrong type of receiver should be throwing
-// a proper TypeError.
-var alwaysBogus = [ undefined, null, true, "x", 23, {} ];
-var bogusReceiversTestSet = [
-  { proto: Set.prototype,
-    funcs: [ 'add', 'has', 'delete' ],
-    receivers: alwaysBogus.concat([ new Map, new WeakMap, new WeakSet ]),
-  },
-  { proto: Map.prototype,
-    funcs: [ 'get', 'set', 'has', 'delete' ],
-    receivers: alwaysBogus.concat([ new Set, new WeakMap, new WeakSet ]),
-  },
-  { proto: WeakMap.prototype,
-    funcs: [ 'get', 'set', 'has', 'delete' ],
-    receivers: alwaysBogus.concat([ new Set, new Map, new WeakSet ]),
-  },
-  { proto: WeakSet.prototype,
-    funcs: [ 'add', 'has', 'delete' ],
-    receivers: alwaysBogus.concat([ new Set, new Map, new WeakMap ]),
-  },
-];
-function TestBogusReceivers(testSet) {
-  for (var i = 0; i < testSet.length; i++) {
-    var proto = testSet[i].proto;
-    var funcs = testSet[i].funcs;
-    var receivers = testSet[i].receivers;
-    for (var j = 0; j < funcs.length; j++) {
-      var func = proto[funcs[j]];
-      for (var k = 0; k < receivers.length; k++) {
-        assertThrows(function () { func.call(receivers[k], {}) }, TypeError);
-      }
-    }
-  }
-}
-TestBogusReceivers(bogusReceiversTestSet);
-
-
-// Stress Test
-// There is a proposed stress-test available at the es-discuss mailing list
-// which cannot be reasonably automated.  Check it out by hand if you like:
-// https://mail.mozilla.org/pipermail/es-discuss/2011-May/014096.html
-
-
-// Set and Map size getters
-var setSizeDescriptor = Object.getOwnPropertyDescriptor(Set.prototype, 'size');
-assertEquals(undefined, setSizeDescriptor.value);
-assertEquals(undefined, setSizeDescriptor.set);
-assertTrue(setSizeDescriptor.get instanceof Function);
-assertEquals(undefined, setSizeDescriptor.get.prototype);
-assertFalse(setSizeDescriptor.enumerable);
-assertTrue(setSizeDescriptor.configurable);
-
-var s = new Set();
-assertFalse(s.hasOwnProperty('size'));
-for (var i = 0; i < 10; i++) {
-  assertEquals(i, s.size);
-  s.add(i);
-}
-for (var i = 9; i >= 0; i--) {
-  s.delete(i);
-  assertEquals(i, s.size);
-}
-
-
-var mapSizeDescriptor = Object.getOwnPropertyDescriptor(Map.prototype, 'size');
-assertEquals(undefined, mapSizeDescriptor.value);
-assertEquals(undefined, mapSizeDescriptor.set);
-assertTrue(mapSizeDescriptor.get instanceof Function);
-assertEquals(undefined, mapSizeDescriptor.get.prototype);
-assertFalse(mapSizeDescriptor.enumerable);
-assertTrue(mapSizeDescriptor.configurable);
-
-var m = new Map();
-assertFalse(m.hasOwnProperty('size'));
-for (var i = 0; i < 10; i++) {
-  assertEquals(i, m.size);
-  m.set(i, i);
-}
-for (var i = 9; i >= 0; i--) {
-  m.delete(i);
-  assertEquals(i, m.size);
-}
-
-
-// Test Set clear
-(function() {
-  var s = new Set();
-  s.add(42);
-  assertTrue(s.has(42));
-  assertEquals(1, s.size);
-  s.clear();
-  assertFalse(s.has(42));
-  assertEquals(0, s.size);
-})();
-
-
-// Test Map clear
-(function() {
-  var m = new Map();
-  m.set(42, true);
-  assertTrue(m.has(42));
-  assertEquals(1, m.size);
-  m.clear();
-  assertFalse(m.has(42));
-  assertEquals(0, m.size);
-})();
-
-
-// Test WeakMap clear
-(function() {
-  var k = new Object();
-  var w = new WeakMap();
-  w.set(k, 23);
-  assertTrue(w.has(k));
-  assertEquals(23, w.get(k));
-  w.clear();
-  assertFalse(w.has(k));
-  assertEquals(undefined, w.get(k));
-})();
-
-
-// Test WeakSet clear
-(function() {
-  var k = new Object();
-  var w = new WeakSet();
-  w.add(k);
-  assertTrue(w.has(k));
-  w.clear();
-  assertFalse(w.has(k));
-})();
-
-
-(function TestMinusZeroSet() {
-  var m = new Set();
-  m.add(0);
-  m.add(-0);
-  assertEquals(1, m.size);
-  assertTrue(m.has(0));
-  assertTrue(m.has(-0));
-})();
-
-
-(function TestMinusZeroMap() {
-  var m = new Map();
-  m.set(0, 'plus');
-  m.set(-0, 'minus');
-  assertEquals(1, m.size);
-  assertTrue(m.has(0));
-  assertTrue(m.has(-0));
-  assertEquals('minus', m.get(0));
-  assertEquals('minus', m.get(-0));
-})();
-
-
-(function TestSetForEachInvalidTypes() {
-  assertThrows(function() {
-    Set.prototype.set.forEach.call({});
-  }, TypeError);
-
-  var set = new Set();
-  assertThrows(function() {
-    set.forEach({});
-  }, TypeError);
-})();
-
-
-(function TestSetForEach() {
-  var set = new Set();
-  set.add('a');
-  set.add('b');
-  set.add('c');
-
-  var buffer = '';
-  var receiver = {};
-  set.forEach(function(v, k, s) {
-    assertSame(v, k);
-    assertSame(set, s);
-    assertSame(this, receiver);
-    buffer += v;
-    if (v === 'a') {
-      set.delete('b');
-      set.add('d');
-      set.add('e');
-      set.add('f');
-    } else if (v === 'c') {
-      set.add('b');
-      set.delete('e');
-    }
-  }, receiver);
-
-  assertEquals('acdfb', buffer);
-})();
-
-
-(function TestSetForEachAddAtEnd() {
-  var set = new Set();
-  set.add('a');
-  set.add('b');
-
-  var buffer = '';
-  set.forEach(function(v) {
-    buffer += v;
-    if (v === 'b') {
-      set.add('c');
-    }
-  });
-
-  assertEquals('abc', buffer);
-})();
-
-
-(function TestSetForEachDeleteNext() {
-  var set = new Set();
-  set.add('a');
-  set.add('b');
-  set.add('c');
-
-  var buffer = '';
-  set.forEach(function(v) {
-    buffer += v;
-    if (v === 'b') {
-      set.delete('c');
-    }
-  });
-
-  assertEquals('ab', buffer);
-})();
-
-
-(function TestSetForEachDeleteVisitedAndAddAgain() {
-  var set = new Set();
-  set.add('a');
-  set.add('b');
-  set.add('c');
-
-  var buffer = '';
-  set.forEach(function(v) {
-    buffer += v;
-    if (v === 'b') {
-      set.delete('a');
-    } else if (v === 'c') {
-      set.add('a');
-    }
-  });
-
-  assertEquals('abca', buffer);
-})();
-
-
-(function TestSetForEachClear() {
-  var set = new Set();
-  set.add('a');
-  set.add('b');
-  set.add('c');
-
-  var buffer = '';
-  set.forEach(function(v) {
-    buffer += v;
-    if (v === 'a') {
-      set.clear();
-      set.add('d');
-      set.add('e');
-    }
-  });
-
-  assertEquals('ade', buffer);
-})();
-
-
-(function TestSetForEachNested() {
-  var set = new Set();
-  set.add('a');
-  set.add('b');
-  set.add('c');
-
-  var buffer = '';
-  set.forEach(function(v) {
-    buffer += v;
-    set.forEach(function(v) {
-      buffer += v;
-      if (v === 'a') {
-        set.delete('b');
-      }
-    });
-  });
-
-  assertEquals('aaccac', buffer);
-})();
-
-
-(function TestSetForEachEarlyExit() {
-  var set = new Set();
-  set.add('a');
-  set.add('b');
-  set.add('c');
-
-  var buffer = '';
-  var ex = {};
-  try {
-    set.forEach(function(v) {
-      buffer += v;
-      throw ex;
-    });
-  } catch (e) {
-    assertEquals(ex, e);
-  }
-  assertEquals('a', buffer);
-})();
-
-
-(function TestSetForEachGC() {
-  var set = new Set();
-  for (var i = 0; i < 100; i++) {
-    set.add(i);
-  }
-
-  var accumulated = 0;
-  set.forEach(function(v) {
-    accumulated += v;
-    if (v % 10 === 0) {
-      gc();
-    }
-  });
-  assertEquals(4950, accumulated);
-})();
-
-(function TestMapForEachInvalidTypes() {
-  assertThrows(function() {
-    Map.prototype.map.forEach.call({});
-  }, TypeError);
-
-  var map = new Map();
-  assertThrows(function() {
-    map.forEach({});
-  }, TypeError);
-})();
-
-
-(function TestMapForEach() {
-  var map = new Map();
-  map.set(0, 'a');
-  map.set(1, 'b');
-  map.set(2, 'c');
-
-  var buffer = [];
-  var receiver = {};
-  map.forEach(function(v, k, m) {
-    assertEquals(map, m);
-    assertEquals(this, receiver);
-    buffer.push(k, v);
-    if (k === 0) {
-      map.delete(1);
-      map.set(3, 'd');
-      map.set(4, 'e');
-      map.set(5, 'f');
-    } else if (k === 2) {
-      map.set(1, 'B');
-      map.delete(4);
-    }
-  }, receiver);
-
-  assertArrayEquals([0, 'a', 2, 'c', 3, 'd', 5, 'f', 1, 'B'], buffer);
-})();
-
-
-(function TestMapForEachAddAtEnd() {
-  var map = new Map();
-  map.set(0, 'a');
-  map.set(1, 'b');
-
-  var buffer = [];
-  map.forEach(function(v, k) {
-    buffer.push(k, v);
-    if (k === 1) {
-      map.set(2, 'c');
-    }
-  });
-
-  assertArrayEquals([0, 'a', 1, 'b', 2, 'c'], buffer);
-})();
-
-
-(function TestMapForEachDeleteNext() {
-  var map = new Map();
-  map.set(0, 'a');
-  map.set(1, 'b');
-  map.set(2, 'c');
-
-  var buffer = [];
-  map.forEach(function(v, k) {
-    buffer.push(k, v);
-    if (k === 1) {
-      map.delete(2);
-    }
-  });
-
-  assertArrayEquals([0, 'a', 1, 'b'], buffer);
-})();
-
-
-(function TestSetForEachDeleteVisitedAndAddAgain() {
-  var map = new Map();
-  map.set(0, 'a');
-  map.set(1, 'b');
-  map.set(2, 'c');
-
-  var buffer = [];
-  map.forEach(function(v, k) {
-    buffer.push(k, v);
-    if (k === 1) {
-      map.delete(0);
-    } else if (k === 2) {
-      map.set(0, 'a');
-    }
-  });
-
-  assertArrayEquals([0, 'a', 1, 'b', 2, 'c', 0, 'a'], buffer);
-})();
-
-
-(function TestMapForEachClear() {
-  var map = new Map();
-  map.set(0, 'a');
-  map.set(1, 'b');
-  map.set(2, 'c');
-
-  var buffer = [];
-  map.forEach(function(v, k) {
-    buffer.push(k, v);
-    if (k === 0) {
-      map.clear();
-      map.set(3, 'd');
-      map.set(4, 'e');
-    }
-  });
-
-  assertArrayEquals([0, 'a', 3, 'd', 4, 'e'], buffer);
-})();
-
-
-(function TestMapForEachNested() {
-  var map = new Map();
-  map.set(0, 'a');
-  map.set(1, 'b');
-  map.set(2, 'c');
-
-  var buffer = [];
-  map.forEach(function(v, k) {
-    buffer.push(k, v);
-    map.forEach(function(v, k) {
-      buffer.push(k, v);
-      if (k === 0) {
-        map.delete(1);
-      }
-    });
-  });
-
-  assertArrayEquals([0, 'a', 0, 'a', 2, 'c', 2, 'c', 0, 'a', 2, 'c'], buffer);
-})();
-
-
-(function TestMapForEachEarlyExit() {
-  var map = new Map();
-  map.set(0, 'a');
-  map.set(1, 'b');
-  map.set(2, 'c');
-
-  var buffer = [];
-  var ex = {};
-  try {
-    map.forEach(function(v, k) {
-      buffer.push(k, v);
-      throw ex;
-    });
-  } catch (e) {
-    assertEquals(ex, e);
-  }
-  assertArrayEquals([0, 'a'], buffer);
-})();
-
-
-(function TestMapForEachGC() {
-  var map = new Map();
-  for (var i = 0; i < 100; i++) {
-    map.set(i, i);
-  }
-
-  var accumulated = 0;
-  map.forEach(function(v) {
-    accumulated += v;
-    if (v % 10 === 0) {
-      gc();
-    }
-  });
-  assertEquals(4950, accumulated);
-})();
-
-
-(function TestMapForEachAllRemovedTransition() {
-  var map = new Map;
-  map.set(0, 0);
-
-  var buffer = [];
-  map.forEach(function(v) {
-    buffer.push(v);
-    if (v === 0) {
-      for (var i = 1; i < 4; i++) {
-        map.set(i, i);
-      }
-    }
-
-    if (v === 3) {
-      for (var i = 0; i < 4; i++) {
-        map.delete(i);
-      }
-      for (var i = 4; i < 8; i++) {
-        map.set(i, i);
-      }
-    }
-  });
-
-  assertArrayEquals([0, 1, 2, 3, 4, 5, 6, 7], buffer);
-})();
-
-
-(function TestMapForEachClearTransition() {
-  var map = new Map;
-  map.set(0, 0);
-
-  var i = 0;
-  var buffer = [];
-  map.forEach(function(v) {
-    buffer.push(v);
-    if (++i < 5) {
-      for (var j = 0; j < 5; j++) {
-        map.clear();
-        map.set(i, i);
-      }
-    }
-  });
-
-  assertArrayEquals([0, 1, 2, 3, 4], buffer);
-})();
-
-
-(function TestMapForEachNestedNonTrivialTransition() {
-  var map = new Map;
-  map.set(0, 0);
-  map.set(1, 1);
-  map.set(2, 2);
-  map.set(3, 3);
-  map.delete(0);
-
-  var i = 0;
-  var buffer = [];
-  map.forEach(function(v) {
-    if (++i > 10) return;
-
-    buffer.push(v);
-
-    if (v == 3) {
-      map.delete(1);
-      for (var j = 4; j < 10; j++) {
-        map.set(j, j);
-      }
-      for (var j = 4; j < 10; j += 2) {
-        map.delete(j);
-      }
-      map.delete(2);
-
-      for (var j = 10; j < 20; j++) {
-        map.set(j, j);
-      }
-      for (var j = 10; j < 20; j += 2) {
-        map.delete(j);
-      }
-
-      map.delete(3);
-    }
-  });
-
-  assertArrayEquals([1, 2, 3, 5, 7, 9, 11, 13, 15, 17], buffer);
-})();
-
-
-(function TestMapForEachAllRemovedTransitionNoClear() {
-  var map = new Map;
-  map.set(0, 0);
-
-  var buffer = [];
-  map.forEach(function(v) {
-    buffer.push(v);
-    if (v === 0) {
-      for (var i = 1; i < 8; i++) {
-        map.set(i, i);
-      }
-    }
-
-    if (v === 4) {
-      for (var i = 0; i < 8; i++) {
-        map.delete(i);
-      }
-    }
-  });
-
-  assertArrayEquals([0, 1, 2, 3, 4], buffer);
-})();
-
-
-(function TestMapForEachNoMoreElementsAfterTransition() {
-  var map = new Map;
-  map.set(0, 0);
-
-  var buffer = [];
-  map.forEach(function(v) {
-    buffer.push(v);
-    if (v === 0) {
-      for (var i = 1; i < 16; i++) {
-        map.set(i, i);
-      }
-    }
-
-    if (v === 4) {
-      for (var i = 5; i < 16; i++) {
-        map.delete(i);
-      }
-    }
-  });
-
-  assertArrayEquals([0, 1, 2, 3, 4], buffer);
-})();
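Taken together, the deleted forEach tests above pin down one contract: Map and Set visit entries in insertion order, callbacks observe mutations made mid-iteration, and deleting then re-adding a key moves it to the end of the order. A minimal standalone sketch of that contract in plain ES6 (no test harness assumed):

var set = new Set(['a', 'b', 'c']);
var seen = '';
set.forEach(function (v) {
  seen += v;
  if (v === 'b') set.delete('a');  // 'a' was already visited
  if (v === 'c') set.add('a');     // re-adding appends, so 'a' is visited again
});
console.log(seen);  // 'abca', as asserted in TestSetForEachDeleteVisitedAndAddAgain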
diff --git a/test/mjsunit/harmony/debug-stepin-collections-foreach.js b/test/mjsunit/harmony/debug-stepin-collections-foreach.js
deleted file mode 100644
index 30fa8c0..0000000
--- a/test/mjsunit/harmony/debug-stepin-collections-foreach.js
+++ /dev/null
@@ -1,118 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-//
-// Flags: --expose-debug-as debug --harmony-collections
-
-Debug = debug.Debug
-
-var exception = false;
-
-function listener(event, exec_state, event_data, data) {
-  try {
-    if (event == Debug.DebugEvent.Break) {
-      if (breaks == 0) {
-        exec_state.prepareStep(Debug.StepAction.StepIn, 2);
-        breaks = 1;
-      } else if (breaks <= 3) {
-        breaks++;
-        // Check whether we break at the expected line.
-        print(event_data.sourceLineText());
-        assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
-        exec_state.prepareStep(Debug.StepAction.StepIn, 3);
-      }
-    }
-  } catch (e) {
-    exception = true;
-  }
-}
-
-function cb_set(num) {
-  print("element " + num);  // Expected to step to this point.
-  return true;
-}
-
-function cb_map(val, key) {  // Map.prototype.forEach passes (value, key, map).
-  print("key " + key + ", value " + val);  // Expected to step to this point.
-  return true;
-}
-
-var s = new Set();
-s.add(1);
-s.add(2);
-s.add(3);
-s.add(4);
-
-var m = new Map();
-m.set('foo', 1);
-m.set('bar', 2);
-m.set('baz', 3);
-m.set('bat', 4);
-
-Debug.setListener(listener);
-
-var breaks = 0;
-debugger;
-s.forEach(cb_set);
-assertFalse(exception);
-assertEquals(4, breaks);
-
-breaks = 0;
-debugger;
-m.forEach(cb_map);
-assertFalse(exception);
-assertEquals(4, breaks);
-
-Debug.setListener(null);
-
-
-// Test two levels of builtin callbacks:
-// Set/Map.prototype.forEach calls a callback function, which itself uses
-// forEach with another callback function.
-
-function second_level_listener(event, exec_state, event_data, data) {
-  try {
-    if (event == Debug.DebugEvent.Break) {
-      if (breaks == 0) {
-        exec_state.prepareStep(Debug.StepAction.StepIn, 3);
-        breaks = 1;
-      } else if (breaks <= 16) {
-        breaks++;
-        // Check whether we break at the expected line.
-        assertTrue(event_data.sourceLineText().indexOf("Expected to step") > 0);
-        // Step two steps further every four breaks to skip the
-        // forEach call in the first level of recursion.
-        var step = (breaks % 4 == 1) ? 6 : 3;
-        exec_state.prepareStep(Debug.StepAction.StepIn, step);
-      }
-    }
-  } catch (e) {
-    exception = true;
-  }
-}
-
-function cb_set_foreach(num) {
-  s.forEach(cb_set);
-  print("back to the first level of recursion.");
-}
-
-function cb_map_foreach(key, val) {
-  m.forEach(cb_map);
-  print("back to the first level of recursion.");
-}
-
-Debug.setListener(second_level_listener);
-
-breaks = 0;
-debugger;
-s.forEach(cb_set_foreach);
-assertFalse(exception);
-assertEquals(17, breaks);
-
-breaks = 0;
-debugger;
-m.forEach(cb_map_foreach);
-assertFalse(exception);
-assertEquals(17, breaks);
-
-Debug.setListener(null);
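The asserted break counts follow from the listener's saturation bound rather than from the collection size alone: one break for the `debugger` statement, then one counted break per callback while the counter stays under its bound. A small sketch of just that bookkeeping (finalCount is a hypothetical helper mirroring the listener logic above):

function finalCount(bound, callbackBreaks) {
  var breaks = 1;                   // the initial `debugger` break
  for (var i = 0; i < callbackBreaks; i++) {
    if (breaks <= bound) breaks++;  // counted break that re-arms stepping
    else break;                     // uncounted break: stepping is not re-armed
  }
  return breaks;
}
console.log(finalCount(3, 4));    // 4, as asserted for the single-level tests
console.log(finalCount(16, 16));  // 17, as asserted for the nested tests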
diff --git a/test/mjsunit/harmony/empty-for.js b/test/mjsunit/harmony/empty-for.js
new file mode 100644
index 0000000..0221126
--- /dev/null
+++ b/test/mjsunit/harmony/empty-for.js
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-scoping
+
+"use strict";
+
+function for_const() {
+  for (const x = 1;;) {
+    if (x == 1) break;
+  }
+  for (const x = 1; x < 2;) {
+    if (x == 1) break;
+  }
+  for (const x = 1;; 0) {
+    if (x == 1) break;
+  }
+}
+
+for_const();
+
+function for_let() {
+  for (let x;;) {
+    if (!x) break;
+  }
+  for (let x; x < 2;) {
+    if (!x) break;
+  }
+  for (let x = 1;; x++) {
+    if (x == 2) break;
+  }
+}
+
+for_let();
+
+function for_var() {
+  for (var x;;) {
+    if (!x) break;
+  }
+  for (var x; x < 2;) {
+    if (!x) break;
+  }
+  for (var x = 1;; x++) {
+    if (x == 2) break;
+  }
+}
+
+for_var();
diff --git a/test/mjsunit/harmony/generators-debug-liveedit.js b/test/mjsunit/harmony/generators-debug-liveedit.js
deleted file mode 100644
index 341ef48..0000000
--- a/test/mjsunit/harmony/generators-debug-liveedit.js
+++ /dev/null
@@ -1,119 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --harmony-generators
-
-var Debug = debug.Debug;
-var LiveEdit = Debug.LiveEdit;
-
-var unique_id = 0;
-
-var Generator = (function*(){}).constructor;
-
-function assertIteratorResult(value, done, result) {
-  assertEquals({value: value, done: done}, result);
-}
-
-function MakeGenerator() {
-  // Prevents eval script caching.
-  unique_id++;
-  return Generator('callback',
-      "/* " + unique_id + "*/\n" +
-      "yield callback();\n" +
-      "return 'Cat';\n");
-}
-
-function MakeFunction() {
-  // Prevents eval script caching.
-  unique_id++;
-  return Function('callback',
-      "/* " + unique_id + "*/\n" +
-      "callback();\n" +
-      "return 'Cat';\n");
-}
-
-// First, try MakeGenerator with no perturbations.
-(function(){
-  var generator = MakeGenerator();
-  function callback() {};
-  var iter = generator(callback);
-  assertIteratorResult(undefined, false, iter.next());
-  assertIteratorResult("Cat", true, iter.next());
-})();
-
-function patch(fun, from, to) {
-  function debug() {
-    var log = new Array();
-    var script = Debug.findScript(fun);
-    var pos = script.source.indexOf(from);
-    try {
-      LiveEdit.TestApi.ApplySingleChunkPatch(script, pos, from.length, to,
-                                             log);
-    } finally {
-      print("Change log: " + JSON.stringify(log) + "\n");
-    }
-  }
-  Debug.ExecuteInDebugContext(debug, false);
-}
-
-// Try to edit a MakeGenerator while it's running, then again while it's
-// stopped.
-(function(){
-  var generator = MakeGenerator();
-
-  var gen_patch_attempted = false;
-  function attempt_gen_patch() {
-    assertFalse(gen_patch_attempted);
-    gen_patch_attempted = true;
-    assertThrows(function() { patch(generator, "'Cat'", "'Capybara'") },
-                 LiveEdit.Failure);
-  };
-  var iter = generator(attempt_gen_patch);
-  assertIteratorResult(undefined, false, iter.next());
-  // Patch should not succeed because there is a live generator activation on
-  // the stack.
-  assertIteratorResult("Cat", true, iter.next());
-  assertTrue(gen_patch_attempted);
-
-  // At this point one iterator is live, but closed, so the patch will succeed.
-  patch(generator, "'Cat'", "'Capybara'");
-  iter = generator(function(){});
-  assertIteratorResult(undefined, false, iter.next());
-  // Patch successful.
-  assertIteratorResult("Capybara", true, iter.next());
-
-  // Patching will fail however when a live iterator is suspended.
-  iter = generator(function(){});
-  assertIteratorResult(undefined, false, iter.next());
-  assertThrows(function() { patch(generator, "'Capybara'", "'Tapir'") },
-               LiveEdit.Failure);
-  assertIteratorResult("Capybara", true, iter.next());
-
-  // Try to patch functions with activations inside and outside generator
-  // function activations.  We should succeed in the former case, but not in the
-  // latter.
-  var fun_outside = MakeFunction();
-  var fun_inside = MakeFunction();
-  var fun_patch_attempted = false;
-  var fun_patch_restarted = false;
-  function attempt_fun_patches() {
-    if (fun_patch_attempted) {
-      assertFalse(fun_patch_restarted);
-      fun_patch_restarted = true;
-      return;
-    }
-    fun_patch_attempted = true;
-    // Patching outside a generator activation must fail.
-    assertThrows(function() { patch(fun_outside, "'Cat'", "'Cobra'") },
-                 LiveEdit.Failure);
-    // Patching inside a generator activation may succeed.
-    patch(fun_inside, "'Cat'", "'Koala'");
-  }
-  iter = generator(function() { return fun_inside(attempt_fun_patches) });
-  assertEquals('Cat',
-               fun_outside(function () {
-                 assertIteratorResult('Koala', false, iter.next());
-                 assertTrue(fun_patch_restarted);
-               }));
-})();
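The LiveEdit rules this deleted test encodes are easy to lose in the plumbing: patching a generator function fails while any activation of it is running or suspended, and succeeds once every activation is closed. A condensed sketch, assuming the same old d8 environment (--expose-debug-as debug) and the test's own MakeGenerator and patch helpers:

var gen = MakeGenerator();          // yields once, then returns 'Cat'
var it = gen(function () {});
it.next();                          // activation is now suspended on the heap
// patch(gen, "'Cat'", "'Capybara'")  -- would throw LiveEdit.Failure here
it.next();                          // {value: 'Cat', done: true}: activation closed
// patch(gen, "'Cat'", "'Capybara'")  -- succeeds now; new iterators yield 'Capybara'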
diff --git a/test/mjsunit/harmony/generators-debug-scopes.js b/test/mjsunit/harmony/generators-debug-scopes.js
deleted file mode 100644
index ad0ea53..0000000
--- a/test/mjsunit/harmony/generators-debug-scopes.js
+++ /dev/null
@@ -1,326 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --harmony-generators
-
-var Debug = debug.Debug;
-
-function RunTest(name, formals_and_body, args, handler, continuation) {
-  var handler_called = false;
-  var exception = null;
-
-  function listener(event, exec_state, event_data, data) {
-    try {
-      if (event == Debug.DebugEvent.Break) {
-        handler_called = true;
-        handler(exec_state);
-      }
-    } catch (e) {
-      exception = e;
-    }
-  }
-
-  function run(thunk) {
-    handler_called = false;
-    exception = null;
-
-    var res = thunk();
-    if (continuation)
-      continuation(res);
-
-    assertTrue(handler_called, "listener not called for " + name);
-    assertNull(exception, name + " / " + exception);
-  }
-
-  var fun = Function.apply(null, formals_and_body);
-  var gen = (function*(){}).constructor.apply(null, formals_and_body);
-
-  Debug.setListener(listener);
-
-  run(function () { return fun.apply(null, args) });
-  run(function () { return gen.apply(null, args).next().value });
-
-  // TODO(wingo): Uncomment after bug 2838 is fixed.
-  // Debug.setListener(null);
-}
-
-// Check that two scopes are the same.
-function assertScopeMirrorEquals(scope1, scope2) {
-  assertEquals(scope1.scopeType(), scope2.scopeType());
-  assertEquals(scope1.frameIndex(), scope2.frameIndex());
-  assertEquals(scope1.scopeIndex(), scope2.scopeIndex());
-  assertPropertiesEqual(scope1.scopeObject().value(), scope2.scopeObject().value());
-}
-
-function CheckFastAllScopes(scopes, exec_state) {
-  var fast_all_scopes = exec_state.frame().allScopes(true);
-  var length = fast_all_scopes.length;
-  assertTrue(scopes.length >= length);
-  for (var i = 0; i < scopes.length && i < length; i++) {
-    var scope = fast_all_scopes[length - i - 1];
-    assertTrue(scope.isScope());
-    assertEquals(scopes[scopes.length - i - 1], scope.scopeType());
-  }
-}
-
-// Check that the scope chain contains the expected types of scopes.
-function CheckScopeChain(scopes, exec_state) {
-  var all_scopes = exec_state.frame().allScopes();
-  assertEquals(scopes.length, exec_state.frame().scopeCount());
-  assertEquals(scopes.length, all_scopes.length, "FrameMirror.allScopes length");
-  for (var i = 0; i < scopes.length; i++) {
-    var scope = exec_state.frame().scope(i);
-    assertTrue(scope.isScope());
-    assertEquals(scopes[i], scope.scopeType());
-    assertScopeMirrorEquals(all_scopes[i], scope);
-
-    // Check the global object when hitting the global scope.
-    if (scopes[i] == debug.ScopeType.Global) {
-      // The objects don't have the same class (one is "global", the other is
-      // "Object"), so just check the properties directly.
-      assertPropertiesEqual(this, scope.scopeObject().value());
-    }
-  }
-  CheckFastAllScopes(scopes, exec_state);
-
-  // Get the debug command processor.
-  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
-
-  // Send a scopes request and check the result.
-  var json;
-  var request_json = '{"seq":0,"type":"request","command":"scopes"}';
-  var response_json = dcp.processDebugJSONRequest(request_json);
-  var response = JSON.parse(response_json);
-  assertEquals(scopes.length, response.body.scopes.length);
-  for (var i = 0; i < scopes.length; i++) {
-    assertEquals(i, response.body.scopes[i].index);
-    assertEquals(scopes[i], response.body.scopes[i].type);
-    if (scopes[i] == debug.ScopeType.Local ||
-        scopes[i] == debug.ScopeType.Closure) {
-      assertTrue(response.body.scopes[i].object.ref < 0);
-    } else {
-      assertTrue(response.body.scopes[i].object.ref >= 0);
-    }
-    var found = false;
-    for (var j = 0; j < response.refs.length && !found; j++) {
-      found = response.refs[j].handle == response.body.scopes[i].object.ref;
-    }
-    assertTrue(found, "Scope object " + response.body.scopes[i].object.ref + " not found");
-  }
-}
-
-// Check that the content of the scope is as expected. For function properties,
-// just check that the value is a function.
-function CheckScopeContent(content, number, exec_state) {
-  var scope = exec_state.frame().scope(number);
-  var count = 0;
-  for (var p in content) {
-    var property_mirror = scope.scopeObject().property(p);
-    assertFalse(property_mirror.isUndefined(), 'property ' + p + ' not found in scope');
-    if (typeof(content[p]) === 'function') {
-      assertTrue(property_mirror.value().isFunction());
-    } else {
-      assertEquals(content[p], property_mirror.value().value(), 'property ' + p + ' has unexpected value');
-    }
-    count++;
-  }
-
-  // 'arguments' might be exposed in the local and closure scopes. Just
-  // ignore this.
-  var scope_size = scope.scopeObject().properties().length;
-  if (!scope.scopeObject().property('arguments').isUndefined()) {
-    scope_size--;
-  }
-  // Skip property with empty name.
-  if (!scope.scopeObject().property('').isUndefined()) {
-    scope_size--;
-  }
-
-  if (count != scope_size) {
-    print('Names found in scope:');
-    var names = scope.scopeObject().propertyNames();
-    for (var i = 0; i < names.length; i++) {
-      print(names[i]);
-    }
-  }
-  assertEquals(count, scope_size);
-
-  // Get the debug command processor.
-  var dcp = exec_state.debugCommandProcessor("unspecified_running_state");
-
-  // Send a scope request for information on a single scope and check the
-  // result.
-  var request_json = '{"seq":0,"type":"request","command":"scope","arguments":{"number":';
-  request_json += scope.scopeIndex();
-  request_json += '}}';
-  var response_json = dcp.processDebugJSONRequest(request_json);
-  var response = JSON.parse(response_json);
-  assertEquals(scope.scopeType(), response.body.type);
-  assertEquals(number, response.body.index);
-  if (scope.scopeType() == debug.ScopeType.Local ||
-      scope.scopeType() == debug.ScopeType.Closure) {
-    assertTrue(response.body.object.ref < 0);
-  } else {
-    assertTrue(response.body.object.ref >= 0);
-  }
-  var found = false;
-  for (var i = 0; i < response.refs.length && !found; i++) {
-    found = response.refs[i].handle == response.body.object.ref;
-  }
-  assertTrue(found, "Scope object " + response.body.object.ref + " not found");
-}
-
-
-// Simple empty local scope.
-RunTest("Local 1",
-        ['debugger;'],
-        [],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({}, 0, exec_state);
-        });
-
-// Local scope with a parameter.
-RunTest("Local 2",
-        ['a', 'debugger;'],
-        [1],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({a:1}, 0, exec_state);
-        });
-
-// Local scope with a parameter and a local variable.
-RunTest("Local 3",
-        ['a', 'var x = 3; debugger;'],
-        [1],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({a:1,x:3}, 0, exec_state);
-        });
-
-// Local scope with parameters and local variables.
-RunTest("Local 4",
-        ['a', 'b', 'var x = 3; var y = 4; debugger;'],
-        [1, 2],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
-        });
-
-// Empty local scope with use of eval.
-RunTest("Local 5",
-        ['eval(""); debugger;'],
-        [],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({}, 0, exec_state);
-        });
-
-// Local scope with a local variable introduced using eval.
-RunTest("Local 6",
-        ['eval("var i = 5"); debugger;'],
-        [],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({i:5}, 0, exec_state);
-        });
-
-// Local scope with parameters, local variables and local variable introduced
-// using eval.
-RunTest("Local 7",
-        ['a', 'b',
-         "var x = 3; var y = 4;\n"
-         + "eval('var i = 5'); eval ('var j = 6');\n"
-         + "debugger;"],
-        [1, 2],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 0, exec_state);
-        });
-
-// Nested empty with blocks.
-RunTest("With",
-        ["with ({}) { with ({}) { debugger; } }"],
-        [],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.With,
-                           debug.ScopeType.With,
-                           debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({}, 0, exec_state);
-          CheckScopeContent({}, 1, exec_state);
-        });
-
-// Simple closure formed by returning an inner function referring to the
-// outer function's arguments.
-RunTest("Closure 1",
-        ['a', 'return function() { debugger; return a; }'],
-        [1],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Local,
-                           debug.ScopeType.Closure,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({a:1}, 1, exec_state);
-        },
-        function (result) { result() });
-
-RunTest("The full monty",
-        ['a', 'b',
-         "var x = 3;\n" +
-         "var y = 4;\n" +
-         "eval('var i = 5');\n" +
-         "eval('var j = 6');\n" +
-         "function f(a, b) {\n" +
-         "  var x = 9;\n" +
-         "  var y = 10;\n" +
-         "  eval('var i = 11');\n" +
-         "  eval('var j = 12');\n" +
-         "  with ({j:13}){\n" +
-         "    return function() {\n" +
-         "      var x = 14;\n" +
-         "      with ({a:15}) {\n" +
-         "        with ({b:16}) {\n" +
-         "          debugger;\n" +
-         "          some_global = a;\n" +
-         "          return f;\n" +
-         "        }\n" +
-         "      }\n" +
-         "    };\n" +
-         "  }\n" +
-         "}\n" +
-         "return f(a, b);"],
-        [1, 2],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.With,
-                           debug.ScopeType.With,
-                           debug.ScopeType.Local,
-                           debug.ScopeType.With,
-                           debug.ScopeType.Closure,
-                           debug.ScopeType.Closure,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({b:16}, 0, exec_state);
-          CheckScopeContent({a:15}, 1, exec_state);
-          CheckScopeContent({x:14}, 2, exec_state);
-          CheckScopeContent({j:13}, 3, exec_state);
-          CheckScopeContent({a:1,b:2,x:9,y:10,i:11,j:12}, 4, exec_state);
-          CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6,f:function(){}}, 5, exec_state);
-        },
-        function (result) { result() });
-
-RunTest("Catch block 1",
-        ["try { throw 'Exception'; } catch (e) { debugger; }"],
-        [],
-        function (exec_state) {
-          CheckScopeChain([debug.ScopeType.Catch,
-                           debug.ScopeType.Local,
-                           debug.ScopeType.Global], exec_state);
-          CheckScopeContent({e:'Exception'}, 0, exec_state);
-        });
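The "full monty" scope chain reads bottom-up as the plain lexical nesting of the test program. A plain-JS rendering without the debug API (names and values taken from the test) shows which binding each identifier resolves to at the `debugger` point:

var some_global;
function monty(a, b) {           // a:1, b:2 -- outer Closure scope
  var x = 3, y = 4;
  function f(a, b) {             // a:1, b:2, x:9, y:10 -- inner Closure scope
    var x = 9, y = 10;
    with ({j: 13}) {             // With scope
      return function () {       // Local scope of the returned function
        var x = 14;
        with ({a: 15}) {         // With scope
          with ({b: 16}) {       // innermost With scope
            some_global = a;     // resolves to 15, from `with ({a: 15})`
            return [a, b, x, j]; // [15, 16, 14, 13]
          }
        }
      };
    }
  }
  return f(a, b);
}
console.log(monty(1, 2)());      // [15, 16, 14, 13]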
diff --git a/test/mjsunit/harmony/generators-iteration.js b/test/mjsunit/harmony/generators-iteration.js
deleted file mode 100644
index d86a20f..0000000
--- a/test/mjsunit/harmony/generators-iteration.js
+++ /dev/null
@@ -1,650 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-generators --expose-gc
-
-// Test generator iteration.
-
-var GeneratorFunction = (function*(){yield 1;}).__proto__.constructor;
-
-function assertIteratorResult(value, done, result) {
-  assertEquals({ value: value, done: done}, result);
-}
-
-function assertIteratorIsClosed(iter) {
-  assertIteratorResult(undefined, true, iter.next());
-  assertDoesNotThrow(function() { iter.next(); });
-}
-
-function assertThrownIteratorIsClosed(iter) {
-  // TODO(yusukesuzuki): Since the status of a thrown generator is
-  // "executing", the following tests fail.
-  // https://code.google.com/p/v8/issues/detail?id=3096
-  // assertIteratorIsClosed(iter);
-}
-
-function TestGeneratorResultPrototype() {
-  function* g() { yield 1; }
-  var iter = g();
-  var result = iter.next();
-
-  assertSame(Object.prototype, Object.getPrototypeOf(result));
-  var property_names = Object.getOwnPropertyNames(result);
-  property_names.sort();
-  assertEquals(["done", "value"], property_names);
-  assertIteratorResult(1, false, result);
-}
-TestGeneratorResultPrototype();
-
-function TestGenerator(g, expected_values_for_next,
-                       send_val, expected_values_for_send) {
-  function testNext(thunk) {
-    var iter = thunk();
-    for (var i = 0; i < expected_values_for_next.length; i++) {
-      var expected_value = expected_values_for_next[i];
-      var expected_done = i == expected_values_for_next.length - 1;
-      // Only the last expected value arrives with done == true.
-      assertIteratorResult(expected_value, expected_done, iter.next());
-    }
-    assertIteratorIsClosed(iter);
-  }
-  function testSend(thunk) {
-    var iter = thunk();
-    for (var i = 0; i < expected_values_for_send.length; i++) {
-      assertIteratorResult(expected_values_for_send[i],
-                           i == expected_values_for_send.length - 1,
-                           iter.next(send_val));
-    }
-    assertIteratorIsClosed(iter);
-  }
-  function testThrow(thunk) {
-    for (var i = 0; i < expected_values_for_next.length; i++) {
-      var iter = thunk();
-      for (var j = 0; j < i; j++) {
-        assertIteratorResult(expected_values_for_next[j],
-                             j == expected_values_for_next.length - 1,
-                             iter.next());
-      }
-      function Sentinel() {}
-      assertThrows(function () { iter.throw(new Sentinel); }, Sentinel);
-      assertThrownIteratorIsClosed(iter);
-    }
-  }
-
-  testNext(g);
-  testSend(g);
-  testThrow(g);
-
-  testNext(function*() { return yield* g(); });
-  testSend(function*() { return yield* g(); });
-  testThrow(function*() { return yield* g(); });
-
-  if (g instanceof GeneratorFunction) {
-    testNext(function() { return new g(); });
-    testSend(function() { return new g(); });
-    testThrow(function() { return new g(); });
-  }
-}
-
-TestGenerator(function* g1() { },
-              [undefined],
-              "foo",
-              [undefined]);
-
-TestGenerator(function* g2() { yield 1; },
-              [1, undefined],
-              "foo",
-              [1, undefined]);
-
-TestGenerator(function* g3() { yield 1; yield 2; },
-              [1, 2, undefined],
-              "foo",
-              [1, 2, undefined]);
-
-TestGenerator(function* g4() { yield 1; yield 2; return 3; },
-              [1, 2, 3],
-              "foo",
-              [1, 2, 3]);
-
-TestGenerator(function* g5() { return 1; },
-              [1],
-             "foo",
-              [1]);
-
-TestGenerator(function* g6() { var x = yield 1; return x; },
-              [1, undefined],
-              "foo",
-              [1, "foo"]);
-
-TestGenerator(function* g7() { var x = yield 1; yield 2; return x; },
-              [1, 2, undefined],
-              "foo",
-              [1, 2, "foo"]);
-
-TestGenerator(function* g8() { for (var x = 0; x < 4; x++) { yield x; } },
-              [0, 1, 2, 3, undefined],
-              "foo",
-              [0, 1, 2, 3, undefined]);
-
-// Generator with arguments.
-TestGenerator(
-    function g9() {
-      return (function*(a, b, c, d) {
-        yield a; yield b; yield c; yield d;
-      })("fee", "fi", "fo", "fum");
-    },
-    ["fee", "fi", "fo", "fum", undefined],
-    "foo",
-    ["fee", "fi", "fo", "fum", undefined]);
-
-// Too few arguments.
-TestGenerator(
-    function g10() {
-      return (function*(a, b, c, d) {
-        yield a; yield b; yield c; yield d;
-      })("fee", "fi");
-    },
-    ["fee", "fi", undefined, undefined, undefined],
-    "foo",
-    ["fee", "fi", undefined, undefined, undefined]);
-
-// Too many arguments.
-TestGenerator(
-    function g11() {
-      return (function*(a, b, c, d) {
-        yield a; yield b; yield c; yield d;
-      })("fee", "fi", "fo", "fum", "I smell the blood of an Englishman");
-    },
-    ["fee", "fi", "fo", "fum", undefined],
-    "foo",
-    ["fee", "fi", "fo", "fum", undefined]);
-
-// The arguments object.
-TestGenerator(
-    function g12() {
-      return (function*(a, b, c, d) {
-        for (var i = 0; i < arguments.length; i++) {
-          yield arguments[i];
-        }
-      })("fee", "fi", "fo", "fum", "I smell the blood of an Englishman");
-    },
-    ["fee", "fi", "fo", "fum", "I smell the blood of an Englishman",
-     undefined],
-    "foo",
-    ["fee", "fi", "fo", "fum", "I smell the blood of an Englishman",
-     undefined]);
-
-// Access to captured free variables.
-TestGenerator(
-    function g13() {
-      return (function(a, b, c, d) {
-        return (function*() {
-          yield a; yield b; yield c; yield d;
-        })();
-      })("fee", "fi", "fo", "fum");
-    },
-    ["fee", "fi", "fo", "fum", undefined],
-    "foo",
-    ["fee", "fi", "fo", "fum", undefined]);
-
-// Abusing the arguments object.
-TestGenerator(
-    function g14() {
-      return (function*(a, b, c, d) {
-        arguments[0] = "Be he live";
-        arguments[1] = "or be he dead";
-        arguments[2] = "I'll grind his bones";
-        arguments[3] = "to make my bread";
-        yield a; yield b; yield c; yield d;
-      })("fee", "fi", "fo", "fum");
-    },
-    ["Be he live", "or be he dead", "I'll grind his bones", "to make my bread",
-     undefined],
-    "foo",
-    ["Be he live", "or be he dead", "I'll grind his bones", "to make my bread",
-     undefined]);
-
-// Abusing the arguments object: strict mode.
-TestGenerator(
-    function g15() {
-      return (function*(a, b, c, d) {
-        "use strict";
-        arguments[0] = "Be he live";
-        arguments[1] = "or be he dead";
-        arguments[2] = "I'll grind his bones";
-        arguments[3] = "to make my bread";
-        yield a; yield b; yield c; yield d;
-      })("fee", "fi", "fo", "fum");
-    },
-    ["fee", "fi", "fo", "fum", undefined],
-    "foo",
-    ["fee", "fi", "fo", "fum", undefined]);
-
-// GC.
-TestGenerator(function* g16() { yield "baz"; gc(); yield "qux"; },
-              ["baz", "qux", undefined],
-              "foo",
-              ["baz", "qux", undefined]);
-
-// Receivers.
-TestGenerator(
-    function g17() {
-      function* g() { yield this.x; yield this.y; }
-      var o = { start: g, x: 1, y: 2 };
-      return o.start();
-    },
-    [1, 2, undefined],
-    "foo",
-    [1, 2, undefined]);
-
-TestGenerator(
-    function g18() {
-      function* g() { yield this.x; yield this.y; }
-      var iter = new g;
-      iter.x = 1;
-      iter.y = 2;
-      return iter;
-    },
-    [1, 2, undefined],
-    "foo",
-    [1, 2, undefined]);
-
-TestGenerator(
-    function* g19() {
-      var x = 1;
-      yield x;
-      with({x:2}) { yield x; }
-      yield x;
-    },
-    [1, 2, 1, undefined],
-    "foo",
-    [1, 2, 1, undefined]);
-
-TestGenerator(
-    function* g20() { yield (1 + (yield 2) + 3); },
-    [2, NaN, undefined],
-    "foo",
-    [2, "1foo3", undefined]);
-
-TestGenerator(
-    function* g21() { return (1 + (yield 2) + 3); },
-    [2, NaN],
-    "foo",
-    [2, "1foo3"]);
-
-TestGenerator(
-    function* g22() { yield (1 + (yield 2) + 3); yield (4 + (yield 5) + 6); },
-    [2, NaN, 5, NaN, undefined],
-    "foo",
-    [2, "1foo3", 5, "4foo6", undefined]);
-
-TestGenerator(
-    function* g23() {
-      return (yield (1 + (yield 2) + 3)) + (yield (4 + (yield 5) + 6));
-    },
-    [2, NaN, 5, NaN, NaN],
-    "foo",
-    [2, "1foo3", 5, "4foo6", "foofoo"]);
-
-// Rewind a try context with and without operands on the stack.
-TestGenerator(
-    function* g24() {
-      try {
-        return (yield (1 + (yield 2) + 3)) + (yield (4 + (yield 5) + 6));
-      } catch (e) {
-        throw e;
-      }
-    },
-    [2, NaN, 5, NaN, NaN],
-    "foo",
-    [2, "1foo3", 5, "4foo6", "foofoo"]);
-
-// Yielding in a catch context, with and without operands on the stack.
-TestGenerator(
-    function* g25() {
-      try {
-        throw (yield (1 + (yield 2) + 3))
-      } catch (e) {
-        if (typeof e == 'object') throw e;
-        return e + (yield (4 + (yield 5) + 6));
-      }
-    },
-    [2, NaN, 5, NaN, NaN],
-    "foo",
-    [2, "1foo3", 5, "4foo6", "foofoo"]);
-
-// Generator function instances.
-TestGenerator(GeneratorFunction(),
-              [undefined],
-              "foo",
-              [undefined]);
-
-TestGenerator(new GeneratorFunction(),
-              [undefined],
-              "foo",
-              [undefined]);
-
-TestGenerator(GeneratorFunction('yield 1;'),
-              [1, undefined],
-              "foo",
-              [1, undefined]);
-
-TestGenerator(
-    function() { return GeneratorFunction('x', 'y', 'yield x + y;')(1, 2) },
-    [3, undefined],
-    "foo",
-    [3, undefined]);
-
-// Access to this with formal arguments.
-TestGenerator(
-    function () {
-      return ({ x: 42, g: function* (a) { yield this.x } }).g(0);
-    },
-    [42, undefined],
-    "foo",
-    [42, undefined]);
-
-// Test that yield* re-yields received results without re-boxing.
-function TestDelegatingYield() {
-  function results(results) {
-    var i = 0;
-    function next() {
-      return results[i++];
-    }
-    return { next: next }
-  }
-  function* yield_results(expected) {
-    return yield* results(expected);
-  }
-  function collect_results(iter) {
-    var ret = [];
-    var result;
-    do {
-      result = iter.next();
-      ret.push(result);
-    } while (!result.done);
-    return ret;
-  }
-  // The final entry must be a full result object, because the return re-boxes it.
-  var expected = [{value: 1}, 13, "foo", {value: 34, done: true}];
-
-  // Sanity check.
-  assertEquals(expected, collect_results(results(expected)));
-  assertEquals(expected, collect_results(yield_results(expected)));
-}
-TestDelegatingYield();
-
-function TestTryCatch(instantiate) {
-  function* g() { yield 1; try { yield 2; } catch (e) { yield e; } yield 3; }
-  function Sentinel() {}
-
-  function Test1(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorIsClosed(iter);
-  }
-  Test1(instantiate(g));
-
-  function Test2(iter) {
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test2(instantiate(g));
-
-  function Test3(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test3(instantiate(g));
-
-  function Test4(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    var exn = new Sentinel;
-    assertIteratorResult(exn, false, iter.throw(exn));
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorIsClosed(iter);
-  }
-  Test4(instantiate(g));
-
-  function Test5(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    var exn = new Sentinel;
-    assertIteratorResult(exn, false, iter.throw(exn));
-    assertIteratorResult(3, false, iter.next());
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test5(instantiate(g));
-
-  function Test6(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    var exn = new Sentinel;
-    assertIteratorResult(exn, false, iter.throw(exn));
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test6(instantiate(g));
-
-  function Test7(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorIsClosed(iter);
-  }
-  Test7(instantiate(g));
-}
-TestTryCatch(function (g) { return g(); });
-TestTryCatch(function* (g) { return yield* g(); });
-
-function TestTryFinally(instantiate) {
-  function* g() { yield 1; try { yield 2; } finally { yield 3; } yield 4; }
-  function Sentinel() {}
-  function Sentinel2() {}
-
-  function Test1(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorResult(4, false, iter.next());
-    assertIteratorIsClosed(iter);
-  }
-  Test1(instantiate(g));
-
-  function Test2(iter) {
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test2(instantiate(g));
-
-  function Test3(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test3(instantiate(g));
-
-  function Test4(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.throw(new Sentinel));
-    assertThrows(function() { iter.next(); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test4(instantiate(g));
-
-  function Test5(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.throw(new Sentinel));
-    assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test5(instantiate(g));
-
-  function Test6(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.next());
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test6(instantiate(g));
-
-  function Test7(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorResult(4, false, iter.next());
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test7(instantiate(g));
-
-  function Test8(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorResult(4, false, iter.next());
-    assertIteratorIsClosed(iter);
-  }
-  Test8(instantiate(g));
-}
-TestTryFinally(function (g) { return g(); });
-TestTryFinally(function* (g) { return yield* g(); });
-
-function TestNestedTry(instantiate) {
-  function* g() {
-    try {
-      yield 1;
-      try { yield 2; } catch (e) { yield e; }
-      yield 3;
-    } finally {
-      yield 4;
-    }
-    yield 5;
-  }
-  function Sentinel() {}
-  function Sentinel2() {}
-
-  function Test1(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorResult(4, false, iter.next());
-    assertIteratorResult(5, false, iter.next());
-    assertIteratorIsClosed(iter);
-  }
-  Test1(instantiate(g));
-
-  function Test2(iter) {
-    assertThrows(function() { iter.throw(new Sentinel); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test2(instantiate(g));
-
-  function Test3(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(4, false, iter.throw(new Sentinel));
-    assertThrows(function() { iter.next(); }, Sentinel);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test3(instantiate(g));
-
-  function Test4(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(4, false, iter.throw(new Sentinel));
-    assertThrows(function() { iter.throw(new Sentinel2); }, Sentinel2);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test4(instantiate(g));
-
-  function Test5(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    var exn = new Sentinel;
-    assertIteratorResult(exn, false, iter.throw(exn));
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorResult(4, false, iter.next());
-    assertIteratorResult(5, false, iter.next());
-    assertIteratorIsClosed(iter);
-  }
-  Test5(instantiate(g));
-
-  function Test6(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    var exn = new Sentinel;
-    assertIteratorResult(exn, false, iter.throw(exn));
-    assertIteratorResult(4, false, iter.throw(new Sentinel2));
-    assertThrows(function() { iter.next(); }, Sentinel2);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test6(instantiate(g));
-
-  function Test7(iter) {
-    assertIteratorResult(1, false, iter.next());
-    assertIteratorResult(2, false, iter.next());
-    var exn = new Sentinel;
-    assertIteratorResult(exn, false, iter.throw(exn));
-    assertIteratorResult(3, false, iter.next());
-    assertIteratorResult(4, false, iter.throw(new Sentinel2));
-    assertThrows(function() { iter.next(); }, Sentinel2);
-    assertThrownIteratorIsClosed(iter);
-  }
-  Test7(instantiate(g));
-
-  // That's probably enough.
-}
-TestNestedTry(function (g) { return g(); });
-TestNestedTry(function* (g) { return yield* g(); });
-
-function TestRecursion() {
-  function TestNextRecursion() {
-    function* g() { yield iter.next(); }
-    var iter = g();
-    return iter.next();
-  }
-  function TestSendRecursion() {
-    function* g() { yield iter.next(42); }
-    var iter = g();
-    return iter.next();
-  }
-  function TestThrowRecursion() {
-    function* g() { yield iter.throw(1); }
-    var iter = g();
-    return iter.next();
-  }
-  assertThrows(TestNextRecursion, Error);
-  assertThrows(TestSendRecursion, Error);
-  assertThrows(TestThrowRecursion, Error);
-}
-TestRecursion();
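The NaN-versus-string pairs asserted for g20 through g25 all come from one rule: a `yield expr` expression evaluates to whatever value the resuming `next(v)` call passes in, and a plain `next()` passes undefined. A minimal sketch of that rule on its own:

function* g() { yield (1 + (yield 2) + 3); }
var it = g();
console.log(it.next());        // { value: 2, done: false }
console.log(it.next());        // { value: NaN, done: false }: 1 + undefined + 3
var it2 = g();
it2.next();                    // advance to the inner yield
console.log(it2.next('foo'));  // { value: '1foo3', done: false }: 1 + 'foo' + 3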
diff --git a/test/mjsunit/harmony/generators-objects.js b/test/mjsunit/harmony/generators-objects.js
deleted file mode 100644
index c1cda07..0000000
--- a/test/mjsunit/harmony/generators-objects.js
+++ /dev/null
@@ -1,93 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-generators --harmony-scoping --allow-natives-syntax
-
-// Test instantiations of generators.
-
-// Generators shouldn't allocate stack slots.  This test will abort in debug
-// mode if generators have stack slots.
-function TestContextAllocation() {
-  function* g1(a, b, c) { yield 1; return [a, b, c]; }
-  function* g2() { yield 1; return arguments; }
-  function* g3() { yield 1; return this; }
-  function* g4() { var x = 10; yield 1; return x; }
-  // Temporary variable context allocation
-  function* g5(l) { "use strict"; yield 1; for (let x in l) { yield x; } }
-
-  g1();
-  g2();
-  g3();
-  g4();
-  g5(["foo"]);
-}
-TestContextAllocation();
-
-
-// Test the properties and prototype of a generator object.
-function TestGeneratorObject() {
-  function* g() { yield 1; }
-
-  var iter = g();
-  assertSame(g.prototype, Object.getPrototypeOf(iter));
-  assertTrue(iter instanceof g);
-  assertEquals("Generator", %_ClassOf(iter));
-  assertEquals("[object Generator]", String(iter));
-  assertEquals([], Object.getOwnPropertyNames(iter));
-  assertTrue(iter !== g());
-
-  // g() is the same as new g().
-  iter = new g();
-  assertSame(g.prototype, Object.getPrototypeOf(iter));
-  assertTrue(iter instanceof g);
-  assertEquals("Generator", %_ClassOf(iter));
-  assertEquals("[object Generator]", String(iter));
-  assertEquals([], Object.getOwnPropertyNames(iter));
-  assertTrue(iter !== new g());
-}
-TestGeneratorObject();
-
-
-// Test the methods of generator objects.
-function TestGeneratorObjectMethods() {
-  function* g() { yield 1; }
-  var iter = g();
-
-  function TestNonGenerator(non_generator) {
-    assertThrows(function() { iter.next.call(non_generator); }, TypeError);
-    assertThrows(function() { iter.next.call(non_generator, 1); }, TypeError);
-    assertThrows(function() { iter.throw.call(non_generator, 1); }, TypeError);
-    assertThrows(function() { iter.close.call(non_generator); }, TypeError);
-  }
-
-  TestNonGenerator(1);
-  TestNonGenerator({});
-  TestNonGenerator(function(){});
-  TestNonGenerator(g);
-  TestNonGenerator(g.prototype);
-}
-TestGeneratorObjectMethods();
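Two details in these object tests are draft-era semantics: `new g()` behaving like `g()`, and an `iter.close` method existing at all (it was dropped before ES6 shipped). A quick sketch of what still holds in a final-ES6 engine:

function* g() { yield 1; }
var iter = g();
console.log(Object.getPrototypeOf(iter) === g.prototype);  // true
console.log(iter instanceof g);                            // true
console.log(Object.getOwnPropertyNames(iter));             // []
try { new g(); } catch (e) {
  console.log(e instanceof TypeError);  // true: generators are not constructors in final ES6
}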
diff --git a/test/mjsunit/harmony/generators-parsing.js b/test/mjsunit/harmony/generators-parsing.js
deleted file mode 100644
index 2a4a68c..0000000
--- a/test/mjsunit/harmony/generators-parsing.js
+++ /dev/null
@@ -1,106 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-generators
-
-// Test basic generator syntax.
-
-// Yield statements.
-function* g() { yield 3; yield 4; }
-
-// Yield expressions.
-function* g() { (yield 3) + (yield 4); }
-
-// You can have a generator in strict mode.
-function* g() { "use strict"; yield 3; yield 4; }
-
-// Generators can have return statements also, which internally parse to a kind
-// of yield expression.
-function* g() { yield 1; return; }
-function* g() { yield 1; return 2; }
-function* g() { yield 1; return 2; yield "dead"; }
-
-// Generator expression.
-(function* () { yield 3; });
-
-// Named generator expression.
-(function* g() { yield 3; });
-
-// A generator without a yield is specified as causing an early error.  This
-// behavior is currently unimplemented.  See
-// https://bugs.ecmascript.org/show_bug.cgi?id=1283.
-function* g() { }
-
-// A YieldExpression in the RHS of a YieldExpression is currently specified as
-// causing an early error.  This behavior is currently unimplemented.  See
-// https://bugs.ecmascript.org/show_bug.cgi?id=1283.
-function* g() { yield yield 1; }
-function* g() { yield 3 + (yield 4); }
-
-// Generator definitions with a name of "yield" are not specifically ruled out
-// by the spec, as the "yield" name is outside the generator itself.  However,
-// in strict mode, "yield" is an invalid identifier.
-function* yield() { (yield 3) + (yield 4); }
-assertThrows("function* yield() { \"use strict\"; (yield 3) + (yield 4); }",
-             SyntaxError);
-
-// In sloppy mode, yield is a normal identifier, outside of generators.
-function yield(yield) { yield: yield (yield + yield (0)); }
-
-// Yield is always valid as a key in an object literal.
-({ yield: 1 });
-function* g() { yield ({ yield: 1 }) }
-function* g() { yield ({ get yield() { return 1; }}) }
-
-// Check that yield is a valid label in sloppy mode, but not in strict
-// mode or in generators.
-function f() { yield: 1 }
-assertThrows("function f() { \"use strict\"; yield: 1 }", SyntaxError)
-assertThrows("function* g() { yield: 1 }", SyntaxError)
-
-// Yield is only a keyword in the body of the generator, not in nested
-// functions.
-function* g() { function f() { yield (yield + yield (0)); } }
-
-// Yield needs a RHS.
-assertThrows("function* g() { yield; }", SyntaxError);
-
-// Yield in a generator is not an identifier.
-assertThrows("function* g() { yield = 10; }", SyntaxError);
-
-// Yield binds very loosely, so this parses as "yield (3 + yield 4)", which is
-// invalid.
-assertThrows("function* g() { yield 3 + yield 4; }", SyntaxError);
-
-// Yield is still a future-reserved-word in strict mode
-assertThrows("function f() { \"use strict\"; var yield = 13; }", SyntaxError);
-
-// The name of the NFE is let-bound in G, so is invalid.
-assertThrows("function* g() { yield (function yield() {}); }", SyntaxError);
-
-// In generators, yield is invalid as a formal argument name.
-assertThrows("function* g(yield) { yield (10); }", SyntaxError);
diff --git a/test/mjsunit/harmony/generators-poisoned-properties.js b/test/mjsunit/harmony/generators-poisoned-properties.js
deleted file mode 100644
index 39a583e..0000000
--- a/test/mjsunit/harmony/generators-poisoned-properties.js
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-generators
-
-function assertIteratorResult(value, done, result) {
-  assertEquals({value: value, done: done}, result);
-}
-
-function test(f) {
-  var cdesc = Object.getOwnPropertyDescriptor(f, "caller");
-  var adesc = Object.getOwnPropertyDescriptor(f, "arguments");
-
-  assertFalse(cdesc.enumerable);
-  assertFalse(cdesc.configurable);
-
-  assertFalse(adesc.enumerable);
-  assertFalse(adesc.configurable);
-
-  assertSame(cdesc.get, cdesc.set);
-  assertSame(cdesc.get, adesc.get);
-  assertSame(cdesc.get, adesc.set);
-
-  assertTrue(cdesc.get instanceof Function);
-  assertEquals(0, cdesc.get.length);
-  assertThrows(cdesc.get, TypeError);
-
-  assertThrows(function() { return f.caller; }, TypeError);
-  assertThrows(function() { f.caller = 42; }, TypeError);
-  assertThrows(function() { return f.arguments; }, TypeError);
-  assertThrows(function() { f.arguments = 42; }, TypeError);
-}
-
-function *sloppy() { test(sloppy); }
-function *strict() { "use strict"; test(strict); }
-
-test(sloppy);
-test(strict);
-
-assertIteratorResult(undefined, true, sloppy().next());
-assertIteratorResult(undefined, true, strict().next());
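The poisoned accessors are observable without the harness. In current engines the single throwing getter/setter pair has moved from own properties of each strict function (as asserted above) to Function.prototype, so a sketch that works either way:

'use strict';
function f() {}
var desc = Object.getOwnPropertyDescriptor(f, 'caller') ||
    Object.getOwnPropertyDescriptor(Function.prototype, 'caller');
console.log(desc.get === desc.set);  // true: one shared throwing accessor
try { f.caller; } catch (e) {
  console.log(e instanceof TypeError);  // true
}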
diff --git a/test/mjsunit/harmony/generators-relocation.js b/test/mjsunit/harmony/generators-relocation.js
deleted file mode 100644
index 4074235..0000000
--- a/test/mjsunit/harmony/generators-relocation.js
+++ /dev/null
@@ -1,61 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --expose-debug-as debug --harmony-generators
-
-var Debug = debug.Debug;
-
-function assertIteratorResult(value, done, result) {
-  assertEquals({value: value, done: done}, result);
-}
-
-function RunTest(formals_and_body, args, value1, value2) {
-  // A null listener. It isn't important what the listener does.
-  function listener(event, exec_state, event_data, data) {
-  }
-
-  // Create the generator function outside a debugging context. It will probably
-  // be lazily compiled.
-  var gen = (function*(){}).constructor.apply(null, formals_and_body);
-
-  // Instantiate the generator object.
-  var obj = gen.apply(null, args);
-
-  // Advance to the first yield.
-  assertIteratorResult(value1, false, obj.next());
-
-  // Add a breakpoint on line 3 (the second yield).
-  var bp = Debug.setBreakPoint(gen, 3);
-
-  // Enable the debugger, which should force recompilation of the generator
-  // function and relocation of the suspended generator activation.
-  Debug.setListener(listener);
-
-  // Check that the generator resumes and suspends properly.
-  assertIteratorResult(value2, false, obj.next());
-
-  // Disable debugger -- should not force recompilation.
-  Debug.clearBreakPoint(bp);
-  Debug.setListener(null);
-
-  // Run to completion.
-  assertIteratorResult(undefined, true, obj.next());
-}
-
-function prog(a, b, c) {
-  return a + ';\n' + 'yield ' + b + ';\n' + 'yield ' + c;
-}
-
-// Simple empty local scope.
-RunTest([prog('', '1', '2')], [], 1, 2);
-
-RunTest([prog('for (;;) break', '1', '2')], [], 1, 2);
-
-RunTest([prog('while (0) foo()', '1', '2')], [], 1, 2);
-
-RunTest(['a', prog('var x = 3', 'a', 'x')], [1], 1, 3);
-
-RunTest(['a', prog('', '1', '2')], [42], 1, 2);
-
-RunTest(['a', prog('for (;;) break', '1', '2')], [42], 1, 2);
diff --git a/test/mjsunit/harmony/generators-runtime.js b/test/mjsunit/harmony/generators-runtime.js
deleted file mode 100644
index 9fb7075..0000000
--- a/test/mjsunit/harmony/generators-runtime.js
+++ /dev/null
@@ -1,148 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-generators
-
-// Test aspects of the generator runtime.
-
-// See:
-// http://people.mozilla.org/~jorendorff/es6-draft.html#sec-generatorfunction-objects
-
-function f() { }
-function* g() { yield 1; }
-var GeneratorFunctionPrototype = Object.getPrototypeOf(g);
-var GeneratorFunction = GeneratorFunctionPrototype.constructor;
-var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
-
-// A generator function should have the same set of properties as any
-// other function.
-function TestGeneratorFunctionInstance() {
-  var f_own_property_names = Object.getOwnPropertyNames(f);
-  var g_own_property_names = Object.getOwnPropertyNames(g);
-
-  f_own_property_names.sort();
-  g_own_property_names.sort();
-
-  assertArrayEquals(f_own_property_names, g_own_property_names);
-  var i;
-  for (i = 0; i < f_own_property_names.length; i++) {
-    var prop = f_own_property_names[i];
-    var f_desc = Object.getOwnPropertyDescriptor(f, prop);
-    var g_desc = Object.getOwnPropertyDescriptor(g, prop);
-    assertEquals(f_desc.configurable, g_desc.configurable, prop);
-    if (prop === 'arguments' || prop === 'caller') {
-      // Unlike sloppy functions, whose "arguments" and "caller" are read-only
-      // data properties, sloppy generators have a poison pill implemented via
-      // accessors.
-      assertFalse('writable' in g_desc, prop);
-      assertTrue(g_desc.get instanceof Function, prop);
-      assertEquals(g_desc.get, g_desc.set, prop);
-    } else {
-      assertEquals(f_desc.writable, g_desc.writable, prop);
-    }
-    assertEquals(f_desc.enumerable, g_desc.enumerable, prop);
-  }
-}
-TestGeneratorFunctionInstance();
-
-
-// Generators have an additional object interposed in the chain between
-// themselves and Function.prototype.
-function TestGeneratorFunctionPrototype() {
-  // Sanity check.
-  assertSame(Object.getPrototypeOf(f), Function.prototype);
-  assertFalse(GeneratorFunctionPrototype === Function.prototype);
-  assertSame(Function.prototype,
-             Object.getPrototypeOf(GeneratorFunctionPrototype));
-  assertSame(GeneratorFunctionPrototype,
-             Object.getPrototypeOf(function* () {}));
-}
-TestGeneratorFunctionPrototype();
-
-
-// Functions that we associate with generator objects are actually defined by
-// a common prototype.
-function TestGeneratorObjectPrototype() {
-  assertSame(Object.prototype,
-             Object.getPrototypeOf(GeneratorObjectPrototype));
-  assertSame(GeneratorObjectPrototype,
-             Object.getPrototypeOf((function*(){yield 1}).prototype));
-
-  var expected_property_names = ["next", "throw", "constructor"];
-  var found_property_names =
-      Object.getOwnPropertyNames(GeneratorObjectPrototype);
-
-  expected_property_names.sort();
-  found_property_names.sort();
-
-  assertArrayEquals(expected_property_names, found_property_names);
-
-  iterator_desc = Object.getOwnPropertyDescriptor(GeneratorObjectPrototype,
-      Symbol.iterator);
-  assertTrue(iterator_desc !== undefined);
-  assertFalse(iterator_desc.writable);
-  assertFalse(iterator_desc.enumerable);
-  assertFalse(iterator_desc.configurable);
-
-  // The generator object's "iterator" function is just the identity.
-  assertSame(iterator_desc.value.call(42), 42);
-}
-TestGeneratorObjectPrototype();
-
-
-// This tests the object that would be called "GeneratorFunction", if it were
-// like "Function".
-function TestGeneratorFunction() {
-  assertSame(GeneratorFunctionPrototype, GeneratorFunction.prototype);
-  assertTrue(g instanceof GeneratorFunction);
-
-  assertSame(Function, Object.getPrototypeOf(GeneratorFunction));
-  assertTrue(g instanceof Function);
-
-  assertEquals("function* g() { yield 1; }", g.toString());
-
-  // Not all functions are generators.
-  assertTrue(f instanceof Function);  // Sanity check.
-  assertTrue(!(f instanceof GeneratorFunction));
-
-  assertTrue((new GeneratorFunction()) instanceof GeneratorFunction);
-  assertTrue(GeneratorFunction() instanceof GeneratorFunction);
-}
-TestGeneratorFunction();
-
-
-function TestPerGeneratorPrototype() {
-  assertTrue((function*(){}).prototype !== (function*(){}).prototype);
-  assertTrue((function*(){}).prototype !== g.prototype);
-  assertTrue(g.prototype instanceof GeneratorFunctionPrototype);
-  assertSame(GeneratorObjectPrototype, Object.getPrototypeOf(g.prototype));
-  assertTrue(!(g.prototype instanceof Function));
-  assertSame(typeof (g.prototype), "object");
-
-  assertArrayEquals([], Object.getOwnPropertyNames(g.prototype));
-}
-TestPerGeneratorPrototype();
diff --git a/test/mjsunit/harmony/iteration-semantics.js b/test/mjsunit/harmony/iteration-semantics.js
deleted file mode 100644
index 27033e1..0000000
--- a/test/mjsunit/harmony/iteration-semantics.js
+++ /dev/null
@@ -1,338 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-iteration
-// Flags: --harmony-generators --harmony-scoping --harmony-proxies
-// Flags: --harmony-symbols
-
-// Test for-of semantics.
-
-"use strict";
-
-
-// First, some helpers.
-
-function* values() {
-  for (var i = 0; i < arguments.length; i++) {
-    yield arguments[i];
-  }
-}
-
-function wrap_iterator(iterator) {
-    var iterable = {};
-    iterable[Symbol.iterator] = function() { return iterator; };
-    return iterable;
-}
-
-function integers_until(max) {
-  function next() {
-    var ret = { value: this.n, done: this.n == max };
-    this.n++;
-    return ret;
-  }
-  return wrap_iterator({ next: next, n: 0 });
-}
-
-function results(results) {
-  var i = 0;
-  function next() {
-    return results[i++];
-  }
-  return wrap_iterator({ next: next });
-}
-
-function* integers_from(n) {
-  while (1) yield n++;
-}
-
-// A destructive append.
-function append(x, tail) {
-  tail[tail.length] = x;
-  return tail;
-}
-
-function sum(x, tail) {
-  return x + tail;
-}
-
-function fold(cons, seed, iterable) {
-  for (var x of iterable) {
-    seed = cons(x, seed);
-  }
-  return seed;
-}
-
-function* take(iterable, n) {
-  if (n == 0) return;
-  for (let x of iterable) {
-    yield x;
-    if (--n == 0) break;
-  }
-}
-
-function nth(iterable, n) {
-  for (let x of iterable) {
-    if (n-- == 0) return x;
-  }
-  throw "unreachable";
-}
-
-function* skip_every(iterable, n) {
-  var i = 0;
-  for (let x of iterable) {
-    if (++i % n == 0) continue;
-    yield x;
-  }
-}
-
-function* iter_map(iterable, f) {
-  for (var x of iterable) {
-    yield f(x);
-  }
-}
-function nested_fold(cons, seed, iterable) {
-  var visited = []
-  for (let x of iterable) {
-    for (let y of x) {
-      seed = cons(y, seed);
-    }
-  }
-  return seed;
-}
-
-function* unreachable(iterable) {
-  for (let x of iterable) {
-    throw "not reached";
-  }
-}
-
-function one_time_getter(o, prop, val) {
-  function set_never() { throw "unreachable"; }
-  var gotten = false;
-  function get_once() {
-    if (gotten) throw "got twice";
-    gotten = true;
-    return val;
-  }
-  Object.defineProperty(o, prop, {get: get_once, set: set_never})
-  return o;
-}
-
-function never_getter(o, prop) {
-  function never() { throw "unreachable"; }
-  Object.defineProperty(o, prop, {get: never, set: never})
-  return o;
-}
-
-function remove_next_after(iterable, n) {
-  var iterator = iterable[Symbol.iterator]();
-  function next() {
-    if (n-- == 0) delete this.next;
-    return iterator.next();
-  }
-  return wrap_iterator({ next: next });
-}
-
-function poison_next_after(iterable, n) {
-  var iterator = iterable[Symbol.iterator]();
-  function next() {
-    return iterator.next();
-  }
-  function next_getter() {
-    if (n-- < 0)
-      throw "poisoned";
-    return next;
-  }
-  var o = {};
-  Object.defineProperty(o, 'next', { get: next_getter });
-  return wrap_iterator(o);
-}
-
-// Now, the tests.
-
-// Non-generator iterators.
-assertEquals(45, fold(sum, 0, integers_until(10)));
-// Generator iterators.
-assertEquals([1, 2, 3], fold(append, [], values(1, 2, 3)));
-// Break.
-assertEquals(45, fold(sum, 0, take(integers_from(0), 10)));
-// Continue.
-assertEquals(90, fold(sum, 0, take(skip_every(integers_from(0), 2), 10)));
-// Return.
-assertEquals(10, nth(integers_from(0), 10));
-// Nested for-of.
-assertEquals([0, 0, 1, 0, 1, 2, 0, 1, 2, 3],
-             nested_fold(append,
-                         [],
-                         iter_map(integers_until(5), integers_until)));
-// Result objects with sparse fields.
-assertEquals([undefined, 1, 2, 3],
-             fold(append, [],
-                  results([{ done: false },
-                           { value: 1, done: false },
-                           // A missing "done" is the same as undefined, which
-                           // is false.
-                           { value: 2 },
-                           // Not done.
-                           { value: 3, done: 0 },
-                           // Done.
-                           { value: 4, done: 42 }])));
-// Results that are not objects.
-assertEquals([undefined, undefined, undefined],
-             fold(append, [],
-                  results([10, "foo", /qux/, { value: 37, done: true }])));
-// Getters (shudder).
-assertEquals([1, 2],
-             fold(append, [],
-                  results([one_time_getter({ value: 1 }, 'done', false),
-                           one_time_getter({ done: false }, 'value', 2),
-                           { value: 37, done: true },
-                           never_getter(never_getter({}, 'done'), 'value')])));
-
-// Null and undefined do not cause an error.
-assertEquals(0, fold(sum, 0, unreachable(null)));
-assertEquals(0, fold(sum, 0, unreachable(undefined)));
-
-// Other non-iterators do cause an error.
-assertThrows('fold(sum, 0, unreachable({}))', TypeError);
-assertThrows('fold(sum, 0, unreachable("foo"))', TypeError);
-assertThrows('fold(sum, 0, unreachable(37))', TypeError);
-
-// "next" is looked up each time.
-assertThrows('fold(sum, 0, remove_next_after(integers_until(10), 5))',
-             TypeError);
-// It is not called at any other time.
-assertEquals(45,
-             fold(sum, 0, remove_next_after(integers_until(10), 10)));
-// It is not looked up too many times.
-assertEquals(45,
-             fold(sum, 0, poison_next_after(integers_until(10), 10)));
-
-function labelled_continue(iterable) {
-  var n = 0;
-outer:
-  while (true) {
-    n++;
-    for (var x of iterable) continue outer;
-    break;
-  }
-  return n;
-}
-assertEquals(11, labelled_continue(integers_until(10)));
-
-function labelled_break(iterable) {
-  var n = 0;
-outer:
-  while (true) {
-    n++;
-    for (var x of iterable) break outer;
-  }
-  return n;
-}
-assertEquals(1, labelled_break(integers_until(10)));
-
-// Test continue/break in catch.
-function catch_control(iterable, k) {
-  var n = 0;
-  for (var x of iterable) {
-    try {
-      return k(x);
-    } catch (e) {
-      if (e == "continue") continue;
-      else if (e == "break") break;
-      else throw e;
-    }
-  } while (false);
-  return false;
-}
-assertEquals(false,
-             catch_control(integers_until(10),
-                           function() { throw "break" }));
-assertEquals(false,
-             catch_control(integers_until(10),
-                           function() { throw "continue" }));
-assertEquals(5,
-             catch_control(integers_until(10),
-                           function(x) {
-                             if (x == 5) return x;
-                             throw "continue";
-                           }));
-
-// Test continue/break in try.
-function try_control(iterable, k) {
-  var n = 0;
-  for (var x of iterable) {
-    try {
-      var e = k(x);
-      if (e == "continue") continue;
-      else if (e == "break") break;
-      return e;
-    } catch (e) {
-      throw e;
-    }
-  } while (false);
-  return false;
-}
-assertEquals(false,
-             try_control(integers_until(10),
-                         function() { return "break" }));
-assertEquals(false,
-             try_control(integers_until(10),
-                         function() { return "continue" }));
-assertEquals(5,
-             try_control(integers_until(10),
-                         function(x) { return (x == 5) ? x : "continue" }));
-
-// Proxy results, with getters.
-function transparent_proxy(x) {
-  return Proxy.create({
-    get: function(receiver, name) { return x[name]; }
-  });
-}
-assertEquals([1, 2],
-             fold(append, [],
-                  results([one_time_getter({ value: 1 }, 'done', false),
-                           one_time_getter({ done: false }, 'value', 2),
-                           { value: 37, done: true },
-                           never_getter(never_getter({}, 'done'), 'value')]
-                          .map(transparent_proxy))));
-
-// Proxy iterators.
-function poison_proxy_after(iterable, n) {
-  var iterator = iterable[Symbol.iterator]();
-  return wrap_iterator(Proxy.create({
-    get: function(receiver, name) {
-      if (name == 'next' && n-- < 0) throw "unreachable";
-      return iterator[name];
-    },
-    // Needed for integers_until(10)'s this.n++.
-    set: function(receiver, name, val) {
-      return iterator[name] = val;
-    }
-  }));
-}
-assertEquals(45, fold(sum, 0, poison_proxy_after(integers_until(10), 10)));
diff --git a/test/mjsunit/harmony/iteration-syntax.js b/test/mjsunit/harmony/iteration-syntax.js
deleted file mode 100644
index 3bda78e..0000000
--- a/test/mjsunit/harmony/iteration-syntax.js
+++ /dev/null
@@ -1,70 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-iteration --harmony-scoping
-
-// Test for-of syntax.
-
-"use strict";
-
-function f() { for (x of y) { } }
-function f() { for (var x of y) { } }
-function f() { for (let x of y) { } }
-
-assertThrows("function f() { for (x of) { } }", SyntaxError);
-assertThrows("function f() { for (x of y z) { } }", SyntaxError);
-assertThrows("function f() { for (x of y;) { } }", SyntaxError);
-
-assertThrows("function f() { for (var x of) { } }", SyntaxError);
-assertThrows("function f() { for (var x of y z) { } }", SyntaxError);
-assertThrows("function f() { for (var x of y;) { } }", SyntaxError);
-
-assertThrows("function f() { for (let x of) { } }", SyntaxError);
-assertThrows("function f() { for (let x of y z) { } }", SyntaxError);
-assertThrows("function f() { for (let x of y;) { } }", SyntaxError);
-
-assertThrows("function f() { for (of y) { } }", SyntaxError);
-assertThrows("function f() { for (of of) { } }", SyntaxError);
-assertThrows("function f() { for (var of y) { } }", SyntaxError);
-assertThrows("function f() { for (var of of) { } }", SyntaxError);
-assertThrows("function f() { for (let of y) { } }", SyntaxError);
-assertThrows("function f() { for (let of of) { } }", SyntaxError);
-
-assertThrows("function f() { for (x = 3 of y) { } }", SyntaxError);
-assertThrows("function f() { for (var x = 3 of y) { } }", SyntaxError);
-assertThrows("function f() { for (let x = 3 of y) { } }", SyntaxError);
-
-
-// Alack, this appears to be valid.
-function f() { for (of of y) { } }
-function f() { for (let of of y) { } }
-function f() { for (var of of y) { } }
-
-// This too, of course.
-function f() { for (of in y) { } }
-function f() { for (var of in y) { } }
-function f() { for (let of in y) { } }
diff --git a/test/mjsunit/harmony/object-literals-method.js b/test/mjsunit/harmony/object-literals-method.js
new file mode 100644
index 0000000..71f44d1
--- /dev/null
+++ b/test/mjsunit/harmony/object-literals-method.js
@@ -0,0 +1,248 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-object-literals --allow-natives-syntax
+
+
+(function TestBasics() {
+  var object = {
+    method() {
+      return 42;
+    }
+  };
+  assertEquals(42, object.method());
+})();
+
+
+(function TestThis() {
+  var object = {
+    method() {
+      assertEquals(object, this);
+    }
+  };
+  object.method();
+})();
+
+
+(function TestDescriptor() {
+  var object = {
+    method() {
+      return 42;
+    }
+  };
+
+  var desc = Object.getOwnPropertyDescriptor(object, 'method');
+  assertTrue(desc.enumerable);
+  assertTrue(desc.configurable);
+  assertTrue(desc.writable);
+  assertEquals('function', typeof desc.value);
+
+  assertEquals(42, desc.value());
+})();
+
+
+(function TestProto() {
+  var object = {
+    method() {}
+  };
+
+  assertEquals(Function.prototype, Object.getPrototypeOf(object.method));
+})();
+
+
+(function TestNotConstructable() {
+  var object = {
+    method() {}
+  };
+
+  assertThrows(function() {
+    new object.method;
+  });
+})();
+
+
+(function TestFunctionName() {
+  var object = {
+    method() {},
+    1() {},
+    2.0() {}
+  };
+  var f = object.method;
+  assertEquals('method', f.name);
+  var g = object[1];
+  assertEquals('1', g.name);
+  var h = object[2];
+  assertEquals('2', h.name);
+})();
+
+
+(function TestNoBinding() {
+  var method = 'local';
+  var calls = 0;
+  var object = {
+    method() {
+      calls++;
+      assertEquals('local', method);
+    }
+  };
+  object.method();
+  assertEquals(1, calls);
+})();
+
+
+(function TestNoPrototype() {
+  var object = {
+    method() {}
+  };
+  var f = object.method;
+  assertFalse(f.hasOwnProperty('prototype'));
+  assertEquals(undefined, f.prototype);
+
+  f.prototype = 42;
+  assertEquals(42, f.prototype);
+})();
+
+
+(function TestToString() {
+  var object = {
+    method() { 42; }
+  };
+  assertEquals('method() { 42; }', object.method.toString());
+})();
+
+
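+// %OptimizeFunctionOnNextCall below is a V8 intrinsic enabled by the
+// --allow-natives-syntax flag in this file's Flags line; it forces the
+// method through the optimizing compiler so the remaining assertions also
+// cover optimized code.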
+(function TestOptimized() {
+  var object = {
+    method() { return 42; }
+  };
+  assertEquals(42, object.method());
+  assertEquals(42, object.method());
+  %OptimizeFunctionOnNextCall(object.method);
+  assertEquals(42, object.method());
+  assertFalse(object.method.hasOwnProperty('prototype'));
+})();
+
+
+///////////////////////////////////////////////////////////////////////////////
+
+
+var GeneratorFunction = function*() {}.__proto__.constructor;
+
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({value: value, done: done}, result);
+}
+
+
+(function TestGeneratorBasics() {
+  var object = {
+    *method() {
+      yield 1;
+    }
+  };
+  var g = object.method();
+  assertIteratorResult(1, false, g.next());
+  assertIteratorResult(undefined, true, g.next());
+})();
+
+
+(function TestGeneratorThis() {
+  var object = {
+    *method() {
+      yield this;
+    }
+  };
+  var g = object.method();
+  assertIteratorResult(object, false, g.next());
+  assertIteratorResult(undefined, true, g.next());
+})();
+
+
+(function TestGeneratorSymbolIterator() {
+  var object = {
+    *method() {}
+  };
+  var g = object.method();
+  assertEquals(g, g[Symbol.iterator]());
+})();
+
+
+(function TestGeneratorDescriptor() {
+  var object = {
+    *method() {
+      yield 1;
+    }
+  };
+
+  var desc = Object.getOwnPropertyDescriptor(object, 'method');
+  assertTrue(desc.enumerable);
+  assertTrue(desc.configurable);
+  assertTrue(desc.writable);
+  assertEquals('function', typeof desc.value);
+
+  var g = desc.value();
+  assertIteratorResult(1, false, g.next());
+  assertIteratorResult(undefined, true, g.next());
+})();
+
+
+(function TestGeneratorProto() {
+  var object = {
+    *method() {}
+  };
+
+  assertEquals(GeneratorFunction.prototype,
+               Object.getPrototypeOf(object.method));
+})();
+
+
+(function TestGeneratorConstructable() {
+  var object = {
+    *method() {
+      yield 1;
+    }
+  };
+
+  var g = new object.method();
+  assertIteratorResult(1, false, g.next());
+  assertIteratorResult(undefined, true, g.next());
+})();
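+// (Contrast with TestNotConstructable above: under the spec draft tested
+// here, generator methods remain constructable, so `new object.method()`
+// simply produces a fresh generator object.)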
+
+
+(function TestGeneratorName() {
+  var object = {
+    *method() {},
+    *1() {},
+    *2.0() {}
+  };
+  var f = object.method;
+  assertEquals('method', f.name);
+  var g = object[1];
+  assertEquals('1', g.name);
+  var h = object[2];
+  assertEquals('2', h.name);
+})();
+
+
+(function TestGeneratorNoBinding() {
+  var method = 'local';
+  var calls = 0;
+  var object = {
+    *method() {
+      calls++;
+      assertEquals('local', method);
+    }
+  };
+  var g = object.method();
+  assertIteratorResult(undefined, true, g.next());
+  assertEquals(1, calls);
+})();
+
+
+(function TestGeneratorToString() {
+  var object = {
+    *method() { yield 1; }
+  };
+  assertEquals('*method() { yield 1; }', object.method.toString());
+})();
diff --git a/test/mjsunit/harmony/private.js b/test/mjsunit/harmony/private.js
index 2257998..218094c 100644
--- a/test/mjsunit/harmony/private.js
+++ b/test/mjsunit/harmony/private.js
@@ -25,7 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-symbols --harmony-collections
 // Flags: --expose-gc --allow-natives-syntax
 
 var symbols = []
@@ -84,7 +83,8 @@
 
 function TestToString() {
   for (var i in symbols) {
-    assertThrows(function() { String(symbols[i]) }, TypeError)
+    assertThrows(function() { new String(symbols[i]) }, TypeError)
+    assertEquals(symbols[i].toString(), String(symbols[i]))
     assertThrows(function() { symbols[i] + "" }, TypeError)
     assertTrue(isValidSymbolString(symbols[i].toString()))
     assertTrue(isValidSymbolString(Object(symbols[i]).toString()))
@@ -115,8 +115,8 @@
 
 function TestToNumber() {
   for (var i in symbols) {
-    assertSame(NaN, Number(symbols[i]).valueOf())
-    assertSame(NaN, symbols[i] + 0)
+    assertThrows(function() { Number(symbols[i]); }, TypeError);
+    assertThrows(function() { symbols[i] + 0; }, TypeError);
   }
 }
 TestToNumber()
@@ -342,3 +342,18 @@
   assertEquals(syms, [publicSymbol, publicSymbol2])
 }
 TestGetOwnPropertySymbols()
+
+
+function TestSealAndFreeze(freeze) {
+  var sym = %CreatePrivateSymbol("private")
+  var obj = {}
+  obj[sym] = 1
+  freeze(obj)
+  obj[sym] = 2
+  assertEquals(2, obj[sym])
+  assertTrue(delete obj[sym])
+  assertEquals(undefined, obj[sym])
+}
+TestSealAndFreeze(Object.seal)
+TestSealAndFreeze(Object.freeze)
+TestSealAndFreeze(Object.preventExtensions)
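+
+// The assertions above pin down that private symbols bypass integrity
+// levels: even on a sealed or frozen object, a private-symbol property can
+// still be written and deleted. (%CreatePrivateSymbol is an intrinsic
+// enabled by the --allow-natives-syntax flag in this file's Flags line.)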
diff --git a/test/mjsunit/harmony/proxies-example-membrane.js b/test/mjsunit/harmony/proxies-example-membrane.js
index a645a66..7b2af72 100644
--- a/test/mjsunit/harmony/proxies-example-membrane.js
+++ b/test/mjsunit/harmony/proxies-example-membrane.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony
+// Flags: --harmony --harmony-proxies
 
 
 // A simple no-op handler. Adapted from:
diff --git a/test/mjsunit/harmony/proxies-hash.js b/test/mjsunit/harmony/proxies-hash.js
index 789de35..65d2d3c 100644
--- a/test/mjsunit/harmony/proxies-hash.js
+++ b/test/mjsunit/harmony/proxies-hash.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-proxies --harmony-collections
+// Flags: --harmony-proxies
 
 
 // Helper.
diff --git a/test/mjsunit/harmony/proxies-json.js b/test/mjsunit/harmony/proxies-json.js
index 539c5a8..eba10a1 100644
--- a/test/mjsunit/harmony/proxies-json.js
+++ b/test/mjsunit/harmony/proxies-json.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony
+// Flags: --harmony-proxies
 
 function testStringify(expected, object) {
   // Test fast case that bails out to slow case.
diff --git a/test/mjsunit/harmony/proxies-symbols.js b/test/mjsunit/harmony/proxies-symbols.js
index 8920e39..52353c0 100644
--- a/test/mjsunit/harmony/proxies-symbols.js
+++ b/test/mjsunit/harmony/proxies-symbols.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-proxies --harmony-symbols
+// Flags: --harmony-proxies
 
 
 // Helper.
diff --git a/test/mjsunit/harmony/proxies-with-unscopables.js b/test/mjsunit/harmony/proxies-with-unscopables.js
new file mode 100644
index 0000000..191bad3
--- /dev/null
+++ b/test/mjsunit/harmony/proxies-with-unscopables.js
@@ -0,0 +1,152 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-proxies
+
+
+// TODO(arv): Once proxies can intercept symbols, add more tests.
+
+
+function TestBasics() {
+  var log = [];
+
+  var proxy = Proxy.create({
+    getPropertyDescriptor: function(key) {
+      log.push(key);
+      if (key === 'x') {
+        return {
+          value: 1,
+          configurable: true
+        };
+      }
+      return undefined;
+    }
+  });
+
+  var x = 'local';
+
+  with (proxy) {
+    assertEquals(1, x);
+  }
+
+  // One 'x' for HasBinding and one for GetBindingValue
+  assertEquals(['assertEquals', 'x', 'x'], log);
+}
+TestBasics();
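+
+// A note on the log asserted above: inside `with (proxy)`, resolving `x`
+// consults the handler once for HasBinding and once for GetBindingValue,
+// hence two 'x' entries. The single 'assertEquals' entry comes from looking
+// up the assertion helper itself through the with-scope; HasBinding misses,
+// so no second call is made for it.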
+
+
+function TestInconsistent() {
+  var log = [];
+  var calls = 0;
+
+  var proxy = Proxy.create({
+    getPropertyDescriptor: function(key) {
+      log.push(key);
+      if (key === 'x' && calls < 1) {
+        calls++;
+        return {
+          value: 1,
+          configurable: true
+        };
+      }
+      return undefined;
+    }
+  });
+
+  var x = 'local';
+
+  with (proxy) {
+    assertEquals(void 0, x);
+  }
+
+  // One 'x' for HasBinding and one for GetBindingValue
+  assertEquals(['assertEquals', 'x', 'x'], log);
+}
+TestInconsistent();
+
+
+function TestUseProxyAsUnscopables() {
+  var x = 1;
+  var object = {
+    x: 2
+  };
+  var calls = 0;
+  var proxy = Proxy.create({
+    has: function(key) {
+      calls++;
+      assertEquals('x', key);
+      return calls === 2;
+    },
+    getPropertyDescriptor: function(key) {
+      assertUnreachable();
+    }
+  });
+
+  object[Symbol.unscopables] = proxy;
+
+  with (object) {
+    assertEquals(2, x);
+    assertEquals(1, x);
+  }
+
+  // HasBinding, HasBinding
+  assertEquals(2, calls);
+}
+TestUseProxyAsUnscopables();
+
+
+function TestThrowInHasUnscopables() {
+  var x = 1;
+  var object = {
+    x: 2
+  };
+
+  function CustomError() {}
+
+  var calls = 0;
+  var proxy = Proxy.create({
+    has: function(key) {
+      if (calls++ === 0) {
+        throw new CustomError();
+      }
+      assertUnreachable();
+    },
+    getPropertyDescriptor: function(key) {
+      assertUnreachable();
+    }
+  });
+
+  object[Symbol.unscopables] = proxy;
+
+  assertThrows(function() {
+    with (object) {
+      x;
+    }
+  }, CustomError);
+}
+TestThrowInHasUnscopables();
+
+
+var global = this;
+function TestGlobalShouldIgnoreUnscopables() {
+  global.x = 1;
+  var proxy = Proxy.create({
+    getPropertyDescriptor: function() {
+      assertUnreachable();
+    }
+  });
+  global[Symbol.unscopables] = proxy;
+
+  assertEquals(1, global.x);
+  assertEquals(1, x);
+
+  global.x = 2;
+  assertEquals(2, global.x);
+  assertEquals(2, x);
+
+  x = 3;
+  assertEquals(3, global.x);
+  assertEquals(3, x);
+}
+TestGlobalShouldIgnoreUnscopables();
diff --git a/test/mjsunit/harmony/proxies.js b/test/mjsunit/harmony/proxies.js
index 00e605f..b082c06 100644
--- a/test/mjsunit/harmony/proxies.js
+++ b/test/mjsunit/harmony/proxies.js
@@ -1807,7 +1807,7 @@
   },
 })
 
-TestKeysThrow([], {
+TestKeysThrow({
   get getOwnPropertyNames() {
     return function() { return [1, 2] }
   },
diff --git a/test/mjsunit/harmony/regexp-sticky.js b/test/mjsunit/harmony/regexp-sticky.js
new file mode 100644
index 0000000..bd7f646
--- /dev/null
+++ b/test/mjsunit/harmony/regexp-sticky.js
@@ -0,0 +1,132 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-regexps
+
+var re = /foo.bar/;
+
+assertTrue(!!"foo*bar".match(re));
+assertTrue(!!"..foo*bar".match(re));
+
+var plain = /foobar/;
+
+assertTrue(!!"foobar".match(plain));
+assertTrue(!!"..foobar".match(plain));
+
+var sticky = /foo.bar/y;
+
+assertTrue(!!"foo*bar".match(sticky));
+assertEquals(0, sticky.lastIndex);
+assertFalse(!!"..foo*bar".match(sticky));
+
+var stickyplain = /foobar/y;
+
+assertTrue(!!"foobar".match(stickyplain));
+assertEquals(0, stickyplain.lastIndex);
+assertFalse(!!"..foobar".match(stickyplain));
+
+var global = /foo.bar/g;
+
+assertTrue(global.test("foo*bar"));
+assertFalse(global.test("..foo*bar"));
+global.lastIndex = 0;
+assertTrue(global.test("..foo*bar"));
+
+var plainglobal = /foobar/g;
+
+assertTrue(plainglobal.test("foobar"));
+assertFalse(plainglobal.test("foobar"));
+plainglobal.lastIndex = 0;
+assertTrue(plainglobal.test("foobar"));
+
+var stickyglobal = /foo.bar/gy;
+
+assertTrue(stickyglobal.test("foo*bar"));
+assertEquals(7, stickyglobal.lastIndex);
+assertFalse(stickyglobal.test("..foo*bar"));
+stickyglobal.lastIndex = 0;
+assertFalse(stickyglobal.test("..foo*bar"));
+stickyglobal.lastIndex = 2;
+assertTrue(stickyglobal.test("..foo*bar"));
+assertEquals(9, stickyglobal.lastIndex);
+
+var stickyplainglobal = /foobar/yg;
+assertTrue(stickyplainglobal.sticky);
+stickyplainglobal.sticky = false;
+
+assertTrue(stickyplainglobal.test("foobar"));
+assertEquals(6, stickyplainglobal.lastIndex);
+assertFalse(stickyplainglobal.test("..foobar"));
+stickyplainglobal.lastIndex = 0;
+assertFalse(stickyplainglobal.test("..foobar"));
+stickyplainglobal.lastIndex = 2;
+assertTrue(stickyplainglobal.test("..foobar"));
+assertEquals(8, stickyplainglobal.lastIndex);
+
+assertEquals("/foo.bar/gy", "" + stickyglobal);
+assertEquals("/foo.bar/g", "" + global);
+
+assertTrue(stickyglobal.sticky);
+stickyglobal.sticky = false;
+assertTrue(stickyglobal.sticky);
+
+var stickyglobal2 = new RegExp("foo.bar", "gy");
+assertTrue(stickyglobal2.test("foo*bar"));
+assertEquals(7, stickyglobal2.lastIndex);
+assertFalse(stickyglobal2.test("..foo*bar"));
+stickyglobal2.lastIndex = 0;
+assertFalse(stickyglobal2.test("..foo*bar"));
+stickyglobal2.lastIndex = 2;
+assertTrue(stickyglobal2.test("..foo*bar"));
+assertEquals(9, stickyglobal2.lastIndex);
+
+assertEquals("/foo.bar/gy", "" + stickyglobal2);
+
+assertTrue(stickyglobal2.sticky);
+stickyglobal2.sticky = false;
+assertTrue(stickyglobal2.sticky);
+
+sticky.lastIndex = -1; // Causes sticky regexp to fail fast
+assertFalse(sticky.test("..foo.bar"));
+assertEquals(0, sticky.lastIndex);
+
+sticky.lastIndex = -1; // Causes sticky regexp to fail fast
+assertFalse(!!sticky.exec("..foo.bar"));
+assertEquals(0, sticky.lastIndex);
+
+// ES6 draft says: Even when the y flag is used with a pattern, ^ always
+// matches only at the beginning of Input, or (if Multiline is true) at the
+// beginning of a line.
+var hat = /^foo/y;
+hat.lastIndex = 2;
+assertFalse(hat.test("..foo"));
+
+var mhat = /^foo/my;
+mhat.lastIndex = 2;
+assertFalse(mhat.test("..foo"));
+mhat.lastIndex = 2;
+assertTrue(mhat.test(".\nfoo"));
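+
+// A minimal recap of the sticky semantics exercised above (illustrative,
+// not part of the original assertions): a /y regexp anchors each match at
+// lastIndex exactly.
+//   var r = /ab/y;
+//   r.lastIndex = 1;
+//   r.test("xab");  // true: the match begins exactly at index 1
+//   r.test("xab");  // false: lastIndex advanced to 3, and the failure resets it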
diff --git a/test/mjsunit/harmony/regress/regress-2186.js b/test/mjsunit/harmony/regress/regress-2186.js
deleted file mode 100644
index 0921dce..0000000
--- a/test/mjsunit/harmony/regress/regress-2186.js
+++ /dev/null
@@ -1,49 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-collections
-
-function heapify(i) {
-  return 2.0 * (i / 2);
-}
-heapify(1);
-
-var ONE = 1;
-var ANOTHER_ONE = heapify(ONE);
-assertSame(ONE, ANOTHER_ONE);
-assertEquals("number", typeof ONE);
-assertEquals("number", typeof ANOTHER_ONE);
-
-var set = new Set;
-set.add(ONE);
-assertTrue(set.has(ONE));
-assertTrue(set.has(ANOTHER_ONE));
-
-var map = new Map;
-map.set(ONE, 23);
-assertSame(23, map.get(ONE));
-assertSame(23, map.get(ANOTHER_ONE));
diff --git a/test/mjsunit/harmony/regress/regress-2681.js b/test/mjsunit/harmony/regress/regress-2681.js
deleted file mode 100644
index 9841d84..0000000
--- a/test/mjsunit/harmony/regress/regress-2681.js
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-gc --noincremental-marking --harmony-generators
-
-// Check that we are not flushing code for generators.
-
-function flush_all_code() {
-  // Each GC ages code, and currently 6 gcs will flush all code.
-  for (var i = 0; i < 10; i++) gc();
-}
-
-function* g() {
-  yield 1;
-  yield 2;
-}
-
-var o = g();
-assertEquals({ value: 1, done: false }, o.next());
-
-flush_all_code();
-
-assertEquals({ value: 2, done: false }, o.next());
-assertEquals({ value: undefined, done: true }, o.next());
diff --git a/test/mjsunit/harmony/regress/regress-2691.js b/test/mjsunit/harmony/regress/regress-2691.js
deleted file mode 100644
index e17be10..0000000
--- a/test/mjsunit/harmony/regress/regress-2691.js
+++ /dev/null
@@ -1,34 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-generators
-
-// Check that yield* on non-objects raises a TypeError.
-
-assertThrows('(function*() { yield* 10 })().next()', TypeError);
-assertThrows('(function*() { yield* {} })().next()', TypeError);
-assertThrows('(function*() { yield* undefined })().next()', TypeError);
diff --git a/test/mjsunit/harmony/regress/regress-3280.js b/test/mjsunit/harmony/regress/regress-3280.js
deleted file mode 100644
index 2fc72cc..0000000
--- a/test/mjsunit/harmony/regress/regress-3280.js
+++ /dev/null
@@ -1,25 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-// Flags: --harmony-generators --expose-debug-as debug
-
-var Debug = debug.Debug;
-
-var listener_called;
-
-function listener(event, exec_state, event_data, data) {
-  if (event == Debug.DebugEvent.Break) {
-    listener_called = true;
-    exec_state.frame().allScopes();
-  }
-}
-
-Debug.setListener(listener);
-
-function *generator_local_2(a) {
-  debugger;
-}
-generator_local_2(1).next();
-
-assertTrue(listener_called, "listener not called");
diff --git a/test/mjsunit/harmony/regress/regress-3426.js b/test/mjsunit/harmony/regress/regress-3426.js
new file mode 100644
index 0000000..c3b11a1
--- /dev/null
+++ b/test/mjsunit/harmony/regress/regress-3426.js
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-scoping
+
+assertThrows("(function() { 'use strict'; { let f; var f; } })", SyntaxError);
diff --git a/test/mjsunit/harmony/regress/regress-405844.js b/test/mjsunit/harmony/regress/regress-405844.js
new file mode 100644
index 0000000..fbe7310
--- /dev/null
+++ b/test/mjsunit/harmony/regress/regress-405844.js
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-proxies
+
+var proxy = Proxy.create({ fix: function() { return {}; } });
+Object.preventExtensions(proxy);
+Object.observe(proxy, function(){});
+
+var functionProxy = Proxy.createFunction({ fix: function() { return {}; } }, function(){});
+Object.preventExtensions(functionProxy);
+Object.observe(functionProxy, function(){});
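+
+// Background, as far as can be inferred here: in this old harmony Proxy API,
+// Object.preventExtensions() calls the handler's fix() trap, and a returned
+// object turns the proxy into a fixed, non-trapping object. The regression
+// presumably involved Object.observe() failing on such fixed proxies.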
diff --git a/test/mjsunit/harmony/regress/regress-crbug-248025.js b/test/mjsunit/harmony/regress/regress-crbug-248025.js
deleted file mode 100644
index c598859..0000000
--- a/test/mjsunit/harmony/regress/regress-crbug-248025.js
+++ /dev/null
@@ -1,40 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-iteration
-
-// Filler long enough to trigger lazy parsing.
-var filler = "//" + new Array(1024).join('x');
-
-// Test that the pre-parser does not crash when the expected contextual
-// keyword as part of a 'for' statement is not an identifier.
-try {
-  eval(filler + "\nfunction f() { for (x : y) { } }");
-  throw "not reached";
-} catch (e) {
-  if (!(e instanceof SyntaxError)) throw e;
-}
diff --git a/test/mjsunit/harmony/set-prototype-of.js b/test/mjsunit/harmony/set-prototype-of.js
index 02bd5e2..810220d 100644
--- a/test/mjsunit/harmony/set-prototype-of.js
+++ b/test/mjsunit/harmony/set-prototype-of.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-symbols
-
 
 function getObjects() {
   function func() {}
diff --git a/test/mjsunit/harmony/string-codepointat.js b/test/mjsunit/harmony/string-codepointat.js
new file mode 100644
index 0000000..411b0f2
--- /dev/null
+++ b/test/mjsunit/harmony/string-codepointat.js
@@ -0,0 +1,91 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-strings
+
+// Tests taken from:
+// https://github.com/mathiasbynens/String.prototype.codePointAt
+
+assertEquals(String.prototype.codePointAt.length, 1);
+assertEquals(String.prototype.propertyIsEnumerable("codePointAt"), false);
+
+// String that starts with a BMP symbol
+assertEquals("abc\uD834\uDF06def".codePointAt(""), 0x61);
+assertEquals("abc\uD834\uDF06def".codePointAt("_"), 0x61);
+assertEquals("abc\uD834\uDF06def".codePointAt(), 0x61);
+assertEquals("abc\uD834\uDF06def".codePointAt(-Infinity), undefined);
+assertEquals("abc\uD834\uDF06def".codePointAt(-1), undefined);
+assertEquals("abc\uD834\uDF06def".codePointAt(-0), 0x61);
+assertEquals("abc\uD834\uDF06def".codePointAt(0), 0x61);
+assertEquals("abc\uD834\uDF06def".codePointAt(3), 0x1D306);
+assertEquals("abc\uD834\uDF06def".codePointAt(4), 0xDF06);
+assertEquals("abc\uD834\uDF06def".codePointAt(5), 0x64);
+assertEquals("abc\uD834\uDF06def".codePointAt(42), undefined);
+assertEquals("abc\uD834\uDF06def".codePointAt(Infinity), undefined);
+assertEquals("abc\uD834\uDF06def".codePointAt(Infinity), undefined);
+assertEquals("abc\uD834\uDF06def".codePointAt(NaN), 0x61);
+assertEquals("abc\uD834\uDF06def".codePointAt(false), 0x61);
+assertEquals("abc\uD834\uDF06def".codePointAt(null), 0x61);
+assertEquals("abc\uD834\uDF06def".codePointAt(undefined), 0x61);
+
+// String that starts with an astral symbol
+assertEquals("\uD834\uDF06def".codePointAt(""), 0x1D306);
+assertEquals("\uD834\uDF06def".codePointAt("1"), 0xDF06);
+assertEquals("\uD834\uDF06def".codePointAt("_"), 0x1D306);
+assertEquals("\uD834\uDF06def".codePointAt(), 0x1D306);
+assertEquals("\uD834\uDF06def".codePointAt(-1), undefined);
+assertEquals("\uD834\uDF06def".codePointAt(-0), 0x1D306);
+assertEquals("\uD834\uDF06def".codePointAt(0), 0x1D306);
+assertEquals("\uD834\uDF06def".codePointAt(1), 0xDF06);
+assertEquals("\uD834\uDF06def".codePointAt(42), undefined);
+assertEquals("\uD834\uDF06def".codePointAt(false), 0x1D306);
+assertEquals("\uD834\uDF06def".codePointAt(null), 0x1D306);
+assertEquals("\uD834\uDF06def".codePointAt(undefined), 0x1D306);
+
+// Lone high surrogates
+assertEquals("\uD834abc".codePointAt(""), 0xD834);
+assertEquals("\uD834abc".codePointAt("_"), 0xD834);
+assertEquals("\uD834abc".codePointAt(), 0xD834);
+assertEquals("\uD834abc".codePointAt(-1), undefined);
+assertEquals("\uD834abc".codePointAt(-0), 0xD834);
+assertEquals("\uD834abc".codePointAt(0), 0xD834);
+assertEquals("\uD834abc".codePointAt(false), 0xD834);
+assertEquals("\uD834abc".codePointAt(NaN), 0xD834);
+assertEquals("\uD834abc".codePointAt(null), 0xD834);
+assertEquals("\uD834abc".codePointAt(undefined), 0xD834);
+
+// Lone low surrogates
+assertEquals("\uDF06abc".codePointAt(""), 0xDF06);
+assertEquals("\uDF06abc".codePointAt("_"), 0xDF06);
+assertEquals("\uDF06abc".codePointAt(), 0xDF06);
+assertEquals("\uDF06abc".codePointAt(-1), undefined);
+assertEquals("\uDF06abc".codePointAt(-0), 0xDF06);
+assertEquals("\uDF06abc".codePointAt(0), 0xDF06);
+assertEquals("\uDF06abc".codePointAt(false), 0xDF06);
+assertEquals("\uDF06abc".codePointAt(NaN), 0xDF06);
+assertEquals("\uDF06abc".codePointAt(null), 0xDF06);
+assertEquals("\uDF06abc".codePointAt(undefined), 0xDF06);
+
+assertThrows(function() {
+  String.prototype.codePointAt.call(undefined);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.codePointAt.call(undefined, 4);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.codePointAt.call(null);
+}, TypeError);
+assertThrows(function() {
+  String.prototype.codePointAt.call(null, 4);
+}, TypeError);
+assertEquals(String.prototype.codePointAt.call(42, 0), 0x34);
+assertEquals(String.prototype.codePointAt.call(42, 1), 0x32);
+assertEquals(String.prototype.codePointAt.call({
+  toString: function() { return "abc"; }
+}, 2), 0x63);
+var tmp = 0;
+assertEquals(String.prototype.codePointAt.call({
+  toString: function() { ++tmp; return String(tmp); }
+}, 0), 0x31);
+assertEquals(tmp, 1);
diff --git a/test/mjsunit/harmony/string-fromcodepoint.js b/test/mjsunit/harmony/string-fromcodepoint.js
new file mode 100644
index 0000000..97ecf0e
--- /dev/null
+++ b/test/mjsunit/harmony/string-fromcodepoint.js
@@ -0,0 +1,62 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-strings
+
+// Tests taken from:
+// https://github.com/mathiasbynens/String.fromCodePoint
+
+assertEquals(String.fromCodePoint.length, 1);
+assertEquals(String.propertyIsEnumerable("fromCodePoint"), false);
+
+assertEquals(String.fromCodePoint(""), "\0");
+assertEquals(String.fromCodePoint(), "");
+assertEquals(String.fromCodePoint(-0), "\0");
+assertEquals(String.fromCodePoint(0), "\0");
+assertEquals(String.fromCodePoint(0x1D306), "\uD834\uDF06");
+assertEquals(
+    String.fromCodePoint(0x1D306, 0x61, 0x1D307),
+    "\uD834\uDF06a\uD834\uDF07");
+assertEquals(String.fromCodePoint(0x61, 0x62, 0x1D307), "ab\uD834\uDF07");
+assertEquals(String.fromCodePoint(false), "\0");
+assertEquals(String.fromCodePoint(null), "\0");
+
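+// Anything that does not coerce to an integral code point in the range
+// [0, 0x10FFFF] must be rejected with a RangeError.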
+assertThrows(function() { String.fromCodePoint("_"); }, RangeError);
+assertThrows(function() { String.fromCodePoint("+Infinity"); }, RangeError);
+assertThrows(function() { String.fromCodePoint("-Infinity"); }, RangeError);
+assertThrows(function() { String.fromCodePoint(-1); }, RangeError);
+assertThrows(function() { String.fromCodePoint(0x10FFFF + 1); }, RangeError);
+assertThrows(function() { String.fromCodePoint(3.14); }, RangeError);
+assertThrows(function() { String.fromCodePoint(3e-2); }, RangeError);
+assertThrows(function() { String.fromCodePoint(-Infinity); }, RangeError);
+assertThrows(function() { String.fromCodePoint(+Infinity); }, RangeError);
+assertThrows(function() { String.fromCodePoint(NaN); }, RangeError);
+assertThrows(function() { String.fromCodePoint(undefined); }, RangeError);
+assertThrows(function() { String.fromCodePoint({}); }, RangeError);
+assertThrows(function() { String.fromCodePoint(/./); }, RangeError);
+assertThrows(function() { String.fromCodePoint({
+  valueOf: function() { throw Error(); } });
+}, Error);
+var tmp = 0x60;
+assertEquals(String.fromCodePoint({
+  valueOf: function() { ++tmp; return tmp; }
+}), "a");
+assertEquals(tmp, 0x61);
+
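+// Stress test: spread 1.5 * 2^15 = 49152 arguments through apply to check
+// that large argument counts are handled without throwing.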
+var counter = Math.pow(2, 15) * 3 / 2;
+var result = [];
+while (--counter >= 0) {
+  result.push(0); // one code unit per symbol
+}
+String.fromCodePoint.apply(null, result); // must not throw
+
+var counter = Math.pow(2, 15) * 3 / 2;
+var result = [];
+while (--counter >= 0) {
+  result.push(0xFFFF + 1); // two code units per symbol
+}
+String.fromCodePoint.apply(null, result); // must not throw
diff --git a/test/mjsunit/harmony/super.js b/test/mjsunit/harmony/super.js
new file mode 100644
index 0000000..89fb4b1
--- /dev/null
+++ b/test/mjsunit/harmony/super.js
@@ -0,0 +1,127 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+
+
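+// In this harmony draft, f.toMethod(home) returns a copy of f whose
+// [[HomeObject]] is home; super references inside the copy then resolve
+// against the prototype of home.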
+(function TestSuperNamedLoads() {
+  function Base() { }
+  function Derived() {
+    this.derivedDataProperty = "xxx";
+  }
+  Derived.prototype = Object.create(Base.prototype);
+
+  function fBase() { return "Base " + this.toString(); }
+
+  Base.prototype.f = fBase.toMethod(Base.prototype);
+
+  function fDerived() {
+     assertEquals("Base this is Derived", super.f());
+     assertEquals(15, super.x);
+     assertEquals(27, this.x);
+
+     return "Derived"
+  }
+
+  Base.prototype.x = 15;
+  Base.prototype.toString = function() { return "this is Base"; };
+  Derived.prototype.toString = function() { return "this is Derived"; };
+  Derived.prototype.x = 27;
+  Derived.prototype.f = fDerived.toMethod(Derived.prototype);
+
+  assertEquals("Base this is Base", new Base().f());
+  assertEquals("Derived", new Derived().f());
+}());
+
+(function TestSuperKeywordNonMethod() {
+  function f() {
+    super.unknown();
+  }
+
+  assertThrows(f, ReferenceError);
+}());
+
+
+(function TestGetter() {
+  function Base() {}
+  var derived;
+  Base.prototype = {
+    constructor: Base,
+    get x() {
+      assertSame(this, derived);
+      return this._x;
+    },
+    _x: 'base'
+  };
+
+  function Derived() {}
+  Derived.__proto__ = Base;
+  Derived.prototype = {
+    __proto__: Base.prototype,
+    constructor: Derived,
+    _x: 'derived'
+  };
+  Derived.prototype.testGetter = function() {
+    return super.x;
+  }.toMethod(Derived.prototype);
+  derived = new Derived();
+  assertEquals('derived', derived.testGetter());
+}());
+
+/*
+ * TODO[dslomov]: named stores and keyed loads/stores not implemented yet.
+(function TestSetter() {
+  function Base() {}
+  Base.prototype = {
+    constructor: Base,
+    get x() {
+      return this._x;
+    },
+    set x(v) {
+      this._x = v;
+    },
+    _x: 'base'
+  };
+
+  function Derived() {}
+  Derived.__proto__ = Base;
+  Derived.prototype = {
+    __proto__: Base.prototype,
+    constructor: Derived,
+    _x: 'derived'
+  };
+  Derived.prototype.testSetter = function() {
+      super.x = 'foobar';
+    }.toMethod(Derived.prototype);
+  var d = new Derived();
+  d.testSetter();
+  assertEquals('base', Base.prototype._x);
+  assertEquals('foobar', d._x);
+}());
+
+
+(function TestKeyedGetter() {
+  function Base() {}
+  Base.prototype = {
+    constructor: Base,
+    _x: 'base'
+  };
+
+  Object.defineProperty(Base.prototype, '0',
+        { get: function() { return this._x; } });
+
+  function Derived() {}
+  Derived.__proto__ = Base;
+  Derived.prototype = {
+    __proto__: Base.prototype,
+    constructor: Derived,
+    _x: 'derived'
+  };
+  Derived.prototype.testGetter = function() {
+      return super[0];
+    }.toMethod(Derived.prototype);
+  assertEquals('derived', new Derived()[0]);
+  // assertEquals('derived', new Derived().testGetter());
+}());
+*/
diff --git a/test/mjsunit/harmony/toMethod.js b/test/mjsunit/harmony/toMethod.js
new file mode 100644
index 0000000..ad51b2f
--- /dev/null
+++ b/test/mjsunit/harmony/toMethod.js
@@ -0,0 +1,115 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes --allow-natives-syntax
+
+
+(function TestSingleClass() {
+  function f(x) {
+    var a = [0, 1, 2]
+    return a[x];
+  }
+
+  function ClassD() { }
+
+  assertEquals(1, f(1));
+  var g = f.toMethod(ClassD.prototype);
+  assertEquals(1, g(1));
+  assertEquals(undefined, f[%HomeObjectSymbol()]);
+  assertEquals(ClassD.prototype, g[%HomeObjectSymbol()]);
+}());
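+// %HomeObjectSymbol() returns the private symbol under which the home
+// object is stored, letting the tests observe what toMethod set.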
+
+
+(function TestClassHierarchy() {
+  function f(x) {
+    return function g(y)  { x++; return x + y; };
+  }
+
+  function Base() {}
+  function Derived() { }
+  Derived.prototype = Object.create(Base.prototype);
+
+  var q = f(0);
+  assertEquals(2, q(1));
+  assertEquals(3, q(1));
+  var g = q.toMethod(Derived.prototype);
+  assertFalse(g === q);
+  assertEquals(4, g(1));
+  assertEquals(5, q(1));
+}());
+
+
+(function TestErrorCases() {
+  var sFun = Function.prototype.toMethod;
+  assertThrows(function() { sFun.call({}); }, TypeError);
+  assertThrows(function() { sFun.call({}, {}); }, TypeError);
+  function f(){};
+  assertThrows(function() { f.toMethod(1); }, TypeError);
+}());
+
+
+(function TestPrototypeChain() {
+  var o = {};
+  var o1 = {};
+  function f() { }
+
+  function g() { }
+
+  var fMeth = f.toMethod(o);
+  assertEquals(o, fMeth[%HomeObjectSymbol()]);
+  g.__proto__ = fMeth;
+  assertEquals(undefined, g[%HomeObjectSymbol()]);
+  var gMeth = g.toMethod(o1);
+  assertEquals(fMeth, gMeth.__proto__);
+  assertEquals(o, fMeth[%HomeObjectSymbol()]);
+  assertEquals(o1, gMeth[%HomeObjectSymbol()]);
+}());
+
+
+(function TestBoundFunction() {
+  var o = {};
+  var p = {};
+
+
+  function f(x, y, z, w) {
+    assertEquals(o, this);
+    assertEquals(1, x);
+    assertEquals(2, y);
+    assertEquals(3, z);
+    assertEquals(4, w);
+    return x+y+z+w;
+  }
+
+  var fBound = f.bind(o, 1, 2, 3);
+  var fMeth = fBound.toMethod(p);
+  assertEquals(10, fMeth(4));
+  assertEquals(10, fMeth.call(p, 4));
+  var fBound1 = fBound.bind(o, 4);
+  assertEquals(10, fBound1());
+  var fMethBound = fMeth.bind(o, 4);
+  assertEquals(10, fMethBound());
+}());
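+// Note that toMethod composes with bind: the bound receiver and pre-filled
+// arguments are preserved, and the method copy merely gains a home object.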
+
+(function TestOptimized() {
+  function f(o) {
+    return o.x;
+  }
+  var o = {x : 15};
+  assertEquals(15, f(o));
+  assertEquals(15, f(o));
+  %OptimizeFunctionOnNextCall(f);
+  assertEquals(15, f(o));
+  var g = f.toMethod({});
+  var o1 = {y : 1024, x : "abc"};
+  assertEquals("abc", f(o1));
+  assertEquals("abc", g(o1));
+}());
+
+(function TestExtensibility() {
+  function f() {}
+  Object.preventExtensions(f);
+  assertFalse(Object.isExtensible(f));
+  var m = f.toMethod({});
+  assertTrue(Object.isExtensible(m));
+}());
diff --git a/test/mjsunit/harmony/typeof.js b/test/mjsunit/harmony/typeof.js
deleted file mode 100644
index acde977..0000000
--- a/test/mjsunit/harmony/typeof.js
+++ /dev/null
@@ -1,35 +0,0 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-typeof
-
-assertFalse(typeof null == 'object')
-assertFalse(typeof null === 'object')
-assertTrue(typeof null == 'null')
-assertTrue(typeof null === 'null')
-assertEquals("null", typeof null)
-assertSame("null", typeof null)
diff --git a/test/mjsunit/json-stringify-recursive.js b/test/mjsunit/json-stringify-recursive.js
index 31aa002..d2788a5 100644
--- a/test/mjsunit/json-stringify-recursive.js
+++ b/test/mjsunit/json-stringify-recursive.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --stack-size=100
+
 var a = {};
 for (i = 0; i < 10000; i++) {
   var current = {};
diff --git a/test/mjsunit/keyed-named-access.js b/test/mjsunit/keyed-named-access.js
new file mode 100644
index 0000000..11f8fb5
--- /dev/null
+++ b/test/mjsunit/keyed-named-access.js
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var k = "x";
+var o1 = {x: 10};
+var o2 = {x: 11, y: 20};
+var o3 = {x: 12, y: 20, z: 100};
+
+function f(o) {
+  var result = 0;
+  for (var i = 0; i < 100; i++) {
+    result += o[k];
+  }
+  return result;
+}
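+// o1, o2 and o3 have different maps, so the keyed load of the constant key
+// k inside f sees increasingly polymorphic type feedback below.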
+
+f(o1);
+f(o1);
+f(o1);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1000, f(o1));
+
+f(o2);
+f(o2);
+f(o2);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1100, f(o2));
+
+f(o3);
+f(o3);
+f(o3);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(1200, f(o3));
+
+(function CountOperationDeoptimizationGetter() {
+  var global = {};
+  global.__defineGetter__("A", function () { return "x"; });
+
+  function h() {
+    return "A";
+  }
+
+  function g(a, b, c) {
+    try {
+      return a + b.toString() + c;
+    } catch (e) { }
+  }
+
+  function test(o) {
+    return g(1, o[h()]--, 10);
+  }
+
+  test(global);
+  test(global);
+  %OptimizeFunctionOnNextCall(test);
+  print(test(global));
+})();
+
+
+(function CountOperationDeoptimizationPoint() {
+  function test() {
+    this[0, ""]--;
+  }
+
+  test();
+  test();
+  %OptimizeFunctionOnNextCall(test);
+  test();
+})();
diff --git a/test/mjsunit/lithium/SeqStringSetChar.js b/test/mjsunit/lithium/SeqStringSetChar.js
index 3c890a8..c5bd145 100644
--- a/test/mjsunit/lithium/SeqStringSetChar.js
+++ b/test/mjsunit/lithium/SeqStringSetChar.js
@@ -29,13 +29,13 @@
 
 function MyStringFromCharCode(code, i) {
   var one_byte = %NewString(3, true);
-  %_OneByteSeqStringSetChar(one_byte, 0, code);
-  %_OneByteSeqStringSetChar(one_byte, 1, code);
-  %_OneByteSeqStringSetChar(one_byte, i, code);
+  %_OneByteSeqStringSetChar(0, code, one_byte);
+  %_OneByteSeqStringSetChar(1, code, one_byte);
+  %_OneByteSeqStringSetChar(i, code, one_byte);
   var two_byte = %NewString(3, false);
-  %_TwoByteSeqStringSetChar(two_byte, 0, code);
-  %_TwoByteSeqStringSetChar(two_byte, 1, code);
-  %_TwoByteSeqStringSetChar(two_byte, i, code);
+  %_TwoByteSeqStringSetChar(0, code, two_byte);
+  %_TwoByteSeqStringSetChar(1, code, two_byte);
+  %_TwoByteSeqStringSetChar(i, code, two_byte);
   return one_byte + two_byte;
 }
 
diff --git a/test/mjsunit/migrations.js b/test/mjsunit/migrations.js
new file mode 100644
index 0000000..6a2ea64
--- /dev/null
+++ b/test/mjsunit/migrations.js
@@ -0,0 +1,311 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --track-fields --expose-gc
+
+var global = Function('return this')();
+var verbose = 0;
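+// Each test() run combines one object shape (ctor_desc), one way of using a
+// property (use_desc) and one map-migration trigger (migr_desc); the nested
+// forEach loops at the bottom of the file cover the full cross product.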
+
+function test(ctor_desc, use_desc, migr_desc) {
+  var n = 5;
+  var objects = [];
+  var results = [];
+
+  if (verbose) {
+    print();
+    print("===========================================================");
+    print("=== " + ctor_desc.name +
+          " | " + use_desc.name + " |--> " + migr_desc.name);
+    print("===========================================================");
+  }
+
+  // Clean ICs and transitions.
+  %NotifyContextDisposed();
+  gc(); gc(); gc();
+
+
+  // create objects
+  if (verbose) {
+    print("-----------------------------");
+    print("--- construct");
+    print();
+  }
+  for (var i = 0; i < n; i++) {
+    objects[i] = ctor_desc.ctor.apply(ctor_desc, ctor_desc.args(i));
+  }
+
+  try {
+    // use them
+    if (verbose) {
+      print("-----------------------------");
+      print("--- use 1");
+      print();
+    }
+    var use = use_desc.use1;
+    for (var i = 0; i < n; i++) {
+      if (i == 3) %OptimizeFunctionOnNextCall(use);
+      results[i] = use(objects[i], i);
+    }
+
+    // trigger migrations
+    if (verbose) {
+      print("-----------------------------");
+      print("--- trigger migration");
+      print();
+    }
+    var migr = migr_desc.migr;
+    for (var i = 0; i < n; i++) {
+      if (i == 3) %OptimizeFunctionOnNextCall(migr);
+      migr(objects[i], i);
+    }
+
+    // use again
+    if (verbose) {
+      print("-----------------------------");
+      print("--- use 2");
+      print();
+    }
+    var use = use_desc.use2 !== undefined ? use_desc.use2 : use_desc.use1;
+    for (var i = 0; i < n; i++) {
+      if (i == 3) %OptimizeFunctionOnNextCall(use);
+      results[i] = use(objects[i], i);
+      if (verbose >= 2) print(results[i]);
+    }
+
+  } catch (e) {
+    if (verbose) print("--- incompatible use: " + e);
+  }
+  return results;
+}
+
+
+var ctors = [
+  {
+    name: "none-to-double",
+    ctor: function(v) { return {a: v}; },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "double",
+    ctor: function(v) { var o = {}; o.a = v; return o; },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "none-to-smi",
+    ctor: function(v) { return {a: v}; },
+    args: function(i) { return [i]; },
+  },
+  {
+    name: "smi",
+    ctor: function(v) { var o = {}; o.a = v; return o; },
+    args: function(i) { return [i]; },
+  },
+  {
+    name: "none-to-object",
+    ctor: function(v) { return {a: v}; },
+    args: function(i) { return ["s"]; },
+  },
+  {
+    name: "object",
+    ctor: function(v) { var o = {}; o.a = v; return o; },
+    args: function(i) { return ["s"]; },
+  },
+  {
+    name: "{a:, b:, c:}",
+    ctor: function(v1, v2, v3) { return {a: v1, b: v2, c: v3}; },
+    args: function(i)    { return [1.5 + i, 1.6, 1.7]; },
+  },
+  {
+    name: "{a..h:}",
+    ctor: function(v) { var o = {}; o.h=o.g=o.f=o.e=o.d=o.c=o.b=o.a=v; return o; },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "1",
+    ctor: function(v) { var o = 1; o.a = v; return o; },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "f()",
+    ctor: function(v) { var o = function() { return v;}; o.a = v; return o; },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "f().bind",
+    ctor: function(v) { var o = function(a,b,c) { return a+b+c; }; o = o.bind(o, v, v+1, v+2.2); return o; },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "dictionary elements",
+    ctor: function(v) { var o = []; o[1] = v; o[200000] = v; return o; },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "json",
+    ctor: function(v) { var json = '{"a":' + v + ',"b":' + v + '}'; return JSON.parse(json); },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "fast accessors",
+    accessor: {
+        get: function() { return this.a_; },
+        set: function(value) {this.a_ = value; },
+        configurable: true,
+    },
+    ctor: function(v) {
+      var o = {a_:v};
+      Object.defineProperty(o, "a", this.accessor);
+      return o;
+    },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "slow accessor",
+    accessor1: { value: this.a_, configurable: true },
+    accessor2: {
+        get: function() { return this.a_; },
+        set: function(value) {this.a_ = value; },
+        configurable: true,
+    },
+    ctor: function(v) {
+      var o = {a_:v};
+      Object.defineProperty(o, "a", this.accessor1);
+      Object.defineProperty(o, "a", this.accessor2);
+      return o;
+    },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "slow",
+    proto: {},
+    ctor: function(v) {
+      var o = {__proto__: this.proto};
+      o.a = v;
+      for (var i = 0; %HasFastProperties(o); i++) o["f"+i] = v;
+      return o;
+    },
+    args: function(i) { return [1.5 + i]; },
+  },
+  {
+    name: "global",
+    ctor: function(v) { return global; },
+    args: function(i) { return [i]; },
+  },
+];
+
+
+
+var uses = [
+  {
+    name: "o.a+1.0",
+    use1: function(o, i) { return o.a + 1.0; },
+    use2: function(o, i) { return o.a + 1.1; },
+  },
+  {
+    name: "o.b+1.0",
+    use1: function(o, i) { return o.b + 1.0; },
+    use2: function(o, i) { return o.b + 1.1; },
+  },
+  {
+    name: "o[1]+1.0",
+    use1: function(o, i) { return o[1] + 1.0; },
+    use2: function(o, i) { return o[1] + 1.1; },
+  },
+  {
+    name: "o[-1]+1.0",
+    use1: function(o, i) { return o[-1] + 1.0; },
+    use2: function(o, i) { return o[-1] + 1.1; },
+  },
+  {
+    name: "()",
+    use1: function(o, i) { return o() + 1.0; },
+    use2: function(o, i) { return o() + 1.1; },
+  },
+];
+
+
+
+var migrations = [
+  {
+    name: "to smi",
+    migr: function(o, i) { if (i == 0) o.a = 1; },
+  },
+  {
+    name: "to double",
+    migr: function(o, i) { if (i == 0) o.a = 1.1; },
+  },
+  {
+    name: "to object",
+    migr: function(o, i) { if (i == 0) o.a = {}; },
+  },
+  {
+    name: "set prototype {}",
+    migr: function(o, i) { o.__proto__ = {}; },
+  },
+  {
+    name: "%FunctionSetPrototype",
+    migr: function(o, i) { %FunctionSetPrototype(o, null); },
+  },
+  {
+    name: "modify prototype",
+    migr: function(o, i) { if (i == 0) o.__proto__.__proto1__ = [,,,5,,,]; },
+  },
+  {
+    name: "freeze prototype",
+    migr: function(o, i) { if (i == 0) Object.freeze(o.__proto__); },
+  },
+  {
+    name: "delete and re-add property",
+    migr: function(o, i) { var v = o.a; delete o.a; o.a = v; },
+  },
+  {
+    name: "modify prototype",
+    migr: function(o, i) { if (i >= 0) o.__proto__ = {}; },
+  },
+  {
+    name: "set property callback",
+    migr: function(o, i) {
+      Object.defineProperty(o, "a", {
+        get: function() { return 1.5 + i; },
+        set: function(value) {},
+        configurable: true,
+      });
+    },
+  },
+  {
+    name: "observe",
+    migr: function(o, i) { Object.observe(o, function(){}); },
+  },
+  {
+    name: "%EnableAccessChecks",
+    migr: function(o, i) {
+      if (typeof (o) !== 'function') %EnableAccessChecks(o);
+    },
+  },
+  {
+    name: "%DisableAccessChecks",
+    migr: function(o, i) {
+      if ((typeof (o) !== 'function') && (o !== global)) %DisableAccessChecks(o);
+    },
+  },
+  {
+    name: "seal",
+    migr: function(o, i) { Object.seal(o); },
+  },
+  { // Must be the last in the sequence, because after the global object is
+    // frozen the other modifications do not make sense.
+    name: "freeze",
+    migr: function(o, i) { Object.freeze(o); },
+  },
+];
+
+
+
+migrations.forEach(function(migr) {
+  uses.forEach(function(use) {
+    ctors.forEach(function(ctor) {
+      test(ctor, use, migr);
+    });
+  });
+});
diff --git a/test/mjsunit/mirror-object.js b/test/mjsunit/mirror-object.js
index 8bf8a2d..7020338 100644
--- a/test/mjsunit/mirror-object.js
+++ b/test/mjsunit/mirror-object.js
@@ -111,12 +111,14 @@
 
   // Check that the serialization contains all properties.
   assertEquals(names.length, fromJSON.properties.length, 'Some properties missing in JSON');
-  for (var i = 0; i < fromJSON.properties.length; i++) {
-    var name = fromJSON.properties[i].name;
-    if (typeof name == 'undefined') name = fromJSON.properties[i].index;
+  for (var j = 0; j < names.length; j++) {
+    var name = names[j];
+    // Serialization of symbol-named properties to JSON doesn't really
+    // work currently, as they don't get a {name: ...} entry.
+    if (typeof name === 'symbol') continue;
     var found = false;
-    for (var j = 0; j < names.length; j++) {
-      if (names[j] == name) {
+    for (var i = 0; i < fromJSON.properties.length; i++) {
+      if (fromJSON.properties[i].name == name) {
         // Check that serialized handle is correct.
         assertEquals(properties[i].value().handle(), fromJSON.properties[i].ref, 'Unexpected serialized handle');
 
@@ -170,6 +172,9 @@
   this.y_ = y;
 }
 
+var object_with_symbol = {};
+object_with_symbol[Symbol.iterator] = 42;
+
 // Test a number of different objects.
 testObjectMirror({}, 'Object', 'Object');
 testObjectMirror({'a':1,'b':2}, 'Object', 'Object');
@@ -180,6 +185,7 @@
 testObjectMirror([], 'Array', 'Array');
 testObjectMirror([1,2], 'Array', 'Array');
 testObjectMirror(Object(17), 'Number', 'Number');
+testObjectMirror(object_with_symbol, 'Object', 'Object');
 
 // Test circular references.
 o = {};
diff --git a/test/mjsunit/mirror-script.js b/test/mjsunit/mirror-script.js
index 1d64ac2..e545a61 100644
--- a/test/mjsunit/mirror-script.js
+++ b/test/mjsunit/mirror-script.js
@@ -84,7 +84,7 @@
 
 // Test the script mirror for different functions.
 testScriptMirror(function(){}, 'mirror-script.js', 98, 2, 0);
-testScriptMirror(Math.sin, 'native math.js', -1, 0, 0);
+testScriptMirror(Math.round, 'native math.js', -1, 0, 0);
 testScriptMirror(eval('(function(){})'), null, 1, 2, 1, '(function(){})', 87);
 testScriptMirror(eval('(function(){\n  })'), null, 2, 2, 1, '(function(){\n  })', 88);
 
diff --git a/test/mjsunit/mjsunit.js b/test/mjsunit/mjsunit.js
index bcd4913..0430279 100644
--- a/test/mjsunit/mjsunit.js
+++ b/test/mjsunit/mjsunit.js
@@ -391,9 +391,8 @@
       } catch (e) {
         throw new Error("natives syntax not allowed");
       }
-    } else {
-      OptimizationStatusImpl(fun, sync_opt);
     }
+    return OptimizationStatusImpl(fun, sync_opt);
   }
 
   assertUnoptimized = function assertUnoptimized(fun, sync_opt, name_opt) {
diff --git a/test/mjsunit/mjsunit.status b/test/mjsunit/mjsunit.status
index e147dd6..bba86bd 100644
--- a/test/mjsunit/mjsunit.status
+++ b/test/mjsunit/mjsunit.status
@@ -51,6 +51,78 @@
   # Issue 3389: deopt_every_n_garbage_collections is unsafe
   'regress/regress-2653': [SKIP],
 
+  # This test relies on --noopt-safe-uint32-operations, which is broken. See
+  # issue 3487 for details.
+  'compiler/shift-shr': [SKIP],
+
+  ##############################################################################
+  # TurboFan compiler failures.
+
+  # TODO(mstarzinger): An arguments object materialized in the prologue can't
+  # be accessed indirectly. Either we drop that requirement or wait for support
+  # from the deoptimizer to do that.
+  'arguments-indirect': [PASS, NO_VARIANTS],
+
+  # TODO(rossberg): Typer doesn't like contexts very much.
+  'harmony/block-conflicts': [PASS, NO_VARIANTS],
+  'harmony/block-for': [PASS, NO_VARIANTS],
+  'harmony/block-leave': [PASS, NO_VARIANTS],
+  'harmony/block-let-crankshaft': [PASS, NO_VARIANTS],
+  'harmony/empty-for': [PASS, NO_VARIANTS],
+
+  # Some tests are over-restrictive about object layout.
+  'array-constructor-feedback': [PASS, NO_VARIANTS],
+  'array-feedback': [PASS, NO_VARIANTS],
+  'elements-kind': [PASS, NO_VARIANTS],
+
+  # Some tests are just too slow to run for now.
+  'bit-not': [PASS, NO_VARIANTS],
+  'json2': [PASS, NO_VARIANTS],
+  'packed-elements': [PASS, NO_VARIANTS],
+  'unbox-double-arrays': [PASS, NO_VARIANTS],
+  'whitespaces': [PASS, NO_VARIANTS],
+  'compiler/osr-assert': [PASS, NO_VARIANTS],
+  'regress/regress-2185-2': [PASS, NO_VARIANTS],
+
+  # Support for %GetFrameDetails is missing and requires checkpoints.
+  'debug-evaluate-bool-constructor': [PASS, NO_VARIANTS],
+  'debug-evaluate-const': [PASS, NO_VARIANTS],
+  'debug-evaluate-locals-optimized-double': [PASS, NO_VARIANTS],
+  'debug-evaluate-locals-optimized': [PASS, NO_VARIANTS],
+  'debug-evaluate-locals': [PASS, NO_VARIANTS],
+  'debug-evaluate-with-context': [PASS, NO_VARIANTS],
+  'debug-liveedit-double-call': [PASS, NO_VARIANTS],
+  'debug-liveedit-restart-frame': [PASS, NO_VARIANTS],
+  'debug-return-value': [PASS, NO_VARIANTS],
+  'debug-set-variable-value': [PASS, NO_VARIANTS],
+  'debug-step-stub-callfunction': [PASS, NO_VARIANTS],
+  'debug-stepin-accessor': [PASS, NO_VARIANTS],
+  'debug-stepin-builtin': [PASS, NO_VARIANTS],
+  'debug-stepin-constructor': [PASS, NO_VARIANTS],
+  'debug-stepin-function-call': [PASS, NO_VARIANTS],
+  'debug-stepnext-do-while': [PASS, NO_VARIANTS],
+  'debug-stepout-recursive-function': [PASS, NO_VARIANTS],
+  'debug-stepout-scope-part1': [PASS, NO_VARIANTS],
+  'debug-stepout-scope-part2': [PASS, NO_VARIANTS],
+  'debug-stepout-scope-part3': [PASS, NO_VARIANTS],
+  'debug-stepout-scope-part7': [PASS, NO_VARIANTS],
+  'debug-stepout-to-builtin': [PASS, NO_VARIANTS],
+  'es6/debug-promises/throw-in-constructor': [PASS, NO_VARIANTS],
+  'es6/debug-promises/reject-in-constructor': [PASS, NO_VARIANTS],
+  'es6/debug-promises/throw-with-undefined-reject': [PASS, NO_VARIANTS],
+  'es6/debug-promises/throw-with-throw-in-reject': [PASS, NO_VARIANTS],
+  'es6/debug-promises/reject-with-throw-in-reject': [PASS, NO_VARIANTS],
+  'es6/debug-promises/throw-uncaught-all': [PASS, NO_VARIANTS],
+  'es6/debug-promises/throw-uncaught-uncaught': [PASS, NO_VARIANTS],
+  'es6/debug-promises/reject-uncaught-late': [PASS, NO_VARIANTS],
+  'es6/debug-promises/throw-caught-by-default-reject-handler': [PASS, NO_VARIANTS],
+  'es6/generators-debug-scopes': [PASS, NO_VARIANTS],
+  'harmony/debug-blockscopes': [PASS, NO_VARIANTS],
+  'regress/regress-1081309': [PASS, NO_VARIANTS],
+  'regress/regress-269': [PASS, NO_VARIANTS],
+  'regress/regress-crbug-259300': [PASS, NO_VARIANTS],
+  'regress/regress-frame-details-null-receiver': [PASS, NO_VARIANTS],
+
   ##############################################################################
   # Too slow in debug mode with --stress-opt mode.
   'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
@@ -70,14 +142,11 @@
   # No need to waste time for this test.
   'd8-performance-now': [PASS, NO_VARIANTS],
 
-  ##############################################################################
-  'big-object-literal': [PASS, ['arch == arm or arch == android_arm or arch == android_arm64', SKIP]],
-
   # Issue 488: this test sometimes times out.
   'array-constructor': [PASS, TIMEOUT],
 
   # Very slow on ARM and MIPS, contains no architecture dependent code.
-  'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips', TIMEOUT]],
+  'unicode-case-overoptimization': [PASS, NO_VARIANTS, ['arch == arm or arch == android_arm or arch == android_arm64 or arch == mipsel or arch == mips64el or arch == mips', TIMEOUT]],
 
   ##############################################################################
   # This test expects to reach a certain recursion depth, which may not work
@@ -87,6 +156,8 @@
   ##############################################################################
   # Skip long running tests that time out in debug mode.
   'generated-transition-stub': [PASS, ['mode == debug', SKIP]],
+  'migrations': [SKIP],
+  'array-functions-prototype-misc': [PASS, ['mode == debug', SKIP]],
 
   ##############################################################################
   # This test sets the umask on a per-process basis and hence cannot be
@@ -97,7 +168,7 @@
   # get the same random seed and would generate the same directory name. Besides
   # that, it doesn't make sense to run several variants of d8-os anyways.
   'd8-os': [PASS, NO_VARIANTS, ['isolates or arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
-  'tools/tickprocessor': [PASS, ['arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
+  'tools/tickprocessor': [PASS, NO_VARIANTS, ['arch == android_arm or arch == android_arm64 or arch == android_ia32', SKIP]],
 
   ##############################################################################
   # Long running test that reproduces memory leak and should be run manually.
@@ -136,45 +207,40 @@
   'array-feedback': [SKIP],
   'array-literal-feedback': [SKIP],
   'd8-performance-now': [SKIP],
+  'debug-stepout-scope-part8': [PASS, ['arch == arm ', FAIL]],
   'elements-kind': [SKIP],
+  'elements-transition-hoisting': [SKIP],
   'fast-prototype': [SKIP],
+  'getters-on-elements': [SKIP],
+  'harmony/block-let-crankshaft': [SKIP],
   'opt-elements-kind': [SKIP],
   'osr-elements-kind': [SKIP],
   'regress/regress-165637': [SKIP],
   'regress/regress-2249': [SKIP],
-  'debug-stepout-scope-part8': [PASS, ['arch == arm ', FAIL]],
+  # Tests taking too long
+  'debug-stepout-scope-part8': [SKIP],
+  'mirror-object': [SKIP],
+  'packed-elements': [SKIP],
+  'regress/regress-1122': [SKIP],
+  'regress/regress-331444': [SKIP],
+  'regress/regress-353551': [SKIP],
+  'regress/regress-crbug-119926': [SKIP],
+  'regress/short-circuit': [SKIP],
+  'stack-traces-overflow': [SKIP],
+  'unicode-test': [SKIP],
+  'whitespaces': [SKIP],
+
+  # TODO(mstarzinger): Takes too long with TF.
+  'array-sort': [PASS, NO_VARIANTS],
+  'regress/regress-91008': [PASS, NO_VARIANTS],
 }],  # 'gc_stress == True'
 
 ##############################################################################
-['no_i18n', {
-  # Don't call runtime functions that don't exist without i18n support.
-  'runtime-gen/availablelocalesof': [SKIP],
-  'runtime-gen/breakiteratoradopttext': [SKIP],
-  'runtime-gen/breakiteratorbreaktype': [SKIP],
-  'runtime-gen/breakiteratorbreaktype': [SKIP],
-  'runtime-gen/breakiteratorcurrent': [SKIP],
-  'runtime-gen/breakiteratorfirst': [SKIP],
-  'runtime-gen/breakiteratornext': [SKIP],
-  'runtime-gen/canonicalizelanguagetag': [SKIP],
-  'runtime-gen/createbreakiterator': [SKIP],
-  'runtime-gen/createcollator': [SKIP],
-  'runtime-gen/getdefaulticulocale': [SKIP],
-  'runtime-gen/getimplfrominitializedintlobject': [SKIP],
-  'runtime-gen/getlanguagetagvariants': [SKIP],
-  'runtime-gen/internalcompare': [SKIP],
-  'runtime-gen/internaldateformat': [SKIP],
-  'runtime-gen/internaldateparse': [SKIP],
-  'runtime-gen/internalnumberformat': [SKIP],
-  'runtime-gen/internalnumberparse': [SKIP],
-  'runtime-gen/isinitializedintlobject': [SKIP],
-  'runtime-gen/isinitializedintlobjectoftype': [SKIP],
-  'runtime-gen/markasinitializedintlobjectoftype': [SKIP],
-  'runtime-gen/stringnormalize': [SKIP],
-}],
-
-##############################################################################
 ['arch == arm64 or arch == android_arm64', {
 
+  # arm64 TF timeout.
+  'regress/regress-1257': [PASS, TIMEOUT],
+
   # Requires a bigger stack size in the Genesis; if the stack size is
   # increased, the test takes too much time to run. However, the problem the
   # test covers should be platform-independent.
@@ -183,6 +249,7 @@
   # Pass but take too long to run. Skip.
   # Some similar tests (with fewer iterations) may be included in arm64-js
   # tests.
+  'big-object-literal': [SKIP],
   'compiler/regress-arguments': [SKIP],
   'compiler/regress-gvn': [SKIP],
   'compiler/regress-max-locals-for-osr': [SKIP],
@@ -204,16 +271,12 @@
   'regress/regress-2185-2': [PASS, TIMEOUT],
   'whitespaces': [PASS, TIMEOUT, SLOW],
 
-  # Stack manipulations in LiveEdit is not implemented for this arch.
-  'debug-liveedit-check-stack': [SKIP],
-  'debug-liveedit-stack-padding': [SKIP],
-  'debug-liveedit-restart-frame': [SKIP],
-  'debug-liveedit-double-call': [SKIP],
-  'harmony/generators-debug-liveedit': [SKIP],
-
   # BUG(v8:3147). It works on other architectures by accident.
   'regress/regress-conditional-position': [FAIL],
 
+  # BUG(v8:3457).
+  'deserialize-reference': [PASS, FAIL],
+
   # Slow tests.
   'array-concat': [PASS, SLOW],
   'array-constructor': [PASS, SLOW],
@@ -224,7 +287,6 @@
   'bit-not': [PASS, SLOW],
   'compiler/alloc-number': [PASS, SLOW],
   'compiler/osr-assert': [PASS, SLOW],
-  'compiler/osr-warm': [PASS, SLOW],
   'compiler/osr-with-args': [PASS, SLOW],
   'debug-scopes': [PASS, SLOW],
   'generated-transition-stub': [PASS, SLOW],
@@ -290,6 +352,7 @@
 
   # Long running tests. Skipping because having them timeout takes too long on
   # the buildbot.
+  'big-object-literal': [SKIP],
   'compiler/alloc-number': [SKIP],
   'regress/regress-490': [SKIP],
   'regress/regress-634': [SKIP],
@@ -301,13 +364,6 @@
   # should be platform-independent.
   'regress/regress-1132': [SKIP],
 
-  # Stack manipulations in LiveEdit is not implemented for this arch.
-  'debug-liveedit-check-stack': [SKIP],
-  'debug-liveedit-stack-padding': [SKIP],
-  'debug-liveedit-restart-frame': [SKIP],
-  'debug-liveedit-double-call': [SKIP],
-  'harmony/generators-debug-liveedit': [SKIP],
-
   # Currently always deopt on minus zero
   'math-floor-of-div-minus-zero': [SKIP],
 
@@ -353,18 +409,75 @@
   # should be platform-independent.
   'regress/regress-1132': [SKIP],
 
-  # Stack manipulations in LiveEdit is not implemented for this arch.
-  'debug-liveedit-check-stack': [SKIP],
-  'debug-liveedit-stack-padding': [SKIP],
-  'debug-liveedit-restart-frame': [SKIP],
-  'debug-liveedit-double-call': [SKIP],
-  'harmony/generators-debug-liveedit': [SKIP],
-
   # Currently always deopt on minus zero
   'math-floor-of-div-minus-zero': [SKIP],
 }],  # 'arch == mipsel or arch == mips'
 
 ##############################################################################
+['arch == mips64el', {
+
+  # Slow tests which time out in debug mode.
+  'try': [PASS, ['mode == debug', SKIP]],
+  'debug-scripts-request': [PASS, ['mode == debug', SKIP]],
+  'array-constructor': [PASS, ['mode == debug', SKIP]],
+
+  # Times out often in release mode on MIPS.
+  'compiler/regress-stacktrace-methods': [PASS, PASS, ['mode == release', TIMEOUT]],
+  'array-splice': [PASS, TIMEOUT],
+
+  # Long running test.
+  'mirror-object': [PASS, TIMEOUT],
+  'string-indexof-2': [PASS, TIMEOUT],
+
+  # BUG(3251035): Timeouts in long looping crankshaft optimization
+  # tests. Skipping because having them timeout takes too long on the
+  # buildbot.
+  'compiler/alloc-number': [PASS, SLOW],
+  'compiler/array-length': [PASS, SLOW],
+  'compiler/assignment-deopt': [PASS, SLOW],
+  'compiler/deopt-args': [PASS, SLOW],
+  'compiler/inline-compare': [PASS, SLOW],
+  'compiler/inline-global-access': [PASS, SLOW],
+  'compiler/optimized-function-calls': [PASS, SLOW],
+  'compiler/pic': [PASS, SLOW],
+  'compiler/property-calls': [PASS, SLOW],
+  'compiler/recursive-deopt': [PASS, SLOW],
+  'compiler/regress-4': [PASS, SLOW],
+  'compiler/regress-funcaller': [PASS, SLOW],
+  'compiler/regress-rep-change': [PASS, SLOW],
+  'compiler/regress-arguments': [PASS, SLOW],
+  'compiler/regress-funarguments': [PASS, SLOW],
+  'compiler/regress-3249650': [PASS, SLOW],
+  'compiler/simple-deopt': [PASS, SLOW],
+  'regress/regress-490': [PASS, SLOW],
+  'regress/regress-634': [PASS, SLOW],
+  'regress/regress-create-exception': [PASS, SLOW],
+  'regress/regress-3218915': [PASS, SLOW],
+  'regress/regress-3247124': [PASS, SLOW],
+
+  # Requires a bigger stack size in the Genesis; if the stack size is
+  # increased, the test takes too much time to run. However, the problem the
+  # test covers should be platform-independent.
+  'regress/regress-1132': [SKIP],
+
+  # Currently always deopt on minus zero
+  'math-floor-of-div-minus-zero': [SKIP],
+}],  # 'arch == mips64el'
+
+['arch == mips64el and simulator_run == False', {
+  # Random failures on HW, need investigation.
+  'debug-*': [SKIP],
+}],
+##############################################################################
+['system == windows', {
+  # TODO(mstarzinger): Too slow with TurboFan.
+  'big-object-literal': [PASS, ['mode == debug', SKIP]],
+
+  # BUG(v8:3435)
+  'debug-script-breakpoints': [PASS, FAIL],
+}],  # 'system == windows'
+
+##############################################################################
 # Native Client uses the ARM simulator so will behave similarly to arm
 # on mjsunit tests.
 # TODO(bradchen): enable more tests for NaCl V8 when it stops using
@@ -396,6 +509,18 @@
   # Skip long running test that times out in debug mode and goes OOM on NaCl.
   'regress/regress-crbug-160010': [SKIP],
 
+  # Skip tests that time out with TurboFan.
+  'regress/regress-1257': [PASS, NO_VARIANTS],
+  'regress/regress-2618': [PASS, NO_VARIANTS],
+  'regress/regress-298269': [PASS, NO_VARIANTS],
+  'regress/regress-634': [PASS, NO_VARIANTS],
+  'regress/regress-91008': [PASS, NO_VARIANTS],
+  'compiler/osr-alignment': [PASS, NO_VARIANTS],
+  'compiler/osr-one': [PASS, NO_VARIANTS],
+  'compiler/osr-two': [PASS, NO_VARIANTS],
+  'stack-traces-overflow': [PASS, NO_VARIANTS],
+  'mirror-object': [PASS, NO_VARIANTS],
+
   # Bug(v8:2978).
   'lithium/MathExp': [PASS, FAIL],
 
diff --git a/test/mjsunit/new-string-add.js b/test/mjsunit/new-string-add.js
deleted file mode 100644
index f5b7cbf..0000000
--- a/test/mjsunit/new-string-add.js
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --new-string-add
-
-assertEquals("ab", "a" + "b", "ll");
-
-assertEquals("12", "1" + "2", "dd");
-assertEquals("123", "1" + "2" + "3", "ddd");
-assertEquals("123", 1 + "2" + "3", "ndd");
-assertEquals("123", "1" + 2 + "3", "dnd");
-assertEquals("123", "1" + "2" + 3, "ddn");
-
-assertEquals("123", "1" + 2 + 3, "dnn");
-assertEquals("123", 1 + "2" + 3, "ndn");
-assertEquals("33", 1 + 2 + "3", "nnd");
-
-var x = "1";
-assertEquals("12", x + 2, "vn");
-assertEquals("12", x + "2", "vd");
-assertEquals("21", 2 + x, "nv");
-assertEquals("21", "2" + x, "dv");
-
-var y = "2";
-assertEquals("12", x + y, "vdvd");
-
-x = 1;
-assertEquals("12", x + y, "vnvd");
-
-y = 2;
-assertEquals(3, x + y, "vnvn");
-
-x = "1";
-assertEquals("12", x + y, "vdvn");
-
-y = "2";
-assertEquals("12", x + y, "vdvd2");
-
-(function(x, y) {
-  var z = "3";
-  var w = "4";
-
-  assertEquals("11", x + x, "xx");
-  assertEquals("12", x + y, "xy");
-  assertEquals("13", x + z, "xz");
-  assertEquals("14", x + w, "xw");
-
-  assertEquals("21", y + x, "yx");
-  assertEquals("22", y + y, "yy");
-  assertEquals("23", y + z, "yz");
-  assertEquals("24", y + w, "yw");
-
-  assertEquals("31", z + x, "zx");
-  assertEquals("32", z + y, "zy");
-  assertEquals("33", z + z, "zz");
-  assertEquals("34", z + w, "zw");
-
-  assertEquals("41", w + x, "wx");
-  assertEquals("42", w + y, "wy");
-  assertEquals("43", w + z, "wz");
-  assertEquals("44", w + w, "ww");
-
-  (function(){x = 1; z = 3;})();
-
-  assertEquals(2, x + x, "x'x");
-  assertEquals("12", x + y, "x'y");
-  assertEquals(4, x + z, "x'z'");
-  assertEquals("14", x + w, "x'w");
-
-  assertEquals("21", y + x, "yx'");
-  assertEquals("22", y + y, "yy");
-  assertEquals("23", y + z, "yz'");
-  assertEquals("24", y + w, "yw");
-
-  assertEquals(4, z + x, "z'x'");
-  assertEquals("32", z + y, "z'y");
-  assertEquals(6, z + z, "z'z'");
-  assertEquals("34", z + w, "z'w");
-
-  assertEquals("41", w + x, "wx'");
-  assertEquals("42", w + y, "wy");
-  assertEquals("43", w + z, "wz'");
-  assertEquals("44", w + w, "ww");
-})("1", "2");
-
-assertEquals("142", "1" + new Number(42), "sN");
-assertEquals("421", new Number(42) + "1", "Ns");
-assertEquals(84, new Number(42) + new Number(42), "NN");
-
-assertEquals("142", "1" + new String("42"), "sS");
-assertEquals("421", new String("42") + "1", "Ss");
-assertEquals("142", "1" + new String("42"), "sS");
-assertEquals("4242", new String("42") + new String("42"), "SS");
-
-assertEquals("1true", "1" + true, "sb");
-assertEquals("true1", true + "1", "bs");
-assertEquals(2, true + true, "bs");
-
-assertEquals("1true", "1" + new Boolean(true), "sB");
-assertEquals("true1", new Boolean(true) + "1", "Bs");
-assertEquals(2, new Boolean(true) + new Boolean(true), "Bs");
-
-assertEquals("1undefined", "1" + void 0, "sv");
-assertEquals("undefined1", (void 0)  + "1", "vs");
-assertTrue(isNaN(void 0 + void 0), "vv");
-
-assertEquals("1null", "1" + null, "su");
-assertEquals("null1", null + "1", "us");
-assertEquals(0, null + null, "uu");
-
-(function (i) {
-  // Check that incoming frames are merged correctly.
-  var x;
-  var y;
-  var z;
-  var w;
-  switch (i) {
-  case 1: x = 42; y = "stry"; z = "strz"; w = 42; break;
-  default: x = "strx", y = 42; z = "strz"; w = 42; break;
-  }
-  var resxx = x + x;
-  var resxy = x + y;
-  var resxz = x + z;
-  var resxw = x + w;
-  var resyx = y + x;
-  var resyy = y + y;
-  var resyz = y + z;
-  var resyw = y + w;
-  var reszx = z + x;
-  var reszy = z + y;
-  var reszz = z + z;
-  var reszw = z + w;
-  var reswx = w + x;
-  var reswy = w + y;
-  var reswz = w + z;
-  var resww = w + w;
-  assertEquals(84, resxx, "swxx");
-  assertEquals("42stry", resxy, "swxy");
-  assertEquals("42strz", resxz, "swxz");
-  assertEquals(84, resxw, "swxw");
-  assertEquals("stry42", resyx, "swyx");
-  assertEquals("strystry", resyy, "swyy");
-  assertEquals("strystrz", resyz, "swyz");
-  assertEquals("stry42", resyw, "swyw");
-  assertEquals("strz42", reszx, "swzx");
-  assertEquals("strzstry", reszy, "swzy");
-  assertEquals("strzstrz", reszz, "swzz");
-  assertEquals("strz42", reszw, "swzw");
-  assertEquals(84, reswx, "swwx");
-  assertEquals("42stry", reswy, "swwy");
-  assertEquals("42strz", reswz, "swwz");
-  assertEquals(84, resww, "swww");
-})(1);
-
-// Generate ascii and non ascii strings from length 0 to 20.
-var ascii = 'aaaaaaaaaaaaaaaaaaaa';
-var non_ascii = '\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234\u1234';
-assertEquals(20, ascii.length);
-assertEquals(20, non_ascii.length);
-var a = Array(21);
-var b = Array(21);
-for (var i = 0; i <= 20; i++) {
-  a[i] = ascii.substring(0, i);
-  b[i] = non_ascii.substring(0, i);
-}
-
-// Add ascii and non-ascii strings generating strings with length from 0 to 20.
-for (var i = 0; i <= 20; i++) {
-  for (var j = 0; j < i; j++) {
-    assertEquals(a[i], a[j] + a[i - j])
-    assertEquals(b[i], b[j] + b[i - j])
-  }
-}
diff --git a/test/mjsunit/number-literal.js b/test/mjsunit/number-literal.js
new file mode 100644
index 0000000..7191a1c
--- /dev/null
+++ b/test/mjsunit/number-literal.js
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function test(message, a, b, skipStrictMode) {
+  assertSame(eval(a), eval(b), message);
+  if (!skipStrictMode) {
+    (function() {
+      'use strict';
+      assertSame(eval(a), eval(b), message);
+    })();
+  }
+}
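+// Each case below parses both literals with eval and checks that they denote
+// the same double value, in sloppy and (unless skipped) strict mode.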
+
+test('hex-int', '0x20', '32');
+test('oct-int', '040', '32', true);  // Octals disallowed in strict mode.
+test('dec-int', '32.00', '32');
+test('dec-underflow-int', '32.00000000000000000000000000000000000000001', '32');
+test('exp-int', '3.2e1', '32');
+test('exp-int', '3200e-2', '32');
+test('overflow-inf', '1e2000', 'Infinity');
+test('overflow-inf-exact', '1.797693134862315808e+308', 'Infinity');
+test('non-overflow-inf-exact', '1.797693134862315807e+308',
+     '1.7976931348623157e+308');
+test('underflow-0', '1e-2000', '0');
+test('underflow-0-exact', '2.4703282292062E-324', '0');
+test('non-underflow-0-exact', '2.4703282292063E-324', '5e-324');
+test('precision-loss-high', '9007199254740992', '9007199254740993');
+test('precision-loss-low', '1.9999999999999998', '1.9999999999999997');
+test('non-canonical-literal-int', '1.0', '1');
+test('non-canonical-literal-frac', '1.50', '1.5');
+test('rounding-down', '1.12512512512512452', '1.1251251251251244');
+test('rounding-up', '1.12512512512512453', '1.1251251251251246');
diff --git a/test/mjsunit/object-define-property.js b/test/mjsunit/object-define-property.js
index cbb2d21..4c495c6 100644
--- a/test/mjsunit/object-define-property.js
+++ b/test/mjsunit/object-define-property.js
@@ -27,7 +27,7 @@
 
 // Tests the object.defineProperty method - ES 15.2.3.6
 
-// Flags: --allow-natives-syntax --es5-readonly
+// Flags: --allow-natives-syntax
 
 // Check that an exception is thrown when null is passed as object.
 var exception = false;
@@ -467,35 +467,35 @@
 }
 
 
-// Test runtime calls to DefineOrRedefineDataProperty and
-// DefineOrRedefineAccessorProperty - make sure we don't
+// Test runtime calls to DefineDataPropertyUnchecked and
+// DefineAccessorPropertyUnchecked - make sure we don't
 // crash.
 try {
-  %DefineOrRedefineAccessorProperty(0, 0, 0, 0, 0);
+  %DefineAccessorPropertyUnchecked(0, 0, 0, 0, 0);
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
 
 try {
-  %DefineOrRedefineDataProperty(0, 0, 0, 0);
+  %DefineDataPropertyUnchecked(0, 0, 0, 0);
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
 
 try {
-  %DefineOrRedefineDataProperty(null, null, null, null);
+  %DefineDataPropertyUnchecked(null, null, null, null);
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
 
 try {
-  %DefineOrRedefineAccessorProperty(null, null, null, null, null);
+  %DefineAccessorPropertyUnchecked(null, null, null, null, null);
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
 
 try {
-  %DefineOrRedefineDataProperty({}, null, null, null);
+  %DefineDataPropertyUnchecked({}, null, null, null);
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
@@ -503,13 +503,13 @@
 // Defining properties on null should fail even when we have
 // other allowed values
 try {
-  %DefineOrRedefineAccessorProperty(null, 'foo', func, null, 0);
+  %DefineAccessorPropertyUnchecked(null, 'foo', func, null, 0);
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
 
 try {
-  %DefineOrRedefineDataProperty(null, 'foo', 0, 0);
+  %DefineDataPropertyUnchecked(null, 'foo', 0, 0);
 } catch (e) {
   assertTrue(/illegal access/.test(e));
 }
diff --git a/test/mjsunit/object-literal.js b/test/mjsunit/object-literal.js
index 3d0b33b..53188d1 100644
--- a/test/mjsunit/object-literal.js
+++ b/test/mjsunit/object-literal.js
@@ -190,3 +190,73 @@
 for (var i = 0; i < keywords.length; i++) {
   testKeywordProperty(keywords[i]);
 }
+
+
+(function TestNumericNames() {
+  var o = {
+    1: 1,
+    2.: 2,
+    3.0: 3,
+    4e0: 4,
+    5E0: 5,
+    6e-0: 6,
+    7E-0: 7,
+    0x8: 8,
+    0X9: 9,
+  }
+  assertEquals(['1', '2', '3', '4', '5', '6', '7', '8', '9'], Object.keys(o));
+
+  o = {
+    1.2: 1.2,
+    1.30: 1.3
+  };
+  assertEquals(['1.2', '1.3'], Object.keys(o));
+})();
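+// Numeric property names are canonicalized via the number-to-string
+// conversion, which is why 2., 3.0, 4e0 and 0x8 all collapse to "2", "3",
+// "4" and "8"; the same holds for the getter and setter names below.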
+
+
+function TestNumericNamesGetter(expectedKeys, object) {
+  assertEquals(expectedKeys, Object.keys(object));
+  expectedKeys.forEach(function(key) {
+    var descr = Object.getOwnPropertyDescriptor(object, key);
+    assertEquals(key, descr.get.name);
+  });
+}
+TestNumericNamesGetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
+  get 1() {},
+  get 2.() {},
+  get 3.0() {},
+  get 4e0() {},
+  get 5E0() {},
+  get 6e-0() {},
+  get 7E-0() {},
+  get 0x8() {},
+  get 0X9() {},
+});
+TestNumericNamesGetter(['1.2', '1.3'], {
+  get 1.2() {},
+  get 1.30() {}
+});
+
+
+function TestNumericNamesSetter(expectedKeys, object) {
+  assertEquals(expectedKeys, Object.keys(object));
+  expectedKeys.forEach(function(key) {
+    var descr = Object.getOwnPropertyDescriptor(object, key);
+    assertEquals(key, descr.set.name);
+  });
+}
+TestNumericNamesSetter(['1', '2', '3', '4', '5', '6', '7', '8', '9'], {
+  set 1(_) {},
+  set 2.(_) {},
+  set 3.0(_) {},
+  set 4e0(_) {},
+  set 5E0(_) {},
+  set 6e-0(_) {},
+  set 7E-0(_) {},
+  set 0x8(_) {},
+  set 0X9(_) {},
+});
+TestNumericNamesSetter(['1.2', '1.3'], {
+  set 1.2(_) {; },
+  set 1.30(_) {; }
+});
diff --git a/test/mjsunit/object-toprimitive.js b/test/mjsunit/object-toprimitive.js
index 3a67ced..34803ec 100644
--- a/test/mjsunit/object-toprimitive.js
+++ b/test/mjsunit/object-toprimitive.js
@@ -102,3 +102,5 @@
 var nt = Number(ot);
 assertEquals(87, nt);
 assertEquals(["gvo", "gts", "ts"], trace);
+
+assertThrows('Number(Symbol())', TypeError);
diff --git a/test/mjsunit/opt-elements-kind.js b/test/mjsunit/opt-elements-kind.js
index f26bb42..be7303b 100644
--- a/test/mjsunit/opt-elements-kind.js
+++ b/test/mjsunit/opt-elements-kind.js
@@ -25,28 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
 
 // Limit the number of stress runs to reduce polymorphism: it defeats some of
 // the assumptions made about how elements transitions work, because
 // transition stubs end up going generic.
 // Flags: --stress-runs=2
 
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
-
 var elements_kind = {
   fast_smi_only            :  'fast smi only elements',
   fast                     :  'fast elements',
@@ -100,10 +85,6 @@
 }
 
 function assertKind(expected, obj, name_opt) {
-  if (!support_smi_only_arrays &&
-      expected == elements_kind.fast_smi_only) {
-    expected = elements_kind.fast;
-  }
   assertEquals(expected, getKind(obj), name_opt);
 }
 
@@ -143,8 +124,6 @@
 }
 
 function test1() {
-  if (!support_smi_only_arrays) return;
-
   // Test transition chain SMI->DOUBLE->FAST (crankshafted function will
   // transition to FAST directly).
   var smis = construct_smis();
diff --git a/test/mjsunit/osr-elements-kind.js b/test/mjsunit/osr-elements-kind.js
index 2ad3c43..518b984 100644
--- a/test/mjsunit/osr-elements-kind.js
+++ b/test/mjsunit/osr-elements-kind.js
@@ -25,28 +25,13 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
 
 // Limit the number of stress runs to reduce polymorphism: it defeats some of
 // the assumptions made about how elements transitions work, because
 // transition stubs end up going generic.
 // Flags: --stress-runs=2
 
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
-
 var elements_kind = {
   fast_smi_only            :  'fast smi only elements',
   fast                     :  'fast elements',
@@ -100,10 +85,6 @@
 }
 
 function assertKind(expected, obj, name_opt) {
-  if (!support_smi_only_arrays &&
-      expected == elements_kind.fast_smi_only) {
-    expected = elements_kind.fast;
-  }
   assertEquals(expected, getKind(obj), name_opt);
 }
 
@@ -113,53 +94,51 @@
 %NeverOptimizeFunction(convert_mixed);
 for (var i = 0; i < 1000000; i++) { }
 
-if (support_smi_only_arrays) {
-  // This code exists to eliminate the learning influence of AllocationSites
-  // on the following tests.
-  var __sequence = 0;
-  function make_array_string() {
-    this.__sequence = this.__sequence + 1;
-    return "/* " + this.__sequence + " */  [0, 0, 0];"
-  }
-  function make_array() {
-    return eval(make_array_string());
-  }
-
-  function construct_smis() {
-    var a = make_array();
-    a[0] = 0;  // Send the COW array map to the steak house.
-    assertKind(elements_kind.fast_smi_only, a);
-    return a;
-  }
-  function construct_doubles() {
-    var a = construct_smis();
-    a[0] = 1.5;
-    assertKind(elements_kind.fast_double, a);
-    return a;
-  }
-
-  // Test transition chain SMI->DOUBLE->FAST (crankshafted function will
-  // transition to FAST directly).
-  function convert_mixed(array, value, kind) {
-    array[1] = value;
-    assertKind(kind, array);
-    assertEquals(value, array[1]);
-  }
-  smis = construct_smis();
-  convert_mixed(smis, 1.5, elements_kind.fast_double);
-
-  doubles = construct_doubles();
-  convert_mixed(doubles, "three", elements_kind.fast);
-
-  convert_mixed(construct_smis(), "three", elements_kind.fast);
-  convert_mixed(construct_doubles(), "three", elements_kind.fast);
-
-  smis = construct_smis();
-  doubles = construct_doubles();
-  convert_mixed(smis, 1, elements_kind.fast);
-  convert_mixed(doubles, 1, elements_kind.fast);
-  assertTrue(%HaveSameMap(smis, doubles));
+// This code exists to eliminate the learning influence of AllocationSites
+// on the following tests.
+var __sequence = 0;
+function make_array_string() {
+  this.__sequence = this.__sequence + 1;
+  return "/* " + this.__sequence + " */  [0, 0, 0];"
 }
+function make_array() {
+  return eval(make_array_string());
+}
+
+function construct_smis() {
+  var a = make_array();
+  a[0] = 0;  // Send the COW array map to the steak house.
+  assertKind(elements_kind.fast_smi_only, a);
+  return a;
+}
+function construct_doubles() {
+  var a = construct_smis();
+  a[0] = 1.5;
+  assertKind(elements_kind.fast_double, a);
+  return a;
+}
+
+// Test transition chain SMI->DOUBLE->FAST (crankshafted function will
+// transition to FAST directly).
+function convert_mixed(array, value, kind) {
+  array[1] = value;
+  assertKind(kind, array);
+  assertEquals(value, array[1]);
+}
+smis = construct_smis();
+convert_mixed(smis, 1.5, elements_kind.fast_double);
+
+doubles = construct_doubles();
+convert_mixed(doubles, "three", elements_kind.fast);
+
+convert_mixed(construct_smis(), "three", elements_kind.fast);
+convert_mixed(construct_doubles(), "three", elements_kind.fast);
+
+smis = construct_smis();
+doubles = construct_doubles();
+convert_mixed(smis, 1, elements_kind.fast);
+convert_mixed(doubles, 1, elements_kind.fast);
+assertTrue(%HaveSameMap(smis, doubles));
 
 // Throw away type information in the ICs for next stress run.
 gc();
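
A minimal sketch of the SMI->DOUBLE->FAST transition chain these tests
exercise, assuming d8 is run with --allow-natives-syntax so the %Has* probes
are available:

var a = [1, 2, 3];   // fresh literal: fast smi-only elements
a[0] = 1.5;          // storing a heap number transitions SMI -> DOUBLE
a[1] = "three";      // storing a non-number transitions DOUBLE -> FAST
// At this point %HasFastObjectElements(a) would report true.
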
diff --git a/test/mjsunit/override-read-only-property.js b/test/mjsunit/override-read-only-property.js
index 2876ae1..f8114a6 100644
--- a/test/mjsunit/override-read-only-property.js
+++ b/test/mjsunit/override-read-only-property.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --es5_readonly
-
 // According to ECMA-262, sections 8.6.2.2 and 8.6.2.3 you're not
 // allowed to override read-only properties, not even if the read-only
 // property is in the prototype chain.
diff --git a/test/mjsunit/own-symbols.js b/test/mjsunit/own-symbols.js
new file mode 100644
index 0000000..588a032
--- /dev/null
+++ b/test/mjsunit/own-symbols.js
@@ -0,0 +1,55 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --allow-natives-syntax
+
+var s = %CreatePrivateOwnSymbol("s");
+var s1 = %CreatePrivateOwnSymbol("s1");
+
+function TestSimple() {
+  var p = {}
+  p[s] = "moo";
+
+  var o = Object.create(p);
+
+  assertEquals(undefined, o[s]);
+  assertEquals("moo", p[s]);
+
+  o[s] = "bow-wow";
+  assertEquals("bow-wow", o[s]);
+  assertEquals("moo", p[s]);
+}
+
+TestSimple();
+
+
+function TestICs() {
+  var p = {}
+  p[s] = "moo";
+
+
+  var o = Object.create(p);
+  o[s1] = "bow-wow";
+  function checkNonOwn(o) {
+    assertEquals(undefined, o[s]);
+    assertEquals("bow-wow", o[s1]);
+  }
+
+  checkNonOwn(o);
+
+  // Test monomorphic/optimized.
+  for (var i = 0; i < 1000; i++) {
+    checkNonOwn(o);
+  }
+
+  // Test non-monomorphic.
+  for (var i = 0; i < 1000; i++) {
+    var oNew = Object.create(p);
+    oNew["s" + i] = i;
+    oNew[s1] = "bow-wow";
+    checkNonOwn(oNew);
+  }
+}
+
+TestICs();
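
The %CreatePrivateOwnSymbol symbols above are engine-internal and skip
prototype lookups; for contrast, a sketch of how an ordinary ES6 Symbol
behaves, runnable without any flags:

var sym = Symbol("s");
var proto = {};
proto[sym] = "moo";
var o = Object.create(proto);
console.log(o[sym]);     // "moo": ordinary symbols are found on the prototype
o[sym] = "bow-wow";
console.log(o[sym]);     // "bow-wow": the own property shadows the prototype
console.log(proto[sym]); // "moo": the prototype is untouched
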
diff --git a/test/mjsunit/packed-elements.js b/test/mjsunit/packed-elements.js
index 4a87373..3ce92d1 100644
--- a/test/mjsunit/packed-elements.js
+++ b/test/mjsunit/packed-elements.js
@@ -25,9 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --packed-arrays
-
-var has_packed_elements = !%HasFastHoleyElements(Array());
+// Flags: --allow-natives-syntax
 
 function test1() {
   var a = Array(8);
@@ -101,11 +99,9 @@
   for (i = 0; i < 25000; ++i) f(); // Make sure GC happens
 }
 
-if (has_packed_elements) {
-  test_with_optimization(test1);
-  test_with_optimization(test2);
-  test_with_optimization(test3);
-  test_with_optimization(test4);
-  test_with_optimization(test5);
-  test_with_optimization(test6);
-}
+test_with_optimization(test1);
+test_with_optimization(test2);
+test_with_optimization(test3);
+test_with_optimization(test4);
+test_with_optimization(test5);
+test_with_optimization(test6);
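
As a quick reference for the packed/holey distinction this test now exercises
unconditionally, a minimal sketch (the %HasFastHoleyElements probe assumes
--allow-natives-syntax):

var packed = [1, 2, 3];    // gap-free literal: packed elements
var holey = new Array(8);  // eight holes: holey elements from the start
delete packed[1];          // punching a hole transitions packed -> holey
// %HasFastHoleyElements would now report true for both arrays.
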
diff --git a/test/mjsunit/polymorph-arrays.js b/test/mjsunit/polymorph-arrays.js
index ff0c433..2bb0433 100644
--- a/test/mjsunit/polymorph-arrays.js
+++ b/test/mjsunit/polymorph-arrays.js
@@ -37,7 +37,7 @@
     a[i] = i;
   }
   a[5000000] = 256;
-  assertTrue(%HasDictionaryElements(a));
+  return %NormalizeElements(a);
 }
 
 function testPolymorphicLoads() {
@@ -49,7 +49,7 @@
     var object_array = new Object;
     var sparse_object_array = new Object;
     var js_array = new Array(10);
-    var sparse_js_array = new Array(5000001);
+    var sparse_js_array = %NormalizeElements([]);
 
     init_array(object_array);
     init_array(js_array);
@@ -67,7 +67,7 @@
   var object_array = new Object;
   var sparse_object_array = new Object;
   var js_array = new Array(10);
-  var sparse_js_array = new Array(5000001);
+  var sparse_js_array = %NormalizeElements([]);
 
   init_array(object_array);
   init_array(js_array);
@@ -114,7 +114,8 @@
     var object_array = new Object;
     var sparse_object_array = new Object;
     var js_array = new Array(10);
-    var sparse_js_array = new Array(5000001);
+    var sparse_js_array = [];
+    sparse_js_array.length = 5000001;
 
     init_array(object_array);
     init_array(js_array);
@@ -132,7 +133,8 @@
   var object_array = new Object;
   var sparse_object_array = new Object;
   var js_array = new Array(10);
-  var sparse_js_array = new Array(5000001);
+  var sparse_js_array = %NormalizeElements([]);
+  sparse_js_array.length = 5000001;
 
   init_array(object_array);
   init_array(js_array);
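
The hunks above swap huge sparse allocations for %NormalizeElements, which
puts an array into dictionary (slow) mode directly instead of relying on an
allocation heuristic. A sketch of both routes, assuming --allow-natives-syntax
for the second:

var sparse = [];
sparse[5000000] = 256;                // an index far past length dictionary-izes
var direct = %NormalizeElements([]);  // forces dictionary elements immediately
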
diff --git a/test/mjsunit/proto-accessor.js b/test/mjsunit/proto-accessor.js
index b2e7d34..513a044 100644
--- a/test/mjsunit/proto-accessor.js
+++ b/test/mjsunit/proto-accessor.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-symbols
-
 // Fake Symbol if undefined, allowing test to run in non-Harmony mode as well.
 this.Symbol = typeof Symbol != 'undefined' ? Symbol : String;
 
diff --git a/test/mjsunit/readonly.js b/test/mjsunit/readonly.js
index 050e256..084e9ff 100644
--- a/test/mjsunit/readonly.js
+++ b/test/mjsunit/readonly.js
@@ -25,8 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --es5_readonly
-// Flags: --harmony-proxies
+// Flags: --allow-natives-syntax --harmony-proxies
 
 // Different ways to create an object.
 
diff --git a/test/mjsunit/regexp-not-sticky-yet.js b/test/mjsunit/regexp-not-sticky-yet.js
new file mode 100644
index 0000000..4186a63
--- /dev/null
+++ b/test/mjsunit/regexp-not-sticky-yet.js
@@ -0,0 +1,65 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Test that sticky regexp support does not affect V8 when the
+// --harmony-regexps flag is not on.
+
+assertThrows(function() { eval("/foo.bar/y"); }, SyntaxError);
+assertThrows(function() { eval("/foobar/y"); }, SyntaxError);
+assertThrows(function() { eval("/foo.bar/gy"); }, SyntaxError);
+assertThrows(function() { eval("/foobar/gy"); }, SyntaxError);
+assertThrows(function() { new RegExp("foo.bar", "y"); }, SyntaxError);
+assertThrows(function() { new RegExp("foobar", "y"); }, SyntaxError);
+assertThrows(function() { new RegExp("foo.bar", "gy"); }, SyntaxError);
+assertThrows(function() { new RegExp("foobar", "gy"); }, SyntaxError);
+
+var re = /foo.bar/;
+assertEquals("/foo.bar/", "" + re);
+var plain = /foobar/;
+assertEquals("/foobar/", "" + plain);
+
+re.compile("foo.bar");
+assertEquals(void 0, re.sticky);
+
+var global = /foo.bar/g;
+assertEquals("/foo.bar/g", "" + global);
+var plainglobal = /foobar/g;
+assertEquals("/foobar/g", "" + plainglobal);
+
+assertEquals(void 0, re.sticky);
+re.sticky = true; // Has no effect on the regexp, just sets a property.
+assertTrue(re.sticky);
+
+assertTrue(re.test("..foo.bar"));
+
+re.lastIndex = -1; // Ignored for non-global, non-sticky.
+assertTrue(re.test("..foo.bar"));
+assertEquals(-1, re.lastIndex);
+
+re.lastIndex = -1; // Ignored for non-global, non-sticky.
+assertTrue(!!re.exec("..foo.bar"));
+assertEquals(-1, re.lastIndex);
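
For contrast with the non-global, non-sticky probes above, a sketch of how a
/g regexp does honor lastIndex (plain ES5, no flags needed):

var g = /foo.bar/g;
console.log(g.test("..foo.bar"));  // true: match found at index 2
console.log(g.lastIndex);          // 9: advanced past the match
console.log(g.test("..foo.bar"));  // false: search resumes at index 9
console.log(g.lastIndex);          // 0: reset after the failed attempt
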
diff --git a/test/mjsunit/regress-3225.js b/test/mjsunit/regress-3225.js
index 357f94b..fe44b85 100644
--- a/test/mjsunit/regress-3225.js
+++ b/test/mjsunit/regress-3225.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --expose-debug-as debug  --harmony-generators
+// Flags: --expose-debug-as debug
 
 Debug = debug.Debug
 
diff --git a/test/mjsunit/regress-3456.js b/test/mjsunit/regress-3456.js
new file mode 100644
index 0000000..498953b
--- /dev/null
+++ b/test/mjsunit/regress-3456.js
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --min-preparse-length 1
+
+// Arrow function parsing (commit r22366) changed the flags stored in
+// PreParserExpression, and IsValidReferenceExpression() would return
+// false for certain valid expressions. This case is the minimum amount
+// of code needed to validate that IsValidReferenceExpression() works
+// properly. If it does not, a ReferenceError is thrown during parsing.
+
+function f() { ++(this.foo) }
diff --git a/test/mjsunit/regress/poly_count_operation.js b/test/mjsunit/regress/poly_count_operation.js
index a8a1ed2..99f041b 100644
--- a/test/mjsunit/regress/poly_count_operation.js
+++ b/test/mjsunit/regress/poly_count_operation.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --turbo-deoptimization
 
 var o1 = {x:1};
 var o2 = {};
diff --git a/test/mjsunit/regress/regress-1170.js b/test/mjsunit/regress/regress-1170.js
index 8c5f6f8..5d5800e 100644
--- a/test/mjsunit/regress/regress-1170.js
+++ b/test/mjsunit/regress/regress-1170.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --es52_globals
-
 var setter_value = 0;
 
 this.__defineSetter__("a", function(v) { setter_value = v; });
@@ -35,8 +33,9 @@
 assertFalse("value" in Object.getOwnPropertyDescriptor(this, "a"));
 
 eval("with({}) { eval('var a = 2') }");
-assertEquals(2, setter_value);
+assertTrue("get" in Object.getOwnPropertyDescriptor(this, "a"));
 assertFalse("value" in Object.getOwnPropertyDescriptor(this, "a"));
+assertEquals(2, setter_value);
 
 // Function declarations are treated specially to match Safari. We do
 // not call setters for them.
@@ -47,10 +46,8 @@
 this.__defineSetter__("b", function(v) { setter_value = v; });
 try {
   eval("const b = 3");
-} catch(e) {
-  assertUnreachable();
-}
-assertEquals(3, setter_value);
+} catch(e) { }
+assertEquals(2, setter_value);
 
 try {
   eval("with({}) { eval('const b = 23') }");
diff --git a/test/mjsunit/regress/regress-1170187.js b/test/mjsunit/regress/regress-1170187.js
index 5e82f8a..3621bc4 100644
--- a/test/mjsunit/regress/regress-1170187.js
+++ b/test/mjsunit/regress/regress-1170187.js
@@ -26,6 +26,8 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
+
 // Make sure that the retrieval of local variables is performed correctly even
 // when an adapter frame is present.
 
diff --git a/test/mjsunit/regress/regress-119609.js b/test/mjsunit/regress/regress-119609.js
index 99041ad..0c85063 100644
--- a/test/mjsunit/regress/regress-119609.js
+++ b/test/mjsunit/regress/regress-119609.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
 
 Debug = debug.Debug;
 
diff --git a/test/mjsunit/regress/regress-1199637.js b/test/mjsunit/regress/regress-1199637.js
index 8b02a65..397aeb8 100644
--- a/test/mjsunit/regress/regress-1199637.js
+++ b/test/mjsunit/regress/regress-1199637.js
@@ -34,43 +34,43 @@
 const READ_ONLY = 1;
 
 // Use DeclareGlobal...
-%SetProperty(this.__proto__, "a", 1234, NONE);
+%AddNamedProperty(this.__proto__, "a", 1234, NONE);
 assertEquals(1234, a);
 eval("var a = 5678;");
 assertEquals(5678, a);
 
-%SetProperty(this.__proto__, "b", 1234, NONE);
+%AddNamedProperty(this.__proto__, "b", 1234, NONE);
 assertEquals(1234, b);
 eval("const b = 5678;");
 assertEquals(5678, b);
 
-%SetProperty(this.__proto__, "c", 1234, READ_ONLY);
+%AddNamedProperty(this.__proto__, "c", 1234, READ_ONLY);
 assertEquals(1234, c);
 eval("var c = 5678;");
 assertEquals(5678, c);
 
-%SetProperty(this.__proto__, "d", 1234, READ_ONLY);
+%AddNamedProperty(this.__proto__, "d", 1234, READ_ONLY);
 assertEquals(1234, d);
 eval("const d = 5678;");
 assertEquals(5678, d);
 
 // Use DeclareContextSlot...
-%SetProperty(this.__proto__, "x", 1234, NONE);
+%AddNamedProperty(this.__proto__, "x", 1234, NONE);
 assertEquals(1234, x);
 eval("with({}) { var x = 5678; }");
 assertEquals(5678, x);
 
-%SetProperty(this.__proto__, "y", 1234, NONE);
+%AddNamedProperty(this.__proto__, "y", 1234, NONE);
 assertEquals(1234, y);
 eval("with({}) { const y = 5678; }");
 assertEquals(5678, y);
 
-%SetProperty(this.__proto__, "z", 1234, READ_ONLY);
+%AddNamedProperty(this.__proto__, "z", 1234, READ_ONLY);
 assertEquals(1234, z);
 eval("with({}) { var z = 5678; }");
 assertEquals(5678, z);
 
-%SetProperty(this.__proto__, "w", 1234, READ_ONLY);
+%AddNamedProperty(this.__proto__, "w", 1234, READ_ONLY);
 assertEquals(1234, w);
 eval("with({}) { const w = 5678; }");
 assertEquals(5678, w);
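
A plain-JS sketch of the declaration semantics this test pins down, with no
natives needed: in a sloppy-mode script, a global var declaration ends up as
an own, writable binding on the global object, so the initializer's value wins
over a property inherited through the global's prototype:

Object.prototype.q = 1234;              // visible through the global's prototype
console.log(q);                         // 1234
eval("var q = 5678;");
console.log(q);                         // 5678
console.log(this.hasOwnProperty("q"));  // true: q now lives on the global itself
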
diff --git a/test/mjsunit/regress/regress-1213575.js b/test/mjsunit/regress/regress-1213575.js
index f3a11db..8c197bc 100644
--- a/test/mjsunit/regress/regress-1213575.js
+++ b/test/mjsunit/regress/regress-1213575.js
@@ -37,4 +37,4 @@
   assertTrue(e instanceof TypeError);
   caught = true;
 }
-assertFalse(caught);
+assertTrue(caught);
diff --git a/test/mjsunit/regress/regress-131994.js b/test/mjsunit/regress/regress-131994.js
index 7f60095..3de3813 100644
--- a/test/mjsunit/regress/regress-131994.js
+++ b/test/mjsunit/regress/regress-131994.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
 
 // Test that a variable in the local scope that shadows a context-allocated
 // variable is correctly resolved when being evaluated in the debugger.
diff --git a/test/mjsunit/regress/regress-1530.js b/test/mjsunit/regress/regress-1530.js
index db21144..20d1f26 100644
--- a/test/mjsunit/regress/regress-1530.js
+++ b/test/mjsunit/regress/regress-1530.js
@@ -33,34 +33,52 @@
 
 // Verify that normal assignment of 'prototype' property works properly
 // and updates the internal value.
-var x = { foo: 'bar' };
-f.prototype = x;
-assertSame(f.prototype, x);
+var a = { foo: 'bar' };
+f.prototype = a;
+assertSame(f.prototype, a);
 assertSame(f.prototype.foo, 'bar');
 assertSame(new f().foo, 'bar');
-assertSame(Object.getPrototypeOf(new f()), x);
-assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, x);
+assertSame(Object.getPrototypeOf(new f()), a);
+assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, a);
+assertTrue(Object.getOwnPropertyDescriptor(f, 'prototype').writable);
 
 // Verify that 'prototype' behaves like a data property when it comes to
 // redefining with Object.defineProperty() and the internal value gets
 // updated.
-var y = { foo: 'baz' };
-Object.defineProperty(f, 'prototype', { value: y, writable: true });
-assertSame(f.prototype, y);
+var b = { foo: 'baz' };
+Object.defineProperty(f, 'prototype', { value: b, writable: true });
+assertSame(f.prototype, b);
 assertSame(f.prototype.foo, 'baz');
 assertSame(new f().foo, 'baz');
-assertSame(Object.getPrototypeOf(new f()), y);
-assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, y);
+assertSame(Object.getPrototypeOf(new f()), b);
+assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, b);
+assertTrue(Object.getOwnPropertyDescriptor(f, 'prototype').writable);
 
 // Verify that the previous redefinition didn't screw up callbacks and
 // the internal value still gets updated.
-var z = { foo: 'other' };
-f.prototype = z;
-assertSame(f.prototype, z);
+var c = { foo: 'other' };
+f.prototype = c;
+assertSame(f.prototype, c);
 assertSame(f.prototype.foo, 'other');
 assertSame(new f().foo, 'other');
-assertSame(Object.getPrototypeOf(new f()), z);
-assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, z);
+assertSame(Object.getPrototypeOf(new f()), c);
+assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, c);
+assertTrue(Object.getOwnPropertyDescriptor(f, 'prototype').writable);
+
+// Verify that 'prototype' can be redefined to contain a different value
+// and have a different writability attribute at the same time.
+var d = { foo: 'final' };
+Object.defineProperty(f, 'prototype', { value: d, writable: false });
+assertSame(f.prototype, d);
+assertSame(f.prototype.foo, 'final');
+assertSame(new f().foo, 'final');
+assertSame(Object.getPrototypeOf(new f()), d);
+assertSame(Object.getOwnPropertyDescriptor(f, 'prototype').value, d);
+assertFalse(Object.getOwnPropertyDescriptor(f, 'prototype').writable);
+
+// Verify that non-writability of redefined 'prototype' is respected.
+assertThrows("'use strict'; f.prototype = {}");
+assertThrows("Object.defineProperty(f, 'prototype', { value: {} })");
 
 // Verify that non-writability of other properties is respected.
 assertThrows("Object.defineProperty(f, 'name', { value: {} })");
diff --git a/test/mjsunit/regress/regress-2336.js b/test/mjsunit/regress/regress-2336.js
deleted file mode 100644
index edfff60..0000000
--- a/test/mjsunit/regress/regress-2336.js
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --expose-debug-as debug --expose-gc
-
-// Check that we can cope with a debug listener that runs in the
-// GC epilogue and causes enough allocation to trigger a new GC during
-// the epilogue.
-
-var f = eval("(function f() { return 42; })");
-
-Debug = debug.Debug;
-
-var called = false;
-
-function listener(event, exec_state, event_data, data) {
-  if (event == Debug.DebugEvent.ScriptCollected) {
-    if (!called) {
-      called = true;
-      gc();
-    }
-  }
-};
-
-Debug.scripts();
-Debug.setListener(listener);
-f = void 0;
-gc();
-assertTrue(called);
diff --git a/test/mjsunit/regress/regress-2790.js b/test/mjsunit/regress/regress-2790.js
index 927f260..ac79e64 100644
--- a/test/mjsunit/regress/regress-2790.js
+++ b/test/mjsunit/regress/regress-2790.js
@@ -26,6 +26,6 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Test that we can create arrays of any size.
-for (var i = 1000; i < 1000000; i += 197) {
+for (var i = 1000; i < 1000000; i += 19703) {
   new Array(i);
 }
diff --git a/test/mjsunit/regress/regress-3116.js b/test/mjsunit/regress/regress-3116.js
new file mode 100644
index 0000000..ca55ccc
--- /dev/null
+++ b/test/mjsunit/regress/regress-3116.js
@@ -0,0 +1,314 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function timezone(tz) {
+  var str = (new Date(2014, 0, 10)).toString();
+  if (tz == "CET") {
+    return str == "Fri Jan 10 2014 00:00:00 GMT+0100 (CET)";
+  }
+  if (tz == "BRT") {
+    return str == "Fri Jan 10 2014 00:00:00 GMT-0200 (BRST)";
+  }
+  if (tz == "PST") {
+    return str == "Fri Jan 10 2014 00:00:00 GMT-0800 (PST)";
+  }
+  return false;
+}
+
+if (timezone("CET")) {
+  assertEquals("Sat Mar 29 2014 22:59:00 GMT+0100 (CET)",
+               (new Date(2014, 2, 29, 22, 59)).toString());
+  assertEquals("Sat, 29 Mar 2014 21:59:00 GMT",
+               (new Date(2014, 2, 29, 22, 59)).toUTCString());
+  assertEquals("Sat Mar 29 2014 23:00:00 GMT+0100 (CET)",
+               (new Date(2014, 2, 29, 23, 0)).toString());
+  assertEquals("Sat, 29 Mar 2014 22:00:00 GMT",
+               (new Date(2014, 2, 29, 23, 0)).toUTCString());
+  assertEquals("Sat Mar 29 2014 23:59:00 GMT+0100 (CET)",
+               (new Date(2014, 2, 29, 23, 59)).toString());
+  assertEquals("Sat, 29 Mar 2014 22:59:00 GMT",
+               (new Date(2014, 2, 29, 23, 59)).toUTCString());
+  assertEquals("Sun Mar 30 2014 00:00:00 GMT+0100 (CET)",
+               (new Date(2014, 2, 30, 0, 0)).toString());
+  assertEquals("Sat, 29 Mar 2014 23:00:00 GMT",
+               (new Date(2014, 2, 30, 0, 0)).toUTCString());
+  assertEquals("Sun Mar 30 2014 00:59:00 GMT+0100 (CET)",
+               (new Date(2014, 2, 30, 0, 59)).toString());
+  assertEquals("Sat, 29 Mar 2014 23:59:00 GMT",
+               (new Date(2014, 2, 30, 0, 59)).toUTCString());
+  assertEquals("Sun Mar 30 2014 01:00:00 GMT+0100 (CET)",
+               (new Date(2014, 2, 30, 1, 0)).toString());
+  assertEquals("Sun, 30 Mar 2014 00:00:00 GMT",
+               (new Date(2014, 2, 30, 1, 0)).toUTCString());
+  assertEquals("Sun Mar 30 2014 01:59:00 GMT+0100 (CET)",
+               (new Date(2014, 2, 30, 1, 59)).toString());
+  assertEquals("Sun, 30 Mar 2014 00:59:00 GMT",
+               (new Date(2014, 2, 30, 1, 59)).toUTCString());
+  assertEquals("Sun Mar 30 2014 03:00:00 GMT+0200 (CEST)",
+               (new Date(2014, 2, 30, 2, 0)).toString());
+  assertEquals("Sun, 30 Mar 2014 01:00:00 GMT",
+               (new Date(2014, 2, 30, 2, 0)).toUTCString());
+  assertEquals("Sun Mar 30 2014 03:59:00 GMT+0200 (CEST)",
+               (new Date(2014, 2, 30, 2, 59)).toString());
+  assertEquals("Sun, 30 Mar 2014 01:59:00 GMT",
+               (new Date(2014, 2, 30, 2, 59)).toUTCString());
+  assertEquals("Sun Mar 30 2014 03:00:00 GMT+0200 (CEST)",
+               (new Date(2014, 2, 30, 3, 0)).toString());
+  assertEquals("Sun, 30 Mar 2014 01:00:00 GMT",
+               (new Date(2014, 2, 30, 3, 0)).toUTCString());
+  assertEquals("Sun Mar 30 2014 03:59:00 GMT+0200 (CEST)",
+               (new Date(2014, 2, 30, 3, 59)).toString());
+  assertEquals("Sun, 30 Mar 2014 01:59:00 GMT",
+               (new Date(2014, 2, 30, 3, 59)).toUTCString());
+  assertEquals("Sun Mar 30 2014 04:00:00 GMT+0200 (CEST)",
+               (new Date(2014, 2, 30, 4, 0)).toString());
+  assertEquals("Sun, 30 Mar 2014 02:00:00 GMT",
+               (new Date(2014, 2, 30, 4, 0)).toUTCString());
+  assertEquals("Sat Oct 25 2014 22:59:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 25, 22, 59)).toString());
+  assertEquals("Sat, 25 Oct 2014 20:59:00 GMT",
+               (new Date(2014, 9, 25, 22, 59)).toUTCString());
+  assertEquals("Sat Oct 25 2014 23:00:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 25, 23, 0)).toString());
+  assertEquals("Sat, 25 Oct 2014 21:00:00 GMT",
+               (new Date(2014, 9, 25, 23, 0)).toUTCString());
+  assertEquals("Sat Oct 25 2014 23:59:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 25, 23, 59)).toString());
+  assertEquals("Sat, 25 Oct 2014 21:59:00 GMT",
+               (new Date(2014, 9, 25, 23, 59)).toUTCString());
+  assertEquals("Sun Oct 26 2014 00:00:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 26, 0, 0)).toString());
+  assertEquals("Sat, 25 Oct 2014 22:00:00 GMT",
+               (new Date(2014, 9, 26, 0, 0)).toUTCString());
+  assertEquals("Sun Oct 26 2014 00:59:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 26, 0, 59)).toString());
+  assertEquals("Sat, 25 Oct 2014 22:59:00 GMT",
+               (new Date(2014, 9, 26, 0, 59)).toUTCString());
+  assertEquals("Sun Oct 26 2014 01:00:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 26, 1, 0)).toString());
+  assertEquals("Sat, 25 Oct 2014 23:00:00 GMT",
+               (new Date(2014, 9, 26, 1, 0)).toUTCString());
+  assertEquals("Sun Oct 26 2014 01:59:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 26, 1, 59)).toString());
+  assertEquals("Sat, 25 Oct 2014 23:59:00 GMT",
+               (new Date(2014, 9, 26, 1, 59)).toUTCString());
+  assertEquals("Sun Oct 26 2014 02:00:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 26, 2, 0)).toString());
+  assertEquals("Sun, 26 Oct 2014 00:00:00 GMT",
+               (new Date(2014, 9, 26, 2, 0)).toUTCString());
+  assertEquals("Sun Oct 26 2014 02:59:00 GMT+0200 (CEST)",
+               (new Date(2014, 9, 26, 2, 59)).toString());
+  assertEquals("Sun, 26 Oct 2014 00:59:00 GMT",
+               (new Date(2014, 9, 26, 2, 59)).toUTCString());
+  assertEquals("Sun Oct 26 2014 03:00:00 GMT+0100 (CET)",
+               (new Date(2014, 9, 26, 3, 0)).toString());
+  assertEquals("Sun, 26 Oct 2014 02:00:00 GMT",
+               (new Date(2014, 9, 26, 3, 0)).toUTCString());
+  assertEquals("Sun Oct 26 2014 03:59:00 GMT+0100 (CET)",
+               (new Date(2014, 9, 26, 3, 59)).toString());
+  assertEquals("Sun, 26 Oct 2014 02:59:00 GMT",
+               (new Date(2014, 9, 26, 3, 59)).toUTCString());
+  assertEquals("Sun Oct 26 2014 04:00:00 GMT+0100 (CET)",
+               (new Date(2014, 9, 26, 4, 0)).toString());
+  assertEquals("Sun, 26 Oct 2014 03:00:00 GMT",
+               (new Date(2014, 9, 26, 4, 0)).toUTCString());
+}
+
+if (timezone("BRT")) {
+  assertEquals("Sat Oct 18 2014 22:59:00 GMT-0300 (BRT)",
+               (new Date(2014, 9, 18, 22, 59)).toString());
+  assertEquals("Sun, 19 Oct 2014 01:59:00 GMT",
+               (new Date(2014, 9, 18, 22, 59)).toUTCString());
+  assertEquals("Sat Oct 18 2014 23:00:00 GMT-0300 (BRT)",
+               (new Date(2014, 9, 18, 23, 0)).toString());
+  assertEquals("Sun, 19 Oct 2014 02:00:00 GMT",
+               (new Date(2014, 9, 18, 23, 0)).toUTCString());
+  assertEquals("Sat Oct 18 2014 23:59:00 GMT-0300 (BRT)",
+               (new Date(2014, 9, 18, 23, 59)).toString());
+  assertEquals("Sun, 19 Oct 2014 02:59:00 GMT",
+               (new Date(2014, 9, 18, 23, 59)).toUTCString());
+  assertEquals("Sun Oct 19 2014 01:00:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 0, 0)).toString());
+  assertEquals("Sun, 19 Oct 2014 03:00:00 GMT",
+               (new Date(2014, 9, 19, 0, 0)).toUTCString());
+  assertEquals("Sun Oct 19 2014 01:59:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 0, 59)).toString());
+  assertEquals("Sun, 19 Oct 2014 03:59:00 GMT",
+               (new Date(2014, 9, 19, 0, 59)).toUTCString());
+  assertEquals("Sun Oct 19 2014 01:00:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 1, 0)).toString());
+  assertEquals("Sun, 19 Oct 2014 03:00:00 GMT",
+               (new Date(2014, 9, 19, 1, 0)).toUTCString());
+  assertEquals("Sun Oct 19 2014 01:59:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 1, 59)).toString());
+  assertEquals("Sun, 19 Oct 2014 03:59:00 GMT",
+               (new Date(2014, 9, 19, 1, 59)).toUTCString());
+  assertEquals("Sun Oct 19 2014 02:00:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 2, 0)).toString());
+  assertEquals("Sun, 19 Oct 2014 04:00:00 GMT",
+               (new Date(2014, 9, 19, 2, 0)).toUTCString());
+  assertEquals("Sun Oct 19 2014 02:59:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 2, 59)).toString());
+  assertEquals("Sun, 19 Oct 2014 04:59:00 GMT",
+               (new Date(2014, 9, 19, 2, 59)).toUTCString());
+  assertEquals("Sun Oct 19 2014 03:00:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 3, 0)).toString());
+  assertEquals("Sun, 19 Oct 2014 05:00:00 GMT",
+               (new Date(2014, 9, 19, 3, 0)).toUTCString());
+  assertEquals("Sun Oct 19 2014 03:59:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 3, 59)).toString());
+  assertEquals("Sun, 19 Oct 2014 05:59:00 GMT",
+               (new Date(2014, 9, 19, 3, 59)).toUTCString());
+  assertEquals("Sun Oct 19 2014 04:00:00 GMT-0200 (BRST)",
+               (new Date(2014, 9, 19, 4, 0)).toString());
+  assertEquals("Sun, 19 Oct 2014 06:00:00 GMT",
+               (new Date(2014, 9, 19, 4, 0)).toUTCString());
+  assertEquals("Sat Feb 15 2014 22:59:00 GMT-0200 (BRST)",
+               (new Date(2014, 1, 15, 22, 59)).toString());
+  assertEquals("Sun, 16 Feb 2014 00:59:00 GMT",
+               (new Date(2014, 1, 15, 22, 59)).toUTCString());
+  assertEquals("Sat Feb 15 2014 23:00:00 GMT-0200 (BRST)",
+               (new Date(2014, 1, 15, 23, 0)).toString());
+  assertEquals("Sun, 16 Feb 2014 01:00:00 GMT",
+               (new Date(2014, 1, 15, 23, 0)).toUTCString());
+  assertEquals("Sat Feb 15 2014 23:59:00 GMT-0200 (BRST)",
+               (new Date(2014, 1, 15, 23, 59)).toString());
+  assertEquals("Sun, 16 Feb 2014 01:59:00 GMT",
+               (new Date(2014, 1, 15, 23, 59)).toUTCString());
+  assertEquals("Sun Feb 16 2014 00:00:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 0, 0)).toString());
+  assertEquals("Sun, 16 Feb 2014 03:00:00 GMT",
+               (new Date(2014, 1, 16, 0, 0)).toUTCString());
+  assertEquals("Sun Feb 16 2014 00:59:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 0, 59)).toString());
+  assertEquals("Sun, 16 Feb 2014 03:59:00 GMT",
+               (new Date(2014, 1, 16, 0, 59)).toUTCString());
+  assertEquals("Sun Feb 16 2014 01:00:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 1, 0)).toString());
+  assertEquals("Sun, 16 Feb 2014 04:00:00 GMT",
+               (new Date(2014, 1, 16, 1, 0)).toUTCString());
+  assertEquals("Sun Feb 16 2014 01:59:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 1, 59)).toString());
+  assertEquals("Sun, 16 Feb 2014 04:59:00 GMT",
+               (new Date(2014, 1, 16, 1, 59)).toUTCString());
+  assertEquals("Sun Feb 16 2014 02:00:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 2, 0)).toString());
+  assertEquals("Sun, 16 Feb 2014 05:00:00 GMT",
+               (new Date(2014, 1, 16, 2, 0)).toUTCString());
+  assertEquals("Sun Feb 16 2014 02:59:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 2, 59)).toString());
+  assertEquals("Sun, 16 Feb 2014 05:59:00 GMT",
+               (new Date(2014, 1, 16, 2, 59)).toUTCString());
+  assertEquals("Sun Feb 16 2014 03:00:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 3, 0)).toString());
+  assertEquals("Sun, 16 Feb 2014 06:00:00 GMT",
+               (new Date(2014, 1, 16, 3, 0)).toUTCString());
+  assertEquals("Sun Feb 16 2014 03:59:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 3, 59)).toString());
+  assertEquals("Sun, 16 Feb 2014 06:59:00 GMT",
+               (new Date(2014, 1, 16, 3, 59)).toUTCString());
+  assertEquals("Sun Feb 16 2014 04:00:00 GMT-0300 (BRT)",
+               (new Date(2014, 1, 16, 4, 0)).toString());
+  assertEquals("Sun, 16 Feb 2014 07:00:00 GMT",
+               (new Date(2014, 1, 16, 4, 0)).toUTCString());
+}
+
+if (timezone("PST")) {
+  assertEquals("Sat Mar 08 2014 22:59:00 GMT-0800 (PST)",
+               (new Date(2014, 2, 8, 22, 59)).toString());
+  assertEquals("Sun, 09 Mar 2014 06:59:00 GMT",
+               (new Date(2014, 2, 8, 22, 59)).toUTCString());
+  assertEquals("Sat Mar 08 2014 23:00:00 GMT-0800 (PST)",
+               (new Date(2014, 2, 8, 23, 0)).toString());
+  assertEquals("Sun, 09 Mar 2014 07:00:00 GMT",
+               (new Date(2014, 2, 8, 23, 0)).toUTCString());
+  assertEquals("Sat Mar 08 2014 23:59:00 GMT-0800 (PST)",
+               (new Date(2014, 2, 8, 23, 59)).toString());
+  assertEquals("Sun, 09 Mar 2014 07:59:00 GMT",
+               (new Date(2014, 2, 8, 23, 59)).toUTCString());
+  assertEquals("Sun Mar 09 2014 00:00:00 GMT-0800 (PST)",
+               (new Date(2014, 2, 9, 0, 0)).toString());
+  assertEquals("Sun, 09 Mar 2014 08:00:00 GMT",
+               (new Date(2014, 2, 9, 0, 0)).toUTCString());
+  assertEquals("Sun Mar 09 2014 00:59:00 GMT-0800 (PST)",
+               (new Date(2014, 2, 9, 0, 59)).toString());
+  assertEquals("Sun, 09 Mar 2014 08:59:00 GMT",
+               (new Date(2014, 2, 9, 0, 59)).toUTCString());
+  assertEquals("Sun Mar 09 2014 01:00:00 GMT-0800 (PST)",
+               (new Date(2014, 2, 9, 1, 0)).toString());
+  assertEquals("Sun, 09 Mar 2014 09:00:00 GMT",
+               (new Date(2014, 2, 9, 1, 0)).toUTCString());
+  assertEquals("Sun Mar 09 2014 01:59:00 GMT-0800 (PST)",
+               (new Date(2014, 2, 9, 1, 59)).toString());
+  assertEquals("Sun, 09 Mar 2014 09:59:00 GMT",
+               (new Date(2014, 2, 9, 1, 59)).toUTCString());
+  assertEquals("Sun Mar 09 2014 03:00:00 GMT-0700 (PDT)",
+               (new Date(2014, 2, 9, 2, 0)).toString());
+  assertEquals("Sun, 09 Mar 2014 10:00:00 GMT",
+               (new Date(2014, 2, 9, 2, 0)).toUTCString());
+  assertEquals("Sun Mar 09 2014 03:59:00 GMT-0700 (PDT)",
+               (new Date(2014, 2, 9, 2, 59)).toString());
+  assertEquals("Sun, 09 Mar 2014 10:59:00 GMT",
+               (new Date(2014, 2, 9, 2, 59)).toUTCString());
+  assertEquals("Sun Mar 09 2014 03:00:00 GMT-0700 (PDT)",
+               (new Date(2014, 2, 9, 3, 0)).toString());
+  assertEquals("Sun, 09 Mar 2014 10:00:00 GMT",
+               (new Date(2014, 2, 9, 3, 0)).toUTCString());
+  assertEquals("Sun Mar 09 2014 03:59:00 GMT-0700 (PDT)",
+               (new Date(2014, 2, 9, 3, 59)).toString());
+  assertEquals("Sun, 09 Mar 2014 10:59:00 GMT",
+               (new Date(2014, 2, 9, 3, 59)).toUTCString());
+  assertEquals("Sun Mar 09 2014 04:00:00 GMT-0700 (PDT)",
+               (new Date(2014, 2, 9, 4, 0)).toString());
+  assertEquals("Sun, 09 Mar 2014 11:00:00 GMT",
+               (new Date(2014, 2, 9, 4, 0)).toUTCString());
+  assertEquals("Sat Nov 01 2014 22:59:00 GMT-0700 (PDT)",
+               (new Date(2014, 10, 1, 22, 59)).toString());
+  assertEquals("Sun, 02 Nov 2014 05:59:00 GMT",
+               (new Date(2014, 10, 1, 22, 59)).toUTCString());
+  assertEquals("Sat Nov 01 2014 23:00:00 GMT-0700 (PDT)",
+               (new Date(2014, 10, 1, 23, 0)).toString());
+  assertEquals("Sun, 02 Nov 2014 06:00:00 GMT",
+               (new Date(2014, 10, 1, 23, 0)).toUTCString());
+  assertEquals("Sat Nov 01 2014 23:59:00 GMT-0700 (PDT)",
+               (new Date(2014, 10, 1, 23, 59)).toString());
+  assertEquals("Sun, 02 Nov 2014 06:59:00 GMT",
+               (new Date(2014, 10, 1, 23, 59)).toUTCString());
+  assertEquals("Sun Nov 02 2014 00:00:00 GMT-0700 (PDT)",
+               (new Date(2014, 10, 2, 0, 0)).toString());
+  assertEquals("Sun, 02 Nov 2014 07:00:00 GMT",
+               (new Date(2014, 10, 2, 0, 0)).toUTCString());
+  assertEquals("Sun Nov 02 2014 00:59:00 GMT-0700 (PDT)",
+               (new Date(2014, 10, 2, 0, 59)).toString());
+  assertEquals("Sun, 02 Nov 2014 07:59:00 GMT",
+               (new Date(2014, 10, 2, 0, 59)).toUTCString());
+  assertEquals("Sun Nov 02 2014 01:00:00 GMT-0700 (PDT)",
+               (new Date(2014, 10, 2, 1, 0)).toString());
+  assertEquals("Sun, 02 Nov 2014 08:00:00 GMT",
+               (new Date(2014, 10, 2, 1, 0)).toUTCString());
+  assertEquals("Sun Nov 02 2014 01:59:00 GMT-0700 (PDT)",
+               (new Date(2014, 10, 2, 1, 59)).toString());
+  assertEquals("Sun, 02 Nov 2014 08:59:00 GMT",
+               (new Date(2014, 10, 2, 1, 59)).toUTCString());
+  assertEquals("Sun Nov 02 2014 02:00:00 GMT-0800 (PST)",
+               (new Date(2014, 10, 2, 2, 0)).toString());
+  assertEquals("Sun, 02 Nov 2014 10:00:00 GMT",
+               (new Date(2014, 10, 2, 2, 0)).toUTCString());
+  assertEquals("Sun Nov 02 2014 02:59:00 GMT-0800 (PST)",
+               (new Date(2014, 10, 2, 2, 59)).toString());
+  assertEquals("Sun, 02 Nov 2014 10:59:00 GMT",
+               (new Date(2014, 10, 2, 2, 59)).toUTCString());
+  assertEquals("Sun Nov 02 2014 03:00:00 GMT-0800 (PST)",
+               (new Date(2014, 10, 2, 3, 0)).toString());
+  assertEquals("Sun, 02 Nov 2014 11:00:00 GMT",
+               (new Date(2014, 10, 2, 3, 0)).toUTCString());
+  assertEquals("Sun Nov 02 2014 03:59:00 GMT-0800 (PST)",
+               (new Date(2014, 10, 2, 3, 59)).toString());
+  assertEquals("Sun, 02 Nov 2014 11:59:00 GMT",
+               (new Date(2014, 10, 2, 3, 59)).toUTCString());
+  assertEquals("Sun Nov 02 2014 04:00:00 GMT-0800 (PST)",
+               (new Date(2014, 10, 2, 4, 0)).toString());
+  assertEquals("Sun, 02 Nov 2014 12:00:00 GMT",
+               (new Date(2014, 10, 2, 4, 0)).toUTCString());
+}
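
The fixed strings above only hold on a host configured for the matching zone;
a more portable probe of the same DST boundaries uses getTimezoneOffset(),
for example around the CET spring-forward transition (assuming a CET host):

var before = new Date(2014, 2, 30, 1, 59);  // still CET, UTC+1
var after  = new Date(2014, 2, 30, 3, 0);   // CEST, UTC+2
console.log(before.getTimezoneOffset());    // -60 on a CET host
console.log(after.getTimezoneOffset());     // -120 on a CET host
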
diff --git a/test/mjsunit/regress/regress-320532.js b/test/mjsunit/regress/regress-320532.js
index 6ec4b97..0c3198f 100644
--- a/test/mjsunit/regress/regress-320532.js
+++ b/test/mjsunit/regress/regress-320532.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 //
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
 // Flags: --noalways-opt
 // Flags: --stress-runs=8 --send-idle-notification --gc-global
 
diff --git a/test/mjsunit/regress/regress-325676.js b/test/mjsunit/regress/regress-325676.js
index 427bbc3..7450a6d 100644
--- a/test/mjsunit/regress/regress-325676.js
+++ b/test/mjsunit/regress/regress-325676.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
 
 // If a function parameter is forced to be context allocated,
 // debug evaluate needs to resolve it to a context slot instead of
diff --git a/test/mjsunit/regress/regress-3281.js b/test/mjsunit/regress/regress-3281.js
index ee256b8..7d42c02 100644
--- a/test/mjsunit/regress/regress-3281.js
+++ b/test/mjsunit/regress/regress-3281.js
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-collections --expose-natives-as=builtins
+// Flags: --expose-natives-as=builtins
 // Should not crash or raise an exception.
 
 var s = new Set();
diff --git a/test/mjsunit/regress/regress-334.js b/test/mjsunit/regress/regress-334.js
index 37dd299..c52c72a 100644
--- a/test/mjsunit/regress/regress-334.js
+++ b/test/mjsunit/regress/regress-334.js
@@ -37,10 +37,10 @@
 function func2(){}
 
 var object = {__proto__:{}};
-%SetProperty(object, "foo", func1, DONT_ENUM | DONT_DELETE);
-%SetProperty(object, "bar", func1, DONT_ENUM | READ_ONLY);
-%SetProperty(object, "baz", func1, DONT_DELETE | READ_ONLY);
-%SetProperty(object.__proto__, "bif", func1, DONT_ENUM | DONT_DELETE);
+%AddNamedProperty(object, "foo", func1, DONT_ENUM | DONT_DELETE);
+%AddNamedProperty(object, "bar", func1, DONT_ENUM | READ_ONLY);
+%AddNamedProperty(object, "baz", func1, DONT_DELETE | READ_ONLY);
+%AddNamedProperty(object.__proto__, "bif", func1, DONT_ENUM | DONT_DELETE);
 object.bif = func2;
 
 function enumerable(obj) {
diff --git a/test/mjsunit/regress/regress-3404.js b/test/mjsunit/regress/regress-3404.js
new file mode 100644
index 0000000..c4d280e
--- /dev/null
+++ b/test/mjsunit/regress/regress-3404.js
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function testError(error) {
+  // Reconfigure e.stack to be non-configurable
+  var desc1 = Object.getOwnPropertyDescriptor(error, "stack");
+  Object.defineProperty(error, "stack",
+                        {get: desc1.get, set: desc1.set, configurable: false});
+
+  var desc2 = Object.getOwnPropertyDescriptor(error, "stack");
+  assertFalse(desc2.configurable);
+  assertEquals(desc1.get, desc2.get);
+  assertEquals(desc1.set, desc2.set);
+}
+
+function stackOverflow() {
+  function f() { f(); }
+  try { f() } catch (e) { return e; }
+}
+
+function referenceError() {
+  try { g() } catch (e) { return e; }
+}
+
+testError(referenceError());
+testError(stackOverflow());
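
A standalone sketch of the property invariant being regression-tested: once an
accessor property is non-configurable, redefining its getter must throw:

var o = {};
Object.defineProperty(o, "p", {
  get: function() { return 1; },
  configurable: false
});
try {
  Object.defineProperty(o, "p", { get: function() { return 2; } });
} catch (e) {
  console.log(e instanceof TypeError);  // true: the getter cannot be replaced
}
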
diff --git a/test/mjsunit/regress/regress-3462.js b/test/mjsunit/regress/regress-3462.js
new file mode 100644
index 0000000..5a33559
--- /dev/null
+++ b/test/mjsunit/regress/regress-3462.js
@@ -0,0 +1,48 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+function TestFunctionPrototypeSetter() {
+  var f = function() {};
+  var o = {__proto__: f};
+  o.prototype = 42;
+  assertEquals(42, o.prototype);
+  assertTrue(o.hasOwnProperty('prototype'));
+}
+TestFunctionPrototypeSetter();
+
+
+function TestFunctionPrototypeSetterOnValue() {
+  var f = function() {};
+  var fp = f.prototype;
+  Number.prototype.__proto__ = f;
+  var n = 42;
+  var o = {};
+  n.prototype = o;
+  assertEquals(fp, n.prototype);
+  assertEquals(fp, f.prototype);
+  assertFalse(Number.prototype.hasOwnProperty('prototype'));
+}
+TestFunctionPrototypeSetterOnValue();
+
+
+function TestArrayLengthSetter() {
+  var a = [1];
+  var o = {__proto__: a};
+  o.length = 2;
+  assertEquals(2, o.length);
+  assertEquals(1, a.length);
+  assertTrue(o.hasOwnProperty('length'));
+}
+TestArrayLengthSetter();
+
+
+function TestArrayLengthSetterOnValue() {
+  Number.prototype.__proto__ = [1];
+  var n = 42;
+  n.length = 2;
+  assertEquals(1, n.length);
+  assertFalse(Number.prototype.hasOwnProperty('length'));
+}
+TestArrayLengthSetterOnValue();
diff --git a/test/mjsunit/regress/regress-3476.js b/test/mjsunit/regress/regress-3476.js
new file mode 100644
index 0000000..f4333db
--- /dev/null
+++ b/test/mjsunit/regress/regress-3476.js
@@ -0,0 +1,24 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function MyWrapper(v) {
+  return { valueOf: function() { return v } };
+}
+
+function f() {
+  assertEquals("truex", true + "x");
+  assertEquals("truey", true + new String("y"));
+  assertEquals("truez", true + new MyWrapper("z"));
+
+  assertEquals("xtrue", "x" + true);
+  assertEquals("ytrue", new String("y") + true);
+  assertEquals("ztrue", new MyWrapper("z") + true);
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
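
Both this test and regress-3564.js below lean on the same coercion path: the
+ operator and the relational operators apply ToPrimitive to object operands,
which consults valueOf() on wrappers before concatenating or comparing. A
minimal standalone sketch:

function Wrap(v) { return { valueOf: function() { return v; } }; }
console.log(true + "x");              // "truex"
console.log(true + new String("y"));  // "truey": valueOf yields the string
console.log(true + Wrap("z"));        // "truez": custom valueOf is consulted
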
diff --git a/test/mjsunit/regress/regress-3564.js b/test/mjsunit/regress/regress-3564.js
new file mode 100644
index 0000000..a0b9eb2
--- /dev/null
+++ b/test/mjsunit/regress/regress-3564.js
@@ -0,0 +1,24 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function MyWrapper(v) {
+  return { valueOf: function() { return v } };
+}
+
+function f() {
+  assertTrue("a" < "x");
+  assertTrue("a" < new String("y"));
+  assertTrue("a" < new MyWrapper("z"));
+
+  assertFalse("a" > "x");
+  assertFalse("a" > new String("y"));
+  assertFalse("a" > new MyWrapper("z"));
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/test/mjsunit/regress/regress-380092.js b/test/mjsunit/regress/regress-380092.js
new file mode 100644
index 0000000..fe6b0b7
--- /dev/null
+++ b/test/mjsunit/regress/regress-380092.js
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function many_hoist(o, index) {
+  return o[index + 33554427];
+}
+
+var obj = {};
+many_hoist(obj, 0);
+%OptimizeFunctionOnNextCall(many_hoist);
+many_hoist(obj, 5);
+
+function constant_too_large(o, index) {
+  return o[index + 1033554433];
+}
+
+constant_too_large(obj, 0);
+%OptimizeFunctionOnNextCall(constant_too_large);
+constant_too_large(obj, 5);
diff --git a/test/mjsunit/regress/regress-381313.js b/test/mjsunit/regress/regress-381313.js
new file mode 100644
index 0000000..d2b9d7c
--- /dev/null
+++ b/test/mjsunit/regress/regress-381313.js
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var g = 0;
+
+function f(x, deopt) {
+  var a0 = x;
+  var a1 = 2 * x;
+  var a2 = 3 * x;
+  var a3 = 4 * x;
+  var a4 = 5 * x;
+  var a5 = 6 * x;
+  var a6 = 7 * x;
+  var a7 = 8 * x;
+  var a8 = 9 * x;
+  var a9 = 10 * x;
+  var a10 = 11 * x;
+  var a11 = 12 * x;
+  var a12 = 13 * x;
+  var a13 = 14 * x;
+  var a14 = 15 * x;
+  var a15 = 16 * x;
+  var a16 = 17 * x;
+  var a17 = 18 * x;
+  var a18 = 19 * x;
+  var a19 = 20 * x;
+
+  g = 1;
+
+  deopt + 0;
+
+  return a0 + a1 + a2 + a3 + a4 + a5 + a6 + a7 + a8 + a9 +
+         a10 + a11 + a12 + a13 + a14 + a15 + a16 + a17 + a18 + a19;
+}
+
+f(0.5, 0);
+f(0.5, 0);
+%OptimizeFunctionOnNextCall(f);
+print(f(0.5, ""));
diff --git a/test/mjsunit/regress/regress-403292.js b/test/mjsunit/regress/regress-403292.js
new file mode 100644
index 0000000..4e7ba28
--- /dev/null
+++ b/test/mjsunit/regress/regress-403292.js
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-natives-as=builtins --expose-gc
+
+var __v_7 = [];
+var __v_8 = {};
+var __v_10 = {};
+var __v_11 = this;
+var __v_12 = {};
+var __v_13 = {};
+var __v_14 = "";
+var __v_15 = {};
+try {
+__v_1 = {x:0};
+%OptimizeFunctionOnNextCall(__f_1);
+assertEquals("good", __f_1());
+delete __v_1.x;
+assertEquals("good", __f_1());
+} catch(e) { print("Caught: " + e); }
+try {
+__v_3 = new Set();
+__v_5 = new builtins.SetIterator(__v_3, -12);
+__v_4 = new Map();
+__v_6 = new builtins.MapIterator(__v_4, 2);
+__f_3(Array);
+} catch(e) { print("Caught: " + e); }
+function __f_4(__v_8, filter) {
+  function __f_6(v) {
+    for (var __v_4 in v) {
+      for (var __v_4 in v) {}
+    }
+    %OptimizeFunctionOnNextCall(filter);
+    return filter(v);
+  }
+  var __v_7 = eval(__v_8);
+  gc();
+  return __f_6(__v_7);
+}
+function __f_5(__v_6) {
+  var __v_5 = new Array(__v_6);
+  for (var __v_4 = 0; __v_4 < __v_6; __v_4++) __v_5.push('{}');
+  return __v_5;
+}
+try {
+try {
+  __v_8.test("\x80");
+  assertUnreachable();
+} catch (e) {
+}
+gc();
+} catch(e) { print("Caught: " + e); }
diff --git a/test/mjsunit/regress/regress-404981.js b/test/mjsunit/regress/regress-404981.js
new file mode 100644
index 0000000..5508d6f
--- /dev/null
+++ b/test/mjsunit/regress/regress-404981.js
@@ -0,0 +1,6 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var large_object = new Array(5000001);
+large_object.length = 23;
diff --git a/test/mjsunit/regress/regress-408036.js b/test/mjsunit/regress/regress-408036.js
new file mode 100644
index 0000000..a4dfade
--- /dev/null
+++ b/test/mjsunit/regress/regress-408036.js
@@ -0,0 +1,5 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-natives-as 1
diff --git a/test/mjsunit/regress/regress-409533.js b/test/mjsunit/regress/regress-409533.js
new file mode 100644
index 0000000..e51065e
--- /dev/null
+++ b/test/mjsunit/regress/regress-409533.js
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+  %_RegExpConstructResult(0, {}, {});
+}
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/test/mjsunit/regress/regress-410912.js b/test/mjsunit/regress/regress-410912.js
new file mode 100644
index 0000000..98367bd
--- /dev/null
+++ b/test/mjsunit/regress/regress-410912.js
@@ -0,0 +1,206 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-gc
+
+var assertDoesNotThrow;
+var assertInstanceof;
+var assertUnreachable;
+var assertOptimized;
+var assertUnoptimized;
+function classOf(object) { var string = Object.prototype.toString.call(object); return string.substring(8, string.length - 1); }
+function PrettyPrint(value) { return ""; }
+function PrettyPrintArrayElement(value, index, array) { return ""; }
+function fail(expectedText, found, name_opt) { }
+function deepObjectEquals(a, b) { var aProps = Object.keys(a); aProps.sort(); var bProps = Object.keys(b); bProps.sort(); if (!deepEquals(aProps, bProps)) { return false; } for (var i = 0; i < aProps.length; i++) { if (!deepEquals(a[aProps[i]], b[aProps[i]])) { return false; } } return true; }
+function deepEquals(a, b) { if (a === b) { if (a === 0) return (1 / a) === (1 / b); return true; } if (typeof a != typeof b) return false; if (typeof a == "number") return isNaN(a) && isNaN(b); if (typeof a !== "object" && typeof a !== "function") return false; var objectClass = classOf(a); if (objectClass !== classOf(b)) return false; if (objectClass === "RegExp") { return (a.toString() === b.toString()); } if (objectClass === "Function") return false; if (objectClass === "Array") { var elementCount = 0; if (a.length != b.length) { return false; } for (var i = 0; i < a.length; i++) { if (!deepEquals(a[i], b[i])) return false; } return true; } if (objectClass == "String" || objectClass == "Number" || objectClass == "Boolean" || objectClass == "Date") { if (a.valueOf() !== b.valueOf()) return false; } return deepObjectEquals(a, b); }
+assertSame = function assertSame(expected, found, name_opt) { if (found === expected) { if (expected !== 0 || (1 / expected) == (1 / found)) return; } else if ((expected !== expected) && (found !== found)) { return; } fail(PrettyPrint(expected), found, name_opt); }; assertEquals = function assertEquals(expected, found, name_opt) { if (!deepEquals(found, expected)) { fail(PrettyPrint(expected), found, name_opt); } };
+assertEqualsDelta = function assertEqualsDelta(expected, found, delta, name_opt) { assertTrue(Math.abs(expected - found) <= delta, name_opt); };
+assertArrayEquals = function assertArrayEquals(expected, found, name_opt) { var start = ""; if (name_opt) { start = name_opt + " - "; } assertEquals(expected.length, found.length, start + "array length"); if (expected.length == found.length) { for (var i = 0; i < expected.length; ++i) { assertEquals(expected[i], found[i], start + "array element at index " + i); } } };
+assertPropertiesEqual = function assertPropertiesEqual(expected, found, name_opt) { if (!deepObjectEquals(expected, found)) { fail(expected, found, name_opt); } };
+assertToStringEquals = function assertToStringEquals(expected, found, name_opt) { if (expected != String(found)) { fail(expected, found, name_opt); } };
+assertTrue = function assertTrue(value, name_opt) { assertEquals(true, value, name_opt); };
+assertFalse = function assertFalse(value, name_opt) { assertEquals(false, value, name_opt); };
+assertNull = function assertNull(value, name_opt) { if (value !== null) { fail("null", value, name_opt); } };
+assertNotNull = function assertNotNull(value, name_opt) { if (value === null) { fail("not null", value, name_opt); } };
+var __v_39 = {};
+var __v_40 = {};
+var __v_41 = {};
+var __v_42 = {};
+var __v_43 = {};
+var __v_44 = {};
+try {
+__v_0 = [1.5,,1.7];
+__v_1 = {__v_0:1.8};
+} catch(e) { print("Caught: " + e); }
+function __f_0(__v_1,__v_0,i) {
+  __v_1.a = __v_0[i];
+  gc();
+}
+try {
+__f_0(__v_1,__v_0,0);
+__f_0(__v_1,__v_0,0);
+%OptimizeFunctionOnNextCall(__f_0);
+__f_0(__v_1,__v_0,1);
+assertEquals(undefined, __v_1.a);
+__v_0 = [1,,3];
+__v_1 = {ab:5};
+} catch(e) { print("Caught: " + e); }
+function __f_1(__v_1,__v_0,i) {
+  __v_1.ab = __v_0[i];
+}
+try {
+__f_1(__v_1,__v_0,1);
+} catch(e) { print("Caught: " + e); }
+function __f_5(x) {
+  return ~x;
+}
+try {
+__f_5(42);
+assertEquals(~12, __f_5(12.45));
+assertEquals(~46, __f_5(42.87));
+__v_2 = 1, __v_4 = 2, __v_3 = 4, __v_6 = 8;
+} catch(e) { print("Caught: " + e); }
+function __f_4() {
+  return __v_2 | (__v_4 | (__v_3 | __v_6));
+}
+try {
+__f_4();
+__v_3 = "16";
+assertEquals(17 | -13 | 0 | -5, __f_4());
+} catch(e) { print("Caught: " + e); }
+function __f_6() {
+  return __f_4();
+}
+try {
+assertEquals(1 | 2 | 16 | 8, __f_6());
+__f_4 = function() { return 42; };
+assertEquals(42, __f_6());
+__v_5 = {};
+__v_5.__f_4 = __f_4;
+} catch(e) { print("Caught: " + e); }
+function __f_7(o) {
+  return o.__f_4();
+}
+try {
+for (var __v_7 = 0; __v_7 < 5; __v_7++) __f_7(__v_5);
+%OptimizeFunctionOnNextCall(__f_7);
+__f_7(__v_5);
+assertEquals(42, __f_7(__v_5));
+assertEquals(87, __f_7({__f_4: function() { return 87; }}));
+} catch(e) { print("Caught: " + e); }
+function __f_8(x,y) {
+  x = 42;
+  y = 1;
+  y = y << "0";
+  return x | y;
+}
+try {
+assertEquals(43, __f_8(0,0));
+} catch(e) { print("Caught: " + e); }
+function __f_2(x) {
+  return 'lit[' + (x + ']');
+}
+try {
+assertEquals('lit[-87]', __f_2(-87));
+assertEquals('lit[0]', __f_2(0));
+assertEquals('lit[42]', __f_2(42));
+__v_9 = "abc";
+gc();
+var __v_8;
+} catch(e) { print("Caught: " + e); }
+function __f_9(n) { return __v_9.charAt(n); }
+try {
+for (var __v_7 = 0; __v_7 < 5; __v_7++) {
+  __v_8 = __f_9(0);
+}
+%OptimizeFunctionOnNextCall(__f_9);
+__v_8 = __f_9(0);
+} catch(e) { print("Caught: " + e); }
+function __f_3(__v_2,__v_4,__v_3,__v_6) {
+  return __v_2+__v_4+__v_3+__v_6;
+}
+try {
+assertEquals(0x40000003, __f_3(1,1,2,0x3fffffff));
+} catch(e) { print("Caught: " + e); }
+try {
+__v_19 = {
+  fast_smi_only             :  'fast smi only elements',
+  fast                      :  'fast elements',
+  fast_double               :  'fast double elements',
+  dictionary                :  'dictionary elements',
+  external_int8             :  'external int8 elements',
+  external_uint8            :  'external uint8 elements',
+  external_int16            :  'external int16 elements',
+  external_uint16           :  'external uint16 elements',
+  external_int32            :  'external int32 elements',
+  external_uint32           :  'external uint32 elements',
+  external_float32          :  'external float32 elements',
+  external_float64          :  'external float64 elements',
+  external_uint8_clamped    :  'external uint8_clamped elements',
+  fixed_int8                :  'fixed int8 elements',
+  fixed_uint8               :  'fixed uint8 elements',
+  fixed_int16               :  'fixed int16 elements',
+  fixed_uint16              :  'fixed uint16 elements',
+  fixed_int32               :  'fixed int32 elements',
+  fixed_uint32              :  'fixed uint32 elements',
+  fixed_float32             :  'fixed float32 elements',
+  fixed_float64             :  'fixed float64 elements',
+  fixed_uint8_clamped       :  'fixed uint8_clamped elements'
+}
+} catch(e) { print("Caught: " + e); }
+function __f_12() {
+}
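+// __f_12 receives an expected element kind but performs no checks.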
+__v_10 = {};
+__v_10.dance = 0xD15C0;
+__v_10.drink = 0xC0C0A;
+__f_12(__v_19.fast, __v_10);
+__v_24 = [1,2,3];
+__f_12(__v_19.fast_smi_only, __v_24);
+__v_24.dance = 0xD15C0;
+__v_24.drink = 0xC0C0A;
+__f_12(__v_19.fast_smi_only, __v_24);
+function __f_18() {
+  var __v_27 = new Array();
+  __f_12(__v_19.fast_smi_only, __v_27);
+  for (var __v_18 = 0; __v_18 < 1337; __v_18++) {
+    var __v_16 = __v_18;
+    if (__v_18 == 1336) {
+      __f_12(__v_19.fast_smi_only, __v_27);
+      __v_16 = new Object();
+    }
+    __v_27[__v_18] = __v_16;
+  }
+  __f_12(__v_19.fast, __v_27);
+  var __v_15 = [];
+  __v_15[912570] = 7;
+  __f_12(__v_19.dictionary, __v_15);
+  var __v_26 = new Array(912561);
+  %SetAllocationTimeout(100000000, 10000000);
+  for (var __v_18 = 0; __v_18 < 0x20000; __v_18++) {
+    __v_26[0] = __v_18 / 2;
+  }
+  __f_12(__v_19.fixed_int8,    new Int8Array(007));
+  __f_12(__v_19.fixed_uint8,   new Uint8Array(007));
+  __f_12(__v_19.fixed_int16,   new Int16Array(666));
+  __f_12(__v_19.fixed_uint16,  new Uint16Array(42));
+  __f_12(__v_19.fixed_int32,   new Int32Array(0xF));
+  __f_12(__v_19.fixed_uint32,  new Uint32Array(23));
+  __f_12(__v_19.fixed_float32, new Float32Array(7));
+  __f_12(__v_19.fixed_float64, new Float64Array(0));
+  __f_12(__v_19.fixed_uint8_clamped, new Uint8ClampedArray(512));
+  var __v_13 = new ArrayBuffer(128);
+  __f_12(__v_19.external_int8,    new Int8Array(__v_13));
+  __f_12(__v_19.external_uint8,   new Uint8Array(__v_13));
+  __f_12(__v_19.external_int16,   new Int16Array(__v_13));
+  __f_12(__v_19.external_uint16,  new Uint16Array(__v_13));
+  __f_12(__v_19.external_int32,   new Int32Array(__v_13));
+  __f_12(__v_19.external_uint32,  new Uint32Array(__v_13));
+  __f_12(__v_19.external_float32, new Float32Array(__v_13));
+  __f_12(__v_19.external_float64, new Float64Array(__v_13));
+  __f_12(__v_19.external_uint8_clamped, new Uint8ClampedArray(__v_13));
+}
+try {
+__f_18();
+} catch(e) { }
diff --git a/test/mjsunit/regress/regress-411210.js b/test/mjsunit/regress/regress-411210.js
new file mode 100644
index 0000000..2dbc5ff
--- /dev/null
+++ b/test/mjsunit/regress/regress-411210.js
@@ -0,0 +1,23 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=439 --random-seed=-423594851
+
+var __v_3;
+function __f_2() {
+  var __v_1 = new Array(3);
+  __v_1[0] = 10;
+  __v_1[1] = 15.5;
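+  // Unconditional recursion: each top-level call ends in a RangeError
+  // that the caller's try/catch absorbs.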
+  __v_3 = __f_2();
+  __v_1[2] = 20;
+  return __v_1;
+}
+
+try {
+  for (var __v_2 = 0; __v_2 < 3; ++__v_2) {
+    __v_3 = __f_2();
+  }
+}
+catch (e) { }
diff --git a/test/mjsunit/regress/regress-411237.js b/test/mjsunit/regress/regress-411237.js
new file mode 100644
index 0000000..8b75ba3
--- /dev/null
+++ b/test/mjsunit/regress/regress-411237.js
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --harmony
+
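+// Both natives calls are guarded: the targets (a builtin and a
+// generator) may be rejected for optimization.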
+try {
+  %OptimizeFunctionOnNextCall(print);
+} catch(e) { }
+
+try {
+  function* f() {
+  }
+  %OptimizeFunctionOnNextCall(f);
+} catch(e) { }
diff --git a/test/mjsunit/regress/regress-412162.js b/test/mjsunit/regress/regress-412162.js
new file mode 100644
index 0000000..6a7ad0c
--- /dev/null
+++ b/test/mjsunit/regress/regress-412162.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
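+// Exercises Math.abs on a negated NaN in optimized code; toString consumes the result.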
+function test() {
+  Math.abs(-NaN).toString();
+}
+
+test();
+test();
+%OptimizeFunctionOnNextCall(test);
+test();
diff --git a/test/mjsunit/regress/regress-416416.js b/test/mjsunit/regress/regress-416416.js
new file mode 100644
index 0000000..66e882e
--- /dev/null
+++ b/test/mjsunit/regress/regress-416416.js
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
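+// String.prototype.length.x is undefined, so each call throws a TypeError that foo swallows.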
+function foo() {
+  try {
+    String.prototype.length.x();
+  } catch (e) {
+  }
+}
+
+foo();
+foo();
+foo();
diff --git a/test/mjsunit/regress/regress-cntl-descriptors-enum.js b/test/mjsunit/regress/regress-cntl-descriptors-enum.js
index ee72faf..fd4ac6d 100644
--- a/test/mjsunit/regress/regress-cntl-descriptors-enum.js
+++ b/test/mjsunit/regress/regress-cntl-descriptors-enum.js
@@ -30,10 +30,10 @@
 DontEnum = 2;
 
 var o = {};
-%SetProperty(o, "a", 0, DontEnum);
+%AddNamedProperty(o, "a", 0, DontEnum);
 
 var o2 = {};
-%SetProperty(o2, "a", 0, DontEnum);
+%AddNamedProperty(o2, "a", 0, DontEnum);
 
 assertTrue(%HaveSameMap(o, o2));
 
diff --git a/test/mjsunit/regress/regress-conditional-position.js b/test/mjsunit/regress/regress-conditional-position.js
index cd8f7bd..ae5a3ac 100644
--- a/test/mjsunit/regress/regress-conditional-position.js
+++ b/test/mjsunit/regress/regress-conditional-position.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --always-full-compiler
+// Flags: --nocrankshaft
 
 var functionToCatch;
 var lineNumber;
diff --git a/test/mjsunit/regress/regress-crbug-107996.js b/test/mjsunit/regress/regress-crbug-107996.js
index dfe07e5..b4907f3 100644
--- a/test/mjsunit/regress/regress-crbug-107996.js
+++ b/test/mjsunit/regress/regress-crbug-107996.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
 
 Debug = debug.Debug;
 
diff --git a/test/mjsunit/regress/regress-crbug-171715.js b/test/mjsunit/regress/regress-crbug-171715.js
index 040c381..309f50a 100644
--- a/test/mjsunit/regress/regress-crbug-171715.js
+++ b/test/mjsunit/regress/regress-crbug-171715.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
 
 Debug = debug.Debug
 
diff --git a/test/mjsunit/regress/regress-crbug-222893.js b/test/mjsunit/regress/regress-crbug-222893.js
index 39363bc..75e1728 100644
--- a/test/mjsunit/regress/regress-crbug-222893.js
+++ b/test/mjsunit/regress/regress-crbug-222893.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug
+// Flags: --turbo-deoptimization
 
 Debug = debug.Debug
 
diff --git a/test/mjsunit/regress/regress-crbug-245480.js b/test/mjsunit/regress/regress-crbug-245480.js
index ec88509..43fa6ba 100644
--- a/test/mjsunit/regress/regress-crbug-245480.js
+++ b/test/mjsunit/regress/regress-crbug-245480.js
@@ -25,25 +25,9 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax --smi-only-arrays --expose-gc
+// Flags: --allow-natives-syntax --expose-gc
 // Flags: --noalways-opt
 
-// Test element kind of objects.
-// Since --smi-only-arrays affects builtins, its default setting at compile
-// time sticks if built with snapshot.  If --smi-only-arrays is deactivated
-// by default, only a no-snapshot build actually has smi-only arrays enabled
-// in this test case.  Depending on whether smi-only arrays are actually
-// enabled, this test takes the appropriate code path to check smi-only arrays.
-
-// support_smi_only_arrays = %HasFastSmiElements(new Array(1,2,3,4,5,6,7,8));
-support_smi_only_arrays = true;
-
-if (support_smi_only_arrays) {
-  print("Tests include smi-only arrays.");
-} else {
-  print("Tests do NOT include smi-only arrays.");
-}
-
 function isHoley(obj) {
   if (%HasFastHoleyElements(obj)) return true;
   return false;
@@ -57,19 +41,17 @@
   assertEquals(false, isHoley(obj), name_opt);
 }
 
-if (support_smi_only_arrays) {
-  function create_array(arg) {
-    return new Array(arg);
-  }
-
-  obj = create_array(0);
-  assertNotHoley(obj);
-  create_array(0);
-  %OptimizeFunctionOnNextCall(create_array);
-  obj = create_array(10);
-  assertHoley(obj);
+function create_array(arg) {
+  return new Array(arg);
 }
 
+obj = create_array(0);
+assertNotHoley(obj);
+create_array(0);
+%OptimizeFunctionOnNextCall(create_array);
+obj = create_array(10);
+assertHoley(obj);
+
 // The code below would assert in debug or crash in release
 function f(length) {
   return new Array(length)
diff --git a/test/mjsunit/regress/regress-crbug-320922.js b/test/mjsunit/regress/regress-crbug-320922.js
index 4a5b581..9ba759a 100644
--- a/test/mjsunit/regress/regress-crbug-320922.js
+++ b/test/mjsunit/regress/regress-crbug-320922.js
@@ -30,7 +30,7 @@
 var string = "hello world";
 var expected = "Hello " + "world";
 function Capitalize() {
-  %_OneByteSeqStringSetChar(string, 0, 0x48);
+  %_OneByteSeqStringSetChar(0, 0x48, string);
 }
 Capitalize();
 assertEquals(expected, string);
@@ -40,7 +40,7 @@
 var twobyte = "\u20ACello world";
 
 function TwoByteCapitalize() {
-  %_TwoByteSeqStringSetChar(twobyte, 0, 0x48);
+  %_TwoByteSeqStringSetChar(0, 0x48, twobyte);
 }
 TwoByteCapitalize();
 assertEquals(expected, twobyte);
diff --git a/test/mjsunit/regress/regress-crbug-350864.js b/test/mjsunit/regress/regress-crbug-350864.js
index 8a793cb..510834b 100644
--- a/test/mjsunit/regress/regress-crbug-350864.js
+++ b/test/mjsunit/regress/regress-crbug-350864.js
@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --harmony-symbols
-
 var v0 = new WeakMap;
 var v1 = {};
 v0.set(v1, 1);
diff --git a/test/mjsunit/regress/regress-crbug-357052.js b/test/mjsunit/regress/regress-crbug-357052.js
index 9cde1b6..7a58396 100644
--- a/test/mjsunit/regress/regress-crbug-357052.js
+++ b/test/mjsunit/regress/regress-crbug-357052.js
@@ -7,5 +7,6 @@
   for (var i = 0; i < 30; i++) {
     str += "abcdefgh12345678" + str;
   }
+  return str;
 }
 assertThrows(f);
diff --git a/test/mjsunit/regress/regress-crbug-387599.js b/test/mjsunit/regress/regress-crbug-387599.js
new file mode 100644
index 0000000..98750aa
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-387599.js
@@ -0,0 +1,20 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug;
+Debug.setListener(function() {});
+
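+// Requesting OSR optimization of a function that carries a break point must not crash.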
+function f() {
+  for (var i = 0; i < 100; i++) {
+    %OptimizeFunctionOnNextCall(f, "osr");
+  }
+}
+
+Debug.setBreakPoint(f, 0, 0);
+f();
+f();
+Debug.setListener(null);
diff --git a/test/mjsunit/regress/regress-crbug-387636.js b/test/mjsunit/regress/regress-crbug-387636.js
new file mode 100644
index 0000000..1e50ace
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-387636.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f() {
+  [].indexOf(0x40000000);
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/test/mjsunit/regress/regress-crbug-390918.js b/test/mjsunit/regress/regress-crbug-390918.js
new file mode 100644
index 0000000..4c202b3
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-390918.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
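+// 2 * {} is NaN, so f({}) stores under the key "NaN"; the optimized
+// call f(1004) then switches to a numeric index.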
+function f(scale) {
+  var arr = {a: 1.1};
+
+  for (var i = 0; i < 2; i++) {
+    arr[2 * scale] = 0;
+  }
+}
+
+f({});
+f({});
+%OptimizeFunctionOnNextCall(f);
+f(1004);
diff --git a/test/mjsunit/regress/regress-crbug-390925.js b/test/mjsunit/regress/regress-crbug-390925.js
new file mode 100644
index 0000000..24873df
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-390925.js
@@ -0,0 +1,9 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var a = new Array();
+Object.freeze(a);
+assertThrows(function() { %LiveEditCheckAndDropActivations(a, true); });
diff --git a/test/mjsunit/regress/regress-crbug-393988.js b/test/mjsunit/regress/regress-crbug-393988.js
new file mode 100644
index 0000000..9543e1e
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-393988.js
@@ -0,0 +1,8 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var o = {};
+Error.captureStackTrace(o);
+Object.defineProperty(o, "stack", { value: 1 });
+assertEquals(1, o.stack);
diff --git a/test/mjsunit/regress/regress-crbug-401915.js b/test/mjsunit/regress/regress-crbug-401915.js
new file mode 100644
index 0000000..96dce04
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-401915.js
@@ -0,0 +1,20 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug;
+Debug.setListener(function() {});
+Debug.setBreakOnException();
+
+try {
+  try {
+    %DebugPushPromise(new Promise(function() {}));
+  } catch (e) {
+  }
+  throw new Error();
+} catch (e) {
+}
+
+Debug.setListener(null);
diff --git a/test/mjsunit/regress/regress-crbug-403409.js b/test/mjsunit/regress/regress-crbug-403409.js
new file mode 100644
index 0000000..ffd100b
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-403409.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
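+// Index 0 stays a hole through concat, so reads fall through to Array.prototype[0].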
+Array.prototype[0] = 777;
+var kElements = 10;
+
+var input_array = [];
+for (var i = 1; i < kElements; i++) {
+  input_array[i] = 0.5;
+}
+var output_array = input_array.concat(0.5);
+
+assertEquals(kElements + 1, output_array.length);
+assertEquals(777, output_array[0]);
+for (var j = 1; j < kElements; j++) {
+  assertEquals(0.5, output_array[j]);
+}
diff --git a/test/mjsunit/regress/regress-crbug-405491.js b/test/mjsunit/regress/regress-crbug-405491.js
new file mode 100644
index 0000000..b633781
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-405491.js
@@ -0,0 +1,5 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as 1
diff --git a/test/mjsunit/regress/regress-crbug-405517.js b/test/mjsunit/regress/regress-crbug-405517.js
new file mode 100644
index 0000000..36c3f4f
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-405517.js
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --gc-interval=203
+
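+// Repeatedly shifts elements out of a non-extensible array while --gc-interval forces frequent GCs.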
+function f() {
+  var e = [0];
+  %PreventExtensions(e);
+  for (var i = 0; i < 4; i++) e.shift();
+}
+
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/test/mjsunit/regress/regress-crbug-405922.js b/test/mjsunit/regress/regress-crbug-405922.js
new file mode 100644
index 0000000..9f76a86
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-405922.js
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      exec_state.prepareStep(Debug.StepAction.StepIn, 3);
+    }
+  } catch (e) {
+  }
+}
+
+Debug.setListener(listener);
+
+function f(x) {
+  if (x > 0) %_CallFunction(null, x-1, f);
+}
+
+debugger;
+f(2);
+
+Debug.setListener(null);
diff --git a/test/mjsunit/regress/regress-crbug-407946.js b/test/mjsunit/regress/regress-crbug-407946.js
new file mode 100644
index 0000000..d5687cc
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-407946.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(n) { return [0].indexOf((n - n) + 0); }
+
+assertEquals(0, f(.1));
+assertEquals(0, f(.1));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(0, f(.1));
diff --git a/test/mjsunit/regress/regress-crbug-412203.js b/test/mjsunit/regress/regress-crbug-412203.js
new file mode 100644
index 0000000..f150859
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-412203.js
@@ -0,0 +1,36 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var b = [];
+b[10000] = 1;
+// Required to reproduce the bug.
+assertTrue(%HasDictionaryElements(b));
+
+var a1 = [1.5];
+b.__proto__ = a1;
+assertEquals(1.5, ([].concat(b))[0]);
+
+var a2 = new Int32Array(2);
+a2[0] = 3;
+b.__proto__ = a2;
+assertEquals(3, ([].concat(b))[0]);
+
+function foo(x, y) {
+  var a = [];
+  a[10000] = 1;
+  assertTrue(%HasDictionaryElements(a));
+
+  a.__proto__ = arguments;
+  var c = [].concat(a);
+  for (var i = 0; i < arguments.length; i++) {
+    assertEquals(i + 2, c[i]);
+  }
+  assertEquals(undefined, c[arguments.length]);
+  assertEquals(undefined, c[arguments.length + 1]);
+}
+foo(2);
+foo(2, 3);
+foo(2, 3, 4);
diff --git a/test/mjsunit/regress/regress-crbug-412208.js b/test/mjsunit/regress/regress-crbug-412208.js
new file mode 100644
index 0000000..a194f85
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-412208.js
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var non_const_true = true;
+
+function f() {
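+  // non_const_true is always true, so the invalid assignment target is never evaluated.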
+  return non_const_true || (f() = this);
+}
+
+assertTrue(f());
+assertTrue(f());
+%OptimizeFunctionOnNextCall(f);
+assertTrue(f());
diff --git a/test/mjsunit/regress/regress-crbug-412210.js b/test/mjsunit/regress/regress-crbug-412210.js
new file mode 100644
index 0000000..6ec7d62
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-412210.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+  return (x ? "" >> 0 : "") + /a/;
+}
+
+%OptimizeFunctionOnNextCall(f);
+f();
diff --git a/test/mjsunit/regress/regress-crbug-412215.js b/test/mjsunit/regress/regress-crbug-412215.js
new file mode 100644
index 0000000..ad926fc
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-412215.js
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var dummy = {foo: "true"};
+
+var a = {y:0.5};
+a.y = 357;
+var b = a.y;
+
+var d;
+function f() {
+  d = 357;
+  return {foo: b};
+}
+f();
+f();
+%OptimizeFunctionOnNextCall(f);
+var x = f();
+
+// With the bug, x is now an invalid object; the code below
+// triggers a crash.
+
+function g(obj) {
+  return obj.foo.length;
+}
+
+g(dummy);
+g(dummy);
+%OptimizeFunctionOnNextCall(g);
+g(x);
diff --git a/test/mjsunit/regress/regress-crbug-412319.js b/test/mjsunit/regress/regress-crbug-412319.js
new file mode 100644
index 0000000..21386e3
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-412319.js
@@ -0,0 +1,19 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function __f_6() {
+  var __v_7 = [0];
+  %PreventExtensions(__v_7);
+  for (var __v_6 = -2; __v_6 < 19; __v_6++) __v_7.shift();
+  __f_7(__v_7);
+}
+__f_6();
+__f_6();
+%OptimizeFunctionOnNextCall(__f_6);
+__f_6();
+function __f_7(__v_7) {
+  __v_7.push(Infinity);
+}
diff --git a/test/mjsunit/regress/regress-crbug-417508.js b/test/mjsunit/regress/regress-crbug-417508.js
new file mode 100644
index 0000000..589fb88
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-417508.js
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function foo(x) {
+  var k = "value";
+  return x[k] = 1;
+}
+var obj = {};
+Object.defineProperty(obj, "value", {set: function(x) { throw "nope"; }});
+try { foo(obj); } catch(e) {}
+try { foo(obj); } catch(e) {}
+%OptimizeFunctionOnNextCall(foo);
+try { foo(obj); } catch(e) {}
+
+function bar(x) {
+  var k = "value";
+  return (x[k] = 1) ? "ok" : "nope";
+}
+var obj2 = {};
+Object.defineProperty(obj2, "value",
+    {set: function(x) { throw "nope"; return true; } });
+
+try { bar(obj2); } catch(e) {}
+try { bar(obj2); } catch(e) {}
+%OptimizeFunctionOnNextCall(bar);
+try { bar(obj2); } catch(e) {}
diff --git a/test/mjsunit/regress/regress-crbug-423687.js b/test/mjsunit/regress/regress-crbug-423687.js
new file mode 100644
index 0000000..6000352
--- /dev/null
+++ b/test/mjsunit/regress/regress-crbug-423687.js
@@ -0,0 +1,10 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var json = '{"a":{"c":2.1,"d":0},"b":{"c":7,"1024":8}}';
+var data = JSON.parse(json);
+
+data.b.c++;
diff --git a/test/mjsunit/regress/regress-debug-deopt-while-recompile.js b/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
index 52c32e9..ce5220a 100644
--- a/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
+++ b/test/mjsunit/regress/regress-debug-deopt-while-recompile.js
@@ -26,6 +26,7 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 // Flags: --expose-debug-as debug --allow-natives-syntax
+// Flags: --turbo-deoptimization
 
 Debug = debug.Debug;
 
diff --git a/test/mjsunit/regress/regress-double-property.js b/test/mjsunit/regress/regress-double-property.js
new file mode 100644
index 0000000..2ddb45b
--- /dev/null
+++ b/test/mjsunit/regress/regress-double-property.js
@@ -0,0 +1,10 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
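+// The literal key 0.1 must be stringified to "0.1" rather than treated as an element index.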
+function f(a) {
+  return {0.1: a};
+}
+
+f();
diff --git a/test/mjsunit/regress/regress-force-constant-representation.js b/test/mjsunit/regress/regress-force-constant-representation.js
new file mode 100644
index 0000000..4ec2a6a
--- /dev/null
+++ b/test/mjsunit/regress/regress-force-constant-representation.js
@@ -0,0 +1,18 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Pushing a double onto this array must store it as a tagged value.
+var a = [{}];
+function f(a) {
+  a.push(Infinity);
+}
+
+f(a);
+f(a);
+f(a);
+%OptimizeFunctionOnNextCall(f);
+f(a);
+assertEquals([{}, Infinity, Infinity, Infinity, Infinity], a);
diff --git a/test/mjsunit/regress/regress-freeze-setter.js b/test/mjsunit/regress/regress-freeze-setter.js
new file mode 100644
index 0000000..c5ac986
--- /dev/null
+++ b/test/mjsunit/regress/regress-freeze-setter.js
@@ -0,0 +1,8 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
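+// x keeps its setter after Object.freeze, so the strict-mode store below must not throw.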
+Object.defineProperty(this, 'x', {set: function() { }});
+Object.freeze(this);
+eval('"use strict"; x = 20;');
diff --git a/test/mjsunit/regress/regress-function-constructor-receiver.js b/test/mjsunit/regress/regress-function-constructor-receiver.js
new file mode 100644
index 0000000..f345435
--- /dev/null
+++ b/test/mjsunit/regress/regress-function-constructor-receiver.js
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Return the raw CallSites array.
+Error.prepareStackTrace = function(a, b) { return b; };
+
+var threw = false;
+try {
+  new Function({toString:0,valueOf:0});
+} catch (e) {
+  threw = true;
+  // Ensure that the receiver during "new Function" is the global proxy.
+  assertEquals(this, e.stack[0].getThis());
+}
+
+assertTrue(threw);
diff --git a/test/mjsunit/regress/regress-global-freeze-const.js b/test/mjsunit/regress/regress-global-freeze-const.js
deleted file mode 100644
index 0b9e1f3..0000000
--- a/test/mjsunit/regress/regress-global-freeze-const.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-__defineSetter__('x', function() { });
-Object.freeze(this);
-eval('const x = 1');
diff --git a/test/mjsunit/regress/regress-inline-constant-load.js b/test/mjsunit/regress/regress-inline-constant-load.js
new file mode 100644
index 0000000..303639c
--- /dev/null
+++ b/test/mjsunit/regress/regress-inline-constant-load.js
@@ -0,0 +1,28 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
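+// f is optimized while o2 still lacks "bar"; defining the property
+// afterwards must invalidate any inlined constant load.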
+var o1 = {};
+var o2 = {};
+
+function foo(x) {
+  return x.bar;
+}
+
+Object.defineProperty(o1, "bar", {value:200});
+foo(o1);
+foo(o1);
+
+function f(b) {
+  var o = o2;
+  if (b) { return foo(o) }
+}
+
+f(false);
+%OptimizeFunctionOnNextCall(f);
+assertEquals(undefined, f(false));
+Object.defineProperty(o2, "bar", {value: 100});
+assertEquals(100, f(true));
diff --git a/test/mjsunit/regress/regress-json-parse-index.js b/test/mjsunit/regress/regress-json-parse-index.js
new file mode 100644
index 0000000..d1a785a
--- /dev/null
+++ b/test/mjsunit/regress/regress-json-parse-index.js
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
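+// The escaped key "\u0030" decodes to "0" and must behave as an array index.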
+var o = JSON.parse('{"\\u0030":100}');
+assertEquals(100, o[0]);
diff --git a/test/mjsunit/regress/regress-mask-array-length.js b/test/mjsunit/regress/regress-mask-array-length.js
new file mode 100644
index 0000000..bd87e7c
--- /dev/null
+++ b/test/mjsunit/regress/regress-mask-array-length.js
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
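+// Storing to the inherited "length" must not define a new property on the non-extensible object.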
+var a = [];
+var o = {
+  __proto__: a
+};
+Object.preventExtensions(o);
+o.length = 'abc';
diff --git a/test/mjsunit/regress/regress-opt-after-debug-deopt.js b/test/mjsunit/regress/regress-opt-after-debug-deopt.js
index c637be5..5cbaabc 100644
--- a/test/mjsunit/regress/regress-opt-after-debug-deopt.js
+++ b/test/mjsunit/regress/regress-opt-after-debug-deopt.js
@@ -27,6 +27,7 @@
 
 // Flags: --expose-debug-as debug --allow-natives-syntax
 // Flags: --concurrent-recompilation --block-concurrent-recompilation
+// Flags: --turbo-deoptimization
 
 if (!%IsConcurrentRecompilationSupported()) {
   print("Concurrent recompilation is disabled. Skipping this test.");
diff --git a/test/mjsunit/regress/regress-regexp-nocase.js b/test/mjsunit/regress/regress-regexp-nocase.js
new file mode 100644
index 0000000..27637da
--- /dev/null
+++ b/test/mjsunit/regress/regress-regexp-nocase.js
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+var s = "('')x\nx\uF670";
+
+assertEquals(["('')", "'"], s.match(/\((').*\1\)/i));
diff --git a/test/mjsunit/regress/regress-reset-dictionary-elements.js b/test/mjsunit/regress/regress-reset-dictionary-elements.js
new file mode 100644
index 0000000..d3d093e
--- /dev/null
+++ b/test/mjsunit/regress/regress-reset-dictionary-elements.js
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var a = [];
+a[10000] = 1;
+a.length = 0;
+a[1] = 1;
+a.length = 0;
+assertEquals(undefined, a[1]);
+
+var o = {};
+Object.freeze(o);
+assertEquals(undefined, o[1]);
diff --git a/test/mjsunit/regress/regress-seqstrsetchar-ex1.js b/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
index c85cf56..1fd8d81 100644
--- a/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
+++ b/test/mjsunit/regress/regress-seqstrsetchar-ex1.js
@@ -47,7 +47,7 @@
   var two_byte = %NewString(n - i, false);
   for (var j = 0; i < n; i++, j++) {
     var code = %_Arguments(i);
-    %_TwoByteSeqStringSetChar(two_byte, j, code);
+    %_TwoByteSeqStringSetChar(j, code, two_byte);
   }
   return one_byte + two_byte;
 }
diff --git a/test/mjsunit/regress/regress-seqstrsetchar-ex3.js b/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
index 43d2b08..0a6b211 100644
--- a/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
+++ b/test/mjsunit/regress/regress-seqstrsetchar-ex3.js
@@ -30,8 +30,8 @@
 function test() {
   var string = %NewString(10, true);
   for (var i = 0; i < 10; i++) {
-    %_OneByteSeqStringSetChar(string, i, 65);
-    %_OneByteSeqStringSetChar(string, i, 66);
+    %_OneByteSeqStringSetChar(i, 65, string);
+    %_OneByteSeqStringSetChar(i, 66, string);
   }
   for (var i = 0; i < 10; i++) {
     assertEquals("B", string[i]);
diff --git a/test/mjsunit/regress/regress-sliced-external-cons-regexp.js b/test/mjsunit/regress/regress-sliced-external-cons-regexp.js
new file mode 100644
index 0000000..145c831
--- /dev/null
+++ b/test/mjsunit/regress/regress-sliced-external-cons-regexp.js
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-externalize-string --expose-gc
+
+var re = /(B)/;
+var cons1 = "0123456789" + "ABCDEFGHIJ";
+var cons2 = "0123456789\u1234" + "ABCDEFGHIJ";
+gc();
+gc();  // Promote cons.
+
+try { externalizeString(cons1, false); } catch (e) { }
+try { externalizeString(cons2, true); } catch (e) { }
+
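+// The regexp must match inside slices of one- and two-byte cons strings, externalized or not.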
+var slice1 = cons1.slice(1,-1);
+var slice2 = cons2.slice(1,-1);
+for (var i = 0; i < 10; i++) {
+  assertEquals(["B", "B"], re.exec(slice1));
+  assertEquals(["B", "B"], re.exec(slice2));
+}
diff --git a/test/mjsunit/regress/regress-update-field-type-attributes.js b/test/mjsunit/regress/regress-update-field-type-attributes.js
new file mode 100644
index 0000000..c23d062
--- /dev/null
+++ b/test/mjsunit/regress/regress-update-field-type-attributes.js
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function test(){
+  function InnerClass(){}
+  var container = {field: new InnerClass()};
+  return Object.freeze(container);
+};
+
+assertTrue(Object.isFrozen(test()));
+assertTrue(Object.isFrozen(test()));
diff --git a/test/mjsunit/regress/string-compare-memcmp.js b/test/mjsunit/regress/string-compare-memcmp.js
new file mode 100644
index 0000000..45f4734
--- /dev/null
+++ b/test/mjsunit/regress/string-compare-memcmp.js
@@ -0,0 +1,7 @@
+// Copyright 2012 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+assertEquals(-1, %StringCompare("abc\u0102", "abc\u0201"));
diff --git a/test/mjsunit/regress/string-set-char-deopt.js b/test/mjsunit/regress/string-set-char-deopt.js
index 9f6d434..c8e8538 100644
--- a/test/mjsunit/regress/string-set-char-deopt.js
+++ b/test/mjsunit/regress/string-set-char-deopt.js
@@ -25,7 +25,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --allow-natives-syntax
+// Flags: --allow-natives-syntax --turbo-deoptimization
 
 (function OneByteSeqStringSetCharDeoptOsr() {
   function deopt() {
@@ -34,7 +34,7 @@
 
   function f(string, osr) {
     var world = " world";
-    %_OneByteSeqStringSetChar(string, 0, (deopt(), 0x48));
+    %_OneByteSeqStringSetChar(0, (deopt(), 0x48), string);
 
     if (osr) while (%GetOptimizationStatus(f) == 2) {}
 
@@ -56,7 +56,7 @@
   }
 
   function f(string) {
-    g(%_OneByteSeqStringSetChar(string, 0, (deopt(), 0x48)));
+    g(%_OneByteSeqStringSetChar(0, (deopt(), 0x48), string));
     return string;
   }
 
@@ -75,7 +75,7 @@
   }
 
   function f(string) {
-    g(%_TwoByteSeqStringSetChar(string, 0, (deopt(), 0x48)));
+    g(%_TwoByteSeqStringSetChar(0, (deopt(), 0x48), string));
     return string;
   }
 
diff --git a/test/mjsunit/runtime-gen/apply.js b/test/mjsunit/runtime-gen/apply.js
deleted file mode 100644
index 90e9c71..0000000
--- a/test/mjsunit/runtime-gen/apply.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = function() {};
-var _receiver = new Object();
-var _arguments = new Object();
-var _offset = 1;
-var _argc = 1;
-%Apply(arg0, _receiver, _arguments, _offset, _argc);
diff --git a/test/mjsunit/runtime-gen/arraybuffergetbytelength.js b/test/mjsunit/runtime-gen/arraybuffergetbytelength.js
deleted file mode 100644
index b363791..0000000
--- a/test/mjsunit/runtime-gen/arraybuffergetbytelength.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new ArrayBuffer(8);
-%ArrayBufferGetByteLength(_holder);
diff --git a/test/mjsunit/runtime-gen/arraybufferinitialize.js b/test/mjsunit/runtime-gen/arraybufferinitialize.js
deleted file mode 100644
index 9ae5d9f..0000000
--- a/test/mjsunit/runtime-gen/arraybufferinitialize.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new ArrayBuffer(8);
-var _byteLength = 1.5;
-%ArrayBufferInitialize(_holder, _byteLength);
diff --git a/test/mjsunit/runtime-gen/arraybufferisview.js b/test/mjsunit/runtime-gen/arraybufferisview.js
deleted file mode 100644
index def6b3b..0000000
--- a/test/mjsunit/runtime-gen/arraybufferisview.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%ArrayBufferIsView(_object);
diff --git a/test/mjsunit/runtime-gen/arraybufferneuter.js b/test/mjsunit/runtime-gen/arraybufferneuter.js
deleted file mode 100644
index f239edf..0000000
--- a/test/mjsunit/runtime-gen/arraybufferneuter.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _array_buffer = new ArrayBuffer(8);
-%ArrayBufferNeuter(_array_buffer);
diff --git a/test/mjsunit/runtime-gen/arraybuffersliceimpl.js b/test/mjsunit/runtime-gen/arraybuffersliceimpl.js
deleted file mode 100644
index 8ed24d6..0000000
--- a/test/mjsunit/runtime-gen/arraybuffersliceimpl.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _source = new ArrayBuffer(8);
-var _target = new ArrayBuffer(8);
-var arg2 = 0;
-%ArrayBufferSliceImpl(_source, _target, arg2);
diff --git a/test/mjsunit/runtime-gen/arraybufferviewgetbytelength.js b/test/mjsunit/runtime-gen/arraybufferviewgetbytelength.js
deleted file mode 100644
index 243c382..0000000
--- a/test/mjsunit/runtime-gen/arraybufferviewgetbytelength.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Int32Array(2);
-%ArrayBufferViewGetByteLength(_holder);
diff --git a/test/mjsunit/runtime-gen/arraybufferviewgetbyteoffset.js b/test/mjsunit/runtime-gen/arraybufferviewgetbyteoffset.js
deleted file mode 100644
index c9d025b..0000000
--- a/test/mjsunit/runtime-gen/arraybufferviewgetbyteoffset.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Int32Array(2);
-%ArrayBufferViewGetByteOffset(_holder);
diff --git a/test/mjsunit/runtime-gen/arrayconcat.js b/test/mjsunit/runtime-gen/arrayconcat.js
deleted file mode 100644
index 0b08069..0000000
--- a/test/mjsunit/runtime-gen/arrayconcat.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = [1, 'a'];
-%ArrayConcat(arg0);
diff --git a/test/mjsunit/runtime-gen/availablelocalesof.js b/test/mjsunit/runtime-gen/availablelocalesof.js
deleted file mode 100644
index 819acbe..0000000
--- a/test/mjsunit/runtime-gen/availablelocalesof.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _service = "foo";
-%AvailableLocalesOf(_service);
diff --git a/test/mjsunit/runtime-gen/basicjsonstringify.js b/test/mjsunit/runtime-gen/basicjsonstringify.js
deleted file mode 100644
index 0b2f086..0000000
--- a/test/mjsunit/runtime-gen/basicjsonstringify.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%BasicJSONStringify(_object);
diff --git a/test/mjsunit/runtime-gen/boundfunctiongetbindings.js b/test/mjsunit/runtime-gen/boundfunctiongetbindings.js
deleted file mode 100644
index 8c4986f..0000000
--- a/test/mjsunit/runtime-gen/boundfunctiongetbindings.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _callable = new Object();
-%BoundFunctionGetBindings(_callable);
diff --git a/test/mjsunit/runtime-gen/break.js b/test/mjsunit/runtime-gen/break.js
deleted file mode 100644
index f53766a..0000000
--- a/test/mjsunit/runtime-gen/break.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%Break();
diff --git a/test/mjsunit/runtime-gen/breakiteratoradopttext.js b/test/mjsunit/runtime-gen/breakiteratoradopttext.js
deleted file mode 100644
index 768b948..0000000
--- a/test/mjsunit/runtime-gen/breakiteratoradopttext.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-var _text = "foo";
-%BreakIteratorAdoptText(arg0, _text);
diff --git a/test/mjsunit/runtime-gen/breakiteratorbreaktype.js b/test/mjsunit/runtime-gen/breakiteratorbreaktype.js
deleted file mode 100644
index 5f21fae..0000000
--- a/test/mjsunit/runtime-gen/breakiteratorbreaktype.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-%BreakIteratorBreakType(arg0);
diff --git a/test/mjsunit/runtime-gen/breakiteratorcurrent.js b/test/mjsunit/runtime-gen/breakiteratorcurrent.js
deleted file mode 100644
index e11d43d..0000000
--- a/test/mjsunit/runtime-gen/breakiteratorcurrent.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-%BreakIteratorCurrent(arg0);
diff --git a/test/mjsunit/runtime-gen/breakiteratorfirst.js b/test/mjsunit/runtime-gen/breakiteratorfirst.js
deleted file mode 100644
index ab584d5..0000000
--- a/test/mjsunit/runtime-gen/breakiteratorfirst.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-%BreakIteratorFirst(arg0);
diff --git a/test/mjsunit/runtime-gen/breakiteratornext.js b/test/mjsunit/runtime-gen/breakiteratornext.js
deleted file mode 100644
index 5401641..0000000
--- a/test/mjsunit/runtime-gen/breakiteratornext.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.v8BreakIterator());
-%BreakIteratorNext(arg0);
diff --git a/test/mjsunit/runtime-gen/canonicalizelanguagetag.js b/test/mjsunit/runtime-gen/canonicalizelanguagetag.js
deleted file mode 100644
index debf352..0000000
--- a/test/mjsunit/runtime-gen/canonicalizelanguagetag.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _locale_id_str = "foo";
-%CanonicalizeLanguageTag(_locale_id_str);
diff --git a/test/mjsunit/runtime-gen/changebreakonexception.js b/test/mjsunit/runtime-gen/changebreakonexception.js
deleted file mode 100644
index e4e3bfa..0000000
--- a/test/mjsunit/runtime-gen/changebreakonexception.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _type_arg = 32;
-var _enable = true;
-%ChangeBreakOnException(_type_arg, _enable);
diff --git a/test/mjsunit/runtime-gen/charfromcode.js b/test/mjsunit/runtime-gen/charfromcode.js
deleted file mode 100644
index 9e01522..0000000
--- a/test/mjsunit/runtime-gen/charfromcode.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _code = 32;
-%CharFromCode(_code);
diff --git a/test/mjsunit/runtime-gen/checkexecutionstate.js b/test/mjsunit/runtime-gen/checkexecutionstate.js
deleted file mode 100644
index 93690fe..0000000
--- a/test/mjsunit/runtime-gen/checkexecutionstate.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-try {
-%CheckExecutionState(_break_id);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/checkisbootstrapping.js b/test/mjsunit/runtime-gen/checkisbootstrapping.js
deleted file mode 100644
index 27a8224..0000000
--- a/test/mjsunit/runtime-gen/checkisbootstrapping.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-try {
-%CheckIsBootstrapping();
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/classof.js b/test/mjsunit/runtime-gen/classof.js
deleted file mode 100644
index 59fdde8..0000000
--- a/test/mjsunit/runtime-gen/classof.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%_ClassOf(_obj);
diff --git a/test/mjsunit/runtime-gen/clearbreakpoint.js b/test/mjsunit/runtime-gen/clearbreakpoint.js
deleted file mode 100644
index 20ecded..0000000
--- a/test/mjsunit/runtime-gen/clearbreakpoint.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_point_object_arg = new Object();
-%ClearBreakPoint(_break_point_object_arg);
diff --git a/test/mjsunit/runtime-gen/clearfunctiontypefeedback.js b/test/mjsunit/runtime-gen/clearfunctiontypefeedback.js
deleted file mode 100644
index e613228..0000000
--- a/test/mjsunit/runtime-gen/clearfunctiontypefeedback.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-%ClearFunctionTypeFeedback(_function);
diff --git a/test/mjsunit/runtime-gen/clearstepping.js b/test/mjsunit/runtime-gen/clearstepping.js
deleted file mode 100644
index 8461e31..0000000
--- a/test/mjsunit/runtime-gen/clearstepping.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%ClearStepping();
diff --git a/test/mjsunit/runtime-gen/collectstacktrace.js b/test/mjsunit/runtime-gen/collectstacktrace.js
deleted file mode 100644
index 0272863..0000000
--- a/test/mjsunit/runtime-gen/collectstacktrace.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _error_object = new Object();
-var _caller = new Object();
-var _limit = 32;
-%CollectStackTrace(_error_object, _caller, _limit);
diff --git a/test/mjsunit/runtime-gen/compilestring.js b/test/mjsunit/runtime-gen/compilestring.js
deleted file mode 100644
index cc3e10c..0000000
--- a/test/mjsunit/runtime-gen/compilestring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _source = "foo";
-var arg1 = false;
-%CompileString(_source, arg1);
diff --git a/test/mjsunit/runtime-gen/constructdouble.js b/test/mjsunit/runtime-gen/constructdouble.js
deleted file mode 100644
index 4ed6c6a..0000000
--- a/test/mjsunit/runtime-gen/constructdouble.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _hi = 32;
-var _lo = 32;
-%ConstructDouble(_hi, _lo);
diff --git a/test/mjsunit/runtime-gen/createbreakiterator.js b/test/mjsunit/runtime-gen/createbreakiterator.js
deleted file mode 100644
index 03f4451..0000000
--- a/test/mjsunit/runtime-gen/createbreakiterator.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = 'en-US';
-var arg1 = {type: 'string'};
-var _resolved = new Object();
-%CreateBreakIterator(arg0, arg1, _resolved);
diff --git a/test/mjsunit/runtime-gen/createcollator.js b/test/mjsunit/runtime-gen/createcollator.js
deleted file mode 100644
index 2ecebb8..0000000
--- a/test/mjsunit/runtime-gen/createcollator.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _locale = "foo";
-var _options = new Object();
-var _resolved = new Object();
-%CreateCollator(_locale, _options, _resolved);
diff --git a/test/mjsunit/runtime-gen/createglobalprivatesymbol.js b/test/mjsunit/runtime-gen/createglobalprivatesymbol.js
deleted file mode 100644
index c90ac44..0000000
--- a/test/mjsunit/runtime-gen/createglobalprivatesymbol.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _name = "foo";
-%CreateGlobalPrivateSymbol(_name);
diff --git a/test/mjsunit/runtime-gen/createjsfunctionproxy.js b/test/mjsunit/runtime-gen/createjsfunctionproxy.js
deleted file mode 100644
index cdb0725..0000000
--- a/test/mjsunit/runtime-gen/createjsfunctionproxy.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _handler = new Object();
-var arg1 = function() {};
-var _construct_trap = function() {};
-var _prototype = new Object();
-%CreateJSFunctionProxy(_handler, arg1, _construct_trap, _prototype);
diff --git a/test/mjsunit/runtime-gen/createjsproxy.js b/test/mjsunit/runtime-gen/createjsproxy.js
deleted file mode 100644
index 4fb9744..0000000
--- a/test/mjsunit/runtime-gen/createjsproxy.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _handler = new Object();
-var _prototype = new Object();
-%CreateJSProxy(_handler, _prototype);
diff --git a/test/mjsunit/runtime-gen/createprivatesymbol.js b/test/mjsunit/runtime-gen/createprivatesymbol.js
deleted file mode 100644
index 23f4cca..0000000
--- a/test/mjsunit/runtime-gen/createprivatesymbol.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = "foo";
-%CreatePrivateSymbol(arg0);
diff --git a/test/mjsunit/runtime-gen/createsymbol.js b/test/mjsunit/runtime-gen/createsymbol.js
deleted file mode 100644
index 0a02aa9..0000000
--- a/test/mjsunit/runtime-gen/createsymbol.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = "foo";
-%CreateSymbol(arg0);
diff --git a/test/mjsunit/runtime-gen/dataviewgetbuffer.js b/test/mjsunit/runtime-gen/dataviewgetbuffer.js
deleted file mode 100644
index b4dc225..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetbuffer.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-%DataViewGetBuffer(_holder);
diff --git a/test/mjsunit/runtime-gen/dataviewgetfloat32.js b/test/mjsunit/runtime-gen/dataviewgetfloat32.js
deleted file mode 100644
index 3d377a2..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetfloat32.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetFloat32(_holder, _offset, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewgetfloat64.js b/test/mjsunit/runtime-gen/dataviewgetfloat64.js
deleted file mode 100644
index 82fc220..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetfloat64.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetFloat64(_holder, _offset, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewgetint16.js b/test/mjsunit/runtime-gen/dataviewgetint16.js
deleted file mode 100644
index e418ed2..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetint16.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetInt16(_holder, _offset, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewgetint32.js b/test/mjsunit/runtime-gen/dataviewgetint32.js
deleted file mode 100644
index 787101d..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetint32.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetInt32(_holder, _offset, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewgetint8.js b/test/mjsunit/runtime-gen/dataviewgetint8.js
deleted file mode 100644
index d3a3864..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetint8.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetInt8(_holder, _offset, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewgetuint16.js b/test/mjsunit/runtime-gen/dataviewgetuint16.js
deleted file mode 100644
index 0437811..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetuint16.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetUint16(_holder, _offset, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewgetuint32.js b/test/mjsunit/runtime-gen/dataviewgetuint32.js
deleted file mode 100644
index af5122d..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetuint32.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetUint32(_holder, _offset, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewgetuint8.js b/test/mjsunit/runtime-gen/dataviewgetuint8.js
deleted file mode 100644
index 77e2c2d..0000000
--- a/test/mjsunit/runtime-gen/dataviewgetuint8.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _is_little_endian = true;
-%DataViewGetUint8(_holder, _offset, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewinitialize.js b/test/mjsunit/runtime-gen/dataviewinitialize.js
deleted file mode 100644
index 0836651..0000000
--- a/test/mjsunit/runtime-gen/dataviewinitialize.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _buffer = new ArrayBuffer(8);
-var _byte_offset = 1.5;
-var _byte_length = 1.5;
-%DataViewInitialize(_holder, _buffer, _byte_offset, _byte_length);
diff --git a/test/mjsunit/runtime-gen/dataviewsetfloat32.js b/test/mjsunit/runtime-gen/dataviewsetfloat32.js
deleted file mode 100644
index 009bbcc..0000000
--- a/test/mjsunit/runtime-gen/dataviewsetfloat32.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetFloat32(_holder, _offset, _value, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewsetfloat64.js b/test/mjsunit/runtime-gen/dataviewsetfloat64.js
deleted file mode 100644
index 97c5d3e..0000000
--- a/test/mjsunit/runtime-gen/dataviewsetfloat64.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetFloat64(_holder, _offset, _value, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewsetint16.js b/test/mjsunit/runtime-gen/dataviewsetint16.js
deleted file mode 100644
index 27b608b..0000000
--- a/test/mjsunit/runtime-gen/dataviewsetint16.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetInt16(_holder, _offset, _value, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewsetint32.js b/test/mjsunit/runtime-gen/dataviewsetint32.js
deleted file mode 100644
index 2a4164c..0000000
--- a/test/mjsunit/runtime-gen/dataviewsetint32.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetInt32(_holder, _offset, _value, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewsetint8.js b/test/mjsunit/runtime-gen/dataviewsetint8.js
deleted file mode 100644
index 9990c4b..0000000
--- a/test/mjsunit/runtime-gen/dataviewsetint8.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetInt8(_holder, _offset, _value, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewsetuint16.js b/test/mjsunit/runtime-gen/dataviewsetuint16.js
deleted file mode 100644
index fc2800c..0000000
--- a/test/mjsunit/runtime-gen/dataviewsetuint16.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetUint16(_holder, _offset, _value, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewsetuint32.js b/test/mjsunit/runtime-gen/dataviewsetuint32.js
deleted file mode 100644
index 837623f..0000000
--- a/test/mjsunit/runtime-gen/dataviewsetuint32.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetUint32(_holder, _offset, _value, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/dataviewsetuint8.js b/test/mjsunit/runtime-gen/dataviewsetuint8.js
deleted file mode 100644
index d1e7bc1..0000000
--- a/test/mjsunit/runtime-gen/dataviewsetuint8.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new DataView(new ArrayBuffer(24));
-var _offset = 1.5;
-var _value = 1.5;
-var _is_little_endian = true;
-%DataViewSetUint8(_holder, _offset, _value, _is_little_endian);
diff --git a/test/mjsunit/runtime-gen/datecacheversion.js b/test/mjsunit/runtime-gen/datecacheversion.js
deleted file mode 100644
index d622fa2..0000000
--- a/test/mjsunit/runtime-gen/datecacheversion.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%DateCacheVersion();
diff --git a/test/mjsunit/runtime-gen/datecurrenttime.js b/test/mjsunit/runtime-gen/datecurrenttime.js
deleted file mode 100644
index 569eeff..0000000
--- a/test/mjsunit/runtime-gen/datecurrenttime.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%DateCurrentTime();
diff --git a/test/mjsunit/runtime-gen/datelocaltimezone.js b/test/mjsunit/runtime-gen/datelocaltimezone.js
deleted file mode 100644
index 16897f3..0000000
--- a/test/mjsunit/runtime-gen/datelocaltimezone.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%DateLocalTimezone(_x);
diff --git a/test/mjsunit/runtime-gen/datemakeday.js b/test/mjsunit/runtime-gen/datemakeday.js
deleted file mode 100644
index 1be1e1d..0000000
--- a/test/mjsunit/runtime-gen/datemakeday.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _year = 1;
-var _month = 1;
-%DateMakeDay(_year, _month);
diff --git a/test/mjsunit/runtime-gen/dateparsestring.js b/test/mjsunit/runtime-gen/dateparsestring.js
deleted file mode 100644
index e050e58..0000000
--- a/test/mjsunit/runtime-gen/dateparsestring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _str = "foo";
-var arg1 = new Array(8);
-%DateParseString(_str, arg1);
diff --git a/test/mjsunit/runtime-gen/datesetvalue.js b/test/mjsunit/runtime-gen/datesetvalue.js
deleted file mode 100644
index 773f0af..0000000
--- a/test/mjsunit/runtime-gen/datesetvalue.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _date = new Date();
-var _time = 1.5;
-var _is_utc = 1;
-%DateSetValue(_date, _time, _is_utc);
diff --git a/test/mjsunit/runtime-gen/datetoutc.js b/test/mjsunit/runtime-gen/datetoutc.js
deleted file mode 100644
index cfa70d1..0000000
--- a/test/mjsunit/runtime-gen/datetoutc.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%DateToUTC(_x);
diff --git a/test/mjsunit/runtime-gen/debugbreak.js b/test/mjsunit/runtime-gen/debugbreak.js
deleted file mode 100644
index be807d9..0000000
--- a/test/mjsunit/runtime-gen/debugbreak.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%DebugBreak();
diff --git a/test/mjsunit/runtime-gen/debugcallbacksupportsstepping.js b/test/mjsunit/runtime-gen/debugcallbacksupportsstepping.js
deleted file mode 100644
index 8707381..0000000
--- a/test/mjsunit/runtime-gen/debugcallbacksupportsstepping.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _callback = new Object();
-%DebugCallbackSupportsStepping(_callback);
diff --git a/test/mjsunit/runtime-gen/debugconstructedby.js b/test/mjsunit/runtime-gen/debugconstructedby.js
deleted file mode 100644
index d7fa0f4..0000000
--- a/test/mjsunit/runtime-gen/debugconstructedby.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _constructor = function() {};
-var _max_references = 32;
-%DebugConstructedBy(_constructor, _max_references);
diff --git a/test/mjsunit/runtime-gen/debugdisassembleconstructor.js b/test/mjsunit/runtime-gen/debugdisassembleconstructor.js
deleted file mode 100644
index a2b1a53..0000000
--- a/test/mjsunit/runtime-gen/debugdisassembleconstructor.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _func = function() {};
-%DebugDisassembleConstructor(_func);
diff --git a/test/mjsunit/runtime-gen/debugdisassemblefunction.js b/test/mjsunit/runtime-gen/debugdisassemblefunction.js
deleted file mode 100644
index e49d974..0000000
--- a/test/mjsunit/runtime-gen/debugdisassemblefunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _func = function() {};
-%DebugDisassembleFunction(_func);
diff --git a/test/mjsunit/runtime-gen/debugevaluate.js b/test/mjsunit/runtime-gen/debugevaluate.js
deleted file mode 100644
index 18a83b2..0000000
--- a/test/mjsunit/runtime-gen/debugevaluate.js
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-var _wrapped_id = 1;
-var _inlined_jsframe_index = 32;
-var _source = "foo";
-var _disable_break = true;
-var _context_extension = new Object();
-try {
-%DebugEvaluate(_break_id, _wrapped_id, _inlined_jsframe_index, _source, _disable_break, _context_extension);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/debugevaluateglobal.js b/test/mjsunit/runtime-gen/debugevaluateglobal.js
deleted file mode 100644
index 90c41ae..0000000
--- a/test/mjsunit/runtime-gen/debugevaluateglobal.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-var _source = "foo";
-var _disable_break = true;
-var _context_extension = new Object();
-try {
-%DebugEvaluateGlobal(_break_id, _source, _disable_break, _context_extension);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/debuggetproperty.js b/test/mjsunit/runtime-gen/debuggetproperty.js
deleted file mode 100644
index b5b5191..0000000
--- a/test/mjsunit/runtime-gen/debuggetproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-var _name = "name";
-%DebugGetProperty(_obj, _name);
diff --git a/test/mjsunit/runtime-gen/debuggetpropertydetails.js b/test/mjsunit/runtime-gen/debuggetpropertydetails.js
deleted file mode 100644
index e317636..0000000
--- a/test/mjsunit/runtime-gen/debuggetpropertydetails.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-var _name = "name";
-%DebugGetPropertyDetails(_obj, _name);
diff --git a/test/mjsunit/runtime-gen/debuggetprototype.js b/test/mjsunit/runtime-gen/debuggetprototype.js
deleted file mode 100644
index f2fc9d5..0000000
--- a/test/mjsunit/runtime-gen/debuggetprototype.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%DebugGetPrototype(_obj);
diff --git a/test/mjsunit/runtime-gen/debugindexedinterceptorelementvalue.js b/test/mjsunit/runtime-gen/debugindexedinterceptorelementvalue.js
deleted file mode 100644
index 26d9cc4..0000000
--- a/test/mjsunit/runtime-gen/debugindexedinterceptorelementvalue.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-var _index = 32;
-try {
-%DebugIndexedInterceptorElementValue(_obj, _index);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/debugnamedinterceptorpropertyvalue.js b/test/mjsunit/runtime-gen/debugnamedinterceptorpropertyvalue.js
deleted file mode 100644
index 714bd08..0000000
--- a/test/mjsunit/runtime-gen/debugnamedinterceptorpropertyvalue.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-var _name = "name";
-try {
-%DebugNamedInterceptorPropertyValue(_obj, _name);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/debugpreparestepinifstepping.js b/test/mjsunit/runtime-gen/debugpreparestepinifstepping.js
deleted file mode 100644
index 48af82c..0000000
--- a/test/mjsunit/runtime-gen/debugpreparestepinifstepping.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _callback = function() {};
-%DebugPrepareStepInIfStepping(_callback);
diff --git a/test/mjsunit/runtime-gen/debugprintscopes.js b/test/mjsunit/runtime-gen/debugprintscopes.js
deleted file mode 100644
index 8ecff6a..0000000
--- a/test/mjsunit/runtime-gen/debugprintscopes.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%DebugPrintScopes();
diff --git a/test/mjsunit/runtime-gen/debugpromisehandleepilogue.js b/test/mjsunit/runtime-gen/debugpromisehandleepilogue.js
deleted file mode 100644
index 998c800..0000000
--- a/test/mjsunit/runtime-gen/debugpromisehandleepilogue.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%DebugPromiseHandleEpilogue();
diff --git a/test/mjsunit/runtime-gen/debugpromisehandleprologue.js b/test/mjsunit/runtime-gen/debugpromisehandleprologue.js
deleted file mode 100644
index f9c14ea..0000000
--- a/test/mjsunit/runtime-gen/debugpromisehandleprologue.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _promise_getter = function() {};
-%DebugPromiseHandlePrologue(_promise_getter);
diff --git a/test/mjsunit/runtime-gen/debugpropertyattributesfromdetails.js b/test/mjsunit/runtime-gen/debugpropertyattributesfromdetails.js
deleted file mode 100644
index 34ef24a..0000000
--- a/test/mjsunit/runtime-gen/debugpropertyattributesfromdetails.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _details = 513;
-%DebugPropertyAttributesFromDetails(_details);
diff --git a/test/mjsunit/runtime-gen/debugpropertyindexfromdetails.js b/test/mjsunit/runtime-gen/debugpropertyindexfromdetails.js
deleted file mode 100644
index 215870a..0000000
--- a/test/mjsunit/runtime-gen/debugpropertyindexfromdetails.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _details = 513;
-%DebugPropertyIndexFromDetails(_details);
diff --git a/test/mjsunit/runtime-gen/debugpropertytypefromdetails.js b/test/mjsunit/runtime-gen/debugpropertytypefromdetails.js
deleted file mode 100644
index 115982e..0000000
--- a/test/mjsunit/runtime-gen/debugpropertytypefromdetails.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _details = 513;
-%DebugPropertyTypeFromDetails(_details);
diff --git a/test/mjsunit/runtime-gen/debugreferencedby.js b/test/mjsunit/runtime-gen/debugreferencedby.js
deleted file mode 100644
index 214e6d4..0000000
--- a/test/mjsunit/runtime-gen/debugreferencedby.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _target = new Object();
-var _instance_filter = new Object();
-var _max_references = 32;
-%DebugReferencedBy(_target, _instance_filter, _max_references);
diff --git a/test/mjsunit/runtime-gen/debugtrace.js b/test/mjsunit/runtime-gen/debugtrace.js
deleted file mode 100644
index 4200333..0000000
--- a/test/mjsunit/runtime-gen/debugtrace.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%DebugTrace();
diff --git a/test/mjsunit/runtime-gen/defineorredefineaccessorproperty.js b/test/mjsunit/runtime-gen/defineorredefineaccessorproperty.js
deleted file mode 100644
index 5eb6f24..0000000
--- a/test/mjsunit/runtime-gen/defineorredefineaccessorproperty.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-var _name = "name";
-var arg2 = function() {};
-var arg3 = function() {};
-var arg4 = 2;
-%DefineOrRedefineAccessorProperty(_obj, _name, arg2, arg3, arg4);
diff --git a/test/mjsunit/runtime-gen/defineorredefinedataproperty.js b/test/mjsunit/runtime-gen/defineorredefinedataproperty.js
deleted file mode 100644
index 64f8b98..0000000
--- a/test/mjsunit/runtime-gen/defineorredefinedataproperty.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _js_object = new Object();
-var _name = "name";
-var _obj_value = new Object();
-var _unchecked = 1;
-%DefineOrRedefineDataProperty(_js_object, _name, _obj_value, _unchecked);
diff --git a/test/mjsunit/runtime-gen/deleteproperty.js b/test/mjsunit/runtime-gen/deleteproperty.js
deleted file mode 100644
index 2f17c34..0000000
--- a/test/mjsunit/runtime-gen/deleteproperty.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _key = "name";
-var _strict_mode = 1;
-%DeleteProperty(_object, _key, _strict_mode);
diff --git a/test/mjsunit/runtime-gen/deoptimizefunction.js b/test/mjsunit/runtime-gen/deoptimizefunction.js
deleted file mode 100644
index 64fddac..0000000
--- a/test/mjsunit/runtime-gen/deoptimizefunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-%DeoptimizeFunction(_function);
diff --git a/test/mjsunit/runtime-gen/doublehi.js b/test/mjsunit/runtime-gen/doublehi.js
deleted file mode 100644
index 9212f6d..0000000
--- a/test/mjsunit/runtime-gen/doublehi.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%DoubleHi(_x);
diff --git a/test/mjsunit/runtime-gen/doublelo.js b/test/mjsunit/runtime-gen/doublelo.js
deleted file mode 100644
index cbc0b8e..0000000
--- a/test/mjsunit/runtime-gen/doublelo.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%DoubleLo(_x);
diff --git a/test/mjsunit/runtime-gen/enqueuemicrotask.js b/test/mjsunit/runtime-gen/enqueuemicrotask.js
deleted file mode 100644
index 94d7495..0000000
--- a/test/mjsunit/runtime-gen/enqueuemicrotask.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _microtask = function() {};
-%EnqueueMicrotask(_microtask);
diff --git a/test/mjsunit/runtime-gen/estimatenumberofelements.js b/test/mjsunit/runtime-gen/estimatenumberofelements.js
deleted file mode 100644
index dd1f397..0000000
--- a/test/mjsunit/runtime-gen/estimatenumberofelements.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Array();
-%EstimateNumberOfElements(_object);
diff --git a/test/mjsunit/runtime-gen/executeindebugcontext.js b/test/mjsunit/runtime-gen/executeindebugcontext.js
deleted file mode 100644
index 8d5080a..0000000
--- a/test/mjsunit/runtime-gen/executeindebugcontext.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-var _without_debugger = true;
-%ExecuteInDebugContext(_function, _without_debugger);
diff --git a/test/mjsunit/runtime-gen/finisharrayprototypesetup.js b/test/mjsunit/runtime-gen/finisharrayprototypesetup.js
deleted file mode 100644
index 6ced997..0000000
--- a/test/mjsunit/runtime-gen/finisharrayprototypesetup.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _prototype = new Array();
-%FinishArrayPrototypeSetup(_prototype);
diff --git a/test/mjsunit/runtime-gen/fix.js b/test/mjsunit/runtime-gen/fix.js
deleted file mode 100644
index e0ec1c6..0000000
--- a/test/mjsunit/runtime-gen/fix.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _proxy = Proxy.create({});
-%Fix(_proxy);
diff --git a/test/mjsunit/runtime-gen/flattenstring.js b/test/mjsunit/runtime-gen/flattenstring.js
deleted file mode 100644
index 5fd5e17..0000000
--- a/test/mjsunit/runtime-gen/flattenstring.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _str = "foo";
-%FlattenString(_str);
diff --git a/test/mjsunit/runtime-gen/functionbindarguments.js b/test/mjsunit/runtime-gen/functionbindarguments.js
deleted file mode 100644
index cc842fa..0000000
--- a/test/mjsunit/runtime-gen/functionbindarguments.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _bound_function = function() {};
-var _bindee = new Object();
-var arg2 = undefined;
-var _new_length = 1.5;
-%FunctionBindArguments(_bound_function, _bindee, arg2, _new_length);
diff --git a/test/mjsunit/runtime-gen/functiongetinferredname.js b/test/mjsunit/runtime-gen/functiongetinferredname.js
deleted file mode 100644
index 4db674c..0000000
--- a/test/mjsunit/runtime-gen/functiongetinferredname.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionGetInferredName(_f);
diff --git a/test/mjsunit/runtime-gen/functiongetname.js b/test/mjsunit/runtime-gen/functiongetname.js
deleted file mode 100644
index eae0d18..0000000
--- a/test/mjsunit/runtime-gen/functiongetname.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionGetName(_f);
diff --git a/test/mjsunit/runtime-gen/functiongetscript.js b/test/mjsunit/runtime-gen/functiongetscript.js
deleted file mode 100644
index c641990..0000000
--- a/test/mjsunit/runtime-gen/functiongetscript.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-%FunctionGetScript(_fun);
diff --git a/test/mjsunit/runtime-gen/functiongetscriptsourceposition.js b/test/mjsunit/runtime-gen/functiongetscriptsourceposition.js
deleted file mode 100644
index 996b633..0000000
--- a/test/mjsunit/runtime-gen/functiongetscriptsourceposition.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-%FunctionGetScriptSourcePosition(_fun);
diff --git a/test/mjsunit/runtime-gen/functiongetsourcecode.js b/test/mjsunit/runtime-gen/functiongetsourcecode.js
deleted file mode 100644
index 8f1195c..0000000
--- a/test/mjsunit/runtime-gen/functiongetsourcecode.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionGetSourceCode(_f);
diff --git a/test/mjsunit/runtime-gen/functionisapifunction.js b/test/mjsunit/runtime-gen/functionisapifunction.js
deleted file mode 100644
index bd00a33..0000000
--- a/test/mjsunit/runtime-gen/functionisapifunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionIsAPIFunction(_f);
diff --git a/test/mjsunit/runtime-gen/functionisbuiltin.js b/test/mjsunit/runtime-gen/functionisbuiltin.js
deleted file mode 100644
index be4f734..0000000
--- a/test/mjsunit/runtime-gen/functionisbuiltin.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionIsBuiltin(_f);
diff --git a/test/mjsunit/runtime-gen/functionisgenerator.js b/test/mjsunit/runtime-gen/functionisgenerator.js
deleted file mode 100644
index 7e9f2f8..0000000
--- a/test/mjsunit/runtime-gen/functionisgenerator.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionIsGenerator(_f);
diff --git a/test/mjsunit/runtime-gen/functionmarknameshouldprintasanonymous.js b/test/mjsunit/runtime-gen/functionmarknameshouldprintasanonymous.js
deleted file mode 100644
index 244420b..0000000
--- a/test/mjsunit/runtime-gen/functionmarknameshouldprintasanonymous.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionMarkNameShouldPrintAsAnonymous(_f);
diff --git a/test/mjsunit/runtime-gen/functionnameshouldprintasanonymous.js b/test/mjsunit/runtime-gen/functionnameshouldprintasanonymous.js
deleted file mode 100644
index 4d4941d..0000000
--- a/test/mjsunit/runtime-gen/functionnameshouldprintasanonymous.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionNameShouldPrintAsAnonymous(_f);
diff --git a/test/mjsunit/runtime-gen/functionremoveprototype.js b/test/mjsunit/runtime-gen/functionremoveprototype.js
deleted file mode 100644
index e1433a6..0000000
--- a/test/mjsunit/runtime-gen/functionremoveprototype.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-%FunctionRemovePrototype(_f);
diff --git a/test/mjsunit/runtime-gen/functionsetinstanceclassname.js b/test/mjsunit/runtime-gen/functionsetinstanceclassname.js
deleted file mode 100644
index 7a44ffd..0000000
--- a/test/mjsunit/runtime-gen/functionsetinstanceclassname.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-var _name = "foo";
-%FunctionSetInstanceClassName(_fun, _name);
diff --git a/test/mjsunit/runtime-gen/functionsetlength.js b/test/mjsunit/runtime-gen/functionsetlength.js
deleted file mode 100644
index 4bb1072..0000000
--- a/test/mjsunit/runtime-gen/functionsetlength.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-var _length = 1;
-%FunctionSetLength(_fun, _length);
diff --git a/test/mjsunit/runtime-gen/functionsetname.js b/test/mjsunit/runtime-gen/functionsetname.js
deleted file mode 100644
index f46ced2..0000000
--- a/test/mjsunit/runtime-gen/functionsetname.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _f = function() {};
-var _name = "foo";
-%FunctionSetName(_f, _name);
diff --git a/test/mjsunit/runtime-gen/functionsetprototype.js b/test/mjsunit/runtime-gen/functionsetprototype.js
deleted file mode 100644
index 5cfe78a..0000000
--- a/test/mjsunit/runtime-gen/functionsetprototype.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-var _value = new Object();
-%FunctionSetPrototype(_fun, _value);
diff --git a/test/mjsunit/runtime-gen/getallscopesdetails.js b/test/mjsunit/runtime-gen/getallscopesdetails.js
deleted file mode 100644
index dc26830..0000000
--- a/test/mjsunit/runtime-gen/getallscopesdetails.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-var _wrapped_id = 1;
-var _inlined_jsframe_index = 32;
-var _flag = true;
-try {
-%GetAllScopesDetails(_break_id, _wrapped_id, _inlined_jsframe_index, _flag);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getandclearoverflowedstacktrace.js b/test/mjsunit/runtime-gen/getandclearoverflowedstacktrace.js
deleted file mode 100644
index 8abf790..0000000
--- a/test/mjsunit/runtime-gen/getandclearoverflowedstacktrace.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _error_object = new Object();
-%GetAndClearOverflowedStackTrace(_error_object);
diff --git a/test/mjsunit/runtime-gen/getargumentsproperty.js b/test/mjsunit/runtime-gen/getargumentsproperty.js
deleted file mode 100644
index 4802277..0000000
--- a/test/mjsunit/runtime-gen/getargumentsproperty.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _raw_key = new Object();
-%GetArgumentsProperty(_raw_key);
diff --git a/test/mjsunit/runtime-gen/getarraykeys.js b/test/mjsunit/runtime-gen/getarraykeys.js
deleted file mode 100644
index bb7e88a..0000000
--- a/test/mjsunit/runtime-gen/getarraykeys.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _array = new Object();
-var _length = 32;
-%GetArrayKeys(_array, _length);
diff --git a/test/mjsunit/runtime-gen/getbreaklocations.js b/test/mjsunit/runtime-gen/getbreaklocations.js
deleted file mode 100644
index be23e4d..0000000
--- a/test/mjsunit/runtime-gen/getbreaklocations.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-var arg1 = 0;
-%GetBreakLocations(_fun, arg1);
diff --git a/test/mjsunit/runtime-gen/getcalltrap.js b/test/mjsunit/runtime-gen/getcalltrap.js
deleted file mode 100644
index 1ec8a7d..0000000
--- a/test/mjsunit/runtime-gen/getcalltrap.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _proxy = Proxy.createFunction({}, function() {});
-%GetCallTrap(_proxy);
diff --git a/test/mjsunit/runtime-gen/getconstructordelegate.js b/test/mjsunit/runtime-gen/getconstructordelegate.js
deleted file mode 100644
index 55bce7c..0000000
--- a/test/mjsunit/runtime-gen/getconstructordelegate.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%GetConstructorDelegate(_object);
diff --git a/test/mjsunit/runtime-gen/getconstructtrap.js b/test/mjsunit/runtime-gen/getconstructtrap.js
deleted file mode 100644
index 2d88957..0000000
--- a/test/mjsunit/runtime-gen/getconstructtrap.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _proxy = Proxy.createFunction({}, function() {});
-%GetConstructTrap(_proxy);
diff --git a/test/mjsunit/runtime-gen/getdataproperty.js b/test/mjsunit/runtime-gen/getdataproperty.js
deleted file mode 100644
index 9d18efd..0000000
--- a/test/mjsunit/runtime-gen/getdataproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _key = "name";
-%GetDataProperty(_object, _key);
diff --git a/test/mjsunit/runtime-gen/getdefaulticulocale.js b/test/mjsunit/runtime-gen/getdefaulticulocale.js
deleted file mode 100644
index ef971d1..0000000
--- a/test/mjsunit/runtime-gen/getdefaulticulocale.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%GetDefaultICULocale();
diff --git a/test/mjsunit/runtime-gen/getdefaultreceiver.js b/test/mjsunit/runtime-gen/getdefaultreceiver.js
deleted file mode 100644
index 313bdce..0000000
--- a/test/mjsunit/runtime-gen/getdefaultreceiver.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = function() {};
-%GetDefaultReceiver(arg0);
diff --git a/test/mjsunit/runtime-gen/getframecount.js b/test/mjsunit/runtime-gen/getframecount.js
deleted file mode 100644
index 361da7f..0000000
--- a/test/mjsunit/runtime-gen/getframecount.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-try {
-%GetFrameCount(_break_id);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getframedetails.js b/test/mjsunit/runtime-gen/getframedetails.js
deleted file mode 100644
index 9f04756..0000000
--- a/test/mjsunit/runtime-gen/getframedetails.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-var _index = 32;
-try {
-%GetFrameDetails(_break_id, _index);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getfunctioncodepositionfromsource.js b/test/mjsunit/runtime-gen/getfunctioncodepositionfromsource.js
deleted file mode 100644
index 8767e69..0000000
--- a/test/mjsunit/runtime-gen/getfunctioncodepositionfromsource.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-var _source_position = 32;
-%GetFunctionCodePositionFromSource(_function, _source_position);
diff --git a/test/mjsunit/runtime-gen/getfunctiondelegate.js b/test/mjsunit/runtime-gen/getfunctiondelegate.js
deleted file mode 100644
index 4fb5002..0000000
--- a/test/mjsunit/runtime-gen/getfunctiondelegate.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%GetFunctionDelegate(_object);
diff --git a/test/mjsunit/runtime-gen/getfunctionscopecount.js b/test/mjsunit/runtime-gen/getfunctionscopecount.js
deleted file mode 100644
index afd5b8a..0000000
--- a/test/mjsunit/runtime-gen/getfunctionscopecount.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-%GetFunctionScopeCount(_fun);
diff --git a/test/mjsunit/runtime-gen/getfunctionscopedetails.js b/test/mjsunit/runtime-gen/getfunctionscopedetails.js
deleted file mode 100644
index f1f5378..0000000
--- a/test/mjsunit/runtime-gen/getfunctionscopedetails.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-var _index = 32;
-%GetFunctionScopeDetails(_fun, _index);
diff --git a/test/mjsunit/runtime-gen/gethandler.js b/test/mjsunit/runtime-gen/gethandler.js
deleted file mode 100644
index 411f608..0000000
--- a/test/mjsunit/runtime-gen/gethandler.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _proxy = Proxy.create({});
-%GetHandler(_proxy);
diff --git a/test/mjsunit/runtime-gen/getheapusage.js b/test/mjsunit/runtime-gen/getheapusage.js
deleted file mode 100644
index 50b4f2e..0000000
--- a/test/mjsunit/runtime-gen/getheapusage.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%GetHeapUsage();
diff --git a/test/mjsunit/runtime-gen/getimplfrominitializedintlobject.js b/test/mjsunit/runtime-gen/getimplfrominitializedintlobject.js
deleted file mode 100644
index 2eb845e..0000000
--- a/test/mjsunit/runtime-gen/getimplfrominitializedintlobject.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = new Intl.NumberFormat('en-US');
-%GetImplFromInitializedIntlObject(arg0);
diff --git a/test/mjsunit/runtime-gen/getindexedinterceptorelementnames.js b/test/mjsunit/runtime-gen/getindexedinterceptorelementnames.js
deleted file mode 100644
index 0e3505d..0000000
--- a/test/mjsunit/runtime-gen/getindexedinterceptorelementnames.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%GetIndexedInterceptorElementNames(_obj);
diff --git a/test/mjsunit/runtime-gen/getinterceptorinfo.js b/test/mjsunit/runtime-gen/getinterceptorinfo.js
deleted file mode 100644
index d7f8395..0000000
--- a/test/mjsunit/runtime-gen/getinterceptorinfo.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%GetInterceptorInfo(_obj);
diff --git a/test/mjsunit/runtime-gen/getlanguagetagvariants.js b/test/mjsunit/runtime-gen/getlanguagetagvariants.js
deleted file mode 100644
index fdced6f..0000000
--- a/test/mjsunit/runtime-gen/getlanguagetagvariants.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _input = new Array();
-%GetLanguageTagVariants(_input);
diff --git a/test/mjsunit/runtime-gen/getnamedinterceptorpropertynames.js b/test/mjsunit/runtime-gen/getnamedinterceptorpropertynames.js
deleted file mode 100644
index 30ebdcf..0000000
--- a/test/mjsunit/runtime-gen/getnamedinterceptorpropertynames.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%GetNamedInterceptorPropertyNames(_obj);
diff --git a/test/mjsunit/runtime-gen/getobjectcontextnotifierperformchange.js b/test/mjsunit/runtime-gen/getobjectcontextnotifierperformchange.js
deleted file mode 100644
index 4c5cdc6..0000000
--- a/test/mjsunit/runtime-gen/getobjectcontextnotifierperformchange.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object_info = new Object();
-%GetObjectContextNotifierPerformChange(_object_info);
diff --git a/test/mjsunit/runtime-gen/getobjectcontextobjectgetnotifier.js b/test/mjsunit/runtime-gen/getobjectcontextobjectgetnotifier.js
deleted file mode 100644
index eb458a2..0000000
--- a/test/mjsunit/runtime-gen/getobjectcontextobjectgetnotifier.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%GetObjectContextObjectGetNotifier(_object);
diff --git a/test/mjsunit/runtime-gen/getobjectcontextobjectobserve.js b/test/mjsunit/runtime-gen/getobjectcontextobjectobserve.js
deleted file mode 100644
index 3b10b56..0000000
--- a/test/mjsunit/runtime-gen/getobjectcontextobjectobserve.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%GetObjectContextObjectObserve(_object);
diff --git a/test/mjsunit/runtime-gen/getobservationstate.js b/test/mjsunit/runtime-gen/getobservationstate.js
deleted file mode 100644
index 4a2d8ee..0000000
--- a/test/mjsunit/runtime-gen/getobservationstate.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%GetObservationState();
diff --git a/test/mjsunit/runtime-gen/getoptimizationcount.js b/test/mjsunit/runtime-gen/getoptimizationcount.js
deleted file mode 100644
index 1979919..0000000
--- a/test/mjsunit/runtime-gen/getoptimizationcount.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-%GetOptimizationCount(_function);
diff --git a/test/mjsunit/runtime-gen/getoptimizationstatus.js b/test/mjsunit/runtime-gen/getoptimizationstatus.js
deleted file mode 100644
index c386196..0000000
--- a/test/mjsunit/runtime-gen/getoptimizationstatus.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-var _sync = "foo";
-%GetOptimizationStatus(_function, _sync);
diff --git a/test/mjsunit/runtime-gen/getownelementnames.js b/test/mjsunit/runtime-gen/getownelementnames.js
deleted file mode 100644
index 3f02cba..0000000
--- a/test/mjsunit/runtime-gen/getownelementnames.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%GetOwnElementNames(_obj);
diff --git a/test/mjsunit/runtime-gen/getownproperty.js b/test/mjsunit/runtime-gen/getownproperty.js
deleted file mode 100644
index f864279..0000000
--- a/test/mjsunit/runtime-gen/getownproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-var _name = "name";
-%GetOwnProperty(_obj, _name);
diff --git a/test/mjsunit/runtime-gen/getownpropertynames.js b/test/mjsunit/runtime-gen/getownpropertynames.js
deleted file mode 100644
index f05268f..0000000
--- a/test/mjsunit/runtime-gen/getownpropertynames.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-var _filter_value = 1;
-%GetOwnPropertyNames(_obj, _filter_value);
diff --git a/test/mjsunit/runtime-gen/getproperty.js b/test/mjsunit/runtime-gen/getproperty.js
deleted file mode 100644
index 66a49c8..0000000
--- a/test/mjsunit/runtime-gen/getproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _key = new Object();
-%GetProperty(_object, _key);
diff --git a/test/mjsunit/runtime-gen/getpropertynames.js b/test/mjsunit/runtime-gen/getpropertynames.js
deleted file mode 100644
index 2a41896..0000000
--- a/test/mjsunit/runtime-gen/getpropertynames.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%GetPropertyNames(_object);
diff --git a/test/mjsunit/runtime-gen/getpropertynamesfast.js b/test/mjsunit/runtime-gen/getpropertynamesfast.js
deleted file mode 100644
index 2fbe93c..0000000
--- a/test/mjsunit/runtime-gen/getpropertynamesfast.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _raw_object = new Object();
-%GetPropertyNamesFast(_raw_object);
diff --git a/test/mjsunit/runtime-gen/getprototype.js b/test/mjsunit/runtime-gen/getprototype.js
deleted file mode 100644
index c17be9c..0000000
--- a/test/mjsunit/runtime-gen/getprototype.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%GetPrototype(_obj);
diff --git a/test/mjsunit/runtime-gen/getrootnan.js b/test/mjsunit/runtime-gen/getrootnan.js
deleted file mode 100644
index 9acc60f..0000000
--- a/test/mjsunit/runtime-gen/getrootnan.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-try {
-%GetRootNaN();
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getscopecount.js b/test/mjsunit/runtime-gen/getscopecount.js
deleted file mode 100644
index f1d0a2c..0000000
--- a/test/mjsunit/runtime-gen/getscopecount.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-var _wrapped_id = 1;
-try {
-%GetScopeCount(_break_id, _wrapped_id);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getscopedetails.js b/test/mjsunit/runtime-gen/getscopedetails.js
deleted file mode 100644
index e6aeb97..0000000
--- a/test/mjsunit/runtime-gen/getscopedetails.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-var _wrapped_id = 1;
-var _inlined_jsframe_index = 32;
-var _index = 32;
-try {
-%GetScopeDetails(_break_id, _wrapped_id, _inlined_jsframe_index, _index);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getscript.js b/test/mjsunit/runtime-gen/getscript.js
deleted file mode 100644
index 9dbe491..0000000
--- a/test/mjsunit/runtime-gen/getscript.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _script_name = "foo";
-%GetScript(_script_name);
diff --git a/test/mjsunit/runtime-gen/getstepinpositions.js b/test/mjsunit/runtime-gen/getstepinpositions.js
deleted file mode 100644
index e5a442a..0000000
--- a/test/mjsunit/runtime-gen/getstepinpositions.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-var _wrapped_id = 1;
-try {
-%GetStepInPositions(_break_id, _wrapped_id);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/gettemplatefield.js b/test/mjsunit/runtime-gen/gettemplatefield.js
deleted file mode 100644
index 1bf0ced..0000000
--- a/test/mjsunit/runtime-gen/gettemplatefield.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _templ = new Object();
-var _index = 1;
-try {
-%GetTemplateField(_templ, _index);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getthreadcount.js b/test/mjsunit/runtime-gen/getthreadcount.js
deleted file mode 100644
index 0926cb6..0000000
--- a/test/mjsunit/runtime-gen/getthreadcount.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-try {
-%GetThreadCount(_break_id);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getthreaddetails.js b/test/mjsunit/runtime-gen/getthreaddetails.js
deleted file mode 100644
index 7712cda..0000000
--- a/test/mjsunit/runtime-gen/getthreaddetails.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _break_id = 32;
-var _index = 32;
-try {
-%GetThreadDetails(_break_id, _index);
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/getv8version.js b/test/mjsunit/runtime-gen/getv8version.js
deleted file mode 100644
index 05ca133..0000000
--- a/test/mjsunit/runtime-gen/getv8version.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%GetV8Version();
diff --git a/test/mjsunit/runtime-gen/globalprint.js b/test/mjsunit/runtime-gen/globalprint.js
deleted file mode 100644
index bf4d7e9..0000000
--- a/test/mjsunit/runtime-gen/globalprint.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _string = "foo";
-%GlobalPrint(_string);
diff --git a/test/mjsunit/runtime-gen/globalreceiver.js b/test/mjsunit/runtime-gen/globalreceiver.js
deleted file mode 100644
index 332a334..0000000
--- a/test/mjsunit/runtime-gen/globalreceiver.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _global = new Object();
-%GlobalReceiver(_global);
diff --git a/test/mjsunit/runtime-gen/haselement.js b/test/mjsunit/runtime-gen/haselement.js
deleted file mode 100644
index 94dd9af..0000000
--- a/test/mjsunit/runtime-gen/haselement.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _receiver = new Object();
-var _index = 1;
-%HasElement(_receiver, _index);
diff --git a/test/mjsunit/runtime-gen/hasownproperty.js b/test/mjsunit/runtime-gen/hasownproperty.js
deleted file mode 100644
index 7be0a65..0000000
--- a/test/mjsunit/runtime-gen/hasownproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _key = "name";
-%HasOwnProperty(_object, _key);
diff --git a/test/mjsunit/runtime-gen/hasproperty.js b/test/mjsunit/runtime-gen/hasproperty.js
deleted file mode 100644
index e9b5d97..0000000
--- a/test/mjsunit/runtime-gen/hasproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _receiver = new Object();
-var _key = "name";
-%HasProperty(_receiver, _key);
diff --git a/test/mjsunit/runtime-gen/havesamemap.js b/test/mjsunit/runtime-gen/havesamemap.js
deleted file mode 100644
index 54d5530..0000000
--- a/test/mjsunit/runtime-gen/havesamemap.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj1 = new Object();
-var _obj2 = new Object();
-%HaveSameMap(_obj1, _obj2);
diff --git a/test/mjsunit/runtime-gen/ignoreattributesandsetproperty.js b/test/mjsunit/runtime-gen/ignoreattributesandsetproperty.js
deleted file mode 100644
index 0c82809..0000000
--- a/test/mjsunit/runtime-gen/ignoreattributesandsetproperty.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _name = "name";
-var _value = new Object();
-var _unchecked_value = 1;
-%IgnoreAttributesAndSetProperty(_object, _name, _value, _unchecked_value);
diff --git a/test/mjsunit/runtime-gen/initializevarglobal.js b/test/mjsunit/runtime-gen/initializevarglobal.js
deleted file mode 100644
index ad42fdc..0000000
--- a/test/mjsunit/runtime-gen/initializevarglobal.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _name = "foo";
-var _strict_mode = 1;
-var _value = new Object();
-%InitializeVarGlobal(_name, _strict_mode, _value);
diff --git a/test/mjsunit/runtime-gen/internalcompare.js b/test/mjsunit/runtime-gen/internalcompare.js
deleted file mode 100644
index 02dd91e..0000000
--- a/test/mjsunit/runtime-gen/internalcompare.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.Collator('en-US'));
-var _string1 = "foo";
-var _string2 = "foo";
-%InternalCompare(arg0, _string1, _string2);
diff --git a/test/mjsunit/runtime-gen/internaldateformat.js b/test/mjsunit/runtime-gen/internaldateformat.js
deleted file mode 100644
index e3bdce6..0000000
--- a/test/mjsunit/runtime-gen/internaldateformat.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'));
-var _date = new Date();
-%InternalDateFormat(arg0, _date);
diff --git a/test/mjsunit/runtime-gen/internaldateparse.js b/test/mjsunit/runtime-gen/internaldateparse.js
deleted file mode 100644
index 7bf57a6..0000000
--- a/test/mjsunit/runtime-gen/internaldateparse.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'));
-var _date_string = "foo";
-%InternalDateParse(arg0, _date_string);
diff --git a/test/mjsunit/runtime-gen/internalnumberformat.js b/test/mjsunit/runtime-gen/internalnumberformat.js
deleted file mode 100644
index e2634ac..0000000
--- a/test/mjsunit/runtime-gen/internalnumberformat.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'));
-var _number = new Object();
-%InternalNumberFormat(arg0, _number);
diff --git a/test/mjsunit/runtime-gen/internalnumberparse.js b/test/mjsunit/runtime-gen/internalnumberparse.js
deleted file mode 100644
index d3f8097..0000000
--- a/test/mjsunit/runtime-gen/internalnumberparse.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = %GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'));
-var _number_string = "foo";
-%InternalNumberParse(arg0, _number_string);
diff --git a/test/mjsunit/runtime-gen/isattachedglobal.js b/test/mjsunit/runtime-gen/isattachedglobal.js
deleted file mode 100644
index bd20076..0000000
--- a/test/mjsunit/runtime-gen/isattachedglobal.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _global = new Object();
-%IsAttachedGlobal(_global);
diff --git a/test/mjsunit/runtime-gen/isbreakonexception.js b/test/mjsunit/runtime-gen/isbreakonexception.js
deleted file mode 100644
index 880bced..0000000
--- a/test/mjsunit/runtime-gen/isbreakonexception.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _type_arg = 32;
-%IsBreakOnException(_type_arg);
diff --git a/test/mjsunit/runtime-gen/isconcurrentrecompilationsupported.js b/test/mjsunit/runtime-gen/isconcurrentrecompilationsupported.js
deleted file mode 100644
index 8cbe7b7..0000000
--- a/test/mjsunit/runtime-gen/isconcurrentrecompilationsupported.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%IsConcurrentRecompilationSupported();
diff --git a/test/mjsunit/runtime-gen/isextensible.js b/test/mjsunit/runtime-gen/isextensible.js
deleted file mode 100644
index a64e477..0000000
--- a/test/mjsunit/runtime-gen/isextensible.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%IsExtensible(_obj);
diff --git a/test/mjsunit/runtime-gen/isinitializedintlobject.js b/test/mjsunit/runtime-gen/isinitializedintlobject.js
deleted file mode 100644
index f0b7d3e..0000000
--- a/test/mjsunit/runtime-gen/isinitializedintlobject.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _input = new Object();
-%IsInitializedIntlObject(_input);
diff --git a/test/mjsunit/runtime-gen/isinitializedintlobjectoftype.js b/test/mjsunit/runtime-gen/isinitializedintlobjectoftype.js
deleted file mode 100644
index 752b200..0000000
--- a/test/mjsunit/runtime-gen/isinitializedintlobjectoftype.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _input = new Object();
-var _expected_type = "foo";
-%IsInitializedIntlObjectOfType(_input, _expected_type);
diff --git a/test/mjsunit/runtime-gen/isinprototypechain.js b/test/mjsunit/runtime-gen/isinprototypechain.js
deleted file mode 100644
index fbc476b..0000000
--- a/test/mjsunit/runtime-gen/isinprototypechain.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _O = new Object();
-var _V = new Object();
-%IsInPrototypeChain(_O, _V);
diff --git a/test/mjsunit/runtime-gen/isjsfunctionproxy.js b/test/mjsunit/runtime-gen/isjsfunctionproxy.js
deleted file mode 100644
index 699be03..0000000
--- a/test/mjsunit/runtime-gen/isjsfunctionproxy.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%IsJSFunctionProxy(_obj);
diff --git a/test/mjsunit/runtime-gen/isjsglobalproxy.js b/test/mjsunit/runtime-gen/isjsglobalproxy.js
deleted file mode 100644
index 6bcc768..0000000
--- a/test/mjsunit/runtime-gen/isjsglobalproxy.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%IsJSGlobalProxy(_obj);
diff --git a/test/mjsunit/runtime-gen/isjsmodule.js b/test/mjsunit/runtime-gen/isjsmodule.js
deleted file mode 100644
index 577b470..0000000
--- a/test/mjsunit/runtime-gen/isjsmodule.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%IsJSModule(_obj);
diff --git a/test/mjsunit/runtime-gen/isjsproxy.js b/test/mjsunit/runtime-gen/isjsproxy.js
deleted file mode 100644
index 477dacc..0000000
--- a/test/mjsunit/runtime-gen/isjsproxy.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%IsJSProxy(_obj);
diff --git a/test/mjsunit/runtime-gen/isobserved.js b/test/mjsunit/runtime-gen/isobserved.js
deleted file mode 100644
index cab9a9b..0000000
--- a/test/mjsunit/runtime-gen/isobserved.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%IsObserved(_obj);
diff --git a/test/mjsunit/runtime-gen/ispropertyenumerable.js b/test/mjsunit/runtime-gen/ispropertyenumerable.js
deleted file mode 100644
index ea1b4e2..0000000
--- a/test/mjsunit/runtime-gen/ispropertyenumerable.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _key = "name";
-%IsPropertyEnumerable(_object, _key);
diff --git a/test/mjsunit/runtime-gen/issloppymodefunction.js b/test/mjsunit/runtime-gen/issloppymodefunction.js
deleted file mode 100644
index 16cfe89..0000000
--- a/test/mjsunit/runtime-gen/issloppymodefunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = function() {};
-%IsSloppyModeFunction(arg0);
diff --git a/test/mjsunit/runtime-gen/istemplate.js b/test/mjsunit/runtime-gen/istemplate.js
deleted file mode 100644
index 582967f..0000000
--- a/test/mjsunit/runtime-gen/istemplate.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _arg = new Object();
-%IsTemplate(_arg);
diff --git a/test/mjsunit/runtime-gen/isvalidsmi.js b/test/mjsunit/runtime-gen/isvalidsmi.js
deleted file mode 100644
index 9bd5b47..0000000
--- a/test/mjsunit/runtime-gen/isvalidsmi.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _number = 32;
-%IsValidSmi(_number);
diff --git a/test/mjsunit/runtime-gen/keyedgetproperty.js b/test/mjsunit/runtime-gen/keyedgetproperty.js
deleted file mode 100644
index df199ab..0000000
--- a/test/mjsunit/runtime-gen/keyedgetproperty.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _receiver_obj = new Object();
-var _key_obj = new Object();
-%KeyedGetProperty(_receiver_obj, _key_obj);
diff --git a/test/mjsunit/runtime-gen/liveeditcheckanddropactivations.js b/test/mjsunit/runtime-gen/liveeditcheckanddropactivations.js
deleted file mode 100644
index 8c435f1..0000000
--- a/test/mjsunit/runtime-gen/liveeditcheckanddropactivations.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _shared_array = new Array();
-var _do_drop = true;
-%LiveEditCheckAndDropActivations(_shared_array, _do_drop);
diff --git a/test/mjsunit/runtime-gen/liveeditcomparestrings.js b/test/mjsunit/runtime-gen/liveeditcomparestrings.js
deleted file mode 100644
index 1b66738..0000000
--- a/test/mjsunit/runtime-gen/liveeditcomparestrings.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _s1 = "foo";
-var _s2 = "foo";
-%LiveEditCompareStrings(_s1, _s2);
diff --git a/test/mjsunit/runtime-gen/liveeditfunctionsetscript.js b/test/mjsunit/runtime-gen/liveeditfunctionsetscript.js
deleted file mode 100644
index 3f50b11..0000000
--- a/test/mjsunit/runtime-gen/liveeditfunctionsetscript.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function_object = new Object();
-var _script_object = new Object();
-%LiveEditFunctionSetScript(_function_object, _script_object);
diff --git a/test/mjsunit/runtime-gen/loadfromsuper.js b/test/mjsunit/runtime-gen/loadfromsuper.js
new file mode 100644
index 0000000..25f4ff9
--- /dev/null
+++ b/test/mjsunit/runtime-gen/loadfromsuper.js
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
+// Flags: --allow-natives-syntax --harmony --harmony-proxies
+var _home_object = new Object();
+var _receiver = new Object();
+var _name = "name";
+%LoadFromSuper(_home_object, _receiver, _name);
diff --git a/test/mjsunit/runtime-gen/loadmutabledouble.js b/test/mjsunit/runtime-gen/loadmutabledouble.js
deleted file mode 100644
index 7864bcd..0000000
--- a/test/mjsunit/runtime-gen/loadmutabledouble.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = {foo: 1.2};
-var _index = 1;
-%LoadMutableDouble(arg0, _index);
diff --git a/test/mjsunit/runtime-gen/lookupaccessor.js b/test/mjsunit/runtime-gen/lookupaccessor.js
deleted file mode 100644
index 42ac82a..0000000
--- a/test/mjsunit/runtime-gen/lookupaccessor.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _receiver = new Object();
-var _name = "name";
-var _flag = 1;
-%LookupAccessor(_receiver, _name, _flag);
diff --git a/test/mjsunit/runtime-gen/mapclear.js b/test/mjsunit/runtime-gen/mapclear.js
deleted file mode 100644
index 1e1b0e7..0000000
--- a/test/mjsunit/runtime-gen/mapclear.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map();
-%MapClear(_holder);
diff --git a/test/mjsunit/runtime-gen/mapdelete.js b/test/mjsunit/runtime-gen/mapdelete.js
deleted file mode 100644
index 995dee1..0000000
--- a/test/mjsunit/runtime-gen/mapdelete.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map();
-var _key = new Object();
-%MapDelete(_holder, _key);
diff --git a/test/mjsunit/runtime-gen/mapget.js b/test/mjsunit/runtime-gen/mapget.js
deleted file mode 100644
index 6a88b6e..0000000
--- a/test/mjsunit/runtime-gen/mapget.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map();
-var _key = new Object();
-%MapGet(_holder, _key);
diff --git a/test/mjsunit/runtime-gen/mapgetsize.js b/test/mjsunit/runtime-gen/mapgetsize.js
deleted file mode 100644
index be25087..0000000
--- a/test/mjsunit/runtime-gen/mapgetsize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map();
-%MapGetSize(_holder);
diff --git a/test/mjsunit/runtime-gen/maphas.js b/test/mjsunit/runtime-gen/maphas.js
deleted file mode 100644
index 9b804ba..0000000
--- a/test/mjsunit/runtime-gen/maphas.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map();
-var _key = new Object();
-%MapHas(_holder, _key);
diff --git a/test/mjsunit/runtime-gen/mapinitialize.js b/test/mjsunit/runtime-gen/mapinitialize.js
deleted file mode 100644
index 5149abf..0000000
--- a/test/mjsunit/runtime-gen/mapinitialize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map();
-%MapInitialize(_holder);
diff --git a/test/mjsunit/runtime-gen/mapiteratorinitialize.js b/test/mjsunit/runtime-gen/mapiteratorinitialize.js
deleted file mode 100644
index afc5b2e..0000000
--- a/test/mjsunit/runtime-gen/mapiteratorinitialize.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map().entries();
-var _map = new Map();
-var _kind = 1;
-%MapIteratorInitialize(_holder, _map, _kind);
diff --git a/test/mjsunit/runtime-gen/mapiteratornext.js b/test/mjsunit/runtime-gen/mapiteratornext.js
deleted file mode 100644
index cbd7e5b..0000000
--- a/test/mjsunit/runtime-gen/mapiteratornext.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map().entries();
-%MapIteratorNext(_holder);
diff --git a/test/mjsunit/runtime-gen/mapset.js b/test/mjsunit/runtime-gen/mapset.js
deleted file mode 100644
index 75a5ad2..0000000
--- a/test/mjsunit/runtime-gen/mapset.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Map();
-var _key = new Object();
-var _value = new Object();
-%MapSet(_holder, _key, _value);
diff --git a/test/mjsunit/runtime-gen/markasinitializedintlobjectoftype.js b/test/mjsunit/runtime-gen/markasinitializedintlobjectoftype.js
deleted file mode 100644
index ddf2da3..0000000
--- a/test/mjsunit/runtime-gen/markasinitializedintlobjectoftype.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _input = new Object();
-var _type = "foo";
-var _impl = new Object();
-%MarkAsInitializedIntlObjectOfType(_input, _type, _impl);
diff --git a/test/mjsunit/runtime-gen/mathacos.js b/test/mjsunit/runtime-gen/mathacos.js
deleted file mode 100644
index e09b216..0000000
--- a/test/mjsunit/runtime-gen/mathacos.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%MathAcos(_x);
diff --git a/test/mjsunit/runtime-gen/mathasin.js b/test/mjsunit/runtime-gen/mathasin.js
deleted file mode 100644
index 6f268a6..0000000
--- a/test/mjsunit/runtime-gen/mathasin.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%MathAsin(_x);
diff --git a/test/mjsunit/runtime-gen/mathatan.js b/test/mjsunit/runtime-gen/mathatan.js
deleted file mode 100644
index 2de6785..0000000
--- a/test/mjsunit/runtime-gen/mathatan.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%MathAtan(_x);
diff --git a/test/mjsunit/runtime-gen/mathatan2.js b/test/mjsunit/runtime-gen/mathatan2.js
deleted file mode 100644
index 0938e78..0000000
--- a/test/mjsunit/runtime-gen/mathatan2.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-var _y = 1.5;
-%MathAtan2(_x, _y);
diff --git a/test/mjsunit/runtime-gen/mathexprt.js b/test/mjsunit/runtime-gen/mathexprt.js
deleted file mode 100644
index 4aaacd0..0000000
--- a/test/mjsunit/runtime-gen/mathexprt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%MathExpRT(_x);
diff --git a/test/mjsunit/runtime-gen/mathfloorrt.js b/test/mjsunit/runtime-gen/mathfloorrt.js
deleted file mode 100644
index 0918474..0000000
--- a/test/mjsunit/runtime-gen/mathfloorrt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%MathFloorRT(_x);
diff --git a/test/mjsunit/runtime-gen/mathfround.js b/test/mjsunit/runtime-gen/mathfround.js
deleted file mode 100644
index 1531529..0000000
--- a/test/mjsunit/runtime-gen/mathfround.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%MathFround(_x);
diff --git a/test/mjsunit/runtime-gen/mathlogrt.js b/test/mjsunit/runtime-gen/mathlogrt.js
deleted file mode 100644
index 6df1b66..0000000
--- a/test/mjsunit/runtime-gen/mathlogrt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%MathLogRT(_x);
diff --git a/test/mjsunit/runtime-gen/mathsqrtrt.js b/test/mjsunit/runtime-gen/mathsqrtrt.js
deleted file mode 100644
index 622d68a..0000000
--- a/test/mjsunit/runtime-gen/mathsqrtrt.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%MathSqrtRT(_x);
diff --git a/test/mjsunit/runtime-gen/maxsmi.js b/test/mjsunit/runtime-gen/maxsmi.js
deleted file mode 100644
index 24cf862..0000000
--- a/test/mjsunit/runtime-gen/maxsmi.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%MaxSmi();
diff --git a/test/mjsunit/runtime-gen/movearraycontents.js b/test/mjsunit/runtime-gen/movearraycontents.js
deleted file mode 100644
index 74c2ab0..0000000
--- a/test/mjsunit/runtime-gen/movearraycontents.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _from = new Array();
-var _to = new Array();
-%MoveArrayContents(_from, _to);
diff --git a/test/mjsunit/runtime-gen/neveroptimizefunction.js b/test/mjsunit/runtime-gen/neveroptimizefunction.js
deleted file mode 100644
index 4cbb9ce..0000000
--- a/test/mjsunit/runtime-gen/neveroptimizefunction.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-%NeverOptimizeFunction(_function);
diff --git a/test/mjsunit/runtime-gen/newobjectfrombound.js b/test/mjsunit/runtime-gen/newobjectfrombound.js
deleted file mode 100644
index e18e7cb..0000000
--- a/test/mjsunit/runtime-gen/newobjectfrombound.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = (function() {}).bind({});
-%NewObjectFromBound(arg0);
diff --git a/test/mjsunit/runtime-gen/newstring.js b/test/mjsunit/runtime-gen/newstring.js
deleted file mode 100644
index 51fb421..0000000
--- a/test/mjsunit/runtime-gen/newstring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _length = 1;
-var _is_one_byte = true;
-%NewString(_length, _is_one_byte);
diff --git a/test/mjsunit/runtime-gen/newstringwrapper.js b/test/mjsunit/runtime-gen/newstringwrapper.js
deleted file mode 100644
index 5339304..0000000
--- a/test/mjsunit/runtime-gen/newstringwrapper.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _value = "foo";
-%NewStringWrapper(_value);
diff --git a/test/mjsunit/runtime-gen/newsymbolwrapper.js b/test/mjsunit/runtime-gen/newsymbolwrapper.js
deleted file mode 100644
index d860226..0000000
--- a/test/mjsunit/runtime-gen/newsymbolwrapper.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _symbol = Symbol("symbol");
-%NewSymbolWrapper(_symbol);
diff --git a/test/mjsunit/runtime-gen/notifycontextdisposed.js b/test/mjsunit/runtime-gen/notifycontextdisposed.js
deleted file mode 100644
index 96933d5..0000000
--- a/test/mjsunit/runtime-gen/notifycontextdisposed.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%NotifyContextDisposed();
diff --git a/test/mjsunit/runtime-gen/numberadd.js b/test/mjsunit/runtime-gen/numberadd.js
deleted file mode 100644
index 2c7c3ef..0000000
--- a/test/mjsunit/runtime-gen/numberadd.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-var _y = 1.5;
-%NumberAdd(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numberand.js b/test/mjsunit/runtime-gen/numberand.js
deleted file mode 100644
index b4b5ef2..0000000
--- a/test/mjsunit/runtime-gen/numberand.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 32;
-var _y = 32;
-%NumberAnd(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numbercompare.js b/test/mjsunit/runtime-gen/numbercompare.js
deleted file mode 100644
index ddf3140..0000000
--- a/test/mjsunit/runtime-gen/numbercompare.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-var _y = 1.5;
-var _uncomparable_result = new Object();
-%NumberCompare(_x, _y, _uncomparable_result);
diff --git a/test/mjsunit/runtime-gen/numberdiv.js b/test/mjsunit/runtime-gen/numberdiv.js
deleted file mode 100644
index f92fa01..0000000
--- a/test/mjsunit/runtime-gen/numberdiv.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-var _y = 1.5;
-%NumberDiv(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numberequals.js b/test/mjsunit/runtime-gen/numberequals.js
deleted file mode 100644
index 3400a31..0000000
--- a/test/mjsunit/runtime-gen/numberequals.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-var _y = 1.5;
-%NumberEquals(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numberimul.js b/test/mjsunit/runtime-gen/numberimul.js
deleted file mode 100644
index eefd909..0000000
--- a/test/mjsunit/runtime-gen/numberimul.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 32;
-var _y = 32;
-%NumberImul(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numbermod.js b/test/mjsunit/runtime-gen/numbermod.js
deleted file mode 100644
index 517afe2..0000000
--- a/test/mjsunit/runtime-gen/numbermod.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-var _y = 1.5;
-%NumberMod(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numbermul.js b/test/mjsunit/runtime-gen/numbermul.js
deleted file mode 100644
index a48e4a5..0000000
--- a/test/mjsunit/runtime-gen/numbermul.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-var _y = 1.5;
-%NumberMul(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numberor.js b/test/mjsunit/runtime-gen/numberor.js
deleted file mode 100644
index 463a3bf..0000000
--- a/test/mjsunit/runtime-gen/numberor.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 32;
-var _y = 32;
-%NumberOr(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numbersar.js b/test/mjsunit/runtime-gen/numbersar.js
deleted file mode 100644
index b45ce68..0000000
--- a/test/mjsunit/runtime-gen/numbersar.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 32;
-var _y = 32;
-%NumberSar(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numbershl.js b/test/mjsunit/runtime-gen/numbershl.js
deleted file mode 100644
index ee3acec..0000000
--- a/test/mjsunit/runtime-gen/numbershl.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 32;
-var _y = 32;
-%NumberShl(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numbershr.js b/test/mjsunit/runtime-gen/numbershr.js
deleted file mode 100644
index 6cb706a..0000000
--- a/test/mjsunit/runtime-gen/numbershr.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 32;
-var _y = 32;
-%NumberShr(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numbersub.js b/test/mjsunit/runtime-gen/numbersub.js
deleted file mode 100644
index bf387f5..0000000
--- a/test/mjsunit/runtime-gen/numbersub.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-var _y = 1.5;
-%NumberSub(_x, _y);
diff --git a/test/mjsunit/runtime-gen/numbertoexponential.js b/test/mjsunit/runtime-gen/numbertoexponential.js
deleted file mode 100644
index 21468b4..0000000
--- a/test/mjsunit/runtime-gen/numbertoexponential.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _value = 1.5;
-var _f_number = 1.5;
-%NumberToExponential(_value, _f_number);
diff --git a/test/mjsunit/runtime-gen/numbertofixed.js b/test/mjsunit/runtime-gen/numbertofixed.js
deleted file mode 100644
index ac398d0..0000000
--- a/test/mjsunit/runtime-gen/numbertofixed.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _value = 1.5;
-var _f_number = 1.5;
-%NumberToFixed(_value, _f_number);
diff --git a/test/mjsunit/runtime-gen/numbertointeger.js b/test/mjsunit/runtime-gen/numbertointeger.js
deleted file mode 100644
index 8c66518..0000000
--- a/test/mjsunit/runtime-gen/numbertointeger.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _number = 1.5;
-%NumberToInteger(_number);
diff --git a/test/mjsunit/runtime-gen/numbertointegermapminuszero.js b/test/mjsunit/runtime-gen/numbertointegermapminuszero.js
deleted file mode 100644
index 395a952..0000000
--- a/test/mjsunit/runtime-gen/numbertointegermapminuszero.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _number = 1.5;
-%NumberToIntegerMapMinusZero(_number);
diff --git a/test/mjsunit/runtime-gen/numbertojsint32.js b/test/mjsunit/runtime-gen/numbertojsint32.js
deleted file mode 100644
index 31efee6..0000000
--- a/test/mjsunit/runtime-gen/numbertojsint32.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _number = 1.5;
-%NumberToJSInt32(_number);
diff --git a/test/mjsunit/runtime-gen/numbertojsuint32.js b/test/mjsunit/runtime-gen/numbertojsuint32.js
deleted file mode 100644
index 657ae21..0000000
--- a/test/mjsunit/runtime-gen/numbertojsuint32.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _number = 32;
-%NumberToJSUint32(_number);
diff --git a/test/mjsunit/runtime-gen/numbertoprecision.js b/test/mjsunit/runtime-gen/numbertoprecision.js
deleted file mode 100644
index 4775200..0000000
--- a/test/mjsunit/runtime-gen/numbertoprecision.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _value = 1.5;
-var _f_number = 1.5;
-%NumberToPrecision(_value, _f_number);
diff --git a/test/mjsunit/runtime-gen/numbertoradixstring.js b/test/mjsunit/runtime-gen/numbertoradixstring.js
deleted file mode 100644
index 6a90cd9..0000000
--- a/test/mjsunit/runtime-gen/numbertoradixstring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _value = 1.5;
-var arg1 = 2;
-%NumberToRadixString(_value, arg1);
diff --git a/test/mjsunit/runtime-gen/numberunaryminus.js b/test/mjsunit/runtime-gen/numberunaryminus.js
deleted file mode 100644
index c334b9d..0000000
--- a/test/mjsunit/runtime-gen/numberunaryminus.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 1.5;
-%NumberUnaryMinus(_x);
diff --git a/test/mjsunit/runtime-gen/numberxor.js b/test/mjsunit/runtime-gen/numberxor.js
deleted file mode 100644
index 6c71065..0000000
--- a/test/mjsunit/runtime-gen/numberxor.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = 32;
-var _y = 32;
-%NumberXor(_x, _y);
diff --git a/test/mjsunit/runtime-gen/objectfreeze.js b/test/mjsunit/runtime-gen/objectfreeze.js
deleted file mode 100644
index 84ee4aa..0000000
--- a/test/mjsunit/runtime-gen/objectfreeze.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%ObjectFreeze(_object);
diff --git a/test/mjsunit/runtime-gen/objectwascreatedincurrentorigin.js b/test/mjsunit/runtime-gen/objectwascreatedincurrentorigin.js
deleted file mode 100644
index a819868..0000000
--- a/test/mjsunit/runtime-gen/objectwascreatedincurrentorigin.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%ObjectWasCreatedInCurrentOrigin(_object);
diff --git a/test/mjsunit/runtime-gen/observationweakmapcreate.js b/test/mjsunit/runtime-gen/observationweakmapcreate.js
deleted file mode 100644
index e744254..0000000
--- a/test/mjsunit/runtime-gen/observationweakmapcreate.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%ObservationWeakMapCreate();
diff --git a/test/mjsunit/runtime-gen/observerobjectandrecordhavesameorigin.js b/test/mjsunit/runtime-gen/observerobjectandrecordhavesameorigin.js
deleted file mode 100644
index 7f41cd0..0000000
--- a/test/mjsunit/runtime-gen/observerobjectandrecordhavesameorigin.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _observer = function() {};
-var _object = new Object();
-var _record = new Object();
-%ObserverObjectAndRecordHaveSameOrigin(_observer, _object, _record);
diff --git a/test/mjsunit/runtime-gen/optimizefunctiononnextcall.js b/test/mjsunit/runtime-gen/optimizefunctiononnextcall.js
deleted file mode 100644
index 435c4ce..0000000
--- a/test/mjsunit/runtime-gen/optimizefunctiononnextcall.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-var _type = "foo";
-%OptimizeFunctionOnNextCall(_function, _type);
diff --git a/test/mjsunit/runtime-gen/optimizeobjectforaddingmultipleproperties.js b/test/mjsunit/runtime-gen/optimizeobjectforaddingmultipleproperties.js
deleted file mode 100644
index f394fbf..0000000
--- a/test/mjsunit/runtime-gen/optimizeobjectforaddingmultipleproperties.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _properties = 1;
-%OptimizeObjectForAddingMultipleProperties(_object, _properties);
diff --git a/test/mjsunit/runtime-gen/ownkeys.js b/test/mjsunit/runtime-gen/ownkeys.js
deleted file mode 100644
index 7e4220d..0000000
--- a/test/mjsunit/runtime-gen/ownkeys.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _raw_object = new Object();
-%OwnKeys(_raw_object);
diff --git a/test/mjsunit/runtime-gen/parsejson.js b/test/mjsunit/runtime-gen/parsejson.js
deleted file mode 100644
index 062520c..0000000
--- a/test/mjsunit/runtime-gen/parsejson.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = "{}";
-%ParseJson(arg0);
diff --git a/test/mjsunit/runtime-gen/preventextensions.js b/test/mjsunit/runtime-gen/preventextensions.js
deleted file mode 100644
index d3abed5..0000000
--- a/test/mjsunit/runtime-gen/preventextensions.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%PreventExtensions(_obj);
diff --git a/test/mjsunit/runtime-gen/pushifabsent.js b/test/mjsunit/runtime-gen/pushifabsent.js
deleted file mode 100644
index a139caf..0000000
--- a/test/mjsunit/runtime-gen/pushifabsent.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _array = new Array();
-var _element = new Object();
-%PushIfAbsent(_array, _element);
diff --git a/test/mjsunit/runtime-gen/quotejsonstring.js b/test/mjsunit/runtime-gen/quotejsonstring.js
deleted file mode 100644
index 02ed21f..0000000
--- a/test/mjsunit/runtime-gen/quotejsonstring.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _string = "foo";
-%QuoteJSONString(_string);
diff --git a/test/mjsunit/runtime-gen/regexpcompile.js b/test/mjsunit/runtime-gen/regexpcompile.js
deleted file mode 100644
index ba51a62..0000000
--- a/test/mjsunit/runtime-gen/regexpcompile.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _re = /ab/g;
-var _pattern = "foo";
-var _flags = "foo";
-%RegExpCompile(_re, _pattern, _flags);
diff --git a/test/mjsunit/runtime-gen/regexpexecmultiple.js b/test/mjsunit/runtime-gen/regexpexecmultiple.js
deleted file mode 100644
index feab2a6..0000000
--- a/test/mjsunit/runtime-gen/regexpexecmultiple.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _regexp = /ab/g;
-var _subject = "foo";
-var arg2 = ['a'];
-var arg3 = ['a'];
-%RegExpExecMultiple(_regexp, _subject, arg2, arg3);
diff --git a/test/mjsunit/runtime-gen/regexpinitializeobject.js b/test/mjsunit/runtime-gen/regexpinitializeobject.js
deleted file mode 100644
index 538e4da..0000000
--- a/test/mjsunit/runtime-gen/regexpinitializeobject.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _regexp = /ab/g;
-var _source = "foo";
-var _global = new Object();
-var _ignoreCase = new Object();
-var _multiline = new Object();
-%RegExpInitializeObject(_regexp, _source, _global, _ignoreCase, _multiline);
diff --git a/test/mjsunit/runtime-gen/removearrayholes.js b/test/mjsunit/runtime-gen/removearrayholes.js
deleted file mode 100644
index c70e10b..0000000
--- a/test/mjsunit/runtime-gen/removearrayholes.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _limit = 32;
-%RemoveArrayHoles(_object, _limit);
diff --git a/test/mjsunit/runtime-gen/roundnumber.js b/test/mjsunit/runtime-gen/roundnumber.js
deleted file mode 100644
index a0f4d45..0000000
--- a/test/mjsunit/runtime-gen/roundnumber.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _input = 1.5;
-%RoundNumber(_input);
diff --git a/test/mjsunit/runtime-gen/runmicrotasks.js b/test/mjsunit/runtime-gen/runmicrotasks.js
deleted file mode 100644
index 7661406..0000000
--- a/test/mjsunit/runtime-gen/runmicrotasks.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%RunMicrotasks();
diff --git a/test/mjsunit/runtime-gen/runninginsimulator.js b/test/mjsunit/runtime-gen/runninginsimulator.js
deleted file mode 100644
index c7a7112..0000000
--- a/test/mjsunit/runtime-gen/runninginsimulator.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%RunningInSimulator();
diff --git a/test/mjsunit/runtime-gen/setaccessorproperty.js b/test/mjsunit/runtime-gen/setaccessorproperty.js
deleted file mode 100644
index 1f805f7..0000000
--- a/test/mjsunit/runtime-gen/setaccessorproperty.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _name = "name";
-var arg2 = undefined;
-var arg3 = undefined;
-var _attribute = 1;
-var _access_control = 1;
-%SetAccessorProperty(_object, _name, arg2, arg3, _attribute, _access_control);
diff --git a/test/mjsunit/runtime-gen/setadd.js b/test/mjsunit/runtime-gen/setadd.js
deleted file mode 100644
index 87e30e1..0000000
--- a/test/mjsunit/runtime-gen/setadd.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Set();
-var _key = new Object();
-%SetAdd(_holder, _key);
diff --git a/test/mjsunit/runtime-gen/setclear.js b/test/mjsunit/runtime-gen/setclear.js
deleted file mode 100644
index f37ad51..0000000
--- a/test/mjsunit/runtime-gen/setclear.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Set();
-%SetClear(_holder);
diff --git a/test/mjsunit/runtime-gen/setcode.js b/test/mjsunit/runtime-gen/setcode.js
deleted file mode 100644
index 464e0f9..0000000
--- a/test/mjsunit/runtime-gen/setcode.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _target = function() {};
-var _source = function() {};
-%SetCode(_target, _source);
diff --git a/test/mjsunit/runtime-gen/setdebugeventlistener.js b/test/mjsunit/runtime-gen/setdebugeventlistener.js
deleted file mode 100644
index 85f583c..0000000
--- a/test/mjsunit/runtime-gen/setdebugeventlistener.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = undefined;
-var _data = new Object();
-%SetDebugEventListener(arg0, _data);
diff --git a/test/mjsunit/runtime-gen/setdelete.js b/test/mjsunit/runtime-gen/setdelete.js
deleted file mode 100644
index f9790a8..0000000
--- a/test/mjsunit/runtime-gen/setdelete.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Set();
-var _key = new Object();
-%SetDelete(_holder, _key);
diff --git a/test/mjsunit/runtime-gen/setdisablebreak.js b/test/mjsunit/runtime-gen/setdisablebreak.js
deleted file mode 100644
index 2b716b9..0000000
--- a/test/mjsunit/runtime-gen/setdisablebreak.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _disable_break = true;
-%SetDisableBreak(_disable_break);
diff --git a/test/mjsunit/runtime-gen/setflags.js b/test/mjsunit/runtime-gen/setflags.js
deleted file mode 100644
index b75f25d..0000000
--- a/test/mjsunit/runtime-gen/setflags.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _arg = "foo";
-%SetFlags(_arg);
diff --git a/test/mjsunit/runtime-gen/setfunctionbreakpoint.js b/test/mjsunit/runtime-gen/setfunctionbreakpoint.js
deleted file mode 100644
index 1fc3516..0000000
--- a/test/mjsunit/runtime-gen/setfunctionbreakpoint.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _function = function() {};
-var arg1 = 200;
-var _break_point_object_arg = new Object();
-%SetFunctionBreakPoint(_function, arg1, _break_point_object_arg);
diff --git a/test/mjsunit/runtime-gen/setgetsize.js b/test/mjsunit/runtime-gen/setgetsize.js
deleted file mode 100644
index f6d3c27..0000000
--- a/test/mjsunit/runtime-gen/setgetsize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Set();
-%SetGetSize(_holder);
diff --git a/test/mjsunit/runtime-gen/sethas.js b/test/mjsunit/runtime-gen/sethas.js
deleted file mode 100644
index 76aa7c0..0000000
--- a/test/mjsunit/runtime-gen/sethas.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Set();
-var _key = new Object();
-%SetHas(_holder, _key);
diff --git a/test/mjsunit/runtime-gen/sethiddenproperty.js b/test/mjsunit/runtime-gen/sethiddenproperty.js
deleted file mode 100644
index 305046a..0000000
--- a/test/mjsunit/runtime-gen/sethiddenproperty.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _key = "foo";
-var _value = new Object();
-%SetHiddenProperty(_object, _key, _value);
diff --git a/test/mjsunit/runtime-gen/setinitialize.js b/test/mjsunit/runtime-gen/setinitialize.js
deleted file mode 100644
index 585529c..0000000
--- a/test/mjsunit/runtime-gen/setinitialize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Set();
-%SetInitialize(_holder);
diff --git a/test/mjsunit/runtime-gen/setinlinebuiltinflag.js b/test/mjsunit/runtime-gen/setinlinebuiltinflag.js
deleted file mode 100644
index bbcaf96..0000000
--- a/test/mjsunit/runtime-gen/setinlinebuiltinflag.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%SetInlineBuiltinFlag(_object);
diff --git a/test/mjsunit/runtime-gen/setisobserved.js b/test/mjsunit/runtime-gen/setisobserved.js
deleted file mode 100644
index f194de8..0000000
--- a/test/mjsunit/runtime-gen/setisobserved.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%SetIsObserved(_obj);
diff --git a/test/mjsunit/runtime-gen/setiteratorinitialize.js b/test/mjsunit/runtime-gen/setiteratorinitialize.js
deleted file mode 100644
index 9cf36b4..0000000
--- a/test/mjsunit/runtime-gen/setiteratorinitialize.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Set().values();
-var _set = new Set();
-var arg2 = 2;
-%SetIteratorInitialize(_holder, _set, arg2);
diff --git a/test/mjsunit/runtime-gen/setiteratornext.js b/test/mjsunit/runtime-gen/setiteratornext.js
deleted file mode 100644
index 7cf88cf..0000000
--- a/test/mjsunit/runtime-gen/setiteratornext.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Set().values();
-%SetIteratorNext(_holder);
diff --git a/test/mjsunit/runtime-gen/setnativeflag.js b/test/mjsunit/runtime-gen/setnativeflag.js
deleted file mode 100644
index 60775ac..0000000
--- a/test/mjsunit/runtime-gen/setnativeflag.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%SetNativeFlag(_object);
diff --git a/test/mjsunit/runtime-gen/setproperty.js b/test/mjsunit/runtime-gen/setproperty.js
deleted file mode 100644
index bec3ad8..0000000
--- a/test/mjsunit/runtime-gen/setproperty.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-var _key = new Object();
-var _value = new Object();
-var _unchecked_attributes = 1;
-var _strict_mode_arg = 1;
-%SetProperty(_object, _key, _value, _unchecked_attributes, _strict_mode_arg);
diff --git a/test/mjsunit/runtime-gen/setprototype.js b/test/mjsunit/runtime-gen/setprototype.js
deleted file mode 100644
index 05b2409..0000000
--- a/test/mjsunit/runtime-gen/setprototype.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-var _prototype = new Object();
-%SetPrototype(_obj, _prototype);
diff --git a/test/mjsunit/runtime-gen/setscopevariablevalue.js b/test/mjsunit/runtime-gen/setscopevariablevalue.js
deleted file mode 100644
index 9ca03c5..0000000
--- a/test/mjsunit/runtime-gen/setscopevariablevalue.js
+++ /dev/null
@@ -1,10 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _fun = function() {};
-var _wrapped_id = 1;
-var _inlined_jsframe_index = 32;
-var _index = 32;
-var _variable_name = "foo";
-var _new_value = new Object();
-%SetScopeVariableValue(_fun, _wrapped_id, _inlined_jsframe_index, _index, _variable_name, _new_value);
diff --git a/test/mjsunit/runtime-gen/smilexicographiccompare.js b/test/mjsunit/runtime-gen/smilexicographiccompare.js
deleted file mode 100644
index d5a0bf5..0000000
--- a/test/mjsunit/runtime-gen/smilexicographiccompare.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x_value = 1;
-var _y_value = 1;
-%SmiLexicographicCompare(_x_value, _y_value);
diff --git a/test/mjsunit/runtime-gen/sparsejoinwithseparator.js b/test/mjsunit/runtime-gen/sparsejoinwithseparator.js
deleted file mode 100644
index 13e056b..0000000
--- a/test/mjsunit/runtime-gen/sparsejoinwithseparator.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _elements_array = new Array();
-var _array_length = 32;
-var _separator = "foo";
-%SparseJoinWithSeparator(_elements_array, _array_length, _separator);
diff --git a/test/mjsunit/runtime-gen/specialarrayfunctions.js b/test/mjsunit/runtime-gen/specialarrayfunctions.js
deleted file mode 100644
index e7f5a47..0000000
--- a/test/mjsunit/runtime-gen/specialarrayfunctions.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%SpecialArrayFunctions();
diff --git a/test/mjsunit/runtime-gen/stringbuilderconcat.js b/test/mjsunit/runtime-gen/stringbuilderconcat.js
deleted file mode 100644
index a0bd41b..0000000
--- a/test/mjsunit/runtime-gen/stringbuilderconcat.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = [1, 2, 3];
-var arg1 = 3;
-var _special = "foo";
-%StringBuilderConcat(arg0, arg1, _special);
diff --git a/test/mjsunit/runtime-gen/stringbuilderjoin.js b/test/mjsunit/runtime-gen/stringbuilderjoin.js
deleted file mode 100644
index f0506c0..0000000
--- a/test/mjsunit/runtime-gen/stringbuilderjoin.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var arg0 = ['a', 'b'];
-var arg1 = 4;
-var _separator = "foo";
-%StringBuilderJoin(arg0, arg1, _separator);
diff --git a/test/mjsunit/runtime-gen/stringequals.js b/test/mjsunit/runtime-gen/stringequals.js
deleted file mode 100644
index 320a9a8..0000000
--- a/test/mjsunit/runtime-gen/stringequals.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _x = "foo";
-var _y = "foo";
-%StringEquals(_x, _y);
diff --git a/test/mjsunit/runtime-gen/stringindexof.js b/test/mjsunit/runtime-gen/stringindexof.js
deleted file mode 100644
index 5ee0e38..0000000
--- a/test/mjsunit/runtime-gen/stringindexof.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _sub = "foo";
-var _pat = "foo";
-var _index = new Object();
-%StringIndexOf(_sub, _pat, _index);
diff --git a/test/mjsunit/runtime-gen/stringlastindexof.js b/test/mjsunit/runtime-gen/stringlastindexof.js
deleted file mode 100644
index 1d142dd..0000000
--- a/test/mjsunit/runtime-gen/stringlastindexof.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _sub = "foo";
-var _pat = "foo";
-var _index = new Object();
-%StringLastIndexOf(_sub, _pat, _index);
diff --git a/test/mjsunit/runtime-gen/stringlocalecompare.js b/test/mjsunit/runtime-gen/stringlocalecompare.js
deleted file mode 100644
index 8c1fffc..0000000
--- a/test/mjsunit/runtime-gen/stringlocalecompare.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _str1 = "foo";
-var _str2 = "foo";
-%StringLocaleCompare(_str1, _str2);
diff --git a/test/mjsunit/runtime-gen/stringmatch.js b/test/mjsunit/runtime-gen/stringmatch.js
deleted file mode 100644
index 927a670..0000000
--- a/test/mjsunit/runtime-gen/stringmatch.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _subject = "foo";
-var _regexp = /ab/g;
-var arg2 = ['a', 'b'];
-%StringMatch(_subject, _regexp, arg2);
diff --git a/test/mjsunit/runtime-gen/stringnormalize.js b/test/mjsunit/runtime-gen/stringnormalize.js
deleted file mode 100644
index 85af3d6..0000000
--- a/test/mjsunit/runtime-gen/stringnormalize.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _stringValue = "foo";
-var arg1 = 2;
-%StringNormalize(_stringValue, arg1);
diff --git a/test/mjsunit/runtime-gen/stringparsefloat.js b/test/mjsunit/runtime-gen/stringparsefloat.js
deleted file mode 100644
index b31eccf..0000000
--- a/test/mjsunit/runtime-gen/stringparsefloat.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _subject = "foo";
-%StringParseFloat(_subject);
diff --git a/test/mjsunit/runtime-gen/stringparseint.js b/test/mjsunit/runtime-gen/stringparseint.js
deleted file mode 100644
index 66aa596..0000000
--- a/test/mjsunit/runtime-gen/stringparseint.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _subject = "foo";
-var _radix = 32;
-%StringParseInt(_subject, _radix);
diff --git a/test/mjsunit/runtime-gen/stringreplaceglobalregexpwithstring.js b/test/mjsunit/runtime-gen/stringreplaceglobalregexpwithstring.js
deleted file mode 100644
index d39134a..0000000
--- a/test/mjsunit/runtime-gen/stringreplaceglobalregexpwithstring.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _subject = "foo";
-var _regexp = /ab/g;
-var _replacement = "foo";
-var arg3 = ['a'];
-%StringReplaceGlobalRegExpWithString(_subject, _regexp, _replacement, arg3);
diff --git a/test/mjsunit/runtime-gen/stringreplaceonecharwithstring.js b/test/mjsunit/runtime-gen/stringreplaceonecharwithstring.js
deleted file mode 100644
index 8eca0da..0000000
--- a/test/mjsunit/runtime-gen/stringreplaceonecharwithstring.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _subject = "foo";
-var _search = "foo";
-var _replace = "foo";
-%StringReplaceOneCharWithString(_subject, _search, _replace);
diff --git a/test/mjsunit/runtime-gen/stringsplit.js b/test/mjsunit/runtime-gen/stringsplit.js
deleted file mode 100644
index e4f02e1..0000000
--- a/test/mjsunit/runtime-gen/stringsplit.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _subject = "foo";
-var _pattern = "foo";
-var _limit = 32;
-%StringSplit(_subject, _pattern, _limit);
diff --git a/test/mjsunit/runtime-gen/stringtoarray.js b/test/mjsunit/runtime-gen/stringtoarray.js
deleted file mode 100644
index e765ba5..0000000
--- a/test/mjsunit/runtime-gen/stringtoarray.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _s = "foo";
-var _limit = 32;
-%StringToArray(_s, _limit);
diff --git a/test/mjsunit/runtime-gen/stringtolowercase.js b/test/mjsunit/runtime-gen/stringtolowercase.js
deleted file mode 100644
index 48d6908..0000000
--- a/test/mjsunit/runtime-gen/stringtolowercase.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _s = "foo";
-%StringToLowerCase(_s);
diff --git a/test/mjsunit/runtime-gen/stringtonumber.js b/test/mjsunit/runtime-gen/stringtonumber.js
deleted file mode 100644
index 77cff38..0000000
--- a/test/mjsunit/runtime-gen/stringtonumber.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _subject = "foo";
-%StringToNumber(_subject);
diff --git a/test/mjsunit/runtime-gen/stringtouppercase.js b/test/mjsunit/runtime-gen/stringtouppercase.js
deleted file mode 100644
index d84822e..0000000
--- a/test/mjsunit/runtime-gen/stringtouppercase.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _s = "foo";
-%StringToUpperCase(_s);
diff --git a/test/mjsunit/runtime-gen/stringtrim.js b/test/mjsunit/runtime-gen/stringtrim.js
deleted file mode 100644
index 33ac02d..0000000
--- a/test/mjsunit/runtime-gen/stringtrim.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _string = "foo";
-var _trimLeft = true;
-var _trimRight = true;
-%StringTrim(_string, _trimLeft, _trimRight);
diff --git a/test/mjsunit/runtime-gen/symboldescription.js b/test/mjsunit/runtime-gen/symboldescription.js
deleted file mode 100644
index 95905b7..0000000
--- a/test/mjsunit/runtime-gen/symboldescription.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _symbol = Symbol("symbol");
-%SymbolDescription(_symbol);
diff --git a/test/mjsunit/runtime-gen/symbolisprivate.js b/test/mjsunit/runtime-gen/symbolisprivate.js
deleted file mode 100644
index 9f40396..0000000
--- a/test/mjsunit/runtime-gen/symbolisprivate.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _symbol = Symbol("symbol");
-%SymbolIsPrivate(_symbol);
diff --git a/test/mjsunit/runtime-gen/symbolregistry.js b/test/mjsunit/runtime-gen/symbolregistry.js
deleted file mode 100644
index 520caa3..0000000
--- a/test/mjsunit/runtime-gen/symbolregistry.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%SymbolRegistry();
diff --git a/test/mjsunit/runtime-gen/tobool.js b/test/mjsunit/runtime-gen/tobool.js
deleted file mode 100644
index 7e913c2..0000000
--- a/test/mjsunit/runtime-gen/tobool.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%ToBool(_object);
diff --git a/test/mjsunit/runtime-gen/tofastproperties.js b/test/mjsunit/runtime-gen/tofastproperties.js
deleted file mode 100644
index 8267d2b..0000000
--- a/test/mjsunit/runtime-gen/tofastproperties.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%ToFastProperties(_object);
diff --git a/test/mjsunit/runtime-gen/traceenter.js b/test/mjsunit/runtime-gen/traceenter.js
deleted file mode 100644
index 7c52907..0000000
--- a/test/mjsunit/runtime-gen/traceenter.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%TraceEnter();
diff --git a/test/mjsunit/runtime-gen/traceexit.js b/test/mjsunit/runtime-gen/traceexit.js
deleted file mode 100644
index f05d841..0000000
--- a/test/mjsunit/runtime-gen/traceexit.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%TraceExit(_obj);
diff --git a/test/mjsunit/runtime-gen/truncatestring.js b/test/mjsunit/runtime-gen/truncatestring.js
deleted file mode 100644
index 1239c22..0000000
--- a/test/mjsunit/runtime-gen/truncatestring.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _string = "seqstring";
-var _new_length = 1;
-%TruncateString(_string, _new_length);
diff --git a/test/mjsunit/runtime-gen/trymigrateinstance.js b/test/mjsunit/runtime-gen/trymigrateinstance.js
deleted file mode 100644
index 2c3414c..0000000
--- a/test/mjsunit/runtime-gen/trymigrateinstance.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _object = new Object();
-%TryMigrateInstance(_object);
diff --git a/test/mjsunit/runtime-gen/typedarraygetbuffer.js b/test/mjsunit/runtime-gen/typedarraygetbuffer.js
deleted file mode 100644
index ea8733e..0000000
--- a/test/mjsunit/runtime-gen/typedarraygetbuffer.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Int32Array(2);
-%TypedArrayGetBuffer(_holder);
diff --git a/test/mjsunit/runtime-gen/typedarraygetlength.js b/test/mjsunit/runtime-gen/typedarraygetlength.js
deleted file mode 100644
index 56ceb95..0000000
--- a/test/mjsunit/runtime-gen/typedarraygetlength.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Int32Array(2);
-%TypedArrayGetLength(_holder);
diff --git a/test/mjsunit/runtime-gen/typedarrayinitialize.js b/test/mjsunit/runtime-gen/typedarrayinitialize.js
deleted file mode 100644
index 177bf3b..0000000
--- a/test/mjsunit/runtime-gen/typedarrayinitialize.js
+++ /dev/null
@@ -1,9 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Int32Array(2);
-var arg1 = 6;
-var arg2 = new ArrayBuffer(8);
-var _byte_offset_object = 1.5;
-var arg4 = 4;
-%TypedArrayInitialize(_holder, arg1, arg2, _byte_offset_object, arg4);
diff --git a/test/mjsunit/runtime-gen/typedarrayinitializefromarraylike.js b/test/mjsunit/runtime-gen/typedarrayinitializefromarraylike.js
deleted file mode 100644
index e2fad5a..0000000
--- a/test/mjsunit/runtime-gen/typedarrayinitializefromarraylike.js
+++ /dev/null
@@ -1,8 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _holder = new Int32Array(2);
-var arg1 = 6;
-var _source = new Object();
-var _length_obj = 1.5;
-%TypedArrayInitializeFromArrayLike(_holder, arg1, _source, _length_obj);
diff --git a/test/mjsunit/runtime-gen/typedarraymaxsizeinheap.js b/test/mjsunit/runtime-gen/typedarraymaxsizeinheap.js
deleted file mode 100644
index 4c2cd2a..0000000
--- a/test/mjsunit/runtime-gen/typedarraymaxsizeinheap.js
+++ /dev/null
@@ -1,4 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-%TypedArrayMaxSizeInHeap();
diff --git a/test/mjsunit/runtime-gen/typedarraysetfastcases.js b/test/mjsunit/runtime-gen/typedarraysetfastcases.js
deleted file mode 100644
index 9940574..0000000
--- a/test/mjsunit/runtime-gen/typedarraysetfastcases.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _target_obj = new Int32Array(2);
-var _source_obj = new Int32Array(2);
-var arg2 = 0;
-%TypedArraySetFastCases(_target_obj, _source_obj, arg2);
diff --git a/test/mjsunit/runtime-gen/typeof.js b/test/mjsunit/runtime-gen/typeof.js
deleted file mode 100644
index cf8297c..0000000
--- a/test/mjsunit/runtime-gen/typeof.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _obj = new Object();
-%Typeof(_obj);
diff --git a/test/mjsunit/runtime-gen/unblockconcurrentrecompilation.js b/test/mjsunit/runtime-gen/unblockconcurrentrecompilation.js
deleted file mode 100644
index e734860..0000000
--- a/test/mjsunit/runtime-gen/unblockconcurrentrecompilation.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-try {
-%UnblockConcurrentRecompilation();
-} catch(e) {}
diff --git a/test/mjsunit/runtime-gen/uriescape.js b/test/mjsunit/runtime-gen/uriescape.js
deleted file mode 100644
index 798d132..0000000
--- a/test/mjsunit/runtime-gen/uriescape.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _source = "foo";
-%URIEscape(_source);
diff --git a/test/mjsunit/runtime-gen/uriunescape.js b/test/mjsunit/runtime-gen/uriunescape.js
deleted file mode 100644
index 9573a01..0000000
--- a/test/mjsunit/runtime-gen/uriunescape.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _source = "foo";
-%URIUnescape(_source);
diff --git a/test/mjsunit/runtime-gen/weakcollectiondelete.js b/test/mjsunit/runtime-gen/weakcollectiondelete.js
deleted file mode 100644
index fa24778..0000000
--- a/test/mjsunit/runtime-gen/weakcollectiondelete.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _weak_collection = new WeakMap();
-var _key = new Object();
-%WeakCollectionDelete(_weak_collection, _key);
diff --git a/test/mjsunit/runtime-gen/weakcollectionget.js b/test/mjsunit/runtime-gen/weakcollectionget.js
deleted file mode 100644
index 07f486c..0000000
--- a/test/mjsunit/runtime-gen/weakcollectionget.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _weak_collection = new WeakMap();
-var _key = new Object();
-%WeakCollectionGet(_weak_collection, _key);
diff --git a/test/mjsunit/runtime-gen/weakcollectionhas.js b/test/mjsunit/runtime-gen/weakcollectionhas.js
deleted file mode 100644
index d563075..0000000
--- a/test/mjsunit/runtime-gen/weakcollectionhas.js
+++ /dev/null
@@ -1,6 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _weak_collection = new WeakMap();
-var _key = new Object();
-%WeakCollectionHas(_weak_collection, _key);
diff --git a/test/mjsunit/runtime-gen/weakcollectioninitialize.js b/test/mjsunit/runtime-gen/weakcollectioninitialize.js
deleted file mode 100644
index a28dc50..0000000
--- a/test/mjsunit/runtime-gen/weakcollectioninitialize.js
+++ /dev/null
@@ -1,5 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _weak_collection = new WeakMap();
-%WeakCollectionInitialize(_weak_collection);
diff --git a/test/mjsunit/runtime-gen/weakcollectionset.js b/test/mjsunit/runtime-gen/weakcollectionset.js
deleted file mode 100644
index 037157a..0000000
--- a/test/mjsunit/runtime-gen/weakcollectionset.js
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony
-var _weak_collection = new WeakMap();
-var _key = new Object();
-var _value = new Object();
-%WeakCollectionSet(_weak_collection, _key, _value);
diff --git a/test/mjsunit/sin-cos.js b/test/mjsunit/sin-cos.js
index 02ae57b..fb6f858 100644
--- a/test/mjsunit/sin-cos.js
+++ b/test/mjsunit/sin-cos.js
@@ -157,8 +157,8 @@
 assertEquals(1, Math.cos("0x00000"));
 assertTrue(isNaN(Math.sin(Infinity)));
 assertTrue(isNaN(Math.cos("-Infinity")));
-assertEquals("Infinity", String(Math.tan(Math.PI/2)));
-assertEquals("-Infinity", String(Math.tan(-Math.PI/2)));
+assertTrue(Math.tan(Math.PI/2) > 1e16);
+assertTrue(Math.tan(-Math.PI/2) < -1e16);
 assertEquals("-Infinity", String(1/Math.sin("-0")));
 
 // Assert that the remainder after division by pi is reasonably precise.
@@ -185,3 +185,98 @@
 assertFalse(isNaN(Math.cos(1.57079632679489700)));
 assertFalse(isNaN(Math.cos(-1e-100)));
 assertFalse(isNaN(Math.cos(-1e-323)));
+
+// Tests for specific values expected from the fdlibm implementation.
+
+var two_32 = Math.pow(2, -32);
+var two_28 = Math.pow(2, -28);
+
+// Tests for Math.sin for |x| < pi/4
+assertEquals(Infinity, 1/Math.sin(+0.0));
+assertEquals(-Infinity, 1/Math.sin(-0.0));
+// sin(x) = x for x < 2^-27
+assertEquals(two_32, Math.sin(two_32));
+assertEquals(-two_32, Math.sin(-two_32));
+// sin(pi/8) = sqrt(sqrt(2)-1)/2^(3/4)
+assertEquals(0.3826834323650898, Math.sin(Math.PI/8));
+assertEquals(-0.3826834323650898, -Math.sin(Math.PI/8));
+
+// Tests for Math.cos for |x| < pi/4
+// cos(x) = 1 for |x| < 2^-27
+assertEquals(1, Math.cos(two_32));
+assertEquals(1, Math.cos(-two_32));
+// Test KERNELCOS for |x| < 0.3.
+// cos(pi/20) = sqrt(sqrt(2)*sqrt(sqrt(5)+5)+4)/2^(3/2)
+assertEquals(0.9876883405951378, Math.cos(Math.PI/20));
+// Test KERNELCOS for x ~= 0.78125
+assertEquals(0.7100335477927638, Math.cos(0.7812504768371582));
+assertEquals(0.7100338835660797, Math.cos(0.78125));
+// Test KERNELCOS for |x| > 0.3.
+// cos(pi/8) = sqrt(sqrt(2)+1)/2^(3/4)
+assertEquals(0.9238795325112867, Math.cos(Math.PI/8));
+// cos(-pi/8) = cos(pi/8)
+assertEquals(0.9238795325112867, Math.cos(-Math.PI/8));
+
+// Tests for Math.tan for |x| < pi/4
+assertEquals(Infinity, 1/Math.tan(0.0));
+assertEquals(-Infinity, 1/Math.tan(-0.0));
+// tan(x) = x for |x| < 2^-28
+assertEquals(two_32, Math.tan(two_32));
+assertEquals(-two_32, Math.tan(-two_32));
+// Test KERNELTAN for |x| > 0.67434.
+assertEquals(0.8211418015898941, Math.tan(11/16));
+assertEquals(-0.8211418015898941, Math.tan(-11/16));
+assertEquals(0.41421356237309503, Math.tan(Math.PI / 8));
+// crbug/427468
+assertEquals(0.7993357819992383, Math.tan(0.6743358));
+
+// Tests for Math.sin.
+assertEquals(0.479425538604203, Math.sin(0.5));
+assertEquals(-0.479425538604203, Math.sin(-0.5));
+assertEquals(1, Math.sin(Math.PI/2));
+assertEquals(-1, Math.sin(-Math.PI/2));
+// Test that Math.sin(Math.PI) != 0 since Math.PI is not exact.
+assertEquals(1.2246467991473532e-16, Math.sin(Math.PI));
+assertEquals(-7.047032979958965e-14, Math.sin(2200*Math.PI));
+// Test Math.sin for various phases.
+assertEquals(-0.7071067811865477, Math.sin(7/4 * Math.PI));
+assertEquals(0.7071067811865474, Math.sin(9/4 * Math.PI));
+assertEquals(0.7071067811865483, Math.sin(11/4 * Math.PI));
+assertEquals(-0.7071067811865479, Math.sin(13/4 * Math.PI));
+assertEquals(-3.2103381051568376e-11, Math.sin(1048576/4 * Math.PI));
+
+// Tests for Math.cos.
+assertEquals(1, Math.cos(two_28));
+// Cover different code paths in KERNELCOS.
+assertEquals(0.9689124217106447, Math.cos(0.25));
+assertEquals(0.8775825618903728, Math.cos(0.5));
+assertEquals(0.7073882691671998, Math.cos(0.785));
+// Test that Math.cos(Math.PI/2) != 0 since Math.PI is not exact.
+assertEquals(6.123233995736766e-17, Math.cos(Math.PI/2));
+// Test Math.cos for various phases.
+assertEquals(0.7071067811865474, Math.cos(7/4 * Math.PI));
+assertEquals(0.7071067811865477, Math.cos(9/4 * Math.PI));
+assertEquals(-0.7071067811865467, Math.cos(11/4 * Math.PI));
+assertEquals(-0.7071067811865471, Math.cos(13/4 * Math.PI));
+assertEquals(0.9367521275331447, Math.cos(1000000));
+assertEquals(-3.435757038074824e-12, Math.cos(1048575/2 * Math.PI));
+
+// Tests for Math.tan.
+assertEquals(two_28, Math.tan(two_28));
+// Test that Math.tan(Math.PI/2) != Infinity since Math.PI is not exact.
+assertEquals(1.633123935319537e16, Math.tan(Math.PI/2));
+// Cover different code paths in KERNELTAN (tangent and cotangent).
+assertEquals(0.5463024898437905, Math.tan(0.5));
+assertEquals(2.0000000000000027, Math.tan(1.107148717794091));
+assertEquals(-1.0000000000000004, Math.tan(7/4*Math.PI));
+assertEquals(0.9999999999999994, Math.tan(9/4*Math.PI));
+assertEquals(-6.420676210313675e-11, Math.tan(1048576/2*Math.PI));
+assertEquals(2.910566692924059e11, Math.tan(1048575/2*Math.PI));
+
+// Test Payne-Hanek reduction.
+assertEquals(0.377820109360752e0, Math.sin(Math.pow(2, 120)));
+assertEquals(-0.9258790228548379e0, Math.cos(Math.pow(2, 120)));
+assertEquals(-0.40806638884180424e0, Math.tan(Math.pow(2, 120)));
+assertEquals(-0.377820109360752e0, Math.sin(-Math.pow(2, 120)));
+assertEquals(-0.9258790228548379e0, Math.cos(-Math.pow(2, 120)));
+assertEquals(0.40806638884180424e0, Math.tan(-Math.pow(2, 120)));
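
Note: the expected values above are pinned to the fdlibm-derived trigonometric
implementation, and the finite tangent near pi/2 follows from Math.PI being
only the closest double to pi. A minimal sketch of that relationship (the
printed digits assume an fdlibm-style Math and may differ on other engines):

    var d = Math.sin(Math.PI);      // ~= pi - Math.PI ~= 1.2246467991473532e-16
    var t = Math.tan(Math.PI / 2);  // ~= 1.633123935319537e16, not Infinity
    // tan(pi/2 - d/2) = cot(d/2) ~= 2/d, so t is roughly 2/d:
    console.log(t, 2 / d);
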
diff --git a/test/mjsunit/stack-traces-overflow.js b/test/mjsunit/stack-traces-overflow.js
index 7722e93..e20c609 100644
--- a/test/mjsunit/stack-traces-overflow.js
+++ b/test/mjsunit/stack-traces-overflow.js
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --stack-size=100
+
 function rec1(a) { rec1(a+1); }
 function rec2(a) { rec3(a+1); }
 function rec3(a) { rec2(a+1); }
@@ -61,8 +63,8 @@
 function testErrorPrototype(prototype) {
   var object = {};
   object.__proto__ = prototype;
-  object.stack = "123";
-  assertEquals("123", object.stack);
+  object.stack = "123";  // Overwriting stack property fails.
+  assertEquals(prototype.stack, object.stack);
   assertTrue("123" != prototype.stack);
 }
 
@@ -106,11 +108,28 @@
   assertEquals(1, e.stack.split('\n').length);
 }
 
+// A limit beyond the 32-bit integer range.
+Error.stackTraceLimit = 1e12;
+try {
+  rec1(0);
+} catch (e) {
+  assertTrue(e.stack.split('\n').length > 100);
+}
+
+Error.stackTraceLimit = Infinity;
+try {
+  rec1(0);
+} catch (e) {
+  assertTrue(e.stack.split('\n').length > 100);
+}
+
 Error.stackTraceLimit = "not a number";
 try {
   rec1(0);
 } catch (e) {
   assertEquals(undefined, e.stack);
+  e.stack = "abc";
+  assertEquals("abc", e.stack);
 }
 
 Error.stackTraceLimit = 3;
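
Note: the new cases above pin down how V8's nonstandard Error.stackTraceLimit
treats out-of-range and non-numeric values. A short sketch of the observable
behavior (V8/d8-specific API):

    function rec(n) { rec(n + 1); }
    Error.stackTraceLimit = Infinity;        // huge limits still collect frames
    try { rec(0); } catch (e) {
      console.log(e.stack.split('\n').length > 100);  // true
    }
    Error.stackTraceLimit = "not a number";  // disables collection entirely
    try { rec(0); } catch (e) {
      console.log(e.stack);  // undefined
      e.stack = "abc";       // plain assignment now sticks
      console.log(e.stack);  // "abc"
    }
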
diff --git a/test/mjsunit/stack-traces.js b/test/mjsunit/stack-traces.js
index 46a16eb..f80a627 100644
--- a/test/mjsunit/stack-traces.js
+++ b/test/mjsunit/stack-traces.js
@@ -331,3 +331,23 @@
 new Error().stack;
 
 assertEquals("custom", Error.prepareStackTrace);
+
+// Check that the formatted stack trace can be set to undefined.
+error = new Error();
+error.stack = undefined;
+assertEquals(undefined, error.stack);
+
+// Check that the stack trace accessors are not forcibly set.
+var my_error = {};
+Object.freeze(my_error);
+assertThrows(function() { Error.captureStackTrace(my_error); });
+
+my_error = {};
+Object.preventExtensions(my_error);
+assertThrows(function() { Error.captureStackTrace(my_error); });
+
+var fake_error = {};
+my_error = new Error();
+var stolen_getter = Object.getOwnPropertyDescriptor(my_error, 'stack').get;
+Object.defineProperty(fake_error, 'stack', { get: stolen_getter });
+assertEquals(undefined, fake_error.stack);
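
Note: the frozen/non-extensible cases rely on Error.captureStackTrace
installing a 'stack' accessor, which cannot be defined on a sealed receiver.
A sketch (V8-specific API):

    var frozen = {};
    Object.freeze(frozen);
    try {
      Error.captureStackTrace(frozen);  // must throw, not silently no-op
    } catch (e) {
      console.log(e instanceof TypeError);  // true on V8
    }
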
diff --git a/test/mjsunit/string-external-cached.js b/test/mjsunit/string-external-cached.js
index 6e24285..cd368f6 100644
--- a/test/mjsunit/string-external-cached.js
+++ b/test/mjsunit/string-external-cached.js
@@ -68,8 +68,8 @@
     externalizeString(ascii, false);
     externalizeString(twobyte, true);
   } catch (ex) { }
-  assertTrue(isAsciiString(ascii));
-  assertFalse(isAsciiString(twobyte));
+  assertTrue(isOneByteString(ascii));
+  assertFalse(isOneByteString(twobyte));
   var ascii_slice = ascii.slice(1,-1);
   var twobyte_slice = twobyte.slice(2,-1);
   var ascii_cons = ascii + ascii;
@@ -97,18 +97,18 @@
     externalizeString(long_ascii, false);
     externalizeString(short_twobyte, true);
     externalizeString(long_twobyte, true);
-    assertTrue(isAsciiString(short_asii) && isAsciiString(long_ascii));
-    assertFalse(isAsciiString(short_twobyte) || isAsciiString(long_twobyte));
+    assertTrue(isOneByteString(short_ascii) && isOneByteString(long_ascii));
+    assertFalse(isOneByteString(short_twobyte) || isOneByteString(long_twobyte));
   } catch (ex) { }
   assertEquals("E=MCsquared", short_ascii + long_ascii);
-  assertTrue(isAsciiString(short_ascii + long_ascii));
+  assertTrue(isOneByteString(short_ascii + long_ascii));
   assertEquals("MCsquaredE=", long_ascii + short_ascii);
   assertEquals("E\u1234MCsquare\u1234", short_twobyte + long_twobyte);
-  assertFalse(isAsciiString(short_twobyte + long_twobyte));
+  assertFalse(isOneByteString(short_twobyte + long_twobyte));
   assertEquals("E=MCsquared", "E=" + long_ascii);
   assertEquals("E\u1234MCsquared", short_twobyte + "MCsquared");
   assertEquals("E\u1234MCsquared", short_twobyte + long_ascii);
-  assertFalse(isAsciiString(short_twobyte + long_ascii));
+  assertFalse(isOneByteString(short_twobyte + long_ascii));
 }
 
 // Run the test many times to ensure IC-s don't break things.
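
Note: the isAsciiString -> isOneByteString rename tracks V8's actual narrow
string representation, which is Latin-1 (one byte per code unit), not 7-bit
ASCII. A sketch of the distinction, assuming d8's externalize-string
extension (--expose-externalize-string) that these tests run with:

    var latin1 = "caf\u00e9";   // all code units <= 0xFF: one-byte storable
    var twoByte = "caf\u1234";  // a code unit > 0xFF: must be two-byte
    // In d8: isOneByteString(latin1) -> true, isOneByteString(twoByte) -> false
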
diff --git a/test/mjsunit/string-externalize.js b/test/mjsunit/string-externalize.js
index d52a7e2..39cc124 100644
--- a/test/mjsunit/string-externalize.js
+++ b/test/mjsunit/string-externalize.js
@@ -36,27 +36,27 @@
   for (var i = 0; i < size; i++) {
       str += String.fromCharCode(i & 0x7f);
   }
-  assertTrue(isAsciiString(str));
+  assertTrue(isOneByteString(str));
 
-  var twoByteExternalWithAsciiData =
+  var twoByteExternalWithOneByteData =
       "AA" + (function() { return "A"; })();
-  externalizeString(twoByteExternalWithAsciiData, true /* force two-byte */);
-  assertFalse(isAsciiString(twoByteExternalWithAsciiData));
+  externalizeString(twoByteExternalWithOneByteData, true /* force two-byte */);
+  assertFalse(isOneByteString(twoByteExternalWithOneByteData));
 
   var realTwoByteExternalString =
       "\u1234\u1234\u1234\u1234" + (function() { return "\u1234"; })();
   externalizeString(realTwoByteExternalString);
-  assertFalse(isAsciiString(realTwoByteExternalString));
+  assertFalse(isOneByteString(realTwoByteExternalString));
 
-  assertTrue(isAsciiString(["a", twoByteExternalWithAsciiData].join("")));
+  assertTrue(isOneByteString(["a", twoByteExternalWithOneByteData].join("")));
 
   // Appending a two-byte string that contains only ascii chars should
   // still produce an ascii cons.
-  var str1 = str + twoByteExternalWithAsciiData;
-  assertTrue(isAsciiString(str1));
+  var str1 = str + twoByteExternalWithOneByteData;
+  assertTrue(isOneByteString(str1));
 
   // Force flattening of the string.
-  var old_length = str1.length - twoByteExternalWithAsciiData.length;
+  var old_length = str1.length - twoByteExternalWithOneByteData.length;
   for (var i = 0; i < old_length; i++) {
     assertEquals(String.fromCharCode(i & 0x7f), str1[i]);
   }
@@ -65,16 +65,16 @@
   }
 
   // Flattened string should still be ascii.
-  assertTrue(isAsciiString(str1));
+  assertTrue(isOneByteString(str1));
 
   // Lower-casing an ascii string should produce ascii.
-  assertTrue(isAsciiString(str1.toLowerCase()));
+  assertTrue(isOneByteString(str1.toLowerCase()));
 
-  assertFalse(isAsciiString(["a", realTwoByteExternalString].join("")));
+  assertFalse(isOneByteString(["a", realTwoByteExternalString].join("")));
 
   // Appending a real two-byte string should produce a two-byte cons.
   var str2 = str + realTwoByteExternalString;
-  assertFalse(isAsciiString(str2));
+  assertFalse(isOneByteString(str2));
 
   // Force flattening of the string.
   old_length = str2.length - realTwoByteExternalString.length;
@@ -86,7 +86,7 @@
   }
 
   // Flattened string should still be two-byte.
-  assertFalse(isAsciiString(str2));
+  assertFalse(isOneByteString(str2));
 }
 
 // Run the test many times to ensure IC-s don't break things.
diff --git a/test/mjsunit/string-match.js b/test/mjsunit/string-match.js
index 202396d..7689652 100644
--- a/test/mjsunit/string-match.js
+++ b/test/mjsunit/string-match.js
@@ -66,7 +66,6 @@
   assertEquals(undefined, RegExp.$10, name + "-nocapture-10");
 
   assertEquals(input, RegExp.input, name + "-input");
-  assertEquals(input, RegExp.$input, name + "-$input");
   assertEquals(input, RegExp.$_, name + "-$_");
 
   assertEquals(preMatch, RegExp["$`"], name + "-$`");
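
Note: the dropped assertion reflects the removal of RegExp.$input, a
nonstandard V8-only alias; the widely implemented legacy statics RegExp.input
and RegExp.$_ remain. A sketch of what still holds after the change:

    "abc".match(/b/);
    console.log(RegExp.input);   // "abc" (legacy static)
    console.log(RegExp.$_);      // "abc" (alias of RegExp.input)
    console.log(RegExp.$input);  // undefined once the alias is gone
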
diff --git a/test/mjsunit/string-natives.js b/test/mjsunit/string-natives.js
index 7a9009b..40fe9c6 100644
--- a/test/mjsunit/string-natives.js
+++ b/test/mjsunit/string-natives.js
@@ -29,27 +29,27 @@
 
 function test() {
   var s1 = %NewString(26, true);
-  for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(s1, i, 65);
+  for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(i, 65, s1);
   assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAA", s1);
-  %_OneByteSeqStringSetChar(s1, 25, 66);
+  %_OneByteSeqStringSetChar(25, 66, s1);
   assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAB", s1);
-  for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(s1, i, i+65);
+  for (i = 0; i < 26; i++) %_OneByteSeqStringSetChar(i, i+65, s1);
   assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s1);
   s1 = %TruncateString(s1, 13);
   assertEquals("ABCDEFGHIJKLM", s1);
 
   var s2 = %NewString(26, false);
-  for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s2, i, 65);
+  for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, 65, s2);
   assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAA", s2);
-  %_TwoByteSeqStringSetChar(s2, 25, 66);
+  %_TwoByteSeqStringSetChar(25, 66, s2);
   assertEquals("AAAAAAAAAAAAAAAAAAAAAAAAAB", s2);
-  for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s2, i, i+65);
+  for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, i+65, s2);
   assertEquals("ABCDEFGHIJKLMNOPQRSTUVWXYZ", s2);
   s2 = %TruncateString(s2, 13);
   assertEquals("ABCDEFGHIJKLM", s2);
 
   var s3 = %NewString(26, false);
-  for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(s3, i, i+1000);
+  for (i = 0; i < 26; i++) %_TwoByteSeqStringSetChar(i, i+1000, s3);
   for (i = 0; i < 26; i++) assertEquals(s3[i], String.fromCharCode(i+1000));
 
   var a = [];
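
Note: the churn above is purely an argument-order change to the
%_OneByteSeqStringSetChar / %_TwoByteSeqStringSetChar intrinsics: the
receiver string moved from first to last position, i.e. (str, index, code)
became (index, code, str). For reference, a plain-JS sketch of the string the
i+65 loop builds, with no intrinsics or --allow-natives-syntax needed:

    var codes = [];
    for (var i = 0; i < 26; i++) codes.push(i + 65);
    console.log(String.fromCharCode.apply(null, codes));  // "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
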
diff --git a/test/mjsunit/string-oom-concat.js b/test/mjsunit/string-oom-concat.js
index 9529c89..0b35021 100644
--- a/test/mjsunit/string-oom-concat.js
+++ b/test/mjsunit/string-oom-concat.js
@@ -7,6 +7,7 @@
   for (var i = 0; i < 100; i++) {
     a += a;
   }
+  return a;
 }
 
 assertThrows(concat, RangeError);
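
Note: doubling "a" 100 times aims at 2^100 characters, far beyond V8's
maximum string length, so a RangeError must surface during concatenation; the
added return presumably keeps the result observable so the loop cannot be
eliminated as dead code. Plain-JS sketch:

    function concat() {
      var a = "a";
      for (var i = 0; i < 100; i++) a += a;
      return a;
    }
    try { concat(); } catch (e) { console.log(e instanceof RangeError); }  // true
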
diff --git a/test/mjsunit/string-slices.js b/test/mjsunit/string-slices.js
index 2fec04b..c3f889b 100644
--- a/test/mjsunit/string-slices.js
+++ b/test/mjsunit/string-slices.js
@@ -197,9 +197,9 @@
 var b = "23456789qwertyuiopasdfghjklzxcvbn"
 assertEquals(a.slice(1,-1), b);
 
-assertTrue(isAsciiString(a));
+assertTrue(isOneByteString(a));
 externalizeString(a, true);
-assertFalse(isAsciiString(a));
+assertFalse(isOneByteString(a));
 
 assertEquals(a.slice(1,-1), b);
 assertTrue(/3456789qwe/.test(a));
diff --git a/test/mjsunit/tools/tickprocessor-test.default b/test/mjsunit/tools/tickprocessor-test.default
index 8ab17c3..3e01532 100644
--- a/test/mjsunit/tools/tickprocessor-test.default
+++ b/test/mjsunit/tools/tickprocessor-test.default
@@ -1,13 +1,9 @@
 Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
 
- [Unknown]:
-   ticks  total  nonlib   name
-      2   15.4%
-
  [Shared libraries]:
    ticks  total  nonlib   name
-      3   23.1%    0.0%  /lib32/libm-2.7.so
-      1    7.7%    0.0%  ffffe000-fffff000
+      3   23.1%          /lib32/libm-2.7.so
+      1    7.7%          ffffe000-fffff000
 
  [JavaScript]:
    ticks  total  nonlib   name
@@ -20,9 +16,13 @@
       1    7.7%   11.1%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
       1    7.7%   11.1%  exp
 
- [GC]:
+ [Summary]:
    ticks  total  nonlib   name
-      0    0.0%
+      1    7.7%   11.1%  JavaScript
+      5   38.5%   55.6%  C++
+      0    0.0%    0.0%  GC
+      4   30.8%          Shared libraries
+      2   15.4%          Unaccounted
 
  [Bottom up (heavy) profile]:
   Note: percentage shows a share of a particular caller in the total
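
Note: the new [Summary] section is a plain aggregation of per-section ticks;
the nonlib column re-bases on ticks outside shared libraries. A sketch
reproducing the expectations above (13 ticks total, 4 in libraries):

    var total = 13, nonlib = total - 4;  // 9 non-library ticks
    var pct = function(t, base) { return (100 * t / base).toFixed(1); };
    console.log(pct(1, total), pct(1, nonlib));  // 7.7 11.1  (JavaScript)
    console.log(pct(5, total), pct(5, nonlib));  // 38.5 55.6 (C++)
    console.log(pct(4, total));                  // 30.8 (Shared libraries)
    console.log(pct(2, total));                  // 15.4 (Unaccounted)
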
diff --git a/test/mjsunit/tools/tickprocessor-test.func-info b/test/mjsunit/tools/tickprocessor-test.func-info
index a66b90f..c93b6ec 100644
--- a/test/mjsunit/tools/tickprocessor-test.func-info
+++ b/test/mjsunit/tools/tickprocessor-test.func-info
@@ -11,9 +11,12 @@
  [C++]:
    ticks  total  nonlib   name
 
- [GC]:
+ [Summary]:
    ticks  total  nonlib   name
-      0    0.0%
+      3  100.0%  100.0%  JavaScript
+      0    0.0%    0.0%  C++
+      0    0.0%    0.0%  GC
+      0    0.0%          Shared libraries
 
  [Bottom up (heavy) profile]:
   Note: percentage shows a share of a particular caller in the total
diff --git a/test/mjsunit/tools/tickprocessor-test.gc-state b/test/mjsunit/tools/tickprocessor-test.gc-state
index 40f90db..6b1a6a3 100644
--- a/test/mjsunit/tools/tickprocessor-test.gc-state
+++ b/test/mjsunit/tools/tickprocessor-test.gc-state
@@ -9,9 +9,12 @@
  [C++]:
    ticks  total  nonlib   name
 
- [GC]:
+ [Summary]:
    ticks  total  nonlib   name
-      0    0.0%
+      0    0.0%    0.0%  JavaScript
+      0    0.0%    0.0%  C++
+      0    0.0%    0.0%  GC
+      0    0.0%          Shared libraries
 
  [Bottom up (heavy) profile]:
   Note: percentage shows a share of a particular caller in the total
diff --git a/test/mjsunit/tools/tickprocessor-test.ignore-unknown b/test/mjsunit/tools/tickprocessor-test.ignore-unknown
index 677da9c..de70527 100644
--- a/test/mjsunit/tools/tickprocessor-test.ignore-unknown
+++ b/test/mjsunit/tools/tickprocessor-test.ignore-unknown
@@ -2,8 +2,8 @@
 
  [Shared libraries]:
    ticks  total  nonlib   name
-      3   27.3%    0.0%  /lib32/libm-2.7.so
-      1    9.1%    0.0%  ffffe000-fffff000
+      3   27.3%          /lib32/libm-2.7.so
+      1    9.1%          ffffe000-fffff000
 
  [JavaScript]:
    ticks  total  nonlib   name
@@ -16,9 +16,12 @@
       1    9.1%   14.3%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
       1    9.1%   14.3%  exp
 
- [GC]:
+ [Summary]:
    ticks  total  nonlib   name
-      0    0.0%
+      1    9.1%   14.3%  JavaScript
+      5   45.5%   71.4%  C++
+      0    0.0%    0.0%  GC
+      4   36.4%          Shared libraries
 
  [Bottom up (heavy) profile]:
   Note: percentage shows a share of a particular caller in the total
diff --git a/test/mjsunit/tools/tickprocessor-test.separate-ic b/test/mjsunit/tools/tickprocessor-test.separate-ic
index 2d49f73..119ccbe 100644
--- a/test/mjsunit/tools/tickprocessor-test.separate-ic
+++ b/test/mjsunit/tools/tickprocessor-test.separate-ic
@@ -1,13 +1,9 @@
 Statistical profiling result from v8.log, (13 ticks, 2 unaccounted, 0 excluded).
 
- [Unknown]:
-   ticks  total  nonlib   name
-      2   15.4%
-
  [Shared libraries]:
    ticks  total  nonlib   name
-      3   23.1%    0.0%  /lib32/libm-2.7.so
-      1    7.7%    0.0%  ffffe000-fffff000
+      3   23.1%          /lib32/libm-2.7.so
+      1    7.7%          ffffe000-fffff000
 
  [JavaScript]:
    ticks  total  nonlib   name
@@ -22,9 +18,13 @@
       1    7.7%   11.1%  v8::internal::HashTable<v8::internal::StringDictionaryShape, v8::internal::String*>::FindEntry(v8::internal::String*)
       1    7.7%   11.1%  exp
 
- [GC]:
+ [Summary]:
    ticks  total  nonlib   name
-      0    0.0%
+      3   23.1%   33.3%  JavaScript
+      5   38.5%   55.6%  C++
+      0    0.0%    0.0%  GC
+      4   30.8%          Shared libraries
+      2   15.4%          Unaccounted
 
  [Bottom up (heavy) profile]:
   Note: percentage shows a share of a particular caller in the total
diff --git a/test/mjsunit/tools/tickprocessor.js b/test/mjsunit/tools/tickprocessor.js
index 78a7c43..f460d34 100644
--- a/test/mjsunit/tools/tickprocessor.js
+++ b/test/mjsunit/tools/tickprocessor.js
@@ -25,10 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// This test case is not compatible with optimization stress because the
-// generated profile will look vastly different when more is optimized.
-// Flags: --nostress-opt --noalways-opt
-
 // Load implementations from <project root>/tools.
 // Files: tools/splaytree.js tools/codemap.js tools/csvparser.js
 // Files: tools/consarray.js tools/profile.js tools/profile_view.js
diff --git a/test/mjsunit/value-wrapper-accessor.js b/test/mjsunit/value-wrapper-accessor.js
index f951456..79db407 100644
--- a/test/mjsunit/value-wrapper-accessor.js
+++ b/test/mjsunit/value-wrapper-accessor.js
@@ -77,20 +77,14 @@
     %OptimizeFunctionOnNextCall(nonstrict);
     result = undefined;
     nonstrict(object);
-    // TODO(1475): Support storing to primitive values.
-    // This should return "object" once storing to primitive values is
-    // supported.
-    assertEquals("undefined", typeof result);
+    assertEquals("object", typeof result);
 
     strict(object);
     strict(object);
     %OptimizeFunctionOnNextCall(strict);
     result = undefined;
     strict(object);
-    // TODO(1475): Support storing to primitive values.
-    // This should return "object" once storing to primitive values is
-    // supported.
-    assertEquals("undefined", typeof result);
+    assertEquals(object, result);
   })();
 }
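
Note: the un-TODO'd assertions match standard receiver-boxing semantics,
which plain JS can demonstrate without natives: a sloppy-mode setter invoked
on a primitive sees a wrapper object, a strict-mode one sees the primitive
itself ("boxcheck" below is a hypothetical property name):

    var result;
    function defineSetter(strict) {
      var setter = strict
          ? function(v) { "use strict"; result = this; }
          : function(v) { result = this; };
      Object.defineProperty(String.prototype, "boxcheck",
                            { set: setter, configurable: true });
    }
    defineSetter(false);
    "str".boxcheck = 0;
    console.log(typeof result);  // "object": sloppy setter gets a String wrapper
    defineSetter(true);
    "str".boxcheck = 0;
    console.log(typeof result);  // "string": strict setter gets the primitive
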
 
diff --git a/test/mjsunit/with-readonly.js b/test/mjsunit/with-readonly.js
index 29982b3..4358334 100644
--- a/test/mjsunit/with-readonly.js
+++ b/test/mjsunit/with-readonly.js
@@ -27,8 +27,6 @@
 
 // Test that readonly variables are treated correctly.
 
-// Flags: --es5_readonly
-
 // Create an object with a read-only length property in the prototype
 // chain by putting the string split function in the prototype chain.
 var o = {};
diff --git a/test/mozilla/mozilla.status b/test/mozilla/mozilla.status
index 9e68e5f..e9f58c6 100644
--- a/test/mozilla/mozilla.status
+++ b/test/mozilla/mozilla.status
@@ -51,6 +51,17 @@
   'ecma_3/Number/15.7.4.3-02': [PASS, FAIL],
   'ecma_3/Date/15.9.5.5-02': [PASS, FAIL],
 
+  ################## TURBO-FAN FAILURES ###################
+
+  # TODO(turbofan): These are all covered by mjsunit as well. Enable them once
+  # we pass 'mjsunit' and 'webkit' with TurboFan.
+  'js1_4/Functions/function-001': [PASS, NO_VARIANTS],
+  'js1_5/Regress/regress-396684': [PASS, NO_VARIANTS],
+  'js1_5/Regress/regress-80981': [PASS, NO_VARIANTS],
+
+  # TODO(turbofan): Large switch statements crash.
+  'js1_5/Regress/regress-398085-01': [PASS, NO_VARIANTS],
+
   ##################### SKIPPED TESTS #####################
 
   # This test checks that we behave properly in an out-of-memory
@@ -170,7 +181,7 @@
   'js1_5/String/regress-56940-02': [PASS, FAIL],
   'js1_5/String/regress-157334-01': [PASS, FAIL],
   'js1_5/String/regress-322772': [PASS, FAIL],
-  'js1_5/Array/regress-99120-01': [PASS, FAIL],
+  'js1_5/Array/regress-99120-01': [PASS, FAIL, NO_VARIANTS],
   'js1_5/Array/regress-99120-02': [PASS, FAIL],
   'js1_5/Regress/regress-347306-01': [PASS, FAIL],
   'js1_5/Regress/regress-416628': [PASS, FAIL, ['mode == debug', TIMEOUT, NO_VARIANTS]],
@@ -642,10 +653,6 @@
   # We do not correctly handle assignments within "with"
   'ecma_3/Statements/12.10-01': [FAIL],
 
-  # We do not throw an exception when a const is redeclared.
-  # (We only fail section 1 of the test.)
-  'js1_5/Regress/regress-103602': [FAIL],
-
   ##################### MOZILLA EXTENSION TESTS #####################
 
   'ecma/extensions/15.1.2.1-1': [FAIL_OK],
@@ -850,7 +857,6 @@
   'js1_5/Regress/regress-404755': [SKIP],
   'js1_5/Regress/regress-451322': [SKIP],
 
-
   # BUG(1040): Allow this test to timeout.
   'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
 }],  # 'arch == arm or arch == arm64'
@@ -859,10 +865,13 @@
 ['arch ==  arm64', {
   # BUG(v8:3152): Runs out of stack in debug mode.
   'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
+
+  # BUG(v8:3503): Times out in debug mode.
+  'js1_5/Regress/regress-280769-2': [PASS, FAIL, ['mode == debug', SKIP]],
 }],  # 'arch ==  arm64'
 
 
-['arch == mipsel', {
+['arch == mipsel or arch == mips64el', {
 
   # BUG(3251229): Times out when running new crankshaft test script.
   'ecma_3/RegExp/regress-311414': [SKIP],
@@ -879,7 +888,7 @@
 
   # BUG(1040): Allow this test to timeout.
   'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
-}],  # 'arch == mipsel'
+}],  # 'arch == mipsel or arch == mips64el'
 
 ['arch == mips', {
 
diff --git a/test/perf-test/Collections/Collections.json b/test/perf-test/Collections/Collections.json
new file mode 100644
index 0000000..bf735c0
--- /dev/null
+++ b/test/perf-test/Collections/Collections.json
@@ -0,0 +1,15 @@
+{
+  "path": ["."],
+  "main": "run.js",
+  "flags": ["--harmony-collections"],
+  "run_count": 5,
+  "units": "score",
+  "results_regexp": "^%s\\-Collections\\(Score\\): (.+)$",
+  "total": true,
+  "tests": [
+    {"name": "Map"},
+    {"name": "Set"},
+    {"name": "WeakMap"},
+    {"name": "WeakSet"}
+  ]
+}
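
Note: the perf runner substitutes each test name for %s in results_regexp
before matching the benchmark's stdout, so run.js is expected to print lines
like "Map-Collections(Score): 1234". A sketch of the matching, assuming that
substitution convention:

    var template = "^%s\\-Collections\\(Score\\): (.+)$";
    var re = new RegExp(template.replace("%s", "Map"), "m");
    console.log(re.exec("Map-Collections(Score): 1234")[1]);  // "1234"
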
diff --git a/test/perf-test/Collections/base.js b/test/perf-test/Collections/base.js
new file mode 100644
index 0000000..b0ce40b
--- /dev/null
+++ b/test/perf-test/Collections/base.js
@@ -0,0 +1,367 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Performance.now is used in latency benchmarks, the fallback is Date.now.
+var performance = performance || {};
+performance.now = (function() {
+  return performance.now       ||
+         performance.mozNow    ||
+         performance.msNow     ||
+         performance.oNow      ||
+         performance.webkitNow ||
+         Date.now;
+})();
+
+// Simple framework for running the benchmark suites and
+// computing a score based on the timing measurements.
+
+
+// A benchmark has a name (string) and a function that will be run to
+// do the performance measurement. The optional setup and tearDown
+// arguments are functions that will be invoked before and after
+// running the benchmark, but the running time of these functions will
+// not be accounted for in the benchmark score.
+function Benchmark(name, doWarmup, doDeterministic, deterministicIterations,
+                   run, setup, tearDown, rmsResult, minIterations) {
+  this.name = name;
+  this.doWarmup = doWarmup;
+  this.doDeterministic = doDeterministic;
+  this.deterministicIterations = deterministicIterations;
+  this.run = run;
+  this.Setup = setup ? setup : function() { };
+  this.TearDown = tearDown ? tearDown : function() { };
+  this.rmsResult = rmsResult ? rmsResult : null;
+  this.minIterations = minIterations ? minIterations : 32;
+}
+
+
+// Benchmark results hold the benchmark and the measured time used to
+// run the benchmark. The benchmark score is computed later once a
+// full benchmark suite has run to completion. If latency is set to 0
+// then there is no latency score for this benchmark.
+function BenchmarkResult(benchmark, time, latency) {
+  this.benchmark = benchmark;
+  this.time = time;
+  this.latency = latency;
+}
+
+
+// Automatically convert results to numbers. Used by the geometric
+// mean computation.
+BenchmarkResult.prototype.valueOf = function() {
+  return this.time;
+}
+
+
+// Suites of benchmarks consist of a name and the set of benchmarks in
+// addition to the reference timing that the final score will be based
+// on. This way, all scores are relative to a reference run and higher
+// scores imply better performance.
+function BenchmarkSuite(name, reference, benchmarks) {
+  this.name = name;
+  this.reference = reference;
+  this.benchmarks = benchmarks;
+  BenchmarkSuite.suites.push(this);
+}
+
+
+// Keep track of all declared benchmark suites.
+BenchmarkSuite.suites = [];
+
+// Scores are not comparable across versions. Bump the version if
+// you're making changes that will affect the scores, e.g. if you add
+// a new benchmark or change an existing one.
+BenchmarkSuite.version = '1';
+
+
+// Defines a global benchmark-suite running mode that overrides benchmark
+// suite behavior. Intended to be set by the benchmark driver. Undefined
+// values here allow a benchmark to define the behavior itself.
+BenchmarkSuite.config = {
+  doWarmup: undefined,
+  doDeterministic: undefined
+};
+
+
+// Override the alert function to throw an exception instead.
+alert = function(s) {
+  throw "Alert called with argument: " + s;
+};
+
+
+// To make the benchmark results predictable, we replace Math.random
+// with a 100% deterministic alternative.
+BenchmarkSuite.ResetRNG = function() {
+  Math.random = (function() {
+    var seed = 49734321;
+    return function() {
+      // Robert Jenkins' 32 bit integer hash function.
+      seed = ((seed + 0x7ed55d16) + (seed << 12))  & 0xffffffff;
+      seed = ((seed ^ 0xc761c23c) ^ (seed >>> 19)) & 0xffffffff;
+      seed = ((seed + 0x165667b1) + (seed << 5))   & 0xffffffff;
+      seed = ((seed + 0xd3a2646c) ^ (seed << 9))   & 0xffffffff;
+      seed = ((seed + 0xfd7046c5) + (seed << 3))   & 0xffffffff;
+      seed = ((seed ^ 0xb55a4f09) ^ (seed >>> 16)) & 0xffffffff;
+      return (seed & 0xfffffff) / 0x10000000;
+    };
+  })();
+}
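A quick sanity sketch of the property this buys (not part of the change
itself): after ResetRNG, Math.random is fully reproducible.

  BenchmarkSuite.ResetRNG();
  var first = [Math.random(), Math.random()];
  BenchmarkSuite.ResetRNG();
  var second = [Math.random(), Math.random()];
  // first[0] === second[0] and first[1] === second[1], because the
  // generator always restarts from the fixed seed 49734321.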
+
+
+// Runs all registered benchmark suites and optionally yields between
+// each individual benchmark to avoid running for too long in the
+// context of browsers. Once done, the final score is reported to the
+// runner.
+BenchmarkSuite.RunSuites = function(runner, skipBenchmarks) {
+  skipBenchmarks = typeof skipBenchmarks === 'undefined' ? [] : skipBenchmarks;
+  var continuation = null;
+  var suites = BenchmarkSuite.suites;
+  var length = suites.length;
+  BenchmarkSuite.scores = [];
+  var index = 0;
+  function RunStep() {
+    while (continuation || index < length) {
+      if (continuation) {
+        continuation = continuation();
+      } else {
+        var suite = suites[index++];
+        if (runner.NotifyStart) runner.NotifyStart(suite.name);
+        if (skipBenchmarks.indexOf(suite.name) > -1) {
+          suite.NotifySkipped(runner);
+        } else {
+          continuation = suite.RunStep(runner);
+        }
+      }
+      if (continuation && typeof window != 'undefined' && window.setTimeout) {
+        window.setTimeout(RunStep, 25);
+        return;
+      }
+    }
+
+    // show final result
+    if (runner.NotifyScore) {
+      var score = BenchmarkSuite.GeometricMean(BenchmarkSuite.scores);
+      var formatted = BenchmarkSuite.FormatScore(100 * score);
+      runner.NotifyScore(formatted);
+    }
+  }
+  RunStep();
+}
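The runner argument is a plain object of optional callbacks; every Notify*
call above is guarded, so any subset may be supplied. A minimal sketch of such
a runner (hypothetical names; run.js below wires up NotifyResult and
NotifyError in exactly this way):

  var consoleRunner = {
    NotifyStart: function(name) { print('Running ' + name); },
    NotifyResult: function(name, score) { print(name + ': ' + score); },
    NotifyError: function(name, error) { print(name + ' failed: ' + error); },
    NotifyScore: function(score) { print('Total: ' + score); }
  };
  BenchmarkSuite.RunSuites(consoleRunner);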
+
+
+// Counts the total number of registered benchmarks. Useful for
+// showing progress as a percentage.
+BenchmarkSuite.CountBenchmarks = function() {
+  var result = 0;
+  var suites = BenchmarkSuite.suites;
+  for (var i = 0; i < suites.length; i++) {
+    result += suites[i].benchmarks.length;
+  }
+  return result;
+}
+
+
+// Computes the geometric mean of a set of numbers.
+BenchmarkSuite.GeometricMean = function(numbers) {
+  var log = 0;
+  for (var i = 0; i < numbers.length; i++) {
+    log += Math.log(numbers[i]);
+  }
+  return Math.pow(Math.E, log / numbers.length);
+}
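A worked example: GeometricMean([1, 100]) evaluates to
exp((ln 1 + ln 100) / 2) = exp(2.3025...) = 10 (up to floating-point
rounding), whereas the arithmetic mean of the same inputs would be 50.5. This
is why a single fast suite cannot linearly offset a single slow one in the
final score.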
+
+
+// Computes the geometric mean of a set of throughput time measurements.
+BenchmarkSuite.GeometricMeanTime = function(measurements) {
+  var log = 0;
+  for (var i = 0; i < measurements.length; i++) {
+    log += Math.log(measurements[i].time);
+  }
+  return Math.pow(Math.E, log / measurements.length);
+}
+
+
+// Computes the geometric mean of a set of rms measurements.
+BenchmarkSuite.GeometricMeanLatency = function(measurements) {
+  var log = 0;
+  var hasLatencyResult = false;
+  for (var i = 0; i < measurements.length; i++) {
+    if (measurements[i].latency != 0) {
+      log += Math.log(measurements[i].latency);
+      hasLatencyResult = true;
+    }
+  }
+  if (hasLatencyResult) {
+    return Math.pow(Math.E, log / measurements.length);
+  } else {
+    return 0;
+  }
+}
+
+
+// Converts a score value to a string with at least three significant
+// digits.
+BenchmarkSuite.FormatScore = function(value) {
+  if (value > 100) {
+    return value.toFixed(0);
+  } else {
+    return value.toPrecision(3);
+  }
+}
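Two worked examples of the rule above: FormatScore(123.456) takes the
toFixed(0) branch and yields '123', while FormatScore(42.1234) takes the
toPrecision(3) branch and yields '42.1'.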
+
+// Notifies the runner that we're done running a single benchmark in
+// the benchmark suite. This can be useful to report progress.
+BenchmarkSuite.prototype.NotifyStep = function(result) {
+  this.results.push(result);
+  if (this.runner.NotifyStep) this.runner.NotifyStep(result.benchmark.name);
+}
+
+
+// Notifies the runner that we're done with running a suite and that
+// we have a result which can be reported to the user if needed.
+BenchmarkSuite.prototype.NotifyResult = function() {
+  var mean = BenchmarkSuite.GeometricMeanTime(this.results);
+  var score = this.reference[0] / mean;
+  BenchmarkSuite.scores.push(score);
+  if (this.runner.NotifyResult) {
+    var formatted = BenchmarkSuite.FormatScore(100 * score);
+    this.runner.NotifyResult(this.name, formatted);
+  }
+  if (this.reference.length == 2) {
+    var meanLatency = BenchmarkSuite.GeometricMeanLatency(this.results);
+    if (meanLatency != 0) {
+      var scoreLatency = this.reference[1] / meanLatency;
+      BenchmarkSuite.scores.push(scoreLatency);
+      if (this.runner.NotifyResult) {
+        var formattedLatency = BenchmarkSuite.FormatScore(100 * scoreLatency);
+        this.runner.NotifyResult(this.name + "Latency", formattedLatency);
+      }
+    }
+  }
+}
+
+
+BenchmarkSuite.prototype.NotifySkipped = function(runner) {
+  BenchmarkSuite.scores.push(1);  // push default reference score.
+  if (runner.NotifyResult) {
+    runner.NotifyResult(this.name, "Skipped");
+  }
+}
+
+
+// Notifies the runner that running a benchmark resulted in an error.
+BenchmarkSuite.prototype.NotifyError = function(error) {
+  if (this.runner.NotifyError) {
+    this.runner.NotifyError(this.name, error);
+  }
+  if (this.runner.NotifyStep) {
+    this.runner.NotifyStep(this.name);
+  }
+}
+
+
+// Runs a single benchmark for at least a second and computes the
+// average time it takes to run a single iteration.
+BenchmarkSuite.prototype.RunSingleBenchmark = function(benchmark, data) {
+  var config = BenchmarkSuite.config;
+  var doWarmup = config.doWarmup !== undefined
+                 ? config.doWarmup
+                 : benchmark.doWarmup;
+  var doDeterministic = config.doDeterministic !== undefined
+                        ? config.doDeterministic
+                        : benchmark.doDeterministic;
+
+  function Measure(data) {
+    var elapsed = 0;
+    var start = new Date();
+
+    // Run either for 1 second or for the number of iterations specified
+    // by minIterations, depending on the config flag doDeterministic.
+    for (var i = 0; (doDeterministic ?
+        i < benchmark.deterministicIterations : elapsed < 1000); i++) {
+      benchmark.run();
+      elapsed = new Date() - start;
+    }
+    if (data != null) {
+      data.runs += i;
+      data.elapsed += elapsed;
+    }
+  }
+
+  // Set up the data object so that the warmup phase is skipped when requested.
+  if (!doWarmup && data == null) {
+    data = { runs: 0, elapsed: 0 };
+  }
+
+  if (data == null) {
+    Measure(null);
+    return { runs: 0, elapsed: 0 };
+  } else {
+    Measure(data);
+    // If we've run too few iterations, we continue for another second.
+    if (data.runs < benchmark.minIterations) return data;
+    var usec = (data.elapsed * 1000) / data.runs;
+    var rms = (benchmark.rmsResult != null) ? benchmark.rmsResult() : 0;
+    this.NotifyStep(new BenchmarkResult(benchmark, usec, rms));
+    return null;
+  }
+}
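The resulting call protocol, sketched for a hypothetical driver (suite and
benchmark stand for a BenchmarkSuite instance and one of its benchmarks; this
mirrors what RunNextBenchmark below actually does): the first call with an
undefined accumulator performs the throwaway warmup measurement (when doWarmup
is set) and returns a zeroed accumulator; later calls accumulate runs and
elapsed time until minIterations is reached, at which point NotifyStep fires
and null is returned.

  var data;  // undefined: the first call is the warmup measurement
  do {
    data = suite.RunSingleBenchmark(benchmark, data);
  } while (data != null);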
+
+
+// This function starts running a suite, but stops between each
+// individual benchmark in the suite and returns a continuation
+// function which can be invoked to run the next benchmark. Once the
+// last benchmark has been executed, null is returned.
+BenchmarkSuite.prototype.RunStep = function(runner) {
+  BenchmarkSuite.ResetRNG();
+  this.results = [];
+  this.runner = runner;
+  var length = this.benchmarks.length;
+  var index = 0;
+  var suite = this;
+  var data;
+
+  // Run the setup, the actual benchmark, and the tear down in three
+  // separate steps to allow the framework to yield between any of the
+  // steps.
+
+  function RunNextSetup() {
+    if (index < length) {
+      try {
+        suite.benchmarks[index].Setup();
+      } catch (e) {
+        suite.NotifyError(e);
+        return null;
+      }
+      return RunNextBenchmark;
+    }
+    suite.NotifyResult();
+    return null;
+  }
+
+  function RunNextBenchmark() {
+    try {
+      data = suite.RunSingleBenchmark(suite.benchmarks[index], data);
+    } catch (e) {
+      suite.NotifyError(e);
+      return null;
+    }
+    // If data is null, we're done with this benchmark.
+    return (data == null) ? RunNextTearDown : RunNextBenchmark();
+  }
+
+  function RunNextTearDown() {
+    try {
+      suite.benchmarks[index++].TearDown();
+    } catch (e) {
+      suite.NotifyError(e);
+      return null;
+    }
+    return RunNextSetup;
+  }
+
+  // Start out running the setup.
+  return RunNextSetup();
+}
diff --git a/test/perf-test/Collections/map.js b/test/perf-test/Collections/map.js
new file mode 100644
index 0000000..b310a71
--- /dev/null
+++ b/test/perf-test/Collections/map.js
@@ -0,0 +1,81 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var MapBenchmark = new BenchmarkSuite('Map', [1000], [
+  new Benchmark('Set', false, false, 0, MapSet),
+  new Benchmark('Has', false, false, 0, MapHas, MapSetup, MapTearDown),
+  new Benchmark('Get', false, false, 0, MapGet, MapSetup, MapTearDown),
+  new Benchmark('Delete', false, false, 0, MapDelete, MapSetup, MapTearDown),
+  new Benchmark('ForEach', false, false, 0, MapForEach, MapSetup, MapTearDown),
+]);
+
+
+var map;
+var N = 10;
+
+
+function MapSetup() {
+  map = new Map;
+  for (var i = 0; i < N; i++) {
+    map.set(i, i);
+  }
+}
+
+
+function MapTearDown() {
+  map = null;
+}
+
+
+function MapSet() {
+  MapSetup();
+  MapTearDown();
+}
+
+
+function MapHas() {
+  for (var i = 0; i < N; i++) {
+    if (!map.has(i)) {
+      throw new Error();
+    }
+  }
+  for (var i = N; i < 2 * N; i++) {
+    if (map.has(i)) {
+      throw new Error();
+    }
+  }
+}
+
+
+function MapGet() {
+  for (var i = 0; i < N; i++) {
+    if (map.get(i) !== i) {
+      throw new Error();
+    }
+  }
+  for (var i = N; i < 2 * N; i++) {
+    if (map.get(i) !== undefined) {
+      throw new Error();
+    }
+  }
+}
+
+
+function MapDelete() {
+  // This is run more than once per setup, so we will end up deleting items
+  // more than once. Therefore, we do not check the return value of delete.
+  for (var i = 0; i < N; i++) {
+    map.delete(i);
+  }
+}
+
+
+function MapForEach() {
+  map.forEach(function(v, k) {
+    if (v !== k) {
+      throw new Error();
+    }
+  });
+}
diff --git a/test/perf-test/Collections/run.js b/test/perf-test/Collections/run.js
new file mode 100644
index 0000000..cfd1aef
--- /dev/null
+++ b/test/perf-test/Collections/run.js
@@ -0,0 +1,30 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('base.js');
+load('map.js');
+load('set.js');
+load('weakmap.js');
+load('weakset.js');
+
+
+var success = true;
+
+function PrintResult(name, result) {
+  print(name + '-Collections(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+  PrintResult(name, error);
+  success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+                           NotifyError: PrintError });
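For reference, each line printed by PrintResult has exactly the shape that the
"results_regexp" in Collections.json above captures. A small sketch (the score
value is hypothetical):

  // PrintResult('Map', '1250') prints:
  //   Map-Collections(Score): 1250
  // After the perf runner substitutes the test name 'Map' for %s, the
  // regexp "^Map\-Collections\(Score\): (.+)$" captures the score '1250'.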
diff --git a/test/perf-test/Collections/set.js b/test/perf-test/Collections/set.js
new file mode 100644
index 0000000..e6455e1
--- /dev/null
+++ b/test/perf-test/Collections/set.js
@@ -0,0 +1,66 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var SetBenchmark = new BenchmarkSuite('Set', [1000], [
+  new Benchmark('Add', false, false, 0, SetAdd),
+  new Benchmark('Has', false, false, 0, SetHas, SetSetup, SetTearDown),
+  new Benchmark('Delete', false, false, 0, SetDelete, SetSetup, SetTearDown),
+  new Benchmark('ForEach', false, false, 0, SetForEach, SetSetup, SetTearDown),
+]);
+
+
+var set;
+var N = 10;
+
+
+function SetSetup() {
+  set = new Set;
+  for (var i = 0; i < N; i++) {
+    set.add(i);
+  }
+}
+
+
+function SetTearDown() {
+  set = null;
+}
+
+
+function SetAdd() {
+  SetSetup();
+  SetTearDown();
+}
+
+
+function SetHas() {
+  for (var i = 0; i < N; i++) {
+    if (!set.has(i)) {
+      throw new Error();
+    }
+  }
+  for (var i = N; i < 2 * N; i++) {
+    if (set.has(i)) {
+      throw new Error();
+    }
+  }
+}
+
+
+function SetDelete() {
+  // This is run more than once per setup, so we will end up deleting items
+  // more than once. Therefore, we do not check the return value of delete.
+  for (var i = 0; i < N; i++) {
+    set.delete(i);
+  }
+}
+
+
+function SetForEach() {
+  set.forEach(function(v, k) {
+    if (v !== k) {
+      throw new Error();
+    }
+  });
+}
diff --git a/test/perf-test/Collections/weakmap.js b/test/perf-test/Collections/weakmap.js
new file mode 100644
index 0000000..8736dfd
--- /dev/null
+++ b/test/perf-test/Collections/weakmap.js
@@ -0,0 +1,80 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var MapBenchmark = new BenchmarkSuite('WeakMap', [1000], [
+  new Benchmark('Set', false, false, 0, WeakMapSet),
+  new Benchmark('Has', false, false, 0, WeakMapHas, WeakMapSetup,
+      WeakMapTearDown),
+  new Benchmark('Get', false, false, 0, WeakMapGet, WeakMapSetup,
+      WeakMapTearDown),
+  new Benchmark('Delete', false, false, 0, WeakMapDelete, WeakMapSetup,
+      WeakMapTearDown),
+]);
+
+
+var wm;
+var N = 10;
+var keys = [];
+
+
+for (var i = 0; i < N * 2; i++) {
+  keys[i] = {};
+}
+
+
+function WeakMapSetup() {
+  wm = new WeakMap;
+  for (var i = 0; i < N; i++) {
+    wm.set(keys[i], i);
+  }
+}
+
+
+function WeakMapTearDown() {
+  wm = null;
+}
+
+
+function WeakMapSet() {
+  WeakMapSetup();
+  WeakMapTearDown();
+}
+
+
+function WeakMapHas() {
+  for (var i = 0; i < N; i++) {
+    if (!wm.has(keys[i])) {
+      throw new Error();
+    }
+  }
+  for (var i = N; i < 2 * N; i++) {
+    if (wm.has(keys[i])) {
+      throw new Error();
+    }
+  }
+}
+
+
+function WeakMapGet() {
+  for (var i = 0; i < N; i++) {
+    if (wm.get(keys[i]) !== i) {
+      throw new Error();
+    }
+  }
+  for (var i = N; i < 2 * N; i++) {
+    if (wm.get(keys[i]) !== undefined) {
+      throw new Error();
+    }
+  }
+}
+
+
+function WeakMapDelete() {
+  // This is run more than once per setup, so we will end up deleting items
+  // more than once. Therefore, we do not check the return value of delete.
+  for (var i = 0; i < N; i++) {
+    wm.delete(keys[i]);
+  }
+}
diff --git a/test/perf-test/Collections/weakset.js b/test/perf-test/Collections/weakset.js
new file mode 100644
index 0000000..a7d0f3d
--- /dev/null
+++ b/test/perf-test/Collections/weakset.js
@@ -0,0 +1,64 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+var SetBenchmark = new BenchmarkSuite('WeakSet', [1000], [
+  new Benchmark('Add', false, false, 0, WeakSetAdd),
+  new Benchmark('Has', false, false, 0, WeakSetHas, WeakSetSetup,
+      WeakSetTearDown),
+  new Benchmark('Delete', false, false, 0, WeakSetDelete, WeakSetSetup,
+      WeakSetTearDown),
+]);
+
+
+var ws;
+var N = 10;
+var keys = [];
+
+
+for (var i = 0; i < N * 2; i++) {
+  keys[i] = {};
+}
+
+
+function WeakSetSetup() {
+  ws = new WeakSet;
+  for (var i = 0; i < N; i++) {
+    ws.add(keys[i]);
+  }
+}
+
+
+function WeakSetTearDown() {
+  ws = null;
+}
+
+
+function WeakSetAdd() {
+  WeakSetSetup();
+  WeakSetTearDown();
+}
+
+
+function WeakSetHas() {
+  for (var i = 0; i < N; i++) {
+    if (!ws.has(keys[i])) {
+      throw new Error();
+    }
+  }
+  for (var i = N; i < 2 * N; i++) {
+    if (ws.has(keys[i])) {
+      throw new Error();
+    }
+  }
+}
+
+
+function WeakSetDelete() {
+  // This is run more than once per setup, so we will end up deleting items
+  // more than once. Therefore, we do not check the return value of delete.
+  for (var i = 0; i < N; i++) {
+    ws.delete(keys[i]);
+  }
+}
diff --git a/test/preparser/duplicate-property.pyt b/test/preparser/duplicate-property.pyt
deleted file mode 100644
index 594b478..0000000
--- a/test/preparser/duplicate-property.pyt
+++ /dev/null
@@ -1,162 +0,0 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# Tests of duplicate properties in object literals.
-
-# ----------------------------------------------------------------------
-# Utility functions to generate a number of tests for each property
-# name pair.
-
-def PropertyTest(name, propa, propb, allow_strict = True):
-  replacement = {"id1": propa, "id2": propb, "name": name}
-
-  # Tests same test in both strict and non-strict context.
-  def StrictTest(name, source, replacement, expectation):
-    if (allow_strict):
-      Template("strict-" + name,
-               "\"use strict\";\n" + source)(replacement, expectation)
-      Template(name, source)(replacement, expectation)
-
-  # This one only fails in non-strict context.
-  if (allow_strict):
-    Template("strict-$name-data-data", """
-        "use strict";
-        var o = {$id1: 42, $id2: 42};
-      """)(replacement, "strict_duplicate_property")
-
-  Template("$name-data-data", """
-      var o = {$id1: 42, $id2: 42};
-    """)(replacement, None)
-
-  StrictTest("$name-data-get", """
-      var o = {$id1: 42, get $id2(){}};
-    """, replacement, "accessor_data_property")
-
-  StrictTest("$name-data-set", """
-      var o = {$id1: 42, set $id2(v){}};
-    """, replacement, "accessor_data_property")
-
-  StrictTest("$name-get-data", """
-      var o = {get $id1(){}, $id2: 42};
-    """, replacement, "accessor_data_property")
-
-  StrictTest("$name-set-data", """
-      var o = {set $id1(v){}, $id2: 42};
-    """, replacement, "accessor_data_property")
-
-  StrictTest("$name-get-get", """
-      var o = {get $id1(){}, get $id2(){}};
-    """, replacement, "accessor_get_set")
-
-  StrictTest("$name-set-set", """
-      var o = {set $id1(v){}, set $id2(v){}};
-    """, replacement, "accessor_get_set")
-
-  StrictTest("$name-nested-get", """
-      var o = {get $id1(){}, o: {get $id2(){} } };
-    """, replacement, None)
-
-  StrictTest("$name-nested-set", """
-      var o = {set $id1(v){}, o: {set $id2(v){} } };
-    """, replacement, None)
-
-
-def TestBothWays(name, propa, propb, allow_strict = True):
-  PropertyTest(name + "-1", propa, propb, allow_strict)
-  PropertyTest(name + "-2", propb, propa, allow_strict)
-
-def TestSame(name, prop, allow_strict = True):
-  PropertyTest(name, prop, prop, allow_strict)
-
-#-----------------------------------------------------------------------
-
-# Simple identifier property
-TestSame("a", "a")
-
-# Get/set identifiers
-TestSame("get-id", "get")
-TestSame("set-id", "set")
-
-# Number properties
-TestSame("0", "0")
-TestSame("0.1", "0.1")
-TestSame("1.0", "1.0")
-TestSame("42.33", "42.33")
-TestSame("2^32-2", "4294967294")
-TestSame("2^32", "4294967296")
-TestSame("2^53", "9007199254740992")
-TestSame("Hex20", "0x20")
-TestSame("exp10", "1e10")
-TestSame("exp20", "1e20")
-TestSame("Oct40", "040", False);
-
-
-# String properties
-TestSame("str-a", '"a"')
-TestSame("str-0", '"0"')
-TestSame("str-42", '"42"')
-TestSame("str-empty", '""')
-
-# Keywords
-TestSame("if", "if")
-TestSame("case", "case")
-
-# Future reserved keywords
-TestSame("public", "public")
-TestSame("class", "class")
-
-
-# Test that numbers are converted to string correctly.
-
-TestBothWays("hex-int", "0x20", "32")
-TestBothWays("oct-int", "040", "32", False)  # Octals disallowed in strict mode.
-TestBothWays("dec-int", "32.00", "32")
-TestBothWays("dec-underflow-int",
-             "32.00000000000000000000000000000000000000001", "32")
-TestBothWays("exp-int", "3.2e1", "32")
-TestBothWays("exp-int", "3200e-2", "32")
-TestBothWays("overflow-inf", "1e2000", "Infinity")
-TestBothWays("overflow-inf-exact", "1.797693134862315808e+308", "Infinity")
-TestBothWays("non-overflow-inf-exact", "1.797693134862315807e+308",
-                                       "1.7976931348623157e+308")
-TestBothWays("underflow-0", "1e-2000", "0")
-TestBothWays("underflow-0-exact", "2.4703282292062E-324", "0")
-TestBothWays("non-underflow-0-exact", "2.4703282292063E-324", "5e-324")
-TestBothWays("precission-loss-high", "9007199254740992", "9007199254740993")
-TestBothWays("precission-loss-low", "1.9999999999999998", "1.9999999999999997")
-TestBothWays("non-canonical-literal-int", "1.0", "1")
-TestBothWays("non-canonical-literal-frac", "1.50", "1.5")
-TestBothWays("rounding-down", "1.12512512512512452", "1.1251251251251244")
-TestBothWays("rounding-up", "1.12512512512512453", "1.1251251251251246")
-
-TestBothWays("hex-int-str", "0x20", '"32"')
-TestBothWays("dec-int-str", "32.00", '"32"')
-TestBothWays("exp-int-str", "3.2e1", '"32"')
-TestBothWays("overflow-inf-str", "1e2000", '"Infinity"')
-TestBothWays("underflow-0-str", "1e-2000", '"0"')
-TestBothWays("non-canonical-literal-int-str", "1.0", '"1"')
-TestBothWays("non-canonical-literal-frac-str", "1.50", '"1.5"')
diff --git a/test/preparser/preparser.status b/test/preparser/preparser.status
index babf35d..9d69988 100644
--- a/test/preparser/preparser.status
+++ b/test/preparser/preparser.status
@@ -25,10 +25,6 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
-# We don't parse RegExps at scanning time, so we can't fail on octal
-# escapes (we need to parse to distinguish octal escapes from valid
-# back-references).
 [
 [ALWAYS, {
   # TODO(mstarzinger): This script parses but throws a TypeError when run.
diff --git a/test/promises-aplus/promises-aplus.status b/test/promises-aplus/promises-aplus.status
index fdcf40b..5da9efa 100644
--- a/test/promises-aplus/promises-aplus.status
+++ b/test/promises-aplus/promises-aplus.status
@@ -25,7 +25,6 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-
 [
 [ALWAYS, {
 }],  # ALWAYS
diff --git a/test/test262-es6/README b/test/test262-es6/README
new file mode 100644
index 0000000..d0b3b42
--- /dev/null
+++ b/test/test262-es6/README
@@ -0,0 +1,18 @@
+This directory contains code for binding the test262 test suite
+into the v8 test harness. To use the tests, check out the test262
+tests from
+
+  https://github.com/tc39/test262
+
+at hash 9bd6686 (the 2014-08-25 revision) as 'data' in this directory.  Using a
+later version may be possible, but the tests are only known to pass (and indeed
+run) with that revision.
+
+  git clone https://github.com/tc39/test262 data
+  cd data
+  git checkout 9bd6686
+
+If you do update to a newer revision, you may have to change the test
+harness adapter code, since it uses internal functionality from the
+harness that comes bundled with the tests.  You will most likely also
+have to update the test expectation file.
diff --git a/test/test262-es6/harness-adapt.js b/test/test262-es6/harness-adapt.js
new file mode 100644
index 0000000..60c0858
--- /dev/null
+++ b/test/test262-es6/harness-adapt.js
@@ -0,0 +1,91 @@
+// Copyright 2011 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+function fnGlobalObject() { return (function() { return this; })(); }
+
+var ES5Harness = (function() {
+  var currentTest = {};
+  var $this = this;
+
+  function Test262Error(id, path, description, codeString,
+                        preconditionString, result, error) {
+    this.id = id;
+    this.path = path;
+    this.description = description;
+    this.result = result;
+    this.error = error;
+    this.code = codeString;
+    this.pre = preconditionString;
+  }
+
+  Test262Error.prototype.toString = function() {
+    return this.result + " " + this.error;
+  }
+
+  function registerTest(test) {
+    if (!(test.precondition && !test.precondition())) {
+      var error;
+      try {
+        var res = test.test.call($this);
+      } catch(e) {
+        res = 'fail';
+        error = e;
+      }
+      var retVal = /^s/i.test(test.id)
+          ? (res === true || typeof res == 'undefined' ? 'pass' : 'fail')
+          : (res === true ? 'pass' : 'fail');
+
+      if (retVal != 'pass') {
+         var precondition = (test.precondition !== undefined)
+             ? test.precondition.toString()
+             : '';
+
+         throw new Test262Error(
+            test.id,
+            test.path,
+            test.description,
+            test.test.toString(),
+            precondition,
+            retVal,
+            error);
+      }
+    }
+  }
+
+  return {
+    registerTest: registerTest
+  }
+})();
+
+function $DONE(arg) {
+  if (arg) {
+    print('FAILED! Error: ' + arg);
+    quit(1);
+  }
+
+  quit(0);
+}
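For orientation, a sketch of the kind of object the bundled test262 harness
passes to ES5Harness.registerTest (the field names match the properties read
above; the id, path and test body are hypothetical):

  ES5Harness.registerTest({
    id: 'S8.5_A2.1',    // ids starting with 's'/'S' also pass when the test
                        // function returns undefined (see retVal above)
    path: 'TestCases/S8.5_A2.1.js',
    description: 'a hypothetical floating-point check',
    test: function testcase() { return 0.5 === 1 / 2; },
    precondition: undefined  // optional; the test is skipped only when a
                             // precondition exists and returns false
  });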
diff --git a/test/test262-es6/test262-es6.status b/test/test262-es6/test262-es6.status
new file mode 100644
index 0000000..c4c94f3
--- /dev/null
+++ b/test/test262-es6/test262-es6.status
@@ -0,0 +1,166 @@
+# Copyright 2011 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+[
+[ALWAYS, {
+  ############################### BUGS ###################################
+
+  '15.5.4.9_CE': [['no_i18n', SKIP]],
+
+  # BUG(v8:3455)
+  '11.2.3_b': [FAIL],
+  '12.2.3_b': [FAIL],
+
+  ###################### NEEDS INVESTIGATION #######################
+
+  # Possibly same cause as S8.5_A2.1, below: floating-point tests.
+  'S15.8.2.16_A7': [PASS, FAIL_OK],
+  'S15.8.2.18_A7': [PASS, FAIL_OK],
+  'S15.8.2.7_A7': [PASS, FAIL_OK],
+
+  # This is an incompatibility between ES5 and V8 on enumerating
+  # shadowed elements in a for..in loop.
+  # https://code.google.com/p/v8/issues/detail?id=705
+  '12.6.4-2': [PASS, FAIL_OK],
+
+  ###################### MISSING ES6 FEATURES #######################
+
+  # Array.from
+  'S22.1.2.1_T1': [FAIL],
+  'S22.1.2.1_T2': [FAIL],
+
+  # Direct proxies
+  'Array.prototype.find_callable-predicate': [FAIL],
+
+  ######################## OBSOLETED BY ES6 ###########################
+
+  # ES6 allows duplicate properties
+  # TODO(arv): Reactivate when check removal has relanded.
+  # '11.1.5-4-4-a-1-s': [FAIL],
+  # '11.1.5_4-4-b-1': [FAIL],
+  # '11.1.5_4-4-b-2': [FAIL],
+  # '11.1.5_4-4-c-1': [FAIL],
+  # '11.1.5_4-4-c-2': [FAIL],
+  # '11.1.5_4-4-d-1': [FAIL],
+  # '11.1.5_4-4-d-2': [FAIL],
+  # '11.1.5_4-4-d-3': [FAIL],
+  # '11.1.5_4-4-d-4': [FAIL],
+
+  # ES6 allows block-local functions.
+  'Sbp_A1_T1': [FAIL],
+  'Sbp_A2_T1': [FAIL],
+  'Sbp_A2_T2': [FAIL],
+  'Sbp_A3_T1': [FAIL],
+  'Sbp_A3_T2': [FAIL],
+  'Sbp_A4_T1': [FAIL],
+  'Sbp_A4_T2': [FAIL],
+  'Sbp_A5_T1': [PASS], # Test is broken (strict reference to unbound variable)
+  'Sbp_A5_T2': [FAIL],
+
+  ######################## NEEDS INVESTIGATION ###########################
+
+  # These test failures are specific to the intl402 suite and need investigation
+  # to be either marked as bugs with issues filed for them or as deliberate
+  # incompatibilities if the test cases turn out to be broken or ambiguous.
+  '6.2.3': [FAIL],
+  '9.2.1_2': [FAIL],
+  '9.2.6_2': [FAIL],
+  '10.1.1_a': [FAIL],
+  '10.1.1_19_c': [PASS, FAIL, NO_VARIANTS],
+  '10.1.2.1_4': [FAIL],
+  '10.2.3_b': [PASS, FAIL],
+  '10.3_a': [FAIL],
+  '11.1.1_17': [PASS, FAIL],
+  '11.1.1_19': [PASS, FAIL],
+  '11.1.1_20_c': [FAIL],
+  '11.1.1_a': [FAIL],
+  '11.1.2.1_4': [FAIL],
+  '11.3.2_FN_2': [PASS, FAIL],
+  '11.3.2_TRF': [PASS, FAIL],
+  '11.3_a': [FAIL],
+  '12.1.1_a': [FAIL],
+  '12.1.2.1_4': [FAIL],
+  '12.3.2_FDT_7_a_iv': [FAIL],
+  '12.3.3': [FAIL],
+  '12.3_a': [FAIL],
+  '15.5.4.9_3': [PASS, FAIL],
+
+  ##################### DELIBERATE INCOMPATIBILITIES #####################
+
+  'S15.8.2.8_A6': [PASS, FAIL_OK],  # Math.exp (less precise with --fast-math)
+
+  # Linux for ia32 (and therefore simulators) default to extended 80 bit
+  # floating point formats, so these tests checking 64-bit FP precision fail.
+  # The other platforms/arch's pass these tests.
+  # We follow the other major JS engines by keeping this default.
+  'S8.5_A2.1': [PASS, FAIL_OK],
+  'S8.5_A2.2': [PASS, FAIL_OK],
+
+  ############################ INVALID TESTS #############################
+
+  # The reference value calculated by Test262 is incorrect if you run these
+  # tests in PST/PDT between first Sunday in March and first Sunday in April.
+  # The DST switch was moved in 2007 whereas Test262 bases the reference value
+  # on 2000. Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=293
+  'S15.9.3.1_A5_T1': [PASS, FAIL_OK],
+  'S15.9.3.1_A5_T2': [PASS, FAIL_OK],
+  'S15.9.3.1_A5_T3': [PASS, FAIL_OK],
+  'S15.9.3.1_A5_T4': [PASS, FAIL_OK],
+  'S15.9.3.1_A5_T5': [PASS, FAIL_OK],
+  'S15.9.3.1_A5_T6': [PASS, FAIL_OK],
+
+  # Test makes unjustified assumptions about the number of calls to SortCompare.
+  # Test262 Bug: https://bugs.ecmascript.org/show_bug.cgi?id=596
+  'bug_596_1': [PASS, FAIL_OK],
+
+  ############################ SKIPPED TESTS #############################
+
+  # These tests take a looong time to run in debug mode.
+  'S15.1.3.1_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+  'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
+}],  # ALWAYS
+
+['system == macos', {
+  '11.3.2_TRP': [FAIL],
+  '9.2.5_11_g_ii_2': [FAIL],
+}],  # system == macos
+
+['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
+
+  # TODO(mstarzinger): Causes stack overflow on simulators due to eager
+  # compilation of parenthesized function literals. Needs investigation.
+  'S13.2.1_A1_T1': [SKIP],
+
+  # BUG(3251225): Tests that timeout with --nocrankshaft.
+  'S15.1.3.1_A2.4_T1': [SKIP],
+  'S15.1.3.1_A2.5_T1': [SKIP],
+  'S15.1.3.2_A2.4_T1': [SKIP],
+  'S15.1.3.2_A2.5_T1': [SKIP],
+  'S15.1.3.3_A2.3_T1': [SKIP],
+  'S15.1.3.4_A2.3_T1': [SKIP],
+}],  # 'arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el'
+]
diff --git a/test/test262-es6/testcfg.py b/test/test262-es6/testcfg.py
new file mode 100644
index 0000000..59eda32
--- /dev/null
+++ b/test/test262-es6/testcfg.py
@@ -0,0 +1,164 @@
+# Copyright 2012 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+import hashlib
+import os
+import shutil
+import sys
+import tarfile
+import imp
+
+from testrunner.local import testsuite
+from testrunner.local import utils
+from testrunner.objects import testcase
+
+TEST_262_ARCHIVE_REVISION = "9bd6686"  # This is the 2014-08-25 revision.
+TEST_262_ARCHIVE_MD5 = "0f5928b391864890d5a397f8cdc82705"
+TEST_262_URL = "https://github.com/tc39/test262/tarball/%s"
+TEST_262_HARNESS_FILES = ["sta.js"]
+
+TEST_262_SUITE_PATH = ["data", "test", "suite"]
+TEST_262_HARNESS_PATH = ["data", "test", "harness"]
+TEST_262_TOOLS_PATH = ["data", "tools", "packaging"]
+
+class Test262TestSuite(testsuite.TestSuite):
+
+  def __init__(self, name, root):
+    super(Test262TestSuite, self).__init__(name, root)
+    self.testroot = os.path.join(self.root, *TEST_262_SUITE_PATH)
+    self.harnesspath = os.path.join(self.root, *TEST_262_HARNESS_PATH)
+    self.harness = [os.path.join(self.harnesspath, f)
+                    for f in TEST_262_HARNESS_FILES]
+    self.harness += [os.path.join(self.root, "harness-adapt.js")]
+    self.ParseTestRecord = None
+
+  def CommonTestName(self, testcase):
+    return testcase.path.split(os.path.sep)[-1]
+
+  def ListTests(self, context):
+    tests = []
+    for dirname, dirs, files in os.walk(self.testroot):
+      for dotted in [x for x in dirs if x.startswith(".")]:
+        dirs.remove(dotted)
+      if context.noi18n and "intl402" in dirs:
+        dirs.remove("intl402")
+      dirs.sort()
+      files.sort()
+      for filename in files:
+        if filename.endswith(".js"):
+          testname = os.path.join(dirname[len(self.testroot) + 1:],
+                                  filename[:-3])
+          case = testcase.TestCase(self, testname)
+          tests.append(case)
+    return tests
+
+  def GetFlagsForTestCase(self, testcase, context):
+    return (testcase.flags + context.mode_flags + self.harness +
+            self.GetIncludesForTest(testcase) + ["--harmony"] +
+            [os.path.join(self.testroot, testcase.path + ".js")])
+
+  def LoadParseTestRecord(self):
+    if not self.ParseTestRecord:
+      root = os.path.join(self.root, *TEST_262_TOOLS_PATH)
+      f = None
+      try:
+        (f, pathname, description) = imp.find_module("parseTestRecord", [root])
+        module = imp.load_module("parseTestRecord", f, pathname, description)
+        self.ParseTestRecord = module.parseTestRecord
+      except:
+        raise ImportError("Cannot load parseTestRecord; you may need to "
+                          "--download-data for test262")
+      finally:
+        if f:
+          f.close()
+    return self.ParseTestRecord
+
+  def GetTestRecord(self, testcase):
+    if not hasattr(testcase, "test_record"):
+      ParseTestRecord = self.LoadParseTestRecord()
+      testcase.test_record = ParseTestRecord(self.GetSourceForTest(testcase),
+                                             testcase.path)
+    return testcase.test_record
+
+  def GetIncludesForTest(self, testcase):
+    test_record = self.GetTestRecord(testcase)
+    if "includes" in test_record:
+      includes = [os.path.join(self.harnesspath, f)
+                  for f in test_record["includes"]]
+    else:
+      includes = []
+    return includes
+
+  def GetSourceForTest(self, testcase):
+    filename = os.path.join(self.testroot, testcase.path + ".js")
+    with open(filename) as f:
+      return f.read()
+
+  def IsNegativeTest(self, testcase):
+    test_record = self.GetTestRecord(testcase)
+    return "negative" in test_record
+
+  def IsFailureOutput(self, output, testpath):
+    if output.exit_code != 0:
+      return True
+    return "FAILED!" in output.stdout
+
+  def DownloadData(self):
+    revision = TEST_262_ARCHIVE_REVISION
+    archive_url = TEST_262_URL % revision
+    archive_name = os.path.join(self.root, "tc39-test262-%s.tar.gz" % revision)
+    directory_name = os.path.join(self.root, "data")
+    directory_old_name = os.path.join(self.root, "data.old")
+    if not os.path.exists(archive_name):
+      print "Downloading test data from %s ..." % archive_url
+      utils.URLRetrieve(archive_url, archive_name)
+      if os.path.exists(directory_name):
+        if os.path.exists(directory_old_name):
+          shutil.rmtree(directory_old_name)
+        os.rename(directory_name, directory_old_name)
+    if not os.path.exists(directory_name):
+      print "Extracting test262-%s.tar.gz ..." % revision
+      md5 = hashlib.md5()
+      with open(archive_name, "rb") as f:
+        for chunk in iter(lambda: f.read(8192), ""):
+          md5.update(chunk)
+      if md5.hexdigest() != TEST_262_ARCHIVE_MD5:
+        os.remove(archive_name)
+        raise Exception("Hash mismatch of test data file")
+      archive = tarfile.open(archive_name, "r:gz")
+      if sys.platform in ("win32", "cygwin"):
+        # Magic incantation to allow longer path names on Windows.
+        archive.extractall(u"\\\\?\\%s" % self.root)
+      else:
+        archive.extractall(self.root)
+      os.rename(os.path.join(self.root, "tc39-test262-%s" % revision),
+                directory_name)
+
+
+def GetSuite(name, root):
+  return Test262TestSuite(name, root)
diff --git a/test/test262/test262.status b/test/test262/test262.status
index 247bd5c..8666313 100644
--- a/test/test262/test262.status
+++ b/test/test262/test262.status
@@ -31,6 +31,10 @@
 
   '15.5.4.9_CE': [['no_i18n', SKIP]],
 
+  # BUG(v8:3455)
+  '11.2.3_b': [FAIL],
+  '12.2.3_b': [FAIL],
+
   ######################## NEEDS INVESTIGATION ###########################
 
   # These test failures are specific to the intl402 suite and need investigation
@@ -38,10 +42,9 @@
   # incompatibilities if the test cases turn out to be broken or ambiguous.
   '6.2.3': [FAIL],
   '9.2.1_2': [FAIL],
-  '9.2.5_11_g_ii_2': [FAIL],
   '9.2.6_2': [FAIL],
   '10.1.1_a': [FAIL],
-  '10.1.1_19_c': [PASS, FAIL],
+  '10.1.1_19_c': [PASS, FAIL, NO_VARIANTS],
   '10.1.2.1_4': [FAIL],
   '10.2.3_b': [PASS, FAIL],
   '10.3_a': [FAIL],
@@ -52,7 +55,6 @@
   '11.1.2.1_4': [FAIL],
   '11.3.2_FN_2': [PASS, FAIL],
   '11.3.2_TRF': [PASS, FAIL],
-  '11.3.2_TRP': [FAIL],
   '11.3_a': [FAIL],
   '12.1.1_a': [FAIL],
   '12.1.2.1_4': [FAIL],
@@ -63,14 +65,7 @@
 
   ##################### DELIBERATE INCOMPATIBILITIES #####################
 
-  # This tests precision of Math functions.  The implementation for those
-  # trigonometric functions are platform/compiler dependent.  Furthermore, the
-  # expectation values by far deviates from the actual result given by an
-  # arbitrary-precision calculator, making those tests partly bogus.
-  'S15.8.2.7_A7': [PASS, FAIL_OK],  # Math.cos
   'S15.8.2.8_A6': [PASS, FAIL_OK],  # Math.exp (less precise with --fast-math)
-  'S15.8.2.16_A7': [PASS, FAIL_OK],  # Math.sin
-  'S15.8.2.18_A7': [PASS, FAIL_OK],  # Math.tan
 
   # Linux for ia32 (and therefore simulators) default to extended 80 bit
   # floating point formats, so these tests checking 64-bit FP precision fail.
@@ -99,7 +94,12 @@
   'S15.1.3.2_A2.5_T1': [PASS, ['mode == debug', SKIP]],
 }],  # ALWAYS
 
-['arch == arm or arch == mipsel or arch == mips or arch == arm64', {
+['system == macos', {
+  '11.3.2_TRP': [FAIL],
+  '9.2.5_11_g_ii_2': [FAIL],
+}],  # system == macos
+
+['arch == arm or arch == mipsel or arch == mips or arch == arm64 or arch == mips64el', {
 
   # TODO(mstarzinger): Causes stack overflow on simulators due to eager
   # compilation of parenthesized function literals. Needs investigation.
diff --git a/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt b/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
index 52babed..030d7f9 100644
--- a/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
+++ b/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
@@ -53,35 +53,35 @@
 PASS getSortedOwnPropertyNames(argumentsObject(1)) is ['0', 'callee', 'length']
 PASS getSortedOwnPropertyNames(argumentsObject(1,2,3)) is ['0', '1', '2', 'callee', 'length']
 PASS getSortedOwnPropertyNames((function(){arguments.__proto__=[1,2,3];return arguments;})()) is ['callee', 'length']
-FAIL getSortedOwnPropertyNames(parseInt) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(parseFloat) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(isNaN) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(isFinite) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(escape) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(unescape) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(decodeURI) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(decodeURIComponent) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(encodeURI) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(encodeURIComponent) should be length,name. Was arguments,caller,length,name.
-FAIL getSortedOwnPropertyNames(Object) should be create,defineProperties,defineProperty,freeze,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,isExtensible,isFrozen,isSealed,keys,length,name,preventExtensions,prototype,seal,setPrototypeOf. Was arguments,caller,create,defineProperties,defineProperty,deliverChangeRecords,freeze,getNotifier,getOwnPropertyDescriptor,getOwnPropertyNames,getPrototypeOf,is,isExtensible,isFrozen,isSealed,keys,length,name,observe,preventExtensions,prototype,seal,setPrototypeOf,unobserve.
+PASS getSortedOwnPropertyNames(parseInt) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(parseFloat) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(isNaN) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(isFinite) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(escape) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(unescape) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(decodeURI) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(decodeURIComponent) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(encodeURI) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(encodeURIComponent) is ['arguments', 'caller', 'length', 'name']
+PASS getSortedOwnPropertyNames(Object) is ['arguments', 'caller', 'create', 'defineProperties', 'defineProperty', 'deliverChangeRecords', 'freeze', 'getNotifier', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getOwnPropertySymbols', 'getPrototypeOf', 'is', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'observe', 'preventExtensions', 'prototype', 'seal', 'setPrototypeOf', 'unobserve']
 PASS getSortedOwnPropertyNames(Object.prototype) is ['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']
-FAIL getSortedOwnPropertyNames(Function) should be length,name,prototype. Was arguments,caller,length,name,prototype.
-FAIL getSortedOwnPropertyNames(Function.prototype) should be apply,bind,call,constructor,length,name,toString. Was apply,arguments,bind,call,caller,constructor,length,name,toString.
-FAIL getSortedOwnPropertyNames(Array) should be isArray,length,name,prototype. Was arguments,caller,isArray,length,name,observe,prototype,unobserve.
-PASS getSortedOwnPropertyNames(Array.prototype) is ['concat', 'constructor', 'every', 'filter', 'forEach', 'indexOf', 'join', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']
-FAIL getSortedOwnPropertyNames(String) should be fromCharCode,length,name,prototype. Was arguments,caller,fromCharCode,length,name,prototype.
+PASS getSortedOwnPropertyNames(Function) is ['arguments', 'caller', 'length', 'name', 'prototype']
+PASS getSortedOwnPropertyNames(Function.prototype) is ['apply', 'arguments', 'bind', 'call', 'caller', 'constructor', 'length', 'name', 'toString']
+PASS getSortedOwnPropertyNames(Array) is ['arguments', 'caller', 'isArray', 'length', 'name', 'observe', 'prototype', 'unobserve']
+PASS getSortedOwnPropertyNames(Array.prototype) is ['concat', 'constructor', 'entries', 'every', 'filter', 'forEach', 'indexOf', 'join', 'keys', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']
+PASS getSortedOwnPropertyNames(String) is ['arguments', 'caller', 'fromCharCode', 'length', 'name', 'prototype']
 PASS getSortedOwnPropertyNames(String.prototype) is ['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']
-FAIL getSortedOwnPropertyNames(Boolean) should be length,name,prototype. Was arguments,caller,length,name,prototype.
+PASS getSortedOwnPropertyNames(Boolean) is ['arguments', 'caller', 'length', 'name', 'prototype']
 PASS getSortedOwnPropertyNames(Boolean.prototype) is ['constructor', 'toString', 'valueOf']
-FAIL getSortedOwnPropertyNames(Number) should be MAX_VALUE,MIN_VALUE,NEGATIVE_INFINITY,NaN,POSITIVE_INFINITY,length,name,prototype. Was EPSILON,MAX_SAFE_INTEGER,MAX_VALUE,MIN_SAFE_INTEGER,MIN_VALUE,NEGATIVE_INFINITY,NaN,POSITIVE_INFINITY,arguments,caller,isFinite,isInteger,isNaN,isSafeInteger,length,name,parseFloat,parseInt,prototype.
+PASS getSortedOwnPropertyNames(Number) is ['EPSILON', 'MAX_SAFE_INTEGER', 'MAX_VALUE', 'MIN_SAFE_INTEGER', 'MIN_VALUE', 'NEGATIVE_INFINITY', 'NaN', 'POSITIVE_INFINITY', 'arguments', 'caller', 'isFinite', 'isInteger', 'isNaN', 'isSafeInteger', 'length', 'name', 'parseFloat', 'parseInt', 'prototype']
 PASS getSortedOwnPropertyNames(Number.prototype) is ['constructor', 'toExponential', 'toFixed', 'toLocaleString', 'toPrecision', 'toString', 'valueOf']
-FAIL getSortedOwnPropertyNames(Date) should be UTC,length,name,now,parse,prototype. Was UTC,arguments,caller,length,name,now,parse,prototype.
+PASS getSortedOwnPropertyNames(Date) is ['UTC', 'arguments', 'caller', 'length', 'name', 'now', 'parse', 'prototype']
 PASS getSortedOwnPropertyNames(Date.prototype) is ['constructor', 'getDate', 'getDay', 'getFullYear', 'getHours', 'getMilliseconds', 'getMinutes', 'getMonth', 'getSeconds', 'getTime', 'getTimezoneOffset', 'getUTCDate', 'getUTCDay', 'getUTCFullYear', 'getUTCHours', 'getUTCMilliseconds', 'getUTCMinutes', 'getUTCMonth', 'getUTCSeconds', 'getYear', 'setDate', 'setFullYear', 'setHours', 'setMilliseconds', 'setMinutes', 'setMonth', 'setSeconds', 'setTime', 'setUTCDate', 'setUTCFullYear', 'setUTCHours', 'setUTCMilliseconds', 'setUTCMinutes', 'setUTCMonth', 'setUTCSeconds', 'setYear', 'toDateString', 'toGMTString', 'toISOString', 'toJSON', 'toLocaleDateString', 'toLocaleString', 'toLocaleTimeString', 'toString', 'toTimeString', 'toUTCString', 'valueOf']
-FAIL getSortedOwnPropertyNames(RegExp) should be $&,$',$*,$+,$1,$2,$3,$4,$5,$6,$7,$8,$9,$_,$`,input,lastMatch,lastParen,leftContext,length,multiline,name,prototype,rightContext. Was $&,$',$*,$+,$1,$2,$3,$4,$5,$6,$7,$8,$9,$_,$`,$input,arguments,caller,input,lastMatch,lastParen,leftContext,length,multiline,name,prototype,rightContext.
+PASS getSortedOwnPropertyNames(RegExp) is ['$&', "$'", '$*', '$+', '$1', '$2', '$3', '$4', '$5', '$6', '$7', '$8', '$9', '$_', '$`', 'arguments', 'caller', 'input', 'lastMatch', 'lastParen', 'leftContext', 'length', 'multiline', 'name', 'prototype', 'rightContext']
 PASS getSortedOwnPropertyNames(RegExp.prototype) is ['compile', 'constructor', 'exec', 'global', 'ignoreCase', 'lastIndex', 'multiline', 'source', 'test', 'toString']
-FAIL getSortedOwnPropertyNames(Error) should be length,name,prototype. Was arguments,caller,captureStackTrace,length,name,prototype,stackTraceLimit.
+PASS getSortedOwnPropertyNames(Error) is ['arguments', 'caller', 'captureStackTrace', 'length', 'name', 'prototype', 'stackTraceLimit']
 PASS getSortedOwnPropertyNames(Error.prototype) is ['constructor', 'message', 'name', 'toString']
-FAIL getSortedOwnPropertyNames(Math) should be E,LN10,LN2,LOG10E,LOG2E,PI,SQRT1_2,SQRT2,abs,acos,asin,atan,atan2,ceil,cos,exp,floor,log,max,min,pow,random,round,sin,sqrt,tan. Was E,LN10,LN2,LOG10E,LOG2E,PI,SQRT1_2,SQRT2,abs,acos,asin,atan,atan2,ceil,cos,exp,floor,imul,log,max,min,pow,random,round,sin,sqrt,tan.
+PASS getSortedOwnPropertyNames(Math) is ['E', 'LN10', 'LN2', 'LOG10E', 'LOG2E', 'PI', 'SQRT1_2', 'SQRT2', 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'cbrt', 'ceil', 'clz32', 'cos', 'cosh', 'exp', 'expm1', 'floor', 'fround', 'hypot', 'imul', 'log', 'log10', 'log1p', 'log2', 'max', 'min', 'pow', 'random', 'round', 'sign', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc']
 PASS getSortedOwnPropertyNames(JSON) is ['parse', 'stringify']
 PASS globalPropertyNames.indexOf('NaN') != -1 is true
 PASS globalPropertyNames.indexOf('Infinity') != -1 is true
diff --git a/test/webkit/fast/js/Object-getOwnPropertyNames.js b/test/webkit/fast/js/Object-getOwnPropertyNames.js
index 6373cf1..caa0111 100644
--- a/test/webkit/fast/js/Object-getOwnPropertyNames.js
+++ b/test/webkit/fast/js/Object-getOwnPropertyNames.js
@@ -60,36 +60,36 @@
      "argumentsObject(1,2,3)": "['0', '1', '2', 'callee', 'length']",
     "(function(){arguments.__proto__=[1,2,3];return arguments;})()": "['callee', 'length']",
 // Built-in ECMA functions
-    "parseInt": "['length', 'name']",
-    "parseFloat": "['length', 'name']",
-    "isNaN": "['length', 'name']",
-    "isFinite": "['length', 'name']",
-    "escape": "['length', 'name']",
-    "unescape": "['length', 'name']",
-    "decodeURI": "['length', 'name']",
-    "decodeURIComponent": "['length', 'name']",
-    "encodeURI": "['length', 'name']",
-    "encodeURIComponent": "['length', 'name']",
+    "parseInt": "['arguments', 'caller', 'length', 'name']",
+    "parseFloat": "['arguments', 'caller', 'length', 'name']",
+    "isNaN": "['arguments', 'caller', 'length', 'name']",
+    "isFinite": "['arguments', 'caller', 'length', 'name']",
+    "escape": "['arguments', 'caller', 'length', 'name']",
+    "unescape": "['arguments', 'caller', 'length', 'name']",
+    "decodeURI": "['arguments', 'caller', 'length', 'name']",
+    "decodeURIComponent": "['arguments', 'caller', 'length', 'name']",
+    "encodeURI": "['arguments', 'caller', 'length', 'name']",
+    "encodeURIComponent": "['arguments', 'caller', 'length', 'name']",
 // Built-in ECMA objects
-    "Object": "['create', 'defineProperties', 'defineProperty', 'freeze', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getPrototypeOf', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'preventExtensions', 'prototype', 'seal', 'setPrototypeOf']",
+    "Object": "['arguments', 'caller', 'create', 'defineProperties', 'defineProperty', 'deliverChangeRecords', 'freeze', 'getNotifier', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getOwnPropertySymbols', 'getPrototypeOf', 'is', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'observe', 'preventExtensions', 'prototype', 'seal', 'setPrototypeOf', 'unobserve']",
     "Object.prototype": "['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']",
-    "Function": "['length', 'name', 'prototype']",
-    "Function.prototype": "['apply', 'bind', 'call', 'constructor', 'length', 'name', 'toString']",
-    "Array": "['isArray', 'length', 'name', 'prototype']",
-    "Array.prototype": "['concat', 'constructor', 'every', 'filter', 'forEach', 'indexOf', 'join', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']",
-    "String": "['fromCharCode', 'length', 'name', 'prototype']",
+    "Function": "['arguments', 'caller', 'length', 'name', 'prototype']",
+    "Function.prototype": "['apply', 'arguments', 'bind', 'call', 'caller', 'constructor', 'length', 'name', 'toString']",
+    "Array": "['arguments', 'caller', 'isArray', 'length', 'name', 'observe', 'prototype', 'unobserve']",
+    "Array.prototype": "['concat', 'constructor', 'entries', 'every', 'filter', 'forEach', 'indexOf', 'join', 'keys', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']",
+    "String": "['arguments', 'caller', 'fromCharCode', 'length', 'name', 'prototype']",
     "String.prototype": "['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']",
-    "Boolean": "['length', 'name', 'prototype']",
+    "Boolean": "['arguments', 'caller', 'length', 'name', 'prototype']",
     "Boolean.prototype": "['constructor', 'toString', 'valueOf']",
-    "Number": "['MAX_VALUE', 'MIN_VALUE', 'NEGATIVE_INFINITY', 'NaN', 'POSITIVE_INFINITY', 'length', 'name', 'prototype']",
+    "Number": "['EPSILON', 'MAX_SAFE_INTEGER', 'MAX_VALUE', 'MIN_SAFE_INTEGER', 'MIN_VALUE', 'NEGATIVE_INFINITY', 'NaN', 'POSITIVE_INFINITY', 'arguments', 'caller', 'isFinite', 'isInteger', 'isNaN', 'isSafeInteger', 'length', 'name', 'parseFloat', 'parseInt', 'prototype']",
     "Number.prototype": "['constructor', 'toExponential', 'toFixed', 'toLocaleString', 'toPrecision', 'toString', 'valueOf']",
-    "Date": "['UTC', 'length', 'name', 'now', 'parse', 'prototype']",
+    "Date": "['UTC', 'arguments', 'caller', 'length', 'name', 'now', 'parse', 'prototype']",
     "Date.prototype": "['constructor', 'getDate', 'getDay', 'getFullYear', 'getHours', 'getMilliseconds', 'getMinutes', 'getMonth', 'getSeconds', 'getTime', 'getTimezoneOffset', 'getUTCDate', 'getUTCDay', 'getUTCFullYear', 'getUTCHours', 'getUTCMilliseconds', 'getUTCMinutes', 'getUTCMonth', 'getUTCSeconds', 'getYear', 'setDate', 'setFullYear', 'setHours', 'setMilliseconds', 'setMinutes', 'setMonth', 'setSeconds', 'setTime', 'setUTCDate', 'setUTCFullYear', 'setUTCHours', 'setUTCMilliseconds', 'setUTCMinutes', 'setUTCMonth', 'setUTCSeconds', 'setYear', 'toDateString', 'toGMTString', 'toISOString', 'toJSON', 'toLocaleDateString', 'toLocaleString', 'toLocaleTimeString', 'toString', 'toTimeString', 'toUTCString', 'valueOf']",
-    "RegExp": "['$&', \"$'\", '$*', '$+', '$1', '$2', '$3', '$4', '$5', '$6', '$7', '$8', '$9', '$_', '$`', 'input', 'lastMatch', 'lastParen', 'leftContext', 'length', 'multiline', 'name', 'prototype', 'rightContext']",
+    "RegExp": "['$&', \"$'\", '$*', '$+', '$1', '$2', '$3', '$4', '$5', '$6', '$7', '$8', '$9', '$_', '$`', 'arguments', 'caller', 'input', 'lastMatch', 'lastParen', 'leftContext', 'length', 'multiline', 'name', 'prototype', 'rightContext']",
     "RegExp.prototype": "['compile', 'constructor', 'exec', 'global', 'ignoreCase', 'lastIndex', 'multiline', 'source', 'test', 'toString']",
-    "Error": "['length', 'name', 'prototype']",
+    "Error": "['arguments', 'caller', 'captureStackTrace', 'length', 'name', 'prototype', 'stackTraceLimit']",
     "Error.prototype": "['constructor', 'message', 'name', 'toString']",
-    "Math": "['E', 'LN10', 'LN2', 'LOG10E', 'LOG2E', 'PI', 'SQRT1_2', 'SQRT2', 'abs', 'acos', 'asin', 'atan', 'atan2', 'ceil', 'cos', 'exp', 'floor', 'log', 'max', 'min', 'pow', 'random', 'round', 'sin', 'sqrt', 'tan']",
+    "Math": "['E', 'LN10', 'LN2', 'LOG10E', 'LOG2E', 'PI', 'SQRT1_2', 'SQRT2', 'abs', 'acos', 'acosh', 'asin', 'asinh', 'atan', 'atan2', 'atanh', 'cbrt', 'ceil', 'clz32', 'cos', 'cosh', 'exp', 'expm1', 'floor', 'fround', 'hypot', 'imul', 'log', 'log10', 'log1p', 'log2', 'max', 'min', 'pow', 'random', 'round', 'sign', 'sin', 'sinh', 'sqrt', 'tan', 'tanh', 'trunc']",
     "JSON": "['parse', 'stringify']"
 };
 
diff --git a/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt b/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt
index f07d273..cc273df 100644
--- a/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt
+++ b/test/webkit/fast/js/primitive-property-access-edge-cases-expected.txt
@@ -29,15 +29,15 @@
 PASS checkGet(1, Number) is true
 PASS checkGet('hello', String) is true
 PASS checkGet(true, Boolean) is true
-FAIL checkSet(1, Number) should be true. Was false.
-FAIL checkSet('hello', String) should be true. Was false.
-FAIL checkSet(true, Boolean) should be true. Was false.
+PASS checkSet(1, Number) is true
+PASS checkSet('hello', String) is true
+PASS checkSet(true, Boolean) is true
 PASS checkGetStrict(1, Number) is true
 PASS checkGetStrict('hello', String) is true
 PASS checkGetStrict(true, Boolean) is true
-FAIL checkSetStrict(1, Number) should be true. Was false.
-FAIL checkSetStrict('hello', String) should be true. Was false.
-FAIL checkSetStrict(true, Boolean) should be true. Was false.
+PASS checkSetStrict(1, Number) is true
+PASS checkSetStrict('hello', String) is true
+PASS checkSetStrict(true, Boolean) is true
 PASS checkRead(1, Number) is true
 PASS checkRead('hello', String) is true
 PASS checkRead(true, Boolean) is true
@@ -47,9 +47,9 @@
 PASS checkReadStrict(1, Number) is true
 PASS checkReadStrict('hello', String) is true
 PASS checkReadStrict(true, Boolean) is true
-FAIL checkWriteStrict(1, Number) should throw an exception. Was true.
-FAIL checkWriteStrict('hello', String) should throw an exception. Was true.
-FAIL checkWriteStrict(true, Boolean) should throw an exception. Was true.
+PASS checkWriteStrict(1, Number) threw exception TypeError: Cannot assign to read only property 'foo' of 1.
+PASS checkWriteStrict('hello', String) threw exception TypeError: Cannot assign to read only property 'foo' of hello.
+PASS checkWriteStrict(true, Boolean) threw exception TypeError: Cannot assign to read only property 'foo' of true.
 PASS checkNumericGet(1, Number) is true
 PASS checkNumericGet('hello', String) is true
 PASS checkNumericGet(true, Boolean) is true
diff --git a/test/webkit/fast/js/read-modify-eval-expected.txt b/test/webkit/fast/js/read-modify-eval-expected.txt
index 4a16d0a..b375b37 100644
--- a/test/webkit/fast/js/read-modify-eval-expected.txt
+++ b/test/webkit/fast/js/read-modify-eval-expected.txt
@@ -42,7 +42,7 @@
 PASS postIncTest(); is true
 PASS postDecTest(); is true
 PASS primitiveThisTest.call(1); is true
-FAIL strictThisTest.call(1); should throw an exception. Was true.
+PASS strictThisTest.call(1); threw exception TypeError: Cannot assign to read only property 'value' of 1.
 PASS successfullyParsed is true
 
 TEST COMPLETE
diff --git a/test/webkit/fast/js/string-anchor-expected.txt b/test/webkit/fast/js/string-anchor-expected.txt
index 3a50054..91a8338 100644
--- a/test/webkit/fast/js/string-anchor-expected.txt
+++ b/test/webkit/fast/js/string-anchor-expected.txt
@@ -32,8 +32,8 @@
 PASS '_'.anchor('"') is "<a name=\"&quot;\">_</a>"
 PASS '_'.anchor('" href="http://www.evil.com') is "<a name=\"&quot; href=&quot;http://www.evil.com\">_</a>"
 PASS String.prototype.anchor.call(0x2A, 0x2A) is "<a name=\"42\">42</a>"
-FAIL String.prototype.anchor.call(undefined) should throw TypeError: Type error. Was <a name="undefined">undefined</a>.
-FAIL String.prototype.anchor.call(null) should throw TypeError: Type error. Was <a name="undefined">null</a>.
+FAIL String.prototype.anchor.call(undefined) should throw TypeError: Type error. Threw exception TypeError: String.prototype.anchor called on null or undefined.
+FAIL String.prototype.anchor.call(null) should throw TypeError: Type error. Threw exception TypeError: String.prototype.anchor called on null or undefined.
 PASS String.prototype.anchor.length is 1
 PASS successfullyParsed is true
 
diff --git a/test/webkit/fast/js/string-fontcolor-expected.txt b/test/webkit/fast/js/string-fontcolor-expected.txt
index af2c707..2ffda69 100644
--- a/test/webkit/fast/js/string-fontcolor-expected.txt
+++ b/test/webkit/fast/js/string-fontcolor-expected.txt
@@ -32,8 +32,8 @@
 PASS '_'.fontcolor('"') is "<font color=\"&quot;\">_</font>"
 PASS '_'.fontcolor('" size="2px') is "<font color=\"&quot; size=&quot;2px\">_</font>"
 PASS String.prototype.fontcolor.call(0x2A, 0x2A) is "<font color=\"42\">42</font>"
-FAIL String.prototype.fontcolor.call(undefined) should throw TypeError: Type error. Was <font color="undefined">undefined</font>.
-FAIL String.prototype.fontcolor.call(null) should throw TypeError: Type error. Was <font color="undefined">null</font>.
+FAIL String.prototype.fontcolor.call(undefined) should throw TypeError: Type error. Threw exception TypeError: String.prototype.fontcolor called on null or undefined.
+FAIL String.prototype.fontcolor.call(null) should throw TypeError: Type error. Threw exception TypeError: String.prototype.fontcolor called on null or undefined.
 PASS String.prototype.fontcolor.length is 1
 PASS successfullyParsed is true
 
diff --git a/test/webkit/fast/js/string-fontsize-expected.txt b/test/webkit/fast/js/string-fontsize-expected.txt
index c114f74..656f7fa 100644
--- a/test/webkit/fast/js/string-fontsize-expected.txt
+++ b/test/webkit/fast/js/string-fontsize-expected.txt
@@ -33,8 +33,8 @@
 PASS '_'.fontsize('"') is "<font size=\"&quot;\">_</font>"
 PASS '_'.fontsize('" color="b') is "<font size=\"&quot; color=&quot;b\">_</font>"
 PASS String.prototype.fontsize.call(0x2A, 0x2A) is "<font size=\"42\">42</font>"
-FAIL String.prototype.fontsize.call(undefined) should throw TypeError: Type error. Was <font size="undefined">undefined</font>.
-FAIL String.prototype.fontsize.call(null) should throw TypeError: Type error. Was <font size="undefined">null</font>.
+FAIL String.prototype.fontsize.call(undefined) should throw TypeError: Type error. Threw exception TypeError: String.prototype.fontsize called on null or undefined.
+FAIL String.prototype.fontsize.call(null) should throw TypeError: Type error. Threw exception TypeError: String.prototype.fontsize called on null or undefined.
 PASS String.prototype.fontsize.length is 1
 PASS successfullyParsed is true
 
diff --git a/test/webkit/fast/js/string-link-expected.txt b/test/webkit/fast/js/string-link-expected.txt
index afacbe6..2443bd4 100644
--- a/test/webkit/fast/js/string-link-expected.txt
+++ b/test/webkit/fast/js/string-link-expected.txt
@@ -33,8 +33,8 @@
 PASS '_'.link('"') is "<a href=\"&quot;\">_</a>"
 PASS '_'.link('" target="_blank') is "<a href=\"&quot; target=&quot;_blank\">_</a>"
 PASS String.prototype.link.call(0x2A, 0x2A) is "<a href=\"42\">42</a>"
-FAIL String.prototype.link.call(undefined) should throw TypeError: Type error. Was <a href="undefined">undefined</a>.
-FAIL String.prototype.link.call(null) should throw TypeError: Type error. Was <a href="undefined">null</a>.
+FAIL String.prototype.link.call(undefined) should throw TypeError: Type error. Threw exception TypeError: String.prototype.link called on null or undefined.
+FAIL String.prototype.link.call(null) should throw TypeError: Type error. Threw exception TypeError: String.prototype.link called on null or undefined.
 PASS String.prototype.link.length is 1
 PASS successfullyParsed is true
 
diff --git a/test/webkit/webkit.status b/test/webkit/webkit.status
index c897fd4..3bb6574 100644
--- a/test/webkit/webkit.status
+++ b/test/webkit/webkit.status
@@ -33,6 +33,10 @@
   'dfg-inline-arguments-become-int32': [PASS, FAIL],
   'dfg-inline-arguments-reset': [PASS, FAIL],
   'dfg-inline-arguments-reset-changetype': [PASS, FAIL],
+  # TODO(turbofan): We run out of stack earlier on 64-bit for now.
+  'fast/js/deep-recursion-test': [PASS, NO_VARIANTS],
+  # TODO(bmeurer,svenpanne): Investigate test failure.
+  'fast/js/toString-number': [SKIP],
 }],  # ALWAYS
 ['mode == debug', {
   # Too slow in debug mode.
@@ -47,4 +51,13 @@
 ['arch == arm64 and simulator_run == True', {
   'dfg-int-overflow-in-loop': [SKIP],
 }],  # 'arch == arm64 and simulator_run == True'
+
+
+##############################################################################
+['gc_stress == True', {
+  # Tests taking too long
+  'fast/js/excessive-comma-usage': [SKIP]
+}],  # 'gc_stress == True'
+
+##############################################################################
 ]
diff --git a/testing/gmock-support.h b/testing/gmock-support.h
new file mode 100644
index 0000000..44348b6
--- /dev/null
+++ b/testing/gmock-support.h
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TESTING_GMOCK_SUPPORT_H_
+#define V8_TESTING_GMOCK_SUPPORT_H_
+
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace testing {
+
+template <typename T>
+class Capture {
+ public:
+  Capture() : value_(), has_value_(false) {}
+
+  const T& value() const { return value_; }
+  bool has_value() const { return has_value_; }
+
+  void SetValue(const T& value) {
+    DCHECK(!has_value());
+    value_ = value;
+    has_value_ = true;
+  }
+
+ private:
+  T value_;
+  bool has_value_;
+};
+
+
+namespace internal {
+
+template <typename T>
+class CaptureEqMatcher : public MatcherInterface<T> {
+ public:
+  explicit CaptureEqMatcher(Capture<T>* capture) : capture_(capture) {}
+
+  virtual void DescribeTo(std::ostream* os) const {
+    *os << "captured by " << static_cast<const void*>(capture_);
+    if (capture_->has_value()) *os << " which has value " << capture_->value();
+  }
+
+  virtual bool MatchAndExplain(T value, MatchResultListener* listener) const {
+    if (!capture_->has_value()) {
+      capture_->SetValue(value);
+      return true;
+    }
+    if (value != capture_->value()) {
+      *listener << "which is not equal to " << capture_->value();
+      return false;
+    }
+    return true;
+  }
+
+ private:
+  Capture<T>* capture_;
+};
+
+}  // namespace internal
+
+
+// CaptureEq(capture) captures the value passed in during matching while the
+// capture is still unset; once the capture is set, it compares later values
+// for equality with the captured one.
+template <typename T>
+Matcher<T> CaptureEq(Capture<T>* capture) {
+  return MakeMatcher(new internal::CaptureEqMatcher<T>(capture));
+}
+
+}  // namespace testing
+
+#endif  // V8_TESTING_GMOCK_SUPPORT_H_
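
A minimal usage sketch for Capture/CaptureEq follows; the mock class and test
are hypothetical illustrations, not part of this change. The first matching
call records its argument in the Capture, and every subsequent call must pass
an equal value.

    // Hedged sketch, assuming the gmock/gtest setup added by the gyp files
    // below (and V8's DCHECK, which gmock-support.h relies on).
    #include "testing/gmock-support.h"
    #include "testing/gmock/include/gmock/gmock.h"
    #include "testing/gtest/include/gtest/gtest.h"

    class MockSink {  // Hypothetical mock, for illustration only.
     public:
      MOCK_METHOD1(Emit, void(int id));
    };

    TEST(CaptureEqTest, SameIdEveryTime) {
      MockSink sink;
      testing::Capture<int> id;
      // The first Emit() sets |id|; later calls must equal the captured value.
      EXPECT_CALL(sink, Emit(testing::CaptureEq(&id))).Times(2);
      sink.Emit(42);
      sink.Emit(42);
      EXPECT_TRUE(id.has_value());
      EXPECT_EQ(42, id.value());
    }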
diff --git a/testing/gmock.gyp b/testing/gmock.gyp
new file mode 100644
index 0000000..ba43861
--- /dev/null
+++ b/testing/gmock.gyp
@@ -0,0 +1,69 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'targets': [
+    {
+      'target_name': 'gmock',
+      'type': 'static_library',
+      'dependencies': [
+        'gtest.gyp:gtest',
+      ],
+      'sources': [
+        # Sources based on files in r173 of gmock.
+        'gmock/include/gmock/gmock-actions.h',
+        'gmock/include/gmock/gmock-cardinalities.h',
+        'gmock/include/gmock/gmock-generated-actions.h',
+        'gmock/include/gmock/gmock-generated-function-mockers.h',
+        'gmock/include/gmock/gmock-generated-matchers.h',
+        'gmock/include/gmock/gmock-generated-nice-strict.h',
+        'gmock/include/gmock/gmock-matchers.h',
+        'gmock/include/gmock/gmock-spec-builders.h',
+        'gmock/include/gmock/gmock.h',
+        'gmock/include/gmock/internal/gmock-generated-internal-utils.h',
+        'gmock/include/gmock/internal/gmock-internal-utils.h',
+        'gmock/include/gmock/internal/gmock-port.h',
+        'gmock/src/gmock-all.cc',
+        'gmock/src/gmock-cardinalities.cc',
+        'gmock/src/gmock-internal-utils.cc',
+        'gmock/src/gmock-matchers.cc',
+        'gmock/src/gmock-spec-builders.cc',
+        'gmock/src/gmock.cc',
+        'gmock-support.h',  # gMock helpers
+      ],
+      'sources!': [
+        'gmock/src/gmock-all.cc',  # Not needed by our build.
+      ],
+      'include_dirs': [
+        'gmock',
+        'gmock/include',
+      ],
+      'direct_dependent_settings': {
+        'include_dirs': [
+          'gmock/include',  # So that gmock headers can find themselves.
+        ],
+      },
+      'export_dependent_settings': [
+        'gtest.gyp:gtest',
+      ],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ],
+    },
+    {
+      'target_name': 'gmock_main',
+      'type': 'static_library',
+      'dependencies': [
+        'gmock',
+      ],
+      'sources': [
+        'gmock/src/gmock_main.cc',
+      ],
+    },
+  ],
+}
diff --git a/testing/gtest-support.h b/testing/gtest-support.h
new file mode 100644
index 0000000..66b1094
--- /dev/null
+++ b/testing/gtest-support.h
@@ -0,0 +1,58 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_TESTING_GTEST_SUPPORT_H_
+#define V8_TESTING_GTEST_SUPPORT_H_
+
+#include "include/v8stdint.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace testing {
+namespace internal {
+
+#define GET_TYPE_NAME(type)                \
+  template <>                              \
+  inline std::string GetTypeName<type>() { \
+    return #type;                          \
+  }
+GET_TYPE_NAME(int8_t)
+GET_TYPE_NAME(uint8_t)
+GET_TYPE_NAME(int16_t)
+GET_TYPE_NAME(uint16_t)
+GET_TYPE_NAME(int32_t)
+GET_TYPE_NAME(uint32_t)
+GET_TYPE_NAME(int64_t)
+GET_TYPE_NAME(uint64_t)
+GET_TYPE_NAME(float)
+GET_TYPE_NAME(double)
+#undef GET_TYPE_NAME
+
+
+// TRACED_FOREACH(type, var, array) expands to a loop that assigns every item
+// of |array| to |var| in turn and adds a SCOPED_TRACE() message for |var|
+// inside the loop body.
+// TODO(bmeurer): Migrate to C++11 once we're ready.
+#define TRACED_FOREACH(_type, _var, _array)                                \
+  for (size_t _i = 0; _i < arraysize(_array); ++_i)                        \
+    for (bool _done = false; !_done;)                                      \
+      for (_type const _var = _array[_i]; !_done;)                         \
+        for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
+             !_done; _done = true)
+
+
+// TRACED_FORRANGE(type, var, low, high) expands to a loop that assigns every
+// value from |low| up to and including |high| to |var| and adds a
+// SCOPED_TRACE() message for |var| inside the loop body.
+// TODO(bmeurer): Migrate to C++11 once we're ready.
+#define TRACED_FORRANGE(_type, _var, _low, _high)                          \
+  for (_type _i = _low; _i <= _high; ++_i)                                 \
+    for (bool _done = false; !_done;)                                      \
+      for (_type const _var = _i; !_done;)                                 \
+        for (SCOPED_TRACE(::testing::Message() << #_var << " = " << _var); \
+             !_done; _done = true)
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // V8_TESTING_GTEST_SUPPORT_H_
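
A short sketch of how the tracing macros above are meant to be used; the test
and values are illustrative only. Each iteration installs its own
SCOPED_TRACE() entry, so a failing expectation reports which value triggered
it.

    // Hedged sketch; in V8 proper, arraysize() comes from the base macros,
    // so the stand-in below exists only to keep this example self-contained.
    #include <cstddef>
    #include "testing/gtest-support.h"

    template <typename T, std::size_t N>
    char (&ArraySizeHelper(T (&array)[N]))[N];
    #define arraysize(array) (sizeof(ArraySizeHelper(array)))

    TEST(TracedLoops, Example) {
      static const int32_t kValues[] = {-1, 0, 1, 42};
      TRACED_FOREACH(int32_t, value, kValues) {
        EXPECT_LE(-1, value);  // On failure, the trace names |value|.
      }
      TRACED_FORRANGE(int32_t, shift, 0, 31) {
        EXPECT_LT(shift, 32);  // Likewise traced per |shift|.
      }
    }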
diff --git a/testing/gtest.gyp b/testing/gtest.gyp
new file mode 100644
index 0000000..d766210
--- /dev/null
+++ b/testing/gtest.gyp
@@ -0,0 +1,159 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+{
+  'targets': [
+    {
+      'target_name': 'gtest',
+      'toolsets': ['host', 'target'],
+      'type': 'static_library',
+      'sources': [
+        'gtest/include/gtest/gtest-death-test.h',
+        'gtest/include/gtest/gtest-message.h',
+        'gtest/include/gtest/gtest-param-test.h',
+        'gtest/include/gtest/gtest-printers.h',
+        'gtest/include/gtest/gtest-spi.h',
+        'gtest/include/gtest/gtest-test-part.h',
+        'gtest/include/gtest/gtest-typed-test.h',
+        'gtest/include/gtest/gtest.h',
+        'gtest/include/gtest/gtest_pred_impl.h',
+        'gtest/include/gtest/internal/gtest-death-test-internal.h',
+        'gtest/include/gtest/internal/gtest-filepath.h',
+        'gtest/include/gtest/internal/gtest-internal.h',
+        'gtest/include/gtest/internal/gtest-linked_ptr.h',
+        'gtest/include/gtest/internal/gtest-param-util-generated.h',
+        'gtest/include/gtest/internal/gtest-param-util.h',
+        'gtest/include/gtest/internal/gtest-port.h',
+        'gtest/include/gtest/internal/gtest-string.h',
+        'gtest/include/gtest/internal/gtest-tuple.h',
+        'gtest/include/gtest/internal/gtest-type-util.h',
+        'gtest/src/gtest-all.cc',
+        'gtest/src/gtest-death-test.cc',
+        'gtest/src/gtest-filepath.cc',
+        'gtest/src/gtest-internal-inl.h',
+        'gtest/src/gtest-port.cc',
+        'gtest/src/gtest-printers.cc',
+        'gtest/src/gtest-test-part.cc',
+        'gtest/src/gtest-typed-test.cc',
+        'gtest/src/gtest.cc',
+        'gtest-support.h',
+      ],
+      'sources!': [
+        'gtest/src/gtest-all.cc',  # Not needed by our build.
+      ],
+      'include_dirs': [
+        'gtest',
+        'gtest/include',
+      ],
+      'dependencies': [
+        'gtest_prod',
+      ],
+      'defines': [
+        # In order to allow regex matches in gtest to be shared between Windows
+        # and other systems, we tell gtest to always use its internal engine.
+        'GTEST_HAS_POSIX_RE=0',
+        # Chrome doesn't support or require C++11 yet.
+        'GTEST_LANG_CXX11=0',
+      ],
+      'all_dependent_settings': {
+        'defines': [
+          'GTEST_HAS_POSIX_RE=0',
+          'GTEST_LANG_CXX11=0',
+        ],
+      },
+      'conditions': [
+        ['os_posix == 1', {
+          'defines': [
+            # gtest isn't able to figure out when RTTI is disabled for gcc
+            # versions older than 4.3.2, and assumes it's enabled.  Our Mac
+            # and Linux builds disable RTTI, and cannot guarantee that the
+            # compiler will be 4.3.2 or newer.  The Mac, for example, uses
+            # 4.2.1 as that is the latest available on that platform.  gtest
+            # must be instructed that RTTI is disabled here, and for any
+            # direct dependents that might include gtest headers.
+            'GTEST_HAS_RTTI=0',
+          ],
+          'direct_dependent_settings': {
+            'defines': [
+              'GTEST_HAS_RTTI=0',
+            ],
+          },
+        }],
+        ['OS=="android"', {
+          'defines': [
+            'GTEST_HAS_CLONE=0',
+          ],
+          'direct_dependent_settings': {
+            'defines': [
+              'GTEST_HAS_CLONE=0',
+            ],
+          },
+        }],
+        ['OS=="android"', {
+          # We want gtest features that use tr1::tuple, but we currently
+          # don't support the variadic templates used by libstdc++'s
+          # implementation. gtest supports this scenario by providing its
+          # own implementation but we must opt in to it.
+          'defines': [
+            'GTEST_USE_OWN_TR1_TUPLE=1',
+            # GTEST_USE_OWN_TR1_TUPLE only works if GTEST_HAS_TR1_TUPLE is set.
+            # gtest r625 made it so that GTEST_HAS_TR1_TUPLE is set to 0
+            # automatically on android, so it has to be set explicitly here.
+            'GTEST_HAS_TR1_TUPLE=1',
+          ],
+          'direct_dependent_settings': {
+            'defines': [
+              'GTEST_USE_OWN_TR1_TUPLE=1',
+              'GTEST_HAS_TR1_TUPLE=1',
+            ],
+          },
+        }],
+      ],
+      'direct_dependent_settings': {
+        'defines': [
+          'UNIT_TEST',
+        ],
+        'include_dirs': [
+          'gtest/include',  # So that gtest headers can find themselves.
+        ],
+        'target_conditions': [
+          ['_type=="executable"', {
+            'test': 1,
+            'conditions': [
+              ['OS=="mac"', {
+                'run_as': {
+                  'action': ['${BUILT_PRODUCTS_DIR}/${PRODUCT_NAME}'],
+                },
+              }],
+              ['OS=="win"', {
+                'run_as': {
+                  'action': ['$(TargetPath)', '--gtest_print_time'],
+                },
+              }],
+            ],
+          }],
+        ],
+        'msvs_disabled_warnings': [4800],
+      },
+    },
+    {
+      'target_name': 'gtest_main',
+      'type': 'static_library',
+      'dependencies': [
+        'gtest',
+      ],
+      'sources': [
+        'gtest/src/gtest_main.cc',
+      ],
+    },
+    {
+      'target_name': 'gtest_prod',
+      'toolsets': ['host', 'target'],
+      'type': 'none',
+      'sources': [
+        'gtest/include/gtest/gtest_prod.h',
+      ],
+    },
+  ],
+}
diff --git a/third_party/fdlibm/LICENSE b/third_party/fdlibm/LICENSE
new file mode 100644
index 0000000..b54cb52
--- /dev/null
+++ b/third_party/fdlibm/LICENSE
@@ -0,0 +1,6 @@
+Copyright (C) 1993-2004 by Sun Microsystems, Inc. All rights reserved.
+
+Developed at SunSoft, a Sun Microsystems, Inc. business.
+Permission to use, copy, modify, and distribute this
+software is freely granted, provided that this notice
+is preserved.
diff --git a/third_party/fdlibm/README.v8 b/third_party/fdlibm/README.v8
new file mode 100644
index 0000000..ea8fdb6
--- /dev/null
+++ b/third_party/fdlibm/README.v8
@@ -0,0 +1,18 @@
+Name: Freely Distributable LIBM 
+Short Name: fdlibm
+URL: http://www.netlib.org/fdlibm/
+Version: 5.3 
+License: Freely Distributable.
+License File: LICENSE.
+Security Critical: yes.
+License Android Compatible: yes.
+
+Description:
+This is used to provide an accurate implementation of the trigonometric
+functions used in V8.
+
+Local Modifications:
+For use in V8, fdlibm has been reduced to include only sine, cosine and
+tangent.  To make inlining into generated code possible, a large portion of
+that has been translated to JavaScript.  The rest remains in C, but has been
+refactored and reformatted to interoperate with the rest of V8.
diff --git a/third_party/fdlibm/fdlibm.cc b/third_party/fdlibm/fdlibm.cc
new file mode 100644
index 0000000..c009cd0
--- /dev/null
+++ b/third_party/fdlibm/fdlibm.cc
@@ -0,0 +1,281 @@
+// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunSoft, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// The original source code covered by the license above has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#include "src/v8.h"
+
+#include "src/double.h"
+#include "third_party/fdlibm/fdlibm.h"
+
+
+namespace v8 {
+namespace fdlibm {
+
+#ifdef _MSC_VER
+inline double scalbn(double x, int y) { return _scalb(x, y); }
+#endif  // _MSC_VER
+
+const double MathConstants::constants[] = {
+    6.36619772367581382433e-01,   // invpio2   0
+    1.57079632673412561417e+00,   // pio2_1    1
+    6.07710050650619224932e-11,   // pio2_1t   2
+    6.07710050630396597660e-11,   // pio2_2    3
+    2.02226624879595063154e-21,   // pio2_2t   4
+    2.02226624871116645580e-21,   // pio2_3    5
+    8.47842766036889956997e-32,   // pio2_3t   6
+    -1.66666666666666324348e-01,  // S1        7  coefficients for sin
+    8.33333333332248946124e-03,   //           8
+    -1.98412698298579493134e-04,  //           9
+    2.75573137070700676789e-06,   //          10
+    -2.50507602534068634195e-08,  //          11
+    1.58969099521155010221e-10,   // S6       12
+    4.16666666666666019037e-02,   // C1       13  coefficients for cos
+    -1.38888888888741095749e-03,  //          14
+    2.48015872894767294178e-05,   //          15
+    -2.75573143513906633035e-07,  //          16
+    2.08757232129817482790e-09,   //          17
+    -1.13596475577881948265e-11,  // C6       18
+    3.33333333333334091986e-01,   // T0       19  coefficients for tan
+    1.33333333333201242699e-01,   //          20
+    5.39682539762260521377e-02,   //          21
+    2.18694882948595424599e-02,   //          22
+    8.86323982359930005737e-03,   //          23
+    3.59207910759131235356e-03,   //          24
+    1.45620945432529025516e-03,   //          25
+    5.88041240820264096874e-04,   //          26
+    2.46463134818469906812e-04,   //          27
+    7.81794442939557092300e-05,   //          28
+    7.14072491382608190305e-05,   //          29
+    -1.85586374855275456654e-05,  //          30
+    2.59073051863633712884e-05,   // T12      31
+    7.85398163397448278999e-01,   // pio4     32
+    3.06161699786838301793e-17,   // pio4lo   33
+    6.93147180369123816490e-01,   // ln2_hi   34
+    1.90821492927058770002e-10,   // ln2_lo   35
+    1.80143985094819840000e+16,   // 2^54     36
+    6.666666666666666666e-01,     // 2/3      37
+    6.666666666666735130e-01,     // LP1      38  coefficients for log1p
+    3.999999999940941908e-01,     //          39
+    2.857142874366239149e-01,     //          40
+    2.222219843214978396e-01,     //          41
+    1.818357216161805012e-01,     //          42
+    1.531383769920937332e-01,     //          43
+    1.479819860511658591e-01,     // LP7      44
+    7.09782712893383973096e+02,   //          45  overflow threshold for expm1
+    1.44269504088896338700e+00,   // 1/ln2    46
+    -3.33333333333331316428e-02,  // Q1       47  coefficients for expm1
+    1.58730158725481460165e-03,   //          48
+    -7.93650757867487942473e-05,  //          49
+    4.00821782732936239552e-06,   //          50
+    -2.01099218183624371326e-07,  // Q5       51
+    710.4758600739439             //          52  overflow threshold sinh, cosh
+};
+
+
+// Table of constants for 2/pi, 396 Hex digits (476 decimal) of 2/pi
+static const int two_over_pi[] = {
+    0xA2F983, 0x6E4E44, 0x1529FC, 0x2757D1, 0xF534DD, 0xC0DB62, 0x95993C,
+    0x439041, 0xFE5163, 0xABDEBB, 0xC561B7, 0x246E3A, 0x424DD2, 0xE00649,
+    0x2EEA09, 0xD1921C, 0xFE1DEB, 0x1CB129, 0xA73EE8, 0x8235F5, 0x2EBB44,
+    0x84E99C, 0x7026B4, 0x5F7E41, 0x3991D6, 0x398353, 0x39F49C, 0x845F8B,
+    0xBDF928, 0x3B1FF8, 0x97FFDE, 0x05980F, 0xEF2F11, 0x8B5A0A, 0x6D1F6D,
+    0x367ECF, 0x27CB09, 0xB74F46, 0x3F669E, 0x5FEA2D, 0x7527BA, 0xC7EBE5,
+    0xF17B3D, 0x0739F7, 0x8A5292, 0xEA6BFB, 0x5FB11F, 0x8D5D08, 0x560330,
+    0x46FC7B, 0x6BABF0, 0xCFBC20, 0x9AF436, 0x1DA9E3, 0x91615E, 0xE61B08,
+    0x659985, 0x5F14A0, 0x68408D, 0xFFD880, 0x4D7327, 0x310606, 0x1556CA,
+    0x73A8C9, 0x60E27B, 0xC08C6B};
+
+static const double zero = 0.0;
+static const double two24 = 1.6777216e+07;
+static const double one = 1.0;
+static const double twon24 = 5.9604644775390625e-08;
+
+static const double PIo2[] = {
+    1.57079625129699707031e+00,  // 0x3FF921FB, 0x40000000
+    7.54978941586159635335e-08,  // 0x3E74442D, 0x00000000
+    5.39030252995776476554e-15,  // 0x3CF84698, 0x80000000
+    3.28200341580791294123e-22,  // 0x3B78CC51, 0x60000000
+    1.27065575308067607349e-29,  // 0x39F01B83, 0x80000000
+    1.22933308981111328932e-36,  // 0x387A2520, 0x40000000
+    2.73370053816464559624e-44,  // 0x36E38222, 0x80000000
+    2.16741683877804819444e-51   // 0x3569F31D, 0x00000000
+};
+
+
+int __kernel_rem_pio2(double* x, double* y, int e0, int nx) {
+  static const int32_t jk = 3;
+  double fw;
+  int32_t jx = nx - 1;
+  int32_t jv = (e0 - 3) / 24;
+  if (jv < 0) jv = 0;
+  int32_t q0 = e0 - 24 * (jv + 1);
+  int32_t m = jx + jk;
+
+  double f[10];
+  for (int i = 0, j = jv - jx; i <= m; i++, j++) {
+    f[i] = (j < 0) ? zero : static_cast<double>(two_over_pi[j]);
+  }
+
+  double q[10];
+  for (int i = 0; i <= jk; i++) {
+    fw = 0.0;
+    for (int j = 0; j <= jx; j++) fw += x[j] * f[jx + i - j];
+    q[i] = fw;
+  }
+
+  int32_t jz = jk;
+
+recompute:
+
+  int32_t iq[10];
+  double z = q[jz];
+  for (int i = 0, j = jz; j > 0; i++, j--) {
+    fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
+    iq[i] = static_cast<int32_t>(z - two24 * fw);
+    z = q[j - 1] + fw;
+  }
+
+  z = scalbn(z, q0);
+  z -= 8.0 * std::floor(z * 0.125);
+  int32_t n = static_cast<int32_t>(z);
+  z -= static_cast<double>(n);
+  int32_t ih = 0;
+  if (q0 > 0) {
+    int32_t i = (iq[jz - 1] >> (24 - q0));
+    n += i;
+    iq[jz - 1] -= i << (24 - q0);
+    ih = iq[jz - 1] >> (23 - q0);
+  } else if (q0 == 0) {
+    ih = iq[jz - 1] >> 23;
+  } else if (z >= 0.5) {
+    ih = 2;
+  }
+
+  if (ih > 0) {
+    n += 1;
+    int32_t carry = 0;
+    for (int i = 0; i < jz; i++) {
+      int32_t j = iq[i];
+      if (carry == 0) {
+        if (j != 0) {
+          carry = 1;
+          iq[i] = 0x1000000 - j;
+        }
+      } else {
+        iq[i] = 0xffffff - j;
+      }
+    }
+    if (q0 == 1) {
+      iq[jz - 1] &= 0x7fffff;
+    } else if (q0 == 2) {
+      iq[jz - 1] &= 0x3fffff;
+    }
+    if (ih == 2) {
+      z = one - z;
+      if (carry != 0) z -= scalbn(one, q0);
+    }
+  }
+
+  if (z == zero) {
+    int32_t j = 0;
+    for (int i = jz - 1; i >= jk; i--) j |= iq[i];
+    if (j == 0) {
+      int32_t k = 1;
+      while (iq[jk - k] == 0) k++;
+      for (int i = jz + 1; i <= jz + k; i++) {
+        f[jx + i] = static_cast<double>(two_over_pi[jv + i]);
+        for (j = 0, fw = 0.0; j <= jx; j++) fw += x[j] * f[jx + i - j];
+        q[i] = fw;
+      }
+      jz += k;
+      goto recompute;
+    }
+  }
+
+  if (z == 0.0) {
+    jz -= 1;
+    q0 -= 24;
+    while (iq[jz] == 0) {
+      jz--;
+      q0 -= 24;
+    }
+  } else {
+    z = scalbn(z, -q0);
+    if (z >= two24) {
+      fw = static_cast<double>(static_cast<int32_t>(twon24 * z));
+      iq[jz] = static_cast<int32_t>(z - two24 * fw);
+      jz += 1;
+      q0 += 24;
+      iq[jz] = static_cast<int32_t>(fw);
+    } else {
+      iq[jz] = static_cast<int32_t>(z);
+    }
+  }
+
+  fw = scalbn(one, q0);
+  for (int i = jz; i >= 0; i--) {
+    q[i] = fw * static_cast<double>(iq[i]);
+    fw *= twon24;
+  }
+
+  double fq[10];
+  for (int i = jz; i >= 0; i--) {
+    fw = 0.0;
+    for (int k = 0; k <= jk && k <= jz - i; k++) fw += PIo2[k] * q[i + k];
+    fq[jz - i] = fw;
+  }
+
+  fw = 0.0;
+  for (int i = jz; i >= 0; i--) fw += fq[i];
+  y[0] = (ih == 0) ? fw : -fw;
+  fw = fq[0] - fw;
+  for (int i = 1; i <= jz; i++) fw += fq[i];
+  y[1] = (ih == 0) ? fw : -fw;
+  return n & 7;
+}
+
+
+int rempio2(double x, double* y) {
+  int32_t hx = static_cast<int32_t>(internal::double_to_uint64(x) >> 32);
+  int32_t ix = hx & 0x7fffffff;
+
+  if (ix >= 0x7ff00000) {
+    *y = base::OS::nan_value();
+    return 0;
+  }
+
+  int32_t e0 = (ix >> 20) - 1046;
+  uint64_t zi = internal::double_to_uint64(x) & 0xFFFFFFFFu;
+  zi |= static_cast<uint64_t>(ix - (e0 << 20)) << 32;
+  double z = internal::uint64_to_double(zi);
+
+  double tx[3];
+  for (int i = 0; i < 2; i++) {
+    tx[i] = static_cast<double>(static_cast<int32_t>(z));
+    z = (z - tx[i]) * two24;
+  }
+  tx[2] = z;
+
+  int nx = 3;
+  while (tx[nx - 1] == zero) nx--;
+  int n = __kernel_rem_pio2(tx, y, e0, nx);
+  if (hx < 0) {
+    y[0] = -y[0];
+    y[1] = -y[1];
+    return -n;
+  }
+  return n;
+}
+}  // namespace fdlibm
+}  // namespace v8
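
In effect, rempio2(x, y) computes an integer n with x == n * pi/2 + y[0] + y[1]
and |y[0] + y[1]| <= pi/4, returning n reduced modulo 8 (negated for negative
x); the pair y[0], y[1] carries the remainder to better than double precision.
A rough, self-contained sketch of that contract in plain long double
arithmetic, valid only for moderate |x| (the Payne-Hanek code above exists
precisely because this naive version loses all precision for huge |x|):

    // Naive pi/2 reduction: illustrates the contract, not the algorithm.
    #include <cmath>
    #include <cstdio>

    int naive_rempio2(double x, double* y) {
      const long double kPiOver2 = 1.57079632679489661923132169163975144L;
      long double n = std::floor(x / kPiOver2 + 0.5L);  // Nearest multiple.
      long double r = x - n * kPiOver2;  // |r| <= pi/4 for moderate |x|.
      y[0] = static_cast<double>(r);
      y[1] = static_cast<double>(r - y[0]);  // Tail beyond double precision.
      return static_cast<int>(n) & 7;
    }

    int main() {
      double y[2];
      int n = naive_rempio2(10.0, y);  // 10 = 6 * pi/2 + 0.575..., so n = 6.
      std::printf("n=%d y0=%.17g y1=%.17g\n", n, y[0], y[1]);
      return 0;
    }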
diff --git a/third_party/fdlibm/fdlibm.h b/third_party/fdlibm/fdlibm.h
new file mode 100644
index 0000000..cadf85b
--- /dev/null
+++ b/third_party/fdlibm/fdlibm.h
@@ -0,0 +1,31 @@
+// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
+//
+// ====================================================
+// Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunSoft, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// The original source code covered by the license above has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_FDLIBM_H_
+#define V8_FDLIBM_H_
+
+namespace v8 {
+namespace fdlibm {
+
+int rempio2(double x, double* y);
+
+// Constants to be exposed to builtins via Float64Array.
+struct MathConstants {
+  static const double constants[53];
+};
+}  // namespace fdlibm
+}  // namespace v8
+
+#endif  // V8_FDLIBM_H_
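
Several entries in the MathConstants table in fdlibm.cc are deliberately
multi-double expansions: pio2_1 keeps only the leading 33 significant bits of
pi/2 (so products like n * pio2_1 stay exact for the n used in medium-range
argument reduction), and pio2_1t holds the next chunk of the remainder. A
hedged sketch of deriving such a split in ordinary C++ (not the code V8 uses;
on platforms where long double is no wider than double, the tail is
correspondingly less accurate):

    // Split pi/2 into a 33-bit head and a tail, fdlibm-style.
    #include <cmath>
    #include <cstdio>

    int main() {
      const long double kPiOver2 = 1.57079632679489661923132169163975144L;
      // Scale so the wanted leading bits become integral, then truncate.
      double scaled = std::ldexp(static_cast<double>(kPiOver2), 33);
      double head = std::ldexp(std::floor(scaled), -33);
      double tail = static_cast<double>(kPiOver2 - head);
      // head reproduces pio2_1; tail approximates pio2_1t from the table.
      std::printf("head=%.17g tail=%.17g\n", head, tail);
      return 0;
    }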
diff --git a/third_party/fdlibm/fdlibm.js b/third_party/fdlibm/fdlibm.js
new file mode 100644
index 0000000..7fd9adf
--- /dev/null
+++ b/third_party/fdlibm/fdlibm.js
@@ -0,0 +1,814 @@
+// The following is adapted from fdlibm (http://www.netlib.org/fdlibm).
+//
+// ====================================================
+// Copyright (C) 1993-2004 by Sun Microsystems, Inc. All rights reserved.
+//
+// Developed at SunSoft, a Sun Microsystems, Inc. business.
+// Permission to use, copy, modify, and distribute this
+// software is freely granted, provided that this notice
+// is preserved.
+// ====================================================
+//
+// The original source code covered by the license above has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+//
+// The following is a straightforward translation of fdlibm routines
+// by Raymond Toy (rtoy@google.com).
+
+// Double constants that do not have empty lower 32 bits are found in fdlibm.cc
+// and exposed through kMath as a typed array. We assume the compiler converts
+// from decimal to binary accurately enough to produce the intended values.
+// kMath is initialized to a Float64Array during genesis and is not writable.
+var kMath;
+
+const INVPIO2 = kMath[0];
+const PIO2_1  = kMath[1];
+const PIO2_1T = kMath[2];
+const PIO2_2  = kMath[3];
+const PIO2_2T = kMath[4];
+const PIO2_3  = kMath[5];
+const PIO2_3T = kMath[6];
+const PIO4    = kMath[32];
+const PIO4LO  = kMath[33];
+
+// Compute k and r such that x - k*pi/2 = r where |r| < pi/4. For
+// precision, r is returned as two values y0 and y1 such that r = y0 + y1
+// to more than double precision.
+macro REMPIO2(X)
+  var n, y0, y1;
+  var hx = %_DoubleHi(X);
+  var ix = hx & 0x7fffffff;
+
+  if (ix < 0x4002d97c) {
+    // |X| ~< 3*pi/4, special case with n = +/- 1
+    if (hx > 0) {
+      var z = X - PIO2_1;
+      if (ix != 0x3ff921fb) {
+        // 33+53 bit pi is good enough
+        y0 = z - PIO2_1T;
+        y1 = (z - y0) - PIO2_1T;
+      } else {
+        // near pi/2, use 33+33+53 bit pi
+        z -= PIO2_2;
+        y0 = z - PIO2_2T;
+        y1 = (z - y0) - PIO2_2T;
+      }
+      n = 1;
+    } else {
+      // Negative X
+      var z = X + PIO2_1;
+      if (ix != 0x3ff921fb) {
+        // 33+53 bit pi is good enough
+        y0 = z + PIO2_1T;
+        y1 = (z - y0) + PIO2_1T;
+      } else {
+        // near pi/2, use 33+33+53 bit pi
+        z += PIO2_2;
+        y0 = z + PIO2_2T;
+        y1 = (z - y0) + PIO2_2T;
+      }
+      n = -1;
+    }
+  } else if (ix <= 0x413921fb) {
+    // |X| ~<= 2^19*(pi/2), medium size
+    var t = MathAbs(X);
+    n = (t * INVPIO2 + 0.5) | 0;
+    var r = t - n * PIO2_1;
+    var w = n * PIO2_1T;
+    // First round good to 85 bit
+    y0 = r - w;
+    if (ix - (%_DoubleHi(y0) & 0x7ff00000) > 0x1000000) {
+      // 2nd iteration needed, good to 118
+      t = r;
+      w = n * PIO2_2;
+      r = t - w;
+      w = n * PIO2_2T - ((t - r) - w);
+      y0 = r - w;
+      if (ix - (%_DoubleHi(y0) & 0x7ff00000) > 0x3100000) {
+        // 3rd iteration needed. 151 bits accuracy
+        t = r;
+        w = n * PIO2_3;
+        r = t - w;
+        w = n * PIO2_3T - ((t - r) - w);
+        y0 = r - w;
+      }
+    }
+    y1 = (r - y0) - w;
+    if (hx < 0) {
+      n = -n;
+      y0 = -y0;
+      y1 = -y1;
+    }
+  } else {
+    // Need to do full Payne-Hanek reduction here.
+    var r = %RemPiO2(X);
+    n = r[0];
+    y0 = r[1];
+    y1 = r[2];
+  }
+endmacro
+
+
+// __kernel_sin(X, Y, IY)
+// kernel sin function on [-pi/4, pi/4], pi/4 ~ 0.7854
+// Input X is assumed to be bounded by ~pi/4 in magnitude.
+// Input Y is the tail of X so that x = X + Y.
+//
+// Algorithm
+//  1. Since ieee_sin(-x) = -ieee_sin(x), we need only to consider positive x.
+//  2. ieee_sin(x) is approximated by a polynomial of degree 13 on
+//     [0,pi/4]
+//                           3            13
+//          sin(x) ~ x + S1*x + ... + S6*x
+//     where
+//
+//    |ieee_sin(x)    2     4     6     8     10     12  |     -58
+//    |----- - (1+S1*x +S2*x +S3*x +S4*x +S5*x  +S6*x   )| <= 2
+//    |  x                                               |
+//
+//  3. ieee_sin(X+Y) = ieee_sin(X) + sin'(X')*Y
+//              ~ ieee_sin(X) + (1-X*X/2)*Y
+//     For better accuracy, let
+//               3      2      2      2      2
+//          r = X *(S2+X *(S3+X *(S4+X *(S5+X *S6))))
+//     then                   3    2
+//          sin(x) = X + (S1*X + (X *(r-Y/2)+Y))
+//
+macro KSIN(x)
+kMath[7+x]
+endmacro
+
+macro RETURN_KERNELSIN(X, Y, SIGN)
+  var z = X * X;
+  var v = z * X;
+  var r = KSIN(1) + z * (KSIN(2) + z * (KSIN(3) +
+                    z * (KSIN(4) + z * KSIN(5))));
+  return (X - ((z * (0.5 * Y - v * r) - Y) - v * KSIN(0))) SIGN;
+endmacro
+
+// __kernel_cos(X, Y)
+// kernel cos function on [-pi/4, pi/4], pi/4 ~ 0.785398164
+// Input X is assumed to be bounded by ~pi/4 in magnitude.
+// Input Y is the tail of X so that x = X + Y.
+//
+// Algorithm
+//  1. Since ieee_cos(-x) = ieee_cos(x), we need only to consider positive x.
+//  2. ieee_cos(x) is approximated by a polynomial of degree 14 on
+//     [0,pi/4]
+//                                   4            14
+//          cos(x) ~ 1 - x*x/2 + C1*x + ... + C6*x
+//     where the Remez error is
+//
+//  |                   2     4     6     8     10    12     14 |     -58
+//  |ieee_cos(x)-(1-.5*x +C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  )| <= 2
+//  |                                                           |
+//
+//                 4     6     8     10    12     14
+//  3. let r = C1*x +C2*x +C3*x +C4*x +C5*x  +C6*x  , then
+//         ieee_cos(x) = 1 - x*x/2 + r
+//     since ieee_cos(X+Y) ~ ieee_cos(X) - ieee_sin(X)*Y
+//                    ~ ieee_cos(X) - X*Y,
+//     a correction term is necessary in ieee_cos(x) and hence
+//         cos(X+Y) = 1 - (X*X/2 - (r - X*Y))
+//     For better accuracy when x > 0.3, let qx = |x|/4 with
+//     the last 32 bits mask off, and if x > 0.78125, let qx = 0.28125.
+//     Then
+//         cos(X+Y) = (1-qx) - ((X*X/2-qx) - (r-X*Y)).
+//     Note that 1-qx and (X*X/2-qx) are EXACT here, and the
+//     magnitude of the latter is at least a quarter of X*X/2,
+//     thus, reducing the rounding error in the subtraction.
+//
+macro KCOS(x)
+kMath[13+x]
+endmacro
+
+macro RETURN_KERNELCOS(X, Y, SIGN)
+  var ix = %_DoubleHi(X) & 0x7fffffff;
+  var z = X * X;
+  var r = z * (KCOS(0) + z * (KCOS(1) + z * (KCOS(2)+
+          z * (KCOS(3) + z * (KCOS(4) + z * KCOS(5))))));
+  if (ix < 0x3fd33333) {  // |x| ~< 0.3
+    return (1 - (0.5 * z - (z * r - X * Y))) SIGN;
+  } else {
+    var qx;
+    if (ix > 0x3fe90000) {  // |x| > 0.78125
+      qx = 0.28125;
+    } else {
+      qx = %_ConstructDouble(%_DoubleHi(0.25 * X), 0);
+    }
+    var hz = 0.5 * z - qx;
+    return (1 - qx - (hz - (z * r - X * Y))) SIGN;
+  }
+endmacro
+
+
+// kernel tan function on [-pi/4, pi/4], pi/4 ~ 0.7854
+// Input x is assumed to be bounded by ~pi/4 in magnitude.
+// Input y is the tail of x.
+// Input k indicates whether ieee_tan (if k = 1) or -1/tan (if k = -1)
+// is returned.
+//
+// Algorithm
+//  1. Since ieee_tan(-x) = -ieee_tan(x), we need only to consider positive x.
+//  2. if x < 2^-28 (hx<0x3e300000 0), return x with inexact if x!=0.
+//  3. ieee_tan(x) is approximated by an odd polynomial of degree 27 on
+//     [0,0.67434]
+//                           3             27
+//          tan(x) ~ x + T1*x + ... + T13*x
+//     where
+//
+//     |ieee_tan(x)    2     4            26   |     -59.2
+//     |----- - (1+T1*x +T2*x +.... +T13*x    )| <= 2
+//     |  x                                    |
+//
+//     Note: ieee_tan(x+y) = ieee_tan(x) + tan'(x)*y
+//                    ~ ieee_tan(x) + (1+x*x)*y
+//     Therefore, for better accuracy in computing ieee_tan(x+y), let
+//               3      2      2       2       2
+//          r = x *(T2+x *(T3+x *(...+x *(T12+x *T13))))
+//     then
+//                              3    2
+//          tan(x+y) = x + (T1*x + (x *(r+y)+y))
+//
+//  4. For x in [0.67434,pi/4],  let y = pi/4 - x, then
+//          tan(x) = ieee_tan(pi/4-y) = (1-ieee_tan(y))/(1+ieee_tan(y))
+//                 = 1 - 2*(ieee_tan(y) - (ieee_tan(y)^2)/(1+ieee_tan(y)))
+//
+// Set returnTan to 1 for tan; -1 for cot.  Anything else is illegal
+// and will cause incorrect results.
+//
+macro KTAN(x)
+kMath[19+x]
+endmacro
+
+function KernelTan(x, y, returnTan) {
+  var z;
+  var w;
+  var hx = %_DoubleHi(x);
+  var ix = hx & 0x7fffffff;
+
+  if (ix < 0x3e300000) {  // |x| < 2^-28
+    if (((ix | %_DoubleLo(x)) | (returnTan + 1)) == 0) {
+      // x == 0 && returnTan == -1
+      return 1 / MathAbs(x);
+    } else {
+      if (returnTan == 1) {
+        return x;
+      } else {
+        // Compute -1/(x + y) carefully
+        var w = x + y;
+        var z = %_ConstructDouble(%_DoubleHi(w), 0);
+        var v = y - (z - x);
+        var a = -1 / w;
+        var t = %_ConstructDouble(%_DoubleHi(a), 0);
+        var s = 1 + t * z;
+        return t + a * (s + t * v);
+      }
+    }
+  }
+  if (ix >= 0x3fe59428) {  // |x| > .6744
+    if (x < 0) {
+      x = -x;
+      y = -y;
+    }
+    z = PIO4 - x;
+    w = PIO4LO - y;
+    x = z + w;
+    y = 0;
+  }
+  z = x * x;
+  w = z * z;
+
+  // Break x^5 * (T1 + x^2*T2 + ...) into
+  // x^5 * (T1 + x^4*T3 + ... + x^20*T11) +
+  // x^5 * (x^2 * (T2 + x^4*T4 + ... + x^22*T12))
+  var r = KTAN(1) + w * (KTAN(3) + w * (KTAN(5) +
+                    w * (KTAN(7) + w * (KTAN(9) + w * KTAN(11)))));
+  var v = z * (KTAN(2) + w * (KTAN(4) + w * (KTAN(6) +
+                         w * (KTAN(8) + w * (KTAN(10) + w * KTAN(12))))));
+  var s = z * x;
+  r = y + z * (s * (r + v) + y);
+  r = r + KTAN(0) * s;
+  w = x + r;
+  if (ix >= 0x3fe59428) {
+    return (1 - ((hx >> 30) & 2)) *
+      (returnTan - 2.0 * (x - (w * w / (w + returnTan) - r)));
+  }
+  if (returnTan == 1) {
+    return w;
+  } else {
+    z = %_ConstructDouble(%_DoubleHi(w), 0);
+    v = r - (z - x);
+    var a = -1 / w;
+    var t = %_ConstructDouble(%_DoubleHi(a), 0);
+    s = 1 + t * z;
+    return t + a * (s + t * v);
+  }
+}
+
+function MathSinSlow(x) {
+  REMPIO2(x);
+  var sign = 1 - (n & 2);
+  if (n & 1) {
+    RETURN_KERNELCOS(y0, y1, * sign);
+  } else {
+    RETURN_KERNELSIN(y0, y1, * sign);
+  }
+}
+
+function MathCosSlow(x) {
+  REMPIO2(x);
+  if (n & 1) {
+    var sign = (n & 2) - 1;
+    RETURN_KERNELSIN(y0, y1, * sign);
+  } else {
+    var sign = 1 - (n & 2);
+    RETURN_KERNELCOS(y0, y1, * sign);
+  }
+}
+
+// ECMA 262 - 15.8.2.16
+function MathSin(x) {
+  x = x * 1;  // Convert to number.
+  if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
+    // |x| < pi/4, approximately.  No reduction needed.
+    RETURN_KERNELSIN(x, 0, /* empty */);
+  }
+  return MathSinSlow(x);
+}
+
+// ECMA 262 - 15.8.2.7
+function MathCos(x) {
+  x = x * 1;  // Convert to number.
+  if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
+    // |x| < pi/4, approximately.  No reduction needed.
+    RETURN_KERNELCOS(x, 0, /* empty */);
+  }
+  return MathCosSlow(x);
+}
+
+// ECMA 262 - 15.8.2.18
+function MathTan(x) {
+  x = x * 1;  // Convert to number.
+  if ((%_DoubleHi(x) & 0x7fffffff) <= 0x3fe921fb) {
+    // |x| < pi/4, approximately.  No reduction needed.
+    return KernelTan(x, 0, 1);
+  }
+  REMPIO2(x);
+  return KernelTan(y0, y1, (n & 1) ? -1 : 1);
+}
+
+// ES6 draft 09-27-13, section 20.2.2.20.
+// Math.log1p
+//
+// Method :
+//   1. Argument Reduction: find k and f such that
+//                      1+x = 2^k * (1+f),
+//         where  sqrt(2)/2 < 1+f < sqrt(2) .
+//
+//      Note. If k=0, then f=x is exact. However, if k!=0, then f
+//      may not be representable exactly. In that case, a correction
+//      term is need. Let u=1+x rounded. Let c = (1+x)-u, then
+//      log(1+x) - log(u) ~ c/u. Thus, we proceed to compute log(u),
+//      and add back the correction term c/u.
+//      (Note: when x > 2**53, one can simply return log(x))
+//
+//   2. Approximation of log1p(f).
+//      Let s = f/(2+f) ; based on log(1+f) = log(1+s) - log(1-s)
+//            = 2s + 2/3 s**3 + 2/5 s**5 + .....,
+//            = 2s + s*R
+//      We use a special Remez algorithm on [0,0.1716] to generate
+//      a polynomial of degree 14 to approximate R. The maximum error
+//      of this polynomial approximation is bounded by 2**-58.45. In
+//      other words,
+//                      2      4      6      8      10      12      14
+//          R(z) ~ Lp1*s +Lp2*s +Lp3*s +Lp4*s +Lp5*s  +Lp6*s  +Lp7*s
+//      (the values of Lp1 to Lp7 are listed in the program)
+//      and
+//          |      2          14          |     -58.45
+//          | Lp1*s +...+Lp7*s    -  R(z) | <= 2
+//          |                             |
+//      Note that 2s = f - s*f = f - hfsq + s*hfsq, where hfsq = f*f/2.
+//      In order to guarantee error in log below 1ulp, we compute log
+//      by
+//              log1p(f) = f - (hfsq - s*(hfsq+R)).
+//
+//      3. Finally, log1p(x) = k*ln2 + log1p(f).
+//                           = k*ln2_hi+(f-(hfsq-(s*(hfsq+R)+k*ln2_lo)))
+//         Here ln2 is split into two floating point numbers:
+//                      ln2_hi + ln2_lo,
+//         where n*ln2_hi is always exact for |n| < 2000.
+//
+// Special cases:
+//      log1p(x) is NaN with signal if x < -1 (including -INF) ;
+//      log1p(+INF) is +INF; log1p(-1) is -INF with signal;
+//      log1p(NaN) is that NaN with no signal.
+//
+// Accuracy:
+//      according to an error analysis, the error is always less than
+//      1 ulp (unit in the last place).
+//
+// Constants:
+//      Constants are found in fdlibm.cc. We assume the C++ compiler converts
+//      from decimal to binary accurately enough to produce the intended values.
+//
+// Note: Assuming log() returns an accurate answer, the following
+//       algorithm can be used to compute log1p(x) to within a few ULP:
+//
+//              u = 1+x;
+//              if (u==1.0) return x ; else
+//                          return log(u)*(x/(u-1.0));
+//
+//       See HP-15C Advanced Functions Handbook, p.193.
+//
+const LN2_HI    = kMath[34];
+const LN2_LO    = kMath[35];
+const TWO54     = kMath[36];
+const TWO_THIRD = kMath[37];
+macro KLOG1P(x)
+(kMath[38+x])
+endmacro
+
+function MathLog1p(x) {
+  x = x * 1;  // Convert to number.
+  var hx = %_DoubleHi(x);
+  var ax = hx & 0x7fffffff;
+  var k = 1;
+  var f = x;
+  var hu = 1;
+  var c = 0;
+  var u = x;
+
+  if (hx < 0x3fda827a) {
+    // x < 0.41422
+    if (ax >= 0x3ff00000) {  // |x| >= 1
+      if (x === -1) {
+        return -INFINITY;  // log1p(-1) = -inf
+      } else {
+        return NAN;  // log1p(x<-1) = NaN
+      }
+    } else if (ax < 0x3c900000)  {
+      // For |x| < 2^-54 we can return x.
+      return x;
+    } else if (ax < 0x3e200000) {
+      // For |x| < 2^-29 we can use a simple two-term Taylor series.
+      return x - x * x * 0.5;
+    }
+
+    if ((hx > 0) || (hx <= -0x402D413D)) {  // (int) 0xbfd2bec3 = -0x402d413d
+      // -.2929 < x < 0.41422
+      k = 0;
+    }
+  }
+
+  // Handle Infinity and NAN
+  if (hx >= 0x7ff00000) return x;
+
+  if (k !== 0) {
+    if (hx < 0x43400000) {
+      // x < 2^53
+      u = 1 + x;
+      hu = %_DoubleHi(u);
+      k = (hu >> 20) - 1023;
+      c = (k > 0) ? 1 - (u - x) : x - (u - 1);
+      c = c / u;
+    } else {
+      hu = %_DoubleHi(u);
+      k = (hu >> 20) - 1023;
+    }
+    hu = hu & 0xfffff;
+    if (hu < 0x6a09e) {
+      u = %_ConstructDouble(hu | 0x3ff00000, %_DoubleLo(u));  // Normalize u.
+    } else {
+      ++k;
+      u = %_ConstructDouble(hu | 0x3fe00000, %_DoubleLo(u));  // Normalize u/2.
+      hu = (0x00100000 - hu) >> 2;
+    }
+    f = u - 1;
+  }
+
+  var hfsq = 0.5 * f * f;
+  if (hu === 0) {
+    // |f| < 2^-20;
+    if (f === 0) {
+      if (k === 0) {
+        return 0.0;
+      } else {
+        return k * LN2_HI + (c + k * LN2_LO);
+      }
+    }
+    var R = hfsq * (1 - TWO_THIRD * f);
+    if (k === 0) {
+      return f - R;
+    } else {
+      return k * LN2_HI - ((R - (k * LN2_LO + c)) - f);
+    }
+  }
+
+  var s = f / (2 + f);
+  var z = s * s;
+  var R = z * (KLOG1P(0) + z * (KLOG1P(1) + z *
+              (KLOG1P(2) + z * (KLOG1P(3) + z *
+              (KLOG1P(4) + z * (KLOG1P(5) + z * KLOG1P(6)))))));
+  if (k === 0) {
+    return f - (hfsq - s * (hfsq + R));
+  } else {
+    return k * LN2_HI - ((hfsq - (s * (hfsq + R) + (k * LN2_LO + c))) - f);
+  }
+}
+
+// ES6 draft 09-27-13, section 20.2.2.14.
+// Math.expm1
+// Returns exp(x)-1, the exponential of x minus 1.
+//
+// Method
+//   1. Argument reduction:
+//      Given x, find r and integer k such that
+//
+//               x = k*ln2 + r,  |r| <= 0.5*ln2 ~ 0.34658
+//
+//      Here a correction term c will be computed to compensate
+//      the error in r when rounded to a floating-point number.
+//
+//   2. Approximating expm1(r) by a special rational function on
+//      the interval [0,0.34658]:
+//      Since
+//          r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 - r^4/360 + ...
+//      we define R1(r*r) by
+//          r*(exp(r)+1)/(exp(r)-1) = 2+ r^2/6 * R1(r*r)
+//      That is,
+//          R1(r**2) = 6/r *((exp(r)+1)/(exp(r)-1) - 2/r)
+//                   = 6/r * ( 1 + 2.0*(1/(exp(r)-1) - 1/r))
+//                   = 1 - r^2/60 + r^4/2520 - r^6/100800 + ...
+//      We use a special Remez algorithm on [0,0.347] to generate
+//      a polynomial of degree 5 in r*r to approximate R1. The
+//      maximum error of this polynomial approximation is bounded
+//      by 2**-61. In other words,
+//          R1(z) ~ 1.0 + Q1*z + Q2*z**2 + Q3*z**3 + Q4*z**4 + Q5*z**5
+//      where   Q1  =  -1.6666666666666567384E-2,
+//              Q2  =   3.9682539681370365873E-4,
+//              Q3  =  -9.9206344733435987357E-6,
+//              Q4  =   2.5051361420808517002E-7,
+//              Q5  =  -6.2843505682382617102E-9;
+//      (where z=r*r, and the values of Q1 to Q5 are listed below)
+//      with error bounded by
+//          |                  5           |     -61
+//          | 1.0+Q1*z+...+Q5*z   -  R1(z) | <= 2
+//          |                              |
+//
+//      expm1(r) = exp(r)-1 is then computed by the following
+//      specific way, which minimizes the accumulated rounding error:
+//                             2     3
+//                            r     r    [ 3 - (R1 + R1*r/2)  ]
+//            expm1(r) = r + --- + --- * [--------------------]
+//                            2     2    [ 6 - r*(3 - R1*r/2) ]
+//
+//      To compensate the error in the argument reduction, we use
+//              expm1(r+c) = expm1(r) + c + expm1(r)*c
+//                         ~ expm1(r) + c + r*c
+//      Thus c+r*c will be added in as the correction terms for
+//      expm1(r+c). Now rearrange the terms so that compiler
+//      optimizations cannot break the intended evaluation order:
+//                      (      2                                    2 )
+//                      ({  ( r    [ R1 -  (3 - R1*r/2) ]  )  }    r  )
+//       expm1(r+c)~r - ({r*(--- * [--------------------]-c)-c} - --- )
+//                      ({  ( 2    [ 6 - r*(3 - R1*r/2) ]  )  }    2  )
+//                      (                                             )
+//
+//                 = r - E
+//   3. Scale back to obtain expm1(x):
+//      From step 1, we have
+//         expm1(x) = either 2^k*[expm1(r)+1] - 1
+//                    or     2^k*[expm1(r) + (1-2^-k)]
+//   4. Implementation notes:
+//      (A). To save one multiplication, we scale the coefficient Qi
+//           to Qi*2^i, and replace z by (x^2)/2.
+//      (B). To achieve maximum accuracy, we compute expm1(x) by
+//        (i)   if x < -56*ln2, return -1.0, (raise inexact if x!=inf)
+//        (ii)  if k=0, return r-E
+//        (iii) if k=-1, return 0.5*(r-E)-0.5
+//        (iv)  if k=1 if r < -0.25, return 2*((r+0.5)- E)
+//                     else          return  1.0+2.0*(r-E);
+//        (v)   if (k<=-2||k>56) return 2^k(1-(E-r)) - 1 (or exp(x)-1)
+//        (vi)  if k <= 20, return 2^k((1-2^-k)-(E-r)), else
+//        (vii) return 2^k(1-((E+2^-k)-r))
+//
+// Special cases:
+//      expm1(INF) is INF, expm1(NaN) is NaN;
+//      expm1(-INF) is -1, and
+//      for finite argument, only expm1(0)=0 is exact.
+//
+// Accuracy:
+//      according to an error analysis, the error is always less than
+//      1 ulp (unit in the last place).
+//
+// Misc. info.
+//      For IEEE double
+//          if x > 7.09782712893383973096e+02 then expm1(x) overflow
+//
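+// Example: x = 1 reduces to k = 1 and r = 1 - ln2 ~ 0.30685, and the
+// result e - 1 ~ 1.71828 is then reconstructed via case (iv) above as
+// 1.0 + 2.0*(r - E).
+//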
+const KEXPM1_OVERFLOW = kMath[45];
+const INVLN2          = kMath[46];
+macro KEXPM1(x)
+(kMath[47+x])
+endmacro
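+// KEXPM1(i) reads the i-th of the five polynomial coefficients Q1..Q5
+// (scaled as described in implementation note 4(A) above) from the
+// kMath constant table.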
+
+function MathExpm1(x) {
+  x = x * 1;  // Convert to number.
+  var y;
+  var hi;
+  var lo;
+  var k;
+  var t;
+  var c;
+
+  var hx = %_DoubleHi(x);
+  var xsb = hx & 0x80000000;     // Sign bit of x
+  y = (xsb === 0) ? x : -x;      // y = |x| (y is declared above).
+  hx &= 0x7fffffff;              // High word of |x|
+
+  // Filter out huge and non-finite argument
+  if (hx >= 0x4043687a) {     // if |x| >= 56 * ln2 (approximately)
+    if (hx >= 0x40862e42) {   // if |x| >= 709.78
+      if (hx >= 0x7ff00000) {
+        // expm1(inf) = inf; expm1(-inf) = -1; expm1(nan) = nan;
+        return (x === -INFINITY) ? -1 : x;
+      }
+      if (x > KEXPM1_OVERFLOW) return INFINITY;  // Overflow
+    }
+    if (xsb !== 0) return -1;  // x < -56 * ln2, return -1.
+  }
+
+  // Argument reduction
+  if (hx > 0x3fd62e42) {    // if |x| > 0.5 * ln2
+    if (hx < 0x3ff0a2b2) {  // and |x| < 1.5 * ln2
+      if (xsb === 0) {
+        hi = x - LN2_HI;
+        lo = LN2_LO;
+        k = 1;
+      } else {
+        hi = x + LN2_HI;
+        lo = -LN2_LO;
+        k = -1;
+      }
+    } else {
+      k = (INVLN2 * x + ((xsb === 0) ? 0.5 : -0.5)) | 0;
+      t = k;
+      // t * ln2_hi is exact here.
+      hi = x - t * LN2_HI;
+      lo = t * LN2_LO;
+    }
+    x = hi - lo;
+    c = (hi - x) - lo;
+  } else if (hx < 0x3c900000) {
+    // When |x| < 2^-54, we can return x.
+    return x;
+  } else {
+    // Fall through.
+    k = 0;
+  }
+
+  // x is now in primary range
+  var hfx = 0.5 * x;
+  var hxs = x * hfx;
+  var r1 = 1 + hxs * (KEXPM1(0) + hxs * (KEXPM1(1) + hxs *
+                     (KEXPM1(2) + hxs * (KEXPM1(3) + hxs * KEXPM1(4)))));
+  t = 3 - r1 * hfx;
+  var e = hxs * ((r1 - t) / (6 - x * t));
+  if (k === 0) {  // c is 0
+    return x - (x * e - hxs);
+  } else {
+    e = (x * (e - c) - c);
+    e -= hxs;
+    if (k === -1) return 0.5 * (x - e) - 0.5;
+    if (k === 1) {
+      if (x < -0.25) return -2 * (e - (x + 0.5));
+      return 1 + 2 * (x - e);
+    }
+
+    if (k <= -2 || k > 56) {
+      // It suffices to return exp(x) - 1.
+      y = 1 - (e - x);
+      // Add k to y's exponent
+      y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
+      return y - 1;
+    }
+    if (k < 20) {
+      // t = 1 - 2^-k
+      t = %_ConstructDouble(0x3ff00000 - (0x200000 >> k), 0);
+      y = t - (e - x);
+      // Add k to y's exponent
+      y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
+    } else {
+      // t = 2^-k
+      t = %_ConstructDouble((0x3ff - k) << 20, 0);
+      y = x - (e + t);
+      y += 1;
+      // Add k to y's exponent
+      y = %_ConstructDouble(%_DoubleHi(y) + (k << 20), %_DoubleLo(y));
+    }
+  }
+  return y;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.30.
+// Math.sinh
+// Method :
+// Mathematically, sinh(x) is defined to be (exp(x)-exp(-x))/2.
+//      1. Replace x by |x| (sinh(-x) = -sinh(x)).
+//      2.
+//                                                  E + E/(E+1)
+//          0        <= x <= 22     :  sinh(x) := --------------, E=expm1(x)
+//                                                      2
+//
+//          22       <= x <= lnovft :  sinh(x) := exp(x)/2
+//          lnovft   <= x <= ln2ovft:  sinh(x) := exp(x/2)/2 * exp(x/2)
+//          ln2ovft  <  x           :  sinh(x) := x*shuge (overflow)
+//
+// Special cases:
+//      sinh(x) is x if x is +Infinity, -Infinity, or NaN.
+//      only sinh(0)=0 is exact for finite x.
+//
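+// Note: with E = expm1(|x|) we have exp(|x|) = E + 1 and
+// exp(-|x|) = 1/(E + 1), so
+//     (exp(|x|) - exp(-|x|))/2 = ((E + 1)^2 - 1)/(2*(E + 1))
+//                              = (E + E/(E + 1))/2,
+// which is the formula used in case 2 above.
+//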
+const KSINH_OVERFLOW = kMath[52];
+const TWO_M28 = 3.725290298461914e-9;  // 2^-28, empty lower half
+const LOG_MAXD = 709.7822265625;  // 0x40862e42 00000000, empty lower half
+
+function MathSinh(x) {
+  x = x * 1;  // Convert to number.
+  var h = (x < 0) ? -0.5 : 0.5;
+  // |x| in [0, 22]. return sign(x)*0.5*(E+E/(E+1))
+  var ax = MathAbs(x);
+  if (ax < 22) {
+    // For |x| < 2^-28, sinh(x) = x
+    if (ax < TWO_M28) return x;
+    var t = MathExpm1(ax);
+    if (ax < 1) return h * (2 * t - t * t / (t + 1));
+    return h * (t + t / (t + 1));
+  }
+  // |x| in [22, log(maxdouble)], return 0.5 * exp(|x|)
+  if (ax < LOG_MAXD) return h * MathExp(ax);
+  // |x| in [log(maxdouble), overflowthreshold]
+  // overflowthreshold = 710.4758600739426
+  if (ax <= KSINH_OVERFLOW) {
+    var w = MathExp(0.5 * ax);
+    var t = h * w;
+    return t * w;
+  }
+  // |x| > overflowthreshold or is NaN.
+  // Return Infinity of the appropriate sign or NaN.
+  return x * INFINITY;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.12.
+// Math.cosh
+// Method :
+// Mathematically, cosh(x) is defined to be (exp(x)+exp(-x))/2.
+//      1. Replace x by |x| (cosh(x) = cosh(-x)).
+//      2.
+//                                                      [ exp(x) - 1 ]^2
+//          0        <= x <= ln2/2  :  cosh(x) := 1 + -------------------
+//                                                         2*exp(x)
+//
+//                                                 exp(x) + 1/exp(x)
+//          ln2/2    <= x <= 22     :  cosh(x) := -------------------
+//                                                        2
+//          22       <= x <= lnovft :  cosh(x) := exp(x)/2
+//          lnovft   <= x <= ln2ovft:  cosh(x) := exp(x/2)/2 * exp(x/2)
+//          ln2ovft  <  x           :  cosh(x) := huge*huge (overflow)
+//
+// Special cases:
+//      cosh(x) is |x| if x is +INF, -INF, or NaN.
+//      only cosh(0)=1 is exact for finite x.
+//
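+// Note: for the first case, cosh(x) - 1 = (exp(x) + exp(-x) - 2)/2
+// = (exp(x) - 1)^2/(2*exp(x)), which is why expm1 preserves full
+// accuracy near zero.
+//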
+const KCOSH_OVERFLOW = kMath[52];
+
+function MathCosh(x) {
+  x = x * 1;  // Convert to number.
+  var ix = %_DoubleHi(x) & 0x7fffffff;
+  // |x| in [0, 0.5*ln2], return 1+expm1(|x|)^2/(2*exp(|x|))
+  if (ix < 0x3fd62e43) {
+    var t = MathExpm1(MathAbs(x));
+    var w = 1 + t;
+    // For |x| < 2^-55, cosh(x) = 1
+    if (ix < 0x3c800000) return w;
+    return 1 + (t * t) / (w + w);
+  }
+  // |x| in [0.5*ln2, 22], return (exp(|x|) + 1/exp(|x|))/2
+  if (ix < 0x40360000) {
+    var t = MathExp(MathAbs(x));
+    return 0.5 * t + 0.5 / t;
+  }
+  // |x| in [22, log(maxdouble)], return half*exp(|x|)
+  if (ix < 0x40862e42) return 0.5 * MathExp(MathAbs(x));
+  // |x| in [log(maxdouble), overflowthreshold]
+  if (MathAbs(x) <= KCOSH_OVERFLOW) {
+    var w = MathExp(0.5 * MathAbs(x));
+    var t = 0.5 * w;
+    return t * w;
+  }
+  if (NUMBER_IS_NAN(x)) return x;
+  // |x| > overflowthreshold.
+  return INFINITY;
+}
diff --git a/tools/check-name-clashes.py b/tools/check-name-clashes.py
new file mode 100755
index 0000000..e448930
--- /dev/null
+++ b/tools/check-name-clashes.py
@@ -0,0 +1,201 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import js2c
+import os
+import re
+import sys
+
+FILENAME = "src/runtime.cc"
+FUNCTION = re.compile(r"^RUNTIME_FUNCTION\(Runtime_(\w+)")
+FUNCTIONEND = "}\n"
+MACRO = re.compile(r"^#define ([^ ]+)\(([^)]*)\) *([^\\]*)\\?\n$")
+FIRST_WORD = re.compile(r"^\s*(.*?)[\s({\[]")
+
+# Expand these macros; they define further runtime functions.
+EXPAND_MACROS = [
+  "BUFFER_VIEW_GETTER",
+  "DATA_VIEW_GETTER",
+  "DATA_VIEW_SETTER",
+  "ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION",
+  "FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION",
+  "RUNTIME_UNARY_MATH",
+  "TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION",
+]
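+#
+# For illustration, a macro of this kind looks roughly like (hypothetical
+# definition, not copied from runtime.cc):
+#
+#   #define RUNTIME_UNARY_MATH(Name)          \
+#     RUNTIME_FUNCTION(Runtime_Math##Name) {  \
+#       ...                                   \
+#     }
+#   RUNTIME_UNARY_MATH(Acos)
+#
+# which expands to a definition of Runtime_MathAcos that
+# FindRuntimeFunctions() below must see to avoid missing clashes.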
+
+
+class Function(object):
+  def __init__(self, match):
+    self.name = match.group(1)
+
+
+class Macro(object):
+  def __init__(self, match):
+    self.name = match.group(1)
+    self.args = [s.strip() for s in match.group(2).split(",")]
+    self.lines = []
+    self.indentation = 0
+    self.AddLine(match.group(3))
+
+  def AddLine(self, line):
+    if not line: return
+    if not self.lines:
+      # This is the first line, detect indentation.
+      self.indentation = len(line) - len(line.lstrip())
+    line = line.rstrip("\\\n ")
+    if not line: return
+    assert len(line[:self.indentation].strip()) == 0, \
+        ("expected whitespace: '%s', full line: '%s'" %
+         (line[:self.indentation], line))
+    line = line[self.indentation:]
+    if not line: return
+    self.lines.append(line + "\n")
+
+  def Finalize(self):
+    for arg in self.args:
+      pattern = re.compile(r"(##|\b)%s(##|\b)" % arg)
+      for i in range(len(self.lines)):
+        self.lines[i] = re.sub(pattern, "%%(%s)s" % arg, self.lines[i])
+
+  def FillIn(self, arg_values):
+    filler = {}
+    assert len(arg_values) == len(self.args)
+    for i in range(len(self.args)):
+      filler[self.args[i]] = arg_values[i]
+    result = []
+    for line in self.lines:
+      result.append(line % filler)
+    return result
+
+
+def ReadFileAndExpandMacros(filename):
+  found_macros = {}
+  expanded_lines = []
+  with open(filename, "r") as f:
+    found_macro = None
+    for line in f:
+      if found_macro is not None:
+        found_macro.AddLine(line)
+        if not line.endswith("\\\n"):
+          found_macro.Finalize()
+          found_macro = None
+        continue
+
+      match = MACRO.match(line)
+      if match:
+        found_macro = Macro(match)
+        if found_macro.name in EXPAND_MACROS:
+          found_macros[found_macro.name] = found_macro
+        else:
+          found_macro = None
+        continue
+
+      match = FIRST_WORD.match(line)
+      if match:
+        first_word = match.group(1)
+        if first_word in found_macros:
+          MACRO_CALL = re.compile(r"%s\(([^)]*)\)" % first_word)
+          match = MACRO_CALL.match(line)
+          assert match
+          args = [s.strip() for s in match.group(1).split(",")]
+          expanded_lines += found_macros[first_word].FillIn(args)
+          continue
+
+      expanded_lines.append(line)
+  return expanded_lines
+
+
+# Detects runtime functions by parsing FILENAME.
+def FindRuntimeFunctions():
+  functions = []
+  expanded_lines = ReadFileAndExpandMacros(FILENAME)
+  function = None
+  partial_line = ""
+  for line in expanded_lines:
+    # Multi-line definition support, ignoring macros.
+    if line.startswith("RUNTIME_FUNCTION") and not line.endswith("{\n"):
+      if line.endswith("\\\n"): continue
+      partial_line = line.rstrip()
+      continue
+    if partial_line:
+      partial_line += " " + line.strip()
+      if partial_line.endswith("{"):
+        line = partial_line
+        partial_line = ""
+      else:
+        continue
+
+    match = FUNCTION.match(line)
+    if match:
+      function = Function(match)
+      continue
+    if function is None: continue
+
+    if line == FUNCTIONEND:
+      if function is not None:
+        functions.append(function)
+        function = None
+  return functions
+
+
+class Builtin(object):
+  def __init__(self, match):
+    self.name = match.group(1)
+
+
+def FindJSNatives():
+  PATH = "src"
+  fileslist = []
+  for (root, dirs, files) in os.walk(PATH):
+    for f in files:
+      if f.endswith(".js"):
+        fileslist.append(os.path.join(root, f))
+  natives = []
+  regexp = re.compile(r"^function (\w+)\s*\((.*?)\) {")
+  matches = 0
+  for filename in fileslist:
+    with open(filename, "r") as f:
+      file_contents = f.read()
+    file_contents = js2c.ExpandInlineMacros(file_contents)
+    lines = file_contents.split("\n")
+    partial_line = ""
+    for line in lines:
+      if line.startswith("function") and not '{' in line:
+        partial_line += line.rstrip()
+        continue
+      if partial_line:
+        partial_line += " " + line.strip()
+        if '{' in line:
+          line = partial_line
+          partial_line = ""
+        else:
+          continue
+      match = regexp.match(line)
+      if match:
+        natives.append(Builtin(match))
+  return natives
+
+
+def Main():
+  functions = FindRuntimeFunctions()
+  natives = FindJSNatives()
+  errors = 0
+  runtime_map = {}
+  for f in functions:
+    runtime_map[f.name] = 1
+  for b in natives:
+    if b.name in runtime_map:
+      print("JS_Native/Runtime_Function name clash: %s" % b.name)
+      errors += 1
+
+  if errors > 0:
+    return 1
+  print("Runtime/Natives name clashes: checked %d/%d functions, all good." %
+        (len(functions), len(natives)))
+  return 0
+
+
+if __name__ == "__main__":
+  sys.exit(Main())
diff --git a/tools/check-static-initializers.sh b/tools/check-static-initializers.sh
index 11ba080..da43170 100755
--- a/tools/check-static-initializers.sh
+++ b/tools/check-static-initializers.sh
@@ -32,8 +32,7 @@
 # Allow:
 #  - _GLOBAL__I__ZN2v810LineEditor6first_E
 #  - _GLOBAL__I__ZN2v88internal32AtomicOps_Internalx86CPUFeaturesE
-#  - _GLOBAL__I__ZN2v88internal8ThreadId18highest_thread_id_E
-expected_static_init_count=3
+expected_static_init_count=2
 
 v8_root=$(readlink -f $(dirname $BASH_SOURCE)/../)
 
diff --git a/tools/concatenate-files.py b/tools/concatenate-files.py
new file mode 100644
index 0000000..86bdf56
--- /dev/null
+++ b/tools/concatenate-files.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python
+#
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+#     * Redistributions of source code must retain the above copyright
+#       notice, this list of conditions and the following disclaimer.
+#     * Redistributions in binary form must reproduce the above
+#       copyright notice, this list of conditions and the following
+#       disclaimer in the documentation and/or other materials provided
+#       with the distribution.
+#     * Neither the name of Google Inc. nor the names of its
+#       contributors may be used to endorse or promote products derived
+#       from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+# This utility concatenates several files into one. On Unix-like systems
+# it is equivalent to:
+#   cat file1 file2 file3 ...files... > target
+#
+# The reason for writing a separate utility is that 'cat' is not available
+# on all supported build platforms, but Python is, and hence this provides
+# us with an easy and uniform way of doing this on all platforms.
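+#
+# Example invocation (file names are illustrative):
+#   python tools/concatenate-files.py part1.js part2.js combined.js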
+
+import optparse
+import sys
+
+
+def Concatenate(filenames):
+  """Concatenate files.
+
+  Args:
+    filenames: List of file names.
+               The last name is the target; all earlier ones are sources.
+
+  Returns:
+    True, if the operation was successful.
+  """
+  if len(filenames) < 2:
+    print "An error occured generating %s:\nNothing to do." % filenames[-1]
+    return False
+
+  try:
+    with open(filenames[-1], "wb") as target:
+      for filename in filenames[:-1]:
+        with open(filename, "rb") as current:
+          target.write(current.read())
+    return True
+  except IOError as e:
+    print "An error occured when writing %s:\n%s" % (filenames[-1], e)
+    return False
+
+
+def main():
+  parser = optparse.OptionParser()
+  parser.set_usage("""Concatenate several files into one.
+      Equivalent to: cat file1 ... > target.""")
+  (options, args) = parser.parse_args()
+  sys.exit(0 if Concatenate(args) else 1)
+
+
+if __name__ == "__main__":
+  main()
diff --git a/tools/cpu.sh b/tools/cpu.sh
new file mode 100755
index 0000000..8e8a243
--- /dev/null
+++ b/tools/cpu.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+CPUPATH=/sys/devices/system/cpu
+
+MAXID=$(cat $CPUPATH/present | awk -F- '{print $NF}')
+
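+# Note: writing the sysfs files below requires root privileges, e.g.:
+#   sudo tools/cpu.sh fast
+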
+set_governor() {
+  echo "Setting CPU frequency governor to \"$1\""
+  for (( i=0; i<=$MAXID; i++ )); do
+    echo "$1" > $CPUPATH/cpu$i/cpufreq/scaling_governor
+  done
+}
+
+dual_core() {
+  echo "Switching to dual-core mode"
+  for (( i=2; i<=$MAXID; i++ )); do
+    echo 0 > $CPUPATH/cpu$i/online
+  done
+}
+
+single_core() {
+  echo "Switching to single-core mode"
+  for (( i=1; i<=$MAXID; i++ )); do
+    echo 0 > $CPUPATH/cpu$i/online
+  done
+}
+
+
+all_cores() {
+  echo "Reactivating all CPU cores"
+  for (( i=2; i<=$MAXID; i++ )); do
+    echo 1 > $CPUPATH/cpu$i/online
+  done
+}
+
+case "$1" in
+  fast | performance)
+    set_governor "performance"
+    ;;
+  slow | powersave)
+    set_governor "powersave"
+    ;;
+  default | ondemand)
+    set_governor "ondemand"
+    ;;
+  dualcore | dual)
+    dual_core
+    ;;
+  singlecore | single)
+    single_core
+    ;;
+  allcores | all)
+    all_cores
+    ;;
+  *)
+    echo "Usage: $0 fast|slow|default|singlecore|dualcore|all"
+    exit 1
+    ;;
+esac
diff --git a/tools/detect-builtins.js b/tools/detect-builtins.js
new file mode 100644
index 0000000..2a476ba
--- /dev/null
+++ b/tools/detect-builtins.js
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
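+// Usage: run this file with d8 (e.g. "d8 tools/detect-builtins.js"); it
+// prints a JSON description of every builtin reachable from the global
+// object. tools/generate-builtins-tests.py consumes that output.
+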
+(function(global) {
+
+  var GetProperties = function(this_name, object) {
+    var result = {};
+    try {
+      var names = Object.getOwnPropertyNames(object);
+    } catch(e) {
+      return;
+    }
+    for (var i = 0; i < names.length; ++i) {
+      var name = names[i];
+      if (typeof object === "function") {
+        if (name === "length" ||
+            name === "name" ||
+            name === "arguments" ||
+            name === "caller" ||
+            name === "prototype") {
+          continue;
+        }
+      }
+      // Avoid endless recursion.
+      if (this_name === "prototype" && name === "constructor") continue;
+      // Could get this from the parent, but having it locally is easier.
+      var property = { "name": name };
+      try {
+        var value = object[name];
+      } catch(e) {
+        property.type = "getter";
+        result[name] = property;
+        continue;
+      }
+      var type = typeof value;
+      property.type = type;
+      if (type === "function") {
+        property.length = value.length;
+        property.prototype = GetProperties("prototype", value.prototype);
+      }
+      property.properties = GetProperties(name, value);
+      result[name] = property;
+    }
+    return result;
+  };
+
+  var g = GetProperties("", global);
+  print(JSON.stringify(g, undefined, 2));
+
+})(this);  // Must wrap in anonymous closure or it'll detect itself as builtin.
diff --git a/tools/disasm.py b/tools/disasm.py
index 6fa81ca..cc7ef06 100644
--- a/tools/disasm.py
+++ b/tools/disasm.py
@@ -49,7 +49,8 @@
   "ia32": "-m i386",
   "x64": "-m i386 -M x86-64",
   "arm": "-m arm",  # Not supported by our objdump build.
-  "mips": "-m mips"  # Not supported by our objdump build.
+  "mips": "-m mips",  # Not supported by our objdump build.
+  "arm64": "-m aarch64"
 }
 
 
diff --git a/tools/external-reference-check.py b/tools/external-reference-check.py
new file mode 100644
index 0000000..386d4a9
--- /dev/null
+++ b/tools/external-reference-check.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import re
+import os
+import sys
+
+DECLARE_FILE = "src/assembler.h"
+REGISTER_FILE = "src/serialize.cc"
+DECLARE_RE = re.compile(r"\s*static ExternalReference ([^(]+)\(")
+REGISTER_RE = re.compile(r"\s*Add\(ExternalReference::([^(]+)\(")
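+# These match lines such as (illustrative examples, not verbatim source):
+#   static ExternalReference address_of_stack_limit(Isolate* isolate);
+#   Add(ExternalReference::address_of_stack_limit(isolate).address(), ...);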
+
+WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
+
+# Ignore those.
+BLACKLISTED = [
+  "page_flags",
+  "math_exp_constants",
+  "math_exp_log_table",
+  "ForDeoptEntry",
+]
+
+def Find(filename, pattern):
+  references = []
+  with open(filename, "r") as f:
+    for line in f:
+      match = pattern.match(line)
+      if match:
+        references.append(match.group(1))
+  return references
+
+def Main():
+  declarations = Find(DECLARE_FILE, DECLARE_RE)
+  registrations = Find(REGISTER_FILE, REGISTER_RE)
+  difference = list(set(declarations) - set(registrations) - set(BLACKLISTED))
+  for reference in difference:
+    print("Declared but not registered: ExternalReference::%s" % reference)
+  return len(difference) > 0
+
+if __name__ == "__main__":
+  sys.exit(Main())
diff --git a/tools/gcmole/Makefile b/tools/gcmole/Makefile
index 764245c..ee43c00 100644
--- a/tools/gcmole/Makefile
+++ b/tools/gcmole/Makefile
@@ -31,13 +31,12 @@
 CLANG_INCLUDE:=$(LLVM_SRC_ROOT)/tools/clang/include
 
 libgcmole.so: gcmole.cc
-	g++ -I$(LLVM_INCLUDE) -I$(CLANG_INCLUDE) -I. -D_DEBUG -D_GNU_SOURCE \
-	-D__STDC_LIMIT_MACROS -D__STDC_CONSTANT_MACROS -O3 		    \
-	-fomit-frame-pointer -fno-exceptions -fno-rtti -fPIC 	            \
-	-Woverloaded-virtual -Wcast-qual -fno-strict-aliasing               \
-	-pedantic -Wno-long-long -Wall 	                                    \
-	-W -Wno-unused-parameter -Wwrite-strings                            \
-	-shared -o libgcmole.so gcmole.cc
+	$(CXX) -I$(LLVM_INCLUDE) -I$(CLANG_INCLUDE) -I. -D_DEBUG              \
+	-D_GNU_SOURCE -D__STDC_CONSTANT_MACROS -D__STDC_FORMAT_MACROS         \
+	-D__STDC_LIMIT_MACROS -O3 -fomit-frame-pointer -fno-exceptions        \
+	-fno-rtti -fPIC -Woverloaded-virtual -Wcast-qual -fno-strict-aliasing \
+	-pedantic -Wno-long-long -Wall -W -Wno-unused-parameter               \
+	-Wwrite-strings -std=c++0x -shared -o libgcmole.so gcmole.cc
 
 clean:
-	rm -f libgcmole.so
+	$(RM) libgcmole.so
diff --git a/tools/gcmole/bootstrap.sh b/tools/gcmole/bootstrap.sh
index baa0b1f..ac6593c 100755
--- a/tools/gcmole/bootstrap.sh
+++ b/tools/gcmole/bootstrap.sh
@@ -27,9 +27,12 @@
 # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-# This script will build libgcmole.so.
+# This script will build libgcmole.so. Building a recent clang needs a
+# recent GCC, so if you explicitly want to use GCC 4.8, use:
+#
+#    CC=gcc-4.8 CPP=cpp-4.8 CXX=g++-4.8 CXXFLAGS=-static-libstdc++ CXXCPP=cpp-4.8 ./bootstrap.sh
 
-CLANG_RELEASE=2.9
+CLANG_RELEASE=3.5
 
 THIS_DIR="$(dirname "${0}")"
 LLVM_DIR="${THIS_DIR}/../../third_party/llvm"
@@ -110,7 +113,7 @@
   # See http://crbug.com/256342
   STRIP_FLAGS=-x
 fi
-strip ${STRIP_FLAGS} Release/bin/clang
+strip ${STRIP_FLAGS} Release+Asserts/bin/clang
 cd -
 
 # Build libgcmole.so
@@ -122,5 +125,5 @@
 echo
 echo You can now run gcmole using this command:
 echo
-echo CLANG_BIN=\"third_party/llvm/Release/bin\" lua tools/gcmole/gcmole.lua
+echo CLANG_BIN=\"third_party/llvm/Release+Asserts/bin\" lua tools/gcmole/gcmole.lua
 echo
diff --git a/tools/gcmole/gcmole.cc b/tools/gcmole/gcmole.cc
index bdff189..9f1f781 100644
--- a/tools/gcmole/gcmole.cc
+++ b/tools/gcmole/gcmole.cc
@@ -51,8 +51,8 @@
 static bool GetMangledName(clang::MangleContext* ctx,
                            const clang::NamedDecl* decl,
                            MangledName* result) {
-  if (!isa<clang::CXXConstructorDecl>(decl) &&
-      !isa<clang::CXXDestructorDecl>(decl)) {
+  if (!llvm::isa<clang::CXXConstructorDecl>(decl) &&
+      !llvm::isa<clang::CXXDestructorDecl>(decl)) {
     llvm::SmallVector<char, 512> output;
     llvm::raw_svector_ostream out(output);
     ctx->mangleName(decl, out);
@@ -74,7 +74,7 @@
 
 static bool IsExternalVMState(const clang::ValueDecl* var) {
   const clang::EnumConstantDecl* enum_constant =
-      dyn_cast<clang::EnumConstantDecl>(var);
+      llvm::dyn_cast<clang::EnumConstantDecl>(var);
   if (enum_constant != NULL && enum_constant->getNameAsString() == EXTERNAL) {
     clang::QualType type = enum_constant->getType();
     return (type.getAsString() == STATE_TAG);
@@ -109,11 +109,10 @@
     clang::DeclContext::lookup_result result =
         decl_ctx_->lookup(ResolveName(n));
 
-    clang::DeclContext::lookup_iterator end = result.second;
-    for (clang::DeclContext::lookup_iterator i = result.first;
-         i != end;
+    clang::DeclContext::lookup_iterator end = result.end();
+    for (clang::DeclContext::lookup_iterator i = result.begin(); i != end;
          i++) {
-      if (isa<T>(*i)) return cast<T>(*i);
+      if (llvm::isa<T>(*i)) return llvm::cast<T>(*i);
     }
 
     return NULL;
@@ -208,13 +207,13 @@
     : public clang::ASTConsumer,
       public clang::RecursiveASTVisitor<FunctionDeclarationFinder> {
  public:
-  explicit FunctionDeclarationFinder(clang::Diagnostic& d,
+  explicit FunctionDeclarationFinder(clang::DiagnosticsEngine& d,
                                      clang::SourceManager& sm,
                                      const std::vector<std::string>& args)
-      : d_(d), sm_(sm) { }
+      : d_(d), sm_(sm) {}
 
   virtual void HandleTranslationUnit(clang::ASTContext &ctx) {
-    mangle_context_ = clang::createItaniumMangleContext(ctx, d_);
+    mangle_context_ = clang::ItaniumMangleContext::create(ctx, d_);
     callees_printer_ = new CalleesPrinter(mangle_context_);
 
     TraverseDecl(ctx.getTranslationUnitDecl());
@@ -228,7 +227,7 @@
   }
 
  private:
-  clang::Diagnostic& d_;
+  clang::DiagnosticsEngine& d_;
   clang::SourceManager& sm_;
   clang::MangleContext* mangle_context_;
 
@@ -508,10 +507,8 @@
   FunctionAnalyzer(clang::MangleContext* ctx,
                    clang::DeclarationName handle_decl_name,
                    clang::CXXRecordDecl* object_decl,
-                   clang::CXXRecordDecl* smi_decl,
-                   clang::Diagnostic& d,
-                   clang::SourceManager& sm,
-                   bool dead_vars_analysis)
+                   clang::CXXRecordDecl* smi_decl, clang::DiagnosticsEngine& d,
+                   clang::SourceManager& sm, bool dead_vars_analysis)
       : ctx_(ctx),
         handle_decl_name_(handle_decl_name),
         object_decl_(object_decl),
@@ -519,8 +516,7 @@
         d_(d),
         sm_(sm),
         block_(NULL),
-        dead_vars_analysis_(dead_vars_analysis) {
-  }
+        dead_vars_analysis_(dead_vars_analysis) {}
 
 
   // --------------------------------------------------------------------------
@@ -528,19 +524,18 @@
   // --------------------------------------------------------------------------
 
   ExprEffect VisitExpr(clang::Expr* expr, const Environment& env) {
-#define VISIT(type) do {                                                \
-      clang::type* concrete_expr = dyn_cast_or_null<clang::type>(expr); \
-      if (concrete_expr != NULL) {                                      \
-        return Visit##type (concrete_expr, env);                        \
-      }                                                                 \
-    } while(0);
+#define VISIT(type)                                                         \
+  do {                                                                      \
+    clang::type* concrete_expr = llvm::dyn_cast_or_null<clang::type>(expr); \
+    if (concrete_expr != NULL) {                                            \
+      return Visit##type(concrete_expr, env);                               \
+    }                                                                       \
+  } while (0);
 
     VISIT(AbstractConditionalOperator);
     VISIT(AddrLabelExpr);
     VISIT(ArraySubscriptExpr);
     VISIT(BinaryOperator);
-    VISIT(BinaryTypeTraitExpr);
-    VISIT(BlockDeclRefExpr);
     VISIT(BlockExpr);
     VISIT(CallExpr);
     VISIT(CastExpr);
@@ -587,8 +582,8 @@
     VISIT(StmtExpr);
     VISIT(StringLiteral);
     VISIT(SubstNonTypeTemplateParmPackExpr);
+    VISIT(TypeTraitExpr);
     VISIT(UnaryOperator);
-    VISIT(UnaryTypeTraitExpr);
     VISIT(VAArgExpr);
 #undef VISIT
 
@@ -604,7 +599,6 @@
   }
 
   IGNORE_EXPR(AddrLabelExpr);
-  IGNORE_EXPR(BinaryTypeTraitExpr);
   IGNORE_EXPR(BlockExpr);
   IGNORE_EXPR(CharacterLiteral);
   IGNORE_EXPR(ChooseExpr);
@@ -633,7 +627,7 @@
   IGNORE_EXPR(StmtExpr);
   IGNORE_EXPR(StringLiteral);
   IGNORE_EXPR(SubstNonTypeTemplateParmPackExpr);
-  IGNORE_EXPR(UnaryTypeTraitExpr);
+  IGNORE_EXPR(TypeTraitExpr);
   IGNORE_EXPR(VAArgExpr);
   IGNORE_EXPR(GNUNullExpr);
   IGNORE_EXPR(OverloadExpr);
@@ -654,12 +648,9 @@
   }
 
   bool IsRawPointerVar(clang::Expr* expr, std::string* var_name) {
-    if (isa<clang::BlockDeclRefExpr>(expr)) {
-      *var_name = cast<clang::BlockDeclRefExpr>(expr)->getDecl()->
-          getNameAsString();
-      return true;
-    } else if (isa<clang::DeclRefExpr>(expr)) {
-      *var_name = cast<clang::DeclRefExpr>(expr)->getDecl()->getNameAsString();
+    if (llvm::isa<clang::DeclRefExpr>(expr)) {
+      *var_name =
+          llvm::cast<clang::DeclRefExpr>(expr)->getDecl()->getNameAsString();
       return true;
     }
     return false;
@@ -707,12 +698,7 @@
     return VisitExpr(expr->getArgument(), env);
   }
 
-  DECL_VISIT_EXPR(CXXNewExpr) {
-    return Par(expr,
-               expr->getNumConstructorArgs(),
-               expr->getConstructorArgs(),
-               env);
-  }
+  DECL_VISIT_EXPR(CXXNewExpr) { return VisitExpr(expr->getInitializer(), env); }
 
   DECL_VISIT_EXPR(ExprWithCleanups) {
     return VisitExpr(expr->getSubExpr(), env);
@@ -766,10 +752,6 @@
     return Use(expr, expr->getDecl(), env);
   }
 
-  DECL_VISIT_EXPR(BlockDeclRefExpr) {
-    return Use(expr, expr->getDecl(), env);
-  }
-
   ExprEffect Par(clang::Expr* parent,
                  int n,
                  clang::Expr** exprs,
@@ -844,7 +826,7 @@
     CallProps props;
 
     clang::CXXMemberCallExpr* memcall =
-        dyn_cast_or_null<clang::CXXMemberCallExpr>(call);
+        llvm::dyn_cast_or_null<clang::CXXMemberCallExpr>(call);
     if (memcall != NULL) {
       clang::Expr* receiver = memcall->getImplicitObjectArgument();
       props.SetEffect(0, VisitExpr(receiver, env));
@@ -870,14 +852,15 @@
   // --------------------------------------------------------------------------
 
   Environment VisitStmt(clang::Stmt* stmt, const Environment& env) {
-#define VISIT(type) do {                                                \
-      clang::type* concrete_stmt = dyn_cast_or_null<clang::type>(stmt); \
-      if (concrete_stmt != NULL) {                                      \
-        return Visit##type (concrete_stmt, env);                        \
-      }                                                                 \
-    } while(0);
+#define VISIT(type)                                                         \
+  do {                                                                      \
+    clang::type* concrete_stmt = llvm::dyn_cast_or_null<clang::type>(stmt); \
+    if (concrete_stmt != NULL) {                                            \
+      return Visit##type(concrete_stmt, env);                               \
+    }                                                                       \
+  } while (0);
 
-    if (clang::Expr* expr = dyn_cast_or_null<clang::Expr>(stmt)) {
+    if (clang::Expr* expr = llvm::dyn_cast_or_null<clang::Expr>(stmt)) {
       return env.ApplyEffect(VisitExpr(expr, env));
     }
 
@@ -1078,11 +1061,12 @@
   const clang::TagType* ToTagType(const clang::Type* t) {
     if (t == NULL) {
       return NULL;
-    } else if (isa<clang::TagType>(t)) {
-      return cast<clang::TagType>(t);
-    } else if (isa<clang::SubstTemplateTypeParmType>(t)) {
-      return ToTagType(cast<clang::SubstTemplateTypeParmType>(t)->
-                           getReplacementType().getTypePtr());
+    } else if (llvm::isa<clang::TagType>(t)) {
+      return llvm::cast<clang::TagType>(t);
+    } else if (llvm::isa<clang::SubstTemplateTypeParmType>(t)) {
+      return ToTagType(llvm::cast<clang::SubstTemplateTypeParmType>(t)
+                           ->getReplacementType()
+                           .getTypePtr());
     } else {
       return NULL;
     }
@@ -1095,7 +1079,7 @@
 
   bool IsRawPointerType(clang::QualType qtype) {
     const clang::PointerType* type =
-        dyn_cast_or_null<clang::PointerType>(qtype.getTypePtrOrNull());
+        llvm::dyn_cast_or_null<clang::PointerType>(qtype.getTypePtrOrNull());
     if (type == NULL) return false;
 
     const clang::TagType* pointee =
@@ -1103,7 +1087,7 @@
     if (pointee == NULL) return false;
 
     clang::CXXRecordDecl* record =
-        dyn_cast_or_null<clang::CXXRecordDecl>(pointee->getDecl());
+        llvm::dyn_cast_or_null<clang::CXXRecordDecl>(pointee->getDecl());
     if (record == NULL) return false;
 
     if (!InV8Namespace(record)) return false;
@@ -1117,7 +1101,7 @@
   }
 
   Environment VisitDecl(clang::Decl* decl, const Environment& env) {
-    if (clang::VarDecl* var = dyn_cast<clang::VarDecl>(decl)) {
+    if (clang::VarDecl* var = llvm::dyn_cast<clang::VarDecl>(decl)) {
       Environment out = var->hasInit() ? VisitStmt(var->getInit(), env) : env;
 
       if (IsRawPointerType(var->getType())) {
@@ -1177,7 +1161,8 @@
  private:
   void ReportUnsafe(const clang::Expr* expr, const std::string& msg) {
     d_.Report(clang::FullSourceLoc(expr->getExprLoc(), sm_),
-              d_.getCustomDiagID(clang::Diagnostic::Warning, msg));
+              d_.getCustomDiagID(clang::DiagnosticsEngine::Warning, "%0"))
+        << msg;
   }
 
 
@@ -1186,7 +1171,7 @@
   clang::CXXRecordDecl* object_decl_;
   clang::CXXRecordDecl* smi_decl_;
 
-  clang::Diagnostic& d_;
+  clang::DiagnosticsEngine& d_;
   clang::SourceManager& sm_;
 
   Block* block_;
@@ -1197,8 +1182,7 @@
 class ProblemsFinder : public clang::ASTConsumer,
                        public clang::RecursiveASTVisitor<ProblemsFinder> {
  public:
-  ProblemsFinder(clang::Diagnostic& d,
-                 clang::SourceManager& sm,
+  ProblemsFinder(clang::DiagnosticsEngine& d, clang::SourceManager& sm,
                  const std::vector<std::string>& args)
       : d_(d), sm_(sm), dead_vars_analysis_(false) {
     for (unsigned i = 0; i < args.size(); ++i) {
@@ -1224,14 +1208,9 @@
     if (smi_decl != NULL) smi_decl = smi_decl->getDefinition();
 
     if (object_decl != NULL && smi_decl != NULL) {
-      function_analyzer_ =
-          new FunctionAnalyzer(clang::createItaniumMangleContext(ctx, d_),
-                               r.ResolveName("Handle"),
-                               object_decl,
-                               smi_decl,
-                               d_,
-                               sm_,
-                               dead_vars_analysis_);
+      function_analyzer_ = new FunctionAnalyzer(
+          clang::ItaniumMangleContext::create(ctx, d_), r.ResolveName("Handle"),
+          object_decl, smi_decl, d_, sm_, dead_vars_analysis_);
       TraverseDecl(ctx.getTranslationUnitDecl());
     } else {
       if (object_decl == NULL) {
@@ -1249,7 +1228,7 @@
   }
 
  private:
-  clang::Diagnostic& d_;
+  clang::DiagnosticsEngine& d_;
   clang::SourceManager& sm_;
   bool dead_vars_analysis_;
 
diff --git a/tools/gcmole/gcmole.lua b/tools/gcmole/gcmole.lua
index 706f4de..d287f7b 100644
--- a/tools/gcmole/gcmole.lua
+++ b/tools/gcmole/gcmole.lua
@@ -93,14 +93,16 @@
 local function MakeClangCommandLine(plugin, plugin_args, triple, arch_define)
    if plugin_args then
      for i = 1, #plugin_args do
-        plugin_args[i] = "-plugin-arg-" .. plugin .. " " .. plugin_args[i]
+        plugin_args[i] = "-Xclang -plugin-arg-" .. plugin
+           .. " -Xclang " .. plugin_args[i]
      end
      plugin_args = " " .. table.concat(plugin_args, " ")
    end
-   return CLANG_BIN .. "/clang -cc1 -load " .. CLANG_PLUGINS .. "/libgcmole.so"
-      .. " -plugin "  .. plugin
+   return CLANG_BIN .. "/clang++ -std=c++11 -c "
+      .. " -Xclang -load -Xclang " .. CLANG_PLUGINS .. "/libgcmole.so"
+      .. " -Xclang -plugin -Xclang "  .. plugin
       .. (plugin_args or "")
-      .. " -triple " .. triple
+      .. " -Xclang -triple -Xclang " .. triple
       .. " -D" .. arch_define
       .. " -DENABLE_DEBUGGER_SUPPORT"
       .. " -DV8_I18N_SUPPORT"
diff --git a/tools/gdbinit b/tools/gdbinit
new file mode 100644
index 0000000..20cdff6
--- /dev/null
+++ b/tools/gdbinit
@@ -0,0 +1,33 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+# Print HeapObjects.
+define job
+print ((v8::internal::HeapObject*)($arg0))->Print()
+end
+document job
+Print a v8 JavaScript object
+Usage: job tagged_ptr
+end
+
+# Print Code objects containing given PC.
+define jco
+job (v8::internal::Isolate::Current()->FindCodeObject((v8::internal::Address)$arg0))
+end
+document jco
+Print a v8 Code object from an internal code address
+Usage: jco pc
+end
+
+# Print JavaScript stack trace.
+define jst
+print v8::internal::Isolate::Current()->PrintStack(stdout)
+end
+document jst
+Print the current JavaScript stack trace
+Usage: jst
+end
+
+set disassembly-flavor intel
+set disable-randomization off
diff --git a/tools/gen-postmortem-metadata.py b/tools/gen-postmortem-metadata.py
index fff2e34..04a1ea8 100644
--- a/tools/gen-postmortem-metadata.py
+++ b/tools/gen-postmortem-metadata.py
@@ -61,7 +61,7 @@
 
     { 'name': 'StringEncodingMask',     'value': 'kStringEncodingMask' },
     { 'name': 'TwoByteStringTag',       'value': 'kTwoByteStringTag' },
-    { 'name': 'AsciiStringTag',         'value': 'kOneByteStringTag' },
+    { 'name': 'OneByteStringTag',       'value': 'kOneByteStringTag' },
 
     { 'name': 'StringRepresentationMask',
         'value': 'kStringRepresentationMask' },
@@ -80,6 +80,16 @@
     { 'name': 'SmiShiftSize',           'value': 'kSmiShiftSize' },
     { 'name': 'PointerSizeLog2',        'value': 'kPointerSizeLog2' },
 
+    { 'name': 'OddballFalse',           'value': 'Oddball::kFalse' },
+    { 'name': 'OddballTrue',            'value': 'Oddball::kTrue' },
+    { 'name': 'OddballTheHole',         'value': 'Oddball::kTheHole' },
+    { 'name': 'OddballNull',            'value': 'Oddball::kNull' },
+    { 'name': 'OddballArgumentMarker',  'value': 'Oddball::kArgumentMarker' },
+    { 'name': 'OddballUndefined',       'value': 'Oddball::kUndefined' },
+    { 'name': 'OddballUninitialized',   'value': 'Oddball::kUninitialized' },
+    { 'name': 'OddballOther',           'value': 'Oddball::kOther' },
+    { 'name': 'OddballException',       'value': 'Oddball::kException' },
+
     { 'name': 'prop_idx_first',
         'value': 'DescriptorArray::kFirstIndex' },
     { 'name': 'prop_type_field',
@@ -88,6 +98,10 @@
         'value': 'TRANSITION' },
     { 'name': 'prop_type_mask',
         'value': 'PropertyDetails::TypeField::kMask' },
+    { 'name': 'prop_index_mask',
+        'value': 'PropertyDetails::FieldIndexField::kMask' },
+    { 'name': 'prop_index_shift',
+        'value': 'PropertyDetails::FieldIndexField::kShift' },
 
     { 'name': 'prop_desc_key',
         'value': 'DescriptorArray::kDescriptorKey' },
@@ -98,6 +112,20 @@
     { 'name': 'prop_desc_size',
         'value': 'DescriptorArray::kDescriptorSize' },
 
+    { 'name': 'elements_fast_holey_elements',
+        'value': 'FAST_HOLEY_ELEMENTS' },
+    { 'name': 'elements_fast_elements',
+        'value': 'FAST_ELEMENTS' },
+    { 'name': 'elements_dictionary_elements',
+        'value': 'DICTIONARY_ELEMENTS' },
+
+    { 'name': 'bit_field2_elements_kind_mask',
+       'value': 'Map::kElementsKindMask' },
+    { 'name': 'bit_field2_elements_kind_shift',
+       'value': 'Map::kElementsKindShift' },
+    { 'name': 'bit_field3_dictionary_map_shift',
+        'value': 'Map::DictionaryMap::kShift' },
+
     { 'name': 'off_fp_context',
         'value': 'StandardFrameConstants::kContextOffset' },
     { 'name': 'off_fp_constant_pool',
@@ -120,6 +148,16 @@
     'Map, instance_attributes, int, kInstanceAttributesOffset',
     'Map, inobject_properties, int, kInObjectPropertiesOffset',
     'Map, instance_size, int, kInstanceSizeOffset',
+    'Map, bit_field, char, kBitFieldOffset',
+    'Map, bit_field2, char, kBitField2Offset',
+    'Map, bit_field3, SMI, kBitField3Offset',
+    'Map, prototype, Object, kPrototypeOffset',
+    'NameDictionaryShape, prefix_size, int, kPrefixSize',
+    'NameDictionaryShape, entry_size, int, kEntrySize',
+    'SeededNumberDictionaryShape, prefix_size, int, kPrefixSize',
+    'UnseededNumberDictionaryShape, prefix_size, int, kPrefixSize',
+    'NumberDictionaryShape, entry_size, int, kEntrySize',
+    'Oddball, kind_offset, int, kKindOffset',
     'HeapNumber, value, double, kValueOffset',
     'ConsString, first, String, kFirstOffset',
     'ConsString, second, String, kSecondOffset',
@@ -277,11 +315,11 @@
                 #
                 # Mapping string types is more complicated.  Both types and
                 # class names for Strings specify a representation (e.g., Seq,
-                # Cons, External, or Sliced) and an encoding (TwoByte or Ascii),
+                # Cons, External, or Sliced) and an encoding (TwoByte/OneByte),
                 # In the simplest case, both of these are explicit in both
                 # names, as in:
                 #
-                #       EXTERNAL_ASCII_STRING_TYPE => ExternalAsciiString
+                #       EXTERNAL_ONE_BYTE_STRING_TYPE => ExternalOneByteString
                 #
                 # However, either the representation or encoding can be omitted
                 # from the type name, in which case "Seq" and "TwoByte" are
@@ -292,7 +330,7 @@
                 # Additionally, sometimes the type name has more information
                 # than the class, as in:
                 #
-                #       CONS_ASCII_STRING_TYPE => ConsString
+                #       CONS_ONE_BYTE_STRING_TYPE => ConsString
                 #
                 # To figure this out dynamically, we first check for a
                 # representation and encoding and add them if they're not
@@ -303,19 +341,19 @@
                         if (cctype.find('Cons') == -1 and
                             cctype.find('External') == -1 and
                             cctype.find('Sliced') == -1):
-                                if (cctype.find('Ascii') != -1):
-                                        cctype = re.sub('AsciiString$',
+                                if (cctype.find('OneByte') != -1):
+                                        cctype = re.sub('OneByteString$',
                                             'SeqOneByteString', cctype);
                                 else:
                                         cctype = re.sub('String$',
                                             'SeqString', cctype);
 
-                        if (cctype.find('Ascii') == -1):
+                        if (cctype.find('OneByte') == -1):
                                 cctype = re.sub('String$', 'TwoByteString',
                                     cctype);
 
                         if (not (cctype in klasses)):
-                                cctype = re.sub('Ascii', '', cctype);
+                                cctype = re.sub('OneByte', '', cctype);
                                 cctype = re.sub('TwoByte', '', cctype);
 
                 #
@@ -361,7 +399,7 @@
                     'value': '%s::%s' % (klass, offset)
                 });
 
-        assert(kind == 'SMI_ACCESSORS');
+        assert(kind == 'SMI_ACCESSORS' or kind == 'ACCESSORS_TO_SMI');
         klass = args[0];
         field = args[1];
         offset = args[2];
@@ -385,7 +423,8 @@
         # may span multiple lines and may contain nested parentheses.  We also
         # call parse_field() to pick apart the invocation.
         #
-        prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE', 'SMI_ACCESSORS' ];
+        prefixes = [ 'ACCESSORS', 'ACCESSORS_GCSAFE',
+                     'SMI_ACCESSORS', 'ACCESSORS_TO_SMI' ];
         current = '';
         opens = 0;
 
diff --git a/tools/generate-builtins-tests.py b/tools/generate-builtins-tests.py
new file mode 100755
index 0000000..4e6961d
--- /dev/null
+++ b/tools/generate-builtins-tests.py
@@ -0,0 +1,158 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import json
+import optparse
+import os
+import random
+import shutil
+import subprocess
+import sys
+
+
+BLACKLIST = [
+  # Skip special d8 functions.
+  "load", "os", "print", "read", "readline", "quit"
+]
+
+
+def GetRandomObject():
+  return random.choice([
+    "0", "1", "2.5", "0x1000", "\"string\"", "{foo: \"bar\"}", "[1, 2, 3]",
+    "function() { return 0; }"
+  ])
+
+
+g_var_index = 0
+
+
+def GetVars(result, num, first=[]):
+  global g_var_index
+  variables = []
+  for i in range(num):
+    variables.append("__v_%d" % g_var_index)
+    g_var_index += 1
+  for var in variables:
+    result.append("var %s = %s;" % (var, GetRandomObject()))
+  return ", ".join(first + variables)
+
+
+# Wraps |string| in try..catch.
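+# For example, TryCatch(result, "f();", "print(e);") appends the line:
+#   try { f(); } catch(e) { print(e); }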
+def TryCatch(result, string, exception_behavior=""):
+  result.append("try { %s } catch(e) { %s }" % (string, exception_behavior))
+
+
+def BuildTests(function, full_name, options):
+  assert function["type"] == "function"
+  global g_var_index
+  g_var_index = 0
+  result = ["// AUTO-GENERATED BY tools/generate-builtins-tests.py.\n"]
+  result.append("// Function call test:")
+  length = function["length"]
+  TryCatch(result, "%s(%s);" % (full_name, GetVars(result, length)))
+
+  if "prototype" in function:
+    proto = function["prototype"]
+    result.append("\n// Constructor test:")
+    TryCatch(result,
+             "var recv = new %s(%s);" % (full_name, GetVars(result, length)),
+             "var recv = new Object();")
+
+    getters = []
+    methods = []
+    for prop in proto:
+      proto_property = proto[prop]
+      proto_property_type = proto_property["type"]
+      if proto_property_type == "getter":
+        getters.append(proto_property)
+        result.append("recv.__defineGetter__(\"%s\", "
+                      "function() { return %s; });" %
+                      (proto_property["name"], GetVars(result, 1)))
+      if proto_property_type == "number":
+        result.append("recv.__defineGetter__(\"%s\", "
+                      "function() { return %s; });" %
+                      (proto_property["name"], GetVars(result, 1)))
+      if proto_property_type == "function":
+        methods.append(proto_property)
+    if getters:
+      result.append("\n// Getter tests:")
+      for getter in getters:
+        result.append("print(recv.%s);" % getter["name"])
+    if methods:
+      result.append("\n// Method tests:")
+      for method in methods:
+        args = GetVars(result, method["length"], ["recv"])
+        call = "%s.prototype.%s.call(%s)" % (full_name, method["name"], args)
+        TryCatch(result, call)
+
+  filename = os.path.join(options.outdir, "%s.js" % (full_name))
+  with open(filename, "w") as f:
+    f.write("\n".join(result))
+    f.write("\n")
+
+
+def VisitObject(obj, path, options):
+  obj_type = obj["type"]
+  obj_name = "%s%s" % (path, obj["name"])
+  if obj_type == "function":
+    BuildTests(obj, obj_name, options)
+  if "properties" in obj:
+    for prop_name in obj["properties"]:
+      prop = obj["properties"][prop_name]
+      VisitObject(prop, "%s." % (obj_name), options)
+
+
+def ClearGeneratedFiles(options):
+  if os.path.exists(options.outdir):
+    shutil.rmtree(options.outdir)
+
+
+def GenerateTests(options):
+  ClearGeneratedFiles(options)  # Re-generate everything.
+  output = subprocess.check_output(
+      "%s %s" % (options.d8, options.script), shell=True).strip()
+  objects = json.loads(output)
+
+  os.makedirs(options.outdir)
+  for obj_name in objects:
+    if obj_name in BLACKLIST: continue
+    obj = objects[obj_name]
+    VisitObject(obj, "", options)
+
+
+def BuildOptions():
+  result = optparse.OptionParser()
+  result.add_option("--d8", help="d8 binary to use",
+                    default="out/ia32.release/d8")
+  result.add_option("--outdir", help="directory where to place generated tests",
+                    default="test/mjsunit/builtins-gen")
+  result.add_option("--script", help="builtins detector script to run in d8",
+                    default="tools/detect-builtins.js")
+  return result
+
+
+def Main():
+  parser = BuildOptions()
+  (options, args) = parser.parse_args()
+  if len(args) != 1 or args[0] == "help":
+    parser.print_help()
+    return 1
+  action = args[0]
+
+  if action == "generate":
+    GenerateTests(options)
+    return 0
+
+  if action == "clear":
+    ClearGeneratedFiles(options)
+    return 0
+
+  print("Unknown action: %s" % action)
+  parser.print_help()
+  return 1
+
+
+if __name__ == "__main__":
+  sys.exit(Main())
diff --git a/tools/generate-runtime-tests.py b/tools/generate-runtime-tests.py
deleted file mode 100755
index 04c9043..0000000
--- a/tools/generate-runtime-tests.py
+++ /dev/null
@@ -1,1323 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-import itertools
-import js2c
-import multiprocessing
-import optparse
-import os
-import random
-import re
-import shutil
-import signal
-import string
-import subprocess
-import sys
-import time
-
-FILENAME = "src/runtime.cc"
-HEADERFILENAME = "src/runtime.h"
-FUNCTION = re.compile("^RUNTIME_FUNCTION\(Runtime_(\w+)")
-ARGSLENGTH = re.compile(".*ASSERT\(.*args\.length\(\) == (\d+)\);")
-FUNCTIONEND = "}\n"
-MACRO = re.compile(r"^#define ([^ ]+)\(([^)]*)\) *([^\\]*)\\?\n$")
-FIRST_WORD = re.compile("^\s*(.*?)[\s({\[]")
-
-WORKSPACE = os.path.abspath(os.path.join(os.path.dirname(sys.argv[0]), ".."))
-BASEPATH = os.path.join(WORKSPACE, "test", "mjsunit", "runtime-gen")
-THIS_SCRIPT = os.path.relpath(sys.argv[0])
-
-# Expand these macros, they define further runtime functions.
-EXPAND_MACROS = [
-  "BUFFER_VIEW_GETTER",
-  "DATA_VIEW_GETTER",
-  "DATA_VIEW_SETTER",
-  "RUNTIME_UNARY_MATH",
-]
-# TODO(jkummerow): We could also whitelist the following macros, but the
-# functions they define are so trivial that it's unclear how much benefit
-# that would provide:
-# ELEMENTS_KIND_CHECK_RUNTIME_FUNCTION
-# FIXED_TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
-# TYPED_ARRAYS_CHECK_RUNTIME_FUNCTION
-
-# Counts of functions in each detection state. These are used to assert
-# that the parser doesn't bit-rot. Change the values as needed when you add,
-# remove or change runtime functions, but make sure we don't lose our ability
-# to parse them!
-EXPECTED_FUNCTION_COUNT = 358
-EXPECTED_FUZZABLE_COUNT = 326
-EXPECTED_CCTEST_COUNT = 6
-EXPECTED_UNKNOWN_COUNT = 4
-EXPECTED_BUILTINS_COUNT = 798
-
-
-# Don't call these at all.
-BLACKLISTED = [
-  "Abort",  # Kills the process.
-  "AbortJS",  # Kills the process.
-  "CompileForOnStackReplacement",  # Riddled with ASSERTs.
-  "IS_VAR",  # Not implemented in the runtime.
-  "ListNatives",  # Not available in Release mode.
-  "SetAllocationTimeout",  # Too slow for fuzzing.
-  "SystemBreak",  # Kills (int3) the process.
-
-  # These are weird. They violate some invariants when called after
-  # bootstrapping.
-  "DisableAccessChecks",
-  "EnableAccessChecks",
-
-  # The current LiveEdit implementation relies on and messes with internals
-  # in ways that makes it fundamentally unfuzzable :-(
-  "DebugGetLoadedScripts",
-  "DebugSetScriptSource",
-  "LiveEditFindSharedFunctionInfosForScript",
-  "LiveEditFunctionSourceUpdated",
-  "LiveEditGatherCompileInfo",
-  "LiveEditPatchFunctionPositions",
-  "LiveEditReplaceFunctionCode",
-  "LiveEditReplaceRefToNestedFunction",
-  "LiveEditReplaceScript",
-  "LiveEditRestartFrame",
-  "SetScriptBreakPoint",
-
-  # TODO(jkummerow): Fix these and un-blacklist them!
-  "CreateDateTimeFormat",
-  "CreateNumberFormat",
-]
-
-
-# These will always throw.
-THROWS = [
-  "CheckExecutionState",  # Needs to hit a break point.
-  "CheckIsBootstrapping",  # Needs to be bootstrapping.
-  "DebugEvaluate",  # Needs to hit a break point.
-  "DebugEvaluateGlobal",  # Needs to hit a break point.
-  "DebugIndexedInterceptorElementValue",  # Needs an indexed interceptor.
-  "DebugNamedInterceptorPropertyValue",  # Needs a named interceptor.
-  "DebugSetScriptSource",  # Checks compilation state of script.
-  "GetAllScopesDetails",  # Needs to hit a break point.
-  "GetFrameCount",  # Needs to hit a break point.
-  "GetFrameDetails",  # Needs to hit a break point.
-  "GetRootNaN",  # Needs to be bootstrapping.
-  "GetScopeCount",  # Needs to hit a break point.
-  "GetScopeDetails",  # Needs to hit a break point.
-  "GetStepInPositions",  # Needs to hit a break point.
-  "GetTemplateField",  # Needs a {Function,Object}TemplateInfo.
-  "GetThreadCount",  # Needs to hit a break point.
-  "GetThreadDetails",  # Needs to hit a break point.
-  "IsAccessAllowedForObserver",  # Needs access-check-required object.
-  "UnblockConcurrentRecompilation"  # Needs --block-concurrent-recompilation.
-]
-
-
-# Definitions used in CUSTOM_KNOWN_GOOD_INPUT below.
-_BREAK_ITERATOR = (
-    "%GetImplFromInitializedIntlObject(new Intl.v8BreakIterator())")
-_COLLATOR = "%GetImplFromInitializedIntlObject(new Intl.Collator('en-US'))"
-_DATETIME_FORMAT = (
-    "%GetImplFromInitializedIntlObject(new Intl.DateTimeFormat('en-US'))")
-_NUMBER_FORMAT = (
-    "%GetImplFromInitializedIntlObject(new Intl.NumberFormat('en-US'))")
-
-
-# Custom definitions for function input that does not throw.
-# Format: "FunctionName": ["arg0", "arg1", ..., argslength].
-# None means "fall back to autodetected value".
-CUSTOM_KNOWN_GOOD_INPUT = {
-  "Apply": ["function() {}", None, None, None, None, None],
-  "ArrayBufferSliceImpl": [None, None, 0, None],
-  "ArrayConcat": ["[1, 'a']", None],
-  "BreakIteratorAdoptText": [_BREAK_ITERATOR, None, None],
-  "BreakIteratorBreakType": [_BREAK_ITERATOR, None],
-  "BreakIteratorCurrent": [_BREAK_ITERATOR, None],
-  "BreakIteratorFirst": [_BREAK_ITERATOR, None],
-  "BreakIteratorNext": [_BREAK_ITERATOR, None],
-  "CompileString": [None, "false", None],
-  "CreateBreakIterator": ["'en-US'", "{type: 'string'}", None, None],
-  "CreateJSFunctionProxy": [None, "function() {}", None, None, None],
-  "CreatePrivateSymbol": ["\"foo\"", None],
-  "CreateSymbol": ["\"foo\"", None],
-  "DateParseString": [None, "new Array(8)", None],
-  "DefineOrRedefineAccessorProperty": [None, None, "function() {}",
-                                       "function() {}", 2, None],
-  "FunctionBindArguments": [None, None, "undefined", None, None],
-  "GetBreakLocations": [None, 0, None],
-  "GetDefaultReceiver": ["function() {}", None],
-  "GetImplFromInitializedIntlObject": ["new Intl.NumberFormat('en-US')", None],
-  "InternalCompare": [_COLLATOR, None, None, None],
-  "InternalDateFormat": [_DATETIME_FORMAT, None, None],
-  "InternalDateParse": [_DATETIME_FORMAT, None, None],
-  "InternalNumberFormat": [_NUMBER_FORMAT, None, None],
-  "InternalNumberParse": [_NUMBER_FORMAT, None, None],
-  "IsSloppyModeFunction": ["function() {}", None],
-  "LoadMutableDouble": ["{foo: 1.2}", None, None],
-  "NewObjectFromBound": ["(function() {}).bind({})", None],
-  "NumberToRadixString": [None, "2", None],
-  "ParseJson": ["\"{}\"", 1],
-  "RegExpExecMultiple": [None, None, "['a']", "['a']", None],
-  "SetAccessorProperty": [None, None, "undefined", "undefined", None, None,
-                          None],
-  "SetIteratorInitialize": [None, None, "2", None],
-  "SetDebugEventListener": ["undefined", None, None],
-  "SetFunctionBreakPoint": [None, 200, None, None],
-  "StringBuilderConcat": ["[1, 2, 3]", 3, None, None],
-  "StringBuilderJoin": ["['a', 'b']", 4, None, None],
-  "StringMatch": [None, None, "['a', 'b']", None],
-  "StringNormalize": [None, 2, None],
-  "StringReplaceGlobalRegExpWithString": [None, None, None, "['a']", None],
-  "TypedArrayInitialize": [None, 6, "new ArrayBuffer(8)", None, 4, None],
-  "TypedArrayInitializeFromArrayLike": [None, 6, None, None, None],
-  "TypedArraySetFastCases": [None, None, "0", None],
-}
-
-
-# Types of arguments that cannot be generated in a JavaScript testcase.
-NON_JS_TYPES = [
-  "Code", "Context", "FixedArray", "FunctionTemplateInfo",
-  "JSFunctionResultCache", "JSMessageObject", "Map", "ScopeInfo",
-  "SharedFunctionInfo"]
-
-
-class Generator(object):
-
-  def RandomVariable(self, varname, vartype, simple):
-    if simple:
-      return self._Variable(varname, self.GENERATORS[vartype][0])
-    return self.GENERATORS[vartype][1](self, varname,
-                                       self.DEFAULT_RECURSION_BUDGET)
-
-  @staticmethod
-  def IsTypeSupported(typename):
-    return typename in Generator.GENERATORS
-
-  USUAL_SUSPECT_PROPERTIES = ["size", "length", "byteLength", "__proto__",
-                              "prototype", "0", "1", "-1"]
-  DEFAULT_RECURSION_BUDGET = 2
-  PROXY_TRAPS = """{
-      getOwnPropertyDescriptor: function(name) {
-        return {value: function() {}, configurable: true, writable: true,
-                enumerable: true};
-      },
-      getPropertyDescriptor: function(name) {
-        return {value: function() {}, configurable: true, writable: true,
-                enumerable: true};
-      },
-      getOwnPropertyNames: function() { return []; },
-      getPropertyNames: function() { return []; },
-      defineProperty: function(name, descriptor) {},
-      delete: function(name) { return true; },
-      fix: function() {}
-    }"""
-
-  def _Variable(self, name, value, fallback=None):
-    args = { "name": name, "value": value, "fallback": fallback }
-    if fallback:
-      wrapper = "try { %%s } catch(e) { var %(name)s = %(fallback)s; }" % args
-    else:
-      wrapper = "%s"
-    return [wrapper % ("var %(name)s = %(value)s;" % args)]
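-  # Illustrative output of _Variable: _Variable("x", "1 + 1", fallback="0")
-  # yields ["try { var x = 1 + 1; } catch(e) { var x = 0; }"]; without a
-  # fallback it is simply ["var x = 1 + 1;"].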
-
-  def _Boolean(self, name, recursion_budget):
-    return self._Variable(name, random.choice(["true", "false"]))
-
-  def _Oddball(self, name, recursion_budget):
-    return self._Variable(name,
-                          random.choice(["true", "false", "undefined", "null"]))
-
-  def _StrictMode(self, name, recursion_budget):
-    return self._Variable(name, random.choice([0, 1]))
-
-  def _Int32(self, name, recursion_budget=0):
-    die = random.random()
-    if die < 0.5:
-      value = random.choice([-3, -1, 0, 1, 2, 10, 515, 0x3fffffff, 0x7fffffff,
-                             0x40000000, -0x40000000, -0x80000000])
-    elif die < 0.75:
-      value = random.randint(-1000, 1000)
-    else:
-      value = random.randint(-0x80000000, 0x7fffffff)
-    return self._Variable(name, value)
-
-  def _Uint32(self, name, recursion_budget=0):
-    die = random.random()
-    if die < 0.5:
-      value = random.choice([0, 1, 2, 3, 4, 8, 0x3fffffff, 0x40000000,
-                             0x7fffffff, 0xffffffff])
-    elif die < 0.75:
-      value = random.randint(0, 1000)
-    else:
-      value = random.randint(0, 0xffffffff)
-    return self._Variable(name, value)
-
-  def _Smi(self, name, recursion_budget):
-    die = random.random()
-    if die < 0.5:
-      value = random.choice([-5, -1, 0, 1, 2, 3, 0x3fffffff, -0x40000000])
-    elif die < 0.75:
-      value = random.randint(-1000, 1000)
-    else:
-      value = random.randint(-0x40000000, 0x3fffffff)
-    return self._Variable(name, value)
-
-  def _Number(self, name, recursion_budget):
-    die = random.random()
-    if die < 0.5:
-      return self._Smi(name, recursion_budget)
-    elif die < 0.6:
-      value = random.choice(["Infinity", "-Infinity", "NaN", "-0",
-                             "1.7976931348623157e+308",  # Max value.
-                             "2.2250738585072014e-308",  # Min value.
-                             "4.9406564584124654e-324"])  # Min subnormal.
-    else:
-      value = random.lognormvariate(0, 15)
-    return self._Variable(name, value)
-
-  def _RawRandomString(self, minlength=0, maxlength=100,
-                       alphabet=string.ascii_letters):
-    length = random.randint(minlength, maxlength)
-    result = ""
-    for i in xrange(length):
-      result += random.choice(alphabet)
-    return result
-
-  def _SeqString(self, name, recursion_budget):
-    s1 = self._RawRandomString(1, 5)
-    s2 = self._RawRandomString(1, 5)
-    # 'foo' + 'bar'
-    return self._Variable(name, "\"%s\" + \"%s\"" % (s1, s2))
-
-  def _SeqTwoByteString(self, name):
-    s1 = self._RawRandomString(1, 5)
-    s2 = self._RawRandomString(1, 5)
-    # 'foo' + unicode + 'bar'
-    return self._Variable(name, "\"%s\" + \"\\u2082\" + \"%s\"" % (s1, s2))
-
-  def _SlicedString(self, name):
-    s = self._RawRandomString(20, 30)
-    # 'ffoo12345678901234567890'.substr(1)
-    return self._Variable(name, "\"%s\".substr(1)" % s)
-
-  def _ConsString(self, name):
-    s1 = self._RawRandomString(8, 15)
-    s2 = self._RawRandomString(8, 15)
-    # 'foo12345' + (function() { return 'bar12345';})()
-    return self._Variable(name,
-        "\"%s\" + (function() { return \"%s\";})()" % (s1, s2))
-
-  def _InternalizedString(self, name):
-    return self._Variable(name, "\"%s\"" % self._RawRandomString(0, 20))
-
-  def _String(self, name, recursion_budget):
-    die = random.random()
-    if die < 0.5:
-      string = random.choice(self.USUAL_SUSPECT_PROPERTIES)
-      return self._Variable(name, "\"%s\"" % string)
-    elif die < 0.6:
-      number_name = name + "_number"
-      result = self._Number(number_name, recursion_budget)
-      return result + self._Variable(name, "\"\" + %s" % number_name)
-    elif die < 0.7:
-      return self._SeqString(name, recursion_budget)
-    elif die < 0.8:
-      return self._ConsString(name)
-    elif die < 0.9:
-      return self._InternalizedString(name)
-    else:
-      return self._SlicedString(name)
-
-  def _Symbol(self, name, recursion_budget):
-    raw_string_name = name + "_1"
-    result = self._String(raw_string_name, recursion_budget)
-    return result + self._Variable(name, "Symbol(%s)" % raw_string_name)
-
-  def _Name(self, name, recursion_budget):
-    if random.random() < 0.2:
-      return self._Symbol(name, recursion_budget)
-    return self._String(name, recursion_budget)
-
-  def _JSValue(self, name, recursion_budget):
-    die = random.random()
-    raw_name = name + "_1"
-    if die < 0.33:
-      result = self._String(raw_name, recursion_budget)
-      return result + self._Variable(name, "new String(%s)" % raw_name)
-    elif die < 0.66:
-      result = self._Boolean(raw_name, recursion_budget)
-      return result + self._Variable(name, "new Boolean(%s)" % raw_name)
-    else:
-      result = self._Number(raw_name, recursion_budget)
-      return result + self._Variable(name, "new Number(%s)" % raw_name)
-
-  def _RawRandomPropertyName(self):
-    if random.random() < 0.5:
-      return random.choice(self.USUAL_SUSPECT_PROPERTIES)
-    return self._RawRandomString(0, 10)
-
-  def _AddProperties(self, name, result, recursion_budget):
-    propcount = random.randint(0, 3)
-    propname = None
-    for i in range(propcount):
-      die = random.random()
-      if die < 0.5:
-        propname = "%s_prop%d" % (name, i)
-        result += self._Name(propname, recursion_budget - 1)
-      else:
-        propname = "\"%s\"" % self._RawRandomPropertyName()
-      propvalue_name = "%s_val%d" % (name, i)
-      result += self._Object(propvalue_name, recursion_budget - 1)
-      result.append("try { %s[%s] = %s; } catch (e) {}" %
-                    (name, propname, propvalue_name))
-    if random.random() < 0.2 and propname:
-      # Force the object to slow mode.
-      result.append("delete %s[%s];" % (name, propname))
-
-  def _RandomElementIndex(self, element_name, result):
-    if random.random() < 0.5:
-      return random.randint(-1000, 1000)
-    result += self._Smi(element_name, 0)
-    return element_name
-
-  def _AddElements(self, name, result, recursion_budget):
-    elementcount = random.randint(0, 3)
-    for i in range(elementcount):
-      element_name = "%s_idx%d" % (name, i)
-      index = self._RandomElementIndex(element_name, result)
-      value_name = "%s_elt%d" % (name, i)
-      result += self._Object(value_name, recursion_budget - 1)
-      result.append("try { %s[%s] = %s; } catch(e) {}" %
-                    (name, index, value_name))
-
-  def _AddAccessors(self, name, result, recursion_budget):
-    accessorcount = random.randint(0, 3)
-    for i in range(accessorcount):
-      propname = self._RawRandomPropertyName()
-      what = random.choice(["get", "set"])
-      function_name = "%s_access%d" % (name, i)
-      result += self._PlainFunction(function_name, recursion_budget - 1)
-      result.append("try { Object.defineProperty(%s, \"%s\", {%s: %s}); } "
-                    "catch (e) {}" % (name, propname, what, function_name))
-
-  def _PlainArray(self, name, recursion_budget):
-    die = random.random()
-    if die < 0.5:
-      literal = random.choice(["[]", "[1, 2]", "[1.5, 2.5]",
-                               "['a', 'b', 1, true]"])
-      return self._Variable(name, literal)
-    else:
-      new = random.choice(["", "new "])
-      length = random.randint(0, 101000)
-      return self._Variable(name, "%sArray(%d)" % (new, length))
-
-  def _PlainObject(self, name, recursion_budget):
-    die = random.random()
-    if die < 0.67:
-      literal_propcount = random.randint(0, 3)
-      properties = []
-      result = []
-      for i in range(literal_propcount):
-        propname = self._RawRandomPropertyName()
-        propvalue_name = "%s_lit%d" % (name, i)
-        result += self._Object(propvalue_name, recursion_budget - 1)
-        properties.append("\"%s\": %s" % (propname, propvalue_name))
-      return result + self._Variable(name, "{%s}" % ", ".join(properties))
-    else:
-      return self._Variable(name, "new Object()")
-
-  def _JSArray(self, name, recursion_budget):
-    result = self._PlainArray(name, recursion_budget)
-    self._AddAccessors(name, result, recursion_budget)
-    self._AddProperties(name, result, recursion_budget)
-    self._AddElements(name, result, recursion_budget)
-    return result
-
-  def _RawRandomBufferLength(self):
-    if random.random() < 0.2:
-      return random.choice([0, 1, 8, 0x40000000, 0x80000000])
-    return random.randint(0, 1000)
-
-  def _JSArrayBuffer(self, name, recursion_budget):
-    length = self._RawRandomBufferLength()
-    return self._Variable(name, "new ArrayBuffer(%d)" % length)
-
-  def _JSDataView(self, name, recursion_budget):
-    buffer_name = name + "_buffer"
-    result = self._JSArrayBuffer(buffer_name, recursion_budget)
-    args = [buffer_name]
-    die = random.random()
-    if die < 0.67:
-      offset = self._RawRandomBufferLength()
-      args.append("%d" % offset)
-      if die < 0.33:
-        length = self._RawRandomBufferLength()
-        args.append("%d" % length)
-    result += self._Variable(name, "new DataView(%s)" % ", ".join(args),
-                             fallback="new DataView(new ArrayBuffer(8))")
-    return result
-
-  def _JSDate(self, name, recursion_budget):
-    die = random.random()
-    if die < 0.25:
-      return self._Variable(name, "new Date()")
-    elif die < 0.5:
-      ms_name = name + "_ms"
-      result = self._Number(ms_name, recursion_budget)
-      return result + self._Variable(name, "new Date(%s)" % ms_name)
-    elif die < 0.75:
-      str_name = name + "_str"
-      month = random.choice(["Jan", "Feb", "Mar", "Apr", "May", "Jun", "Jul",
-                             "Aug", "Sep", "Oct", "Nov", "Dec"])
-      day = random.randint(1, 28)
-      year = random.randint(1900, 2100)
-      hour = random.randint(0, 23)
-      minute = random.randint(0, 59)
-      second = random.randint(0, 59)
-      str_value = ("\"%s %s, %s %s:%s:%s\"" %
-                   (month, day, year, hour, minute, second))
-      result = self._Variable(str_name, str_value)
-      return result + self._Variable(name, "new Date(%s)" % str_name)
-    else:
-      components = tuple(map(lambda x: "%s_%s" % (name, x),
-                             ["y", "m", "d", "h", "min", "s", "ms"]))
-      return ([j for i in map(self._Int32, components) for j in i] +
-              self._Variable(name, "new Date(%s)" % ", ".join(components)))
-
-  def _PlainFunction(self, name, recursion_budget):
-    result_name = "result"
-    body = ["function() {"]
-    body += self._Object(result_name, recursion_budget - 1)
-    body.append("return %s;\n}" % result_name)
-    return self._Variable(name, "\n".join(body))
-
-  def _JSFunction(self, name, recursion_budget):
-    result = self._PlainFunction(name, recursion_budget)
-    self._AddAccessors(name, result, recursion_budget)
-    self._AddProperties(name, result, recursion_budget)
-    self._AddElements(name, result, recursion_budget)
-    return result
-
-  def _JSFunctionProxy(self, name, recursion_budget):
-    # TODO(jkummerow): Revisit this as the Proxy implementation evolves.
-    return self._Variable(name, "Proxy.createFunction(%s, function() {})" %
-                                self.PROXY_TRAPS)
-
-  def _JSGeneratorObject(self, name, recursion_budget):
-    # TODO(jkummerow): Be more creative here?
-    return self._Variable(name, "(function*() { yield 1; })()")
-
-  def _JSMap(self, name, recursion_budget, weak=""):
-    result = self._Variable(name, "new %sMap()" % weak)
-    num_entries = random.randint(0, 3)
-    for i in range(num_entries):
-      key_name = "%s_k%d" % (name, i)
-      value_name = "%s_v%d" % (name, i)
-      if weak:
-        result += self._JSObject(key_name, recursion_budget - 1)
-      else:
-        result += self._Object(key_name, recursion_budget - 1)
-      result += self._Object(value_name, recursion_budget - 1)
-      result.append("%s.set(%s, %s)" % (name, key_name, value_name))
-    return result
-
-  def _JSMapIterator(self, name, recursion_budget):
-    map_name = name + "_map"
-    result = self._JSMap(map_name, recursion_budget)
-    iterator_type = random.choice(['keys', 'values', 'entries'])
-    return (result + self._Variable(name, "%s.%s()" %
-                                          (map_name, iterator_type)))
-
-  def _JSProxy(self, name, recursion_budget):
-    # TODO(jkummerow): Revisit this as the Proxy implementation evolves.
-    return self._Variable(name, "Proxy.create(%s)" % self.PROXY_TRAPS)
-
-  def _JSRegExp(self, name, recursion_budget):
-    flags = random.choice(["", "g", "i", "m", "gi"])
-    string = "a(b|c)*a"  # TODO(jkummerow): Be more creative here?
-    ctor = random.choice(["/%s/%s", "new RegExp(\"%s\", \"%s\")"])
-    return self._Variable(name, ctor % (string, flags))
-
-  def _JSSet(self, name, recursion_budget, weak=""):
-    result = self._Variable(name, "new %sSet()" % weak)
-    num_entries = random.randint(0, 3)
-    for i in range(num_entries):
-      element_name = "%s_e%d" % (name, i)
-      if weak:
-        result += self._JSObject(element_name, recursion_budget - 1)
-      else:
-        result += self._Object(element_name, recursion_budget - 1)
-      result.append("%s.add(%s)" % (name, element_name))
-    return result
-
-  def _JSSetIterator(self, name, recursion_budget):
-    set_name = name + "_set"
-    result = self._JSSet(set_name, recursion_budget)
-    iterator_type = random.choice(['values', 'entries'])
-    return (result + self._Variable(name, "%s.%s()" %
-                                          (set_name, iterator_type)))
-
-  def _JSTypedArray(self, name, recursion_budget):
-    arraytype = random.choice(["Int8", "Int16", "Int32", "Uint8", "Uint16",
-                               "Uint32", "Float32", "Float64", "Uint8Clamped"])
-    ctor_type = random.randint(0, 3)
-    if ctor_type == 0:
-      length = random.randint(0, 1000)
-      return self._Variable(name, "new %sArray(%d)" % (arraytype, length),
-                            fallback="new %sArray(8)" % arraytype)
-    elif ctor_type == 1:
-      input_name = name + "_typedarray"
-      result = self._JSTypedArray(input_name, recursion_budget - 1)
-      return (result +
-              self._Variable(name, "new %sArray(%s)" % (arraytype, input_name),
-                             fallback="new %sArray(8)" % arraytype))
-    elif ctor_type == 2:
-      arraylike_name = name + "_arraylike"
-      result = self._JSObject(arraylike_name, recursion_budget - 1)
-      length = random.randint(0, 1000)
-      result.append("try { %s.length = %d; } catch(e) {}" %
-                    (arraylike_name, length))
-      return (result +
-              self._Variable(name,
-                             "new %sArray(%s)" % (arraytype, arraylike_name),
-                             fallback="new %sArray(8)" % arraytype))
-    else:
-      die = random.random()
-      buffer_name = name + "_buffer"
-      args = [buffer_name]
-      result = self._JSArrayBuffer(buffer_name, recursion_budget)
-      if die < 0.67:
-        offset_name = name + "_offset"
-        args.append(offset_name)
-        result += self._Int32(offset_name)
-      if die < 0.33:
-        length_name = name + "_length"
-        args.append(length_name)
-        result += self._Int32(length_name)
-      return (result +
-              self._Variable(name,
-                             "new %sArray(%s)" % (arraytype, ", ".join(args)),
-                             fallback="new %sArray(8)" % arraytype))
-
-  def _JSArrayBufferView(self, name, recursion_budget):
-    if random.random() < 0.4:
-      return self._JSDataView(name, recursion_budget)
-    else:
-      return self._JSTypedArray(name, recursion_budget)
-
-  def _JSWeakCollection(self, name, recursion_budget):
-    ctor = random.choice([self._JSMap, self._JSSet])
-    return ctor(name, recursion_budget, weak="Weak")
-
-  def _PropertyDetails(self, name, recursion_budget):
-    # TODO(jkummerow): Be more clever here?
-    return self._Int32(name)
-
-  def _JSObject(self, name, recursion_budget):
-    die = random.random()
-    if die < 0.4:
-      function = random.choice([self._PlainObject, self._PlainArray,
-                                self._PlainFunction])
-    elif die < 0.5:
-      return self._Variable(name, "this")  # Global object.
-    else:
-      function = random.choice([self._JSArrayBuffer, self._JSDataView,
-                                self._JSDate, self._JSFunctionProxy,
-                                self._JSGeneratorObject, self._JSMap,
-                                self._JSMapIterator, self._JSRegExp,
-                                self._JSSet, self._JSSetIterator,
-                                self._JSTypedArray, self._JSValue,
-                                self._JSWeakCollection])
-    result = function(name, recursion_budget)
-    self._AddAccessors(name, result, recursion_budget)
-    self._AddProperties(name, result, recursion_budget)
-    self._AddElements(name, result, recursion_budget)
-    return result
-
-  def _JSReceiver(self, name, recursion_budget):
-    if random.random() < 0.9: return self._JSObject(name, recursion_budget)
-    return self._JSProxy(name, recursion_budget)
-
-  def _HeapObject(self, name, recursion_budget):
-    die = random.random()
-    if die < 0.9: return self._JSReceiver(name, recursion_budget)
-    elif die < 0.95: return self._Oddball(name, recursion_budget)
-    else: return self._Name(name, recursion_budget)
-
-  def _Object(self, name, recursion_budget):
-    if recursion_budget <= 0:
-      function = random.choice([self._Oddball, self._Number, self._Name,
-                                self._JSValue, self._JSRegExp])
-      return function(name, recursion_budget)
-    if random.random() < 0.2:
-      return self._Smi(name, recursion_budget)
-    return self._HeapObject(name, recursion_budget)
-
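-  # Each entry maps a type name to a pair: a constant expression used when
-  # RandomVariable() is called with simple=True, and a bound generator
-  # method invoked with DEFAULT_RECURSION_BUDGET otherwise.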
-  GENERATORS = {
-    "Boolean": ["true", _Boolean],
-    "HeapObject": ["new Object()", _HeapObject],
-    "Int32": ["32", _Int32],
-    "JSArray": ["new Array()", _JSArray],
-    "JSArrayBuffer": ["new ArrayBuffer(8)", _JSArrayBuffer],
-    "JSArrayBufferView": ["new Int32Array(2)", _JSArrayBufferView],
-    "JSDataView": ["new DataView(new ArrayBuffer(24))", _JSDataView],
-    "JSDate": ["new Date()", _JSDate],
-    "JSFunction": ["function() {}", _JSFunction],
-    "JSFunctionProxy": ["Proxy.createFunction({}, function() {})",
-                        _JSFunctionProxy],
-    "JSGeneratorObject": ["(function*(){ yield 1; })()", _JSGeneratorObject],
-    "JSMap": ["new Map()", _JSMap],
-    "JSMapIterator": ["new Map().entries()", _JSMapIterator],
-    "JSObject": ["new Object()", _JSObject],
-    "JSProxy": ["Proxy.create({})", _JSProxy],
-    "JSReceiver": ["new Object()", _JSReceiver],
-    "JSRegExp": ["/ab/g", _JSRegExp],
-    "JSSet": ["new Set()", _JSSet],
-    "JSSetIterator": ["new Set().values()", _JSSetIterator],
-    "JSTypedArray": ["new Int32Array(2)", _JSTypedArray],
-    "JSValue": ["new String('foo')", _JSValue],
-    "JSWeakCollection": ["new WeakMap()", _JSWeakCollection],
-    "Name": ["\"name\"", _Name],
-    "Number": ["1.5", _Number],
-    "Object": ["new Object()", _Object],
-    "PropertyDetails": ["513", _PropertyDetails],
-    "SeqOneByteString": ["\"seq 1-byte\"", _SeqString],
-    "SeqString": ["\"seqstring\"", _SeqString],
-    "SeqTwoByteString": ["\"seq \\u2082-byte\"", _SeqTwoByteString],
-    "Smi": ["1", _Smi],
-    "StrictMode": ["1", _StrictMode],
-    "String": ["\"foo\"", _String],
-    "Symbol": ["Symbol(\"symbol\")", _Symbol],
-    "Uint32": ["32", _Uint32],
-  }
-
-
-class ArgParser(object):
-  def __init__(self, regex, ctor):
-    self.regex = regex
-    self.ArgCtor = ctor
-
-
-class Arg(object):
-  def __init__(self, typename, varname, index):
-    self.type = typename
-    self.name = "_%s" % varname
-    self.index = index
-
-
-class Function(object):
-  def __init__(self, match):
-    self.name = match.group(1)
-    self.argslength = -1
-    self.args = {}
-    self.inline = ""
-
-  handle_arg_parser = ArgParser(
-      re.compile("^\s*CONVERT_ARG_HANDLE_CHECKED\((\w+), (\w+), (\d+)\)"),
-      lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
-
-  plain_arg_parser = ArgParser(
-      re.compile("^\s*CONVERT_ARG_CHECKED\((\w+), (\w+), (\d+)\)"),
-      lambda match: Arg(match.group(1), match.group(2), int(match.group(3))))
-
-  number_handle_arg_parser = ArgParser(
-      re.compile("^\s*CONVERT_NUMBER_ARG_HANDLE_CHECKED\((\w+), (\d+)\)"),
-      lambda match: Arg("Number", match.group(1), int(match.group(2))))
-
-  smi_arg_parser = ArgParser(
-      re.compile("^\s*CONVERT_SMI_ARG_CHECKED\((\w+), (\d+)\)"),
-      lambda match: Arg("Smi", match.group(1), int(match.group(2))))
-
-  double_arg_parser = ArgParser(
-      re.compile("^\s*CONVERT_DOUBLE_ARG_CHECKED\((\w+), (\d+)\)"),
-      lambda match: Arg("Number", match.group(1), int(match.group(2))))
-
-  number_arg_parser = ArgParser(
-      re.compile(
-          "^\s*CONVERT_NUMBER_CHECKED\(\w+, (\w+), (\w+), args\[(\d+)\]\)"),
-      lambda match: Arg(match.group(2), match.group(1), int(match.group(3))))
-
-  strict_mode_arg_parser = ArgParser(
-      re.compile("^\s*CONVERT_STRICT_MODE_ARG_CHECKED\((\w+), (\d+)\)"),
-      lambda match: Arg("StrictMode", match.group(1), int(match.group(2))))
-
-  boolean_arg_parser = ArgParser(
-      re.compile("^\s*CONVERT_BOOLEAN_ARG_CHECKED\((\w+), (\d+)\)"),
-      lambda match: Arg("Boolean", match.group(1), int(match.group(2))))
-
-  property_details_parser = ArgParser(
-      re.compile("^\s*CONVERT_PROPERTY_DETAILS_CHECKED\((\w+), (\d+)\)"),
-      lambda match: Arg("PropertyDetails", match.group(1), int(match.group(2))))
-
-  arg_parsers = [handle_arg_parser, plain_arg_parser, number_handle_arg_parser,
-                 smi_arg_parser,
-                 double_arg_parser, number_arg_parser, strict_mode_arg_parser,
-                 boolean_arg_parser, property_details_parser]
-
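-  # Illustrative example: the source line
-  #   CONVERT_ARG_HANDLE_CHECKED(JSArray, array, 0)
-  # is matched by handle_arg_parser and yields Arg("JSArray", "array", 0),
-  # i.e. argument slot 0 of type JSArray.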
-  def SetArgsLength(self, match):
-    self.argslength = int(match.group(1))
-
-  def TryParseArg(self, line):
-    for parser in Function.arg_parsers:
-      match = parser.regex.match(line)
-      if match:
-        arg = parser.ArgCtor(match)
-        self.args[arg.index] = arg
-        return True
-    return False
-
-  def Filename(self):
-    return "%s.js" % self.name.lower()
-
-  def __str__(self):
-    s = [self.name, "("]
-    argcount = self.argslength
-    if argcount < 0:
-      print("WARNING: unknown argslength for function %s" % self.name)
-      if self.args:
-        argcount = max([self.args[i].index + 1 for i in self.args])
-      else:
-        argcount = 0
-    for i in range(argcount):
-      if i > 0: s.append(", ")
-      s.append(self.args[i].type if i in self.args else "<unknown>")
-    s.append(")")
-    return "".join(s)
-
-
-class Macro(object):
-  def __init__(self, match):
-    self.name = match.group(1)
-    self.args = [s.strip() for s in match.group(2).split(",")]
-    self.lines = []
-    self.indentation = 0
-    self.AddLine(match.group(3))
-
-  def AddLine(self, line):
-    if not line: return
-    if not self.lines:
-      # This is the first line, detect indentation.
-      self.indentation = len(line) - len(line.lstrip())
-    line = line.rstrip("\\\n ")
-    if not line: return
-    assert len(line[:self.indentation].strip()) == 0, \
-        ("expected whitespace: '%s', full line: '%s'" %
-         (line[:self.indentation], line))
-    line = line[self.indentation:]
-    if not line: return
-    self.lines.append(line + "\n")
-
-  def Finalize(self):
-    for arg in self.args:
-      pattern = re.compile(r"(##|\b)%s(##|\b)" % arg)
-      for i in range(len(self.lines)):
-        self.lines[i] = re.sub(pattern, "%%(%s)s" % arg, self.lines[i])
-
-  def FillIn(self, arg_values):
-    filler = {}
-    assert len(arg_values) == len(self.args)
-    for i in range(len(self.args)):
-      filler[self.args[i]] = arg_values[i]
-    result = []
-    for line in self.lines:
-      result.append(line % filler)
-    return result
-
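-# Illustrative sketch of the macro machinery above: for a macro whose body
-# is "x + y" with args ["x", "y"], Finalize() rewrites the body into the
-# template "%(x)s + %(y)s", and FillIn(["1", "2"]) then yields "1 + 2".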
-
-# Parses HEADERFILENAME to find out which runtime functions are "inline".
-def FindInlineRuntimeFunctions():
-  inline_functions = []
-  with open(HEADERFILENAME, "r") as f:
-    inline_list = "#define INLINE_FUNCTION_LIST(F) \\\n"
-    inline_function = re.compile(r"^\s*F\((\w+), \d+, \d+\)\s*\\?")
-    mode = "SEARCHING"
-    for line in f:
-      if mode == "ACTIVE":
-        match = inline_function.match(line)
-        if match:
-          inline_functions.append(match.group(1))
-        if not line.endswith("\\\n"):
-          mode = "SEARCHING"
-      elif mode == "SEARCHING":
-        if line == inline_list:
-          mode = "ACTIVE"
-  return inline_functions
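-# The scanner above expects a header section of the shape (illustrative):
-#   #define INLINE_FUNCTION_LIST(F) \
-#     F(IsSmi, 1, 1)                \
-#     F(IsNonNegativeSmi, 1, 1)
-# and collects each F(Name, nargs, result_size) entry's Name.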
-
-
-def ReadFileAndExpandMacros(filename):
-  found_macros = {}
-  expanded_lines = []
-  with open(filename, "r") as f:
-    found_macro = None
-    for line in f:
-      if found_macro is not None:
-        found_macro.AddLine(line)
-        if not line.endswith("\\\n"):
-          found_macro.Finalize()
-          found_macro = None
-        continue
-
-      match = MACRO.match(line)
-      if match:
-        found_macro = Macro(match)
-        if found_macro.name in EXPAND_MACROS:
-          found_macros[found_macro.name] = found_macro
-        else:
-          found_macro = None
-        continue
-
-      match = FIRST_WORD.match(line)
-      if match:
-        first_word = match.group(1)
-        if first_word in found_macros:
-          MACRO_CALL = re.compile("%s\(([^)]*)\)" % first_word)
-          match = MACRO_CALL.match(line)
-          assert match
-          args = [s.strip() for s in match.group(1).split(",")]
-          expanded_lines += found_macros[first_word].FillIn(args)
-          continue
-
-      expanded_lines.append(line)
-  return expanded_lines
-
-
-# Detects runtime functions by parsing FILENAME.
-def FindRuntimeFunctions():
-  inline_functions = FindInlineRuntimeFunctions()
-  functions = []
-  expanded_lines = ReadFileAndExpandMacros(FILENAME)
-  function = None
-  partial_line = ""
-  for line in expanded_lines:
-    # Multi-line definition support, ignoring macros.
-    if line.startswith("RUNTIME_FUNCTION") and not line.endswith("{\n"):
-      if line.endswith("\\\n"): continue
-      partial_line = line.rstrip()
-      continue
-    if partial_line:
-      partial_line += " " + line.strip()
-      if partial_line.endswith("{"):
-        line = partial_line
-        partial_line = ""
-      else:
-        continue
-
-    match = FUNCTION.match(line)
-    if match:
-      function = Function(match)
-      if function.name in inline_functions:
-        function.inline = "_"
-      continue
-    if function is None: continue
-
-    match = ARGSLENGTH.match(line)
-    if match:
-      function.SetArgsLength(match)
-      continue
-
-    if function.TryParseArg(line):
-      continue
-
-    if line == FUNCTIONEND:
-      if function is not None:
-        functions.append(function)
-        function = None
-  return functions
-
-
-# Hack: This must have the same fields as class Function above, because the
-# two are used polymorphically in RunFuzzer(). We could use inheritance...
-class Builtin(object):
-  def __init__(self, match):
-    self.name = match.group(1)
-    args = match.group(2)
-    self.argslength = 0 if args == "" else args.count(",") + 1
-    self.inline = ""
-    self.args = {}
-    if self.argslength > 0:
-      args = args.split(",")
-      for i in range(len(args)):
-        # a = args[i].strip()  # TODO: filter out /* comments */ first.
-        a = ""
-        self.args[i] = Arg("Object", a, i)
-
-  def __str__(self):
-    return "%s(%d)" % (self.name, self.argslength)
-
-
-def FindJSBuiltins():
-  PATH = "src"
-  fileslist = []
-  for (root, dirs, files) in os.walk(PATH):
-    for f in files:
-      if f.endswith(".js"):
-        fileslist.append(os.path.join(root, f))
-  builtins = []
-  regexp = re.compile("^function (\w+)\s*\((.*?)\) {")
-  matches = 0
-  for filename in fileslist:
-    with open(filename, "r") as f:
-      file_contents = f.read()
-    file_contents = js2c.ExpandInlineMacros(file_contents)
-    lines = file_contents.split("\n")
-    partial_line = ""
-    for line in lines:
-      if line.startswith("function") and not '{' in line:
-        partial_line += line.rstrip()
-        continue
-      if partial_line:
-        partial_line += " " + line.strip()
-        if '{' in line:
-          line = partial_line
-          partial_line = ""
-        else:
-          continue
-      match = regexp.match(line)
-      if match:
-        builtins.append(Builtin(match))
-  return builtins
-
-
-# Classifies runtime functions.
-def ClassifyFunctions(functions):
-  # Can be fuzzed with a JavaScript testcase.
-  js_fuzzable_functions = []
-  # We have enough information to fuzz these, but they need inputs that
-  # cannot be created or passed around in JavaScript.
-  cctest_fuzzable_functions = []
-  # This script does not have enough information about these.
-  unknown_functions = []
-
-  types = {}
-  for f in functions:
-    if f.name in BLACKLISTED:
-      continue
-    decision = js_fuzzable_functions
-    custom = CUSTOM_KNOWN_GOOD_INPUT.get(f.name, None)
-    if f.argslength < 0:
-      # Unknown length -> give up unless there's a custom definition.
-      if custom and custom[-1] is not None:
-        f.argslength = custom[-1]
-        assert len(custom) == f.argslength + 1, \
-            ("%s: last custom definition must be argslength" % f.name)
-      else:
-        decision = unknown_functions
-    else:
-      if custom:
-        # Any custom definitions must match the known argslength.
-        assert len(custom) == f.argslength + 1, \
-            ("%s should have %d custom definitions but has %d" %
-            (f.name, f.argslength + 1, len(custom)))
-      for i in range(f.argslength):
-        if custom and custom[i] is not None:
-          # All good, there's a custom definition.
-          pass
-        elif not i in f.args:
-          # No custom definition and no parse result -> give up.
-          decision = unknown_functions
-        else:
-          t = f.args[i].type
-          if t in NON_JS_TYPES:
-            decision = cctest_fuzzable_functions
-          else:
-            assert Generator.IsTypeSupported(t), \
-                ("type generator not found for %s, function: %s" % (t, f))
-    decision.append(f)
-  return (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions)
-
-
-def _GetKnownGoodArgs(function, generator):
-  custom_input = CUSTOM_KNOWN_GOOD_INPUT.get(function.name, None)
-  definitions = []
-  argslist = []
-  for i in range(function.argslength):
-    if custom_input and custom_input[i] is not None:
-      name = "arg%d" % i
-      definitions.append("var %s = %s;" % (name, custom_input[i]))
-    else:
-      arg = function.args[i]
-      name = arg.name
-      definitions += generator.RandomVariable(name, arg.type, simple=True)
-    argslist.append(name)
-  return (definitions, argslist)
-
-
-def _GenerateTestcase(function, definitions, argslist, throws):
-  s = ["// Copyright 2014 the V8 project authors. All rights reserved.",
-       "// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY",
-       "// Flags: --allow-natives-syntax --harmony"] + definitions
-  call = "%%%s%s(%s);" % (function.inline, function.name, ", ".join(argslist))
-  if throws:
-    s.append("try {")
-    s.append(call)
-    s.append("} catch(e) {}")
-  else:
-    s.append(call)
-  testcase = "\n".join(s)
-  return testcase
-
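-# A generated testcase therefore looks like this (hypothetical function
-# "Foo" with a single custom argument and throws=True):
-#   // Copyright 2014 the V8 project authors. All rights reserved.
-#   // AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-#   // Flags: --allow-natives-syntax --harmony
-#   var arg0 = 1;
-#   try {
-#   %Foo(arg0);
-#   } catch(e) {}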
-
-def GenerateJSTestcaseForFunction(function):
-  gen = Generator()
-  (definitions, argslist) = _GetKnownGoodArgs(function, gen)
-  testcase = _GenerateTestcase(function, definitions, argslist,
-                               function.name in THROWS)
-  path = os.path.join(BASEPATH, function.Filename())
-  with open(path, "w") as f:
-    f.write("%s\n" % testcase)
-
-
-def GenerateTestcases(functions):
-  shutil.rmtree(BASEPATH)  # Re-generate everything.
-  os.makedirs(BASEPATH)
-  for f in functions:
-    GenerateJSTestcaseForFunction(f)
-
-
-def _SaveFileName(save_path, process_id, save_file_index):
-  return "%s/fuzz_%d_%d.js" % (save_path, process_id, save_file_index)
-
-
-def _GetFuzzableRuntimeFunctions():
-  functions = FindRuntimeFunctions()
-  (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
-      ClassifyFunctions(functions)
-  return js_fuzzable_functions
-
-
-FUZZ_TARGET_LISTS = {
-  "runtime": _GetFuzzableRuntimeFunctions,
-  "builtins": FindJSBuiltins,
-}
-
-
-def RunFuzzer(process_id, options, stop_running):
-  MAX_SLEEP_TIME = 0.1
-  INITIAL_SLEEP_TIME = 0.001
-  SLEEP_TIME_FACTOR = 1.25
-  base_file_name = "/dev/shm/runtime_fuzz_%d" % process_id
-  test_file_name = "%s.js" % base_file_name
-  stderr_file_name = "%s.out" % base_file_name
-  save_file_index = 0
-  while os.path.exists(_SaveFileName(options.save_path, process_id,
-                                     save_file_index)):
-    save_file_index += 1
-
-  targets = FUZZ_TARGET_LISTS[options.fuzz_target]()
-  try:
-    for i in range(options.num_tests):
-      if stop_running.is_set(): break
-      function = None
-      while function is None or function.argslength == 0:
-        function = random.choice(targets)
-      args = []
-      definitions = []
-      gen = Generator()
-      for i in range(function.argslength):
-        arg = function.args[i]
-        argname = "arg%d%s" % (i, arg.name)
-        args.append(argname)
-        definitions += gen.RandomVariable(argname, arg.type, simple=False)
-      testcase = _GenerateTestcase(function, definitions, args, True)
-      with open(test_file_name, "w") as f:
-        f.write("%s\n" % testcase)
-      with open("/dev/null", "w") as devnull:
-        with open(stderr_file_name, "w") as stderr:
-          process = subprocess.Popen(
-              [options.binary, "--allow-natives-syntax", "--harmony",
-               "--enable-slow-asserts", test_file_name],
-              stdout=devnull, stderr=stderr)
-          end_time = time.time() + options.timeout
-          timed_out = False
-          exit_code = None
-          sleep_time = INITIAL_SLEEP_TIME
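-          # Poll for child exit with exponential backoff: start at 1 ms,
-          # grow by 1.25x per poll, cap at 100 ms; once the per-test
-          # timeout expires, SIGTERM the child and record the timeout.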
-          while exit_code is None:
-            if time.time() >= end_time:
-              # Kill the process and wait for it to exit.
-              os.kill(process.pid, signal.SIGTERM)
-              exit_code = process.wait()
-              timed_out = True
-            else:
-              exit_code = process.poll()
-              time.sleep(sleep_time)
-              sleep_time = sleep_time * SLEEP_TIME_FACTOR
-              if sleep_time > MAX_SLEEP_TIME:
-                sleep_time = MAX_SLEEP_TIME
-      if exit_code != 0 and not timed_out:
-        oom = False
-        with open(stderr_file_name, "r") as stderr:
-          for line in stderr:
-            if line.strip() == "# Allocation failed - process out of memory":
-              oom = True
-              break
-        if oom: continue
-        save_name = _SaveFileName(options.save_path, process_id,
-                                  save_file_index)
-        shutil.copyfile(test_file_name, save_name)
-        save_file_index += 1
-  except KeyboardInterrupt:
-    stop_running.set()
-  finally:
-    if os.path.exists(test_file_name):
-      os.remove(test_file_name)
-    if os.path.exists(stderr_file_name):
-      os.remove(stderr_file_name)
-
-
-def BuildOptionParser():
-  usage = """Usage: %%prog [options] ACTION
-
-where ACTION can be:
-
-info      Print diagnostic info.
-check     Check that runtime functions can be parsed as expected, and that
-          test cases exist.
-generate  Parse source code for runtime functions, and auto-generate
-          test cases for them. Warning: this will nuke and re-create
-          %(path)s.
-fuzz      Generate fuzz tests, run them, save those that crashed (see options).
-""" % {"path": os.path.relpath(BASEPATH)}
-
-  o = optparse.OptionParser(usage=usage)
-  o.add_option("--binary", default="out/x64.debug/d8",
-               help="d8 binary used for running fuzz tests (default: %default)")
-  o.add_option("--fuzz-target", default="runtime",
-               help="Set of functions targeted by fuzzing. Allowed values: "
-                    "%s (default: %%default)" % ", ".join(FUZZ_TARGET_LISTS))
-  o.add_option("-n", "--num-tests", default=1000, type="int",
-               help="Number of fuzz tests to generate per worker process"
-                    " (default: %default)")
-  o.add_option("--save-path", default="~/runtime_fuzz_output",
-               help="Path to directory where failing tests will be stored"
-                    " (default: %default)")
-  o.add_option("--timeout", default=20, type="int",
-               help="Timeout for each fuzz test (in seconds, default:"
-                    "%default)")
-  return o
-
-
-def ProcessOptions(options, args):
-  options.save_path = os.path.expanduser(options.save_path)
-  if options.fuzz_target not in FUZZ_TARGET_LISTS:
-    print("Invalid fuzz target: %s" % options.fuzz_target)
-    return False
-  if len(args) != 1 or args[0] == "help":
-    return False
-  return True
-
-
-def Main():
-  parser = BuildOptionParser()
-  (options, args) = parser.parse_args()
-
-  if not ProcessOptions(options, args):
-    parser.print_help()
-    return 1
-  action = args[0]
-
-  functions = FindRuntimeFunctions()
-  (js_fuzzable_functions, cctest_fuzzable_functions, unknown_functions) = \
-      ClassifyFunctions(functions)
-  builtins = FindJSBuiltins()
-
-  if action == "test":
-    print("put your temporary debugging code here")
-    return 0
-
-  if action == "info":
-    print("%d functions total; js_fuzzable_functions: %d, "
-          "cctest_fuzzable_functions: %d, unknown_functions: %d"
-          % (len(functions), len(js_fuzzable_functions),
-             len(cctest_fuzzable_functions), len(unknown_functions)))
-    print("%d JavaScript builtins" % len(builtins))
-    print("unknown functions:")
-    for f in unknown_functions:
-      print(f)
-    return 0
-
-  if action == "check":
-    errors = 0
-
-    def CheckCount(actual, expected, description):
-      if len(actual) != expected:
-        print("Expected to detect %d %s, but found %d." % (
-              expected, description, len(actual)))
-        print("If this change is intentional, please update the expectations"
-              " at the top of %s." % THIS_SCRIPT)
-        return 1
-      return 0
-
-    errors += CheckCount(functions, EXPECTED_FUNCTION_COUNT,
-                         "functions in total")
-    errors += CheckCount(js_fuzzable_functions, EXPECTED_FUZZABLE_COUNT,
-                         "JavaScript-fuzzable functions")
-    errors += CheckCount(cctest_fuzzable_functions, EXPECTED_CCTEST_COUNT,
-                         "cctest-fuzzable functions")
-    errors += CheckCount(unknown_functions, EXPECTED_UNKNOWN_COUNT,
-                         "functions with incomplete type information")
-    errors += CheckCount(builtins, EXPECTED_BUILTINS_COUNT,
-                         "JavaScript builtins")
-
-    def CheckTestcasesExisting(functions):
-      errors = 0
-      for f in functions:
-        if not os.path.isfile(os.path.join(BASEPATH, f.Filename())):
-          print("Missing testcase for %s, please run '%s generate'" %
-                (f.name, THIS_SCRIPT))
-          errors += 1
-      files = filter(lambda filename: not filename.startswith("."),
-                     os.listdir(BASEPATH))
-      if len(files) != len(functions):
-        unexpected_files = set(files) - set([f.Filename() for f in functions])
-        for f in unexpected_files:
-          print("Unexpected testcase: %s" % os.path.join(BASEPATH, f))
-          errors += 1
-        print("Run '%s generate' to automatically clean these up."
-              % THIS_SCRIPT)
-      return errors
-
-    errors += CheckTestcasesExisting(js_fuzzable_functions)
-
-    def CheckNameClashes(runtime_functions, builtins):
-      errors = 0
-      runtime_map = {}
-      for f in runtime_functions:
-        runtime_map[f.name] = 1
-      for b in builtins:
-        if b.name in runtime_map:
-          print("Builtin/Runtime_Function name clash: %s" % b.name)
-          errors += 1
-      return errors
-
-    errors += CheckNameClashes(functions, builtins)
-
-    if errors > 0:
-      return 1
-    print("Generated runtime tests: all good.")
-    return 0
-
-  if action == "generate":
-    GenerateTestcases(js_fuzzable_functions)
-    return 0
-
-  if action == "fuzz":
-    processes = []
-    if not os.path.isdir(options.save_path):
-      os.makedirs(options.save_path)
-    stop_running = multiprocessing.Event()
-    for i in range(multiprocessing.cpu_count()):
-      args = (i, options, stop_running)
-      p = multiprocessing.Process(target=RunFuzzer, args=args)
-      p.start()
-      processes.append(p)
-    try:
-      for i in range(len(processes)):
-        processes[i].join()
-    except KeyboardInterrupt:
-      stop_running.set()
-      for i in range(len(processes)):
-        processes[i].join()
-    return 0
-
-if __name__ == "__main__":
-  sys.exit(Main())
diff --git a/tools/generate-trig-table.py b/tools/generate-trig-table.py
deleted file mode 100644
index 0a4fe28..0000000
--- a/tools/generate-trig-table.py
+++ /dev/null
@@ -1,83 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions are
-# met:
-#
-#     * Redistributions of source code must retain the above copyright
-#       notice, this list of conditions and the following disclaimer.
-#     * Redistributions in binary form must reproduce the above
-#       copyright notice, this list of conditions and the following
-#       disclaimer in the documentation and/or other materials provided
-#       with the distribution.
-#     * Neither the name of Google Inc. nor the names of its
-#       contributors may be used to endorse or promote products derived
-#       from this software without specific prior written permission.
-#
-# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-# This is a utility for populating the lookup table for the
-# approximation of trigonometric functions.
-
-import sys, math
-
-SAMPLES = 1800
-
-TEMPLATE = """\
-// Copyright 2013 Google Inc. All Rights Reserved.
-
-// This file was generated from a python script.
-
-#include "src/v8.h"
-#include "src/trig-table.h"
-
-namespace v8 {
-namespace internal {
-
-  const double TrigonometricLookupTable::kSinTable[] =
-      { %(sine_table)s };
-  const double TrigonometricLookupTable::kCosXIntervalTable[] =
-      { %(cosine_table)s };
-  const int TrigonometricLookupTable::kSamples = %(samples)i;
-  const int TrigonometricLookupTable::kTableSize = %(table_size)i;
-  const double TrigonometricLookupTable::kSamplesOverPiHalf =
-      %(samples_over_pi_half)s;
-
-} }  // v8::internal
-"""
-
-def main():
-  pi_half = math.pi / 2
-  interval = pi_half / SAMPLES
-  sin = []
-  cos_times_interval = []
-  table_size = SAMPLES + 2
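-  # The runtime maps an angle x in [0, pi/2] to a table index via
-  # x * kSamplesOverPiHalf; the two entries beyond SAMPLES presumably guard
-  # reads just past the nominal range during interpolation.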
-
-  for i in range(0, table_size):
-    sample = i * interval
-    sin.append(repr(math.sin(sample)))
-    cos_times_interval.append(repr(math.cos(sample) * interval))
-
-  output_file = sys.argv[1]
-  output = open(str(output_file), "w")
-  output.write(TEMPLATE % {
-    'sine_table': ','.join(sin),
-    'cosine_table': ','.join(cos_times_interval),
-    'samples': SAMPLES,
-    'table_size': table_size,
-    'samples_over_pi_half': repr(SAMPLES / pi_half)
-  })
-
-if __name__ == "__main__":
-  main()
diff --git a/tools/grokdump.py b/tools/grokdump.py
index 8178b2f..2177ec2 100755
--- a/tools/grokdump.py
+++ b/tools/grokdump.py
@@ -1482,22 +1482,22 @@
 class V8Heap(object):
   CLASS_MAP = {
     "SYMBOL_TYPE": SeqString,
-    "ASCII_SYMBOL_TYPE": SeqString,
+    "ONE_BYTE_SYMBOL_TYPE": SeqString,
     "CONS_SYMBOL_TYPE": ConsString,
-    "CONS_ASCII_SYMBOL_TYPE": ConsString,
+    "CONS_ONE_BYTE_SYMBOL_TYPE": ConsString,
     "EXTERNAL_SYMBOL_TYPE": ExternalString,
-    "EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString,
-    "EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString,
+    "EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+    "EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
     "SHORT_EXTERNAL_SYMBOL_TYPE": ExternalString,
-    "SHORT_EXTERNAL_SYMBOL_WITH_ASCII_DATA_TYPE": ExternalString,
-    "SHORT_EXTERNAL_ASCII_SYMBOL_TYPE": ExternalString,
+    "SHORT_EXTERNAL_SYMBOL_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+    "SHORT_EXTERNAL_ONE_BYTE_SYMBOL_TYPE": ExternalString,
     "STRING_TYPE": SeqString,
-    "ASCII_STRING_TYPE": SeqString,
+    "ONE_BYTE_STRING_TYPE": SeqString,
     "CONS_STRING_TYPE": ConsString,
-    "CONS_ASCII_STRING_TYPE": ConsString,
+    "CONS_ONE_BYTE_STRING_TYPE": ConsString,
     "EXTERNAL_STRING_TYPE": ExternalString,
-    "EXTERNAL_STRING_WITH_ASCII_DATA_TYPE": ExternalString,
-    "EXTERNAL_ASCII_STRING_TYPE": ExternalString,
+    "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE": ExternalString,
+    "EXTERNAL_ONE_BYTE_STRING_TYPE": ExternalString,
     "MAP_TYPE": Map,
     "ODDBALL_TYPE": Oddball,
     "FIXED_ARRAY_TYPE": FixedArray,
@@ -3103,15 +3103,18 @@
       frame_pointer = reader.ExceptionFP()
       print "Annotated stack (from exception.esp to bottom):"
       for slot in xrange(stack_top, stack_bottom, reader.PointerSize()):
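+        # Render each stack slot's raw bytes as printable ASCII (dots for
+        # non-printable bytes) next to the pointer value printed below.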
+        ascii_content = [c if c >= '\x20' and c <  '\x7f' else '.'
+                         for c in reader.ReadBytes(slot, reader.PointerSize())]
         maybe_address = reader.ReadUIntPtr(slot)
         heap_object = heap.FindObject(maybe_address)
         maybe_symbol = reader.FindSymbol(maybe_address)
         if slot == frame_pointer:
           maybe_symbol = "<---- frame pointer"
           frame_pointer = maybe_address
-        print "%s: %s %s" % (reader.FormatIntPtr(slot),
-                             reader.FormatIntPtr(maybe_address),
-                             maybe_symbol or "")
+        print "%s: %s %s %s" % (reader.FormatIntPtr(slot),
+                                reader.FormatIntPtr(maybe_address),
+                                "".join(ascii_content),
+                                maybe_symbol or "")
         if heap_object:
           heap_object.Print(Printer())
           print
diff --git a/tools/gyp/generate_trig_table.host.darwin-arm.mk b/tools/gyp/generate_trig_table.host.darwin-arm.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.darwin-arm.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.darwin-arm64.mk b/tools/gyp/generate_trig_table.host.darwin-arm64.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.darwin-arm64.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.darwin-mips.mk b/tools/gyp/generate_trig_table.host.darwin-mips.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.darwin-mips.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.darwin-x86.mk b/tools/gyp/generate_trig_table.host.darwin-x86.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.darwin-x86.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.darwin-x86_64.mk b/tools/gyp/generate_trig_table.host.darwin-x86_64.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.darwin-x86_64.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.linux-arm.mk b/tools/gyp/generate_trig_table.host.linux-arm.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.linux-arm.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.linux-arm64.mk b/tools/gyp/generate_trig_table.host.linux-arm64.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.linux-arm64.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.linux-mips.mk b/tools/gyp/generate_trig_table.host.linux-mips.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.linux-mips.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.linux-x86.mk b/tools/gyp/generate_trig_table.host.linux-x86.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.linux-x86.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/generate_trig_table.host.linux-x86_64.mk b/tools/gyp/generate_trig_table.host.linux-x86_64.mk
deleted file mode 100644
index 9b24dca..0000000
--- a/tools/gyp/generate_trig_table.host.linux-x86_64.mk
+++ /dev/null
@@ -1,56 +0,0 @@
-# This file is generated by gyp; do not edit.
-
-include $(CLEAR_VARS)
-
-LOCAL_MODULE_CLASS := GYP
-LOCAL_MODULE := v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-LOCAL_MODULE_STEM := generate_trig_table
-LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
-LOCAL_IS_HOST_MODULE := true
-LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
-gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
-gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
-
-# Make sure our deps are built first.
-GYP_TARGET_DEPENDENCIES :=
-
-### Rules for action "generate":
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_local_path := $(LOCAL_PATH)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
-$(gyp_shared_intermediate_dir)/trig-table.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/trig-table.cc: $(LOCAL_PATH)/v8/tools/generate-trig-table.py $(GYP_TARGET_DEPENDENCIES)
-	@echo "Gyp action: v8_tools_gyp_v8_gyp_generate_trig_table_host_generate ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/generate-trig-table.py "$(gyp_shared_intermediate_dir)/trig-table.cc"
-
-
-
-GYP_GENERATED_OUTPUTS := \
-	$(gyp_shared_intermediate_dir)/trig-table.cc
-
-# Make sure our deps and generated files are built first.
-LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
-
-### Rules for final target.
-# Add target alias to "gyp_all_modules" target.
-.PHONY: gyp_all_modules
-gyp_all_modules: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-# Alias gyp target name.
-.PHONY: generate_trig_table
-generate_trig_table: v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
-
-LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
-LOCAL_UNINSTALLABLE_MODULE := true
-LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
-
-include $(BUILD_SYSTEM)/base_rules.mk
-
-$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
-	$(hide) echo "Gyp timestamp: $@"
-	$(hide) mkdir -p $(dir $@)
-	$(hide) touch $@
-
-LOCAL_2ND_ARCH_VAR_PREFIX :=
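
All of the generate_trig_table.host.*.mk stamp files above are deleted by this merge: the precomputed trig lookup table (trig-table.cc, emitted by v8/tools/generate-trig-table.py) is dropped in favor of the fdlibm port, whose fdlibm.js joins the js2c inputs in the hunks below. For orientation, a rough Python sketch of what a generator of this shape does, namely sampling a trig function over a fixed range and emitting the samples as a C++ array; the sample count and symbol names are illustrative assumptions, not the exact output of the real script:

# Rough sketch of a trig-table generator in the style of
# v8/tools/generate-trig-table.py: sample sin(x) over [0, pi/2] and write
# the samples as a C++ source file. SAMPLES and kSinTable are assumptions.
import math
import sys

SAMPLES = 1800  # assumed table density

def generate(path):
    xs = [i * (math.pi / 2) / SAMPLES for i in range(SAMPLES + 1)]
    with open(path, "w") as out:
        out.write("// Generated file. Do not edit.\n\n")
        out.write("const double kSinTable[] = {\n")
        for x in xs:
            out.write("  %.20e,\n" % math.sin(x))
        out.write("};\n")

if __name__ == "__main__":
    generate(sys.argv[1])  # e.g. <shared intermediate dir>/trig-table.cc
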
diff --git a/tools/gyp/js2c.host.darwin-arm.mk b/tools/gyp/js2c.host.darwin-arm.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.darwin-arm.mk
+++ b/tools/gyp/js2c.host.darwin-arm.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/js2c.host.darwin-arm64.mk b/tools/gyp/js2c.host.darwin-arm64.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.darwin-arm64.mk
+++ b/tools/gyp/js2c.host.darwin-arm64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/js2c.host.darwin-mips.mk b/tools/gyp/js2c.host.darwin-mips.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.darwin-mips.mk
+++ b/tools/gyp/js2c.host.darwin-mips.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/js2c.host.darwin-mips64.mk b/tools/gyp/js2c.host.darwin-mips64.mk
new file mode 100644
index 0000000..27f05b1
--- /dev/null
+++ b/tools/gyp/js2c.host.darwin-mips64.mk
@@ -0,0 +1,70 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := GYP
+LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_STEM := js2c
+LOCAL_MODULE_SUFFIX := .stamp
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+### Rules for action "js2c":
+$(gyp_shared_intermediate_dir)/libraries.cc: gyp_local_path := $(LOCAL_PATH)
+$(gyp_shared_intermediate_dir)/libraries.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
+$(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
+$(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
+$(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
+
+
+### Rules for action "js2c_experimental":
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_local_path := $(LOCAL_PATH)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
+	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
+
+
+
+GYP_GENERATED_OUTPUTS := \
+	$(gyp_shared_intermediate_dir)/libraries.cc \
+	$(gyp_shared_intermediate_dir)/experimental-libraries.cc
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: js2c
+js2c: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
+LOCAL_UNINSTALLABLE_MODULE := true
+LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
+	$(hide) echo "Gyp timestamp: $@"
+	$(hide) mkdir -p $(dir $@)
+	$(hide) touch $@
+
+LOCAL_2ND_ARCH_VAR_PREFIX :=
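
The js2c stamp rules in this and the surrounding files all drive v8/tools/js2c.py, which expands the macros from macros.py, concatenates the listed JavaScript natives, and embeds the result in a generated C++ file (libraries.cc for the CORE set, experimental-libraries.cc for EXPERIMENTAL) so the engine can compile them at bootstrap. A rough sketch of the embedding step, with the emitted symbol names as illustrative assumptions rather than the real script's output format:

# Rough sketch of the js2c embedding step: read the JS sources, concatenate
# them, and emit the bytes as a C++ array. kNativesBlob is an assumed name;
# the real v8/tools/js2c.py also handles macro expansion and minification.
import sys

def js2c(out_path, sources):
    blob = "\n".join(open(s, encoding="utf-8").read() for s in sources)
    data = blob.encode("utf-8")
    with open(out_path, "w") as out:
        out.write("// Generated file. Do not edit.\n")
        out.write("static const char kNativesBlob[] = {%s};\n"
                  % ", ".join(str(b) for b in data))
        out.write("static const int kNativesBlobSize = %d;\n" % len(data))

if __name__ == "__main__":
    # Usage sketch: python js2c_sketch.py libraries.cc runtime.js v8natives.js ...
    js2c(sys.argv[1], sys.argv[2:])
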
diff --git a/tools/gyp/js2c.host.darwin-x86.mk b/tools/gyp/js2c.host.darwin-x86.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.darwin-x86.mk
+++ b/tools/gyp/js2c.host.darwin-x86.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/js2c.host.darwin-x86_64.mk b/tools/gyp/js2c.host.darwin-x86_64.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.darwin-x86_64.mk
+++ b/tools/gyp/js2c.host.darwin-x86_64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
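
The same js2c change repeats in every js2c.host.*.mk that follows: symbol.js, collection.js, collection-iterator.js, generator.js and array-iterator.js move from the EXPERIMENTAL natives list into CORE, weak_collection.js is renamed to weak-collection.js, string-iterator.js and the third_party fdlibm.js port are added to CORE, and harmony-classes.js replaces harmony-math.js in the EXPERIMENTAL list. A minimal sketch of the command shape all of these rules share, with OUT as a hypothetical stand-in for $(gyp_shared_intermediate_dir) and a deliberately abbreviated source list (any rule above shows the full CORE ordering):

	cd v8/tools/gyp
	mkdir -p "$OUT"
	python ../../tools/js2c.py "$OUT/libraries.cc" CORE off \
	    ../../src/runtime.js ../../src/v8natives.js ../../src/macros.py

js2c.py embeds each listed script into the generated libraries.cc; the .py input (macros.py) supplies macro definitions rather than a native script, while the order of the .js files is the load order of the built-ins.
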
diff --git a/tools/gyp/js2c.host.linux-arm.mk b/tools/gyp/js2c.host.linux-arm.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.linux-arm.mk
+++ b/tools/gyp/js2c.host.linux-arm.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/js2c.host.linux-arm64.mk b/tools/gyp/js2c.host.linux-arm64.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.linux-arm64.mk
+++ b/tools/gyp/js2c.host.linux-arm64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/js2c.host.linux-mips.mk b/tools/gyp/js2c.host.linux-mips.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.linux-mips.mk
+++ b/tools/gyp/js2c.host.linux-mips.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/js2c.host.linux-mips64.mk b/tools/gyp/js2c.host.linux-mips64.mk
new file mode 100644
index 0000000..27f05b1
--- /dev/null
+++ b/tools/gyp/js2c.host.linux-mips64.mk
@@ -0,0 +1,70 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := GYP
+LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_STEM := js2c
+LOCAL_MODULE_SUFFIX := .stamp
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+### Rules for action "js2c":
+$(gyp_shared_intermediate_dir)/libraries.cc: gyp_local_path := $(LOCAL_PATH)
+$(gyp_shared_intermediate_dir)/libraries.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
+$(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
+$(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
+$(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
+
+
+### Rules for action "js2c_experimental":
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_local_path := $(LOCAL_PATH)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
+	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
+
+
+
+GYP_GENERATED_OUTPUTS := \
+	$(gyp_shared_intermediate_dir)/libraries.cc \
+	$(gyp_shared_intermediate_dir)/experimental-libraries.cc
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: js2c
+js2c: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
+LOCAL_UNINSTALLABLE_MODULE := true
+LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_HOST_VAR_PREFIX)
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
+	$(hide) echo "Gyp timestamp: $@"
+	$(hide) mkdir -p $(dir $@)
+	$(hide) touch $@
+
+LOCAL_2ND_ARCH_VAR_PREFIX :=
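
js2c.host.linux-mips64.mk is new with this merge (there was no mips64 host target before) and is generated directly in the updated form: no LOCAL_MODULE_TAGS line, the reshuffled CORE/EXPERIMENTAL natives lists, and LOCAL_CLANG := true. It ends with the stamp idiom every one of these GYP-generated modules uses; a condensed sketch, with JS2C_OUTPUTS as a hypothetical stand-in for $(GYP_GENERATED_OUTPUTS):

	js2c.stamp: $(JS2C_OUTPUTS)
		@echo "Gyp timestamp: $@"
		mkdir -p $(dir $@)
		touch $@

The stamp is the module's only build product: anything depending on the js2c module is rebuilt whenever libraries.cc or experimental-libraries.cc is regenerated.
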
diff --git a/tools/gyp/js2c.host.linux-x86.mk b/tools/gyp/js2c.host.linux-x86.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.linux-x86.mk
+++ b/tools/gyp/js2c.host.linux-x86.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/js2c.host.linux-x86_64.mk b/tools/gyp/js2c.host.linux-x86_64.mk
index 0a2b5a8..27f05b1 100644
--- a/tools/gyp/js2c.host.linux-x86_64.mk
+++ b/tools/gyp/js2c.host.linux-x86_64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := js2c
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -21,9 +20,9 @@
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/weak_collection.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/runtime.js $(LOCAL_PATH)/v8/src/v8natives.js $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/array.js $(LOCAL_PATH)/v8/src/string.js $(LOCAL_PATH)/v8/src/uri.js $(LOCAL_PATH)/v8/third_party/fdlibm/fdlibm.js $(LOCAL_PATH)/v8/src/math.js $(LOCAL_PATH)/v8/src/apinatives.js $(LOCAL_PATH)/v8/src/date.js $(LOCAL_PATH)/v8/src/regexp.js $(LOCAL_PATH)/v8/src/arraybuffer.js $(LOCAL_PATH)/v8/src/typedarray.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/object-observe.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/weak-collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/promise.js $(LOCAL_PATH)/v8/src/messages.js $(LOCAL_PATH)/v8/src/json.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/string-iterator.js $(LOCAL_PATH)/v8/src/debug-debugger.js $(LOCAL_PATH)/v8/src/mirror-debugger.js $(LOCAL_PATH)/v8/src/liveedit-debugger.js $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/i18n.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../src/math.js ../../src/messages.js ../../src/apinatives.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/date.js ../../src/json.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/weak_collection.js ../../src/promise.js ../../src/object-observe.js ../../src/macros.py ../../src/i18n.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/libraries.cc" CORE off ../../src/runtime.js ../../src/v8natives.js ../../src/symbol.js ../../src/array.js ../../src/string.js ../../src/uri.js ../../third_party/fdlibm/fdlibm.js ../../src/math.js ../../src/apinatives.js ../../src/date.js ../../src/regexp.js ../../src/arraybuffer.js ../../src/typedarray.js ../../src/generator.js ../../src/object-observe.js ../../src/collection.js ../../src/weak-collection.js ../../src/collection-iterator.js ../../src/promise.js ../../src/messages.js ../../src/json.js ../../src/array-iterator.js ../../src/string-iterator.js ../../src/debug-debugger.js ../../src/mirror-debugger.js ../../src/liveedit-debugger.js ../../src/macros.py ../../src/i18n.js
 
 
 ### Rules for action "js2c_experimental":
@@ -32,9 +31,9 @@
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
 $(gyp_shared_intermediate_dir)/experimental-libraries.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
-$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/symbol.js $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/collection.js $(LOCAL_PATH)/v8/src/collection-iterator.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/array-iterator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-math.js $(GYP_TARGET_DEPENDENCIES)
+$(gyp_shared_intermediate_dir)/experimental-libraries.cc: $(LOCAL_PATH)/v8/tools/js2c.py $(LOCAL_PATH)/v8/src/macros.py $(LOCAL_PATH)/v8/src/proxy.js $(LOCAL_PATH)/v8/src/generator.js $(LOCAL_PATH)/v8/src/harmony-string.js $(LOCAL_PATH)/v8/src/harmony-array.js $(LOCAL_PATH)/v8/src/harmony-classes.js $(GYP_TARGET_DEPENDENCIES)
 	@echo "Gyp action: v8_tools_gyp_v8_gyp_js2c_host_js2c_experimental ($@)"
-	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/symbol.js ../../src/proxy.js ../../src/collection.js ../../src/collection-iterator.js ../../src/generator.js ../../src/array-iterator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-math.js
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_shared_intermediate_dir); python ../../tools/js2c.py "$(gyp_shared_intermediate_dir)/experimental-libraries.cc" EXPERIMENTAL off ../../src/macros.py ../../src/proxy.js ../../src/generator.js ../../src/harmony-string.js ../../src/harmony-array.js ../../src/harmony-classes.js
 
 
 
@@ -46,6 +45,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
diff --git a/tools/gyp/mksnapshot.host.darwin-arm.mk b/tools/gyp/mksnapshot.host.darwin-arm.mk
index e8911b3..d155991 100644
--- a/tools/gyp/mksnapshot.host.darwin-arm.mk
+++ b/tools/gyp/mksnapshot.host.darwin-arm.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -42,7 +41,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -50,9 +48,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -60,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -77,17 +83,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -107,20 +114,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -128,6 +134,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -140,7 +154,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -157,17 +170,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -181,16 +195,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -215,6 +228,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -222,8 +236,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
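
The mksnapshot.host.*.mk diffs that follow all make the same set of changes. Linking: the new v8_libplatform archive joins both GYP_TARGET_DEPENDENCIES and LOCAL_STATIC_LIBRARIES, and the generate_trig_table.stamp prerequisite disappears, which lines up with the fdlibm.js port added to the CORE natives above (upstream V8 replaced its pregenerated trigonometric table with the fdlibm port). Flags: -fno-exceptions moves from the shared C/C++ MY_CFLAGS_* sets into the C++-only LOCAL_CPPFLAGS_* sets, -std=gnu++11 and -gdwarf-4 are added, a block of Clang-specific -W options arrives together with LOCAL_CLANG := true, and the LOCAL_FDO_SUPPORT_* plumbing is dropped. A minimal sketch of how the per-configuration variables are consumed, using only assignments visible in these files (values abbreviated):

	# GYP_CONFIGURATION is Debug or Release; the computed variable name
	# selects that configuration's flag and define sets.
	MY_CFLAGS_Release := -Os -fno-ident
	MY_DEFS_Release := '-DNDEBUG' '-DNVALGRIND'
	LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
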
diff --git a/tools/gyp/mksnapshot.host.darwin-arm64.mk b/tools/gyp/mksnapshot.host.darwin-arm64.mk
index 99b5a7a..52de23f 100644
--- a/tools/gyp/mksnapshot.host.darwin-arm64.mk
+++ b/tools/gyp/mksnapshot.host.darwin-arm64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -42,7 +41,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -50,16 +48,24 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -76,11 +82,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -104,20 +112,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -125,6 +132,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -136,7 +151,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -153,11 +167,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -175,16 +191,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -209,6 +224,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -216,8 +232,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
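Aside: the LOCAL_CFLAGS assignment in the hunks above picks between the Debug and Release flag sets by splicing $(GYP_CONFIGURATION) into a variable name. A minimal, self-contained GNU Make sketch of that computed-variable pattern (the flag values and the Release default here are illustrative, not taken from the generated files):

GYP_CONFIGURATION ?= Release

MY_CFLAGS_Debug := -Os -g
MY_CFLAGS_Release := -O2 -fno-ident

# The inner $(GYP_CONFIGURATION) expands first, so this reads either
# MY_CFLAGS_Debug or MY_CFLAGS_Release.
LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION))

show:
	@echo Using: $(LOCAL_CFLAGS)

Running "make show GYP_CONFIGURATION=Debug" against this sketch prints the Debug set; the generated files use the same trick for every per-arch mksnapshot module.
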
diff --git a/tools/gyp/mksnapshot.host.darwin-mips.mk b/tools/gyp/mksnapshot.host.darwin-mips.mk
index d07116f..867293f 100644
--- a/tools/gyp/mksnapshot.host.darwin-mips.mk
+++ b/tools/gyp/mksnapshot.host.darwin-mips.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -43,7 +42,6 @@
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -51,9 +49,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -61,7 +68,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -78,11 +84,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -109,21 +117,20 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -131,6 +138,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -143,7 +158,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -160,11 +174,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -185,16 +201,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -219,6 +234,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -226,8 +242,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
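Aside: the GYP_TARGET_DEPENDENCIES entries above are built with GNU Make's $(call) on the AOSP helper intermediates-dir-for. A toy sketch of the mechanism only (the real helper takes more arguments and knows about host/target prefixes; this two-argument stand-in is hypothetical):

# $(1) = module class, $(2) = module name.
intermediates-dir-for = out/$(1)/$(2)_intermediates

dep := $(call intermediates-dir-for,STATIC_LIBRARIES,v8_libbase)/v8_libbase.a

show:
	@echo $(dep)

Listing the .a files as prerequisites this way only orders the build; the archives still have to be named again in LOCAL_STATIC_LIBRARIES to be linked.
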
diff --git a/tools/gyp/mksnapshot.host.darwin-mips64.mk b/tools/gyp/mksnapshot.host.darwin-mips64.mk
new file mode 100644
index 0000000..16e54b8
--- /dev/null
+++ b/tools/gyp/mksnapshot.host.darwin-mips64.mk
@@ -0,0 +1,253 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := EXECUTABLES
+LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_STEM := mksnapshot
+LOCAL_MODULE_SUFFIX := 
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/mksnapshot.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+
+LOCAL_LDFLAGS_Debug := \
+	-pthread \
+	-fPIC
+
+
+LOCAL_LDFLAGS_Release := \
+	-pthread \
+	-fPIC
+
+
+LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
+
+LOCAL_STATIC_LIBRARIES := \
+	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Enable grouping to fix circular references
+LOCAL_GROUP_STATIC_LIBRARIES := true
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: mksnapshot
+mksnapshot: v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)
+include $(BUILD_HOST_EXECUTABLE)
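Aside on the tail of this new file: the module's real name is the long arch-mangled string, and the two .PHONY targets are conveniences that forward to it. A hedged sketch of the alias pattern (the echo recipe stands in for the real compile/link rules):

real_target := v8_tools_gyp_mksnapshot_mips64_host_gyp

$(real_target):
	@echo building $@

.PHONY: mksnapshot gyp_all_modules
mksnapshot: $(real_target)
gyp_all_modules: $(real_target)

This is also why the diff can drop LOCAL_MODULE_TAGS := optional elsewhere without touching the aliases: tags control when a module is built and installed, not what it is called or how it is invoked.
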
diff --git a/tools/gyp/mksnapshot.host.darwin-x86.mk b/tools/gyp/mksnapshot.host.darwin-x86.mk
index d1aa85f..8a4aa3f 100644
--- a/tools/gyp/mksnapshot.host.darwin-x86.mk
+++ b/tools/gyp/mksnapshot.host.darwin-x86.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -42,7 +41,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -50,9 +48,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -60,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -77,11 +83,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -105,20 +113,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -126,6 +133,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -138,7 +153,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -155,11 +169,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -177,16 +193,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -211,6 +226,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -218,8 +234,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
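Aside: LOCAL_GROUP_STATIC_LIBRARIES above exists because v8_base and the icu archives reference each other. The usual way grouping is realized at link time (an assumption about what the AOSP flag expands to, not verified here) is --start-group/--end-group, which makes the linker rescan the archives until no new symbols resolve:

LIBS := libv8_base.a libicui18n.a libicuuc.a libicudata.a

mksnapshot: mksnapshot.o
	$(CXX) -o $@ $^ -Wl,--start-group $(LIBS) -Wl,--end-group

Without the group, a single left-to-right pass over $(LIBS) can miss symbols that an earlier archive needs from a later one.
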
diff --git a/tools/gyp/mksnapshot.host.darwin-x86_64.mk b/tools/gyp/mksnapshot.host.darwin-x86_64.mk
index 6118090..2608ae7 100644
--- a/tools/gyp/mksnapshot.host.darwin-x86_64.mk
+++ b/tools/gyp/mksnapshot.host.darwin-x86_64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -42,7 +41,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -50,9 +48,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -60,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -77,11 +83,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -105,20 +113,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -126,6 +133,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -138,7 +153,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -155,11 +169,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -177,16 +193,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -211,6 +226,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -218,8 +234,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
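Aside: the warning flags these hunks add (-Wheader-hygiene, -Wstring-conversion, -Wno-deprecated-register, ...) exist in clang but not in gcc, which is why each file also flips LOCAL_CLANG := true. A sketch of how a makefile would have to gate them if both compilers were still in play (USE_CLANG is an illustrative variable, not part of the generated files):

USE_CLANG ?= true

CLANG_ONLY_WARNINGS := \
	-Wheader-hygiene \
	-Wstring-conversion \
	-Wno-deprecated-register

ifeq ($(USE_CLANG),true)
CFLAGS += $(CLANG_ONLY_WARNINGS)
endif

The generated files skip the conditional because the compiler is pinned, per the "### Set directly by aosp_build_settings." comment.
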
diff --git a/tools/gyp/mksnapshot.host.linux-arm.mk b/tools/gyp/mksnapshot.host.linux-arm.mk
index 0076b9b..1536733 100644
--- a/tools/gyp/mksnapshot.host.linux-arm.mk
+++ b/tools/gyp/mksnapshot.host.linux-arm.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -42,18 +41,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -61,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -78,17 +83,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -108,28 +114,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -142,7 +154,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -159,17 +170,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -183,16 +195,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -221,6 +232,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -228,8 +240,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
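Aside: js2c.stamp above is a stamp file, the standard Make idiom for a step that emits many generated sources: dependents order themselves after one zero-byte file instead of listing every output. A sketch under illustrative names (generate_natives.py is hypothetical, not the real js2c invocation):

natives.stamp: $(wildcard src/*.js)
	python generate_natives.py --out out/ $^
	touch $@

mksnapshot.o: mksnapshot.cc natives.stamp
	$(CXX) -c -o $@ $<

The deleted generate_trig_table.stamp prerequisite follows from the same idiom: that generation step was dropped upstream, so its stamp no longer exists to depend on.
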
diff --git a/tools/gyp/mksnapshot.host.linux-arm64.mk b/tools/gyp/mksnapshot.host.linux-arm64.mk
index 49c56d7..c02efa3 100644
--- a/tools/gyp/mksnapshot.host.linux-arm64.mk
+++ b/tools/gyp/mksnapshot.host.linux-arm64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -42,25 +41,31 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -77,11 +82,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -105,28 +112,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -138,7 +151,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -155,11 +167,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -177,16 +191,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -215,6 +228,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -222,8 +236,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
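Aside: the recurring move of -fno-exceptions out of MY_CFLAGS_* and into LOCAL_CPPFLAGS_* (next to the new -std=gnu++11) matters because MY_CFLAGS reaches C files too, and C++-dialect options range from warnings to hard errors when handed to a C compilation. A minimal sketch of the split (file names illustrative):

COMMON_FLAGS := -fPIC -pthread
CXX_ONLY := -fno-exceptions -fno-rtti -std=gnu++11

%.o: %.c
	$(CC) $(COMMON_FLAGS) -c -o $@ $<

%.o: %.cc
	$(CXX) $(COMMON_FLAGS) $(CXX_ONLY) -c -o $@ $<
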
diff --git a/tools/gyp/mksnapshot.host.linux-mips.mk b/tools/gyp/mksnapshot.host.linux-mips.mk
index 881e249..12552f2 100644
--- a/tools/gyp/mksnapshot.host.linux-mips.mk
+++ b/tools/gyp/mksnapshot.host.linux-mips.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -43,18 +42,25 @@
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -62,7 +68,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -79,11 +84,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -110,29 +117,35 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -145,7 +158,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -162,11 +174,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -187,16 +201,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -225,6 +238,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -232,8 +246,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
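Aside: the quoting on the DATA_REDUCTION_* defines is doing two jobs. The outer single quotes are consumed by the shell that runs the compile command; the inner double quotes survive into the -D value, so the macro expands to a C string literal. Reduced sketch (DEV_HOST is a shortened, illustrative macro name):

DEFS := '-DDEV_HOST="https://proxy-dev.googlezip.net:443/"'

probe.o: probe.c
	$(CC) $(DEFS) -c -o $@ $<

Inside probe.c, DEV_HOST can then be used anywhere a const char * is expected, e.g. printf("%s\n", DEV_HOST);
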
diff --git a/tools/gyp/mksnapshot.host.linux-mips64.mk b/tools/gyp/mksnapshot.host.linux-mips64.mk
new file mode 100644
index 0000000..5c17490
--- /dev/null
+++ b/tools/gyp/mksnapshot.host.linux-mips64.mk
@@ -0,0 +1,257 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := EXECUTABLES
+LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_STEM := mksnapshot
+LOCAL_MODULE_SUFFIX := 
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/mksnapshot.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+
+LOCAL_LDFLAGS_Debug := \
+	-Wl,-z,now \
+	-Wl,-z,relro \
+	-pthread \
+	-fPIC
+
+
+LOCAL_LDFLAGS_Release := \
+	-Wl,-z,now \
+	-Wl,-z,relro \
+	-pthread \
+	-fPIC
+
+
+LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
+
+LOCAL_STATIC_LIBRARIES := \
+	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Enable grouping to fix circular references
+LOCAL_GROUP_STATIC_LIBRARIES := true
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: mksnapshot
+mksnapshot: v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+LOCAL_MODULE_PATH := $(gyp_shared_intermediate_dir)
+include $(BUILD_HOST_EXECUTABLE)
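
Note on the generated module above: it builds the host-side mksnapshot tool used when targeting MIPS64, linking the host-built v8 and ICU static archives listed in GYP_TARGET_DEPENDENCIES. LOCAL_GROUP_STATIC_LIBRARIES := true makes the Android build wrap those archives in a linker group, so their mutual (circular) references resolve regardless of archive order. A rough sketch of the resulting link command, with object and archive names shortened and illustrative rather than exact:

	g++ -o mksnapshot mksnapshot.o \
		-Wl,--start-group \
		libv8_base.a libv8_nosnapshot.a libv8_libplatform.a libv8_libbase.a \
		libicui18n.a libicuuc.a libicudata.a \
		-Wl,--end-group \
		-Wl,-z,now -Wl,-z,relro -pthread -fPIC
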
diff --git a/tools/gyp/mksnapshot.host.linux-x86.mk b/tools/gyp/mksnapshot.host.linux-x86.mk
index a2eace4..e7b1912 100644
--- a/tools/gyp/mksnapshot.host.linux-x86.mk
+++ b/tools/gyp/mksnapshot.host.linux-x86.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -42,18 +41,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -61,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -78,11 +83,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -106,28 +113,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -140,7 +153,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -157,11 +169,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -179,16 +193,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -217,6 +230,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -224,8 +238,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
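
The x86 variant above follows the same per-arch instantiation scheme as the other mksnapshot host makefiles: the computed reference $(TARGET_$(GYP_VAR_PREFIX)ARCH) folds the target CPU name into the host module name, so one host mksnapshot is built per snapshotted target architecture. A minimal illustration of that expansion, with the variable values assumed for a 32-bit x86 target:

	GYP_VAR_PREFIX :=
	TARGET_ARCH := x86
	# $(TARGET_$(GYP_VAR_PREFIX)ARCH) -> $(TARGET_ARCH) -> x86
	LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
	# -> v8_tools_gyp_mksnapshot_x86_host_gyp
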
diff --git a/tools/gyp/mksnapshot.host.linux-x86_64.mk b/tools/gyp/mksnapshot.host.linux-x86_64.mk
index 4185769..b41ba3b 100644
--- a/tools/gyp/mksnapshot.host.linux-x86_64.mk
+++ b/tools/gyp/mksnapshot.host.linux-x86_64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_mksnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_STEM := mksnapshot
 LOCAL_MODULE_SUFFIX := 
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -16,12 +15,12 @@
 GYP_TARGET_DEPENDENCIES := \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
 	$(call intermediates-dir-for,STATIC_LIBRARIES,third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/third_party_icu_icudata_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp.a \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -42,18 +41,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -61,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -78,11 +83,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -106,28 +113,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -140,7 +153,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -157,11 +169,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -179,16 +193,15 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
@@ -217,6 +230,7 @@
 LOCAL_STATIC_LIBRARIES := \
 	v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
+	v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icui18n_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
 	third_party_icu_icuuc_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp \
@@ -224,8 +238,8 @@
 
 # Enable grouping to fix circular references
 LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
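
All three mksnapshot host makefiles pick their per-configuration flag sets the same way: GYP_CONFIGURATION names Debug or Release, and computed variable references such as $(MY_CFLAGS_$(GYP_CONFIGURATION)) select the matching list. A minimal standalone sketch of the mechanism, with flag values abbreviated from the lists above:

	GYP_CONFIGURATION := Release
	MY_CFLAGS_Debug := -Os -g -gdwarf-4
	MY_CFLAGS_Release := -fno-ident -O2
	# computed variable name: expands to MY_CFLAGS_Release here
	LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION))
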
diff --git a/tools/gyp/v8.gyp b/tools/gyp/v8.gyp
index 72450d8..c63cd94 100644
--- a/tools/gyp/v8.gyp
+++ b/tools/gyp/v8.gyp
@@ -42,18 +42,37 @@
         }, {
           'toolsets': ['target'],
         }],
-        ['v8_use_snapshot=="true"', {
+
+        ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
           # The dependency on v8_base should come from a transitive
           # dependency however the Android toolchain requires libv8_base.a
           # to appear before libv8_snapshot.a so it's listed explicitly.
           'dependencies': ['v8_base', 'v8_snapshot'],
-        },
-        {
+        }],
+        ['v8_use_snapshot!="true" and v8_use_external_startup_data==0', {
           # The dependency on v8_base should come from a transitive
           # dependency however the Android toolchain requires libv8_base.a
           # to appear before libv8_snapshot.a so it's listed explicitly.
           'dependencies': ['v8_base', 'v8_nosnapshot'],
         }],
+        ['v8_use_external_startup_data==1 and want_separate_host_toolset==1', {
+          'dependencies': ['v8_base', 'v8_external_snapshot'],
+          'target_conditions': [
+            ['_toolset=="host"', {
+              'inputs': [
+                '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+              ],
+            }, {
+              'inputs': [
+                '<(PRODUCT_DIR)/snapshot_blob.bin',
+              ],
+            }],
+          ],
+        }],
+        ['v8_use_external_startup_data==1 and want_separate_host_toolset==0', {
+          'dependencies': ['v8_base', 'v8_external_snapshot'],
+          'inputs': [ '<(PRODUCT_DIR)/snapshot_blob.bin', ],
+        }],
         ['component=="shared_library"', {
           'type': '<(component)',
           'sources': [
@@ -114,14 +133,12 @@
           'dependencies': [
             'mksnapshot#host',
             'js2c#host',
-            'generate_trig_table#host',
           ],
         }, {
           'toolsets': ['target'],
           'dependencies': [
             'mksnapshot',
             'js2c',
-            'generate_trig_table',
           ],
         }],
         ['component=="shared_library"', {
@@ -146,8 +163,8 @@
       'sources': [
         '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
         '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
-        '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
         '<(INTERMEDIATE_DIR)/snapshot.cc',
+        '../../src/snapshot-common.cc',
       ],
       'actions': [
         {
@@ -172,7 +189,7 @@
           'action': [
             '<@(_inputs)',
             '<@(mksnapshot_flags)',
-            '<@(_outputs)'
+            '<@(INTERMEDIATE_DIR)/snapshot.cc'
           ],
         },
       ],
@@ -189,16 +206,16 @@
       'sources': [
         '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
         '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
-        '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
+        '../../src/snapshot-common.cc',
         '../../src/snapshot-empty.cc',
       ],
       'conditions': [
         ['want_separate_host_toolset==1', {
           'toolsets': ['host', 'target'],
-          'dependencies': ['js2c#host', 'generate_trig_table#host'],
+          'dependencies': ['js2c#host'],
         }, {
           'toolsets': ['target'],
-          'dependencies': ['js2c', 'generate_trig_table'],
+          'dependencies': ['js2c'],
         }],
         ['component=="shared_library"', {
           'defines': [
@@ -208,31 +225,94 @@
         }],
       ]
     },
-    { 'target_name': 'generate_trig_table',
-      'type': 'none',
+    {
+      'target_name': 'v8_external_snapshot',
+      'type': 'static_library',
       'conditions': [
         ['want_separate_host_toolset==1', {
-          'toolsets': ['host'],
-        }, {
+          'toolsets': ['host', 'target'],
+          'dependencies': [
+            'mksnapshot#host',
+            'js2c#host',
+            'natives_blob',
+        ]}, {
           'toolsets': ['target'],
+          'dependencies': [
+            'mksnapshot',
+            'js2c',
+            'natives_blob',
+          ],
         }],
+        ['component=="shared_library"', {
+          'defines': [
+            'V8_SHARED',
+            'BUILDING_V8_SHARED',
+          ],
+          'direct_dependent_settings': {
+            'defines': [
+              'V8_SHARED',
+              'USING_V8_SHARED',
+            ],
+          },
+        }],
+      ],
+      'dependencies': [
+        'v8_base',
+      ],
+      'include_dirs+': [
+        '../..',
+      ],
+      'sources': [
+        '../../src/natives-external.cc',
+        '../../src/snapshot-external.cc',
       ],
       'actions': [
         {
-          'action_name': 'generate',
+          'action_name': 'run_mksnapshot (external)',
           'inputs': [
-            '../../tools/generate-trig-table.py',
+            '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
           ],
-          'outputs': [
-            '<(SHARED_INTERMEDIATE_DIR)/trig-table.cc',
+          'conditions': [
+            ['want_separate_host_toolset==1', {
+              'target_conditions': [
+                ['_toolset=="host"', {
+                  'outputs': [
+                    '<(INTERMEDIATE_DIR)/snapshot.cc',
+                    '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+                  ],
+                }, {
+                  'outputs': [
+                    '<(INTERMEDIATE_DIR)/snapshot.cc',
+                    '<(PRODUCT_DIR)/snapshot_blob.bin',
+                  ],
+                }],
+              ],
+            }, {
+              'outputs': [
+                '<(INTERMEDIATE_DIR)/snapshot.cc',
+                '<(PRODUCT_DIR)/snapshot_blob.bin',
+              ],
+            }],
           ],
+          'variables': {
+            'mksnapshot_flags': [
+              '--log-snapshot-positions',
+              '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
+            ],
+            'conditions': [
+              ['v8_random_seed!=0', {
+                'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+              }],
+            ],
+          },
           'action': [
-            'python',
-            '../../tools/generate-trig-table.py',
-            '<@(_outputs)',
+            '<@(_inputs)',
+            '<@(mksnapshot_flags)',
+            '<@(INTERMEDIATE_DIR)/snapshot.cc',
+            '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
           ],
         },
-      ]
+      ],
     },
     {
       'target_name': 'v8_base',
@@ -263,8 +343,14 @@
         '../../src/assembler.h',
         '../../src/assert-scope.h',
         '../../src/assert-scope.cc',
+        '../../src/ast-value-factory.cc',
+        '../../src/ast-value-factory.h',
         '../../src/ast.cc',
         '../../src/ast.h',
+        '../../src/background-parsing-task.cc',
+        '../../src/background-parsing-task.h',
+        '../../src/bailout-reason.cc',
+        '../../src/bailout-reason.h',
         '../../src/bignum-dtoa.cc',
         '../../src/bignum-dtoa.h',
         '../../src/bignum.cc',
@@ -282,6 +368,8 @@
         '../../src/checks.h',
         '../../src/circular-queue-inl.h',
         '../../src/circular-queue.h',
+        '../../src/code-factory.cc',
+        '../../src/code-factory.h',
         '../../src/code-stubs.cc',
         '../../src/code-stubs.h',
         '../../src/code-stubs-hydrogen.cc',
@@ -290,6 +378,107 @@
         '../../src/codegen.h',
         '../../src/compilation-cache.cc',
         '../../src/compilation-cache.h',
+        '../../src/compiler/access-builder.cc',
+        '../../src/compiler/access-builder.h',
+        '../../src/compiler/ast-graph-builder.cc',
+        '../../src/compiler/ast-graph-builder.h',
+        '../../src/compiler/change-lowering.cc',
+        '../../src/compiler/change-lowering.h',
+        '../../src/compiler/code-generator-impl.h',
+        '../../src/compiler/code-generator.cc',
+        '../../src/compiler/code-generator.h',
+        '../../src/compiler/common-node-cache.h',
+        '../../src/compiler/common-operator.cc',
+        '../../src/compiler/common-operator.h',
+        '../../src/compiler/control-builders.cc',
+        '../../src/compiler/control-builders.h',
+        '../../src/compiler/frame.h',
+        '../../src/compiler/gap-resolver.cc',
+        '../../src/compiler/gap-resolver.h',
+        '../../src/compiler/generic-algorithm-inl.h',
+        '../../src/compiler/generic-algorithm.h',
+        '../../src/compiler/generic-graph.h',
+        '../../src/compiler/generic-node-inl.h',
+        '../../src/compiler/generic-node.h',
+        '../../src/compiler/graph-builder.cc',
+        '../../src/compiler/graph-builder.h',
+        '../../src/compiler/graph-inl.h',
+        '../../src/compiler/graph-reducer.cc',
+        '../../src/compiler/graph-reducer.h',
+        '../../src/compiler/graph-replay.cc',
+        '../../src/compiler/graph-replay.h',
+        '../../src/compiler/graph-visualizer.cc',
+        '../../src/compiler/graph-visualizer.h',
+        '../../src/compiler/graph.cc',
+        '../../src/compiler/graph.h',
+        '../../src/compiler/instruction-codes.h',
+        '../../src/compiler/instruction-selector-impl.h',
+        '../../src/compiler/instruction-selector.cc',
+        '../../src/compiler/instruction-selector.h',
+        '../../src/compiler/instruction.cc',
+        '../../src/compiler/instruction.h',
+        '../../src/compiler/js-builtin-reducer.cc',
+        '../../src/compiler/js-builtin-reducer.h',
+        '../../src/compiler/js-context-specialization.cc',
+        '../../src/compiler/js-context-specialization.h',
+        '../../src/compiler/js-generic-lowering.cc',
+        '../../src/compiler/js-generic-lowering.h',
+        '../../src/compiler/js-graph.cc',
+        '../../src/compiler/js-graph.h',
+        '../../src/compiler/js-inlining.cc',
+        '../../src/compiler/js-inlining.h',
+        '../../src/compiler/js-operator.h',
+        '../../src/compiler/js-typed-lowering.cc',
+        '../../src/compiler/js-typed-lowering.h',
+        '../../src/compiler/linkage-impl.h',
+        '../../src/compiler/linkage.cc',
+        '../../src/compiler/linkage.h',
+        '../../src/compiler/machine-operator-reducer.cc',
+        '../../src/compiler/machine-operator-reducer.h',
+        '../../src/compiler/machine-operator.cc',
+        '../../src/compiler/machine-operator.h',
+        '../../src/compiler/machine-type.cc',
+        '../../src/compiler/machine-type.h',
+        '../../src/compiler/node-aux-data-inl.h',
+        '../../src/compiler/node-aux-data.h',
+        '../../src/compiler/node-cache.cc',
+        '../../src/compiler/node-cache.h',
+        '../../src/compiler/node-matchers.h',
+        '../../src/compiler/node-properties-inl.h',
+        '../../src/compiler/node-properties.h',
+        '../../src/compiler/node.cc',
+        '../../src/compiler/node.h',
+        '../../src/compiler/opcodes.h',
+        '../../src/compiler/operator-properties-inl.h',
+        '../../src/compiler/operator-properties.h',
+        '../../src/compiler/operator.cc',
+        '../../src/compiler/operator.h',
+        '../../src/compiler/phi-reducer.h',
+        '../../src/compiler/pipeline.cc',
+        '../../src/compiler/pipeline.h',
+        '../../src/compiler/raw-machine-assembler.cc',
+        '../../src/compiler/raw-machine-assembler.h',
+        '../../src/compiler/register-allocator.cc',
+        '../../src/compiler/register-allocator.h',
+        '../../src/compiler/representation-change.h',
+        '../../src/compiler/schedule.cc',
+        '../../src/compiler/schedule.h',
+        '../../src/compiler/scheduler.cc',
+        '../../src/compiler/scheduler.h',
+        '../../src/compiler/simplified-lowering.cc',
+        '../../src/compiler/simplified-lowering.h',
+        '../../src/compiler/simplified-operator-reducer.cc',
+        '../../src/compiler/simplified-operator-reducer.h',
+        '../../src/compiler/simplified-operator.cc',
+        '../../src/compiler/simplified-operator.h',
+        '../../src/compiler/source-position.cc',
+        '../../src/compiler/source-position.h',
+        '../../src/compiler/typer.cc',
+        '../../src/compiler/typer.h',
+        '../../src/compiler/value-numbering-reducer.cc',
+        '../../src/compiler/value-numbering-reducer.h',
+        '../../src/compiler/verifier.cc',
+        '../../src/compiler/verifier.h',
         '../../src/compiler.cc',
         '../../src/compiler.h',
         '../../src/contexts.cc',
@@ -302,8 +491,6 @@
         '../../src/cpu-profiler-inl.h',
         '../../src/cpu-profiler.cc',
         '../../src/cpu-profiler.h',
-        '../../src/cpu.cc',
-        '../../src/cpu.h',
         '../../src/data-flow.cc',
         '../../src/data-flow.h',
         '../../src/date.cc',
@@ -345,7 +532,6 @@
         '../../src/fast-dtoa.cc',
         '../../src/fast-dtoa.h',
         '../../src/feedback-slots.h',
-        '../../src/field-index.cc',
         '../../src/field-index.h',
         '../../src/field-index-inl.h',
         '../../src/fixed-dtoa.cc',
@@ -369,14 +555,35 @@
         '../../src/handles.cc',
         '../../src/handles.h',
         '../../src/hashmap.h',
-        '../../src/heap-inl.h',
         '../../src/heap-profiler.cc',
         '../../src/heap-profiler.h',
         '../../src/heap-snapshot-generator-inl.h',
         '../../src/heap-snapshot-generator.cc',
         '../../src/heap-snapshot-generator.h',
-        '../../src/heap.cc',
-        '../../src/heap.h',
+        '../../src/heap/gc-idle-time-handler.cc',
+        '../../src/heap/gc-idle-time-handler.h',
+        '../../src/heap/gc-tracer.cc',
+        '../../src/heap/gc-tracer.h',
+        '../../src/heap/heap-inl.h',
+        '../../src/heap/heap.cc',
+        '../../src/heap/heap.h',
+        '../../src/heap/incremental-marking-inl.h',
+        '../../src/heap/incremental-marking.cc',
+        '../../src/heap/incremental-marking.h',
+        '../../src/heap/mark-compact-inl.h',
+        '../../src/heap/mark-compact.cc',
+        '../../src/heap/mark-compact.h',
+        '../../src/heap/objects-visiting-inl.h',
+        '../../src/heap/objects-visiting.cc',
+        '../../src/heap/objects-visiting.h',
+        '../../src/heap/spaces-inl.h',
+        '../../src/heap/spaces.cc',
+        '../../src/heap/spaces.h',
+        '../../src/heap/store-buffer-inl.h',
+        '../../src/heap/store-buffer.cc',
+        '../../src/heap/store-buffer.h',
+        '../../src/heap/sweeper-thread.h',
+        '../../src/heap/sweeper-thread.cc',
         '../../src/hydrogen-alias-analysis.h',
         '../../src/hydrogen-bce.cc',
         '../../src/hydrogen-bce.h',
@@ -433,13 +640,23 @@
         '../../src/i18n.h',
         '../../src/icu_util.cc',
         '../../src/icu_util.h',
-        '../../src/ic-inl.h',
-        '../../src/ic.cc',
-        '../../src/ic.h',
-        '../../src/incremental-marking.cc',
-        '../../src/incremental-marking.h',
+        '../../src/ic/access-compiler.cc',
+        '../../src/ic/access-compiler.h',
+        '../../src/ic/call-optimization.cc',
+        '../../src/ic/call-optimization.h',
+        '../../src/ic/handler-compiler.cc',
+        '../../src/ic/handler-compiler.h',
+        '../../src/ic/ic-inl.h',
+        '../../src/ic/ic-state.cc',
+        '../../src/ic/ic-state.h',
+        '../../src/ic/ic.cc',
+        '../../src/ic/ic.h',
+        '../../src/ic/ic-compiler.cc',
+        '../../src/ic/ic-compiler.h',
         '../../src/interface.cc',
         '../../src/interface.h',
+        '../../src/interface-descriptors.cc',
+        '../../src/interface-descriptors.h',
         '../../src/interpreter-irregexp.cc',
         '../../src/interpreter-irregexp.h',
         '../../src/isolate.cc',
@@ -449,13 +666,6 @@
         '../../src/jsregexp-inl.h',
         '../../src/jsregexp.cc',
         '../../src/jsregexp.h',
-        # TODO(jochen): move libplatform/ files to their own target.
-        '../../src/libplatform/default-platform.cc',
-        '../../src/libplatform/default-platform.h',
-        '../../src/libplatform/task-queue.cc',
-        '../../src/libplatform/task-queue.h',
-        '../../src/libplatform/worker-thread.cc',
-        '../../src/libplatform/worker-thread.h',
         '../../src/list-inl.h',
         '../../src/list.h',
         '../../src/lithium-allocator-inl.h',
@@ -465,6 +675,7 @@
         '../../src/lithium-codegen.h',
         '../../src/lithium.cc',
         '../../src/lithium.h',
+        '../../src/lithium-inl.h',
         '../../src/liveedit.cc',
         '../../src/liveedit.h',
         '../../src/log-inl.h',
@@ -472,11 +683,10 @@
         '../../src/log-utils.h',
         '../../src/log.cc',
         '../../src/log.h',
+        '../../src/lookup-inl.h',
         '../../src/lookup.cc',
         '../../src/lookup.h',
         '../../src/macro-assembler.h',
-        '../../src/mark-compact.cc',
-        '../../src/mark-compact.h',
         '../../src/messages.cc',
         '../../src/messages.h',
         '../../src/msan.h',
@@ -484,24 +694,16 @@
         '../../src/objects-debug.cc',
         '../../src/objects-inl.h',
         '../../src/objects-printer.cc',
-        '../../src/objects-visiting.cc',
-        '../../src/objects-visiting.h',
         '../../src/objects.cc',
         '../../src/objects.h',
-        '../../src/optimizing-compiler-thread.h',
         '../../src/optimizing-compiler-thread.cc',
+        '../../src/optimizing-compiler-thread.h',
+        '../../src/ostreams.cc',
+        '../../src/ostreams.h',
         '../../src/parser.cc',
         '../../src/parser.h',
-        '../../src/platform/elapsed-timer.h',
-        '../../src/platform/time.cc',
-        '../../src/platform/time.h',
-        '../../src/platform.h',
-        '../../src/platform/condition-variable.cc',
-        '../../src/platform/condition-variable.h',
-        '../../src/platform/mutex.cc',
-        '../../src/platform/mutex.h',
-        '../../src/platform/semaphore.cc',
-        '../../src/platform/semaphore.h',
+        '../../src/perf-jit.cc',
+        '../../src/perf-jit.h',
         '../../src/preparse-data-format.h',
         '../../src/preparse-data.cc',
         '../../src/preparse-data.h',
@@ -515,6 +717,7 @@
         '../../src/property-details.h',
         '../../src/property.cc',
         '../../src/property.h',
+        '../../src/prototype.h',
         '../../src/regexp-macro-assembler-irregexp-inl.h',
         '../../src/regexp-macro-assembler-irregexp.cc',
         '../../src/regexp-macro-assembler-irregexp.h',
@@ -546,29 +749,25 @@
         '../../src/serialize.h',
         '../../src/small-pointer-list.h',
         '../../src/smart-pointers.h',
-        '../../src/snapshot-common.cc',
         '../../src/snapshot.h',
-        '../../src/spaces-inl.h',
-        '../../src/spaces.cc',
-        '../../src/spaces.h',
-        '../../src/store-buffer-inl.h',
-        '../../src/store-buffer.cc',
-        '../../src/store-buffer.h',
+        '../../src/snapshot-source-sink.cc',
+        '../../src/snapshot-source-sink.h',
         '../../src/string-search.cc',
         '../../src/string-search.h',
         '../../src/string-stream.cc',
         '../../src/string-stream.h',
         '../../src/strtod.cc',
         '../../src/strtod.h',
-        '../../src/stub-cache.cc',
-        '../../src/stub-cache.h',
-        '../../src/sweeper-thread.h',
-        '../../src/sweeper-thread.cc',
+        '../../src/ic/stub-cache.cc',
+        '../../src/ic/stub-cache.h',
         '../../src/token.cc',
         '../../src/token.h',
         '../../src/transitions-inl.h',
         '../../src/transitions.cc',
         '../../src/transitions.h',
+        '../../src/type-feedback-vector-inl.h',
+        '../../src/type-feedback-vector.cc',
+        '../../src/type-feedback-vector.h',
         '../../src/type-info.cc',
         '../../src/type-info.h',
         '../../src/types-inl.h',
@@ -586,11 +785,8 @@
         '../../src/utils-inl.h',
         '../../src/utils.cc',
         '../../src/utils.h',
-        '../../src/utils/random-number-generator.cc',
-        '../../src/utils/random-number-generator.h',
         '../../src/v8.cc',
         '../../src/v8.h',
-        '../../src/v8checks.h',
         '../../src/v8memory.h',
         '../../src/v8threads.cc',
         '../../src/v8threads.h',
@@ -604,6 +800,8 @@
         '../../src/zone-inl.h',
         '../../src/zone.cc',
         '../../src/zone.h',
+        '../../third_party/fdlibm/fdlibm.cc',
+        '../../third_party/fdlibm/fdlibm.h',
       ],
       'conditions': [
         ['want_separate_host_toolset==1', {
@@ -630,7 +828,8 @@
             '../../src/arm/frames-arm.cc',
             '../../src/arm/frames-arm.h',
             '../../src/arm/full-codegen-arm.cc',
-            '../../src/arm/ic-arm.cc',
+            '../../src/arm/interface-descriptors-arm.cc',
+            '../../src/arm/interface-descriptors-arm.h',
             '../../src/arm/lithium-arm.cc',
             '../../src/arm/lithium-arm.h',
             '../../src/arm/lithium-codegen-arm.cc',
@@ -642,7 +841,15 @@
             '../../src/arm/regexp-macro-assembler-arm.cc',
             '../../src/arm/regexp-macro-assembler-arm.h',
             '../../src/arm/simulator-arm.cc',
-            '../../src/arm/stub-cache-arm.cc',
+            '../../src/compiler/arm/code-generator-arm.cc',
+            '../../src/compiler/arm/instruction-codes-arm.h',
+            '../../src/compiler/arm/instruction-selector-arm.cc',
+            '../../src/compiler/arm/linkage-arm.cc',
+            '../../src/ic/arm/access-compiler-arm.cc',
+            '../../src/ic/arm/handler-compiler-arm.cc',
+            '../../src/ic/arm/ic-arm.cc',
+            '../../src/ic/arm/ic-compiler-arm.cc',
+            '../../src/ic/arm/stub-cache-arm.cc',
           ],
         }],
         ['v8_target_arch=="arm64"', {
@@ -661,17 +868,21 @@
             '../../src/arm64/decoder-arm64.cc',
             '../../src/arm64/decoder-arm64.h',
             '../../src/arm64/decoder-arm64-inl.h',
+            '../../src/arm64/delayed-masm-arm64.cc',
+            '../../src/arm64/delayed-masm-arm64.h',
+            '../../src/arm64/delayed-masm-arm64-inl.h',
             '../../src/arm64/deoptimizer-arm64.cc',
             '../../src/arm64/disasm-arm64.cc',
             '../../src/arm64/disasm-arm64.h',
             '../../src/arm64/frames-arm64.cc',
             '../../src/arm64/frames-arm64.h',
             '../../src/arm64/full-codegen-arm64.cc',
-            '../../src/arm64/ic-arm64.cc',
             '../../src/arm64/instructions-arm64.cc',
             '../../src/arm64/instructions-arm64.h',
             '../../src/arm64/instrument-arm64.cc',
             '../../src/arm64/instrument-arm64.h',
+            '../../src/arm64/interface-descriptors-arm64.cc',
+            '../../src/arm64/interface-descriptors-arm64.h',
             '../../src/arm64/lithium-arm64.cc',
             '../../src/arm64/lithium-arm64.h',
             '../../src/arm64/lithium-codegen-arm64.cc',
@@ -685,9 +896,17 @@
             '../../src/arm64/regexp-macro-assembler-arm64.h',
             '../../src/arm64/simulator-arm64.cc',
             '../../src/arm64/simulator-arm64.h',
-            '../../src/arm64/stub-cache-arm64.cc',
             '../../src/arm64/utils-arm64.cc',
             '../../src/arm64/utils-arm64.h',
+            '../../src/compiler/arm64/code-generator-arm64.cc',
+            '../../src/compiler/arm64/instruction-codes-arm64.h',
+            '../../src/compiler/arm64/instruction-selector-arm64.cc',
+            '../../src/compiler/arm64/linkage-arm64.cc',
+            '../../src/ic/arm64/access-compiler-arm64.cc',
+            '../../src/ic/arm64/handler-compiler-arm64.cc',
+            '../../src/ic/arm64/ic-arm64.cc',
+            '../../src/ic/arm64/ic-compiler-arm64.cc',
+            '../../src/ic/arm64/stub-cache-arm64.cc',
           ],
         }],
         ['v8_target_arch=="ia32"', {
@@ -707,7 +926,7 @@
             '../../src/ia32/frames-ia32.cc',
             '../../src/ia32/frames-ia32.h',
             '../../src/ia32/full-codegen-ia32.cc',
-            '../../src/ia32/ic-ia32.cc',
+            '../../src/ia32/interface-descriptors-ia32.cc',
             '../../src/ia32/lithium-codegen-ia32.cc',
             '../../src/ia32/lithium-codegen-ia32.h',
             '../../src/ia32/lithium-gap-resolver-ia32.cc',
@@ -718,7 +937,15 @@
             '../../src/ia32/macro-assembler-ia32.h',
             '../../src/ia32/regexp-macro-assembler-ia32.cc',
             '../../src/ia32/regexp-macro-assembler-ia32.h',
-            '../../src/ia32/stub-cache-ia32.cc',
+            '../../src/compiler/ia32/code-generator-ia32.cc',
+            '../../src/compiler/ia32/instruction-codes-ia32.h',
+            '../../src/compiler/ia32/instruction-selector-ia32.cc',
+            '../../src/compiler/ia32/linkage-ia32.cc',
+            '../../src/ic/ia32/access-compiler-ia32.cc',
+            '../../src/ic/ia32/handler-compiler-ia32.cc',
+            '../../src/ic/ia32/ic-ia32.cc',
+            '../../src/ic/ia32/ic-compiler-ia32.cc',
+            '../../src/ic/ia32/stub-cache-ia32.cc',
           ],
         }],
         ['v8_target_arch=="x87"', {
@@ -738,7 +965,7 @@
             '../../src/x87/frames-x87.cc',
             '../../src/x87/frames-x87.h',
             '../../src/x87/full-codegen-x87.cc',
-            '../../src/x87/ic-x87.cc',
+            '../../src/x87/interface-descriptors-x87.cc',
             '../../src/x87/lithium-codegen-x87.cc',
             '../../src/x87/lithium-codegen-x87.h',
             '../../src/x87/lithium-gap-resolver-x87.cc',
@@ -749,7 +976,11 @@
             '../../src/x87/macro-assembler-x87.h',
             '../../src/x87/regexp-macro-assembler-x87.cc',
             '../../src/x87/regexp-macro-assembler-x87.h',
-            '../../src/x87/stub-cache-x87.cc',
+            '../../src/ic/x87/access-compiler-x87.cc',
+            '../../src/ic/x87/handler-compiler-x87.cc',
+            '../../src/ic/x87/ic-x87.cc',
+            '../../src/ic/x87/ic-compiler-x87.cc',
+            '../../src/ic/x87/stub-cache-x87.cc',
           ],
         }],
         ['v8_target_arch=="mips" or v8_target_arch=="mipsel"', {
@@ -771,7 +1002,7 @@
             '../../src/mips/frames-mips.cc',
             '../../src/mips/frames-mips.h',
             '../../src/mips/full-codegen-mips.cc',
-            '../../src/mips/ic-mips.cc',
+            '../../src/mips/interface-descriptors-mips.cc',
             '../../src/mips/lithium-codegen-mips.cc',
             '../../src/mips/lithium-codegen-mips.h',
             '../../src/mips/lithium-gap-resolver-mips.cc',
@@ -783,10 +1014,52 @@
             '../../src/mips/regexp-macro-assembler-mips.cc',
             '../../src/mips/regexp-macro-assembler-mips.h',
             '../../src/mips/simulator-mips.cc',
-            '../../src/mips/stub-cache-mips.cc',
+            '../../src/ic/mips/access-compiler-mips.cc',
+            '../../src/ic/mips/handler-compiler-mips.cc',
+            '../../src/ic/mips/ic-mips.cc',
+            '../../src/ic/mips/ic-compiler-mips.cc',
+            '../../src/ic/mips/stub-cache-mips.cc',
           ],
         }],
-        ['v8_target_arch=="x64"', {
+        ['v8_target_arch=="mips64el"', {
+          'sources': [  ### gcmole(arch:mips64el) ###
+            '../../src/mips64/assembler-mips64.cc',
+            '../../src/mips64/assembler-mips64.h',
+            '../../src/mips64/assembler-mips64-inl.h',
+            '../../src/mips64/builtins-mips64.cc',
+            '../../src/mips64/codegen-mips64.cc',
+            '../../src/mips64/codegen-mips64.h',
+            '../../src/mips64/code-stubs-mips64.cc',
+            '../../src/mips64/code-stubs-mips64.h',
+            '../../src/mips64/constants-mips64.cc',
+            '../../src/mips64/constants-mips64.h',
+            '../../src/mips64/cpu-mips64.cc',
+            '../../src/mips64/debug-mips64.cc',
+            '../../src/mips64/deoptimizer-mips64.cc',
+            '../../src/mips64/disasm-mips64.cc',
+            '../../src/mips64/frames-mips64.cc',
+            '../../src/mips64/frames-mips64.h',
+            '../../src/mips64/full-codegen-mips64.cc',
+            '../../src/mips64/interface-descriptors-mips64.cc',
+            '../../src/mips64/lithium-codegen-mips64.cc',
+            '../../src/mips64/lithium-codegen-mips64.h',
+            '../../src/mips64/lithium-gap-resolver-mips64.cc',
+            '../../src/mips64/lithium-gap-resolver-mips64.h',
+            '../../src/mips64/lithium-mips64.cc',
+            '../../src/mips64/lithium-mips64.h',
+            '../../src/mips64/macro-assembler-mips64.cc',
+            '../../src/mips64/macro-assembler-mips64.h',
+            '../../src/mips64/regexp-macro-assembler-mips64.cc',
+            '../../src/mips64/regexp-macro-assembler-mips64.h',
+            '../../src/mips64/simulator-mips64.cc',
+            '../../src/ic/mips64/access-compiler-mips64.cc',
+            '../../src/ic/mips64/handler-compiler-mips64.cc',
+            '../../src/ic/mips64/ic-mips64.cc',
+            '../../src/ic/mips64/ic-compiler-mips64.cc',
+            '../../src/ic/mips64/stub-cache-mips64.cc',
+          ],
+        }],
+        ['v8_target_arch=="x64" or v8_target_arch=="x32"', {
           'sources': [  ### gcmole(arch:x64) ###
             '../../src/x64/assembler-x64-inl.h',
             '../../src/x64/assembler-x64.cc',
@@ -803,7 +1076,7 @@
             '../../src/x64/frames-x64.cc',
             '../../src/x64/frames-x64.h',
             '../../src/x64/full-codegen-x64.cc',
-            '../../src/x64/ic-x64.cc',
+            '../../src/x64/interface-descriptors-x64.cc',
             '../../src/x64/lithium-codegen-x64.cc',
             '../../src/x64/lithium-codegen-x64.h',
             '../../src/x64/lithium-gap-resolver-x64.cc',
@@ -814,7 +1087,15 @@
             '../../src/x64/macro-assembler-x64.h',
             '../../src/x64/regexp-macro-assembler-x64.cc',
             '../../src/x64/regexp-macro-assembler-x64.h',
-            '../../src/x64/stub-cache-x64.cc',
+            '../../src/compiler/x64/code-generator-x64.cc',
+            '../../src/compiler/x64/instruction-codes-x64.h',
+            '../../src/compiler/x64/instruction-selector-x64.cc',
+            '../../src/compiler/x64/linkage-x64.cc',
+            '../../src/ic/x64/access-compiler-x64.cc',
+            '../../src/ic/x64/handler-compiler-x64.cc',
+            '../../src/ic/x64/ic-x64.cc',
+            '../../src/ic/x64/ic-compiler-x64.cc',
+            '../../src/ic/x64/stub-cache-x64.cc',
           ],
         }],
         ['OS=="linux"', {
@@ -826,194 +1107,14 @@
                   ]
                 }],
               ],
-              'libraries': [
-                '-lrt'
-              ]
             },
-            'sources': [  ### gcmole(os:linux) ###
-              '../../src/platform-linux.cc',
-              '../../src/platform-posix.cc'
-            ],
           }
         ],
-        ['OS=="android"', {
-            'defines': [
-              'CAN_USE_VFP_INSTRUCTIONS',
-            ],
-            'sources': [
-              '../../src/platform-posix.cc'
-            ],
-            'conditions': [
-              ['host_os=="mac"', {
-                'target_conditions': [
-                  ['_toolset=="host"', {
-                    'sources': [
-                      '../../src/platform-macos.cc'
-                    ]
-                  }, {
-                    'sources': [
-                      '../../src/platform-linux.cc'
-                    ]
-                  }],
-                ],
-              }, {
-                # TODO(bmeurer): What we really want here, is this:
-                #
-                # 'link_settings': {
-                #   'target_conditions': [
-                #     ['_toolset=="host"', {
-                #       'libraries': [
-                #         '-lrt'
-                #       ]
-                #     }]
-                #   ]
-                # },
-                #
-                # but we can't do this right now, as the AOSP does not support
-                # linking against the host librt, so we need to work around this
-                # for now, using the following hack (see platform/time.cc):
-                'target_conditions': [
-                  ['_toolset=="host"', {
-                    'defines': [
-                      'V8_LIBRT_NOT_AVAILABLE=1',
-                    ],
-                  }],
-                ],
-                'sources': [
-                  '../../src/platform-linux.cc'
-                ]
-              }],
-            ],
-          },
-        ],
-        ['OS=="qnx"', {
-            'link_settings': {
-              'target_conditions': [
-                ['_toolset=="host" and host_os=="linux"', {
-                  'libraries': [
-                    '-lrt'
-                  ],
-                }],
-                ['_toolset=="target"', {
-                  'libraries': [
-                    '-lbacktrace'
-                  ],
-                }],
-              ],
-            },
-            'sources': [
-              '../../src/platform-posix.cc',
-            ],
-            'target_conditions': [
-              ['_toolset=="host" and host_os=="linux"', {
-                'sources': [
-                  '../../src/platform-linux.cc'
-                ],
-              }],
-              ['_toolset=="host" and host_os=="mac"', {
-                'sources': [
-                  '../../src/platform-macos.cc'
-                ],
-              }],
-              ['_toolset=="target"', {
-                'sources': [
-                  '../../src/platform-qnx.cc'
-                ],
-              }],
-            ],
-          },
-        ],
-        ['OS=="freebsd"', {
-            'link_settings': {
-              'libraries': [
-                '-L/usr/local/lib -lexecinfo',
-            ]},
-            'sources': [
-              '../../src/platform-freebsd.cc',
-              '../../src/platform-posix.cc'
-            ],
-          }
-        ],
-        ['OS=="openbsd"', {
-            'link_settings': {
-              'libraries': [
-                '-L/usr/local/lib -lexecinfo',
-            ]},
-            'sources': [
-              '../../src/platform-openbsd.cc',
-              '../../src/platform-posix.cc'
-            ],
-          }
-        ],
-        ['OS=="netbsd"', {
-            'link_settings': {
-              'libraries': [
-                '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
-            ]},
-            'sources': [
-              '../../src/platform-openbsd.cc',
-              '../../src/platform-posix.cc'
-            ],
-          }
-        ],
-        ['OS=="solaris"', {
-            'link_settings': {
-              'libraries': [
-                '-lnsl',
-            ]},
-            'sources': [
-              '../../src/platform-solaris.cc',
-              '../../src/platform-posix.cc'
-            ],
-          }
-        ],
-        ['OS=="mac"', {
-          'sources': [
-            '../../src/platform-macos.cc',
-            '../../src/platform-posix.cc'
-          ]},
-        ],
         ['OS=="win"', {
-          'defines': [
-            '_CRT_RAND_S'  # for rand_s()
-          ],
           'variables': {
             'gyp_generators': '<!(echo $GYP_GENERATORS)',
           },
-          'conditions': [
-            ['gyp_generators=="make"', {
-              'variables': {
-                'build_env': '<!(uname -o)',
-              },
-              'conditions': [
-                ['build_env=="Cygwin"', {
-                  'sources': [
-                    '../../src/platform-cygwin.cc',
-                    '../../src/platform-posix.cc'
-                  ],
-                }, {
-                  'sources': [
-                    '../../src/platform-win32.cc',
-                    '../../src/win32-math.cc',
-                    '../../src/win32-math.h'
-                  ],
-                }],
-              ],
-              'link_settings':  {
-                'libraries': [ '-lwinmm', '-lws2_32' ],
-              },
-            }, {
-              'sources': [
-                '../../src/platform-win32.cc',
-                '../../src/win32-math.cc',
-                '../../src/win32-math.h'
-              ],
-              'msvs_disabled_warnings': [4351, 4355, 4800],
-              'link_settings':  {
-                'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
-              },
-            }],
-          ],
+          'msvs_disabled_warnings': [4351, 4355, 4800],
         }],
         ['component=="shared_library"', {
           'defines': [
@@ -1075,16 +1176,39 @@
         '../../src/base/atomicops_internals_x86_gcc.cc',
         '../../src/base/atomicops_internals_x86_gcc.h',
         '../../src/base/atomicops_internals_x86_msvc.h',
+        '../../src/base/bits.cc',
+        '../../src/base/bits.h',
         '../../src/base/build_config.h',
+        '../../src/base/compiler-specific.h',
+        '../../src/base/cpu.cc',
+        '../../src/base/cpu.h',
+        '../../src/base/division-by-constant.cc',
+        '../../src/base/division-by-constant.h',
+        '../../src/base/flags.h',
         '../../src/base/lazy-instance.h',
+        '../../src/base/logging.cc',
+        '../../src/base/logging.h',
         '../../src/base/macros.h',
         '../../src/base/once.cc',
         '../../src/base/once.h',
+        '../../src/base/platform/elapsed-timer.h',
+        '../../src/base/platform/time.cc',
+        '../../src/base/platform/time.h',
+        '../../src/base/platform/condition-variable.cc',
+        '../../src/base/platform/condition-variable.h',
+        '../../src/base/platform/mutex.cc',
+        '../../src/base/platform/mutex.h',
+        '../../src/base/platform/platform.h',
+        '../../src/base/platform/semaphore.cc',
+        '../../src/base/platform/semaphore.h',
         '../../src/base/safe_conversions.h',
         '../../src/base/safe_conversions_impl.h',
         '../../src/base/safe_math.h',
         '../../src/base/safe_math_impl.h',
-        '../../src/base/win32-headers.h',
+        '../../src/base/sys-info.cc',
+        '../../src/base/sys-info.h',
+        '../../src/base/utils/random-number-generator.cc',
+        '../../src/base/utils/random-number-generator.h',
       ],
       'conditions': [
         ['want_separate_host_toolset==1', {
@@ -1092,15 +1216,285 @@
         }, {
           'toolsets': ['target'],
         }],
-        ['component=="shared_library"', {
+        ['OS=="linux"', {
+            'link_settings': {
+              'libraries': [
+                '-lrt'
+              ]
+            },
+            'sources': [
+              '../../src/base/platform/platform-linux.cc',
+              '../../src/base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="android"', {
+            'sources': [
+              '../../src/base/platform/platform-posix.cc'
+            ],
+            'conditions': [
+              ['host_os=="mac"', {
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'sources': [
+                      '../../src/base/platform/platform-macos.cc'
+                    ]
+                  }, {
+                    'sources': [
+                      '../../src/base/platform/platform-linux.cc'
+                    ]
+                  }],
+                ],
+              }, {
+                # TODO(bmeurer): What we really want here, is this:
+                #
+                # 'link_settings': {
+                #   'target_conditions': [
+                #     ['_toolset=="host"', {
+                #       'libraries': [
+                #         '-lrt'
+                #       ]
+                #     }]
+                #   ]
+                # },
+                #
+                # but we can't do this right now, as the AOSP does not support
+                # linking against the host librt, so we need to work around this
+                # for now, using the following hack (see platform/time.cc):
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'defines': [
+                      'V8_LIBRT_NOT_AVAILABLE=1',
+                    ],
+                  }],
+                ],
+                'sources': [
+                  '../../src/base/platform/platform-linux.cc'
+                ]
+              }],
+            ],
+          },
+        ],
+        ['OS=="qnx"', {
+            'link_settings': {
+              'target_conditions': [
+                ['_toolset=="host" and host_os=="linux"', {
+                  'libraries': [
+                    '-lrt'
+                  ],
+                }],
+                ['_toolset=="target"', {
+                  'libraries': [
+                    '-lbacktrace'
+                  ],
+                }],
+              ],
+            },
+            'sources': [
+              '../../src/base/platform/platform-posix.cc',
+              '../../src/base/qnx-math.h',
+            ],
+            'target_conditions': [
+              ['_toolset=="host" and host_os=="linux"', {
+                'sources': [
+                  '../../src/base/platform/platform-linux.cc'
+                ],
+              }],
+              ['_toolset=="host" and host_os=="mac"', {
+                'sources': [
+                  '../../src/base/platform/platform-macos.cc'
+                ],
+              }],
+              ['_toolset=="target"', {
+                'sources': [
+                  '../../src/base/platform/platform-qnx.cc'
+                ],
+              }],
+            ],
+          },
+        ],
+        ['OS=="freebsd"', {
+            'link_settings': {
+              'libraries': [
+                '-L/usr/local/lib -lexecinfo',
+            ]},
+            'sources': [
+              '../../src/base/platform/platform-freebsd.cc',
+              '../../src/base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="openbsd"', {
+            'link_settings': {
+              'libraries': [
+                '-L/usr/local/lib -lexecinfo',
+            ]},
+            'sources': [
+              '../../src/base/platform/platform-openbsd.cc',
+              '../../src/base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="netbsd"', {
+            'link_settings': {
+              'libraries': [
+                '-L/usr/pkg/lib -Wl,-R/usr/pkg/lib -lexecinfo',
+            ]},
+            'sources': [
+              '../../src/base/platform/platform-openbsd.cc',
+              '../../src/base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="solaris"', {
+            'link_settings': {
+              'libraries': [
+                '-lnsl',
+            ]},
+            'sources': [
+              '../../src/base/platform/platform-solaris.cc',
+              '../../src/base/platform/platform-posix.cc'
+            ],
+          }
+        ],
+        ['OS=="mac"', {
+          'sources': [
+            '../../src/base/platform/platform-macos.cc',
+            '../../src/base/platform/platform-posix.cc'
+          ]},
+        ],
+        ['OS=="win"', {
           'defines': [
-            'BUILDING_V8_SHARED',
-            'V8_SHARED',
+            '_CRT_RAND_S'  # for rand_s()
+          ],
+          'variables': {
+            'gyp_generators': '<!(echo $GYP_GENERATORS)',
+          },
+          'conditions': [
+            ['gyp_generators=="make"', {
+              'variables': {
+                'build_env': '<!(uname -o)',
+              },
+              'conditions': [
+                ['build_env=="Cygwin"', {
+                  'sources': [
+                    '../../src/base/platform/platform-cygwin.cc',
+                    '../../src/base/platform/platform-posix.cc'
+                  ],
+                }, {
+                  'sources': [
+                    '../../src/base/platform/platform-win32.cc',
+                    '../../src/base/win32-headers.h',
+                    '../../src/base/win32-math.cc',
+                    '../../src/base/win32-math.h'
+                  ],
+                }],
+              ],
+              'link_settings':  {
+                'libraries': [ '-lwinmm', '-lws2_32' ],
+              },
+            }, {
+              'sources': [
+                '../../src/base/platform/platform-win32.cc',
+                '../../src/base/win32-headers.h',
+                '../../src/base/win32-math.cc',
+                '../../src/base/win32-math.h'
+              ],
+              'msvs_disabled_warnings': [4351, 4355, 4800],
+              'link_settings':  {
+                'libraries': [ '-lwinmm.lib', '-lws2_32.lib' ],
+              },
+            }],
           ],
         }],
       ],
     },
     {
+      'target_name': 'v8_libplatform',
+      'type': 'static_library',
+      'variables': {
+        'optimize': 'max',
+      },
+      'dependencies': [
+        'v8_libbase',
+      ],
+      'include_dirs+': [
+        '../..',
+      ],
+      'sources': [
+        '../../include/libplatform/libplatform.h',
+        '../../src/libplatform/default-platform.cc',
+        '../../src/libplatform/default-platform.h',
+        '../../src/libplatform/task-queue.cc',
+        '../../src/libplatform/task-queue.h',
+        '../../src/libplatform/worker-thread.cc',
+        '../../src/libplatform/worker-thread.h',
+      ],
+      'conditions': [
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ],
+    },
+    {
+      'target_name': 'natives_blob',
+      'type': 'none',
+      'conditions': [
+        [ 'v8_use_external_startup_data==1', {
+          'conditions': [
+            ['want_separate_host_toolset==1', {
+              'dependencies': ['js2c#host'],
+            }, {
+              'dependencies': ['js2c'],
+            }],
+          ],
+          'actions': [{
+            'action_name': 'concatenate_natives_blob',
+            'inputs': [
+              '../../tools/concatenate-files.py',
+              '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+              '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
+            ],
+            'conditions': [
+              ['want_separate_host_toolset==1', {
+                'target_conditions': [
+                  ['_toolset=="host"', {
+                    'outputs': [
+                      '<(PRODUCT_DIR)/natives_blob_host.bin',
+                    ],
+                    'action': [
+                      'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob_host.bin'
+                    ],
+                  }, {
+                    'outputs': [
+                      '<(PRODUCT_DIR)/natives_blob.bin',
+                    ],
+                    'action': [
+                      'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+                    ],
+                  }],
+                ],
+              }, {
+                'outputs': [
+                  '<(PRODUCT_DIR)/natives_blob.bin',
+                ],
+                'action': [
+                  'python', '<@(_inputs)', '<(PRODUCT_DIR)/natives_blob.bin'
+                ],
+              }],
+            ],
+          }],
+        }],
+        ['want_separate_host_toolset==1', {
+          'toolsets': ['host', 'target'],
+        }, {
+          'toolsets': ['target'],
+        }],
+      ]
+    },
+    {
       'target_name': 'js2c',
       'type': 'none',
       'conditions': [
@@ -1125,37 +1519,42 @@
         'library_files': [
           '../../src/runtime.js',
           '../../src/v8natives.js',
+          '../../src/symbol.js',
           '../../src/array.js',
           '../../src/string.js',
           '../../src/uri.js',
+          '../../third_party/fdlibm/fdlibm.js',
           '../../src/math.js',
-          '../../src/messages.js',
           '../../src/apinatives.js',
-          '../../src/debug-debugger.js',
-          '../../src/mirror-debugger.js',
-          '../../src/liveedit-debugger.js',
           '../../src/date.js',
-          '../../src/json.js',
           '../../src/regexp.js',
           '../../src/arraybuffer.js',
           '../../src/typedarray.js',
-          '../../src/weak_collection.js',
-          '../../src/promise.js',
+          '../../src/generator.js',
           '../../src/object-observe.js',
+          '../../src/collection.js',
+          '../../src/weak-collection.js',
+          '../../src/collection-iterator.js',
+          '../../src/promise.js',
+          '../../src/messages.js',
+          '../../src/json.js',
+          '../../src/array-iterator.js',
+          '../../src/string-iterator.js',
+          '../../src/debug-debugger.js',
+          '../../src/mirror-debugger.js',
+          '../../src/liveedit-debugger.js',
           '../../src/macros.py',
         ],
         'experimental_library_files': [
           '../../src/macros.py',
-          '../../src/symbol.js',
           '../../src/proxy.js',
-          '../../src/collection.js',
-          '../../src/collection-iterator.js',
           '../../src/generator.js',
-          '../../src/array-iterator.js',
           '../../src/harmony-string.js',
           '../../src/harmony-array.js',
-          '../../src/harmony-math.js'
+          '../../src/harmony-classes.js',
         ],
+        'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
+        'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
       },
       'actions': [
         {
@@ -1171,12 +1570,20 @@
           'action': [
             'python',
             '../../tools/js2c.py',
-            '<@(_outputs)',
+            '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
             'CORE',
             '<(v8_compress_startup_data)',
             '<@(library_files)',
             '<@(i18n_library_files)',
           ],
+          'conditions': [
+            [ 'v8_use_external_startup_data==1', {
+              'outputs': ['<@(libraries_bin_file)'],
+              'action': [
+                '--startup_blob', '<@(libraries_bin_file)',
+              ],
+            }],
+          ],
         },
         {
           'action_name': 'js2c_experimental',
@@ -1190,11 +1597,19 @@
           'action': [
             'python',
             '../../tools/js2c.py',
-            '<@(_outputs)',
+            '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
             'EXPERIMENTAL',
             '<(v8_compress_startup_data)',
             '<@(experimental_library_files)'
           ],
+          'conditions': [
+            [ 'v8_use_external_startup_data==1', {
+              'outputs': ['<@(libraries_experimental_bin_file)'],
+              'action': [
+                '--startup_blob', '<@(libraries_experimental_bin_file)'
+              ],
+            }],
+          ],
         },
       ],
     },
@@ -1229,7 +1644,7 @@
     {
       'target_name': 'mksnapshot',
       'type': 'executable',
-      'dependencies': ['v8_base', 'v8_nosnapshot'],
+      'dependencies': ['v8_base', 'v8_nosnapshot', 'v8_libplatform'],
       'include_dirs+': [
         '../..',
       ],
diff --git a/tools/gyp/v8.target.darwin-arm.mk b/tools/gyp/v8.target.darwin-arm.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.darwin-arm.mk
+++ b/tools/gyp/v8.target.darwin-arm.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.darwin-arm64.mk b/tools/gyp/v8.target.darwin-arm64.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.darwin-arm64.mk
+++ b/tools/gyp/v8.target.darwin-arm64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.darwin-mips.mk b/tools/gyp/v8.target.darwin-mips.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.darwin-mips.mk
+++ b/tools/gyp/v8.target.darwin-mips.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.darwin-mips64.mk b/tools/gyp/v8.target.darwin-mips64.mk
new file mode 100644
index 0000000..624fbfb
--- /dev/null
+++ b/tools/gyp/v8.target.darwin-mips64.mk
@@ -0,0 +1,43 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := GYP
+LOCAL_MODULE := v8_tools_gyp_v8_gyp
+LOCAL_MODULE_STEM := v8
+LOCAL_MODULE_SUFFIX := .stamp
+LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_gyp,,,$(GYP_VAR_PREFIX))/v8_tools_gyp_v8_base_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_snapshot_gyp,,,$(GYP_VAR_PREFIX))/v8_tools_gyp_v8_snapshot_gyp.a
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+### Rules for final target.
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_gyp
+
+# Alias gyp target name.
+.PHONY: v8
+v8: v8_tools_gyp_v8_gyp
+
+LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
+LOCAL_UNINSTALLABLE_MODULE := true
+LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
+	$(hide) echo "Gyp timestamp: $@"
+	$(hide) mkdir -p $(dir $@)
+	$(hide) touch $@
+
+LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/v8.target.darwin-x86.mk b/tools/gyp/v8.target.darwin-x86.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.darwin-x86.mk
+++ b/tools/gyp/v8.target.darwin-x86.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.darwin-x86_64.mk b/tools/gyp/v8.target.darwin-x86_64.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.darwin-x86_64.mk
+++ b/tools/gyp/v8.target.darwin-x86_64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.linux-arm.mk b/tools/gyp/v8.target.linux-arm.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.linux-arm.mk
+++ b/tools/gyp/v8.target.linux-arm.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.linux-arm64.mk b/tools/gyp/v8.target.linux-arm64.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.linux-arm64.mk
+++ b/tools/gyp/v8.target.linux-arm64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.linux-mips.mk b/tools/gyp/v8.target.linux-mips.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.linux-mips.mk
+++ b/tools/gyp/v8.target.linux-mips.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.linux-mips64.mk b/tools/gyp/v8.target.linux-mips64.mk
new file mode 100644
index 0000000..624fbfb
--- /dev/null
+++ b/tools/gyp/v8.target.linux-mips64.mk
@@ -0,0 +1,43 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := GYP
+LOCAL_MODULE := v8_tools_gyp_v8_gyp
+LOCAL_MODULE_STEM := v8
+LOCAL_MODULE_SUFFIX := .stamp
+LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_base_gyp,,,$(GYP_VAR_PREFIX))/v8_tools_gyp_v8_base_gyp.a \
+	$(call intermediates-dir-for,STATIC_LIBRARIES,v8_tools_gyp_v8_snapshot_gyp,,,$(GYP_VAR_PREFIX))/v8_tools_gyp_v8_snapshot_gyp.a
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+### Rules for final target.
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_gyp
+
+# Alias gyp target name.
+.PHONY: v8
+v8: v8_tools_gyp_v8_gyp
+
+LOCAL_MODULE_PATH := $(PRODUCT_OUT)/gyp_stamp
+LOCAL_UNINSTALLABLE_MODULE := true
+LOCAL_2ND_ARCH_VAR_PREFIX := $(GYP_VAR_PREFIX)
+
+include $(BUILD_SYSTEM)/base_rules.mk
+
+$(LOCAL_BUILT_MODULE): $(LOCAL_ADDITIONAL_DEPENDENCIES)
+	$(hide) echo "Gyp timestamp: $@"
+	$(hide) mkdir -p $(dir $@)
+	$(hide) touch $@
+
+LOCAL_2ND_ARCH_VAR_PREFIX :=
diff --git a/tools/gyp/v8.target.linux-x86.mk b/tools/gyp/v8.target.linux-x86.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.linux-x86.mk
+++ b/tools/gyp/v8.target.linux-x86.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8.target.linux-x86_64.mk b/tools/gyp/v8.target.linux-x86_64.mk
index a67d43c..1c74995 100644
--- a/tools/gyp/v8.target.linux-x86_64.mk
+++ b/tools/gyp/v8.target.linux-x86_64.mk
@@ -6,7 +6,6 @@
 LOCAL_MODULE := v8_tools_gyp_v8_gyp
 LOCAL_MODULE_STEM := v8
 LOCAL_MODULE_SUFFIX := .stamp
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -22,6 +21,9 @@
 LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
 
 ### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_gyp
diff --git a/tools/gyp/v8_base.host.darwin-arm.mk b/tools/gyp/v8_base.host.darwin-arm.mk
index 5ef2410..f077609 100644
--- a/tools/gyp/v8_base.host.darwin-arm.mk
+++ b/tools/gyp/v8_base.host.darwin-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/arm/assembler-arm.cc \
 	v8/src/arm/builtins-arm.cc \
 	v8/src/arm/code-stubs-arm.cc \
@@ -186,16 +230,21 @@
 	v8/src/arm/disasm-arm.cc \
 	v8/src/arm/frames-arm.cc \
 	v8/src/arm/full-codegen-arm.cc \
-	v8/src/arm/ic-arm.cc \
+	v8/src/arm/interface-descriptors-arm.cc \
 	v8/src/arm/lithium-arm.cc \
 	v8/src/arm/lithium-codegen-arm.cc \
 	v8/src/arm/lithium-gap-resolver-arm.cc \
 	v8/src/arm/macro-assembler-arm.cc \
 	v8/src/arm/regexp-macro-assembler-arm.cc \
 	v8/src/arm/simulator-arm.cc \
-	v8/src/arm/stub-cache-arm.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-macos.cc
+	v8/src/compiler/arm/code-generator-arm.cc \
+	v8/src/compiler/arm/instruction-selector-arm.cc \
+	v8/src/compiler/arm/linkage-arm.cc \
+	v8/src/ic/arm/access-compiler-arm.cc \
+	v8/src/ic/arm/handler-compiler-arm.cc \
+	v8/src/ic/arm/ic-arm.cc \
+	v8/src/ic/arm/ic-compiler-arm.cc \
+	v8/src/ic/arm/stub-cache-arm.cc
 
 
 # Flags passed to both C and C++ files.
@@ -203,7 +252,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -211,9 +259,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -221,7 +278,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -238,21 +294,22 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -274,20 +331,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -295,6 +351,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -307,7 +371,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -324,21 +387,22 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -354,43 +418,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
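
Beyond the source-list reshuffle above (the heap/, ic/ and new compiler/ directories), this host makefile — like the per-arch variants that follow — switches the host build to Clang (LOCAL_CLANG := true), moves -fno-exceptions from the shared C/C++ flags into the C++-only LOCAL_CPPFLAGS, and adds -std=gnu++11 together with a batch of Clang warning toggles. The -Wno-c++11-narrowing and -Wno-deprecated-register entries in particular track C++11 language changes; a hedged micro-example of the constructs they keep compiling:

// Why -Wno-c++11-narrowing and -Wno-deprecated-register accompany
// -std=gnu++11 (illustrative only; not code from this diff).
int main() {
  double d = 3.5;
  // int a[] = {d};  // ill-formed in C++11: narrowing in a braced initializer
  int a[] = {static_cast<int>(d)};  // the explicit cast Clang would otherwise demand
  register int i = a[0];  // 'register' is deprecated in C++11; warns without the flag
  (void)i;
  return 0;
}
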
diff --git a/tools/gyp/v8_base.host.darwin-arm64.mk b/tools/gyp/v8_base.host.darwin-arm64.mk
index 9b8d183..60257cc 100644
--- a/tools/gyp/v8_base.host.darwin-arm64.mk
+++ b/tools/gyp/v8_base.host.darwin-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/arm64/assembler-arm64.cc \
 	v8/src/arm64/builtins-arm64.cc \
 	v8/src/arm64/codegen-arm64.cc \
@@ -182,23 +226,29 @@
 	v8/src/arm64/cpu-arm64.cc \
 	v8/src/arm64/debug-arm64.cc \
 	v8/src/arm64/decoder-arm64.cc \
+	v8/src/arm64/delayed-masm-arm64.cc \
 	v8/src/arm64/deoptimizer-arm64.cc \
 	v8/src/arm64/disasm-arm64.cc \
 	v8/src/arm64/frames-arm64.cc \
 	v8/src/arm64/full-codegen-arm64.cc \
-	v8/src/arm64/ic-arm64.cc \
 	v8/src/arm64/instructions-arm64.cc \
 	v8/src/arm64/instrument-arm64.cc \
+	v8/src/arm64/interface-descriptors-arm64.cc \
 	v8/src/arm64/lithium-arm64.cc \
 	v8/src/arm64/lithium-codegen-arm64.cc \
 	v8/src/arm64/lithium-gap-resolver-arm64.cc \
 	v8/src/arm64/macro-assembler-arm64.cc \
 	v8/src/arm64/regexp-macro-assembler-arm64.cc \
 	v8/src/arm64/simulator-arm64.cc \
-	v8/src/arm64/stub-cache-arm64.cc \
 	v8/src/arm64/utils-arm64.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-macos.cc
+	v8/src/compiler/arm64/code-generator-arm64.cc \
+	v8/src/compiler/arm64/instruction-selector-arm64.cc \
+	v8/src/compiler/arm64/linkage-arm64.cc \
+	v8/src/ic/arm64/access-compiler-arm64.cc \
+	v8/src/ic/arm64/handler-compiler-arm64.cc \
+	v8/src/ic/arm64/ic-arm64.cc \
+	v8/src/ic/arm64/ic-compiler-arm64.cc \
+	v8/src/ic/arm64/stub-cache-arm64.cc
 
 
 # Flags passed to both C and C++ files.
@@ -206,7 +256,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -214,16 +263,24 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -240,16 +297,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -274,20 +333,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -295,6 +353,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -306,7 +372,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -323,16 +388,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -351,43 +418,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_base.host.darwin-mips.mk b/tools/gyp/v8_base.host.darwin-mips.mk
index 1c3dfa5..2d78901 100644
--- a/tools/gyp/v8_base.host.darwin-mips.mk
+++ b/tools/gyp/v8_base.host.darwin-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/mips/assembler-mips.cc \
 	v8/src/mips/builtins-mips.cc \
 	v8/src/mips/codegen-mips.cc \
@@ -186,16 +230,18 @@
 	v8/src/mips/disasm-mips.cc \
 	v8/src/mips/frames-mips.cc \
 	v8/src/mips/full-codegen-mips.cc \
-	v8/src/mips/ic-mips.cc \
+	v8/src/mips/interface-descriptors-mips.cc \
 	v8/src/mips/lithium-codegen-mips.cc \
 	v8/src/mips/lithium-gap-resolver-mips.cc \
 	v8/src/mips/lithium-mips.cc \
 	v8/src/mips/macro-assembler-mips.cc \
 	v8/src/mips/regexp-macro-assembler-mips.cc \
 	v8/src/mips/simulator-mips.cc \
-	v8/src/mips/stub-cache-mips.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-macos.cc
+	v8/src/ic/mips/access-compiler-mips.cc \
+	v8/src/ic/mips/handler-compiler-mips.cc \
+	v8/src/ic/mips/ic-mips.cc \
+	v8/src/ic/mips/ic-compiler-mips.cc \
+	v8/src/ic/mips/stub-cache-mips.cc
 
 
 # Flags passed to both C and C++ files.
@@ -204,7 +250,6 @@
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -212,9 +257,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -222,7 +276,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -239,19 +292,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
 	'-D_MIPS_ARCH_MIPS32R2' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -276,21 +331,20 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -298,6 +352,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -310,7 +372,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -327,19 +388,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
 	'-D_MIPS_ARCH_MIPS32R2' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -358,43 +421,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
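Every v8_base fragment in this diff selects its compiler flags through the same computed-variable mechanism: GYP_CONFIGURATION names the build variant, and the variable reference splices that name in to pick the matching _Debug or _Release set. A minimal standalone sketch, with hypothetical flag values:

  MY_CFLAGS_Debug := -Os -g
  MY_CFLAGS_Release := -O2 -fno-ident
  GYP_CONFIGURATION ?= Release
  # GNU Make expands the inner reference first, so the line below reads
  # MY_CFLAGS_Release, and LOCAL_CFLAGS becomes "-O2 -fno-ident".
  LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION))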
diff --git a/tools/gyp/v8_base.host.darwin-mips64.mk b/tools/gyp/v8_base.host.darwin-mips64.mk
new file mode 100644
index 0000000..3942ac8
--- /dev/null
+++ b/tools/gyp/v8_base.host.darwin-mips64.mk
@@ -0,0 +1,448 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/accessors.cc \
+	v8/src/allocation.cc \
+	v8/src/allocation-site-scopes.cc \
+	v8/src/allocation-tracker.cc \
+	v8/src/api.cc \
+	v8/src/arguments.cc \
+	v8/src/assembler.cc \
+	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
+	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
+	v8/src/bignum-dtoa.cc \
+	v8/src/bignum.cc \
+	v8/src/bootstrapper.cc \
+	v8/src/builtins.cc \
+	v8/src/cached-powers.cc \
+	v8/src/checks.cc \
+	v8/src/code-factory.cc \
+	v8/src/code-stubs.cc \
+	v8/src/code-stubs-hydrogen.cc \
+	v8/src/codegen.cc \
+	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
+	v8/src/compiler.cc \
+	v8/src/contexts.cc \
+	v8/src/conversions.cc \
+	v8/src/counters.cc \
+	v8/src/cpu-profiler.cc \
+	v8/src/data-flow.cc \
+	v8/src/date.cc \
+	v8/src/dateparser.cc \
+	v8/src/debug.cc \
+	v8/src/deoptimizer.cc \
+	v8/src/disassembler.cc \
+	v8/src/diy-fp.cc \
+	v8/src/dtoa.cc \
+	v8/src/elements-kind.cc \
+	v8/src/elements.cc \
+	v8/src/execution.cc \
+	v8/src/extensions/externalize-string-extension.cc \
+	v8/src/extensions/free-buffer-extension.cc \
+	v8/src/extensions/gc-extension.cc \
+	v8/src/extensions/statistics-extension.cc \
+	v8/src/extensions/trigger-failure-extension.cc \
+	v8/src/factory.cc \
+	v8/src/fast-dtoa.cc \
+	v8/src/fixed-dtoa.cc \
+	v8/src/flags.cc \
+	v8/src/frames.cc \
+	v8/src/full-codegen.cc \
+	v8/src/func-name-inferrer.cc \
+	v8/src/gdb-jit.cc \
+	v8/src/global-handles.cc \
+	v8/src/handles.cc \
+	v8/src/heap-profiler.cc \
+	v8/src/heap-snapshot-generator.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
+	v8/src/hydrogen-bce.cc \
+	v8/src/hydrogen-bch.cc \
+	v8/src/hydrogen-canonicalize.cc \
+	v8/src/hydrogen-check-elimination.cc \
+	v8/src/hydrogen-dce.cc \
+	v8/src/hydrogen-dehoist.cc \
+	v8/src/hydrogen-environment-liveness.cc \
+	v8/src/hydrogen-escape-analysis.cc \
+	v8/src/hydrogen-instructions.cc \
+	v8/src/hydrogen.cc \
+	v8/src/hydrogen-gvn.cc \
+	v8/src/hydrogen-infer-representation.cc \
+	v8/src/hydrogen-infer-types.cc \
+	v8/src/hydrogen-load-elimination.cc \
+	v8/src/hydrogen-mark-deoptimize.cc \
+	v8/src/hydrogen-mark-unreachable.cc \
+	v8/src/hydrogen-osr.cc \
+	v8/src/hydrogen-range-analysis.cc \
+	v8/src/hydrogen-redundant-phi.cc \
+	v8/src/hydrogen-removable-simulates.cc \
+	v8/src/hydrogen-representation-changes.cc \
+	v8/src/hydrogen-sce.cc \
+	v8/src/hydrogen-store-elimination.cc \
+	v8/src/hydrogen-types.cc \
+	v8/src/hydrogen-uint32-analysis.cc \
+	v8/src/i18n.cc \
+	v8/src/icu_util.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
+	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
+	v8/src/interpreter-irregexp.cc \
+	v8/src/isolate.cc \
+	v8/src/jsregexp.cc \
+	v8/src/lithium-allocator.cc \
+	v8/src/lithium-codegen.cc \
+	v8/src/lithium.cc \
+	v8/src/liveedit.cc \
+	v8/src/log-utils.cc \
+	v8/src/log.cc \
+	v8/src/lookup.cc \
+	v8/src/messages.cc \
+	v8/src/objects-debug.cc \
+	v8/src/objects-printer.cc \
+	v8/src/objects.cc \
+	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
+	v8/src/parser.cc \
+	v8/src/perf-jit.cc \
+	v8/src/preparse-data.cc \
+	v8/src/preparser.cc \
+	v8/src/prettyprinter.cc \
+	v8/src/profile-generator.cc \
+	v8/src/property.cc \
+	v8/src/regexp-macro-assembler-irregexp.cc \
+	v8/src/regexp-macro-assembler-tracer.cc \
+	v8/src/regexp-macro-assembler.cc \
+	v8/src/regexp-stack.cc \
+	v8/src/rewriter.cc \
+	v8/src/runtime-profiler.cc \
+	v8/src/runtime.cc \
+	v8/src/safepoint-table.cc \
+	v8/src/sampler.cc \
+	v8/src/scanner-character-streams.cc \
+	v8/src/scanner.cc \
+	v8/src/scopeinfo.cc \
+	v8/src/scopes.cc \
+	v8/src/serialize.cc \
+	v8/src/snapshot-source-sink.cc \
+	v8/src/string-search.cc \
+	v8/src/string-stream.cc \
+	v8/src/strtod.cc \
+	v8/src/ic/stub-cache.cc \
+	v8/src/token.cc \
+	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
+	v8/src/type-info.cc \
+	v8/src/types.cc \
+	v8/src/typing.cc \
+	v8/src/unicode.cc \
+	v8/src/utils.cc \
+	v8/src/v8.cc \
+	v8/src/v8threads.cc \
+	v8/src/variables.cc \
+	v8/src/version.cc \
+	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
+	v8/src/mips64/assembler-mips64.cc \
+	v8/src/mips64/builtins-mips64.cc \
+	v8/src/mips64/codegen-mips64.cc \
+	v8/src/mips64/code-stubs-mips64.cc \
+	v8/src/mips64/constants-mips64.cc \
+	v8/src/mips64/cpu-mips64.cc \
+	v8/src/mips64/debug-mips64.cc \
+	v8/src/mips64/deoptimizer-mips64.cc \
+	v8/src/mips64/disasm-mips64.cc \
+	v8/src/mips64/frames-mips64.cc \
+	v8/src/mips64/full-codegen-mips64.cc \
+	v8/src/mips64/interface-descriptors-mips64.cc \
+	v8/src/mips64/lithium-codegen-mips64.cc \
+	v8/src/mips64/lithium-gap-resolver-mips64.cc \
+	v8/src/mips64/lithium-mips64.cc \
+	v8/src/mips64/macro-assembler-mips64.cc \
+	v8/src/mips64/regexp-macro-assembler-mips64.cc \
+	v8/src/mips64/simulator-mips64.cc \
+	v8/src/ic/mips64/access-compiler-mips64.cc \
+	v8/src/ic/mips64/handler-compiler-mips64.cc \
+	v8/src/ic/mips64/ic-mips64.cc \
+	v8/src/ic/mips64/ic-compiler-mips64.cc \
+	v8/src/ic/mips64/stub-cache-mips64.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
+	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
+	'-DU_STATIC_IMPLEMENTATION' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(LOCAL_PATH)/third_party/icu/source/i18n \
+	$(LOCAL_PATH)/third_party/icu/source/common
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
+	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
+	'-DU_STATIC_IMPLEMENTATION' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(LOCAL_PATH)/third_party/icu/source/i18n \
+	$(LOCAL_PATH)/third_party/icu/source/common
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_base
+v8_base: v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
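The module name in the new file above uses the same splice one level deeper: GYP_VAR_PREFIX is inserted into the variable name itself to select the primary or secondary target architecture. A sketch with assumed values (TARGET_ARCH and the 2ND_ prefix convention come from the including AOSP build, not from this fragment):

  # Empty selects $(TARGET_ARCH); "2ND_" would select $(TARGET_2ND_ARCH).
  GYP_VAR_PREFIX :=
  TARGET_ARCH := mips64
  # $(TARGET_$(GYP_VAR_PREFIX)ARCH) -> $(TARGET_ARCH) -> mips64
  LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
  # Result: v8_tools_gyp_v8_base_mips64_host_gyp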
diff --git a/tools/gyp/v8_base.host.darwin-x86.mk b/tools/gyp/v8_base.host.darwin-x86.mk
index fea0a64..e810a4a 100644
--- a/tools/gyp/v8_base.host.darwin-x86.mk
+++ b/tools/gyp/v8_base.host.darwin-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/ia32/assembler-ia32.cc \
 	v8/src/ia32/builtins-ia32.cc \
 	v8/src/ia32/code-stubs-ia32.cc \
@@ -185,15 +229,20 @@
 	v8/src/ia32/disasm-ia32.cc \
 	v8/src/ia32/frames-ia32.cc \
 	v8/src/ia32/full-codegen-ia32.cc \
-	v8/src/ia32/ic-ia32.cc \
+	v8/src/ia32/interface-descriptors-ia32.cc \
 	v8/src/ia32/lithium-codegen-ia32.cc \
 	v8/src/ia32/lithium-gap-resolver-ia32.cc \
 	v8/src/ia32/lithium-ia32.cc \
 	v8/src/ia32/macro-assembler-ia32.cc \
 	v8/src/ia32/regexp-macro-assembler-ia32.cc \
-	v8/src/ia32/stub-cache-ia32.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-macos.cc
+	v8/src/compiler/ia32/code-generator-ia32.cc \
+	v8/src/compiler/ia32/instruction-selector-ia32.cc \
+	v8/src/compiler/ia32/linkage-ia32.cc \
+	v8/src/ic/ia32/access-compiler-ia32.cc \
+	v8/src/ic/ia32/handler-compiler-ia32.cc \
+	v8/src/ic/ia32/ic-ia32.cc \
+	v8/src/ic/ia32/ic-compiler-ia32.cc \
+	v8/src/ic/ia32/stub-cache-ia32.cc
 
 
 # Flags passed to both C and C++ files.
@@ -201,7 +250,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -209,9 +257,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -219,7 +276,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -236,16 +292,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -270,20 +328,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -291,6 +348,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -303,7 +368,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -320,16 +384,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -348,43 +414,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
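Old and new alike, these generated files keep the standard Android.mk module shape; a reduced skeleton of that shape, with illustrative names, looks like this:

  include $(CLEAR_VARS)
  # Hypothetical module; the real fragments set the long v8_tools_gyp_... name.
  LOCAL_MODULE := example_host_lib
  LOCAL_SRC_FILES := example.cc
  # Host-side build, as in all the v8_base.host.* fragments.
  LOCAL_IS_HOST_MODULE := true
  # Emits the rules that archive the objects into a host static library (.a).
  include $(BUILD_HOST_STATIC_LIBRARY)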
diff --git a/tools/gyp/v8_base.host.darwin-x86_64.mk b/tools/gyp/v8_base.host.darwin-x86_64.mk
index 6d98cf2..72d5670 100644
--- a/tools/gyp/v8_base.host.darwin-x86_64.mk
+++ b/tools/gyp/v8_base.host.darwin-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/x64/assembler-x64.cc \
 	v8/src/x64/builtins-x64.cc \
 	v8/src/x64/code-stubs-x64.cc \
@@ -185,15 +229,20 @@
 	v8/src/x64/disasm-x64.cc \
 	v8/src/x64/frames-x64.cc \
 	v8/src/x64/full-codegen-x64.cc \
-	v8/src/x64/ic-x64.cc \
+	v8/src/x64/interface-descriptors-x64.cc \
 	v8/src/x64/lithium-codegen-x64.cc \
 	v8/src/x64/lithium-gap-resolver-x64.cc \
 	v8/src/x64/lithium-x64.cc \
 	v8/src/x64/macro-assembler-x64.cc \
 	v8/src/x64/regexp-macro-assembler-x64.cc \
-	v8/src/x64/stub-cache-x64.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-macos.cc
+	v8/src/compiler/x64/code-generator-x64.cc \
+	v8/src/compiler/x64/instruction-selector-x64.cc \
+	v8/src/compiler/x64/linkage-x64.cc \
+	v8/src/ic/x64/access-compiler-x64.cc \
+	v8/src/ic/x64/handler-compiler-x64.cc \
+	v8/src/ic/x64/ic-x64.cc \
+	v8/src/ic/x64/ic-compiler-x64.cc \
+	v8/src/ic/x64/stub-cache-x64.cc
 
 
 # Flags passed to both C and C++ files.
@@ -201,7 +250,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -209,9 +257,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -219,7 +276,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -236,16 +292,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -270,20 +328,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -291,6 +348,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -303,7 +368,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -320,16 +384,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -348,43 +414,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
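The -UANDROID appended after the computed flags works because the compiler honors the last of conflicting -D/-U options on the command line. A sketch of the intent (the global -DANDROID is an assumption about the including target build):

  LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
  # The surrounding build is assumed to pass -DANDROID for target code; a
  # later -UANDROID wins, so ANDROID stays undefined for these host objects.
  LOCAL_CFLAGS += -UANDROID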
diff --git a/tools/gyp/v8_base.host.linux-arm.mk b/tools/gyp/v8_base.host.linux-arm.mk
index d55d446..f077609 100644
--- a/tools/gyp/v8_base.host.linux-arm.mk
+++ b/tools/gyp/v8_base.host.linux-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/arm/assembler-arm.cc \
 	v8/src/arm/builtins-arm.cc \
 	v8/src/arm/code-stubs-arm.cc \
@@ -186,16 +230,21 @@
 	v8/src/arm/disasm-arm.cc \
 	v8/src/arm/frames-arm.cc \
 	v8/src/arm/full-codegen-arm.cc \
-	v8/src/arm/ic-arm.cc \
+	v8/src/arm/interface-descriptors-arm.cc \
 	v8/src/arm/lithium-arm.cc \
 	v8/src/arm/lithium-codegen-arm.cc \
 	v8/src/arm/lithium-gap-resolver-arm.cc \
 	v8/src/arm/macro-assembler-arm.cc \
 	v8/src/arm/regexp-macro-assembler-arm.cc \
 	v8/src/arm/simulator-arm.cc \
-	v8/src/arm/stub-cache-arm.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/arm/code-generator-arm.cc \
+	v8/src/compiler/arm/instruction-selector-arm.cc \
+	v8/src/compiler/arm/linkage-arm.cc \
+	v8/src/ic/arm/access-compiler-arm.cc \
+	v8/src/ic/arm/handler-compiler-arm.cc \
+	v8/src/ic/arm/ic-arm.cc \
+	v8/src/ic/arm/ic-compiler-arm.cc \
+	v8/src/ic/arm/stub-cache-arm.cc
 
 
 # Flags passed to both C and C++ files.
@@ -203,18 +252,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -222,7 +278,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -239,22 +294,22 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -276,28 +331,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -310,7 +371,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -327,22 +387,22 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -358,47 +418,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
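One note on the trailing aliases every fragment ends with: they are declared .PHONY so Make never mistakes an on-disk file named after the alias for an up-to-date target.

  # Without .PHONY, a file literally named "v8_base" in the build root would
  # satisfy this target and the alias would silently stop rebuilding.
  .PHONY: v8_base
  v8_base: v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp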
diff --git a/tools/gyp/v8_base.host.linux-arm64.mk b/tools/gyp/v8_base.host.linux-arm64.mk
index 3d5cff3..60257cc 100644
--- a/tools/gyp/v8_base.host.linux-arm64.mk
+++ b/tools/gyp/v8_base.host.linux-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/arm64/assembler-arm64.cc \
 	v8/src/arm64/builtins-arm64.cc \
 	v8/src/arm64/codegen-arm64.cc \
@@ -182,23 +226,29 @@
 	v8/src/arm64/cpu-arm64.cc \
 	v8/src/arm64/debug-arm64.cc \
 	v8/src/arm64/decoder-arm64.cc \
+	v8/src/arm64/delayed-masm-arm64.cc \
 	v8/src/arm64/deoptimizer-arm64.cc \
 	v8/src/arm64/disasm-arm64.cc \
 	v8/src/arm64/frames-arm64.cc \
 	v8/src/arm64/full-codegen-arm64.cc \
-	v8/src/arm64/ic-arm64.cc \
 	v8/src/arm64/instructions-arm64.cc \
 	v8/src/arm64/instrument-arm64.cc \
+	v8/src/arm64/interface-descriptors-arm64.cc \
 	v8/src/arm64/lithium-arm64.cc \
 	v8/src/arm64/lithium-codegen-arm64.cc \
 	v8/src/arm64/lithium-gap-resolver-arm64.cc \
 	v8/src/arm64/macro-assembler-arm64.cc \
 	v8/src/arm64/regexp-macro-assembler-arm64.cc \
 	v8/src/arm64/simulator-arm64.cc \
-	v8/src/arm64/stub-cache-arm64.cc \
 	v8/src/arm64/utils-arm64.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/arm64/code-generator-arm64.cc \
+	v8/src/compiler/arm64/instruction-selector-arm64.cc \
+	v8/src/compiler/arm64/linkage-arm64.cc \
+	v8/src/ic/arm64/access-compiler-arm64.cc \
+	v8/src/ic/arm64/handler-compiler-arm64.cc \
+	v8/src/ic/arm64/ic-arm64.cc \
+	v8/src/ic/arm64/ic-compiler-arm64.cc \
+	v8/src/ic/arm64/stub-cache-arm64.cc
 
 
 # Flags passed to both C and C++ files.
@@ -206,25 +256,31 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -241,20 +297,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -276,28 +333,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -309,7 +372,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -326,20 +388,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -355,47 +418,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_base.host.linux-mips.mk b/tools/gyp/v8_base.host.linux-mips.mk
index a72576c..2d78901 100644
--- a/tools/gyp/v8_base.host.linux-mips.mk
+++ b/tools/gyp/v8_base.host.linux-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/mips/assembler-mips.cc \
 	v8/src/mips/builtins-mips.cc \
 	v8/src/mips/codegen-mips.cc \
@@ -186,16 +230,18 @@
 	v8/src/mips/disasm-mips.cc \
 	v8/src/mips/frames-mips.cc \
 	v8/src/mips/full-codegen-mips.cc \
-	v8/src/mips/ic-mips.cc \
+	v8/src/mips/interface-descriptors-mips.cc \
 	v8/src/mips/lithium-codegen-mips.cc \
 	v8/src/mips/lithium-gap-resolver-mips.cc \
 	v8/src/mips/lithium-mips.cc \
 	v8/src/mips/macro-assembler-mips.cc \
 	v8/src/mips/regexp-macro-assembler-mips.cc \
 	v8/src/mips/simulator-mips.cc \
-	v8/src/mips/stub-cache-mips.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/ic/mips/access-compiler-mips.cc \
+	v8/src/ic/mips/handler-compiler-mips.cc \
+	v8/src/ic/mips/ic-mips.cc \
+	v8/src/ic/mips/ic-compiler-mips.cc \
+	v8/src/ic/mips/stub-cache-mips.cc
 
 
 # Flags passed to both C and C++ files.
@@ -204,18 +250,25 @@
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -223,7 +276,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -240,23 +292,24 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
 	'-D_MIPS_ARCH_MIPS32R2' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -278,29 +331,35 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -313,7 +372,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -330,23 +388,24 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
 	'-D_MIPS_ARCH_MIPS32R2' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -362,47 +421,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_base.host.linux-mips64.mk b/tools/gyp/v8_base.host.linux-mips64.mk
new file mode 100644
index 0000000..3942ac8
--- /dev/null
+++ b/tools/gyp/v8_base.host.linux-mips64.mk
@@ -0,0 +1,448 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/accessors.cc \
+	v8/src/allocation.cc \
+	v8/src/allocation-site-scopes.cc \
+	v8/src/allocation-tracker.cc \
+	v8/src/api.cc \
+	v8/src/arguments.cc \
+	v8/src/assembler.cc \
+	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
+	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
+	v8/src/bignum-dtoa.cc \
+	v8/src/bignum.cc \
+	v8/src/bootstrapper.cc \
+	v8/src/builtins.cc \
+	v8/src/cached-powers.cc \
+	v8/src/checks.cc \
+	v8/src/code-factory.cc \
+	v8/src/code-stubs.cc \
+	v8/src/code-stubs-hydrogen.cc \
+	v8/src/codegen.cc \
+	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
+	v8/src/compiler.cc \
+	v8/src/contexts.cc \
+	v8/src/conversions.cc \
+	v8/src/counters.cc \
+	v8/src/cpu-profiler.cc \
+	v8/src/data-flow.cc \
+	v8/src/date.cc \
+	v8/src/dateparser.cc \
+	v8/src/debug.cc \
+	v8/src/deoptimizer.cc \
+	v8/src/disassembler.cc \
+	v8/src/diy-fp.cc \
+	v8/src/dtoa.cc \
+	v8/src/elements-kind.cc \
+	v8/src/elements.cc \
+	v8/src/execution.cc \
+	v8/src/extensions/externalize-string-extension.cc \
+	v8/src/extensions/free-buffer-extension.cc \
+	v8/src/extensions/gc-extension.cc \
+	v8/src/extensions/statistics-extension.cc \
+	v8/src/extensions/trigger-failure-extension.cc \
+	v8/src/factory.cc \
+	v8/src/fast-dtoa.cc \
+	v8/src/fixed-dtoa.cc \
+	v8/src/flags.cc \
+	v8/src/frames.cc \
+	v8/src/full-codegen.cc \
+	v8/src/func-name-inferrer.cc \
+	v8/src/gdb-jit.cc \
+	v8/src/global-handles.cc \
+	v8/src/handles.cc \
+	v8/src/heap-profiler.cc \
+	v8/src/heap-snapshot-generator.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
+	v8/src/hydrogen-bce.cc \
+	v8/src/hydrogen-bch.cc \
+	v8/src/hydrogen-canonicalize.cc \
+	v8/src/hydrogen-check-elimination.cc \
+	v8/src/hydrogen-dce.cc \
+	v8/src/hydrogen-dehoist.cc \
+	v8/src/hydrogen-environment-liveness.cc \
+	v8/src/hydrogen-escape-analysis.cc \
+	v8/src/hydrogen-instructions.cc \
+	v8/src/hydrogen.cc \
+	v8/src/hydrogen-gvn.cc \
+	v8/src/hydrogen-infer-representation.cc \
+	v8/src/hydrogen-infer-types.cc \
+	v8/src/hydrogen-load-elimination.cc \
+	v8/src/hydrogen-mark-deoptimize.cc \
+	v8/src/hydrogen-mark-unreachable.cc \
+	v8/src/hydrogen-osr.cc \
+	v8/src/hydrogen-range-analysis.cc \
+	v8/src/hydrogen-redundant-phi.cc \
+	v8/src/hydrogen-removable-simulates.cc \
+	v8/src/hydrogen-representation-changes.cc \
+	v8/src/hydrogen-sce.cc \
+	v8/src/hydrogen-store-elimination.cc \
+	v8/src/hydrogen-types.cc \
+	v8/src/hydrogen-uint32-analysis.cc \
+	v8/src/i18n.cc \
+	v8/src/icu_util.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
+	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
+	v8/src/interpreter-irregexp.cc \
+	v8/src/isolate.cc \
+	v8/src/jsregexp.cc \
+	v8/src/lithium-allocator.cc \
+	v8/src/lithium-codegen.cc \
+	v8/src/lithium.cc \
+	v8/src/liveedit.cc \
+	v8/src/log-utils.cc \
+	v8/src/log.cc \
+	v8/src/lookup.cc \
+	v8/src/messages.cc \
+	v8/src/objects-debug.cc \
+	v8/src/objects-printer.cc \
+	v8/src/objects.cc \
+	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
+	v8/src/parser.cc \
+	v8/src/perf-jit.cc \
+	v8/src/preparse-data.cc \
+	v8/src/preparser.cc \
+	v8/src/prettyprinter.cc \
+	v8/src/profile-generator.cc \
+	v8/src/property.cc \
+	v8/src/regexp-macro-assembler-irregexp.cc \
+	v8/src/regexp-macro-assembler-tracer.cc \
+	v8/src/regexp-macro-assembler.cc \
+	v8/src/regexp-stack.cc \
+	v8/src/rewriter.cc \
+	v8/src/runtime-profiler.cc \
+	v8/src/runtime.cc \
+	v8/src/safepoint-table.cc \
+	v8/src/sampler.cc \
+	v8/src/scanner-character-streams.cc \
+	v8/src/scanner.cc \
+	v8/src/scopeinfo.cc \
+	v8/src/scopes.cc \
+	v8/src/serialize.cc \
+	v8/src/snapshot-source-sink.cc \
+	v8/src/string-search.cc \
+	v8/src/string-stream.cc \
+	v8/src/strtod.cc \
+	v8/src/ic/stub-cache.cc \
+	v8/src/token.cc \
+	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
+	v8/src/type-info.cc \
+	v8/src/types.cc \
+	v8/src/typing.cc \
+	v8/src/unicode.cc \
+	v8/src/utils.cc \
+	v8/src/v8.cc \
+	v8/src/v8threads.cc \
+	v8/src/variables.cc \
+	v8/src/version.cc \
+	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
+	v8/src/mips64/assembler-mips64.cc \
+	v8/src/mips64/builtins-mips64.cc \
+	v8/src/mips64/codegen-mips64.cc \
+	v8/src/mips64/code-stubs-mips64.cc \
+	v8/src/mips64/constants-mips64.cc \
+	v8/src/mips64/cpu-mips64.cc \
+	v8/src/mips64/debug-mips64.cc \
+	v8/src/mips64/deoptimizer-mips64.cc \
+	v8/src/mips64/disasm-mips64.cc \
+	v8/src/mips64/frames-mips64.cc \
+	v8/src/mips64/full-codegen-mips64.cc \
+	v8/src/mips64/interface-descriptors-mips64.cc \
+	v8/src/mips64/lithium-codegen-mips64.cc \
+	v8/src/mips64/lithium-gap-resolver-mips64.cc \
+	v8/src/mips64/lithium-mips64.cc \
+	v8/src/mips64/macro-assembler-mips64.cc \
+	v8/src/mips64/regexp-macro-assembler-mips64.cc \
+	v8/src/mips64/simulator-mips64.cc \
+	v8/src/ic/mips64/access-compiler-mips64.cc \
+	v8/src/ic/mips64/handler-compiler-mips64.cc \
+	v8/src/ic/mips64/ic-mips64.cc \
+	v8/src/ic/mips64/ic-compiler-mips64.cc \
+	v8/src/ic/mips64/stub-cache-mips64.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
+	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
+	'-DU_STATIC_IMPLEMENTATION' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(LOCAL_PATH)/third_party/icu/source/i18n \
+	$(LOCAL_PATH)/third_party/icu/source/common
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
+	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
+	'-DU_STATIC_IMPLEMENTATION' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(LOCAL_PATH)/third_party/icu/source/i18n \
+	$(LOCAL_PATH)/third_party/icu/source/common
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_base
+v8_base: v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_base.host.linux-x86.mk b/tools/gyp/v8_base.host.linux-x86.mk
index 32049d0..e810a4a 100644
--- a/tools/gyp/v8_base.host.linux-x86.mk
+++ b/tools/gyp/v8_base.host.linux-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/ia32/assembler-ia32.cc \
 	v8/src/ia32/builtins-ia32.cc \
 	v8/src/ia32/code-stubs-ia32.cc \
@@ -185,15 +229,20 @@
 	v8/src/ia32/disasm-ia32.cc \
 	v8/src/ia32/frames-ia32.cc \
 	v8/src/ia32/full-codegen-ia32.cc \
-	v8/src/ia32/ic-ia32.cc \
+	v8/src/ia32/interface-descriptors-ia32.cc \
 	v8/src/ia32/lithium-codegen-ia32.cc \
 	v8/src/ia32/lithium-gap-resolver-ia32.cc \
 	v8/src/ia32/lithium-ia32.cc \
 	v8/src/ia32/macro-assembler-ia32.cc \
 	v8/src/ia32/regexp-macro-assembler-ia32.cc \
-	v8/src/ia32/stub-cache-ia32.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/ia32/code-generator-ia32.cc \
+	v8/src/compiler/ia32/instruction-selector-ia32.cc \
+	v8/src/compiler/ia32/linkage-ia32.cc \
+	v8/src/ic/ia32/access-compiler-ia32.cc \
+	v8/src/ic/ia32/handler-compiler-ia32.cc \
+	v8/src/ic/ia32/ic-ia32.cc \
+	v8/src/ic/ia32/ic-compiler-ia32.cc \
+	v8/src/ic/ia32/stub-cache-ia32.cc
 
 
 # Flags passed to both C and C++ files.
@@ -201,18 +250,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -220,7 +276,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -237,20 +292,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -272,28 +328,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -306,7 +368,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -323,20 +384,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -352,47 +414,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_base.host.linux-x86_64.mk b/tools/gyp/v8_base.host.linux-x86_64.mk
index 938069d..72d5670 100644
--- a/tools/gyp/v8_base.host.linux-x86_64.mk
+++ b/tools/gyp/v8_base.host.linux-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -33,23 +32,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -68,7 +109,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -79,7 +119,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -107,15 +155,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -123,18 +173,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -154,27 +200,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/x64/assembler-x64.cc \
 	v8/src/x64/builtins-x64.cc \
 	v8/src/x64/code-stubs-x64.cc \
@@ -185,15 +229,20 @@
 	v8/src/x64/disasm-x64.cc \
 	v8/src/x64/frames-x64.cc \
 	v8/src/x64/full-codegen-x64.cc \
-	v8/src/x64/ic-x64.cc \
+	v8/src/x64/interface-descriptors-x64.cc \
 	v8/src/x64/lithium-codegen-x64.cc \
 	v8/src/x64/lithium-gap-resolver-x64.cc \
 	v8/src/x64/lithium-x64.cc \
 	v8/src/x64/macro-assembler-x64.cc \
 	v8/src/x64/regexp-macro-assembler-x64.cc \
-	v8/src/x64/stub-cache-x64.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/x64/code-generator-x64.cc \
+	v8/src/compiler/x64/instruction-selector-x64.cc \
+	v8/src/compiler/x64/linkage-x64.cc \
+	v8/src/ic/x64/access-compiler-x64.cc \
+	v8/src/ic/x64/handler-compiler-x64.cc \
+	v8/src/ic/x64/ic-x64.cc \
+	v8/src/ic/x64/ic-compiler-x64.cc \
+	v8/src/ic/x64/stub-cache-x64.cc
 
 
 # Flags passed to both C and C++ files.
@@ -201,18 +250,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -220,7 +276,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -237,20 +292,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -272,28 +328,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -306,7 +368,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -323,20 +384,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DU_STATIC_IMPLEMENTATION' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -352,47 +414,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_base.target.darwin-arm.mk b/tools/gyp/v8_base.target.darwin-arm.mk
index 1f8587b..ed43608 100644
--- a/tools/gyp/v8_base.target.darwin-arm.mk
+++ b/tools/gyp/v8_base.target.darwin-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/arm/assembler-arm.cc \
 	v8/src/arm/builtins-arm.cc \
 	v8/src/arm/code-stubs-arm.cc \
@@ -187,23 +231,27 @@
 	v8/src/arm/disasm-arm.cc \
 	v8/src/arm/frames-arm.cc \
 	v8/src/arm/full-codegen-arm.cc \
-	v8/src/arm/ic-arm.cc \
+	v8/src/arm/interface-descriptors-arm.cc \
 	v8/src/arm/lithium-arm.cc \
 	v8/src/arm/lithium-codegen-arm.cc \
 	v8/src/arm/lithium-gap-resolver-arm.cc \
 	v8/src/arm/macro-assembler-arm.cc \
 	v8/src/arm/regexp-macro-assembler-arm.cc \
 	v8/src/arm/simulator-arm.cc \
-	v8/src/arm/stub-cache-arm.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/arm/code-generator-arm.cc \
+	v8/src/compiler/arm/instruction-selector-arm.cc \
+	v8/src/compiler/arm/linkage-arm.cc \
+	v8/src/ic/arm/access-compiler-arm.cc \
+	v8/src/ic/arm/handler-compiler-arm.cc \
+	v8/src/ic/arm/ic-arm.cc \
+	v8/src/ic/arm/ic-compiler-arm.cc \
+	v8/src/ic/arm/stub-cache-arm.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -213,13 +261,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -238,6 +286,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -245,7 +294,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -262,16 +310,19 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -279,8 +330,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -307,23 +356,24 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -333,13 +383,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -367,7 +417,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -384,16 +433,19 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -401,8 +453,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -423,73 +473,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.darwin-arm64.mk b/tools/gyp/v8_base.target.darwin-arm64.mk
index 21135fe..03bdb6f 100644
--- a/tools/gyp/v8_base.target.darwin-arm64.mk
+++ b/tools/gyp/v8_base.target.darwin-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/arm64/assembler-arm64.cc \
 	v8/src/arm64/builtins-arm64.cc \
 	v8/src/arm64/codegen-arm64.cc \
@@ -183,29 +227,34 @@
 	v8/src/arm64/cpu-arm64.cc \
 	v8/src/arm64/debug-arm64.cc \
 	v8/src/arm64/decoder-arm64.cc \
+	v8/src/arm64/delayed-masm-arm64.cc \
 	v8/src/arm64/deoptimizer-arm64.cc \
 	v8/src/arm64/disasm-arm64.cc \
 	v8/src/arm64/frames-arm64.cc \
 	v8/src/arm64/full-codegen-arm64.cc \
-	v8/src/arm64/ic-arm64.cc \
 	v8/src/arm64/instructions-arm64.cc \
 	v8/src/arm64/instrument-arm64.cc \
+	v8/src/arm64/interface-descriptors-arm64.cc \
 	v8/src/arm64/lithium-arm64.cc \
 	v8/src/arm64/lithium-codegen-arm64.cc \
 	v8/src/arm64/lithium-gap-resolver-arm64.cc \
 	v8/src/arm64/macro-assembler-arm64.cc \
 	v8/src/arm64/regexp-macro-assembler-arm64.cc \
 	v8/src/arm64/simulator-arm64.cc \
-	v8/src/arm64/stub-cache-arm64.cc \
 	v8/src/arm64/utils-arm64.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/arm64/code-generator-arm64.cc \
+	v8/src/compiler/arm64/instruction-selector-arm64.cc \
+	v8/src/compiler/arm64/linkage-arm64.cc \
+	v8/src/ic/arm64/access-compiler-arm64.cc \
+	v8/src/ic/arm64/handler-compiler-arm64.cc \
+	v8/src/ic/arm64/ic-arm64.cc \
+	v8/src/ic/arm64/ic-compiler-arm64.cc \
+	v8/src/ic/arm64/stub-cache-arm64.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -231,13 +280,13 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -254,16 +303,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -297,21 +348,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -345,7 +397,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -362,16 +413,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -399,64 +452,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.darwin-mips.mk b/tools/gyp/v8_base.target.darwin-mips.mk
index 2bc88cc..7065708 100644
--- a/tools/gyp/v8_base.target.darwin-mips.mk
+++ b/tools/gyp/v8_base.target.darwin-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/mips/assembler-mips.cc \
 	v8/src/mips/builtins-mips.cc \
 	v8/src/mips/codegen-mips.cc \
@@ -187,16 +231,18 @@
 	v8/src/mips/disasm-mips.cc \
 	v8/src/mips/frames-mips.cc \
 	v8/src/mips/full-codegen-mips.cc \
-	v8/src/mips/ic-mips.cc \
+	v8/src/mips/interface-descriptors-mips.cc \
 	v8/src/mips/lithium-codegen-mips.cc \
 	v8/src/mips/lithium-gap-resolver-mips.cc \
 	v8/src/mips/lithium-mips.cc \
 	v8/src/mips/macro-assembler-mips.cc \
 	v8/src/mips/regexp-macro-assembler-mips.cc \
 	v8/src/mips/simulator-mips.cc \
-	v8/src/mips/stub-cache-mips.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/ic/mips/access-compiler-mips.cc \
+	v8/src/ic/mips/handler-compiler-mips.cc \
+	v8/src/ic/mips/ic-mips.cc \
+	v8/src/ic/mips/ic-compiler-mips.cc \
+	v8/src/ic/mips/stub-cache-mips.cc
 
 
 # Flags passed to both C and C++ files.
@@ -204,7 +250,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -213,8 +258,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -233,6 +276,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -240,7 +284,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -257,19 +300,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
 	'-D_MIPS_ARCH_MIPS32R2' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -303,24 +348,25 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -329,8 +375,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -358,7 +402,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -375,19 +418,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
 	'-D_MIPS_ARCH_MIPS32R2' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -415,69 +460,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.darwin-mips64.mk b/tools/gyp/v8_base.target.darwin-mips64.mk
new file mode 100644
index 0000000..4ab5e3a
--- /dev/null
+++ b/tools/gyp/v8_base.target.darwin-mips64.mk
@@ -0,0 +1,495 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(call intermediates-dir-for,GYP,third_party_icu_icui18n_gyp,,,$(GYP_VAR_PREFIX))/icui18n.stamp \
+	$(call intermediates-dir-for,GYP,third_party_icu_icuuc_gyp,,,$(GYP_VAR_PREFIX))/icuuc.stamp
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/accessors.cc \
+	v8/src/allocation.cc \
+	v8/src/allocation-site-scopes.cc \
+	v8/src/allocation-tracker.cc \
+	v8/src/api.cc \
+	v8/src/arguments.cc \
+	v8/src/assembler.cc \
+	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
+	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
+	v8/src/bignum-dtoa.cc \
+	v8/src/bignum.cc \
+	v8/src/bootstrapper.cc \
+	v8/src/builtins.cc \
+	v8/src/cached-powers.cc \
+	v8/src/checks.cc \
+	v8/src/code-factory.cc \
+	v8/src/code-stubs.cc \
+	v8/src/code-stubs-hydrogen.cc \
+	v8/src/codegen.cc \
+	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
+	v8/src/compiler.cc \
+	v8/src/contexts.cc \
+	v8/src/conversions.cc \
+	v8/src/counters.cc \
+	v8/src/cpu-profiler.cc \
+	v8/src/data-flow.cc \
+	v8/src/date.cc \
+	v8/src/dateparser.cc \
+	v8/src/debug.cc \
+	v8/src/deoptimizer.cc \
+	v8/src/disassembler.cc \
+	v8/src/diy-fp.cc \
+	v8/src/dtoa.cc \
+	v8/src/elements-kind.cc \
+	v8/src/elements.cc \
+	v8/src/execution.cc \
+	v8/src/extensions/externalize-string-extension.cc \
+	v8/src/extensions/free-buffer-extension.cc \
+	v8/src/extensions/gc-extension.cc \
+	v8/src/extensions/statistics-extension.cc \
+	v8/src/extensions/trigger-failure-extension.cc \
+	v8/src/factory.cc \
+	v8/src/fast-dtoa.cc \
+	v8/src/fixed-dtoa.cc \
+	v8/src/flags.cc \
+	v8/src/frames.cc \
+	v8/src/full-codegen.cc \
+	v8/src/func-name-inferrer.cc \
+	v8/src/gdb-jit.cc \
+	v8/src/global-handles.cc \
+	v8/src/handles.cc \
+	v8/src/heap-profiler.cc \
+	v8/src/heap-snapshot-generator.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
+	v8/src/hydrogen-bce.cc \
+	v8/src/hydrogen-bch.cc \
+	v8/src/hydrogen-canonicalize.cc \
+	v8/src/hydrogen-check-elimination.cc \
+	v8/src/hydrogen-dce.cc \
+	v8/src/hydrogen-dehoist.cc \
+	v8/src/hydrogen-environment-liveness.cc \
+	v8/src/hydrogen-escape-analysis.cc \
+	v8/src/hydrogen-instructions.cc \
+	v8/src/hydrogen.cc \
+	v8/src/hydrogen-gvn.cc \
+	v8/src/hydrogen-infer-representation.cc \
+	v8/src/hydrogen-infer-types.cc \
+	v8/src/hydrogen-load-elimination.cc \
+	v8/src/hydrogen-mark-deoptimize.cc \
+	v8/src/hydrogen-mark-unreachable.cc \
+	v8/src/hydrogen-osr.cc \
+	v8/src/hydrogen-range-analysis.cc \
+	v8/src/hydrogen-redundant-phi.cc \
+	v8/src/hydrogen-removable-simulates.cc \
+	v8/src/hydrogen-representation-changes.cc \
+	v8/src/hydrogen-sce.cc \
+	v8/src/hydrogen-store-elimination.cc \
+	v8/src/hydrogen-types.cc \
+	v8/src/hydrogen-uint32-analysis.cc \
+	v8/src/i18n.cc \
+	v8/src/icu_util.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
+	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
+	v8/src/interpreter-irregexp.cc \
+	v8/src/isolate.cc \
+	v8/src/jsregexp.cc \
+	v8/src/lithium-allocator.cc \
+	v8/src/lithium-codegen.cc \
+	v8/src/lithium.cc \
+	v8/src/liveedit.cc \
+	v8/src/log-utils.cc \
+	v8/src/log.cc \
+	v8/src/lookup.cc \
+	v8/src/messages.cc \
+	v8/src/objects-debug.cc \
+	v8/src/objects-printer.cc \
+	v8/src/objects.cc \
+	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
+	v8/src/parser.cc \
+	v8/src/perf-jit.cc \
+	v8/src/preparse-data.cc \
+	v8/src/preparser.cc \
+	v8/src/prettyprinter.cc \
+	v8/src/profile-generator.cc \
+	v8/src/property.cc \
+	v8/src/regexp-macro-assembler-irregexp.cc \
+	v8/src/regexp-macro-assembler-tracer.cc \
+	v8/src/regexp-macro-assembler.cc \
+	v8/src/regexp-stack.cc \
+	v8/src/rewriter.cc \
+	v8/src/runtime-profiler.cc \
+	v8/src/runtime.cc \
+	v8/src/safepoint-table.cc \
+	v8/src/sampler.cc \
+	v8/src/scanner-character-streams.cc \
+	v8/src/scanner.cc \
+	v8/src/scopeinfo.cc \
+	v8/src/scopes.cc \
+	v8/src/serialize.cc \
+	v8/src/snapshot-source-sink.cc \
+	v8/src/string-search.cc \
+	v8/src/string-stream.cc \
+	v8/src/strtod.cc \
+	v8/src/ic/stub-cache.cc \
+	v8/src/token.cc \
+	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
+	v8/src/type-info.cc \
+	v8/src/types.cc \
+	v8/src/typing.cc \
+	v8/src/unicode.cc \
+	v8/src/utils.cc \
+	v8/src/v8.cc \
+	v8/src/v8threads.cc \
+	v8/src/variables.cc \
+	v8/src/version.cc \
+	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
+	v8/src/mips64/assembler-mips64.cc \
+	v8/src/mips64/builtins-mips64.cc \
+	v8/src/mips64/codegen-mips64.cc \
+	v8/src/mips64/code-stubs-mips64.cc \
+	v8/src/mips64/constants-mips64.cc \
+	v8/src/mips64/cpu-mips64.cc \
+	v8/src/mips64/debug-mips64.cc \
+	v8/src/mips64/deoptimizer-mips64.cc \
+	v8/src/mips64/disasm-mips64.cc \
+	v8/src/mips64/frames-mips64.cc \
+	v8/src/mips64/full-codegen-mips64.cc \
+	v8/src/mips64/interface-descriptors-mips64.cc \
+	v8/src/mips64/lithium-codegen-mips64.cc \
+	v8/src/mips64/lithium-gap-resolver-mips64.cc \
+	v8/src/mips64/lithium-mips64.cc \
+	v8/src/mips64/macro-assembler-mips64.cc \
+	v8/src/mips64/regexp-macro-assembler-mips64.cc \
+	v8/src/mips64/simulator-mips64.cc \
+	v8/src/ic/mips64/access-compiler-mips64.cc \
+	v8/src/ic/mips64/handler-compiler-mips64.cc \
+	v8/src/ic/mips64/ic-mips64.cc \
+	v8/src/ic/mips64/ic-compiler-mips64.cc \
+	v8/src/ic/mips64/stub-cache-mips64.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
+	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(gyp_shared_intermediate_dir)/shim_headers/icuuc/target \
+	$(gyp_shared_intermediate_dir)/shim_headers/icui18n/target \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/external/icu/icu4c/source/common \
+	$(PWD)/external/icu/icu4c/source/i18n \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
+	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(gyp_shared_intermediate_dir)/shim_headers/icuuc/target \
+	$(gyp_shared_intermediate_dir)/shim_headers/icui18n/target \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/external/icu/icu4c/source/common \
+	$(PWD)/external/icu/icu4c/source/i18n \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+
+LOCAL_SHARED_LIBRARIES := \
+	libstlport \
+	libdl
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_base_gyp
+
+# Alias gyp target name.
+.PHONY: v8_base
+v8_base: v8_tools_gyp_v8_base_gyp
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_base.target.darwin-x86.mk b/tools/gyp/v8_base.target.darwin-x86.mk
index 8979eb5..2b402da 100644
--- a/tools/gyp/v8_base.target.darwin-x86.mk
+++ b/tools/gyp/v8_base.target.darwin-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/ia32/assembler-ia32.cc \
 	v8/src/ia32/builtins-ia32.cc \
 	v8/src/ia32/code-stubs-ia32.cc \
@@ -186,21 +230,25 @@
 	v8/src/ia32/disasm-ia32.cc \
 	v8/src/ia32/frames-ia32.cc \
 	v8/src/ia32/full-codegen-ia32.cc \
-	v8/src/ia32/ic-ia32.cc \
+	v8/src/ia32/interface-descriptors-ia32.cc \
 	v8/src/ia32/lithium-codegen-ia32.cc \
 	v8/src/ia32/lithium-gap-resolver-ia32.cc \
 	v8/src/ia32/lithium-ia32.cc \
 	v8/src/ia32/macro-assembler-ia32.cc \
 	v8/src/ia32/regexp-macro-assembler-ia32.cc \
-	v8/src/ia32/stub-cache-ia32.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/ia32/code-generator-ia32.cc \
+	v8/src/compiler/ia32/instruction-selector-ia32.cc \
+	v8/src/compiler/ia32/linkage-ia32.cc \
+	v8/src/ic/ia32/access-compiler-ia32.cc \
+	v8/src/ic/ia32/handler-compiler-ia32.cc \
+	v8/src/ic/ia32/ic-ia32.cc \
+	v8/src/ic/ia32/ic-compiler-ia32.cc \
+	v8/src/ic/ia32/stub-cache-ia32.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -229,8 +277,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -238,7 +288,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -255,16 +304,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -298,21 +349,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -341,6 +393,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -352,7 +405,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -369,16 +421,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -406,68 +460,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.darwin-x86_64.mk b/tools/gyp/v8_base.target.darwin-x86_64.mk
index 9eb56dc..172298f 100644
--- a/tools/gyp/v8_base.target.darwin-x86_64.mk
+++ b/tools/gyp/v8_base.target.darwin-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/x64/assembler-x64.cc \
 	v8/src/x64/builtins-x64.cc \
 	v8/src/x64/code-stubs-x64.cc \
@@ -186,22 +230,26 @@
 	v8/src/x64/disasm-x64.cc \
 	v8/src/x64/frames-x64.cc \
 	v8/src/x64/full-codegen-x64.cc \
-	v8/src/x64/ic-x64.cc \
+	v8/src/x64/interface-descriptors-x64.cc \
 	v8/src/x64/lithium-codegen-x64.cc \
 	v8/src/x64/lithium-gap-resolver-x64.cc \
 	v8/src/x64/lithium-x64.cc \
 	v8/src/x64/macro-assembler-x64.cc \
 	v8/src/x64/regexp-macro-assembler-x64.cc \
-	v8/src/x64/stub-cache-x64.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/x64/code-generator-x64.cc \
+	v8/src/compiler/x64/instruction-selector-x64.cc \
+	v8/src/compiler/x64/linkage-x64.cc \
+	v8/src/ic/x64/access-compiler-x64.cc \
+	v8/src/ic/x64/handler-compiler-x64.cc \
+	v8/src/ic/x64/ic-x64.cc \
+	v8/src/ic/x64/ic-compiler-x64.cc \
+	v8/src/ic/x64/stub-cache-x64.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -228,8 +276,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -237,7 +287,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -254,16 +303,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -297,22 +348,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -339,6 +391,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -350,7 +403,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -367,16 +419,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -404,68 +458,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.linux-arm.mk b/tools/gyp/v8_base.target.linux-arm.mk
index 1f8587b..ed43608 100644
--- a/tools/gyp/v8_base.target.linux-arm.mk
+++ b/tools/gyp/v8_base.target.linux-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/arm/assembler-arm.cc \
 	v8/src/arm/builtins-arm.cc \
 	v8/src/arm/code-stubs-arm.cc \
@@ -187,23 +231,27 @@
 	v8/src/arm/disasm-arm.cc \
 	v8/src/arm/frames-arm.cc \
 	v8/src/arm/full-codegen-arm.cc \
-	v8/src/arm/ic-arm.cc \
+	v8/src/arm/interface-descriptors-arm.cc \
 	v8/src/arm/lithium-arm.cc \
 	v8/src/arm/lithium-codegen-arm.cc \
 	v8/src/arm/lithium-gap-resolver-arm.cc \
 	v8/src/arm/macro-assembler-arm.cc \
 	v8/src/arm/regexp-macro-assembler-arm.cc \
 	v8/src/arm/simulator-arm.cc \
-	v8/src/arm/stub-cache-arm.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/arm/code-generator-arm.cc \
+	v8/src/compiler/arm/instruction-selector-arm.cc \
+	v8/src/compiler/arm/linkage-arm.cc \
+	v8/src/ic/arm/access-compiler-arm.cc \
+	v8/src/ic/arm/handler-compiler-arm.cc \
+	v8/src/ic/arm/ic-arm.cc \
+	v8/src/ic/arm/ic-compiler-arm.cc \
+	v8/src/ic/arm/stub-cache-arm.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -213,13 +261,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -238,6 +286,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -245,7 +294,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -262,16 +310,19 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -279,8 +330,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -307,23 +356,24 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -333,13 +383,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -367,7 +417,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -384,16 +433,19 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -401,8 +453,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -423,73 +473,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.linux-arm64.mk b/tools/gyp/v8_base.target.linux-arm64.mk
index 21135fe..03bdb6f 100644
--- a/tools/gyp/v8_base.target.linux-arm64.mk
+++ b/tools/gyp/v8_base.target.linux-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/arm64/assembler-arm64.cc \
 	v8/src/arm64/builtins-arm64.cc \
 	v8/src/arm64/codegen-arm64.cc \
@@ -183,29 +227,34 @@
 	v8/src/arm64/cpu-arm64.cc \
 	v8/src/arm64/debug-arm64.cc \
 	v8/src/arm64/decoder-arm64.cc \
+	v8/src/arm64/delayed-masm-arm64.cc \
 	v8/src/arm64/deoptimizer-arm64.cc \
 	v8/src/arm64/disasm-arm64.cc \
 	v8/src/arm64/frames-arm64.cc \
 	v8/src/arm64/full-codegen-arm64.cc \
-	v8/src/arm64/ic-arm64.cc \
 	v8/src/arm64/instructions-arm64.cc \
 	v8/src/arm64/instrument-arm64.cc \
+	v8/src/arm64/interface-descriptors-arm64.cc \
 	v8/src/arm64/lithium-arm64.cc \
 	v8/src/arm64/lithium-codegen-arm64.cc \
 	v8/src/arm64/lithium-gap-resolver-arm64.cc \
 	v8/src/arm64/macro-assembler-arm64.cc \
 	v8/src/arm64/regexp-macro-assembler-arm64.cc \
 	v8/src/arm64/simulator-arm64.cc \
-	v8/src/arm64/stub-cache-arm64.cc \
 	v8/src/arm64/utils-arm64.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/arm64/code-generator-arm64.cc \
+	v8/src/compiler/arm64/instruction-selector-arm64.cc \
+	v8/src/compiler/arm64/linkage-arm64.cc \
+	v8/src/ic/arm64/access-compiler-arm64.cc \
+	v8/src/ic/arm64/handler-compiler-arm64.cc \
+	v8/src/ic/arm64/ic-arm64.cc \
+	v8/src/ic/arm64/ic-compiler-arm64.cc \
+	v8/src/ic/arm64/stub-cache-arm64.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -231,13 +280,13 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -254,16 +303,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -297,21 +348,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -345,7 +397,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -362,16 +413,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -399,64 +452,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.linux-mips.mk b/tools/gyp/v8_base.target.linux-mips.mk
index 2bc88cc..7065708 100644
--- a/tools/gyp/v8_base.target.linux-mips.mk
+++ b/tools/gyp/v8_base.target.linux-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/mips/assembler-mips.cc \
 	v8/src/mips/builtins-mips.cc \
 	v8/src/mips/codegen-mips.cc \
@@ -187,16 +231,18 @@
 	v8/src/mips/disasm-mips.cc \
 	v8/src/mips/frames-mips.cc \
 	v8/src/mips/full-codegen-mips.cc \
-	v8/src/mips/ic-mips.cc \
+	v8/src/mips/interface-descriptors-mips.cc \
 	v8/src/mips/lithium-codegen-mips.cc \
 	v8/src/mips/lithium-gap-resolver-mips.cc \
 	v8/src/mips/lithium-mips.cc \
 	v8/src/mips/macro-assembler-mips.cc \
 	v8/src/mips/regexp-macro-assembler-mips.cc \
 	v8/src/mips/simulator-mips.cc \
-	v8/src/mips/stub-cache-mips.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/ic/mips/access-compiler-mips.cc \
+	v8/src/ic/mips/handler-compiler-mips.cc \
+	v8/src/ic/mips/ic-mips.cc \
+	v8/src/ic/mips/ic-compiler-mips.cc \
+	v8/src/ic/mips/stub-cache-mips.cc
 
 
 # Flags passed to both C and C++ files.
@@ -204,7 +250,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -213,8 +258,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -233,6 +276,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -240,7 +284,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -257,19 +300,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
 	'-D_MIPS_ARCH_MIPS32R2' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -303,24 +348,25 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -329,8 +375,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -358,7 +402,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -375,19 +418,21 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
 	'-D_MIPS_ARCH_MIPS32R2' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -415,69 +460,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.linux-mips64.mk b/tools/gyp/v8_base.target.linux-mips64.mk
new file mode 100644
index 0000000..4ab5e3a
--- /dev/null
+++ b/tools/gyp/v8_base.target.linux-mips64.mk
@@ -0,0 +1,495 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(call intermediates-dir-for,GYP,third_party_icu_icui18n_gyp,,,$(GYP_VAR_PREFIX))/icui18n.stamp \
+	$(call intermediates-dir-for,GYP,third_party_icu_icuuc_gyp,,,$(GYP_VAR_PREFIX))/icuuc.stamp
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/accessors.cc \
+	v8/src/allocation.cc \
+	v8/src/allocation-site-scopes.cc \
+	v8/src/allocation-tracker.cc \
+	v8/src/api.cc \
+	v8/src/arguments.cc \
+	v8/src/assembler.cc \
+	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
+	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
+	v8/src/bignum-dtoa.cc \
+	v8/src/bignum.cc \
+	v8/src/bootstrapper.cc \
+	v8/src/builtins.cc \
+	v8/src/cached-powers.cc \
+	v8/src/checks.cc \
+	v8/src/code-factory.cc \
+	v8/src/code-stubs.cc \
+	v8/src/code-stubs-hydrogen.cc \
+	v8/src/codegen.cc \
+	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
+	v8/src/compiler.cc \
+	v8/src/contexts.cc \
+	v8/src/conversions.cc \
+	v8/src/counters.cc \
+	v8/src/cpu-profiler.cc \
+	v8/src/data-flow.cc \
+	v8/src/date.cc \
+	v8/src/dateparser.cc \
+	v8/src/debug.cc \
+	v8/src/deoptimizer.cc \
+	v8/src/disassembler.cc \
+	v8/src/diy-fp.cc \
+	v8/src/dtoa.cc \
+	v8/src/elements-kind.cc \
+	v8/src/elements.cc \
+	v8/src/execution.cc \
+	v8/src/extensions/externalize-string-extension.cc \
+	v8/src/extensions/free-buffer-extension.cc \
+	v8/src/extensions/gc-extension.cc \
+	v8/src/extensions/statistics-extension.cc \
+	v8/src/extensions/trigger-failure-extension.cc \
+	v8/src/factory.cc \
+	v8/src/fast-dtoa.cc \
+	v8/src/fixed-dtoa.cc \
+	v8/src/flags.cc \
+	v8/src/frames.cc \
+	v8/src/full-codegen.cc \
+	v8/src/func-name-inferrer.cc \
+	v8/src/gdb-jit.cc \
+	v8/src/global-handles.cc \
+	v8/src/handles.cc \
+	v8/src/heap-profiler.cc \
+	v8/src/heap-snapshot-generator.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
+	v8/src/hydrogen-bce.cc \
+	v8/src/hydrogen-bch.cc \
+	v8/src/hydrogen-canonicalize.cc \
+	v8/src/hydrogen-check-elimination.cc \
+	v8/src/hydrogen-dce.cc \
+	v8/src/hydrogen-dehoist.cc \
+	v8/src/hydrogen-environment-liveness.cc \
+	v8/src/hydrogen-escape-analysis.cc \
+	v8/src/hydrogen-instructions.cc \
+	v8/src/hydrogen.cc \
+	v8/src/hydrogen-gvn.cc \
+	v8/src/hydrogen-infer-representation.cc \
+	v8/src/hydrogen-infer-types.cc \
+	v8/src/hydrogen-load-elimination.cc \
+	v8/src/hydrogen-mark-deoptimize.cc \
+	v8/src/hydrogen-mark-unreachable.cc \
+	v8/src/hydrogen-osr.cc \
+	v8/src/hydrogen-range-analysis.cc \
+	v8/src/hydrogen-redundant-phi.cc \
+	v8/src/hydrogen-removable-simulates.cc \
+	v8/src/hydrogen-representation-changes.cc \
+	v8/src/hydrogen-sce.cc \
+	v8/src/hydrogen-store-elimination.cc \
+	v8/src/hydrogen-types.cc \
+	v8/src/hydrogen-uint32-analysis.cc \
+	v8/src/i18n.cc \
+	v8/src/icu_util.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
+	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
+	v8/src/interpreter-irregexp.cc \
+	v8/src/isolate.cc \
+	v8/src/jsregexp.cc \
+	v8/src/lithium-allocator.cc \
+	v8/src/lithium-codegen.cc \
+	v8/src/lithium.cc \
+	v8/src/liveedit.cc \
+	v8/src/log-utils.cc \
+	v8/src/log.cc \
+	v8/src/lookup.cc \
+	v8/src/messages.cc \
+	v8/src/objects-debug.cc \
+	v8/src/objects-printer.cc \
+	v8/src/objects.cc \
+	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
+	v8/src/parser.cc \
+	v8/src/perf-jit.cc \
+	v8/src/preparse-data.cc \
+	v8/src/preparser.cc \
+	v8/src/prettyprinter.cc \
+	v8/src/profile-generator.cc \
+	v8/src/property.cc \
+	v8/src/regexp-macro-assembler-irregexp.cc \
+	v8/src/regexp-macro-assembler-tracer.cc \
+	v8/src/regexp-macro-assembler.cc \
+	v8/src/regexp-stack.cc \
+	v8/src/rewriter.cc \
+	v8/src/runtime-profiler.cc \
+	v8/src/runtime.cc \
+	v8/src/safepoint-table.cc \
+	v8/src/sampler.cc \
+	v8/src/scanner-character-streams.cc \
+	v8/src/scanner.cc \
+	v8/src/scopeinfo.cc \
+	v8/src/scopes.cc \
+	v8/src/serialize.cc \
+	v8/src/snapshot-source-sink.cc \
+	v8/src/string-search.cc \
+	v8/src/string-stream.cc \
+	v8/src/strtod.cc \
+	v8/src/ic/stub-cache.cc \
+	v8/src/token.cc \
+	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
+	v8/src/type-info.cc \
+	v8/src/types.cc \
+	v8/src/typing.cc \
+	v8/src/unicode.cc \
+	v8/src/utils.cc \
+	v8/src/v8.cc \
+	v8/src/v8threads.cc \
+	v8/src/variables.cc \
+	v8/src/version.cc \
+	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
+	v8/src/mips64/assembler-mips64.cc \
+	v8/src/mips64/builtins-mips64.cc \
+	v8/src/mips64/codegen-mips64.cc \
+	v8/src/mips64/code-stubs-mips64.cc \
+	v8/src/mips64/constants-mips64.cc \
+	v8/src/mips64/cpu-mips64.cc \
+	v8/src/mips64/debug-mips64.cc \
+	v8/src/mips64/deoptimizer-mips64.cc \
+	v8/src/mips64/disasm-mips64.cc \
+	v8/src/mips64/frames-mips64.cc \
+	v8/src/mips64/full-codegen-mips64.cc \
+	v8/src/mips64/interface-descriptors-mips64.cc \
+	v8/src/mips64/lithium-codegen-mips64.cc \
+	v8/src/mips64/lithium-gap-resolver-mips64.cc \
+	v8/src/mips64/lithium-mips64.cc \
+	v8/src/mips64/macro-assembler-mips64.cc \
+	v8/src/mips64/regexp-macro-assembler-mips64.cc \
+	v8/src/mips64/simulator-mips64.cc \
+	v8/src/ic/mips64/access-compiler-mips64.cc \
+	v8/src/ic/mips64/handler-compiler-mips64.cc \
+	v8/src/ic/mips64/ic-mips64.cc \
+	v8/src/ic/mips64/ic-compiler-mips64.cc \
+	v8/src/ic/mips64/stub-cache-mips64.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
+	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(gyp_shared_intermediate_dir)/shim_headers/icuuc/target \
+	$(gyp_shared_intermediate_dir)/shim_headers/icui18n/target \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/external/icu/icu4c/source/common \
+	$(PWD)/external/icu/icu4c/source/i18n \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
+	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(gyp_shared_intermediate_dir)/shim_headers/icuuc/target \
+	$(gyp_shared_intermediate_dir)/shim_headers/icui18n/target \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/external/icu/icu4c/source/common \
+	$(PWD)/external/icu/icu4c/source/i18n \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+
+LOCAL_SHARED_LIBRARIES := \
+	libstlport \
+	libdl
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_base_gyp
+
+# Alias gyp target name.
+.PHONY: v8_base
+v8_base: v8_tools_gyp_v8_base_gyp
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_base.target.linux-x86.mk b/tools/gyp/v8_base.target.linux-x86.mk
index 8979eb5..2b402da 100644
--- a/tools/gyp/v8_base.target.linux-x86.mk
+++ b/tools/gyp/v8_base.target.linux-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/ia32/assembler-ia32.cc \
 	v8/src/ia32/builtins-ia32.cc \
 	v8/src/ia32/code-stubs-ia32.cc \
@@ -186,21 +230,25 @@
 	v8/src/ia32/disasm-ia32.cc \
 	v8/src/ia32/frames-ia32.cc \
 	v8/src/ia32/full-codegen-ia32.cc \
-	v8/src/ia32/ic-ia32.cc \
+	v8/src/ia32/interface-descriptors-ia32.cc \
 	v8/src/ia32/lithium-codegen-ia32.cc \
 	v8/src/ia32/lithium-gap-resolver-ia32.cc \
 	v8/src/ia32/lithium-ia32.cc \
 	v8/src/ia32/macro-assembler-ia32.cc \
 	v8/src/ia32/regexp-macro-assembler-ia32.cc \
-	v8/src/ia32/stub-cache-ia32.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/ia32/code-generator-ia32.cc \
+	v8/src/compiler/ia32/instruction-selector-ia32.cc \
+	v8/src/compiler/ia32/linkage-ia32.cc \
+	v8/src/ic/ia32/access-compiler-ia32.cc \
+	v8/src/ic/ia32/handler-compiler-ia32.cc \
+	v8/src/ic/ia32/ic-ia32.cc \
+	v8/src/ic/ia32/ic-compiler-ia32.cc \
+	v8/src/ic/ia32/stub-cache-ia32.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -229,8 +277,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -238,7 +288,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -255,16 +304,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -298,21 +349,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -341,6 +393,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -352,7 +405,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -369,16 +421,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -406,68 +460,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_base.target.linux-x86_64.mk b/tools/gyp/v8_base.target.linux-x86_64.mk
index 9eb56dc..172298f 100644
--- a/tools/gyp/v8_base.target.linux-x86_64.mk
+++ b/tools/gyp/v8_base.target.linux-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_base_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -34,23 +33,65 @@
 	v8/src/arguments.cc \
 	v8/src/assembler.cc \
 	v8/src/assert-scope.cc \
+	v8/src/ast-value-factory.cc \
 	v8/src/ast.cc \
+	v8/src/background-parsing-task.cc \
+	v8/src/bailout-reason.cc \
 	v8/src/bignum-dtoa.cc \
 	v8/src/bignum.cc \
 	v8/src/bootstrapper.cc \
 	v8/src/builtins.cc \
 	v8/src/cached-powers.cc \
 	v8/src/checks.cc \
+	v8/src/code-factory.cc \
 	v8/src/code-stubs.cc \
 	v8/src/code-stubs-hydrogen.cc \
 	v8/src/codegen.cc \
 	v8/src/compilation-cache.cc \
+	v8/src/compiler/access-builder.cc \
+	v8/src/compiler/ast-graph-builder.cc \
+	v8/src/compiler/change-lowering.cc \
+	v8/src/compiler/code-generator.cc \
+	v8/src/compiler/common-operator.cc \
+	v8/src/compiler/control-builders.cc \
+	v8/src/compiler/gap-resolver.cc \
+	v8/src/compiler/graph-builder.cc \
+	v8/src/compiler/graph-reducer.cc \
+	v8/src/compiler/graph-replay.cc \
+	v8/src/compiler/graph-visualizer.cc \
+	v8/src/compiler/graph.cc \
+	v8/src/compiler/instruction-selector.cc \
+	v8/src/compiler/instruction.cc \
+	v8/src/compiler/js-builtin-reducer.cc \
+	v8/src/compiler/js-context-specialization.cc \
+	v8/src/compiler/js-generic-lowering.cc \
+	v8/src/compiler/js-graph.cc \
+	v8/src/compiler/js-inlining.cc \
+	v8/src/compiler/js-typed-lowering.cc \
+	v8/src/compiler/linkage.cc \
+	v8/src/compiler/machine-operator-reducer.cc \
+	v8/src/compiler/machine-operator.cc \
+	v8/src/compiler/machine-type.cc \
+	v8/src/compiler/node-cache.cc \
+	v8/src/compiler/node.cc \
+	v8/src/compiler/operator.cc \
+	v8/src/compiler/pipeline.cc \
+	v8/src/compiler/raw-machine-assembler.cc \
+	v8/src/compiler/register-allocator.cc \
+	v8/src/compiler/schedule.cc \
+	v8/src/compiler/scheduler.cc \
+	v8/src/compiler/simplified-lowering.cc \
+	v8/src/compiler/simplified-operator-reducer.cc \
+	v8/src/compiler/simplified-operator.cc \
+	v8/src/compiler/source-position.cc \
+	v8/src/compiler/typer.cc \
+	v8/src/compiler/value-numbering-reducer.cc \
+	v8/src/compiler/verifier.cc \
 	v8/src/compiler.cc \
 	v8/src/contexts.cc \
 	v8/src/conversions.cc \
 	v8/src/counters.cc \
 	v8/src/cpu-profiler.cc \
-	v8/src/cpu.cc \
 	v8/src/data-flow.cc \
 	v8/src/date.cc \
 	v8/src/dateparser.cc \
@@ -69,7 +110,6 @@
 	v8/src/extensions/trigger-failure-extension.cc \
 	v8/src/factory.cc \
 	v8/src/fast-dtoa.cc \
-	v8/src/field-index.cc \
 	v8/src/fixed-dtoa.cc \
 	v8/src/flags.cc \
 	v8/src/frames.cc \
@@ -80,7 +120,15 @@
 	v8/src/handles.cc \
 	v8/src/heap-profiler.cc \
 	v8/src/heap-snapshot-generator.cc \
-	v8/src/heap.cc \
+	v8/src/heap/gc-idle-time-handler.cc \
+	v8/src/heap/gc-tracer.cc \
+	v8/src/heap/heap.cc \
+	v8/src/heap/incremental-marking.cc \
+	v8/src/heap/mark-compact.cc \
+	v8/src/heap/objects-visiting.cc \
+	v8/src/heap/spaces.cc \
+	v8/src/heap/store-buffer.cc \
+	v8/src/heap/sweeper-thread.cc \
 	v8/src/hydrogen-bce.cc \
 	v8/src/hydrogen-bch.cc \
 	v8/src/hydrogen-canonicalize.cc \
@@ -108,15 +156,17 @@
 	v8/src/hydrogen-uint32-analysis.cc \
 	v8/src/i18n.cc \
 	v8/src/icu_util.cc \
-	v8/src/ic.cc \
-	v8/src/incremental-marking.cc \
+	v8/src/ic/access-compiler.cc \
+	v8/src/ic/call-optimization.cc \
+	v8/src/ic/handler-compiler.cc \
+	v8/src/ic/ic-state.cc \
+	v8/src/ic/ic.cc \
+	v8/src/ic/ic-compiler.cc \
 	v8/src/interface.cc \
+	v8/src/interface-descriptors.cc \
 	v8/src/interpreter-irregexp.cc \
 	v8/src/isolate.cc \
 	v8/src/jsregexp.cc \
-	v8/src/libplatform/default-platform.cc \
-	v8/src/libplatform/task-queue.cc \
-	v8/src/libplatform/worker-thread.cc \
 	v8/src/lithium-allocator.cc \
 	v8/src/lithium-codegen.cc \
 	v8/src/lithium.cc \
@@ -124,18 +174,14 @@
 	v8/src/log-utils.cc \
 	v8/src/log.cc \
 	v8/src/lookup.cc \
-	v8/src/mark-compact.cc \
 	v8/src/messages.cc \
 	v8/src/objects-debug.cc \
 	v8/src/objects-printer.cc \
-	v8/src/objects-visiting.cc \
 	v8/src/objects.cc \
 	v8/src/optimizing-compiler-thread.cc \
+	v8/src/ostreams.cc \
 	v8/src/parser.cc \
-	v8/src/platform/time.cc \
-	v8/src/platform/condition-variable.cc \
-	v8/src/platform/mutex.cc \
-	v8/src/platform/semaphore.cc \
+	v8/src/perf-jit.cc \
 	v8/src/preparse-data.cc \
 	v8/src/preparser.cc \
 	v8/src/prettyprinter.cc \
@@ -155,27 +201,25 @@
 	v8/src/scopeinfo.cc \
 	v8/src/scopes.cc \
 	v8/src/serialize.cc \
-	v8/src/snapshot-common.cc \
-	v8/src/spaces.cc \
-	v8/src/store-buffer.cc \
+	v8/src/snapshot-source-sink.cc \
 	v8/src/string-search.cc \
 	v8/src/string-stream.cc \
 	v8/src/strtod.cc \
-	v8/src/stub-cache.cc \
-	v8/src/sweeper-thread.cc \
+	v8/src/ic/stub-cache.cc \
 	v8/src/token.cc \
 	v8/src/transitions.cc \
+	v8/src/type-feedback-vector.cc \
 	v8/src/type-info.cc \
 	v8/src/types.cc \
 	v8/src/typing.cc \
 	v8/src/unicode.cc \
 	v8/src/utils.cc \
-	v8/src/utils/random-number-generator.cc \
 	v8/src/v8.cc \
 	v8/src/v8threads.cc \
 	v8/src/variables.cc \
 	v8/src/version.cc \
 	v8/src/zone.cc \
+	v8/third_party/fdlibm/fdlibm.cc \
 	v8/src/x64/assembler-x64.cc \
 	v8/src/x64/builtins-x64.cc \
 	v8/src/x64/code-stubs-x64.cc \
@@ -186,22 +230,26 @@
 	v8/src/x64/disasm-x64.cc \
 	v8/src/x64/frames-x64.cc \
 	v8/src/x64/full-codegen-x64.cc \
-	v8/src/x64/ic-x64.cc \
+	v8/src/x64/interface-descriptors-x64.cc \
 	v8/src/x64/lithium-codegen-x64.cc \
 	v8/src/x64/lithium-gap-resolver-x64.cc \
 	v8/src/x64/lithium-x64.cc \
 	v8/src/x64/macro-assembler-x64.cc \
 	v8/src/x64/regexp-macro-assembler-x64.cc \
-	v8/src/x64/stub-cache-x64.cc \
-	v8/src/platform-posix.cc \
-	v8/src/platform-linux.cc
+	v8/src/compiler/x64/code-generator-x64.cc \
+	v8/src/compiler/x64/instruction-selector-x64.cc \
+	v8/src/compiler/x64/linkage-x64.cc \
+	v8/src/ic/x64/access-compiler-x64.cc \
+	v8/src/ic/x64/handler-compiler-x64.cc \
+	v8/src/ic/x64/ic-x64.cc \
+	v8/src/ic/x64/ic-compiler-x64.cc \
+	v8/src/ic/x64/stub-cache-x64.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -228,8 +276,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -237,7 +287,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -254,16 +303,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -297,22 +348,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -339,6 +391,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -350,7 +403,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -367,16 +419,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
-	'-DCAN_USE_VFP_INSTRUCTIONS' \
 	'-DICU_UTIL_DATA_IMPL=ICU_UTIL_DATA_STATIC' \
 	'-DU_USING_ICU_NAMESPACE=0' \
+	'-DU_ENABLE_DYLOAD=0' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
 	'-DANDROID' \
@@ -404,68 +458,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_base_gyp
diff --git a/tools/gyp/v8_libbase.host.darwin-arm.mk b/tools/gyp/v8_libbase.host.darwin-arm.mk
index c3e0050..4fc97d1 100644
--- a/tools/gyp/v8_libbase.host.darwin-arm.mk
+++ b/tools/gyp/v8_libbase.host.darwin-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-macos.cc
 
 
 # Flags passed to both C and C++ files.
@@ -34,7 +45,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -42,9 +52,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -52,7 +71,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -69,17 +87,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -99,20 +118,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -120,6 +138,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -132,7 +158,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -149,17 +174,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -173,43 +199,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_libbase.host.darwin-arm64.mk b/tools/gyp/v8_libbase.host.darwin-arm64.mk
index 363a271..8f42d0e 100644
--- a/tools/gyp/v8_libbase.host.darwin-arm64.mk
+++ b/tools/gyp/v8_libbase.host.darwin-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-macos.cc
 
 
 # Flags passed to both C and C++ files.
@@ -34,7 +45,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -42,16 +52,24 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -68,11 +86,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -96,20 +116,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -117,6 +136,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -128,7 +155,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -145,11 +171,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -167,43 +195,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_libbase.host.darwin-mips.mk b/tools/gyp/v8_libbase.host.darwin-mips.mk
index 9fa2719..22192e3 100644
--- a/tools/gyp/v8_libbase.host.darwin-mips.mk
+++ b/tools/gyp/v8_libbase.host.darwin-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-macos.cc
 
 
 # Flags passed to both C and C++ files.
@@ -35,7 +46,6 @@
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -43,9 +53,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -53,7 +72,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -70,11 +88,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -101,21 +121,20 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -123,6 +142,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -135,7 +162,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -152,11 +178,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -177,43 +205,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_libbase.host.darwin-mips64.mk b/tools/gyp/v8_libbase.host.darwin-mips64.mk
new file mode 100644
index 0000000..2467bf2
--- /dev/null
+++ b/tools/gyp/v8_libbase.host.darwin-mips64.mk
@@ -0,0 +1,232 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/base/atomicops_internals_x86_gcc.cc \
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-macos.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libbase
+v8_libbase: v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libbase.host.darwin-x86.mk b/tools/gyp/v8_libbase.host.darwin-x86.mk
index c44b2bc..837da07 100644
--- a/tools/gyp/v8_libbase.host.darwin-x86.mk
+++ b/tools/gyp/v8_libbase.host.darwin-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-macos.cc
 
 
 # Flags passed to both C and C++ files.
@@ -34,7 +45,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -42,9 +52,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -52,7 +71,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -69,11 +87,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -97,20 +117,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -118,6 +137,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -130,7 +157,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -147,11 +173,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -169,43 +197,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
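
The closing rules in each of these makefiles follow one alias pattern: a phony umbrella target (gyp_all_modules) and a short friendly name (v8_libbase) both depend on the long generated module name. A self-contained sketch, with the arch-expanded module name hardcoded to an illustrative value:

.PHONY: gyp_all_modules v8_libbase

# Umbrella target: building gyp_all_modules builds every gyp module.
gyp_all_modules: v8_tools_gyp_v8_libbase_x86_host_gyp

# Short alias forwarding to the long generated module name.
v8_libbase: v8_tools_gyp_v8_libbase_x86_host_gyp

v8_tools_gyp_v8_libbase_x86_host_gyp:
	@echo "building $@"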
diff --git a/tools/gyp/v8_libbase.host.darwin-x86_64.mk b/tools/gyp/v8_libbase.host.darwin-x86_64.mk
index a562489..2ac1e5e 100644
--- a/tools/gyp/v8_libbase.host.darwin-x86_64.mk
+++ b/tools/gyp/v8_libbase.host.darwin-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-macos.cc
 
 
 # Flags passed to both C and C++ files.
@@ -34,7 +45,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -42,9 +52,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -52,7 +71,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -69,11 +87,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -97,20 +117,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -118,6 +137,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -130,7 +157,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -147,11 +173,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -169,43 +197,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_libbase.host.linux-arm.mk b/tools/gyp/v8_libbase.host.linux-arm.mk
index 868e363..c7a3873 100644
--- a/tools/gyp/v8_libbase.host.linux-arm.mk
+++ b/tools/gyp/v8_libbase.host.linux-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
@@ -34,18 +45,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -53,7 +71,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -70,17 +87,19 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -100,28 +119,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -134,7 +159,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -151,17 +175,19 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -175,47 +201,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
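
Comparing the darwin and linux variants shows the only real per-OS difference in LOCAL_SRC_FILES: both compile platform-posix.cc, but the darwin files add platform-macos.cc where the linux files add platform-linux.cc. gyp bakes the choice into separate generated files per host OS; a hand-written equivalent might select at make time instead (the uname probe is an assumption for illustration, not how gyp works):

# Sketch: choose the OS-specific platform source at make time.
UNAME_S := $(shell uname -s)
PLATFORM_SRC := v8/src/base/platform/platform-posix.cc
ifeq ($(UNAME_S),Darwin)
  PLATFORM_SRC += v8/src/base/platform/platform-macos.cc
else
  PLATFORM_SRC += v8/src/base/platform/platform-linux.cc
endif

show-platform:
	@echo $(PLATFORM_SRC)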
diff --git a/tools/gyp/v8_libbase.host.linux-arm64.mk b/tools/gyp/v8_libbase.host.linux-arm64.mk
index 87b190a..7514baa 100644
--- a/tools/gyp/v8_libbase.host.linux-arm64.mk
+++ b/tools/gyp/v8_libbase.host.linux-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
@@ -34,25 +45,31 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -69,15 +86,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -97,28 +117,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -130,7 +156,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -147,15 +172,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -169,47 +197,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
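
The change also deletes the per-module LOCAL_LDFLAGS blocks from the host makefiles, presumably because the AOSP build settings referenced in the new comment supply linker flags globally (an assumption; the diff does not say). For reference, the deleted flags rely on the -Wl escape hatch, which forwards the option after each comma to the linker; -z now plus -z relro request full RELRO hardening:

# Sketch reusing the deleted x86_64 values; the hardened target and
# the generated main.c are illustrative.
LDFLAGS := -Wl,-z,now -Wl,-z,relro -pthread -fPIC -m64

hardened:
	@echo 'int main(void){return 0;}' > main.c
	$(CC) $(LDFLAGS) main.c -o hardened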
diff --git a/tools/gyp/v8_libbase.host.linux-mips.mk b/tools/gyp/v8_libbase.host.linux-mips.mk
index eaa6a9e..8e87abc 100644
--- a/tools/gyp/v8_libbase.host.linux-mips.mk
+++ b/tools/gyp/v8_libbase.host.linux-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
@@ -35,18 +46,25 @@
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -54,7 +72,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -71,11 +88,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -83,6 +102,7 @@
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -102,29 +122,35 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -137,7 +163,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -154,11 +179,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -166,6 +193,7 @@
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -179,47 +207,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_libbase.host.linux-mips64.mk b/tools/gyp/v8_libbase.host.linux-mips64.mk
new file mode 100644
index 0000000..5c631fb
--- /dev/null
+++ b/tools/gyp/v8_libbase.host.linux-mips64.mk
@@ -0,0 +1,234 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/base/atomicops_internals_x86_gcc.cc \
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libbase
+v8_libbase: v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
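
Every host makefile, including the new mips64 one above, appends -UANDROID after the computed flags. The preprocessor honors -D and -U in command-line order, so the later -U cancels any earlier -D and host code never sees the target-only ANDROID macro. A standalone probe of that ordering (probe.c and the check target are illustrative):

CFLAGS := -DANDROID
CFLAGS += -UANDROID        # the later -U overrides the earlier -D

check:
	@printf '#ifdef ANDROID\n#error still defined\n#endif\nint main(void){return 0;}\n' > probe.c
	$(CC) $(CFLAGS) -c probe.c -o probe.o && echo "ANDROID not defined"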
diff --git a/tools/gyp/v8_libbase.host.linux-x86.mk b/tools/gyp/v8_libbase.host.linux-x86.mk
index fe47f44..ea2b3e0 100644
--- a/tools/gyp/v8_libbase.host.linux-x86.mk
+++ b/tools/gyp/v8_libbase.host.linux-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
@@ -34,18 +45,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -53,7 +71,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -70,15 +87,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -98,28 +118,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -132,7 +158,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -149,15 +174,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -171,47 +199,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_libbase.host.linux-x86_64.mk b/tools/gyp/v8_libbase.host.linux-x86_64.mk
index 32d87e9..d5c9675 100644
--- a/tools/gyp/v8_libbase.host.linux-x86_64.mk
+++ b/tools/gyp/v8_libbase.host.linux-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -26,7 +25,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
@@ -34,18 +45,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -53,7 +71,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -70,15 +87,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -98,28 +118,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -132,7 +158,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -149,15 +174,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
+	'-DV8_LIBRT_NOT_AVAILABLE=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -171,47 +199,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_libbase.target.darwin-arm.mk b/tools/gyp/v8_libbase.target.darwin-arm.mk
index 765407c..928d99e 100644
--- a/tools/gyp/v8_libbase.target.darwin-arm.mk
+++ b/tools/gyp/v8_libbase.target.darwin-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,14 +24,25 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -42,13 +52,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -67,6 +77,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -74,7 +85,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -91,12 +101,15 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -105,8 +118,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -129,23 +140,24 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -155,13 +167,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -189,7 +201,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -206,12 +217,15 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -220,8 +234,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -238,73 +250,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
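
Each MY_DEFS entry is wrapped in single quotes so that, when make hands the compile line to the shell, the embedded double quotes survive and a macro such as SPDY_PROXY_AUTH_ORIGIN expands to a C string literal. A minimal sketch of the quoting (show-defs is a hypothetical target; the two defines are copied from the lists above):

# Each entry stays a single shell word, double quotes intact.
MY_DEFS := '-DCHROME_BUILD_ID=""' \
	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"'

show-defs:
	@for d in $(MY_DEFS); do printf '%s\n' "$$d"; done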
diff --git a/tools/gyp/v8_libbase.target.darwin-arm64.mk b/tools/gyp/v8_libbase.target.darwin-arm64.mk
index bcc4e7b..e3ffaa0 100644
--- a/tools/gyp/v8_libbase.target.darwin-arm64.mk
+++ b/tools/gyp/v8_libbase.target.darwin-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,13 +24,24 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -57,13 +67,13 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -80,11 +90,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -116,21 +128,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -164,7 +177,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -181,11 +193,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -211,64 +225,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libbase.target.darwin-mips.mk b/tools/gyp/v8_libbase.target.darwin-mips.mk
index cae2ffe..d8b1cd2 100644
--- a/tools/gyp/v8_libbase.target.darwin-mips.mk
+++ b/tools/gyp/v8_libbase.target.darwin-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,7 +24,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
@@ -33,7 +44,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -42,8 +52,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -62,6 +70,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -69,7 +78,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -86,11 +94,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -125,24 +135,25 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -151,8 +162,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -180,7 +189,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -197,11 +205,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -230,69 +240,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libbase.target.darwin-mips64.mk b/tools/gyp/v8_libbase.target.darwin-mips64.mk
new file mode 100644
index 0000000..6fb4c86
--- /dev/null
+++ b/tools/gyp/v8_libbase.target.darwin-mips64.mk
@@ -0,0 +1,275 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/base/atomicops_internals_x86_gcc.cc \
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+
+LOCAL_SHARED_LIBRARIES := \
+	libstlport \
+	libdl
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libbase
+v8_libbase: v8_tools_gyp_v8_libbase_gyp
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libbase.target.darwin-x86.mk b/tools/gyp/v8_libbase.target.darwin-x86.mk
index 8ea8265..dedce1c 100644
--- a/tools/gyp/v8_libbase.target.darwin-x86.mk
+++ b/tools/gyp/v8_libbase.target.darwin-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,13 +24,24 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -60,8 +70,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -69,7 +81,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -86,11 +97,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -122,21 +135,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -165,6 +179,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -176,7 +191,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -193,11 +207,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -223,68 +239,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libbase.target.darwin-x86_64.mk b/tools/gyp/v8_libbase.target.darwin-x86_64.mk
index e61707f..fefb503 100644
--- a/tools/gyp/v8_libbase.target.darwin-x86_64.mk
+++ b/tools/gyp/v8_libbase.target.darwin-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,14 +24,25 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -59,8 +69,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -68,7 +80,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -85,11 +96,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -121,22 +134,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -163,6 +177,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -174,7 +189,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -191,11 +205,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -221,68 +237,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libbase.target.linux-arm.mk b/tools/gyp/v8_libbase.target.linux-arm.mk
index 765407c..928d99e 100644
--- a/tools/gyp/v8_libbase.target.linux-arm.mk
+++ b/tools/gyp/v8_libbase.target.linux-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,14 +24,25 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -42,13 +52,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -67,6 +77,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -74,7 +85,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -91,12 +101,15 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -105,8 +118,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -129,23 +140,24 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -155,13 +167,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -189,7 +201,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -206,12 +217,15 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -220,8 +234,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -238,73 +250,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libbase.target.linux-arm64.mk b/tools/gyp/v8_libbase.target.linux-arm64.mk
index bcc4e7b..e3ffaa0 100644
--- a/tools/gyp/v8_libbase.target.linux-arm64.mk
+++ b/tools/gyp/v8_libbase.target.linux-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,13 +24,24 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -57,13 +67,13 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -80,11 +90,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -116,21 +128,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -164,7 +177,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -181,11 +193,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -211,64 +225,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libbase.target.linux-mips.mk b/tools/gyp/v8_libbase.target.linux-mips.mk
index cae2ffe..d8b1cd2 100644
--- a/tools/gyp/v8_libbase.target.linux-mips.mk
+++ b/tools/gyp/v8_libbase.target.linux-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,7 +24,19 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
@@ -33,7 +44,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -42,8 +52,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -62,6 +70,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -69,7 +78,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -86,11 +94,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -125,24 +135,25 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -151,8 +162,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -180,7 +189,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -197,11 +205,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -230,69 +240,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libbase.target.linux-mips64.mk b/tools/gyp/v8_libbase.target.linux-mips64.mk
new file mode 100644
index 0000000..6fb4c86
--- /dev/null
+++ b/tools/gyp/v8_libbase.target.linux-mips64.mk
@@ -0,0 +1,275 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/base/atomicops_internals_x86_gcc.cc \
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+
+LOCAL_SHARED_LIBRARIES := \
+	libstlport \
+	libdl
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libbase
+v8_libbase: v8_tools_gyp_v8_libbase_gyp
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libbase.target.linux-x86.mk b/tools/gyp/v8_libbase.target.linux-x86.mk
index 8ea8265..dedce1c 100644
--- a/tools/gyp/v8_libbase.target.linux-x86.mk
+++ b/tools/gyp/v8_libbase.target.linux-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,13 +24,24 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -60,8 +70,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -69,7 +81,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -86,11 +97,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -122,21 +135,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -165,6 +179,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -176,7 +191,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -193,11 +207,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -223,68 +239,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libbase.target.linux-x86_64.mk b/tools/gyp/v8_libbase.target.linux-x86_64.mk
index e61707f..fefb503 100644
--- a/tools/gyp/v8_libbase.target.linux-x86_64.mk
+++ b/tools/gyp/v8_libbase.target.linux-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_libbase_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -25,14 +24,25 @@
 
 LOCAL_SRC_FILES := \
 	v8/src/base/atomicops_internals_x86_gcc.cc \
-	v8/src/base/once.cc
+	v8/src/base/bits.cc \
+	v8/src/base/cpu.cc \
+	v8/src/base/division-by-constant.cc \
+	v8/src/base/logging.cc \
+	v8/src/base/once.cc \
+	v8/src/base/platform/time.cc \
+	v8/src/base/platform/condition-variable.cc \
+	v8/src/base/platform/mutex.cc \
+	v8/src/base/platform/semaphore.cc \
+	v8/src/base/sys-info.cc \
+	v8/src/base/utils/random-number-generator.cc \
+	v8/src/base/platform/platform-posix.cc \
+	v8/src/base/platform/platform-linux.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -59,8 +69,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -68,7 +80,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -85,11 +96,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -121,22 +134,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -163,6 +177,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -174,7 +189,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -191,11 +205,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -221,68 +237,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_libbase_gyp
diff --git a/tools/gyp/v8_libplatform.host.darwin-arm.mk b/tools/gyp/v8_libplatform.host.darwin-arm.mk
new file mode 100644
index 0000000..f74456f
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.darwin-arm.mk
@@ -0,0 +1,217 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.darwin-arm64.mk b/tools/gyp/v8_libplatform.host.darwin-arm64.mk
new file mode 100644
index 0000000..9312fd7
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.darwin-arm64.mk
@@ -0,0 +1,213 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m64 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_ARM64' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m64 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_ARM64' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.darwin-mips.mk b/tools/gyp/v8_libplatform.host.darwin-mips.mk
new file mode 100644
index 0000000..6100d59
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.darwin-mips.mk
@@ -0,0 +1,223 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS32R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS32R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.darwin-mips64.mk b/tools/gyp/v8_libplatform.host.darwin-mips64.mk
new file mode 100644
index 0000000..ffa15a7
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.darwin-mips64.mk
@@ -0,0 +1,221 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.darwin-x86.mk b/tools/gyp/v8_libplatform.host.darwin-x86.mk
new file mode 100644
index 0000000..ef57bad
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.darwin-x86.mk
@@ -0,0 +1,215 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_IA32' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_IA32' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.darwin-x86_64.mk b/tools/gyp/v8_libplatform.host.darwin-x86_64.mk
new file mode 100644
index 0000000..31ee0fd
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.darwin-x86_64.mk
@@ -0,0 +1,215 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m64 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_X64' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m64 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_X64' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.linux-arm.mk b/tools/gyp/v8_libplatform.host.linux-arm.mk
new file mode 100644
index 0000000..f74456f
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.linux-arm.mk
@@ -0,0 +1,217 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
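Aside: the final LOCAL_CFLAGS assignment in these generated files relies on GNU Make computed variable names. GYP emits one MY_CFLAGS_*/MY_DEFS_* set per configuration, and $(GYP_CONFIGURATION) is expanded inside the variable reference to select the matching set at include time. A minimal standalone sketch of the mechanism follows; the Debug/Release values are illustrative, not taken from the files in this diff.

# Sketch only -- demonstrates the $(MY_CFLAGS_$(GYP_CONFIGURATION)) indirection.
MY_CFLAGS_Debug   := -g -O0
MY_CFLAGS_Release := -O2
GYP_CONFIGURATION := Release
# The inner reference expands first, so this reads as $(MY_CFLAGS_Release).
LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION))
$(info LOCAL_CFLAGS = $(LOCAL_CFLAGS))
all:
	@true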
diff --git a/tools/gyp/v8_libplatform.host.linux-arm64.mk b/tools/gyp/v8_libplatform.host.linux-arm64.mk
new file mode 100644
index 0000000..9312fd7
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.linux-arm64.mk
@@ -0,0 +1,213 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m64 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_ARM64' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m64 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_ARM64' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.linux-mips.mk b/tools/gyp/v8_libplatform.host.linux-mips.mk
new file mode 100644
index 0000000..6100d59
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.linux-mips.mk
@@ -0,0 +1,223 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS32R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS32R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.linux-mips64.mk b/tools/gyp/v8_libplatform.host.linux-mips64.mk
new file mode 100644
index 0000000..ffa15a7
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.linux-mips64.mk
@@ -0,0 +1,221 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
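Aside: every entry in the MY_DEFS_* lists above is wrapped in single quotes because values such as the DATA_REDUCTION_* hosts embed double quotes; the outer single quotes carry those through the shell intact, so the macro reaches the preprocessor as a C string literal. A sketch of the effect, with a hypothetical define name and value:

# Sketch only -- why the -D values above carry single quotes.
DEFS := '-DPROXY_HOST="http://example.invalid:80/"'
show:
	@echo $(DEFS)
# `make show` prints: -DPROXY_HOST="http://example.invalid:80/"
# Without the single quotes the shell would strip the inner double quotes.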
diff --git a/tools/gyp/v8_libplatform.host.linux-x86.mk b/tools/gyp/v8_libplatform.host.linux-x86.mk
new file mode 100644
index 0000000..ef57bad
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.linux-x86.mk
@@ -0,0 +1,215 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_IA32' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m32 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_IA32' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_libplatform.host.linux-x86_64.mk b/tools/gyp/v8_libplatform.host.linux-x86_64.mk
new file mode 100644
index 0000000..31ee0fd
--- /dev/null
+++ b/tools/gyp/v8_libplatform.host.linux-x86_64.mk
@@ -0,0 +1,215 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES :=
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+LOCAL_GENERATED_SOURCES :=
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS :=
+
+LOCAL_SRC_FILES := \
+	v8/src/libplatform/default-platform.cc \
+	v8/src/libplatform/task-queue.cc \
+	v8/src/libplatform/worker-thread.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m64 \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_X64' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-m64 \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_X64' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_libplatform
+v8_libplatform: v8_tools_gyp_v8_libplatform_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
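Aside: each generated module closes with the same alias boilerplate. The real module name is decorated with $(TARGET_$(GYP_VAR_PREFIX)ARCH) so per-arch instantiations of one target can coexist in a single build, and a short .PHONY name forwards to whichever decorated target the current include resolved to. A hedged sketch of that shape; the expanded name below is hypothetical:

# Sketch only -- the alias pattern closing each generated .mk.
decorated := v8_tools_gyp_v8_libplatform_x86_64_host_gyp  # hypothetical expansion
.PHONY: gyp_all_modules v8_libplatform
gyp_all_modules: $(decorated)
v8_libplatform: $(decorated)
$(decorated):
	@echo building $@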
diff --git a/tools/gyp/v8_nosnapshot.host.darwin-arm.mk b/tools/gyp/v8_nosnapshot.host.darwin-arm.mk
index 9153f36..c2c7474 100644
--- a/tools/gyp/v8_nosnapshot.host.darwin-arm.mk
+++ b/tools/gyp/v8_nosnapshot.host.darwin-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -45,7 +41,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -53,9 +48,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -63,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -80,17 +83,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -110,20 +114,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -131,6 +134,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -143,7 +154,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -160,17 +170,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -184,43 +195,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.darwin-arm64.mk b/tools/gyp/v8_nosnapshot.host.darwin-arm64.mk
index 2a1632a..2d05d96 100644
--- a/tools/gyp/v8_nosnapshot.host.darwin-arm64.mk
+++ b/tools/gyp/v8_nosnapshot.host.darwin-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -45,7 +41,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -53,16 +48,24 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -79,11 +82,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -107,20 +112,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -128,6 +132,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -139,7 +151,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -156,11 +167,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -178,43 +191,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.darwin-mips.mk b/tools/gyp/v8_nosnapshot.host.darwin-mips.mk
index 20747f4..4eb6c6a 100644
--- a/tools/gyp/v8_nosnapshot.host.darwin-mips.mk
+++ b/tools/gyp/v8_nosnapshot.host.darwin-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -46,7 +42,6 @@
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -54,9 +49,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -64,7 +68,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -81,11 +84,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -112,21 +117,20 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -134,6 +138,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -146,7 +158,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -163,11 +174,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -188,43 +201,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.darwin-mips64.mk b/tools/gyp/v8_nosnapshot.host.darwin-mips64.mk
new file mode 100644
index 0000000..4e913b6
--- /dev/null
+++ b/tools/gyp/v8_nosnapshot.host.darwin-mips64.mk
@@ -0,0 +1,228 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+$(gyp_intermediate_dir)/libraries.cc: $(gyp_shared_intermediate_dir)/libraries.cc
+	mkdir -p $(@D); cp $< $@
+$(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
+	mkdir -p $(@D); cp $< $@
+LOCAL_GENERATED_SOURCES := \
+	$(gyp_intermediate_dir)/libraries.cc \
+	$(gyp_intermediate_dir)/experimental-libraries.cc
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS := \
+	$(gyp_shared_intermediate_dir)
+
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
+	v8/src/snapshot-empty.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_nosnapshot
+v8_nosnapshot: v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
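Every v8_nosnapshot fragment in this patch selects its flags through a computed variable name: LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) expands GYP_CONFIGURATION first (Debug or Release) and then reads the matching MY_CFLAGS_* / MY_DEFS_* list. The following is a minimal standalone sketch of that GNU Make idiom; the file name sketch.mk and the CFG variable are hypothetical stand-ins, not part of the patch.

# sketch.mk -- computed-variable-name selection, as in the fragments above.
# CFG stands in for GYP_CONFIGURATION; every name in this sketch is hypothetical.
CFG := Release

MY_CFLAGS_Debug   := -O0 -g
MY_CFLAGS_Release := -O2 -fno-ident

# The inner $(CFG) expands first, so this reads MY_CFLAGS_Release by default.
LOCAL_CFLAGS := $(MY_CFLAGS_$(CFG))

.PHONY: show
show:
	@echo "LOCAL_CFLAGS = $(LOCAL_CFLAGS)"

Running make -f sketch.mk show prints the Release flags, and make -f sketch.mk show CFG=Debug switches lists without editing the makefile, which is how one generated fragment serves both configurations.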
diff --git a/tools/gyp/v8_nosnapshot.host.darwin-x86.mk b/tools/gyp/v8_nosnapshot.host.darwin-x86.mk
index 2877f0a..a921690 100644
--- a/tools/gyp/v8_nosnapshot.host.darwin-x86.mk
+++ b/tools/gyp/v8_nosnapshot.host.darwin-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -45,7 +41,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -53,9 +48,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -63,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -80,11 +83,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -108,20 +113,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -129,6 +133,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -141,7 +153,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -158,11 +169,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -180,43 +193,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.darwin-x86_64.mk b/tools/gyp/v8_nosnapshot.host.darwin-x86_64.mk
index a5f4dab..652a700 100644
--- a/tools/gyp/v8_nosnapshot.host.darwin-x86_64.mk
+++ b/tools/gyp/v8_nosnapshot.host.darwin-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -45,7 +41,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -53,9 +48,18 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -63,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -80,11 +83,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -108,20 +113,19 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -129,6 +133,14 @@
 	-pipe \
 	-fPIC \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -141,7 +153,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -158,11 +169,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -180,43 +193,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.linux-arm.mk b/tools/gyp/v8_nosnapshot.host.linux-arm.mk
index 3c098da..c2c7474 100644
--- a/tools/gyp/v8_nosnapshot.host.linux-arm.mk
+++ b/tools/gyp/v8_nosnapshot.host.linux-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -45,18 +41,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -64,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -81,17 +83,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -111,28 +114,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -145,7 +154,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -162,17 +170,18 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -186,47 +195,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.linux-arm64.mk b/tools/gyp/v8_nosnapshot.host.linux-arm64.mk
index 374da70..2d05d96 100644
--- a/tools/gyp/v8_nosnapshot.host.linux-arm64.mk
+++ b/tools/gyp/v8_nosnapshot.host.linux-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -45,25 +41,31 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -80,11 +82,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -108,28 +112,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -141,7 +151,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -158,11 +167,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -180,47 +191,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.linux-mips.mk b/tools/gyp/v8_nosnapshot.host.linux-mips.mk
index d0ddd73..4eb6c6a 100644
--- a/tools/gyp/v8_nosnapshot.host.linux-mips.mk
+++ b/tools/gyp/v8_nosnapshot.host.linux-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -46,18 +42,25 @@
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -65,7 +68,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -82,11 +84,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -113,29 +117,35 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -148,7 +158,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -165,11 +174,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -190,47 +201,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.linux-mips64.mk b/tools/gyp/v8_nosnapshot.host.linux-mips64.mk
new file mode 100644
index 0000000..4e913b6
--- /dev/null
+++ b/tools/gyp/v8_nosnapshot.host.linux-mips64.mk
@@ -0,0 +1,224 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_IS_HOST_MODULE := true
+LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
+
+GYP_GENERATED_OUTPUTS :=
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+$(gyp_intermediate_dir)/libraries.cc: $(gyp_shared_intermediate_dir)/libraries.cc
+	mkdir -p $(@D); cp $< $@
+$(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
+	mkdir -p $(@D); cp $< $@
+LOCAL_GENERATED_SOURCES := \
+	$(gyp_intermediate_dir)/libraries.cc \
+	$(gyp_intermediate_dir)/experimental-libraries.cc
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS := \
+	$(gyp_shared_intermediate_dir)
+
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
+	v8/src/snapshot-empty.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	-pthread \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir)
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-std=gnu++11
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+# Undefine ANDROID for host modules
+LOCAL_CFLAGS += -UANDROID
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+# Alias gyp target name.
+.PHONY: v8_nosnapshot
+v8_nosnapshot: v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
+
+include $(BUILD_HOST_STATIC_LIBRARY)
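Like its siblings, the new linux-mips64 fragment relocates gyp's shared generated sources with one-line copy recipes of the form "mkdir -p $(@D); cp $< $@", where $< is the prerequisite, $@ is the target, and $(@D) is the target's directory. Below is a small self-contained sketch of the same pattern; copy.mk, src_dir, and dst_dir are hypothetical names, and gyp itself emits one explicit rule per file rather than a pattern rule, presumably so each output stays greppable in the generated makefile.

# copy.mk -- the generated-source relocation idiom used by the rules above.
# src_dir and dst_dir are hypothetical stand-ins for the gyp intermediate dirs.
src_dir := shared/gen
dst_dir := local/gen

# Make the target's directory, then copy the prerequisite ($<) to the target ($@).
$(dst_dir)/%.cc: $(src_dir)/%.cc
	mkdir -p $(@D); cp $< $@

.PHONY: all
all: $(dst_dir)/libraries.cc $(dst_dir)/experimental-libraries.cc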
diff --git a/tools/gyp/v8_nosnapshot.host.linux-x86.mk b/tools/gyp/v8_nosnapshot.host.linux-x86.mk
index 3c6867d..a921690 100644
--- a/tools/gyp/v8_nosnapshot.host.linux-x86.mk
+++ b/tools/gyp/v8_nosnapshot.host.linux-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -45,18 +41,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -64,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -81,11 +83,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -109,28 +113,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m32 \
 	-fno-ident \
 	-fdata-sections \
@@ -143,7 +153,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -160,11 +169,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -182,47 +193,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m32
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_nosnapshot.host.linux-x86_64.mk b/tools/gyp/v8_nosnapshot.host.linux-x86_64.mk
index 2c9b2e6..652a700 100644
--- a/tools/gyp/v8_nosnapshot.host.linux-x86_64.mk
+++ b/tools/gyp/v8_nosnapshot.host.linux-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_nosnapshot_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_IS_HOST_MODULE := true
 LOCAL_MULTILIB := $(GYP_HOST_MULTILIB)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_HOST_VAR_PREFIX))
@@ -13,8 +12,7 @@
 
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 GYP_GENERATED_OUTPUTS :=
 
@@ -26,17 +24,15 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
-	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc
+	$(gyp_intermediate_dir)/experimental-libraries.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
 LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc \
 	v8/src/snapshot-empty.cc
 
 
@@ -45,18 +41,25 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -64,7 +67,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -81,11 +83,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -109,28 +113,34 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	-pthread \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
 	-fvisibility=hidden \
 	-pipe \
 	-fPIC \
-	-Wno-unused-local-typedefs \
 	-Wno-format \
+	-Wheader-hygiene \
+	-Wno-char-subscripts \
+	-Wno-unneeded-internal-declaration \
+	-Wno-covered-switch-default \
+	-Wstring-conversion \
+	-Wno-c++11-narrowing \
+	-Wno-deprecated-register \
+	-Wno-unused-local-typedef \
 	-m64 \
 	-fno-ident \
 	-fdata-sections \
@@ -143,7 +153,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -160,11 +169,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -182,47 +193,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
-	-Wno-deprecated
+	-Wno-deprecated \
+	-std=gnu++11
 
 
-LOCAL_FDO_SUPPORT_Release := false
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 # Undefine ANDROID for host modules
 LOCAL_CFLAGS += -UANDROID
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
-
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-pthread \
-	-fPIC \
-	-m64
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
-LOCAL_SHARED_LIBRARIES :=
+### Set directly by aosp_build_settings.
+LOCAL_CLANG := true
 
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
diff --git a/tools/gyp/v8_snapshot.target.darwin-arm.mk b/tools/gyp/v8_snapshot.target.darwin-arm.mk
index c378270..eb5a0ab 100644
--- a/tools/gyp/v8_snapshot.target.darwin-arm.mk
+++ b/tools/gyp/v8_snapshot.target.darwin-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,25 +37,22 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -67,13 +62,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -92,6 +87,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -99,7 +95,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -116,12 +111,15 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -130,8 +128,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -156,23 +152,24 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -182,13 +179,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -216,7 +213,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -233,12 +229,15 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -247,8 +246,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -267,73 +264,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.darwin-arm64.mk b/tools/gyp/v8_snapshot.target.darwin-arm64.mk
index d3119a2..f7ecad5 100644
--- a/tools/gyp/v8_snapshot.target.darwin-arm64.mk
+++ b/tools/gyp/v8_snapshot.target.darwin-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,24 +37,21 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -82,13 +77,13 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -105,11 +100,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -143,21 +140,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -191,7 +189,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -208,11 +205,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -240,64 +239,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.darwin-mips.mk b/tools/gyp/v8_snapshot.target.darwin-mips.mk
index 7486f13..94378e4 100644
--- a/tools/gyp/v8_snapshot.target.darwin-mips.mk
+++ b/tools/gyp/v8_snapshot.target.darwin-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,18 +37,16 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
@@ -58,7 +54,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -67,8 +62,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -87,6 +80,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -94,7 +88,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -111,11 +104,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -152,24 +147,25 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -178,8 +174,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -207,7 +201,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -224,11 +217,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -259,69 +254,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.darwin-mips64.mk b/tools/gyp/v8_snapshot.target.darwin-mips64.mk
new file mode 100644
index 0000000..ac4a2ab
--- /dev/null
+++ b/tools/gyp/v8_snapshot.target.darwin-mips64.mk
@@ -0,0 +1,289 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(gyp_shared_intermediate_dir)/mksnapshot \
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
+
+### Rules for action "run_mksnapshot":
+$(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
+$(gyp_intermediate_dir)/snapshot.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
+$(gyp_intermediate_dir)/snapshot.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
+$(gyp_intermediate_dir)/snapshot.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
+$(gyp_intermediate_dir)/snapshot.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
+$(gyp_intermediate_dir)/snapshot.cc: $(gyp_shared_intermediate_dir)/mksnapshot $(GYP_TARGET_DEPENDENCIES)
+	@echo "Gyp action: v8_tools_gyp_v8_gyp_v8_snapshot_target_run_mksnapshot ($@)"
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_intermediate_dir); "$(gyp_shared_intermediate_dir)/mksnapshot" --log-snapshot-positions --logfile "$(gyp_intermediate_dir)/snapshot.log" --random-seed 314159265 "$(gyp_intermediate_dir)/snapshot.cc"
+
+
+
+GYP_GENERATED_OUTPUTS := \
+	$(gyp_intermediate_dir)/snapshot.cc
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+$(gyp_intermediate_dir)/libraries.cc: $(gyp_shared_intermediate_dir)/libraries.cc
+	mkdir -p $(@D); cp $< $@
+$(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
+	mkdir -p $(@D); cp $< $@
+LOCAL_GENERATED_SOURCES := \
+	$(gyp_intermediate_dir)/libraries.cc \
+	$(gyp_intermediate_dir)/experimental-libraries.cc \
+	$(gyp_intermediate_dir)/snapshot.cc
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS := \
+	$(gyp_shared_intermediate_dir)
+
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(gyp_shared_intermediate_dir)/shim_headers/icuuc/target \
+	$(gyp_shared_intermediate_dir)/shim_headers/icui18n/target \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(gyp_shared_intermediate_dir)/shim_headers/icuuc/target \
+	$(gyp_shared_intermediate_dir)/shim_headers/icui18n/target \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+
+LOCAL_SHARED_LIBRARIES := \
+	libstlport \
+	libdl
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
+
+# Alias gyp target name.
+.PHONY: v8_snapshot
+v8_snapshot: v8_tools_gyp_v8_snapshot_gyp
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_snapshot.target.darwin-x86.mk b/tools/gyp/v8_snapshot.target.darwin-x86.mk
index 4cd81bf..8c7b34f 100644
--- a/tools/gyp/v8_snapshot.target.darwin-x86.mk
+++ b/tools/gyp/v8_snapshot.target.darwin-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,24 +37,21 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -85,8 +80,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -94,7 +91,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -111,11 +107,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -149,21 +147,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -192,6 +191,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -203,7 +203,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -220,11 +219,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -252,68 +253,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.darwin-x86_64.mk b/tools/gyp/v8_snapshot.target.darwin-x86_64.mk
index a232826..75a1037 100644
--- a/tools/gyp/v8_snapshot.target.darwin-x86_64.mk
+++ b/tools/gyp/v8_snapshot.target.darwin-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,25 +37,22 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -84,8 +79,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -93,7 +90,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -110,11 +106,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -148,22 +146,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -190,6 +189,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -201,7 +201,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -218,11 +217,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -250,68 +251,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.linux-arm.mk b/tools/gyp/v8_snapshot.target.linux-arm.mk
index c378270..eb5a0ab 100644
--- a/tools/gyp/v8_snapshot.target.linux-arm.mk
+++ b/tools/gyp/v8_snapshot.target.linux-arm.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,25 +37,22 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -67,13 +62,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -92,6 +87,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -99,7 +95,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -116,12 +111,15 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -130,8 +128,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
 	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
 	'-D_DEBUG' \
@@ -156,23 +152,24 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -182,13 +179,13 @@
 	-Wno-unused-local-typedefs \
 	-Wno-format \
 	-fno-tree-sra \
+	-fno-caller-saves \
+	-Wno-psabi \
 	-fno-partial-inlining \
 	-fno-early-inlining \
 	-fno-tree-copy-prop \
 	-fno-tree-loop-optimize \
 	-fno-move-loop-invariants \
-	-fno-caller-saves \
-	-Wno-psabi \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -216,7 +213,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -233,12 +229,15 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM' \
+	'-DCAN_USE_ARMV7_INSTRUCTIONS' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
 	'-DUSE_OPENSSL_CERTS=1' \
@@ -247,8 +246,6 @@
 	'-DUSE_STLPORT=1' \
 	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
 	'-DCHROME_BUILD_ID=""' \
-	'-DARM_TEST' \
-	'-DCAN_USE_ARMV7_INSTRUCTIONS=1' \
 	'-DNDEBUG' \
 	'-DNVALGRIND' \
 	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
@@ -267,73 +264,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-abi \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-Wl,-z,relro \
-	-Wl,-z,now \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--icf=safe \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.linux-arm64.mk b/tools/gyp/v8_snapshot.target.linux-arm64.mk
index d3119a2..f7ecad5 100644
--- a/tools/gyp/v8_snapshot.target.linux-arm64.mk
+++ b/tools/gyp/v8_snapshot.target.linux-arm64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,24 +37,21 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -82,13 +77,13 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-funwind-tables
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -105,11 +100,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -143,21 +140,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -191,7 +189,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -208,11 +205,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_ARM64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -240,64 +239,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.linux-mips.mk b/tools/gyp/v8_snapshot.target.linux-mips.mk
index 7486f13..94378e4 100644
--- a/tools/gyp/v8_snapshot.target.linux-mips.mk
+++ b/tools/gyp/v8_snapshot.target.linux-mips.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,18 +37,16 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
@@ -58,7 +54,6 @@
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -67,8 +62,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -87,6 +80,7 @@
 	-Wno-sequence-point \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -94,7 +88,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -111,11 +104,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -152,24 +147,25 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
 	 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -178,8 +174,6 @@
 	-fPIC \
 	-Wno-unused-local-typedefs \
 	-Wno-format \
-	-EL \
-	-mhard-float \
 	-ffunction-sections \
 	-funwind-tables \
 	-g \
@@ -207,7 +201,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -224,11 +217,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_MIPS' \
 	'-DCAN_USE_FPU_INSTRUCTIONS' \
 	'-D__mips_hard_float=1' \
@@ -259,69 +254,33 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
 	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-EL \
-	-Wl,--no-keep-memory \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.linux-mips64.mk b/tools/gyp/v8_snapshot.target.linux-mips64.mk
new file mode 100644
index 0000000..ac4a2ab
--- /dev/null
+++ b/tools/gyp/v8_snapshot.target.linux-mips64.mk
@@ -0,0 +1,289 @@
+# This file is generated by gyp; do not edit.
+
+include $(CLEAR_VARS)
+
+LOCAL_MODULE_CLASS := STATIC_LIBRARIES
+LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
+LOCAL_MODULE_SUFFIX := .a
+LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
+gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
+gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
+
+# Make sure our deps are built first.
+GYP_TARGET_DEPENDENCIES := \
+	$(gyp_shared_intermediate_dir)/mksnapshot \
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
+
+### Rules for action "run_mksnapshot":
+$(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
+$(gyp_intermediate_dir)/snapshot.cc: gyp_var_prefix := $(GYP_VAR_PREFIX)
+$(gyp_intermediate_dir)/snapshot.cc: gyp_intermediate_dir := $(abspath $(gyp_intermediate_dir))
+$(gyp_intermediate_dir)/snapshot.cc: gyp_shared_intermediate_dir := $(abspath $(gyp_shared_intermediate_dir))
+$(gyp_intermediate_dir)/snapshot.cc: export PATH := $(subst $(ANDROID_BUILD_PATHS),,$(PATH))
+$(gyp_intermediate_dir)/snapshot.cc: $(gyp_shared_intermediate_dir)/mksnapshot $(GYP_TARGET_DEPENDENCIES)
+	@echo "Gyp action: v8_tools_gyp_v8_gyp_v8_snapshot_target_run_mksnapshot ($@)"
+	$(hide)cd $(gyp_local_path)/v8/tools/gyp; mkdir -p $(gyp_intermediate_dir); "$(gyp_shared_intermediate_dir)/mksnapshot" --log-snapshot-positions --logfile "$(gyp_intermediate_dir)/snapshot.log" --random-seed 314159265 "$(gyp_intermediate_dir)/snapshot.cc"
+
+
+
+GYP_GENERATED_OUTPUTS := \
+	$(gyp_intermediate_dir)/snapshot.cc
+
+# Make sure our deps and generated files are built first.
+LOCAL_ADDITIONAL_DEPENDENCIES := $(GYP_TARGET_DEPENDENCIES) $(GYP_GENERATED_OUTPUTS)
+
+LOCAL_CPP_EXTENSION := .cc
+$(gyp_intermediate_dir)/libraries.cc: $(gyp_shared_intermediate_dir)/libraries.cc
+	mkdir -p $(@D); cp $< $@
+$(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
+	mkdir -p $(@D); cp $< $@
+LOCAL_GENERATED_SOURCES := \
+	$(gyp_intermediate_dir)/libraries.cc \
+	$(gyp_intermediate_dir)/experimental-libraries.cc \
+	$(gyp_intermediate_dir)/snapshot.cc
+
+GYP_COPIED_SOURCE_ORIGIN_DIRS := \
+	$(gyp_shared_intermediate_dir)
+
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Debug := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-Os \
+	-g \
+	-gdwarf-4 \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables
+
+MY_DEFS_Debug := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=1' \
+	'-DWTF_USE_DYNAMIC_ANNOTATIONS=1' \
+	'-D_DEBUG' \
+	'-DENABLE_DISASSEMBLER' \
+	'-DV8_ENABLE_CHECKS' \
+	'-DOBJECT_PRINT' \
+	'-DVERIFY_HEAP' \
+	'-DENABLE_EXTRA_CHECKS' \
+	'-DENABLE_HANDLE_ZAPPING'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Debug := \
+	$(gyp_shared_intermediate_dir)/shim_headers/icuuc/target \
+	$(gyp_shared_intermediate_dir)/shim_headers/icui18n/target \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+# Flags passed to both C and C++ files.
+MY_CFLAGS_Release := \
+	-fstack-protector \
+	--param=ssp-buffer-size=4 \
+	 \
+	-fno-strict-aliasing \
+	-Wno-unused-parameter \
+	-Wno-missing-field-initializers \
+	-fvisibility=hidden \
+	-pipe \
+	-fPIC \
+	-Wno-unused-local-typedefs \
+	-Wno-format \
+	-ffunction-sections \
+	-funwind-tables \
+	-g \
+	-fstack-protector \
+	-fno-short-enums \
+	-finline-limit=64 \
+	-Wa,--noexecstack \
+	-U_FORTIFY_SOURCE \
+	-Wno-extra \
+	-Wno-ignored-qualifiers \
+	-Wno-type-limits \
+	-Wno-unused-but-set-variable \
+	-Wno-address \
+	-Wno-format-security \
+	-Wno-return-type \
+	-Wno-sequence-point \
+	-fno-ident \
+	-fdata-sections \
+	-ffunction-sections \
+	-fomit-frame-pointer \
+	-funwind-tables \
+	-fdata-sections \
+	-ffunction-sections \
+	-O2
+
+MY_DEFS_Release := \
+	'-DV8_DEPRECATION_WARNINGS' \
+	'-D_FILE_OFFSET_BITS=64' \
+	'-DNO_TCMALLOC' \
+	'-DDISABLE_NACL' \
+	'-DCHROMIUM_BUILD' \
+	'-DUSE_LIBJPEG_TURBO=1' \
+	'-DENABLE_WEBRTC=1' \
+	'-DUSE_PROPRIETARY_CODECS' \
+	'-DENABLE_BROWSER_CDMS' \
+	'-DENABLE_CONFIGURATION_POLICY' \
+	'-DDISCARDABLE_MEMORY_ALWAYS_SUPPORTED_NATIVELY' \
+	'-DSYSTEM_NATIVELY_SIGNALS_MEMORY_PRESSURE' \
+	'-DENABLE_EGLIMAGE=1' \
+	'-DCLD_VERSION=1' \
+	'-DENABLE_PRINTING=1' \
+	'-DENABLE_MANAGED_USERS=1' \
+	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
+	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
+	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
+	'-DV8_TARGET_ARCH_MIPS64' \
+	'-DCAN_USE_FPU_INSTRUCTIONS' \
+	'-D__mips_hard_float=1' \
+	'-D_MIPS_ARCH_MIPS64R2' \
+	'-DV8_I18N_SUPPORT' \
+	'-DUSE_OPENSSL=1' \
+	'-DUSE_OPENSSL_CERTS=1' \
+	'-DANDROID' \
+	'-D__GNU_SOURCE=1' \
+	'-DUSE_STLPORT=1' \
+	'-D_STLP_USE_PTR_SPECIALIZATIONS=1' \
+	'-DCHROME_BUILD_ID=""' \
+	'-DNDEBUG' \
+	'-DNVALGRIND' \
+	'-DDYNAMIC_ANNOTATIONS_ENABLED=0'
+
+
+# Include paths placed before CFLAGS/CPPFLAGS
+LOCAL_C_INCLUDES_Release := \
+	$(gyp_shared_intermediate_dir)/shim_headers/icuuc/target \
+	$(gyp_shared_intermediate_dir)/shim_headers/icui18n/target \
+	$(LOCAL_PATH)/v8 \
+	$(gyp_shared_intermediate_dir) \
+	$(PWD)/frameworks/wilhelm/include \
+	$(PWD)/bionic \
+	$(PWD)/external/stlport/stlport
+
+
+# Flags passed to only C++ (and not C) files.
+LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
+	-fno-rtti \
+	-fno-threadsafe-statics \
+	-fvisibility-inlines-hidden \
+	-Wno-deprecated \
+	-Wno-uninitialized \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
+	-Wno-non-virtual-dtor \
+	-Wno-sign-promo \
+	-Wno-non-virtual-dtor
+
+
+LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
+LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
+LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
+LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
+### Rules for final target.
+
+LOCAL_SHARED_LIBRARIES := \
+	libstlport \
+	libdl
+
+# Add target alias to "gyp_all_modules" target.
+.PHONY: gyp_all_modules
+gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
+
+# Alias gyp target name.
+.PHONY: v8_snapshot
+v8_snapshot: v8_tools_gyp_v8_snapshot_gyp
+
+include $(BUILD_STATIC_LIBRARY)
diff --git a/tools/gyp/v8_snapshot.target.linux-x86.mk b/tools/gyp/v8_snapshot.target.linux-x86.mk
index 4cd81bf..8c7b34f 100644
--- a/tools/gyp/v8_snapshot.target.linux-x86.mk
+++ b/tools/gyp/v8_snapshot.target.linux-x86.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,24 +37,21 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -85,8 +80,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -94,7 +91,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -111,11 +107,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -149,21 +147,22 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -192,6 +191,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m32 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -203,7 +203,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -220,11 +219,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_IA32' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -252,68 +253,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m32 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/gyp/v8_snapshot.target.linux-x86_64.mk b/tools/gyp/v8_snapshot.target.linux-x86_64.mk
index a232826..75a1037 100644
--- a/tools/gyp/v8_snapshot.target.linux-x86_64.mk
+++ b/tools/gyp/v8_snapshot.target.linux-x86_64.mk
@@ -5,7 +5,6 @@
 LOCAL_MODULE_CLASS := STATIC_LIBRARIES
 LOCAL_MODULE := v8_tools_gyp_v8_snapshot_gyp
 LOCAL_MODULE_SUFFIX := .a
-LOCAL_MODULE_TAGS := optional
 LOCAL_MODULE_TARGET_ARCH := $(TARGET_$(GYP_VAR_PREFIX)ARCH)
 gyp_intermediate_dir := $(call local-intermediates-dir,,$(GYP_VAR_PREFIX))
 gyp_shared_intermediate_dir := $(call intermediates-dir-for,GYP,shared,,,$(GYP_VAR_PREFIX))
@@ -13,8 +12,7 @@
 # Make sure our deps are built first.
 GYP_TARGET_DEPENDENCIES := \
 	$(gyp_shared_intermediate_dir)/mksnapshot \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp \
-	$(call intermediates-dir-for,GYP,v8_tools_gyp_generate_trig_table_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/generate_trig_table.stamp
+	$(call intermediates-dir-for,GYP,v8_tools_gyp_js2c_$(TARGET_$(GYP_VAR_PREFIX)ARCH)_host_gyp,true,,$(GYP_HOST_VAR_PREFIX))/js2c.stamp
 
 ### Rules for action "run_mksnapshot":
 $(gyp_intermediate_dir)/snapshot.cc: gyp_local_path := $(LOCAL_PATH)
@@ -39,25 +37,22 @@
 	mkdir -p $(@D); cp $< $@
 $(gyp_intermediate_dir)/experimental-libraries.cc: $(gyp_shared_intermediate_dir)/experimental-libraries.cc
 	mkdir -p $(@D); cp $< $@
-$(gyp_intermediate_dir)/trig-table.cc: $(gyp_shared_intermediate_dir)/trig-table.cc
-	mkdir -p $(@D); cp $< $@
 LOCAL_GENERATED_SOURCES := \
 	$(gyp_intermediate_dir)/libraries.cc \
 	$(gyp_intermediate_dir)/experimental-libraries.cc \
-	$(gyp_intermediate_dir)/trig-table.cc \
 	$(gyp_intermediate_dir)/snapshot.cc
 
 GYP_COPIED_SOURCE_ORIGIN_DIRS := \
 	$(gyp_shared_intermediate_dir)
 
-LOCAL_SRC_FILES :=
+LOCAL_SRC_FILES := \
+	v8/src/snapshot-common.cc
 
 
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Debug := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -84,8 +79,10 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-Os \
 	-g \
+	-gdwarf-4 \
 	-fdata-sections \
 	-ffunction-sections \
 	-fomit-frame-pointer \
@@ -93,7 +90,6 @@
 
 MY_DEFS_Debug := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -110,11 +106,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -148,22 +146,23 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Debug := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Debug := false
-
 # Flags passed to both C and C++ files.
 MY_CFLAGS_Release := \
 	-fstack-protector \
 	--param=ssp-buffer-size=4 \
-	-fno-exceptions \
 	-fno-strict-aliasing \
 	-Wno-unused-parameter \
 	-Wno-missing-field-initializers \
@@ -190,6 +189,7 @@
 	-Wno-format-security \
 	-Wno-return-type \
 	-Wno-sequence-point \
+	-m64 \
 	-fno-ident \
 	-fdata-sections \
 	-ffunction-sections \
@@ -201,7 +201,6 @@
 
 MY_DEFS_Release := \
 	'-DV8_DEPRECATION_WARNINGS' \
-	'-DBLINK_SCALE_FILTERS_AT_RECORD_TIME' \
 	'-D_FILE_OFFSET_BITS=64' \
 	'-DNO_TCMALLOC' \
 	'-DDISABLE_NACL' \
@@ -218,11 +217,13 @@
 	'-DENABLE_PRINTING=1' \
 	'-DENABLE_MANAGED_USERS=1' \
 	'-DDATA_REDUCTION_FALLBACK_HOST="http://compress.googlezip.net:80/"' \
-	'-DDATA_REDUCTION_DEV_HOST="http://proxy-dev.googlezip.net:80/"' \
+	'-DDATA_REDUCTION_DEV_HOST="https://proxy-dev.googlezip.net:443/"' \
+	'-DDATA_REDUCTION_DEV_FALLBACK_HOST="http://proxy-dev.googlezip.net:80/"' \
 	'-DSPDY_PROXY_AUTH_ORIGIN="https://proxy.googlezip.net:443/"' \
 	'-DDATA_REDUCTION_PROXY_PROBE_URL="http://check.googlezip.net/connect"' \
 	'-DDATA_REDUCTION_PROXY_WARMUP_URL="http://www.gstatic.com/generate_204"' \
 	'-DVIDEO_HOLE=1' \
+	'-DENABLE_LOAD_COMPLETION_HACKS=1' \
 	'-DV8_TARGET_ARCH_X64' \
 	'-DV8_I18N_SUPPORT' \
 	'-DUSE_OPENSSL=1' \
@@ -250,68 +251,32 @@
 
 # Flags passed to only C++ (and not C) files.
 LOCAL_CPPFLAGS_Release := \
+	-fno-exceptions \
 	-fno-rtti \
 	-fno-threadsafe-statics \
 	-fvisibility-inlines-hidden \
 	-Wno-deprecated \
+	-std=gnu++11 \
+	-Wno-narrowing \
+	-Wno-literal-suffix \
 	-Wno-non-virtual-dtor \
 	-Wno-sign-promo \
 	-Wno-non-virtual-dtor
 
 
-LOCAL_FDO_SUPPORT_Release := true
-
 LOCAL_CFLAGS := $(MY_CFLAGS_$(GYP_CONFIGURATION)) $(MY_DEFS_$(GYP_CONFIGURATION))
-LOCAL_FDO_SUPPORT := $(LOCAL_FDO_SUPPORT_$(GYP_CONFIGURATION))
 LOCAL_C_INCLUDES := $(GYP_COPIED_SOURCE_ORIGIN_DIRS) $(LOCAL_C_INCLUDES_$(GYP_CONFIGURATION))
 LOCAL_CPPFLAGS := $(LOCAL_CPPFLAGS_$(GYP_CONFIGURATION))
 LOCAL_ASFLAGS := $(LOCAL_CFLAGS)
 ### Rules for final target.
 
-LOCAL_LDFLAGS_Debug := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,--warn-shared-textrel \
-	-Wl,-O1 \
-	-Wl,--as-needed
-
-
-LOCAL_LDFLAGS_Release := \
-	-Wl,-z,now \
-	-Wl,-z,relro \
-	-Wl,--fatal-warnings \
-	-Wl,-z,noexecstack \
-	-fPIC \
-	-m64 \
-	-fuse-ld=gold \
-	-nostdlib \
-	-Wl,--no-undefined \
-	-Wl,--exclude-libs=ALL \
-	-Wl,-O1 \
-	-Wl,--as-needed \
-	-Wl,--gc-sections \
-	-Wl,--warn-shared-textrel
-
-
-LOCAL_LDFLAGS := $(LOCAL_LDFLAGS_$(GYP_CONFIGURATION))
-
-LOCAL_STATIC_LIBRARIES :=
-
-# Enable grouping to fix circular references
-LOCAL_GROUP_STATIC_LIBRARIES := true
-
 LOCAL_SHARED_LIBRARIES := \
 	libstlport \
 	libdl
 
+### Set directly by aosp_build_settings.
+LOCAL_FDO_SUPPORT := true
+
 # Add target alias to "gyp_all_modules" target.
 .PHONY: gyp_all_modules
 gyp_all_modules: v8_tools_gyp_v8_snapshot_gyp
diff --git a/tools/js2c.py b/tools/js2c.py
index bda4656..77485f6 100755
--- a/tools/js2c.py
+++ b/tools/js2c.py
@@ -218,6 +218,27 @@
     lines = ExpandMacroDefinition(lines, pos, name_pattern, macro, non_expander)
 
 
+INLINE_CONSTANT_PATTERN = re.compile(r'const\s+([a-zA-Z0-9_]+)\s*=\s*([^;\n]+)[;\n]')
+
+def ExpandInlineConstants(lines):
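+  # Textually inline each 'const NAME = value;' definition: the definition
+  # is deleted and every later whole-word use of NAME is replaced by value.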
+  pos = 0
+  while True:
+    const_match = INLINE_CONSTANT_PATTERN.search(lines, pos)
+    if const_match is None:
+      # no more constants
+      return lines
+    name = const_match.group(1)
+    replacement = const_match.group(2)
+    name_pattern = re.compile("\\b%s\\b" % name)
+
+    # remove constant definition and replace
+    lines = (lines[:const_match.start()] +
+             re.sub(name_pattern, replacement, lines[const_match.end():]))
+
+    # advance position to where the constant definition was
+    pos = const_match.start()
+
+
 HEADER_TEMPLATE = """\
 // Copyright 2011 Google Inc. All Rights Reserved.
 
@@ -276,7 +297,7 @@
 
   template <>
   void NativesCollection<%(type)s>::SetRawScriptsSource(Vector<const char> raw_source) {
-    ASSERT(%(raw_total_length)i == raw_source.length());
+    DCHECK(%(raw_total_length)i == raw_source.length());
     raw_sources = raw_source.start();
   }
 
@@ -333,6 +354,7 @@
   filter_chain.extend([
     RemoveCommentsAndTrailingWhitespace,
     ExpandInlineMacros,
+    ExpandInlineConstants,
     Validate,
     jsmin.JavaScriptMinifier().JSMinify
   ])
@@ -397,7 +419,7 @@
   return result
 
 
-def BuildMetadata(sources, source_bytes, native_type, omit):
+def BuildMetadata(sources, source_bytes, native_type):
   """Build the meta data required to generate a libaries file.
 
   Args:
@@ -405,7 +427,6 @@
     source_bytes: A list of source bytes.
         (The concatenation of all sources; might be compressed.)
     native_type: The parameter for the NativesCollection template.
-    omit: bool, whether we should omit the sources in the output.
 
   Returns:
     A dictionary for use with HEADER_TEMPLATE.
@@ -438,7 +459,7 @@
   assert offset == len(raw_sources)
 
   # If we have the raw sources we can declare them accordingly.
-  have_raw_sources = source_bytes == raw_sources and not omit
+  have_raw_sources = source_bytes == raw_sources
   raw_sources_declaration = (RAW_SOURCES_DECLARATION
       if have_raw_sources else RAW_SOURCES_COMPRESSION_DECLARATION)
 
@@ -446,7 +467,6 @@
     "builtin_count": len(sources.modules),
     "debugger_count": sum(sources.is_debugger_id),
     "sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
-    "sources_data": ToCArray(source_bytes) if not omit else "",
     "raw_sources_declaration": raw_sources_declaration,
     "raw_total_length": sum(map(len, sources.modules)),
     "total_length": total_length,
@@ -477,10 +497,51 @@
     raise Error("Unknown compression type %s." % compression_type)
 
 
-def JS2C(source, target, native_type, compression_type, raw_file, omit):
+def PutInt(blob_file, value):
+  assert(value >= 0 and value < (1 << 20))
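+  # Variable-length encoding: the value is shifted left by two bits and the
+  # low two bits hold the byte count, so values below 2**6 fit in one byte,
+  # below 2**14 in two, and anything below 2**20 in three. Bytes are written
+  # least-significant first.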
+  size = 1 if (value < 1 << 6) else (2 if (value < 1 << 14) else 3)
+  value_with_length = (value << 2) | size
+
+  byte_sequence = bytearray()
+  for i in xrange(size):
+    byte_sequence.append(value_with_length & 255)
+    value_with_length >>= 8
+  blob_file.write(byte_sequence)
+
+
+def PutStr(blob_file, value):
+  PutInt(blob_file, len(value))
+  blob_file.write(value)
+
+
+def WriteStartupBlob(sources, startup_blob):
+  """Write a startup blob, as expected by V8 Initialize ...
+    TODO(vogelheim): Add proper method name.
+
+  Args:
+    sources: A Sources instance with the prepared sources.
+    startup_blob: Name of the file to write the blob to.
+  """
+  output = open(startup_blob, "wb")
+
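+  # Blob layout: the count of debugger scripts followed by their
+  # (name, source) string pairs, then the same for the remaining natives.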
+  debug_sources = sum(sources.is_debugger_id)
+  PutInt(output, debug_sources)
+  for i in xrange(debug_sources):
+    PutStr(output, sources.names[i])
+    PutStr(output, sources.modules[i])
+
+  PutInt(output, len(sources.names) - debug_sources)
+  for i in xrange(debug_sources, len(sources.names)):
+    PutStr(output, sources.names[i])
+    PutStr(output, sources.modules[i])
+
+  output.close()
+
+
+def JS2C(source, target, native_type, compression_type, raw_file, startup_blob):
   sources = PrepareSources(source)
   sources_bytes = CompressMaybe(sources, compression_type)
-  metadata = BuildMetadata(sources, sources_bytes, native_type, omit)
+  metadata = BuildMetadata(sources, sources_bytes, native_type)
 
   # Optionally emit raw file.
   if raw_file:
@@ -488,6 +549,9 @@
     output.write(sources_bytes)
     output.close()
 
+  if startup_blob:
+    WriteStartupBlob(sources, startup_blob)
+
   # Emit resulting source file.
   output = open(target, "w")
   output.write(HEADER_TEMPLATE % metadata)
@@ -497,9 +561,9 @@
 def main():
   parser = optparse.OptionParser()
   parser.add_option("--raw", action="store",
-                      help="file to write the processed sources array to.")
-  parser.add_option("--omit", dest="omit", action="store_true",
-                    help="Omit the raw sources from the generated code.")
+                    help="file to write the processed sources array to.")
+  parser.add_option("--startup_blob", action="store",
+                    help="file to write the startup blob to.")
   parser.set_usage("""js2c out.cc type compression sources.js ...
       out.cc: C code to be generated.
       type: type parameter for NativesCollection template.
@@ -507,7 +571,7 @@
       sources.js: JS internal sources or macros.py.""")
   (options, args) = parser.parse_args()
 
-  JS2C(args[3:], args[0], args[1], args[2], options.raw, options.omit)
+  JS2C(args[3:], args[0], args[1], args[2], options.raw, options.startup_blob)
 
 
 if __name__ == "__main__":
diff --git a/tools/lexer-shell.cc b/tools/lexer-shell.cc
index 6ef8b0b..f8ddc02 100644
--- a/tools/lexer-shell.cc
+++ b/tools/lexer-shell.cc
@@ -33,9 +33,10 @@
 #include <vector>
 #include "src/v8.h"
 
+#include "include/libplatform/libplatform.h"
 #include "src/api.h"
+#include "src/base/platform/platform.h"
 #include "src/messages.h"
-#include "src/platform.h"
 #include "src/runtime.h"
 #include "src/scanner-character-streams.h"
 #include "src/scopeinfo.h"
@@ -52,7 +53,7 @@
   BaselineScanner(const char* fname,
                   Isolate* isolate,
                   Encoding encoding,
-                  ElapsedTimer* timer,
+                  v8::base::ElapsedTimer* timer,
                   int repeat)
       : stream_(NULL) {
     int length = 0;
@@ -127,13 +128,11 @@
 };
 
 
-TimeDelta RunBaselineScanner(const char* fname,
-                             Isolate* isolate,
-                             Encoding encoding,
-                             bool dump_tokens,
-                             std::vector<TokenWithLocation>* tokens,
-                             int repeat) {
-  ElapsedTimer timer;
+v8::base::TimeDelta RunBaselineScanner(const char* fname, Isolate* isolate,
+                                       Encoding encoding, bool dump_tokens,
+                                       std::vector<TokenWithLocation>* tokens,
+                                       int repeat) {
+  v8::base::ElapsedTimer timer;
   BaselineScanner scanner(fname, isolate, encoding, &timer, repeat);
   Token::Value token;
   int beg, end;
@@ -158,7 +157,7 @@
 }
 
 
-TimeDelta ProcessFile(
+v8::base::TimeDelta ProcessFile(
     const char* fname,
     Encoding encoding,
     Isolate* isolate,
@@ -169,7 +168,7 @@
   }
   HandleScope handle_scope(isolate);
   std::vector<TokenWithLocation> baseline_tokens;
-  TimeDelta baseline_time;
+  v8::base::TimeDelta baseline_time;
   baseline_time = RunBaselineScanner(
       fname, isolate, encoding, print_tokens,
       &baseline_tokens, repeat);
@@ -181,8 +180,11 @@
 
 
 int main(int argc, char* argv[]) {
-  v8::V8::InitializeICU();
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+  v8::V8::InitializeICU();
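+  // As of this V8 version an embedder-provided v8::Platform must be
+  // registered before V8::Initialize(); the default platform is used here.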
+  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+  v8::V8::InitializePlatform(platform);
+  v8::V8::Initialize();
   Encoding encoding = LATIN1;
   bool print_tokens = false;
   std::vector<std::string> fnames;
@@ -212,12 +214,12 @@
     v8::HandleScope handle_scope(isolate);
     v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
     v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
-    ASSERT(!context.IsEmpty());
+    DCHECK(!context.IsEmpty());
     {
       v8::Context::Scope scope(context);
       double baseline_total = 0;
       for (size_t i = 0; i < fnames.size(); i++) {
-        TimeDelta time;
+        v8::base::TimeDelta time;
         time = ProcessFile(fnames[i].c_str(), encoding,
                            reinterpret_cast<Isolate*>(isolate), print_tokens,
                            repeat);
@@ -228,5 +230,7 @@
     }
   }
   v8::V8::Dispose();
+  v8::V8::ShutdownPlatform();
+  delete platform;
   return 0;
 }
diff --git a/tools/lexer-shell.gyp b/tools/lexer-shell.gyp
index 6fd129a..836ea97 100644
--- a/tools/lexer-shell.gyp
+++ b/tools/lexer-shell.gyp
@@ -37,6 +37,7 @@
       'type': 'executable',
       'dependencies': [
         '../tools/gyp/v8.gyp:v8',
+        '../tools/gyp/v8.gyp:v8_libplatform',
       ],
       'conditions': [
         ['v8_enable_i18n_support==1', {
@@ -59,6 +60,7 @@
       'type': 'executable',
       'dependencies': [
         '../tools/gyp/v8.gyp:v8',
+        '../tools/gyp/v8.gyp:v8_libplatform',
       ],
       'conditions': [
         ['v8_enable_i18n_support==1', {
diff --git a/tools/ll_prof.py b/tools/ll_prof.py
index 216929d..409b396 100755
--- a/tools/ll_prof.py
+++ b/tools/ll_prof.py
@@ -351,7 +351,8 @@
     "ia32": ctypes.c_uint32,
     "arm": ctypes.c_uint32,
     "mips": ctypes.c_uint32,
-    "x64": ctypes.c_uint64
+    "x64": ctypes.c_uint64,
+    "arm64": ctypes.c_uint64
   }
 
   _CODE_CREATE_TAG = "C"
diff --git a/tools/parser-shell.cc b/tools/parser-shell.cc
index c229185..2cafc83 100644
--- a/tools/parser-shell.cc
+++ b/tools/parser-shell.cc
@@ -33,6 +33,7 @@
 #include <vector>
 #include "src/v8.h"
 
+#include "include/libplatform/libplatform.h"
 #include "src/api.h"
 #include "src/compiler.h"
 #include "src/scanner-character-streams.h"
@@ -44,7 +45,19 @@
 
 using namespace v8::internal;
 
-std::pair<TimeDelta, TimeDelta> RunBaselineParser(
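+// Wraps the raw Latin-1 file contents as an external string resource so the
+// data can be handed to v8::String::NewExternal without copying.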
+class StringResource8 : public v8::String::ExternalOneByteStringResource {
+ public:
+  StringResource8(const char* data, int length)
+      : data_(data), length_(length) { }
+  virtual size_t length() const { return length_; }
+  virtual const char* data() const { return data_; }
+
+ private:
+  const char* data_;
+  int length_;
+};
+
+std::pair<v8::base::TimeDelta, v8::base::TimeDelta> RunBaselineParser(
     const char* fname, Encoding encoding, int repeat, v8::Isolate* isolate,
     v8::Handle<v8::Context> context) {
   int length = 0;
@@ -63,11 +76,13 @@
       break;
     }
     case LATIN1: {
-      source_handle = v8::String::NewFromOneByte(isolate, source);
+      StringResource8* string_resource =
+          new StringResource8(reinterpret_cast<const char*>(source), length);
+      source_handle = v8::String::NewExternal(isolate, string_resource);
       break;
     }
   }
-  TimeDelta parse_time1, parse_time2;
+  v8::base::TimeDelta parse_time1, parse_time2;
   Handle<Script> script = Isolate::Current()->factory()->NewScript(
       v8::Utils::OpenHandle(*source_handle));
   i::ScriptData* cached_data_impl = NULL;
@@ -75,30 +90,32 @@
   {
     CompilationInfoWithZone info(script);
     info.MarkAsGlobal();
-    info.SetCachedData(&cached_data_impl, i::PRODUCE_CACHED_DATA);
-    ElapsedTimer timer;
+    info.SetCachedData(&cached_data_impl,
+                       v8::ScriptCompiler::kProduceParserCache);
+    v8::base::ElapsedTimer timer;
     timer.Start();
     // Allow lazy parsing; otherwise we won't produce cached data.
     bool success = Parser::Parse(&info, true);
     parse_time1 = timer.Elapsed();
     if (!success) {
       fprintf(stderr, "Parsing failed\n");
-      return std::make_pair(TimeDelta(), TimeDelta());
+      return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta());
     }
   }
   // Second round of parsing (consume cached data).
   {
     CompilationInfoWithZone info(script);
     info.MarkAsGlobal();
-    info.SetCachedData(&cached_data_impl, i::CONSUME_CACHED_DATA);
-    ElapsedTimer timer;
+    info.SetCachedData(&cached_data_impl,
+                       v8::ScriptCompiler::kConsumeParserCache);
+    v8::base::ElapsedTimer timer;
     timer.Start();
     // Allow lazy parsing; otherwise cached data won't help.
     bool success = Parser::Parse(&info, true);
     parse_time2 = timer.Elapsed();
     if (!success) {
       fprintf(stderr, "Parsing failed\n");
-      return std::make_pair(TimeDelta(), TimeDelta());
+      return std::make_pair(v8::base::TimeDelta(), v8::base::TimeDelta());
     }
   }
   return std::make_pair(parse_time1, parse_time2);
@@ -106,8 +123,11 @@
 
 
 int main(int argc, char* argv[]) {
-  v8::V8::InitializeICU();
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
+  v8::V8::InitializeICU();
+  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
+  v8::V8::InitializePlatform(platform);
+  v8::V8::Initialize();
   Encoding encoding = LATIN1;
   std::vector<std::string> fnames;
   std::string benchmark;
@@ -134,14 +154,15 @@
     v8::HandleScope handle_scope(isolate);
     v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
     v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
-    ASSERT(!context.IsEmpty());
+    DCHECK(!context.IsEmpty());
     {
       v8::Context::Scope scope(context);
       double first_parse_total = 0;
       double second_parse_total = 0;
       for (size_t i = 0; i < fnames.size(); i++) {
-        std::pair<TimeDelta, TimeDelta> time = RunBaselineParser(
-            fnames[i].c_str(), encoding, repeat, isolate, context);
+        std::pair<v8::base::TimeDelta, v8::base::TimeDelta> time =
+            RunBaselineParser(fnames[i].c_str(), encoding, repeat, isolate,
+                              context);
         first_parse_total += time.first.InMillisecondsF();
         second_parse_total += time.second.InMillisecondsF();
       }
@@ -153,5 +174,7 @@
     }
   }
   v8::V8::Dispose();
+  v8::V8::ShutdownPlatform();
+  delete platform;
   return 0;
 }
diff --git a/tools/presubmit.py b/tools/presubmit.py
index 518f2d9..8a6ff2a 100755
--- a/tools/presubmit.py
+++ b/tools/presubmit.py
@@ -54,6 +54,7 @@
 build/deprecated
 build/endif_comment
 build/forward_decl
+build/include_alpha
 build/include_order
 build/printf_format
 build/storage_class
@@ -61,7 +62,6 @@
 readability/boost
 readability/braces
 readability/casting
-readability/check
 readability/constructors
 readability/fn_size
 readability/function
@@ -80,7 +80,6 @@
 runtime/nonconf
 runtime/printf
 runtime/printf_format
-runtime/references
 runtime/rtti
 runtime/sizeof
 runtime/string
@@ -101,6 +100,7 @@
 whitespace/todo
 """.split()
 
+# TODO(bmeurer): Fix and re-enable readability/check
 
 LINT_OUTPUT_PATTERN = re.compile(r'^.+[:(]\d+[:)]|^Done processing')
 
@@ -200,7 +200,8 @@
 
   def IgnoreDir(self, name):
     return (name.startswith('.') or
-            name in ('buildtools', 'data', 'kraken', 'octane', 'sunspider'))
+            name in ('buildtools', 'data', 'gmock', 'gtest', 'kraken',
+                     'octane', 'sunspider'))
 
   def IgnoreFile(self, name):
     return name.startswith('.')
@@ -416,10 +417,15 @@
     return success
 
 
-def CheckGeneratedRuntimeTests(workspace):
+def CheckRuntimeVsNativesNameClashes(workspace):
   code = subprocess.call(
-      [sys.executable, join(workspace, "tools", "generate-runtime-tests.py"),
-       "check"])
+      [sys.executable, join(workspace, "tools", "check-name-clashes.py")])
+  return code == 0
+
+
+def CheckExternalReferenceRegistration(workspace):
+  code = subprocess.call(
+      [sys.executable, join(workspace, "tools", "external-reference-check.py")])
   return code == 0
 
 
@@ -441,7 +447,8 @@
   print "Running copyright header, trailing whitespaces and " \
         "two empty lines between declarations check..."
   success = SourceProcessor().Run(workspace) and success
-  success = CheckGeneratedRuntimeTests(workspace) and success
+  success = CheckRuntimeVsNativesNameClashes(workspace) and success
+  success = CheckExternalReferenceRegistration(workspace) and success
   if success:
     return 0
   else:
diff --git a/tools/profile_view.js b/tools/profile_view.js
index e041909..d1545ac 100644
--- a/tools/profile_view.js
+++ b/tools/profile_view.js
@@ -169,24 +169,6 @@
 
 
 /**
- * Returns a share of the function's total time in application's total time.
- */
-ProfileView.Node.prototype.__defineGetter__(
-    'totalPercent',
-    function() { return this.totalTime /
-      (this.head ? this.head.totalTime : this.totalTime) * 100.0; });
-
-
-/**
- * Returns a share of the function's self time in application's total time.
- */
-ProfileView.Node.prototype.__defineGetter__(
-    'selfPercent',
-    function() { return this.selfTime /
-      (this.head ? this.head.totalTime : this.totalTime) * 100.0; });
-
-
-/**
  * Returns a share of the function's total time in its parent's total time.
  */
 ProfileView.Node.prototype.__defineGetter__(
diff --git a/tools/push-to-trunk/auto_push.py b/tools/push-to-trunk/auto_push.py
index aeaea80..fef3b53 100755
--- a/tools/push-to-trunk/auto_push.py
+++ b/tools/push-to-trunk/auto_push.py
@@ -36,22 +36,13 @@
 from common_includes import *
 import push_to_trunk
 
-SETTINGS_LOCATION = "SETTINGS_LOCATION"
-
-CONFIG = {
-  PERSISTFILE_BASENAME: "/tmp/v8-auto-push-tempfile",
-  DOT_GIT_LOCATION: ".git",
-  SETTINGS_LOCATION: "~/.auto-roll",
-}
-
 PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
 
-
 class Preparation(Step):
   MESSAGE = "Preparation."
 
   def RunStep(self):
-    self.InitialEnvironmentChecks()
+    self.InitialEnvironmentChecks(self.default_cwd)
     self.CommonPrepare()
 
 
@@ -59,7 +50,7 @@
   MESSAGE = "Checking settings file."
 
   def RunStep(self):
-    settings_file = os.path.realpath(self.Config(SETTINGS_LOCATION))
+    settings_file = os.path.realpath(self.Config("SETTINGS_LOCATION"))
     if os.path.exists(settings_file):
       settings_dict = json.loads(FileToText(settings_file))
       if settings_dict.get("enable_auto_roll") is False:
@@ -119,9 +110,8 @@
 
     # TODO(machenbach): Update the script before calling it.
     if self._options.push:
-      P = push_to_trunk.PushToTrunk
       self._side_effect_handler.Call(
-          P(push_to_trunk.CONFIG, self._side_effect_handler).Run,
+          push_to_trunk.PushToTrunk().Run,
           ["--author", self._options.author,
            "--reviewer", self._options.reviewer,
            "--revision", self["lkgr"],
@@ -141,6 +131,12 @@
     options.requires_editor = False
     return True
 
+  def _Config(self):
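+    # Per-script settings; replaces the old module-level CONFIG dict.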
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-auto-push-tempfile",
+      "SETTINGS_LOCATION": "~/.auto-roll",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -153,4 +149,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(AutoPush(CONFIG).Run())
+  sys.exit(AutoPush().Run())
diff --git a/tools/push-to-trunk/auto_roll.py b/tools/push-to-trunk/auto_roll.py
index 607ca08..120e633 100755
--- a/tools/push-to-trunk/auto_roll.py
+++ b/tools/push-to-trunk/auto_roll.py
@@ -12,11 +12,6 @@
 from common_includes import *
 import chromium_roll
 
-CONFIG = {
-  PERSISTFILE_BASENAME: "/tmp/v8-auto-roll-tempfile",
-}
-
-CR_DEPS_URL = 'http://src.chromium.org/svn/trunk/src/DEPS'
 
 class CheckActiveRoll(Step):
   MESSAGE = "Check active roll."
@@ -47,8 +42,9 @@
   MESSAGE = "Detect commit ID of the last push to trunk."
 
   def RunStep(self):
-    push_hash = self.FindLastTrunkPush(include_patches=True)
-    self["last_push"] = self.GitSVNFindSVNRev(push_hash)
+    push_hash = self.FindLastTrunkPush(
+        branch="origin/master", include_patches=True)
+    self["last_push"] = self.GetCommitPositionNumber(push_hash)
 
 
 class DetectLastRoll(Step):
@@ -56,15 +52,37 @@
 
   def RunStep(self):
     # Interpret the DEPS file to retrieve the v8 revision.
+    # TODO(machenbach): This should be part or the roll-deps api of
+    # depot_tools.
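+    # DEPS files call Var(); stub it out so exec() below can evaluate them.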
     Var = lambda var: '%s'
-    exec(self.ReadURL(CR_DEPS_URL))
-    last_roll = vars['v8_revision']
-    if last_roll >= self["last_push"]:
+    exec(FileToText(os.path.join(self._options.chromium, "DEPS")))
+    last_roll = self.GetCommitPositionNumber(vars['v8_revision'])
+    # FIXME(machenbach): When rolling from bleeding edge and from trunk there
+    # may be different commit numbers here. Better to use the version?
+    if int(last_roll) >= int(self["last_push"]):
       print("There is no newer v8 revision than the one in Chromium (%s)."
             % last_roll)
       return True
 
 
+class CheckClusterFuzz(Step):
+  MESSAGE = "Check ClusterFuzz api for new problems."
+
+  def RunStep(self):
+    if not os.path.exists(self.Config("CLUSTERFUZZ_API_KEY_FILE")):
+      print "Skipping ClusterFuzz check. No api key file found."
+      return False
+    api_key = FileToText(self.Config("CLUSTERFUZZ_API_KEY_FILE"))
+    # Check for open, reproducible issues that have no associated bug.
+    result = self._side_effect_handler.ReadClusterFuzzAPI(
+        api_key, job_type="linux_asan_d8_dbg", reproducible="True",
+        open="True", bug_information="",
+        revision_greater_or_equal=str(self["last_push"]))
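+    # A truthy return value stops the remaining steps of the script run.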
+    if result:
+      print "Stop due to pending ClusterFuzz issues."
+      return True
+
+
 class RollChromium(Step):
   MESSAGE = "Roll V8 into Chromium."
 
@@ -74,15 +92,14 @@
         "--author", self._options.author,
         "--reviewer", self._options.reviewer,
         "--chromium", self._options.chromium,
-        "--force",
+        "--use-commit-queue",
       ]
       if self._options.sheriff:
         args.extend([
             "--sheriff", "--googlers-mapping", self._options.googlers_mapping])
-      R = chromium_roll.ChromiumRoll
-      self._side_effect_handler.Call(
-          R(chromium_roll.CONFIG, self._side_effect_handler).Run,
-          args)
+      if self._options.dry_run:
+        args.extend(["--dry-run"])
+      self._side_effect_handler.Call(chromium_roll.ChromiumRoll().Run, args)
 
 
 class AutoRoll(ScriptsBase):
@@ -90,8 +107,7 @@
     parser.add_argument("-c", "--chromium", required=True,
                         help=("The path to your Chromium src/ "
                               "directory to automate the V8 roll."))
-    parser.add_argument("--roll",
-                        help="Make Chromium roll. Dry run if unspecified.",
+    parser.add_argument("--roll", help="Call Chromium roll script.",
                         default=False, action="store_true")
 
   def _ProcessOptions(self, options):  # pragma: no cover
@@ -103,14 +119,21 @@
       return False
     return True
 
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-auto-roll-tempfile",
+      "CLUSTERFUZZ_API_KEY_FILE": ".cf_api_key",
+    }
+
   def _Steps(self):
     return [
       CheckActiveRoll,
       DetectLastPush,
       DetectLastRoll,
+      CheckClusterFuzz,
       RollChromium,
     ]
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(AutoRoll(CONFIG).Run())
+  sys.exit(AutoRoll().Run())
diff --git a/tools/push-to-trunk/auto_tag.py b/tools/push-to-trunk/auto_tag.py
new file mode 100755
index 0000000..175e10e
--- /dev/null
+++ b/tools/push-to-trunk/auto_tag.py
@@ -0,0 +1,199 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import sys
+
+from common_includes import *
+
+
+class Preparation(Step):
+  MESSAGE = "Preparation."
+
+  def RunStep(self):
+    self.CommonPrepare()
+    self.PrepareBranch()
+    self.GitCheckout("master")
+    self.GitSVNRebase()
+
+
+class GetTags(Step):
+  MESSAGE = "Get all V8 tags."
+
+  def RunStep(self):
+    self.GitCreateBranch(self._config["BRANCHNAME"])
+
+    # Get remote tags.
+    tags = filter(lambda s: re.match(r"^svn/tags/[\d+\.]+$", s),
+                  self.GitRemotes())
+
+    # Remove 'svn/tags/' prefix.
+    self["tags"] = map(lambda s: s[9:], tags)
+
+
+class GetOldestUntaggedVersion(Step):
+  MESSAGE = "Check if there's a version on bleeding edge without a tag."
+
+  def RunStep(self):
+    tags = set(self["tags"])
+    self["candidate"] = None
+    self["candidate_version"] = None
+    self["next"] = None
+    self["next_version"] = None
+
+    # Iterate backwards through all automatic version updates.
+    for git_hash in self.GitLog(
+        format="%H", grep="\\[Auto\\-roll\\] Bump up version to").splitlines():
+
+      # Get the version.
+      if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
+        continue
+
+      self.ReadAndPersistVersion()
+      version = self.ArrayToVersion("")
+
+      # Strip off the trailing patch level (tags don't include patch level 0).
+      if version.endswith(".0"):
+        version = version[:-2]
+
+      # Clean up checked-out version file.
+      self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
+
+      if version in tags:
+        if self["candidate"]:
+          # Revision "git_hash" is tagged already and "candidate" was the next
+          # newer revision without a tag.
+          break
+        else:
+          print("Stop as %s is the latest version and it has been tagged." %
+                version)
+          self.CommonCleanup()
+          return True
+      else:
+        # This is the second oldest version without a tag.
+        self["next"] = self["candidate"]
+        self["next_version"] = self["candidate_version"]
+
+        # This is the oldest version without a tag.
+        self["candidate"] = git_hash
+        self["candidate_version"] = version
+
+    if not self["candidate"] or not self["candidate_version"]:
+      print "Nothing found to tag."
+      self.CommonCleanup()
+      return True
+
+    print("Candidate for tagging is %s with version %s" %
+          (self["candidate"], self["candidate_version"]))
+
+
+class GetLKGRs(Step):
+  MESSAGE = "Get the last lkgrs."
+
+  def RunStep(self):
+    revision_url = "https://v8-status.appspot.com/revisions?format=json"
+    status_json = self.ReadURL(revision_url, wait_plan=[5, 20])
+    self["lkgrs"] = [entry["revision"]
+                     for entry in json.loads(status_json) if entry["status"]]
+
+
+class CalculateTagRevision(Step):
+  MESSAGE = "Calculate the revision to tag."
+
+  def LastLKGR(self, min_rev, max_rev):
+    """Finds the newest lkgr between min_rev (inclusive) and max_rev
+    (exclusive).
+    """
+    for lkgr in self["lkgrs"]:
+      # LKGRs are reverse sorted.
+      if int(min_rev) <= int(lkgr) and int(lkgr) < int(max_rev):
+        return lkgr
+    return None
+
+  def RunStep(self):
+    # Get the lkgr after the tag candidate and before the next tag candidate.
+    candidate_svn = self.GitSVNFindSVNRev(self["candidate"])
+    if self["next"]:
+      next_svn = self.GitSVNFindSVNRev(self["next"])
+    else:
+      # Don't include the version change commit itself if there is no upper
+      # limit yet.
+      candidate_svn = str(int(candidate_svn) + 1)
+      next_svn = sys.maxint
+    lkgr_svn = self.LastLKGR(candidate_svn, next_svn)
+
+    if not lkgr_svn:
+      print "There is no lkgr since the candidate version yet."
+      self.CommonCleanup()
+      return True
+
+    # Let's check if the lkgr is at least three hours old.
+    self["lkgr"] = self.GitSVNFindGitHash(lkgr_svn)
+    if not self["lkgr"]:
+      print "Couldn't find git hash for lkgr %s" % lkgr_svn
+      self.CommonCleanup()
+      return True
+
+    lkgr_utc_time = int(self.GitLog(n=1, format="%at", git_hash=self["lkgr"]))
+    current_utc_time = self._side_effect_handler.GetUTCStamp()
+
+    if current_utc_time < lkgr_utc_time + 10800:
+      print "Candidate lkgr %s is too recent for tagging." % lkgr_svn
+      self.CommonCleanup()
+      return True
+
+    print "Tagging revision %s with %s" % (lkgr_svn, self["candidate_version"])
+
+
+class MakeTag(Step):
+  MESSAGE = "Tag the version."
+
+  def RunStep(self):
+    if not self._options.dry_run:
+      self.GitReset(self["lkgr"])
+      self.GitSVNTag(self["candidate_version"])
+
+
+class CleanUp(Step):
+  MESSAGE = "Clean up."
+
+  def RunStep(self):
+    self.CommonCleanup()
+
+
+class AutoTag(ScriptsBase):
+  def _PrepareOptions(self, parser):
+    parser.add_argument("--dry_run", help="Don't tag the new version.",
+                        default=False, action="store_true")
+
+  def _ProcessOptions(self, options):  # pragma: no cover
+    if not options.dry_run and not options.author:
+      print "Specify your chromium.org email with -a"
+      return False
+    options.wait_for_lgtm = False
+    options.force_readline_defaults = True
+    options.force_upload = True
+    return True
+
+  def _Config(self):
+    return {
+      "BRANCHNAME": "auto-tag-v8",
+      "PERSISTFILE_BASENAME": "/tmp/v8-auto-tag-tempfile",
+    }
+
+  def _Steps(self):
+    return [
+      Preparation,
+      GetTags,
+      GetOldestUntaggedVersion,
+      GetLKGRs,
+      CalculateTagRevision,
+      MakeTag,
+      CleanUp,
+    ]
+
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(AutoTag().Run())
diff --git a/tools/push-to-trunk/bump_up_version.py b/tools/push-to-trunk/bump_up_version.py
new file mode 100755
index 0000000..c9f052b
--- /dev/null
+++ b/tools/push-to-trunk/bump_up_version.py
@@ -0,0 +1,245 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Script for auto-increasing the version on bleeding_edge.
+
+The script can be run regularly by a cron job. It will increase the build
+level of the version on bleeding_edge if:
+- the lkgr version is smaller than the version of the latest revision,
+- the lkgr version is not a version change itself,
+- the tree is not closed for maintenance.
+
+The new version will be the maximum of the bleeding_edge and trunk versions +1.
+E.g. latest bleeding_edge version: 3.22.11.0 and latest trunk 3.23.0.0 gives
+the new version 3.23.1.0.
+
+This script requires a depot_tools git checkout, i.e. one created via 'fetch v8'.
+"""
+
+import argparse
+import os
+import sys
+
+from common_includes import *
+
+VERSION_BRANCH = "auto-bump-up-version"
+
+
+class Preparation(Step):
+  MESSAGE = "Preparation."
+
+  def RunStep(self):
+    # Check for a clean workdir.
+    if not self.GitIsWorkdirClean():  # pragma: no cover
+      # This is in case a developer runs this script on a dirty tree.
+      self.GitStash()
+
+    # TODO(machenbach): This should be called master after the git switch.
+    self.GitCheckout("bleeding_edge")
+
+    self.GitPull()
+
+    # Ensure a clean version branch.
+    self.DeleteBranch(VERSION_BRANCH)
+
+
+class GetCurrentBleedingEdgeVersion(Step):
+  MESSAGE = "Get latest bleeding edge version."
+
+  def RunStep(self):
+    # TODO(machenbach): This should be called master after the git switch.
+    self.GitCheckout("bleeding_edge")
+
+    # Store latest version and revision.
+    self.ReadAndPersistVersion()
+    self["latest_version"] = self.ArrayToVersion("")
+    self["latest"] = self.GitLog(n=1, format="%H")
+    print "Bleeding edge version: %s" % self["latest_version"]
+
+
+# This step is pure paranoia. It stops the script if the last commit changed
+# version.cc. Just in case the other bailout has a bug, this prevents the
+# script from continuously committing version changes.
+class LastChangeBailout(Step):
+  MESSAGE = "Stop script if the last change modified the version."
+
+  def RunStep(self):
+    if VERSION_FILE in self.GitChangedFiles(self["latest"]):
+      print "Stop due to recent version change."
+      return True
+
+
+# TODO(machenbach): Implement this for git.
+class FetchLKGR(Step):
+  MESSAGE = "Fetching V8 LKGR."
+
+  def RunStep(self):
+    lkgr_url = "https://v8-status.appspot.com/lkgr"
+    self["lkgr_svn"] = self.ReadURL(lkgr_url, wait_plan=[5])
+
+
+# TODO(machenbach): Implement this for git. With a git lkgr we could simply
+# check out that revision. With svn, we have to search backwards until that
+# revision is found.
+class GetLKGRVersion(Step):
+  MESSAGE = "Get bleeding edge lkgr version."
+
+  def RunStep(self):
+    self.GitCheckout("bleeding_edge")
+    # If the commit was made from svn, there is a mapping entry in the commit
+    # message.
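+    # E.g. a line "git-svn-id: <svn-url>@12345 <uuid>" identifies the commit
+    # for svn revision 12345.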
+    self["lkgr"] = self.GitLog(
+        grep="^git-svn-id: [^@]*@%s [A-Za-z0-9-]*$" % self["lkgr_svn"],
+        format="%H")
+
+    # FIXME(machenbach): http://crbug.com/391712 can lead to svn lkgrs on the
+    # trunk branch (rarely).
+    if not self["lkgr"]:  # pragma: no cover
+      self.Die("No git hash found for svn lkgr.")
+
+    self.GitCreateBranch(VERSION_BRANCH, self["lkgr"])
+    self.ReadAndPersistVersion("lkgr_")
+    self["lkgr_version"] = self.ArrayToVersion("lkgr_")
+    print "LKGR version: %s" % self["lkgr_version"]
+
+    # Ensure a clean version branch.
+    self.GitCheckout("bleeding_edge")
+    self.DeleteBranch(VERSION_BRANCH)
+
+
+class LKGRVersionUpToDateBailout(Step):
+  MESSAGE = "Stop script if the lkgr has a renewed version."
+
+  def RunStep(self):
+    # If a version-change commit becomes the lkgr, don't bump up the version
+    # again.
+    if VERSION_FILE in self.GitChangedFiles(self["lkgr"]):
+      print "Stop because the lkgr is a version change itself."
+      return True
+
+    # Don't bump up the version if it was already updated after the lkgr.
+    if SortingKey(self["lkgr_version"]) < SortingKey(self["latest_version"]):
+      print("Stop because the latest version already changed since the lkgr "
+            "version.")
+      return True
+
+
+class GetTrunkVersion(Step):
+  MESSAGE = "Get latest trunk version."
+
+  def RunStep(self):
+    # TODO(machenbach): This should be called trunk after the git switch.
+    self.GitCheckout("master")
+    self.GitPull()
+    self.ReadAndPersistVersion("trunk_")
+    self["trunk_version"] = self.ArrayToVersion("trunk_")
+    print "Trunk version: %s" % self["trunk_version"]
+
+
+class CalculateVersion(Step):
+  MESSAGE = "Calculate the new version."
+
+  def RunStep(self):
+    if self["lkgr_build"] == "9999":  # pragma: no cover
+      # If version control on bleeding edge was switched off, just use the last
+      # trunk version.
+      self["lkgr_version"] = self["trunk_version"]
+
+    # The new version needs to be greater than the max on bleeding edge and
+    # trunk.
+    max_version = max(self["trunk_version"],
+                      self["lkgr_version"],
+                      key=SortingKey)
+
+    # Strip off possible leading zeros.
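+    # E.g. "3.30.07.0" yields major "3", minor "30" and build "7".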
+    self["new_major"], self["new_minor"], self["new_build"], _ = (
+        map(str, map(int, max_version.split("."))))
+
+    self["new_build"] = str(int(self["new_build"]) + 1)
+    self["new_patch"] = "0"
+
+    self["new_version"] = ("%s.%s.%s.0" %
+        (self["new_major"], self["new_minor"], self["new_build"]))
+    print "New version is %s" % self["new_version"]
+
+    if self._options.dry_run:  # pragma: no cover
+      print "Dry run, skipping version change."
+      return True
+
+
+class CheckTreeStatus(Step):
+  MESSAGE = "Checking v8 tree status message."
+
+  def RunStep(self):
+    status_url = "https://v8-status.appspot.com/current?format=json"
+    status_json = self.ReadURL(status_url, wait_plan=[5, 20, 300, 300])
+    message = json.loads(status_json)["message"]
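+    # Bail out while the tree message mentions maintenance or a commit stop,
+    # e.g. "Tree closed for maintenance".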
+    if re.search(r"maintenance|no commits", message, flags=re.I):
+      print "Skip version change by tree status: \"%s\"" % message
+      return True
+
+
+class ChangeVersion(Step):
+  MESSAGE = "Bump up the version."
+
+  def RunStep(self):
+    self.GitCreateBranch(VERSION_BRANCH, "bleeding_edge")
+
+    self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
+
+    try:
+      msg = "[Auto-roll] Bump up version to %s" % self["new_version"]
+      self.GitCommit("%s\n\nTBR=%s" % (msg, self._options.author),
+                     author=self._options.author)
+      if self._options.svn:
+        self.SVNCommit("branches/bleeding_edge", msg)
+      else:
+        self.GitUpload(author=self._options.author,
+                       force=self._options.force_upload,
+                       bypass_hooks=True)
+        self.GitDCommit()
+      print "Successfully changed the version."
+    finally:
+      # Clean up.
+      self.GitCheckout("bleeding_edge")
+      self.DeleteBranch(VERSION_BRANCH)
+
+
+class BumpUpVersion(ScriptsBase):
+  def _PrepareOptions(self, parser):
+    parser.add_argument("--dry_run", help="Don't commit the new version.",
+                        default=False, action="store_true")
+
+  def _ProcessOptions(self, options):  # pragma: no cover
+    if not options.dry_run and not options.author:
+      print "Specify your chromium.org email with -a"
+      return False
+    options.wait_for_lgtm = False
+    options.force_readline_defaults = True
+    options.force_upload = True
+    return True
+
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-bump-up-version-tempfile",
+    }
+
+  def _Steps(self):
+    return [
+      Preparation,
+      GetCurrentBleedingEdgeVersion,
+      LastChangeBailout,
+      FetchLKGR,
+      GetLKGRVersion,
+      LKGRVersionUpToDateBailout,
+      GetTrunkVersion,
+      CalculateVersion,
+      CheckTreeStatus,
+      ChangeVersion,
+    ]
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(BumpUpVersion().Run())
diff --git a/tools/push-to-trunk/chromium_roll.py b/tools/push-to-trunk/chromium_roll.py
index 35ab24b..dc5e6eb 100755
--- a/tools/push-to-trunk/chromium_roll.py
+++ b/tools/push-to-trunk/chromium_roll.py
@@ -9,21 +9,13 @@
 
 from common_includes import *
 
-DEPS_FILE = "DEPS_FILE"
-CHROMIUM = "CHROMIUM"
-
-CONFIG = {
-  PERSISTFILE_BASENAME: "/tmp/v8-chromium-roll-tempfile",
-  DOT_GIT_LOCATION: ".git",
-  DEPS_FILE: "DEPS",
-}
-
 
 class Preparation(Step):
   MESSAGE = "Preparation."
 
   def RunStep(self):
-    self.CommonPrepare()
+    # Update v8 remote tracking branches.
+    self.GitFetchOrigin()
 
 
 class DetectLastPush(Step):
@@ -31,87 +23,79 @@
 
   def RunStep(self):
     self["last_push"] = self._options.last_push or self.FindLastTrunkPush(
-        include_patches=True)
-    self["trunk_revision"] = self.GitSVNFindSVNRev(self["last_push"])
+        branch="origin/master", include_patches=True)
+    self["trunk_revision"] = self.GetCommitPositionNumber(self["last_push"])
     self["push_title"] = self.GitLog(n=1, format="%s",
                                      git_hash=self["last_push"])
 
 
-class CheckChromium(Step):
-  MESSAGE = "Ask for chromium checkout."
-
-  def Run(self):
-    self["chrome_path"] = self._options.chromium
-    while not self["chrome_path"]:
-      self.DieNoManualMode("Please specify the path to a Chromium checkout in "
-                           "forced mode.")
-      print ("Please specify the path to the chromium \"src\" directory: "),
-      self["chrome_path"] = self.ReadLine()
-
-
 class SwitchChromium(Step):
   MESSAGE = "Switch to Chromium checkout."
-  REQUIRES = "chrome_path"
 
   def RunStep(self):
     self["v8_path"] = os.getcwd()
-    os.chdir(self["chrome_path"])
-    self.InitialEnvironmentChecks()
+    cwd = self._options.chromium
+    os.chdir(cwd)
+    self.InitialEnvironmentChecks(cwd)
     # Check for a clean workdir.
-    if not self.GitIsWorkdirClean():  # pragma: no cover
+    if not self.GitIsWorkdirClean(cwd=cwd):  # pragma: no cover
       self.Die("Workspace is not clean. Please commit or undo your changes.")
     # Assert that the DEPS file is there.
-    if not os.path.exists(self.Config(DEPS_FILE)):  # pragma: no cover
+    if not os.path.exists(os.path.join(cwd, "DEPS")):  # pragma: no cover
       self.Die("DEPS file not present.")
 
 
 class UpdateChromiumCheckout(Step):
   MESSAGE = "Update the checkout and create a new branch."
-  REQUIRES = "chrome_path"
 
   def RunStep(self):
-    os.chdir(self["chrome_path"])
-    self.GitCheckout("master")
-    self.GitPull()
-    self.GitCreateBranch("v8-roll-%s" % self["trunk_revision"])
+    self.GitCheckout("master", cwd=self._options.chromium)
+    self.Command("gclient", "sync --nohooks", cwd=self._options.chromium)
+    self.GitPull(cwd=self._options.chromium)
+
+    # Update v8 remotes.
+    self.GitFetchOrigin()
+
+    self.GitCreateBranch("v8-roll-%s" % self["trunk_revision"],
+                         cwd=self._options.chromium)
 
 
 class UploadCL(Step):
   MESSAGE = "Create and upload CL."
-  REQUIRES = "chrome_path"
 
   def RunStep(self):
-    os.chdir(self["chrome_path"])
-
     # Patch DEPS file.
-    deps = FileToText(self.Config(DEPS_FILE))
-    deps = re.sub("(?<=\"v8_revision\": \")([0-9]+)(?=\")",
-                  self["trunk_revision"],
-                  deps)
-    TextToFile(deps, self.Config(DEPS_FILE))
-
-    if self._options.reviewer and not self._options.manual:
-      print "Using account %s for review." % self._options.reviewer
-      rev = self._options.reviewer
-    else:
-      print "Please enter the email address of a reviewer for the roll CL: ",
-      self.DieNoManualMode("A reviewer must be specified in forced mode.")
-      rev = self.ReadLine()
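+    # roll-dep (from depot_tools) rewrites the v8 entry in the DEPS file to
+    # the given revision.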
+    if self.Command(
+        "roll-dep", "v8 %s" % self["trunk_revision"],
+        cwd=self._options.chromium) is None:
+      self.Die("Failed to create deps for %s" % self["trunk_revision"])
 
     commit_title = "Update V8 to %s." % self["push_title"].lower()
     sheriff = ""
     if self["sheriff"]:
       sheriff = ("\n\nPlease reply to the V8 sheriff %s in case of problems."
                  % self["sheriff"])
-    self.GitCommit("%s%s\n\nTBR=%s" % (commit_title, sheriff, rev))
-    self.GitUpload(author=self._options.author,
-                   force=self._options.force_upload)
-    print "CL uploaded."
+    self.GitCommit("%s%s\n\nTBR=%s" %
+                       (commit_title, sheriff, self._options.reviewer),
+                   author=self._options.author,
+                   cwd=self._options.chromium)
+    if not self._options.dry_run:
+      self.GitUpload(author=self._options.author,
+                     force=True,
+                     cq=self._options.use_commit_queue,
+                     cwd=self._options.chromium)
+      print "CL uploaded."
+    else:
+      self.GitCheckout("master", cwd=self._options.chromium)
+      self.GitDeleteBranch("v8-roll-%s" % self["trunk_revision"],
+                           cwd=self._options.chromium)
+      print "Dry run - don't upload."
 
 
+# TODO(machenbach): Make this obsolete. We are only in the chromium checkout
+# for the initial .git check.
 class SwitchV8(Step):
   MESSAGE = "Returning to V8 checkout."
-  REQUIRES = "chrome_path"
 
   def RunStep(self):
     os.chdir(self["v8_path"])
@@ -126,43 +110,39 @@
           % self["trunk_revision"])
 
     # Clean up all temporary files.
-    Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
+    Command("rm", "-f %s*" % self._config["PERSISTFILE_BASENAME"])
 
 
 class ChromiumRoll(ScriptsBase):
   def _PrepareOptions(self, parser):
-    group = parser.add_mutually_exclusive_group()
-    group.add_argument("-f", "--force",
-                      help="Don't prompt the user.",
-                      default=False, action="store_true")
-    group.add_argument("-m", "--manual",
-                      help="Prompt the user at every important step.",
-                      default=False, action="store_true")
-    parser.add_argument("-c", "--chromium",
+    parser.add_argument("-c", "--chromium", required=True,
                         help=("The path to your Chromium src/ "
                               "directory to automate the V8 roll."))
     parser.add_argument("-l", "--last-push",
                         help="The git commit ID of the last push to trunk.")
+    parser.add_argument("--use-commit-queue",
+                        help="Check the CQ bit on upload.",
+                        default=False, action="store_true")
 
   def _ProcessOptions(self, options):  # pragma: no cover
-    if not options.manual and not options.reviewer:
-      print "A reviewer (-r) is required in (semi-)automatic mode."
-      return False
-    if not options.manual and not options.chromium:
-      print "A chromium checkout (-c) is required in (semi-)automatic mode."
-      return False
-    if not options.manual and not options.author:
-      print "Specify your chromium.org email with -a in (semi-)automatic mode."
+    if not options.author or not options.reviewer:
+      print "A reviewer (-r) and an author (-a) are required."
       return False
 
-    options.tbr_commit = not options.manual
+    options.requires_editor = False
+    options.force = True
+    options.manual = False
     return True
 
+  def _Config(self):
+    return {
+      "PERSISTFILE_BASENAME": "/tmp/v8-chromium-roll-tempfile",
+    }
+
   def _Steps(self):
     return [
       Preparation,
       DetectLastPush,
-      CheckChromium,
       DetermineV8Sheriff,
       SwitchChromium,
       UpdateChromiumCheckout,
@@ -173,4 +153,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(ChromiumRoll(CONFIG).Run())
+  sys.exit(ChromiumRoll().Run())
diff --git a/tools/push-to-trunk/common_includes.py b/tools/push-to-trunk/common_includes.py
index 482509f..00fb097 100644
--- a/tools/push-to-trunk/common_includes.py
+++ b/tools/push-to-trunk/common_includes.py
@@ -28,27 +28,28 @@
 
 import argparse
 import datetime
+import glob
+import httplib
 import imp
 import json
 import os
 import re
+import shutil
 import subprocess
 import sys
 import textwrap
 import time
+import urllib
 import urllib2
 
 from git_recipes import GitRecipesMixin
 from git_recipes import GitFailedException
 
-PERSISTFILE_BASENAME = "PERSISTFILE_BASENAME"
-BRANCHNAME = "BRANCHNAME"
-DOT_GIT_LOCATION = "DOT_GIT_LOCATION"
-VERSION_FILE = "VERSION_FILE"
-CHANGELOG_FILE = "CHANGELOG_FILE"
-CHANGELOG_ENTRY_FILE = "CHANGELOG_ENTRY_FILE"
-COMMITMSG_FILE = "COMMITMSG_FILE"
-PATCH_FILE = "PATCH_FILE"
+VERSION_FILE = os.path.join("src", "version.cc")
+
+# V8 base directory.
+DEFAULT_CWD = os.path.dirname(
+    os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 
 def TextToFile(text, file_name):
@@ -169,18 +170,30 @@
     return ""
 
 
+def SortingKey(version):
+  """Key for sorting version number strings: '3.11' > '3.2.1.1'"""
+  version_keys = map(int, version.split("."))
+  # Fill up to full version numbers to normalize comparison.
+  while len(version_keys) < 4:  # pragma: no cover
+    version_keys.append(0)
+  # Fill digits.
+  return ".".join(map("{0:04d}".format, version_keys))
+
+
 # Some commands don't like the pipe, e.g. calling vi from within the script or
 # from subscripts like git cl upload.
-def Command(cmd, args="", prefix="", pipe=True):
+def Command(cmd, args="", prefix="", pipe=True, cwd=None):
+  cwd = cwd or os.getcwd()
   # TODO(machenbach): Use timeout.
   cmd_line = "%s %s %s" % (prefix, cmd, args)
   print "Command: %s" % cmd_line
+  print "in %s" % cwd
   sys.stdout.flush()
   try:
     if pipe:
-      return subprocess.check_output(cmd_line, shell=True)
+      return subprocess.check_output(cmd_line, shell=True, cwd=cwd)
     else:
-      return subprocess.check_call(cmd_line, shell=True)
+      return subprocess.check_call(cmd_line, shell=True, cwd=cwd)
   except subprocess.CalledProcessError:
     return None
   finally:
@@ -193,8 +206,8 @@
   def Call(self, fun, *args, **kwargs):
     return fun(*args, **kwargs)
 
-  def Command(self, cmd, args="", prefix="", pipe=True):
-    return Command(cmd, args, prefix, pipe)
+  def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
+    return Command(cmd, args, prefix, pipe, cwd=cwd)
 
   def ReadLine(self):
     return sys.stdin.readline().strip()
@@ -207,12 +220,34 @@
     finally:
       url_fh.close()
 
+  def ReadClusterFuzzAPI(self, api_key, **params):
+    params["api_key"] = api_key.strip()
+    params = urllib.urlencode(params)
+
+    headers = {"Content-type": "application/x-www-form-urlencoded"}
+
+    conn = httplib.HTTPSConnection("backend-dot-cluster-fuzz.appspot.com")
+    conn.request("POST", "/_api/", params, headers)
+
+    response = conn.getresponse()
+    data = response.read()
+
+    try:
+      return json.loads(data)
+    except ValueError:
+      print data
+      print "ERROR: Could not read response. Is your key valid?"
+      raise
+
   def Sleep(self, seconds):
     time.sleep(seconds)
 
   def GetDate(self):
     return datetime.date.today().strftime("%Y-%m-%d")
 
+  def GetUTCStamp(self):
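+    # Note: time.mktime interprets the tuple as local time, so this matches
+    # UTC only on hosts whose clock runs in UTC.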
+    return time.mktime(datetime.datetime.utcnow().timetuple())
+
 DEFAULT_SIDE_EFFECT_HANDLER = SideEffectHandler()
 
 
@@ -221,14 +256,17 @@
 
 
 class Step(GitRecipesMixin):
-  def __init__(self, text, requires, number, config, state, options, handler):
+  def __init__(self, text, number, config, state, options, handler):
     self._text = text
-    self._requires = requires
     self._number = number
     self._config = config
     self._state = state
     self._options = options
     self._side_effect_handler = handler
+
+    # The testing configuration might set a different default cwd.
+    self.default_cwd = self._config.get("DEFAULT_CWD") or DEFAULT_CWD
+
     assert self._number >= 0
     assert self._config is not None
     assert self._state is not None
@@ -249,14 +287,10 @@
 
   def Run(self):
     # Restore state.
-    state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
+    state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
     if not self._state and os.path.exists(state_file):
       self._state.update(json.loads(FileToText(state_file)))
 
-    # Skip step if requirement is not met.
-    if self._requires and not self._state.get(self._requires):
-      return
-
     print ">>> Step %d: %s" % (self._number, self._text)
     try:
       return self.RunStep()
@@ -284,13 +318,14 @@
       got_exception = False
       try:
         result = cb()
-      except NoRetryException, e:
+      except NoRetryException as e:
         raise e
-      except Exception:
-        got_exception = True
+      except Exception as e:
+        got_exception = e
       if got_exception or retry_on(result):
         if not wait_plan:  # pragma: no cover
-          raise Exception("Retried too often. Giving up.")
+          raise Exception("Retried too often. Giving up. Reason: %s" %
+                          str(got_exception))
         wait_time = wait_plan.pop()
         print "Waiting for %f seconds." % wait_time
         self._side_effect_handler.Sleep(wait_time)
@@ -306,21 +341,31 @@
     else:
       return self._side_effect_handler.ReadLine()
 
-  def Git(self, args="", prefix="", pipe=True, retry_on=None):
-    cmd = lambda: self._side_effect_handler.Command("git", args, prefix, pipe)
+  def Command(self, name, args, cwd=None):
+    cmd = lambda: self._side_effect_handler.Command(
+        name, args, "", True, cwd=cwd or self.default_cwd)
+    return self.Retry(cmd, None, [5])
+
+  def Git(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
+    cmd = lambda: self._side_effect_handler.Command(
+        "git", args, prefix, pipe, cwd=cwd or self.default_cwd)
     result = self.Retry(cmd, retry_on, [5, 30])
     if result is None:
       raise GitFailedException("'git %s' failed." % args)
     return result
 
-  def SVN(self, args="", prefix="", pipe=True, retry_on=None):
-    cmd = lambda: self._side_effect_handler.Command("svn", args, prefix, pipe)
+  def SVN(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
+    cmd = lambda: self._side_effect_handler.Command(
+        "svn", args, prefix, pipe, cwd=cwd or self.default_cwd)
     return self.Retry(cmd, retry_on, [5, 30])
 
   def Editor(self, args):
     if self._options.requires_editor:
-      return self._side_effect_handler.Command(os.environ["EDITOR"], args,
-                                               pipe=False)
+      return self._side_effect_handler.Command(
+          os.environ["EDITOR"],
+          args,
+          pipe=False,
+          cwd=self.default_cwd)
 
   def ReadURL(self, url, params=None, retry_on=None, wait_plan=None):
     wait_plan = wait_plan or [3, 60, 600]
@@ -348,7 +393,7 @@
 
   def DeleteBranch(self, name):
     for line in self.GitBranch().splitlines():
-      if re.match(r".*\s+%s$" % name, line):
+      if re.match(r"\*?\s*%s$" % re.escape(name), line):
         msg = "Branch %s exists, do you want to delete it?" % name
         if self.Confirm(msg):
           self.GitDeleteBranch(name)
@@ -357,14 +402,15 @@
           msg = "Can't continue. Please delete branch %s and try again." % name
           self.Die(msg)
 
-  def InitialEnvironmentChecks(self):
+  def InitialEnvironmentChecks(self, cwd):
     # Cancel if this is not a git checkout.
-    if not os.path.exists(self._config[DOT_GIT_LOCATION]):  # pragma: no cover
+    if not os.path.exists(os.path.join(cwd, ".git")):  # pragma: no cover
       self.Die("This is not a git checkout, this script won't work for you.")
 
     # Cancel if EDITOR is unset or not executable.
     if (self._options.requires_editor and (not os.environ.get("EDITOR") or
-        Command("which", os.environ["EDITOR"]) is None)):  # pragma: no cover
+        self.Command(
+            "which", os.environ["EDITOR"]) is None)):  # pragma: no cover
       self.Die("Please set your EDITOR environment variable, you'll need it.")
 
   def CommonPrepare(self):
@@ -380,15 +426,19 @@
 
   def PrepareBranch(self):
     # Delete the branch that will be created later if it exists already.
-    self.DeleteBranch(self._config[BRANCHNAME])
+    self.DeleteBranch(self._config["BRANCHNAME"])
 
   def CommonCleanup(self):
     self.GitCheckout(self["current_branch"])
-    if self._config[BRANCHNAME] != self["current_branch"]:
-      self.GitDeleteBranch(self._config[BRANCHNAME])
+    if self._config["BRANCHNAME"] != self["current_branch"]:
+      self.GitDeleteBranch(self._config["BRANCHNAME"])
 
     # Clean up all temporary files.
-    Command("rm", "-f %s*" % self._config[PERSISTFILE_BASENAME])
+    for f in glob.iglob("%s*" % self._config["PERSISTFILE_BASENAME"]):
+      if os.path.isfile(f):
+        os.remove(f)
+      if os.path.isdir(f):
+        shutil.rmtree(f)
 
   def ReadAndPersistVersion(self, prefix=""):
     def ReadAndPersist(var_name, def_name):
@@ -396,7 +446,7 @@
       if match:
         value = match.group(1)
         self["%s%s" % (prefix, var_name)] = value
-    for line in LinesInFile(self._config[VERSION_FILE]):
+    for line in LinesInFile(os.path.join(self.default_cwd, VERSION_FILE)):
       for (var_name, def_name) in [("major", "MAJOR_VERSION"),
                                    ("minor", "MINOR_VERSION"),
                                    ("build", "BUILD_NUMBER"),
@@ -436,16 +486,53 @@
     except GitFailedException:
       self.WaitForResolvingConflicts(patch_file)
 
-  def FindLastTrunkPush(self, parent_hash="", include_patches=False):
+  def FindLastTrunkPush(
+      self, parent_hash="", branch="", include_patches=False):
     push_pattern = "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*"
     if not include_patches:
       # Non-patched versions only have three numbers followed by the "(based
       # on...) comment."
       push_pattern += " (based"
-    branch = "" if parent_hash else "svn/trunk"
+    branch = "" if parent_hash else branch or "svn/trunk"
     return self.GitLog(n=1, format="%H", grep=push_pattern,
                        parent_hash=parent_hash, branch=branch)
 
+  def ArrayToVersion(self, prefix):
+    return ".".join([self[prefix + "major"],
+                     self[prefix + "minor"],
+                     self[prefix + "build"],
+                     self[prefix + "patch"]])
+
+  def SetVersion(self, version_file, prefix):
+    output = ""
+    for line in FileToText(version_file).splitlines():
+      if line.startswith("#define MAJOR_VERSION"):
+        line = re.sub("\d+$", self[prefix + "major"], line)
+      elif line.startswith("#define MINOR_VERSION"):
+        line = re.sub("\d+$", self[prefix + "minor"], line)
+      elif line.startswith("#define BUILD_NUMBER"):
+        line = re.sub("\d+$", self[prefix + "build"], line)
+      elif line.startswith("#define PATCH_LEVEL"):
+        line = re.sub("\d+$", self[prefix + "patch"], line)
+      output += "%s\n" % line
+    TextToFile(output, version_file)
+
+  def SVNCommit(self, root, commit_message):
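+    # Export the last git commit as a patch, apply it to a clean svn checkout
+    # and commit it there.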
+    patch = self.GitDiff("HEAD^", "HEAD")
+    TextToFile(patch, self._config["PATCH_FILE"])
+    self.Command("svn", "update", cwd=self._options.svn)
+    if self.Command("svn", "status", cwd=self._options.svn) != "":
+      self.Die("SVN checkout not clean.")
+    if not self.Command("patch", "-d %s -p1 -i %s" %
+                        (root, self._config["PATCH_FILE"]),
+                        cwd=self._options.svn):
+      self.Die("Could not apply patch.")
+    self.Command(
+        "svn",
+        "commit --non-interactive --username=%s --config-dir=%s -m \"%s\"" %
+            (self._options.author, self._options.svn_config, commit_message),
+        cwd=self._options.svn)
+
 
 class UploadStep(Step):
   MESSAGE = "Upload for code review."
@@ -458,7 +545,8 @@
       print "Please enter the email address of a V8 reviewer for your patch: ",
       self.DieNoManualMode("A reviewer must be specified in forced mode.")
       reviewer = self.ReadLine()
-    self.GitUpload(reviewer, self._options.author, self._options.force_upload)
+    self.GitUpload(reviewer, self._options.author, self._options.force_upload,
+                   bypass_hooks=self._options.bypass_upload_hooks)
 
 
 class DetermineV8Sheriff(Step):
@@ -505,21 +593,19 @@
       message = step_class.MESSAGE
     except AttributeError:
       message = step_class.__name__
-    try:
-      requires = step_class.REQUIRES
-    except AttributeError:
-      requires = None
 
-    return step_class(message, requires, number=number, config=config,
+    return step_class(message, number=number, config=config,
                       state=state, options=options,
                       handler=side_effect_handler)
 
 
 class ScriptsBase(object):
   # TODO(machenbach): Move static config here.
-  def __init__(self, config, side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
+  def __init__(self,
+               config=None,
+               side_effect_handler=DEFAULT_SIDE_EFFECT_HANDLER,
                state=None):
-    self._config = config
+    self._config = config or self._Config()
     self._side_effect_handler = side_effect_handler
     self._state = state if state is not None else {}
 
@@ -535,10 +621,15 @@
   def _Steps(self):  # pragma: no cover
     raise Exception("Not implemented.")
 
+  def _Config(self):
+    return {}
+
   def MakeOptions(self, args=None):
     parser = argparse.ArgumentParser(description=self._Description())
     parser.add_argument("-a", "--author", default="",
                         help="The author email used for rietveld.")
+    parser.add_argument("--dry-run", default=False, action="store_true",
+                        help="Perform only read-only actions.")
     parser.add_argument("-g", "--googlers-mapping",
                         help="Path to the script mapping google accounts.")
     parser.add_argument("-r", "--reviewer", default="",
@@ -547,10 +638,14 @@
                         help=("Determine current sheriff to review CLs. On "
                               "success, this will overwrite the reviewer "
                               "option."))
+    parser.add_argument("--svn",
+                        help=("Optional full svn checkout for the commit. "
+                              "The folder needs to be the svn root."))
+    parser.add_argument("--svn-config",
+                        help=("Optional folder used as svn --config-dir."))
     parser.add_argument("-s", "--step",
         help="Specify the step where to start work. Default: 0.",
         default=0, type=int)
-
     self._PrepareOptions(parser)
 
     if args is None:  # pragma: no cover
@@ -567,10 +662,15 @@
       print "To determine the current sheriff, requires the googler mapping"
       parser.print_help()
       return None
+    if options.svn and not options.svn_config:
+      print "Using pure svn for committing requires also --svn-config"
+      parser.print_help()
+      return None
 
     # Defaults for options, common to all scripts.
     options.manual = getattr(options, "manual", True)
     options.force = getattr(options, "force", False)
+    options.bypass_upload_hooks = False
 
     # Derived options.
     options.requires_editor = not options.force
@@ -589,7 +689,7 @@
     if not options:
       return 1
 
-    state_file = "%s-state.json" % self._config[PERSISTFILE_BASENAME]
+    state_file = "%s-state.json" % self._config["PERSISTFILE_BASENAME"]
     if options.step == 0 and os.path.exists(state_file):
       os.remove(state_file)
 
@@ -599,7 +699,7 @@
                             options, self._side_effect_handler))
     for step in steps[options.step:]:
       if step.Run():
-        return 1
+        return 0
     return 0
 
   def Run(self, args=None):
diff --git a/tools/push-to-trunk/git_recipes.py b/tools/push-to-trunk/git_recipes.py
index 8c1e314..0f8fcef 100644
--- a/tools/push-to-trunk/git_recipes.py
+++ b/tools/push-to-trunk/git_recipes.py
@@ -28,6 +28,51 @@
 
 import re
 
+SHA1_RE = re.compile('^[a-fA-F0-9]{40}$')
+ROLL_DEPS_GIT_SVN_ID_RE = re.compile('^git-svn-id: .*@([0-9]+) .*$')
+
+# Regular expression that matches a single commit footer line.
+COMMIT_FOOTER_ENTRY_RE = re.compile(r'([^:]+):\s+(.+)')
+
+# Footer metadata key for commit position.
+COMMIT_POSITION_FOOTER_KEY = 'Cr-Commit-Position'
+
+# Regular expression to parse a commit position
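+# e.g., refs/heads/master@{#12345}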
+COMMIT_POSITION_RE = re.compile(r'(.+)@\{#(\d+)\}')
+
+# Key for the 'git-svn' ID metadata commit footer entry.
+GIT_SVN_ID_FOOTER_KEY = 'git-svn-id'
+
+# e.g., git-svn-id: https://v8.googlecode.com/svn/trunk@23117
+#     ce2b1a6d-e550-0410-aec6-3dcde31c8c00
+GIT_SVN_ID_RE = re.compile(r'((?:\w+)://[^@]+)@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
+
+
+# Copied from bot_update.py.
+def GetCommitMessageFooterMap(message):
+  """Returns: (dict) A dictionary of commit message footer entries.
+  """
+  footers = {}
+
+  # Extract the lines in the footer block.
+  lines = []
+  for line in message.strip().splitlines():
+    line = line.strip()
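+    # A blank line resets the collector so that only the message's final
+    # paragraph is treated as the footer block.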
+    if len(line) == 0:
+      del lines[:]
+      continue
+    lines.append(line)
+
+  # Parse the footer
+  for line in lines:
+    m = COMMIT_FOOTER_ENTRY_RE.match(line)
+    if not m:
+      # If any single line isn't valid, the entire footer is invalid.
+      footers.clear()
+      return footers
+    footers[m.group(1)] = m.group(2).strip()
+  return footers
+
 
 class GitFailedException(Exception):
   pass
@@ -49,51 +94,55 @@
 
 
 class GitRecipesMixin(object):
-  def GitIsWorkdirClean(self):
-    return self.Git("status -s -uno").strip() == ""
+  def GitIsWorkdirClean(self, **kwargs):
+    return self.Git("status -s -uno", **kwargs).strip() == ""
 
   @Strip
-  def GitBranch(self):
-    return self.Git("branch")
+  def GitBranch(self, **kwargs):
+    return self.Git("branch", **kwargs)
 
-  def GitCreateBranch(self, name, branch=""):
+  def GitCreateBranch(self, name, branch="", **kwargs):
     assert name
-    self.Git(MakeArgs(["checkout -b", name, branch]))
+    self.Git(MakeArgs(["checkout -b", name, branch]), **kwargs)
 
-  def GitDeleteBranch(self, name):
+  def GitDeleteBranch(self, name, **kwargs):
     assert name
-    self.Git(MakeArgs(["branch -D", name]))
+    self.Git(MakeArgs(["branch -D", name]), **kwargs)
 
-  def GitReset(self, name):
+  def GitReset(self, name, **kwargs):
     assert name
-    self.Git(MakeArgs(["reset --hard", name]))
+    self.Git(MakeArgs(["reset --hard", name]), **kwargs)
 
-  def GitRemotes(self):
-    return map(str.strip, self.Git(MakeArgs(["branch -r"])).splitlines())
+  def GitStash(self, **kwargs):
+    self.Git(MakeArgs(["stash"]), **kwargs)
 
-  def GitCheckout(self, name):
+  def GitRemotes(self, **kwargs):
+    return map(str.strip,
+               self.Git(MakeArgs(["branch -r"]), **kwargs).splitlines())
+
+  def GitCheckout(self, name, **kwargs):
     assert name
-    self.Git(MakeArgs(["checkout -f", name]))
+    self.Git(MakeArgs(["checkout -f", name]), **kwargs)
 
-  def GitCheckoutFile(self, name, branch_or_hash):
+  def GitCheckoutFile(self, name, branch_or_hash, **kwargs):
     assert name
     assert branch_or_hash
-    self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]))
+    self.Git(MakeArgs(["checkout -f", branch_or_hash, "--", name]), **kwargs)
 
-  def GitCheckoutFileSafe(self, name, branch_or_hash):
+  def GitCheckoutFileSafe(self, name, branch_or_hash, **kwargs):
     try:
-      self.GitCheckoutFile(name, branch_or_hash)
+      self.GitCheckoutFile(name, branch_or_hash, **kwargs)
     except GitFailedException:  # pragma: no cover
       # The file doesn't exist in that revision.
       return False
     return True
 
-  def GitChangedFiles(self, git_hash):
+  def GitChangedFiles(self, git_hash, **kwargs):
     assert git_hash
     try:
       files = self.Git(MakeArgs(["diff --name-only",
                                  git_hash,
-                                 "%s^" % git_hash]))
+                                 "%s^" % git_hash]), **kwargs)
       return map(str.strip, files.splitlines())
     except GitFailedException:  # pragma: no cover
       # Git fails using "^" at branch roots.
@@ -101,15 +150,15 @@
 
 
   @Strip
-  def GitCurrentBranch(self):
-    for line in self.Git("status -s -b -uno").strip().splitlines():
+  def GitCurrentBranch(self, **kwargs):
+    for line in self.Git("status -s -b -uno", **kwargs).strip().splitlines():
       match = re.match(r"^## (.+)", line)
       if match: return match.group(1)
     raise Exception("Couldn't find curent branch.")  # pragma: no cover
 
   @Strip
   def GitLog(self, n=0, format="", grep="", git_hash="", parent_hash="",
-             branch="", reverse=False):
+             branch="", reverse=False, **kwargs):
     assert not (git_hash and parent_hash)
     args = ["log"]
     if n > 0:
@@ -125,26 +174,27 @@
     if parent_hash:
       args.append("%s^" % parent_hash)
     args.append(branch)
-    return self.Git(MakeArgs(args))
+    return self.Git(MakeArgs(args), **kwargs)
 
-  def GitGetPatch(self, git_hash):
+  def GitGetPatch(self, git_hash, **kwargs):
     assert git_hash
-    return self.Git(MakeArgs(["log", "-1", "-p", git_hash]))
+    return self.Git(MakeArgs(["log", "-1", "-p", git_hash]), **kwargs)
 
   # TODO(machenbach): Unused? Remove.
-  def GitAdd(self, name):
+  def GitAdd(self, name, **kwargs):
     assert name
-    self.Git(MakeArgs(["add", Quoted(name)]))
+    self.Git(MakeArgs(["add", Quoted(name)]), **kwargs)
 
-  def GitApplyPatch(self, patch_file, reverse=False):
+  def GitApplyPatch(self, patch_file, reverse=False, **kwargs):
     assert patch_file
     args = ["apply --index --reject"]
     if reverse:
       args.append("--reverse")
     args.append(Quoted(patch_file))
-    self.Git(MakeArgs(args))
+    self.Git(MakeArgs(args), **kwargs)
 
-  def GitUpload(self, reviewer="", author="", force=False):
+  def GitUpload(self, reviewer="", author="", force=False, cq=False,
+                bypass_hooks=False, **kwargs):
     args = ["cl upload --send-mail"]
     if author:
       args += ["--email", Quoted(author)]
@@ -152,51 +202,108 @@
       args += ["-r", Quoted(reviewer)]
     if force:
       args.append("-f")
+    if cq:
+      args.append("--use-commit-queue")
+    if bypass_hooks:
+      args.append("--bypass-hooks")
     # TODO(machenbach): Check output in forced mode. Verify that all required
     # base files were uploaded, if not retry.
-    self.Git(MakeArgs(args), pipe=False)
+    self.Git(MakeArgs(args), pipe=False, **kwargs)
 
-  def GitCommit(self, message="", file_name=""):
+  def GitCommit(self, message="", file_name="", author=None, **kwargs):
     assert message or file_name
     args = ["commit"]
     if file_name:
       args += ["-aF", Quoted(file_name)]
     if message:
       args += ["-am", Quoted(message)]
-    self.Git(MakeArgs(args))
+    if author:
+      args += ["--author", "\"%s <%s>\"" % (author, author)]
+    self.Git(MakeArgs(args), **kwargs)
 
-  def GitPresubmit(self):
-    self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"")
+  def GitPresubmit(self, **kwargs):
+    self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"", **kwargs)
 
-  def GitDCommit(self):
-    self.Git("cl dcommit -f --bypass-hooks", retry_on=lambda x: x is None)
+  def GitDCommit(self, **kwargs):
+    self.Git(
+        "cl dcommit -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)
 
-  def GitDiff(self, loc1, loc2):
-    return self.Git(MakeArgs(["diff", loc1, loc2]))
+  def GitDiff(self, loc1, loc2, **kwargs):
+    return self.Git(MakeArgs(["diff", loc1, loc2]), **kwargs)
 
-  def GitPull(self):
-    self.Git("pull")
+  def GitPull(self, **kwargs):
+    self.Git("pull", **kwargs)
 
-  def GitSVNFetch(self):
-    self.Git("svn fetch")
+  def GitFetchOrigin(self, **kwargs):
+    self.Git("fetch origin", **kwargs)
+
+  def GitConvertToSVNRevision(self, git_hash, **kwargs):
+    result = self.Git(MakeArgs(["rev-list", "-n", "1", git_hash]), **kwargs)
+    if not result or not SHA1_RE.match(result):
+      raise GitFailedException("Git hash %s is unknown." % git_hash)
+    log = self.GitLog(n=1, format="%B", git_hash=git_hash, **kwargs)
+    for line in reversed(log.splitlines()):
+      match = ROLL_DEPS_GIT_SVN_ID_RE.match(line.strip())
+      if match:
+        return match.group(1)
+    raise GitFailedException("Couldn't convert %s to SVN." % git_hash)
+
+  @Strip
+  # Copied from bot_update.py and modified for svn-like numbers only.
+  def GetCommitPositionNumber(self, git_hash, **kwargs):
+    """Dumps the 'git' log for a specific revision and parses out the commit
+    position number.
+
+    If a commit position metadata key is found, its number will be returned.
+
+    Otherwise, we will search for a 'git-svn' metadata entry. If one is found,
+    its SVN revision value is returned.
+    """
+    git_log = self.GitLog(format='%B', n=1, git_hash=git_hash, **kwargs)
+    footer_map = GetCommitMessageFooterMap(git_log)
+
+    # Search for commit position metadata
+    value = footer_map.get(COMMIT_POSITION_FOOTER_KEY)
+    if value:
+      match = COMMIT_POSITION_RE.match(value)
+      if match:
+        return match.group(2)
+
+    # Extract the svn revision from 'git-svn' metadata
+    value = footer_map.get(GIT_SVN_ID_FOOTER_KEY)
+    if value:
+      match = GIT_SVN_ID_RE.match(value)
+      if match:
+        return match.group(2)
+    return None
+
+  ### Git svn stuff
+
+  def GitSVNFetch(self, **kwargs):
+    self.Git("svn fetch", **kwargs)
+
+  def GitSVNRebase(self, **kwargs):
+    self.Git("svn rebase", **kwargs)
 
   # TODO(machenbach): Unused? Remove.
   @Strip
-  def GitSVNLog(self):
-    return self.Git("svn log -1 --oneline")
+  def GitSVNLog(self, **kwargs):
+    return self.Git("svn log -1 --oneline", **kwargs)
 
   @Strip
-  def GitSVNFindGitHash(self, revision, branch=""):
+  def GitSVNFindGitHash(self, revision, branch="", **kwargs):
     assert revision
-    return self.Git(MakeArgs(["svn find-rev", "r%s" % revision, branch]))
+    return self.Git(
+        MakeArgs(["svn find-rev", "r%s" % revision, branch]), **kwargs)
 
   @Strip
-  def GitSVNFindSVNRev(self, git_hash, branch=""):
-    return self.Git(MakeArgs(["svn find-rev", git_hash, branch]))
+  def GitSVNFindSVNRev(self, git_hash, branch="", **kwargs):
+    return self.Git(MakeArgs(["svn find-rev", git_hash, branch]), **kwargs)
 
-  def GitSVNDCommit(self):
-    return self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None)
+  def GitSVNDCommit(self, **kwargs):
+    return self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None, **kwargs)
 
-  def GitSVNTag(self, version):
+  def GitSVNTag(self, version, **kwargs):
     self.Git(("svn tag %s -m \"Tagging version %s\"" % (version, version)),
-             retry_on=lambda x: x is None)
+             retry_on=lambda x: x is None,
+             **kwargs)
diff --git a/tools/push-to-trunk/merge_to_branch.py b/tools/push-to-trunk/merge_to_branch.py
index bd9531f..3fd3450 100755
--- a/tools/push-to-trunk/merge_to_branch.py
+++ b/tools/push-to-trunk/merge_to_branch.py
@@ -32,35 +32,18 @@
 
 from common_includes import *
 
-ALREADY_MERGING_SENTINEL_FILE = "ALREADY_MERGING_SENTINEL_FILE"
-COMMIT_HASHES_FILE = "COMMIT_HASHES_FILE"
-TEMPORARY_PATCH_FILE = "TEMPORARY_PATCH_FILE"
-
-CONFIG = {
-  BRANCHNAME: "prepare-merge",
-  PERSISTFILE_BASENAME: "/tmp/v8-merge-to-branch-tempfile",
-  ALREADY_MERGING_SENTINEL_FILE:
-      "/tmp/v8-merge-to-branch-tempfile-already-merging",
-  DOT_GIT_LOCATION: ".git",
-  VERSION_FILE: "src/version.cc",
-  TEMPORARY_PATCH_FILE: "/tmp/v8-prepare-merge-tempfile-temporary-patch",
-  COMMITMSG_FILE: "/tmp/v8-prepare-merge-tempfile-commitmsg",
-  COMMIT_HASHES_FILE: "/tmp/v8-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
-}
-
-
 class Preparation(Step):
   MESSAGE = "Preparation."
 
   def RunStep(self):
-    if os.path.exists(self.Config(ALREADY_MERGING_SENTINEL_FILE)):
+    if os.path.exists(self.Config("ALREADY_MERGING_SENTINEL_FILE")):
       if self._options.force:
-        os.remove(self.Config(ALREADY_MERGING_SENTINEL_FILE))
+        os.remove(self.Config("ALREADY_MERGING_SENTINEL_FILE"))
       elif self._options.step == 0:  # pragma: no cover
         self.Die("A merge is already in progress")
-    open(self.Config(ALREADY_MERGING_SENTINEL_FILE), "a").close()
+    open(self.Config("ALREADY_MERGING_SENTINEL_FILE"), "a").close()
 
-    self.InitialEnvironmentChecks()
+    self.InitialEnvironmentChecks(self.default_cwd)
     if self._options.revert_bleeding_edge:
       self["merge_to_branch"] = "bleeding_edge"
     elif self._options.branch:
@@ -76,7 +59,7 @@
   MESSAGE = "Create a fresh branch for the patch."
 
   def RunStep(self):
-    self.GitCreateBranch(self.Config(BRANCHNAME),
+    self.GitCreateBranch(self.Config("BRANCHNAME"),
                          "svn/%s" % self["merge_to_branch"])
 
 
@@ -159,8 +142,8 @@
       print("Applying patch for %s to %s..."
             % (commit_hash, self["merge_to_branch"]))
       patch = self.GitGetPatch(commit_hash)
-      TextToFile(patch, self.Config(TEMPORARY_PATCH_FILE))
-      self.ApplyPatch(self.Config(TEMPORARY_PATCH_FILE), self._options.revert)
+      TextToFile(patch, self.Config("TEMPORARY_PATCH_FILE"))
+      self.ApplyPatch(self.Config("TEMPORARY_PATCH_FILE"), self._options.revert)
     if self._options.patch:
       self.ApplyPatch(self._options.patch, self._options.revert)
 
@@ -185,14 +168,14 @@
     if self.Confirm("Automatically increment PATCH_LEVEL? (Saying 'n' will "
                     "fire up your EDITOR on %s so you can make arbitrary "
                     "changes. When you're done, save the file and exit your "
-                    "EDITOR.)" % self.Config(VERSION_FILE)):
-      text = FileToText(self.Config(VERSION_FILE))
+                    "EDITOR.)" % VERSION_FILE):
+      text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
       text = MSub(r"(?<=#define PATCH_LEVEL)(?P<space>\s+)\d*$",
                   r"\g<space>%s" % new_patch,
                   text)
-      TextToFile(text, self.Config(VERSION_FILE))
+      TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
     else:
-      self.Editor(self.Config(VERSION_FILE))
+      self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
     self.ReadAndPersistVersion("new_")
     self["version"] = "%s.%s.%s.%s" % (self["new_major"],
                                        self["new_minor"],
@@ -215,15 +198,15 @@
       title = ("Version %s (merged %s)"
                % (self["version"], self["revision_list"]))
     self["new_commit_msg"] = "%s\n\n%s" % (title, self["new_commit_msg"])
-    TextToFile(self["new_commit_msg"], self.Config(COMMITMSG_FILE))
-    self.GitCommit(file_name=self.Config(COMMITMSG_FILE))
+    TextToFile(self["new_commit_msg"], self.Config("COMMITMSG_FILE"))
+    self.GitCommit(file_name=self.Config("COMMITMSG_FILE"))
 
 
 class CommitRepository(Step):
   MESSAGE = "Commit to the repository."
 
   def RunStep(self):
-    self.GitCheckout(self.Config(BRANCHNAME))
+    self.GitCheckout(self.Config("BRANCHNAME"))
     self.WaitForLGTM()
     self.GitPresubmit()
     self.GitDCommit()
@@ -309,8 +292,19 @@
       if not options.message:
         print "You must specify a merge comment if no patches are specified"
         return False
+    options.bypass_upload_hooks = True
     return True
 
+  def _Config(self):
+    return {
+      "BRANCHNAME": "prepare-merge",
+      "PERSISTFILE_BASENAME": "/tmp/v8-merge-to-branch-tempfile",
+      "ALREADY_MERGING_SENTINEL_FILE":
+          "/tmp/v8-merge-to-branch-tempfile-already-merging",
+      "TEMPORARY_PATCH_FILE": "/tmp/v8-prepare-merge-tempfile-temporary-patch",
+      "COMMITMSG_FILE": "/tmp/v8-prepare-merge-tempfile-commitmsg",
+    }
+
   def _Steps(self):
     return [
       Preparation,
@@ -330,4 +324,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(MergeToBranch(CONFIG).Run())
+  sys.exit(MergeToBranch().Run())
diff --git a/tools/push-to-trunk/push_to_trunk.py b/tools/push-to-trunk/push_to_trunk.py
index c317bdc..8a9629e 100755
--- a/tools/push-to-trunk/push_to_trunk.py
+++ b/tools/push-to-trunk/push_to_trunk.py
@@ -27,50 +27,36 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import argparse
+import os
 import sys
 import tempfile
 import urllib2
 
 from common_includes import *
 
-TRUNKBRANCH = "TRUNKBRANCH"
-
-CONFIG = {
-  BRANCHNAME: "prepare-push",
-  TRUNKBRANCH: "trunk-push",
-  PERSISTFILE_BASENAME: "/tmp/v8-push-to-trunk-tempfile",
-  DOT_GIT_LOCATION: ".git",
-  VERSION_FILE: "src/version.cc",
-  CHANGELOG_FILE: "ChangeLog",
-  CHANGELOG_ENTRY_FILE: "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
-  PATCH_FILE: "/tmp/v8-push-to-trunk-tempfile-patch-file",
-  COMMITMSG_FILE: "/tmp/v8-push-to-trunk-tempfile-commitmsg",
-}
-
 PUSH_MESSAGE_SUFFIX = " (based on bleeding_edge revision r%d)"
 PUSH_MESSAGE_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
 
-
 class Preparation(Step):
   MESSAGE = "Preparation."
 
   def RunStep(self):
-    self.InitialEnvironmentChecks()
+    self.InitialEnvironmentChecks(self.default_cwd)
     self.CommonPrepare()
 
-    if(self["current_branch"] == self.Config(TRUNKBRANCH)
-       or self["current_branch"] == self.Config(BRANCHNAME)):
+    if(self["current_branch"] == self.Config("TRUNKBRANCH")
+       or self["current_branch"] == self.Config("BRANCHNAME")):
       print "Warning: Script started on branch %s" % self["current_branch"]
 
     self.PrepareBranch()
-    self.DeleteBranch(self.Config(TRUNKBRANCH))
+    self.DeleteBranch(self.Config("TRUNKBRANCH"))
 
 
 class FreshBranch(Step):
   MESSAGE = "Create a fresh branch."
 
   def RunStep(self):
-    self.GitCreateBranch(self.Config(BRANCHNAME), "svn/bleeding_edge")
+    self.GitCreateBranch(self.Config("BRANCHNAME"), "svn/bleeding_edge")
 
 
 class PreparePushRevision(Step):
@@ -124,29 +110,59 @@
     self["last_push_bleeding_edge"] = last_push_bleeding_edge
 
 
+# TODO(machenbach): Code similarities with bump_up_version.py. Merge after
+# turning this script into a pure git script.
+class GetCurrentBleedingEdgeVersion(Step):
+  MESSAGE = "Get latest bleeding edge version."
+
+  def RunStep(self):
+    self.GitCheckoutFile(VERSION_FILE, "svn/bleeding_edge")
+
+    # Store latest version.
+    self.ReadAndPersistVersion("latest_")
+    self["latest_version"] = self.ArrayToVersion("latest_")
+    print "Bleeding edge version: %s" % self["latest_version"]
+
+
 class IncrementVersion(Step):
   MESSAGE = "Increment version number."
 
   def RunStep(self):
     # Retrieve current version from last trunk push.
-    self.GitCheckoutFile(self.Config(VERSION_FILE), self["last_push_trunk"])
+    self.GitCheckoutFile(VERSION_FILE, self["last_push_trunk"])
     self.ReadAndPersistVersion()
+    self["trunk_version"] = self.ArrayToVersion("")
+
+    if self["latest_build"] == "9999":  # pragma: no cover
+      # If version control on bleeding edge was switched off, just use the last
+      # trunk version.
+      self["latest_version"] = self["trunk_version"]
+
+    if SortingKey(self["trunk_version"]) < SortingKey(self["latest_version"]):
+      # If the version on bleeding_edge is newer than on trunk, use it.
+      self.GitCheckoutFile(VERSION_FILE, "svn/bleeding_edge")
+      self.ReadAndPersistVersion()
 
     if self.Confirm(("Automatically increment BUILD_NUMBER? (Saying 'n' will "
                      "fire up your EDITOR on %s so you can make arbitrary "
                      "changes. When you're done, save the file and exit your "
-                     "EDITOR.)" % self.Config(VERSION_FILE))):
-      text = FileToText(self.Config(VERSION_FILE))
+                     "EDITOR.)" % VERSION_FILE)):
+
+      text = FileToText(os.path.join(self.default_cwd, VERSION_FILE))
       text = MSub(r"(?<=#define BUILD_NUMBER)(?P<space>\s+)\d*$",
                   r"\g<space>%s" % str(int(self["build"]) + 1),
                   text)
-      TextToFile(text, self.Config(VERSION_FILE))
+      TextToFile(text, os.path.join(self.default_cwd, VERSION_FILE))
     else:
-      self.Editor(self.Config(VERSION_FILE))
+      self.Editor(os.path.join(self.default_cwd, VERSION_FILE))
 
     # Variables prefixed with 'new_' contain the new version numbers for the
     # ongoing trunk push.
     self.ReadAndPersistVersion("new_")
+
+    # Make sure patch level is 0 in a new push.
+    self["new_patch"] = "0"
+
     self["version"] = "%s.%s.%s" % (self["new_major"],
                                     self["new_minor"],
                                     self["new_build"])
@@ -176,7 +192,7 @@
   def RunStep(self):
     self["date"] = self.GetDate()
     output = "%s: Version %s\n\n" % (self["date"], self["version"])
-    TextToFile(output, self.Config(CHANGELOG_ENTRY_FILE))
+    TextToFile(output, self.Config("CHANGELOG_ENTRY_FILE"))
     commits = self.GitLog(format="%H",
         git_hash="%s..%s" % (self["last_push_bleeding_edge"],
                              self["push_hash"]))
@@ -192,17 +208,17 @@
 
     # Auto-format commit messages.
     body = MakeChangeLogBody(commit_messages, auto_format=True)
-    AppendToFile(body, self.Config(CHANGELOG_ENTRY_FILE))
+    AppendToFile(body, self.Config("CHANGELOG_ENTRY_FILE"))
 
     msg = ("        Performance and stability improvements on all platforms."
            "\n#\n# The change log above is auto-generated. Please review if "
            "all relevant\n# commit messages from the list below are included."
            "\n# All lines starting with # will be stripped.\n#\n")
-    AppendToFile(msg, self.Config(CHANGELOG_ENTRY_FILE))
+    AppendToFile(msg, self.Config("CHANGELOG_ENTRY_FILE"))
 
     # Include unformatted commit messages as a reference in a comment.
     comment_body = MakeComment(MakeChangeLogBody(commit_messages))
-    AppendToFile(comment_body, self.Config(CHANGELOG_ENTRY_FILE))
+    AppendToFile(comment_body, self.Config("CHANGELOG_ENTRY_FILE"))
 
 
 class EditChangeLog(Step):
@@ -213,10 +229,10 @@
            "entry, then edit its contents to your liking. When you're done, "
            "save the file and exit your EDITOR. ")
     self.ReadLine(default="")
-    self.Editor(self.Config(CHANGELOG_ENTRY_FILE))
+    self.Editor(self.Config("CHANGELOG_ENTRY_FILE"))
 
     # Strip comments and reformat with correct indentation.
-    changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE)).rstrip()
+    changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE")).rstrip()
     changelog_entry = StripComments(changelog_entry)
     changelog_entry = "\n".join(map(Fill80, changelog_entry.splitlines()))
     changelog_entry = changelog_entry.lstrip()
@@ -225,7 +241,7 @@
       self.Die("Empty ChangeLog entry.")
 
     # Save the new change log for adding it later to the trunk patch.
-    TextToFile(changelog_entry, self.Config(CHANGELOG_ENTRY_FILE))
+    TextToFile(changelog_entry, self.Config("CHANGELOG_ENTRY_FILE"))
 
 
 class StragglerCommits(Step):
@@ -244,10 +260,10 @@
     # Instead of relying on "git rebase -i", we'll just create a diff, because
     # that's easier to automate.
     TextToFile(self.GitDiff("svn/trunk", self["push_hash"]),
-               self.Config(PATCH_FILE))
+               self.Config("PATCH_FILE"))
 
     # Convert the ChangeLog entry to commit message format.
-    text = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
+    text = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
 
     # Remove date and trailing white space.
     text = re.sub(r"^%s: " % self["date"], "", text.rstrip())
@@ -267,22 +283,22 @@
 
     if not text:  # pragma: no cover
       self.Die("Commit message editing failed.")
-    TextToFile(text, self.Config(COMMITMSG_FILE))
+    TextToFile(text, self.Config("COMMITMSG_FILE"))
 
 
 class NewBranch(Step):
   MESSAGE = "Create a new branch from trunk."
 
   def RunStep(self):
-    self.GitCreateBranch(self.Config(TRUNKBRANCH), "svn/trunk")
+    self.GitCreateBranch(self.Config("TRUNKBRANCH"), "svn/trunk")
 
 
 class ApplyChanges(Step):
   MESSAGE = "Apply squashed changes."
 
   def RunStep(self):
-    self.ApplyPatch(self.Config(PATCH_FILE))
-    Command("rm", "-f %s*" % self.Config(PATCH_FILE))
+    self.ApplyPatch(self.Config("PATCH_FILE"))
+    os.remove(self.Config("PATCH_FILE"))
 
 
 class AddChangeLog(Step):
@@ -292,12 +308,12 @@
     # The change log has been modified by the patch. Reset it to the version
     # on trunk and apply the exact changes determined by this PrepareChangeLog
     # step above.
-    self.GitCheckoutFile(self.Config(CHANGELOG_FILE), "svn/trunk")
-    changelog_entry = FileToText(self.Config(CHANGELOG_ENTRY_FILE))
-    old_change_log = FileToText(self.Config(CHANGELOG_FILE))
+    self.GitCheckoutFile(self.Config("CHANGELOG_FILE"), "svn/trunk")
+    changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
+    old_change_log = FileToText(self.Config("CHANGELOG_FILE"))
     new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
-    TextToFile(new_change_log, self.Config(CHANGELOG_FILE))
-    os.remove(self.Config(CHANGELOG_ENTRY_FILE))
+    TextToFile(new_change_log, self.Config("CHANGELOG_FILE"))
+    os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
 
 
 class SetVersion(Step):
@@ -306,29 +322,16 @@
   def RunStep(self):
     # The version file has been modified by the patch. Reset it to the version
     # on trunk and apply the correct version.
-    self.GitCheckoutFile(self.Config(VERSION_FILE), "svn/trunk")
-    output = ""
-    for line in FileToText(self.Config(VERSION_FILE)).splitlines():
-      if line.startswith("#define MAJOR_VERSION"):
-        line = re.sub("\d+$", self["new_major"], line)
-      elif line.startswith("#define MINOR_VERSION"):
-        line = re.sub("\d+$", self["new_minor"], line)
-      elif line.startswith("#define BUILD_NUMBER"):
-        line = re.sub("\d+$", self["new_build"], line)
-      elif line.startswith("#define PATCH_LEVEL"):
-        line = re.sub("\d+$", "0", line)
-      elif line.startswith("#define IS_CANDIDATE_VERSION"):
-        line = re.sub("\d+$", "0", line)
-      output += "%s\n" % line
-    TextToFile(output, self.Config(VERSION_FILE))
+    self.GitCheckoutFile(VERSION_FILE, "svn/trunk")
+    self.SetVersion(os.path.join(self.default_cwd, VERSION_FILE), "new_")
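
The per-line rewriting deleted here presumably moves into a shared SetVersion helper (the "new_" prefix hints that it reads the same self["new_*"] values). A minimal sketch that mirrors the deleted loop, with plain file I/O standing in for FileToText/TextToFile:

import re

def set_version_file(path, new):
    # Patch the numeric tail of each known "#define" line, exactly as the
    # removed loop did; "new" maps field names to replacement numbers.
    fields = [
        ("#define MAJOR_VERSION", new["major"]),
        ("#define MINOR_VERSION", new["minor"]),
        ("#define BUILD_NUMBER", new["build"]),
        ("#define PATCH_LEVEL", "0"),
        ("#define IS_CANDIDATE_VERSION", "0"),
    ]
    output = ""
    for line in open(path).read().splitlines():
        for prefix, value in fields:
            if line.startswith(prefix):
                line = re.sub(r"\d+$", value, line)
        output += "%s\n" % line
    open(path, "w").write(output)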
 
 
 class CommitTrunk(Step):
   MESSAGE = "Commit to local trunk branch."
 
   def RunStep(self):
-    self.GitCommit(file_name = self.Config(COMMITMSG_FILE))
-    Command("rm", "-f %s*" % self.Config(COMMITMSG_FILE))
+    self.GitCommit(file_name = self.Config("COMMITMSG_FILE"))
+    os.remove(self.Config("COMMITMSG_FILE"))
 
 
 class SanityCheck(Step):
@@ -339,7 +342,7 @@
     # prepare push process.
     if not self.Confirm("Please check if your local checkout is sane: Inspect "
         "%s, compile, run tests. Do you want to commit this new trunk "
-        "revision to the repository?" % self.Config(VERSION_FILE)):
+        "revision to the repository?" % VERSION_FILE):
       self.Die("Execution canceled.")  # pragma: no cover
 
 
@@ -385,8 +388,8 @@
     print "%s\ttrunk\t%s" % (self["version"], self["trunk_revision"])
 
     self.CommonCleanup()
-    if self.Config(TRUNKBRANCH) != self["current_branch"]:
-      self.GitDeleteBranch(self.Config(TRUNKBRANCH))
+    if self.Config("TRUNKBRANCH") != self["current_branch"]:
+      self.GitDeleteBranch(self.Config("TRUNKBRANCH"))
 
 
 class PushToTrunk(ScriptsBase):
@@ -422,12 +425,24 @@
     options.tbr_commit = not options.manual
     return True
 
+  def _Config(self):
+    return {
+      "BRANCHNAME": "prepare-push",
+      "TRUNKBRANCH": "trunk-push",
+      "PERSISTFILE_BASENAME": "/tmp/v8-push-to-trunk-tempfile",
+      "CHANGELOG_FILE": "ChangeLog",
+      "CHANGELOG_ENTRY_FILE": "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
+      "PATCH_FILE": "/tmp/v8-push-to-trunk-tempfile-patch-file",
+      "COMMITMSG_FILE": "/tmp/v8-push-to-trunk-tempfile-commitmsg",
+    }
+
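With the module-level constants gone, each script owns its configuration as a plain string-keyed dict. A hedged sketch of how Config("KEY") presumably resolves (the real lookup lives in common_includes.py; tests inject TEST_CONFIG through the constructor instead, as the Run(...) calls in test_scripts.py below show):

class ScriptsBaseSketch(object):
    # Illustrative only: scripts define _Config(); callers may override it.
    def __init__(self, config=None):
        self._config = config or self._Config()

    def Config(self, key):
        return self._config[key]

    def _Config(self):
        return {"BRANCHNAME": "prepare-push"}

assert ScriptsBaseSketch().Config("BRANCHNAME") == "prepare-push"
assert ScriptsBaseSketch({"BRANCHNAME": "x"}).Config("BRANCHNAME") == "x"
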
   def _Steps(self):
     return [
       Preparation,
       FreshBranch,
       PreparePushRevision,
       DetectLastPush,
+      GetCurrentBleedingEdgeVersion,
       IncrementVersion,
       PrepareChangeLog,
       EditChangeLog,
@@ -446,4 +461,4 @@
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(PushToTrunk(CONFIG).Run())
+  sys.exit(PushToTrunk().Run())
diff --git a/tools/push-to-trunk/releases.py b/tools/push-to-trunk/releases.py
index 2a22b91..1d26198 100755
--- a/tools/push-to-trunk/releases.py
+++ b/tools/push-to-trunk/releases.py
@@ -20,15 +20,9 @@
 
 from common_includes import *
 
-DEPS_FILE = "DEPS_FILE"
-CHROMIUM = "CHROMIUM"
-
 CONFIG = {
-  BRANCHNAME: "retrieve-v8-releases",
-  PERSISTFILE_BASENAME: "/tmp/v8-releases-tempfile",
-  DOT_GIT_LOCATION: ".git",
-  VERSION_FILE: "src/version.cc",
-  DEPS_FILE: "DEPS",
+  "BRANCHNAME": "retrieve-v8-releases",
+  "PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
 }
 
 # Expression for retrieving the bleeding edge revision from a commit message.
@@ -47,20 +41,15 @@
 
 # Expression with three versions (historical) for extracting the v8 revision
 # from the chromium DEPS file.
-DEPS_RE = re.compile(r'^\s*(?:"v8_revision": "'
-                      '|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@'
-                      '|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)'
-                      '([0-9]+)".*$', re.M)
+DEPS_RE = re.compile(r"""^\s*(?:["']v8_revision["']: ["']"""
+                     """|\(Var\("googlecode_url"\) % "v8"\) \+ "\/trunk@"""
+                     """|"http\:\/\/v8\.googlecode\.com\/svn\/trunk@)"""
+                     """([^"']+)["'].*$""", re.M)
 
-
-def SortingKey(version):
-  """Key for sorting version number strings: '3.11' > '3.2.1.1'"""
-  version_keys = map(int, version.split("."))
-  # Fill up to full version numbers to normalize comparison.
-  while len(version_keys) < 4:
-    version_keys.append(0)
-  # Fill digits.
-  return ".".join(map("{0:03d}".format, version_keys))
+# Expression to pick tag and revision for bleeding edge tags. To be used with
+# output of 'svn log'.
+BLEEDING_EDGE_TAGS_RE = re.compile(
+    r"A \/tags\/([^\s]+) \(from \/branches\/bleeding_edge\:(\d+)\)")
 
 
 def SortBranches(branches):
@@ -150,24 +139,14 @@
         patches = "-%s" % patches
     return patches
 
-  def GetRelease(self, git_hash, branch):
-    self.ReadAndPersistVersion()
-    base_version = [self["major"], self["minor"], self["build"]]
-    version = ".".join(base_version)
-    body = self.GitLog(n=1, format="%B", git_hash=git_hash)
-
-    patches = ""
-    if self["patch"] != "0":
-      version += ".%s" % self["patch"]
-      patches = self.GetMergedPatches(body)
-
-    title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+  def GetReleaseDict(
+      self, git_hash, bleeding_edge_rev, branch, version, patches, cl_body):
     revision = self.GitSVNFindSVNRev(git_hash)
     return {
       # The SVN revision on the branch.
       "revision": revision,
       # The SVN revision on bleeding edge (only for newer trunk pushes).
-      "bleeding_edge": self.GetBleedingEdgeFromPush(title),
+      "bleeding_edge": bleeding_edge_rev,
       # The branch name.
       "branch": branch,
       # The version for displaying in the form 3.26.3 or 3.26.3.12.
@@ -182,22 +161,53 @@
       "chromium_branch": "",
       # Link to the CL on code review. Trunk pushes are not uploaded, so this
       # field will be populated below with the recent roll CL link.
-      "review_link": MatchSafe(REVIEW_LINK_RE.search(body)),
+      "review_link": MatchSafe(REVIEW_LINK_RE.search(cl_body)),
       # Link to the commit message on google code.
       "revision_link": ("https://code.google.com/p/v8/source/detail?r=%s"
                         % revision),
-    }, self["patch"]
+    }
+
+  def GetRelease(self, git_hash, branch):
+    self.ReadAndPersistVersion()
+    base_version = [self["major"], self["minor"], self["build"]]
+    version = ".".join(base_version)
+    body = self.GitLog(n=1, format="%B", git_hash=git_hash)
+
+    patches = ""
+    if self["patch"] != "0":
+      version += ".%s" % self["patch"]
+      patches = self.GetMergedPatches(body)
+
+    title = self.GitLog(n=1, format="%s", git_hash=git_hash)
+    return self.GetReleaseDict(
+        git_hash, self.GetBleedingEdgeFromPush(title), branch, version,
+        patches, body), self["patch"]
+
+  def GetReleasesFromBleedingEdge(self):
+    tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v --limit 20")
+    releases = []
+    for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
+      git_hash = self.GitSVNFindGitHash(revision)
+
+      # Add bleeding edge release. It does not contain patches or a code
+      # review link, as tags are not uploaded.
+      releases.append(self.GetReleaseDict(
+        git_hash, revision, "bleeding_edge", tag, "", ""))
+    return releases
 
   def GetReleasesFromBranch(self, branch):
     self.GitReset("svn/%s" % branch)
+    if branch == 'bleeding_edge':
+      return self.GetReleasesFromBleedingEdge()
+
     releases = []
     try:
       for git_hash in self.GitLog(format="%H").splitlines():
-        if self._config[VERSION_FILE] not in self.GitChangedFiles(git_hash):
+        if VERSION_FILE not in self.GitChangedFiles(git_hash):
           continue
         if self.ExceedsMax(releases):
           break  # pragma: no cover
-        if not self.GitCheckoutFileSafe(self._config[VERSION_FILE], git_hash):
+        if not self.GitCheckoutFileSafe(VERSION_FILE, git_hash):
           break  # pragma: no cover
 
         release, patch_level = self.GetRelease(git_hash, branch)
@@ -215,11 +225,11 @@
       pass
 
     # Clean up checked-out version file.
-    self.GitCheckoutFileSafe(self._config[VERSION_FILE], "HEAD")
+    self.GitCheckoutFileSafe(VERSION_FILE, "HEAD")
     return releases
 
   def RunStep(self):
-    self.GitCreateBranch(self._config[BRANCHNAME])
+    self.GitCreateBranch(self._config["BRANCHNAME"])
     # Get relevant remote branches, e.g. "svn/3.25".
     branches = filter(lambda s: re.match(r"^svn/\d+\.\d+$", s),
                       self.GitRemotes())
@@ -235,14 +245,16 @@
       releases += self.GetReleasesFromBranch(stable)
       releases += self.GetReleasesFromBranch(beta)
       releases += self.GetReleasesFromBranch("trunk")
+      releases += self.GetReleasesFromBranch("bleeding_edge")
     elif self._options.branch == 'all':  # pragma: no cover
       # Retrieve the full release history.
       for branch in branches:
         releases += self.GetReleasesFromBranch(branch)
       releases += self.GetReleasesFromBranch("trunk")
+      releases += self.GetReleasesFromBranch("bleeding_edge")
     else:  # pragma: no cover
       # Retrieve history for a specified branch.
-      assert self._options.branch in branches + ["trunk"]
+      assert self._options.branch in branches + ["trunk", "bleeding_edge"]
       releases += self.GetReleasesFromBranch(self._options.branch)
 
     self["releases"] = sorted(releases,
@@ -250,68 +262,68 @@
                               reverse=True)
 
 
-# TODO(machenbach): Parts of the Chromium setup are c/p from the chromium_roll
-# script -> unify.
-class CheckChromium(Step):
-  MESSAGE = "Check the chromium checkout."
-
-  def Run(self):
-    self["chrome_path"] = self._options.chromium
-
-
 class SwitchChromium(Step):
   MESSAGE = "Switch to Chromium checkout."
-  REQUIRES = "chrome_path"
 
   def RunStep(self):
-    self["v8_path"] = os.getcwd()
-    os.chdir(self["chrome_path"])
+    cwd = self._options.chromium
     # Check for a clean workdir.
-    if not self.GitIsWorkdirClean():  # pragma: no cover
+    if not self.GitIsWorkdirClean(cwd=cwd):  # pragma: no cover
       self.Die("Workspace is not clean. Please commit or undo your changes.")
     # Assert that the DEPS file is there.
-    if not os.path.exists(self.Config(DEPS_FILE)):  # pragma: no cover
+    if not os.path.exists(os.path.join(cwd, "DEPS")):  # pragma: no cover
       self.Die("DEPS file not present.")
 
 
 class UpdateChromiumCheckout(Step):
   MESSAGE = "Update the checkout and create a new branch."
-  REQUIRES = "chrome_path"
 
   def RunStep(self):
-    os.chdir(self["chrome_path"])
-    self.GitCheckout("master")
-    self.GitPull()
-    self.GitCreateBranch(self.Config(BRANCHNAME))
+    cwd = self._options.chromium
+    self.GitCheckout("master", cwd=cwd)
+    self.GitPull(cwd=cwd)
+    self.GitCreateBranch(self.Config("BRANCHNAME"), cwd=cwd)
+
+
+def ConvertToCommitNumber(step, revision):
+  # Simple check for git hashes.
+  if revision.isdigit() and len(revision) < 8:
+    return revision
+  return step.GitConvertToSVNRevision(
+      revision, cwd=os.path.join(step._options.chromium, "v8"))
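
The helper only shells out to git for strings that cannot already be commit numbers. A standalone restatement of the heuristic, with the GitConvertToSVNRevision call stubbed out as a plain function argument:

def convert_to_commit_number(revision, convert_git_hash):
    # Short, purely numeric strings are SVN-style commit numbers already;
    # anything longer or non-numeric is assumed to be a git hash.
    if revision.isdigit() and len(revision) < 8:
        return revision
    return convert_git_hash(revision)

assert convert_to_commit_number("22624", lambda r: None) == "22624"
assert convert_to_commit_number("abcd123455", lambda r: "123455") == "123455"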
 
 
 class RetrieveChromiumV8Releases(Step):
   MESSAGE = "Retrieve V8 releases from Chromium DEPS."
-  REQUIRES = "chrome_path"
 
   def RunStep(self):
-    os.chdir(self["chrome_path"])
-
-    trunk_releases = filter(lambda r: r["branch"] == "trunk", self["releases"])
-    if not trunk_releases:  # pragma: no cover
-      print "No trunk releases detected. Skipping chromium history."
+    cwd = self._options.chromium
+    releases = filter(
+        lambda r: r["branch"] in ["trunk", "bleeding_edge"], self["releases"])
+    if not releases:  # pragma: no cover
+      print "No releases detected. Skipping chromium history."
       return True
 
-    oldest_v8_rev = int(trunk_releases[-1]["revision"])
+    # Update v8 checkout in chromium.
+    self.GitFetchOrigin(cwd=os.path.join(cwd, "v8"))
+
+    oldest_v8_rev = int(releases[-1]["revision"])
 
     cr_releases = []
     try:
-      for git_hash in self.GitLog(format="%H", grep="V8").splitlines():
-        if self._config[DEPS_FILE] not in self.GitChangedFiles(git_hash):
+      for git_hash in self.GitLog(
+          format="%H", grep="V8", cwd=cwd).splitlines():
+        if "DEPS" not in self.GitChangedFiles(git_hash, cwd=cwd):
           continue
-        if not self.GitCheckoutFileSafe(self._config[DEPS_FILE], git_hash):
+        if not self.GitCheckoutFileSafe("DEPS", git_hash, cwd=cwd):
           break  # pragma: no cover
-        deps = FileToText(self.Config(DEPS_FILE))
+        deps = FileToText(os.path.join(cwd, "DEPS"))
         match = DEPS_RE.search(deps)
         if match:
-          svn_rev = self.GitSVNFindSVNRev(git_hash)
-          v8_rev = match.group(1)
-          cr_releases.append([svn_rev, v8_rev])
+          cr_rev = self.GetCommitPositionNumber(git_hash, cwd=cwd)
+          if cr_rev:
+            v8_rev = ConvertToCommitNumber(self, match.group(1))
+            cr_releases.append([cr_rev, v8_rev])
 
           # Stop after reaching beyond the last v8 revision we want to update.
           # We need a small buffer for possible revert/reland frenzies.
@@ -324,23 +336,21 @@
       pass
 
     # Clean up.
-    self.GitCheckoutFileSafe(self._config[DEPS_FILE], "HEAD")
+    self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
 
-    # Add the chromium ranges to the v8 trunk releases.
+    # Add the chromium ranges to the v8 trunk and bleeding_edge releases.
     all_ranges = BuildRevisionRanges(cr_releases)
-    trunk_dict = dict((r["revision"], r) for r in trunk_releases)
+    releases_dict = dict((r["revision"], r) for r in releases)
     for revision, ranges in all_ranges.iteritems():
-      trunk_dict.get(revision, {})["chromium_revision"] = ranges
+      releases_dict.get(revision, {})["chromium_revision"] = ranges
 
 
 # TODO(machenbach): Unify common code with method above.
 class RietrieveChromiumBranches(Step):
   MESSAGE = "Retrieve Chromium branch information."
-  REQUIRES = "chrome_path"
 
   def RunStep(self):
-    os.chdir(self["chrome_path"])
-
+    cwd = self._options.chromium
     trunk_releases = filter(lambda r: r["branch"] == "trunk", self["releases"])
     if not trunk_releases:  # pragma: no cover
       print "No trunk releases detected. Skipping chromium history."
@@ -350,7 +360,7 @@
 
     # Filter out irrelevant branches.
     branches = filter(lambda r: re.match(r"branch-heads/\d+", r),
-                      self.GitRemotes())
+                      self.GitRemotes(cwd=cwd))
 
     # Transform into pure branch numbers.
     branches = map(lambda r: int(re.match(r"branch-heads/(\d+)", r).group(1)),
@@ -361,13 +371,14 @@
     cr_branches = []
     try:
       for branch in branches:
-        if not self.GitCheckoutFileSafe(self._config[DEPS_FILE],
-                                        "branch-heads/%d" % branch):
+        if not self.GitCheckoutFileSafe("DEPS",
+                                        "branch-heads/%d" % branch,
+                                        cwd=cwd):
           break  # pragma: no cover
-        deps = FileToText(self.Config(DEPS_FILE))
+        deps = FileToText(os.path.join(cwd, "DEPS"))
         match = DEPS_RE.search(deps)
         if match:
-          v8_rev = match.group(1)
+          v8_rev = ConvertToCommitNumber(self, match.group(1))
           cr_branches.append([str(branch), v8_rev])
 
           # Stop after reaching beyond the last v8 revision we want to update.
@@ -381,7 +392,7 @@
       pass
 
     # Clean up.
-    self.GitCheckoutFileSafe(self._config[DEPS_FILE], "HEAD")
+    self.GitCheckoutFileSafe("DEPS", "HEAD", cwd=cwd)
 
     # Add the chromium branches to the v8 trunk releases.
     all_ranges = BuildRevisionRanges(cr_branches)
@@ -390,20 +401,12 @@
       trunk_dict.get(revision, {})["chromium_branch"] = ranges
 
 
-class SwitchV8(Step):
-  MESSAGE = "Returning to V8 checkout."
-  REQUIRES = "chrome_path"
-
-  def RunStep(self):
-    self.GitCheckout("master")
-    self.GitDeleteBranch(self.Config(BRANCHNAME))
-    os.chdir(self["v8_path"])
-
-
 class CleanUp(Step):
   MESSAGE = "Clean up."
 
   def RunStep(self):
+    self.GitCheckout("master", cwd=self._options.chromium)
+    self.GitDeleteBranch(self.Config("BRANCHNAME"), cwd=self._options.chromium)
     self.CommonCleanup()
 
 
@@ -444,20 +447,24 @@
   def _ProcessOptions(self, options):  # pragma: no cover
     return True
 
+  def _Config(self):
+    return {
+      "BRANCHNAME": "retrieve-v8-releases",
+      "PERSISTFILE_BASENAME": "/tmp/v8-releases-tempfile",
+    }
+
   def _Steps(self):
     return [
       Preparation,
       RetrieveV8Releases,
-      CheckChromium,
       SwitchChromium,
       UpdateChromiumCheckout,
       RetrieveChromiumV8Releases,
       RietrieveChromiumBranches,
-      SwitchV8,
       CleanUp,
       WriteOutput,
     ]
 
 
 if __name__ == "__main__":  # pragma: no cover
-  sys.exit(Releases(CONFIG).Run())
+  sys.exit(Releases().Run())
diff --git a/tools/push-to-trunk/test_scripts.py b/tools/push-to-trunk/test_scripts.py
index bc79cfd..b0d1c58 100644
--- a/tools/push-to-trunk/test_scripts.py
+++ b/tools/push-to-trunk/test_scripts.py
@@ -27,13 +27,13 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 import os
+import shutil
 import tempfile
 import traceback
 import unittest
 
 import auto_push
 from auto_push import CheckLastPush
-from auto_push import SETTINGS_LOCATION
 import auto_roll
 import common_includes
 from common_includes import *
@@ -42,30 +42,31 @@
 import push_to_trunk
 from push_to_trunk import *
 import chromium_roll
-from chromium_roll import CHROMIUM
-from chromium_roll import DEPS_FILE
 from chromium_roll import ChromiumRoll
 import releases
 from releases import Releases
+import bump_up_version
+from bump_up_version import BumpUpVersion
+from bump_up_version import LastChangeBailout
+from bump_up_version import LKGRVersionUpToDateBailout
+from auto_tag import AutoTag
 
 
 TEST_CONFIG = {
-  BRANCHNAME: "test-prepare-push",
-  TRUNKBRANCH: "test-trunk-push",
-  PERSISTFILE_BASENAME: "/tmp/test-v8-push-to-trunk-tempfile",
-  DOT_GIT_LOCATION: None,
-  VERSION_FILE: None,
-  CHANGELOG_FILE: None,
-  CHANGELOG_ENTRY_FILE: "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
-  PATCH_FILE: "/tmp/test-v8-push-to-trunk-tempfile-patch",
-  COMMITMSG_FILE: "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
-  CHROMIUM: "/tmp/test-v8-push-to-trunk-tempfile-chromium",
-  DEPS_FILE: "/tmp/test-v8-push-to-trunk-tempfile-chromium/DEPS",
-  SETTINGS_LOCATION: None,
-  ALREADY_MERGING_SENTINEL_FILE:
+  "DEFAULT_CWD": None,
+  "BRANCHNAME": "test-prepare-push",
+  "TRUNKBRANCH": "test-trunk-push",
+  "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-trunk-tempfile",
+  "CHANGELOG_FILE": None,
+  "CHANGELOG_ENTRY_FILE": "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
+  "PATCH_FILE": "/tmp/test-v8-push-to-trunk-tempfile-patch",
+  "COMMITMSG_FILE": "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
+  "CHROMIUM": "/tmp/test-v8-push-to-trunk-tempfile-chromium",
+  "SETTINGS_LOCATION": None,
+  "ALREADY_MERGING_SENTINEL_FILE":
       "/tmp/test-merge-to-branch-tempfile-already-merging",
-  COMMIT_HASHES_FILE: "/tmp/test-merge-to-branch-tempfile-PATCH_COMMIT_HASHES",
-  TEMPORARY_PATCH_FILE: "/tmp/test-merge-to-branch-tempfile-temporary-patch",
+  "TEMPORARY_PATCH_FILE": "/tmp/test-merge-to-branch-tempfile-temporary-patch",
+  "CLUSTERFUZZ_API_KEY_FILE": "/tmp/test-fake-cf-api-key",
 }
 
 
@@ -245,19 +246,26 @@
                                                 "BUG=1234567890\n"))
 
 
-def Git(*args, **kwargs):
-  """Convenience function returning a git test expectation."""
+def Cmd(*args, **kwargs):
+  """Convenience function returning a shell command test expectation."""
   return {
-    "name": "git",
-    "args": args[:-1],
+    "name": "command",
+    "args": args,
     "ret": args[-1],
     "cb": kwargs.get("cb"),
+    "cwd": kwargs.get("cwd", TEST_CONFIG["DEFAULT_CWD"]),
   }
 
 
 def RL(text, cb=None):
   """Convenience function returning a readline test expectation."""
-  return {"name": "readline", "args": [], "ret": text, "cb": cb}
+  return {
+    "name": "readline",
+    "args": [],
+    "ret": text,
+    "cb": cb,
+    "cwd": None,
+  }
 
 
 def URL(*args, **kwargs):
@@ -267,19 +275,19 @@
     "args": args[:-1],
     "ret": args[-1],
     "cb": kwargs.get("cb"),
+    "cwd": None,
   }
 
 
 class SimpleMock(object):
-  def __init__(self, name):
-    self._name = name
+  def __init__(self):
     self._recipe = []
     self._index = -1
 
   def Expect(self, recipe):
     self._recipe = recipe
 
-  def Call(self, name, *args):  # pragma: no cover
+  def Call(self, name, *args, **kwargs):  # pragma: no cover
     self._index += 1
     try:
       expected_call = self._recipe[self._index]
@@ -287,21 +295,33 @@
       raise NoRetryException("Calling %s %s" % (name, " ".join(args)))
 
     if not isinstance(expected_call, dict):
-      raise NoRetryException("Found wrong expectation type for %s %s"
-                             % (name, " ".join(args)))
+      raise NoRetryException("Found wrong expectation type for %s %s" %
+                             (name, " ".join(args)))
 
+    if expected_call["name"] != name:
+      raise NoRetryException("Expected action: %s %s - Actual: %s" %
+          (expected_call["name"], expected_call["args"], name))
+
+    # Check if the given working directory matches the expected one.
+    if expected_call["cwd"] != kwargs.get("cwd"):
+      raise NoRetryException("Expected cwd: %s in %s %s - Actual: %s" %
+          (expected_call["cwd"],
+           expected_call["name"],
+           expected_call["args"],
+           kwargs.get("cwd")))
 
     # The number of arguments in the expectation must match the actual
     # arguments.
     if len(args) > len(expected_call['args']):
       raise NoRetryException("When calling %s with arguments, the "
-          "expectations must consist of at least as many arguments." % name)
+          "expectations must consist of at least as many arguments." %
+          name)
 
     # Compare expected and actual arguments.
     for (expected_arg, actual_arg) in zip(expected_call['args'], args):
       if expected_arg != actual_arg:
-        raise NoRetryException("Expected: %s - Actual: %s"
-                               % (expected_arg, actual_arg))
+        raise NoRetryException("Expected: %s - Actual: %s" %
+                               (expected_arg, actual_arg))
 
     # The expected call contains an optional callback for checking the context
     # at the time of the call.
@@ -319,8 +339,8 @@
 
   def AssertFinished(self):  # pragma: no cover
     if self._index < len(self._recipe) -1:
-      raise NoRetryException("Called %s too seldom: %d vs. %d"
-                             % (self._name, self._index, len(self._recipe)))
+      raise NoRetryException("Called mock too seldom: %d vs. %d" %
+                             (self._index, len(self._recipe)))
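
The three per-tool mocks collapse into a single recipe that interleaves commands, readline answers and URL fetches in call order, with each expectation now also pinning the working directory. A stripped-down, hypothetical rendering of the matching logic:

class TinyMock(object):
    def __init__(self, recipe):
        self._recipe = list(recipe)

    def Call(self, name, *args, **kwargs):
        expected = self._recipe.pop(0)
        # Name, cwd and positional arguments must all line up.
        assert expected["name"] == name
        assert expected["cwd"] == kwargs.get("cwd")
        assert list(args) == list(expected["args"])
        return expected["ret"]

mock = TinyMock([
    {"name": "command", "args": ["git --version"],
     "ret": "git version 1.2.3", "cwd": None},
    {"name": "readline", "args": [], "ret": "Y", "cwd": None},
])
assert mock.Call("command", "git --version") == "git version 1.2.3"
assert mock.Call("readline") == "Y"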
 
 
 class ScriptTest(unittest.TestCase):
@@ -330,8 +350,17 @@
     self._tmp_files.append(name)
     return name
 
+  def MakeEmptyTempDirectory(self):
+    name = tempfile.mkdtemp()
+    self._tmp_files.append(name)
+    return name
+
+
   def WriteFakeVersionFile(self, minor=22, build=4, patch=0):
-    with open(TEST_CONFIG[VERSION_FILE], "w") as f:
+    version_file = os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE)
+    if not os.path.exists(os.path.dirname(version_file)):
+      os.makedirs(os.path.dirname(version_file))
+    with open(version_file, "w") as f:
       f.write("  // Some line...\n")
       f.write("\n")
       f.write("#define MAJOR_VERSION    3\n")
@@ -350,39 +379,30 @@
 
   def RunStep(self, script=PushToTrunk, step_class=Step, args=None):
     """Convenience wrapper."""
-    args = args or ["-m"]
+    args = args if args is not None else ["-m"]
     return script(TEST_CONFIG, self, self._state).RunSteps([step_class], args)
 
-  def GitMock(self, cmd, args="", pipe=True):
-    print "%s %s" % (cmd, args)
-    return self._git_mock.Call("git", args)
-
-  def LogMock(self, cmd, args=""):
-    print "Log: %s %s" % (cmd, args)
-
-  MOCKS = {
-    "git": GitMock,
-    # TODO(machenbach): Little hack to reuse the git mock for the one svn call
-    # in merge-to-branch. The command should be made explicit in the test
-    # expectations.
-    "svn": GitMock,
-    "vi": LogMock,
-  }
-
   def Call(self, fun, *args, **kwargs):
     print "Calling %s with %s and %s" % (str(fun), str(args), str(kwargs))
 
-  def Command(self, cmd, args="", prefix="", pipe=True):
-    return ScriptTest.MOCKS[cmd](self, cmd, args)
+  def Command(self, cmd, args="", prefix="", pipe=True, cwd=None):
+    print "%s %s" % (cmd, args)
+    print "in %s" % cwd
+    return self._mock.Call("command", cmd + " " + args, cwd=cwd)
 
   def ReadLine(self):
-    return self._rl_mock.Call("readline")
+    return self._mock.Call("readline")
 
   def ReadURL(self, url, params):
     if params is not None:
-      return self._url_mock.Call("readurl", url, params)
+      return self._mock.Call("readurl", url, params)
     else:
-      return self._url_mock.Call("readurl", url)
+      return self._mock.Call("readurl", url)
+
+  def ReadClusterFuzzAPI(self, api_key, **params):
+    # TODO(machenbach): Use a mock for this and add a test that stops rolling
+    # due to clustefuzz results.
+    return []
 
   def Sleep(self, seconds):
     pass
@@ -390,90 +410,85 @@
   def GetDate(self):
     return "1999-07-31"
 
-  def ExpectGit(self, *args):
-    """Convenience wrapper."""
-    self._git_mock.Expect(*args)
+  def GetUTCStamp(self):
+    return "100000"
 
-  def ExpectReadline(self, *args):
+  def Expect(self, *args):
     """Convenience wrapper."""
-    self._rl_mock.Expect(*args)
-
-  def ExpectReadURL(self, *args):
-    """Convenience wrapper."""
-    self._url_mock.Expect(*args)
+    self._mock.Expect(*args)
 
   def setUp(self):
-    self._git_mock = SimpleMock("git")
-    self._rl_mock = SimpleMock("readline")
-    self._url_mock = SimpleMock("readurl")
+    self._mock = SimpleMock()
     self._tmp_files = []
     self._state = {}
+    TEST_CONFIG["DEFAULT_CWD"] = self.MakeEmptyTempDirectory()
 
   def tearDown(self):
-    Command("rm", "-rf %s*" % TEST_CONFIG[PERSISTFILE_BASENAME])
+    if os.path.exists(TEST_CONFIG["PERSISTFILE_BASENAME"]):
+      shutil.rmtree(TEST_CONFIG["PERSISTFILE_BASENAME"])
 
     # Clean up temps. Doesn't work automatically.
     for name in self._tmp_files:
-      if os.path.exists(name):
+      if os.path.isfile(name):
         os.remove(name)
+      if os.path.isdir(name):
+        shutil.rmtree(name)
 
-    self._git_mock.AssertFinished()
-    self._rl_mock.AssertFinished()
-    self._url_mock.AssertFinished()
-
-  def testGitOrig(self):
-    self.assertTrue(Command("git", "--version").startswith("git version"))
+    self._mock.AssertFinished()
 
   def testGitMock(self):
-    self.ExpectGit([Git("--version", "git version 1.2.3"), Git("dummy", "")])
+    self.Expect([Cmd("git --version", "git version 1.2.3"),
+                 Cmd("git dummy", "")])
     self.assertEquals("git version 1.2.3", self.MakeStep().Git("--version"))
     self.assertEquals("", self.MakeStep().Git("dummy"))
 
   def testCommonPrepareDefault(self):
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch"),
-      Git("svn fetch", ""),
-      Git("branch", "  branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
-      Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch"),
+      Cmd("git svn fetch", ""),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+      RL("Y"),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
     ])
-    self.ExpectReadline([RL("Y")])
     self.MakeStep().CommonPrepare()
     self.MakeStep().PrepareBranch()
     self.assertEquals("some_branch", self._state["current_branch"])
 
   def testCommonPrepareNoConfirm(self):
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch"),
-      Git("svn fetch", ""),
-      Git("branch", "  branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch"),
+      Cmd("git svn fetch", ""),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+      RL("n"),
     ])
-    self.ExpectReadline([RL("n")])
     self.MakeStep().CommonPrepare()
     self.assertRaises(Exception, self.MakeStep().PrepareBranch)
     self.assertEquals("some_branch", self._state["current_branch"])
 
   def testCommonPrepareDeleteBranchFailure(self):
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch"),
-      Git("svn fetch", ""),
-      Git("branch", "  branch1\n* %s" % TEST_CONFIG[BRANCHNAME]),
-      Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], None),
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch"),
+      Cmd("git svn fetch", ""),
+      Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
+      RL("Y"),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], None),
     ])
-    self.ExpectReadline([RL("Y")])
     self.MakeStep().CommonPrepare()
     self.assertRaises(Exception, self.MakeStep().PrepareBranch)
     self.assertEquals("some_branch", self._state["current_branch"])
 
   def testInitialEnvironmentChecks(self):
-    TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
     os.environ["EDITOR"] = "vi"
-    self.MakeStep().InitialEnvironmentChecks()
+    self.Expect([
+      Cmd("which vi", "/usr/bin/vi"),
+    ])
+    self.MakeStep().InitialEnvironmentChecks(TEST_CONFIG["DEFAULT_CWD"])
 
   def testReadAndPersistVersion(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile(build=5)
     step = self.MakeStep()
     step.ReadAndPersistVersion()
@@ -505,40 +520,35 @@
 
   def testPreparePushRevision(self):
     # Tests the default push hash used when the --revision option is not set.
-    self.ExpectGit([
-      Git("log -1 --format=%H HEAD", "push_hash")
+    self.Expect([
+      Cmd("git log -1 --format=%H HEAD", "push_hash")
     ])
 
     self.RunStep(PushToTrunk, PreparePushRevision)
     self.assertEquals("push_hash", self._state["push_hash"])
 
   def testPrepareChangeLog(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
-    TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
+    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
 
-    self.ExpectGit([
-      Git("log --format=%H 1234..push_hash", "rev1\nrev2\nrev3\nrev4"),
-      Git("log -1 --format=%s rev1", "Title text 1"),
-      Git("log -1 --format=%B rev1", "Title\n\nBUG=\nLOG=y\n"),
-      Git("log -1 --format=%an rev1", "author1@chromium.org"),
-      Git("log -1 --format=%s rev2", "Title text 2."),
-      Git("log -1 --format=%B rev2", "Title\n\nBUG=123\nLOG= \n"),
-      Git("log -1 --format=%an rev2", "author2@chromium.org"),
-      Git("log -1 --format=%s rev3", "Title text 3"),
-      Git("log -1 --format=%B rev3", "Title\n\nBUG=321\nLOG=true\n"),
-      Git("log -1 --format=%an rev3", "author3@chromium.org"),
-      Git("log -1 --format=%s rev4", "Title text 4"),
-      Git("log -1 --format=%B rev4",
+    self.Expect([
+      Cmd("git log --format=%H 1234..push_hash", "rev1\nrev2\nrev3\nrev4"),
+      Cmd("git log -1 --format=%s rev1", "Title text 1"),
+      Cmd("git log -1 --format=%B rev1", "Title\n\nBUG=\nLOG=y\n"),
+      Cmd("git log -1 --format=%an rev1", "author1@chromium.org"),
+      Cmd("git log -1 --format=%s rev2", "Title text 2."),
+      Cmd("git log -1 --format=%B rev2", "Title\n\nBUG=123\nLOG= \n"),
+      Cmd("git log -1 --format=%an rev2", "author2@chromium.org"),
+      Cmd("git log -1 --format=%s rev3", "Title text 3"),
+      Cmd("git log -1 --format=%B rev3", "Title\n\nBUG=321\nLOG=true\n"),
+      Cmd("git log -1 --format=%an rev3", "author3@chromium.org"),
+      Cmd("git log -1 --format=%s rev4", "Title text 4"),
+      Cmd("git log -1 --format=%B rev4",
        ("Title\n\nBUG=456\nLOG=Y\n\n"
         "Review URL: https://codereview.chromium.org/9876543210\n")),
-      Git("log -1 --format=%an rev4", "author4@chromium.org"),
-    ])
-
-    # The cl for rev4 on rietveld has an updated LOG flag.
-    self.ExpectReadURL([
       URL("https://codereview.chromium.org/9876543210/description",
           "Title\n\nBUG=456\nLOG=N\n\n"),
+      Cmd("git log -1 --format=%an rev4", "author4@chromium.org"),
     ])
 
     self._state["last_push_bleeding_edge"] = "1234"
@@ -546,7 +556,7 @@
     self._state["version"] = "3.22.5"
     self.RunStep(PushToTrunk, PrepareChangeLog)
 
-    actual_cl = FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+    actual_cl = FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
 
     expected_cl = """1999-07-31: Version 3.22.5
 
@@ -577,29 +587,31 @@
     self.assertEquals(expected_cl, actual_cl)
 
   def testEditChangeLog(self):
-    TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
-    TextToFile("  New  \n\tLines  \n", TEST_CONFIG[CHANGELOG_ENTRY_FILE])
+    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+    TextToFile("  New  \n\tLines  \n", TEST_CONFIG["CHANGELOG_ENTRY_FILE"])
     os.environ["EDITOR"] = "vi"
-
-    self.ExpectReadline([
+    self.Expect([
       RL(""),  # Open editor.
+      Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""),
     ])
 
     self.RunStep(PushToTrunk, EditChangeLog)
 
     self.assertEquals("New\n        Lines",
-                      FileToText(TEST_CONFIG[CHANGELOG_ENTRY_FILE]))
+                      FileToText(TEST_CONFIG["CHANGELOG_ENTRY_FILE"]))
 
+  # Version on trunk: 3.22.4.0. Version on master (bleeding_edge): 3.22.6.
+  # Make sure that the increment is 3.22.7.0.
   def testIncrementVersion(self):
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
     self._state["last_push_trunk"] = "hash1"
+    self._state["latest_build"] = "6"
+    self._state["latest_version"] = "3.22.6.0"
 
-    self.ExpectGit([
-      Git("checkout -f hash1 -- %s" % TEST_CONFIG[VERSION_FILE], "")
-    ])
-
-    self.ExpectReadline([
+    self.Expect([
+      Cmd("git checkout -f hash1 -- src/version.cc", ""),
+      Cmd("git checkout -f svn/bleeding_edge -- src/version.cc",
+          "", cb=lambda: self.WriteFakeVersionFile(22, 6)),
       RL("Y"),  # Increment build number.
     ])
 
@@ -607,26 +619,26 @@
 
     self.assertEquals("3", self._state["new_major"])
     self.assertEquals("22", self._state["new_minor"])
-    self.assertEquals("5", self._state["new_build"])
+    self.assertEquals("7", self._state["new_build"])
     self.assertEquals("0", self._state["new_patch"])
 
   def _TestSquashCommits(self, change_log, expected_msg):
-    TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
-    with open(TEST_CONFIG[CHANGELOG_ENTRY_FILE], "w") as f:
+    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+    with open(TEST_CONFIG["CHANGELOG_ENTRY_FILE"], "w") as f:
       f.write(change_log)
 
-    self.ExpectGit([
-      Git("diff svn/trunk hash1", "patch content"),
-      Git("svn find-rev hash1", "123455\n"),
+    self.Expect([
+      Cmd("git diff svn/trunk hash1", "patch content"),
+      Cmd("git svn find-rev hash1", "123455\n"),
     ])
 
     self._state["push_hash"] = "hash1"
     self._state["date"] = "1999-11-11"
 
     self.RunStep(PushToTrunk, SquashCommits)
-    self.assertEquals(FileToText(TEST_CONFIG[COMMITMSG_FILE]), expected_msg)
+    self.assertEquals(FileToText(TEST_CONFIG["COMMITMSG_FILE"]), expected_msg)
 
-    patch = FileToText(TEST_CONFIG[ PATCH_FILE])
+    patch = FileToText(TEST_CONFIG["PATCH_FILE"])
     self.assertTrue(re.search(r"patch content", patch))
 
   def testSquashCommitsUnformatted(self):
@@ -663,17 +675,16 @@
     self._TestSquashCommits(change_log, commit_msg)
 
   def _PushToTrunk(self, force=False, manual=False):
-    TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
 
     # The version file on bleeding edge has build level 5, while the version
     # file from trunk has build level 4.
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile(build=5)
 
-    TEST_CONFIG[CHANGELOG_ENTRY_FILE] = self.MakeEmptyTempFile()
-    TEST_CONFIG[CHANGELOG_FILE] = self.MakeEmptyTempFile()
+    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
+    TEST_CONFIG["CHANGELOG_FILE"] = self.MakeEmptyTempFile()
     bleeding_edge_change_log = "2014-03-17: Sentinel\n"
-    TextToFile(bleeding_edge_change_log, TEST_CONFIG[CHANGELOG_FILE])
+    TextToFile(bleeding_edge_change_log, TEST_CONFIG["CHANGELOG_FILE"])
     os.environ["EDITOR"] = "vi"
 
     def ResetChangeLog():
@@ -682,21 +693,22 @@
       trunk_change_log = """1999-04-05: Version 3.22.4
 
         Performance and stability improvements on all platforms.\n"""
-      TextToFile(trunk_change_log, TEST_CONFIG[CHANGELOG_FILE])
+      TextToFile(trunk_change_log, TEST_CONFIG["CHANGELOG_FILE"])
 
     def ResetToTrunk():
       ResetChangeLog()
       self.WriteFakeVersionFile()
 
     def CheckSVNCommit():
-      commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
       self.assertEquals(
 """Version 3.22.5 (based on bleeding_edge revision r123455)
 
 Log text 1 (issue 321).
 
 Performance and stability improvements on all platforms.""", commit)
-      version = FileToText(TEST_CONFIG[VERSION_FILE])
+      version = FileToText(
+          os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
       self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
       self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
       self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
@@ -704,7 +716,7 @@
       self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
 
       # Check that the change log on the trunk branch got correctly modified.
-      change_log = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+      change_log = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
       self.assertEquals(
 """1999-07-31: Version 3.22.5
 
@@ -719,59 +731,73 @@
           change_log)
 
     force_flag = " -f" if not manual else ""
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch\n"),
-      Git("svn fetch", ""),
-      Git("branch", "  branch1\n* branch2\n"),
-      Git("branch", "  branch1\n* branch2\n"),
-      Git("checkout -b %s svn/bleeding_edge" % TEST_CONFIG[BRANCHNAME], ""),
-      Git("svn find-rev r123455", "push_hash\n"),
-      Git(("log -1 --format=%H --grep="
+    expectations = []
+    if not force:
+      expectations.append(Cmd("which vi", "/usr/bin/vi"))
+    expectations += [
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch\n"),
+      Cmd("git svn fetch", ""),
+      Cmd("git branch", "  branch1\n* branch2\n"),
+      Cmd("git branch", "  branch1\n* branch2\n"),
+      Cmd("git checkout -b %s svn/bleeding_edge" % TEST_CONFIG["BRANCHNAME"],
+          ""),
+      Cmd("git svn find-rev r123455", "push_hash\n"),
+      Cmd(("git log -1 --format=%H --grep="
            "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
            "svn/trunk"), "hash2\n"),
-      Git("log -1 hash2", "Log message\n"),
-      Git("log -1 --format=%s hash2",
-       "Version 3.4.5 (based on bleeding_edge revision r1234)\n"),
-      Git("svn find-rev r1234", "hash3\n"),
-      Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
-          cb=self.WriteFakeVersionFile),
-      Git("log --format=%H hash3..push_hash", "rev1\n"),
-      Git("log -1 --format=%s rev1", "Log text 1.\n"),
-      Git("log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
-      Git("log -1 --format=%an rev1", "author1@chromium.org\n"),
-      Git("svn fetch", "fetch result\n"),
-      Git("checkout -f svn/bleeding_edge", ""),
-      Git("diff svn/trunk push_hash", "patch content\n"),
-      Git("svn find-rev push_hash", "123455\n"),
-      Git("checkout -b %s svn/trunk" % TEST_CONFIG[TRUNKBRANCH], "",
-          cb=ResetToTrunk),
-      Git("apply --index --reject \"%s\"" % TEST_CONFIG[PATCH_FILE], ""),
-      Git("checkout -f svn/trunk -- %s" % TEST_CONFIG[CHANGELOG_FILE], "",
-          cb=ResetChangeLog),
-      Git("checkout -f svn/trunk -- %s" % TEST_CONFIG[VERSION_FILE], "",
-          cb=self.WriteFakeVersionFile),
-      Git("commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], "",
-          cb=CheckSVNCommit),
-      Git("svn dcommit 2>&1", "Some output\nCommitted r123456\nSome output\n"),
-      Git("svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""),
-      Git("checkout -f some_branch", ""),
-      Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
-      Git("branch -D %s" % TEST_CONFIG[TRUNKBRANCH], ""),
-    ])
-
-    # Expected keyboard input in manual mode:
+      Cmd("git log -1 hash2", "Log message\n"),
+    ]
     if manual:
-      self.ExpectReadline([
-        RL("Y"),  # Confirm last push.
-        RL(""),  # Open editor.
-        RL("Y"),  # Increment build number.
-        RL("Y"),  # Sanity check.
-      ])
-
-    # Expected keyboard input in semi-automatic mode and forced mode:
-    if not manual:
-      self.ExpectReadline([])
+      expectations.append(RL("Y"))  # Confirm last push.
+    expectations += [
+      Cmd("git log -1 --format=%s hash2",
+       "Version 3.4.5 (based on bleeding_edge revision r1234)\n"),
+      Cmd("git svn find-rev r1234", "hash3\n"),
+      Cmd("git checkout -f svn/bleeding_edge -- src/version.cc",
+          "", cb=self.WriteFakeVersionFile),
+      Cmd("git checkout -f hash2 -- src/version.cc", "",
+          cb=self.WriteFakeVersionFile),
+    ]
+    if manual:
+      expectations.append(RL(""))  # Increment build number.
+    expectations += [
+      Cmd("git log --format=%H hash3..push_hash", "rev1\n"),
+      Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
+      Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
+      Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
+    ]
+    if manual:
+      expectations.append(RL(""))  # Open editor.
+    if not force:
+      expectations.append(
+          Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""))
+    expectations += [
+      Cmd("git svn fetch", "fetch result\n"),
+      Cmd("git checkout -f svn/bleeding_edge", ""),
+      Cmd("git diff svn/trunk push_hash", "patch content\n"),
+      Cmd("git svn find-rev push_hash", "123455\n"),
+      Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG["TRUNKBRANCH"], "",
+          cb=ResetToTrunk),
+      Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
+      Cmd("git checkout -f svn/trunk -- %s" % TEST_CONFIG["CHANGELOG_FILE"], "",
+          cb=ResetChangeLog),
+      Cmd("git checkout -f svn/trunk -- src/version.cc", "",
+          cb=self.WriteFakeVersionFile),
+      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
+          cb=CheckSVNCommit),
+    ]
+    if manual:
+      expectations.append(RL("Y"))  # Sanity check.
+    expectations += [
+      Cmd("git svn dcommit 2>&1",
+          "Some output\nCommitted r123456\nSome output\n"),
+      Cmd("git svn tag 3.22.5 -m \"Tagging version 3.22.5\"", ""),
+      Cmd("git checkout -f some_branch", ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["TRUNKBRANCH"], ""),
+    ]
+    self.Expect(expectations)
 
     args = ["-a", "author@chromium.org", "--revision", "123455"]
     if force: args.append("-f")
@@ -779,7 +805,7 @@
     else: args += ["-r", "reviewer@chromium.org"]
     PushToTrunk(TEST_CONFIG, self).Run(args)
 
-    cl = FileToText(TEST_CONFIG[CHANGELOG_FILE])
+    cl = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
     self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
     self.assertTrue(re.search(r"        Log text 1 \(issue 321\).", cl))
     self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
@@ -797,8 +823,26 @@
   def testPushToTrunkForced(self):
     self._PushToTrunk(force=True)
 
-  def _ChromiumRoll(self, force=False, manual=False):
-    googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG[PERSISTFILE_BASENAME]
+  C_V8_22624_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22624 123
+
+"""
+
+  C_V8_123455_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123455 123
+
+"""
+
+  C_V8_123456_LOG = """V8 CL.
+
+git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@123456 123
+
+"""
+
+  def testChromiumRoll(self):
+    googlers_mapping_py = "%s-mapping.py" % TEST_CONFIG["PERSISTFILE_BASENAME"]
     with open(googlers_mapping_py, "w") as f:
       f.write("""
 def list_to_dict(entries):
@@ -806,77 +850,61 @@
 def get_list():
   pass""")
 
-    TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
-    if not os.path.exists(TEST_CONFIG[CHROMIUM]):
-      os.makedirs(TEST_CONFIG[CHROMIUM])
+    # Set up fake directory structures.
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    TextToFile("", os.path.join(TEST_CONFIG["CHROMIUM"], ".git"))
+    chrome_dir = TEST_CONFIG["CHROMIUM"]
+    os.makedirs(os.path.join(chrome_dir, "v8"))
+
+    # Write fake deps file.
     TextToFile("Some line\n   \"v8_revision\": \"123444\",\n  some line",
-               TEST_CONFIG[DEPS_FILE])
+               os.path.join(chrome_dir, "DEPS"))
+    def WriteDeps():
+      TextToFile("Some line\n   \"v8_revision\": \"22624\",\n  some line",
+                 os.path.join(chrome_dir, "DEPS"))
 
-    os.environ["EDITOR"] = "vi"
-    force_flag = " -f" if not manual else ""
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch\n"),
-      Git("svn fetch", ""),
-      Git(("log -1 --format=%H --grep="
+    expectations = [
+      Cmd("git fetch origin", ""),
+      Cmd(("git log -1 --format=%H --grep="
            "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
-           "svn/trunk"), "push_hash\n"),
-      Git("svn find-rev push_hash", "123455\n"),
-      Git("log -1 --format=%s push_hash",
-          "Version 3.22.5 (based on bleeding_edge revision r123454)\n"),
-      Git("status -s -uno", ""),
-      Git("checkout -f master", ""),
-      Git("pull", ""),
-      Git("checkout -b v8-roll-123455", ""),
-      Git(("commit -am \"Update V8 to version 3.22.5 "
-           "(based on bleeding_edge revision r123454).\n\n"
-           "Please reply to the V8 sheriff c_name@chromium.org in "
-           "case of problems.\n\nTBR=c_name@chromium.org\""),
-          ""),
-      Git(("cl upload --send-mail --email \"author@chromium.org\"%s"
-           % force_flag), ""),
-    ])
-
-    self.ExpectReadURL([
+           "origin/master"), "push_hash\n"),
+      Cmd("git log -1 --format=%B push_hash", self.C_V8_22624_LOG),
+      Cmd("git log -1 --format=%s push_hash",
+          "Version 3.22.5 (based on bleeding_edge revision r22622)\n"),
       URL("https://chromium-build.appspot.com/p/chromium/sheriff_v8.js",
           "document.write('g_name')"),
-    ])
+      Cmd("git status -s -uno", "", cwd=chrome_dir),
+      Cmd("git checkout -f master", "", cwd=chrome_dir),
+      Cmd("gclient sync --nohooks", "syncing...", cwd=chrome_dir),
+      Cmd("git pull", "", cwd=chrome_dir),
+      Cmd("git fetch origin", ""),
+      Cmd("git checkout -b v8-roll-22624", "", cwd=chrome_dir),
+      Cmd("roll-dep v8 22624", "rolled", cb=WriteDeps, cwd=chrome_dir),
+      Cmd(("git commit -am \"Update V8 to version 3.22.5 "
+           "(based on bleeding_edge revision r22622).\n\n"
+           "Please reply to the V8 sheriff c_name@chromium.org in "
+           "case of problems.\n\nTBR=c_name@chromium.org\" "
+           "--author \"author@chromium.org <author@chromium.org>\""),
+          "", cwd=chrome_dir),
+      Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f", "",
+          cwd=chrome_dir),
+    ]
+    self.Expect(expectations)
 
-    # Expected keyboard input in manual mode:
-    if manual:
-      self.ExpectReadline([
-        RL("c_name@chromium.org"),  # Chromium reviewer.
-      ])
-
-    # Expected keyboard input in semi-automatic mode and forced mode:
-    if not manual:
-      self.ExpectReadline([])
-
-    args = ["-a", "author@chromium.org", "-c", TEST_CONFIG[CHROMIUM],
-            "--sheriff", "--googlers-mapping", googlers_mapping_py]
-    if force: args.append("-f")
-    if manual: args.append("-m")
-    else: args += ["-r", "reviewer@chromium.org"]
+    args = ["-a", "author@chromium.org", "-c", chrome_dir,
+            "--sheriff", "--googlers-mapping", googlers_mapping_py,
+            "-r", "reviewer@chromium.org"]
     ChromiumRoll(TEST_CONFIG, self).Run(args)
 
-    deps = FileToText(TEST_CONFIG[DEPS_FILE])
-    self.assertTrue(re.search("\"v8_revision\": \"123455\"", deps))
-
-  def testChromiumRollManual(self):
-    self._ChromiumRoll(manual=True)
-
-  def testChromiumRollSemiAutomatic(self):
-    self._ChromiumRoll()
-
-  def testChromiumRollForced(self):
-    self._ChromiumRoll(force=True)
+    deps = FileToText(os.path.join(chrome_dir, "DEPS"))
+    self.assertTrue(re.search("\"v8_revision\": \"22624\"", deps))
 
   def testCheckLastPushRecently(self):
-    self.ExpectGit([
-      Git(("log -1 --format=%H --grep="
+    self.Expect([
+      Cmd(("git log -1 --format=%H --grep="
            "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
            "svn/trunk"), "hash2\n"),
-      Git("log -1 --format=%s hash2",
+      Cmd("git log -1 --format=%s hash2",
           "Version 3.4.5 (based on bleeding_edge revision r99)\n"),
     ])
 
@@ -887,45 +915,41 @@
                                                       AUTO_PUSH_ARGS))
 
   def testAutoPush(self):
-    TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
-    TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+    TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
 
-    self.ExpectReadURL([
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch\n"),
+      Cmd("git svn fetch", ""),
       URL("https://v8-status.appspot.com/current?format=json",
           "{\"message\": \"Tree is throttled\"}"),
       URL("https://v8-status.appspot.com/lkgr", Exception("Network problem")),
       URL("https://v8-status.appspot.com/lkgr", "100"),
-    ])
-
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch\n"),
-      Git("svn fetch", ""),
-      Git(("log -1 --format=%H --grep=\""
+      Cmd(("git log -1 --format=%H --grep=\""
            "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\""
            " svn/trunk"), "push_hash\n"),
-      Git("log -1 --format=%s push_hash",
+      Cmd("git log -1 --format=%s push_hash",
           "Version 3.4.5 (based on bleeding_edge revision r79)\n"),
     ])
 
     auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
 
     state = json.loads(FileToText("%s-state.json"
-                                  % TEST_CONFIG[PERSISTFILE_BASENAME]))
+                                  % TEST_CONFIG["PERSISTFILE_BASENAME"]))
 
     self.assertEquals("100", state["lkgr"])
 
   def testAutoPushStoppedBySettings(self):
-    TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
-    TEST_CONFIG[SETTINGS_LOCATION] = self.MakeEmptyTempFile()
-    TextToFile("{\"enable_auto_push\": false}", TEST_CONFIG[SETTINGS_LOCATION])
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+    TEST_CONFIG["SETTINGS_LOCATION"] = self.MakeEmptyTempFile()
+    TextToFile("{\"enable_auto_push\": false}",
+               TEST_CONFIG["SETTINGS_LOCATION"])
 
-    self.ExpectReadURL([])
-
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch\n"),
-      Git("svn fetch", ""),
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch\n"),
+      Cmd("git svn fetch", ""),
     ])
 
     def RunAutoPush():
@@ -933,26 +957,23 @@
     self.assertRaises(Exception, RunAutoPush)
 
   def testAutoPushStoppedByTreeStatus(self):
-    TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
-    TEST_CONFIG[SETTINGS_LOCATION] = "~/.doesnotexist"
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
+    TEST_CONFIG["SETTINGS_LOCATION"] = "~/.doesnotexist"
 
-    self.ExpectReadURL([
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch\n"),
+      Cmd("git svn fetch", ""),
       URL("https://v8-status.appspot.com/current?format=json",
           "{\"message\": \"Tree is throttled (no push)\"}"),
     ])
 
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch\n"),
-      Git("svn fetch", ""),
-    ])
-
     def RunAutoPush():
       auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS)
     self.assertRaises(Exception, RunAutoPush)
 
   def testAutoRollExistingRoll(self):
-    self.ExpectReadURL([
+    self.Expect([
       URL("https://codereview.chromium.org/search",
           "owner=author%40chromium.org&limit=30&closed=3&format=json",
           ("{\"results\": [{\"subject\": \"different\"},"
@@ -960,13 +981,13 @@
     ])
 
     result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
-        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]])
-    self.assertEquals(1, result)
+        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
+    self.assertEquals(0, result)
 
   # Snippet from the original DEPS file.
   FAKE_DEPS = """
 vars = {
-  "v8_revision": "123455",
+  "v8_revision": "abcd123455",
 }
 deps = {
   "src/v8":
@@ -976,56 +997,54 @@
 """
 
   def testAutoRollUpToDate(self):
-    self.ExpectReadURL([
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+    self.Expect([
       URL("https://codereview.chromium.org/search",
           "owner=author%40chromium.org&limit=30&closed=3&format=json",
           ("{\"results\": [{\"subject\": \"different\"}]}")),
-      URL("http://src.chromium.org/svn/trunk/src/DEPS",
-          self.FAKE_DEPS),
-    ])
-
-    self.ExpectGit([
-      Git(("log -1 --format=%H --grep="
+      Cmd(("git log -1 --format=%H --grep="
            "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
-           "svn/trunk"), "push_hash\n"),
-      Git("svn find-rev push_hash", "123455\n"),
+           "origin/master"), "push_hash\n"),
+      Cmd("git log -1 --format=%B push_hash", self.C_V8_22624_LOG),
+      Cmd("git log -1 --format=%B abcd123455", self.C_V8_123455_LOG),
     ])
 
     result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
-        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM]])
-    self.assertEquals(1, result)
+        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"]])
+    self.assertEquals(0, result)
 
   def testAutoRoll(self):
-    self.ExpectReadURL([
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    TextToFile(self.FAKE_DEPS, os.path.join(TEST_CONFIG["CHROMIUM"], "DEPS"))
+    TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"]  = self.MakeEmptyTempFile()
+    TextToFile("fake key", TEST_CONFIG["CLUSTERFUZZ_API_KEY_FILE"])
+
+    self.Expect([
       URL("https://codereview.chromium.org/search",
           "owner=author%40chromium.org&limit=30&closed=3&format=json",
           ("{\"results\": [{\"subject\": \"different\"}]}")),
-      URL("http://src.chromium.org/svn/trunk/src/DEPS",
-          self.FAKE_DEPS),
-    ])
-
-    self.ExpectGit([
-      Git(("log -1 --format=%H --grep="
+      Cmd(("git log -1 --format=%H --grep="
            "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
-           "svn/trunk"), "push_hash\n"),
-      Git("svn find-rev push_hash", "123456\n"),
+           "origin/master"), "push_hash\n"),
+      Cmd("git log -1 --format=%B push_hash", self.C_V8_123456_LOG),
+      Cmd("git log -1 --format=%B abcd123455", self.C_V8_123455_LOG),
     ])
 
     result = auto_roll.AutoRoll(TEST_CONFIG, self).Run(
-        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG[CHROMIUM], "--roll"])
+        AUTO_PUSH_ARGS + ["-c", TEST_CONFIG["CHROMIUM"], "--roll"])
     self.assertEquals(0, result)
 
   def testMergeToBranch(self):
-    TEST_CONFIG[ALREADY_MERGING_SENTINEL_FILE] = self.MakeEmptyTempFile()
-    TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
+    TEST_CONFIG["ALREADY_MERGING_SENTINEL_FILE"] = self.MakeEmptyTempFile()
+    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
     self.WriteFakeVersionFile(build=5)
     os.environ["EDITOR"] = "vi"
     extra_patch = self.MakeEmptyTempFile()
 
     def VerifyPatch(patch):
       return lambda: self.assertEquals(patch,
-          FileToText(TEST_CONFIG[TEMPORARY_PATCH_FILE]))
+          FileToText(TEST_CONFIG["TEMPORARY_PATCH_FILE"]))
 
     msg = """Version 3.22.5.1 (merged r12345, r23456, r34567, r45678, r56789)
 
@@ -1044,88 +1063,96 @@
 """
 
     def VerifySVNCommit():
-      commit = FileToText(TEST_CONFIG[COMMITMSG_FILE])
+      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
       self.assertEquals(msg, commit)
-      version = FileToText(TEST_CONFIG[VERSION_FILE])
+      version = FileToText(
+          os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
       self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
       self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
       self.assertTrue(re.search(r"#define PATCH_LEVEL\s+1", version))
       self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
 
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch\n"),
-      Git("svn fetch", ""),
-      Git("branch", "  branch1\n* branch2\n"),
-      Git("checkout -b %s svn/trunk" % TEST_CONFIG[BRANCHNAME], ""),
-      Git("log --format=%H --grep=\"Port r12345\" --reverse svn/bleeding_edge",
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch\n"),
+      Cmd("git svn fetch", ""),
+      Cmd("git branch", "  branch1\n* branch2\n"),
+      Cmd("git checkout -b %s svn/trunk" % TEST_CONFIG["BRANCHNAME"], ""),
+      Cmd(("git log --format=%H --grep=\"Port r12345\" "
+           "--reverse svn/bleeding_edge"),
           "hash1\nhash2"),
-      Git("svn find-rev hash1 svn/bleeding_edge", "45678"),
-      Git("log -1 --format=%s hash1", "Title1"),
-      Git("svn find-rev hash2 svn/bleeding_edge", "23456"),
-      Git("log -1 --format=%s hash2", "Title2"),
-      Git("log --format=%H --grep=\"Port r23456\" --reverse svn/bleeding_edge",
+      Cmd("git svn find-rev hash1 svn/bleeding_edge", "45678"),
+      Cmd("git log -1 --format=%s hash1", "Title1"),
+      Cmd("git svn find-rev hash2 svn/bleeding_edge", "23456"),
+      Cmd("git log -1 --format=%s hash2", "Title2"),
+      Cmd(("git log --format=%H --grep=\"Port r23456\" "
+           "--reverse svn/bleeding_edge"),
           ""),
-      Git("log --format=%H --grep=\"Port r34567\" --reverse svn/bleeding_edge",
+      Cmd(("git log --format=%H --grep=\"Port r34567\" "
+           "--reverse svn/bleeding_edge"),
           "hash3"),
-      Git("svn find-rev hash3 svn/bleeding_edge", "56789"),
-      Git("log -1 --format=%s hash3", "Title3"),
-      Git("svn find-rev r12345 svn/bleeding_edge", "hash4"),
+      Cmd("git svn find-rev hash3 svn/bleeding_edge", "56789"),
+      Cmd("git log -1 --format=%s hash3", "Title3"),
+      RL("Y"),  # Automatically add corresponding ports (34567, 56789)?
+      Cmd("git svn find-rev r12345 svn/bleeding_edge", "hash4"),
       # Simulate svn being down which stops the script.
-      Git("svn find-rev r23456 svn/bleeding_edge", None),
+      Cmd("git svn find-rev r23456 svn/bleeding_edge", None),
       # Restart script in the failing step.
-      Git("svn find-rev r12345 svn/bleeding_edge", "hash4"),
-      Git("svn find-rev r23456 svn/bleeding_edge", "hash2"),
-      Git("svn find-rev r34567 svn/bleeding_edge", "hash3"),
-      Git("svn find-rev r45678 svn/bleeding_edge", "hash1"),
-      Git("svn find-rev r56789 svn/bleeding_edge", "hash5"),
-      Git("log -1 --format=%s hash4", "Title4"),
-      Git("log -1 --format=%s hash2", "Title2"),
-      Git("log -1 --format=%s hash3", "Title3"),
-      Git("log -1 --format=%s hash1", "Title1"),
-      Git("log -1 --format=%s hash5", "Revert \"Something\""),
-      Git("log -1 hash4", "Title4\nBUG=123\nBUG=234"),
-      Git("log -1 hash2", "Title2\n BUG = v8:123,345"),
-      Git("log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"),
-      Git("log -1 hash1", "Title1\nBUG="),
-      Git("log -1 hash5", "Revert \"Something\"\nBUG=none"),
-      Git("log -1 -p hash4", "patch4"),
-      Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+      Cmd("git svn find-rev r12345 svn/bleeding_edge", "hash4"),
+      Cmd("git svn find-rev r23456 svn/bleeding_edge", "hash2"),
+      Cmd("git svn find-rev r34567 svn/bleeding_edge", "hash3"),
+      Cmd("git svn find-rev r45678 svn/bleeding_edge", "hash1"),
+      Cmd("git svn find-rev r56789 svn/bleeding_edge", "hash5"),
+      Cmd("git log -1 --format=%s hash4", "Title4"),
+      Cmd("git log -1 --format=%s hash2", "Title2"),
+      Cmd("git log -1 --format=%s hash3", "Title3"),
+      Cmd("git log -1 --format=%s hash1", "Title1"),
+      Cmd("git log -1 --format=%s hash5", "Revert \"Something\""),
+      Cmd("git log -1 hash4", "Title4\nBUG=123\nBUG=234"),
+      Cmd("git log -1 hash2", "Title2\n BUG = v8:123,345"),
+      Cmd("git log -1 hash3", "Title3\nLOG=n\nBUG=567, 456"),
+      Cmd("git log -1 hash1", "Title1\nBUG="),
+      Cmd("git log -1 hash5", "Revert \"Something\"\nBUG=none"),
+      Cmd("git log -1 -p hash4", "patch4"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch4")),
-      Git("log -1 -p hash2", "patch2"),
-      Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+      Cmd("git log -1 -p hash2", "patch2"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch2")),
-      Git("log -1 -p hash3", "patch3"),
-      Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+      Cmd("git log -1 -p hash3", "patch3"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch3")),
-      Git("log -1 -p hash1", "patch1"),
-      Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+      Cmd("git log -1 -p hash1", "patch1"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch1")),
-      Git("log -1 -p hash5", "patch5\n"),
-      Git("apply --index --reject \"%s\"" % TEST_CONFIG[TEMPORARY_PATCH_FILE],
+      Cmd("git log -1 -p hash5", "patch5\n"),
+      Cmd(("git apply --index --reject \"%s\"" %
+           TEST_CONFIG["TEMPORARY_PATCH_FILE"]),
           "", cb=VerifyPatch("patch5\n")),
-      Git("apply --index --reject \"%s\"" % extra_patch, ""),
-      Git("commit -aF \"%s\"" % TEST_CONFIG[COMMITMSG_FILE], ""),
-      Git("cl upload --send-mail -r \"reviewer@chromium.org\"", ""),
-      Git("checkout -f %s" % TEST_CONFIG[BRANCHNAME], ""),
-      Git("cl presubmit", "Presubmit successfull\n"),
-      Git("cl dcommit -f --bypass-hooks", "Closing issue\n", cb=VerifySVNCommit),
-      Git("svn fetch", ""),
-      Git(("log -1 --format=%%H --grep=\"%s\" svn/trunk"
+      Cmd("git apply --index --reject \"%s\"" % extra_patch, ""),
+      RL("Y"),  # Automatically increment patch level?
+      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], ""),
+      RL("reviewer@chromium.org"),  # V8 reviewer.
+      Cmd("git cl upload --send-mail -r \"reviewer@chromium.org\" "
+          "--bypass-hooks", ""),
+      Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
+      RL("LGTM"),  # Enter LGTM for V8 CL.
+      Cmd("git cl presubmit", "Presubmit successfull\n"),
+      Cmd("git cl dcommit -f --bypass-hooks", "Closing issue\n",
+          cb=VerifySVNCommit),
+      Cmd("git svn fetch", ""),
+      Cmd(("git log -1 --format=%%H --grep=\"%s\" svn/trunk"
            % msg.replace("\"", "\\\"")), "hash6"),
-      Git("svn find-rev hash6", "1324"),
-      Git(("copy -r 1324 https://v8.googlecode.com/svn/trunk "
+      Cmd("git svn find-rev hash6", "1324"),
+      Cmd(("svn copy -r 1324 https://v8.googlecode.com/svn/trunk "
            "https://v8.googlecode.com/svn/tags/3.22.5.1 -m "
            "\"Tagging version 3.22.5.1\""), ""),
-      Git("checkout -f some_branch", ""),
-      Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
-    ])
-
-    self.ExpectReadline([
-      RL("Y"),  # Automatically add corresponding ports (34567, 56789)?
-      RL("Y"),  # Automatically increment patch level?
-      RL("reviewer@chromium.org"),  # V8 reviewer.
-      RL("LGTM"),  # Enter LGTM for V8 CL.
+      Cmd("git checkout -f some_branch", ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
     ])
 
     # r12345 and r34567 are patches. r23456 (included) and r45678 are the MIPS
@@ -1142,17 +1169,64 @@
     MergeToBranch(TEST_CONFIG, self).Run(args)
 
   def testReleases(self):
+    tag_response_text = """
+------------------------------------------------------------------------
+r22631 | author1@chromium.org | 2014-07-28 02:05:29 +0200 (Mon, 28 Jul 2014)
+Changed paths:
+   A /tags/3.28.43 (from /trunk:22630)
+
+Tagging version 3.28.43
+------------------------------------------------------------------------
+r22629 | author2@chromium.org | 2014-07-26 05:09:29 +0200 (Sat, 26 Jul 2014)
+Changed paths:
+   A /tags/3.28.41 (from /branches/bleeding_edge:22626)
+
+Tagging version 3.28.41
+------------------------------------------------------------------------
+r22556 | author3@chromium.org | 2014-07-23 13:31:59 +0200 (Wed, 23 Jul 2014)
+Changed paths:
+   A /tags/3.27.34.7 (from /branches/3.27:22555)
+
+Tagging version 3.27.34.7
+------------------------------------------------------------------------
+r22627 | author4@chromium.org | 2014-07-26 01:39:15 +0200 (Sat, 26 Jul 2014)
+Changed paths:
+   A /tags/3.28.40 (from /branches/bleeding_edge:22624)
+
+Tagging version 3.28.40
+------------------------------------------------------------------------
+"""
+    c_hash2_commit_log = """Revert something.
+
+BUG=12345
+
+Reason:
+> Some reason.
+> Cr-Commit-Position: refs/heads/master@{#12345}
+> git-svn-id: svn://svn.chromium.org/chrome/trunk/src@12345 003-1c4
+
+Review URL: https://codereview.chromium.org/12345
+
+Cr-Commit-Position: refs/heads/master@{#4567}
+git-svn-id: svn://svn.chromium.org/chrome/trunk/src@4567 0039-1c4b
+
+"""
+    c_hash3_commit_log = """Simple.
+
+git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
+
+"""
     json_output = self.MakeEmptyTempFile()
     csv_output = self.MakeEmptyTempFile()
-    TEST_CONFIG[VERSION_FILE] = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
 
-    TEST_CONFIG[DOT_GIT_LOCATION] = self.MakeEmptyTempFile()
-    if not os.path.exists(TEST_CONFIG[CHROMIUM]):
-      os.makedirs(TEST_CONFIG[CHROMIUM])
+    TEST_CONFIG["CHROMIUM"] = self.MakeEmptyTempDirectory()
+    chrome_dir = TEST_CONFIG["CHROMIUM"]
+    chrome_v8_dir = os.path.join(chrome_dir, "v8")
+    os.makedirs(chrome_v8_dir)
     def WriteDEPS(revision):
       TextToFile("Line\n   \"v8_revision\": \"%s\",\n  line\n" % revision,
-                 TEST_CONFIG[DEPS_FILE])
+                 os.path.join(chrome_dir, "DEPS"))
     WriteDEPS(567)
 
     def ResetVersion(minor, build, patch=0):
@@ -1163,86 +1237,119 @@
     def ResetDEPS(revision):
       return lambda: WriteDEPS(revision)
 
-    self.ExpectGit([
-      Git("status -s -uno", ""),
-      Git("status -s -b -uno", "## some_branch\n"),
-      Git("svn fetch", ""),
-      Git("branch", "  branch1\n* branch2\n"),
-      Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""),
-      Git("branch -r", "  svn/3.21\n  svn/3.3\n"),
-      Git("reset --hard svn/3.3", ""),
-      Git("log --format=%H", "hash1\nhash2"),
-      Git("diff --name-only hash1 hash1^", ""),
-      Git("diff --name-only hash2 hash2^", TEST_CONFIG[VERSION_FILE]),
-      Git("checkout -f hash2 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch\n"),
+      Cmd("git svn fetch", ""),
+      Cmd("git branch", "  branch1\n* branch2\n"),
+      Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], ""),
+      Cmd("git branch -r", "  svn/3.21\n  svn/3.3\n"),
+      Cmd("git reset --hard svn/3.3", ""),
+      Cmd("git log --format=%H", "hash1\nhash2"),
+      Cmd("git diff --name-only hash1 hash1^", ""),
+      Cmd("git diff --name-only hash2 hash2^", VERSION_FILE),
+      Cmd("git checkout -f hash2 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(3, 1, 1)),
-      Git("log -1 --format=%B hash2",
+      Cmd("git log -1 --format=%B hash2",
           "Version 3.3.1.1 (merged 12)\n\nReview URL: fake.com\n"),
-      Git("log -1 --format=%s hash2", ""),
-      Git("svn find-rev hash2", "234"),
-      Git("log -1 --format=%ci hash2", "18:15"),
-      Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git log -1 --format=%s hash2", ""),
+      Cmd("git svn find-rev hash2", "234"),
+      Cmd("git log -1 --format=%ci hash2", "18:15"),
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
-      Git("reset --hard svn/3.21", ""),
-      Git("log --format=%H", "hash3\nhash4\nhash5\n"),
-      Git("diff --name-only hash3 hash3^", TEST_CONFIG[VERSION_FILE]),
-      Git("checkout -f hash3 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git reset --hard svn/3.21", ""),
+      Cmd("git log --format=%H", "hash3\nhash4\nhash5\n"),
+      Cmd("git diff --name-only hash3 hash3^", VERSION_FILE),
+      Cmd("git checkout -f hash3 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(21, 2)),
-      Git("log -1 --format=%B hash3", ""),
-      Git("log -1 --format=%s hash3", ""),
-      Git("svn find-rev hash3", "123"),
-      Git("log -1 --format=%ci hash3", "03:15"),
-      Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git log -1 --format=%B hash3", ""),
+      Cmd("git log -1 --format=%s hash3", ""),
+      Cmd("git svn find-rev hash3", "123"),
+      Cmd("git log -1 --format=%ci hash3", "03:15"),
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
-      Git("reset --hard svn/trunk", ""),
-      Git("log --format=%H", "hash6\n"),
-      Git("diff --name-only hash6 hash6^", TEST_CONFIG[VERSION_FILE]),
-      Git("checkout -f hash6 -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git reset --hard svn/trunk", ""),
+      Cmd("git log --format=%H", "hash6\n"),
+      Cmd("git diff --name-only hash6 hash6^", VERSION_FILE),
+      Cmd("git checkout -f hash6 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 3)),
-      Git("log -1 --format=%B hash6", ""),
-      Git("log -1 --format=%s hash6", ""),
-      Git("svn find-rev hash6", "345"),
-      Git("log -1 --format=%ci hash6", ""),
-      Git("checkout -f HEAD -- %s" % TEST_CONFIG[VERSION_FILE], "",
+      Cmd("git log -1 --format=%B hash6", ""),
+      Cmd("git log -1 --format=%s hash6", ""),
+      Cmd("git svn find-rev hash6", "345"),
+      Cmd("git log -1 --format=%ci hash6", ""),
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
-      Git("status -s -uno", ""),
-      Git("checkout -f master", ""),
-      Git("pull", ""),
-      Git("checkout -b %s" % TEST_CONFIG[BRANCHNAME], ""),
-      Git("log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\n"),
-      Git("diff --name-only c_hash1 c_hash1^", ""),
-      Git("diff --name-only c_hash2 c_hash2^", TEST_CONFIG[DEPS_FILE]),
-      Git("checkout -f c_hash2 -- %s" % TEST_CONFIG[DEPS_FILE], "",
-          cb=ResetDEPS(345)),
-      Git("svn find-rev c_hash2", "4567"),
-      Git("checkout -f HEAD -- %s" % TEST_CONFIG[DEPS_FILE], "",
-          cb=ResetDEPS(567)),
-      Git("branch -r", " weird/123\n  branch-heads/7\n"),
-      Git("checkout -f branch-heads/7 -- %s" % TEST_CONFIG[DEPS_FILE], "",
-          cb=ResetDEPS(345)),
-      Git("checkout -f HEAD -- %s" % TEST_CONFIG[DEPS_FILE], "",
-          cb=ResetDEPS(567)),
-      Git("checkout -f master", ""),
-      Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
-      Git("checkout -f some_branch", ""),
-      Git("branch -D %s" % TEST_CONFIG[BRANCHNAME], ""),
+      Cmd("git reset --hard svn/bleeding_edge", ""),
+      Cmd("svn log https://v8.googlecode.com/svn/tags -v --limit 20",
+          tag_response_text),
+      Cmd("git svn find-rev r22626", "hash_22626"),
+      Cmd("git svn find-rev hash_22626", "22626"),
+      Cmd("git log -1 --format=%ci hash_22626", "01:23"),
+      Cmd("git svn find-rev r22624", "hash_22624"),
+      Cmd("git svn find-rev hash_22624", "22624"),
+      Cmd("git log -1 --format=%ci hash_22624", "02:34"),
+      Cmd("git status -s -uno", "", cwd=chrome_dir),
+      Cmd("git checkout -f master", "", cwd=chrome_dir),
+      Cmd("git pull", "", cwd=chrome_dir),
+      Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
+      Cmd("git fetch origin", "", cwd=chrome_v8_dir),
+      Cmd("git log --format=%H --grep=\"V8\"", "c_hash1\nc_hash2\nc_hash3\n",
+          cwd=chrome_dir),
+      Cmd("git diff --name-only c_hash1 c_hash1^", "", cwd=chrome_dir),
+      Cmd("git diff --name-only c_hash2 c_hash2^", "DEPS", cwd=chrome_dir),
+      Cmd("git checkout -f c_hash2 -- DEPS", "",
+          cb=ResetDEPS("0123456789012345678901234567890123456789"),
+          cwd=chrome_dir),
+      Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
+          cwd=chrome_dir),
+      Cmd("git rev-list -n 1 0123456789012345678901234567890123456789",
+          "0123456789012345678901234567890123456789", cwd=chrome_v8_dir),
+      Cmd("git log -1 --format=%B 0123456789012345678901234567890123456789",
+          self.C_V8_22624_LOG, cwd=chrome_v8_dir),
+      Cmd("git diff --name-only c_hash3 c_hash3^", "DEPS", cwd=chrome_dir),
+      Cmd("git checkout -f c_hash3 -- DEPS", "", cb=ResetDEPS(345),
+          cwd=chrome_dir),
+      Cmd("git log -1 --format=%B c_hash3", c_hash3_commit_log,
+          cwd=chrome_dir),
+      Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
+          cwd=chrome_dir),
+      Cmd("git branch -r", " weird/123\n  branch-heads/7\n", cwd=chrome_dir),
+      Cmd("git checkout -f branch-heads/7 -- DEPS", "", cb=ResetDEPS(345),
+          cwd=chrome_dir),
+      Cmd("git checkout -f HEAD -- DEPS", "", cb=ResetDEPS(567),
+          cwd=chrome_dir),
+      Cmd("git checkout -f master", "", cwd=chrome_dir),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], "", cwd=chrome_dir),
+      Cmd("git checkout -f some_branch", ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
     ])
 
-    args = ["-c", TEST_CONFIG[CHROMIUM],
+    args = ["-c", TEST_CONFIG["CHROMIUM"],
             "--json", json_output,
             "--csv", csv_output,
             "--max-releases", "1"]
     Releases(TEST_CONFIG, self).Run(args)
 
     # Check expected output.
-    csv = ("3.22.3,trunk,345,4567,\r\n"
+    csv = ("3.28.41,bleeding_edge,22626,,\r\n"
+           "3.28.40,bleeding_edge,22624,4567,\r\n"
+           "3.22.3,trunk,345,3456:4566,\r\n"
            "3.21.2,3.21,123,,\r\n"
            "3.3.1.1,3.3,234,,12\r\n")
     self.assertEquals(csv, FileToText(csv_output))
 
     expected_json = [
+      {"bleeding_edge": "22626", "patches_merged": "", "version": "3.28.41",
+       "chromium_revision": "", "branch": "bleeding_edge", "revision": "22626",
+       "review_link": "", "date": "01:23", "chromium_branch": "",
+       "revision_link": "https://code.google.com/p/v8/source/detail?r=22626"},
+      {"bleeding_edge": "22624", "patches_merged": "", "version": "3.28.40",
+       "chromium_revision": "4567", "branch": "bleeding_edge",
+       "revision": "22624", "review_link": "", "date": "02:34",
+       "chromium_branch": "",
+       "revision_link": "https://code.google.com/p/v8/source/detail?r=22624"},
       {"bleeding_edge": "", "patches_merged": "", "version": "3.22.3",
-       "chromium_revision": "4567", "branch": "trunk", "revision": "345",
+       "chromium_revision": "3456:4566", "branch": "trunk", "revision": "345",
        "review_link": "", "date": "", "chromium_branch": "7",
        "revision_link": "https://code.google.com/p/v8/source/detail?r=345"},
       {"patches_merged": "", "bleeding_edge": "", "version": "3.21.2",
@@ -1257,6 +1364,167 @@
     self.assertEquals(expected_json, json.loads(FileToText(json_output)))
 
 
+  def _bumpUpVersion(self):
+    self.WriteFakeVersionFile()
+
+    def ResetVersion(minor, build, patch=0):
+      return lambda: self.WriteFakeVersionFile(minor=minor,
+                                               build=build,
+                                               patch=patch)
+
+    return [
+      Cmd("git status -s -uno", ""),
+      Cmd("git checkout -f bleeding_edge", "", cb=ResetVersion(11, 4)),
+      Cmd("git pull", ""),
+      Cmd("git branch", ""),
+      Cmd("git checkout -f bleeding_edge", ""),
+      Cmd("git log -1 --format=%H", "latest_hash"),
+      Cmd("git diff --name-only latest_hash latest_hash^", ""),
+      URL("https://v8-status.appspot.com/lkgr", "12345"),
+      Cmd("git checkout -f bleeding_edge", ""),
+      Cmd(("git log --format=%H --grep="
+           "\"^git-svn-id: [^@]*@12345 [A-Za-z0-9-]*$\""),
+          "lkgr_hash"),
+      Cmd("git checkout -b auto-bump-up-version lkgr_hash", ""),
+      Cmd("git checkout -f bleeding_edge", ""),
+      Cmd("git branch", ""),
+      Cmd("git diff --name-only lkgr_hash lkgr_hash^", ""),
+      Cmd("git checkout -f master", "", cb=ResetVersion(11, 5)),
+      Cmd("git pull", ""),
+      URL("https://v8-status.appspot.com/current?format=json",
+          "{\"message\": \"Tree is open\"}"),
+      Cmd("git checkout -b auto-bump-up-version bleeding_edge", "",
+          cb=ResetVersion(11, 4)),
+      Cmd("git commit -am \"[Auto-roll] Bump up version to 3.11.6.0\n\n"
+          "TBR=author@chromium.org\" "
+          "--author \"author@chromium.org <author@chromium.org>\"", ""),
+    ]
+
+  def testBumpUpVersionGit(self):
+    expectations = self._bumpUpVersion()
+    expectations += [
+      Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
+          "--bypass-hooks", ""),
+      Cmd("git cl dcommit -f --bypass-hooks", ""),
+      Cmd("git checkout -f bleeding_edge", ""),
+      Cmd("git branch", "auto-bump-up-version\n* bleeding_edge"),
+      Cmd("git branch -D auto-bump-up-version", ""),
+    ]
+    self.Expect(expectations)
+
+    BumpUpVersion(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
+
+  def testBumpUpVersionSvn(self):
+    svn_root = self.MakeEmptyTempDirectory()
+    expectations = self._bumpUpVersion()
+    expectations += [
+      Cmd("git diff HEAD^ HEAD", "patch content"),
+      Cmd("svn update", "", cwd=svn_root),
+      Cmd("svn status", "", cwd=svn_root),
+      Cmd("patch -d branches/bleeding_edge -p1 -i %s" %
+          TEST_CONFIG["PATCH_FILE"], "Applied patch...", cwd=svn_root),
+      Cmd("svn commit --non-interactive --username=author@chromium.org "
+          "--config-dir=[CONFIG_DIR] "
+          "-m \"[Auto-roll] Bump up version to 3.11.6.0\"",
+          "", cwd=svn_root),
+      Cmd("git checkout -f bleeding_edge", ""),
+      Cmd("git branch", "auto-bump-up-version\n* bleeding_edge"),
+      Cmd("git branch -D auto-bump-up-version", ""),
+    ]
+    self.Expect(expectations)
+
+    BumpUpVersion(TEST_CONFIG, self).Run(
+        ["-a", "author@chromium.org",
+         "--svn", svn_root,
+         "--svn-config", "[CONFIG_DIR]"])
+
+  def testAutoTag(self):
+    self.WriteFakeVersionFile()
+
+    def ResetVersion(minor, build, patch=0):
+      return lambda: self.WriteFakeVersionFile(minor=minor,
+                                               build=build,
+                                               patch=patch)
+
+    self.Expect([
+      Cmd("git status -s -uno", ""),
+      Cmd("git status -s -b -uno", "## some_branch\n"),
+      Cmd("git svn fetch", ""),
+      Cmd("git branch", "  branch1\n* branch2\n"),
+      Cmd("git checkout -f master", ""),
+      Cmd("git svn rebase", ""),
+      Cmd("git checkout -b %s" % TEST_CONFIG["BRANCHNAME"], "",
+          cb=ResetVersion(4, 5)),
+      Cmd("git branch -r",
+          "svn/tags/3.4.2\nsvn/tags/3.2.1.0\nsvn/branches/3.4"),
+      Cmd(("git log --format=%H --grep="
+           "\"\\[Auto\\-roll\\] Bump up version to\""),
+          "hash125\nhash118\nhash111\nhash101"),
+      Cmd("git checkout -f hash125 -- %s" % VERSION_FILE, "",
+          cb=ResetVersion(4, 4)),
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+          cb=ResetVersion(4, 5)),
+      Cmd("git checkout -f hash118 -- %s" % VERSION_FILE, "",
+          cb=ResetVersion(4, 3)),
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+          cb=ResetVersion(4, 5)),
+      Cmd("git checkout -f hash111 -- %s" % VERSION_FILE, "",
+          cb=ResetVersion(4, 2)),
+      Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
+          cb=ResetVersion(4, 5)),
+      URL("https://v8-status.appspot.com/revisions?format=json",
+          "[{\"revision\": \"126\", \"status\": true},"
+           "{\"revision\": \"123\", \"status\": true},"
+           "{\"revision\": \"112\", \"status\": true}]"),
+      Cmd("git svn find-rev hash118", "118"),
+      Cmd("git svn find-rev hash125", "125"),
+      Cmd("git svn find-rev r123", "hash123"),
+      Cmd("git log -1 --format=%at hash123", "1"),
+      Cmd("git reset --hard hash123", ""),
+      Cmd("git svn tag 3.4.3 -m \"Tagging version 3.4.3\"", ""),
+      Cmd("git checkout -f some_branch", ""),
+      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
+    ])
+
+    AutoTag(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
+
+  # Test that we bail out if the last change was a version change.
+  def testBumpUpVersionBailout1(self):
+    self._state["latest"] = "latest_hash"
+
+    self.Expect([
+      Cmd("git diff --name-only latest_hash latest_hash^", VERSION_FILE),
+    ])
+
+    self.assertEquals(0,
+        self.RunStep(BumpUpVersion, LastChangeBailout, ["--dry_run"]))
+
+  # Test that we bail out if the lkgr was a version change.
+  def testBumpUpVersionBailout2(self):
+    self._state["lkgr"] = "lkgr_hash"
+
+    self.Expect([
+      Cmd("git diff --name-only lkgr_hash lkgr_hash^", VERSION_FILE),
+    ])
+
+    self.assertEquals(0,
+        self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
+
+  # Test that we bail out if the last version is already newer than the lkgr's
+  # version.
+  def testBumpUpVersionBailout3(self):
+    self._state["lkgr"] = "lkgr_hash"
+    self._state["lkgr_version"] = "3.22.4.0"
+    self._state["latest_version"] = "3.22.5.0"
+
+    self.Expect([
+      Cmd("git diff --name-only lkgr_hash lkgr_hash^", ""),
+    ])
+
+    self.assertEquals(0,
+        self.RunStep(BumpUpVersion, LKGRVersionUpToDateBailout, ["--dry_run"]))
+
+
 class SystemTest(unittest.TestCase):
   def testReload(self):
     step = MakeStep(step_class=PrepareChangeLog, number=0, state={}, config={},
diff --git a/tools/run-deopt-fuzzer.py b/tools/run-deopt-fuzzer.py
index eafed01..57cb6b2 100755
--- a/tools/run-deopt-fuzzer.py
+++ b/tools/run-deopt-fuzzer.py
@@ -369,9 +369,12 @@
                         timeout, options.isolates,
                         options.command_prefix,
                         options.extra_flags,
-                        False,
+                        False,  # Keep i18n on by default.
                         options.random_seed,
-                        True)
+                        True,  # No sorting of test cases.
+                        0,  # Don't rerun failing tests.
+                        0,  # No use of a rerun-failing-tests maximum.
+                        False)  # No predictable mode.
 
   # Find available test suites and read test cases from them.
   variables = {
@@ -385,6 +388,7 @@
     "no_snap": False,
     "simulator": utils.UseSimulator(arch),
     "system": utils.GuessOS(),
+    "tsan": False,
   }
   all_tests = []
   num_tests = 0
diff --git a/tools/run-tests.py b/tools/run-tests.py
index 794c864..d48b70c 100755
--- a/tools/run-tests.py
+++ b/tools/run-tests.py
@@ -28,6 +28,7 @@
 # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 
+from collections import OrderedDict
 import itertools
 import multiprocessing
 import optparse
@@ -50,7 +51,34 @@
 
 
 ARCH_GUESS = utils.DefaultArch()
-DEFAULT_TESTS = ["mjsunit", "fuzz-natives", "cctest", "message", "preparser"]
+DEFAULT_TESTS = ["mjsunit", "fuzz-natives", "base-unittests",
+                 "cctest", "compiler-unittests", "heap-unittests",
+                 "libplatform-unittests", "message", "preparser"]
+
+# Map of test name synonyms to lists of test suites. Should be ordered by
+# expected runtimes (suites with slow test cases first). These groups are
+# invoked in separate steps on the bots.
+TEST_MAP = {
+  "default": [
+    "mjsunit",
+    "fuzz-natives",
+    "cctest",
+    "message",
+    "preparser",
+  ],
+  "optimize_for_size": [
+    "mjsunit",
+    "cctest",
+    "webkit",
+  ],
+  "unittests": [
+    "compiler-unittests",
+    "heap-unittests",
+    "base-unittests",
+    "libplatform-unittests",
+  ],
+}
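+
+# Usage sketch (suite names as defined above): a synonym expands before
+# suite selection, so e.g.
+#   tools/run-tests.py unittests
+# behaves like
+#   tools/run-tests.py compiler-unittests heap-unittests \
+#       base-unittests libplatform-unittests
+# (the expansion happens in ExpandTestGroups() further down in this file).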
+
 TIMEOUT_DEFAULT = 60
 TIMEOUT_SCALEFACTOR = {"debug"   : 4,
                        "release" : 1 }
@@ -59,9 +87,10 @@
 VARIANT_FLAGS = {
     "default": [],
     "stress": ["--stress-opt", "--always-opt"],
+    "turbofan": ["--turbo-filter=*", "--always-opt"],
     "nocrankshaft": ["--nocrankshaft"]}
 
-VARIANTS = ["default", "stress", "nocrankshaft"]
+VARIANTS = ["default", "stress", "turbofan", "nocrankshaft"]
 
 MODE_FLAGS = {
     "debug"   : ["--nohard-abort", "--nodead-code-elimination",
@@ -83,9 +112,11 @@
                    "x87",
                    "mips",
                    "mipsel",
+                   "mips64el",
                    "nacl_ia32",
                    "nacl_x64",
                    "x64",
+                   "x32",
                    "arm64"]
 # Double the timeout for these:
 SLOW_ARCHS = ["android_arm",
@@ -94,6 +125,7 @@
               "arm",
               "mips",
               "mipsel",
+              "mips64el",
               "nacl_ia32",
               "nacl_x64",
               "x87",
@@ -170,6 +202,9 @@
                     help="Comma-separated list of testing variants")
   result.add_option("--outdir", help="Base directory with compile output",
                     default="out")
+  result.add_option("--predictable",
+                    help="Compare output of several reruns of each test",
+                    default=False, action="store_true")
   result.add_option("-p", "--progress",
                     help=("The style of progress indicator"
                           " (verbose, dots, color, mono)"),
@@ -180,6 +215,13 @@
                     default=False, action="store_true")
   result.add_option("--json-test-results",
                     help="Path to a file for storing json results.")
+  result.add_option("--rerun-failures-count",
+                    help=("Number of times to rerun each failing test case. "
+                          "Very slow tests will be rerun only once."),
+                    default=0, type="int")
+  result.add_option("--rerun-failures-max",
+                    help="Maximum number of failing test cases to rerun.",
+                    default=100, type="int")
   result.add_option("--shard-count",
                     help="Split testsuites into this number of shards",
                     default=1, type="int")
@@ -200,6 +242,9 @@
                     default=False, action="store_true")
   result.add_option("-t", "--timeout", help="Timeout in seconds",
                     default= -1, type="int")
+  result.add_option("--tsan",
+                    help="Regard test expectations for TSAN",
+                    default=False, action="store_true")
   result.add_option("-v", "--verbose", help="Verbose output",
                     default=False, action="store_true")
   result.add_option("--valgrind", help="Run tests through valgrind",
@@ -262,6 +307,9 @@
   if options.asan:
     options.extra_flags.append("--invoke-weak-callbacks")
 
+  if options.tsan:
+    VARIANTS = ["default"]
+
   if options.j == 0:
     options.j = multiprocessing.cpu_count()
 
@@ -273,10 +321,15 @@
     return reduce(lambda x, y: x + y, args) <= 1
 
   if not excl(options.no_stress, options.stress_only, options.no_variants,
-              bool(options.variants), options.quickcheck):
+              bool(options.variants)):
     print("Use only one of --no-stress, --stress-only, --no-variants, "
-          "--variants, or --quickcheck.")
+          "or --variants.")
     return False
+  if options.quickcheck:
+    VARIANTS = ["default", "stress"]
+    options.flaky_tests = "skip"
+    options.slow_tests = "skip"
+    options.pass_fail_tests = "skip"
   if options.no_stress:
     VARIANTS = ["default", "nocrankshaft"]
   if options.no_variants:
@@ -288,11 +341,11 @@
     if not set(VARIANTS).issubset(VARIANT_FLAGS.keys()):
       print "All variants must be in %s" % str(VARIANT_FLAGS.keys())
       return False
-  if options.quickcheck:
-    VARIANTS = ["default", "stress"]
-    options.flaky_tests = "skip"
-    options.slow_tests = "skip"
-    options.pass_fail_tests = "skip"
+  if options.predictable:
+    VARIANTS = ["default"]
+    options.extra_flags.append("--predictable")
+    options.extra_flags.append("--verify_predictable")
+    options.extra_flags.append("--no-inline-new")
 
   if not options.shell_dir:
     if options.shell:
@@ -351,14 +404,23 @@
 
   suite_paths = utils.GetSuitePaths(join(workspace, "test"))
 
+  # Expand arguments with grouped tests. The expanded args should reflect
+  # the list of suites, as test filters would otherwise break.
+  def ExpandTestGroups(name):
+    if name in TEST_MAP:
+      return [suite for suite in TEST_MAP[name]]
+    else:
+      return [name]
+  args = reduce(lambda x, y: x + y,
+         [ExpandTestGroups(arg) for arg in args],
+         [])
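+  # Illustration (arg values hypothetical): args == ["unittests",
+  # "mjsunit/foo"] expands to ["compiler-unittests", "heap-unittests",
+  # "base-unittests", "libplatform-unittests", "mjsunit/foo"].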
+
   if len(args) == 0:
     suite_paths = [ s for s in DEFAULT_TESTS if s in suite_paths ]
   else:
-    args_suites = set()
+    args_suites = OrderedDict()  # Used as an ordered set.
     for arg in args:
-      suite = arg.split(os.path.sep)[0]
-      if not suite in args_suites:
-        args_suites.add(suite)
+      args_suites[arg.split(os.path.sep)[0]] = True
     suite_paths = [ s for s in args_suites if s in suite_paths ]
 
   suites = []
@@ -408,6 +470,11 @@
       timeout = TIMEOUT_DEFAULT;
 
   timeout *= TIMEOUT_SCALEFACTOR[mode]
+
+  if options.predictable:
+    # Predictable mode is slower.
+    timeout *= 2
+
   ctx = context.Context(arch, mode, shell_dir,
                         mode_flags, options.verbose,
                         timeout, options.isolates,
@@ -415,7 +482,10 @@
                         options.extra_flags,
                         options.no_i18n,
                         options.random_seed,
-                        options.no_sorting)
+                        options.no_sorting,
+                        options.rerun_failures_count,
+                        options.rerun_failures_max,
+                        options.predictable)
 
   # TODO(all): Combine "simulator" and "simulator_run".
   simulator_run = not options.dont_skip_simulator_slow_tests and \
@@ -433,6 +503,7 @@
     "simulator_run": simulator_run,
     "simulator": utils.UseSimulator(arch),
     "system": utils.GuessOS(),
+    "tsan": options.tsan,
   }
   all_tests = []
   num_tests = 0
diff --git a/tools/run_benchmarks.py b/tools/run_benchmarks.py
deleted file mode 100755
index 1a07025..0000000
--- a/tools/run_benchmarks.py
+++ /dev/null
@@ -1,401 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-"""
-Performance runner for d8.
-
-Call e.g. with tools/run-benchmarks.py --arch ia32 some_suite.json
-
-The suite json format is expected to be:
-{
-  "path": <relative path chunks to benchmark resources and main file>,
-  "name": <optional suite name, file name is default>,
-  "archs": [<architecture name for which this suite is run>, ...],
-  "binary": <name of binary to run, default "d8">,
-  "flags": [<flag to d8>, ...],
-  "run_count": <how often will this suite run (optional)>,
-  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
-  "resources": [<js file to be loaded before main>, ...]
-  "main": <main js benchmark runner file>,
-  "results_regexp": <optional regexp>,
-  "results_processor": <optional python results processor script>,
-  "units": <the unit specification for the performance dashboard>,
-  "benchmarks": [
-    {
-      "name": <name of the benchmark>,
-      "results_regexp": <optional more specific regexp>,
-      "results_processor": <optional python results processor script>,
-      "units": <the unit specification for the performance dashboard>,
-    }, ...
-  ]
-}
-
-The benchmarks field can also nest other suites in arbitrary depth. A suite
-with a "main" file is a leaf suite that can contain one more level of
-benchmarks.
-
-A suite's results_regexp is expected to have one string place holder
-"%s" for the benchmark name. A benchmark's results_regexp overwrites suite
-defaults.
-
-A suite's results_processor may point to an optional python script. If
-specified, it is called after running the benchmarks like this (with a path
-relatve to the suite level's path):
-<results_processor file> <same flags as for d8> <suite level name> <output>
-
-The <output> is a temporary file containing d8 output. The results_regexp will
-be applied to the output of this script.
-
-A suite without "benchmarks" is considered a benchmark itself.
-
-Full example (suite with one runner):
-{
-  "path": ["."],
-  "flags": ["--expose-gc"],
-  "archs": ["ia32", "x64"],
-  "run_count": 5,
-  "run_count_ia32": 3,
-  "main": "run.js",
-  "results_regexp": "^%s: (.+)$",
-  "units": "score",
-  "benchmarks": [
-    {"name": "Richards"},
-    {"name": "DeltaBlue"},
-    {"name": "NavierStokes",
-     "results_regexp": "^NavierStokes: (.+)$"}
-  ]
-}
-
-Full example (suite with several runners):
-{
-  "path": ["."],
-  "flags": ["--expose-gc"],
-  "archs": ["ia32", "x64"],
-  "run_count": 5,
-  "units": "score",
-  "benchmarks": [
-    {"name": "Richards",
-     "path": ["richards"],
-     "main": "run.js",
-     "run_count": 3,
-     "results_regexp": "^Richards: (.+)$"},
-    {"name": "NavierStokes",
-     "path": ["navier_stokes"],
-     "main": "run.js",
-     "results_regexp": "^NavierStokes: (.+)$"}
-  ]
-}
-
-Path pieces are concatenated. D8 is always run with the suite's path as cwd.
-"""
-
-import json
-import optparse
-import os
-import re
-import sys
-
-from testrunner.local import commands
-from testrunner.local import utils
-
-ARCH_GUESS = utils.DefaultArch()
-SUPPORTED_ARCHS = ["android_arm",
-                   "android_arm64",
-                   "android_ia32",
-                   "arm",
-                   "ia32",
-                   "mips",
-                   "mipsel",
-                   "nacl_ia32",
-                   "nacl_x64",
-                   "x64",
-                   "arm64"]
-
-
-class Results(object):
-  """Place holder for result traces."""
-  def __init__(self, traces=None, errors=None):
-    self.traces = traces or []
-    self.errors = errors or []
-
-  def ToDict(self):
-    return {"traces": self.traces, "errors": self.errors}
-
-  def WriteToFile(self, file_name):
-    with open(file_name, "w") as f:
-      f.write(json.dumps(self.ToDict()))
-
-  def __add__(self, other):
-    self.traces += other.traces
-    self.errors += other.errors
-    return self
-
-  def __str__(self):  # pragma: no cover
-    return str(self.ToDict())
-
-
-class Node(object):
-  """Represents a node in the benchmark suite tree structure."""
-  def __init__(self, *args):
-    self._children = []
-
-  def AppendChild(self, child):
-    self._children.append(child)
-
-
-class DefaultSentinel(Node):
-  """Fake parent node with all default values."""
-  def __init__(self):
-    super(DefaultSentinel, self).__init__()
-    self.binary = "d8"
-    self.run_count = 10
-    self.path = []
-    self.graphs = []
-    self.flags = []
-    self.resources = []
-    self.results_regexp = None
-    self.units = "score"
-
-
-class Graph(Node):
-  """Represents a benchmark suite definition.
-
-  Can either be a leaf or an inner node that provides default values.
-  """
-  def __init__(self, suite, parent, arch):
-    super(Graph, self).__init__()
-    self._suite = suite
-
-    assert isinstance(suite.get("path", []), list)
-    assert isinstance(suite["name"], basestring)
-    assert isinstance(suite.get("flags", []), list)
-    assert isinstance(suite.get("resources", []), list)
-
-    # Accumulated values.
-    self.path = parent.path[:] + suite.get("path", [])
-    self.graphs = parent.graphs[:] + [suite["name"]]
-    self.flags = parent.flags[:] + suite.get("flags", [])
-    self.resources = parent.resources[:] + suite.get("resources", [])
-
-    # Descrete values (with parent defaults).
-    self.binary = suite.get("binary", parent.binary)
-    self.run_count = suite.get("run_count", parent.run_count)
-    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
-    self.units = suite.get("units", parent.units)
-
-    # A regular expression for results. If the parent graph provides a
-    # regexp and the current suite has none, a string place holder for the
-    # suite name is expected.
-    # TODO(machenbach): Currently that makes only sense for the leaf level.
-    # Multiple place holders for multiple levels are not supported.
-    if parent.results_regexp:
-      regexp_default = parent.results_regexp % suite["name"]
-    else:
-      regexp_default = None
-    self.results_regexp = suite.get("results_regexp", regexp_default)
-
-
-class Trace(Graph):
-  """Represents a leaf in the benchmark suite tree structure.
-
-  Handles collection of measurements.
-  """
-  def __init__(self, suite, parent, arch):
-    super(Trace, self).__init__(suite, parent, arch)
-    assert self.results_regexp
-    self.results = []
-    self.errors = []
-
-  def ConsumeOutput(self, stdout):
-    try:
-      self.results.append(
-          re.search(self.results_regexp, stdout, re.M).group(1))
-    except:
-      self.errors.append("Regexp \"%s\" didn't match for benchmark %s."
-                         % (self.results_regexp, self.graphs[-1]))
-
-  def GetResults(self):
-    return Results([{
-      "graphs": self.graphs,
-      "units": self.units,
-      "results": self.results,
-    }], self.errors)
-
-
-class Runnable(Graph):
-  """Represents a runnable benchmark suite definition (i.e. has a main file).
-  """
-  @property
-  def main(self):
-    return self._suite["main"]
-
-  def ChangeCWD(self, suite_path):
-    """Changes the cwd to to path defined in the current graph.
-
-    The benchmarks are supposed to be relative to the suite configuration.
-    """
-    suite_dir = os.path.abspath(os.path.dirname(suite_path))
-    bench_dir = os.path.normpath(os.path.join(*self.path))
-    os.chdir(os.path.join(suite_dir, bench_dir))
-
-  def GetCommand(self, shell_dir):
-    # TODO(machenbach): This requires +.exe if run on windows.
-    return (
-      [os.path.join(shell_dir, self.binary)] +
-      self.flags +
-      self.resources +
-      [self.main]
-    )
-
-  def Run(self, runner):
-    """Iterates over several runs and handles the output for all traces."""
-    for stdout in runner():
-      for trace in self._children:
-        trace.ConsumeOutput(stdout)
-    return reduce(lambda r, t: r + t.GetResults(), self._children, Results())
-
-
-class RunnableTrace(Trace, Runnable):
-  """Represents a runnable benchmark suite definition that is a leaf."""
-  def __init__(self, suite, parent, arch):
-    super(RunnableTrace, self).__init__(suite, parent, arch)
-
-  def Run(self, runner):
-    """Iterates over several runs and handles the output."""
-    for stdout in runner():
-      self.ConsumeOutput(stdout)
-    return self.GetResults()
-
-
-def MakeGraph(suite, arch, parent):
-  """Factory method for making graph objects."""
-  if isinstance(parent, Runnable):
-    # Below a runnable can only be traces.
-    return Trace(suite, parent, arch)
-  elif suite.get("main"):
-    # A main file makes this graph runnable.
-    if suite.get("benchmarks"):
-      # This graph has subbenchmarks (traces).
-      return Runnable(suite, parent, arch)
-    else:
-      # This graph has no subbenchmarks, it's a leaf.
-      return RunnableTrace(suite, parent, arch)
-  elif suite.get("benchmarks"):
-    # This is neither a leaf nor a runnable.
-    return Graph(suite, parent, arch)
-  else:  # pragma: no cover
-    raise Exception("Invalid benchmark suite configuration.")
-
-
-def BuildGraphs(suite, arch, parent=None):
-  """Builds a tree structure of graph objects that corresponds to the suite
-  configuration.
-  """
-  parent = parent or DefaultSentinel()
-
-  # TODO(machenbach): Implement notion of cpu type?
-  if arch not in suite.get("archs", ["ia32", "x64"]):
-    return None
-
-  graph = MakeGraph(suite, arch, parent)
-  for subsuite in suite.get("benchmarks", []):
-    BuildGraphs(subsuite, arch, graph)
-  parent.AppendChild(graph)
-  return graph
-
-
-def FlattenRunnables(node):
-  """Generator that traverses the tree structure and iterates over all
-  runnables.
-  """
-  if isinstance(node, Runnable):
-    yield node
-  elif isinstance(node, Node):
-    for child in node._children:
-      for result in FlattenRunnables(child):
-        yield result
-  else:  # pragma: no cover
-    raise Exception("Invalid benchmark suite configuration.")
-
-
-# TODO: Implement results_processor.
-def Main(args):
-  parser = optparse.OptionParser()
-  parser.add_option("--arch",
-                    help=("The architecture to run tests for, "
-                          "'auto' or 'native' for auto-detect"),
-                    default="x64")
-  parser.add_option("--buildbot",
-                    help="Adapt to path structure used on buildbots",
-                    default=False, action="store_true")
-  parser.add_option("--json-test-results",
-                    help="Path to a file for storing json results.")
-  parser.add_option("--outdir", help="Base directory with compile output",
-                    default="out")
-  (options, args) = parser.parse_args(args)
-
-  if len(args) == 0:  # pragma: no cover
-    parser.print_help()
-    return 1
-
-  if options.arch in ["auto", "native"]:  # pragma: no cover
-    options.arch = ARCH_GUESS
-
-  if not options.arch in SUPPORTED_ARCHS:  # pragma: no cover
-    print "Unknown architecture %s" % options.arch
-    return 1
-
-  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
-
-  if options.buildbot:
-    shell_dir = os.path.join(workspace, options.outdir, "Release")
-  else:
-    shell_dir = os.path.join(workspace, options.outdir,
-                             "%s.release" % options.arch)
-
-  results = Results()
-  for path in args:
-    path = os.path.abspath(path)
-
-    if not os.path.exists(path):  # pragma: no cover
-      results.errors.append("Benchmark file %s does not exist." % path)
-      continue
-
-    with open(path) as f:
-      suite = json.loads(f.read())
-
-    # If no name is given, default to the file name without .json.
-    suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
-
-    for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)):
-      print ">>> Running suite: %s" % "/".join(runnable.graphs)
-      runnable.ChangeCWD(path)
-
-      def Runner():
-        """Output generator that reruns several times."""
-        for i in xrange(0, max(1, runnable.run_count)):
-          # TODO(machenbach): Make timeout configurable in the suite definition.
-          # Allow timeout per arch like with run_count per arch.
-          output = commands.Execute(runnable.GetCommand(shell_dir), timeout=60)
-          print ">>> Stdout (#%d):" % (i + 1)
-          print output.stdout
-          if output.stderr:  # pragma: no cover
-            # Print stderr for debugging.
-            print ">>> Stderr (#%d):" % (i + 1)
-            print output.stderr
-          yield output.stdout
-
-      # Let runnable iterate over all runs and handle output.
-      results += runnable.Run(Runner)
-
-  if options.json_test_results:
-    results.WriteToFile(options.json_test_results)
-  else:  # pragma: no cover
-    print results
-
-  return min(1, len(results.errors))
-
-if __name__ == "__main__":  # pragma: no cover
-  sys.exit(Main(sys.argv[1:]))
diff --git a/tools/run_perf.py b/tools/run_perf.py
new file mode 100755
index 0000000..920c18d
--- /dev/null
+++ b/tools/run_perf.py
@@ -0,0 +1,493 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Performance runner for d8.
+
+Call e.g. with tools/run_perf.py --arch ia32 some_suite.json
+
+The suite json format is expected to be:
+{
+  "path": <relative path chunks to perf resources and main file>,
+  "name": <optional suite name, file name is default>,
+  "archs": [<architecture name for which this suite is run>, ...],
+  "binary": <name of binary to run, default "d8">,
+  "flags": [<flag to d8>, ...],
+  "run_count": <how often will this suite run (optional)>,
+  "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
+  "resources": [<js file to be loaded before main>, ...]
+  "main": <main js perf runner file>,
+  "results_regexp": <optional regexp>,
+  "results_processor": <optional python results processor script>,
+  "units": <the unit specification for the performance dashboard>,
+  "tests": [
+    {
+      "name": <name of the trace>,
+      "results_regexp": <optional more specific regexp>,
+      "results_processor": <optional python results processor script>,
+      "units": <the unit specification for the performance dashboard>,
+    }, ...
+  ]
+}
+
+The tests field can also nest other suites to arbitrary depth. A suite
+with a "main" file is a leaf suite that can contain one more level of
+tests.
+
+A suite's results_regexp is expected to have one string placeholder
+"%s" for the trace name. A trace's results_regexp overrides suite
+defaults.
+
+A suite's results_processor may point to an optional python script. If
+specified, it is called after running the tests like this (with a path
+relative to the suite level's path):
+<results_processor file> <same flags as for d8> <suite level name> <output>
+
+The <output> is a temporary file containing d8 output. The results_regexp will
+be applied to the output of this script.
+
+A suite without "tests" is considered a performance test itself.
+
+Full example (suite with one runner):
+{
+  "path": ["."],
+  "flags": ["--expose-gc"],
+  "archs": ["ia32", "x64"],
+  "run_count": 5,
+  "run_count_ia32": 3,
+  "main": "run.js",
+  "results_regexp": "^%s: (.+)$",
+  "units": "score",
+  "tests": [
+    {"name": "Richards"},
+    {"name": "DeltaBlue"},
+    {"name": "NavierStokes",
+     "results_regexp": "^NavierStokes: (.+)$"}
+  ]
+}
+
+Full example (suite with several runners):
+{
+  "path": ["."],
+  "flags": ["--expose-gc"],
+  "archs": ["ia32", "x64"],
+  "run_count": 5,
+  "units": "score",
+  "tests": [
+    {"name": "Richards",
+     "path": ["richards"],
+     "main": "run.js",
+     "run_count": 3,
+     "results_regexp": "^Richards: (.+)$"},
+    {"name": "NavierStokes",
+     "path": ["navier_stokes"],
+     "main": "run.js",
+     "results_regexp": "^NavierStokes: (.+)$"}
+  ]
+}
+
+Path pieces are concatenated. D8 is always run with the suite's path as cwd.
+"""
+
+import json
+import math
+import optparse
+import os
+import re
+import sys
+
+from testrunner.local import commands
+from testrunner.local import utils
+
+ARCH_GUESS = utils.DefaultArch()
+SUPPORTED_ARCHS = ["android_arm",
+                   "android_arm64",
+                   "android_ia32",
+                   "arm",
+                   "ia32",
+                   "mips",
+                   "mipsel",
+                   "nacl_ia32",
+                   "nacl_x64",
+                   "x64",
+                   "arm64"]
+
+GENERIC_RESULTS_RE = re.compile(
+    r"^Trace\(([^\)]+)\), Result\(([^\)]+)\), StdDev\(([^\)]+)\)$")
+
+
+def GeometricMean(values):
+  """Returns the geometric mean of a list of values.
+
+  The mean is calculated using log to avoid overflow.
+  """
+  values = map(float, values)
+  return str(math.exp(sum(map(math.log, values)) / len(values)))
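+# Worked example of the identity above: GeometricMean(["2", "8"]) computes
+# exp((ln 2 + ln 8) / 2) = exp(ln 16 / 2) = 4.0 and returns "4.0". Summing
+# logs instead of multiplying the raw values keeps long lists of large
+# scores from overflowing a float.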
+
+
+class Results(object):
+  """Place holder for result traces."""
+  def __init__(self, traces=None, errors=None):
+    self.traces = traces or []
+    self.errors = errors or []
+
+  def ToDict(self):
+    return {"traces": self.traces, "errors": self.errors}
+
+  def WriteToFile(self, file_name):
+    with open(file_name, "w") as f:
+      f.write(json.dumps(self.ToDict()))
+
+  def __add__(self, other):
+    self.traces += other.traces
+    self.errors += other.errors
+    return self
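+  # Note: __add__ mutates self and returns it; Runnable.Run below relies on
+  # this when it reduce()-accumulates per-trace results into one Results.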
+
+  def __str__(self):  # pragma: no cover
+    return str(self.ToDict())
+
+
+class Node(object):
+  """Represents a node in the suite tree structure."""
+  def __init__(self, *args):
+    self._children = []
+
+  def AppendChild(self, child):
+    self._children.append(child)
+
+
+class DefaultSentinel(Node):
+  """Fake parent node with all default values."""
+  def __init__(self):
+    super(DefaultSentinel, self).__init__()
+    self.binary = "d8"
+    self.run_count = 10
+    self.timeout = 60
+    self.path = []
+    self.graphs = []
+    self.flags = []
+    self.resources = []
+    self.results_regexp = None
+    self.stddev_regexp = None
+    self.units = "score"
+    self.total = False
+
+
+class Graph(Node):
+  """Represents a suite definition.
+
+  Can either be a leaf or an inner node that provides default values.
+  """
+  def __init__(self, suite, parent, arch):
+    super(Graph, self).__init__()
+    self._suite = suite
+
+    assert isinstance(suite.get("path", []), list)
+    assert isinstance(suite["name"], basestring)
+    assert isinstance(suite.get("flags", []), list)
+    assert isinstance(suite.get("resources", []), list)
+
+    # Accumulated values.
+    self.path = parent.path[:] + suite.get("path", [])
+    self.graphs = parent.graphs[:] + [suite["name"]]
+    self.flags = parent.flags[:] + suite.get("flags", [])
+    self.resources = parent.resources[:] + suite.get("resources", [])
+
+    # Discrete values (with parent defaults).
+    self.binary = suite.get("binary", parent.binary)
+    self.run_count = suite.get("run_count", parent.run_count)
+    self.run_count = suite.get("run_count_%s" % arch, self.run_count)
+    self.timeout = suite.get("timeout", parent.timeout)
+    self.units = suite.get("units", parent.units)
+    self.total = suite.get("total", parent.total)
+
+    # A regular expression for results. If the parent graph provides a
+    # regexp and the current suite has none, the parent regexp is expected
+    # to contain a string placeholder for the suite name.
+    # TODO(machenbach): Currently this only makes sense at the leaf level.
+    # Multiple placeholders for multiple levels are not supported.
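+    # For example, a parent regexp "^%s: (.+)$" with suite name "Richards"
+    # yields the default "^Richards: (.+)$".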
+    if parent.results_regexp:
+      regexp_default = parent.results_regexp % re.escape(suite["name"])
+    else:
+      regexp_default = None
+    self.results_regexp = suite.get("results_regexp", regexp_default)
+
+    # A similar regular expression for the standard deviation (optional).
+    if parent.stddev_regexp:
+      stddev_default = parent.stddev_regexp % re.escape(suite["name"])
+    else:
+      stddev_default = None
+    self.stddev_regexp = suite.get("stddev_regexp", stddev_default)
+
+
+class Trace(Graph):
+  """Represents a leaf in the suite tree structure.
+
+  Handles collection of measurements.
+  """
+  def __init__(self, suite, parent, arch):
+    super(Trace, self).__init__(suite, parent, arch)
+    assert self.results_regexp
+    self.results = []
+    self.errors = []
+    self.stddev = ""
+
+  def ConsumeOutput(self, stdout):
+    try:
+      self.results.append(
+          re.search(self.results_regexp, stdout, re.M).group(1))
+    except:
+      self.errors.append("Regexp \"%s\" didn't match for test %s."
+                         % (self.results_regexp, self.graphs[-1]))
+
+    try:
+      if self.stddev_regexp and self.stddev:
+        self.errors.append("Test %s should only run once since a stddev "
+                           "is provided by the test." % self.graphs[-1])
+      if self.stddev_regexp:
+        self.stddev = re.search(self.stddev_regexp, stdout, re.M).group(1)
+    except:
+      self.errors.append("Regexp \"%s\" didn't match for test %s."
+                         % (self.stddev_regexp, self.graphs[-1]))
+
+  def GetResults(self):
+    return Results([{
+      "graphs": self.graphs,
+      "units": self.units,
+      "results": self.results,
+      "stddev": self.stddev,
+    }], self.errors)
+
+
+class Runnable(Graph):
+  """Represents a runnable suite definition (i.e. has a main file).
+  """
+  @property
+  def main(self):
+    return self._suite.get("main", "")
+
+  def ChangeCWD(self, suite_path):
+    """Changes the cwd to the path defined in the current graph.
+
+    Test paths are interpreted relative to the suite configuration.
+    """
+    suite_dir = os.path.abspath(os.path.dirname(suite_path))
+    bench_dir = os.path.normpath(os.path.join(*self.path))
+    os.chdir(os.path.join(suite_dir, bench_dir))
+
+  def GetCommand(self, shell_dir):
+    # TODO(machenbach): This requires +.exe if run on windows.
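+    # The assembled command is, e.g. (illustrative values):
+    #   [<shell_dir>/d8, --flag, file1.js, file2.js, run.js]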
+    return (
+      [os.path.join(shell_dir, self.binary)] +
+      self.flags +
+      self.resources +
+      [self.main]
+    )
+
+  def Run(self, runner):
+    """Iterates over several runs and handles the output for all traces."""
+    for stdout in runner():
+      for trace in self._children:
+        trace.ConsumeOutput(stdout)
+    res = reduce(lambda r, t: r + t.GetResults(), self._children, Results())
+
+    if not res.traces or not self.total:
+      return res
+
+    # Assume all traces have the same structure.
+    if len(set(map(lambda t: len(t["results"]), res.traces))) != 1:
+      res.errors.append("Not all traces have the same number of results.")
+      return res
+
+    # Calculate the geometric means for all traces. Above we made sure that
+    # there is at least one trace and that the number of results is the same
+    # for each trace.
+    n_results = len(res.traces[0]["results"])
+    total_results = [GeometricMean(t["results"][i] for t in res.traces)
+                     for i in range(0, n_results)]
+    res.traces.append({
+      "graphs": self.graphs + ["Total"],
+      "units": res.traces[0]["units"],
+      "results": total_results,
+      "stddev": "",
+    })
+    return res
+
+
+class RunnableTrace(Trace, Runnable):
+  """Represents a runnable suite definition that is a leaf."""
+  def __init__(self, suite, parent, arch):
+    super(RunnableTrace, self).__init__(suite, parent, arch)
+
+  def Run(self, runner):
+    """Iterates over several runs and handles the output."""
+    for stdout in runner():
+      self.ConsumeOutput(stdout)
+    return self.GetResults()
+
+
+class RunnableGeneric(Runnable):
+  """Represents a runnable suite definition with generic traces."""
+  def __init__(self, suite, parent, arch):
+    super(RunnableGeneric, self).__init__(suite, parent, arch)
+
+  def Run(self, runner):
+    """Iterates over several runs and handles the output."""
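+    # Expects stdout lines of the form
+    #   Trace(Test1), Result(1.234), StdDev(0.23)
+    # as matched by GENERIC_RESULTS_RE above.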
+    traces = {}
+    for stdout in runner():
+      for line in stdout.strip().splitlines():
+        match = GENERIC_RESULTS_RE.match(line)
+        if match:
+          trace = match.group(1)
+          result = match.group(2)
+          stddev = match.group(3)
+          trace_result = traces.setdefault(trace, Results([{
+            "graphs": self.graphs + [trace],
+            "units": self.units,
+            "results": [],
+            "stddev": "",
+          }], []))
+          trace_result.traces[0]["results"].append(result)
+          trace_result.traces[0]["stddev"] = stddev
+
+    return reduce(lambda r, t: r + t, traces.itervalues(), Results())
+
+
+def MakeGraph(suite, arch, parent):
+  """Factory method for making graph objects."""
+  if isinstance(parent, Runnable):
+    # Below a runnable there can only be traces.
+    return Trace(suite, parent, arch)
+  elif suite.get("main"):
+    # A main file makes this graph runnable.
+    if suite.get("tests"):
+      # This graph has subgraphs (traces).
+      return Runnable(suite, parent, arch)
+    else:
+      # This graph has no subgraphs, it's a leaf.
+      return RunnableTrace(suite, parent, arch)
+  elif suite.get("generic"):
+    # This is a generic suite definition. It is either a runnable executable
+    # or has a main js file.
+    return RunnableGeneric(suite, parent, arch)
+  elif suite.get("tests"):
+    # This is neither a leaf nor a runnable.
+    return Graph(suite, parent, arch)
+  else:  # pragma: no cover
+    raise Exception("Invalid suite configuration.")
+
+
+def BuildGraphs(suite, arch, parent=None):
+  """Builds a tree structure of graph objects that corresponds to the suite
+  configuration.
+  """
+  parent = parent or DefaultSentinel()
+
+  # TODO(machenbach): Implement notion of cpu type?
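+  # Suites without an explicit "archs" list are assumed to run only on
+  # ia32 and x64 (the default below).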
+  if arch not in suite.get("archs", ["ia32", "x64"]):
+    return None
+
+  graph = MakeGraph(suite, arch, parent)
+  for subsuite in suite.get("tests", []):
+    BuildGraphs(subsuite, arch, graph)
+  parent.AppendChild(graph)
+  return graph
+
+
+def FlattenRunnables(node):
+  """Generator that traverses the tree structure and iterates over all
+  runnables.
+  """
+  if isinstance(node, Runnable):
+    yield node
+  elif isinstance(node, Node):
+    for child in node._children:
+      for result in FlattenRunnables(child):
+        yield result
+  else:  # pragma: no cover
+    raise Exception("Invalid suite configuration.")
+
+
+# TODO: Implement results_processor.
+def Main(args):
+  parser = optparse.OptionParser()
+  parser.add_option("--arch",
+                    help=("The architecture to run tests for, "
+                          "'auto' or 'native' for auto-detect"),
+                    default="x64")
+  parser.add_option("--buildbot",
+                    help="Adapt to path structure used on buildbots",
+                    default=False, action="store_true")
+  parser.add_option("--json-test-results",
+                    help="Path to a file for storing json results.")
+  parser.add_option("--outdir", help="Base directory with compile output",
+                    default="out")
+  (options, args) = parser.parse_args(args)
+
+  if len(args) == 0:  # pragma: no cover
+    parser.print_help()
+    return 1
+
+  if options.arch in ["auto", "native"]:  # pragma: no cover
+    options.arch = ARCH_GUESS
+
+  if not options.arch in SUPPORTED_ARCHS:  # pragma: no cover
+    print "Unknown architecture %s" % options.arch
+    return 1
+
+  workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
+
+  if options.buildbot:
+    shell_dir = os.path.join(workspace, options.outdir, "Release")
+  else:
+    shell_dir = os.path.join(workspace, options.outdir,
+                             "%s.release" % options.arch)
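+  # E.g. "out/x64.release", or "out/Release" when --buildbot is given.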
+
+  results = Results()
+  for path in args:
+    path = os.path.abspath(path)
+
+    if not os.path.exists(path):  # pragma: no cover
+      results.errors.append("Configuration file %s does not exist." % path)
+      continue
+
+    with open(path) as f:
+      suite = json.loads(f.read())
+
+    # If no name is given, default to the file name without .json.
+    suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
+
+    for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)):
+      print ">>> Running suite: %s" % "/".join(runnable.graphs)
+      runnable.ChangeCWD(path)
+
+      def Runner():
+        """Output generator that runs the test several times."""
+        for i in xrange(0, max(1, runnable.run_count)):
+          # TODO(machenbach): Allow timeout per arch like with run_count per
+          # arch.
+          output = commands.Execute(runnable.GetCommand(shell_dir),
+                                    timeout=runnable.timeout)
+          print ">>> Stdout (#%d):" % (i + 1)
+          print output.stdout
+          if output.stderr:  # pragma: no cover
+            # Print stderr for debugging.
+            print ">>> Stderr (#%d):" % (i + 1)
+            print output.stderr
+          if output.timed_out:
+            print ">>> Test timed out after %ss." % runnable.timeout
+          yield output.stdout
+
+      # Let runnable iterate over all runs and handle output.
+      results += runnable.Run(Runner)
+
+  if options.json_test_results:
+    results.WriteToFile(options.json_test_results)
+  else:  # pragma: no cover
+    print results
+
+  return min(1, len(results.errors))
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(Main(sys.argv[1:]))
diff --git a/tools/testrunner/local/execution.py b/tools/testrunner/local/execution.py
index ac69eab..36ce7be 100644
--- a/tools/testrunner/local/execution.py
+++ b/tools/testrunner/local/execution.py
@@ -27,6 +27,7 @@
 
 
 import os
+import shutil
 import time
 
 from pool import Pool
@@ -60,9 +61,11 @@
 class Runner(object):
 
   def __init__(self, suites, progress_indicator, context):
-    datapath = os.path.join("out", "testrunner_data")
-    self.perf_data_manager = perfdata.PerfDataManager(datapath)
+    self.datapath = os.path.join("out", "testrunner_data")
+    self.perf_data_manager = perfdata.PerfDataManager(self.datapath)
     self.perfdata = self.perf_data_manager.GetStore(context.arch, context.mode)
+    self.perf_failures = False
+    self.printed_allocations = False
     self.tests = [ t for s in suites for t in s.tests ]
     if not context.no_sorting:
       for t in self.tests:
@@ -79,6 +82,119 @@
     self.remaining = num_tests
     self.failed = []
     self.crashed = 0
+    self.reran_tests = 0
+
+  def _RunPerfSafe(self, fun):
+    try:
+      fun()
+    except Exception, e:
+      print("PerfData exception: %s" % e)
+      self.perf_failures = True
+
+  def _GetJob(self, test):
+    command = self.GetCommand(test)
+    timeout = self.context.timeout
+    if ("--stress-opt" in test.flags or
+        "--stress-opt" in self.context.mode_flags or
+        "--stress-opt" in self.context.extra_flags):
+      timeout *= 4
+    if test.dependency is not None:
+      dep_command = [ c.replace(test.path, test.dependency) for c in command ]
+    else:
+      dep_command = None
+    return Job(command, dep_command, test.id, timeout, self.context.verbose)
+
+  def _MaybeRerun(self, pool, test):
+    if test.run <= self.context.rerun_failures_count:
+      # Possibly rerun this test if its run count is below the maximum per
+      # test. Use <= since the flag counts reruns, not including the first run.
+      if test.run == 1:
+        # Count the overall number of rerun tests on the first rerun.
+        if self.reran_tests < self.context.rerun_failures_max:
+          self.reran_tests += 1
+        else:
+          # Don't rerun this test if the overall limit of rerun tests has
+          # been reached.
+          return
+      if test.run >= 2 and test.duration > self.context.timeout / 20.0:
+        # Rerun slow tests at most once.
+        return
+
+      # Rerun this test.
+      test.duration = None
+      test.output = None
+      test.run += 1
+      pool.add([self._GetJob(test)])
+      self.remaining += 1
+
+  def _ProcessTestNormal(self, test, result, pool):
+    self.indicator.AboutToRun(test)
+    test.output = result[1]
+    test.duration = result[2]
+    has_unexpected_output = test.suite.HasUnexpectedOutput(test)
+    if has_unexpected_output:
+      self.failed.append(test)
+      if test.output.HasCrashed():
+        self.crashed += 1
+    else:
+      self.succeeded += 1
+    self.remaining -= 1
+    # For the indicator, everything that happens after the first run is
+    # treated as unexpected, even if it flakily passes, so that it is
+    # included in the output.
+    self.indicator.HasRun(test, has_unexpected_output or test.run > 1)
+    if has_unexpected_output:
+      # Rerun test failures after the indicator has processed the results.
+      self._MaybeRerun(pool, test)
+    # Update the perf database if the test succeeded.
+    return not has_unexpected_output
+
+  def _ProcessTestPredictable(self, test, result, pool):
+    def HasDifferentAllocations(output1, output2):
+      def AllocationStr(stdout):
+        for line in reversed((stdout or "").splitlines()):
+          if line.startswith("### Allocations = "):
+            self.printed_allocations = True
+            return line
+        return ""
+      return (AllocationStr(output1.stdout) != AllocationStr(output2.stdout))
+
+    # Always pass the test duration for the database update.
+    test.duration = result[2]
+    if test.run == 1 and result[1].HasTimedOut():
+      # If we get a timeout in the first run, we are already in an
+      # unpredictable state. Just report it as a failure and don't rerun.
+      self.indicator.AboutToRun(test)
+      test.output = result[1]
+      self.remaining -= 1
+      self.failed.append(test)
+      self.indicator.HasRun(test, True)
+    if test.run > 1 and HasDifferentAllocations(test.output, result[1]):
+      # From the second run on, check for different allocations. If a
+      # difference is found, call the indicator twice to report both tests.
+      # All runs of each test are counted as one for the statistic.
+      self.indicator.AboutToRun(test)
+      self.remaining -= 1
+      self.failed.append(test)
+      self.indicator.HasRun(test, True)
+      self.indicator.AboutToRun(test)
+      test.output = result[1]
+      self.indicator.HasRun(test, True)
+    elif test.run >= 3:
+      # No difference on the third run -> report a success.
+      self.indicator.AboutToRun(test)
+      self.remaining -= 1
+      self.succeeded += 1
+      test.output = result[1]
+      self.indicator.HasRun(test, False)
+    else:
+      # No difference yet and less than three runs -> add another run and
+      # remember the output for comparison.
+      test.run += 1
+      test.output = result[1]
+      pool.add([self._GetJob(test)])
+    # Always update the perf database.
+    return True
 
   def Run(self, jobs):
     self.indicator.Starting()
@@ -100,50 +216,35 @@
       assert test.id >= 0
       test_map[test.id] = test
       try:
-        command = self.GetCommand(test)
+        queue.append([self._GetJob(test)])
       except Exception, e:
         # If this failed, save the exception and re-raise it later (after
         # all other tests have had a chance to run).
         queued_exception = e
         continue
-      timeout = self.context.timeout
-      if ("--stress-opt" in test.flags or
-          "--stress-opt" in self.context.mode_flags or
-          "--stress-opt" in self.context.extra_flags):
-        timeout *= 4
-      if test.dependency is not None:
-        dep_command = [ c.replace(test.path, test.dependency) for c in command ]
-      else:
-        dep_command = None
-      job = Job(command, dep_command, test.id, timeout, self.context.verbose)
-      queue.append([job])
     try:
       it = pool.imap_unordered(RunTest, queue)
       for result in it:
         test = test_map[result[0]]
-        self.indicator.AboutToRun(test)
-        test.output = result[1]
-        test.duration = result[2]
-        has_unexpected_output = test.suite.HasUnexpectedOutput(test)
-        if has_unexpected_output:
-          self.failed.append(test)
-          if test.output.HasCrashed():
-            self.crashed += 1
+        if self.context.predictable:
+          update_perf = self._ProcessTestPredictable(test, result, pool)
         else:
-          self.succeeded += 1
-        self.remaining -= 1
-        try:
-          self.perfdata.UpdatePerfData(test)
-        except Exception, e:
-          print("UpdatePerfData exception: %s" % e)
-          pass  # Just keep working.
-        self.indicator.HasRun(test, has_unexpected_output)
+          update_perf = self._ProcessTestNormal(test, result, pool)
+        if update_perf:
+          self._RunPerfSafe(lambda: self.perfdata.UpdatePerfData(test))
     finally:
       pool.terminate()
-      self.perf_data_manager.close()
+      self._RunPerfSafe(lambda: self.perf_data_manager.close())
+      if self.perf_failures:
+        # Nuke perf data in case of failures. This might not work on Windows
+        # as some files might still be open.
+        print "Deleting perf test data due to db corruption."
+        shutil.rmtree(self.datapath)
     if queued_exception:
       raise queued_exception
 
+    # Make sure that any allocations were printed in predictable mode.
+    assert not self.context.predictable or self.printed_allocations
 
   def GetCommand(self, test):
     d8testflag = []
diff --git a/tools/testrunner/local/progress.py b/tools/testrunner/local/progress.py
index 870dcc6..8caa58c 100644
--- a/tools/testrunner/local/progress.py
+++ b/tools/testrunner/local/progress.py
@@ -319,16 +319,20 @@
   def HasRun(self, test, has_unexpected_output):
     self.progress_indicator.HasRun(test, has_unexpected_output)
     if not has_unexpected_output:
+      # Omit tests that run as expected. Passing tests from reruns after
+      # failures are reported here with unexpected_output as well.
       return
+
     self.results.append({
       "name": test.GetLabel(),
       "flags": test.flags,
       "command": EscapeCommand(self.runner.GetCommand(test)).replace(
           ABS_PATH_PREFIX, ""),
+      "run": test.run,
       "stdout": test.output.stdout,
       "stderr": test.output.stderr,
       "exit_code": test.output.exit_code,
-      "result": "CRASH" if test.output.HasCrashed() else "FAIL",
+      "result": test.suite.GetOutcome(test),
     })
 
 
diff --git a/tools/testrunner/local/statusfile.py b/tools/testrunner/local/statusfile.py
index df8bfac..7c3ca7f 100644
--- a/tools/testrunner/local/statusfile.py
+++ b/tools/testrunner/local/statusfile.py
@@ -53,7 +53,7 @@
 # Support arches, modes to be written as keywords instead of strings.
 VARIABLES = {ALWAYS: True}
 for var in ["debug", "release", "android_arm", "android_arm64", "android_ia32", "android_x87",
-            "arm", "arm64", "ia32", "mips", "mipsel", "x64", "x87", "nacl_ia32",
+            "arm", "arm64", "ia32", "mips", "mipsel", "mips64el", "x64", "x87", "nacl_ia32",
             "nacl_x64", "macos", "windows", "linux"]:
   VARIABLES[var] = var
 
diff --git a/tools/testrunner/local/testsuite.py b/tools/testrunner/local/testsuite.py
index ff51196..47bc08f 100644
--- a/tools/testrunner/local/testsuite.py
+++ b/tools/testrunner/local/testsuite.py
@@ -29,8 +29,10 @@
 import imp
 import os
 
+from . import commands
 from . import statusfile
 from . import utils
+from ..objects import testcase
 
 class TestSuite(object):
 
@@ -41,11 +43,13 @@
     try:
       (f, pathname, description) = imp.find_module("testcfg", [root])
       module = imp.load_module("testcfg", f, pathname, description)
-      suite = module.GetSuite(name, root)
+      return module.GetSuite(name, root)
+    except:
+      # Use default if no testcfg is present.
+      return GoogleTestSuite(name, root)
     finally:
       if f:
         f.close()
-    return suite
 
   def __init__(self, name, root):
     self.name = name  # string
@@ -190,18 +194,19 @@
     else:
       return execution_failed
 
-  def HasUnexpectedOutput(self, testcase):
+  def GetOutcome(self, testcase):
     if testcase.output.HasCrashed():
-      outcome = statusfile.CRASH
+      return statusfile.CRASH
     elif testcase.output.HasTimedOut():
-      outcome = statusfile.TIMEOUT
+      return statusfile.TIMEOUT
     elif self.HasFailed(testcase):
-      outcome = statusfile.FAIL
+      return statusfile.FAIL
     else:
-      outcome = statusfile.PASS
-    if not testcase.outcomes:
-      return outcome != statusfile.PASS
-    return not outcome in testcase.outcomes
+      return statusfile.PASS
+
+  def HasUnexpectedOutput(self, testcase):
+    outcome = self.GetOutcome(testcase)
+    return not outcome in (testcase.outcomes or [statusfile.PASS])
 
   def StripOutputForTransmit(self, testcase):
     if not self.HasUnexpectedOutput(testcase):
@@ -213,3 +218,40 @@
     for t in self.tests:
       self.total_duration += t.duration
     return self.total_duration
+
+
+class GoogleTestSuite(TestSuite):
+  def __init__(self, name, root):
+    super(GoogleTestSuite, self).__init__(name, root)
+
+  def ListTests(self, context):
+    shell = os.path.abspath(os.path.join(context.shell_dir, self.shell()))
+    if utils.IsWindows():
+      shell += ".exe"
+    output = commands.Execute(context.command_prefix +
+                              [shell, "--gtest_list_tests"] +
+                              context.extra_flags)
+    if output.exit_code != 0:
+      print output.stdout
+      print output.stderr
+      return []
+    tests = []
+    test_case = ''
+    for line in output.stdout.splitlines():
+      test_desc = line.strip().split()[0]
+      if test_desc.endswith('.'):
+        test_case = test_desc
+      elif test_case and test_desc:
+        test = testcase.TestCase(self, test_case + test_desc, dependency=None)
+        tests.append(test)
+    tests.sort()
+    return tests
+
+  def GetFlagsForTestCase(self, testcase, context):
+    return (testcase.flags + ["--gtest_filter=" + testcase.path] +
+            ["--gtest_random_seed=%s" % context.random_seed] +
+            ["--gtest_print_time=0"] +
+            context.mode_flags)
+
+  def shell(self):
+    return self.name
diff --git a/tools/testrunner/local/utils.py b/tools/testrunner/local/utils.py
index 707fa24..7bc21b1 100644
--- a/tools/testrunner/local/utils.py
+++ b/tools/testrunner/local/utils.py
@@ -36,9 +36,7 @@
 
 
 def GetSuitePaths(test_root):
-  def IsSuite(path):
-    return isdir(path) and exists(join(path, 'testcfg.py'))
-  return [ f for f in os.listdir(test_root) if IsSuite(join(test_root, f)) ]
+  return [ f for f in os.listdir(test_root) if isdir(join(test_root, f)) ]
 
 
 # Reads a file into an array of strings
diff --git a/tools/testrunner/objects/context.py b/tools/testrunner/objects/context.py
index f8f764b..937d908 100644
--- a/tools/testrunner/objects/context.py
+++ b/tools/testrunner/objects/context.py
@@ -29,7 +29,8 @@
 class Context():
   def __init__(self, arch, mode, shell_dir, mode_flags, verbose, timeout,
                isolates, command_prefix, extra_flags, noi18n, random_seed,
-               no_sorting):
+               no_sorting, rerun_failures_count, rerun_failures_max,
+               predictable):
     self.arch = arch
     self.mode = mode
     self.shell_dir = shell_dir
@@ -42,15 +43,19 @@
     self.noi18n = noi18n
     self.random_seed = random_seed
     self.no_sorting = no_sorting
+    self.rerun_failures_count = rerun_failures_count
+    self.rerun_failures_max = rerun_failures_max
+    self.predictable = predictable
 
   def Pack(self):
     return [self.arch, self.mode, self.mode_flags, self.timeout, self.isolates,
             self.command_prefix, self.extra_flags, self.noi18n,
-            self.random_seed, self.no_sorting]
+            self.random_seed, self.no_sorting, self.rerun_failures_count,
+            self.rerun_failures_max, self.predictable]
 
   @staticmethod
   def Unpack(packed):
     # For the order of the fields, refer to Pack() above.
     return Context(packed[0], packed[1], None, packed[2], False,
                    packed[3], packed[4], packed[5], packed[6], packed[7],
-                   packed[8], packed[9])
+                   packed[8], packed[9], packed[10], packed[11], packed[12])
diff --git a/tools/testrunner/objects/testcase.py b/tools/testrunner/objects/testcase.py
index cfc522e..ca82606 100644
--- a/tools/testrunner/objects/testcase.py
+++ b/tools/testrunner/objects/testcase.py
@@ -38,6 +38,7 @@
     self.output = None
     self.id = None  # int, used to map result back to TestCase instance
     self.duration = None  # assigned during execution
+    self.run = 1  # The nth time this test is executed.
 
   def CopyAddingFlags(self, flags):
     copy = TestCase(self.suite, self.path, self.flags + flags, self.dependency)
@@ -60,6 +61,7 @@
     test = TestCase(str(task[0]), task[1], task[2], task[3])
     test.outcomes = set(task[4])
     test.id = task[5]
+    test.run = 1
     return test
 
   def SetSuiteObject(self, suites):
diff --git a/tools/tickprocessor.js b/tools/tickprocessor.js
index 187e647..acd7a71 100644
--- a/tools/tickprocessor.js
+++ b/tools/tickprocessor.js
@@ -441,12 +441,6 @@
 
   if (this.ticks_.total == 0) return;
 
-  // Print the unknown ticks percentage if they are not ignored.
-  if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) {
-    this.printHeader('Unknown');
-    this.printCounter(this.ticks_.unaccounted, this.ticks_.total);
-  }
-
   var flatProfile = this.profile_.getFlatProfile();
   var flatView = this.viewBuilder_.buildView(flatProfile);
   // Sort by self time, desc, then by name, desc.
@@ -457,33 +451,39 @@
   if (this.ignoreUnknown_) {
     totalTicks -= this.ticks_.unaccounted;
   }
-  // Our total time contains all the ticks encountered,
-  // while profile only knows about the filtered ticks.
-  flatView.head.totalTime = totalTicks;
 
   // Count library ticks
   var flatViewNodes = flatView.head.children;
   var self = this;
+
   var libraryTicks = 0;
-  this.processProfile(flatViewNodes,
+  this.printHeader('Shared libraries');
+  this.printEntries(flatViewNodes, totalTicks, null,
       function(name) { return self.isSharedLibrary(name); },
       function(rec) { libraryTicks += rec.selfTime; });
   var nonLibraryTicks = totalTicks - libraryTicks;
 
-  this.printHeader('Shared libraries');
-  this.printEntries(flatViewNodes, null,
-      function(name) { return self.isSharedLibrary(name); });
-
+  var jsTicks = 0;
   this.printHeader('JavaScript');
-  this.printEntries(flatViewNodes, nonLibraryTicks,
-      function(name) { return self.isJsCode(name); });
+  this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
+      function(name) { return self.isJsCode(name); },
+      function(rec) { jsTicks += rec.selfTime; });
 
+  var cppTicks = 0;
   this.printHeader('C++');
-  this.printEntries(flatViewNodes, nonLibraryTicks,
-      function(name) { return self.isCppCode(name); });
+  this.printEntries(flatViewNodes, totalTicks, nonLibraryTicks,
+      function(name) { return self.isCppCode(name); },
+      function(rec) { cppTicks += rec.selfTime; });
 
-  this.printHeader('GC');
-  this.printCounter(this.ticks_.gc, totalTicks);
+  this.printHeader('Summary');
+  this.printLine('JavaScript', jsTicks, totalTicks, nonLibraryTicks);
+  this.printLine('C++', cppTicks, totalTicks, nonLibraryTicks);
+  this.printLine('GC', this.ticks_.gc, totalTicks, nonLibraryTicks);
+  this.printLine('Shared libraries', libraryTicks, totalTicks, null);
+  if (!this.ignoreUnknown_ && this.ticks_.unaccounted > 0) {
+    this.printLine('Unaccounted', this.ticks_.unaccounted,
+                   this.ticks_.total, null);
+  }
 
   this.printHeavyProfHeader();
   var heavyProfile = this.profile_.getBottomUpProfile();
@@ -517,6 +517,18 @@
 };
 
 
+TickProcessor.prototype.printLine = function(
+    entry, ticks, totalTicks, nonLibTicks) {
+  var pct = ticks * 100 / totalTicks;
+  var nonLibPct = nonLibTicks != null
+      ? padLeft((ticks * 100 / nonLibTicks).toFixed(1), 5) + '%  '
+      : '        ';
+  print('  ' + padLeft(ticks, 5) + '  ' +
+        padLeft(pct.toFixed(1), 5) + '%  ' +
+        nonLibPct +
+        entry);
+}
+
 TickProcessor.prototype.printHeavyProfHeader = function() {
   print('\n [Bottom up (heavy) profile]:');
   print('  Note: percentage shows a share of a particular caller in the ' +
@@ -529,12 +541,6 @@
 };
 
 
-TickProcessor.prototype.printCounter = function(ticksCount, totalTicksCount) {
-  var pct = ticksCount * 100.0 / totalTicksCount;
-  print('  ' + padLeft(ticksCount, 5) + '  ' + padLeft(pct.toFixed(1), 5) + '%');
-};
-
-
 TickProcessor.prototype.processProfile = function(
     profile, filterP, func) {
   for (var i = 0, n = profile.length; i < n; ++i) {
@@ -580,18 +586,13 @@
 };
 
 TickProcessor.prototype.printEntries = function(
-    profile, nonLibTicks, filterP) {
+    profile, totalTicks, nonLibTicks, filterP, callback) {
   var that = this;
   this.processProfile(profile, filterP, function (rec) {
     if (rec.selfTime == 0) return;
-    var nonLibPct = nonLibTicks != null ?
-        rec.selfTime * 100.0 / nonLibTicks : 0.0;
+    callback(rec);
     var funcName = that.formatFunctionName(rec.internalFuncName);
-
-    print('  ' + padLeft(rec.selfTime, 5) + '  ' +
-          padLeft(rec.selfPercent.toFixed(1), 5) + '%  ' +
-          padLeft(nonLibPct.toFixed(1), 5) + '%  ' +
-          funcName);
+    that.printLine(funcName, rec.selfTime, totalTicks, nonLibTicks);
   });
 };
 
diff --git a/tools/unittests/run_benchmarks_test.py b/tools/unittests/run_benchmarks_test.py
deleted file mode 100644
index f627d43..0000000
--- a/tools/unittests/run_benchmarks_test.py
+++ /dev/null
@@ -1,256 +0,0 @@
-#!/usr/bin/env python
-# Copyright 2014 the V8 project authors. All rights reserved.
-# Use of this source code is governed by a BSD-style license that can be
-# found in the LICENSE file.
-
-from collections import namedtuple
-import coverage
-import json
-from mock import DEFAULT
-from mock import MagicMock
-import os
-from os import path, sys
-import shutil
-import tempfile
-import unittest
-
-# Requires python-coverage and python-mock. Native python coverage
-# version >= 3.7.1 should be installed to get the best speed.
-
-TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-benchmarks")
-
-V8_JSON = {
-  "path": ["."],
-  "binary": "d7",
-  "flags": ["--flag"],
-  "main": "run.js",
-  "run_count": 1,
-  "results_regexp": "^%s: (.+)$",
-  "benchmarks": [
-    {"name": "Richards"},
-    {"name": "DeltaBlue"},
-  ]
-}
-
-V8_NESTED_SUITES_JSON = {
-  "path": ["."],
-  "flags": ["--flag"],
-  "run_count": 1,
-  "units": "score",
-  "benchmarks": [
-    {"name": "Richards",
-     "path": ["richards"],
-     "binary": "d7",
-     "main": "run.js",
-     "resources": ["file1.js", "file2.js"],
-     "run_count": 2,
-     "results_regexp": "^Richards: (.+)$"},
-    {"name": "Sub",
-     "path": ["sub"],
-     "benchmarks": [
-       {"name": "Leaf",
-        "path": ["leaf"],
-        "run_count_x64": 3,
-        "units": "ms",
-        "main": "run.js",
-        "results_regexp": "^Simple: (.+) ms.$"},
-     ]
-    },
-    {"name": "DeltaBlue",
-     "path": ["delta_blue"],
-     "main": "run.js",
-     "flags": ["--flag2"],
-     "results_regexp": "^DeltaBlue: (.+)$"},
-    {"name": "ShouldntRun",
-     "path": ["."],
-     "archs": ["arm"],
-     "main": "run.js"},
-  ]
-}
-
-Output = namedtuple("Output", "stdout, stderr")
-
-class BenchmarksTest(unittest.TestCase):
-  @classmethod
-  def setUpClass(cls):
-    cls.base = path.dirname(path.dirname(path.abspath(__file__)))
-    sys.path.append(cls.base)
-    cls._cov = coverage.coverage(
-        include=([os.path.join(cls.base, "run_benchmarks.py")]))
-    cls._cov.start()
-    import run_benchmarks
-    from testrunner.local import commands
-    global commands
-    global run_benchmarks
-
-  @classmethod
-  def tearDownClass(cls):
-    cls._cov.stop()
-    print ""
-    print cls._cov.report()
-
-  def setUp(self):
-    self.maxDiff = None
-    if path.exists(TEST_WORKSPACE):
-      shutil.rmtree(TEST_WORKSPACE)
-    os.makedirs(TEST_WORKSPACE)
-
-  def tearDown(self):
-    if path.exists(TEST_WORKSPACE):
-      shutil.rmtree(TEST_WORKSPACE)
-
-  def _WriteTestInput(self, json_content):
-    self._test_input = path.join(TEST_WORKSPACE, "test.json")
-    with open(self._test_input, "w") as f:
-      f.write(json.dumps(json_content))
-
-  def _MockCommand(self, *args):
-    # Fake output for each benchmark run.
-    benchmark_outputs = [Output(stdout=arg, stderr=None) for arg in args[1]]
-    def execute(*args, **kwargs):
-      return benchmark_outputs.pop()
-    commands.Execute = MagicMock(side_effect=execute)
-
-    # Check that d8 is called from the correct cwd for each benchmark run.
-    dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
-    def chdir(*args, **kwargs):
-      self.assertEquals(dirs.pop(), args[0])
-    os.chdir = MagicMock(side_effect=chdir)
-
-  def _CallMain(self, *args):
-    self._test_output = path.join(TEST_WORKSPACE, "results.json")
-    all_args=[
-      "--json-test-results",
-      self._test_output,
-      self._test_input,
-    ]
-    all_args += args
-    return run_benchmarks.Main(all_args)
-
-  def _LoadResults(self):
-    with open(self._test_output) as f:
-      return json.load(f)
-
-  def _VerifyResults(self, suite, units, traces):
-    self.assertEquals([
-      {"units": units,
-       "graphs": [suite, trace["name"]],
-       "results": trace["results"]} for trace in traces],
-        self._LoadResults()["traces"])
-
-  def _VerifyErrors(self, errors):
-    self.assertEquals(errors, self._LoadResults()["errors"])
-
-  def _VerifyMock(self, binary, *args):
-    arg = [path.join(path.dirname(self.base), binary)]
-    arg += args
-    commands.Execute.assert_called_with(arg, timeout=60)
-
-  def _VerifyMockMultiple(self, *args):
-    expected = []
-    for arg in args:
-      a = [path.join(path.dirname(self.base), arg[0])]
-      a += arg[1:]
-      expected.append(((a,), {"timeout": 60}))
-    self.assertEquals(expected, commands.Execute.call_args_list)
-
-  def testOneRun(self):
-    self._WriteTestInput(V8_JSON)
-    self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
-    self.assertEquals(0, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"]},
-      {"name": "DeltaBlue", "results": ["10657567"]},
-    ])
-    self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
-  def testTwoRuns_Units_SuiteName(self):
-    test_input = dict(V8_JSON)
-    test_input["run_count"] = 2
-    test_input["name"] = "v8"
-    test_input["units"] = "ms"
-    self._WriteTestInput(test_input)
-    self._MockCommand([".", "."],
-                      ["Richards: 100\nDeltaBlue: 200\n",
-                       "Richards: 50\nDeltaBlue: 300\n"])
-    self.assertEquals(0, self._CallMain())
-    self._VerifyResults("v8", "ms", [
-      {"name": "Richards", "results": ["50", "100"]},
-      {"name": "DeltaBlue", "results": ["300", "200"]},
-    ])
-    self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
-  def testTwoRuns_SubRegexp(self):
-    test_input = dict(V8_JSON)
-    test_input["run_count"] = 2
-    del test_input["results_regexp"]
-    test_input["benchmarks"][0]["results_regexp"] = "^Richards: (.+)$"
-    test_input["benchmarks"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
-    self._WriteTestInput(test_input)
-    self._MockCommand([".", "."],
-                      ["Richards: 100\nDeltaBlue: 200\n",
-                       "Richards: 50\nDeltaBlue: 300\n"])
-    self.assertEquals(0, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["50", "100"]},
-      {"name": "DeltaBlue", "results": ["300", "200"]},
-    ])
-    self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
-
-  def testNestedSuite(self):
-    self._WriteTestInput(V8_NESTED_SUITES_JSON)
-    self._MockCommand(["delta_blue", "sub/leaf", "richards"],
-                      ["DeltaBlue: 200\n",
-                       "Simple: 1 ms.\n",
-                       "Simple: 2 ms.\n",
-                       "Simple: 3 ms.\n",
-                       "Richards: 100\n",
-                       "Richards: 50\n"])
-    self.assertEquals(0, self._CallMain())
-    self.assertEquals([
-      {"units": "score",
-       "graphs": ["test", "Richards"],
-       "results": ["50", "100"]},
-      {"units": "ms",
-       "graphs": ["test", "Sub", "Leaf"],
-       "results": ["3", "2", "1"]},
-      {"units": "score",
-       "graphs": ["test", "DeltaBlue"],
-       "results": ["200"]},
-      ], self._LoadResults()["traces"])
-    self._VerifyErrors([])
-    self._VerifyMockMultiple(
-        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
-         "file2.js", "run.js"),
-        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
-         "file2.js", "run.js"),
-        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
-        (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
-
-  def testBuildbot(self):
-    self._WriteTestInput(V8_JSON)
-    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
-    self.assertEquals(0, self._CallMain("--buildbot"))
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["1.234"]},
-      {"name": "DeltaBlue", "results": ["10657567"]},
-    ])
-    self._VerifyErrors([])
-    self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
-
-  def testRegexpNoMatch(self):
-    self._WriteTestInput(V8_JSON)
-    self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
-    self.assertEquals(1, self._CallMain())
-    self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": []},
-      {"name": "DeltaBlue", "results": ["10657567"]},
-    ])
-    self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" didn't match for benchmark Richards."])
-    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
diff --git a/tools/unittests/run_perf_test.py b/tools/unittests/run_perf_test.py
new file mode 100644
index 0000000..76e8d23
--- /dev/null
+++ b/tools/unittests/run_perf_test.py
@@ -0,0 +1,370 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+from collections import namedtuple
+import coverage
+import json
+from mock import DEFAULT
+from mock import MagicMock
+import os
+from os import path, sys
+import shutil
+import tempfile
+import unittest
+
+# Requires python-coverage and python-mock. Native python coverage
+# version >= 3.7.1 should be installed to get the best speed.
+
+TEST_WORKSPACE = path.join(tempfile.gettempdir(), "test-v8-run-perf")
+
+V8_JSON = {
+  "path": ["."],
+  "binary": "d7",
+  "flags": ["--flag"],
+  "main": "run.js",
+  "run_count": 1,
+  "results_regexp": "^%s: (.+)$",
+  "tests": [
+    {"name": "Richards"},
+    {"name": "DeltaBlue"},
+  ]
+}
+
+V8_NESTED_SUITES_JSON = {
+  "path": ["."],
+  "flags": ["--flag"],
+  "run_count": 1,
+  "units": "score",
+  "tests": [
+    {"name": "Richards",
+     "path": ["richards"],
+     "binary": "d7",
+     "main": "run.js",
+     "resources": ["file1.js", "file2.js"],
+     "run_count": 2,
+     "results_regexp": "^Richards: (.+)$"},
+    {"name": "Sub",
+     "path": ["sub"],
+     "tests": [
+       {"name": "Leaf",
+        "path": ["leaf"],
+        "run_count_x64": 3,
+        "units": "ms",
+        "main": "run.js",
+        "results_regexp": "^Simple: (.+) ms.$"},
+     ]
+    },
+    {"name": "DeltaBlue",
+     "path": ["delta_blue"],
+     "main": "run.js",
+     "flags": ["--flag2"],
+     "results_regexp": "^DeltaBlue: (.+)$"},
+    {"name": "ShouldntRun",
+     "path": ["."],
+     "archs": ["arm"],
+     "main": "run.js"},
+  ]
+}
+
+V8_GENERIC_JSON = {
+  "path": ["."],
+  "binary": "cc",
+  "flags": ["--flag"],
+  "generic": True,
+  "run_count": 1,
+  "units": "ms",
+}
+
+Output = namedtuple("Output", "stdout, stderr, timed_out")
+
+class PerfTest(unittest.TestCase):
+  @classmethod
+  def setUpClass(cls):
+    cls.base = path.dirname(path.dirname(path.abspath(__file__)))
+    sys.path.append(cls.base)
+    cls._cov = coverage.coverage(
+        include=([os.path.join(cls.base, "run_perf.py")]))
+    cls._cov.start()
+    import run_perf
+    from testrunner.local import commands
+    global commands
+    global run_perf
+
+  @classmethod
+  def tearDownClass(cls):
+    cls._cov.stop()
+    print ""
+    print cls._cov.report()
+
+  def setUp(self):
+    self.maxDiff = None
+    if path.exists(TEST_WORKSPACE):
+      shutil.rmtree(TEST_WORKSPACE)
+    os.makedirs(TEST_WORKSPACE)
+
+  def tearDown(self):
+    if path.exists(TEST_WORKSPACE):
+      shutil.rmtree(TEST_WORKSPACE)
+
+  def _WriteTestInput(self, json_content):
+    self._test_input = path.join(TEST_WORKSPACE, "test.json")
+    with open(self._test_input, "w") as f:
+      f.write(json.dumps(json_content))
+
+  def _MockCommand(self, *args, **kwargs):
+    # Fake output for each test run.
+    test_outputs = [Output(stdout=arg,
+                           stderr=None,
+                           timed_out=kwargs.get("timed_out", False))
+                    for arg in args[1]]
+    def execute(*args, **kwargs):
+      return test_outputs.pop()
+    commands.Execute = MagicMock(side_effect=execute)
+
+    # Check that d8 is called from the correct cwd for each test run.
+    dirs = [path.join(TEST_WORKSPACE, arg) for arg in args[0]]
+    def chdir(*args, **kwargs):
+      self.assertEquals(dirs.pop(), args[0])
+    os.chdir = MagicMock(side_effect=chdir)
+
+  def _CallMain(self, *args):
+    self._test_output = path.join(TEST_WORKSPACE, "results.json")
+    all_args=[
+      "--json-test-results",
+      self._test_output,
+      self._test_input,
+    ]
+    all_args += args
+    return run_perf.Main(all_args)
+
+  def _LoadResults(self):
+    with open(self._test_output) as f:
+      return json.load(f)
+
+  def _VerifyResults(self, suite, units, traces):
+    self.assertEquals([
+      {"units": units,
+       "graphs": [suite, trace["name"]],
+       "results": trace["results"],
+       "stddev": trace["stddev"]} for trace in traces],
+      self._LoadResults()["traces"])
+
+  def _VerifyErrors(self, errors):
+    self.assertEquals(errors, self._LoadResults()["errors"])
+
+  def _VerifyMock(self, binary, *args, **kwargs):
+    arg = [path.join(path.dirname(self.base), binary)]
+    arg += args
+    commands.Execute.assert_called_with(
+        arg, timeout=kwargs.get("timeout", 60))
+
+  def _VerifyMockMultiple(self, *args, **kwargs):
+    expected = []
+    for arg in args:
+      a = [path.join(path.dirname(self.base), arg[0])]
+      a += arg[1:]
+      expected.append(((a,), {"timeout": kwargs.get("timeout", 60)}))
+    self.assertEquals(expected, commands.Execute.call_args_list)
+
+  def testOneRun(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(["."], ["x\nRichards: 1.234\nDeltaBlue: 10657567\ny\n"])
+    self.assertEquals(0, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["1.234"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
+  def testTwoRuns_Units_SuiteName(self):
+    test_input = dict(V8_JSON)
+    test_input["run_count"] = 2
+    test_input["name"] = "v8"
+    test_input["units"] = "ms"
+    self._WriteTestInput(test_input)
+    self._MockCommand([".", "."],
+                      ["Richards: 100\nDeltaBlue: 200\n",
+                       "Richards: 50\nDeltaBlue: 300\n"])
+    self.assertEquals(0, self._CallMain())
+    self._VerifyResults("v8", "ms", [
+      {"name": "Richards", "results": ["50", "100"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
+  def testTwoRuns_SubRegexp(self):
+    test_input = dict(V8_JSON)
+    test_input["run_count"] = 2
+    del test_input["results_regexp"]
+    test_input["tests"][0]["results_regexp"] = "^Richards: (.+)$"
+    test_input["tests"][1]["results_regexp"] = "^DeltaBlue: (.+)$"
+    self._WriteTestInput(test_input)
+    self._MockCommand([".", "."],
+                      ["Richards: 100\nDeltaBlue: 200\n",
+                       "Richards: 50\nDeltaBlue: 300\n"])
+    self.assertEquals(0, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["50", "100"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
+  def testNestedSuite(self):
+    self._WriteTestInput(V8_NESTED_SUITES_JSON)
+    self._MockCommand(["delta_blue", "sub/leaf", "richards"],
+                      ["DeltaBlue: 200\n",
+                       "Simple: 1 ms.\n",
+                       "Simple: 2 ms.\n",
+                       "Simple: 3 ms.\n",
+                       "Richards: 100\n",
+                       "Richards: 50\n"])
+    self.assertEquals(0, self._CallMain())
+    self.assertEquals([
+      {"units": "score",
+       "graphs": ["test", "Richards"],
+       "results": ["50", "100"],
+       "stddev": ""},
+      {"units": "ms",
+       "graphs": ["test", "Sub", "Leaf"],
+       "results": ["3", "2", "1"],
+       "stddev": ""},
+      {"units": "score",
+       "graphs": ["test", "DeltaBlue"],
+       "results": ["200"],
+       "stddev": ""},
+      ], self._LoadResults()["traces"])
+    self._VerifyErrors([])
+    self._VerifyMockMultiple(
+        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
+         "file2.js", "run.js"),
+        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
+         "file2.js", "run.js"),
+        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
+        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
+        (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
+        (path.join("out", "x64.release", "d8"), "--flag", "--flag2", "run.js"))
+
+  def testOneRunStdDevRegExp(self):
+    test_input = dict(V8_JSON)
+    test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], ["Richards: 1.234\nRichards-stddev: 0.23\n"
+                              "DeltaBlue: 10657567\nDeltaBlue-stddev: 106\n"])
+    self.assertEquals(0, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
+      {"name": "DeltaBlue", "results": ["10657567"], "stddev": "106"},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
+  def testTwoRunsStdDevRegExp(self):
+    test_input = dict(V8_JSON)
+    test_input["stddev_regexp"] = "^%s\-stddev: (.+)$"
+    test_input["run_count"] = 2
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], ["Richards: 3\nRichards-stddev: 0.7\n"
+                              "DeltaBlue: 6\nDeltaBlue-boom: 0.9\n",
+                              "Richards: 2\nRichards-stddev: 0.5\n"
+                              "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
+    self.assertEquals(1, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["2", "3"], "stddev": "0.7"},
+      {"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"},
+    ])
+    self._VerifyErrors(
+        ["Test Richards should only run once since a stddev is provided "
+         "by the test.",
+         "Test DeltaBlue should only run once since a stddev is provided "
+         "by the test.",
+         "Regexp \"^DeltaBlue\-stddev: (.+)$\" didn't match for test "
+         "DeltaBlue."])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
+  def testBuildbot(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
+    self.assertEquals(0, self._CallMain("--buildbot"))
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["1.234"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+
+  def testBuildbotWithTotal(self):
+    test_input = dict(V8_JSON)
+    test_input["total"] = True
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567\n"])
+    self.assertEquals(0, self._CallMain("--buildbot"))
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["1.234"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+      {"name": "Total", "results": ["3626.49109719"], "stddev": ""},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+
+  def testBuildbotWithTotalAndErrors(self):
+    test_input = dict(V8_JSON)
+    test_input["total"] = True
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
+    self.assertEquals(1, self._CallMain("--buildbot"))
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": [], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+    ])
+    self._VerifyErrors(
+        ["Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
+         "Not all traces have the same number of results."])
+    self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
+
+  def testRegexpNoMatch(self):
+    self._WriteTestInput(V8_JSON)
+    self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
+    self.assertEquals(1, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": [], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+    ])
+    self._VerifyErrors(
+        ["Regexp \"^Richards: (.+)$\" didn't match for test Richards."])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
+
+  def testOneRunGeneric(self):
+    test_input = dict(V8_GENERIC_JSON)
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], [
+      "Trace(Test1), Result(1.234), StdDev(0.23)\n"
+      "Trace(Test2), Result(10657567), StdDev(106)\n"])
+    self.assertEquals(0, self._CallMain())
+    self._VerifyResults("test", "ms", [
+      {"name": "Test1", "results": ["1.234"], "stddev": "0.23"},
+      {"name": "Test2", "results": ["10657567"], "stddev": "106"},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
+
+  def testOneRunTimingOut(self):
+    test_input = dict(V8_JSON)
+    test_input["timeout"] = 70
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], [""], timed_out=True)
+    self.assertEquals(1, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": [], "stddev": ""},
+      {"name": "DeltaBlue", "results": [], "stddev": ""},
+    ])
+    self._VerifyErrors([
+      "Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
+      "Regexp \"^DeltaBlue: (.+)$\" didn't match for test DeltaBlue.",
+    ])
+    self._VerifyMock(
+        path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
diff --git a/tools/v8heapconst.py b/tools/v8heapconst.py
index c7f1ddc..5e3e841 100644
--- a/tools/v8heapconst.py
+++ b/tools/v8heapconst.py
@@ -31,26 +31,26 @@
 # List of known V8 instance types.
 INSTANCE_TYPES = {
   64: "STRING_TYPE",
-  68: "ASCII_STRING_TYPE",
+  68: "ONE_BYTE_STRING_TYPE",
   65: "CONS_STRING_TYPE",
-  69: "CONS_ASCII_STRING_TYPE",
+  69: "CONS_ONE_BYTE_STRING_TYPE",
   67: "SLICED_STRING_TYPE",
-  71: "SLICED_ASCII_STRING_TYPE",
+  71: "SLICED_ONE_BYTE_STRING_TYPE",
   66: "EXTERNAL_STRING_TYPE",
-  70: "EXTERNAL_ASCII_STRING_TYPE",
+  70: "EXTERNAL_ONE_BYTE_STRING_TYPE",
   74: "EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
   82: "SHORT_EXTERNAL_STRING_TYPE",
-  86: "SHORT_EXTERNAL_ASCII_STRING_TYPE",
+  86: "SHORT_EXTERNAL_ONE_BYTE_STRING_TYPE",
   90: "SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE",
   0: "INTERNALIZED_STRING_TYPE",
-  4: "ASCII_INTERNALIZED_STRING_TYPE",
+  4: "ONE_BYTE_INTERNALIZED_STRING_TYPE",
   1: "CONS_INTERNALIZED_STRING_TYPE",
-  5: "CONS_ASCII_INTERNALIZED_STRING_TYPE",
+  5: "CONS_ONE_BYTE_INTERNALIZED_STRING_TYPE",
   2: "EXTERNAL_INTERNALIZED_STRING_TYPE",
-  6: "EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
+  6: "EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
   10: "EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
   18: "SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE",
-  22: "SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE",
+  22: "SHORT_EXTERNAL_ONE_BYTE_INTERNALIZED_STRING_TYPE",
   26: "SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE",
   128: "SYMBOL_TYPE",
   129: "MAP_TYPE",
@@ -135,7 +135,7 @@
   0x08081: (136, "ByteArrayMap"),
   0x080a9: (129, "MetaMap"),
   0x080d1: (131, "OddballMap"),
-  0x080f9: (4, "AsciiInternalizedStringMap"),
+  0x080f9: (4, "OneByteInternalizedStringMap"),
   0x08121: (179, "FixedArrayMap"),
   0x08149: (134, "HeapNumberMap"),
   0x08171: (137, "FreeSpaceMap"),
@@ -153,28 +153,28 @@
   0x08351: (179, "HashTableMap"),
   0x08379: (128, "SymbolMap"),
   0x083a1: (64, "StringMap"),
-  0x083c9: (68, "AsciiStringMap"),
+  0x083c9: (68, "OneByteStringMap"),
   0x083f1: (65, "ConsStringMap"),
-  0x08419: (69, "ConsAsciiStringMap"),
+  0x08419: (69, "ConsOneByteStringMap"),
   0x08441: (67, "SlicedStringMap"),
-  0x08469: (71, "SlicedAsciiStringMap"),
+  0x08469: (71, "SlicedOneByteStringMap"),
   0x08491: (66, "ExternalStringMap"),
   0x084b9: (74, "ExternalStringWithOneByteDataMap"),
-  0x084e1: (70, "ExternalAsciiStringMap"),
+  0x084e1: (70, "ExternalOneByteStringMap"),
   0x08509: (82, "ShortExternalStringMap"),
   0x08531: (90, "ShortExternalStringWithOneByteDataMap"),
   0x08559: (0, "InternalizedStringMap"),
   0x08581: (1, "ConsInternalizedStringMap"),
-  0x085a9: (5, "ConsAsciiInternalizedStringMap"),
+  0x085a9: (5, "ConsOneByteInternalizedStringMap"),
   0x085d1: (2, "ExternalInternalizedStringMap"),
   0x085f9: (10, "ExternalInternalizedStringWithOneByteDataMap"),
-  0x08621: (6, "ExternalAsciiInternalizedStringMap"),
+  0x08621: (6, "ExternalOneByteInternalizedStringMap"),
   0x08649: (18, "ShortExternalInternalizedStringMap"),
   0x08671: (26, "ShortExternalInternalizedStringWithOneByteDataMap"),
-  0x08699: (22, "ShortExternalAsciiInternalizedStringMap"),
-  0x086c1: (86, "ShortExternalAsciiStringMap"),
+  0x08699: (22, "ShortExternalOneByteInternalizedStringMap"),
+  0x086c1: (86, "ShortExternalOneByteStringMap"),
   0x086e9: (64, "UndetectableStringMap"),
-  0x08711: (68, "UndetectableAsciiStringMap"),
+  0x08711: (68, "UndetectableOneByteStringMap"),
   0x08739: (138, "ExternalInt8ArrayMap"),
   0x08761: (139, "ExternalUint8ArrayMap"),
   0x08789: (140, "ExternalInt16ArrayMap"),
diff --git a/tools/whitespace.txt b/tools/whitespace.txt
index 4517663..305e8ed 100644
--- a/tools/whitespace.txt
+++ b/tools/whitespace.txt
@@ -1,5 +1,8 @@
-You can modify this file to create no-op changelists (like this one).
+You can modify this file to create no-op changelists..
 
+Try to write something funny. And please don't add trailing whitespace.
 
-
-
+A Smi walks into a bar and says:
+"I'm so deoptimized today!"
+The doubles heard this and started to unbox.
+The Smi looked at them when a crazy v8-autoroll account showed up..............